diff --git a/.changes/2.32.10.json b/.changes/2.32.10.json new file mode 100644 index 000000000000..05ab239ec51c --- /dev/null +++ b/.changes/2.32.10.json @@ -0,0 +1,7 @@ +[ + { + "category": "``lambda``", + "description": "Add DisallowedByVpcEncryptionControl to the LastUpdateStatusReasonCode and StateReasonCode enums to represent failures caused by VPC Encryption Controls.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/2.32.11.json b/.changes/2.32.11.json new file mode 100644 index 000000000000..136f0f117b3d --- /dev/null +++ b/.changes/2.32.11.json @@ -0,0 +1,27 @@ +[ + { + "category": "``inspector2``", + "description": "This release adds a new ScanStatus called \"Unsupported Code Artifacts\". This ScanStatus will be returned when a Lambda function was not code scanned because it has unsupported code artifacts.", + "type": "api-change" + }, + { + "category": "``iam``", + "description": "Adding the ExpirationTime attribute to the delegation request resource.", + "type": "api-change" + }, + { + "category": "``partnercentral-account``", + "description": "Adding Verification API's to Partner Central Account SDK.", + "type": "api-change" + }, + { + "category": "``ecs``", + "description": "Updating stop-task API to encapsulate containers with custom stop signal", + "type": "api-change" + }, + { + "category": "``sesv2``", + "description": "Updating the desired url for `PutEmailIdentityDkimSigningAttributes` from v1 to v2", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/2.32.12.json b/.changes/2.32.12.json new file mode 100644 index 000000000000..97b185dcf3b4 --- /dev/null +++ b/.changes/2.32.12.json @@ -0,0 +1,42 @@ +[ + { + "category": "``ce``", + "description": "Add support for Cost Category resource associations including filtering by resource type on ListCostCategoryDefinitions and new ListCostCategoryResourceAssociations API.", + "type": "api-change" + }, + { + "category": "``sesv2``", + 
"description": "Update Mail Manager Archive ARN validation", + "type": "api-change" + }, + { + "category": "``rolesanywhere``", + "description": "Increases certificate string length for trust anchor source data to support ML-DSA certificates.", + "type": "api-change" + }, + { + "category": "``ec2``", + "description": "Amazon EC2 P6-B300 instances provide 8x NVIDIA Blackwell Ultra GPUs with 2.1 TB high bandwidth GPU memory, 6.4 Tbps EFA networking, 300 Gbps dedicated ENA throughput, and 4 TB of system memory. Amazon EC2 C8a instances are powered by 5th Gen AMD EPYC processors with a maximum frequency of 4.5 GHz.", + "type": "api-change" + }, + { + "category": "``partnercentral-selling``", + "description": "Deal Sizing Service for AI-based deal size estimation with AWS service-level breakdown, supporting Expansion and Migration deals across Technology, and Reseller partner cohorts, including Pricing Calculator AddOn for MAP deals and funding incentives.", + "type": "api-change" + }, + { + "category": "``identitystore``", + "description": "Updating AWS Identity Store APIs to support Attribute Extensions capability, with the first release adding Enterprise Attributes. This launch aligns Identity Store APIs with SCIM for enterprise attributes, reducing cases when customers are forced to use SCIM due to lack of SigV4 API support.", + "type": "api-change" + }, + { + "category": "``rds``", + "description": "Adding support for tagging RDS Instance/Cluster Automated Backups", + "type": "api-change" + }, + { + "category": "``redshift-serverless``", + "description": "Added GetIdentityCenterAuthToken API to retrieve encrypted authentication tokens for Identity Center integrated serverless workgroups. 
This API enables programmatic access to secure Identity Center tokens with proper error handling and parameter validation across supported SDK languages.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/2.32.13.json b/.changes/2.32.13.json new file mode 100644 index 000000000000..0acb8bdf3cda --- /dev/null +++ b/.changes/2.32.13.json @@ -0,0 +1,32 @@ +[ + { + "category": "``account``", + "description": "This release adds a new API (GetGovCloudAccountInformation) used to retrieve information about a linked GovCloud account from the standard AWS partition.", + "type": "api-change" + }, + { + "category": "``appsync``", + "description": "Update Event API to require EventConfig parameter in creation and update requests.", + "type": "api-change" + }, + { + "category": "``route53``", + "description": "Amazon Route 53 now supports the EU (Germany) Region (eusc-de-east-1) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region", + "type": "api-change" + }, + { + "category": "``guardduty``", + "description": "Adding support for Ec2LaunchTemplate Version field", + "type": "api-change" + }, + { + "category": "``ivs-realtime``", + "description": "Token Exchange introduces seamless token exchange capabilities for IVS RTX, enabling customers to upgrade or downgrade token capabilities and update token attributes within the IVS client SDK without forcing clients to disconnect and reconnect.", + "type": "api-change" + }, + { + "category": "``mgn``", + "description": "Added parameters encryption, IPv4/IPv6 protocol configuration, and enhanced tagging support for replication operations.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/2.32.14.json b/.changes/2.32.14.json new file mode 100644 index 000000000000..2c14eb2bbe42 --- /dev/null +++ b/.changes/2.32.14.json @@ -0,0 +1,42 @@ +[ + { + "category": "Serializers", + "description": "Adds support for host prefix to BaseRpcV2Serializer 
class", + "type": "bugfix" + }, + { + "category": "``billingconductor``", + "description": "Launch itemized custom line item and service line item filter", + "type": "api-change" + }, + { + "category": "``bedrock``", + "description": "Automated Reasoning checks in Amazon Bedrock Guardrails is capable of generating policy scenarios to validate policies. The GetAutomatedReasoningPolicyBuildWorkflowResultAssets API now adds POLICY SCENARIO asset type, allowing customers to retrieve scenarios generated by the build workflow.", + "type": "api-change" + }, + { + "category": "``signer``", + "description": "Adds support for Signer GetRevocationStatus with updated endpoints", + "type": "api-change" + }, + { + "category": "``cloudwatch``", + "description": "This release introduces two additional protocols AWS JSON 1.1 and Smithy RPC v2 CBOR, replacing the currently utilized one, AWSQuery. AWS SDKs will prioritize the protocol that is the most performant for each language.", + "type": "api-change" + }, + { + "category": "``partnercentral-selling``", + "description": "Adds support for the new Project.AwsPartition field on Opportunity and AWS Opportunity Summary. 
Use this field to specify the AWS partition where the opportunity will be deployed.", + "type": "api-change" + }, + { + "category": "``opensearch``", + "description": "The CreateApplication API now supports an optional kms key arn parameter to allow customers to specify a CMK for application encryption.", + "type": "api-change" + }, + { + "category": "``odb``", + "description": "The following APIs now return CloudExadataInfrastructureArn and OdbNetworkArn fields for improved resource identification and AWS service integration - GetCloudVmCluster, ListCloudVmClusters, GetCloudAutonomousVmCluster, and ListCloudAutonomousVmClusters.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/2.32.15.json b/.changes/2.32.15.json new file mode 100644 index 000000000000..fed061d09591 --- /dev/null +++ b/.changes/2.32.15.json @@ -0,0 +1,32 @@ +[ + { + "category": "``lambda``", + "description": "Add Dotnet 10 (dotnet10) support to AWS Lambda.", + "type": "api-change" + }, + { + "category": "``secretsmanager``", + "description": "Add SortBy parameter to ListSecrets", + "type": "api-change" + }, + { + "category": "``sesv2``", + "description": "Update GetEmailIdentity and CreateEmailIdentity response to include SigningHostedZone in DkimAttributes. Updated PutEmailIdentityDkimSigningAttributes Response to include SigningHostedZone.", + "type": "api-change" + }, + { + "category": "``quicksight``", + "description": "This release adds new GetIdentityContext API, Dashboard customization options for tables and pivot tables, Visual styling options- borders and decals, map GeocodingPreferences, KeyPairCredentials for DataSourceCredentials. Snapshot APIs now support registered users. 
Parameters limit increased to 400", + "type": "api-change" + }, + { + "category": "python", + "description": "Upgrade bundled Python interpreter to version 3.13.11.", + "type": "enhancement" + }, + { + "category": "``organizations``", + "description": "Add support for policy operations on the NETWORK SECURITY DIRECTOR POLICY policy type.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/2.32.16.json b/.changes/2.32.16.json new file mode 100644 index 000000000000..a9feed7554ae --- /dev/null +++ b/.changes/2.32.16.json @@ -0,0 +1,22 @@ +[ + { + "category": "``connect``", + "description": "Amazon Connect now offers automated post-chat surveys triggered when customers end conversations. This captures timely feedback while experience is fresh, using either a no-code form builder or Amazon Lex-powered interactive surveys.", + "type": "api-change" + }, + { + "category": "``bcm-recommended-actions``", + "description": "Added new freetier action types to RecommendedAction.type.", + "type": "api-change" + }, + { + "category": "``datasync``", + "description": "Adds Enhanced mode support for NFS and SMB locations. SMB credentials are now managed via Secrets Manager, and may be encrypted with service or customer managed keys. Increases AgentArns maximum count to 8 (max 4 per TaskMode). 
Adds folder counters to DescribeTaskExecution for Enhanced mode tasks.", + "type": "api-change" + }, + { + "category": "``workspaces-web``", + "description": "Adds support for portal branding customization, enabling administrators to personalize end-user portals with custom assets.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/2.32.17.json b/.changes/2.32.17.json new file mode 100644 index 000000000000..ec1be538a243 --- /dev/null +++ b/.changes/2.32.17.json @@ -0,0 +1,57 @@ +[ + { + "category": "``bedrock-agentcore-control``", + "description": "This release updates broken links for AgentCore Policy APIs in the AWS CLI and SDK resources.", + "type": "api-change" + }, + { + "category": "``ec2``", + "description": "EC2 Capacity Manager now supports SpotTotalCount, SpotTotalInterruptions and SpotInterruptionRate metrics for both vCPU and instance units.", + "type": "api-change" + }, + { + "category": "``logs``", + "description": "This release allows you to import your historical CloudTrail Lake data into CloudWatch with a few steps, enabling you to easily consolidate operational, security, and compliance data in one place.", + "type": "api-change" + }, + { + "category": "``health``", + "description": "Updating Health API endpoint generation for dualstack only regions", + "type": "api-change" + }, + { + "category": "``route53resolver``", + "description": "Adds support for enabling detailed metrics on Route 53 Resolver endpoints using RniEnhancedMetricsEnabled and TargetNameServerMetricsEnabled in the CreateResolverEndpoint and UpdateResolverEndpoint APIs, providing enhanced visibility into Resolver endpoint and target name server performance.", + "type": "api-change" + }, + { + "category": "``connect``", + "description": "Amazon Connect now supports outbound WhatsApp contacts via the Send message block or StartOutboundChatContact API. Send proactive messages for surveys, reminders, and updates. 
Offer customers the option to switch to WhatsApp while in queue, eliminating hold time.", + "type": "api-change" + }, + { + "category": "``service-quotas``", + "description": "Add support for SQ Dashboard Api", + "type": "api-change" + }, + { + "category": "``mediatailor``", + "description": "Added support for Ad Decision Server Configuration enabling HTTP POST requests with custom bodies, headers, GZIP compression, and dynamic variables. No changes required for existing GET request configurations.", + "type": "api-change" + }, + { + "category": "``glacier``", + "description": "Documentation updates for Amazon Glacier's maintenance mode", + "type": "api-change" + }, + { + "category": "``entityresolution``", + "description": "Support Customer Profiles Integration for AWS Entity Resolution", + "type": "api-change" + }, + { + "category": "``s3``", + "description": "This release adds support for the new optional field 'LifecycleExpirationDate' in S3 Inventory configurations.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/2.32.18.json b/.changes/2.32.18.json new file mode 100644 index 000000000000..07089a522af1 --- /dev/null +++ b/.changes/2.32.18.json @@ -0,0 +1,12 @@ +[ + { + "category": "``iot``", + "description": "Add support for dynamic payloads in IoT Device Management Commands", + "type": "api-change" + }, + { + "category": "``timestream-influxdb``", + "description": "This release adds support for rebooting InfluxDB DbInstances and DbClusters", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/2.32.19.json b/.changes/2.32.19.json new file mode 100644 index 000000000000..be1ab3e00aca --- /dev/null +++ b/.changes/2.32.19.json @@ -0,0 +1,47 @@ +[ + { + "category": "``payment-cryptography``", + "description": "Support for AS2805 standard. 
Modifications to import-key and export-key to support AS2805 variants.", + "type": "api-change" + }, + { + "category": "``kafkaconnect``", + "description": "Support dual-stack network connectivity for connectors via NetworkType field.", + "type": "api-change" + }, + { + "category": "``inspector-scan``", + "description": "Adds an additional OutputFormat", + "type": "api-change" + }, + { + "category": "``mediaconvert``", + "description": "Adds support for tile encoding in HEVC and audio for video overlays.", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "Adding the newly launched p6-b300.48xlarge ec2 instance support in Sagemaker(Hyperpod,Training and Sceptor)", + "type": "api-change" + }, + { + "category": "``payment-cryptography-data``", + "description": "Support for AS2805 standard. New API GenerateAs2805KekValidation and changes to translate pin, GenerateMac and VerifyMac to support AS2805 key variants.", + "type": "api-change" + }, + { + "category": "``mediapackagev2``", + "description": "This release adds support for SPEKE V2 content key encryption in MediaPackage v2 Origin Endpoints.", + "type": "api-change" + }, + { + "category": "``gameliftstreams``", + "description": "Added new stream group operation parameters for scale-on-demand capacity with automatic prewarming. Added new Gen6 stream classes based on the EC2 G6 instance family. 
Added new StartStreamSession parameter for exposure of real-time performance stats to clients.", + "type": "api-change" + }, + { + "category": "``guardduty``", + "description": "Add support for dbiResourceId in finding.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/2.32.20.json b/.changes/2.32.20.json new file mode 100644 index 000000000000..0e5ecd97decb --- /dev/null +++ b/.changes/2.32.20.json @@ -0,0 +1,72 @@ +[ + { + "category": "``iot``", + "description": "This release adds message batching for the IoT Rules Engine HTTP action.", + "type": "api-change" + }, + { + "category": "``ec2``", + "description": "This release adds AvailabilityZoneId support for CreateFleet, ModifyFleet, DescribeFleets, RequestSpotFleet, ModifySpotFleetRequests and DescribeSpotFleetRequests APIs.", + "type": "api-change" + }, + { + "category": "``ecr``", + "description": "Adds support for ECR Create On Push", + "type": "api-change" + }, + { + "category": "``sesv2``", + "description": "Amazon SES introduces Email Validation feature which checks email addresses for syntax errors, domain validity, and risky addresses to help maintain deliverability and protect sender reputation. SES also adds resource tagging and ABAC support for EmailTemplates and CustomVerificationEmailTemplates.", + "type": "api-change" + }, + { + "category": "``ssm-sap``", + "description": "Added \"Stopping\" for the HANA Database Status.", + "type": "api-change" + }, + { + "category": "``cleanrooms``", + "description": "Adding support for collaboration change requests requiring an approval workflow. 
Adding support for change requests that grant or revoke results receiver ability and modifying auto approved change types in an existing collaboration.", + "type": "api-change" + }, + { + "category": "``opensearch``", + "description": "Amazon OpenSearch Service adds support for warm nodes, enabling new multi-tier architecture.", + "type": "api-change" + }, + { + "category": "``bedrock-agentcore-control``", + "description": "Feature to support header exchanges between Bedrock AgentCore Gateway Targets and client, along with propagating query parameter to the configured targets.", + "type": "api-change" + }, + { + "category": "``ecs``", + "description": "Adding support for Event Windows via a new ECS account setting \"fargateEventWindows\". When enabled, ECS Fargate will use the configured event window for patching tasks. Introducing \"CapacityOptionType\" for CreateCapacityProvider API, allowing support for Spot capacity for ECS Managed Instances.", + "type": "api-change" + }, + { + "category": "``arc-region-switch``", + "description": "New API to list Route 53 health checks created by ARC region switch for a plan in a specific AWS Region using the Region switch Regional data plane.", + "type": "api-change" + }, + { + "category": "``artifact``", + "description": "Add support for ListReportVersions API for the calling AWS account.", + "type": "api-change" + }, + { + "category": "``appstream``", + "description": "Added support for new operating systems (1) Ubuntu 24.04 Pro LTS on Elastic fleets, and (2) Microsoft Server 2025 on Always-On and On-Demand fleets", + "type": "api-change" + }, + { + "category": "``bedrock-data-automation``", + "description": "Blueprint Optimization (BPO) is a new Amazon Bedrock Data Automation (BDA) capability that improves blueprint inference accuracy using example content assets and ground truth data. 
BPO works by generating better instructions for fields in the Blueprint using provided data.", + "type": "api-change" + }, + { + "category": "cloudtrail", + "description": "Fixed performance issue in cloudtrail validate-logs command by scoping S3 digest file listing to the trail's region instead of processing digest files from all regions.", + "type": "enhancement" + } +] \ No newline at end of file diff --git a/.changes/2.32.21.json b/.changes/2.32.21.json new file mode 100644 index 000000000000..13f0474241d9 --- /dev/null +++ b/.changes/2.32.21.json @@ -0,0 +1,37 @@ +[ + { + "category": "``qbusiness``", + "description": "It is a internal bug fix for region expansion", + "type": "api-change" + }, + { + "category": "``iot``", + "description": "This release adds event-based logging feature that enables granular event logging controls for AWS IoT logs.", + "type": "api-change" + }, + { + "category": "``connect``", + "description": "Adding support for Custom Metrics and Pre-Defined Attributes to GetCurrentMetricData API.", + "type": "api-change" + }, + { + "category": "``workspaces-web``", + "description": "Add support for WebAuthn under user settings.", + "type": "api-change" + }, + { + "category": "``wickr``", + "description": "AWS Wickr now provides a suite of admin APIs to allow you to programmatically manage secure communication for Wickr networks at scale. These APIs enable you to automate administrative workflows including user lifecycle management, network configuration, and security group administration.", + "type": "api-change" + }, + { + "category": "``emr-serverless``", + "description": "Added JobLevelCostAllocationConfiguration field to enable cost allocation reporting at the job level, providing more granular visibility into EMR Serverless charges", + "type": "api-change" + }, + { + "category": "``arc-region-switch``", + "description": "Automatic Plan Execution Reports allow customers to maintain a concise record of their Region switch Plan executions. 
This enables customer SREs and leadership to have a clear view of their recovery posture based on the generated reports for their Plan executions.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/2.32.22.json b/.changes/2.32.22.json new file mode 100644 index 000000000000..bf5dd341e60a --- /dev/null +++ b/.changes/2.32.22.json @@ -0,0 +1,27 @@ +[ + { + "category": "``ecs``", + "description": "Introduces a text-only mode to the existing ECS Express Mode service commands. Text-only mode can be enabled via using the ``--mode TEXT-ONLY`` flag with the ``ecs monitor-express-gateway-service`` command, or via using the ``--monitor-mode TEXT-ONLY`` and ``--monitor-resources`` flags with the ``ecs create-express-gateway-service``, ``ecs update-express-gateway-service``, or ``ecs delete-express-gateway-service`` commands.", + "type": "enhancement" + }, + { + "category": "``guardduty``", + "description": "Make accountIds a required field in GetRemainingFreeTrialDays API to reflect service behavior.", + "type": "api-change" + }, + { + "category": "``config``", + "description": "Added supported resourceTypes for Config from July to November 2025", + "type": "api-change" + }, + { + "category": "``pcs``", + "description": "Change API Reference Documentation for default Mode in Accounting and SlurmRest", + "type": "api-change" + }, + { + "category": "``ec2``", + "description": "Adds support for linkedGroupId on the CreatePlacementGroup and DescribePlacementGroups APIs. 
The linkedGroupId parameter is reserved for future use.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/2.32.23.json b/.changes/2.32.23.json new file mode 100644 index 000000000000..38ee2fcf9dce --- /dev/null +++ b/.changes/2.32.23.json @@ -0,0 +1,22 @@ +[ + { + "category": "``s3``", + "description": "Adds new parameter ``--case-conflict`` that configures how case conflicts are handled on case-insensitive filesystems", + "type": "enhancement" + }, + { + "category": "``geo-places``", + "description": "Adds support for InferredSecondaryAddress place type, Designator in SecondaryAddressComponent and Heading in ReverseGeocode.", + "type": "api-change" + }, + { + "category": "``s3``", + "description": "Add additional validation to Outpost bucket names.", + "type": "api-change" + }, + { + "category": "``pinpoint-sms-voice-v2``", + "description": "This release adds support for the Registration Reviewer feature, which provides generative AI feedback on a phone number or sender ID registration to ensure completeness before sending to downstream (carrier) review.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/2.32.24.json b/.changes/2.32.24.json new file mode 100644 index 000000000000..27f7a7737ace --- /dev/null +++ b/.changes/2.32.24.json @@ -0,0 +1,7 @@ +[ + { + "category": "``medialive``", + "description": "AWS Elemental MediaLive now supports Pipeline Locking using Video Alignment as well as linked single pipeline channels to enable cross-channel and cross-region Pipeline Locking workflows.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/2.32.25.json b/.changes/2.32.25.json new file mode 100644 index 000000000000..b0a0cb439f49 --- /dev/null +++ b/.changes/2.32.25.json @@ -0,0 +1,17 @@ +[ + { + "category": "``quicksight``", + "description": "This release adds support for quick users to be able to perform role upgrades on their own. 
Additionally it allows admins to make this feature admin or auto approval along with new self upgrade capability that can be restricted by Admins.", + "type": "api-change" + }, + { + "category": "``connect``", + "description": "Changes for Contact for Global Search", + "type": "api-change" + }, + { + "category": "``elastictranscoder``", + "description": "The elastictranscoder client has been removed following the deprecation of the service.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/2.32.26.json b/.changes/2.32.26.json new file mode 100644 index 000000000000..b2170764a679 --- /dev/null +++ b/.changes/2.32.26.json @@ -0,0 +1,12 @@ +[ + { + "category": "``kafkaconnect``", + "description": "This change sets the KafkaConnect GovCloud FIPS and FIPS DualStack endpoints to use kafkaconnect instead of kafkaconnect-fips as the service name. This is done to match the Kafka endpoints.", + "type": "api-change" + }, + { + "category": "``connect``", + "description": "Adds support for searching global contacts using the ActiveRegions filter, and pagination support for ListSecurityProfileFlowModules and ListEntitySecurityProfiles.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/2.32.27.json b/.changes/2.32.27.json new file mode 100644 index 000000000000..521f8792a477 --- /dev/null +++ b/.changes/2.32.27.json @@ -0,0 +1,12 @@ +[ + { + "category": "``cleanrooms``", + "description": "Added support for publishing detailed metrics to CloudWatch for operational monitoring of collaborations, including query performance and resource utilization.", + "type": "api-change" + }, + { + "category": "``identitystore``", + "description": "This change introduces \"Roles\" attribute for User entities supported by AWS Identity Store SDK.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/2.32.28.json b/.changes/2.32.28.json new file mode 100644 index 000000000000..aaf13139942b --- /dev/null +++ 
b/.changes/2.32.28.json @@ -0,0 +1,7 @@ +[ + { + "category": "``s3``", + "description": "Reverts addition of ``--case-conflict`` feature which caused a performance regression when copying from S3 to large local directories", + "type": "bugfix" + } +] \ No newline at end of file diff --git a/.changes/2.32.29.json b/.changes/2.32.29.json new file mode 100644 index 000000000000..afce51b75727 --- /dev/null +++ b/.changes/2.32.29.json @@ -0,0 +1,7 @@ +[ + { + "category": "``cleanroomsml``", + "description": "AWS Clean Rooms ML now supports advanced Spark configurations to optimize SQL performance when creating an MLInputChannel or an audience generation job.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/2.32.30.json b/.changes/2.32.30.json new file mode 100644 index 000000000000..cd950676433e --- /dev/null +++ b/.changes/2.32.30.json @@ -0,0 +1,12 @@ +[ + { + "category": "``ce``", + "description": "This release updates existing reservation recommendations API to support deployment model.", + "type": "api-change" + }, + { + "category": "``emr-serverless``", + "description": "Added support for enabling disk encryption using customer managed AWS KMS keys to CreateApplication, UpdateApplication and StartJobRun APIs.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/2.32.31.json b/.changes/2.32.31.json new file mode 100644 index 000000000000..477d788c36c4 --- /dev/null +++ b/.changes/2.32.31.json @@ -0,0 +1,7 @@ +[ + { + "category": "``workspaces``", + "description": "Add StateMessage and ProgressPercentage fields to DescribeCustomWorkspaceImageImport API response.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/2.32.8.json b/.changes/2.32.8.json new file mode 100644 index 000000000000..39eeb138558b --- /dev/null +++ b/.changes/2.32.8.json @@ -0,0 +1,112 @@ +[ + { + "category": "``bedrock-agentcore``", + "description": "Support for AgentCore Evaluations and Episodic memory 
strategy for AgentCore Memory.", + "type": "api-change" + }, + { + "category": "``datazone``", + "description": "Amazon DataZone now supports exporting Catalog datasets as Amazon S3 tables, and provides automatic business glossary term suggestions for data assets.", + "type": "api-change" + }, + { + "category": "``securityhub``", + "description": "ITSM enhancements: DRYRUN mode for testing ticket creation, ServiceNow now uses AWS Secrets Manager for credentials, ConnectorRegistrationsV2 renamed to RegisterConnectorV2, added ServiceQuotaExceededException error, and ConnectorStatus visibility in CreateConnectorV2.", + "type": "api-change" + }, + { + "category": "``s3``", + "description": "New S3 Storage Class FSX_ONTAP", + "type": "api-change" + }, + { + "category": "``lambda``", + "description": "Launching Lambda durable functions - a new feature to build reliable multi-step applications and AI workflows natively within the Lambda developer experience.", + "type": "api-change" + }, + { + "category": "``s3control``", + "description": "Add support for S3 Storage Lens Advanced Performance Metrics, Expanded Prefixes metrics report, and export to S3 Tables.", + "type": "api-change" + }, + { + "category": "``nova-act``", + "description": "Initial release of Nova Act SDK. 
The Nova Act service enables customers to build and manage fleets of agents for automating production UI workflows with high reliability, fastest time-to-value, and ease of implementation at scale.", + "type": "api-change" + }, + { + "category": "``bedrock-agentcore-control``", + "description": "Supports AgentCore Evaluations, Policy, Episodic Memory Strategy, Resource Based Policy for Runtime and Gateway APIs, API Gateway Rest API Targets and enhances JWT authorizer.", + "type": "api-change" + }, + { + "category": "``bedrock``", + "description": "Adds the audioDataDeliveryEnabled boolean field to the Model Invocation Logging Configuration.", + "type": "api-change" + }, + { + "category": "``opensearchserverless``", + "description": "GPU-acceleration helps you build large-scale vector databases faster and more efficiently. You can enable this feature on new OpenSearch domains and OpenSearch Serverless collections. This feature uses GPU-acceleration to reduce the time needed to index data into vector indexes.", + "type": "api-change" + }, + { + "category": "``savingsplans``", + "description": "Added support for Amazon Database Savings Plans", + "type": "api-change" + }, + { + "category": "``logs``", + "description": "CloudWatch Logs adds managed S3 Tables integration to access logs using other analytical tools, as well as facets and field indexing to simplify log analytics in CloudWatch Logs Insights.", + "type": "api-change" + }, + { + "category": "``opensearch``", + "description": "GPU-acceleration helps you build large-scale vector databases faster and more efficiently. You can enable this feature on new OpenSearch domains and OpenSearch Serverless collections. 
This feature uses GPU-acceleration to reduce the time needed to index data into vector indexes.", + "type": "api-change" + }, + { + "category": "``observabilityadmin``", + "description": "CloudWatch Observability Admin adds pipelines configuration for third party log ingestion and transformation of all logs ingested, integration of CloudWatch logs with S3 Tables, and AWS account or organization level enablement for 7 AWS services.", + "type": "api-change" + }, + { + "category": "``s3vectors``", + "description": "Amazon S3 Vectors provides cost-effective, elastic, and durable vector storage for queries based on semantic meaning and similarity.", + "type": "api-change" + }, + { + "category": "``bedrock-runtime``", + "description": "Adds support for Audio Blocks and Streaming Image Output plus new Stop Reasons of malformed_model_output and malformed_tool_use.", + "type": "api-change" + }, + { + "category": "``sagemaker``", + "description": "Added support for serverless MLflow Apps.\n\nAdded support for new HubContentTypes (DataSet and JsonDoc) in Private Hub for AI model customization assets, enabling tracking and management of training datasets and evaluators (reward functions/prompts) throughout the ML lifecycle.", + "type": "api-change" + }, + { + "category": "``rds``", + "description": "RDS Oracle and SQL Server: Add support for adding, modifying, and removing additional storage volumes, offering up to 256TiB storage; RDS SQL Server: Support Developer Edition via custom engine versions for development and testing purposes; M7i/R7i instances with Optimize CPU for cost savings.", + "type": "api-change" + }, + { + "category": "``s3tables``", + "description": "Add storage class, replication, and table record expiration features to S3 Tables.", + "type": "api-change" + }, + { + "category": "``ce``", + "description": "This release updates existing Savings Plans Purchase Analyzer and Recommendations APIs to support Database Savings Plans.", + "type": "api-change" + }, + 
{ + "category": "``guardduty``", + "description": "Adding support for extended threat detection for Amazon EC2 and Amazon ECS. Adding support for wild card suppression rules.", + "type": "api-change" + }, + { + "category": "``fsx``", + "description": "S3 Access Points support for FSx for NetApp ONTAP", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/2.32.9.json b/.changes/2.32.9.json new file mode 100644 index 000000000000..3e00c2bf89b2 --- /dev/null +++ b/.changes/2.32.9.json @@ -0,0 +1,12 @@ +[ + { + "category": "``sagemaker``", + "description": "Introduces Serverless training: A fully managed compute infrastructure that abstracts away all infrastructure complexity, allowing you to focus purely on model development.\n\nAdded AI model customization assets used to train, refine, and evaluate custom models during the model customization process.", + "type": "api-change" + }, + { + "category": "``bedrock``", + "description": "Adding support in Amazon Bedrock to customize models with reinforcement fine-tuning (RFT) and support for updating the existing Custom Model Deployments.", + "type": "api-change" + } +] \ No newline at end of file diff --git a/.changes/next-release/enhancement-HTTP-34569.json b/.changes/next-release/enhancement-HTTP-34569.json new file mode 100644 index 000000000000..86e68003779b --- /dev/null +++ b/.changes/next-release/enhancement-HTTP-34569.json @@ -0,0 +1,5 @@ +{ + "type": "enhancement", + "category": "HTTP", + "description": "Move 100-continue behavior to use `HTTPConnections` request interface." 
+} diff --git a/.changes/next-release/enhancement-login-27668.json b/.changes/next-release/enhancement-login-27668.json new file mode 100644 index 000000000000..d34fab79d458 --- /dev/null +++ b/.changes/next-release/enhancement-login-27668.json @@ -0,0 +1,5 @@ +{ + "type": "enhancement", + "category": "``login``", + "description": "Prevent ``aws login`` from updating a profile with a different style of existing credentials." +} diff --git a/.changes/next-release/enhancement-urllib3-48038.json b/.changes/next-release/enhancement-urllib3-48038.json new file mode 100644 index 000000000000..8e3702e5ffe6 --- /dev/null +++ b/.changes/next-release/enhancement-urllib3-48038.json @@ -0,0 +1,5 @@ +{ + "type": "enhancement", + "category": "urllib3", + "description": "Update urllib3 to version 2.6.3" +} diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml index 443b2333aed6..72fd065a69a0 100644 --- a/.github/workflows/changelog.yml +++ b/.github/workflows/changelog.yml @@ -41,6 +41,9 @@ on: link. 
required: true +permissions: + contents: write + jobs: add-changelog: runs-on: Ubuntu-latest diff --git a/.github/workflows/closed-issue-message.yml b/.github/workflows/closed-issue-message.yml index 895153ce2567..9f8a6d06e6f3 100644 --- a/.github/workflows/closed-issue-message.yml +++ b/.github/workflows/closed-issue-message.yml @@ -2,6 +2,9 @@ name: Closed Issue Message on: issues: types: [closed] +permissions: + issues: write + jobs: auto_comment: runs-on: ubuntu-latest diff --git a/.github/workflows/fail-master-prs.yml b/.github/workflows/fail-master-prs.yml index 671b81edb066..e6ca087aab00 100644 --- a/.github/workflows/fail-master-prs.yml +++ b/.github/workflows/fail-master-prs.yml @@ -4,6 +4,9 @@ on: pull_request: branches: [ master ] +permissions: + contents: read + jobs: fail: runs-on: ubuntu-latest diff --git a/.github/workflows/pull-request-build.yml b/.github/workflows/pull-request-build.yml new file mode 100644 index 000000000000..ea5e395e8702 --- /dev/null +++ b/.github/workflows/pull-request-build.yml @@ -0,0 +1,55 @@ +name: Build internal AWS CLI v2 +on: + pull_request: + types: [ opened, synchronize, ready_for_review ] + +concurrency: + group: start-pull-request-build-${{ github.ref }} + cancel-in-progress: true + +env: + # constants + DOWNLOAD_FOLDER: '.build-scripts/' + SCRIPT_LOCATION: 'workflows/start-pull-request-build/pull-request-build-v1.sh' + + # custom variables + IAM_ROLE_ARN: 'arn:aws:iam::807479859547:role/AwsCliGitHubRole' + ROLE_SESSION_DURATION_SECONDS: 7200 + +jobs: + aws-cli-v2-pr-build: + # Don't run on drafts or for fork-orign PRs, since they won't have access + if: github.event.pull_request.draft == false && github.event.pull_request.head.repo.full_name == github.repository + runs-on: ubuntu-latest + permissions: + id-token: write + issues: write + pull-requests: write + contents: read + steps: + - name: Configure AWS Credentials + uses: aws-actions/configure-aws-credentials@main + with: + role-to-assume: ${{ env.IAM_ROLE_ARN 
}} + role-session-name: PullRequestBuildGitHubAction + role-duration-seconds: ${{ env.ROLE_SESSION_DURATION_SECONDS }} + aws-region: us-west-2 + - name: Download Build Script + run: | + aws s3 cp s3://aws-sdk-builds-github-assets-prod-us-west-2/$SCRIPT_LOCATION ./$DOWNLOAD_FOLDER/$SCRIPT_LOCATION --no-progress + chmod +x ./$DOWNLOAD_FOLDER/$SCRIPT_LOCATION + - name: Build + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + REPO: ${{ github.repository }} + HEAD_REF: ${{ github.event.pull_request.head.ref }} + PR_NUMBER: ${{ github.event.pull_request.number }} + RUN_ID: ${{ github.run_id }} + run: | + ./$DOWNLOAD_FOLDER/$SCRIPT_LOCATION \ + --repo "$REPO" \ + --branch "$HEAD_REF" \ + --pr-number "$PR_NUMBER" \ + --run-id "$RUN_ID" + timeout-minutes: 120 + diff --git a/.github/workflows/run-dep-tests.yml b/.github/workflows/run-dep-tests.yml index 386345e23203..8a927a41d289 100644 --- a/.github/workflows/run-dep-tests.yml +++ b/.github/workflows/run-dep-tests.yml @@ -5,6 +5,9 @@ on: pull_request: branches-ignore: [ master ] +permissions: + contents: read + jobs: build: diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml index 72b44b52082d..d08e8ed8b493 100644 --- a/.github/workflows/run-tests.yml +++ b/.github/workflows/run-tests.yml @@ -6,6 +6,9 @@ on: pull_request: branches-ignore: [ master ] +permissions: + contents: read + jobs: build: diff --git a/.github/workflows/source-dist-tests.yml b/.github/workflows/source-dist-tests.yml index db7d7012bbee..76fc13ff8244 100644 --- a/.github/workflows/source-dist-tests.yml +++ b/.github/workflows/source-dist-tests.yml @@ -6,6 +6,9 @@ on: pull_request: branches-ignore: [ master ] +permissions: + contents: read + jobs: build: diff --git a/.github/workflows/stale_community_prs.yml b/.github/workflows/stale_community_prs.yml index 95db22970161..26d97a66169a 100644 --- a/.github/workflows/stale_community_prs.yml +++ b/.github/workflows/stale_community_prs.yml @@ -1,6 +1,9 @@ name: 'Check stale 
community PRs.' on: workflow_dispatch +permissions: + pull-requests: write + jobs: stale-implementation-stage: runs-on: ubuntu-latest diff --git a/.github/workflows/update-lockfiles.yml b/.github/workflows/update-lockfiles.yml index 4a9703d2b674..6e5db65315c5 100644 --- a/.github/workflows/update-lockfiles.yml +++ b/.github/workflows/update-lockfiles.yml @@ -20,6 +20,9 @@ on: the generated files. +permissions: + contents: write + jobs: update-lockfiles: diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 9cb8b15f8318..f59fe5a825e8 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,257 @@ CHANGELOG ========= +2.32.31 +======= + +* api-change:``workspaces``: Add StateMessage and ProgressPercentage fields to DescribeCustomWorkspaceImageImport API response. + + +2.32.30 +======= + +* api-change:``ce``: This release updates existing reservation recommendations API to support deployment model. +* api-change:``emr-serverless``: Added support for enabling disk encryption using customer managed AWS KMS keys to CreateApplication, UpdateApplication and StartJobRun APIs. + + +2.32.29 +======= + +* api-change:``cleanroomsml``: AWS Clean Rooms ML now supports advanced Spark configurations to optimize SQL performance when creating an MLInputChannel or an audience generation job. + + +2.32.28 +======= + +* bugfix:``s3``: Reverts addition of ``--case-conflict`` feature which caused a performance regression when copying from S3 to large local directories + + +2.32.27 +======= + +* api-change:``cleanrooms``: Added support for publishing detailed metrics to CloudWatch for operational monitoring of collaborations, including query performance and resource utilization. +* api-change:``identitystore``: This change introduces "Roles" attribute for User entities supported by AWS Identity Store SDK. 
+ + +2.32.26 +======= + +* api-change:``kafkaconnect``: This change sets the KafkaConnect GovCloud FIPS and FIPS DualStack endpoints to use kafkaconnect instead of kafkaconnect-fips as the service name. This is done to match the Kafka endpoints. +* api-change:``connect``: Adds support for searching global contacts using the ActiveRegions filter, and pagination support for ListSecurityProfileFlowModules and ListEntitySecurityProfiles. + + +2.32.25 +======= + +* api-change:``quicksight``: This release adds support for quick users to be able to perform role upgrades on their own. Additionally it allows admins to make this feature admin or auto approval along with new self upgrade capability that can be restricted by Admins. +* api-change:``connect``: Changes for Contact for Global Search +* api-change:``elastictranscoder``: The elastictranscoder client has been removed following the deprecation of the service. + + +2.32.24 +======= + +* api-change:``medialive``: AWS Elemental MediaLive now supports Pipeline Locking using Video Alignment as well as linked single pipeline channels to enable cross-channel and cross-region Pipeline Locking workflows. + + +2.32.23 +======= + +* enhancement:``s3``: Adds new parameter ``--case-conflict`` that configures how case conflicts are handled on case-insensitive filesystems +* api-change:``geo-places``: Adds support for InferredSecondaryAddress place type, Designator in SecondaryAddressComponent and Heading in ReverseGeocode. +* api-change:``s3``: Add additional validation to Outpost bucket names. +* api-change:``pinpoint-sms-voice-v2``: This release adds support for the Registration Reviewer feature, which provides generative AI feedback on a phone number or sender ID registration to ensure completeness before sending to downstream (carrier) review. + + +2.32.22 +======= + +* enhancement:``ecs``: Introduces a text-only mode to the existing ECS Express Mode service commands. 
Text-only mode can be enabled by using the ``--mode TEXT-ONLY`` flag with the ``ecs monitor-express-gateway-service`` command, or by using the ``--monitor-mode TEXT-ONLY`` and ``--monitor-resources`` flags with the ``ecs create-express-gateway-service``, ``ecs update-express-gateway-service``, or ``ecs delete-express-gateway-service`` commands. +* api-change:``guardduty``: Make accountIds a required field in GetRemainingFreeTrialDays API to reflect service behavior. +* api-change:``config``: Added supported resourceTypes for Config from July to November 2025 +* api-change:``pcs``: Change API Reference Documentation for default Mode in Accounting and SlurmRest +* api-change:``ec2``: Adds support for linkedGroupId on the CreatePlacementGroup and DescribePlacementGroups APIs. The linkedGroupId parameter is reserved for future use. + + +2.32.21 +======= + +* api-change:``qbusiness``: It is an internal bug fix for region expansion +* api-change:``iot``: This release adds event-based logging feature that enables granular event logging controls for AWS IoT logs. +* api-change:``connect``: Adding support for Custom Metrics and Pre-Defined Attributes to GetCurrentMetricData API. +* api-change:``workspaces-web``: Add support for WebAuthn under user settings. +* api-change:``wickr``: AWS Wickr now provides a suite of admin APIs to allow you to programmatically manage secure communication for Wickr networks at scale. These APIs enable you to automate administrative workflows including user lifecycle management, network configuration, and security group administration. +* api-change:``emr-serverless``: Added JobLevelCostAllocationConfiguration field to enable cost allocation reporting at the job level, providing more granular visibility into EMR Serverless charges +* api-change:``arc-region-switch``: Automatic Plan Execution Reports allow customers to maintain a concise record of their Region switch Plan executions. 
This enables customer SREs and leadership to have a clear view of their recovery posture based on the generated reports for their Plan executions. + + +2.32.20 +======= + +* api-change:``iot``: This release adds message batching for the IoT Rules Engine HTTP action. +* api-change:``ec2``: This release adds AvailabilityZoneId support for CreateFleet, ModifyFleet, DescribeFleets, RequestSpotFleet, ModifySpotFleetRequests and DescribeSpotFleetRequests APIs. +* api-change:``ecr``: Adds support for ECR Create On Push +* api-change:``sesv2``: Amazon SES introduces Email Validation feature which checks email addresses for syntax errors, domain validity, and risky addresses to help maintain deliverability and protect sender reputation. SES also adds resource tagging and ABAC support for EmailTemplates and CustomVerificationEmailTemplates. +* api-change:``ssm-sap``: Added "Stopping" for the HANA Database Status. +* api-change:``cleanrooms``: Adding support for collaboration change requests requiring an approval workflow. Adding support for change requests that grant or revoke results receiver ability and modifying auto approved change types in an existing collaboration. +* api-change:``opensearch``: Amazon OpenSearch Service adds support for warm nodes, enabling new multi-tier architecture. +* api-change:``bedrock-agentcore-control``: Feature to support header exchanges between Bedrock AgentCore Gateway Targets and client, along with propagating query parameter to the configured targets. +* api-change:``ecs``: Adding support for Event Windows via a new ECS account setting "fargateEventWindows". When enabled, ECS Fargate will use the configured event window for patching tasks. Introducing "CapacityOptionType" for CreateCapacityProvider API, allowing support for Spot capacity for ECS Managed Instances. 
+* api-change:``arc-region-switch``: New API to list Route 53 health checks created by ARC region switch for a plan in a specific AWS Region using the Region switch Regional data plane. +* api-change:``artifact``: Add support for ListReportVersions API for the calling AWS account. +* api-change:``appstream``: Added support for new operating systems (1) Ubuntu 24.04 Pro LTS on Elastic fleets, and (2) Microsoft Server 2025 on Always-On and On-Demand fleets +* api-change:``bedrock-data-automation``: Blueprint Optimization (BPO) is a new Amazon Bedrock Data Automation (BDA) capability that improves blueprint inference accuracy using example content assets and ground truth data. BPO works by generating better instructions for fields in the Blueprint using provided data. +* enhancement:cloudtrail: Fixed performance issue in cloudtrail validate-logs command by scoping S3 digest file listing to the trail's region instead of processing digest files from all regions. + + +2.32.19 +======= + +* api-change:``payment-cryptography``: Support for AS2805 standard. Modifications to import-key and export-key to support AS2805 variants. +* api-change:``kafkaconnect``: Support dual-stack network connectivity for connectors via NetworkType field. +* api-change:``inspector-scan``: Adds an additional OutputFormat +* api-change:``mediaconvert``: Adds support for tile encoding in HEVC and audio for video overlays. +* api-change:``sagemaker``: Adding the newly launched p6-b300.48xlarge ec2 instance support in Sagemaker(Hyperpod,Training and Sceptor) +* api-change:``payment-cryptography-data``: Support for AS2805 standard. New API GenerateAs2805KekValidation and changes to translate pin, GenerateMac and VerifyMac to support AS2805 key variants. +* api-change:``mediapackagev2``: This release adds support for SPEKE V2 content key encryption in MediaPackage v2 Origin Endpoints. 
+* api-change:``gameliftstreams``: Added new stream group operation parameters for scale-on-demand capacity with automatic prewarming. Added new Gen6 stream classes based on the EC2 G6 instance family. Added new StartStreamSession parameter for exposure of real-time performance stats to clients. +* api-change:``guardduty``: Add support for dbiResourceId in finding. + + +2.32.18 +======= + +* api-change:``iot``: Add support for dynamic payloads in IoT Device Management Commands +* api-change:``timestream-influxdb``: This release adds support for rebooting InfluxDB DbInstances and DbClusters + + +2.32.17 +======= + +* api-change:``bedrock-agentcore-control``: This release updates broken links for AgentCore Policy APIs in the AWS CLI and SDK resources. +* api-change:``ec2``: EC2 Capacity Manager now supports SpotTotalCount, SpotTotalInterruptions and SpotInterruptionRate metrics for both vCPU and instance units. +* api-change:``logs``: This release allows you to import your historical CloudTrail Lake data into CloudWatch with a few steps, enabling you to easily consolidate operational, security, and compliance data in one place. +* api-change:``health``: Updating Health API endpoint generation for dualstack only regions +* api-change:``route53resolver``: Adds support for enabling detailed metrics on Route 53 Resolver endpoints using RniEnhancedMetricsEnabled and TargetNameServerMetricsEnabled in the CreateResolverEndpoint and UpdateResolverEndpoint APIs, providing enhanced visibility into Resolver endpoint and target name server performance. +* api-change:``connect``: Amazon Connect now supports outbound WhatsApp contacts via the Send message block or StartOutboundChatContact API. Send proactive messages for surveys, reminders, and updates. Offer customers the option to switch to WhatsApp while in queue, eliminating hold time. 
+* api-change:``service-quotas``: Add support for SQ Dashboard Api +* api-change:``mediatailor``: Added support for Ad Decision Server Configuration enabling HTTP POST requests with custom bodies, headers, GZIP compression, and dynamic variables. No changes required for existing GET request configurations. +* api-change:``glacier``: Documentation updates for Amazon Glacier's maintenance mode +* api-change:``entityresolution``: Support Customer Profiles Integration for AWS Entity Resolution +* api-change:``s3``: This release adds support for the new optional field 'LifecycleExpirationDate' in S3 Inventory configurations. + + +2.32.16 +======= + +* api-change:``connect``: Amazon Connect now offers automated post-chat surveys triggered when customers end conversations. This captures timely feedback while experience is fresh, using either a no-code form builder or Amazon Lex-powered interactive surveys. +* api-change:``bcm-recommended-actions``: Added new freetier action types to RecommendedAction.type. +* api-change:``datasync``: Adds Enhanced mode support for NFS and SMB locations. SMB credentials are now managed via Secrets Manager, and may be encrypted with service or customer managed keys. Increases AgentArns maximum count to 8 (max 4 per TaskMode). Adds folder counters to DescribeTaskExecution for Enhanced mode tasks. +* api-change:``workspaces-web``: Adds support for portal branding customization, enabling administrators to personalize end-user portals with custom assets. + + +2.32.15 +======= + +* api-change:``lambda``: Add Dotnet 10 (dotnet10) support to AWS Lambda. +* api-change:``secretsmanager``: Add SortBy parameter to ListSecrets +* api-change:``sesv2``: Update GetEmailIdentity and CreateEmailIdentity response to include SigningHostedZone in DkimAttributes. Updated PutEmailIdentityDkimSigningAttributes Response to include SigningHostedZone. 
+* api-change:``quicksight``: This release adds new GetIdentityContext API, Dashboard customization options for tables and pivot tables, Visual styling options- borders and decals, map GeocodingPreferences, KeyPairCredentials for DataSourceCredentials. Snapshot APIs now support registered users. Parameters limit increased to 400 +* enhancement:python: Upgrade bundled Python interpreter to version 3.13.11. +* api-change:``organizations``: Add support for policy operations on the NETWORK SECURITY DIRECTOR POLICY policy type. + + +2.32.14 +======= + +* bugfix:Serializers: Adds support for host prefix to BaseRpcV2Serializer class +* api-change:``billingconductor``: Launch itemized custom line item and service line item filter +* api-change:``bedrock``: Automated Reasoning checks in Amazon Bedrock Guardrails is capable of generating policy scenarios to validate policies. The GetAutomatedReasoningPolicyBuildWorkflowResultAssets API now adds POLICY SCENARIO asset type, allowing customers to retrieve scenarios generated by the build workflow. +* api-change:``signer``: Adds support for Signer GetRevocationStatus with updated endpoints +* api-change:``cloudwatch``: This release introduces two additional protocols AWS JSON 1.1 and Smithy RPC v2 CBOR, replacing the currently utilized one, AWSQuery. AWS SDKs will prioritize the protocol that is the most performant for each language. +* api-change:``partnercentral-selling``: Adds support for the new Project.AwsPartition field on Opportunity and AWS Opportunity Summary. Use this field to specify the AWS partition where the opportunity will be deployed. +* api-change:``opensearch``: The CreateApplication API now supports an optional kms key arn parameter to allow customers to specify a CMK for application encryption. 
+* api-change:``odb``: The following APIs now return CloudExadataInfrastructureArn and OdbNetworkArn fields for improved resource identification and AWS service integration - GetCloudVmCluster, ListCloudVmClusters, GetCloudAutonomousVmCluster, and ListCloudAutonomousVmClusters. + + +2.32.13 +======= + +* api-change:``account``: This release adds a new API (GetGovCloudAccountInformation) used to retrieve information about a linked GovCloud account from the standard AWS partition. +* api-change:``appsync``: Update Event API to require EventConfig parameter in creation and update requests. +* api-change:``route53``: Amazon Route 53 now supports the EU (Germany) Region (eusc-de-east-1) for latency records, geoproximity records, and private DNS for Amazon VPCs in that region +* api-change:``guardduty``: Adding support for Ec2LaunchTemplate Version field +* api-change:``ivs-realtime``: Token Exchange introduces seamless token exchange capabilities for IVS RTX, enabling customers to upgrade or downgrade token capabilities and update token attributes within the IVS client SDK without forcing clients to disconnect and reconnect. +* api-change:``mgn``: Added parameters encryption, IPv4/IPv6 protocol configuration, and enhanced tagging support for replication operations. + + +2.32.12 +======= + +* api-change:``ce``: Add support for Cost Category resource associations including filtering by resource type on ListCostCategoryDefinitions and new ListCostCategoryResourceAssociations API. +* api-change:``sesv2``: Update Mail Manager Archive ARN validation +* api-change:``rolesanywhere``: Increases certificate string length for trust anchor source data to support ML-DSA certificates. +* api-change:``ec2``: Amazon EC2 P6-B300 instances provide 8x NVIDIA Blackwell Ultra GPUs with 2.1 TB high bandwidth GPU memory, 6.4 Tbps EFA networking, 300 Gbps dedicated ENA throughput, and 4 TB of system memory. 
Amazon EC2 C8a instances are powered by 5th Gen AMD EPYC processors with a maximum frequency of 4.5 GHz. +* api-change:``partnercentral-selling``: Deal Sizing Service for AI-based deal size estimation with AWS service-level breakdown, supporting Expansion and Migration deals across Technology, and Reseller partner cohorts, including Pricing Calculator AddOn for MAP deals and funding incentives. +* api-change:``identitystore``: Updating AWS Identity Store APIs to support Attribute Extensions capability, with the first release adding Enterprise Attributes. This launch aligns Identity Store APIs with SCIM for enterprise attributes, reducing cases when customers are forced to use SCIM due to lack of SigV4 API support. +* api-change:``rds``: Adding support for tagging RDS Instance/Cluster Automated Backups +* api-change:``redshift-serverless``: Added GetIdentityCenterAuthToken API to retrieve encrypted authentication tokens for Identity Center integrated serverless workgroups. This API enables programmatic access to secure Identity Center tokens with proper error handling and parameter validation across supported SDK languages. + + +2.32.11 +======= + +* api-change:``inspector2``: This release adds a new ScanStatus called "Unsupported Code Artifacts". This ScanStatus will be returned when a Lambda function was not code scanned because it has unsupported code artifacts. +* api-change:``iam``: Adding the ExpirationTime attribute to the delegation request resource. +* api-change:``partnercentral-account``: Adding Verification APIs to Partner Central Account SDK. +* api-change:``ecs``: Updating stop-task API to encapsulate containers with custom stop signal +* api-change:``sesv2``: Updating the desired url for `PutEmailIdentityDkimSigningAttributes` from v1 to v2 + + +2.32.10 +======= + +* api-change:``lambda``: Add DisallowedByVpcEncryptionControl to the LastUpdateStatusReasonCode and StateReasonCode enums to represent failures caused by VPC Encryption Controls. 
+ + +2.32.9 +====== + +* api-change:``sagemaker``: Introduces Serverless training: A fully managed compute infrastructure that abstracts away all infrastructure complexity, allowing you to focus purely on model development. + +Added AI model customization assets used to train, refine, and evaluate custom models during the model customization process. +* api-change:``bedrock``: Adding support in Amazon Bedrock to customize models with reinforcement fine-tuning (RFT) and support for updating the existing Custom Model Deployments. + + +2.32.8 +====== + +* api-change:``bedrock-agentcore``: Support for AgentCore Evaluations and Episodic memory strategy for AgentCore Memory. +* api-change:``datazone``: Amazon DataZone now supports exporting Catalog datasets as Amazon S3 tables, and provides automatic business glossary term suggestions for data assets. +* api-change:``securityhub``: ITSM enhancements: DRYRUN mode for testing ticket creation, ServiceNow now uses AWS Secrets Manager for credentials, ConnectorRegistrationsV2 renamed to RegisterConnectorV2, added ServiceQuotaExceededException error, and ConnectorStatus visibility in CreateConnectorV2. +* api-change:``s3``: New S3 Storage Class FSX_ONTAP +* api-change:``lambda``: Launching Lambda durable functions - a new feature to build reliable multi-step applications and AI workflows natively within the Lambda developer experience. +* api-change:``s3control``: Add support for S3 Storage Lens Advanced Performance Metrics, Expanded Prefixes metrics report, and export to S3 Tables. +* api-change:``nova-act``: Initial release of Nova Act SDK. The Nova Act service enables customers to build and manage fleets of agents for automating production UI workflows with high reliability, fastest time-to-value, and ease of implementation at scale. 
+* api-change:``bedrock-agentcore-control``: Supports AgentCore Evaluations, Policy, Episodic Memory Strategy, Resource Based Policy for Runtime and Gateway APIs, API Gateway Rest API Targets and enhances JWT authorizer. +* api-change:``bedrock``: Adds the audioDataDeliveryEnabled boolean field to the Model Invocation Logging Configuration. +* api-change:``opensearchserverless``: GPU-acceleration helps you build large-scale vector databases faster and more efficiently. You can enable this feature on new OpenSearch domains and OpenSearch Serverless collections. This feature uses GPU-acceleration to reduce the time needed to index data into vector indexes. +* api-change:``savingsplans``: Added support for Amazon Database Savings Plans +* api-change:``logs``: CloudWatch Logs adds managed S3 Tables integration to access logs using other analytical tools, as well as facets and field indexing to simplify log analytics in CloudWatch Logs Insights. +* api-change:``opensearch``: GPU-acceleration helps you build large-scale vector databases faster and more efficiently. You can enable this feature on new OpenSearch domains and OpenSearch Serverless collections. This feature uses GPU-acceleration to reduce the time needed to index data into vector indexes. +* api-change:``observabilityadmin``: CloudWatch Observability Admin adds pipelines configuration for third party log ingestion and transformation of all logs ingested, integration of CloudWatch logs with S3 Tables, and AWS account or organization level enablement for 7 AWS services. +* api-change:``s3vectors``: Amazon S3 Vectors provides cost-effective, elastic, and durable vector storage for queries based on semantic meaning and similarity. +* api-change:``bedrock-runtime``: Adds support for Audio Blocks and Streaming Image Output plus new Stop Reasons of malformed_model_output and malformed_tool_use. +* api-change:``sagemaker``: Added support for serverless MLflow Apps. 
+ +Added support for new HubContentTypes (DataSet and JsonDoc) in Private Hub for AI model customization assets, enabling tracking and management of training datasets and evaluators (reward functions/prompts) throughout the ML lifecycle. +* api-change:``rds``: RDS Oracle and SQL Server: Add support for adding, modifying, and removing additional storage volumes, offering up to 256TiB storage; RDS SQL Server: Support Developer Edition via custom engine versions for development and testing purposes; M7i/R7i instances with Optimize CPU for cost savings. +* api-change:``s3tables``: Add storage class, replication, and table record expiration features to S3 Tables. +* api-change:``ce``: This release updates existing Savings Plans Purchase Analyzer and Recommendations APIs to support Database Savings Plans. +* api-change:``guardduty``: Adding support for extended threat detection for Amazon EC2 and Amazon ECS. Adding support for wild card suppression rules. +* api-change:``fsx``: S3 Access Points support for FSx for NetApp ONTAP + + 2.32.7 ====== diff --git a/awscli/__init__.py b/awscli/__init__.py index 03d6ce236234..b52139762261 100644 --- a/awscli/__init__.py +++ b/awscli/__init__.py @@ -20,7 +20,7 @@ import os import sys -__version__ = '2.32.7' +__version__ = '2.32.31' # # Get our data path to be added to botocore's search path diff --git a/awscli/botocore/awsrequest.py b/awscli/botocore/awsrequest.py index e06c89d2c548..f7a18a3d8ab6 100644 --- a/awscli/botocore/awsrequest.py +++ b/awscli/botocore/awsrequest.py @@ -66,34 +66,34 @@ class AWSConnection: def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self._original_response_cls = self.response_class - # We'd ideally hook into httplib's states, but they're all - # __mangled_vars so we use our own state var. This variable is set - # when we receive an early response from the server. If this value is - # set to True, any calls to send() are noops. 
This value is reset to - # false every time _send_request is called. This is to workaround the - # fact that py2.6 (and only py2.6) has a separate send() call for the - # body in _send_request, as opposed to endheaders(), which is where the - # body is sent in all versions > 2.6. + # This variable is set when we receive an early response from the + # server. If this value is set to True, any calls to send() are noops. + # This value is reset to false every time _send_request is called. + # This is to workaround changes in urllib3 2.0 which uses separate + # send() calls in request() instead of delegating to endheaders(), + # which is where the body is sent in CPython's HTTPConnection. self._response_received = False self._expect_header_set = False + self._send_called = False def close(self): super().close() # Reset all of our instance state we were tracking. self._response_received = False self._expect_header_set = False + self._send_called = False self.response_class = self._original_response_cls - def _send_request(self, method, url, body, headers, *args, **kwargs): + def request(self, method, url, body=None, headers=None, *args, **kwargs): + if headers is None: + headers = {} self._response_received = False if headers.get('Expect', b'') == b'100-continue': self._expect_header_set = True else: self._expect_header_set = False self.response_class = self._original_response_cls - rval = super()._send_request( - method, url, body, headers, *args, **kwargs - ) + rval = super().request(method, url, body, headers, *args, **kwargs) self._expect_header_set = False return rval @@ -210,10 +210,15 @@ def _send_message_body(self, message_body): def send(self, str): if self._response_received: - logger.debug( - "send() called, but reseponse already received. " - "Not sending data." - ) + if not self._send_called: + # urllib3 2.0 chunks and calls send potentially + # thousands of times inside `request` unlike the + # standard library. Only log this once for sanity. 
+ logger.debug( + "send() called, but response already received. " + "Not sending data." + ) + self._send_called = True return return super().send(str) diff --git a/awscli/botocore/config.py b/awscli/botocore/config.py index 83f8f9541b9b..e8cf6c83da74 100644 --- a/awscli/botocore/config.py +++ b/awscli/botocore/config.py @@ -41,7 +41,7 @@ class Config: :type user_agent_appid: str :param user_agent_appid: A value that gets included in the User-Agent string in the format "app/". Allowed characters are - ASCII alphanumerics and ``!$%&'*+-.^_`|~``. All other characters will + ASCII alphanumerics and ``!#$%&'*+-.^_`|~``. All other characters will be replaced by a ``-``. :type connect_timeout: float or int diff --git a/awscli/botocore/data/account/2021-02-01/service-2.json b/awscli/botocore/data/account/2021-02-01/service-2.json index 20d207edc47e..a4ba2ca4acd8 100644 --- a/awscli/botocore/data/account/2021-02-01/service-2.json +++ b/awscli/botocore/data/account/2021-02-01/service-2.json @@ -48,7 +48,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"InternalServerException"} ], - "documentation":"

Deletes the specified alternate contact from an Amazon Web Services account.

For complete details about how to use the alternate contact operations, see Access or updating the alternate contacts.

Before you can update the alternate contact information for an Amazon Web Services account that is managed by Organizations, you must first enable integration between Amazon Web Services Account Management and Organizations. For more information, see Enabling trusted access for Amazon Web Services Account Management.

", + "documentation":"

Deletes the specified alternate contact from an Amazon Web Services account.

For complete details about how to use the alternate contact operations, see Update the alternate contacts for your Amazon Web Services account.

Before you can update the alternate contact information for an Amazon Web Services account that is managed by Organizations, you must first enable integration between Amazon Web Services Account Management and Organizations. For more information, see Enable trusted access for Amazon Web Services Account Management.

", "idempotent":true }, "DisableRegion":{ @@ -118,7 +118,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"InternalServerException"} ], - "documentation":"

Retrieves the specified alternate contact attached to an Amazon Web Services account.

For complete details about how to use the alternate contact operations, see Access or updating the alternate contacts.

Before you can update the alternate contact information for an Amazon Web Services account that is managed by Organizations, you must first enable integration between Amazon Web Services Account Management and Organizations. For more information, see Enabling trusted access for Amazon Web Services Account Management.

" + "documentation":"

Retrieves the specified alternate contact attached to an Amazon Web Services account.

For complete details about how to use the alternate contact operations, see Update the alternate contacts for your Amazon Web Services account.

Before you can update the alternate contact information for an Amazon Web Services account that is managed by Organizations, you must first enable integration between Amazon Web Services Account Management and Organizations. For more information, see Enable trusted access for Amazon Web Services Account Management.

" }, "GetContactInformation":{ "name":"GetContactInformation", @@ -136,7 +136,26 @@ {"shape":"TooManyRequestsException"}, {"shape":"InternalServerException"} ], - "documentation":"

Retrieves the primary contact information of an Amazon Web Services account.

For complete details about how to use the primary contact operations, see Update the primary and alternate contact information.

" + "documentation":"

Retrieves the primary contact information of an Amazon Web Services account.

For complete details about how to use the primary contact operations, see Update the primary contact for your Amazon Web Services account.

" + }, + "GetGovCloudAccountInformation":{ + "name":"GetGovCloudAccountInformation", + "http":{ + "method":"POST", + "requestUri":"/getGovCloudAccountInformation", + "responseCode":200 + }, + "input":{"shape":"GetGovCloudAccountInformationRequest"}, + "output":{"shape":"GetGovCloudAccountInformationResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ResourceUnavailableException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Retrieves information about the GovCloud account linked to the specified standard account (if it exists) including the GovCloud account ID and state. To use this API, an IAM user or role must have the account:GetGovCloudAccountInformation IAM permission.

" }, "GetPrimaryEmail":{ "name":"GetPrimaryEmail", @@ -221,7 +240,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"InternalServerException"} ], - "documentation":"

Modifies the specified alternate contact attached to an Amazon Web Services account.

For complete details about how to use the alternate contact operations, see Access or updating the alternate contacts.

Before you can update the alternate contact information for an Amazon Web Services account that is managed by Organizations, you must first enable integration between Amazon Web Services Account Management and Organizations. For more information, see Enabling trusted access for Amazon Web Services Account Management.

", + "documentation":"

Modifies the specified alternate contact attached to an Amazon Web Services account.

For complete details about how to use the alternate contact operations, see Update the alternate contacts for your Amazon Web Services account.

Before you can update the alternate contact information for an Amazon Web Services account that is managed by Organizations, you must first enable integration between Amazon Web Services Account Management and Organizations. For more information, see Enable trusted access for Amazon Web Services Account Management.

", "idempotent":true }, "PutContactInformation":{ @@ -238,7 +257,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"InternalServerException"} ], - "documentation":"

Updates the primary contact information of an Amazon Web Services account.

For complete details about how to use the primary contact operations, see Update the primary and alternate contact information.

", + "documentation":"

Updates the primary contact information of an Amazon Web Services account.

For complete details about how to use the primary contact operations, see Update the primary contact for your Amazon Web Services account.

", "idempotent":true }, "StartPrimaryEmailUpdate":{ @@ -367,6 +386,15 @@ "SECURITY" ] }, + "AwsAccountState":{ + "type":"string", + "enum":[ + "PENDING_ACTIVATION", + "ACTIVE", + "SUSPENDED", + "CLOSED" + ] + }, "City":{ "type":"string", "max":50, @@ -479,7 +507,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

Specifies the 12 digit account ID number of the Amazon Web Services account that you want to access or modify with this operation.

If you do not specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation.

To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account, and the specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned.

The management account can't specify its own AccountId; it must call the operation in standalone context by not including the AccountId parameter.

To call this operation on an account that is not a member of an organization, then don't specify this parameter, and call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify.

" + "documentation":"

Specifies the 12 digit account ID number of the Amazon Web Services account that you want to access or modify with this operation.

If you do not specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation.

To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account, and the specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated administrator account assigned.

The management account can't specify its own AccountId; it must call the operation in standalone context by not including the AccountId parameter.

To call this operation on an account that is not a member of an organization, then don't specify this parameter, and call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify.

" }, "AlternateContactType":{ "shape":"AlternateContactType", @@ -539,7 +567,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

Specifies the 12 digit account ID number of the Amazon Web Services account that you want to access or modify with this operation.

If you do not specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation.

To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account, and the specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned.

The management account can't specify its own AccountId; it must call the operation in standalone context by not including the AccountId parameter.

To call this operation on an account that is not a member of an organization, then don't specify this parameter, and call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify.

" + "documentation":"

Specifies the 12 digit account ID number of the Amazon Web Services account that you want to access or modify with this operation.

If you do not specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation.

To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account, and the specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated administrator account assigned.

The management account can't specify its own AccountId; it must call the operation in standalone context by not including the AccountId parameter.

To call this operation on an account that is not a member of an organization, then don't specify this parameter, and call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify.

" } } }, @@ -566,7 +594,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

Specifies the 12 digit account ID number of the Amazon Web Services account that you want to access or modify with this operation.

If you do not specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation.

To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account, and the specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned.

The management account can't specify its own AccountId; it must call the operation in standalone context by not including the AccountId parameter.

To call this operation on an account that is not a member of an organization, then don't specify this parameter, and call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify.

" + "documentation":"

Specifies the 12 digit account ID number of the Amazon Web Services account that you want to access or modify with this operation.

If you do not specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation.

To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account, and the specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated administrator account assigned.

The management account can't specify its own AccountId; it must call the operation in standalone context by not including the AccountId parameter.

To call this operation on an account that is not a member of an organization, then don't specify this parameter, and call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify.

" }, "AlternateContactType":{ "shape":"AlternateContactType", @@ -601,6 +629,32 @@ } } }, + "GetGovCloudAccountInformationRequest":{ + "type":"structure", + "members":{ + "StandardAccountId":{ + "shape":"AccountId", + "documentation":"

Specifies the 12 digit account ID number of the Amazon Web Services account that you want to access or modify with this operation.

If you do not specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation.

To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account, and the specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated administrator account assigned.

The management account can't specify its own AccountId; it must call the operation in standalone context by not including the AccountId parameter.

To call this operation on an account that is not a member of an organization, then don't specify this parameter, and call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify.

" + } + } + }, + "GetGovCloudAccountInformationResponse":{ + "type":"structure", + "required":[ + "AccountState", + "GovCloudAccountId" + ], + "members":{ + "AccountState":{ + "shape":"AwsAccountState", + "documentation":"

The account state of the linked GovCloud account.

" + }, + "GovCloudAccountId":{ + "shape":"AccountId", + "documentation":"

The 12-digit account ID number of the linked GovCloud account.

" + } + } + }, "GetPrimaryEmailRequest":{ "type":"structure", "required":["AccountId"], @@ -753,7 +807,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

Specifies the 12 digit account ID number of the Amazon Web Services account that you want to access or modify with this operation.

If you do not specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation.

To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account, and the specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned.

The management account can't specify its own AccountId; it must call the operation in standalone context by not including the AccountId parameter.

To call this operation on an account that is not a member of an organization, then don't specify this parameter, and call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify.

" + "documentation":"

Specifies the 12 digit account ID number of the Amazon Web Services account that you want to access or modify with this operation.

If you do not specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation.

To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account, and the specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated administrator account assigned.

The management account can't specify its own AccountId; it must call the operation in standalone context by not including the AccountId parameter.

To call this operation on an account that is not a member of an organization, then don't specify this parameter, and call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify.

" }, "AccountName":{ "shape":"AccountName", @@ -773,7 +827,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

Specifies the 12 digit account ID number of the Amazon Web Services account that you want to access or modify with this operation.

If you do not specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation.

To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account, and the specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned.

The management account can't specify its own AccountId; it must call the operation in standalone context by not including the AccountId parameter.

To call this operation on an account that is not a member of an organization, then don't specify this parameter, and call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify.

" + "documentation":"

Specifies the 12 digit account ID number of the Amazon Web Services account that you want to access or modify with this operation.

If you do not specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation.

To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account, and the specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated administrator account assigned.

The management account can't specify its own AccountId; it must call the operation in standalone context by not including the AccountId parameter.

To call this operation on an account that is not a member of an organization, then don't specify this parameter, and call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify.

" }, "AlternateContactType":{ "shape":"AlternateContactType", @@ -803,7 +857,7 @@ "members":{ "AccountId":{ "shape":"AccountId", - "documentation":"

Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access or modify with this operation. If you don't specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation. To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account. The specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned.

The management account can't specify its own AccountId. It must call the operation in standalone context by not including the AccountId parameter.

To call this operation on an account that is not a member of an organization, don't specify this parameter. Instead, call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify.

" + "documentation":"

Specifies the 12-digit account ID number of the Amazon Web Services account that you want to access or modify with this operation. If you don't specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation. To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account. The specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated administrator account assigned.

The management account can't specify its own AccountId. It must call the operation in standalone context by not including the AccountId parameter.

To call this operation on an account that is not a member of an organization, don't specify this parameter. Instead, call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify.

" }, "ContactInformation":{ "shape":"ContactInformation", @@ -867,6 +921,25 @@ }, "exception":true }, + "ResourceUnavailableException":{ + "type":"structure", + "required":["message"], + "members":{ + "errorType":{ + "shape":"String", + "documentation":"

The value populated to the x-amzn-ErrorType response header by API Gateway.

", + "location":"header", + "locationName":"x-amzn-ErrorType" + }, + "message":{"shape":"String"} + }, + "documentation":"

The operation failed because it specified a resource that is not currently available.

", + "error":{ + "httpStatusCode":424, + "senderFault":true + }, + "exception":true + }, "SensitiveString":{ "type":"string", "sensitive":true diff --git a/awscli/botocore/data/appstream/2016-12-01/service-2.json b/awscli/botocore/data/appstream/2016-12-01/service-2.json index 8bbb9176bee4..82e597095032 100644 --- a/awscli/botocore/data/appstream/2016-12-01/service-2.json +++ b/awscli/botocore/data/appstream/2016-12-01/service-2.json @@ -2396,7 +2396,7 @@ }, "Platforms":{ "shape":"Platforms", - "documentation":"

The platforms the application supports. WINDOWS_SERVER_2019 and AMAZON_LINUX2 are supported for Elastic fleets.

" + "documentation":"

The platforms the application supports. WINDOWS_SERVER_2019, AMAZON_LINUX2 and UBUNTU_PRO_2404 are supported for Elastic fleets.

" }, "InstanceFamilies":{ "shape":"StringList", @@ -2552,7 +2552,7 @@ }, "InstanceType":{ "shape":"String", - "documentation":"

The instance type to use when launching fleet instances. The following instance types are available:

The following instance types are available for Elastic fleets:

" + "documentation":"

The instance type to use when launching fleet instances. The following instance types are available:

The following instance types are available for Elastic fleets:

" }, "FleetType":{ "shape":"FleetType", @@ -2608,7 +2608,7 @@ }, "Platform":{ "shape":"PlatformType", - "documentation":"

The fleet platform. WINDOWS_SERVER_2019 and AMAZON_LINUX2 are supported for Elastic fleets.

" + "documentation":"

The fleet platform. WINDOWS_SERVER_2019, AMAZON_LINUX2 and UBUNTU_PRO_2404 are supported for Elastic fleets.

" }, "MaxConcurrentSessions":{ "shape":"Integer", @@ -2662,7 +2662,7 @@ }, "InstanceType":{ "shape":"String", - "documentation":"

The instance type to use when launching the image builder. The following instance types are available:

" + "documentation":"

The instance type to use when launching the image builder. The following instance types are available:

" }, "Description":{ "shape":"Description", @@ -4400,7 +4400,7 @@ }, "InstanceType":{ "shape":"String", - "documentation":"

The instance type to use when launching fleet instances. The following instance types are available:

" + "documentation":"

The instance type to use when launching fleet instances. The following instance types are available:

" }, "FleetType":{ "shape":"FleetType", @@ -4672,7 +4672,7 @@ }, "SupportedInstanceFamilies":{ "shape":"StringList", - "documentation":"

The supported instances families that determine which image a customer can use when the customer launches a fleet or image builder. The following instances families are supported:

" + "documentation":"

The supported instances families that determine which image a customer can use when the customer launches a fleet or image builder. The following instances families are supported:

" }, "DynamicAppProvidersEnabled":{ "shape":"DynamicAppProvidersEnabled", @@ -4723,7 +4723,7 @@ }, "InstanceType":{ "shape":"String", - "documentation":"

The instance type for the image builder. The following instance types are available:

" + "documentation":"

The instance type for the image builder. The following instance types are available:

" }, "Platform":{ "shape":"PlatformType", @@ -5200,9 +5200,11 @@ "WINDOWS_SERVER_2016", "WINDOWS_SERVER_2019", "WINDOWS_SERVER_2022", + "WINDOWS_SERVER_2025", "AMAZON_LINUX2", "RHEL8", - "ROCKY_LINUX8" + "ROCKY_LINUX8", + "UBUNTU_PRO_2404" ] }, "Platforms":{ @@ -6182,7 +6184,7 @@ }, "InstanceType":{ "shape":"String", - "documentation":"

The instance type to use when launching fleet instances. The following instance types are available:

The following instance types are available for Elastic fleets:

" + "documentation":"

The instance type to use when launching fleet instances. The following instance types are available:

The following instance types are available for Elastic fleets:

" }, "ComputeCapacity":{ "shape":"ComputeCapacity", @@ -6239,7 +6241,7 @@ }, "Platform":{ "shape":"PlatformType", - "documentation":"

The platform of the fleet. WINDOWS_SERVER_2019 and AMAZON_LINUX2 are supported for Elastic fleets.

" + "documentation":"

The platform of the fleet. WINDOWS_SERVER_2019, AMAZON_LINUX2 and UBUNTU_PRO_2404 are supported for Elastic fleets.

" }, "MaxConcurrentSessions":{ "shape":"Integer", diff --git a/awscli/botocore/data/appsync/2017-07-25/service-2.json b/awscli/botocore/data/appsync/2017-07-25/service-2.json index 723f353a0db8..5d9b4da358ce 100644 --- a/awscli/botocore/data/appsync/2017-07-25/service-2.json +++ b/awscli/botocore/data/appsync/2017-07-25/service-2.json @@ -2032,7 +2032,10 @@ }, "CreateApiRequest":{ "type":"structure", - "required":["name"], + "required":[ + "name", + "eventConfig" + ], "members":{ "name":{ "shape":"ApiName", @@ -5460,7 +5463,8 @@ "type":"structure", "required":[ "apiId", - "name" + "name", + "eventConfig" ], "members":{ "apiId":{ diff --git a/awscli/botocore/data/arc-region-switch/2022-07-26/endpoint-rule-set-1.json b/awscli/botocore/data/arc-region-switch/2022-07-26/endpoint-rule-set-1.json index 0a42812d7b62..abd72333dde9 100644 --- a/awscli/botocore/data/arc-region-switch/2022-07-26/endpoint-rule-set-1.json +++ b/awscli/botocore/data/arc-region-switch/2022-07-26/endpoint-rule-set-1.json @@ -6,24 +6,24 @@ "required": true, "default": false, "documentation": "When true, send this request to the FIPS-compliant regional endpoint. If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", - "type": "Boolean" + "type": "boolean" }, "Endpoint": { "builtIn": "SDK::Endpoint", "required": false, "documentation": "Override the endpoint used to send this request", - "type": "String" + "type": "string" }, "Region": { "builtIn": "AWS::Region", "required": false, "documentation": "The AWS region used to dispatch the request.", - "type": "String" + "type": "string" }, "UseControlPlaneEndpoint": { "required": false, "documentation": "Whether the operation is a control plane operation. 
Control plane operations are routed to a centralized endpoint in the partition leader.", - "type": "Boolean" + "type": "boolean" } }, "rules": [ diff --git a/awscli/botocore/data/arc-region-switch/2022-07-26/paginators-1.json b/awscli/botocore/data/arc-region-switch/2022-07-26/paginators-1.json index 0bac1960ce59..2e521c97a9dd 100644 --- a/awscli/botocore/data/arc-region-switch/2022-07-26/paginators-1.json +++ b/awscli/botocore/data/arc-region-switch/2022-07-26/paginators-1.json @@ -41,6 +41,12 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "healthChecks" + }, + "ListRoute53HealthChecksInRegion": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "healthChecks" } } } diff --git a/awscli/botocore/data/arc-region-switch/2022-07-26/paginators-1.sdk-extras.json b/awscli/botocore/data/arc-region-switch/2022-07-26/paginators-1.sdk-extras.json index e07c05710c29..f8c1737219d0 100644 --- a/awscli/botocore/data/arc-region-switch/2022-07-26/paginators-1.sdk-extras.json +++ b/awscli/botocore/data/arc-region-switch/2022-07-26/paginators-1.sdk-extras.json @@ -25,7 +25,8 @@ "planArn", "actualRecoveryTime", "version", - "mode" + "mode", + "generatedReportDetails" ] } } diff --git a/awscli/botocore/data/arc-region-switch/2022-07-26/service-2.json b/awscli/botocore/data/arc-region-switch/2022-07-26/service-2.json index 016289076324..cfe804cd819e 100644 --- a/awscli/botocore/data/arc-region-switch/2022-07-26/service-2.json +++ b/awscli/botocore/data/arc-region-switch/2022-07-26/service-2.json @@ -89,6 +89,7 @@ {"shape":"ResourceNotFoundException"} ], "documentation":"

Retrieves detailed information about a Region switch plan. You must specify the ARN of the plan.

", + "readonly":true, "staticContextParams":{ "UseControlPlaneEndpoint":{"value":true} } @@ -105,7 +106,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Retrieves the evaluation status of a Region switch plan. The evaluation status provides information about the last time the plan was evaluated and any warnings or issues detected.

" + "documentation":"

Retrieves the evaluation status of a Region switch plan. The evaluation status provides information about the last time the plan was evaluated and any warnings or issues detected.

", + "readonly":true }, "GetPlanExecution":{ "name":"GetPlanExecution", @@ -119,7 +121,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Retrieves detailed information about a specific plan execution. You must specify the plan ARN and execution ID.

" + "documentation":"

Retrieves detailed information about a specific plan execution. You must specify the plan ARN and execution ID.

", + "readonly":true }, "GetPlanInRegion":{ "name":"GetPlanInRegion", @@ -133,7 +136,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Retrieves information about a Region switch plan in a specific Amazon Web Services Region. This operation is useful for getting Region-specific information about a plan.

" + "documentation":"

Retrieves information about a Region switch plan in a specific Amazon Web Services Region. This operation is useful for getting Region-specific information about a plan.

", + "readonly":true }, "ListPlanExecutionEvents":{ "name":"ListPlanExecutionEvents", @@ -147,7 +151,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Lists the events that occurred during a plan execution. These events provide a detailed timeline of the execution process.

" + "documentation":"

Lists the events that occurred during a plan execution. These events provide a detailed timeline of the execution process.

", + "readonly":true }, "ListPlanExecutions":{ "name":"ListPlanExecutions", @@ -161,7 +166,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Lists the executions of a Region switch plan. This operation returns information about both current and historical executions.

" + "documentation":"

Lists the executions of a Region switch plan. This operation returns information about both current and historical executions.

", + "readonly":true }, "ListPlans":{ "name":"ListPlans", @@ -172,6 +178,7 @@ "input":{"shape":"ListPlansRequest"}, "output":{"shape":"ListPlansResponse"}, "documentation":"

Lists all Region switch plans in your Amazon Web Services account.

", + "readonly":true, "staticContextParams":{ "UseControlPlaneEndpoint":{"value":true} } @@ -187,7 +194,8 @@ "errors":[ {"shape":"AccessDeniedException"} ], - "documentation":"

Lists all Region switch plans in your Amazon Web Services account that are available in the current Amazon Web Services Region.

" + "documentation":"

Lists all Region switch plans in your Amazon Web Services account that are available in the current Amazon Web Services Region.

", + "readonly":true }, "ListRoute53HealthChecks":{ "name":"ListRoute53HealthChecks", @@ -203,10 +211,28 @@ {"shape":"InternalServerException"} ], "documentation":"

List the Amazon Route 53 health checks.

", + "readonly":true, "staticContextParams":{ "UseControlPlaneEndpoint":{"value":true} } }, + "ListRoute53HealthChecksInRegion":{ + "name":"ListRoute53HealthChecksInRegion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListRoute53HealthChecksInRegionRequest"}, + "output":{"shape":"ListRoute53HealthChecksInRegionResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

List the Amazon Route 53 health checks in a specific Amazon Web Services Region.

", + "readonly":true + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -220,6 +246,7 @@ {"shape":"InternalServerException"} ], "documentation":"

Lists the tags attached to a Region switch resource.

", + "readonly":true, "staticContextParams":{ "UseControlPlaneEndpoint":{"value":true} } @@ -598,7 +625,7 @@ }, "AsgArn":{ "type":"string", - "pattern":"arn:aws:autoscaling:[a-z0-9-]+:\\d{12}:autoScalingGroup:[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}:autoScalingGroupName/[\\S\\s]{1,255}" + "pattern":"arn:aws[a-zA-Z-]*:autoscaling:[a-z0-9-]+:\\d{12}:autoScalingGroup:[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}:autoScalingGroupName/[\\S\\s]{1,255}" }, "AsgList":{ "type":"list", @@ -701,6 +728,7 @@ "shape":"TriggerList", "documentation":"

The triggers associated with a Region switch plan.

" }, + "reportConfiguration":{"shape":"ReportConfiguration"}, "name":{ "shape":"PlanName", "documentation":"

The name of a Region switch plan.

" @@ -788,6 +816,83 @@ "type":"structure", "members":{} }, + "DocumentDbClusterArn":{ + "type":"string", + "pattern":"arn:aws[a-zA-Z-]*:rds:[a-z0-9-]+:\\d{12}:cluster:[a-zA-Z0-9][a-zA-Z0-9-_]{0,99}" + }, + "DocumentDbClusterArns":{ + "type":"list", + "member":{"shape":"DocumentDbClusterArn"} + }, + "DocumentDbConfiguration":{ + "type":"structure", + "required":[ + "behavior", + "globalClusterIdentifier", + "databaseClusterArns" + ], + "members":{ + "timeoutMinutes":{ + "shape":"DocumentDbConfigurationTimeoutMinutesInteger", + "documentation":"

The timeout value specified for the configuration.

" + }, + "crossAccountRole":{ + "shape":"IamRoleArn", + "documentation":"

The cross account role for the configuration.

" + }, + "externalId":{ + "shape":"String", + "documentation":"

The external ID (secret key) for the configuration.

" + }, + "behavior":{ + "shape":"DocumentDbDefaultBehavior", + "documentation":"

The behavior for a global cluster, that is, only allow switchover or also allow failover.

" + }, + "ungraceful":{ + "shape":"DocumentDbUngraceful", + "documentation":"

The settings for ungraceful execution.

" + }, + "globalClusterIdentifier":{ + "shape":"DocumentDbGlobalClusterIdentifier", + "documentation":"

The global cluster identifier for a DocumentDB global cluster.

" + }, + "databaseClusterArns":{ + "shape":"DocumentDbClusterArns", + "documentation":"

The database cluster Amazon Resource Names (ARNs) for a DocumentDB global cluster.

" + } + }, + "documentation":"

Configuration for Amazon DocumentDB global clusters used in a Region switch plan.

" + }, + "DocumentDbConfigurationTimeoutMinutesInteger":{ + "type":"integer", + "box":true, + "min":1 + }, + "DocumentDbDefaultBehavior":{ + "type":"string", + "enum":[ + "switchoverOnly", + "failover" + ] + }, + "DocumentDbGlobalClusterIdentifier":{ + "type":"string", + "pattern":"[A-Za-z][0-9A-Za-z-:._]*" + }, + "DocumentDbUngraceful":{ + "type":"structure", + "members":{ + "ungraceful":{ + "shape":"DocumentDbUngracefulBehavior", + "documentation":"

The settings for ungraceful execution.

" + } + }, + "documentation":"

Configuration for handling failures when performing operations on DocumentDB global clusters.

" + }, + "DocumentDbUngracefulBehavior":{ + "type":"string", + "enum":["failover"] + }, "Duration":{ "type":"string", "pattern":"P(?!$)(\\d+Y)?(\\d+M)?(\\d+D)?(T(?=\\d)(\\d+H)?(\\d+M)?(\\d+S)?)?" @@ -889,11 +994,11 @@ }, "EcsClusterArn":{ "type":"string", - "pattern":"arn:aws:ecs:[a-z0-9-]+:\\d{12}:cluster/[a-zA-Z0-9_-]{1,255}" + "pattern":"arn:aws[a-zA-Z-]*:ecs:[a-z0-9-]+:\\d{12}:cluster/[a-zA-Z0-9_-]{1,255}" }, "EcsServiceArn":{ "type":"string", - "pattern":"arn:aws:ecs:[a-z0-9-]+:\\d{12}:service/[a-zA-Z0-9_-]+/[a-zA-Z0-9_-]{1,255}" + "pattern":"arn:aws[a-zA-Z-]*:ecs:[a-z0-9-]+:\\d{12}:service/[a-zA-Z0-9_-]+/[a-zA-Z0-9_-]{1,255}" }, "EcsUngraceful":{ "type":"structure", @@ -1084,7 +1189,8 @@ "route53HealthCheckConfig":{ "shape":"Route53HealthCheckConfiguration", "documentation":"

The Amazon Route 53 health check configuration.

" - } + }, + "documentDbConfig":{"shape":"DocumentDbConfiguration"} }, "documentation":"

Execution block configurations for a workflow in a Region switch plan. An execution block represents a specific type of action to perform during a Region switch.

", "union":true @@ -1101,7 +1207,8 @@ "Parallel", "ECSServiceScaling", "EKSResourceScaling", - "Route53HealthCheck" + "Route53HealthCheck", + "DocumentDb" ] }, "ExecutionComment":{ @@ -1183,7 +1290,8 @@ "stepCanceled", "stepPendingApproval", "stepExecutionBehaviorChangedToUngraceful", - "stepPendingApplicationHealthMonitor" + "stepPendingApplicationHealthMonitor", + "planEvaluationWarning" ] }, "ExecutionId":{"type":"string"}, @@ -1210,10 +1318,52 @@ "completedMonitoringApplicationHealth" ] }, + "FailedReportErrorCode":{ + "type":"string", + "enum":[ + "insufficientPermissions", + "invalidResource", + "configurationError" + ] + }, + "FailedReportOutput":{ + "type":"structure", + "members":{ + "errorCode":{ + "shape":"FailedReportErrorCode", + "documentation":"

The error code for the failed report generation.

" + }, + "errorMessage":{ + "shape":"String", + "documentation":"

The error message for the failed report generation.

" + } + }, + "documentation":"

Information about a report generation that failed.

" + }, "Float":{ "type":"float", "box":true }, + "GeneratedReport":{ + "type":"structure", + "members":{ + "reportGenerationTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the report was generated.

" + }, + "reportOutput":{ + "shape":"ReportOutput", + "documentation":"

The output location or cause of a failure in report generation.

" + } + }, + "documentation":"

Information about a generated execution report.

" + }, + "GeneratedReportDetails":{ + "type":"list", + "member":{"shape":"GeneratedReport"}, + "max":1, + "min":0 + }, "GetPlanEvaluationStatusRequest":{ "type":"structure", "required":["planArn"], @@ -1359,6 +1509,10 @@ "shape":"Duration", "documentation":"

The actual recovery time that Region switch calculates for a plan execution. Actual recovery time includes the time for the plan to run added to the time elapsed until the application health alarms that you've specified are healthy again.

" }, + "generatedReportDetails":{ + "shape":"GeneratedReportDetails", + "documentation":"

Information about the location of a generated report, or the cause of its failure.

" + }, "nextToken":{ "shape":"String", "documentation":"

Specifies that you want to receive the next page of results. Valid only if you received a nextToken response in the previous request. If you did, it indicates that more output is available. Set this parameter to the value provided by the previous call's nextToken response to request the next page of results.

" @@ -1754,6 +1908,51 @@ } } }, + "ListRoute53HealthChecksInRegionRequest":{ + "type":"structure", + "required":["arn"], + "members":{ + "arn":{ + "shape":"PlanArn", + "documentation":"

The Amazon Resource Name (ARN) of the Arc Region Switch Plan.

" + }, + "hostedZoneId":{ + "shape":"Route53HostedZoneId", + "documentation":"

The hosted zone ID for the health checks.

" + }, + "recordName":{ + "shape":"Route53RecordName", + "documentation":"

The record name for the health checks.

" + }, + "maxResults":{ + "shape":"ListRoute53HealthChecksInRegionRequestMaxResultsInteger", + "documentation":"

The number of objects that you want to return with this call.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

Specifies that you want to receive the next page of results. Valid only if you received a nextToken response in the previous request. If you did, it indicates that more output is available. Set this parameter to the value provided by the previous call's nextToken response to request the next page of results.

" + } + } + }, + "ListRoute53HealthChecksInRegionRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListRoute53HealthChecksInRegionResponse":{ + "type":"structure", + "members":{ + "healthChecks":{ + "shape":"Route53HealthCheckList", + "documentation":"

List of the health checks requested.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

Specifies that you want to receive the next page of results. Valid only if you received a nextToken response in the previous request. If you did, it indicates that more output is available. Set this parameter to the value provided by the previous call's nextToken response to request the next page of results.

" + } + } + }, "ListRoute53HealthChecksRequest":{ "type":"structure", "required":["arn"], @@ -1894,6 +2093,10 @@ "shape":"TriggerList", "documentation":"

The triggers for a plan.

" }, + "reportConfiguration":{ + "shape":"ReportConfiguration", + "documentation":"

The report configuration for a plan.

" + }, "name":{ "shape":"PlanName", "documentation":"

The name for a plan.

" @@ -2002,6 +2205,48 @@ "key":{"shape":"Region"}, "value":{"shape":"KubernetesScalingResource"} }, + "ReportConfiguration":{ + "type":"structure", + "members":{ + "reportOutput":{ + "shape":"ReportOutputList", + "documentation":"

The output configuration for the report.

" + } + }, + "documentation":"

Configuration for automatic report generation for plan executions. When configured, Region switch automatically generates a report after each plan execution that includes execution events, plan configuration, and CloudWatch alarm states.

" + }, + "ReportOutput":{ + "type":"structure", + "members":{ + "s3ReportOutput":{ + "shape":"S3ReportOutput", + "documentation":"

Information about a report delivered to Amazon S3.

" + }, + "failedReportOutput":{ + "shape":"FailedReportOutput", + "documentation":"

The details about a failed report generation.

" + } + }, + "documentation":"

The output location or cause of a failure in report generation.

", + "union":true + }, + "ReportOutputConfiguration":{ + "type":"structure", + "members":{ + "s3Configuration":{ + "shape":"S3ReportOutputConfiguration", + "documentation":"

Configuration for delivering reports to an Amazon S3 bucket.

" + } + }, + "documentation":"

Configuration for report output destinations used in a Region switch plan.

", + "union":true + }, + "ReportOutputList":{ + "type":"list", + "member":{"shape":"ReportOutputConfiguration"}, + "max":1, + "min":1 + }, "ResourceArn":{"type":"string"}, "ResourceNotFoundException":{ "type":"structure", @@ -2088,6 +2333,10 @@ "shape":"Route53HealthCheckId", "documentation":"

The Amazon Route 53 health check ID.

" }, + "status":{ + "shape":"Route53HealthCheckStatus", + "documentation":"

The Amazon Route 53 health check status.

" + }, "region":{ "shape":"Region", "documentation":"

The Amazon Route 53 Region.

" @@ -2143,6 +2392,14 @@ "type":"list", "member":{"shape":"Route53HealthCheck"} }, + "Route53HealthCheckStatus":{ + "type":"string", + "enum":[ + "healthy", + "unhealthy", + "unknown" + ] + }, "Route53HostedZoneId":{ "type":"string", "max":32, @@ -2184,6 +2441,36 @@ "Off" ] }, + "S3ReportOutput":{ + "type":"structure", + "members":{ + "s3ObjectKey":{ + "shape":"String", + "documentation":"

The S3 object key where the generated report is stored.

" + } + }, + "documentation":"

Information about a report delivered to Amazon S3.

" + }, + "S3ReportOutputConfiguration":{ + "type":"structure", + "members":{ + "bucketPath":{ + "shape":"S3ReportOutputConfigurationBucketPathString", + "documentation":"

The S3 bucket name and optional prefix where reports are stored. Format: bucket-name or bucket-name/prefix.

" + }, + "bucketOwner":{ + "shape":"AccountId", + "documentation":"

The Amazon Web Services account ID that owns the S3 bucket. Required to ensure the bucket is still owned by the same expected owner at generation time.

" + } + }, + "documentation":"

Configuration for delivering generated reports to an Amazon S3 bucket.

" + }, + "S3ReportOutputConfigurationBucketPathString":{ + "type":"string", + "max":512, + "min":3, + "pattern":"(?:s3://)?[a-z0-9][a-z0-9-]{1,61}[a-z0-9](?:/[^/ ][^/]*)*/?" + }, "Service":{ "type":"structure", "members":{ @@ -2589,6 +2876,10 @@ "triggers":{ "shape":"TriggerList", "documentation":"

The updated conditions that can automatically trigger the execution of the plan.

" + }, + "reportConfiguration":{ + "shape":"ReportConfiguration", + "documentation":"

The updated report configuration for the plan.

" } } }, diff --git a/awscli/botocore/data/artifact/2018-05-10/paginators-1.json b/awscli/botocore/data/artifact/2018-05-10/paginators-1.json index ba4271a9e5c0..ae2de6dee1d8 100644 --- a/awscli/botocore/data/artifact/2018-05-10/paginators-1.json +++ b/awscli/botocore/data/artifact/2018-05-10/paginators-1.json @@ -11,6 +11,12 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "customerAgreements" + }, + "ListReportVersions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "reports" } } } diff --git a/awscli/botocore/data/artifact/2018-05-10/service-2.json b/awscli/botocore/data/artifact/2018-05-10/service-2.json index f7b10da97124..7327dba275f9 100644 --- a/awscli/botocore/data/artifact/2018-05-10/service-2.json +++ b/awscli/botocore/data/artifact/2018-05-10/service-2.json @@ -31,7 +31,8 @@ {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Get the account settings for Artifact.

" + "documentation":"

Get the account settings for Artifact.

", + "readonly":true }, "GetReport":{ "name":"GetReport", @@ -51,7 +52,8 @@ {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Get the content for a single report.

" + "documentation":"

Get the content for a single report.

", + "readonly":true }, "GetReportMetadata":{ "name":"GetReportMetadata", @@ -70,7 +72,8 @@ {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Get the metadata for a single report.

" + "documentation":"

Get the metadata for a single report.

", + "readonly":true }, "GetTermForReport":{ "name":"GetTermForReport", @@ -90,7 +93,8 @@ {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Get the Term content associated with a single report.

" + "documentation":"

Get the Term content associated with a single report.

", + "readonly":true }, "ListCustomerAgreements":{ "name":"ListCustomerAgreements", @@ -107,7 +111,28 @@ {"shape":"InternalServerException"}, {"shape":"ValidationException"} ], - "documentation":"

List active customer-agreements applicable to calling identity.

" + "documentation":"

List active customer-agreements applicable to calling identity.

", + "readonly":true + }, + "ListReportVersions":{ + "name":"ListReportVersions", + "http":{ + "method":"GET", + "requestUri":"/v1/report/listVersions", + "responseCode":200 + }, + "input":{"shape":"ListReportVersionsRequest"}, + "output":{"shape":"ListReportVersionsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

List available report versions for a given report.

", + "readonly":true }, "ListReports":{ "name":"ListReports", @@ -126,7 +151,8 @@ {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

List available reports.

" + "documentation":"

List available reports.

", + "readonly":true }, "PutAccountSettings":{ "name":"PutAccountSettings", @@ -296,8 +322,7 @@ }, "GetAccountSettingsRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "GetAccountSettingsResponse":{ "type":"structure", @@ -462,6 +487,44 @@ } } }, + "ListReportVersionsRequest":{ + "type":"structure", + "required":["reportId"], + "members":{ + "reportId":{ + "shape":"ReportId", + "documentation":"

Unique resource ID for the report resource.

", + "location":"querystring", + "locationName":"reportId" + }, + "maxResults":{ + "shape":"MaxResultsAttribute", + "documentation":"

Maximum number of resources to return in the paginated response.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextTokenAttribute", + "documentation":"

Pagination token to request the next page of resources.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListReportVersionsResponse":{ + "type":"structure", + "required":["reports"], + "members":{ + "reports":{ + "shape":"ReportsList", + "documentation":"

List of report resources.

" + }, + "nextToken":{ + "shape":"NextTokenAttribute", + "documentation":"

Pagination token to request the next page of resources.

" + } + } + }, "ListReportsRequest":{ "type":"structure", "members":{ diff --git a/awscli/botocore/data/bcm-recommended-actions/2024-11-14/service-2.json b/awscli/botocore/data/bcm-recommended-actions/2024-11-14/service-2.json index 18310b7498f1..48636249423b 100644 --- a/awscli/botocore/data/bcm-recommended-actions/2024-11-14/service-2.json +++ b/awscli/botocore/data/bcm-recommended-actions/2024-11-14/service-2.json @@ -91,6 +91,8 @@ "REVIEW_EXPIRING_RI", "REVIEW_EXPIRING_SP", "REVIEW_FREETIER_USAGE_ALERTS", + "REVIEW_FREETIER_CREDITS_REMAINING", + "REVIEW_FREETIER_DAYS_REMAINING", "REVIEW_SAVINGS_OPPORTUNITY_RECOMMENDATIONS", "UPDATE_EXPIRED_PAYMENT_METHOD", "UPDATE_INVALID_PAYMENT_METHOD", diff --git a/awscli/botocore/data/bedrock-agentcore-control/2023-06-05/paginators-1.json b/awscli/botocore/data/bedrock-agentcore-control/2023-06-05/paginators-1.json index 7929e4f55b1e..745be8273355 100644 --- a/awscli/botocore/data/bedrock-agentcore-control/2023-06-05/paginators-1.json +++ b/awscli/botocore/data/bedrock-agentcore-control/2023-06-05/paginators-1.json @@ -65,6 +65,42 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "workloadIdentities" + }, + "ListEvaluators": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "evaluators" + }, + "ListOnlineEvaluationConfigs": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "onlineEvaluationConfigs" + }, + "ListPolicies": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "policies" + }, + "ListPolicyEngines": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "policyEngines" + }, + "ListPolicyGenerationAssets": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "policyGenerationAssets" + }, + "ListPolicyGenerations": { 
+ "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "policyGenerations" } } } diff --git a/awscli/botocore/data/bedrock-agentcore-control/2023-06-05/service-2.json b/awscli/botocore/data/bedrock-agentcore-control/2023-06-05/service-2.json index 7c8052ebb3c4..b5bf2129d4c1 100644 --- a/awscli/botocore/data/bedrock-agentcore-control/2023-06-05/service-2.json +++ b/awscli/botocore/data/bedrock-agentcore-control/2023-06-05/service-2.json @@ -119,6 +119,25 @@ "documentation":"

Creates a custom code interpreter.

", "idempotent":true }, + "CreateEvaluator":{ + "name":"CreateEvaluator", + "http":{ + "method":"POST", + "requestUri":"/evaluators/create", + "responseCode":202 + }, + "input":{"shape":"CreateEvaluatorRequest"}, + "output":{"shape":"CreateEvaluatorResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Creates a custom evaluator for agent quality assessment. Custom evaluators use LLM-as-a-Judge configurations with user-defined prompts, rating scales, and model settings to evaluate agent performance at tool call, trace, or session levels.

" + }, "CreateGateway":{ "name":"CreateGateway", "http":{ @@ -206,6 +225,64 @@ "documentation":"

Creates a new OAuth2 credential provider.

", "idempotent":true }, + "CreateOnlineEvaluationConfig":{ + "name":"CreateOnlineEvaluationConfig", + "http":{ + "method":"POST", + "requestUri":"/online-evaluation-configs/create", + "responseCode":202 + }, + "input":{"shape":"CreateOnlineEvaluationConfigRequest"}, + "output":{"shape":"CreateOnlineEvaluationConfigResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Creates an online evaluation configuration for continuous monitoring of agent performance. Online evaluation automatically samples live traffic from CloudWatch logs at specified rates and applies evaluators to assess agent quality in production.

" + }, + "CreatePolicy":{ + "name":"CreatePolicy", + "http":{ + "method":"POST", + "requestUri":"/policy-engines/{policyEngineId}/policies", + "responseCode":202 + }, + "input":{"shape":"CreatePolicyRequest"}, + "output":{"shape":"CreatePolicyResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Creates a policy within the AgentCore Policy system. Policies provide real-time, deterministic control over agentic interactions with AgentCore Gateway. Using the Cedar policy language, you can define fine-grained policies that specify which interactions with Gateway tools are permitted based on input parameters and OAuth claims, ensuring agents operate within defined boundaries and business rules. The policy is validated during creation against the Cedar schema generated from the Gateway's tools' input schemas, which defines the available tools, their parameters, and expected data types. This is an asynchronous operation. Use the GetPolicy operation to poll the status field to track completion.

" + }, + "CreatePolicyEngine":{ + "name":"CreatePolicyEngine", + "http":{ + "method":"POST", + "requestUri":"/policy-engines", + "responseCode":202 + }, + "input":{"shape":"CreatePolicyEngineRequest"}, + "output":{"shape":"CreatePolicyEngineResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Creates a new policy engine within the AgentCore Policy system. A policy engine is a collection of policies that evaluates and authorizes agent tool calls. When associated with Gateways (each Gateway can be associated with at most one policy engine, but multiple Gateways can be associated with the same engine), the policy engine intercepts all agent requests and determines whether to allow or deny each action based on the defined policies. This is an asynchronous operation. Use the GetPolicyEngine operation to poll the status field to track completion.

" + }, "CreateWorkloadIdentity":{ "name":"CreateWorkloadIdentity", "http":{ @@ -326,6 +403,26 @@ "documentation":"

Deletes a custom code interpreter.

", "idempotent":true }, + "DeleteEvaluator":{ + "name":"DeleteEvaluator", + "http":{ + "method":"DELETE", + "requestUri":"/evaluators/{evaluatorId}", + "responseCode":202 + }, + "input":{"shape":"DeleteEvaluatorRequest"}, + "output":{"shape":"DeleteEvaluatorResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Deletes a custom evaluator. Builtin evaluators cannot be deleted. The evaluator must not be referenced by any active online evaluation configurations.

", + "idempotent":true + }, "DeleteGateway":{ "name":"DeleteGateway", "http":{ @@ -379,6 +476,7 @@ {"shape":"ServiceException"}, {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottledException"} ], @@ -405,6 +503,85 @@ "documentation":"

Deletes an OAuth2 credential provider.

", "idempotent":true }, + "DeleteOnlineEvaluationConfig":{ + "name":"DeleteOnlineEvaluationConfig", + "http":{ + "method":"DELETE", + "requestUri":"/online-evaluation-configs/{onlineEvaluationConfigId}", + "responseCode":202 + }, + "input":{"shape":"DeleteOnlineEvaluationConfigRequest"}, + "output":{"shape":"DeleteOnlineEvaluationConfigResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Deletes an online evaluation configuration and stops any ongoing evaluation processes associated with it.

", + "idempotent":true + }, + "DeletePolicy":{ + "name":"DeletePolicy", + "http":{ + "method":"DELETE", + "requestUri":"/policy-engines/{policyEngineId}/policies/{policyId}", + "responseCode":202 + }, + "input":{"shape":"DeletePolicyRequest"}, + "output":{"shape":"DeletePolicyResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Deletes an existing policy from the AgentCore Policy system. Once deleted, the policy can no longer be used for agent behavior control and all references to it become invalid. This is an asynchronous operation. Use the GetPolicy operation to poll the status field to track completion.

", + "idempotent":true + }, + "DeletePolicyEngine":{ + "name":"DeletePolicyEngine", + "http":{ + "method":"DELETE", + "requestUri":"/policy-engines/{policyEngineId}", + "responseCode":202 + }, + "input":{"shape":"DeletePolicyEngineRequest"}, + "output":{"shape":"DeletePolicyEngineResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Deletes an existing policy engine from the AgentCore Policy system. The policy engine must not have any associated policies before deletion. Once deleted, the policy engine and all its configurations become unavailable for policy management and evaluation. This is an asynchronous operation. Use the GetPolicyEngine operation to poll the status field to track completion.

", + "idempotent":true + }, + "DeleteResourcePolicy":{ + "name":"DeleteResourcePolicy", + "http":{ + "method":"DELETE", + "requestUri":"/resourcepolicy/{resourceArn}", + "responseCode":204 + }, + "input":{"shape":"DeleteResourcePolicyRequest"}, + "output":{"shape":"DeleteResourcePolicyResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Deletes the resource-based policy for a specified resource.

This feature is currently available only for AgentCore Runtime and Gateway.

", + "idempotent":true + }, "DeleteWorkloadIdentity":{ "name":"DeleteWorkloadIdentity", "http":{ @@ -522,6 +699,25 @@ "documentation":"

Gets information about a custom code interpreter.

", "readonly":true }, + "GetEvaluator":{ + "name":"GetEvaluator", + "http":{ + "method":"GET", + "requestUri":"/evaluators/{evaluatorId}", + "responseCode":200 + }, + "input":{"shape":"GetEvaluatorRequest"}, + "output":{"shape":"GetEvaluatorResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Retrieves detailed information about an evaluator, including its configuration, status, and metadata. Works with both built-in and custom evaluators.

", + "readonly":true + }, "GetGateway":{ "name":"GetGateway", "http":{ @@ -600,6 +796,101 @@ "documentation":"

Retrieves information about an OAuth2 credential provider.

", "readonly":true }, + "GetOnlineEvaluationConfig":{ + "name":"GetOnlineEvaluationConfig", + "http":{ + "method":"GET", + "requestUri":"/online-evaluation-configs/{onlineEvaluationConfigId}", + "responseCode":200 + }, + "input":{"shape":"GetOnlineEvaluationConfigRequest"}, + "output":{"shape":"GetOnlineEvaluationConfigResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Retrieves detailed information about an online evaluation configuration, including its rules, data sources, evaluators, and execution status.

", + "readonly":true + }, + "GetPolicy":{ + "name":"GetPolicy", + "http":{ + "method":"GET", + "requestUri":"/policy-engines/{policyEngineId}/policies/{policyId}", + "responseCode":200 + }, + "input":{"shape":"GetPolicyRequest"}, + "output":{"shape":"GetPolicyResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Retrieves detailed information about a specific policy within the AgentCore Policy system. This operation returns the complete policy definition, metadata, and current status, allowing administrators to review and manage policy configurations.

", + "readonly":true + }, + "GetPolicyEngine":{ + "name":"GetPolicyEngine", + "http":{ + "method":"GET", + "requestUri":"/policy-engines/{policyEngineId}", + "responseCode":200 + }, + "input":{"shape":"GetPolicyEngineRequest"}, + "output":{"shape":"GetPolicyEngineResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Retrieves detailed information about a specific policy engine within the AgentCore Policy system. This operation returns the complete policy engine configuration, metadata, and current status, allowing administrators to review and manage policy engine settings.

", + "readonly":true + }, + "GetPolicyGeneration":{ + "name":"GetPolicyGeneration", + "http":{ + "method":"GET", + "requestUri":"/policy-engines/{policyEngineId}/policy-generations/{policyGenerationId}", + "responseCode":200 + }, + "input":{"shape":"GetPolicyGenerationRequest"}, + "output":{"shape":"GetPolicyGenerationResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Retrieves information about a policy generation request within the AgentCore Policy system. Policy generation converts natural language descriptions into Cedar policy statements using AI-powered translation, enabling non-technical users to create policies.

", + "readonly":true + }, + "GetResourcePolicy":{ + "name":"GetResourcePolicy", + "http":{ + "method":"GET", + "requestUri":"/resourcepolicy/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"GetResourcePolicyRequest"}, + "output":{"shape":"GetResourcePolicyResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Retrieves the resource-based policy for a specified resource.

This feature is currently available only for AgentCore Runtime and Gateway.

", + "readonly":true + }, "GetTokenVault":{ "name":"GetTokenVault", "http":{ @@ -751,6 +1042,24 @@ "documentation":"

Lists all custom code interpreters in your account.

", "readonly":true }, + "ListEvaluators":{ + "name":"ListEvaluators", + "http":{ + "method":"POST", + "requestUri":"/evaluators", + "responseCode":200 + }, + "input":{"shape":"ListEvaluatorsRequest"}, + "output":{"shape":"ListEvaluatorsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists all available evaluators, including both built-in evaluators provided by the service and custom evaluators created by the user.

", + "readonly":true + }, "ListGatewayTargets":{ "name":"ListGatewayTargets", "http":{ @@ -826,74 +1135,206 @@ "documentation":"

Lists all OAuth2 credential providers in your account.

", "readonly":true }, - "ListTagsForResource":{ - "name":"ListTagsForResource", + "ListOnlineEvaluationConfigs":{ + "name":"ListOnlineEvaluationConfigs", "http":{ - "method":"GET", - "requestUri":"/tags/{resourceArn}", + "method":"POST", + "requestUri":"/online-evaluation-configs", "responseCode":200 }, - "input":{"shape":"ListTagsForResourceRequest"}, - "output":{"shape":"ListTagsForResourceResponse"}, + "input":{"shape":"ListOnlineEvaluationConfigsRequest"}, + "output":{"shape":"ListOnlineEvaluationConfigsResponse"}, "errors":[ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Lists the tags associated with the specified resource.

This feature is currently available only for AgentCore Runtime, Browser, Code Interpreter tool, and Gateway.

", + "documentation":"

Lists all online evaluation configurations in the account, providing summary information about each configuration's status and settings.

", "readonly":true }, - "ListWorkloadIdentities":{ - "name":"ListWorkloadIdentities", + "ListPolicies":{ + "name":"ListPolicies", "http":{ - "method":"POST", - "requestUri":"/identities/ListWorkloadIdentities", + "method":"GET", + "requestUri":"/policy-engines/{policyEngineId}/policies", "responseCode":200 }, - "input":{"shape":"ListWorkloadIdentitiesRequest"}, - "output":{"shape":"ListWorkloadIdentitiesResponse"}, + "input":{"shape":"ListPoliciesRequest"}, + "output":{"shape":"ListPoliciesResponse"}, "errors":[ - {"shape":"UnauthorizedException"}, - {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Lists all workload identities in your account.

", + "documentation":"

Retrieves a list of policies within the AgentCore Policy engine. This operation supports pagination and filtering to help administrators manage and discover policies across policy engines. Results can be filtered by policy engine or resource associations.

", "readonly":true }, - "SetTokenVaultCMK":{ - "name":"SetTokenVaultCMK", + "ListPolicyEngines":{ + "name":"ListPolicyEngines", "http":{ - "method":"POST", - "requestUri":"/identities/set-token-vault-cmk", + "method":"GET", + "requestUri":"/policy-engines", "responseCode":200 }, - "input":{"shape":"SetTokenVaultCMKRequest"}, - "output":{"shape":"SetTokenVaultCMKResponse"}, + "input":{"shape":"ListPolicyEnginesRequest"}, + "output":{"shape":"ListPolicyEnginesResponse"}, "errors":[ - {"shape":"UnauthorizedException"}, - {"shape":"ConcurrentModificationException"}, - {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Sets the customer master key (CMK) for a token vault.

" + "documentation":"

Retrieves a list of policy engines within the AgentCore Policy system. This operation supports pagination to help administrators discover and manage policy engines across their account. Each policy engine serves as a container for related policies.

", + "readonly":true }, - "SynchronizeGatewayTargets":{ - "name":"SynchronizeGatewayTargets", + "ListPolicyGenerationAssets":{ + "name":"ListPolicyGenerationAssets", "http":{ - "method":"PUT", - "requestUri":"/gateways/{gatewayIdentifier}/synchronizeTargets", - "responseCode":202 + "method":"GET", + "requestUri":"/policy-engines/{policyEngineId}/policy-generations/{policyGenerationId}/assets", + "responseCode":200 }, - "input":{"shape":"SynchronizeGatewayTargetsRequest"}, - "output":{"shape":"SynchronizeGatewayTargetsResponse"}, + "input":{"shape":"ListPolicyGenerationAssetsRequest"}, + "output":{"shape":"ListPolicyGenerationAssetsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Retrieves a list of generated policy assets from a policy generation request within the AgentCore Policy system. This operation returns the actual Cedar policies and related artifacts produced by the AI-powered policy generation process, allowing users to review and select from multiple generated policy options.

", + "readonly":true + }, + "ListPolicyGenerations":{ + "name":"ListPolicyGenerations", + "http":{ + "method":"GET", + "requestUri":"/policy-engines/{policyEngineId}/policy-generations", + "responseCode":200 + }, + "input":{"shape":"ListPolicyGenerationsRequest"}, + "output":{"shape":"ListPolicyGenerationsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Retrieves a list of policy generation requests within the AgentCore Policy system. This operation supports pagination and filtering to help track and manage AI-powered policy generation operations.

", + "readonly":true + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists the tags associated with the specified resource.

This feature is currently available only for AgentCore Runtime, Browser, Code Interpreter tool, and Gateway.

", + "readonly":true + }, + "ListWorkloadIdentities":{ + "name":"ListWorkloadIdentities", + "http":{ + "method":"POST", + "requestUri":"/identities/ListWorkloadIdentities", + "responseCode":200 + }, + "input":{"shape":"ListWorkloadIdentitiesRequest"}, + "output":{"shape":"ListWorkloadIdentitiesResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists all workload identities in your account.

", + "readonly":true + }, + "PutResourcePolicy":{ + "name":"PutResourcePolicy", + "http":{ + "method":"PUT", + "requestUri":"/resourcepolicy/{resourceArn}", + "responseCode":201 + }, + "input":{"shape":"PutResourcePolicyRequest"}, + "output":{"shape":"PutResourcePolicyResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Creates or updates a resource-based policy for a resource with the specified resourceArn.

This feature is currently available only for AgentCore Runtime and Gateway.

", + "idempotent":true + }, + "SetTokenVaultCMK":{ + "name":"SetTokenVaultCMK", + "http":{ + "method":"POST", + "requestUri":"/identities/set-token-vault-cmk", + "responseCode":200 + }, + "input":{"shape":"SetTokenVaultCMKRequest"}, + "output":{"shape":"SetTokenVaultCMKResponse"}, + "errors":[ + {"shape":"UnauthorizedException"}, + {"shape":"ConcurrentModificationException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Sets the customer master key (CMK) for a token vault.

" + }, + "StartPolicyGeneration":{ + "name":"StartPolicyGeneration", + "http":{ + "method":"POST", + "requestUri":"/policy-engines/{policyEngineId}/policy-generations", + "responseCode":202 + }, + "input":{"shape":"StartPolicyGenerationRequest"}, + "output":{"shape":"StartPolicyGenerationResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Initiates the AI-powered generation of Cedar policies from natural language descriptions within the AgentCore Policy system. This feature enables both technical and non-technical users to create policies by describing their authorization requirements in plain English, which is then automatically translated into formal Cedar policy statements. The generation process analyzes the natural language input along with the Gateway's tool context to produce validated policy options. Generated policy assets are automatically deleted after 7 days, so you should review and create policies from the generated assets within this timeframe. Once created, policies are permanent and not subject to this expiration. Generated policies should be reviewed and tested in log-only mode before deploying to production. Use this when you want to describe policy intent naturally rather than learning Cedar syntax, though generated policies may require refinement for complex scenarios.

" + }, + "SynchronizeGatewayTargets":{ + "name":"SynchronizeGatewayTargets", + "http":{ + "method":"PUT", + "requestUri":"/gateways/{gatewayIdentifier}/synchronizeTargets", + "responseCode":202 + }, + "input":{"shape":"SynchronizeGatewayTargetsRequest"}, + "output":{"shape":"SynchronizeGatewayTargetsResponse"}, "errors":[ {"shape":"ServiceQuotaExceededException"}, {"shape":"ConflictException"}, @@ -1010,6 +1451,27 @@ "documentation":"

Updates an existing API key credential provider.

", "idempotent":true }, + "UpdateEvaluator":{ + "name":"UpdateEvaluator", + "http":{ + "method":"PUT", + "requestUri":"/evaluators/{evaluatorId}", + "responseCode":202 + }, + "input":{"shape":"UpdateEvaluatorRequest"}, + "output":{"shape":"UpdateEvaluatorResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Updates a custom evaluator's configuration, description, or evaluation level. Built-in evaluators cannot be updated. The evaluator must not be locked for modification.

", + "idempotent":true + }, "UpdateGateway":{ "name":"UpdateGateway", "http":{ @@ -1096,6 +1558,67 @@ ], "documentation":"

Updates an existing OAuth2 credential provider.

" }, + "UpdateOnlineEvaluationConfig":{ + "name":"UpdateOnlineEvaluationConfig", + "http":{ + "method":"PUT", + "requestUri":"/online-evaluation-configs/{onlineEvaluationConfigId}", + "responseCode":202 + }, + "input":{"shape":"UpdateOnlineEvaluationConfigRequest"}, + "output":{"shape":"UpdateOnlineEvaluationConfigResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Updates an online evaluation configuration's settings, including rules, data sources, evaluators, and execution status. Changes take effect immediately for ongoing evaluations.

", + "idempotent":true + }, + "UpdatePolicy":{ + "name":"UpdatePolicy", + "http":{ + "method":"PUT", + "requestUri":"/policy-engines/{policyEngineId}/policies/{policyId}", + "responseCode":202 + }, + "input":{"shape":"UpdatePolicyRequest"}, + "output":{"shape":"UpdatePolicyResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Updates an existing policy within the AgentCore Policy system. This operation allows modification of the policy description and definition while maintaining the policy's identity. The updated policy is validated against the Cedar schema before being applied. This is an asynchronous operation. Use the GetPolicy operation to poll the status field to track completion.

", + "idempotent":true + }, + "UpdatePolicyEngine":{ + "name":"UpdatePolicyEngine", + "http":{ + "method":"PUT", + "requestUri":"/policy-engines/{policyEngineId}", + "responseCode":202 + }, + "input":{"shape":"UpdatePolicyEngineRequest"}, + "output":{"shape":"UpdatePolicyEngineResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Updates an existing policy engine within the AgentCore Policy system. This operation allows modification of the policy engine description while maintaining its identity. This is an asynchronous operation. Use the GetPolicyEngine operation to poll the status field to track completion.

", + "idempotent":true + }, "UpdateWorkloadIdentity":{ "name":"UpdateWorkloadIdentity", "http":{ @@ -1130,6 +1653,11 @@ }, "exception":true }, + "AdditionalModelRequestFields":{ + "type":"structure", + "members":{}, + "document":true + }, "AgentEndpointDescription":{ "type":"string", "max":256, @@ -1325,6 +1853,126 @@ "member":{"shape":"AllowedClient"}, "min":1 }, + "AllowedQueryParameters":{ + "type":"list", + "member":{"shape":"HttpQueryParameterName"}, + "max":10, + "min":1 + }, + "AllowedRequestHeaders":{ + "type":"list", + "member":{"shape":"HttpHeaderName"}, + "max":10, + "min":1 + }, + "AllowedResponseHeaders":{ + "type":"list", + "member":{"shape":"HttpHeaderName"}, + "max":10, + "min":1 + }, + "AllowedScopeType":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[\\x21\\x23-\\x5B\\x5D-\\x7E]+" + }, + "AllowedScopesType":{ + "type":"list", + "member":{"shape":"AllowedScopeType"}, + "min":1 + }, + "ApiGatewayTargetConfiguration":{ + "type":"structure", + "required":[ + "restApiId", + "stage", + "apiGatewayToolConfiguration" + ], + "members":{ + "restApiId":{ + "shape":"String", + "documentation":"

The ID of the API Gateway REST API.

" + }, + "stage":{ + "shape":"String", + "documentation":"

The ID of the stage of the REST API to add as a target.

" + }, + "apiGatewayToolConfiguration":{ + "shape":"ApiGatewayToolConfiguration", + "documentation":"

The configuration for defining REST API tool filters and overrides for the gateway target.

" + } + }, + "documentation":"

The configuration for an Amazon API Gateway target.

" + }, + "ApiGatewayToolConfiguration":{ + "type":"structure", + "required":["toolFilters"], + "members":{ + "toolOverrides":{ + "shape":"ApiGatewayToolOverrides", + "documentation":"

A list of explicit tool definitions with optional custom names and descriptions.

" + }, + "toolFilters":{ + "shape":"ApiGatewayToolFilters", + "documentation":"

A list of path and method patterns to expose as tools using metadata from the REST API's OpenAPI specification.

" + } + }, + "documentation":"

The configuration for defining REST API tool filters and overrides for the gateway target.

" + }, + "ApiGatewayToolFilter":{ + "type":"structure", + "required":[ + "filterPath", + "methods" + ], + "members":{ + "filterPath":{ + "shape":"String", + "documentation":"

Resource path to match in the REST API. Supports exact paths (for example, /pets) or wildcard paths (for example, /pets/* to match all paths under /pets). Must match existing paths in the REST API.

" + }, + "methods":{ + "shape":"RestApiMethods", + "documentation":"

The methods to filter for.

" + } + }, + "documentation":"

Specifies which operations from an API Gateway REST API are exposed as tools. Tool names and descriptions are derived from the operationId and description fields in the API's exported OpenAPI specification.

" + }, + "ApiGatewayToolFilters":{ + "type":"list", + "member":{"shape":"ApiGatewayToolFilter"} + }, + "ApiGatewayToolOverride":{ + "type":"structure", + "required":[ + "name", + "path", + "method" + ], + "members":{ + "name":{ + "shape":"String", + "documentation":"

The name of the tool. Identifies the tool in the Model Context Protocol.

" + }, + "description":{ + "shape":"String", + "documentation":"

The description of the tool. Provides information about the purpose and usage of the tool. If not provided, uses the description from the API's OpenAPI specification.

" + }, + "path":{ + "shape":"String", + "documentation":"

Resource path in the REST API (e.g., /pets). Must explicitly match an existing path in the REST API.

" + }, + "method":{ + "shape":"RestApiMethod", + "documentation":"

The HTTP method to expose for the specified path.

" + } + }, + "documentation":"

Settings to override configurations for a tool.

" + }, + "ApiGatewayToolOverrides":{ + "type":"list", + "member":{"shape":"ApiGatewayToolOverride"} + }, "ApiKeyCredentialLocation":{ "type":"string", "enum":[ @@ -1477,17 +2125,59 @@ "NONE" ] }, + "AuthorizingClaimMatchValueType":{ + "type":"structure", + "required":[ + "claimMatchValue", + "claimMatchOperator" + ], + "members":{ + "claimMatchValue":{ + "shape":"ClaimMatchValueType", + "documentation":"

The value or values to match for.

" + }, + "claimMatchOperator":{ + "shape":"ClaimMatchOperatorType", + "documentation":"

Defines the relationship between the claim field value and the value or values you're matching for.

" + } + }, + "documentation":"

Defines the value or values to match for and the relationship of the match.

" + }, "AwsAccountId":{ "type":"string", "pattern":"[0-9]{12}" }, - "Boolean":{ - "type":"boolean", - "box":true - }, - "BrowserArn":{ + "BedrockAgentcoreResourceArn":{ "type":"string", - "pattern":"arn:(-[^:]+)?:bedrock-agentcore:[a-z0-9-]+:(aws|[0-9]{12}):browser(-custom)?/(aws\\.browser\\.v1|[a-zA-Z][a-zA-Z0-9_]{0,47}-[a-zA-Z0-9]{10})" + "max":1011, + "min":20 + }, + "BedrockEvaluatorModelConfig":{ + "type":"structure", + "required":["modelId"], + "members":{ + "modelId":{ + "shape":"ModelId", + "documentation":"

The identifier of the Amazon Bedrock model to use for evaluation. Must be a supported foundation model available in your region.

" + }, + "inferenceConfig":{ + "shape":"InferenceConfiguration", + "documentation":"

The inference configuration parameters that control model behavior during evaluation, including temperature, token limits, and sampling settings.

" + }, + "additionalModelRequestFields":{ + "shape":"AdditionalModelRequestFields", + "documentation":"

Additional model-specific request fields to customize model behavior beyond the standard inference configuration.

" + } + }, + "documentation":"

The configuration for using Amazon Bedrock models in evaluator assessments, including model selection and inference parameters.

" + }, + "Boolean":{ + "type":"boolean", + "box":true + }, + "BrowserArn":{ + "type":"string", + "pattern":"arn:(-[^:]+)?:bedrock-agentcore:[a-z0-9-]+:(aws|[0-9]{12}):browser(-custom)?/(aws\\.browser\\.v1|[a-zA-Z][a-zA-Z0-9_]{0,47}-[a-zA-Z0-9]{10})" }, "BrowserId":{ "type":"string", @@ -1589,6 +2279,67 @@ }, "documentation":"

Contains summary information about a browser. A browser enables Amazon Bedrock Agent to interact with web content.

" }, + "CategoricalScaleDefinition":{ + "type":"structure", + "required":[ + "definition", + "label" + ], + "members":{ + "definition":{ + "shape":"String", + "documentation":"

The description that explains what this categorical rating represents and when it should be used.

" + }, + "label":{ + "shape":"CategoricalScaleDefinitionLabelString", + "documentation":"

The label or name of this categorical rating option.

" + } + }, + "documentation":"

The definition of a categorical rating scale option that provides a named category with its description for evaluation scoring.

" + }, + "CategoricalScaleDefinitionLabelString":{ + "type":"string", + "max":100, + "min":1 + }, + "CategoricalScaleDefinitions":{ + "type":"list", + "member":{"shape":"CategoricalScaleDefinition"} + }, + "CedarPolicy":{ + "type":"structure", + "required":["statement"], + "members":{ + "statement":{ + "shape":"Statement", + "documentation":"

The Cedar policy statement that defines the authorization logic. This statement follows Cedar syntax and specifies principals, actions, resources, and conditions that determine when access should be allowed or denied.

" + } + }, + "documentation":"

Represents a Cedar policy statement within the AgentCore Policy system. Cedar is a policy language designed for authorization that provides human-readable, analyzable, and high-performance policy evaluation for controlling agent behavior and access decisions.

" + }, + "ClaimMatchOperatorType":{ + "type":"string", + "enum":[ + "EQUALS", + "CONTAINS", + "CONTAINS_ANY" + ] + }, + "ClaimMatchValueType":{ + "type":"structure", + "members":{ + "matchValueString":{ + "shape":"MatchValueString", + "documentation":"

The string value to match for.

" + }, + "matchValueStringList":{ + "shape":"MatchValueStringList", + "documentation":"

An array of strings to check for a match.

" + } + }, + "documentation":"

The value or values to match for.

", + "union":true + }, "ClientIdType":{ "type":"string", "max":256, @@ -1606,6 +2357,47 @@ "min":33, "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,256}" }, + "CloudWatchLogsInputConfig":{ + "type":"structure", + "required":[ + "logGroupNames", + "serviceNames" + ], + "members":{ + "logGroupNames":{ + "shape":"CloudWatchLogsInputConfigLogGroupNamesList", + "documentation":"

The list of CloudWatch log group names to monitor for agent traces.

" + }, + "serviceNames":{ + "shape":"CloudWatchLogsInputConfigServiceNamesList", + "documentation":"

The list of service names to filter traces within the specified log groups. Used to identify relevant agent sessions.

" + } + }, + "documentation":"

The configuration for reading agent traces from CloudWatch logs as input for online evaluation.

" + }, + "CloudWatchLogsInputConfigLogGroupNamesList":{ + "type":"list", + "member":{"shape":"LogGroupName"}, + "max":5, + "min":1 + }, + "CloudWatchLogsInputConfigServiceNamesList":{ + "type":"list", + "member":{"shape":"ServiceName"}, + "max":1, + "min":1 + }, + "CloudWatchOutputConfig":{ + "type":"structure", + "required":["logGroupName"], + "members":{ + "logGroupName":{ + "shape":"LogGroupName", + "documentation":"

The name of the CloudWatch log group where evaluation results will be written. The log group will be created if it doesn't exist.

" + } + }, + "documentation":"

The configuration for writing evaluation results to CloudWatch logs with embedded metric format (EMF) for monitoring.

" + }, "Code":{ "type":"structure", "members":{ @@ -1776,6 +2568,17 @@ }, "documentation":"

Representation of a container configuration.

" }, + "Content":{ + "type":"structure", + "members":{ + "rawText":{ + "shape":"NaturalLanguage", + "documentation":"

The raw text content containing natural language descriptions of desired policy behavior. This text is processed by AI to generate corresponding Cedar policy statements that match the described intent.

" + } + }, + "documentation":"

Represents content input for policy generation operations. This structure encapsulates the natural language descriptions or other content formats that are used as input for AI-powered policy generation.

", + "union":true + }, "CreateAgentRuntimeEndpointRequest":{ "type":"structure", "required":[ @@ -2118,6 +2921,64 @@ } } }, + "CreateEvaluatorRequest":{ + "type":"structure", + "required":[ + "evaluatorName", + "evaluatorConfig", + "level" + ], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If you don't specify this field, a value is randomly generated for you. If this token matches a previous request, the service ignores the request, but doesn't return an error. For more information, see Ensuring idempotency.

", + "idempotencyToken":true + }, + "evaluatorName":{ + "shape":"CustomEvaluatorName", + "documentation":"

The name of the evaluator. Must be unique within your account.

" + }, + "description":{ + "shape":"EvaluatorDescription", + "documentation":"

The description of the evaluator that explains its purpose and evaluation criteria.

" + }, + "evaluatorConfig":{ + "shape":"EvaluatorConfig", + "documentation":"

The configuration for the evaluator, including LLM-as-a-Judge settings with instructions, rating scale, and model configuration.

" + }, + "level":{ + "shape":"EvaluatorLevel", + "documentation":"

The evaluation level that determines the scope of evaluation. Valid values are TOOL_CALL for individual tool invocations, TRACE for single request-response interactions, or SESSION for entire conversation sessions.

" + } + } + }, + "CreateEvaluatorResponse":{ + "type":"structure", + "required":[ + "evaluatorArn", + "evaluatorId", + "createdAt", + "status" + ], + "members":{ + "evaluatorArn":{ + "shape":"CustomEvaluatorArn", + "documentation":"

The Amazon Resource Name (ARN) of the created evaluator.

" + }, + "evaluatorId":{ + "shape":"EvaluatorId", + "documentation":"

The unique identifier of the created evaluator.

" + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the evaluator was created.

" + }, + "status":{ + "shape":"EvaluatorStatus", + "documentation":"

The status of the evaluator creation operation.

" + } + } + }, "CreateGatewayRequest":{ "type":"structure", "required":[ @@ -2168,6 +3029,10 @@ "shape":"GatewayInterceptorConfigurations", "documentation":"

A list of configuration settings for a gateway interceptor. Gateway interceptors allow custom code to be invoked during gateway invocations.

" }, + "policyEngineConfiguration":{ + "shape":"GatewayPolicyEngineConfiguration", + "documentation":"

The policy engine configuration for the gateway. A policy engine is a collection of policies that evaluates and authorizes agent tool calls. When associated with a gateway, the policy engine intercepts all agent requests and determines whether to allow or deny each action based on the defined policies.

" + }, "exceptionLevel":{ "shape":"ExceptionLevel", "documentation":"

The level of detail in error messages returned when invoking the gateway.

" @@ -2255,6 +3120,10 @@ "shape":"GatewayInterceptorConfigurations", "documentation":"

The list of interceptor configurations for the created gateway.

" }, + "policyEngineConfiguration":{ + "shape":"GatewayPolicyEngineConfiguration", + "documentation":"

The policy engine configuration for the created gateway.

" + }, "workloadIdentityDetails":{ "shape":"WorkloadIdentityDetails", "documentation":"

The workload identity details for the created gateway.

" @@ -2299,6 +3168,10 @@ "credentialProviderConfigurations":{ "shape":"CredentialProviderConfigurations", "documentation":"

The credential provider configurations for the target. These configurations specify how the gateway authenticates with the target endpoint.

" + }, + "metadataConfiguration":{ + "shape":"MetadataConfiguration", + "documentation":"

Optional configuration for HTTP header and query parameter propagation to and from the gateway target.

" } } }, @@ -2358,6 +3231,10 @@ "lastSynchronizedAt":{ "shape":"DateTimestamp", "documentation":"

The last synchronization of the target.

" + }, + "metadataConfiguration":{ + "shape":"MetadataConfiguration", + "documentation":"

The metadata configuration that was applied to the created gateway target.

" } } }, @@ -2412,7 +3289,7 @@ "type":"integer", "box":true, "max":365, - "min":7 + "min":3 }, "CreateMemoryOutput":{ "type":"structure", @@ -2476,6 +3353,247 @@ "oauth2ProviderConfigOutput":{"shape":"Oauth2ProviderConfigOutput"} } }, + "CreateOnlineEvaluationConfigRequest":{ + "type":"structure", + "required":[ + "onlineEvaluationConfigName", + "rule", + "dataSourceConfig", + "evaluators", + "evaluationExecutionRoleArn", + "enableOnCreate" + ], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If you don't specify this field, a value is randomly generated for you. If this token matches a previous request, the service ignores the request, but doesn't return an error. For more information, see Ensuring idempotency.

", + "idempotencyToken":true + }, + "onlineEvaluationConfigName":{ + "shape":"EvaluationConfigName", + "documentation":"

The name of the online evaluation configuration. Must be unique within your account.

" + }, + "description":{ + "shape":"EvaluationConfigDescription", + "documentation":"

The description of the online evaluation configuration that explains its monitoring purpose and scope.

" + }, + "rule":{ + "shape":"Rule", + "documentation":"

The evaluation rule that defines sampling configuration, filters, and session detection settings for the online evaluation.

" + }, + "dataSourceConfig":{ + "shape":"DataSourceConfig", + "documentation":"

The data source configuration that specifies CloudWatch log groups and service names to monitor for agent traces.

" + }, + "evaluators":{ + "shape":"EvaluatorList", + "documentation":"

The list of evaluators to apply during online evaluation. Can include both built-in evaluators and custom evaluators created with CreateEvaluator.

" + }, + "evaluationExecutionRoleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that grants permissions to read from CloudWatch logs, write evaluation results, and invoke Amazon Bedrock models for evaluation.

" + }, + "enableOnCreate":{ + "shape":"Boolean", + "documentation":"

Whether to enable the online evaluation configuration immediately upon creation. If true, evaluation begins automatically.

" + } + } + }, + "CreateOnlineEvaluationConfigResponse":{ + "type":"structure", + "required":[ + "onlineEvaluationConfigArn", + "onlineEvaluationConfigId", + "createdAt", + "status", + "executionStatus" + ], + "members":{ + "onlineEvaluationConfigArn":{ + "shape":"OnlineEvaluationConfigArn", + "documentation":"

The Amazon Resource Name (ARN) of the created online evaluation configuration.

" + }, + "onlineEvaluationConfigId":{ + "shape":"OnlineEvaluationConfigId", + "documentation":"

The unique identifier of the created online evaluation configuration.

" + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the online evaluation configuration was created.

" + }, + "outputConfig":{"shape":"OutputConfig"}, + "status":{ + "shape":"OnlineEvaluationConfigStatus", + "documentation":"

The status of the online evaluation configuration.

" + }, + "executionStatus":{ + "shape":"OnlineEvaluationExecutionStatus", + "documentation":"

The execution status indicating whether the online evaluation is currently running.

" + }, + "failureReason":{ + "shape":"String", + "documentation":"

The reason for failure if the online evaluation configuration creation or execution failed.

" + } + } + }, + "CreatePolicyEngineRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"PolicyEngineName", + "documentation":"

The customer-assigned immutable name for the policy engine. This name identifies the policy engine and cannot be changed after creation.

" + }, + "description":{ + "shape":"Description", + "documentation":"

A human-readable description of the policy engine's purpose and scope (1-4,096 characters). This helps administrators understand the policy engine's role in the overall governance strategy. Document which Gateway this engine will be associated with, what types of tools or workflows it governs, and the team or service responsible for maintaining it. Clear descriptions are essential when managing multiple policy engines across different services or environments.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If you retry a request with the same client token, the service returns the same response without creating a duplicate policy engine.

", + "idempotencyToken":true + } + } + }, + "CreatePolicyEngineResponse":{ + "type":"structure", + "required":[ + "policyEngineId", + "name", + "createdAt", + "updatedAt", + "policyEngineArn", + "status", + "statusReasons" + ], + "members":{ + "policyEngineId":{ + "shape":"ResourceId", + "documentation":"

The unique identifier for the created policy engine. This system-generated identifier consists of the user name plus a 10-character generated suffix and is used for all subsequent policy engine operations.

" + }, + "name":{ + "shape":"PolicyEngineName", + "documentation":"

The customer-assigned name of the created policy engine. This matches the name provided in the request and serves as the human-readable identifier.

" + }, + "description":{ + "shape":"Description", + "documentation":"

A human-readable description of the policy engine's purpose.

" + }, + "createdAt":{ + "shape":"DateTimestamp", + "documentation":"

The timestamp when the policy engine was created. This is automatically set by the service and used for auditing and lifecycle management.

" + }, + "updatedAt":{ + "shape":"DateTimestamp", + "documentation":"

The timestamp when the policy engine was last updated. For newly created policy engines, this matches the createdAt timestamp.

" + }, + "policyEngineArn":{ + "shape":"PolicyEngineArn", + "documentation":"

The Amazon Resource Name (ARN) of the created policy engine. This globally unique identifier can be used for cross-service references and IAM policy statements.

" + }, + "status":{ + "shape":"PolicyEngineStatus", + "documentation":"

The current status of the policy engine. A status of ACTIVE indicates the policy engine is ready for use.

" + }, + "statusReasons":{ + "shape":"PolicyStatusReasons", + "documentation":"

Additional information about the policy engine status. This provides details about any failures or the current state of the policy engine creation process.

" + } + } + }, + "CreatePolicyRequest":{ + "type":"structure", + "required":[ + "name", + "definition", + "policyEngineId" + ], + "members":{ + "name":{ + "shape":"PolicyName", + "documentation":"

The customer-assigned immutable name for the policy. Must be unique within the account. This name is used for policy identification and cannot be changed after creation.

" + }, + "definition":{ + "shape":"PolicyDefinition", + "documentation":"

The Cedar policy statement that defines the access control rules. This contains the actual policy logic written in Cedar policy language, specifying effect (permit or forbid), principals, actions, resources, and conditions for agent behavior control.

" + }, + "description":{ + "shape":"Description", + "documentation":"

A human-readable description of the policy's purpose and functionality (1-4,096 characters). This helps policy administrators understand the policy's intent, business rules, and operational scope. Use this field to document why the policy exists, what business requirement it addresses, and any special considerations for maintenance. Clear descriptions are essential for policy governance, auditing, and troubleshooting.

" + }, + "validationMode":{ + "shape":"PolicyValidationMode", + "documentation":"

The validation mode for the policy creation. Determines how Cedar analyzer validation results are handled during policy creation. FAIL_ON_ANY_FINDINGS (default) runs the Cedar analyzer to validate the policy against the Cedar schema and tool context, failing creation if the analyzer detects any validation issues to ensure strict conformance. IGNORE_ALL_FINDINGS runs the Cedar analyzer but allows policy creation even if validation issues are detected, useful for testing or when the policy schema is evolving. Use FAIL_ON_ANY_FINDINGS for production policies to ensure correctness, and IGNORE_ALL_FINDINGS only when you understand and accept the analyzer findings.

" + }, + "policyEngineId":{ + "shape":"ResourceId", + "documentation":"

The identifier of the policy engine which contains this policy. Policy engines group related policies and provide the execution context for policy evaluation.

", + "location":"uri", + "locationName":"policyEngineId" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique, case-sensitive identifier to ensure the idempotency of the request. The AWS SDK automatically generates this token, so you don't need to provide it in most cases. If you retry a request with the same client token, the service returns the same response without creating a duplicate policy.

", + "idempotencyToken":true + } + } + }, + "CreatePolicyResponse":{ + "type":"structure", + "required":[ + "policyId", + "name", + "policyEngineId", + "definition", + "createdAt", + "updatedAt", + "policyArn", + "status", + "statusReasons" + ], + "members":{ + "policyId":{ + "shape":"ResourceId", + "documentation":"

The unique identifier for the created policy. This is a system-generated identifier consisting of the user name plus a 10-character generated suffix, used for all subsequent policy operations.

" + }, + "name":{ + "shape":"PolicyName", + "documentation":"

The customer-assigned name of the created policy. This matches the name provided in the request and serves as the human-readable identifier for the policy.

" + }, + "policyEngineId":{ + "shape":"ResourceId", + "documentation":"

The identifier of the policy engine that manages this policy. This confirms the policy engine assignment and is used for policy evaluation routing.

" + }, + "definition":{ + "shape":"PolicyDefinition", + "documentation":"

The Cedar policy statement that was created. This is the validated policy definition that will be used for agent behavior control and access decisions.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The human-readable description of the policy's purpose and functionality. This helps administrators understand and manage the policy.

" + }, + "createdAt":{ + "shape":"DateTimestamp", + "documentation":"

The timestamp when the policy was created. This is automatically set by the service and used for auditing and lifecycle management.

" + }, + "updatedAt":{ + "shape":"DateTimestamp", + "documentation":"

The timestamp when the policy was last updated. For newly created policies, this matches the createdAt timestamp.

" + }, + "policyArn":{ + "shape":"PolicyArn", + "documentation":"

The Amazon Resource Name (ARN) of the created policy. This globally unique identifier can be used for cross-service references and IAM policy statements.

" + }, + "status":{ + "shape":"PolicyStatus", + "documentation":"

The current status of the policy. A status of ACTIVE indicates the policy is ready for use.

" + }, + "statusReasons":{ + "shape":"PolicyStatusReasons", + "documentation":"

Additional information about the policy status. This provides details about any failures or the current state of the policy creation process.

" + } + } + }, "CreateWorkloadIdentityRequest":{ "type":"structure", "required":["name"], @@ -2599,6 +3717,34 @@ "CognitoOauth2" ] }, + "CustomClaimValidationType":{ + "type":"structure", + "required":[ + "inboundTokenClaimName", + "inboundTokenClaimValueType", + "authorizingClaimMatchValue" + ], + "members":{ + "inboundTokenClaimName":{ + "shape":"InboundTokenClaimNameType", + "documentation":"

The name of the custom claim field to check.

" + }, + "inboundTokenClaimValueType":{ + "shape":"InboundTokenClaimValueType", + "documentation":"

The data type of the claim value to check for.

" + }, + "authorizingClaimMatchValue":{ + "shape":"AuthorizingClaimMatchValueType", + "documentation":"

Defines the value or values to match for and the relationship of the match.

" + } + }, + "documentation":"

Defines the name of a custom claim field and rules for finding matches to authenticate its value.

" + }, + "CustomClaimValidationsType":{ + "type":"list", + "member":{"shape":"CustomClaimValidationType"}, + "min":1 + }, "CustomConfigurationInput":{ "type":"structure", "members":{ @@ -2614,6 +3760,10 @@ "shape":"UserPreferenceOverrideConfigurationInput", "documentation":"

The user preference override configuration for a custom memory strategy.

" }, + "episodicOverride":{ + "shape":"EpisodicOverrideConfigurationInput", + "documentation":"

The episodic memory strategy override configuration for a custom memory strategy.

" + }, "selfManagedConfiguration":{ "shape":"SelfManagedConfigurationInput", "documentation":"

The self managed configuration for a custom memory strategy.

" @@ -2636,6 +3786,10 @@ "userPreferenceConsolidationOverride":{ "shape":"UserPreferenceConsolidationOverride", "documentation":"

The user preference consolidation override configuration.

" + }, + "episodicConsolidationOverride":{ + "shape":"EpisodicConsolidationOverride", + "documentation":"

The configurations to override the default consolidation step for the episodic memory strategy.

" } }, "documentation":"

Contains custom consolidation configuration information.

", @@ -2655,11 +3809,23 @@ "userPreferenceConsolidationOverride":{ "shape":"UserPreferenceOverrideConsolidationConfigurationInput", "documentation":"

The user preference consolidation override configuration input.

" + }, + "episodicConsolidationOverride":{ + "shape":"EpisodicOverrideConsolidationConfigurationInput", + "documentation":"

Configurations to override the consolidation step of the episodic strategy.

" } }, "documentation":"

Input for a custom consolidation configuration.

", "union":true }, + "CustomEvaluatorArn":{ + "type":"string", + "pattern":"arn:aws:bedrock-agentcore:[a-z0-9-]+:[0-9]{12}:evaluator\\/[a-zA-Z][a-zA-Z0-9-_]{0,99}-[a-zA-Z0-9]{10}" + }, + "CustomEvaluatorName":{ + "type":"string", + "pattern":"[a-zA-Z][a-zA-Z0-9_]{0,47}" + }, "CustomExtractionConfiguration":{ "type":"structure", "members":{ @@ -2670,6 +3836,10 @@ "userPreferenceExtractionOverride":{ "shape":"UserPreferenceExtractionOverride", "documentation":"

The user preference extraction override configuration.

" + }, + "episodicExtractionOverride":{ + "shape":"EpisodicExtractionOverride", + "documentation":"

The configurations to override the default extraction step for the episodic memory strategy.

" } }, "documentation":"

Contains custom extraction configuration information.

", @@ -2685,6 +3855,10 @@ "userPreferenceExtractionOverride":{ "shape":"UserPreferenceOverrideExtractionConfigurationInput", "documentation":"

The user preference extraction override configuration input.

" + }, + "episodicExtractionOverride":{ + "shape":"EpisodicOverrideExtractionConfigurationInput", + "documentation":"

Configurations to override the extraction step of the episodic strategy.

" } }, "documentation":"

Input for a custom extraction configuration.

", @@ -2705,6 +3879,14 @@ "allowedClients":{ "shape":"AllowedClientsList", "documentation":"

Represents individual client IDs that are validated in the incoming JWT token validation process.

" + }, + "allowedScopes":{ + "shape":"AllowedScopesType", + "documentation":"

An array of scopes that are allowed to access the token.

" + }, + "customClaims":{ + "shape":"CustomClaimValidationsType", + "documentation":"

An array of objects that define a custom claim validation name, value, and operation.

" } }, "documentation":"

Configuration for inbound JWT-based authorization, specifying how incoming requests should be authenticated.

" @@ -2770,6 +3952,39 @@ }, "documentation":"

Output configuration for a custom OAuth2 provider.

" }, + "CustomReflectionConfiguration":{ + "type":"structure", + "members":{ + "episodicReflectionOverride":{ + "shape":"EpisodicReflectionOverride", + "documentation":"

The configuration for a reflection strategy to override the default one.

" + } + }, + "documentation":"

Contains configurations for a custom reflection strategy.

", + "union":true + }, + "CustomReflectionConfigurationInput":{ + "type":"structure", + "members":{ + "episodicReflectionOverride":{ + "shape":"EpisodicOverrideReflectionConfigurationInput", + "documentation":"

The reflection override configuration input.

" + } + }, + "documentation":"

Input for a custom reflection configuration.

", + "union":true + }, + "DataSourceConfig":{ + "type":"structure", + "members":{ + "cloudWatchLogs":{ + "shape":"CloudWatchLogsInputConfig", + "documentation":"

The CloudWatch logs configuration for reading agent traces from log groups.

" + } + }, + "documentation":"

The configuration that specifies where to read agent traces for online evaluation.

", + "union":true + }, "DateTimestamp":{ "type":"timestamp", "timestampFormat":"iso8601" @@ -2962,6 +4177,40 @@ } } }, + "DeleteEvaluatorRequest":{ + "type":"structure", + "required":["evaluatorId"], + "members":{ + "evaluatorId":{ + "shape":"EvaluatorId", + "documentation":"

The unique identifier of the evaluator to delete.

", + "location":"uri", + "locationName":"evaluatorId" + } + } + }, + "DeleteEvaluatorResponse":{ + "type":"structure", + "required":[ + "evaluatorArn", + "evaluatorId", + "status" + ], + "members":{ + "evaluatorArn":{ + "shape":"EvaluatorArn", + "documentation":"

The Amazon Resource Name (ARN) of the deleted evaluator.

" + }, + "evaluatorId":{ + "shape":"EvaluatorId", + "documentation":"

The unique identifier of the deleted evaluator.

" + }, + "status":{ + "shape":"EvaluatorStatus", + "documentation":"

The status of the evaluator deletion operation.

" + } + } + }, "DeleteGatewayRequest":{ "type":"structure", "required":["gatewayIdentifier"], @@ -3109,6 +4358,188 @@ "type":"structure", "members":{} }, + "DeleteOnlineEvaluationConfigRequest":{ + "type":"structure", + "required":["onlineEvaluationConfigId"], + "members":{ + "onlineEvaluationConfigId":{ + "shape":"OnlineEvaluationConfigId", + "documentation":"

The unique identifier of the online evaluation configuration to delete.

", + "location":"uri", + "locationName":"onlineEvaluationConfigId" + } + } + }, + "DeleteOnlineEvaluationConfigResponse":{ + "type":"structure", + "required":[ + "onlineEvaluationConfigArn", + "onlineEvaluationConfigId", + "status" + ], + "members":{ + "onlineEvaluationConfigArn":{ + "shape":"OnlineEvaluationConfigArn", + "documentation":"

The Amazon Resource Name (ARN) of the deleted online evaluation configuration.

" + }, + "onlineEvaluationConfigId":{ + "shape":"OnlineEvaluationConfigId", + "documentation":"

The unique identifier of the deleted online evaluation configuration.

" + }, + "status":{ + "shape":"OnlineEvaluationConfigStatus", + "documentation":"

The status of the online evaluation configuration deletion operation.

" + } + } + }, + "DeletePolicyEngineRequest":{ + "type":"structure", + "required":["policyEngineId"], + "members":{ + "policyEngineId":{ + "shape":"ResourceId", + "documentation":"

The unique identifier of the policy engine to be deleted. This must be a valid policy engine ID that exists within the account.

", + "location":"uri", + "locationName":"policyEngineId" + } + } + }, + "DeletePolicyEngineResponse":{ + "type":"structure", + "required":[ + "policyEngineId", + "name", + "createdAt", + "updatedAt", + "policyEngineArn", + "status", + "statusReasons" + ], + "members":{ + "policyEngineId":{ + "shape":"ResourceId", + "documentation":"

The unique identifier of the policy engine being deleted. This confirms which policy engine the deletion operation targets.

" + }, + "name":{ + "shape":"PolicyEngineName", + "documentation":"

The customer-assigned name of the deleted policy engine.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The human-readable description of the deleted policy engine.

" + }, + "createdAt":{ + "shape":"DateTimestamp", + "documentation":"

The timestamp when the deleted policy engine was originally created.

" + }, + "updatedAt":{ + "shape":"DateTimestamp", + "documentation":"

The timestamp when the deleted policy engine was last modified before deletion. This tracks the final state of the policy engine before it was removed from the system.

" + }, + "policyEngineArn":{ + "shape":"PolicyEngineArn", + "documentation":"

The Amazon Resource Name (ARN) of the deleted policy engine. This globally unique identifier confirms which policy engine resource was successfully removed.

" + }, + "status":{ + "shape":"PolicyEngineStatus", + "documentation":"

The status of the policy engine deletion operation. This provides status about any issues that occurred during the deletion process.

" + }, + "statusReasons":{ + "shape":"PolicyStatusReasons", + "documentation":"

Additional information about the deletion status. This provides details about the deletion process or any issues that may have occurred.

" + } + } + }, + "DeletePolicyRequest":{ + "type":"structure", + "required":[ + "policyEngineId", + "policyId" + ], + "members":{ + "policyEngineId":{ + "shape":"ResourceId", + "documentation":"

The identifier of the policy engine that manages the policy to be deleted. This ensures the policy is deleted from the correct policy engine context.

", + "location":"uri", + "locationName":"policyEngineId" + }, + "policyId":{ + "shape":"ResourceId", + "documentation":"

The unique identifier of the policy to be deleted. This must be a valid policy ID that exists within the specified policy engine.

", + "location":"uri", + "locationName":"policyId" + } + } + }, + "DeletePolicyResponse":{ + "type":"structure", + "required":[ + "policyId", + "name", + "policyEngineId", + "definition", + "createdAt", + "updatedAt", + "policyArn", + "status", + "statusReasons" + ], + "members":{ + "policyId":{ + "shape":"ResourceId", + "documentation":"

The unique identifier of the policy being deleted. This confirms which policy the deletion operation targets.

" + }, + "name":{ + "shape":"PolicyName", + "documentation":"

The customer-assigned name of the deleted policy. This confirms which policy was successfully removed from the system and matches the name that was originally assigned during policy creation.

" + }, + "policyEngineId":{ + "shape":"ResourceId", + "documentation":"

The identifier of the policy engine from which the policy was deleted. This confirms the policy engine context for the deletion operation.

" + }, + "definition":{"shape":"PolicyDefinition"}, + "description":{ + "shape":"Description", + "documentation":"

The human-readable description of the deleted policy.

" + }, + "createdAt":{ + "shape":"DateTimestamp", + "documentation":"

The timestamp when the deleted policy was originally created.

" + }, + "updatedAt":{ + "shape":"DateTimestamp", + "documentation":"

The timestamp when the deleted policy was last modified before deletion. This tracks the final state of the policy before it was removed from the system.

" + }, + "policyArn":{ + "shape":"PolicyArn", + "documentation":"

The Amazon Resource Name (ARN) of the deleted policy. This globally unique identifier confirms which policy resource was successfully removed.

" + }, + "status":{ + "shape":"PolicyStatus", + "documentation":"

The status of the policy deletion operation. This provides information about any issues that occurred during the deletion process.

" + }, + "statusReasons":{ + "shape":"PolicyStatusReasons", + "documentation":"

Additional information about the deletion status. This provides details about the deletion process or any issues that may have occurred.

" + } + } + }, + "DeleteResourcePolicyRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"BedrockAgentcoreResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource for which to delete the resource policy.

", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "DeleteResourcePolicyResponse":{ + "type":"structure", + "members":{} + }, "DeleteWorkloadIdentityRequest":{ "type":"structure", "required":["name"], @@ -3137,6 +4568,10 @@ "type":"string", "pattern":".+/\\.well-known/openid-configuration" }, + "Double":{ + "type":"double", + "box":true + }, "EncryptionFailure":{ "type":"structure", "required":["message"], @@ -3173,109 +4608,578 @@ "min":0, "sensitive":true }, - "ExceptionLevel":{ - "type":"string", - "enum":["DEBUG"] - }, - "ExtractionConfiguration":{ + "EpisodicConsolidationOverride":{ "type":"structure", + "required":[ + "appendToPrompt", + "modelId" + ], "members":{ - "customExtractionConfiguration":{ - "shape":"CustomExtractionConfiguration", - "documentation":"

The custom extraction configuration.

" + "appendToPrompt":{ + "shape":"Prompt", + "documentation":"

The text appended to the prompt for the consolidation step of the episodic memory strategy.

" + }, + "modelId":{ + "shape":"String", + "documentation":"

The model ID used for the consolidation step of the episodic memory strategy.

" } }, - "documentation":"

Contains extraction configuration information for a memory strategy.

", - "union":true - }, - "GatewayArn":{ - "type":"string", - "pattern":"arn:aws(|-cn|-us-gov):bedrock-agentcore:[a-z0-9-]{1,20}:[0-9]{12}:gateway/[0-9a-zA-Z]{10}" - }, - "GatewayDescription":{ - "type":"string", - "max":200, - "min":1, - "sensitive":true - }, - "GatewayId":{ - "type":"string", - "pattern":"([0-9a-z][-]?){1,100}-[0-9a-z]{10}" - }, - "GatewayIdentifier":{ - "type":"string", - "pattern":"([0-9a-z][-]?){1,100}-[0-9a-z]{10}" - }, - "GatewayInterceptionPoint":{ - "type":"string", - "enum":[ - "REQUEST", - "RESPONSE" - ] - }, - "GatewayInterceptionPoints":{ - "type":"list", - "member":{"shape":"GatewayInterceptionPoint"}, - "max":2, - "min":1 + "documentation":"

Contains configurations to override the default consolidation step for the episodic memory strategy.

" }, - "GatewayInterceptorConfiguration":{ + "EpisodicExtractionOverride":{ "type":"structure", "required":[ - "interceptor", - "interceptionPoints" + "appendToPrompt", + "modelId" ], "members":{ - "interceptor":{ - "shape":"InterceptorConfiguration", - "documentation":"

The infrastructure settings of an interceptor configuration. This structure defines how the interceptor can be invoked.

" - }, - "interceptionPoints":{ - "shape":"GatewayInterceptionPoints", - "documentation":"

The supported points of interception. This field specifies which points during the gateway invocation to invoke the interceptor.

" + "appendToPrompt":{ + "shape":"Prompt", + "documentation":"

The text appended to the prompt for the extraction step of the episodic memory strategy.

" }, - "inputConfiguration":{ - "shape":"InterceptorInputConfiguration", - "documentation":"

The configuration for the input of the interceptor. This field specifies how the input to the interceptor is constructed.

" + "modelId":{ + "shape":"String", + "documentation":"

The model ID used for the extraction step of the episodic memory strategy.

" } }, - "documentation":"

The configuration for an interceptor on a gateway. This structure defines settings for an interceptor that will be invoked during the invocation of the gateway.

" - }, - "GatewayInterceptorConfigurations":{ - "type":"list", - "member":{"shape":"GatewayInterceptorConfiguration"}, - "max":2, - "min":1 - }, - "GatewayMaxResults":{ - "type":"integer", - "box":true, - "max":1000, - "min":1 - }, - "GatewayName":{ - "type":"string", - "pattern":"([0-9a-zA-Z][-]?){1,100}", - "sensitive":true - }, - "GatewayNextToken":{ - "type":"string", - "max":2048, - "min":1, - "pattern":"\\S*" + "documentation":"

Contains configurations to override the default extraction step for the episodic memory strategy.

" }, - "GatewayProtocolConfiguration":{ + "EpisodicMemoryStrategyInput":{ "type":"structure", + "required":["name"], "members":{ - "mcp":{ - "shape":"MCPGatewayConfiguration", - "documentation":"

The configuration for the Model Context Protocol (MCP). This protocol enables communication between Amazon Bedrock Agent and external tools.

" + "name":{ + "shape":"Name", + "documentation":"

The name of the episodic memory strategy.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The description of the episodic memory strategy.

" + }, + "namespaces":{ + "shape":"NamespacesList", + "documentation":"

The namespaces for which to create episodes.

" + }, + "reflectionConfiguration":{ + "shape":"EpisodicReflectionConfigurationInput", + "documentation":"

The configuration for the reflections created with the episodic memory strategy.

" } }, - "documentation":"

The configuration for a gateway protocol. This structure defines how the gateway communicates with external services.

", - "union":true + "documentation":"

Input for creating an episodic memory strategy.

" }, - "GatewayProtocolType":{ + "EpisodicOverrideConfigurationInput":{ + "type":"structure", + "members":{ + "extraction":{ + "shape":"EpisodicOverrideExtractionConfigurationInput", + "documentation":"

Contains configurations for overriding the extraction step of the episodic memory strategy.

" + }, + "consolidation":{ + "shape":"EpisodicOverrideConsolidationConfigurationInput", + "documentation":"

Contains configurations for overriding the consolidation step of the episodic memory strategy.

" + }, + "reflection":{ + "shape":"EpisodicOverrideReflectionConfigurationInput", + "documentation":"

Contains configurations for overriding the reflection step of the episodic memory strategy.

" + } + }, + "documentation":"

Input for the configuration to override the episodic memory strategy.

" + }, + "EpisodicOverrideConsolidationConfigurationInput":{ + "type":"structure", + "required":[ + "appendToPrompt", + "modelId" + ], + "members":{ + "appendToPrompt":{ + "shape":"Prompt", + "documentation":"

The text to append to the prompt for the consolidation step of the episodic memory strategy.

" + }, + "modelId":{ + "shape":"String", + "documentation":"

The model ID to use for the consolidation step of the episodic memory strategy.

" + } + }, + "documentation":"

Configurations for overriding the consolidation step of the episodic memory strategy.

" + }, + "EpisodicOverrideExtractionConfigurationInput":{ + "type":"structure", + "required":[ + "appendToPrompt", + "modelId" + ], + "members":{ + "appendToPrompt":{ + "shape":"Prompt", + "documentation":"

The text to append to the prompt for the extraction step of the episodic memory strategy.

" + }, + "modelId":{ + "shape":"String", + "documentation":"

The model ID to use for the extraction step of the episodic memory strategy.

" + } + }, + "documentation":"

Configurations for overriding the extraction step of the episodic memory strategy.

" + }, + "EpisodicOverrideReflectionConfigurationInput":{ + "type":"structure", + "required":[ + "appendToPrompt", + "modelId" + ], + "members":{ + "appendToPrompt":{ + "shape":"Prompt", + "documentation":"

The text to append to the prompt for the reflection step of the episodic memory strategy.

" + }, + "modelId":{ + "shape":"String", + "documentation":"

The model ID to use for the reflection step of the episodic memory strategy.

" + }, + "namespaces":{ + "shape":"NamespacesList", + "documentation":"

The namespaces to use for episodic reflection. Can be less nested than the episodic namespaces.

" + } + }, + "documentation":"

Configurations for overriding the reflection step of the episodic memory strategy.

" + }, + "EpisodicReflectionConfiguration":{ + "type":"structure", + "required":["namespaces"], + "members":{ + "namespaces":{ + "shape":"NamespacesList", + "documentation":"

The namespaces for which to create reflections. Can be less nested than the episodic namespaces.

" + } + }, + "documentation":"

The configuration for the reflections created with the episodic memory strategy.

" + }, + "EpisodicReflectionConfigurationInput":{ + "type":"structure", + "required":["namespaces"], + "members":{ + "namespaces":{ + "shape":"NamespacesList", + "documentation":"

The namespaces over which to create reflections. Can be less nested than episode namespaces.

" + } + }, + "documentation":"

An episodic reflection configuration input.

" + }, + "EpisodicReflectionOverride":{ + "type":"structure", + "required":[ + "appendToPrompt", + "modelId" + ], + "members":{ + "appendToPrompt":{ + "shape":"Prompt", + "documentation":"

The text appended to the prompt for the reflection step of the episodic memory strategy.

" + }, + "modelId":{ + "shape":"String", + "documentation":"

The model ID used for the reflection step of the episodic memory strategy.

" + }, + "namespaces":{ + "shape":"NamespacesList", + "documentation":"

The namespaces over which reflections were created. Can be less nested than the episodic namespaces.

" + } + }, + "documentation":"

Contains configurations to override the default reflection step for the episodic memory strategy.

" + }, + "EvaluationConfigDescription":{ + "type":"string", + "max":200, + "min":1, + "pattern":".+", + "sensitive":true + }, + "EvaluationConfigName":{ + "type":"string", + "pattern":"[a-zA-Z][a-zA-Z0-9_]{0,47}" + }, + "EvaluatorArn":{ + "type":"string", + "pattern":"arn:aws:bedrock-agentcore:[a-z0-9-]+:[0-9]{12}:evaluator\\/[a-zA-Z][a-zA-Z0-9-_]{0,99}-[a-zA-Z0-9]{10}$|^arn:aws:bedrock-agentcore:::evaluator/Builtin.[a-zA-Z0-9_-]+" + }, + "EvaluatorConfig":{ + "type":"structure", + "members":{ + "llmAsAJudge":{ + "shape":"LlmAsAJudgeEvaluatorConfig", + "documentation":"

The LLM-as-a-Judge configuration that uses a language model to evaluate agent performance based on custom instructions and rating scales.

" + } + }, + "documentation":"

The configuration that defines how an evaluator assesses agent performance, including the evaluation method and parameters.

", + "union":true + }, + "EvaluatorDescription":{ + "type":"string", + "max":200, + "min":1, + "sensitive":true + }, + "EvaluatorId":{ + "type":"string", + "pattern":"(Builtin.[a-zA-Z0-9_-]+|[a-zA-Z][a-zA-Z0-9-_]{0,99}-[a-zA-Z0-9]{10})" + }, + "EvaluatorInstructions":{ + "type":"string", + "sensitive":true + }, + "EvaluatorLevel":{ + "type":"string", + "enum":[ + "TOOL_CALL", + "TRACE", + "SESSION" + ] + }, + "EvaluatorList":{ + "type":"list", + "member":{"shape":"EvaluatorReference"}, + "max":10, + "min":1 + }, + "EvaluatorModelConfig":{ + "type":"structure", + "members":{ + "bedrockEvaluatorModelConfig":{ + "shape":"BedrockEvaluatorModelConfig", + "documentation":"

The Amazon Bedrock model configuration for evaluation.

" + } + }, + "documentation":"

The model configuration that specifies which foundation model to use for evaluation and how to configure it.

", + "union":true + }, + "EvaluatorName":{ + "type":"string", + "pattern":"(Builtin.[a-zA-Z0-9_-]+|[a-zA-Z][a-zA-Z0-9_]{0,47})" + }, + "EvaluatorReference":{ + "type":"structure", + "members":{ + "evaluatorId":{ + "shape":"EvaluatorId", + "documentation":"

The unique identifier of the evaluator. Can reference builtin evaluators (e.g., Builtin.Helpfulness) or custom evaluators.

" + } + }, + "documentation":"

The reference to an evaluator used in online evaluation configurations, containing the evaluator identifier.

", + "union":true + }, + "EvaluatorStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "CREATING", + "CREATE_FAILED", + "UPDATING", + "UPDATE_FAILED", + "DELETING" + ] + }, + "EvaluatorSummary":{ + "type":"structure", + "required":[ + "evaluatorArn", + "evaluatorId", + "evaluatorName", + "evaluatorType", + "status", + "createdAt", + "updatedAt" + ], + "members":{ + "evaluatorArn":{ + "shape":"EvaluatorArn", + "documentation":"

The Amazon Resource Name (ARN) of the evaluator.

" + }, + "evaluatorId":{ + "shape":"EvaluatorId", + "documentation":"

The unique identifier of the evaluator.

" + }, + "evaluatorName":{ + "shape":"EvaluatorName", + "documentation":"

The name of the evaluator.

" + }, + "description":{ + "shape":"EvaluatorDescription", + "documentation":"

The description of the evaluator.

" + }, + "evaluatorType":{ + "shape":"EvaluatorType", + "documentation":"

The type of evaluator, indicating whether it is a built-in evaluator provided by the service or a custom evaluator created by the user.

" + }, + "level":{ + "shape":"EvaluatorLevel", + "documentation":"

The evaluation level (TOOL_CALL, TRACE, or SESSION) that determines the scope of evaluation.

" + }, + "status":{ + "shape":"EvaluatorStatus", + "documentation":"

The current status of the evaluator.

" + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the evaluator was created.

" + }, + "updatedAt":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the evaluator was last updated.

" + }, + "lockedForModification":{ + "shape":"Boolean", + "documentation":"

Whether the evaluator is locked for modification due to being referenced by active online evaluation configurations.

" + } + }, + "documentation":"

The summary information about an evaluator, including basic metadata and status information.

" + }, + "EvaluatorSummaryList":{ + "type":"list", + "member":{"shape":"EvaluatorSummary"} + }, + "EvaluatorType":{ + "type":"string", + "enum":[ + "Builtin", + "Custom" + ] + }, + "ExceptionLevel":{ + "type":"string", + "enum":["DEBUG"] + }, + "ExtractionConfiguration":{ + "type":"structure", + "members":{ + "customExtractionConfiguration":{ + "shape":"CustomExtractionConfiguration", + "documentation":"

The custom extraction configuration.

" + } + }, + "documentation":"

Contains extraction configuration information for a memory strategy.

", + "union":true + }, + "Filter":{ + "type":"structure", + "required":[ + "key", + "operator", + "value" + ], + "members":{ + "key":{ + "shape":"FilterKeyString", + "documentation":"

The key or field name to filter on within the agent trace data.

" + }, + "operator":{ + "shape":"FilterOperator", + "documentation":"

The comparison operator to use for filtering.

" + }, + "value":{ + "shape":"FilterValue", + "documentation":"

The value to compare against using the specified operator.

" + } + }, + "documentation":"

The filter that applies conditions to agent traces during online evaluation to determine which traces should be evaluated.

" + }, + "FilterKeyString":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[a-zA-Z0-9._-]+" + }, + "FilterList":{ + "type":"list", + "member":{"shape":"Filter"}, + "max":5, + "min":0 + }, + "FilterOperator":{ + "type":"string", + "enum":[ + "Equals", + "NotEquals", + "GreaterThan", + "LessThan", + "GreaterThanOrEqual", + "LessThanOrEqual", + "Contains", + "NotContains" + ] + }, + "FilterValue":{ + "type":"structure", + "members":{ + "stringValue":{ + "shape":"FilterValueStringValueString", + "documentation":"

The string value for text-based filtering.

" + }, + "doubleValue":{ + "shape":"Double", + "documentation":"

The numeric value for numerical filtering and comparisons.

" + }, + "booleanValue":{ + "shape":"Boolean", + "documentation":"

The boolean value for true/false filtering conditions.

" + } + }, + "documentation":"

The value used in filter comparisons, supporting different data types for flexible filtering criteria.

", + "union":true + }, + "FilterValueStringValueString":{ + "type":"string", + "max":1024, + "min":1 + }, + "Finding":{ + "type":"structure", + "members":{ + "type":{ + "shape":"FindingType", + "documentation":"

The type or category of the finding. This classifies the finding as an error, warning, recommendation, or informational message to help users understand the severity and nature of the issue.

" + }, + "description":{ + "shape":"String", + "documentation":"

A human-readable description of the finding. This provides detailed information about the issue, recommendation, or validation result to help users understand and address the finding.

" + } + }, + "documentation":"

Represents a finding or issue discovered during policy generation or validation. Findings provide insights about potential problems, recommendations, or validation results from policy analysis operations. Finding types include: VALID (policy is ready to use), INVALID (policy has validation errors that must be fixed), NOT_TRANSLATABLE (input couldn't be converted to policy), ALLOW_ALL (policy would allow all actions, potential security risk), ALLOW_NONE (policy would allow no actions, unusable), DENY_ALL (policy would deny all actions, may be too restrictive), and DENY_NONE (policy would deny no actions, ineffective). Review all findings before creating policies from generated assets to ensure they match your security requirements.

" + }, + "FindingType":{ + "type":"string", + "enum":[ + "VALID", + "INVALID", + "NOT_TRANSLATABLE", + "ALLOW_ALL", + "ALLOW_NONE", + "DENY_ALL", + "DENY_NONE" + ] + }, + "Findings":{ + "type":"list", + "member":{"shape":"Finding"} + }, + "GatewayArn":{ + "type":"string", + "pattern":"arn:aws(|-cn|-us-gov):bedrock-agentcore:[a-z0-9-]{1,20}:[0-9]{12}:gateway/[0-9a-zA-Z]{10}" + }, + "GatewayDescription":{ + "type":"string", + "max":200, + "min":1, + "sensitive":true + }, + "GatewayId":{ + "type":"string", + "pattern":"([0-9a-z][-]?){1,100}-[0-9a-z]{10}" + }, + "GatewayIdentifier":{ + "type":"string", + "pattern":"([0-9a-z][-]?){1,100}-[0-9a-z]{10}" + }, + "GatewayInterceptionPoint":{ + "type":"string", + "enum":[ + "REQUEST", + "RESPONSE" + ] + }, + "GatewayInterceptionPoints":{ + "type":"list", + "member":{"shape":"GatewayInterceptionPoint"}, + "max":2, + "min":1 + }, + "GatewayInterceptorConfiguration":{ + "type":"structure", + "required":[ + "interceptor", + "interceptionPoints" + ], + "members":{ + "interceptor":{ + "shape":"InterceptorConfiguration", + "documentation":"

The infrastructure settings of an interceptor configuration. This structure defines how the interceptor can be invoked.

" + }, + "interceptionPoints":{ + "shape":"GatewayInterceptionPoints", + "documentation":"

The supported points of interception. This field specifies which points during the gateway invocation to invoke the interceptor.

" + }, + "inputConfiguration":{ + "shape":"InterceptorInputConfiguration", + "documentation":"

The configuration for the input of the interceptor. This field specifies how the input to the interceptor is constructed.

" + } + }, + "documentation":"

The configuration for an interceptor on a gateway. This structure defines settings for an interceptor that will be invoked during the invocation of the gateway.

" + }, + "GatewayInterceptorConfigurations":{ + "type":"list", + "member":{"shape":"GatewayInterceptorConfiguration"}, + "max":2, + "min":1 + }, + "GatewayMaxResults":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "GatewayName":{ + "type":"string", + "pattern":"([0-9a-zA-Z][-]?){1,100}", + "sensitive":true + }, + "GatewayNextToken":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"\\S*" + }, + "GatewayPolicyEngineArn":{ + "type":"string", + "max":170, + "min":1, + "pattern":"arn:aws:bedrock-agentcore:[a-z0-9-]+:[0-9]{12}:policy-engine\\/[a-zA-Z][a-zA-Z0-9-_]{0,99}-[a-zA-Z0-9_]{10}" + }, + "GatewayPolicyEngineConfiguration":{ + "type":"structure", + "required":[ + "arn", + "mode" + ], + "members":{ + "arn":{ + "shape":"GatewayPolicyEngineArn", + "documentation":"

The ARN of the policy engine. The policy engine contains Cedar policies that define fine-grained authorization rules specifying who can perform what actions on which resources as agents interact through the gateway.

" + }, + "mode":{ + "shape":"GatewayPolicyEngineMode", + "documentation":"

The enforcement mode for the policy engine. Valid values include:

" + } + }, + "documentation":"

The configuration for a policy engine associated with a gateway. A policy engine is a collection of policies that evaluates and authorizes agent tool calls. When associated with a gateway, the policy engine intercepts all agent requests and determines whether to allow or deny each action based on the defined policies.

" + }, + "GatewayPolicyEngineMode":{ + "type":"string", + "enum":[ + "LOG_ONLY", + "ENFORCE" + ] + }, + "GatewayProtocolConfiguration":{ + "type":"structure", + "members":{ + "mcp":{ + "shape":"MCPGatewayConfiguration", + "documentation":"

The configuration for the Model Context Protocol (MCP). This protocol enables communication between Amazon Bedrock Agent and external tools.

" + } + }, + "documentation":"

The configuration for a gateway protocol. This structure defines how the gateway communicates with external services.

", + "union":true + }, + "GatewayProtocolType":{ "type":"string", "enum":["MCP"] }, @@ -3394,6 +5298,10 @@ "lastSynchronizedAt":{ "shape":"DateTimestamp", "documentation":"

The last synchronization time.

" + }, + "metadataConfiguration":{ + "shape":"MetadataConfiguration", + "documentation":"

The metadata configuration for HTTP header and query parameter propagation to and from this gateway target.

" } }, "documentation":"

The gateway target.

" @@ -3559,6 +5467,10 @@ "shape":"LifecycleConfiguration", "documentation":"

The life cycle configuration for the AgentCore Runtime.

" }, + "failureReason":{ + "shape":"String", + "documentation":"

The reason for failure if the AgentCore Runtime is in a failed state.

" + }, "description":{ "shape":"Description", "documentation":"

The description of the AgentCore Runtime.

" @@ -3759,6 +5671,73 @@ } } }, + "GetEvaluatorRequest":{ + "type":"structure", + "required":["evaluatorId"], + "members":{ + "evaluatorId":{ + "shape":"EvaluatorId", + "documentation":"

The unique identifier of the evaluator to retrieve. Can be a built-in evaluator ID (e.g., Builtin.Helpfulness) or a custom evaluator ID.

", + "location":"uri", + "locationName":"evaluatorId" + } + } + }, + "GetEvaluatorResponse":{ + "type":"structure", + "required":[ + "evaluatorArn", + "evaluatorId", + "evaluatorName", + "evaluatorConfig", + "level", + "status", + "createdAt", + "updatedAt" + ], + "members":{ + "evaluatorArn":{ + "shape":"EvaluatorArn", + "documentation":"

The Amazon Resource Name (ARN) of the evaluator.

" + }, + "evaluatorId":{ + "shape":"EvaluatorId", + "documentation":"

The unique identifier of the evaluator.

" + }, + "evaluatorName":{ + "shape":"EvaluatorName", + "documentation":"

The name of the evaluator.

" + }, + "description":{ + "shape":"EvaluatorDescription", + "documentation":"

The description of the evaluator.

" + }, + "evaluatorConfig":{ + "shape":"EvaluatorConfig", + "documentation":"

The configuration of the evaluator, including LLM-as-a-Judge settings for custom evaluators.

" + }, + "level":{ + "shape":"EvaluatorLevel", + "documentation":"

The evaluation level (TOOL_CALL, TRACE, or SESSION) that determines the scope of evaluation.

" + }, + "status":{ + "shape":"EvaluatorStatus", + "documentation":"

The current status of the evaluator.

" + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the evaluator was created.

" + }, + "updatedAt":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the evaluator was last updated.

" + }, + "lockedForModification":{ + "shape":"Boolean", + "documentation":"

Whether the evaluator is locked for modification due to being referenced by active online evaluation configurations.

" + } + } + }, "GetGatewayRequest":{ "type":"structure", "required":["gatewayIdentifier"], @@ -3845,6 +5824,10 @@ "shape":"GatewayInterceptorConfigurations", "documentation":"

The interceptors configured on the gateway.

" }, + "policyEngineConfiguration":{ + "shape":"GatewayPolicyEngineConfiguration", + "documentation":"

The policy engine configuration for the gateway.

" + }, "workloadIdentityDetails":{ "shape":"WorkloadIdentityDetails", "documentation":"

The workload identity details for the gateway.

" @@ -3929,6 +5912,10 @@ "lastSynchronizedAt":{ "shape":"DateTimestamp", "documentation":"

The last synchronization of the target.

" + }, + "metadataConfiguration":{ + "shape":"MetadataConfiguration", + "documentation":"

The metadata configuration for HTTP header and query parameter propagation for the retrieved gateway target.

" } } }, @@ -4010,6 +5997,324 @@ } } }, + "GetOnlineEvaluationConfigRequest":{ + "type":"structure", + "required":["onlineEvaluationConfigId"], + "members":{ + "onlineEvaluationConfigId":{ + "shape":"OnlineEvaluationConfigId", + "documentation":"

The unique identifier of the online evaluation configuration to retrieve.

", + "location":"uri", + "locationName":"onlineEvaluationConfigId" + } + } + }, + "GetOnlineEvaluationConfigResponse":{ + "type":"structure", + "required":[ + "onlineEvaluationConfigArn", + "onlineEvaluationConfigId", + "onlineEvaluationConfigName", + "rule", + "dataSourceConfig", + "evaluators", + "status", + "executionStatus", + "createdAt", + "updatedAt" + ], + "members":{ + "onlineEvaluationConfigArn":{ + "shape":"OnlineEvaluationConfigArn", + "documentation":"

The Amazon Resource Name (ARN) of the online evaluation configuration.

" + }, + "onlineEvaluationConfigId":{ + "shape":"OnlineEvaluationConfigId", + "documentation":"

The unique identifier of the online evaluation configuration.

" + }, + "onlineEvaluationConfigName":{ + "shape":"EvaluationConfigName", + "documentation":"

The name of the online evaluation configuration.

" + }, + "description":{ + "shape":"EvaluationConfigDescription", + "documentation":"

The description of the online evaluation configuration.

" + }, + "rule":{ + "shape":"Rule", + "documentation":"

The evaluation rule containing sampling configuration, filters, and session settings.

" + }, + "dataSourceConfig":{ + "shape":"DataSourceConfig", + "documentation":"

The data source configuration specifying CloudWatch log groups and service names to monitor.

" + }, + "evaluators":{ + "shape":"EvaluatorList", + "documentation":"

The list of evaluators applied during online evaluation.

" + }, + "outputConfig":{ + "shape":"OutputConfig", + "documentation":"

The output configuration specifying where evaluation results are written.

" + }, + "evaluationExecutionRoleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role used for evaluation execution.

" + }, + "status":{ + "shape":"OnlineEvaluationConfigStatus", + "documentation":"

The status of the online evaluation configuration.

" + }, + "executionStatus":{ + "shape":"OnlineEvaluationExecutionStatus", + "documentation":"

The execution status indicating whether the online evaluation is currently running.

" + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the online evaluation configuration was created.

" + }, + "updatedAt":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the online evaluation configuration was last updated.

" + }, + "failureReason":{ + "shape":"String", + "documentation":"

The reason for failure if the online evaluation configuration execution failed.

" + } + } + }, + "GetPolicyEngineRequest":{ + "type":"structure", + "required":["policyEngineId"], + "members":{ + "policyEngineId":{ + "shape":"ResourceId", + "documentation":"

The unique identifier of the policy engine to be retrieved. This must be a valid policy engine ID that exists within the account.

", + "location":"uri", + "locationName":"policyEngineId" + } + } + }, + "GetPolicyEngineResponse":{ + "type":"structure", + "required":[ + "policyEngineId", + "name", + "createdAt", + "updatedAt", + "policyEngineArn", + "status", + "statusReasons" + ], + "members":{ + "policyEngineId":{ + "shape":"ResourceId", + "documentation":"

The unique identifier of the retrieved policy engine. This matches the policy engine ID provided in the request and serves as the system identifier.

" + }, + "name":{ + "shape":"PolicyEngineName", + "documentation":"

The customer-assigned name of the policy engine. This is the human-readable identifier that was specified when the policy engine was created.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The human-readable description of the policy engine's purpose and scope. This helps administrators understand the policy engine's role in governance.

" + }, + "createdAt":{ + "shape":"DateTimestamp", + "documentation":"

The timestamp when the policy engine was originally created.

" + }, + "updatedAt":{ + "shape":"DateTimestamp", + "documentation":"

The timestamp when the policy engine was last modified. This tracks the most recent changes to the policy engine configuration.

" + }, + "policyEngineArn":{ + "shape":"PolicyEngineArn", + "documentation":"

The Amazon Resource Name (ARN) of the policy engine. This globally unique identifier can be used for cross-service references and IAM policy statements.

" + }, + "status":{ + "shape":"PolicyEngineStatus", + "documentation":"

The current status of the policy engine.

" + }, + "statusReasons":{ + "shape":"PolicyStatusReasons", + "documentation":"

Additional information about the policy engine status. This provides details about any failures or the current state of the policy engine.

" + } + } + }, + "GetPolicyGenerationRequest":{ + "type":"structure", + "required":[ + "policyGenerationId", + "policyEngineId" + ], + "members":{ + "policyGenerationId":{ + "shape":"ResourceId", + "documentation":"

The unique identifier of the policy generation request to be retrieved. This must be a valid generation ID from a previous StartPolicyGeneration call.

", + "location":"uri", + "locationName":"policyGenerationId" + }, + "policyEngineId":{ + "shape":"ResourceId", + "documentation":"

The identifier of the policy engine associated with the policy generation request. This provides the context for the generation operation and schema validation.

", + "location":"uri", + "locationName":"policyEngineId" + } + } + }, + "GetPolicyGenerationResponse":{ + "type":"structure", + "required":[ + "policyEngineId", + "policyGenerationId", + "name", + "policyGenerationArn", + "resource", + "createdAt", + "updatedAt", + "status", + "statusReasons" + ], + "members":{ + "policyEngineId":{ + "shape":"ResourceId", + "documentation":"

The identifier of the policy engine associated with this policy generation. This confirms the policy engine context for the generation operation.

" + }, + "policyGenerationId":{ + "shape":"ResourceId", + "documentation":"

The unique identifier of the policy generation request. This matches the generation ID provided in the request and serves as the tracking identifier.

" + }, + "name":{ + "shape":"PolicyGenerationName", + "documentation":"

The customer-assigned name for the policy generation request. This helps identify and track generation operations across multiple requests.

" + }, + "policyGenerationArn":{ + "shape":"PolicyGenerationArn", + "documentation":"

The Amazon Resource Name (ARN) of the policy generation. This globally unique identifier can be used for tracking, auditing, and cross-service references.

" + }, + "resource":{ + "shape":"Resource", + "documentation":"

The resource information associated with the policy generation. This provides context about the target resources for which the policies are being generated.

" + }, + "createdAt":{ + "shape":"DateTimestamp", + "documentation":"

The timestamp when the policy generation request was created. This is used for tracking and auditing generation operations and their lifecycle.

" + }, + "updatedAt":{ + "shape":"DateTimestamp", + "documentation":"

The timestamp when the policy generation was last updated. This tracks the progress of the generation process and any status changes.

" + }, + "status":{ + "shape":"PolicyGenerationStatus", + "documentation":"

The current status of the policy generation. This indicates whether the generation is in progress, completed successfully, or failed during processing.

" + }, + "statusReasons":{ + "shape":"PolicyStatusReasons", + "documentation":"

Additional information about the generation status. This provides details about any failures, warnings, or the current state of the generation process.

" + }, + "findings":{ + "shape":"String", + "documentation":"

The findings and results from the policy generation process. This includes any issues, recommendations, validation results, or insights from the generated policies.

" + } + } + }, + "GetPolicyRequest":{ + "type":"structure", + "required":[ + "policyEngineId", + "policyId" + ], + "members":{ + "policyEngineId":{ + "shape":"ResourceId", + "documentation":"

The identifier of the policy engine that manages the policy to be retrieved.

", + "location":"uri", + "locationName":"policyEngineId" + }, + "policyId":{ + "shape":"ResourceId", + "documentation":"

The unique identifier of the policy to be retrieved. This must be a valid policy ID that exists within the specified policy engine.

", + "location":"uri", + "locationName":"policyId" + } + } + }, + "GetPolicyResponse":{ + "type":"structure", + "required":[ + "policyId", + "name", + "policyEngineId", + "definition", + "createdAt", + "updatedAt", + "policyArn", + "status", + "statusReasons" + ], + "members":{ + "policyId":{ + "shape":"ResourceId", + "documentation":"

The unique identifier of the retrieved policy. This matches the policy ID provided in the request and serves as the system identifier for the policy.

" + }, + "name":{ + "shape":"PolicyName", + "documentation":"

The customer-assigned name of the policy. This is the human-readable identifier that was specified when the policy was created.

" + }, + "policyEngineId":{ + "shape":"ResourceId", + "documentation":"

The identifier of the policy engine that manages this policy. This confirms the policy engine context for the retrieved policy.

" + }, + "definition":{ + "shape":"PolicyDefinition", + "documentation":"

The Cedar policy statement that defines the access control rules. This contains the actual policy logic used for agent behavior control and access decisions.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The human-readable description of the policy's purpose and functionality. This helps administrators understand and manage the policy.

" + }, + "createdAt":{ + "shape":"DateTimestamp", + "documentation":"

The timestamp when the policy was originally created.

" + }, + "updatedAt":{ + "shape":"DateTimestamp", + "documentation":"

The timestamp when the policy was last modified. This tracks the most recent changes to the policy configuration.

" + }, + "policyArn":{ + "shape":"PolicyArn", + "documentation":"

The Amazon Resource Name (ARN) of the policy. This globally unique identifier can be used for cross-service references and IAM policy statements.

" + }, + "status":{ + "shape":"PolicyStatus", + "documentation":"

The current status of the policy.

" + }, + "statusReasons":{ + "shape":"PolicyStatusReasons", + "documentation":"

Additional information about the policy status. This provides details about any failures or the current state of the policy.

" + } + } + }, + "GetResourcePolicyRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"BedrockAgentcoreResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource for which to retrieve the resource policy.

", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "GetResourcePolicyResponse":{ + "type":"structure", + "members":{ + "policy":{ + "shape":"ResourcePolicyBody", + "documentation":"

The resource policy associated with the specified resource.

" + } + } + }, "GetTokenVaultRequest":{ "type":"structure", "members":{ @@ -4154,6 +6459,29 @@ "min":1, "pattern":"(Authorization|X-Amzn-Bedrock-AgentCore-Runtime-Custom-[a-zA-Z0-9-]+)" }, + "HttpHeaderName":{ + "type":"string", + "max":100, + "min":1 + }, + "HttpQueryParameterName":{ + "type":"string", + "max":40, + "min":1 + }, + "InboundTokenClaimNameType":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[A-Za-z0-9_.-:]+" + }, + "InboundTokenClaimValueType":{ + "type":"string", + "enum":[ + "STRING", + "STRING_ARRAY" + ] + }, "IncludedOauth2ProviderConfigInput":{ "type":"structure", "required":[ @@ -4196,6 +6524,51 @@ }, "documentation":"

The configuration details returned for a supported OAuth2 provider, including client credentials and OAuth2 discovery information.

" }, + "InferenceConfiguration":{ + "type":"structure", + "members":{ + "maxTokens":{ + "shape":"InferenceConfigurationMaxTokensInteger", + "documentation":"

The maximum number of tokens to generate in the model response during evaluation.

" + }, + "temperature":{ + "shape":"InferenceConfigurationTemperatureFloat", + "documentation":"

The temperature value that controls randomness in the model's responses. Lower values produce more deterministic outputs.

" + }, + "topP":{ + "shape":"InferenceConfigurationTopPFloat", + "documentation":"

The top-p sampling parameter that controls the diversity of the model's responses by limiting the cumulative probability of token choices.

" + }, + "stopSequences":{ + "shape":"InferenceConfigurationStopSequencesList", + "documentation":"

The list of sequences that will cause the model to stop generating tokens when encountered.

" + } + }, + "documentation":"

The configuration parameters that control how the foundation model behaves during evaluation, including response generation settings.

" + }, + "InferenceConfigurationMaxTokensInteger":{ + "type":"integer", + "box":true, + "min":1 + }, + "InferenceConfigurationStopSequencesList":{ + "type":"list", + "member":{"shape":"NonEmptyString"}, + "max":2500, + "min":0 + }, + "InferenceConfigurationTemperatureFloat":{ + "type":"float", + "box":true, + "max":1, + "min":0 + }, + "InferenceConfigurationTopPFloat":{ + "type":"float", + "box":true, + "max":1, + "min":0 + }, "InlinePayload":{ "type":"string", "sensitive":true @@ -4586,6 +6959,43 @@ } } }, + "ListEvaluatorsRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"

The pagination token from a previous request to retrieve the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"ListEvaluatorsRequestMaxResultsInteger", + "documentation":"

The maximum number of evaluators to return in a single response.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListEvaluatorsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListEvaluatorsResponse":{ + "type":"structure", + "required":["evaluators"], + "members":{ + "evaluators":{ + "shape":"EvaluatorSummaryList", + "documentation":"

The list of evaluator summaries containing basic information about each evaluator.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

The pagination token to use in a subsequent request to retrieve the next page of results.

" + } + } + }, "ListGatewayTargetsRequest":{ "type":"structure", "required":["gatewayIdentifier"], @@ -4721,6 +7131,202 @@ } } }, + "ListOnlineEvaluationConfigsRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"String", + "documentation":"

The pagination token from a previous request to retrieve the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"ListOnlineEvaluationConfigsRequestMaxResultsInteger", + "documentation":"

The maximum number of online evaluation configurations to return in a single response.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListOnlineEvaluationConfigsRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListOnlineEvaluationConfigsResponse":{ + "type":"structure", + "required":["onlineEvaluationConfigs"], + "members":{ + "onlineEvaluationConfigs":{ + "shape":"OnlineEvaluationConfigSummaryList", + "documentation":"

The list of online evaluation configuration summaries containing basic information about each configuration.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

The pagination token to use in a subsequent request to retrieve the next page of results.

" + } + } + }, + "ListPoliciesRequest":{ + "type":"structure", + "required":["policyEngineId"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

A pagination token returned from a previous ListPolicies call. Use this token to retrieve the next page of results when the response is paginated.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of policies to return in a single response. If not specified, the default is 10 policies per page, with a maximum of 100 per page.

", + "location":"querystring", + "locationName":"maxResults" + }, + "policyEngineId":{ + "shape":"ResourceId", + "documentation":"

The identifier of the policy engine whose policies to retrieve.

", + "location":"uri", + "locationName":"policyEngineId" + }, + "targetResourceScope":{ + "shape":"BedrockAgentcoreResourceArn", + "documentation":"

Optional filter to list policies that apply to a specific resource scope or resource type. This helps narrow down policy results to those relevant for particular Amazon Web Services resources, agent tools, or operational contexts within the policy engine ecosystem.

", + "location":"querystring", + "locationName":"targetResourceScope" + } + } + }, + "ListPoliciesResponse":{ + "type":"structure", + "required":["policies"], + "members":{ + "policies":{ + "shape":"Policies", + "documentation":"

An array of policy objects that match the specified criteria. Each policy object contains the policy metadata, status, and key identifiers for further operations.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

A pagination token that can be used in subsequent ListPolicies calls to retrieve additional results. This token is only present when there are more results available.

" + } + } + }, + "ListPolicyEnginesRequest":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

A pagination token returned from a previous ListPolicyEngines call. Use this token to retrieve the next page of results when the response is paginated.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of policy engines to return in a single response. If not specified, the default is 10 policy engines per page, with a maximum of 100 per page.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListPolicyEnginesResponse":{ + "type":"structure", + "required":["policyEngines"], + "members":{ + "policyEngines":{ + "shape":"PolicyEngines", + "documentation":"

An array of policy engine objects that exist in the account. Each policy engine object contains the engine metadata, status, and key identifiers for further operations.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

A pagination token that can be used in subsequent ListPolicyEngines calls to retrieve additional results. This token is only present when there are more results available.

" + } + } + }, + "ListPolicyGenerationAssetsRequest":{ + "type":"structure", + "required":[ + "policyGenerationId", + "policyEngineId" + ], + "members":{ + "policyGenerationId":{ + "shape":"ResourceId", + "documentation":"

The unique identifier of the policy generation request whose assets are to be retrieved. This must be a valid generation ID from a previous StartPolicyGeneration call that has completed processing.

", + "location":"uri", + "locationName":"policyGenerationId" + }, + "policyEngineId":{ + "shape":"ResourceId", + "documentation":"

The unique identifier of the policy engine associated with the policy generation request. This provides the context for the generation operation and ensures assets are retrieved from the correct policy engine.

", + "location":"uri", + "locationName":"policyEngineId" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

A pagination token returned from a previous ListPolicyGenerationAssets call. Use this token to retrieve the next page of assets when the response is paginated due to large numbers of generated policy options.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of policy generation assets to return in a single response. If not specified, the default is 10 assets per page, with a maximum of 100 per page. This helps control response size when dealing with policy generations that produce many alternative policy options.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListPolicyGenerationAssetsResponse":{ + "type":"structure", + "members":{ + "policyGenerationAssets":{ + "shape":"PolicyGenerationAssets", + "documentation":"

An array of generated policy assets including Cedar policies and related artifacts from the AI-powered policy generation process. Each asset represents a different policy option or variation generated from the original natural language input.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

A pagination token that can be used in subsequent ListPolicyGenerationAssets calls to retrieve additional assets. This token is only present when there are more generated policy assets available beyond the current response.

" + } + } + }, + "ListPolicyGenerationsRequest":{ + "type":"structure", + "required":["policyEngineId"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

A pagination token for retrieving additional policy generations when results are paginated.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of policy generations to return in a single response.

", + "location":"querystring", + "locationName":"maxResults" + }, + "policyEngineId":{ + "shape":"ResourceId", + "documentation":"

The identifier of the policy engine whose policy generations to retrieve.

", + "location":"uri", + "locationName":"policyEngineId" + } + } + }, + "ListPolicyGenerationsResponse":{ + "type":"structure", + "required":["policyGenerations"], + "members":{ + "policyGenerations":{ + "shape":"PolicyGenerations", + "documentation":"

An array of policy generation objects that match the specified criteria.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

A pagination token for retrieving additional policy generations if more results are available.

" + } + } + }, "ListTagsForResourceRequest":{ "type":"structure", "required":["resourceArn"], @@ -4775,6 +7381,35 @@ } } }, + "LlmAsAJudgeEvaluatorConfig":{ + "type":"structure", + "required":[ + "instructions", + "ratingScale", + "modelConfig" + ], + "members":{ + "instructions":{ + "shape":"EvaluatorInstructions", + "documentation":"

The evaluation instructions that guide the language model in assessing agent performance, including criteria and evaluation guidelines.

" + }, + "ratingScale":{ + "shape":"RatingScale", + "documentation":"

The rating scale that defines how the evaluator should score agent performance, either numerical or categorical.

" + }, + "modelConfig":{ + "shape":"EvaluatorModelConfig", + "documentation":"

The model configuration that specifies which foundation model to use and how to configure it for evaluation.

" + } + }, + "documentation":"

The configuration for LLM-as-a-Judge evaluation that uses a language model to assess agent performance based on custom instructions and rating scales.

" + }, + "LogGroupName":{ + "type":"string", + "max":512, + "min":1, + "pattern":"[.\\-_/#A-Za-z0-9]+" + }, "MCPGatewayConfiguration":{ "type":"structure", "members":{ @@ -4793,6 +7428,17 @@ }, "documentation":"

The configuration for a Model Context Protocol (MCP) gateway. This structure defines how the gateway implements the MCP protocol.

" }, + "MatchValueString":{ + "type":"string", + "max":255, + "min":1, + "pattern":"[A-Za-z0-9_.-]+" + }, + "MatchValueStringList":{ + "type":"list", + "member":{"shape":"MatchValueString"}, + "min":1 + }, "MaxResults":{ "type":"integer", "box":true, @@ -4859,6 +7505,10 @@ "mcpServer":{ "shape":"McpServerTargetConfiguration", "documentation":"

The MCP server specified as the gateway target.

" + }, + "apiGateway":{ + "shape":"ApiGatewayTargetConfiguration", + "documentation":"

The configuration for an Amazon API Gateway target.

" } }, "documentation":"

The Model Context Protocol (MCP) configuration for a target. This structure defines how the gateway uses MCP to communicate with the target.

", @@ -5023,6 +7673,10 @@ "customMemoryStrategy":{ "shape":"CustomMemoryStrategyInput", "documentation":"

Input for creating a custom memory strategy.

" + }, + "episodicMemoryStrategy":{ + "shape":"EpisodicMemoryStrategyInput", + "documentation":"

Input for creating an episodic memory strategy.

" } }, "documentation":"

Contains input information for creating a memory strategy.

", @@ -5051,7 +7705,8 @@ "SEMANTIC", "SUMMARIZATION", "USER_PREFERENCE", - "CUSTOM" + "CUSTOM", + "EPISODIC" ] }, "MemorySummary":{ @@ -5114,6 +7769,24 @@ "max":50, "min":1 }, + "MetadataConfiguration":{ + "type":"structure", + "members":{ + "allowedRequestHeaders":{ + "shape":"AllowedRequestHeaders", + "documentation":"

A list of HTTP headers that are allowed to be propagated from incoming client requests to the target.

" + }, + "allowedQueryParameters":{ + "shape":"AllowedQueryParameters", + "documentation":"

A list of URL query parameters that are allowed to be propagated from the incoming gateway URL to the target.

" + }, + "allowedResponseHeaders":{ + "shape":"AllowedResponseHeaders", + "documentation":"

A list of HTTP headers that are allowed to be propagated from the target response back to the client.

" + } + }, + "documentation":"

Configuration for HTTP header and query parameter propagation between the gateway and target servers.

" + }, "MicrosoftOauth2ProviderConfigInput":{ "type":"structure", "required":[ @@ -5151,6 +7824,7 @@ }, "documentation":"

Output configuration for a Microsoft OAuth2 provider.

" }, + "ModelId":{"type":"string"}, "ModifyConsolidationConfiguration":{ "type":"structure", "members":{ @@ -5236,6 +7910,21 @@ }, "documentation":"

Input for modifying a memory strategy.

" }, + "ModifyReflectionConfiguration":{ + "type":"structure", + "members":{ + "episodicReflectionConfiguration":{ + "shape":"EpisodicReflectionConfigurationInput", + "documentation":"

The updated episodic reflection configuration.

" + }, + "customReflectionConfiguration":{ + "shape":"CustomReflectionConfigurationInput", + "documentation":"

The updated custom reflection configuration.

" + } + }, + "documentation":"

Contains information for modifying a reflection configuration.

", + "union":true + }, "ModifySelfManagedConfiguration":{ "type":"structure", "members":{ @@ -5271,6 +7960,10 @@ "shape":"ModifyConsolidationConfiguration", "documentation":"

The updated consolidation configuration.

" }, + "reflection":{ + "shape":"ModifyReflectionConfiguration", + "documentation":"

The updated reflection configuration.

" + }, "selfManagedConfiguration":{ "shape":"ModifySelfManagedConfiguration", "documentation":"

The updated self-managed configuration.

" @@ -5293,6 +7986,11 @@ "member":{"shape":"Namespace"}, "min":1 }, + "NaturalLanguage":{ + "type":"string", + "max":2000, + "min":1 + }, "NetworkConfiguration":{ "type":"structure", "required":["networkMode"], @@ -5321,9 +8019,50 @@ "min":1, "pattern":"\\S*" }, - "NonBlankString":{ + "NonBlankString":{ + "type":"string", + "pattern":"[\\s\\S]+" + }, + "NonEmptyString":{ + "type":"string", + "min":1 + }, + "NumericalScaleDefinition":{ + "type":"structure", + "required":[ + "definition", + "value", + "label" + ], + "members":{ + "definition":{ + "shape":"String", + "documentation":"

The description that explains what this numerical rating represents and when it should be used.

" + }, + "value":{ + "shape":"NumericalScaleDefinitionValueDouble", + "documentation":"

The numerical value for this rating scale option.

" + }, + "label":{ + "shape":"NumericalScaleDefinitionLabelString", + "documentation":"

The label or name that describes this numerical rating option.

" + } + }, + "documentation":"

The definition of a numerical rating scale option that provides a numeric value with its description for evaluation scoring.

" + }, + "NumericalScaleDefinitionLabelString":{ "type":"string", - "pattern":"[\\s\\S]+" + "max":100, + "min":1 + }, + "NumericalScaleDefinitionValueDouble":{ + "type":"double", + "box":true, + "min":0 + }, + "NumericalScaleDefinitions":{ + "type":"list", + "member":{"shape":"NumericalScaleDefinition"} }, "OAuthCredentialProvider":{ "type":"structure", @@ -5343,6 +8082,14 @@ "customParameters":{ "shape":"OAuthCustomParameters", "documentation":"

The custom parameters for the OAuth credential provider. These parameters provide additional configuration for the OAuth authentication process.

" + }, + "grantType":{ + "shape":"OAuthGrantType", + "documentation":"

Specifies the kind of credentials to use for authorization:

" + }, + "defaultReturnUrl":{ + "shape":"OAuthDefaultReturnUrl", + "documentation":"

The URL where the end user's browser is redirected after obtaining the authorization code. Generally points to the customer's application.

" } }, "documentation":"

An OAuth credential provider for gateway authentication. This structure contains the configuration for authenticating with the target endpoint using OAuth.

" @@ -5369,6 +8116,19 @@ "min":1, "sensitive":true }, + "OAuthDefaultReturnUrl":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"\\w+:(\\/?\\/?)[^\\s]+" + }, + "OAuthGrantType":{ + "type":"string", + "enum":[ + "CLIENT_CREDENTIALS", + "AUTHORIZATION_CODE" + ] + }, "OAuthScope":{ "type":"string", "max":64, @@ -5549,13 +8309,404 @@ "documentation":"

Contains the output configuration for an OAuth2 provider.

", "union":true }, + "OnlineEvaluationConfigArn":{ + "type":"string", + "pattern":"arn:aws:bedrock-agentcore:[a-z0-9-]+:[0-9]{12}:online-evaluation-config\\/[a-zA-Z][a-zA-Z0-9-_]{0,99}-[a-zA-Z0-9]{10}" + }, + "OnlineEvaluationConfigId":{ + "type":"string", + "pattern":"[a-zA-Z][a-zA-Z0-9-_]{0,99}-[a-zA-Z0-9]{10}" + }, + "OnlineEvaluationConfigStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "CREATING", + "CREATE_FAILED", + "UPDATING", + "UPDATE_FAILED", + "DELETING" + ] + }, + "OnlineEvaluationConfigSummary":{ + "type":"structure", + "required":[ + "onlineEvaluationConfigArn", + "onlineEvaluationConfigId", + "onlineEvaluationConfigName", + "status", + "executionStatus", + "createdAt", + "updatedAt" + ], + "members":{ + "onlineEvaluationConfigArn":{ + "shape":"OnlineEvaluationConfigArn", + "documentation":"

The Amazon Resource Name (ARN) of the online evaluation configuration.

" + }, + "onlineEvaluationConfigId":{ + "shape":"OnlineEvaluationConfigId", + "documentation":"

The unique identifier of the online evaluation configuration.

" + }, + "onlineEvaluationConfigName":{ + "shape":"EvaluationConfigName", + "documentation":"

The name of the online evaluation configuration.

" + }, + "description":{ + "shape":"EvaluationConfigDescription", + "documentation":"

The description of the online evaluation configuration.

" + }, + "status":{ + "shape":"OnlineEvaluationConfigStatus", + "documentation":"

The status of the online evaluation configuration.

" + }, + "executionStatus":{ + "shape":"OnlineEvaluationExecutionStatus", + "documentation":"

The execution status indicating whether the online evaluation is currently running.

" + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the online evaluation configuration was created.

" + }, + "updatedAt":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the online evaluation configuration was last updated.

" + }, + "failureReason":{ + "shape":"String", + "documentation":"

The reason for failure if the online evaluation configuration execution failed.

" + } + }, + "documentation":"

The summary information about an online evaluation configuration, including basic metadata and execution status.

" + }, + "OnlineEvaluationConfigSummaryList":{ + "type":"list", + "member":{"shape":"OnlineEvaluationConfigSummary"} + }, + "OnlineEvaluationExecutionStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "OutputConfig":{ + "type":"structure", + "required":["cloudWatchConfig"], + "members":{ + "cloudWatchConfig":{ + "shape":"CloudWatchOutputConfig", + "documentation":"

The CloudWatch configuration for writing evaluation results to CloudWatch logs with embedded metric format.

" + } + }, + "documentation":"

The configuration that specifies where evaluation results should be written for monitoring and analysis.

" + }, "OverrideType":{ "type":"string", "enum":[ "SEMANTIC_OVERRIDE", "SUMMARY_OVERRIDE", "USER_PREFERENCE_OVERRIDE", - "SELF_MANAGED" + "SELF_MANAGED", + "EPISODIC_OVERRIDE" + ] + }, + "Policies":{ + "type":"list", + "member":{"shape":"Policy"}, + "max":100, + "min":0 + }, + "Policy":{ + "type":"structure", + "required":[ + "policyId", + "name", + "policyEngineId", + "definition", + "createdAt", + "updatedAt", + "policyArn", + "status", + "statusReasons" + ], + "members":{ + "policyId":{ + "shape":"ResourceId", + "documentation":"

The unique identifier for the policy. This system-generated identifier consists of the user name plus a 10-character generated suffix and serves as the primary key for policy operations.

" + }, + "name":{ + "shape":"PolicyName", + "documentation":"

The customer-assigned immutable name for the policy. This human-readable identifier must be unique within the account and cannot exceed 48 characters.

" + }, + "policyEngineId":{ + "shape":"ResourceId", + "documentation":"

The identifier of the policy engine that manages this policy. This establishes the policy engine context for policy evaluation and management.

" + }, + "definition":{ + "shape":"PolicyDefinition", + "documentation":"

The Cedar policy statement that defines the access control rules. This contains the actual policy logic used for agent behavior control and access decisions.

" + }, + "description":{ + "shape":"Description", + "documentation":"

A human-readable description of the policy's purpose and functionality. Limited to 4,096 characters, this helps administrators understand and manage the policy.

" + }, + "createdAt":{ + "shape":"DateTimestamp", + "documentation":"

The timestamp when the policy was originally created. This is automatically set by the service and used for auditing and lifecycle management.

" + }, + "updatedAt":{ + "shape":"DateTimestamp", + "documentation":"

The timestamp when the policy was last modified. This tracks the most recent changes to the policy configuration or metadata.

" + }, + "policyArn":{ + "shape":"PolicyArn", + "documentation":"

The Amazon Resource Name (ARN) of the policy. This globally unique identifier can be used for cross-service references and IAM policy statements.

" + }, + "status":{ + "shape":"PolicyStatus", + "documentation":"

The current status of the policy.

" + }, + "statusReasons":{ + "shape":"PolicyStatusReasons", + "documentation":"

Additional information about the policy status. This provides details about any failures or the current state of the policy lifecycle.

" + } + }, + "documentation":"

Represents a complete policy resource within the AgentCore Policy system. Policies are ARN-able resources that contain Cedar policy statements and associated metadata for controlling agent behavior and access decisions. Each policy belongs to a policy engine and defines fine-grained authorization rules that are evaluated in real-time as agents interact with tools through Gateway. Policies use the Cedar policy language to specify who (principals based on OAuth claims like username, role, or scope) can perform what actions (tool calls) on which resources (Gateways), with optional conditions for attribute-based access control. Multiple policies can apply to a single request, with Cedar's forbid-wins semantics ensuring that security restrictions are never accidentally overridden.

" + }, + "PolicyArn":{ + "type":"string", + "max":203, + "min":96, + "pattern":"arn:aws[-a-z]{0,7}:bedrock-agentcore:[a-z0-9-]{9,15}:[0-9]{12}:policy-engine/[a-zA-Z][a-zA-Z0-9-_]{0,47}-[a-zA-Z0-9_]{10}/policy/[a-zA-Z][a-zA-Z0-9-_]{0,47}-[a-zA-Z0-9_]{10}" + }, + "PolicyDefinition":{ + "type":"structure", + "members":{ + "cedar":{ + "shape":"CedarPolicy", + "documentation":"

The Cedar policy definition within the policy definition structure. This contains the Cedar policy statement that defines the authorization logic using Cedar's human-readable, analyzable policy language. Cedar policies specify principals (who can access), actions (what operations are allowed), resources (what can be accessed), and optional conditions for fine-grained control. Cedar provides a formal policy language designed for authorization with deterministic evaluation, making policies testable, reviewable, and auditable. All Cedar policies follow a default-deny model where actions are denied unless explicitly permitted, and forbid policies always override permit policies.

" + } + }, + "documentation":"

Represents the definition structure for policies within the AgentCore Policy system. This structure encapsulates different policy formats and languages that can be used to define access control rules.

", + "union":true + }, + "PolicyEngine":{ + "type":"structure", + "required":[ + "policyEngineId", + "name", + "createdAt", + "updatedAt", + "policyEngineArn", + "status", + "statusReasons" + ], + "members":{ + "policyEngineId":{ + "shape":"ResourceId", + "documentation":"

The unique identifier for the policy engine. This system-generated identifier consists of the user name plus a 10-character generated suffix and serves as the primary key for policy engine operations.

" + }, + "name":{ + "shape":"PolicyEngineName", + "documentation":"

The customer-assigned immutable name for the policy engine. This human-readable identifier must be unique within the account and cannot exceed 48 characters.

" + }, + "description":{ + "shape":"Description", + "documentation":"

A human-readable description of the policy engine's purpose and scope. Limited to 4,096 characters, this helps administrators understand the policy engine's role in the overall governance strategy.

" + }, + "createdAt":{ + "shape":"DateTimestamp", + "documentation":"

The timestamp when the policy engine was originally created. This is automatically set by the service and used for auditing and lifecycle management.

" + }, + "updatedAt":{ + "shape":"DateTimestamp", + "documentation":"

The timestamp when the policy engine was last modified. This tracks the most recent changes to the policy engine configuration or metadata.

" + }, + "policyEngineArn":{ + "shape":"PolicyEngineArn", + "documentation":"

The Amazon Resource Name (ARN) of the policy engine. This globally unique identifier can be used for cross-service references and IAM policy statements.

" + }, + "status":{ + "shape":"PolicyEngineStatus", + "documentation":"

The current status of the policy engine.

" + }, + "statusReasons":{ + "shape":"PolicyStatusReasons", + "documentation":"

Additional information about the policy engine status. This provides details about any failures or the current state of the policy engine lifecycle.

" + } + }, + "documentation":"

Represents a policy engine resource within the AgentCore Policy system. Policy engines serve as containers for grouping related policies and provide the execution context for policy evaluation and management. Each policy engine can be associated with one Gateway (one engine per Gateway), where it intercepts all agent tool calls and evaluates them against the contained policies before allowing tools to execute. The policy engine maintains the Cedar schema generated from the Gateway's tool manifest, ensuring that policies are validated against the actual tools and parameters available. Policy engines support two enforcement modes that can be configured when associating with a Gateway: log-only mode for testing (evaluates decisions without blocking) and enforce mode for production (actively allows or denies based on policy evaluation).

" + }, + "PolicyEngineArn":{ + "type":"string", + "max":136, + "min":76, + "pattern":"arn:aws[-a-z]{0,7}:bedrock-agentcore:[a-z0-9-]{9,15}:[0-9]{12}:policy-engine/[a-zA-Z][a-zA-Z0-9-_]{0,47}-[a-zA-Z0-9_]{10}" + }, + "PolicyEngineName":{ + "type":"string", + "max":48, + "min":1, + "pattern":"[A-Za-z][A-Za-z0-9_]*" + }, + "PolicyEngineStatus":{ + "type":"string", + "enum":[ + "CREATING", + "ACTIVE", + "UPDATING", + "DELETING", + "CREATE_FAILED", + "UPDATE_FAILED", + "DELETE_FAILED" + ] + }, + "PolicyEngines":{ + "type":"list", + "member":{"shape":"PolicyEngine"}, + "max":100, + "min":0 + }, + "PolicyGeneration":{ + "type":"structure", + "required":[ + "policyEngineId", + "policyGenerationId", + "name", + "policyGenerationArn", + "resource", + "createdAt", + "updatedAt", + "status", + "statusReasons" + ], + "members":{ + "policyEngineId":{ + "shape":"ResourceId", + "documentation":"

The identifier of the policy engine associated with this generation request.

" + }, + "policyGenerationId":{ + "shape":"ResourceId", + "documentation":"

The unique identifier for this policy generation request.

" + }, + "name":{ + "shape":"PolicyGenerationName", + "documentation":"

The customer-assigned name for this policy generation request.

" + }, + "policyGenerationArn":{ + "shape":"PolicyGenerationArn", + "documentation":"

The ARN of this policy generation request.

" + }, + "resource":{ + "shape":"Resource", + "documentation":"

The resource information associated with this policy generation.

" + }, + "createdAt":{ + "shape":"DateTimestamp", + "documentation":"

The timestamp when this policy generation request was created.

" + }, + "updatedAt":{ + "shape":"DateTimestamp", + "documentation":"

The timestamp when this policy generation was last updated.

" + }, + "status":{ + "shape":"PolicyGenerationStatus", + "documentation":"

The current status of this policy generation request.

" + }, + "statusReasons":{ + "shape":"PolicyStatusReasons", + "documentation":"

Additional information about the generation status.

" + }, + "findings":{ + "shape":"String", + "documentation":"

Findings and insights from this policy generation process.

" + } + }, + "documentation":"

Represents a policy generation request within the AgentCore Policy system. Tracks the AI-powered conversion of natural language descriptions into Cedar policy statements, enabling users to author policies by describing authorization requirements in plain English. The generation process analyzes the natural language input along with the Gateway's tool context and Cedar schema to produce one or more validated policy options. Each generation request tracks the status of the conversion process and maintains findings about the generated policies, including validation results and potential issues. Generated policy assets remain available for one week after successful generation, allowing time to review and create policies from the generated options.

" + }, + "PolicyGenerationArn":{ + "type":"string", + "max":210, + "min":103, + "pattern":"arn:aws[-a-z]{0,7}:bedrock-agentcore:[a-z0-9-]{9,15}:[0-9]{12}:policy-engine/[a-zA-Z][a-zA-Z0-9-_]{0,47}-[a-zA-Z0-9_]{10}/policy-generation/[a-zA-Z][a-zA-Z0-9-_]{0,47}-[a-zA-Z0-9_]{10}" + }, + "PolicyGenerationAsset":{ + "type":"structure", + "required":[ + "policyGenerationAssetId", + "rawTextFragment", + "findings" + ], + "members":{ + "policyGenerationAssetId":{ + "shape":"ResourceId", + "documentation":"

The unique identifier for this generated policy asset within the policy generation request. This ID can be used to reference specific generated policy options when creating actual policies from the generation results.

" + }, + "definition":{"shape":"PolicyDefinition"}, + "rawTextFragment":{ + "shape":"NaturalLanguage", + "documentation":"

The portion of the original natural language input that this generated policy asset addresses. This helps users understand which part of their policy description was translated into this specific Cedar policy statement, enabling better policy selection and refinement. When a single natural language input describes multiple authorization requirements, the generation process creates separate policy assets for each requirement, with each asset's rawTextFragment showing which requirement it addresses. Use this mapping to verify that all parts of your natural language input were correctly translated into Cedar policies.

" + }, + "findings":{ + "shape":"Findings", + "documentation":"

Analysis findings and insights related to this specific generated policy asset. These findings may include validation results, potential issues, or recommendations for improvement to help users evaluate the quality and appropriateness of the generated policy.

" + } + }, + "documentation":"

Represents a generated policy asset from the AI-powered policy generation process within the AgentCore Policy system. Each asset contains a Cedar policy statement generated from natural language input, along with associated metadata and analysis findings to help users evaluate and select the most appropriate policy option.

" + }, + "PolicyGenerationAssets":{ + "type":"list", + "member":{"shape":"PolicyGenerationAsset"} + }, + "PolicyGenerationName":{ + "type":"string", + "max":48, + "min":1, + "pattern":"[A-Za-z][A-Za-z0-9_]*" + }, + "PolicyGenerationStatus":{ + "type":"string", + "enum":[ + "GENERATING", + "GENERATED", + "GENERATE_FAILED", + "DELETE_FAILED" + ] + }, + "PolicyGenerations":{ + "type":"list", + "member":{"shape":"PolicyGeneration"}, + "max":100, + "min":0 + }, + "PolicyName":{ + "type":"string", + "max":48, + "min":1, + "pattern":"[A-Za-z][A-Za-z0-9_]*" + }, + "PolicyStatus":{ + "type":"string", + "enum":[ + "CREATING", + "ACTIVE", + "UPDATING", + "DELETING", + "CREATE_FAILED", + "UPDATE_FAILED", + "DELETE_FAILED" + ] + }, + "PolicyStatusReasons":{ + "type":"list", + "member":{"shape":"String"} + }, + "PolicyValidationMode":{ + "type":"string", + "enum":[ + "FAIL_ON_ANY_FINDINGS", + "IGNORE_ALL_FINDINGS" ] }, "Prompt":{ @@ -5573,7 +8724,51 @@ "documentation":"

The server protocol for the agent runtime. This field specifies which protocol the agent runtime uses to communicate with clients.

" } }, - "documentation":"

The protocol configuration for an agent runtime. This structure defines how the agent runtime communicates with clients.

" + "documentation":"

The protocol configuration for an agent runtime. This structure defines how the agent runtime communicates with clients.

" + }, + "PutResourcePolicyRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "policy" + ], + "members":{ + "resourceArn":{ + "shape":"BedrockAgentcoreResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource for which to create or update the resource policy.

", + "location":"uri", + "locationName":"resourceArn" + }, + "policy":{ + "shape":"ResourcePolicyBody", + "documentation":"

The resource policy to create or update.

" + } + } + }, + "PutResourcePolicyResponse":{ + "type":"structure", + "required":["policy"], + "members":{ + "policy":{ + "shape":"ResourcePolicyBody", + "documentation":"

The resource policy that was created or updated.

" + } + } + }, + "RatingScale":{ + "type":"structure", + "members":{ + "numerical":{ + "shape":"NumericalScaleDefinitions", + "documentation":"

The numerical rating scale with defined score values and descriptions for quantitative evaluation.

" + }, + "categorical":{ + "shape":"CategoricalScaleDefinitions", + "documentation":"

The categorical rating scale with named categories and definitions for qualitative evaluation.

" + } + }, + "documentation":"

The rating scale that defines how evaluators should score agent performance, supporting both numerical and categorical scales.

", + "union":true }, "RecordingConfig":{ "type":"structure", @@ -5589,6 +8784,21 @@ }, "documentation":"

The recording configuration for a browser. This structure defines how browser sessions are recorded.

" }, + "ReflectionConfiguration":{ + "type":"structure", + "members":{ + "customReflectionConfiguration":{ + "shape":"CustomReflectionConfiguration", + "documentation":"

The configuration for a custom reflection strategy.

" + }, + "episodicReflectionConfiguration":{ + "shape":"EpisodicReflectionConfiguration", + "documentation":"

The configuration for the episodic reflection strategy.

" + } + }, + "documentation":"

Contains reflection configuration information for a memory strategy.

", + "union":true + }, "RequestHeaderAllowlist":{ "type":"list", "member":{"shape":"HeaderName"}, @@ -5610,6 +8820,23 @@ "type":"list", "member":{"shape":"String"} }, + "Resource":{ + "type":"structure", + "members":{ + "arn":{ + "shape":"BedrockAgentcoreResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the resource. This globally unique identifier specifies the exact resource that policies will be evaluated against for access control decisions.

" + } + }, + "documentation":"

Represents a resource within the AgentCore Policy system. Resources are the targets of policy evaluation. Currently, only AgentCore Gateways are supported as resources for policy enforcement.

", + "union":true + }, + "ResourceId":{ + "type":"string", + "max":59, + "min":12, + "pattern":"[A-Za-z][A-Za-z0-9_]*-[a-z0-9_]{10}" + }, "ResourceLimitExceededException":{ "type":"structure", "members":{ @@ -5644,6 +8871,11 @@ "min":1, "pattern":"\\w+:(\\/?\\/?)[^\\s]+" }, + "ResourcePolicyBody":{ + "type":"string", + "max":20480, + "min":1 + }, "ResourceType":{ "type":"string", "enum":[ @@ -5656,12 +8888,47 @@ "member":{"shape":"ResponseType"} }, "ResponseType":{"type":"string"}, + "RestApiMethod":{ + "type":"string", + "enum":[ + "GET", + "DELETE", + "HEAD", + "OPTIONS", + "PATCH", + "PUT", + "POST" + ] + }, + "RestApiMethods":{ + "type":"list", + "member":{"shape":"RestApiMethod"} + }, "RoleArn":{ "type":"string", "max":2048, "min":1, "pattern":"arn:aws(-[^:]+)?:iam::([0-9]{12})?:role/.+" }, + "Rule":{ + "type":"structure", + "required":["samplingConfig"], + "members":{ + "samplingConfig":{ + "shape":"SamplingConfig", + "documentation":"

The sampling configuration that determines what percentage of agent traces to evaluate.

" + }, + "filters":{ + "shape":"FilterList", + "documentation":"

The list of filters that determine which agent traces should be included in the evaluation based on trace properties.

" + }, + "sessionConfig":{ + "shape":"SessionConfig", + "documentation":"

The session configuration that defines timeout settings for detecting when agent sessions are complete and ready for evaluation.

" + } + }, + "documentation":"

The evaluation rule that defines sampling configuration, filtering criteria, and session detection settings for online evaluation.

" + }, "RuntimeContainerUri":{ "type":"string", "max":1024, @@ -5755,6 +9022,23 @@ }, "documentation":"

Output configuration for a Salesforce OAuth2 provider.

" }, + "SamplingConfig":{ + "type":"structure", + "required":["samplingPercentage"], + "members":{ + "samplingPercentage":{ + "shape":"SamplingConfigSamplingPercentageDouble", + "documentation":"

The percentage of agent traces to sample for evaluation, ranging from 0.01% to 100%.

" + } + }, + "documentation":"

The configuration that controls what percentage of agent traces are sampled for evaluation to manage evaluation volume and costs.

" + }, + "SamplingConfigSamplingPercentageDouble":{ + "type":"double", + "box":true, + "max":100.0, + "min":0.01 + }, "SandboxName":{ "type":"string", "pattern":"[a-zA-Z][a-zA-Z0-9_]{0,47}" @@ -6003,6 +9287,12 @@ "fault":true, "retryable":{"throttling":false} }, + "ServiceName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[a-zA-Z0-9._-]+" + }, "ServiceQuotaExceededException":{ "type":"structure", "members":{ @@ -6015,6 +9305,23 @@ }, "exception":true }, + "SessionConfig":{ + "type":"structure", + "required":["sessionTimeoutMinutes"], + "members":{ + "sessionTimeoutMinutes":{ + "shape":"SessionConfigSessionTimeoutMinutesInteger", + "documentation":"

The number of minutes of inactivity after which an agent session is considered complete and ready for evaluation. Default is 15 minutes.

" + } + }, + "documentation":"

The configuration that defines how agent sessions are detected and when they are considered complete for evaluation.

" + }, + "SessionConfigSessionTimeoutMinutesInteger":{ + "type":"integer", + "box":true, + "max":1440, + "min":1 + }, "SetTokenVaultCMKRequest":{ "type":"structure", "required":["kmsConfiguration"], @@ -6084,6 +9391,101 @@ }, "documentation":"

Output configuration for a Slack OAuth2 provider.

" }, + "StartPolicyGenerationRequest":{ + "type":"structure", + "required":[ + "policyEngineId", + "resource", + "content", + "name" + ], + "members":{ + "policyEngineId":{ + "shape":"ResourceId", + "documentation":"

The identifier of the policy engine that provides the context for policy generation. This engine's schema and tool context are used to ensure generated policies are valid and applicable.

", + "location":"uri", + "locationName":"policyEngineId" + }, + "resource":{ + "shape":"Resource", + "documentation":"

The resource information that provides context for policy generation. This helps the AI understand the target resources and generate appropriate access control rules.

" + }, + "content":{ + "shape":"Content", + "documentation":"

The natural language description of the desired policy behavior. This content is processed by AI to generate corresponding Cedar policy statements that match the described intent.

" + }, + "name":{ + "shape":"PolicyGenerationName", + "documentation":"

A customer-assigned name for the policy generation request. This helps track and identify generation operations, especially when running multiple generations simultaneously.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique, case-sensitive identifier to ensure the idempotency of the request. The AWS SDK automatically generates this token, so you don't need to provide it in most cases. If you retry a request with the same client token, the service returns the same response without starting a duplicate generation.

", + "idempotencyToken":true + } + } + }, + "StartPolicyGenerationResponse":{ + "type":"structure", + "required":[ + "policyEngineId", + "policyGenerationId", + "name", + "policyGenerationArn", + "resource", + "createdAt", + "updatedAt", + "status", + "statusReasons" + ], + "members":{ + "policyEngineId":{ + "shape":"ResourceId", + "documentation":"

The identifier of the policy engine associated with the started policy generation.

" + }, + "policyGenerationId":{ + "shape":"ResourceId", + "documentation":"

The unique identifier assigned to the policy generation request for tracking progress.

" + }, + "name":{ + "shape":"PolicyGenerationName", + "documentation":"

The customer-assigned name for the policy generation request.

" + }, + "policyGenerationArn":{ + "shape":"PolicyGenerationArn", + "documentation":"

The ARN of the created policy generation request.

" + }, + "resource":{ + "shape":"Resource", + "documentation":"

The resource information associated with the policy generation request.

" + }, + "createdAt":{ + "shape":"DateTimestamp", + "documentation":"

The timestamp when the policy generation request was created.

" + }, + "updatedAt":{ + "shape":"DateTimestamp", + "documentation":"

The timestamp when the policy generation was last updated.

" + }, + "status":{ + "shape":"PolicyGenerationStatus", + "documentation":"

The initial status of the policy generation request.

" + }, + "statusReasons":{ + "shape":"PolicyStatusReasons", + "documentation":"

Additional information about the generation status.

" + }, + "findings":{ + "shape":"String", + "documentation":"

Initial findings from the policy generation process.

" + } + } + }, + "Statement":{ + "type":"string", + "max":153600, + "min":35 + }, "StatusReason":{ "type":"string", "max":2048, @@ -6110,6 +9512,10 @@ "shape":"ConsolidationConfiguration", "documentation":"

The consolidation configuration for the memory strategy.

" }, + "reflection":{ + "shape":"ReflectionConfiguration", + "documentation":"

The reflection configuration for the memory strategy.

" + }, "selfManagedConfiguration":{ "shape":"SelfManagedConfiguration", "documentation":"

Self-managed configuration settings.

" @@ -6823,6 +10229,62 @@ } } }, + "UpdateEvaluatorRequest":{ + "type":"structure", + "required":["evaluatorId"], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If you don't specify this field, a value is randomly generated for you. If this token matches a previous request, the service ignores the request, but doesn't return an error. For more information, see Ensuring idempotency.

", + "idempotencyToken":true + }, + "evaluatorId":{ + "shape":"EvaluatorId", + "documentation":"

The unique identifier of the evaluator to update.

", + "location":"uri", + "locationName":"evaluatorId" + }, + "description":{ + "shape":"EvaluatorDescription", + "documentation":"

The updated description of the evaluator.

" + }, + "evaluatorConfig":{ + "shape":"EvaluatorConfig", + "documentation":"

The updated configuration for the evaluator, including LLM-as-a-Judge settings with instructions, rating scale, and model configuration.

" + }, + "level":{ + "shape":"EvaluatorLevel", + "documentation":"

The updated evaluation level (TOOL_CALL, TRACE, or SESSION) that determines the scope of evaluation.

" + } + } + }, + "UpdateEvaluatorResponse":{ + "type":"structure", + "required":[ + "evaluatorArn", + "evaluatorId", + "updatedAt", + "status" + ], + "members":{ + "evaluatorArn":{ + "shape":"EvaluatorArn", + "documentation":"

The Amazon Resource Name (ARN) of the updated evaluator.

" + }, + "evaluatorId":{ + "shape":"EvaluatorId", + "documentation":"

The unique identifier of the updated evaluator.

" + }, + "updatedAt":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the evaluator was last updated.

" + }, + "status":{ + "shape":"EvaluatorStatus", + "documentation":"

The status of the evaluator update operation.

" + } + } + }, "UpdateGatewayRequest":{ "type":"structure", "required":[ @@ -6872,6 +10334,10 @@ "shape":"GatewayInterceptorConfigurations", "documentation":"

The updated interceptor configurations for the gateway.

" }, + "policyEngineConfiguration":{ + "shape":"GatewayPolicyEngineConfiguration", + "documentation":"

The updated policy engine configuration for the gateway. A policy engine is a collection of policies that evaluates and authorizes agent tool calls. When associated with a gateway, the policy engine intercepts all agent requests and determines whether to allow or deny each action based on the defined policies.

" + }, "exceptionLevel":{ "shape":"ExceptionLevel", "documentation":"

The level of detail in error messages returned when invoking the gateway.

" @@ -6952,6 +10418,10 @@ "shape":"GatewayInterceptorConfigurations", "documentation":"

The updated interceptor configurations for the gateway.

" }, + "policyEngineConfiguration":{ + "shape":"GatewayPolicyEngineConfiguration", + "documentation":"

The updated policy engine configuration for the gateway.

" + }, "workloadIdentityDetails":{ "shape":"WorkloadIdentityDetails", "documentation":"

The workload identity details for the updated gateway.

" @@ -6995,6 +10465,10 @@ "credentialProviderConfigurations":{ "shape":"CredentialProviderConfigurations", "documentation":"

The updated credential provider configurations for the gateway target.

" + }, + "metadataConfiguration":{ + "shape":"MetadataConfiguration", + "documentation":"

Configuration for HTTP header and query parameter propagation to the gateway target.

" } } }, @@ -7051,6 +10525,10 @@ "lastSynchronizedAt":{ "shape":"DateTimestamp", "documentation":"

The date and time at which the targets were last synchronized.

" + }, + "metadataConfiguration":{ + "shape":"MetadataConfiguration", + "documentation":"

The metadata configuration that was applied to the gateway target.

" } } }, @@ -7096,7 +10574,7 @@ "type":"integer", "box":true, "max":365, - "min":7 + "min":3 }, "UpdateMemoryOutput":{ "type":"structure", @@ -7175,6 +10653,235 @@ } } }, + "UpdateOnlineEvaluationConfigRequest":{ + "type":"structure", + "required":["onlineEvaluationConfigId"], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique, case-sensitive identifier to ensure that the API request completes no more than one time. If you don't specify this field, a value is randomly generated for you. If this token matches a previous request, the service ignores the request, but doesn't return an error. For more information, see Ensuring idempotency.

", + "idempotencyToken":true + }, + "onlineEvaluationConfigId":{ + "shape":"OnlineEvaluationConfigId", + "documentation":"

The unique identifier of the online evaluation configuration to update.

", + "location":"uri", + "locationName":"onlineEvaluationConfigId" + }, + "description":{ + "shape":"EvaluationConfigDescription", + "documentation":"

The updated description of the online evaluation configuration.

" + }, + "rule":{ + "shape":"Rule", + "documentation":"

The updated evaluation rule containing sampling configuration, filters, and session settings.

" + }, + "dataSourceConfig":{ + "shape":"DataSourceConfig", + "documentation":"

The updated data source configuration specifying CloudWatch log groups and service names to monitor.

" + }, + "evaluators":{ + "shape":"EvaluatorList", + "documentation":"

The updated list of evaluators to apply during online evaluation.

" + }, + "evaluationExecutionRoleArn":{ + "shape":"RoleArn", + "documentation":"

The updated Amazon Resource Name (ARN) of the IAM role used for evaluation execution.

" + }, + "executionStatus":{ + "shape":"OnlineEvaluationExecutionStatus", + "documentation":"

The updated execution status to enable or disable the online evaluation.

" + } + } + }, + "UpdateOnlineEvaluationConfigResponse":{ + "type":"structure", + "required":[ + "onlineEvaluationConfigArn", + "onlineEvaluationConfigId", + "updatedAt", + "status", + "executionStatus" + ], + "members":{ + "onlineEvaluationConfigArn":{ + "shape":"OnlineEvaluationConfigArn", + "documentation":"

The Amazon Resource Name (ARN) of the updated online evaluation configuration.

" + }, + "onlineEvaluationConfigId":{ + "shape":"OnlineEvaluationConfigId", + "documentation":"

The unique identifier of the updated online evaluation configuration.

" + }, + "updatedAt":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the online evaluation configuration was last updated.

" + }, + "status":{ + "shape":"OnlineEvaluationConfigStatus", + "documentation":"

The status of the online evaluation configuration.

" + }, + "executionStatus":{ + "shape":"OnlineEvaluationExecutionStatus", + "documentation":"

The execution status indicating whether the online evaluation is currently running.

" + }, + "failureReason":{ + "shape":"String", + "documentation":"

The reason for failure if the online evaluation configuration update or execution failed.

" + } + } + }, + "UpdatePolicyEngineRequest":{ + "type":"structure", + "required":["policyEngineId"], + "members":{ + "policyEngineId":{ + "shape":"ResourceId", + "documentation":"

The unique identifier of the policy engine to be updated.

", + "location":"uri", + "locationName":"policyEngineId" + }, + "description":{ + "shape":"Description", + "documentation":"

The new description for the policy engine.

" + } + } + }, + "UpdatePolicyEngineResponse":{ + "type":"structure", + "required":[ + "policyEngineId", + "name", + "createdAt", + "updatedAt", + "policyEngineArn", + "status", + "statusReasons" + ], + "members":{ + "policyEngineId":{ + "shape":"ResourceId", + "documentation":"

The unique identifier of the updated policy engine.

" + }, + "name":{ + "shape":"PolicyEngineName", + "documentation":"

The name of the updated policy engine.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The updated description of the policy engine.

" + }, + "createdAt":{ + "shape":"DateTimestamp", + "documentation":"

The original creation timestamp of the policy engine.

" + }, + "updatedAt":{ + "shape":"DateTimestamp", + "documentation":"

The timestamp when the policy engine was last updated.

" + }, + "policyEngineArn":{ + "shape":"PolicyEngineArn", + "documentation":"

The ARN of the updated policy engine.

" + }, + "status":{ + "shape":"PolicyEngineStatus", + "documentation":"

The current status of the updated policy engine.

" + }, + "statusReasons":{ + "shape":"PolicyStatusReasons", + "documentation":"

Additional information about the update status.

" + } + } + }, + "UpdatePolicyRequest":{ + "type":"structure", + "required":[ + "policyEngineId", + "policyId", + "definition" + ], + "members":{ + "policyEngineId":{ + "shape":"ResourceId", + "documentation":"

The identifier of the policy engine that manages the policy to be updated. This ensures the policy is updated within the correct policy engine context.

", + "location":"uri", + "locationName":"policyEngineId" + }, + "policyId":{ + "shape":"ResourceId", + "documentation":"

The unique identifier of the policy to be updated. This must be a valid policy ID that exists within the specified policy engine.

", + "location":"uri", + "locationName":"policyId" + }, + "description":{ + "shape":"Description", + "documentation":"

The new human-readable description for the policy. This optional field allows updating the policy's documentation while keeping the same policy logic.

" + }, + "definition":{ + "shape":"PolicyDefinition", + "documentation":"

The new Cedar policy statement that defines the access control rules. This replaces the existing policy definition with new logic while maintaining the policy's identity.

" + }, + "validationMode":{ + "shape":"PolicyValidationMode", + "documentation":"

The validation mode for the policy update. Determines how Cedar analyzer validation results are handled during policy updates. FAIL_ON_ANY_FINDINGS runs the Cedar analyzer and fails the update if validation issues are detected, ensuring the policy conforms to the Cedar schema and tool context. IGNORE_ALL_FINDINGS runs the Cedar analyzer but allows updates despite validation warnings. Use FAIL_ON_ANY_FINDINGS to ensure policy correctness during updates, especially when modifying policy logic or conditions.

" + } + } + }, + "UpdatePolicyResponse":{ + "type":"structure", + "required":[ + "policyId", + "name", + "policyEngineId", + "definition", + "createdAt", + "updatedAt", + "policyArn", + "status", + "statusReasons" + ], + "members":{ + "policyId":{ + "shape":"ResourceId", + "documentation":"

The unique identifier of the updated policy.

" + }, + "name":{ + "shape":"PolicyName", + "documentation":"

The name of the updated policy.

" + }, + "policyEngineId":{ + "shape":"ResourceId", + "documentation":"

The identifier of the policy engine managing the updated policy.

" + }, + "definition":{ + "shape":"PolicyDefinition", + "documentation":"

The updated Cedar policy statement.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The updated description of the policy.

" + }, + "createdAt":{ + "shape":"DateTimestamp", + "documentation":"

The original creation timestamp of the policy.

" + }, + "updatedAt":{ + "shape":"DateTimestamp", + "documentation":"

The timestamp when the policy was last updated.

" + }, + "policyArn":{ + "shape":"PolicyArn", + "documentation":"

The ARN of the updated policy.

" + }, + "status":{ + "shape":"PolicyStatus", + "documentation":"

The current status of the updated policy.

" + }, + "statusReasons":{ + "shape":"PolicyStatusReasons", + "documentation":"

Additional information about the update status.

" + } + } + }, "UpdateWorkloadIdentityRequest":{ "type":"structure", "required":["name"], diff --git a/awscli/botocore/data/bedrock-agentcore-control/2023-06-05/waiters-2.json b/awscli/botocore/data/bedrock-agentcore-control/2023-06-05/waiters-2.json index f4e5da6f546d..f6b282129c39 100644 --- a/awscli/botocore/data/bedrock-agentcore-control/2023-06-05/waiters-2.json +++ b/awscli/botocore/data/bedrock-agentcore-control/2023-06-05/waiters-2.json @@ -21,6 +21,129 @@ "state" : "failure", "expected" : "FAILED" } ] + }, + "PolicyActive" : { + "description" : "Wait until a Policy is active", + "delay" : 2, + "maxAttempts" : 60, + "operation" : "GetPolicy", + "acceptors" : [ { + "matcher" : "path", + "argument" : "status", + "state" : "success", + "expected" : "ACTIVE" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "CREATE_FAILED" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "UPDATE_FAILED" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "DELETE_FAILED" + } ] + }, + "PolicyDeleted" : { + "description" : "Wait until a Policy is deleted", + "delay" : 2, + "maxAttempts" : 60, + "operation" : "GetPolicy", + "acceptors" : [ { + "matcher" : "error", + "state" : "success", + "expected" : "ResourceNotFoundException" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "DELETING" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "DELETE_FAILED" + } ] + }, + "PolicyEngineActive" : { + "description" : "Wait until a PolicyEngine is active", + "delay" : 2, + "maxAttempts" : 60, + "operation" : "GetPolicyEngine", + "acceptors" : [ { + "matcher" : "path", + "argument" : "status", + "state" : "success", + "expected" : "ACTIVE" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "CREATE_FAILED" + }, { + "matcher" : "path", + "argument" : 
"status", + "state" : "failure", + "expected" : "UPDATE_FAILED" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "DELETE_FAILED" + } ] + }, + "PolicyEngineDeleted" : { + "description" : "Wait until a PolicyEngine is deleted", + "delay" : 2, + "maxAttempts" : 60, + "operation" : "GetPolicyEngine", + "acceptors" : [ { + "matcher" : "error", + "state" : "success", + "expected" : "ResourceNotFoundException" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "DELETING" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "DELETE_FAILED" + } ] + }, + "PolicyGenerationCompleted" : { + "description" : "Wait until policy generation is completed", + "delay" : 2, + "maxAttempts" : 60, + "operation" : "GetPolicyGeneration", + "acceptors" : [ { + "matcher" : "path", + "argument" : "status", + "state" : "success", + "expected" : "GENERATED" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "retry", + "expected" : "GENERATING" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "GENERATE_FAILED" + }, { + "matcher" : "path", + "argument" : "status", + "state" : "failure", + "expected" : "DELETE_FAILED" + } ] } } } \ No newline at end of file diff --git a/awscli/botocore/data/bedrock-agentcore/2024-02-28/service-2.json b/awscli/botocore/data/bedrock-agentcore/2024-02-28/service-2.json index 268258213f00..d25e3240267b 100644 --- a/awscli/botocore/data/bedrock-agentcore/2024-02-28/service-2.json +++ b/awscli/botocore/data/bedrock-agentcore/2024-02-28/service-2.json @@ -151,6 +151,28 @@ ], "documentation":"

Deletes a memory record from an AgentCore Memory resource. When you delete a memory record, it is permanently removed.

To use this operation, you must have the bedrock-agentcore:DeleteMemoryRecord permission.

" }, + "Evaluate":{ + "name":"Evaluate", + "http":{ + "method":"POST", + "requestUri":"/evaluations/evaluate/{evaluatorId}", + "responseCode":200 + }, + "input":{"shape":"EvaluateRequest"}, + "output":{"shape":"EvaluateResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"UnauthorizedException"}, + {"shape":"DuplicateIdException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Performs on-demand evaluation of agent traces using a specified evaluator. This synchronous API accepts traces in OpenTelemetry format and returns immediate scoring results with detailed explanations.

" + }, "GetAgentCard":{ "name":"GetAgentCard", "http":{ @@ -187,7 +209,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Retrieves detailed information about a specific browser session in Amazon Bedrock. This operation returns the session's configuration, current status, associated streams, and metadata.

To get a browser session, you must specify both the browser identifier and the session ID. The response includes information about the session's viewport configuration, timeout settings, and stream endpoints.

The following operations are related to GetBrowserSession:

", + "documentation":"

Retrieves detailed information about a specific browser session in Amazon Bedrock. This operation returns the session's configuration, current status, associated streams, and metadata.

To get a browser session, you must specify both the browser identifier and the session ID. The response includes information about the session's viewport configuration, timeout settings, and stream endpoints.

The following operations are related to GetBrowserSession:

", "readonly":true }, "GetCodeInterpreterSession":{ @@ -206,7 +228,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Retrieves detailed information about a specific code interpreter session in Amazon Bedrock. This operation returns the session's configuration, current status, and metadata.

To get a code interpreter session, you must specify both the code interpreter identifier and the session ID. The response includes information about the session's timeout settings and current status.

The following operations are related to GetCodeInterpreterSession:

", + "documentation":"

Retrieves detailed information about a specific code interpreter session in Amazon Bedrock. This operation returns the session's configuration, current status, and metadata.

To get a code interpreter session, you must specify both the code interpreter identifier and the session ID. The response includes information about the session's timeout settings and current status.

The following operations are related to GetCodeInterpreterSession:

", "readonly":true }, "GetEvent":{ @@ -386,7 +408,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Executes code within an active code interpreter session in Amazon Bedrock. This operation processes the provided code, runs it in a secure environment, and returns the execution results including output, errors, and generated visualizations.

To execute code, you must specify the code interpreter identifier, session ID, and the code to run in the arguments parameter. The operation returns a stream containing the execution results, which can include text output, error messages, and data visualizations.

This operation is subject to request rate limiting based on your account's service quotas.

The following operations are related to InvokeCodeInterpreter:

" + "documentation":"

Executes code within an active code interpreter session in Amazon Bedrock. This operation processes the provided code, runs it in a secure environment, and returns the execution results including output, errors, and generated visualizations.

To execute code, you must specify the code interpreter identifier, session ID, and the code to run in the arguments parameter. The operation returns a stream containing the execution results, which can include text output, error messages, and data visualizations.

This operation is subject to request rate limiting based on your account's service quotas.

The following operations are related to InvokeCodeInterpreter:

" }, "ListActors":{ "name":"ListActors", @@ -425,7 +447,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Retrieves a list of browser sessions in Amazon Bedrock that match the specified criteria. This operation returns summary information about each session, including identifiers, status, and timestamps.

You can filter the results by browser identifier and session status. The operation supports pagination to handle large result sets efficiently.

We recommend using pagination to ensure that the operation returns quickly and successfully when retrieving large numbers of sessions.

The following operations are related to ListBrowserSessions:

", + "documentation":"

Retrieves a list of browser sessions in Amazon Bedrock that match the specified criteria. This operation returns summary information about each session, including identifiers, status, and timestamps.

You can filter the results by browser identifier and session status. The operation supports pagination to handle large result sets efficiently.

We recommend using pagination to ensure that the operation returns quickly and successfully when retrieving large numbers of sessions.

The following operations are related to ListBrowserSessions:

", "readonly":true }, "ListCodeInterpreterSessions":{ @@ -444,7 +466,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Retrieves a list of code interpreter sessions in Amazon Bedrock that match the specified criteria. This operation returns summary information about each session, including identifiers, status, and timestamps.

You can filter the results by code interpreter identifier and session status. The operation supports pagination to handle large result sets efficiently.

We recommend using pagination to ensure that the operation returns quickly and successfully when retrieving large numbers of sessions.

The following operations are related to ListCodeInterpreterSessions:

", + "documentation":"

Retrieves a list of code interpreter sessions in Amazon Bedrock that match the specified criteria. This operation returns summary information about each session, including identifiers, status, and timestamps.

You can filter the results by code interpreter identifier and session status. The operation supports pagination to handle large result sets efficiently.

We recommend using pagination to ensure that the operation returns quickly and successfully when retrieving large numbers of sessions.

The following operations are related to ListCodeInterpreterSessions:

", "readonly":true }, "ListEvents":{ @@ -567,7 +589,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Creates and initializes a browser session in Amazon Bedrock. The session enables agents to navigate and interact with web content, extract information from websites, and perform web-based tasks as part of their response generation.

To create a session, you must specify a browser identifier and a name. You can also configure the viewport dimensions to control the visible area of web content. The session remains active until it times out or you explicitly stop it using the StopBrowserSession operation.

The following operations are related to StartBrowserSession:

", + "documentation":"

Creates and initializes a browser session in Amazon Bedrock. The session enables agents to navigate and interact with web content, extract information from websites, and perform web-based tasks as part of their response generation.

To create a session, you must specify a browser identifier and a name. You can also configure the viewport dimensions to control the visible area of web content. The session remains active until it times out or you explicitly stop it using the StopBrowserSession operation.

The following operations are related to StartBrowserSession:

", "idempotent":true }, "StartCodeInterpreterSession":{ @@ -588,7 +610,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Creates and initializes a code interpreter session in Amazon Bedrock. The session enables agents to execute code as part of their response generation, supporting programming languages such as Python for data analysis, visualization, and computation tasks.

To create a session, you must specify a code interpreter identifier and a name. The session remains active until it times out or you explicitly stop it using the StopCodeInterpreterSession operation.

The following operations are related to StartCodeInterpreterSession:

", + "documentation":"

Creates and initializes a code interpreter session in Amazon Bedrock. The session enables agents to execute code as part of their response generation, supporting programming languages such as Python for data analysis, visualization, and computation tasks.

To create a session, you must specify a code interpreter identifier and a name. The session remains active until it times out or you explicitly stop it using the StopCodeInterpreterSession operation.

The following operations are related to StartCodeInterpreterSession:

", "idempotent":true }, "StartMemoryExtractionJob":{ @@ -629,7 +651,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Terminates an active browser session in Amazon Bedrock. This operation stops the session, releases associated resources, and makes the session unavailable for further use.

To stop a browser session, you must specify both the browser identifier and the session ID. Once stopped, a session cannot be restarted; you must create a new session using StartBrowserSession.

The following operations are related to StopBrowserSession:

", + "documentation":"

Terminates an active browser session in Amazon Bedrock. This operation stops the session, releases associated resources, and makes the session unavailable for further use.

To stop a browser session, you must specify both the browser identifier and the session ID. Once stopped, a session cannot be restarted; you must create a new session using StartBrowserSession.

The following operations are related to StopBrowserSession:

", "idempotent":true }, "StopCodeInterpreterSession":{ @@ -650,7 +672,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Terminates an active code interpreter session in Amazon Bedrock. This operation stops the session, releases associated resources, and makes the session unavailable for further use.

To stop a code interpreter session, you must specify both the code interpreter identifier and the session ID. Once stopped, a session cannot be restarted; you must create a new session using StartCodeInterpreterSession.

The following operations are related to StopCodeInterpreterSession:

", + "documentation":"

Terminates an active code interpreter session in Amazon Bedrock. This operation stops the session, releases associated resources, and makes the session unavailable for further use.

To stop a code interpreter session, you must specify both the code interpreter identifier and the session ID. Once stopped, a session cannot be restarted; you must create a new session using StartCodeInterpreterSession.

The following operations are related to StopCodeInterpreterSession:

", "idempotent":true }, "StopRuntimeSession":{ @@ -1230,6 +1252,17 @@ "min":1, "sensitive":true }, + "Context":{ + "type":"structure", + "members":{ + "spanContext":{ + "shape":"SpanContext", + "documentation":"

The span context information that uniquely identifies the trace and span being evaluated, including session ID, trace ID, and span ID for precise targeting within the agent's execution flow.

" + } + }, + "documentation":"

The contextual information associated with an evaluation, including span context details that identify the specific traces and sessions being evaluated within the agent's execution flow.

", + "union":true + }, "Conversational":{ "type":"structure", "required":[ @@ -1417,6 +1450,161 @@ "type":"double", "box":true }, + "DuplicateIdException":{ + "type":"structure", + "members":{ + "message":{"shape":"NonBlankString"} + }, + "documentation":"

An exception thrown when attempting to create a resource with an identifier that already exists.

", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "EvaluateRequest":{ + "type":"structure", + "required":[ + "evaluatorId", + "evaluationInput" + ], + "members":{ + "evaluatorId":{ + "shape":"EvaluatorId", + "documentation":"

The unique identifier of the evaluator to use for scoring. Can be a built-in evaluator (e.g., Builtin.Helpfulness, Builtin.Correctness) or a custom evaluator ARN created through the control plane API.

", + "location":"uri", + "locationName":"evaluatorId" + }, + "evaluationInput":{ + "shape":"EvaluationInput", + "documentation":"

The input data containing agent session spans to be evaluated. Includes a list of spans in OpenTelemetry format from supported frameworks like Strands (AgentCore Runtime) or LangGraph with OpenInference instrumentation.

" + }, + "evaluationTarget":{ + "shape":"EvaluationTarget", + "documentation":"

The specific trace or span IDs to evaluate within the provided input. Allows targeting evaluation at different levels: individual tool calls, single request-response interactions (traces), or entire conversation sessions.

" + } + } + }, + "EvaluateResponse":{ + "type":"structure", + "required":["evaluationResults"], + "members":{ + "evaluationResults":{ + "shape":"EvaluationResults", + "documentation":"

The detailed evaluation results containing scores, explanations, and metadata. Includes the evaluator information, numerical or categorical ratings based on the evaluator's rating scale, and token usage statistics for the evaluation process.

" + } + } + }, + "EvaluationErrorCode":{ + "type":"string", + "max":1024, + "min":1 + }, + "EvaluationErrorMessage":{ + "type":"string", + "max":2048, + "min":0 + }, + "EvaluationExplanation":{ + "type":"string", + "max":2048, + "min":0, + "sensitive":true + }, + "EvaluationInput":{ + "type":"structure", + "members":{ + "sessionSpans":{ + "shape":"Spans", + "documentation":"

The collection of spans representing agent execution traces within a session. Each span contains detailed information about tool calls, model interactions, and other agent activities that can be evaluated for quality and performance.

" + } + }, + "documentation":"

The input data structure containing agent session spans in OpenTelemetry format. Supports traces from frameworks like Strands (AgentCore Runtime) and LangGraph with OpenInference instrumentation for comprehensive evaluation.

", + "union":true + }, + "EvaluationResultContent":{ + "type":"structure", + "required":[ + "evaluatorArn", + "evaluatorId", + "evaluatorName", + "context" + ], + "members":{ + "evaluatorArn":{ + "shape":"EvaluatorArn", + "documentation":"

The Amazon Resource Name (ARN) of the evaluator used to generate this result. For custom evaluators, this is the full ARN; for built-in evaluators, this follows the pattern Builtin.{EvaluatorName}.

" + }, + "evaluatorId":{ + "shape":"EvaluatorId", + "documentation":"

The unique identifier of the evaluator that produced this result. This matches the evaluatorId provided in the evaluation request and can be used to identify which evaluator generated specific results.

" + }, + "evaluatorName":{ + "shape":"EvaluatorName", + "documentation":"

The human-readable name of the evaluator used for this evaluation. For built-in evaluators, this is the descriptive name (e.g., \"Helpfulness\", \"Correctness\"); for custom evaluators, this is the user-defined name.

" + }, + "explanation":{ + "shape":"EvaluationExplanation", + "documentation":"

The detailed explanation provided by the evaluator describing the reasoning behind the assigned score. This qualitative feedback helps understand why specific ratings were given and provides actionable insights for improvement.

" + }, + "context":{ + "shape":"Context", + "documentation":"

The contextual information associated with this evaluation result, including span context details that identify the specific traces and sessions that were evaluated.

" + }, + "value":{ + "shape":"Double", + "documentation":"

The numerical score assigned by the evaluator according to its configured rating scale. For numerical scales, this is a decimal value within the defined range. This field is not allowed for categorical scales.

" + }, + "label":{ + "shape":"String", + "documentation":"

The categorical label assigned by the evaluator when using a categorical rating scale. This provides a human-readable description of the evaluation result (e.g., \"Excellent\", \"Good\", \"Poor\") corresponding to the numerical value. For numerical scales, this field is optional and provides a natural language explanation of what the value means (e.g., value 0.5 = \"Somewhat Helpful\").

" + }, + "tokenUsage":{ + "shape":"TokenUsage", + "documentation":"

The token consumption statistics for this evaluation, including input tokens, output tokens, and total tokens used by the underlying language model during the evaluation process.

" + }, + "errorMessage":{ + "shape":"EvaluationErrorMessage", + "documentation":"

The error message describing what went wrong if the evaluation failed. Provides detailed information about evaluation failures to help diagnose and resolve issues with evaluator configuration or input data.

" + }, + "errorCode":{ + "shape":"EvaluationErrorCode", + "documentation":"

The error code indicating the type of failure that occurred during evaluation. Used to programmatically identify and handle different categories of evaluation errors.

" + } + }, + "documentation":"

The comprehensive result of an evaluation containing the score, explanation, evaluator metadata, and execution details. Provides both quantitative ratings and qualitative insights about agent performance.

" + }, + "EvaluationResults":{ + "type":"list", + "member":{"shape":"EvaluationResultContent"} + }, + "EvaluationTarget":{ + "type":"structure", + "members":{ + "spanIds":{ + "shape":"SpanIds", + "documentation":"

The list of specific span IDs to evaluate within the provided traces. Used to target evaluation at individual tool calls or specific operations within the agent's execution flow.

" + }, + "traceIds":{ + "shape":"TraceIds", + "documentation":"

The list of trace IDs to evaluate, representing complete request-response interactions. Used to evaluate entire conversation turns or specific agent interactions within a session.

" + } + }, + "documentation":"

The specification of which trace or span IDs to evaluate within the provided input data. Allows precise targeting of evaluation at different levels: tool calls, traces, or sessions.

", + "union":true + }, + "EvaluatorArn":{ + "type":"string", + "pattern":"arn:aws:bedrock-agentcore:[a-z0-9-]+:[0-9]{12}:evaluator\\/[a-zA-Z][a-zA-Z0-9-_]{0,99}-[a-zA-Z0-9]{10}$|^arn:aws:bedrock-agentcore:::evaluator/Builtin.[a-zA-Z0-9_-]+" + }, + "EvaluatorId":{ + "type":"string", + "pattern":"(Builtin.[a-zA-Z0-9_-]+|[a-zA-Z][a-zA-Z0-9-_]{0,99}-[a-zA-Z0-9]{10})" + }, + "EvaluatorName":{ + "type":"string", + "pattern":"(Builtin.[a-zA-Z0-9_-]+|[a-zA-Z][a-zA-Z0-9_]{0,47})" + }, "Event":{ "type":"structure", "required":[ @@ -2679,6 +2867,28 @@ "min":12, "pattern":"[a-zA-Z][a-zA-Z0-9-_]{0,99}-[a-zA-Z0-9]{10}" }, + "MemoryMetadataFilterExpression":{ + "type":"structure", + "required":[ + "left", + "operator" + ], + "members":{ + "left":{"shape":"LeftExpression"}, + "operator":{ + "shape":"OperatorType", + "documentation":"

The relationship between the metadata key and value to match when applying the metadata filter.

" + }, + "right":{"shape":"RightExpression"} + }, + "documentation":"

Filters to apply to metadata associated with a memory. Specify the metadata key and value in the left and right fields and use the operator field to define the relationship to match.

" + }, + "MemoryMetadataFilterList":{ + "type":"list", + "member":{"shape":"MemoryMetadataFilterExpression"}, + "max":1, + "min":1 + }, "MemoryRecord":{ "type":"structure", "required":[ @@ -2708,6 +2918,10 @@ "createdAt":{ "shape":"Timestamp", "documentation":"

The timestamp when the memory record was created.

" + }, + "metadata":{ + "shape":"MetadataMap", + "documentation":"

A map of metadata key-value pairs associated with a memory record.

" } }, "documentation":"

Contains information about a memory record in an AgentCore Memory resource.

" @@ -2831,6 +3045,10 @@ "score":{ "shape":"Double", "documentation":"

The relevance score of the memory record when returned as part of a search result. Higher values indicate greater relevance to the search query.

" + }, + "metadata":{ + "shape":"MetadataMap", + "documentation":"

A map of metadata key-value pairs associated with a memory record.

" } }, "documentation":"

Contains summary information about a memory record.

" @@ -3197,6 +3415,10 @@ "topK":{ "shape":"SearchCriteriaTopKInteger", "documentation":"

The maximum number of top-scoring memory records to return. This value is used for semantic search ranking.

" + }, + "metadataFilters":{ + "shape":"MemoryMetadataFilterList", + "documentation":"

Filters to apply to metadata associated with a memory.

" } }, "documentation":"

Contains search criteria for retrieving memory records.

" @@ -3281,6 +3503,48 @@ "max":256, "min":33 }, + "Span":{ + "type":"structure", + "members":{}, + "document":true + }, + "SpanContext":{ + "type":"structure", + "required":["sessionId"], + "members":{ + "sessionId":{ + "shape":"String", + "documentation":"

The unique identifier of the session containing this span. Sessions represent complete conversation flows and are detected using configurable SessionTimeoutMinutes (default 15 minutes).

" + }, + "traceId":{ + "shape":"String", + "documentation":"

The unique identifier of the trace containing this span. Traces represent individual request-response interactions within a session and group related spans together.

" + }, + "spanId":{ + "shape":"String", + "documentation":"

The unique identifier of the specific span being referenced. Spans represent individual operations like tool calls, model invocations, or other discrete actions within the agent's execution.

" + } + }, + "documentation":"

The contextual information that uniquely identifies a span within the distributed tracing system. Contains session, trace, and span identifiers used to correlate evaluation results with specific agent execution points.

" + }, + "SpanId":{ + "type":"string", + "max":16, + "min":16 + }, + "SpanIds":{ + "type":"list", + "member":{"shape":"SpanId"}, + "max":10, + "min":1 + }, + "Spans":{ + "type":"list", + "member":{"shape":"Span"}, + "max":1000, + "min":1, + "sensitive":true + }, "StartBrowserSessionRequest":{ "type":"structure", "required":["browserIdentifier"], @@ -3712,6 +3976,24 @@ "exception":true }, "Timestamp":{"type":"timestamp"}, + "TokenUsage":{ + "type":"structure", + "members":{ + "inputTokens":{ + "shape":"Integer", + "documentation":"

The number of tokens consumed for input processing during the evaluation. Includes tokens from the evaluation prompt, agent traces, and any additional context provided to the evaluator model.

" + }, + "outputTokens":{ + "shape":"Integer", + "documentation":"

The number of tokens generated by the evaluator model in its response. Includes tokens for the score, explanation, and any additional output produced during the evaluation process.

" + }, + "totalTokens":{ + "shape":"Integer", + "documentation":"

The total number of tokens consumed during the evaluation, calculated as the sum of input and output tokens. Used for cost calculation and rate limiting within the service limits.

" + } + }, + "documentation":"

The token consumption statistics for language model operations during evaluation. Provides detailed breakdown of input, output, and total tokens used for cost tracking and performance monitoring.

" + }, "ToolArguments":{ "type":"structure", "members":{ @@ -3798,6 +4080,17 @@ }, "documentation":"

Contains structured content from a tool result.

" }, + "TraceId":{ + "type":"string", + "max":32, + "min":32 + }, + "TraceIds":{ + "type":"list", + "member":{"shape":"TraceId"}, + "max":10, + "min":1 + }, "UnauthorizedException":{ "type":"structure", "members":{ diff --git a/awscli/botocore/data/bedrock-data-automation/2023-07-26/service-2.json b/awscli/botocore/data/bedrock-data-automation/2023-07-26/service-2.json index a3619c37cda9..3de22bc2da39 100644 --- a/awscli/botocore/data/bedrock-data-automation/2023-07-26/service-2.json +++ b/awscli/botocore/data/bedrock-data-automation/2023-07-26/service-2.json @@ -13,6 +13,25 @@ "uid":"bedrock-data-automation-2023-07-26" }, "operations":{ + "CopyBlueprintStage":{ + "name":"CopyBlueprintStage", + "http":{ + "method":"PUT", + "requestUri":"/blueprints/{blueprintArn}/copy-stage", + "responseCode":200 + }, + "input":{"shape":"CopyBlueprintStageRequest"}, + "output":{"shape":"CopyBlueprintStageResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Copies a Blueprint from one stage to another

", + "idempotent":true + }, "CreateBlueprint":{ "name":"CreateBlueprint", "http":{ @@ -130,6 +149,25 @@ "documentation":"

Gets an existing Amazon Bedrock Data Automation Blueprint

", "readonly":true }, + "GetBlueprintOptimizationStatus":{ + "name":"GetBlueprintOptimizationStatus", + "http":{ + "method":"POST", + "requestUri":"/getBlueprintOptimizationStatus/{invocationArn}", + "responseCode":200 + }, + "input":{"shape":"GetBlueprintOptimizationStatusRequest"}, + "output":{"shape":"GetBlueprintOptimizationStatusResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

API used to get blueprint optimization status.

", + "readonly":true + }, "GetDataAutomationProject":{ "name":"GetDataAutomationProject", "http":{ @@ -149,6 +187,26 @@ "documentation":"

Gets an existing Amazon Bedrock Data Automation Project

", "readonly":true }, + "InvokeBlueprintOptimizationAsync":{ + "name":"InvokeBlueprintOptimizationAsync", + "http":{ + "method":"POST", + "requestUri":"/invokeBlueprintOptimizationAsync", + "responseCode":200 + }, + "input":{"shape":"InvokeBlueprintOptimizationAsyncRequest"}, + "output":{"shape":"InvokeBlueprintOptimizationAsyncResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Invoke an async job to perform Blueprint Optimization

", + "idempotent":true + }, "ListBlueprints":{ "name":"ListBlueprints", "http":{ @@ -419,7 +477,9 @@ "blueprintVersion":{"shape":"BlueprintVersion"}, "blueprintStage":{"shape":"BlueprintStage"}, "kmsKeyId":{"shape":"KmsKeyId"}, - "kmsEncryptionContext":{"shape":"KmsEncryptionContext"} + "kmsEncryptionContext":{"shape":"KmsEncryptionContext"}, + "optimizationSamples":{"shape":"BlueprintOptimizationSamples"}, + "optimizationTime":{"shape":"DateTimestamp"} }, "documentation":"

Contains the information of a Blueprint.

" }, @@ -463,6 +523,73 @@ "pattern":"[a-zA-Z0-9-_]+", "sensitive":true }, + "BlueprintOptimizationInvocationArn":{ + "type":"string", + "documentation":"

Invocation ARN.

", + "max":128, + "min":1, + "pattern":"arn:aws(|-cn|-iso|-iso-[a-z]|-us-gov):bedrock:[a-zA-Z0-9-]*:[0-9]{12}:blueprint-optimization-invocation/[a-zA-Z0-9-_]+" + }, + "BlueprintOptimizationJobStatus":{ + "type":"string", + "documentation":"

List of statuses supported by optimization jobs

", + "enum":[ + "Created", + "InProgress", + "Success", + "ServiceError", + "ClientError" + ] + }, + "BlueprintOptimizationObject":{ + "type":"structure", + "required":["blueprintArn"], + "members":{ + "blueprintArn":{ + "shape":"BlueprintArn", + "documentation":"

Arn of blueprint.

" + }, + "stage":{ + "shape":"BlueprintStage", + "documentation":"

Stage of blueprint.

" + } + }, + "documentation":"

Structure for single blueprint entity.

" + }, + "BlueprintOptimizationOutputConfiguration":{ + "type":"structure", + "required":["s3Object"], + "members":{ + "s3Object":{ + "shape":"S3Object", + "documentation":"

S3 object.

" + } + }, + "documentation":"

Blueprint Optimization Output configuration.

" + }, + "BlueprintOptimizationSample":{ + "type":"structure", + "required":[ + "assetS3Object", + "groundTruthS3Object" + ], + "members":{ + "assetS3Object":{ + "shape":"S3Object", + "documentation":"

S3 Object of the asset

" + }, + "groundTruthS3Object":{ + "shape":"S3Object", + "documentation":"

Ground truth for the Blueprint and Asset combination

" + } + }, + "documentation":"

Blueprint Optimization Sample

" + }, + "BlueprintOptimizationSamples":{ + "type":"list", + "member":{"shape":"BlueprintOptimizationSample"}, + "documentation":"

List of Blueprint Optimization Samples

" + }, "BlueprintSchema":{ "type":"string", "documentation":"

Schema of the blueprint

", @@ -546,6 +673,41 @@ }, "exception":true }, + "CopyBlueprintStageRequest":{ + "type":"structure", + "required":[ + "blueprintArn", + "sourceStage", + "targetStage" + ], + "members":{ + "blueprintArn":{ + "shape":"BlueprintArn", + "documentation":"

Blueprint to be copied

", + "location":"uri", + "locationName":"blueprintArn" + }, + "sourceStage":{ + "shape":"BlueprintStage", + "documentation":"

Source stage to copy from

" + }, + "targetStage":{ + "shape":"BlueprintStage", + "documentation":"

Target stage to copy to

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

Client token for idempotency

", + "idempotencyToken":true + } + }, + "documentation":"

CopyBlueprintStage Request

" + }, + "CopyBlueprintStageResponse":{ + "type":"structure", + "members":{}, + "documentation":"

CopyBlueprintStage Response

" + }, "CreateBlueprintRequest":{ "type":"structure", "required":[ @@ -640,6 +802,13 @@ }, "documentation":"

Custom output configuration

" }, + "DataAutomationProfileArn":{ + "type":"string", + "documentation":"

Data automation profile ARN.

", + "max":128, + "min":1, + "pattern":"arn:aws(|-cn|-us-gov):bedrock:[a-zA-Z0-9-]*:(aws|[0-9]{12}):data-automation-profile/[a-zA-Z0-9-_.]+" + }, "DataAutomationProject":{ "type":"structure", "required":[ @@ -944,6 +1113,41 @@ "min":1, "pattern":".*\\S.*" }, + "GetBlueprintOptimizationStatusRequest":{ + "type":"structure", + "required":["invocationArn"], + "members":{ + "invocationArn":{ + "shape":"BlueprintOptimizationInvocationArn", + "documentation":"

Invocation ARN.

", + "location":"uri", + "locationName":"invocationArn" + } + }, + "documentation":"

Structure for request of GetBlueprintOptimizationStatus API.

" + }, + "GetBlueprintOptimizationStatusResponse":{ + "type":"structure", + "members":{ + "status":{ + "shape":"BlueprintOptimizationJobStatus", + "documentation":"

Job Status.

" + }, + "errorType":{ + "shape":"String", + "documentation":"

Error Type.

" + }, + "errorMessage":{ + "shape":"String", + "documentation":"

Error Message.

" + }, + "outputConfiguration":{ + "shape":"BlueprintOptimizationOutputConfiguration", + "documentation":"

Output configuration.

" + } + }, + "documentation":"

Response of GetBlueprintOptimizationStatus API.

" + }, "GetBlueprintRequest":{ "type":"structure", "required":["blueprintArn"], @@ -1087,6 +1291,53 @@ "exception":true, "fault":true }, + "InvokeBlueprintOptimizationAsyncRequest":{ + "type":"structure", + "required":[ + "blueprint", + "samples", + "outputConfiguration", + "dataAutomationProfileArn" + ], + "members":{ + "blueprint":{ + "shape":"BlueprintOptimizationObject", + "documentation":"

Blueprint to be optimized

" + }, + "samples":{ + "shape":"BlueprintOptimizationSamples", + "documentation":"

List of Blueprint Optimization Samples

" + }, + "outputConfiguration":{ + "shape":"BlueprintOptimizationOutputConfiguration", + "documentation":"

Output configuration where the results should be placed

" + }, + "dataAutomationProfileArn":{ + "shape":"DataAutomationProfileArn", + "documentation":"

Data automation profile ARN

" + }, + "encryptionConfiguration":{ + "shape":"EncryptionConfiguration", + "documentation":"

Encryption configuration.

" + }, + "tags":{ + "shape":"TagList", + "documentation":"

List of tags.

" + } + }, + "documentation":"

Invoke Blueprint Optimization Async Request

" + }, + "InvokeBlueprintOptimizationAsyncResponse":{ + "type":"structure", + "required":["invocationArn"], + "members":{ + "invocationArn":{ + "shape":"BlueprintOptimizationInvocationArn", + "documentation":"

ARN of the blueprint optimization job

" + } + }, + "documentation":"

Invoke Blueprint Optimization Async Response

" + }, "KmsEncryptionContext":{ "type":"map", "key":{"shape":"EncryptionContextKey"}, @@ -1306,6 +1557,34 @@ "ACCOUNT" ] }, + "S3Object":{ + "type":"structure", + "required":["s3Uri"], + "members":{ + "s3Uri":{ + "shape":"S3Uri", + "documentation":"

S3 URI.

" + }, + "version":{ + "shape":"S3ObjectVersion", + "documentation":"

S3 object version.

" + } + }, + "documentation":"

S3 object

" + }, + "S3ObjectVersion":{ + "type":"string", + "documentation":"

S3 object version.

", + "max":1024, + "min":1 + }, + "S3Uri":{ + "type":"string", + "documentation":"

A path in S3

", + "max":1024, + "min":1, + "pattern":"s3://[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9](/.*)?" + }, "SensitiveDataConfiguration":{ "type":"structure", "required":["detectionMode"], @@ -1393,6 +1672,7 @@ "DISABLED" ] }, + "String":{"type":"string"}, "Tag":{ "type":"structure", "required":[ @@ -1451,7 +1731,7 @@ "documentation":"

ARN of a taggable resource

", "max":1011, "min":20, - "pattern":"arn:aws(|-cn|-us-gov):bedrock:[a-z0-9-]*:[0-9]{12}:(blueprint|data-automation-project)/[a-zA-Z0-9-]{12,36}" + "pattern":"arn:aws(|-cn|-iso|-iso-[a-z]|-us-gov):bedrock:[a-z0-9-]*:[0-9]{12}:(blueprint|data-automation-project|blueprint-optimization-invocation)/[a-zA-Z0-9-]{12,36}" }, "ThrottlingException":{ "type":"structure", diff --git a/awscli/botocore/data/bedrock-runtime/2023-09-30/service-2.json b/awscli/botocore/data/bedrock-runtime/2023-09-30/service-2.json index d2fdf012ee23..1ab57597aa26 100644 --- a/awscli/botocore/data/bedrock-runtime/2023-09-30/service-2.json +++ b/awscli/botocore/data/bedrock-runtime/2023-09-30/service-2.json @@ -463,6 +463,68 @@ }, "documentation":"

A summary of an asynchronous invocation.

" }, + "AudioBlock":{ + "type":"structure", + "required":[ + "format", + "source" + ], + "members":{ + "format":{ + "shape":"AudioFormat", + "documentation":"

The format of the audio data, such as MP3, WAV, FLAC, or other supported audio formats.

" + }, + "source":{ + "shape":"AudioSource", + "documentation":"

The source of the audio data, which can be provided as raw bytes or an S3 location.

" + }, + "error":{ + "shape":"ErrorBlock", + "documentation":"

Error information if the audio block could not be processed or contains invalid data.

" + } + }, + "documentation":"

An audio content block that contains audio data in various supported formats.

" + }, + "AudioFormat":{ + "type":"string", + "enum":[ + "mp3", + "opus", + "wav", + "aac", + "flac", + "mp4", + "ogg", + "mkv", + "mka", + "x-aac", + "m4a", + "mpeg", + "mpga", + "pcm", + "webm" + ] + }, + "AudioSource":{ + "type":"structure", + "members":{ + "bytes":{ + "shape":"AudioSourceBytesBlob", + "documentation":"

Audio data encoded in base64.

" + }, + "s3Location":{ + "shape":"S3Location", + "documentation":"

A reference to audio data stored in an Amazon S3 bucket. To see which models support S3 uploads, see Supported models and features for Converse.

" + } + }, + "documentation":"

The source of audio data, which can be provided either as raw bytes or a reference to an S3 location.

", + "sensitive":true, + "union":true + }, + "AudioSourceBytesBlob":{ + "type":"blob", + "min":1 + }, "AutoToolChoice":{ "type":"structure", "members":{}, @@ -699,6 +761,10 @@ "shape":"VideoBlock", "documentation":"

Video to include in the message.

" }, + "audio":{ + "shape":"AudioBlock", + "documentation":"

An audio content block containing audio data in the conversation.

" + }, "toolUse":{ "shape":"ToolUseBlock", "documentation":"

Information about a tool use request from a model.

" @@ -753,6 +819,10 @@ "citation":{ "shape":"CitationsDelta", "documentation":"

Incremental citation information that is streamed as part of the response generation process.

" + }, + "image":{ + "shape":"ImageBlockDelta", + "documentation":"

A streaming delta event containing incremental image data.

" } }, "documentation":"

A block of content in a streaming response.

", @@ -787,6 +857,10 @@ "toolResult":{ "shape":"ToolResultBlockStart", "documentation":"

The start information for a streaming tool result block.

" + }, + "image":{ + "shape":"ImageBlockStart", + "documentation":"

The initial event indicating the start of a streaming image block.

" } }, "documentation":"

Content block start information.

", @@ -1431,6 +1505,17 @@ "type":"blob", "min":1 }, + "ErrorBlock":{ + "type":"structure", + "members":{ + "message":{ + "shape":"String", + "documentation":"

A human-readable error message describing what went wrong during content processing.

" + } + }, + "documentation":"

A block containing error information when content processing fails.

", + "sensitive":true + }, "FoundationModelVersionIdentifier":{ "type":"string", "documentation":"

ARN or ID of a Bedrock model

", @@ -2768,10 +2853,39 @@ "source":{ "shape":"ImageSource", "documentation":"

The source for the image.

" + }, + "error":{ + "shape":"ErrorBlock", + "documentation":"

Error information if the image block could not be processed or contains invalid data.

" } }, "documentation":"

Image content for a message.

" }, + "ImageBlockDelta":{ + "type":"structure", + "members":{ + "source":{ + "shape":"ImageSource", + "documentation":"

The incremental image source data for this delta event.

" + }, + "error":{ + "shape":"ErrorBlock", + "documentation":"

Error information if this image delta could not be processed.

" + } + }, + "documentation":"

A streaming delta event that contains incremental image data during streaming responses.

" + }, + "ImageBlockStart":{ + "type":"structure", + "required":["format"], + "members":{ + "format":{ + "shape":"ImageFormat", + "documentation":"

The format of the image data that will be streamed in subsequent delta events.

" + } + }, + "documentation":"

The initial event in a streaming image block that indicates the start of image content.

" + }, "ImageFormat":{ "type":"string", "enum":[ @@ -2794,6 +2908,7 @@ } }, "documentation":"

The source for an image.

", + "sensitive":true, "union":true }, "ImageSourceBytesBlob":{ @@ -3771,6 +3886,8 @@ "stop_sequence", "guardrail_intervened", "content_filtered", + "malformed_model_output", + "malformed_tool_use", "model_context_window_exceeded" ] }, @@ -4032,6 +4149,10 @@ "text":{ "shape":"String", "documentation":"

The reasoning the model used to return the output.

" + }, + "json":{ + "shape":"Document", + "documentation":"

The JSON schema for the tool result content block. See the JSON Schema Reference.

" } }, "documentation":"

Contains incremental updates to tool results information during streaming responses. This allows clients to build up tool results data progressively as the response is generated.

", diff --git a/awscli/botocore/data/bedrock/2023-04-20/service-2.json b/awscli/botocore/data/bedrock/2023-04-20/service-2.json index 59fc7a86ac56..a1e66c00a4f2 100644 --- a/awscli/botocore/data/bedrock/2023-04-20/service-2.json +++ b/awscli/botocore/data/bedrock/2023-04-20/service-2.json @@ -1836,6 +1836,25 @@ "documentation":"

Updates an existing Automated Reasoning policy test. You can modify the content, query, expected result, and confidence threshold.

", "idempotent":true }, + "UpdateCustomModelDeployment":{ + "name":"UpdateCustomModelDeployment", + "http":{ + "method":"PATCH", + "requestUri":"/model-customization/custom-model-deployments/{customModelDeploymentIdentifier}", + "responseCode":202 + }, + "input":{"shape":"UpdateCustomModelDeploymentRequest"}, + "output":{"shape":"UpdateCustomModelDeploymentResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Updates a custom model deployment with a new custom model. This allows you to deploy updated models without creating new deployment endpoints.

", + "idempotent":true + }, "UpdateGuardrail":{ "name":"UpdateGuardrail", "http":{ @@ -2735,7 +2754,8 @@ "BUILD_LOG", "QUALITY_REPORT", "POLICY_DEFINITION", - "GENERATED_TEST_CASES" + "GENERATED_TEST_CASES", + "POLICY_SCENARIOS" ] }, "AutomatedReasoningPolicyBuildResultAssets":{ @@ -2756,6 +2776,10 @@ "generatedTestCases":{ "shape":"AutomatedReasoningPolicyGeneratedTestCases", "documentation":"

A comprehensive test suite generated by the build workflow, providing validation capabilities for automated reasoning policies.

" + }, + "policyScenarios":{ + "shape":"AutomatedReasoningPolicyScenarios", + "documentation":"

An entity encompassing all the policy scenarios generated by the build workflow, which can be used to validate an Automated Reasoning policy.

" } }, "documentation":"

Contains the various assets generated during a policy build workflow, including logs, quality reports, test cases, and the final policy definition.

", @@ -3491,8 +3515,8 @@ "required":[ "expression", "alternateExpression", - "ruleIds", - "expectedResult" + "expectedResult", + "ruleIds" ], "members":{ "expression":{ @@ -3503,13 +3527,13 @@ "shape":"AutomatedReasoningPolicyScenarioAlternateExpression", "documentation":"

An alternative way to express the same test scenario, used for validation and comparison purposes.

" }, - "ruleIds":{ - "shape":"AutomatedReasoningPolicyDefinitionRuleIdList", - "documentation":"

The list of rule identifiers that are expected to be triggered or evaluated by this test scenario.

" - }, "expectedResult":{ "shape":"AutomatedReasoningCheckResult", "documentation":"

The expected outcome when this scenario is evaluated against the policy (e.g., PASS, FAIL, VIOLATION).

" + }, + "ruleIds":{ + "shape":"AutomatedReasoningPolicyDefinitionRuleIdList", + "documentation":"

The list of rule identifiers that are expected to be triggered or evaluated by this test scenario.

" } }, "documentation":"

Represents a test scenario used to validate an Automated Reasoning policy, including the test conditions and expected outcomes.

" @@ -3526,6 +3550,21 @@ "min":0, "sensitive":true }, + "AutomatedReasoningPolicyScenarioList":{ + "type":"list", + "member":{"shape":"AutomatedReasoningPolicyScenario"} + }, + "AutomatedReasoningPolicyScenarios":{ + "type":"structure", + "required":["policyScenarios"], + "members":{ + "policyScenarios":{ + "shape":"AutomatedReasoningPolicyScenarioList", + "documentation":"

Represents a collection of generated policy scenarios.

" + } + }, + "documentation":"

Contains a comprehensive entity encompassing all the scenarios generated by the build workflow, which can be used to validate an Automated Reasoning policy.

" + }, "AutomatedReasoningPolicySummaries":{ "type":"list", "member":{"shape":"AutomatedReasoningPolicySummary"}, @@ -5189,6 +5228,32 @@ "type":"list", "member":{"shape":"CustomModelDeploymentSummary"} }, + "CustomModelDeploymentUpdateDetails":{ + "type":"structure", + "required":[ + "modelArn", + "updateStatus" + ], + "members":{ + "modelArn":{ + "shape":"ModelArn", + "documentation":"

ARN of the new custom model being deployed as part of the update.

" + }, + "updateStatus":{ + "shape":"CustomModelDeploymentUpdateStatus", + "documentation":"

Current status of the deployment update.

" + } + }, + "documentation":"

Details about an update to a custom model deployment, including the new custom model resource ARN and current update status.

" + }, + "CustomModelDeploymentUpdateStatus":{ + "type":"string", + "enum":[ + "Updating", + "UpdateCompleted", + "UpdateFailed" + ] + }, "CustomModelName":{ "type":"string", "max":63, @@ -5268,6 +5333,10 @@ "distillationConfig":{ "shape":"DistillationConfig", "documentation":"

The Distillation configuration for the custom model.

" + }, + "rftConfig":{ + "shape":"RFTConfig", + "documentation":"

Configuration settings for reinforcement fine-tuning (RFT) model customization, including grader configuration and hyperparameters.

" } }, "documentation":"

A model customization configuration

", @@ -5279,6 +5348,7 @@ "FINE_TUNING", "CONTINUED_PRE_TRAINING", "DISTILLATION", + "REINFORCEMENT_FINE_TUNING", "IMPORTED" ] }, @@ -5630,6 +5700,12 @@ "NOT_AVAILABLE" ] }, + "EpochCount":{ + "type":"integer", + "box":true, + "max":50, + "min":1 + }, "ErrorMessage":{ "type":"string", "max":2048, @@ -6821,6 +6897,10 @@ "shape":"CustomModelDeploymentDescription", "documentation":"

The description of the custom model deployment.

" }, + "updateDetails":{ + "shape":"CustomModelDeploymentUpdateDetails", + "documentation":"

Details about any pending or completed updates to the custom model deployment, including the new model ARN and update status.

" + }, "failureMessage":{ "shape":"ErrorMessage", "documentation":"

If the deployment status is FAILED, this field contains a message describing the failure reason.

" @@ -7846,6 +7926,17 @@ } } }, + "GraderConfig":{ + "type":"structure", + "members":{ + "lambdaGrader":{ + "shape":"LambdaGraderConfig", + "documentation":"

Configuration for using an AWS Lambda function as the grader for evaluating model responses and providing reward signals in reinforcement fine-tuning.

" + } + }, + "documentation":"

Configuration for the grader used in reinforcement fine-tuning to evaluate model responses and provide reward signals.

", + "union":true + }, "GuardrailArn":{ "type":"string", "max":2048, @@ -9563,6 +9654,23 @@ "max":100, "min":1 }, + "LambdaArn":{ + "type":"string", + "max":512, + "min":1, + "pattern":"arn:(aws[a-zA-Z-]*)?:lambda:[a-z]{2}(-gov)?-[a-z]+-\\d{1}:\\d{12}:function:[a-zA-Z0-9-_\\.]+(:(\\$LATEST|[a-zA-Z0-9-_]+))?" + }, + "LambdaGraderConfig":{ + "type":"structure", + "required":["lambdaArn"], + "members":{ + "lambdaArn":{ + "shape":"LambdaArn", + "documentation":"

ARN of the AWS Lambda function that will evaluate model responses and return reward scores for RFT training.

" + } + }, + "documentation":"

Configuration for using an AWS Lambda function to grade model responses during reinforcement fine-tuning training.

" + }, "LegalTerm":{ "type":"structure", "members":{ @@ -10662,6 +10770,10 @@ "videoDataDeliveryEnabled":{ "shape":"Boolean", "documentation":"

Set to include video data in the log delivery.

" + }, + "audioDataDeliveryEnabled":{ + "shape":"Boolean", + "documentation":"

Set to include audio data in the log delivery.

" } }, "documentation":"

Configuration fields for invocation logging.

" @@ -11757,6 +11869,90 @@ "max":1000, "min":1 }, + "RFTBatchSize":{ + "type":"integer", + "box":true, + "max":512, + "min":16 + }, + "RFTConfig":{ + "type":"structure", + "members":{ + "graderConfig":{ + "shape":"GraderConfig", + "documentation":"

Configuration for the grader that evaluates model responses and provides reward signals during RFT training.

" + }, + "hyperParameters":{ + "shape":"RFTHyperParameters", + "documentation":"

Hyperparameters that control the reinforcement fine-tuning training process, including learning rate, batch size, and epoch count.

" + } + }, + "documentation":"

Configuration settings for reinforcement fine-tuning (RFT), including grader configuration and training hyperparameters.

" + }, + "RFTEvalInterval":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "RFTHyperParameters":{ + "type":"structure", + "members":{ + "epochCount":{ + "shape":"EpochCount", + "documentation":"

Number of training epochs to run during reinforcement fine-tuning. Higher values may improve performance but increase training time.

" + }, + "batchSize":{ + "shape":"RFTBatchSize", + "documentation":"

Number of training samples processed in each batch during reinforcement fine-tuning (RFT) training. Larger batches may improve training stability.

" + }, + "learningRate":{ + "shape":"RFTLearningRate", + "documentation":"

Learning rate for the reinforcement fine-tuning. Controls how quickly the model adapts to reward signals.

" + }, + "maxPromptLength":{ + "shape":"RFTMaxPromptLength", + "documentation":"

Maximum length of input prompts during RFT training, measured in tokens. Longer prompts allow more context but increase memory usage and training time.

" + }, + "trainingSamplePerPrompt":{ + "shape":"RFTTrainingSamplePerPrompt", + "documentation":"

Number of response samples generated per prompt during RFT training. More samples provide better reward signal estimation.

" + }, + "inferenceMaxTokens":{ + "shape":"RFTInferenceMaxTokens", + "documentation":"

Maximum number of tokens the model can generate in response to each prompt during RFT training.

" + }, + "reasoningEffort":{ + "shape":"ReasoningEffort", + "documentation":"

Level of reasoning effort applied during RFT training. Higher values may improve response quality but increase training time.

" + }, + "evalInterval":{ + "shape":"RFTEvalInterval", + "documentation":"

Interval between evaluation runs during RFT training, measured in training steps. More frequent evaluation provides better monitoring.

" + } + }, + "documentation":"

Hyperparameters for controlling the reinforcement fine-tuning training process, including learning settings and evaluation intervals.

" + }, + "RFTInferenceMaxTokens":{ + "type":"integer", + "box":true + }, + "RFTLearningRate":{ + "type":"float", + "box":true, + "max":0.001, + "min":0.0000001 + }, + "RFTMaxPromptLength":{ + "type":"integer", + "box":true + }, + "RFTTrainingSamplePerPrompt":{ + "type":"integer", + "box":true, + "max":16, + "min":2 + }, "RagConfigs":{ "type":"list", "member":{"shape":"RAGConfig"}, @@ -11816,6 +12012,14 @@ "max":100, "min":1 }, + "ReasoningEffort":{ + "type":"string", + "enum":[ + "low", + "medium", + "high" + ] + }, "RegionAvailability":{ "type":"string", "enum":[ @@ -12849,6 +13053,35 @@ } } }, + "UpdateCustomModelDeploymentRequest":{ + "type":"structure", + "required":[ + "modelArn", + "customModelDeploymentIdentifier" + ], + "members":{ + "modelArn":{ + "shape":"CustomModelArn", + "documentation":"

ARN of the new custom model to deploy. This replaces the currently deployed model.

" + }, + "customModelDeploymentIdentifier":{ + "shape":"CustomModelDeploymentIdentifier", + "documentation":"

Identifier of the custom model deployment to update with the new custom model.

", + "location":"uri", + "locationName":"customModelDeploymentIdentifier" + } + } + }, + "UpdateCustomModelDeploymentResponse":{ + "type":"structure", + "required":["customModelDeploymentArn"], + "members":{ + "customModelDeploymentArn":{ + "shape":"CustomModelDeploymentArn", + "documentation":"

ARN of the custom model deployment being updated.

" + } + } + }, "UpdateGuardrailRequest":{ "type":"structure", "required":[ diff --git a/awscli/botocore/data/billingconductor/2021-07-30/service-2.json b/awscli/botocore/data/billingconductor/2021-07-30/service-2.json index 32696ec288ab..452850cee75d 100644 --- a/awscli/botocore/data/billingconductor/2021-07-30/service-2.json +++ b/awscli/botocore/data/billingconductor/2021-07-30/service-2.json @@ -832,6 +832,18 @@ }, "documentation":"

The key-value pair that represents the attribute by which the BillingGroupCostReportResults are grouped. For example, if you want a service-level breakdown for Amazon Simple Storage Service (Amazon S3) of the billing group, the attribute will be a key-value pair of \"PRODUCT_NAME\" and \"S3\".

" }, + "AttributeValue":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z0-9]+" + }, + "AttributeValueList":{ + "type":"list", + "member":{"shape":"AttributeValue"}, + "max":1, + "min":0 + }, "AttributesList":{ "type":"list", "member":{"shape":"Attribute"} @@ -1130,7 +1142,10 @@ "ComputationRuleEnum":{ "type":"string", "documentation":"

The display settings of the custom line item

", - "enum":["CONSOLIDATED"] + "enum":[ + "ITEMIZED", + "CONSOLIDATED" + ] }, "ConflictException":{ "type":"structure", @@ -1931,8 +1946,7 @@ "type":"structure", "required":[ "Attribute", - "MatchOption", - "Values" + "MatchOption" ], "members":{ "Attribute":{ @@ -1946,13 +1960,20 @@ "Values":{ "shape":"LineItemFilterValuesList", "documentation":"

The values of the line item filter. This specifies the values to filter on. Currently, you can only exclude Savings Plans discounts.

" + }, + "AttributeValues":{ + "shape":"AttributeValueList", + "documentation":"

The values of the line item filter. This specifies the values to filter on.

" } }, "documentation":"

A representation of the line item filter for your custom line item. You can use line item filters to include or exclude specific resource values from the billing group's total cost. For example, if you create a custom line item and you want to filter out a value, such as Savings Plans discounts, you can update LineItemFilter to exclude it.

" }, "LineItemFilterAttributeName":{ "type":"string", - "enum":["LINE_ITEM_TYPE"] + "enum":[ + "LINE_ITEM_TYPE", + "SERVICE" + ] }, "LineItemFilterValue":{ "type":"string", @@ -2590,7 +2611,10 @@ "MarginPercentage":{"type":"string"}, "MatchOption":{ "type":"string", - "enum":["NOT_EQUAL"] + "enum":[ + "NOT_EQUAL", + "EQUAL" + ] }, "MaxBillingGroupCostReportResults":{ "type":"integer", @@ -3504,6 +3528,7 @@ "ILLEGAL_MODIFIER_PERCENTAGE", "ILLEGAL_TYPE", "ILLEGAL_BILLING_GROUP_TYPE", + "ILLEGAL_BILLING_GROUP_PRICING_PLAN", "ILLEGAL_ENDED_BILLINGGROUP", "ILLEGAL_TIERING_INPUT", "ILLEGAL_OPERATION", diff --git a/awscli/botocore/data/ce/2017-10-25/paginators-1.json b/awscli/botocore/data/ce/2017-10-25/paginators-1.json index 64c2cce947c4..764af9f4ec8d 100644 --- a/awscli/botocore/data/ce/2017-10-25/paginators-1.json +++ b/awscli/botocore/data/ce/2017-10-25/paginators-1.json @@ -29,6 +29,54 @@ "output_token": "NextPageToken", "limit_key": "MaxResults", "result_key": "CostComparisonDrivers" + }, + "ListCostAllocationTagBackfillHistory": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "BackfillRequests" + }, + "ListCostAllocationTags": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "CostAllocationTags" + }, + "ListCostCategoryDefinitions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "CostCategoryReferences" + }, + "ListCostCategoryResourceAssociations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "CostCategoryResourceAssociations" + }, + "GetReservationPurchaseRecommendation": { + "input_token": "NextPageToken", + "output_token": "NextPageToken", + "limit_key": "PageSize", + "result_key": "Recommendations" + }, + "GetRightsizingRecommendation": { + "input_token": "NextPageToken", + "output_token": "NextPageToken", + "limit_key": 
"PageSize", + "result_key": "RightsizingRecommendations" + }, + "ListCommitmentPurchaseAnalyses": { + "input_token": "NextPageToken", + "output_token": "NextPageToken", + "limit_key": "PageSize", + "result_key": "AnalysisSummaryList" + }, + "ListSavingsPlansPurchaseRecommendationGeneration": { + "input_token": "NextPageToken", + "output_token": "NextPageToken", + "limit_key": "PageSize", + "result_key": "GenerationSummaryList" } } } diff --git a/awscli/botocore/data/ce/2017-10-25/paginators-1.sdk-extras.json b/awscli/botocore/data/ce/2017-10-25/paginators-1.sdk-extras.json index e7ea26fc3db5..83b187a11573 100644 --- a/awscli/botocore/data/ce/2017-10-25/paginators-1.sdk-extras.json +++ b/awscli/botocore/data/ce/2017-10-25/paginators-1.sdk-extras.json @@ -6,6 +6,18 @@ "non_aggregate_keys": [ "TotalCostAndUsage" ] + }, + "GetReservationPurchaseRecommendation": { + "non_aggregate_keys": [ + "Metadata" + ] + }, + "GetRightsizingRecommendation": { + "non_aggregate_keys": [ + "Configuration", + "Metadata", + "Summary" + ] } } } diff --git a/awscli/botocore/data/ce/2017-10-25/service-2.json b/awscli/botocore/data/ce/2017-10-25/service-2.json index 56d25ff29abd..3851248748a5 100644 --- a/awscli/botocore/data/ce/2017-10-25/service-2.json +++ b/awscli/botocore/data/ce/2017-10-25/service-2.json @@ -55,7 +55,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Creates a new Cost Category with the requested name and rules.

" + "documentation":"

Creates a new cost category with the requested name and rules.

" }, "DeleteAnomalyMonitor":{ "name":"DeleteAnomalyMonitor", @@ -97,7 +97,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Deletes a Cost Category. Expenses from this month going forward will no longer be categorized with this Cost Category.

" + "documentation":"

Deletes a cost category. Expenses from this month going forward will no longer be categorized with this cost category.

" }, "DescribeCostCategoryDefinition":{ "name":"DescribeCostCategoryDefinition", @@ -111,7 +111,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Returns the name, Amazon Resource Name (ARN), rules, definition, and effective dates of a Cost Category that's defined in the account.

You have the option to use EffectiveOn to return a Cost Category that's active on a specific date. If there's no EffectiveOn specified, you see a Cost Category that's effective on the current date. If Cost Category is still effective, EffectiveEnd is omitted in the response.

" + "documentation":"

Returns the name, Amazon Resource Name (ARN), rules, definition, and effective dates of a cost category that's defined in the account.

You have the option to use EffectiveOn to return a cost category that's active on a specific date. If there's no EffectiveOn specified, you see a Cost Category that's effective on the current date. If cost category is still effective, EffectiveEnd is omitted in the response.

" }, "GetAnomalies":{ "name":"GetAnomalies", @@ -258,7 +258,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"BillingViewHealthStatusException"} ], - "documentation":"

Retrieves an array of Cost Category names and values incurred cost.

If some Cost Category names and values are not associated with any cost, they will not be returned by this API.

" + "documentation":"

Retrieves an array of cost category names and values incurred cost.

If some cost category names and values are not associated with any cost, they will not be returned by this API.

" }, "GetCostComparisonDrivers":{ "name":"GetCostComparisonDrivers", @@ -325,7 +325,7 @@ {"shape":"DataUnavailableException"}, {"shape":"InvalidNextTokenException"} ], - "documentation":"

Retrieves the reservation coverage for your account, which you can use to see how much of your Amazon Elastic Compute Cloud, Amazon ElastiCache, Amazon Relational Database Service, or Amazon Redshift usage is covered by a reservation. An organization's management account can see the coverage of the associated member accounts. This supports dimensions, Cost Categories, and nested expressions. For any time period, you can filter data about reservation usage by the following dimensions:

To determine valid values for a dimension, use the GetDimensionValues operation.

" + "documentation":"

Retrieves the reservation coverage for your account, which you can use to see how much of your Amazon Elastic Compute Cloud, Amazon ElastiCache, Amazon Relational Database Service, or Amazon Redshift usage is covered by a reservation. An organization's management account can see the coverage of the associated member accounts. This supports dimensions, cost categories, and nested expressions. For any time period, you can filter data about reservation usage by the following dimensions:

To determine valid values for a dimension, use the GetDimensionValues operation.

" }, "GetReservationPurchaseRecommendation":{ "name":"GetReservationPurchaseRecommendation", @@ -398,7 +398,7 @@ {"shape":"DataUnavailableException"}, {"shape":"InvalidNextTokenException"} ], - "documentation":"

Retrieves the Savings Plans covered for your account. This enables you to see how much of your cost is covered by a Savings Plan. An organization’s management account can see the coverage of the associated member accounts. This supports dimensions, Cost Categories, and nested expressions. For any time period, you can filter data for Savings Plans usage with the following dimensions:

To determine valid values for a dimension, use the GetDimensionValues operation.

" + "documentation":"

Retrieves the Savings Plans covered for your account. This enables you to see how much of your cost is covered by a Savings Plan. An organization’s management account can see the coverage of the associated member accounts. This supports dimensions, cost categories, and nested expressions. For any time period, you can filter data for Savings Plans usage with the following dimensions:

To determine valid values for a dimension, use the GetDimensionValues operation.

" }, "GetSavingsPlansPurchaseRecommendation":{ "name":"GetSavingsPlansPurchaseRecommendation", @@ -533,7 +533,21 @@ "errors":[ {"shape":"LimitExceededException"} ], - "documentation":"

Returns the name, Amazon Resource Name (ARN), NumberOfRules and effective dates of all Cost Categories defined in the account. You have the option to use EffectiveOn to return a list of Cost Categories that were active on a specific date. If there is no EffectiveOn specified, you’ll see Cost Categories that are effective on the current date. If Cost Category is still effective, EffectiveEnd is omitted in the response. ListCostCategoryDefinitions supports pagination. The request can have a MaxResults range up to 100.

" + "documentation":"

Returns the name, Amazon Resource Name (ARN), NumberOfRules and effective dates of all cost categories defined in the account. You have the option to use EffectiveOn and SupportedResourceTypes to return a list of cost categories that were active on a specific date. If there is no EffectiveOn specified, you’ll see cost categories that are effective on the current date. If cost category is still effective, EffectiveEnd is omitted in the response. ListCostCategoryDefinitions supports pagination. The request can have a MaxResults range up to 100.

" + }, + "ListCostCategoryResourceAssociations":{ + "name":"ListCostCategoryResourceAssociations", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListCostCategoryResourceAssociationsRequest"}, + "output":{"shape":"ListCostCategoryResourceAssociationsResponse"}, + "errors":[ + {"shape":"LimitExceededException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns resource associations of all cost categories defined in the account. You have the option to use CostCategoryArn to get the association for a specific cost category. ListCostCategoryResourceAssociations supports pagination. The request can have a MaxResults range up to 100.

" }, "ListSavingsPlansPurchaseRecommendationGeneration":{ "name":"ListSavingsPlansPurchaseRecommendationGeneration", @@ -707,7 +721,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Updates an existing Cost Category. Changes made to the Cost Category rules will be used to categorize the current month’s expenses and future expenses. This won’t change categorization for the previous months.

" + "documentation":"

Updates an existing cost category. Changes made to the cost category rules will be used to categorize the current month’s expenses and future expenses. This won’t change categorization for the previous months.

" } }, "shapes":{ @@ -726,6 +740,11 @@ }, "AmortizedRecurringFee":{"type":"string"}, "AmortizedUpfrontFee":{"type":"string"}, + "AnalysesPageSize":{ + "type":"integer", + "max":600, + "min":0 + }, "AnalysisDetails":{ "type":"structure", "members":{ @@ -1269,25 +1288,25 @@ "members":{ "CostCategoryArn":{ "shape":"Arn", - "documentation":"

The unique identifier for your Cost Category.

" + "documentation":"

The unique identifier for your cost category.

" }, "EffectiveStart":{ "shape":"ZonedDateTime", - "documentation":"

The effective start date of your Cost Category.

" + "documentation":"

The effective start date of your cost category.

" }, "EffectiveEnd":{ "shape":"ZonedDateTime", - "documentation":"

The effective end date of your Cost Category.

" + "documentation":"

The effective end date of your cost category.

" }, "Name":{"shape":"CostCategoryName"}, "RuleVersion":{"shape":"CostCategoryRuleVersion"}, "Rules":{ "shape":"CostCategoryRulesList", - "documentation":"

The rules are processed in order. If there are multiple rules that match the line item, then the first rule to match is used to determine that Cost Category value.

" + "documentation":"

The rules are processed in order. If there are multiple rules that match the line item, then the first rule to match is used to determine that cost category value.

" }, "SplitChargeRules":{ "shape":"CostCategorySplitChargeRulesList", - "documentation":"

The split charge rules that are used to allocate your charges between your Cost Category values.

" + "documentation":"

The split charge rules that are used to allocate your charges between your cost category values.

" }, "ProcessingStatus":{ "shape":"CostCategoryProcessingStatusList", @@ -1325,7 +1344,7 @@ }, "CostCategoryName":{ "type":"string", - "documentation":"

The unique name of the Cost Category.

", + "documentation":"

The unique name of the cost category.

", "max":50, "min":1, "pattern":"^(?! )[\\p{L}\\p{N}\\p{Z}-_]*(?The unique identifier for your Cost Category.

" + "documentation":"

The unique identifier for your cost category.

" }, "Name":{"shape":"CostCategoryName"}, "EffectiveStart":{ "shape":"ZonedDateTime", - "documentation":"

The Cost Category's effective start date.

" + "documentation":"

The cost category's effective start date.

" }, "EffectiveEnd":{ "shape":"ZonedDateTime", - "documentation":"

The Cost Category's effective end date.

" + "documentation":"

The cost category's effective end date.

" }, "NumberOfRules":{ "shape":"NonNegativeInteger", - "documentation":"

The number of rules that are associated with a specific Cost Category.

" + "documentation":"

The number of rules that are associated with a specific cost category.

" }, "ProcessingStatus":{ "shape":"CostCategoryProcessingStatusList", @@ -1380,21 +1399,44 @@ "shape":"CostCategoryValuesList", "documentation":"

A list of unique cost category values in a specific cost category.

" }, - "DefaultValue":{"shape":"CostCategoryValue"} + "DefaultValue":{"shape":"CostCategoryValue"}, + "SupportedResourceTypes":{ + "shape":"ResourceTypes", + "documentation":"

The resource types supported by a specific cost category.

" + } }, - "documentation":"

A reference to a Cost Category containing only enough information to identify the Cost Category.

You can use this information to retrieve the full Cost Category information using DescribeCostCategory.

" + "documentation":"

A reference to a cost category containing only enough information to identify the Cost Category.

You can use this information to retrieve the full cost category information using DescribeCostCategory.

" }, "CostCategoryReferencesList":{ "type":"list", "member":{"shape":"CostCategoryReference"} }, + "CostCategoryResourceAssociation":{ + "type":"structure", + "members":{ + "ResourceArn":{ + "shape":"GenericArn", + "documentation":"

The unique identifier for an associated resource.

" + }, + "CostCategoryName":{"shape":"CostCategoryName"}, + "CostCategoryArn":{ + "shape":"Arn", + "documentation":"

The unique identifier for your cost category.

" + } + }, + "documentation":"

A reference to a cost category association that contains information on an associated resource.

" + }, + "CostCategoryResourceAssociations":{ + "type":"list", + "member":{"shape":"CostCategoryResourceAssociation"} + }, "CostCategoryRule":{ "type":"structure", "members":{ "Value":{"shape":"CostCategoryValue"}, "Rule":{ "shape":"Expression", - "documentation":"

An Expression object used to categorize costs. This supports dimensions, tags, and nested expressions. Currently the only dimensions supported are LINKED_ACCOUNT, SERVICE_CODE, RECORD_TYPE, LINKED_ACCOUNT_NAME, REGION, and USAGE_TYPE.

RECORD_TYPE is a dimension used for Cost Explorer APIs, and is also supported for Cost Category expressions. This dimension uses different terms, depending on whether you're using the console or API/JSON editor. For a detailed comparison, see Term Comparisons in the Billing and Cost Management User Guide.

" + "documentation":"

An Expression object used to categorize costs. This supports dimensions, tags, and nested expressions. Currently the only dimensions supported are LINKED_ACCOUNT, SERVICE_CODE, RECORD_TYPE, LINKED_ACCOUNT_NAME, REGION, and USAGE_TYPE.

RECORD_TYPE is a dimension used for Cost Explorer APIs, and is also supported for cost category expressions. This dimension uses different terms, depending on whether you're using the console or API/JSON editor. For a detailed comparison, see Term Comparisons in the Billing and Cost Management User Guide.

" }, "InheritedValue":{ "shape":"CostCategoryInheritedValueDimension", @@ -1405,7 +1447,7 @@ "documentation":"

You can define the CostCategoryRule rule type as either REGULAR or INHERITED_VALUE. The INHERITED_VALUE rule type adds the flexibility to define a rule that dynamically inherits the cost category value. This value is from the dimension value that's defined by CostCategoryInheritedValueDimension. For example, suppose that you want to costs to be dynamically grouped based on the value of a specific tag key. First, choose an inherited value rule type, and then choose the tag dimension and specify the tag key to use.

" } }, - "documentation":"

Rules are processed in order. If there are multiple rules that match the line item, then the first rule to match is used to determine that Cost Category value.

" + "documentation":"

Rules are processed in order. If there are multiple rules that match the line item, then the first rule to match is used to determine that cost category value.

" }, "CostCategoryRuleType":{ "type":"string", @@ -1416,7 +1458,7 @@ }, "CostCategoryRuleVersion":{ "type":"string", - "documentation":"

The rule schema version in this particular Cost Category.

", + "documentation":"

The rule schema version in this particular cost category.

", "enum":["CostCategoryExpression.v1"] }, "CostCategoryRulesList":{ @@ -1443,11 +1485,11 @@ "members":{ "Source":{ "shape":"GenericString", - "documentation":"

The Cost Category value that you want to split. That value can't be used as a source or a target in other split charge rules. To indicate uncategorized costs, you can use an empty string as the source.

" + "documentation":"

The cost category value that you want to split. That value can't be used as a source or a target in other split charge rules. To indicate uncategorized costs, you can use an empty string as the source.

" }, "Targets":{ "shape":"CostCategorySplitChargeRuleTargetsList", - "documentation":"

The Cost Category values that you want to split costs across. These values can't be used as a source in other split charge rules.

" + "documentation":"

The cost category values that you want to split costs across. These values can't be used as a source in other split charge rules.

" }, "Method":{ "shape":"CostCategorySplitChargeMethod", @@ -1458,7 +1500,7 @@ "documentation":"

The parameters for a split charge method. This is only required for the FIXED method.

" } }, - "documentation":"

Use the split charge rule to split the cost of one Cost Category value across several other target values.

" + "documentation":"

Use the split charge rule to split the cost of one cost category value across several other target values.

" }, "CostCategorySplitChargeRuleParameter":{ "type":"structure", @@ -1530,7 +1572,7 @@ "Key":{"shape":"CostCategoryName"}, "Values":{ "shape":"Values", - "documentation":"

The specific value of the Cost Category.

" + "documentation":"

The specific value of the cost category.

" }, "MatchOptions":{ "shape":"MatchOptions", @@ -1745,17 +1787,17 @@ "Name":{"shape":"CostCategoryName"}, "EffectiveStart":{ "shape":"ZonedDateTime", - "documentation":"

The Cost Category's effective start date. It can only be a billing start date (first day of the month). If the date isn't provided, it's the first day of the current month. Dates can't be before the previous twelve months, or in the future.

" + "documentation":"

The cost category's effective start date. It can only be a billing start date (first day of the month). If the date isn't provided, it's the first day of the current month. Dates can't be before the previous twelve months, or in the future.

" }, "RuleVersion":{"shape":"CostCategoryRuleVersion"}, "Rules":{ "shape":"CostCategoryRulesList", - "documentation":"

The Cost Category rules used to categorize costs. For more information, see CostCategoryRule.

" + "documentation":"

The cost category rules used to categorize costs. For more information, see CostCategoryRule.

" }, "DefaultValue":{"shape":"CostCategoryValue"}, "SplitChargeRules":{ "shape":"CostCategorySplitChargeRulesList", - "documentation":"

The split charge rules used to allocate your charges between your Cost Category values.

" + "documentation":"

The split charge rules used to allocate your charges between your cost category values.

" }, "ResourceTags":{ "shape":"ResourceTagList", @@ -1768,11 +1810,11 @@ "members":{ "CostCategoryArn":{ "shape":"Arn", - "documentation":"

The unique identifier for your newly created Cost Category.

" + "documentation":"

The unique identifier for your newly created cost category.

" }, "EffectiveStart":{ "shape":"ZonedDateTime", - "documentation":"

The Cost Category's effective start date. It can only be a billing start date (first day of the month).

" + "documentation":"

The cost category's effective start date. It can only be a billing start date (first day of the month).

" } } }, @@ -1886,7 +1928,7 @@ "members":{ "CostCategoryArn":{ "shape":"Arn", - "documentation":"

The unique identifier for your Cost Category.

" + "documentation":"

The unique identifier for your cost category.

" } } }, @@ -1895,11 +1937,11 @@ "members":{ "CostCategoryArn":{ "shape":"Arn", - "documentation":"

The unique identifier for your Cost Category.

" + "documentation":"

The unique identifier for your cost category.

" }, "EffectiveEnd":{ "shape":"ZonedDateTime", - "documentation":"

The effective end date of the Cost Category as a result of deleting it. No costs after this date is categorized by the deleted Cost Category.

" + "documentation":"

The effective end date of the cost category as a result of deleting it. No costs after this date are categorized by the deleted cost category.

" } } }, @@ -1909,11 +1951,11 @@ "members":{ "CostCategoryArn":{ "shape":"Arn", - "documentation":"

The unique identifier for your Cost Category.

" + "documentation":"

The unique identifier for your cost category.

" }, "EffectiveOn":{ "shape":"ZonedDateTime", - "documentation":"

The date when the Cost Category was effective.

" + "documentation":"

The date when the cost category was effective.

" } } }, @@ -1976,7 +2018,7 @@ }, "MatchOptions":{ "shape":"MatchOptions", - "documentation":"

The match options that you can use to filter your results.

MatchOptions is only applicable for actions related to Cost Category and Anomaly Subscriptions. Refer to the documentation for each specific API to see what is supported.

The default values for MatchOptions are EQUALS and CASE_SENSITIVE.

" + "documentation":"

The match options that you can use to filter your results.

MatchOptions is only applicable for actions related to cost category and Anomaly Subscriptions. Refer to the documentation for each specific API to see what is supported.

The default values for MatchOptions are EQUALS and CASE_SENSITIVE.

" } }, "documentation":"

The metadata that you can use to filter and group your results. You can use GetDimensionValues to find specific values.

" @@ -2377,6 +2419,12 @@ "type":"list", "member":{"shape":"GenerationSummary"} }, + "GenericArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:aws[-a-z0-9]*:[a-z0-9]+:[-a-z0-9]*:[0-9]{12}:[-a-zA-Z0-9/:_]+" + }, "GenericBoolean":{"type":"boolean"}, "GenericDouble":{"type":"double"}, "GenericString":{ @@ -2724,7 +2772,7 @@ }, "Filter":{ "shape":"Expression", - "documentation":"

Filters Amazon Web Services costs by different dimensions. For example, you can specify SERVICE and LINKED_ACCOUNT and get the costs that are associated with that account's usage of that service. You can nest Expression objects to define any combination of dimension filters. For more information, see Expression.

Valid values for MatchOptions for Dimensions are EQUALS and CASE_SENSITIVE.

Valid values for MatchOptions for CostCategories and Tags are EQUALS, ABSENT, and CASE_SENSITIVE. Default values are EQUALS and CASE_SENSITIVE.

" + "documentation":"

Filters Amazon Web Services costs by different dimensions. For example, you can specify SERVICE and LINKED_ACCOUNT and get the costs that are associated with that account's usage of that service. You can nest Expression objects to define any combination of dimension filters. For more information, see Expression.

The GetCostAndUsageWithResources operation requires that you either group by or filter by a ResourceId. It requires the Expression \"SERVICE = Amazon Elastic Compute Cloud - Compute\" in the filter.

Valid values for MatchOptions for Dimensions are EQUALS and CASE_SENSITIVE.

Valid values for MatchOptions for CostCategories and Tags are EQUALS, ABSENT, and CASE_SENSITIVE. Default values are EQUALS and CASE_SENSITIVE.

" }, "Metrics":{ "shape":"MetricNames", @@ -2771,7 +2819,7 @@ "members":{ "SearchString":{ "shape":"SearchString", - "documentation":"

The value that you want to search the filter values for.

If you don't specify a CostCategoryName, SearchString is used to filter Cost Category names that match the SearchString pattern. If you specify a CostCategoryName, SearchString is used to filter Cost Category values that match the SearchString pattern.

" + "documentation":"

The value that you want to search the filter values for.

If you don't specify a CostCategoryName, SearchString is used to filter cost category names that match the SearchString pattern. If you specify a CostCategoryName, SearchString is used to filter cost category values that match the SearchString pattern.

" }, "TimePeriod":{"shape":"DateInterval"}, "CostCategoryName":{"shape":"CostCategoryName"}, @@ -2807,11 +2855,11 @@ }, "CostCategoryNames":{ "shape":"CostCategoryNamesList", - "documentation":"

The names of the Cost Categories.

" + "documentation":"

The names of the cost categories.

" }, "CostCategoryValues":{ "shape":"CostCategoryValuesList", - "documentation":"

The Cost Category values.

If the CostCategoryName key isn't specified in the request, the CostCategoryValues fields aren't returned.

" + "documentation":"

The cost category values.

If the CostCategoryName key isn't specified in the request, the CostCategoryValues fields aren't returned.

" }, "ReturnSize":{ "shape":"PageSize", @@ -2894,7 +2942,7 @@ }, "Granularity":{ "shape":"Granularity", - "documentation":"

How granular you want the forecast to be. You can get 3 months of DAILY forecasts or 12 months of MONTHLY forecasts.

The GetCostForecast operation supports only DAILY and MONTHLY granularities.

" + "documentation":"

How granular you want the forecast to be. You can get 3 months of DAILY forecasts or 18 months of MONTHLY forecasts.

The GetCostForecast operation supports only DAILY and MONTHLY granularities.

" }, "Filter":{ "shape":"Expression", @@ -3083,7 +3131,7 @@ "documentation":"

The hardware specifications for the service instances that you want recommendations for, such as standard or convertible Amazon EC2 instances.

" }, "PageSize":{ - "shape":"NonNegativeInteger", + "shape":"RecommendationsPageSize", "documentation":"

The number of recommendations that you want returned in a single response object.

" }, "NextPageToken":{ @@ -3176,7 +3224,7 @@ "documentation":"

The specific service that you want recommendations for. The only valid value for GetRightsizingRecommendation is \"AmazonEC2\".

" }, "PageSize":{ - "shape":"NonNegativeInteger", + "shape":"RecommendationsPageSize", "documentation":"

The number of recommendations that you want returned in a single response object.

" }, "NextPageToken":{ @@ -3316,7 +3364,7 @@ "documentation":"

The token to retrieve the next set of results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.

" }, "PageSize":{ - "shape":"NonNegativeInteger", + "shape":"RecommendationsPageSize", "documentation":"

The number of recommendations that you want returned in a single response object.

" }, "LookbackPeriodInDays":{ @@ -3514,7 +3562,7 @@ }, "Granularity":{ "shape":"Granularity", - "documentation":"

How granular you want the forecast to be. You can get 3 months of DAILY forecasts or 12 months of MONTHLY forecasts.

The GetUsageForecast operation supports only DAILY and MONTHLY granularities.

" + "documentation":"

How granular you want the forecast to be. You can get 3 months of DAILY forecasts or 18 months of MONTHLY forecasts.

The GetUsageForecast operation supports only DAILY and MONTHLY granularities.

" }, "Filter":{ "shape":"Expression", @@ -3691,7 +3739,7 @@ "documentation":"

The token to retrieve the next set of results.

" }, "PageSize":{ - "shape":"NonNegativeInteger", + "shape":"AnalysesPageSize", "documentation":"

The number of analyses that you want returned in a single response object.

" }, "AnalysisIds":{ @@ -3784,7 +3832,7 @@ "members":{ "EffectiveOn":{ "shape":"ZonedDateTime", - "documentation":"

The date when the Cost Category was effective.

" + "documentation":"

The date when the cost category was effective.

" }, "NextToken":{ "shape":"NextPageToken", @@ -3794,6 +3842,10 @@ "shape":"CostCategoryMaxResults", "documentation":"

The number of entries a paginated response contains.

", "box":true + }, + "SupportedResourceTypes":{ + "shape":"ResourceTypesFilterInput", + "documentation":"

Filter cost category definitions that are supported by given resource types based on the latest version. If the filter is present, the result only includes cost categories that support the input resource type. If the filter isn't provided, no filtering is applied. The valid values are billing:rispgroupsharing.

" } } }, @@ -3802,7 +3854,7 @@ "members":{ "CostCategoryReferences":{ "shape":"CostCategoryReferencesList", - "documentation":"

A reference to a Cost Category that contains enough information to identify the Cost Category.

" + "documentation":"

A reference to a cost category that contains enough information to identify the cost category.

" }, "NextToken":{ "shape":"NextPageToken", @@ -3810,6 +3862,37 @@ } } }, + "ListCostCategoryResourceAssociationsRequest":{ + "type":"structure", + "members":{ + "CostCategoryArn":{ + "shape":"Arn", + "documentation":"

The unique identifier for your cost category.

" + }, + "NextToken":{ + "shape":"NextPageToken", + "documentation":"

The token to retrieve the next set of results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.

" + }, + "MaxResults":{ + "shape":"CostCategoryMaxResults", + "documentation":"

The number of entries a paginated response contains.

", + "box":true + } + } + }, + "ListCostCategoryResourceAssociationsResponse":{ + "type":"structure", + "members":{ + "CostCategoryResourceAssociations":{ + "shape":"CostCategoryResourceAssociations", + "documentation":"

A reference to a cost category association that contains information on an associated resource.

" + }, + "NextToken":{ + "shape":"NextPageToken", + "documentation":"

The token to retrieve the next set of results. Amazon Web Services provides the token when the response from a previous call has more results than the maximum page size.

" + } + } + }, "ListSavingsPlansPurchaseRecommendationGenerationRequest":{ "type":"structure", "members":{ @@ -3822,7 +3905,7 @@ "documentation":"

The IDs for each specific recommendation.

" }, "PageSize":{ - "shape":"NonNegativeInteger", + "shape":"RecommendationsPageSize", "documentation":"

The number of recommendations that you want returned in a single response object.

" }, "NextPageToken":{ @@ -4156,6 +4239,10 @@ "SizeFlexEligible":{ "shape":"GenericBoolean", "documentation":"

Determines whether the recommended reservation is size flexible.

" + }, + "DeploymentModel":{ + "shape":"GenericString", + "documentation":"

Determines whether the recommendation is for a reservation for RDS Custom.

" } }, "documentation":"

Details about the Amazon RDS reservations that Amazon Web Services recommends that you purchase.

" @@ -4324,6 +4411,11 @@ "CROSS_INSTANCE_FAMILY" ] }, + "RecommendationsPageSize":{ + "type":"integer", + "max":6000, + "min":0 + }, "RedshiftInstanceDetails":{ "type":"structure", "members":{ @@ -4731,6 +4823,20 @@ "min":0, "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" }, + "ResourceType":{ + "type":"string", + "pattern":"^[-a-zA-Z0-9/_]+:[-a-zA-Z0-9/_]+" + }, + "ResourceTypes":{ + "type":"list", + "member":{"shape":"ResourceType"} + }, + "ResourceTypesFilterInput":{ + "type":"list", + "member":{"shape":"ResourceType"}, + "max":5, + "min":0 + }, "ResourceUtilization":{ "type":"structure", "members":{ @@ -5659,7 +5765,8 @@ "enum":[ "COMPUTE_SP", "EC2_INSTANCE_SP", - "SAGEMAKER_SP" + "SAGEMAKER_SP", + "DATABASE_SP" ] }, "TagKey":{ @@ -5706,7 +5813,7 @@ }, "MatchOptions":{ "shape":"MatchOptions", - "documentation":"

The match options that you can use to filter your results. MatchOptions is only applicable for actions related to Cost Category. The default values for MatchOptions are EQUALS and CASE_SENSITIVE.

" + "documentation":"

The match options that you can use to filter your results. MatchOptions is only applicable for actions related to cost category. The default values for MatchOptions are EQUALS and CASE_SENSITIVE.

" } }, "documentation":"

The values that are available for a tag.

If Values and Key aren't specified, the ABSENT MatchOption is applied to all tags. That is, it's filtered on resources with no tags.

If Key is provided and Values isn't specified, the ABSENT MatchOption is applied to the tag Key only. That is, it's filtered on resources without the given tag key.

" @@ -5982,11 +6089,11 @@ "members":{ "CostCategoryArn":{ "shape":"Arn", - "documentation":"

The unique identifier for your Cost Category.

" + "documentation":"

The unique identifier for your cost category.

" }, "EffectiveStart":{ "shape":"ZonedDateTime", - "documentation":"

The Cost Category's effective start date. It can only be a billing start date (first day of the month). If the date isn't provided, it's the first day of the current month. Dates can't be before the previous twelve months, or in the future.

" + "documentation":"

The cost category's effective start date. It can only be a billing start date (first day of the month). If the date isn't provided, it's the first day of the current month. Dates can't be before the previous twelve months, or in the future.

" }, "RuleVersion":{"shape":"CostCategoryRuleVersion"}, "Rules":{ @@ -5996,7 +6103,7 @@ "DefaultValue":{"shape":"CostCategoryValue"}, "SplitChargeRules":{ "shape":"CostCategorySplitChargeRulesList", - "documentation":"

The split charge rules used to allocate your charges between your Cost Category values.

" + "documentation":"

The split charge rules used to allocate your charges between your cost category values.

" } } }, @@ -6005,11 +6112,11 @@ "members":{ "CostCategoryArn":{ "shape":"Arn", - "documentation":"

The unique identifier for your Cost Category.

" + "documentation":"

The unique identifier for your cost category.

" }, "EffectiveStart":{ "shape":"ZonedDateTime", - "documentation":"

The Cost Category's effective start date. It can only be a billing start date (first day of the month).

" + "documentation":"

The cost category's effective start date. It can only be a billing start date (first day of the month).

" } } }, diff --git a/awscli/botocore/data/cleanrooms/2022-02-17/service-2.json b/awscli/botocore/data/cleanrooms/2022-02-17/service-2.json index d6b95a05460f..76e1dcc30fb2 100644 --- a/awscli/botocore/data/cleanrooms/2022-02-17/service-2.json +++ b/awscli/botocore/data/cleanrooms/2022-02-17/service-2.json @@ -1456,6 +1456,25 @@ ], "documentation":"

Updates collaboration metadata and can only be called by the collaboration owner.

" }, + "UpdateCollaborationChangeRequest":{ + "name":"UpdateCollaborationChangeRequest", + "http":{ + "method":"PATCH", + "requestUri":"/collaborations/{collaborationIdentifier}/changeRequests/{changeRequestIdentifier}", + "responseCode":200 + }, + "input":{"shape":"UpdateCollaborationChangeRequestInput"}, + "output":{"shape":"UpdateCollaborationChangeRequestOutput"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Updates an existing collaboration change request. This operation allows approval actions for pending change requests in collaborations (APPROVE, DENY, CANCEL, COMMIT).

For change requests without automatic approval, a member in the collaboration can manually APPROVE or DENY a change request. The collaboration owner can manually CANCEL or COMMIT a change request.

" + }, "UpdateConfiguredAudienceModelAssociation":{ "name":"UpdateConfiguredAudienceModelAssociation", "http":{ @@ -2552,6 +2571,32 @@ "CLEAN_ROOMS_SQL" ] }, + "ApprovalStatus":{ + "type":"string", + "enum":[ + "APPROVED", + "DENIED", + "PENDING" + ] + }, + "ApprovalStatusDetails":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"ApprovalStatus", + "documentation":"

The approval status of a member's vote on the change request. Valid values are PENDING (if they haven't voted), APPROVED, or DENIED.

" + } + }, + "documentation":"

Contains detailed information about the approval state of a given member in the collaboration for a given collaboration change request.

" + }, + "ApprovalStatuses":{ + "type":"map", + "key":{"shape":"AccountId"}, + "value":{"shape":"ApprovalStatusDetails"}, + "max":50, + "min":1 + }, "AthenaDatabaseName":{ "type":"string", "max":128, @@ -2609,7 +2654,11 @@ }, "AutoApprovedChangeType":{ "type":"string", - "enum":["ADD_MEMBER"] + "enum":[ + "ADD_MEMBER", + "GRANT_RECEIVE_RESULTS_ABILITY", + "REVOKE_RECEIVE_RESULTS_ABILITY" + ] }, "AutoApprovedChangeTypeList":{ "type":"list", @@ -2941,6 +2990,15 @@ "max":10, "min":1 }, + "ChangeRequestAction":{ + "type":"string", + "enum":[ + "APPROVE", + "DENY", + "CANCEL", + "COMMIT" + ] + }, "ChangeRequestStatus":{ "type":"string", "enum":[ @@ -2957,6 +3015,10 @@ "member":{ "shape":"MemberChangeSpecification", "documentation":"

The member change specification when the change type is MEMBER.

" + }, + "collaboration":{ + "shape":"CollaborationChangeSpecification", + "documentation":"

The collaboration configuration changes being requested. Currently, this only supports modifying which change types are auto-approved for the collaboration.

" } }, "documentation":"

A union that contains the specification details for different types of changes.

", @@ -2964,11 +3026,19 @@ }, "ChangeSpecificationType":{ "type":"string", - "enum":["MEMBER"] + "enum":[ + "MEMBER", + "COLLABORATION" + ] }, "ChangeType":{ "type":"string", - "enum":["ADD_MEMBER"] + "enum":[ + "ADD_MEMBER", + "GRANT_RECEIVE_RESULTS_ABILITY", + "REVOKE_RECEIVE_RESULTS_ABILITY", + "EDIT_AUTO_APPROVED_CHANGE_TYPES" + ] }, "ChangeTypeList":{ "type":"list", @@ -3062,6 +3132,10 @@ "allowedResultRegions":{ "shape":"AllowedResultRegions", "documentation":"

The Amazon Web Services Regions where collaboration query results can be stored. Returns the list of Region identifiers that were specified when the collaboration was created. This list is used to enforce regional storage policies and compliance requirements.

" + }, + "isMetricsEnabled":{ + "shape":"Boolean", + "documentation":"

An indicator as to whether metrics are enabled for the collaboration.

When true, collaboration members can opt in to Amazon CloudWatch metrics for their membership queries.

" } }, "documentation":"

The multi-party data share environment. The collaboration contains metadata about its purpose and participants.

" @@ -3263,6 +3337,10 @@ "changes":{ "shape":"ChangeList", "documentation":"

The list of changes specified in this change request.

" + }, + "approvals":{ + "shape":"ApprovalStatuses", + "documentation":"

A list of approval details from collaboration members, including approval status and multi-party approval workflow information.

" } }, "documentation":"

Represents a request to modify a collaboration. Change requests enable structured modifications to collaborations after they have been created.

" @@ -3312,6 +3390,10 @@ "changes":{ "shape":"ChangeList", "documentation":"

Summary of the changes in this change request.

" + }, + "approvals":{ + "shape":"ApprovalStatuses", + "documentation":"

Summary of approval statuses from all collaboration members for this change request.

" } }, "documentation":"

Summary information about a collaboration change request.

" @@ -3320,6 +3402,16 @@ "type":"list", "member":{"shape":"CollaborationChangeRequestSummary"} }, + "CollaborationChangeSpecification":{ + "type":"structure", + "members":{ + "autoApprovedChangeTypes":{ + "shape":"AutoApprovedChangeTypeList", + "documentation":"

Defines requested updates to properties of the collaboration. Currently, this only supports modifying which change types are auto-approved for the collaboration.

" + } + }, + "documentation":"

Defines the specific changes being requested for a collaboration, including configuration modifications and approval requirements.

" + }, "CollaborationConfiguredAudienceModelAssociation":{ "type":"structure", "required":[ @@ -4916,6 +5008,10 @@ "allowedResultRegions":{ "shape":"AllowedResultRegions", "documentation":"

The Amazon Web Services Regions where collaboration query results can be stored. When specified, results can only be written to these Regions. This parameter enables you to meet your compliance and data governance requirements, and implement regional data governance policies.

" + }, + "isMetricsEnabled":{ + "shape":"Boolean", + "documentation":"

An indicator as to whether metrics have been enabled or disabled for the collaboration.

When true, collaboration members can opt in to Amazon CloudWatch metrics for their membership queries. The default value is false.

" } } }, @@ -5273,6 +5369,10 @@ "paymentConfiguration":{ "shape":"MembershipPaymentConfiguration", "documentation":"

The payment responsibilities accepted by the collaboration member.

Not required if the collaboration member has the member ability to run queries.

Required if the collaboration member doesn't have the member ability to run queries but is configured as a payer by the collaboration creator.

" + }, + "isMetricsEnabled":{ + "shape":"Boolean", + "documentation":"

An indicator as to whether Amazon CloudWatch metrics have been enabled or disabled for the membership.

Amazon CloudWatch metrics are only available when the collaboration has metrics enabled. This option can be set by collaboration members who have the ability to run queries (analysis runners) or by members who are configured as payers.

When true, metrics about query execution are collected in Amazon CloudWatch. The default value is false.

" } } }, @@ -8246,6 +8346,10 @@ "paymentConfiguration":{ "shape":"MembershipPaymentConfiguration", "documentation":"

The payment responsibilities accepted by the collaboration member.

" + }, + "isMetricsEnabled":{ + "shape":"Boolean", + "documentation":"

An indicator as to whether Amazon CloudWatch metrics are enabled for the membership.

When true, metrics about query execution are collected in Amazon CloudWatch.

" } }, "documentation":"

The membership object.

" @@ -8577,7 +8681,7 @@ }, "ParameterValue":{ "type":"string", - "max":250, + "max":1000, "min":0 }, "PaymentConfiguration":{ @@ -10714,6 +10818,39 @@ } } }, + "UpdateCollaborationChangeRequestInput":{ + "type":"structure", + "required":[ + "collaborationIdentifier", + "changeRequestIdentifier", + "action" + ], + "members":{ + "collaborationIdentifier":{ + "shape":"CollaborationIdentifier", + "documentation":"

The unique identifier of the collaboration that contains the change request to be updated.

", + "location":"uri", + "locationName":"collaborationIdentifier" + }, + "changeRequestIdentifier":{ + "shape":"CollaborationChangeRequestIdentifier", + "documentation":"

The unique identifier of the specific change request to be updated within the collaboration.

", + "location":"uri", + "locationName":"changeRequestIdentifier" + }, + "action":{ + "shape":"ChangeRequestAction", + "documentation":"

The action to perform on the change request. Valid values include APPROVE (approve the change), DENY (reject the change), CANCEL (cancel the request), and COMMIT (commit after the request is approved).

For change requests without automatic approval, a member in the collaboration can manually APPROVE or DENY a change request. The collaboration owner can manually CANCEL or COMMIT a change request.

" + } + } + }, + "UpdateCollaborationChangeRequestOutput":{ + "type":"structure", + "required":["collaborationChangeRequest"], + "members":{ + "collaborationChangeRequest":{"shape":"CollaborationChangeRequest"} + } + }, "UpdateCollaborationInput":{ "type":"structure", "required":["collaborationIdentifier"], diff --git a/awscli/botocore/data/cleanroomsml/2023-09-06/service-2.json b/awscli/botocore/data/cleanroomsml/2023-09-06/service-2.json index 007bf5c7a5ad..7641646d28b9 100644 --- a/awscli/botocore/data/cleanroomsml/2023-09-06/service-2.json +++ b/awscli/botocore/data/cleanroomsml/2023-09-06/service-2.json @@ -5426,8 +5426,7 @@ "type":"structure", "required":[ "epsilon", - "maxMembershipInferenceAttackScore", - "columnClassification" + "maxMembershipInferenceAttackScore" ], "members":{ "epsilon":{ @@ -5628,17 +5627,17 @@ "NONE" ] }, - "ParameterKey":{ + "ParameterMap":{ + "type":"map", + "key":{"shape":"ParameterName"}, + "value":{"shape":"ParameterValue"} + }, + "ParameterName":{ "type":"string", "max":100, "min":1, "pattern":"[0-9a-zA-Z_]+" }, - "ParameterMap":{ - "type":"map", - "key":{"shape":"ParameterKey"}, - "value":{"shape":"ParameterValue"} - }, "ParameterValue":{ "type":"string", "max":250, @@ -5833,7 +5832,7 @@ }, "volumeSizeInGB":{ "shape":"ResourceConfigVolumeSizeInGBInteger", - "documentation":"

The maximum size of the instance that is used to train the model.

" + "documentation":"

The volume size of the instance that is used to train the model. Please see EC2 volume limit for volume size limitations on different instance types.

" } }, "documentation":"

Information about the EC2 resources that are used to train the model.

" @@ -5940,6 +5939,23 @@ "NONE" ] }, + "SparkProperties":{ + "type":"map", + "key":{"shape":"SparkPropertyKey"}, + "value":{"shape":"SparkPropertyValue"}, + "max":50, + "min":0 + }, + "SparkPropertyKey":{ + "type":"string", + "max":200, + "min":1 + }, + "SparkPropertyValue":{ + "type":"string", + "max":500, + "min":0 + }, "StartAudienceExportJobRequest":{ "type":"structure", "required":[ @@ -6829,7 +6845,8 @@ "number":{ "shape":"WorkerComputeConfigurationNumberInteger", "documentation":"

The number of compute workers that are used.

" - } + }, + "properties":{"shape":"WorkerComputeConfigurationProperties"} }, "documentation":"

Configuration information about the compute workers that perform the transform job.

" }, @@ -6839,6 +6856,17 @@ "max":400, "min":2 }, + "WorkerComputeConfigurationProperties":{ + "type":"structure", + "members":{ + "spark":{ + "shape":"SparkProperties", + "documentation":"

The Spark configuration properties for SQL workloads. This map contains key-value pairs that configure Apache Spark settings to optimize performance for your data processing jobs. You can specify up to 50 Spark properties, with each key being 1-200 characters and each value being 0-500 characters. These properties allow you to adjust compute capacity for large datasets and complex workloads.

" + } + }, + "documentation":"

The configuration properties for the worker compute environment. These properties allow you to customize the compute settings for your Clean Rooms workloads.

", + "union":true + }, "WorkerComputeType":{ "type":"string", "enum":[ diff --git a/awscli/botocore/data/cloudwatch/2010-08-01/service-2.json b/awscli/botocore/data/cloudwatch/2010-08-01/service-2.json index 3a97b9faef81..11399fe7d7a0 100644 --- a/awscli/botocore/data/cloudwatch/2010-08-01/service-2.json +++ b/awscli/botocore/data/cloudwatch/2010-08-01/service-2.json @@ -2,13 +2,20 @@ "version":"2.0", "metadata":{ "apiVersion":"2010-08-01", + "awsQueryCompatible":{}, "endpointPrefix":"monitoring", - "protocol":"query", - "protocols":["query"], + "jsonVersion":"1.0", + "protocol":"smithy-rpc-v2-cbor", + "protocols":[ + "smithy-rpc-v2-cbor", + "json", + "query" + ], "serviceAbbreviation":"CloudWatch", "serviceFullName":"Amazon CloudWatch", "serviceId":"CloudWatch", "signatureVersion":"v4", + "targetPrefix":"GraniteServiceVersion20100801", "uid":"monitoring-2010-08-01", "xmlNamespace":"http://monitoring.amazonaws.com/doc/2010-08-01/", "auth":["aws.auth#sigv4"] @@ -1069,6 +1076,7 @@ "Message":{"shape":"ErrorMessage"} }, "documentation":"

This operation attempted to create a resource that already exists.

", + "error":{"httpStatusCode":409}, "exception":true }, "ContributorAttributes":{ diff --git a/awscli/botocore/data/config/2014-11-12/service-2.json b/awscli/botocore/data/config/2014-11-12/service-2.json index 14fcde570b11..faec2eb8c6e2 100644 --- a/awscli/botocore/data/config/2014-11-12/service-2.json +++ b/awscli/botocore/data/config/2014-11-12/service-2.json @@ -2612,7 +2612,7 @@ }, "roleARN":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role assumed by Config and used by the specified configuration recorder.

The server will reject a request without a defined roleARN for the configuration recorder

While the API model does not require this field, the server will reject a request without a defined roleARN for the configuration recorder.

Policies and compliance results

IAM policies and other policies managed in Organizations can impact whether Config has permissions to record configuration changes for your resources. Additionally, rules directly evaluate the configuration of a resource and rules don't take into account these policies when running evaluations. Make sure that the policies in effect align with how you intend to use Config.

Keep Minimum Permisions When Reusing an IAM role

If you use an Amazon Web Services service that uses Config, such as Security Hub or Control Tower, and an IAM role has already been created, make sure that the IAM role that you use when setting up Config keeps the same minimum permissions as the pre-existing IAM role. You must do this to ensure that the other Amazon Web Services service continues to run as expected.

For example, if Control Tower has an IAM role that allows Config to read S3 objects, make sure that the same permissions are granted to the IAM role you use when setting up Config. Otherwise, it may interfere with how Control Tower operates.

The service-linked IAM role for Config must be used for service-linked configuration recorders

For service-linked configuration recorders, you must use the service-linked IAM role for Config: AWSServiceRoleForConfig.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role assumed by Config and used by the specified configuration recorder.

The server will reject a request without a defined roleARN for the configuration recorder

While the API model does not require this field, the server will reject a request without a defined roleARN for the configuration recorder.

Policies and compliance results

IAM policies and other policies managed in Organizations can impact whether Config has permissions to record configuration changes for your resources. Additionally, rules directly evaluate the configuration of a resource and rules don't take into account these policies when running evaluations. Make sure that the policies in effect align with how you intend to use Config.

Keep Minimum Permissions When Reusing an IAM role

If you use an Amazon Web Services service that uses Config, such as Security Hub CSPM or Control Tower, and an IAM role has already been created, make sure that the IAM role that you use when setting up Config keeps the same minimum permissions as the pre-existing IAM role. You must do this to ensure that the other Amazon Web Services service continues to run as expected.

For example, if Control Tower has an IAM role that allows Config to read S3 objects, make sure that the same permissions are granted to the IAM role you use when setting up Config. Otherwise, it may interfere with how Control Tower operates.

The service-linked IAM role for Config must be used for service-linked configuration recorders

For service-linked configuration recorders, you must use the service-linked IAM role for Config: AWSServiceRoleForConfig.

" }, "recordingGroup":{ "shape":"RecordingGroup", @@ -8022,7 +8022,98 @@ "AWS::S3Express::DirectoryBucket", "AWS::SageMaker::InferenceExperiment", "AWS::SecurityHub::Standard", - "AWS::Transfer::Profile" + "AWS::Transfer::Profile", + "AWS::CloudFormation::StackSet", + "AWS::MediaPackageV2::Channel", + "AWS::S3::AccessGrantsLocation", + "AWS::S3::AccessGrant", + "AWS::S3::AccessGrantsInstance", + "AWS::EMRServerless::Application", + "AWS::Config::AggregationAuthorization", + "AWS::Bedrock::ApplicationInferenceProfile", + "AWS::ApiGatewayV2::Integration", + "AWS::SageMaker::MlflowTrackingServer", + "AWS::SageMaker::ModelBiasJobDefinition", + "AWS::SecretsManager::RotationSchedule", + "AWS::Deadline::QueueFleetAssociation", + "AWS::ECR::RepositoryCreationTemplate", + "AWS::CloudFormation::LambdaHook", + "AWS::EC2::SubnetNetworkAclAssociation", + "AWS::ApiGateway::UsagePlan", + "AWS::AppConfig::Extension", + "AWS::Deadline::Fleet", + "AWS::EMR::Studio", + "AWS::S3Tables::TableBucket", + "AWS::CloudFront::RealtimeLogConfig", + "AWS::BackupGateway::Hypervisor", + "AWS::BCMDataExports::Export", + "AWS::CloudFormation::GuardHook", + "AWS::CloudFront::PublicKey", + "AWS::CloudTrail::EventDataStore", + "AWS::EntityResolution::IdMappingWorkflow", + "AWS::EntityResolution::SchemaMapping", + "AWS::IoT::DomainConfiguration", + "AWS::PCAConnectorAD::DirectoryRegistration", + "AWS::RDS::Integration", + "AWS::Config::ConformancePack", + "AWS::RolesAnywhere::Profile", + "AWS::CodeArtifact::Domain", + "AWS::Backup::RestoreTestingPlan", + "AWS::Config::StoredQuery", + "AWS::SageMaker::DataQualityJobDefinition", + "AWS::SageMaker::ModelExplainabilityJobDefinition", + "AWS::SageMaker::ModelQualityJobDefinition", + "AWS::SageMaker::StudioLifecycleConfig", + "AWS::SES::DedicatedIpPool", + "AWS::SES::MailManagerTrafficPolicy", + "AWS::SSM::ResourceDataSync", + "AWS::BedrockAgentCore::Runtime", + "AWS::BedrockAgentCore::BrowserCustom", + "AWS::ElasticLoadBalancingV2::TargetGroup", + 
"AWS::EMRContainers::VirtualCluster", + "AWS::EntityResolution::MatchingWorkflow", + "AWS::IoTCoreDeviceAdvisor::SuiteDefinition", + "AWS::EC2::SecurityGroupVpcAssociation", + "AWS::EC2::VerifiedAccessInstance", + "AWS::KafkaConnect::CustomPlugin", + "AWS::NetworkManager::TransitGatewayPeering", + "AWS::OpenSearchServerless::SecurityConfig", + "AWS::Redshift::Integration", + "AWS::RolesAnywhere::TrustAnchor", + "AWS::Route53Profiles::ProfileAssociation", + "AWS::SSMIncidents::ResponsePlan", + "AWS::Transfer::Server", + "AWS::Glue::Database", + "AWS::Organizations::OrganizationalUnit", + "AWS::EC2::IPAMPoolCidr", + "AWS::EC2::VPCGatewayAttachment", + "AWS::Bedrock::Prompt", + "AWS::Comprehend::Flywheel", + "AWS::DataSync::Agent", + "AWS::MediaTailor::LiveSource", + "AWS::MSK::ServerlessCluster", + "AWS::IoTSiteWise::Asset", + "AWS::B2BI::Capability", + "AWS::CloudFront::KeyValueStore", + "AWS::Deadline::Monitor", + "AWS::GuardDuty::MalwareProtectionPlan", + "AWS::Location::APIKey", + "AWS::MediaPackageV2::OriginEndpoint", + "AWS::PCAConnectorAD::Connector", + "AWS::S3Tables::TableBucketPolicy", + "AWS::SecretsManager::ResourcePolicy", + "AWS::SSMContacts::Contact", + "AWS::IoT::ThingGroup", + "AWS::ImageBuilder::LifecyclePolicy", + "AWS::GameLift::Build", + "AWS::ECR::ReplicationConfiguration", + "AWS::EC2::SubnetCidrBlock", + "AWS::Connect::SecurityProfile", + "AWS::CleanRoomsML::TrainingDataset", + "AWS::AppStream::AppBlockBuilder", + "AWS::Route53::DNSSEC", + "AWS::SageMaker::UserProfile", + "AWS::ApiGateway::Method" ] }, "ResourceTypeList":{ diff --git a/awscli/botocore/data/connect/2017-08-08/paginators-1.json b/awscli/botocore/data/connect/2017-08-08/paginators-1.json index 05901c621117..64bb2588ca99 100644 --- a/awscli/botocore/data/connect/2017-08-08/paginators-1.json +++ b/awscli/botocore/data/connect/2017-08-08/paginators-1.json @@ -529,6 +529,22 @@ ], "output_token": "NextToken", "result_key": "Workspaces" + }, + "ListEntitySecurityProfiles": { + 
"input_token": "NextToken", + "limit_key": "MaxResults", + "output_token": "NextToken", + "result_key": "SecurityProfiles" + }, + "ListSecurityProfileFlowModules": { + "input_token": "NextToken", + "limit_key": "MaxResults", + "non_aggregate_keys": [ + "LastModifiedRegion", + "LastModifiedTime" + ], + "output_token": "NextToken", + "result_key": "AllowedFlowModules" } } } diff --git a/awscli/botocore/data/connect/2017-08-08/service-2.json b/awscli/botocore/data/connect/2017-08-08/service-2.json index 2ba0933d65ce..5cab75626cbc 100644 --- a/awscli/botocore/data/connect/2017-08-08/service-2.json +++ b/awscli/botocore/data/connect/2017-08-08/service-2.json @@ -711,7 +711,7 @@ {"shape":"InvalidParameterException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Creates a new data table with the specified properties. Supports the creation of all table properties except for attributes and values. A table with no attributes and values is a valid state for a table. The number of tables per instance is limited to 100 per instance. Customers can request an increase by using AWS Service Quotas.

" + "documentation":"

Creates a new data table with the specified properties. Supports the creation of all table properties except for attributes and values. A table with no attributes and values is a valid state for a table. The number of tables per instance is limited to 100 per instance. Customers can request an increase by using Amazon Web Services Service Quotas.

" }, "CreateDataTableAttribute":{ "name":"CreateDataTableAttribute", @@ -2665,7 +2665,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InvalidParameterException"} ], - "documentation":"

Evaluates values at the time of the request and returns them. It considers the request's timezone or the table's timezone, in that order, when accessing time based tables. When a value is accessed, the accessor's identity and the time of access are saved alongside the value to help identify values that are actively in use. The term \"Batch\" is not included in the operation name since it does not meet all the criteria for a batch operation as specified in Batch Operations: AWS API Standards.

" + "documentation":"

Evaluates values at the time of the request and returns them. It considers the request's timezone or the table's timezone, in that order, when accessing time based tables. When a value is accessed, the accessor's identity and the time of access are saved alongside the value to help identify values that are actively in use. The term \"Batch\" is not included in the operation name since it does not meet all the criteria for a batch operation as specified in Batch Operations: Amazon Web Services API Standards.

" }, "GetAttachedFile":{ "name":"GetAttachedFile", @@ -3180,7 +3180,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InvalidParameterException"} ], - "documentation":"

Returns all attributes for a specified data table. A maximum of 100 attributes per data table is allowed. Customers can request an increase by using AWS Service Quotas. The response can be filtered by specific attribute IDs for CloudFormation integration.

" + "documentation":"

Returns all attributes for a specified data table. A maximum of 100 attributes per data table is allowed. Customers can request an increase by using Amazon Web Services Service Quotas. The response can be filtered by specific attribute IDs for CloudFormation integration.

" }, "ListDataTablePrimaryValues":{ "name":"ListDataTablePrimaryValues", @@ -4090,7 +4090,8 @@ "errors":[ {"shape":"InvalidRequestException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"InternalServiceException"} + {"shape":"InternalServiceException"}, + {"shape":"InvalidActiveRegionException"} ], "documentation":"

When a contact is being recorded, and the recording has been suspended using SuspendContactRecording, this API resumes recording whatever recording is selected in the flow configuration: call, screen, or both. If only call recording or only screen recording is enabled, then it would resume.

Voice and screen recordings are supported.

" }, @@ -4626,7 +4627,8 @@ {"shape":"InvalidRequestException"}, {"shape":"InvalidParameterException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"InternalServiceException"} + {"shape":"InternalServiceException"}, + {"shape":"InvalidActiveRegionException"} ], "documentation":"

Starts recording the contact:

StartContactRecording is a one-time action. For example, if you use StopContactRecording to stop recording an ongoing call, you can't use StartContactRecording to restart it. For scenarios where the recording has started and you want to suspend and resume it, such as when collecting sensitive information (for example, a credit card number), use SuspendContactRecording and ResumeContactRecording.

You can use this API to override the recording behavior configured in the Set recording behavior block.

Only voice recordings are supported at this time.

" }, @@ -4683,7 +4685,7 @@ {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Initiates a new outbound SMS contact to a customer. Response of this API provides the ContactId of the outbound SMS contact created.

SourceEndpoint only supports Endpoints with CONNECT_PHONENUMBER_ARN as Type and DestinationEndpoint only supports Endpoints with TELEPHONE_NUMBER as Type. ContactFlowId initiates the flow to manage the new SMS contact created.

This API can be used to initiate outbound SMS contacts for an agent, or it can also deflect an ongoing contact to an outbound SMS contact by using the StartOutboundChatContact Flow Action.

For more information about using SMS in Amazon Connect, see the following topics in the Amazon Connect Administrator Guide:

" + "documentation":"

Initiates a new outbound SMS or WhatsApp contact to a customer. Response of this API provides the ContactId of the outbound SMS or WhatsApp contact created.

SourceEndpoint only supports Endpoints with CONNECT_PHONENUMBER_ARN as Type and DestinationEndpoint only supports Endpoints with TELEPHONE_NUMBER as Type. ContactFlowId initiates the flow to manage the new contact created.

This API can be used to initiate outbound SMS or WhatsApp contacts for an agent, or it can also deflect an ongoing contact to an outbound SMS or WhatsApp contact by using the StartOutboundChatContact Flow Action.

For more information about using SMS or WhatsApp in Amazon Connect, see the following topics in the Amazon Connect Administrator Guide:

" }, "StartOutboundEmailContact":{ "name":"StartOutboundEmailContact", @@ -4789,7 +4791,8 @@ {"shape":"ContactNotFoundException"}, {"shape":"InvalidParameterException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"InternalServiceException"} + {"shape":"InternalServiceException"}, + {"shape":"InvalidActiveRegionException"} ], "documentation":"

Ends the specified contact. Use this API to stop queued callbacks. It does not work for voice contacts that use the following initiation methods:

Chat and task contacts can be terminated in any state, regardless of initiation method.

" }, @@ -4822,7 +4825,8 @@ "errors":[ {"shape":"InvalidRequestException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"InternalServiceException"} + {"shape":"InternalServiceException"}, + {"shape":"InvalidActiveRegionException"} ], "documentation":"

Stops recording a call when a contact is being recorded. StopContactRecording is a one-time action. If you use StopContactRecording to stop recording an ongoing call, you can't use StartContactRecording to restart it. For scenarios where the recording has started and you want to suspend it for sensitive information (for example, to collect a credit card number), and then restart it, use SuspendContactRecording and ResumeContactRecording.

Only voice recordings are supported at this time.

" }, @@ -4870,7 +4874,8 @@ "errors":[ {"shape":"InvalidRequestException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"InternalServiceException"} + {"shape":"InternalServiceException"}, + {"shape":"InvalidActiveRegionException"} ], "documentation":"

When a contact is being recorded, this API suspends recording whatever is selected in the flow configuration: call (IVR or agent), screen, or both. If only call recording or only screen recording is enabled, then it would be suspended. For example, you might suspend the screen recording while collecting sensitive information, such as a credit card number. Then use ResumeContactRecording to restart recording the screen.

The period of time that the recording is suspended is filled with silence in the final recording.

Voice (IVR, agent) and screen recordings are supported.

" }, @@ -4887,7 +4892,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServiceException"}, {"shape":"InvalidParameterException"}, - {"shape":"ThrottlingException"} + {"shape":"ThrottlingException"}, + {"shape":"InvalidActiveRegionException"} ], "documentation":"

Adds the specified tags to the contact resource. For more information about this API is used, see Set up granular billing for a detailed view of your Amazon Connect usage.

", "idempotent":true @@ -4940,7 +4946,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServiceException"}, {"shape":"InvalidParameterException"}, - {"shape":"ThrottlingException"} + {"shape":"ThrottlingException"}, + {"shape":"InvalidActiveRegionException"} ], "documentation":"

Removes the specified tags from the contact resource. For more information about this API is used, see Set up granular billing for a detailed view of your Amazon Connect usage.

", "idempotent":true @@ -5010,7 +5017,8 @@ {"shape":"InternalServiceException"}, {"shape":"ThrottlingException"}, {"shape":"AccessDeniedException"}, - {"shape":"ConflictException"} + {"shape":"ConflictException"}, + {"shape":"InvalidActiveRegionException"} ], "documentation":"

This API is in preview release for Amazon Connect and is subject to change.

Adds or updates user-defined contact information associated with the specified contact. At least one field to be updated must be present in the request.

You can add or update user-defined contact information for both ongoing and completed contacts.

" }, @@ -5026,7 +5034,8 @@ {"shape":"InvalidRequestException"}, {"shape":"InvalidParameterException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"InternalServiceException"} + {"shape":"InternalServiceException"}, + {"shape":"InvalidActiveRegionException"} ], "documentation":"

Creates or updates user-defined contact attributes associated with the specified contact.

You can create or update user-defined attributes for both ongoing and completed contacts. For example, while the call is active, you can update the customer's name or the reason the customer called. You can add notes about steps that the agent took during the call that display to the next agent that takes the call. You can also update attributes for a contact using data from your CRM application and save the data with the contact in Amazon Connect. You could also flag calls for additional analysis, such as legal review or to identify abusive callers.

Contact attributes are available in Amazon Connect for 24 months, and are then deleted. For information about contact record retention and the maximum size of the contact record attributes section, see Feature specifications in the Amazon Connect Administrator Guide.

" }, @@ -5172,7 +5181,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServiceException"}, {"shape":"ThrottlingException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"InvalidActiveRegionException"} ], "documentation":"

Updates routing priority and age on the contact (QueuePriority and QueueTimeAdjustmentInSeconds). These properties can be used to change a customer's position in the queue. For example, you can move a contact to the back of the queue by setting a lower routing priority relative to other contacts in queue; or you can move a contact to the front of the queue by increasing the routing age which will make the contact look artificially older and therefore higher up in the first-in-first-out routing order. Note that adjusting the routing age of a contact affects only its position in queue, and not its actual queue wait time as reported through metrics. These properties can also be updated by using the Set routing priority / age flow block.

Either QueuePriority or QueueTimeAdjustmentInSeconds should be provided within the request body, but not both.

" }, @@ -6091,6 +6101,15 @@ } } }, + "ActiveRegion":{ + "type":"string", + "max":1024, + "min":0 + }, + "ActiveRegionList":{ + "type":"list", + "member":{"shape":"RegionName"} + }, "AdditionalEmailRecipients":{ "type":"structure", "members":{ @@ -9080,6 +9099,10 @@ "NextContacts":{ "shape":"NextContacts", "documentation":"

List of next contact entries for the contact.

" + }, + "GlobalResiliencyMetadata":{ + "shape":"GlobalResiliencyMetadata", + "documentation":"

Information about the global resiliency configuration for the contact, including traffic distribution details.

" } }, "documentation":"

Contains information about a contact.

" @@ -9293,33 +9316,33 @@ "members":{ "TagConditions":{ "shape":"TagAndConditionList", - "documentation":"

Tag-based conditions for contact flow filtering.

" + "documentation":"

Tag-based conditions for contact flow filtering.

" }, "ContactFlowTypeCondition":{ "shape":"ContactFlowTypeCondition", - "documentation":"

Contact flow type condition.

" + "documentation":"

Contact flow type condition.

" } }, - "documentation":"

A list of conditions which would be applied together with an AND condition.

" + "documentation":"

A list of conditions which would be applied together with an AND condition.

" }, "ContactFlowAttributeFilter":{ "type":"structure", "members":{ "OrConditions":{ "shape":"ContactFlowAttributeOrConditionList", - "documentation":"

A list of conditions which would be applied together with an OR condition.

" + "documentation":"

A list of conditions which would be applied together with an OR condition.

" }, "AndCondition":{ "shape":"ContactFlowAttributeAndCondition", - "documentation":"

A list of conditions which would be applied together with a AND condition.

" + "documentation":"

A list of conditions which would be applied together with a AND condition.

" }, "TagCondition":{"shape":"TagCondition"}, "ContactFlowTypeCondition":{ "shape":"ContactFlowTypeCondition", - "documentation":"

Contact flow type condition within attribute filter.

" + "documentation":"

Contact flow type condition within attribute filter.

" } }, - "documentation":"

Filter for contact flow attributes with multiple condition types.

" + "documentation":"

Filter for contact flow attributes with multiple condition types.

" }, "ContactFlowAttributeOrConditionList":{ "type":"list", @@ -9716,10 +9739,10 @@ "members":{ "ContactFlowType":{ "shape":"ContactFlowType", - "documentation":"

Contact flow type of the contact flow type condition.

" + "documentation":"

Contact flow type of the contact flow type condition.

" } }, - "documentation":"

The contact flow type condition.

" + "documentation":"

The contact flow type condition.

" }, "ContactFlowTypes":{ "type":"list", @@ -9925,7 +9948,11 @@ "shape":"Name", "documentation":"

Indicates name of the contact.

" }, - "RoutingCriteria":{"shape":"RoutingCriteria"} + "RoutingCriteria":{"shape":"RoutingCriteria"}, + "GlobalResiliencyMetadata":{ + "shape":"GlobalResiliencyMetadata", + "documentation":"

Additional routing information for contacts created in ACGR instances.

" + } }, "documentation":"

Information of returned contact.

" }, @@ -10046,14 +10073,14 @@ }, "TagCondition":{"shape":"TagCondition"} }, - "documentation":"

An object that can be used to specify Tag conditions inside the SearchFilter. This accepts an OR or AND (List of List) input where:

" + "documentation":"

An object that can be used to specify Tag conditions inside the SearchFilter. This accepts an OR or AND (List of List) input where:

" }, "ControlPlaneTagFilter":{ "type":"structure", "members":{ "OrConditions":{ "shape":"TagOrConditionList", - "documentation":"

A list of conditions which would be applied together with an OR condition.

" + "documentation":"

A list of conditions which would be applied together with an OR condition.

" }, "AndConditions":{ "shape":"TagAndConditionList", @@ -10061,10 +10088,10 @@ }, "TagCondition":{ "shape":"TagCondition", - "documentation":"

A leaf node condition which can be used to specify a tag condition.

" + "documentation":"

A leaf node condition which can be used to specify a tag condition.

" } }, - "documentation":"

An object that can be used to specify Tag conditions inside the SearchFilter. This accepts an OR of AND (List of List) input where:

" + "documentation":"

An object that can be used to specify Tag conditions inside the SearchFilter. This accepts an OR of AND (List of List) input where:

" }, "ControlPlaneUserAttributeFilter":{ "type":"structure", @@ -10080,7 +10107,7 @@ "TagCondition":{"shape":"TagCondition"}, "HierarchyGroupCondition":{"shape":"HierarchyGroupCondition"} }, - "documentation":"

An object that can be used to specify Tag conditions or Hierarchy Group conditions inside the SearchFilter.

This accepts an OR of AND (List of List) input where:

Only one field can be populated. Maximum number of allowed Tag conditions is 25. Maximum number of allowed Hierarchy Group conditions is 20.

" + "documentation":"

An object that can be used to specify Tag conditions or Hierarchy Group conditions inside the SearchFilter.

This accepts an OR of AND (List of List) input where:

Only one field can be populated. Maximum number of allowed Tag conditions is 25. Maximum number of allowed Hierarchy Group conditions is 20.

" }, "Count":{"type":"integer"}, "CreateAgentStatusRequest":{ @@ -12030,12 +12057,16 @@ "shape":"CurrentMetricName", "documentation":"

The name of the metric.

" }, + "MetricId":{ + "shape":"CurrentMetricId", + "documentation":"

Out of the box current metrics or custom metrics can be referenced via this field. This field is a valid Amazon Connect ARN or a UUID.

" + }, "Unit":{ "shape":"Unit", - "documentation":"

The unit for the metric.

" + "documentation":"

The Unit parameter is not supported for custom metrics.

The unit for the metric.

" } }, - "documentation":"

Contains information about a real-time metric. For a description of each metric, see Metrics definitions in the Amazon Connect Administrator Guide.

" + "documentation":"

Contains information about a real-time metric. For a description of each metric, see Metrics definitions in the Amazon Connect Administrator Guide.

Only one of either the Name or MetricId is required.

" }, "CurrentMetricData":{ "type":"structure", @@ -12056,6 +12087,10 @@ "type":"list", "member":{"shape":"CurrentMetricData"} }, + "CurrentMetricId":{ + "type":"string", + "pattern":"^([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})|(arn:[a-z0-9-]+:connect:[a-z0-9-]+:(?:([0-9]{12}):instance/[a-z0-9-]+/metric/[a-z0-9-]+(?::[a-z0-9-]+)?|aws:metric/[A-Z_]+))$" + }, "CurrentMetricName":{ "type":"string", "documentation":"

The current metric names.

", @@ -12230,7 +12265,7 @@ }, "LastModifiedRegion":{ "shape":"RegionName", - "documentation":"

The AWS region where the data table was last modified, used for region replication.

" + "documentation":"

The Amazon Web Services Region where the data table was last modified, used for region replication.

" }, "Tags":{ "shape":"TagMap", @@ -12298,7 +12333,7 @@ }, "LastModifiedRegion":{ "shape":"RegionName", - "documentation":"

The AWS region where this attribute was last modified, used for region replication.

" + "documentation":"

The Amazon Web Services Region where this attribute was last modified, used for region replication.

" }, "Validation":{ "shape":"Validation", @@ -12699,15 +12734,15 @@ }, "MinValue":{ "shape":"DateTimeFormat", - "documentation":"

A minimum value of the property.

" + "documentation":"

A minimum value of the property.

" }, "MaxValue":{ "shape":"DateTimeFormat", - "documentation":"

A maximum value of the property.

" + "documentation":"

A maximum value of the property.

" }, "ComparisonType":{ "shape":"DateTimeComparisonType", - "documentation":"

Datetime property comparison type.

" + "documentation":"

Datetime property comparison type.

" } }, "documentation":"

A datetime search condition for Search APIs.

" @@ -12797,7 +12832,7 @@ }, "ComparisonType":{ "shape":"DecimalComparisonType", - "documentation":"

The type of comparison to be made when evaluating the decimal condition.

" + "documentation":"

The type of comparison to be made when evaluating the decimal condition.

" } }, "documentation":"

A decimal search condition for Search APIs.

" @@ -14783,6 +14818,14 @@ "AgentStatus":{ "shape":"AgentStatusIdentifier", "documentation":"

Information about the agent status assigned to the user.

" + }, + "Subtype":{ + "shape":"Subtype", + "documentation":"

The subtype of the channel used for the contact.

" + }, + "ValidationTestType":{ + "shape":"ValidationTestType", + "documentation":"

The testing and simulation type.

" } }, "documentation":"

Contains information about the dimensions for a set of metrics.

" @@ -15294,6 +15337,16 @@ }, "documentation":"

Information about the call disconnect experience.

" }, + "DisconnectOnCustomerExit":{ + "type":"list", + "member":{"shape":"DisconnectOnCustomerExitParticipantType"}, + "max":1, + "min":1 + }, + "DisconnectOnCustomerExitParticipantType":{ + "type":"string", + "enum":["AGENT"] + }, "DisconnectReason":{ "type":"structure", "members":{ @@ -17875,6 +17928,14 @@ "AgentStatuses":{ "shape":"AgentStatuses", "documentation":"

A list of up to 50 agent status IDs or ARNs.

" + }, + "Subtypes":{ + "shape":"Subtypes", + "documentation":"

A list of up to 10 subtypes can be provided.

" + }, + "ValidationTestTypes":{ + "shape":"ValidationTestTypes", + "documentation":"

A list of up to 10 validationTestTypes can be provided.

" } }, "documentation":"

Contains the filter to apply when retrieving metrics.

" @@ -18161,15 +18222,15 @@ }, "Filters":{ "shape":"Filters", - "documentation":"

The filters to apply to returned metrics. You can filter up to the following limits:

Metric data is retrieved only for the resources associated with the queues or routing profiles, and by any channels included in the filter. (You cannot filter by both queue AND routing profile.) You can include both resource IDs and resource ARNs in the same request.

When using AgentStatuses as filter make sure Queues is added as primary filter.

When using the RoutingStepExpression filter, you need to pass exactly one QueueId. The filter is also case sensitive so when using the RoutingStepExpression filter, grouping by ROUTING_STEP_EXPRESSION is required.

Currently tagging is only supported on the resources that are passed in the filter.

" + "documentation":"

The filters to apply to returned metrics. You can filter up to the following limits:

Metric data is retrieved only for the resources associated with the queues or routing profiles, and by any channels included in the filter. (You cannot filter by both queue AND routing profile.) You can include both resource IDs and resource ARNs in the same request.

When using AgentStatuses as filter make sure Queues is added as primary filter.

When using Subtypes as filter make sure Queues is added as primary filter.

When using ValidationTestTypes as filter make sure Queues is added as primary filter.

When using the RoutingStepExpression filter, you need to pass exactly one QueueId. The filter is also case sensitive so when using the RoutingStepExpression filter, grouping by ROUTING_STEP_EXPRESSION is required.

Currently tagging is only supported on the resources that are passed in the filter.

" }, "Groupings":{ "shape":"Groupings", - "documentation":"

Defines the level of aggregation for metrics data by a dimension(s). Its similar to sorting items into buckets based on a common characteristic, then counting or calculating something for each bucket. For example, when grouped by QUEUE, the metrics returned apply to each queue rather than aggregated for all queues.

The grouping list is an ordered list, with the first item in the list defined as the primary grouping. If no grouping is included in the request, the aggregation happens at the instance-level.

" + "documentation":"

Defines the level of aggregation for metrics data by a dimension(s). Its similar to sorting items into buckets based on a common characteristic, then counting or calculating something for each bucket. For example, when grouped by QUEUE, the metrics returned apply to each queue rather than aggregated for all queues.

The grouping list is an ordered list, with the first item in the list defined as the primary grouping. If no grouping is included in the request, the aggregation happens at the instance-level.

" }, "CurrentMetrics":{ "shape":"CurrentMetrics", - "documentation":"

The metrics to retrieve. Specify the name and unit for each metric. The following metrics are available. For a description of all the metrics, see Metrics definitions in the Amazon Connect Administrator Guide.

AGENTS_AFTER_CONTACT_WORK

Unit: COUNT

Name in real-time metrics report: ACW

AGENTS_AVAILABLE

Unit: COUNT

Name in real-time metrics report: Available

AGENTS_ERROR

Unit: COUNT

Name in real-time metrics report: Error

AGENTS_NON_PRODUCTIVE

Unit: COUNT

Name in real-time metrics report: NPT (Non-Productive Time)

AGENTS_ON_CALL

Unit: COUNT

Name in real-time metrics report: On contact

AGENTS_ON_CONTACT

Unit: COUNT

Name in real-time metrics report: On contact

AGENTS_ONLINE

Unit: COUNT

Name in real-time metrics report: Online

AGENTS_STAFFED

Unit: COUNT

Name in real-time metrics report: Staffed

CONTACTS_IN_QUEUE

Unit: COUNT

Name in real-time metrics report: In queue

CONTACTS_SCHEDULED

Unit: COUNT

Name in real-time metrics report: Scheduled

OLDEST_CONTACT_AGE

Unit: SECONDS

When you use groupings, Unit says SECONDS and the Value is returned in SECONDS.

When you do not use groupings, Unit says SECONDS but the Value is returned in MILLISECONDS. For example, if you get a response like this:

{ \"Metric\": { \"Name\": \"OLDEST_CONTACT_AGE\", \"Unit\": \"SECONDS\" }, \"Value\": 24113.0 }

The actual OLDEST_CONTACT_AGE is 24 seconds.

When the filter RoutingStepExpression is used, this metric is still calculated from enqueue time. For example, if a contact that has been queued under <Expression 1> for 10 seconds has expired and <Expression 2> becomes active, then OLDEST_CONTACT_AGE for this queue will be counted starting from 10, not 0.

Name in real-time metrics report: Oldest

SLOTS_ACTIVE

Unit: COUNT

Name in real-time metrics report: Active

SLOTS_AVAILABLE

Unit: COUNT

Name in real-time metrics report: Availability

" + "documentation":"

The metrics to retrieve. Specify the name or metricId, and unit for each metric. The following metrics are available. For a description of all the metrics, see Metrics definitions in the Amazon Connect Administrator Guide.

MetricId should be used to reference custom metrics or out of the box metrics as Arn. If using MetricId, the limit is 10 MetricId per request.

AGENTS_AFTER_CONTACT_WORK

Unit: COUNT

Name in real-time metrics report: ACW

AGENTS_AVAILABLE

Unit: COUNT

Name in real-time metrics report: Available

AGENTS_ERROR

Unit: COUNT

Name in real-time metrics report: Error

AGENTS_NON_PRODUCTIVE

Unit: COUNT

Name in real-time metrics report: NPT (Non-Productive Time)

AGENTS_ON_CALL

Unit: COUNT

Name in real-time metrics report: On contact

AGENTS_ON_CONTACT

Unit: COUNT

Name in real-time metrics report: On contact

AGENTS_ONLINE

Unit: COUNT

Name in real-time metrics report: Online

AGENTS_STAFFED

Unit: COUNT

Name in real-time metrics report: Staffed

CONTACTS_IN_QUEUE

Unit: COUNT

Name in real-time metrics report: In queue

CONTACTS_SCHEDULED

Unit: COUNT

Name in real-time metrics report: Scheduled

OLDEST_CONTACT_AGE

Unit: SECONDS

When you use groupings, Unit says SECONDS and the Value is returned in SECONDS.

When you do not use groupings, Unit says SECONDS but the Value is returned in MILLISECONDS. For example, if you get a response like this:

{ \"Metric\": { \"Name\": \"OLDEST_CONTACT_AGE\", \"Unit\": \"SECONDS\" }, \"Value\": 24113.0 }

The actual OLDEST_CONTACT_AGE is 24 seconds.

When the filter RoutingStepExpression is used, this metric is still calculated from enqueue time. For example, if a contact that has been queued under <Expression 1> for 10 seconds has expired and <Expression 2> becomes active, then OLDEST_CONTACT_AGE for this queue will be counted starting from 10, not 0.

Name in real-time metrics report: Oldest

SLOTS_ACTIVE

Unit: COUNT

Name in real-time metrics report: Active

SLOTS_AVAILABLE

Unit: COUNT

Name in real-time metrics report: Availability

" }, "NextToken":{ "shape":"NextToken", @@ -18668,6 +18729,24 @@ } } }, + "GlobalResiliencyMetadata":{ + "type":"structure", + "members":{ + "ActiveRegion":{ + "shape":"ActiveRegion", + "documentation":"

The current AWS region in which the contact is active. This indicates where the contact is being processed in real-time.

" + }, + "OriginRegion":{ + "shape":"OriginRegion", + "documentation":"

The AWS region where the contact was originally created and initiated. This may differ from the ActiveRegion if the contact has been transferred across regions.

" + }, + "TrafficDistributionGroupId":{ + "shape":"TrafficDistributionGroupId", + "documentation":"

The identifier of the traffic distribution group.

" + } + }, + "documentation":"

Information about the global resiliency configuration for the contact, including traffic distribution details.

" + }, "GlobalSignInEndpoint":{ "type":"string", "max":128, @@ -18690,7 +18769,9 @@ "CHANNEL", "ROUTING_PROFILE", "ROUTING_STEP_EXPRESSION", - "AGENT_STATUS" + "AGENT_STATUS", + "SUBTYPE", + "VALIDATION_TEST_TYPE" ] }, "GroupingV2":{"type":"string"}, @@ -19909,6 +19990,15 @@ "TOTAL" ] }, + "InvalidActiveRegionException":{ + "type":"structure", + "members":{ + "Message":{"shape":"Message"} + }, + "documentation":"

This exception occurs when an API request is made to a non-active region in an Amazon Connect instance configured with Amazon Connect Global Resiliency. For example, if the active region is US West (Oregon) and a request is made to US East (N. Virginia), the exception will be returned.

", + "error":{"httpStatusCode":400}, + "exception":true + }, "InvalidContactFlowException":{ "type":"structure", "members":{ @@ -21622,7 +21712,7 @@ }, "InstanceId":{ "shape":"InstanceId", - "documentation":"

The identifier of the Amazon Connect instance that phone numbers are claimed to. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. If both TargetArn and InstanceId are not provided, this API lists numbers claimed to all the Amazon Connect instances belonging to your account in the same AWS Region as the request.

" + "documentation":"

The identifier of the Amazon Connect instance that phone numbers are claimed to. You can find the instance ID in the Amazon Resource Name (ARN) of the instance. If both TargetArn and InstanceId are not provided, this API lists numbers claimed to all the Amazon Connect instances belonging to your account in the same Amazon Web Services Region as the request.

" }, "MaxResults":{ "shape":"MaxResult1000", @@ -23564,6 +23654,11 @@ "type":"string", "max":267 }, + "OriginRegion":{ + "type":"string", + "max":1024, + "min":0 + }, "OriginsList":{ "type":"list", "member":{"shape":"Origin"} @@ -27504,6 +27599,10 @@ "SearchableSegmentAttributes":{ "shape":"SearchableSegmentAttributes", "documentation":"

The search criteria based on searchable segment attributes of a contact.

" + }, + "ActiveRegions":{ + "shape":"ActiveRegionList", + "documentation":"

The list of active regions for contacts in ACGR instances.

" } }, "documentation":"

A structure of search criteria to be used to return contacts.

" @@ -29185,6 +29284,10 @@ "CustomerId":{ "shape":"CustomerIdNonEmpty", "documentation":"

The customer's identification number. For example, the CustomerId may be a customer number from your CRM.

" + }, + "DisconnectOnCustomerExit":{ + "shape":"DisconnectOnCustomerExit", + "documentation":"

A list of participant types to automatically disconnect when the end customer ends the chat session, allowing them to continue through disconnect flows such as surveys or feedback forms.

" } } }, @@ -29452,7 +29555,7 @@ }, "SegmentAttributes":{ "shape":"SegmentAttributes", - "documentation":"

A set of system defined key-value pairs stored on individual contact segments using an attribute map. The attributes are standard Amazon Connect attributes. They can be accessed in flows.

" + "documentation":"

A set of system defined key-value pairs stored on individual contact segments using an attribute map. The attributes are standard Amazon Connect attributes. They can be accessed in flows.

" }, "Attributes":{ "shape":"Attributes", @@ -29468,6 +29571,7 @@ }, "ParticipantDetails":{"shape":"ParticipantDetails"}, "InitialSystemMessage":{"shape":"ChatMessage"}, + "InitialTemplatedSystemMessage":{"shape":"TemplatedMessageConfig"}, "RelatedContactId":{ "shape":"ContactId", "documentation":"

The unique identifier for an Amazon Connect contact. This identifier is related to the contact starting.

" @@ -29478,7 +29582,7 @@ }, "ClientToken":{ "shape":"ClientToken", - "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the AWS SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. The token is valid for 7 days after creation. If a contact is already started, the contact ID is returned.

", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. If not provided, the Amazon Web Services SDK populates this field. For more information about idempotency, see Making retries safe with idempotent APIs. The token is valid for 7 days after creation. If a contact is already started, the contact ID is returned.

", "idempotencyToken":true } } @@ -30071,6 +30175,11 @@ "max":100, "min":1 }, + "Subtypes":{ + "type":"list", + "member":{"shape":"Subtype"}, + "max":10 + }, "SuccessfulBatchAssociationSummary":{ "type":"structure", "members":{ @@ -33595,7 +33704,7 @@ }, "AndConditions":{ "shape":"UserSearchConditionList", - "documentation":"

A list of conditions which would be applied together with an AND condition.

" + "documentation":"

A list of conditions which would be applied together with an AND condition.

" }, "StringCondition":{ "shape":"StringCondition", @@ -33773,6 +33882,12 @@ "type":"list", "member":{"shape":"String"} }, + "ValidationTestType":{"type":"string"}, + "ValidationTestTypes":{ + "type":"list", + "member":{"shape":"ValidationTestType"}, + "max":10 + }, "Value":{"type":"double"}, "ValueBoundary":{ "type":"integer", @@ -34315,7 +34430,7 @@ }, "LastModifiedRegion":{ "shape":"RegionName", - "documentation":"

The AWS Region where the workspace was last modified.

" + "documentation":"

The Amazon Web Services Region where the workspace was last modified.

" }, "Tags":{ "shape":"TagMap", @@ -34558,7 +34673,7 @@ }, "LastModifiedRegion":{ "shape":"RegionName", - "documentation":"

The AWS Region where the workspace was last modified.

" + "documentation":"

The Amazon Web Services Region where the workspace was last modified.

" } }, "documentation":"

Contains summary information about a workspace.

" diff --git a/awscli/botocore/data/datasync/2018-11-09/service-2.json b/awscli/botocore/data/datasync/2018-11-09/service-2.json index 4aae9ae9ee63..e84fa182eda3 100644 --- a/awscli/botocore/data/datasync/2018-11-09/service-2.json +++ b/awscli/botocore/data/datasync/2018-11-09/service-2.json @@ -773,7 +773,7 @@ "AgentArnList":{ "type":"list", "member":{"shape":"AgentArn"}, - "max":4, + "max":8, "min":1 }, "AgentList":{ @@ -900,7 +900,7 @@ "documentation":"

Specifies the ARN for the customer-managed KMS key that DataSync uses to encrypt the DataSync-managed secret stored for SecretArn. DataSync provides this key to Secrets Manager.

" } }, - "documentation":"

Specifies configuration information for a DataSync-managed secret, such as an authentication token or secret key that DataSync uses to access a specific storage location, with a customer-managed KMS key.

You can use either CmkSecretConfig or CustomSecretConfig to provide credentials for a CreateLocation request. Do not provide both parameters for the same request.

" + "documentation":"

Specifies configuration information for a DataSync-managed secret, such as an authentication token, secret key, password, or Kerberos keytab that DataSync uses to access a specific storage location, with a customer-managed KMS key.

You can use either CmkSecretConfig or CustomSecretConfig to provide credentials for a CreateLocation request. Do not provide both parameters for the same request.

" }, "CreateAgentRequest":{ "type":"structure", @@ -984,11 +984,11 @@ }, "CmkSecretConfig":{ "shape":"CmkSecretConfig", - "documentation":"

Specifies configuration information for a DataSync-managed secret, which includes the authentication token that DataSync uses to access a specific AzureBlob storage location, with a customer-managed KMS key.

When you include this paramater as part of a CreateLocationAzureBlob request, you provide only the KMS key ARN. DataSync uses this KMS key together with the authentication token you specify for SasConfiguration to create a DataSync-managed secret to store the location access credentials.

Make sure the DataSync has permission to access the KMS key that you specify.

You can use either CmkSecretConfig (with SasConfiguration) or CustomSecretConfig (without SasConfiguration) to provide credentials for a CreateLocationAzureBlob request. Do not provide both parameters for the same request.

" + "documentation":"

Specifies configuration information for a DataSync-managed secret, which includes the authentication token that DataSync uses to access a specific AzureBlob storage location, with a customer-managed KMS key.

When you include this parameter as part of a CreateLocationAzureBlob request, you provide only the KMS key ARN. DataSync uses this KMS key together with the authentication token you specify for SasConfiguration to create a DataSync-managed secret to store the location access credentials.

Make sure that DataSync has permission to access the KMS key that you specify.

You can use either CmkSecretConfig (with SasConfiguration) or CustomSecretConfig (without SasConfiguration) to provide credentials for a CreateLocationAzureBlob request. Do not provide both parameters for the same request.

" }, "CustomSecretConfig":{ "shape":"CustomSecretConfig", - "documentation":"

Specifies configuration information for a customer-managed Secrets Manager secret where the authentication token for an AzureBlob storage location is stored in plain text. This configuration includes the secret ARN, and the ARN for an IAM role that provides access to the secret.

You can use either CmkSecretConfig (with SasConfiguration) or CustomSecretConfig (without SasConfiguration) to provide credentials for a CreateLocationAzureBlob request. Do not provide both parameters for the same request.

" + "documentation":"

Specifies configuration information for a customer-managed Secrets Manager secret where the authentication token for an AzureBlob storage location is stored in plain text, in Secrets Manager. This configuration includes the secret ARN, and the ARN for an IAM role that provides access to the secret.

You can use either CmkSecretConfig (with SasConfiguration) or CustomSecretConfig (without SasConfiguration) to provide credentials for a CreateLocationAzureBlob request. Do not provide both parameters for the same request.

" } } }, @@ -1367,11 +1367,11 @@ }, "CmkSecretConfig":{ "shape":"CmkSecretConfig", - "documentation":"

Specifies configuration information for a DataSync-managed secret, which includes the SecretKey that DataSync uses to access a specific object storage location, with a customer-managed KMS key.

When you include this paramater as part of a CreateLocationObjectStorage request, you provide only the KMS key ARN. DataSync uses this KMS key together with the value you specify for the SecretKey parameter to create a DataSync-managed secret to store the location access credentials.

Make sure the DataSync has permission to access the KMS key that you specify.

You can use either CmkSecretConfig (with SecretKey) or CustomSecretConfig (without SecretKey) to provide credentials for a CreateLocationObjectStorage request. Do not provide both parameters for the same request.

" + "documentation":"

Specifies configuration information for a DataSync-managed secret, which includes the SecretKey that DataSync uses to access a specific object storage location, with a customer-managed KMS key.

When you include this parameter as part of a CreateLocationObjectStorage request, you provide only the KMS key ARN. DataSync uses this KMS key together with the value you specify for the SecretKey parameter to create a DataSync-managed secret to store the location access credentials.

Make sure that DataSync has permission to access the KMS key that you specify.

You can use either CmkSecretConfig (with SecretKey) or CustomSecretConfig (without SecretKey) to provide credentials for a CreateLocationObjectStorage request. Do not provide both parameters for the same request.

" }, "CustomSecretConfig":{ "shape":"CustomSecretConfig", - "documentation":"

Specifies configuration information for a customer-managed Secrets Manager secret where the secret key for a specific object storage location is stored in plain text. This configuration includes the secret ARN, and the ARN for an IAM role that provides access to the secret.

You can use either CmkSecretConfig (with SecretKey) or CustomSecretConfig (without SecretKey) to provide credentials for a CreateLocationObjectStorage request. Do not provide both parameters for the same request.

" + "documentation":"

Specifies configuration information for a customer-managed Secrets Manager secret where the secret key for a specific object storage location is stored in plain text, in Secrets Manager. This configuration includes the secret ARN, and the ARN for an IAM role that provides access to the secret.

You can use either CmkSecretConfig (with SecretKey) or CustomSecretConfig (without SecretKey) to provide credentials for a CreateLocationObjectStorage request. Do not provide both parameters for the same request.

" } }, "documentation":"

CreateLocationObjectStorageRequest

" @@ -1455,6 +1455,14 @@ "shape":"SmbPassword", "documentation":"

Specifies the password of the user who can mount your SMB file server and has permission to access the files and folders involved in your transfer. This parameter applies only if AuthenticationType is set to NTLM.

" }, + "CmkSecretConfig":{ + "shape":"CmkSecretConfig", + "documentation":"

Specifies configuration information for a DataSync-managed secret, either a Password or KerberosKeytab (for NTLM (default) and KERBEROS authentication types, respectively) that DataSync uses to access a specific SMB storage location, with a customer-managed KMS key.

When you include this parameter as part of a CreateLocationSmbRequest request, you provide only the KMS key ARN. DataSync uses this KMS key together with either the Password or KerberosKeytab you specify to create a DataSync-managed secret to store the location access credentials.

Make sure that DataSync has permission to access the KMS key that you specify.

You can use either CmkSecretConfig (with either Password or KerberosKeytab) or CustomSecretConfig (without any Password and KerberosKeytab) to provide credentials for a CreateLocationSmbRequest request. Do not provide both CmkSecretConfig and CustomSecretConfig parameters for the same request.

" + }, + "CustomSecretConfig":{ + "shape":"CustomSecretConfig", + "documentation":"

Specifies configuration information for a customer-managed Secrets Manager secret where the SMB storage location credentials are stored in Secrets Manager as plain text (for Password) or binary (for KerberosKeytab). This configuration includes the secret ARN, and the ARN for an IAM role that provides access to the secret.

You can use either CmkSecretConfig (with either Password or KerberosKeytab) or CustomSecretConfig (without any Password and KerberosKeytab) to provide credentials for a CreateLocationSmbRequest request. Do not provide both parameters for the same request.

" + }, "AgentArns":{ "shape":"AgentArnList", "documentation":"

Specifies the DataSync agent (or agents) that can connect to your SMB file server. You specify an agent by using its Amazon Resource Name (ARN).

" @@ -1580,7 +1588,7 @@ "documentation":"

Specifies the ARN for the Identity and Access Management role that DataSync uses to access the secret specified for SecretArn.

" } }, - "documentation":"

Specifies configuration information for a customer-managed Secrets Manager secret where a storage location authentication token or secret key is stored in plain text. This configuration includes the secret ARN, and the ARN for an IAM role that provides access to the secret.

You can use either CmkSecretConfig or CustomSecretConfig to provide credentials for a CreateLocation request. Do not provide both parameters for the same request.

" + "documentation":"

Specifies configuration information for a customer-managed Secrets Manager secret where storage location credentials are stored in Secrets Manager as plain text (for authentication token, secret key, or password) or as binary (for Kerberos keytab). This configuration includes the secret ARN, and the ARN for an IAM role that provides access to the secret.

You can use either CmkSecretConfig or CustomSecretConfig to provide credentials for a CreateLocation request. Do not provide both parameters for the same request.

" }, "DeleteAgentRequest":{ "type":"structure", @@ -2167,6 +2175,18 @@ "AuthenticationType":{ "shape":"SmbAuthenticationType", "documentation":"

The authentication protocol that DataSync uses to connect to your SMB file server.

" + }, + "ManagedSecretConfig":{ + "shape":"ManagedSecretConfig", + "documentation":"

Describes configuration information for a DataSync-managed secret, such as a Password or KerberosKeytab that DataSync uses to access a specific storage location. DataSync uses the default Amazon Web Services-managed KMS key to encrypt this secret in Secrets Manager.

" + }, + "CmkSecretConfig":{ + "shape":"CmkSecretConfig", + "documentation":"

Describes configuration information for a DataSync-managed secret, such as a Password or KerberosKeytab that DataSync uses to access a specific storage location, with a customer-managed KMS key.

" + }, + "CustomSecretConfig":{ + "shape":"CustomSecretConfig", + "documentation":"

Describes configuration information for a customer-managed secret, such as a Password or KerberosKeytab that DataSync uses to access a specific storage location, with a customer-managed KMS key.

" } }, "documentation":"

DescribeLocationSmbResponse

" @@ -2212,7 +2232,7 @@ }, "EstimatedFilesToTransfer":{ "shape":"long", - "documentation":"

The number of files, objects, and directories that DataSync expects to transfer over the network. This value is calculated while DataSync prepares the transfer.

How this gets calculated depends primarily on your task’s transfer mode configuration:

" + "documentation":"

The number of files, objects, and directories that DataSync expects to transfer over the network. This value is calculated while DataSync prepares the transfer.

How this gets calculated depends primarily on your task’s transfer mode configuration:

For Enhanced mode tasks, this counter only includes files or objects. Directories are counted in EstimatedFoldersToTransfer.

" }, "EstimatedBytesToTransfer":{ "shape":"long", @@ -2220,7 +2240,7 @@ }, "FilesTransferred":{ "shape":"long", - "documentation":"

The number of files, objects, and directories that DataSync actually transfers over the network. This value is updated periodically during your task execution when something is read from the source and sent over the network.

If DataSync fails to transfer something, this value can be less than EstimatedFilesToTransfer. In some cases, this value can also be greater than EstimatedFilesToTransfer. This element is implementation-specific for some location types, so don't use it as an exact indication of what's transferring or to monitor your task execution.

" + "documentation":"

The number of files, objects, and directories that DataSync actually transfers over the network. This value is updated periodically during your task execution when something is read from the source and sent over the network.

If DataSync fails to transfer something, this value can be less than EstimatedFilesToTransfer. In some cases, this value can also be greater than EstimatedFilesToTransfer. This element is implementation-specific for some location types, so don't use it as an exact indication of what's transferring or to monitor your task execution.

For Enhanced mode tasks, this counter only includes files or objects. Directories are counted in FoldersTransferred.

" }, "BytesWritten":{ "shape":"long", @@ -2244,15 +2264,15 @@ }, "FilesDeleted":{ "shape":"long", - "documentation":"

The number of files, objects, and directories that DataSync actually deletes in your destination location. If you don't configure your task to delete data in the destination that isn't in the source, the value is always 0.

" + "documentation":"

The number of files, objects, and directories that DataSync actually deletes in your destination location. If you don't configure your task to delete data in the destination that isn't in the source, the value is always 0.

For Enhanced mode tasks, this counter only includes files or objects. Directories are counted in FoldersDeleted.

" }, "FilesSkipped":{ "shape":"long", - "documentation":"

The number of files, objects, and directories that DataSync skips during your transfer.

" + "documentation":"

The number of files, objects, and directories that DataSync skips during your transfer.

For Enhanced mode tasks, this counter only includes files or objects. Directories are counted in FoldersSkipped.

" }, "FilesVerified":{ "shape":"long", - "documentation":"

The number of files, objects, and directories that DataSync verifies during your transfer.

When you configure your task to verify only the data that's transferred, DataSync doesn't verify directories in some situations or files that fail to transfer.

" + "documentation":"

The number of files, objects, and directories that DataSync verifies during your transfer.

When you configure your task to verify only the data that's transferred, DataSync doesn't verify directories in some situations or files that fail to transfer.

For Enhanced mode tasks, this counter only includes files or objects. Directories are counted in FoldersVerified.

" }, "ReportResult":{ "shape":"ReportResult", @@ -2260,7 +2280,7 @@ }, "EstimatedFilesToDelete":{ "shape":"long", - "documentation":"

The number of files, objects, and directories that DataSync expects to delete in your destination location. If you don't configure your task to delete data in the destination that isn't in the source, the value is always 0.

" + "documentation":"

The number of files, objects, and directories that DataSync expects to delete in your destination location. If you don't configure your task to delete data in the destination that isn't in the source, the value is always 0.

For Enhanced mode tasks, this counter only includes files or objects. Directories are counted in EstimatedFoldersToDelete.

" }, "TaskMode":{ "shape":"TaskMode", @@ -2268,15 +2288,51 @@ }, "FilesPrepared":{ "shape":"long", - "documentation":"

The number of objects that DataSync will attempt to transfer after comparing your source and destination locations.

Applies only to Enhanced mode tasks.

This counter isn't applicable if you configure your task to transfer all data. In that scenario, DataSync copies everything from the source to the destination without comparing differences between the locations.

" + "documentation":"

The number of files or objects that DataSync will attempt to transfer after comparing your source and destination locations.

Applies only to Enhanced mode tasks.

This counter isn't applicable if you configure your task to transfer all data. In that scenario, DataSync copies everything from the source to the destination without comparing differences between the locations.

" }, "FilesListed":{ "shape":"TaskExecutionFilesListedDetail", - "documentation":"

The number of objects that DataSync finds at your locations.

Applies only to Enhanced mode tasks.

" + "documentation":"

The number of files or objects that DataSync finds at your locations.

Applies only to Enhanced mode tasks.

" }, "FilesFailed":{ "shape":"TaskExecutionFilesFailedDetail", - "documentation":"

The number of objects that DataSync fails to prepare, transfer, verify, and delete during your task execution.

Applies only to Enhanced mode tasks.

" + "documentation":"

The number of files or objects that DataSync fails to prepare, transfer, verify, and delete during your task execution.

Applies only to Enhanced mode tasks.

" + }, + "EstimatedFoldersToDelete":{ + "shape":"ItemCount", + "documentation":"

The number of directories that DataSync expects to delete in your destination location. If you don't configure your task to delete data in the destination that isn't in the source, the value is always 0.

Applies only to Enhanced mode tasks.

" + }, + "EstimatedFoldersToTransfer":{ + "shape":"ItemCount", + "documentation":"

The number of directories that DataSync expects to transfer over the network. This value is calculated as DataSync prepares directories to transfer.

How this gets calculated depends primarily on your task’s transfer mode configuration:

Applies only to Enhanced mode tasks.

" + }, + "FoldersSkipped":{ + "shape":"ItemCount", + "documentation":"

The number of directories that DataSync skips during your transfer.

Applies only to Enhanced mode tasks.

" + }, + "FoldersPrepared":{ + "shape":"ItemCount", + "documentation":"

The number of directories that DataSync will attempt to transfer after comparing your source and destination locations.

Applies only to Enhanced mode tasks.

This counter isn't applicable if you configure your task to transfer all data. In that scenario, DataSync copies everything from the source to the destination without comparing differences between the locations.

" + }, + "FoldersTransferred":{ + "shape":"ItemCount", + "documentation":"

The number of directories that DataSync actually transfers over the network. This value is updated periodically during your task execution when something is read from the source and sent over the network.

If DataSync fails to transfer something, this value can be less than EstimatedFoldersToTransfer. In some cases, this value can also be greater than EstimatedFoldersToTransfer.

Applies only to Enhanced mode tasks.

" + }, + "FoldersVerified":{ + "shape":"ItemCount", + "documentation":"

The number of directories that DataSync verifies during your transfer.

Applies only to Enhanced mode tasks.

" + }, + "FoldersDeleted":{ + "shape":"ItemCount", + "documentation":"

The number of directories that DataSync actually deletes in your destination location. If you don't configure your task to delete data in the destination that isn't in the source, the value is always 0.

Applies only to Enhanced mode tasks.

" + }, + "FoldersListed":{ + "shape":"TaskExecutionFoldersListedDetail", + "documentation":"

The number of directories that DataSync finds at your locations.

Applies only to Enhanced mode tasks.

" + }, + "FoldersFailed":{ + "shape":"TaskExecutionFoldersFailedDetail", + "documentation":"

The number of directories that DataSync fails to list, prepare, transfer, verify, and delete during your task execution.

Applies only to Enhanced mode tasks.

" }, "LaunchTime":{ "shape":"Time", @@ -2738,6 +2794,10 @@ "documentation":"

This exception is thrown when the client submits a malformed request.

", "exception":true }, + "ItemCount":{ + "type":"long", + "box":true + }, "KerberosKeytabFile":{ "type":"blob", "max":65536 @@ -3195,7 +3255,7 @@ }, "BytesPerSecond":{ "shape":"BytesPerSecond", - "documentation":"

Limits the bandwidth used by a DataSync task. For example, if you want DataSync to use a maximum of 1 MB, set this value to 1048576 (=1024*1024).

Not applicable to Enhanced mode tasks.

" + "documentation":"

Limits the bandwidth used by a DataSync task. For example, if you want DataSync to use a maximum of 1 MB, set this value to 1048576 (=1024*1024).

" }, "TaskQueueing":{ "shape":"TaskQueueing", @@ -3716,36 +3776,76 @@ "members":{ "Prepare":{ "shape":"long", - "documentation":"

The number of objects that DataSync fails to prepare during your task execution.

" + "documentation":"

The number of files or objects that DataSync fails to prepare during your task execution.

" }, "Transfer":{ "shape":"long", - "documentation":"

The number of objects that DataSync fails to transfer during your task execution.

" + "documentation":"

The number of files or objects that DataSync fails to transfer during your task execution.

" }, "Verify":{ "shape":"long", - "documentation":"

The number of objects that DataSync fails to verify during your task execution.

" + "documentation":"

The number of files or objects that DataSync fails to verify during your task execution.

" }, "Delete":{ "shape":"long", - "documentation":"

The number of objects that DataSync fails to delete during your task execution.

" + "documentation":"

The number of files or objects that DataSync fails to delete during your task execution.

" } }, - "documentation":"

The number of objects that DataSync fails to prepare, transfer, verify, and delete during your task execution.

Applies only to Enhanced mode tasks.

" + "documentation":"

The number of files or objects that DataSync fails to prepare, transfer, verify, and delete during your task execution.

Applies only to Enhanced mode tasks.

" }, "TaskExecutionFilesListedDetail":{ "type":"structure", "members":{ "AtSource":{ "shape":"long", - "documentation":"

The number of objects that DataSync finds at your source location.

" + "documentation":"

The number of files or objects that DataSync finds at your source location.

" + }, + "AtDestinationForDelete":{ + "shape":"long", + "documentation":"

The number of files or objects that DataSync finds at your destination location. This counter is only applicable if you configure your task to delete data in the destination that isn't in the source.

" + } + }, + "documentation":"

The number of files or objects that DataSync finds at your locations.

Applies only to Enhanced mode tasks.

" + }, + "TaskExecutionFoldersFailedDetail":{ + "type":"structure", + "members":{ + "List":{ + "shape":"long", + "documentation":"

The number of directories that DataSync fails to list during your task execution.

" + }, + "Prepare":{ + "shape":"long", + "documentation":"

The number of directories that DataSync fails to prepare during your task execution.

" + }, + "Transfer":{ + "shape":"long", + "documentation":"

The number of directories that DataSync fails to transfer during your task execution.

" + }, + "Verify":{ + "shape":"long", + "documentation":"

The number of directories that DataSync fails to verify during your task execution.

" + }, + "Delete":{ + "shape":"long", + "documentation":"

The number of directories that DataSync fails to delete during your task execution.

" + } + }, + "documentation":"

The number of directories that DataSync fails to list, prepare, transfer, verify, and delete during your task execution.

Applies only to Enhanced mode tasks.

" + }, + "TaskExecutionFoldersListedDetail":{ + "type":"structure", + "members":{ + "AtSource":{ + "shape":"long", + "documentation":"

The number of directories that DataSync finds at your source location.

" }, "AtDestinationForDelete":{ "shape":"long", - "documentation":"

The number of objects that DataSync finds at your destination location. This counter is only applicable if you configure your task to delete data in the destination that isn't in the source.

" + "documentation":"

The number of directories that DataSync finds at your destination location. This counter is only applicable if you configure your task to delete data in the destination that isn't in the source.

" } }, - "documentation":"

The number of objects that DataSync finds at your locations.

Applies only to Enhanced mode tasks.

" + "documentation":"

The number of directories that DataSync finds at your locations.

Applies only to Enhanced mode tasks.

" }, "TaskExecutionList":{ "type":"list", @@ -4381,6 +4481,14 @@ "shape":"SmbPassword", "documentation":"

Specifies the password of the user who can mount your SMB file server and has permission to access the files and folders involved in your transfer. This parameter applies only if AuthenticationType is set to NTLM.

" }, + "CmkSecretConfig":{ + "shape":"CmkSecretConfig", + "documentation":"

Specifies configuration information for a DataSync-managed secret, such as a Password or KerberosKeytab or set of credentials that DataSync uses to access a specific transfer location, and a customer-managed KMS key.

" + }, + "CustomSecretConfig":{ + "shape":"CustomSecretConfig", + "documentation":"

Specifies configuration information for a customer-managed secret, such as a Password or KerberosKeytab or set of credentials that DataSync uses to access a specific transfer location, and a customer-managed KMS key.

" + }, "AgentArns":{ "shape":"AgentArnList", "documentation":"

Specifies the DataSync agent (or agents) that can connect to your SMB file server. You specify an agent by using its Amazon Resource Name (ARN).

" diff --git a/awscli/botocore/data/datazone/2018-05-10/service-2.json b/awscli/botocore/data/datazone/2018-05-10/service-2.json index 860e9078ad60..1643035d6074 100644 --- a/awscli/botocore/data/datazone/2018-05-10/service-2.json +++ b/awscli/botocore/data/datazone/2018-05-10/service-2.json @@ -1468,6 +1468,25 @@ ], "documentation":"

Gets a connection. In Amazon DataZone, a connection enables you to connect your resources (domains, projects, and environments) to external resources and services.

" }, + "GetDataExportConfiguration":{ + "name":"GetDataExportConfiguration", + "http":{ + "method":"GET", + "requestUri":"/v2/domains/{domainIdentifier}/data-export-configuration", + "responseCode":200 + }, + "input":{"shape":"GetDataExportConfigurationInput"}, + "output":{"shape":"GetDataExportConfigurationOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"} + ], + "documentation":"

Gets data export configuration details.

" + }, "GetDataProduct":{ "name":"GetDataProduct", "http":{ @@ -2712,6 +2731,28 @@ "documentation":"

Posts time series data points to Amazon DataZone for the specified asset.

", "idempotent":true }, + "PutDataExportConfiguration":{ + "name":"PutDataExportConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/v2/domains/{domainIdentifier}/data-export-configuration", + "responseCode":200 + }, + "input":{"shape":"PutDataExportConfigurationInput"}, + "output":{"shape":"PutDataExportConfigurationOutput"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"UnauthorizedException"} + ], + "documentation":"

Creates data export configuration details.

In the current release, you can enable exporting asset metadata only for one domain per Amazon Web Services account per region. If you disable the asset metadata export feature for a domain where it's already enabled, you cannot enable this feature for another domain in the same Amazon Web Services account and region.

", + "idempotent":true + }, "PutEnvironmentBlueprintConfiguration":{ "name":"PutEnvironmentBlueprintConfiguration", "http":{ @@ -2968,7 +3009,7 @@ {"shape":"ValidationException"}, {"shape":"UnauthorizedException"} ], - "documentation":"

Starts the metadata generation run.

Prerequisites:

", + "documentation":"

Starts the metadata generation run.

Prerequisites:

", "idempotent":true }, "TagResource":{ @@ -5435,6 +5476,13 @@ }, "documentation":"

The configurable action of a Amazon DataZone environment.

" }, + "ConfigurationStatus":{ + "type":"string", + "enum":[ + "COMPLETED", + "FAILED" + ] + }, "ConflictException":{ "type":"structure", "required":["message"], @@ -10667,6 +10715,20 @@ "member":{"shape":"RegionName"}, "min":0 }, + "EncryptionConfiguration":{ + "type":"structure", + "members":{ + "kmsKeyArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key to use for encryption. This field is required only when sseAlgorithm is set to aws:kms.

" + }, + "sseAlgorithm":{ + "shape":"String", + "documentation":"

The server-side encryption algorithm to use. Valid values are AES256 for S3-managed encryption keys, or aws:kms for Amazon Web Services KMS-managed encryption keys. If you choose SSE-KMS encryption, you must grant the S3 Tables maintenance principal access to your KMS key. For more information, see Permissions requirements for S3 Tables SSE-KMS encryption.

" + } + }, + "documentation":"

The encryption configuration details.

" + }, "EntityId":{ "type":"string", "pattern":"^[a-zA-Z0-9_-]{1,36}$" @@ -12064,6 +12126,47 @@ } } }, + "GetDataExportConfigurationInput":{ + "type":"structure", + "required":["domainIdentifier"], + "members":{ + "domainIdentifier":{ + "shape":"DomainId", + "documentation":"

The ID of the domain where you want to get the data export configuration details.

", + "location":"uri", + "locationName":"domainIdentifier" + } + } + }, + "GetDataExportConfigurationOutput":{ + "type":"structure", + "members":{ + "createdAt":{ + "shape":"CreatedAt", + "documentation":"

The timestamp at which the data export configuration report was created.

" + }, + "encryptionConfiguration":{ + "shape":"EncryptionConfiguration", + "documentation":"

The encryption configuration as part of the data export configuration details.

" + }, + "isExportEnabled":{ + "shape":"Boolean", + "documentation":"

Specifies whether the export is enabled.

" + }, + "s3TableBucketArn":{ + "shape":"String", + "documentation":"

The Amazon S3 table bucket ARN as part of the data export configuration details.

" + }, + "status":{ + "shape":"ConfigurationStatus", + "documentation":"

The status of the data export configuration.

" + }, + "updatedAt":{ + "shape":"UpdatedAt", + "documentation":"

The timestamp at which the data export configuration report was updated.

" + } + } + }, "GetDataProductInput":{ "type":"structure", "required":[ @@ -13634,6 +13737,12 @@ "documentation":"

The identifier of the metadata generation run.

", "location":"uri", "locationName":"identifier" + }, + "type":{ + "shape":"MetadataGenerationRunType", + "documentation":"

The type of the metadata generation run.

", + "location":"querystring", + "locationName":"type" } } }, @@ -13675,7 +13784,17 @@ }, "type":{ "shape":"MetadataGenerationRunType", - "documentation":"

The type of metadata generation run.

" + "documentation":"

The type of metadata generation run.

", + "deprecated":true, + "deprecatedMessage":"This field is going to be deprecated, please use the 'types' field to provide the MetadataGenerationRun types" + }, + "typeStats":{ + "shape":"MetadataGenerationRunTypeStats", + "documentation":"

The type stats included in the metadata generation run output details.

" + }, + "types":{ + "shape":"MetadataGenerationRunTypes", + "documentation":"

The types of the metadata generation run.

" } } }, @@ -17019,6 +17138,12 @@ "location":"querystring", "locationName":"status" }, + "targetIdentifier":{ + "shape":"EntityId", + "documentation":"

The target ID for which you want to list metadata generation runs.

", + "location":"querystring", + "locationName":"targetIdentifier" + }, "type":{ "shape":"MetadataGenerationRunType", "documentation":"

The type of the metadata generation runs.

", @@ -18212,7 +18337,13 @@ }, "type":{ "shape":"MetadataGenerationRunType", - "documentation":"

The type of the metadata generation run.

" + "documentation":"

The type of the metadata generation run.

", + "deprecated":true, + "deprecatedMessage":"This field is going to be deprecated, please use the 'types' field to provide the MetadataGenerationRun types" + }, + "types":{ + "shape":"MetadataGenerationRunTypes", + "documentation":"

The types of the metadata generation run.

" } }, "documentation":"

The metadata generation run.

" @@ -18224,7 +18355,8 @@ "IN_PROGRESS", "CANCELED", "SUCCEEDED", - "FAILED" + "FAILED", + "PARTIALLY_SUCCEEDED" ] }, "MetadataGenerationRunTarget":{ @@ -18251,7 +18383,43 @@ }, "MetadataGenerationRunType":{ "type":"string", - "enum":["BUSINESS_DESCRIPTIONS"] + "enum":[ + "BUSINESS_DESCRIPTIONS", + "BUSINESS_NAMES", + "BUSINESS_GLOSSARY_ASSOCIATIONS" + ] + }, + "MetadataGenerationRunTypeStat":{ + "type":"structure", + "required":[ + "status", + "type" + ], + "members":{ + "errorMessage":{ + "shape":"String", + "documentation":"

The error message displayed if the action fails to run.

" + }, + "status":{ + "shape":"MetadataGenerationRunStatus", + "documentation":"

The status of the metadata generation run type statistics.

" + }, + "type":{ + "shape":"MetadataGenerationRunType", + "documentation":"

The type of the metadata generation run type statistics.

" + } + }, + "documentation":"

The statistics of the metadata generation run type.

" + }, + "MetadataGenerationRunTypeStats":{ + "type":"list", + "member":{"shape":"MetadataGenerationRunTypeStat"} + }, + "MetadataGenerationRunTypes":{ + "type":"list", + "member":{"shape":"MetadataGenerationRunType"}, + "max":2, + "min":1 }, "MetadataGenerationRuns":{ "type":"list", @@ -19330,6 +19498,38 @@ "documentation":"

The provisioning properties of an environment blueprint.

", "union":true }, + "PutDataExportConfigurationInput":{ + "type":"structure", + "required":[ + "domainIdentifier", + "enableExport" + ], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique, case-sensitive identifier to ensure idempotency of the request. This field is automatically populated if not provided.

", + "idempotencyToken":true + }, + "domainIdentifier":{ + "shape":"DomainId", + "documentation":"

The domain ID where you want to create data export configuration details.

", + "location":"uri", + "locationName":"domainIdentifier" + }, + "enableExport":{ + "shape":"Boolean", + "documentation":"

Specifies that the export is to be enabled as part of creating data export configuration details.

" + }, + "encryptionConfiguration":{ + "shape":"EncryptionConfiguration", + "documentation":"

The encryption configuration as part of creating data export configuration details.

The KMS key provided here as part of encryptionConfiguration must have the required permissions as described in KMS permissions for exporting asset metadata in Amazon SageMaker Unified Studio.

" + } + } + }, + "PutDataExportConfigurationOutput":{ + "type":"structure", + "members":{} + }, "PutEnvironmentBlueprintConfigurationInput":{ "type":"structure", "required":[ @@ -21792,8 +21992,7 @@ "required":[ "domainIdentifier", "owningProjectIdentifier", - "target", - "type" + "target" ], "members":{ "clientToken":{ @@ -21817,7 +22016,13 @@ }, "type":{ "shape":"MetadataGenerationRunType", - "documentation":"

The type of the metadata generation run.

" + "documentation":"

The type of the metadata generation run.

", + "deprecated":true, + "deprecatedMessage":"This field is going to be deprecated, please use the 'types' field to provide the MetadataGenerationRun types" + }, + "types":{ + "shape":"MetadataGenerationRunTypes", + "documentation":"

The types of the metadata generation run.

" } } }, @@ -21854,7 +22059,13 @@ }, "type":{ "shape":"MetadataGenerationRunType", - "documentation":"

The type of the metadata generation run.

" + "documentation":"

The type of the metadata generation run.

", + "deprecated":true, + "deprecatedMessage":"This field is going to be deprecated, please use the 'types' field to provide the MetadataGenerationRun types" + }, + "types":{ + "shape":"MetadataGenerationRunTypes", + "documentation":"

The types of the metadata generation run.

" } } }, diff --git a/awscli/botocore/data/ec2/2016-11-15/service-2.json b/awscli/botocore/data/ec2/2016-11-15/service-2.json index de7bde269e49..59f2dee5fd72 100644 --- a/awscli/botocore/data/ec2/2016-11-15/service-2.json +++ b/awscli/botocore/data/ec2/2016-11-15/service-2.json @@ -3247,7 +3247,7 @@ }, "input":{"shape":"DescribeInstanceStatusRequest"}, "output":{"shape":"DescribeInstanceStatusResult"}, - "documentation":"

Describes the status of the specified instances or all of your instances. By default, only running instances are described, unless you specifically indicate to return the status of all instances.

Instance status includes the following components:

The Amazon EC2 API follows an eventual consistency model. This means that the result of an API command you run that creates or modifies resources might not be immediately available to all subsequent commands you run. For guidance on how to manage eventual consistency, see Eventual consistency in the Amazon EC2 API in the Amazon EC2 Developer Guide.

The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order.

" + "documentation":"

Describes the status of the specified instances or all of your instances. By default, only running instances are described, unless you specifically indicate to return the status of all instances.

Instance status includes the following components:

The Amazon EC2 API follows an eventual consistency model. This means that the result of an API command you run that creates or modifies resources might not be immediately available to all subsequent commands you run. For guidance on how to manage eventual consistency, see Eventual consistency in the Amazon EC2 API in the Amazon EC2 Developer Guide.

The order of the elements in the response, including those within nested structures, might vary. Applications should not assume the elements appear in a particular order.

" }, "DescribeInstanceTopology":{ "name":"DescribeInstanceTopology", @@ -17542,6 +17542,10 @@ "shape":"SpreadLevel", "documentation":"

Determines how placement groups spread instances.

" }, + "LinkedGroupId":{ + "shape":"PlacementGroupId", + "documentation":"

Reserved for future use.

" + }, "DryRun":{ "shape":"Boolean", "documentation":"

Checks whether you have the required permissions for the operation, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation. Otherwise, it is UnauthorizedOperation.

", @@ -26391,7 +26395,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

The filters.

", + "documentation":"

The filters.

", "locationName":"Filter" }, "NextToken":{ @@ -36462,7 +36466,7 @@ }, "AvailabilityZone":{ "shape":"AvailabilityZoneName", - "documentation":"

The Availability Zone in which to launch the instances.

", + "documentation":"

The Availability Zone in which to launch the instances. For example, us-east-2a.

Either AvailabilityZone or AvailabilityZoneId must be specified in the request, but not both.

", "locationName":"availabilityZone" }, "WeightedCapacity":{ @@ -36494,6 +36498,11 @@ "shape":"BlockDeviceMappingResponseList", "documentation":"

The block device mappings, which define the EBS volumes and instance store volumes to attach to the instance at launch.

Supported only for fleets of type instant.

For more information, see Block device mappings for volumes on Amazon EC2 instances in the Amazon EC2 User Guide.

", "locationName":"blockDeviceMappingSet" + }, + "AvailabilityZoneId":{ + "shape":"AvailabilityZoneId", + "documentation":"

The ID of the Availability Zone in which to launch the instances. For example, use2-az1.

Either AvailabilityZone or AvailabilityZoneId must be specified in the request, but not both.

", + "locationName":"availabilityZoneId" } }, "documentation":"

Describes overrides for a launch template.

" @@ -36529,7 +36538,7 @@ }, "AvailabilityZone":{ "shape":"AvailabilityZoneName", - "documentation":"

The Availability Zone in which to launch the instances.

" + "documentation":"

The Availability Zone in which to launch the instances. For example, us-east-2a.

Either AvailabilityZone or AvailabilityZoneId must be specified in the request, but not both.

" }, "WeightedCapacity":{ "shape":"Double", @@ -36555,6 +36564,10 @@ "ImageId":{ "shape":"String", "documentation":"

The ID of the AMI in the format ami-17characters00000.

Alternatively, you can specify a Systems Manager parameter, using one of the following formats. The Systems Manager parameter will resolve to an AMI ID on launch.

To reference a public parameter:

To reference a parameter stored in the same account:

To reference a parameter shared from another Amazon Web Services account:

For more information, see Use a Systems Manager parameter instead of an AMI ID in the Amazon EC2 User Guide.

This parameter is only available for fleets of type instant. For fleets of type maintain and request, you must specify the AMI ID in the launch template.

" + }, + "AvailabilityZoneId":{ + "shape":"AvailabilityZoneId", + "documentation":"

The ID of the Availability Zone in which to launch the instances. For example, use2-az1.

Either AvailabilityZone or AvailabilityZoneId must be specified in the request, but not both.

" } }, "documentation":"

Describes overrides for a launch template.

" @@ -45754,7 +45767,20 @@ "r8a.24xlarge", "r8a.48xlarge", "r8a.metal-24xl", - "r8a.metal-48xl" + "r8a.metal-48xl", + "p6-b300.48xlarge", + "c8a.medium", + "c8a.large", + "c8a.xlarge", + "c8a.2xlarge", + "c8a.4xlarge", + "c8a.8xlarge", + "c8a.12xlarge", + "c8a.16xlarge", + "c8a.24xlarge", + "c8a.48xlarge", + "c8a.metal-24xl", + "c8a.metal-48xl" ] }, "InstanceTypeHypervisor":{ @@ -49884,7 +49910,7 @@ }, "AvailabilityZone":{ "shape":"String", - "documentation":"

The Availability Zone in which to launch the instances.

", + "documentation":"

The Availability Zone in which to launch the instances. For example, us-east-2a.

Either AvailabilityZone or AvailabilityZoneId must be specified in the request, but not both.

", "locationName":"availabilityZone" }, "WeightedCapacity":{ @@ -49901,6 +49927,11 @@ "shape":"InstanceRequirements", "documentation":"

The instance requirements. When you specify instance requirements, Amazon EC2 will identify instance types with the provided requirements, and then use your On-Demand and Spot allocation strategies to launch instances from these instance types, in the same way as when you specify a list of instance types.

If you specify InstanceRequirements, you can't specify InstanceType.

", "locationName":"instanceRequirements" + }, + "AvailabilityZoneId":{ + "shape":"AvailabilityZoneId", + "documentation":"

The ID of the Availability Zone in which to launch the instances. For example, use2-az1.

Either AvailabilityZone or AvailabilityZoneId must be specified in the request, but not both.

", + "locationName":"availabilityZoneId" } }, "documentation":"

Describes overrides for a launch template.

" @@ -51746,7 +51777,13 @@ "spot-total-estimated-cost", "spot-avg-run-time-before-interruption-inst", "spot-max-run-time-before-interruption-inst", - "spot-min-run-time-before-interruption-inst" + "spot-min-run-time-before-interruption-inst", + "spot-total-interruptions-inst", + "spot-total-interruptions-vcpu", + "spot-total-count-inst", + "spot-total-count-vcpu", + "spot-interruption-rate-inst", + "spot-interruption-rate-vcpu" ] }, "MetricDataResult":{ @@ -58305,6 +58342,11 @@ "shape":"SpreadLevel", "documentation":"

The spread level for the placement group. Only Outpost placement groups can be spread across hosts.

", "locationName":"spreadLevel" + }, + "LinkedGroupId":{ + "shape":"PlacementGroupId", + "documentation":"

Reserved for future use.

", + "locationName":"linkedGroupId" } }, "documentation":"

Describes a placement group.

" @@ -66836,7 +66878,7 @@ "members":{ "AvailabilityZone":{ "shape":"String", - "documentation":"

The Availability Zone.

[Spot Fleet only] To specify multiple Availability Zones, separate them using commas; for example, \"us-west-2a, us-west-2b\".

", + "documentation":"

The Availability Zone. For example, us-east-2a.

[Spot Fleet only] To specify multiple Availability Zones, separate them using commas; for example, \"us-east-2a, us-east-2b\".

Either AvailabilityZone or AvailabilityZoneId must be specified in the request, but not both.

", "locationName":"availabilityZone" }, "GroupName":{ @@ -66848,6 +66890,11 @@ "shape":"Tenancy", "documentation":"

The tenancy of the instance (if the instance is running in a VPC). An instance with a tenancy of dedicated runs on single-tenant hardware. The host tenancy is not supported for Spot Instances.

", "locationName":"tenancy" + }, + "AvailabilityZoneId":{ + "shape":"String", + "documentation":"

The ID of the Availability Zone. For example, use2-az1.

[Spot Fleet only] To specify multiple Availability Zones, separate them using commas; for example, \"use2-az1, use2-az2\".

Either AvailabilityZone or AvailabilityZoneId must be specified in the request, but not both.

", + "locationName":"availabilityZoneId" } }, "documentation":"

Describes Spot Instance placement.

" diff --git a/awscli/botocore/data/ecr/2015-09-21/service-2.json b/awscli/botocore/data/ecr/2015-09-21/service-2.json index 40ea39bc36bf..9913cbd3f5f6 100644 --- a/awscli/botocore/data/ecr/2015-09-21/service-2.json +++ b/awscli/botocore/data/ecr/2015-09-21/service-2.json @@ -1435,7 +1435,7 @@ }, "appliedFor":{ "shape":"RCTAppliedForList", - "documentation":"

A list of enumerable strings representing the Amazon ECR repository creation scenarios that this template will apply towards. The two supported scenarios are PULL_THROUGH_CACHE and REPLICATION

" + "documentation":"

A list of enumerable strings representing the Amazon ECR repository creation scenarios that this template will apply towards. The supported scenarios are PULL_THROUGH_CACHE, REPLICATION, and CREATE_ON_PUSH

" }, "customRoleArn":{ "shape":"CustomRoleArn", @@ -3139,7 +3139,7 @@ "type":"string", "max":2048, "min":0, - "pattern":"^$|arn:aws:kms:[a-z0-9-]+:[0-9]{12}:key\\/[a-z0-9-]+" + "pattern":"^$|arn:aws[a-z0-9-]*:kms:[a-z0-9-]+:[0-9]{12}:key\\/[a-z0-9-]+" }, "LastActivatedAtTimestamp":{"type":"timestamp"}, "LastArchivedAtTimestamp":{"type":"timestamp"}, @@ -4016,7 +4016,8 @@ "type":"string", "enum":[ "REPLICATION", - "PULL_THROUGH_CACHE" + "PULL_THROUGH_CACHE", + "CREATE_ON_PUSH" ] }, "RCTAppliedForList":{ @@ -4304,7 +4305,7 @@ }, "appliedFor":{ "shape":"RCTAppliedForList", - "documentation":"

A list of enumerable Strings representing the repository creation scenarios that this template will apply towards. The two supported scenarios are PULL_THROUGH_CACHE and REPLICATION

" + "documentation":"

A list of enumerable Strings representing the repository creation scenarios that this template will apply towards. The supported scenarios are PULL_THROUGH_CACHE, REPLICATION, and CREATE_ON_PUSH

" }, "customRoleArn":{ "shape":"CustomRoleArn", @@ -5165,7 +5166,7 @@ }, "appliedFor":{ "shape":"RCTAppliedForList", - "documentation":"

Updates the list of enumerable strings representing the Amazon ECR repository creation scenarios that this template will apply towards. The two supported scenarios are PULL_THROUGH_CACHE and REPLICATION

" + "documentation":"

Updates the list of enumerable strings representing the Amazon ECR repository creation scenarios that this template will apply towards. The supported scenarios are PULL_THROUGH_CACHE, REPLICATION, and CREATE_ON_PUSH

" }, "customRoleArn":{ "shape":"CustomRoleArn", diff --git a/awscli/botocore/data/ecs/2014-11-13/service-2.json b/awscli/botocore/data/ecs/2014-11-13/service-2.json index 89ffe0a98e76..63930f8f896b 100644 --- a/awscli/botocore/data/ecs/2014-11-13/service-2.json +++ b/awscli/botocore/data/ecs/2014-11-13/service-2.json @@ -849,7 +849,7 @@ {"shape":"InvalidParameterException"}, {"shape":"ClusterNotFoundException"} ], - "documentation":"

Stops a running task. Any tags associated with the task will be deleted.

When you call StopTask on a task, the equivalent of docker stop is issued to the containers running in the task. This results in a SIGTERM value and a default 30-second timeout, after which the SIGKILL value is sent and the containers are forcibly stopped. If the container handles the SIGTERM value gracefully and exits within 30 seconds from receiving it, no SIGKILL value is sent.

For Windows containers, POSIX signals do not work and runtime stops the container by sending a CTRL_SHUTDOWN_EVENT. For more information, see Unable to react to graceful shutdown of (Windows) container #25982 on GitHub.

The default 30-second timeout can be configured on the Amazon ECS container agent with the ECS_CONTAINER_STOP_TIMEOUT variable. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" + "documentation":"

Stops a running task. Any tags associated with the task will be deleted.

When you call StopTask on a task, the equivalent of docker stop is issued to the containers running in the task. This results in a stop signal value and a default 30-second timeout, after which the SIGKILL value is sent and the containers are forcibly stopped. This signal can be defined in your container image with the STOPSIGNAL instruction and will default to SIGTERM. If the container handles the SIGTERM value gracefully and exits within 30 seconds from receiving it, no SIGKILL value is sent.

For Windows containers, POSIX signals do not work and runtime stops the container by sending a CTRL_SHUTDOWN_EVENT. For more information, see Unable to react to graceful shutdown of (Windows) container #25982 on GitHub.

The default 30-second timeout can be configured on the Amazon ECS container agent with the ECS_CONTAINER_STOP_TIMEOUT variable. For more information, see Amazon ECS Container Agent Configuration in the Amazon Elastic Container Service Developer Guide.

" }, "SubmitAttachmentStateChanges":{ "name":"SubmitAttachmentStateChanges", @@ -1494,6 +1494,13 @@ }, "documentation":"

Configuration for a canary deployment strategy that shifts a fixed percentage of traffic to the new service revision, waits for a specified bake time, then shifts the remaining traffic.

This is only valid when you run CreateService or UpdateService with deploymentController set to ECS and a deploymentConfiguration with a strategy set to CANARY.

" }, + "CapacityOptionType":{ + "type":"string", + "enum":[ + "ON_DEMAND", + "SPOT" + ] + }, "CapacityProvider":{ "type":"structure", "members":{ @@ -4504,6 +4511,10 @@ "shape":"ManagedInstancesMonitoringOptions", "documentation":"

CloudWatch provides two categories of monitoring: basic monitoring and detailed monitoring. By default, your managed instance is configured for basic monitoring. You can optionally enable detailed monitoring to help you more quickly identify and act on operational issues. You can enable or turn off detailed monitoring at launch or when the managed instance is running or stopped. For more information, see Detailed monitoring for Amazon ECS Managed Instances in the Amazon ECS Developer Guide.

" }, + "capacityOptionType":{ + "shape":"CapacityOptionType", + "documentation":"

The capacity option type. This determines whether Amazon ECS launches On-Demand or Spot Instances for your managed instance capacity provider.

Valid values are:

The default is On-Demand.

For more information about Amazon EC2 capacity options, see Instance purchasing options in the Amazon EC2 User Guide.

" + }, "instanceRequirements":{ "shape":"InstanceRequirementsRequest", "documentation":"

The instance requirements. You can specify:

Amazon ECS automatically selects the instances that match the specified criteria.

" @@ -6251,7 +6262,7 @@ "members":{ "name":{ "shape":"SettingName", - "documentation":"

The resource name for which to modify the account setting.

The following are the valid values for the account setting name.

" + "documentation":"

The resource name for which to modify the account setting.

The following are the valid values for the account setting name.

" }, "value":{ "shape":"String", @@ -6277,7 +6288,7 @@ "members":{ "name":{ "shape":"SettingName", - "documentation":"

The Amazon ECS account setting name to modify.

The following are the valid values for the account setting name.

" + "documentation":"

The Amazon ECS account setting name to modify.

The following are the valid values for the account setting name.

" }, "value":{ "shape":"String", @@ -7708,7 +7719,8 @@ "tagResourceAuthorization", "fargateTaskRetirementWaitPeriod", "guardDutyActivate", - "defaultLogDriverMode" + "defaultLogDriverMode", + "fargateEventWindows" ] }, "SettingType":{ diff --git a/awscli/botocore/data/elastictranscoder/2012-09-25/completions-1.json b/awscli/botocore/data/elastictranscoder/2012-09-25/completions-1.json deleted file mode 100644 index 8d3495d99b60..000000000000 --- a/awscli/botocore/data/elastictranscoder/2012-09-25/completions-1.json +++ /dev/null @@ -1,247 +0,0 @@ -{ - "version": "1.0", - "resources": { - "Pipeline": { - "operation": "ListPipelines", - "resourceIdentifier": { - "Status": "Pipelines[].Status", - "InputBucket": "Pipelines[].InputBucket", - "OutputBucket": "Pipelines[].OutputBucket", - "Role": "Pipelines[].Role", - "AwsKmsKeyArn": "Pipelines[].AwsKmsKeyArn", - "Notifications": "Pipelines[].Notifications", - "ContentConfig": "Pipelines[].ContentConfig", - "ThumbnailConfig": "Pipelines[].ThumbnailConfig" - } - }, - "Preset": { - "operation": "ListPresets", - "resourceIdentifier": { - "Id": "Presets[].Id", - "Name": "Presets[].Name" - } - } - }, - "operations": { - "CancelJob": { - "Id": { - "completions": [ - { - "parameters": {}, - "resourceName": "Preset", - "resourceIdentifier": "Id" - } - ] - } - }, - "DeletePipeline": { - "Id": { - "completions": [ - { - "parameters": {}, - "resourceName": "Preset", - "resourceIdentifier": "Id" - } - ] - } - }, - "DeletePreset": { - "Id": { - "completions": [ - { - "parameters": {}, - "resourceName": "Preset", - "resourceIdentifier": "Id" - } - ] - } - }, - "ListJobsByStatus": { - "Status": { - "completions": [ - { - "parameters": {}, - "resourceName": "Pipeline", - "resourceIdentifier": "Status" - } - ] - } - }, - "ReadJob": { - "Id": { - "completions": [ - { - "parameters": {}, - "resourceName": "Preset", - "resourceIdentifier": "Id" - } - ] - } - }, - "ReadPipeline": { - "Id": { - "completions": [ - { - "parameters": {}, - 
"resourceName": "Preset", - "resourceIdentifier": "Id" - } - ] - } - }, - "ReadPreset": { - "Id": { - "completions": [ - { - "parameters": {}, - "resourceName": "Preset", - "resourceIdentifier": "Id" - } - ] - } - }, - "TestRole": { - "Role": { - "completions": [ - { - "parameters": {}, - "resourceName": "Pipeline", - "resourceIdentifier": "Role" - } - ] - }, - "InputBucket": { - "completions": [ - { - "parameters": {}, - "resourceName": "Pipeline", - "resourceIdentifier": "InputBucket" - } - ] - }, - "OutputBucket": { - "completions": [ - { - "parameters": {}, - "resourceName": "Pipeline", - "resourceIdentifier": "OutputBucket" - } - ] - } - }, - "UpdatePipeline": { - "Id": { - "completions": [ - { - "parameters": {}, - "resourceName": "Preset", - "resourceIdentifier": "Id" - } - ] - }, - "Name": { - "completions": [ - { - "parameters": {}, - "resourceName": "Preset", - "resourceIdentifier": "Name" - } - ] - }, - "InputBucket": { - "completions": [ - { - "parameters": {}, - "resourceName": "Pipeline", - "resourceIdentifier": "InputBucket" - } - ] - }, - "Role": { - "completions": [ - { - "parameters": {}, - "resourceName": "Pipeline", - "resourceIdentifier": "Role" - } - ] - }, - "AwsKmsKeyArn": { - "completions": [ - { - "parameters": {}, - "resourceName": "Pipeline", - "resourceIdentifier": "AwsKmsKeyArn" - } - ] - }, - "Notifications": { - "completions": [ - { - "parameters": {}, - "resourceName": "Pipeline", - "resourceIdentifier": "Notifications" - } - ] - }, - "ContentConfig": { - "completions": [ - { - "parameters": {}, - "resourceName": "Pipeline", - "resourceIdentifier": "ContentConfig" - } - ] - }, - "ThumbnailConfig": { - "completions": [ - { - "parameters": {}, - "resourceName": "Pipeline", - "resourceIdentifier": "ThumbnailConfig" - } - ] - } - }, - "UpdatePipelineNotifications": { - "Id": { - "completions": [ - { - "parameters": {}, - "resourceName": "Preset", - "resourceIdentifier": "Id" - } - ] - }, - "Notifications": { - "completions": [ - { - 
"parameters": {}, - "resourceName": "Pipeline", - "resourceIdentifier": "Notifications" - } - ] - } - }, - "UpdatePipelineStatus": { - "Id": { - "completions": [ - { - "parameters": {}, - "resourceName": "Preset", - "resourceIdentifier": "Id" - } - ] - }, - "Status": { - "completions": [ - { - "parameters": {}, - "resourceName": "Pipeline", - "resourceIdentifier": "Status" - } - ] - } - } - } -} diff --git a/awscli/botocore/data/elastictranscoder/2012-09-25/endpoint-rule-set-1.json b/awscli/botocore/data/elastictranscoder/2012-09-25/endpoint-rule-set-1.json deleted file mode 100644 index fe4524b0b419..000000000000 --- a/awscli/botocore/data/elastictranscoder/2012-09-25/endpoint-rule-set-1.json +++ /dev/null @@ -1,314 +0,0 @@ -{ - "version": "1.0", - "parameters": { - "Region": { - "builtIn": "AWS::Region", - "required": false, - "documentation": "The AWS region used to dispatch the request.", - "type": "string" - }, - "UseDualStack": { - "builtIn": "AWS::UseDualStack", - "required": true, - "default": false, - "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", - "type": "boolean" - }, - "UseFIPS": { - "builtIn": "AWS::UseFIPS", - "required": true, - "default": false, - "documentation": "When true, send this request to the FIPS-compliant regional endpoint. 
If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", - "type": "boolean" - }, - "Endpoint": { - "builtIn": "SDK::Endpoint", - "required": false, - "documentation": "Override the endpoint used to send this request", - "type": "string" - } - }, - "rules": [ - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] - } - ], - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "error": "Invalid Configuration: FIPS and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Region" - } - ] - } - ], - "rules": [ - { - "conditions": [ - { - "fn": "aws.partition", - "argv": [ - { - "ref": "Region" - } - ], - "assign": "PartitionResult" - } - ], - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - } - ] - }, - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://elastictranscoder-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": 
{}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" - }, - { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - } - ], - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] - }, - true - ] - } - ], - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://elastictranscoder-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" - }, - { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseDualStack" - }, - true - ] - } - ], - "rules": [ - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ - true, - { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsDualStack" - ] - } - ] - } - ], - "rules": [ - { - "conditions": [], - "endpoint": { - "url": "https://elastictranscoder.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" - }, - { - "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [], - "endpoint": { - "url": "https://elastictranscoder.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" - } - ], - "type": "tree" - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" - } - ] -} \ No newline at end of file diff --git 
a/awscli/botocore/data/elastictranscoder/2012-09-25/paginators-1.json b/awscli/botocore/data/elastictranscoder/2012-09-25/paginators-1.json deleted file mode 100644 index 5a145d3688b8..000000000000 --- a/awscli/botocore/data/elastictranscoder/2012-09-25/paginators-1.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "pagination": { - "ListJobsByPipeline": { - "input_token": "PageToken", - "output_token": "NextPageToken", - "result_key": "Jobs" - }, - "ListJobsByStatus": { - "input_token": "PageToken", - "output_token": "NextPageToken", - "result_key": "Jobs" - }, - "ListPipelines": { - "input_token": "PageToken", - "output_token": "NextPageToken", - "result_key": "Pipelines" - }, - "ListPresets": { - "input_token": "PageToken", - "output_token": "NextPageToken", - "result_key": "Presets" - } - } -} diff --git a/awscli/botocore/data/elastictranscoder/2012-09-25/service-2.json b/awscli/botocore/data/elastictranscoder/2012-09-25/service-2.json deleted file mode 100644 index 1ddff000721a..000000000000 --- a/awscli/botocore/data/elastictranscoder/2012-09-25/service-2.json +++ /dev/null @@ -1,2329 +0,0 @@ -{ - "version":"2.0", - "metadata":{ - "apiVersion":"2012-09-25", - "endpointPrefix":"elastictranscoder", - "protocol":"rest-json", - "protocols":["rest-json"], - "serviceFullName":"Amazon Elastic Transcoder", - "serviceId":"Elastic Transcoder", - "signatureVersion":"v4", - "uid":"elastictranscoder-2012-09-25", - "auth":["aws.auth#sigv4"] - }, - "operations":{ - "CancelJob":{ - "name":"CancelJob", - "http":{ - "method":"DELETE", - "requestUri":"/2012-09-25/jobs/{Id}", - "responseCode":202 - }, - "input":{"shape":"CancelJobRequest"}, - "output":{"shape":"CancelJobResponse"}, - "errors":[ - {"shape":"ValidationException"}, - {"shape":"IncompatibleVersionException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ResourceInUseException"}, - {"shape":"AccessDeniedException"}, - {"shape":"InternalServiceException"} - ], - "documentation":"

The CancelJob operation cancels an unfinished job.

You can only cancel a job that has a status of Submitted. To prevent a pipeline from starting to process a job while you're getting the job identifier, use UpdatePipelineStatus to temporarily pause the pipeline.

" - }, - "CreateJob":{ - "name":"CreateJob", - "http":{ - "method":"POST", - "requestUri":"/2012-09-25/jobs", - "responseCode":201 - }, - "input":{"shape":"CreateJobRequest"}, - "output":{"shape":"CreateJobResponse"}, - "errors":[ - {"shape":"ValidationException"}, - {"shape":"IncompatibleVersionException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"AccessDeniedException"}, - {"shape":"LimitExceededException"}, - {"shape":"InternalServiceException"} - ], - "documentation":"

When you create a job, Elastic Transcoder returns JSON data that includes the values that you specified plus information about the job that is created.

If you have specified more than one output for your jobs (for example, one output for the Kindle Fire and another output for the Apple iPhone 4s), you currently must use the Elastic Transcoder API to list the jobs (as opposed to the AWS Console).

" - }, - "CreatePipeline":{ - "name":"CreatePipeline", - "http":{ - "method":"POST", - "requestUri":"/2012-09-25/pipelines", - "responseCode":201 - }, - "input":{"shape":"CreatePipelineRequest"}, - "output":{"shape":"CreatePipelineResponse"}, - "errors":[ - {"shape":"ValidationException"}, - {"shape":"IncompatibleVersionException"}, - {"shape":"AccessDeniedException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"LimitExceededException"}, - {"shape":"InternalServiceException"} - ], - "documentation":"

The CreatePipeline operation creates a pipeline with settings that you specify.

" - }, - "CreatePreset":{ - "name":"CreatePreset", - "http":{ - "method":"POST", - "requestUri":"/2012-09-25/presets", - "responseCode":201 - }, - "input":{"shape":"CreatePresetRequest"}, - "output":{"shape":"CreatePresetResponse"}, - "errors":[ - {"shape":"ValidationException"}, - {"shape":"IncompatibleVersionException"}, - {"shape":"AccessDeniedException"}, - {"shape":"LimitExceededException"}, - {"shape":"InternalServiceException"} - ], - "documentation":"

The CreatePreset operation creates a preset with settings that you specify.

Elastic Transcoder checks the CreatePreset settings to ensure that they meet Elastic Transcoder requirements and to determine whether they comply with H.264 standards. If your settings are not valid for Elastic Transcoder, Elastic Transcoder returns an HTTP 400 response (ValidationException) and does not create the preset. If the settings are valid for Elastic Transcoder but aren't strictly compliant with the H.264 standard, Elastic Transcoder creates the preset and returns a warning message in the response. This helps you determine whether your settings comply with the H.264 standard while giving you greater flexibility with respect to the video that Elastic Transcoder produces.

Elastic Transcoder uses the H.264 video-compression format. For more information, see the International Telecommunication Union publication Recommendation ITU-T H.264: Advanced video coding for generic audiovisual services.

" - }, - "DeletePipeline":{ - "name":"DeletePipeline", - "http":{ - "method":"DELETE", - "requestUri":"/2012-09-25/pipelines/{Id}", - "responseCode":202 - }, - "input":{"shape":"DeletePipelineRequest"}, - "output":{"shape":"DeletePipelineResponse"}, - "errors":[ - {"shape":"ValidationException"}, - {"shape":"IncompatibleVersionException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ResourceInUseException"}, - {"shape":"AccessDeniedException"}, - {"shape":"InternalServiceException"} - ], - "documentation":"

The DeletePipeline operation removes a pipeline.

You can only delete a pipeline that has never been used or that is not currently in use (doesn't contain any active jobs). If the pipeline is currently in use, DeletePipeline returns an error.

" - }, - "DeletePreset":{ - "name":"DeletePreset", - "http":{ - "method":"DELETE", - "requestUri":"/2012-09-25/presets/{Id}", - "responseCode":202 - }, - "input":{"shape":"DeletePresetRequest"}, - "output":{"shape":"DeletePresetResponse"}, - "errors":[ - {"shape":"ValidationException"}, - {"shape":"IncompatibleVersionException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"AccessDeniedException"}, - {"shape":"InternalServiceException"} - ], - "documentation":"

The DeletePreset operation removes a preset that you've added in an AWS region.

You can't delete the default presets that are included with Elastic Transcoder.

" - }, - "ListJobsByPipeline":{ - "name":"ListJobsByPipeline", - "http":{ - "method":"GET", - "requestUri":"/2012-09-25/jobsByPipeline/{PipelineId}" - }, - "input":{"shape":"ListJobsByPipelineRequest"}, - "output":{"shape":"ListJobsByPipelineResponse"}, - "errors":[ - {"shape":"ValidationException"}, - {"shape":"IncompatibleVersionException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"AccessDeniedException"}, - {"shape":"InternalServiceException"} - ], - "documentation":"

The ListJobsByPipeline operation gets a list of the jobs currently in a pipeline.

Elastic Transcoder returns all of the jobs currently in the specified pipeline. The response body contains one element for each job that satisfies the search criteria.

" - }, - "ListJobsByStatus":{ - "name":"ListJobsByStatus", - "http":{ - "method":"GET", - "requestUri":"/2012-09-25/jobsByStatus/{Status}" - }, - "input":{"shape":"ListJobsByStatusRequest"}, - "output":{"shape":"ListJobsByStatusResponse"}, - "errors":[ - {"shape":"ValidationException"}, - {"shape":"IncompatibleVersionException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"AccessDeniedException"}, - {"shape":"InternalServiceException"} - ], - "documentation":"

The ListJobsByStatus operation gets a list of jobs that have a specified status. The response body contains one element for each job that satisfies the search criteria.

" - }, - "ListPipelines":{ - "name":"ListPipelines", - "http":{ - "method":"GET", - "requestUri":"/2012-09-25/pipelines" - }, - "input":{"shape":"ListPipelinesRequest"}, - "output":{"shape":"ListPipelinesResponse"}, - "errors":[ - {"shape":"ValidationException"}, - {"shape":"IncompatibleVersionException"}, - {"shape":"AccessDeniedException"}, - {"shape":"InternalServiceException"} - ], - "documentation":"

The ListPipelines operation gets a list of the pipelines associated with the current AWS account.

" - }, - "ListPresets":{ - "name":"ListPresets", - "http":{ - "method":"GET", - "requestUri":"/2012-09-25/presets" - }, - "input":{"shape":"ListPresetsRequest"}, - "output":{"shape":"ListPresetsResponse"}, - "errors":[ - {"shape":"ValidationException"}, - {"shape":"IncompatibleVersionException"}, - {"shape":"AccessDeniedException"}, - {"shape":"InternalServiceException"} - ], - "documentation":"

The ListPresets operation gets a list of the default presets included with Elastic Transcoder and the presets that you've added in an AWS region.

" - }, - "ReadJob":{ - "name":"ReadJob", - "http":{ - "method":"GET", - "requestUri":"/2012-09-25/jobs/{Id}" - }, - "input":{"shape":"ReadJobRequest"}, - "output":{"shape":"ReadJobResponse"}, - "errors":[ - {"shape":"ValidationException"}, - {"shape":"IncompatibleVersionException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"AccessDeniedException"}, - {"shape":"InternalServiceException"} - ], - "documentation":"

The ReadJob operation returns detailed information about a job.

" - }, - "ReadPipeline":{ - "name":"ReadPipeline", - "http":{ - "method":"GET", - "requestUri":"/2012-09-25/pipelines/{Id}" - }, - "input":{"shape":"ReadPipelineRequest"}, - "output":{"shape":"ReadPipelineResponse"}, - "errors":[ - {"shape":"ValidationException"}, - {"shape":"IncompatibleVersionException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"AccessDeniedException"}, - {"shape":"InternalServiceException"} - ], - "documentation":"

The ReadPipeline operation gets detailed information about a pipeline.

" - }, - "ReadPreset":{ - "name":"ReadPreset", - "http":{ - "method":"GET", - "requestUri":"/2012-09-25/presets/{Id}" - }, - "input":{"shape":"ReadPresetRequest"}, - "output":{"shape":"ReadPresetResponse"}, - "errors":[ - {"shape":"ValidationException"}, - {"shape":"IncompatibleVersionException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"AccessDeniedException"}, - {"shape":"InternalServiceException"} - ], - "documentation":"

The ReadPreset operation gets detailed information about a preset.

" - }, - "TestRole":{ - "name":"TestRole", - "http":{ - "method":"POST", - "requestUri":"/2012-09-25/roleTests", - "responseCode":200 - }, - "input":{"shape":"TestRoleRequest"}, - "output":{"shape":"TestRoleResponse"}, - "errors":[ - {"shape":"ValidationException"}, - {"shape":"IncompatibleVersionException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"AccessDeniedException"}, - {"shape":"InternalServiceException"} - ], - "documentation":"

The TestRole operation tests the IAM role used to create the pipeline.

The TestRole action lets you determine whether the IAM role you are using has sufficient permissions to let Elastic Transcoder perform tasks associated with the transcoding process. The action attempts to assume the specified IAM role, checks read access to the input and output buckets, and tries to send a test notification to Amazon SNS topics that you specify.

", - "deprecated":true - }, - "UpdatePipeline":{ - "name":"UpdatePipeline", - "http":{ - "method":"PUT", - "requestUri":"/2012-09-25/pipelines/{Id}", - "responseCode":200 - }, - "input":{"shape":"UpdatePipelineRequest"}, - "output":{"shape":"UpdatePipelineResponse"}, - "errors":[ - {"shape":"ValidationException"}, - {"shape":"IncompatibleVersionException"}, - {"shape":"AccessDeniedException"}, - {"shape":"ResourceInUseException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"InternalServiceException"} - ], - "documentation":"

Use the UpdatePipeline operation to update settings for a pipeline.

When you change pipeline settings, your changes take effect immediately. Jobs that you have already submitted and that Elastic Transcoder has not started to process are affected in addition to jobs that you submit after you change settings.

" - }, - "UpdatePipelineNotifications":{ - "name":"UpdatePipelineNotifications", - "http":{ - "method":"POST", - "requestUri":"/2012-09-25/pipelines/{Id}/notifications" - }, - "input":{"shape":"UpdatePipelineNotificationsRequest"}, - "output":{"shape":"UpdatePipelineNotificationsResponse"}, - "errors":[ - {"shape":"ValidationException"}, - {"shape":"IncompatibleVersionException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ResourceInUseException"}, - {"shape":"AccessDeniedException"}, - {"shape":"InternalServiceException"} - ], - "documentation":"

With the UpdatePipelineNotifications operation, you can update Amazon Simple Notification Service (Amazon SNS) notifications for a pipeline.

When you update notifications for a pipeline, Elastic Transcoder returns the values that you specified in the request.

" - }, - "UpdatePipelineStatus":{ - "name":"UpdatePipelineStatus", - "http":{ - "method":"POST", - "requestUri":"/2012-09-25/pipelines/{Id}/status" - }, - "input":{"shape":"UpdatePipelineStatusRequest"}, - "output":{"shape":"UpdatePipelineStatusResponse"}, - "errors":[ - {"shape":"ValidationException"}, - {"shape":"IncompatibleVersionException"}, - {"shape":"ResourceNotFoundException"}, - {"shape":"ResourceInUseException"}, - {"shape":"AccessDeniedException"}, - {"shape":"InternalServiceException"} - ], - "documentation":"

The UpdatePipelineStatus operation pauses or reactivates a pipeline, so that the pipeline stops or restarts the processing of jobs.

Changing the pipeline status is useful if you want to cancel one or more jobs. You can't cancel jobs after Elastic Transcoder has started processing them; if you pause the pipeline to which you submitted the jobs, you have more time to get the job IDs for the jobs that you want to cancel, and to send a CancelJob request.

" - } - }, - "shapes":{ - "AccessControl":{ - "type":"string", - "pattern":"(^FullControl$)|(^Read$)|(^ReadAcp$)|(^WriteAcp$)" - }, - "AccessControls":{ - "type":"list", - "member":{"shape":"AccessControl"}, - "max":30 - }, - "AccessDeniedException":{ - "type":"structure", - "members":{}, - "documentation":"

General authentication failure. The request was not signed correctly.

", - "error":{"httpStatusCode":403}, - "exception":true - }, - "Artwork":{ - "type":"structure", - "members":{ - "InputKey":{ - "shape":"WatermarkKey", - "documentation":"

The name of the file to be used as album art. To determine which Amazon S3 bucket contains the specified file, Elastic Transcoder checks the pipeline specified by PipelineId; the InputBucket object in that pipeline identifies the bucket.

If the file name includes a prefix, for example, cooking/pie.jpg, include the prefix in the key. If the file isn't in the specified bucket, Elastic Transcoder returns an error.

" - }, - "MaxWidth":{ - "shape":"DigitsOrAuto", - "documentation":"

The maximum width of the output album art in pixels. If you specify auto, Elastic Transcoder uses 600 as the default value. If you specify a numeric value, enter an even integer between 32 and 4096, inclusive.

" - }, - "MaxHeight":{ - "shape":"DigitsOrAuto", - "documentation":"

The maximum height of the output album art in pixels. If you specify auto, Elastic Transcoder uses 600 as the default value. If you specify a numeric value, enter an even integer between 32 and 3072, inclusive.

" - }, - "SizingPolicy":{ - "shape":"SizingPolicy", - "documentation":"

Specify one of the following values to control scaling of the output album art:

" - }, - "PaddingPolicy":{ - "shape":"PaddingPolicy", - "documentation":"

When you set PaddingPolicy to Pad, Elastic Transcoder may add white bars to the top and bottom and/or left and right sides of the output album art to make the total size of the output art match the values that you specified for MaxWidth and MaxHeight.

" - }, - "AlbumArtFormat":{ - "shape":"JpgOrPng", - "documentation":"

The format of album art, if any. Valid formats are .jpg and .png.

" - }, - "Encryption":{ - "shape":"Encryption", - "documentation":"

The encryption settings, if any, that you want Elastic Transcoder to apply to your artwork.

" - } - }, - "documentation":"

The file to be used as album art. There can be multiple artworks associated with an audio file, to a maximum of 20.

To remove artwork or leave the artwork empty, you can either set Artwork to null, or set the Merge Policy to \"Replace\" and use an empty Artwork array.

To pass through existing artwork unchanged, set the Merge Policy to \"Prepend\", \"Append\", or \"Fallback\", and use an empty Artwork array.

" - }, - "Artworks":{ - "type":"list", - "member":{"shape":"Artwork"} - }, - "Ascending":{ - "type":"string", - "pattern":"(^true$)|(^false$)" - }, - "AspectRatio":{ - "type":"string", - "pattern":"(^auto$)|(^1:1$)|(^4:3$)|(^3:2$)|(^16:9$)" - }, - "AudioBitDepth":{ - "type":"string", - "pattern":"(^8$)|(^16$)|(^24$)|(^32$)" - }, - "AudioBitOrder":{ - "type":"string", - "pattern":"(^LittleEndian$)" - }, - "AudioBitRate":{ - "type":"string", - "pattern":"^\\d{1,3}$" - }, - "AudioChannels":{ - "type":"string", - "pattern":"(^auto$)|(^0$)|(^1$)|(^2$)" - }, - "AudioCodec":{ - "type":"string", - "pattern":"(^AAC$)|(^vorbis$)|(^mp3$)|(^mp2$)|(^pcm$)|(^flac$)" - }, - "AudioCodecOptions":{ - "type":"structure", - "members":{ - "Profile":{ - "shape":"AudioCodecProfile", - "documentation":"

You can only choose an audio profile when you specify AAC for the value of Audio:Codec.

Specify the AAC profile for the output file. Elastic Transcoder supports the following profiles:

All outputs in a Smooth playlist must have the same value for Profile.

If you created any presets before AAC profiles were added, Elastic Transcoder automatically updated your presets to use AAC-LC. You can change the value as required.

" - }, - "BitDepth":{ - "shape":"AudioBitDepth", - "documentation":"

You can only choose an audio bit depth when you specify flac or pcm for the value of Audio:Codec.

The bit depth of a sample is how many bits of information are included in the audio samples. The higher the bit depth, the better the audio, but the larger the file.

Valid values are 16 and 24.

The most common bit depth is 24.

" - }, - "BitOrder":{ - "shape":"AudioBitOrder", - "documentation":"

You can only choose an audio bit order when you specify pcm for the value of Audio:Codec.

The order the bits of a PCM sample are stored in.

The supported value is LittleEndian.

" - }, - "Signed":{ - "shape":"AudioSigned", - "documentation":"

You can only choose whether an audio sample is signed when you specify pcm for the value of Audio:Codec.

Whether audio samples are represented with negative and positive numbers (signed) or only positive numbers (unsigned).

The supported value is Signed.

" - } - }, - "documentation":"

Options associated with your audio codec.

" - }, - "AudioCodecProfile":{ - "type":"string", - "pattern":"(^auto$)|(^AAC-LC$)|(^HE-AAC$)|(^HE-AACv2$)" - }, - "AudioPackingMode":{ - "type":"string", - "pattern":"(^SingleTrack$)|(^OneChannelPerTrack$)|(^OneChannelPerTrackWithMosTo8Tracks$)" - }, - "AudioParameters":{ - "type":"structure", - "members":{ - "Codec":{ - "shape":"AudioCodec", - "documentation":"

The audio codec for the output file. Valid values include aac, flac, mp2, mp3, pcm, and vorbis.

" - }, - "SampleRate":{ - "shape":"AudioSampleRate", - "documentation":"

The sample rate of the audio stream in the output file, in Hertz. Valid values include:

auto, 22050, 32000, 44100, 48000, 96000

If you specify auto, Elastic Transcoder automatically detects the sample rate.

" - }, - "BitRate":{ - "shape":"AudioBitRate", - "documentation":"

The bit rate of the audio stream in the output file, in kilobits/second. Enter an integer between 64 and 320, inclusive.

" - }, - "Channels":{ - "shape":"AudioChannels", - "documentation":"

The number of audio channels in the output file. The following values are valid:

auto, 0, 1, 2

One channel carries the information played by a single speaker. For example, a stereo track with two channels sends one channel to the left speaker, and the other channel to the right speaker. The output channels are organized into tracks. If you want Elastic Transcoder to automatically detect the number of audio channels in the input file and use that value for the output file, select auto.

The output of a specific channel value and inputs are as follows:

For more information about how Elastic Transcoder organizes channels and tracks, see Audio:AudioPackingMode.

" - }, - "AudioPackingMode":{ - "shape":"AudioPackingMode", - "documentation":"

The method of organizing audio channels and tracks. Use Audio:Channels to specify the number of channels in your output, and Audio:AudioPackingMode to specify the number of tracks and their relation to the channels. If you do not specify an Audio:AudioPackingMode, Elastic Transcoder uses SingleTrack.

The following values are valid:

SingleTrack, OneChannelPerTrack, and OneChannelPerTrackWithMosTo8Tracks

When you specify SingleTrack, Elastic Transcoder creates a single track for your output. The track can have up to eight channels. Use SingleTrack for all non-mxf containers.

The outputs of SingleTrack for a specific channel value and inputs are as follows:

When you specify OneChannelPerTrack, Elastic Transcoder creates a new track for every channel in your output. Your output can have up to eight single-channel tracks.

The outputs of OneChannelPerTrack for a specific channel value and inputs are as follows:

When you specify OneChannelPerTrackWithMosTo8Tracks, Elastic Transcoder creates eight single-channel tracks for your output. All tracks that do not contain audio data from an input channel are MOS, or Mit Out Sound, tracks.

The outputs of OneChannelPerTrackWithMosTo8Tracks for a specific channel value and inputs are as follows:

" - }, - "CodecOptions":{ - "shape":"AudioCodecOptions", - "documentation":"

If you specified AAC for Audio:Codec, this is the AAC compression profile to use. Valid values include:

auto, AAC-LC, HE-AAC, HE-AACv2

If you specify auto, Elastic Transcoder chooses a profile based on the bit rate of the output file.

" - } - }, - "documentation":"

Parameters required for transcoding audio.

" - }, - "AudioSampleRate":{ - "type":"string", - "pattern":"(^auto$)|(^22050$)|(^32000$)|(^44100$)|(^48000$)|(^96000$)|(^192000$)" - }, - "AudioSigned":{ - "type":"string", - "pattern":"(^Unsigned$)|(^Signed$)" - }, - "Base64EncodedString":{ - "type":"string", - "pattern":"^$|(^(?:[A-Za-z0-9\\+/]{4})*(?:[A-Za-z0-9\\+/]{2}==|[A-Za-z0-9\\+/]{3}=)?$)" - }, - "BucketName":{ - "type":"string", - "pattern":"^(\\w|\\.|-){1,255}$" - }, - "CancelJobRequest":{ - "type":"structure", - "required":["Id"], - "members":{ - "Id":{ - "shape":"Id", - "documentation":"

The identifier of the job that you want to cancel.

To get a list of the jobs (including their jobId) that have a status of Submitted, use the ListJobsByStatus API action.

", - "location":"uri", - "locationName":"Id" - } - }, - "documentation":"

The CancelJobRequest structure.

" - }, - "CancelJobResponse":{ - "type":"structure", - "members":{}, - "documentation":"

The response body contains a JSON object. If the job is successfully canceled, the value of Success is true.

" - }, - "CaptionFormat":{ - "type":"structure", - "members":{ - "Format":{ - "shape":"CaptionFormatFormat", - "documentation":"

The format you specify determines whether Elastic Transcoder generates an embedded or sidecar caption for this output.

" - }, - "Pattern":{ - "shape":"CaptionFormatPattern", - "documentation":"

The prefix for caption filenames, in the form description-{language}, where:

If you don't include {language} in the file name pattern, Elastic Transcoder automatically appends \"{language}\" to the value that you specify for the description. In addition, Elastic Transcoder automatically appends the count to the end of the segment files.

For example, suppose you're transcoding into srt format. When you enter \"Sydney-{language}-sunrise\", and the language of the captions is English (en), the name of the first caption file is Sydney-en-sunrise00000.srt.

" - }, - "Encryption":{ - "shape":"Encryption", - "documentation":"

The encryption settings, if any, that you want Elastic Transcoder to apply to your caption formats.

" - } - }, - "documentation":"

The file format of the output captions. If you leave this value blank, Elastic Transcoder returns an error.

" - }, - "CaptionFormatFormat":{ - "type":"string", - "pattern":"(^mov-text$)|(^srt$)|(^scc$)|(^webvtt$)|(^dfxp$)|(^cea-708$)" - }, - "CaptionFormatPattern":{ - "type":"string", - "pattern":"(^$)|(^.*\\{language\\}.*$)" - }, - "CaptionFormats":{ - "type":"list", - "member":{"shape":"CaptionFormat"}, - "max":4 - }, - "CaptionMergePolicy":{ - "type":"string", - "pattern":"(^MergeOverride$)|(^MergeRetain$)|(^Override$)" - }, - "CaptionSource":{ - "type":"structure", - "members":{ - "Key":{ - "shape":"LongKey", - "documentation":"

The name of the sidecar caption file that you want Elastic Transcoder to include in the output file.

" - }, - "Language":{ - "shape":"Key", - "documentation":"

A string that specifies the language of the caption. If you specified multiple inputs with captions, the caption language must match in order to be included in the output. Specify this as one of:

For more information on ISO language codes and language names, see the List of ISO 639-1 codes.

" - }, - "TimeOffset":{ - "shape":"TimeOffset", - "documentation":"

For clip generation or captions that do not start at the same time as the associated video file, the TimeOffset tells Elastic Transcoder how much of the video to encode before including captions.

Specify the TimeOffset in the form [+-]SS.sss or [+-]HH:mm:SS.ss.

" - }, - "Label":{ - "shape":"Name", - "documentation":"

The label of the caption shown in the player when choosing a language. We recommend that you put the caption language name here, in the language of the captions.

" - }, - "Encryption":{ - "shape":"Encryption", - "documentation":"

The encryption settings, if any, that Elastic Transcoder needs to decrypt your caption sources, or that you want Elastic Transcoder to apply to your caption sources.

" - } - }, - "documentation":"

A source file for the input sidecar captions used during the transcoding process.

" - }, - "CaptionSources":{ - "type":"list", - "member":{"shape":"CaptionSource"}, - "max":20 - }, - "Captions":{ - "type":"structure", - "members":{ - "MergePolicy":{ - "shape":"CaptionMergePolicy", - "documentation":"

A policy that determines how Elastic Transcoder handles the existence of multiple captions.

MergePolicy cannot be null.

", - "deprecated":true - }, - "CaptionSources":{ - "shape":"CaptionSources", - "documentation":"

Source files for the input sidecar captions used during the transcoding process. To omit all sidecar captions, leave CaptionSources blank.

", - "deprecated":true - }, - "CaptionFormats":{ - "shape":"CaptionFormats", - "documentation":"

The array of file formats for the output captions. If you leave this value blank, Elastic Transcoder returns an error.

" - } - }, - "documentation":"

The captions to be created, if any.

" - }, - "Clip":{ - "type":"structure", - "members":{ - "TimeSpan":{ - "shape":"TimeSpan", - "documentation":"

Settings that determine when a clip begins and how long it lasts.

" - } - }, - "documentation":"

Settings for one clip in a composition. All jobs in a playlist must have the same clip settings.

", - "deprecated":true - }, - "CodecOption":{ - "type":"string", - "max":255, - "min":1 - }, - "CodecOptions":{ - "type":"map", - "key":{"shape":"CodecOption"}, - "value":{"shape":"CodecOption"}, - "max":30 - }, - "Composition":{ - "type":"list", - "member":{"shape":"Clip"}, - "deprecated":true - }, - "CreateJobOutput":{ - "type":"structure", - "members":{ - "Key":{ - "shape":"Key", - "documentation":"

The name to assign to the transcoded file. Elastic Transcoder saves the file in the Amazon S3 bucket specified by the OutputBucket object in the pipeline that is specified by the pipeline ID. If a file with the specified name already exists in the output bucket, the job fails.

" - }, - "ThumbnailPattern":{ - "shape":"ThumbnailPattern", - "documentation":"

Whether you want Elastic Transcoder to create thumbnails for your videos and, if so, how you want Elastic Transcoder to name the files.

If you don't want Elastic Transcoder to create thumbnails, specify \"\".

If you do want Elastic Transcoder to create thumbnails, specify the information that you want to include in the file name for each thumbnail. You can specify the following values in any sequence:

When creating thumbnails, Elastic Transcoder automatically saves the files in the format (.jpg or .png) that appears in the preset that you specified in the PresetID value of CreateJobOutput. Elastic Transcoder also appends the applicable file name extension.

" - }, - "ThumbnailEncryption":{ - "shape":"Encryption", - "documentation":"

The encryption settings, if any, that you want Elastic Transcoder to apply to your thumbnail.

" - }, - "Rotate":{ - "shape":"Rotate", - "documentation":"

The number of degrees clockwise by which you want Elastic Transcoder to rotate the output relative to the input. Enter one of the following values: auto, 0, 90, 180, 270. The value auto generally works only if the file that you're transcoding contains rotation metadata.

" - }, - "PresetId":{ - "shape":"Id", - "documentation":"

The Id of the preset to use for this job. The preset determines the audio, video, and thumbnail settings that Elastic Transcoder uses for transcoding.

" - }, - "SegmentDuration":{ - "shape":"FloatString", - "documentation":"

(Outputs in Fragmented MP4 or MPEG-TS format only.)

If you specify a preset in PresetId for which the value of Container is fmp4 (Fragmented MP4) or ts (MPEG-TS), SegmentDuration is the target maximum duration of each segment in seconds. For HLSv3 format playlists, each media segment is stored in a separate .ts file. For HLSv4 and Smooth playlists, all media segments for an output are stored in a single file. Each segment is approximately the length of the SegmentDuration, though individual segments might be shorter or longer.

The range of valid values is 1 to 60 seconds. If the duration of the video is not evenly divisible by SegmentDuration, the duration of the last segment is the remainder of total length/SegmentDuration.

Elastic Transcoder creates an output-specific playlist for each HLS output that you specify in OutputKeys. To add an output to the master playlist for this job, include it in the OutputKeys of the associated playlist.

" - }, - "Watermarks":{ - "shape":"JobWatermarks", - "documentation":"

Information about the watermarks that you want Elastic Transcoder to add to the video during transcoding. You can specify up to four watermarks for each output. Settings for each watermark must be defined in the preset for the current output.

" - }, - "AlbumArt":{ - "shape":"JobAlbumArt", - "documentation":"

Information about the album art that you want Elastic Transcoder to add to the file during transcoding. You can specify up to twenty album artworks for each output. Settings for each artwork must be defined in the job for the current output.

" - }, - "Composition":{ - "shape":"Composition", - "documentation":"

You can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.

", - "deprecated":true - }, - "Captions":{ - "shape":"Captions", - "documentation":"

You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another. All captions must be in UTF-8. Elastic Transcoder supports two types of captions:

If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.

Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does not preserve text formatting (for example, italics) during the transcoding process.

To remove captions or leave the captions empty, set Captions to null. To pass through existing captions unchanged, set the MergePolicy to MergeRetain, and pass in a null CaptionSources array.

For more information on embedded files, see the Subtitles Wikipedia page.

For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file Wikipedia pages.

" - }, - "Encryption":{ - "shape":"Encryption", - "documentation":"

You can specify encryption settings for any output files that you want to use for a transcoding job. This includes the output file and any watermarks, thumbnails, album art, or captions that you want to use. You must specify encryption settings for each file individually.

" - } - }, - "documentation":"

The CreateJobOutput structure.

" - }, - "CreateJobOutputs":{ - "type":"list", - "member":{"shape":"CreateJobOutput"}, - "max":30 - }, - "CreateJobPlaylist":{ - "type":"structure", - "members":{ - "Name":{ - "shape":"Filename", - "documentation":"

The name that you want Elastic Transcoder to assign to the master playlist, for example, nyc-vacation.m3u8. If the name includes a / character, the section of the name before the last / must be identical for all Name objects. If you create more than one master playlist, the values of all Name objects must be unique.

Elastic Transcoder automatically appends the relevant file extension to the file name (.m3u8 for HLSv3 and HLSv4 playlists, and .ism and .ismc for Smooth playlists). If you include a file extension in Name, the file name will have two extensions.

" - }, - "Format":{ - "shape":"PlaylistFormat", - "documentation":"

The format of the output playlist. Valid formats include HLSv3, HLSv4, and Smooth.

" - }, - "OutputKeys":{ - "shape":"OutputKeys", - "documentation":"

For each output in this job that you want to include in a master playlist, the value of the Outputs:Key object.

Elastic Transcoder automatically appends the relevant file extension to the file name. If you include a file extension in Output Key, the file name will have two extensions.

If you include more than one output in a playlist, any segment duration settings, clip settings, or caption settings must be the same for all outputs in the playlist. For Smooth playlists, the Audio:Profile, Video:Profile, and Video:FrameRate to Video:KeyframesMaxDist ratio must be the same for all outputs.

" - }, - "HlsContentProtection":{ - "shape":"HlsContentProtection", - "documentation":"

The HLS content protection settings, if any, that you want Elastic Transcoder to apply to the output files associated with this playlist.

" - }, - "PlayReadyDrm":{ - "shape":"PlayReadyDrm", - "documentation":"

The DRM settings, if any, that you want Elastic Transcoder to apply to the output files associated with this playlist.

" - } - }, - "documentation":"

Information about the master playlist.

" - }, - "CreateJobPlaylists":{ - "type":"list", - "member":{"shape":"CreateJobPlaylist"}, - "max":30 - }, - "CreateJobRequest":{ - "type":"structure", - "required":["PipelineId"], - "members":{ - "PipelineId":{ - "shape":"Id", - "documentation":"

The Id of the pipeline that you want Elastic Transcoder to use for transcoding. The pipeline determines several settings, including the Amazon S3 bucket from which Elastic Transcoder gets the files to transcode and the bucket into which Elastic Transcoder puts the transcoded files.

" - }, - "Input":{ - "shape":"JobInput", - "documentation":"

A section of the request body that provides information about the file that is being transcoded.

" - }, - "Inputs":{ - "shape":"JobInputs", - "documentation":"

A section of the request body that provides information about the files that are being transcoded.

" - }, - "Output":{ - "shape":"CreateJobOutput", - "documentation":"

A section of the request body that provides information about the transcoded (target) file. We strongly recommend that you use the Outputs syntax instead of the Output syntax.

" - }, - "Outputs":{ - "shape":"CreateJobOutputs", - "documentation":"

A section of the request body that provides information about the transcoded (target) files. We recommend that you use the Outputs syntax instead of the Output syntax.

" - }, - "OutputKeyPrefix":{ - "shape":"Key", - "documentation":"

The value, if any, that you want Elastic Transcoder to prepend to the names of all files that this job creates, including output files, thumbnails, and playlists.

" - }, - "Playlists":{ - "shape":"CreateJobPlaylists", - "documentation":"

If you specify a preset in PresetId for which the value of Container is fmp4 (Fragmented MP4) or ts (MPEG-TS), Playlists contains information about the master playlists that you want Elastic Transcoder to create.

The maximum number of master playlists in a job is 30.

" - }, - "UserMetadata":{ - "shape":"UserMetadata", - "documentation":"

User-defined metadata that you want to associate with an Elastic Transcoder job. You specify metadata in key/value pairs, and you can add up to 10 key/value pairs per job. Elastic Transcoder does not guarantee that key/value pairs are returned in the same order in which you specify them.

" - } - }, - "documentation":"

The CreateJobRequest structure.

" - }, - "CreateJobResponse":{ - "type":"structure", - "members":{ - "Job":{ - "shape":"Job", - "documentation":"

A section of the response body that provides information about the job that is created.

" - } - }, - "documentation":"

The CreateJobResponse structure.

" - }, - "CreatePipelineRequest":{ - "type":"structure", - "required":[ - "Name", - "InputBucket", - "Role" - ], - "members":{ - "Name":{ - "shape":"Name", - "documentation":"

The name of the pipeline. We recommend that the name be unique within the AWS account, but uniqueness is not enforced.

Constraints: Maximum 40 characters.

" - }, - "InputBucket":{ - "shape":"BucketName", - "documentation":"

The Amazon S3 bucket in which you saved the media files that you want to transcode.

" - }, - "OutputBucket":{ - "shape":"BucketName", - "documentation":"

The Amazon S3 bucket in which you want Elastic Transcoder to save the transcoded files. (Use this, or use ContentConfig:Bucket plus ThumbnailConfig:Bucket.)

Specify this value when all of the following are true:

If you want to save transcoded files and playlists in one bucket and thumbnails in another bucket, specify which users can access the transcoded files or the permissions the users have, or change the Amazon S3 storage class, omit OutputBucket and specify values for ContentConfig and ThumbnailConfig instead.

" - }, - "Role":{ - "shape":"Role", - "documentation":"

The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder to use to create the pipeline.

" - }, - "AwsKmsKeyArn":{ - "shape":"KeyArn", - "documentation":"

The AWS Key Management Service (AWS KMS) key that you want to use with this pipeline.

If you use either s3 or s3-aws-kms as your Encryption:Mode, you don't need to provide a key with your job because a default key, known as an AWS-KMS key, is created for you automatically. You need to provide an AWS-KMS key only if you want to use a non-default AWS-KMS key, or if you are using an Encryption:Mode of aes-cbc-pkcs7, aes-ctr, or aes-gcm.

" - }, - "Notifications":{ - "shape":"Notifications", - "documentation":"

The Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job status.

To receive notifications, you must also subscribe to the new topic in the Amazon SNS console.

" - }, - "ContentConfig":{ - "shape":"PipelineOutputConfig", - "documentation":"

The optional ContentConfig object specifies information about the Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists: which bucket to use, which users you want to have access to the files, the type of access you want users to have, and the storage class that you want to assign to the files.

If you specify values for ContentConfig, you must also specify values for ThumbnailConfig.

If you specify values for ContentConfig and ThumbnailConfig, omit the OutputBucket object.

" - }, - "ThumbnailConfig":{ - "shape":"PipelineOutputConfig", - "documentation":"

The ThumbnailConfig object specifies several values, including the Amazon S3 bucket in which you want Elastic Transcoder to save thumbnail files, which users you want to have access to the files, the type of access you want users to have, and the storage class that you want to assign to the files.

If you specify values for ContentConfig, you must also specify values for ThumbnailConfig even if you don't want to create thumbnails.

If you specify values for ContentConfig and ThumbnailConfig, omit the OutputBucket object.

" - } - }, - "documentation":"

The CreatePipelineRequest structure.

" - }, - "CreatePipelineResponse":{ - "type":"structure", - "members":{ - "Pipeline":{ - "shape":"Pipeline", - "documentation":"

A section of the response body that provides information about the pipeline that is created.

" - }, - "Warnings":{ - "shape":"Warnings", - "documentation":"

Elastic Transcoder returns a warning if the resources used by your pipeline are not in the same region as the pipeline.

Using resources in the same region, such as your Amazon S3 buckets, Amazon SNS notification topics, and AWS KMS key, reduces processing time and prevents cross-regional charges.

" - } - }, - "documentation":"

When you create a pipeline, Elastic Transcoder returns the values that you specified in the request.

" - }, - "CreatePresetRequest":{ - "type":"structure", - "required":[ - "Name", - "Container" - ], - "members":{ - "Name":{ - "shape":"Name", - "documentation":"

The name of the preset. We recommend that the name be unique within the AWS account, but uniqueness is not enforced.

" - }, - "Description":{ - "shape":"Description", - "documentation":"

A description of the preset.

" - }, - "Container":{ - "shape":"PresetContainer", - "documentation":"

The container type for the output file. Valid values include flac, flv, fmp4, gif, mp3, mp4, mpg, mxf, oga, ogg, ts, and webm.

" - }, - "Video":{ - "shape":"VideoParameters", - "documentation":"

A section of the request body that specifies the video parameters.

" - }, - "Audio":{ - "shape":"AudioParameters", - "documentation":"

A section of the request body that specifies the audio parameters.

" - }, - "Thumbnails":{ - "shape":"Thumbnails", - "documentation":"

A section of the request body that specifies the thumbnail parameters, if any.

" - } - }, - "documentation":"

The CreatePresetRequest structure.

" - }, - "CreatePresetResponse":{ - "type":"structure", - "members":{ - "Preset":{ - "shape":"Preset", - "documentation":"

A section of the response body that provides information about the preset that is created.

" - }, - "Warning":{ - "shape":"String", - "documentation":"

If the preset settings don't comply with the standards for the video codec but Elastic Transcoder created the preset, this message explains the reason the preset settings don't meet the standard. Elastic Transcoder created the preset because the settings might produce acceptable output.

" - } - }, - "documentation":"

The CreatePresetResponse structure.

" - }, - "DeletePipelineRequest":{ - "type":"structure", - "required":["Id"], - "members":{ - "Id":{ - "shape":"Id", - "documentation":"

The identifier of the pipeline that you want to delete.

", - "location":"uri", - "locationName":"Id" - } - }, - "documentation":"

The DeletePipelineRequest structure.

" - }, - "DeletePipelineResponse":{ - "type":"structure", - "members":{}, - "documentation":"

The DeletePipelineResponse structure.

" - }, - "DeletePresetRequest":{ - "type":"structure", - "required":["Id"], - "members":{ - "Id":{ - "shape":"Id", - "documentation":"

The identifier of the preset for which you want to get detailed information.

", - "location":"uri", - "locationName":"Id" - } - }, - "documentation":"

The DeletePresetRequest structure.

" - }, - "DeletePresetResponse":{ - "type":"structure", - "members":{}, - "documentation":"

The DeletePresetResponse structure.

" - }, - "Description":{ - "type":"string", - "max":255, - "min":0 - }, - "DetectedProperties":{ - "type":"structure", - "members":{ - "Width":{ - "shape":"NullableInteger", - "documentation":"

The detected width of the input file, in pixels.

" - }, - "Height":{ - "shape":"NullableInteger", - "documentation":"

The detected height of the input file, in pixels.

" - }, - "FrameRate":{ - "shape":"FloatString", - "documentation":"

The detected frame rate of the input file, in frames per second.

" - }, - "FileSize":{ - "shape":"NullableLong", - "documentation":"

The detected file size of the input file, in bytes.

" - }, - "DurationMillis":{ - "shape":"NullableLong", - "documentation":"

The detected duration of the input file, in milliseconds.

" - } - }, - "documentation":"

The detected properties of the input file. Elastic Transcoder identifies these values from the input file.

" - }, - "Digits":{ - "type":"string", - "pattern":"^\\d{1,5}$" - }, - "DigitsOrAuto":{ - "type":"string", - "pattern":"(^auto$)|(^\\d{2,4}$)" - }, - "Encryption":{ - "type":"structure", - "members":{ - "Mode":{ - "shape":"EncryptionMode", - "documentation":"

The specific server-side encryption mode that you want Elastic Transcoder to use when decrypting your input files or encrypting your output files. Elastic Transcoder supports the following options:

For all three AES options, you must provide the following settings, which must be base64-encoded:

For the AES modes, your private encryption keys and your unencrypted data are never stored by AWS; therefore, it is important that you safely manage your encryption keys. If you lose them, you won't be able to decrypt your data.

" - }, - "Key":{ - "shape":"Base64EncodedString", - "documentation":"

The data encryption key that you want Elastic Transcoder to use to encrypt your output file, or that was used to encrypt your input file. The key must be base64-encoded and it must be one of the following bit lengths before being base64-encoded:

128, 192, or 256.

The key must also be encrypted by using the Amazon Key Management Service.

" - }, - "KeyMd5":{ - "shape":"Base64EncodedString", - "documentation":"

The MD5 digest of the key that you used to encrypt your input file, or that you want Elastic Transcoder to use to encrypt your output file. Elastic Transcoder uses the key digest as a checksum to make sure your key was not corrupted in transit. The key MD5 must be base64-encoded, and it must be exactly 16 bytes long before being base64-encoded.

" - }, - "InitializationVector":{ - "shape":"ZeroTo255String", - "documentation":"

The series of random bits created by a random bit generator, unique for every encryption operation, that you used to encrypt your input files or that you want Elastic Transcoder to use to encrypt your output files. The initialization vector must be base64-encoded, and it must be exactly 16 bytes long before being base64-encoded.

" - } - }, - "documentation":"

The encryption settings, if any, that are used for decrypting your input files or encrypting your output files. If your input file is encrypted, you must specify the mode that Elastic Transcoder uses to decrypt your file, otherwise you must specify the mode you want Elastic Transcoder to use to encrypt your output files.

" - }, - "EncryptionMode":{ - "type":"string", - "pattern":"(^s3$)|(^s3-aws-kms$)|(^aes-cbc-pkcs7$)|(^aes-ctr$)|(^aes-gcm$)" - }, - "ExceptionMessages":{ - "type":"list", - "member":{"shape":"String"} - }, - "Filename":{ - "type":"string", - "max":255, - "min":1 - }, - "FixedGOP":{ - "type":"string", - "pattern":"(^true$)|(^false$)" - }, - "FloatString":{ - "type":"string", - "pattern":"^\\d{1,5}(\\.\\d{0,5})?$" - }, - "FrameRate":{ - "type":"string", - "pattern":"(^auto$)|(^10$)|(^15$)|(^23.97$)|(^24$)|(^25$)|(^29.97$)|(^30$)|(^50$)|(^60$)" - }, - "Grantee":{ - "type":"string", - "max":255, - "min":1 - }, - "GranteeType":{ - "type":"string", - "pattern":"(^Canonical$)|(^Email$)|(^Group$)" - }, - "HlsContentProtection":{ - "type":"structure", - "members":{ - "Method":{ - "shape":"HlsContentProtectionMethod", - "documentation":"

The content protection method for your output. The only valid value is: aes-128.

This value is written into the method attribute of the EXT-X-KEY metadata tag in the output playlist.

" - }, - "Key":{ - "shape":"Base64EncodedString", - "documentation":"

If you want Elastic Transcoder to generate a key for you, leave this field blank.

If you choose to supply your own key, you must encrypt the key by using AWS KMS. The key must be base64-encoded, and it must be one of the following bit lengths before being base64-encoded:

128, 192, or 256.

" - }, - "KeyMd5":{ - "shape":"Base64EncodedString", - "documentation":"

If Elastic Transcoder is generating your key for you, you must leave this field blank.

The MD5 digest of the key that you want Elastic Transcoder to use to encrypt your output file, and that you want Elastic Transcoder to use as a checksum to make sure your key was not corrupted in transit. The key MD5 must be base64-encoded, and it must be exactly 16 bytes before being base64-encoded.

" - }, - "InitializationVector":{ - "shape":"ZeroTo255String", - "documentation":"

If Elastic Transcoder is generating your key for you, you must leave this field blank.

The series of random bits created by a random bit generator, unique for every encryption operation, that you want Elastic Transcoder to use to encrypt your output files. The initialization vector must be base64-encoded, and it must be exactly 16 bytes before being base64-encoded.

" - }, - "LicenseAcquisitionUrl":{ - "shape":"ZeroTo512String", - "documentation":"

The location of the license key required to decrypt your HLS playlist. The URL must be an absolute path, and is referenced in the URI attribute of the EXT-X-KEY metadata tag in the playlist file.

" - }, - "KeyStoragePolicy":{ - "shape":"KeyStoragePolicy", - "documentation":"

Specify whether you want Elastic Transcoder to write your HLS license key to an Amazon S3 bucket. If you choose WithVariantPlaylists, LicenseAcquisitionUrl must be left blank and Elastic Transcoder writes your data key into the same bucket as the associated playlist.

" - } - }, - "documentation":"

The HLS content protection settings, if any, that you want Elastic Transcoder to apply to your output files.

" - }, - "HlsContentProtectionMethod":{ - "type":"string", - "pattern":"(^aes-128$)" - }, - "HorizontalAlign":{ - "type":"string", - "pattern":"(^Left$)|(^Right$)|(^Center$)" - }, - "Id":{ - "type":"string", - "pattern":"^\\d{13}-\\w{6}$" - }, - "IncompatibleVersionException":{ - "type":"structure", - "members":{}, - "error":{"httpStatusCode":400}, - "exception":true - }, - "InputCaptions":{ - "type":"structure", - "members":{ - "MergePolicy":{ - "shape":"CaptionMergePolicy", - "documentation":"

A policy that determines how Elastic Transcoder handles the existence of multiple captions.

MergePolicy cannot be null.

" - }, - "CaptionSources":{ - "shape":"CaptionSources", - "documentation":"

Source files for the input sidecar captions used during the transcoding process. To omit all sidecar captions, leave CaptionSources blank.

" - } - }, - "documentation":"

The captions to be created, if any.

" - }, - "Interlaced":{ - "type":"string", - "pattern":"(^auto$)|(^true$)|(^false$)" - }, - "InternalServiceException":{ - "type":"structure", - "members":{}, - "documentation":"

Elastic Transcoder encountered an unexpected exception while trying to fulfill the request.

", - "exception":true, - "fault":true - }, - "Job":{ - "type":"structure", - "members":{ - "Id":{ - "shape":"Id", - "documentation":"

The identifier that Elastic Transcoder assigned to the job. You use this value to get settings for the job or to delete the job.

" - }, - "Arn":{ - "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) for the job.

" - }, - "PipelineId":{ - "shape":"Id", - "documentation":"

The Id of the pipeline that you want Elastic Transcoder to use for transcoding. The pipeline determines several settings, including the Amazon S3 bucket from which Elastic Transcoder gets the files to transcode and the bucket into which Elastic Transcoder puts the transcoded files.

" - }, - "Input":{ - "shape":"JobInput", - "documentation":"

A section of the request or response body that provides information about the file that is being transcoded.

" - }, - "Inputs":{ - "shape":"JobInputs", - "documentation":"

Information about the files that you're transcoding. If you specified multiple files for this job, Elastic Transcoder stitches the files together to make one output.

" - }, - "Output":{ - "shape":"JobOutput", - "documentation":"

If you specified one output for a job, information about that output. If you specified multiple outputs for a job, the Output object lists information about the first output. This duplicates the information that is listed for the first output in the Outputs object.

Outputs recommended instead.

A section of the request or response body that provides information about the transcoded (target) file.

" - }, - "Outputs":{ - "shape":"JobOutputs", - "documentation":"

Information about the output files. We recommend that you use the Outputs syntax for all jobs, even when you want Elastic Transcoder to transcode a file into only one format. Do not use both the Outputs and Output syntaxes in the same request. You can create a maximum of 30 outputs per job.

If you specify more than one output for a job, Elastic Transcoder creates the files for each output in the order in which you specify them in the job.

" - }, - "OutputKeyPrefix":{ - "shape":"Key", - "documentation":"

The value, if any, that you want Elastic Transcoder to prepend to the names of all files that this job creates, including output files, thumbnails, and playlists. We recommend that you add a / or some other delimiter to the end of the OutputKeyPrefix.

" - }, - "Playlists":{ - "shape":"Playlists", - "documentation":"

Outputs in Fragmented MP4 or MPEG-TS format only.

If you specify a preset in PresetId for which the value of Container is fmp4 (Fragmented MP4) or ts (MPEG-TS), Playlists contains information about the master playlists that you want Elastic Transcoder to create.

The maximum number of master playlists in a job is 30.

" - }, - "Status":{ - "shape":"JobStatus", - "documentation":"

The status of the job: Submitted, Progressing, Complete, Canceled, or Error.

" - }, - "UserMetadata":{ - "shape":"UserMetadata", - "documentation":"

User-defined metadata that you want to associate with an Elastic Transcoder job. You specify metadata in key/value pairs, and you can add up to 10 key/value pairs per job. Elastic Transcoder does not guarantee that key/value pairs are returned in the same order in which you specify them.

Metadata keys and values must use characters from the following list:

" - }, - "Timing":{ - "shape":"Timing", - "documentation":"

Details about the timing of a job.

" - } - }, - "documentation":"

A section of the response body that provides information about the job that is created.

" - }, - "JobAlbumArt":{ - "type":"structure", - "members":{ - "MergePolicy":{ - "shape":"MergePolicy", - "documentation":"

A policy that determines how Elastic Transcoder handles the existence of multiple album artwork files.

" - }, - "Artwork":{ - "shape":"Artworks", - "documentation":"

The file to be used as album art. There can be multiple artworks associated with an audio file, to a maximum of 20. Valid formats are .jpg and .png

" - } - }, - "documentation":"

The .jpg or .png file associated with an audio file.

" - }, - "JobContainer":{ - "type":"string", - "pattern":"(^auto$)|(^3gp$)|(^asf$)|(^avi$)|(^divx$)|(^flv$)|(^mkv$)|(^mov$)|(^mp4$)|(^mpeg$)|(^mpeg-ps$)|(^mpeg-ts$)|(^mxf$)|(^ogg$)|(^ts$)|(^vob$)|(^wav$)|(^webm$)|(^mp3$)|(^m4a$)|(^aac$)" - }, - "JobInput":{ - "type":"structure", - "members":{ - "Key":{ - "shape":"LongKey", - "documentation":"

The name of the file to transcode. Elsewhere in the body of the JSON block is the ID of the pipeline to use for processing the job. The InputBucket object in that pipeline tells Elastic Transcoder which Amazon S3 bucket to get the file from.

If the file name includes a prefix, such as cooking/lasagna.mpg, include the prefix in the key. If the file isn't in the specified bucket, Elastic Transcoder returns an error.

" - }, - "FrameRate":{ - "shape":"FrameRate", - "documentation":"

The frame rate of the input file. If you want Elastic Transcoder to automatically detect the frame rate of the input file, specify auto. If you want to specify the frame rate for the input file, enter one of the following values:

10, 15, 23.97, 24, 25, 29.97, 30, 60

If you specify a value other than auto, Elastic Transcoder disables automatic detection of the frame rate.

" - }, - "Resolution":{ - "shape":"Resolution", - "documentation":"

This value must be auto, which causes Elastic Transcoder to automatically detect the resolution of the input file.

" - }, - "AspectRatio":{ - "shape":"AspectRatio", - "documentation":"

The aspect ratio of the input file. If you want Elastic Transcoder to automatically detect the aspect ratio of the input file, specify auto. If you want to specify the aspect ratio for the output file, enter one of the following values:

1:1, 4:3, 3:2, 16:9

If you specify a value other than auto, Elastic Transcoder disables automatic detection of the aspect ratio.

" - }, - "Interlaced":{ - "shape":"Interlaced", - "documentation":"

Whether the input file is interlaced. If you want Elastic Transcoder to automatically detect whether the input file is interlaced, specify auto. If you want to specify whether the input file is interlaced, enter one of the following values:

true, false

If you specify a value other than auto, Elastic Transcoder disables automatic detection of interlacing.

" - }, - "Container":{ - "shape":"JobContainer", - "documentation":"

The container type for the input file. If you want Elastic Transcoder to automatically detect the container type of the input file, specify auto. If you want to specify the container type for the input file, enter one of the following values:

3gp, aac, asf, avi, divx, flv, m4a, mkv, mov, mp3, mp4, mpeg, mpeg-ps, mpeg-ts, mxf, ogg, vob, wav, webm

" - }, - "Encryption":{ - "shape":"Encryption", - "documentation":"

The encryption settings, if any, that are used for decrypting your input files. If your input file is encrypted, you must specify the mode that Elastic Transcoder uses to decrypt your file.

" - }, - "TimeSpan":{ - "shape":"TimeSpan", - "documentation":"

Settings for clipping an input. Each input can have different clip settings.

" - }, - "InputCaptions":{ - "shape":"InputCaptions", - "documentation":"

You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another. All captions must be in UTF-8. Elastic Transcoder supports two types of captions:

If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.

Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does not preserve text formatting (for example, italics) during the transcoding process.

To remove captions or leave the captions empty, set Captions to null. To pass through existing captions unchanged, set the MergePolicy to MergeRetain, and pass in a null CaptionSources array.

For more information on embedded files, see the Subtitles Wikipedia page.

For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file Wikipedia pages.

" - }, - "DetectedProperties":{ - "shape":"DetectedProperties", - "documentation":"

The detected properties of the input file.

" - } - }, - "documentation":"

Information about the file that you're transcoding.

" - }, - "JobInputs":{ - "type":"list", - "member":{"shape":"JobInput"}, - "max":200 - }, - "JobOutput":{ - "type":"structure", - "members":{ - "Id":{ - "shape":"String", - "documentation":"

A sequential counter, starting with 1, that identifies an output among the outputs from the current job. In the Output syntax, this value is always 1.

" - }, - "Key":{ - "shape":"Key", - "documentation":"

The name to assign to the transcoded file. Elastic Transcoder saves the file in the Amazon S3 bucket specified by the OutputBucket object in the pipeline that is specified by the pipeline ID.

" - }, - "ThumbnailPattern":{ - "shape":"ThumbnailPattern", - "documentation":"

Whether you want Elastic Transcoder to create thumbnails for your videos and, if so, how you want Elastic Transcoder to name the files.

If you don't want Elastic Transcoder to create thumbnails, specify \"\".

If you do want Elastic Transcoder to create thumbnails, specify the information that you want to include in the file name for each thumbnail. You can specify the following values in any sequence:

When creating thumbnails, Elastic Transcoder automatically saves the files in the format (.jpg or .png) that appears in the preset that you specified in the PresetID value of CreateJobOutput. Elastic Transcoder also appends the applicable file name extension.

" - }, - "ThumbnailEncryption":{ - "shape":"Encryption", - "documentation":"

The encryption settings, if any, that you want Elastic Transcoder to apply to your thumbnail.

" - }, - "Rotate":{ - "shape":"Rotate", - "documentation":"

The number of degrees clockwise by which you want Elastic Transcoder to rotate the output relative to the input. Enter one of the following values:

auto, 0, 90, 180, 270

The value auto generally works only if the file that you're transcoding contains rotation metadata.

" - }, - "PresetId":{ - "shape":"Id", - "documentation":"

The value of the Id object for the preset that you want to use for this job. The preset determines the audio, video, and thumbnail settings that Elastic Transcoder uses for transcoding. To use a preset that you created, specify the preset ID that Elastic Transcoder returned in the response when you created the preset. You can also use the Elastic Transcoder system presets, which you can get with ListPresets.

" - }, - "SegmentDuration":{ - "shape":"FloatString", - "documentation":"

Outputs in Fragmented MP4 or MPEG-TS format only.

If you specify a preset in PresetId for which the value of Container is fmp4 (Fragmented MP4) or ts (MPEG-TS), SegmentDuration is the target maximum duration of each segment in seconds. For HLSv3 format playlists, each media segment is stored in a separate .ts file. For HLSv4, MPEG-DASH, and Smooth playlists, all media segments for an output are stored in a single file. Each segment is approximately the length of the SegmentDuration, though individual segments might be shorter or longer.

The range of valid values is 1 to 60 seconds. If the duration of the video is not evenly divisible by SegmentDuration, the duration of the last segment is the remainder of total length/SegmentDuration.

Elastic Transcoder creates an output-specific playlist for each output HLS output that you specify in OutputKeys. To add an output to the master playlist for this job, include it in the OutputKeys of the associated playlist.

" - }, - "Status":{ - "shape":"JobStatus", - "documentation":"

The status of one output in a job. If you specified only one output for the job, Outputs:Status is always the same as Job:Status. If you specified more than one output:

The value of Status is one of the following: Submitted, Progressing, Complete, Canceled, or Error.

" - }, - "StatusDetail":{ - "shape":"Description", - "documentation":"

Information that further explains Status.

" - }, - "Duration":{ - "shape":"NullableLong", - "documentation":"

Duration of the output file, in seconds.

" - }, - "Width":{ - "shape":"NullableInteger", - "documentation":"

Specifies the width of the output file in pixels.

" - }, - "Height":{ - "shape":"NullableInteger", - "documentation":"

Height of the output file, in pixels.

" - }, - "FrameRate":{ - "shape":"FloatString", - "documentation":"

Frame rate of the output file, in frames per second.

" - }, - "FileSize":{ - "shape":"NullableLong", - "documentation":"

File size of the output file, in bytes.

" - }, - "DurationMillis":{ - "shape":"NullableLong", - "documentation":"

Duration of the output file, in milliseconds.

" - }, - "Watermarks":{ - "shape":"JobWatermarks", - "documentation":"

Information about the watermarks that you want Elastic Transcoder to add to the video during transcoding. You can specify up to four watermarks for each output. Settings for each watermark must be defined in the preset that you specify in Preset for the current output.

Watermarks are added to the output video in the sequence in which you list them in the job output—the first watermark in the list is added to the output video first, the second watermark in the list is added next, and so on. As a result, if the settings in a preset cause Elastic Transcoder to place all watermarks in the same location, the second watermark that you add covers the first one, the third one covers the second, and the fourth one covers the third.

" - }, - "AlbumArt":{ - "shape":"JobAlbumArt", - "documentation":"

The album art to be associated with the output file, if any.

" - }, - "Composition":{ - "shape":"Composition", - "documentation":"

You can create an output file that contains an excerpt from the input file. This excerpt, called a clip, can come from the beginning, middle, or end of the file. The Composition object contains settings for the clips that make up an output file. For the current release, you can only specify settings for a single clip per output file. The Composition object cannot be null.

", - "deprecated":true - }, - "Captions":{ - "shape":"Captions", - "documentation":"

You can configure Elastic Transcoder to transcode captions, or subtitles, from one format to another. All captions must be in UTF-8. Elastic Transcoder supports two types of captions:

If you want ttml or smpte-tt compatible captions, specify dfxp as your output format.

Elastic Transcoder does not support OCR (Optical Character Recognition), does not accept pictures as a valid input for captions, and is not available for audio-only transcoding. Elastic Transcoder does not preserve text formatting (for example, italics) during the transcoding process.

To remove captions or leave the captions empty, set Captions to null. To pass through existing captions unchanged, set the MergePolicy to MergeRetain, and pass in a null CaptionSources array.

For more information on embedded files, see the Subtitles Wikipedia page.

For more information on sidecar files, see the Extensible Metadata Platform and Sidecar file Wikipedia pages.

" - }, - "Encryption":{ - "shape":"Encryption", - "documentation":"

The encryption settings, if any, that you want Elastic Transcoder to apply to your output files. If you choose to use encryption, you must specify a mode to use. If you choose not to use encryption, Elastic Transcoder writes an unencrypted file to your Amazon S3 bucket.

" - }, - "AppliedColorSpaceConversion":{ - "shape":"String", - "documentation":"

If Elastic Transcoder used a preset with a ColorSpaceConversionMode to transcode the output file, the AppliedColorSpaceConversion parameter shows the conversion used. If no ColorSpaceConversionMode was defined in the preset, this parameter is not included in the job response.

" - } - }, - "documentation":"

Outputs recommended instead.

If you specified one output for a job, information about that output. If you specified multiple outputs for a job, the Output object lists information about the first output. This duplicates the information that is listed for the first output in the Outputs object.

" - }, - "JobOutputs":{ - "type":"list", - "member":{"shape":"JobOutput"} - }, - "JobStatus":{ - "type":"string", - "pattern":"(^Submitted$)|(^Progressing$)|(^Complete$)|(^Canceled$)|(^Error$)" - }, - "JobWatermark":{ - "type":"structure", - "members":{ - "PresetWatermarkId":{ - "shape":"PresetWatermarkId", - "documentation":"

The ID of the watermark settings that Elastic Transcoder uses to add watermarks to the video during transcoding. The settings are in the preset specified by Preset for the current output. In that preset, the value of Watermarks Id tells Elastic Transcoder which settings to use.

" - }, - "InputKey":{ - "shape":"WatermarkKey", - "documentation":"

The name of the .png or .jpg file that you want to use for the watermark. To determine which Amazon S3 bucket contains the specified file, Elastic Transcoder checks the pipeline specified by Pipeline; the Input Bucket object in that pipeline identifies the bucket.

If the file name includes a prefix, for example, logos/128x64.png, include the prefix in the key. If the file isn't in the specified bucket, Elastic Transcoder returns an error.

" - }, - "Encryption":{ - "shape":"Encryption", - "documentation":"

The encryption settings, if any, that you want Elastic Transcoder to apply to your watermarks.

" - } - }, - "documentation":"

Watermarks can be in .png or .jpg format. If you want to display a watermark that is not rectangular, use the .png format, which supports transparency.

" - }, - "JobWatermarks":{ - "type":"list", - "member":{"shape":"JobWatermark"} - }, - "Jobs":{ - "type":"list", - "member":{"shape":"Job"} - }, - "JpgOrPng":{ - "type":"string", - "pattern":"(^jpg$)|(^png$)" - }, - "Key":{ - "type":"string", - "max":255, - "min":1 - }, - "KeyArn":{ - "type":"string", - "max":255, - "min":0 - }, - "KeyIdGuid":{ - "type":"string", - "pattern":"(^[0-9A-Fa-f]{8}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{4}-[0-9A-Fa-f]{12}$)|(^[0-9A-Fa-f]{32}$)" - }, - "KeyStoragePolicy":{ - "type":"string", - "pattern":"(^NoStore$)|(^WithVariantPlaylists$)" - }, - "KeyframesMaxDist":{ - "type":"string", - "pattern":"^\\d{1,6}$" - }, - "LimitExceededException":{ - "type":"structure", - "members":{}, - "documentation":"

Too many operations for a given AWS account. For example, the number of pipelines exceeds the maximum allowed.

", - "error":{"httpStatusCode":429}, - "exception":true - }, - "ListJobsByPipelineRequest":{ - "type":"structure", - "required":["PipelineId"], - "members":{ - "PipelineId":{ - "shape":"Id", - "documentation":"

The ID of the pipeline for which you want to get job information.

", - "location":"uri", - "locationName":"PipelineId" - }, - "Ascending":{ - "shape":"Ascending", - "documentation":"

To list jobs in chronological order by the date and time that they were submitted, enter true. To list jobs in reverse chronological order, enter false.

", - "location":"querystring", - "locationName":"Ascending" - }, - "PageToken":{ - "shape":"Id", - "documentation":"

When Elastic Transcoder returns more than one page of results, use pageToken in subsequent GET requests to get each successive page of results.

", - "location":"querystring", - "locationName":"PageToken" - } - }, - "documentation":"

The ListJobsByPipelineRequest structure.

" - }, - "ListJobsByPipelineResponse":{ - "type":"structure", - "members":{ - "Jobs":{ - "shape":"Jobs", - "documentation":"

An array of Job objects that are in the specified pipeline.

" - }, - "NextPageToken":{ - "shape":"Id", - "documentation":"

A value that you use to access the second and subsequent pages of results, if any. When the jobs in the specified pipeline fit on one page or when you've reached the last page of results, the value of NextPageToken is null.

" - } - }, - "documentation":"

The ListJobsByPipelineResponse structure.

" - }, - "ListJobsByStatusRequest":{ - "type":"structure", - "required":["Status"], - "members":{ - "Status":{ - "shape":"JobStatus", - "documentation":"

To get information about all of the jobs associated with the current AWS account that have a given status, specify the following status: Submitted, Progressing, Complete, Canceled, or Error.

", - "location":"uri", - "locationName":"Status" - }, - "Ascending":{ - "shape":"Ascending", - "documentation":"

To list jobs in chronological order by the date and time that they were submitted, enter true. To list jobs in reverse chronological order, enter false.

", - "location":"querystring", - "locationName":"Ascending" - }, - "PageToken":{ - "shape":"Id", - "documentation":"

When Elastic Transcoder returns more than one page of results, use pageToken in subsequent GET requests to get each successive page of results.

", - "location":"querystring", - "locationName":"PageToken" - } - }, - "documentation":"

The ListJobsByStatusRequest structure.

" - }, - "ListJobsByStatusResponse":{ - "type":"structure", - "members":{ - "Jobs":{ - "shape":"Jobs", - "documentation":"

An array of Job objects that have the specified status.

" - }, - "NextPageToken":{ - "shape":"Id", - "documentation":"

A value that you use to access the second and subsequent pages of results, if any. When the jobs in the specified pipeline fit on one page or when you've reached the last page of results, the value of NextPageToken is null.

" - } - }, - "documentation":"

The ListJobsByStatusResponse structure.

" - }, - "ListPipelinesRequest":{ - "type":"structure", - "members":{ - "Ascending":{ - "shape":"Ascending", - "documentation":"

To list pipelines in chronological order by the date and time that they were created, enter true. To list pipelines in reverse chronological order, enter false.

", - "location":"querystring", - "locationName":"Ascending" - }, - "PageToken":{ - "shape":"Id", - "documentation":"

When Elastic Transcoder returns more than one page of results, use pageToken in subsequent GET requests to get each successive page of results.

", - "location":"querystring", - "locationName":"PageToken" - } - }, - "documentation":"

The ListPipelinesRequest structure.

" - }, - "ListPipelinesResponse":{ - "type":"structure", - "members":{ - "Pipelines":{ - "shape":"Pipelines", - "documentation":"

An array of Pipeline objects.

" - }, - "NextPageToken":{ - "shape":"Id", - "documentation":"

A value that you use to access the second and subsequent pages of results, if any. When the pipelines fit on one page or when you've reached the last page of results, the value of NextPageToken is null.

" - } - }, - "documentation":"

A list of the pipelines associated with the current AWS account.

" - }, - "ListPresetsRequest":{ - "type":"structure", - "members":{ - "Ascending":{ - "shape":"Ascending", - "documentation":"

To list presets in chronological order by the date and time that they were created, enter true. To list presets in reverse chronological order, enter false.

", - "location":"querystring", - "locationName":"Ascending" - }, - "PageToken":{ - "shape":"Id", - "documentation":"

When Elastic Transcoder returns more than one page of results, use pageToken in subsequent GET requests to get each successive page of results.

", - "location":"querystring", - "locationName":"PageToken" - } - }, - "documentation":"

The ListPresetsRequest structure.

" - }, - "ListPresetsResponse":{ - "type":"structure", - "members":{ - "Presets":{ - "shape":"Presets", - "documentation":"

An array of Preset objects.

" - }, - "NextPageToken":{ - "shape":"Id", - "documentation":"

A value that you use to access the second and subsequent pages of results, if any. When the presets fit on one page or when you've reached the last page of results, the value of NextPageToken is null.

" - } - }, - "documentation":"

The ListPresetsResponse structure.

" - }, - "LongKey":{ - "type":"string", - "max":1024, - "min":1 - }, - "MaxFrameRate":{ - "type":"string", - "pattern":"(^10$)|(^15$)|(^23.97$)|(^24$)|(^25$)|(^29.97$)|(^30$)|(^50$)|(^60$)" - }, - "MergePolicy":{ - "type":"string", - "pattern":"(^Replace$)|(^Prepend$)|(^Append$)|(^Fallback$)" - }, - "Name":{ - "type":"string", - "max":40, - "min":1 - }, - "NonEmptyBase64EncodedString":{ - "type":"string", - "pattern":"(^(?:[A-Za-z0-9\\+/]{4})*(?:[A-Za-z0-9\\+/]{2}==|[A-Za-z0-9\\+/]{3}=)?$)" - }, - "Notifications":{ - "type":"structure", - "members":{ - "Progressing":{ - "shape":"SnsTopic", - "documentation":"

The Amazon Simple Notification Service (Amazon SNS) topic that you want to notify when Elastic Transcoder has started to process the job.

" - }, - "Completed":{ - "shape":"SnsTopic", - "documentation":"

The Amazon SNS topic that you want to notify when Elastic Transcoder has finished processing the job.

" - }, - "Warning":{ - "shape":"SnsTopic", - "documentation":"

The Amazon SNS topic that you want to notify when Elastic Transcoder encounters a warning condition.

" - }, - "Error":{ - "shape":"SnsTopic", - "documentation":"

The Amazon SNS topic that you want to notify when Elastic Transcoder encounters an error condition.

" - } - }, - "documentation":"

The Amazon Simple Notification Service (Amazon SNS) topic or topics to notify in order to report job status.

To receive notifications, you must also subscribe to the new topic in the Amazon SNS console.

" - }, - "NullableInteger":{"type":"integer"}, - "NullableLong":{"type":"long"}, - "OneTo512String":{ - "type":"string", - "max":512, - "min":1 - }, - "Opacity":{ - "type":"string", - "pattern":"^\\d{1,3}(\\.\\d{0,20})?$" - }, - "OutputKeys":{ - "type":"list", - "member":{"shape":"Key"}, - "max":30 - }, - "PaddingPolicy":{ - "type":"string", - "pattern":"(^Pad$)|(^NoPad$)" - }, - "Permission":{ - "type":"structure", - "members":{ - "GranteeType":{ - "shape":"GranteeType", - "documentation":"

The type of value that appears in the Grantee object:

" - }, - "Grantee":{ - "shape":"Grantee", - "documentation":"

The AWS user or group that you want to have access to transcoded files and playlists. To identify the user or group, you can specify the canonical user ID for an AWS account, an origin access identity for a CloudFront distribution, the registered email address of an AWS account, or a predefined Amazon S3 group.

" - }, - "Access":{ - "shape":"AccessControls", - "documentation":"

The permission that you want to give to the AWS user that is listed in Grantee. Valid values include:

" - } - }, - "documentation":"

The Permission structure.

" - }, - "Permissions":{ - "type":"list", - "member":{"shape":"Permission"}, - "max":30 - }, - "Pipeline":{ - "type":"structure", - "members":{ - "Id":{ - "shape":"Id", - "documentation":"

The identifier for the pipeline. You use this value to identify the pipeline in which you want to perform a variety of operations, such as creating a job or a preset.

" - }, - "Arn":{ - "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) for the pipeline.

" - }, - "Name":{ - "shape":"Name", - "documentation":"

The name of the pipeline. We recommend that the name be unique within the AWS account, but uniqueness is not enforced.

Constraints: Maximum 40 characters

" - }, - "Status":{ - "shape":"PipelineStatus", - "documentation":"

The current status of the pipeline:

" - }, - "InputBucket":{ - "shape":"BucketName", - "documentation":"

The Amazon S3 bucket from which Elastic Transcoder gets media files for transcoding and the graphics files, if any, that you want to use for watermarks.

" - }, - "OutputBucket":{ - "shape":"BucketName", - "documentation":"

The Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files, thumbnails, and playlists. Either you specify this value, or you specify both ContentConfig and ThumbnailConfig.

" - }, - "Role":{ - "shape":"Role", - "documentation":"

The IAM Amazon Resource Name (ARN) for the role that Elastic Transcoder uses to transcode jobs for this pipeline.

" - }, - "AwsKmsKeyArn":{ - "shape":"KeyArn", - "documentation":"

The AWS Key Management Service (AWS KMS) key that you want to use with this pipeline.

If you use either s3 or s3-aws-kms as your Encryption:Mode, you don't need to provide a key with your job because a default key, known as an AWS-KMS key, is created for you automatically. You need to provide an AWS-KMS key only if you want to use a non-default AWS-KMS key, or if you are using an Encryption:Mode of aes-cbc-pkcs7, aes-ctr, or aes-gcm.

" - }, - "Notifications":{ - "shape":"Notifications", - "documentation":"

The Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job status.

To receive notifications, you must also subscribe to the new topic in the Amazon SNS console.

" - }, - "ContentConfig":{ - "shape":"PipelineOutputConfig", - "documentation":"

Information about the Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists. Either you specify both ContentConfig and ThumbnailConfig, or you specify OutputBucket.

" - }, - "ThumbnailConfig":{ - "shape":"PipelineOutputConfig", - "documentation":"

Information about the Amazon S3 bucket in which you want Elastic Transcoder to save thumbnail files. Either you specify both ContentConfig and ThumbnailConfig, or you specify OutputBucket.

" - } - }, - "documentation":"

The pipeline (queue) that is used to manage jobs.

" - }, - "PipelineOutputConfig":{ - "type":"structure", - "members":{ - "Bucket":{ - "shape":"BucketName", - "documentation":"

The Amazon S3 bucket in which you want Elastic Transcoder to save the transcoded files. Specify this value when all of the following are true:

If you want to save transcoded files and playlists in one bucket and thumbnails in another bucket, specify which users can access the transcoded files or the permissions the users have, or change the Amazon S3 storage class, omit OutputBucket and specify values for ContentConfig and ThumbnailConfig instead.

" - }, - "StorageClass":{ - "shape":"StorageClass", - "documentation":"

The Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the video files and playlists that it stores in your Amazon S3 bucket.

" - }, - "Permissions":{ - "shape":"Permissions", - "documentation":"

Optional. The Permissions object specifies which users and/or predefined Amazon S3 groups you want to have access to transcoded files and playlists, and the type of access you want them to have. You can grant permissions to a maximum of 30 users and/or predefined Amazon S3 groups.

If you include Permissions, Elastic Transcoder grants only the permissions that you specify. It does not grant full permissions to the owner of the role specified by Role. If you want that user to have full control, you must explicitly grant full control to the user.

If you omit Permissions, Elastic Transcoder grants full control over the transcoded files and playlists to the owner of the role specified by Role, and grants no other permissions to any other user or group.

" - } - }, - "documentation":"

The PipelineOutputConfig structure.

" - }, - "PipelineStatus":{ - "type":"string", - "pattern":"(^Active$)|(^Paused$)" - }, - "Pipelines":{ - "type":"list", - "member":{"shape":"Pipeline"} - }, - "PixelsOrPercent":{ - "type":"string", - "pattern":"(^\\d{1,3}(\\.\\d{0,5})?%$)|(^\\d{1,4}?px$)" - }, - "PlayReadyDrm":{ - "type":"structure", - "members":{ - "Format":{ - "shape":"PlayReadyDrmFormatString", - "documentation":"

The type of DRM, if any, that you want Elastic Transcoder to apply to the output files associated with this playlist.

" - }, - "Key":{ - "shape":"NonEmptyBase64EncodedString", - "documentation":"

The DRM key for your file, provided by your DRM license provider. The key must be base64-encoded, and it must be one of the following bit lengths before being base64-encoded:

128, 192, or 256.

The key must also be encrypted by using AWS KMS.

" - }, - "KeyMd5":{ - "shape":"NonEmptyBase64EncodedString", - "documentation":"

The MD5 digest of the key used for DRM on your file, and that you want Elastic Transcoder to use as a checksum to make sure your key was not corrupted in transit. The key MD5 must be base64-encoded, and it must be exactly 16 bytes before being base64-encoded.

" - }, - "KeyId":{ - "shape":"KeyIdGuid", - "documentation":"

The ID for your DRM key, so that your DRM license provider knows which key to provide.

The key ID must be provided in big endian, and Elastic Transcoder converts it to little endian before inserting it into the PlayReady DRM headers. If you are unsure whether your license server provides your key ID in big or little endian, check with your DRM provider.

" - }, - "InitializationVector":{ - "shape":"ZeroTo255String", - "documentation":"

The series of random bits created by a random bit generator, unique for every encryption operation, that you want Elastic Transcoder to use to encrypt your files. The initialization vector must be base64-encoded, and it must be exactly 8 bytes long before being base64-encoded. If no initialization vector is provided, Elastic Transcoder generates one for you.

" - }, - "LicenseAcquisitionUrl":{ - "shape":"OneTo512String", - "documentation":"

The location of the license key required to play DRM content. The URL must be an absolute path, and is referenced by the PlayReady header. The PlayReady header is referenced in the protection header of the client manifest for Smooth Streaming outputs, and in the EXT-X-DXDRM and EXT-XDXDRMINFO metadata tags for HLS playlist outputs. An example URL looks like this: https://www.example.com/exampleKey/

" - } - }, - "documentation":"

The PlayReady DRM settings, if any, that you want Elastic Transcoder to apply to the output files associated with this playlist.

PlayReady DRM encrypts your media files using aes-ctr encryption.

If you use DRM for an HLSv3 playlist, your outputs must have a master playlist.

" - }, - "PlayReadyDrmFormatString":{ - "type":"string", - "pattern":"(^microsoft$)|(^discretix-3.0$)" - }, - "Playlist":{ - "type":"structure", - "members":{ - "Name":{ - "shape":"Filename", - "documentation":"

The name that you want Elastic Transcoder to assign to the master playlist, for example, nyc-vacation.m3u8. If the name includes a / character, the section of the name before the last / must be identical for all Name objects. If you create more than one master playlist, the values of all Name objects must be unique.

Elastic Transcoder automatically appends the relevant file extension to the file name (.m3u8 for HLSv3 and HLSv4 playlists, and .ism and .ismc for Smooth playlists). If you include a file extension in Name, the file name will have two extensions.

" - }, - "Format":{ - "shape":"PlaylistFormat", - "documentation":"

The format of the output playlist. Valid formats include HLSv3, HLSv4, and Smooth.

" - }, - "OutputKeys":{ - "shape":"OutputKeys", - "documentation":"

For each output in this job that you want to include in a master playlist, the value of the Outputs:Key object.

Elastic Transcoder automatically appends the relevant file extension to the file name. If you include a file extension in Output Key, the file name will have two extensions.

If you include more than one output in a playlist, any segment duration settings, clip settings, or caption settings must be the same for all outputs in the playlist. For Smooth playlists, the Audio:Profile, Video:Profile, and Video:FrameRate to Video:KeyframesMaxDist ratio must be the same for all outputs.

" - }, - "HlsContentProtection":{ - "shape":"HlsContentProtection", - "documentation":"

The HLS content protection settings, if any, that you want Elastic Transcoder to apply to the output files associated with this playlist.

" - }, - "PlayReadyDrm":{ - "shape":"PlayReadyDrm", - "documentation":"

The DRM settings, if any, that you want Elastic Transcoder to apply to the output files associated with this playlist.

" - }, - "Status":{ - "shape":"JobStatus", - "documentation":"

The status of the job with which the playlist is associated.

" - }, - "StatusDetail":{ - "shape":"Description", - "documentation":"

Information that further explains the status.

" - } - }, - "documentation":"

Use Only for Fragmented MP4 or MPEG-TS Outputs. If you specify a preset for which the value of Container is fmp4 (Fragmented MP4) or ts (MPEG-TS), Playlists contains information about the master playlists that you want Elastic Transcoder to create. We recommend that you create only one master playlist per output format. The maximum number of master playlists in a job is 30.

" - }, - "PlaylistFormat":{ - "type":"string", - "pattern":"(^HLSv3$)|(^HLSv4$)|(^Smooth$)|(^MPEG-DASH$)" - }, - "Playlists":{ - "type":"list", - "member":{"shape":"Playlist"} - }, - "Preset":{ - "type":"structure", - "members":{ - "Id":{ - "shape":"Id", - "documentation":"

Identifier for the new preset. You use this value to get settings for the preset or to delete it.

" - }, - "Arn":{ - "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) for the preset.

" - }, - "Name":{ - "shape":"Name", - "documentation":"

The name of the preset.

" - }, - "Description":{ - "shape":"Description", - "documentation":"

A description of the preset.

" - }, - "Container":{ - "shape":"PresetContainer", - "documentation":"

The container type for the output file. Valid values include flac, flv, fmp4, gif, mp3, mp4, mpg, mxf, oga, ogg, ts, and webm.

" - }, - "Audio":{ - "shape":"AudioParameters", - "documentation":"

A section of the response body that provides information about the audio preset values.

" - }, - "Video":{ - "shape":"VideoParameters", - "documentation":"

A section of the response body that provides information about the video preset values.

" - }, - "Thumbnails":{ - "shape":"Thumbnails", - "documentation":"

A section of the response body that provides information about the thumbnail preset values, if any.

" - }, - "Type":{ - "shape":"PresetType", - "documentation":"

Whether the preset is a default preset provided by Elastic Transcoder (System) or a preset that you have defined (Custom).

" - } - }, - "documentation":"

Presets are templates that contain most of the settings for transcoding media files from one format to another. Elastic Transcoder includes some default presets for common formats, for example, several iPod and iPhone versions. You can also create your own presets for formats that aren't included among the default presets. You specify which preset you want to use when you create a job.

" - }, - "PresetContainer":{ - "type":"string", - "pattern":"(^mp4$)|(^ts$)|(^webm$)|(^mp3$)|(^flac$)|(^oga$)|(^ogg$)|(^fmp4$)|(^mpg$)|(^flv$)|(^gif$)|(^mxf$)|(^wav$)|(^mp2$)" - }, - "PresetType":{ - "type":"string", - "pattern":"(^System$)|(^Custom$)" - }, - "PresetWatermark":{ - "type":"structure", - "members":{ - "Id":{ - "shape":"PresetWatermarkId", - "documentation":"

A unique identifier for the settings for one watermark. The value of Id can be up to 40 characters long.

" - }, - "MaxWidth":{ - "shape":"PixelsOrPercent", - "documentation":"

The maximum width of the watermark in one of the following formats:

" - }, - "MaxHeight":{ - "shape":"PixelsOrPercent", - "documentation":"

The maximum height of the watermark in one of the following formats:

If you specify the value in pixels, it must be less than or equal to the value of MaxHeight.

" - }, - "SizingPolicy":{ - "shape":"WatermarkSizingPolicy", - "documentation":"

A value that controls scaling of the watermark:

" - }, - "HorizontalAlign":{ - "shape":"HorizontalAlign", - "documentation":"

The horizontal position of the watermark unless you specify a non-zero value for HorizontalOffset:

" - }, - "HorizontalOffset":{ - "shape":"PixelsOrPercent", - "documentation":"

The amount by which you want the horizontal position of the watermark to be offset from the position specified by HorizontalAlign:

For example, if you specify Left for HorizontalAlign and 5px for HorizontalOffset, the left side of the watermark appears 5 pixels from the left border of the output video.

HorizontalOffset is only valid when the value of HorizontalAlign is Left or Right. If you specify an offset that causes the watermark to extend beyond the left or right border and Elastic Transcoder has not added black bars, the watermark is cropped. If Elastic Transcoder has added black bars, the watermark extends into the black bars. If the watermark extends beyond the black bars, it is cropped.

Use the value of Target to specify whether you want to include the black bars that are added by Elastic Transcoder, if any, in the offset calculation.

" - }, - "VerticalAlign":{ - "shape":"VerticalAlign", - "documentation":"

The vertical position of the watermark unless you specify a non-zero value for VerticalOffset:

" - }, - "VerticalOffset":{ - "shape":"PixelsOrPercent", - "documentation":"

VerticalOffset

The amount by which you want the vertical position of the watermark to be offset from the position specified by VerticalAlign:

For example, if you specify Top for VerticalAlign and 5px for VerticalOffset, the top of the watermark appears 5 pixels from the top border of the output video.

VerticalOffset is only valid when the value of VerticalAlign is Top or Bottom.

If you specify an offset that causes the watermark to extend beyond the top or bottom border and Elastic Transcoder has not added black bars, the watermark is cropped. If Elastic Transcoder has added black bars, the watermark extends into the black bars. If the watermark extends beyond the black bars, it is cropped.

Use the value of Target to specify whether you want Elastic Transcoder to include the black bars that are added by Elastic Transcoder, if any, in the offset calculation.

" - }, - "Opacity":{ - "shape":"Opacity", - "documentation":"

A percentage that indicates how much you want a watermark to obscure the video in the location where it appears. Valid values are 0 (the watermark is invisible) to 100 (the watermark completely obscures the video in the specified location). The datatype of Opacity is float.

Elastic Transcoder supports transparent .png graphics. If you use a transparent .png, the transparent portion of the video appears as if you had specified a value of 0 for Opacity. The .jpg file format doesn't support transparency.

" - }, - "Target":{ - "shape":"Target", - "documentation":"

A value that determines how Elastic Transcoder interprets values that you specified for HorizontalOffset, VerticalOffset, MaxWidth, and MaxHeight:

" - } - }, - "documentation":"

Settings for the size, location, and opacity of graphics that you want Elastic Transcoder to overlay over videos that are transcoded using this preset. You can specify settings for up to four watermarks. Watermarks appear in the specified size and location, and with the specified opacity for the duration of the transcoded video.

Watermarks can be in .png or .jpg format. If you want to display a watermark that is not rectangular, use the .png format, which supports transparency.

When you create a job that uses this preset, you specify the .png or .jpg graphics that you want Elastic Transcoder to include in the transcoded videos. You can specify fewer graphics in the job than you specify watermark settings in the preset, which allows you to use the same preset for up to four watermarks that have different dimensions.

" - }, - "PresetWatermarkId":{ - "type":"string", - "max":40, - "min":1 - }, - "PresetWatermarks":{ - "type":"list", - "member":{"shape":"PresetWatermark"} - }, - "Presets":{ - "type":"list", - "member":{"shape":"Preset"} - }, - "ReadJobRequest":{ - "type":"structure", - "required":["Id"], - "members":{ - "Id":{ - "shape":"Id", - "documentation":"

The identifier of the job for which you want to get detailed information.

", - "location":"uri", - "locationName":"Id" - } - }, - "documentation":"

The ReadJobRequest structure.

" - }, - "ReadJobResponse":{ - "type":"structure", - "members":{ - "Job":{ - "shape":"Job", - "documentation":"

A section of the response body that provides information about the job.

" - } - }, - "documentation":"

The ReadJobResponse structure.

" - }, - "ReadPipelineRequest":{ - "type":"structure", - "required":["Id"], - "members":{ - "Id":{ - "shape":"Id", - "documentation":"

The identifier of the pipeline to read.

", - "location":"uri", - "locationName":"Id" - } - }, - "documentation":"

The ReadPipelineRequest structure.

" - }, - "ReadPipelineResponse":{ - "type":"structure", - "members":{ - "Pipeline":{ - "shape":"Pipeline", - "documentation":"

A section of the response body that provides information about the pipeline.

" - }, - "Warnings":{ - "shape":"Warnings", - "documentation":"

Elastic Transcoder returns a warning if the resources used by your pipeline are not in the same region as the pipeline.

Using resources in the same region, such as your Amazon S3 buckets, Amazon SNS notification topics, and AWS KMS key, reduces processing time and prevents cross-regional charges.

" - } - }, - "documentation":"

The ReadPipelineResponse structure.

" - }, - "ReadPresetRequest":{ - "type":"structure", - "required":["Id"], - "members":{ - "Id":{ - "shape":"Id", - "documentation":"

The identifier of the preset for which you want to get detailed information.

", - "location":"uri", - "locationName":"Id" - } - }, - "documentation":"

The ReadPresetRequest structure.

" - }, - "ReadPresetResponse":{ - "type":"structure", - "members":{ - "Preset":{ - "shape":"Preset", - "documentation":"

A section of the response body that provides information about the preset.

" - } - }, - "documentation":"

The ReadPresetResponse structure.

" - }, - "Resolution":{ - "type":"string", - "pattern":"(^auto$)|(^\\d{1,5}x\\d{1,5}$)" - }, - "ResourceInUseException":{ - "type":"structure", - "members":{}, - "documentation":"

The resource you are attempting to change is in use. For example, you are attempting to delete a pipeline that is currently in use.

", - "error":{"httpStatusCode":409}, - "exception":true - }, - "ResourceNotFoundException":{ - "type":"structure", - "members":{}, - "documentation":"

The requested resource does not exist or is not available. For example, the pipeline to which you're trying to add a job doesn't exist or is still being created.

", - "error":{"httpStatusCode":404}, - "exception":true - }, - "Role":{ - "type":"string", - "pattern":"^arn:aws:iam::\\w{12}:role/.+$" - }, - "Rotate":{ - "type":"string", - "pattern":"(^auto$)|(^0$)|(^90$)|(^180$)|(^270$)" - }, - "SizingPolicy":{ - "type":"string", - "pattern":"(^Fit$)|(^Fill$)|(^Stretch$)|(^Keep$)|(^ShrinkToFit$)|(^ShrinkToFill$)" - }, - "SnsTopic":{ - "type":"string", - "pattern":"(^$)|(^arn:aws:sns:.*:\\w{12}:.+$)" - }, - "SnsTopics":{ - "type":"list", - "member":{"shape":"SnsTopic"}, - "max":30 - }, - "StorageClass":{ - "type":"string", - "pattern":"(^ReducedRedundancy$)|(^Standard$)" - }, - "String":{"type":"string"}, - "Success":{ - "type":"string", - "pattern":"(^true$)|(^false$)" - }, - "Target":{ - "type":"string", - "pattern":"(^Content$)|(^Frame$)" - }, - "TestRoleRequest":{ - "type":"structure", - "required":[ - "Role", - "InputBucket", - "OutputBucket", - "Topics" - ], - "members":{ - "Role":{ - "shape":"Role", - "documentation":"

The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder to test.

" - }, - "InputBucket":{ - "shape":"BucketName", - "documentation":"

The Amazon S3 bucket that contains media files to be transcoded. The action attempts to read from this bucket.

" - }, - "OutputBucket":{ - "shape":"BucketName", - "documentation":"

The Amazon S3 bucket that Elastic Transcoder writes transcoded media files to. The action attempts to read from this bucket.

" - }, - "Topics":{ - "shape":"SnsTopics", - "documentation":"

The ARNs of one or more Amazon Simple Notification Service (Amazon SNS) topics that you want the action to send a test notification to.

" - } - }, - "documentation":"

The TestRoleRequest structure.

", - "deprecated":true - }, - "TestRoleResponse":{ - "type":"structure", - "members":{ - "Success":{ - "shape":"Success", - "documentation":"

If the operation is successful, this value is true; otherwise, the value is false.

" - }, - "Messages":{ - "shape":"ExceptionMessages", - "documentation":"

If the Success element contains false, this value is an array of one or more error messages that were generated during the test process.

" - } - }, - "documentation":"

The TestRoleResponse structure.

", - "deprecated":true - }, - "ThumbnailPattern":{ - "type":"string", - "pattern":"(^$)|(^.*\\{count\\}.*$)" - }, - "ThumbnailResolution":{ - "type":"string", - "pattern":"^\\d{1,5}x\\d{1,5}$" - }, - "Thumbnails":{ - "type":"structure", - "members":{ - "Format":{ - "shape":"JpgOrPng", - "documentation":"

The format of thumbnails, if any. Valid values are jpg and png.

You specify whether you want Elastic Transcoder to create thumbnails when you create a job.

" - }, - "Interval":{ - "shape":"Digits", - "documentation":"

The approximate number of seconds between thumbnails. Specify an integer value.

" - }, - "Resolution":{ - "shape":"ThumbnailResolution", - "documentation":"

To better control resolution and aspect ratio of thumbnails, we recommend that you use the values MaxWidth, MaxHeight, SizingPolicy, and PaddingPolicy instead of Resolution and AspectRatio. The two groups of settings are mutually exclusive. Do not use them together.

The width and height of thumbnail files in pixels. Specify a value in the format width x height where both values are even integers. The values cannot exceed the width and height that you specified in the Video:Resolution object.

" - }, - "AspectRatio":{ - "shape":"AspectRatio", - "documentation":"

To better control resolution and aspect ratio of thumbnails, we recommend that you use the values MaxWidth, MaxHeight, SizingPolicy, and PaddingPolicy instead of Resolution and AspectRatio. The two groups of settings are mutually exclusive. Do not use them together.

The aspect ratio of thumbnails. Valid values include:

auto, 1:1, 4:3, 3:2, 16:9

If you specify auto, Elastic Transcoder tries to preserve the aspect ratio of the video in the output file.

" - }, - "MaxWidth":{ - "shape":"DigitsOrAuto", - "documentation":"

The maximum width of thumbnails in pixels. If you specify auto, Elastic Transcoder uses 1920 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 32 and 4096.

" - }, - "MaxHeight":{ - "shape":"DigitsOrAuto", - "documentation":"

The maximum height of thumbnails in pixels. If you specify auto, Elastic Transcoder uses 1080 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 32 and 3072.

" - }, - "SizingPolicy":{ - "shape":"SizingPolicy", - "documentation":"

Specify one of the following values to control scaling of thumbnails:

" - }, - "PaddingPolicy":{ - "shape":"PaddingPolicy", - "documentation":"

When you set PaddingPolicy to Pad, Elastic Transcoder may add black bars to the top and bottom and/or left and right sides of thumbnails to make the total size of the thumbnails match the values that you specified for thumbnail MaxWidth and MaxHeight settings.

" - } - }, - "documentation":"

Thumbnails for videos.

" - }, - "Time":{ - "type":"string", - "pattern":"(^\\d{1,5}(\\.\\d{0,3})?$)|(^([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)" - }, - "TimeOffset":{ - "type":"string", - "pattern":"(^[+-]?\\d{1,5}(\\.\\d{0,3})?$)|(^[+-]?([0-1]?[0-9]:|2[0-3]:)?([0-5]?[0-9]:)?[0-5]?[0-9](\\.\\d{0,3})?$)" - }, - "TimeSpan":{ - "type":"structure", - "members":{ - "StartTime":{ - "shape":"Time", - "documentation":"

The place in the input file where you want a clip to start. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, Elastic Transcoder starts at the beginning of the input file.

" - }, - "Duration":{ - "shape":"Time", - "documentation":"

The duration of the clip. The format can be either HH:mm:ss.SSS (maximum value: 23:59:59.999; SSS is thousandths of a second) or sssss.SSS (maximum value: 86399.999). If you don't specify a value, Elastic Transcoder creates an output file from StartTime to the end of the file.

If you specify a value longer than the duration of the input file, Elastic Transcoder transcodes the file and returns a warning message.

" - } - }, - "documentation":"

Settings that determine when a clip begins and how long it lasts.

" - }, - "Timing":{ - "type":"structure", - "members":{ - "SubmitTimeMillis":{ - "shape":"NullableLong", - "documentation":"

The time the job was submitted to Elastic Transcoder, in epoch milliseconds.

" - }, - "StartTimeMillis":{ - "shape":"NullableLong", - "documentation":"

The time the job began transcoding, in epoch milliseconds.

" - }, - "FinishTimeMillis":{ - "shape":"NullableLong", - "documentation":"

The time the job finished transcoding, in epoch milliseconds.

" - } - }, - "documentation":"

Details about the timing of a job.

" - }, - "UpdatePipelineNotificationsRequest":{ - "type":"structure", - "required":[ - "Id", - "Notifications" - ], - "members":{ - "Id":{ - "shape":"Id", - "documentation":"

The identifier of the pipeline for which you want to change notification settings.

", - "location":"uri", - "locationName":"Id" - }, - "Notifications":{ - "shape":"Notifications", - "documentation":"

The topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job status.

To receive notifications, you must also subscribe to the new topic in the Amazon SNS console.

" - } - }, - "documentation":"

The UpdatePipelineNotificationsRequest structure.

" - }, - "UpdatePipelineNotificationsResponse":{ - "type":"structure", - "members":{ - "Pipeline":{ - "shape":"Pipeline", - "documentation":"

A section of the response body that provides information about the pipeline associated with this notification.

" - } - }, - "documentation":"

The UpdatePipelineNotificationsResponse structure.

" - }, - "UpdatePipelineRequest":{ - "type":"structure", - "required":["Id"], - "members":{ - "Id":{ - "shape":"Id", - "documentation":"

The ID of the pipeline that you want to update.

", - "location":"uri", - "locationName":"Id" - }, - "Name":{ - "shape":"Name", - "documentation":"

The name of the pipeline. We recommend that the name be unique within the AWS account, but uniqueness is not enforced.

Constraints: Maximum 40 characters

" - }, - "InputBucket":{ - "shape":"BucketName", - "documentation":"

The Amazon S3 bucket in which you saved the media files that you want to transcode and the graphics that you want to use as watermarks.

" - }, - "Role":{ - "shape":"Role", - "documentation":"

The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder to use to transcode jobs for this pipeline.

" - }, - "AwsKmsKeyArn":{ - "shape":"KeyArn", - "documentation":"

The AWS Key Management Service (AWS KMS) key that you want to use with this pipeline.

If you use either s3 or s3-aws-kms as your Encryption:Mode, you don't need to provide a key with your job because a default key, known as an AWS-KMS key, is created for you automatically. You need to provide an AWS-KMS key only if you want to use a non-default AWS-KMS key, or if you are using an Encryption:Mode of aes-cbc-pkcs7, aes-ctr, or aes-gcm.

" - }, - "Notifications":{ - "shape":"Notifications", - "documentation":"

The topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job status.

To receive notifications, you must also subscribe to the new topic in the Amazon SNS console.

" - }, - "ContentConfig":{ - "shape":"PipelineOutputConfig", - "documentation":"

The optional ContentConfig object specifies information about the Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists: which bucket to use, which users you want to have access to the files, the type of access you want users to have, and the storage class that you want to assign to the files.

If you specify values for ContentConfig, you must also specify values for ThumbnailConfig.

If you specify values for ContentConfig and ThumbnailConfig, omit the OutputBucket object.

" - }, - "ThumbnailConfig":{ - "shape":"PipelineOutputConfig", - "documentation":"

The ThumbnailConfig object specifies several values, including the Amazon S3 bucket in which you want Elastic Transcoder to save thumbnail files, which users you want to have access to the files, the type of access you want users to have, and the storage class that you want to assign to the files.

If you specify values for ContentConfig, you must also specify values for ThumbnailConfig even if you don't want to create thumbnails.

If you specify values for ContentConfig and ThumbnailConfig, omit the OutputBucket object.

" - } - }, - "documentation":"

The UpdatePipelineRequest structure.

" - }, - "UpdatePipelineResponse":{ - "type":"structure", - "members":{ - "Pipeline":{ - "shape":"Pipeline", - "documentation":"

The pipeline updated by this UpdatePipelineResponse call.

" - }, - "Warnings":{ - "shape":"Warnings", - "documentation":"

Elastic Transcoder returns a warning if the resources used by your pipeline are not in the same region as the pipeline.

Using resources in the same region, such as your Amazon S3 buckets, Amazon SNS notification topics, and AWS KMS key, reduces processing time and prevents cross-regional charges.

" - } - }, - "documentation":"

When you update a pipeline, Elastic Transcoder returns the values that you specified in the request.

" - }, - "UpdatePipelineStatusRequest":{ - "type":"structure", - "required":[ - "Id", - "Status" - ], - "members":{ - "Id":{ - "shape":"Id", - "documentation":"

The identifier of the pipeline to update.

", - "location":"uri", - "locationName":"Id" - }, - "Status":{ - "shape":"PipelineStatus", - "documentation":"

The desired status of the pipeline:

" - } - }, - "documentation":"

The UpdatePipelineStatusRequest structure.

" - }, - "UpdatePipelineStatusResponse":{ - "type":"structure", - "members":{ - "Pipeline":{ - "shape":"Pipeline", - "documentation":"

A section of the response body that provides information about the pipeline.

" - } - }, - "documentation":"

When you update status for a pipeline, Elastic Transcoder returns the values that you specified in the request.

" - }, - "UserMetadata":{ - "type":"map", - "key":{"shape":"String"}, - "value":{"shape":"String"} - }, - "ValidationException":{ - "type":"structure", - "members":{}, - "documentation":"

One or more required parameter values were not provided in the request.

", - "error":{"httpStatusCode":400}, - "exception":true - }, - "VerticalAlign":{ - "type":"string", - "pattern":"(^Top$)|(^Bottom$)|(^Center$)" - }, - "VideoBitRate":{ - "type":"string", - "pattern":"(^\\d{2,5}$)|(^auto$)" - }, - "VideoCodec":{ - "type":"string", - "pattern":"(^H\\.264$)|(^vp8$)|(^vp9$)|(^mpeg2$)|(^gif$)" - }, - "VideoParameters":{ - "type":"structure", - "members":{ - "Codec":{ - "shape":"VideoCodec", - "documentation":"

The video codec for the output file. Valid values include gif, H.264, mpeg2, vp8, and vp9. You can only specify vp8 and vp9 when the container type is webm, gif when the container type is gif, and mpeg2 when the container type is mpg.

" - }, - "CodecOptions":{ - "shape":"CodecOptions", - "documentation":"

Profile (H.264/VP8/VP9 Only)

The H.264 profile that you want to use for the output file. Elastic Transcoder supports the following profiles:

Level (H.264 Only)

The H.264 level that you want to use for the output file. Elastic Transcoder supports the following levels:

1, 1b, 1.1, 1.2, 1.3, 2, 2.1, 2.2, 3, 3.1, 3.2, 4, 4.1

MaxReferenceFrames (H.264 Only)

Applicable only when the value of Video:Codec is H.264. The maximum number of previously decoded frames to use as a reference for decoding future frames. Valid values are integers 0 through 16, but we recommend that you not use a value greater than the following:

Min(Floor(Maximum decoded picture buffer in macroblocks * 256 / (Width in pixels * Height in pixels)), 16)

where Width in pixels and Height in pixels represent either MaxWidth and MaxHeight, or Resolution. Maximum decoded picture buffer in macroblocks depends on the value of the Level object. See the list below. (A macroblock is a block of pixels measuring 16x16.)

MaxBitRate (Optional, H.264/MPEG2/VP8/VP9 only)

The maximum number of bits per second in a video buffer; the size of the buffer is specified by BufferSize. Specify a value between 16 and 62,500. You can reduce the bandwidth required to stream a video by reducing the maximum bit rate, but this also reduces the quality of the video.

BufferSize (Optional, H.264/MPEG2/VP8/VP9 only)

The maximum number of bits in any x seconds of the output video. This window is commonly 10 seconds, the standard segment duration when you're using FMP4 or MPEG-TS for the container type of the output video. Specify an integer greater than 0. If you specify MaxBitRate and omit BufferSize, Elastic Transcoder sets BufferSize to 10 times the value of MaxBitRate.

InterlacedMode (Optional, H.264/MPEG2 Only)

The interlace mode for the output video.

Interlaced video is used to double the perceived frame rate for a video by interlacing two fields (one field on every other line, the other field on the other lines) so that the human eye registers multiple pictures per frame. Interlacing reduces the bandwidth required for transmitting a video, but can result in blurred images and flickering.

Valid values include Progressive (no interlacing, top to bottom), TopFirst (top field first), BottomFirst (bottom field first), and Auto.

If InterlaceMode is not specified, Elastic Transcoder uses Progressive for the output. If Auto is specified, Elastic Transcoder interlaces the output.

ColorSpaceConversionMode (Optional, H.264/MPEG2 Only)

The color space conversion Elastic Transcoder applies to the output video. Color spaces are the algorithms used by the computer to store information about how to render color. Bt.601 is the standard for standard definition video, while Bt.709 is the standard for high definition video.

Valid values include None, Bt709toBt601, Bt601toBt709, and Auto.

If you chose Auto for ColorSpaceConversionMode and your output is interlaced, your frame rate is one of 23.97, 24, 25, 29.97, 50, or 60, your SegmentDuration is null, and you are using one of the resolution changes from the list below, Elastic Transcoder applies the following color space conversions:

Elastic Transcoder may change the behavior of the ColorspaceConversionMode Auto mode in the future. All outputs in a playlist must use the same ColorSpaceConversionMode.

If you do not specify a ColorSpaceConversionMode, Elastic Transcoder does not change the color space of a file. If you are unsure what ColorSpaceConversionMode was applied to your output file, you can check the AppliedColorSpaceConversion parameter included in your job response. If your job does not have an AppliedColorSpaceConversion in its response, no ColorSpaceConversionMode was applied.

ChromaSubsampling

The sampling pattern for the chroma (color) channels of the output video. Valid values include yuv420p and yuv422p.

yuv420p samples the chroma information of every other horizontal and every other vertical line, yuv422p samples the color information of every horizontal line and every other vertical line.

LoopCount (Gif Only)

The number of times you want the output gif to loop. Valid values include Infinite and integers between 0 and 100, inclusive.

" - }, - "KeyframesMaxDist":{ - "shape":"KeyframesMaxDist", - "documentation":"

Applicable only when the value of Video:Codec is one of H.264, MPEG2, or VP8.

The maximum number of frames between key frames. Key frames are fully encoded frames; the frames between key frames are encoded based, in part, on the content of the key frames. The value is an integer formatted as a string; valid values are between 1 (every frame is a key frame) and 100000, inclusive. A higher value results in higher compression but may also discernibly decrease video quality.

For Smooth outputs, the FrameRate must have a constant ratio to the KeyframesMaxDist. This allows Smooth playlists to switch between different quality levels while the file is being played.

For example, an input file can have a FrameRate of 30 with a KeyframesMaxDist of 90. The output file then needs to have a ratio of 1:3. Valid outputs would have FrameRate of 30, 25, and 10, and KeyframesMaxDist of 90, 75, and 30, respectively.

Alternately, this can be achieved by setting FrameRate to auto and having the same values for MaxFrameRate and KeyframesMaxDist.

" - }, - "FixedGOP":{ - "shape":"FixedGOP", - "documentation":"

Applicable only when the value of Video:Codec is one of H.264, MPEG2, or VP8.

Whether to use a fixed value for FixedGOP. Valid values are true and false:

FixedGOP must be set to true for fmp4 containers.

" - }, - "BitRate":{ - "shape":"VideoBitRate", - "documentation":"

The bit rate of the video stream in the output file, in kilobits/second. Valid values depend on the values of Level and Profile. If you specify auto, Elastic Transcoder uses the detected bit rate of the input source. If you specify a value other than auto, we recommend that you specify a value less than or equal to the maximum H.264-compliant value listed for your level and profile:

Level - Maximum video bit rate in kilobits/second (baseline and main Profile) : maximum video bit rate in kilobits/second (high Profile)

" - }, - "FrameRate":{ - "shape":"FrameRate", - "documentation":"

The frames per second for the video stream in the output file. Valid values include:

auto, 10, 15, 23.97, 24, 25, 29.97, 30, 60

If you specify auto, Elastic Transcoder uses the detected frame rate of the input source. If you specify a frame rate, we recommend that you perform the following calculation:

Frame rate = maximum recommended decoding speed in luma samples/second / (width in pixels * height in pixels)

where:

The maximum recommended decoding speed in Luma samples/second for each level is described in the following list (Level - Decoding speed):

" - }, - "MaxFrameRate":{ - "shape":"MaxFrameRate", - "documentation":"

If you specify auto for FrameRate, Elastic Transcoder uses the frame rate of the input video for the frame rate of the output video. Specify the maximum frame rate that you want Elastic Transcoder to use when the frame rate of the input video is greater than the desired maximum frame rate of the output video. Valid values include: 10, 15, 23.97, 24, 25, 29.97, 30, 60.

" - }, - "Resolution":{ - "shape":"Resolution", - "documentation":"

To better control resolution and aspect ratio of output videos, we recommend that you use the values MaxWidth, MaxHeight, SizingPolicy, PaddingPolicy, and DisplayAspectRatio instead of Resolution and AspectRatio. The two groups of settings are mutually exclusive. Do not use them together.

The width and height of the video in the output file, in pixels. Valid values are auto and width x height:

Note the following about specifying the width and height:

" - }, - "AspectRatio":{ - "shape":"AspectRatio", - "documentation":"

To better control resolution and aspect ratio of output videos, we recommend that you use the values MaxWidth, MaxHeight, SizingPolicy, PaddingPolicy, and DisplayAspectRatio instead of Resolution and AspectRatio. The two groups of settings are mutually exclusive. Do not use them together.

The display aspect ratio of the video in the output file. Valid values include:

auto, 1:1, 4:3, 3:2, 16:9

If you specify auto, Elastic Transcoder tries to preserve the aspect ratio of the input file.

If you specify an aspect ratio for the output file that differs from aspect ratio of the input file, Elastic Transcoder adds pillarboxing (black bars on the sides) or letterboxing (black bars on the top and bottom) to maintain the aspect ratio of the active region of the video.

" - }, - "MaxWidth":{ - "shape":"DigitsOrAuto", - "documentation":"

The maximum width of the output video in pixels. If you specify auto, Elastic Transcoder uses 1920 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 128 and 4096.

" - }, - "MaxHeight":{ - "shape":"DigitsOrAuto", - "documentation":"

The maximum height of the output video in pixels. If you specify auto, Elastic Transcoder uses 1080 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 96 and 3072.

" - }, - "DisplayAspectRatio":{ - "shape":"AspectRatio", - "documentation":"

The value that Elastic Transcoder adds to the metadata in the output file.

" - }, - "SizingPolicy":{ - "shape":"SizingPolicy", - "documentation":"

Specify one of the following values to control scaling of the output video:

" - }, - "PaddingPolicy":{ - "shape":"PaddingPolicy", - "documentation":"

When you set PaddingPolicy to Pad, Elastic Transcoder may add black bars to the top and bottom and/or left and right sides of the output video to make the total size of the output video match the values that you specified for MaxWidth and MaxHeight.

" - }, - "Watermarks":{ - "shape":"PresetWatermarks", - "documentation":"

Settings for the size, location, and opacity of graphics that you want Elastic Transcoder to overlay over videos that are transcoded using this preset. You can specify settings for up to four watermarks. Watermarks appear in the specified size and location, and with the specified opacity for the duration of the transcoded video.

Watermarks can be in .png or .jpg format. If you want to display a watermark that is not rectangular, use the .png format, which supports transparency.

When you create a job that uses this preset, you specify the .png or .jpg graphics that you want Elastic Transcoder to include in the transcoded videos. You can specify fewer graphics in the job than you specify watermark settings in the preset, which allows you to use the same preset for up to four watermarks that have different dimensions.

" - } - }, - "documentation":"

The VideoParameters structure.

" - }, - "Warning":{ - "type":"structure", - "members":{ - "Code":{ - "shape":"String", - "documentation":"

The code of the cross-regional warning.

" - }, - "Message":{ - "shape":"String", - "documentation":"

The message explaining what resources are in a different region from the pipeline.

AWS KMS keys must be in the same region as the pipeline.

" - } - }, - "documentation":"

Elastic Transcoder returns a warning if the resources used by your pipeline are not in the same region as the pipeline.

Using resources in the same region, such as your Amazon S3 buckets, Amazon SNS notification topics, and AWS KMS key, reduces processing time and prevents cross-regional charges.

" - }, - "Warnings":{ - "type":"list", - "member":{"shape":"Warning"} - }, - "WatermarkKey":{ - "type":"string", - "max":1024, - "min":1, - "pattern":"(^.{1,1020}.jpg$)|(^.{1,1019}.jpeg$)|(^.{1,1020}.png$)" - }, - "WatermarkSizingPolicy":{ - "type":"string", - "pattern":"(^Fit$)|(^Stretch$)|(^ShrinkToFit$)" - }, - "ZeroTo255String":{ - "type":"string", - "max":255, - "min":0 - }, - "ZeroTo512String":{ - "type":"string", - "max":512, - "min":0 - } - }, - "documentation":"AWS Elastic Transcoder Service

The AWS Elastic Transcoder Service.

" -} diff --git a/awscli/botocore/data/elastictranscoder/2012-09-25/waiters-2.json b/awscli/botocore/data/elastictranscoder/2012-09-25/waiters-2.json deleted file mode 100644 index 55c362807c2b..000000000000 --- a/awscli/botocore/data/elastictranscoder/2012-09-25/waiters-2.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "version": 2, - "waiters": { - "JobComplete": { - "delay": 30, - "operation": "ReadJob", - "maxAttempts": 120, - "acceptors": [ - { - "expected": "Complete", - "matcher": "path", - "state": "success", - "argument": "Job.Status" - }, - { - "expected": "Canceled", - "matcher": "path", - "state": "failure", - "argument": "Job.Status" - }, - { - "expected": "Error", - "matcher": "path", - "state": "failure", - "argument": "Job.Status" - } - ] - } - } -} diff --git a/awscli/botocore/data/emr-serverless/2021-07-13/service-2.json b/awscli/botocore/data/emr-serverless/2021-07-13/service-2.json index d6a472ed4cbc..25e429409ceb 100644 --- a/awscli/botocore/data/emr-serverless/2021-07-13/service-2.json +++ b/awscli/botocore/data/emr-serverless/2021-07-13/service-2.json @@ -374,6 +374,10 @@ "documentation":"

The Configuration specifications of an application. Each configuration consists of a classification and properties. You use this parameter when creating or updating an application. To see the runtimeConfiguration object of an application, run the GetApplication API operation.

" }, "monitoringConfiguration":{"shape":"MonitoringConfiguration"}, + "diskEncryptionConfiguration":{ + "shape":"DiskEncryptionConfiguration", + "documentation":"

The configuration object that allows encrypting local disks.

" + }, "interactiveConfiguration":{ "shape":"InteractiveConfiguration", "documentation":"

The interactive configuration object that enables the interactive use cases for an application.

" @@ -385,6 +389,10 @@ "identityCenterConfiguration":{ "shape":"IdentityCenterConfiguration", "documentation":"

The IAM Identity Center configuration applied to enable trusted identity propagation.

" + }, + "jobLevelCostAllocationConfiguration":{ + "shape":"JobLevelCostAllocationConfiguration", + "documentation":"

The configuration object that enables job level cost allocation.

" } }, "documentation":"

Information about an application. Amazon EMR Serverless uses applications to run jobs.

" @@ -648,6 +656,10 @@ "monitoringConfiguration":{ "shape":"MonitoringConfiguration", "documentation":"

The override configurations for monitoring.

" + }, + "diskEncryptionConfiguration":{ + "shape":"DiskEncryptionConfiguration", + "documentation":"

The override configuration to encrypt local disks.

" } }, "documentation":"

A configuration specification to be used to override existing configurations.

" @@ -752,6 +764,10 @@ "shape":"MonitoringConfiguration", "documentation":"

The configuration setting for monitoring.

" }, + "diskEncryptionConfiguration":{ + "shape":"DiskEncryptionConfiguration", + "documentation":"

The configuration object that allows encrypting local disks.

" + }, "interactiveConfiguration":{ "shape":"InteractiveConfiguration", "documentation":"

The interactive configuration object that enables the interactive use cases to use when running an application.

" @@ -763,6 +779,10 @@ "identityCenterConfiguration":{ "shape":"IdentityCenterConfigurationInput", "documentation":"

The IAM Identity Center Configuration accepts the Identity Center instance parameter required to enable trusted identity propagation. This configuration allows identity propagation between integrated services and the Identity Center instance.

" + }, + "jobLevelCostAllocationConfiguration":{ + "shape":"JobLevelCostAllocationConfiguration", + "documentation":"

The configuration object that enables job level cost allocation.

" } } }, @@ -804,6 +824,20 @@ "type":"structure", "members":{} }, + "DiskEncryptionConfiguration":{ + "type":"structure", + "members":{ + "encryptionContext":{ + "shape":"EncryptionContext", + "documentation":"

Specifies the optional encryption context that will be used when encrypting the data. An encryption context is a collection of non-secret key-value pairs that represent additional authenticated data.

" + }, + "encryptionKeyArn":{ + "shape":"EncryptionKeyArn", + "documentation":"

The KMS key ARN to encrypt local disks.

" + } + }, + "documentation":"

The configuration object that allows encrypting local disks.

" + }, "DiskSize":{ "type":"string", "max":15, @@ -823,6 +857,23 @@ "max":1000000, "min":0 }, + "EncryptionContext":{ + "type":"map", + "key":{"shape":"EncryptionContextKey"}, + "value":{"shape":"EncryptionContextValue"}, + "max":8, + "min":0 + }, + "EncryptionContextKey":{ + "type":"string", + "max":128, + "min":1 + }, + "EncryptionContextValue":{ + "type":"string", + "max":384, + "min":1 + }, "EncryptionKeyArn":{ "type":"string", "max":2048, @@ -1142,6 +1193,16 @@ "documentation":"

The driver that the job runs on.

", "union":true }, + "JobLevelCostAllocationConfiguration":{ + "type":"structure", + "members":{ + "enabled":{ + "shape":"Boolean", + "documentation":"

Enables job level cost allocation for the application.

" + } + }, + "documentation":"

The configuration object that enables job level cost allocation.

" + }, "JobRun":{ "type":"structure", "required":[ @@ -2274,6 +2335,10 @@ "shape":"MonitoringConfiguration", "documentation":"

The configuration setting for monitoring.

" }, + "diskEncryptionConfiguration":{ + "shape":"DiskEncryptionConfiguration", + "documentation":"

The configuration object that allows encrypting local disks.

" + }, "schedulerConfiguration":{ "shape":"SchedulerConfiguration", "documentation":"

The scheduler configuration for batch and streaming jobs running on this application. Supported with release labels emr-7.0.0 and above.

" @@ -2281,6 +2346,10 @@ "identityCenterConfiguration":{ "shape":"IdentityCenterConfigurationInput", "documentation":"

Specifies the IAM Identity Center configuration used to enable or disable trusted identity propagation. When provided, this configuration determines how the application interacts with IAM Identity Center for user authentication and access control.

" + }, + "jobLevelCostAllocationConfiguration":{ + "shape":"JobLevelCostAllocationConfiguration", + "documentation":"

The configuration object that enables job level cost allocation.

" } } }, diff --git a/awscli/botocore/data/endpoints.json b/awscli/botocore/data/endpoints.json index 1ea460b840de..aab9edffbf3d 100644 --- a/awscli/botocore/data/endpoints.json +++ b/awscli/botocore/data/endpoints.json @@ -1578,6 +1578,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-6" : { }, "ap-southeast-7" : { }, "ca-central-1" : { "variants" : [ { @@ -4106,6 +4107,7 @@ }, "cases" : { "endpoints" : { + "af-south-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-southeast-1" : { }, @@ -4960,12 +4962,14 @@ "tags" : [ "dualstack" ] } ] }, + "ap-southeast-5" : { }, "ca-central-1" : { "variants" : [ { "hostname" : "cloudhsmv2.ca-central-1.api.aws", "tags" : [ "dualstack" ] } ] }, + "ca-west-1" : { }, "eu-central-1" : { "variants" : [ { "hostname" : "cloudhsmv2.eu-central-1.api.aws", @@ -5027,6 +5031,7 @@ "tags" : [ "dualstack" ] } ] }, + "mx-central-1" : { }, "sa-east-1" : { "variants" : [ { "hostname" : "cloudhsmv2.sa-east-1.api.aws", @@ -9254,6 +9259,7 @@ "tags" : [ "dualstack" ] } ] }, + "ap-south-2" : { }, "ap-southeast-1" : { "variants" : [ { "hostname" : "elasticbeanstalk.ap-southeast-1.api.aws", @@ -9272,7 +9278,9 @@ "tags" : [ "dualstack" ] } ] }, + "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-6" : { }, "ap-southeast-7" : { }, "ca-central-1" : { "variants" : [ { @@ -9280,12 +9288,14 @@ "tags" : [ "dualstack" ] } ] }, + "ca-west-1" : { }, "eu-central-1" : { "variants" : [ { "hostname" : "elasticbeanstalk.eu-central-1.api.aws", "tags" : [ "dualstack" ] } ] }, + "eu-central-2" : { }, "eu-north-1" : { "variants" : [ { "hostname" : "elasticbeanstalk.eu-north-1.api.aws", @@ -10045,18 +10055,6 @@ } } }, - "elastictranscoder" : { - "endpoints" : { - "ap-northeast-1" : { }, - "ap-south-1" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "eu-west-1" : { }, - "us-east-1" : { }, - "us-west-1" : { }, - "us-west-2" : { } - } - }, "email" : { "endpoints" : { "af-south-1" : { }, @@ 
-13905,6 +13903,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-6" : { }, "ap-southeast-7" : { }, "ca-central-1" : { "variants" : [ { @@ -14319,6 +14318,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-6" : { }, "ap-southeast-7" : { }, "ca-central-1" : { "variants" : [ { @@ -17716,6 +17716,11 @@ "isRegionalized" : false, "partitionEndpoint" : "aws-global" }, + "nova-act" : { + "endpoints" : { + "us-east-1" : { } + } + }, "oam" : { "endpoints" : { "af-south-1" : { }, @@ -18181,6 +18186,11 @@ } } }, + "partnercentral-channel" : { + "endpoints" : { + "us-east-1" : { } + } + }, "personalize" : { "endpoints" : { "ap-northeast-1" : { }, @@ -18835,6 +18845,7 @@ }, "hostname" : "portal.sso.ap-east-1.amazonaws.com" }, + "ap-east-2" : { }, "ap-northeast-1" : { "credentialScope" : { "region" : "ap-northeast-1" @@ -20596,6 +20607,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-6" : { }, "ap-southeast-7" : { }, "ca-central-1" : { "variants" : [ { @@ -25838,6 +25850,7 @@ "endpoints" : { "af-south-1" : { }, "ap-east-1" : { }, + "ap-east-2" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, "ap-northeast-3" : { }, @@ -27211,6 +27224,7 @@ "ap-southeast-3" : { }, "ap-southeast-4" : { }, "ap-southeast-5" : { }, + "ap-southeast-6" : { }, "ap-southeast-7" : { }, "ca-central-1" : { "variants" : [ { @@ -28371,6 +28385,7 @@ "tags" : [ "fips" ] } ] }, + "ap-southeast-6" : { }, "ap-southeast-7" : { "credentialScope" : { "region" : "ap-southeast-7" @@ -33804,6 +33819,12 @@ } } }, + "kafkaconnect" : { + "endpoints" : { + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, "kendra" : { "endpoints" : { "fips-us-gov-west-1" : { @@ -37854,6 +37875,11 @@ } }, "services" : { + "agreement-marketplace" : { + "endpoints" : { + "us-isob-east-1" : { } + } + }, "api.ecr" : { "endpoints" : { "us-isob-east-1" : { @@ -39772,6 +39798,11 @@ "isRegionalized" : false, 
"partitionEndpoint" : "aws-iso-f-global" }, + "identitystore" : { + "endpoints" : { + "us-isof-east-1" : { } + } + }, "kinesis" : { "endpoints" : { "us-isof-east-1" : { }, @@ -39943,6 +39974,12 @@ "us-isof-south-1" : { } } }, + "rolesanywhere" : { + "endpoints" : { + "us-isof-east-1" : { }, + "us-isof-south-1" : { } + } + }, "route53" : { "endpoints" : { "aws-iso-f-global" : { @@ -40189,7 +40226,490 @@ "description" : "EU (Germany)" } }, - "services" : { } + "services" : { + "access-analyzer" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "acm" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "acm-pca" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "agreement-marketplace" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "api.ecr" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "api.pricing" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "api.sagemaker" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "appconfig" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "appconfigdata" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "application-autoscaling" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "arc-zonal-shift" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "athena" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "autoscaling" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "backup" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "batch" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "bedrock" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "cloudcontrolapi" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "cloudformation" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "cloudtrail" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "codedeploy" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "compute-optimizer" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + 
"config" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "cost-optimization-hub" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "datasync" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "datazone" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "directconnect" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "dlm" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "dms" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "ds" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "dynamodb" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "ebs" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "ec2" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "ecs" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "eks" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "eks-auth" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "elasticache" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "elasticfilesystem" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "elasticloadbalancing" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "elasticmapreduce" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "email" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "entitlement.marketplace" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "es" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "events" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "firehose" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "gameliftstreams" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "glue" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "guardduty" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "health" : { + "endpoints" : { + "eusc-de-east-1" : { + "deprecated" : true + } + } + }, + "identitystore" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "internetmonitor" : 
{ + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "kafka" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "kendra-ranking" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "kinesis" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "kinesisanalytics" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "kms" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "lakeformation" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "lambda" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "license-manager" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "logs" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "metering.marketplace" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "metrics.sagemaker" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "monitoring" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "notifications" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "oam" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "pi" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "qbusiness" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "ram" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "rbin" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "rds" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "redshift" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "resource-groups" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "rolesanywhere" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "route53resolver" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "s3" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "s3-control" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "scheduler" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "secretsmanager" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "securityhub" : { + "endpoints" : { + 
"eusc-de-east-1" : { } + } + }, + "servicediscovery" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "servicequotas" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "signer" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "sms-voice" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "sns" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "sqs" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "ssm" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "states" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "storagegateway" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "streams.dynamodb" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "sts" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "swf" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "synthetics" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "tagging" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "transfer" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "trustedadvisor" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "wafv2" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + }, + "xray" : { + "endpoints" : { + "eusc-de-east-1" : { } + } + } + } } ], "version" : 3 } \ No newline at end of file diff --git a/awscli/botocore/data/entityresolution/2018-05-10/service-2.json b/awscli/botocore/data/entityresolution/2018-05-10/service-2.json index 2f2e463e11cb..ff4a6bf89fd0 100644 --- a/awscli/botocore/data/entityresolution/2018-05-10/service-2.json +++ b/awscli/botocore/data/entityresolution/2018-05-10/service-2.json @@ -256,7 +256,8 @@ {"shape":"AccessDeniedException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns the status, metrics, and errors (if there are any) that are associated with a job.

" + "documentation":"

Returns the status, metrics, and errors (if there are any) that are associated with a job.

", + "readonly":true }, "GetIdMappingWorkflow":{ "name":"GetIdMappingWorkflow", @@ -274,7 +275,8 @@ {"shape":"AccessDeniedException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns the IdMappingWorkflow with a given name, if it exists.

" + "documentation":"

Returns the IdMappingWorkflow with a given name, if it exists.

", + "readonly":true }, "GetIdNamespace":{ "name":"GetIdNamespace", @@ -292,7 +294,8 @@ {"shape":"AccessDeniedException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns the IdNamespace with a given name, if it exists.

" + "documentation":"

Returns the IdNamespace with a given name, if it exists.

", + "readonly":true }, "GetMatchId":{ "name":"GetMatchId", @@ -310,7 +313,8 @@ {"shape":"AccessDeniedException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns the corresponding Match ID of a customer record if the record has been processed in a rule-based matching workflow.

You can call this API as a dry run of an incremental load on the rule-based matching workflow.

" + "documentation":"

Returns the corresponding Match ID of a customer record if the record has been processed in a rule-based matching workflow.

You can call this API as a dry run of an incremental load on the rule-based matching workflow.

", + "readonly":true }, "GetMatchingJob":{ "name":"GetMatchingJob", @@ -328,7 +332,8 @@ {"shape":"AccessDeniedException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns the status, metrics, and errors (if there are any) that are associated with a job.

" + "documentation":"

Returns the status, metrics, and errors (if there are any) that are associated with a job.

", + "readonly":true }, "GetMatchingWorkflow":{ "name":"GetMatchingWorkflow", @@ -346,7 +351,8 @@ {"shape":"AccessDeniedException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns the MatchingWorkflow with a given name, if it exists.

" + "documentation":"

Returns the MatchingWorkflow with a given name, if it exists.

", + "readonly":true }, "GetPolicy":{ "name":"GetPolicy", @@ -364,7 +370,8 @@ {"shape":"AccessDeniedException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns the resource-based policy.

" + "documentation":"

Returns the resource-based policy.

", + "readonly":true }, "GetProviderService":{ "name":"GetProviderService", @@ -382,7 +389,8 @@ {"shape":"AccessDeniedException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns the ProviderService of a given name.

" + "documentation":"

Returns the ProviderService of a given name.

", + "readonly":true }, "GetSchemaMapping":{ "name":"GetSchemaMapping", @@ -400,7 +408,8 @@ {"shape":"AccessDeniedException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns the SchemaMapping of a given name.

" + "documentation":"

Returns the SchemaMapping of a given name.

", + "readonly":true }, "ListIdMappingJobs":{ "name":"ListIdMappingJobs", @@ -418,7 +427,8 @@ {"shape":"AccessDeniedException"}, {"shape":"ValidationException"} ], - "documentation":"

Lists all ID mapping jobs for a given workflow.

" + "documentation":"

Lists all ID mapping jobs for a given workflow.

", + "readonly":true }, "ListIdMappingWorkflows":{ "name":"ListIdMappingWorkflows", @@ -435,7 +445,8 @@ {"shape":"AccessDeniedException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns a list of all the IdMappingWorkflows that have been created for an Amazon Web Services account.

" + "documentation":"

Returns a list of all the IdMappingWorkflows that have been created for an Amazon Web Services account.

", + "readonly":true }, "ListIdNamespaces":{ "name":"ListIdNamespaces", @@ -452,7 +463,8 @@ {"shape":"AccessDeniedException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns a list of all ID namespaces.

" + "documentation":"

Returns a list of all ID namespaces.

", + "readonly":true }, "ListMatchingJobs":{ "name":"ListMatchingJobs", @@ -470,7 +482,8 @@ {"shape":"AccessDeniedException"}, {"shape":"ValidationException"} ], - "documentation":"

Lists all jobs for a given workflow.

" + "documentation":"

Lists all jobs for a given workflow.

", + "readonly":true }, "ListMatchingWorkflows":{ "name":"ListMatchingWorkflows", @@ -487,7 +500,8 @@ {"shape":"AccessDeniedException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns a list of all the MatchingWorkflows that have been created for an Amazon Web Services account.

" + "documentation":"

Returns a list of all the MatchingWorkflows that have been created for an Amazon Web Services account.

", + "readonly":true }, "ListProviderServices":{ "name":"ListProviderServices", @@ -504,7 +518,8 @@ {"shape":"AccessDeniedException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns a list of all the ProviderServices that are available in this Amazon Web Services Region.

" + "documentation":"

Returns a list of all the ProviderServices that are available in this Amazon Web Services Region.

", + "readonly":true }, "ListSchemaMappings":{ "name":"ListSchemaMappings", @@ -521,7 +536,8 @@ {"shape":"AccessDeniedException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns a list of all the SchemaMappings that have been created for an Amazon Web Services account.

" + "documentation":"

Returns a list of all the SchemaMappings that have been created for an Amazon Web Services account.

", + "readonly":true }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -537,7 +553,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"} ], - "documentation":"

Displays the tags associated with an Entity Resolution resource. In Entity Resolution, SchemaMapping, and MatchingWorkflow can be tagged.

" + "documentation":"

Displays the tags associated with an Entity Resolution resource. In Entity Resolution, SchemaMapping, and MatchingWorkflow can be tagged.

", + "readonly":true }, "PutPolicy":{ "name":"PutPolicy", @@ -1193,6 +1210,32 @@ } } }, + "CustomerProfilesDomainArn":{ + "type":"string", + "pattern":"arn:(aws|aws-us-gov|aws-cn):profile:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(domains/[a-zA-Z_0-9-]{1,255})" + }, + "CustomerProfilesIntegrationConfig":{ + "type":"structure", + "required":[ + "domainArn", + "objectTypeArn" + ], + "members":{ + "domainArn":{ + "shape":"CustomerProfilesDomainArn", + "documentation":"

The Amazon Resource Name (ARN) of the Customer Profiles domain where the matched output will be sent.

" + }, + "objectTypeArn":{ + "shape":"CustomerProfilesObjectTypeArn", + "documentation":"

The Amazon Resource Name (ARN) of the Customer Profiles object type that defines the structure for the matched customer data.

" + } + }, + "documentation":"

Specifies the configuration for integrating with Customer Profiles. This configuration enables Entity Resolution to send matched output directly to Customer Profiles instead of Amazon S3, creating a unified customer view by automatically updating customer profiles based on match clusters.

" + }, + "CustomerProfilesObjectTypeArn":{ + "type":"string", + "pattern":"arn:(aws|aws-us-gov|aws-cn):profile:[a-z]{2}-[a-z]{1,10}-[0-9]:[0-9]{12}:(domains/[a-zA-Z_0-9-]{1,255}/object-types/[a-zA-Z_0-9-]{1,255})" + }, "DeleteIdMappingWorkflowInput":{ "type":"structure", "required":["workflowName"], @@ -2263,13 +2306,13 @@ "type":"structure", "required":["outputS3Path"], "members":{ - "outputS3Path":{ - "shape":"S3Path", - "documentation":"

The S3 path to which Entity Resolution will write the output table.

" - }, "KMSArn":{ "shape":"KMSArn", "documentation":"

Customer KMS ARN for encryption at rest. If not provided, system will use an Entity Resolution managed KMS key.

" + }, + "outputS3Path":{ + "shape":"S3Path", + "documentation":"

The S3 path to which Entity Resolution will write the output table.

" } }, "documentation":"

The output source for the ID mapping workflow.

" @@ -3056,6 +3099,12 @@ "min":1, "pattern":"[a-zA-Z_0-9-=+/]*" }, + "OptionalS3Path":{ + "type":"string", + "max":1024, + "min":0, + "pattern":"$|^s3://[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9](/.*)?" + }, "OutputAttribute":{ "type":"structure", "required":["name"], @@ -3073,19 +3122,16 @@ }, "OutputSource":{ "type":"structure", - "required":[ - "outputS3Path", - "output" - ], + "required":["output"], "members":{ - "outputS3Path":{ - "shape":"S3Path", - "documentation":"

The S3 path to which Entity Resolution will write the output table.

" - }, "KMSArn":{ "shape":"KMSArn", "documentation":"

Customer KMS ARN for encryption at rest. If not provided, system will use an Entity Resolution managed KMS key.

" }, + "outputS3Path":{ + "shape":"OptionalS3Path", + "documentation":"

The S3 path to which Entity Resolution will write the output table.

" + }, "output":{ "shape":"OutputSourceOutputList", "documentation":"

A list of OutputAttribute objects, each of which have the fields Name and Hashed. Each of these objects selects a column to be included in the output table, and whether the values of the column should be hashed.

" @@ -3093,6 +3139,10 @@ "applyNormalization":{ "shape":"Boolean", "documentation":"

Normalizes the attributes defined in the schema in the input data. For example, if an attribute has an AttributeType of PHONE_NUMBER, and the data in the input table is in a format of 1234567890, Entity Resolution will normalize this field in the output to (123)-456-7890.

" + }, + "customerProfilesIntegrationConfig":{ + "shape":"CustomerProfilesIntegrationConfig", + "documentation":"

Specifies the Customer Profiles integration configuration for sending matched output directly to Customer Profiles. When configured, Entity Resolution automatically creates and updates customer profiles based on match clusters, eliminating the need for manual Amazon S3 integration setup.

" } }, "documentation":"

A list of OutputAttribute objects, each of which have the fields Name and Hashed. Each of these objects selects a column to be included in the output table, and whether the values of the column should be hashed.

" diff --git a/awscli/botocore/data/fsx/2018-03-01/service-2.json b/awscli/botocore/data/fsx/2018-03-01/service-2.json index 1ffdedb8031f..c2b0b864e34d 100644 --- a/awscli/botocore/data/fsx/2018-03-01/service-2.json +++ b/awscli/botocore/data/fsx/2018-03-01/service-2.json @@ -1507,6 +1507,24 @@ }, "documentation":"

Used to specify the configuration options for an FSx for ONTAP volume's storage aggregate or aggregates.

" }, + "CreateAndAttachS3AccessPointOntapConfiguration":{ + "type":"structure", + "required":[ + "VolumeId", + "FileSystemIdentity" + ], + "members":{ + "VolumeId":{ + "shape":"VolumeId", + "documentation":"

The ID of the FSx for ONTAP volume to which you want the S3 access point attached.

" + }, + "FileSystemIdentity":{ + "shape":"OntapFileSystemIdentity", + "documentation":"

Specifies the file system user identity to use for authorizing file read and write requests that are made using this S3 access point.

" + } + }, + "documentation":"

Specifies the FSx for ONTAP volume that the S3 access point will be attached to, and the file system user identity.

" + }, "CreateAndAttachS3AccessPointOpenZFSConfiguration":{ "type":"structure", "required":[ @@ -1548,6 +1566,7 @@ "shape":"CreateAndAttachS3AccessPointOpenZFSConfiguration", "documentation":"

Specifies the configuration to use when creating and attaching an S3 access point to an FSx for OpenZFS volume.

" }, + "OntapConfiguration":{"shape":"CreateAndAttachS3AccessPointOntapConfiguration"}, "S3AccessPoint":{ "shape":"CreateAndAttachS3AccessPointS3Configuration", "documentation":"

Specifies the virtual private cloud (VPC) configuration if you're creating an access point that is restricted to a VPC. For more information, see Creating access points restricted to a virtual private cloud.

" @@ -4771,6 +4790,49 @@ }, "documentation":"

Configuration for the FSx for NetApp ONTAP file system.

" }, + "OntapFileSystemIdentity":{ + "type":"structure", + "required":["Type"], + "members":{ + "Type":{ + "shape":"OntapFileSystemUserType", + "documentation":"

Specifies the FSx for ONTAP user identity type. Valid values are UNIX and WINDOWS.

" + }, + "UnixUser":{ + "shape":"OntapUnixFileSystemUser", + "documentation":"

Specifies the UNIX user identity for file system operations.

" + }, + "WindowsUser":{ + "shape":"OntapWindowsFileSystemUser", + "documentation":"

Specifies the Windows user identity for file system operations.

" + } + }, + "documentation":"

Specifies the file system user identity that will be used for authorizing all file access requests that are made using the S3 access point. The identity can be either a UNIX user or a Windows user.

" + }, + "OntapFileSystemUserName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,256}$" + }, + "OntapFileSystemUserType":{ + "type":"string", + "enum":[ + "UNIX", + "WINDOWS" + ] + }, + "OntapUnixFileSystemUser":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"OntapFileSystemUserName", + "documentation":"

The name of the UNIX user. The name can be up to 256 characters long.

" + } + }, + "documentation":"

The FSx for ONTAP UNIX file system user that is used for authorizing all file access requests that are made using the S3 access point.

" + }, "OntapVolumeConfiguration":{ "type":"structure", "members":{ @@ -4849,6 +4911,17 @@ "LS" ] }, + "OntapWindowsFileSystemUser":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"OntapFileSystemUserName", + "documentation":"

The name of the Windows user. The name can be up to 256 characters long and supports Active Directory users.

" + } + }, + "documentation":"

The FSx for ONTAP Windows file system user that is used for authorizing all file access requests that are made using the S3 access point.

" + }, "OpenZFSClientConfiguration":{ "type":"structure", "required":[ @@ -5470,6 +5543,10 @@ "shape":"S3AccessPointOpenZFSConfiguration", "documentation":"

The OpenZFSConfiguration of the S3 access point attachment.

" }, + "OntapConfiguration":{ + "shape":"S3AccessPointOntapConfiguration", + "documentation":"

The ONTAP configuration of the S3 access point attachment.

" + }, "S3AccessPoint":{ "shape":"S3AccessPoint", "documentation":"

The S3 access point configuration of the S3 access point attachment.

" @@ -5484,7 +5561,8 @@ "CREATING", "DELETING", "UPDATING", - "FAILED" + "FAILED", + "MISCONFIGURED" ] }, "S3AccessPointAttachmentName":{ @@ -5508,7 +5586,10 @@ }, "S3AccessPointAttachmentType":{ "type":"string", - "enum":["OPENZFS"] + "enum":[ + "OPENZFS", + "ONTAP" + ] }, "S3AccessPointAttachments":{ "type":"list", @@ -5553,6 +5634,20 @@ "member":{"shape":"S3AccessPointAttachmentsFilter"}, "max":2 }, + "S3AccessPointOntapConfiguration":{ + "type":"structure", + "members":{ + "VolumeId":{ + "shape":"VolumeId", + "documentation":"

The ID of the FSx for ONTAP volume that the S3 access point is attached to.

" + }, + "FileSystemIdentity":{ + "shape":"OntapFileSystemIdentity", + "documentation":"

The file system identity used to authorize file access requests made using the S3 access point.

" + } + }, + "documentation":"

Describes the FSx for ONTAP attachment configuration of an S3 access point attachment.

" + }, "S3AccessPointOpenZFSConfiguration":{ "type":"structure", "members":{ diff --git a/awscli/botocore/data/gameliftstreams/2018-05-10/service-2.json b/awscli/botocore/data/gameliftstreams/2018-05-10/service-2.json index ed2d51310258..9ffc00fdd92d 100644 --- a/awscli/botocore/data/gameliftstreams/2018-05-10/service-2.json +++ b/awscli/botocore/data/gameliftstreams/2018-05-10/service-2.json @@ -89,7 +89,7 @@ {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Stream groups manage how Amazon GameLift Streams allocates resources and handles concurrent streams, allowing you to effectively manage capacity and costs. Within a stream group, you specify an application to stream, streaming locations and their capacity, and the stream class you want to use when streaming applications to your end-users. A stream class defines the hardware configuration of the compute resources that Amazon GameLift Streams will use when streaming, such as the CPU, GPU, and memory.

Stream capacity represents the number of concurrent streams that can be active at a time. You set stream capacity per location, per stream group. There are two types of capacity, always-on and on-demand:

Values for capacity must be whole number multiples of the tenancy value of the stream group's stream class.

To adjust the capacity of any ACTIVE stream group, call UpdateStreamGroup.

If the CreateStreamGroup request is successful, Amazon GameLift Streams assigns a unique ID to the stream group resource and sets the status to ACTIVATING. It can take a few minutes for Amazon GameLift Streams to finish creating the stream group while it searches for unallocated compute resources and provisions them. When complete, the stream group status will be ACTIVE and you can start stream sessions by using StartStreamSession. To check the stream group's status, call GetStreamGroup.

Stream groups should be recreated every 3-4 weeks to pick up important service updates and fixes. Stream groups that are older than 180 days can no longer be updated with new application associations. Stream groups expire when they are 365 days old, at which point they can no longer stream sessions. The exact expiration date is indicated by the date value in the ExpiresAt field.

", + "documentation":"

Stream groups manage how Amazon GameLift Streams allocates resources and handles concurrent streams, allowing you to effectively manage capacity and costs. Within a stream group, you specify an application to stream, streaming locations and their capacity, and the stream class you want to use when streaming applications to your end-users. A stream class defines the hardware configuration of the compute resources that Amazon GameLift Streams will use when streaming, such as the CPU, GPU, and memory.

Stream capacity represents the number of concurrent streams that can be active at a time. You set stream capacity per location, per stream group. The following capacity settings are available:

Values for capacity must be whole number multiples of the tenancy value of the stream group's stream class.

To adjust the capacity of any ACTIVE stream group, call UpdateStreamGroup.

If the CreateStreamGroup request is successful, Amazon GameLift Streams assigns a unique ID to the stream group resource and sets the status to ACTIVATING. It can take a few minutes for Amazon GameLift Streams to finish creating the stream group while it searches for unallocated compute resources and provisions them. When complete, the stream group status will be ACTIVE and you can start stream sessions by using StartStreamSession. To check the stream group's status, call GetStreamGroup.

Stream groups should be recreated every 3-4 weeks to pick up important service updates and fixes. Stream groups that are older than 180 days can no longer be updated with new application associations. Stream groups expire when they are 365 days old, at which point they can no longer stream sessions. The exact expiration date is indicated by the date value in the ExpiresAt field.

", "idempotent":true }, "CreateStreamSessionConnection":{ @@ -462,7 +462,7 @@ {"shape":"ValidationException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Updates the configuration settings for an Amazon GameLift Streams stream group resource. To update a stream group, it must be in ACTIVE status. You can change the description, the set of locations, and the requested capacity of a stream group per location. If you want to change the stream class, create a new stream group.

Stream capacity represents the number of concurrent streams that can be active at a time. You set stream capacity per location, per stream group. There are two types of capacity, always-on and on-demand:

Values for capacity must be whole number multiples of the tenancy value of the stream group's stream class.

To update a stream group, specify the stream group's Amazon Resource Name (ARN) and provide the new values. If the request is successful, Amazon GameLift Streams returns the complete updated metadata for the stream group. Expired stream groups cannot be updated.

" + "documentation":"

Updates the configuration settings for an Amazon GameLift Streams stream group resource. To update a stream group, it must be in ACTIVE status. You can change the description, the set of locations, and the requested capacity of a stream group per location. If you want to change the stream class, create a new stream group.

Stream capacity represents the number of concurrent streams that can be active at a time. You set stream capacity per location, per stream group. The following capacity settings are available:

Values for capacity must be whole number multiples of the tenancy value of the stream group's stream class.

To update a stream group, specify the stream group's Amazon Resource Name (ARN) and provide the new values. If the request is successful, Amazon GameLift Streams returns the complete updated metadata for the stream group. Expired stream groups cannot be updated.

" } }, "shapes":{ @@ -632,6 +632,10 @@ } } }, + "Boolean":{ + "type":"boolean", + "box":true + }, "CapacityValue":{ "type":"integer", "box":true, @@ -784,7 +788,7 @@ }, "StreamClass":{ "shape":"StreamClass", - "documentation":"

The target stream quality for sessions that are hosted in this stream group. Set a stream class that is appropriate to the type of content that you're streaming. Stream class determines the type of computing resources Amazon GameLift Streams uses and impacts the cost of streaming. The following options are available:

A stream class can be one of the following:

" + "documentation":"

The target stream quality for sessions that are hosted in this stream group. Set a stream class that is appropriate to the type of content that you're streaming. Stream class determines the type of computing resources Amazon GameLift Streams uses and impacts the cost of streaming. The following options are available:

A stream class can be one of the following:

" }, "DefaultApplicationIdentifier":{ "shape":"Identifier", @@ -827,7 +831,7 @@ }, "StreamClass":{ "shape":"StreamClass", - "documentation":"

The target stream quality for the stream group.

A stream class can be one of the following:

" + "documentation":"

The target stream quality for the stream group.

A stream class can be one of the following:

" }, "Id":{ "shape":"Id", @@ -839,7 +843,7 @@ }, "StatusReason":{ "shape":"StreamGroupStatusReason", - "documentation":"

A short description of the reason that the stream group is in ERROR status. The possible reasons can be one of the following:

" + "documentation":"

A short description of the reason that the stream group is in ERROR status. The possible reasons can be one of the following:

" }, "LastUpdatedAt":{ "shape":"Timestamp", @@ -1185,7 +1189,7 @@ }, "StreamClass":{ "shape":"StreamClass", - "documentation":"

The target stream quality for the stream group.

A stream class can be one of the following:

" + "documentation":"

The target stream quality for the stream group.

A stream class can be one of the following:

" }, "Id":{ "shape":"Id", @@ -1197,7 +1201,7 @@ }, "StatusReason":{ "shape":"StreamGroupStatusReason", - "documentation":"

A short description of the reason that the stream group is in ERROR status. The possible reasons can be one of the following:

" + "documentation":"

A short description of the reason that the stream group is in ERROR status. The possible reasons can be one of the following:

" }, "LastUpdatedAt":{ "shape":"Timestamp", @@ -1297,6 +1301,10 @@ "shape":"EnvironmentVariables", "documentation":"

A set of options that you can use to control the stream session runtime environment, expressed as a set of key-value pairs. You can use this to configure the application or stream session details. You can also provide custom environment variables that Amazon GameLift Streams passes to your game client.

If you want to debug your application with environment variables, we recommend that you do so in a local environment outside of Amazon GameLift Streams. For more information, refer to the Compatibility Guidance in the troubleshooting section of the Developer Guide.

AdditionalEnvironmentVariables and AdditionalLaunchArgs have similar purposes. AdditionalEnvironmentVariables passes data using environment variables; while AdditionalLaunchArgs passes data using command-line arguments.

" }, + "PerformanceStatsConfiguration":{ + "shape":"PerformanceStatsConfiguration", + "documentation":"

The performance stats configuration for the stream session.

" + }, "LogFileLocationUri":{ "shape":"FileLocationUri", "documentation":"

Access location for log files that your content generates during a stream session. These log files are uploaded to cloud storage location at the end of a stream session. The Amazon GameLift Streams application resource defines which log files to upload.

" @@ -1538,11 +1546,22 @@ }, "AlwaysOnCapacity":{ "shape":"AlwaysOnCapacity", - "documentation":"

The streaming capacity that is allocated and ready to handle stream requests without delay. You pay for this capacity whether it's in use or not. Best for quickest time from streaming request to streaming session. Default is 1 (2 for high stream classes) when creating a stream group or adding a location.

" + "documentation":"

This setting, if non-zero, indicates minimum streaming capacity which is allocated to you and is never released back to the service. You pay for this base level of capacity at all times, whether used or idle.

" }, "OnDemandCapacity":{ "shape":"OnDemandCapacity", - "documentation":"

The streaming capacity that Amazon GameLift Streams can allocate in response to stream requests, and then de-allocate when the session has terminated. This offers a cost control measure at the expense of a greater startup time (typically under 5 minutes). Default is 0 when creating a stream group or adding a location.

" + "documentation":"

This field is deprecated. Use MaximumCapacity instead. This parameter cannot be used with MaximumCapacity or TargetIdleCapacity in the same location configuration.

The streaming capacity that Amazon GameLift Streams can allocate in response to stream requests, and then de-allocate when the session has terminated. This offers a cost control measure at the expense of a greater startup time (typically under 5 minutes). Default is 0 when creating a stream group or adding a location.

", + "deprecated":true, + "deprecatedMessage":"This input field is deprecated in favor of explicit MaximumCapacity values.", + "deprecatedSince":"2025-12-17" + }, + "TargetIdleCapacity":{ + "shape":"TargetIdleCapacity", + "documentation":"

This indicates idle capacity which the service pre-allocates and holds for you in anticipation of future activity. This helps to insulate your users from capacity-allocation delays. You pay for capacity which is held in this intentional idle state.

" + }, + "MaximumCapacity":{ + "shape":"MaximumCapacity", + "documentation":"

This indicates the maximum capacity that the service can allocate for you. Newly created streams may take a few minutes to start. Capacity is released back to the service when idle. You pay for capacity that is allocated to you until it is released.

" } }, "documentation":"

Configuration settings that define a stream group's stream capacity for a location. When configuring a location for the first time, you must specify a numeric value for at least one of the two capacity types. To update the capacity for an existing stream group, call UpdateStreamGroup. To add a new location and specify its capacity, call AddStreamGroupLocations.

" @@ -1577,19 +1596,27 @@ }, "AlwaysOnCapacity":{ "shape":"AlwaysOnCapacity", - "documentation":"

The streaming capacity that is allocated and ready to handle stream requests without delay. You pay for this capacity whether it's in use or not. Best for quickest time from streaming request to streaming session. Default is 1 (2 for high stream classes) when creating a stream group or adding a location.

" + "documentation":"

This setting, if non-zero, indicates minimum streaming capacity which is allocated to you and is never released back to the service. You pay for this base level of capacity at all times, whether used or idle.

" }, "OnDemandCapacity":{ "shape":"OnDemandCapacity", "documentation":"

The streaming capacity that Amazon GameLift Streams can allocate in response to stream requests, and then de-allocate when the session has terminated. This offers a cost control measure at the expense of a greater startup time (typically under 5 minutes). Default is 0 when creating a stream group or adding a location.

" }, + "TargetIdleCapacity":{ + "shape":"TargetIdleCapacity", + "documentation":"

This indicates idle capacity which the service pre-allocates and holds for you in anticipation of future activity. This helps to insulate your users from capacity-allocation delays. You pay for capacity which is held in this intentional idle state.

" + }, + "MaximumCapacity":{ + "shape":"MaximumCapacity", + "documentation":"

This indicates the maximum capacity that the service can allocate for you. Newly created streams may take a few minutes to start. Capacity is released back to the service when idle. You pay for capacity that is allocated to you until it is released.

" + }, "RequestedCapacity":{ "shape":"CapacityValue", "documentation":"

This value is the always-on capacity that you most recently requested for a stream group. You request capacity separately for each location in a stream group. In response to an increase in requested capacity, Amazon GameLift Streams attempts to provision compute resources to make the stream group's allocated capacity meet requested capacity. When always-on capacity is decreased, it can take a few minutes to deprovision allocated capacity to match the requested capacity.

" }, "AllocatedCapacity":{ "shape":"CapacityValue", - "documentation":"

This value is the stream capacity that Amazon GameLift Streams has provisioned in a stream group that can respond immediately to stream requests. It includes resources that are currently streaming and resources that are idle and ready to respond to stream requests. You pay for this capacity whether it's in use or not. After making changes to capacity, it can take a few minutes for the allocated capacity count to reflect the change while compute resources are allocated or deallocated. Similarly, when allocated on-demand capacity is no longer needed, it can take a few minutes for Amazon GameLift Streams to spin down the allocated capacity.

" + "documentation":"

This value is the stream capacity that Amazon GameLift Streams has provisioned in a stream group that can respond immediately to stream requests. It includes resources that are currently streaming and resources that are idle and ready to respond to stream requests. When target-idle capacity is configured, the idle resources include the capacity buffer maintained beyond ongoing sessions. You pay for this capacity whether it's in use or not. After making changes to capacity, it can take a few minutes for the allocated capacity count to reflect the change while compute resources are allocated or deallocated. Similarly, when allocated on-demand capacity is no longer needed, it can take a few minutes for Amazon GameLift Streams to spin down the allocated capacity.

" }, "IdleCapacity":{ "shape":"CapacityValue", @@ -1614,6 +1641,11 @@ "max":100, "min":1 }, + "MaximumCapacity":{ + "type":"integer", + "box":true, + "min":0 + }, "NextToken":{"type":"string"}, "OnDemandCapacity":{ "type":"integer", @@ -1626,6 +1658,16 @@ "min":0, "pattern":"s3://.*(/|\\.zip|\\.ZIP)" }, + "PerformanceStatsConfiguration":{ + "type":"structure", + "members":{ + "SharedWithClient":{ + "shape":"Boolean", + "documentation":"

Performance stats for the session are streamed to the client when set to true. Defaults to false.

" + } + }, + "documentation":"

Configuration settings for sharing the stream session's performance stats with the client.

" + }, "Protocol":{ "type":"string", "enum":["WebRTC"] @@ -1813,6 +1855,10 @@ "AdditionalEnvironmentVariables":{ "shape":"EnvironmentVariables", "documentation":"

A set of options that you can use to control the stream session runtime environment, expressed as a set of key-value pairs. You can use this to configure the application or stream session details. You can also provide custom environment variables that Amazon GameLift Streams passes to your game client.

If you want to debug your application with environment variables, we recommend that you do so in a local environment outside of Amazon GameLift Streams. For more information, refer to the Compatibility Guidance in the troubleshooting section of the Developer Guide.

AdditionalEnvironmentVariables and AdditionalLaunchArgs have similar purposes. AdditionalEnvironmentVariables passes data using environment variables; while AdditionalLaunchArgs passes data using command-line arguments.

" + }, + "PerformanceStatsConfiguration":{ + "shape":"PerformanceStatsConfiguration", + "documentation":"

Configuration settings for sharing the stream session's performance stats with the client.

" } } }, @@ -1875,6 +1921,10 @@ "shape":"EnvironmentVariables", "documentation":"

A set of options that you can use to control the stream session runtime environment, expressed as a set of key-value pairs. You can use this to configure the application or stream session details. You can also provide custom environment variables that Amazon GameLift Streams passes to your game client.

If you want to debug your application with environment variables, we recommend that you do so in a local environment outside of Amazon GameLift Streams. For more information, refer to the Compatibility Guidance in the troubleshooting section of the Developer Guide.

AdditionalEnvironmentVariables and AdditionalLaunchArgs have similar purposes. AdditionalEnvironmentVariables passes data using environment variables; while AdditionalLaunchArgs passes data using command-line arguments.

" }, + "PerformanceStatsConfiguration":{ + "shape":"PerformanceStatsConfiguration", + "documentation":"

The performance stats configuration for the stream session.

" + }, "LogFileLocationUri":{ "shape":"FileLocationUri", "documentation":"

Access location for log files that your content generates during a stream session. These log files are uploaded to cloud storage location at the end of a stream session. The Amazon GameLift Streams application resource defines which log files to upload.

" @@ -1909,7 +1959,14 @@ "gen4n_win2022", "gen5n_high", "gen5n_ultra", - "gen5n_win2022" + "gen5n_win2022", + "gen6n_small", + "gen6n_medium", + "gen6n_high", + "gen6n_ultra", + "gen6n_ultra_win2022", + "gen6n_pro", + "gen6n_pro_win2022" ] }, "StreamGroupLocationStatus":{ @@ -1962,7 +2019,7 @@ }, "StreamClass":{ "shape":"StreamClass", - "documentation":"

The target stream quality for the stream group.

A stream class can be one of the following:

" + "documentation":"

The target stream quality for the stream group.

A stream class can be one of the following:

" }, "Status":{ "shape":"StreamGroupStatus", @@ -2030,6 +2087,10 @@ "shape":"StreamSessionStatus", "documentation":"

The current status of the stream session resource.

" }, + "StatusReason":{ + "shape":"StreamSessionStatusReason", + "documentation":"

A short description of the reason the stream session is in ERROR status or TERMINATED status.

ERROR status reasons:

TERMINATED status reasons:

" + }, "Protocol":{ "shape":"Protocol", "documentation":"

The data transfer protocol in use with the stream session.

" @@ -2108,6 +2169,11 @@ "max":50, "min":1 }, + "TargetIdleCapacity":{ + "type":"integer", + "box":true, + "min":0 + }, "TerminateStreamSessionInput":{ "type":"structure", "required":[ @@ -2304,7 +2370,7 @@ }, "StreamClass":{ "shape":"StreamClass", - "documentation":"

The target stream quality for the stream group.

A stream class can be one of the following:

" + "documentation":"

The target stream quality for the stream group.

A stream class can be one of the following:

" }, "Id":{ "shape":"Id", @@ -2316,7 +2382,7 @@ }, "StatusReason":{ "shape":"StreamGroupStatusReason", - "documentation":"

A short description of the reason that the stream group is in ERROR status. The possible reasons can be one of the following:

" + "documentation":"

A short description of the reason that the stream group is in ERROR status. The possible reasons can be one of the following:

" }, "LastUpdatedAt":{ "shape":"Timestamp", diff --git a/awscli/botocore/data/geo-places/2020-11-19/service-2.json b/awscli/botocore/data/geo-places/2020-11-19/service-2.json index b71dce1abc44..d5bf5d5e1e40 100644 --- a/awscli/botocore/data/geo-places/2020-11-19/service-2.json +++ b/awscli/botocore/data/geo-places/2020-11-19/service-2.json @@ -28,7 +28,8 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Autocomplete completes potential places and addresses as the user types, based on the partial input. The API enhances the efficiency and accuracy of address by completing query based on a few entered keystrokes. It helps you by completing partial queries with valid address completion. Also, the API supports the filtering of results based on geographic location, country, or specific place types, and can be tailored using optional parameters like language and political views.

" + "documentation":"

Autocomplete completes potential places and addresses as the user types, based on the partial input. The API enhances the efficiency and accuracy of address entry by completing queries based on a few entered keystrokes. It helps you by completing partial queries with valid address completions. Also, the API supports filtering of results based on geographic location, country, or specific place types, and can be tailored using optional parameters such as language and political views.

For more information, see Autocomplete in the Amazon Location Service Developer Guide.

", + "readonly":true }, "Geocode":{ "name":"Geocode", @@ -45,7 +46,8 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Geocode converts a textual address or place into geographic coordinates. You can obtain geographic coordinates, address component, and other related information. It supports flexible queries, including free-form text or structured queries with components like street names, postal codes, and regions. The Geocode API can also provide additional features such as time zone information and the inclusion of political views.

" + "documentation":"

Geocode converts a textual address or place into geographic coordinates. You can obtain geographic coordinates, address component, and other related information. It supports flexible queries, including free-form text or structured queries with components like street names, postal codes, and regions. The Geocode API can also provide additional features such as time zone information and the inclusion of political views.

For more information, see Geocode in the Amazon Location Service Developer Guide.

", + "readonly":true }, "GetPlace":{ "name":"GetPlace", @@ -62,7 +64,8 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

GetPlace finds a place by its unique ID. A PlaceId is returned by other place operations.

" + "documentation":"

GetPlace finds a place by its unique ID. A PlaceId is returned by other place operations.

For more information, see GetPlace in the Amazon Location Service Developer Guide.

", + "readonly":true }, "ReverseGeocode":{ "name":"ReverseGeocode", @@ -79,7 +82,8 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

ReverseGeocode converts geographic coordinates into a human-readable address or place. You can obtain address component, and other related information such as place type, category, street information. The Reverse Geocode API supports filtering to on place type so that you can refine result based on your need. Also, The Reverse Geocode API can also provide additional features such as time zone information and the inclusion of political views.

" + "documentation":"

ReverseGeocode converts geographic coordinates into a human-readable address or place. You can obtain address component, and other related information such as place type, category, street information. The Reverse Geocode API supports filtering on place type so that you can refine results based on your needs. Also, the Reverse Geocode API can provide additional features such as time zone information and the inclusion of political views.

For more information, see Reverse Geocode in the Amazon Location Service Developer Guide.

", + "readonly":true }, "SearchNearby":{ "name":"SearchNearby", @@ -96,7 +100,8 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

SearchNearby queries for points of interest within a radius from a central coordinates, returning place results with optional filters such as categories, business chains, food types and more. The API returns details such as a place name, address, phone, category, food type, contact, opening hours. Also, the API can return phonemes, time zones and more based on requested parameters.

" + "documentation":"

SearchNearby queries for points of interest within a radius from a central coordinates, returning place results with optional filters such as categories, business chains, food types and more. The API returns details such as a place name, address, phone, category, food type, contact, opening hours. Also, the API can return phonemes, time zones and more based on requested parameters.

For more information, see Search Nearby in the Amazon Location Service Developer Guide.

", + "readonly":true }, "SearchText":{ "name":"SearchText", @@ -113,7 +118,8 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

SearchText searches for geocode and place information. You can then complete a follow-up query suggested from the Suggest API via a query id.

" + "documentation":"

SearchText searches for geocode and place information. You can then complete a follow-up query suggested from the Suggest API via a query id.

For more information, see Search Text in the Amazon Location Service Developer Guide.

", + "readonly":true }, "Suggest":{ "name":"Suggest", @@ -130,7 +136,8 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Suggest provides intelligent predictions or recommendations based on the user's input or context, such as relevant places, points of interest, query terms or search category. It is designed to help users find places or point of interests candidates or identify a follow on query based on incomplete or misspelled queries. It returns a list of possible matches or refinements that can be used to formulate a more accurate query. Users can select the most appropriate suggestion and use it for further searching. The API provides options for filtering results by location and other attributes, and allows for additional features like phonemes and timezones. The response includes refined query terms and detailed place information.

" + "documentation":"

Suggest provides intelligent predictions or recommendations based on the user's input or context, such as relevant places, points of interest, query terms or search category. It is designed to help users find places or point of interests candidates or identify a follow on query based on incomplete or misspelled queries. It returns a list of possible matches or refinements that can be used to formulate a more accurate query. Users can select the most appropriate suggestion and use it for further searching. The API provides options for filtering results by location and other attributes, and allows for additional features like phonemes and timezones. The response includes refined query terms and detailed place information.

For more information, see Suggest in the Amazon Location Service Developer Guide.

", + "readonly":true } }, "shapes":{ @@ -155,7 +162,7 @@ "members":{ "Position":{ "shape":"Position", - "documentation":"

The position, in longitude and latitude.

" + "documentation":"

The position in World Geodetic System (WGS 84) format: [longitude, latitude].

" } }, "documentation":"

Position of the access point represented by longitude and latitude for a vehicle.

" @@ -251,7 +258,7 @@ }, "SecondaryAddressComponents":{ "shape":"SecondaryAddressComponentList", - "documentation":"

Components that correspond to secondary identifiers on an Address. Secondary address components include information such as Suite or Unit Number, Building, or Floor.

" + "documentation":"

Components that correspond to secondary identifiers on an Address. Secondary address components include information such as Suite or Unit Number, Building, or Floor.

Coverage for Address.SecondaryAddressComponents is available in the following countries:

AUS, CAN, NZL, USA, PRI

" } }, "documentation":"

The place address.

" @@ -327,7 +334,7 @@ }, "SecondaryAddressComponents":{ "shape":"SecondaryAddressComponentMatchScoreList", - "documentation":"

Match scores for the secondary address components in the result.

" + "documentation":"

Match scores for the secondary address components in the result.

Coverage for this functionality is available in the following countries: AUS, AUT, BRA, CAN, ESP, FRA, GBR, IDN, IND, NZL, TUR, TWN, USA.

" } }, "documentation":"

Indicates how well the entire input matches the returned. It is equal to 1 if all input tokens are recognized and matched.

" @@ -560,11 +567,11 @@ }, "MaxResults":{ "shape":"AutocompleteRequestMaxResultsInteger", - "documentation":"

An optional limit for the number of results returned in a single call.

" + "documentation":"

An optional limit for the number of results returned in a single call.

Default value: 5

" }, "BiasPosition":{ "shape":"Position", - "documentation":"

The position in longitude and latitude that the results should be close to. Typically, place results returned are ranked higher the closer they are to this position. Stored in [lng, lat] and in the WSG84 format.

The fields BiasPosition, FilterBoundingBox, and FilterCircle are mutually exclusive.

" + "documentation":"

The position in longitude and latitude that the results should be close to. Typically, place results returned are ranked higher the closer they are to this position. Stored in [lng, lat] and in the WGS 84 format.

The fields BiasPosition, FilterBoundingBox, and FilterCircle are mutually exclusive.

" }, "Filter":{ "shape":"AutocompleteFilter", @@ -956,7 +963,7 @@ "members":{ "Center":{ "shape":"Position", - "documentation":"

The center position, in longitude and latitude, of the FilterCircle.

" + "documentation":"

The center position in World Geodetic System (WGS 84) format: [longitude, latitude].

" }, "Radius":{ "shape":"FilterCircleRadiusLong", @@ -1034,7 +1041,7 @@ "GeocodeAdditionalFeatureList":{ "type":"list", "member":{"shape":"GeocodeAdditionalFeature"}, - "max":2, + "max":4, "min":1 }, "GeocodeFilter":{ @@ -1143,7 +1150,7 @@ }, "SecondaryAddressComponents":{ "shape":"ParsedQuerySecondaryAddressComponentList", - "documentation":"

Parsed secondary address components from the provided query text.

" + "documentation":"

Parsed secondary address components from the provided query text.

Coverage for ParsedQuery.Address.SecondaryAddressComponents is available in the following countries:

AUS, AUT, BRA, CAN, ESP, FRA, GBR, HKG, IDN, IND, NZL, TUR, TWN, USA

" } }, "documentation":"

Parsed address components in the provided QueryText.

" @@ -1247,16 +1254,16 @@ "members":{ "QueryText":{ "shape":"GeocodeRequestQueryTextString", - "documentation":"

The free-form text query to match addresses against. This is usually a partially typed address from an end user in an address box or form.

The fields QueryText, and QueryID are mutually exclusive.

" + "documentation":"

The free-form text query to match addresses against. This is usually a partially typed address from an end user in an address box or form.

" }, "QueryComponents":{"shape":"GeocodeQueryComponents"}, "MaxResults":{ "shape":"GeocodeRequestMaxResultsInteger", - "documentation":"

An optional limit for the number of results returned in a single call.

" + "documentation":"

An optional limit for the number of results returned in a single call.

Default value: 20

" }, "BiasPosition":{ "shape":"Position", - "documentation":"

The position, in longitude and latitude, that the results should be close to. Typically, place results returned are ranked higher the closer they are to this position. Stored in [lng, lat] and in the WSG84 format.

The fields BiasPosition, FilterBoundingBox, and FilterCircle are mutually exclusive.

" + "documentation":"

The position, in longitude and latitude, that the results should be close to. Typically, place results returned are ranked higher the closer they are to this position. Stored in [lng, lat] and in the WGS 84 format.

" }, "Filter":{ "shape":"GeocodeFilter", @@ -1304,7 +1311,7 @@ "members":{ "PricingBucket":{ "shape":"String", - "documentation":"

The pricing bucket for which the query is charged at.

For more information on pricing, please visit Amazon Location Service Pricing.

", + "documentation":"

The pricing bucket for which the query is charged at, or the maximum pricing bucket when the query is charged per item within the query.

For more information on pricing, please visit Amazon Location Service Pricing.

", "location":"header", "locationName":"x-amz-geo-pricing-bucket" }, @@ -1348,7 +1355,7 @@ }, "Position":{ "shape":"Position", - "documentation":"

The position in longitude and latitude.

" + "documentation":"

The position in World Geodetic System (WGS 84) format: [longitude, latitude].

" }, "Distance":{ "shape":"DistanceMeters", @@ -1368,7 +1375,7 @@ }, "AccessPoints":{ "shape":"AccessPointList", - "documentation":"

Position of the access point represented by longitude and latitude.

" + "documentation":"

Position of the access point in World Geodetic System (WGS 84) format: [longitude, latitude].

" }, "TimeZone":{ "shape":"TimeZone", @@ -1396,7 +1403,7 @@ }, "SecondaryAddresses":{ "shape":"RelatedPlaceList", - "documentation":"

All secondary addresses that are associated with a main address. A secondary address is one that includes secondary designators, such as a Suite or Unit Number, Building, or Floor information.

" + "documentation":"

All secondary addresses that are associated with a main address. A secondary address is one that includes secondary designators, such as a Suite or Unit Number, Building, or Floor information.

Coverage for this functionality is available in the following countries: AUS, CAN, NZL, USA, PRI.

" } }, "documentation":"

The Geocoded result.

" @@ -1432,7 +1439,7 @@ "GetPlaceAdditionalFeatureList":{ "type":"list", "member":{"shape":"GetPlaceAdditionalFeature"}, - "max":4, + "max":5, "min":1 }, "GetPlaceIntendedUse":{ @@ -1531,7 +1538,7 @@ }, "Position":{ "shape":"Position", - "documentation":"

The position, in longitude and latitude.

" + "documentation":"

The position in World Geodetic System (WGS 84) format: [longitude, latitude].

" }, "MapView":{ "shape":"BoundingBox", @@ -1559,7 +1566,7 @@ }, "AccessPoints":{ "shape":"AccessPointList", - "documentation":"

Position of the access point in (lng,lat).

" + "documentation":"

Position of the access point in World Geodetic System (WGS 84) format: [longitude, latitude].

" }, "AccessRestrictions":{ "shape":"AccessRestrictionList", @@ -1583,7 +1590,7 @@ }, "SecondaryAddresses":{ "shape":"RelatedPlaceList", - "documentation":"

All secondary addresses that are associated with a main address. A secondary address is one that includes secondary designators, such as a Suite or Unit Number, Building, or Floor information.

" + "documentation":"

All secondary addresses that are associated with a main address. A secondary address is one that includes secondary designators, such as a Suite or Unit Number, Building, or Floor information.

Coverage for this functionality is available in the following countries: AUS, CAN, NZL, USA, PRI.

" } } }, @@ -1599,6 +1606,12 @@ "min":0, "sensitive":true }, + "Heading":{ + "type":"double", + "max":360.0, + "min":0.0, + "sensitive":true + }, "Highlight":{ "type":"structure", "members":{ @@ -1672,7 +1685,7 @@ "Address":{"shape":"Address"}, "Position":{ "shape":"Position", - "documentation":"

The position, in longitude and latitude.

" + "documentation":"

The position in World Geodetic System (WGS 84) format: [longitude, latitude].

" }, "Distance":{ "shape":"DistanceMeters", @@ -1690,7 +1703,7 @@ }, "AccessPoints":{ "shape":"AccessPointList", - "documentation":"

Position of the access point represented by longitude and latitude.

" + "documentation":"

Position of the access point in World Geodetic System (WGS 84) format: [longitude, latitude].

" } }, "documentation":"

All Intersections that are near the provided address.

" @@ -2013,7 +2026,8 @@ "PointOfInterest", "PointAddress", "InterpolatedAddress", - "SecondaryAddress" + "SecondaryAddress", + "InferredSecondaryAddress" ], "sensitive":true }, @@ -2220,11 +2234,11 @@ "Address":{"shape":"Address"}, "Position":{ "shape":"Position", - "documentation":"

The position, in longitude and latitude.

" + "documentation":"

The position in World Geodetic System (WGS 84) format: [longitude, latitude].

" }, "AccessPoints":{ "shape":"AccessPointList", - "documentation":"

Position of the access point represented by longitude and latitude.

" + "documentation":"

Position of the access point in World Geodetic System (WGS 84) format: [longitude, latitude].

" } }, "documentation":"

Place that is related to the result item.

" @@ -2257,7 +2271,7 @@ "ReverseGeocodeAdditionalFeatureList":{ "type":"list", "member":{"shape":"ReverseGeocodeAdditionalFeature"}, - "max":2, + "max":3, "min":1 }, "ReverseGeocodeFilter":{ @@ -2299,7 +2313,7 @@ "members":{ "QueryPosition":{ "shape":"Position", - "documentation":"

The position, in [lng, lat] for which you are querying nearby results for. Results closer to the position will be ranked higher then results further away from the position

" + "documentation":"

The position in World Geodetic System (WGS 84) format: [longitude, latitude] for which you are querying nearby results. Results closer to the position will be ranked higher than results further away from the position

" }, "QueryRadius":{ "shape":"ReverseGeocodeRequestQueryRadiusLong", @@ -2308,7 +2322,7 @@ }, "MaxResults":{ "shape":"ReverseGeocodeRequestMaxResultsInteger", - "documentation":"

An optional limit for the number of results returned in a single call.

" + "documentation":"

An optional limit for the number of results returned in a single call.

Default value: 1

" }, "Filter":{ "shape":"ReverseGeocodeFilter", @@ -2335,6 +2349,10 @@ "documentation":"

Optional: The API key to be used for authorization. Either an API key or valid SigV4 signature must be provided when making a request.

", "location":"querystring", "locationName":"key" + }, + "Heading":{ + "shape":"Heading", + "documentation":"

The heading in degrees from true north in a navigation context. The heading is measured as the angle clockwise from the North direction.

Example: North is 0 degrees, East is 90 degrees, South is 180 degrees, and West is 270 degrees.

" } } }, @@ -2400,7 +2418,7 @@ }, "Position":{ "shape":"Position", - "documentation":"

The position in longitude and latitude.

" + "documentation":"

The position in World Geodetic System (WGS 84) format: [longitude, latitude].

" }, "Distance":{ "shape":"DistanceMeters", @@ -2420,7 +2438,7 @@ }, "AccessPoints":{ "shape":"AccessPointList", - "documentation":"

Position of the access point represented by longitude and latitude.

" + "documentation":"

Position of the access point in World Geodetic System (WGS 84) format: [longitude, latitude].

" }, "TimeZone":{ "shape":"TimeZone", @@ -2521,7 +2539,7 @@ "members":{ "QueryPosition":{ "shape":"Position", - "documentation":"

The position, in [lng, lat] for which you are querying nearby results for. Results closer to the position will be ranked higher then results further away from the position

" + "documentation":"

The position in World Geodetic System (WGS 84) format: [longitude, latitude] for which you are querying nearby results. Results closer to the position will be ranked higher than results further away from the position

" }, "QueryRadius":{ "shape":"SearchNearbyRequestQueryRadiusLong", @@ -2530,7 +2548,7 @@ }, "MaxResults":{ "shape":"SearchNearbyRequestMaxResultsInteger", - "documentation":"

An optional limit for the number of results returned in a single call.

" + "documentation":"

An optional limit for the number of results returned in a single call.

Default value: 20

" }, "Filter":{ "shape":"SearchNearbyFilter", @@ -2626,7 +2644,7 @@ }, "Position":{ "shape":"Position", - "documentation":"

The position in longitude and latitude.

" + "documentation":"

The position in World Geodetic System (WGS 84) format: [longitude, latitude].

" }, "Distance":{ "shape":"DistanceMeters", @@ -2658,7 +2676,7 @@ }, "AccessPoints":{ "shape":"AccessPointList", - "documentation":"

Position of the access point represent by longitude and latitude.

" + "documentation":"

Position of the access point in World Geodetic System (WGS 84) format: [longitude, latitude].

" }, "AccessRestrictions":{ "shape":"AccessRestrictionList", @@ -2739,19 +2757,19 @@ "members":{ "QueryText":{ "shape":"SearchTextRequestQueryTextString", - "documentation":"

The free-form text query to match addresses against. This is usually a partially typed address from an end user in an address box or form.

The fields QueryText, and QueryID are mutually exclusive.

" + "documentation":"

The free-form text query to match addresses against. This is usually a partially typed address from an end user in an address box or form.

Exactly one of the following fields must be set: QueryText or QueryId.

" }, "QueryId":{ "shape":"SearchTextRequestQueryIdString", - "documentation":"

The query Id returned by the suggest API. If passed in the request, the SearchText API will preform a SearchText query with the improved query terms for the original query made to the suggest API.

The fields QueryText, and QueryID are mutually exclusive.

" + "documentation":"

The query Id returned by the suggest API. If passed in the request, the SearchText API will perform a SearchText query with the improved query terms for the original query made to the suggest API.

Exactly one of the following fields must be set: QueryText or QueryId.

" }, "MaxResults":{ "shape":"SearchTextRequestMaxResultsInteger", - "documentation":"

An optional limit for the number of results returned in a single call.

" + "documentation":"

An optional limit for the number of results returned in a single call.

Default value: 20

" }, "BiasPosition":{ "shape":"Position", - "documentation":"

The position, in longitude and latitude, that the results should be close to. Typically, place results returned are ranked higher the closer they are to this position. Stored in [lng, lat] and in the WSG84 format.

The fields BiasPosition, FilterBoundingBox, and FilterCircle are mutually exclusive.

" + "documentation":"

The position, in longitude and latitude, that the results should be close to. Typically, place results returned are ranked higher the closer they are to this position. Stored in [lng, lat] and in the WGS 84 format.

Exactly one of the following fields must be set: BiasPosition, Filter.BoundingBox, or Filter.Circle.

" }, "Filter":{ "shape":"SearchTextFilter", @@ -2853,7 +2871,7 @@ }, "Position":{ "shape":"Position", - "documentation":"

The position, in longitude and latitude.

" + "documentation":"

The position in World Geodetic System (WGS 84) format: [longitude, latitude].

" }, "Distance":{ "shape":"DistanceMeters", @@ -2885,7 +2903,7 @@ }, "AccessPoints":{ "shape":"AccessPointList", - "documentation":"

Position of the access point represent by longitude and latitude.

" + "documentation":"

Position of the access point in World Geodetic System (WGS 84) format: [longitude, latitude].

" }, "AccessRestrictions":{ "shape":"AccessRestrictionList", @@ -2931,14 +2949,24 @@ "Number":{ "shape":"SecondaryAddressComponentNumberString", "documentation":"

Number that uniquely identifies a secondary address.

" + }, + "Designator":{ + "shape":"SecondaryAddressComponentDesignatorString", + "documentation":"

The designator of the secondary address component.

Example: Apt.

" } }, "documentation":"

Components that correspond to secondary identifiers on an address. The only component type supported currently is Unit.

" }, + "SecondaryAddressComponentDesignatorString":{ + "type":"string", + "max":20, + "min":0, + "sensitive":true + }, "SecondaryAddressComponentList":{ "type":"list", "member":{"shape":"SecondaryAddressComponent"}, - "max":1, + "max":3, "min":0 }, "SecondaryAddressComponentMatchScore":{ @@ -3156,7 +3184,7 @@ }, "Position":{ "shape":"Position", - "documentation":"

The position, in longitude and latitude.

" + "documentation":"

The position in World Geodetic System (WGS 84) format: [longitude, latitude].

" }, "Distance":{ "shape":"DistanceMeters", @@ -3180,7 +3208,7 @@ }, "AccessPoints":{ "shape":"AccessPointList", - "documentation":"

Position of the access point represent by longitude and latitude.

" + "documentation":"

Position of the access point in World Geodetic System (WGS 84) format: [longitude, latitude].

" }, "AccessRestrictions":{ "shape":"AccessRestrictionList", @@ -3212,7 +3240,7 @@ "members":{ "QueryId":{ "shape":"SuggestQueryResultQueryIdString", - "documentation":"

QueryId can be used to complete a follow up query through the SearchText API. The QueryId retains context from the original Suggest request such as filters, political view and language. See the SearchText API documentation for more details SearchText API docs.

The fields QueryText, and QueryID are mutually exclusive.

" + "documentation":"

QueryId can be used to complete a follow up query through the SearchText API. The QueryId retains context from the original Suggest request such as filters, political view and language. See the SearchText API documentation for more details SearchText API docs.

The fields QueryText, and QueryID are mutually exclusive.

" }, "QueryType":{ "shape":"QueryType", @@ -3233,11 +3261,11 @@ "members":{ "QueryText":{ "shape":"SuggestRequestQueryTextString", - "documentation":"

The free-form text query to match addresses against. This is usually a partially typed address from an end user in an address box or form.

The fields QueryText, and QueryID are mutually exclusive.

" + "documentation":"

The free-form text query to match addresses against. This is usually a partially typed address from an end user in an address box or form.

The fields QueryText and QueryID are mutually exclusive.

" }, "MaxResults":{ "shape":"SuggestRequestMaxResultsInteger", - "documentation":"

An optional limit for the number of results returned in a single call.

" + "documentation":"

An optional limit for the number of results returned in a single call.

Default value: 20

" }, "MaxQueryRefinements":{ "shape":"SuggestRequestMaxQueryRefinementsInteger", @@ -3245,7 +3273,7 @@ }, "BiasPosition":{ "shape":"Position", - "documentation":"

The position, in longitude and latitude, that the results should be close to. Typically, place results returned are ranked higher the closer they are to this position. Stored in [lng, lat] and in the WSG84 format.

The fields BiasPosition, FilterBoundingBox, and FilterCircle are mutually exclusive.

" + "documentation":"

The position, in longitude and latitude, that the results should be close to. Typically, place results returned are ranked higher the closer they are to this position. Stored in [lng, lat] and in the WGS 84 format.

The fields BiasPosition, FilterBoundingBox, and FilterCircle are mutually exclusive.

" }, "Filter":{ "shape":"SuggestFilter", diff --git a/awscli/botocore/data/glacier/2012-06-01/service-2.json b/awscli/botocore/data/glacier/2012-06-01/service-2.json index f5c93276596b..de3b1690ca84 100644 --- a/awscli/botocore/data/glacier/2012-06-01/service-2.json +++ b/awscli/botocore/data/glacier/2012-06-01/service-2.json @@ -25,9 +25,10 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"} ], - "documentation":"

This operation aborts a multipart upload identified by the upload ID.

After the Abort Multipart Upload request succeeds, you cannot upload any more parts to the multipart upload or complete the multipart upload. Aborting a completed upload fails. However, aborting an already-aborted upload will succeed, for a short time. For more information about uploading a part and completing a multipart upload, see UploadMultipartPart and CompleteMultipartUpload.

This operation is idempotent.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Working with Archives in Amazon S3 Glacier and Abort Multipart Upload in the Amazon Glacier Developer Guide.

" + "documentation":"

This operation aborts a multipart upload identified by the upload ID.

After the Abort Multipart Upload request succeeds, you cannot upload any more parts to the multipart upload or complete the multipart upload. Aborting a completed upload fails. However, aborting an already-aborted upload will succeed, for a short time. For more information about uploading a part and completing a multipart upload, see UploadMultipartPart and CompleteMultipartUpload.

This operation is idempotent.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Working with Archives in Amazon Glacier and Abort Multipart Upload in the Amazon Glacier Developer Guide.

" }, "AbortVaultLock":{ "name":"AbortVaultLock", @@ -41,7 +42,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"} ], "documentation":"

This operation aborts the vault locking process if the vault lock is not in the Locked state. If the vault lock is in the Locked state when this operation is requested, the operation returns an AccessDeniedException error. Aborting the vault locking process removes the vault lock policy from the specified vault.

A vault lock is put into the InProgress state by calling InitiateVaultLock. A vault lock is put into the Locked state by calling CompleteVaultLock. You can get the state of a vault lock by calling GetVaultLock. For more information about the vault locking process, see Amazon Glacier Vault Lock. For more information about vault lock policies, see Amazon Glacier Access Control with Vault Lock Policies.

This operation is idempotent. You can successfully invoke this operation multiple times, if the vault lock is in the InProgress state or if there is no policy associated with the vault.

" }, @@ -58,9 +60,10 @@ {"shape":"MissingParameterValueException"}, {"shape":"ResourceNotFoundException"}, {"shape":"LimitExceededException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"} ], - "documentation":"

This operation adds the specified tags to a vault. Each tag is composed of a key and a value. Each vault can have up to 10 tags. If your request would cause the tag limit for the vault to be exceeded, the operation throws the LimitExceededException error. If a tag already exists on the vault under a specified key, the existing key value will be overwritten. For more information about tags, see Tagging Amazon S3 Glacier Resources.

" + "documentation":"

This operation adds the specified tags to a vault. Each tag is composed of a key and a value. Each vault can have up to 10 tags. If your request would cause the tag limit for the vault to be exceeded, the operation throws the LimitExceededException error. If a tag already exists on the vault under a specified key, the existing key value will be overwritten. For more information about tags, see Tagging Amazon Glacier Resources.

" }, "CompleteMultipartUpload":{ "name":"CompleteMultipartUpload", @@ -75,9 +78,10 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"} ], - "documentation":"

You call this operation to inform Amazon S3 Glacier (Glacier) that all the archive parts have been uploaded and that Glacier can now assemble the archive from the uploaded parts. After assembling and saving the archive to the vault, Glacier returns the URI path of the newly created archive resource. Using the URI path, you can then access the archive. After you upload an archive, you should save the archive ID returned to retrieve the archive at a later point. You can also get the vault inventory to obtain a list of archive IDs in a vault. For more information, see InitiateJob.

In the request, you must include the computed SHA256 tree hash of the entire archive you have uploaded. For information about computing a SHA256 tree hash, see Computing Checksums. On the server side, Glacier also constructs the SHA256 tree hash of the assembled archive. If the values match, Glacier saves the archive to the vault; otherwise, it returns an error, and the operation fails. The ListParts operation returns a list of parts uploaded for a specific multipart upload. It includes checksum information for each uploaded part that can be used to debug a bad checksum issue.

Additionally, Glacier also checks for any missing content ranges when assembling the archive, if missing content ranges are found, Glacier returns an error and the operation fails.

Complete Multipart Upload is an idempotent operation. After your first successful complete multipart upload, if you call the operation again within a short period, the operation will succeed and return the same archive ID. This is useful in the event you experience a network issue that causes an aborted connection or receive a 500 server error, in which case you can repeat your Complete Multipart Upload request and get the same archive ID without creating duplicate archives. Note, however, that after the multipart upload completes, you cannot call the List Parts operation and the multipart upload will not appear in List Multipart Uploads response, even if idempotent complete is possible.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Uploading Large Archives in Parts (Multipart Upload) and Complete Multipart Upload in the Amazon Glacier Developer Guide.

" + "documentation":"

You call this operation to inform Amazon Glacier (Glacier) that all the archive parts have been uploaded and that Glacier can now assemble the archive from the uploaded parts. After assembling and saving the archive to the vault, Glacier returns the URI path of the newly created archive resource. Using the URI path, you can then access the archive. After you upload an archive, you should save the archive ID returned to retrieve the archive at a later point. You can also get the vault inventory to obtain a list of archive IDs in a vault. For more information, see InitiateJob.

In the request, you must include the computed SHA256 tree hash of the entire archive you have uploaded. For information about computing a SHA256 tree hash, see Computing Checksums. On the server side, Glacier also constructs the SHA256 tree hash of the assembled archive. If the values match, Glacier saves the archive to the vault; otherwise, it returns an error, and the operation fails. The ListParts operation returns a list of parts uploaded for a specific multipart upload. It includes checksum information for each uploaded part that can be used to debug a bad checksum issue.

Additionally, Glacier also checks for any missing content ranges when assembling the archive; if missing content ranges are found, Glacier returns an error and the operation fails.

Complete Multipart Upload is an idempotent operation. After your first successful complete multipart upload, if you call the operation again within a short period, the operation will succeed and return the same archive ID. This is useful in the event you experience a network issue that causes an aborted connection or receive a 500 server error, in which case you can repeat your Complete Multipart Upload request and get the same archive ID without creating duplicate archives. Note, however, that after the multipart upload completes, you cannot call the List Parts operation and the multipart upload will not appear in List Multipart Uploads response, even if idempotent complete is possible.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Uploading Large Archives in Parts (Multipart Upload) and Complete Multipart Upload in the Amazon Glacier Developer Guide.

" }, "CompleteVaultLock":{ "name":"CompleteVaultLock", @@ -91,7 +95,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"} ], "documentation":"

This operation completes the vault locking process by transitioning the vault lock from the InProgress state to the Locked state, which causes the vault lock policy to become unchangeable. A vault lock is put into the InProgress state by calling InitiateVaultLock. You can obtain the state of the vault lock by calling GetVaultLock. For more information about the vault locking process, see Amazon Glacier Vault Lock.

This operation is idempotent. This request is always successful if the vault lock is in the Locked state and the provided lock ID matches the lock ID originally used to lock the vault.

If an invalid lock ID is passed in the request when the vault lock is in the Locked state, the operation returns an AccessDeniedException error. If an invalid lock ID is passed in the request when the vault lock is in the InProgress state, the operation throws an InvalidParameter error.

" }, @@ -108,9 +113,10 @@ {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"}, {"shape":"LimitExceededException"} ], - "documentation":"

This operation creates a new vault with the specified name. The name of the vault must be unique within a region for an AWS account. You can create up to 1,000 vaults per account. If you need to create more vaults, contact Amazon S3 Glacier.

You must use the following guidelines when naming a vault.

This operation is idempotent.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Creating a Vault in Amazon Glacier and Create Vault in the Amazon Glacier Developer Guide.

" + "documentation":"

This operation creates a new vault with the specified name. The name of the vault must be unique within a region for an AWS account. You can create up to 1,000 vaults per account. If you need to create more vaults, contact Amazon Glacier.

You must use the following guidelines when naming a vault.

This operation is idempotent.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Creating a Vault in Amazon Glacier and Create Vault in the Amazon Glacier Developer Guide.

" }, "DeleteArchive":{ "name":"DeleteArchive", @@ -124,9 +130,10 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"} ], - "documentation":"

This operation deletes an archive from a vault. Subsequent requests to initiate a retrieval of this archive will fail. Archive retrievals that are in progress for this archive ID may or may not succeed according to the following scenarios:

This operation is idempotent. Attempting to delete an already-deleted archive does not result in an error.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Deleting an Archive in Amazon Glacier and Delete Archive in the Amazon Glacier Developer Guide.

" + "documentation":"

This operation deletes an archive from a vault. Subsequent requests to initiate a retrieval of this archive will fail. Archive retrievals that are in progress for this archive ID may or may not succeed according to the following scenarios:

This operation is idempotent. Attempting to delete an already-deleted archive does not result in an error.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Deleting an Archive in Amazon Glacier and Delete Archive in the Amazon Glacier Developer Guide.

" }, "DeleteVault":{ "name":"DeleteVault", @@ -140,9 +147,10 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"} ], - "documentation":"

This operation deletes a vault. Amazon S3 Glacier will delete a vault only if there are no archives in the vault as of the last inventory and there have been no writes to the vault since the last inventory. If either of these conditions is not satisfied, the vault deletion fails (that is, the vault is not removed) and Amazon S3 Glacier returns an error. You can use DescribeVault to return the number of archives in a vault, and you can use Initiate a Job (POST jobs) to initiate a new inventory retrieval for a vault. The inventory contains the archive IDs you use to delete archives using Delete Archive (DELETE archive).

This operation is idempotent.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Deleting a Vault in Amazon Glacier and Delete Vault in the Amazon S3 Glacier Developer Guide.

" + "documentation":"

This operation deletes a vault. Amazon Glacier will delete a vault only if there are no archives in the vault as of the last inventory and there have been no writes to the vault since the last inventory. If either of these conditions is not satisfied, the vault deletion fails (that is, the vault is not removed) and Amazon Glacier returns an error. You can use DescribeVault to return the number of archives in a vault, and you can use Initiate a Job (POST jobs) to initiate a new inventory retrieval for a vault. The inventory contains the archive IDs you use to delete archives using Delete Archive (DELETE archive).

This operation is idempotent.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Deleting a Vault in Amazon Glacier and Delete Vault in the Amazon Glacier Developer Guide.

" }, "DeleteVaultAccessPolicy":{ "name":"DeleteVaultAccessPolicy", @@ -156,9 +164,10 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"} ], - "documentation":"

This operation deletes the access policy associated with the specified vault. The operation is eventually consistent; that is, it might take some time for Amazon S3 Glacier to completely remove the access policy, and you might still see the effect of the policy for a short time after you send the delete request.

This operation is idempotent. You can invoke delete multiple times, even if there is no policy associated with the vault. For more information about vault access policies, see Amazon Glacier Access Control with Vault Access Policies.

" + "documentation":"

This operation deletes the access policy associated with the specified vault. The operation is eventually consistent; that is, it might take some time for Amazon Glacier to completely remove the access policy, and you might still see the effect of the policy for a short time after you send the delete request.

This operation is idempotent. You can invoke delete multiple times, even if there is no policy associated with the vault. For more information about vault access policies, see Amazon Glacier Access Control with Vault Access Policies.

" }, "DeleteVaultNotifications":{ "name":"DeleteVaultNotifications", @@ -172,9 +181,10 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"} ], - "documentation":"

This operation deletes the notification configuration set for a vault. The operation is eventually consistent; that is, it might take some time for Amazon S3 Glacier to completely disable the notifications and you might still receive some notifications for a short time after you send the delete request.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Configuring Vault Notifications in Amazon S3 Glacier and Delete Vault Notification Configuration in the Amazon S3 Glacier Developer Guide.

" + "documentation":"

This operation deletes the notification configuration set for a vault. The operation is eventually consistent; that is, it might take some time for Amazon Glacier to completely disable the notifications and you might still receive some notifications for a short time after you send the delete request.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Configuring Vault Notifications in Amazon Glacier and Delete Vault Notification Configuration in the Amazon Glacier Developer Guide.

" }, "DescribeJob":{ "name":"DescribeJob", @@ -188,9 +198,10 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"} ], - "documentation":"

This operation returns information about a job you previously initiated, including the job initiation date, the user who initiated the job, the job status code/message and the Amazon SNS topic to notify after Amazon S3 Glacier (Glacier) completes the job. For more information about initiating a job, see InitiateJob.

This operation enables you to check the status of your job. However, it is strongly recommended that you set up an Amazon SNS topic and specify it in your initiate job request so that Glacier can notify the topic after it completes the job.

A job ID will not expire for at least 24 hours after Glacier completes the job.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For more information about using this operation, see the documentation for the underlying REST API Describe Job in the Amazon Glacier Developer Guide.

" + "documentation":"

This operation returns information about a job you previously initiated, including the job initiation date, the user who initiated the job, the job status code/message and the Amazon SNS topic to notify after Amazon Glacier (Glacier) completes the job. For more information about initiating a job, see InitiateJob.

This operation enables you to check the status of your job. However, it is strongly recommended that you set up an Amazon SNS topic and specify it in your initiate job request so that Glacier can notify the topic after it completes the job.

A job ID will not expire for at least 24 hours after Glacier completes the job.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For more information about using this operation, see the documentation for the underlying REST API Describe Job in the Amazon Glacier Developer Guide.

" }, "DescribeVault":{ "name":"DescribeVault", @@ -204,9 +215,10 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"} ], - "documentation":"

This operation returns information about a vault, including the vault's Amazon Resource Name (ARN), the date the vault was created, the number of archives it contains, and the total size of all the archives in the vault. The number of archives and their total size are as of the last inventory generation. This means that if you add or remove an archive from a vault, and then immediately use Describe Vault, the change in contents will not be immediately reflected. If you want to retrieve the latest inventory of the vault, use InitiateJob. Amazon S3 Glacier generates vault inventories approximately daily. For more information, see Downloading a Vault Inventory in Amazon S3 Glacier.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Retrieving Vault Metadata in Amazon S3 Glacier and Describe Vault in the Amazon Glacier Developer Guide.

" + "documentation":"

This operation returns information about a vault, including the vault's Amazon Resource Name (ARN), the date the vault was created, the number of archives it contains, and the total size of all the archives in the vault. The number of archives and their total size are as of the last inventory generation. This means that if you add or remove an archive from a vault, and then immediately use Describe Vault, the change in contents will not be immediately reflected. If you want to retrieve the latest inventory of the vault, use InitiateJob. Amazon Glacier generates vault inventories approximately daily. For more information, see Downloading a Vault Inventory in Amazon Glacier.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Retrieving Vault Metadata in Amazon Glacier and Describe Vault in the Amazon Glacier Developer Guide.

" }, "GetDataRetrievalPolicy":{ "name":"GetDataRetrievalPolicy", @@ -219,7 +231,8 @@ "errors":[ {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"} ], "documentation":"

This operation returns the current data retrieval policy for the account and region specified in the GET request. For more information about data retrieval policies, see Amazon Glacier Data Retrieval Policies.

" }, @@ -235,9 +248,10 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"} ], - "documentation":"

This operation downloads the output of the job you initiated using InitiateJob. Depending on the job type you specified when you initiated the job, the output will be either the content of an archive or a vault inventory.

You can download all the job output or download a portion of the output by specifying a byte range. In the case of an archive retrieval job, depending on the byte range you specify, Amazon S3 Glacier (Glacier) returns the checksum for the portion of the data. You can compute the checksum on the client and verify that the values match to ensure the portion you downloaded is the correct data.

A job ID will not expire for at least 24 hours after Glacier completes the job. For both archive and inventory retrieval jobs, you should verify the downloaded size against the size returned in the headers from the Get Job Output response.

For archive retrieval jobs, you should also verify that the size is what you expected. If you download a portion of the output, the expected size is based on the range of bytes you specified. For example, if you specify a range of bytes=0-1048575, you should verify your download size is 1,048,576 bytes. If you download an entire archive, the expected size is the size of the archive when you uploaded it to Amazon S3 Glacier. The expected size is also returned in the headers from the Get Job Output response.

In the case of an archive retrieval job, depending on the byte range you specify, Glacier returns the checksum for the portion of the data. To ensure the portion you downloaded is the correct data, compute the checksum on the client, verify that the values match, and verify that the size is what you expected.

A job ID does not expire for at least 24 hours after Glacier completes the job. That is, you can download the job output within the 24 hours period after Amazon Glacier completes the job.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and the underlying REST API, see Downloading a Vault Inventory, Downloading an Archive, and Get Job Output

" + "documentation":"

This operation downloads the output of the job you initiated using InitiateJob. Depending on the job type you specified when you initiated the job, the output will be either the content of an archive or a vault inventory.

You can download all the job output or download a portion of the output by specifying a byte range. In the case of an archive retrieval job, depending on the byte range you specify, Amazon Glacier (Glacier) returns the checksum for the portion of the data. You can compute the checksum on the client and verify that the values match to ensure the portion you downloaded is the correct data.

A job ID will not expire for at least 24 hours after Glacier completes the job. For both archive and inventory retrieval jobs, you should verify the downloaded size against the size returned in the headers from the Get Job Output response.

For archive retrieval jobs, you should also verify that the size is what you expected. If you download a portion of the output, the expected size is based on the range of bytes you specified. For example, if you specify a range of bytes=0-1048575, you should verify your download size is 1,048,576 bytes. If you download an entire archive, the expected size is the size of the archive when you uploaded it to Amazon Glacier. The expected size is also returned in the headers from the Get Job Output response.

In the case of an archive retrieval job, depending on the byte range you specify, Glacier returns the checksum for the portion of the data. To ensure the portion you downloaded is the correct data, compute the checksum on the client, verify that the values match, and verify that the size is what you expected.

A job ID does not expire for at least 24 hours after Glacier completes the job. That is, you can download the job output within the 24 hours period after Amazon Glacier completes the job.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and the underlying REST API, see Downloading a Vault Inventory, Downloading an Archive, and Get Job Output

" }, "GetVaultAccessPolicy":{ "name":"GetVaultAccessPolicy", @@ -251,7 +265,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"} ], "documentation":"

This operation retrieves the access-policy subresource set on the vault; for more information on setting this subresource, see Set Vault Access Policy (PUT access-policy). If there is no access policy set on the vault, the operation returns a 404 Not found error. For more information about vault access policies, see Amazon Glacier Access Control with Vault Access Policies.

" }, @@ -267,7 +282,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"} ], "documentation":"

This operation retrieves the following attributes from the lock-policy subresource set on the specified vault:

A vault lock is put into the InProgress state by calling InitiateVaultLock. A vault lock is put into the Locked state by calling CompleteVaultLock. You can abort the vault locking process by calling AbortVaultLock. For more information about the vault locking process, see Amazon Glacier Vault Lock.

If there is no vault lock policy set on the vault, the operation returns a 404 Not found error. For more information about vault lock policies, see Amazon Glacier Access Control with Vault Lock Policies.

" }, @@ -283,9 +299,10 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"} ], - "documentation":"

This operation retrieves the notification-configuration subresource of the specified vault.

For information about setting a notification configuration on a vault, see SetVaultNotifications. If a notification configuration for a vault is not set, the operation returns a 404 Not Found error. For more information about vault notifications, see Configuring Vault Notifications in Amazon S3 Glacier.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Configuring Vault Notifications in Amazon S3 Glacier and Get Vault Notification Configuration in the Amazon Glacier Developer Guide.

" + "documentation":"

This operation retrieves the notification-configuration subresource of the specified vault.

For information about setting a notification configuration on a vault, see SetVaultNotifications. If a notification configuration for a vault is not set, the operation returns a 404 Not Found error. For more information about vault notifications, see Configuring Vault Notifications in Amazon Glacier.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Configuring Vault Notifications in Amazon Glacier and Get Vault Notification Configuration in the Amazon Glacier Developer Guide.

" }, "InitiateJob":{ "name":"InitiateJob", @@ -302,7 +319,8 @@ {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, {"shape":"InsufficientCapacityException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"} ], "documentation":"

This operation initiates a job of the specified type, which can be a select, an archival retrieval, or a vault retrieval. For more information about using this operation, see the documentation for the underlying REST API Initiate a Job.

" }, @@ -319,9 +337,10 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"} ], - "documentation":"

This operation initiates a multipart upload. Amazon S3 Glacier creates a multipart upload resource and returns its ID in the response. The multipart upload ID is used in subsequent requests to upload parts of an archive (see UploadMultipartPart).

When you initiate a multipart upload, you specify the part size in number of bytes. The part size must be a megabyte (1024 KB) multiplied by a power of 2-for example, 1048576 (1 MB), 2097152 (2 MB), 4194304 (4 MB), 8388608 (8 MB), and so on. The minimum allowable part size is 1 MB, and the maximum is 4 GB.

Every part you upload to this resource (see UploadMultipartPart), except the last one, must have the same size. The last one can be the same size or smaller. For example, suppose you want to upload a 16.2 MB file. If you initiate the multipart upload with a part size of 4 MB, you will upload four parts of 4 MB each and one part of 0.2 MB.

You don't need to know the size of the archive when you start a multipart upload because Amazon S3 Glacier does not require you to specify the overall archive size.

After you complete the multipart upload, Amazon S3 Glacier (Glacier) removes the multipart upload resource referenced by the ID. Glacier also removes the multipart upload resource if you cancel the multipart upload or it may be removed if there is no activity for a period of 24 hours.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Uploading Large Archives in Parts (Multipart Upload) and Initiate Multipart Upload in the Amazon Glacier Developer Guide.

" + "documentation":"

This operation initiates a multipart upload. Amazon Glacier creates a multipart upload resource and returns its ID in the response. The multipart upload ID is used in subsequent requests to upload parts of an archive (see UploadMultipartPart).

When you initiate a multipart upload, you specify the part size in number of bytes. The part size must be a megabyte (1024 KB) multiplied by a power of 2-for example, 1048576 (1 MB), 2097152 (2 MB), 4194304 (4 MB), 8388608 (8 MB), and so on. The minimum allowable part size is 1 MB, and the maximum is 4 GB.

Every part you upload to this resource (see UploadMultipartPart), except the last one, must have the same size. The last one can be the same size or smaller. For example, suppose you want to upload a 16.2 MB file. If you initiate the multipart upload with a part size of 4 MB, you will upload four parts of 4 MB each and one part of 0.2 MB.

You don't need to know the size of the archive when you start a multipart upload because Amazon Glacier does not require you to specify the overall archive size.

After you complete the multipart upload, Amazon Glacier (Glacier) removes the multipart upload resource referenced by the ID. Glacier also removes the multipart upload resource if you cancel the multipart upload or it may be removed if there is no activity for a period of 24 hours.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Uploading Large Archives in Parts (Multipart Upload) and Initiate Multipart Upload in the Amazon Glacier Developer Guide.

" }, "InitiateVaultLock":{ "name":"InitiateVaultLock", @@ -336,7 +355,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"} ], "documentation":"

This operation initiates the vault locking process by doing the following:

You can set one vault lock policy for each vault and this policy can be up to 20 KB in size. For more information about vault lock policies, see Amazon Glacier Access Control with Vault Lock Policies.

You must complete the vault locking process within 24 hours after the vault lock enters the InProgress state. After the 24 hour window ends, the lock ID expires, the vault automatically exits the InProgress state, and the vault lock policy is removed from the vault. You call CompleteVaultLock to complete the vault locking process by setting the state of the vault lock to Locked.

After a vault lock is in the Locked state, you cannot initiate a new vault lock for the vault.

You can abort the vault locking process by calling AbortVaultLock. You can get the state of the vault lock by calling GetVaultLock. For more information about the vault locking process, see Amazon Glacier Vault Lock.

If this operation is called when the vault lock is in the InProgress state, the operation returns an AccessDeniedException error. When the vault lock is in the InProgress state you must call AbortVaultLock before you can initiate a new vault lock policy.

" }, @@ -352,7 +372,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"} ], "documentation":"

This operation lists jobs for a vault, including jobs that are in-progress and jobs that have recently finished. The List Job operation returns a list of these jobs sorted by job initiation time.

Amazon Glacier retains recently completed jobs for a period before deleting them; however, it eventually removes completed jobs. The output of completed jobs can be retrieved. Retaining completed jobs for a period of time after they have completed enables you to get a job output in the event you miss the job completion notification or your first attempt to download it fails. For example, suppose you start an archive retrieval job to download an archive. After the job completes, you start to download the archive but encounter a network error. In this scenario, you can retry and download the archive while the job exists.

The List Jobs operation supports pagination. You should always check the response Marker field. If there are no more jobs to list, the Marker field is set to null. If there are more jobs to list, the Marker field is set to a non-null value, which you can use to continue the pagination of the list. To return a list of jobs that begins at a specific job, set the marker request parameter to the Marker value for that job that you obtained from a previous List Jobs request.

You can set a maximum limit for the number of jobs returned in the response by specifying the limit parameter in the request. The default limit is 50. The number of jobs returned might be fewer than the limit, but the number of returned jobs never exceeds the limit.

Additionally, you can filter the jobs list returned by specifying the optional statuscode parameter or completed parameter, or both. Using the statuscode parameter, you can specify to return only jobs that match either the InProgress, Succeeded, or Failed status. Using the completed parameter, you can specify to return only jobs that were completed (true) or jobs that were not completed (false).

For more information about using this operation, see the documentation for the underlying REST API List Jobs.

" }, @@ -368,9 +389,10 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"} ], - "documentation":"

This operation lists in-progress multipart uploads for the specified vault. An in-progress multipart upload is a multipart upload that has been initiated by an InitiateMultipartUpload request, but has not yet been completed or aborted. The list returned in the List Multipart Upload response has no guaranteed order.

The List Multipart Uploads operation supports pagination. By default, this operation returns up to 50 multipart uploads in the response. You should always check the response for a marker at which to continue the list; if there are no more items the marker is null. To return a list of multipart uploads that begins at a specific upload, set the marker request parameter to the value you obtained from a previous List Multipart Upload request. You can also limit the number of uploads returned in the response by specifying the limit parameter in the request.

Note the difference between this operation and listing parts (ListParts). The List Multipart Uploads operation lists all multipart uploads for a vault and does not require a multipart upload ID. The List Parts operation requires a multipart upload ID since parts are associated with a single upload.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and the underlying REST API, see Working with Archives in Amazon S3 Glacier and List Multipart Uploads in the Amazon Glacier Developer Guide.

" + "documentation":"

This operation lists in-progress multipart uploads for the specified vault. An in-progress multipart upload is a multipart upload that has been initiated by an InitiateMultipartUpload request, but has not yet been completed or aborted. The list returned in the List Multipart Upload response has no guaranteed order.

The List Multipart Uploads operation supports pagination. By default, this operation returns up to 50 multipart uploads in the response. You should always check the response for a marker at which to continue the list; if there are no more items the marker is null. To return a list of multipart uploads that begins at a specific upload, set the marker request parameter to the value you obtained from a previous List Multipart Upload request. You can also limit the number of uploads returned in the response by specifying the limit parameter in the request.

Note the difference between this operation and listing parts (ListParts). The List Multipart Uploads operation lists all multipart uploads for a vault and does not require a multipart upload ID. The List Parts operation requires a multipart upload ID since parts are associated with a single upload.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and the underlying REST API, see Working with Archives in Amazon Glacier and List Multipart Uploads in the Amazon Glacier Developer Guide.

" }, "ListParts":{ "name":"ListParts", @@ -384,9 +406,10 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"} ], - "documentation":"

This operation lists the parts of an archive that have been uploaded in a specific multipart upload. You can make this request at any time during an in-progress multipart upload before you complete the upload (see CompleteMultipartUpload). List Parts returns an error for completed uploads. The list returned in the List Parts response is sorted by part range.

The List Parts operation supports pagination. By default, this operation returns up to 50 uploaded parts in the response. You should always check the response for a marker at which to continue the list; if there are no more items the marker is null. To return a list of parts that begins at a specific part, set the marker request parameter to the value you obtained from a previous List Parts request. You can also limit the number of parts returned in the response by specifying the limit parameter in the request.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and the underlying REST API, see Working with Archives in Amazon S3 Glacier and List Parts in the Amazon Glacier Developer Guide.

" + "documentation":"

This operation lists the parts of an archive that have been uploaded in a specific multipart upload. You can make this request at any time during an in-progress multipart upload before you complete the upload (see CompleteMultipartUpload). List Parts returns an error for completed uploads. The list returned in the List Parts response is sorted by part range.

The List Parts operation supports pagination. By default, this operation returns up to 50 uploaded parts in the response. You should always check the response for a marker at which to continue the list; if there are no more items the marker is null. To return a list of parts that begins at a specific part, set the marker request parameter to the value you obtained from a previous List Parts request. You can also limit the number of parts returned in the response by specifying the limit parameter in the request.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and the underlying REST API, see Working with Archives in Amazon Glacier and List Parts in the Amazon Glacier Developer Guide.

" }, "ListProvisionedCapacity":{ "name":"ListProvisionedCapacity", @@ -399,7 +422,8 @@ "errors":[ {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"} ], "documentation":"

This operation lists the provisioned capacity units for the specified AWS account.

" }, @@ -415,9 +439,10 @@ {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"} ], - "documentation":"

This operation lists all the tags attached to a vault. The operation returns an empty map if there are no tags. For more information about tags, see Tagging Amazon S3 Glacier Resources.

" + "documentation":"

This operation lists all the tags attached to a vault. The operation returns an empty map if there are no tags. For more information about tags, see Tagging Amazon Glacier Resources.

" }, "ListVaults":{ "name":"ListVaults", @@ -431,9 +456,10 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"} ], - "documentation":"

This operation lists all vaults owned by the calling user's account. The list returned in the response is ASCII-sorted by vault name.

By default, this operation returns up to 10 items. If there are more vaults to list, the response marker field contains the vault Amazon Resource Name (ARN) at which to continue the list with a new List Vaults request; otherwise, the marker field is null. To return a list of vaults that begins at a specific vault, set the marker request parameter to the vault ARN you obtained from a previous List Vaults request. You can also limit the number of vaults returned in the response by specifying the limit parameter in the request.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Retrieving Vault Metadata in Amazon S3 Glacier and List Vaults in the Amazon Glacier Developer Guide.

" + "documentation":"

This operation lists all vaults owned by the calling user's account. The list returned in the response is ASCII-sorted by vault name.

By default, this operation returns up to 10 items. If there are more vaults to list, the response marker field contains the vault Amazon Resource Name (ARN) at which to continue the list with a new List Vaults request; otherwise, the marker field is null. To return a list of vaults that begins at a specific vault, set the marker request parameter to the vault ARN you obtained from a previous List Vaults request. You can also limit the number of vaults returned in the response by specifying the limit parameter in the request.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Retrieving Vault Metadata in Amazon Glacier and List Vaults in the Amazon Glacier Developer Guide.

" }, "PurchaseProvisionedCapacity":{ "name":"PurchaseProvisionedCapacity", @@ -448,7 +474,8 @@ {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, {"shape":"LimitExceededException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"} ], "documentation":"

This operation purchases a provisioned capacity unit for an AWS account.

" }, @@ -464,9 +491,10 @@ {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"} ], - "documentation":"

This operation removes one or more tags from the set of tags attached to a vault. For more information about tags, see Tagging Amazon S3 Glacier Resources. This operation is idempotent. The operation will be successful, even if there are no tags attached to the vault.

" + "documentation":"

This operation removes one or more tags from the set of tags attached to a vault. For more information about tags, see Tagging Amazon Glacier Resources. This operation is idempotent. The operation will be successful, even if there are no tags attached to the vault.

" }, "SetDataRetrievalPolicy":{ "name":"SetDataRetrievalPolicy", @@ -479,7 +507,8 @@ "errors":[ {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"} ], "documentation":"

This operation sets and then enacts a data retrieval policy in the region specified in the PUT request. You can set one policy per region for an AWS account. The policy is enacted within a few minutes of a successful PUT operation.

The set policy operation does not affect retrieval jobs that were in progress before the policy was enacted. For more information about data retrieval policies, see Amazon Glacier Data Retrieval Policies.

" }, @@ -495,7 +524,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"} ], "documentation":"

This operation configures an access policy for a vault and will overwrite an existing policy. To configure a vault access policy, send a PUT request to the access-policy subresource of the vault. An access policy is specific to a vault and is also called a vault subresource. You can set one access policy per vault and the policy can be up to 20 KB in size. For more information about vault access policies, see Amazon Glacier Access Control with Vault Access Policies.

" }, @@ -511,9 +541,10 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"} ], - "documentation":"

This operation configures notifications that will be sent when specific events happen to a vault. By default, you don't get any notifications.

To configure vault notifications, send a PUT request to the notification-configuration subresource of the vault. The request should include a JSON document that provides an Amazon SNS topic and specific events for which you want Amazon S3 Glacier to send notifications to the topic.

Amazon SNS topics must grant permission to the vault to be allowed to publish notifications to the topic. You can configure a vault to publish a notification for the following vault events:

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Configuring Vault Notifications in Amazon S3 Glacier and Set Vault Notification Configuration in the Amazon Glacier Developer Guide.

" + "documentation":"

This operation configures notifications that will be sent when specific events happen to a vault. By default, you don't get any notifications.

To configure vault notifications, send a PUT request to the notification-configuration subresource of the vault. The request should include a JSON document that provides an Amazon SNS topic and specific events for which you want Amazon Glacier to send notifications to the topic.

Amazon SNS topics must grant permission to the vault to be allowed to publish notifications to the topic. You can configure a vault to publish a notification for the following vault events:

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Configuring Vault Notifications in Amazon Glacier and Set Vault Notification Configuration in the Amazon Glacier Developer Guide.

" }, "UploadArchive":{ "name":"UploadArchive", @@ -529,9 +560,10 @@ {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, {"shape":"RequestTimeoutException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"} ], - "documentation":"

This operation adds an archive to a vault. This is a synchronous operation, and for a successful upload, your data is durably persisted. Amazon S3 Glacier returns the archive ID in the x-amz-archive-id header of the response.

You must use the archive ID to access your data in Amazon S3 Glacier. After you upload an archive, you should save the archive ID returned so that you can retrieve or delete the archive later. Besides saving the archive ID, you can also index it and give it a friendly name to allow for better searching. You can also use the optional archive description field to specify how the archive is referred to in an external index of archives, such as you might create in Amazon DynamoDB. You can also get the vault inventory to obtain a list of archive IDs in a vault. For more information, see InitiateJob.

You must provide a SHA256 tree hash of the data you are uploading. For information about computing a SHA256 tree hash, see Computing Checksums.

You can optionally specify an archive description of up to 1,024 printable ASCII characters. You can get the archive description when you either retrieve the archive or get the vault inventory. For more information, see InitiateJob. Amazon Glacier does not interpret the description in any way. An archive description does not need to be unique. You cannot use the description to retrieve or sort the archive list.

Archives are immutable. After you upload an archive, you cannot edit the archive or its description.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Uploading an Archive in Amazon Glacier and Upload Archive in the Amazon Glacier Developer Guide.

" + "documentation":"

This operation adds an archive to a vault. This is a synchronous operation, and for a successful upload, your data is durably persisted. Amazon Glacier returns the archive ID in the x-amz-archive-id header of the response.

You must use the archive ID to access your data in Amazon Glacier. After you upload an archive, you should save the archive ID returned so that you can retrieve or delete the archive later. Besides saving the archive ID, you can also index it and give it a friendly name to allow for better searching. You can also use the optional archive description field to specify how the archive is referred to in an external index of archives, such as you might create in Amazon DynamoDB. You can also get the vault inventory to obtain a list of archive IDs in a vault. For more information, see InitiateJob.

You must provide a SHA256 tree hash of the data you are uploading. For information about computing a SHA256 tree hash, see Computing Checksums.

You can optionally specify an archive description of up to 1,024 printable ASCII characters. You can get the archive description when you either retrieve the archive or get the vault inventory. For more information, see InitiateJob. Amazon Glacier does not interpret the description in any way. An archive description does not need to be unique. You cannot use the description to retrieve or sort the archive list.

Archives are immutable. After you upload an archive, you cannot edit the archive or its description.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Uploading an Archive in Amazon Glacier and Upload Archive in the Amazon Glacier Developer Guide.

" }, "UploadMultipartPart":{ "name":"UploadMultipartPart", @@ -547,9 +579,10 @@ {"shape":"InvalidParameterValueException"}, {"shape":"MissingParameterValueException"}, {"shape":"RequestTimeoutException"}, - {"shape":"ServiceUnavailableException"} + {"shape":"ServiceUnavailableException"}, + {"shape":"NoLongerSupportedException"} ], - "documentation":"

This operation uploads a part of an archive. You can upload archive parts in any order. You can also upload them in parallel. You can upload up to 10,000 parts for a multipart upload.

Amazon Glacier rejects your upload part request if any of the following conditions is true:

This operation is idempotent. If you upload the same part multiple times, the data included in the most recent request overwrites the previously uploaded data.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Uploading Large Archives in Parts (Multipart Upload) and Upload Part in the Amazon Glacier Developer Guide.

" + "documentation":"

This operation uploads a part of an archive. You can upload archive parts in any order. You can also upload them in parallel. You can upload up to 10,000 parts for a multipart upload.

Amazon Glacier rejects your upload part request if any of the following conditions is true:

This operation is idempotent. If you upload the same part multiple times, the data included in the most recent request overwrites the previously uploaded data.

An AWS account has full permission to perform all operations (actions). However, AWS Identity and Access Management (IAM) users don't have any permissions by default. You must grant them explicit permission to perform specific actions. For more information, see Access Control Using AWS Identity and Access Management (IAM).

For conceptual information and underlying REST API, see Uploading Large Archives in Parts (Multipart Upload) and Upload Part in the Amazon Glacier Developer Guide.

" } }, "shapes":{ @@ -563,7 +596,7 @@ "members":{ "accountId":{ "shape":"string", - "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", + "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", "location":"uri", "locationName":"accountId" }, @@ -580,7 +613,7 @@ "locationName":"uploadId" } }, - "documentation":"

Provides options to abort a multipart upload identified by the upload ID.

For information about the underlying REST API, see Abort Multipart Upload. For conceptual information, see Working with Archives in Amazon S3 Glacier.

" + "documentation":"

Provides options to abort a multipart upload identified by the upload ID.

For information about the underlying REST API, see Abort Multipart Upload. For conceptual information, see Working with Archives in Amazon Glacier.

" }, "AbortVaultLockInput":{ "type":"structure", @@ -625,7 +658,7 @@ "members":{ "accountId":{ "shape":"string", - "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", + "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", "location":"uri", "locationName":"accountId" }, @@ -653,7 +686,7 @@ }, "checksum":{ "shape":"string", - "documentation":"

The checksum of the archive computed by Amazon S3 Glacier.

", + "documentation":"

The checksum of the archive computed by Amazon Glacier.

", "location":"header", "locationName":"x-amz-sha256-tree-hash" }, @@ -664,7 +697,7 @@ "locationName":"x-amz-archive-id" } }, - "documentation":"

Contains the Amazon S3 Glacier response to your request.

For information about the underlying REST API, see Upload Archive. For conceptual information, see Working with Archives in Amazon S3 Glacier.

" + "documentation":"

Contains the Amazon Glacier response to your request.

For information about the underlying REST API, see Upload Archive. For conceptual information, see Working with Archives in Amazon Glacier.

" }, "CSVInput":{ "type":"structure", @@ -744,7 +777,7 @@ "members":{ "accountId":{ "shape":"string", - "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", + "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", "location":"uri", "locationName":"accountId" }, @@ -768,12 +801,12 @@ }, "checksum":{ "shape":"string", - "documentation":"

The SHA256 tree hash of the entire archive. It is the tree hash of SHA256 tree hash of the individual parts. If the value you specify in the request does not match the SHA256 tree hash of the final assembled archive as computed by Amazon S3 Glacier (Glacier), Glacier returns an error and the request fails.

", + "documentation":"

The SHA256 tree hash of the entire archive. It is the tree hash of SHA256 tree hash of the individual parts. If the value you specify in the request does not match the SHA256 tree hash of the final assembled archive as computed by Amazon Glacier (Glacier), Glacier returns an error and the request fails.

", "location":"header", "locationName":"x-amz-sha256-tree-hash" } }, - "documentation":"

Provides options to complete a multipart upload operation. This informs Amazon Glacier that all the archive parts have been uploaded and Amazon S3 Glacier (Glacier) can now assemble the archive from the uploaded parts. After assembling and saving the archive to the vault, Glacier returns the URI path of the newly created archive resource.

" + "documentation":"

Provides options to complete a multipart upload operation. This informs Amazon Glacier that all the archive parts have been uploaded and Amazon Glacier (Glacier) can now assemble the archive from the uploaded parts. After assembling and saving the archive to the vault, Glacier returns the URI path of the newly created archive resource.

" }, "CompleteVaultLockInput":{ "type":"structure", @@ -836,7 +869,7 @@ "locationName":"Location" } }, - "documentation":"

Contains the Amazon S3 Glacier response to your request.

" + "documentation":"

Contains the Amazon Glacier response to your request.

" }, "DataRetrievalPolicy":{ "type":"structure", @@ -877,7 +910,7 @@ "members":{ "accountId":{ "shape":"string", - "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", + "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", "location":"uri", "locationName":"accountId" }, @@ -894,7 +927,7 @@ "locationName":"archiveId" } }, - "documentation":"

Provides options for deleting an archive from an Amazon S3 Glacier vault.

" + "documentation":"

Provides options for deleting an archive from an Amazon Glacier vault.

" }, "DeleteVaultAccessPolicyInput":{ "type":"structure", @@ -905,7 +938,7 @@ "members":{ "accountId":{ "shape":"string", - "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", + "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", "location":"uri", "locationName":"accountId" }, @@ -927,7 +960,7 @@ "members":{ "accountId":{ "shape":"string", - "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", + "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", "location":"uri", "locationName":"accountId" }, @@ -938,7 +971,7 @@ "locationName":"vaultName" } }, - "documentation":"

Provides options for deleting a vault from Amazon S3 Glacier.

" + "documentation":"

Provides options for deleting a vault from Amazon Glacier.

" }, "DeleteVaultNotificationsInput":{ "type":"structure", @@ -949,7 +982,7 @@ "members":{ "accountId":{ "shape":"string", - "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", + "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", "location":"uri", "locationName":"accountId" }, @@ -972,7 +1005,7 @@ "members":{ "accountId":{ "shape":"string", - "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", + "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", "location":"uri", "locationName":"accountId" }, @@ -1000,7 +1033,7 @@ "members":{ "accountId":{ "shape":"string", - "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", + "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", "location":"uri", "locationName":"accountId" }, @@ -1030,7 +1063,7 @@ }, "LastInventoryDate":{ "shape":"string", - "documentation":"

The Universal Coordinated Time (UTC) date when Amazon S3 Glacier completed the last vault inventory. This value should be a string in the ISO 8601 date format, for example 2012-03-20T17:03:43.221Z.

" + "documentation":"

The Universal Coordinated Time (UTC) date when Amazon Glacier completed the last vault inventory. This value should be a string in the ISO 8601 date format, for example 2012-03-20T17:03:43.221Z.

" }, "NumberOfArchives":{ "shape":"long", @@ -1041,7 +1074,7 @@ "documentation":"

Total size, in bytes, of the archives in the vault as of the last inventory date. This field will return null if an inventory has not yet run on the vault, for example if you just created the vault.

" } }, - "documentation":"

Contains the Amazon S3 Glacier response to your request.

" + "documentation":"

Contains the Amazon Glacier response to your request.

" }, "Encryption":{ "type":"structure", @@ -1101,7 +1134,7 @@ "documentation":"

Contains the returned data retrieval policy in JSON format.

" } }, - "documentation":"

Contains the Amazon S3 Glacier response to the GetDataRetrievalPolicy request.

" + "documentation":"

Contains the Amazon Glacier response to the GetDataRetrievalPolicy request.

" }, "GetJobOutputInput":{ "type":"structure", @@ -1113,7 +1146,7 @@ "members":{ "accountId":{ "shape":"string", - "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", + "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", "location":"uri", "locationName":"accountId" }, @@ -1131,12 +1164,12 @@ }, "range":{ "shape":"string", - "documentation":"

The range of bytes to retrieve from the output. For example, if you want to download the first 1,048,576 bytes, specify the range as bytes=0-1048575. By default, this operation downloads the entire output.

If the job output is large, then you can use a range to retrieve a portion of the output. This allows you to download the entire output in smaller chunks of bytes. For example, suppose you have 1 GB of job output you want to download and you decide to download 128 MB chunks of data at a time, which is a total of eight Get Job Output requests. You use the following process to download the job output:

  1. Download a 128 MB chunk of output by specifying the appropriate byte range. Verify that all 128 MB of data was received.

  2. Along with the data, the response includes a SHA256 tree hash of the payload. You compute the checksum of the payload on the client and compare it with the checksum you received in the response to ensure you received all the expected data.

  3. Repeat steps 1 and 2 for all the eight 128 MB chunks of output data, each time specifying the appropriate byte range.

  4. After downloading all the parts of the job output, you have a list of eight checksum values. Compute the tree hash of these values to find the checksum of the entire output. Using the DescribeJob API, obtain job information of the job that provided you the output. The response includes the checksum of the entire archive stored in Amazon S3 Glacier. You compare this value with the checksum you computed to ensure you have downloaded the entire archive content with no errors.

", + "documentation":"

The range of bytes to retrieve from the output. For example, if you want to download the first 1,048,576 bytes, specify the range as bytes=0-1048575. By default, this operation downloads the entire output.

If the job output is large, then you can use a range to retrieve a portion of the output. This allows you to download the entire output in smaller chunks of bytes. For example, suppose you have 1 GB of job output you want to download and you decide to download 128 MB chunks of data at a time, which is a total of eight Get Job Output requests. You use the following process to download the job output:

  1. Download a 128 MB chunk of output by specifying the appropriate byte range. Verify that all 128 MB of data was received.

  2. Along with the data, the response includes a SHA256 tree hash of the payload. You compute the checksum of the payload on the client and compare it with the checksum you received in the response to ensure you received all the expected data.

  3. Repeat steps 1 and 2 for all the eight 128 MB chunks of output data, each time specifying the appropriate byte range.

  4. After downloading all the parts of the job output, you have a list of eight checksum values. Compute the tree hash of these values to find the checksum of the entire output. Using the DescribeJob API, obtain job information of the job that provided you the output. The response includes the checksum of the entire archive stored in Amazon Glacier. You compare this value with the checksum you computed to ensure you have downloaded the entire archive content with no errors.

", "location":"header", "locationName":"Range" } }, - "documentation":"

Provides options for downloading output of an Amazon S3 Glacier job.

" + "documentation":"

Provides options for downloading output of an Amazon Glacier job.

" }, "GetJobOutputOutput":{ "type":"structure", @@ -1158,7 +1191,7 @@ }, "contentRange":{ "shape":"string", - "documentation":"

The range of bytes returned by Amazon S3 Glacier. If only partial output is downloaded, the response provides the range of bytes Amazon S3 Glacier returned. For example, bytes 0-1048575/8388608 returns the first 1 MB from 8 MB.

", + "documentation":"

The range of bytes returned by Amazon Glacier. If only partial output is downloaded, the response provides the range of bytes Amazon Glacier returned. For example, bytes 0-1048575/8388608 returns the first 1 MB from 8 MB.

", "location":"header", "locationName":"Content-Range" }, @@ -1181,7 +1214,7 @@ "locationName":"x-amz-archive-description" } }, - "documentation":"

Contains the Amazon S3 Glacier response to your request.

", + "documentation":"

Contains the Amazon Glacier response to your request.

", "payload":"body" }, "GetVaultAccessPolicyInput":{ @@ -1193,7 +1226,7 @@ "members":{ "accountId":{ "shape":"string", - "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", + "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", "location":"uri", "locationName":"accountId" }, @@ -1226,7 +1259,7 @@ "members":{ "accountId":{ "shape":"string", - "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", + "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", "location":"uri", "locationName":"accountId" }, @@ -1259,7 +1292,7 @@ "documentation":"

The UTC date and time at which the vault lock was put into the InProgress state.

" } }, - "documentation":"

Contains the Amazon S3 Glacier response to your request.

" + "documentation":"

Contains the Amazon Glacier response to your request.

" }, "GetVaultNotificationsInput":{ "type":"structure", @@ -1270,7 +1303,7 @@ "members":{ "accountId":{ "shape":"string", - "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", + "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", "location":"uri", "locationName":"accountId" }, @@ -1291,7 +1324,7 @@ "documentation":"

Returns the notification configuration set on the vault.

" } }, - "documentation":"

Contains the Amazon S3 Glacier response to your request.

", + "documentation":"

Contains the Amazon Glacier response to your request.

", "payload":"vaultNotificationConfig" }, "GlacierJobDescription":{ @@ -1434,7 +1467,7 @@ "members":{ "accountId":{ "shape":"string", - "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", + "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", "location":"uri", "locationName":"accountId" }, @@ -1449,7 +1482,7 @@ "documentation":"

Provides options for specifying job information.

" } }, - "documentation":"

Provides options for initiating an Amazon S3 Glacier job.

", + "documentation":"

Provides options for initiating an Amazon Glacier job.

", "payload":"jobParameters" }, "InitiateJobOutput":{ @@ -1474,7 +1507,7 @@ "locationName":"x-amz-job-output-path" } }, - "documentation":"

Contains the Amazon S3 Glacier response to your request.

" + "documentation":"

Contains the Amazon Glacier response to your request.

" }, "InitiateMultipartUploadInput":{ "type":"structure", @@ -1485,7 +1518,7 @@ "members":{ "accountId":{ "shape":"string", - "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", + "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", "location":"uri", "locationName":"accountId" }, @@ -1508,14 +1541,14 @@ "locationName":"x-amz-part-size" } }, - "documentation":"

Provides options for initiating a multipart upload to an Amazon S3 Glacier vault.

" + "documentation":"

Provides options for initiating a multipart upload to an Amazon Glacier vault.

" }, "InitiateMultipartUploadOutput":{ "type":"structure", "members":{ "location":{ "shape":"string", - "documentation":"

The relative URI path of the multipart upload ID Amazon S3 Glacier created.

", + "documentation":"

The relative URI path of the multipart upload ID Amazon Glacier created.

", "location":"header", "locationName":"Location" }, @@ -1526,7 +1559,7 @@ "locationName":"x-amz-multipart-upload-id" } }, - "documentation":"

The Amazon S3 Glacier response to your request.

" + "documentation":"

The Amazon Glacier response to your request.

" }, "InitiateVaultLockInput":{ "type":"structure", @@ -1565,7 +1598,7 @@ "locationName":"x-amz-lock-id" } }, - "documentation":"

Contains the Amazon S3 Glacier response to your request.

" + "documentation":"

Contains the Amazon Glacier response to your request.

" }, "InputSerialization":{ "type":"structure", @@ -1681,7 +1714,7 @@ }, "SNSTopic":{ "shape":"string", - "documentation":"

The Amazon SNS topic ARN to which Amazon S3 Glacier sends a notification when the job is completed and the output is ready for you to download. The specified topic publishes the notification to its subscribers. The SNS topic must exist.

" + "documentation":"

The Amazon SNS topic ARN to which Amazon Glacier sends a notification when the job is completed and the output is ready for you to download. The specified topic publishes the notification to its subscribers. The SNS topic must exist.

" }, "RetrievalByteRange":{ "shape":"string", @@ -1735,7 +1768,7 @@ "members":{ "accountId":{ "shape":"string", - "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", + "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", "location":"uri", "locationName":"accountId" }, @@ -1770,7 +1803,7 @@ "locationName":"completed" } }, - "documentation":"

Provides options for retrieving a job list for an Amazon S3 Glacier vault.

" + "documentation":"

Provides options for retrieving a job list for an Amazon Glacier vault.

" }, "ListJobsOutput":{ "type":"structure", @@ -1784,7 +1817,7 @@ "documentation":"

An opaque string used for pagination that specifies the job at which the listing of jobs should begin. You get the marker value from a previous List Jobs response. You only need to include the marker if you are continuing the pagination of the results started in a previous List Jobs request.

" } }, - "documentation":"

Contains the Amazon S3 Glacier response to your request.

" + "documentation":"

Contains the Amazon Glacier response to your request.

" }, "ListMultipartUploadsInput":{ "type":"structure", @@ -1795,7 +1828,7 @@ "members":{ "accountId":{ "shape":"string", - "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", + "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", "location":"uri", "locationName":"accountId" }, @@ -1832,7 +1865,7 @@ "documentation":"

An opaque string that represents where to continue pagination of the results. You use the marker in a new List Multipart Uploads request to obtain more uploads in the list. If there are no more uploads, this value is null.

" } }, - "documentation":"

Contains the Amazon S3 Glacier response to your request.

" + "documentation":"

Contains the Amazon Glacier response to your request.

" }, "ListPartsInput":{ "type":"structure", @@ -1844,7 +1877,7 @@ "members":{ "accountId":{ "shape":"string", - "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", + "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", "location":"uri", "locationName":"accountId" }, @@ -1907,7 +1940,7 @@ "documentation":"

An opaque string that represents where to continue pagination of the results. You use the marker in a new List Parts request to obtain more jobs in the list. If there are no more parts, this value is null.

" } }, - "documentation":"

Contains the Amazon S3 Glacier response to your request.

" + "documentation":"

Contains the Amazon Glacier response to your request.

" }, "ListProvisionedCapacityInput":{ "type":"structure", @@ -1915,7 +1948,7 @@ "members":{ "accountId":{ "shape":"string", - "documentation":"

The AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, don't include any hyphens ('-') in the ID.

", + "documentation":"

The AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, don't include any hyphens ('-') in the ID.

", "location":"uri", "locationName":"accountId" } @@ -1939,7 +1972,7 @@ "members":{ "accountId":{ "shape":"string", - "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", + "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", "location":"uri", "locationName":"accountId" }, @@ -1960,7 +1993,7 @@ "documentation":"

The tags attached to the vault. Each tag is composed of a key and a value.

" } }, - "documentation":"

Contains the Amazon S3 Glacier response to your request.

" + "documentation":"

Contains the Amazon Glacier response to your request.

" }, "ListVaultsInput":{ "type":"structure", @@ -1999,7 +2032,7 @@ "documentation":"

The vault ARN at which to continue pagination of the results. You use the marker in another List Vaults request to obtain more vaults in the list.

" } }, - "documentation":"

Contains the Amazon S3 Glacier response to your request.

" + "documentation":"

Contains the Amazon Glacier response to your request.

" }, "MissingParameterValueException":{ "type":"structure", @@ -2021,6 +2054,16 @@ "error":{"httpStatusCode":400}, "exception":true }, + "NoLongerSupportedException":{ + "type":"structure", + "members":{ + "type":{"shape":"string"}, + "code":{"shape":"string"}, + "message":{"shape":"string"} + }, + "error":{"httpStatusCode":400}, + "exception":true + }, "NotificationEventList":{ "type":"list", "member":{"shape":"string"} @@ -2059,7 +2102,7 @@ }, "SHA256TreeHash":{ "shape":"string", - "documentation":"

The SHA256 tree hash value that Amazon S3 Glacier calculated for the part. This field is never null.

" + "documentation":"

The SHA256 tree hash value that Amazon Glacier calculated for the part. This field is never null.

" } }, "documentation":"

A list of the part sizes of the multipart upload.

" @@ -2122,7 +2165,7 @@ "members":{ "accountId":{ "shape":"string", - "documentation":"

The AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, don't include any hyphens ('-') in the ID.

", + "documentation":"

The AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, don't include any hyphens ('-') in the ID.

", "location":"uri", "locationName":"accountId" } @@ -2155,7 +2198,7 @@ "members":{ "accountId":{ "shape":"string", - "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", + "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", "location":"uri", "locationName":"accountId" }, @@ -2185,10 +2228,10 @@ }, "message":{ "shape":"string", - "documentation":"

Returned if, when uploading an archive, Amazon S3 Glacier times out while receiving the upload.

" + "documentation":"

Returned if, when uploading an archive, Amazon Glacier times out while receiving the upload.

" } }, - "documentation":"

Returned if, when uploading an archive, Amazon S3 Glacier times out while receiving the upload.

", + "documentation":"

Returned if, when uploading an archive, Amazon Glacier times out while receiving the upload.

", "error":{"httpStatusCode":408}, "exception":true }, @@ -2318,7 +2361,7 @@ "members":{ "accountId":{ "shape":"string", - "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", + "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", "location":"uri", "locationName":"accountId" }, @@ -2345,7 +2388,7 @@ "members":{ "accountId":{ "shape":"string", - "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", + "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", "location":"uri", "locationName":"accountId" }, @@ -2418,7 +2461,7 @@ }, "accountId":{ "shape":"string", - "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", + "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", "location":"uri", "locationName":"accountId" }, @@ -2478,7 +2521,7 @@ "members":{ "accountId":{ "shape":"string", - "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon S3 Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", + "documentation":"

The AccountId value is the AWS account ID of the account that owns the vault. You can either specify an AWS account ID or optionally a single '-' (hyphen), in which case Amazon Glacier uses the AWS account ID associated with the credentials used to sign the request. If you use an account ID, do not include any hyphens ('-') in the ID.

", "location":"uri", "locationName":"accountId" }, @@ -2502,7 +2545,7 @@ }, "range":{ "shape":"string", - "documentation":"

Identifies the range of bytes in the assembled archive that will be uploaded in this part. Amazon S3 Glacier uses this information to assemble the archive in the proper sequence. The format of this header follows RFC 2616. An example header is Content-Range:bytes 0-4194303/*.

", + "documentation":"

Identifies the range of bytes in the assembled archive that will be uploaded in this part. Amazon Glacier uses this information to assemble the archive in the proper sequence. The format of this header follows RFC 2616. An example header is Content-Range:bytes 0-4194303/*.

", "location":"header", "locationName":"Content-Range" }, @@ -2519,12 +2562,12 @@ "members":{ "checksum":{ "shape":"string", - "documentation":"

The SHA256 tree hash that Amazon S3 Glacier computed for the uploaded part.

", + "documentation":"

The SHA256 tree hash that Amazon Glacier computed for the uploaded part.

", "location":"header", "locationName":"x-amz-sha256-tree-hash" } }, - "documentation":"

Contains the Amazon S3 Glacier response to your request.

" + "documentation":"

Contains the Amazon Glacier response to your request.

" }, "UploadsList":{ "type":"list", @@ -2563,7 +2606,7 @@ }, "Events":{ "shape":"NotificationEventList", - "documentation":"

A list of one or more events for which Amazon S3 Glacier will send a notification to the specified Amazon SNS topic.

" + "documentation":"

A list of one or more events for which Amazon Glacier will send a notification to the specified Amazon SNS topic.

" } }, "documentation":"

Represents a vault's notification configuration.

" @@ -2578,5 +2621,5 @@ "long":{"type":"long"}, "string":{"type":"string"} }, - "documentation":"

Amazon S3 Glacier (Glacier) is a storage solution for \"cold data.\"

Glacier is an extremely low-cost storage service that provides secure, durable, and easy-to-use storage for data backup and archival. With Glacier, customers can store their data cost effectively for months, years, or decades. Glacier also enables customers to offload the administrative burdens of operating and scaling storage to AWS, so they don't have to worry about capacity planning, hardware provisioning, data replication, hardware failure and recovery, or time-consuming hardware migrations.

Glacier is a great storage choice when low storage cost is paramount and your data is rarely retrieved. If your application requires fast or frequent access to your data, consider using Amazon S3. For more information, see Amazon Simple Storage Service (Amazon S3).

You can store any kind of data in any format. There is no maximum limit on the total amount of data you can store in Glacier.

If you are a first-time user of Glacier, we recommend that you begin by reading the following sections in the Amazon S3 Glacier Developer Guide:

" + "documentation":"

Amazon Glacier (Glacier) is a storage solution for \"cold data.\"

Glacier is an extremely low-cost storage service that provides secure, durable, and easy-to-use storage for data backup and archival. With Glacier, customers can store their data cost effectively for months, years, or decades. Glacier also enables customers to offload the administrative burdens of operating and scaling storage to AWS, so they don't have to worry about capacity planning, hardware provisioning, data replication, hardware failure and recovery, or time-consuming hardware migrations.

Glacier is a great storage choice when low storage cost is paramount and your data is rarely retrieved. If your application requires fast or frequent access to your data, consider using Amazon S3. For more information, see Amazon Simple Storage Service (Amazon S3).

You can store any kind of data in any format. There is no maximum limit on the total amount of data you can store in Glacier.

If you are a first-time user of Glacier, we recommend that you begin by reading the following sections in the Amazon Glacier Developer Guide:

" } diff --git a/awscli/botocore/data/guardduty/2017-11-28/service-2.json b/awscli/botocore/data/guardduty/2017-11-28/service-2.json index 204f78ce2e6d..fe31e8f22d3e 100644 --- a/awscli/botocore/data/guardduty/2017-11-28/service-2.json +++ b/awscli/botocore/data/guardduty/2017-11-28/service-2.json @@ -1947,6 +1947,17 @@ }, "documentation":"

Contains information about the Autonomous System (AS) associated with the network endpoints involved in an attack sequence.

" }, + "AutoscalingAutoScalingGroup":{ + "type":"structure", + "members":{ + "Ec2InstanceUids":{ + "shape":"Ec2InstanceUids", + "documentation":"

A list of unique identifiers for the compromised Amazon EC2 instances that are part of the same Auto Scaling Group.

", + "locationName":"ec2InstanceUids" + } + }, + "documentation":"

Contains information about the Auto Scaling Group involved in a GuardDuty finding, including unique identifiers of the Amazon EC2 instances.

" + }, "AwsApiCallAction":{ "type":"structure", "members":{ @@ -2113,6 +2124,17 @@ }, "documentation":"

Contains information on the status of CloudTrail as a data source for the detector.

" }, + "CloudformationStack":{ + "type":"structure", + "members":{ + "Ec2InstanceUids":{ + "shape":"Ec2InstanceUids", + "documentation":"

A list of unique identifiers for the compromised Amazon EC2 instances that were created as part of the same CloudFormation stack.

", + "locationName":"ec2InstanceUids" + } + }, + "documentation":"

Contains information about the CloudFormation stack involved in a GuardDuty finding, including unique identifiers of the Amazon EC2 instances.

" + }, "ClusterStatus":{ "type":"string", "enum":[ @@ -2192,6 +2214,16 @@ "shape":"Long", "documentation":"

Represents a less than or equal condition to be applied to a single field when querying for findings.

", "locationName":"lessThanOrEqual" + }, + "Matches":{ + "shape":"Matches", + "documentation":"

Represents the match condition to be applied to a single field when querying for findings.

The matches condition is available only for create-filter and update-filter APIs.

", + "locationName":"matches" + }, + "NotMatches":{ + "shape":"NotMatches", + "documentation":"

Represents the not match condition to be applied to a single field when querying for findings.

The not-matches condition is available only for create-filter and update-filter APIs.

", + "locationName":"notMatches" } }, "documentation":"

Contains information about the condition.

" @@ -4251,6 +4283,17 @@ }, "documentation":"

Describes the configuration of scanning EBS volumes as a data source.

" }, + "Ec2Image":{ + "type":"structure", + "members":{ + "Ec2InstanceUids":{ + "shape":"Ec2InstanceUids", + "documentation":"

A list of unique identifiers for the compromised Amazon EC2 instances that were launched with the same Amazon Machine Image (AMI).

", + "locationName":"ec2InstanceUids" + } + }, + "documentation":"

Contains information about the Amazon EC2 Image involved in a GuardDuty finding, including unique identifiers of the Amazon EC2 instances.

" + }, "Ec2ImageDetails":{ "type":"structure", "members":{ @@ -4320,6 +4363,22 @@ "max":25, "min":0 }, + "Ec2LaunchTemplate":{ + "type":"structure", + "members":{ + "Ec2InstanceUids":{ + "shape":"Ec2InstanceUids", + "documentation":"

A list of unique identifiers for the compromised Amazon EC2 instances that share the same Amazon EC2 launch template.

", + "locationName":"ec2InstanceUids" + }, + "Version":{ + "shape":"LaunchTemplateVersion", + "documentation":"

Version of the EC2 launch template.

", + "locationName":"version" + } + }, + "documentation":"

Contains information about the Amazon EC2 launch template involved in a GuardDuty finding, including unique identifiers of the Amazon EC2 instances.

" + }, "Ec2NetworkInterface":{ "type":"structure", "members":{ @@ -4360,6 +4419,33 @@ "type":"list", "member":{"shape":"String"} }, + "Ec2Vpc":{ + "type":"structure", + "members":{ + "Ec2InstanceUids":{ + "shape":"Ec2InstanceUids", + "documentation":"

A list of unique identifiers for the compromised Amazon EC2 instances that were launched within the same Virtual Private Cloud (VPC).

", + "locationName":"ec2InstanceUids" + } + }, + "documentation":"

Contains information about the Amazon EC2 VPC involved in a GuardDuty finding, including unique identifiers of the Amazon EC2 instances.

" + }, + "EcsCluster":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"EcsClusterStatus", + "documentation":"

The current status of the Amazon ECS cluster.

", + "locationName":"status" + }, + "Ec2InstanceUids":{ + "shape":"Ec2InstanceUids", + "documentation":"

A list of unique identifiers for the Amazon EC2 instances that serve as container instances in the Amazon ECS cluster.

", + "locationName":"ec2InstanceUids" + } + }, + "documentation":"

Contains information about the Amazon ECS cluster involved in a GuardDuty finding, including cluster identification and status.

" + }, "EcsClusterDetails":{ "type":"structure", "members":{ @@ -4406,6 +4492,49 @@ }, "documentation":"

Contains information about the details of the ECS Cluster.

" }, + "EcsClusterStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "PROVISIONING", + "DEPROVISIONING", + "FAILED", + "INACTIVE" + ] + }, + "EcsLaunchType":{ + "type":"string", + "enum":[ + "FARGATE", + "EC2" + ] + }, + "EcsTask":{ + "type":"structure", + "members":{ + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

The timestamp indicating when the Amazon ECS task was created, in UTC format.

", + "locationName":"createdAt" + }, + "TaskDefinitionArn":{ + "shape":"String", + "documentation":"

The ARN of the task definition that describes the container and volume definitions of the Amazon ECS task.

", + "locationName":"taskDefinitionArn" + }, + "LaunchType":{ + "shape":"EcsLaunchType", + "documentation":"

The infrastructure type on which the Amazon ECS task runs.

", + "locationName":"launchType" + }, + "ContainerUids":{ + "shape":"ContainerUids", + "documentation":"

A list of unique identifiers for the containers associated with the Amazon ECS task.

", + "locationName":"containerUids" + } + }, + "documentation":"

Contains information about the Amazon ECS task involved in a GuardDuty finding, including task definition and container identifiers.

" + }, "EcsTaskDetails":{ "type":"structure", "members":{ @@ -4847,7 +4976,15 @@ "ACCESS_KEY", "EKS_CLUSTER", "KUBERNETES_WORKLOAD", - "CONTAINER" + "CONTAINER", + "ECS_CLUSTER", + "ECS_TASK", + "AUTOSCALING_AUTO_SCALING_GROUP", + "IAM_INSTANCE_PROFILE", + "CLOUDFORMATION_STACK", + "EC2_LAUNCH_TEMPLATE", + "EC2_VPC", + "EC2_IMAGE" ] }, "FindingStatisticType":{ @@ -5702,7 +5839,10 @@ }, "GetRemainingFreeTrialDaysRequest":{ "type":"structure", - "required":["DetectorId"], + "required":[ + "AccountIds", + "DetectorId" + ], "members":{ "DetectorId":{ "shape":"DetectorId", @@ -6090,6 +6230,17 @@ }, "documentation":"

Contains information about the EC2 instance profile.

" }, + "IamInstanceProfileV2":{ + "type":"structure", + "members":{ + "Ec2InstanceUids":{ + "shape":"Ec2InstanceUids", + "documentation":"

A list of unique identifiers for the compromised Amazon EC2 instances that share the same IAM instance profile.

", + "locationName":"ec2InstanceUids" + } + }, + "documentation":"

Contains information about the IAM instance profile involved in a GuardDuty finding, including unique identifiers of the Amazon EC2 instances.

" + }, "ImpersonatedUser":{ "type":"structure", "members":{ @@ -6832,6 +6983,10 @@ }, "documentation":"

Information about the Lambda function involved in the finding.

" }, + "LaunchTemplateVersion":{ + "type":"string", + "max":256 + }, "Lineage":{ "type":"list", "member":{"shape":"LineageObject"} @@ -7859,6 +8014,17 @@ }, "documentation":"

Contains information about the administrator account and invitation.

" }, + "Match":{ + "type":"string", + "max":512, + "min":1 + }, + "Matches":{ + "type":"list", + "member":{"shape":"Match"}, + "max":5, + "min":1 + }, "MaxResults":{ "type":"integer", "max":50, @@ -8295,6 +8461,17 @@ "type":"list", "member":{"shape":"String"} }, + "NotMatch":{ + "type":"string", + "max":512, + "min":1 + }, + "NotMatches":{ + "type":"list", + "member":{"shape":"NotMatch"}, + "max":5, + "min":1 + }, "ObservationTexts":{ "type":"list", "member":{"shape":"String"} @@ -9047,6 +9224,11 @@ "documentation":"

The Amazon Resource Name (ARN) that identifies the database instance involved in the finding.

", "locationName":"dbInstanceArn" }, + "DbiResourceId":{ + "shape":"String", + "documentation":"

The unique ID of the database resource involved in the activity that prompted GuardDuty to generate the finding.

", + "locationName":"dbiResourceId" + }, "Tags":{ "shape":"Tags", "documentation":"

Information about the tag key-value pairs.

", @@ -9368,6 +9550,46 @@ "shape":"ContainerFindingResource", "documentation":"

Contains detailed information about the container associated with the activity that prompted GuardDuty to generate a finding.

", "locationName":"container" + }, + "EcsCluster":{ + "shape":"EcsCluster", + "documentation":"

Contains detailed information about the Amazon ECS cluster associated with the activity that prompted GuardDuty to generate a finding.

", + "locationName":"ecsCluster" + }, + "EcsTask":{ + "shape":"EcsTask", + "documentation":"

Contains detailed information about the Amazon ECS task associated with the activity that prompted GuardDuty to generate a finding.

", + "locationName":"ecsTask" + }, + "IamInstanceProfile":{ + "shape":"IamInstanceProfileV2", + "documentation":"

Contains detailed information about the IAM instance profile associated with the activity that prompted GuardDuty to generate a finding.

", + "locationName":"iamInstanceProfile" + }, + "AutoscalingAutoScalingGroup":{ + "shape":"AutoscalingAutoScalingGroup", + "documentation":"

Contains detailed information about the Auto Scaling Group associated with the activity that prompted GuardDuty to generate a finding.

", + "locationName":"autoscalingAutoScalingGroup" + }, + "Ec2LaunchTemplate":{ + "shape":"Ec2LaunchTemplate", + "documentation":"

Contains detailed information about the EC2 launch template associated with the activity that prompted GuardDuty to generate a finding.

", + "locationName":"ec2LaunchTemplate" + }, + "Ec2Vpc":{ + "shape":"Ec2Vpc", + "documentation":"

Contains detailed information about the EC2 VPC associated with the activity that prompted GuardDuty to generate a finding.

", + "locationName":"ec2Vpc" + }, + "Ec2Image":{ + "shape":"Ec2Image", + "documentation":"

Contains detailed information about the EC2 Image associated with the activity that prompted GuardDuty to generate a finding.

", + "locationName":"ec2Image" + }, + "CloudformationStack":{ + "shape":"CloudformationStack", + "documentation":"

Contains detailed information about the CloudFormation stack associated with the activity that prompted GuardDuty to generate a finding.

", + "locationName":"cloudformationStack" } }, "documentation":"

Contains information about the Amazon Web Services resource that is associated with the activity that prompted GuardDuty to generate a finding.

" diff --git a/awscli/botocore/data/health/2016-08-04/endpoint-rule-set-1.json b/awscli/botocore/data/health/2016-08-04/endpoint-rule-set-1.json index d818b36f2521..0dc9da01e68d 100644 --- a/awscli/botocore/data/health/2016-08-04/endpoint-rule-set-1.json +++ b/awscli/botocore/data/health/2016-08-04/endpoint-rule-set-1.json @@ -29,6 +29,220 @@ } }, "rules": [ + { + "conditions": [ + { + "fn": "not", + "argv": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + }, + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + }, + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + }, + { + "fn": "not", + "argv": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws" + ] + } + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-cn" + ] + } + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-us-gov" + ] + } + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-iso" + ] + } + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-iso-b" + ] + } + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-iso-e" + ] + } + ] + }, + { + "fn": "not", + "argv": [ + { + "fn": "stringEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ 
+ { + "ref": "PartitionResult" + }, + "name" + ] + }, + "aws-iso-f" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "endpoint": { + "url": "https://health-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [], + "endpoint": { + "url": "https://health.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, { "conditions": [ { diff --git a/awscli/botocore/data/iam/2010-05-08/service-2.json b/awscli/botocore/data/iam/2010-05-08/service-2.json index d7eeb1b33a8f..565157387bf8 100644 --- a/awscli/botocore/data/iam/2010-05-08/service-2.json +++ b/awscli/botocore/data/iam/2010-05-08/service-2.json @@ -3901,7 +3901,11 @@ "ApproverId":{"shape":"arnType"}, "State":{ "shape":"stateType", - "documentation":"

The state of this delegation request.

See the Understanding the Request Lifecycle for an explanation of how these states are transitioned.

" + "documentation":"

The state of this delegation request.

See the Understanding the Request Lifecycle for an explanation of how these states are transitioned.

" + }, + "ExpirationTime":{ + "shape":"dateType", + "documentation":"

The expiry time of this delegation request.

See the Understanding the Request Lifecycle for details on the lifetime of a delegation request at each state.

" }, "RequestorId":{ "shape":"accountIdType", diff --git a/awscli/botocore/data/identitystore/2020-06-15/service-2.json b/awscli/botocore/data/identitystore/2020-06-15/service-2.json index 004e43a892e7..11abb7a10fa6 100644 --- a/awscli/botocore/data/identitystore/2020-06-15/service-2.json +++ b/awscli/botocore/data/identitystore/2020-06-15/service-2.json @@ -420,7 +420,7 @@ "documentation":"

A string representing the type of address. For example, \"Home.\"

" }, "Primary":{ - "shape":"SensitiveBooleanType", + "shape":"BooleanType", "documentation":"

A Boolean value representing whether this is the primary address for the associated resource.

" } }, @@ -480,6 +480,7 @@ "documentation":"

The value of the attribute. This is a Document type. This type is not supported by Java V1, Go V1, and older versions of the CLI.

", "document":true }, + "BooleanType":{"type":"boolean"}, "ConflictException":{ "type":"structure", "members":{ @@ -490,7 +491,7 @@ }, "Reason":{ "shape":"ConflictExceptionReason", - "documentation":"

This request cannot be completed for one of the following reasons:

" + "documentation":"

Indicates the reason for a conflict error when the service is unable to access a Customer Managed KMS key. For non-KMS permission errors, this field is not included.

" } }, "documentation":"

This request cannot be completed for one of the following reasons:

", @@ -648,6 +649,14 @@ "Birthdate":{ "shape":"SensitiveStringType", "documentation":"

The user's birthdate in YYYY-MM-DD format. This field supports standard date format for storing personal information.

" + }, + "Roles":{ + "shape":"Roles", + "documentation":"

A list of Role objects containing roles associated with the user.

" + }, + "Extensions":{ + "shape":"Extensions", + "documentation":"

A map with additional attribute extensions for the user. Each map key corresponds to an extension name, while map values represent extension data in Document type (not supported by Java V1, Go V1 and older versions of the CLI). aws:identitystore:enterprise is the only supported extension name.

" } } }, @@ -865,6 +874,10 @@ "UserId":{ "shape":"ResourceId", "documentation":"

The identifier for a user in the identity store.

" + }, + "Extensions":{ + "shape":"ExtensionNames", + "documentation":"

A collection of extension names indicating what extensions the service should retrieve alongside other user attributes. aws:identitystore:enterprise is the only supported extension name.

" } } }, @@ -955,6 +968,10 @@ "shape":"SensitiveStringType", "documentation":"

The user's birthdate in YYYY-MM-DD format. This field returns the stored birthdate information for the user.

" }, + "Roles":{ + "shape":"Roles", + "documentation":"

The roles of the user.

" + }, "CreatedAt":{ "shape":"DateType", "documentation":"

The date and time the user was created.

" @@ -970,6 +987,10 @@ "UpdatedBy":{ "shape":"StringType", "documentation":"

The identifier of the user or system that last updated the user.

" + }, + "Extensions":{ + "shape":"Extensions", + "documentation":"

A map of explicitly requested attribute extensions associated with the user. Not populated if the user has no requested extensions.

" } } }, @@ -985,7 +1006,7 @@ "documentation":"

A string representing the type of address. For example, \"Work.\"

" }, "Primary":{ - "shape":"SensitiveBooleanType", + "shape":"BooleanType", "documentation":"

A Boolean value representing whether this is the primary email address for the associated resource.

" } }, @@ -998,6 +1019,25 @@ "min":1 }, "ExceptionMessage":{"type":"string"}, + "ExtensionName":{ + "type":"string", + "max":50, + "min":1, + "pattern":"aws:identitystore:[a-z]{1,20}" + }, + "ExtensionNames":{ + "type":"list", + "member":{"shape":"ExtensionName"}, + "max":10, + "min":1 + }, + "Extensions":{ + "type":"map", + "key":{"shape":"ExtensionName"}, + "value":{"shape":"AttributeValue"}, + "max":10, + "min":1 + }, "ExternalId":{ "type":"structure", "required":[ @@ -1277,7 +1317,7 @@ "documentation":"

An object that contains the identifier of a group member. Setting the UserID field to the specific identifier for a user indicates that the user is a member of the group.

" }, "MembershipExists":{ - "shape":"SensitiveBooleanType", + "shape":"BooleanType", "documentation":"

Indicates whether a membership relation exists or not.

" } }, @@ -1478,6 +1518,10 @@ "shape":"IdentityStoreId", "documentation":"

The globally unique identifier for the identity store, such as d-1234567890. In this example, d- is a fixed prefix, and 1234567890 is a randomly generated string that contains numbers and lower case letters. This value is generated at the time that a new identity store is created.

" }, + "Extensions":{ + "shape":"ExtensionNames", + "documentation":"

A collection of extension names indicating what extensions the service should retrieve alongside other user attributes. aws:identitystore:enterprise is the only supported extension name.

" + }, "MaxResults":{ "shape":"MaxResults", "documentation":"

The maximum number of results to be returned per request. This parameter is used in the ListUsers and ListGroups requests to specify how many results to return in one page. The length limit is 50 characters.

", @@ -1574,7 +1618,7 @@ "documentation":"

A string representing the type of a phone number. For example, \"Mobile.\"

" }, "Primary":{ - "shape":"SensitiveBooleanType", + "shape":"BooleanType", "documentation":"

A Boolean value representing whether this is the primary phone number for the associated resource.

" } }, @@ -1603,7 +1647,7 @@ "documentation":"

A human-readable description of the photo for display purposes. This optional field provides context about the photo.

" }, "Primary":{ - "shape":"SensitiveBooleanType", + "shape":"BooleanType", "documentation":"

Specifies whether this is the user's primary photo. Default value is false. Only one photo can be designated as primary per user.

" } }, @@ -1661,13 +1705,34 @@ "GROUP", "USER", "IDENTITY_STORE", - "GROUP_MEMBERSHIP" + "GROUP_MEMBERSHIP", + "RESOURCE_POLICY" ] }, "RetryAfterSeconds":{"type":"integer"}, - "SensitiveBooleanType":{ - "type":"boolean", - "sensitive":true + "Role":{ + "type":"structure", + "members":{ + "Value":{ + "shape":"SensitiveStringType", + "documentation":"

A string containing a role name. For example, \"Researcher.\"

" + }, + "Type":{ + "shape":"SensitiveStringType", + "documentation":"

A string representing the type of role. For example, \"Work.\"

" + }, + "Primary":{ + "shape":"BooleanType", + "documentation":"

A Boolean value representing whether this is the primary role for the associated resource.

" + } + }, + "documentation":"

The role associated with the user.

" + }, + "Roles":{ + "type":"list", + "member":{"shape":"Role"}, + "max":1, + "min":1 }, "SensitiveStringType":{ "type":"string", @@ -1871,6 +1936,10 @@ "shape":"SensitiveStringType", "documentation":"

The user's birthdate in YYYY-MM-DD format. This field stores personal birthdate information for the user.

" }, + "Roles":{ + "shape":"Roles", + "documentation":"

A list of Role objects containing roles associated with the user.

" + }, "CreatedAt":{ "shape":"DateType", "documentation":"

The date and time the user was created.

" @@ -1886,6 +1955,10 @@ "UpdatedBy":{ "shape":"StringType", "documentation":"

The identifier of the user or system that last updated the user.

" + }, + "Extensions":{ + "shape":"Extensions", + "documentation":"

A map of explicitly requested attribute extensions associated with the user. Not populated if the user has no requested extensions.

" } }, "documentation":"

A user object that contains the metadata and attributes for a specified user.

" diff --git a/awscli/botocore/data/inspector-scan/2023-08-08/service-2.json b/awscli/botocore/data/inspector-scan/2023-08-08/service-2.json index c1cfffd58ad3..dc0d26a9f4ac 100644 --- a/awscli/botocore/data/inspector-scan/2023-08-08/service-2.json +++ b/awscli/botocore/data/inspector-scan/2023-08-08/service-2.json @@ -2,9 +2,10 @@ "version":"2.0", "metadata":{ "apiVersion":"2023-08-08", + "auth":["aws.auth#sigv4"], "endpointPrefix":"inspector-scan", - "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceAbbreviation":"inspector-scan", "serviceFullName":"Inspector Scan", "serviceId":"Inspector Scan", @@ -28,7 +29,7 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Scans a provided CycloneDX 1.5 SBOM and reports on any vulnerabilities discovered in that SBOM. You can generate compatible SBOMs for your resources using the Amazon Inspector SBOM generator.

", + "documentation":"

Scans a provided CycloneDX 1.5 SBOM and reports on any vulnerabilities discovered in that SBOM. You can generate compatible SBOMs for your resources using the Amazon Inspector SBOM generator.

The output of this action reports NVD and CVSS scores when NVD and CVSS scores are available. Because the output reports both scores, you might notice a discrepancy between them. However, you can triage the severity of either score depending on the vendor of your choosing.

", "idempotent":true } }, @@ -86,13 +87,13 @@ "type":"string", "enum":[ "CYCLONE_DX_1_5", - "INSPECTOR" + "INSPECTOR", + "INSPECTOR_ALT" ] }, "Sbom":{ "type":"structure", - "members":{ - }, + "members":{}, "document":true }, "ScanSbomRequest":{ @@ -101,7 +102,7 @@ "members":{ "sbom":{ "shape":"Sbom", - "documentation":"

The JSON file for the SBOM you want to scan. The SBOM must be in CycloneDX 1.5 format.

" + "documentation":"

The JSON file for the SBOM you want to scan. The SBOM must be in CycloneDX 1.5 format. This format limits you to passing 2000 components before throwing a ValidationException error.

" }, "outputFormat":{ "shape":"OutputFormat", diff --git a/awscli/botocore/data/inspector2/2020-06-08/service-2.json b/awscli/botocore/data/inspector2/2020-06-08/service-2.json index db9a20ee021b..86fd64ed5ff8 100644 --- a/awscli/botocore/data/inspector2/2020-06-08/service-2.json +++ b/awscli/botocore/data/inspector2/2020-06-08/service-2.json @@ -8995,7 +8995,7 @@ }, "reason":{ "shape":"ScanStatusReason", - "documentation":"

The scan status. Possible return values and descriptions are:

ACCESS_DENIED - Resource access policy restricting Amazon Inspector access. Please update the IAM policy.

ACCESS_DENIED_TO_ENCRYPTION_KEY - The KMS key policy doesn't allow Amazon Inspector access. Update the key policy.

DEEP_INSPECTION_COLLECTION_TIME_LIMIT_EXCEEDED - Amazon Inspector failed to extract the package inventory because the package collection time exceeding the maximum threshold of 15 minutes.

DEEP_INSPECTION_DAILY_SSM_INVENTORY_LIMIT_EXCEEDED - The SSM agent couldn't send inventory to Amazon Inspector because the SSM quota for Inventory data collected per instance per day has already been reached for this instance.

DEEP_INSPECTION_NO_INVENTORY - The Amazon Inspector plugin hasn't yet been able to collect an inventory of packages for this instance. This is usually the result of a pending scan, however, if this status persists after 6 hours, use SSM to ensure that the required Amazon Inspector associations exist and are running for the instance.

DEEP_INSPECTION_PACKAGE_COLLECTION_LIMIT_EXCEEDED - The instance has exceeded the 5000 package limit for Amazon Inspector Deep inspection. To resume Deep inspection for this instance you can try to adjust the custom paths associated with the account.

EC2_INSTANCE_STOPPED - This EC2 instance is in a stopped state, therefore, Amazon Inspector will pause scanning. The existing findings will continue to exist until the instance is terminated. Once the instance is re-started, Inspector will automatically start scanning the instance again. Please note that you will not be charged for this instance while it's in a stopped state.

EXCLUDED_BY_TAG - This resource was not scanned because it has been excluded by a tag.

IMAGE_SIZE_EXCEEDED - Reserved for future use.

INTEGRATION_CONNNECTION_LOST - Amazon Inspector couldn't communicate with the source code management platform.

INTERNAL_ERROR - Amazon Inspector has encountered an internal error for this resource. Amazon Inspector service will automatically resolve the issue and resume the scanning. No action required from the user.

NO_INVENTORY - Amazon Inspector couldn't find software application inventory to scan for vulnerabilities. This might be caused due to required Amazon Inspector associations being deleted or failing to run on your resource. Please verify the status of InspectorInventoryCollection-do-not-delete association in the SSM console for the resource. Additionally, you can verify the instance's inventory in the SSM Fleet Manager console.

NO_RESOURCES_FOUND - Reserved for future use.

NO_SCAN_CONFIGURATION_ASSOCIATED - The code repository resource doesn't have an associated scan configuration.

PENDING_DISABLE - This resource is pending cleanup during disablement. The customer will not be billed while a resource is in the pending disable status.

PENDING_INITIAL_SCAN - This resource has been identified for scanning, results will be available soon.

RESOURCE_TERMINATED - This resource has been terminated. The findings and coverage associated with this resource are in the process of being cleaned up.

SCAN_ELIGIBILITY_EXPIRED - The configured scan duration has lapsed for this image.

SCAN_FREQUENCY_MANUAL - This image will not be covered by Amazon Inspector due to the repository scan frequency configuration.

SCAN_FREQUENCY_SCAN_ON_PUSH - This image will be scanned one time and will not new findings because of the scan frequency configuration.

SCAN_IN_PROGRESS - The resource is currently being scanned.

STALE_INVENTORY - Amazon Inspector wasn't able to collect an updated software application inventory in the last 7 days. Please confirm the required Amazon Inspector associations still exist and you can still see an updated inventory in the SSM console.

SUCCESSFUL - The scan was successful.

UNMANAGED_EC2_INSTANCE - The EC2 instance is not managed by SSM, please use the following SSM automation to remediate the issue: https://docs.aws.amazon.com/systems-manager-automation-runbooks/latest/userguide/automation-awssupport-troubleshoot-managed-instance.html. Once the instance becomes managed by SSM, Inspector will automatically begin scanning this instance.

UNSUPPORTED_CONFIG_FILE - Reserved for future use.

UNSUPPORTED_LANGUAGE - The scan was unsuccessful because the repository contains files in an unsupported programming language.

UNSUPPORTED_MEDIA_TYPE - The ECR image has an unsupported media type.

UNSUPPORTED_OS - Amazon Inspector does not support this OS, architecture, or image manifest type at this time. To see a complete list of supported operating systems see: https://docs.aws.amazon.com/inspector/latest/user/supported.html.

UNSUPPORTED_RUNTIME - The function was not scanned because it has an unsupported runtime. To see a complete list of supported runtimes see: https://docs.aws.amazon.com/inspector/latest/user/supported.html.

IMAGE_ARCHIVED - This image has been archived in Amazon ECR and is no longer available for scanning in Amazon Inspector.

" + "documentation":"

The scan status. Possible return values and descriptions are:

ACCESS_DENIED - Resource access policy restricting Amazon Inspector access. Please update the IAM policy.

ACCESS_DENIED_TO_ENCRYPTION_KEY - The KMS key policy doesn't allow Amazon Inspector access. Update the key policy.

DEEP_INSPECTION_COLLECTION_TIME_LIMIT_EXCEEDED - Amazon Inspector failed to extract the package inventory because the package collection time exceeded the maximum threshold of 15 minutes.

DEEP_INSPECTION_DAILY_SSM_INVENTORY_LIMIT_EXCEEDED - The SSM agent couldn't send inventory to Amazon Inspector because the SSM quota for Inventory data collected per instance per day has already been reached for this instance.

DEEP_INSPECTION_NO_INVENTORY - The Amazon Inspector plugin hasn't yet been able to collect an inventory of packages for this instance. This is usually the result of a pending scan, however, if this status persists after 6 hours, use SSM to ensure that the required Amazon Inspector associations exist and are running for the instance.

DEEP_INSPECTION_PACKAGE_COLLECTION_LIMIT_EXCEEDED - The instance has exceeded the 5000 package limit for Amazon Inspector Deep inspection. To resume Deep inspection for this instance you can try to adjust the custom paths associated with the account.

EC2_INSTANCE_STOPPED - This EC2 instance is in a stopped state, therefore, Amazon Inspector will pause scanning. The existing findings will continue to exist until the instance is terminated. Once the instance is re-started, Inspector will automatically start scanning the instance again. Please note that you will not be charged for this instance while it's in a stopped state.

EXCLUDED_BY_TAG - This resource was not scanned because it has been excluded by a tag.

IMAGE_SIZE_EXCEEDED - Reserved for future use.

INTEGRATION_CONNNECTION_LOST - Amazon Inspector couldn't communicate with the source code management platform.

INTERNAL_ERROR - Amazon Inspector has encountered an internal error for this resource. Amazon Inspector service will automatically resolve the issue and resume the scanning. No action required from the user.

NO_INVENTORY - Amazon Inspector couldn't find software application inventory to scan for vulnerabilities. This might be caused due to required Amazon Inspector associations being deleted or failing to run on your resource. Please verify the status of InspectorInventoryCollection-do-not-delete association in the SSM console for the resource. Additionally, you can verify the instance's inventory in the SSM Fleet Manager console.

NO_RESOURCES_FOUND - Reserved for future use.

NO_SCAN_CONFIGURATION_ASSOCIATED - The code repository resource doesn't have an associated scan configuration.

PENDING_DISABLE - This resource is pending cleanup during disablement. The customer will not be billed while a resource is in the pending disable status.

PENDING_INITIAL_SCAN - This resource has been identified for scanning, results will be available soon.

RESOURCE_TERMINATED - This resource has been terminated. The findings and coverage associated with this resource are in the process of being cleaned up.

SCAN_ELIGIBILITY_EXPIRED - The configured scan duration has lapsed for this image.

SCAN_FREQUENCY_MANUAL - This image will not be covered by Amazon Inspector due to the repository scan frequency configuration.

SCAN_FREQUENCY_SCAN_ON_PUSH - This image will be scanned one time and will not generate new findings because of the scan frequency configuration.

SCAN_IN_PROGRESS - The resource is currently being scanned.

STALE_INVENTORY - Amazon Inspector wasn't able to collect an updated software application inventory in the last 7 days. Please confirm the required Amazon Inspector associations still exist and you can still see an updated inventory in the SSM console.

SUCCESSFUL - The scan was successful.

UNMANAGED_EC2_INSTANCE - The EC2 instance is not managed by SSM, please use the following SSM automation to remediate the issue: https://docs.aws.amazon.com/systems-manager-automation-runbooks/latest/userguide/automation-awssupport-troubleshoot-managed-instance.html. Once the instance becomes managed by SSM, Inspector will automatically begin scanning this instance.

UNSUPPORTED_CODE_ARTIFACTS - The function was not scanned because it has unsupported code artifacts.

UNSUPPORTED_CONFIG_FILE - Reserved for future use.

UNSUPPORTED_LANGUAGE - The scan was unsuccessful because the repository contains files in an unsupported programming language.

UNSUPPORTED_MEDIA_TYPE - The ECR image has an unsupported media type.

UNSUPPORTED_OS - Amazon Inspector does not support this OS, architecture, or image manifest type at this time. To see a complete list of supported operating systems see: https://docs.aws.amazon.com/inspector/latest/user/supported.html.

UNSUPPORTED_RUNTIME - The function was not scanned because it has an unsupported runtime. To see a complete list of supported runtimes see: https://docs.aws.amazon.com/inspector/latest/user/supported.html.

IMAGE_ARCHIVED - This image has been archived in Amazon ECR and is no longer available for scanning in Amazon Inspector.

" } }, "documentation":"

The status of the scan.

" @@ -9042,7 +9042,8 @@ "UNSUPPORTED_LANGUAGE", "NO_SCAN_CONFIGURATION_ASSOCIATED", "SCAN_IN_PROGRESS", - "IMAGE_ARCHIVED" + "IMAGE_ARCHIVED", + "UNSUPPORTED_CODE_ARTIFACTS" ] }, "ScanType":{ diff --git a/awscli/botocore/data/iot/2015-05-28/service-2.json b/awscli/botocore/data/iot/2015-05-28/service-2.json index ffee5f7d79ed..aa85d09f3407 100644 --- a/awscli/botocore/data/iot/2015-05-28/service-2.json +++ b/awscli/botocore/data/iot/2015-05-28/service-2.json @@ -1788,7 +1788,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Retrieves the encryption configuration for resources and data of your Amazon Web Services account in Amazon Web Services IoT Core. For more information, see Key management in IoT from the Amazon Web Services IoT Core Developer Guide.

" + "documentation":"

Retrieves the encryption configuration for resources and data of your Amazon Web Services account in Amazon Web Services IoT Core. For more information, see Data encryption at rest in the Amazon Web Services IoT Core Developer Guide.

" }, "DescribeEndpoint":{ "name":"DescribeEndpoint", @@ -4097,7 +4097,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Transfers the specified certificate to the specified Amazon Web Services account.

Requires permission to access the TransferCertificate action.

You can cancel the transfer until it is acknowledged by the recipient.

No notification is sent to the transfer destination's account. It's up to the caller to notify the transfer target.

The certificate being transferred must not be in the ACTIVE state. You can use the UpdateCertificate action to deactivate it.

The certificate must not have any policies attached to it. You can use the DetachPolicy action to detach them.

Customer managed key behavior: When you use a customer managed key to secure your data and then transfer the key to a customer in a different account using the TransferCertificate operation, the certificates will no longer be protected by their customer managed key configuration. During the transfer process, certificates are encrypted using IoT owned keys.

While a certificate is in the PENDING_TRANSFER state, it's always protected by IoT owned keys, regardless of the customer managed key configuration of either the source or destination account.

Once the transfer is completed through AcceptCertificateTransfer, RejectCertificateTransfer, or CancelCertificateTransfer, the certificate will be protected by the customer managed key configuration of the account that owns the certificate after the transfer operation:

" + "documentation":"

Transfers the specified certificate to the specified Amazon Web Services account.

Requires permission to access the TransferCertificate action.

You can cancel the transfer until it is accepted by the recipient.

No notification is sent to the transfer destination's account. The caller is responsible for notifying the transfer target.

The certificate being transferred must not be in the ACTIVE state. You can use the UpdateCertificate action to deactivate it.

The certificate must not have any policies attached to it. You can use the DetachPolicy action to detach them.

Customer managed key behavior: When you use a customer managed key to encrypt your data and then transfer the certificate to a customer in a different account using the TransferCertificate operation, the certificates will no longer be encrypted by their customer managed key configuration. During the transfer process, certificates are encrypted using Amazon Web Services IoT Core owned keys.

While a certificate is in the PENDING_TRANSFER state, it's always protected by Amazon Web Services IoT Core owned keys, regardless of the customer managed key configuration of either the source or destination account.

Once the transfer is completed through AcceptCertificateTransfer, RejectCertificateTransfer, or CancelCertificateTransfer, the certificate will be protected by the customer managed key configuration of the account that owns the certificate after the transfer operation:

" }, "UntagResource":{ "name":"UntagResource", @@ -4336,7 +4336,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Updates the encryption configuration. By default, all Amazon Web Services IoT Core data at rest is encrypted using Amazon Web Services owned keys. Amazon Web Services IoT Core also supports symmetric customer managed keys from Amazon Web Services Key Management Service (KMS). With customer managed keys, you create, own, and manage the KMS keys in your Amazon Web Services account. For more information, see Data encryption in the Amazon Web Services IoT Core Developer Guide.

" + "documentation":"

Updates the encryption configuration. By default, Amazon Web Services IoT Core encrypts your data at rest using Amazon Web Services owned keys. Amazon Web Services IoT Core also supports symmetric customer managed keys from Key Management Service (KMS). With customer managed keys, you create, own, and manage the KMS keys in your Amazon Web Services account.

Before using this API, you must set up permissions for Amazon Web Services IoT Core to access KMS. For more information, see Data encryption at rest in the Amazon Web Services IoT Core Developer Guide.

" }, "UpdateEventConfigurations":{ "name":"UpdateEventConfigurations", @@ -6096,6 +6096,35 @@ "documentation":"

Specifies the amount of time each device has to finish its execution of the job. A timer is started when the job execution status is set to IN_PROGRESS. If the job execution status is not set to another terminal state before the timer expires, it will be automatically set to TIMED_OUT.

" }, "AwsJobTimeoutInProgressTimeoutInMinutes":{"type":"long"}, + "AwsJsonSubstitutionCommandPreprocessorConfig":{ + "type":"structure", + "required":["outputFormat"], + "members":{ + "outputFormat":{ + "shape":"OutputFormat", + "documentation":"

Converts the command preprocessor result to the format defined by this parameter, before sending it to the device.

" + } + }, + "documentation":"

Configures the command to treat the payloadTemplate as a JSON document for preprocessing. This preprocessor substitutes placeholders with parameter values to generate the command execution request payload.

" + }, + "BatchConfig":{ + "type":"structure", + "members":{ + "maxBatchOpenMs":{ + "shape":"MaxBatchOpenMs", + "documentation":"

The maximum amount of time (in milliseconds) that an outgoing call waits for other calls with which it batches messages of the same type. The higher the setting, the longer the latency of the batched HTTP Action will be.

" + }, + "maxBatchSize":{ + "shape":"MaxBatchSize", + "documentation":"

The maximum number of messages that are batched together in a single action execution.

" + }, + "maxBatchSizeBytes":{ + "shape":"MaxBatchSizeBytes", + "documentation":"

Maximum size of a message batch, in bytes.

" + } + }, + "documentation":"

Configuration settings for batching.

" + }, "BatchMode":{"type":"boolean"}, "BeforeSubstitutionFlag":{"type":"boolean"}, "Behavior":{ @@ -7089,14 +7118,22 @@ "shape":"CommandParameterName", "documentation":"

The name of a specific parameter used in a command and command execution.

" }, + "type":{ + "shape":"CommandParameterType", + "documentation":"

The type of the command parameter.

" + }, "value":{ "shape":"CommandParameterValue", - "documentation":"

The value used to describe the command. When you assign a value to a parameter, it will override any default value that you had already specified.

" + "documentation":"

Parameter value that overrides the default value, if set.

" }, "defaultValue":{ "shape":"CommandParameterValue", "documentation":"

The default value used to describe the command. This is the value assumed by the parameter if no other value is assigned to it.

" }, + "valueConditions":{ + "shape":"CommandParameterValueConditionList", + "documentation":"

The list of conditions that a command parameter value must satisfy to create a command execution.

" + }, "description":{ "shape":"CommandParameterDescription", "documentation":"

The description of the command parameter.

" @@ -7120,6 +7157,18 @@ "min":1, "pattern":"^[.$a-zA-Z0-9_-]+$" }, + "CommandParameterType":{ + "type":"string", + "enum":[ + "STRING", + "INTEGER", + "DOUBLE", + "LONG", + "UNSIGNEDLONG", + "BOOLEAN", + "BINARY" + ] + }, "CommandParameterValue":{ "type":"structure", "members":{ @@ -7152,7 +7201,95 @@ "documentation":"

An attribute of type unsigned long.

" } }, - "documentation":"

The range of possible values that's used to describe a specific command parameter.

The commandParameterValue can only have one of the below fields listed.

" + "documentation":"

The value of a command parameter used to create a command execution.

The commandParameterValue can only have one of the below fields listed.

" + }, + "CommandParameterValueComparisonOperand":{ + "type":"structure", + "members":{ + "number":{ + "shape":"StringParameterValue", + "documentation":"

An operand of number value type, defined as a string.

" + }, + "numbers":{ + "shape":"CommandParameterValueStringList", + "documentation":"

A List of operands of numerical value type, defined as strings.

" + }, + "string":{ + "shape":"StringParameterValue", + "documentation":"

An operand of string value type.

" + }, + "strings":{ + "shape":"CommandParameterValueStringList", + "documentation":"

A List of operands of string value type.

" + }, + "numberRange":{ + "shape":"CommandParameterValueNumberRange", + "documentation":"

An operand of numerical range value type.

" + } + }, + "documentation":"

The comparison operand used to compare the defined value against the value supplied in request.

" + }, + "CommandParameterValueComparisonOperator":{ + "type":"string", + "enum":[ + "EQUALS", + "NOT_EQUALS", + "LESS_THAN", + "LESS_THAN_EQUALS", + "GREATER_THAN", + "GREATER_THAN_EQUALS", + "IN_SET", + "NOT_IN_SET", + "IN_RANGE", + "NOT_IN_RANGE" + ] + }, + "CommandParameterValueCondition":{ + "type":"structure", + "required":[ + "comparisonOperator", + "operand" + ], + "members":{ + "comparisonOperator":{ + "shape":"CommandParameterValueComparisonOperator", + "documentation":"

The comparison operator for the command parameter.

IN_RANGE, and NOT_IN_RANGE operators include boundary values.

" + }, + "operand":{ + "shape":"CommandParameterValueComparisonOperand", + "documentation":"

The comparison operand for the command parameter.

" + } + }, + "documentation":"

A condition for the command parameter that must be evaluated to true for successful creation of a command execution.

" + }, + "CommandParameterValueConditionList":{ + "type":"list", + "member":{"shape":"CommandParameterValueCondition"}, + "min":1 + }, + "CommandParameterValueNumberRange":{ + "type":"structure", + "required":[ + "min", + "max" + ], + "members":{ + "min":{ + "shape":"StringParameterValue", + "documentation":"

The minimum value of a numerical range of a command parameter value.

" + }, + "max":{ + "shape":"StringParameterValue", + "documentation":"

The maximum value of a numerical range of a command parameter value.

" + } + }, + "documentation":"

The numerical range value type to compare a command parameter value against.

" + }, + "CommandParameterValueStringList":{ + "type":"list", + "member":{"shape":"StringParameterValue"}, + "max":10, + "min":1 }, "CommandPayload":{ "type":"structure", @@ -7169,6 +7306,20 @@ "documentation":"

The command payload object that contains the instructions for the device to process.

" }, "CommandPayloadBlob":{"type":"blob"}, + "CommandPayloadTemplateString":{ + "type":"string", + "max":32768 + }, + "CommandPreprocessor":{ + "type":"structure", + "members":{ + "awsJsonSubstitution":{ + "shape":"AwsJsonSubstitutionCommandPreprocessorConfig", + "documentation":"

Configuration for the JSON substitution preprocessor.

" + } + }, + "documentation":"

Configuration that determines how the payloadTemplate is processed by the service to generate the final payload sent to devices at StartCommandExecution API invocation.

" + }, "CommandSummary":{ "type":"structure", "members":{ @@ -7263,7 +7414,7 @@ "members":{ "configurationStatus":{ "shape":"ConfigurationStatus", - "documentation":"

The health status of KMS key and KMS access role. If either KMS key or KMS access role is UNHEALTHY, the return value will be UNHEALTHY. To use a customer-managed KMS key, the value of configurationStatus must be HEALTHY.

" + "documentation":"

The health status of KMS key and KMS access role. If either KMS key or KMS access role is UNHEALTHY, the return value will be UNHEALTHY. To use a customer managed KMS key, the value of configurationStatus must be HEALTHY.

" }, "errorCode":{ "shape":"ErrorCode", @@ -7274,7 +7425,7 @@ "documentation":"

The detailed error message that corresponds to the errorCode.

" } }, - "documentation":"

The encryption configuration details that include the status information of the Amazon Web Services Key Management Service (KMS) key and the KMS access role.

" + "documentation":"

The encryption configuration details that include the status information of the Key Management Service (KMS) key and the KMS access role.

" }, "ConfigurationStatus":{ "type":"string", @@ -7594,15 +7745,23 @@ }, "payload":{ "shape":"CommandPayload", - "documentation":"

The payload object for the command. You must specify this information when using the AWS-IoT namespace.

You can upload a static payload file from your local storage that contains the instructions for the device to process. The payload file can use any format. To make sure that the device correctly interprets the payload, we recommend you to specify the payload content type.

" + "documentation":"

The payload object for the static command.

You can upload a static payload file from your local storage that contains the instructions for the device to process. The payload file can use any format. To make sure that the device correctly interprets the payload, we recommend you to specify the payload content type.

" + }, + "payloadTemplate":{ + "shape":"CommandPayloadTemplateString", + "documentation":"

The payload template for the dynamic command.

This parameter is required for dynamic commands where the command execution placeholders are supplied either from mandatoryParameters or when StartCommandExecution is invoked.

" + }, + "preprocessor":{ + "shape":"CommandPreprocessor", + "documentation":"

Configuration that determines how payloadTemplate is processed to generate command execution payload.

This parameter is required for dynamic commands, along with payloadTemplate, and mandatoryParameters.

" }, "mandatoryParameters":{ "shape":"CommandParameterList", - "documentation":"

A list of parameters that are required by the StartCommandExecution API. These parameters need to be specified only when using the AWS-IoT-FleetWise namespace. You can either specify them here or when running the command using the StartCommandExecution API.

" + "documentation":"

A list of parameters that are used by StartCommandExecution API for execution payload generation.

" }, "roleArn":{ "shape":"RoleArn", - "documentation":"

The IAM role that you must provide when using the AWS-IoT-FleetWise namespace. The role grants IoT Device Management the permission to access IoT FleetWise resources for generating the payload for the command. This field is not required when you use the AWS-IoT namespace.

" + "documentation":"

The IAM role that you must provide when using the AWS-IoT-FleetWise namespace. The role grants IoT Device Management the permission to access IoT FleetWise resources for generating the payload for the command. This field is not supported when you use the AWS-IoT namespace.

" }, "tags":{ "shape":"TagList", @@ -10299,15 +10458,15 @@ "members":{ "encryptionType":{ "shape":"EncryptionType", - "documentation":"

The type of the Amazon Web Services Key Management Service (KMS) key.

" + "documentation":"

The type of the KMS key.

" }, "kmsKeyArn":{ "shape":"KmsKeyArn", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role assumed by Amazon Web Services IoT Core to call KMS on behalf of the customer.

" + "documentation":"

The ARN of the customer managed KMS key.

" }, "kmsAccessRoleArn":{ "shape":"KmsAccessRoleArn", - "documentation":"

The ARN of the customer-managed KMS key.

" + "documentation":"

The Amazon Resource Name (ARN) of the IAM role assumed by Amazon Web Services IoT Core to call KMS on behalf of the customer.

" }, "configurationDetails":{ "shape":"ConfigurationDetails", @@ -11781,6 +11940,7 @@ "ElasticsearchId":{"type":"string"}, "ElasticsearchIndex":{"type":"string"}, "ElasticsearchType":{"type":"string"}, + "EnableBatching":{"type":"boolean"}, "EnableCachingForHttp":{"type":"boolean"}, "EnableIoTLoggingParams":{ "type":"structure", @@ -12358,6 +12518,14 @@ "shape":"CommandPayload", "documentation":"

The payload object that you provided for the command.

" }, + "payloadTemplate":{ + "shape":"CommandPayloadTemplateString", + "documentation":"

The payload template for the dynamic command.

" + }, + "preprocessor":{ + "shape":"CommandPreprocessor", + "documentation":"

Configuration that determines how payloadTemplate is processed to generate command execution payload.

" + }, "roleArn":{ "shape":"RoleArn", "documentation":"

The IAM role that you provided when creating the command with AWS-IoT-FleetWise as the namespace.

" @@ -12895,7 +13063,14 @@ }, "GetV2LoggingOptionsRequest":{ "type":"structure", - "members":{} + "members":{ + "verbose":{ + "shape":"VerboseFlag", + "documentation":"

The flag is used to get all the event types and their respective configuration that event-based logging supports.

", + "location":"querystring", + "locationName":"verbose" + } + } }, "GetV2LoggingOptionsResponse":{ "type":"structure", @@ -12911,6 +13086,10 @@ "disableAllLogs":{ "shape":"DisableAllLogs", "documentation":"

Disables all logs.

" + }, + "eventConfigurations":{ + "shape":"LogEventConfigurations", + "documentation":"

The list of event configurations that override account-level logging.

" } } }, @@ -12962,6 +13141,14 @@ "auth":{ "shape":"HttpAuthorization", "documentation":"

The authentication method to use when sending data to an HTTPS endpoint.

" + }, + "enableBatching":{ + "shape":"EnableBatching", + "documentation":"

Whether to process the HTTP action messages into a single request. Value can be true or false.

" + }, + "batchConfig":{ + "shape":"BatchConfig", + "documentation":"

The configuration settings for batching. For more information, see Batching HTTP action messages.

" } }, "documentation":"

Send data to an HTTPS endpoint.

" @@ -16722,6 +16909,40 @@ }, "documentation":"

Describes how to interpret an application-defined timestamp value from an MQTT message payload and the precision of that value.

" }, + "LogDestination":{ + "type":"string", + "max":512, + "min":1, + "pattern":"^[.\\-_/#A-Za-z0-9]+$" + }, + "LogEventConfiguration":{ + "type":"structure", + "required":["eventType"], + "members":{ + "eventType":{ + "shape":"LogEventType", + "documentation":"

The type of event to log. These include event types like Connect, Publish, and Disconnect.

" + }, + "logLevel":{ + "shape":"LogLevel", + "documentation":"

The logging level for the specified event type. Determines the verbosity of log messages generated for this event type.

" + }, + "logDestination":{ + "shape":"LogDestination", + "documentation":"

CloudWatch Log Group for event-based logging. Specifies where log events should be sent. The log destination for event-based logging overrides default Log Group for the specified event type and applies to all resources associated with that event.

" + } + }, + "documentation":"

Configuration for event-based logging that specifies which event types to log and their logging settings. Used for account-level logging overrides.

" + }, + "LogEventConfigurations":{ + "type":"list", + "member":{"shape":"LogEventConfiguration"} + }, + "LogEventType":{ + "type":"string", + "max":512, + "min":1 + }, "LogGroupName":{"type":"string"}, "LogLevel":{ "type":"string", @@ -16882,6 +17103,21 @@ "max":1024, "pattern":"[A-Za-z0-9+/]+={0,2}" }, + "MaxBatchOpenMs":{ + "type":"integer", + "max":200, + "min":5 + }, + "MaxBatchSize":{ + "type":"integer", + "max":10, + "min":2 + }, + "MaxBatchSizeBytes":{ + "type":"integer", + "max":131072, + "min":100 + }, "MaxBuckets":{ "type":"integer", "max":10000, @@ -17531,6 +17767,13 @@ "type":"list", "member":{"shape":"OutgoingCertificate"} }, + "OutputFormat":{ + "type":"string", + "enum":[ + "JSON", + "CBOR" + ] + }, "OverrideDynamicGroups":{"type":"boolean"}, "PackageArn":{"type":"string"}, "PackageCatalogMaxResults":{ @@ -19276,6 +19519,10 @@ "disableAllLogs":{ "shape":"DisableAllLogs", "documentation":"

If true all logs are disabled. The default is false.

" + }, + "eventConfigurations":{ + "shape":"LogEventConfigurations", + "documentation":"

The list of event configurations that override account-level logging.

" } } }, @@ -20069,7 +20316,7 @@ "members":{ "principal":{ "shape":"Principal", - "documentation":"

The principal. Valid principals are CertificateArn (arn:aws:iot:region:accountId:cert/certificateId), thingGroupArn (arn:aws:iot:region:accountId:thinggroup/groupName) and CognitoId (region:id).

" + "documentation":"

The principal. Valid principals are CertificateArn (arn:aws:iot:region:accountId:cert/certificateId) and CognitoId (region:id).

" }, "cognitoIdentityPoolId":{ "shape":"CognitoIdentityPoolId", @@ -21533,11 +21780,11 @@ "members":{ "encryptionType":{ "shape":"EncryptionType", - "documentation":"

The type of the Amazon Web Services Key Management Service (KMS) key.

" + "documentation":"

The type of the KMS key.

" }, "kmsKeyArn":{ "shape":"KmsKeyArn", - "documentation":"

The ARN of the customer-managed KMS key.

" + "documentation":"

The ARN of the customer managed KMS key.

" }, "kmsAccessRoleArn":{ "shape":"KmsAccessRoleArn", @@ -22305,6 +22552,7 @@ "pattern":"[\\s\\S]*" }, "Variance":{"type":"double"}, + "VerboseFlag":{"type":"boolean"}, "VerificationState":{ "type":"string", "enum":[ diff --git a/awscli/botocore/data/ivs-realtime/2020-07-14/service-2.json b/awscli/botocore/data/ivs-realtime/2020-07-14/service-2.json index 5ba7b327f042..337178bdcee7 100644 --- a/awscli/botocore/data/ivs-realtime/2020-07-14/service-2.json +++ b/awscli/botocore/data/ivs-realtime/2020-07-14/service-2.json @@ -1535,6 +1535,14 @@ "replica":{ "shape":"Replica", "documentation":"

If true, this indicates the participantId is a replicated participant. If this is a subscribe event, then this flag refers to remoteParticipantId. Default: false.

" + }, + "previousToken":{ + "shape":"ExchangedParticipantToken", + "documentation":"

Source participant token for TOKEN_EXCHANGED event.

" + }, + "newToken":{ + "shape":"ExchangedParticipantToken", + "documentation":"

Participant token created during TOKEN_EXCHANGED event.

" } }, "documentation":"

An occurrence during a stage session.

" @@ -1575,9 +1583,32 @@ "SUBSCRIBE_ERROR", "JOIN_ERROR", "REPLICATION_STARTED", - "REPLICATION_STOPPED" + "REPLICATION_STOPPED", + "TOKEN_EXCHANGED" ] }, + "ExchangedParticipantToken":{ + "type":"structure", + "members":{ + "capabilities":{ + "shape":"ParticipantTokenCapabilities", + "documentation":"

Set of capabilities that the user is allowed to perform in the stage.

" + }, + "attributes":{ + "shape":"ParticipantTokenAttributes", + "documentation":"

Application-provided attributes to encode into the token and attach to a stage. Map keys and values can contain UTF-8 encoded text. The maximum length of this field is 1 KB total. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information.

" + }, + "userId":{ + "shape":"ParticipantTokenUserId", + "documentation":"

Customer-assigned name to help identify the token; this can be used to link a participant to a user in the customer’s own systems. This can be any UTF-8 encoded text. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information.

" + }, + "expirationTime":{ + "shape":"ParticipantTokenExpirationTime", + "documentation":"

ISO 8601 timestamp (returned as a string) for when this token expires.

" + } + }, + "documentation":"

Object specifying an exchanged participant token in a stage, created when an original participant token is updated.

Important: Treat tokens as opaque; i.e., do not build functionality based on token contents. The format of tokens could change in the future.

" + }, "Framerate":{ "type":"float", "box":true, @@ -2780,7 +2811,7 @@ }, "attributes":{ "shape":"ParticipantTokenAttributes", - "documentation":"

Application-provided attributes to encode into the token and attach to a stage. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information.

" + "documentation":"

Application-provided attributes to encode into the token and attach to a stage. Map keys and values can contain UTF-8 encoded text. The maximum length of this field is 1 KB total. This field is exposed to all stage participants and should not be used for personally identifying, confidential, or sensitive information.

" }, "duration":{ "shape":"ParticipantTokenDurationMinutes", diff --git a/awscli/botocore/data/kafkaconnect/2021-09-14/endpoint-rule-set-1.json b/awscli/botocore/data/kafkaconnect/2021-09-14/endpoint-rule-set-1.json index af06d15fb158..9a6bf3aa37c2 100644 --- a/awscli/botocore/data/kafkaconnect/2021-09-14/endpoint-rule-set-1.json +++ b/awscli/botocore/data/kafkaconnect/2021-09-14/endpoint-rule-set-1.json @@ -1,12 +1,6 @@ { "version": "1.0", "parameters": { - "Region": { - "builtIn": "AWS::Region", - "required": false, - "documentation": "The AWS region used to dispatch the request.", - "type": "string" - }, "UseDualStack": { "builtIn": "AWS::UseDualStack", "required": true, @@ -26,6 +20,12 @@ "required": false, "documentation": "Override the endpoint used to send this request", "type": "string" + }, + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "string" } }, "rules": [ @@ -57,258 +57,374 @@ "type": "error" }, { - "conditions": [ + "conditions": [], + "rules": [ { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" }, - true - ] + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ], - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", - "type": "error" - }, - { - "conditions": [], - "endpoint": { - "url": { - "ref": "Endpoint" - }, - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "type": "tree" } ], "type": "tree" }, { - "conditions": [ - { - "fn": "isSet", - "argv": [ - { - "ref": "Region" - } - ] - } - ], + "conditions": [], "rules": [ { "conditions": [ { - "fn": "aws.partition", + "fn": "isSet", "argv": [ { "ref": 
"Region" } - ], - "assign": "PartitionResult" + ] } ], "rules": [ { "conditions": [ { - "fn": "booleanEquals", - "argv": [ - { - "ref": "UseFIPS" - }, - true - ] - }, - { - "fn": "booleanEquals", + "fn": "aws.partition", "argv": [ { - "ref": "UseDualStack" - }, - true - ] + "ref": "Region" + } + ], + "assign": "PartitionResult" } ], "rules": [ { "conditions": [ { - "fn": "booleanEquals", + "fn": "stringEquals", "argv": [ - true, { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsFIPS" + "name" ] - } + }, + "aws-us-gov" + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true ] }, { "fn": "booleanEquals", "argv": [ - true, + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "endpoint": { + "url": "https://kafkaconnect.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + }, + { + "conditions": [ + { + "fn": "stringEquals", + "argv": [ { "fn": "getAttr", "argv": [ { "ref": "PartitionResult" }, - "supportsDualStack" + "name" ] - } + }, + "aws-us-gov" ] - } - ], - "rules": [ + }, { - "conditions": [], - "endpoint": { - "url": "https://kafkaconnect-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] } ], - "type": "tree" + "endpoint": { + "url": "https://kafkaconnect.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" }, { - "conditions": [], - "error": "FIPS and DualStack are enabled, but this partition does not support one or both", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseFIPS" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] }, - true - ] - } - ], - "rules": 
[ - { - "conditions": [ { "fn": "booleanEquals", "argv": [ { - "fn": "getAttr", - "argv": [ - { - "ref": "PartitionResult" - }, - "supportsFIPS" - ] + "ref": "UseDualStack" }, true ] } ], "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://kafkaconnect-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, { "conditions": [], - "endpoint": { - "url": "https://kafkaconnect-fips.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" } ], "type": "tree" }, { - "conditions": [], - "error": "FIPS is enabled but this partition does not support FIPS", - "type": "error" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "booleanEquals", - "argv": [ + "conditions": [ { - "ref": "UseDualStack" + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] }, - true - ] - } - ], - "rules": [ - { - "conditions": [ { "fn": "booleanEquals", "argv": [ - true, { - "fn": "getAttr", + "ref": "UseDualStack" + }, + false + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", "argv": [ { - "ref": "PartitionResult" + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] }, - "supportsDualStack" + true ] } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://kafkaconnect-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } 
+ ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true ] } ], "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://kafkaconnect.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, { "conditions": [], - "endpoint": { - "url": "https://kafkaconnect.{Region}.{PartitionResult#dualStackDnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" } ], "type": "tree" }, { "conditions": [], - "error": "DualStack is enabled but this partition does not support DualStack", - "type": "error" + "endpoint": { + "url": "https://kafkaconnect.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" } ], "type": "tree" - }, - { - "conditions": [], - "endpoint": { - "url": "https://kafkaconnect.{Region}.{PartitionResult#dnsSuffix}", - "properties": {}, - "headers": {} - }, - "type": "endpoint" } ], "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" } ], "type": "tree" - }, - { - "conditions": [], - "error": "Invalid Configuration: Missing Region", - "type": "error" } ] } \ No newline at end of file diff --git a/awscli/botocore/data/kafkaconnect/2021-09-14/service-2.json b/awscli/botocore/data/kafkaconnect/2021-09-14/service-2.json index 4cb6bb79e6fc..c24afee203c5 100644 
--- a/awscli/botocore/data/kafkaconnect/2021-09-14/service-2.json +++ b/awscli/botocore/data/kafkaconnect/2021-09-14/service-2.json @@ -24,8 +24,8 @@ "input":{"shape":"CreateConnectorRequest"}, "output":{"shape":"CreateConnectorResponse"}, "errors":[ - {"shape":"NotFoundException"}, {"shape":"ConflictException"}, + {"shape":"NotFoundException"}, {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, {"shape":"ServiceUnavailableException"}, @@ -45,8 +45,8 @@ "input":{"shape":"CreateCustomPluginRequest"}, "output":{"shape":"CreateCustomPluginResponse"}, "errors":[ - {"shape":"NotFoundException"}, {"shape":"ConflictException"}, + {"shape":"NotFoundException"}, {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, {"shape":"ServiceUnavailableException"}, @@ -66,8 +66,8 @@ "input":{"shape":"CreateWorkerConfigurationRequest"}, "output":{"shape":"CreateWorkerConfigurationResponse"}, "errors":[ - {"shape":"NotFoundException"}, {"shape":"ConflictException"}, + {"shape":"NotFoundException"}, {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, {"shape":"ServiceUnavailableException"}, @@ -158,7 +158,8 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Returns summary information about the connector.

" + "documentation":"

Returns summary information about the connector.

", + "readonly":true }, "DescribeConnectorOperation":{ "name":"DescribeConnectorOperation", @@ -178,7 +179,8 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Returns information about the specified connector's operations.

" + "documentation":"

Returns information about the specified connector's operations.

", + "readonly":true }, "DescribeCustomPlugin":{ "name":"DescribeCustomPlugin", @@ -198,7 +200,8 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

A summary description of the custom plugin.

" + "documentation":"

A summary description of the custom plugin.

", + "readonly":true }, "DescribeWorkerConfiguration":{ "name":"DescribeWorkerConfiguration", @@ -218,7 +221,8 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Returns information about a worker configuration.

" + "documentation":"

Returns information about a worker configuration.

", + "readonly":true }, "ListConnectorOperations":{ "name":"ListConnectorOperations", @@ -238,7 +242,8 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Lists information about a connector's operation(s).

" + "documentation":"

Lists information about a connector's operation(s).

", + "readonly":true }, "ListConnectors":{ "name":"ListConnectors", @@ -258,7 +263,8 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Returns a list of all the connectors in this account and Region. The list is limited to connectors whose name starts with the specified prefix. The response also includes a description of each of the listed connectors.

" + "documentation":"

Returns a list of all the connectors in this account and Region. The list is limited to connectors whose name starts with the specified prefix. The response also includes a description of each of the listed connectors.

", + "readonly":true }, "ListCustomPlugins":{ "name":"ListCustomPlugins", @@ -278,7 +284,8 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Returns a list of all of the custom plugins in this account and Region.

" + "documentation":"

Returns a list of all of the custom plugins in this account and Region.

", + "readonly":true }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -298,7 +305,8 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Lists all the tags attached to the specified resource.

" + "documentation":"

Lists all the tags attached to the specified resource.

", + "readonly":true }, "ListWorkerConfigurations":{ "name":"ListWorkerConfigurations", @@ -318,7 +326,8 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Returns a list of all of the worker configurations in this account and Region.

" + "documentation":"

Returns a list of all of the worker configurations in this account and Region.

", + "readonly":true }, "TagResource":{ "name":"TagResource", @@ -330,8 +339,8 @@ "input":{"shape":"TagResourceRequest"}, "output":{"shape":"TagResourceResponse"}, "errors":[ - {"shape":"NotFoundException"}, {"shape":"ConflictException"}, + {"shape":"NotFoundException"}, {"shape":"BadRequestException"}, {"shape":"ForbiddenException"}, {"shape":"ServiceUnavailableException"}, @@ -381,7 +390,7 @@ {"shape":"UnauthorizedException"}, {"shape":"InternalServerErrorException"} ], - "documentation":"

Updates the specified connector.

", + "documentation":"

Updates the specified connector. For request body, specify only one parameter: either capacity or connectorConfiguration.

", "idempotent":true } }, @@ -757,6 +766,10 @@ "shape":"LogDeliveryDescription", "documentation":"

The settings for delivering connector logs to Amazon CloudWatch Logs.

" }, + "networkType":{ + "shape":"NetworkType", + "documentation":"

The network type of the connector. It gives connectors connectivity to either IPv4 (IPV4) or IPv4 and IPv6 (DUAL) destinations. Defaults to IPV4.

" + }, "plugins":{ "shape":"__listOfPluginDescription", "documentation":"

Specifies which plugins were used for this connector.

" @@ -822,9 +835,13 @@ "shape":"LogDelivery", "documentation":"

Details about log delivery.

" }, + "networkType":{ + "shape":"NetworkType", + "documentation":"

The network type of the connector. It gives connectors connectivity to either IPv4 (IPV4) or IPv4 and IPv6 (DUAL) destinations. Defaults to IPV4.

" + }, "plugins":{ "shape":"__listOfPlugin", - "documentation":"

Amazon MSK Connect does not currently support specifying multiple plugins as a list. To use more than one plugin for your connector, you can create a single custom plugin using a ZIP file that bundles multiple plugins together.

Specifies which plugin to use for the connector. You must specify a single-element list containing one customPlugin object.

" + "documentation":"

Amazon MSK Connect does not currently support specifying multiple plugins as a list. To use more than one plugin for your connector, you can create a single custom plugin using a ZIP file that bundles multiple plugins together.

Specifies which plugin to use for the connector. You must specify a single-element list containing one customPlugin object.

" }, "serviceExecutionRoleArn":{ "shape":"__string", @@ -1313,6 +1330,10 @@ "shape":"LogDeliveryDescription", "documentation":"

Details about delivering logs to Amazon CloudWatch Logs.

" }, + "networkType":{ + "shape":"NetworkType", + "documentation":"

The network type of the connector. It gives connectors connectivity to either IPv4 (IPV4) or IPv4 and IPv6 (DUAL) destinations. Defaults to IPV4.

" + }, "plugins":{ "shape":"__listOfPluginDescription", "documentation":"

Specifies which plugins were used for this connector.

" @@ -1738,6 +1759,14 @@ "max":100, "min":1 }, + "NetworkType":{ + "type":"string", + "documentation":"

The network type of a connector.

", + "enum":[ + "IPV4", + "DUAL" + ] + }, "NotFoundException":{ "type":"structure", "members":{ @@ -2018,8 +2047,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -2080,8 +2108,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateConnectorRequest":{ "type":"structure", diff --git a/awscli/botocore/data/lambda/2015-03-31/paginators-1.json b/awscli/botocore/data/lambda/2015-03-31/paginators-1.json index 0cad2f940fae..a8a19ab72419 100644 --- a/awscli/botocore/data/lambda/2015-03-31/paginators-1.json +++ b/awscli/botocore/data/lambda/2015-03-31/paginators-1.json @@ -77,6 +77,24 @@ "output_token": "NextMarker", "limit_key": "MaxItems", "result_key": "FunctionVersions" + }, + "GetDurableExecutionHistory": { + "input_token": "Marker", + "output_token": "NextMarker", + "limit_key": "MaxItems", + "result_key": "Events" + }, + "GetDurableExecutionState": { + "input_token": "Marker", + "output_token": "NextMarker", + "limit_key": "MaxItems", + "result_key": "Operations" + }, + "ListDurableExecutionsByFunction": { + "input_token": "Marker", + "output_token": "NextMarker", + "limit_key": "MaxItems", + "result_key": "DurableExecutions" } } } diff --git a/awscli/botocore/data/lambda/2015-03-31/service-2.json b/awscli/botocore/data/lambda/2015-03-31/service-2.json index 7ff3d8c071e1..4b0dcb5e83f6 100644 --- a/awscli/botocore/data/lambda/2015-03-31/service-2.json +++ b/awscli/botocore/data/lambda/2015-03-31/service-2.json @@ -53,6 +53,23 @@ ], "documentation":"

Grants a principal permission to use a function. You can apply the policy at the function level, or specify a qualifier to restrict access to a single version or alias. If you use a qualifier, the invoker must use the full Amazon Resource Name (ARN) of that version or alias to invoke the function. Note: Lambda does not support adding policies to version $LATEST.

To grant permission to another account, specify the account ID as the Principal. To grant permission to an organization defined in Organizations, specify the organization ID as the PrincipalOrgID. For Amazon Web Services services, the principal is a domain-style identifier that the service defines, such as s3.amazonaws.com or sns.amazonaws.com. For Amazon Web Services services, you can also specify the ARN of the associated resource as the SourceArn. If you grant permission to a service principal without specifying the source, other accounts could potentially configure resources in their account to invoke your Lambda function.

This operation adds a statement to a resource-based permissions policy for the function. For more information about function policies, see Using resource-based policies for Lambda.

" }, + "CheckpointDurableExecution":{ + "name":"CheckpointDurableExecution", + "http":{ + "method":"POST", + "requestUri":"/2025-12-01/durable-executions/{DurableExecutionArn}/checkpoint", + "responseCode":200 + }, + "input":{"shape":"CheckpointDurableExecutionRequest"}, + "output":{"shape":"CheckpointDurableExecutionResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ServiceException"} + ], + "documentation":"

Saves the progress of a durable function execution during runtime. This API is used by the Lambda durable functions SDK to checkpoint completed steps and schedule asynchronous operations. You typically don't need to call this API directly as the SDK handles checkpointing automatically.

Each checkpoint operation consumes the current checkpoint token and returns a new one for the next checkpoint. This ensures that checkpoints are applied in the correct order and prevents duplicate or out-of-order state updates.

", + "idempotent":true + }, "CreateAlias":{ "name":"CreateAlias", "http":{ @@ -429,6 +446,59 @@ "documentation":"

Returns information about the specified code signing configuration.

", "readonly":true }, + "GetDurableExecution":{ + "name":"GetDurableExecution", + "http":{ + "method":"GET", + "requestUri":"/2025-12-01/durable-executions/{DurableExecutionArn}", + "responseCode":200 + }, + "input":{"shape":"GetDurableExecutionRequest"}, + "output":{"shape":"GetDurableExecutionResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ServiceException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Retrieves detailed information about a specific durable execution, including its current status, input payload, result or error information, and execution metadata such as start time and usage statistics.

", + "readonly":true + }, + "GetDurableExecutionHistory":{ + "name":"GetDurableExecutionHistory", + "http":{ + "method":"GET", + "requestUri":"/2025-12-01/durable-executions/{DurableExecutionArn}/history", + "responseCode":200 + }, + "input":{"shape":"GetDurableExecutionHistoryRequest"}, + "output":{"shape":"GetDurableExecutionHistoryResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ServiceException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Retrieves the execution history for a durable execution, showing all the steps, callbacks, and events that occurred during the execution. This provides a detailed audit trail of the execution's progress over time.

The history is available while the execution is running and for a retention period after it completes (1-90 days, default 30 days). You can control whether to include execution data such as step results and callback payloads.

", + "readonly":true + }, + "GetDurableExecutionState":{ + "name":"GetDurableExecutionState", + "http":{ + "method":"GET", + "requestUri":"/2025-12-01/durable-executions/{DurableExecutionArn}/state", + "responseCode":200 + }, + "input":{"shape":"GetDurableExecutionStateRequest"}, + "output":{"shape":"GetDurableExecutionStateResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ServiceException"} + ], + "documentation":"

Retrieves the current execution state required for the replay process during durable function execution. This API is used by the Lambda durable functions SDK to get state information needed for replay. You typically don't need to call this API directly as the SDK handles state management automatically.

The response contains operations ordered by start sequence number in ascending order. Completed operations with children don't include child operation details since they don't need to be replayed.

", + "readonly":true + }, "GetEventSourceMapping":{ "name":"GetEventSourceMapping", "http":{ @@ -740,6 +810,7 @@ {"shape":"EFSMountTimeoutException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InvalidRequestContentException"}, + {"shape":"DurableExecutionAlreadyStartedException"}, {"shape":"InvalidZipFileException"}, {"shape":"EFSMountFailureException"} ], @@ -860,6 +931,24 @@ "documentation":"

Returns a list of code signing configurations. A request returns up to 10,000 configurations per call. You can use the MaxItems parameter to return fewer configurations per call.

", "readonly":true }, + "ListDurableExecutionsByFunction":{ + "name":"ListDurableExecutionsByFunction", + "http":{ + "method":"GET", + "requestUri":"/2025-12-01/functions/{FunctionName}/durable-executions", + "responseCode":200 + }, + "input":{"shape":"ListDurableExecutionsByFunctionRequest"}, + "output":{"shape":"ListDurableExecutionsByFunctionResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ServiceException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Returns a list of durable executions for a specified Lambda function. You can filter the results by execution name, status, and start time range. This API supports pagination for large result sets.

", + "readonly":true + }, "ListEventSourceMappings":{ "name":"ListEventSourceMappings", "http":{ @@ -1256,6 +1345,74 @@ ], "documentation":"

Revokes function-use permission from an Amazon Web Services service or another Amazon Web Services account. You can get the ID of the statement from the output of GetPolicy.

" }, + "SendDurableExecutionCallbackFailure":{ + "name":"SendDurableExecutionCallbackFailure", + "http":{ + "method":"POST", + "requestUri":"/2025-12-01/durable-execution-callbacks/{CallbackId}/fail", + "responseCode":200 + }, + "input":{"shape":"SendDurableExecutionCallbackFailureRequest"}, + "output":{"shape":"SendDurableExecutionCallbackFailureResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ServiceException"}, + {"shape":"CallbackTimeoutException"} + ], + "documentation":"

Sends a failure response for a callback operation in a durable execution. Use this API when an external system cannot complete a callback operation successfully.

" + }, + "SendDurableExecutionCallbackHeartbeat":{ + "name":"SendDurableExecutionCallbackHeartbeat", + "http":{ + "method":"POST", + "requestUri":"/2025-12-01/durable-execution-callbacks/{CallbackId}/heartbeat", + "responseCode":200 + }, + "input":{"shape":"SendDurableExecutionCallbackHeartbeatRequest"}, + "output":{"shape":"SendDurableExecutionCallbackHeartbeatResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ServiceException"}, + {"shape":"CallbackTimeoutException"} + ], + "documentation":"

Sends a heartbeat signal for a long-running callback operation to prevent timeout. Use this API to extend the callback timeout period while the external operation is still in progress.

" + }, + "SendDurableExecutionCallbackSuccess":{ + "name":"SendDurableExecutionCallbackSuccess", + "http":{ + "method":"POST", + "requestUri":"/2025-12-01/durable-execution-callbacks/{CallbackId}/succeed", + "responseCode":200 + }, + "input":{"shape":"SendDurableExecutionCallbackSuccessRequest"}, + "output":{"shape":"SendDurableExecutionCallbackSuccessResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ServiceException"}, + {"shape":"CallbackTimeoutException"} + ], + "documentation":"

Sends a successful completion response for a callback operation in a durable execution. Use this API when an external system has successfully completed a callback operation.

" + }, + "StopDurableExecution":{ + "name":"StopDurableExecution", + "http":{ + "method":"POST", + "requestUri":"/2025-12-01/durable-executions/{DurableExecutionArn}/stop", + "responseCode":200 + }, + "input":{"shape":"StopDurableExecutionRequest"}, + "output":{"shape":"StopDurableExecutionResponse"}, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ServiceException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Stops a running durable execution. The execution transitions to STOPPED status and cannot be resumed. Any in-progress operations are terminated.

" + }, "TagResource":{ "name":"TagResource", "http":{ @@ -1750,12 +1907,22 @@ "type":"string", "pattern":"arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\\-])+:([a-z]{2}(-gov)?-[a-z]+-\\d{1})?:(\\d{12})?:(.*)" }, + "AttemptCount":{ + "type":"integer", + "min":0 + }, "BatchSize":{ "type":"integer", "box":true, "max":10000, "min":1 }, + "BinaryOperationPayload":{ + "type":"blob", + "max":262144, + "min":0, + "sensitive":true + }, "BisectBatchOnFunctionError":{ "type":"boolean", "box":true @@ -1769,6 +1936,112 @@ "streaming":true }, "Boolean":{"type":"boolean"}, + "CallbackDetails":{ + "type":"structure", + "members":{ + "CallbackId":{ + "shape":"CallbackId", + "documentation":"

The callback ID. Callback IDs are generated by the DurableContext when a durable function calls ctx.waitForCallback.

" + }, + "Result":{ + "shape":"OperationPayload", + "documentation":"

The response payload from the callback operation as a string.

" + }, + "Error":{ + "shape":"ErrorObject", + "documentation":"

An error object that contains details about the failure.

" + } + }, + "documentation":"

Contains details about a callback operation in a durable execution, including the callback token and timeout configuration.

" + }, + "CallbackFailedDetails":{ + "type":"structure", + "required":["Error"], + "members":{ + "Error":{ + "shape":"EventError", + "documentation":"

An error object that contains details about the failure.

" + } + }, + "documentation":"

Contains details about a failed callback operation, including error information and the reason for failure.

" + }, + "CallbackId":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"[A-Za-z0-9+/]+={0,2}" + }, + "CallbackOptions":{ + "type":"structure", + "members":{ + "TimeoutSeconds":{ + "shape":"DurationSeconds", + "documentation":"

The timeout for the callback operation in seconds. If not specified or set to 0, the callback has no timeout.

" + }, + "HeartbeatTimeoutSeconds":{ + "shape":"DurationSeconds", + "documentation":"

The heartbeat timeout for the callback operation, in seconds. If not specified or set to 0, heartbeat timeout is disabled.

" + } + }, + "documentation":"

Configuration options for callback operations in durable executions, including timeout settings and retry behavior.

" + }, + "CallbackStartedDetails":{ + "type":"structure", + "required":["CallbackId"], + "members":{ + "CallbackId":{ + "shape":"CallbackId", + "documentation":"

The callback ID. Callback IDs are generated by the DurableContext when a durable function calls ctx.waitForCallback.

" + }, + "HeartbeatTimeout":{ + "shape":"DurationSeconds", + "documentation":"

The heartbeat timeout value, in seconds.

" + }, + "Timeout":{ + "shape":"DurationSeconds", + "documentation":"

The timeout value, in seconds.

" + } + }, + "documentation":"

Contains details about a callback operation that has started, including timing information and callback metadata.

" + }, + "CallbackSucceededDetails":{ + "type":"structure", + "required":["Result"], + "members":{ + "Result":{ + "shape":"EventResult", + "documentation":"

The response payload from the successful operation.

" + } + }, + "documentation":"

Contains details about a successfully completed callback operation, including the result data and completion timestamp.

" + }, + "CallbackTimedOutDetails":{ + "type":"structure", + "required":["Error"], + "members":{ + "Error":{ + "shape":"EventError", + "documentation":"

Details about the callback timeout.

" + } + }, + "documentation":"

Contains details about a callback operation that timed out, including timeout duration and any partial results.

" + }, + "CallbackTimeoutException":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"String", + "documentation":"

The exception type.

" + }, + "Message":{"shape":"String"} + }, + "documentation":"

The callback ID token has either expired or the callback associated with the token has already been closed.

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, "CapacityProvider":{ "type":"structure", "required":[ @@ -1949,6 +2222,175 @@ "max":50, "min":0 }, + "ChainedInvokeDetails":{ + "type":"structure", + "members":{ + "Result":{ + "shape":"OperationPayload", + "documentation":"

The response payload from the chained invocation.

" + }, + "Error":{ + "shape":"ErrorObject", + "documentation":"

Details about the chained invocation failure.

" + } + }, + "documentation":"

Contains details about a chained function invocation in a durable execution, including the target function and invocation parameters.

" + }, + "ChainedInvokeFailedDetails":{ + "type":"structure", + "required":["Error"], + "members":{ + "Error":{ + "shape":"EventError", + "documentation":"

Details about the chained invocation failure.

" + } + }, + "documentation":"

Contains details about a failed chained function invocation, including error information and failure reason.

" + }, + "ChainedInvokeOptions":{ + "type":"structure", + "required":["FunctionName"], + "members":{ + "FunctionName":{ + "shape":"NamespacedFunctionName", + "documentation":"

The name or ARN of the Lambda function to invoke.

" + }, + "TenantId":{ + "shape":"TenantId", + "documentation":"

The tenant identifier for the chained invocation.

" + } + }, + "documentation":"

Configuration options for chained function invocations in durable executions, including retry settings and timeout configuration.

" + }, + "ChainedInvokeStartedDetails":{ + "type":"structure", + "required":["FunctionName"], + "members":{ + "FunctionName":{ + "shape":"NamespacedFunctionName", + "documentation":"

The name or ARN of the Lambda function being invoked.

" + }, + "TenantId":{ + "shape":"TenantId", + "documentation":"

The tenant identifier for the chained invocation.

" + }, + "Input":{ + "shape":"EventInput", + "documentation":"

The JSON input payload provided to the chained invocation.

" + }, + "ExecutedVersion":{ + "shape":"VersionWithLatestPublished", + "documentation":"

The version of the function that was executed.

" + }, + "DurableExecutionArn":{ + "shape":"DurableExecutionArn", + "documentation":"

The Amazon Resource Name (ARN) that identifies the durable execution.

" + } + }, + "documentation":"

Contains details about a chained function invocation that has started execution, including start time and execution context.

" + }, + "ChainedInvokeStoppedDetails":{ + "type":"structure", + "required":["Error"], + "members":{ + "Error":{ + "shape":"EventError", + "documentation":"

Details about why the chained invocation stopped.

" + } + }, + "documentation":"

Details about a chained invocation that was stopped.

" + }, + "ChainedInvokeSucceededDetails":{ + "type":"structure", + "required":["Result"], + "members":{ + "Result":{ + "shape":"EventResult", + "documentation":"

The response payload from the successful operation.

" + } + }, + "documentation":"

Details about a chained invocation that succeeded.

" + }, + "ChainedInvokeTimedOutDetails":{ + "type":"structure", + "required":["Error"], + "members":{ + "Error":{ + "shape":"EventError", + "documentation":"

Details about the chained invocation timeout.

" + } + }, + "documentation":"

Details about a chained invocation that timed out.

" + }, + "CheckpointDurableExecutionRequest":{ + "type":"structure", + "required":[ + "DurableExecutionArn", + "CheckpointToken" + ], + "members":{ + "DurableExecutionArn":{ + "shape":"DurableExecutionArn", + "documentation":"

The Amazon Resource Name (ARN) of the durable execution.

", + "location":"uri", + "locationName":"DurableExecutionArn" + }, + "CheckpointToken":{ + "shape":"CheckpointToken", + "documentation":"

A unique token that identifies the current checkpoint state. This token is provided by the Lambda runtime and must be used to ensure checkpoints are applied in the correct order. Each checkpoint operation consumes this token and returns a new one.

" + }, + "Updates":{ + "shape":"OperationUpdates", + "documentation":"

An array of state updates to apply during this checkpoint. Each update represents a change to the execution state, such as completing a step, starting a callback, or scheduling a timer. Updates are applied atomically as part of the checkpoint operation.

" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

An optional idempotency token to ensure that duplicate checkpoint requests are handled correctly. If provided, Lambda uses this token to detect and handle duplicate requests within a 15-minute window.

", + "idempotencyToken":true + } + } + }, + "CheckpointDurableExecutionResponse":{ + "type":"structure", + "required":["NewExecutionState"], + "members":{ + "CheckpointToken":{ + "shape":"CheckpointToken", + "documentation":"

A new checkpoint token to use for the next checkpoint operation. This token replaces the one provided in the request and must be used for subsequent checkpoints to maintain proper ordering.

" + }, + "NewExecutionState":{ + "shape":"CheckpointUpdatedExecutionState", + "documentation":"

Updated execution state information that includes any changes that occurred since the last checkpoint, such as completed callbacks or expired timers. This allows the SDK to update its internal state during replay.

" + } + }, + "documentation":"

The response from the CheckpointDurableExecution operation.

" + }, + "CheckpointToken":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"[A-Za-z0-9+/]+={0,2}" + }, + "CheckpointUpdatedExecutionState":{ + "type":"structure", + "members":{ + "Operations":{ + "shape":"Operations", + "documentation":"

A list of operations that have been updated since the last checkpoint.

" + }, + "NextMarker":{ + "shape":"String", + "documentation":"

Indicates that more results are available. Use this value in a subsequent call to retrieve the next page of results.

" + } + }, + "documentation":"

Contains operations that have been updated since the last checkpoint, such as completed asynchronous work like timers or callbacks.

" + }, + "ClientToken":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[\\x21-\\x7E]+" + }, "CodeSigningConfig":{ "type":"structure", "required":[ @@ -2086,6 +2528,61 @@ } } }, + "ContextDetails":{ + "type":"structure", + "members":{ + "ReplayChildren":{ + "shape":"ReplayChildren", + "documentation":"

Whether the state data of child operations of this completed context should be included in the invoke payload and GetDurableExecutionState response.

" + }, + "Result":{ + "shape":"OperationPayload", + "documentation":"

The response payload from the context.

" + }, + "Error":{ + "shape":"ErrorObject", + "documentation":"

Details about the context failure.

" + } + }, + "documentation":"

Details about a durable execution context.

" + }, + "ContextFailedDetails":{ + "type":"structure", + "required":["Error"], + "members":{ + "Error":{ + "shape":"EventError", + "documentation":"

Details about the context failure.

" + } + }, + "documentation":"

Details about a context that failed.

" + }, + "ContextOptions":{ + "type":"structure", + "members":{ + "ReplayChildren":{ + "shape":"ReplayChildren", + "documentation":"

Whether the state data of children of the completed context should be included in the invoke payload and GetDurableExecutionState response.

" + } + }, + "documentation":"

Configuration options for a durable execution context.

" + }, + "ContextStartedDetails":{ + "type":"structure", + "members":{}, + "documentation":"

Details about a context that has started.

" + }, + "ContextSucceededDetails":{ + "type":"structure", + "required":["Result"], + "members":{ + "Result":{ + "shape":"EventResult", + "documentation":"

The JSON response payload from the successful context.

" + } + }, + "documentation":"

Details about a context that succeeded.

" + }, "Cors":{ "type":"structure", "members":{ @@ -2454,6 +2951,10 @@ "shape":"FunctionVersionLatestPublished", "documentation":"

Specifies where to publish the function version or configuration.

" }, + "DurableConfig":{ + "shape":"DurableConfig", + "documentation":"

Configuration settings for durable functions. Enables creating functions with durability that can remember their state and continue execution even after interruptions.

" + }, "TenancyConfig":{ "shape":"TenancyConfig", "documentation":"

Configuration for multi-tenant applications that use Lambda functions. Defines tenant isolation settings and resource allocations. Required for functions supporting multiple tenants.

" @@ -2789,6 +3290,57 @@ }, "documentation":"

Specific configuration settings for a DocumentDB event source.

" }, + "DurableConfig":{ + "type":"structure", + "members":{ + "RetentionPeriodInDays":{ + "shape":"RetentionPeriodInDays", + "documentation":"

The number of days to retain execution history after a durable execution completes. After this period, execution history is no longer available through the GetDurableExecutionHistory API.

" + }, + "ExecutionTimeout":{ + "shape":"ExecutionTimeout", + "documentation":"

The maximum time (in seconds) that a durable execution can run before timing out. This timeout applies to the entire durable execution, not individual function invocations.

" + } + }, + "documentation":"

Configuration settings for durable functions, including execution timeout and retention period for execution history.

" + }, + "DurableExecutionAlreadyStartedException":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"String", + "documentation":"

The exception type.

" + }, + "Message":{"shape":"String"} + }, + "documentation":"

The durable execution with the specified name has already been started. Each durable execution name must be unique within the function. Use a different name or check the status of the existing execution.

", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "DurableExecutionArn":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"arn:([a-zA-Z0-9-]+):lambda:([a-zA-Z0-9-]+):(\\d{12}):function:([a-zA-Z0-9_-]+):(\\$LATEST(?:\\.PUBLISHED)?|[0-9]+)/durable-execution/([a-zA-Z0-9_-]+)/([a-zA-Z0-9_-]+)" + }, + "DurableExecutionName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[a-zA-Z0-9-_]+" + }, + "DurableExecutions":{ + "type":"list", + "member":{"shape":"Execution"} + }, + "DurationSeconds":{ + "type":"integer", + "box":true, + "min":0 + }, "EC2AccessDeniedException":{ "type":"structure", "members":{ @@ -2983,6 +3535,199 @@ "max":10240, "min":512 }, + "ErrorData":{ + "type":"string", + "sensitive":true + }, + "ErrorMessage":{ + "type":"string", + "sensitive":true + }, + "ErrorObject":{ + "type":"structure", + "members":{ + "ErrorMessage":{ + "shape":"ErrorMessage", + "documentation":"

A human-readable error message.

" + }, + "ErrorType":{ + "shape":"ErrorType", + "documentation":"

The error type.

" + }, + "ErrorData":{ + "shape":"ErrorData", + "documentation":"

Machine-readable error data.

" + }, + "StackTrace":{ + "shape":"StackTraceEntries", + "documentation":"

Stack trace information for the error.

" + } + }, + "documentation":"

An object that contains error information.

" + }, + "ErrorType":{ + "type":"string", + "sensitive":true + }, + "Event":{ + "type":"structure", + "members":{ + "EventType":{ + "shape":"EventType", + "documentation":"

The type of event that occurred.

" + }, + "SubType":{ + "shape":"OperationSubType", + "documentation":"

The subtype of the event, providing additional categorization.

" + }, + "EventId":{ + "shape":"EventId", + "documentation":"

The unique identifier for this event. Event IDs increment sequentially.

" + }, + "Id":{ + "shape":"OperationId", + "documentation":"

The unique identifier for this operation.

" + }, + "Name":{ + "shape":"OperationName", + "documentation":"

The customer-provided name for this operation.

" + }, + "EventTimestamp":{ + "shape":"ExecutionTimestamp", + "documentation":"

The date and time when this event occurred, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" + }, + "ParentId":{ + "shape":"OperationId", + "documentation":"

The unique identifier of the parent operation, if this operation is running within a child context.

" + }, + "ExecutionStartedDetails":{ + "shape":"ExecutionStartedDetails", + "documentation":"

Details about an execution that started.

" + }, + "ExecutionSucceededDetails":{ + "shape":"ExecutionSucceededDetails", + "documentation":"

Details about an execution that succeeded.

" + }, + "ExecutionFailedDetails":{ + "shape":"ExecutionFailedDetails", + "documentation":"

Details about an execution that failed.

" + }, + "ExecutionTimedOutDetails":{ + "shape":"ExecutionTimedOutDetails", + "documentation":"

Details about an execution that timed out.

" + }, + "ExecutionStoppedDetails":{ + "shape":"ExecutionStoppedDetails", + "documentation":"

Details about an execution that was stopped.

" + }, + "ContextStartedDetails":{ + "shape":"ContextStartedDetails", + "documentation":"

Details about a context that started.

" + }, + "ContextSucceededDetails":{ + "shape":"ContextSucceededDetails", + "documentation":"

Details about a context that succeeded.

" + }, + "ContextFailedDetails":{ + "shape":"ContextFailedDetails", + "documentation":"

Details about a context that failed.

" + }, + "WaitStartedDetails":{ + "shape":"WaitStartedDetails", + "documentation":"

Details about a wait operation that started.

" + }, + "WaitSucceededDetails":{ + "shape":"WaitSucceededDetails", + "documentation":"

Details about a wait operation that succeeded.

" + }, + "WaitCancelledDetails":{ + "shape":"WaitCancelledDetails", + "documentation":"

Details about a wait operation that was cancelled.

" + }, + "StepStartedDetails":{ + "shape":"StepStartedDetails", + "documentation":"

Details about a step that started.

" + }, + "StepSucceededDetails":{ + "shape":"StepSucceededDetails", + "documentation":"

Details about a step that succeeded.

" + }, + "StepFailedDetails":{ + "shape":"StepFailedDetails", + "documentation":"

Details about a step that failed.

" + }, + "ChainedInvokeStartedDetails":{"shape":"ChainedInvokeStartedDetails"}, + "ChainedInvokeSucceededDetails":{ + "shape":"ChainedInvokeSucceededDetails", + "documentation":"

Details about a chained invocation that succeeded.

" + }, + "ChainedInvokeFailedDetails":{"shape":"ChainedInvokeFailedDetails"}, + "ChainedInvokeTimedOutDetails":{ + "shape":"ChainedInvokeTimedOutDetails", + "documentation":"

Details about a chained invocation that timed out.

" + }, + "ChainedInvokeStoppedDetails":{ + "shape":"ChainedInvokeStoppedDetails", + "documentation":"

Details about a chained invocation that was stopped.

" + }, + "CallbackStartedDetails":{"shape":"CallbackStartedDetails"}, + "CallbackSucceededDetails":{"shape":"CallbackSucceededDetails"}, + "CallbackFailedDetails":{"shape":"CallbackFailedDetails"}, + "CallbackTimedOutDetails":{"shape":"CallbackTimedOutDetails"}, + "InvocationCompletedDetails":{ + "shape":"InvocationCompletedDetails", + "documentation":"

Details about a function invocation that completed.

" + } + }, + "documentation":"

An event that occurred during the execution of a durable function.

" + }, + "EventError":{ + "type":"structure", + "members":{ + "Payload":{ + "shape":"ErrorObject", + "documentation":"

The error payload.

" + }, + "Truncated":{ + "shape":"Truncated", + "documentation":"

Indicates if the error payload was truncated due to size limits.

" + } + }, + "documentation":"

Error information for an event.

" + }, + "EventId":{ + "type":"integer", + "box":true, + "min":1 + }, + "EventInput":{ + "type":"structure", + "members":{ + "Payload":{ + "shape":"InputPayload", + "documentation":"

The input payload.

" + }, + "Truncated":{ + "shape":"Truncated", + "documentation":"

Indicates if the input payload was truncated due to size limits.

" + } + }, + "documentation":"

Input information for an event.

" + }, + "EventResult":{ + "type":"structure", + "members":{ + "Payload":{ + "shape":"OperationPayload", + "documentation":"

The result payload.

" + }, + "Truncated":{ + "shape":"Truncated", + "documentation":"

Indicates if the result payload was truncated due to size limits.

" + } + }, + "documentation":"

Result information for an event.

" + }, "EventSourceMappingArn":{ "type":"string", "max":120, @@ -3161,12 +3906,176 @@ "min":0, "pattern":"[a-zA-Z0-9._\\-]+" }, + "EventType":{ + "type":"string", + "enum":[ + "ExecutionStarted", + "ExecutionSucceeded", + "ExecutionFailed", + "ExecutionTimedOut", + "ExecutionStopped", + "ContextStarted", + "ContextSucceeded", + "ContextFailed", + "WaitStarted", + "WaitSucceeded", + "WaitCancelled", + "StepStarted", + "StepSucceeded", + "StepFailed", + "ChainedInvokeStarted", + "ChainedInvokeSucceeded", + "ChainedInvokeFailed", + "ChainedInvokeTimedOut", + "ChainedInvokeStopped", + "CallbackStarted", + "CallbackSucceeded", + "CallbackFailed", + "CallbackTimedOut", + "InvocationCompleted" + ] + }, + "Events":{ + "type":"list", + "member":{"shape":"Event"} + }, + "Execution":{ + "type":"structure", + "required":[ + "DurableExecutionArn", + "DurableExecutionName", + "FunctionArn", + "Status", + "StartTimestamp" + ], + "members":{ + "DurableExecutionArn":{ + "shape":"DurableExecutionArn", + "documentation":"

The Amazon Resource Name (ARN) of the durable execution, if this execution is a durable execution.

" + }, + "DurableExecutionName":{ + "shape":"DurableExecutionName", + "documentation":"

The unique name of the durable execution, if one was provided when the execution was started.

" + }, + "FunctionArn":{ + "shape":"NameSpacedFunctionArn", + "documentation":"

The Amazon Resource Name (ARN) of the Lambda function.

" + }, + "Status":{ + "shape":"ExecutionStatus", + "documentation":"

The current status of the durable execution.

" + }, + "StartTimestamp":{ + "shape":"ExecutionTimestamp", + "documentation":"

The date and time when the durable execution started, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" + }, + "EndTimestamp":{ + "shape":"ExecutionTimestamp", + "documentation":"

The date and time when the durable execution ended, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" + } + }, + "documentation":"

Information about a durable execution.

" + }, + "ExecutionDetails":{ + "type":"structure", + "members":{ + "InputPayload":{ + "shape":"InputPayload", + "documentation":"

The original input payload provided for the durable execution.

" + } + }, + "documentation":"

Details about a durable execution.

" + }, "ExecutionEnvironmentMemoryGiBPerVCpu":{ "type":"double", "box":true, "max":8.0, "min":2.0 }, + "ExecutionFailedDetails":{ + "type":"structure", + "required":["Error"], + "members":{ + "Error":{ + "shape":"EventError", + "documentation":"

Details about the execution failure.

" + } + }, + "documentation":"

Details about a failed durable execution.

" + }, + "ExecutionStartedDetails":{ + "type":"structure", + "required":[ + "Input", + "ExecutionTimeout" + ], + "members":{ + "Input":{ + "shape":"EventInput", + "documentation":"

The input payload provided for the durable execution.

" + }, + "ExecutionTimeout":{ + "shape":"DurationSeconds", + "documentation":"

The maximum amount of time that the durable execution is allowed to run, in seconds.

" + } + }, + "documentation":"

Details about a durable execution that started.

" + }, + "ExecutionStatus":{ + "type":"string", + "enum":[ + "RUNNING", + "SUCCEEDED", + "FAILED", + "TIMED_OUT", + "STOPPED" + ] + }, + "ExecutionStatusList":{ + "type":"list", + "member":{"shape":"ExecutionStatus"}, + "max":10, + "min":1 + }, + "ExecutionStoppedDetails":{ + "type":"structure", + "required":["Error"], + "members":{ + "Error":{ + "shape":"EventError", + "documentation":"

Details about why the execution stopped.

" + } + }, + "documentation":"

Details about a durable execution that stopped.

" + }, + "ExecutionSucceededDetails":{ + "type":"structure", + "required":["Result"], + "members":{ + "Result":{ + "shape":"EventResult", + "documentation":"

The response payload from the successful operation.

" + } + }, + "documentation":"

Details about a durable execution that succeeded.

" + }, + "ExecutionTimedOutDetails":{ + "type":"structure", + "members":{ + "Error":{ + "shape":"EventError", + "documentation":"

Details about the execution timeout.

" + } + }, + "documentation":"

Details about a durable execution that timed out.

" + }, + "ExecutionTimeout":{ + "type":"integer", + "box":true, + "max":31622400, + "min":1 + }, + "ExecutionTimestamp":{"type":"timestamp"}, "FileSystemArn":{ "type":"string", "max":200, @@ -3475,6 +4384,10 @@ "shape":"String", "documentation":"

The SHA256 hash of the function configuration.

" }, + "DurableConfig":{ + "shape":"DurableConfig", + "documentation":"

The function's durable execution configuration settings, if the function is configured for durability.

" + }, "TenancyConfig":{ "shape":"TenancyConfig", "documentation":"

The function's tenant isolation configuration settings. Determines whether the Lambda function runs on a shared or dedicated infrastructure per unique tenant.

" @@ -3744,6 +4657,174 @@ } } }, + "GetDurableExecutionHistoryRequest":{ + "type":"structure", + "required":["DurableExecutionArn"], + "members":{ + "DurableExecutionArn":{ + "shape":"DurableExecutionArn", + "documentation":"

The Amazon Resource Name (ARN) of the durable execution.

", + "location":"uri", + "locationName":"DurableExecutionArn" + }, + "IncludeExecutionData":{ + "shape":"IncludeExecutionData", + "documentation":"

Specifies whether to include execution data such as step results and callback payloads in the history events. Set to true to include data, or false to exclude it for a more compact response. The default is true.

", + "location":"querystring", + "locationName":"IncludeExecutionData" + }, + "MaxItems":{ + "shape":"ItemCount", + "documentation":"

The maximum number of history events to return per call. You can use Marker to retrieve additional pages of results. The default is 100 and the maximum allowed is 1000. A value of 0 uses the default.

", + "location":"querystring", + "locationName":"MaxItems" + }, + "Marker":{ + "shape":"String", + "documentation":"

If NextMarker was returned from a previous request, use this value to retrieve the next page of results. Each pagination token expires after 24 hours.

", + "location":"querystring", + "locationName":"Marker" + }, + "ReverseOrder":{ + "shape":"ReverseOrder", + "documentation":"

When set to true, returns the history events in reverse chronological order (newest first). By default, events are returned in chronological order (oldest first).

", + "location":"querystring", + "locationName":"ReverseOrder" + } + } + }, + "GetDurableExecutionHistoryResponse":{ + "type":"structure", + "required":["Events"], + "members":{ + "Events":{ + "shape":"Events", + "documentation":"

An array of execution history events, ordered chronologically unless ReverseOrder is set to true. Each event represents a significant occurrence during the execution, such as step completion or callback resolution.

" + }, + "NextMarker":{ + "shape":"String", + "documentation":"

If present, indicates that more history events are available. Use this value as the Marker parameter in a subsequent request to retrieve the next page of results.

" + } + }, + "documentation":"

The response from the GetDurableExecutionHistory operation, containing the execution history and events.

" + }, + "GetDurableExecutionRequest":{ + "type":"structure", + "required":["DurableExecutionArn"], + "members":{ + "DurableExecutionArn":{ + "shape":"DurableExecutionArn", + "documentation":"

The Amazon Resource Name (ARN) of the durable execution.

", + "location":"uri", + "locationName":"DurableExecutionArn" + } + } + }, + "GetDurableExecutionResponse":{ + "type":"structure", + "required":[ + "DurableExecutionArn", + "DurableExecutionName", + "FunctionArn", + "StartTimestamp", + "Status" + ], + "members":{ + "DurableExecutionArn":{ + "shape":"DurableExecutionArn", + "documentation":"

The Amazon Resource Name (ARN) of the durable execution.

" + }, + "DurableExecutionName":{ + "shape":"DurableExecutionName", + "documentation":"

The name of the durable execution. This is either the name you provided when invoking the function, or a system-generated unique identifier if no name was provided.

" + }, + "FunctionArn":{ + "shape":"NameSpacedFunctionArn", + "documentation":"

The Amazon Resource Name (ARN) of the Lambda function that was invoked to start this durable execution.

" + }, + "InputPayload":{ + "shape":"InputPayload", + "documentation":"

The JSON input payload that was provided when the durable execution was started. For asynchronous invocations, this is limited to 256 KB. For synchronous invocations, this can be up to 6 MB.

" + }, + "Result":{ + "shape":"OutputPayload", + "documentation":"

The JSON result returned by the durable execution if it completed successfully. This field is only present when the execution status is SUCCEEDED. The result is limited to 256 KB.

" + }, + "Error":{ + "shape":"ErrorObject", + "documentation":"

Error information if the durable execution failed. This field is only present when the execution status is FAILED, TIMED_OUT, or STOPPED. The combined size of all error fields is limited to 256 KB.

" + }, + "StartTimestamp":{ + "shape":"ExecutionTimestamp", + "documentation":"

The date and time when the durable execution started, in Unix timestamp format.

" + }, + "Status":{ + "shape":"ExecutionStatus", + "documentation":"

The current status of the durable execution. Valid values are RUNNING, SUCCEEDED, FAILED, TIMED_OUT, and STOPPED.

" + }, + "EndTimestamp":{ + "shape":"ExecutionTimestamp", + "documentation":"

The date and time when the durable execution ended, in Unix timestamp format. This field is only present if the execution has completed (status is SUCCEEDED, FAILED, TIMED_OUT, or STOPPED).

" + }, + "Version":{ + "shape":"VersionWithLatestPublished", + "documentation":"

The version of the Lambda function that was invoked for this durable execution. This ensures that all replays during the execution use the same function version.

" + }, + "TraceHeader":{ + "shape":"TraceHeader", + "documentation":"

The trace headers associated with the durable execution.

" + } + }, + "documentation":"

The response from the GetDurableExecution operation, containing detailed information about the durable execution.

" + }, + "GetDurableExecutionStateRequest":{ + "type":"structure", + "required":[ + "DurableExecutionArn", + "CheckpointToken" + ], + "members":{ + "DurableExecutionArn":{ + "shape":"DurableExecutionArn", + "documentation":"

The Amazon Resource Name (ARN) of the durable execution.

", + "location":"uri", + "locationName":"DurableExecutionArn" + }, + "CheckpointToken":{ + "shape":"CheckpointToken", + "documentation":"

A checkpoint token that identifies the current state of the execution. This token is provided by the Lambda runtime and ensures that state retrieval is consistent with the current execution context.

", + "location":"querystring", + "locationName":"CheckpointToken" + }, + "Marker":{ + "shape":"String", + "documentation":"

If NextMarker was returned from a previous request, use this value to retrieve the next page of operations. Each pagination token expires after 24 hours.

", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"ItemCount", + "documentation":"

The maximum number of operations to return per call. You can use Marker to retrieve additional pages of results. The default is 100 and the maximum allowed is 1000. A value of 0 uses the default.

", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "GetDurableExecutionStateResponse":{ + "type":"structure", + "required":["Operations"], + "members":{ + "Operations":{ + "shape":"Operations", + "documentation":"

An array of operations that represent the current state of the durable execution. Operations are ordered by their start sequence number in ascending order and include information needed for replay processing.

" + }, + "NextMarker":{ + "shape":"String", + "documentation":"

If present, indicates that more operations are available. Use this value as the Marker parameter in a subsequent request to retrieve the next page of results.

" + } + }, + "documentation":"

The response from the GetDurableExecutionState operation, containing the current execution state for replay.

" + }, "GetEventSourceMappingRequest":{ "type":"structure", "required":["UUID"], @@ -4291,6 +5372,16 @@ }, "documentation":"

Response to a GetFunctionConfiguration request.

" }, + "IncludeExecutionData":{ + "type":"boolean", + "box":true + }, + "InputPayload":{ + "type":"string", + "max":6291456, + "min":0, + "sensitive":true + }, "InstanceRequirements":{ "type":"structure", "members":{ @@ -4417,6 +5508,33 @@ "exception":true, "fault":true }, + "InvocationCompletedDetails":{ + "type":"structure", + "required":[ + "StartTimestamp", + "EndTimestamp", + "RequestId" + ], + "members":{ + "StartTimestamp":{ + "shape":"ExecutionTimestamp", + "documentation":"

The date and time when the invocation started, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" + }, + "EndTimestamp":{ + "shape":"ExecutionTimestamp", + "documentation":"

The date and time when the invocation ended, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" + }, + "RequestId":{ + "shape":"String", + "documentation":"

The request ID for the invocation.

" + }, + "Error":{ + "shape":"EventError", + "documentation":"

Details about the invocation failure.

" + } + }, + "documentation":"

Details about a function invocation that completed.

" + }, "InvocationRequest":{ "type":"structure", "required":["FunctionName"], @@ -4445,6 +5563,12 @@ "location":"header", "locationName":"X-Amz-Client-Context" }, + "DurableExecutionName":{ + "shape":"DurableExecutionName", + "documentation":"

An optional unique name for the durable execution. Use the name to identify this specific execution of the function. If you don't provide a name, Lambda generates a unique identifier for the execution.

", + "location":"header", + "locationName":"X-Amz-Durable-Execution-Name" + }, "Payload":{ "shape":"Blob", "documentation":"

The JSON that you want to provide to your Lambda function as input. The maximum payload size is 6 MB for synchronous invocations and 1 MB for asynchronous invocations.

You can enter the JSON directly. For example, --payload '{ \"key\": \"value\" }'. You can also specify a file path. For example, --payload file://payload.json.

" @@ -4493,6 +5617,12 @@ "documentation":"

The version of the function that executed. When you invoke a function with an alias, this indicates which version the alias resolved to.

", "location":"header", "locationName":"X-Amz-Executed-Version" + }, + "DurableExecutionArn":{ + "shape":"DurableExecutionArn", + "documentation":"

The ARN of the durable execution that was started. This is returned when invoking a durable function and provides a unique identifier for tracking the execution.

", + "location":"header", + "locationName":"X-Amz-Durable-Execution-Arn" } }, "payload":"Payload" @@ -4669,6 +5799,11 @@ "type":"boolean", "box":true }, + "ItemCount":{ + "type":"integer", + "max":1000, + "min":0 + }, "KMSAccessDeniedException":{ "type":"structure", "members":{ @@ -4855,7 +5990,8 @@ "FunctionError.InvalidWorkingDirectory", "FunctionError.PermissionDenied", "FunctionError.TooManyExtensions", - "FunctionError.InitResourceExhausted" + "FunctionError.InitResourceExhausted", + "DisallowedByVpcEncryptionControl" ] }, "Layer":{ @@ -5092,53 +6228,127 @@ "shape":"MaxFiftyListItems", "documentation":"

The maximum number of capacity providers to return.

", "location":"querystring", - "locationName":"MaxItems" - } - } - }, - "ListCapacityProvidersResponse":{ - "type":"structure", - "required":["CapacityProviders"], - "members":{ - "CapacityProviders":{ - "shape":"CapacityProvidersList", - "documentation":"

A list of capacity providers in your account.

" + "locationName":"MaxItems" + } + } + }, + "ListCapacityProvidersResponse":{ + "type":"structure", + "required":["CapacityProviders"], + "members":{ + "CapacityProviders":{ + "shape":"CapacityProvidersList", + "documentation":"

A list of capacity providers in your account.

" + }, + "NextMarker":{ + "shape":"String", + "documentation":"

The pagination token that's included if more results are available.

" + } + } + }, + "ListCodeSigningConfigsRequest":{ + "type":"structure", + "members":{ + "Marker":{ + "shape":"String", + "documentation":"

Specify the pagination token that's returned by a previous request to retrieve the next page of results.

", + "location":"querystring", + "locationName":"Marker" + }, + "MaxItems":{ + "shape":"MaxListItems", + "documentation":"

Maximum number of items to return.

", + "location":"querystring", + "locationName":"MaxItems" + } + } + }, + "ListCodeSigningConfigsResponse":{ + "type":"structure", + "members":{ + "NextMarker":{ + "shape":"String", + "documentation":"

The pagination token that's included if more results are available.

" + }, + "CodeSigningConfigs":{ + "shape":"CodeSigningConfigList", + "documentation":"

The code signing configurations.

" + } + } + }, + "ListDurableExecutionsByFunctionRequest":{ + "type":"structure", + "required":["FunctionName"], + "members":{ + "FunctionName":{ + "shape":"NamespacedFunctionName", + "documentation":"

The name or ARN of the Lambda function. You can specify a function name, a partial ARN, or a full ARN.

", + "location":"uri", + "locationName":"FunctionName" + }, + "Qualifier":{ + "shape":"NumericLatestPublishedOrAliasQualifier", + "documentation":"

The function version or alias. If not specified, lists executions for the $LATEST version.

", + "location":"querystring", + "locationName":"Qualifier" + }, + "DurableExecutionName":{ + "shape":"DurableExecutionName", + "documentation":"

Filter executions by name. Only executions with names that contain this string are returned.

", + "location":"querystring", + "locationName":"DurableExecutionName" + }, + "Statuses":{ + "shape":"ExecutionStatusList", + "documentation":"

Filter executions by status. Valid values: RUNNING, SUCCEEDED, FAILED, TIMED_OUT, STOPPED.

", + "location":"querystring", + "locationName":"Statuses" + }, + "StartedAfter":{ + "shape":"ExecutionTimestamp", + "documentation":"

Filter executions that started after this timestamp (ISO 8601 format).

", + "location":"querystring", + "locationName":"StartedAfter" + }, + "StartedBefore":{ + "shape":"ExecutionTimestamp", + "documentation":"

Filter executions that started before this timestamp (ISO 8601 format).

", + "location":"querystring", + "locationName":"StartedBefore" + }, + "ReverseOrder":{ + "shape":"ReverseOrder", + "documentation":"

Set to true to return results in reverse chronological order (newest first). Default is false.

", + "location":"querystring", + "locationName":"ReverseOrder" }, - "NextMarker":{ - "shape":"String", - "documentation":"

The pagination token that's included if more results are available.

" - } - } - }, - "ListCodeSigningConfigsRequest":{ - "type":"structure", - "members":{ "Marker":{ "shape":"String", - "documentation":"

Specify the pagination token that's returned by a previous request to retrieve the next page of results.

", + "documentation":"

Pagination token from a previous request to continue retrieving results.

", "location":"querystring", "locationName":"Marker" }, "MaxItems":{ - "shape":"MaxListItems", - "documentation":"

Maximum number of items to return.

", + "shape":"ItemCount", + "documentation":"

Maximum number of executions to return (1-1000). Default is 100.

", "location":"querystring", "locationName":"MaxItems" } } }, - "ListCodeSigningConfigsResponse":{ + "ListDurableExecutionsByFunctionResponse":{ "type":"structure", "members":{ + "DurableExecutions":{ + "shape":"DurableExecutions", + "documentation":"

List of durable execution summaries matching the filter criteria.

" + }, "NextMarker":{ "shape":"String", - "documentation":"

The pagination token that's included if more results are available.

" - }, - "CodeSigningConfigs":{ - "shape":"CodeSigningConfigList", - "documentation":"

The code signing configurations

" + "documentation":"

Pagination token for retrieving additional results. Present only if there are more results available.

" } - } + }, + "documentation":"

The response from the ListDurableExecutionsByFunction operation, containing a list of durable executions and pagination information.

" }, "ListEventSourceMappingsRequest":{ "type":"structure", @@ -5798,6 +7008,191 @@ }, "documentation":"

A destination for events that were processed successfully.

To retain records of successful asynchronous invocations, you can configure an Amazon SNS topic, Amazon SQS queue, Lambda function, or Amazon EventBridge event bus as the destination.

OnSuccess is not supported in CreateEventSourceMapping or UpdateEventSourceMapping requests.

" }, + "Operation":{ + "type":"structure", + "required":[ + "Id", + "Type", + "StartTimestamp", + "Status" + ], + "members":{ + "Id":{ + "shape":"OperationId", + "documentation":"

The unique identifier for this operation.

" + }, + "ParentId":{ + "shape":"OperationId", + "documentation":"

The unique identifier of the parent operation, if this operation is running within a child context.

" + }, + "Name":{ + "shape":"OperationName", + "documentation":"

The customer-provided name for this operation.

" + }, + "Type":{ + "shape":"OperationType", + "documentation":"

The type of operation.

" + }, + "SubType":{ + "shape":"OperationSubType", + "documentation":"

The subtype of the operation, providing additional categorization.

" + }, + "StartTimestamp":{ + "shape":"ExecutionTimestamp", + "documentation":"

The date and time when the operation started, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" + }, + "EndTimestamp":{ + "shape":"ExecutionTimestamp", + "documentation":"

The date and time when the operation ended, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" + }, + "Status":{ + "shape":"OperationStatus", + "documentation":"

The current status of the operation.

" + }, + "ExecutionDetails":{ + "shape":"ExecutionDetails", + "documentation":"

Details about the execution, if this operation represents an execution.

" + }, + "ContextDetails":{ + "shape":"ContextDetails", + "documentation":"

Details about the context, if this operation represents a context.

" + }, + "StepDetails":{ + "shape":"StepDetails", + "documentation":"

Details about the step, if this operation represents a step.

" + }, + "WaitDetails":{ + "shape":"WaitDetails", + "documentation":"

Details about the wait operation, if this operation represents a wait.

" + }, + "CallbackDetails":{"shape":"CallbackDetails"}, + "ChainedInvokeDetails":{"shape":"ChainedInvokeDetails"} + }, + "documentation":"

Information about an operation within a durable execution.

" + }, + "OperationAction":{ + "type":"string", + "enum":[ + "START", + "SUCCEED", + "FAIL", + "RETRY", + "CANCEL" + ] + }, + "OperationId":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[a-zA-Z0-9-_]+" + }, + "OperationName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[\\x20-\\x7E]+" + }, + "OperationPayload":{ + "type":"string", + "max":6291456, + "min":0, + "sensitive":true + }, + "OperationStatus":{ + "type":"string", + "enum":[ + "STARTED", + "PENDING", + "READY", + "SUCCEEDED", + "FAILED", + "CANCELLED", + "TIMED_OUT", + "STOPPED" + ] + }, + "OperationSubType":{ + "type":"string", + "max":32, + "min":1, + "pattern":"[a-zA-Z0-9-_]+" + }, + "OperationType":{ + "type":"string", + "enum":[ + "EXECUTION", + "CONTEXT", + "STEP", + "WAIT", + "CALLBACK", + "CHAINED_INVOKE" + ] + }, + "OperationUpdate":{ + "type":"structure", + "required":[ + "Id", + "Type", + "Action" + ], + "members":{ + "Id":{ + "shape":"OperationId", + "documentation":"

The unique identifier for this operation.

" + }, + "ParentId":{ + "shape":"OperationId", + "documentation":"

The unique identifier of the parent operation, if this operation is running within a child context.

" + }, + "Name":{ + "shape":"OperationName", + "documentation":"

The customer-provided name for this operation.

" + }, + "Type":{ + "shape":"OperationType", + "documentation":"

The type of operation to update.

" + }, + "SubType":{ + "shape":"OperationSubType", + "documentation":"

The subtype of the operation, providing additional categorization.

" + }, + "Action":{ + "shape":"OperationAction", + "documentation":"

The action to take on the operation.

" + }, + "Payload":{ + "shape":"OperationPayload", + "documentation":"

The payload for successful operations.

" + }, + "Error":{ + "shape":"ErrorObject", + "documentation":"

The error information for failed operations.

" + }, + "ContextOptions":{ + "shape":"ContextOptions", + "documentation":"

Options for context operations.

" + }, + "StepOptions":{ + "shape":"StepOptions", + "documentation":"

Options for step operations.

" + }, + "WaitOptions":{ + "shape":"WaitOptions", + "documentation":"

Options for wait operations.

" + }, + "CallbackOptions":{"shape":"CallbackOptions"}, + "ChainedInvokeOptions":{"shape":"ChainedInvokeOptions"} + }, + "documentation":"

An update to be applied to an operation during checkpointing.

" + }, + "OperationUpdates":{ + "type":"list", + "member":{"shape":"OperationUpdate"} + }, + "Operations":{ + "type":"list", + "member":{"shape":"Operation"} + }, "OrganizationId":{ "type":"string", "max":34, @@ -5810,6 +7205,12 @@ "min":1, "pattern":".*" }, + "OutputPayload":{ + "type":"string", + "max":6291456, + "min":0, + "sensitive":true + }, "PackageType":{ "type":"string", "enum":[ @@ -6438,6 +7839,10 @@ } } }, + "ReplayChildren":{ + "type":"boolean", + "box":true + }, "RequestTooLargeException":{ "type":"structure", "members":{ @@ -6529,6 +7934,30 @@ "DryRun" ] }, + "RetentionPeriodInDays":{ + "type":"integer", + "box":true, + "max":90, + "min":1 + }, + "RetryDetails":{ + "type":"structure", + "members":{ + "CurrentAttempt":{ + "shape":"AttemptCount", + "documentation":"

The current attempt number for this operation.

" + }, + "NextAttemptDelaySeconds":{ + "shape":"DurationSeconds", + "documentation":"

The delay before the next retry attempt, in seconds.

" + } + }, + "documentation":"

Information about retry attempts for an operation.

" + }, + "ReverseOrder":{ + "type":"boolean", + "box":true + }, "RoleArn":{ "type":"string", "pattern":"arn:(aws[a-zA-Z-]*)?:iam::\\d{12}:role/?[a-zA-Z_0-9+=,.@\\-_/]+" @@ -6579,7 +8008,8 @@ "nodejs22.x", "nodejs24.x", "python3.14", - "java25" + "java25", + "dotnet10" ] }, "RuntimeVersionArn":{ @@ -6686,6 +8116,64 @@ }, "documentation":"

Specific configuration settings for a self-managed Apache Kafka event source.

" }, + "SendDurableExecutionCallbackFailureRequest":{ + "type":"structure", + "required":["CallbackId"], + "members":{ + "CallbackId":{ + "shape":"CallbackId", + "documentation":"

The unique identifier for the callback operation.

", + "location":"uri", + "locationName":"CallbackId" + }, + "Error":{ + "shape":"ErrorObject", + "documentation":"

Error details describing why the callback operation failed.

" + } + }, + "payload":"Error" + }, + "SendDurableExecutionCallbackFailureResponse":{ + "type":"structure", + "members":{} + }, + "SendDurableExecutionCallbackHeartbeatRequest":{ + "type":"structure", + "required":["CallbackId"], + "members":{ + "CallbackId":{ + "shape":"CallbackId", + "documentation":"

The unique identifier for the callback operation.

", + "location":"uri", + "locationName":"CallbackId" + } + } + }, + "SendDurableExecutionCallbackHeartbeatResponse":{ + "type":"structure", + "members":{} + }, + "SendDurableExecutionCallbackSuccessRequest":{ + "type":"structure", + "required":["CallbackId"], + "members":{ + "CallbackId":{ + "shape":"CallbackId", + "documentation":"

The unique identifier for the callback operation.

", + "location":"uri", + "locationName":"CallbackId" + }, + "Result":{ + "shape":"BinaryOperationPayload", + "documentation":"

The result data from the successful callback operation. Maximum size is 256 KB.

" + } + }, + "payload":"Result" + }, + "SendDurableExecutionCallbackSuccessResponse":{ + "type":"structure", + "members":{} + }, "SensitiveString":{ "type":"string", "sensitive":true @@ -6839,6 +8327,14 @@ "min":0, "pattern":"\\d{12}" }, + "StackTraceEntries":{ + "type":"list", + "member":{"shape":"StackTraceEntry"} + }, + "StackTraceEntry":{ + "type":"string", + "sensitive":true + }, "State":{ "type":"string", "enum":[ @@ -6880,6 +8376,7 @@ "InvalidRuntime", "InvalidZipFileException", "FunctionError", + "DrainingDurableExecutions", "VcpuLimitExceeded", "CapacityProviderScalingLimitExceeded", "InsufficientCapacity", @@ -6891,7 +8388,8 @@ "FunctionError.InvalidWorkingDirectory", "FunctionError.PermissionDenied", "FunctionError.TooManyExtensions", - "FunctionError.InitResourceExhausted" + "FunctionError.InitResourceExhausted", + "DisallowedByVpcEncryptionControl" ] }, "StatementId":{ @@ -6900,6 +8398,112 @@ "min":1, "pattern":"([a-zA-Z0-9-_]+)" }, + "StepDetails":{ + "type":"structure", + "members":{ + "Attempt":{ + "shape":"AttemptCount", + "documentation":"

The current attempt number for this step.

" + }, + "NextAttemptTimestamp":{ + "shape":"ExecutionTimestamp", + "documentation":"

The date and time when the next attempt is scheduled, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD). Only populated when the step is in a pending state.

" + }, + "Result":{ + "shape":"OperationPayload", + "documentation":"

The JSON response payload from the step operation.

" + }, + "Error":{ + "shape":"ErrorObject", + "documentation":"

Details about the step failure.

" + } + }, + "documentation":"

Details about a step operation.

" + }, + "StepFailedDetails":{ + "type":"structure", + "required":[ + "Error", + "RetryDetails" + ], + "members":{ + "Error":{ + "shape":"EventError", + "documentation":"

Details about the step failure.

" + }, + "RetryDetails":{ + "shape":"RetryDetails", + "documentation":"

Information about retry attempts for this step operation.

" + } + }, + "documentation":"

Details about a step that failed.

" + }, + "StepOptions":{ + "type":"structure", + "members":{ + "NextAttemptDelaySeconds":{ + "shape":"StepOptionsNextAttemptDelaySecondsInteger", + "documentation":"

The delay in seconds before the next retry attempt.

" + } + }, + "documentation":"

Configuration options for a step operation.

" + }, + "StepOptionsNextAttemptDelaySecondsInteger":{ + "type":"integer", + "box":true, + "max":31622400, + "min":1 + }, + "StepStartedDetails":{ + "type":"structure", + "members":{}, + "documentation":"

Details about a step that has started.

" + }, + "StepSucceededDetails":{ + "type":"structure", + "required":[ + "Result", + "RetryDetails" + ], + "members":{ + "Result":{ + "shape":"EventResult", + "documentation":"

The response payload from the successful operation.

" + }, + "RetryDetails":{ + "shape":"RetryDetails", + "documentation":"

Information about retry attempts for this step operation.

" + } + }, + "documentation":"

Details about a step that succeeded.

" + }, + "StopDurableExecutionRequest":{ + "type":"structure", + "required":["DurableExecutionArn"], + "members":{ + "DurableExecutionArn":{ + "shape":"DurableExecutionArn", + "documentation":"

The Amazon Resource Name (ARN) of the durable execution.

", + "location":"uri", + "locationName":"DurableExecutionArn" + }, + "Error":{ + "shape":"ErrorObject", + "documentation":"

Optional error details explaining why the execution is being stopped.

" + } + }, + "payload":"Error" + }, + "StopDurableExecutionResponse":{ + "type":"structure", + "required":["StopTimestamp"], + "members":{ + "StopTimestamp":{ + "shape":"ExecutionTimestamp", + "documentation":"

The timestamp when the execution was stopped (ISO 8601 format).

" + } + } + }, "String":{"type":"string"}, "StringList":{ "type":"list", @@ -7087,6 +8691,16 @@ "max":1, "min":1 }, + "TraceHeader":{ + "type":"structure", + "members":{ + "XAmznTraceId":{ + "shape":"XAmznTraceId", + "documentation":"

The X-Ray trace header associated with the durable execution.

" + } + }, + "documentation":"

Contains trace headers for the Lambda durable execution.

" + }, "TracingConfig":{ "type":"structure", "members":{ @@ -7114,6 +8728,10 @@ "PassThrough" ] }, + "Truncated":{ + "type":"boolean", + "box":true + }, "TumblingWindowInSeconds":{ "type":"integer", "box":true, @@ -7495,6 +9113,10 @@ "CapacityProviderConfig":{ "shape":"CapacityProviderConfig", "documentation":"

Configuration for the capacity provider that manages compute resources for Lambda functions.

" + }, + "DurableConfig":{ + "shape":"DurableConfig", + "documentation":"

Configuration settings for durable functions. Allows updating execution timeout and retention period for functions with durability enabled.

" } } }, @@ -7659,6 +9281,70 @@ "documentation":"

The VPC security groups and subnets that are attached to a Lambda function.

" }, "VpcId":{"type":"string"}, + "WaitCancelledDetails":{ + "type":"structure", + "members":{ + "Error":{ + "shape":"EventError", + "documentation":"

Details about why the wait operation was cancelled.

" + } + }, + "documentation":"

Details about a wait operation that was cancelled.

" + }, + "WaitDetails":{ + "type":"structure", + "members":{ + "ScheduledEndTimestamp":{ + "shape":"ExecutionTimestamp", + "documentation":"

The date and time when the wait operation is scheduled to complete, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" + } + }, + "documentation":"

Details about a wait operation.

" + }, + "WaitOptions":{ + "type":"structure", + "members":{ + "WaitSeconds":{ + "shape":"WaitOptionsWaitSecondsInteger", + "documentation":"

The duration to wait, in seconds.

" + } + }, + "documentation":"

Specifies how long to pause the durable execution.

" + }, + "WaitOptionsWaitSecondsInteger":{ + "type":"integer", + "box":true, + "max":31622400, + "min":1 + }, + "WaitStartedDetails":{ + "type":"structure", + "required":[ + "Duration", + "ScheduledEndTimestamp" + ], + "members":{ + "Duration":{ + "shape":"DurationSeconds", + "documentation":"

The duration to wait, in seconds.

" + }, + "ScheduledEndTimestamp":{ + "shape":"ExecutionTimestamp", + "documentation":"

The date and time when the wait operation is scheduled to complete, in ISO-8601 format (YYYY-MM-DDThh:mm:ss.sTZD).

" + } + }, + "documentation":"

Details about a wait operation that has started.

" + }, + "WaitSucceededDetails":{ + "type":"structure", + "members":{ + "Duration":{ + "shape":"DurationSeconds", + "documentation":"

The wait duration, in seconds.

" + } + }, + "documentation":"

Details about a wait operation that succeeded.

" + }, "Weight":{ "type":"double", "max":1.0, @@ -7668,6 +9354,11 @@ "type":"string", "max":1000, "min":0 + }, + "XAmznTraceId":{ + "type":"string", + "max":8192, + "min":0 } }, "documentation":"

Lambda

Overview

Lambda is a compute service that lets you run code without provisioning or managing servers. Lambda runs your code on a high-availability compute infrastructure and performs all of the administration of the compute resources, including server and operating system maintenance, capacity provisioning and automatic scaling, code monitoring and logging. With Lambda, you can run code for virtually any type of application or backend service. For more information about the Lambda service, see What is Lambda in the Lambda Developer Guide.

The Lambda API Reference provides information about each of the API methods, including details about the parameters in each API request and response.

You can use Software Development Kits (SDKs), Integrated Development Environment (IDE) Toolkits, and command line tools to access the API. For installation instructions, see Tools for Amazon Web Services.

For a list of Region-specific endpoints that Lambda supports, see Lambda endpoints and quotas in the Amazon Web Services General Reference.

When making the API calls, you will need to authenticate your request by providing a signature. Lambda supports signature version 4. For more information, see Signature Version 4 signing process in the Amazon Web Services General Reference.

CA certificates

Because Amazon Web Services SDKs use the CA certificates from your computer, changes to the certificates on the Amazon Web Services servers can cause connection failures when you attempt to use an SDK. You can prevent these failures by keeping your computer's CA certificates and operating system up-to-date. If you encounter this issue in a corporate environment and do not manage your own computer, you might need to ask an administrator to assist with the update process. The following list shows minimum operating system and Java versions:

When accessing the Lambda management console or Lambda API endpoints, whether through browsers or programmatically, you will need to ensure your client machines support any of the following CAs:

Root certificates from the first two authorities are available from Amazon trust services, but keeping your computer up-to-date is the more straightforward solution. To learn more about ACM-provided certificates, see Amazon Web Services Certificate Manager FAQs.

" diff --git a/awscli/botocore/data/logs/2014-03-28/paginators-1.json b/awscli/botocore/data/logs/2014-03-28/paginators-1.json index a181e273fca2..ac2062d9f3bc 100644 --- a/awscli/botocore/data/logs/2014-03-28/paginators-1.json +++ b/awscli/botocore/data/logs/2014-03-28/paginators-1.json @@ -110,6 +110,18 @@ "limit_key": "maxResults", "output_token": "nextToken", "result_key": "scheduledQueries" + }, + "ListSourcesForS3TableIntegration": { + "input_token": "nextToken", + "limit_key": "maxResults", + "output_token": "nextToken", + "result_key": "sources" + }, + "ListAggregateLogGroupSummaries": { + "input_token": "nextToken", + "limit_key": "limit", + "output_token": "nextToken", + "result_key": "aggregateLogGroupSummaries" } } } diff --git a/awscli/botocore/data/logs/2014-03-28/service-2.json b/awscli/botocore/data/logs/2014-03-28/service-2.json index cfd334270637..7359ad7e2578 100644 --- a/awscli/botocore/data/logs/2014-03-28/service-2.json +++ b/awscli/botocore/data/logs/2014-03-28/service-2.json @@ -29,6 +29,23 @@ ], "documentation":"

Associates the specified KMS key with either one log group in the account, or with all stored CloudWatch Logs query insights results in the account.

When you use AssociateKmsKey, you specify either the logGroupName parameter or the resourceIdentifier parameter. You can't specify both of those parameters in the same operation.

If you delete the key that is used to encrypt log events or log group query results, then all the associated stored log events or query results that were encrypted with that key will be unencryptable and unusable.

CloudWatch Logs supports only symmetric KMS keys. Do not associate an asymmetric KMS key with your log group or query results. For more information, see Using Symmetric and Asymmetric Keys.

It can take up to 5 minutes for this operation to take effect.

If you attempt to associate a KMS key with a log group but the KMS key does not exist or the KMS key is disabled, you receive an InvalidParameterException error.

" }, + "AssociateSourceToS3TableIntegration":{ + "name":"AssociateSourceToS3TableIntegration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"AssociateSourceToS3TableIntegrationRequest"}, + "output":{"shape":"AssociateSourceToS3TableIntegrationResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Associates a data source with an S3 Table Integration for query access in the 'logs' namespace. This enables querying log data using analytics engines that support Iceberg such as Amazon Athena, Amazon Redshift, and Apache Spark.

" + }, "CancelExportTask":{ "name":"CancelExportTask", "http":{ @@ -44,6 +61,23 @@ ], "documentation":"

Cancels the specified export task.

The task must be in the PENDING or RUNNING state.

" }, + "CancelImportTask":{ + "name":"CancelImportTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelImportTaskRequest"}, + "output":{"shape":"CancelImportTaskResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidOperationException"} + ], + "documentation":"

Cancels an active import task and stops importing data from the CloudTrail Lake Event Data Store.

" + }, "CreateDelivery":{ "name":"CreateDelivery", "http":{ @@ -81,6 +115,25 @@ ], "documentation":"

Creates an export task so that you can efficiently export data from a log group to an Amazon S3 bucket. When you perform a CreateExportTask operation, you must use credentials that have permission to write to the S3 bucket that you specify as the destination.

Exporting log data to S3 buckets that are encrypted by KMS is supported. Exporting log data to Amazon S3 buckets that have S3 Object Lock enabled with a retention period is also supported.

Exporting to S3 buckets that are encrypted with AES-256 is supported.

This is an asynchronous call. If all the required information is provided, this operation initiates an export task and responds with the ID of the task. After the task has started, you can use DescribeExportTasks to get the status of the export task. Each account can only have one active (RUNNING or PENDING) export task at a time. To cancel an export task, use CancelExportTask.

You can export logs from multiple log groups or multiple time ranges to the same S3 bucket. To separate log data for each export task, specify a prefix to be used as the Amazon S3 key prefix for all exported objects.

We recommend that you don't regularly export to Amazon S3 as a way to continuously archive your logs. For that use case, we instead recommend that you use subscriptions. For more information about subscriptions, see Real-time processing of log data with subscriptions.

Time-based sorting on chunks of log data inside an exported file is not guaranteed. You can sort the exported log field data by using Linux utilities.

" }, + "CreateImportTask":{ + "name":"CreateImportTask", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateImportTaskRequest"}, + "output":{"shape":"CreateImportTaskResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"InvalidOperationException"} + ], + "documentation":"

Starts an import from a data source to CloudWatch Logs and creates a managed log group as the destination for the imported data. Currently, CloudTrail Event Data Store is the only supported data source.

The import task must satisfy the following constraints:

" + }, "CreateLogAnomalyDetector":{ "name":"CreateLogAnomalyDetector", "http":{ @@ -146,7 +199,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Creates a new Scheduled Query that runs CloudWatch Logs Insights queries on a schedule and delivers results to specified destinations.

" + "documentation":"

Creates a scheduled query that runs CloudWatch Logs Insights queries at regular intervals. Scheduled queries enable proactive monitoring by automatically executing queries to detect patterns and anomalies in your log data. Query results can be delivered to Amazon S3 for analysis or further processing.

" }, "DeleteAccountPolicy":{ "name":"DeleteAccountPolicy", @@ -161,7 +214,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"OperationAbortedException"} ], - "documentation":"

Deletes a CloudWatch Logs account policy. This stops the account-wide policy from applying to log groups in the account. If you delete a data protection policy or subscription filter policy, any log-group level policies of those types remain in effect.

To use this operation, you must be signed on with the correct permissions depending on the type of policy that you are deleting.

If you delete a field index policy, the indexing of the log events that happened before you deleted the policy will still be used for up to 30 days to improve CloudWatch Logs Insights queries.

" + "documentation":"

Deletes a CloudWatch Logs account policy. This stops the account-wide policy from applying to log groups or data sources in the account. If you delete a data protection policy or subscription filter policy, any log-group level policies of those types remain in effect. This operation supports deletion of data source-based field index policies, including facet configurations, in addition to log group-based policies.

To use this operation, you must be signed on with the correct permissions depending on the type of policy that you are deleting.

If you delete a field index policy, the indexing of the log events that happened before you deleted the policy will still be used for up to 30 days to improve CloudWatch Logs Insights queries.

" }, "DeleteDataProtectionPolicy":{ "name":"DeleteDataProtectionPolicy", @@ -274,7 +327,7 @@ {"shape":"OperationAbortedException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Deletes a log-group level field index policy that was applied to a single log group. The indexing of the log events that happened before you delete the policy will still be used for as many as 30 days to improve CloudWatch Logs Insights queries.

You can't use this operation to delete an account-level index policy. Instead, use DeletAccountPolicy.

If you delete a log-group level field index policy and there is an account-level field index policy, in a few minutes the log group begins using that account-wide policy to index new incoming log events.

" + "documentation":"

Deletes a log-group level field index policy that was applied to a single log group. The indexing of the log events that happened before you delete the policy will still be used for as many as 30 days to improve CloudWatch Logs Insights queries.

If the deleted policy included facet configurations, those facets will no longer be available for interactive exploration in the CloudWatch Logs Insights console for this log group. However, facet data is retained for up to 30 days.

You can't use this operation to delete an account-level index policy. Instead, use DeleteAccountPolicy.

If you delete a log-group level field index policy and there is an account-level field index policy, in a few minutes the log group begins using that account-wide policy to index new incoming log events. This operation only affects log group-level policies, including any facet configurations, and preserves any data source-based account policies that may apply to the log group.

" }, "DeleteIntegration":{ "name":"DeleteIntegration", @@ -414,7 +467,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Deletes an existing scheduled query and all its associated configurations. This operation permanently removes the scheduled query and cannot be undone.

" + "documentation":"

Deletes a scheduled query and stops all future executions. This operation also removes any configured actions and associated resources.

" }, "DeleteSubscriptionFilter":{ "name":"DeleteSubscriptionFilter", @@ -572,6 +625,40 @@ ], "documentation":"

Returns a list of custom and default field indexes which are discovered in log data. For more information about field index policies, see PutIndexPolicy.

" }, + "DescribeImportTaskBatches":{ + "name":"DescribeImportTaskBatches", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeImportTaskBatchesRequest"}, + "output":{"shape":"DescribeImportTaskBatchesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidOperationException"} + ], + "documentation":"

Gets detailed information about the individual batches within an import task, including their status and any error messages. For CloudTrail Event Data Store sources, a batch refers to a subset of stored events grouped by their eventTime.

" + }, + "DescribeImportTasks":{ + "name":"DescribeImportTasks", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeImportTasksRequest"}, + "output":{"shape":"DescribeImportTasksResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidOperationException"} + ], + "documentation":"

Lists and describes import tasks, with optional filtering by import status and source ARN.

" + }, "DescribeIndexPolicies":{ "name":"DescribeIndexPolicies", "http":{ @@ -601,7 +688,7 @@ {"shape":"InvalidParameterException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Returns information about log groups. You can return all your log groups or filter the results by prefix. The results are ASCII-sorted by log group name.

CloudWatch Logs doesn't support IAM policies that control access to the DescribeLogGroups action by using the aws:ResourceTag/key-name condition key. Other CloudWatch Logs actions do support the use of the aws:ResourceTag/key-name condition key to control access. For more information about using tags to control access, see Controlling access to Amazon Web Services resources using tags.

If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability.

" + "documentation":"

Returns information about log groups, including data sources that ingest into each log group. You can return all your log groups or filter the results by prefix. The results are ASCII-sorted by log group name.

CloudWatch Logs doesn't support IAM policies that control access to the DescribeLogGroups action by using the aws:ResourceTag/key-name condition key. Other CloudWatch Logs actions do support the use of the aws:ResourceTag/key-name condition key to control access. For more information about using tags to control access, see Controlling access to Amazon Web Services resources using tags.

If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability.

" }, "DescribeLogStreams":{ "name":"DescribeLogStreams", @@ -646,7 +733,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Returns a list of CloudWatch Logs Insights queries that are scheduled, running, or have been run recently in this account. You can request all queries or limit it to queries of a specific log group or queries with a certain status.

" + "documentation":"

Returns a list of CloudWatch Logs Insights queries that are scheduled, running, or have been run recently in this account. You can request all queries or limit it to queries of a specific log group or queries with a certain status.

This operation includes both interactive queries started directly by users and automated queries executed by scheduled query configurations. Scheduled query executions appear in the results alongside manually initiated queries, providing visibility into all query activity in your account.

" }, "DescribeQueryDefinitions":{ "name":"DescribeQueryDefinitions", @@ -706,6 +793,23 @@ ], "documentation":"

Disassociates the specified KMS key from the specified log group or from all CloudWatch Logs Insights query results in the account.

When you use DisassociateKmsKey, you specify either the logGroupName parameter or the resourceIdentifier parameter. You can't specify both of those parameters in the same operation.

It can take up to 5 minutes for this operation to take effect.

" }, + "DisassociateSourceFromS3TableIntegration":{ + "name":"DisassociateSourceFromS3TableIntegration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DisassociateSourceFromS3TableIntegrationRequest"}, + "output":{"shape":"DisassociateSourceFromS3TableIntegrationResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Disassociates a data source from an S3 Table Integration, removing query access and deleting all associated data from the integration.

" + }, "FilterLogEvents":{ "name":"FilterLogEvents", "http":{ @@ -849,6 +953,22 @@ ], "documentation":"

Lists log events from the specified log stream. You can list all of the log events or filter using a time range.

GetLogEvents is a paginated operation. Each page returned can contain up to 1 MB of log events or up to 10,000 log events. A returned page might only be partially full, or even empty. For example, if the result of a query would return 15,000 log events, the first page isn't guaranteed to have 10,000 log events even if they all fit into 1 MB.

Partially full or empty pages don't necessarily mean that pagination is finished. As long as the nextBackwardToken or nextForwardToken returned is NOT equal to the nextToken that you passed into the API call, there might be more log events available. The token that you use depends on the direction you want to move in along the log stream. The returned tokens are never null.

If you set startFromHead to true and you don’t include endTime in your request, you can end up in a situation where the pagination doesn't terminate. This can happen when the new log events are being added to the target log streams faster than they are being read. This situation is a good use case for the CloudWatch Logs Live Tail feature.

If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability.

You can specify the log group to search by using either logGroupIdentifier or logGroupName. You must include one of these two parameters, but you can't include both.

If you are using log transformation, the GetLogEvents operation returns only the original versions of log events, before they were transformed. To view the transformed versions, you must use a CloudWatch Logs query.

" }, + "GetLogFields":{ + "name":"GetLogFields", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetLogFieldsRequest"}, + "output":{"shape":"GetLogFieldsResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OperationAbortedException"}, + {"shape":"ServiceUnavailableException"} + ], + "documentation":"

Discovers available fields for a specific data source and type. The response includes any field modifications introduced through pipelines, such as new fields or changed field types.

" + }, "GetLogGroupFields":{ "name":"GetLogGroupFields", "http":{ @@ -863,7 +983,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Returns a list of the fields that are included in log events in the specified log group. Includes the percentage of log events that contain each field. The search is limited to a time period that you specify.

You can specify the log group to search by using either logGroupIdentifier or logGroupName. You must specify one of these parameters, but you can't specify both.

In the results, fields that start with @ are fields generated by CloudWatch Logs. For example, @timestamp is the timestamp of each log event. For more information about the fields that are generated by CloudWatch logs, see Supported Logs and Discovered Fields.

The response results are sorted by the frequency percentage, starting with the highest percentage.

If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability.

" + "documentation":"

Returns a list of the fields that are included in log events in the specified log group. Includes the percentage of log events that contain each field. The search is limited to a time period that you specify.

This operation is used for discovering fields within log group events. For discovering fields across data sources, use the GetLogFields operation.

You can specify the log group to search by using either logGroupIdentifier or logGroupName. You must specify one of these parameters, but you can't specify both.

In the results, fields that start with @ are fields generated by CloudWatch Logs. For example, @timestamp is the timestamp of each log event. For more information about the fields that are generated by CloudWatch Logs, see Supported Logs and Discovered Fields.

The response results are sorted by the frequency percentage, starting with the highest percentage.

If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account and view data from the linked source accounts. For more information, see CloudWatch cross-account observability.

" }, "GetLogObject":{ "name":"GetLogObject", @@ -912,7 +1032,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Returns the results from the specified query.

Only the fields requested in the query are returned, along with a @ptr field, which is the identifier for the log record. You can use the value of @ptr in a GetLogRecord operation to get the full log record.

GetQueryResults does not start running a query. To run a query, use StartQuery. For more information about how long results of previous queries are available, see CloudWatch Logs quotas.

If the value of the Status field in the output is Running, this operation returns only partial results. If you see a value of Scheduled or Running for the status, you can retry the operation later to see the final results.

If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account to start queries in linked source accounts. For more information, see CloudWatch cross-account observability.

" + "documentation":"

Returns the results from the specified query.

Only the fields requested in the query are returned, along with a @ptr field, which is the identifier for the log record. You can use the value of @ptr in a GetLogRecord operation to get the full log record.

GetQueryResults does not start running a query. To run a query, use StartQuery. For more information about how long results of previous queries are available, see CloudWatch Logs quotas.

If the value of the Status field in the output is Running, this operation returns only partial results. If you see a value of Scheduled or Running for the status, you can retry the operation later to see the final results.

This operation is used both for retrieving results from interactive queries and from automated scheduled query executions. Scheduled queries use GetQueryResults internally to retrieve query results for processing and delivery to configured destinations.

If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account to start queries in linked source accounts. For more information, see CloudWatch cross-account observability.

" }, "GetScheduledQuery":{ "name":"GetScheduledQuery", @@ -929,7 +1049,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Returns detailed information about a specified scheduled query, including its configuration, current state, and execution history.

" + "documentation":"

Retrieves details about a specific scheduled query, including its configuration, execution status, and metadata.

" }, "GetScheduledQueryHistory":{ "name":"GetScheduledQueryHistory", @@ -946,7 +1066,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Retrieves the execution history of a scheduled query within a specified time range, including execution status and destination processing metadata.

" + "documentation":"

Retrieves the execution history of a scheduled query within a specified time range, including query results and destination processing status.

" }, "GetTransformer":{ "name":"GetTransformer", @@ -964,6 +1084,21 @@ ], "documentation":"

Returns the information about the log transformer associated with this log group.

This operation returns data only for transformers created at the log group level. To get information for an account-level transformer, use DescribeAccountPolicies.

" }, + "ListAggregateLogGroupSummaries":{ + "name":"ListAggregateLogGroupSummaries", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListAggregateLogGroupSummariesRequest"}, + "output":{"shape":"ListAggregateLogGroupSummariesResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"InvalidParameterException"} + ], + "documentation":"

Returns an aggregate summary of all log groups in the Region grouped by specified data source characteristics. Supports optional filtering by log group class, name patterns, and data sources. If you perform this action in a monitoring account, you can also return aggregated summaries of log groups from source accounts that are linked to the monitoring account. For more information about using cross-account observability to set up monitoring accounts and source accounts, see CloudWatch cross-account observability.

The operation aggregates log groups by data source name and type, and optionally by format, providing counts of log groups that share these characteristics. The operation paginates results. By default, it returns up to 50 results and includes a token to retrieve more results.

" + }, "ListAnomalies":{ "name":"ListAnomalies", "http":{ @@ -1022,7 +1157,7 @@ {"shape":"InvalidParameterException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Returns a list of log groups in the Region in your account. If you are performing this action in a monitoring account, you can choose to also return log groups from source accounts that are linked to the monitoring account. For more information about using cross-account observability to set up monitoring accounts and source accounts, see CloudWatch cross-account observability.

You can optionally filter the list by log group class and by using regular expressions in your request to match strings in the log group names.

This operation is paginated. By default, your first use of this operation returns 50 results, and includes a token to use in a subsequent operation to return more results.

" + "documentation":"

Returns a list of log groups in the Region in your account. If you are performing this action in a monitoring account, you can choose to also return log groups from source accounts that are linked to the monitoring account. For more information about using cross-account observability to set up monitoring accounts and source accounts, see CloudWatch cross-account observability.

You can optionally filter the list by log group class, by using regular expressions in your request to match strings in the log group names, by using the fieldIndexes parameter to filter log groups based on which field indexes are configured, by using the dataSources parameter to filter log groups by data source types, and by using the fieldIndexNames parameter to filter by specific field index names.

This operation is paginated. By default, your first use of this operation returns 50 results, and includes a token to use in a subsequent operation to return more results.

" }, "ListLogGroupsForQuery":{ "name":"ListLogGroupsForQuery", @@ -1054,7 +1189,24 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Lists all scheduled queries in the current AWS account and region with optional filtering by state.

" + "documentation":"

Lists all scheduled queries in your account and Region. You can filter results by state to show only enabled or disabled queries.

" + }, + "ListSourcesForS3TableIntegration":{ + "name":"ListSourcesForS3TableIntegration", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSourcesForS3TableIntegrationRequest"}, + "output":{"shape":"ListSourcesForS3TableIntegrationResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Returns a list of data source associations for a specified S3 Table Integration, showing which data sources are currently associated for query access.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -1101,7 +1253,7 @@ {"shape":"ServiceUnavailableException"}, {"shape":"LimitExceededException"} ], - "documentation":"

Creates an account-level data protection policy, subscription filter policy, field index policy, transformer policy, or metric extraction policy that applies to all log groups or a subset of log groups in the account.

To use this operation, you must be signed on with the correct permissions depending on the type of policy that you are creating.

Data protection policy

A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level data protection policy.

Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked.

If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account-level policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked.

By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command.

For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking.

To use the PutAccountPolicy operation for a data protection policy, you must be signed on with the logs:PutDataProtectionPolicy and logs:PutAccountPolicy permissions.

The PutAccountPolicy operation applies to all log groups in the account. You can use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked.

Subscription filter policy

A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to both existing log groups and log groups that are created later in this account. Supported destinations are Kinesis Data Streams, Firehose, and Lambda. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.

The following destinations are supported for subscription filters:

Each account can have one account-level subscription filter policy per Region. If you are updating an existing filter, you must specify the correct name in PolicyName. To perform a PutAccountPolicy subscription filter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.

Transformer policy

Creates or updates a log transformer policy for your account. You use log transformers to transform log events into a different format, making them easier for you to process and analyze. You can also transform logs from different sources into standardized formats that contain relevant, source-specific information. After you have created a transformer, CloudWatch Logs performs this transformation at the time of log ingestion. You can then refer to the transformed versions of the logs during operations such as querying with CloudWatch Logs Insights or creating metric filters or subscription filters.

You can also use a transformer to copy metadata from metadata keys into the log events themselves. This metadata can include log group name, log stream name, account ID and Region.

A transformer for a log group is a series of processors, where each processor applies one type of transformation to the log events ingested into this log group. For more information about the available processors to use in a transformer, see Processors that you can use.

Having log events in standardized format enables visibility across your applications for your log analysis, reporting, and alarming needs. CloudWatch Logs provides transformation for common log types with out-of-the-box transformation templates for major Amazon Web Services log sources such as VPC flow logs, Lambda, and Amazon RDS. You can use pre-built transformation templates or create custom transformation policies.

You can create transformers only for the log groups in the Standard log class.

You can have one account-level transformer policy that applies to all log groups in the account. Or you can create as many as 20 account-level transformer policies that are each scoped to a subset of log groups with the selectionCriteria parameter. If you have multiple account-level transformer policies with selection criteria, no two of them can use the same or overlapping log group name prefixes. For example, if you have one policy filtered to log groups that start with my-log, you can't have another field index policy filtered to my-logpprod or my-logging.

CloudWatch Logs provides default field indexes for all log groups in the Standard log class. Default field indexes are automatically available for the following fields:

Default field indexes are in addition to any custom field indexes you define within your policy. Default field indexes are not counted towards your field index quota.

You can also set up a transformer at the log-group level. For more information, see PutTransformer. If there is both a log-group level transformer created with PutTransformer and an account-level transformer that could apply to the same log group, the log group uses only the log-group level transformer. It ignores the account-level transformer.

Field index policy

You can use field index policies to create indexes on fields found in log events in the log group. Creating field indexes can help lower the scan volume for CloudWatch Logs Insights queries that reference those fields, because these queries attempt to skip the processing of log events that are known to not match the indexed field. Good fields to index are fields that you often need to query for and fields or values that match only a small fraction of the total log events. Common examples of indexes include request ID, session ID, user IDs, or instance IDs. For more information, see Create field indexes to improve query performance and reduce costs

To find the fields that are in your log group events, use the GetLogGroupFields operation.

For example, suppose you have created a field index for requestId. Then, any CloudWatch Logs Insights query on that log group that includes requestId = value or requestId in [value, value, ...] will attempt to process only the log events where the indexed field matches the specified value.

Matches of log events to the names of indexed fields are case-sensitive. For example, an indexed field of RequestId won't match a log event containing requestId.

You can have one account-level field index policy that applies to all log groups in the account. Or you can create as many as 20 account-level field index policies that are each scoped to a subset of log groups with the selectionCriteria parameter. If you have multiple account-level index policies with selection criteria, no two of them can use the same or overlapping log group name prefixes. For example, if you have one policy filtered to log groups that start with my-log, you can't have another field index policy filtered to my-logpprod or my-logging.

If you create an account-level field index policy in a monitoring account in cross-account observability, the policy is applied only to the monitoring account and not to any source accounts.

If you want to create a field index policy for a single log group, you can use PutIndexPolicy instead of PutAccountPolicy. If you do so, that log group will use only that log-group level policy, and will ignore the account-level policy that you create with PutAccountPolicy.

Metric extraction policy

A metric extraction policy controls whether CloudWatch Metrics can be created through the Embedded Metrics Format (EMF) for log groups in your account. By default, EMF metric creation is enabled for all log groups. You can use metric extraction policies to disable EMF metric creation for your entire account or specific log groups.

When a policy disables EMF metric creation for a log group, log events in the EMF format are still ingested, but no CloudWatch Metrics are created from them.

Creating a policy disables metrics for AWS features that use EMF to create metrics, such as CloudWatch Container Insights and CloudWatch Application Signals. To prevent turning off those features by accident, we recommend that you exclude the underlying log-groups through a selection-criteria such as LogGroupNamePrefix NOT IN [\"/aws/containerinsights\", \"/aws/ecs/containerinsights\", \"/aws/application-signals/data\"].

Each account can have either one account-level metric extraction policy that applies to all log groups, or up to 5 policies that are each scoped to a subset of log groups with the selectionCriteria parameter. The selection criteria supports filtering by LogGroupName and LogGroupNamePrefix using the operators IN and NOT IN. You can specify up to 50 values in each IN or NOT IN list.

The selection criteria can be specified in these formats:

LogGroupName IN [\"log-group-1\", \"log-group-2\"]

LogGroupNamePrefix NOT IN [\"/aws/prefix1\", \"/aws/prefix2\"]

If you have multiple account-level metric extraction policies with selection criteria, no two of them can have overlapping criteria. For example, if you have one policy with selection criteria LogGroupNamePrefix IN [\"my-log\"], you can't have another metric extraction policy with selection criteria LogGroupNamePrefix IN [\"/my-log-prod\"] or LogGroupNamePrefix IN [\"/my-logging\"], as the set of log groups matching these prefixes would be a subset of the log groups matching the first policy's prefix, creating an overlap.

When using NOT IN, only one policy with this operator is allowed per account.

When combining policies with IN and NOT IN operators, the overlap check ensures that policies don't have conflicting effects. Two policies with IN and NOT IN operators do not overlap if and only if every value in the IN policy is completely contained within some value in the NOT IN policy. For example:

" + "documentation":"

Creates an account-level data protection policy, subscription filter policy, field index policy, transformer policy, or metric extraction policy that applies to all log groups, a subset of log groups, or a data source name and type combination in the account.

For field index policies, you can configure indexed fields as facets to enable interactive exploration of your logs. Facets provide value distributions and counts for indexed fields in the CloudWatch Logs Insights console without requiring query execution. For more information, see Use facets to group and explore logs.

To use this operation, you must be signed on with the correct permissions depending on the type of policy that you are creating.

Data protection policy

A data protection policy can help safeguard sensitive data that's ingested by your log groups by auditing and masking the sensitive log data. Each account can have only one account-level data protection policy.

Sensitive data is detected and masked when it is ingested into a log group. When you set a data protection policy, log events ingested into the log groups before that time are not masked.

If you use PutAccountPolicy to create a data protection policy for your whole account, it applies to both existing log groups and all log groups that are created later in this account. The account-level policy is applied to existing log groups with eventual consistency. It might take up to 5 minutes before sensitive data in existing log groups begins to be masked.

By default, when a user views a log event that includes masked data, the sensitive data is replaced by asterisks. A user who has the logs:Unmask permission can use a GetLogEvents or FilterLogEvents operation with the unmask parameter set to true to view the unmasked log events. Users with the logs:Unmask permission can also view unmasked data in the CloudWatch Logs console by running a CloudWatch Logs Insights query with the unmask query command.

For more information, including a list of types of data that can be audited and masked, see Protect sensitive log data with masking.

To use the PutAccountPolicy operation for a data protection policy, you must be signed on with the logs:PutDataProtectionPolicy and logs:PutAccountPolicy permissions.

The PutAccountPolicy operation applies to all log groups in the account. You can use PutDataProtectionPolicy to create a data protection policy that applies to just one log group. If a log group has its own data protection policy and the account also has an account-level data protection policy, then the two policies are cumulative. Any sensitive term specified in either policy is masked.

Subscription filter policy

A subscription filter policy sets up a real-time feed of log events from CloudWatch Logs to other Amazon Web Services services. Account-level subscription filter policies apply to both existing log groups and log groups that are created later in this account. Supported destinations are Kinesis Data Streams, Firehose, and Lambda. When log events are sent to the receiving service, they are Base64 encoded and compressed with the GZIP format.

The following destinations are supported for subscription filters:

Each account can have one account-level subscription filter policy per Region. If you are updating an existing filter, you must specify the correct name in PolicyName. To perform a PutAccountPolicy subscription filter operation for any destination except a Lambda function, you must also have the iam:PassRole permission.

Transformer policy

Creates or updates a log transformer policy for your account. You use log transformers to transform log events into a different format, making them easier for you to process and analyze. You can also transform logs from different sources into standardized formats that contain relevant, source-specific information. After you have created a transformer, CloudWatch Logs performs this transformation at the time of log ingestion. You can then refer to the transformed versions of the logs during operations such as querying with CloudWatch Logs Insights or creating metric filters or subscription filters.

You can also use a transformer to copy metadata from metadata keys into the log events themselves. This metadata can include log group name, log stream name, account ID and Region.

A transformer for a log group is a series of processors, where each processor applies one type of transformation to the log events ingested into this log group. For more information about the available processors to use in a transformer, see Processors that you can use.

Having log events in standardized format enables visibility across your applications for your log analysis, reporting, and alarming needs. CloudWatch Logs provides transformation for common log types with out-of-the-box transformation templates for major Amazon Web Services log sources such as VPC flow logs, Lambda, and Amazon RDS. You can use pre-built transformation templates or create custom transformation policies.

You can create transformers only for the log groups in the Standard log class.

You can have one account-level transformer policy that applies to all log groups in the account. Or you can create as many as 20 account-level transformer policies that are each scoped to a subset of log groups with the selectionCriteria parameter. If you have multiple account-level transformer policies with selection criteria, no two of them can use the same or overlapping log group name prefixes. For example, if you have one policy filtered to log groups that start with my-log, you can't have another transformer policy filtered to my-logpprod or my-logging.

You can also set up a transformer at the log-group level. For more information, see PutTransformer. If there is both a log-group level transformer created with PutTransformer and an account-level transformer that could apply to the same log group, the log group uses only the log-group level transformer. It ignores the account-level transformer.

Field index policy

You can use field index policies to create indexes on fields found in log events for a log group or data source name and type combination. Creating field indexes can help lower the scan volume for CloudWatch Logs Insights queries that reference those fields, because these queries attempt to skip the processing of log events that are known to not match the indexed field. Good fields to index are fields that you often need to query for and fields or values that match only a small fraction of the total log events. Common examples of indexes include request ID, session ID, user IDs, or instance IDs. For more information, see Create field indexes to improve query performance and reduce costs

To find the fields that are in your log group events, use the GetLogGroupFields operation. To find the fields for a data source, use the GetLogFields operation.

For example, suppose you have created a field index for requestId. Then, any CloudWatch Logs Insights query on that log group that includes requestId = value or requestId in [value, value, ...] will attempt to process only the log events where the indexed field matches the specified value.

Matches of log events to the names of indexed fields are case-sensitive. For example, an indexed field of RequestId won't match a log event containing requestId.

You can have one account-level field index policy that applies to all log groups in the account. Or you can create as many as 20 account-level field index policies that are each scoped to a subset of log groups using LogGroupNamePrefix with the selectionCriteria parameter. You can have another 20 account-level field index policies using DataSourceName and DataSourceType for the selectionCriteria parameter. If you have multiple account-level index policies with LogGroupNamePrefix selection criteria, no two of them can use the same or overlapping log group name prefixes. For example, if you have one policy filtered to log groups that start with my-log, you can't have another field index policy filtered to my-logpprod or my-logging. Similarly, if you have multiple account-level index policies with DataSourceName and DataSourceType selection criteria, no two of them can use the same data source name and type combination. For example, if you have one policy filtered to the data source name amazon_vpc and data source type flow you cannot create another policy with this combination.

If you create an account-level field index policy in a monitoring account in cross-account observability, the policy is applied only to the monitoring account and not to any source accounts.

CloudWatch Logs provides default field indexes for all log groups in the Standard log class. Default field indexes are automatically available for the following fields:

CloudWatch Logs provides default field indexes for certain data source name and type combinations as well. Default field indexes are automatically available for the following data source name and type combinations as identified in the following list:

amazon_vpc.flow

amazon_route53.resolver_query

aws_waf.access

aws_cloudtrail.data, aws_cloudtrail.management

Default field indexes are in addition to any custom field indexes you define within your policy. Default field indexes are not counted towards your field index quota.

If you want to create a field index policy for a single log group, you can use PutIndexPolicy instead of PutAccountPolicy. If you do so, that log group will use that log-group level policy and any account-level policies that match at the data source level; any account-level policy that matches at the log group level (for example, no selection criteria or log group name prefix selection criteria) will be ignored.

Metric extraction policy

A metric extraction policy controls whether CloudWatch Metrics can be created through the Embedded Metrics Format (EMF) for log groups in your account. By default, EMF metric creation is enabled for all log groups. You can use metric extraction policies to disable EMF metric creation for your entire account or specific log groups.

When a policy disables EMF metric creation for a log group, log events in the EMF format are still ingested, but no CloudWatch Metrics are created from them.

Creating a policy disables metrics for AWS features that use EMF to create metrics, such as CloudWatch Container Insights and CloudWatch Application Signals. To prevent turning off those features by accident, we recommend that you exclude the underlying log-groups through a selection-criteria such as LogGroupNamePrefix NOT IN [\"/aws/containerinsights\", \"/aws/ecs/containerinsights\", \"/aws/application-signals/data\"].

Each account can have either one account-level metric extraction policy that applies to all log groups, or up to 5 policies that are each scoped to a subset of log groups with the selectionCriteria parameter. The selection criteria supports filtering by LogGroupName and LogGroupNamePrefix using the operators IN and NOT IN. You can specify up to 50 values in each IN or NOT IN list.

The selection criteria can be specified in these formats:

LogGroupName IN [\"log-group-1\", \"log-group-2\"]

LogGroupNamePrefix NOT IN [\"/aws/prefix1\", \"/aws/prefix2\"]

If you have multiple account-level metric extraction policies with selection criteria, no two of them can have overlapping criteria. For example, if you have one policy with selection criteria LogGroupNamePrefix IN [\"/my-log\"], you can't have another metric extraction policy with selection criteria LogGroupNamePrefix IN [\"/my-log-prod\"] or LogGroupNamePrefix IN [\"/my-logging\"], as the set of log groups matching these prefixes would be a subset of the log groups matching the first policy's prefix, creating an overlap.

When using NOT IN, only one policy with this operator is allowed per account.

When combining policies with IN and NOT IN operators, the overlap check ensures that policies don't have conflicting effects. Two policies with IN and NOT IN operators do not overlap if and only if every value in the IN policy is completely contained within some value in the NOT IN policy. For example:

" }, "PutDataProtectionPolicy":{ "name":"PutDataProtectionPolicy", @@ -1216,7 +1368,7 @@ {"shape":"OperationAbortedException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Creates or updates a field index policy for the specified log group. Only log groups in the Standard log class support field index policies. For more information about log classes, see Log classes.

You can use field index policies to create field indexes on fields found in log events in the log group. Creating field indexes speeds up and lowers the costs for CloudWatch Logs Insights queries that reference those field indexes, because these queries attempt to skip the processing of log events that are known to not match the indexed field. Good fields to index are fields that you often need to query for and fields or values that match only a small fraction of the total log events. Common examples of indexes include request ID, session ID, user IDs, and instance IDs. For more information, see Create field indexes to improve query performance and reduce costs.

To find the fields that are in your log group events, use the GetLogGroupFields operation.

For example, suppose you have created a field index for requestId. Then, any CloudWatch Logs Insights query on that log group that includes requestId = value or requestId IN [value, value, ...] will process fewer log events to reduce costs, and have improved performance.

CloudWatch Logs provides default field indexes for all log groups in the Standard log class. Default field indexes are automatically available for the following fields:

Default field indexes are in addition to any custom field indexes you define within your policy. Default field indexes are not counted towards your field index quota.

Each index policy has the following quotas and restrictions:

Matches of log events to the names of indexed fields are case-sensitive. For example, a field index of RequestId won't match a log event containing requestId.

Log group-level field index policies created with PutIndexPolicy override account-level field index policies created with PutAccountPolicy. If you use PutIndexPolicy to create a field index policy for a log group, that log group uses only that policy. The log group ignores any account-wide field index policy that you might have created.

" + "documentation":"

Creates or updates a field index policy for the specified log group. Only log groups in the Standard log class support field index policies. For more information about log classes, see Log classes.

You can use field index policies to create field indexes on fields found in log events in the log group. Creating field indexes speeds up and lowers the costs for CloudWatch Logs Insights queries that reference those field indexes, because these queries attempt to skip the processing of log events that are known to not match the indexed field. Good fields to index are fields that you often need to query for and fields or values that match only a small fraction of the total log events. Common examples of indexes include request ID, session ID, user IDs, and instance IDs. For more information, see Create field indexes to improve query performance and reduce costs.

You can configure indexed fields as facets to enable interactive exploration and filtering of your logs in the CloudWatch Logs Insights console. Facets allow you to view value distributions and counts for indexed fields without running queries. When you create a field index, you can optionally set it as a facet to enable this interactive analysis capability. For more information, see Use facets to group and explore logs.

To find the fields that are in your log group events, use the GetLogGroupFields operation.

For example, suppose you have created a field index for requestId. Then, any CloudWatch Logs Insights query on that log group that includes requestId = value or requestId IN [value, value, ...] will process fewer log events to reduce costs, and have improved performance.

CloudWatch Logs provides default field indexes for all log groups in the Standard log class. Default field indexes are automatically available for the following fields:

Default field indexes are in addition to any custom field indexes you define within your policy. Default field indexes are not counted towards your field index quota.

Each index policy has the following quotas and restrictions:

Matches of log events to the names of indexed fields are case-sensitive. For example, a field index of RequestId won't match a log event containing requestId.

Log group-level field index policies created with PutIndexPolicy override account-level field index policies created with PutAccountPolicy that apply to log groups. If you use PutIndexPolicy to create a field index policy for a log group, that log group uses only that policy for log group-level indexing, including any facet configurations. The log group ignores any account-wide field index policy that applies to log groups, but data source-based account policies may still apply.

" }, "PutIntegration":{ "name":"PutIntegration", @@ -1401,7 +1553,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Starts a query of one or more log groups using CloudWatch Logs Insights. You specify the log groups and time range to query and the query string to use.

For more information, see CloudWatch Logs Insights Query Syntax.

After you run a query using StartQuery, the query results are stored by CloudWatch Logs. You can use GetQueryResults to retrieve the results of a query, using the queryId that StartQuery returns.

To specify the log groups to query, a StartQuery operation must include one of the following:

If you have associated a KMS key with the query results in this account, then StartQuery uses that key to encrypt the results when it stores them. If no key is associated with query results, the query results are encrypted with the default CloudWatch Logs encryption method.

Queries time out after 60 minutes of runtime. If your queries are timing out, reduce the time range being searched or partition your query into a number of queries.

If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account to start a query in a linked source account. For more information, see CloudWatch cross-account observability. For a cross-account StartQuery operation, the query definition must be defined in the monitoring account.

You can have up to 30 concurrent CloudWatch Logs insights queries, including queries that have been added to dashboards.

" + "documentation":"

Starts a query of one or more log groups or data sources using CloudWatch Logs Insights. You specify the log groups or data sources and time range to query and the query string to use. You can query up to 10 data sources in a single query.

For more information, see CloudWatch Logs Insights Query Syntax.

After you run a query using StartQuery, the query results are stored by CloudWatch Logs. You can use GetQueryResults to retrieve the results of a query, using the queryId that StartQuery returns.

Interactive queries started with StartQuery share concurrency limits with automated scheduled query executions. Both types of queries count toward the same regional concurrent query quota, so high scheduled query activity may affect the availability of concurrent slots for interactive queries.

To specify the log groups to query, a StartQuery operation must include one of the following:

If you have associated a KMS key with the query results in this account, then StartQuery uses that key to encrypt the results when it stores them. If no key is associated with query results, the query results are encrypted with the default CloudWatch Logs encryption method.

Queries time out after 60 minutes of runtime. If your queries are timing out, reduce the time range being searched or partition your query into a number of queries.

If you are using CloudWatch cross-account observability, you can use this operation in a monitoring account to start a query in a linked source account. For more information, see CloudWatch cross-account observability. For a cross-account StartQuery operation, the query definition must be defined in the monitoring account.

You can have up to 30 concurrent CloudWatch Logs insights queries, including queries that have been added to dashboards.

" }, "StopQuery":{ "name":"StopQuery", @@ -1416,7 +1568,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ServiceUnavailableException"} ], - "documentation":"

Stops a CloudWatch Logs Insights query that is in progress. If the query has already ended, the operation returns an error indicating that the specified query is not running.

" + "documentation":"

Stops a CloudWatch Logs Insights query that is in progress. If the query has already ended, the operation returns an error indicating that the specified query is not running.

This operation can be used to cancel both interactive queries and individual scheduled query executions. When used with scheduled queries, StopQuery cancels only the specific execution identified by the query ID, not the scheduled query configuration itself.

" }, "TagLogGroup":{ "name":"TagLogGroup", @@ -1568,7 +1720,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Updates the configuration of an existing scheduled query. This operation follows PUT semantics, replacing the existing configuration with the provided values.

" + "documentation":"

Updates an existing scheduled query with new configuration. This operation uses PUT semantics, allowing modification of query parameters, schedule, and destinations.

" } }, "shapes":{ @@ -1686,6 +1838,24 @@ }, "documentation":"

This processor adds new key-value pairs to the log event.

For more information about this processor including examples, see addKeys in the CloudWatch Logs User Guide.

" }, + "AggregateLogGroupSummaries":{ + "type":"list", + "member":{"shape":"AggregateLogGroupSummary"} + }, + "AggregateLogGroupSummary":{ + "type":"structure", + "members":{ + "logGroupCount":{ + "shape":"LogGroupCount", + "documentation":"

The number of log groups in this aggregate summary group.

" + }, + "groupingIdentifiers":{ + "shape":"GroupingIdentifiers", + "documentation":"

An array of key-value pairs that identify the data source characteristics used to group the log groups.

The size and content of this array depends on the groupBy parameter specified in the request.

" + } + }, + "documentation":"

Contains an aggregate summary of log groups grouped by data source characteristics, including the count of log groups and their grouping identifiers.

" + }, "AllowedActionForAllowVendedLogsDeliveryForResource":{"type":"string"}, "AllowedFieldDelimiters":{ "type":"list", @@ -1895,7 +2065,38 @@ } } }, + "AssociateSourceToS3TableIntegrationRequest":{ + "type":"structure", + "required":[ + "integrationArn", + "dataSource" + ], + "members":{ + "integrationArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the S3 Table Integration to associate the data source with.

" + }, + "dataSource":{ + "shape":"DataSource", + "documentation":"

The data source to associate with the S3 Table Integration. Contains the name and type of the data source.

" + } + } + }, + "AssociateSourceToS3TableIntegrationResponse":{ + "type":"structure", + "members":{ + "identifier":{ + "shape":"S3TableIntegrationSourceIdentifier", + "documentation":"

The unique identifier for the association between the data source and S3 Table Integration.

" + } + } + }, "Baseline":{"type":"boolean"}, + "BatchId":{ + "type":"string", + "max":256, + "min":1 + }, "Boolean":{"type":"boolean"}, "CSV":{ "type":"structure", @@ -1929,6 +2130,41 @@ } } }, + "CancelImportTaskRequest":{ + "type":"structure", + "required":["importId"], + "members":{ + "importId":{ + "shape":"ImportId", + "documentation":"

The ID of the import task to cancel.

" + } + } + }, + "CancelImportTaskResponse":{ + "type":"structure", + "members":{ + "importId":{ + "shape":"ImportId", + "documentation":"

The ID of the cancelled import task.

" + }, + "importStatistics":{ + "shape":"ImportStatistics", + "documentation":"

Statistics about the import progress at the time of cancellation.

" + }, + "importStatus":{ + "shape":"ImportStatus", + "documentation":"

The final status of the import task. This will be set to CANCELLED.

" + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the import task was created, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.

" + }, + "lastUpdatedTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the import task was cancelled, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.

" + } + } + }, "ClientToken":{ "type":"string", "max":128, @@ -2154,6 +2390,44 @@ } } }, + "CreateImportTaskRequest":{ + "type":"structure", + "required":[ + "importSourceArn", + "importRoleArn" + ], + "members":{ + "importSourceArn":{ + "shape":"Arn", + "documentation":"

The ARN of the source to import from.

" + }, + "importRoleArn":{ + "shape":"RoleArn", + "documentation":"

The ARN of the IAM role that grants CloudWatch Logs permission to import from the CloudTrail Lake Event Data Store.

" + }, + "importFilter":{ + "shape":"ImportFilter", + "documentation":"

Optional filters to constrain the import by CloudTrail event time. Times are specified in Unix timestamp milliseconds. The range of data being imported must be within the specified source's retention period.

" + } + } + }, + "CreateImportTaskResponse":{ + "type":"structure", + "members":{ + "importId":{ + "shape":"ImportId", + "documentation":"

A unique identifier for the import task.

" + }, + "importDestinationArn":{ + "shape":"Arn", + "documentation":"

The ARN of the CloudWatch Logs log group created as the destination for the imported events.

" + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the import task was created, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.

" + } + } + }, "CreateLogAnomalyDetectorRequest":{ "type":"structure", "required":["logGroupArnList"], @@ -2252,59 +2526,59 @@ "members":{ "name":{ "shape":"ScheduledQueryName", - "documentation":"

A unique name for the scheduled query within the region for an AWS account. The name can contain letters, numbers, underscores, hyphens, forward slashes, periods, and hash symbols.

" + "documentation":"

The name of the scheduled query. The name must be unique within your account and region. Valid characters are alphanumeric characters, hyphens, underscores, and periods. Length must be between 1 and 255 characters.

" }, "description":{ "shape":"ScheduledQueryDescription", - "documentation":"

An optional description for the scheduled query to help identify its purpose.

" + "documentation":"

An optional description for the scheduled query to help identify its purpose and functionality.

" }, "queryLanguage":{ "shape":"QueryLanguage", - "documentation":"

The query language to use for the scheduled query. Valid values are LogsQL (CloudWatch Logs Insights query language), PPL (OpenSearch Service Piped Processing Language), and SQL (OpenSearch Service Structured Query Language).

" + "documentation":"

The query language to use for the scheduled query. Valid values are LogsQL, PPL, and SQL.

" }, "queryString":{ "shape":"QueryString", - "documentation":"

The CloudWatch Logs Insights query string to execute. This is the actual query that will be run against your log data on the specified schedule.

" + "documentation":"

The query string to execute. This is the same query syntax used in CloudWatch Logs Insights. Maximum length is 10,000 characters.

" }, "logGroupIdentifiers":{ "shape":"ScheduledQueryLogGroupIdentifiers", - "documentation":"

The log group identifiers to query. You can specify log group names or log group ARNs. If querying log groups in a source account from a monitoring account, you must specify the ARN of the log group.

" + "documentation":"

An array of log group names or ARNs to query. You can specify between 1 and 50 log groups. Log groups can be identified by name or full ARN.

" }, "scheduleExpression":{ "shape":"ScheduleExpression", - "documentation":"

A cron expression that defines when the scheduled query runs. The format is cron(fields) where fields consist of six space-separated values: minutes, hours, day_of_month, month, day_of_week, year.

" + "documentation":"

A cron expression that defines when the scheduled query runs. The expression uses standard cron syntax and supports minute-level precision. Maximum length is 256 characters.

" }, "timezone":{ "shape":"ScheduleTimezone", - "documentation":"

The timezone in which the schedule expression is evaluated. If not provided, defaults to UTC.

" + "documentation":"

The timezone for evaluating the schedule expression. This determines when the scheduled query executes relative to the specified timezone.

" }, "startTimeOffset":{ "shape":"StartTimeOffset", - "documentation":"

Time offset in seconds from the execution time for the start of the query time range. This defines the lookback period for the query (for example, 3600 for the last hour).

" + "documentation":"

The time offset in seconds that defines the lookback period for the query. This determines how far back in time the query searches from the execution time.

" }, "destinationConfiguration":{ "shape":"DestinationConfiguration", - "documentation":"

Configuration for destinations where the query results will be delivered after successful execution. You can configure delivery to S3 buckets or EventBridge event buses.

" + "documentation":"

Configuration for where to deliver query results. Currently supports Amazon S3 destinations for storing query output.

" }, "scheduleStartTime":{ "shape":"Timestamp", - "documentation":"

The start time for the query schedule in Unix epoch time (seconds since January 1, 1970, 00:00:00 UTC). If not specified, the schedule starts immediately.

" + "documentation":"

The start time for the scheduled query in Unix epoch format. The query will not execute before this time.

" }, "scheduleEndTime":{ "shape":"Timestamp", - "documentation":"

The end time for the query schedule in Unix epoch time (seconds since January 1, 1970, 00:00:00 UTC). If not specified, the schedule runs indefinitely.

" + "documentation":"

The end time for the scheduled query in Unix epoch format. The query will stop executing after this time.

" }, "executionRoleArn":{ "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the IAM role that CloudWatch Logs will assume to execute the scheduled query and deliver results to the specified destinations.

" + "documentation":"

The ARN of the IAM role that grants permissions to execute the query and deliver results to the specified destination. The role must have permissions to read from the specified log groups and write to the destination.

" }, "state":{ "shape":"ScheduledQueryState", - "documentation":"

The initial state of the scheduled query. Valid values are ENABLED (the query will run according to its schedule) and DISABLED (the query is paused and will not run). If not provided, defaults to ENABLED.

" + "documentation":"

The initial state of the scheduled query. Valid values are ENABLED and DISABLED. Default is ENABLED.

" }, "tags":{ "shape":"Tags", - "documentation":"

An optional list of key-value pairs to associate with the resource.

For more information about tagging, see Tagging Amazon Web Services resources

" + "documentation":"

Key-value pairs to associate with the scheduled query for resource management and cost allocation.

" } } }, @@ -2313,11 +2587,11 @@ "members":{ "scheduledQueryArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the created scheduled query.

" + "documentation":"

The ARN of the created scheduled query.

" }, "state":{ "shape":"ScheduledQueryState", - "documentation":"

The current state of the scheduled query (ENABLED or DISABLED).

" + "documentation":"

The current state of the scheduled query.

" } } }, @@ -2344,6 +2618,45 @@ "DISABLED" ] }, + "DataSource":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"DataSourceName", + "documentation":"

The name of the data source.

" + }, + "type":{ + "shape":"DataSourceType", + "documentation":"

The type of the data source.

" + } + }, + "documentation":"

Represents a data source that categorizes logs by originating service and log type, providing service-based organization complementing traditional log groups.

" + }, + "DataSourceFilter":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"DataSourceName", + "documentation":"

The name pattern to filter data sources by.

" + }, + "type":{ + "shape":"DataSourceType", + "documentation":"

The type pattern to filter data sources by.

" + } + }, + "documentation":"

Filter criteria for data sources, used to specify which data sources to include in operations based on name and type.

" + }, + "DataSourceFilters":{ + "type":"list", + "member":{"shape":"DataSourceFilter"}, + "max":5, + "min":1 + }, + "DataSourceName":{"type":"string"}, + "DataSourceType":{"type":"string"}, + "DataType":{"type":"string"}, "DateTimeConverter":{ "type":"structure", "required":[ @@ -2614,7 +2927,7 @@ "members":{ "identifier":{ "shape":"ScheduledQueryIdentifier", - "documentation":"

The name or ARN of the scheduled query to delete.

" + "documentation":"

The ARN or name of the scheduled query to delete.

" } } }, @@ -3046,6 +3359,87 @@ "nextToken":{"shape":"NextToken"} } }, + "DescribeImportTaskBatchesRequest":{ + "type":"structure", + "required":["importId"], + "members":{ + "importId":{ + "shape":"ImportId", + "documentation":"

The ID of the import task to get batch information for.

" + }, + "batchImportStatus":{ + "shape":"ImportStatusList", + "documentation":"

Optional filter to list import batches by their status. Accepts multiple status values: IN_PROGRESS, CANCELLED, COMPLETED and FAILED.

" + }, + "limit":{ + "shape":"DescribeLimit", + "documentation":"

The maximum number of import batches to return in the response. Default: 10

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token for the next set of results.

" + } + } + }, + "DescribeImportTaskBatchesResponse":{ + "type":"structure", + "members":{ + "importSourceArn":{ + "shape":"Arn", + "documentation":"

The ARN of the source being imported from.

" + }, + "importId":{ + "shape":"ImportId", + "documentation":"

The ID of the import task.

" + }, + "importBatches":{ + "shape":"ImportBatchList", + "documentation":"

The list of import batches that match the request filters.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to use when requesting the next set of results. Not present if there are no additional results to retrieve.

" + } + } + }, + "DescribeImportTasksRequest":{ + "type":"structure", + "members":{ + "importId":{ + "shape":"ImportId", + "documentation":"

Optional filter to describe a specific import task by its ID.

" + }, + "importStatus":{ + "shape":"ImportStatus", + "documentation":"

Optional filter to list imports by their status. Valid values are IN_PROGRESS, CANCELLED, COMPLETED and FAILED.

" + }, + "importSourceArn":{ + "shape":"Arn", + "documentation":"

Optional filter to list imports from a specific source.

" + }, + "limit":{ + "shape":"DescribeLimit", + "documentation":"

The maximum number of import tasks to return in the response. Default: 50

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The pagination token for the next set of results.

" + } + } + }, + "DescribeImportTasksResponse":{ + "type":"structure", + "members":{ + "imports":{ + "shape":"ImportList", + "documentation":"

The list of import tasks that match the request filters.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token to use when requesting the next set of results. Not present if there are no additional results to retrieve.

" + } + } + }, "DescribeIndexPoliciesLogGroupIdentifiers":{ "type":"list", "member":{"shape":"LogGroupIdentifier"}, @@ -3382,10 +3776,10 @@ "members":{ "s3Configuration":{ "shape":"S3Configuration", - "documentation":"

Configuration for delivering query results to an Amazon S3 bucket.

" + "documentation":"

Configuration for delivering query results to Amazon S3.

" } }, - "documentation":"

Configuration for destinations where scheduled query results are delivered, such as S3 buckets or EventBridge event buses.

" + "documentation":"

Configuration for where to deliver scheduled query results. Specifies the destination type and associated settings for result delivery.

" }, "DestinationField":{ "type":"string", @@ -3437,6 +3831,25 @@ } } }, + "DisassociateSourceFromS3TableIntegrationRequest":{ + "type":"structure", + "required":["identifier"], + "members":{ + "identifier":{ + "shape":"S3TableIntegrationSourceIdentifier", + "documentation":"

The unique identifier of the association to remove between the data source and S3 Table Integration.

" + } + } + }, + "DisassociateSourceFromS3TableIntegrationResponse":{ + "type":"structure", + "members":{ + "identifier":{ + "shape":"S3TableIntegrationSourceIdentifier", + "documentation":"

The unique identifier of the association that was removed.

" + } + } + }, "Distribution":{ "type":"string", "documentation":"

The method used to distribute log data to the destination, which can be either random or grouped by log stream.

", @@ -3523,6 +3936,7 @@ "type":"long", "min":0 }, + "ErrorMessage":{"type":"string"}, "EvaluationFrequency":{ "type":"string", "enum":[ @@ -3713,6 +4127,10 @@ "lastEventTime":{ "shape":"Timestamp", "documentation":"

The time and date of the most recent log event that matches this field index.

" + }, + "type":{ + "shape":"IndexType", + "documentation":"

The type of index. Specify FACET for facet-based indexing or FIELD_INDEX for field-based indexing. This determines how the field is indexed and can be queried.

" } }, "documentation":"

This structure describes one log event field that is used as an index in at least one index policy in this account.

" @@ -3723,6 +4141,12 @@ "min":1, "pattern":"[\\.\\-_/#A-Za-z0-9]+" }, + "FieldIndexNames":{ + "type":"list", + "member":{"shape":"FieldIndexName"}, + "max":20, + "min":1 + }, "FieldIndexes":{ "type":"list", "member":{"shape":"FieldIndex"} @@ -4110,6 +4534,32 @@ } } }, + "GetLogFieldsRequest":{ + "type":"structure", + "required":[ + "dataSourceName", + "dataSourceType" + ], + "members":{ + "dataSourceName":{ + "shape":"DataSourceName", + "documentation":"

The name of the data source to retrieve log fields for.

" + }, + "dataSourceType":{ + "shape":"DataSourceType", + "documentation":"

The type of the data source to retrieve log fields for.

" + } + } + }, + "GetLogFieldsResponse":{ + "type":"structure", + "members":{ + "logFields":{ + "shape":"LogFieldsList", + "documentation":"

The list of log fields for the specified data source, including field names and their data types.

" + } + } + }, "GetLogGroupFieldsRequest":{ "type":"structure", "members":{ @@ -4246,23 +4696,23 @@ "members":{ "identifier":{ "shape":"ScheduledQueryIdentifier", - "documentation":"

The name or ARN of the scheduled query to retrieve history for.

" + "documentation":"

The ARN or name of the scheduled query to retrieve history for.

" }, "startTime":{ "shape":"Timestamp", - "documentation":"

The start time for the history retrieval window in Unix epoch time.

" + "documentation":"

The start time for the history query in Unix epoch format.

" }, "endTime":{ "shape":"Timestamp", - "documentation":"

The end time for the history retrieval window in Unix epoch time.

" + "documentation":"

The end time for the history query in Unix epoch format.

" }, "executionStatuses":{ "shape":"ExecutionStatusList", - "documentation":"

Filter results by execution status (Running, Complete, Failed, Timeout, or InvalidQuery).

" + "documentation":"

An array of execution statuses to filter the history results. Only executions with the specified statuses are returned.

" }, "maxResults":{ "shape":"GetScheduledQueryHistoryMaxResults", - "documentation":"

The maximum number of history records to return in a single call.

" + "documentation":"

The maximum number of history records to return. Valid range is 1 to 1000.

" }, "nextToken":{"shape":"NextToken"} } @@ -4280,7 +4730,7 @@ }, "triggerHistory":{ "shape":"TriggerHistoryRecordList", - "documentation":"

The list of execution history records for the scheduled query.

" + "documentation":"

An array of execution history records for the scheduled query.

" }, "nextToken":{"shape":"NextToken"} } @@ -4291,7 +4741,7 @@ "members":{ "identifier":{ "shape":"ScheduledQueryIdentifier", - "documentation":"

The name or ARN of the scheduled query to retrieve.

" + "documentation":"

The ARN or name of the scheduled query to retrieve.

" } } }, @@ -4300,7 +4750,7 @@ "members":{ "scheduledQueryArn":{ "shape":"Arn", - "documentation":"

The Amazon Resource Name (ARN) of the scheduled query.

" + "documentation":"

The ARN of the scheduled query.

" }, "name":{ "shape":"ScheduledQueryName", @@ -4312,15 +4762,15 @@ }, "queryLanguage":{ "shape":"QueryLanguage", - "documentation":"

The query language used by the scheduled query (LogsQL, PPL, or SQL).

" + "documentation":"

The query language used by the scheduled query.

" }, "queryString":{ "shape":"QueryString", - "documentation":"

The CloudWatch Logs Insights query string being executed.

" + "documentation":"

The query string executed by the scheduled query.

" }, "logGroupIdentifiers":{ "shape":"ScheduledQueryLogGroupIdentifiers", - "documentation":"

The log group identifiers being queried by the scheduled query.

" + "documentation":"

The log groups queried by the scheduled query.

" }, "scheduleExpression":{ "shape":"ScheduleExpression", @@ -4328,47 +4778,47 @@ }, "timezone":{ "shape":"ScheduleTimezone", - "documentation":"

The timezone in which the schedule expression is evaluated.

" + "documentation":"

The timezone used for evaluating the schedule expression.

" }, "startTimeOffset":{ "shape":"StartTimeOffset", - "documentation":"

Time offset in seconds from the execution time for the start of the query time range.

" + "documentation":"

The time offset in seconds that defines the lookback period for the query.

" }, "destinationConfiguration":{ "shape":"DestinationConfiguration", - "documentation":"

Configuration for destinations where the query results are delivered.

" + "documentation":"

Configuration for where query results are delivered.

" }, "state":{ "shape":"ScheduledQueryState", - "documentation":"

The current state of the scheduled query (ENABLED or DISABLED).

" + "documentation":"

The current state of the scheduled query.

" }, "lastTriggeredTime":{ "shape":"Timestamp", - "documentation":"

The time when the scheduled query was last executed, in Unix epoch time.

" + "documentation":"

The timestamp when the scheduled query was last executed.

" }, "lastExecutionStatus":{ "shape":"ExecutionStatus", - "documentation":"

The status of the last executed query (Running, Complete, Failed, Timeout, or InvalidQuery).

" + "documentation":"

The status of the most recent execution of the scheduled query.

" }, "scheduleStartTime":{ "shape":"Timestamp", - "documentation":"

The start time for the query schedule in Unix epoch time.

" + "documentation":"

The start time for the scheduled query in Unix epoch format.

" }, "scheduleEndTime":{ "shape":"Timestamp", - "documentation":"

The end time for the query schedule in Unix epoch time.

" + "documentation":"

The end time for the scheduled query in Unix epoch format.

" }, "executionRoleArn":{ "shape":"RoleArn", - "documentation":"

The ARN of the IAM role used to execute the scheduled query.

" + "documentation":"

The ARN of the IAM role used to execute the query and deliver results.

" }, "creationTime":{ "shape":"Timestamp", - "documentation":"

The time when the scheduled query was created, in Unix epoch time.

" + "documentation":"

The timestamp when the scheduled query was created.

" }, "lastUpdatedTime":{ "shape":"Timestamp", - "documentation":"

The time when the scheduled query was last updated, in Unix epoch time.

" + "documentation":"

The timestamp when the scheduled query was last updated.

" } } }, @@ -4423,11 +4873,146 @@ "max":512, "min":1 }, + "GroupingIdentifier":{ + "type":"structure", + "members":{ + "key":{ + "shape":"GroupingIdentifierKey", + "documentation":"

The key that identifies the grouping characteristic. The format of the key uses dot notation. Examples are, dataSource.Name, dataSource.Type, and dataSource.Format.

" + }, + "value":{ + "shape":"GroupingIdentifierValue", + "documentation":"

The value associated with the grouping characteristic. Examples are amazon_vpc, flow, and OCSF.

" + } + }, + "documentation":"

A key-value pair that identifies how log groups are grouped in aggregate summaries.

" + }, + "GroupingIdentifierKey":{"type":"string"}, + "GroupingIdentifierValue":{"type":"string"}, + "GroupingIdentifiers":{ + "type":"list", + "member":{"shape":"GroupingIdentifier"} + }, "Histogram":{ "type":"map", "key":{"shape":"Time"}, "value":{"shape":"Count"} }, + "Import":{ + "type":"structure", + "members":{ + "importId":{ + "shape":"ImportId", + "documentation":"

The unique identifier of the import task.

" + }, + "importSourceArn":{ + "shape":"Arn", + "documentation":"

The ARN of the CloudTrail Lake Event Data Store being imported from.

" + }, + "importStatus":{ + "shape":"ImportStatus", + "documentation":"

The current status of the import task. Valid values are IN_PROGRESS, CANCELLED, COMPLETED and FAILED.

" + }, + "importDestinationArn":{ + "shape":"Arn", + "documentation":"

The ARN of the managed CloudWatch Logs log group where the events are being imported to.

" + }, + "importStatistics":{ + "shape":"ImportStatistics", + "documentation":"

Statistics about the import progress.

" + }, + "importFilter":{ + "shape":"ImportFilter", + "documentation":"

The filter criteria used for this import task.

" + }, + "creationTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the import task was created, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.

" + }, + "lastUpdatedTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the import task was last updated, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.

" + }, + "errorMessage":{ + "shape":"ErrorMessage", + "documentation":"

Error message related to any failed imports.

" + } + }, + "documentation":"

An import job to move data from CloudTrail Event Data Store to CloudWatch.

" + }, + "ImportBatch":{ + "type":"structure", + "required":[ + "batchId", + "status" + ], + "members":{ + "batchId":{ + "shape":"BatchId", + "documentation":"

The unique identifier of the import batch.

" + }, + "status":{ + "shape":"ImportStatus", + "documentation":"

The current status of the import batch. Valid values are IN_PROGRESS, CANCELLED, COMPLETED and FAILED.

" + }, + "errorMessage":{ + "shape":"ErrorMessage", + "documentation":"

The error message if the batch failed to import. Only present when status is FAILED.

" + } + }, + "documentation":"

A collection of events being imported to CloudWatch.

" + }, + "ImportBatchList":{ + "type":"list", + "member":{"shape":"ImportBatch"} + }, + "ImportFilter":{ + "type":"structure", + "members":{ + "startEventTime":{ + "shape":"Timestamp", + "documentation":"

The start of the time range for events to import, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.

" + }, + "endEventTime":{ + "shape":"Timestamp", + "documentation":"

The end of the time range for events to import, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.

" + } + }, + "documentation":"

The filter criteria used for import tasks.

" + }, + "ImportId":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[\\-a-zA-Z0-9]+" + }, + "ImportList":{ + "type":"list", + "member":{"shape":"Import"} + }, + "ImportStatistics":{ + "type":"structure", + "members":{ + "bytesImported":{ + "shape":"StoredBytes", + "documentation":"

The total number of bytes that have been imported to the managed log group.

" + } + }, + "documentation":"

Statistics about the import progress.

" + }, + "ImportStatus":{ + "type":"string", + "enum":[ + "IN_PROGRESS", + "CANCELLED", + "COMPLETED", + "FAILED" + ] + }, + "ImportStatusList":{ + "type":"list", + "member":{"shape":"ImportStatus"} + }, "IncludeLinkedAccounts":{"type":"boolean"}, "IndexPolicies":{ "type":"list", @@ -4466,6 +5051,13 @@ "LOG_GROUP" ] }, + "IndexType":{ + "type":"string", + "enum":[ + "FACET", + "FIELD_INDEX" + ] + }, "InferredTokenName":{ "type":"string", "min":1 @@ -4574,7 +5166,7 @@ "InternalServerException":{ "type":"structure", "members":{}, - "documentation":"

An internal server error occurred while processing the request. This is typically a temporary issue and the request can be retried.

", + "documentation":"

An internal server error occurred while processing the request. This exception is returned when the service encounters an unexpected condition that prevents it from fulfilling the request.

", "exception":true, "fault":true }, @@ -4632,6 +5224,58 @@ "documentation":"

You have reached the maximum number of resources that can be created.

", "exception":true }, + "ListAggregateLogGroupSummariesGroupBy":{ + "type":"string", + "enum":[ + "DATA_SOURCE_NAME_TYPE_AND_FORMAT", + "DATA_SOURCE_NAME_AND_TYPE" + ] + }, + "ListAggregateLogGroupSummariesRequest":{ + "type":"structure", + "required":["groupBy"], + "members":{ + "accountIdentifiers":{ + "shape":"AccountIds", + "documentation":"

When includeLinkedAccounts is set to true, use this parameter to specify the list of accounts to search. You can specify as many as 20 account IDs in the array.

" + }, + "includeLinkedAccounts":{ + "shape":"IncludeLinkedAccounts", + "documentation":"

If you are using a monitoring account, set this to true to have the operation return log groups in the accounts listed in accountIdentifiers.

If this parameter is set to true and accountIdentifiers contains a null value, the operation returns all log groups in the monitoring account and all log groups in all source accounts that are linked to the monitoring account.

The default for this parameter is false.

" + }, + "logGroupClass":{ + "shape":"LogGroupClass", + "documentation":"

Filters the results by log group class to include only log groups of the specified class.

" + }, + "logGroupNamePattern":{ + "shape":"LogGroupNameRegexPattern", + "documentation":"

Use this parameter to limit the returned log groups to only those with names that match the pattern that you specify. This parameter is a regular expression that can match prefixes and substrings, and supports wildcard matching and matching multiple patterns, as in the following examples.

You can specify as many as five different regular expression patterns in this field, each of which must be between 3 and 24 characters. You can include the ^ symbol as many as five times, and include the | symbol as many as four times.

" + }, + "dataSources":{ + "shape":"DataSourceFilters", + "documentation":"

Filters the results by data source characteristics to include only log groups associated with the specified data sources.

" + }, + "groupBy":{ + "shape":"ListAggregateLogGroupSummariesGroupBy", + "documentation":"

Specifies how to group the log groups in the summary.

" + }, + "nextToken":{"shape":"NextToken"}, + "limit":{ + "shape":"ListLogGroupsRequestLimit", + "documentation":"

The maximum number of aggregated summaries to return. If you omit this parameter, the default is up to 50 aggregated summaries.

" + } + } + }, + "ListAggregateLogGroupSummariesResponse":{ + "type":"structure", + "members":{ + "aggregateLogGroupSummaries":{ + "shape":"AggregateLogGroupSummaries", + "documentation":"

The list of aggregate log group summaries grouped by the specified data source characteristics.

" + }, + "nextToken":{"shape":"NextToken"} + } + }, "ListAnomaliesLimit":{ "type":"integer", "max":50, @@ -4778,9 +5422,22 @@ "limit":{ "shape":"ListLimit", "documentation":"

The maximum number of log groups to return. If you omit this parameter, the default is up to 50 log groups.

" + }, + "dataSources":{ + "shape":"DataSourceFilters", + "documentation":"

An array of data source filters to filter log groups by their associated data sources. You can filter by data source name, type, or both. Multiple filters within the same dimension are combined with OR logic, while filters across different dimensions are combined with AND logic.

" + }, + "fieldIndexNames":{ + "shape":"FieldIndexNames", + "documentation":"

An array of field index names to filter log groups that have specific field indexes. Only log groups containing all specified field indexes are returned. You can specify 1 to 20 field index names, each with 1 to 512 characters.

" } } }, + "ListLogGroupsRequestLimit":{ + "type":"integer", + "max":50, + "min":1 + }, "ListLogGroupsResponse":{ "type":"structure", "members":{ @@ -4801,12 +5458,12 @@ "members":{ "maxResults":{ "shape":"ListScheduledQueriesMaxResults", - "documentation":"

The maximum number of scheduled queries to return in a single call.

" + "documentation":"

The maximum number of scheduled queries to return. Valid range is 1 to 1000.

" }, "nextToken":{"shape":"NextToken"}, "state":{ "shape":"ScheduledQueryState", - "documentation":"

Filter results by the state of scheduled queries (ENABLED or DISABLED).

" + "documentation":"

Filter scheduled queries by state. Valid values are ENABLED and DISABLED. If not specified, all scheduled queries are returned.

" } } }, @@ -4816,10 +5473,40 @@ "nextToken":{"shape":"NextToken"}, "scheduledQueries":{ "shape":"ScheduledQuerySummaryList", - "documentation":"

The list of scheduled queries with summary information.

" + "documentation":"

An array of scheduled query summary information.

" } } }, + "ListSourcesForS3TableIntegrationMaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "ListSourcesForS3TableIntegrationRequest":{ + "type":"structure", + "required":["integrationArn"], + "members":{ + "integrationArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the S3 Table Integration to list associations for.

" + }, + "maxResults":{ + "shape":"ListSourcesForS3TableIntegrationMaxResults", + "documentation":"

The maximum number of associations to return in a single call. Valid range is 1 to 100.

" + }, + "nextToken":{"shape":"NextToken"} + } + }, + "ListSourcesForS3TableIntegrationResponse":{ + "type":"structure", + "members":{ + "sources":{ + "shape":"S3TableIntegrationSources", + "documentation":"

The list of data source associations for the specified S3 Table Integration.

" + }, + "nextToken":{"shape":"NextToken"} + } + }, "ListTagsForResourceRequest":{ "type":"structure", "required":["resourceArn"], @@ -5001,6 +5688,47 @@ "documentation":"

This structure contains the information for one sample log event that is associated with an anomaly found by a log anomaly detector.

" }, "LogEventIndex":{"type":"integer"}, + "LogFieldName":{ + "type":"string", + "max":256, + "min":1 + }, + "LogFieldType":{ + "type":"structure", + "members":{ + "type":{ + "shape":"DataType", + "documentation":"

The data type of the log field.

" + }, + "element":{ + "shape":"LogFieldType", + "documentation":"

For array or collection types, specifies the element type information.

" + }, + "fields":{ + "shape":"LogFieldsList", + "documentation":"

For complex types, contains the nested field definitions.

" + } + }, + "documentation":"

Defines the data type structure for a log field, including the type, element information, and nested fields for complex types.

" + }, + "LogFieldsList":{ + "type":"list", + "member":{"shape":"LogFieldsListItem"} + }, + "LogFieldsListItem":{ + "type":"structure", + "members":{ + "logFieldName":{ + "shape":"LogFieldName", + "documentation":"

The name of the log field.

" + }, + "logFieldType":{ + "shape":"LogFieldType", + "documentation":"

The data type information for the log field.

" + } + }, + "documentation":"

Represents a log field with its name and data type information for a specific data source.

" + }, "LogGroup":{ "type":"structure", "members":{ @@ -5070,6 +5798,7 @@ "DELIVERY" ] }, + "LogGroupCount":{"type":"integer"}, "LogGroupField":{ "type":"structure", "members":{ @@ -5840,7 +6569,7 @@ }, "mappingVersion":{ "shape":"MappingVersion", - "documentation":"

Identifies the specific release of the Open Cybersecurity Schema Framework (OCSF) transformer being used to parse OCSF data. Defaults to the latest version if not specified. Does not automatically update.

" + "documentation":"

The version of the OCSF mapping to use for parsing log data.

" } }, "documentation":"

This processor converts logs into Open Cybersecurity Schema Framework (OCSF) events.

For more information about this processor including examples, see parseToOCSF in the CloudWatch Logs User Guide.

" @@ -6069,11 +6798,11 @@ "members":{ "policyName":{ "shape":"PolicyName", - "documentation":"

A name for the policy. This must be unique within the account.

" + "documentation":"

A name for the policy. This must be unique within the account and cannot start with aws/.

" }, "policyDocument":{ "shape":"AccountPolicyDocument", - "documentation":"

Specify the policy, in JSON.

Data protection policy

A data protection policy must include two JSON blocks:

For an example data protection policy, see the Examples section on this page.

The contents of the two DataIdentifier arrays must match exactly.

In addition to the two JSON blocks, the policyDocument can also include Name, Description, and Version fields. The Name is different than the operation's policyName parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch.

The JSON specified in policyDocument can be up to 30,720 characters long.

Subscription filter policy

A subscription filter policy can include the following attributes in a JSON block:

Transformer policy

A transformer policy must include one JSON block with the array of processors and their configurations. For more information about available processors, see Processors that you can use.

Field index policy

A field index filter policy can include the following attribute in a JSON block:

It must contain at least one field index.

The following is an example of an index policy document that creates two indexes, RequestId and TransactionId.

\"policyDocument\": \"{ \\\"Fields\\\": [ \\\"RequestId\\\", \\\"TransactionId\\\" ] }\"

" + "documentation":"

Specify the policy, in JSON.

Data protection policy

A data protection policy must include two JSON blocks:

For an example data protection policy, see the Examples section on this page.

The contents of the two DataIdentifier arrays must match exactly.

In addition to the two JSON blocks, the policyDocument can also include Name, Description, and Version fields. The Name is different than the operation's policyName parameter, and is used as a dimension when CloudWatch Logs reports audit findings metrics to CloudWatch.

The JSON specified in policyDocument can be up to 30,720 characters long.

Subscription filter policy

A subscription filter policy can include the following attributes in a JSON block:

Transformer policy

A transformer policy must include one JSON block with the array of processors and their configurations. For more information about available processors, see Processors that you can use.

Field index policy

A field index filter policy can include the following attribute in a JSON block:

It must contain at least one field index.

The following is an example of an index policy document that creates indexes with different types.

\"policyDocument\": \"{ \\\"Fields\\\": [ \\\"TransactionId\\\" ], \\\"FieldsV2\\\": {\\\"RequestId\\\": {\\\"type\\\": \\\"FIELD_INDEX\\\"}, \\\"APIName\\\": {\\\"type\\\": \\\"FACET\\\"}, \\\"StatusCode\\\": {\\\"type\\\": \\\"FACET\\\"}}}\"

You can use FieldsV2 to specify the type for each field. Supported types are FIELD_INDEX and FACET. Field names within Fields and FieldsV2 must be mutually exclusive.

" }, "policyType":{ "shape":"PolicyType", @@ -6085,7 +6814,7 @@ }, "selectionCriteria":{ "shape":"SelectionCriteria", - "documentation":"

Use this parameter to apply the new policy to a subset of log groups in the account.

Specifying selectionCriteria is valid only when you specify SUBSCRIPTION_FILTER_POLICY, FIELD_INDEX_POLICY or TRANSFORMER_POLICY for policyType

If policyType is SUBSCRIPTION_FILTER_POLICY, the only supported selectionCriteria filter is LogGroupName NOT IN []

If policyType is FIELD_INDEX_POLICY or TRANSFORMER_POLICY, the only supported selectionCriteria filter is LogGroupNamePrefix

The selectionCriteria string can be up to 25KB in length. The length is determined by using its UTF-8 bytes.

Using the selectionCriteria parameter with SUBSCRIPTION_FILTER_POLICY is useful to help prevent infinite loops. For more information, see Log recursion prevention.

" + "documentation":"

Use this parameter to apply the new policy to a subset of log groups in the account or a data source name and type combination.

Specifying selectionCriteria is valid only when you specify SUBSCRIPTION_FILTER_POLICY, FIELD_INDEX_POLICY or TRANSFORMER_POLICY for policyType

The selectionCriteria string can be up to 25KB in length. The length is determined by using its UTF-8 bytes.

Using the selectionCriteria parameter with SUBSCRIPTION_FILTER_POLICY is useful to help prevent infinite loops. For more information, see Log recursion prevention.

" } } }, @@ -6211,7 +6940,7 @@ }, "logType":{ "shape":"LogType", - "documentation":"

Defines the type of log that the source is sending.

" + "documentation":"

Defines the type of log that the source is sending.

" }, "tags":{ "shape":"Tags", @@ -6297,7 +7026,7 @@ }, "policyDocument":{ "shape":"PolicyDocument", - "documentation":"

The index policy document, in JSON format. The following is an example of an index policy document that creates two indexes, RequestId and TransactionId.

\"policyDocument\": \"{ \"Fields\": [ \"RequestId\", \"TransactionId\" ] }\"

The policy document must include at least one field index. For more information about the fields that can be included and other restrictions, see Field index syntax and quotas.

" + "documentation":"

The index policy document, in JSON format. The following is an example of an index policy document that creates indexes with different types.

\"policyDocument\": \"{\"Fields\": [ \"TransactionId\" ], \"FieldsV2\": {\"RequestId\": {\"type\": \"FIELD_INDEX\"}, \"APIName\": {\"type\": \"FACET\"}, \"StatusCode\": {\"type\": \"FACET\"}}}\"

You can use FieldsV2 to specify the type for each field. Supported types are FIELD_INDEX and FACET. Field names within Fields and FieldsV2 must be mutually exclusive.

The policy document must include at least one field index. For more information about the fields that can be included and other restrictions, see Field index syntax and quotas.

" } } }, @@ -6992,14 +7721,14 @@ "members":{ "destinationIdentifier":{ "shape":"S3Uri", - "documentation":"

The S3 URI where query results will be stored (e.g., s3://bucket-name/prefix/).

" + "documentation":"

The Amazon S3 URI where query results are delivered. Must be a valid S3 URI format.

" }, "roleArn":{ "shape":"RoleArn", - "documentation":"

The ARN of the IAM role that CloudWatch Logs will assume to write results to the S3 bucket.

" + "documentation":"

The ARN of the IAM role that grants permissions to write query results to the specified Amazon S3 destination.

" } }, - "documentation":"

Configuration details for delivering scheduled query results to an Amazon S3 bucket.

" + "documentation":"

Configuration for Amazon S3 destination where scheduled query results are delivered.

" }, "S3DeliveryConfiguration":{ "type":"structure", @@ -7016,6 +7745,54 @@ }, "documentation":"

This structure contains delivery configurations that apply only when the delivery destination resource is an S3 bucket.

" }, + "S3TableIntegrationSource":{ + "type":"structure", + "members":{ + "identifier":{ + "shape":"S3TableIntegrationSourceIdentifier", + "documentation":"

The unique identifier for this data source association.

" + }, + "dataSource":{ + "shape":"DataSource", + "documentation":"

The data source associated with the S3 Table Integration.

" + }, + "status":{ + "shape":"S3TableIntegrationSourceStatus", + "documentation":"

The current status of the data source association.

" + }, + "statusReason":{ + "shape":"S3TableIntegrationSourceStatusReason", + "documentation":"

Additional information about the status of the data source association.

" + }, + "createdTimeStamp":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the data source association was created.

" + } + }, + "documentation":"

Represents a data source association with an S3 Table Integration, including its status and metadata.

" + }, + "S3TableIntegrationSourceIdentifier":{ + "type":"string", + "max":2048, + "min":1 + }, + "S3TableIntegrationSourceStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "UNHEALTHY", + "FAILED", + "DATA_SOURCE_DELETE_IN_PROGRESS" + ] + }, + "S3TableIntegrationSourceStatusReason":{ + "type":"string", + "min":1 + }, + "S3TableIntegrationSources":{ + "type":"list", + "member":{"shape":"S3TableIntegrationSource"} + }, "S3Uri":{ "type":"string", "max":1024, @@ -7038,26 +7815,26 @@ "members":{ "destinationType":{ "shape":"ScheduledQueryDestinationType", - "documentation":"

The type of destination (S3).

" + "documentation":"

The type of destination for query results.

" }, "destinationIdentifier":{ "shape":"String", - "documentation":"

The destination identifier (S3 URI).

" + "documentation":"

The identifier for the destination where results are delivered.

" }, "status":{ "shape":"ActionStatus", - "documentation":"

The processing status for this destination (IN_PROGRESS, ERROR, FAILED, or COMPLETE).

" + "documentation":"

The processing status of the destination delivery.

" }, "processedIdentifier":{ "shape":"String", - "documentation":"

The processed identifier returned for the destination (S3 key).

" + "documentation":"

The identifier of the processed result at the destination.

" }, "errorMessage":{ "shape":"String", - "documentation":"

Error message if the destination processing failed.

" + "documentation":"

Error message if destination processing failed.

" } }, - "documentation":"

Information about a destination where scheduled query results are processed and delivered.

" + "documentation":"

Information about a destination where scheduled query results are processed, including processing status and any error messages.

" }, "ScheduledQueryDestinationList":{ "type":"list", @@ -7103,15 +7880,15 @@ }, "state":{ "shape":"ScheduledQueryState", - "documentation":"

The current state of the scheduled query (ENABLED or DISABLED).

" + "documentation":"

The current state of the scheduled query.

" }, "lastTriggeredTime":{ "shape":"Timestamp", - "documentation":"

The time when the scheduled query was last executed.

" + "documentation":"

The timestamp when the scheduled query was last executed.

" }, "lastExecutionStatus":{ "shape":"ExecutionStatus", - "documentation":"

The status of the last execution (Running, Complete, Failed, Timeout, or InvalidQuery).

" + "documentation":"

The status of the most recent execution.

" }, "scheduleExpression":{ "shape":"ScheduleExpression", @@ -7119,22 +7896,22 @@ }, "timezone":{ "shape":"ScheduleTimezone", - "documentation":"

The timezone in which the schedule expression is evaluated.

" + "documentation":"

The timezone used for evaluating the schedule expression.

" }, "destinationConfiguration":{ "shape":"DestinationConfiguration", - "documentation":"

Configuration for destinations where the query results are delivered.

" + "documentation":"

Configuration for where query results are delivered.

" }, "creationTime":{ "shape":"Timestamp", - "documentation":"

The time when the scheduled query was created.

" + "documentation":"

The timestamp when the scheduled query was created.

" }, "lastUpdatedTime":{ "shape":"Timestamp", - "documentation":"

The time when the scheduled query was last updated.

" + "documentation":"

The timestamp when the scheduled query was last updated.

" } }, - "documentation":"

Summary information about a scheduled query, used in list operations.

" + "documentation":"

Summary information about a scheduled query, including basic configuration and execution status.

" }, "ScheduledQuerySummaryList":{ "type":"list", @@ -7762,26 +8539,26 @@ "members":{ "queryId":{ "shape":"QueryId", - "documentation":"

The unique identifier for the query execution.

" + "documentation":"

The unique identifier for this query execution.

" }, "executionStatus":{ "shape":"ExecutionStatus", - "documentation":"

The status of the query execution (Running, Complete, Failed, Timeout, or InvalidQuery).

" + "documentation":"

The execution status of the scheduled query run.

" }, "triggeredTimestamp":{ "shape":"Timestamp", - "documentation":"

The time when the scheduled query was triggered, in Unix epoch time.

" + "documentation":"

The timestamp when the scheduled query execution was triggered.

" }, "errorMessage":{ "shape":"String", - "documentation":"

The error message if the scheduled query execution failed. This field is only populated when the execution status indicates a failure.

" + "documentation":"

Error message if the query execution failed.

" }, "destinations":{ "shape":"ScheduledQueryDestinationList", - "documentation":"

The list of destinations where the scheduled query results were delivered for this execution. This includes S3 buckets configured for the scheduled query.

" + "documentation":"

Information about destination processing for this query execution.

" } }, - "documentation":"

A record of a scheduled query execution, including its status and destination processing information.

" + "documentation":"

A record of a scheduled query execution, including execution status, timestamp, and destination processing results.

" }, "TriggerHistoryRecordList":{ "type":"list", @@ -7985,55 +8762,55 @@ "members":{ "identifier":{ "shape":"ScheduledQueryIdentifier", - "documentation":"

The name or ARN of the scheduled query to update.

" + "documentation":"

The ARN or name of the scheduled query to update.

" }, "description":{ "shape":"ScheduledQueryDescription", - "documentation":"

Updated description for the scheduled query.

" + "documentation":"

An updated description for the scheduled query.

" }, "queryLanguage":{ "shape":"QueryLanguage", - "documentation":"

Updated query language to use (LogsQL, PPL, or SQL).

" + "documentation":"

The updated query language for the scheduled query.

" }, "queryString":{ "shape":"QueryString", - "documentation":"

Updated CloudWatch Logs Insights query string to execute.

" + "documentation":"

The updated query string to execute.

" }, "logGroupIdentifiers":{ "shape":"ScheduledQueryLogGroupIdentifiers", - "documentation":"

Updated log group identifiers to query.

" + "documentation":"

The updated array of log group names or ARNs to query.

" }, "scheduleExpression":{ "shape":"ScheduleExpression", - "documentation":"

Updated cron expression that defines when the scheduled query runs.

" + "documentation":"

The updated cron expression that defines when the scheduled query runs.

" }, "timezone":{ "shape":"ScheduleTimezone", - "documentation":"

Updated timezone in which the schedule expression is evaluated.

" + "documentation":"

The updated timezone for evaluating the schedule expression.

" }, "startTimeOffset":{ "shape":"StartTimeOffset", - "documentation":"

Updated time offset in seconds from the execution time for the start of the query time range.

" + "documentation":"

The updated time offset in seconds that defines the lookback period for the query.

" }, "destinationConfiguration":{ "shape":"DestinationConfiguration", - "documentation":"

Updated configuration for destinations where the query results will be delivered.

" + "documentation":"

The updated configuration for where to deliver query results.

" }, "scheduleStartTime":{ "shape":"Timestamp", - "documentation":"

Updated start time for the query schedule in Unix epoch time.

" + "documentation":"

The updated start time for the scheduled query in Unix epoch format.

" }, "scheduleEndTime":{ "shape":"Timestamp", - "documentation":"

Updated end time for the query schedule in Unix epoch time.

" + "documentation":"

The updated end time for the scheduled query in Unix epoch format.

" }, "executionRoleArn":{ "shape":"RoleArn", - "documentation":"

Updated ARN of the IAM role that CloudWatch Logs will assume to execute the scheduled query.

" + "documentation":"

The updated ARN of the IAM role that grants permissions to execute the query and deliver results.

" }, "state":{ "shape":"ScheduledQueryState", - "documentation":"

Updated state of the scheduled query (ENABLED or DISABLED).

" + "documentation":"

The updated state of the scheduled query.

" } } }, @@ -8054,7 +8831,7 @@ }, "queryLanguage":{ "shape":"QueryLanguage", - "documentation":"

The query language used by the updated scheduled query.

" + "documentation":"

The query language of the updated scheduled query.

" }, "queryString":{ "shape":"QueryString", @@ -8062,11 +8839,11 @@ }, "logGroupIdentifiers":{ "shape":"ScheduledQueryLogGroupIdentifiers", - "documentation":"

The log group identifiers of the updated scheduled query.

" + "documentation":"

The log groups queried by the updated scheduled query.

" }, "scheduleExpression":{ "shape":"ScheduleExpression", - "documentation":"

The schedule expression of the updated scheduled query.

" + "documentation":"

The cron expression of the updated scheduled query.

" }, "timezone":{ "shape":"ScheduleTimezone", @@ -8074,7 +8851,7 @@ }, "startTimeOffset":{ "shape":"StartTimeOffset", - "documentation":"

The start time offset of the updated scheduled query.

" + "documentation":"

The time offset of the updated scheduled query.

" }, "destinationConfiguration":{ "shape":"DestinationConfiguration", @@ -8086,19 +8863,19 @@ }, "lastTriggeredTime":{ "shape":"Timestamp", - "documentation":"

The time when the updated scheduled query was last executed.

" + "documentation":"

The timestamp when the updated scheduled query was last executed.

" }, "lastExecutionStatus":{ "shape":"ExecutionStatus", - "documentation":"

The status of the last execution of the updated scheduled query (Running, Complete, Failed, Timeout, or InvalidQuery).

" + "documentation":"

The status of the most recent execution of the updated scheduled query.

" }, "scheduleStartTime":{ "shape":"Timestamp", - "documentation":"

The schedule start time of the updated scheduled query.

" + "documentation":"

The start time of the updated scheduled query.

" }, "scheduleEndTime":{ "shape":"Timestamp", - "documentation":"

The schedule end time of the updated scheduled query.

" + "documentation":"

The end time of the updated scheduled query.

" }, "executionRoleArn":{ "shape":"RoleArn", @@ -8106,11 +8883,11 @@ }, "creationTime":{ "shape":"Timestamp", - "documentation":"

The creation time of the updated scheduled query.

" + "documentation":"

The timestamp when the scheduled query was originally created.

" }, "lastUpdatedTime":{ "shape":"Timestamp", - "documentation":"

The last updated time of the scheduled query.

" + "documentation":"

The timestamp when the scheduled query was last updated.

" } } }, diff --git a/awscli/botocore/data/mediaconvert/2017-08-29/service-2.json b/awscli/botocore/data/mediaconvert/2017-08-29/service-2.json index 692571747709..a598ecceb06e 100644 --- a/awscli/botocore/data/mediaconvert/2017-08-29/service-2.json +++ b/awscli/botocore/data/mediaconvert/2017-08-29/service-2.json @@ -2146,7 +2146,7 @@ }, "AudioDefaultSelection": { "type": "string", - "documentation": "Enable this setting on one audio selector to set it as the default for the job. The service uses this default for outputs where it can't find the specified input audio. If you don't set a default, those outputs have no audio.", + "documentation": "Specify a fallback audio selector for this input. Use to ensure outputs have audio even when the audio selector you specify in your output is missing from the source. DEFAULT (Checked in the MediaConvert console): If your output settings specify an audio selector that does not exist in this input, MediaConvert uses this audio selector instead. This is useful when you have multiple inputs with a different number of audio tracks. NOT_DEFAULT (Unchecked in the MediaConvert console): MediaConvert will not fallback from any missing audio selector. Any output specifying a missing audio selector will be silent.", "enum": [ "DEFAULT", "NOT_DEFAULT" @@ -2375,7 +2375,7 @@ "DefaultSelection": { "shape": "AudioDefaultSelection", "locationName": "defaultSelection", - "documentation": "Enable this setting on one audio selector to set it as the default for the job. The service uses this default for outputs where it can't find the specified input audio. If you don't set a default, those outputs have no audio." + "documentation": "Specify a fallback audio selector for this input. Use to ensure outputs have audio even when the audio selector you specify in your output is missing from the source. 
DEFAULT (Checked in the MediaConvert console): If your output settings specify an audio selector that does not exist in this input, MediaConvert uses this audio selector instead. This is useful when you have multiple inputs with a different number of audio tracks. NOT_DEFAULT (Unchecked in the MediaConvert console): MediaConvert will not fall back from any missing audio selector. Any output specifying a missing audio selector will be silent."
Use Min I-Interval to improve video compression by varying GOP size when two IDR-frames would be created near each other. For example, if a regular cadence-driven IDR-frame would fall within 5 frames of a scene-change IDR-frame, and you set Min I-interval to 5, then the encoder would only write an IDR-frame for the scene-change. In this way, one GOP is shortened or extended. If a cadence-driven IDR-frame would be further than 5 frames from a scene-change IDR-frame, then the encoder leaves all IDR-frames in place. To use an automatically determined interval: We recommend that you keep this value blank. This allows for MediaConvert to use an optimal setting according to the characteristics of your input video, and results in better video compression. To manually specify an interval: Enter a value from 1 to 30. Use when your downstream systems have specific GOP size requirements. To disable GOP size variance: Enter 0. MediaConvert will only create IDR-frames at the start of your output's cadence-driven GOP. Use when your downstream systems require a regular GOP size." }, + "MvOverPictureBoundaries": { + "shape": "H265MvOverPictureBoundaries", + "locationName": "mvOverPictureBoundaries", + "documentation": "If you are setting up the picture as a tile, you must set this to \"disabled\". In all other configurations, you typically enter \"enabled\"." + }, + "MvTemporalPredictor": { + "shape": "H265MvTemporalPredictor", + "locationName": "mvTemporalPredictor", + "documentation": "If you are setting up the picture as a tile, you must set this to \"disabled\". In other configurations, you typically enter \"enabled\"." + }, "NumberBFramesBetweenReferenceFrames": { "shape": "__integerMin0Max7", "locationName": "numberBFramesBetweenReferenceFrames", @@ -7570,11 +7597,31 @@ "locationName": "temporalIds", "documentation": "Enables temporal layer identifiers in the encoded bitstream. 
Up to 3 layers are supported depending on GOP structure: I- and P-frames form one layer, reference B-frames can form a second layer and non-reference b-frames can form a third layer. Decoders can optionally decode only the lower temporal layers to generate a lower frame rate output. For example, given a bitstream with temporal IDs and with b-frames = 1 (i.e. IbPbPb display order), a decoder could decode all the frames for full frame rate output or only the I and P frames (lowest temporal layer) for a half frame rate output." }, + "TileHeight": { + "shape": "__integerMin64Max2160", + "locationName": "tileHeight", + "documentation": "Set this field to set up the picture as a tile. You must also set TileWidth. The tile height must result in 22 or fewer rows in the frame. The tile width must result in 20 or fewer columns in the frame. And finally, the product of the column count and row count must be 64 or less. If the tile width and height are specified, MediaConvert will override the video codec slices field with a value that MediaConvert calculates." + }, + "TilePadding": { + "shape": "H265TilePadding", + "locationName": "tilePadding", + "documentation": "Set to \"padded\" to force MediaConvert to add padding to the frame, to obtain a frame that is a whole multiple of the tile size. If you are setting up the picture as a tile, you must enter \"padded\". In all other configurations, you typically enter \"none\"." + }, + "TileWidth": { + "shape": "__integerMin256Max3840", + "locationName": "tileWidth", + "documentation": "Set this field to set up the picture as a tile. See TileHeight for more information." + }, "Tiles": { "shape": "H265Tiles", "locationName": "tiles", "documentation": "Enable use of tiles, allowing horizontal as well as vertical subdivision of the encoded pictures." }, + "TreeBlockSize": { + "shape": "H265TreeBlockSize", + "locationName": "treeBlockSize", + "documentation": "Select the tree block size used for encoding. 
If you enter \"auto\", the encoder will pick the best size. If you are setting up the picture as a tile, you must set this to 32x32. In all other configurations, you typically enter \"auto\"." + }, "UnregisteredSeiTimecode": { "shape": "H265UnregisteredSeiTimecode", "locationName": "unregisteredSeiTimecode", @@ -7629,6 +7676,14 @@ "ENABLED" ] }, + "H265TilePadding": { + "type": "string", + "documentation": "Set to \"padded\" to force MediaConvert to add padding to the frame, to obtain a frame that is a whole multiple of the tile size. If you are setting up the picture as a tile, you must enter \"padded\". In all other configurations, you typically enter \"none\".", + "enum": [ + "NONE", + "PADDED" + ] + }, "H265Tiles": { "type": "string", "documentation": "Enable use of tiles, allowing horizontal as well as vertical subdivision of the encoded pictures.", @@ -7637,6 +7692,14 @@ "ENABLED" ] }, + "H265TreeBlockSize": { + "type": "string", + "documentation": "Select the tree block size used for encoding. If you enter \"auto\", the encoder will pick the best size. If you are setting up the picture as a tile, you must set this to 32x32. In all other configurations, you typically enter \"auto\".", + "enum": [ + "AUTO", + "TREE_SIZE_32X32" + ] + }, "H265UnregisteredSeiTimecode": { "type": "string", "documentation": "Inserts timecode for each frame as 4 bytes of an unregistered SEI message.", @@ -8830,6 +8893,11 @@ "locationName": "height", "documentation": "Specify the height, in pixels, for your video generator input. This is useful for positioning when you include one or more video overlays for this input. To use the default resolution 540x360: Leave both width and height blank. To specify a height: Enter an even integer from 32 to 8192. When you do, you must also specify a value for width." 
}, + "ImageInput": { + "shape": "__stringMin14PatternS3BmpBMPPngPNGTgaTGAHttpsBmpBMPPngPNGTgaTGA", + "locationName": "imageInput", + "documentation": "Specify the HTTP, HTTPS, or Amazon S3 location of the image that you want to overlay on the video. Use a PNG or TGA file." + }, "SampleRate": { "shape": "__integerMin32000Max48000", "locationName": "sampleRate", @@ -14145,6 +14213,11 @@ "VideoOverlayInput": { "type": "structure", "members": { + "AudioSelectors": { + "shape": "__mapOfAudioSelector", + "locationName": "audioSelectors", + "documentation": "Use Audio selectors to specify audio to use during your Video overlay. You can use multiple Audio selectors per Video overlay. When you include an Audio selector within a Video overlay, MediaConvert mutes any Audio selectors with the same name from the underlying input. For example, if your underlying input has Audio selector 1 and Audio selector 2, and your Video overlay only has Audio selector 1, then MediaConvert replaces all audio for Audio selector 1 during the Video overlay. To replace all audio for all Audio selectors from the underlying input by using a single Audio selector in your overlay, set DefaultSelection to DEFAULT (Check \\\"Use as default\\\" in the MediaConvert console)." 
+ }, "FileInput": { "shape": "__stringPatternS3Https", "locationName": "fileInput", @@ -15601,6 +15674,11 @@ "min": 24, "max": 60000 }, + "__integerMin256Max3840": { + "type": "integer", + "min": 256, + "max": 3840 + }, "__integerMin25Max10000": { "type": "integer", "min": 25, @@ -15681,6 +15759,11 @@ "min": 64000, "max": 640000 }, + "__integerMin64Max2160": { + "type": "integer", + "min": 64, + "max": 2160 + }, "__integerMin6Max16": { "type": "integer", "min": 6, diff --git a/awscli/botocore/data/medialive/2017-10-14/service-2.json b/awscli/botocore/data/medialive/2017-10-14/service-2.json index 4c8656cb7f52..91a5af711cdf 100644 --- a/awscli/botocore/data/medialive/2017-10-14/service-2.json +++ b/awscli/botocore/data/medialive/2017-10-14/service-2.json @@ -7444,6 +7444,11 @@ "shape": "ChannelEngineVersionResponse", "locationName": "channelEngineVersion", "documentation": "Requested engine version for this channel." + }, + "LinkedChannelSettings": { + "shape": "DescribeLinkedChannelSettings", + "locationName": "linkedChannelSettings", + "documentation": "Linked Channel Settings for this channel." } }, "documentation": "Placeholder documentation for Channel" @@ -7595,6 +7600,11 @@ "shape": "__listOfChannelEngineVersionResponse", "locationName": "usedChannelEngineVersions", "documentation": "The engine version that the running pipelines are using." + }, + "LinkedChannelSettings": { + "shape": "DescribeLinkedChannelSettings", + "locationName": "linkedChannelSettings", + "documentation": "Linked Channel Settings for this channel." } }, "documentation": "Placeholder documentation for ChannelSummary" @@ -7770,6 +7780,11 @@ "DryRun": { "shape": "__boolean", "locationName": "dryRun" + }, + "LinkedChannelSettings": { + "shape": "LinkedChannelSettings", + "locationName": "linkedChannelSettings", + "documentation": "The linked channel settings for the channel." 
} }, "documentation": "Placeholder documentation for CreateChannel" @@ -7860,6 +7875,11 @@ "DryRun": { "shape": "__boolean", "locationName": "dryRun" + }, + "LinkedChannelSettings": { + "shape": "LinkedChannelSettings", + "locationName": "linkedChannelSettings", + "documentation": "The linked channel settings for the channel." } }, "documentation": "A request to create a channel" @@ -8493,6 +8513,11 @@ "shape": "ChannelEngineVersionResponse", "locationName": "channelEngineVersion", "documentation": "Requested engine version for this channel." + }, + "LinkedChannelSettings": { + "shape": "DescribeLinkedChannelSettings", + "locationName": "linkedChannelSettings", + "documentation": "Linked Channel Settings for this channel." } }, "documentation": "Placeholder documentation for DeleteChannelResponse" @@ -8958,6 +8983,11 @@ "shape": "ChannelEngineVersionResponse", "locationName": "channelEngineVersion", "documentation": "Requested engine version for this channel." + }, + "LinkedChannelSettings": { + "shape": "DescribeLinkedChannelSettings", + "locationName": "linkedChannelSettings", + "documentation": "Linked Channel Settings for this channel." } }, "documentation": "Placeholder documentation for DescribeChannelResponse" @@ -16598,7 +16628,13 @@ }, "PipelineLockingSettings": { "type": "structure", - "members": {}, + "members": { + "PipelineLockingMethod": { + "shape": "PipelineLockingMethod", + "locationName": "pipelineLockingMethod", + "documentation": "The method to use to lock the video frames in the pipelines. sourceTimecode (default): Use the timecode in the source. videoAlignment: Lock frames that the encoder identifies as having matching content. If videoAlignment is selected, existing timecodes will not be used for any locking decisions." 
+ } + }, "documentation": "Pipeline Locking Settings" }, "PipelinePauseStateSettings": { @@ -17977,6 +18013,11 @@ "shape": "ChannelEngineVersionResponse", "locationName": "channelEngineVersion", "documentation": "Requested engine version for this channel." + }, + "LinkedChannelSettings": { + "shape": "DescribeLinkedChannelSettings", + "locationName": "linkedChannelSettings", + "documentation": "Linked Channel Settings for this channel." } }, "documentation": "Placeholder documentation for StartChannelResponse" @@ -18401,6 +18442,11 @@ "shape": "ChannelEngineVersionResponse", "locationName": "channelEngineVersion", "documentation": "Requested engine version for this channel." + }, + "LinkedChannelSettings": { + "shape": "DescribeLinkedChannelSettings", + "locationName": "linkedChannelSettings", + "documentation": "Linked Channel Settings for this channel." } }, "documentation": "Placeholder documentation for StopChannelResponse" @@ -19071,6 +19117,11 @@ "shape": "AnywhereSettings", "locationName": "anywhereSettings", "documentation": "The Elemental Anywhere settings for this channel." + }, + "LinkedChannelSettings": { + "shape": "LinkedChannelSettings", + "locationName": "linkedChannelSettings", + "documentation": "The linked channel settings for the channel." } }, "documentation": "Placeholder documentation for UpdateChannel" @@ -19196,6 +19247,11 @@ "shape": "AnywhereSettings", "locationName": "anywhereSettings", "documentation": "The Elemental Anywhere settings for this channel." + }, + "LinkedChannelSettings": { + "shape": "LinkedChannelSettings", + "locationName": "linkedChannelSettings", + "documentation": "The linked channel settings for the channel." } }, "documentation": "A request to update a channel.", @@ -21031,6 +21087,11 @@ "shape": "ChannelEngineVersionResponse", "locationName": "channelEngineVersion", "documentation": "Requested engine version for this channel." 
+ }, + "LinkedChannelSettings": { + "shape": "DescribeLinkedChannelSettings", + "locationName": "linkedChannelSettings", + "documentation": "Linked Channel Settings for this channel." } }, "documentation": "Placeholder documentation for RestartChannelPipelinesResponse" @@ -29997,6 +30058,109 @@ "shape": "RouterDestinationSettings" }, "documentation": "Placeholder documentation for __listOfRouterDestinationSettings" + }, + "DescribeFollowerChannelSettings": { + "type": "structure", + "members": { + "LinkedChannelType": { + "shape": "LinkedChannelType", + "locationName": "linkedChannelType", + "documentation": "Specifies this as a follower channel" + }, + "PrimaryChannelArn": { + "shape": "__string", + "locationName": "primaryChannelArn", + "documentation": "The ARN of the primary channel this channel follows" + } + }, + "documentation": "Details of a follower channel in a linked pair" + }, + "DescribeLinkedChannelSettings": { + "type": "structure", + "members": { + "FollowerChannelSettings": { + "shape": "DescribeFollowerChannelSettings", + "locationName": "followerChannelSettings" + }, + "PrimaryChannelSettings": { + "shape": "DescribePrimaryChannelSettings", + "locationName": "primaryChannelSettings" + } + }, + "documentation": "Linked channel configuration details" + }, + "DescribePrimaryChannelSettings": { + "type": "structure", + "members": { + "FollowingChannelArns": { + "shape": "__listOf__string", + "locationName": "followingChannelArns", + "documentation": "The ARNs of the following channels for this primary channel" + }, + "LinkedChannelType": { + "shape": "LinkedChannelType", + "locationName": "linkedChannelType", + "documentation": "Specifies this as a primary channel" + } + }, + "documentation": "Details of a primary (leader) channel in a linked pair" + }, + "FollowerChannelSettings": { + "type": "structure", + "members": { + "LinkedChannelType": { + "shape": "LinkedChannelType", + "locationName": "linkedChannelType", + "documentation": "Specifies this 
as a follower channel" + }, + "PrimaryChannelArn": { + "shape": "__string", + "locationName": "primaryChannelArn", + "documentation": "The ARN of the primary channel to follow" + } + }, + "documentation": "Settings for a follower channel in a linked pair" + }, + "LinkedChannelSettings": { + "type": "structure", + "members": { + "FollowerChannelSettings": { + "shape": "FollowerChannelSettings", + "locationName": "followerChannelSettings" + }, + "PrimaryChannelSettings": { + "shape": "PrimaryChannelSettings", + "locationName": "primaryChannelSettings" + } + }, + "documentation": "Configuration for linked channel relationships" + }, + "LinkedChannelType": { + "type": "string", + "documentation": "The values for the role for a linked channel.", + "enum": [ + "FOLLOWING_CHANNEL", + "PRIMARY_CHANNEL" + ] + }, + "PipelineLockingMethod": { + "type": "string", + "documentation": "Pipeline Locking Method", + "enum": [ + "SOURCE_TIMECODE", + "VIDEO_ALIGNMENT" + ] + }, + "PrimaryChannelSettings": { + "type": "structure", + "members": { + "LinkedChannelType": { + "shape": "LinkedChannelType", + "locationName": "linkedChannelType", + "documentation": "Specifies this as a primary channel" + } + }, + "documentation": "Settings for a primary (leader) channel in a linked pair" } }, "documentation": "API for AWS Elemental MediaLive" diff --git a/awscli/botocore/data/mediapackagev2/2022-12-25/service-2.json b/awscli/botocore/data/mediapackagev2/2022-12-25/service-2.json index 7c56910e5085..94c42e7b5496 100644 --- a/awscli/botocore/data/mediapackagev2/2022-12-25/service-2.json +++ b/awscli/botocore/data/mediapackagev2/2022-12-25/service-2.json @@ -3818,10 +3818,20 @@ "Url":{ "shape":"SpekeKeyProviderUrlString", "documentation":"

The URL of the API Gateway proxy that you set up to talk to your key server. The API Gateway proxy must reside in the same AWS Region as MediaPackage and must start with https://.

The following example shows a URL: https://1wm2dx1f33.execute-api.us-west-2.amazonaws.com/SpekeSample/copyProtection

" + }, + "CertificateArn":{ + "shape":"SpekeKeyProviderCertificateArnString", + "documentation":"

The ARN for the certificate that you imported to AWS Certificate Manager to add content key encryption to this endpoint. For this feature to work, your DRM key provider must support content key encryption.

" } }, "documentation":"

The parameters for the SPEKE key provider.

" }, + "SpekeKeyProviderCertificateArnString":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:([^:\\n]+):acm:([^:\\n]+):([0-9]+):certificate/[a-zA-Z0-9-_]+" + }, "SpekeKeyProviderDrmSystemsList":{ "type":"list", "member":{"shape":"DrmSystem"}, @@ -4361,7 +4371,16 @@ "MALFORMED_SECRET_ARN", "SECRET_FROM_DIFFERENT_ACCOUNT", "SECRET_FROM_DIFFERENT_REGION", - "INVALID_SECRET" + "INVALID_SECRET", + "RESOURCE_NOT_IN_SAME_REGION", + "CERTIFICATE_RESOURCE_NOT_FOUND", + "CERTIFICATE_ACCESS_DENIED", + "DESCRIBE_CERTIFICATE_FAILED", + "INVALID_CERTIFICATE_STATUS", + "INVALID_CERTIFICATE_KEY_ALGORITHM", + "INVALID_CERTIFICATE_SIGNATURE_ALGORITHM", + "MISSING_CERTIFICATE_DOMAIN_NAME", + "INVALID_ARN" ] } }, diff --git a/awscli/botocore/data/mediatailor/2018-04-23/service-2.json b/awscli/botocore/data/mediatailor/2018-04-23/service-2.json index 0f6013c09783..79a310621c86 100644 --- a/awscli/botocore/data/mediatailor/2018-04-23/service-2.json +++ b/awscli/botocore/data/mediatailor/2018-04-23/service-2.json @@ -633,6 +633,16 @@ }, "documentation":"

The setting that indicates what conditioning MediaTailor will perform on ads that the ad decision server (ADS) returns.

" }, + "AdDecisionServerConfiguration":{ + "type":"structure", + "members":{ + "HttpRequest":{ + "shape":"HttpRequest", + "documentation":"

The HTTP request configuration parameters for the ad decision server.

" + } + }, + "documentation":"

Configuration parameters for customizing HTTP requests sent to the ad decision server (ADS). This allows you to specify the HTTP method, headers, request body, and compression settings for ADS requests.

" + }, "AdMarkerPassthrough":{ "type":"structure", "members":{ @@ -970,6 +980,13 @@ }, "documentation":"

Clip range configuration for the VOD source associated with the program.

" }, + "CompressionMethod":{ + "type":"string", + "enum":[ + "NONE", + "GZIP" + ] + }, "ConfigurationAliasesRequest":{ "type":"map", "key":{ @@ -2282,6 +2299,10 @@ "AdConditioningConfiguration":{ "shape":"AdConditioningConfiguration", "documentation":"

The setting that indicates what conditioning MediaTailor will perform on ads that the ad decision server (ADS) returns, and what priority MediaTailor uses when inserting ads.

" + }, + "AdDecisionServerConfiguration":{ + "shape":"AdDecisionServerConfiguration", + "documentation":"

The configuration for customizing HTTP requests to the ad decision server (ADS). This includes settings for request method, headers, body content, and compression options.

" } } }, @@ -2406,6 +2427,28 @@ "member":{"shape":"HttpPackageConfiguration"}, "documentation":"

The VOD source's HTTP package configuration settings.

" }, + "HttpRequest":{ + "type":"structure", + "members":{ + "Method":{ + "shape":"Method", + "documentation":"

The HTTP method to use when making requests to the ad decision server. Supported values are GET and POST.

" + }, + "Body":{ + "shape":"__string", + "documentation":"

The request body content to send with HTTP requests to the ad decision server. This value is only eligible for POST requests.

" + }, + "Headers":{ + "shape":"StringMap", + "documentation":"

Custom HTTP headers to include in requests to the ad decision server. Specify headers as key-value pairs. This value is only eligible for POST requests.

" + }, + "CompressRequest":{ + "shape":"CompressionMethod", + "documentation":"

The compression method to apply to requests sent to the ad decision server. Supported values are NONE and GZIP. This value is only eligible for POST requests.

" + } + }, + "documentation":"

HTTP request configuration parameters that define how MediaTailor communicates with the ad decision server.

" + }, "InsertionMode":{ "type":"string", "documentation":"

Insertion Mode controls whether players can use stitched or guided ad insertion.

", @@ -2888,6 +2931,13 @@ "TIME_SIGNAL" ] }, + "Method":{ + "type":"string", + "enum":[ + "GET", + "POST" + ] + }, "Mode":{ "type":"string", "enum":[ @@ -2994,7 +3044,8 @@ "AdConditioningConfiguration":{ "shape":"AdConditioningConfiguration", "documentation":"

The setting that indicates what conditioning MediaTailor will perform on ads that the ad decision server (ADS) returns, and what priority MediaTailor uses when inserting ads.

" - } + }, + "AdDecisionServerConfiguration":{"shape":"AdDecisionServerConfiguration"} }, "documentation":"

A playback configuration. For information about MediaTailor configurations, see Working with configurations in AWS Elemental MediaTailor.

" }, @@ -3042,15 +3093,15 @@ }, "TrafficShapingType":{ "shape":"TrafficShapingType", - "documentation":"

Indicates the type of traffic shaping used for prefetch traffic shaping and limiting the number of requests to the ADS at one time.

" + "documentation":"

Indicates the type of traffic shaping used to limit the number of requests to the ADS at one time.

" }, "TrafficShapingRetrievalWindow":{ "shape":"TrafficShapingRetrievalWindow", - "documentation":"

Configuration for spreading ADS traffic across a set window instead of sending ADS requests for all sessions at the same time.

" + "documentation":"

The configuration that tells Elemental MediaTailor how many seconds to spread out requests to the ad decision server (ADS). Instead of sending ADS requests for all sessions at the same time, MediaTailor spreads the requests across the amount of time specified in the retrieval window.

" }, "TrafficShapingTpsConfiguration":{ "shape":"TrafficShapingTpsConfiguration", - "documentation":"

The configuration for TPS-based traffic shaping that limits the number of requests to the ad decision server (ADS) based on transactions per second instead of time windows.

" + "documentation":"

The configuration for TPS-based traffic shaping. This approach limits requests to the ad decision server (ADS) based on transactions per second and concurrent users.

" } }, "documentation":"

A complex type that contains settings governing when MediaTailor prefetches ads, and which dynamic variables that MediaTailor includes in the request to the ad decision server.

" @@ -3196,6 +3247,10 @@ "AdConditioningConfiguration":{ "shape":"AdConditioningConfiguration", "documentation":"

The setting that indicates what conditioning MediaTailor will perform on ads that the ad decision server (ADS) returns, and what priority MediaTailor uses when inserting ads.

" + }, + "AdDecisionServerConfiguration":{ + "shape":"AdDecisionServerConfiguration", + "documentation":"

The configuration for customizing HTTP requests to the ad decision server (ADS). This includes settings for request method, headers, body content, and compression options.

" } } }, @@ -3286,6 +3341,10 @@ "AdConditioningConfiguration":{ "shape":"AdConditioningConfiguration", "documentation":"

The setting that indicates what conditioning MediaTailor will perform on ads that the ad decision server (ADS) returns, and what priority MediaTailor uses when inserting ads.

" + }, + "AdDecisionServerConfiguration":{ + "shape":"AdDecisionServerConfiguration", + "documentation":"

The configuration for customizing HTTP requests to the ad decision server (ADS). This includes settings for request method, headers, body content, and compression options.

" } } }, @@ -3343,15 +3402,15 @@ }, "TrafficShapingType":{ "shape":"TrafficShapingType", - "documentation":"

Indicates the type of traffic shaping used for traffic shaping and limiting the number of requests to the ADS at one time.

" + "documentation":"

Indicates the type of traffic shaping used to limit the number of requests to the ADS at one time.

" }, "TrafficShapingRetrievalWindow":{ "shape":"TrafficShapingRetrievalWindow", - "documentation":"

Configuration for spreading ADS traffic across a set window instead of sending ADS requests for all sessions at the same time.

" + "documentation":"

The configuration that tells Elemental MediaTailor how many seconds to spread out requests to the ad decision server (ADS). Instead of sending ADS requests for all sessions at the same time, MediaTailor spreads the requests across the amount of time specified in the retrieval window.

" }, "TrafficShapingTpsConfiguration":{ "shape":"TrafficShapingTpsConfiguration", - "documentation":"

The configuration for TPS-based traffic shaping that limits the number of requests to the ad decision server (ADS) based on transactions per second instead of time windows.

" + "documentation":"

The configuration for TPS-based traffic shaping. This approach limits requests to the ad decision server (ADS) based on transactions per second and concurrent users.

" } }, "documentation":"

With recurring prefetch, MediaTailor automatically prefetches ads for every avail that occurs during the retrieval window. The following configurations describe the MediaTailor behavior when prefetching ads for a live event.

" @@ -3728,6 +3787,11 @@ ] }, "String":{"type":"string"}, + "StringMap":{ + "type":"map", + "key":{"shape":"__string"}, + "value":{"shape":"__string"} + }, "TagResourceRequest":{ "type":"structure", "required":[ @@ -3784,7 +3848,7 @@ "documentation":"

The amount of time, in seconds, that MediaTailor spreads prefetch requests to the ADS.

" } }, - "documentation":"

The configuration that tells Elemental MediaTailor how to spread out requests to the ad decision server (ADS). Instead of sending ADS requests for all sessions at the same time, MediaTailor spreads the requests across the amount of time specified in the retrieval window.

" + "documentation":"

The configuration that tells Elemental MediaTailor how many seconds to spread out requests to the ad decision server (ADS). Instead of sending ADS requests for all sessions at the same time, MediaTailor spreads the requests across the amount of time specified in the retrieval window.

" }, "TrafficShapingTpsConfiguration":{ "type":"structure", @@ -3798,7 +3862,7 @@ "documentation":"

The expected peak number of concurrent viewers for your content. MediaTailor uses this value along with peak TPS to determine how to distribute prefetch requests across the available capacity without exceeding your ADS limits.

" } }, - "documentation":"

The configuration for TPS-based traffic shaping. This approach limits requests to the ad decision server (ADS) based on transactions per second and concurrent users, providing more intuitive capacity management compared to time-window based traffic shaping.

" + "documentation":"

The configuration for TPS-based traffic shaping. This approach limits requests to the ad decision server (ADS) based on transactions per second and concurrent users.

" }, "TrafficShapingType":{ "type":"string", diff --git a/awscli/botocore/data/mgn/2020-02-26/service-2.json b/awscli/botocore/data/mgn/2020-02-26/service-2.json index 2260236e9103..1fe390a35724 100644 --- a/awscli/botocore/data/mgn/2020-02-26/service-2.json +++ b/awscli/botocore/data/mgn/2020-02-26/service-2.json @@ -2,9 +2,10 @@ "version":"2.0", "metadata":{ "apiVersion":"2020-02-26", + "auth":["aws.auth#sigv4"], "endpointPrefix":"mgn", - "jsonVersion":"1.1", "protocol":"rest-json", + "protocols":["rest-json"], "serviceAbbreviation":"mgn", "serviceFullName":"Application Migration Service", "serviceId":"mgn", @@ -144,8 +145,8 @@ "output":{"shape":"LaunchConfigurationTemplate"}, "errors":[ {"shape":"UninitializedAccountException"}, - {"shape":"ValidationException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} ], "documentation":"

Creates a new Launch Configuration Template.

" }, @@ -160,8 +161,8 @@ "output":{"shape":"ReplicationConfigurationTemplate"}, "errors":[ {"shape":"UninitializedAccountException"}, - {"shape":"ValidationException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} ], "documentation":"

Creates a new ReplicationConfigurationTemplate.

" }, @@ -329,7 +330,8 @@ {"shape":"UninitializedAccountException"}, {"shape":"ValidationException"} ], - "documentation":"

Retrieves detailed job log items with paging.

" + "documentation":"

Retrieves detailed job log items with paging.

", + "readonly":true }, "DescribeJobs":{ "name":"DescribeJobs", @@ -344,7 +346,8 @@ {"shape":"UninitializedAccountException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns a list of Jobs. Use the JobsID and fromDate and toData filters to limit which jobs are returned. The response is sorted by creationDataTime - latest date first. Jobs are normally created by the StartTest, StartCutover, and TerminateTargetInstances APIs. Jobs are also created by DiagnosticLaunch and TerminateDiagnosticInstances, which are APIs available only to *Support* and only used in response to relevant support tickets.

" + "documentation":"

Returns a list of Jobs. Use the JobsID and fromDate and toDate filters to limit which jobs are returned. The response is sorted by creationDateTime - latest date first. Jobs are normally created by the StartTest, StartCutover, and TerminateTargetInstances APIs. Jobs are also created by DiagnosticLaunch and TerminateDiagnosticInstances, which are APIs available only to *Support* and only used in response to relevant support tickets.

", + "readonly":true }, "DescribeLaunchConfigurationTemplates":{ "name":"DescribeLaunchConfigurationTemplates", @@ -360,7 +363,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"} ], - "documentation":"

Lists all Launch Configuration Templates, filtered by Launch Configuration Template IDs

" + "documentation":"

Lists all Launch Configuration Templates, filtered by Launch Configuration Template IDs

", + "readonly":true }, "DescribeReplicationConfigurationTemplates":{ "name":"DescribeReplicationConfigurationTemplates", @@ -376,7 +380,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"} ], - "documentation":"

Lists all ReplicationConfigurationTemplates, filtered by Source Server IDs.

" + "documentation":"

Lists all ReplicationConfigurationTemplates, filtered by Source Server IDs.

", + "readonly":true }, "DescribeSourceServers":{ "name":"DescribeSourceServers", @@ -391,7 +396,8 @@ {"shape":"UninitializedAccountException"}, {"shape":"ValidationException"} ], - "documentation":"

Retrieves all SourceServers or multiple SourceServers by ID.

" + "documentation":"

Retrieves all SourceServers or multiple SourceServers by ID.

", + "readonly":true }, "DescribeVcenterClients":{ "name":"DescribeVcenterClients", @@ -407,7 +413,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns a list of the installed vCenter clients.

" + "documentation":"

Returns a list of the installed vCenter clients.

", + "readonly":true }, "DisassociateApplications":{ "name":"DisassociateApplications", @@ -489,7 +496,8 @@ {"shape":"UninitializedAccountException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Lists all LaunchConfigurations available, filtered by Source Server IDs.

" + "documentation":"

Lists all LaunchConfigurations available, filtered by Source Server IDs.

", + "readonly":true }, "GetReplicationConfiguration":{ "name":"GetReplicationConfiguration", @@ -504,7 +512,8 @@ {"shape":"UninitializedAccountException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Lists all ReplicationConfigurations, filtered by Source Server ID.

" + "documentation":"

Lists all ReplicationConfigurations, filtered by Source Server ID.

", + "readonly":true }, "InitializeService":{ "name":"InitializeService", @@ -516,8 +525,8 @@ "input":{"shape":"InitializeServiceRequest"}, "output":{"shape":"InitializeServiceResponse"}, "errors":[ - {"shape":"ValidationException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} ], "documentation":"

Initialize Application Migration Service.

" }, @@ -533,7 +542,8 @@ "errors":[ {"shape":"UninitializedAccountException"} ], - "documentation":"

Retrieves all applications or multiple applications by ID.

" + "documentation":"

Retrieves all applications or multiple applications by ID.

", + "readonly":true }, "ListConnectors":{ "name":"ListConnectors", @@ -548,7 +558,8 @@ {"shape":"UninitializedAccountException"}, {"shape":"ValidationException"} ], - "documentation":"

List Connectors.

" + "documentation":"

List Connectors.

", + "readonly":true }, "ListExportErrors":{ "name":"ListExportErrors", @@ -563,7 +574,8 @@ {"shape":"UninitializedAccountException"}, {"shape":"ValidationException"} ], - "documentation":"

List export errors.

" + "documentation":"

List export errors.

", + "readonly":true }, "ListExports":{ "name":"ListExports", @@ -577,7 +589,8 @@ "errors":[ {"shape":"UninitializedAccountException"} ], - "documentation":"

List exports.

" + "documentation":"

List exports.

", + "readonly":true }, "ListImportErrors":{ "name":"ListImportErrors", @@ -592,7 +605,8 @@ {"shape":"UninitializedAccountException"}, {"shape":"ValidationException"} ], - "documentation":"

List import errors.

" + "documentation":"

List import errors.

", + "readonly":true }, "ListImports":{ "name":"ListImports", @@ -607,7 +621,8 @@ {"shape":"UninitializedAccountException"}, {"shape":"ValidationException"} ], - "documentation":"

List imports.

" + "documentation":"

List imports.

", + "readonly":true }, "ListManagedAccounts":{ "name":"ListManagedAccounts", @@ -622,7 +637,8 @@ {"shape":"UninitializedAccountException"}, {"shape":"ValidationException"} ], - "documentation":"

List Managed Accounts.

" + "documentation":"

List Managed Accounts.

", + "readonly":true }, "ListSourceServerActions":{ "name":"ListSourceServerActions", @@ -637,7 +653,8 @@ {"shape":"UninitializedAccountException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

List source server post migration custom actions.

" + "documentation":"

List source server post migration custom actions.

", + "readonly":true }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -651,11 +668,12 @@ "errors":[ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

List all tags for your Application Migration Service resources.

" + "documentation":"

List all tags for your Application Migration Service resources.

", + "readonly":true }, "ListTemplateActions":{ "name":"ListTemplateActions", @@ -670,7 +688,8 @@ {"shape":"UninitializedAccountException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

List template post migration custom actions.

" + "documentation":"

List template post migration custom actions.

", + "readonly":true }, "ListWaves":{ "name":"ListWaves", @@ -684,7 +703,8 @@ "errors":[ {"shape":"UninitializedAccountException"} ], - "documentation":"

Retrieves all waves or multiple waves by ID.

" + "documentation":"

Retrieves all waves or multiple waves by ID.

", + "readonly":true }, "MarkAsArchived":{ "name":"MarkAsArchived", @@ -886,7 +906,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"ConflictException"} ], - "documentation":"

Starts replication for SNAPSHOT_SHIPPING agents.

" + "documentation":"

Start replication for source server irrespective of its replication type.

" }, "StartTest":{ "name":"StartTest", @@ -933,8 +953,8 @@ "errors":[ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], "documentation":"

Adds or overwrites only the specified tags for the specified Application Migration Service resource or resources. When you specify an existing tag key, the value is overwritten with the new value. Each resource can have a maximum of 50 tags. Each tag consists of a key and optional value.

", @@ -999,8 +1019,8 @@ "errors":[ {"shape":"ResourceNotFoundException"}, {"shape":"ThrottlingException"}, - {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], "documentation":"

Deletes the specified set of tags from the specified set of Application Migration Service resources.

", @@ -1070,8 +1090,8 @@ "errors":[ {"shape":"UninitializedAccountException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ValidationException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} ], "documentation":"

Updates an existing Launch Configuration Template by ID.

" }, @@ -1087,8 +1107,8 @@ "errors":[ {"shape":"UninitializedAccountException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ValidationException"}, {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"}, {"shape":"ConflictException"} ], "documentation":"

Allows you to update multiple ReplicationConfigurations by Source Server ID.

", @@ -1106,8 +1126,8 @@ "errors":[ {"shape":"UninitializedAccountException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ValidationException"}, - {"shape":"AccessDeniedException"} + {"shape":"AccessDeniedException"}, + {"shape":"ValidationException"} ], "documentation":"

Updates multiple ReplicationConfigurationTemplates by ID.

" }, @@ -1143,7 +1163,7 @@ {"shape":"ValidationException"}, {"shape":"ConflictException"} ], - "documentation":"

Allows you to change between the AGENT_BASED replication type and the SNAPSHOT_SHIPPING replication type.

" + "documentation":"

Allows you to change between the AGENT_BASED replication type and the SNAPSHOT_SHIPPING replication type.

SNAPSHOT_SHIPPING should be used for agentless replication.

" }, "UpdateWave":{ "name":"UpdateWave", @@ -1172,8 +1192,8 @@ "AccessDeniedException":{ "type":"structure", "members":{ - "code":{"shape":"LargeBoundedString"}, - "message":{"shape":"LargeBoundedString"} + "message":{"shape":"LargeBoundedString"}, + "code":{"shape":"LargeBoundedString"} }, "documentation":"

Operating denied due to a file permission or access check error.

", "error":{ @@ -1186,7 +1206,7 @@ "type":"string", "max":12, "min":12, - "pattern":"[0-9]{12,}" + "pattern":".*[0-9]{12,}.*" }, "ActionCategory":{ "type":"string", @@ -1208,13 +1228,13 @@ "type":"string", "max":256, "min":0, - "pattern":"^[0-9a-zA-Z ():/.,'-_#*; ]*$" + "pattern":"[0-9a-zA-Z ():/.,'-_#*; ]*" }, "ActionID":{ "type":"string", "max":64, "min":1, - "pattern":"[0-9a-zA-Z]$" + "pattern":".*[0-9a-zA-Z]" }, "ActionIDs":{ "type":"list", @@ -1226,15 +1246,11 @@ "type":"string", "max":256, "min":1, - "pattern":"^[^\\s\\x00]( *[^\\s\\x00])*$" + "pattern":"[^\\s\\x00]( *[^\\s\\x00])*" }, "Application":{ "type":"structure", "members":{ - "applicationAggregatedStatus":{ - "shape":"ApplicationAggregatedStatus", - "documentation":"

Application aggregated status.

" - }, "applicationID":{ "shape":"ApplicationID", "documentation":"

Application ID.

" @@ -1243,9 +1259,9 @@ "shape":"ARN", "documentation":"

Application ARN.

" }, - "creationDateTime":{ - "shape":"ISO8601DatetimeString", - "documentation":"

Application creation dateTime.

" + "name":{ + "shape":"ApplicationName", + "documentation":"

Application name.

" }, "description":{ "shape":"ApplicationDescription", @@ -1255,14 +1271,18 @@ "shape":"Boolean", "documentation":"

Application archival status.

" }, + "applicationAggregatedStatus":{ + "shape":"ApplicationAggregatedStatus", + "documentation":"

Application aggregated status.

" + }, + "creationDateTime":{ + "shape":"ISO8601DatetimeString", + "documentation":"

Application creation dateTime.

" + }, "lastModifiedDateTime":{ "shape":"ISO8601DatetimeString", "documentation":"

Application last modified dateTime.

" }, - "name":{ - "shape":"ApplicationName", - "documentation":"

Application name.

" - }, "tags":{ "shape":"TagsMap", "documentation":"

Application tags.

" @@ -1276,14 +1296,14 @@ "ApplicationAggregatedStatus":{ "type":"structure", "members":{ - "healthStatus":{ - "shape":"ApplicationHealthStatus", - "documentation":"

Application aggregated status health status.

" - }, "lastUpdateDateTime":{ "shape":"ISO8601DatetimeString", "documentation":"

Application aggregated status last update dateTime.

" }, + "healthStatus":{ + "shape":"ApplicationHealthStatus", + "documentation":"

Application aggregated status health status.

" + }, "progressStatus":{ "shape":"ApplicationProgressStatus", "documentation":"

Application aggregated status progress status.

" @@ -1299,7 +1319,7 @@ "type":"string", "max":600, "min":0, - "pattern":"^[^\\x00]*$" + "pattern":"[^\\x00]*" }, "ApplicationHealthStatus":{ "type":"string", @@ -1313,7 +1333,7 @@ "type":"string", "max":21, "min":21, - "pattern":"^app-[0-9a-zA-Z]{17}$" + "pattern":"app-[0-9a-zA-Z]{17}" }, "ApplicationIDs":{ "type":"list", @@ -1331,7 +1351,7 @@ "type":"string", "max":256, "min":1, - "pattern":"^[^\\s\\x00]( *[^\\s\\x00])*$" + "pattern":"[^\\s\\x00]( *[^\\s\\x00])*" }, "ApplicationProgressStatus":{ "type":"string", @@ -1349,13 +1369,13 @@ "type":"structure", "required":["applicationID"], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Account ID.

" - }, "applicationID":{ "shape":"ApplicationID", "documentation":"

Application ID.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Account ID.

" } } }, @@ -1363,41 +1383,40 @@ "type":"structure", "required":["waveID"], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Account ID.

" - }, "waveID":{ "shape":"WaveID", "documentation":"

Wave ID.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Account ID.

" } } }, "AssociateApplicationsRequest":{ "type":"structure", "required":[ - "applicationIDs", - "waveID" + "waveID", + "applicationIDs" ], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Account ID.

" + "waveID":{ + "shape":"WaveID", + "documentation":"

Wave ID.

" }, "applicationIDs":{ "shape":"ApplicationIDs", "documentation":"

Application IDs list.

" }, - "waveID":{ - "shape":"WaveID", - "documentation":"

Wave ID.

" + "accountID":{ + "shape":"AccountID", + "documentation":"

Account ID.

" } } }, "AssociateApplicationsResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "AssociateSourceServersRequest":{ "type":"structure", @@ -1406,10 +1425,6 @@ "sourceServerIDs" ], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Account ID.

" - }, "applicationID":{ "shape":"ApplicationID", "documentation":"

Application ID.

" @@ -1417,6 +1432,10 @@ "sourceServerIDs":{ "shape":"AssociateSourceServersRequestSourceServerIDs", "documentation":"

Source server IDs list.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Account ID.

" } } }, @@ -1428,8 +1447,7 @@ }, "AssociateSourceServersResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "BandwidthThrottling":{ "type":"long", @@ -1470,21 +1488,21 @@ "ChangeServerLifeCycleStateRequest":{ "type":"structure", "required":[ - "lifeCycle", - "sourceServerID" + "sourceServerID", + "lifeCycle" ], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

The request to change the source server migration account ID.

" + "sourceServerID":{ + "shape":"SourceServerID", + "documentation":"

The request to change the source server migration lifecycle state by source server ID.

" }, "lifeCycle":{ "shape":"ChangeServerLifeCycleStateSourceServerLifecycle", "documentation":"

The request to change the source server migration lifecycle state.

" }, - "sourceServerID":{ - "shape":"SourceServerID", - "documentation":"

The request to change the source server migration lifecycle state by source server ID.

" + "accountID":{ + "shape":"AccountID", + "documentation":"

The request to change the source server migration account ID.

" } } }, @@ -1516,17 +1534,13 @@ "type":"string", "max":512, "min":1, - "pattern":"^[\\.\\-_/#A-Za-z0-9]+$" + "pattern":"[\\.\\-_/#A-Za-z0-9]+" }, "ConflictException":{ "type":"structure", "members":{ - "code":{"shape":"LargeBoundedString"}, - "errors":{ - "shape":"ConflictExceptionErrors", - "documentation":"

Conflict Exception specific errors.

" - }, "message":{"shape":"LargeBoundedString"}, + "code":{"shape":"LargeBoundedString"}, "resourceId":{ "shape":"LargeBoundedString", "documentation":"

A conflict occurred when prompting for the Resource ID.

" @@ -1534,6 +1548,10 @@ "resourceType":{ "shape":"LargeBoundedString", "documentation":"

A conflict occurred when prompting for resource type.

" + }, + "errors":{ + "shape":"ConflictExceptionErrors", + "documentation":"

Conflict Exception specific errors.

" } }, "documentation":"

The request could not be completed due to a conflict with the current state of the target resource.

", @@ -1550,10 +1568,6 @@ "Connector":{ "type":"structure", "members":{ - "arn":{ - "shape":"ARN", - "documentation":"

Connector arn.

" - }, "connectorID":{ "shape":"ConnectorID", "documentation":"

Connector ID.

" @@ -1562,17 +1576,21 @@ "shape":"ConnectorName", "documentation":"

Connector name.

" }, - "ssmCommandConfig":{ - "shape":"ConnectorSsmCommandConfig", - "documentation":"

Connector SSM command config.

" - }, "ssmInstanceID":{ "shape":"SsmInstanceID", "documentation":"

Connector SSM instance ID.

" }, + "arn":{ + "shape":"ARN", + "documentation":"

Connector arn.

" + }, "tags":{ "shape":"TagsMap", "documentation":"

Connector tags.

" + }, + "ssmCommandConfig":{ + "shape":"ConnectorSsmCommandConfig", + "documentation":"

Connector SSM command config.

" } } }, @@ -1580,13 +1598,13 @@ "type":"string", "max":100, "min":27, - "pattern":"^arn:[\\w-]+:mgn:([a-z]{2}-(gov-)?[a-z]+-\\d{1})?:(\\d{12})?:connector\\/(connector-[0-9a-zA-Z]{17})$" + "pattern":"arn:[\\w-]+:mgn:([a-z]{2}-(gov-)?[a-z]+-\\d{1})?:(\\d{12})?:connector\\/(connector-[0-9a-zA-Z]{17})" }, "ConnectorID":{ "type":"string", "max":27, "min":27, - "pattern":"^connector-[0-9a-zA-Z]{17}$" + "pattern":"connector-[0-9a-zA-Z]{17}" }, "ConnectorIDsFilter":{ "type":"list", @@ -1598,30 +1616,30 @@ "type":"string", "max":256, "min":1, - "pattern":"^[A-Za-z0-9_-]+$" + "pattern":"[A-Za-z0-9_-]+" }, "ConnectorSsmCommandConfig":{ "type":"structure", "required":[ - "cloudWatchOutputEnabled", - "s3OutputEnabled" + "s3OutputEnabled", + "cloudWatchOutputEnabled" ], "members":{ - "cloudWatchLogGroupName":{ - "shape":"CloudWatchLogGroupName", - "documentation":"

Connector SSM command config CloudWatch log group name.

" - }, - "cloudWatchOutputEnabled":{ + "s3OutputEnabled":{ "shape":"Boolean", - "documentation":"

Connector SSM command config CloudWatch output enabled.

" + "documentation":"

Connector SSM command config S3 output enabled.

" }, "outputS3BucketName":{ "shape":"S3BucketName", "documentation":"

Connector SSM command config output S3 bucket name.

" }, - "s3OutputEnabled":{ + "cloudWatchOutputEnabled":{ "shape":"Boolean", - "documentation":"

Connector SSM command config S3 output enabled.

" + "documentation":"

Connector SSM command config CloudWatch output enabled.

" + }, + "cloudWatchLogGroupName":{ + "shape":"CloudWatchLogGroupName", + "documentation":"

Connector SSM command config CloudWatch log group name.

" } }, "documentation":"

Connector SSM command config.

" @@ -1640,21 +1658,21 @@ "type":"structure", "required":["name"], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Account ID.

" + "name":{ + "shape":"ApplicationName", + "documentation":"

Application name.

" }, "description":{ "shape":"ApplicationDescription", "documentation":"

Application description.

" }, - "name":{ - "shape":"ApplicationName", - "documentation":"

Application name.

" - }, "tags":{ "shape":"TagsMap", "documentation":"

Application tags.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Account ID.

" } } }, @@ -1669,10 +1687,6 @@ "shape":"ConnectorName", "documentation":"

Create Connector request name.

" }, - "ssmCommandConfig":{ - "shape":"ConnectorSsmCommandConfig", - "documentation":"

Create Connector request SSM command config.

" - }, "ssmInstanceID":{ "shape":"SsmInstanceID", "documentation":"

Create Connector request SSM instance ID.

" @@ -1680,98 +1694,114 @@ "tags":{ "shape":"TagsMap", "documentation":"

Create Connector request tags.

" + }, + "ssmCommandConfig":{ + "shape":"ConnectorSsmCommandConfig", + "documentation":"

Create Connector request SSM command config.

" } } }, "CreateLaunchConfigurationTemplateRequest":{ "type":"structure", "members":{ - "associatePublicIpAddress":{ + "postLaunchActions":{ + "shape":"PostLaunchActions", + "documentation":"

Launch configuration template post launch actions.

" + }, + "enableMapAutoTagging":{ "shape":"Boolean", - "documentation":"

Associate public Ip address.

" + "documentation":"

Enable map auto tagging.

" }, - "bootMode":{ - "shape":"BootMode", - "documentation":"

Launch configuration template boot mode.

" + "mapAutoTaggingMpeID":{ + "shape":"TagValue", + "documentation":"

Launch configuration template map auto tagging MPE ID.

" + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

Request to associate tags during creation of a Launch Configuration Template.

" + }, + "launchDisposition":{ + "shape":"LaunchDisposition", + "documentation":"

Launch disposition.

" + }, + "targetInstanceTypeRightSizingMethod":{ + "shape":"TargetInstanceTypeRightSizingMethod", + "documentation":"

Target instance type right-sizing method.

" }, "copyPrivateIp":{ "shape":"Boolean", "documentation":"

Copy private Ip.

" }, - "copyTags":{ + "associatePublicIpAddress":{ "shape":"Boolean", - "documentation":"

Copy tags.

" + "documentation":"

Associate public Ip address.

" }, - "enableMapAutoTagging":{ + "copyTags":{ "shape":"Boolean", - "documentation":"

Enable map auto tagging.

" - }, - "largeVolumeConf":{ - "shape":"LaunchTemplateDiskConf", - "documentation":"

Large volume config.

" - }, - "launchDisposition":{ - "shape":"LaunchDisposition", - "documentation":"

Launch disposition.

" + "documentation":"

Copy tags.

" }, "licensing":{"shape":"Licensing"}, - "mapAutoTaggingMpeID":{ - "shape":"TagValue", - "documentation":"

Launch configuration template map auto tagging MPE ID.

" + "bootMode":{ + "shape":"BootMode", + "documentation":"

Launch configuration template boot mode.

" }, - "postLaunchActions":{ - "shape":"PostLaunchActions", - "documentation":"

Launch configuration template post launch actions.

" + "smallVolumeMaxSize":{ + "shape":"PositiveInteger", + "documentation":"

Small volume maximum size.

" }, "smallVolumeConf":{ "shape":"LaunchTemplateDiskConf", "documentation":"

Small volume config.

" }, - "smallVolumeMaxSize":{ - "shape":"PositiveInteger", - "documentation":"

Small volume maximum size.

" + "largeVolumeConf":{ + "shape":"LaunchTemplateDiskConf", + "documentation":"

Large volume config.

" }, - "tags":{ - "shape":"TagsMap", - "documentation":"

Request to associate tags during creation of a Launch Configuration Template.

" + "enableParametersEncryption":{ + "shape":"Boolean", + "documentation":"

Enable parameters encryption.

" }, - "targetInstanceTypeRightSizingMethod":{ - "shape":"TargetInstanceTypeRightSizingMethod", - "documentation":"

Target instance type right-sizing method.

" + "parametersEncryptionKey":{ + "shape":"KmsKeyArn", + "documentation":"

Parameters encryption key.

" } } }, "CreateReplicationConfigurationTemplateRequest":{ "type":"structure", "required":[ + "stagingAreaSubnetId", "associateDefaultSecurityGroup", - "bandwidthThrottling", - "createPublicIP", - "dataPlaneRouting", + "replicationServersSecurityGroupsIDs", + "replicationServerInstanceType", + "useDedicatedReplicationServer", "defaultLargeStagingDiskType", "ebsEncryption", - "replicationServerInstanceType", - "replicationServersSecurityGroupsIDs", - "stagingAreaSubnetId", - "stagingAreaTags", - "useDedicatedReplicationServer" + "bandwidthThrottling", + "dataPlaneRouting", + "createPublicIP", + "stagingAreaTags" ], "members":{ + "stagingAreaSubnetId":{ + "shape":"SubnetID", + "documentation":"

Request to configure the Staging Area subnet ID during Replication Settings template creation.

" + }, "associateDefaultSecurityGroup":{ "shape":"Boolean", "documentation":"

Request to associate the default Application Migration Service Security group with the Replication Settings template.

" }, - "bandwidthThrottling":{ - "shape":"BandwidthThrottling", - "documentation":"

Request to configure bandwidth throttling during Replication Settings template creation.

" + "replicationServersSecurityGroupsIDs":{ + "shape":"ReplicationServersSecurityGroupsIDs", + "documentation":"

Request to configure the Replication Server Security group ID during Replication Settings template creation.

" }, - "createPublicIP":{ - "shape":"Boolean", - "documentation":"

Request to create Public IP during Replication Settings template creation.

" + "replicationServerInstanceType":{ + "shape":"EC2InstanceType", + "documentation":"

Request to configure the Replication Server instance type during Replication Settings template creation.

" }, - "dataPlaneRouting":{ - "shape":"ReplicationConfigurationDataPlaneRouting", - "documentation":"

Request to configure data plane routing during Replication Settings template creation.

" + "useDedicatedReplicationServer":{ + "shape":"Boolean", + "documentation":"

Request to use Dedicated Replication Servers during Replication Settings template creation.

" }, "defaultLargeStagingDiskType":{ "shape":"ReplicationConfigurationDefaultLargeStagingDiskType", @@ -1785,33 +1815,33 @@ "shape":"ARN", "documentation":"

Request to configure an EBS encryption key during Replication Settings template creation.

" }, - "replicationServerInstanceType":{ - "shape":"EC2InstanceType", - "documentation":"

Request to configure the Replication Server instance type during Replication Settings template creation.

" + "bandwidthThrottling":{ + "shape":"BandwidthThrottling", + "documentation":"

Request to configure bandwidth throttling during Replication Settings template creation.

" }, - "replicationServersSecurityGroupsIDs":{ - "shape":"ReplicationServersSecurityGroupsIDs", - "documentation":"

Request to configure the Replication Server Security group ID during Replication Settings template creation.

" + "dataPlaneRouting":{ + "shape":"ReplicationConfigurationDataPlaneRouting", + "documentation":"

Request to configure data plane routing during Replication Settings template creation.

" }, - "stagingAreaSubnetId":{ - "shape":"SubnetID", - "documentation":"

Request to configure the Staging Area subnet ID during Replication Settings template creation.

" + "createPublicIP":{ + "shape":"Boolean", + "documentation":"

Request to create Public IP during Replication Settings template creation.

" }, "stagingAreaTags":{ "shape":"TagsMap", "documentation":"

Request to configure Staging Area tags during Replication Settings template creation.

" }, + "useFipsEndpoint":{ + "shape":"Boolean", + "documentation":"

Request to use Fips Endpoint during Replication Settings template creation.

" + }, "tags":{ "shape":"TagsMap", "documentation":"

Request to configure tags during Replication Settings template creation.

" }, - "useDedicatedReplicationServer":{ - "shape":"Boolean", - "documentation":"

Request to use Dedicated Replication Servers during Replication Settings template creation.

" - }, - "useFipsEndpoint":{ - "shape":"Boolean", - "documentation":"

Request to use Fips Endpoint during Replication Settings template creation.

" + "internetProtocol":{ + "shape":"InternetProtocol", + "documentation":"

Request to configure the internet protocol to IPv4 or IPv6.

" } } }, @@ -1819,21 +1849,21 @@ "type":"structure", "required":["name"], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Account ID.

" + "name":{ + "shape":"WaveName", + "documentation":"

Wave name.

" }, "description":{ "shape":"WaveDescription", "documentation":"

Wave description.

" }, - "name":{ - "shape":"WaveName", - "documentation":"

Wave name.

" - }, "tags":{ "shape":"TagsMap", "documentation":"

Wave tags.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Account ID.

" } } }, @@ -1875,33 +1905,37 @@ "DataReplicationInfo":{ "type":"structure", "members":{ - "dataReplicationError":{ - "shape":"DataReplicationError", - "documentation":"

Error in obtaining data replication info.

" + "lagDuration":{ + "shape":"ISO8601DurationString", + "documentation":"

Request to query data replication lag duration.

" }, - "dataReplicationInitiation":{ - "shape":"DataReplicationInitiation", - "documentation":"

Request to query whether data replication has been initiated.

" + "etaDateTime":{ + "shape":"ISO8601DatetimeString", + "documentation":"

Request to query the time when data replication will be complete.

" + }, + "replicatedDisks":{ + "shape":"DataReplicationInfoReplicatedDisks", + "documentation":"

Request to query disks replicated.

" }, "dataReplicationState":{ "shape":"DataReplicationState", "documentation":"

Request to query the data replication state.

" }, - "etaDateTime":{ - "shape":"ISO8601DatetimeString", - "documentation":"

Request to query the time when data replication will be complete.

" + "dataReplicationInitiation":{ + "shape":"DataReplicationInitiation", + "documentation":"

Request to query whether data replication has been initiated.

" }, - "lagDuration":{ - "shape":"ISO8601DurationString", - "documentation":"

Request to query data replication lag duration.

" + "dataReplicationError":{ + "shape":"DataReplicationError", + "documentation":"

Error in obtaining data replication info.

" }, "lastSnapshotDateTime":{ "shape":"ISO8601DatetimeString", "documentation":"

Request to query data replication last snapshot time.

" }, - "replicatedDisks":{ - "shape":"DataReplicationInfoReplicatedDisks", - "documentation":"

Request to query disks replicated.

" + "replicatorId":{ + "shape":"ReplicatorID", + "documentation":"

Replication server instance ID.

" } }, "documentation":"

Request data replication info.

" @@ -1909,14 +1943,14 @@ "DataReplicationInfoReplicatedDisk":{ "type":"structure", "members":{ - "backloggedStorageBytes":{ - "shape":"PositiveInteger", - "documentation":"

Request to query data replication backlog size in bytes.

" - }, "deviceName":{ "shape":"BoundedString", "documentation":"

Request to query device name.

" }, + "totalStorageBytes":{ + "shape":"PositiveInteger", + "documentation":"

Request to query total amount of data replicated in bytes.

" + }, "replicatedStorageBytes":{ "shape":"PositiveInteger", "documentation":"

Request to query amount of data replicated in bytes.

" @@ -1925,9 +1959,9 @@ "shape":"PositiveInteger", "documentation":"

Request to query amount of data rescanned in bytes.

" }, - "totalStorageBytes":{ + "backloggedStorageBytes":{ "shape":"PositiveInteger", - "documentation":"

Request to query total amount of data replicated in bytes.

" + "documentation":"

Request to query data replication backlog size in bytes.

" } }, "documentation":"

Request to query disks replicated.

" @@ -1941,14 +1975,14 @@ "DataReplicationInitiation":{ "type":"structure", "members":{ - "nextAttemptDateTime":{ - "shape":"ISO8601DatetimeString", - "documentation":"

Request to query next data initiation date and time.

" - }, "startDateTime":{ "shape":"ISO8601DatetimeString", "documentation":"

Request to query data initiation start date and time.

" }, + "nextAttemptDateTime":{ + "shape":"ISO8601DatetimeString", + "documentation":"

Request to query next data initiation date and time.

" + }, "steps":{ "shape":"DataReplicationInitiationSteps", "documentation":"

Request to query data initiation steps.

" @@ -2021,20 +2055,19 @@ "type":"structure", "required":["applicationID"], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Account ID.

" - }, "applicationID":{ "shape":"ApplicationID", "documentation":"

Application ID.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Account ID.

" } } }, "DeleteApplicationResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteConnectorRequest":{ "type":"structure", @@ -2050,20 +2083,19 @@ "type":"structure", "required":["jobID"], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Request to delete Job from service by Account ID.

" - }, "jobID":{ "shape":"JobID", "documentation":"

Request to delete Job from service by Job ID.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Request to delete Job from service by Account ID.

" } } }, "DeleteJobResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteLaunchConfigurationTemplateRequest":{ "type":"structure", @@ -2077,8 +2109,7 @@ }, "DeleteLaunchConfigurationTemplateResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteReplicationConfigurationTemplateRequest":{ "type":"structure", @@ -2092,27 +2123,25 @@ }, "DeleteReplicationConfigurationTemplateResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteSourceServerRequest":{ "type":"structure", "required":["sourceServerID"], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Request to delete Source Server from service by Account ID.

" - }, "sourceServerID":{ "shape":"SourceServerID", "documentation":"

Request to delete Source Server from service by Server ID.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Request to delete Source Server from service by Account ID.

" } } }, "DeleteSourceServerResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteVcenterClientRequest":{ "type":"structure", @@ -2128,29 +2157,24 @@ "type":"structure", "required":["waveID"], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Account ID.

" - }, "waveID":{ "shape":"WaveID", "documentation":"

Wave ID.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Account ID.

" } } }, "DeleteWaveResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DescribeJobLogItemsRequest":{ "type":"structure", "required":["jobID"], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Request to describe Job log Account ID.

" - }, "jobID":{ "shape":"JobID", "documentation":"

Request to describe Job log job ID.

" @@ -2162,6 +2186,10 @@ "nextToken":{ "shape":"PaginationToken", "documentation":"

Request to describe Job log next token.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Request to describe Job log Account ID.

" } } }, @@ -2181,10 +2209,6 @@ "DescribeJobsRequest":{ "type":"structure", "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Request to describe job log items by Account ID.

" - }, "filters":{ "shape":"DescribeJobsRequestFilters", "documentation":"

Request to describe Job log filters.

" @@ -2196,20 +2220,24 @@ "nextToken":{ "shape":"PaginationToken", "documentation":"

Request to describe job log items by next token.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Request to describe job log items by Account ID.

" } } }, "DescribeJobsRequestFilters":{ "type":"structure", "members":{ - "fromDate":{ - "shape":"ISO8601DatetimeString", - "documentation":"

Request to describe Job log filters by date.

" - }, "jobIDs":{ "shape":"DescribeJobsRequestFiltersJobIDs", "documentation":"

Request to describe Job log filters by job ID.

" }, + "fromDate":{ + "shape":"ISO8601DatetimeString", + "documentation":"

Request to describe Job log filters by date.

" + }, "toDate":{ "shape":"ISO8601DatetimeString", "documentation":"

Request to describe job log items by last date.

" @@ -2269,6 +2297,10 @@ "DescribeReplicationConfigurationTemplatesRequest":{ "type":"structure", "members":{ + "replicationConfigurationTemplateIDs":{ + "shape":"ReplicationConfigurationTemplateIDs", + "documentation":"

Request to describe Replication Configuration template by template IDs.

" + }, "maxResults":{ "shape":"MaxResultsType", "documentation":"

Request to describe Replication Configuration template by max results.

" @@ -2276,10 +2308,6 @@ "nextToken":{ "shape":"PaginationToken", "documentation":"

Request to describe Replication Configuration template by next token.

" - }, - "replicationConfigurationTemplateIDs":{ - "shape":"ReplicationConfigurationTemplateIDs", - "documentation":"

Request to describe Replication Configuration template by template IDs.

" } } }, @@ -2299,10 +2327,6 @@ "DescribeSourceServersRequest":{ "type":"structure", "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Request to filter Source Servers list by Accoun ID.

" - }, "filters":{ "shape":"DescribeSourceServersRequestFilters", "documentation":"

Request to filter Source Servers list.

" @@ -2314,6 +2338,10 @@ "nextToken":{ "shape":"PaginationToken", "documentation":"

Request to filter Source Servers list by next token.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Request to filter Source Servers list by Accoun ID.

" } } }, @@ -2326,25 +2354,25 @@ "DescribeSourceServersRequestFilters":{ "type":"structure", "members":{ - "applicationIDs":{ - "shape":"DescribeSourceServersRequestApplicationIDs", - "documentation":"

Request to filter Source Servers list by application IDs.

" + "sourceServerIDs":{ + "shape":"DescribeSourceServersRequestFiltersIDs", + "documentation":"

Request to filter Source Servers list by Source Server ID.

" }, "isArchived":{ "shape":"Boolean", "documentation":"

Request to filter Source Servers list by archived.

" }, - "lifeCycleStates":{ - "shape":"LifeCycleStates", - "documentation":"

Request to filter Source Servers list by life cycle states.

" - }, "replicationTypes":{ "shape":"ReplicationTypes", "documentation":"

Request to filter Source Servers list by replication type.

" }, - "sourceServerIDs":{ - "shape":"DescribeSourceServersRequestFiltersIDs", - "documentation":"

Request to filter Source Servers list by Source Server ID.

" + "lifeCycleStates":{ + "shape":"LifeCycleStates", + "documentation":"

Request to filter Source Servers list by life cycle states.

" + }, + "applicationIDs":{ + "shape":"DescribeSourceServersRequestApplicationIDs", + "documentation":"

Request to filter Source Servers list by application IDs.

" } }, "documentation":"

Request to filter Source Servers list.

" @@ -2401,28 +2429,27 @@ "DisassociateApplicationsRequest":{ "type":"structure", "required":[ - "applicationIDs", - "waveID" + "waveID", + "applicationIDs" ], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Account ID.

" + "waveID":{ + "shape":"WaveID", + "documentation":"

Wave ID.

" }, "applicationIDs":{ "shape":"ApplicationIDs", "documentation":"

Application IDs list.

" }, - "waveID":{ - "shape":"WaveID", - "documentation":"

Wave ID.

" + "accountID":{ + "shape":"AccountID", + "documentation":"

Account ID.

" } } }, "DisassociateApplicationsResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DisassociateSourceServersRequest":{ "type":"structure", @@ -2431,10 +2458,6 @@ "sourceServerIDs" ], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Account ID.

" - }, "applicationID":{ "shape":"ApplicationID", "documentation":"

Application ID.

" @@ -2442,6 +2465,10 @@ "sourceServerIDs":{ "shape":"DisassociateSourceServersRequestSourceServerIDs", "documentation":"

Source server IDs list.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Account ID.

" } } }, @@ -2453,33 +2480,32 @@ }, "DisassociateSourceServersResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DisconnectFromServiceRequest":{ "type":"structure", "required":["sourceServerID"], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Request to disconnect Source Server from service by Account ID.

" - }, "sourceServerID":{ "shape":"SourceServerID", "documentation":"

Request to disconnect Source Server from service by Server ID.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Request to disconnect Source Server from service by Account ID.

" } } }, "Disk":{ "type":"structure", "members":{ - "bytes":{ - "shape":"PositiveInteger", - "documentation":"

The amount of storage on the disk in bytes.

" - }, "deviceName":{ "shape":"BoundedString", "documentation":"

The disk or device name.

" + }, + "bytes":{ + "shape":"PositiveInteger", + "documentation":"

The amount of storage on the disk in bytes.

" } }, "documentation":"

The disk identifier.

" @@ -2492,13 +2518,13 @@ }, "DocumentVersion":{ "type":"string", - "pattern":"^(\\$DEFAULT|\\$LATEST|[0-9]+)$" + "pattern":"(\\$DEFAULT|\\$LATEST|[0-9]+)" }, "EC2InstanceID":{ "type":"string", "max":255, "min":0, - "pattern":"^i-[0-9a-fA-F]{8,}$" + "pattern":"i-[0-9a-fA-F]{8,}" }, "EC2InstanceType":{ "type":"string", @@ -2509,19 +2535,19 @@ "type":"string", "max":20, "min":20, - "pattern":"^lt-[0-9a-z]{17}$" + "pattern":"lt-[0-9a-z]{17}" }, "ErrorDetails":{ "type":"structure", "members":{ - "code":{ - "shape":"BoundedString", - "documentation":"

Error details code.

" - }, "message":{ "shape":"LargeBoundedString", "documentation":"

Error details message.

" }, + "code":{ + "shape":"BoundedString", + "documentation":"

Error details code.

" + }, "resourceId":{ "shape":"LargeBoundedString", "documentation":"

Error details resourceId.

" @@ -2551,7 +2577,7 @@ "type":"string", "max":24, "min":24, - "pattern":"^export-[0-9a-zA-Z]{17}$" + "pattern":"export-[0-9a-zA-Z]{17}" }, "ExportStatus":{ "type":"string", @@ -2565,41 +2591,49 @@ "ExportTask":{ "type":"structure", "members":{ - "creationDateTime":{ - "shape":"ISO8601DatetimeString", - "documentation":"

Export task creation datetime.

" - }, - "endDateTime":{ - "shape":"ISO8601DatetimeString", - "documentation":"

Export task end datetime.

" - }, "exportID":{ "shape":"ExportID", "documentation":"

Export task id.

" }, - "progressPercentage":{ - "shape":"Float", - "documentation":"

Export task progress percentage.

" + "arn":{ + "shape":"ARN", + "documentation":"

ExportTask arn.

" }, "s3Bucket":{ "shape":"S3BucketName", "documentation":"

Export task s3 bucket.

" }, + "s3Key":{ + "shape":"S3Key", + "documentation":"

Export task s3 key.

" + }, "s3BucketOwner":{ "shape":"AccountID", "documentation":"

Export task s3 bucket owner.

" }, - "s3Key":{ - "shape":"S3Key", - "documentation":"

Export task s3 key.

" + "creationDateTime":{ + "shape":"ISO8601DatetimeString", + "documentation":"

Export task creation datetime.

" + }, + "endDateTime":{ + "shape":"ISO8601DatetimeString", + "documentation":"

Export task end datetime.

" }, "status":{ "shape":"ExportStatus", "documentation":"

Export task status.

" }, + "progressPercentage":{ + "shape":"Float", + "documentation":"

Export task progress percentage.

" + }, "summary":{ "shape":"ExportTaskSummary", "documentation":"

Export task summary.

" + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

Export task tags.

" } }, "documentation":"

Export task.

" @@ -2607,13 +2641,13 @@ "ExportTaskError":{ "type":"structure", "members":{ - "errorData":{ - "shape":"ExportErrorData", - "documentation":"

Export task error data.

" - }, "errorDateTime":{ "shape":"ISO8601DatetimeString", "documentation":"

Export task error datetime.

" + }, + "errorData":{ + "shape":"ExportErrorData", + "documentation":"

Export task error data.

" } }, "documentation":"

Export task error.

" @@ -2621,14 +2655,14 @@ "ExportTaskSummary":{ "type":"structure", "members":{ - "applicationsCount":{ - "shape":"PositiveInteger", - "documentation":"

Export task summary applications count.

" - }, "serversCount":{ "shape":"PositiveInteger", "documentation":"

Export task summary servers count.

" }, + "applicationsCount":{ + "shape":"PositiveInteger", + "documentation":"

Export task summary applications count.

" + }, "wavesCount":{ "shape":"PositiveInteger", "documentation":"

Export task summary waves count.

" @@ -2644,13 +2678,13 @@ "type":"structure", "required":["sourceServerID"], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Request to finalize Cutover by Source Account ID.

" - }, "sourceServerID":{ "shape":"SourceServerID", "documentation":"

Request to finalize Cutover by Source Server ID.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Request to finalize Cutover by Source Account ID.

" } } }, @@ -2671,13 +2705,13 @@ "type":"structure", "required":["sourceServerID"], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Request to get Launch Configuration information by Account ID.

" - }, "sourceServerID":{ "shape":"SourceServerID", "documentation":"

Request to get Launch Configuration information by Source Server ID.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Request to get Launch Configuration information by Account ID.

" } } }, @@ -2685,13 +2719,13 @@ "type":"structure", "required":["sourceServerID"], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Request to get Replication Configuration by Account ID.

" - }, "sourceServerID":{ "shape":"SourceServerID", "documentation":"

Request to get Replication Configuration by Source Server ID.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Request to get Replication Configuration by Account ID.

" } } }, @@ -2703,7 +2737,7 @@ "type":"string", "max":32, "min":19, - "pattern":"^[1-9][0-9]*-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01])T([0-1][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9](\\.[0-9]+)?Z$" + "pattern":"[1-9][0-9]*-(0[1-9]|1[0-2])-(0[1-9]|[12][0-9]|3[01])T([0-1][0-9]|2[0-3]):[0-5][0-9]:[0-5][0-9](\\.[0-9]+)?Z" }, "ISO8601DurationString":{ "type":"string", @@ -2713,10 +2747,6 @@ "IdentificationHints":{ "type":"structure", "members":{ - "awsInstanceID":{ - "shape":"EC2InstanceID", - "documentation":"

AWS Instance ID identification hint.

" - }, "fqdn":{ "shape":"BoundedString", "documentation":"

FQDN address identification hint.

" @@ -2725,13 +2755,17 @@ "shape":"BoundedString", "documentation":"

Hostname identification hint.

" }, - "vmPath":{ - "shape":"BoundedString", - "documentation":"

vCenter VM path identification hint.

" - }, "vmWareUuid":{ "shape":"BoundedString", "documentation":"

vmWare UUID identification hint.

" + }, + "awsInstanceID":{ + "shape":"EC2InstanceID", + "documentation":"

AWS Instance ID identification hint.

" + }, + "vmPath":{ + "shape":"BoundedString", + "documentation":"

vCenter VM path identification hint.

" } }, "documentation":"

Identification hints.

" @@ -2739,33 +2773,33 @@ "ImportErrorData":{ "type":"structure", "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Import error data source account ID.

" + "sourceServerID":{ + "shape":"SourceServerID", + "documentation":"

Import error data source server ID.

" }, "applicationID":{ "shape":"ApplicationID", "documentation":"

Import error data application ID.

" }, + "waveID":{ + "shape":"WaveID", + "documentation":"

Import error data wave id.

" + }, "ec2LaunchTemplateID":{ "shape":"BoundedString", "documentation":"

Import error data ec2 LaunchTemplate ID.

" }, - "rawError":{ - "shape":"LargeBoundedString", - "documentation":"

Import error data raw error.

" - }, "rowNumber":{ "shape":"PositiveInteger", "documentation":"

Import error data row number.

" }, - "sourceServerID":{ - "shape":"SourceServerID", - "documentation":"

Import error data source server ID.

" + "rawError":{ + "shape":"LargeBoundedString", + "documentation":"

Import error data raw error.

" }, - "waveID":{ - "shape":"WaveID", - "documentation":"

Import error data wave id.

" + "accountID":{ + "shape":"AccountID", + "documentation":"

Import error data source account ID.

" } }, "documentation":"

Import error data.

" @@ -2785,7 +2819,7 @@ "type":"string", "max":24, "min":24, - "pattern":"^import-[0-9a-zA-Z]{17}$" + "pattern":"import-[0-9a-zA-Z]{17}" }, "ImportIDsFilter":{ "type":"list", @@ -2809,33 +2843,41 @@ "ImportTask":{ "type":"structure", "members":{ - "creationDateTime":{ - "shape":"ISO8601DatetimeString", - "documentation":"

Import task creation datetime.

" - }, - "endDateTime":{ - "shape":"ISO8601DatetimeString", - "documentation":"

Import task end datetime.

" - }, "importID":{ "shape":"ImportID", "documentation":"

Import task id.

" }, - "progressPercentage":{ - "shape":"Float", - "documentation":"

Import task progress percentage.

" + "arn":{ + "shape":"ARN", + "documentation":"

ImportTask arn.

" }, "s3BucketSource":{ "shape":"S3BucketSource", "documentation":"

Import task s3 bucket source.

" }, + "creationDateTime":{ + "shape":"ISO8601DatetimeString", + "documentation":"

Import task creation datetime.

" + }, + "endDateTime":{ + "shape":"ISO8601DatetimeString", + "documentation":"

Import task end datetime.

" + }, "status":{ "shape":"ImportStatus", "documentation":"

Import task status.

" }, + "progressPercentage":{ + "shape":"Float", + "documentation":"

Import task progress percentage.

" + }, "summary":{ "shape":"ImportTaskSummary", "documentation":"

Import task summary.

" + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

Import task tags.

" } }, "documentation":"

Import task.

" @@ -2843,10 +2885,6 @@ "ImportTaskError":{ "type":"structure", "members":{ - "errorData":{ - "shape":"ImportErrorData", - "documentation":"

Import task error data.

" - }, "errorDateTime":{ "shape":"ISO8601DatetimeString", "documentation":"

Import task error datetime.

" @@ -2854,6 +2892,10 @@ "errorType":{ "shape":"ImportErrorType", "documentation":"

Import task error type.

" + }, + "errorData":{ + "shape":"ImportErrorData", + "documentation":"

Import task error data.

" } }, "documentation":"

Import task error.

" @@ -2861,6 +2903,10 @@ "ImportTaskSummary":{ "type":"structure", "members":{ + "waves":{ + "shape":"ImportTaskSummaryWaves", + "documentation":"

Import task summary waves.

" + }, "applications":{ "shape":"ImportTaskSummaryApplications", "documentation":"

Import task summary applications.

" @@ -2868,10 +2914,6 @@ "servers":{ "shape":"ImportTaskSummaryServers", "documentation":"

Import task summary servers.

" - }, - "waves":{ - "shape":"ImportTaskSummaryWaves", - "documentation":"

Import task summary waves.

" } }, "documentation":"

Import task summary.

" @@ -2920,13 +2962,11 @@ }, "InitializeServiceRequest":{ "type":"structure", - "members":{ - } + "members":{} }, "InitializeServiceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "InitiatedBy":{ "type":"string", @@ -2954,6 +2994,13 @@ "exception":true, "fault":true }, + "InternetProtocol":{ + "type":"string", + "enum":[ + "IPV4", + "IPV6" + ] + }, "Iops":{ "type":"long", "box":true, @@ -2964,16 +3011,28 @@ "type":"string", "max":1011, "min":1, - "pattern":"^[a-zA-Z0-9_]+(\\.[a-zA-Z0-9_\\[\\]]+)*$" + "pattern":"[a-zA-Z0-9_]+(\\.[a-zA-Z0-9_\\[\\]]+)*" }, "Job":{ "type":"structure", "required":["jobID"], "members":{ + "jobID":{ + "shape":"JobID", + "documentation":"

Job ID.

" + }, "arn":{ "shape":"ARN", "documentation":"

the ARN of the specific Job.

" }, + "type":{ + "shape":"JobType", + "documentation":"

Job type.

" + }, + "initiatedBy":{ + "shape":"InitiatedBy", + "documentation":"

Job initiated by field.

" + }, "creationDateTime":{ "shape":"ISO8601DatetimeString", "documentation":"

Job creation time.

" @@ -2982,29 +3041,17 @@ "shape":"ISO8601DatetimeString", "documentation":"

Job end time.

" }, - "initiatedBy":{ - "shape":"InitiatedBy", - "documentation":"

Job initiated by field.

" - }, - "jobID":{ - "shape":"JobID", - "documentation":"

Job ID.

" + "status":{ + "shape":"JobStatus", + "documentation":"

Job status.

" }, "participatingServers":{ "shape":"ParticipatingServers", "documentation":"

Servers participating in a specific Job.

" }, - "status":{ - "shape":"JobStatus", - "documentation":"

Job status.

" - }, "tags":{ "shape":"TagsMap", "documentation":"

Tags associated with specific Job.

" - }, - "type":{ - "shape":"JobType", - "documentation":"

Job type.

" } }, "documentation":"

Job.

" @@ -3013,11 +3060,15 @@ "type":"string", "max":24, "min":24, - "pattern":"^mgnjob-[0-9a-zA-Z]{17}$" + "pattern":"mgnjob-[0-9a-zA-Z]{17}" }, "JobLog":{ "type":"structure", "members":{ + "logDateTime":{ + "shape":"ISO8601DatetimeString", + "documentation":"

Job log event date and time.

" + }, "event":{ "shape":"JobLogEvent", "documentation":"

Job log event.

" @@ -3025,10 +3076,6 @@ "eventData":{ "shape":"JobLogEventData", "documentation":"

Job event data

" - }, - "logDateTime":{ - "shape":"ISO8601DatetimeString", - "documentation":"

Job log event date and time.

" } }, "documentation":"

Job log.

" @@ -3057,21 +3104,29 @@ "JobLogEventData":{ "type":"structure", "members":{ + "sourceServerID":{ + "shape":"SourceServerID", + "documentation":"

Job Event Source Server ID.

" + }, "conversionServerID":{ "shape":"EC2InstanceID", "documentation":"

Job Event conversion Server ID.

" }, + "targetInstanceID":{ + "shape":"EC2InstanceID", + "documentation":"

Job Event Target instance ID.

" + }, "rawError":{ "shape":"LargeBoundedString", "documentation":"

Job error.

" }, - "sourceServerID":{ - "shape":"SourceServerID", - "documentation":"

Job Event Source Server ID.

" + "attemptCount":{ + "shape":"StrictlyPositiveInteger", + "documentation":"

Retries for this operation.

" }, - "targetInstanceID":{ - "shape":"EC2InstanceID", - "documentation":"

Job Event Target instance ID.

" + "maxAttemptsCount":{ + "shape":"StrictlyPositiveInteger", + "documentation":"

The maximum number of retries that will be attempted if this operation failed.

" } }, "documentation":"

Job log data

" @@ -3083,6 +3138,14 @@ "JobPostLaunchActionsLaunchStatus":{ "type":"structure", "members":{ + "ssmDocument":{ + "shape":"SsmDocument", + "documentation":"

AWS Systems Manager's Document of the Job Post Launch Actions.

" + }, + "ssmDocumentType":{ + "shape":"SsmDocumentType", + "documentation":"

AWS Systems Manager Document type.

" + }, "executionID":{ "shape":"BoundedString", "documentation":"

AWS Systems Manager Document's execution ID of the Job Post Launch Actions.

" @@ -3094,14 +3157,6 @@ "failureReason":{ "shape":"BoundedString", "documentation":"

AWS Systems Manager Document's failure reason.

" - }, - "ssmDocument":{ - "shape":"SsmDocument", - "documentation":"

AWS Systems Manager's Document of the of the Job Post Launch Actions.

" - }, - "ssmDocumentType":{ - "shape":"SsmDocumentType", - "documentation":"

AWS Systems Manager Document type.

" } }, "documentation":"

Launch Status of the Job Post Launch Actions.

" @@ -3125,6 +3180,12 @@ "type":"list", "member":{"shape":"Job"} }, + "KmsKeyArn":{ + "type":"string", + "max":276, + "min":0, + "pattern":"((arn:[\\w-]+:kms:([a-z]{2}-(gov-)?[a-z]+-\\d{1})?:(\\d{12})?:((alias|key)/[a-zA-Z0-9:/_-]{1,256}))|())" + }, "LargeBoundedString":{ "type":"string", "max":65536, @@ -3133,50 +3194,50 @@ "LaunchConfiguration":{ "type":"structure", "members":{ - "bootMode":{ - "shape":"BootMode", - "documentation":"

Launch configuration boot mode.

" + "sourceServerID":{ + "shape":"SourceServerID", + "documentation":"

Launch configuration Source Server ID.

" }, - "copyPrivateIp":{ - "shape":"Boolean", - "documentation":"

Copy Private IP during Launch Configuration.

" - }, - "copyTags":{ - "shape":"Boolean", - "documentation":"

Copy Tags during Launch Configuration.

" + "name":{ + "shape":"SmallBoundedString", + "documentation":"

Launch configuration name.

" }, "ec2LaunchTemplateID":{ "shape":"BoundedString", "documentation":"

Launch configuration EC2 Launch template ID.

" }, - "enableMapAutoTagging":{ - "shape":"Boolean", - "documentation":"

Enable map auto tagging.

" - }, "launchDisposition":{ "shape":"LaunchDisposition", "documentation":"

Launch disposition for launch configuration.

" }, + "targetInstanceTypeRightSizingMethod":{ + "shape":"TargetInstanceTypeRightSizingMethod", + "documentation":"

Launch configuration Target instance type right sizing method.

" + }, + "copyPrivateIp":{ + "shape":"Boolean", + "documentation":"

Copy Private IP during Launch Configuration.

" + }, + "copyTags":{ + "shape":"Boolean", + "documentation":"

Copy Tags during Launch Configuration.

" + }, "licensing":{ "shape":"Licensing", "documentation":"

Launch configuration OS licensing.

" }, - "mapAutoTaggingMpeID":{ - "shape":"TagValue", - "documentation":"

Map auto tagging MPE ID.

" - }, - "name":{ - "shape":"SmallBoundedString", - "documentation":"

Launch configuration name.

" + "bootMode":{ + "shape":"BootMode", + "documentation":"

Launch configuration boot mode.

" }, "postLaunchActions":{"shape":"PostLaunchActions"}, - "sourceServerID":{ - "shape":"SourceServerID", - "documentation":"

Launch configuration Source Server ID.

" + "enableMapAutoTagging":{ + "shape":"Boolean", + "documentation":"

Enable map auto tagging.

" }, - "targetInstanceTypeRightSizingMethod":{ - "shape":"TargetInstanceTypeRightSizingMethod", - "documentation":"

Launch configuration Target instance type right sizing method.

" + "mapAutoTaggingMpeID":{ + "shape":"TagValue", + "documentation":"

Map auto tagging MPE ID.

" } } }, @@ -3184,70 +3245,78 @@ "type":"structure", "required":["launchConfigurationTemplateID"], "members":{ + "launchConfigurationTemplateID":{ + "shape":"LaunchConfigurationTemplateID", + "documentation":"

ID of the Launch Configuration Template.

" + }, "arn":{ "shape":"ARN", "documentation":"

ARN of the Launch Configuration Template.

" }, - "associatePublicIpAddress":{ - "shape":"Boolean", - "documentation":"

Associate public Ip address.

" - }, - "bootMode":{ - "shape":"BootMode", - "documentation":"

Launch configuration template boot mode.

" + "postLaunchActions":{ + "shape":"PostLaunchActions", + "documentation":"

Post Launch Actions of the Launch Configuration Template.

" }, - "copyPrivateIp":{ + "enableMapAutoTagging":{ "shape":"Boolean", - "documentation":"

Copy private Ip.

" + "documentation":"

Enable map auto tagging.

" }, - "copyTags":{ - "shape":"Boolean", - "documentation":"

Copy tags.

" + "mapAutoTaggingMpeID":{ + "shape":"TagValue", + "documentation":"

Launch configuration template map auto tagging MPE ID.

" + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

Tags of the Launch Configuration Template.

" }, "ec2LaunchTemplateID":{ "shape":"EC2LaunchConfigurationTemplateID", "documentation":"

EC2 launch template ID.

" }, - "enableMapAutoTagging":{ - "shape":"Boolean", - "documentation":"

Enable map auto tagging.

" - }, - "largeVolumeConf":{ - "shape":"LaunchTemplateDiskConf", - "documentation":"

Large volume config.

" - }, - "launchConfigurationTemplateID":{ - "shape":"LaunchConfigurationTemplateID", - "documentation":"

ID of the Launch Configuration Template.

" - }, "launchDisposition":{ "shape":"LaunchDisposition", "documentation":"

Launch disposition.

" }, + "targetInstanceTypeRightSizingMethod":{ + "shape":"TargetInstanceTypeRightSizingMethod", + "documentation":"

Target instance type right-sizing method.

" + }, + "copyPrivateIp":{ + "shape":"Boolean", + "documentation":"

Copy private Ip.

" + }, + "associatePublicIpAddress":{ + "shape":"Boolean", + "documentation":"

Associate public Ip address.

" + }, + "copyTags":{ + "shape":"Boolean", + "documentation":"

Copy tags.

" + }, "licensing":{"shape":"Licensing"}, - "mapAutoTaggingMpeID":{ - "shape":"TagValue", - "documentation":"

Launch configuration template map auto tagging MPE ID.

" + "bootMode":{ + "shape":"BootMode", + "documentation":"

Launch configuration template boot mode.

" }, - "postLaunchActions":{ - "shape":"PostLaunchActions", - "documentation":"

Post Launch Actions of the Launch Configuration Template.

" + "smallVolumeMaxSize":{ + "shape":"PositiveInteger", + "documentation":"

Small volume maximum size.

" }, "smallVolumeConf":{ "shape":"LaunchTemplateDiskConf", "documentation":"

Small volume config.

" }, - "smallVolumeMaxSize":{ - "shape":"PositiveInteger", - "documentation":"

Small volume maximum size.

" + "largeVolumeConf":{ + "shape":"LaunchTemplateDiskConf", + "documentation":"

Large volume config.

" }, - "tags":{ - "shape":"TagsMap", - "documentation":"

Tags of the Launch Configuration Template.

" + "enableParametersEncryption":{ + "shape":"Boolean", + "documentation":"

Enable parameters encryption.

" }, - "targetInstanceTypeRightSizingMethod":{ - "shape":"TargetInstanceTypeRightSizingMethod", - "documentation":"

Target instance type right-sizing method.

" + "parametersEncryptionKey":{ + "shape":"ARN", + "documentation":"

Parameters encryption key.

" } } }, @@ -3255,7 +3324,7 @@ "type":"string", "max":21, "min":21, - "pattern":"^lct-[0-9a-zA-Z]{17}$" + "pattern":"lct-[0-9a-zA-Z]{17}" }, "LaunchConfigurationTemplateIDs":{ "type":"list", @@ -3289,6 +3358,10 @@ "LaunchTemplateDiskConf":{ "type":"structure", "members":{ + "volumeType":{ + "shape":"VolumeType", + "documentation":"

Launch template disk volume type configuration.

" + }, "iops":{ "shape":"Iops", "documentation":"

Launch template disk iops configuration.

" @@ -3296,10 +3369,6 @@ "throughput":{ "shape":"Throughput", "documentation":"

Launch template disk throughput configuration.

" - }, - "volumeType":{ - "shape":"VolumeType", - "documentation":"

Launch template disk volume type configuration.

" } }, "documentation":"

Launch template disk configuration.

" @@ -3311,13 +3380,13 @@ "shape":"EC2InstanceID", "documentation":"

Launched instance EC2 ID.

" }, - "firstBoot":{ - "shape":"FirstBoot", - "documentation":"

Launched instance first boot.

" - }, "jobID":{ "shape":"JobID", "documentation":"

Launched instance Job ID.

" + }, + "firstBoot":{ + "shape":"FirstBoot", + "documentation":"

Launched instance first boot.

" } }, "documentation":"

Launched instance.

" @@ -3339,17 +3408,13 @@ "shape":"ISO8601DatetimeString", "documentation":"

Lifecycle added to service data and time.

" }, - "elapsedReplicationDuration":{ - "shape":"ISO8601DurationString", - "documentation":"

Lifecycle elapsed time and duration.

" - }, "firstByteDateTime":{ "shape":"ISO8601DatetimeString", "documentation":"

Lifecycle replication initiation date and time.

" }, - "lastCutover":{ - "shape":"LifeCycleLastCutover", - "documentation":"

Lifecycle last Cutover.

" + "elapsedReplicationDuration":{ + "shape":"ISO8601DurationString", + "documentation":"

Lifecycle elapsed time and duration.

" }, "lastSeenByServiceDateTime":{ "shape":"ISO8601DatetimeString", @@ -3359,6 +3424,10 @@ "shape":"LifeCycleLastTest", "documentation":"

Lifecycle last Test.

" }, + "lastCutover":{ + "shape":"LifeCycleLastCutover", + "documentation":"

Lifecycle last Cutover.

" + }, "state":{ "shape":"LifeCycleState", "documentation":"

Lifecycle state.

" @@ -3369,10 +3438,6 @@ "LifeCycleLastCutover":{ "type":"structure", "members":{ - "finalized":{ - "shape":"LifeCycleLastCutoverFinalized", - "documentation":"

Lifecycle Cutover finalized date and time.

" - }, "initiated":{ "shape":"LifeCycleLastCutoverInitiated", "documentation":"

Lifecycle last Cutover initiated.

" @@ -3380,6 +3445,10 @@ "reverted":{ "shape":"LifeCycleLastCutoverReverted", "documentation":"

Lifecycle last Cutover reverted.

" + }, + "finalized":{ + "shape":"LifeCycleLastCutoverFinalized", + "documentation":"

Lifecycle Cutover finalized date and time.

" } }, "documentation":"

Lifecycle last Cutover.

" @@ -3421,10 +3490,6 @@ "LifeCycleLastTest":{ "type":"structure", "members":{ - "finalized":{ - "shape":"LifeCycleLastTestFinalized", - "documentation":"

Lifecycle last Test finalized.

" - }, "initiated":{ "shape":"LifeCycleLastTestInitiated", "documentation":"

Lifecycle last Test initiated.

" @@ -3432,6 +3497,10 @@ "reverted":{ "shape":"LifeCycleLastTestReverted", "documentation":"

Lifecycle last Test reverted.

" + }, + "finalized":{ + "shape":"LifeCycleLastTestFinalized", + "documentation":"

Lifecycle last Test finalized.

" } }, "documentation":"

Lifecycle last Test.

" @@ -3494,10 +3563,6 @@ "ListApplicationsRequest":{ "type":"structure", "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Applications list Account ID.

" - }, "filters":{ "shape":"ListApplicationsRequestFilters", "documentation":"

Applications list filters.

" @@ -3509,6 +3574,10 @@ "nextToken":{ "shape":"PaginationToken", "documentation":"

Request next token.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Applications list Account ID.

" } } }, @@ -3769,9 +3838,9 @@ "type":"structure", "required":["sourceServerID"], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Account ID to return when listing source server post migration custom actions.

" + "sourceServerID":{ + "shape":"SourceServerID", + "documentation":"

Source server ID.

" }, "filters":{ "shape":"SourceServerActionsRequestFilters", @@ -3785,9 +3854,9 @@ "shape":"PaginationToken", "documentation":"

Next token to use when listing source server post migration custom actions.

" }, - "sourceServerID":{ - "shape":"SourceServerID", - "documentation":"

Source server ID.

" + "accountID":{ + "shape":"AccountID", + "documentation":"

Account ID to return when listing source server post migration custom actions.

" } } }, @@ -3829,14 +3898,14 @@ "type":"structure", "required":["launchConfigurationTemplateID"], "members":{ - "filters":{ - "shape":"TemplateActionsRequestFilters", - "documentation":"

Filters to apply when listing template post migration custom actions.

" - }, "launchConfigurationTemplateID":{ "shape":"LaunchConfigurationTemplateID", "documentation":"

Launch configuration template ID.

" }, + "filters":{ + "shape":"TemplateActionsRequestFilters", + "documentation":"

Filters to apply when listing template post migration custom actions.

" + }, "maxResults":{ "shape":"MaxResultsType", "documentation":"

Maximum amount of items to return when listing template post migration custom actions.

" @@ -3863,10 +3932,6 @@ "ListWavesRequest":{ "type":"structure", "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Request account ID.

" - }, "filters":{ "shape":"ListWavesRequestFilters", "documentation":"

Waves list filters.

" @@ -3878,19 +3943,23 @@ "nextToken":{ "shape":"PaginationToken", "documentation":"

Request next token.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Request account ID.

" } } }, "ListWavesRequestFilters":{ "type":"structure", "members":{ - "isArchived":{ - "shape":"Boolean", - "documentation":"

Filter waves list by archival status.

" - }, "waveIDs":{ "shape":"WaveIDsFilter", "documentation":"

Filter waves list by wave ID.

" + }, + "isArchived":{ + "shape":"Boolean", + "documentation":"

Filter waves list by archival status.

" } }, "documentation":"

Waves list filters.

" @@ -3928,13 +3997,13 @@ "type":"structure", "required":["sourceServerID"], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Mark as archived by Account ID.

" - }, "sourceServerID":{ "shape":"SourceServerID", "documentation":"

Mark as archived by Source Server ID.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Mark as archived by Account ID.

" } } }, @@ -3947,6 +4016,10 @@ "NetworkInterface":{ "type":"structure", "members":{ + "macAddress":{ + "shape":"BoundedString", + "documentation":"

Network interface Mac address.

" + }, "ips":{ "shape":"IPsList", "documentation":"

Network interface IPs.

" @@ -3954,10 +4027,6 @@ "isPrimary":{ "shape":"Boolean", "documentation":"

Network interface primary IP.

" - }, - "macAddress":{ - "shape":"BoundedString", - "documentation":"

Network interface Mac address.

" } }, "documentation":"

Network interface.

" @@ -3980,7 +4049,7 @@ }, "OperatingSystemString":{ "type":"string", - "pattern":"^(linux|windows)$" + "pattern":"(linux|windows)" }, "OrderType":{ "type":"integer", @@ -3997,6 +4066,10 @@ "type":"structure", "required":["sourceServerID"], "members":{ + "sourceServerID":{ + "shape":"SourceServerID", + "documentation":"

Participating server Source Server ID.

" + }, "launchStatus":{ "shape":"LaunchStatus", "documentation":"

Participating server launch status.

" @@ -4008,10 +4081,6 @@ "postLaunchActionsStatus":{ "shape":"PostLaunchActionsStatus", "documentation":"

Participating server's Post Launch Actions Status.

" - }, - "sourceServerID":{ - "shape":"SourceServerID", - "documentation":"

Participating server Source Server ID.

" } }, "documentation":"

Server participating in Job.

" @@ -4024,13 +4093,13 @@ "type":"structure", "required":["sourceServerID"], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Pause Replication Request account ID.

" - }, "sourceServerID":{ "shape":"SourceServerID", "documentation":"

Pause Replication Request source server ID.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Pause Replication Request account ID.

" } } }, @@ -4049,10 +4118,6 @@ "PostLaunchActions":{ "type":"structure", "members":{ - "cloudWatchLogGroupName":{ - "shape":"CloudWatchLogGroupName", - "documentation":"

AWS Systems Manager Command's CloudWatch log group name.

" - }, "deployment":{ "shape":"PostLaunchActionsDeploymentType", "documentation":"

Deployment type in which AWS Systems Manager Documents will be executed.

" @@ -4065,9 +4130,13 @@ "shape":"BoundedString", "documentation":"

AWS Systems Manager Command's logs S3 output key prefix.

" }, - "ssmDocuments":{ - "shape":"SsmDocuments", - "documentation":"

AWS Systems Manager Documents.

" + "cloudWatchLogGroupName":{ + "shape":"CloudWatchLogGroupName", + "documentation":"

AWS Systems Manager Command's CloudWatch log group name.

" + }, + "ssmDocuments":{ + "shape":"SsmDocuments", + "documentation":"

AWS Systems Manager Documents.

" } }, "documentation":"

Post Launch Actions to be executed on the Test or Cutover instance.

" @@ -4087,13 +4156,13 @@ "PostLaunchActionsStatus":{ "type":"structure", "members":{ - "postLaunchActionsLaunchStatusList":{ - "shape":"PostLaunchActionsLaunchStatusList", - "documentation":"

List of Post Launch Action status.

" - }, "ssmAgentDiscoveryDatetime":{ "shape":"ISO8601DatetimeString", "documentation":"

Time where the AWS Systems Manager was detected as running on the Test or Cutover instance.

" + }, + "postLaunchActionsLaunchStatusList":{ + "shape":"PostLaunchActionsLaunchStatusList", + "documentation":"

List of Post Launch Action status.

" } }, "documentation":"

Status of the Post Launch Actions running on the Test or Cutover instance.

" @@ -4101,210 +4170,224 @@ "PutSourceServerActionRequest":{ "type":"structure", "required":[ - "actionID", + "sourceServerID", "actionName", "documentIdentifier", "order", - "sourceServerID" + "actionID" ], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Source server post migration custom account ID.

" - }, - "actionID":{ - "shape":"ActionID", - "documentation":"

Source server post migration custom action ID.

" + "sourceServerID":{ + "shape":"SourceServerID", + "documentation":"

Source server ID.

" }, "actionName":{ "shape":"ActionName", "documentation":"

Source server post migration custom action name.

" }, - "active":{ - "shape":"Boolean", - "documentation":"

Source server post migration custom action active status.

" - }, - "category":{ - "shape":"ActionCategory", - "documentation":"

Source server post migration custom action category.

" - }, - "description":{ - "shape":"ActionDescription", - "documentation":"

Source server post migration custom action description.

" - }, "documentIdentifier":{ "shape":"BoundedString", "documentation":"

Source server post migration custom action document identifier.

" }, + "order":{ + "shape":"OrderType", + "documentation":"

Source server post migration custom action order.

" + }, + "actionID":{ + "shape":"ActionID", + "documentation":"

Source server post migration custom action ID.

" + }, "documentVersion":{ "shape":"DocumentVersion", "documentation":"

Source server post migration custom action document version.

" }, - "externalParameters":{ - "shape":"SsmDocumentExternalParameters", - "documentation":"

Source server post migration custom action external parameters.

" + "active":{ + "shape":"Boolean", + "documentation":"

Source server post migration custom action active status.

" + }, + "timeoutSeconds":{ + "shape":"StrictlyPositiveInteger", + "documentation":"

Source server post migration custom action timeout in seconds.

" }, "mustSucceedForCutover":{ "shape":"Boolean", "documentation":"

Source server post migration custom action must succeed for cutover.

" }, - "order":{ - "shape":"OrderType", - "documentation":"

Source server post migration custom action order.

" - }, "parameters":{ "shape":"SsmDocumentParameters", "documentation":"

Source server post migration custom action parameters.

" }, - "sourceServerID":{ - "shape":"SourceServerID", - "documentation":"

Source server ID.

" + "externalParameters":{ + "shape":"SsmDocumentExternalParameters", + "documentation":"

Source server post migration custom action external parameters.

" }, - "timeoutSeconds":{ - "shape":"StrictlyPositiveInteger", - "documentation":"

Source server post migration custom action timeout in seconds.

" + "description":{ + "shape":"ActionDescription", + "documentation":"

Source server post migration custom action description.

" + }, + "category":{ + "shape":"ActionCategory", + "documentation":"

Source server post migration custom action category.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Source server post migration custom account ID.

" } } }, "PutTemplateActionRequest":{ "type":"structure", "required":[ - "actionID", + "launchConfigurationTemplateID", "actionName", "documentIdentifier", - "launchConfigurationTemplateID", - "order" + "order", + "actionID" ], "members":{ - "actionID":{ - "shape":"ActionID", - "documentation":"

Template post migration custom action ID.

" + "launchConfigurationTemplateID":{ + "shape":"LaunchConfigurationTemplateID", + "documentation":"

Launch configuration template ID.

" }, "actionName":{ "shape":"BoundedString", "documentation":"

Template post migration custom action name.

" }, - "active":{ - "shape":"Boolean", - "documentation":"

Template post migration custom action active status.

" - }, - "category":{ - "shape":"ActionCategory", - "documentation":"

Template post migration custom action category.

" - }, - "description":{ - "shape":"ActionDescription", - "documentation":"

Template post migration custom action description.

" - }, "documentIdentifier":{ "shape":"BoundedString", "documentation":"

Template post migration custom action document identifier.

" }, + "order":{ + "shape":"OrderType", + "documentation":"

Template post migration custom action order.

" + }, + "actionID":{ + "shape":"ActionID", + "documentation":"

Template post migration custom action ID.

" + }, "documentVersion":{ "shape":"DocumentVersion", "documentation":"

Template post migration custom action document version.

" }, - "externalParameters":{ - "shape":"SsmDocumentExternalParameters", - "documentation":"

Template post migration custom action external parameters.

" + "active":{ + "shape":"Boolean", + "documentation":"

Template post migration custom action active status.

" }, - "launchConfigurationTemplateID":{ - "shape":"LaunchConfigurationTemplateID", - "documentation":"

Launch configuration template ID.

" + "timeoutSeconds":{ + "shape":"StrictlyPositiveInteger", + "documentation":"

Template post migration custom action timeout in seconds.

" }, "mustSucceedForCutover":{ "shape":"Boolean", "documentation":"

Template post migration custom action must succeed for cutover.

" }, + "parameters":{ + "shape":"SsmDocumentParameters", + "documentation":"

Template post migration custom action parameters.

" + }, "operatingSystem":{ "shape":"OperatingSystemString", "documentation":"

Operating system eligible for this template post migration custom action.

" }, - "order":{ - "shape":"OrderType", - "documentation":"

Template post migration custom action order.

" + "externalParameters":{ + "shape":"SsmDocumentExternalParameters", + "documentation":"

Template post migration custom action external parameters.

" }, - "parameters":{ - "shape":"SsmDocumentParameters", - "documentation":"

Template post migration custom action parameters.

" + "description":{ + "shape":"ActionDescription", + "documentation":"

Template post migration custom action description.

" }, - "timeoutSeconds":{ - "shape":"StrictlyPositiveInteger", - "documentation":"

Template post migration custom action timeout in seconds.

" + "category":{ + "shape":"ActionCategory", + "documentation":"

Template post migration custom action category.

" } } }, "RemoveSourceServerActionRequest":{ "type":"structure", "required":[ - "actionID", - "sourceServerID" + "sourceServerID", + "actionID" ], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Source server post migration account ID.

" + "sourceServerID":{ + "shape":"SourceServerID", + "documentation":"

Source server ID of the post migration custom action to remove.

" }, "actionID":{ "shape":"ActionID", "documentation":"

Source server post migration custom action ID to remove.

" }, - "sourceServerID":{ - "shape":"SourceServerID", - "documentation":"

Source server ID of the post migration custom action to remove.

" + "accountID":{ + "shape":"AccountID", + "documentation":"

Source server post migration account ID.

" } } }, "RemoveSourceServerActionResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "RemoveTemplateActionRequest":{ "type":"structure", "required":[ - "actionID", - "launchConfigurationTemplateID" + "launchConfigurationTemplateID", + "actionID" ], "members":{ - "actionID":{ - "shape":"ActionID", - "documentation":"

Template post migration custom action ID to remove.

" - }, "launchConfigurationTemplateID":{ "shape":"LaunchConfigurationTemplateID", "documentation":"

Launch configuration template ID of the post migration custom action to remove.

" + }, + "actionID":{ + "shape":"ActionID", + "documentation":"

Template post migration custom action ID to remove.

" } } }, "RemoveTemplateActionResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "ReplicationConfiguration":{ "type":"structure", "members":{ + "sourceServerID":{ + "shape":"SourceServerID", + "documentation":"

Replication Configuration Source Server ID.

" + }, + "name":{ + "shape":"SmallBoundedString", + "documentation":"

Replication Configuration name.

" + }, + "stagingAreaSubnetId":{ + "shape":"SubnetID", + "documentation":"

Replication Configuration Staging Area subnet ID.

" + }, "associateDefaultSecurityGroup":{ "shape":"Boolean", "documentation":"

Replication Configuration associate default Application Migration Service Security Group.

" }, - "bandwidthThrottling":{ - "shape":"BandwidthThrottling", - "documentation":"

Replication Configuration set bandwidth throttling.

" + "replicationServersSecurityGroupsIDs":{ + "shape":"ReplicationServersSecurityGroupsIDs", + "documentation":"

Replication Configuration Replication Server Security Group IDs.

" }, - "createPublicIP":{ - "shape":"Boolean", - "documentation":"

Replication Configuration create Public IP.

" + "replicationServerInstanceType":{ + "shape":"EC2InstanceType", + "documentation":"

Replication Configuration Replication Server instance type.

" }, - "dataPlaneRouting":{ - "shape":"ReplicationConfigurationDataPlaneRouting", - "documentation":"

Replication Configuration data plane routing.

" + "useDedicatedReplicationServer":{ + "shape":"Boolean", + "documentation":"

Replication Configuration use Dedicated Replication Server.

" }, "defaultLargeStagingDiskType":{ "shape":"ReplicationConfigurationDefaultLargeStagingDiskType", "documentation":"

Replication Configuration use default large Staging Disks.

" }, + "replicatedDisks":{ + "shape":"ReplicationConfigurationReplicatedDisks", + "documentation":"

Replication Configuration replicated disks.

" + }, "ebsEncryption":{ "shape":"ReplicationConfigurationEbsEncryption", "documentation":"

Replication Configuration EBS encryption.

" @@ -4313,41 +4396,29 @@ "shape":"ARN", "documentation":"

Replication Configuration EBS encryption key ARN.

" }, - "name":{ - "shape":"SmallBoundedString", - "documentation":"

Replication Configuration name.

" - }, - "replicatedDisks":{ - "shape":"ReplicationConfigurationReplicatedDisks", - "documentation":"

Replication Configuration replicated disks.

" - }, - "replicationServerInstanceType":{ - "shape":"EC2InstanceType", - "documentation":"

Replication Configuration Replication Server instance type.

" - }, - "replicationServersSecurityGroupsIDs":{ - "shape":"ReplicationServersSecurityGroupsIDs", - "documentation":"

Replication Configuration Replication Server Security Group IDs.

" + "bandwidthThrottling":{ + "shape":"BandwidthThrottling", + "documentation":"

Replication Configuration set bandwidth throttling.

" }, - "sourceServerID":{ - "shape":"SourceServerID", - "documentation":"

Replication Configuration Source Server ID.

" + "dataPlaneRouting":{ + "shape":"ReplicationConfigurationDataPlaneRouting", + "documentation":"

Replication Configuration data plane routing.

" }, - "stagingAreaSubnetId":{ - "shape":"SubnetID", - "documentation":"

Replication Configuration Staging Area subnet ID.

" + "createPublicIP":{ + "shape":"Boolean", + "documentation":"

Replication Configuration create Public IP.

" }, "stagingAreaTags":{ "shape":"TagsMap", "documentation":"

Replication Configuration Staging Area tags.

" }, - "useDedicatedReplicationServer":{ - "shape":"Boolean", - "documentation":"

Replication Configuration use Dedicated Replication Server.

" - }, "useFipsEndpoint":{ "shape":"Boolean", "documentation":"

Replication Configuration use Fips Endpoint.

" + }, + "internetProtocol":{ + "shape":"InternetProtocol", + "documentation":"

Replication Configuration internet protocol.

" } } }, @@ -4380,10 +4451,6 @@ "shape":"BoundedString", "documentation":"

Replication Configuration replicated disk device name.

" }, - "iops":{ - "shape":"PositiveInteger", - "documentation":"

Replication Configuration replicated disk IOPs.

" - }, "isBootDisk":{ "shape":"Boolean", "documentation":"

Replication Configuration replicated disk boot disk.

" @@ -4392,6 +4459,10 @@ "shape":"ReplicationConfigurationReplicatedDiskStagingDiskType", "documentation":"

Replication Configuration replicated disk staging disk type.

" }, + "iops":{ + "shape":"PositiveInteger", + "documentation":"

Replication Configuration replicated disk IOPs.

" + }, "throughput":{ "shape":"PositiveInteger", "documentation":"

Replication Configuration replicated disk throughput.

" @@ -4422,25 +4493,33 @@ "type":"structure", "required":["replicationConfigurationTemplateID"], "members":{ + "replicationConfigurationTemplateID":{ + "shape":"ReplicationConfigurationTemplateID", + "documentation":"

Replication Configuration template ID.

" + }, "arn":{ "shape":"ARN", "documentation":"

Replication Configuration template ARN.

" }, + "stagingAreaSubnetId":{ + "shape":"SubnetID", + "documentation":"

Replication Configuration template Staging Area subnet ID.

" + }, "associateDefaultSecurityGroup":{ "shape":"Boolean", "documentation":"

Replication Configuration template associate default Application Migration Service Security group.

" }, - "bandwidthThrottling":{ - "shape":"BandwidthThrottling", - "documentation":"

Replication Configuration template bandwidth throttling.

" - }, - "createPublicIP":{ - "shape":"Boolean", - "documentation":"

Replication Configuration template create Public IP.

" + "replicationServersSecurityGroupsIDs":{ + "shape":"ReplicationServersSecurityGroupsIDs", + "documentation":"

Replication Configuration template server Security Groups IDs.

" }, - "dataPlaneRouting":{ - "shape":"ReplicationConfigurationDataPlaneRouting", - "documentation":"

Replication Configuration template data plane routing.

" + "replicationServerInstanceType":{ + "shape":"EC2InstanceType", + "documentation":"

Replication Configuration template server instance type.

" + }, + "useDedicatedReplicationServer":{ + "shape":"Boolean", + "documentation":"

Replication Configuration template use Dedicated Replication Server.

" }, "defaultLargeStagingDiskType":{ "shape":"ReplicationConfigurationDefaultLargeStagingDiskType", @@ -4454,37 +4533,33 @@ "shape":"ARN", "documentation":"

Replication Configuration template EBS encryption key ARN.

" }, - "replicationConfigurationTemplateID":{ - "shape":"ReplicationConfigurationTemplateID", - "documentation":"

Replication Configuration template ID.

" - }, - "replicationServerInstanceType":{ - "shape":"EC2InstanceType", - "documentation":"

Replication Configuration template server instance type.

" + "bandwidthThrottling":{ + "shape":"BandwidthThrottling", + "documentation":"

Replication Configuration template bandwidth throttling.

" }, - "replicationServersSecurityGroupsIDs":{ - "shape":"ReplicationServersSecurityGroupsIDs", - "documentation":"

Replication Configuration template server Security Groups IDs.

" + "dataPlaneRouting":{ + "shape":"ReplicationConfigurationDataPlaneRouting", + "documentation":"

Replication Configuration template data plane routing.

" }, - "stagingAreaSubnetId":{ - "shape":"SubnetID", - "documentation":"

Replication Configuration template Staging Area subnet ID.

" + "createPublicIP":{ + "shape":"Boolean", + "documentation":"

Replication Configuration template create Public IP.

" }, "stagingAreaTags":{ "shape":"TagsMap", "documentation":"

Replication Configuration template Staging Area Tags.

" }, + "useFipsEndpoint":{ + "shape":"Boolean", + "documentation":"

Replication Configuration template use Fips Endpoint.

" + }, "tags":{ "shape":"TagsMap", "documentation":"

Replication Configuration template Tags.

" }, - "useDedicatedReplicationServer":{ - "shape":"Boolean", - "documentation":"

Replication Configuration template use Dedicated Replication Server.

" - }, - "useFipsEndpoint":{ - "shape":"Boolean", - "documentation":"

Replication Configuration template use Fips Endpoint.

" + "internetProtocol":{ + "shape":"InternetProtocol", + "documentation":"

Replication Configuration template internet protocol.

" } } }, @@ -4492,7 +4567,7 @@ "type":"string", "max":21, "min":21, - "pattern":"^rct-[0-9a-zA-Z]{17}$" + "pattern":"rct-[0-9a-zA-Z]{17}" }, "ReplicationConfigurationTemplateIDs":{ "type":"list", @@ -4523,11 +4598,17 @@ "max":2, "min":0 }, + "ReplicatorID":{ + "type":"string", + "max":19, + "min":19, + "pattern":"i-[0-9a-zA-Z]{17}" + }, "ResourceNotFoundException":{ "type":"structure", "members":{ - "code":{"shape":"LargeBoundedString"}, "message":{"shape":"LargeBoundedString"}, + "code":{"shape":"LargeBoundedString"}, "resourceId":{ "shape":"LargeBoundedString", "documentation":"

Resource ID not found error.

" @@ -4548,13 +4629,13 @@ "type":"structure", "required":["sourceServerID"], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Resume Replication Request account ID.

" - }, "sourceServerID":{ "shape":"SourceServerID", "documentation":"

Resume Replication Request source server ID.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Resume Replication Request account ID.

" } } }, @@ -4562,19 +4643,19 @@ "type":"structure", "required":["sourceServerID"], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Retry data replication for Account ID.

" - }, "sourceServerID":{ "shape":"SourceServerID", "documentation":"

Retry data replication for Source Server ID.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Retry data replication for Account ID.

" } } }, "S3BucketName":{ "type":"string", - "pattern":"^[a-zA-Z0-9.\\-_]{1,255}$" + "pattern":"[a-zA-Z0-9.\\-_]{1,255}" }, "S3BucketSource":{ "type":"structure", @@ -4587,20 +4668,20 @@ "shape":"S3BucketName", "documentation":"

S3 bucket source s3 bucket.

" }, - "s3BucketOwner":{ - "shape":"AccountID", - "documentation":"

S3 bucket source s3 bucket owner.

" - }, "s3Key":{ "shape":"S3Key", "documentation":"

S3 bucket source s3 key.

" + }, + "s3BucketOwner":{ + "shape":"AccountID", + "documentation":"

S3 bucket source s3 bucket owner.

" } }, "documentation":"

S3 bucket source.

" }, "S3Key":{ "type":"string", - "pattern":"^[^\\x00]{1,1020}\\.csv$" + "pattern":"[^\\x00]{1,1020}\\.csv" }, "S3LogBucketName":{ "type":"string", @@ -4611,27 +4692,19 @@ "type":"string", "max":100, "min":20, - "pattern":"^arn:[\\w-]+:secretsmanager:([a-z]{2}-(gov-)?[a-z]+-\\d{1})?:(\\d{12})?:secret:(.+)$" + "pattern":"arn:[\\w-]+:secretsmanager:([a-z]{2}-(gov-)?[a-z]+-\\d{1})?:(\\d{12})?:secret:(.+)" }, "SecurityGroupID":{ "type":"string", "max":255, "min":0, - "pattern":"^sg-[0-9a-fA-F]{8,}$" + "pattern":"sg-[0-9a-fA-F]{8,}" }, "ServiceQuotaExceededException":{ "type":"structure", "members":{ - "code":{"shape":"LargeBoundedString"}, "message":{"shape":"LargeBoundedString"}, - "quotaCode":{ - "shape":"LargeBoundedString", - "documentation":"

Exceeded the service quota code.

" - }, - "quotaValue":{ - "shape":"StrictlyPositiveInteger", - "documentation":"

Exceeded the service quota value.

" - }, + "code":{"shape":"LargeBoundedString"}, "resourceId":{ "shape":"LargeBoundedString", "documentation":"

Exceeded the service quota resource ID.

" @@ -4643,6 +4716,14 @@ "serviceCode":{ "shape":"LargeBoundedString", "documentation":"

Exceeded the service quota service code.

" + }, + "quotaCode":{ + "shape":"LargeBoundedString", + "documentation":"

Exceeded the service quota code.

" + }, + "quotaValue":{ + "shape":"StrictlyPositiveInteger", + "documentation":"

Exceeded the service quota value.

" } }, "documentation":"

The request could not be completed because its exceeded the service quota.

", @@ -4660,37 +4741,37 @@ "SourceProperties":{ "type":"structure", "members":{ - "cpus":{ - "shape":"Cpus", - "documentation":"

Source Server CPUs.

" + "lastUpdatedDateTime":{ + "shape":"ISO8601DatetimeString", + "documentation":"

Source server last update date and time.

" }, - "disks":{ - "shape":"Disks", - "documentation":"

Source Server disks.

" + "recommendedInstanceType":{ + "shape":"EC2InstanceType", + "documentation":"

Source server recommended instance type.

" }, "identificationHints":{ "shape":"IdentificationHints", "documentation":"

Source server identification hints.

" }, - "lastUpdatedDateTime":{ - "shape":"ISO8601DatetimeString", - "documentation":"

Source server last update date and time.

" - }, "networkInterfaces":{ "shape":"NetworkInterfaces", "documentation":"

Source server network interfaces.

" }, - "os":{ - "shape":"OS", - "documentation":"

Source server OS.

" + "disks":{ + "shape":"Disks", + "documentation":"

Source Server disks.

" + }, + "cpus":{ + "shape":"Cpus", + "documentation":"

Source Server CPUs.

" }, "ramBytes":{ "shape":"PositiveInteger", "documentation":"

Source server RAM in bytes.

" }, - "recommendedInstanceType":{ - "shape":"EC2InstanceType", - "documentation":"

Source server recommended instance type.

" + "os":{ + "shape":"OS", + "documentation":"

Source server OS.

" } }, "documentation":"

Source server properties.

" @@ -4698,61 +4779,61 @@ "SourceServer":{ "type":"structure", "members":{ - "applicationID":{ - "shape":"ApplicationID", - "documentation":"

Source server application ID.

" + "sourceServerID":{ + "shape":"SourceServerID", + "documentation":"

Source server ID.

" }, "arn":{ "shape":"ARN", "documentation":"

Source server ARN.

" }, - "connectorAction":{ - "shape":"SourceServerConnectorAction", - "documentation":"

Source Server connector action.

" - }, - "dataReplicationInfo":{ - "shape":"DataReplicationInfo", - "documentation":"

Source server data replication info.

" - }, - "fqdnForActionFramework":{ - "shape":"BoundedString", - "documentation":"

Source server fqdn for action framework.

" - }, "isArchived":{ "shape":"Boolean", "documentation":"

Source server archived status.

" }, + "tags":{ + "shape":"TagsMap", + "documentation":"

Source server Tags.

" + }, "launchedInstance":{ "shape":"LaunchedInstance", "documentation":"

Source server launched instance.

" }, + "dataReplicationInfo":{ + "shape":"DataReplicationInfo", + "documentation":"

Source server data replication info.

" + }, "lifeCycle":{ "shape":"LifeCycle", "documentation":"

Source server lifecycle state.

" }, - "replicationType":{ - "shape":"ReplicationType", - "documentation":"

Source server replication type.

" - }, "sourceProperties":{ "shape":"SourceProperties", "documentation":"

Source server properties.

" }, - "sourceServerID":{ - "shape":"SourceServerID", - "documentation":"

Source server ID.

" + "replicationType":{ + "shape":"ReplicationType", + "documentation":"

Source server replication type.

" }, - "tags":{ - "shape":"TagsMap", - "documentation":"

Source server Tags.

" + "vcenterClientID":{ + "shape":"VcenterClientID", + "documentation":"

Source server vCenter client id.

" + }, + "applicationID":{ + "shape":"ApplicationID", + "documentation":"

Source server application ID.

" }, "userProvidedID":{ - "shape":"BoundedString", + "shape":"UserProvidedId", "documentation":"

Source server user provided ID.

" }, - "vcenterClientID":{ - "shape":"VcenterClientID", - "documentation":"

Source server vCenter client id.

" + "fqdnForActionFramework":{ + "shape":"BoundedString", + "documentation":"

Source server fqdn for action framework.

" + }, + "connectorAction":{ + "shape":"SourceServerConnectorAction", + "documentation":"

Source Server connector action.

" } } }, @@ -4767,45 +4848,45 @@ "shape":"ActionName", "documentation":"

Source server post migration custom action name.

" }, - "active":{ - "shape":"Boolean", - "documentation":"

Source server post migration custom action active status.

" - }, - "category":{ - "shape":"ActionCategory", - "documentation":"

Source server post migration custom action category.

" - }, - "description":{ - "shape":"ActionDescription", - "documentation":"

Source server post migration custom action description.

" - }, "documentIdentifier":{ "shape":"BoundedString", "documentation":"

Source server post migration custom action document identifier.

" }, + "order":{ + "shape":"OrderType", + "documentation":"

Source server post migration custom action order.

" + }, "documentVersion":{ "shape":"DocumentVersion", "documentation":"

Source server post migration custom action document version.

" }, - "externalParameters":{ - "shape":"SsmDocumentExternalParameters", - "documentation":"

Source server post migration custom action external parameters.

" + "active":{ + "shape":"Boolean", + "documentation":"

Source server post migration custom action active status.

" + }, + "timeoutSeconds":{ + "shape":"StrictlyPositiveInteger", + "documentation":"

Source server post migration custom action timeout in seconds.

" }, "mustSucceedForCutover":{ "shape":"Boolean", "documentation":"

Source server post migration custom action must succeed for cutover.

" }, - "order":{ - "shape":"OrderType", - "documentation":"

Source server post migration custom action order.

" - }, "parameters":{ "shape":"SsmDocumentParameters", "documentation":"

Source server post migration custom action parameters.

" }, - "timeoutSeconds":{ - "shape":"StrictlyPositiveInteger", - "documentation":"

Source server post migration custom action timeout in seconds.

" + "externalParameters":{ + "shape":"SsmDocumentExternalParameters", + "documentation":"

Source server post migration custom action external parameters.

" + }, + "description":{ + "shape":"ActionDescription", + "documentation":"

Source server post migration custom action description.

" + }, + "category":{ + "shape":"ActionCategory", + "documentation":"

Source server post migration custom action category.

" } } }, @@ -4828,13 +4909,13 @@ "SourceServerConnectorAction":{ "type":"structure", "members":{ - "connectorArn":{ - "shape":"ConnectorArn", - "documentation":"

Source Server connector action connector arn.

" - }, "credentialsSecretArn":{ "shape":"SecretArn", "documentation":"

Source Server connector action credentials secret arn.

" + }, + "connectorArn":{ + "shape":"ConnectorArn", + "documentation":"

Source Server connector action connector arn.

" } }, "documentation":"

Source Server connector action.

" @@ -4843,7 +4924,7 @@ "type":"string", "max":19, "min":19, - "pattern":"^s-[0-9a-zA-Z]{17}$" + "pattern":"s-[0-9a-zA-Z]{17}" }, "SourceServersList":{ "type":"list", @@ -4860,9 +4941,13 @@ "shape":"BoundedString", "documentation":"

User-friendly name for the AWS Systems Manager Document.

" }, - "externalParameters":{ - "shape":"SsmDocumentExternalParameters", - "documentation":"

AWS Systems Manager Document external parameters.

" + "ssmDocumentName":{ + "shape":"SsmDocumentName", + "documentation":"

AWS Systems Manager Document name or full ARN.

" + }, + "timeoutSeconds":{ + "shape":"StrictlyPositiveInteger", + "documentation":"

AWS Systems Manager Document timeout seconds.

" }, "mustSucceedForCutover":{ "shape":"Boolean", @@ -4872,14 +4957,10 @@ "shape":"SsmDocumentParameters", "documentation":"

AWS Systems Manager Document parameters.

" }, - "ssmDocumentName":{ - "shape":"SsmDocumentName", - "documentation":"

AWS Systems Manager Document name or full ARN.

" - }, - "timeoutSeconds":{ - "shape":"StrictlyPositiveInteger", - "documentation":"

AWS Systems Manager Document timeout seconds.

" - } + "externalParameters":{ + "shape":"SsmDocumentExternalParameters", + "documentation":"

AWS Systems Manager Document external parameters.

" + } }, "documentation":"

AWS Systems Manager Document.

" }, @@ -4894,13 +4975,13 @@ "type":"string", "max":172, "min":3, - "pattern":"^([A-Za-z0-9/:_\\.-])+$" + "pattern":"([A-Za-z0-9/:_\\.-])+" }, "SsmDocumentParameterName":{ "type":"string", "max":1011, "min":1, - "pattern":"^([A-Za-z0-9])+$" + "pattern":"([A-Za-z0-9])+" }, "SsmDocumentParameters":{ "type":"map", @@ -4937,22 +5018,22 @@ "type":"string", "max":20, "min":19, - "pattern":"(^i-[0-9a-zA-Z]{17}$)|(^mi-[0-9a-zA-Z]{17}$)" + "pattern":".*(^i-[0-9a-zA-Z]{17}$)|(^mi-[0-9a-zA-Z]{17}$).*" }, "SsmParameterStoreParameter":{ "type":"structure", "required":[ - "parameterName", - "parameterType" + "parameterType", + "parameterName" ], "members":{ - "parameterName":{ - "shape":"SsmParameterStoreParameterName", - "documentation":"

AWS Systems Manager Parameter Store parameter name.

" - }, "parameterType":{ "shape":"SsmParameterStoreParameterType", "documentation":"

AWS Systems Manager Parameter Store parameter type.

" + }, + "parameterName":{ + "shape":"SsmParameterStoreParameterName", + "documentation":"

AWS Systems Manager Parameter Store parameter name.

" } }, "documentation":"

AWS Systems Manager Parameter Store parameter.

" @@ -4961,11 +5042,14 @@ "type":"string", "max":1011, "min":1, - "pattern":"^([A-Za-z0-9_\\.-])+$" + "pattern":"([A-Za-z0-9_\\.-])+" }, "SsmParameterStoreParameterType":{ "type":"string", - "enum":["STRING"] + "enum":[ + "STRING", + "SECURE_STRING" + ] }, "SsmParameterStoreParameters":{ "type":"list", @@ -4977,10 +5061,6 @@ "type":"structure", "required":["sourceServerIDs"], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Start Cutover by Account IDs

" - }, "sourceServerIDs":{ "shape":"StartCutoverRequestSourceServerIDs", "documentation":"

Start Cutover by Source Server IDs.

" @@ -4988,6 +5068,10 @@ "tags":{ "shape":"TagsMap", "documentation":"

Start Cutover by Tags.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Start Cutover by Account IDs

" } } }, @@ -5017,13 +5101,17 @@ "shape":"S3BucketName", "documentation":"

Start export request s3 bucket.

" }, + "s3Key":{ + "shape":"S3Key", + "documentation":"

Start export request s3key.

" + }, "s3BucketOwner":{ "shape":"AccountID", "documentation":"

Start export request s3 bucket owner.

" }, - "s3Key":{ - "shape":"S3Key", - "documentation":"

Start export request s3key.

" + "tags":{ + "shape":"TagsMap", + "documentation":"

Start import request tags.

" } }, "documentation":"

Start export request.

" @@ -5050,6 +5138,10 @@ "s3BucketSource":{ "shape":"S3BucketSource", "documentation":"

Start import request s3 bucket source.

" + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

Start import request tags.

" } }, "documentation":"

Start import request.

" @@ -5068,13 +5160,13 @@ "type":"structure", "required":["sourceServerID"], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Account ID on which to start replication.

" - }, "sourceServerID":{ "shape":"SourceServerID", "documentation":"

ID of source server on which to start replication.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Account ID on which to start replication.

" } } }, @@ -5082,10 +5174,6 @@ "type":"structure", "required":["sourceServerIDs"], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Start Test for Account ID.

" - }, "sourceServerIDs":{ "shape":"StartTestRequestSourceServerIDs", "documentation":"

Start Test for Source Server IDs.

" @@ -5093,6 +5181,10 @@ "tags":{ "shape":"TagsMap", "documentation":"

Start Test by Tags.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Start Test for Account ID.

" } } }, @@ -5115,13 +5207,13 @@ "type":"structure", "required":["sourceServerID"], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Stop Replication Request account ID.

" - }, "sourceServerID":{ "shape":"SourceServerID", "documentation":"

Stop Replication Request source server ID.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Stop Replication Request account ID.

" } } }, @@ -5134,7 +5226,7 @@ "type":"string", "max":255, "min":0, - "pattern":"^subnet-[0-9a-fA-F]{8,}$" + "pattern":"subnet-[0-9a-fA-F]{8,}" }, "TagKey":{ "type":"string", @@ -5196,49 +5288,49 @@ "shape":"BoundedString", "documentation":"

Template post migration custom action name.

" }, - "active":{ - "shape":"Boolean", - "documentation":"

Template post migration custom action active status.

" - }, - "category":{ - "shape":"ActionCategory", - "documentation":"

Template post migration custom action category.

" - }, - "description":{ - "shape":"ActionDescription", - "documentation":"

Template post migration custom action description.

" - }, "documentIdentifier":{ "shape":"BoundedString", "documentation":"

Template post migration custom action document identifier.

" }, + "order":{ + "shape":"OrderType", + "documentation":"

Template post migration custom action order.

" + }, "documentVersion":{ "shape":"DocumentVersion", "documentation":"

Template post migration custom action document version.

" }, - "externalParameters":{ - "shape":"SsmDocumentExternalParameters", - "documentation":"

Template post migration custom action external parameters.

" + "active":{ + "shape":"Boolean", + "documentation":"

Template post migration custom action active status.

" + }, + "timeoutSeconds":{ + "shape":"StrictlyPositiveInteger", + "documentation":"

Template post migration custom action timeout in seconds.

" }, "mustSucceedForCutover":{ "shape":"Boolean", "documentation":"

Template post migration custom action must succeed for cutover.

" }, + "parameters":{ + "shape":"SsmDocumentParameters", + "documentation":"

Template post migration custom action parameters.

" + }, "operatingSystem":{ "shape":"OperatingSystemString", "documentation":"

Operating system eligible for this template post migration custom action.

" }, - "order":{ - "shape":"OrderType", - "documentation":"

Template post migration custom action order.

" + "externalParameters":{ + "shape":"SsmDocumentExternalParameters", + "documentation":"

Template post migration custom action external parameters.

" }, - "parameters":{ - "shape":"SsmDocumentParameters", - "documentation":"

Template post migration custom action parameters.

" + "description":{ + "shape":"ActionDescription", + "documentation":"

Template post migration custom action description.

" }, - "timeoutSeconds":{ - "shape":"StrictlyPositiveInteger", - "documentation":"

Template post migration custom action timeout in seconds.

" + "category":{ + "shape":"ActionCategory", + "documentation":"

Template post migration custom action category.

" } } }, @@ -5262,10 +5354,6 @@ "type":"structure", "required":["sourceServerIDs"], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Terminate Target instance by Account ID

" - }, "sourceServerIDs":{ "shape":"TerminateTargetInstancesRequestSourceServerIDs", "documentation":"

Terminate Target instance by Source Server IDs.

" @@ -5273,6 +5361,10 @@ "tags":{ "shape":"TagsMap", "documentation":"

Terminate Target instance by Tags.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Terminate Target instance by Account ID

" } } }, @@ -5296,6 +5388,10 @@ "required":["message"], "members":{ "message":{"shape":"LargeBoundedString"}, + "serviceCode":{ + "shape":"LargeBoundedString", + "documentation":"

Reached throttling quota exception service code.

" + }, "quotaCode":{ "shape":"LargeBoundedString", "documentation":"

Reached throttling quota exception.

" @@ -5305,10 +5401,6 @@ "documentation":"

Reached throttling quota exception will retry after x seconds.

", "location":"header", "locationName":"Retry-After" - }, - "serviceCode":{ - "shape":"LargeBoundedString", - "documentation":"

Reached throttling quota exception service code.

" } }, "documentation":"

Reached throttling quota exception.

", @@ -5328,13 +5420,13 @@ "type":"structure", "required":["applicationID"], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Account ID.

" - }, "applicationID":{ "shape":"ApplicationID", "documentation":"

Application ID.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Account ID.

" } } }, @@ -5342,21 +5434,21 @@ "type":"structure", "required":["waveID"], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Account ID.

" - }, "waveID":{ "shape":"WaveID", "documentation":"

Wave ID.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Account ID.

" } } }, "UninitializedAccountException":{ "type":"structure", "members":{ - "code":{"shape":"LargeBoundedString"}, - "message":{"shape":"LargeBoundedString"} + "message":{"shape":"LargeBoundedString"}, + "code":{"shape":"LargeBoundedString"} }, "documentation":"

Uninitialized account exception.

", "error":{ @@ -5390,21 +5482,21 @@ "type":"structure", "required":["applicationID"], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Account ID.

" - }, "applicationID":{ "shape":"ApplicationID", "documentation":"

Application ID.

" }, + "name":{ + "shape":"ApplicationName", + "documentation":"

Application name.

" + }, "description":{ "shape":"ApplicationDescription", "documentation":"

Application description.

" }, - "name":{ - "shape":"ApplicationName", - "documentation":"

Application name.

" + "accountID":{ + "shape":"AccountID", + "documentation":"

Account ID.

" } } }, @@ -5430,13 +5522,21 @@ "type":"structure", "required":["sourceServerID"], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Update Launch configuration Account ID.

" + "sourceServerID":{ + "shape":"SourceServerID", + "documentation":"

Update Launch configuration by Source Server ID request.

" }, - "bootMode":{ - "shape":"BootMode", - "documentation":"

Update Launch configuration boot mode request.

" + "name":{ + "shape":"SmallBoundedString", + "documentation":"

Update Launch configuration name request.

" + }, + "launchDisposition":{ + "shape":"LaunchDisposition", + "documentation":"

Update Launch configuration launch disposition request.

" + }, + "targetInstanceTypeRightSizingMethod":{ + "shape":"TargetInstanceTypeRightSizingMethod", + "documentation":"

Update Launch configuration Target instance right sizing request.

" }, "copyPrivateIp":{ "shape":"Boolean", @@ -5446,34 +5546,26 @@ "shape":"Boolean", "documentation":"

Update Launch configuration copy Tags request.

" }, - "enableMapAutoTagging":{ - "shape":"Boolean", - "documentation":"

Enable map auto tagging.

" - }, - "launchDisposition":{ - "shape":"LaunchDisposition", - "documentation":"

Update Launch configuration launch disposition request.

" - }, "licensing":{ "shape":"Licensing", "documentation":"

Update Launch configuration licensing request.

" }, + "bootMode":{ + "shape":"BootMode", + "documentation":"

Update Launch configuration boot mode request.

" + }, + "postLaunchActions":{"shape":"PostLaunchActions"}, + "enableMapAutoTagging":{ + "shape":"Boolean", + "documentation":"

Enable map auto tagging.

" + }, "mapAutoTaggingMpeID":{ "shape":"TagValue", "documentation":"

Launch configuration map auto tagging MPE ID.

" }, - "name":{ - "shape":"SmallBoundedString", - "documentation":"

Update Launch configuration name request.

" - }, - "postLaunchActions":{"shape":"PostLaunchActions"}, - "sourceServerID":{ - "shape":"SourceServerID", - "documentation":"

Update Launch configuration by Source Server ID request.

" - }, - "targetInstanceTypeRightSizingMethod":{ - "shape":"TargetInstanceTypeRightSizingMethod", - "documentation":"

Update Launch configuration Target instance right sizing request.

" + "accountID":{ + "shape":"AccountID", + "documentation":"

Update Launch configuration Account ID.

" } } }, @@ -5481,58 +5573,66 @@ "type":"structure", "required":["launchConfigurationTemplateID"], "members":{ - "associatePublicIpAddress":{ + "launchConfigurationTemplateID":{ + "shape":"LaunchConfigurationTemplateID", + "documentation":"

Launch Configuration Template ID.

" + }, + "postLaunchActions":{ + "shape":"PostLaunchActions", + "documentation":"

Post Launch Action to execute on the Test or Cutover instance.

" + }, + "enableMapAutoTagging":{ "shape":"Boolean", - "documentation":"

Associate public Ip address.

" + "documentation":"

Enable map auto tagging.

" }, - "bootMode":{ - "shape":"BootMode", - "documentation":"

Launch configuration template boot mode.

" + "mapAutoTaggingMpeID":{ + "shape":"TagValue", + "documentation":"

Launch configuration template map auto tagging MPE ID.

" + }, + "launchDisposition":{ + "shape":"LaunchDisposition", + "documentation":"

Launch disposition.

" + }, + "targetInstanceTypeRightSizingMethod":{ + "shape":"TargetInstanceTypeRightSizingMethod", + "documentation":"

Target instance type right-sizing method.

" }, "copyPrivateIp":{ "shape":"Boolean", "documentation":"

Copy private Ip.

" }, + "associatePublicIpAddress":{ + "shape":"Boolean", + "documentation":"

Associate public Ip address.

" + }, "copyTags":{ "shape":"Boolean", "documentation":"

Copy tags.

" }, - "enableMapAutoTagging":{ - "shape":"Boolean", - "documentation":"

Enable map auto tagging.

" + "licensing":{"shape":"Licensing"}, + "bootMode":{ + "shape":"BootMode", + "documentation":"

Launch configuration template boot mode.

" + }, + "smallVolumeMaxSize":{ + "shape":"PositiveInteger", + "documentation":"

Small volume maximum size.

" + }, + "smallVolumeConf":{ + "shape":"LaunchTemplateDiskConf", + "documentation":"

Small volume config.

" }, "largeVolumeConf":{ "shape":"LaunchTemplateDiskConf", "documentation":"

Large volume config.

" }, - "launchConfigurationTemplateID":{ - "shape":"LaunchConfigurationTemplateID", - "documentation":"

Launch Configuration Template ID.

" + "enableParametersEncryption":{ + "shape":"Boolean", + "documentation":"

Enable parameters encryption.

" }, - "launchDisposition":{ - "shape":"LaunchDisposition", - "documentation":"

Launch disposition.

" - }, - "licensing":{"shape":"Licensing"}, - "mapAutoTaggingMpeID":{ - "shape":"TagValue", - "documentation":"

Launch configuration template map auto tagging MPE ID.

" - }, - "postLaunchActions":{ - "shape":"PostLaunchActions", - "documentation":"

Post Launch Action to execute on the Test or Cutover instance.

" - }, - "smallVolumeConf":{ - "shape":"LaunchTemplateDiskConf", - "documentation":"

Small volume config.

" - }, - "smallVolumeMaxSize":{ - "shape":"PositiveInteger", - "documentation":"

Small volume maximum size.

" - }, - "targetInstanceTypeRightSizingMethod":{ - "shape":"TargetInstanceTypeRightSizingMethod", - "documentation":"

Target instance type right-sizing method.

" + "parametersEncryptionKey":{ + "shape":"ARN", + "documentation":"

Parameters encryption key.

" } } }, @@ -5540,30 +5640,42 @@ "type":"structure", "required":["sourceServerID"], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Update replication configuration Account ID request.

" + "sourceServerID":{ + "shape":"SourceServerID", + "documentation":"

Update replication configuration Source Server ID request.

" + }, + "name":{ + "shape":"SmallBoundedString", + "documentation":"

Update replication configuration name request.

" + }, + "stagingAreaSubnetId":{ + "shape":"SubnetID", + "documentation":"

Update replication configuration Staging Area subnet request.

" }, "associateDefaultSecurityGroup":{ "shape":"Boolean", "documentation":"

Update replication configuration associate default Application Migration Service Security group request.

" }, - "bandwidthThrottling":{ - "shape":"BandwidthThrottling", - "documentation":"

Update replication configuration bandwidth throttling request.

" + "replicationServersSecurityGroupsIDs":{ + "shape":"ReplicationServersSecurityGroupsIDs", + "documentation":"

Update replication configuration Replication Server Security Groups IDs request.

" }, - "createPublicIP":{ - "shape":"Boolean", - "documentation":"

Update replication configuration create Public IP request.

" + "replicationServerInstanceType":{ + "shape":"EC2InstanceType", + "documentation":"

Update replication configuration Replication Server instance type request.

" }, - "dataPlaneRouting":{ - "shape":"ReplicationConfigurationDataPlaneRouting", - "documentation":"

Update replication configuration data plane routing request.

" + "useDedicatedReplicationServer":{ + "shape":"Boolean", + "documentation":"

Update replication configuration use dedicated Replication Server request.

" }, "defaultLargeStagingDiskType":{ "shape":"ReplicationConfigurationDefaultLargeStagingDiskType", "documentation":"

Update replication configuration use default large Staging Disk type request.

" }, + "replicatedDisks":{ + "shape":"ReplicationConfigurationReplicatedDisks", + "documentation":"

Update replication configuration replicated disks request.

" + }, "ebsEncryption":{ "shape":"ReplicationConfigurationEbsEncryption", "documentation":"

Update replication configuration EBS encryption request.

" @@ -5572,41 +5684,33 @@ "shape":"ARN", "documentation":"

Update replication configuration EBS encryption key ARN request.

" }, - "name":{ - "shape":"SmallBoundedString", - "documentation":"

Update replication configuration name request.

" - }, - "replicatedDisks":{ - "shape":"ReplicationConfigurationReplicatedDisks", - "documentation":"

Update replication configuration replicated disks request.

" - }, - "replicationServerInstanceType":{ - "shape":"EC2InstanceType", - "documentation":"

Update replication configuration Replication Server instance type request.

" - }, - "replicationServersSecurityGroupsIDs":{ - "shape":"ReplicationServersSecurityGroupsIDs", - "documentation":"

Update replication configuration Replication Server Security Groups IDs request.

" + "bandwidthThrottling":{ + "shape":"BandwidthThrottling", + "documentation":"

Update replication configuration bandwidth throttling request.

" }, - "sourceServerID":{ - "shape":"SourceServerID", - "documentation":"

Update replication configuration Source Server ID request.

" + "dataPlaneRouting":{ + "shape":"ReplicationConfigurationDataPlaneRouting", + "documentation":"

Update replication configuration data plane routing request.

" }, - "stagingAreaSubnetId":{ - "shape":"SubnetID", - "documentation":"

Update replication configuration Staging Area subnet request.

" + "createPublicIP":{ + "shape":"Boolean", + "documentation":"

Update replication configuration create Public IP request.

" }, "stagingAreaTags":{ "shape":"TagsMap", "documentation":"

Update replication configuration Staging Area Tags request.

" }, - "useDedicatedReplicationServer":{ - "shape":"Boolean", - "documentation":"

Update replication configuration use dedicated Replication Server request.

" - }, "useFipsEndpoint":{ "shape":"Boolean", "documentation":"

Update replication configuration use Fips Endpoint.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Update replication configuration Account ID request.

" + }, + "internetProtocol":{ + "shape":"InternetProtocol", + "documentation":"

Update replication configuration internet protocol.

" } } }, @@ -5614,25 +5718,33 @@ "type":"structure", "required":["replicationConfigurationTemplateID"], "members":{ + "replicationConfigurationTemplateID":{ + "shape":"ReplicationConfigurationTemplateID", + "documentation":"

Update replication configuration template template ID request.

" + }, "arn":{ "shape":"ARN", "documentation":"

Update replication configuration template ARN request.

" }, + "stagingAreaSubnetId":{ + "shape":"SubnetID", + "documentation":"

Update replication configuration template Staging Area subnet ID request.

" + }, "associateDefaultSecurityGroup":{ "shape":"Boolean", "documentation":"

Update replication configuration template associate default Application Migration Service Security group request.

" }, - "bandwidthThrottling":{ - "shape":"BandwidthThrottling", - "documentation":"

Update replication configuration template bandwidth throttling request.

" + "replicationServersSecurityGroupsIDs":{ + "shape":"ReplicationServersSecurityGroupsIDs", + "documentation":"

Update replication configuration template Replication Server Security groups IDs request.

" }, - "createPublicIP":{ - "shape":"Boolean", - "documentation":"

Update replication configuration template create Public IP request.

" + "replicationServerInstanceType":{ + "shape":"EC2InstanceType", + "documentation":"

Update replication configuration template Replication Server instance type request.

" }, - "dataPlaneRouting":{ - "shape":"ReplicationConfigurationDataPlaneRouting", - "documentation":"

Update replication configuration template data plane routing request.

" + "useDedicatedReplicationServer":{ + "shape":"Boolean", + "documentation":"

Update replication configuration template use dedicated Replication Server request.

" }, "defaultLargeStagingDiskType":{ "shape":"ReplicationConfigurationDefaultLargeStagingDiskType", @@ -5646,54 +5758,50 @@ "shape":"ARN", "documentation":"

Update replication configuration template EBS encryption key ARN request.

" }, - "replicationConfigurationTemplateID":{ - "shape":"ReplicationConfigurationTemplateID", - "documentation":"

Update replication configuration template template ID request.

" - }, - "replicationServerInstanceType":{ - "shape":"EC2InstanceType", - "documentation":"

Update replication configuration template Replication Server instance type request.

" + "bandwidthThrottling":{ + "shape":"BandwidthThrottling", + "documentation":"

Update replication configuration template bandwidth throttling request.

" }, - "replicationServersSecurityGroupsIDs":{ - "shape":"ReplicationServersSecurityGroupsIDs", - "documentation":"

Update replication configuration template Replication Server Security groups IDs request.

" + "dataPlaneRouting":{ + "shape":"ReplicationConfigurationDataPlaneRouting", + "documentation":"

Update replication configuration template data plane routing request.

" }, - "stagingAreaSubnetId":{ - "shape":"SubnetID", - "documentation":"

Update replication configuration template Staging Area subnet ID request.

" + "createPublicIP":{ + "shape":"Boolean", + "documentation":"

Update replication configuration template create Public IP request.

" }, "stagingAreaTags":{ "shape":"TagsMap", "documentation":"

Update replication configuration template Staging Area Tags request.

" }, - "useDedicatedReplicationServer":{ - "shape":"Boolean", - "documentation":"

Update replication configuration template use dedicated Replication Server request.

" - }, "useFipsEndpoint":{ "shape":"Boolean", "documentation":"

Update replication configuration template use Fips Endpoint request.

" + }, + "internetProtocol":{ + "shape":"InternetProtocol", + "documentation":"

Update replication configuration template internet protocol request.

" } } }, "UpdateSourceServerReplicationTypeRequest":{ "type":"structure", "required":[ - "replicationType", - "sourceServerID" + "sourceServerID", + "replicationType" ], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Account ID on which to update replication type.

" + "sourceServerID":{ + "shape":"SourceServerID", + "documentation":"

ID of source server on which to update replication type.

" }, "replicationType":{ "shape":"ReplicationType", "documentation":"

Replication type to which to update source server.

" }, - "sourceServerID":{ - "shape":"SourceServerID", - "documentation":"

ID of source server on which to update replication type.

" + "accountID":{ + "shape":"AccountID", + "documentation":"

Account ID on which to update replication type.

" } } }, @@ -5705,13 +5813,13 @@ "shape":"AccountID", "documentation":"

Update Source Server request account ID.

" }, - "connectorAction":{ - "shape":"SourceServerConnectorAction", - "documentation":"

Update Source Server request connector action.

" - }, "sourceServerID":{ "shape":"SourceServerID", "documentation":"

Update Source Server request source server ID.

" + }, + "connectorAction":{ + "shape":"SourceServerConnectorAction", + "documentation":"

Update Source Server request connector action.

" } } }, @@ -5719,36 +5827,42 @@ "type":"structure", "required":["waveID"], "members":{ - "accountID":{ - "shape":"AccountID", - "documentation":"

Account ID.

" - }, - "description":{ - "shape":"WaveDescription", - "documentation":"

Wave description.

" + "waveID":{ + "shape":"WaveID", + "documentation":"

Wave ID.

" }, "name":{ "shape":"WaveName", "documentation":"

Wave name.

" }, - "waveID":{ - "shape":"WaveID", - "documentation":"

Wave ID.

" + "description":{ + "shape":"WaveDescription", + "documentation":"

Wave description.

" + }, + "accountID":{ + "shape":"AccountID", + "documentation":"

Account ID.

" } } }, + "UserProvidedId":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[^\\s\\x00]( *[^\\s\\x00])*" + }, "ValidationException":{ "type":"structure", "members":{ - "code":{"shape":"LargeBoundedString"}, - "fieldList":{ - "shape":"ValidationExceptionFieldList", - "documentation":"

Validate exception field list.

" - }, "message":{"shape":"LargeBoundedString"}, + "code":{"shape":"LargeBoundedString"}, "reason":{ "shape":"ValidationExceptionReason", "documentation":"

Validate exception reason.

" + }, + "fieldList":{ + "shape":"ValidationExceptionFieldList", + "documentation":"

Validate exception field list.

" } }, "documentation":"

Validate exception.

", @@ -5761,13 +5875,13 @@ "ValidationExceptionField":{ "type":"structure", "members":{ - "message":{ - "shape":"LargeBoundedString", - "documentation":"

Validate exception field message.

" - }, "name":{ "shape":"LargeBoundedString", "documentation":"

Validate exception field name.

" + }, + "message":{ + "shape":"LargeBoundedString", + "documentation":"

Validate exception field message.

" } }, "documentation":"

Validate exception field.

" @@ -5788,18 +5902,26 @@ "VcenterClient":{ "type":"structure", "members":{ + "vcenterClientID":{ + "shape":"VcenterClientID", + "documentation":"

ID of vCenter client.

" + }, "arn":{ "shape":"ARN", "documentation":"

Arn of vCenter client.

" }, - "datacenterName":{ - "shape":"BoundedString", - "documentation":"

Datacenter name of vCenter client.

" - }, "hostname":{ "shape":"BoundedString", "documentation":"

Hostname of vCenter client .

" }, + "vcenterUUID":{ + "shape":"BoundedString", + "documentation":"

Vcenter UUID of vCenter client.

" + }, + "datacenterName":{ + "shape":"BoundedString", + "documentation":"

Datacenter name of vCenter client.

" + }, "lastSeenDatetime":{ "shape":"ISO8601DatetimeString", "documentation":"

Last seen time of vCenter client.

" @@ -5811,14 +5933,6 @@ "tags":{ "shape":"TagsMap", "documentation":"

Tags for vCenter client.

" - }, - "vcenterClientID":{ - "shape":"VcenterClientID", - "documentation":"

ID of vCenter client.

" - }, - "vcenterUUID":{ - "shape":"BoundedString", - "documentation":"

Vcenter UUID of vCenter client.

" } }, "documentation":"

vCenter client.

" @@ -5827,7 +5941,7 @@ "type":"string", "max":21, "min":21, - "pattern":"^vcc-[0-9a-zA-Z]{17}$" + "pattern":"vcc-[0-9a-zA-Z]{17}" }, "VcenterClientList":{ "type":"list", @@ -5848,13 +5962,17 @@ "Wave":{ "type":"structure", "members":{ + "waveID":{ + "shape":"WaveID", + "documentation":"

Wave ID.

" + }, "arn":{ "shape":"ARN", "documentation":"

Wave ARN.

" }, - "creationDateTime":{ - "shape":"ISO8601DatetimeString", - "documentation":"

Wave creation dateTime.

" + "name":{ + "shape":"WaveName", + "documentation":"

Wave name.

" }, "description":{ "shape":"WaveDescription", @@ -5864,47 +5982,43 @@ "shape":"Boolean", "documentation":"

Wave archival status.

" }, + "waveAggregatedStatus":{ + "shape":"WaveAggregatedStatus", + "documentation":"

Wave aggregated status.

" + }, + "creationDateTime":{ + "shape":"ISO8601DatetimeString", + "documentation":"

Wave creation dateTime.

" + }, "lastModifiedDateTime":{ "shape":"ISO8601DatetimeString", "documentation":"

Wave last modified dateTime.

" }, - "name":{ - "shape":"WaveName", - "documentation":"

Wave name.

" - }, "tags":{ "shape":"TagsMap", "documentation":"

Wave tags.

" - }, - "waveAggregatedStatus":{ - "shape":"WaveAggregatedStatus", - "documentation":"

Wave aggregated status.

" - }, - "waveID":{ - "shape":"WaveID", - "documentation":"

Wave ID.

" } } }, "WaveAggregatedStatus":{ "type":"structure", "members":{ - "healthStatus":{ - "shape":"WaveHealthStatus", - "documentation":"

Wave aggregated status health status.

" - }, "lastUpdateDateTime":{ "shape":"ISO8601DatetimeString", "documentation":"

Wave aggregated status last update dateTime.

" }, - "progressStatus":{ - "shape":"WaveProgressStatus", - "documentation":"

Wave aggregated status progress status.

" - }, "replicationStartedDateTime":{ "shape":"ISO8601DatetimeString", "documentation":"

DateTime marking when the first source server in the wave started replication.

" }, + "healthStatus":{ + "shape":"WaveHealthStatus", + "documentation":"

Wave aggregated status health status.

" + }, + "progressStatus":{ + "shape":"WaveProgressStatus", + "documentation":"

Wave aggregated status progress status.

" + }, "totalApplications":{ "shape":"PositiveInteger", "documentation":"

Wave aggregated status total applications amount.

" @@ -5916,7 +6030,7 @@ "type":"string", "max":600, "min":0, - "pattern":"^[^\\x00]*$" + "pattern":"[^\\x00]*" }, "WaveHealthStatus":{ "type":"string", @@ -5930,7 +6044,7 @@ "type":"string", "max":22, "min":22, - "pattern":"^wave-[0-9a-zA-Z]{17}$" + "pattern":"wave-[0-9a-zA-Z]{17}" }, "WaveIDsFilter":{ "type":"list", @@ -5942,7 +6056,7 @@ "type":"string", "max":256, "min":1, - "pattern":"^[^\\s\\x00]( *[^\\s\\x00])*$" + "pattern":"[^\\s\\x00]( *[^\\s\\x00])*" }, "WaveProgressStatus":{ "type":"string", diff --git a/awscli/botocore/data/mgn/2020-02-26/waiters-2.json b/awscli/botocore/data/mgn/2020-02-26/waiters-2.json new file mode 100644 index 000000000000..13f60ee66be6 --- /dev/null +++ b/awscli/botocore/data/mgn/2020-02-26/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/awscli/botocore/data/nova-act/2025-08-22/endpoint-rule-set-1.json b/awscli/botocore/data/nova-act/2025-08-22/endpoint-rule-set-1.json new file mode 100644 index 000000000000..62c818fe9e3b --- /dev/null +++ b/awscli/botocore/data/nova-act/2025-08-22/endpoint-rule-set-1.json @@ -0,0 +1,344 @@ +{ + "version": "1.0", + "parameters": { + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. 
If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "string" + }, + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "string" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": 
"getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://nova-act-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + false + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + true + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://nova-act-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + false + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://nova-act.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } 
+ ], + "type": "tree" + }, + { + "conditions": [], + "endpoint": { + "url": "https://nova-act.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] +} \ No newline at end of file diff --git a/awscli/botocore/data/nova-act/2025-08-22/paginators-1.json b/awscli/botocore/data/nova-act/2025-08-22/paginators-1.json new file mode 100644 index 000000000000..d50ded2db8e4 --- /dev/null +++ b/awscli/botocore/data/nova-act/2025-08-22/paginators-1.json @@ -0,0 +1,28 @@ +{ + "pagination": { + "ListActs": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "actSummaries" + }, + "ListSessions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "sessionSummaries" + }, + "ListWorkflowDefinitions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "workflowDefinitionSummaries" + }, + "ListWorkflowRuns": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "workflowRunSummaries" + } + } +} diff --git a/awscli/botocore/data/nova-act/2025-08-22/service-2.json b/awscli/botocore/data/nova-act/2025-08-22/service-2.json new file mode 100644 index 000000000000..983bdc136b62 --- /dev/null +++ b/awscli/botocore/data/nova-act/2025-08-22/service-2.json @@ -0,0 +1,1824 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2025-08-22", + "auth":["aws.auth#sigv4"], + "endpointPrefix":"nova-act", + "protocol":"rest-json", + "protocols":["rest-json"], + "serviceFullName":"Nova Act Service", + "serviceId":"Nova Act", + "signatureVersion":"v4", + "signingName":"nova-act", + "uid":"nova-act-2025-08-22" + }, + "operations":{ + "CreateAct":{ + 
"name":"CreateAct", + "http":{ + "method":"PUT", + "requestUri":"/workflow-definitions/{workflowDefinitionName}/workflow-runs/{workflowRunId}/sessions/{sessionId}/acts", + "responseCode":201 + }, + "input":{"shape":"CreateActRequest"}, + "output":{"shape":"CreateActResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Creates a new AI task (act) within a session that can interact with tools and perform specific actions.

", + "idempotent":true + }, + "CreateSession":{ + "name":"CreateSession", + "http":{ + "method":"PUT", + "requestUri":"/workflow-definitions/{workflowDefinitionName}/workflow-runs/{workflowRunId}/sessions", + "responseCode":201 + }, + "input":{"shape":"CreateSessionRequest"}, + "output":{"shape":"CreateSessionResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Creates a new session context within a workflow run to manage conversation state and acts.

", + "idempotent":true + }, + "CreateWorkflowDefinition":{ + "name":"CreateWorkflowDefinition", + "http":{ + "method":"PUT", + "requestUri":"/workflow-definitions", + "responseCode":201 + }, + "input":{"shape":"CreateWorkflowDefinitionRequest"}, + "output":{"shape":"CreateWorkflowDefinitionResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Creates a new workflow definition template that can be used to execute multiple workflow runs.

", + "idempotent":true + }, + "CreateWorkflowRun":{ + "name":"CreateWorkflowRun", + "http":{ + "method":"PUT", + "requestUri":"/workflow-definitions/{workflowDefinitionName}/workflow-runs", + "responseCode":201 + }, + "input":{"shape":"CreateWorkflowRunRequest"}, + "output":{"shape":"CreateWorkflowRunResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Creates a new execution instance of a workflow definition with specified parameters.

", + "idempotent":true + }, + "DeleteWorkflowDefinition":{ + "name":"DeleteWorkflowDefinition", + "http":{ + "method":"DELETE", + "requestUri":"/workflow-definitions/{workflowDefinitionName}", + "responseCode":202 + }, + "input":{"shape":"DeleteWorkflowDefinitionRequest"}, + "output":{"shape":"DeleteWorkflowDefinitionResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Deletes a workflow definition and all associated resources. This operation cannot be undone.

", + "idempotent":true + }, + "DeleteWorkflowRun":{ + "name":"DeleteWorkflowRun", + "http":{ + "method":"DELETE", + "requestUri":"/workflow-definitions/{workflowDefinitionName}/workflow-runs/{workflowRunId}", + "responseCode":202 + }, + "input":{"shape":"DeleteWorkflowRunRequest"}, + "output":{"shape":"DeleteWorkflowRunResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Terminates and cleans up a workflow run, stopping all associated acts and sessions.

", + "idempotent":true + }, + "GetWorkflowDefinition":{ + "name":"GetWorkflowDefinition", + "http":{ + "method":"GET", + "requestUri":"/workflow-definitions/{workflowDefinitionName}", + "responseCode":200 + }, + "input":{"shape":"GetWorkflowDefinitionRequest"}, + "output":{"shape":"GetWorkflowDefinitionResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Retrieves the details and configuration of a specific workflow definition.

", + "readonly":true + }, + "GetWorkflowRun":{ + "name":"GetWorkflowRun", + "http":{ + "method":"GET", + "requestUri":"/workflow-definitions/{workflowDefinitionName}/workflow-runs/{workflowRunId}", + "responseCode":200 + }, + "input":{"shape":"GetWorkflowRunRequest"}, + "output":{"shape":"GetWorkflowRunResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Retrieves the current state, configuration, and execution details of a workflow run.

", + "readonly":true + }, + "InvokeActStep":{ + "name":"InvokeActStep", + "http":{ + "method":"PUT", + "requestUri":"/workflow-definitions/{workflowDefinitionName}/workflow-runs/{workflowRunId}/sessions/{sessionId}/acts/{actId}/invoke-step/", + "responseCode":200 + }, + "input":{"shape":"InvokeActStepRequest"}, + "output":{"shape":"InvokeActStepResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Executes the next step of an act, processing tool call results and returning new tool calls if needed.

" + }, + "ListActs":{ + "name":"ListActs", + "http":{ + "method":"POST", + "requestUri":"/workflow-definitions/{workflowDefinitionName}/acts", + "responseCode":200 + }, + "input":{"shape":"ListActsRequest"}, + "output":{"shape":"ListActsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Lists all acts within a specific session with their current status and execution details.

", + "readonly":true + }, + "ListModels":{ + "name":"ListModels", + "http":{ + "method":"POST", + "requestUri":"/models", + "responseCode":200 + }, + "input":{"shape":"ListModelsRequest"}, + "output":{"shape":"ListModelsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists all available AI models that can be used for workflow execution, including their status and compatibility information.

", + "readonly":true + }, + "ListSessions":{ + "name":"ListSessions", + "http":{ + "method":"POST", + "requestUri":"/workflow-definitions/{workflowDefinitionName}/workflow-runs/{workflowRunId}", + "responseCode":200 + }, + "input":{"shape":"ListSessionsRequest"}, + "output":{"shape":"ListSessionsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Lists all sessions within a specific workflow run.

", + "readonly":true + }, + "ListWorkflowDefinitions":{ + "name":"ListWorkflowDefinitions", + "http":{ + "method":"POST", + "requestUri":"/workflow-definitions", + "responseCode":200 + }, + "input":{"shape":"ListWorkflowDefinitionsRequest"}, + "output":{"shape":"ListWorkflowDefinitionsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Lists all workflow definitions in your account with optional filtering and pagination.

", + "readonly":true + }, + "ListWorkflowRuns":{ + "name":"ListWorkflowRuns", + "http":{ + "method":"POST", + "requestUri":"/workflow-definitions/{workflowDefinitionName}/workflow-runs", + "responseCode":200 + }, + "input":{"shape":"ListWorkflowRunsRequest"}, + "output":{"shape":"ListWorkflowRunsResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Lists all workflow runs for a specific workflow definition with optional filtering and pagination.

", + "readonly":true + }, + "UpdateAct":{ + "name":"UpdateAct", + "http":{ + "method":"PUT", + "requestUri":"/workflow-definitions/{workflowDefinitionName}/workflow-runs/{workflowRunId}/sessions/{sessionId}/acts/{actId}", + "responseCode":200 + }, + "input":{"shape":"UpdateActRequest"}, + "output":{"shape":"UpdateActResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Updates an existing act's configuration, status, or error information.

", + "idempotent":true + }, + "UpdateWorkflowRun":{ + "name":"UpdateWorkflowRun", + "http":{ + "method":"PUT", + "requestUri":"/workflow-definitions/{workflowDefinitionName}/workflow-runs/{workflowRunId}", + "responseCode":200 + }, + "input":{"shape":"UpdateWorkflowRunRequest"}, + "output":{"shape":"UpdateWorkflowRunResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"} + ], + "documentation":"

Updates the configuration or state of an active workflow run.

", + "idempotent":true + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{ + "shape":"NonBlankString", + "documentation":"

You don't have sufficient permissions to perform this action. Verify your IAM permissions and try again.

" + } + }, + "documentation":"

You don't have sufficient permissions to perform this action.

", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "ActError":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{ + "shape":"ActErrorMessageString", + "documentation":"

A human-readable description of the error that occurred.

" + }, + "type":{ + "shape":"ActErrorTypeString", + "documentation":"

The type or category of error that occurred.

" + } + }, + "documentation":"

Error information when an act fails to execute successfully.

" + }, + "ActErrorMessageString":{ + "type":"string", + "max":10000, + "min":1, + "sensitive":true + }, + "ActErrorTypeString":{ + "type":"string", + "max":100, + "min":1 + }, + "ActStatus":{ + "type":"string", + "enum":[ + "RUNNING", + "PENDING_CLIENT_ACTION", + "PENDING_HUMAN_ACTION", + "SUCCEEDED", + "FAILED", + "TIMED_OUT" + ] + }, + "ActSummaries":{ + "type":"list", + "member":{"shape":"ActSummary"} + }, + "ActSummary":{ + "type":"structure", + "required":[ + "workflowRunId", + "sessionId", + "actId", + "status", + "startedAt" + ], + "members":{ + "workflowRunId":{ + "shape":"UuidString", + "documentation":"

The unique identifier of the workflow run containing this act.

" + }, + "sessionId":{ + "shape":"UuidString", + "documentation":"

The unique identifier of the session containing this act.

" + }, + "actId":{ + "shape":"UuidString", + "documentation":"

The unique identifier of the act.

" + }, + "status":{ + "shape":"ActStatus", + "documentation":"

The current execution status of the act.

" + }, + "startedAt":{ + "shape":"DateTimestamp", + "documentation":"

The timestamp when the act started execution.

" + }, + "endedAt":{ + "shape":"DateTimestamp", + "documentation":"

The timestamp when the act completed execution, if applicable.

" + }, + "traceLocation":{ + "shape":"TraceLocation", + "documentation":"

The location where trace information for this act is stored.

" + } + }, + "documentation":"

Summary information about an act, including its status and execution timing.

" + }, + "Call":{ + "type":"structure", + "required":[ + "callId", + "input", + "name" + ], + "members":{ + "callId":{ + "shape":"CallId", + "documentation":"

A unique identifier for this tool call, used to match results back to requests.

" + }, + "input":{ + "shape":"SensitiveDocument", + "documentation":"

The input parameters for the tool call, formatted according to the tool's schema.

" + }, + "name":{ + "shape":"String", + "documentation":"

The name of the tool to invoke, following the pattern 'tool.{toolName}' or 'browser.{browserAction}'.

" + } + }, + "documentation":"

A request for the client to execute a specific tool with given parameters.

" + }, + "CallId":{ + "type":"string", + "max":100, + "min":1 + }, + "CallResult":{ + "type":"structure", + "required":["content"], + "members":{ + "callId":{ + "shape":"CallId", + "documentation":"

The identifier of the tool call that this result corresponds to.

" + }, + "content":{ + "shape":"CallResultContents", + "documentation":"

The content returned by the tool execution, which can include text or other media types.

" + } + }, + "documentation":"

The result returned from executing a tool call.

" + }, + "CallResultContent":{ + "type":"structure", + "members":{ + "text":{ + "shape":"String", + "documentation":"

Text content returned from the tool execution.

" + } + }, + "documentation":"

Content returned from a tool call execution.

", + "sensitive":true, + "union":true + }, + "CallResultContents":{ + "type":"list", + "member":{"shape":"CallResultContent"}, + "max":100, + "min":1 + }, + "CallResults":{ + "type":"list", + "member":{"shape":"CallResult"}, + "max":100, + "min":1 + }, + "Calls":{ + "type":"list", + "member":{"shape":"Call"} + }, + "ClientInfo":{ + "type":"structure", + "required":["compatibilityVersion"], + "members":{ + "compatibilityVersion":{ + "shape":"Integer", + "documentation":"

The compatibility version of the client, used to ensure API compatibility.

" + }, + "sdkVersion":{ + "shape":"NonBlankString", + "documentation":"

The version of the SDK being used by the client.

" + } + }, + "documentation":"

Information about the client making API requests, used for compatibility checking.

" + }, + "ClientToken":{ + "type":"string", + "max":256, + "min":33, + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,256}" + }, + "CloudWatchLogGroupName":{ + "type":"string", + "max":512, + "min":1, + "pattern":"[a-zA-Z0-9_/.-]+" + }, + "CompatibilityInformation":{ + "type":"structure", + "required":[ + "clientCompatibilityVersion", + "supportedModelIds" + ], + "members":{ + "clientCompatibilityVersion":{ + "shape":"Integer", + "documentation":"

The client compatibility version that was requested.

" + }, + "supportedModelIds":{ + "shape":"ModelIdList", + "documentation":"

A list of model IDs that are supported for the client compatibility version.

" + }, + "message":{ + "shape":"NonBlankString", + "documentation":"

Additional information about compatibility requirements or recommendations.

" + } + }, + "documentation":"

Information about client compatibility and supported model versions.

" + }, + "ConflictException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{ + "shape":"NonBlankString", + "documentation":"

The requested operation conflicts with the current state of the resource.

" + }, + "resourceId":{ + "shape":"NonBlankString", + "documentation":"

The identifier of the resource that caused the conflict.

" + }, + "resourceType":{ + "shape":"NonBlankString", + "documentation":"

The type of resource that caused the conflict.

" + } + }, + "documentation":"

The request could not be completed due to a conflict with the current state of the resource.

", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "CreateActRequest":{ + "type":"structure", + "required":[ + "workflowDefinitionName", + "workflowRunId", + "sessionId", + "task" + ], + "members":{ + "workflowDefinitionName":{ + "shape":"WorkflowDefinitionName", + "documentation":"

The name of the workflow definition containing the session.

", + "location":"uri", + "locationName":"workflowDefinitionName" + }, + "workflowRunId":{ + "shape":"UuidString", + "documentation":"

The unique identifier of the workflow run containing the session.

", + "location":"uri", + "locationName":"workflowRunId" + }, + "sessionId":{ + "shape":"UuidString", + "documentation":"

The unique identifier of the session to create the act in.

", + "location":"uri", + "locationName":"sessionId" + }, + "task":{ + "shape":"Task", + "documentation":"

The task description that defines what the act should accomplish.

" + }, + "toolSpecs":{ + "shape":"ToolSpecs", + "documentation":"

A list of tool specifications that the act can invoke to complete its task.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", + "idempotencyToken":true + } + } + }, + "CreateActResponse":{ + "type":"structure", + "required":[ + "actId", + "status" + ], + "members":{ + "actId":{ + "shape":"UuidString", + "documentation":"

The unique identifier for the created act.

" + }, + "status":{ + "shape":"ActStatus", + "documentation":"

The initial status of the act after creation.

" + } + } + }, + "CreateSessionRequest":{ + "type":"structure", + "required":[ + "workflowDefinitionName", + "workflowRunId" + ], + "members":{ + "workflowDefinitionName":{ + "shape":"WorkflowDefinitionName", + "documentation":"

The name of the workflow definition containing the workflow run.

", + "location":"uri", + "locationName":"workflowDefinitionName" + }, + "workflowRunId":{ + "shape":"UuidString", + "documentation":"

The unique identifier of the workflow run to create the session in.

", + "location":"uri", + "locationName":"workflowRunId" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", + "idempotencyToken":true + } + } + }, + "CreateSessionResponse":{ + "type":"structure", + "required":["sessionId"], + "members":{ + "sessionId":{ + "shape":"UuidString", + "documentation":"

The unique identifier for the created session.

" + } + } + }, + "CreateWorkflowDefinitionRequest":{ + "type":"structure", + "required":["name"], + "members":{ + "name":{ + "shape":"WorkflowDefinitionName", + "documentation":"

The name of the workflow definition. Must be unique within your account and region.

" + }, + "description":{ + "shape":"WorkflowDescription", + "documentation":"

An optional description of the workflow definition's purpose and functionality.

" + }, + "exportConfig":{ + "shape":"WorkflowExportConfig", + "documentation":"

Configuration for exporting workflow execution data to Amazon Simple Storage Service.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", + "idempotencyToken":true + } + } + }, + "CreateWorkflowDefinitionResponse":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"WorkflowDefinitionStatus", + "documentation":"

The current status of the workflow definition after creation.

" + } + } + }, + "CreateWorkflowRunRequest":{ + "type":"structure", + "required":[ + "workflowDefinitionName", + "modelId", + "clientInfo" + ], + "members":{ + "workflowDefinitionName":{ + "shape":"WorkflowDefinitionName", + "documentation":"

The name of the workflow definition to execute.

", + "location":"uri", + "locationName":"workflowDefinitionName" + }, + "modelId":{ + "shape":"ModelId", + "documentation":"

The ID of the AI model to use for workflow execution.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", + "idempotencyToken":true + }, + "logGroupName":{ + "shape":"CloudWatchLogGroupName", + "documentation":"

The CloudWatch log group name for storing workflow execution logs.

" + }, + "clientInfo":{ + "shape":"ClientInfo", + "documentation":"

Information about the client making the request, including compatibility version and SDK version.

" + } + } + }, + "CreateWorkflowRunResponse":{ + "type":"structure", + "required":[ + "workflowRunId", + "status" + ], + "members":{ + "workflowRunId":{ + "shape":"UuidString", + "documentation":"

The unique identifier for the created workflow run.

" + }, + "status":{ + "shape":"WorkflowRunStatus", + "documentation":"

The initial status of the workflow run after creation.

" + } + } + }, + "DateTimestamp":{ + "type":"timestamp", + "timestampFormat":"iso8601" + }, + "DeleteWorkflowDefinitionRequest":{ + "type":"structure", + "required":["workflowDefinitionName"], + "members":{ + "workflowDefinitionName":{ + "shape":"WorkflowDefinitionName", + "documentation":"

The name of the workflow definition to delete.

", + "location":"uri", + "locationName":"workflowDefinitionName" + } + } + }, + "DeleteWorkflowDefinitionResponse":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"WorkflowDefinitionStatus", + "documentation":"

The status of the workflow definition after deletion request.

" + } + } + }, + "DeleteWorkflowRunRequest":{ + "type":"structure", + "required":[ + "workflowDefinitionName", + "workflowRunId" + ], + "members":{ + "workflowDefinitionName":{ + "shape":"WorkflowDefinitionName", + "documentation":"

The name of the workflow definition containing the workflow run.

", + "location":"uri", + "locationName":"workflowDefinitionName" + }, + "workflowRunId":{ + "shape":"UuidString", + "documentation":"

The unique identifier of the workflow run to delete.

", + "location":"uri", + "locationName":"workflowRunId" + } + } + }, + "DeleteWorkflowRunResponse":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"WorkflowRunStatus", + "documentation":"

The status of the workflow run after deletion request.

" + } + } + }, + "GetWorkflowDefinitionRequest":{ + "type":"structure", + "required":["workflowDefinitionName"], + "members":{ + "workflowDefinitionName":{ + "shape":"WorkflowDefinitionName", + "documentation":"

The name of the workflow definition to retrieve.

", + "location":"uri", + "locationName":"workflowDefinitionName" + } + } + }, + "GetWorkflowDefinitionResponse":{ + "type":"structure", + "required":[ + "name", + "arn", + "createdAt", + "status" + ], + "members":{ + "name":{ + "shape":"WorkflowDefinitionName", + "documentation":"

The name of the workflow definition.

" + }, + "arn":{ + "shape":"WorkflowDefinitionArn", + "documentation":"

The Amazon Resource Name (ARN) of the workflow definition.

" + }, + "createdAt":{ + "shape":"DateTimestamp", + "documentation":"

The timestamp when the workflow definition was created.

" + }, + "description":{ + "shape":"WorkflowDescription", + "documentation":"

The description of the workflow definition.

" + }, + "exportConfig":{ + "shape":"WorkflowExportConfig", + "documentation":"

The export configuration for the workflow definition.

" + }, + "status":{ + "shape":"WorkflowDefinitionStatus", + "documentation":"

The current status of the workflow definition.

" + } + } + }, + "GetWorkflowRunRequest":{ + "type":"structure", + "required":[ + "workflowDefinitionName", + "workflowRunId" + ], + "members":{ + "workflowDefinitionName":{ + "shape":"WorkflowDefinitionName", + "documentation":"

The name of the workflow definition containing the workflow run.

", + "location":"uri", + "locationName":"workflowDefinitionName" + }, + "workflowRunId":{ + "shape":"UuidString", + "documentation":"

The unique identifier of the workflow run to retrieve.

", + "location":"uri", + "locationName":"workflowRunId" + } + } + }, + "GetWorkflowRunResponse":{ + "type":"structure", + "required":[ + "workflowRunArn", + "workflowRunId", + "status", + "startedAt", + "modelId" + ], + "members":{ + "workflowRunArn":{ + "shape":"WorkflowRunArn", + "documentation":"

The Amazon Resource Name (ARN) of the workflow run.

" + }, + "workflowRunId":{ + "shape":"UuidString", + "documentation":"

The unique identifier of the workflow run.

" + }, + "status":{ + "shape":"WorkflowRunStatus", + "documentation":"

The current execution status of the workflow run.

" + }, + "startedAt":{ + "shape":"DateTimestamp", + "documentation":"

The timestamp when the workflow run started execution.

" + }, + "endedAt":{ + "shape":"DateTimestamp", + "documentation":"

The timestamp when the workflow run completed execution, if applicable.

" + }, + "modelId":{ + "shape":"ModelId", + "documentation":"

The ID of the AI model being used for this workflow run.

" + }, + "logGroupName":{ + "shape":"CloudWatchLogGroupName", + "documentation":"

The CloudWatch log group name for this workflow run's logs.

" + } + } + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalServerException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{ + "shape":"NonBlankString", + "documentation":"

The service encountered an internal error. Try again later.

" + }, + "retryAfterSeconds":{ + "shape":"Integer", + "documentation":"

The number of seconds to wait before retrying the request.

", + "location":"header", + "locationName":"Retry-After" + }, + "reason":{ + "shape":"InternalServerExceptionReason", + "documentation":"

The reason for the internal server error.

" + } + }, + "documentation":"

An internal server error occurred. Please try again later.

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "InternalServerExceptionReason":{ + "type":"string", + "enum":[ + "InvalidModelGeneration", + "RequestTokenLimitExceeded" + ] + }, + "InvokeActStepRequest":{ + "type":"structure", + "required":[ + "workflowDefinitionName", + "workflowRunId", + "sessionId", + "actId", + "callResults" + ], + "members":{ + "workflowDefinitionName":{ + "shape":"WorkflowDefinitionName", + "documentation":"

The name of the workflow definition containing the act.

", + "location":"uri", + "locationName":"workflowDefinitionName" + }, + "workflowRunId":{ + "shape":"UuidString", + "documentation":"

The unique identifier of the workflow run containing the act.

", + "location":"uri", + "locationName":"workflowRunId" + }, + "sessionId":{ + "shape":"UuidString", + "documentation":"

The unique identifier of the session containing the act.

", + "location":"uri", + "locationName":"sessionId" + }, + "actId":{ + "shape":"UuidString", + "documentation":"

The unique identifier of the act to invoke the next step for.

", + "location":"uri", + "locationName":"actId" + }, + "callResults":{ + "shape":"CallResults", + "documentation":"

The results from previous tool calls that the act requested.

" + }, + "previousStepId":{ + "shape":"UuidString", + "documentation":"

The identifier of the previous step, used for tracking execution flow.

" + } + } + }, + "InvokeActStepResponse":{ + "type":"structure", + "required":[ + "calls", + "stepId" + ], + "members":{ + "calls":{ + "shape":"Calls", + "documentation":"

A list of tool calls that the act wants to execute in this step.

" + }, + "stepId":{ + "shape":"UuidString", + "documentation":"

The unique identifier for this execution step.

" + } + } + }, + "ListActsRequest":{ + "type":"structure", + "required":["workflowDefinitionName"], + "members":{ + "workflowDefinitionName":{ + "shape":"WorkflowDefinitionName", + "documentation":"

The name of the workflow definition containing the session.

", + "location":"uri", + "locationName":"workflowDefinitionName" + }, + "workflowRunId":{ + "shape":"UuidString", + "documentation":"

The unique identifier of the workflow run containing the session.

", + "location":"querystring", + "locationName":"workflowRunId" + }, + "sessionId":{ + "shape":"UuidString", + "documentation":"

The unique identifier of the session to list acts for.

", + "location":"querystring", + "locationName":"sessionId" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of acts to return in a single response.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for retrieving the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "sortOrder":{ + "shape":"SortOrder", + "documentation":"

The sort order for the returned acts (ascending or descending).

" + } + } + }, + "ListActsResponse":{ + "type":"structure", + "required":["actSummaries"], + "members":{ + "actSummaries":{ + "shape":"ActSummaries", + "documentation":"

A list of summary information for acts in the session.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for retrieving the next page of results, if available.

" + } + } + }, + "ListModelsRequest":{ + "type":"structure", + "required":["clientCompatibilityVersion"], + "members":{ + "clientCompatibilityVersion":{ + "shape":"Integer", + "documentation":"

The client compatibility version to filter models by compatibility.

", + "location":"querystring", + "locationName":"clientCompatibilityVersion" + } + } + }, + "ListModelsResponse":{ + "type":"structure", + "required":[ + "modelSummaries", + "modelAliases", + "compatibilityInformation" + ], + "members":{ + "modelSummaries":{ + "shape":"ModelSummaries", + "documentation":"

A list of available AI models with their status and compatibility information.

" + }, + "modelAliases":{ + "shape":"ModelAliases", + "documentation":"

A list of model aliases that provide stable references to model versions.

" + }, + "compatibilityInformation":{ + "shape":"CompatibilityInformation", + "documentation":"

Information about client compatibility and supported models.

" + } + } + }, + "ListSessionsRequest":{ + "type":"structure", + "required":[ + "workflowDefinitionName", + "workflowRunId" + ], + "members":{ + "workflowDefinitionName":{ + "shape":"WorkflowDefinitionName", + "documentation":"

The name of the workflow definition containing the workflow run.

", + "location":"uri", + "locationName":"workflowDefinitionName" + }, + "workflowRunId":{ + "shape":"UuidString", + "documentation":"

The unique identifier of the workflow run to list sessions for.

", + "location":"uri", + "locationName":"workflowRunId" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of sessions to return in a single response.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for retrieving the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "sortOrder":{ + "shape":"SortOrder", + "documentation":"

The sort order for the returned sessions (ascending or descending).

" + } + } + }, + "ListSessionsResponse":{ + "type":"structure", + "required":["sessionSummaries"], + "members":{ + "sessionSummaries":{ + "shape":"SessionSummaries", + "documentation":"

A list of summary information for sessions in the workflow run.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for retrieving the next page of results, if available.

" + } + } + }, + "ListWorkflowDefinitionsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of workflow definitions to return in a single response.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for retrieving the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "sortOrder":{ + "shape":"SortOrder", + "documentation":"

The sort order for the returned workflow definitions (ascending or descending).

" + } + } + }, + "ListWorkflowDefinitionsResponse":{ + "type":"structure", + "required":["workflowDefinitionSummaries"], + "members":{ + "workflowDefinitionSummaries":{ + "shape":"WorkflowDefinitionSummaries", + "documentation":"

A list of summary information for workflow definitions.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for retrieving the next page of results, if available.

" + } + } + }, + "ListWorkflowRunsRequest":{ + "type":"structure", + "required":["workflowDefinitionName"], + "members":{ + "workflowDefinitionName":{ + "shape":"WorkflowDefinitionName", + "documentation":"

The name of the workflow definition to list workflow runs for.

", + "location":"uri", + "locationName":"workflowDefinitionName" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of workflow runs to return in a single response.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for retrieving the next page of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "sortOrder":{ + "shape":"SortOrder", + "documentation":"

The sort order for the returned workflow runs (ascending or descending).

" + } + } + }, + "ListWorkflowRunsResponse":{ + "type":"structure", + "required":["workflowRunSummaries"], + "members":{ + "workflowRunSummaries":{ + "shape":"WorkflowRunSummaries", + "documentation":"

A list of summary information for workflow runs.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for retrieving the next page of results, if available.

" + } + } + }, + "MaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ModelAlias":{ + "type":"structure", + "required":[ + "aliasName", + "latestModelId" + ], + "members":{ + "aliasName":{ + "shape":"ModelId", + "documentation":"

The name of the model alias.

" + }, + "latestModelId":{ + "shape":"ModelId", + "documentation":"

The model ID that this alias currently points to.

" + }, + "resolvedModelId":{ + "shape":"ModelId", + "documentation":"

The resolved model ID after alias resolution.

" + } + }, + "documentation":"

An alias that provides a stable reference to a model version.

" + }, + "ModelAliases":{ + "type":"list", + "member":{"shape":"ModelAlias"} + }, + "ModelId":{ + "type":"string", + "max":100, + "min":1 + }, + "ModelIdList":{ + "type":"list", + "member":{"shape":"ModelId"} + }, + "ModelLifecycle":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"ModelStatus", + "documentation":"

The current lifecycle status of the model.

" + } + }, + "documentation":"

Lifecycle information for an AI model.

" + }, + "ModelStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "LEGACY", + "DEPRECATED", + "PREVIEW" + ] + }, + "ModelSummaries":{ + "type":"list", + "member":{"shape":"ModelSummary"} + }, + "ModelSummary":{ + "type":"structure", + "required":[ + "modelId", + "modelLifecycle", + "minimumCompatibilityVersion" + ], + "members":{ + "modelId":{ + "shape":"ModelId", + "documentation":"

The unique identifier of the model.

" + }, + "modelLifecycle":{ + "shape":"ModelLifecycle", + "documentation":"

The lifecycle information for the model.

" + }, + "minimumCompatibilityVersion":{ + "shape":"Integer", + "documentation":"

The minimum client compatibility version required to use this model.

" + } + }, + "documentation":"

Summary information about an available AI model.

" + }, + "NextToken":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"\\S*" + }, + "NonBlankString":{ + "type":"string", + "pattern":"[\\s\\S]+" + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType" + ], + "members":{ + "message":{ + "shape":"NonBlankString", + "documentation":"

The specified resource was not found.

" + }, + "resourceId":{ + "shape":"NonBlankString", + "documentation":"

The identifier of the resource that wasn't found.

" + }, + "resourceType":{ + "shape":"NonBlankString", + "documentation":"

The type of resource that wasn't found.

" + } + }, + "documentation":"

The requested resource was not found.

", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "S3BucketName":{ + "type":"string", + "max":63, + "min":3, + "pattern":"[a-z0-9][a-z0-9.-]*[a-z0-9]" + }, + "S3KeyPrefix":{ + "type":"string", + "documentation":"

A prefix for S3 object keys that will be prepended to 'step_{N}.json'. Must follow S3 object key naming guidelines and cannot end with a forward slash as it will be directly concatenated with the filename.

", + "max":100, + "min":1, + "pattern":"[a-zA-Z0-9!\\-_.*'()]+(?:/[a-zA-Z0-9!\\-_.*'()]+)*" + }, + "SensitiveDocument":{ + "type":"structure", + "members":{}, + "document":true, + "sensitive":true + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "required":[ + "message", + "resourceId", + "resourceType", + "serviceCode", + "quotaCode" + ], + "members":{ + "message":{ + "shape":"NonBlankString", + "documentation":"

The request would exceed one or more service quotas for your account.

" + }, + "resourceId":{ + "shape":"NonBlankString", + "documentation":"

The identifier of the resource that exceeded the quota.

" + }, + "resourceType":{ + "shape":"NonBlankString", + "documentation":"

The type of resource that exceeded the quota.

" + }, + "serviceCode":{ + "shape":"NonBlankString", + "documentation":"

The service code for the quota that was exceeded.

" + }, + "quotaCode":{ + "shape":"NonBlankString", + "documentation":"

The code for the specific quota that was exceeded.

" + } + }, + "documentation":"

The request would exceed a service quota limit.

", + "error":{ + "httpStatusCode":402, + "senderFault":true + }, + "exception":true + }, + "SessionSummaries":{ + "type":"list", + "member":{"shape":"SessionSummary"} + }, + "SessionSummary":{ + "type":"structure", + "required":["sessionId"], + "members":{ + "sessionId":{ + "shape":"UuidString", + "documentation":"

The unique identifier of the session.

" + } + }, + "documentation":"

Summary information about a session within a workflow run.

" + }, + "SortOrder":{ + "type":"string", + "enum":[ + "Ascending", + "Descending" + ] + }, + "String":{"type":"string"}, + "Task":{ + "type":"string", + "max":10000, + "min":1, + "sensitive":true + }, + "ThrottlingException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{ + "shape":"NonBlankString", + "documentation":"

The request was denied due to request throttling.

" + }, + "serviceCode":{ + "shape":"NonBlankString", + "documentation":"

The service code where throttling occurred.

" + }, + "quotaCode":{ + "shape":"NonBlankString", + "documentation":"

The quota code related to the throttling.

" + }, + "retryAfterSeconds":{ + "shape":"Integer", + "documentation":"

The number of seconds to wait before retrying the throttled request.

", + "location":"header", + "locationName":"Retry-After" + } + }, + "documentation":"

The request was throttled due to too many requests. Please try again later.

", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":true} + }, + "ToolDescription":{ + "type":"string", + "max":10000, + "min":1, + "sensitive":true + }, + "ToolInputSchema":{ + "type":"structure", + "members":{ + "json":{ + "shape":"ToolInputSchemaDocument", + "documentation":"

The JSON schema that defines the expected input format for the tool.

" + } + }, + "documentation":"

The schema definition for tool input parameters.

", + "sensitive":true, + "union":true + }, + "ToolInputSchemaDocument":{ + "type":"structure", + "members":{}, + "document":true + }, + "ToolName":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[a-zA-Z0-9_-]+" + }, + "ToolSpec":{ + "type":"structure", + "required":[ + "name", + "description", + "inputSchema" + ], + "members":{ + "name":{ + "shape":"ToolName", + "documentation":"

The unique name of the tool that acts will use to invoke it.

" + }, + "description":{ + "shape":"ToolDescription", + "documentation":"

A description of what the tool does and how it should be used.

" + }, + "inputSchema":{ + "shape":"ToolInputSchema", + "documentation":"

The JSON schema that defines the expected input format for the tool.

" + } + }, + "documentation":"

Specification for a tool that acts can invoke, including its name, description, and input schema.

" + }, + "ToolSpecs":{ + "type":"list", + "member":{"shape":"ToolSpec"}, + "max":100, + "min":0 + }, + "TraceLocation":{ + "type":"structure", + "required":[ + "locationType", + "location" + ], + "members":{ + "locationType":{ + "shape":"TraceLocationType", + "documentation":"

The type of storage location for the trace data.

" + }, + "location":{ + "shape":"NonBlankString", + "documentation":"

The specific location where the trace data is stored.

" + } + }, + "documentation":"

Information about where trace data is stored for debugging and monitoring.

" + }, + "TraceLocationType":{ + "type":"string", + "enum":["S3"] + }, + "UpdateActRequest":{ + "type":"structure", + "required":[ + "workflowDefinitionName", + "workflowRunId", + "sessionId", + "actId", + "status" + ], + "members":{ + "workflowDefinitionName":{ + "shape":"WorkflowDefinitionName", + "documentation":"

The name of the workflow definition containing the act.

", + "location":"uri", + "locationName":"workflowDefinitionName" + }, + "workflowRunId":{ + "shape":"UuidString", + "documentation":"

The unique identifier of the workflow run containing the act.

", + "location":"uri", + "locationName":"workflowRunId" + }, + "sessionId":{ + "shape":"UuidString", + "documentation":"

The unique identifier of the session containing the act.

", + "location":"uri", + "locationName":"sessionId" + }, + "actId":{ + "shape":"UuidString", + "documentation":"

The unique identifier of the act to update.

", + "location":"uri", + "locationName":"actId" + }, + "status":{ + "shape":"ActStatus", + "documentation":"

The new status to set for the act.

" + }, + "error":{ + "shape":"ActError", + "documentation":"

Error information to associate with the act, if applicable.

" + } + } + }, + "UpdateActResponse":{ + "type":"structure", + "members":{} + }, + "UpdateWorkflowRunRequest":{ + "type":"structure", + "required":[ + "workflowDefinitionName", + "workflowRunId", + "status" + ], + "members":{ + "workflowDefinitionName":{ + "shape":"WorkflowDefinitionName", + "documentation":"

The name of the workflow definition containing the workflow run.

", + "location":"uri", + "locationName":"workflowDefinitionName" + }, + "workflowRunId":{ + "shape":"UuidString", + "documentation":"

The unique identifier of the workflow run to update.

", + "location":"uri", + "locationName":"workflowRunId" + }, + "status":{ + "shape":"WorkflowRunStatus", + "documentation":"

The new status to set for the workflow run.

" + } + } + }, + "UpdateWorkflowRunResponse":{ + "type":"structure", + "members":{} + }, + "UuidString":{ + "type":"string", + "pattern":"[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" + }, + "ValidationException":{ + "type":"structure", + "required":[ + "message", + "reason" + ], + "members":{ + "message":{ + "shape":"NonBlankString", + "documentation":"

The input fails to satisfy the constraints specified by the service.

" + }, + "reason":{ + "shape":"ValidationExceptionReason", + "documentation":"

The reason for the validation failure.

" + }, + "fieldList":{ + "shape":"ValidationExceptionFieldList", + "documentation":"

The list of fields that failed validation.

" + } + }, + "documentation":"

The input parameters for the request are invalid.

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "ValidationExceptionField":{ + "type":"structure", + "required":[ + "name", + "message" + ], + "members":{ + "name":{ + "shape":"NonBlankString", + "documentation":"

The name of the field that failed validation.

" + }, + "message":{ + "shape":"NonBlankString", + "documentation":"

A description of the validation error for this field.

" + } + }, + "documentation":"

Information about a field that failed validation.

" + }, + "ValidationExceptionFieldList":{ + "type":"list", + "member":{"shape":"ValidationExceptionField"} + }, + "ValidationExceptionReason":{ + "type":"string", + "enum":[ + "FieldValidationFailed", + "InvalidStatus", + "GuardrailIntervened" + ] + }, + "WorkflowDefinitionArn":{ + "type":"string", + "pattern":"arn:(aws|aws-cn|aws-us-gov):nova-act:[a-z0-9-]+:[0-9]{12}:workflow-definition/[a-zA-Z0-9_-]{1,40}" + }, + "WorkflowDefinitionName":{ + "type":"string", + "max":40, + "min":1, + "pattern":"[a-zA-Z0-9_-]{1,40}" + }, + "WorkflowDefinitionStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "DELETING" + ] + }, + "WorkflowDefinitionSummaries":{ + "type":"list", + "member":{"shape":"WorkflowDefinitionSummary"} + }, + "WorkflowDefinitionSummary":{ + "type":"structure", + "required":[ + "workflowDefinitionArn", + "workflowDefinitionName", + "createdAt", + "status" + ], + "members":{ + "workflowDefinitionArn":{ + "shape":"WorkflowDefinitionArn", + "documentation":"

The Amazon Resource Name (ARN) of the workflow definition.

" + }, + "workflowDefinitionName":{ + "shape":"WorkflowDefinitionName", + "documentation":"

The name of the workflow definition.

" + }, + "createdAt":{ + "shape":"DateTimestamp", + "documentation":"

The timestamp when the workflow definition was created.

" + }, + "status":{ + "shape":"WorkflowDefinitionStatus", + "documentation":"

The current status of the workflow definition.

" + } + }, + "documentation":"

Summary information about a workflow definition, used in list operations.

" + }, + "WorkflowDescription":{ + "type":"string", + "max":4000, + "min":1, + "sensitive":true + }, + "WorkflowExportConfig":{ + "type":"structure", + "required":["s3BucketName"], + "members":{ + "s3BucketName":{ + "shape":"S3BucketName", + "documentation":"

The name of your Amazon S3 bucket that Nova Act uses to export your workflow data. Note that the IAM role used to access Nova Act must also have write permissions to this bucket.

" + }, + "s3KeyPrefix":{ + "shape":"S3KeyPrefix", + "documentation":"

An optional prefix for Amazon S3 object keys to organize exported data.

" + } + }, + "documentation":"

Configuration settings for exporting workflow execution data and logs to Amazon Simple Storage Service (Amazon S3).

" + }, + "WorkflowRunArn":{ + "type":"string", + "pattern":"arn:(aws|aws-cn|aws-us-gov):nova-act:[a-z0-9-]+:[0-9]{12}:workflow-definition/[a-zA-Z0-9_-]{1,40}/workflow-run/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" + }, + "WorkflowRunStatus":{ + "type":"string", + "enum":[ + "RUNNING", + "SUCCEEDED", + "FAILED", + "TIMED_OUT", + "DELETING" + ] + }, + "WorkflowRunSummaries":{ + "type":"list", + "member":{"shape":"WorkflowRunSummary"} + }, + "WorkflowRunSummary":{ + "type":"structure", + "required":[ + "workflowRunArn", + "workflowRunId", + "status", + "startedAt" + ], + "members":{ + "workflowRunArn":{ + "shape":"WorkflowRunArn", + "documentation":"

The Amazon Resource Name (ARN) of the workflow run.

" + }, + "workflowRunId":{ + "shape":"UuidString", + "documentation":"

The unique identifier of the workflow run.

" + }, + "status":{ + "shape":"WorkflowRunStatus", + "documentation":"

The current execution status of the workflow run.

" + }, + "startedAt":{ + "shape":"DateTimestamp", + "documentation":"

The timestamp when the workflow run started execution.

" + }, + "endedAt":{ + "shape":"DateTimestamp", + "documentation":"

The timestamp when the workflow run completed execution, if applicable.

" + }, + "traceLocation":{ + "shape":"TraceLocation", + "documentation":"

The location where trace information for this workflow run is stored.

" + } + }, + "documentation":"

Summary information about a workflow run, including execution status and timing.

" + } + }, + "documentation":"

The Nova Act service provides a REST API for managing AI-powered workflow automation. It enables users to create workflow definitions, execute workflow runs, manage sessions, and orchestrate acts (individual AI tasks) with tool integrations.

" +} diff --git a/awscli/botocore/data/nova-act/2025-08-22/waiters-2.json b/awscli/botocore/data/nova-act/2025-08-22/waiters-2.json new file mode 100644 index 000000000000..13f60ee66be6 --- /dev/null +++ b/awscli/botocore/data/nova-act/2025-08-22/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/awscli/botocore/data/observabilityadmin/2018-05-10/paginators-1.json b/awscli/botocore/data/observabilityadmin/2018-05-10/paginators-1.json index 837d3687bf47..6fe945f01552 100644 --- a/awscli/botocore/data/observabilityadmin/2018-05-10/paginators-1.json +++ b/awscli/botocore/data/observabilityadmin/2018-05-10/paginators-1.json @@ -29,6 +29,18 @@ "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "CentralizationRuleSummaries" + }, + "ListS3TableIntegrations": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "IntegrationSummaries" + }, + "ListTelemetryPipelines": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "PipelineSummaries" } } } diff --git a/awscli/botocore/data/observabilityadmin/2018-05-10/service-2.json b/awscli/botocore/data/observabilityadmin/2018-05-10/service-2.json index a7f1c6ae842d..972b04d2a3f9 100644 --- a/awscli/botocore/data/observabilityadmin/2018-05-10/service-2.json +++ b/awscli/botocore/data/observabilityadmin/2018-05-10/service-2.json @@ -32,6 +32,44 @@ ], "documentation":"

Creates a centralization rule that applies across an Amazon Web Services Organization. This operation can only be called by the organization's management account or a delegated administrator account.

" }, + "CreateS3TableIntegration":{ + "name":"CreateS3TableIntegration", + "http":{ + "method":"POST", + "requestUri":"/CreateS3TableIntegration", + "responseCode":200 + }, + "input":{"shape":"CreateS3TableIntegrationInput"}, + "output":{"shape":"CreateS3TableIntegrationOutput"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Creates an integration between CloudWatch and S3 Tables for analytics. This integration enables querying CloudWatch telemetry data using analytics engines like Amazon Athena, Amazon Redshift, and Apache Spark.

" + }, + "CreateTelemetryPipeline":{ + "name":"CreateTelemetryPipeline", + "http":{ + "method":"POST", + "requestUri":"/CreateTelemetryPipeline", + "responseCode":200 + }, + "input":{"shape":"CreateTelemetryPipelineInput"}, + "output":{"shape":"CreateTelemetryPipelineOutput"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Creates a telemetry pipeline for processing and transforming telemetry data. The pipeline defines how data flows from sources through processors to destinations, enabling data transformation and delivery capabilities.

" + }, "CreateTelemetryRule":{ "name":"CreateTelemetryRule", "http":{ @@ -87,6 +125,44 @@ ], "documentation":"

Deletes an organization-wide centralization rule. This operation can only be called by the organization's management account or a delegated administrator account.

" }, + "DeleteS3TableIntegration":{ + "name":"DeleteS3TableIntegration", + "http":{ + "method":"POST", + "requestUri":"/DeleteS3TableIntegration", + "responseCode":204 + }, + "input":{"shape":"DeleteS3TableIntegrationInput"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"InvalidStateException"}, + {"shape":"ValidationException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Deletes an S3 Table integration and its associated data. This operation removes the connection between CloudWatch Observability Admin and S3 Tables.

" + }, + "DeleteTelemetryPipeline":{ + "name":"DeleteTelemetryPipeline", + "http":{ + "method":"POST", + "requestUri":"/DeleteTelemetryPipeline", + "responseCode":200 + }, + "input":{"shape":"DeleteTelemetryPipelineInput"}, + "output":{"shape":"DeleteTelemetryPipelineOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Deletes a telemetry pipeline and its associated resources. This operation stops data processing and removes the pipeline configuration.

", + "idempotent":true + }, "DeleteTelemetryRule":{ "name":"DeleteTelemetryRule", "http":{ @@ -140,6 +216,25 @@ "documentation":"

Retrieves the details of a specific organization centralization rule. This operation can only be called by the organization's management account or a delegated administrator account.

", "readonly":true }, + "GetS3TableIntegration":{ + "name":"GetS3TableIntegration", + "http":{ + "method":"POST", + "requestUri":"/GetS3TableIntegration", + "responseCode":200 + }, + "input":{"shape":"GetS3TableIntegrationInput"}, + "output":{"shape":"GetS3TableIntegrationOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Retrieves information about a specific S3 Table integration, including its configuration, status, and metadata.

", + "readonly":true + }, "GetTelemetryEnrichmentStatus":{ "name":"GetTelemetryEnrichmentStatus", "http":{ @@ -154,7 +249,7 @@ {"shape":"InternalServerException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Returns the current status of the resource tags for telemetry feature, which enhances telemetry data with additional resource metadata from Amazon Web Services Resource Explorer.

", + "documentation":"

Returns the current status of the resource tags for telemetry feature, which enhances telemetry data with additional resource metadata from Resource Explorer.

", "readonly":true }, "GetTelemetryEvaluationStatus":{ @@ -190,6 +285,25 @@ "documentation":"

This returns the onboarding status of the telemetry configuration feature for the organization. It can only be called by a Management Account of an Amazon Web Services Organization or an assigned Delegated Admin Account of Amazon CloudWatch telemetry config.

", "readonly":true }, + "GetTelemetryPipeline":{ + "name":"GetTelemetryPipeline", + "http":{ + "method":"POST", + "requestUri":"/GetTelemetryPipeline", + "responseCode":200 + }, + "input":{"shape":"GetTelemetryPipelineInput"}, + "output":{"shape":"GetTelemetryPipelineOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Retrieves information about a specific telemetry pipeline, including its configuration, status, and metadata.

", + "readonly":true + }, "GetTelemetryRule":{ "name":"GetTelemetryRule", "http":{ @@ -282,6 +396,24 @@ "documentation":"

Returns a list of telemetry configurations for Amazon Web Services resources supported by telemetry config in the organization.

", "readonly":true }, + "ListS3TableIntegrations":{ + "name":"ListS3TableIntegrations", + "http":{ + "method":"POST", + "requestUri":"/ListS3TableIntegrations", + "responseCode":200 + }, + "input":{"shape":"ListS3TableIntegrationsInput"}, + "output":{"shape":"ListS3TableIntegrationsOutput"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Lists all S3 Table integrations in your account. We recommend using pagination to ensure that the operation returns quickly and successfully.

", + "readonly":true + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -298,7 +430,25 @@ {"shape":"ValidationException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Lists all tags attached to the specified telemetry rule resource.

", + "documentation":"

Lists all tags attached to the specified resource. Supports telemetry rule resources and telemetry pipeline resources.

", + "readonly":true + }, + "ListTelemetryPipelines":{ + "name":"ListTelemetryPipelines", + "http":{ + "method":"POST", + "requestUri":"/ListTelemetryPipelines", + "responseCode":200 + }, + "input":{"shape":"ListTelemetryPipelinesInput"}, + "output":{"shape":"ListTelemetryPipelinesOutput"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Returns a list of telemetry pipelines in your account. Returns up to 100 results. If more than 100 telemetry pipelines exist, include the NextToken value from the response to retrieve the next set of results.

", "readonly":true }, "ListTelemetryRules":{ @@ -351,7 +501,7 @@ {"shape":"InternalServerException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Enables the resource tags for telemetry feature for your account, which enhances telemetry data with additional resource metadata from Amazon Web Services Resource Explorer to provide richer context for monitoring and observability.

" + "documentation":"

Enables the resource tags for telemetry feature for your account, which enhances telemetry data with additional resource metadata from Resource Explorer to provide richer context for monitoring and observability.

" }, "StartTelemetryEvaluation":{ "name":"StartTelemetryEvaluation", @@ -445,7 +595,24 @@ {"shape":"ValidationException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Adds or updates tags for a telemetry rule resource.

" + "documentation":"

Adds or updates tags for a resource. Supports telemetry rule resources and telemetry pipeline resources.

" + }, + "TestTelemetryPipeline":{ + "name":"TestTelemetryPipeline", + "http":{ + "method":"POST", + "requestUri":"/TestTelemetryPipeline", + "responseCode":200 + }, + "input":{"shape":"TestTelemetryPipelineInput"}, + "output":{"shape":"TestTelemetryPipelineOutput"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Tests a pipeline configuration with sample records to validate data processing before deployment. This operation helps ensure your pipeline configuration works as expected.

" }, "UntagResource":{ "name":"UntagResource", @@ -462,7 +629,7 @@ {"shape":"ValidationException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Removes tags from a telemetry rule resource.

" + "documentation":"

Removes tags from a resource. Supports telemetry rule resources and telemetry pipeline resources.

" }, "UpdateCentralizationRuleForOrganization":{ "name":"UpdateCentralizationRuleForOrganization", @@ -483,6 +650,24 @@ ], "documentation":"

Updates an existing centralization rule that applies across an Amazon Web Services Organization. This operation can only be called by the organization's management account or a delegated administrator account.

" }, + "UpdateTelemetryPipeline":{ + "name":"UpdateTelemetryPipeline", + "http":{ + "method":"POST", + "requestUri":"/UpdateTelemetryPipeline", + "responseCode":200 + }, + "input":{"shape":"UpdateTelemetryPipelineInput"}, + "output":{"shape":"UpdateTelemetryPipelineOutput"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Updates the configuration of an existing telemetry pipeline.

The following attributes cannot be updated after pipeline creation:

Processors can be added, removed, or modified. However, some processors are not supported for third-party pipelines and cannot be added through updates.

Source-Specific Update Rules

CloudWatch Logs Sources (Vended and Custom)

Updatable: sts_role_arn

Fixed: data_source_name, data_source_type, sink (must remain @original)

S3 Sources (Crowdstrike, Zscaler, SentinelOne, Custom)

Updatable: All SQS configuration parameters, sts_role_arn, codec settings, compression type, bucket ownership settings, sink log group

Fixed: notification_type, aws.region

GitHub Audit Logs

Updatable: All Amazon Web Services Secrets Manager attributes, scope (can switch between ORGANIZATION/ENTERPRISE), organization or enterprise name, range, authentication credentials (PAT or GitHub App)

Microsoft Sources (Entra ID, Office365, Windows)

Updatable: All Amazon Web Services Secrets Manager attributes, tenant_id, workspace_id (Windows only), OAuth2 credentials (client_id, client_secret)

Okta Sources (SSO, Auth0)

Updatable: All Amazon Web Services Secrets Manager attributes, domain, range (SSO only), OAuth2 credentials (client_id, client_secret)

Palo Alto Networks

Updatable: All Amazon Web Services Secrets Manager attributes, hostname, basic authentication credentials (username, password)

ServiceNow CMDB

Updatable: All Amazon Web Services Secrets Manager attributes, instance_url, range, OAuth2 credentials (client_id, client_secret)

Wiz CNAPP

Updatable: All Amazon Web Services Secrets Manager attributes, region, range, OAuth2 credentials (client_id, client_secret)

" + }, "UpdateTelemetryRule":{ "name":"UpdateTelemetryRule", "http":{ @@ -495,12 +680,13 @@ "errors":[ {"shape":"ServiceQuotaExceededException"}, {"shape":"ResourceNotFoundException"}, + {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, {"shape":"InternalServerException"}, {"shape":"ValidationException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Updates an existing telemetry rule in your account.

" + "documentation":"

Updates an existing telemetry rule in your account. If multiple users attempt to modify the same telemetry rule simultaneously, a ConflictException is returned to provide specific error information for concurrent modification scenarios.

" }, "UpdateTelemetryRuleForOrganization":{ "name":"UpdateTelemetryRuleForOrganization", @@ -520,6 +706,23 @@ {"shape":"TooManyRequestsException"} ], "documentation":"

Updates an existing telemetry rule that applies across an Amazon Web Services Organization. This operation can only be called by the organization's management account or a delegated administrator account.

" + }, + "ValidateTelemetryPipelineConfiguration":{ + "name":"ValidateTelemetryPipelineConfiguration", + "http":{ + "method":"POST", + "requestUri":"/ValidateTelemetryPipelineConfiguration", + "responseCode":200 + }, + "input":{"shape":"ValidateTelemetryPipelineConfigurationInput"}, + "output":{"shape":"ValidateTelemetryPipelineConfigurationOutput"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"TooManyRequestsException"} + ], + "documentation":"

Validates a pipeline configuration without creating the pipeline. This operation checks the configuration for syntax errors and compatibility issues.

" } }, "shapes":{ @@ -553,6 +756,82 @@ "max":10, "min":1 }, + "Action":{ + "type":"string", + "documentation":"

Enumeration of WAF actions that can be matched in filter conditions.

", + "enum":[ + "ALLOW", + "BLOCK", + "COUNT", + "CAPTCHA", + "CHALLENGE", + "EXCLUDED_AS_COUNT" + ] + }, + "ActionCondition":{ + "type":"structure", + "members":{ + "Action":{ + "shape":"Action", + "documentation":"

The WAF action to match against (ALLOW, BLOCK, COUNT, CAPTCHA, CHALLENGE, EXCLUDED_AS_COUNT).

" + } + }, + "documentation":"

Condition that matches based on the specific WAF action taken on the request.

" + }, + "AdvancedEventSelector":{ + "type":"structure", + "required":["FieldSelectors"], + "members":{ + "Name":{ + "shape":"String", + "documentation":"

An optional, descriptive name for an advanced event selector, such as \"Log data events for only two S3 buckets\".

" + }, + "FieldSelectors":{ + "shape":"FieldSelectors", + "documentation":"

Contains all selector statements in an advanced event selector.

" + } + }, + "documentation":"

Advanced event selectors let you create fine-grained selectors for management, data, and network activity events.

" + }, + "AdvancedEventSelectors":{ + "type":"list", + "member":{"shape":"AdvancedEventSelector"} + }, + "AdvancedFieldSelector":{ + "type":"structure", + "required":["Field"], + "members":{ + "Field":{ + "shape":"String", + "documentation":"

The name of the field to use for selection.

" + }, + "Equals":{ + "shape":"StringList", + "documentation":"

Matches if the field value equals the specified value.

" + }, + "StartsWith":{ + "shape":"StringList", + "documentation":"

Matches if the field value starts with the specified value.

" + }, + "EndsWith":{ + "shape":"StringList", + "documentation":"

Matches if the field value ends with the specified value.

" + }, + "NotEquals":{ + "shape":"StringList", + "documentation":"

Matches if the field value does not equal the specified value.

" + }, + "NotStartsWith":{ + "shape":"StringList", + "documentation":"

Matches if the field value does not start with the specified value.

" + }, + "NotEndsWith":{ + "shape":"StringList", + "documentation":"

Matches if the field value does not end with the specified value.

" + } + }, + "documentation":"

Defines criteria for selecting resources based on field values.

" + }, "AwsResourceExplorerManagedViewArn":{ "type":"string", "max":2048, @@ -677,10 +956,75 @@ }, "documentation":"

A summary of a centralization rule's key properties and status.

" }, + "CloudtrailParameters":{ + "type":"structure", + "required":["AdvancedEventSelectors"], + "members":{ + "AdvancedEventSelectors":{ + "shape":"AdvancedEventSelectors", + "documentation":"

The advanced event selectors to use for filtering Amazon Web Services CloudTrail events.

" + } + }, + "documentation":"

Parameters specific to Amazon Web Services CloudTrail telemetry configuration.

" + }, + "Condition":{ + "type":"structure", + "members":{ + "ActionCondition":{ + "shape":"ActionCondition", + "documentation":"

Matches log records based on the WAF rule action taken (ALLOW, BLOCK, COUNT, etc.).

" + }, + "LabelNameCondition":{ + "shape":"LabelNameCondition", + "documentation":"

Matches log records based on WAF rule labels applied to the request.

" + } + }, + "documentation":"

A single condition that can match based on WAF rule action or label name.

" + }, + "Conditions":{ + "type":"list", + "member":{"shape":"Condition"}, + "documentation":"

A list of conditions for filter matching, with at least one condition required.

", + "min":1 + }, + "ConfigurationSummary":{ + "type":"structure", + "members":{ + "Sources":{ + "shape":"Sources", + "documentation":"

The list of sources configured in the pipeline.

" + }, + "DataSources":{ + "shape":"DataSources", + "documentation":"

The list of data sources that provide telemetry data to the pipeline.

" + }, + "Processors":{ + "shape":"Processors", + "documentation":"

The list of processors configured in the pipeline for data transformation.

" + }, + "ProcessorCount":{ + "shape":"Integer", + "documentation":"

The total number of processors configured in the pipeline.

" + }, + "Sinks":{ + "shape":"Sinks", + "documentation":"

The list of destinations where processed data is sent.

" + } + }, + "documentation":"

Provides a summary of pipeline configuration components including sources, processors, and destinations.

" + }, "ConflictException":{ "type":"structure", "members":{ - "Message":{"shape":"String"} + "Message":{"shape":"String"}, + "ResourceId":{ + "shape":"String", + "documentation":"

The identifier of the resource which is in conflict with the requested operation.

" + }, + "ResourceType":{ + "shape":"String", + "documentation":"

The type of the resource which is in conflict with the requested operation.

" + } }, "documentation":"

The requested operation conflicts with the current state of the specified resource or with another request.

", "error":{ @@ -719,6 +1063,66 @@ } } }, + "CreateS3TableIntegrationInput":{ + "type":"structure", + "required":[ + "Encryption", + "RoleArn" + ], + "members":{ + "Encryption":{ + "shape":"Encryption", + "documentation":"

The encryption configuration for the S3 Table integration, including the encryption algorithm and KMS key settings.

" + }, + "RoleArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that grants permissions for the S3 Table integration to access necessary resources.

" + }, + "Tags":{ + "shape":"TagMapInput", + "documentation":"

The key-value pairs to associate with the S3 Table integration resource for categorization and management purposes.

" + } + } + }, + "CreateS3TableIntegrationOutput":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the created S3 Table integration.

" + } + } + }, + "CreateTelemetryPipelineInput":{ + "type":"structure", + "required":[ + "Name", + "Configuration" + ], + "members":{ + "Name":{ + "shape":"TelemetryPipelineName", + "documentation":"

The name of the telemetry pipeline to create. The name must be unique within your account.

" + }, + "Configuration":{ + "shape":"TelemetryPipelineConfiguration", + "documentation":"

The configuration that defines how the telemetry pipeline processes data, including sources, processors, and destinations. For more information about pipeline components, see the Amazon CloudWatch User Guide.

" + }, + "Tags":{ + "shape":"TagMapInput", + "documentation":"

The key-value pairs to associate with the telemetry pipeline resource for categorization and management purposes.

" + } + } + }, + "CreateTelemetryPipelineOutput":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the created telemetry pipeline.

" + } + } + }, "CreateTelemetryRuleForOrganizationInput":{ "type":"structure", "required":[ @@ -779,6 +1183,24 @@ } } }, + "DataSource":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"String", + "documentation":"

The name of the data source. For CloudWatch Logs sources, this corresponds to the data_source_name from the log event metadata. For third-party sources, this is either the configured data_source_name or defaults to the plugin name if not specified.

" + }, + "Type":{ + "shape":"String", + "documentation":"

The type of the data source. For CloudWatch Logs sources, this corresponds to the data_source_type from the log event metadata. For third-party sources, this field is empty.

" + } + }, + "documentation":"

Information about a data source associated with the telemetry pipeline. For CloudWatch Logs sources, this includes both a name and type extracted from the log event metadata. For third-party sources (such as S3), this includes only a name, with the type field left empty.

" + }, + "DataSources":{ + "type":"list", + "member":{"shape":"DataSource"} + }, "DeleteCentralizationRuleForOrganizationInput":{ "type":"structure", "required":["RuleIdentifier"], @@ -789,6 +1211,30 @@ } } }, + "DeleteS3TableIntegrationInput":{ + "type":"structure", + "required":["Arn"], + "members":{ + "Arn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the S3 Table integration to delete.

" + } + } + }, + "DeleteTelemetryPipelineInput":{ + "type":"structure", + "required":["PipelineIdentifier"], + "members":{ + "PipelineIdentifier":{ + "shape":"TelemetryPipelineIdentifier", + "documentation":"

The ARN of the telemetry pipeline to delete.

" + } + } + }, + "DeleteTelemetryPipelineOutput":{ + "type":"structure", + "members":{} + }, "DeleteTelemetryRuleForOrganizationInput":{ "type":"structure", "required":["RuleIdentifier"], @@ -827,13 +1273,42 @@ "type":"string", "enum":["cloud-watch-logs"] }, - "EncryptedLogGroupStrategy":{ + "ELBLoadBalancerLoggingParameters":{ + "type":"structure", + "members":{ + "OutputFormat":{ + "shape":"OutputFormat", + "documentation":"

The format for ELB access log entries (plain text or JSON format).

" + }, + "FieldDelimiter":{ + "shape":"String", + "documentation":"

The delimiter character used to separate fields in ELB access log entries when using plain text format.

" + } + }, + "documentation":"

Configuration parameters for ELB load balancer logging, including output format and field delimiter settings.

" + }, + "EncryptedLogGroupStrategy":{ "type":"string", "enum":[ "ALLOW", "SKIP" ] }, + "Encryption":{ + "type":"structure", + "required":["SseAlgorithm"], + "members":{ + "SseAlgorithm":{ + "shape":"SSEAlgorithm", + "documentation":"

The server-side encryption algorithm used for encrypting data in the S3 Table integration.

" + }, + "KmsKeyArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key used for encryption when using customer-managed keys.

" + } + }, + "documentation":"

Defines the encryption configuration for S3 Table integrations, including the encryption algorithm and KMS key settings.

" + }, "EncryptionConflictResolutionStrategy":{ "type":"string", "enum":[ @@ -849,6 +1324,77 @@ ] }, "FailureReason":{"type":"string"}, + "FieldMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "FieldSelectors":{ + "type":"list", + "member":{"shape":"AdvancedFieldSelector"} + }, + "FieldToMatch":{ + "type":"structure", + "members":{ + "SingleHeader":{ + "shape":"SingleHeader", + "documentation":"

Redacts a specific header field by name from WAF logs.

" + }, + "UriPath":{ + "shape":"String", + "documentation":"

Redacts the URI path from WAF logs.

" + }, + "QueryString":{ + "shape":"String", + "documentation":"

Redacts the entire query string from WAF logs.

" + }, + "Method":{ + "shape":"String", + "documentation":"

Redacts the HTTP method from WAF logs.

" + } + }, + "documentation":"

Specifies a field in the request to redact from WAF logs, such as headers, query parameters, or body content.

" + }, + "Filter":{ + "type":"structure", + "members":{ + "Behavior":{ + "shape":"FilterBehavior", + "documentation":"

The action to take for log records matching this filter (KEEP or DROP).

" + }, + "Requirement":{ + "shape":"FilterRequirement", + "documentation":"

Whether the log record must meet all conditions (MEETS_ALL) or any condition (MEETS_ANY) to match this filter.

" + }, + "Conditions":{ + "shape":"Conditions", + "documentation":"

The list of conditions that determine if a log record matches this filter.

" + } + }, + "documentation":"

A single filter condition that specifies behavior, requirement, and matching conditions for WAF log records.

" + }, + "FilterBehavior":{ + "type":"string", + "documentation":"

Enumeration of filter actions: KEEP to include log records, DROP to exclude them.

", + "enum":[ + "KEEP", + "DROP" + ] + }, + "FilterRequirement":{ + "type":"string", + "documentation":"

Enumeration of condition matching requirements: MEETS_ALL requires all conditions to match, MEETS_ANY requires at least one.

", + "enum":[ + "MEETS_ALL", + "MEETS_ANY" + ] + }, + "Filters":{ + "type":"list", + "member":{"shape":"Filter"}, + "documentation":"

A list of filter conditions, with at least one filter required.

", + "min":1 + }, "GetCentralizationRuleForOrganizationInput":{ "type":"structure", "required":["RuleIdentifier"], @@ -900,6 +1446,45 @@ } } }, + "GetS3TableIntegrationInput":{ + "type":"structure", + "required":["Arn"], + "members":{ + "Arn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the S3 Table integration to retrieve.

" + } + } + }, + "GetS3TableIntegrationOutput":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the S3 Table integration.

" + }, + "RoleArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role used by the S3 Table integration.

" + }, + "Status":{ + "shape":"IntegrationStatus", + "documentation":"

The current status of the S3 Table integration.

" + }, + "Encryption":{ + "shape":"Encryption", + "documentation":"

The encryption configuration for the S3 Table integration.

" + }, + "DestinationTableBucketArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the S3 bucket used as the destination for the table data.

" + }, + "CreatedTimeStamp":{ + "shape":"Long", + "documentation":"

The timestamp when the S3 Table integration was created.

" + } + } + }, "GetTelemetryEnrichmentStatusOutput":{ "type":"structure", "members":{ @@ -909,7 +1494,7 @@ }, "AwsResourceExplorerManagedViewArn":{ "shape":"AwsResourceExplorerManagedViewArn", - "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services Resource Explorer managed view used for resource tags for telemetry, if the feature is enabled.

" + "documentation":"

The Amazon Resource Name (ARN) of the Resource Explorer managed view used for resource tags for telemetry, if the feature is enabled.

" } } }, @@ -939,6 +1524,25 @@ } } }, + "GetTelemetryPipelineInput":{ + "type":"structure", + "required":["PipelineIdentifier"], + "members":{ + "PipelineIdentifier":{ + "shape":"TelemetryPipelineIdentifier", + "documentation":"

The identifier (name or ARN) of the telemetry pipeline to retrieve.

" + } + } + }, + "GetTelemetryPipelineOutput":{ + "type":"structure", + "members":{ + "Pipeline":{ + "shape":"TelemetryPipeline", + "documentation":"

The complete telemetry pipeline resource information, including configuration, status, and metadata.

" + } + } + }, "GetTelemetryRuleForOrganizationInput":{ "type":"structure", "required":["RuleIdentifier"], @@ -1013,6 +1617,31 @@ "type":"integer", "box":true }, + "IntegrationStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "DELETING" + ] + }, + "IntegrationSummaries":{ + "type":"list", + "member":{"shape":"IntegrationSummary"} + }, + "IntegrationSummary":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the S3 Table integration.

" + }, + "Status":{ + "shape":"IntegrationStatus", + "documentation":"

The current status of the S3 Table integration.

" + } + }, + "documentation":"

Contains summary information about an S3 Table integration for listing operations.

" + }, "InternalServerException":{ "type":"structure", "members":{ @@ -1022,6 +1651,12 @@ "documentation":"

The name of the exception.

", "location":"header", "locationName":"x-amzn-ErrorType" + }, + "retryAfterSeconds":{ + "shape":"Integer", + "documentation":"

The number of seconds to wait before retrying the request.

", + "location":"header", + "locationName":"Retry-After" } }, "documentation":"

Indicates the request has failed to process because of an unknown server error, exception, or failure.

", @@ -1029,6 +1664,34 @@ "exception":true, "fault":true }, + "InvalidStateException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The requested operation cannot be completed on the specified resource in the current state.

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "LabelNameCondition":{ + "type":"structure", + "members":{ + "LabelName":{ + "shape":"LabelNameConditionLabelNameString", + "documentation":"

The label name to match, supporting alphanumeric characters, underscores, hyphens, and colons.

" + } + }, + "documentation":"

Condition that matches based on WAF rule labels, with label names limited to 1024 characters.

" + }, + "LabelNameConditionLabelNameString":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"[0-9A-Za-z_\\-:]+" + }, "ListCentralizationRulesForOrganizationInput":{ "type":"structure", "members":{ @@ -1174,6 +1837,38 @@ } } }, + "ListS3TableIntegrationsInput":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"ListS3TableIntegrationsMaxResults", + "documentation":"

The maximum number of S3 Table integrations to return in a single call.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. A previous call generates this token.

" + } + } + }, + "ListS3TableIntegrationsMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListS3TableIntegrationsOutput":{ + "type":"structure", + "members":{ + "IntegrationSummaries":{ + "shape":"IntegrationSummaries", + "documentation":"

A list of S3 Table integration summaries containing key information about each integration.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token to resume pagination of results.

" + } + } + }, "ListTagsForResourceInput":{ "type":"structure", "required":["ResourceARN"], @@ -1194,6 +1889,38 @@ } } }, + "ListTelemetryPipelinesInput":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"ListTelemetryPipelinesMaxResults", + "documentation":"

The maximum number of telemetry pipelines to return in a single call.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. A previous call generates this token.

" + } + } + }, + "ListTelemetryPipelinesMaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "ListTelemetryPipelinesOutput":{ + "type":"structure", + "members":{ + "PipelineSummaries":{ + "shape":"TelemetryPipelineSummaries", + "documentation":"

A list of telemetry pipeline summaries containing key information about each pipeline.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token to resume pagination of results.

" + } + } + }, "ListTelemetryRulesForOrganizationInput":{ "type":"structure", "members":{ @@ -1274,6 +2001,41 @@ } } }, + "LogDeliveryParameters":{ + "type":"structure", + "members":{ + "LogTypes":{ + "shape":"LogTypes", + "documentation":"

The type of log that the source is sending.

" + } + }, + "documentation":"

Configuration parameters for Amazon Bedrock AgentCore logging, including logType settings.

" + }, + "LogType":{ + "type":"string", + "enum":[ + "APPLICATION_LOGS", + "USAGE_LOGS" + ] + }, + "LogTypes":{ + "type":"list", + "member":{"shape":"LogType"} + }, + "LoggingFilter":{ + "type":"structure", + "members":{ + "Filters":{ + "shape":"Filters", + "documentation":"

A list of filter conditions that determine log record handling behavior.

" + }, + "DefaultBehavior":{ + "shape":"FilterBehavior", + "documentation":"

The default action (KEEP or DROP) for log records that don't match any filter conditions.

" + } + }, + "documentation":"

Configuration that determines which WAF log records to keep or drop based on specified conditions.

" + }, "LogsBackupConfiguration":{ "type":"structure", "required":["Region"], @@ -1284,7 +2046,7 @@ }, "KmsKeyArn":{ "shape":"ResourceArn", - "documentation":"

KMS Key arn belonging to the primary destination account and backup region, to encrypt newly created central log groups in the backup destination.

" + "documentation":"

KMS Key ARN belonging to the primary destination account and backup region, to encrypt newly created central log groups in the backup destination.

" } }, "documentation":"

Configuration for backing up centralized log data to a secondary region.

" @@ -1299,7 +2061,7 @@ }, "KmsKeyArn":{ "shape":"ResourceArn", - "documentation":"

KMS Key arn belonging to the primary destination account and region, to encrypt newly created central log groups in the primary destination.

" + "documentation":"

KMS Key ARN belonging to the primary destination account and region, to encrypt newly created central log groups in the primary destination.

" }, "EncryptionConflictResolutionStrategy":{ "shape":"EncryptionConflictResolutionStrategy", @@ -1327,6 +2089,78 @@ "member":{"shape":"OrganizationUnitIdentifier"}, "min":1 }, + "OutputFormat":{ + "type":"string", + "documentation":"

Enumeration of supported output formats for ELB access logs: plain for space-delimited format, json for structured JSON format.

", + "enum":[ + "plain", + "json" + ] + }, + "PipelineOutput":{ + "type":"structure", + "members":{ + "Record":{ + "shape":"Record", + "documentation":"

The processed record output from the pipeline test operation.

" + }, + "Error":{ + "shape":"PipelineOutputError", + "documentation":"

Any error that occurred during the pipeline test operation for this record.

" + } + }, + "documentation":"

Contains the output from pipeline test operations, including processed records and any errors encountered.

" + }, + "PipelineOutputError":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"String", + "documentation":"

The detailed error message describing what went wrong during the pipeline test operation for this record.

" + } + }, + "documentation":"

Contains detailed error information from pipeline test operations, providing structured error responses for better debugging and troubleshooting capabilities.

" + }, + "PipelineOutputs":{ + "type":"list", + "member":{"shape":"PipelineOutput"} + }, + "Processors":{ + "type":"list", + "member":{"shape":"String"} + }, + "Record":{ + "type":"structure", + "members":{ + "Data":{ + "shape":"String", + "documentation":"

The data content of the test record used for pipeline validation.

" + }, + "Type":{ + "shape":"RecordFormat", + "documentation":"

The type of the test record, indicating the format or category of the data.

" + } + }, + "documentation":"

Represents a test record structure used for pipeline testing operations to validate data processing.

" + }, + "RecordFormat":{ + "type":"string", + "enum":[ + "STRING", + "JSON" + ] + }, + "Records":{ + "type":"list", + "member":{"shape":"Record"} + }, + "RedactedFields":{ + "type":"list", + "member":{"shape":"FieldToMatch"}, + "documentation":"

A list of fields to redact from WAF logs to protect sensitive information, with a maximum of 100 fields.

", + "max":100, + "min":0 + }, "Region":{ "type":"string", "min":1 @@ -1351,7 +2185,15 @@ "ResourceNotFoundException":{ "type":"structure", "members":{ - "Message":{"shape":"String"} + "Message":{"shape":"String"}, + "ResourceId":{ + "shape":"String", + "documentation":"

The identifier of the resource which could not be found.

" + }, + "ResourceType":{ + "shape":"String", + "documentation":"

The type of the resource which could not be found.

" + } }, "documentation":"

The specified resource (such as a telemetry rule) could not be found.

", "error":{ @@ -1365,7 +2207,15 @@ "enum":[ "AWS::EC2::Instance", "AWS::EC2::VPC", - "AWS::Lambda::Function" + "AWS::Lambda::Function", + "AWS::CloudTrail", + "AWS::EKS::Cluster", + "AWS::WAFv2::WebACL", + "AWS::ElasticLoadBalancingV2::LoadBalancer", + "AWS::Route53Resolver::ResolverEndpoint", + "AWS::BedrockAgentCore::Runtime", + "AWS::BedrockAgentCore::Browser", + "AWS::BedrockAgentCore::CodeInterpreter" ] }, "ResourceTypes":{ @@ -1399,10 +2249,33 @@ "min":1, "pattern":"[0-9A-Za-z-_.#/]+" }, + "SSEAlgorithm":{ + "type":"string", + "enum":[ + "aws:kms", + "AES256" + ] + }, "ServiceQuotaExceededException":{ "type":"structure", "members":{ "Message":{"shape":"String"}, + "ResourceId":{ + "shape":"String", + "documentation":"

The identifier of the resource which exceeds the service quota.

" + }, + "ResourceType":{ + "shape":"String", + "documentation":"

The type of the resource which exceeds the service quota.

" + }, + "ServiceCode":{ + "shape":"String", + "documentation":"

The code for the service of the exceeded quota.

" + }, + "QuotaCode":{ + "shape":"String", + "documentation":"

The code for the exceeded service quota.

" + }, "amznErrorType":{ "shape":"String", "documentation":"

The name of the exception.

", @@ -1417,6 +2290,35 @@ }, "exception":true }, + "SingleHeader":{ + "type":"structure", + "members":{ + "Name":{ + "shape":"SingleHeaderNameString", + "documentation":"

The name value, limited to 64 characters.

" + } + }, + "documentation":"

Structure containing a name field limited to 64 characters for header or query parameter identification.

" + }, + "SingleHeaderNameString":{ + "type":"string", + "max":64, + "min":1 + }, + "Sinks":{ + "type":"list", + "member":{"shape":"String"} + }, + "Source":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"String", + "documentation":"

The plugin name of the source, such as cloudwatch_logs or s3.

" + } + }, + "documentation":"

A source plugin used in the pipeline configuration (such as cloudwatch_logs or s3). Currently only a single source per pipeline is supported, but sources are structured as a list to accommodate multiple pipelines in the configuration.

" + }, "SourceFilterString":{ "type":"string", "max":2000, @@ -1440,6 +2342,10 @@ }, "documentation":"

Configuration for selecting and handling source log groups for centralization.

" }, + "Sources":{ + "type":"list", + "member":{"shape":"Source"} + }, "StartTelemetryEnrichmentOutput":{ "type":"structure", "members":{ @@ -1449,7 +2355,7 @@ }, "AwsResourceExplorerManagedViewArn":{ "shape":"AwsResourceExplorerManagedViewArn", - "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services Resource Explorer managed view created for resource tags for telemetry.

" + "documentation":"

The Amazon Resource Name (ARN) of the Resource Explorer managed view created for resource tags for telemetry.

" } } }, @@ -1475,6 +2381,10 @@ } }, "String":{"type":"string"}, + "StringList":{ + "type":"list", + "member":{"shape":"String"} + }, "TagKey":{ "type":"string", "max":128, @@ -1535,11 +2445,11 @@ }, "ResourceType":{ "shape":"ResourceType", - "documentation":"

The type of resource, for example Amazon Web Services::EC2::Instance.

" + "documentation":"

The type of resource, for example Amazon Web Services::EC2::Instance or Amazon Web Services::EKS::Cluster.

" }, "ResourceIdentifier":{ "shape":"ResourceIdentifier", - "documentation":"

The identifier of the resource, for example i-0b22a22eec53b9321.

" + "documentation":"

The identifier of the resource. For example, for Amazon VPC it would be vpc-1a2b3c4d5e6f1a2b3.

" }, "ResourceTags":{ "shape":"TagMapOutput", @@ -1579,6 +2489,22 @@ "VPCFlowLogParameters":{ "shape":"VPCFlowLogParameters", "documentation":"

Configuration parameters specific to VPC Flow Logs when VPC is the resource type.

" + }, + "CloudtrailParameters":{ + "shape":"CloudtrailParameters", + "documentation":"

Configuration parameters specific to Amazon Web Services CloudTrail when CloudTrail is the source type.

" + }, + "ELBLoadBalancerLoggingParameters":{ + "shape":"ELBLoadBalancerLoggingParameters", + "documentation":"

Configuration parameters specific to ELB load balancer logging when ELB is the resource type.

" + }, + "WAFLoggingParameters":{ + "shape":"WAFLoggingParameters", + "documentation":"

Configuration parameters specific to WAF logging when WAF is the resource type.

" + }, + "LogDeliveryParameters":{ + "shape":"LogDeliveryParameters", + "documentation":"

Configuration parameters specific to Amazon Bedrock AgentCore logging when Amazon Bedrock AgentCore is the resource type.

" } }, "documentation":"

Configuration specifying where and how telemetry data should be delivered for Amazon Web Services resources.

" @@ -1591,18 +2517,146 @@ "Impaired" ] }, + "TelemetryPipeline":{ + "type":"structure", + "members":{ + "CreatedTimeStamp":{ + "shape":"Long", + "documentation":"

The timestamp when the telemetry pipeline was created.

" + }, + "LastUpdateTimeStamp":{ + "shape":"Long", + "documentation":"

The timestamp when the telemetry pipeline was last updated.

" + }, + "Arn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the telemetry pipeline.

" + }, + "Name":{ + "shape":"TelemetryPipelineName", + "documentation":"

The name of the telemetry pipeline.

" + }, + "Configuration":{ + "shape":"TelemetryPipelineConfiguration", + "documentation":"

The configuration that defines how the telemetry pipeline processes data.

" + }, + "Status":{ + "shape":"TelemetryPipelineStatus", + "documentation":"

The current status of the telemetry pipeline.

" + }, + "StatusReason":{ + "shape":"TelemetryPipelineStatusReason", + "documentation":"

Additional information about the pipeline status, including reasons for failure states.

" + }, + "Tags":{ + "shape":"TagMapOutput", + "documentation":"

The key-value pairs associated with the telemetry pipeline resource.

" + } + }, + "documentation":"

Represents a complete telemetry pipeline resource with configuration, status, and metadata for data processing and transformation.

" + }, + "TelemetryPipelineConfiguration":{ + "type":"structure", + "required":["Body"], + "members":{ + "Body":{ + "shape":"TelemetryPipelineConfigurationBody", + "documentation":"

The pipeline configuration body that defines the data processing rules and transformations.

" + } + }, + "documentation":"

Defines the configuration for a telemetry pipeline, including how data flows from sources through processors to destinations.

" + }, + "TelemetryPipelineConfigurationBody":{ + "type":"string", + "max":24000, + "min":1 + }, + "TelemetryPipelineIdentifier":{ + "type":"string", + "max":512, + "min":1 + }, + "TelemetryPipelineName":{ + "type":"string", + "max":28, + "min":3, + "pattern":".*[a-z][a-z0-9\\-]+.*" + }, + "TelemetryPipelineStatus":{ + "type":"string", + "enum":[ + "CREATING", + "ACTIVE", + "UPDATING", + "DELETING", + "CREATE_FAILED", + "UPDATE_FAILED" + ] + }, + "TelemetryPipelineStatusReason":{ + "type":"structure", + "members":{ + "Description":{ + "shape":"String", + "documentation":"

A description of the pipeline status reason, providing additional context about the current state.

" + } + }, + "documentation":"

Provides detailed information about the status of a telemetry pipeline, including reasons for specific states.

" + }, + "TelemetryPipelineSummaries":{ + "type":"list", + "member":{"shape":"TelemetryPipelineSummary"} + }, + "TelemetryPipelineSummary":{ + "type":"structure", + "members":{ + "CreatedTimeStamp":{ + "shape":"Long", + "documentation":"

The timestamp when the telemetry pipeline was created.

" + }, + "LastUpdateTimeStamp":{ + "shape":"Long", + "documentation":"

The timestamp when the telemetry pipeline was last updated.

" + }, + "Arn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the telemetry pipeline.

" + }, + "Name":{ + "shape":"TelemetryPipelineName", + "documentation":"

The name of the telemetry pipeline.

" + }, + "Status":{ + "shape":"TelemetryPipelineStatus", + "documentation":"

The current status of the telemetry pipeline.

" + }, + "Tags":{ + "shape":"TagMapOutput", + "documentation":"

The key-value pairs associated with the telemetry pipeline resource.

" + }, + "ConfigurationSummary":{ + "shape":"ConfigurationSummary", + "documentation":"

A summary of the pipeline configuration components.

" + } + }, + "documentation":"

Contains summary information about a telemetry pipeline for listing operations.

" + }, "TelemetryRule":{ "type":"structure", "required":["TelemetryType"], "members":{ "ResourceType":{ "shape":"ResourceType", - "documentation":"

The type of Amazon Web Services resource to configure telemetry for (e.g., \"AWS::EC2::VPC\").

" + "documentation":"

The type of Amazon Web Services resource to configure telemetry for (e.g., \"AWS::EC2::VPC\", \"AWS::EKS::Cluster\", \"AWS::WAFv2::WebACL\").

" }, "TelemetryType":{ "shape":"TelemetryType", "documentation":"

The type of telemetry to collect (Logs, Metrics, or Traces).

" }, + "TelemetrySourceTypes":{ + "shape":"TelemetrySourceTypes", + "documentation":"

The specific telemetry source types to configure for the resource, such as VPC_FLOW_LOGS or EKS_AUDIT_LOGS. TelemetrySourceTypes must be correlated with the specific resource type.

" + }, "DestinationConfiguration":{ "shape":"TelemetryDestinationConfiguration", "documentation":"

Configuration specifying where and how the telemetry data should be delivered.

" @@ -1648,10 +2702,32 @@ "TelemetryType":{ "shape":"TelemetryType", "documentation":"

The type of telemetry (Logs, Metrics, or Traces) the rule configures.

" + }, + "TelemetrySourceTypes":{ + "shape":"TelemetrySourceTypes", + "documentation":"

The types of telemetry sources configured for this rule, such as VPC Flow Logs or EKS audit logs. TelemetrySourceTypes must be correlated with the specific resource type.

" } }, "documentation":"

A summary of a telemetry rule's key properties.

" }, + "TelemetrySourceType":{ + "type":"string", + "documentation":"

Specifies the type of telemetry source for a resource, such as EKS cluster logs.

", + "enum":[ + "VPC_FLOW_LOGS", + "ROUTE53_RESOLVER_QUERY_LOGS", + "EKS_AUDIT_LOGS", + "EKS_AUTHENTICATOR_LOGS", + "EKS_CONTROLLER_MANAGER_LOGS", + "EKS_SCHEDULER_LOGS", + "EKS_API_LOGS" + ] + }, + "TelemetrySourceTypes":{ + "type":"list", + "member":{"shape":"TelemetrySourceType"}, + "documentation":"

A list of telemetry source types that specify what kind of telemetry data to collect from a resource.

" + }, "TelemetryState":{ "type":"string", "enum":[ @@ -1668,6 +2744,32 @@ "Traces" ] }, + "TestTelemetryPipelineInput":{ + "type":"structure", + "required":[ + "Records", + "Configuration" + ], + "members":{ + "Records":{ + "shape":"Records", + "documentation":"

The sample records to process through the pipeline configuration for testing purposes.

" + }, + "Configuration":{ + "shape":"TelemetryPipelineConfiguration", + "documentation":"

The pipeline configuration to test with the provided sample records.

" + } + } + }, + "TestTelemetryPipelineOutput":{ + "type":"structure", + "members":{ + "Results":{ + "shape":"PipelineOutputs", + "documentation":"

The results of processing the test records through the pipeline configuration, including any outputs or errors.

" + } + } + }, "TooManyRequestsException":{ "type":"structure", "members":{ @@ -1723,6 +2825,27 @@ } } }, + "UpdateTelemetryPipelineInput":{ + "type":"structure", + "required":[ + "PipelineIdentifier", + "Configuration" + ], + "members":{ + "PipelineIdentifier":{ + "shape":"TelemetryPipelineIdentifier", + "documentation":"

The identifier (name or ARN) of the telemetry pipeline to update.

" + }, + "Configuration":{ + "shape":"TelemetryPipelineConfiguration", + "documentation":"

The new configuration for the telemetry pipeline, including updated sources, processors, and destinations.

" + } + } + }, + "UpdateTelemetryPipelineOutput":{ + "type":"structure", + "members":{} + }, "UpdateTelemetryRuleForOrganizationInput":{ "type":"structure", "required":[ @@ -1793,10 +2916,55 @@ }, "documentation":"

Configuration parameters specific to VPC Flow Logs.

" }, + "ValidateTelemetryPipelineConfigurationInput":{ + "type":"structure", + "required":["Configuration"], + "members":{ + "Configuration":{ + "shape":"TelemetryPipelineConfiguration", + "documentation":"

The pipeline configuration to validate for syntax and compatibility.

" + } + } + }, + "ValidateTelemetryPipelineConfigurationOutput":{ + "type":"structure", + "members":{ + "Errors":{ + "shape":"ValidationErrors", + "documentation":"

A list of validation errors found in the pipeline configuration, if any.

" + } + } + }, + "ValidationError":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"String", + "documentation":"

The error message describing the validation issue.

" + }, + "Reason":{ + "shape":"String", + "documentation":"

The reason code or category for the validation error.

" + }, + "FieldMap":{ + "shape":"FieldMap", + "documentation":"

A mapping of field names to specific validation issues within the configuration.

" + } + }, + "documentation":"

Represents a detailed validation error with message, reason, and field mapping for comprehensive error reporting.

" + }, + "ValidationErrors":{ + "type":"list", + "member":{"shape":"ValidationError"} + }, "ValidationException":{ "type":"structure", "members":{ - "Message":{"shape":"String"} + "Message":{"shape":"String"}, + "Errors":{ + "shape":"ValidationErrors", + "documentation":"

The errors in the input which caused the exception.

" + } }, "documentation":"

Indicates input validation failed. Check your request parameters and retry the request.

", "error":{ @@ -1804,6 +2972,29 @@ "senderFault":true }, "exception":true + }, + "WAFLogType":{ + "type":"string", + "documentation":"

Enumeration of supported WAF log types. Currently only WAF_LOGS is supported.

", + "enum":["WAF_LOGS"] + }, + "WAFLoggingParameters":{ + "type":"structure", + "members":{ + "RedactedFields":{ + "shape":"RedactedFields", + "documentation":"

The fields to redact from WAF logs to protect sensitive information.

" + }, + "LoggingFilter":{ + "shape":"LoggingFilter", + "documentation":"

A filter configuration that determines which WAF log records to include or exclude.

" + }, + "LogType":{ + "shape":"WAFLogType", + "documentation":"

The type of WAF logs to collect (currently supports WAF_LOGS).

" + } + }, + "documentation":"

Configuration parameters for WAF logging, including redacted fields and logging filters.

" } }, "documentation":"

You can use Amazon CloudWatch Observability Admin to discover and understand the state of telemetry configuration in CloudWatch for your Amazon Web Services Organization or account. This simplifies the process of auditing your telemetry collection configurations across multiple resource types within your Amazon Web Services Organization or account. By providing a consolidated view, it allows you to easily review and manage telemetry settings, helping you ensure proper monitoring and data collection across your Amazon Web Services environment. For more information, see Auditing CloudWatch telemetry configurations in the CloudWatch User Guide.

For information on the permissions you need to use this API, see Identity and access management for Amazon CloudWatch in the CloudWatch User Guide.

" diff --git a/awscli/botocore/data/odb/2024-08-20/service-2.json b/awscli/botocore/data/odb/2024-08-20/service-2.json index 407cc70384c4..efa508421cc7 100644 --- a/awscli/botocore/data/odb/2024-08-20/service-2.json +++ b/awscli/botocore/data/odb/2024-08-20/service-2.json @@ -819,15 +819,15 @@ "members":{ "iamRoleArn":{ "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the IAM service role to associate with the resource.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access Management (IAM) service role to associate with the resource.

" }, "awsIntegration":{ "shape":"SupportedAwsIntegration", - "documentation":"

The Amazon Web Services integration configuration settings for the IAM service role association.

" + "documentation":"

The Amazon Web Services integration configuration settings for the Amazon Web Services Identity and Access Management (IAM) service role association.

" }, "resourceArn":{ "shape":"AssociateIamRoleToResourceInputResourceArnString", - "documentation":"

The Amazon Resource Name (ARN) of the target resource to associate with the IAM service role.

" + "documentation":"

The Amazon Resource Name (ARN) of the target resource to associate with the Amazon Web Services Identity and Access Management (IAM) service role.

" } } }, @@ -923,6 +923,10 @@ "shape":"ResourceIdOrArn", "documentation":"

The unique identifier of the ODB network associated with this Autonomous VM cluster.

" }, + "odbNetworkArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the ODB network associated with this Autonomous VM cluster.

" + }, "ociResourceAnchorName":{ "shape":"String", "documentation":"

The name of the OCI resource anchor associated with this Autonomous VM cluster.

" @@ -947,6 +951,10 @@ "shape":"ResourceIdOrArn", "documentation":"

The unique identifier of the Cloud Exadata Infrastructure containing this Autonomous VM cluster.

" }, + "cloudExadataInfrastructureArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the Cloud Exadata Infrastructure containing this Autonomous VM cluster.

" + }, "autonomousDataStoragePercentage":{ "shape":"Float", "documentation":"

The percentage of data storage currently in use for Autonomous Databases in the Autonomous VM cluster.

" @@ -1148,6 +1156,10 @@ "shape":"ResourceIdOrArn", "documentation":"

The unique identifier of the ODB network associated with this Autonomous VM cluster.

" }, + "odbNetworkArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the ODB network associated with this Autonomous VM cluster.

" + }, "ociResourceAnchorName":{ "shape":"String", "documentation":"

The name of the OCI resource anchor associated with this Autonomous VM cluster.

" @@ -1172,6 +1184,10 @@ "shape":"ResourceIdOrArn", "documentation":"

The unique identifier of the Exadata infrastructure containing this Autonomous VM cluster.

" }, + "cloudExadataInfrastructureArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the Exadata infrastructure containing this Autonomous VM cluster.

" + }, "autonomousDataStoragePercentage":{ "shape":"Float", "documentation":"

The percentage of data storage currently in use for Autonomous Databases in the Autonomous VM cluster.

" @@ -1719,6 +1735,10 @@ "shape":"String", "documentation":"

The unique identifier of the Exadata infrastructure that this VM cluster belongs to.

" }, + "cloudExadataInfrastructureArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the Exadata infrastructure that this VM cluster belongs to.

" + }, "clusterName":{ "shape":"String", "documentation":"

The name of the Grid Infrastructure (GI) cluster.

" @@ -1847,6 +1867,10 @@ "shape":"ResourceIdOrArn", "documentation":"

The unique identifier of the ODB network for the VM cluster.

" }, + "odbNetworkArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the ODB network associated with this VM cluster.

" + }, "percentProgress":{ "shape":"Float", "documentation":"

The amount of progress made on the current operation on the VM cluster, expressed as a percentage.

" @@ -1894,6 +1918,10 @@ "shape":"String", "documentation":"

The unique identifier of the Exadata infrastructure that this VM cluster belongs to.

" }, + "cloudExadataInfrastructureArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the Exadata infrastructure that this VM cluster belongs to.

" + }, "clusterName":{ "shape":"String", "documentation":"

The name of the Grid Infrastructure (GI) cluster.

" @@ -2016,6 +2044,10 @@ "shape":"ResourceIdOrArn", "documentation":"

The unique identifier of the ODB network for the VM cluster.

" }, + "odbNetworkArn":{ + "shape":"ResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the ODB network associated with this VM cluster.

" + }, "percentProgress":{ "shape":"Float", "documentation":"

The amount of progress made on the current operation on the VM cluster, expressed as a percentage.

" @@ -2556,11 +2588,11 @@ }, "stsPolicyDocument":{ "shape":"PolicyDocument", - "documentation":"

The STS policy document that defines permissions for token service usage within the ODB network.

" + "documentation":"

The Amazon Web Services Security Token Service (STS) policy document that defines permissions for token service usage within the ODB network.

" }, "kmsPolicyDocument":{ "shape":"PolicyDocument", - "documentation":"

The KMS policy document that defines permissions for key usage within the ODB network.

" + "documentation":"

The Amazon Web Services Key Management Service (KMS) policy document that defines permissions for key usage within the ODB network.

" }, "crossRegionS3RestoreSourcesToEnable":{ "shape":"StringList", @@ -2698,15 +2730,15 @@ "members":{ "region":{ "shape":"String", - "documentation":"

The Amazon Web Services Region for cross-Region S3 restore access.

" + "documentation":"

The Amazon Web Services Region for cross-Region Amazon S3 restore access.

" }, "ipv4Addresses":{ "shape":"StringList", - "documentation":"

The IPv4 addresses allowed for cross-Region S3 restore access.

" + "documentation":"

The IPv4 addresses allowed for cross-Region Amazon S3 restore access.

" }, "status":{ "shape":"ManagedResourceStatus", - "documentation":"

The current status of the cross-Region S3 restore access configuration.

" + "documentation":"

The current status of the cross-Region Amazon S3 restore access configuration.

" } }, "documentation":"

The configuration access for the cross-Region Amazon S3 database restore source for the ODB network.

" @@ -3448,15 +3480,15 @@ "members":{ "iamRoleArn":{ "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the IAM service role to disassociate from the resource.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access Management (IAM) service role to disassociate from the resource.

" }, "awsIntegration":{ "shape":"SupportedAwsIntegration", - "documentation":"

The Amazon Web Services integration configuration settings for the IAM service role disassociation.

" + "documentation":"

The Amazon Web Services integration configuration settings for the Amazon Web Services Identity and Access Management (IAM) service role disassociation.

" }, "resourceArn":{ "shape":"DisassociateIamRoleFromResourceInputResourceArnString", - "documentation":"

The Amazon Resource Name (ARN) of the target resource to disassociate from the IAM service role.

" + "documentation":"

The Amazon Resource Name (ARN) of the target resource to disassociate from the Amazon Web Services Identity and Access Management (IAM) service role.

" } } }, @@ -3716,19 +3748,19 @@ "members":{ "iamRoleArn":{ "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of the IAM service role.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services Identity and Access Management (IAM) service role.

" }, "status":{ "shape":"IamRoleStatus", - "documentation":"

The current status of the IAM service role.

" + "documentation":"

The current status of the Amazon Web Services Identity and Access Management (IAM) service role.

" }, "statusReason":{ "shape":"String", - "documentation":"

Additional information about the current status of the IAM service role, if applicable.

" + "documentation":"

Additional information about the current status of the Amazon Web Services Identity and Access Management (IAM) service role, if applicable.

" }, "awsIntegration":{ "shape":"SupportedAwsIntegration", - "documentation":"

The Amazon Web Services integration configuration settings for the IAM service role.

" + "documentation":"

The Amazon Web Services integration configuration settings for the Amazon Web Services Identity and Access Management (IAM) service role.

" } }, "documentation":"

Information about an Amazon Web Services Identity and Access Management (IAM) service role associated with a resource.

" @@ -3796,19 +3828,19 @@ "members":{ "status":{ "shape":"ManagedResourceStatus", - "documentation":"

The current status of the KMS access configuration.

" + "documentation":"

The current status of the Amazon Web Services Key Management Service (KMS) access configuration.

" }, "ipv4Addresses":{ "shape":"StringList", - "documentation":"

The IPv4 addresses allowed for KMS access.

" + "documentation":"

The IPv4 addresses allowed for Amazon Web Services Key Management Service (KMS) access.

" }, "domainName":{ "shape":"String", - "documentation":"

The domain name for KMS access configuration.

" + "documentation":"

The domain name for Amazon Web Services Key Management Service (KMS) access configuration.

" }, "kmsPolicyDocument":{ "shape":"String", - "documentation":"

The KMS policy document that defines permissions for key usage.

" + "documentation":"

The Amazon Web Services Key Management Service (KMS) policy document that defines permissions for key usage.

" } }, "documentation":"

Configuration for Amazon Web Services Key Management Service (KMS) access from the ODB network.

" @@ -4447,11 +4479,11 @@ }, "stsAccess":{ "shape":"StsAccess", - "documentation":"

The Amazon Web Services Security Token Service (STS) access configuration for managed services.

" + "documentation":"

The Amazon Web Services Security Token Service (STS) access configuration.

" }, "kmsAccess":{ "shape":"KmsAccess", - "documentation":"

The Amazon Web Services Key Management Service (KMS) access configuration for managed services.

" + "documentation":"

The Amazon Web Services Key Management Service (KMS) access configuration.

" }, "crossRegionS3RestoreSourcesAccess":{ "shape":"CrossRegionS3RestoreSourcesAccessList", @@ -5246,19 +5278,19 @@ "members":{ "status":{ "shape":"ManagedResourceStatus", - "documentation":"

The current status of the STS access configuration.

" + "documentation":"

The current status of the Amazon Web Services Security Token Service (STS) access configuration.

" }, "ipv4Addresses":{ "shape":"StringList", - "documentation":"

The IPv4 addresses allowed for STS access.

" + "documentation":"

The IPv4 addresses allowed for Amazon Web Services Security Token Service (STS) access.

" }, "domainName":{ "shape":"String", - "documentation":"

The domain name for STS access configuration.

" + "documentation":"

The domain name for Amazon Web Services Security Token Service (STS) access configuration.

" }, "stsPolicyDocument":{ "shape":"String", - "documentation":"

The STS policy document that defines permissions for token service usage.

" + "documentation":"

The Amazon Web Services Security Token Service (STS) policy document that defines permissions for token service usage.

" } }, "documentation":"

Configuration for Amazon Web Services Security Token Service (STS) access from the ODB network.

" @@ -5439,11 +5471,11 @@ }, "stsPolicyDocument":{ "shape":"PolicyDocument", - "documentation":"

The STS policy document that defines permissions for token service usage within the ODB network.

" + "documentation":"

The Amazon Web Services Security Token Service (STS) policy document that defines permissions for token service usage within the ODB network.

" }, "kmsPolicyDocument":{ "shape":"PolicyDocument", - "documentation":"

The KMS policy document that defines permissions for key usage within the ODB network.

" + "documentation":"

The Amazon Web Services Key Management Service (KMS) policy document that defines permissions for key usage within the ODB network.

" }, "crossRegionS3RestoreSourcesToEnable":{ "shape":"StringList", diff --git a/awscli/botocore/data/opensearch/2021-01-01/service-2.json b/awscli/botocore/data/opensearch/2021-01-01/service-2.json index 848951ccc448..472b2cb30c88 100644 --- a/awscli/botocore/data/opensearch/2021-01-01/service-2.json +++ b/awscli/botocore/data/opensearch/2021-01-01/service-2.json @@ -1419,6 +1419,10 @@ "S3VectorsEngine":{ "shape":"S3VectorsEngine", "documentation":"

Container for parameters required to enable S3 vectors engine features on the specified domain.

" + }, + "ServerlessVectorAcceleration":{ + "shape":"ServerlessVectorAcceleration", + "documentation":"

Specifies whether to enable serverless vector acceleration for the domain. When enabled, provides GPU-accelerated vector search capabilities for improved performance on vector workloads.

" } }, "documentation":"

Container for parameters required to enable all machine learning features.

" @@ -1433,6 +1437,10 @@ "S3VectorsEngine":{ "shape":"S3VectorsEngine", "documentation":"

Container for parameters representing the state of S3 vectors engine features on the specified domain.

" + }, + "ServerlessVectorAcceleration":{ + "shape":"ServerlessVectorAcceleration", + "documentation":"

The current serverless vector acceleration configuration for the domain.

" } }, "documentation":"

Container for parameters representing the state of machine learning features on the specified domain.

" @@ -2665,7 +2673,11 @@ "shape":"AppConfigs", "documentation":"

Configuration settings for the OpenSearch application, including administrative options.

" }, - "tagList":{"shape":"TagList"} + "tagList":{"shape":"TagList"}, + "kmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key used to encrypt the application's data at rest. If provided, the application uses your customer-managed key for encryption. If omitted, the application uses an AWS-managed key. The KMS key must be in the same region as the application.

" + } } }, "CreateApplicationResponse":{ @@ -2696,6 +2708,10 @@ "createdAt":{ "shape":"Timestamp", "documentation":"

The timestamp indicating when the OpenSearch application was created.

" + }, + "kmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key used to encrypt the application's data at rest.

" } } }, @@ -4772,6 +4788,10 @@ "lastUpdatedAt":{ "shape":"Timestamp", "documentation":"

The timestamp of the last update to the OpenSearch application.

" + }, + "kmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The Amazon Resource Name (ARN) of the KMS key used to encrypt the application's data at rest.

" } } }, @@ -5536,6 +5556,12 @@ }, "documentation":"

The configuration parameters to enable access to the key store required by the package.

" }, + "KmsKeyArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"^arn:aws[a-zA-Z-]*:kms:[a-z0-9-]+:[0-9]{12}:key/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$" + }, "KmsKeyId":{ "type":"string", "max":500, @@ -6325,7 +6351,8 @@ "enum":[ "Data", "Ultrawarm", - "Master" + "Master", + "Warm" ] }, "NonEmptyString":{ @@ -7520,6 +7547,16 @@ }, "documentation":"

Configuration details for a Security Lake data source that can be used for direct queries.

" }, + "ServerlessVectorAcceleration":{ + "type":"structure", + "members":{ + "Enabled":{ + "shape":"Boolean", + "documentation":"

Specifies whether serverless vector acceleration is enabled for the domain.

" + } + }, + "documentation":"

Configuration for serverless vector acceleration, which provides GPU-accelerated vector search capabilities for improved performance on vector workloads.

" + }, "ServiceSoftwareOptions":{ "type":"structure", "members":{ diff --git a/awscli/botocore/data/opensearchserverless/2021-11-01/service-2.json b/awscli/botocore/data/opensearchserverless/2021-11-01/service-2.json index c76c5c9bd43f..740c91d4397e 100644 --- a/awscli/botocore/data/opensearchserverless/2021-11-01/service-2.json +++ b/awscli/botocore/data/opensearchserverless/2021-11-01/service-2.json @@ -27,7 +27,8 @@ {"shape":"InternalServerException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns attributes for one or more collections, including the collection endpoint, the OpenSearch Dashboards endpoint, and FIPS-compliant endpoints. For more information, see Creating and managing Amazon OpenSearch Serverless collections.

" + "documentation":"

Returns attributes for one or more collections, including the collection endpoint, the OpenSearch Dashboards endpoint, and FIPS-compliant endpoints. For more information, see Creating and managing Amazon OpenSearch Serverless collections.

", + "readonly":true }, "BatchGetEffectiveLifecyclePolicy":{ "name":"BatchGetEffectiveLifecyclePolicy", @@ -41,7 +42,8 @@ {"shape":"InternalServerException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns a list of successful and failed retrievals for the OpenSearch Serverless indexes. For more information, see Viewing data lifecycle policies.

" + "documentation":"

Returns a list of successful and failed retrievals for the OpenSearch Serverless indexes. For more information, see Viewing data lifecycle policies.

", + "readonly":true }, "BatchGetLifecyclePolicy":{ "name":"BatchGetLifecyclePolicy", @@ -55,7 +57,8 @@ {"shape":"InternalServerException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns one or more configured OpenSearch Serverless lifecycle policies. For more information, see Viewing data lifecycle policies.

" + "documentation":"

Returns one or more configured OpenSearch Serverless lifecycle policies. For more information, see Viewing data lifecycle policies.

", + "readonly":true }, "BatchGetVpcEndpoint":{ "name":"BatchGetVpcEndpoint", @@ -69,7 +72,8 @@ {"shape":"InternalServerException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns attributes for one or more VPC endpoints associated with the current account. For more information, see Access Amazon OpenSearch Serverless using an interface endpoint.

" + "documentation":"

Returns attributes for one or more VPC endpoints associated with the current account. For more information, see Access Amazon OpenSearch Serverless using an interface endpoint.

", + "readonly":true }, "CreateAccessPolicy":{ "name":"CreateAccessPolicy", @@ -322,7 +326,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns an OpenSearch Serverless access policy. For more information, see Data access control for Amazon OpenSearch Serverless.

" + "documentation":"

Returns an OpenSearch Serverless access policy. For more information, see Data access control for Amazon OpenSearch Serverless.

", + "readonly":true }, "GetAccountSettings":{ "name":"GetAccountSettings", @@ -336,7 +341,8 @@ {"shape":"InternalServerException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns account-level settings related to OpenSearch Serverless.

" + "documentation":"

Returns account-level settings related to OpenSearch Serverless.

", + "readonly":true }, "GetIndex":{ "name":"GetIndex", @@ -351,7 +357,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"} ], - "documentation":"

Retrieves information about an index in an OpenSearch Serverless collection, including its schema definition. The index might be configured to conduct automatic semantic enrichment ingestion and search. For more information, see About automatic semantic enrichment.

" + "documentation":"

Retrieves information about an index in an OpenSearch Serverless collection, including its schema definition. The index might be configured to conduct automatic semantic enrichment ingestion and search. For more information, see About automatic semantic enrichment.

", + "readonly":true }, "GetPoliciesStats":{ "name":"GetPoliciesStats", @@ -364,7 +371,8 @@ "errors":[ {"shape":"InternalServerException"} ], - "documentation":"

Returns statistical information about your OpenSearch Serverless access policies, security configurations, and security policies.

" + "documentation":"

Returns statistical information about your OpenSearch Serverless access policies, security configurations, and security policies.

", + "readonly":true }, "GetSecurityConfig":{ "name":"GetSecurityConfig", @@ -379,7 +387,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns information about an OpenSearch Serverless security configuration. For more information, see SAML authentication for Amazon OpenSearch Serverless.

" + "documentation":"

Returns information about an OpenSearch Serverless security configuration. For more information, see SAML authentication for Amazon OpenSearch Serverless.

", + "readonly":true }, "GetSecurityPolicy":{ "name":"GetSecurityPolicy", @@ -394,7 +403,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns information about a configured OpenSearch Serverless security policy. For more information, see Network access for Amazon OpenSearch Serverless and Encryption at rest for Amazon OpenSearch Serverless.

" + "documentation":"

Returns information about a configured OpenSearch Serverless security policy. For more information, see Network access for Amazon OpenSearch Serverless and Encryption at rest for Amazon OpenSearch Serverless.

", + "readonly":true }, "ListAccessPolicies":{ "name":"ListAccessPolicies", @@ -408,7 +418,8 @@ {"shape":"InternalServerException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns information about a list of OpenSearch Serverless access policies.

" + "documentation":"

Returns information about a list of OpenSearch Serverless access policies.

", + "readonly":true }, "ListCollections":{ "name":"ListCollections", @@ -422,7 +433,8 @@ {"shape":"InternalServerException"}, {"shape":"ValidationException"} ], - "documentation":"

Lists all OpenSearch Serverless collections. For more information, see Creating and managing Amazon OpenSearch Serverless collections.

Make sure to include an empty request body {} if you don't include any collection filters in the request.

" + "documentation":"

Lists all OpenSearch Serverless collections. For more information, see Creating and managing Amazon OpenSearch Serverless collections.

Make sure to include an empty request body {} if you don't include any collection filters in the request.

", + "readonly":true }, "ListLifecyclePolicies":{ "name":"ListLifecyclePolicies", @@ -436,7 +448,8 @@ {"shape":"InternalServerException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns a list of OpenSearch Serverless lifecycle policies. For more information, see Viewing data lifecycle policies.

" + "documentation":"

Returns a list of OpenSearch Serverless lifecycle policies. For more information, see Viewing data lifecycle policies.

", + "readonly":true }, "ListSecurityConfigs":{ "name":"ListSecurityConfigs", @@ -450,7 +463,8 @@ {"shape":"InternalServerException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns information about configured OpenSearch Serverless security configurations. For more information, see SAML authentication for Amazon OpenSearch Serverless.

" + "documentation":"

Returns information about configured OpenSearch Serverless security configurations. For more information, see SAML authentication for Amazon OpenSearch Serverless.

", + "readonly":true }, "ListSecurityPolicies":{ "name":"ListSecurityPolicies", @@ -464,7 +478,8 @@ {"shape":"InternalServerException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns information about configured OpenSearch Serverless security policies.

" + "documentation":"

Returns information about configured OpenSearch Serverless security policies.

", + "readonly":true }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -493,7 +508,8 @@ {"shape":"InternalServerException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns the OpenSearch Serverless-managed interface VPC endpoints associated with the current account. For more information, see Access Amazon OpenSearch Serverless using an interface endpoint.

" + "documentation":"

Returns the OpenSearch Serverless-managed interface VPC endpoints associated with the current account. For more information, see Access Amazon OpenSearch Serverless using an interface endpoint.

", + "readonly":true }, "TagResource":{ "name":"TagResource", @@ -555,7 +571,8 @@ "output":{"shape":"UpdateAccountSettingsResponse"}, "errors":[ {"shape":"InternalServerException"}, - {"shape":"ValidationException"} + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"} ], "documentation":"

Update the OpenSearch Serverless settings for the current Amazon Web Services account. For more information, see Managing capacity limits for Amazon OpenSearch Serverless.

" }, @@ -909,6 +926,10 @@ "shape":"StandbyReplicas", "documentation":"

Details about an OpenSearch Serverless collection.

" }, + "vectorOptions":{ + "shape":"VectorOptions", + "documentation":"

Configuration options for vector search capabilities in the collection.

" + }, "createdDate":{ "shape":"Long", "documentation":"

The Epoch time when the collection was created.

" @@ -1145,6 +1166,10 @@ "shape":"StandbyReplicas", "documentation":"

Creates details about an OpenSearch Serverless collection.

" }, + "vectorOptions":{ + "shape":"VectorOptions", + "documentation":"

Configuration options for vector search capabilities in the collection.

" + }, "createdDate":{ "shape":"Long", "documentation":"

The Epoch time when the collection was created.

" @@ -1180,6 +1205,10 @@ "shape":"StandbyReplicas", "documentation":"

Indicates whether standby replicas should be used for a collection.

" }, + "vectorOptions":{ + "shape":"VectorOptions", + "documentation":"

Configuration options for vector search capabilities in the collection.

" + }, "clientToken":{ "shape":"ClientToken", "documentation":"

Unique, case-sensitive identifier to ensure idempotency of the request.

", @@ -2665,6 +2694,15 @@ "network" ] }, + "ServerlessVectorAccelerationStatus":{ + "type":"string", + "documentation":"

Specifies whether serverless vector acceleration is enabled for the collection.

", + "enum":[ + "ENABLED", + "DISABLED", + "ALLOWED" + ] + }, "ServiceQuotaExceededException":{ "type":"structure", "required":[ @@ -3179,6 +3217,17 @@ "documentation":"

Thrown when the HTTP request contains invalid input or is missing required input.

", "exception":true }, + "VectorOptions":{ + "type":"structure", + "required":["ServerlessVectorAcceleration"], + "members":{ + "ServerlessVectorAcceleration":{ + "shape":"ServerlessVectorAccelerationStatus", + "documentation":"

Specifies whether serverless vector acceleration is enabled for the collection.

" + } + }, + "documentation":"

Configuration options for vector search capabilities in an OpenSearch Serverless collection.

" + }, "VpcEndpointDetail":{ "type":"structure", "members":{ diff --git a/awscli/botocore/data/organizations/2016-11-28/service-2.json b/awscli/botocore/data/organizations/2016-11-28/service-2.json index 8f10478a5001..545c42be7345 100644 --- a/awscli/botocore/data/organizations/2016-11-28/service-2.json +++ b/awscli/botocore/data/organizations/2016-11-28/service-2.json @@ -62,7 +62,7 @@ {"shape":"UnsupportedAPIEndpointException"}, {"shape":"PolicyChangesInProgressException"} ], - "documentation":"

Attaches a policy to a root, an organizational unit (OU), or an individual account. How the policy affects accounts depends on the type of policy. Refer to the Organizations User Guide for information about each policy type:

You can only call this operation from the management account or a member account that is a delegated administrator.

" + "documentation":"

Attaches a policy to a root, an organizational unit (OU), or an individual account. How the policy affects accounts depends on the type of policy. Refer to the Organizations User Guide for information about each policy type:

You can only call this operation from the management account or a member account that is a delegated administrator.

" }, "CancelHandshake":{ "name":"CancelHandshake", @@ -1854,7 +1854,7 @@ }, "Type":{ "shape":"PolicyType", - "documentation":"

The type of policy to create. You can specify one of the following values:

" + "documentation":"

The type of policy to create. You can specify one of the following values:

" }, "Tags":{ "shape":"Tags", @@ -2035,7 +2035,7 @@ "members":{ "PolicyType":{ "shape":"EffectivePolicyType", - "documentation":"

The type of policy that you want information about. You can specify one of the following values:

" + "documentation":"

The type of policy that you want information about. You can specify one of the following values:

" }, "TargetId":{ "shape":"PolicyTargetId", @@ -2194,7 +2194,7 @@ }, "PolicyType":{ "shape":"PolicyType", - "documentation":"

The policy type that you want to disable in this root. You can specify one of the following values:

" + "documentation":"

The policy type that you want to disable in this root. You can specify one of the following values:

" } } }, @@ -2289,7 +2289,8 @@ "INSPECTOR_POLICY", "UPGRADE_ROLLOUT_POLICY", "BEDROCK_POLICY", - "S3_POLICY" + "S3_POLICY", + "NETWORK_SECURITY_DIRECTOR_POLICY" ] }, "EffectivePolicyValidationError":{ @@ -2361,7 +2362,7 @@ }, "PolicyType":{ "shape":"PolicyType", - "documentation":"

The policy type that you want to enable. You can specify one of the following values:

" + "documentation":"

The policy type that you want to enable. You can specify one of the following values:

" } } }, @@ -2850,7 +2851,7 @@ "members":{ "PolicyType":{ "shape":"EffectivePolicyType", - "documentation":"

The type of policy that you want information about. You can specify one of the following values:

" + "documentation":"

The type of policy that you want information about. You can specify one of the following values:

" }, "NextToken":{ "shape":"NextToken", @@ -2871,7 +2872,7 @@ }, "PolicyType":{ "shape":"EffectivePolicyType", - "documentation":"

The specified policy type. One of the following values:

" + "documentation":"

The specified policy type. One of the following values:

" }, "NextToken":{ "shape":"NextToken", @@ -3021,7 +3022,7 @@ }, "PolicyType":{ "shape":"EffectivePolicyType", - "documentation":"

The type of policy that you want information about. You can specify one of the following values:

" + "documentation":"

The type of policy that you want information about. You can specify one of the following values:

" }, "NextToken":{ "shape":"NextToken", @@ -3042,7 +3043,7 @@ }, "PolicyType":{ "shape":"EffectivePolicyType", - "documentation":"

The specified policy type. One of the following values:

" + "documentation":"

The specified policy type. One of the following values:

" }, "Path":{ "shape":"Path", @@ -3263,7 +3264,7 @@ }, "Filter":{ "shape":"PolicyType", - "documentation":"

The type of policy that you want to include in the returned list. You must specify one of the following values:

" + "documentation":"

The type of policy that you want to include in the returned list. You must specify one of the following values:

" }, "NextToken":{ "shape":"NextToken", @@ -3294,7 +3295,7 @@ "members":{ "Filter":{ "shape":"PolicyType", - "documentation":"

Specifies the type of policy that you want to include in the response. You must specify one of the following values:

" + "documentation":"

Specifies the type of policy that you want to include in the response. You must specify one of the following values:

" }, "NextToken":{ "shape":"NextToken", @@ -3759,7 +3760,8 @@ "INSPECTOR_POLICY", "UPGRADE_ROLLOUT_POLICY", "BEDROCK_POLICY", - "S3_POLICY" + "S3_POLICY", + "NETWORK_SECURITY_DIRECTOR_POLICY" ] }, "PolicyTypeAlreadyEnabledException":{ diff --git a/awscli/botocore/data/partnercentral-account/2025-04-04/service-2.json b/awscli/botocore/data/partnercentral-account/2025-04-04/service-2.json index 09a805167215..19ad2076b9cb 100644 --- a/awscli/botocore/data/partnercentral-account/2025-04-04/service-2.json +++ b/awscli/botocore/data/partnercentral-account/2025-04-04/service-2.json @@ -285,6 +285,24 @@ "documentation":"

Retrieves the visibility settings for a partner profile, determining who can see the profile information.

", "readonly":true }, + "GetVerification":{ + "name":"GetVerification", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetVerificationRequest"}, + "output":{"shape":"GetVerificationResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Retrieves the current status and details of a verification process for a partner account. This operation allows partners to check the progress and results of business or registrant verification processes.

", + "readonly":true + }, "ListConnectionInvitations":{ "name":"ListConnectionInvitations", "http":{ @@ -442,6 +460,25 @@ "documentation":"

Initiates a profile update task to modify partner profile information asynchronously.

", "idempotent":true }, + "StartVerification":{ + "name":"StartVerification", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartVerificationRequest"}, + "output":{"shape":"StartVerificationResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Initiates a new verification process for a partner account. This operation begins the verification workflow for either business registration or individual registrant identity verification as required by AWS Partner Central.

", + "idempotent":true + }, "TagResource":{ "name":"TagResource", "http":{ @@ -673,6 +710,44 @@ }, "documentation":"

Contains information about a business rule validation error that occurred during an operation.

" }, + "BusinessVerificationDetails":{ + "type":"structure", + "required":[ + "LegalName", + "RegistrationId", + "CountryCode" + ], + "members":{ + "LegalName":{ + "shape":"LegalName", + "documentation":"

The official legal name of the business as registered with the appropriate government authorities.

" + }, + "RegistrationId":{ + "shape":"RegistrationId", + "documentation":"

The unique business registration identifier assigned by the government or regulatory authority, such as a company registration number or tax identification number.

" + }, + "CountryCode":{ + "shape":"CountryCode", + "documentation":"

The ISO 3166-1 alpha-2 country code where the business is legally registered and operates.

" + }, + "JurisdictionOfIncorporation":{ + "shape":"JurisdictionCode", + "documentation":"

The specific legal jurisdiction or state where the business was incorporated or registered, providing additional location context beyond the country code.

" + } + }, + "documentation":"

Contains the business information required for verifying a company's legal status and registration details within AWS Partner Central.

" + }, + "BusinessVerificationResponse":{ + "type":"structure", + "required":["BusinessVerificationDetails"], + "members":{ + "BusinessVerificationDetails":{ + "shape":"BusinessVerificationDetails", + "documentation":"

The business verification details that were processed and verified, potentially including additional information discovered during the verification process.

" + } + }, + "documentation":"

Contains the response information and results from a business verification process, including any verification-specific data returned by the verification service.

" + }, "CancelConnectionInvitationRequest":{ "type":"structure", "required":[ @@ -941,6 +1016,7 @@ "min":1, "pattern":"[A-Za-z0-9-_]+" }, + "CompletionUrl":{"type":"string"}, "ConflictException":{ "type":"structure", "required":[ @@ -969,7 +1045,9 @@ "INCOMPATIBLE_CONNECTION_INVITATION_RECEIVER", "DUPLICATE_CONNECTION", "INCOMPATIBLE_CONNECTION_STATE", - "INCOMPATIBLE_CONNECTION_PREFERENCES_REVISION" + "INCOMPATIBLE_CONNECTION_PREFERENCES_REVISION", + "ACCOUNT_ALREADY_VERIFIED", + "VERIFICATION_ALREADY_IN_PROGRESS" ] }, "Connection":{ @@ -1232,6 +1310,12 @@ "key":{"shape":"ConnectionType"}, "value":{"shape":"ConnectionTypeSummary"} }, + "CountryCode":{ + "type":"string", + "max":2, + "min":2, + "pattern":"[A-Z]{2}" + }, "CreateConnectionInvitationRequest":{ "type":"structure", "required":[ @@ -1983,6 +2067,51 @@ } } }, + "GetVerificationRequest":{ + "type":"structure", + "required":["VerificationType"], + "members":{ + "VerificationType":{ + "shape":"VerificationType", + "documentation":"

The type of verification to retrieve information for. Valid values include business verification for company registration details and registrant verification for individual identity confirmation.

" + } + } + }, + "GetVerificationResponse":{ + "type":"structure", + "required":[ + "VerificationType", + "VerificationStatus", + "VerificationResponseDetails", + "StartedAt" + ], + "members":{ + "VerificationType":{ + "shape":"VerificationType", + "documentation":"

The type of verification that was requested and processed.

" + }, + "VerificationStatus":{ + "shape":"VerificationStatus", + "documentation":"

The current status of the verification process. Possible values include pending, in-progress, completed, failed, or expired.

" + }, + "VerificationStatusReason":{ + "shape":"VerificationStatusReason", + "documentation":"

Additional information explaining the current verification status, particularly useful when the status indicates a failure or requires additional action.

" + }, + "VerificationResponseDetails":{ + "shape":"VerificationResponseDetails", + "documentation":"

Detailed response information specific to the type of verification performed, including any verification-specific data or results.

" + }, + "StartedAt":{ + "shape":"DateTime", + "documentation":"

The timestamp when the verification process was initiated.

" + }, + "CompletedAt":{ + "shape":"DateTime", + "documentation":"

The timestamp when the verification process was completed. This field is null if the verification is still in progress.

" + } + } + }, "IndustrySegment":{ "type":"string", "enum":[ @@ -2052,6 +2181,19 @@ "EXPIRED" ] }, + "JurisdictionCode":{ + "type":"string", + "max":2, + "min":2, + "pattern":"[A-Z0-9]{2}" + }, + "LegalName":{ + "type":"string", + "max":80, + "min":1, + "pattern":"[\\u0020-\\u007E\\u00A0-\\uD7FF\\uE000-\\uFFFD]+", + "sensitive":true + }, "ListConnectionInvitationsRequest":{ "type":"structure", "required":["Catalog"], @@ -2603,6 +2745,36 @@ } } }, + "RegistrantVerificationDetails":{ + "type":"structure", + "members":{}, + "documentation":"

Contains the personal information required for verifying an individual's identity as part of the partner registration process in AWS Partner Central.

" + }, + "RegistrantVerificationResponse":{ + "type":"structure", + "required":[ + "CompletionUrl", + "CompletionUrlExpiresAt" + ], + "members":{ + "CompletionUrl":{ + "shape":"CompletionUrl", + "documentation":"

A secure URL where the registrant can complete additional verification steps, such as document upload or identity confirmation through a third-party verification service.

" + }, + "CompletionUrlExpiresAt":{ + "shape":"DateTime", + "documentation":"

The timestamp when the completion URL expires and is no longer valid for accessing the verification workflow.

" + } + }, + "documentation":"

Contains the response information from a registrant verification process, including any verification-specific data and next steps for the individual verification workflow.

" + }, + "RegistrationId":{ + "type":"string", + "max":80, + "min":1, + "pattern":"[\\u0020-\\u007E\\u00A0-\\uD7FF\\uE000-\\uFFFD]+", + "sensitive":true + }, "RejectConnectionInvitationRequest":{ "type":"structure", "required":[ @@ -2742,7 +2914,8 @@ "SENDER_PROFILE_NOT_FOUND", "RECEIVER_PROFILE_NOT_FOUND", "CONNECTION_INVITATION_NOT_FOUND", - "CONNECTION_NOT_FOUND" + "CONNECTION_NOT_FOUND", + "VERIFICATION_NOT_FOUND" ] }, "Revision":{ @@ -2901,6 +3074,55 @@ } } }, + "StartVerificationRequest":{ + "type":"structure", + "members":{ + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. This prevents duplicate verification processes from being started accidentally.

", + "idempotencyToken":true + }, + "VerificationDetails":{ + "shape":"VerificationDetails", + "documentation":"

The specific details required for the verification process, including business information for business verification or personal information for registrant verification.

" + } + } + }, + "StartVerificationResponse":{ + "type":"structure", + "required":[ + "VerificationType", + "VerificationStatus", + "VerificationResponseDetails", + "StartedAt" + ], + "members":{ + "VerificationType":{ + "shape":"VerificationType", + "documentation":"

The type of verification that was started based on the provided verification details.

" + }, + "VerificationStatus":{ + "shape":"VerificationStatus", + "documentation":"

The initial status of the verification process after it has been started. Typically this will be pending or in-progress.

" + }, + "VerificationStatusReason":{ + "shape":"VerificationStatusReason", + "documentation":"

Additional information about the initial verification status, including any immediate feedback about the submitted verification details.

" + }, + "VerificationResponseDetails":{ + "shape":"VerificationResponseDetails", + "documentation":"

Initial response details specific to the type of verification started, which may include next steps or additional requirements.

" + }, + "StartedAt":{ + "shape":"DateTime", + "documentation":"

The timestamp when the verification process was successfully initiated.

" + }, + "CompletedAt":{ + "shape":"DateTime", + "documentation":"

The timestamp when the verification process was completed. This field is typically null for newly started verifications unless they complete immediately.

" + } + } + }, "String":{"type":"string"}, "Tag":{ "type":"structure", @@ -3186,6 +3408,59 @@ "REQUEST_VALIDATION_FAILED", "BUSINESS_VALIDATION_FAILED" ] + }, + "VerificationDetails":{ + "type":"structure", + "members":{ + "BusinessVerificationDetails":{ + "shape":"BusinessVerificationDetails", + "documentation":"

The business verification details to be used when starting a business verification process.

" + }, + "RegistrantVerificationDetails":{ + "shape":"RegistrantVerificationDetails", + "documentation":"

The registrant verification details to be used when starting an individual identity verification process.

" + } + }, + "documentation":"

A union structure containing the specific details required for different types of verification processes supported by AWS Partner Central.

", + "union":true + }, + "VerificationResponseDetails":{ + "type":"structure", + "members":{ + "BusinessVerificationResponse":{ + "shape":"BusinessVerificationResponse", + "documentation":"

The response details from a business verification process, including verification results and any additional business information discovered.

" + }, + "RegistrantVerificationResponse":{ + "shape":"RegistrantVerificationResponse", + "documentation":"

The response details from a registrant verification process, including verification results and any additional steps required for identity confirmation.

" + } + }, + "documentation":"

A union structure containing the response details specific to different types of verification processes, providing type-specific information and results.

", + "union":true + }, + "VerificationStatus":{ + "type":"string", + "enum":[ + "PENDING_CUSTOMER_ACTION", + "IN_PROGRESS", + "FAILED", + "SUCCEEDED", + "REJECTED" + ] + }, + "VerificationStatusReason":{ + "type":"string", + "max":200, + "min":1, + "pattern":"[\\u0020-\\u007E\\u00A0-\\uD7FF\\uE000-\\uFFFD]+" + }, + "VerificationType":{ + "type":"string", + "enum":[ + "BUSINESS_VERIFICATION", + "REGISTRANT_VERIFICATION" + ] } }, "documentation":"

AWS Partner Central Account service provides APIs for managing partner accounts, connections, and profiles within the AWS Partner Network. This service enables partners to create and manage their partner profiles, establish connections with other partners, and maintain their account information.

" diff --git a/awscli/botocore/data/partnercentral-selling/2022-07-26/service-2.json b/awscli/botocore/data/partnercentral-selling/2022-07-26/service-2.json index 77807a5bcaf3..b9179753bee6 100644 --- a/awscli/botocore/data/partnercentral-selling/2022-07-26/service-2.json +++ b/awscli/botocore/data/partnercentral-selling/2022-07-26/service-2.json @@ -990,9 +990,15 @@ }, "Amount":{ "type":"string", - "pattern":"(0|([1-9][0-9]{0,30}))(\\.[0-9]{0,2})?", + "pattern":"((0|([1-9][0-9]{0,30}))(\\.[0-9]{0,2})?)?", "sensitive":true }, + "AmountMap":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"MonetaryAmount"}, + "documentation":"

Maps spend amounts to AWS programs and strategic initiatives

" + }, "ApnPrograms":{ "type":"list", "member":{"shape":"String"} @@ -1220,7 +1226,8 @@ "EngagementScore":{ "shape":"EngagementScore", "documentation":"

Represents a score assigned by AWS to indicate the level of engagement and potential success for the opportunity. This score helps partners prioritize their efforts.

" - } + }, + "AwsProductsSpendInsightsBySource":{"shape":"AwsProductsSpendInsightsBySource"} }, "documentation":"

Contains insights provided by AWS for the opportunity, offering recommendations and analysis that can help the partner optimize their engagement and strategy.

" }, @@ -1267,6 +1274,10 @@ "ExpectedCustomerSpend":{ "shape":"ExpectedCustomerSpendList", "documentation":"

Indicates the expected spending by the customer over the course of the project. This value helps partners and AWS estimate the financial impact of the opportunity. Use the AWS Pricing Calculator to create an estimate of the customer’s total spend. If only annual recurring revenue (ARR) is available, distribute it across 12 months to provide an average monthly value.

" + }, + "AwsPartition":{ + "shape":"AwsPartition", + "documentation":"

AWS partition where the opportunity will be deployed. Possible values: 'aws-eusc' for AWS European Sovereign Cloud, null for all other partitions

" } }, "documentation":"

Captures details about the project associated with the opportunity, including objectives, scope, and customer requirements.

" @@ -1318,11 +1329,321 @@ "type":"list", "member":{"shape":"AwsTeamMember"} }, + "AwsPartition":{ + "type":"string", + "enum":["aws-eusc"] + }, + "AwsProductDetails":{ + "type":"structure", + "required":[ + "ProductCode", + "Categories", + "Optimizations" + ], + "members":{ + "ProductCode":{ + "shape":"String", + "documentation":"

AWS Partner Central product identifier used for opportunity association

" + }, + "ServiceCode":{ + "shape":"String", + "documentation":"

Pricing Calculator service code (links to original calculator URL)

" + }, + "Categories":{ + "shape":"AwsProductDetailsCategoriesList", + "documentation":"

List of program and pathway categories this product is eligible for

" + }, + "Amount":{ + "shape":"MonetaryAmount", + "documentation":"

Baseline service cost before optimizations (may be null for AWS-sourced predictions)

" + }, + "OptimizedAmount":{ + "shape":"MonetaryAmount", + "documentation":"

Service cost after applying optimizations (may be null for AWS-sourced predictions)

" + }, + "PotentialSavingsAmount":{ + "shape":"MonetaryAmount", + "documentation":"

Service-specific cost reduction through optimizations (may be null for AWS-sourced predictions)

" + }, + "Optimizations":{ + "shape":"AwsProductOptimizationsList", + "documentation":"

List of specific optimization recommendations for this product

" + } + }, + "documentation":"

AWS services with program eligibility indicators (MAP, modernization pathways), cost estimates, and optimization recommendations.

" + }, + "AwsProductDetailsCategoriesList":{ + "type":"list", + "member":{"shape":"String"}, + "max":20, + "min":0 + }, "AwsProductIdentifier":{"type":"string"}, "AwsProductIdentifiers":{ "type":"list", "member":{"shape":"AwsProductIdentifier"} }, + "AwsProductInsights":{ + "type":"structure", + "required":[ + "CurrencyCode", + "Frequency", + "TotalAmountByCategory", + "AwsProducts" + ], + "members":{ + "CurrencyCode":{ + "shape":"AwsProductInsightsCurrencyCodeEnum", + "documentation":"

ISO 4217 currency code (e.g., "USD") ensuring consistent representation across calculations

" + }, + "Frequency":{ + "shape":"PaymentFrequency", + "documentation":"

Time period for spend amounts: "Monthly" or "Annually"

" + }, + "TotalAmount":{ + "shape":"MonetaryAmount", + "documentation":"

Total estimated spend for this source before optimizations

" + }, + "TotalOptimizedAmount":{ + "shape":"MonetaryAmount", + "documentation":"

Total estimated spend after applying recommended optimizations

" + }, + "TotalPotentialSavingsAmount":{ + "shape":"MonetaryAmount", + "documentation":"

Quantified savings achievable through implementing optimizations

" + }, + "TotalAmountByCategory":{ + "shape":"AmountMap", + "documentation":"

Spend amounts mapped to AWS programs and modernization pathways

" + }, + "AwsProducts":{ + "shape":"AwsProductsList", + "documentation":"

Product-level details including costs and optimization recommendations

" + } + }, + "documentation":"

Comprehensive spend analysis for a single source (AWS or Partner) including total amounts, optimization savings, program category breakdowns, and detailed product-level insights.

" + }, + "AwsProductInsightsCurrencyCodeEnum":{ + "type":"string", + "enum":[ + "USD", + "EUR", + "GBP", + "AUD", + "CAD", + "CNY", + "NZD", + "INR", + "JPY", + "CHF", + "SEK", + "AED", + "AFN", + "ALL", + "AMD", + "ANG", + "AOA", + "ARS", + "AWG", + "AZN", + "BAM", + "BBD", + "BDT", + "BGN", + "BHD", + "BIF", + "BMD", + "BND", + "BOB", + "BOV", + "BRL", + "BSD", + "BTN", + "BWP", + "BYN", + "BZD", + "CDF", + "CHE", + "CHW", + "CLF", + "CLP", + "COP", + "COU", + "CRC", + "CUC", + "CUP", + "CVE", + "CZK", + "DJF", + "DKK", + "DOP", + "DZD", + "EGP", + "ERN", + "ETB", + "FJD", + "FKP", + "GEL", + "GHS", + "GIP", + "GMD", + "GNF", + "GTQ", + "GYD", + "HKD", + "HNL", + "HRK", + "HTG", + "HUF", + "IDR", + "ILS", + "IQD", + "IRR", + "ISK", + "JMD", + "JOD", + "KES", + "KGS", + "KHR", + "KMF", + "KPW", + "KRW", + "KWD", + "KYD", + "KZT", + "LAK", + "LBP", + "LKR", + "LRD", + "LSL", + "LYD", + "MAD", + "MDL", + "MGA", + "MKD", + "MMK", + "MNT", + "MOP", + "MRU", + "MUR", + "MVR", + "MWK", + "MXN", + "MXV", + "MYR", + "MZN", + "NAD", + "NGN", + "NIO", + "NOK", + "NPR", + "OMR", + "PAB", + "PEN", + "PGK", + "PHP", + "PKR", + "PLN", + "PYG", + "QAR", + "RON", + "RSD", + "RUB", + "RWF", + "SAR", + "SBD", + "SCR", + "SDG", + "SGD", + "SHP", + "SLL", + "SOS", + "SRD", + "SSP", + "STN", + "SVC", + "SYP", + "SZL", + "THB", + "TJS", + "TMT", + "TND", + "TOP", + "TRY", + "TTD", + "TWD", + "TZS", + "UAH", + "UGX", + "USN", + "UYI", + "UYU", + "UZS", + "VEF", + "VND", + "VUV", + "WST", + "XAF", + "XCD", + "XDR", + "XOF", + "XPF", + "XSU", + "XUA", + "YER", + "ZAR", + "ZMW", + "ZWL" + ], + "pattern":"USD", + "sensitive":true + }, + "AwsProductOptimization":{ + "type":"structure", + "required":[ + "Description", + "SavingsAmount" + ], + "members":{ + "Description":{ + "shape":"String", + "documentation":"

Human-readable explanation of the optimization strategy

" + }, + "SavingsAmount":{ + "shape":"MonetaryAmount", + "documentation":"

Quantified cost savings achievable by implementing this optimization

" + } + }, + "documentation":"

Details for a specific optimization recommendation

" + }, + "AwsProductOptimizationsList":{ + "type":"list", + "member":{"shape":"AwsProductOptimization"}, + "documentation":"

List of optimization recommendations

", + "max":10, + "min":0 + }, + "AwsProductsList":{ + "type":"list", + "member":{"shape":"AwsProductDetails"}, + "documentation":"

List of AWS services with program eligibility indicators (MAP, modernization pathways), cost estimates, and optimization recommendations.

", + "max":100, + "min":0 + }, + "AwsProductsSpendInsightsBySource":{ + "type":"structure", + "members":{ + "Partner":{ + "shape":"AwsProductInsights", + "documentation":"

Partner-sourced insights derived from Pricing Calculator URLs including detailed service costs and optimizations

" + }, + "AWS":{ + "shape":"AwsProductInsights", + "documentation":"

AI-generated insights including recommended products from AWS

" + } + }, + "documentation":"

Source-separated spend insights that provide independent analysis for AWS predictions and partner estimates

" + }, "AwsSubmission":{ "type":"structure", "required":["InvolvementType"], @@ -2801,10 +3122,13 @@ "type":"string", "pattern":"(?s).{1,255}" }, + "EstimationUrl":{ + "type":"string", + "pattern":"https://calculator\\.aws/#/estimate\\?id=[a-f0-9]{32,64}" + }, "ExpectedCustomerSpend":{ "type":"structure", "required":[ - "Amount", "CurrencyCode", "Frequency", "TargetCompany" @@ -2827,7 +3151,7 @@ "documentation":"

Specifies the name of the partner company that is expected to generate revenue from the opportunity. This field helps track the partner’s involvement in the opportunity. This field only accepts the value AWS. If any other value is provided, the system will automatically set it to AWS.

" }, "EstimationUrl":{ - "shape":"WebsiteUrl", + "shape":"EstimationUrl", "documentation":"

A URL providing additional information or context about the spend estimation.

" } }, @@ -4870,6 +5194,11 @@ "max":10, "min":1 }, + "MonetaryAmount":{ + "type":"string", + "pattern":"(0|([1-9][0-9]{0,30}))(\\.[0-9]{0,2})?", + "sensitive":true + }, "MonetaryValue":{ "type":"structure", "required":[ @@ -5233,6 +5562,10 @@ "AdditionalComments":{ "shape":"ProjectAdditionalCommentsString", "documentation":"

Captures additional comments or information for the Opportunity that weren't captured in other fields.

" + }, + "AwsPartition":{ + "shape":"AwsPartition", + "documentation":"

AWS partition where the opportunity will be deployed. Possible values: 'aws-eusc' for AWS European Sovereign Cloud, null for all other partitions

" } }, "documentation":"

An object that contains the Opportunity's project details.

" diff --git a/awscli/botocore/data/payment-cryptography-data/2022-02-03/service-2.json b/awscli/botocore/data/payment-cryptography-data/2022-02-03/service-2.json index 20e6d1cdf162..17dadde2096e 100644 --- a/awscli/botocore/data/payment-cryptography-data/2022-02-03/service-2.json +++ b/awscli/botocore/data/payment-cryptography-data/2022-02-03/service-2.json @@ -49,6 +49,24 @@ ], "documentation":"

Encrypts plaintext data to ciphertext using a symmetric (TDES, AES), asymmetric (RSA), or derived (DUKPT or EMV) encryption key scheme. For more information, see Encrypt data in the Amazon Web Services Payment Cryptography User Guide.

You can generate an encryption key within Amazon Web Services Payment Cryptography by calling CreateKey. You can import your own encryption key by calling ImportKey.

For this operation, the key must have KeyModesOfUse set to Encrypt. In asymmetric encryption, plaintext is encrypted using public component. You can import the public component of an asymmetric key pair created outside Amazon Web Services Payment Cryptography by calling ImportKey.

This operation also supports dynamic keys, allowing you to pass a dynamic encryption key as a TR-31 WrappedKeyBlock. This can be used when key material is frequently rotated, such as during every card transaction, and there is need to avoid importing short-lived keys into Amazon Web Services Payment Cryptography. To encrypt using dynamic keys, the keyARN is the Key Encryption Key (KEK) of the TR-31 wrapped encryption key material. The incoming wrapped key shall have a key purpose of D0 with a mode of use of B or D. For more information, see Using Dynamic Keys in the Amazon Web Services Payment Cryptography User Guide.

For symmetric and DUKPT encryption, Amazon Web Services Payment Cryptography supports TDES and AES algorithms. For EMV encryption, Amazon Web Services Payment Cryptography supports TDES algorithms. For asymmetric encryption, Amazon Web Services Payment Cryptography supports RSA.

When you use TDES or TDES DUKPT, the plaintext data length must be a multiple of 8 bytes. For AES or AES DUKPT, the plaintext data length must be a multiple of 16 bytes. For RSA, it should be equal to the key size unless padding is enabled.

To encrypt using DUKPT, you must already have a BDK (Base Derivation Key) key in your account with KeyModesOfUse set to DeriveKey, or you can generate a new DUKPT key by calling CreateKey. To encrypt using EMV, you must already have an IMK (Issuer Master Key) key in your account with KeyModesOfUse set to DeriveKey.

For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide.

Cross-account use: This operation can't be used across different Amazon Web Services accounts.

Related operations:

" }, + "GenerateAs2805KekValidation":{ + "name":"GenerateAs2805KekValidation", + "http":{ + "method":"POST", + "requestUri":"/as2805kekvalidation/generate", + "responseCode":200 + }, + "input":{"shape":"GenerateAs2805KekValidationInput"}, + "output":{"shape":"GenerateAs2805KekValidationOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Establishes node-to-node initialization between payment processing nodes such as an acquirer, issuer or payment network using Australian Standard 2805 (AS2805).

During node-to-node initialization, both communicating nodes must validate that they possess the correct Key Encrypting Keys (KEKs) before proceeding with session key exchange. In AS2805, the sending KEK (KEKs) of one node corresponds to the receiving KEK (KEKr) of its partner node. Each node uses its KEK to encrypt and decrypt session keys exchanged between the nodes. A KEK can be created or imported into Amazon Web Services Payment Cryptography using either the CreateKey or ImportKey operations.

The node initiating communication can use GenerateAS2805KekValidation to generate a combined KEK validation request and KEK validation response to send to the partnering node for validation. When invoked, the API internally generates a random sending key encrypted under KEKs and provides a receiving key encrypted under KEKr as response. The initiating node sends the response returned by this API to its partner for validation.

For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide.

Cross-account use: This operation can't be used across different Amazon Web Services accounts.

" + }, "GenerateCardValidationData":{ "name":"GenerateCardValidationData", "http":{ @@ -83,7 +101,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Generates a Message Authentication Code (MAC) cryptogram within Amazon Web Services Payment Cryptography.

You can use this operation to authenticate card-related data by using known data values to generate MAC for data validation between the sending and receiving parties. This operation uses message data, a secret encryption key and MAC algorithm to generate a unique MAC value for transmission. The receiving party of the MAC must use the same message data, secret encryption key and MAC algorithm to reproduce another MAC value for comparison.

You can use this operation to generate a DUPKT, CMAC, HMAC or EMV MAC by setting generation attributes and algorithm to the associated values. The MAC generation encryption key must have valid values for KeyUsage such as TR31_M7_HMAC_KEY for HMAC generation, and the key must have KeyModesOfUse set to Generate and Verify.

For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide.

Cross-account use: This operation can't be used across different Amazon Web Services accounts.

Related operations:

" + "documentation":"

Generates a Message Authentication Code (MAC) cryptogram within Amazon Web Services Payment Cryptography.

You can use this operation to authenticate card-related data by using known data values to generate MAC for data validation between the sending and receiving parties. This operation uses message data, a secret encryption key and MAC algorithm to generate a unique MAC value for transmission. The receiving party of the MAC must use the same message data, secret encryption key and MAC algorithm to reproduce another MAC value for comparison.

You can use this operation to generate a DUPKT, CMAC, HMAC or EMV MAC by setting generation attributes and algorithm to the associated values. The MAC generation encryption key must have valid values for KeyUsage such as TR31_M7_HMAC_KEY for HMAC generation, and the key must have KeyModesOfUse set to Generate.

For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide.

Cross-account use: This operation can't be used across different Amazon Web Services accounts.

Related operations:

" }, "GenerateMacEmvPinChange":{ "name":"GenerateMacEmvPinChange", @@ -155,7 +173,7 @@ {"shape":"ThrottlingException"}, {"shape":"InternalServerException"} ], - "documentation":"

Translates an encryption key between different wrapping keys without importing the key into Amazon Web Services Payment Cryptography.

This operation can be used when key material is frequently rotated, such as during every card transaction, and there is a need to avoid importing short-lived keys into Amazon Web Services Payment Cryptography. It translates short-lived transaction keys such as Pin Encryption Key (PEK) generated for each transaction and wrapped with an ECDH (Elliptic Curve Diffie-Hellman) derived wrapping key to another KEK (Key Encryption Key) wrapping key.

Before using this operation, you must first request the public key certificate of the ECC key pair generated within Amazon Web Services Payment Cryptography to establish an ECDH key agreement. In TranslateKeyData, the service uses its own ECC key pair, public certificate of receiving ECC key pair, and the key derivation parameters to generate a derived key. The service uses this derived key to unwrap the incoming transaction key received as a TR31WrappedKeyBlock and re-wrap using a user provided KEK to generate an outgoing Tr31WrappedKeyBlock. For more information on establishing ECDH derived keys, see the Creating keys in the Amazon Web Services Payment Cryptography User Guide.

For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide.

Cross-account use: This operation can't be used across different Amazon Web Services accounts.

Related operations:

" + "documentation":"

Translates a cryptographic key between different wrapping keys without importing the key into Amazon Web Services Payment Cryptography.

This operation can be used when key material is frequently rotated, such as during every card transaction, and there is a need to avoid importing short-lived keys into Amazon Web Services Payment Cryptography. It translates short-lived transaction keys such as PEK generated for each transaction and wrapped with an ECDH derived wrapping key to another KEK wrapping key.

Before using this operation, you must first request the public key certificate of the ECC key pair generated within Amazon Web Services Payment Cryptography to establish an ECDH key agreement. In TranslateKeyData, the service uses its own ECC key pair, public certificate of receiving ECC key pair, and the key derivation parameters to generate a derived key. The service uses this derived key to unwrap the incoming transaction key received as a TR31WrappedKeyBlock and re-wrap using a user provided KEK to generate an outgoing Tr31WrappedKeyBlock.

For information about valid keys for this operation, see Understanding key attributes and Key types for specific data operations in the Amazon Web Services Payment Cryptography User Guide.

Cross-account use: This operation can't be used across different Amazon Web Services accounts.

Related operations:

" }, "TranslatePinData":{ "name":"TranslatePinData", @@ -338,6 +356,46 @@ "pattern":"[0-9a-fA-F]+", "sensitive":true }, + "As2805KekValidationType":{ + "type":"structure", + "members":{ + "KekValidationRequest":{ + "shape":"KekValidationRequest", + "documentation":"

Parameter information for generating a KEK validation request during node-to-node initialization.

" + }, + "KekValidationResponse":{ + "shape":"KekValidationResponse", + "documentation":"

Parameter information for generating a KEK validation response during node-to-node initialization.

" + } + }, + "documentation":"

Parameter information for generating a random key for KEK validation to perform node-to-node initialization.

", + "union":true + }, + "As2805PekDerivationAttributes":{ + "type":"structure", + "required":[ + "SystemTraceAuditNumber", + "TransactionAmount" + ], + "members":{ + "SystemTraceAuditNumber":{ + "shape":"SystemTraceAuditNumberType", + "documentation":"

The system trace audit number for the transaction.

" + }, + "TransactionAmount":{ + "shape":"TransactionAmountType", + "documentation":"

The transaction amount for the transaction.

" + } + }, + "documentation":"

Parameter information to use a PEK derived using AS2805.

" + }, + "As2805RandomKeyMaterial":{ + "type":"string", + "max":48, + "min":32, + "pattern":"(?:[0-9a-fA-F]{32}|[0-9a-fA-F]{48})", + "sensitive":true + }, "AsymmetricEncryptionAttributes":{ "type":"structure", "members":{ @@ -1076,6 +1134,55 @@ "OFB" ] }, + "GenerateAs2805KekValidationInput":{ + "type":"structure", + "required":[ + "KeyIdentifier", + "KekValidationType", + "RandomKeySendVariantMask" + ], + "members":{ + "KeyIdentifier":{ + "shape":"KeyArnOrKeyAliasType", + "documentation":"

The keyARN of sending KEK that Amazon Web Services Payment Cryptography uses for node-to-node initialization

" + }, + "KekValidationType":{ + "shape":"As2805KekValidationType", + "documentation":"

Parameter information for generating a random key for KEK validation to perform node-to-node initialization.

" + }, + "RandomKeySendVariantMask":{ + "shape":"RandomKeySendVariantMask", + "documentation":"

The key variant to use for generating a random key for KEK validation during node-to-node initialization.

" + } + } + }, + "GenerateAs2805KekValidationOutput":{ + "type":"structure", + "required":[ + "KeyArn", + "KeyCheckValue", + "RandomKeySend", + "RandomKeyReceive" + ], + "members":{ + "KeyArn":{ + "shape":"KeyArn", + "documentation":"

The keyARN of sending KEK that Amazon Web Services Payment Cryptography validates for node-to-node initialization

" + }, + "KeyCheckValue":{ + "shape":"KeyCheckValue", + "documentation":"

The key check value (KCV) of the sending KEK that Amazon Web Services Payment Cryptography validates for node-to-node initialization.

" + }, + "RandomKeySend":{ + "shape":"As2805RandomKeyMaterial", + "documentation":"

The random key generated for sending KEK validation.

" + }, + "RandomKeyReceive":{ + "shape":"As2805RandomKeyMaterial", + "documentation":"

The random key generated for receiving KEK validation. The initiating node sends this key to its partner node for validation.

" + } + } + }, "GenerateCardValidationDataInput":{ "type":"structure", "required":[ @@ -1296,7 +1403,7 @@ }, "PinBlockFormat":{ "shape":"PinBlockFormatForPinData", - "documentation":"

The PIN encoding format for pin data generation as specified in ISO 9564. Amazon Web Services Payment Cryptography supports ISO_Format_0, ISO_Format_3 and ISO_Format_4.

The ISO_Format_0 PIN block format is equivalent to the ANSI X9.8, VISA-1, and ECI-1 PIN block formats. It is similar to a VISA-4 PIN block format. It supports a PIN from 4 to 12 digits in length.

The ISO_Format_3 PIN block format is the same as ISO_Format_0 except that the fill digits are random values from 10 to 15.

The ISO_Format_4 PIN block format is the only one supporting AES encryption. It is similar to ISO_Format_3 but doubles the pin block length by padding with fill digit A and random values from 10 to 15.

" + "documentation":"

The PIN encoding format for pin data generation as specified in ISO 9564. Amazon Web Services Payment Cryptography supports ISO_Format_0, ISO_Format_3 and ISO_Format_4.

The ISO_Format_0 PIN block format is equivalent to the ANSI X9.8, VISA-1, and ECI-1 PIN block formats. It is similar to a VISA-4 PIN block format. It supports a PIN from 4 to 12 digits in length.

The ISO_Format_3 PIN block format is the same as ISO_Format_0 except that the fill digits are random values from 10 to 15.

The ISO_Format_4 PIN block format is the only one supporting AES encryption.

" }, "EncryptionWrappedKey":{"shape":"WrappedKey"} } @@ -1608,6 +1715,28 @@ "exception":true, "fault":true }, + "KekValidationRequest":{ + "type":"structure", + "required":["DeriveKeyAlgorithm"], + "members":{ + "DeriveKeyAlgorithm":{ + "shape":"SymmetricKeyAlgorithm", + "documentation":"

The key derivation algorithm to use for generating a KEK validation request.

" + } + }, + "documentation":"

Parameter information for generating a KEK validation request during node-to-node initialization.

" + }, + "KekValidationResponse":{ + "type":"structure", + "required":["RandomKeySend"], + "members":{ + "RandomKeySend":{ + "shape":"As2805RandomKeyMaterial", + "documentation":"

The random key for generating a KEK validation response.

" + } + }, + "documentation":"

Parameter information for generating a KEK validation response during node-to-node initialization.

" + }, "KeyArn":{ "type":"string", "max":150, @@ -1653,7 +1782,7 @@ "KeyMaterial":{ "type":"string", "max":16384, - "min":48, + "min":32, "sensitive":true }, "MacAlgorithm":{ @@ -1666,7 +1795,8 @@ "HMAC_SHA224", "HMAC_SHA256", "HMAC_SHA384", - "HMAC_SHA512" + "HMAC_SHA512", + "AS2805_4_1" ] }, "MacAlgorithmDukpt":{ @@ -1992,6 +2122,13 @@ "pattern":"[0-9a-fA-F]+", "sensitive":true }, + "RandomKeySendVariantMask":{ + "type":"string", + "enum":[ + "VARIANT_MASK_82C0", + "VARIANT_MASK_82" + ] + }, "ReEncryptDataInput":{ "type":"structure", "required":[ @@ -2299,6 +2436,12 @@ "HMAC_SHA224" ] }, + "SystemTraceAuditNumberType":{ + "type":"string", + "max":6, + "min":6, + "pattern":"[0-9]+" + }, "ThrottlingException":{ "type":"structure", "members":{ @@ -2325,6 +2468,12 @@ "pattern":"[0-9a-fA-F]+", "sensitive":true }, + "TransactionAmountType":{ + "type":"string", + "max":12, + "min":12, + "pattern":"[0-9]+" + }, "TransactionDataType":{ "type":"string", "max":1024, @@ -2349,7 +2498,7 @@ }, "KeyCheckValueAlgorithm":{ "shape":"KeyCheckValueAlgorithm", - "documentation":"

The key check value (KCV) algorithm used for calculating the KCV.

" + "documentation":"

The key check value (KCV) algorithm used for calculating the KCV of the derived key.

" } } }, @@ -2408,6 +2557,10 @@ "OutgoingWrappedKey":{ "shape":"WrappedKey", "documentation":"

The WrappedKeyBlock containing the encryption key for encrypting outgoing PIN block data.

" + }, + "IncomingAs2805Attributes":{ + "shape":"As2805PekDerivationAttributes", + "documentation":"

The attributes and values to use for incoming AS2805 encryption key for PIN block translation.

" } } }, @@ -2438,24 +2591,39 @@ "members":{ "IsoFormat0":{ "shape":"TranslationPinDataIsoFormat034", - "documentation":"

Parameters that are required for ISO9564 PIN format 0 tranlation.

" + "documentation":"

Parameters that are required for ISO9564 PIN format 0 translation.

" }, "IsoFormat1":{ "shape":"TranslationPinDataIsoFormat1", - "documentation":"

Parameters that are required for ISO9564 PIN format 1 tranlation.

" + "documentation":"

Parameters that are required for ISO9564 PIN format 1 translation.

" }, "IsoFormat3":{ "shape":"TranslationPinDataIsoFormat034", - "documentation":"

Parameters that are required for ISO9564 PIN format 3 tranlation.

" + "documentation":"

Parameters that are required for ISO9564 PIN format 3 translation.

" }, "IsoFormat4":{ "shape":"TranslationPinDataIsoFormat034", - "documentation":"

Parameters that are required for ISO9564 PIN format 4 tranlation.

" + "documentation":"

Parameters that are required for ISO9564 PIN format 4 translation.

" + }, + "As2805Format0":{ + "shape":"TranslationPinDataAs2805Format0", + "documentation":"

Parameters that are required for AS2805 PIN format 0 translation.

" } }, "documentation":"

Parameters that are required for translation between ISO9564 PIN block formats 0,1,3,4.

", "union":true }, + "TranslationPinDataAs2805Format0":{ + "type":"structure", + "required":["PrimaryAccountNumber"], + "members":{ + "PrimaryAccountNumber":{ + "shape":"PrimaryAccountNumberType", + "documentation":"

The Primary Account Number (PAN) of the cardholder. A PAN is a unique identifier for a payment credit or debit card and associates the card to a specific account holder.

" + } + }, + "documentation":"

Parameters that are required for AS2805 PIN format 0 translation.

" + }, "TranslationPinDataIsoFormat034":{ "type":"structure", "required":["PrimaryAccountNumber"], @@ -2465,12 +2633,12 @@ "documentation":"

The Primary Account Number (PAN) of the cardholder. A PAN is a unique identifier for a payment credit or debit card and associates the card to a specific account holder.

" } }, - "documentation":"

Parameters that are required for tranlation between ISO9564 PIN format 0,3,4 tranlation.

" + "documentation":"

Parameters that are required for ISO9564 PIN format 0,3,4 translation.

" }, "TranslationPinDataIsoFormat1":{ "type":"structure", "members":{}, - "documentation":"

Parameters that are required for ISO9564 PIN format 1 tranlation.

" + "documentation":"

Parameters that are required for ISO9564 PIN format 1 translation.

" }, "ValidationDataType":{ "type":"string", diff --git a/awscli/botocore/data/payment-cryptography/2021-09-14/service-2.json b/awscli/botocore/data/payment-cryptography/2021-09-14/service-2.json index 7e52376227a0..d7fbcafc5c20 100644 --- a/awscli/botocore/data/payment-cryptography/2021-09-14/service-2.json +++ b/awscli/botocore/data/payment-cryptography/2021-09-14/service-2.json @@ -584,6 +584,15 @@ "type":"list", "member":{"shape":"Alias"} }, + "As2805KeyVariant":{ + "type":"string", + "enum":[ + "TERMINAL_MAJOR_KEY_VARIANT_00", + "PIN_ENCRYPTION_KEY_VARIANT_28", + "MESSAGE_AUTHENTICATION_KEY_VARIANT_24", + "DATA_ENCRYPTION_KEY_VARIANT_22" + ] + }, "Boolean":{ "type":"boolean", "box":true @@ -617,7 +626,7 @@ }, "Country":{ "shape":"CertificateSubjectTypeCountryString", - "documentation":"

The city you provide to create the certificate signing request.

" + "documentation":"

The country you provide to create the certificate signing request.

" }, "StateOrProvince":{ "shape":"CertificateSubjectTypeStateOrProvinceString", @@ -883,6 +892,21 @@ "min":16, "pattern":"(?:[0-9a-fA-F][0-9a-fA-F])+" }, + "ExportAs2805KeyCryptogram":{ + "type":"structure", + "required":[ + "WrappingKeyIdentifier", + "As2805KeyVariant" + ], + "members":{ + "WrappingKeyIdentifier":{"shape":"KeyArnOrKeyAliasType"}, + "As2805KeyVariant":{ + "shape":"As2805KeyVariant", + "documentation":"

The cryptographic usage of the key under export.

" + } + }, + "documentation":"

Parameter information for key material export using AS2805 key cryptogram format.

" + }, "ExportAttributes":{ "type":"structure", "members":{ @@ -1013,6 +1037,10 @@ "DiffieHellmanTr31KeyBlock":{ "shape":"ExportDiffieHellmanTr31KeyBlock", "documentation":"

Key derivation parameter information for key material export using asymmetric ECDH key exchange method.

" + }, + "As2805KeyCryptogram":{ + "shape":"ExportAs2805KeyCryptogram", + "documentation":"

Parameter information for key material export using AS2805 key cryptogram format.

" } }, "documentation":"

Parameter information for key material export from Amazon Web Services Payment Cryptography using TR-31 or TR-34 or RSA wrap and unwrap key exchange method.

", @@ -1308,6 +1336,38 @@ "min":20, "pattern":"[0-9A-F]{20}$|^[0-9A-F]{24}" }, + "ImportAs2805KeyCryptogram":{ + "type":"structure", + "required":[ + "As2805KeyVariant", + "KeyModesOfUse", + "KeyAlgorithm", + "Exportable", + "WrappingKeyIdentifier", + "WrappedKeyCryptogram" + ], + "members":{ + "As2805KeyVariant":{ + "shape":"As2805KeyVariant", + "documentation":"

The cryptographic usage of the key under import.

" + }, + "KeyModesOfUse":{"shape":"KeyModesOfUse"}, + "KeyAlgorithm":{ + "shape":"KeyAlgorithm", + "documentation":"

The key algorithm of the key under import.

" + }, + "Exportable":{ + "shape":"Boolean", + "documentation":"

Specifies whether the key is exportable. This data is immutable after the key is imported.

" + }, + "WrappingKeyIdentifier":{"shape":"KeyArnOrKeyAliasType"}, + "WrappedKeyCryptogram":{ + "shape":"WrappedKeyCryptogram", + "documentation":"

The wrapped key cryptogram under import.

" + } + }, + "documentation":"

Parameter information for key material import using AS2805 key cryptogram format.

" + }, "ImportDiffieHellmanTr31KeyBlock":{ "type":"structure", "required":[ @@ -1434,6 +1494,10 @@ "DiffieHellmanTr31KeyBlock":{ "shape":"ImportDiffieHellmanTr31KeyBlock", "documentation":"

Key derivation parameter information for key material import using asymmetric ECDH key exchange method.

" + }, + "As2805KeyCryptogram":{ + "shape":"ImportAs2805KeyCryptogram", + "documentation":"

Parameter information for key material import using AS2805 key cryptogram format.

" } }, "documentation":"

Parameter information for key material import into Amazon Web Services Payment Cryptography using TR-31 or TR-34 or RSA wrap and unwrap key exchange method.

", @@ -1886,6 +1950,7 @@ "TR31_K0_KEY_ENCRYPTION_KEY", "TR31_K1_KEY_BLOCK_PROTECTION_KEY", "TR31_K3_ASYMMETRIC_KEY_FOR_KEY_AGREEMENT", + "TR31_M0_ISO_16609_MAC_KEY", "TR31_M3_ISO_9797_3_MAC_KEY", "TR31_M1_ISO_9797_1_MAC_KEY", "TR31_M6_ISO_9797_5_CMAC_KEY", diff --git a/awscli/botocore/data/pcs/2023-02-10/service-2.json b/awscli/botocore/data/pcs/2023-02-10/service-2.json index 4d8976cb1fe7..30ab02146bc8 100644 --- a/awscli/botocore/data/pcs/2023-02-10/service-2.json +++ b/awscli/botocore/data/pcs/2023-02-10/service-2.json @@ -382,7 +382,7 @@ }, "mode":{ "shape":"AccountingMode", - "documentation":"

The default value for mode is STANDARD. A value of STANDARD means Slurm accounting is enabled.

" + "documentation":"

The default value for mode is NONE. A value of STANDARD means Slurm accounting is enabled.

" } }, "documentation":"

The accounting configuration includes configurable settings for Slurm accounting. It's a property of the ClusterSlurmConfiguration object.

" @@ -410,7 +410,7 @@ }, "mode":{ "shape":"AccountingMode", - "documentation":"

The default value for mode is STANDARD. A value of STANDARD means Slurm accounting is enabled.

" + "documentation":"

The default value for mode is NONE. A value of STANDARD means Slurm accounting is enabled.

" } }, "documentation":"

The accounting configuration includes configurable settings for Slurm accounting. It's a property of the ClusterSlurmConfiguration object.

" @@ -1252,14 +1252,14 @@ "members":{ "secretArn":{ "shape":"String", - "documentation":"

The Amazon Resource Name (ARN) of the AWS Secrets Manager secret containing the JWT key.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager secret containing the JWT key.

" }, "secretVersion":{ "shape":"String", - "documentation":"

The version of the AWS Secrets Manager secret containing the JWT key.

" + "documentation":"

The version of the Amazon Web Services Secrets Manager secret containing the JWT key.

" } }, - "documentation":"

The JWT key stored in AWS Secrets Manager for Slurm REST API authentication.

" + "documentation":"

The JWT key stored in Amazon Web Services Secrets Manager for Slurm REST API authentication.

" }, "ListClustersRequest":{ "type":"structure", @@ -1842,7 +1842,7 @@ "members":{ "mode":{ "shape":"SlurmRestMode", - "documentation":"

The default value for mode is STANDARD. A value of STANDARD means the Slurm REST API is enabled.

" + "documentation":"

The default value for mode is NONE. A value of STANDARD means the Slurm REST API is enabled.

" } }, "documentation":"

The Slurm REST API configuration includes settings for enabling and configuring the Slurm REST API. It's a property of the ClusterSlurmConfiguration object.

" @@ -1860,7 +1860,7 @@ "members":{ "mode":{ "shape":"SlurmRestMode", - "documentation":"

The default value for mode is STANDARD. A value of STANDARD means the Slurm REST API is enabled.

" + "documentation":"

The default value for mode is NONE. A value of STANDARD means the Slurm REST API is enabled.

" } }, "documentation":"

The Slurm REST API configuration includes settings for enabling and configuring the Slurm REST API. It's a property of the ClusterSlurmConfiguration object.

" @@ -1982,7 +1982,7 @@ }, "mode":{ "shape":"AccountingMode", - "documentation":"

The default value for mode is STANDARD. A value of STANDARD means Slurm accounting is enabled.

" + "documentation":"

The default value for mode is NONE. A value of STANDARD means Slurm accounting is enabled.

" } }, "documentation":"

The accounting configuration includes configurable settings for Slurm accounting.

" @@ -2161,7 +2161,7 @@ "members":{ "mode":{ "shape":"SlurmRestMode", - "documentation":"

The default value for mode is STANDARD. A value of STANDARD means the Slurm REST API is enabled.

" + "documentation":"

The default value for mode is NONE. A value of STANDARD means the Slurm REST API is enabled.

" } }, "documentation":"

The Slurm REST API configuration includes settings for enabling and configuring the Slurm REST API.

" diff --git a/awscli/botocore/data/pinpoint-sms-voice-v2/2022-03-31/service-2.json b/awscli/botocore/data/pinpoint-sms-voice-v2/2022-03-31/service-2.json index 3d1ff02bb5f3..0d3aa1449f6e 100644 --- a/awscli/botocore/data/pinpoint-sms-voice-v2/2022-03-31/service-2.json +++ b/awscli/botocore/data/pinpoint-sms-voice-v2/2022-03-31/service-2.json @@ -122,7 +122,7 @@ {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], - "documentation":"

Creates a new opt-out list.

If the opt-out list name already exists, an error is returned.

An opt-out list is a list of phone numbers that are opted out, meaning you can't send SMS or voice messages to them. If end user replies with the keyword \"STOP,\" an entry for the phone number is added to the opt-out list. In addition to STOP, your recipients can use any supported opt-out keyword, such as CANCEL or OPTOUT. For a list of supported opt-out keywords, see SMS opt out in the End User MessagingSMS User Guide.

" + "documentation":"

Creates a new opt-out list.

If the opt-out list name already exists, an error is returned.

An opt-out list is a list of phone numbers that are opted out, meaning you can't send SMS or voice messages to them. If end user replies with the keyword \"STOP,\" an entry for the phone number is added to the opt-out list. In addition to STOP, your recipients can use any supported opt-out keyword, such as CANCEL or OPTOUT. For a list of supported opt-out keywords, see SMS opt out in the End User Messaging SMS User Guide.

" }, "CreatePool":{ "name":"CreatePool", @@ -354,7 +354,7 @@ {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], - "documentation":"

Deletes an existing keyword from an origination phone number or pool.

A keyword is a word that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a message that begins with a keyword, End User MessagingSMS responds with a customizable message.

Keywords \"HELP\" and \"STOP\" can't be deleted or modified.

" + "documentation":"

Deletes an existing keyword from an origination phone number or pool.

A keyword is a word that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a message that begins with a keyword, End User Messaging SMS responds with a customizable message.

Keywords \"HELP\" and \"STOP\" can't be deleted or modified.

" }, "DeleteMediaMessageSpendLimitOverride":{ "name":"DeleteMediaMessageSpendLimitOverride", @@ -530,7 +530,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Deletes the resource-based policy document attached to the End User MessagingSMS resource. A shared resource can be a Pool, Opt-out list, Sender Id, or Phone number.

" + "documentation":"

Deletes the resource-based policy document attached to the End User Messaging SMS resource. A shared resource can be a Pool, Opt-out list, Sender Id, or Phone number.

" }, "DeleteTextMessageSpendLimitOverride":{ "name":"DeleteTextMessageSpendLimitOverride", @@ -546,7 +546,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Deletes an account-level monthly spending limit override for sending text messages. Deleting a spend limit override will set the EnforcedLimit to equal the MaxLimit, which is controlled by Amazon Web Services. For more information on spend limits (quotas) see Quotas in the End User MessagingSMS User Guide.

" + "documentation":"

Deletes an account-level monthly spending limit override for sending text messages. Deleting a spend limit override will set the EnforcedLimit to equal the MaxLimit, which is controlled by Amazon Web Services. For more information on spend limits (quotas) see Quotas in the End User Messaging SMS User Guide.

" }, "DeleteVerifiedDestinationNumber":{ "name":"DeleteVerifiedDestinationNumber", @@ -580,7 +580,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Deletes an account level monthly spend limit override for sending voice messages. Deleting a spend limit override sets the EnforcedLimit equal to the MaxLimit, which is controlled by Amazon Web Services. For more information on spending limits (quotas) see Quotas in the End User MessagingSMS User Guide.

" + "documentation":"

Deletes an account level monthly spend limit override for sending voice messages. Deleting a spend limit override sets the EnforcedLimit equal to the MaxLimit, which is controlled by Amazon Web Services. For more information on spending limits (quotas) see Quotas in the End User Messaging SMS User Guide.

" }, "DescribeAccountAttributes":{ "name":"DescribeAccountAttributes", @@ -612,7 +612,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Describes the current End User MessagingSMS SMS Voice V2 resource quotas for your account. The description for a quota includes the quota name, current usage toward that quota, and the quota's maximum value.

When you establish an Amazon Web Services account, the account has initial quotas on the maximum number of configuration sets, opt-out lists, phone numbers, and pools that you can create in a given Region. For more information see Quotas in the End User MessagingSMS User Guide.

" + "documentation":"

Describes the current End User Messaging SMS SMS Voice V2 resource quotas for your account. The description for a quota includes the quota name, current usage toward that quota, and the quota's maximum value.

When you establish an Amazon Web Services account, the account has initial quotas on the maximum number of configuration sets, opt-out lists, phone numbers, and pools that you can create in a given Region. For more information see Quotas in the End User Messaging SMS User Guide.

" }, "DescribeConfigurationSets":{ "name":"DescribeConfigurationSets", @@ -646,7 +646,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Describes the specified keywords or all keywords on your origination phone number or pool.

A keyword is a word that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a message that begins with a keyword, End User MessagingSMS responds with a customizable message.

If you specify a keyword that isn't valid, an error is returned.

" + "documentation":"

Describes the specified keywords or all keywords on your origination phone number or pool.

A keyword is a word that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a message that begins with a keyword, End User Messaging SMS responds with a customizable message.

If you specify a keyword that isn't valid, an error is returned.

" }, "DescribeOptOutLists":{ "name":"DescribeOptOutLists", @@ -880,7 +880,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Describes the current monthly spend limits for sending voice and text messages.

When you establish an Amazon Web Services account, the account has initial monthly spend limit in a given Region. For more information on increasing your monthly spend limit, see Requesting increases to your monthly SMS, MMS, or Voice spending quota in the End User MessagingSMS User Guide.

" + "documentation":"

Describes the current monthly spend limits for sending voice and text messages.

When you establish an Amazon Web Services account, the account has initial monthly spend limit in a given Region. For more information on increasing your monthly spend limit, see Requesting increases to your monthly SMS, MMS, or Voice spending quota in the End User Messaging SMS User Guide.

" }, "DescribeVerifiedDestinationNumbers":{ "name":"DescribeVerifiedDestinationNumbers", @@ -985,7 +985,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Retrieves the JSON text of the resource-based policy document attached to the End User MessagingSMS resource. A shared resource can be a Pool, Opt-out list, Sender Id, or Phone number.

" + "documentation":"

Retrieves the JSON text of the resource-based policy document attached to the End User Messaging SMS resource. A shared resource can be a Pool, Opt-out list, Sender Id, or Phone number.

" }, "ListPoolOriginationIdentities":{ "name":"ListPoolOriginationIdentities", @@ -1072,7 +1072,7 @@ {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], - "documentation":"

Creates or updates a keyword configuration on an origination phone number or pool.

A keyword is a word that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a message that begins with a keyword, End User MessagingSMS responds with a customizable message.

If you specify a keyword that isn't valid, an error is returned.

" + "documentation":"

Creates or updates a keyword configuration on an origination phone number or pool.

A keyword is a word that you can search for on a particular phone number or pool. It is also a specific word or phrase that an end user can send to your number to elicit a response, such as an informational message or a special offer. When your number receives a message that begins with a keyword, End User Messaging SMS responds with a customizable message.

If you specify a keyword that isn't valid, an error is returned.

" }, "PutMessageFeedback":{ "name":"PutMessageFeedback", @@ -1159,7 +1159,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Attaches a resource-based policy to a End User MessagingSMS resource(phone number, sender Id, phone poll, or opt-out list) that is used for sharing the resource. A shared resource can be a Pool, Opt-out list, Sender Id, or Phone number. For more information about resource-based policies, see Working with shared resources in the End User MessagingSMS User Guide.

" + "documentation":"

Attaches a resource-based policy to an End User Messaging SMS resource (phone number, sender Id, phone pool, or opt-out list) that is used for sharing the resource. A shared resource can be a Pool, Opt-out list, Sender Id, or Phone number. For more information about resource-based policies, see Working with shared resources in the End User Messaging SMS User Guide.

" }, "ReleasePhoneNumber":{ "name":"ReleasePhoneNumber", @@ -1214,7 +1214,7 @@ {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], - "documentation":"

Request an origination phone number for use in your account. For more information on phone number request see Request a phone number in the End User MessagingSMS User Guide.

" + "documentation":"

Request an origination phone number for use in your account. For more information on phone number request see Request a phone number in the End User Messaging SMS User Guide.

" }, "RequestSenderId":{ "name":"RequestSenderId", @@ -1289,7 +1289,7 @@ {"shape":"ConflictException"}, {"shape":"InternalServerException"} ], - "documentation":"

Creates a new text message and sends it to a recipient's phone number. SendTextMessage only sends an SMS message to one recipient each time it is invoked.

SMS throughput limits are measured in Message Parts per Second (MPS). Your MPS limit depends on the destination country of your messages, as well as the type of phone number (origination number) that you use to send the message. For more information about MPS, see Message Parts per Second (MPS) limits in the End User MessagingSMS User Guide.

" + "documentation":"

Creates a new text message and sends it to a recipient's phone number. SendTextMessage only sends an SMS message to one recipient each time it is invoked.

SMS throughput limits are measured in Message Parts per Second (MPS). Your MPS limit depends on the destination country of your messages, as well as the type of phone number (origination number) that you use to send the message. For more information about MPS, see Message Parts per Second (MPS) limits in the End User Messaging SMS User Guide.

" }, "SendVoiceMessage":{ "name":"SendVoiceMessage", @@ -1460,7 +1460,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Adds or overwrites only the specified tags for the specified resource. When you specify an existing tag key, the value is overwritten with the new value. Each tag consists of a key and an optional value. Tag keys must be unique per resource. For more information about tags, see Tags in the End User MessagingSMS User Guide.

" + "documentation":"

Adds or overwrites only the specified tags for the specified resource. When you specify an existing tag key, the value is overwritten with the new value. Each tag consists of a key and an optional value. Tag keys must be unique per resource. For more information about tags, see Tags in the End User Messaging SMS User Guide.

" }, "UntagResource":{ "name":"UntagResource", @@ -1477,7 +1477,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Removes the association of the specified tags from a resource. For more information on tags see Tags in the End User MessagingSMS User Guide.

" + "documentation":"

Removes the association of the specified tags from a resource. For more information on tags see Tags in the End User Messaging SMS User Guide.

" }, "UpdateEventDestination":{ "name":"UpdateEventDestination", @@ -1708,11 +1708,11 @@ "members":{ "PoolId":{ "shape":"PoolIdOrArn", - "documentation":"

The pool to update with the new Identity. This value can be either the PoolId or PoolArn, and you can find these values using DescribePools.

If you are using a shared End User MessagingSMS; resource then you must use the full Amazon Resource Name(ARN).

" + "documentation":"

The pool to update with the new Identity. This value can be either the PoolId or PoolArn, and you can find these values using DescribePools.

If you are using a shared End User Messaging SMS resource then you must use the full Amazon Resource Name(ARN).

" }, "OriginationIdentity":{ "shape":"PhoneOrSenderIdOrArn", - "documentation":"

The origination identity to use, such as PhoneNumberId, PhoneNumberArn, SenderId, or SenderIdArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn, while DescribeSenderIds can be used to get the values for SenderId and SenderIdArn.

If you are using a shared End User MessagingSMS resource then you must use the full Amazon Resource Name(ARN).

" + "documentation":"

The origination identity to use, such as PhoneNumberId, PhoneNumberArn, SenderId, or SenderIdArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn, while DescribeSenderIds can be used to get the values for SenderId and SenderIdArn.

If you are using a shared End User Messaging SMS resource then you must use the full Amazon Resource Name(ARN).

" }, "IsoCountryCode":{ "shape":"IsoCountryCode", @@ -1851,15 +1851,15 @@ }, "DialingCountryCode":{ "shape":"DialingCountryCodeType", - "documentation":"

The numeric dialing code for the country or region where the phone number was originally registered.

" + "documentation":"

The country or region numeric dialing code for the phone number.

" }, "IsoCountryCode":{ "shape":"IsoCountryCode", - "documentation":"

The two-character code, in ISO 3166-1 alpha-2 format, for the country or region where the phone number was originally registered.

" + "documentation":"

The two-character country or region code, in ISO 3166-1 alpha-2 format, for the phone number.

" }, "Country":{ "shape":"String", - "documentation":"

The name of the country where the phone number was originally registered.

" + "documentation":"

The name of the country or region for the phone number.

" }, "MCC":{ "shape":"MCCType", @@ -2142,7 +2142,7 @@ }, "MatchingEventTypes":{ "shape":"EventTypeList", - "documentation":"

An array of event types that determine which events to log. If \"ALL\" is used, then End User MessagingSMS logs every event type.

The TEXT_SENT event type is not supported.

" + "documentation":"

An array of event types that determine which events to log. If \"ALL\" is used, then End User Messaging SMS logs every event type.

The TEXT_SENT event type is not supported.

" }, "CloudWatchLogsDestination":{ "shape":"CloudWatchLogsDestination", @@ -2230,7 +2230,7 @@ "members":{ "OriginationIdentity":{ "shape":"PhoneOrSenderIdOrArn", - "documentation":"

The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn, and use DescribeSenderIds can be used to get the values for SenderId and SenderIdArn.

After the pool is created you can add more origination identities to the pool by using AssociateOriginationIdentity.

If you are using a shared End User MessagingSMS resource then you must use the full Amazon Resource Name(ARN).

" + "documentation":"

The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn, and use DescribeSenderIds can be used to get the values for SenderId and SenderIdArn.

After the pool is created you can add more origination identities to the pool by using AssociateOriginationIdentity.

If you are using a shared End User Messaging SMS resource then you must use the full Amazon Resource Name(ARN).

" }, "IsoCountryCode":{ "shape":"IsoCountryCode", @@ -2288,7 +2288,7 @@ }, "SelfManagedOptOutsEnabled":{ "shape":"PrimitiveBoolean", - "documentation":"

By default this is set to false. When set to false, and an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, End User MessagingSMS automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests.

" + "documentation":"

By default this is set to false. When set to false, and an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, End User Messaging SMS automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests.

" }, "OptOutListName":{ "shape":"OptOutListName", @@ -2803,7 +2803,7 @@ "members":{ "OriginationIdentity":{ "shape":"PhoneOrPoolIdOrArn", - "documentation":"

The origination identity to use such as a PhoneNumberId, PhoneNumberArn, PoolId or PoolArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn and DescribePools to find the values of PoolId and PoolArn.

If you are using a shared End User MessagingSMS resource then you must use the full Amazon Resource Name(ARN).

" + "documentation":"

The origination identity to use such as a PhoneNumberId, PhoneNumberArn, PoolId or PoolArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn and DescribePools to find the values of PoolId and PoolArn.

If you are using a shared End User Messaging SMS resource then you must use the full Amazon Resource Name(ARN).

" }, "Keyword":{ "shape":"Keyword", @@ -2855,7 +2855,7 @@ "members":{ "OptOutListName":{ "shape":"OptOutListNameOrArn", - "documentation":"

The OptOutListName or OptOutListArn of the OptOutList to delete. You can use DescribeOptOutLists to find the values for OptOutListName and OptOutListArn.

If you are using a shared End User MessagingSMS resource then you must use the full Amazon Resource Name(ARN).

" + "documentation":"

The OptOutListName or OptOutListArn of the OptOutList to delete. You can use DescribeOptOutLists to find the values for OptOutListName and OptOutListArn.

If you are using a shared End User Messaging SMS resource then you must use the full Amazon Resource Name(ARN).

" } } }, @@ -2885,7 +2885,7 @@ "members":{ "OptOutListName":{ "shape":"OptOutListNameOrArn", - "documentation":"

The OptOutListName or OptOutListArn to remove the phone number from.

If you are using a shared End User MessagingSMS resource then you must use the full Amazon Resource Name(ARN).

" + "documentation":"

The OptOutListName or OptOutListArn to remove the phone number from.

If you are using a shared End User Messaging SMS resource then you must use the full Amazon Resource Name(ARN).

" }, "OptedOutNumber":{ "shape":"PhoneNumber", @@ -2924,7 +2924,7 @@ "members":{ "PoolId":{ "shape":"PoolIdOrArn", - "documentation":"

The PoolId or PoolArn of the pool to delete. You can use DescribePools to find the values for PoolId and PoolArn .

If you are using a shared End User MessagingSMS resource then you must use the full Amazon Resource Name(ARN).

" + "documentation":"

The PoolId or PoolArn of the pool to delete. You can use DescribePools to find the values for PoolId and PoolArn.

If you are using a shared End User Messaging SMS resource then you must use the full Amazon Resource Name(ARN).

" } } }, @@ -2961,7 +2961,7 @@ }, "SelfManagedOptOutsEnabled":{ "shape":"PrimitiveBoolean", - "documentation":"

By default this is set to false. When set to false and an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, End User MessagingSMS automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests.

" + "documentation":"

By default this is set to false. When set to false and an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, End User Messaging SMS automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests.

" }, "OptOutListName":{ "shape":"OptOutListName", @@ -3238,7 +3238,7 @@ "members":{ "ResourceArn":{ "shape":"AmazonResourceName", - "documentation":"

The Amazon Resource Name (ARN) of the End User MessagingSMS resource you're deleting the resource-based policy from.

" + "documentation":"

The Amazon Resource Name (ARN) of the End User Messaging SMS resource you're deleting the resource-based policy from.

" } } }, @@ -3247,7 +3247,7 @@ "members":{ "ResourceArn":{ "shape":"AmazonResourceName", - "documentation":"

The Amazon Resource Name (ARN) of the End User MessagingSMS resource that the resource-based policy was deleted from.

" + "documentation":"

The Amazon Resource Name (ARN) of the End User Messaging SMS resource that the resource-based policy was deleted from.

" }, "Policy":{ "shape":"ResourcePolicy", @@ -3420,7 +3420,7 @@ "members":{ "OriginationIdentity":{ "shape":"PhoneOrPoolIdOrArn", - "documentation":"

The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used to get the values for SenderId and SenderIdArn.

If you are using a shared End User MessagingSMS resource then you must use the full Amazon Resource Name(ARN).

" + "documentation":"

The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used to get the values for SenderId and SenderIdArn.

If you are using a shared End User Messaging SMS resource then you must use the full Amazon Resource Name(ARN).

" }, "Keywords":{ "shape":"KeywordList", @@ -3466,7 +3466,7 @@ "members":{ "OptOutListNames":{ "shape":"OptOutListNameList", - "documentation":"

The OptOutLists to show the details of. This is an array of strings that can be either the OptOutListName or OptOutListArn.

If you are using a shared End User MessagingSMS resource then you must use the full Amazon Resource Name(ARN).

" + "documentation":"

The OptOutLists to show the details of. This is an array of strings that can be either the OptOutListName or OptOutListArn.

If you are using a shared End User Messaging SMS resource then you must use the full Amazon Resource Name(ARN).

" }, "NextToken":{ "shape":"NextToken", @@ -3501,7 +3501,7 @@ "members":{ "OptOutListName":{ "shape":"OptOutListNameOrArn", - "documentation":"

The OptOutListName or OptOutListArn of the OptOutList. You can use DescribeOptOutLists to find the values for OptOutListName and OptOutListArn.

If you are using a shared End User MessagingSMS resource then you must use the full Amazon Resource Name(ARN).

" + "documentation":"

The OptOutListName or OptOutListArn of the OptOutList. You can use DescribeOptOutLists to find the values for OptOutListName and OptOutListArn.

If you are using a shared End User Messaging SMS resource then you must use the full Amazon Resource Name(ARN).

" }, "OptedOutNumbers":{ "shape":"OptedOutNumberList", @@ -3547,7 +3547,7 @@ "members":{ "PhoneNumberIds":{ "shape":"PhoneNumberIdList", - "documentation":"

The unique identifier of phone numbers to find information about. This is an array of strings that can be either the PhoneNumberId or PhoneNumberArn.

If you are using a shared End User MessagingSMS resource then you must use the full Amazon Resource Name(ARN).

" + "documentation":"

The unique identifier of phone numbers to find information about. This is an array of strings that can be either the PhoneNumberId or PhoneNumberArn.

If you are using a shared End User Messaging SMS resource then you must use the full Amazon Resource Name(ARN).

" }, "Filters":{ "shape":"PhoneNumberFilterList", @@ -3585,7 +3585,7 @@ "members":{ "PoolIds":{ "shape":"PoolIdList", - "documentation":"

The unique identifier of pools to find. This is an array of strings that can be either the PoolId or PoolArn.

If you are using a shared End User MessagingSMS resource then you must use the full Amazon Resource Name(ARN).

" + "documentation":"

The unique identifier of pools to find. This is an array of strings that can be either the PoolId or PoolArn.

If you are using a shared End User Messaging SMS resource then you must use the full Amazon Resource Name(ARN).

" }, "Filters":{ "shape":"PoolFilterList", @@ -3965,7 +3965,7 @@ "members":{ "SenderIds":{ "shape":"SenderIdList", - "documentation":"

An array of SenderIdAndCountry objects to search for.

If you are using a shared End User MessagingSMS resource then you must use the full Amazon Resource Name(ARN).

" + "documentation":"

An array of SenderIdAndCountry objects to search for.

If you are using a shared End User Messaging SMS resource then you must use the full Amazon Resource Name(ARN).

" }, "Filters":{ "shape":"SenderIdFilterList", @@ -4105,11 +4105,11 @@ "members":{ "PoolId":{ "shape":"PoolIdOrArn", - "documentation":"

The unique identifier for the pool to disassociate with the origination identity. This value can be either the PoolId or PoolArn.

If you are using a shared End User MessagingSMS resource then you must use the full Amazon Resource Name(ARN).

" + "documentation":"

The unique identifier for the pool to disassociate with the origination identity. This value can be either the PoolId or PoolArn.

If you are using a shared End User Messaging SMS resource then you must use the full Amazon Resource Name(ARN).

" }, "OriginationIdentity":{ "shape":"PhoneOrSenderIdOrArn", - "documentation":"

The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers find the values for PhoneNumberId and PhoneNumberArn, or use DescribeSenderIds to get the values for SenderId and SenderIdArn.

If you are using a shared End User MessagingSMS resource then you must use the full Amazon Resource Name(ARN).

" + "documentation":"

The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers to find the values for PhoneNumberId and PhoneNumberArn, or use DescribeSenderIds to get the values for SenderId and SenderIdArn.

If you are using a shared End User Messaging SMS resource then you must use the full Amazon Resource Name(ARN).

" }, "IsoCountryCode":{ "shape":"IsoCountryCode", @@ -4418,7 +4418,7 @@ }, "CountryRuleSet":{ "shape":"ProtectConfigurationCountryRuleSet", - "documentation":"

A map of ProtectConfigurationCountryRuleSetInformation objects that contain the details for the requested NumberCapability. The Key is the two-letter ISO country code. For a list of supported ISO country codes, see Supported countries and regions (SMS channel) in the End User MessagingSMS User Guide.

" + "documentation":"

A map of ProtectConfigurationCountryRuleSetInformation objects that contain the details for the requested NumberCapability. The Key is the two-letter ISO country code. For a list of supported ISO country codes, see Supported countries and regions (SMS channel) in the End User Messaging SMS User Guide.

" } } }, @@ -4428,7 +4428,7 @@ "members":{ "ResourceArn":{ "shape":"AmazonResourceName", - "documentation":"

The Amazon Resource Name (ARN) of the End User MessagingSMS resource attached to the resource-based policy.

" + "documentation":"

The Amazon Resource Name (ARN) of the End User Messaging SMS resource attached to the resource-based policy.

" } } }, @@ -4437,11 +4437,11 @@ "members":{ "ResourceArn":{ "shape":"AmazonResourceName", - "documentation":"

The Amazon Resource Name (ARN) of the End User MessagingSMS resource attached to the resource-based policy.

" + "documentation":"

The Amazon Resource Name (ARN) of the End User Messaging SMS resource attached to the resource-based policy.

" }, "Policy":{ "shape":"ResourcePolicy", - "documentation":"

The JSON formatted string that contains the resource-based policy attached to the End User MessagingSMS resource.

" + "documentation":"

The JSON formatted string that contains the resource-based policy attached to the End User Messaging SMS resource.

" }, "CreatedTimestamp":{ "shape":"Timestamp", @@ -4602,7 +4602,7 @@ "members":{ "PoolId":{ "shape":"PoolIdOrArn", - "documentation":"

The unique identifier for the pool. This value can be either the PoolId or PoolArn.

If you are using a shared End User MessagingSMS resource then you must use the full Amazon Resource Name(ARN).

" + "documentation":"

The unique identifier for the pool. This value can be either the PoolId or PoolArn.

If you are using a shared End User Messaging SMS resource then you must use the full Amazon Resource Name(ARN).

" }, "Filters":{ "shape":"PoolOriginationIdentitiesFilterList", @@ -5168,7 +5168,7 @@ }, "SelfManagedOptOutsEnabled":{ "shape":"PrimitiveBoolean", - "documentation":"

When set to false and an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, End User MessagingSMS automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out request. For more information see Self-managed opt-outs

" + "documentation":"

When set to false and an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, End User Messaging SMS automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests. For more information see Self-managed opt-outs

" }, "OptOutListName":{ "shape":"OptOutListName", @@ -5316,7 +5316,7 @@ }, "SelfManagedOptOutsEnabled":{ "shape":"PrimitiveBoolean", - "documentation":"

When set to false, an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, End User MessagingSMS automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests. For more information see Self-managed opt-outs

" + "documentation":"

When set to false and an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, End User Messaging SMS automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests. For more information see Self-managed opt-outs

" }, "OptOutListName":{ "shape":"OptOutListName", @@ -5587,7 +5587,7 @@ "members":{ "OriginationIdentity":{ "shape":"PhoneOrPoolIdOrArn", - "documentation":"

The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers get the values for PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used to get the values for SenderId and SenderIdArn.

If you are using a shared End User MessagingSMS resource then you must use the full Amazon Resource Name(ARN).

" + "documentation":"

The origination identity to use such as a PhoneNumberId, PhoneNumberArn, SenderId or SenderIdArn. You can use DescribePhoneNumbers to get the values for PhoneNumberId and PhoneNumberArn while DescribeSenderIds can be used to get the values for SenderId and SenderIdArn.

If you are using a shared End User Messaging SMS resource then you must use the full Amazon Resource Name(ARN).

" }, "Keyword":{ "shape":"Keyword", @@ -5671,7 +5671,7 @@ "members":{ "OptOutListName":{ "shape":"OptOutListNameOrArn", - "documentation":"

The OptOutListName or OptOutListArn to add the phone number to.

If you are using a shared End User MessagingSMS resource then you must use the full Amazon Resource Name(ARN).

" + "documentation":"

The OptOutListName or OptOutListArn to add the phone number to.

If you are using a shared End User Messaging SMS resource then you must use the full Amazon Resource Name(ARN).

" }, "OptedOutNumber":{ "shape":"PhoneNumber", @@ -5852,7 +5852,7 @@ "members":{ "ResourceArn":{ "shape":"AmazonResourceName", - "documentation":"

The Amazon Resource Name (ARN) of the End User MessagingSMS resource to attach the resource-based policy to.

" + "documentation":"

The Amazon Resource Name (ARN) of the End User Messaging SMS resource to attach the resource-based policy to.

" }, "Policy":{ "shape":"ResourcePolicy", @@ -5865,7 +5865,7 @@ "members":{ "ResourceArn":{ "shape":"AmazonResourceName", - "documentation":"

The Amazon Resource Name (ARN) of the End User MessagingSMS resource attached to the resource-based policy.

" + "documentation":"

The Amazon Resource Name (ARN) of the End User Messaging SMS resource attached to the resource-based policy.

" }, "Policy":{ "shape":"ResourcePolicy", @@ -6179,6 +6179,10 @@ "DeniedReason":{ "shape":"String", "documentation":"

A description of why the registration was denied.

" + }, + "Feedback":{ + "shape":"String", + "documentation":"

Feedback provided for this specific field during the registration review process. This may include validation errors, suggestions for improvement, or additional requirements.

" } }, "documentation":"

Provides the values of the specified field.

" @@ -6341,6 +6345,7 @@ "enum":[ "CREATED", "SUBMITTED", + "AWS_REVIEWING", "REVIEWING", "REQUIRES_AUTHENTICATION", "PROVISIONING", @@ -6497,6 +6502,10 @@ "DeniedReasons":{ "shape":"RegistrationDeniedReasonInformationList", "documentation":"

An array of RegistrationDeniedReasonInformation objects.

" + }, + "Feedback":{ + "shape":"String", + "documentation":"

Feedback information provided during the registration review process. This includes comments, suggestions, or additional requirements.

" } }, "documentation":"

Provides information about the specified version of the registration.

" @@ -6522,6 +6531,7 @@ "enum":[ "DRAFT", "SUBMITTED", + "AWS_REVIEWING", "REVIEWING", "REQUIRES_AUTHENTICATION", "APPROVED", @@ -6543,6 +6553,10 @@ "shape":"Timestamp", "documentation":"

The time when the registration was in the submitted state, in UNIX epoch time format.

" }, + "AwsReviewingTimestamp":{ + "shape":"Timestamp", + "documentation":"

The time when the registration was in the AWS reviewing state, in UNIX epoch time format.

" + }, "ReviewingTimestamp":{ "shape":"Timestamp", "documentation":"

The time when the registration was in the reviewing state, in UNIX epoch time format.

" @@ -6580,7 +6594,7 @@ "members":{ "PhoneNumberId":{ "shape":"PhoneNumberIdOrArn", - "documentation":"

The PhoneNumberId or PhoneNumberArn of the phone number to release. You can use DescribePhoneNumbers to get the values for PhoneNumberId and PhoneNumberArn.

If you are using a shared End User MessagingSMS resource then you must use the full Amazon Resource Name(ARN).

" + "documentation":"

The PhoneNumberId or PhoneNumberArn of the phone number to release. You can use DescribePhoneNumbers to get the values for PhoneNumberId and PhoneNumberArn.

If you are using a shared End User Messaging SMS resource then you must use the full Amazon Resource Name(ARN).

" } } }, @@ -6637,7 +6651,7 @@ }, "SelfManagedOptOutsEnabled":{ "shape":"PrimitiveBoolean", - "documentation":"

By default this is set to false. When set to false and an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, End User MessagingSMS automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests.

" + "documentation":"

By default this is set to false. When set to false and an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, End User Messaging SMS automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests.

" }, "OptOutListName":{ "shape":"OptOutListName", @@ -6738,11 +6752,11 @@ }, "OptOutListName":{ "shape":"OptOutListNameOrArn", - "documentation":"

The name of the OptOutList to associate with the phone number. You can use the OptOutListName or OptOutListArn.

If you are using a shared End User MessagingSMS resource then you must use the full Amazon Resource Name(ARN).

" + "documentation":"

The name of the OptOutList to associate with the phone number. You can use the OptOutListName or OptOutListArn.

If you are using a shared End User Messaging SMS resource then you must use the full Amazon Resource Name(ARN).

" }, "PoolId":{ "shape":"PoolIdOrArn", - "documentation":"

The pool to associated with the phone number. You can use the PoolId or PoolArn.

If you are using a shared End User MessagingSMS resource then you must use the full Amazon Resource Name(ARN).

" + "documentation":"

The pool to associate with the phone number. You can use the PoolId or PoolArn.

If you are using a shared End User Messaging SMS resource then you must use the full Amazon Resource Name(ARN).

" }, "RegistrationId":{ "shape":"RegistrationIdOrArn", @@ -6820,7 +6834,7 @@ }, "SelfManagedOptOutsEnabled":{ "shape":"PrimitiveBoolean", - "documentation":"

By default this is set to false. When set to false and an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, End User MessagingSMS automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests.

" + "documentation":"

By default this is set to false. When set to false and an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, End User Messaging SMS automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests.

" }, "OptOutListName":{ "shape":"OptOutListName", @@ -7079,7 +7093,7 @@ }, "OriginationIdentity":{ "shape":"VerificationMessageOriginationIdentity", - "documentation":"

The origination identity of the message. This can be either the PhoneNumber, PhoneNumberId, PhoneNumberArn, SenderId, SenderIdArn, PoolId, or PoolArn.

If you are using a shared End User MessagingSMS resource then you must use the full Amazon Resource Name(ARN).

" + "documentation":"

The origination identity of the message. This can be either the PhoneNumber, PhoneNumberId, PhoneNumberArn, SenderId, SenderIdArn, PoolId, or PoolArn.

If you are using a shared End User Messaging SMS resource then you must use the full Amazon Resource Name(ARN).

" }, "ConfigurationSetName":{ "shape":"ConfigurationSetNameOrArn", @@ -7118,7 +7132,7 @@ }, "OriginationIdentity":{ "shape":"MediaMessageOriginationIdentity", - "documentation":"

The origination identity of the message. This can be either the PhoneNumber, PhoneNumberId, PhoneNumberArn, SenderId, SenderIdArn, PoolId, or PoolArn.

If you are using a shared End User MessagingSMS resource then you must use the full Amazon Resource Name(ARN).

" + "documentation":"

The origination identity of the message. This can be either the PhoneNumber, PhoneNumberId, PhoneNumberArn, SenderId, SenderIdArn, PoolId, or PoolArn.

If you are using a shared End User Messaging SMS resource then you must use the full Amazon Resource Name(ARN).

" }, "MessageBody":{ "shape":"TextMessageBody", @@ -7177,7 +7191,7 @@ }, "OriginationIdentity":{ "shape":"TextMessageOriginationIdentity", - "documentation":"

The origination identity of the message. This can be either the PhoneNumber, PhoneNumberId, PhoneNumberArn, SenderId, SenderIdArn, PoolId, or PoolArn.

If you are using a shared End User MessagingSMS resource then you must use the full Amazon Resource Name(ARN).

" + "documentation":"

The origination identity of the message. This can be either the PhoneNumber, PhoneNumberId, PhoneNumberArn, SenderId, SenderIdArn, PoolId, or PoolArn.

If you are using a shared End User Messaging SMS resource then you must use the full Amazon Resource Name(ARN).

" }, "MessageBody":{ "shape":"TextMessageBody", @@ -7213,7 +7227,7 @@ }, "DryRun":{ "shape":"PrimitiveBoolean", - "documentation":"

When set to true, the message is checked and validated, but isn't sent to the end recipient. You are not charged for using DryRun.

The Message Parts per Second (MPS) limit when using DryRun is five. If your origination identity has a lower MPS limit then the lower MPS limit is used. For more information about MPS limits, see Message Parts per Second (MPS) limits in the End User MessagingSMS User Guide..

" + "documentation":"

When set to true, the message is checked and validated, but isn't sent to the end recipient. You are not charged for using DryRun.

The Message Parts per Second (MPS) limit when using DryRun is five. If your origination identity has a lower MPS limit then the lower MPS limit is used. For more information about MPS limits, see Message Parts per Second (MPS) limits in the End User Messaging SMS User Guide.

" }, "ProtectConfigurationId":{ "shape":"ProtectConfigurationIdOrArn", @@ -7247,7 +7261,7 @@ }, "OriginationIdentity":{ "shape":"VoiceMessageOriginationIdentity", - "documentation":"

The origination identity to use for the voice call. This can be the PhoneNumber, PhoneNumberId, PhoneNumberArn, PoolId, or PoolArn.

If you are using a shared End User MessagingSMS resource then you must use the full Amazon Resource Name(ARN).

" + "documentation":"

The origination identity to use for the voice call. This can be the PhoneNumber, PhoneNumberId, PhoneNumberArn, PoolId, or PoolArn.

If you are using a shared End User Messaging SMS resource then you must use the full Amazon Resource Name(ARN).

" }, "MessageBody":{ "shape":"VoiceMessageBody", @@ -7322,7 +7336,7 @@ "documentation":"

The two-character code, in ISO 3166-1 alpha-2 format, for the country or region.

" } }, - "documentation":"

The alphanumeric sender ID in a specific country that you want to describe. For more information on sender IDs see Requesting sender IDs in the End User MessagingSMS User Guide.

" + "documentation":"

The alphanumeric sender ID in a specific country that you want to describe. For more information on sender IDs see Requesting sender IDs in the End User Messaging SMS User Guide.

" }, "SenderIdFilter":{ "type":"structure", @@ -7690,7 +7704,7 @@ "documentation":"

When set to True, the value that has been specified in the EnforcedLimit is used to determine the maximum amount in US dollars that can be spent to send messages each month, in US dollars.

" } }, - "documentation":"

Describes the current monthly spend limits for sending voice and text messages. For more information on increasing your monthly spend limit, see Requesting a spending quota increase in the End User MessagingSMS User Guide.

" + "documentation":"

Describes the current monthly spend limits for sending voice and text messages. For more information on increasing your monthly spend limit, see Requesting a spending quota increase in the End User Messaging SMS User Guide.

" }, "SpendLimitList":{ "type":"list", @@ -7721,6 +7735,10 @@ "RegistrationId":{ "shape":"RegistrationIdOrArn", "documentation":"

The unique identifier for the registration.

" + }, + "AwsReview":{ + "shape":"PrimitiveBoolean", + "documentation":"

Set to true to request AWS review of the registration. When enabled, AWS will perform additional validation and review of the registration submission before processing.

" } } }, @@ -7731,7 +7749,8 @@ "RegistrationId", "VersionNumber", "RegistrationVersionStatus", - "RegistrationVersionStatusHistory" + "RegistrationVersionStatusHistory", + "AwsReview" ], "members":{ "RegistrationArn":{ @@ -7753,6 +7772,10 @@ "RegistrationVersionStatusHistory":{ "shape":"RegistrationVersionStatusHistory", "documentation":"

The RegistrationVersionStatusHistory object contains the time stamps for when the registration's status changes.

" + }, + "AwsReview":{ + "shape":"PrimitiveBoolean", + "documentation":"

Indicates whether AWS review was requested for this registration submission.

" } } }, @@ -7993,7 +8016,7 @@ "members":{ "PhoneNumberId":{ "shape":"PhoneNumberIdOrArn", - "documentation":"

The unique identifier of the phone number. Valid values for this field can be either the PhoneNumberId or PhoneNumberArn.

If you are using a shared End User MessagingSMS resource then you must use the full Amazon Resource Name(ARN).

" + "documentation":"

The unique identifier of the phone number. Valid values for this field can be either the PhoneNumberId or PhoneNumberArn.

If you are using a shared End User Messaging SMS resource then you must use the full Amazon Resource Name(ARN).

" }, "TwoWayEnabled":{ "shape":"Boolean", @@ -8009,7 +8032,7 @@ }, "SelfManagedOptOutsEnabled":{ "shape":"Boolean", - "documentation":"

By default this is set to false. When set to false and an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, End User MessagingSMS automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests.

" + "documentation":"

By default this is set to false. When set to false and an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, End User Messaging SMS automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests.

" }, "OptOutListName":{ "shape":"OptOutListNameOrArn", @@ -8108,7 +8131,7 @@ "members":{ "PoolId":{ "shape":"PoolIdOrArn", - "documentation":"

The unique identifier of the pool to update. Valid values are either the PoolId or PoolArn.

If you are using a shared End User MessagingSMS resource then you must use the full Amazon Resource Name(ARN).

" + "documentation":"

The unique identifier of the pool to update. Valid values are either the PoolId or PoolArn.

If you are using a shared End User Messaging SMS resource then you must use the full Amazon Resource Name(ARN).

" }, "TwoWayEnabled":{ "shape":"Boolean", @@ -8124,11 +8147,11 @@ }, "SelfManagedOptOutsEnabled":{ "shape":"Boolean", - "documentation":"

By default this is set to false. When set to false and an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, End User MessagingSMS automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests.

" + "documentation":"

By default this is set to false. When set to false and an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, End User Messaging SMS automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests.

" }, "OptOutListName":{ "shape":"OptOutListNameOrArn", - "documentation":"

The OptOutList to associate with the pool. Valid values are either OptOutListName or OptOutListArn.

If you are using a shared End User MessagingSMS resource then you must use the full Amazon Resource Name(ARN).

" + "documentation":"

The OptOutList to associate with the pool. Valid values are either OptOutListName or OptOutListArn.

If you are using a shared End User Messaging SMS resource then you must use the full Amazon Resource Name(ARN).

" }, "SharedRoutesEnabled":{ "shape":"Boolean", @@ -8173,7 +8196,7 @@ }, "SelfManagedOptOutsEnabled":{ "shape":"PrimitiveBoolean", - "documentation":"

When set to false and an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, End User MessagingSMS automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests.

" + "documentation":"

When set to false and an end recipient sends a message that begins with HELP or STOP to one of your dedicated numbers, End User Messaging SMS automatically replies with a customizable message and adds the end recipient to the OptOutList. When set to true you're responsible for responding to HELP and STOP requests. You're also responsible for tracking and honoring opt-out requests.

" }, "OptOutListName":{ "shape":"OptOutListName", @@ -8211,7 +8234,7 @@ }, "CountryRuleSetUpdates":{ "shape":"ProtectConfigurationCountryRuleSet", - "documentation":"

A map of ProtectConfigurationCountryRuleSetInformation objects that contain the details for the requested NumberCapability. The Key is the two-letter ISO country code. For a list of supported ISO country codes, see Supported countries and regions (SMS channel) in the End User MessagingSMS User Guide.

For example, to set the United States as allowed and Canada as blocked, the CountryRuleSetUpdates would be formatted as: \"CountryRuleSetUpdates\": { \"US\" : { \"ProtectStatus\": \"ALLOW\" } \"CA\" : { \"ProtectStatus\": \"BLOCK\" } }

" + "documentation":"

A map of ProtectConfigurationCountryRuleSetInformation objects that contain the details for the requested NumberCapability. The Key is the two-letter ISO country code. For a list of supported ISO country codes, see Supported countries and regions (SMS channel) in the End User Messaging SMS User Guide.

For example, to set the United States as allowed and Canada as blocked, the CountryRuleSetUpdates would be formatted as: \"CountryRuleSetUpdates\": { \"US\" : { \"ProtectStatus\": \"ALLOW\" } \"CA\" : { \"ProtectStatus\": \"BLOCK\" } }

" } } }, @@ -8675,5 +8698,5 @@ "pattern":"[A-Za-z0-9_:/\\+-]+" } }, - "documentation":"

Welcome to the End User MessagingSMS, version 2 API Reference. This guide provides information about End User MessagingSMS, version 2 API resources, including supported HTTP methods, parameters, and schemas.

Amazon Pinpoint is an Amazon Web Services service that you can use to engage with your recipients across multiple messaging channels. The End User MessagingSMS, version 2 API provides programmatic access to options that are unique to the SMS and voice channels. End User MessagingSMS, version 2 resources such as phone numbers, sender IDs, and opt-out lists can be used by the Amazon Pinpoint API.

If you're new to End User MessagingSMS, it's also helpful to review the End User MessagingSMS User Guide, where you'll find tutorials, code samples, and procedures that demonstrate how to use End User MessagingSMS features programmatically and how to integrate functionality into mobile apps and other types of applications. The guide also provides key information, such as End User MessagingSMS integration with other Amazon Web Services services, and the quotas that apply to use of the service.

Regional availability

The End User MessagingSMS version 2 API Reference is available in several Amazon Web Services Regions and it provides an endpoint for each of these Regions. For a list of all the Regions and endpoints where the API is currently available, see Amazon Web Services Service Endpoints and Amazon Pinpoint endpoints and quotas in the Amazon Web Services General Reference. To learn more about Amazon Web Services Regions, see Managing Amazon Web Services Regions in the Amazon Web Services General Reference.

In each Region, Amazon Web Services maintains multiple Availability Zones. These Availability Zones are physically isolated from each other, but are united by private, low-latency, high-throughput, and highly redundant network connections. These Availability Zones enable us to provide very high levels of availability and redundancy, while also minimizing latency. To learn more about the number of Availability Zones that are available in each Region, see Amazon Web Services Global Infrastructure.

" + "documentation":"

Welcome to the End User Messaging SMS, version 2 API Reference. This guide provides information about End User Messaging SMS, version 2 API resources, including supported HTTP methods, parameters, and schemas.

Amazon Pinpoint is an Amazon Web Services service that you can use to engage with your recipients across multiple messaging channels. The End User Messaging SMS, version 2 API provides programmatic access to options that are unique to the SMS and voice channels. End User Messaging SMS, version 2 resources such as phone numbers, sender IDs, and opt-out lists can be used by the Amazon Pinpoint API.

If you're new to End User Messaging SMS, it's also helpful to review the End User Messaging SMS User Guide, where you'll find tutorials, code samples, and procedures that demonstrate how to use End User Messaging SMS features programmatically and how to integrate functionality into mobile apps and other types of applications. The guide also provides key information, such as End User Messaging SMS integration with other Amazon Web Services services, and the quotas that apply to use of the service.

Regional availability

The End User Messaging SMS version 2 API Reference is available in several Amazon Web Services Regions and it provides an endpoint for each of these Regions. For a list of all the Regions and endpoints where the API is currently available, see Amazon Web Services Service Endpoints and Amazon Pinpoint endpoints and quotas in the Amazon Web Services General Reference. To learn more about Amazon Web Services Regions, see Managing Amazon Web Services Regions in the Amazon Web Services General Reference.

In each Region, Amazon Web Services maintains multiple Availability Zones. These Availability Zones are physically isolated from each other, but are united by private, low-latency, high-throughput, and highly redundant network connections. These Availability Zones enable us to provide very high levels of availability and redundancy, while also minimizing latency. To learn more about the number of Availability Zones that are available in each Region, see Amazon Web Services Global Infrastructure.

" } diff --git a/awscli/botocore/data/qbusiness/2023-11-27/service-2.json b/awscli/botocore/data/qbusiness/2023-11-27/service-2.json index a0e6756c3ac8..ec6f506080e3 100644 --- a/awscli/botocore/data/qbusiness/2023-11-27/service-2.json +++ b/awscli/botocore/data/qbusiness/2023-11-27/service-2.json @@ -150,7 +150,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Verifies if a user has access permissions for a specified document and returns the actual ACL attached to the document. Resolves user access on the document via user aliases and groups when verifying user access.

" + "documentation":"

Verifies if a user has access permissions for a specified document and returns the actual ACL attached to the document. Resolves user access on the document via user aliases and groups when verifying user access.

", + "readonly":true }, "CreateAnonymousWebExperienceUrl":{ "name":"CreateAnonymousWebExperienceUrl", @@ -673,7 +674,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Gets information about an existing Amazon Q Business application.

" + "documentation":"

Gets information about an existing Amazon Q Business application.

", + "readonly":true }, "GetChatControlsConfiguration":{ "name":"GetChatControlsConfiguration", @@ -691,7 +693,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Gets information about chat controls configured for an existing Amazon Q Business application.

" + "documentation":"

Gets information about chat controls configured for an existing Amazon Q Business application.

", + "readonly":true }, "GetChatResponseConfiguration":{ "name":"GetChatResponseConfiguration", @@ -709,7 +712,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Retrieves detailed information about a specific chat response configuration from an Amazon Q Business application. This operation returns the complete configuration settings and metadata.

" + "documentation":"

Retrieves detailed information about a specific chat response configuration from an Amazon Q Business application. This operation returns the complete configuration settings and metadata.

", + "readonly":true }, "GetDataAccessor":{ "name":"GetDataAccessor", @@ -727,7 +731,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Retrieves information about a specified data accessor. This operation returns details about the data accessor, including its display name, unique identifier, Amazon Resource Name (ARN), the associated Amazon Q Business application and IAM Identity Center application, the IAM role for the ISV, the action configurations, and the timestamps for when the data accessor was created and last updated.

" + "documentation":"

Retrieves information about a specified data accessor. This operation returns details about the data accessor, including its display name, unique identifier, Amazon Resource Name (ARN), the associated Amazon Q Business application and IAM Identity Center application, the IAM role for the ISV, the action configurations, and the timestamps for when the data accessor was created and last updated.

", + "readonly":true }, "GetDataSource":{ "name":"GetDataSource", @@ -745,7 +750,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Gets information about an existing Amazon Q Business data source connector.

" + "documentation":"

Gets information about an existing Amazon Q Business data source connector.

", + "readonly":true }, "GetDocumentContent":{ "name":"GetDocumentContent", @@ -763,7 +769,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Retrieves the content of a document that was ingested into Amazon Q Business. This API validates user authorization against document ACLs before returning a pre-signed URL for secure document access. You can download or view source documents referenced in chat responses through the URL.

" + "documentation":"

Retrieves the content of a document that was ingested into Amazon Q Business. This API validates user authorization against document ACLs before returning a pre-signed URL for secure document access. You can download or view source documents referenced in chat responses through the URL.

", + "readonly":true }, "GetGroup":{ "name":"GetGroup", @@ -782,7 +789,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Describes a group by group name.

" + "documentation":"

Describes a group by group name.

", + "readonly":true }, "GetIndex":{ "name":"GetIndex", @@ -800,7 +808,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Gets information about an existing Amazon Q Business index.

" + "documentation":"

Gets information about an existing Amazon Q Business index.

", + "readonly":true }, "GetMedia":{ "name":"GetMedia", @@ -820,7 +829,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Returns the image bytes corresponding to a media object. If you have implemented your own application with the Chat and ChatSync APIs, and have enabled content extraction from visual data in Amazon Q Business, you use the GetMedia API operation to download the images so you can show them in your UI with responses.

For more information, see Extracting semantic meaning from images and visuals.

" + "documentation":"

Returns the image bytes corresponding to a media object. If you have implemented your own application with the Chat and ChatSync APIs, and have enabled content extraction from visual data in Amazon Q Business, you use the GetMedia API operation to download the images so you can show them in your UI with responses.

For more information, see Extracting semantic meaning from images and visuals.

", + "readonly":true }, "GetPlugin":{ "name":"GetPlugin", @@ -838,7 +848,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Gets information about an existing Amazon Q Business plugin.

" + "documentation":"

Gets information about an existing Amazon Q Business plugin.

", + "readonly":true }, "GetPolicy":{ "name":"GetPolicy", @@ -856,7 +867,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Retrieves the current permission policy for a Amazon Q Business application. The policy is returned as a JSON-formatted string and defines the IAM actions that are allowed or denied for the application's resources.

" + "documentation":"

Retrieves the current permission policy for an Amazon Q Business application. The policy is returned as a JSON-formatted string and defines the IAM actions that are allowed or denied for the application's resources.

", + "readonly":true }, "GetRetriever":{ "name":"GetRetriever", @@ -874,7 +886,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Gets information about an existing retriever used by an Amazon Q Business application.

" + "documentation":"

Gets information about an existing retriever used by an Amazon Q Business application.

", + "readonly":true }, "GetUser":{ "name":"GetUser", @@ -893,7 +906,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Describes the universally unique identifier (UUID) associated with a local user in a data source.

" + "documentation":"

Describes the universally unique identifier (UUID) associated with a local user in a data source.

", + "readonly":true }, "GetWebExperience":{ "name":"GetWebExperience", @@ -911,7 +925,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Gets information about an existing Amazon Q Business web experience.

" + "documentation":"

Gets information about an existing Amazon Q Business web experience.

", + "readonly":true }, "ListApplications":{ "name":"ListApplications", @@ -928,7 +943,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Lists Amazon Q Business applications.

Amazon Q Business applications may securely transmit data for processing across Amazon Web Services Regions within your geography. For more information, see Cross region inference in Amazon Q Business.

" + "documentation":"

Lists Amazon Q Business applications.

Amazon Q Business applications may securely transmit data for processing across Amazon Web Services Regions within your geography. For more information, see Cross region inference in Amazon Q Business.

", + "readonly":true }, "ListAttachments":{ "name":"ListAttachments", @@ -947,7 +963,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Gets a list of attachments associated with an Amazon Q Business web experience or a list of attachements associated with a specific Amazon Q Business conversation.

" + "documentation":"

Gets a list of attachments associated with an Amazon Q Business web experience or a list of attachments associated with a specific Amazon Q Business conversation.

", + "readonly":true }, "ListChatResponseConfigurations":{ "name":"ListChatResponseConfigurations", @@ -965,7 +982,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Retrieves a list of all chat response configurations available in a specified Amazon Q Business application. This operation returns summary information about each configuration to help administrators manage and select appropriate response settings.

" + "documentation":"

Retrieves a list of all chat response configurations available in a specified Amazon Q Business application. This operation returns summary information about each configuration to help administrators manage and select appropriate response settings.

", + "readonly":true }, "ListConversations":{ "name":"ListConversations", @@ -984,7 +1002,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Lists one or more Amazon Q Business conversations.

" + "documentation":"

Lists one or more Amazon Q Business conversations.

", + "readonly":true }, "ListDataAccessors":{ "name":"ListDataAccessors", @@ -1002,7 +1021,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Lists the data accessors for a Amazon Q Business application. This operation returns a paginated list of data accessor summaries, including the friendly name, unique identifier, ARN, associated IAM role, and creation/update timestamps for each data accessor.

" + "documentation":"

Lists the data accessors for an Amazon Q Business application. This operation returns a paginated list of data accessor summaries, including the friendly name, unique identifier, ARN, associated IAM role, and creation/update timestamps for each data accessor.

", + "readonly":true }, "ListDataSourceSyncJobs":{ "name":"ListDataSourceSyncJobs", @@ -1021,7 +1041,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Get information about an Amazon Q Business data source connector synchronization.

" + "documentation":"

Get information about an Amazon Q Business data source connector synchronization.

", + "readonly":true }, "ListDataSources":{ "name":"ListDataSources", @@ -1039,7 +1060,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Lists the Amazon Q Business data source connectors that you have created.

" + "documentation":"

Lists the Amazon Q Business data source connectors that you have created.

", + "readonly":true }, "ListDocuments":{ "name":"ListDocuments", @@ -1057,7 +1079,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

A list of documents attached to an index.

" + "documentation":"

A list of documents attached to an index.

", + "readonly":true }, "ListGroups":{ "name":"ListGroups", @@ -1076,7 +1099,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Provides a list of groups that are mapped to users.

" + "documentation":"

Provides a list of groups that are mapped to users.

", + "readonly":true }, "ListIndices":{ "name":"ListIndices", @@ -1094,7 +1118,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Lists the Amazon Q Business indices you have created.

" + "documentation":"

Lists the Amazon Q Business indices you have created.

", + "readonly":true }, "ListMessages":{ "name":"ListMessages", @@ -1113,7 +1138,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Gets a list of messages associated with an Amazon Q Business web experience.

" + "documentation":"

Gets a list of messages associated with an Amazon Q Business web experience.

", + "readonly":true }, "ListPluginActions":{ "name":"ListPluginActions", @@ -1131,7 +1157,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Lists configured Amazon Q Business actions for a specific plugin in an Amazon Q Business application.

" + "documentation":"

Lists configured Amazon Q Business actions for a specific plugin in an Amazon Q Business application.

", + "readonly":true }, "ListPluginTypeActions":{ "name":"ListPluginTypeActions", @@ -1148,7 +1175,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Lists configured Amazon Q Business actions for any plugin type—both built-in and custom.

" + "documentation":"

Lists configured Amazon Q Business actions for any plugin type—both built-in and custom.

", + "readonly":true }, "ListPluginTypeMetadata":{ "name":"ListPluginTypeMetadata", @@ -1165,7 +1193,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Lists metadata for all Amazon Q Business plugin types.

" + "documentation":"

Lists metadata for all Amazon Q Business plugin types.

", + "readonly":true }, "ListPlugins":{ "name":"ListPlugins", @@ -1183,7 +1212,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Lists configured Amazon Q Business plugins.

" + "documentation":"

Lists configured Amazon Q Business plugins.

", + "readonly":true }, "ListRetrievers":{ "name":"ListRetrievers", @@ -1201,7 +1231,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Lists the retriever used by an Amazon Q Business application.

" + "documentation":"

Lists the retriever used by an Amazon Q Business application.

", + "readonly":true }, "ListSubscriptions":{ "name":"ListSubscriptions", @@ -1220,7 +1251,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Lists all subscriptions created in an Amazon Q Business application.

" + "documentation":"

Lists all subscriptions created in an Amazon Q Business application.

", + "readonly":true }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -1238,7 +1270,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Gets a list of tags associated with a specified resource. Amazon Q Business applications and data sources can have tags associated with them.

" + "documentation":"

Gets a list of tags associated with a specified resource. Amazon Q Business applications and data sources can have tags associated with them.

", + "readonly":true }, "ListWebExperiences":{ "name":"ListWebExperiences", @@ -1256,7 +1289,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Lists one or more Amazon Q Business Web Experiences.

" + "documentation":"

Lists one or more Amazon Q Business Web Experiences.

", + "readonly":true }, "PutFeedback":{ "name":"PutFeedback", @@ -1798,8 +1832,7 @@ }, "ActionPayloadFieldValue":{ "type":"structure", - "members":{ - }, + "members":{}, "document":true }, "ActionReview":{ @@ -1926,8 +1959,7 @@ }, "ActionReviewPayloadFieldArrayItemJsonSchema":{ "type":"structure", - "members":{ - }, + "members":{}, "document":true }, "ActionSummary":{ @@ -1990,7 +2022,7 @@ }, "quickSightConfiguration":{ "shape":"QuickSightConfiguration", - "documentation":"

The Amazon QuickSight configuration for an Amazon Q Business application that uses QuickSight as the identity provider.

" + "documentation":"

The Amazon Quick Suite configuration for an Amazon Q Business application that uses Quick Suite as the identity provider.

" } }, "documentation":"

Summary information for an Amazon Q Business application.

" @@ -3343,7 +3375,7 @@ }, "quickSightConfiguration":{ "shape":"QuickSightConfiguration", - "documentation":"

The Amazon QuickSight configuration for an Amazon Q Business application that uses QuickSight for authentication. This configuration is required if your application uses QuickSight as the identity provider. For more information, see Creating an Amazon QuickSight integrated application.

" + "documentation":"

The Amazon Quick Suite configuration for an Amazon Q Business application that uses Quick Suite for authentication. This configuration is required if your application uses Quick Suite as the identity provider. For more information, see Creating an Amazon Quick Suite integrated application.

" } } }, @@ -3799,8 +3831,7 @@ }, "CreateUserResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "CreateWebExperienceRequest":{ "type":"structure", @@ -4100,8 +4131,7 @@ }, "DataSourceConfiguration":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

Provides the configuration information for an Amazon Q Business data source.

", "document":true }, @@ -4267,8 +4297,7 @@ }, "DeleteApplicationResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteAttachmentRequest":{ "type":"structure", @@ -4306,8 +4335,7 @@ }, "DeleteAttachmentResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteChatControlsConfigurationRequest":{ "type":"structure", @@ -4323,8 +4351,7 @@ }, "DeleteChatControlsConfigurationResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteChatResponseConfigurationRequest":{ "type":"structure", @@ -4349,8 +4376,7 @@ }, "DeleteChatResponseConfigurationResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteConversationRequest":{ "type":"structure", @@ -4381,8 +4407,7 @@ }, "DeleteConversationResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteDataAccessorRequest":{ "type":"structure", @@ -4407,8 +4432,7 @@ }, "DeleteDataAccessorResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteDataSourceRequest":{ "type":"structure", @@ -4440,8 +4464,7 @@ }, "DeleteDataSourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteDocument":{ "type":"structure", @@ -4494,8 +4517,7 @@ }, "DeleteGroupResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteIndexRequest":{ "type":"structure", @@ -4520,8 +4542,7 @@ }, "DeleteIndexResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeletePluginRequest":{ "type":"structure", @@ -4546,8 +4567,7 @@ }, "DeletePluginResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteRetrieverRequest":{ "type":"structure", @@ -4572,8 +4592,7 @@ }, "DeleteRetrieverResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteUserRequest":{ "type":"structure", @@ -4598,8 +4617,7 @@ }, "DeleteUserResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteWebExperienceRequest":{ "type":"structure", @@ -4624,8 +4642,7 @@ }, 
"DeleteWebExperienceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "Description":{ "type":"string", @@ -4656,8 +4673,7 @@ }, "DisassociatePermissionResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DisplayName":{ "type":"string", @@ -5091,8 +5107,7 @@ }, "EndOfInputEvent":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

The end of the streaming input for the Chat API.

", "event":true }, @@ -5299,7 +5314,7 @@ }, "quickSightConfiguration":{ "shape":"QuickSightConfiguration", - "documentation":"

The Amazon QuickSight authentication configuration for the Amazon Q Business application.

" + "documentation":"

The Amazon Quick Suite authentication configuration for the Amazon Q Business application.

" } } }, @@ -6205,13 +6220,13 @@ "type":"string", "max":2048, "min":20, - "pattern":"arn:aws:iam::\\d{12}:(oidc-provider|saml-provider)/[a-zA-Z0-9_\\.\\/@\\-]+" + "pattern":"arn:[a-z0-9-\\.]{1,63}:iam::\\d{12}:(oidc-provider|saml-provider)/[a-zA-Z0-9_\\.\\/@\\-]+" }, "IdcApplicationArn":{ "type":"string", "max":1224, "min":10, - "pattern":"arn:(aws|aws-us-gov|aws-cn|aws-iso|aws-iso-b):sso::\\d{12}:application/(sso)?ins-[a-zA-Z0-9-.]{16}/apl-[a-zA-Z0-9]{16}" + "pattern":"arn:[a-z0-9-\\.]{1,63}:sso::\\d{12}:application/(sso)?ins-[a-zA-Z0-9-.]{16}/apl-[a-zA-Z0-9]{16}" }, "IdcAuthConfiguration":{ "type":"structure", @@ -6235,7 +6250,7 @@ "type":"string", "max":1284, "min":0, - "pattern":"arn:aws:sso::[0-9]{12}:trustedTokenIssuer/(sso)?ins-[a-zA-Z0-9-.]{16}/tti-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" + "pattern":"arn:[a-z0-9-\\.]{1,63}:sso::[0-9]{12}:trustedTokenIssuer/(sso)?ins-[a-zA-Z0-9-.]{16}/tti-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" }, "IdentityProviderConfiguration":{ "type":"structure", @@ -6410,7 +6425,7 @@ "type":"string", "max":1224, "min":10, - "pattern":"arn:(aws|aws-us-gov|aws-cn|aws-iso|aws-iso-b):sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}" + "pattern":"arn:[a-z0-9-\\.]{1,63}:sso:::instance/(sso)?ins-[a-zA-Z0-9-.]{16}" }, "Instruction":{ "type":"string", @@ -6498,7 +6513,7 @@ "type":"string", "max":2048, "min":1, - "pattern":"arn:aws[a-zA-Z-]*:lambda:[a-z-]*-[0-9]:[0-9]{12}:function:[a-zA-Z0-9-_]+(/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})?(:[a-zA-Z0-9-_]+)?" + "pattern":"arn:[a-z0-9-\\.]{1,63}:lambda:[a-z-]*-[0-9]:[0-9]{12}:function:[a-zA-Z0-9-_]+(/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})?(:[a-zA-Z0-9-_]+)?" }, "LicenseNotFoundException":{ "type":"structure", @@ -7698,8 +7713,7 @@ }, "NoAuthConfiguration":{ "type":"structure", - "members":{ - }, + "members":{}, "documentation":"

Information about invoking a custom plugin without any authentication or authorization requirement.

" }, "NumberAttributeBoostingConfiguration":{ @@ -7828,7 +7842,7 @@ }, "PermissionConditionKey":{ "type":"string", - "pattern":"aws:PrincipalTag/qbusiness-dataaccessor:[a-zA-Z]+.*" + "pattern":"aws:[a-zA-Z][a-zA-Z0-9-/:]*" }, "PermissionConditionOperator":{ "type":"string", @@ -7838,7 +7852,7 @@ "type":"string", "max":1000, "min":1, - "pattern":"[a-zA-Z0-9][a-zA-Z0-9_-]*" + "pattern":"[a-zA-Z0-9][a-zA-Z0-9._-]*" }, "PermissionConditionValues":{ "type":"list", @@ -8076,7 +8090,7 @@ "type":"string", "max":1284, "min":1, - "pattern":"arn:aws:iam::[0-9]{12}:role/[a-zA-Z0-9_/+=,.@-]+" + "pattern":"arn:[a-z0-9-\\.]{1,63}:iam::[0-9]{12}:role/[a-zA-Z0-9_/+=,.@-]+" }, "PrincipalUser":{ "type":"structure", @@ -8186,8 +8200,7 @@ }, "PutGroupResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "QAppsConfiguration":{ "type":"structure", @@ -8224,10 +8237,10 @@ "members":{ "clientNamespace":{ "shape":"ClientNamespace", - "documentation":"

The Amazon QuickSight namespace that is used as the identity provider. For more information about QuickSight namespaces, see Namespace operations.

" + "documentation":"

The Amazon Quick Suite namespace that is used as the identity provider. For more information about Quick Suite namespaces, see Namespace operations.

" } }, - "documentation":"

The Amazon QuickSight configuration for an Amazon Q Business application that uses QuickSight as the identity provider. For more information, see Creating an Amazon QuickSight integrated application.

" + "documentation":"

The Amazon Quick Suite configuration for an Amazon Q Business application that uses Quick Suite as the identity provider. For more information, see Creating an Amazon Quick Suite integrated application.

" }, "ReadAccessType":{ "type":"string", @@ -8839,8 +8852,7 @@ }, "StopDataSourceSyncJobResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "String":{ "type":"string", @@ -9052,8 +9064,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -9245,8 +9256,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateApplicationRequest":{ "type":"structure", @@ -9294,8 +9304,7 @@ }, "UpdateApplicationResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateChatControlsConfigurationRequest":{ "type":"structure", @@ -9344,8 +9353,7 @@ }, "UpdateChatControlsConfigurationResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateChatResponseConfigurationRequest":{ "type":"structure", @@ -9384,8 +9392,7 @@ }, "UpdateChatResponseConfigurationResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateDataAccessorRequest":{ "type":"structure", @@ -9423,8 +9430,7 @@ }, "UpdateDataAccessorResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateDataSourceRequest":{ "type":"structure", @@ -9479,8 +9485,7 @@ }, "UpdateDataSourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateIndexRequest":{ "type":"structure", @@ -9521,8 +9526,7 @@ }, "UpdateIndexResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdatePluginRequest":{ "type":"structure", @@ -9567,8 +9571,7 @@ }, "UpdatePluginResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateRetrieverRequest":{ "type":"structure", @@ -9602,8 +9605,7 @@ }, "UpdateRetrieverResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateSubscriptionRequest":{ "type":"structure", @@ -9759,8 +9761,7 @@ }, "UpdateWebExperienceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "Url":{ "type":"string", diff --git a/awscli/botocore/data/quicksight/2018-04-01/service-2.json 
b/awscli/botocore/data/quicksight/2018-04-01/service-2.json index 9a53f1526c62..9790388e6621 100644 --- a/awscli/botocore/data/quicksight/2018-04-01/service-2.json +++ b/awscli/botocore/data/quicksight/2018-04-01/service-2.json @@ -1532,7 +1532,7 @@ {"shape":"UnsupportedUserEditionException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Describes an existing snapshot job.

Poll job descriptions after a job starts to know the status of the job. For information on available status codes, see JobStatus.

" + "documentation":"

Describes an existing snapshot job.

Poll job descriptions after a job starts to know the status of the job. For information on available status codes, see JobStatus.

Registered user support

This API can be called as before to get status of a job started by the same Quick Sight user.

Possible error scenarios

Request will fail with an Access Denied error in the following scenarios:

" }, "DescribeDashboardSnapshotJobResult":{ "name":"DescribeDashboardSnapshotJobResult", @@ -1551,7 +1551,7 @@ {"shape":"PreconditionNotMetException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Describes the result of an existing snapshot job that has finished running.

A finished snapshot job will return a COMPLETED or FAILED status when you poll the job with a DescribeDashboardSnapshotJob API call.

If the job has not finished running, this operation returns a message that says Dashboard Snapshot Job with id <SnapshotjobId> has not reached a terminal state..

" + "documentation":"

Describes the result of an existing snapshot job that has finished running.

A finished snapshot job will return a COMPLETED or FAILED status when you poll the job with a DescribeDashboardSnapshotJob API call.

If the job has not finished running, this operation returns a message that says Dashboard Snapshot Job with id <SnapshotjobId> has not reached a terminal state..

Registered user support

This API can be called as before to get the result of a job started by the same Quick Sight user. The result for the user will be returned in RegisteredUsers response attribute. The attribute will contain a list with at most one object in it.

Possible error scenarios

The request fails with an Access Denied error in the following scenarios:

The request succeeds but the job fails in the following scenarios:

The request succeeds but the response contains an error code in the following scenarios:

" }, "DescribeDashboardsQAConfiguration":{ "name":"DescribeDashboardsQAConfiguration", @@ -1929,6 +1929,26 @@ ], "documentation":"

Describes all custom permissions that are mapped to a role.

" }, + "DescribeSelfUpgradeConfiguration":{ + "name":"DescribeSelfUpgradeConfiguration", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/self-upgrade-configuration" + }, + "input":{"shape":"DescribeSelfUpgradeConfigurationRequest"}, + "output":{"shape":"DescribeSelfUpgradeConfigurationResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"PreconditionNotMetException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"}, + {"shape":"ResourceUnavailableException"} + ], + "documentation":"

Describes the self-upgrade configuration for a Quick Suite account.

" + }, "DescribeTemplate":{ "name":"DescribeTemplate", "http":{ @@ -2288,6 +2308,25 @@ "documentation":"

Get permissions for a flow.

", "readonly":true }, + "GetIdentityContext":{ + "name":"GetIdentityContext", + "http":{ + "method":"POST", + "requestUri":"/accounts/{AwsAccountId}/identity-context", + "responseCode":200 + }, + "input":{"shape":"GetIdentityContextRequest"}, + "output":{"shape":"GetIdentityContextResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InternalFailureException"}, + {"shape":"PreconditionNotMetException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Retrieves the identity context for a Quick Sight user in a specified namespace, allowing you to obtain identity tokens that can be used with identity-enhanced IAM role sessions to call identity-aware APIs.

Currently, you can call the following APIs with identity-enhanced Credentials

Supported Authentication Methods

This API supports Quick Sight native users, IAM federated users, and Active Directory users. For Quick Sight users authenticated by Amazon Web Services Identity Center, see Identity Center documentation on identity-enhanced IAM role sessions.

Getting Identity-Enhanced Credentials

To obtain identity-enhanced credentials, follow these steps:

Usage with STS AssumeRole

The identity token returned by this API should be used with the STS AssumeRole API to obtain credentials for an identity-enhanced IAM role session. When calling AssumeRole, include the identity token in the ProvidedContexts parameter with ProviderArn set to arn:aws:iam::aws:contextProvider/QuickSight and ContextAssertion set to the identity token received from this API.

The assumed role must allow the sts:SetContext action in addition to sts:AssumeRole in its trust relationship policy. The trust policy should include both actions for the principal that will be assuming the role.

" + }, "GetSessionEmbedUrl":{ "name":"GetSessionEmbedUrl", "http":{ @@ -2730,6 +2769,27 @@ ], "documentation":"

Lists all groups that are associated with a role.

" }, + "ListSelfUpgrades":{ + "name":"ListSelfUpgrades", + "http":{ + "method":"GET", + "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/self-upgrade-requests" + }, + "input":{"shape":"ListSelfUpgradesRequest"}, + "output":{"shape":"ListSelfUpgradesResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"ThrottlingException"}, + {"shape":"PreconditionNotMetException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"}, + {"shape":"ResourceUnavailableException"} + ], + "documentation":"

Lists all self-upgrade requests for a Quick Suite account.

" + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -3268,7 +3328,7 @@ {"shape":"UnsupportedPricingPlanException"}, {"shape":"InternalFailureException"} ], - "documentation":"

Starts an asynchronous job that generates a snapshot of a dashboard's output. You can request one or several of the following format configurations in each API call.

The status of a submitted job can be polled with the DescribeDashboardSnapshotJob API. When you call the DescribeDashboardSnapshotJob API, check the JobStatus field in the response. Once the job reaches a COMPLETED or FAILED status, use the DescribeDashboardSnapshotJobResult API to obtain the URLs for the generated files. If the job fails, the DescribeDashboardSnapshotJobResult API returns detailed information about the error that occurred.

StartDashboardSnapshotJob API throttling

Quick Sight utilizes API throttling to create a more consistent user experience within a time span for customers when they call the StartDashboardSnapshotJob. By default, 12 jobs can run simlutaneously in one Amazon Web Services account and users can submit up 10 API requests per second before an account is throttled. If an overwhelming number of API requests are made by the same user in a short period of time, Quick Sight throttles the API calls to maintin an optimal experience and reliability for all Quick Sight users.

Common throttling scenarios

The following list provides information about the most commin throttling scenarios that can occur.

If your use case requires a higher throttling limit, contact your account admin or Amazon Web ServicesSupport to explore options to tailor a more optimal expereince for your account.

Best practices to handle throttling

If your use case projects high levels of API traffic, try to reduce the degree of frequency and parallelism of API calls as much as you can to avoid throttling. You can also perform a timing test to calculate an estimate for the total processing time of your projected load that stays within the throttling limits of the Quick Sight APIs. For example, if your projected traffic is 100 snapshot jobs before 12:00 PM per day, start 12 jobs in parallel and measure the amount of time it takes to proccess all 12 jobs. Once you obtain the result, multiply the duration by 9, for example (12 minutes * 9 = 108 minutes). Use the new result to determine the latest time at which the jobs need to be started to meet your target deadline.

The time that it takes to process a job can be impacted by the following factors:

" + "documentation":"

Starts an asynchronous job that generates a snapshot of a dashboard's output. You can request one or several of the following format configurations in each API call.

The status of a submitted job can be polled with the DescribeDashboardSnapshotJob API. When you call the DescribeDashboardSnapshotJob API, check the JobStatus field in the response. Once the job reaches a COMPLETED or FAILED status, use the DescribeDashboardSnapshotJobResult API to obtain the URLs for the generated files. If the job fails, the DescribeDashboardSnapshotJobResult API returns detailed information about the error that occurred.

StartDashboardSnapshotJob API throttling

Quick Sight utilizes API throttling to create a more consistent user experience within a time span for customers when they call the StartDashboardSnapshotJob. By default, 12 jobs can run simultaneously in one Amazon Web Services account and users can submit up to 10 API requests per second before an account is throttled. If an overwhelming number of API requests are made by the same user in a short period of time, Quick Sight throttles the API calls to maintain an optimal experience and reliability for all Quick Sight users.

Common throttling scenarios

The following list provides information about the most common throttling scenarios that can occur.

If your use case requires a higher throttling limit, contact your account admin or Amazon Web Services Support to explore options to tailor a more optimal experience for your account.

Best practices to handle throttling

If your use case projects high levels of API traffic, try to reduce the degree of frequency and parallelism of API calls as much as you can to avoid throttling. You can also perform a timing test to calculate an estimate for the total processing time of your projected load that stays within the throttling limits of the Quick Sight APIs. For example, if your projected traffic is 100 snapshot jobs before 12:00 PM per day, start 12 jobs in parallel and measure the amount of time it takes to process all 12 jobs. Once you obtain the result, multiply the duration by 9, for example (12 minutes * 9 = 108 minutes). Use the new result to determine the latest time at which the jobs need to be started to meet your target deadline.

The time that it takes to process a job can be impacted by the following factors:

Registered user support

You can generate snapshots for registered Quick Sight users by using the Snapshot Job APIs with identity-enhanced IAM role session credentials. This approach allows you to create snapshots on behalf of specific Quick Sight users while respecting their row-level security (RLS), column-level security (CLS), dynamic default parameters and dashboard parameter/filter settings.

To generate snapshots for registered Quick Sight users, you need to:

Identity-enhanced credentials are credentials that contain information about the end user (e.g., registered Quick Sight user).

If your Quick Sight users are backed by Amazon Web Services Identity Center, then you need to set up a trusted token issuer. Then, getting identity-enhanced IAM credentials for a Quick Sight user will look like the following:

For more details, see IdC documentation on Identity-enhanced IAM role sessions.

To obtain Identity-enhanced credentials for Quick Sight native users, IAM federated users, or Active Directory users, follow the steps below:

After obtaining the identity-enhanced IAM role session credentials, you can use them to start a job, describe the job and describe job result. You can use the same credentials as long as they haven't expired. All API requests made with these credentials are considered to be made by the impersonated Quick Sight user.

When using identity-enhanced session credentials, set the UserConfiguration request attribute to null. Otherwise, the request will be invalid.

Possible error scenarios

The request fails with an Access Denied error in the following scenarios:

" }, "StartDashboardSnapshotJobSchedule":{ "name":"StartDashboardSnapshotJobSchedule", @@ -3999,6 +4059,47 @@ ], "documentation":"

Updates the SPICE capacity configuration for a Quick Sight account.

" }, + "UpdateSelfUpgrade":{ + "name":"UpdateSelfUpgrade", + "http":{ + "method":"POST", + "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/update-self-upgrade-request" + }, + "input":{"shape":"UpdateSelfUpgradeRequest"}, + "output":{"shape":"UpdateSelfUpgradeResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"ThrottlingException"}, + {"shape":"PreconditionNotMetException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"}, + {"shape":"ResourceUnavailableException"} + ], + "documentation":"

Updates a self-upgrade request for a Quick Suite user by approving, denying, or verifying the request.

" + }, + "UpdateSelfUpgradeConfiguration":{ + "name":"UpdateSelfUpgradeConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/accounts/{AwsAccountId}/namespaces/{Namespace}/self-upgrade-configuration" + }, + "input":{"shape":"UpdateSelfUpgradeConfigurationRequest"}, + "output":{"shape":"UpdateSelfUpgradeConfigurationResponse"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValueException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"PreconditionNotMetException"}, + {"shape":"ThrottlingException"}, + {"shape":"InternalFailureException"}, + {"shape":"ResourceUnavailableException"} + ], + "documentation":"

Updates the self-upgrade configuration for a Quick Suite account.

" + }, "UpdateTemplate":{ "name":"UpdateTemplate", "http":{ @@ -7328,6 +7429,14 @@ "shape":"ChartAxisLabelOptions", "documentation":"

The label options (label text, label visibility and sort icon visibility) for a color that is used in a bar chart.

" }, + "DefaultSeriesSettings":{ + "shape":"BarChartDefaultSeriesSettings", + "documentation":"

The options that determine the default presentation of all bar series in BarChartVisual.

" + }, + "Series":{ + "shape":"BarSeriesItemList", + "documentation":"

The series item configuration of a BarChartVisual.

" + }, "Legend":{ "shape":"LegendOptions", "documentation":"

The legend display setup of the visual.

" @@ -7355,6 +7464,20 @@ }, "documentation":"

The configuration of a BarChartVisual.

" }, + "BarChartDefaultSeriesSettings":{ + "type":"structure", + "members":{ + "DecalSettings":{ + "shape":"DecalSettings", + "documentation":"

Decal settings for all bar series in the visual.

" + }, + "BorderSettings":{ + "shape":"BorderSettings", + "documentation":"

Border settings for all bar series in the visual.

" + } + }, + "documentation":"

The options that determine the default presentation of all bar series in BarChartVisual.

" + }, "BarChartFieldWells":{ "type":"structure", "members":{ @@ -7372,6 +7495,20 @@ "VERTICAL" ] }, + "BarChartSeriesSettings":{ + "type":"structure", + "members":{ + "DecalSettings":{ + "shape":"DecalSettings", + "documentation":"

Decal settings for the bar series.

" + }, + "BorderSettings":{ + "shape":"BorderSettings", + "documentation":"

Border settings for the bar series.

" + } + }, + "documentation":"

Options that determine the presentation of a bar series in the visual.

" + }, "BarChartSortConfiguration":{ "type":"structure", "members":{ @@ -7437,6 +7574,25 @@ }, "documentation":"

A bar chart.

The BarChartVisual structure describes a visual that is a member of the bar chart family. The following charts can be described using this structure:

For more information, see Using bar charts in the Amazon Quick Suite User Guide.

" }, + "BarSeriesItem":{ + "type":"structure", + "members":{ + "FieldBarSeriesItem":{ + "shape":"FieldBarSeriesItem", + "documentation":"

The field series item configuration of a BarChartVisual.

" + }, + "DataFieldBarSeriesItem":{ + "shape":"DataFieldBarSeriesItem", + "documentation":"

The data field series item configuration of a BarChartVisual.

" + } + }, + "documentation":"

The series item configuration of a BarChartVisual.

This is a union type structure. For this structure to be valid, only one of the attributes can be defined.

" + }, + "BarSeriesItemList":{ + "type":"list", + "member":{"shape":"BarSeriesItem"}, + "max":2000 + }, "BarsArrangement":{ "type":"string", "enum":[ @@ -7790,6 +7946,24 @@ "type":"string", "max":50 }, + "BorderSettings":{ + "type":"structure", + "members":{ + "BorderVisibility":{ + "shape":"Visibility", + "documentation":"

Visibility setting for the border.

" + }, + "BorderWidth":{ + "shape":"PixelLength", + "documentation":"

Width of the border. Valid range is from 1px to 8px.

" + }, + "BorderColor":{ + "shape":"HexColorWithTransparency", + "documentation":"

Color of the border.

" + } + }, + "documentation":"

Border settings configuration for visual elements, including visibility, width, and color properties.

" + }, "BorderStyle":{ "type":"structure", "members":{ @@ -8442,6 +8616,10 @@ "Research":{ "shape":"CapabilityState", "documentation":"

The ability to perform research-related actions.

" + }, + "SelfUpgradeUserRole":{ + "shape":"CapabilityState", + "documentation":"

The ability to enable users to upgrade their user role.

" } }, "documentation":"

A set of actions that correspond to Amazon Quick Sight permissions.

" @@ -8948,6 +9126,10 @@ "ColorsConfiguration":{ "shape":"ColorsConfiguration", "documentation":"

The color configurations of the column.

" + }, + "DecalSettingsConfiguration":{ + "shape":"DecalSettingsConfiguration", + "documentation":"

Decal configuration of the column.

" } }, "documentation":"

The general configuration of a column.

" @@ -9333,6 +9515,14 @@ "shape":"ChartAxisLabelOptions", "documentation":"

The label options (label text, label visibility, and sort icon visibility) of a combo chart's color field well.

" }, + "DefaultSeriesSettings":{ + "shape":"ComboChartDefaultSeriesSettings", + "documentation":"

The options that determine the default presentation of all series in ComboChartVisual.

" + }, + "Series":{ + "shape":"ComboSeriesItemList", + "documentation":"

The series item configuration of a ComboChartVisual.

" + }, "Legend":{ "shape":"LegendOptions", "documentation":"

The legend display setup of the visual.

" @@ -9364,6 +9554,28 @@ }, "documentation":"

The configuration of a ComboChartVisual.

" }, + "ComboChartDefaultSeriesSettings":{ + "type":"structure", + "members":{ + "LineStyleSettings":{ + "shape":"LineChartLineStyleSettings", + "documentation":"

Line styles options for all line series in the visual.

" + }, + "MarkerStyleSettings":{ + "shape":"LineChartMarkerStyleSettings", + "documentation":"

Marker styles options for all line series in the visual.

" + }, + "DecalSettings":{ + "shape":"DecalSettings", + "documentation":"

Decal settings for all series in the visual.

" + }, + "BorderSettings":{ + "shape":"BorderSettings", + "documentation":"

Border settings for all bar series in the visual.

" + } + }, + "documentation":"

The options that determine the default presentation of all series in ComboChartVisual.

" + }, "ComboChartFieldWells":{ "type":"structure", "members":{ @@ -9374,6 +9586,28 @@ }, "documentation":"

The field wells of the visual.

This is a union type structure. For this structure to be valid, only one of the attributes can be defined.

" }, + "ComboChartSeriesSettings":{ + "type":"structure", + "members":{ + "LineStyleSettings":{ + "shape":"LineChartLineStyleSettings", + "documentation":"

Line styles options for the line series in the visual.

" + }, + "MarkerStyleSettings":{ + "shape":"LineChartMarkerStyleSettings", + "documentation":"

Marker styles options for the line series in the visual.

" + }, + "DecalSettings":{ + "shape":"DecalSettings", + "documentation":"

Decal settings for the series in the visual.

" + }, + "BorderSettings":{ + "shape":"BorderSettings", + "documentation":"

Border settings for the bar series in the visual.

" + } + }, + "documentation":"

Options that determine the presentation of a series in the visual.

" + }, "ComboChartSortConfiguration":{ "type":"structure", "members":{ @@ -9431,6 +9665,25 @@ }, "documentation":"

A combo chart.

The ComboChartVisual includes stacked bar combo charts and clustered bar combo charts

For more information, see Using combo charts in the Amazon Quick Suite User Guide.

" }, + "ComboSeriesItem":{ + "type":"structure", + "members":{ + "FieldComboSeriesItem":{ + "shape":"FieldComboSeriesItem", + "documentation":"

The field series item configuration of a ComboChartVisual.

" + }, + "DataFieldComboSeriesItem":{ + "shape":"DataFieldComboSeriesItem", + "documentation":"

The data field series item configuration of a ComboChartVisual.

" + } + }, + "documentation":"

The series item configuration of a ComboChartVisual.

This is a union type structure. For this structure to be valid, only one of the attributes can be defined.

" + }, + "ComboSeriesItemList":{ + "type":"list", + "member":{"shape":"ComboSeriesItem"}, + "max":2000 + }, "CommitMode":{ "type":"string", "enum":[ @@ -9857,6 +10110,36 @@ "max":4, "min":1 }, + "Coordinate":{ + "type":"structure", + "required":[ + "Latitude", + "Longitude" + ], + "members":{ + "Latitude":{ + "shape":"CoordinateLatitudeDouble", + "documentation":"

The latitude coordinate value for the geocode preference.

" + }, + "Longitude":{ + "shape":"CoordinateLongitudeDouble", + "documentation":"

The longitude coordinate value for the geocode preference.

" + } + }, + "documentation":"

The preference coordinate for the geocode preference.

" + }, + "CoordinateLatitudeDouble":{ + "type":"double", + "box":true, + "max":90.0, + "min":-90.0 + }, + "CoordinateLongitudeDouble":{ + "type":"double", + "box":true, + "max":180.0, + "min":-180.0 + }, "CopySourceArn":{ "type":"string", "pattern":"^arn:[-a-z0-9]*:quicksight:[-a-z0-9]*:[0-9]{12}:datasource/.+" @@ -12108,6 +12391,23 @@ "DISABLED" ] }, + "DashboardCustomizationStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, + "DashboardCustomizationVisualOptions":{ + "type":"structure", + "members":{ + "FieldsConfiguration":{ + "shape":"VisualCustomizationFieldsConfiguration", + "documentation":"

The configuration that controls field customization options available to dashboard readers for a visual.

" + } + }, + "documentation":"

The options that define customizations available to dashboard readers for a specific visual

" + }, "DashboardError":{ "type":"structure", "members":{ @@ -12595,6 +12895,44 @@ }, "documentation":"

The theme colors that are used for data colors in charts. The colors description is a hexadecimal color code that consists of six alphanumerical characters, prefixed with #, for example #37BFF5.

" }, + "DataFieldBarSeriesItem":{ + "type":"structure", + "required":["FieldId"], + "members":{ + "FieldId":{ + "shape":"FieldId", + "documentation":"

Field ID of the field that you are setting the series configuration for.

" + }, + "FieldValue":{ + "shape":"SensitiveString", + "documentation":"

Field value of the field that you are setting the series configuration for.

" + }, + "Settings":{ + "shape":"BarChartSeriesSettings", + "documentation":"

Options that determine the presentation of bar series associated to the field.

" + } + }, + "documentation":"

The data field series item configuration of a BarChartVisual.

" + }, + "DataFieldComboSeriesItem":{ + "type":"structure", + "required":["FieldId"], + "members":{ + "FieldId":{ + "shape":"FieldId", + "documentation":"

Field ID of the field that you are setting the series configuration for.

" + }, + "FieldValue":{ + "shape":"SensitiveString", + "documentation":"

Field value of the field that you are setting the series configuration for.

" + }, + "Settings":{ + "shape":"ComboChartSeriesSettings", + "documentation":"

Options that determine the presentation of series associated to the field.

" + } + }, + "documentation":"

The data field series item configuration of a ComboChartVisual.

" + }, "DataFieldSeriesItem":{ "type":"structure", "required":[ @@ -13639,6 +13977,10 @@ "shape":"SecretArn", "documentation":"

The Amazon Resource Name (ARN) of the secret associated with the data source in Amazon Secrets Manager.

" }, + "KeyPairCredentials":{ + "shape":"KeyPairCredentials", + "documentation":"

The credentials for connecting using key-pair.

" + }, "WebProxyCredentials":{ "shape":"WebProxyCredentials", "documentation":"

The credentials for connecting through a web proxy server.

" @@ -14401,6 +14743,81 @@ "max":64, "min":1 }, + "DecalPatternType":{ + "type":"string", + "enum":[ + "SOLID", + "DIAGONAL_MEDIUM", + "CIRCLE_MEDIUM", + "DIAMOND_GRID_MEDIUM", + "CHECKERBOARD_MEDIUM", + "TRIANGLE_MEDIUM", + "DIAGONAL_OPPOSITE_MEDIUM", + "DIAMOND_MEDIUM", + "DIAGONAL_LARGE", + "CIRCLE_LARGE", + "DIAMOND_GRID_LARGE", + "CHECKERBOARD_LARGE", + "TRIANGLE_LARGE", + "DIAGONAL_OPPOSITE_LARGE", + "DIAMOND_LARGE", + "DIAGONAL_SMALL", + "CIRCLE_SMALL", + "DIAMOND_GRID_SMALL", + "CHECKERBOARD_SMALL", + "TRIANGLE_SMALL", + "DIAGONAL_OPPOSITE_SMALL", + "DIAMOND_SMALL" + ] + }, + "DecalSettings":{ + "type":"structure", + "members":{ + "ElementValue":{ + "shape":"ElementValue", + "documentation":"

Field value of the field that you are setting the decal pattern to. Applicable only for field level settings.

" + }, + "DecalVisibility":{ + "shape":"Visibility", + "documentation":"

Visibility setting for the decal pattern.

" + }, + "DecalColor":{ + "shape":"HexColorWithTransparency", + "documentation":"

Color configuration for the decal pattern.

" + }, + "DecalPatternType":{ + "shape":"DecalPatternType", + "documentation":"

Type of pattern used for the decal, such as solid, diagonal, or circular patterns in various sizes.

" + }, + "DecalStyleType":{ + "shape":"DecalStyleType", + "documentation":"

Style type for the decal, which can be either manual or automatic. This field is only applicable for line series.

" + } + }, + "documentation":"

Decal settings for accessibility features that define visual patterns and styling for data elements.

" + }, + "DecalSettingsConfiguration":{ + "type":"structure", + "members":{ + "CustomDecalSettings":{ + "shape":"DecalSettingsList", + "documentation":"

A list of up to 50 decal settings.

" + } + }, + "documentation":"

Decal settings configuration for a column

" + }, + "DecalSettingsList":{ + "type":"list", + "member":{"shape":"DecalSettings"}, + "max":50 + }, + "DecalStyleType":{ + "type":"string", + "enum":[ + "Manual", + "Auto" + ] + }, "DecimalDatasetParameter":{ "type":"structure", "required":[ @@ -18192,6 +18609,45 @@ } } }, + "DescribeSelfUpgradeConfigurationRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "Namespace" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

The ID of the Amazon Web Services account that contains the Quick Suite self-upgrade configuration.

", + "location":"uri", + "locationName":"AwsAccountId" + }, + "Namespace":{ + "shape":"Namespace", + "documentation":"

The Quick Suite namespace that you want to describe the Quick Suite self-upgrade configuration for.

", + "location":"uri", + "locationName":"Namespace" + } + } + }, + "DescribeSelfUpgradeConfigurationResponse":{ + "type":"structure", + "members":{ + "SelfUpgradeConfiguration":{ + "shape":"SelfUpgradeConfiguration", + "documentation":"

The self-upgrade configuration for the Quick Suite account.

" + }, + "RequestId":{ + "shape":"String", + "documentation":"

The Amazon Web Services request ID for this operation.

" + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

The HTTP status of the request.

", + "location":"statusCode" + } + } + }, "DescribeTemplateAliasRequest":{ "type":"structure", "required":[ @@ -19118,6 +19574,10 @@ "ENTERPRISE_AND_Q" ] }, + "ElementValue":{ + "type":"string", + "max":1024 + }, "Email":{ "type":"string", "pattern":"[\\w.%+-]+@[A-Za-z0-9.-]+\\.[A-Za-z]{2,}", @@ -19378,6 +19838,21 @@ }, "documentation":"

An entry that appears when a KeyRegistration update to Quick Sight fails.

" }, + "FieldBarSeriesItem":{ + "type":"structure", + "required":["FieldId"], + "members":{ + "FieldId":{ + "shape":"FieldId", + "documentation":"

Field ID of the field for which you are setting the series configuration.

" + }, + "Settings":{ + "shape":"BarChartSeriesSettings", + "documentation":"

Options that determine the presentation of bar series associated to the field.

" + } + }, + "documentation":"

The field series item configuration of a BarChartVisual.

" + }, "FieldBasedTooltip":{ "type":"structure", "members":{ @@ -19396,6 +19871,21 @@ }, "documentation":"

The setup for the detailed tooltip.

" }, + "FieldComboSeriesItem":{ + "type":"structure", + "required":["FieldId"], + "members":{ + "FieldId":{ + "shape":"FieldId", + "documentation":"

Field ID of the field for which you are setting the series configuration.

" + }, + "Settings":{ + "shape":"ComboChartSeriesSettings", + "documentation":"

Options that determine the presentation of series associated to the field.

" + } + }, + "documentation":"

The field series item configuration of a ComboChartVisual.

" + }, "FieldFolder":{ "type":"structure", "members":{ @@ -19711,6 +20201,10 @@ "VisualContentAltText":{ "shape":"LongPlainText", "documentation":"

The alt text for the visual.

" + }, + "GeocodingPreferences":{ + "shape":"GeocodePreferenceList", + "documentation":"

The geocoding preferences for the filled map visual.

" } }, "documentation":"

A filled map.

For more information, see Creating filled maps in the Amazon Quick Suite User Guide.

" @@ -21511,6 +22005,96 @@ "LATITUDE" ] }, + "GeocodePreference":{ + "type":"structure", + "required":[ + "RequestKey", + "Preference" + ], + "members":{ + "RequestKey":{ + "shape":"GeocoderHierarchy", + "documentation":"

The unique request key for the geocode preference.

" + }, + "Preference":{ + "shape":"GeocodePreferenceValue", + "documentation":"

The preference definition for the geocode preference.

" + } + }, + "documentation":"

The geocode preference.

" + }, + "GeocodePreferenceList":{ + "type":"list", + "member":{"shape":"GeocodePreference"}, + "max":200, + "min":0 + }, + "GeocodePreferenceValue":{ + "type":"structure", + "members":{ + "GeocoderHierarchy":{ + "shape":"GeocoderHierarchy", + "documentation":"

The preference hierarchy for the geocode preference.

" + }, + "Coordinate":{ + "shape":"Coordinate", + "documentation":"

The preference coordinate for the geocode preference.

" + } + }, + "documentation":"

The preference value for the geocode preference.

", + "union":true + }, + "GeocoderHierarchy":{ + "type":"structure", + "members":{ + "Country":{ + "shape":"GeocoderHierarchyCountryString", + "documentation":"

The country value for the preference hierarchy.

" + }, + "State":{ + "shape":"GeocoderHierarchyStateString", + "documentation":"

The state/region value for the preference hierarchy.

" + }, + "County":{ + "shape":"GeocoderHierarchyCountyString", + "documentation":"

The county/district value for the preference hierarchy.

" + }, + "City":{ + "shape":"GeocoderHierarchyCityString", + "documentation":"

The city value for the preference hierarchy.

" + }, + "PostCode":{ + "shape":"GeocoderHierarchyPostCodeString", + "documentation":"

The postcode value for the preference hierarchy.

" + } + }, + "documentation":"

The preference hierarchy for the geocode preference.

" + }, + "GeocoderHierarchyCityString":{ + "type":"string", + "max":3000, + "min":0 + }, + "GeocoderHierarchyCountryString":{ + "type":"string", + "max":3000, + "min":0 + }, + "GeocoderHierarchyCountyString":{ + "type":"string", + "max":3000, + "min":0 + }, + "GeocoderHierarchyPostCodeString":{ + "type":"string", + "max":3000, + "min":0 + }, + "GeocoderHierarchyStateString":{ + "type":"string", + "max":3000, + "min":0 + }, "GeospatialCategoricalColor":{ "type":"structure", "required":["CategoryDataColors"], @@ -22049,6 +22633,10 @@ "VisualContentAltText":{ "shape":"LongPlainText", "documentation":"

The alt text for the visual.

" + }, + "GeocodingPreferences":{ + "shape":"GeocodePreferenceList", + "documentation":"

The geocoding preferences for the geospatial map.

" } }, "documentation":"

A geospatial map or a points on map visual.

For more information, see Creating point maps in the Amazon Quick Suite User Guide.

" @@ -22434,6 +23022,56 @@ } } }, + "GetIdentityContextRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "UserIdentifier" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

The ID for the Amazon Web Services account that the user whose identity context you want to retrieve is in. Currently, you use the ID for the Amazon Web Services account that contains your Quick Sight account.

", + "location":"uri", + "locationName":"AwsAccountId" + }, + "UserIdentifier":{ + "shape":"UserIdentifier", + "documentation":"

The identifier for the user whose identity context you want to retrieve.

" + }, + "Namespace":{ + "shape":"Namespace", + "documentation":"

The namespace of the user that you want to get identity context for. This parameter is required when the UserIdentifier is specified using Email or UserName.

" + }, + "SessionExpiresAt":{ + "shape":"Timestamp", + "documentation":"

The timestamp at which the session will expire.

" + } + }, + "documentation":"

A request to retrieve identity context information for a user.

" + }, + "GetIdentityContextResponse":{ + "type":"structure", + "required":[ + "Status", + "RequestId" + ], + "members":{ + "Status":{ + "shape":"statusCode", + "documentation":"

The HTTP status of the request.

", + "location":"statusCode" + }, + "RequestId":{ + "shape":"String", + "documentation":"

The Amazon Web Services request ID for this operation.

" + }, + "Context":{ + "shape":"String", + "documentation":"

The identity context information for the user. This is an identity token that should be used as the ContextAssertion parameter in the STS AssumeRole API call to obtain identity enhanced Amazon Web Services credentials.

" + } + } + }, "GetSessionEmbedUrlRequest":{ "type":"structure", "required":["AwsAccountId"], @@ -24038,6 +24676,19 @@ "error":{"httpStatusCode":400}, "exception":true }, + "InvalidParameterException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "RequestId":{ + "shape":"String", + "documentation":"

The Amazon Web Services request ID for this request.

" + } + }, + "documentation":"

One or more parameter has a value that isn't valid.

", + "error":{"httpStatusCode":400}, + "exception":true + }, "InvalidParameterValueException":{ "type":"structure", "members":{ @@ -24519,6 +25170,28 @@ "VERTICAL" ] }, + "KeyPairCredentials":{ + "type":"structure", + "required":[ + "KeyPairUsername", + "PrivateKey" + ], + "members":{ + "KeyPairUsername":{ + "shape":"DbUsername", + "documentation":"

Username

" + }, + "PrivateKey":{ + "shape":"PrivateKey", + "documentation":"

PrivateKey

" + }, + "PrivateKeyPassphrase":{ + "shape":"PrivateKeyPassphrase", + "documentation":"

PrivateKeyPassphrase

" + } + }, + "documentation":"

The combination of username, private key and passphrase that are used as credentials.

" + }, "KeyRegistration":{ "type":"list", "member":{"shape":"RegisteredCustomerManagedKey"} @@ -24868,6 +25541,10 @@ "MarkerStyleSettings":{ "shape":"LineChartMarkerStyleSettings", "documentation":"

Marker styles options for all line series in the visual.

" + }, + "DecalSettings":{ + "shape":"DecalSettings", + "documentation":"

Decal settings options for all line series in the visual.

" } }, "documentation":"

The options that determine the default presentation of all line series in LineChartVisual.

" @@ -24954,6 +25631,10 @@ "MarkerStyleSettings":{ "shape":"LineChartMarkerStyleSettings", "documentation":"

Marker styles options for a line series in LineChartVisual.

" + }, + "DecalSettings":{ + "shape":"DecalSettings", + "documentation":"

Decal settings for a line series in LineChartVisual.

" } }, "documentation":"

The options that determine the presentation of a line series in the visual

" @@ -26288,6 +26969,62 @@ } } }, + "ListSelfUpgradesRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "Namespace" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

The ID of the Amazon Web Services account that contains the self-upgrade requests.

", + "location":"uri", + "locationName":"AwsAccountId" + }, + "Namespace":{ + "shape":"Namespace", + "documentation":"

The Quick Suite namespace for the self-upgrade requests.

", + "location":"uri", + "locationName":"Namespace" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token for the next set of results, or null if there are no more results.

", + "location":"querystring", + "locationName":"next-token" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return.

", + "box":true, + "location":"querystring", + "locationName":"max-results" + } + } + }, + "ListSelfUpgradesResponse":{ + "type":"structure", + "members":{ + "SelfUpgradeRequestDetails":{ + "shape":"SelfUpgradeRequestDetailList", + "documentation":"

A list of self-upgrade request details.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

The token for the next set of results, or null if there are no more results.

" + }, + "RequestId":{ + "shape":"String", + "documentation":"

The Amazon Web Services request ID for this operation.

" + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

The HTTP status of the request.

", + "location":"statusCode" + } + } + }, "ListTagsForResourceRequest":{ "type":"structure", "required":["ResourceArn"], @@ -28514,7 +29251,7 @@ "ParameterDeclarationList":{ "type":"list", "member":{"shape":"ParameterDeclaration"}, - "max":200 + "max":400 }, "ParameterDropDownControl":{ "type":"structure", @@ -29322,6 +30059,10 @@ "shape":"PivotTablePaginatedReportOptions", "documentation":"

The paginated report options for a pivot table visual.

" }, + "DashboardCustomizationVisualOptions":{ + "shape":"DashboardCustomizationVisualOptions", + "documentation":"

The options that define customizations available to dashboard readers for a specific visual.

" + }, "Interactions":{ "shape":"VisualInteractionOptions", "documentation":"

The general visual interactions setup for a visual.

" @@ -30069,6 +30810,18 @@ "max":100, "min":1 }, + "PrivateKey":{ + "type":"string", + "max":8000, + "min":1600, + "pattern":"^-{5}BEGIN (ENCRYPTED )?PRIVATE KEY-{5}\\u000D?\\u000A([A-Za-z0-9/+]{64}\\u000D?\\u000A)*[A-Za-z0-9/+]{1,64}={0,2}\\u000D?\\u000A-{5}END (ENCRYPTED )?PRIVATE KEY-{5}(\\u000D?\\u000A)?$", + "sensitive":true + }, + "PrivateKeyPassphrase":{ + "type":"string", + "max":256, + "sensitive":true + }, "ProgressBarOptions":{ "type":"structure", "members":{ @@ -31425,6 +32178,20 @@ }, "documentation":"

Information about the Amazon Quick Sight console that you want to embed.

" }, + "RegisteredUserSnapshotJobResult":{ + "type":"structure", + "members":{ + "FileGroups":{ + "shape":"SnapshotJobResultFileGroupList", + "documentation":"

A list of SnapshotJobResultFileGroup objects that contain information on the files that are requested for registered user during a StartDashboardSnapshotJob API call. If the job succeeds, these objects contain the location where the snapshot artifacts are stored. If the job fails, the objects contain information about the error that caused the job to fail.

" + } + }, + "documentation":"

A structure that contains information about files that are requested for registered user during a StartDashboardSnapshotJob API call.

" + }, + "RegisteredUserSnapshotJobResultList":{ + "type":"list", + "member":{"shape":"RegisteredUserSnapshotJobResult"} + }, "RelationalTable":{ "type":"structure", "required":[ @@ -33102,6 +33869,87 @@ "DETAILED" ] }, + "SelfUpgradeAdminAction":{ + "type":"string", + "enum":[ + "APPROVE", + "DENY", + "VERIFY" + ] + }, + "SelfUpgradeConfiguration":{ + "type":"structure", + "members":{ + "SelfUpgradeStatus":{ + "shape":"SelfUpgradeStatus", + "documentation":"

Status set for the self-upgrade configuration for the Quick Suite account. It can contain the following values:

" + } + }, + "documentation":"

The self-upgrade configuration for the Quick Suite account.

" + }, + "SelfUpgradeRequestDetail":{ + "type":"structure", + "members":{ + "UpgradeRequestId":{ + "shape":"String", + "documentation":"

The ID of the self-upgrade request.

" + }, + "UserName":{ + "shape":"UserName", + "documentation":"

The username of the user who initiated the self-upgrade request.

" + }, + "OriginalRole":{ + "shape":"UserRole", + "documentation":"

The original role of the user before the upgrade.

" + }, + "RequestedRole":{ + "shape":"UserRole", + "documentation":"

The role that the user is requesting to upgrade to.

" + }, + "RequestNote":{ + "shape":"String", + "documentation":"

An optional note explaining the reason for the self-upgrade request.

" + }, + "CreationTime":{ + "shape":"Long", + "documentation":"

The time when the self-upgrade request was created.

" + }, + "RequestStatus":{ + "shape":"SelfUpgradeRequestStatus", + "documentation":"

The status of the self-upgrade request.

" + }, + "lastUpdateAttemptTime":{ + "shape":"Long", + "documentation":"

The time of the last update attempt for the self-upgrade request.

" + }, + "lastUpdateFailureReason":{ + "shape":"String", + "documentation":"

The reason for the last update failure, if applicable.

" + } + }, + "documentation":"

Details of a self-upgrade request.

" + }, + "SelfUpgradeRequestDetailList":{ + "type":"list", + "member":{"shape":"SelfUpgradeRequestDetail"} + }, + "SelfUpgradeRequestStatus":{ + "type":"string", + "enum":[ + "PENDING", + "APPROVED", + "DENIED", + "UPDATE_FAILED", + "VERIFY_FAILED" + ] + }, + "SelfUpgradeStatus":{ + "type":"string", + "enum":[ + "AUTO_APPROVAL", + "ADMIN_APPROVAL" + ] + }, "SemanticEntityType":{ "type":"structure", "members":{ @@ -34187,7 +35035,7 @@ "documentation":"

The error type.

" } }, - "documentation":"

An object that contains information on the error that caused the snapshot job to fail.

" + "documentation":"

An object that contains information on the error that caused the snapshot job to fail.

For more information, see DescribeDashboardSnapshotJobResult API.

" }, "SnapshotJobResult":{ "type":"structure", @@ -34195,6 +35043,10 @@ "AnonymousUsers":{ "shape":"AnonymousUserSnapshotJobResultList", "documentation":"

A list of AnonymousUserSnapshotJobResult objects that contain information on anonymous users and their user configurations. This data provided by you when you make a StartDashboardSnapshotJob API call.

" + }, + "RegisteredUsers":{ + "shape":"RegisteredUserSnapshotJobResultList", + "documentation":"

A list of RegisteredUserSnapshotJobResult objects that contain information about files that are requested for registered user during a StartDashboardSnapshotJob API call.

" } }, "documentation":"

An object that provides information on the result of a snapshot job. This object provides information about the job, the job status, and the location of the generated file.

" @@ -34291,7 +35143,7 @@ "documentation":"

An array of records that describe the anonymous users that the dashboard snapshot is generated for.

" } }, - "documentation":"

A structure that contains information about the users that the dashboard snapshot is generated for.

" + "documentation":"

A structure that contains information about the users that the dashboard snapshot is generated for.

When using identity-enhanced session credentials, set the UserConfiguration request attribute to null. Otherwise, the request will be invalid.

" }, "SnapshotUserConfigurationRedacted":{ "type":"structure", @@ -34670,7 +35522,6 @@ "AwsAccountId", "DashboardId", "SnapshotJobId", - "UserConfiguration", "SnapshotConfiguration" ], "members":{ @@ -34692,7 +35543,7 @@ }, "UserConfiguration":{ "shape":"SnapshotUserConfiguration", - "documentation":"

A structure that contains information about the anonymous users that the generated snapshot is for. This API will not return information about registered Amazon Quick Sight.

" + "documentation":"

A structure that contains information about the users that the dashboard snapshot is generated for. The users can be either anonymous users or registered users. Anonymous users cannot be used together with registered users.

When using identity-enhanced session credentials, set the UserConfiguration request attribute to null. Otherwise, the request will be invalid.

" }, "SnapshotConfiguration":{ "shape":"SnapshotConfiguration", @@ -35281,6 +36132,10 @@ "shape":"TableInlineVisualizationList", "documentation":"

A collection of inline visualizations to display within a chart.

" }, + "DashboardCustomizationVisualOptions":{ + "shape":"DashboardCustomizationVisualOptions", + "documentation":"

The options that define customizations available to dashboard readers for a specific visual.

" + }, "Interactions":{ "shape":"VisualInteractionOptions", "documentation":"

The general visual interactions setup for a visual.

" @@ -40499,6 +41354,95 @@ } } }, + "UpdateSelfUpgradeConfigurationRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "Namespace", + "SelfUpgradeStatus" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

The ID of the Amazon Web Services account that contains the Quick Suite self-upgrade configuration that you want to update.

", + "location":"uri", + "locationName":"AwsAccountId" + }, + "Namespace":{ + "shape":"Namespace", + "documentation":"

The Quick Suite namespace that you want to update the Quick Suite self-upgrade configuration for.

", + "location":"uri", + "locationName":"Namespace" + }, + "SelfUpgradeStatus":{ + "shape":"SelfUpgradeStatus", + "documentation":"

The self-upgrade status that you want to set for the Quick Suite account.

" + } + } + }, + "UpdateSelfUpgradeConfigurationResponse":{ + "type":"structure", + "members":{ + "RequestId":{ + "shape":"String", + "documentation":"

The Amazon Web Services request ID for this operation.

" + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

The HTTP status of the request.

", + "location":"statusCode" + } + } + }, + "UpdateSelfUpgradeRequest":{ + "type":"structure", + "required":[ + "AwsAccountId", + "Namespace", + "UpgradeRequestId", + "Action" + ], + "members":{ + "AwsAccountId":{ + "shape":"AwsAccountId", + "documentation":"

The ID of the Amazon Web Services account that contains the self-upgrade request.

", + "location":"uri", + "locationName":"AwsAccountId" + }, + "Namespace":{ + "shape":"Namespace", + "documentation":"

The Quick Suite namespace for the self-upgrade request.

", + "location":"uri", + "locationName":"Namespace" + }, + "UpgradeRequestId":{ + "shape":"String", + "documentation":"

The ID of the self-upgrade request to update.

" + }, + "Action":{ + "shape":"SelfUpgradeAdminAction", + "documentation":"

The action to perform on the self-upgrade request. Valid values are APPROVE, DENY, or VERIFY.

" + } + } + }, + "UpdateSelfUpgradeResponse":{ + "type":"structure", + "members":{ + "SelfUpgradeRequestDetail":{ + "shape":"SelfUpgradeRequestDetail", + "documentation":"

Details of the updated self-upgrade request.

" + }, + "RequestId":{ + "shape":"String", + "documentation":"

The Amazon Web Services request ID for this operation.

" + }, + "Status":{ + "shape":"StatusCode", + "documentation":"

The HTTP status of the request.

", + "location":"statusCode" + } + } + }, "UpdateTemplateAliasRequest":{ "type":"structure", "required":[ @@ -41300,6 +42244,25 @@ }, "documentation":"

A registered user of Quick Sight.

" }, + "UserIdentifier":{ + "type":"structure", + "members":{ + "UserName":{ + "shape":"SensitiveString", + "documentation":"

The name of the user that you want to get identity context for.

" + }, + "Email":{ + "shape":"SensitiveString", + "documentation":"

The email address of the user that you want to get identity context for.

" + }, + "UserArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the user that you want to get identity context for.

" + } + }, + "documentation":"

A structure that contains information to identify a user.

", + "union":true + }, "UserList":{ "type":"list", "member":{"shape":"User"} @@ -41750,6 +42713,25 @@ "DATA_POINT_MENU" ] }, + "VisualCustomizationAdditionalFieldsList":{ + "type":"list", + "member":{"shape":"ColumnIdentifier"}, + "max":2500 + }, + "VisualCustomizationFieldsConfiguration":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"DashboardCustomizationStatus", + "documentation":"

Specifies whether dashboard readers can customize fields for this visual. This option is ENABLED by default.

" + }, + "AdditionalFields":{ + "shape":"VisualCustomizationAdditionalFieldsList", + "documentation":"

The additional dataset fields available for dashboard readers to customize the visual with, beyond the fields already configured on the visual.

" + } + }, + "documentation":"

The configuration that controls field customization options available to dashboard readers for a visual.

" + }, "VisualHighlightOperation":{ "type":"structure", "required":["Trigger"], @@ -42437,7 +43419,11 @@ }, "documentation":"

The options that are available for a single Y axis in a chart.

" }, - "boolean":{"type":"boolean"} + "boolean":{"type":"boolean"}, + "statusCode":{ + "type":"integer", + "box":true + } }, "documentation":"Amazon Quick Suite API Reference

Amazon Quick Sight is a fully managed, serverless business intelligence service for the Amazon Web Services Cloud that makes it easy to extend data and insights to every user in your organization. This API reference contains documentation for a programming interface that you can use to manage Amazon Quick Sight.

" } diff --git a/awscli/botocore/data/rds/2014-10-31/service-2.json b/awscli/botocore/data/rds/2014-10-31/service-2.json index 848bbf547e99..9f26a909a0cf 100644 --- a/awscli/botocore/data/rds/2014-10-31/service-2.json +++ b/awscli/botocore/data/rds/2014-10-31/service-2.json @@ -3269,6 +3269,79 @@ }, "documentation":"

" }, + "AdditionalStorageVolume":{ + "type":"structure", + "required":["VolumeName"], + "members":{ + "VolumeName":{ + "shape":"String", + "documentation":"

The name of the additional storage volume.

Valid Values: RDSDBDATA2 | RDSDBDATA3 | RDSDBDATA4

" + }, + "AllocatedStorage":{ + "shape":"IntegerOptional", + "documentation":"

The amount of storage allocated for the additional storage volume, in gibibytes (GiB). The minimum is 20 GiB. The maximum is 65,536 GiB (64 TiB).

" + }, + "IOPS":{ + "shape":"IntegerOptional", + "documentation":"

The number of I/O operations per second (IOPS) provisioned for the additional storage volume.

" + }, + "MaxAllocatedStorage":{ + "shape":"IntegerOptional", + "documentation":"

The upper limit in gibibytes (GiB) to which RDS can automatically scale the storage of the additional storage volume.

" + }, + "StorageThroughput":{ + "shape":"IntegerOptional", + "documentation":"

The storage throughput value for the additional storage volume, in mebibytes per second (MiBps). This setting applies only to the General Purpose SSD (gp3) storage type.

" + }, + "StorageType":{ + "shape":"String", + "documentation":"

The storage type for the additional storage volume.

Valid Values: GP3 | IO2

" + } + }, + "documentation":"

Contains details about an additional storage volume for a DB instance. RDS supports additional storage volumes for RDS for Oracle and RDS for SQL Server.

" + }, + "AdditionalStorageVolumeOutput":{ + "type":"structure", + "members":{ + "VolumeName":{ + "shape":"String", + "documentation":"

The name of the additional storage volume.

" + }, + "StorageVolumeStatus":{ + "shape":"String", + "documentation":"

The status of the additional storage volume.

Valid Values: ACTIVE | CREATING | DELETING | MODIFYING | NOT-IN-USE | STORAGE-OPTIMIZATION | VOLUME-FULL

" + }, + "AllocatedStorage":{ + "shape":"Integer", + "documentation":"

The amount of storage allocated for the additional storage volume, in gibibytes (GiB). The minimum is 20 GiB. The maximum is 65,536 GiB (64 TiB).

" + }, + "IOPS":{ + "shape":"IntegerOptional", + "documentation":"

The number of I/O operations per second (IOPS) provisioned for the additional storage volume.

" + }, + "MaxAllocatedStorage":{ + "shape":"IntegerOptional", + "documentation":"

The upper limit in gibibytes (GiB) to which RDS can automatically scale the storage of the additional storage volume.

" + }, + "StorageThroughput":{ + "shape":"IntegerOptional", + "documentation":"

The storage throughput value for the additional storage volume, in mebibytes per second (MiBps).

" + }, + "StorageType":{ + "shape":"String", + "documentation":"

The storage type for the additional storage volume.

Valid Values: GP3 | IO2

" + } + }, + "documentation":"

Contains information about an additional storage volume for a DB instance.

" + }, + "AdditionalStorageVolumesList":{ + "type":"list", + "member":{"shape":"AdditionalStorageVolume"} + }, + "AdditionalStorageVolumesOutputList":{ + "type":"list", + "member":{"shape":"AdditionalStorageVolumeOutput"} + }, "ApplyMethod":{ "type":"string", "enum":[ @@ -3431,6 +3504,67 @@ "locationName":"AvailabilityZone" } }, + "AvailableAdditionalStorageVolumesOption":{ + "type":"structure", + "members":{ + "SupportsStorageAutoscaling":{ + "shape":"Boolean", + "documentation":"

Indicates whether the additional storage volume supports storage autoscaling.

" + }, + "SupportsStorageThroughput":{ + "shape":"Boolean", + "documentation":"

Indicates whether the additional storage volume supports configurable storage throughput.

" + }, + "SupportsIops":{ + "shape":"Boolean", + "documentation":"

Indicates whether the additional storage volume supports provisioned IOPS.

" + }, + "StorageType":{ + "shape":"String", + "documentation":"

The storage type for the additional storage volume.

Valid Values: GP3 | IO2

" + }, + "MinStorageSize":{ + "shape":"IntegerOptional", + "documentation":"

The minimum amount of storage that you can allocate for the additional storage volume, in gibibytes (GiB).

" + }, + "MaxStorageSize":{ + "shape":"IntegerOptional", + "documentation":"

The maximum amount of storage that you can allocate for the additional storage volume, in gibibytes (GiB).

" + }, + "MinIops":{ + "shape":"IntegerOptional", + "documentation":"

The minimum number of I/O operations per second (IOPS) that the additional storage volume supports.

" + }, + "MaxIops":{ + "shape":"IntegerOptional", + "documentation":"

The maximum number of I/O operations per second (IOPS) that the additional storage volume supports.

" + }, + "MinIopsPerGib":{ + "shape":"DoubleOptional", + "documentation":"

The minimum ratio of I/O operations per second (IOPS) to gibibytes (GiB) of storage for the additional storage volume.

" + }, + "MaxIopsPerGib":{ + "shape":"DoubleOptional", + "documentation":"

The maximum ratio of I/O operations per second (IOPS) to gibibytes (GiB) of storage for the additional storage volume.

" + }, + "MinStorageThroughput":{ + "shape":"IntegerOptional", + "documentation":"

The minimum storage throughput that the additional storage volume supports, in mebibytes per second (MiBps).

" + }, + "MaxStorageThroughput":{ + "shape":"IntegerOptional", + "documentation":"

The maximum storage throughput that the additional storage volume supports, in mebibytes per second (MiBps).

" + } + }, + "documentation":"

Contains the available options for additional storage volumes for a DB instance class.

" + }, + "AvailableAdditionalStorageVolumesOptionList":{ + "type":"list", + "member":{ + "shape":"AvailableAdditionalStorageVolumesOption", + "locationName":"AvailableAdditionalStorageVolumesOption" + } + }, "AvailableProcessorFeature":{ "type":"structure", "members":{ @@ -4143,11 +4277,11 @@ "members":{ "Engine":{ "shape":"CustomEngineName", - "documentation":"

The database engine. RDS Custom for Oracle supports the following values:

" + "documentation":"

The database engine.

RDS Custom for Oracle supports the following values:

RDS Custom for SQL Server supports the following values:

RDS for SQL Server supports only sqlserver-dev-ee.

" }, "EngineVersion":{ "shape":"CustomEngineVersion", - "documentation":"

The name of your CEV. The name format is 19.customized_string. For example, a valid CEV name is 19.my_cev1. This setting is required for RDS Custom for Oracle, but optional for Amazon RDS. The combination of Engine and EngineVersion is unique per customer per Region.

" + "documentation":"

The name of your custom engine version (CEV).

For RDS Custom for Oracle, the name format is 19.*customized_string*. For example, a valid CEV name is 19.my_cev1.

For RDS for SQL Server and RDS Custom for SQL Server, the name format is *major_engine_version*.*minor_engine_version*.*customized_string*. For example, a valid CEV name is 16.00.4215.2.my_cev1.

The CEV name is unique per customer per Amazon Web Services Region.

" }, "DatabaseInstallationFilesS3BucketName":{ "shape":"BucketName", @@ -4181,7 +4315,11 @@ "shape":"CustomDBEngineVersionManifest", "documentation":"

The CEV manifest, which is a JSON document that describes the installation .zip files stored in Amazon S3. Specify the name/value pairs in a file or a quoted string. RDS Custom applies the patches in the order in which they are listed.

The following JSON fields are valid:

MediaImportTemplateVersion

Version of the CEV manifest. The date is in the format YYYY-MM-DD.

databaseInstallationFileNames

Ordered list of installation files for the CEV.

opatchFileNames

Ordered list of OPatch installers used for the Oracle DB engine.

psuRuPatchFileNames

The PSU and RU patches for this CEV.

OtherPatchFileNames

The patches that are not in the list of PSU and RU patches. Amazon RDS applies these patches after applying the PSU and RU patches.

For more information, see Creating the CEV manifest in the Amazon RDS User Guide.

" }, - "Tags":{"shape":"TagList"} + "Tags":{"shape":"TagList"}, + "DatabaseInstallationFiles":{ + "shape":"StringList", + "documentation":"

The database installation files (ISO and EXE) uploaded to Amazon S3 for your database engine version to import to Amazon RDS.

" + } } }, "CreateDBClusterEndpointMessage":{ @@ -4446,6 +4584,10 @@ "shape":"String", "documentation":"

The life cycle type for this DB cluster.

By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, creating the DB cluster will fail if the DB major version is past its end of standard support date.

You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections:

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

Default: open-source-rds-extended-support

" }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

Tags to assign to resources associated with the DB cluster.

Valid Values:

" + }, "MasterUserAuthenticationType":{ "shape":"MasterUserAuthenticationType", "documentation":"

Specifies the authentication type for the master user. With IAM master user authentication, you can configure the master DB user with IAM database authentication when you create a DB cluster.

You can specify one of the following values:

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

This option is only valid for RDS for PostgreSQL and Aurora PostgreSQL engines.

" @@ -4546,7 +4688,7 @@ }, "Engine":{ "shape":"String", - "documentation":"

The database engine to use for this DB instance.

Not every database engine is available in every Amazon Web Services Region.

Valid Values:

" + "documentation":"

The database engine to use for this DB instance.

Not every database engine is available in every Amazon Web Services Region.

Valid Values:

" }, "MasterUsername":{ "shape":"String", @@ -4784,9 +4926,17 @@ "shape":"String", "documentation":"

The life cycle type for this DB instance.

By default, this value is set to open-source-rds-extended-support, which enrolls your DB instance into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, creating the DB instance will fail if the DB major version is past its end of standard support date.

This setting applies only to RDS for MySQL and RDS for PostgreSQL. For Amazon Aurora DB instances, the life cycle type is managed by the DB cluster.

You can use this setting to enroll your DB instance into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB instance past the end of standard support for that engine version. For more information, see Amazon RDS Extended Support with Amazon RDS in the Amazon RDS User Guide.

Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

Default: open-source-rds-extended-support

" }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

Tags to assign to resources associated with the DB instance.

Valid Values:

" + }, "MasterUserAuthenticationType":{ "shape":"MasterUserAuthenticationType", "documentation":"

Specifies the authentication type for the master user. With IAM master user authentication, you can configure the master DB user with IAM database authentication when you create a DB instance.

You can specify one of the following values:

This option is only valid for RDS for PostgreSQL and Aurora PostgreSQL engines.

" + }, + "AdditionalStorageVolumes":{ + "shape":"AdditionalStorageVolumesList", + "documentation":"

A list of additional storage volumes to create for the DB instance. You can create up to three additional storage volumes using the names rdsdbdata2, rdsdbdata3, and rdsdbdata4. Additional storage volumes are supported for RDS for Oracle and RDS for SQL Server DB instances only.

" } }, "documentation":"

" @@ -4979,6 +5129,14 @@ "CACertificateIdentifier":{ "shape":"String", "documentation":"

The CA certificate identifier to use for the read replica's server certificate.

This setting doesn't apply to RDS Custom DB instances.

For more information, see Using SSL/TLS to encrypt a connection to a DB instance in the Amazon RDS User Guide and Using SSL/TLS to encrypt a connection to a DB cluster in the Amazon Aurora User Guide.

" + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

Tags to assign to resources associated with the DB instance.

Valid Values:

" + }, + "AdditionalStorageVolumes":{ + "shape":"AdditionalStorageVolumesList", + "documentation":"

A list of additional storage volumes to create for the DB instance. You can create up to three additional storage volumes using the names rdsdbdata2, rdsdbdata3, and rdsdbdata4. Additional storage volumes are supported for RDS for Oracle and RDS for SQL Server DB instances only.

" } } }, @@ -6008,7 +6166,8 @@ "AwsBackupRecoveryPointArn":{ "shape":"String", "documentation":"

The Amazon Resource Name (ARN) of the recovery point in Amazon Web Services Backup.

" - } + }, + "TagList":{"shape":"TagList"} }, "documentation":"

An automated backup of a DB cluster. It consists of system backups, transaction logs, and the database cluster properties that existed at the time you deleted the source cluster.

", "wrapper":true @@ -6833,6 +6992,14 @@ "ServerlessV2FeaturesSupport":{ "shape":"ServerlessV2FeaturesSupport", "documentation":"

Specifies any Aurora Serverless v2 properties or limits that differ between Aurora engine versions. You can test the values of this attribute when deciding which Aurora version to use in a new or upgraded DB cluster. You can also retrieve the version of an existing DB cluster and check whether that version supports certain Aurora Serverless v2 features before you attempt to use those features.

" + }, + "DatabaseInstallationFiles":{ + "shape":"StringList", + "documentation":"

The database installation files (ISO and EXE) uploaded to Amazon S3 for your database engine version to import to Amazon RDS. Required for sqlserver-dev-ee.

" + }, + "FailureReason":{ + "shape":"String", + "documentation":"

The reason that the custom engine version creation for sqlserver-dev-ee failed with an incompatible-installation-media status.

" } }, "documentation":"

This data type is used as a response element in the action DescribeDBEngineVersions.

" @@ -7209,6 +7376,14 @@ "EngineLifecycleSupport":{ "shape":"String", "documentation":"

The lifecycle type for the DB instance.

For more information, see CreateDBInstance.

" + }, + "AdditionalStorageVolumes":{ + "shape":"AdditionalStorageVolumesOutputList", + "documentation":"

The additional storage volumes associated with the DB instance. RDS supports additional storage volumes for RDS for Oracle and RDS for SQL Server.

" + }, + "StorageVolumeStatus":{ + "shape":"String", + "documentation":"

The detailed status information for storage volumes associated with the DB instance. This information helps identify which specific volume is causing the instance to be in a storage-full state.

" } }, "documentation":"

Contains the details of an Amazon RDS DB instance.

This data type is used as a response element in the operations CreateDBInstance, CreateDBInstanceReadReplica, DeleteDBInstance, DescribeDBInstances, ModifyDBInstance, PromoteReadReplica, RebootDBInstance, RestoreDBInstanceFromDBSnapshot, RestoreDBInstanceFromS3, RestoreDBInstanceToPointInTime, StartDBInstance, and StopDBInstance.

", @@ -7250,7 +7425,7 @@ }, "AllocatedStorage":{ "shape":"Integer", - "documentation":"

The allocated storage size for the the automated backup in gibibytes (GiB).

" + "documentation":"

The allocated storage size for the automated backup in gibibytes (GiB).

" }, "Status":{ "shape":"String", @@ -7348,9 +7523,14 @@ "shape":"String", "documentation":"

The Amazon Resource Name (ARN) of the recovery point in Amazon Web Services Backup.

" }, + "TagList":{"shape":"TagList"}, "DedicatedLogVolume":{ "shape":"BooleanOptional", "documentation":"

Indicates whether the DB instance has a dedicated log volume (DLV) enabled.

" + }, + "AdditionalStorageVolumes":{ + "shape":"AdditionalStorageVolumesList", + "documentation":"

The additional storage volumes associated with the automated backup.

Valid Values: GP3 | IO2

" } }, "documentation":"

An automated backup of a DB instance. It consists of system backups, transaction logs, and the database instance properties that existed at the time you deleted the source instance.

", @@ -8535,6 +8715,10 @@ "SnapshotAvailabilityZone":{ "shape":"String", "documentation":"

Specifies the name of the Availability Zone where RDS stores the DB snapshot. This value is valid only for snapshots that RDS stores on a Dedicated Local Zone.

" + }, + "AdditionalStorageVolumes":{ + "shape":"AdditionalStorageVolumesList", + "documentation":"

The additional storage volumes associated with the DB snapshot. RDS supports additional storage volumes for RDS for Oracle and RDS for SQL Server.

" } }, "documentation":"

Contains the details of an Amazon RDS DB snapshot.

This data type is used as a response element in the DescribeDBSnapshots action.

", @@ -8895,7 +9079,7 @@ "members":{ "Engine":{ "shape":"CustomEngineName", - "documentation":"

The database engine. RDS Custom for Oracle supports the following values:

" + "documentation":"

The database engine.

RDS Custom for Oracle supports the following values:

RDS Custom for SQL Server supports the following values:

RDS for SQL Server supports only sqlserver-dev-ee.

" }, "EngineVersion":{ "shape":"CustomEngineVersion", @@ -12341,6 +12525,45 @@ } } }, + "ModifyAdditionalStorageVolume":{ + "type":"structure", + "required":["VolumeName"], + "members":{ + "VolumeName":{ + "shape":"String", + "documentation":"

The name of the additional storage volume that you want to modify.

Valid Values: RDSDBDATA2 | RDSDBDATA3 | RDSDBDATA4

" + }, + "AllocatedStorage":{ + "shape":"IntegerOptional", + "documentation":"

The amount of storage allocated for the additional storage volume, in gibibytes (GiB). The minimum is 20 GiB. The maximum is 65,536 GiB (64 TiB).

" + }, + "IOPS":{ + "shape":"IntegerOptional", + "documentation":"

The number of I/O operations per second (IOPS) provisioned for the additional storage volume. This setting is only supported for Provisioned IOPS SSD (io1 and io2) storage types.

" + }, + "MaxAllocatedStorage":{ + "shape":"IntegerOptional", + "documentation":"

The upper limit in gibibytes (GiB) to which RDS can automatically scale the storage of the additional storage volume. You must provide a value greater than or equal to AllocatedStorage.

" + }, + "StorageThroughput":{ + "shape":"IntegerOptional", + "documentation":"

The storage throughput value for the additional storage volume, in mebibytes per second (MiBps). This setting applies only to the General Purpose SSD (gp3) storage type.

" + }, + "StorageType":{ + "shape":"String", + "documentation":"

The new storage type for the additional storage volume.

Valid Values: GP3 | IO2

" + }, + "SetForDelete":{ + "shape":"BooleanOptional", + "documentation":"

Indicates whether to delete the additional storage volume. The value true schedules the volume for deletion. You can delete an additional storage volume only when it doesn't contain database files or other data.

" + } + }, + "documentation":"

Contains details about the modification of an additional storage volume.

" + }, + "ModifyAdditionalStorageVolumesList":{ + "type":"list", + "member":{"shape":"ModifyAdditionalStorageVolume"} + }, "ModifyCertificatesMessage":{ "type":"structure", "members":{ @@ -12391,7 +12614,7 @@ "members":{ "Engine":{ "shape":"CustomEngineName", - "documentation":"

The database engine. RDS Custom for Oracle supports the following values:

" + "documentation":"

The database engine.

RDS Custom for Oracle supports the following values:

RDS Custom for SQL Server supports the following values:

RDS for SQL Server supports only sqlserver-dev-ee.

" }, "EngineVersion":{ "shape":"CustomEngineVersion", @@ -12925,9 +13148,17 @@ "shape":"String", "documentation":"

The target Oracle DB engine when you convert a non-CDB to a CDB. This intermediate step is necessary to upgrade an Oracle Database 19c non-CDB to an Oracle Database 21c CDB.

Note the following requirements:

Note the following limitations:

" }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

Tags to assign to resources associated with the DB instance.

Valid Values:

" + }, "MasterUserAuthenticationType":{ "shape":"MasterUserAuthenticationType", "documentation":"

Specifies the authentication type for the master user. With IAM master user authentication, you can change the master DB user to use IAM database authentication.

You can specify one of the following values:

This option is only valid for RDS for PostgreSQL and Aurora PostgreSQL engines.

" + }, + "AdditionalStorageVolumes":{ + "shape":"ModifyAdditionalStorageVolumesList", + "documentation":"

A list of additional storage volumes to modify or delete for the DB instance. You can modify or delete up to three additional storage volumes using the names rdsdbdata2, rdsdbdata3, and rdsdbdata4. Additional storage volumes are supported for RDS for Oracle and RDS for SQL Server DB instances only.

" } }, "documentation":"

" @@ -13962,6 +14193,14 @@ "SupportsHttpEndpoint":{ "shape":"Boolean", "documentation":"

Indicates whether a DB instance supports HTTP endpoints.

" + }, + "SupportsAdditionalStorageVolumes":{ + "shape":"BooleanOptional", + "documentation":"

Indicates whether the DB instance class supports additional storage volumes.

" + }, + "AvailableAdditionalStorageVolumesOptions":{ + "shape":"AvailableAdditionalStorageVolumesOptionList", + "documentation":"

The available options for additional storage volumes for the DB instance class.

" } }, "documentation":"

Contains a list of available options for a DB instance.

This data type is used as a response element in the DescribeOrderableDBInstanceOptions action.

", @@ -14214,6 +14453,10 @@ "Engine":{ "shape":"String", "documentation":"

The database engine of the DB instance.

" + }, + "AdditionalStorageVolumes":{ + "shape":"AdditionalStorageVolumesList", + "documentation":"

The additional storage volume modifications that are pending for the DB instance.

" } }, "documentation":"

This data type is used as a response element in the ModifyDBInstance operation and contains changes that will be applied during the next maintenance window.

" @@ -14300,7 +14543,7 @@ "documentation":"

The value of a processor feature.

" } }, - "documentation":"

Contains the processor features of a DB instance class.

To specify the number of CPU cores, use the coreCount feature name for the Name parameter. To specify the number of threads per core, use the threadsPerCore feature name for the Name parameter.

You can set the processor features of the DB instance class for a DB instance when you call one of the following actions:

You can view the valid processor values for a particular instance class by calling the DescribeOrderableDBInstanceOptions action and specifying the instance class for the DBInstanceClass parameter.

In addition, you can use the following actions for DB instance class processor information:

If you call DescribeDBInstances, ProcessorFeature returns non-null values only if the following conditions are met:

For more information, see Configuring the processor for a DB instance class in RDS for Oracle in the Amazon RDS User Guide.

" + "documentation":"

Contains the processor features of a DB instance class.

To specify the number of CPU cores, use the coreCount feature name for the Name parameter. To specify the number of threads per core, use the threadsPerCore feature name for the Name parameter.

You can set the processor features of the DB instance class for a DB instance when you call one of the following actions:

You can view the valid processor values for a particular instance class by calling the DescribeOrderableDBInstanceOptions action and specifying the instance class for the DBInstanceClass parameter.

In addition, you can use the following actions for DB instance class processor information:

If you call DescribeDBInstances, ProcessorFeature returns non-null values only if the following conditions are met:

For more information, see Configuring the processor for a DB instance class in RDS for Oracle, Optimizing your RDS for SQL Server CPU, and DB instance classes in the Amazon RDS User Guide.

" }, "ProcessorFeatureList":{ "type":"list", @@ -14341,6 +14584,10 @@ "PreferredBackupWindow":{ "shape":"String", "documentation":"

The daily time range during which automated backups are created if automated backups are enabled, using the BackupRetentionPeriod parameter.

The default is a 30-minute window selected at random from an 8-hour block of time for each Amazon Web Services Region. To see the time blocks available, see Adjusting the Preferred Maintenance Window in the Amazon RDS User Guide.

Constraints:

" + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

Tags to assign to resources associated with the DB instance.

Valid Values:

" } }, "documentation":"

" @@ -15188,6 +15435,10 @@ "EngineLifecycleSupport":{ "shape":"String", "documentation":"

The life cycle type for this DB cluster.

By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB cluster to a higher engine version, if the major engine version is past its end of standard support date.

You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections:

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

Default: open-source-rds-extended-support

" + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

Tags to assign to resources associated with the DB cluster.

Valid Values:

" } } }, @@ -15341,6 +15592,10 @@ "EngineLifecycleSupport":{ "shape":"String", "documentation":"

The life cycle type for this DB cluster.

By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB cluster to a higher engine version, if the major engine version is past its end of standard support date.

You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections:

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

Default: open-source-rds-extended-support

" + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

Tags to assign to resources associated with the DB cluster.

Valid Values:

" } }, "documentation":"

" @@ -15488,6 +15743,10 @@ "EngineLifecycleSupport":{ "shape":"String", "documentation":"

The life cycle type for this DB cluster.

By default, this value is set to open-source-rds-extended-support, which enrolls your DB cluster into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB cluster to a higher engine version, if the major engine version is past its end of standard support date.

You can use this setting to enroll your DB cluster into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB cluster past the end of standard support for that engine version. For more information, see the following sections:

Valid for Cluster Type: Aurora DB clusters and Multi-AZ DB clusters

Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

Default: open-source-rds-extended-support

" + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

Tags to assign to resources associated with the DB cluster.

Valid Values:

" } }, "documentation":"

" @@ -15667,6 +15926,10 @@ "shape":"String", "documentation":"

The life cycle type for this DB instance.

By default, this value is set to open-source-rds-extended-support, which enrolls your DB instance into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB instance to a higher engine version, if the major engine version is past its end of standard support date.

You can use this setting to enroll your DB instance into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB instance past the end of standard support for that engine version. For more information, see Amazon RDS Extended Support with Amazon RDS in the Amazon RDS User Guide.

This setting applies only to RDS for MySQL and RDS for PostgreSQL. For Amazon Aurora DB instances, the life cycle type is managed by the DB cluster.

Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

Default: open-source-rds-extended-support

" }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

Tags to assign to resources associated with the DB instance.

Valid Values:

" + }, "ManageMasterUserPassword":{ "shape":"BooleanOptional", "documentation":"

Specifies whether to manage the master user password with Amazon Web Services Secrets Manager in the restored DB instance.

For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide.

Constraints:

" @@ -15674,6 +15937,10 @@ "MasterUserSecretKmsKeyId":{ "shape":"String", "documentation":"

The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager.

This setting is valid only if the master user password is managed by RDS in Amazon Web Services Secrets Manager for the DB instance.

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN.

If you don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key.

There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

" + }, + "AdditionalStorageVolumes":{ + "shape":"AdditionalStorageVolumesList", + "documentation":"

A list of additional storage volumes to create for the DB instance. You can create up to three additional storage volumes using the names rdsdbdata2, rdsdbdata3, and rdsdbdata4. Additional storage volumes are supported for RDS for Oracle and RDS for SQL Server DB instances only.

" } }, "documentation":"

" @@ -15903,6 +16170,14 @@ "EngineLifecycleSupport":{ "shape":"String", "documentation":"

The life cycle type for this DB instance.

By default, this value is set to open-source-rds-extended-support, which enrolls your DB instance into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB instance to a higher engine version, if the major engine version is past its end of standard support date.

You can use this setting to enroll your DB instance into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB instance past the end of standard support for that engine version. For more information, see Amazon RDS Extended Support with Amazon RDS in the Amazon RDS User Guide.

This setting applies only to RDS for MySQL and RDS for PostgreSQL. For Amazon Aurora DB instances, the life cycle type is managed by the DB cluster.

Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

Default: open-source-rds-extended-support

" + }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

Tags to assign to resources associated with the DB instance.

Valid Values:

" + }, + "AdditionalStorageVolumes":{ + "shape":"AdditionalStorageVolumesList", + "documentation":"

A list of additional storage volumes to modify or delete for the DB instance. You can modify or delete up to three additional storage volumes using the names rdsdbdata2, rdsdbdata3, and rdsdbdata4. Additional storage volumes are supported for RDS for Oracle and RDS for SQL Server DB instances only.

" } } }, @@ -16097,6 +16372,10 @@ "shape":"String", "documentation":"

The life cycle type for this DB instance.

By default, this value is set to open-source-rds-extended-support, which enrolls your DB instance into Amazon RDS Extended Support. At the end of standard support, you can avoid charges for Extended Support by setting the value to open-source-rds-extended-support-disabled. In this case, RDS automatically upgrades your restored DB instance to a higher engine version, if the major engine version is past its end of standard support date.

You can use this setting to enroll your DB instance into Amazon RDS Extended Support. With RDS Extended Support, you can run the selected major engine version on your DB instance past the end of standard support for that engine version. For more information, see Amazon RDS Extended Support with Amazon RDS in the Amazon RDS User Guide.

This setting applies only to RDS for MySQL and RDS for PostgreSQL. For Amazon Aurora DB instances, the life cycle type is managed by the DB cluster.

Valid Values: open-source-rds-extended-support | open-source-rds-extended-support-disabled

Default: open-source-rds-extended-support

" }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"

Tags to assign to resources associated with the DB instance.

Valid Values:

" + }, "ManageMasterUserPassword":{ "shape":"BooleanOptional", "documentation":"

Specifies whether to manage the master user password with Amazon Web Services Secrets Manager in the restored DB instance.

For more information, see Password management with Amazon Web Services Secrets Manager in the Amazon RDS User Guide.

Constraints:

" @@ -16104,6 +16383,10 @@ "MasterUserSecretKmsKeyId":{ "shape":"String", "documentation":"

The Amazon Web Services KMS key identifier to encrypt a secret that is automatically generated and managed in Amazon Web Services Secrets Manager.

This setting is valid only if the master user password is managed by RDS in Amazon Web Services Secrets Manager for the DB instance.

The Amazon Web Services KMS key identifier is the key ARN, key ID, alias ARN, or alias name for the KMS key. To use a KMS key in a different Amazon Web Services account, specify the key ARN or alias ARN.

If you don't specify MasterUserSecretKmsKeyId, then the aws/secretsmanager KMS key is used to encrypt the secret. If the secret is in a different Amazon Web Services account, then you can't use the aws/secretsmanager KMS key to encrypt the secret, and you must use a customer managed KMS key.

There is a default KMS key for your Amazon Web Services account. Your Amazon Web Services account has a different default KMS key for each Amazon Web Services Region.

" + }, + "AdditionalStorageVolumes":{ + "shape":"AdditionalStorageVolumesList", + "documentation":"

A list of additional storage volumes to restore to the DB instance. You can restore up to three additional storage volumes using the names rdsdbdata2, rdsdbdata3, and rdsdbdata4. Additional storage volumes are supported for RDS for Oracle and RDS for SQL Server DB instances only.

" } }, "documentation":"

" @@ -16539,6 +16822,10 @@ "PreSignedUrl":{ "shape":"SensitiveString", "documentation":"

In an Amazon Web Services GovCloud (US) Region, an URL that contains a Signature Version 4 signed request for the StartDBInstanceAutomatedBackupsReplication operation to call in the Amazon Web Services Region of the source DB instance. The presigned URL must be a valid request for the StartDBInstanceAutomatedBackupsReplication API operation that can run in the Amazon Web Services Region that contains the source DB instance.

This setting applies only to Amazon Web Services GovCloud (US) Regions. It's ignored in other Amazon Web Services Regions.

To learn how to generate a Signature Version 4 signed request, see Authenticating Requests: Using Query Parameters (Amazon Web Services Signature Version 4) and Signature Version 4 Signing Process.

If you are using an Amazon Web Services SDK tool or the CLI, you can specify SourceRegion (or --source-region for the CLI) instead of specifying PreSignedUrl manually. Specifying SourceRegion autogenerates a presigned URL that is a valid request for the operation that can run in the source Amazon Web Services Region.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of tags to associate with the replicated automated backups.

" } } }, @@ -16971,6 +17258,24 @@ }, "documentation":"

" }, + "TagSpecification":{ + "type":"structure", + "members":{ + "ResourceType":{ + "shape":"String", + "documentation":"

The type of resource to tag on creation.

Valid Values:

" + }, + "Tags":{"shape":"TagList"} + }, + "documentation":"

The tags to apply to resources when creating or modifying a DB instance or DB cluster. When you specify a tag, you must specify the resource type to tag, otherwise the request will fail.

" + }, + "TagSpecificationList":{ + "type":"list", + "member":{ + "shape":"TagSpecification", + "locationName":"item" + } + }, "TargetConnectionNetworkType":{ "type":"string", "enum":[ @@ -17342,6 +17647,20 @@ "max":200, "min":0 }, + "ValidAdditionalStorageOptions":{ + "type":"structure", + "members":{ + "SupportsAdditionalStorageVolumes":{ + "shape":"Boolean", + "documentation":"

Indicates whether the DB instance supports additional storage volumes.

" + }, + "Volumes":{ + "shape":"ValidVolumeOptionsList", + "documentation":"

The valid additional storage volume options for the DB instance.

" + } + }, + "documentation":"

Contains the valid options for additional storage volumes for a DB instance.

" + }, "ValidDBInstanceModificationsMessage":{ "type":"structure", "members":{ @@ -17356,6 +17675,10 @@ "SupportsDedicatedLogVolume":{ "shape":"Boolean", "documentation":"

Indicates whether a DB instance supports using a dedicated log volume (DLV).

" + }, + "AdditionalStorage":{ + "shape":"ValidAdditionalStorageOptions", + "documentation":"

The valid additional storage options for the DB instance.

" } }, "documentation":"

Information about valid modifications that you can make to your DB instance. Contains the result of a successful call to the DescribeValidDBInstanceModifications action. You can use this information when you call ModifyDBInstance.

", @@ -17409,6 +17732,24 @@ "locationName":"UpgradeTarget" } }, + "ValidVolumeOptions":{ + "type":"structure", + "members":{ + "VolumeName":{ + "shape":"String", + "documentation":"

The name of the additional storage volume.

" + }, + "Storage":{ + "shape":"ValidStorageOptionsList", + "documentation":"

The valid storage options for the additional storage volume.

" + } + }, + "documentation":"

Contains the valid options for an additional storage volume.

" + }, + "ValidVolumeOptionsList":{ + "type":"list", + "member":{"shape":"ValidVolumeOptions"} + }, "VpcEncryptionControlViolationException":{ "type":"structure", "members":{}, diff --git a/awscli/botocore/data/redshift-serverless/2021-04-21/service-2.json b/awscli/botocore/data/redshift-serverless/2021-04-21/service-2.json index d6d3b8c1f3d2..67ed28088b8b 100644 --- a/awscli/botocore/data/redshift-serverless/2021-04-21/service-2.json +++ b/awscli/botocore/data/redshift-serverless/2021-04-21/service-2.json @@ -401,6 +401,25 @@ ], "documentation":"

Returns information, such as the name, about a VPC endpoint.

" }, + "GetIdentityCenterAuthToken":{ + "name":"GetIdentityCenterAuthToken", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetIdentityCenterAuthTokenRequest"}, + "output":{"shape":"GetIdentityCenterAuthTokenResponse"}, + "errors":[ + {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"DryRunException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Returns an Identity Center authentication token for accessing Amazon Redshift Serverless workgroups.

The token provides secure access to data within the specified workgroups using Identity Center identity propagation. The token expires after a specified duration and must be refreshed for continued access.

The Identity and Access Management (IAM) user or role that runs GetIdentityCenterAuthToken must have appropriate permissions to access the specified workgroups and Identity Center integration must be configured for the workgroups.

" + }, "GetNamespace":{ "name":"GetNamespace", "http":{ @@ -537,6 +556,7 @@ {"shape":"ValidationException"}, {"shape":"ConflictException"}, {"shape":"AccessDeniedException"}, + {"shape":"DryRunException"}, {"shape":"ThrottlingException"} ], "documentation":"

Get the Redshift Serverless version for a specified track.

" @@ -2016,6 +2036,30 @@ } } }, + "GetIdentityCenterAuthTokenRequest":{ + "type":"structure", + "required":["workgroupNames"], + "members":{ + "workgroupNames":{ + "shape":"WorkgroupNameList", + "documentation":"

A list of workgroup names for which to generate the Identity Center authentication token.

Constraints:

" + } + } + }, + "GetIdentityCenterAuthTokenResponse":{ + "type":"structure", + "members":{ + "expirationTime":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The date and time when the Identity Center authentication token expires.

After this time, a new token must be requested for continued access.

" + }, + "token":{ + "shape":"String", + "documentation":"

The Identity Center authentication token that can be used to access data in the specified workgroups.

This token contains the Identity Center identity information and is encrypted for secure transmission.

" + } + }, + "sensitive":true + }, "GetNamespaceRequest":{ "type":"structure", "required":["namespaceName"], @@ -4586,6 +4630,12 @@ "min":3, "pattern":"^[a-z0-9-]+$" }, + "WorkgroupNameList":{ + "type":"list", + "member":{"shape":"WorkgroupName"}, + "max":20, + "min":1 + }, "WorkgroupStatus":{ "type":"string", "enum":[ diff --git a/awscli/botocore/data/rolesanywhere/2018-05-10/service-2.json b/awscli/botocore/data/rolesanywhere/2018-05-10/service-2.json index 0e2891a73e0f..e57657851bf1 100644 --- a/awscli/botocore/data/rolesanywhere/2018-05-10/service-2.json +++ b/awscli/botocore/data/rolesanywhere/2018-05-10/service-2.json @@ -2,16 +2,15 @@ "version":"2.0", "metadata":{ "apiVersion":"2018-05-10", + "auth":["aws.auth#sigv4"], "endpointPrefix":"rolesanywhere", - "jsonVersion":"1.1", "protocol":"rest-json", "protocols":["rest-json"], "serviceFullName":"IAM Roles Anywhere", "serviceId":"RolesAnywhere", "signatureVersion":"v4", "signingName":"rolesanywhere", - "uid":"rolesanywhere-2018-05-10", - "auth":["aws.auth#sigv4"] + "uid":"rolesanywhere-2018-05-10" }, "operations":{ "CreateProfile":{ @@ -211,7 +210,8 @@ "errors":[ {"shape":"ResourceNotFoundException"} ], - "documentation":"

Gets a certificate revocation list (CRL).

Required permissions: rolesanywhere:GetCrl.

" + "documentation":"

Gets a certificate revocation list (CRL).

Required permissions: rolesanywhere:GetCrl.

", + "readonly":true }, "GetProfile":{ "name":"GetProfile", @@ -226,7 +226,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Gets a profile.

Required permissions: rolesanywhere:GetProfile.

" + "documentation":"

Gets a profile.

Required permissions: rolesanywhere:GetProfile.

", + "readonly":true }, "GetSubject":{ "name":"GetSubject", @@ -241,7 +242,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Gets a subject, which associates a certificate identity with authentication attempts. The subject stores auditing information such as the status of the last authentication attempt, the certificate data used in the attempt, and the last time the associated identity attempted authentication.

Required permissions: rolesanywhere:GetSubject.

" + "documentation":"

Gets a subject, which associates a certificate identity with authentication attempts. The subject stores auditing information such as the status of the last authentication attempt, the certificate data used in the attempt, and the last time the associated identity attempted authentication.

Required permissions: rolesanywhere:GetSubject.

", + "readonly":true }, "GetTrustAnchor":{ "name":"GetTrustAnchor", @@ -257,7 +259,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Gets a trust anchor.

Required permissions: rolesanywhere:GetTrustAnchor.

" + "documentation":"

Gets a trust anchor.

Required permissions: rolesanywhere:GetTrustAnchor.

", + "readonly":true }, "ImportCrl":{ "name":"ImportCrl", @@ -287,7 +290,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Lists all certificate revocation lists (CRL) in the authenticated account and Amazon Web Services Region.

Required permissions: rolesanywhere:ListCrls.

" + "documentation":"

Lists all certificate revocation lists (CRL) in the authenticated account and Amazon Web Services Region.

Required permissions: rolesanywhere:ListCrls.

", + "readonly":true }, "ListProfiles":{ "name":"ListProfiles", @@ -302,7 +306,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Lists all profiles in the authenticated account and Amazon Web Services Region.

Required permissions: rolesanywhere:ListProfiles.

" + "documentation":"

Lists all profiles in the authenticated account and Amazon Web Services Region.

Required permissions: rolesanywhere:ListProfiles.

", + "readonly":true }, "ListSubjects":{ "name":"ListSubjects", @@ -317,7 +322,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Lists the subjects in the authenticated account and Amazon Web Services Region.

Required permissions: rolesanywhere:ListSubjects.

" + "documentation":"

Lists the subjects in the authenticated account and Amazon Web Services Region.

Required permissions: rolesanywhere:ListSubjects.

", + "readonly":true }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -333,7 +339,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Lists the tags attached to the resource.

Required permissions: rolesanywhere:ListTagsForResource.

" + "documentation":"

Lists the tags attached to the resource.

Required permissions: rolesanywhere:ListTagsForResource.

", + "readonly":true }, "ListTrustAnchors":{ "name":"ListTrustAnchors", @@ -348,7 +355,8 @@ {"shape":"ValidationException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Lists the trust anchors in the authenticated account and Amazon Web Services Region.

Required permissions: rolesanywhere:ListTrustAnchors.

" + "documentation":"

Lists the trust anchors in the authenticated account and Amazon Web Services Region.

Required permissions: rolesanywhere:ListTrustAnchors.

", + "readonly":true }, "PutAttributeMapping":{ "name":"PutAttributeMapping", @@ -539,41 +547,41 @@ "roleArns" ], "members":{ - "acceptRoleSessionName":{ - "shape":"Boolean", - "documentation":"

Used to determine if a custom role session name will be accepted in a temporary credential request.

" - }, - "durationSeconds":{ - "shape":"CreateProfileRequestDurationSecondsInteger", - "documentation":"

Used to determine how long sessions vended using this profile are valid for. See the Expiration section of the CreateSession API documentation page for more details. In requests, if this value is not provided, the default value will be 3600.

" - }, - "enabled":{ - "shape":"Boolean", - "documentation":"

Specifies whether the profile is enabled.

" - }, - "managedPolicyArns":{ - "shape":"ManagedPolicyList", - "documentation":"

A list of managed policy ARNs that apply to the vended session credentials.

" - }, "name":{ "shape":"ResourceName", "documentation":"

The name of the profile.

" }, "requireInstanceProperties":{ "shape":"Boolean", - "documentation":"

Specifies whether instance properties are required in temporary credential requests with this profile.

" + "documentation":"

Unused, saved for future use. Will likely specify whether instance properties are required in temporary credential requests with this profile.

" + }, + "sessionPolicy":{ + "shape":"String", + "documentation":"

A session policy that applies to the trust boundary of the vended session credentials.

" }, "roleArns":{ "shape":"RoleArnList", "documentation":"

A list of IAM roles that this profile can assume in a temporary credential request.

" }, - "sessionPolicy":{ - "shape":"String", - "documentation":"

A session policy that applies to the trust boundary of the vended session credentials.

" + "managedPolicyArns":{ + "shape":"ManagedPolicyList", + "documentation":"

A list of managed policy ARNs that apply to the vended session credentials.

" + }, + "durationSeconds":{ + "shape":"CreateProfileRequestDurationSecondsInteger", + "documentation":"

Used to determine how long sessions vended using this profile are valid for. See the Expiration section of the CreateSession API documentation page for more details. In requests, if this value is not provided, the default value will be 3600.

" + }, + "enabled":{ + "shape":"Boolean", + "documentation":"

Specifies whether the profile is enabled.

" }, "tags":{ "shape":"TagList", "documentation":"

The tags to attach to the profile.

" + }, + "acceptRoleSessionName":{ + "shape":"Boolean", + "documentation":"

Used to determine if a custom role session name will be accepted in a temporary credential request.

" } } }, @@ -590,25 +598,25 @@ "source" ], "members":{ - "enabled":{ - "shape":"Boolean", - "documentation":"

Specifies whether the trust anchor is enabled.

" - }, "name":{ "shape":"ResourceName", "documentation":"

The name of the trust anchor.

" }, - "notificationSettings":{ - "shape":"NotificationSettings", - "documentation":"

A list of notification settings to be associated to the trust anchor.

" - }, "source":{ "shape":"Source", "documentation":"

The trust anchor type and its related certificate data.

" }, + "enabled":{ + "shape":"Boolean", + "documentation":"

Specifies whether the trust anchor is enabled.

" + }, "tags":{ "shape":"TagList", "documentation":"

The tags to attach to the trust anchor.

" + }, + "notificationSettings":{ + "shape":"NotificationSettings", + "documentation":"

A list of notification settings to be associated to the trust anchor.

" } } }, @@ -619,18 +627,6 @@ "CredentialSummary":{ "type":"structure", "members":{ - "enabled":{ - "shape":"Boolean", - "documentation":"

Indicates whether the credential is enabled.

" - }, - "failed":{ - "shape":"Boolean", - "documentation":"

Indicates whether the temporary credential request was successful.

" - }, - "issuer":{ - "shape":"String", - "documentation":"

The fully qualified domain name of the issuing certificate for the presented end-entity certificate.

" - }, "seenAt":{ "shape":"SyntheticTimestamp_date_time", "documentation":"

The ISO-8601 time stamp of when the certificate was last used in a temporary credential request.

" @@ -639,9 +635,21 @@ "shape":"String", "documentation":"

The serial number of the certificate.

" }, + "issuer":{ + "shape":"String", + "documentation":"

The fully qualified domain name of the issuing certificate for the presented end-entity certificate.

" + }, + "enabled":{ + "shape":"Boolean", + "documentation":"

Indicates whether the credential is enabled.

" + }, "x509CertificateData":{ "shape":"String", "documentation":"

The PEM-encoded data of the certificate.

" + }, + "failed":{ + "shape":"Boolean", + "documentation":"

Indicates whether the temporary credential request was successful.

" } }, "documentation":"

A record of a presented X509 credential from a temporary credential request.

" @@ -649,34 +657,34 @@ "CrlDetail":{ "type":"structure", "members":{ - "createdAt":{ - "shape":"SyntheticTimestamp_date_time", - "documentation":"

The ISO-8601 timestamp when the certificate revocation list (CRL) was created.

" + "crlId":{ + "shape":"Uuid", + "documentation":"

The unique identifier of the certificate revocation list (CRL).

" }, "crlArn":{ "shape":"String", "documentation":"

The ARN of the certificate revocation list (CRL).

" }, - "crlData":{ - "shape":"Blob", - "documentation":"

The state of the certificate revocation list (CRL) after a read or write operation.

" - }, - "crlId":{ - "shape":"Uuid", - "documentation":"

The unique identifier of the certificate revocation list (CRL).

" + "name":{ + "shape":"String", + "documentation":"

The name of the certificate revocation list (CRL).

" }, "enabled":{ "shape":"Boolean", "documentation":"

Indicates whether the certificate revocation list (CRL) is enabled.

" }, - "name":{ - "shape":"String", - "documentation":"

The name of the certificate revocation list (CRL).

" + "crlData":{ + "shape":"Blob", + "documentation":"

The state of the certificate revocation list (CRL) after a read or write operation.

" }, "trustAnchorArn":{ "shape":"String", "documentation":"

The ARN of the TrustAnchor the certificate revocation list (CRL) will provide revocation for.

" }, + "createdAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The ISO-8601 timestamp when the certificate revocation list (CRL) was created.

" + }, "updatedAt":{ "shape":"SyntheticTimestamp_date_time", "documentation":"

The ISO-8601 timestamp when the certificate revocation list (CRL) was last updated.

" @@ -701,22 +709,22 @@ "DeleteAttributeMappingRequest":{ "type":"structure", "required":[ - "certificateField", - "profileId" + "profileId", + "certificateField" ], "members":{ - "certificateField":{ - "shape":"CertificateField", - "documentation":"

Fields (x509Subject, x509Issuer and x509SAN) within X.509 certificates.

", - "location":"querystring", - "locationName":"certificateField" - }, "profileId":{ "shape":"Uuid", "documentation":"

The unique identifier of the profile.

", "location":"uri", "locationName":"profileId" }, + "certificateField":{ + "shape":"CertificateField", + "documentation":"

Fields (x509Subject, x509Issuer and x509SAN) within X.509 certificates.

", + "location":"querystring", + "locationName":"certificateField" + }, "specifiers":{ "shape":"SpecifierList", "documentation":"

A list of specifiers of a certificate field; for example, CN, OU, UID from a Subject.

", @@ -738,11 +746,15 @@ "ImportCrlRequest":{ "type":"structure", "required":[ - "crlData", "name", + "crlData", "trustAnchorArn" ], "members":{ + "name":{ + "shape":"ResourceName", + "documentation":"

The name of the certificate revocation list (CRL).

" + }, "crlData":{ "shape":"ImportCrlRequestCrlDataBlob", "documentation":"

The x509 v3 specified certificate revocation list (CRL).

" @@ -751,10 +763,6 @@ "shape":"Boolean", "documentation":"

Specifies whether the certificate revocation list (CRL) is enabled.

" }, - "name":{ - "shape":"ResourceName", - "documentation":"

The name of the certificate revocation list (CRL).

" - }, "tags":{ "shape":"TagList", "documentation":"

A list of tags to attach to the certificate revocation list (CRL).

" @@ -777,17 +785,17 @@ "InstanceProperty":{ "type":"structure", "members":{ - "failed":{ - "shape":"Boolean", - "documentation":"

Indicates whether the temporary credential request was successful.

" + "seenAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The ISO-8601 time stamp of when the certificate was last used in a temporary credential request.

" }, "properties":{ "shape":"InstancePropertyMap", "documentation":"

A list of instanceProperty objects.

" }, - "seenAt":{ - "shape":"SyntheticTimestamp_date_time", - "documentation":"

The ISO-8601 time stamp of when the certificate was last used in a temporary credential request.

" + "failed":{ + "shape":"Boolean", + "documentation":"

Indicates whether the temporary credential request was successful.

" } }, "documentation":"

A key-value pair you set that identifies a property of the authenticating instance.

" @@ -816,13 +824,13 @@ "ListCrlsResponse":{ "type":"structure", "members":{ - "crls":{ - "shape":"CrlDetails", - "documentation":"

A list of certificate revocation lists (CRL).

" - }, "nextToken":{ "shape":"String", "documentation":"

A token that indicates where the output should continue from, if a previous request did not show all results. To get the next results, make the request again with this value.

" + }, + "crls":{ + "shape":"CrlDetails", + "documentation":"

A list of certificate revocation lists (CRL).

" } } }, @@ -864,13 +872,13 @@ "ListSubjectsResponse":{ "type":"structure", "members":{ - "nextToken":{ - "shape":"String", - "documentation":"

A token that indicates where the output should continue from, if a previous request did not show all results. To get the next results, make the request again with this value.

" - }, "subjects":{ "shape":"SubjectSummaries", "documentation":"

A list of subjects.

" + }, + "nextToken":{ + "shape":"String", + "documentation":"

A token that indicates where the output should continue from, if a previous request did not show all results. To get the next results, make the request again with this value.

" } } }, @@ -957,10 +965,6 @@ "event" ], "members":{ - "channel":{ - "shape":"NotificationChannel", - "documentation":"

The specified channel of notification. IAM Roles Anywhere uses CloudWatch metrics, EventBridge, and Health Dashboard to notify for an event.

In the absence of a specific channel, IAM Roles Anywhere applies this setting to 'ALL' channels.

" - }, "enabled":{ "shape":"Boolean", "documentation":"

Indicates whether the notification setting is enabled.

" @@ -972,6 +976,10 @@ "threshold":{ "shape":"NotificationSettingThresholdInteger", "documentation":"

The number of days before a notification event. This value is required for a notification setting that is enabled.

" + }, + "channel":{ + "shape":"NotificationChannel", + "documentation":"

The specified channel of notification. IAM Roles Anywhere uses CloudWatch metrics, EventBridge, and Health Dashboard to notify for an event.

In the absence of a specific channel, IAM Roles Anywhere applies this setting to 'ALL' channels.

" } }, "documentation":"

Customizable notification settings that will be applied to notification events. IAM Roles Anywhere consumes these settings while notifying across multiple channels - CloudWatch metrics, EventBridge, and Health Dashboard.

" @@ -983,14 +991,6 @@ "event" ], "members":{ - "channel":{ - "shape":"NotificationChannel", - "documentation":"

The specified channel of notification. IAM Roles Anywhere uses CloudWatch metrics, EventBridge, and Health Dashboard to notify for an event.

In the absence of a specific channel, IAM Roles Anywhere applies this setting to 'ALL' channels.

" - }, - "configuredBy":{ - "shape":"NotificationSettingDetailConfiguredByString", - "documentation":"

The principal that configured the notification setting. For default settings configured by IAM Roles Anywhere, the value is rolesanywhere.amazonaws.com, and for customized notifications settings, it is the respective account ID.

" - }, "enabled":{ "shape":"Boolean", "documentation":"

Indicates whether the notification setting is enabled.

" @@ -1002,6 +1002,14 @@ "threshold":{ "shape":"NotificationSettingDetailThresholdInteger", "documentation":"

The number of days before a notification event.

" + }, + "channel":{ + "shape":"NotificationChannel", + "documentation":"

The specified channel of notification. IAM Roles Anywhere uses CloudWatch metrics, EventBridge, and Health Dashboard to notify for an event.

In the absence of a specific channel, IAM Roles Anywhere applies this setting to 'ALL' channels.

" + }, + "configuredBy":{ + "shape":"NotificationSettingDetailConfiguredByString", + "documentation":"

The principal that configured the notification setting. For default settings configured by IAM Roles Anywhere, the value is rolesanywhere.amazonaws.com, and for customized notifications settings, it is the respective account ID.

" } }, "documentation":"

The state of a notification setting.

A notification setting includes information such as event name, threshold, status of the notification setting, and the channel to notify.

" @@ -1027,13 +1035,13 @@ "type":"structure", "required":["event"], "members":{ - "channel":{ - "shape":"NotificationChannel", - "documentation":"

The specified channel of notification.

" - }, "event":{ "shape":"NotificationEvent", "documentation":"

The notification setting event to reset.

" + }, + "channel":{ + "shape":"NotificationChannel", + "documentation":"

The specified channel of notification.

" } }, "documentation":"

A notification setting key to reset. A notification setting key includes the event and the channel.

" @@ -1060,66 +1068,66 @@ "type":"string", "max":1011, "min":1, - "pattern":"^arn:aws(-[^:]+)?:rolesanywhere(:.*){2}(:profile.*)$" + "pattern":"arn:aws(-[^:]+)?:rolesanywhere(:.*){2}(:profile.*)" }, "ProfileDetail":{ "type":"structure", "members":{ - "acceptRoleSessionName":{ - "shape":"Boolean", - "documentation":"

Used to determine if a custom role session name will be accepted in a temporary credential request.

" - }, - "attributeMappings":{ - "shape":"AttributeMappings", - "documentation":"

A mapping applied to the authenticating end-entity certificate.

" + "profileId":{ + "shape":"Uuid", + "documentation":"

The unique identifier of the profile.

" }, - "createdAt":{ - "shape":"SyntheticTimestamp_date_time", - "documentation":"

The ISO-8601 timestamp when the profile was created.

" + "profileArn":{ + "shape":"ProfileArn", + "documentation":"

The ARN of the profile.

" }, - "createdBy":{ - "shape":"String", - "documentation":"

The Amazon Web Services account that created the profile.

" + "name":{ + "shape":"ResourceName", + "documentation":"

The name of the profile.

" }, - "durationSeconds":{ - "shape":"Integer", - "documentation":"

Used to determine how long sessions vended using this profile are valid for. See the Expiration section of the CreateSession API documentation page for more details. In requests, if this value is not provided, the default value will be 3600.

" + "requireInstanceProperties":{ + "shape":"Boolean", + "documentation":"

Unused, saved for future use. Will likely specify whether instance properties are required in temporary credential requests with this profile.

" }, "enabled":{ "shape":"Boolean", "documentation":"

Indicates whether the profile is enabled.

" }, - "managedPolicyArns":{ - "shape":"ManagedPolicyList", - "documentation":"

A list of managed policy ARNs that apply to the vended session credentials.

" - }, - "name":{ - "shape":"ResourceName", - "documentation":"

The name of the profile.

" - }, - "profileArn":{ - "shape":"ProfileArn", - "documentation":"

The ARN of the profile.

" - }, - "profileId":{ - "shape":"Uuid", - "documentation":"

The unique identifier of the profile.

" + "createdBy":{ + "shape":"String", + "documentation":"

The Amazon Web Services account that created the profile.

" }, - "requireInstanceProperties":{ - "shape":"Boolean", - "documentation":"

Specifies whether instance properties are required in temporary credential requests with this profile.

" + "sessionPolicy":{ + "shape":"String", + "documentation":"

A session policy that applies to the trust boundary of the vended session credentials.

" }, "roleArns":{ "shape":"RoleArnList", "documentation":"

A list of IAM roles that this profile can assume in a temporary credential request.

" }, - "sessionPolicy":{ - "shape":"String", - "documentation":"

A session policy that applies to the trust boundary of the vended session credentials.

" + "managedPolicyArns":{ + "shape":"ManagedPolicyList", + "documentation":"

A list of managed policy ARNs that apply to the vended session credentials.

" + }, + "createdAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The ISO-8601 timestamp when the profile was created.

" }, "updatedAt":{ "shape":"SyntheticTimestamp_date_time", "documentation":"

The ISO-8601 timestamp when the profile was last updated.

" + }, + "durationSeconds":{ + "shape":"Integer", + "documentation":"

Used to determine how long sessions vended using this profile are valid for. See the Expiration section of the CreateSession API documentation page for more details. In requests, if this value is not provided, the default value will be 3600.

" + }, + "acceptRoleSessionName":{ + "shape":"Boolean", + "documentation":"

Used to determine if a custom role session name will be accepted in a temporary credential request.

" + }, + "attributeMappings":{ + "shape":"AttributeMappings", + "documentation":"

A mapping applied to the authenticating end-entity certificate.

" } }, "documentation":"

The state of the profile after a read or write operation.

" @@ -1140,11 +1148,17 @@ "PutAttributeMappingRequest":{ "type":"structure", "required":[ + "profileId", "certificateField", - "mappingRules", - "profileId" + "mappingRules" ], "members":{ + "profileId":{ + "shape":"Uuid", + "documentation":"

The unique identifier of the profile.

", + "location":"uri", + "locationName":"profileId" + }, "certificateField":{ "shape":"CertificateField", "documentation":"

Fields (x509Subject, x509Issuer and x509SAN) within X.509 certificates.

" @@ -1152,12 +1166,6 @@ "mappingRules":{ "shape":"MappingRules", "documentation":"

A list of mapping entries for every supported specifier or sub-field.

" - }, - "profileId":{ - "shape":"Uuid", - "documentation":"

The unique identifier of the profile.

", - "location":"uri", - "locationName":"profileId" } } }, @@ -1174,17 +1182,17 @@ "PutNotificationSettingsRequest":{ "type":"structure", "required":[ - "notificationSettings", - "trustAnchorId" + "trustAnchorId", + "notificationSettings" ], "members":{ - "notificationSettings":{ - "shape":"NotificationSettings", - "documentation":"

A list of notification settings to be associated to the trust anchor.

" - }, "trustAnchorId":{ "shape":"Uuid", "documentation":"

The unique identifier of the trust anchor.

" + }, + "notificationSettings":{ + "shape":"NotificationSettings", + "documentation":"

A list of notification settings to be associated to the trust anchor.

" } } }, @@ -1198,17 +1206,17 @@ "ResetNotificationSettingsRequest":{ "type":"structure", "required":[ - "notificationSettingKeys", - "trustAnchorId" + "trustAnchorId", + "notificationSettingKeys" ], "members":{ - "notificationSettingKeys":{ - "shape":"NotificationSettingKeys", - "documentation":"

A list of notification setting keys to reset. A notification setting key includes the event and the channel.

" - }, "trustAnchorId":{ "shape":"Uuid", "documentation":"

The unique identifier of the trust anchor.

" + }, + "notificationSettingKeys":{ + "shape":"NotificationSettingKeys", + "documentation":"

A list of notification setting keys to reset. A notification setting key includes the event and the channel.

" } } }, @@ -1223,7 +1231,7 @@ "type":"string", "max":255, "min":1, - "pattern":"^[ a-zA-Z0-9-_]*$" + "pattern":"[ a-zA-Z0-9-_]*" }, "ResourceNotFoundException":{ "type":"structure", @@ -1241,7 +1249,7 @@ "type":"string", "max":1011, "min":1, - "pattern":"^arn:aws(-[^:]+)?:iam(:.*){2}(:role.*)$" + "pattern":"arn:aws(-[^:]+)?:iam(:.*){2}(:role.*)" }, "RoleArnList":{ "type":"list", @@ -1300,13 +1308,13 @@ "Source":{ "type":"structure", "members":{ - "sourceData":{ - "shape":"SourceData", - "documentation":"

The data field of the trust anchor depending on its type.

" - }, "sourceType":{ "shape":"TrustAnchorType", "documentation":"

The type of the trust anchor.

" + }, + "sourceData":{ + "shape":"SourceData", + "documentation":"

The data field of the trust anchor depending on its type.

" } }, "documentation":"

The trust anchor type and its related certificate data.

" @@ -1314,13 +1322,13 @@ "SourceData":{ "type":"structure", "members":{ - "acmPcaArn":{ - "shape":"String", - "documentation":"

The root certificate of the Private Certificate Authority specified by this ARN is used in trust validation for temporary credential requests. Included for trust anchors of type AWS_ACM_PCA.

" - }, "x509CertificateData":{ "shape":"SourceDataX509CertificateDataString", "documentation":"

The PEM-encoded data for the certificate anchor. Included for trust anchors of type CERTIFICATE_BUNDLE.

" + }, + "acmPcaArn":{ + "shape":"String", + "documentation":"

The root certificate of the Private Certificate Authority specified by this ARN is used in trust validation for temporary credential requests. Included for trust anchors of type AWS_ACM_PCA.

" } }, "documentation":"

The data field of the trust anchor depending on its type.

", @@ -1328,7 +1336,7 @@ }, "SourceDataX509CertificateDataString":{ "type":"string", - "max":8000, + "max":16000, "min":1 }, "SpecifierList":{ @@ -1339,41 +1347,41 @@ "SubjectDetail":{ "type":"structure", "members":{ - "createdAt":{ - "shape":"SyntheticTimestamp_date_time", - "documentation":"

The ISO-8601 timestamp when the subject was created.

" + "subjectArn":{ + "shape":"String", + "documentation":"

The ARN of the resource.

" }, - "credentials":{ - "shape":"CredentialSummaries", - "documentation":"

The temporary session credentials vended at the last authenticating call with this subject.

" + "subjectId":{ + "shape":"Uuid", + "documentation":"

The id of the resource.

" }, "enabled":{ "shape":"Boolean", "documentation":"

The enabled status of the subject.

" }, - "instanceProperties":{ - "shape":"InstanceProperties", - "documentation":"

The specified instance properties associated with the request.

" + "x509Subject":{ + "shape":"String", + "documentation":"

The x509 principal identifier of the authenticating certificate.

" }, "lastSeenAt":{ "shape":"SyntheticTimestamp_date_time", "documentation":"

The ISO-8601 timestamp of the last time this subject requested temporary session credentials.

" }, - "subjectArn":{ - "shape":"String", - "documentation":"

The ARN of the resource.

" - }, - "subjectId":{ - "shape":"Uuid", - "documentation":"

The id of the resource

" + "createdAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The ISO-8601 timestamp when the subject was created.

" }, "updatedAt":{ "shape":"SyntheticTimestamp_date_time", "documentation":"

The ISO-8601 timestamp when the subject was last updated.

" }, - "x509Subject":{ - "shape":"String", - "documentation":"

The x509 principal identifier of the authenticating certificate.

" + "credentials":{ + "shape":"CredentialSummaries", + "documentation":"

The temporary session credentials vended at the last authenticating call with this subject.

" + }, + "instanceProperties":{ + "shape":"InstanceProperties", + "documentation":"

The specified instance properties associated with the request.

" } }, "documentation":"

The state of the subject after a read or write operation.

" @@ -1394,33 +1402,33 @@ "SubjectSummary":{ "type":"structure", "members":{ - "createdAt":{ - "shape":"SyntheticTimestamp_date_time", - "documentation":"

The ISO-8601 time stamp of when the certificate was first used in a temporary credential request.

" + "subjectArn":{ + "shape":"String", + "documentation":"

The ARN of the resource.

" + }, + "subjectId":{ + "shape":"Uuid", + "documentation":"

The id of the resource.

" }, "enabled":{ "shape":"Boolean", "documentation":"

The enabled status of the subject.

" }, + "x509Subject":{ + "shape":"String", + "documentation":"

The x509 principal identifier of the authenticating certificate.

" + }, "lastSeenAt":{ "shape":"SyntheticTimestamp_date_time", "documentation":"

The ISO-8601 time stamp of when the certificate was last used in a temporary credential request.

" }, - "subjectArn":{ - "shape":"String", - "documentation":"

The ARN of the resource.

" - }, - "subjectId":{ - "shape":"Uuid", - "documentation":"

The id of the resource.

" + "createdAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The ISO-8601 time stamp of when the certificate was first used in a temporary credential request.

" }, "updatedAt":{ "shape":"SyntheticTimestamp_date_time", "documentation":"

The ISO-8601 timestamp when the subject was last updated.

" - }, - "x509Subject":{ - "shape":"String", - "documentation":"

The x509 principal identifier of the authenticating certificate.

" } }, "documentation":"

A summary representation of subjects.

" @@ -1451,7 +1459,7 @@ "type":"string", "max":128, "min":1, - "pattern":"^[ a-zA-Z0-9_.:/=+@-]*$", + "pattern":"[ a-zA-Z0-9_.:/=+@-]*", "sensitive":true }, "TagKeyList":{ @@ -1485,14 +1493,13 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", "max":256, "min":0, - "pattern":"^[ a-zA-Z0-9_.:/=+@-]*$", + "pattern":"[ a-zA-Z0-9_.:/=+@-]*", "sensitive":true }, "TooManyTagsException":{ @@ -1511,42 +1518,42 @@ "type":"string", "max":1011, "min":1, - "pattern":"^arn:aws(-[^:]+)?:rolesanywhere(:.*){2}(:trust-anchor.*)$" + "pattern":"arn:aws(-[^:]+)?:rolesanywhere(:.*){2}(:trust-anchor.*)" }, "TrustAnchorDetail":{ "type":"structure", "members":{ - "createdAt":{ - "shape":"SyntheticTimestamp_date_time", - "documentation":"

The ISO-8601 timestamp when the trust anchor was created.

" + "trustAnchorId":{ + "shape":"Uuid", + "documentation":"

The unique identifier of the trust anchor.

" }, - "enabled":{ - "shape":"Boolean", - "documentation":"

Indicates whether the trust anchor is enabled.

" + "trustAnchorArn":{ + "shape":"String", + "documentation":"

The ARN of the trust anchor.

" }, "name":{ "shape":"ResourceName", "documentation":"

The name of the trust anchor.

" }, - "notificationSettings":{ - "shape":"NotificationSettingDetails", - "documentation":"

A list of notification settings to be associated to the trust anchor.

" - }, "source":{ "shape":"Source", "documentation":"

The trust anchor type and its related certificate data.

" }, - "trustAnchorArn":{ - "shape":"String", - "documentation":"

The ARN of the trust anchor.

" + "enabled":{ + "shape":"Boolean", + "documentation":"

Indicates whether the trust anchor is enabled.

" }, - "trustAnchorId":{ - "shape":"Uuid", - "documentation":"

The unique identifier of the trust anchor.

" + "createdAt":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The ISO-8601 timestamp when the trust anchor was created.

" }, "updatedAt":{ "shape":"SyntheticTimestamp_date_time", "documentation":"

The ISO-8601 timestamp when the trust anchor was last updated.

" + }, + "notificationSettings":{ + "shape":"NotificationSettingDetails", + "documentation":"

A list of notification settings to be associated to the trust anchor.

" } }, "documentation":"

The state of the trust anchor after a read or write operation.

" @@ -1592,17 +1599,12 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateCrlRequest":{ "type":"structure", "required":["crlId"], "members":{ - "crlData":{ - "shape":"UpdateCrlRequestCrlDataBlob", - "documentation":"

The x509 v3 specified certificate revocation list (CRL).

" - }, "crlId":{ "shape":"Uuid", "documentation":"

The unique identifier of the certificate revocation list (CRL).

", @@ -1612,6 +1614,10 @@ "name":{ "shape":"ResourceName", "documentation":"

The name of the Crl.

" + }, + "crlData":{ + "shape":"UpdateCrlRequestCrlDataBlob", + "documentation":"

The x509 v3 specified certificate revocation list (CRL).

" } } }, @@ -1624,35 +1630,35 @@ "type":"structure", "required":["profileId"], "members":{ - "acceptRoleSessionName":{ - "shape":"Boolean", - "documentation":"

Used to determine if a custom role session name will be accepted in a temporary credential request.

" - }, - "durationSeconds":{ - "shape":"UpdateProfileRequestDurationSecondsInteger", - "documentation":"

Used to determine how long sessions vended using this profile are valid for. See the Expiration section of the CreateSession API documentation page for more details. In requests, if this value is not provided, the default value will be 3600.

" - }, - "managedPolicyArns":{ - "shape":"ManagedPolicyList", - "documentation":"

A list of managed policy ARNs that apply to the vended session credentials.

" - }, - "name":{ - "shape":"ResourceName", - "documentation":"

The name of the profile.

" - }, "profileId":{ "shape":"Uuid", "documentation":"

The unique identifier of the profile.

", "location":"uri", "locationName":"profileId" }, - "roleArns":{ - "shape":"RoleArnList", - "documentation":"

A list of IAM roles that this profile can assume in a temporary credential request.

" + "name":{ + "shape":"ResourceName", + "documentation":"

The name of the profile.

" }, "sessionPolicy":{ "shape":"UpdateProfileRequestSessionPolicyString", "documentation":"

A session policy that applies to the trust boundary of the vended session credentials.

" + }, + "roleArns":{ + "shape":"RoleArnList", + "documentation":"

A list of IAM roles that this profile can assume in a temporary credential request.

" + }, + "managedPolicyArns":{ + "shape":"ManagedPolicyList", + "documentation":"

A list of managed policy ARNs that apply to the vended session credentials.

" + }, + "durationSeconds":{ + "shape":"UpdateProfileRequestDurationSecondsInteger", + "documentation":"

Used to determine how long sessions vended using this profile are valid for. See the Expiration section of the CreateSession API documentation page for more details. In requests, if this value is not provided, the default value will be 3600.

" + }, + "acceptRoleSessionName":{ + "shape":"Boolean", + "documentation":"

Used to determine if a custom role session name will be accepted in a temporary credential request.

" } } }, @@ -1671,6 +1677,12 @@ "type":"structure", "required":["trustAnchorId"], "members":{ + "trustAnchorId":{ + "shape":"Uuid", + "documentation":"

The unique identifier of the trust anchor.

", + "location":"uri", + "locationName":"trustAnchorId" + }, "name":{ "shape":"ResourceName", "documentation":"

The name of the trust anchor.

" @@ -1678,12 +1690,6 @@ "source":{ "shape":"Source", "documentation":"

The trust anchor type and its related certificate data.

" - }, - "trustAnchorId":{ - "shape":"Uuid", - "documentation":"

The unique identifier of the trust anchor.

", - "location":"uri", - "locationName":"trustAnchorId" } } }, @@ -1691,7 +1697,7 @@ "type":"string", "max":36, "min":36, - "pattern":"[a-f0-9]{8}-([a-z0-9]{4}-){3}[a-z0-9]{12}" + "pattern":".*[a-f0-9]{8}-([a-z0-9]{4}-){3}[a-z0-9]{12}.*" }, "ValidationException":{ "type":"structure", diff --git a/awscli/botocore/data/rolesanywhere/2018-05-10/waiters-2.json b/awscli/botocore/data/rolesanywhere/2018-05-10/waiters-2.json new file mode 100644 index 000000000000..13f60ee66be6 --- /dev/null +++ b/awscli/botocore/data/rolesanywhere/2018-05-10/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/awscli/botocore/data/route53/2013-04-01/service-2.json b/awscli/botocore/data/route53/2013-04-01/service-2.json index 11a6cb3b4123..9e0fc87e262b 100644 --- a/awscli/botocore/data/route53/2013-04-01/service-2.json +++ b/awscli/botocore/data/route53/2013-04-01/service-2.json @@ -1837,7 +1837,8 @@ "ap-east-2", "eu-isoe-west-1", "ap-southeast-6", - "us-isob-west-1" + "us-isob-west-1", + "eusc-de-east-1" ], "max":64, "min":1 @@ -5627,7 +5628,8 @@ "us-gov-east-1", "us-gov-west-1", "ap-east-2", - "ap-southeast-6" + "ap-southeast-6", + "eusc-de-east-1" ], "max":64, "min":1 @@ -6534,7 +6536,8 @@ "ap-east-2", "eu-isoe-west-1", "ap-southeast-6", - "us-isob-west-1" + "us-isob-west-1", + "eusc-de-east-1" ], "max":64, "min":1 diff --git a/awscli/botocore/data/route53resolver/2018-04-01/service-2.json b/awscli/botocore/data/route53resolver/2018-04-01/service-2.json index 45c04cf68f0f..9ceabbc52603 100644 --- a/awscli/botocore/data/route53resolver/2018-04-01/service-2.json +++ b/awscli/botocore/data/route53resolver/2018-04-01/service-2.json @@ -1362,7 +1362,7 @@ }, "Name":{ "shape":"Name", - "documentation":"

A name for the association that you're creating between a Resolver rule and a VPC.

" + "documentation":"

A name for the association that you're creating between a Resolver rule and a VPC.

The name can be up to 64 characters long and can contain letters (a-z, A-Z), numbers (0-9), hyphens (-), underscores (_), and spaces. The name cannot consist of only numbers.

" }, "VPCId":{ "shape":"ResourceId", @@ -1678,6 +1678,16 @@ "shape":"ProtocolList", "documentation":"

The protocols you want to use for the endpoint. DoH-FIPS is applicable for default inbound endpoints only.

For a default inbound endpoint you can apply the protocols as follows:

For a delegation inbound endpoint you can use Do53 only.

For an outbound endpoint you can apply the protocols as follows:

", "box":true + }, + "RniEnhancedMetricsEnabled":{ + "shape":"RniEnhancedMetricsEnabled", + "documentation":"

Specifies whether RNI enhanced metrics are enabled for the Resolver endpoints. When set to true, one-minute granular metrics are published in CloudWatch for each RNI associated with this endpoint. When set to false, metrics are not published. Default is false.

Standard CloudWatch pricing and charges are applied for using the Route 53 Resolver endpoint RNI enhanced metrics. For more information, see Detailed metrics.

", + "box":true + }, + "TargetNameServerMetricsEnabled":{ + "shape":"TargetNameServerMetricsEnabled", + "documentation":"

Specifies whether target name server metrics are enabled for the outbound Resolver endpoints. When set to true, one-minute granular metrics are published in CloudWatch for each target name server associated with this endpoint. When set to false, metrics are not published. Default is false. This is not supported for inbound Resolver endpoints.

Standard CloudWatch pricing and charges are applied for using the Route 53 Resolver endpoint target name server metrics. For more information, see Detailed metrics.

", + "box":true } } }, @@ -1740,7 +1750,7 @@ }, "Name":{ "shape":"Name", - "documentation":"

A friendly name that lets you easily find a rule in the Resolver dashboard in the Route 53 console.

" + "documentation":"

A friendly name that lets you easily find a rule in the Resolver dashboard in the Route 53 console.

The name can be up to 64 characters long and can contain letters (a-z, A-Z), numbers (0-9), hyphens (-), underscores (_), and spaces. The name cannot consist of only numbers.

" }, "RuleType":{ "shape":"RuleTypeOption", @@ -1753,7 +1763,7 @@ }, "TargetIps":{ "shape":"TargetList", - "documentation":"

The IPs that you want Resolver to forward DNS queries to. You can specify either Ipv4 or Ipv6 addresses but not both in the same rule. Separate IP addresses with a space.

TargetIps is available only when the value of Rule type is FORWARD.

", + "documentation":"

The IPs that you want Resolver to forward DNS queries to. You can specify either Ipv4 or Ipv6 addresses but not both in the same rule. Separate IP addresses with a space.

TargetIps is available only when the value of Rule type is FORWARD. You should not provide TargetIps when the Rule type is DELEGATE.

When creating a DELEGATE rule, you must not provide the TargetIps parameter. If you provide the TargetIps, you may receive an ERROR message similar to \"Delegate resolver rules need to specify a nameserver name\". This error means you should not provide TargetIps.

", "box":true }, "ResolverEndpointId":{ @@ -3944,6 +3954,14 @@ "Protocols":{ "shape":"ProtocolList", "documentation":"

Protocols used for the endpoint. DoH-FIPS is applicable for a default inbound endpoints only.

For an inbound endpoint you can apply the protocols as follows:

For a delegation inbound endpoint you can use Do53 only.

For an outbound endpoint you can apply the protocols as follows:

" + }, + "RniEnhancedMetricsEnabled":{ + "shape":"RniEnhancedMetricsEnabled", + "documentation":"

Indicates whether RNI enhanced metrics are enabled for the Resolver endpoint. When enabled, one-minute granular metrics are published in CloudWatch for each RNI associated with this endpoint. When disabled, these metrics are not published.

" + }, + "TargetNameServerMetricsEnabled":{ + "shape":"TargetNameServerMetricsEnabled", + "documentation":"

Indicates whether target name server metrics are enabled for the outbound Resolver endpoint. When enabled, one-minute granular metrics are published in CloudWatch for each target name server associated with this endpoint. When disabled, these metrics are not published. This feature is not supported for inbound Resolver endpoints.

" } }, "documentation":"

In the response to a CreateResolverEndpoint, DeleteResolverEndpoint, GetResolverEndpoint, or UpdateResolverEndpoint request (which updates the name or ResolverEndpointType for an endpoint), a complex type that contains settings for an existing inbound or outbound Resolver endpoint.

" @@ -4139,7 +4157,7 @@ }, "Name":{ "shape":"Name", - "documentation":"

The name for the Resolver rule, which you specified when you created the Resolver rule.

" + "documentation":"

The name for the Resolver rule, which you specified when you created the Resolver rule.

The name can be up to 64 characters long and can contain letters (a-z, A-Z), numbers (0-9), hyphens (-), underscores (_), and spaces. The name cannot consist of only numbers.

" }, "TargetIps":{ "shape":"TargetList", @@ -4185,7 +4203,7 @@ }, "Name":{ "shape":"Name", - "documentation":"

The name of an association between a Resolver rule and a VPC.

" + "documentation":"

The name of an association between a Resolver rule and a VPC.

The name can be up to 64 characters long and can contain letters (a-z, A-Z), numbers (0-9), hyphens (-), underscores (_), and spaces. The name cannot consist of only numbers.

" }, "VPCId":{ "shape":"ResourceId", @@ -4221,7 +4239,7 @@ "members":{ "Name":{ "shape":"Name", - "documentation":"

The new name for the Resolver rule. The name that you specify appears in the Resolver dashboard in the Route 53 console.

" + "documentation":"

The new name for the Resolver rule. The name that you specify appears in the Resolver dashboard in the Route 53 console.

The name can be up to 64 characters long and can contain letters (a-z, A-Z), numbers (0-9), hyphens (-), underscores (_), and spaces. The name cannot consist of only numbers.

" }, "TargetIps":{ "shape":"TargetList", @@ -4309,6 +4327,7 @@ "max":40, "min":20 }, + "RniEnhancedMetricsEnabled":{"type":"boolean"}, "RuleTypeOption":{ "type":"string", "enum":[ @@ -4465,6 +4484,7 @@ "member":{"shape":"TargetAddress"}, "min":1 }, + "TargetNameServerMetricsEnabled":{"type":"boolean"}, "ThrottlingException":{ "type":"structure", "members":{ @@ -4826,6 +4846,16 @@ "shape":"ProtocolList", "documentation":"

The protocols you want to use for the endpoint. DoH-FIPS is applicable for default inbound endpoints only.

For a default inbound endpoint you can apply the protocols as follows:

For a delegation inbound endpoint you can use Do53 only.

For an outbound endpoint you can apply the protocols as follows:

You can't change the protocol of an inbound endpoint directly from only Do53 to only DoH, or DoH-FIPS. This is to prevent a sudden disruption to incoming traffic that relies on Do53. To change the protocol from Do53 to DoH, or DoH-FIPS, you must first enable both Do53 and DoH, or Do53 and DoH-FIPS, to make sure that all incoming traffic has transferred to using the DoH protocol, or DoH-FIPS, and then remove the Do53.

", "box":true + }, + "RniEnhancedMetricsEnabled":{ + "shape":"RniEnhancedMetricsEnabled", + "documentation":"

Updates whether RNI enhanced metrics are enabled for the Resolver endpoints. When set to true, one-minute granular metrics are published in CloudWatch for each RNI associated with this endpoint. When set to false, metrics are not published.

Standard CloudWatch pricing and charges are applied for using the Route 53 Resolver endpoint RNI enhanced metrics. For more information, see Detailed metrics.

", + "box":true + }, + "TargetNameServerMetricsEnabled":{ + "shape":"TargetNameServerMetricsEnabled", + "documentation":"

Updates whether target name server metrics are enabled for the outbound Resolver endpoints. When set to true, one-minute granular metrics are published in CloudWatch for each target name server associated with this endpoint. When set to false, metrics are not published. This setting is not supported for inbound Resolver endpoints.

Standard CloudWatch pricing and charges are applied for using the Route 53 Resolver endpoint target name server metrics. For more information, see Detailed metrics.

", + "box":true } } }, diff --git a/awscli/botocore/data/s3/2006-03-01/endpoint-rule-set-1.json b/awscli/botocore/data/s3/2006-03-01/endpoint-rule-set-1.json index 9f92d3adab29..3a1389aafa32 100644 --- a/awscli/botocore/data/s3/2006-03-01/endpoint-rule-set-1.json +++ b/awscli/botocore/data/s3/2006-03-01/endpoint-rule-set-1.json @@ -5372,12 +5372,12 @@ { "conditions": [ { - "fn": "stringEquals", + "fn": "aws.isVirtualHostableS3Bucket", "argv": [ { - "ref": "hardwareType" + "ref": "Bucket" }, - "e" + false ] } ], @@ -5388,9 +5388,9 @@ "fn": "stringEquals", "argv": [ { - "ref": "regionPrefix" + "ref": "hardwareType" }, - "beta" + "e" ] } ], @@ -5398,44 +5398,86 @@ { "conditions": [ { - "fn": "not", + "fn": "stringEquals", "argv": [ { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] - } + "ref": "regionPrefix" + }, + "beta" ] } ], - "error": "Expected a endpoint to be specified but no endpoint was found", - "type": "error" - }, - { - "conditions": [ + "rules": [ { - "fn": "isSet", - "argv": [ + "conditions": [ { - "ref": "Endpoint" + "fn": "not", + "argv": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ] } - ] + ], + "error": "Expected a endpoint to be specified but no endpoint was found", + "type": "error" }, { - "fn": "parseURL", - "argv": [ + "conditions": [ { - "ref": "Endpoint" + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" } ], - "assign": "url" + "endpoint": { + "url": "https://{Bucket}.ec2.{url#authority}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4a", + "signingName": "s3-outposts", + "signingRegionSet": [ + "*" + ] + }, + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" } ], + "type": "tree" + }, + { + "conditions": [], "endpoint": { - "url": 
"https://{Bucket}.ec2.{url#authority}", + "url": "https://{Bucket}.ec2.s3-outposts.{Region}.{regionPartition#dnsSuffix}", "properties": { "authSchemes": [ { @@ -5461,57 +5503,15 @@ ], "type": "tree" }, - { - "conditions": [], - "endpoint": { - "url": "https://{Bucket}.ec2.s3-outposts.{Region}.{regionPartition#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4a", - "signingName": "s3-outposts", - "signingRegionSet": [ - "*" - ] - }, - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" - } - ], - "type": "tree" - }, - { - "conditions": [ - { - "fn": "stringEquals", - "argv": [ - { - "ref": "hardwareType" - }, - "o" - ] - } - ], - "rules": [ { "conditions": [ { "fn": "stringEquals", "argv": [ { - "ref": "regionPrefix" + "ref": "hardwareType" }, - "beta" + "o" ] } ], @@ -5519,44 +5519,86 @@ { "conditions": [ { - "fn": "not", + "fn": "stringEquals", "argv": [ { - "fn": "isSet", - "argv": [ - { - "ref": "Endpoint" - } - ] - } + "ref": "regionPrefix" + }, + "beta" ] } ], - "error": "Expected a endpoint to be specified but no endpoint was found", - "type": "error" - }, - { - "conditions": [ + "rules": [ { - "fn": "isSet", - "argv": [ + "conditions": [ { - "ref": "Endpoint" + "fn": "not", + "argv": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ] } - ] + ], + "error": "Expected a endpoint to be specified but no endpoint was found", + "type": "error" }, { - "fn": "parseURL", - "argv": [ + "conditions": [ { - "ref": "Endpoint" + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + }, + { + "fn": "parseURL", + "argv": [ + { + "ref": "Endpoint" + } + ], + "assign": "url" } ], - "assign": "url" + "endpoint": { + "url": "https://{Bucket}.op-{outpostId}.{url#authority}", + "properties": { + "authSchemes": [ + { + "disableDoubleEncoding": true, + "name": "sigv4a", + 
"signingName": "s3-outposts", + "signingRegionSet": [ + "*" + ] + }, + { + "disableDoubleEncoding": true, + "name": "sigv4", + "signingName": "s3-outposts", + "signingRegion": "{Region}" + } + ] + }, + "headers": {} + }, + "type": "endpoint" } ], + "type": "tree" + }, + { + "conditions": [], "endpoint": { - "url": "https://{Bucket}.op-{outpostId}.{url#authority}", + "url": "https://{Bucket}.op-{outpostId}.s3-outposts.{Region}.{regionPartition#dnsSuffix}", "properties": { "authSchemes": [ { @@ -5584,36 +5626,15 @@ }, { "conditions": [], - "endpoint": { - "url": "https://{Bucket}.op-{outpostId}.s3-outposts.{Region}.{regionPartition#dnsSuffix}", - "properties": { - "authSchemes": [ - { - "disableDoubleEncoding": true, - "name": "sigv4a", - "signingName": "s3-outposts", - "signingRegionSet": [ - "*" - ] - }, - { - "disableDoubleEncoding": true, - "name": "sigv4", - "signingName": "s3-outposts", - "signingRegion": "{Region}" - } - ] - }, - "headers": {} - }, - "type": "endpoint" + "error": "Unrecognized hardware type: \"Expected hardware type o or e but got {hardwareType}\"", + "type": "error" } ], "type": "tree" }, { "conditions": [], - "error": "Unrecognized hardware type: \"Expected hardware type o or e but got {hardwareType}\"", + "error": "Invalid Outposts Bucket alias - it must be a valid bucket name.", "type": "error" } ], diff --git a/awscli/botocore/data/s3/2006-03-01/service-2.json b/awscli/botocore/data/s3/2006-03-01/service-2.json index 3add914bbff9..48b3438ee0e6 100644 --- a/awscli/botocore/data/s3/2006-03-01/service-2.json +++ b/awscli/botocore/data/s3/2006-03-01/service-2.json @@ -50,7 +50,7 @@ "errors":[ {"shape":"ObjectNotInActiveTierError"} ], - "documentation":"

Creates a copy of an object that is already stored in Amazon S3.

End of support notice: As of October 1, 2025, Amazon S3 has discontinued support for Email Grantee Access Control Lists (ACLs). If you attempt to use an Email Grantee ACL in a request after October 1, 2025, the request will receive an HTTP 405 (Method Not Allowed) error.

This change affects the following Amazon Web Services Regions: US East (N. Virginia), US West (N. California), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Europe (Ireland), and South America (São Paulo).

You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API.

You can copy individual objects between general purpose buckets, between directory buckets, and between general purpose buckets and directory buckets.

Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable or disable a Region for standalone accounts in the Amazon Web Services Account Management Guide.

Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration.

Authentication and authorization

All CopyObject requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication.

Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject API operation, instead of using the temporary security credentials through the CreateSession API operation.

Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.

Permissions

You must have read access to the source object and write access to the destination bucket.

  • General purpose bucket permissions - You must have permissions in an IAM policy based on the source and destination bucket types in a CopyObject operation.

    • If the source object is in a general purpose bucket, you must have s3:GetObject permission to read the source object that is being copied.

    • If the destination bucket is a general purpose bucket, you must have s3:PutObject permission to write the object copy to the destination bucket.

  • Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in a CopyObject operation.

    • If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to read the object. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket.

    • If the copy destination is a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to write the object to the destination. The s3express:SessionMode condition key can't be set to ReadOnly on the copy destination bucket.

    If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.

    For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.

Response and special errors

When the request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an HTTP 1.1 request, the response would not contain the Content-Length. You always need to read the entire response body to check if the copy succeeds.

  • If the copy is successful, you receive a response with information about the copied object.

  • A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. A 200 OK response can contain either a success or an error.

    • If the error occurs before the copy action starts, you receive a standard Amazon S3 error.

    • If the error occurs during the copy operation, the error response is embedded in the 200 OK response. For example, in a cross-region copy, you may encounter throttling and receive a 200 OK response. For more information, see Resolve the Error 200 response when copying objects to Amazon S3. The 200 OK status code means the copy was accepted, but it doesn't mean the copy is complete. Another example is when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK response. You must stay connected to Amazon S3 until the entire response is successfully received and processed.

      If you call this API operation directly, make sure to design your application to parse the content of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error).

Charge

The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. If the copy source is in a different region, the data transfer is billed to the copy source account. For pricing information, see Amazon S3 pricing.

HTTP Host header syntax
  • Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

  • Amazon S3 on Outposts - When you use this action with S3 on Outposts through the REST API, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. The hostname isn't required when you use the Amazon Web Services CLI or SDKs.

The following operations are related to CopyObject:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Creates a copy of an object that is already stored in Amazon S3.

End of support notice: As of October 1, 2025, Amazon S3 has discontinued support for Email Grantee Access Control Lists (ACLs). If you attempt to use an Email Grantee ACL in a request after October 1, 2025, the request will receive an HTTP 405 (Method Not Allowed) error.

This change affects the following Amazon Web Services Regions: US East (N. Virginia), US West (N. California), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Europe (Ireland), and South America (São Paulo).

You can store individual objects of up to 50 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see Copy Object Using the REST Multipart Upload API.

You can copy individual objects between general purpose buckets, between directory buckets, and between general purpose buckets and directory buckets.

Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account. For more information about how to enable a Region for your account, see Enable or disable a Region for standalone accounts in the Amazon Web Services Account Management Guide.

Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration.

Authentication and authorization

All CopyObject requests must be authenticated and signed by using IAM credentials (access key ID and secret access key for the IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. For more information, see REST Authentication.

Directory buckets - You must use the IAM credentials to authenticate and authorize your access to the CopyObject API operation, instead of using the temporary security credentials through the CreateSession API operation.

Amazon Web Services CLI or SDKs handles authentication and authorization on your behalf.

Permissions

You must have read access to the source object and write access to the destination bucket.

  • General purpose bucket permissions - You must have permissions in an IAM policy based on the source and destination bucket types in a CopyObject operation.

    • If the source object is in a general purpose bucket, you must have s3:GetObject permission to read the source object that is being copied.

    • If the destination bucket is a general purpose bucket, you must have s3:PutObject permission to write the object copy to the destination bucket.

  • Directory bucket permissions - You must have permissions in a bucket policy or an IAM identity-based policy based on the source and destination bucket types in a CopyObject operation.

    • If the source object that you want to copy is in a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to read the object. By default, the session is in the ReadWrite mode. If you want to restrict the access, you can explicitly set the s3express:SessionMode condition key to ReadOnly on the copy source bucket.

    • If the copy destination is a directory bucket, you must have the s3express:CreateSession permission in the Action element of a policy to write the object to the destination. The s3express:SessionMode condition key can't be set to ReadOnly on the copy destination bucket.

    If the object is encrypted with SSE-KMS, you must also have the kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies and KMS key policies for the KMS key.

    For example policies, see Example bucket policies for S3 Express One Zone and Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone in the Amazon S3 User Guide.

Response and special errors

When the request is an HTTP 1.1 request, the response is chunk encoded. When the request is not an HTTP 1.1 request, the response would not contain the Content-Length. You always need to read the entire response body to check if the copy succeeds.

  • If the copy is successful, you receive a response with information about the copied object.

  • A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. A 200 OK response can contain either a success or an error.

    • If the error occurs before the copy action starts, you receive a standard Amazon S3 error.

    • If the error occurs during the copy operation, the error response is embedded in the 200 OK response. For example, in a cross-region copy, you may encounter throttling and receive a 200 OK response. For more information, see Resolve the Error 200 response when copying objects to Amazon S3. The 200 OK status code means the copy was accepted, but it doesn't mean the copy is complete. Another example is when you disconnect from Amazon S3 before the copy is complete, Amazon S3 might cancel the copy and you may receive a 200 OK response. You must stay connected to Amazon S3 until the entire response is successfully received and processed.

      If you call this API operation directly, make sure to design your application to parse the content of the response and handle it appropriately. If you use Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the embedded error and apply error handling per your configuration settings (including automatically retrying the request as appropriate). If the condition persists, the SDKs throw an exception (or, for the SDKs that don't use exceptions, they return an error).

Charge

The copy request charge is based on the storage class and Region that you specify for the destination object. The request can also result in a data retrieval charge for the source if the source storage class bills for data retrieval. If the copy source is in a different region, the data transfer is billed to the copy source account. For pricing information, see Amazon S3 pricing.

HTTP Host header syntax
  • Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

  • Amazon S3 on Outposts - When you use this action with S3 on Outposts through the REST API, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. The hostname isn't required when you use the Amazon Web Services CLI or SDKs.

The following operations are related to CopyObject:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", "staticContextParams":{ "DisableS3ExpressSessionAuth":{"value":true} } @@ -67,7 +67,7 @@ {"shape":"BucketAlreadyExists"}, {"shape":"BucketAlreadyOwnedByYou"} ], - "documentation":"

End of support notice: As of October 1, 2025, Amazon S3 has discontinued support for Email Grantee Access Control Lists (ACLs). If you attempt to use an Email Grantee ACL in a request after October 1, 2025, the request will receive an HTTP 405 (Method Not Allowed) error.

This change affects the following Amazon Web Services Regions: US East (N. Virginia), US West (N. California), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Europe (Ireland), and South America (São Paulo).

This action creates an Amazon S3 bucket. To create an Amazon S3 on Outposts bucket, see CreateBucket .

Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and have a valid Amazon Web Services Access Key ID to authenticate requests. Anonymous requests are never allowed to create buckets. By creating the bucket, you become the bucket owner.

There are two types of buckets: general purpose buckets and directory buckets. For more information about these bucket types, see Creating, configuring, and working with Amazon S3 buckets in the Amazon S3 User Guide.

Permissions
  • General purpose bucket permissions - In addition to the s3:CreateBucket permission, the following permissions are required in a policy when your CreateBucket request includes specific headers:

    • Access control lists (ACLs) - In your CreateBucket request, if you specify an access control list (ACL) and set it to public-read, public-read-write, authenticated-read, or if you explicitly specify any other custom ACLs, both s3:CreateBucket and s3:PutBucketAcl permissions are required. In your CreateBucket request, if you set the ACL to private, or if you don't specify any ACLs, only the s3:CreateBucket permission is required.

    • Object Lock - In your CreateBucket request, if you set x-amz-bucket-object-lock-enabled to true, the s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are required.

    • S3 Object Ownership - If your CreateBucket request includes the x-amz-object-ownership header, then the s3:PutBucketOwnershipControls permission is required.

      To set an ACL on a bucket as part of a CreateBucket request, you must explicitly set S3 Object Ownership for the bucket to a different value than the default, BucketOwnerEnforced. Additionally, if your desired bucket ACL grants public access, you must first create the bucket (without the bucket ACL) and then explicitly disable Block Public Access on the bucket before using PutBucketAcl to set the ACL. If you try to create a bucket with a public ACL, the request will fail.

      For the majority of modern use cases in S3, we recommend that you keep all Block Public Access settings enabled and keep ACLs disabled. If you would like to share data with users outside of your account, you can use bucket policies as needed. For more information, see Controlling ownership of objects and disabling ACLs for your bucket and Blocking public access to your Amazon S3 storage in the Amazon S3 User Guide.

    • S3 Block Public Access - If your specific use case requires granting public access to your S3 resources, you can disable Block Public Access. Specifically, you can create a new bucket with Block Public Access enabled, then separately call the DeletePublicAccessBlock API. To use this operation, you must have the s3:PutBucketPublicAccessBlock permission. For more information about S3 Block Public Access, see Blocking public access to your Amazon S3 storage in the Amazon S3 User Guide.

  • Directory bucket permissions - You must have the s3express:CreateBucket permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.

    The permissions for ACLs, Object Lock, S3 Object Ownership, and S3 Block Public Access are not supported for directory buckets. For directory buckets, all Block Public Access settings are enabled at the bucket level and S3 Object Ownership is set to Bucket owner enforced (ACLs disabled). These settings can't be modified.

    For more information about permissions for creating and working with directory buckets, see Directory buckets in the Amazon S3 User Guide. For more information about supported S3 features for directory buckets, see Features of S3 Express One Zone in the Amazon S3 User Guide.

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com.

The following operations are related to CreateBucket:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This action creates an Amazon S3 bucket. To create an Amazon S3 on Outposts bucket, see CreateBucket .

Creates a new S3 bucket. To create a bucket, you must set up Amazon S3 and have a valid Amazon Web Services Access Key ID to authenticate requests. Anonymous requests are never allowed to create buckets. By creating the bucket, you become the bucket owner.

There are two types of buckets: general purpose buckets and directory buckets. For more information about these bucket types, see Creating, configuring, and working with Amazon S3 buckets in the Amazon S3 User Guide.

Permissions
  • General purpose bucket permissions - In addition to the s3:CreateBucket permission, the following permissions are required in a policy when your CreateBucket request includes specific headers:

    • Access control lists (ACLs) - In your CreateBucket request, if you specify an access control list (ACL) and set it to public-read, public-read-write, authenticated-read, or if you explicitly specify any other custom ACLs, both s3:CreateBucket and s3:PutBucketAcl permissions are required. In your CreateBucket request, if you set the ACL to private, or if you don't specify any ACLs, only the s3:CreateBucket permission is required.

    • Object Lock - In your CreateBucket request, if you set x-amz-bucket-object-lock-enabled to true, the s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are required.

    • S3 Object Ownership - If your CreateBucket request includes the x-amz-object-ownership header, then the s3:PutBucketOwnershipControls permission is required.

      To set an ACL on a bucket as part of a CreateBucket request, you must explicitly set S3 Object Ownership for the bucket to a different value than the default, BucketOwnerEnforced. Additionally, if your desired bucket ACL grants public access, you must first create the bucket (without the bucket ACL) and then explicitly disable Block Public Access on the bucket before using PutBucketAcl to set the ACL. If you try to create a bucket with a public ACL, the request will fail.

      For the majority of modern use cases in S3, we recommend that you keep all Block Public Access settings enabled and keep ACLs disabled. If you would like to share data with users outside of your account, you can use bucket policies as needed. For more information, see Controlling ownership of objects and disabling ACLs for your bucket and Blocking public access to your Amazon S3 storage in the Amazon S3 User Guide.

    • S3 Block Public Access - If your specific use case requires granting public access to your S3 resources, you can disable Block Public Access. Specifically, you can create a new bucket with Block Public Access enabled, then separately call the DeletePublicAccessBlock API. To use this operation, you must have the s3:PutBucketPublicAccessBlock permission. For more information about S3 Block Public Access, see Blocking public access to your Amazon S3 storage in the Amazon S3 User Guide.

  • Directory bucket permissions - You must have the s3express:CreateBucket permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.

    The permissions for ACLs, Object Lock, S3 Object Ownership, and S3 Block Public Access are not supported for directory buckets. For directory buckets, all Block Public Access settings are enabled at the bucket level and S3 Object Ownership is set to Bucket owner enforced (ACLs disabled). These settings can't be modified.

    For more information about permissions for creating and working with directory buckets, see Directory buckets in the Amazon S3 User Guide. For more information about supported S3 features for directory buckets, see Features of S3 Express One Zone in the Amazon S3 User Guide.

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com.

The following operations are related to CreateBucket:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", "staticContextParams":{ "DisableAccessPoints":{"value":true}, "UseS3ExpressControlEndpoint":{"value":true} @@ -370,7 +370,7 @@ "responseCode":204 }, "input":{"shape":"DeletePublicAccessBlockRequest"}, - "documentation":"

This operation is not supported for directory buckets.

Removes the PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock permission. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

The following operations are related to DeletePublicAccessBlock:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported for directory buckets.

Removes the PublicAccessBlock configuration for an Amazon S3 bucket. This operation removes the bucket-level configuration only; the effective public access behavior is still governed by the account-level PublicAccessBlock settings. To use this operation, you must have the s3:PutBucketPublicAccessBlock permission. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

The following operations are related to DeletePublicAccessBlock:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} } @@ -383,7 +383,7 @@ }, "input":{"shape":"GetBucketAbacRequest"}, "output":{"shape":"GetBucketAbacOutput"}, - "documentation":"

Returns the attribute-based access control (ABAC) property of the general purpose bucket. If the bucket ABAC is enabled, you can use tags for bucket access control. For more information, see Enabling ABAC in general purpose buckets. Whether ABAC is enabled or disabled, you can use tags for cost tracking. For more information, see Using tags with S3 general purpose buckets.

" + "documentation":"

Returns the attribute-based access control (ABAC) property of the general purpose bucket. If ABAC is enabled on your bucket, you can use tags on the bucket for access control. For more information, see Enabling ABAC in general purpose buckets.

" }, "GetBucketAccelerateConfiguration":{ "name":"GetBucketAccelerateConfiguration", @@ -406,7 +406,7 @@ }, "input":{"shape":"GetBucketAclRequest"}, "output":{"shape":"GetBucketAclOutput"}, - "documentation":"

End of support notice: Beginning November 21, 2025, Amazon S3 will stop returning DisplayName. Update your applications to use canonical IDs (unique identifier for Amazon Web Services accounts), Amazon Web Services account ID (12 digit identifier) or IAM ARNs (full resource naming) as a direct replacement of DisplayName.

This change affects the following Amazon Web Services Regions: US East (N. Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) Region, Europe (Ireland) Region, and South America (São Paulo) Region.

This operation is not supported for directory buckets.

This implementation of the GET action uses the acl subresource to return the access control list (ACL) of a bucket. To use GET to return the ACL of the bucket, you must have the READ_ACP access to the bucket. If READ_ACP permission is granted to the anonymous user, you can return the ACL of the bucket without using an authorization header.

When you use this API operation with an access point, provide the alias of the access point in place of the bucket name.

When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. If the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. For more information about InvalidAccessPointAliasError, see List of Error Codes.

If your bucket uses the bucket owner enforced setting for S3 Object Ownership, requests to read ACLs are still supported and return the bucket-owner-full-control ACL with the owner being the account that created the bucket. For more information, see Controlling object ownership and disabling ACLs in the Amazon S3 User Guide.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

The following operations are related to GetBucketAcl:

", + "documentation":"

This operation is not supported for directory buckets.

This implementation of the GET action uses the acl subresource to return the access control list (ACL) of a bucket. To use GET to return the ACL of the bucket, you must have the READ_ACP access to the bucket. If READ_ACP permission is granted to the anonymous user, you can return the ACL of the bucket without using an authorization header.

When you use this API operation with an access point, provide the alias of the access point in place of the bucket name.

When you use this API operation with an Object Lambda access point, provide the alias of the Object Lambda access point in place of the bucket name. If the Object Lambda access point alias in a request is not valid, the error code InvalidAccessPointAliasError is returned. For more information about InvalidAccessPointAliasError, see List of Error Codes.

If your bucket uses the bucket owner enforced setting for S3 Object Ownership, requests to read ACLs are still supported and return the bucket-owner-full-control ACL with the owner being the account that created the bucket. For more information, see Controlling object ownership and disabling ACLs in the Amazon S3 User Guide.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

The following operations are related to GetBucketAcl:

", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} } @@ -445,7 +445,7 @@ }, "input":{"shape":"GetBucketEncryptionRequest"}, "output":{"shape":"GetBucketEncryptionOutput"}, - "documentation":"

Returns the default encryption configuration for an Amazon S3 bucket. By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). This operation also returns the BucketKeyEnabled and BlockedEncryptionTypes statuses.

Permissions
  • General purpose bucket permissions - The s3:GetEncryptionConfiguration permission is required in a policy. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Operations and Managing Access Permissions to Your Amazon S3 Resources.

  • Directory bucket permissions - To grant access to this API operation, you must have the s3express:GetEncryptionConfiguration permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com.

The following operations are related to GetBucketEncryption:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Returns the default encryption configuration for an Amazon S3 bucket. By default, all buckets have a default encryption configuration that uses server-side encryption with Amazon S3 managed keys (SSE-S3). This operation also returns the BucketKeyEnabled and BlockedEncryptionTypes statuses.

Permissions
  • General purpose bucket permissions - The s3:GetEncryptionConfiguration permission is required in a policy. The bucket owner has this permission by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Operations and Managing Access Permissions to Your Amazon S3 Resources.

  • Directory bucket permissions - To grant access to this API operation, you must have the s3express:GetEncryptionConfiguration permission in an IAM identity-based policy instead of a bucket policy. Cross-account access to this API operation isn't supported. This operation can only be performed by the Amazon Web Services account that owns the resource. For more information about directory bucket policies and permissions, see Amazon Web Services Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is s3express-control.region-code.amazonaws.com.

The following operations are related to GetBucketEncryption:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} } @@ -524,7 +524,7 @@ }, "input":{"shape":"GetBucketLoggingRequest"}, "output":{"shape":"GetBucketLoggingOutput"}, - "documentation":"

End of support notice: Beginning November 21, 2025, Amazon S3 will stop returning DisplayName. Update your applications to use canonical IDs (unique identifier for Amazon Web Services accounts), Amazon Web Services account ID (12 digit identifier) or IAM ARNs (full resource naming) as a direct replacement of DisplayName.

This change affects the following Amazon Web Services Regions: US East (N. Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) Region, Europe (Ireland) Region, and South America (São Paulo) Region.

This operation is not supported for directory buckets.

Returns the logging status of a bucket and the permissions users have to view and modify that status.

The following operations are related to GetBucketLogging:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported for directory buckets.

Returns the logging status of a bucket and the permissions users have to view and modify that status.

The following operations are related to GetBucketLogging:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} } @@ -734,7 +734,7 @@ "errors":[ {"shape":"NoSuchKey"} ], - "documentation":"

End of support notice: Beginning November 21, 2025, Amazon S3 will stop returning DisplayName. Update your applications to use canonical IDs (unique identifier for Amazon Web Services accounts), Amazon Web Services account ID (12 digit identifier) or IAM ARNs (full resource naming) as a direct replacement of DisplayName.

This change affects the following Amazon Web Services Regions: US East (N. Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) Region, Europe (Ireland) Region, and South America (São Paulo) Region.

This operation is not supported for directory buckets.

Returns the access control list (ACL) of an object. To use this operation, you must have s3:GetObjectAcl permissions or READ_ACP access to the object. For more information, see Mapping of ACL permissions and access policy permissions in the Amazon S3 User Guide

This functionality is not supported for Amazon S3 on Outposts.

By default, GET returns ACL information about the current version of an object. To return ACL information about a different version, use the versionId subresource.

If your bucket uses the bucket owner enforced setting for S3 Object Ownership, requests to read ACLs are still supported and return the bucket-owner-full-control ACL with the owner being the account that created the bucket. For more information, see Controlling object ownership and disabling ACLs in the Amazon S3 User Guide.

The following operations are related to GetObjectAcl:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

" + "documentation":"

This operation is not supported for directory buckets.

Returns the access control list (ACL) of an object. To use this operation, you must have s3:GetObjectAcl permissions or READ_ACP access to the object. For more information, see Mapping of ACL permissions and access policy permissions in the Amazon S3 User Guide

This functionality is not supported for Amazon S3 on Outposts.

By default, GET returns ACL information about the current version of an object. To return ACL information about a different version, use the versionId subresource.

If your bucket uses the bucket owner enforced setting for S3 Object Ownership, requests to read ACLs are still supported and return the bucket-owner-full-control ACL with the owner being the account that created the bucket. For more information, see Controlling object ownership and disabling ACLs in the Amazon S3 User Guide.

The following operations are related to GetObjectAcl:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

" }, "GetObjectAttributes":{ "name":"GetObjectAttributes", @@ -807,7 +807,7 @@ }, "input":{"shape":"GetPublicAccessBlockRequest"}, "output":{"shape":"GetPublicAccessBlockOutput"}, - "documentation":"

This operation is not supported for directory buckets.

Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, you must have the s3:GetBucketPublicAccessBlock permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.

When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an object, it checks the PublicAccessBlock configuration for both the bucket (or the bucket that contains the object) and the bucket owner's account. If the PublicAccessBlock settings are different between the bucket and the account, Amazon S3 uses the most restrictive combination of the bucket-level and account-level settings.

For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of \"Public\".

The following operations are related to GetPublicAccessBlock:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported for directory buckets.

Retrieves the PublicAccessBlock configuration for an Amazon S3 bucket. This operation returns the bucket-level configuration only. To understand the effective public access behavior, you must also consider the account-level PublicAccessBlock settings. To use this operation, you must have the s3:GetBucketPublicAccessBlock permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.

When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an object, it checks the PublicAccessBlock configuration for both the bucket (or the bucket that contains the object) and the bucket owner's account. If the PublicAccessBlock settings are different between the bucket and the account, Amazon S3 uses the most restrictive combination of the bucket-level and account-level settings.

For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of \"Public\".

The following operations are related to GetPublicAccessBlock:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", "staticContextParams":{ "UseS3ExpressControlEndpoint":{"value":true} } @@ -895,7 +895,7 @@ }, "input":{"shape":"ListBucketsRequest"}, "output":{"shape":"ListBucketsOutput"}, - "documentation":"

End of support notice: Beginning November 21, 2025, Amazon S3 will stop returning DisplayName. Update your applications to use canonical IDs (unique identifier for Amazon Web Services accounts), Amazon Web Services account ID (12 digit identifier) or IAM ARNs (full resource naming) as a direct replacement of DisplayName.

This change affects the following Amazon Web Services Regions: US East (N. Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) Region, Europe (Ireland) Region, and South America (São Paulo) Region.

This operation is not supported for directory buckets.

Returns a list of all buckets owned by the authenticated sender of the request. To grant IAM permission to use this operation, you must add the s3:ListAllMyBuckets policy action.

For information about Amazon S3 buckets, see Creating, configuring, and working with Amazon S3 buckets.

We strongly recommend using only paginated ListBuckets requests. Unpaginated ListBuckets requests are only supported for Amazon Web Services accounts set to the default general purpose bucket quota of 10,000. If you have an approved general purpose bucket quota above 10,000, you must send paginated ListBuckets requests to list your account’s buckets. All unpaginated ListBuckets requests will be rejected for Amazon Web Services accounts with a general purpose bucket quota greater than 10,000.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

" + "documentation":"

This operation is not supported for directory buckets.

Returns a list of all buckets owned by the authenticated sender of the request. To grant IAM permission to use this operation, you must add the s3:ListAllMyBuckets policy action.

For information about Amazon S3 buckets, see Creating, configuring, and working with Amazon S3 buckets.

We strongly recommend using only paginated ListBuckets requests. Unpaginated ListBuckets requests are only supported for Amazon Web Services accounts set to the default general purpose bucket quota of 10,000. If you have an approved general purpose bucket quota above 10,000, you must send paginated ListBuckets requests to list your account’s buckets. All unpaginated ListBuckets requests will be rejected for Amazon Web Services accounts with a general purpose bucket quota greater than 10,000.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

" }, "ListDirectoryBuckets":{ "name":"ListDirectoryBuckets", @@ -918,7 +918,7 @@ }, "input":{"shape":"ListMultipartUploadsRequest"}, "output":{"shape":"ListMultipartUploadsOutput"}, - "documentation":"

End of support notice: Beginning November 21, 2025, Amazon S3 will stop returning DisplayName. Update your applications to use canonical IDs (unique identifier for Amazon Web Services accounts), Amazon Web Services account ID (12 digit identifier) or IAM ARNs (full resource naming) as a direct replacement of DisplayName.

This change affects the following Amazon Web Services Regions: US East (N. Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) Region, Europe (Ireland) Region, and South America (São Paulo) Region.

This operation lists in-progress multipart uploads in a bucket. An in-progress multipart upload is a multipart upload that has been initiated by the CreateMultipartUpload request, but has not yet been completed or aborted.

Directory buckets - If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed. To delete these in-progress multipart uploads, use the ListMultipartUploads operation to list the in-progress multipart uploads in the bucket and use the AbortMultipartUpload operation to abort all the in-progress multipart uploads.

The ListMultipartUploads operation returns a maximum of 1,000 multipart uploads in the response. The limit of 1,000 multipart uploads is also the default value. You can further limit the number of uploads in a response by specifying the max-uploads request parameter. If there are more than 1,000 multipart uploads that satisfy your ListMultipartUploads request, the response returns an IsTruncated element with the value of true, a NextKeyMarker element, and a NextUploadIdMarker element. To list the remaining multipart uploads, you need to make subsequent ListMultipartUploads requests. In these requests, include two query parameters: key-marker and upload-id-marker. Set the value of key-marker to the NextKeyMarker value from the previous response. Similarly, set the value of upload-id-marker to the NextUploadIdMarker value from the previous response.

Directory buckets - The upload-id-marker element and the NextUploadIdMarker element aren't supported by directory buckets. To list the additional multipart uploads, you only need to set the value of key-marker to the NextKeyMarker value from the previous response.

For more information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.

Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

Permissions
  • General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.

  • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

Sorting of multipart uploads in response
  • General purpose bucket - In the ListMultipartUploads response, the multipart uploads are sorted based on two criteria:

    • Key-based sorting - Multipart uploads are initially sorted in ascending order based on their object keys.

    • Time-based sorting - For uploads that share the same object key, they are further sorted in ascending order based on the upload initiation time. Among uploads with the same key, the one that was initiated first will appear before the ones that were initiated later.

  • Directory bucket - In the ListMultipartUploads response, the multipart uploads aren't sorted lexicographically based on the object keys.

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

The following operations are related to ListMultipartUploads:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

" + "documentation":"

This operation lists in-progress multipart uploads in a bucket. An in-progress multipart upload is a multipart upload that has been initiated by the CreateMultipartUpload request, but has not yet been completed or aborted.

Directory buckets - If multipart uploads in a directory bucket are in progress, you can't delete the bucket until all the in-progress multipart uploads are aborted or completed. To delete these in-progress multipart uploads, use the ListMultipartUploads operation to list the in-progress multipart uploads in the bucket and use the AbortMultipartUpload operation to abort all the in-progress multipart uploads.

The ListMultipartUploads operation returns a maximum of 1,000 multipart uploads in the response. The limit of 1,000 multipart uploads is also the default value. You can further limit the number of uploads in a response by specifying the max-uploads request parameter. If there are more than 1,000 multipart uploads that satisfy your ListMultipartUploads request, the response returns an IsTruncated element with the value of true, a NextKeyMarker element, and a NextUploadIdMarker element. To list the remaining multipart uploads, you need to make subsequent ListMultipartUploads requests. In these requests, include two query parameters: key-marker and upload-id-marker. Set the value of key-marker to the NextKeyMarker value from the previous response. Similarly, set the value of upload-id-marker to the NextUploadIdMarker value from the previous response.

Directory buckets - The upload-id-marker element and the NextUploadIdMarker element aren't supported by directory buckets. To list the additional multipart uploads, you only need to set the value of key-marker to the NextKeyMarker value from the previous response.

For more information about multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.

Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

Permissions
  • General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.

  • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

Sorting of multipart uploads in response
  • General purpose bucket - In the ListMultipartUploads response, the multipart uploads are sorted based on two criteria:

    • Key-based sorting - Multipart uploads are initially sorted in ascending order based on their object keys.

    • Time-based sorting - For uploads that share the same object key, they are further sorted in ascending order based on the upload initiation time. Among uploads with the same key, the one that was initiated first will appear before the ones that were initiated later.

  • Directory bucket - In the ListMultipartUploads response, the multipart uploads aren't sorted lexicographically based on the object keys.

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

The following operations are related to ListMultipartUploads:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

" }, "ListObjectVersions":{ "name":"ListObjectVersions", @@ -928,7 +928,7 @@ }, "input":{"shape":"ListObjectVersionsRequest"}, "output":{"shape":"ListObjectVersionsOutput"}, - "documentation":"

End of support notice: Beginning November 21, 2025, Amazon S3 will stop returning DisplayName. Update your applications to use canonical IDs (unique identifier for Amazon Web Services accounts), Amazon Web Services account ID (12 digit identifier) or IAM ARNs (full resource naming) as a direct replacement of DisplayName.

This change affects the following Amazon Web Services Regions: US East (N. Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) Region, Europe (Ireland) Region, and South America (São Paulo) Region.

This operation is not supported for directory buckets.

Returns metadata about all versions of the objects in a bucket. You can also use request parameters as selection criteria to return metadata about a subset of all the object versions.

To use this operation, you must have permission to perform the s3:ListBucketVersions action. Be aware of the name difference.

A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately.

To use this operation, you must have READ access to the bucket.

The following operations are related to ListObjectVersions:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

" + "documentation":"

This operation is not supported for directory buckets.

Returns metadata about all versions of the objects in a bucket. You can also use request parameters as selection criteria to return metadata about a subset of all the object versions.

To use this operation, you must have permission to perform the s3:ListBucketVersions action. Be aware of the name difference.

A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately.

To use this operation, you must have READ access to the bucket.

The following operations are related to ListObjectVersions:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

" }, "ListObjects":{ "name":"ListObjects", @@ -941,7 +941,7 @@ "errors":[ {"shape":"NoSuchBucket"} ], - "documentation":"

End of support notice: Beginning November 21, 2025, Amazon S3 will stop returning DisplayName. Update your applications to use canonical IDs (unique identifier for Amazon Web Services accounts), Amazon Web Services account ID (12 digit identifier) or IAM ARNs (full resource naming) as a direct replacement of DisplayName.

This change affects the following Amazon Web Services Regions: US East (N. Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) Region, Europe (Ireland) Region, and South America (São Paulo) Region.

This operation is not supported for directory buckets.

Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Be sure to design your application to parse the contents of the response and handle it appropriately.

This action has been revised. We recommend that you use the newer version, ListObjectsV2, when developing applications. For backward compatibility, Amazon S3 continues to support ListObjects.

The following operations are related to ListObjects:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

" + "documentation":"

This operation is not supported for directory buckets.

Returns some or all (up to 1,000) of the objects in a bucket. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Be sure to design your application to parse the contents of the response and handle it appropriately.

This action has been revised. We recommend that you use the newer version, ListObjectsV2, when developing applications. For backward compatibility, Amazon S3 continues to support ListObjects.

The following operations are related to ListObjects:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

" }, "ListObjectsV2":{ "name":"ListObjectsV2", @@ -954,7 +954,7 @@ "errors":[ {"shape":"NoSuchBucket"} ], - "documentation":"

End of support notice: Beginning November 21, 2025, Amazon S3 will stop returning DisplayName. Update your applications to use canonical IDs (unique identifier for Amazon Web Services accounts), Amazon Web Services account ID (12 digit identifier) or IAM ARNs (full resource naming) as a direct replacement of DisplayName.

This change affects the following Amazon Web Services Regions: US East (N. Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) Region, Europe (Ireland) Region, and South America (São Paulo) Region.

Returns some or all (up to 1,000) of the objects in a bucket with each request. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately. For more information about listing objects, see Listing object keys programmatically in the Amazon S3 User Guide. To get a list of your buckets, see ListBuckets.

Permissions
  • General purpose bucket permissions - To use this operation, you must have READ access to the bucket. You must have permission to perform the s3:ListBucket action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.

  • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

Sorting order of returned objects
  • General purpose bucket - For general purpose buckets, ListObjectsV2 returns objects in lexicographical order based on their key names.

  • Directory bucket - For directory buckets, ListObjectsV2 does not return objects in lexicographical order.

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

This section describes the latest revision of this action. We recommend that you use this revised API operation for application development. For backward compatibility, Amazon S3 continues to support the prior version of this API operation, ListObjects.

The following operations are related to ListObjectsV2:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

" + "documentation":"

Returns some or all (up to 1,000) of the objects in a bucket with each request. You can use the request parameters as selection criteria to return a subset of the objects in a bucket. A 200 OK response can contain valid or invalid XML. Make sure to design your application to parse the contents of the response and handle it appropriately. For more information about listing objects, see Listing object keys programmatically in the Amazon S3 User Guide. To get a list of your buckets, see ListBuckets.

Permissions
  • General purpose bucket permissions - To use this operation, you must have READ access to the bucket. You must have permission to perform the s3:ListBucket action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources in the Amazon S3 User Guide.

  • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

Sorting order of returned objects
  • General purpose bucket - For general purpose buckets, ListObjectsV2 returns objects in lexicographical order based on their key names.

  • Directory bucket - For directory buckets, ListObjectsV2 does not return objects in lexicographical order.

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

This section describes the latest revision of this action. We recommend that you use this revised API operation for application development. For backward compatibility, Amazon S3 continues to support the prior version of this API operation, ListObjects.

The following operations are related to ListObjectsV2:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

" }, "ListParts":{ "name":"ListParts", @@ -964,7 +964,7 @@ }, "input":{"shape":"ListPartsRequest"}, "output":{"shape":"ListPartsOutput"}, - "documentation":"

End of support notice: Beginning November 21, 2025, Amazon S3 will stop returning DisplayName. Update your applications to use canonical IDs (unique identifier for Amazon Web Services accounts), Amazon Web Services account ID (12 digit identifier) or IAM ARNs (full resource naming) as a direct replacement of DisplayName.

This change affects the following Amazon Web Services Regions: US East (N. Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) Region, Europe (Ireland) Region, and South America (São Paulo) Region.

Lists the parts that have been uploaded for a specific multipart upload.

To use this operation, you must provide the upload ID in the request. You obtain this uploadID by sending the initiate multipart upload request through CreateMultipartUpload.

The ListParts request returns a maximum of 1,000 uploaded parts. The limit of 1,000 parts is also the default value. You can restrict the number of parts in a response by specifying the max-parts request parameter. If your multipart upload consists of more than 1,000 parts, the response returns an IsTruncated field with the value of true, and a NextPartNumberMarker element. To list remaining uploaded parts, in subsequent ListParts requests, include the part-number-marker query string parameter and set its value to the NextPartNumberMarker field value from the previous response.

For more information on multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.

Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

Permissions
  • General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.

    If the upload was created using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), you must have permission to the kms:Decrypt action for the ListParts request to succeed.

  • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

The following operations are related to ListParts:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

" + "documentation":"

Lists the parts that have been uploaded for a specific multipart upload.

To use this operation, you must provide the upload ID in the request. You obtain this uploadID by sending the initiate multipart upload request through CreateMultipartUpload.

The ListParts request returns a maximum of 1,000 uploaded parts. The limit of 1,000 parts is also the default value. You can restrict the number of parts in a response by specifying the max-parts request parameter. If your multipart upload consists of more than 1,000 parts, the response returns an IsTruncated field with the value of true, and a NextPartNumberMarker element. To list remaining uploaded parts, in subsequent ListParts requests, include the part-number-marker query string parameter and set its value to the NextPartNumberMarker field value from the previous response.

For more information on multipart uploads, see Uploading Objects Using Multipart Upload in the Amazon S3 User Guide.

Directory buckets - For directory buckets, you must make requests for this API operation to the Zonal endpoint. These endpoints support virtual-hosted-style requests in the format https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name . Path-style requests are not supported. For more information about endpoints in Availability Zones, see Regional and Zonal endpoints for directory buckets in Availability Zones in the Amazon S3 User Guide. For more information about endpoints in Local Zones, see Concepts for directory buckets in Local Zones in the Amazon S3 User Guide.

Permissions
  • General purpose bucket permissions - For information about permissions required to use the multipart upload API, see Multipart Upload and Permissions in the Amazon S3 User Guide.

    If the upload was created using server-side encryption with Key Management Service (KMS) keys (SSE-KMS) or dual-layer server-side encryption with Amazon Web Services KMS keys (DSSE-KMS), you must have permission to the kms:Decrypt action for the ListParts request to succeed.

  • Directory bucket permissions - To grant access to this API operation on a directory bucket, we recommend that you use the CreateSession API operation for session-based authorization. Specifically, you grant the s3express:CreateSession permission to the directory bucket in a bucket policy or an IAM identity-based policy. Then, you make the CreateSession API call on the bucket to obtain a session token. With the session token in your request header, you can make API requests to this operation. After the session token expires, you make another CreateSession API call to generate a new session token for use. Amazon Web Services CLI or SDKs create session and refresh the session token automatically to avoid service interruptions when a session expires. For more information about authorization, see CreateSession .

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is Bucket-name.s3express-zone-id.region-code.amazonaws.com.

The following operations are related to ListParts:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

" }, "PutBucketAbac":{ "name":"PutBucketAbac", @@ -973,7 +973,7 @@ "requestUri":"/{Bucket}?abac" }, "input":{"shape":"PutBucketAbacRequest"}, - "documentation":"

Sets the attribute-based access control (ABAC) property of the general purpose bucket. When you enable ABAC, you can use tags for bucket access control. Additionally, when ABAC is enabled, you must use the TagResource, UntagResource, and ListTagsForResource actions to manage bucket tags, and you can nolonger use the PutBucketTagging and DeleteBucketTagging actions to tag the bucket. You must also have the correct permissions for these actions. For more information, see Enabling ABAC in general purpose buckets.

", + "documentation":"

Sets the attribute-based access control (ABAC) property of the general purpose bucket. You must have s3:PutBucketABAC permission to perform this action. When you enable ABAC, you can use tags for access control on your buckets. Additionally, when ABAC is enabled, you must use the TagResource and UntagResource actions to manage tags on your buckets. You can no longer use the PutBucketTagging and DeleteBucketTagging actions to tag your bucket. For more information, see Enabling ABAC in general purpose buckets.

", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", "requestChecksumRequired":false @@ -1241,7 +1241,7 @@ "requestUri":"/{Bucket}?tagging" }, "input":{"shape":"PutBucketTaggingRequest"}, - "documentation":"

This operation is not supported for directory buckets.

Sets the tags for a general purpose bucket if attribute based access control (ABAC) is not enabled for the bucket. When you enable ABAC for a general purpose bucket, you can no longer use this operation for that bucket and must use the TagResource or UntagResource operations instead.

if ABAC is not enabled for the bucket. When you enable ABAC for a general purpose bucket, you can no longer use this operation for that bucket and must use TagResource instead.

Use tags to organize your Amazon Web Services bill to reflect your own cost structure. To do this, sign up to get your Amazon Web Services account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost Allocation and Tagging and Using Cost Allocation in Amazon S3 Bucket Tags.

When this operation sets the tags for a bucket, it will overwrite any current tags the bucket already has. You cannot use this operation to add tags to an existing list of tags.

To use this operation, you must have permissions to perform the s3:PutBucketTagging action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

PutBucketTagging has the following special errors. For more Amazon S3 errors see, Error Responses.

The following operations are related to PutBucketTagging:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported for directory buckets.

Sets the tags for a general purpose bucket if attribute based access control (ABAC) is not enabled for the bucket. When you enable ABAC for a general purpose bucket, you can no longer use this operation for that bucket and must use the TagResource or UntagResource operations instead.

Use tags to organize your Amazon Web Services bill to reflect your own cost structure. To do this, sign up to get your Amazon Web Services account bill with tag key values included. Then, to see the cost of combined resources, organize your billing information according to resources with the same tag key values. For example, you can tag several resources with a specific application name, and then organize your billing information to see the total cost of that application across several services. For more information, see Cost Allocation and Tagging and Using Cost Allocation in Amazon S3 Bucket Tags.

When this operation sets the tags for a bucket, it will overwrite any current tags the bucket already has. You cannot use this operation to add tags to an existing list of tags.

To use this operation, you must have permissions to perform the s3:PutBucketTagging action. The bucket owner has this permission by default and can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

PutBucketTagging has the following special errors. For more Amazon S3 errors, see Error Responses.

The following operations are related to PutBucketTagging:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", "requestChecksumRequired":true @@ -1382,7 +1382,7 @@ "requestUri":"/{Bucket}?publicAccessBlock" }, "input":{"shape":"PutPublicAccessBlockRequest"}, - "documentation":"

This operation is not supported for directory buckets.

Creates or modifies the PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.

When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an object, it checks the PublicAccessBlock configuration for both the bucket (or the bucket that contains the object) and the bucket owner's account. If the PublicAccessBlock configurations are different between the bucket and the account, Amazon S3 uses the most restrictive combination of the bucket-level and account-level settings.

For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of \"Public\".

The following operations are related to PutPublicAccessBlock:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported for directory buckets.

Creates or modifies the PublicAccessBlock configuration for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketPublicAccessBlock permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy.

When Amazon S3 evaluates the PublicAccessBlock configuration for a bucket or an object, it checks the PublicAccessBlock configuration for both the bucket (or the bucket that contains the object) and the bucket owner's account. If the PublicAccessBlock configurations are different between the bucket and the account, Amazon S3 uses the most restrictive combination of the bucket-level and account-level settings.

For more information about when Amazon S3 considers a bucket or an object public, see The Meaning of \"Public\".

The following operations are related to PutPublicAccessBlock:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", "httpChecksum":{ "requestAlgorithmMember":"ChecksumAlgorithm", "requestChecksumRequired":true @@ -1776,7 +1776,7 @@ "documentation":"

The object encryption type that you want to block or unblock for an Amazon S3 general purpose bucket.

Currently, this parameter only supports blocking or unblocking server side encryption with customer-provided keys (SSE-C). For more information about SSE-C, see Using server-side encryption with customer-provided keys (SSE-C).

" } }, - "documentation":"

A bucket-level setting for Amazon S3 general purpose buckets used to prevent the upload of new objects encrypted with the specified server-side encryption type. For example, blocking an encryption type will block PutObject, CopyObject, PostObject, multipart upload, and replication requests to the bucket for objects with the specified encryption type. However, you can continue to read and list any pre-existing objects already encrypted with the specified encryption type. For more information, see Blocking an encryption type for a general purpose bucket.

This data type is used with the following actions:

Permissions

You must have the s3:PutEncryptionConfiguration permission to block or unblock an encryption type for a bucket.

You must have the s3:GetEncryptionConfiguration permission to view a bucket's encryption type.

" + "documentation":"

A bucket-level setting for Amazon S3 general purpose buckets used to prevent the upload of new objects encrypted with the specified server-side encryption type. For example, blocking an encryption type will block PutObject, CopyObject, PostObject, multipart upload, and replication requests to the bucket for objects with the specified encryption type. However, you can continue to read and list any pre-existing objects already encrypted with the specified encryption type. For more information, see Blocking or unblocking SSE-C for a general purpose bucket.

This data type is used with the following actions:

Permissions

You must have the s3:PutEncryptionConfiguration permission to block or unblock an encryption type for a bucket.

You must have the s3:GetEncryptionConfiguration permission to view a bucket's encryption type.

" }, "Body":{"type":"blob"}, "Bucket":{ @@ -2912,7 +2912,7 @@ }, "Tags":{ "shape":"TagSet", - "documentation":"

An array of tags that you can apply to the bucket that you're creating. Tags are key-value pairs of metadata used to categorize and organize your buckets, track costs, and control access.

This parameter is only supported for S3 directory buckets. For more information, see Using tags with directory buckets.

You must have the s3express:TagResource permission to create a directory bucket with tags.

" + "documentation":"

An array of tags that you can apply to the bucket that you're creating. Tags are key-value pairs of metadata used to categorize and organize your buckets, track costs, and control access.

You must have the s3:TagResource permission to create a general purpose bucket with tags or the s3express:TagResource permission to create a directory bucket with tags.

When creating buckets with tags, note that tag-based conditions using aws:ResourceTag and s3:BucketTag condition keys are applicable only after ABAC is enabled on the bucket. To learn more, see Enabling ABAC in general purpose buckets.

" } }, "documentation":"

The configuration information for the bucket.

" @@ -4511,7 +4511,7 @@ "members":{ "Owner":{ "shape":"Owner", - "documentation":"

Container for the bucket owner's display name and ID.

" + "documentation":"

Container for the bucket owner's ID.

" }, "Grants":{ "shape":"Grants", @@ -5221,7 +5221,7 @@ "members":{ "Owner":{ "shape":"Owner", - "documentation":"

Container for the bucket owner's display name and ID.

" + "documentation":"

Container for the bucket owner's ID.

" }, "Grants":{ "shape":"Grants", @@ -6101,11 +6101,11 @@ "members":{ "DisplayName":{ "shape":"DisplayName", - "documentation":"

Screen name of the grantee.

" + "documentation":"

" }, "EmailAddress":{ "shape":"EmailAddress", - "documentation":"

Email address of the grantee.

Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

" + "documentation":"

" }, "ID":{ "shape":"ID", @@ -6122,7 +6122,7 @@ "documentation":"

URI of the grantee group.

" } }, - "documentation":"

End of support notice: Beginning November 21, 2025, Amazon S3 will stop returning DisplayName. Update your applications to use canonical IDs (unique identifier for Amazon Web Services accounts), Amazon Web Services account ID (12 digit identifier) or IAM ARNs (full resource naming) as a direct replacement of DisplayName.

This change affects the following Amazon Web Services Regions: US East (N. Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) Region, Europe (Ireland) Region, and South America (São Paulo) Region.

Container for the person being granted permissions.

", + "documentation":"

Container for the person being granted permissions.

", "xmlNamespace":{ "prefix":"xsi", "uri":"http://www.w3.org/2001/XMLSchema-instance" @@ -6605,7 +6605,7 @@ }, "DisplayName":{ "shape":"DisplayName", - "documentation":"

Name of the Principal.

This functionality is not supported for directory buckets.

" + "documentation":"

This functionality is not supported for directory buckets.

" } }, "documentation":"

Container element that identifies who initiated the multipart upload.

" @@ -6870,7 +6870,8 @@ "BucketKeyStatus", "ChecksumAlgorithm", "ObjectAccessControlList", - "ObjectOwner" + "ObjectOwner", + "LifecycleExpirationDate" ] }, "InventoryOptionalFields":{ @@ -8046,11 +8047,11 @@ }, "Initiator":{ "shape":"Initiator", - "documentation":"

Container element that identifies who initiated the multipart upload. If the initiator is an Amazon Web Services account, this element provides the same information as the Owner element. If the initiator is an IAM User, this element provides the user ARN and display name.

" + "documentation":"

Container element that identifies who initiated the multipart upload. If the initiator is an Amazon Web Services account, this element provides the same information as the Owner element. If the initiator is an IAM User, this element provides the user ARN.

" }, "Owner":{ "shape":"Owner", - "documentation":"

Container element that identifies the object owner, after the object is created. If multipart upload is initiated by an IAM user, this element provides the parent account ID and display name.

Directory buckets - The bucket owner is returned as the object owner for all the parts.

" + "documentation":"

Container element that identifies the object owner, after the object is created. If multipart upload is initiated by an IAM user, this element provides the parent account ID.

Directory buckets - The bucket owner is returned as the object owner for all the parts.

" }, "StorageClass":{ "shape":"StorageClass", @@ -8860,7 +8861,8 @@ "GLACIER_IR", "SNOW", "EXPRESS_ONEZONE", - "FSX_OPENZFS" + "FSX_OPENZFS", + "FSX_ONTAP" ] }, "ObjectVersion":{ @@ -8960,14 +8962,14 @@ "members":{ "DisplayName":{ "shape":"DisplayName", - "documentation":"

Container for the display name of the owner. This value is only supported in the following Amazon Web Services Regions:

This functionality is not supported for directory buckets.

" + "documentation":"

" }, "ID":{ "shape":"ID", "documentation":"

Container for the ID of the owner.

" } }, - "documentation":"

End of support notice: Beginning November 21, 2025, Amazon S3 will stop returning DisplayName. Update your applications to use canonical IDs (unique identifier for Amazon Web Services accounts), Amazon Web Services account ID (12 digit identifier) or IAM ARNs (full resource naming) as a direct replacement of DisplayName.

This change affects the following Amazon Web Services Regions: US East (N. Virginia) Region, US West (N. California) Region, US West (Oregon) Region, Asia Pacific (Singapore) Region, Asia Pacific (Sydney) Region, Asia Pacific (Tokyo) Region, Europe (Ireland) Region, and South America (São Paulo) Region.

Container for the owner's display name and ID.

" + "documentation":"

Container for the owner's display name and ID.

" }, "OwnerOverride":{ "type":"string", @@ -9174,7 +9176,7 @@ "locationName":"RestrictPublicBuckets" } }, - "documentation":"

The PublicAccessBlock configuration that you want to apply to this Amazon S3 bucket. You can enable the configuration options in any combination. For more information about when Amazon S3 considers a bucket or object public, see The Meaning of \"Public\" in the Amazon S3 User Guide.

" + "documentation":"

The PublicAccessBlock configuration that you want to apply to this Amazon S3 bucket. You can enable the configuration options in any combination. For more information about when Amazon S3 considers a bucket or object public, see The Meaning of \"Public\" in the Amazon S3 User Guide.

" }, "PutBucketAbacRequest":{ "type":"structure", @@ -11821,7 +11823,7 @@ }, "BlockedEncryptionTypes":{ "shape":"BlockedEncryptionTypes", - "documentation":"

A bucket-level setting for Amazon S3 general purpose buckets used to prevent the upload of new objects encrypted with the specified server-side encryption type. For example, blocking an encryption type will block PutObject, CopyObject, PostObject, multipart upload, and replication requests to the bucket for objects with the specified encryption type. However, you can continue to read and list any pre-existing objects already encrypted with the specified encryption type. For more information, see Blocking an encryption type for a general purpose bucket.

Currently, this parameter only supports blocking or unblocking Server Side Encryption with Customer Provided Keys (SSE-C). For more information about SSE-C, see Using server-side encryption with customer-provided keys (SSE-C).

" + "documentation":"

A bucket-level setting for Amazon S3 general purpose buckets used to prevent the upload of new objects encrypted with the specified server-side encryption type. For example, blocking an encryption type will block PutObject, CopyObject, PostObject, multipart upload, and replication requests to the bucket for objects with the specified encryption type. However, you can continue to read and list any pre-existing objects already encrypted with the specified encryption type. For more information, see Blocking or unblocking SSE-C for a general purpose bucket.

Currently, this parameter only supports blocking or unblocking server-side encryption with customer-provided keys (SSE-C). For more information about SSE-C, see Using server-side encryption with customer-provided keys (SSE-C).

" } }, "documentation":"

Specifies the default server-side encryption configuration.

" @@ -11974,7 +11976,8 @@ "GLACIER_IR", "SNOW", "EXPRESS_ONEZONE", - "FSX_OPENZFS" + "FSX_OPENZFS", + "FSX_ONTAP" ] }, "StorageClassAnalysis":{ diff --git a/awscli/botocore/data/s3control/2018-08-20/service-2.json b/awscli/botocore/data/s3control/2018-08-20/service-2.json index e4a4e3baff92..b2d568b97978 100644 --- a/awscli/botocore/data/s3control/2018-08-20/service-2.json +++ b/awscli/botocore/data/s3control/2018-08-20/service-2.json @@ -24,7 +24,7 @@ "locationName":"AssociateAccessGrantsIdentityCenterRequest", "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, - "documentation":"

Associate your S3 Access Grants instance with an Amazon Web Services IAM Identity Center instance. Use this action if you want to create access grants for users or groups from your corporate identity directory. First, you must add your corporate identity directory to Amazon Web Services IAM Identity Center. Then, you can associate this IAM Identity Center instance with your S3 Access Grants instance.

Permissions

You must have the s3:AssociateAccessGrantsIdentityCenter permission to use this operation.

Additional Permissions

You must also have the following permissions: sso:CreateApplication, sso:PutApplicationGrant, and sso:PutApplicationAuthenticationMethod.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Associate your S3 Access Grants instance with an Amazon Web Services IAM Identity Center instance. Use this action if you want to create access grants for users or groups from your corporate identity directory. First, you must add your corporate identity directory to Amazon Web Services IAM Identity Center. Then, you can associate this IAM Identity Center instance with your S3 Access Grants instance.

Permissions

You must have the s3:AssociateAccessGrantsIdentityCenter permission to use this operation.

Additional Permissions

You must also have the following permissions: sso:CreateApplication, sso:PutApplicationGrant, and sso:PutApplicationAuthenticationMethod.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -45,7 +45,7 @@ "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, "output":{"shape":"CreateAccessGrantResult"}, - "documentation":"

Creates an access grant that gives a grantee access to your S3 data. The grantee can be an IAM user or role or a directory user, or group. Before you can create a grant, you must have an S3 Access Grants instance in the same Region as the S3 data. You can create an S3 Access Grants instance using the CreateAccessGrantsInstance. You must also have registered at least one S3 data location in your S3 Access Grants instance using CreateAccessGrantsLocation.

Permissions

You must have the s3:CreateAccessGrant permission to use this operation.

Additional Permissions

For any directory identity - sso:DescribeInstance and sso:DescribeApplication

For directory users - identitystore:DescribeUser

For directory groups - identitystore:DescribeGroup

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Creates an access grant that gives a grantee access to your S3 data. The grantee can be an IAM user or role or a directory user, or group. Before you can create a grant, you must have an S3 Access Grants instance in the same Region as the S3 data. You can create an S3 Access Grants instance using the CreateAccessGrantsInstance. You must also have registered at least one S3 data location in your S3 Access Grants instance using CreateAccessGrantsLocation.

Permissions

You must have the s3:CreateAccessGrant permission to use this operation.

Additional Permissions

For any directory identity - sso:DescribeInstance and sso:DescribeApplication

For directory users - identitystore:DescribeUser

For directory groups - identitystore:DescribeGroup

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -66,7 +66,7 @@ "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, "output":{"shape":"CreateAccessGrantsInstanceResult"}, - "documentation":"

Creates an S3 Access Grants instance, which serves as a logical grouping for access grants. You can create one S3 Access Grants instance per Region per account.

Permissions

You must have the s3:CreateAccessGrantsInstance permission to use this operation.

Additional Permissions

To associate an IAM Identity Center instance with your S3 Access Grants instance, you must also have the sso:DescribeInstance, sso:CreateApplication, sso:PutApplicationGrant, and sso:PutApplicationAuthenticationMethod permissions.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Creates an S3 Access Grants instance, which serves as a logical grouping for access grants. You can create one S3 Access Grants instance per Region per account.

Permissions

You must have the s3:CreateAccessGrantsInstance permission to use this operation.

Additional Permissions

To associate an IAM Identity Center instance with your S3 Access Grants instance, you must also have the sso:DescribeInstance, sso:CreateApplication, sso:PutApplicationGrant, and sso:PutApplicationAuthenticationMethod permissions.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -87,7 +87,7 @@ "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, "output":{"shape":"CreateAccessGrantsLocationResult"}, - "documentation":"

The S3 data location that you would like to register in your S3 Access Grants instance. Your S3 data must be in the same Region as your S3 Access Grants instance. The location can be one of the following:

When you register a location, you must include the IAM role that has permission to manage the S3 location that you are registering. Give S3 Access Grants permission to assume this role using a policy. S3 Access Grants assumes this role to manage access to the location and to vend temporary credentials to grantees or client applications.

Permissions

You must have the s3:CreateAccessGrantsLocation permission to use this operation.

Additional Permissions

You must also have the following permission for the specified IAM role: iam:PassRole

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

The S3 data location that you would like to register in your S3 Access Grants instance. Your S3 data must be in the same Region as your S3 Access Grants instance. The location can be one of the following:

When you register a location, you must include the IAM role that has permission to manage the S3 location that you are registering. Give S3 Access Grants permission to assume this role using a policy. S3 Access Grants assumes this role to manage access to the location and to vend temporary credentials to grantees or client applications.

Permissions

You must have the s3:CreateAccessGrantsLocation permission to use this operation.

Additional Permissions

You must also have the following permission for the specified IAM role: iam:PassRole

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -108,7 +108,7 @@ "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, "output":{"shape":"CreateAccessPointResult"}, - "documentation":"

Creates an access point and associates it to a specified bucket. For more information, see Managing access to shared datasets with access points or Managing access to shared datasets in directory buckets with access points in the Amazon S3 User Guide.

To create an access point and attach it to a volume on an Amazon FSx file system, see CreateAndAttachS3AccessPoint in the Amazon FSx API Reference.

S3 on Outposts only supports VPC-style access points.

For more information, see Accessing Amazon S3 on Outposts using virtual private cloud (VPC) only access points in the Amazon S3 User Guide.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.

The following actions are related to CreateAccessPoint:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Creates an access point and associates it to a specified bucket. For more information, see Managing access to shared datasets with access points or Managing access to shared datasets in directory buckets with access points in the Amazon S3 User Guide.

To create an access point and attach it to a volume on an Amazon FSx file system, see CreateAndAttachS3AccessPoint in the Amazon FSx API Reference.

S3 on Outposts only supports VPC-style access points.

For more information, see Accessing Amazon S3 on Outposts using virtual private cloud (VPC) only access points in the Amazon S3 User Guide.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.

The following actions are related to CreateAccessPoint:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -128,7 +128,7 @@ "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, "output":{"shape":"CreateAccessPointForObjectLambdaResult"}, - "documentation":"

This operation is not supported by directory buckets.

Creates an Object Lambda Access Point. For more information, see Transforming objects with Object Lambda Access Points in the Amazon S3 User Guide.

The following actions are related to CreateAccessPointForObjectLambda:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Creates an Object Lambda Access Point. For more information, see Transforming objects with Object Lambda Access Points in the Amazon S3 User Guide.

The following actions are related to CreateAccessPointForObjectLambda:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -169,7 +169,7 @@ {"shape":"IdempotencyException"}, {"shape":"InternalServiceException"} ], - "documentation":"

This operation creates an S3 Batch Operations job.

You can use S3 Batch Operations to perform large-scale batch actions on Amazon S3 objects. Batch Operations can run a single action on lists of Amazon S3 objects that you specify. For more information, see S3 Batch Operations in the Amazon S3 User Guide.

Permissions

For information about permissions required to use the Batch Operations, see Granting permissions for S3 Batch Operations in the Amazon S3 User Guide.

Related actions include:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation creates an S3 Batch Operations job.

You can use S3 Batch Operations to perform large-scale batch actions on Amazon S3 objects. Batch Operations can run a single action on lists of Amazon S3 objects that you specify. For more information, see S3 Batch Operations in the Amazon S3 User Guide.

Permissions

For information about permissions required to use the Batch Operations, see Granting permissions for S3 Batch Operations in the Amazon S3 User Guide.

Related actions include:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -189,7 +189,7 @@ "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, "output":{"shape":"CreateMultiRegionAccessPointResult"}, - "documentation":"

This operation is not supported by directory buckets.

Creates a Multi-Region Access Point and associates it with the specified buckets. For more information about creating Multi-Region Access Points, see Creating Multi-Region Access Points in the Amazon S3 User Guide.

This action will always be routed to the US West (Oregon) Region. For more information about the restrictions around working with Multi-Region Access Points, see Multi-Region Access Point restrictions and limitations in the Amazon S3 User Guide.

This request is asynchronous, meaning that you might receive a response before the command has completed. When this request provides a response, it provides a token that you can use to monitor the status of the request with DescribeMultiRegionAccessPointOperation.

The following actions are related to CreateMultiRegionAccessPoint:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Creates a Multi-Region Access Point and associates it with the specified buckets. For more information about creating Multi-Region Access Points, see Creating Multi-Region Access Points in the Amazon S3 User Guide.

This action will always be routed to the US West (Oregon) Region. For more information about the restrictions around working with Multi-Region Access Points, see Multi-Region Access Point restrictions and limitations in the Amazon S3 User Guide.

This request is asynchronous, meaning that you might receive a response before the command has completed. When this request provides a response, it provides a token that you can use to monitor the status of the request with DescribeMultiRegionAccessPointOperation.

The following actions are related to CreateMultiRegionAccessPoint:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -210,7 +210,7 @@ "locationName":"CreateStorageLensGroupRequest", "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, - "documentation":"

Creates a new S3 Storage Lens group and associates it with the specified Amazon Web Services account ID. An S3 Storage Lens group is a custom grouping of objects based on prefix, suffix, object tags, object size, object age, or a combination of these filters. For each Storage Lens group that you’ve created, you can also optionally add Amazon Web Services resource tags. For more information about S3 Storage Lens groups, see Working with S3 Storage Lens groups.

To use this operation, you must have the permission to perform the s3:CreateStorageLensGroup action. If you’re trying to create a Storage Lens group with Amazon Web Services resource tags, you must also have permission to perform the s3:TagResource action. For more information about the required Storage Lens Groups permissions, see Setting account permissions to use S3 Storage Lens groups.

For information about Storage Lens groups errors, see List of Amazon S3 Storage Lens error codes.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Creates a new S3 Storage Lens group and associates it with the specified Amazon Web Services account ID. An S3 Storage Lens group is a custom grouping of objects based on prefix, suffix, object tags, object size, object age, or a combination of these filters. For each Storage Lens group that you’ve created, you can also optionally add Amazon Web Services resource tags. For more information about S3 Storage Lens groups, see Working with S3 Storage Lens groups.

To use this operation, you must have the permission to perform the s3:CreateStorageLensGroup action. If you’re trying to create a Storage Lens group with Amazon Web Services resource tags, you must also have permission to perform the s3:TagResource action. For more information about the required Storage Lens Groups permissions, see Setting account permissions to use S3 Storage Lens groups.

For information about Storage Lens groups errors, see List of Amazon S3 Storage Lens error codes.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -225,7 +225,7 @@ "requestUri":"/v20180820/accessgrantsinstance/grant/{id}" }, "input":{"shape":"DeleteAccessGrantRequest"}, - "documentation":"

Deletes the access grant from the S3 Access Grants instance. You cannot undo an access grant deletion and the grantee will no longer have access to the S3 data.

Permissions

You must have the s3:DeleteAccessGrant permission to use this operation.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Deletes the access grant from the S3 Access Grants instance. You cannot undo an access grant deletion and the grantee will no longer have access to the S3 data.

Permissions

You must have the s3:DeleteAccessGrant permission to use this operation.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -241,7 +241,7 @@ "requestUri":"/v20180820/accessgrantsinstance" }, "input":{"shape":"DeleteAccessGrantsInstanceRequest"}, - "documentation":"

Deletes your S3 Access Grants instance. You must first delete the access grants and locations before S3 Access Grants can delete the instance. See DeleteAccessGrant and DeleteAccessGrantsLocation. If you have associated an IAM Identity Center instance with your S3 Access Grants instance, you must first dissassociate the Identity Center instance from the S3 Access Grants instance before you can delete the S3 Access Grants instance. See AssociateAccessGrantsIdentityCenter and DissociateAccessGrantsIdentityCenter.

Permissions

You must have the s3:DeleteAccessGrantsInstance permission to use this operation.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Deletes your S3 Access Grants instance. You must first delete the access grants and locations before S3 Access Grants can delete the instance. See DeleteAccessGrant and DeleteAccessGrantsLocation. If you have associated an IAM Identity Center instance with your S3 Access Grants instance, you must first disassociate the Identity Center instance from the S3 Access Grants instance before you can delete the S3 Access Grants instance. See AssociateAccessGrantsIdentityCenter and DissociateAccessGrantsIdentityCenter.

Permissions

You must have the s3:DeleteAccessGrantsInstance permission to use this operation.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -257,7 +257,7 @@ "requestUri":"/v20180820/accessgrantsinstance/resourcepolicy" }, "input":{"shape":"DeleteAccessGrantsInstanceResourcePolicyRequest"}, - "documentation":"

Deletes the resource policy of the S3 Access Grants instance. The resource policy is used to manage cross-account access to your S3 Access Grants instance. By deleting the resource policy, you delete any cross-account permissions to your S3 Access Grants instance.

Permissions

You must have the s3:DeleteAccessGrantsInstanceResourcePolicy permission to use this operation.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Deletes the resource policy of the S3 Access Grants instance. The resource policy is used to manage cross-account access to your S3 Access Grants instance. By deleting the resource policy, you delete any cross-account permissions to your S3 Access Grants instance.

Permissions

You must have the s3:DeleteAccessGrantsInstanceResourcePolicy permission to use this operation.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -273,7 +273,7 @@ "requestUri":"/v20180820/accessgrantsinstance/location/{id}" }, "input":{"shape":"DeleteAccessGrantsLocationRequest"}, - "documentation":"

Deregisters a location from your S3 Access Grants instance. You can only delete a location registration from an S3 Access Grants instance if there are no grants associated with this location. See Delete a grant for information on how to delete grants. You need to have at least one registered location in your S3 Access Grants instance in order to create access grants.

Permissions

You must have the s3:DeleteAccessGrantsLocation permission to use this operation.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Deregisters a location from your S3 Access Grants instance. You can only delete a location registration from an S3 Access Grants instance if there are no grants associated with this location. See Delete a grant for information on how to delete grants. You need to have at least one registered location in your S3 Access Grants instance in order to create access grants.

Permissions

You must have the s3:DeleteAccessGrantsLocation permission to use this operation.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -289,7 +289,7 @@ "requestUri":"/v20180820/accesspoint/{name}" }, "input":{"shape":"DeleteAccessPointRequest"}, - "documentation":"

Deletes the specified access point.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.

The following actions are related to DeleteAccessPoint:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Deletes the specified access point.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.

The following actions are related to DeleteAccessPoint:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -304,7 +304,7 @@ "requestUri":"/v20180820/accesspointforobjectlambda/{name}" }, "input":{"shape":"DeleteAccessPointForObjectLambdaRequest"}, - "documentation":"

This operation is not supported by directory buckets.

Deletes the specified Object Lambda Access Point.

The following actions are related to DeleteAccessPointForObjectLambda:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Deletes the specified Object Lambda Access Point.

The following actions are related to DeleteAccessPointForObjectLambda:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -319,7 +319,7 @@ "requestUri":"/v20180820/accesspoint/{name}/policy" }, "input":{"shape":"DeleteAccessPointPolicyRequest"}, - "documentation":"

Deletes the access point policy for the specified access point.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.

The following actions are related to DeleteAccessPointPolicy:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Deletes the access point policy for the specified access point.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.

The following actions are related to DeleteAccessPointPolicy:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -334,7 +334,7 @@ "requestUri":"/v20180820/accesspointforobjectlambda/{name}/policy" }, "input":{"shape":"DeleteAccessPointPolicyForObjectLambdaRequest"}, - "documentation":"

This operation is not supported by directory buckets.

Removes the resource policy for an Object Lambda Access Point.

The following actions are related to DeleteAccessPointPolicyForObjectLambda:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Removes the resource policy for an Object Lambda Access Point.

The following actions are related to DeleteAccessPointPolicyForObjectLambda:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -349,7 +349,7 @@ "requestUri":"/v20180820/accesspoint/{name}/scope" }, "input":{"shape":"DeleteAccessPointScopeRequest"}, - "documentation":"

Deletes an existing access point scope for a directory bucket.

When you delete the scope of an access point, all prefixes and permissions are deleted.

To use this operation, you must have the permission to perform the s3express:DeleteAccessPointScope action.

For information about REST API errors, see REST error responses.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Deletes an existing access point scope for a directory bucket.

When you delete the scope of an access point, all prefixes and permissions are deleted.

To use this operation, you must have the permission to perform the s3express:DeleteAccessPointScope action.

For information about REST API errors, see REST error responses.

", "staticContextParams":{ "RequiresAccountId":{"value":true}, "UseS3ExpressControlEndpoint":{"value":true} @@ -377,7 +377,7 @@ "requestUri":"/v20180820/bucket/{name}/lifecycleconfiguration" }, "input":{"shape":"DeleteBucketLifecycleConfigurationRequest"}, - "documentation":"

This action deletes an Amazon S3 on Outposts bucket's lifecycle configuration. To delete an S3 bucket's lifecycle configuration, see DeleteBucketLifecycle in the Amazon S3 API Reference.

Deletes the lifecycle configuration from the specified Outposts bucket. Amazon S3 on Outposts removes all the lifecycle configuration rules in the lifecycle subresource associated with the bucket. Your objects never expire, and Amazon S3 on Outposts no longer automatically deletes any objects on the basis of rules contained in the deleted lifecycle configuration. For more information, see Using Amazon S3 on Outposts in Amazon S3 User Guide.

To use this operation, you must have permission to perform the s3-outposts:PutLifecycleConfiguration action. By default, the bucket owner has this permission and the Outposts bucket owner can grant this permission to others.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.

For more information about object expiration, see Elements to Describe Lifecycle Actions.

Related actions include:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This action deletes an Amazon S3 on Outposts bucket's lifecycle configuration. To delete an S3 bucket's lifecycle configuration, see DeleteBucketLifecycle in the Amazon S3 API Reference.

Deletes the lifecycle configuration from the specified Outposts bucket. Amazon S3 on Outposts removes all the lifecycle configuration rules in the lifecycle subresource associated with the bucket. Your objects never expire, and Amazon S3 on Outposts no longer automatically deletes any objects on the basis of rules contained in the deleted lifecycle configuration. For more information, see Using Amazon S3 on Outposts in Amazon S3 User Guide.

To use this operation, you must have permission to perform the s3-outposts:PutLifecycleConfiguration action. By default, the bucket owner has this permission and the Outposts bucket owner can grant this permission to others.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.

For more information about object expiration, see Elements to Describe Lifecycle Actions.

Related actions include:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -392,7 +392,7 @@ "requestUri":"/v20180820/bucket/{name}/policy" }, "input":{"shape":"DeleteBucketPolicyRequest"}, - "documentation":"

This action deletes an Amazon S3 on Outposts bucket policy. To delete an S3 bucket policy, see DeleteBucketPolicy in the Amazon S3 API Reference.

This implementation of the DELETE action uses the policy subresource to delete the policy of a specified Amazon S3 on Outposts bucket. If you are using an identity other than the root user of the Amazon Web Services account that owns the bucket, the calling identity must have the s3-outposts:DeleteBucketPolicy permissions on the specified Outposts bucket and belong to the bucket owner's account to use this action. For more information, see Using Amazon S3 on Outposts in Amazon S3 User Guide.

If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

As a security precaution, the root user of the Amazon Web Services account that owns a bucket can always use this action, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.

The following actions are related to DeleteBucketPolicy:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This action deletes an Amazon S3 on Outposts bucket policy. To delete an S3 bucket policy, see DeleteBucketPolicy in the Amazon S3 API Reference.

This implementation of the DELETE action uses the policy subresource to delete the policy of a specified Amazon S3 on Outposts bucket. If you are using an identity other than the root user of the Amazon Web Services account that owns the bucket, the calling identity must have the s3-outposts:DeleteBucketPolicy permissions on the specified Outposts bucket and belong to the bucket owner's account to use this action. For more information, see Using Amazon S3 on Outposts in Amazon S3 User Guide.

If you don't have DeleteBucketPolicy permissions, Amazon S3 returns a 403 Access Denied error. If you have the correct permissions, but you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 405 Method Not Allowed error.

As a security precaution, the root user of the Amazon Web Services account that owns a bucket can always use this action, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.

The following actions are related to DeleteBucketPolicy:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -407,7 +407,7 @@ "requestUri":"/v20180820/bucket/{name}/replication" }, "input":{"shape":"DeleteBucketReplicationRequest"}, - "documentation":"

This operation deletes an Amazon S3 on Outposts bucket's replication configuration. To delete an S3 bucket's replication configuration, see DeleteBucketReplication in the Amazon S3 API Reference.

Deletes the replication configuration from the specified S3 on Outposts bucket.

To use this operation, you must have permissions to perform the s3-outposts:PutReplicationConfiguration action. The Outposts bucket owner has this permission by default and can grant it to others. For more information about permissions, see Setting up IAM with S3 on Outposts and Managing access to S3 on Outposts buckets in the Amazon S3 User Guide.

It can take a while to propagate PUT or DELETE requests for a replication configuration to all S3 on Outposts systems. Therefore, the replication configuration that's returned by a GET request soon after a PUT or DELETE request might return a more recent result than what's on the Outpost. If an Outpost is offline, the delay in updating the replication configuration on that Outpost can be significant.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.

For information about S3 replication on Outposts configuration, see Replicating objects for S3 on Outposts in the Amazon S3 User Guide.

The following operations are related to DeleteBucketReplication:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation deletes an Amazon S3 on Outposts bucket's replication configuration. To delete an S3 bucket's replication configuration, see DeleteBucketReplication in the Amazon S3 API Reference.

Deletes the replication configuration from the specified S3 on Outposts bucket.

To use this operation, you must have permissions to perform the s3-outposts:PutReplicationConfiguration action. The Outposts bucket owner has this permission by default and can grant it to others. For more information about permissions, see Setting up IAM with S3 on Outposts and Managing access to S3 on Outposts buckets in the Amazon S3 User Guide.

It can take a while to propagate PUT or DELETE requests for a replication configuration to all S3 on Outposts systems. Therefore, the replication configuration that's returned by a GET request soon after a PUT or DELETE request might return a more recent result than what's on the Outpost. If an Outpost is offline, the delay in updating the replication configuration on that Outpost can be significant.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.

For information about S3 replication on Outposts configuration, see Replicating objects for S3 on Outposts in the Amazon S3 User Guide.

The following operations are related to DeleteBucketReplication:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -423,7 +423,7 @@ "responseCode":204 }, "input":{"shape":"DeleteBucketTaggingRequest"}, - "documentation":"

This action deletes an Amazon S3 on Outposts bucket's tags. To delete S3 bucket tags, see DeleteBucketTagging in the Amazon S3 API Reference.

Deletes the tags from the Outposts bucket. For more information, see Using Amazon S3 on Outposts in Amazon S3 User Guide.

To use this action, you must have permission to perform the PutBucketTagging action. By default, the bucket owner has this permission and can grant this permission to others.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.

The following actions are related to DeleteBucketTagging:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This action deletes an Amazon S3 on Outposts bucket's tags. To delete S3 bucket tags, see DeleteBucketTagging in the Amazon S3 API Reference.

Deletes the tags from the Outposts bucket. For more information, see Using Amazon S3 on Outposts in Amazon S3 User Guide.

To use this action, you must have permission to perform the PutBucketTagging action. By default, the bucket owner has this permission and can grant this permission to others.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.

The following actions are related to DeleteBucketTagging:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -444,7 +444,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"NotFoundException"} ], - "documentation":"

Removes the entire tag set from the specified S3 Batch Operations job.

Permissions

To use the DeleteJobTagging operation, you must have permission to perform the s3:DeleteJobTagging action. For more information, see Controlling access and labeling jobs using tags in the Amazon S3 User Guide.

Related actions include:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Removes the entire tag set from the specified S3 Batch Operations job.

Permissions

To use the DeleteJobTagging operation, you must have permission to perform the s3:DeleteJobTagging action. For more information, see Controlling access and labeling jobs using tags in the Amazon S3 User Guide.

Related actions include:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -464,7 +464,7 @@ "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, "output":{"shape":"DeleteMultiRegionAccessPointResult"}, - "documentation":"

This operation is not supported by directory buckets.

Deletes a Multi-Region Access Point. This action does not delete the buckets associated with the Multi-Region Access Point, only the Multi-Region Access Point itself.

This action will always be routed to the US West (Oregon) Region. For more information about the restrictions around working with Multi-Region Access Points, see Multi-Region Access Point restrictions and limitations in the Amazon S3 User Guide.

This request is asynchronous, meaning that you might receive a response before the command has completed. When this request provides a response, it provides a token that you can use to monitor the status of the request with DescribeMultiRegionAccessPointOperation.

The following actions are related to DeleteMultiRegionAccessPoint:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Deletes a Multi-Region Access Point. This action does not delete the buckets associated with the Multi-Region Access Point, only the Multi-Region Access Point itself.

This action will always be routed to the US West (Oregon) Region. For more information about the restrictions around working with Multi-Region Access Points, see Multi-Region Access Point restrictions and limitations in the Amazon S3 User Guide.

This request is asynchronous, meaning that you might receive a response before the command has completed. When this request provides a response, it provides a token that you can use to monitor the status of the request with DescribeMultiRegionAccessPointOperation.

The following actions are related to DeleteMultiRegionAccessPoint:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -480,7 +480,7 @@ "requestUri":"/v20180820/configuration/publicAccessBlock" }, "input":{"shape":"DeletePublicAccessBlockRequest"}, - "documentation":"

This operation is not supported by directory buckets.

Removes the PublicAccessBlock configuration for an Amazon Web Services account. For more information, see Using Amazon S3 block public access.

Related actions include:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Removes the PublicAccessBlock configuration for an Amazon Web Services account. This operation might be restricted when the account is managed by organization-level Block Public Access policies. You’ll get an Access Denied (403) error when the account is managed by organization-level Block Public Access policies. Organization-level policies override account-level settings, preventing direct account-level modifications. For more information, see Using Amazon S3 block public access.

Related actions include:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -495,7 +495,7 @@ "requestUri":"/v20180820/storagelens/{storagelensid}" }, "input":{"shape":"DeleteStorageLensConfigurationRequest"}, - "documentation":"

This operation is not supported by directory buckets.

Deletes the Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon S3 User Guide.

To use this action, you must have permission to perform the s3:DeleteStorageLensConfiguration action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon S3 User Guide.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Deletes the Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon S3 User Guide.

To use this action, you must have permission to perform the s3:DeleteStorageLensConfiguration action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon S3 User Guide.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -511,7 +511,7 @@ }, "input":{"shape":"DeleteStorageLensConfigurationTaggingRequest"}, "output":{"shape":"DeleteStorageLensConfigurationTaggingResult"}, - "documentation":"

This operation is not supported by directory buckets.

Deletes the Amazon S3 Storage Lens configuration tags. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon S3 User Guide.

To use this action, you must have permission to perform the s3:DeleteStorageLensConfigurationTagging action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon S3 User Guide.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Deletes the Amazon S3 Storage Lens configuration tags. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon S3 User Guide.

To use this action, you must have permission to perform the s3:DeleteStorageLensConfigurationTagging action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon S3 User Guide.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -527,7 +527,7 @@ "responseCode":204 }, "input":{"shape":"DeleteStorageLensGroupRequest"}, - "documentation":"

Deletes an existing S3 Storage Lens group.

To use this operation, you must have the permission to perform the s3:DeleteStorageLensGroup action. For more information about the required Storage Lens Groups permissions, see Setting account permissions to use S3 Storage Lens groups.

For information about Storage Lens groups errors, see List of Amazon S3 Storage Lens error codes.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Deletes an existing S3 Storage Lens group.

To use this operation, you must have the permission to perform the s3:DeleteStorageLensGroup action. For more information about the required Storage Lens Groups permissions, see Setting account permissions to use S3 Storage Lens groups.

For information about Storage Lens groups errors, see List of Amazon S3 Storage Lens error codes.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -549,7 +549,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Retrieves the configuration parameters and status for a Batch Operations job. For more information, see S3 Batch Operations in the Amazon S3 User Guide.

Permissions

To use the DescribeJob operation, you must have permission to perform the s3:DescribeJob action.

Related actions include:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Retrieves the configuration parameters and status for a Batch Operations job. For more information, see S3 Batch Operations in the Amazon S3 User Guide.

Permissions

To use the DescribeJob operation, you must have permission to perform the s3:DescribeJob action.

Related actions include:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -565,7 +565,7 @@ }, "input":{"shape":"DescribeMultiRegionAccessPointOperationRequest"}, "output":{"shape":"DescribeMultiRegionAccessPointOperationResult"}, - "documentation":"

This operation is not supported by directory buckets.

Retrieves the status of an asynchronous request to manage a Multi-Region Access Point. For more information about managing Multi-Region Access Points and how asynchronous requests work, see Using Multi-Region Access Points in the Amazon S3 User Guide.

The following actions are related to GetMultiRegionAccessPoint:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Retrieves the status of an asynchronous request to manage a Multi-Region Access Point. For more information about managing Multi-Region Access Points and how asynchronous requests work, see Using Multi-Region Access Points in the Amazon S3 User Guide.

The following actions are related to GetMultiRegionAccessPoint:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -581,7 +581,7 @@ "requestUri":"/v20180820/accessgrantsinstance/identitycenter" }, "input":{"shape":"DissociateAccessGrantsIdentityCenterRequest"}, - "documentation":"

Dissociates the Amazon Web Services IAM Identity Center instance from the S3 Access Grants instance.

Permissions

You must have the s3:DissociateAccessGrantsIdentityCenter permission to use this operation.

Additional Permissions

You must have the sso:DeleteApplication permission to use this operation.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Dissociates the Amazon Web Services IAM Identity Center instance from the S3 Access Grants instance.

Permissions

You must have the s3:DissociateAccessGrantsIdentityCenter permission to use this operation.

Additional Permissions

You must have the sso:DeleteApplication permission to use this operation.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -598,7 +598,7 @@ }, "input":{"shape":"GetAccessGrantRequest"}, "output":{"shape":"GetAccessGrantResult"}, - "documentation":"

Get the details of an access grant from your S3 Access Grants instance.

Permissions

You must have the s3:GetAccessGrant permission to use this operation.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Get the details of an access grant from your S3 Access Grants instance.

Permissions

You must have the s3:GetAccessGrant permission to use this operation.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -615,7 +615,7 @@ }, "input":{"shape":"GetAccessGrantsInstanceRequest"}, "output":{"shape":"GetAccessGrantsInstanceResult"}, - "documentation":"

Retrieves the S3 Access Grants instance for a Region in your account.

Permissions

You must have the s3:GetAccessGrantsInstance permission to use this operation.

GetAccessGrantsInstance is not supported for cross-account access. You can only call the API from the account that owns the S3 Access Grants instance.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Retrieves the S3 Access Grants instance for a Region in your account.

Permissions

You must have the s3:GetAccessGrantsInstance permission to use this operation.

GetAccessGrantsInstance is not supported for cross-account access. You can only call the API from the account that owns the S3 Access Grants instance.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -632,7 +632,7 @@ }, "input":{"shape":"GetAccessGrantsInstanceForPrefixRequest"}, "output":{"shape":"GetAccessGrantsInstanceForPrefixResult"}, - "documentation":"

Retrieve the S3 Access Grants instance that contains a particular prefix.

Permissions

You must have the s3:GetAccessGrantsInstanceForPrefix permission for the caller account to use this operation.

Additional Permissions

The prefix owner account must grant you the following permissions to their S3 Access Grants instance: s3:GetAccessGrantsInstanceForPrefix.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Retrieve the S3 Access Grants instance that contains a particular prefix.

Permissions

You must have the s3:GetAccessGrantsInstanceForPrefix permission for the caller account to use this operation.

Additional Permissions

The prefix owner account must grant you the following permissions to their S3 Access Grants instance: s3:GetAccessGrantsInstanceForPrefix.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -649,7 +649,7 @@ }, "input":{"shape":"GetAccessGrantsInstanceResourcePolicyRequest"}, "output":{"shape":"GetAccessGrantsInstanceResourcePolicyResult"}, - "documentation":"

Returns the resource policy of the S3 Access Grants instance.

Permissions

You must have the s3:GetAccessGrantsInstanceResourcePolicy permission to use this operation.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Returns the resource policy of the S3 Access Grants instance.

Permissions

You must have the s3:GetAccessGrantsInstanceResourcePolicy permission to use this operation.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -666,7 +666,7 @@ }, "input":{"shape":"GetAccessGrantsLocationRequest"}, "output":{"shape":"GetAccessGrantsLocationResult"}, - "documentation":"

Retrieves the details of a particular location registered in your S3 Access Grants instance.

Permissions

You must have the s3:GetAccessGrantsLocation permission to use this operation.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Retrieves the details of a particular location registered in your S3 Access Grants instance.

Permissions

You must have the s3:GetAccessGrantsLocation permission to use this operation.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -683,7 +683,7 @@ }, "input":{"shape":"GetAccessPointRequest"}, "output":{"shape":"GetAccessPointResult"}, - "documentation":"

Returns configuration information about the specified access point.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.

The following actions are related to GetAccessPoint:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Returns configuration information about the specified access point.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.

The following actions are related to GetAccessPoint:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -699,7 +699,7 @@ }, "input":{"shape":"GetAccessPointConfigurationForObjectLambdaRequest"}, "output":{"shape":"GetAccessPointConfigurationForObjectLambdaResult"}, - "documentation":"

This operation is not supported by directory buckets.

Returns configuration for an Object Lambda Access Point.

The following actions are related to GetAccessPointConfigurationForObjectLambda:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Returns configuration for an Object Lambda Access Point.

The following actions are related to GetAccessPointConfigurationForObjectLambda:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -715,7 +715,7 @@ }, "input":{"shape":"GetAccessPointForObjectLambdaRequest"}, "output":{"shape":"GetAccessPointForObjectLambdaResult"}, - "documentation":"

This operation is not supported by directory buckets.

Returns configuration information about the specified Object Lambda Access Point

The following actions are related to GetAccessPointForObjectLambda:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Returns configuration information about the specified Object Lambda Access Point

The following actions are related to GetAccessPointForObjectLambda:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -731,7 +731,7 @@ }, "input":{"shape":"GetAccessPointPolicyRequest"}, "output":{"shape":"GetAccessPointPolicyResult"}, - "documentation":"

Returns the access point policy associated with the specified access point.

The following actions are related to GetAccessPointPolicy:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Returns the access point policy associated with the specified access point.

The following actions are related to GetAccessPointPolicy:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -747,7 +747,7 @@ }, "input":{"shape":"GetAccessPointPolicyForObjectLambdaRequest"}, "output":{"shape":"GetAccessPointPolicyForObjectLambdaResult"}, - "documentation":"

This operation is not supported by directory buckets.

Returns the resource policy for an Object Lambda Access Point.

The following actions are related to GetAccessPointPolicyForObjectLambda:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Returns the resource policy for an Object Lambda Access Point.

The following actions are related to GetAccessPointPolicyForObjectLambda:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -763,7 +763,7 @@ }, "input":{"shape":"GetAccessPointPolicyStatusRequest"}, "output":{"shape":"GetAccessPointPolicyStatusResult"}, - "documentation":"

This operation is not supported by directory buckets.

Indicates whether the specified access point currently has a policy that allows public access. For more information about public access through access points, see Managing Data Access with Amazon S3 access points in the Amazon S3 User Guide.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Indicates whether the specified access point currently has a policy that allows public access. For more information about public access through access points, see Managing Data Access with Amazon S3 access points in the Amazon S3 User Guide.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -779,7 +779,7 @@ }, "input":{"shape":"GetAccessPointPolicyStatusForObjectLambdaRequest"}, "output":{"shape":"GetAccessPointPolicyStatusForObjectLambdaResult"}, - "documentation":"

This operation is not supported by directory buckets.

Returns the status of the resource policy associated with an Object Lambda Access Point.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Returns the status of the resource policy associated with an Object Lambda Access Point.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -795,7 +795,7 @@ }, "input":{"shape":"GetAccessPointScopeRequest"}, "output":{"shape":"GetAccessPointScopeResult"}, - "documentation":"

Returns the access point scope for a directory bucket.

To use this operation, you must have the permission to perform the s3express:GetAccessPointScope action.

For information about REST API errors, see REST error responses.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Returns the access point scope for a directory bucket.

To use this operation, you must have the permission to perform the s3express:GetAccessPointScope action.

For information about REST API errors, see REST error responses.

", "staticContextParams":{ "RequiresAccountId":{"value":true}, "UseS3ExpressControlEndpoint":{"value":true} @@ -809,7 +809,7 @@ }, "input":{"shape":"GetBucketRequest"}, "output":{"shape":"GetBucketResult"}, - "documentation":"

Gets an Amazon S3 on Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.

If you are using an identity other than the root user of the Amazon Web Services account that owns the Outposts bucket, the calling identity must have the s3-outposts:GetBucket permissions on the specified Outposts bucket and belong to the Outposts bucket owner's account in order to use this action. Only users from Outposts bucket owner account with the right permissions can perform actions on an Outposts bucket.

If you don't have s3-outposts:GetBucket permissions or you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 403 Access Denied error.

The following actions are related to GetBucket for Amazon S3 on Outposts:

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Gets an Amazon S3 on Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.

If you are using an identity other than the root user of the Amazon Web Services account that owns the Outposts bucket, the calling identity must have the s3-outposts:GetBucket permissions on the specified Outposts bucket and belong to the Outposts bucket owner's account in order to use this action. Only users from Outposts bucket owner account with the right permissions can perform actions on an Outposts bucket.

If you don't have s3-outposts:GetBucket permissions or you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 403 Access Denied error.

The following actions are related to GetBucket for Amazon S3 on Outposts:

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -825,7 +825,7 @@ }, "input":{"shape":"GetBucketLifecycleConfigurationRequest"}, "output":{"shape":"GetBucketLifecycleConfigurationResult"}, - "documentation":"

This action gets an Amazon S3 on Outposts bucket's lifecycle configuration. To get an S3 bucket's lifecycle configuration, see GetBucketLifecycleConfiguration in the Amazon S3 API Reference.

Returns the lifecycle configuration information set on the Outposts bucket. For more information, see Using Amazon S3 on Outposts and for information about lifecycle configuration, see Object Lifecycle Management in Amazon S3 User Guide.

To use this action, you must have permission to perform the s3-outposts:GetLifecycleConfiguration action. The Outposts bucket owner has this permission, by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.

GetBucketLifecycleConfiguration has the following special error:

The following actions are related to GetBucketLifecycleConfiguration:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This action gets an Amazon S3 on Outposts bucket's lifecycle configuration. To get an S3 bucket's lifecycle configuration, see GetBucketLifecycleConfiguration in the Amazon S3 API Reference.

Returns the lifecycle configuration information set on the Outposts bucket. For more information, see Using Amazon S3 on Outposts and for information about lifecycle configuration, see Object Lifecycle Management in Amazon S3 User Guide.

To use this action, you must have permission to perform the s3-outposts:GetLifecycleConfiguration action. The Outposts bucket owner has this permission, by default. The bucket owner can grant this permission to others. For more information about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3 Resources.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.

GetBucketLifecycleConfiguration has the following special error:

The following actions are related to GetBucketLifecycleConfiguration:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -841,7 +841,7 @@ }, "input":{"shape":"GetBucketPolicyRequest"}, "output":{"shape":"GetBucketPolicyResult"}, - "documentation":"

This action gets a bucket policy for an Amazon S3 on Outposts bucket. To get a policy for an S3 bucket, see GetBucketPolicy in the Amazon S3 API Reference.

Returns the policy of a specified Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.

If you are using an identity other than the root user of the Amazon Web Services account that owns the bucket, the calling identity must have the GetBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this action.

Only users from Outposts bucket owner account with the right permissions can perform actions on an Outposts bucket. If you don't have s3-outposts:GetBucketPolicy permissions or you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 403 Access Denied error.

As a security precaution, the root user of the Amazon Web Services account that owns a bucket can always use this action, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.

The following actions are related to GetBucketPolicy:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This action gets a bucket policy for an Amazon S3 on Outposts bucket. To get a policy for an S3 bucket, see GetBucketPolicy in the Amazon S3 API Reference.

Returns the policy of a specified Outposts bucket. For more information, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.

If you are using an identity other than the root user of the Amazon Web Services account that owns the bucket, the calling identity must have the GetBucketPolicy permissions on the specified bucket and belong to the bucket owner's account in order to use this action.

Only users from Outposts bucket owner account with the right permissions can perform actions on an Outposts bucket. If you don't have s3-outposts:GetBucketPolicy permissions or you're not using an identity that belongs to the bucket owner's account, Amazon S3 returns a 403 Access Denied error.

As a security precaution, the root user of the Amazon Web Services account that owns a bucket can always use this action, even if the policy explicitly denies the root user the ability to perform this action.

For more information about bucket policies, see Using Bucket Policies and User Policies.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.

The following actions are related to GetBucketPolicy:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -905,7 +905,7 @@ }, "input":{"shape":"GetDataAccessRequest"}, "output":{"shape":"GetDataAccessResult"}, - "documentation":"

Returns a temporary access credential from S3 Access Grants to the grantee or client application. The temporary credential is an Amazon Web Services STS token that grants them access to the S3 data.

Permissions

You must have the s3:GetDataAccess permission to use this operation.

Additional Permissions

The IAM role that S3 Access Grants assumes must have the following permissions specified in the trust policy when registering the location: sts:AssumeRole, for directory users or groups sts:SetContext, and for IAM users or roles sts:SetSourceIdentity.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Returns a temporary access credential from S3 Access Grants to the grantee or client application. The temporary credential is an Amazon Web Services STS token that grants them access to the S3 data.

Permissions

You must have the s3:GetDataAccess permission to use this operation.

Additional Permissions

The IAM role that S3 Access Grants assumes must have the following permissions specified in the trust policy when registering the location: sts:AssumeRole, for directory users or groups sts:SetContext, and for IAM users or roles sts:SetSourceIdentity.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -927,7 +927,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"NotFoundException"} ], - "documentation":"

Returns the tags on an S3 Batch Operations job.

Permissions

To use the GetJobTagging operation, you must have permission to perform the s3:GetJobTagging action. For more information, see Controlling access and labeling jobs using tags in the Amazon S3 User Guide.

Related actions include:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Returns the tags on an S3 Batch Operations job.

Permissions

To use the GetJobTagging operation, you must have permission to perform the s3:GetJobTagging action. For more information, see Controlling access and labeling jobs using tags in the Amazon S3 User Guide.

Related actions include:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -943,7 +943,7 @@ }, "input":{"shape":"GetMultiRegionAccessPointRequest"}, "output":{"shape":"GetMultiRegionAccessPointResult"}, - "documentation":"

This operation is not supported by directory buckets.

Returns configuration information about the specified Multi-Region Access Point.

This action will always be routed to the US West (Oregon) Region. For more information about the restrictions around working with Multi-Region Access Points, see Multi-Region Access Point restrictions and limitations in the Amazon S3 User Guide.

The following actions are related to GetMultiRegionAccessPoint:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Returns configuration information about the specified Multi-Region Access Point.

This action will always be routed to the US West (Oregon) Region. For more information about the restrictions around working with Multi-Region Access Points, see Multi-Region Access Point restrictions and limitations in the Amazon S3 User Guide.

The following actions are related to GetMultiRegionAccessPoint:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -960,7 +960,7 @@ }, "input":{"shape":"GetMultiRegionAccessPointPolicyRequest"}, "output":{"shape":"GetMultiRegionAccessPointPolicyResult"}, - "documentation":"

This operation is not supported by directory buckets.

Returns the access control policy of the specified Multi-Region Access Point.

This action will always be routed to the US West (Oregon) Region. For more information about the restrictions around working with Multi-Region Access Points, see Multi-Region Access Point restrictions and limitations in the Amazon S3 User Guide.

The following actions are related to GetMultiRegionAccessPointPolicy:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Returns the access control policy of the specified Multi-Region Access Point.

This action will always be routed to the US West (Oregon) Region. For more information about the restrictions around working with Multi-Region Access Points, see Multi-Region Access Point restrictions and limitations in the Amazon S3 User Guide.

The following actions are related to GetMultiRegionAccessPointPolicy:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -977,7 +977,7 @@ }, "input":{"shape":"GetMultiRegionAccessPointPolicyStatusRequest"}, "output":{"shape":"GetMultiRegionAccessPointPolicyStatusResult"}, - "documentation":"

This operation is not supported by directory buckets.

Indicates whether the specified Multi-Region Access Point has an access control policy that allows public access.

This action will always be routed to the US West (Oregon) Region. For more information about the restrictions around working with Multi-Region Access Points, see Multi-Region Access Point restrictions and limitations in the Amazon S3 User Guide.

The following actions are related to GetMultiRegionAccessPointPolicyStatus:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Indicates whether the specified Multi-Region Access Point has an access control policy that allows public access.

This action will always be routed to the US West (Oregon) Region. For more information about the restrictions around working with Multi-Region Access Points, see Multi-Region Access Point restrictions and limitations in the Amazon S3 User Guide.

The following actions are related to GetMultiRegionAccessPointPolicyStatus:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -994,7 +994,7 @@ }, "input":{"shape":"GetMultiRegionAccessPointRoutesRequest"}, "output":{"shape":"GetMultiRegionAccessPointRoutesResult"}, - "documentation":"

This operation is not supported by directory buckets.

Returns the routing configuration for a Multi-Region Access Point, indicating which Regions are active or passive.

To obtain routing control changes and failover requests, use the Amazon S3 failover control infrastructure endpoints in these five Amazon Web Services Regions:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Returns the routing configuration for a Multi-Region Access Point, indicating which Regions are active or passive.

To obtain routing control changes and failover requests, use the Amazon S3 failover control infrastructure endpoints in these five Amazon Web Services Regions:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1014,7 +1014,7 @@ "errors":[ {"shape":"NoSuchPublicAccessBlockConfiguration"} ], - "documentation":"

This operation is not supported by directory buckets.

Retrieves the PublicAccessBlock configuration for an Amazon Web Services account. For more information, see Using Amazon S3 block public access.

Related actions include:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Retrieves the PublicAccessBlock configuration for an Amazon Web Services account. For more information, see Using Amazon S3 block public access.

Related actions include:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1030,7 +1030,7 @@ }, "input":{"shape":"GetStorageLensConfigurationRequest"}, "output":{"shape":"GetStorageLensConfigurationResult"}, - "documentation":"

This operation is not supported by directory buckets.

Gets the Amazon S3 Storage Lens configuration. For more information, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon S3 User Guide. For a complete list of S3 Storage Lens metrics, see S3 Storage Lens metrics glossary in the Amazon S3 User Guide.

To use this action, you must have permission to perform the s3:GetStorageLensConfiguration action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon S3 User Guide.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Gets the Amazon S3 Storage Lens configuration. For more information, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon S3 User Guide. For a complete list of S3 Storage Lens metrics, see S3 Storage Lens metrics glossary in the Amazon S3 User Guide.

To use this action, you must have permission to perform the s3:GetStorageLensConfiguration action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon S3 User Guide.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1046,7 +1046,7 @@ }, "input":{"shape":"GetStorageLensConfigurationTaggingRequest"}, "output":{"shape":"GetStorageLensConfigurationTaggingResult"}, - "documentation":"

This operation is not supported by directory buckets.

Gets the tags of Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon S3 User Guide.

To use this action, you must have permission to perform the s3:GetStorageLensConfigurationTagging action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon S3 User Guide.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Gets the tags of Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon S3 User Guide.

To use this action, you must have permission to perform the s3:GetStorageLensConfigurationTagging action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon S3 User Guide.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1062,7 +1062,7 @@ }, "input":{"shape":"GetStorageLensGroupRequest"}, "output":{"shape":"GetStorageLensGroupResult"}, - "documentation":"

Retrieves the Storage Lens group configuration details.

To use this operation, you must have the permission to perform the s3:GetStorageLensGroup action. For more information about the required Storage Lens Groups permissions, see Setting account permissions to use S3 Storage Lens groups.

For information about Storage Lens groups errors, see List of Amazon S3 Storage Lens error codes.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Retrieves the Storage Lens group configuration details.

To use this operation, you must have the permission to perform the s3:GetStorageLensGroup action. For more information about the required Storage Lens Groups permissions, see Setting account permissions to use S3 Storage Lens groups.

For information about Storage Lens groups errors, see List of Amazon S3 Storage Lens error codes.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1078,7 +1078,7 @@ }, "input":{"shape":"ListAccessGrantsRequest"}, "output":{"shape":"ListAccessGrantsResult"}, - "documentation":"

Returns the list of access grants in your S3 Access Grants instance.

Permissions

You must have the s3:ListAccessGrants permission to use this operation.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Returns the list of access grants in your S3 Access Grants instance.

Permissions

You must have the s3:ListAccessGrants permission to use this operation.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1095,7 +1095,7 @@ }, "input":{"shape":"ListAccessGrantsInstancesRequest"}, "output":{"shape":"ListAccessGrantsInstancesResult"}, - "documentation":"

Returns a list of S3 Access Grants instances. An S3 Access Grants instance serves as a logical grouping for your individual access grants. You can only have one S3 Access Grants instance per Region per account.

Permissions

You must have the s3:ListAccessGrantsInstances permission to use this operation.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Returns a list of S3 Access Grants instances. An S3 Access Grants instance serves as a logical grouping for your individual access grants. You can only have one S3 Access Grants instance per Region per account.

Permissions

You must have the s3:ListAccessGrantsInstances permission to use this operation.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1112,7 +1112,7 @@ }, "input":{"shape":"ListAccessGrantsLocationsRequest"}, "output":{"shape":"ListAccessGrantsLocationsResult"}, - "documentation":"

Returns a list of the locations registered in your S3 Access Grants instance.

Permissions

You must have the s3:ListAccessGrantsLocations permission to use this operation.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Returns a list of the locations registered in your S3 Access Grants instance.

Permissions

You must have the s3:ListAccessGrantsLocations permission to use this operation.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1129,7 +1129,7 @@ }, "input":{"shape":"ListAccessPointsRequest"}, "output":{"shape":"ListAccessPointsResult"}, - "documentation":"

This operation is not supported by directory buckets.

Returns a list of the access points. You can retrieve up to 1,000 access points per call. If the call returns more than 1,000 access points (or the number specified in maxResults, whichever is less), the response will include a continuation token that you can use to list the additional access points.

Returns only access points attached to S3 buckets by default. To return all access points specify DataSourceType as ALL.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.

The following actions are related to ListAccessPoints:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Returns a list of the access points. You can retrieve up to 1,000 access points per call. If the call returns more than 1,000 access points (or the number specified in maxResults, whichever is less), the response will include a continuation token that you can use to list the additional access points.

Returns only access points attached to S3 buckets by default. To return all access points specify DataSourceType as ALL.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.

The following actions are related to ListAccessPoints:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1145,7 +1145,7 @@ }, "input":{"shape":"ListAccessPointsForDirectoryBucketsRequest"}, "output":{"shape":"ListAccessPointsForDirectoryBucketsResult"}, - "documentation":"

Returns a list of the access points that are owned by the Amazon Web Services account and that are associated with the specified directory bucket.

To list access points for general purpose buckets, see ListAccesspoints.

To use this operation, you must have the permission to perform the s3express:ListAccessPointsForDirectoryBuckets action.

For information about REST API errors, see REST error responses.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Returns a list of the access points that are owned by the Amazon Web Services account and that are associated with the specified directory bucket.

To list access points for general purpose buckets, see ListAccessPoints.

To use this operation, you must have the permission to perform the s3express:ListAccessPointsForDirectoryBuckets action.

For information about REST API errors, see REST error responses.

", "staticContextParams":{ "RequiresAccountId":{"value":true}, "UseS3ExpressControlEndpoint":{"value":true} @@ -1159,7 +1159,7 @@ }, "input":{"shape":"ListAccessPointsForObjectLambdaRequest"}, "output":{"shape":"ListAccessPointsForObjectLambdaResult"}, - "documentation":"

This operation is not supported by directory buckets.

Returns some or all (up to 1,000) access points associated with the Object Lambda Access Point per call. If there are more access points than what can be returned in one call, the response will include a continuation token that you can use to list the additional access points.

The following actions are related to ListAccessPointsForObjectLambda:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Returns some or all (up to 1,000) access points associated with the Object Lambda Access Point per call. If there are more access points than what can be returned in one call, the response will include a continuation token that you can use to list the additional access points.

The following actions are related to ListAccessPointsForObjectLambda:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1175,7 +1175,7 @@ }, "input":{"shape":"ListCallerAccessGrantsRequest"}, "output":{"shape":"ListCallerAccessGrantsResult"}, - "documentation":"

Use this API to list the access grants that grant the caller access to Amazon S3 data through S3 Access Grants. The caller (grantee) can be an Identity and Access Management (IAM) identity or Amazon Web Services Identity Center corporate directory identity. You must pass the Amazon Web Services account of the S3 data owner (grantor) in the request. You can, optionally, narrow the results by GrantScope, using a fragment of the data's S3 path, and S3 Access Grants will return only the grants with a path that contains the path fragment. You can also pass the AllowedByApplication filter in the request, which returns only the grants authorized for applications, whether the application is the caller's Identity Center application or any other application (ALL). For more information, see List the caller's access grants in the Amazon S3 User Guide.

Permissions

You must have the s3:ListCallerAccessGrants permission to use this operation.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Use this API to list the access grants that grant the caller access to Amazon S3 data through S3 Access Grants. The caller (grantee) can be an Identity and Access Management (IAM) identity or Amazon Web Services Identity Center corporate directory identity. You must pass the Amazon Web Services account of the S3 data owner (grantor) in the request. You can, optionally, narrow the results by GrantScope, using a fragment of the data's S3 path, and S3 Access Grants will return only the grants with a path that contains the path fragment. You can also pass the AllowedByApplication filter in the request, which returns only the grants authorized for applications, whether the application is the caller's Identity Center application or any other application (ALL). For more information, see List the caller's access grants in the Amazon S3 User Guide.

Permissions

You must have the s3:ListCallerAccessGrants permission to use this operation.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1197,7 +1197,7 @@ {"shape":"InternalServiceException"}, {"shape":"InvalidNextTokenException"} ], - "documentation":"

Lists current S3 Batch Operations jobs as well as the jobs that have ended within the last 90 days for the Amazon Web Services account making the request. For more information, see S3 Batch Operations in the Amazon S3 User Guide.

Permissions

To use the ListJobs operation, you must have permission to perform the s3:ListJobs action.

Related actions include:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Lists current S3 Batch Operations jobs as well as the jobs that have ended within the last 90 days for the Amazon Web Services account making the request. For more information, see S3 Batch Operations in the Amazon S3 User Guide.

Permissions

To use the ListJobs operation, you must have permission to perform the s3:ListJobs action.

Related actions include:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1213,7 +1213,7 @@ }, "input":{"shape":"ListMultiRegionAccessPointsRequest"}, "output":{"shape":"ListMultiRegionAccessPointsResult"}, - "documentation":"

This operation is not supported by directory buckets.

Returns a list of the Multi-Region Access Points currently associated with the specified Amazon Web Services account. Each call can return up to 100 Multi-Region Access Points, the maximum number of Multi-Region Access Points that can be associated with a single account.

This action will always be routed to the US West (Oregon) Region. For more information about the restrictions around working with Multi-Region Access Points, see Multi-Region Access Point restrictions and limitations in the Amazon S3 User Guide.

The following actions are related to ListMultiRegionAccessPoint:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Returns a list of the Multi-Region Access Points currently associated with the specified Amazon Web Services account. Each call can return up to 100 Multi-Region Access Points, the maximum number of Multi-Region Access Points that can be associated with a single account.

This action will always be routed to the US West (Oregon) Region. For more information about the restrictions around working with Multi-Region Access Points, see Multi-Region Access Point restrictions and limitations in the Amazon S3 User Guide.

The following actions are related to ListMultiRegionAccessPoint:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1230,7 +1230,7 @@ }, "input":{"shape":"ListRegionalBucketsRequest"}, "output":{"shape":"ListRegionalBucketsResult"}, - "documentation":"

This operation is not supported by directory buckets.

Returns a list of all Outposts buckets in an Outpost that are owned by the authenticated sender of the request. For more information, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.

For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and x-amz-outpost-id in your request, see the Examples section.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Returns a list of all Outposts buckets in an Outpost that are owned by the authenticated sender of the request. For more information, see Using Amazon S3 on Outposts in the Amazon S3 User Guide.

For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and x-amz-outpost-id in your request, see the Examples section.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1246,7 +1246,7 @@ }, "input":{"shape":"ListStorageLensConfigurationsRequest"}, "output":{"shape":"ListStorageLensConfigurationsResult"}, - "documentation":"

This operation is not supported by directory buckets.

Gets a list of Amazon S3 Storage Lens configurations. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon S3 User Guide.

To use this action, you must have permission to perform the s3:ListStorageLensConfigurations action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon S3 User Guide.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Gets a list of Amazon S3 Storage Lens configurations. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon S3 User Guide.

To use this action, you must have permission to perform the s3:ListStorageLensConfigurations action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon S3 User Guide.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1262,7 +1262,7 @@ }, "input":{"shape":"ListStorageLensGroupsRequest"}, "output":{"shape":"ListStorageLensGroupsResult"}, - "documentation":"

Lists all the Storage Lens groups in the specified home Region.

To use this operation, you must have the permission to perform the s3:ListStorageLensGroups action. For more information about the required Storage Lens Groups permissions, see Setting account permissions to use S3 Storage Lens groups.

For information about Storage Lens groups errors, see List of Amazon S3 Storage Lens error codes.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Lists all the Storage Lens groups in the specified home Region.

To use this operation, you must have the permission to perform the s3:ListStorageLensGroups action. For more information about the required Storage Lens Groups permissions, see Setting account permissions to use S3 Storage Lens groups.

For information about Storage Lens groups errors, see List of Amazon S3 Storage Lens error codes.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1278,7 +1278,7 @@ }, "input":{"shape":"ListTagsForResourceRequest"}, "output":{"shape":"ListTagsForResourceResult"}, - "documentation":"

This operation allows you to list all of the tags for a specified resource. Each tag is a label consisting of a key and value. Tags can help you organize, track costs for, and control access to resources.

This operation is only supported for the following Amazon S3 resources:

Permissions

For Storage Lens groups and S3 Access Grants, you must have the s3:ListTagsForResource permission to use this operation.

For more information about the required Storage Lens Groups permissions, see Setting account permissions to use S3 Storage Lens groups.

Directory bucket permissions

For directory buckets and access points for directory buckets, you must have the s3express:ListTagsForResource permission to use this operation. For more information about directory buckets policies and permissions, see Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.

For information about S3 Tagging errors, see List of Amazon S3 Tagging error codes.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation allows you to list all of the tags for a specified resource. Each tag is a label consisting of a key and value. Tags can help you organize, track costs for, and control access to resources.

This operation is only supported for the following Amazon S3 resources:

Permissions

For general purpose buckets, access points for general purpose buckets, Storage Lens groups, and S3 Access Grants, you must have the s3:ListTagsForResource permission to use this operation.

Directory bucket permissions

For directory buckets, you must have the s3express:ListTagsForResource permission to use this operation. For more information about directory buckets policies and permissions, see Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.

For information about S3 Tagging errors, see List of Amazon S3 Tagging error codes.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1298,7 +1298,7 @@ "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, "output":{"shape":"PutAccessGrantsInstanceResourcePolicyResult"}, - "documentation":"

Updates the resource policy of the S3 Access Grants instance.

Permissions

You must have the s3:PutAccessGrantsInstanceResourcePolicy permission to use this operation.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Updates the resource policy of the S3 Access Grants instance.

Permissions

You must have the s3:PutAccessGrantsInstanceResourcePolicy permission to use this operation.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1318,7 +1318,7 @@ "locationName":"PutAccessPointConfigurationForObjectLambdaRequest", "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, - "documentation":"

This operation is not supported by directory buckets.

Replaces configuration for an Object Lambda Access Point.

The following actions are related to PutAccessPointConfigurationForObjectLambda:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Replaces configuration for an Object Lambda Access Point.

The following actions are related to PutAccessPointConfigurationForObjectLambda:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1337,7 +1337,7 @@ "locationName":"PutAccessPointPolicyRequest", "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, - "documentation":"

Associates an access policy with the specified access point. Each access point can have only one policy, so a request made to this API replaces any existing policy associated with the specified access point.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.

The following actions are related to PutAccessPointPolicy:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Associates an access policy with the specified access point. Each access point can have only one policy, so a request made to this API replaces any existing policy associated with the specified access point.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.

The following actions are related to PutAccessPointPolicy:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1356,7 +1356,7 @@ "locationName":"PutAccessPointPolicyForObjectLambdaRequest", "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, - "documentation":"

This operation is not supported by directory buckets.

Creates or replaces resource policy for an Object Lambda Access Point. For an example policy, see Creating Object Lambda Access Points in the Amazon S3 User Guide.

The following actions are related to PutAccessPointPolicyForObjectLambda:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Creates or replaces resource policy for an Object Lambda Access Point. For an example policy, see Creating Object Lambda Access Points in the Amazon S3 User Guide.

The following actions are related to PutAccessPointPolicyForObjectLambda:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1375,7 +1375,7 @@ "locationName":"PutAccessPointScopeRequest", "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, - "documentation":"

Creates or replaces the access point scope for a directory bucket. You can use the access point scope to restrict access to specific prefixes, API operations, or a combination of both.

You can specify any amount of prefixes, but the total length of characters of all prefixes must be less than 256 bytes in size.

To use this operation, you must have the permission to perform the s3express:PutAccessPointScope action.

For information about REST API errors, see REST error responses.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Creates or replaces the access point scope for a directory bucket. You can use the access point scope to restrict access to specific prefixes, API operations, or a combination of both.

You can specify any number of prefixes, but the total length of characters of all prefixes must be less than 256 bytes in size.

To use this operation, you must have the permission to perform the s3express:PutAccessPointScope action.

For information about REST API errors, see REST error responses.

", "staticContextParams":{ "RequiresAccountId":{"value":true}, "UseS3ExpressControlEndpoint":{"value":true} @@ -1456,7 +1456,7 @@ "requestUri":"/v20180820/bucket/{name}/versioning" }, "input":{"shape":"PutBucketVersioningRequest"}, - "documentation":"

This operation sets the versioning state for S3 on Outposts buckets only. To set the versioning state for an S3 bucket, see PutBucketVersioning in the Amazon S3 API Reference.

Sets the versioning state for an S3 on Outposts bucket. With S3 Versioning, you can save multiple distinct copies of your objects and recover from unintended user actions and application failures.

You can set the versioning state to one of the following:

If you've never set versioning on your bucket, it has no versioning state. In that case, a GetBucketVersioning request does not return a versioning state value.

When you enable S3 Versioning, for each object in your bucket, you have a current version and zero or more noncurrent versions. You can configure your bucket S3 Lifecycle rules to expire noncurrent versions after a specified time period. For more information, see Creating and managing a lifecycle configuration for your S3 on Outposts bucket in the Amazon S3 User Guide.

If you have an object expiration lifecycle configuration in your non-versioned bucket and you want to maintain the same permanent delete behavior when you enable versioning, you must add a noncurrent expiration policy. The noncurrent expiration lifecycle configuration will manage the deletes of the noncurrent object versions in the version-enabled bucket. For more information, see Versioning in the Amazon S3 User Guide.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.

The following operations are related to PutBucketVersioning for S3 on Outposts.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation sets the versioning state for S3 on Outposts buckets only. To set the versioning state for an S3 bucket, see PutBucketVersioning in the Amazon S3 API Reference.

Sets the versioning state for an S3 on Outposts bucket. With S3 Versioning, you can save multiple distinct copies of your objects and recover from unintended user actions and application failures.

You can set the versioning state to one of the following:

If you've never set versioning on your bucket, it has no versioning state. In that case, a GetBucketVersioning request does not return a versioning state value.

When you enable S3 Versioning, for each object in your bucket, you have a current version and zero or more noncurrent versions. You can configure your bucket S3 Lifecycle rules to expire noncurrent versions after a specified time period. For more information, see Creating and managing a lifecycle configuration for your S3 on Outposts bucket in the Amazon S3 User Guide.

If you have an object expiration lifecycle configuration in your non-versioned bucket and you want to maintain the same permanent delete behavior when you enable versioning, you must add a noncurrent expiration policy. The noncurrent expiration lifecycle configuration will manage the deletes of the noncurrent object versions in the version-enabled bucket. For more information, see Versioning in the Amazon S3 User Guide.

All Amazon S3 on Outposts REST API requests for this action require an additional parameter of x-amz-outpost-id to be passed with the request. In addition, you must use an S3 on Outposts endpoint hostname prefix instead of s3-control. For an example of the request syntax for Amazon S3 on Outposts that uses the S3 on Outposts endpoint hostname prefix and the x-amz-outpost-id derived by using the access point ARN, see the Examples section.

The following operations are related to PutBucketVersioning for S3 on Outposts.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1483,7 +1483,7 @@ {"shape":"NotFoundException"}, {"shape":"TooManyTagsException"} ], - "documentation":"

Sets the supplied tag-set on an S3 Batch Operations job.

A tag is a key-value pair. You can associate S3 Batch Operations tags with any job by sending a PUT request against the tagging subresource that is associated with the job. To modify the existing tag set, you can either replace the existing tag set entirely, or make changes within the existing tag set by retrieving the existing tag set using GetJobTagging, modify that tag set, and use this operation to replace the tag set with the one you modified. For more information, see Controlling access and labeling jobs using tags in the Amazon S3 User Guide.

Permissions

To use the PutJobTagging operation, you must have permission to perform the s3:PutJobTagging action.

Related actions include:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Sets the supplied tag-set on an S3 Batch Operations job.

A tag is a key-value pair. You can associate S3 Batch Operations tags with any job by sending a PUT request against the tagging subresource that is associated with the job. To modify the existing tag set, you can either replace the existing tag set entirely, or make changes within the existing tag set by retrieving the existing tag set using GetJobTagging, modify that tag set, and use this operation to replace the tag set with the one you modified. For more information, see Controlling access and labeling jobs using tags in the Amazon S3 User Guide.

Permissions

To use the PutJobTagging operation, you must have permission to perform the s3:PutJobTagging action.

Related actions include:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1503,7 +1503,7 @@ "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, "output":{"shape":"PutMultiRegionAccessPointPolicyResult"}, - "documentation":"

This operation is not supported by directory buckets.

Associates an access control policy with the specified Multi-Region Access Point. Each Multi-Region Access Point can have only one policy, so a request made to this action replaces any existing policy that is associated with the specified Multi-Region Access Point.

This action will always be routed to the US West (Oregon) Region. For more information about the restrictions around working with Multi-Region Access Points, see Multi-Region Access Point restrictions and limitations in the Amazon S3 User Guide.

The following actions are related to PutMultiRegionAccessPointPolicy:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Associates an access control policy with the specified Multi-Region Access Point. Each Multi-Region Access Point can have only one policy, so a request made to this action replaces any existing policy that is associated with the specified Multi-Region Access Point.

This action will always be routed to the US West (Oregon) Region. For more information about the restrictions around working with Multi-Region Access Points, see Multi-Region Access Point restrictions and limitations in the Amazon S3 User Guide.

The following actions are related to PutMultiRegionAccessPointPolicy:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1519,7 +1519,7 @@ "requestUri":"/v20180820/configuration/publicAccessBlock" }, "input":{"shape":"PutPublicAccessBlockRequest"}, - "documentation":"

This operation is not supported by directory buckets.

Creates or modifies the PublicAccessBlock configuration for an Amazon Web Services account. For this operation, users must have the s3:PutAccountPublicAccessBlock permission. For more information, see Using Amazon S3 block public access.

Related actions include:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Creates or modifies the PublicAccessBlock configuration for an Amazon Web Services account. This operation may be restricted when the account is managed by organization-level Block Public Access policies; in that case you might get an Access Denied (403) error, because organization-level policies override account-level settings and prevent direct account-level modifications. For this operation, users must have the s3:PutAccountPublicAccessBlock permission. For more information, see Using Amazon S3 block public access.

Related actions include:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1538,7 +1538,7 @@ "locationName":"PutStorageLensConfigurationRequest", "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, - "documentation":"

This operation is not supported by directory buckets.

Puts an Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Working with Amazon S3 Storage Lens in the Amazon S3 User Guide. For a complete list of S3 Storage Lens metrics, see S3 Storage Lens metrics glossary in the Amazon S3 User Guide.

To use this action, you must have permission to perform the s3:PutStorageLensConfiguration action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon S3 User Guide.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Puts an Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Working with Amazon S3 Storage Lens in the Amazon S3 User Guide. For a complete list of S3 Storage Lens metrics, see S3 Storage Lens metrics glossary in the Amazon S3 User Guide.

To use this action, you must have permission to perform the s3:PutStorageLensConfiguration action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon S3 User Guide.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1558,7 +1558,7 @@ "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, "output":{"shape":"PutStorageLensConfigurationTaggingResult"}, - "documentation":"

This operation is not supported by directory buckets.

Put or replace tags on an existing Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon S3 User Guide.

To use this action, you must have permission to perform the s3:PutStorageLensConfigurationTagging action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon S3 User Guide.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Put or replace tags on an existing Amazon S3 Storage Lens configuration. For more information about S3 Storage Lens, see Assessing your storage activity and usage with Amazon S3 Storage Lens in the Amazon S3 User Guide.

To use this action, you must have permission to perform the s3:PutStorageLensConfigurationTagging action. For more information, see Setting permissions to use Amazon S3 Storage Lens in the Amazon S3 User Guide.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1578,7 +1578,7 @@ "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, "output":{"shape":"SubmitMultiRegionAccessPointRoutesResult"}, - "documentation":"

This operation is not supported by directory buckets.

Submits an updated route configuration for a Multi-Region Access Point. This API operation updates the routing status for the specified Regions from active to passive, or from passive to active. A value of 0 indicates a passive status, which means that traffic won't be routed to the specified Region. A value of 100 indicates an active status, which means that traffic will be routed to the specified Region. At least one Region must be active at all times.

When the routing configuration is changed, any in-progress operations (uploads, copies, deletes, and so on) to formerly active Regions will continue to run to their final completion state (success or failure). The routing configurations of any Regions that aren’t specified remain unchanged.

Updated routing configurations might not be immediately applied. It can take up to 2 minutes for your changes to take effect.

To submit routing control changes and failover requests, use the Amazon S3 failover control infrastructure endpoints in these five Amazon Web Services Regions:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation is not supported by directory buckets.

Submits an updated route configuration for a Multi-Region Access Point. This API operation updates the routing status for the specified Regions from active to passive, or from passive to active. A value of 0 indicates a passive status, which means that traffic won't be routed to the specified Region. A value of 100 indicates an active status, which means that traffic will be routed to the specified Region. At least one Region must be active at all times.

When the routing configuration is changed, any in-progress operations (uploads, copies, deletes, and so on) to formerly active Regions will continue to run to their final completion state (success or failure). The routing configurations of any Regions that aren't specified remain unchanged.

Updated routing configurations might not be immediately applied. It can take up to 2 minutes for your changes to take effect.

To submit routing control changes and failover requests, use the Amazon S3 failover control infrastructure endpoints in these five Amazon Web Services Regions:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1600,7 +1600,7 @@ "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, "output":{"shape":"TagResourceResult"}, - "documentation":"

Creates a new user-defined tag or updates an existing tag. Each tag is a label consisting of a key and value that is applied to your resource. Tags can help you organize, track costs for, and control access to your resources. You can add up to 50 Amazon Web Services resource tags for each S3 resource.

This operation is only supported for the following Amazon S3 resource:

This operation is only supported for the following Amazon S3 resource:

Permissions

For Storage Lens groups and S3 Access Grants, you must have the s3:TagResource permission to use this operation.

For more information about the required Storage Lens Groups permissions, see Setting account permissions to use S3 Storage Lens groups.

Directory bucket permissions

For directory buckets and access points for directory buckets, you must have the s3express:TagResource permission to use this operation. For more information about directory buckets policies and permissions, see Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.

For information about S3 Tagging errors, see List of Amazon S3 Tagging error codes.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Creates a new user-defined tag or updates an existing tag. Each tag is a label consisting of a key and value that is applied to your resource. Tags can help you organize, track costs for, and control access to your resources. You can add up to 50 Amazon Web Services resource tags for each S3 resource.

This operation is only supported for the following Amazon S3 resource:

Permissions

For general purpose buckets, access points for general purpose buckets, Storage Lens groups, and S3 Access Grants, you must have the s3:TagResource permission to use this operation.

Directory bucket permissions

For directory buckets, you must have the s3express:TagResource permission to use this operation. For more information about directory buckets policies and permissions, see Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.

For information about S3 Tagging errors, see List of Amazon S3 Tagging error codes.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1617,7 +1617,7 @@ }, "input":{"shape":"UntagResourceRequest"}, "output":{"shape":"UntagResourceResult"}, - "documentation":"

This operation removes the specified user-defined tags from an S3 resource. You can pass one or more tag keys.

This operation is only supported for the following Amazon S3 resources:

Permissions

For Storage Lens groups and S3 Access Grants, you must have the s3:UntagResource permission to use this operation.

For more information about the required Storage Lens Groups permissions, see Setting account permissions to use S3 Storage Lens groups.

Directory bucket permissions

For directory buckets and access points for directory buckets, you must have the s3express:UntagResource permission to use this operation. For more information about directory buckets policies and permissions, see Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.

For information about S3 Tagging errors, see List of Amazon S3 Tagging error codes.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

This operation removes the specified user-defined tags from an S3 resource. You can pass one or more tag keys.

This operation is only supported for the following Amazon S3 resources:

Permissions

For general purpose buckets, access points for general purpose buckets, Storage Lens groups, and S3 Access Grants, you must have the s3:UntagResource permission to use this operation.

Directory bucket permissions

For directory buckets, you must have the s3express:UntagResource permission to use this operation. For more information about directory buckets policies and permissions, see Identity and Access Management (IAM) for S3 Express One Zone in the Amazon S3 User Guide.

HTTP Host header syntax

Directory buckets - The HTTP Host header syntax is s3express-control.region.amazonaws.com.

For information about S3 Tagging errors, see List of Amazon S3 Tagging error codes.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1637,7 +1637,7 @@ "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, "output":{"shape":"UpdateAccessGrantsLocationResult"}, - "documentation":"

Updates the IAM role of a registered location in your S3 Access Grants instance.

Permissions

You must have the s3:UpdateAccessGrantsLocation permission to use this operation.

Additional Permissions

You must also have the following permission: iam:PassRole

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Updates the IAM role of a registered location in your S3 Access Grants instance.

Permissions

You must have the s3:UpdateAccessGrantsLocation permission to use this operation.

Additional Permissions

You must also have the following permission: iam:PassRole

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1660,7 +1660,7 @@ {"shape":"NotFoundException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Updates an existing S3 Batch Operations job's priority. For more information, see S3 Batch Operations in the Amazon S3 User Guide.

Permissions

To use the UpdateJobPriority operation, you must have permission to perform the s3:UpdateJobPriority action.

Related actions include:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Updates an existing S3 Batch Operations job's priority. For more information, see S3 Batch Operations in the Amazon S3 User Guide.

Permissions

To use the UpdateJobPriority operation, you must have permission to perform the s3:UpdateJobPriority action.

Related actions include:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1683,7 +1683,7 @@ {"shape":"JobStatusException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Updates the status for the specified job. Use this operation to confirm that you want to run a job or to cancel an existing job. For more information, see S3 Batch Operations in the Amazon S3 User Guide.

Permissions

To use the UpdateJobStatus operation, you must have permission to perform the s3:UpdateJobStatus action.

Related actions include:

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Updates the status for the specified job. Use this operation to confirm that you want to run a job or to cancel an existing job. For more information, see S3 Batch Operations in the Amazon S3 User Guide.

Permissions

To use the UpdateJobStatus operation, you must have permission to perform the s3:UpdateJobStatus action.

Related actions include:

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1703,7 +1703,7 @@ "locationName":"UpdateStorageLensGroupRequest", "xmlNamespace":{"uri":"http://awss3control.amazonaws.com/doc/2018-08-20/"} }, - "documentation":"

Updates the existing Storage Lens group.

To use this operation, you must have the permission to perform the s3:UpdateStorageLensGroup action. For more information about the required Storage Lens Groups permissions, see Setting account permissions to use S3 Storage Lens groups.

For information about Storage Lens groups errors, see List of Amazon S3 Storage Lens error codes.

You must URL encode any signed header values that contain spaces. For example, if your header value is my file.txt, containing two spaces after my, you must URL encode this value to my%20%20file.txt.

", + "documentation":"

Updates the existing Storage Lens group.

To use this operation, you must have the permission to perform the s3:UpdateStorageLensGroup action. For more information about the required Storage Lens Groups permissions, see Setting account permissions to use S3 Storage Lens groups.

For information about Storage Lens groups errors, see List of Amazon S3 Storage Lens error codes.

", "endpoint":{ "hostPrefix":"{AccountId}." }, @@ -1897,12 +1897,16 @@ "shape":"DetailedStatusCodesMetrics", "documentation":"

A container element for detailed status code metrics.

" }, + "AdvancedPerformanceMetrics":{ + "shape":"AdvancedPerformanceMetrics", + "documentation":"

A container element for S3 Storage Lens advanced performance metrics.

" + }, "StorageLensGroupLevel":{ "shape":"StorageLensGroupLevel", "documentation":"

A container element for S3 Storage Lens groups metrics.

" } }, - "documentation":"

A container element for the account-level Amazon S3 Storage Lens configuration.

For more information about S3 Storage Lens, see Assessing your storage activity and usage with S3 Storage Lens in the Amazon S3 User Guide. For a complete list of S3 Storage Lens metrics, see S3 Storage Lens metrics glossary in the Amazon S3 User Guide.

" + "documentation":"

A container element for the account-level Amazon S3 Storage Lens configuration.

You must enable Storage Lens metrics consistently at both the account level and bucket level, or your request will fail.

For more information about S3 Storage Lens, see Assessing your storage activity and usage with S3 Storage Lens in the Amazon S3 User Guide. For a complete list of S3 Storage Lens metrics, see S3 Storage Lens metrics glossary in the Amazon S3 User Guide.

" }, "ActivityMetrics":{ "type":"structure", @@ -1934,6 +1938,16 @@ }, "documentation":"

The container element for Amazon S3 Storage Lens advanced data-protection metrics. Advanced data-protection metrics provide insights that you can use to perform audits and protect your data, for example replication rule counts within and across Regions.

For more information about S3 Storage Lens, see Assessing your storage activity and usage with S3 Storage Lens in the Amazon S3 User Guide. For a complete list of S3 Storage Lens metrics, see S3 Storage Lens metrics glossary in the Amazon S3 User Guide.

" }, + "AdvancedPerformanceMetrics":{ + "type":"structure", + "members":{ + "IsEnabled":{ + "shape":"IsEnabled", + "documentation":"

A container that indicates whether S3 Storage Lens advanced performance metrics are enabled.

" + } + }, + "documentation":"

The container element for S3 Storage Lens advanced performance metrics. Advanced performance metrics provide insights into application performance, such as request efficiency and access patterns. These metrics help you optimize your S3 storage for both cost and performance by providing detailed analytics on how your applications interact with S3 resources.

For more information about S3 Storage Lens, see Assessing your storage activity and usage with S3 Storage Lens in the Amazon S3 User Guide. For a complete list of S3 Storage Lens metrics, see S3 Storage Lens metrics glossary in the Amazon S3 User Guide.

" + }, "Alias":{ "type":"string", "max":63, @@ -2135,6 +2149,10 @@ "DetailedStatusCodesMetrics":{ "shape":"DetailedStatusCodesMetrics", "documentation":"

A container for bucket-level detailed status code metrics for S3 Storage Lens.

" + }, + "AdvancedPerformanceMetrics":{ + "shape":"AdvancedPerformanceMetrics", + "documentation":"

A container for bucket-level advanced performance metrics for S3 Storage Lens.

" } }, "documentation":"

A container for the bucket-level configuration for Amazon S3 Storage Lens.

For more information about S3 Storage Lens, see Assessing your storage activity and usage with S3 Storage Lens in the Amazon S3 User Guide.

" @@ -2502,7 +2520,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

An array of tags that you can apply to an access point. Tags are key-value pairs of metadata used to control access to your access points. For more information about tags, see Using tags with Amazon S3. For information about tagging access points, see Using tags for attribute-based access control (ABAC).

" + "documentation":"

An array of tags that you can apply to an access point. Tags are key-value pairs of metadata used to control access to your access points. For more information about tags, see Using tags with Amazon S3. For information about tagging access points, see Using tags for attribute-based access control (ABAC).

" } } }, @@ -2796,11 +2814,11 @@ "members":{ "KmsKeyArn":{ "shape":"NonEmptyKmsKeyArnString", - "documentation":"

The Amazon Resource Name (ARN) of the customer managed KMS key to use for the filter to return objects that are encrypted by the specified key. For best performance, we recommend using the KMSKeyArn filter in conjunction with other object metadata filters, like MatchAnyPrefix, CreatedAfter, or MatchAnyStorageClass.

You must provide the full KMS Key ARN. You can't use an alias name or alias ARN. For more information, see KMS keys in the Amazon Web Services Key Management Service Developer Guide.

", + "documentation":"

The Amazon Resource Name (ARN) of the customer managed KMS key to use for the filter to return objects that are encrypted by the specified key. For best performance, use keys in the same Region as the S3 Batch Operations job.

", "box":true } }, - "documentation":"

A filter that returns objects that are encrypted by dual-layer server-side encryption with Amazon Web Services Key Management Service (KMS) keys (DSSE-KMS). You can further refine your filtering by optionally providing a KMS Key ARN to create an object list of DSSE-KMS objects with that specific KMS Key ARN.

" + "documentation":"

A filter that returns objects that are encrypted by dual-layer server-side encryption with Amazon Web Services Key Management Service (KMS) keys (DSSE-KMS). You can further refine your filtering by optionally providing a KMS Key ARN to filter objects encrypted by a specific key.

" }, "DataSourceId":{ "type":"string", @@ -4992,7 +5010,7 @@ }, "MatchAnyObjectEncryption":{ "shape":"ObjectEncryptionFilterList", - "documentation":"

If provided, the generated object list includes only source bucket objects with the indicated server-side encryption type (SSE-S3, SSE-KMS, DSSE-KMS, SSE-C, or NOT-SSE). If you select SSE-KMS or DSSE-KMS, you can optionally further filter your results by specifying a specific KMS Key ARN. If you select SSE-KMS, you can also optionally further filter your results by Bucket Key enabled status.

" + "documentation":"

If provided, the generated object list includes only source bucket objects with the indicated server-side encryption type (SSE-S3, SSE-KMS, DSSE-KMS, SSE-C, or NOT-SSE).

" } }, "documentation":"

The filter used to describe a set of objects for the job's manifest.

" @@ -5325,7 +5343,7 @@ }, "NoncurrentVersionTransitions":{ "shape":"NoncurrentVersionTransitionList", - "documentation":"

Specifies the transition rule for the lifecycle rule that describes when non-current objects transition to a specific storage class. If your bucket is versioning-enabled (or versioning is suspended), you can set this action to request that Amazon S3 transition noncurrent object versions to a specific storage class at a set period in the object's lifetime.

This is not supported by Amazon S3 on Outposts buckets.

" + "documentation":"

Specifies the transition rule for the lifecycle rule that describes when noncurrent objects transition to a specific storage class. If your bucket is versioning-enabled (or versioning is suspended), you can set this action to request that Amazon S3 transition noncurrent object versions to a specific storage class at a set period in the object's lifetime.

This is not supported by Amazon S3 on Outposts buckets.

" }, "NoncurrentVersionExpiration":{ "shape":"NoncurrentVersionExpiration", @@ -6202,11 +6220,11 @@ "members":{ "BytesGreaterThan":{ "shape":"ObjectSizeValue", - "documentation":"

Specifies the minimum object size in Bytes. The value must be a positive number, greater than 0 and less than 5 TB.

" + "documentation":"

Specifies the minimum object size in Bytes. The value must be a positive number, greater than 0 and less than 50 TB.

" }, "BytesLessThan":{ "shape":"ObjectSizeValue", - "documentation":"

Specifies the maximum object size in Bytes. The value must be a positive number, greater than the minimum object size and less than 5 TB.

" + "documentation":"

Specifies the maximum object size in Bytes. The value must be a positive number, greater than the minimum object size and less than 50 TB.

" } }, "documentation":"

A filter condition that specifies the object size range of included objects in bytes. Only integers are supported.

" @@ -6501,11 +6519,11 @@ }, "NOTSSE":{ "shape":"NotSSEFilter", - "documentation":"

Filters for objects that are not encrypted by server-side encryption.

", + "documentation":"

Filters for objects that are not encrypted by server-side encryption.

", "locationName":"NOT-SSE" } }, - "documentation":"

An optional filter for the S3JobManifestGenerator that identifies the subset of objects by encryption type. This filter is used to create an object list for S3 Batch Operations jobs. If provided, this filter will generate an object list that only includes objects with the specified encryption type.

", + "documentation":"

An optional filter for the S3JobManifestGenerator that identifies the subset of objects by encryption type.

", "union":true }, "ObjectEncryptionFilterList":{ @@ -7741,14 +7759,14 @@ "members":{ "ChecksumAlgorithm":{ "shape":"ComputeObjectChecksumAlgorithm", - "documentation":"

Indicates the algorithm that you want Amazon S3 to use to create the checksum. For more information, see Checking object integrity in the Amazon S3 User Guide.

" + "documentation":"

Indicates the algorithm that you want Amazon S3 to use to create the checksum. For more information, see Checking object integrity in the Amazon S3 User Guide.

" }, "ChecksumType":{ "shape":"ComputeObjectChecksumType", - "documentation":"

Indicates the checksum type that you want Amazon S3 to use to calculate the object’s checksum value. For more information, see Checking object integrity in the Amazon S3 User Guide.

" + "documentation":"

Indicates the checksum type that you want Amazon S3 to use to calculate the object's checksum value. For more information, see Checking object integrity in the Amazon S3 User Guide.

" } }, - "documentation":"

Directs the specified job to invoke the ComputeObjectChecksum operation on every object listed in the job's manifest.

" + "documentation":"

Directs the specified job to invoke the ComputeObjectChecksum operation on every object listed in the job's manifest.

" }, "S3ContentLength":{ "type":"long", @@ -8279,12 +8297,12 @@ "members":{ "KmsKeyArn":{ "shape":"NonEmptyKmsKeyArnString", - "documentation":"

The Amazon Resource Name (ARN) of the customer managed KMS key to use for the filter to return objects that are encrypted by the specified key. For best performance, we recommend using the KMSKeyArn filter in conjunction with other object metadata filters, like MatchAnyPrefix, CreatedAfter, or MatchAnyStorageClass.

You must provide the full KMS Key ARN. You can't use an alias name or alias ARN. For more information, see KMS keys in the Amazon Web Services Key Management Service Developer Guide.

", + "documentation":"

The Amazon Resource Name (ARN) of the customer managed KMS key to use for the filter to return objects that are encrypted by the specified key. For best performance, use keys in the same Region as the S3 Batch Operations job.

", "box":true }, "BucketKeyEnabled":{ "shape":"Boolean", - "documentation":"

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Amazon Web Services Key Management Service (Amazon Web Services KMS) keys (SSE-KMS). If specified, will filter SSE-KMS encrypted objects by S3 Bucket Key status. For more information, see Reducing the cost of SSE-KMS with Amazon S3 Bucket Keys in the Amazon S3 User Guide.

", + "documentation":"

Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption with server-side encryption using Amazon Web Services Key Management Service (Amazon Web Services KMS) keys (SSE-KMS). If specified, will filter SSE-KMS encrypted objects by S3 Bucket Key status.

", "box":true } }, @@ -8452,6 +8470,10 @@ "shape":"StorageLensDataExport", "documentation":"

A container to specify the properties of your S3 Storage Lens metrics export including, the destination, schema and format.

" }, + "ExpandedPrefixesDataExport":{ + "shape":"StorageLensExpandedPrefixesDataExport", + "documentation":"

A container that configures your S3 Storage Lens expanded prefixes metrics report.

" + }, "IsEnabled":{ "shape":"IsEnabled", "documentation":"

A container for whether the S3 Storage Lens configuration is enabled.

" @@ -8463,6 +8485,10 @@ "StorageLensArn":{ "shape":"StorageLensArn", "documentation":"

The Amazon Resource Name (ARN) of the S3 Storage Lens configuration. This property is read-only and follows the following format: arn:aws:s3:us-east-1:example-account-id:storage-lens/your-dashboard-name

" + }, + "PrefixDelimiter":{ + "shape":"StorageLensPrefixLevelDelimiter", + "documentation":"

A container for all prefix delimiters that are used for object keys in this S3 Storage Lens configuration. The prefix delimiters determine how S3 Storage Lens counts prefix depth, by separating the hierarchical levels in object keys.

" } }, "documentation":"

A container for the Amazon S3 Storage Lens configuration.

" @@ -8485,6 +8511,10 @@ "CloudWatchMetrics":{ "shape":"CloudWatchMetrics", "documentation":"

A container for enabling Amazon CloudWatch publishing for S3 Storage Lens metrics.

" + }, + "StorageLensTableDestination":{ + "shape":"StorageLensTableDestination", + "documentation":"

A container for configuring S3 Storage Lens data exports to read-only S3 table buckets.

" } }, "documentation":"

A container to specify the properties of your S3 Storage Lens metrics export, including the destination, schema, and format.

" @@ -8505,6 +8535,17 @@ }, "documentation":"

A container for the encryption of the S3 Storage Lens metrics exports.

" }, + "StorageLensExpandedPrefixesDataExport":{ + "type":"structure", + "members":{ + "S3BucketDestination":{"shape":"S3BucketDestination"}, + "StorageLensTableDestination":{ + "shape":"StorageLensTableDestination", + "documentation":"

A container for the bucket where the S3 Storage Lens metric export files are located. At least one export destination must be specified.

" + } + }, + "documentation":"

A container for your S3 Storage Lens expanded prefix metrics report configuration. Unlike the default Storage Lens metrics report, the expanded prefix metrics report includes all S3 Storage Lens storage and activity data related to the full list of prefixes in your Storage Lens configuration.

" + }, "StorageLensGroup":{ "type":"structure", "required":[ @@ -8680,6 +8721,18 @@ "max":10, "min":1 }, + "StorageLensTableDestination":{ + "type":"structure", + "required":["IsEnabled"], + "members":{ + "IsEnabled":{ + "shape":"IsEnabled", + "documentation":"

A container that indicates whether the export to read-only S3 table buckets is enabled for your S3 Storage Lens configuration. When set to true, Storage Lens reports are automatically exported to tables in addition to other configured destinations.

" + }, + "Encryption":{"shape":"StorageLensDataExportEncryption"} + }, + "documentation":"

A container for configuring your S3 Storage Lens reports to export to read-only S3 table buckets. This parameter enables you to store your Storage Lens metrics in a structured, queryable table format in Apache Iceberg.

For more information about S3 Storage Lens, see Assessing your storage activity and usage with S3 Storage Lens in the Amazon S3 User Guide.

" + }, "StorageLensTag":{ "type":"structure", "required":[ diff --git a/awscli/botocore/data/s3tables/2018-05-10/service-2.json b/awscli/botocore/data/s3tables/2018-05-10/service-2.json index 096dd2140c6b..921dd89e9800 100644 --- a/awscli/botocore/data/s3tables/2018-05-10/service-2.json +++ b/awscli/botocore/data/s3tables/2018-05-10/service-2.json @@ -49,7 +49,7 @@ {"shape":"ConflictException"}, {"shape":"BadRequestException"} ], - "documentation":"

Creates a new table associated with the given namespace in a table bucket. For more information, see Creating an Amazon S3 table in the Amazon Simple Storage Service User Guide.

Permissions
  • You must have the s3tables:CreateTable permission to use this operation.

  • If you use this operation with the optional metadata request parameter you must have the s3tables:PutTableData permission.

  • If you use this operation with the optional encryptionConfiguration request parameter you must have the s3tables:PutTableEncryption permission.

  • You must have the s3tables:TagResource permission in addition to s3tables:CreateTable permission to create a table with tags.

Additionally, If you choose SSE-KMS encryption you must grant the S3 Tables maintenance principal access to your KMS key. For more information, see Permissions requirements for S3 Tables SSE-KMS encryption.

" + "documentation":"

Creates a new table associated with the given namespace in a table bucket. For more information, see Creating an Amazon S3 table in the Amazon Simple Storage Service User Guide.

Permissions
  • You must have the s3tables:CreateTable permission to use this operation.

  • If you use this operation with the optional metadata request parameter you must have the s3tables:PutTableData permission.

  • If you use this operation with the optional encryptionConfiguration request parameter you must have the s3tables:PutTableEncryption permission.

  • If you use this operation with the storageClassConfiguration request parameter, you must have the s3tables:PutTableStorageClass permission.

  • To create a table with tags, you must have the s3tables:TagResource permission in addition to s3tables:CreateTable permission.

Additionally, if you choose SSE-KMS encryption you must grant the S3 Tables maintenance principal access to your KMS key. For more information, see Permissions requirements for S3 Tables SSE-KMS encryption.

" }, "CreateTableBucket":{ "name":"CreateTableBucket", @@ -68,7 +68,7 @@ {"shape":"ConflictException"}, {"shape":"BadRequestException"} ], - "documentation":"

Creates a table bucket. For more information, see Creating a table bucket in the Amazon Simple Storage Service User Guide.

Permissions
  • You must have the s3tables:CreateTableBucket permission to use this operation.

  • If you use this operation with the optional encryptionConfiguration parameter you must have the s3tables:PutTableBucketEncryption permission.

  • You must have the s3tables:TagResource permission in addition to s3tables:CreateTableBucket permission to create a table bucket with tags.

" + "documentation":"

Creates a table bucket. For more information, see Creating a table bucket in the Amazon Simple Storage Service User Guide.

Permissions
  • You must have the s3tables:CreateTableBucket permission to use this operation.

  • If you use this operation with the optional encryptionConfiguration parameter you must have the s3tables:PutTableBucketEncryption permission.

  • If you use this operation with the storageClassConfiguration request parameter, you must have the s3tables:PutTableBucketStorageClass permission.

  • To create a table bucket with tags, you must have the s3tables:TagResource permission in addition to s3tables:CreateTableBucket permission.

" }, "DeleteNamespace":{ "name":"DeleteNamespace", @@ -184,6 +184,26 @@ "documentation":"

Deletes a table bucket policy. For more information, see Deleting a table bucket policy in the Amazon Simple Storage Service User Guide.

Permissions

You must have the s3tables:DeleteTableBucketPolicy permission to use this operation.

", "idempotent":true }, + "DeleteTableBucketReplication":{ + "name":"DeleteTableBucketReplication", + "http":{ + "method":"DELETE", + "requestUri":"/table-bucket-replication", + "responseCode":204 + }, + "input":{"shape":"DeleteTableBucketReplicationRequest"}, + "errors":[ + {"shape":"InternalServerErrorException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ConflictException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

Deletes the replication configuration for a table bucket. After deletion, new table updates will no longer be replicated to destination buckets, though existing replicated tables will remain in destination buckets.

Permissions

You must have the s3tables:DeleteTableBucketReplication permission to use this operation.

", + "idempotent":true + }, "DeleteTablePolicy":{ "name":"DeleteTablePolicy", "http":{ @@ -203,6 +223,26 @@ "documentation":"

Deletes a table policy. For more information, see Deleting a table policy in the Amazon Simple Storage Service User Guide.

Permissions

You must have the s3tables:DeleteTablePolicy permission to use this operation.

", "idempotent":true }, + "DeleteTableReplication":{ + "name":"DeleteTableReplication", + "http":{ + "method":"DELETE", + "requestUri":"/table-replication", + "responseCode":204 + }, + "input":{"shape":"DeleteTableReplicationRequest"}, + "errors":[ + {"shape":"InternalServerErrorException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ConflictException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

Deletes the replication configuration for a specific table. After deletion, new updates to this table will no longer be replicated to destination tables, though existing replicated copies will remain in destination buckets.

Permissions

You must have the s3tables:DeleteTableReplication permission to use this operation.

", + "idempotent":true + }, "GetNamespace":{ "name":"GetNamespace", "http":{ @@ -346,6 +386,47 @@ "documentation":"

Gets details about a table bucket policy. For more information, see Viewing a table bucket policy in the Amazon Simple Storage Service User Guide.

Permissions

You must have the s3tables:GetTableBucketPolicy permission to use this operation.

", "readonly":true }, + "GetTableBucketReplication":{ + "name":"GetTableBucketReplication", + "http":{ + "method":"GET", + "requestUri":"/table-bucket-replication", + "responseCode":200 + }, + "input":{"shape":"GetTableBucketReplicationRequest"}, + "output":{"shape":"GetTableBucketReplicationResponse"}, + "errors":[ + {"shape":"InternalServerErrorException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ConflictException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

Retrieves the replication configuration for a table bucket. This operation returns the IAM role, versionToken, and replication rules that define how tables in this bucket are replicated to other buckets.

Permissions

You must have the s3tables:GetTableBucketReplication permission to use this operation.

", + "readonly":true + }, + "GetTableBucketStorageClass":{ + "name":"GetTableBucketStorageClass", + "http":{ + "method":"GET", + "requestUri":"/buckets/{tableBucketARN}/storage-class", + "responseCode":200 + }, + "input":{"shape":"GetTableBucketStorageClassRequest"}, + "output":{"shape":"GetTableBucketStorageClassResponse"}, + "errors":[ + {"shape":"InternalServerErrorException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

Retrieves the storage class configuration for a specific table. This allows you to view the storage class settings that apply to an individual table, which may differ from the table bucket's default configuration.

Permissions

You must have the s3tables:GetTableBucketStorageClass permission to use this operation.

", + "readonly":true + }, "GetTableEncryption":{ "name":"GetTableEncryption", "http":{ @@ -446,6 +527,107 @@ "documentation":"

Gets details about a table policy. For more information, see Viewing a table policy in the Amazon Simple Storage Service User Guide.

Permissions

You must have the s3tables:GetTablePolicy permission to use this operation.

", "readonly":true }, + "GetTableRecordExpirationConfiguration":{ + "name":"GetTableRecordExpirationConfiguration", + "http":{ + "method":"GET", + "requestUri":"/table-record-expiration", + "responseCode":200 + }, + "input":{"shape":"GetTableRecordExpirationConfigurationRequest"}, + "output":{"shape":"GetTableRecordExpirationConfigurationResponse"}, + "errors":[ + {"shape":"InternalServerErrorException"}, + {"shape":"MethodNotAllowedException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

Retrieves the expiration configuration settings for records in a table, and the status of the configuration. If the status of the configuration is enabled, records expire and are automatically removed from the table after the specified number of days.

Permissions

You must have the s3tables:GetTableRecordExpirationConfiguration permission to use this operation.

", + "readonly":true + }, + "GetTableRecordExpirationJobStatus":{ + "name":"GetTableRecordExpirationJobStatus", + "http":{ + "method":"GET", + "requestUri":"/table-record-expiration-job-status", + "responseCode":200 + }, + "input":{"shape":"GetTableRecordExpirationJobStatusRequest"}, + "output":{"shape":"GetTableRecordExpirationJobStatusResponse"}, + "errors":[ + {"shape":"InternalServerErrorException"}, + {"shape":"MethodNotAllowedException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

Retrieves the status, metrics, and details of the latest record expiration job for a table. This includes when the job ran, and whether it succeeded or failed. If the job ran successfully, this also includes statistics about the records that were removed.

Permissions

You must have the s3tables:GetTableRecordExpirationJobStatus permission to use this operation.

", + "readonly":true + }, + "GetTableReplication":{ + "name":"GetTableReplication", + "http":{ + "method":"GET", + "requestUri":"/table-replication", + "responseCode":200 + }, + "input":{"shape":"GetTableReplicationRequest"}, + "output":{"shape":"GetTableReplicationResponse"}, + "errors":[ + {"shape":"InternalServerErrorException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ConflictException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

Retrieves the replication configuration for a specific table.

Permissions

You must have the s3tables:GetTableReplication permission to use this operation.

", + "readonly":true + }, + "GetTableReplicationStatus":{ + "name":"GetTableReplicationStatus", + "http":{ + "method":"GET", + "requestUri":"/replication-status", + "responseCode":200 + }, + "input":{"shape":"GetTableReplicationStatusRequest"}, + "output":{"shape":"GetTableReplicationStatusResponse"}, + "errors":[ + {"shape":"InternalServerErrorException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ConflictException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

Retrieves the replication status for a table, including the status of replication to each destination. This operation provides visibility into replication health and progress.

Permissions

You must have the s3tables:GetTableReplicationStatus permission to use this operation.

", + "readonly":true + }, + "GetTableStorageClass":{ + "name":"GetTableStorageClass", + "http":{ + "method":"GET", + "requestUri":"/tables/{tableBucketARN}/{namespace}/{name}/storage-class", + "responseCode":200 + }, + "input":{"shape":"GetTableStorageClassRequest"}, + "output":{"shape":"GetTableStorageClassResponse"}, + "errors":[ + {"shape":"InternalServerErrorException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

Retrieves the storage class configuration for a specific table. This allows you to view the storage class settings that apply to an individual table, which may differ from the table bucket's default configuration.

Permissions

You must have the s3tables:GetTableStorageClass permission to use this operation.

", + "readonly":true + }, "ListNamespaces":{ "name":"ListNamespaces", "http":{ @@ -526,7 +708,7 @@ {"shape":"BadRequestException"} ], "documentation":"

Lists all of the tags applied to a specified Amazon S3 Tables resource. Each tag is a label consisting of a key and value pair. Tags can help you organize, track costs for, and control access to resources.

For a list of S3 resources that support tagging, see Managing tags for Amazon S3 resources.

Permissions

For tables and table buckets, you must have the s3tables:ListTagsForResource permission to use this operation.

", - "idempotent":true + "readonly":true }, "PutTableBucketEncryption":{ "name":"PutTableBucketEncryption", @@ -603,6 +785,46 @@ "documentation":"

Creates a new table bucket policy or replaces an existing table bucket policy for a table bucket. For more information, see Adding a table bucket policy in the Amazon Simple Storage Service User Guide.

Permissions

You must have the s3tables:PutTableBucketPolicy permission to use this operation.

", "idempotent":true }, + "PutTableBucketReplication":{ + "name":"PutTableBucketReplication", + "http":{ + "method":"PUT", + "requestUri":"/table-bucket-replication", + "responseCode":200 + }, + "input":{"shape":"PutTableBucketReplicationRequest"}, + "output":{"shape":"PutTableBucketReplicationResponse"}, + "errors":[ + {"shape":"InternalServerErrorException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ConflictException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

Creates or updates the replication configuration for a table bucket. This operation defines how tables in the source bucket are replicated to destination buckets. Replication helps ensure data availability and disaster recovery across regions or accounts.

Permissions
  • You must have the s3tables:PutTableBucketReplication permission to use this operation. The IAM role specified in the configuration must have permissions to read from the source bucket and write permissions to all destination buckets.

  • You must also have the following permissions:

    • s3tables:GetTable permission on the source table.

    • s3tables:ListTables permission on the bucket containing the table.

    • s3tables:CreateTable permission for the destination.

    • s3tables:CreateNamespace permission for the destination.

    • s3tables:GetTableMaintenanceConfig permission for the source bucket.

    • s3tables:PutTableMaintenanceConfig permission for the destination bucket.

  • You must have iam:PassRole permission with condition allowing roles to be passed to replication.s3tables.amazonaws.com.

", + "idempotent":true + }, + "PutTableBucketStorageClass":{ + "name":"PutTableBucketStorageClass", + "http":{ + "method":"PUT", + "requestUri":"/buckets/{tableBucketARN}/storage-class", + "responseCode":200 + }, + "input":{"shape":"PutTableBucketStorageClassRequest"}, + "errors":[ + {"shape":"InternalServerErrorException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ConflictException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

Sets or updates the storage class configuration for a table bucket. This configuration serves as the default storage class for all new tables created in the bucket, allowing you to optimize storage costs at the bucket level.

Permissions

You must have the s3tables:PutTableBucketStorageClass permission to use this operation.

", + "idempotent":true + }, "PutTableMaintenanceConfiguration":{ "name":"PutTableMaintenanceConfiguration", "http":{ @@ -640,6 +862,46 @@ "documentation":"

Creates a new table policy or replaces an existing table policy for a table. For more information, see Adding a table policy in the Amazon Simple Storage Service User Guide.

Permissions

You must have the s3tables:PutTablePolicy permission to use this operation.

", "idempotent":true }, + "PutTableRecordExpirationConfiguration":{ + "name":"PutTableRecordExpirationConfiguration", + "http":{ + "method":"PUT", + "requestUri":"/table-record-expiration", + "responseCode":204 + }, + "input":{"shape":"PutTableRecordExpirationConfigurationRequest"}, + "errors":[ + {"shape":"InternalServerErrorException"}, + {"shape":"MethodNotAllowedException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

Creates or updates the expiration configuration settings for records in a table, including the status of the configuration. If you enable record expiration for a table, records expire and are automatically removed from the table after the number of days that you specify.

Permissions

You must have the s3tables:PutTableRecordExpirationConfiguration permission to use this operation.

", + "idempotent":true + }, + "PutTableReplication":{ + "name":"PutTableReplication", + "http":{ + "method":"PUT", + "requestUri":"/table-replication", + "responseCode":200 + }, + "input":{"shape":"PutTableReplicationRequest"}, + "output":{"shape":"PutTableReplicationResponse"}, + "errors":[ + {"shape":"InternalServerErrorException"}, + {"shape":"ForbiddenException"}, + {"shape":"NotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"ConflictException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

Creates or updates the replication configuration for a specific table. This operation allows you to define table-level replication independently of bucket-level replication, providing granular control over which tables are replicated and where.

Permissions
  • You must have the s3tables:PutTableReplication permission to use this operation. The IAM role specified in the configuration must have permissions to read from the source table and write to all destination tables.

  • You must also have the following permissions:

    • s3tables:GetTable permission on the source table being replicated.

    • s3tables:CreateTable permission for the destination.

    • s3tables:CreateNamespace permission for the destination.

    • s3tables:GetTableMaintenanceConfig permission for the source table.

    • s3tables:PutTableMaintenanceConfig permission for the destination table.

  • You must have iam:PassRole permission with condition allowing roles to be passed to replication.s3tables.amazonaws.com.

", + "idempotent":true + }, "RenameTable":{ "name":"RenameTable", "http":{ @@ -819,6 +1081,10 @@ "shape":"EncryptionConfiguration", "documentation":"

The encryption configuration to use for the table bucket. This configuration specifies the default encryption settings that will be applied to all tables created in this bucket unless overridden at the table level. The configuration includes the encryption algorithm and, if using SSE-KMS, the KMS key to use.

" }, + "storageClassConfiguration":{ + "shape":"StorageClassConfiguration", + "documentation":"

The default storage class configuration for the table bucket. This configuration will be applied to all new tables created in this bucket unless overridden at the table level. If not specified, the service default storage class will be used.

" + }, "tags":{ "shape":"Tags", "documentation":"

A map of user-defined tags that you would like to apply to the table bucket that you are creating. A tag is a key-value pair that you apply to your resources. Tags can help you organize and control access to resources. For more information, see Tagging for cost allocation or attribute-based access control (ABAC).

You must have the s3tables:TagResource permission in addition to s3tables:CreateTableBucket permission to create a table bucket with tags.

" @@ -872,6 +1138,10 @@ "shape":"EncryptionConfiguration", "documentation":"

The encryption configuration to use for the table. This configuration specifies the encryption algorithm and, if using SSE-KMS, the KMS key to use for encrypting the table.

If you choose SSE-KMS encryption you must grant the S3 Tables maintenance principal access to your KMS key. For more information, see Permissions requirements for S3 Tables SSE-KMS encryption.

" }, + "storageClassConfiguration":{ + "shape":"StorageClassConfiguration", + "documentation":"

The storage class configuration for the table. If not specified, the table inherits the storage class configuration from its table bucket. Specify this parameter to override the bucket's default storage class for this table.

" + }, "tags":{ "shape":"Tags", "documentation":"

A map of user-defined tags that you would like to apply to the table that you are creating. A tag is a key-value pair that you apply to your resources. Tags can help you organize, track costs for, and control access to resources. For more information, see Tagging for cost allocation or attribute-based access control (ABAC).

You must have the s3tables:TagResource permission in addition to s3tables:CreateTable permission to create a table with tags.

" @@ -952,6 +1222,24 @@ } } }, + "DeleteTableBucketReplicationRequest":{ + "type":"structure", + "required":["tableBucketARN"], + "members":{ + "tableBucketARN":{ + "shape":"TableBucketARN", + "documentation":"

The Amazon Resource Name (ARN) of the table bucket.

", + "location":"querystring", + "locationName":"tableBucketARN" + }, + "versionToken":{ + "shape":"VersionToken", + "documentation":"

A version token from a previous GetTableBucketReplication call. Use this token to ensure you're deleting the expected version of the configuration.

", + "location":"querystring", + "locationName":"versionToken" + } + } + }, "DeleteTableBucketRequest":{ "type":"structure", "required":["tableBucketARN"], @@ -992,6 +1280,27 @@ } } }, + "DeleteTableReplicationRequest":{ + "type":"structure", + "required":[ + "tableArn", + "versionToken" + ], + "members":{ + "tableArn":{ + "shape":"TableARN", + "documentation":"

The Amazon Resource Name (ARN) of the table.

", + "location":"querystring", + "locationName":"tableArn" + }, + "versionToken":{ + "shape":"String", + "documentation":"

A version token from a previous GetTableReplication call. Use this token to ensure you're deleting the expected version of the configuration.

", + "location":"querystring", + "locationName":"versionToken" + } + } + }, "DeleteTableRequest":{ "type":"structure", "required":[ @@ -1215,6 +1524,35 @@ } } }, + "GetTableBucketReplicationRequest":{ + "type":"structure", + "required":["tableBucketARN"], + "members":{ + "tableBucketARN":{ + "shape":"TableBucketARN", + "documentation":"

The Amazon Resource Name (ARN) of the table bucket.

", + "location":"querystring", + "locationName":"tableBucketARN" + } + } + }, + "GetTableBucketReplicationResponse":{ + "type":"structure", + "required":[ + "versionToken", + "configuration" + ], + "members":{ + "versionToken":{ + "shape":"VersionToken", + "documentation":"

A version token that represents the current state of the replication configuration. Use this token when updating the configuration to ensure consistency.

" + }, + "configuration":{ + "shape":"TableBucketReplicationConfiguration", + "documentation":"

The replication configuration for the table bucket, including the IAM role and replication rules.

" + } + } + }, "GetTableBucketRequest":{ "type":"structure", "required":["tableBucketARN"], @@ -1262,17 +1600,39 @@ } } }, - "GetTableEncryptionRequest":{ + "GetTableBucketStorageClassRequest":{ "type":"structure", - "required":[ - "tableBucketARN", - "namespace", - "name" - ], + "required":["tableBucketARN"], "members":{ "tableBucketARN":{ "shape":"TableBucketARN", - "documentation":"

The Amazon Resource Name (ARN) of the table bucket containing the table.

", + "documentation":"

The Amazon Resource Name (ARN) of the table bucket.

", + "location":"uri", + "locationName":"tableBucketARN" + } + } + }, + "GetTableBucketStorageClassResponse":{ + "type":"structure", + "required":["storageClassConfiguration"], + "members":{ + "storageClassConfiguration":{ + "shape":"StorageClassConfiguration", + "documentation":"

The storage class configuration for the table bucket.

" + } + } + }, + "GetTableEncryptionRequest":{ + "type":"structure", + "required":[ + "tableBucketARN", + "namespace", + "name" + ], + "members":{ + "tableBucketARN":{ + "shape":"TableBucketARN", + "documentation":"

The Amazon Resource Name (ARN) of the table bucket containing the table.

", "location":"uri", "locationName":"tableBucketARN" }, @@ -1477,6 +1837,120 @@ } } }, + "GetTableRecordExpirationConfigurationRequest":{ + "type":"structure", + "required":["tableArn"], + "members":{ + "tableArn":{ + "shape":"TableARN", + "documentation":"

The Amazon Resource Name (ARN) of the table.

", + "location":"querystring", + "locationName":"tableArn" + } + } + }, + "GetTableRecordExpirationConfigurationResponse":{ + "type":"structure", + "required":["configuration"], + "members":{ + "configuration":{ + "shape":"TableRecordExpirationConfigurationValue", + "documentation":"

The record expiration configuration for the table, including the status and retention settings.

" + } + } + }, + "GetTableRecordExpirationJobStatusRequest":{ + "type":"structure", + "required":["tableArn"], + "members":{ + "tableArn":{ + "shape":"TableARN", + "documentation":"

The Amazon Resource Name (ARN) of the table.

", + "location":"querystring", + "locationName":"tableArn" + } + } + }, + "GetTableRecordExpirationJobStatusResponse":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"TableRecordExpirationJobStatus", + "documentation":"

The current status of the most recent expiration job.

" + }, + "lastRunTimestamp":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The timestamp when the expiration job was last executed.

" + }, + "failureMessage":{ + "shape":"String", + "documentation":"

If the job failed, this field contains an error message describing the failure reason.

" + }, + "metrics":{ + "shape":"TableRecordExpirationJobMetrics", + "documentation":"

Metrics about the most recent expiration job execution, including the number of records and files deleted.

" + } + } + }, + "GetTableReplicationRequest":{ + "type":"structure", + "required":["tableArn"], + "members":{ + "tableArn":{ + "shape":"TableARN", + "documentation":"

The Amazon Resource Name (ARN) of the table.

", + "location":"querystring", + "locationName":"tableArn" + } + } + }, + "GetTableReplicationResponse":{ + "type":"structure", + "required":[ + "versionToken", + "configuration" + ], + "members":{ + "versionToken":{ + "shape":"String", + "documentation":"

A version token that represents the current state of the table's replication configuration. Use this token when updating the configuration to ensure consistency.

" + }, + "configuration":{ + "shape":"TableReplicationConfiguration", + "documentation":"

The replication configuration for the table, including the IAM role and replication rules.

" + } + } + }, + "GetTableReplicationStatusRequest":{ + "type":"structure", + "required":["tableArn"], + "members":{ + "tableArn":{ + "shape":"TableARN", + "documentation":"

The Amazon Resource Name (ARN) of the table.

", + "location":"querystring", + "locationName":"tableArn" + } + } + }, + "GetTableReplicationStatusResponse":{ + "type":"structure", + "required":[ + "sourceTableArn", + "destinations" + ], + "members":{ + "sourceTableArn":{ + "shape":"TableARN", + "documentation":"

The Amazon Resource Name (ARN) of the source table being replicated.

" + }, + "destinations":{ + "shape":"ReplicationDestinationStatuses", + "documentation":"

An array of status information for each replication destination, including the current state, last successful update, and any error messages.

" + } + } + }, "GetTableRequest":{ "type":"structure", "members":{ @@ -1586,9 +2060,57 @@ "tableBucketId":{ "shape":"TableBucketId", "documentation":"

The unique identifier of the table bucket containing this table.

" + }, + "managedTableInformation":{ + "shape":"ManagedTableInformation", + "documentation":"

If this table is managed by S3 Tables, contains additional information such as replication details.

" + } + } + }, + "GetTableStorageClassRequest":{ + "type":"structure", + "required":[ + "tableBucketARN", + "namespace", + "name" + ], + "members":{ + "tableBucketARN":{ + "shape":"TableBucketARN", + "documentation":"

The Amazon Resource Name (ARN) of the table bucket that contains the table.

", + "location":"uri", + "locationName":"tableBucketARN" + }, + "namespace":{ + "shape":"NamespaceName", + "documentation":"

The namespace associated with the table.

", + "location":"uri", + "locationName":"namespace" + }, + "name":{ + "shape":"TableName", + "documentation":"

The name of the table.

", + "location":"uri", + "locationName":"name" + } + } + }, + "GetTableStorageClassResponse":{ + "type":"structure", + "required":["storageClassConfiguration"], + "members":{ + "storageClassConfiguration":{ + "shape":"StorageClassConfiguration", + "documentation":"

The storage class configuration for the table.

" } } }, + "IAMRole":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:.+:iam::[0-9]{12}:role/.+" + }, "IcebergCompactionSettings":{ "type":"structure", "members":{ @@ -1619,6 +2141,10 @@ "schema":{ "shape":"IcebergSchema", "documentation":"

The schema for an Iceberg table.

" + }, + "properties":{ + "shape":"TableProperties", + "documentation":"

Contains configuration properties for an Iceberg table.

" } }, "documentation":"

Contains details about the metadata for an Iceberg table.

" @@ -1681,6 +2207,24 @@ "Disabled" ] }, + "LastSuccessfulReplicatedUpdate":{ + "type":"structure", + "required":[ + "metadataLocation", + "timestamp" + ], + "members":{ + "metadataLocation":{ + "shape":"MetadataLocation", + "documentation":"

The S3 location of the metadata that was successfully replicated.

" + }, + "timestamp":{ + "shape":"SyntheticTimestamp_date_time", + "documentation":"

The timestamp when the replication update completed successfully.

" + } + }, + "documentation":"

Contains information about the most recent successful replication update to a destination.

" + }, "ListNamespacesLimit":{ "type":"integer", "box":true, @@ -1872,6 +2416,10 @@ } } }, + "Long":{ + "type":"long", + "box":true + }, "MaintenanceStatus":{ "type":"string", "enum":[ @@ -1879,11 +2427,33 @@ "disabled" ] }, + "ManagedTableInformation":{ + "type":"structure", + "members":{ + "replicationInformation":{ + "shape":"ReplicationInformation", + "documentation":"

If this table is a replica, contains information about the source table from which it is replicated.

" + } + }, + "documentation":"

Contains information about tables that are managed by S3 Tables, including replication information for replica tables.

" + }, "MetadataLocation":{ "type":"string", "max":2048, "min":1 }, + "MethodNotAllowedException":{ + "type":"structure", + "members":{ + "message":{"shape":"ErrorMessage"} + }, + "documentation":"

The requested operation is not allowed on this resource. This may occur when attempting to modify a resource that is managed by a service or has restrictions that prevent the operation.

", + "error":{ + "httpStatusCode":405, + "senderFault":true + }, + "exception":true + }, "NamespaceId":{"type":"string"}, "NamespaceList":{ "type":"list", @@ -2038,6 +2608,67 @@ } } }, + "PutTableBucketReplicationRequest":{ + "type":"structure", + "required":[ + "tableBucketARN", + "configuration" + ], + "members":{ + "tableBucketARN":{ + "shape":"TableBucketARN", + "documentation":"

The Amazon Resource Name (ARN) of the source table bucket.

", + "location":"querystring", + "locationName":"tableBucketARN" + }, + "versionToken":{ + "shape":"VersionToken", + "documentation":"

A version token from a previous GetTableBucketReplication call. Use this token to ensure you're updating the expected version of the configuration.

", + "location":"querystring", + "locationName":"versionToken" + }, + "configuration":{ + "shape":"TableBucketReplicationConfiguration", + "documentation":"

The replication configuration to apply, including the IAM role and replication rules.

" + } + } + }, + "PutTableBucketReplicationResponse":{ + "type":"structure", + "required":[ + "versionToken", + "status" + ], + "members":{ + "versionToken":{ + "shape":"VersionToken", + "documentation":"

A new version token representing the updated replication configuration.

" + }, + "status":{ + "shape":"String", + "documentation":"

The status of the replication configuration operation.

" + } + } + }, + "PutTableBucketStorageClassRequest":{ + "type":"structure", + "required":[ + "tableBucketARN", + "storageClassConfiguration" + ], + "members":{ + "tableBucketARN":{ + "shape":"TableBucketARN", + "documentation":"

The Amazon Resource Name (ARN) of the table bucket.

", + "location":"uri", + "locationName":"tableBucketARN" + }, + "storageClassConfiguration":{ + "shape":"StorageClassConfiguration", + "documentation":"

The storage class configuration to apply to the table bucket. This configuration will serve as the default for new tables created in this bucket.

" + } + } + }, "PutTableMaintenanceConfigurationRequest":{ "type":"structure", "required":[ @@ -2111,6 +2742,67 @@ } } }, + "PutTableRecordExpirationConfigurationRequest":{ + "type":"structure", + "required":[ + "tableArn", + "value" + ], + "members":{ + "tableArn":{ + "shape":"TableARN", + "documentation":"

The Amazon Resource Name (ARN) of the table.

", + "location":"querystring", + "locationName":"tableArn" + }, + "value":{ + "shape":"TableRecordExpirationConfigurationValue", + "documentation":"

The record expiration configuration to apply to the table, including the status (enabled or disabled) and retention period in days.

" + } + } + }, + "PutTableReplicationRequest":{ + "type":"structure", + "required":[ + "tableArn", + "configuration" + ], + "members":{ + "tableArn":{ + "shape":"TableARN", + "documentation":"

The Amazon Resource Name (ARN) of the source table.

", + "location":"querystring", + "locationName":"tableArn" + }, + "versionToken":{ + "shape":"String", + "documentation":"

A version token from a previous GetTableReplication call. Use this token to ensure you're updating the expected version of the configuration.

", + "location":"querystring", + "locationName":"versionToken" + }, + "configuration":{ + "shape":"TableReplicationConfiguration", + "documentation":"

The replication configuration to apply to the table, including the IAM role and replication rules.

" + } + } + }, + "PutTableReplicationResponse":{ + "type":"structure", + "required":[ + "versionToken", + "status" + ], + "members":{ + "versionToken":{ + "shape":"String", + "documentation":"

A new version token representing the updated replication configuration.

" + }, + "status":{ + "shape":"String", + "documentation":"

The status of the replication configuration operation.

" + } + } + }, "RenameTableRequest":{ "type":"structure", "required":[ @@ -2151,6 +2843,78 @@ } } }, + "ReplicationDestination":{ + "type":"structure", + "required":["destinationTableBucketARN"], + "members":{ + "destinationTableBucketARN":{ + "shape":"TableBucketARN", + "documentation":"

The Amazon Resource Name (ARN) of the destination table bucket where tables will be replicated.

" + } + }, + "documentation":"

Specifies a destination table bucket for replication.

" + }, + "ReplicationDestinationStatusModel":{ + "type":"structure", + "required":[ + "replicationStatus", + "destinationTableBucketArn" + ], + "members":{ + "replicationStatus":{ + "shape":"ReplicationStatus", + "documentation":"

The current status of replication to this destination.

" + }, + "destinationTableBucketArn":{ + "shape":"TableBucketARN", + "documentation":"

The Amazon Resource Name (ARN) of the destination table bucket.

" + }, + "destinationTableArn":{ + "shape":"TableARN", + "documentation":"

The Amazon Resource Name (ARN) of the destination table.

" + }, + "lastSuccessfulReplicatedUpdate":{ + "shape":"LastSuccessfulReplicatedUpdate", + "documentation":"

Information about the most recent successful replication update to this destination.

" + }, + "failureMessage":{ + "shape":"String", + "documentation":"

If replication has failed, this field contains an error message describing the failure reason.

" + } + }, + "documentation":"

Contains status information for a replication destination, including the current replication state, last successful update, and any error messages.

" + }, + "ReplicationDestinationStatuses":{ + "type":"list", + "member":{"shape":"ReplicationDestinationStatusModel"}, + "max":5, + "min":1 + }, + "ReplicationDestinations":{ + "type":"list", + "member":{"shape":"ReplicationDestination"}, + "max":5, + "min":1 + }, + "ReplicationInformation":{ + "type":"structure", + "required":["sourceTableARN"], + "members":{ + "sourceTableARN":{ + "shape":"TableARN", + "documentation":"

The Amazon Resource Name (ARN) of the source table from which this table is replicated.

" + } + }, + "documentation":"

Contains information about the source of a replicated table.

" + }, + "ReplicationStatus":{ + "type":"string", + "enum":[ + "pending", + "completed", + "failed" + ] + }, "ResourceArn":{ "type":"string", "max":2048, @@ -2195,6 +2959,24 @@ "type":"list", "member":{"shape":"SchemaField"} }, + "StorageClass":{ + "type":"string", + "enum":[ + "STANDARD", + "INTELLIGENT_TIERING" + ] + }, + "StorageClassConfiguration":{ + "type":"structure", + "required":["storageClass"], + "members":{ + "storageClass":{ + "shape":"StorageClass", + "documentation":"

The storage class for the table or table bucket. Valid values include storage classes optimized for different access patterns and cost profiles.

" + } + }, + "documentation":"

The configuration details for the storage class of tables or table buckets. This allows you to optimize storage costs by selecting the appropriate storage class based on your access patterns and performance requirements.

" + }, "String":{"type":"string"}, "SyntheticTimestamp_date_time":{ "type":"timestamp", @@ -2251,6 +3033,41 @@ "min":3, "pattern":"[0-9a-z-]*" }, + "TableBucketReplicationConfiguration":{ + "type":"structure", + "required":[ + "role", + "rules" + ], + "members":{ + "role":{ + "shape":"IAMRole", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that S3 Tables assumes to replicate tables on your behalf.

" + }, + "rules":{ + "shape":"TableBucketReplicationRules", + "documentation":"

An array of replication rules that define which tables to replicate and where to replicate them.

" + } + }, + "documentation":"

The replication configuration for a table bucket. This configuration defines how tables in the source bucket are replicated to destination table buckets, including the IAM role used for replication.

" + }, + "TableBucketReplicationRule":{ + "type":"structure", + "required":["destinations"], + "members":{ + "destinations":{ + "shape":"ReplicationDestinations", + "documentation":"

An array of destination table buckets where tables should be replicated.

" + } + }, + "documentation":"

Defines a rule for replicating tables from a source table bucket to one or more destination table buckets.

" + }, + "TableBucketReplicationRules":{ + "type":"list", + "member":{"shape":"TableBucketReplicationRule"}, + "max":1, + "min":1 + }, "TableBucketSummary":{ "type":"structure", "required":[ @@ -2315,7 +3132,7 @@ "documentation":"

Contains details about the settings for the maintenance configuration.

" } }, - "documentation":"

Contains the values that define a maintenance configuration for a table.

" + "documentation":"

The values that define a maintenance configuration for a table.

" }, "TableMaintenanceJobStatus":{ "type":"map", @@ -2388,6 +3205,104 @@ "min":1, "pattern":"[0-9a-z_]*" }, + "TableProperties":{ + "type":"map", + "key":{"shape":"String"}, + "value":{"shape":"String"} + }, + "TableRecordExpirationConfigurationValue":{ + "type":"structure", + "members":{ + "status":{ + "shape":"TableRecordExpirationStatus", + "documentation":"

The status of the expiration settings for records in the table.

" + }, + "settings":{ + "shape":"TableRecordExpirationSettings", + "documentation":"

The expiration settings for records in the table.

" + } + }, + "documentation":"

The expiration configuration settings for records in a table, and the status of the configuration. If the status of the configuration is enabled, records expire and are automatically removed after the number of days specified in the record expiration settings for the table.

" + }, + "TableRecordExpirationJobMetrics":{ + "type":"structure", + "members":{ + "deletedDataFiles":{ + "shape":"Long", + "documentation":"

The total number of data files that were removed when the job ran.

" + }, + "deletedRecords":{ + "shape":"Long", + "documentation":"

The total number of records that were removed when the job ran.

" + }, + "removedFilesSize":{ + "shape":"Long", + "documentation":"

The total size (in bytes) of the data files that were removed when the job ran.

" + } + }, + "documentation":"

Provides metrics for the record expiration job that most recently ran for a table. The metrics provide insight into the amount of data that was removed when the job ran.

" + }, + "TableRecordExpirationJobStatus":{ + "type":"string", + "enum":[ + "NotYetRun", + "Successful", + "Failed", + "Disabled" + ] + }, + "TableRecordExpirationSettings":{ + "type":"structure", + "members":{ + "days":{ + "shape":"PositiveInteger", + "documentation":"

If you enable record expiration for a table, you can specify the number of days to retain your table records. For example, to retain your table records for one year, set this value to 365.

" + } + }, + "documentation":"

The record expiration setting that specifies when records expire and are automatically removed from a table.

" + }, + "TableRecordExpirationStatus":{ + "type":"string", + "enum":[ + "enabled", + "disabled" + ] + }, + "TableReplicationConfiguration":{ + "type":"structure", + "required":[ + "role", + "rules" + ], + "members":{ + "role":{ + "shape":"IAMRole", + "documentation":"

The Amazon Resource Name (ARN) of the IAM role that S3 Tables assumes to replicate the table on your behalf.

" + }, + "rules":{ + "shape":"TableReplicationRules", + "documentation":"

An array of replication rules that define where this table should be replicated.

" + } + }, + "documentation":"

The replication configuration for an individual table. This configuration defines how the table is replicated to destination tables.

" + }, + "TableReplicationRule":{ + "type":"structure", + "required":["destinations"], + "members":{ + "destinations":{ + "shape":"ReplicationDestinations", + "documentation":"

An array of destination table buckets where this table should be replicated.

" + } + }, + "documentation":"

Defines a rule for replicating a table to one or more destination tables.

" + }, + "TableReplicationRules":{ + "type":"list", + "member":{"shape":"TableReplicationRule"}, + "max":1, + "min":1 + }, "TableSummary":{ "type":"structure", "required":[ @@ -2423,6 +3338,10 @@ "shape":"SyntheticTimestamp_date_time", "documentation":"

The date and time the table was last modified at.

" }, + "managedByService":{ + "shape":"String", + "documentation":"

The Amazon Web Services service managing this table, if applicable. For example, a replicated table is managed by the S3 Tables replication service.

" + }, "namespaceId":{ "shape":"NamespaceId", "documentation":"

The unique identifier for the namespace that contains this table.

" diff --git a/awscli/botocore/data/s3vectors/2025-07-15/service-2.json b/awscli/botocore/data/s3vectors/2025-07-15/service-2.json index 38a447f9c8e0..ece3e5cad728 100644 --- a/awscli/botocore/data/s3vectors/2025-07-15/service-2.json +++ b/awscli/botocore/data/s3vectors/2025-07-15/service-2.json @@ -33,8 +33,7 @@ {"shape":"NotFoundException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Amazon S3 Vectors is in preview release for Amazon S3 and is subject to change.

Creates a vector index within a vector bucket. To specify the vector bucket, you must use either the vector bucket name or the vector bucket Amazon Resource Name (ARN).

Permissions

You must have the s3vectors:CreateIndex permission to use this operation.

", - "idempotent":true + "documentation":"

Creates a vector index within a vector bucket. To specify the vector bucket, you must use either the vector bucket name or the vector bucket Amazon Resource Name (ARN).

Permissions

You must have the s3vectors:CreateIndex permission to use this operation.

You must have the s3vectors:TagResource permission in addition to s3vectors:CreateIndex permission to create a vector index with tags.

" }, "CreateVectorBucket":{ "name":"CreateVectorBucket", @@ -55,8 +54,7 @@ {"shape":"ConflictException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Amazon S3 Vectors is in preview release for Amazon S3 and is subject to change.

Creates a vector bucket in the Amazon Web Services Region that you want your bucket to be in.

Permissions

You must have the s3vectors:CreateVectorBucket permission to use this operation.

", - "idempotent":true + "documentation":"

Creates a vector bucket in the Amazon Web Services Region that you want your bucket to be in.

Permissions

You must have the s3vectors:CreateVectorBucket permission to use this operation.

You must have the s3vectors:TagResource permission in addition to s3vectors:CreateVectorBucket permission to create a vector bucket with tags.

" }, "DeleteIndex":{ "name":"DeleteIndex", @@ -73,10 +71,10 @@ {"shape":"TooManyRequestsException"}, {"shape":"InternalServerException"}, {"shape":"AccessDeniedException"}, - {"shape":"RequestTimeoutException"} + {"shape":"RequestTimeoutException"}, + {"shape":"NotFoundException"} ], - "documentation":"

Amazon S3 Vectors is in preview release for Amazon S3 and is subject to change.

Deletes a vector index. To specify the vector index, you can either use both the vector bucket name and vector index name, or use the vector index Amazon Resource Name (ARN).

Permissions

You must have the s3vectors:DeleteIndex permission to use this operation.

", - "idempotent":true + "documentation":"

Deletes a vector index. To specify the vector index, you can either use both the vector bucket name and vector index name, or use the vector index Amazon Resource Name (ARN).

Permissions

You must have the s3vectors:DeleteIndex permission to use this operation.

" }, "DeleteVectorBucket":{ "name":"DeleteVectorBucket", @@ -94,10 +92,10 @@ {"shape":"InternalServerException"}, {"shape":"AccessDeniedException"}, {"shape":"RequestTimeoutException"}, - {"shape":"ConflictException"} + {"shape":"ConflictException"}, + {"shape":"NotFoundException"} ], - "documentation":"

Amazon S3 Vectors is in preview release for Amazon S3 and is subject to change.

Deletes a vector bucket. All vector indexes in the vector bucket must be deleted before the vector bucket can be deleted. To perform this operation, you must use either the vector bucket name or the vector bucket Amazon Resource Name (ARN).

Permissions

You must have the s3vectors:DeleteVectorBucket permission to use this operation.

", - "idempotent":true + "documentation":"

Deletes a vector bucket. All vector indexes in the vector bucket must be deleted before the vector bucket can be deleted. To perform this operation, you must use either the vector bucket name or the vector bucket Amazon Resource Name (ARN).

Permissions

You must have the s3vectors:DeleteVectorBucket permission to use this operation.

" }, "DeleteVectorBucketPolicy":{ "name":"DeleteVectorBucketPolicy", @@ -117,7 +115,7 @@ {"shape":"RequestTimeoutException"}, {"shape":"NotFoundException"} ], - "documentation":"

Amazon S3 Vectors is in preview release for Amazon S3 and is subject to change.

Deletes a vector bucket policy. To specify the bucket, you must use either the vector bucket name or the vector bucket Amazon Resource Name (ARN).

Permissions

You must have the s3vectors:DeleteVectorBucketPolicy permission to use this operation.

", + "documentation":"

Deletes a vector bucket policy. To specify the bucket, you must use either the vector bucket name or the vector bucket Amazon Resource Name (ARN).

Permissions

You must have the s3vectors:DeleteVectorBucketPolicy permission to use this operation.

", "idempotent":true }, "DeleteVectors":{ @@ -142,7 +140,7 @@ {"shape":"NotFoundException"}, {"shape":"KmsDisabledException"} ], - "documentation":"

Amazon S3 Vectors is in preview release for Amazon S3 and is subject to change.

Deletes one or more vectors in a vector index. To specify the vector index, you can either use both the vector bucket name and vector index name, or use the vector index Amazon Resource Name (ARN).

Permissions

You must have the s3vectors:DeleteVectors permission to use this operation.

", + "documentation":"

Deletes one or more vectors in a vector index. To specify the vector index, you can either use both the vector bucket name and vector index name, or use the vector index Amazon Resource Name (ARN).

Permissions

You must have the s3vectors:DeleteVectors permission to use this operation.

", "idempotent":true }, "GetIndex":{ @@ -163,7 +161,7 @@ {"shape":"RequestTimeoutException"}, {"shape":"NotFoundException"} ], - "documentation":"

Amazon S3 Vectors is in preview release for Amazon S3 and is subject to change.

Returns vector index attributes. To specify the vector index, you can either use both the vector bucket name and the vector index name, or use the vector index Amazon Resource Name (ARN).

Permissions

You must have the s3vectors:GetIndex permission to use this operation.

", + "documentation":"

Returns vector index attributes. To specify the vector index, you can either use both the vector bucket name and the vector index name, or use the vector index Amazon Resource Name (ARN).

Permissions

You must have the s3vectors:GetIndex permission to use this operation.

", "readonly":true }, "GetVectorBucket":{ @@ -184,7 +182,7 @@ {"shape":"RequestTimeoutException"}, {"shape":"NotFoundException"} ], - "documentation":"

Amazon S3 Vectors is in preview release for Amazon S3 and is subject to change.

Returns vector bucket attributes. To specify the bucket, you must use either the vector bucket name or the vector bucket Amazon Resource Name (ARN).

Permissions

You must have the s3vectors:GetVectorBucket permission to use this operation.

", + "documentation":"

Returns vector bucket attributes. To specify the bucket, you must use either the vector bucket name or the vector bucket Amazon Resource Name (ARN).

Permissions

You must have the s3vectors:GetVectorBucket permission to use this operation.

", "readonly":true }, "GetVectorBucketPolicy":{ @@ -205,7 +203,7 @@ {"shape":"RequestTimeoutException"}, {"shape":"NotFoundException"} ], - "documentation":"

Amazon S3 Vectors is in preview release for Amazon S3 and is subject to change.

Gets details about a vector bucket policy. To specify the bucket, you must use either the vector bucket name or the vector bucket Amazon Resource Name (ARN).

Permissions

You must have the s3vectors:GetVectorBucketPolicy permission to use this operation.

", + "documentation":"

Gets details about a vector bucket policy. To specify the bucket, you must use either the vector bucket name or the vector bucket Amazon Resource Name (ARN).

Permissions

You must have the s3vectors:GetVectorBucketPolicy permission to use this operation.

", "readonly":true }, "GetVectors":{ @@ -230,7 +228,7 @@ {"shape":"NotFoundException"}, {"shape":"KmsDisabledException"} ], - "documentation":"

Amazon S3 Vectors is in preview release for Amazon S3 and is subject to change.

Returns vector attributes. To specify the vector index, you can either use both the vector bucket name and the vector index name, or use the vector index Amazon Resource Name (ARN).

Permissions

You must have the s3vectors:GetVectors permission to use this operation.

", + "documentation":"

Returns vector attributes. To specify the vector index, you can either use both the vector bucket name and the vector index name, or use the vector index Amazon Resource Name (ARN).

Permissions

You must have the s3vectors:GetVectors permission to use this operation.

", "readonly":true }, "ListIndexes":{ @@ -251,7 +249,28 @@ {"shape":"RequestTimeoutException"}, {"shape":"NotFoundException"} ], - "documentation":"

Amazon S3 Vectors is in preview release for Amazon S3 and is subject to change.

Returns a list of all the vector indexes within the specified vector bucket. To specify the bucket, you must use either the vector bucket name or the vector bucket Amazon Resource Name (ARN).

Permissions

You must have the s3vectors:ListIndexes permission to use this operation.

", + "documentation":"

Returns a list of all the vector indexes within the specified vector bucket. To specify the bucket, you must use either the vector bucket name or the vector bucket Amazon Resource Name (ARN).

Permissions

You must have the s3vectors:ListIndexes permission to use this operation.

", + "readonly":true + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceInput"}, + "output":{"shape":"ListTagsForResourceOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

Lists all of the tags applied to a specified Amazon S3 Vectors resource. Each tag is a label consisting of a key and value pair. Tags can help you organize, track costs for, and control access to resources.

For a list of S3 resources that support tagging, see Managing tags for Amazon S3 resources.

Permissions

For vector buckets and vector indexes, you must have the s3vectors:ListTagsForResource permission to use this operation.

", "readonly":true }, "ListVectorBuckets":{ @@ -271,7 +290,7 @@ {"shape":"AccessDeniedException"}, {"shape":"RequestTimeoutException"} ], - "documentation":"

Amazon S3 Vectors is in preview release for Amazon S3 and is subject to change.

Returns a list of all the vector buckets that are owned by the authenticated sender of the request.

Permissions

You must have the s3vectors:ListVectorBuckets permission to use this operation.

", + "documentation":"

Returns a list of all the vector buckets that are owned by the authenticated sender of the request.

Permissions

You must have the s3vectors:ListVectorBuckets permission to use this operation.

", "readonly":true }, "ListVectors":{ @@ -292,7 +311,7 @@ {"shape":"RequestTimeoutException"}, {"shape":"NotFoundException"} ], - "documentation":"

Amazon S3 Vectors is in preview release for Amazon S3 and is subject to change.

List vectors in the specified vector index. To specify the vector index, you can either use both the vector bucket name and the vector index name, or use the vector index Amazon Resource Name (ARN).

ListVectors operations proceed sequentially; however, for faster performance on a large number of vectors in a vector index, applications can request a parallel ListVectors operation by providing the segmentCount and segmentIndex parameters.

Permissions

You must have the s3vectors:ListVectors permission to use this operation. Additional permissions are required based on the request parameters you specify:

  • With only s3vectors:ListVectors permission, you can list vector keys when returnData and returnMetadata are both set to false or not specified..

  • If you set returnData or returnMetadata to true, you must have both s3vectors:ListVectors and s3vectors:GetVectors permissions. The request fails with a 403 Forbidden error if you request vector data or metadata without the s3vectors:GetVectors permission.

", + "documentation":"

List vectors in the specified vector index. To specify the vector index, you can either use both the vector bucket name and the vector index name, or use the vector index Amazon Resource Name (ARN).

ListVectors operations proceed sequentially; however, for faster performance on a large number of vectors in a vector index, applications can request a parallel ListVectors operation by providing the segmentCount and segmentIndex parameters.

Permissions

You must have the s3vectors:ListVectors permission to use this operation. Additional permissions are required based on the request parameters you specify:

  • With only s3vectors:ListVectors permission, you can list vector keys when returnData and returnMetadata are both set to false or not specified.

  • If you set returnData or returnMetadata to true, you must have both s3vectors:ListVectors and s3vectors:GetVectors permissions. The request fails with a 403 Forbidden error if you request vector data or metadata without the s3vectors:GetVectors permission.

", "readonly":true }, "PutVectorBucketPolicy":{ @@ -313,7 +332,7 @@ {"shape":"RequestTimeoutException"}, {"shape":"NotFoundException"} ], - "documentation":"

Amazon S3 Vectors is in preview release for Amazon S3 and is subject to change.

Creates a bucket policy for a vector bucket. To specify the bucket, you must use either the vector bucket name or the vector bucket Amazon Resource Name (ARN).

Permissions

You must have the s3vectors:PutVectorBucketPolicy permission to use this operation.

", + "documentation":"

Creates a bucket policy for a vector bucket. To specify the bucket, you must use either the vector bucket name or the vector bucket Amazon Resource Name (ARN).

Permissions

You must have the s3vectors:PutVectorBucketPolicy permission to use this operation.

", "idempotent":true }, "PutVectors":{ @@ -339,7 +358,7 @@ {"shape":"ServiceQuotaExceededException"}, {"shape":"KmsDisabledException"} ], - "documentation":"

Amazon S3 Vectors is in preview release for Amazon S3 and is subject to change.

Adds one or more vectors to a vector index. To specify the vector index, you can either use both the vector bucket name and the vector index name, or use the vector index Amazon Resource Name (ARN).

For more information about limits, see Limitations and restrictions in the Amazon S3 User Guide.

When inserting vector data into your vector index, you must provide the vector data as float32 (32-bit floating point) values. If you pass higher-precision values to an Amazon Web Services SDK, S3 Vectors converts the values to 32-bit floating point before storing them, and GetVectors, ListVectors, and QueryVectors operations return the float32 values. Different Amazon Web Services SDKs may have different default numeric types, so ensure your vectors are properly formatted as float32 values regardless of which SDK you're using. For example, in Python, use numpy.float32 or explicitly cast your values.

Permissions

You must have the s3vectors:PutVectors permission to use this operation.

", + "documentation":"

Adds one or more vectors to a vector index. To specify the vector index, you can either use both the vector bucket name and the vector index name, or use the vector index Amazon Resource Name (ARN).

For more information about limits, see Limitations and restrictions in the Amazon S3 User Guide.

When inserting vector data into your vector index, you must provide the vector data as float32 (32-bit floating point) values. If you pass higher-precision values to an Amazon Web Services SDK, S3 Vectors converts the values to 32-bit floating point before storing them, and GetVectors, ListVectors, and QueryVectors operations return the float32 values. Different Amazon Web Services SDKs may have different default numeric types, so ensure your vectors are properly formatted as float32 values regardless of which SDK you're using. For example, in Python, use numpy.float32 or explicitly cast your values.

Permissions

You must have the s3vectors:PutVectors permission to use this operation.

", "idempotent":true }, "QueryVectors":{ @@ -364,8 +383,52 @@ {"shape":"NotFoundException"}, {"shape":"KmsDisabledException"} ], - "documentation":"

Amazon S3 Vectors is in preview release for Amazon S3 and is subject to change.

Performs an approximate nearest neighbor search query in a vector index using a query vector. By default, it returns the keys of approximate nearest neighbors. You can optionally include the computed distance (between the query vector and each vector in the response), the vector data, and metadata of each vector in the response.

To specify the vector index, you can either use both the vector bucket name and the vector index name, or use the vector index Amazon Resource Name (ARN).

Permissions

You must have the s3vectors:QueryVectors permission to use this operation. Additional permissions are required based on the request parameters you specify:

  • With only s3vectors:QueryVectors permission, you can retrieve vector keys of approximate nearest neighbors and computed distances between these vectors. This permission is sufficient only when you don't set any metadata filters and don't request vector data or metadata (by keeping the returnMetadata parameter set to false or not specified).

  • If you specify a metadata filter or set returnMetadata to true, you must have both s3vectors:QueryVectors and s3vectors:GetVectors permissions. The request fails with a 403 Forbidden error if you request metadata filtering, vector data, or metadata without the s3vectors:GetVectors permission.

", + "documentation":"

Performs an approximate nearest neighbor search query in a vector index using a query vector. By default, it returns the keys of approximate nearest neighbors. You can optionally include the computed distance (between the query vector and each vector in the response), the vector data, and metadata of each vector in the response.

To specify the vector index, you can either use both the vector bucket name and the vector index name, or use the vector index Amazon Resource Name (ARN).

Permissions

You must have the s3vectors:QueryVectors permission to use this operation. Additional permissions are required based on the request parameters you specify:

  • With only s3vectors:QueryVectors permission, you can retrieve vector keys of approximate nearest neighbors and computed distances between these vectors. This permission is sufficient only when you don't set any metadata filters and don't request vector data or metadata (by keeping the returnMetadata parameter set to false or not specified).

  • If you specify a metadata filter or set returnMetadata to true, you must have both s3vectors:QueryVectors and s3vectors:GetVectors permissions. The request fails with a 403 Forbidden error if you request metadata filtering, vector data, or metadata without the s3vectors:GetVectors permission.

", "readonly":true + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"TagResourceInput"}, + "output":{"shape":"TagResourceOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"}, + {"shape":"NotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Applies one or more user-defined tags to an Amazon S3 Vectors resource or updates existing tags. Each tag is a label consisting of a key and value pair. Tags can help you organize, track costs for, and control access to your resources. You can add up to 50 tags for each resource.

For a list of S3 resources that support tagging, see Managing tags for Amazon S3 resources.

Permissions

For vector buckets and vector indexes, you must have the s3vectors:TagResource permission to use this operation.

", + "idempotent":true + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"UntagResourceInput"}, + "output":{"shape":"UntagResourceOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ServiceUnavailableException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServerException"}, + {"shape":"AccessDeniedException"}, + {"shape":"RequestTimeoutException"}, + {"shape":"NotFoundException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Removes the specified user-defined tags from an Amazon S3 Vectors resource. You can pass one or more tag keys.

For a list of S3 resources that support tagging, see Managing tags for Amazon S3 resources.

Permissions

For vector buckets and vector indexes, you must have the s3vectors:UntagResource permission to use this operation.

", + "idempotent":true } }, "shapes":{ @@ -435,6 +498,14 @@ "metadataConfiguration":{ "shape":"MetadataConfiguration", "documentation":"

The metadata configuration for the vector index.

" + }, + "encryptionConfiguration":{ + "shape":"EncryptionConfiguration", + "documentation":"

The encryption configuration for a vector index. By default, if you don't specify, all new vectors in the vector index will use the encryption configuration of the vector bucket.

" + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

An array of user-defined tags that you would like to apply to the vector index that you are creating. A tag is a key-value pair that you apply to your resources. Tags can help you organize, track costs, and control access to resources. For more information, see Tagging for cost allocation or attribute-based access control (ABAC).

You must have the s3vectors:TagResource permission in addition to s3vectors:CreateIndex permission to create a vector index with tags.

" } } }, @@ -459,6 +530,10 @@ "encryptionConfiguration":{ "shape":"EncryptionConfiguration", "documentation":"

The encryption configuration for the vector bucket. By default, if you don't specify, all new vectors in Amazon S3 vector buckets use server-side encryption with Amazon S3 managed keys (SSE-S3), specifically AES256.

" + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

An array of user-defined tags that you would like to apply to the vector bucket that you are creating. A tag is a key-value pair that you apply to your resources. Tags can help you organize and control access to resources. For more information, see Tagging for cost allocation or attribute-based access control (ABAC).

You must have the s3vectors:TagResource permission in addition to s3vectors:CreateVectorBucket permission to create a vector bucket with tags.

" } } }, @@ -593,7 +668,7 @@ "documentation":"

Amazon Web Services Key Management Service (KMS) customer managed key ID to use for the encryption configuration. This parameter is allowed if and only if sseType is set to aws:kms.

To specify the KMS key, you must use the format of the KMS key Amazon Resource Name (ARN).

For example, specify Key ARN in the following format: arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab

" } }, - "documentation":"

Amazon S3 Vectors is in preview release for Amazon S3 and is subject to change.

The encryption configuration for a vector bucket. By default, if you don't specify, all new vectors in Amazon S3 vector buckets use server-side encryption with Amazon S3 managed keys (SSE-S3), specifically AES256.

" + "documentation":"

The encryption configuration for a vector bucket or index. By default, if you don't specify, all new vectors in Amazon S3 vector buckets use server-side encryption with Amazon S3 managed keys (SSE-S3), specifically AES256. You can optionally override bucket level encryption settings, and set a specific encryption configuration for a vector index at the time of index creation.

" }, "ExceptionMessage":{"type":"string"}, "Float":{ @@ -648,7 +723,7 @@ "documentation":"

Metadata about the vector.

" } }, - "documentation":"

Amazon S3 Vectors is in preview release for Amazon S3 and is subject to change.

The attributes of a vector returned by the GetVectors operation.

" + "documentation":"

The attributes of a vector returned by the GetVectors operation.

" }, "GetVectorBucketInput":{ "type":"structure", @@ -788,11 +863,18 @@ "metadataConfiguration":{ "shape":"MetadataConfiguration", "documentation":"

The metadata configuration for the vector index.

" + }, + "encryptionConfiguration":{ + "shape":"EncryptionConfiguration", + "documentation":"

The encryption configuration for a vector index. By default, if you don't specify, all new vectors in the vector index will use the encryption configuration of the vector bucket.

" } }, - "documentation":"

Amazon S3 Vectors is in preview release for Amazon S3 and is subject to change.

The attributes of a vector index.

" + "documentation":"

The attributes of a vector index.

" + }, + "IndexArn":{ + "type":"string", + "pattern":"arn:aws[-a-z0-9]*:s3vectors:[a-z0-9-]+:[0-9]{12}:bucket/[a-z0-9][a-z0-9-.]{1,61}[a-z0-9]/index/[a-z0-9][a-z0-9-.]{1,61}[a-z0-9]" }, - "IndexArn":{"type":"string"}, "IndexName":{ "type":"string", "max":63, @@ -824,7 +906,7 @@ "documentation":"

Date and time when the vector index was created.

" } }, - "documentation":"

Amazon S3 Vectors is in preview release for Amazon S3 and is subject to change.

Summary information about a vector index.

" + "documentation":"

Summary information about a vector index.

" }, "InternalServerException":{ "type":"structure", @@ -972,7 +1054,29 @@ "documentation":"

Metadata about the vector.

" } }, - "documentation":"

Amazon S3 Vectors is in preview release for Amazon S3 and is subject to change.

The attributes of a vector returned by the ListVectors operation.

" + "documentation":"

The attributes of a vector returned by the ListVectors operation.

" + }, + "ListTagsForResourceInput":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"ResourceARN", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon S3 Vectors resource that you want to list tags for. The tagged resource can be a vector bucket or a vector index.

", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceOutput":{ + "type":"structure", + "required":["tags"], + "members":{ + "tags":{ + "shape":"TagsMap", + "documentation":"

The user-defined tags that are applied to the S3 Vectors resource. For more information, see Tagging for cost allocation or attribute-based access control (ABAC).

" + } + } }, "ListVectorBucketsInput":{ "type":"structure", @@ -1116,7 +1220,7 @@ "documentation":"

Non-filterable metadata keys allow you to enrich vectors with additional context during storage and retrieval. Unlike default metadata keys, these keys can’t be used as query filters. Non-filterable metadata keys can be retrieved but can’t be searched, queried, or filtered. You can access non-filterable metadata keys of your vectors after finding the vectors. For more information about non-filterable metadata keys, see Vectors and Limitations and restrictions in the Amazon S3 User Guide.

" } }, - "documentation":"

Amazon S3 Vectors is in preview release for Amazon S3 and is subject to change.

The metadata configuration for a vector index.

" + "documentation":"

The metadata configuration for a vector index.

" }, "MetadataKey":{ "type":"string", @@ -1162,7 +1266,7 @@ "documentation":"

Metadata about the vector. All metadata entries undergo validation to ensure they meet the format requirements for size and data types.

" } }, - "documentation":"

Amazon S3 Vectors is in preview release for Amazon S3 and is subject to change.

The attributes of a vector to add to a vector index.

" + "documentation":"

The attributes of a vector to add to a vector index.

" }, "PutVectorBucketPolicyInput":{ "type":"structure", @@ -1222,24 +1326,20 @@ "type":"structure", "required":["key"], "members":{ + "distance":{ + "shape":"Float", + "documentation":"

The measure of similarity between the vector in the response and the query vector.

" + }, "key":{ "shape":"VectorKey", "documentation":"

The key of the vector in the approximate nearest neighbor search.

" }, - "data":{ - "shape":"VectorData", - "documentation":"

The vector data associated with the vector, if requested.

" - }, "metadata":{ "shape":"VectorMetadata", "documentation":"

The metadata associated with the vector, if requested.

" - }, - "distance":{ - "shape":"Float", - "documentation":"

The measure of similarity between the vector in the response and the query vector.

" } }, - "documentation":"

Amazon S3 Vectors is in preview release for Amazon S3 and is subject to change.

The attributes of a vector in the approximate nearest neighbor search.

" + "documentation":"

The attributes of a vector in the approximate nearest neighbor search.

" }, "QueryVectorsInput":{ "type":"structure", @@ -1317,6 +1417,11 @@ "exception":true, "retryable":{"throttling":false} }, + "ResourceARN":{ + "type":"string", + "max":1011, + "min":0 + }, "ServiceQuotaExceededException":{ "type":"structure", "required":["message"], @@ -1350,6 +1455,50 @@ ] }, "String":{"type":"string"}, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"} + }, + "TagResourceInput":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"ResourceARN", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon S3 Vectors resource that you're applying tags to. The tagged resource can be a vector bucket or a vector index.

", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"TagsMap", + "documentation":"

The user-defined tag that you want to add to the specified S3 Vectors resource. For more information, see Tagging for cost allocation or attribute-based access control (ABAC).

" + } + } + }, + "TagResourceOutput":{ + "type":"structure", + "members":{} + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":"([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)" + }, + "TagsMap":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"} + }, "Timestamp":{"type":"timestamp"}, "TooManyRequestsException":{ "type":"structure", @@ -1370,6 +1519,31 @@ "box":true, "min":1 }, + "UntagResourceInput":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"ResourceARN", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon S3 Vectors resource that you're removing tags from. The tagged resource can be a vector bucket or a vector index.

", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

The array of tag keys that you're removing from the S3 Vectors resource. For more information, see Tagging for cost allocation or attribute-based access control (ABAC).

", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceOutput":{ + "type":"structure", + "members":{} + }, "ValidationException":{ "type":"structure", "required":["message"], @@ -1430,9 +1604,12 @@ "documentation":"

The encryption configuration for the vector bucket.

" } }, - "documentation":"

Amazon S3 Vectors is in preview release for Amazon S3 and is subject to change.

The attributes of a vector bucket.

" + "documentation":"

The attributes of a vector bucket.

" + }, + "VectorBucketArn":{ + "type":"string", + "pattern":"arn:aws[-a-z0-9]*:s3vectors:[a-z0-9-]+:[0-9]{12}:bucket/[a-z0-9][a-z0-9-.]{1,61}[a-z0-9]" }, - "VectorBucketArn":{"type":"string"}, "VectorBucketName":{ "type":"string", "max":63, @@ -1460,7 +1637,7 @@ "documentation":"

Date and time when the vector bucket was created.

" } }, - "documentation":"

Amazon S3 Vectors is in preview release for Amazon S3 and is subject to change.

Summary information about a vector bucket.

" + "documentation":"

Summary information about a vector bucket.

" }, "VectorData":{ "type":"structure", @@ -1470,7 +1647,7 @@ "documentation":"

The vector data as 32-bit floating point numbers. The number of elements in this array must exactly match the dimension of the vector index where the operation is being performed.

" } }, - "documentation":"

Amazon S3 Vectors is in preview release for Amazon S3 and is subject to change.

The vector data in different formats.

", + "documentation":"

The vector data in different formats.

", "union":true }, "VectorKey":{ diff --git a/awscli/botocore/data/sagemaker/2017-07-24/paginators-1.json b/awscli/botocore/data/sagemaker/2017-07-24/paginators-1.json index 55d0803df9af..0b965cbdb6d5 100644 --- a/awscli/botocore/data/sagemaker/2017-07-24/paginators-1.json +++ b/awscli/botocore/data/sagemaker/2017-07-24/paginators-1.json @@ -491,6 +491,12 @@ "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "UltraServers" + }, + "ListMlflowApps": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Summaries" } } } diff --git a/awscli/botocore/data/sagemaker/2017-07-24/service-2.json b/awscli/botocore/data/sagemaker/2017-07-24/service-2.json index 553dfa81d1f1..26c2fb315a23 100644 --- a/awscli/botocore/data/sagemaker/2017-07-24/service-2.json +++ b/awscli/botocore/data/sagemaker/2017-07-24/service-2.json @@ -233,7 +233,7 @@ {"shape":"ResourceInUse"}, {"shape":"ResourceLimitExceeded"} ], - "documentation":"

Creates a SageMaker HyperPod cluster. SageMaker HyperPod is a capability of SageMaker for creating and managing persistent clusters for developing large machine learning models, such as large language models (LLMs) and diffusion models. To learn more, see Amazon SageMaker HyperPod in the Amazon SageMaker Developer Guide.

" + "documentation":"

Creates an Amazon SageMaker HyperPod cluster. SageMaker HyperPod is a capability of SageMaker for creating and managing persistent clusters for developing large machine learning models, such as large language models (LLMs) and diffusion models. To learn more, see Amazon SageMaker HyperPod in the Amazon SageMaker Developer Guide.

" }, "CreateClusterSchedulerConfig":{ "name":"CreateClusterSchedulerConfig", @@ -596,6 +596,19 @@ ], "documentation":"

Creates a job that uses workers to label the data objects in your input dataset. You can use the labeled data to train machine learning models.

You can select your workforce from one of three providers:

You can also use automated data labeling to reduce the number of data objects that need to be labeled by a human. Automated data labeling uses active learning to determine if a data object can be labeled by machine or if it needs to be sent to a human worker. For more information, see Using Automated Data Labeling.

The data objects to be labeled are contained in an Amazon S3 bucket. You create a manifest file that describes the location of each object. For more information, see Using Input and Output Data.

The output can be used as the manifest file for another labeling job or as training data for your machine learning models.

You can use this operation to create a static labeling job or a streaming labeling job. A static labeling job stops if all data objects in the input manifest file identified in ManifestS3Uri have been labeled. A streaming labeling job runs perpetually until it is manually stopped, or remains idle for 10 days. You can send new data objects to an active (InProgress) streaming labeling job in real time. To learn how to create a static labeling job, see Create a Labeling Job (API) in the Amazon SageMaker Developer Guide. To learn how to create a streaming labeling job, see Create a Streaming Labeling Job.

" }, + "CreateMlflowApp":{ + "name":"CreateMlflowApp", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateMlflowAppRequest"}, + "output":{"shape":"CreateMlflowAppResponse"}, + "errors":[ + {"shape":"ResourceLimitExceeded"} + ], + "documentation":"

Creates an MLflow Tracking Server using a general purpose Amazon S3 bucket as the artifact store.

" + }, "CreateMlflowTrackingServer":{ "name":"CreateMlflowTrackingServer", "http":{ @@ -829,6 +842,19 @@ ], "documentation":"

Creates a URL for a specified UserProfile in a Domain. When accessed in a web browser, the user will be automatically signed in to the domain, and granted access to all of the Apps and files associated with the Domain's Amazon Elastic File System volume. This operation can only be called when the authentication mode equals IAM.

The IAM role or user passed to this API defines the permissions to access the app. Once the presigned URL is created, no additional permission is required to access this URL. IAM authorization policies for this API are also enforced for every HTTP request and WebSocket frame that attempts to connect to the app.

You can restrict access to this API and to the URL that it returns to a list of IP addresses, Amazon VPCs or Amazon VPC Endpoints that you specify. For more information, see Connect to Amazon SageMaker AI Studio Through an Interface VPC Endpoint .

" }, + "CreatePresignedMlflowAppUrl":{ + "name":"CreatePresignedMlflowAppUrl", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreatePresignedMlflowAppUrlRequest"}, + "output":{"shape":"CreatePresignedMlflowAppUrlResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Returns a presigned URL that you can use to connect to the MLflow UI attached to your MLflow App. For more information, see Launch the MLflow UI using a presigned URL.

" + }, "CreatePresignedMlflowTrackingServerUrl":{ "name":"CreatePresignedMlflowTrackingServerUrl", "http":{ @@ -1394,6 +1420,19 @@ ], "documentation":"

Deletes an inference experiment.

This operation does not delete your endpoint, variants, or any underlying resources. This operation only deletes the metadata of your experiment.

" }, + "DeleteMlflowApp":{ + "name":"DeleteMlflowApp", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteMlflowAppRequest"}, + "output":{"shape":"DeleteMlflowAppResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Deletes an MLflow App.

" + }, "DeleteMlflowTrackingServer":{ "name":"DeleteMlflowTrackingServer", "http":{ @@ -2192,6 +2231,19 @@ ], "documentation":"

Provides a list of properties for the requested lineage group. For more information, see Cross-Account Lineage Tracking in the Amazon SageMaker Developer Guide.

" }, + "DescribeMlflowApp":{ + "name":"DescribeMlflowApp", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeMlflowAppRequest"}, + "output":{"shape":"DescribeMlflowAppResponse"}, + "errors":[ + {"shape":"ResourceNotFound"} + ], + "documentation":"

Returns information about an MLflow App.

" + }, "DescribeMlflowTrackingServer":{ "name":"DescribeMlflowTrackingServer", "http":{ @@ -3155,6 +3207,16 @@ "output":{"shape":"ListLineageGroupsResponse"}, "documentation":"

A list of lineage groups shared with your Amazon Web Services account. For more information, see Cross-Account Lineage Tracking in the Amazon SageMaker Developer Guide.

" }, + "ListMlflowApps":{ + "name":"ListMlflowApps", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListMlflowAppsRequest"}, + "output":{"shape":"ListMlflowAppsResponse"}, + "documentation":"

Lists all MLflow Apps.

" + }, "ListMlflowTrackingServers":{ "name":"ListMlflowTrackingServers", "http":{ @@ -4347,6 +4409,20 @@ ], "documentation":"

Updates an inference experiment that you created. The status of the inference experiment has to be either Created, Running. For more information on the status of an inference experiment, see DescribeInferenceExperiment.

" }, + "UpdateMlflowApp":{ + "name":"UpdateMlflowApp", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateMlflowAppRequest"}, + "output":{"shape":"UpdateMlflowAppResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ResourceNotFound"} + ], + "documentation":"

Updates an MLflow App.

" + }, "UpdateMlflowTrackingServer":{ "name":"UpdateMlflowTrackingServer", "http":{ @@ -4650,6 +4726,13 @@ "pattern":".*" }, "AcceptEula":{"type":"boolean"}, + "AccountDefaultStatus":{ + "type":"string", + "enum":[ + "ENABLED", + "DISABLED" + ] + }, "AccountId":{ "type":"string", "max":12, @@ -5770,6 +5853,30 @@ "min":0, "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:(experiment|experiment-trial-component|artifact|action|context)/.*" }, + "AssociationInfo":{ + "type":"structure", + "required":[ + "SourceArn", + "DestinationArn" + ], + "members":{ + "SourceArn":{ + "shape":"String2048", + "documentation":"

The Amazon Resource Name (ARN) of the AssociationInfo source.

" + }, + "DestinationArn":{ + "shape":"String2048", + "documentation":"

The Amazon Resource Name (ARN) of the AssociationInfo destination.

" + } + }, + "documentation":"

The data type used to describe the relationship between different sources.

" + }, + "AssociationInfoList":{ + "type":"list", + "member":{"shape":"AssociationInfo"}, + "max":10, + "min":0 + }, "AssociationSummaries":{ "type":"list", "member":{"shape":"AssociationSummary"} @@ -6910,6 +7017,24 @@ "type":"string", "min":1 }, + "BaseModel":{ + "type":"structure", + "members":{ + "HubContentName":{ + "shape":"HubContentName", + "documentation":"

The hub content name of the base model.

" + }, + "HubContentVersion":{ + "shape":"HubContentVersion", + "documentation":"

The hub content version of the base model.

" + }, + "RecipeName":{ + "shape":"RecipeName", + "documentation":"

The recipe name of the base model.

" + } + }, + "documentation":"

Identifies the foundation model that was used as the starting point for model customization.

" + }, "BaseModelName":{ "type":"string", "max":256, @@ -7213,6 +7338,10 @@ "ModelApprovalStatus":{ "shape":"ModelApprovalStatus", "documentation":"

The approval status of the model.

" + }, + "ModelPackageRegistrationType":{ + "shape":"ModelPackageRegistrationType", + "documentation":"

The package registration type of the model package summary.

" } }, "documentation":"

Provides summary information about the model package.

" @@ -7519,6 +7648,46 @@ }, "documentation":"

Input object for the batch transform job.

" }, + "BedrockCustomModelDeploymentMetadata":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"String1024", + "documentation":"

The Amazon Resource Name (ARN) of the metadata for the Amazon Bedrock custom model deployment.

" + } + }, + "documentation":"

The metadata of the Amazon Bedrock custom model deployment.

" + }, + "BedrockCustomModelMetadata":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"String1024", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Bedrock custom model metadata.

" + } + }, + "documentation":"

The metadata of the Amazon Bedrock custom model.

" + }, + "BedrockModelImportMetadata":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"String1024", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Bedrock model import metadata.

" + } + }, + "documentation":"

The metadata of the Amazon Bedrock model import.

" + }, + "BedrockProvisionedModelThroughputMetadata":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"String1024", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Bedrock provisioned model throughput metadata.

" + } + }, + "documentation":"

The metadata of the Amazon Bedrock provisioned model throughput.

" + }, "BestObjectiveNotImproving":{ "type":"structure", "members":{ @@ -7552,6 +7721,11 @@ "box":true, "min":1 }, + "BillableTokenCount":{ + "type":"long", + "box":true, + "min":0 + }, "BlockedReason":{ "type":"string", "max":1024, @@ -8987,11 +9161,11 @@ }, "TargetStateCount":{ "shape":"ClusterInstanceCount", - "documentation":"

The number of nodes running a specific image ID since the last software update request.

" + "documentation":"

Represents the number of running nodes using the desired Image ID.

  1. During software update operations: This count shows the number of nodes running on the desired Image ID. If a rollback occurs, the current image ID and desired image ID (both included in the describe cluster response) swap values. The TargetStateCount then shows the number of nodes running on the newly designated desired image ID (which was previously the current image ID).

  2. During simultaneous scaling and software update operations: This count shows the number of instances running on the desired image ID, including any new instances created as part of the scaling request. New nodes are always created using the desired image ID, so TargetStateCount reflects the total count of nodes running on the desired image ID, even during rollback scenarios.

" }, "SoftwareUpdateStatus":{ "shape":"SoftwareUpdateStatus", - "documentation":"

Status of the last software update request.

" + "documentation":"

Status of the last software update request.

Status transitions follow these possible sequences:

" }, "ActiveSoftwareUpdateConfig":{"shape":"DeploymentConfiguration"} }, @@ -9162,6 +9336,7 @@ "ml.p4d.24xlarge", "ml.p4de.24xlarge", "ml.p5.48xlarge", + "ml.p5.4xlarge", "ml.p6e-gb200.36xlarge", "ml.trn1.32xlarge", "ml.trn1n.32xlarge", @@ -9272,7 +9447,8 @@ "ml.r7i.12xlarge", "ml.r7i.16xlarge", "ml.r7i.24xlarge", - "ml.r7i.48xlarge" + "ml.r7i.48xlarge", + "ml.p6-b300.48xlarge" ] }, "ClusterKubernetesConfig":{ @@ -9640,7 +9816,6 @@ }, "ClusterOrchestrator":{ "type":"structure", - "required":["Eks"], "members":{ "Eks":{ "shape":"ClusterOrchestratorEksConfig", @@ -12431,6 +12606,57 @@ } } }, + "CreateMlflowAppRequest":{ + "type":"structure", + "required":[ + "Name", + "ArtifactStoreUri", + "RoleArn" + ], + "members":{ + "Name":{ + "shape":"MlflowAppName", + "documentation":"

A string identifying the MLflow app name. This string is not part of the tracking server ARN.

" + }, + "ArtifactStoreUri":{ + "shape":"S3Uri", + "documentation":"

The S3 URI for a general purpose bucket to use as the MLflow App artifact store.

" + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) for an IAM role in your account that the MLflow App uses to access the artifact store in Amazon S3. The role should have the AmazonS3FullAccess permission.

" + }, + "ModelRegistrationMode":{ + "shape":"ModelRegistrationMode", + "documentation":"

Whether to enable or disable automatic registration of new MLflow models to the SageMaker Model Registry. To enable automatic model registration, set this value to AutoModelRegistrationEnabled. To disable automatic model registration, set this value to AutoModelRegistrationDisabled. If not specified, AutomaticModelRegistration defaults to AutoModelRegistrationDisabled.

" + }, + "WeeklyMaintenanceWindowStart":{ + "shape":"WeeklyMaintenanceWindowStart", + "documentation":"

The day and time of the week in Coordinated Universal Time (UTC) 24-hour standard time that weekly maintenance updates are scheduled. For example: TUE:03:30.

" + }, + "AccountDefaultStatus":{ + "shape":"AccountDefaultStatus", + "documentation":"

Indicates whether this MLflow app is the default for the entire account.

" + }, + "DefaultDomainIdList":{ + "shape":"DefaultDomainIdList", + "documentation":"

List of SageMaker domain IDs for which this MLflow App is used as the default.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

Tags consisting of key-value pairs used to manage metadata for the MLflow App.

" + } + } + }, + "CreateMlflowAppResponse":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"MlflowAppArn", + "documentation":"

The ARN of the MLflow App.

" + } + } + }, "CreateMlflowTrackingServerRequest":{ "type":"structure", "required":[ @@ -12760,6 +12986,10 @@ "shape":"EntityDescription", "documentation":"

A description of the model package.

" }, + "ModelPackageRegistrationType":{ + "shape":"ModelPackageRegistrationType", + "documentation":"

The package registration type of the model package input.

" + }, "InferenceSpecification":{ "shape":"InferenceSpecification", "documentation":"

Specifies details about inference jobs that you can run with models based on this model package, including the following information:

" @@ -13316,6 +13546,33 @@ } } }, + "CreatePresignedMlflowAppUrlRequest":{ + "type":"structure", + "required":["Arn"], + "members":{ + "Arn":{ + "shape":"MlflowAppArn", + "documentation":"

The ARN of the MLflow App to connect to your MLflow UI.

" + }, + "ExpiresInSeconds":{ + "shape":"ExpiresInSeconds", + "documentation":"

The duration in seconds that your presigned URL is valid. The presigned URL can be used only once.

" + }, + "SessionExpirationDurationInSeconds":{ + "shape":"SessionExpirationDurationInSeconds", + "documentation":"

The duration in seconds that your MLflow UI session is valid.

" + } + } + }, + "CreatePresignedMlflowAppUrlResponse":{ + "type":"structure", + "members":{ + "AuthorizedUrl":{ + "shape":"MlflowAppUrl", + "documentation":"

A presigned URL with an authorization token.

" + } + } + }, "CreatePresignedMlflowTrackingServerUrlRequest":{ "type":"structure", "required":["TrackingServerName"], @@ -13666,6 +13923,18 @@ "SessionChainingConfig":{ "shape":"SessionChainingConfig", "documentation":"

Contains information about attribute-based access control (ABAC) for the training job.

" + }, + "ServerlessJobConfig":{ + "shape":"ServerlessJobConfig", + "documentation":"

The configuration for serverless training jobs.

" + }, + "MlflowConfig":{ + "shape":"MlflowConfig", + "documentation":"

The MLflow configuration using SageMaker managed MLflow.

" + }, + "ModelPackageConfig":{ + "shape":"ModelPackageConfig", + "documentation":"

The configuration for the model package.

" } } }, @@ -14170,6 +14439,15 @@ "min":1, "pattern":"([\\p{L}\\p{Z}\\p{N}_.:\\/=+\\-@]*)${1,256}" }, + "CustomizationTechnique":{ + "type":"string", + "enum":[ + "SFT", + "DPO", + "RLVR", + "RLAIF" + ] + }, "CustomizedMetricSpecification":{ "type":"structure", "members":{ @@ -14380,6 +14658,10 @@ "FileSystemDataSource":{ "shape":"FileSystemDataSource", "documentation":"

The file system that is associated with a channel.

" + }, + "DatasetSource":{ + "shape":"DatasetSource", + "documentation":"

The dataset resource that's associated with a channel.

" } }, "documentation":"

Describes the location of the channel data.

" @@ -14417,6 +14699,17 @@ }, "documentation":"

Configuration for Dataset Definition inputs. The Dataset Definition input must specify exactly one of either AthenaDatasetDefinition or RedshiftDatasetDefinition types.

" }, + "DatasetSource":{ + "type":"structure", + "required":["DatasetArn"], + "members":{ + "DatasetArn":{ + "shape":"HubDataSetArn", + "documentation":"

The Amazon Resource Name (ARN) of the dataset resource.

" + } + }, + "documentation":"

Specifies a dataset source for a channel.

" + }, "DebugHookConfig":{ "type":"structure", "required":["S3OutputPath"], @@ -14524,6 +14817,10 @@ "InstanceConnectivity" ] }, + "DefaultDomainIdList":{ + "type":"list", + "member":{"shape":"DomainId"} + }, "DefaultEbsStorageSettings":{ "type":"structure", "required":[ @@ -15058,6 +15355,25 @@ } } }, + "DeleteMlflowAppRequest":{ + "type":"structure", + "required":["Arn"], + "members":{ + "Arn":{ + "shape":"MlflowAppArn", + "documentation":"

The ARN of the MLflow App to delete.

" + } + } + }, + "DeleteMlflowAppResponse":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"MlflowAppArn", + "documentation":"

The ARN of the deleted MLflow App.

" + } + } + }, "DeleteMlflowTrackingServerRequest":{ "type":"structure", "required":["TrackingServerName"], @@ -18323,6 +18639,75 @@ "LastModifiedBy":{"shape":"UserContext"} } }, + "DescribeMlflowAppRequest":{ + "type":"structure", + "required":["Arn"], + "members":{ + "Arn":{ + "shape":"MlflowAppArn", + "documentation":"

The ARN of the MLflow App for which to get information.

" + } + } + }, + "DescribeMlflowAppResponse":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"MlflowAppArn", + "documentation":"

The ARN of the MLflow App.

" + }, + "Name":{ + "shape":"MlflowAppName", + "documentation":"

The name of the MLflow App.

" + }, + "ArtifactStoreUri":{ + "shape":"S3Uri", + "documentation":"

The S3 URI of the general purpose bucket used as the MLflow App artifact store.

" + }, + "MlflowVersion":{ + "shape":"MlflowVersion", + "documentation":"

The MLflow version used.

" + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) for an IAM role in your account that the MLflow App uses to access the artifact store in Amazon S3.

" + }, + "Status":{ + "shape":"MlflowAppStatus", + "documentation":"

The current creation status of the described MLflow App.

" + }, + "ModelRegistrationMode":{ + "shape":"ModelRegistrationMode", + "documentation":"

Whether automatic registration of new MLflow models to the SageMaker Model Registry is enabled.

" + }, + "AccountDefaultStatus":{ + "shape":"AccountDefaultStatus", + "documentation":"

Indicates whether this MLflow app is the default for the entire account.

" + }, + "DefaultDomainIdList":{ + "shape":"DefaultDomainIdList", + "documentation":"

List of SageMaker Domain IDs for which this MLflow App is the default.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the MLflow App was created.

" + }, + "CreatedBy":{"shape":"UserContext"}, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the MLflow App was last modified.

" + }, + "LastModifiedBy":{"shape":"UserContext"}, + "WeeklyMaintenanceWindowStart":{ + "shape":"WeeklyMaintenanceWindowStart", + "documentation":"

The day and time of the week when weekly maintenance occurs.

" + }, + "MaintenanceStatus":{ + "shape":"MaintenanceStatus", + "documentation":"

Current maintenance status of the MLflow App.

" + } + } + }, "DescribeMlflowTrackingServerRequest":{ "type":"structure", "required":["TrackingServerName"], @@ -18787,6 +19172,10 @@ "shape":"ModelPackageVersion", "documentation":"

The version of the model package.

" }, + "ModelPackageRegistrationType":{ + "shape":"ModelPackageRegistrationType", + "documentation":"

The package registration type of the model package output.

" + }, "ModelPackageArn":{ "shape":"ModelPackageArn", "documentation":"

The Amazon Resource Name (ARN) of the model package.

" @@ -19435,6 +19824,10 @@ "PipelineVersionId":{ "shape":"PipelineVersionId", "documentation":"

The ID of the pipeline version.

" + }, + "MLflowConfig":{ + "shape":"MLflowConfiguration", + "documentation":"

The MLflow configuration of the pipeline execution.

" } } }, @@ -19939,7 +20332,7 @@ }, "SecondaryStatus":{ "shape":"SecondaryStatus", - "documentation":"

Provides detailed information about the state of the training job. For detailed information on the secondary status of the training job, see StatusMessage under SecondaryStatusTransition.

SageMaker provides primary statuses and secondary statuses that apply to each of them:

InProgress
  • Starting - Starting the training job.

  • Downloading - An optional stage for algorithms that support File training input mode. It indicates that data is being downloaded to the ML storage volumes.

  • Training - Training is in progress.

  • Interrupted - The job stopped because the managed spot training instances were interrupted.

  • Uploading - Training is complete and the model artifacts are being uploaded to the S3 location.

Completed
  • Completed - The training job has completed.

Failed
  • Failed - The training job has failed. The reason for the failure is returned in the FailureReason field of DescribeTrainingJobResponse.

Stopped
  • MaxRuntimeExceeded - The job stopped because it exceeded the maximum allowed runtime.

  • MaxWaitTimeExceeded - The job stopped because it exceeded the maximum allowed wait time.

  • Stopped - The training job has stopped.

Stopping
  • Stopping - Stopping the training job.

Valid values for SecondaryStatus are subject to change.

We no longer support the following secondary statuses:

" + "documentation":"

Provides detailed information about the state of the training job. For detailed information on the secondary status of the training job, see StatusMessage under SecondaryStatusTransition.

SageMaker provides primary statuses and secondary statuses that apply to each of them:

InProgress
  • Starting - Starting the training job.

  • Pending - The training job is waiting for compute capacity or compute resource provision.

  • Downloading - An optional stage for algorithms that support File training input mode. It indicates that data is being downloaded to the ML storage volumes.

  • Training - Training is in progress.

  • Interrupted - The job stopped because the managed spot training instances were interrupted.

  • Uploading - Training is complete and the model artifacts are being uploaded to the S3 location.

Completed
  • Completed - The training job has completed.

Failed
  • Failed - The training job has failed. The reason for the failure is returned in the FailureReason field of DescribeTrainingJobResponse.

Stopped
  • MaxRuntimeExceeded - The job stopped because it exceeded the maximum allowed runtime.

  • MaxWaitTimeExceeded - The job stopped because it exceeded the maximum allowed wait time.

  • Stopped - The training job has stopped.

Stopping
  • Stopping - Stopping the training job.

Valid values for SecondaryStatus are subject to change.

We no longer support the following secondary statuses:

" }, "FailureReason":{ "shape":"FailureReason", @@ -20029,6 +20422,10 @@ "shape":"BillableTimeInSeconds", "documentation":"

The billable time in seconds. Billable time refers to the absolute wall-clock time.

Multiply BillableTimeInSeconds by the number of instances (InstanceCount) in your training cluster to get the total compute time SageMaker bills you if you run distributed training. The formula is as follows: BillableTimeInSeconds * InstanceCount .

You can calculate the savings from using managed spot training using the formula (1 - BillableTimeInSeconds / TrainingTimeInSeconds) * 100. For example, if BillableTimeInSeconds is 100 and TrainingTimeInSeconds is 500, the savings is 80%.

" }, + "BillableTokenCount":{ + "shape":"BillableTokenCount", + "documentation":"

The billable token count for eligible serverless training jobs.

" + }, "DebugHookConfig":{"shape":"DebugHookConfig"}, "ExperimentConfig":{"shape":"ExperimentConfig"}, "DebugRuleConfigurations":{ @@ -20068,6 +20465,30 @@ "InfraCheckConfig":{ "shape":"InfraCheckConfig", "documentation":"

Contains information about the infrastructure health check configuration for the training job.

" + }, + "ServerlessJobConfig":{ + "shape":"ServerlessJobConfig", + "documentation":"

The configuration for serverless training jobs.

" + }, + "MlflowConfig":{ + "shape":"MlflowConfig", + "documentation":"

The MLflow configuration using SageMaker managed MLflow.

" + }, + "ModelPackageConfig":{ + "shape":"ModelPackageConfig", + "documentation":"

The configuration for the model package.

" + }, + "MlflowDetails":{ + "shape":"MlflowDetails", + "documentation":"

The MLflow details of this job.

" + }, + "ProgressInfo":{ + "shape":"TrainingProgressInfo", + "documentation":"

The Serverless training job progress information.

" + }, + "OutputModelPackageArn":{ + "shape":"ModelPackageArn", + "documentation":"

The Amazon Resource Name (ARN) of the output model package containing model weights or checkpoints.

" } } }, @@ -22305,6 +22726,18 @@ }, "documentation":"

This is an error field object that contains the error code and the reason for an operation failure.

" }, + "EvaluationType":{ + "type":"string", + "enum":[ + "LLMAJEvaluation", + "CustomScorerEvaluation", + "BenchmarkEvaluation" + ] + }, + "EvaluatorArn":{ + "type":"string", + "pattern":".*" + }, "EventDetails":{ "type":"structure", "members":{ @@ -23704,7 +24137,7 @@ }, "HubContentDocument":{ "type":"string", - "max":65535, + "max":170391, "min":0, "pattern":".*" }, @@ -23781,7 +24214,7 @@ }, "HubContentMarkdown":{ "type":"string", - "max":65535, + "max":170391, "min":0 }, "HubContentName":{ @@ -23790,9 +24223,15 @@ "min":0, "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}" }, + "HubContentSearchKeyword":{ + "type":"string", + "max":255, + "min":0, + "pattern":".*" + }, "HubContentSearchKeywordList":{ "type":"list", - "member":{"shape":"HubSearchKeyword"}, + "member":{"shape":"HubContentSearchKeyword"}, "max":50, "min":0 }, @@ -23811,7 +24250,9 @@ "Importing", "Deleting", "ImportFailed", - "DeleteFailed" + "DeleteFailed", + "PendingImport", + "PendingDelete" ] }, "HubContentSupportStatus":{ @@ -23827,7 +24268,9 @@ "enum":[ "Model", "Notebook", - "ModelReference" + "ModelReference", + "DataSet", + "JsonDoc" ] }, "HubContentVersion":{ @@ -23836,6 +24279,12 @@ "min":5, "pattern":"\\d{1,4}.\\d{1,4}.\\d{1,4}" }, + "HubDataSetArn":{ + "type":"string", + "max":2048, + "min":0, + "pattern":"(arn:[a-z0-9-\\.]{1,63}:sagemaker:\\w+(?:-\\w+)+:(\\d{12}|aws):hub-content\\/)[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}\\/DataSet\\/[a-zA-Z0-9](-*[a-zA-Z0-9]){0,63}(\\/\\d{1,4}.\\d{1,4}.\\d{1,4})?" + }, "HubDescription":{ "type":"string", "max":1023, @@ -25373,6 +25822,16 @@ }, "documentation":"

The deployment configuration for an endpoint that hosts inference components. The configuration includes the desired deployment strategy and rollback settings.

" }, + "InferenceComponentMetadata":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"String2048", + "documentation":"

The Amazon Resource Name (ARN) of the inference component metadata.

" + } + }, + "documentation":"

The metadata of the inference component.

" + }, "InferenceComponentName":{ "type":"string", "max":63, @@ -26555,7 +27014,7 @@ "type":"integer", "documentation":"

Optional. Customer requested period in seconds for which the Training cluster is kept alive after the job is finished.

", "box":true, - "max":3600, + "max":21600, "min":0 }, "KendraSettings":{ @@ -27089,6 +27548,28 @@ }, "documentation":"

Lists a summary of the properties of a lineage group. A lineage group provides a group of shareable lineage entity resources.

" }, + "LineageMetadata":{ + "type":"structure", + "members":{ + "ActionArns":{ + "shape":"MapString2048", + "documentation":"

The Amazon Resource Name (ARN) of the lineage metadata action.

" + }, + "ArtifactArns":{ + "shape":"MapString2048", + "documentation":"

The Amazon Resource Name (ARN) of the lineage metadata artifact.

" + }, + "ContextArns":{ + "shape":"MapString2048", + "documentation":"

The Amazon Resource Name (ARN) of the lineage metadata context.

" + }, + "Associations":{ + "shape":"AssociationInfoList", + "documentation":"

The lineage metadata associations.

" + } + }, + "documentation":"

The metadata that tracks relationships between ML artifacts, actions, and contexts.

" + }, "LineageType":{ "type":"string", "enum":[ @@ -29367,6 +29848,64 @@ "type":"integer", "max":100 }, + "ListMlflowAppsRequest":{ + "type":"structure", + "members":{ + "CreatedAfter":{ + "shape":"Timestamp", + "documentation":"

Use the CreatedAfter filter to only list MLflow Apps created after a specific date and time. Listed MLflow Apps are shown with a date and time such as \"2024-03-16T01:46:56+00:00\". The CreatedAfter parameter takes in a Unix timestamp.

" + }, + "CreatedBefore":{ + "shape":"Timestamp", + "documentation":"

Use the CreatedBefore filter to only list MLflow Apps created before a specific date and time. Listed MLflow Apps are shown with a date and time such as \"2024-03-16T01:46:56+00:00\". The CreatedBefore parameter takes in a Unix timestamp.

" + }, + "Status":{ + "shape":"MlflowAppStatus", + "documentation":"

Filter for MLflow Apps with a specific creation status.

" + }, + "MlflowVersion":{ + "shape":"MlflowVersion", + "documentation":"

Filter for MLflow Apps with the specified version.

" + }, + "DefaultForDomainId":{ + "shape":"String", + "documentation":"

Filter for MLflow Apps with the specified default SageMaker Domain ID.

" + }, + "AccountDefaultStatus":{ + "shape":"AccountDefaultStatus", + "documentation":"

Filter for MLflow Apps with the specified AccountDefaultStatus.

" + }, + "SortBy":{ + "shape":"SortMlflowAppBy", + "documentation":"

Filter for MLflow Apps sorting by name, creation time, or creation status.

" + }, + "SortOrder":{ + "shape":"SortOrder", + "documentation":"

Change the order of the listed MLflow Apps. By default, MLflow Apps are listed in Descending order by creation time. To change the list order, specify SortOrder to be Ascending.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the previous response was truncated, use this token in your next request to receive the next set of results.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of MLflow Apps to list.

" + } + } + }, + "ListMlflowAppsResponse":{ + "type":"structure", + "members":{ + "Summaries":{ + "shape":"MlflowAppSummaries", + "documentation":"

A list of MLflow Apps according to chosen filters.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the previous response was truncated, you will receive this token. Use it in your next request to receive the next set of results.

" + } + } + }, "ListMlflowTrackingServersRequest":{ "type":"structure", "members":{ @@ -31486,6 +32025,34 @@ "min":1, "pattern":"[a-zA-Z]+ ?\\d+\\.\\d+(\\.\\d+)?" }, + "MLflowArn":{ + "type":"string", + "max":2048, + "min":0, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:mlflow-[a-zA-Z-]*/.*" + }, + "MLflowConfiguration":{ + "type":"structure", + "members":{ + "MlflowResourceArn":{ + "shape":"MLflowArn", + "documentation":"

The Amazon Resource Name (ARN) of MLflow configuration resource.

" + }, + "MlflowExperimentName":{ + "shape":"MlflowExperimentEntityName", + "documentation":"

The name of the MLflow configuration.

" + } + }, + "documentation":"

The MLflow configuration.

" + }, + "MaintenanceStatus":{ + "type":"string", + "enum":[ + "MaintenanceInProgress", + "MaintenanceComplete", + "MaintenanceFailed" + ] + }, "MajorMinorVersion":{ "type":"string", "max":64, @@ -31509,6 +32076,13 @@ "DISABLED" ] }, + "MapString2048":{ + "type":"map", + "key":{"shape":"String2048"}, + "value":{"shape":"String2048"}, + "max":5, + "min":0 + }, "MaxAutoMLJobRuntimeInSeconds":{ "type":"integer", "box":true, @@ -31838,6 +32412,13 @@ "min":0, "pattern":"1|2" }, + "MlFlowResourceArn":{ + "type":"string", + "documentation":"

MlflowDetails relevant fields

", + "max":2048, + "min":0, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:mlflow-[a-zA-Z-]*/.*" + }, "MlReservationArn":{ "type":"string", "max":258, @@ -31866,9 +32447,143 @@ "Comet", "DeepchecksLLMEvaluation", "Fiddler", - "HyperPodClusters" + "HyperPodClusters", + "RunningInstances", + "Datasets", + "Evaluators" + ] + }, + "MlflowAppArn":{ + "type":"string", + "max":128, + "min":1, + "pattern":"arn:aws[a-z\\-]*:sagemaker:[a-z0-9\\-]*:[0-9]{12}:mlflow-app/.*" + }, + "MlflowAppName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9]){0,255}" + }, + "MlflowAppStatus":{ + "type":"string", + "enum":[ + "Creating", + "Created", + "CreateFailed", + "Updating", + "Updated", + "UpdateFailed", + "Deleting", + "DeleteFailed", + "Deleted" ] }, + "MlflowAppSummaries":{ + "type":"list", + "member":{"shape":"MlflowAppSummary"}, + "max":100, + "min":0 + }, + "MlflowAppSummary":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"MlflowAppArn", + "documentation":"

The ARN of a listed MLflow App.

" + }, + "Name":{ + "shape":"MlflowAppName", + "documentation":"

The name of the MLflow App.

" + }, + "Status":{ + "shape":"MlflowAppStatus", + "documentation":"

The status of the MLflow App.

" + }, + "CreationTime":{ + "shape":"Timestamp", + "documentation":"

The creation time of a listed MLflow App.

" + }, + "LastModifiedTime":{ + "shape":"Timestamp", + "documentation":"

The last modified time of a listed MLflow App.

" + }, + "MlflowVersion":{ + "shape":"MlflowVersion", + "documentation":"

The version of a listed MLflow App.

" + } + }, + "documentation":"

The summary of the Mlflow App to list.

" + }, + "MlflowAppUrl":{ + "type":"string", + "max":2048, + "min":0 + }, + "MlflowConfig":{ + "type":"structure", + "required":["MlflowResourceArn"], + "members":{ + "MlflowResourceArn":{ + "shape":"MlFlowResourceArn", + "documentation":"

The Amazon Resource Name (ARN) of the MLflow resource.

" + }, + "MlflowExperimentName":{ + "shape":"MlflowExperimentName", + "documentation":"

The MLflow experiment name used for this job.

" + }, + "MlflowRunName":{ + "shape":"MlflowRunName", + "documentation":"

The MLflow run name used for this job.

" + } + }, + "documentation":"

The MLflow configuration using SageMaker managed MLflow.

" + }, + "MlflowDetails":{ + "type":"structure", + "members":{ + "MlflowExperimentId":{ + "shape":"MlflowExperimentId", + "documentation":"

The MLflow experiment ID used for this job.

" + }, + "MlflowRunId":{ + "shape":"MlflowRunId", + "documentation":"

The MLflow run ID used for this job.

" + } + }, + "documentation":"

The MLflow details of this job.

" + }, + "MlflowExperimentEntityName":{ + "type":"string", + "max":256, + "min":1, + "pattern":".*" + }, + "MlflowExperimentId":{ + "type":"string", + "max":256, + "min":1, + "pattern":".*" + }, + "MlflowExperimentName":{ + "type":"string", + "documentation":"

MlflowConfig relevant fields

", + "max":256, + "min":1, + "pattern":".*" + }, + "MlflowRunId":{ + "type":"string", + "max":256, + "min":1, + "pattern":".*" + }, + "MlflowRunName":{ + "type":"string", + "max":256, + "min":1, + "pattern":".*" + }, "MlflowVersion":{ "type":"string", "max":16, @@ -32846,6 +33561,10 @@ "shape":"ModelPackageVersion", "documentation":"

The version number of a versioned model.

" }, + "ModelPackageRegistrationType":{ + "shape":"ModelPackageRegistrationType", + "documentation":"

The package registration type of the model package.

" + }, "ModelPackageArn":{ "shape":"ModelPackageArn", "documentation":"

The Amazon Resource Name (ARN) of the model package.

" @@ -32968,6 +33687,21 @@ "max":100, "min":1 }, + "ModelPackageConfig":{ + "type":"structure", + "required":["ModelPackageGroupArn"], + "members":{ + "ModelPackageGroupArn":{ + "shape":"ModelPackageGroupArn", + "documentation":"

The Amazon Resource Name (ARN) of the model package group of output model package.

" + }, + "SourceModelPackageArn":{ + "shape":"ModelPackageArn", + "documentation":"

The Amazon Resource Name (ARN) of the source model package used for continued fine-tuning and custom model evaluation.

" + } + }, + "documentation":"

The configuration for the Model package.

" + }, "ModelPackageContainerDefinition":{ "type":"structure", "members":{ @@ -33022,6 +33756,15 @@ "ModelDataETag":{ "shape":"String", "documentation":"

The ETag associated with Model Data URL.

" + }, + "IsCheckpoint":{ + "shape":"Boolean", + "documentation":"

The checkpoint of the model package.

", + "box":true + }, + "BaseModel":{ + "shape":"BaseModel", + "documentation":"

The base model of the package.

" } }, "documentation":"

Describes the Docker container for the model package.

" @@ -33143,6 +33886,13 @@ }, "documentation":"

The model card associated with the model package. Since ModelPackageModelCard is tied to a model package, it is a specific usage of a model card and its schema is simplified compared to the schema of ModelCard. The ModelPackageModelCard schema does not include model_package_details, and model_overview is composed of the model_creator and model_artifact properties. For more information about the model package model card schema, see Model package model card schema. For more information about the model card associated with the model package, see View the Details of a Model Version.

" }, + "ModelPackageRegistrationType":{ + "type":"string", + "enum":[ + "Logged", + "Registered" + ] + }, "ModelPackageSecurityConfig":{ "type":"structure", "required":["KmsKeyId"], @@ -33263,7 +34013,11 @@ "shape":"ModelApprovalStatus", "documentation":"

The approval status of the model. This can be one of the following values.

" }, - "ModelLifeCycle":{"shape":"ModelLifeCycle"} + "ModelLifeCycle":{"shape":"ModelLifeCycle"}, + "ModelPackageRegistrationType":{ + "shape":"ModelPackageRegistrationType", + "documentation":"

The package registration type of the model package summary.

" + } }, "documentation":"

Provides summary information about a model package.

" }, @@ -33430,6 +34184,13 @@ }, "documentation":"

The model registry settings for the SageMaker Canvas application.

" }, + "ModelRegistrationMode":{ + "type":"string", + "enum":[ + "AutoModelRegistrationEnabled", + "AutoModelRegistrationDisabled" + ] + }, "ModelSetupTime":{ "type":"integer", "box":true, @@ -35703,6 +36464,10 @@ "fiddler" ] }, + "Peft":{ + "type":"string", + "enum":["LORA"] + }, "PendingDeploymentSummary":{ "type":"structure", "required":["EndpointConfigName"], @@ -36127,6 +36892,30 @@ "EndpointConfig":{ "shape":"EndpointConfigStepMetadata", "documentation":"

The endpoint configuration used to create an endpoint during this step execution.

" + }, + "BedrockCustomModel":{ + "shape":"BedrockCustomModelMetadata", + "documentation":"

The metadata of the Amazon Bedrock custom model used in the pipeline execution step.

" + }, + "BedrockCustomModelDeployment":{ + "shape":"BedrockCustomModelDeploymentMetadata", + "documentation":"

The metadata of the Amazon Bedrock custom model deployment used in pipeline execution step.

" + }, + "BedrockProvisionedModelThroughput":{ + "shape":"BedrockProvisionedModelThroughputMetadata", + "documentation":"

The metadata of the Amazon Bedrock provisioned model throughput used in the pipeline execution step.

" + }, + "BedrockModelImport":{ + "shape":"BedrockModelImportMetadata", + "documentation":"

The metadata of Amazon Bedrock model import used in pipeline execution step.

" + }, + "InferenceComponent":{ + "shape":"InferenceComponentMetadata", + "documentation":"

The metadata of the inference component used in pipeline execution step.

" + }, + "Lineage":{ + "shape":"LineageMetadata", + "documentation":"

The metadata of the lineage used in pipeline execution step.

" } }, "documentation":"

Metadata for a step execution.

" @@ -38313,6 +39102,11 @@ "type":"list", "member":{"shape":"ProductionVariantInstanceType"} }, + "RecipeName":{ + "type":"string", + "max":255, + "min":0 + }, "RecommendationFailureReason":{"type":"string"}, "RecommendationJobArn":{ "type":"string", @@ -38987,7 +39781,8 @@ "ml.p6-b200.48xlarge", "ml.p4de.24xlarge", "ml.p6e-gb200.36xlarge", - "ml.p5.4xlarge" + "ml.p5.4xlarge", + "ml.p6-b300.48xlarge" ] }, "ReservedCapacityOffering":{ @@ -39221,7 +40016,7 @@ }, "VolumeSizeInGB":{ "shape":"OptionalVolumeSizeInGB", - "documentation":"

The size of the ML storage volume that you want to provision.

ML storage volumes store model artifacts and incremental states. Training algorithms might also use the ML storage volume for scratch space. If you want to store the training data in the ML storage volume, choose File as the TrainingInputMode in the algorithm specification.

When using an ML instance with NVMe SSD volumes, SageMaker doesn't provision Amazon EBS General Purpose SSD (gp2) storage. Available storage is fixed to the NVMe-type instance's storage capacity. SageMaker configures storage paths for training datasets, checkpoints, model artifacts, and outputs to use the entire capacity of the instance storage. For example, ML instance families with the NVMe-type instance storage include ml.p4d, ml.g4dn, and ml.g5.

When using an ML instance with the EBS-only storage option and without instance storage, you must define the size of EBS volume through VolumeSizeInGB in the ResourceConfig API. For example, ML instance families that use EBS volumes include ml.c5 and ml.p2.

To look up instance types and their instance storage types and volumes, see Amazon EC2 Instance Types.

To find the default local paths defined by the SageMaker training platform, see Amazon SageMaker Training Storage Folders for Training Datasets, Checkpoints, Model Artifacts, and Outputs.

", + "documentation":"

The size of the ML storage volume that you want to provision.

SageMaker automatically selects the volume size for serverless training jobs. You cannot customize this setting.

ML storage volumes store model artifacts and incremental states. Training algorithms might also use the ML storage volume for scratch space. If you want to store the training data in the ML storage volume, choose File as the TrainingInputMode in the algorithm specification.

When using an ML instance with NVMe SSD volumes, SageMaker doesn't provision Amazon EBS General Purpose SSD (gp2) storage. Available storage is fixed to the NVMe-type instance's storage capacity. SageMaker configures storage paths for training datasets, checkpoints, model artifacts, and outputs to use the entire capacity of the instance storage. For example, ML instance families with the NVMe-type instance storage include ml.p4d, ml.g4dn, and ml.g5.

When using an ML instance with the EBS-only storage option and without instance storage, you must define the size of EBS volume through VolumeSizeInGB in the ResourceConfig API. For example, ML instance families that use EBS volumes include ml.c5 and ml.p2.

To look up instance types and their instance storage types and volumes, see Amazon EC2 Instance Types.

To find the default local paths defined by the SageMaker training platform, see Amazon SageMaker Training Storage Folders for Training Datasets, Checkpoints, Model Artifacts, and Outputs.

", "box":true }, "VolumeKmsKeyId":{ @@ -40310,6 +41105,59 @@ } } }, + "ServerlessJobBaseModelArn":{ + "type":"string", + "documentation":"

ServerlessJobConfig relevant fields

", + "max":2048, + "min":1, + "pattern":"(arn:[a-z0-9-\\.]{1,63}:sagemaker:\\w+(?:-\\w+)+:(\\d{12}|aws):hub-content\\/)[a-zA-Z0-9](-*[a-zA-Z0-9]){0,62}\\/Model\\/[a-zA-Z0-9](-*[a-zA-Z0-9]){0,63}(\\/\\d{1,4}.\\d{1,4}.\\d{1,4})?" + }, + "ServerlessJobConfig":{ + "type":"structure", + "required":[ + "BaseModelArn", + "JobType" + ], + "members":{ + "BaseModelArn":{ + "shape":"ServerlessJobBaseModelArn", + "documentation":"

The base model Amazon Resource Name (ARN) in SageMaker Public Hub. SageMaker always selects the latest version of the provided model.

" + }, + "AcceptEula":{ + "shape":"AcceptEula", + "documentation":"

Specifies agreement to the model end-user license agreement (EULA). The AcceptEula value must be explicitly defined as True in order to accept the EULA that this model requires. You are responsible for reviewing and complying with any applicable license terms and making sure they are acceptable for your use case before downloading or using a model. For more information, see End-user license agreements section for more details on accepting the EULA.

", + "box":true + }, + "JobType":{ + "shape":"ServerlessJobType", + "documentation":"

The serverless training job type.

" + }, + "CustomizationTechnique":{ + "shape":"CustomizationTechnique", + "documentation":"

The model customization technique.

" + }, + "Peft":{ + "shape":"Peft", + "documentation":"

The parameter-efficient fine-tuning configuration.

" + }, + "EvaluationType":{ + "shape":"EvaluationType", + "documentation":"

The evaluation job type. Required when serverless job type is Evaluation.

" + }, + "EvaluatorArn":{ + "shape":"EvaluatorArn", + "documentation":"

The evaluator Amazon Resource Name (ARN) used as reward function or reward prompt.

" + } + }, + "documentation":"

The configuration for the serverless training job.

" + }, + "ServerlessJobType":{ + "type":"string", + "enum":[ + "FineTuning", + "Evaluation" + ] + }, "ServerlessMaxConcurrency":{ "type":"integer", "box":true, @@ -40584,6 +41432,14 @@ "CreationTime" ] }, + "SortMlflowAppBy":{ + "type":"string", + "enum":[ + "Name", + "CreationTime", + "Status" + ] + }, "SortOrder":{ "type":"string", "enum":[ @@ -41083,6 +41939,10 @@ "PipelineVersionId":{ "shape":"PipelineVersionId", "documentation":"

The ID of the pipeline version to start execution from.

" + }, + "MlflowExperimentName":{ + "shape":"MlflowExperimentEntityName", + "documentation":"

The name of the MLflow experiment to associate with the started pipeline execution.

" } } }, @@ -42230,6 +43090,12 @@ "box":true, "min":0 }, + "TotalStepCountPerEpoch":{ + "type":"long", + "documentation":"

TrainingProgressInfo relevant fields

", + "box":true, + "min":0 + }, "TrackingServerArn":{ "type":"string", "max":2048, @@ -42432,6 +43298,16 @@ "min":0, "pattern":"[\\S\\s]*" }, + "TrainingEpochCount":{ + "type":"long", + "box":true, + "min":0 + }, + "TrainingEpochIndex":{ + "type":"long", + "box":true, + "min":0 + }, "TrainingImageConfig":{ "type":"structure", "required":["TrainingRepositoryAccessMode"], @@ -42600,7 +43476,8 @@ "ml.r7i.24xlarge", "ml.r7i.48xlarge", "ml.p6e-gb200.36xlarge", - "ml.p5.4xlarge" + "ml.p5.4xlarge", + "ml.p6-b300.48xlarge" ] }, "TrainingInstanceTypes":{ @@ -42737,6 +43614,14 @@ "shape":"DebugRuleEvaluationStatuses", "documentation":"

Information about the evaluation status of the rules for the training job.

" }, + "OutputModelPackageArn":{ + "shape":"ModelPackageArn", + "documentation":"

The output model package Amazon Resource Name (ARN) that contains model weights or checkpoint.

" + }, + "ModelPackageConfig":{ + "shape":"ModelPackageConfig", + "documentation":"

The model package configuration.

" + }, "ProfilerConfig":{"shape":"ProfilerConfig"}, "Environment":{ "shape":"TrainingEnvironmentMap", @@ -43151,6 +44036,28 @@ }, "documentation":"

Details of the training plan.

For more information about how to reserve GPU capacity for your SageMaker HyperPod clusters using Amazon SageMaker Training Plan, see CreateTrainingPlan .

" }, + "TrainingProgressInfo":{ + "type":"structure", + "members":{ + "TotalStepCountPerEpoch":{ + "shape":"TotalStepCountPerEpoch", + "documentation":"

The total step count per epoch.

" + }, + "CurrentStep":{ + "shape":"TrainingStepIndex", + "documentation":"

The current step number.

" + }, + "CurrentEpoch":{ + "shape":"TrainingEpochIndex", + "documentation":"

The current epoch number.

" + }, + "MaxEpoch":{ + "shape":"TrainingEpochCount", + "documentation":"

The maximum number of epochs for this job.

" + } + }, + "documentation":"

The serverless training job progress information.

" + }, "TrainingRepositoryAccessMode":{ "type":"string", "enum":[ @@ -43223,6 +44130,11 @@ }, "documentation":"

Defines how the algorithm is used for a training job.

" }, + "TrainingStepIndex":{ + "type":"long", + "box":true, + "min":0 + }, "TrainingTimeInSeconds":{ "type":"integer", "box":true, @@ -45376,6 +46288,49 @@ } } }, + "UpdateMlflowAppRequest":{ + "type":"structure", + "required":["Arn"], + "members":{ + "Arn":{ + "shape":"MlflowAppArn", + "documentation":"

The ARN of the MLflow App to update.

" + }, + "Name":{ + "shape":"MlflowAppName", + "documentation":"

The name of the MLflow App to update.

" + }, + "ArtifactStoreUri":{ + "shape":"S3Uri", + "documentation":"

The new S3 URI for the general purpose bucket to use as the artifact store for the MLflow App.

" + }, + "ModelRegistrationMode":{ + "shape":"ModelRegistrationMode", + "documentation":"

Whether to enable or disable automatic registration of new MLflow models to the SageMaker Model Registry. To enable automatic model registration, set this value to AutoModelRegistrationEnabled. To disable automatic model registration, set this value to AutoModelRegistrationDisabled. If not specified, AutomaticModelRegistration defaults to AutoModelRegistrationEnabled.

" + }, + "WeeklyMaintenanceWindowStart":{ + "shape":"WeeklyMaintenanceWindowStart", + "documentation":"

The new weekly maintenance window start day and time to update. The maintenance window day and time should be in Coordinated Universal Time (UTC) 24-hour standard time. For example: TUE:03:30.

" + }, + "DefaultDomainIdList":{ + "shape":"DefaultDomainIdList", + "documentation":"

List of SageMaker Domain IDs for which this MLflow App is the default.

" + }, + "AccountDefaultStatus":{ + "shape":"AccountDefaultStatus", + "documentation":"

Indicates whether this MLflow App is the default for the account.

" + } + } + }, + "UpdateMlflowAppResponse":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"MlflowAppArn", + "documentation":"

The ARN of the updated MLflow App.

" + } + } + }, "UpdateMlflowTrackingServerRequest":{ "type":"structure", "required":["TrackingServerName"], @@ -45452,6 +46407,10 @@ "shape":"ModelApprovalStatus", "documentation":"

The approval status of the model.

" }, + "ModelPackageRegistrationType":{ + "shape":"ModelPackageRegistrationType", + "documentation":"

The package registration type of the model package input.

" + }, "ApprovalDescription":{ "shape":"ApprovalDescription", "documentation":"

A description for the approval status of the model.

" diff --git a/awscli/botocore/data/savingsplans/2019-06-28/service-2.json b/awscli/botocore/data/savingsplans/2019-06-28/service-2.json index dd21b71eb3fe..4863d31e3d6e 100644 --- a/awscli/botocore/data/savingsplans/2019-06-28/service-2.json +++ b/awscli/botocore/data/savingsplans/2019-06-28/service-2.json @@ -57,9 +57,10 @@ "output":{"shape":"DescribeSavingsPlanRatesResponse"}, "errors":[ {"shape":"ResourceNotFoundException"}, - {"shape":"ValidationException"} + {"shape":"ValidationException"}, + {"shape":"InternalServerException"} ], - "documentation":"

Describes the rates for the specified Savings Plan.

" + "documentation":"

Describes the rates for a specific, existing Savings Plan.

" }, "DescribeSavingsPlans":{ "name":"DescribeSavingsPlans", @@ -87,7 +88,7 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Describes the offering rates for the specified Savings Plans.

" + "documentation":"

Describes the offering rates for Savings Plans you might want to purchase.

" }, "DescribeSavingsPlansOfferings":{ "name":"DescribeSavingsPlansOfferings", @@ -216,7 +217,8 @@ "type":"string", "enum":[ "CNY", - "USD" + "USD", + "EUR" ] }, "CurrencyList":{ @@ -650,7 +652,7 @@ }, "returnableUntil":{ "shape":"String", - "documentation":"

The time until when a return for the Savings Plan can be requested. If the Savings Plan is not returnable, the field reflects the Savings Plan start time.

" + "documentation":"

The time until when a return for the Savings Plan can be requested. If the Savings Plan is not returnable, the field reflects the Savings Plans start time.

" } }, "documentation":"

Information about a Savings Plan.

" @@ -909,7 +911,16 @@ "EC2", "Fargate", "Lambda", - "SageMaker" + "SageMaker", + "RDS", + "DSQL", + "DynamoDB", + "ElastiCache", + "DocDB", + "Neptune", + "Timestream", + "Keyspaces", + "DMS" ] }, "SavingsPlanProductTypeList":{ @@ -1045,7 +1056,16 @@ "AmazonECS", "AmazonEKS", "AWSLambda", - "AmazonSageMaker" + "AmazonSageMaker", + "AmazonRDS", + "AuroraDSQL", + "AmazonDynamoDB", + "AmazonElastiCache", + "AmazonDocDB", + "AmazonNeptune", + "AmazonTimestream", + "AmazonMCS", + "AWSDatabaseMigrationSvc" ] }, "SavingsPlanRateServiceCodeList":{ @@ -1057,7 +1077,19 @@ "enum":[ "Hrs", "Lambda-GB-Second", - "Request" + "Request", + "ACU-Hr", + "ReadRequestUnits", + "WriteRequestUnits", + "ReadCapacityUnit-Hrs", + "WriteCapacityUnit-Hrs", + "ReplicatedWriteRequestUnits", + "ReplicatedWriteCapacityUnit-Hrs", + "GB-Hours", + "DPU", + "ElastiCacheProcessingUnit", + "DCU-Hr", + "NCU-hr" ] }, "SavingsPlanRateUsageType":{ @@ -1100,7 +1132,8 @@ "enum":[ "Compute", "EC2Instance", - "SageMaker" + "SageMaker", + "Database" ] }, "SavingsPlanTypeList":{ @@ -1131,7 +1164,8 @@ "savings-plan-type", "payment-option", "start", - "end" + "end", + "instance-family" ] }, "ServiceQuotaExceededException":{ diff --git a/awscli/botocore/data/secretsmanager/2017-10-17/service-2.json b/awscli/botocore/data/secretsmanager/2017-10-17/service-2.json index c6f4428c2344..ed203621eff7 100644 --- a/awscli/botocore/data/secretsmanager/2017-10-17/service-2.json +++ b/awscli/botocore/data/secretsmanager/2017-10-17/service-2.json @@ -1107,6 +1107,10 @@ "SortOrder":{ "shape":"SortOrderType", "documentation":"

Secrets are listed by CreatedDate.

" + }, + "SortBy":{ + "shape":"SortByType", + "documentation":"

If not specified, secrets are listed by CreatedDate.

" } } }, @@ -1759,6 +1763,15 @@ "key":{"shape":"SecretVersionIdType"}, "value":{"shape":"SecretVersionStagesType"} }, + "SortByType":{ + "type":"string", + "enum":[ + "created-date", + "last-accessed-date", + "last-changed-date", + "name" + ] + }, "SortOrderType":{ "type":"string", "enum":[ diff --git a/awscli/botocore/data/securityhub/2018-10-26/service-2.json b/awscli/botocore/data/securityhub/2018-10-26/service-2.json index c06aeea9cf6c..0262cde6a045 100644 --- a/awscli/botocore/data/securityhub/2018-10-26/service-2.json +++ b/awscli/botocore/data/securityhub/2018-10-26/service-2.json @@ -233,7 +233,7 @@ {"shape":"ThrottlingException"}, {"shape":"ConflictException"} ], - "documentation":"

Used by customers to update information about their investigation into a finding. Requested by delegated administrator accounts or member accounts. Delegated administrator accounts can update findings for their account and their member accounts. Member accounts can update findings for their account. BatchUpdateFindings and BatchUpdateFindingV2 both use securityhub:BatchUpdateFindings in the Action element of an IAM policy statement. You must have permission to perform the securityhub:BatchUpdateFindings action. Updates from BatchUpdateFindingsV2 don't affect the value of finding_info.modified_time, finding_info.modified_time_dt, time, time_dt for a finding. This API is in public preview and subject to change.

" + "documentation":"

Used by customers to update information about their investigation into a finding. Requested by delegated administrator accounts or member accounts. Delegated administrator accounts can update findings for their account and their member accounts. Member accounts can update findings for their account. BatchUpdateFindings and BatchUpdateFindingV2 both use securityhub:BatchUpdateFindings in the Action element of an IAM policy statement. You must have permission to perform the securityhub:BatchUpdateFindings action. Updates from BatchUpdateFindingsV2 don't affect the value of finding_info.modified_time, finding_info.modified_time_dt, time, time_dt for a finding.

" }, "BatchUpdateStandardsControlAssociations":{ "name":"BatchUpdateStandardsControlAssociations", @@ -252,24 +252,6 @@ ], "documentation":"

For a batch of security controls and standards, this operation updates the enablement status of a control in a standard.

" }, - "ConnectorRegistrationsV2":{ - "name":"ConnectorRegistrationsV2", - "http":{ - "method":"POST", - "requestUri":"/connectorsv2/registrations" - }, - "input":{"shape":"ConnectorRegistrationsV2Request"}, - "output":{"shape":"ConnectorRegistrationsV2Response"}, - "errors":[ - {"shape":"AccessDeniedException"}, - {"shape":"InternalServerException"}, - {"shape":"ValidationException"}, - {"shape":"ThrottlingException"}, - {"shape":"ConflictException"}, - {"shape":"ResourceNotFoundException"} - ], - "documentation":"

Grants permission to complete the authorization based on input parameters. This API is in public preview and subject to change.

" - }, "CreateActionTarget":{ "name":"CreateActionTarget", "http":{ @@ -301,9 +283,10 @@ {"shape":"AccessDeniedException"}, {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ConflictException"} + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Enables aggregation across Amazon Web Services Regions. This API is in public preview and subject to change.

" + "documentation":"

Enables aggregation across Amazon Web Services Regions.

" }, "CreateAutomationRule":{ "name":"CreateAutomationRule", @@ -335,9 +318,10 @@ {"shape":"InternalServerException"}, {"shape":"ValidationException"}, {"shape":"ThrottlingException"}, - {"shape":"ConflictException"} + {"shape":"ConflictException"}, + {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Creates a V2 automation rule. This API is in public preview and subject to change.

" + "documentation":"

Creates a V2 automation rule.

" }, "CreateConfigurationPolicy":{ "name":"CreateConfigurationPolicy", @@ -371,9 +355,10 @@ {"shape":"ValidationException"}, {"shape":"ThrottlingException"}, {"shape":"ConflictException"}, - {"shape":"ResourceNotFoundException"} + {"shape":"ResourceNotFoundException"}, + {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Grants permission to create a connectorV2 based on input parameters. This API is in public preview and subject to change.

" + "documentation":"

Grants permission to create a connectorV2 based on input parameters.

" }, "CreateFindingAggregator":{ "name":"CreateFindingAggregator", @@ -443,7 +428,7 @@ {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Grants permission to create a ticket in the chosen ITSM based on finding information for the provided finding metadata UID. This API is in public preview and subject to change.

" + "documentation":"

Grants permission to create a ticket in the chosen ITSM based on finding information for the provided finding metadata UID.

" }, "DeclineInvitations":{ "name":"DeclineInvitations", @@ -493,7 +478,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ConflictException"} ], - "documentation":"

Deletes the Aggregator V2. This API is in public preview and subject to change.

" + "documentation":"

Deletes the Aggregator V2.

" }, "DeleteAutomationRuleV2":{ "name":"DeleteAutomationRuleV2", @@ -511,7 +496,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ConflictException"} ], - "documentation":"

Deletes a V2 automation rule. This API is in public preview and subject to change.

" + "documentation":"

Deletes a V2 automation rule.

" }, "DeleteConfigurationPolicy":{ "name":"DeleteConfigurationPolicy", @@ -548,7 +533,7 @@ {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Grants permission to delete a connectorV2. This API is in public preview and subject to change.

" + "documentation":"

Grants permission to delete a connectorV2.

" }, "DeleteFindingAggregator":{ "name":"DeleteFindingAggregator", @@ -699,7 +684,7 @@ {"shape":"ValidationException"}, {"shape":"ConflictException"} ], - "documentation":"

Gets information about the product integration. This API is in public preview and subject to change.

" + "documentation":"

Gets information about the product integration.

" }, "DescribeSecurityHubV2":{ "name":"DescribeSecurityHubV2", @@ -715,7 +700,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns details about the service resource in your account. This API is in public preview and subject to change.

" + "documentation":"

Returns details about the service resource in your account.

" }, "DescribeStandards":{ "name":"DescribeStandards", @@ -813,7 +798,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Disable the service for the current Amazon Web Services Region or specified Amazon Web Services Region. This API is in public preview and subject to change.

" + "documentation":"

Disable the service for the current Amazon Web Services Region or specified Amazon Web Services Region.

" }, "DisassociateFromAdministratorAccount":{ "name":"DisassociateFromAdministratorAccount", @@ -934,7 +919,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Enables the service in account for the current Amazon Web Services Region or specified Amazon Web Services Region. This API is in public preview and subject to change.

" + "documentation":"

Enables the service in account for the current Amazon Web Services Region or specified Amazon Web Services Region.

" }, "GetAdministratorAccount":{ "name":"GetAdministratorAccount", @@ -969,7 +954,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ConflictException"} ], - "documentation":"

Returns the configuration of the specified Aggregator V2. This API is in public preview and subject to change.

" + "documentation":"

Returns the configuration of the specified Aggregator V2.

" }, "GetAutomationRuleV2":{ "name":"GetAutomationRuleV2", @@ -987,7 +972,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ConflictException"} ], - "documentation":"

Returns an automation rule for the V2 service. This API is in public preview and subject to change.

" + "documentation":"

Returns an automation rule for the V2 service.

" }, "GetConfigurationPolicy":{ "name":"GetConfigurationPolicy", @@ -1041,7 +1026,7 @@ {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Grants permission to retrieve details for a connectorV2 based on connector id. This API is in public preview and subject to change.

" + "documentation":"

Grants permission to retrieve details for a connectorV2 based on connector id.

" }, "GetEnabledStandards":{ "name":"GetEnabledStandards", @@ -1108,7 +1093,7 @@ {"shape":"ConflictException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Returns aggregated statistical data about findings. GetFindingStatisticsV2 use securityhub:GetAdhocInsightResults in the Action element of an IAM policy statement. You must have permission to perform the s action. This API is in public preview and subject to change.

" + "documentation":"

Returns aggregated statistical data about findings. GetFindingStatisticsV2 uses securityhub:GetAdhocInsightResults in the Action element of an IAM policy statement. You must have permission to perform the securityhub:GetAdhocInsightResults action.

" }, "GetFindings":{ "name":"GetFindings", @@ -1140,7 +1125,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns findings trend data based on the specified criteria. This operation helps you analyze patterns and changes in findings over time. This API is in public preview and subject to change.

" + "documentation":"

Returns findings trend data based on the specified criteria. This operation helps you analyze patterns and changes in findings over time.

" }, "GetFindingsV2":{ "name":"GetFindingsV2", @@ -1157,7 +1142,7 @@ {"shape":"ConflictException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Return a list of findings that match the specified criteria. GetFindings and GetFindingsV2 both use securityhub:GetFindings in the Action element of an IAM policy statement. You must have permission to perform the securityhub:GetFindings action. This API is in public preview and subject to change.

" + "documentation":"

Return a list of findings that match the specified criteria. GetFindings and GetFindingsV2 both use securityhub:GetFindings in the Action element of an IAM policy statement. You must have permission to perform the securityhub:GetFindings action.

" }, "GetInsightResults":{ "name":"GetInsightResults", @@ -1261,7 +1246,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Retrieves statistical information about Amazon Web Services resources and their associated security findings. This API is in public preview and subject to change.

" + "documentation":"

Retrieves statistical information about Amazon Web Services resources and their associated security findings.

" }, "GetResourcesTrendsV2":{ "name":"GetResourcesTrendsV2", @@ -1277,7 +1262,7 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Returns resource trend data based on the specified criteria. This operation helps you analyze patterns and changes in resource compliance over time. This API is in public preview and subject to change.

" + "documentation":"

Returns resource trend data based on the specified criteria. This operation helps you analyze patterns and changes in resource compliance over time.

" }, "GetResourcesV2":{ "name":"GetResourcesV2", @@ -1295,7 +1280,7 @@ {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Returns a list of resources. This API is in public preview and subject to change.

" + "documentation":"

Returns a list of resources.

" }, "GetSecurityControlDefinition":{ "name":"GetSecurityControlDefinition", @@ -1347,7 +1332,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ConflictException"} ], - "documentation":"

Retrieves a list of V2 aggregators. This API is in public preview and subject to change.

" + "documentation":"

Retrieves a list of V2 aggregators.

" }, "ListAutomationRules":{ "name":"ListAutomationRules", @@ -1381,7 +1366,7 @@ {"shape":"ThrottlingException"}, {"shape":"ConflictException"} ], - "documentation":"

Returns a list of automation rules and metadata for the calling account. This API is in public preview and subject to change.

" + "documentation":"

Returns a list of automation rules and metadata for the calling account.

" }, "ListConfigurationPolicies":{ "name":"ListConfigurationPolicies", @@ -1434,7 +1419,7 @@ {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Grants permission to retrieve a list of connectorsV2 and their metadata for the calling account. This API is in public preview and subject to change.

" + "documentation":"

Grants permission to retrieve a list of connectorsV2 and their metadata for the calling account.

" }, "ListEnabledProductsForImport":{ "name":"ListEnabledProductsForImport", @@ -1563,6 +1548,24 @@ ], "documentation":"

Returns a list of tags associated with a resource.

" }, + "RegisterConnectorV2":{ + "name":"RegisterConnectorV2", + "http":{ + "method":"POST", + "requestUri":"/connectorsv2/register" + }, + "input":{"shape":"RegisterConnectorV2Request"}, + "output":{"shape":"RegisterConnectorV2Response"}, + "errors":[ + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ValidationException"}, + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Grants permission to complete the authorization based on input parameters.

" + }, "StartConfigurationPolicyAssociation":{ "name":"StartConfigurationPolicyAssociation", "http":{ @@ -1662,7 +1665,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"ConflictException"} ], - "documentation":"

Udpates the configuration for the Aggregator V2. This API is in public preview and subject to change.

" + "documentation":"

Updates the configuration for the Aggregator V2.

" }, "UpdateAutomationRuleV2":{ "name":"UpdateAutomationRuleV2", @@ -1680,7 +1683,7 @@ {"shape":"ConflictException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Updates a V2 automation rule. This API is in public preview and subject to change.

" + "documentation":"

Updates a V2 automation rule.

" }, "UpdateConfigurationPolicy":{ "name":"UpdateConfigurationPolicy", @@ -1717,7 +1720,7 @@ {"shape":"ConflictException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Grants permission to update a connectorV2 based on its id and input parameters. This API is in public preview and subject to change.

" + "documentation":"

Grants permission to update a connectorV2 based on its id and input parameters.

" }, "UpdateFindingAggregator":{ "name":"UpdateFindingAggregator", @@ -3862,7 +3865,7 @@ }, "SecurityGroups":{ "shape":"NonEmptyStringList", - "documentation":"

The security groups to assign to the instances in the Auto Scaling group.

" + "documentation":"

The security groups to assign to the instances in the Amazon EC2 Auto Scaling group.

" }, "SpotPrice":{ "shape":"NonEmptyString", @@ -4743,7 +4746,7 @@ "documentation":"

An origin that is not an Amazon S3 bucket, with one exception. If the Amazon S3 bucket is configured with static website hosting, use this attribute. If the Amazon S3 bucket is not configured with static website hosting, use the S3OriginConfig type instead.

" } }, - "documentation":"

A complex type that describes the Amazon S3 bucket, HTTP server (for example, a web server), Elemental MediaStore, or other server from which CloudFront gets your files.

" + "documentation":"

A complex type that describes the Amazon S3 bucket, HTTP server (for example, a web server), or other server from which CloudFront gets your files.

" }, "AwsCloudFrontDistributionOriginItemList":{ "type":"list", @@ -8292,7 +8295,7 @@ }, "HealthCheckGracePeriodSeconds":{ "shape":"Integer", - "documentation":"

After a task starts, the amount of time in seconds that the Amazon ECS service scheduler ignores unhealthy Elastic Load Balancing target health checks.

" + "documentation":"

After a task starts, the amount of time in seconds that the Amazon ECS service scheduler ignores unhealthy ELB target health checks.

" }, "LaunchType":{ "shape":"NonEmptyString", @@ -8328,7 +8331,7 @@ }, "Role":{ "shape":"NonEmptyString", - "documentation":"

The ARN of the IAM role that is associated with the service. The role allows the Amazon ECS container agent to register container instances with an Elastic Load Balancing load balancer.

" + "documentation":"

The ARN of the IAM role that is associated with the service. The role allows the Amazon ECS container agent to register container instances with an ELB load balancer.

" }, "SchedulingStrategy":{ "shape":"NonEmptyString", @@ -8370,7 +8373,7 @@ }, "TargetGroupArn":{ "shape":"NonEmptyString", - "documentation":"

The ARN of the Elastic Load Balancing target group or groups associated with a service or task set.

Only specified when using an Application Load Balancer or a Network Load Balancer. For a Classic Load Balancer, the target group ARN is omitted.

" + "documentation":"

The ARN of the ELB target group or groups associated with a service or task set.

Only specified when using an Application Load Balancer or a Network Load Balancer. For a Classic Load Balancer, the target group ARN is omitted.

" } }, "documentation":"

Information about a load balancer that the service uses.

" @@ -16908,37 +16911,6 @@ "SERVICENOW" ] }, - "ConnectorRegistrationsV2Request":{ - "type":"structure", - "required":[ - "AuthCode", - "AuthState" - ], - "members":{ - "AuthCode":{ - "shape":"NonEmptyString", - "documentation":"

The authCode retrieved from authUrl to complete the OAuth 2.0 authorization code flow.

" - }, - "AuthState":{ - "shape":"NonEmptyString", - "documentation":"

The authState retrieved from authUrl to complete the OAuth 2.0 authorization code flow.

" - } - } - }, - "ConnectorRegistrationsV2Response":{ - "type":"structure", - "required":["ConnectorId"], - "members":{ - "ConnectorArn":{ - "shape":"NonEmptyString", - "documentation":"

The Amazon Resource Name (ARN) of the connectorV2.

" - }, - "ConnectorId":{ - "shape":"NonEmptyString", - "documentation":"

The UUID of the connectorV2 to identify connectorV2 resource.

" - } - } - }, "ConnectorStatus":{ "type":"string", "enum":[ @@ -17331,7 +17303,10 @@ }, "CreateConnectorV2Response":{ "type":"structure", - "required":["ConnectorId"], + "required":[ + "ConnectorArn", + "ConnectorId" + ], "members":{ "ConnectorArn":{ "shape":"NonEmptyString", @@ -17344,6 +17319,10 @@ "AuthUrl":{ "shape":"NonEmptyString", "documentation":"

The URL provided to customers for the OAuth authorization code flow.

" + }, + "ConnectorStatus":{ + "shape":"ConnectorStatus", + "documentation":"

The current status of the connectorV2.

" } } }, @@ -17452,6 +17431,10 @@ "shape":"ClientToken", "documentation":"

The client idempotency token.

", "idempotencyToken":true + }, + "Mode":{ + "shape":"TicketCreationMode", + "documentation":"

The mode for ticket creation. When set to DRYRUN, the ticket is created using a Security Hub owned template test finding to verify the integration is working correctly.

" } } }, @@ -19377,6 +19360,7 @@ "compliance.status", "compliance.control", "finding_info.title", + "finding_info.related_events.traits.category", "finding_info.types", "metadata.product.name", "metadata.product.uid", @@ -19389,7 +19373,8 @@ "vulnerabilities.affected_packages.name", "finding_info.analytic.name", "compliance.standards", - "cloud.account.name" + "cloud.account.name", + "vendor_attributes.severity" ] }, "GroupByResult":{ @@ -19865,7 +19850,6 @@ }, "JiraCloudUpdateConfiguration":{ "type":"structure", - "required":["ProjectKey"], "members":{ "ProjectKey":{ "shape":"NonEmptyString", @@ -21013,7 +20997,9 @@ "evidences.dst_endpoint.port", "evidences.src_endpoint.autonomous_system.number", "evidences.src_endpoint.port", - "resources.image.in_use_count" + "resources.image.in_use_count", + "vulnerabilities.cve.cvss.base_score", + "vendor_attributes.severity_id" ] }, "OcsfNumberFilter":{ @@ -21049,6 +21035,7 @@ "finding_info.title", "finding_info.types", "finding_info.uid", + "finding_info.related_events.traits.category", "finding_info.related_events.uid", "finding_info.related_events.product.uid", "finding_info.related_events.title", @@ -21102,7 +21089,8 @@ "vulnerabilities.cve.epss.score", "vulnerabilities.cve.uid", "vulnerabilities.related_vulnerabilities", - "cloud.account.name" + "cloud.account.name", + "vendor_attributes.severity" ] }, "OcsfStringFilter":{ @@ -21585,6 +21573,10 @@ "JiraCloud":{ "shape":"JiraCloudUpdateConfiguration", "documentation":"

The parameters required to update the configuration for a Jira Cloud integration.

" + }, + "ServiceNow":{ + "shape":"ServiceNowUpdateConfiguration", + "documentation":"

The parameters required to update the configuration for a ServiceNow integration.

" } }, "documentation":"

The parameters required to update the configuration of an integration provider.

", @@ -21663,6 +21655,37 @@ "UNAVAILABLE" ] }, + "RegisterConnectorV2Request":{ + "type":"structure", + "required":[ + "AuthCode", + "AuthState" + ], + "members":{ + "AuthCode":{ + "shape":"NonEmptyString", + "documentation":"

The authCode retrieved from authUrl to complete the OAuth 2.0 authorization code flow.

" + }, + "AuthState":{ + "shape":"NonEmptyString", + "documentation":"

The authState retrieved from authUrl to complete the OAuth 2.0 authorization code flow.

" + } + } + }, + "RegisterConnectorV2Response":{ + "type":"structure", + "required":["ConnectorId"], + "members":{ + "ConnectorArn":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Resource Name (ARN) of the connectorV2.

" + }, + "ConnectorId":{ + "shape":"NonEmptyString", + "documentation":"

The UUID of the connectorV2 that identifies the connectorV2 resource.

" + } + } + }, "RelatedFinding":{ "type":"structure", "required":[ @@ -23334,11 +23357,6 @@ "type":"list", "member":{"shape":"SensitiveDataResult"} }, - "SensitiveNonEmptyString":{ - "type":"string", - "pattern":".*\\S.*", - "sensitive":true - }, "Sequence":{ "type":"structure", "members":{ @@ -23367,19 +23385,22 @@ }, "ServiceNowDetail":{ "type":"structure", - "required":["AuthStatus"], + "required":[ + "SecretArn", + "AuthStatus" + ], "members":{ "InstanceName":{ "shape":"NonEmptyString", "documentation":"

The instanceName of ServiceNow ITSM.

" }, - "ClientId":{ + "SecretArn":{ "shape":"NonEmptyString", - "documentation":"

The clientId of ServiceNow ITSM.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager secret that contains the ServiceNow credentials.

" }, "AuthStatus":{ "shape":"ConnectorAuthStatus", - "documentation":"

The status of the authorization between Jira Cloud and the service.

" + "documentation":"

The status of the authorization between ServiceNow and the service.

" } }, "documentation":"

Information about a ServiceNow ITSM integration.

" @@ -23388,25 +23409,40 @@ "type":"structure", "required":[ "InstanceName", - "ClientId", - "ClientSecret" + "SecretArn" ], "members":{ "InstanceName":{ "shape":"NonEmptyString", "documentation":"

The instance name of ServiceNow ITSM.

" }, - "ClientId":{ + "SecretArn":{ "shape":"NonEmptyString", - "documentation":"

The client ID of ServiceNow ITSM.

" - }, - "ClientSecret":{ - "shape":"SensitiveNonEmptyString", - "documentation":"

The client secret of ServiceNow ITSM.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager secret that contains the ServiceNow credentials.

" } }, "documentation":"

The initial configuration settings required to establish an integration between Security Hub and ServiceNow ITSM.

" }, + "ServiceNowUpdateConfiguration":{ + "type":"structure", + "members":{ + "SecretArn":{ + "shape":"NonEmptyString", + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Web Services Secrets Manager secret that contains the ServiceNow credentials.

" + } + }, + "documentation":"

The parameters used to modify an existing ServiceNow integration.

" + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"NonEmptyString"}, + "Code":{"shape":"NonEmptyString"} + }, + "documentation":"

The request was rejected because it would exceed the service quota limit.

", + "error":{"httpStatusCode":402}, + "exception":true + }, "Severity":{ "type":"structure", "members":{ @@ -24398,6 +24434,11 @@ "error":{"httpStatusCode":429}, "exception":true }, + "TicketCreationMode":{ + "type":"string", + "documentation":"

The mode for creating a ticket.

", + "enum":["DRYRUN"] + }, "Timestamp":{ "type":"timestamp", "timestampFormat":"iso8601" @@ -24824,10 +24865,6 @@ "location":"uri", "locationName":"ConnectorId" }, - "ClientSecret":{ - "shape":"SensitiveNonEmptyString", - "documentation":"

The clientSecret of ServiceNow.

" - }, "Description":{ "shape":"NonEmptyString", "documentation":"

The description of the connectorV2.

" diff --git a/awscli/botocore/data/service-quotas/2019-06-24/service-2.json b/awscli/botocore/data/service-quotas/2019-06-24/service-2.json index 8b60b8588769..83e2365aeaaf 100644 --- a/awscli/botocore/data/service-quotas/2019-06-24/service-2.json +++ b/awscli/botocore/data/service-quotas/2019-06-24/service-2.json @@ -149,6 +149,23 @@ ], "documentation":"

Retrieves information about your Service Quotas Automatic Management configuration. Automatic Management monitors your Service Quotas utilization and notifies you before you run out of your allocated quotas.

" }, + "GetQuotaUtilizationReport":{ + "name":"GetQuotaUtilizationReport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetQuotaUtilizationReportRequest"}, + "output":{"shape":"GetQuotaUtilizationReportResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"NoSuchResourceException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ServiceException"} + ], + "documentation":"

Retrieves the quota utilization report for your Amazon Web Services account. This operation returns paginated results showing your quota usage across all Amazon Web Services services, sorted by utilization percentage in descending order (highest utilization first).

You must first initiate a report using the StartQuotaUtilizationReport operation. The report generation process is asynchronous and may take several seconds to complete. Poll this operation periodically to check the status and retrieve results when the report is ready.

Each report contains up to 1,000 quota records per page. Use the NextToken parameter to retrieve additional pages of results. Reports are automatically deleted after 15 minutes.

" + }, "GetRequestedServiceQuotaChange":{ "name":"GetRequestedServiceQuotaChange", "http":{ @@ -390,6 +407,24 @@ ], "documentation":"

Starts Service Quotas Automatic Management for an Amazon Web Services account, including notification preferences and excluded quotas configurations. Automatic Management monitors your Service Quotas utilization and notifies you before you run out of your allocated quotas.

" }, + "StartQuotaUtilizationReport":{ + "name":"StartQuotaUtilizationReport", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartQuotaUtilizationReportRequest"}, + "output":{"shape":"StartQuotaUtilizationReportResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"NoSuchResourceException"}, + {"shape":"IllegalArgumentException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ServiceException"}, + {"shape":"InvalidPaginationTokenException"} + ], + "documentation":"

Initiates the generation of a quota utilization report for your Amazon Web Services account. This asynchronous operation analyzes your quota usage across all Amazon Web Services services and returns a unique report identifier that you can use to retrieve the results.

The report generation process may take several seconds to complete, depending on the number of quotas in your account. Use the GetQuotaUtilizationReport operation to check the status and retrieve the results when the report is ready.

" + }, "StopAutoManagement":{ "name":"StopAutoManagement", "http":{ @@ -492,6 +527,11 @@ "ALL" ] }, + "AppliedValue":{ + "type":"double", + "max":10000000000, + "min":0 + }, "AssociateServiceQuotaTemplateRequest":{ "type":"structure", "members":{} @@ -522,6 +562,11 @@ }, "CustomerServiceEngagementId":{"type":"string"}, "DateTime":{"type":"timestamp"}, + "DefaultValue":{ + "type":"double", + "max":10000000000, + "min":0 + }, "DeleteServiceQuotaIncreaseRequestFromTemplateRequest":{ "type":"structure", "required":[ @@ -683,6 +728,61 @@ } } }, + "GetQuotaUtilizationReportRequest":{ + "type":"structure", + "required":["ReportId"], + "members":{ + "ReportId":{ + "shape":"ReportId", + "documentation":"

The unique identifier for the quota utilization report. This identifier is returned by the StartQuotaUtilizationReport operation.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token that indicates the next page of results to retrieve. This token is returned in the response when there are more results available. Omit this parameter for the first request.

" + }, + "MaxResults":{ + "shape":"MaxResultsUtilization", + "documentation":"

The maximum number of results to return per page. The default value is 1,000 and the maximum allowed value is 1,000.

" + } + } + }, + "GetQuotaUtilizationReportResponse":{ + "type":"structure", + "members":{ + "ReportId":{ + "shape":"ReportId", + "documentation":"

The unique identifier for the quota utilization report.

" + }, + "Status":{ + "shape":"ReportStatus", + "documentation":"

The current status of the report generation. Possible values are:

" + }, + "GeneratedAt":{ + "shape":"DateTime", + "documentation":"

The timestamp when the report was generated, in ISO 8601 format.

" + }, + "TotalCount":{ + "shape":"TotalCount", + "documentation":"

The total number of quotas included in the report across all pages.

" + }, + "Quotas":{ + "shape":"QuotaUtilizationInfoList", + "documentation":"

A list of quota utilization records, sorted by utilization percentage in descending order. Each record includes the quota code, service code, service name, quota name, namespace, utilization percentage, default value, applied value, and whether the quota is adjustable. Up to 1,000 records are returned per page.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

A token that indicates more results are available. Include this token in the next request to retrieve the next page of results. If this field is not present, you have retrieved all available results.

" + }, + "ErrorCode":{ + "shape":"ReportErrorCode", + "documentation":"

An error code indicating the reason for failure when the report status is FAILED. This field is only present when the status is FAILED.

" + }, + "ErrorMessage":{ + "shape":"ReportErrorMessage", + "documentation":"

A detailed error message describing the failure when the report status is FAILED. This field is only present when the status is FAILED.

" + } + } + }, "GetRequestedServiceQuotaChangeRequest":{ "type":"structure", "required":["RequestId"], @@ -1035,6 +1135,11 @@ "max":100, "min":1 }, + "MaxResultsUtilization":{ + "type":"integer", + "max":1000, + "min":1 + }, "MetricDimensionName":{"type":"string"}, "MetricDimensionValue":{"type":"string"}, "MetricDimensionsMapDefinition":{ @@ -1251,11 +1356,91 @@ "documentation":"

Information about the quota period.

" }, "QuotaUnit":{"type":"string"}, + "QuotaUtilizationInfo":{ + "type":"structure", + "members":{ + "QuotaCode":{ + "shape":"QuotaCode", + "documentation":"

The quota identifier.

" + }, + "ServiceCode":{ + "shape":"ServiceCode", + "documentation":"

The service identifier.

" + }, + "QuotaName":{ + "shape":"QuotaName", + "documentation":"

The quota name.

" + }, + "Namespace":{ + "shape":"QuotaMetricNamespace", + "documentation":"

The namespace of the metric used to track quota usage.

" + }, + "Utilization":{ + "shape":"UtilizationPct", + "documentation":"

The utilization percentage of the quota, calculated as (current usage / applied value) × 100. Values range from 0.0 to 100.0 or higher if usage exceeds the quota limit.

" + }, + "DefaultValue":{ + "shape":"DefaultValue", + "documentation":"

The default value of the quota.

" + }, + "AppliedValue":{ + "shape":"AppliedValue", + "documentation":"

The applied value of the quota, which may be higher than the default value if a quota increase has been requested and approved.

" + }, + "ServiceName":{ + "shape":"ServiceName", + "documentation":"

The service name.

" + }, + "Adjustable":{ + "shape":"QuotaAdjustable", + "documentation":"

Indicates whether the quota value can be increased.

" + } + }, + "documentation":"

Information about a quota's utilization, including the quota code, service information, current usage, and applied limits.

" + }, + "QuotaUtilizationInfoList":{ + "type":"list", + "member":{"shape":"QuotaUtilizationInfo"}, + "max":1000 + }, "QuotaValue":{ "type":"double", "max":10000000000, "min":0 }, + "ReportErrorCode":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[a-zA-Z][a-zA-Z0-9]*" + }, + "ReportErrorMessage":{ + "type":"string", + "max":1024, + "min":0, + "pattern":"^.*$" + }, + "ReportId":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[0-9a-zA-Z][a-zA-Z0-9-]{1,128}" + }, + "ReportMessage":{ + "type":"string", + "max":350, + "min":0, + "pattern":"^.{0,350}$" + }, + "ReportStatus":{ + "type":"string", + "enum":[ + "PENDING", + "IN_PROGRESS", + "COMPLETED", + "FAILED" + ] + }, "RequestId":{ "type":"string", "max":128, @@ -1313,6 +1498,10 @@ "INVALID_REQUEST" ] }, + "RequestType":{ + "type":"string", + "enum":["AutomaticManagement"] + }, "RequestedServiceQuotaChange":{ "type":"structure", "members":{ @@ -1320,6 +1509,10 @@ "shape":"RequestId", "documentation":"

The unique identifier.

" }, + "RequestType":{ + "shape":"RequestType", + "documentation":"

The type of quota increase request. Possible values include:

If this field is not present, the request was manually created by a user.

" + }, "CaseId":{ "shape":"CustomerServiceEngagementId", "documentation":"

The case ID.

" @@ -1586,6 +1779,27 @@ "type":"structure", "members":{} }, + "StartQuotaUtilizationReportRequest":{ + "type":"structure", + "members":{} + }, + "StartQuotaUtilizationReportResponse":{ + "type":"structure", + "members":{ + "ReportId":{ + "shape":"ReportId", + "documentation":"

A unique identifier for the quota utilization report. Use this identifier with the GetQuotaUtilizationReport operation to retrieve the report results.

" + }, + "Status":{ + "shape":"ReportStatus", + "documentation":"

The current status of the report generation. The status will be PENDING when the report is first initiated.

" + }, + "Message":{ + "shape":"ReportMessage", + "documentation":"

An optional message providing additional information about the report generation status. This field may contain details about the report initiation or indicate if an existing recent report is being reused.

" + } + } + }, "Statistic":{ "type":"string", "max":256, @@ -1684,6 +1898,11 @@ "documentation":"

You've exceeded the number of tags allowed for a resource. For more information, see Tag restrictions in the Service Quotas User Guide.

", "exception":true }, + "TotalCount":{ + "type":"integer", + "max":2147483647, + "min":0 + }, "UntagResourceRequest":{ "type":"structure", "required":[ @@ -1725,7 +1944,8 @@ "UpdateAutoManagementResponse":{ "type":"structure", "members":{} - } + }, + "UtilizationPct":{"type":"double"} }, "documentation":"

With Service Quotas, you can view and manage your quotas easily as your Amazon Web Services workloads grow. Quotas, also referred to as limits, are the maximum number of resources that you can create in your Amazon Web Services account. For more information, see the Service Quotas User Guide.

You need Amazon Web Services CLI version 2.13.20 or higher to view and manage resource-level quotas such as Instances per domain for Amazon OpenSearch Service.

" } diff --git a/awscli/botocore/data/sesv2/2019-09-27/service-2.json b/awscli/botocore/data/sesv2/2019-09-27/service-2.json index ef0c1260135b..0db96a52e29f 100644 --- a/awscli/botocore/data/sesv2/2019-09-27/service-2.json +++ b/awscli/botocore/data/sesv2/2019-09-27/service-2.json @@ -707,6 +707,20 @@ ], "documentation":"

Retrieve inbox placement and engagement rates for the domains that you use to send email.

" }, + "GetEmailAddressInsights":{ + "name":"GetEmailAddressInsights", + "http":{ + "method":"POST", + "requestUri":"/v2/email/email-address-insights/" + }, + "input":{"shape":"GetEmailAddressInsightsRequest"}, + "output":{"shape":"GetEmailAddressInsightsResponse"}, + "errors":[ + {"shape":"TooManyRequestsException"}, + {"shape":"BadRequestException"} + ], + "documentation":"

Provides validation insights about a specific email address, including syntax validation, DNS record checks, mailbox existence, and other deliverability factors.

" + }, "GetEmailIdentity":{ "name":"GetEmailIdentity", "http":{ @@ -1406,7 +1420,7 @@ "name":"PutEmailIdentityDkimSigningAttributes", "http":{ "method":"PUT", - "requestUri":"/v1/email/identities/{EmailIdentity}/dkim/signing" + "requestUri":"/v2/email/identities/{EmailIdentity}/dkim/signing" }, "input":{"shape":"PutEmailIdentityDkimSigningAttributesRequest"}, "output":{"shape":"PutEmailIdentityDkimSigningAttributesResponse"}, @@ -1758,7 +1772,7 @@ "type":"string", "max":1011, "min":20, - "pattern":"arn:(aws|aws-[a-z-]+):ses:[a-z]{2}-[a-z-]+-[0-9]:[0-9]{1,20}:mailmanager-archive/a-[a-z0-9]{24,62}" + "pattern":"arn:(aws|aws-[a-z-]+):ses:[a-z]{2,4}-[a-z-]+-[0-9]:[0-9]{1,20}:mailmanager-archive/a-[a-z0-9]{24,62}" }, "ArchivingOptions":{ "type":"structure", @@ -2438,6 +2452,10 @@ "shape":"TemplateContent", "documentation":"

The content of the custom verification email. The total size of the email must be less than 10 MB. The message body may contain HTML, with some limitations. For more information, see Custom verification email frequently asked questions in the Amazon SES Developer Guide.

" }, + "Tags":{ + "shape":"TagList", + "documentation":"

An array of objects that define the tags (keys and values) to associate with the custom verification email template.

" + }, "SuccessRedirectionURL":{ "shape":"SuccessRedirectionURL", "documentation":"

The URL that the recipient of the verification email is sent to if his or her address is successfully verified.

" @@ -2609,6 +2627,10 @@ "TemplateContent":{ "shape":"EmailTemplateContent", "documentation":"

The content of the email template, composed of a subject line, an HTML part, and a text-only part.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

An array of objects that define the tags (keys and values) to associate with the email template.

" } }, "documentation":"

Represents a request to create an email template. For more information, see the Amazon SES Developer Guide.

" @@ -3334,6 +3356,10 @@ "shape":"DnsTokenList", "documentation":"

If you used Easy DKIM to configure DKIM authentication for the domain, then this object contains a set of unique strings that you use to create a set of CNAME records that you add to the DNS configuration for your domain. When Amazon SES detects these records in the DNS configuration for your domain, the DKIM authentication process is complete.

If you configured DKIM authentication for the domain by providing your own public-private key pair, then this object contains the selector for the public key.

Regardless of the DKIM authentication method you use, Amazon SES searches for the appropriate records in the DNS configuration of the domain for up to 72 hours.

" }, + "SigningHostedZone":{ + "shape":"HostedZone", + "documentation":"

The hosted zone where Amazon SES publishes the DKIM public key TXT records for this email identity. This value indicates the DNS zone that customers must reference when configuring their CNAME records for DKIM authentication.

When configuring DKIM for your domain, create CNAME records in your DNS that point to the selectors in this hosted zone. For example:

selector1._domainkey.yourdomain.com CNAME selector1.<SigningHostedZone>

selector2._domainkey.yourdomain.com CNAME selector2.<SigningHostedZone>

selector3._domainkey.yourdomain.com CNAME selector3.<SigningHostedZone>

" + }, "SigningAttributesOrigin":{ "shape":"DkimSigningAttributesOrigin", "documentation":"

A string that indicates how DKIM was configured for the identity. These are the possible values:

" @@ -3559,6 +3585,55 @@ "member":{"shape":"InsightsEmailAddress"}, "max":5 }, + "EmailAddressInsightsConfidenceVerdict":{ + "type":"string", + "documentation":"

The confidence level of SES that the email address meets the validation criteria:

", + "enum":[ + "LOW", + "MEDIUM", + "HIGH" + ] + }, + "EmailAddressInsightsMailboxEvaluations":{ + "type":"structure", + "members":{ + "HasValidSyntax":{ + "shape":"EmailAddressInsightsVerdict", + "documentation":"

Checks that the email address follows proper RFC standards and contains valid characters in the correct format.

" + }, + "HasValidDnsRecords":{ + "shape":"EmailAddressInsightsVerdict", + "documentation":"

Checks that the domain exists, has valid DNS records, and is configured to receive email.

" + }, + "MailboxExists":{ + "shape":"EmailAddressInsightsVerdict", + "documentation":"

Checks that the mailbox exists and can receive messages without actually sending an email.

" + }, + "IsRoleAddress":{ + "shape":"EmailAddressInsightsVerdict", + "documentation":"

Identifies role-based addresses (such as admin@, support@, or info@) that may have lower engagement rates.

" + }, + "IsDisposable":{ + "shape":"EmailAddressInsightsVerdict", + "documentation":"

Checks for disposable or temporary email addresses that could negatively impact your sender reputation.

" + }, + "IsRandomInput":{ + "shape":"EmailAddressInsightsVerdict", + "documentation":"

Checks if the input appears to be random text.

" + } + }, + "documentation":"

Contains individual validation checks performed on an email address.

" + }, + "EmailAddressInsightsVerdict":{ + "type":"structure", + "members":{ + "ConfidenceVerdict":{ + "shape":"EmailAddressInsightsConfidenceVerdict", + "documentation":"

The confidence level of the validation verdict.

" + } + }, + "documentation":"

Contains the overall validation verdict for an email address.

" + }, "EmailAddressList":{ "type":"list", "member":{"shape":"EmailAddress"} @@ -4238,6 +4313,10 @@ "shape":"TemplateContent", "documentation":"

The content of the custom verification email.

" }, + "Tags":{ + "shape":"TagList", + "documentation":"

An array of objects that define the tags (keys and values) that are associated with the custom verification email template.

" + }, "SuccessRedirectionURL":{ "shape":"SuccessRedirectionURL", "documentation":"

The URL that the recipient of the verification email is sent to if his or her address is successfully verified.

" @@ -4480,6 +4559,27 @@ }, "documentation":"

An object that includes statistics that are related to the domain that you specified.

" }, + "GetEmailAddressInsightsRequest":{ + "type":"structure", + "required":["EmailAddress"], + "members":{ + "EmailAddress":{ + "shape":"EmailAddress", + "documentation":"

The email address to analyze for validation insights.

" + } + }, + "documentation":"

A request to return validation insights about an email address.

" + }, + "GetEmailAddressInsightsResponse":{ + "type":"structure", + "members":{ + "MailboxValidation":{ + "shape":"MailboxValidation", + "documentation":"

Detailed validation results for the email address.

" + } + }, + "documentation":"

Validation insights about an email address.

" + }, "GetEmailIdentityPoliciesRequest":{ "type":"structure", "required":["EmailIdentity"], @@ -4589,6 +4689,10 @@ "TemplateContent":{ "shape":"EmailTemplateContent", "documentation":"

The content of the email template, composed of a subject line, an HTML part, and a text-only part.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

An array of objects that define the tags (keys and values) that are associated with the email template.

" } }, "documentation":"

The following element is returned by the service.

" @@ -4882,6 +4986,7 @@ }, "documentation":"

An object containing additional settings for your VDM configuration as applicable to the Guardian.

" }, + "HostedZone":{"type":"string"}, "HttpsPolicy":{ "type":"string", "documentation":"

The https policy to use for tracking open and click events. If the value is OPTIONAL or HttpsPolicy is not specified, open trackers use HTTP and click trackers use the original protocol of the link. If the value is REQUIRE, both open and click trackers use HTTPS, and if the value is REQUIRE_OPEN_ONLY, open trackers use HTTPS and the click tracker uses the original protocol of the link.

", @@ -5931,6 +6036,20 @@ "TRANSACTIONAL" ] }, + "MailboxValidation":{ + "type":"structure", + "members":{ + "IsValid":{ + "shape":"EmailAddressInsightsVerdict", + "documentation":"

Overall validity assessment with a confidence verdict.

" + }, + "Evaluations":{ + "shape":"EmailAddressInsightsMailboxEvaluations", + "documentation":"

Specific validation checks performed on the email address.

" + } + }, + "documentation":"

Contains detailed validation information about an email address.

" + }, "Max24HourSend":{"type":"double"}, "MaxDeliverySeconds":{ "type":"long", @@ -6452,6 +6571,10 @@ "SuppressedReasons":{ "shape":"SuppressionListReasons", "documentation":"

A list that contains the reasons that email addresses will be automatically added to the suppression list for your account. This list can contain any or all of the following:

" + }, + "ValidationAttributes":{ + "shape":"SuppressionValidationAttributes", + "documentation":"

An object that contains additional suppression attributes for your account.

" } }, "documentation":"

A request to change your account's suppression preferences.

" @@ -6585,6 +6708,10 @@ "SuppressedReasons":{ "shape":"SuppressionListReasons", "documentation":"

A list that contains the reasons that email addresses are automatically added to the suppression list for your account. This list can contain any or all of the following:

" + }, + "ValidationOptions":{ + "shape":"SuppressionValidationOptions", + "documentation":"

An object that contains information about the email address suppression preferences for the configuration set in the current Amazon Web Services Region.

" } }, "documentation":"

A request to change the account suppression list preferences for a specific configuration set.

" @@ -6812,6 +6939,10 @@ "DkimTokens":{ "shape":"DnsTokenList", "documentation":"

If you used Easy DKIM to configure DKIM authentication for the domain, then this object contains a set of unique strings that you use to create a set of CNAME records that you add to the DNS configuration for your domain. When Amazon SES detects these records in the DNS configuration for your domain, the DKIM authentication process is complete.

If you configured DKIM authentication for the domain by providing your own public-private key pair, then this object contains the selector that's associated with your public key.

Regardless of the DKIM authentication method you use, Amazon SES searches for the appropriate records in the DNS configuration of the domain for up to 72 hours.

" + }, + "SigningHostedZone":{ + "shape":"HostedZone", + "documentation":"

The hosted zone where Amazon SES publishes the DKIM public key TXT records for this email identity. This value indicates the DNS zone that customers must reference when configuring their CNAME records for DKIM authentication.

When configuring DKIM for your domain, create CNAME records in your DNS that point to the selectors in this hosted zone. For example:

selector1._domainkey.yourdomain.com CNAME selector1.<SigningHostedZone>

selector2._domainkey.yourdomain.com CNAME selector2.<SigningHostedZone>

selector3._domainkey.yourdomain.com CNAME selector3.<SigningHostedZone>

" } }, "documentation":"

If the action is successful, the service sends back an HTTP 200 response.

The following data is returned in JSON format by the service.

" @@ -7583,10 +7714,46 @@ "SuppressedReasons":{ "shape":"SuppressionListReasons", "documentation":"

A list that contains the reasons that email addresses will be automatically added to the suppression list for your account. This list can contain any or all of the following:

" - } + }, + "ValidationAttributes":{"shape":"SuppressionValidationAttributes"} }, "documentation":"

An object that contains information about the email address suppression preferences for your account in the current Amazon Web Services Region.

" }, + "SuppressionConditionThreshold":{ + "type":"structure", + "required":["ConditionThresholdEnabled"], + "members":{ + "ConditionThresholdEnabled":{ + "shape":"FeatureStatus", + "documentation":"

Indicates whether Auto Validation is enabled for suppression. Set to ENABLED to enable the Auto Validation feature, or set to DISABLED to disable it.

" + }, + "OverallConfidenceThreshold":{ + "shape":"SuppressionConfidenceThreshold", + "documentation":"

The overall confidence threshold used to determine suppression decisions.

" + } + }, + "documentation":"

Contains Auto Validation settings, allowing you to suppress sending to specific destination(s) if they do not meet the required threshold. For details on Auto Validation, see Auto Validation.

" + }, + "SuppressionConfidenceThreshold":{ + "type":"structure", + "required":["ConfidenceVerdictThreshold"], + "members":{ + "ConfidenceVerdictThreshold":{ + "shape":"SuppressionConfidenceVerdictThreshold", + "documentation":"

The confidence level threshold for suppression decisions.

" + } + }, + "documentation":"

Contains the confidence threshold settings for Auto Validation.

" + }, + "SuppressionConfidenceVerdictThreshold":{ + "type":"string", + "documentation":"

The confidence level threshold for suppression validation:

", + "enum":[ + "MEDIUM", + "HIGH", + "MANAGED" + ] + }, "SuppressionListDestination":{ "type":"structure", "required":["SuppressionListImportAction"], @@ -7624,10 +7791,33 @@ "SuppressedReasons":{ "shape":"SuppressionListReasons", "documentation":"

A list that contains the reasons that email addresses are automatically added to the suppression list for your account. This list can contain any or all of the following:

" - } + }, + "ValidationOptions":{"shape":"SuppressionValidationOptions"} }, "documentation":"

An object that contains information about the suppression list preferences for your account.

" }, + "SuppressionValidationAttributes":{ + "type":"structure", + "required":["ConditionThreshold"], + "members":{ + "ConditionThreshold":{ + "shape":"SuppressionConditionThreshold", + "documentation":"

Specifies the condition threshold settings for account-level suppression.

" + } + }, + "documentation":"

Structure containing validation attributes used for suppressing sending to specific destinations at the account level.

" + }, + "SuppressionValidationOptions":{ + "type":"structure", + "required":["ConditionThreshold"], + "members":{ + "ConditionThreshold":{ + "shape":"SuppressionConditionThreshold", + "documentation":"

Specifies the condition threshold settings for suppression validation.

" + } + }, + "documentation":"

Contains validation options for email address suppression.

" + }, "Tag":{ "type":"structure", "required":[ diff --git a/awscli/botocore/data/signer/2017-08-25/service-2.json b/awscli/botocore/data/signer/2017-08-25/service-2.json index aef9a7fc4f4e..acf96f9ce810 100644 --- a/awscli/botocore/data/signer/2017-08-25/service-2.json +++ b/awscli/botocore/data/signer/2017-08-25/service-2.json @@ -47,7 +47,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"InternalServiceErrorException"} ], - "documentation":"

Changes the state of an ACTIVE signing profile to CANCELED. A canceled profile is still viewable with the ListSigningProfiles operation, but it cannot perform new signing jobs, and is deleted two years after cancelation.

" + "documentation":"

Changes the state of an ACTIVE signing profile to CANCELED. A canceled profile is still viewable with the ListSigningProfiles operation, but it cannot perform new signing jobs. See Data Retention for more information on scheduled deletion of a canceled signing profile.

" }, "DescribeSigningJob":{ "name":"DescribeSigningJob", @@ -80,7 +80,7 @@ {"shape":"InternalServiceErrorException"} ], "documentation":"

Retrieves the revocation status of one or more of the signing profile, signing job, and signing certificate.

", - "endpoint":{"hostPrefix":"verification."} + "endpoint":{"hostPrefix":"data-"} }, "GetSigningPlatform":{ "name":"GetSigningPlatform", @@ -243,7 +243,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"InternalServiceErrorException"} ], - "documentation":"

Changes the state of a signing job to REVOKED. This indicates that the signature is no longer valid.

" + "documentation":"

Changes the state of a signing job to REVOKED. This indicates that the signature is no longer valid.

" }, "RevokeSigningProfile":{ "name":"RevokeSigningProfile", @@ -259,7 +259,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"InternalServiceErrorException"} ], - "documentation":"

Changes the state of a signing profile to REVOKED. This indicates that signatures generated using the signing profile after an effective start date are no longer valid.

" + "documentation":"

Changes the state of a signing profile to REVOKED. This indicates that signatures generated using the signing profile after an effective start date are no longer valid. A revoked profile is still viewable with the ListSigningProfiles operation, but it cannot perform new signing jobs. See Data Retention for more information on scheduled deletion of a revoked signing profile.

" }, "SignPayload":{ "name":"SignPayload", @@ -294,7 +294,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"InternalServiceErrorException"} ], - "documentation":"

Initiates a signing job to be performed on the code provided. Signing jobs are viewable by the ListSigningJobs operation for two years after they are performed. Note the following requirements:

You can call the DescribeSigningJob and the ListSigningJobs actions after you call StartSigningJob.

For a Java example that shows how to use this action, see StartSigningJob.

" + "documentation":"

Initiates a signing job to be performed on the code provided. Signing jobs are viewable by the ListSigningJobs operation. Note the following requirements:

You can call the DescribeSigningJob and the ListSigningJobs actions after you call StartSigningJob.

For a Java example that shows how to use this action, see StartSigningJob.

" }, "TagResource":{ "name":"TagResource", @@ -367,7 +367,7 @@ }, "action":{ "shape":"String", - "documentation":"

For cross-account signing. Grant a designated account permission to perform one or more of the following actions. Each action is associated with a specific API's operations. For more information about cross-account signing, see Using cross-account signing with signing profiles in the AWS Signer Developer Guide.

You can designate the following actions to an account.

" + "documentation":"

For cross-account signing. Grant a designated account permission to perform one or more of the following actions. Each action is associated with a specific API's operations. For more information about cross-account signing, see Using cross-account signing with signing profiles in the AWS Signer Developer Guide.

You can designate the following actions to an account.

" }, "principal":{ "shape":"String", @@ -1875,5 +1875,5 @@ "bool":{"type":"boolean"}, "string":{"type":"string"} }, - "documentation":"

AWS Signer is a fully managed code-signing service to help you ensure the trust and integrity of your code.

Signer supports the following applications:

With code signing for AWS Lambda, you can sign AWS Lambda deployment packages. Integrated support is provided for Amazon S3, Amazon CloudWatch, and AWS CloudTrail. In order to sign code, you create a signing profile and then use Signer to sign Lambda zip files in S3.

With code signing for IoT, you can sign code for any IoT device that is supported by AWS. IoT code signing is available for Amazon FreeRTOS and AWS IoT Device Management, and is integrated with AWS Certificate Manager (ACM). In order to sign code, you import a third-party code-signing certificate using ACM, and use that to sign updates in Amazon FreeRTOS and AWS IoT Device Management.

With Signer and the Notation CLI from the Notary
 Project, you can sign container images stored in a container registry such as Amazon Elastic Container Registry (ECR). The signatures are stored in the registry alongside the images, where they are available for verifying image authenticity and integrity.

For more information about Signer, see the AWS Signer Developer Guide.

" + "documentation":"

AWS Signer is a fully managed code-signing service to help you ensure the trust and integrity of your code.

Signer supports the following applications:

With code signing for AWS Lambda, you can sign AWS Lambda deployment packages. Integrated support is provided for Amazon S3, Amazon CloudWatch, and AWS CloudTrail. In order to sign code, you create a signing profile and then use Signer to sign Lambda zip files in S3.

With code signing for IoT, you can sign code for any IoT device that is supported by AWS. IoT code signing is available for Amazon FreeRTOS and AWS IoT Device Management, and is integrated with AWS Certificate Manager (ACM). In order to sign code, you import a third-party code-signing certificate using ACM, and use that to sign updates in Amazon FreeRTOS and AWS IoT Device Management.

With Signer and the Notation CLI from the Notary
 Project, you can sign container images stored in a container registry such as Amazon Elastic Container Registry (ECR). The signatures are stored in the registry alongside the images, where they are available for verifying image authenticity and integrity.

For more information about Signer, see the AWS Signer Developer Guide.

" } diff --git a/awscli/botocore/data/ssm-sap/2018-05-10/service-2.json b/awscli/botocore/data/ssm-sap/2018-05-10/service-2.json index ef62e27eec5d..ffbd8bfafe60 100644 --- a/awscli/botocore/data/ssm-sap/2018-05-10/service-2.json +++ b/awscli/botocore/data/ssm-sap/2018-05-10/service-2.json @@ -59,7 +59,8 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Gets an application registered with AWS Systems Manager for SAP. It also returns the components of the application.

" + "documentation":"

Gets an application registered with AWS Systems Manager for SAP. It also returns the components of the application.

", + "readonly":true }, "GetComponent":{ "name":"GetComponent", @@ -75,7 +76,8 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Gets the component of an application registered with AWS Systems Manager for SAP.

" + "documentation":"

Gets the component of an application registered with AWS Systems Manager for SAP.

", + "readonly":true }, "GetConfigurationCheckOperation":{ "name":"GetConfigurationCheckOperation", @@ -90,7 +92,8 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Gets the details of a configuration check operation by specifying the operation ID.

" + "documentation":"

Gets the details of a configuration check operation by specifying the operation ID.

", + "readonly":true }, "GetDatabase":{ "name":"GetDatabase", @@ -105,7 +108,8 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Gets the SAP HANA database of an application registered with AWS Systems Manager for SAP.

" + "documentation":"

Gets the SAP HANA database of an application registered with AWS Systems Manager for SAP.

", + "readonly":true }, "GetOperation":{ "name":"GetOperation", @@ -120,7 +124,8 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Gets the details of an operation by specifying the operation ID.

" + "documentation":"

Gets the details of an operation by specifying the operation ID.

", + "readonly":true }, "GetResourcePermission":{ "name":"GetResourcePermission", @@ -152,7 +157,8 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Lists all the applications registered with AWS Systems Manager for SAP.

" + "documentation":"

Lists all the applications registered with AWS Systems Manager for SAP.

", + "readonly":true }, "ListComponents":{ "name":"ListComponents", @@ -169,7 +175,8 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Lists all the components registered with AWS Systems Manager for SAP.

" + "documentation":"

Lists all the components registered with AWS Systems Manager for SAP.

", + "readonly":true }, "ListConfigurationCheckDefinitions":{ "name":"ListConfigurationCheckDefinitions", @@ -184,7 +191,8 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Lists all configuration check types supported by AWS Systems Manager for SAP.

" + "documentation":"

Lists all configuration check types supported by AWS Systems Manager for SAP.

", + "readonly":true }, "ListConfigurationCheckOperations":{ "name":"ListConfigurationCheckOperations", @@ -200,7 +208,8 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Lists the configuration check operations performed by AWS Systems Manager for SAP.

" + "documentation":"

Lists the configuration check operations performed by AWS Systems Manager for SAP.

", + "readonly":true }, "ListDatabases":{ "name":"ListDatabases", @@ -216,7 +225,8 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Lists the SAP HANA databases of an application registered with AWS Systems Manager for SAP.

" + "documentation":"

Lists the SAP HANA databases of an application registered with AWS Systems Manager for SAP.

", + "readonly":true }, "ListOperationEvents":{ "name":"ListOperationEvents", @@ -231,7 +241,8 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Returns a list of operations events.

Available parameters include OperationID, as well as optional parameters MaxResults, NextToken, and Filters.

" + "documentation":"

Returns a list of operations events.

Available parameters include OperationID, as well as optional parameters MaxResults, NextToken, and Filters.

", + "readonly":true }, "ListOperations":{ "name":"ListOperations", @@ -246,7 +257,8 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Lists the operations performed by AWS Systems Manager for SAP.

" + "documentation":"

Lists the operations performed by AWS Systems Manager for SAP.

", + "readonly":true }, "ListSubCheckResults":{ "name":"ListSubCheckResults", @@ -261,7 +273,8 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Lists the sub-check results of a specified configuration check operation.

" + "documentation":"

Lists the sub-check results of a specified configuration check operation.

", + "readonly":true }, "ListSubCheckRuleResults":{ "name":"ListSubCheckRuleResults", @@ -276,7 +289,8 @@ {"shape":"ValidationException"}, {"shape":"InternalServerException"} ], - "documentation":"

Lists the rules of a specified sub-check belonging to a configuration check operation.

" + "documentation":"

Lists the rules of a specified sub-check belonging to a configuration check operation.

", + "readonly":true }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -292,7 +306,8 @@ {"shape":"ValidationException"}, {"shape":"ConflictException"} ], - "documentation":"

Lists all tags on an SAP HANA application and/or database registered with AWS Systems Manager for SAP.

" + "documentation":"

Lists all tags on an SAP HANA application and/or database registered with AWS Systems Manager for SAP.

", + "readonly":true }, "PutResourcePermission":{ "name":"PutResourcePermission", @@ -1078,7 +1093,8 @@ "STOPPED", "WARNING", "UNKNOWN", - "ERROR" + "ERROR", + "STOPPING" ] }, "DatabaseSummary":{ diff --git a/awscli/botocore/data/timestream-influxdb/2023-01-27/service-2.json b/awscli/botocore/data/timestream-influxdb/2023-01-27/service-2.json index e9acf9b9886c..a36377d040ea 100644 --- a/awscli/botocore/data/timestream-influxdb/2023-01-27/service-2.json +++ b/awscli/botocore/data/timestream-influxdb/2023-01-27/service-2.json @@ -245,6 +245,44 @@ ], "documentation":"

A list of tags applied to the resource.

" }, + "RebootDbCluster":{ + "name":"RebootDbCluster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebootDbClusterInput"}, + "output":{"shape":"RebootDbClusterOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Reboots a Timestream for InfluxDB cluster.

", + "idempotent":true + }, + "RebootDbInstance":{ + "name":"RebootDbInstance", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"RebootDbInstanceInput"}, + "output":{"shape":"RebootDbInstanceOutput"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ConflictException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ThrottlingException"} + ], + "documentation":"

Reboots a Timestream for InfluxDB instance.

", + "idempotent":true + }, "TagResource":{ "name":"TagResource", "http":{ @@ -356,7 +394,11 @@ "AVAILABLE", "FAILED", "DELETED", - "MAINTENANCE" + "MAINTENANCE", + "UPDATING_INSTANCE_TYPE", + "REBOOTING", + "REBOOT_FAILED", + "PARTIALLY_AVAILABLE" ] }, "ConflictException":{ @@ -2590,6 +2632,144 @@ "max":65535, "min":1024 }, + "RebootDbClusterInput":{ + "type":"structure", + "required":["dbClusterId"], + "members":{ + "dbClusterId":{ + "shape":"DbClusterId", + "documentation":"

Service-generated unique identifier of the DB cluster to reboot.

" + }, + "instanceIds":{ + "shape":"RebootDbClusterInputInstanceIdsList", + "documentation":"

A list of service-generated unique DB Instance Ids belonging to the DB Cluster to reboot.

" + } + } + }, + "RebootDbClusterInputInstanceIdsList":{ + "type":"list", + "member":{"shape":"DbInstanceId"}, + "max":3, + "min":0 + }, + "RebootDbClusterOutput":{ + "type":"structure", + "members":{ + "dbClusterStatus":{ + "shape":"ClusterStatus", + "documentation":"

The status of the DB Cluster.

" + } + } + }, + "RebootDbInstanceInput":{ + "type":"structure", + "required":["identifier"], + "members":{ + "identifier":{ + "shape":"DbInstanceIdentifier", + "documentation":"

The id of the DB instance to reboot.

" + } + } + }, + "RebootDbInstanceOutput":{ + "type":"structure", + "required":[ + "id", + "name", + "arn", + "vpcSubnetIds" + ], + "members":{ + "id":{ + "shape":"DbInstanceId", + "documentation":"

A service-generated unique identifier.

" + }, + "name":{ + "shape":"DbInstanceName", + "documentation":"

The customer-supplied name that uniquely identifies the DB instance when interacting with the Amazon Timestream for InfluxDB API and CLI commands.

" + }, + "arn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the DB instance.

" + }, + "status":{ + "shape":"Status", + "documentation":"

The status of the DB instance.

" + }, + "endpoint":{ + "shape":"String", + "documentation":"

The endpoint used to connect to InfluxDB. The default InfluxDB port is 8086.

" + }, + "port":{ + "shape":"Port", + "documentation":"

The port number on which InfluxDB accepts connections.

" + }, + "networkType":{ + "shape":"NetworkType", + "documentation":"

Specifies whether the networkType of the Timestream for InfluxDB instance is IPV4, which can communicate over IPv4 protocol only, or DUAL, which can communicate over both IPv4 and IPv6 protocols.

" + }, + "dbInstanceType":{ + "shape":"DbInstanceType", + "documentation":"

The Timestream for InfluxDB instance type that InfluxDB runs on.

" + }, + "dbStorageType":{ + "shape":"DbStorageType", + "documentation":"

The Timestream for InfluxDB DB storage type that InfluxDB stores data on.

" + }, + "allocatedStorage":{ + "shape":"AllocatedStorage", + "documentation":"

The amount of storage allocated for your DB storage type (in gibibytes).

" + }, + "deploymentType":{ + "shape":"DeploymentType", + "documentation":"

Specifies whether the Timestream for InfluxDB is deployed as Single-AZ or with a MultiAZ Standby for High availability.

" + }, + "vpcSubnetIds":{ + "shape":"VpcSubnetIdList", + "documentation":"

A list of VPC subnet IDs associated with the DB instance.

" + }, + "publiclyAccessible":{ + "shape":"Boolean", + "documentation":"

Indicates if the DB instance has a public IP to facilitate access.

" + }, + "vpcSecurityGroupIds":{ + "shape":"VpcSecurityGroupIdList", + "documentation":"

A list of VPC security group IDs associated with the DB instance.

" + }, + "dbParameterGroupIdentifier":{ + "shape":"DbParameterGroupIdentifier", + "documentation":"

The id of the DB parameter group assigned to your DB instance.

" + }, + "availabilityZone":{ + "shape":"String", + "documentation":"

The Availability Zone in which the DB instance resides.

" + }, + "secondaryAvailabilityZone":{ + "shape":"String", + "documentation":"

The Availability Zone in which the standby instance is located when deploying with a MultiAZ standby instance.

" + }, + "logDeliveryConfiguration":{ + "shape":"LogDeliveryConfiguration", + "documentation":"

Configuration for sending InfluxDB engine logs to the specified S3 bucket.

" + }, + "influxAuthParametersSecretArn":{ + "shape":"String", + "documentation":"

The Amazon Resource Name (ARN) of the Secrets Manager secret containing the initial InfluxDB authorization parameters. The secret value is a JSON formatted key-value pair holding InfluxDB authorization values: organization, bucket, username, and password.

" + }, + "dbClusterId":{ + "shape":"DbClusterId", + "documentation":"

Specifies the DbCluster to which this DbInstance belongs.

" + }, + "instanceMode":{ + "shape":"InstanceMode", + "documentation":"

Specifies the DbInstance's role in the cluster.

" + }, + "instanceModes":{ + "shape":"InstanceModeList", + "documentation":"

Specifies the DbInstance's roles in the cluster.

" + } + } + }, "RequestTagMap":{ "type":"map", "key":{"shape":"TagKey"}, @@ -2670,7 +2850,9 @@ "FAILED", "UPDATING_DEPLOYMENT_TYPE", "UPDATING_INSTANCE_TYPE", - "MAINTENANCE" + "MAINTENANCE", + "REBOOTING", + "REBOOT_FAILED" ] }, "String":{"type":"string"}, @@ -2972,7 +3154,7 @@ "VpcSubnetIdList":{ "type":"list", "member":{"shape":"VpcSubnetId"}, - "max":3, + "max":6, "min":1 } }, diff --git a/awscli/botocore/data/wickr/2024-02-01/endpoint-rule-set-1.json b/awscli/botocore/data/wickr/2024-02-01/endpoint-rule-set-1.json new file mode 100644 index 000000000000..1f95801d1f4b --- /dev/null +++ b/awscli/botocore/data/wickr/2024-02-01/endpoint-rule-set-1.json @@ -0,0 +1,350 @@ +{ + "version": "1.0", + "parameters": { + "Region": { + "builtIn": "AWS::Region", + "required": false, + "documentation": "The AWS region used to dispatch the request.", + "type": "string" + }, + "UseDualStack": { + "builtIn": "AWS::UseDualStack", + "required": true, + "default": false, + "documentation": "When true, use the dual-stack endpoint. If the configured endpoint does not support dual-stack, dispatching the request MAY return an error.", + "type": "boolean" + }, + "UseFIPS": { + "builtIn": "AWS::UseFIPS", + "required": true, + "default": false, + "documentation": "When true, send this request to the FIPS-compliant regional endpoint. 
If the configured endpoint does not have a FIPS compliant endpoint, dispatching the request will return an error.", + "type": "boolean" + }, + "Endpoint": { + "builtIn": "SDK::Endpoint", + "required": false, + "documentation": "Override the endpoint used to send this request", + "type": "string" + } + }, + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Endpoint" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "error": "Invalid Configuration: FIPS and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported", + "type": "error" + }, + { + "conditions": [], + "endpoint": { + "url": { + "ref": "Endpoint" + }, + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [ + { + "fn": "isSet", + "argv": [ + { + "ref": "Region" + } + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "aws.partition", + "argv": [ + { + "ref": "Region" + } + ], + "assign": "PartitionResult" + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + }, + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + } + ] + }, + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + 
"conditions": [], + "endpoint": { + "url": "https://admin.wickr-fips.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS and DualStack are enabled, but this partition does not support one or both", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseFIPS" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsFIPS" + ] + }, + true + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://admin.wickr-fips.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "FIPS is enabled but this partition does not support FIPS", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + { + "ref": "UseDualStack" + }, + true + ] + } + ], + "rules": [ + { + "conditions": [ + { + "fn": "booleanEquals", + "argv": [ + true, + { + "fn": "getAttr", + "argv": [ + { + "ref": "PartitionResult" + }, + "supportsDualStack" + ] + } + ] + } + ], + "rules": [ + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": "https://admin.wickr.{Region}.{PartitionResult#dualStackDnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "DualStack is enabled but this partition does not support DualStack", + "type": "error" + } + ], + "type": "tree" + }, + { + "conditions": [], + "rules": [ + { + "conditions": [], + "endpoint": { + "url": 
"https://admin.wickr.{Region}.{PartitionResult#dnsSuffix}", + "properties": {}, + "headers": {} + }, + "type": "endpoint" + } + ], + "type": "tree" + } + ], + "type": "tree" + } + ], + "type": "tree" + }, + { + "conditions": [], + "error": "Invalid Configuration: Missing Region", + "type": "error" + } + ], + "type": "tree" + } + ] +} \ No newline at end of file diff --git a/awscli/botocore/data/wickr/2024-02-01/paginators-1.json b/awscli/botocore/data/wickr/2024-02-01/paginators-1.json new file mode 100644 index 000000000000..f51f71e26c6b --- /dev/null +++ b/awscli/botocore/data/wickr/2024-02-01/paginators-1.json @@ -0,0 +1,52 @@ +{ + "pagination": { + "ListBlockedGuestUsers": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "blocklist" + }, + "ListBots": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "bots" + }, + "ListDevicesForUser": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "devices" + }, + "ListGuestUsers": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "guestlist" + }, + "ListNetworks": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "networks" + }, + "ListSecurityGroupUsers": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "users" + }, + "ListSecurityGroups": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "securityGroups" + }, + "ListUsers": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "users" + } + } +} diff --git a/awscli/botocore/data/wickr/2024-02-01/service-2.json b/awscli/botocore/data/wickr/2024-02-01/service-2.json new file mode 100644 index 000000000000..c87cd4fa02d2 
--- /dev/null +++ b/awscli/botocore/data/wickr/2024-02-01/service-2.json @@ -0,0 +1,4174 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2024-02-01", + "auth":["aws.auth#sigv4"], + "endpointPrefix":"admin.wickr", + "protocol":"rest-json", + "protocols":["rest-json"], + "serviceFullName":"AWS Wickr Admin API", + "serviceId":"Wickr", + "signatureVersion":"v4", + "signingName":"wickr", + "uid":"wickr-2024-02-01" + }, + "operations":{ + "BatchCreateUser":{ + "name":"BatchCreateUser", + "http":{ + "method":"POST", + "requestUri":"/networks/{networkId}/users", + "responseCode":200 + }, + "input":{"shape":"BatchCreateUserRequest"}, + "output":{"shape":"BatchCreateUserResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Creates multiple users in a specified Wickr network. This operation allows you to provision multiple user accounts simultaneously, optionally specifying security groups, and validation requirements for each user.

codeValidation, inviteCode, and inviteCodeTtl are restricted to networks under preview only.

" + }, + "BatchDeleteUser":{ + "name":"BatchDeleteUser", + "http":{ + "method":"POST", + "requestUri":"/networks/{networkId}/users/batch-delete", + "responseCode":200 + }, + "input":{"shape":"BatchDeleteUserRequest"}, + "output":{"shape":"BatchDeleteUserResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Deletes multiple users from a specified Wickr network. This operation permanently removes user accounts and their associated data from the network.

", + "idempotent":true + }, + "BatchLookupUserUname":{ + "name":"BatchLookupUserUname", + "http":{ + "method":"POST", + "requestUri":"/networks/{networkId}/users/uname-lookup", + "responseCode":200 + }, + "input":{"shape":"BatchLookupUserUnameRequest"}, + "output":{"shape":"BatchLookupUserUnameResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Looks up multiple user usernames from their unique username hashes (unames). This operation allows you to retrieve the email addresses associated with a list of username hashes.

" + }, + "BatchReinviteUser":{ + "name":"BatchReinviteUser", + "http":{ + "method":"PATCH", + "requestUri":"/networks/{networkId}/users/re-invite", + "responseCode":200 + }, + "input":{"shape":"BatchReinviteUserRequest"}, + "output":{"shape":"BatchReinviteUserResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Resends invitation codes to multiple users who have pending invitations in a Wickr network. This operation is useful when users haven't accepted their initial invitations or when invitations have expired.

" + }, + "BatchResetDevicesForUser":{ + "name":"BatchResetDevicesForUser", + "http":{ + "method":"PATCH", + "requestUri":"/networks/{networkId}/users/{userId}/devices", + "responseCode":200 + }, + "input":{"shape":"BatchResetDevicesForUserRequest"}, + "output":{"shape":"BatchResetDevicesForUserResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Resets multiple devices for a specific user in a Wickr network. This operation forces the selected devices to log out and requires users to re-authenticate, which is useful for security purposes or when devices need to be revoked.

", + "idempotent":true + }, + "BatchToggleUserSuspendStatus":{ + "name":"BatchToggleUserSuspendStatus", + "http":{ + "method":"PATCH", + "requestUri":"/networks/{networkId}/users/toggleSuspend", + "responseCode":200 + }, + "input":{"shape":"BatchToggleUserSuspendStatusRequest"}, + "output":{"shape":"BatchToggleUserSuspendStatusResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Suspends or unsuspends multiple users in a Wickr network. Suspended users cannot access the network until they are unsuspended. This operation is useful for temporarily restricting access without deleting user accounts.

", + "idempotent":true + }, + "CreateBot":{ + "name":"CreateBot", + "http":{ + "method":"POST", + "requestUri":"/networks/{networkId}/bots", + "responseCode":200 + }, + "input":{"shape":"CreateBotRequest"}, + "output":{"shape":"CreateBotResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Creates a new bot in a specified Wickr network. Bots are automated accounts that can send and receive messages, enabling integration with external systems and automation of tasks.

" + }, + "CreateDataRetentionBot":{ + "name":"CreateDataRetentionBot", + "http":{ + "method":"POST", + "requestUri":"/networks/{networkId}/data-retention-bots", + "responseCode":200 + }, + "input":{"shape":"CreateDataRetentionBotRequest"}, + "output":{"shape":"CreateDataRetentionBotResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Creates a data retention bot in a Wickr network. Data retention bots are specialized bots that handle message archiving and compliance by capturing and storing messages for regulatory or organizational requirements.

" + }, + "CreateDataRetentionBotChallenge":{ + "name":"CreateDataRetentionBotChallenge", + "http":{ + "method":"POST", + "requestUri":"/networks/{networkId}/data-retention-bots/challenge", + "responseCode":200 + }, + "input":{"shape":"CreateDataRetentionBotChallengeRequest"}, + "output":{"shape":"CreateDataRetentionBotChallengeResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Creates a new challenge password for the data retention bot. This password is used for authentication when the bot connects to the network.

" + }, + "CreateNetwork":{ + "name":"CreateNetwork", + "http":{ + "method":"POST", + "requestUri":"/networks", + "responseCode":200 + }, + "input":{"shape":"CreateNetworkRequest"}, + "output":{"shape":"CreateNetworkResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Creates a new Wickr network with specified access level and configuration. This operation provisions a new communication network for your organization.

", + "idempotent":true + }, + "CreateSecurityGroup":{ + "name":"CreateSecurityGroup", + "http":{ + "method":"POST", + "requestUri":"/networks/{networkId}/security-groups", + "responseCode":200 + }, + "input":{"shape":"CreateSecurityGroupRequest"}, + "output":{"shape":"CreateSecurityGroupResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Creates a new security group in a Wickr network. Security groups allow you to organize users and control their permissions, features, and security settings.

", + "idempotent":true + }, + "DeleteBot":{ + "name":"DeleteBot", + "http":{ + "method":"DELETE", + "requestUri":"/networks/{networkId}/bots/{botId}", + "responseCode":200 + }, + "input":{"shape":"DeleteBotRequest"}, + "output":{"shape":"DeleteBotResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Deletes a bot from a specified Wickr network. This operation permanently removes the bot account and its associated data from the network.

", + "idempotent":true + }, + "DeleteDataRetentionBot":{ + "name":"DeleteDataRetentionBot", + "http":{ + "method":"DELETE", + "requestUri":"/networks/{networkId}/data-retention-bots", + "responseCode":200 + }, + "input":{"shape":"DeleteDataRetentionBotRequest"}, + "output":{"shape":"DeleteDataRetentionBotResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Deletes the data retention bot from a Wickr network. This operation permanently removes the bot and all its associated data from the database.

", + "idempotent":true + }, + "DeleteNetwork":{ + "name":"DeleteNetwork", + "http":{ + "method":"DELETE", + "requestUri":"/networks/{networkId}", + "responseCode":200 + }, + "input":{"shape":"DeleteNetworkRequest"}, + "output":{"shape":"DeleteNetworkResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Deletes a Wickr network and all its associated resources, including users, bots, security groups, and settings. This operation is permanent and cannot be undone.

", + "idempotent":true + }, + "DeleteSecurityGroup":{ + "name":"DeleteSecurityGroup", + "http":{ + "method":"DELETE", + "requestUri":"/networks/{networkId}/security-groups/{groupId}", + "responseCode":200 + }, + "input":{"shape":"DeleteSecurityGroupRequest"}, + "output":{"shape":"DeleteSecurityGroupResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Deletes a security group from a Wickr network. This operation cannot be performed on the default security group.

", + "idempotent":true + }, + "GetBot":{ + "name":"GetBot", + "http":{ + "method":"GET", + "requestUri":"/networks/{networkId}/bots/{botId}", + "responseCode":200 + }, + "input":{"shape":"GetBotRequest"}, + "output":{"shape":"GetBotResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Retrieves detailed information about a specific bot in a Wickr network, including its status, group membership, and authentication details.

", + "readonly":true + }, + "GetBotsCount":{ + "name":"GetBotsCount", + "http":{ + "method":"GET", + "requestUri":"/networks/{networkId}/bots/count", + "responseCode":200 + }, + "input":{"shape":"GetBotsCountRequest"}, + "output":{"shape":"GetBotsCountResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Retrieves the count of bots in a Wickr network, categorized by their status (pending and active), along with the total count.

", + "readonly":true + }, + "GetDataRetentionBot":{ + "name":"GetDataRetentionBot", + "http":{ + "method":"GET", + "requestUri":"/networks/{networkId}/data-retention-bots", + "responseCode":200 + }, + "input":{"shape":"GetDataRetentionBotRequest"}, + "output":{"shape":"GetDataRetentionBotResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Retrieves information about the data retention bot in a Wickr network, including its status and whether the data retention service is enabled.

", + "readonly":true + }, + "GetGuestUserHistoryCount":{ + "name":"GetGuestUserHistoryCount", + "http":{ + "method":"GET", + "requestUri":"/networks/{networkId}/guest-users/count", + "responseCode":200 + }, + "input":{"shape":"GetGuestUserHistoryCountRequest"}, + "output":{"shape":"GetGuestUserHistoryCountResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Retrieves historical guest user count data for a Wickr network, showing the number of guest users per billing period over the past 90 days.

", + "readonly":true + }, + "GetNetwork":{ + "name":"GetNetwork", + "http":{ + "method":"GET", + "requestUri":"/networks/{networkId}", + "responseCode":200 + }, + "input":{"shape":"GetNetworkRequest"}, + "output":{"shape":"GetNetworkResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Retrieves detailed information about a specific Wickr network, including its configuration, access level, and status.

", + "readonly":true + }, + "GetNetworkSettings":{ + "name":"GetNetworkSettings", + "http":{ + "method":"GET", + "requestUri":"/networks/{networkId}/settings", + "responseCode":200 + }, + "input":{"shape":"GetNetworkSettingsRequest"}, + "output":{"shape":"GetNetworkSettingsResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Retrieves all network-level settings for a Wickr network, including client metrics, data retention, and other configuration options.

", + "readonly":true + }, + "GetOidcInfo":{ + "name":"GetOidcInfo", + "http":{ + "method":"GET", + "requestUri":"/networks/{networkId}/oidc", + "responseCode":200 + }, + "input":{"shape":"GetOidcInfoRequest"}, + "output":{"shape":"GetOidcInfoResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Retrieves the OpenID Connect (OIDC) configuration for a Wickr network, including SSO settings and optional token information if access token parameters are provided.

", + "readonly":true + }, + "GetSecurityGroup":{ + "name":"GetSecurityGroup", + "http":{ + "method":"GET", + "requestUri":"/networks/{networkId}/security-groups/{groupId}", + "responseCode":200 + }, + "input":{"shape":"GetSecurityGroupRequest"}, + "output":{"shape":"GetSecurityGroupResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Retrieves detailed information about a specific security group in a Wickr network, including its settings, member counts, and configuration.

", + "readonly":true + }, + "GetUser":{ + "name":"GetUser", + "http":{ + "method":"GET", + "requestUri":"/networks/{networkId}/users/{userId}", + "responseCode":200 + }, + "input":{"shape":"GetUserRequest"}, + "output":{"shape":"GetUserResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Retrieves detailed information about a specific user in a Wickr network, including their profile, status, and activity history.

", + "readonly":true + }, + "GetUsersCount":{ + "name":"GetUsersCount", + "http":{ + "method":"GET", + "requestUri":"/networks/{networkId}/users/count", + "responseCode":200 + }, + "input":{"shape":"GetUsersCountRequest"}, + "output":{"shape":"GetUsersCountResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Retrieves the count of users in a Wickr network, categorized by their status (pending, active, rejected) and showing how many users can still be added.

", + "readonly":true + }, + "ListBlockedGuestUsers":{ + "name":"ListBlockedGuestUsers", + "http":{ + "method":"GET", + "requestUri":"/networks/{networkId}/guest-users/blocklist", + "responseCode":200 + }, + "input":{"shape":"ListBlockedGuestUsersRequest"}, + "output":{"shape":"ListBlockedGuestUsersResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Retrieves a paginated list of guest users who have been blocked from a Wickr network. You can filter and sort the results.

", + "readonly":true + }, + "ListBots":{ + "name":"ListBots", + "http":{ + "method":"GET", + "requestUri":"/networks/{networkId}/bots", + "responseCode":200 + }, + "input":{"shape":"ListBotsRequest"}, + "output":{"shape":"ListBotsResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Retrieves a paginated list of bots in a specified Wickr network. You can filter and sort the results based on various criteria.

", + "readonly":true + }, + "ListDevicesForUser":{ + "name":"ListDevicesForUser", + "http":{ + "method":"GET", + "requestUri":"/networks/{networkId}/users/{userId}/devices", + "responseCode":200 + }, + "input":{"shape":"ListDevicesForUserRequest"}, + "output":{"shape":"ListDevicesForUserResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Retrieves a paginated list of devices associated with a specific user in a Wickr network. This operation returns information about all devices where the user has logged into Wickr.

", + "readonly":true + }, + "ListGuestUsers":{ + "name":"ListGuestUsers", + "http":{ + "method":"GET", + "requestUri":"/networks/{networkId}/guest-users", + "responseCode":200 + }, + "input":{"shape":"ListGuestUsersRequest"}, + "output":{"shape":"ListGuestUsersResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Retrieves a paginated list of guest users who have communicated with your Wickr network. Guest users are external users from federated networks who can communicate with network members.

", + "readonly":true + }, + "ListNetworks":{ + "name":"ListNetworks", + "http":{ + "method":"GET", + "requestUri":"/networks", + "responseCode":200 + }, + "input":{"shape":"ListNetworksRequest"}, + "output":{"shape":"ListNetworksResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Retrieves a paginated list of all Wickr networks associated with your Amazon Web Services account. You can sort the results by network ID or name.

", + "readonly":true + }, + "ListSecurityGroupUsers":{ + "name":"ListSecurityGroupUsers", + "http":{ + "method":"GET", + "requestUri":"/networks/{networkId}/security-groups/{groupId}/users", + "responseCode":200 + }, + "input":{"shape":"ListSecurityGroupUsersRequest"}, + "output":{"shape":"ListSecurityGroupUsersResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Retrieves a paginated list of users who belong to a specific security group in a Wickr network.

", + "readonly":true + }, + "ListSecurityGroups":{ + "name":"ListSecurityGroups", + "http":{ + "method":"GET", + "requestUri":"/networks/{networkId}/security-groups", + "responseCode":200 + }, + "input":{"shape":"ListSecurityGroupsRequest"}, + "output":{"shape":"ListSecurityGroupsResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Retrieves a paginated list of security groups in a specified Wickr network. You can sort the results by various criteria.

", + "readonly":true + }, + "ListUsers":{ + "name":"ListUsers", + "http":{ + "method":"GET", + "requestUri":"/networks/{networkId}/users", + "responseCode":200 + }, + "input":{"shape":"ListUsersRequest"}, + "output":{"shape":"ListUsersResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Retrieves a paginated list of users in a specified Wickr network. You can filter and sort the results based on various criteria such as name, status, or security group membership.

", + "readonly":true + }, + "RegisterOidcConfig":{ + "name":"RegisterOidcConfig", + "http":{ + "method":"POST", + "requestUri":"/networks/{networkId}/oidc/save", + "responseCode":200 + }, + "input":{"shape":"RegisterOidcConfigRequest"}, + "output":{"shape":"RegisterOidcConfigResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Registers and saves an OpenID Connect (OIDC) configuration for a Wickr network, enabling Single Sign-On (SSO) authentication through an identity provider.

" + }, + "RegisterOidcConfigTest":{ + "name":"RegisterOidcConfigTest", + "http":{ + "method":"POST", + "requestUri":"/networks/{networkId}/oidc/test", + "responseCode":200 + }, + "input":{"shape":"RegisterOidcConfigTestRequest"}, + "output":{"shape":"RegisterOidcConfigTestResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Tests an OpenID Connect (OIDC) configuration for a Wickr network by validating the connection to the identity provider and retrieving its supported capabilities.

" + }, + "UpdateBot":{ + "name":"UpdateBot", + "http":{ + "method":"PATCH", + "requestUri":"/networks/{networkId}/bots/{botId}", + "responseCode":200 + }, + "input":{"shape":"UpdateBotRequest"}, + "output":{"shape":"UpdateBotResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Updates the properties of an existing bot in a Wickr network. This operation allows you to modify the bot's display name, security group, password, or suspension status.

", + "idempotent":true + }, + "UpdateDataRetention":{ + "name":"UpdateDataRetention", + "http":{ + "method":"PATCH", + "requestUri":"/networks/{networkId}/data-retention-bots", + "responseCode":200 + }, + "input":{"shape":"UpdateDataRetentionRequest"}, + "output":{"shape":"UpdateDataRetentionResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Updates the data retention bot settings, allowing you to enable or disable the data retention service, or acknowledge the public key message.

", + "idempotent":true + }, + "UpdateGuestUser":{ + "name":"UpdateGuestUser", + "http":{ + "method":"PATCH", + "requestUri":"/networks/{networkId}/guest-users/{usernameHash}", + "responseCode":200 + }, + "input":{"shape":"UpdateGuestUserRequest"}, + "output":{"shape":"UpdateGuestUserResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Updates the block status of a guest user in a Wickr network. This operation allows you to block or unblock a guest user from accessing the network.

" + }, + "UpdateNetwork":{ + "name":"UpdateNetwork", + "http":{ + "method":"PATCH", + "requestUri":"/networks/{networkId}", + "responseCode":200 + }, + "input":{"shape":"UpdateNetworkRequest"}, + "output":{"shape":"UpdateNetworkResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Updates the properties of an existing Wickr network, such as its name or encryption key configuration.

", + "idempotent":true + }, + "UpdateNetworkSettings":{ + "name":"UpdateNetworkSettings", + "http":{ + "method":"PATCH", + "requestUri":"/networks/{networkId}/settings", + "responseCode":200 + }, + "input":{"shape":"UpdateNetworkSettingsRequest"}, + "output":{"shape":"UpdateNetworkSettingsResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Updates network-level settings for a Wickr network. You can modify settings such as client metrics, data retention, and other network-wide options.

", + "idempotent":true + }, + "UpdateSecurityGroup":{ + "name":"UpdateSecurityGroup", + "http":{ + "method":"PATCH", + "requestUri":"/networks/{networkId}/security-groups/{groupId}", + "responseCode":200 + }, + "input":{"shape":"UpdateSecurityGroupRequest"}, + "output":{"shape":"UpdateSecurityGroupResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Updates the properties of an existing security group in a Wickr network, such as its name or settings.

", + "idempotent":true + }, + "UpdateUser":{ + "name":"UpdateUser", + "http":{ + "method":"PATCH", + "requestUri":"/networks/{networkId}/users", + "responseCode":200 + }, + "input":{"shape":"UpdateUserRequest"}, + "output":{"shape":"UpdateUserResponse"}, + "errors":[ + {"shape":"ValidationError"}, + {"shape":"BadRequestError"}, + {"shape":"ResourceNotFoundError"}, + {"shape":"ForbiddenError"}, + {"shape":"UnauthorizedError"}, + {"shape":"InternalServerError"}, + {"shape":"RateLimitError"} + ], + "documentation":"

Updates the properties of an existing user in a Wickr network. This operation allows you to modify the user's name, password, security group membership, and invite code settings.

codeValidation, inviteCode, and inviteCodeTtl are restricted to networks under preview only.

", + "idempotent":true + } + }, + "shapes":{ + "AccessLevel":{ + "type":"string", + "enum":[ + "STANDARD", + "PREMIUM" + ] + }, + "AppIds":{ + "type":"list", + "member":{"shape":"GenericString"} + }, + "BadRequestError":{ + "type":"structure", + "members":{ + "message":{ + "shape":"GenericString", + "documentation":"

A detailed message explaining what was wrong with the request and how to correct it.

" + } + }, + "documentation":"

The request was invalid or malformed. This error occurs when the request parameters do not meet the API requirements, such as invalid field values, missing required parameters, or improperly formatted data.

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "BasicDeviceObject":{ + "type":"structure", + "members":{ + "appId":{ + "shape":"GenericString", + "documentation":"

The unique application ID for the Wickr app on this device.

" + }, + "created":{ + "shape":"GenericString", + "documentation":"

The timestamp when the device first appeared in the Wickr database.

" + }, + "lastLogin":{ + "shape":"GenericString", + "documentation":"

The timestamp when the device last successfully logged into Wickr. This is also used to determine SSO idle time.

" + }, + "statusText":{ + "shape":"GenericString", + "documentation":"

The current status of the device, either 'Active' or 'Reset' depending on whether the device is currently active or has been marked for reset.

" + }, + "suspend":{ + "shape":"Boolean", + "documentation":"

Indicates whether the device is suspended.

" + }, + "type":{ + "shape":"GenericString", + "documentation":"

The operating system of the device (e.g., 'MacOSX', 'Windows', 'iOS', 'Android').

" + } + }, + "documentation":"

Represents a device where a user has logged into Wickr, containing information about the device's type, status, and login history.

" + }, + "BatchCreateUserRequest":{ + "type":"structure", + "required":[ + "networkId", + "users" + ], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network where users will be created.

", + "location":"uri", + "locationName":"networkId" + }, + "users":{ + "shape":"BatchCreateUserRequestItems", + "documentation":"

A list of user objects containing the details for each user to be created, including username, name, security groups, and optional invite codes. Maximum 50 users per batch request.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique identifier for this request to ensure idempotency. If you retry a request with the same client token, the service will return the same response without creating duplicate users.

", + "idempotencyToken":true, + "location":"header", + "locationName":"X-Client-Token" + } + } + }, + "BatchCreateUserRequestItem":{ + "type":"structure", + "required":[ + "securityGroupIds", + "username" + ], + "members":{ + "firstName":{ + "shape":"SensitiveString", + "documentation":"

The first name of the user.

" + }, + "lastName":{ + "shape":"SensitiveString", + "documentation":"

The last name of the user.

" + }, + "securityGroupIds":{ + "shape":"SecurityGroupIdList", + "documentation":"

A list of security group IDs to which the user should be assigned.

" + }, + "username":{ + "shape":"GenericString", + "documentation":"

The email address or username for the user. Must be unique within the network.

" + }, + "inviteCode":{ + "shape":"GenericString", + "documentation":"

A custom invite code for the user. If not provided, one will be generated automatically.

" + }, + "inviteCodeTtl":{ + "shape":"Integer", + "documentation":"

The time-to-live for the invite code in days. After this period, the invite code will expire.

" + }, + "codeValidation":{ + "shape":"Boolean", + "documentation":"

Indicates whether the user can be verified through a custom invite code.

" + } + }, + "documentation":"

Contains the details for a single user to be created in a batch user creation request.

A user can only be assigned to a single security group. Attempting to add a user to multiple security groups is not supported and will result in an error.

codeValidation, inviteCode, and inviteCodeTtl are restricted to networks under preview only.

" + }, + "BatchCreateUserRequestItems":{ + "type":"list", + "member":{"shape":"BatchCreateUserRequestItem"} + }, + "BatchCreateUserResponse":{ + "type":"structure", + "members":{ + "message":{ + "shape":"GenericString", + "documentation":"

A message indicating the overall result of the batch operation.

" + }, + "successful":{ + "shape":"Users", + "documentation":"

A list of user objects that were successfully created, including their assigned user IDs and invite codes.

" + }, + "failed":{ + "shape":"BatchUserErrorResponseItems", + "documentation":"

A list of user creation attempts that failed, including error details explaining why each user could not be created.

" + } + } + }, + "BatchDeleteUserRequest":{ + "type":"structure", + "required":[ + "networkId", + "userIds" + ], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network from which users will be deleted.

", + "location":"uri", + "locationName":"networkId" + }, + "userIds":{ + "shape":"UserIds", + "documentation":"

A list of user IDs identifying the users to be deleted from the network. Maximum 50 users per batch request.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique identifier for this request to ensure idempotency. If you retry a request with the same client token, the service will return the same response without attempting to delete users again.

", + "idempotencyToken":true, + "location":"header", + "locationName":"X-Client-Token" + } + } + }, + "BatchDeleteUserResponse":{ + "type":"structure", + "members":{ + "message":{ + "shape":"GenericString", + "documentation":"

A message indicating the overall result of the batch deletion operation.

" + }, + "successful":{ + "shape":"BatchUserSuccessResponseItems", + "documentation":"

A list of user IDs that were successfully deleted from the network.

" + }, + "failed":{ + "shape":"BatchUserErrorResponseItems", + "documentation":"

A list of user deletion attempts that failed, including error details explaining why each user could not be deleted.

" + } + } + }, + "BatchDeviceErrorResponseItem":{ + "type":"structure", + "required":["appId"], + "members":{ + "field":{ + "shape":"GenericString", + "documentation":"

The field that caused the error.

" + }, + "reason":{ + "shape":"GenericString", + "documentation":"

A description of why the device operation failed.

" + }, + "appId":{ + "shape":"GenericString", + "documentation":"

The application ID of the device that failed to be processed.

" + } + }, + "documentation":"

Contains error information for a device operation that failed in a batch device request.

" + }, + "BatchDeviceErrorResponseItems":{ + "type":"list", + "member":{"shape":"BatchDeviceErrorResponseItem"} + }, + "BatchDeviceSuccessResponseItem":{ + "type":"structure", + "required":["appId"], + "members":{ + "appId":{ + "shape":"GenericString", + "documentation":"

The application ID of the device that was successfully processed.

" + } + }, + "documentation":"

Contains information about a device that was successfully processed in a batch device operation.

" + }, + "BatchDeviceSuccessResponseItems":{ + "type":"list", + "member":{"shape":"BatchDeviceSuccessResponseItem"} + }, + "BatchLookupUserUnameRequest":{ + "type":"structure", + "required":[ + "networkId", + "unames" + ], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network where the users will be looked up.

", + "location":"uri", + "locationName":"networkId" + }, + "unames":{ + "shape":"Unames", + "documentation":"

A list of username hashes (unames) to look up. Each uname is a unique identifier for a user's username. Maximum 50 unames per batch request.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique identifier for this request to ensure idempotency.

", + "idempotencyToken":true, + "location":"header", + "locationName":"X-Client-Token" + } + } + }, + "BatchLookupUserUnameResponse":{ + "type":"structure", + "members":{ + "message":{ + "shape":"GenericString", + "documentation":"

A message indicating the overall result of the batch lookup operation.

" + }, + "successful":{ + "shape":"BatchUnameSuccessResponseItems", + "documentation":"

A list of successfully resolved username hashes with their corresponding email addresses.

" + }, + "failed":{ + "shape":"BatchUnameErrorResponseItems", + "documentation":"

A list of username hash lookup attempts that failed, including error details explaining why each lookup failed.

" + } + } + }, + "BatchReinviteUserRequest":{ + "type":"structure", + "required":[ + "networkId", + "userIds" + ], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network where users will be reinvited.

", + "location":"uri", + "locationName":"networkId" + }, + "userIds":{ + "shape":"UserIds", + "documentation":"

A list of user IDs identifying the users to be reinvited to the network. Maximum 50 users per batch request.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique identifier for this request to ensure idempotency.

", + "idempotencyToken":true, + "location":"header", + "locationName":"X-Client-Token" + } + } + }, + "BatchReinviteUserResponse":{ + "type":"structure", + "members":{ + "message":{ + "shape":"GenericString", + "documentation":"

A message indicating the overall result of the batch reinvitation operation.

" + }, + "successful":{ + "shape":"BatchUserSuccessResponseItems", + "documentation":"

A list of user IDs that were successfully reinvited.

" + }, + "failed":{ + "shape":"BatchUserErrorResponseItems", + "documentation":"

A list of reinvitation attempts that failed, including error details explaining why each user could not be reinvited.

" + } + } + }, + "BatchResetDevicesForUserRequest":{ + "type":"structure", + "required":[ + "networkId", + "userId", + "appIds" + ], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network containing the user whose devices will be reset.

", + "location":"uri", + "locationName":"networkId" + }, + "userId":{ + "shape":"UserId", + "documentation":"

The ID of the user whose devices will be reset.

", + "location":"uri", + "locationName":"userId" + }, + "appIds":{ + "shape":"AppIds", + "documentation":"

A list of application IDs identifying the specific devices to be reset for the user. Maximum 50 devices per batch request.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique identifier for this request to ensure idempotency.

", + "idempotencyToken":true, + "location":"header", + "locationName":"X-Client-Token" + } + } + }, + "BatchResetDevicesForUserResponse":{ + "type":"structure", + "members":{ + "message":{ + "shape":"GenericString", + "documentation":"

A message indicating the overall result of the batch device reset operation.

" + }, + "successful":{ + "shape":"BatchDeviceSuccessResponseItems", + "documentation":"

A list of application IDs that were successfully reset.

" + }, + "failed":{ + "shape":"BatchDeviceErrorResponseItems", + "documentation":"

A list of device reset attempts that failed, including error details explaining why each device could not be reset.

" + } + } + }, + "BatchToggleUserSuspendStatusRequest":{ + "type":"structure", + "required":[ + "networkId", + "suspend", + "userIds" + ], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network where users will be suspended or unsuspended.

", + "location":"uri", + "locationName":"networkId" + }, + "suspend":{ + "shape":"Boolean", + "documentation":"

A boolean value indicating whether to suspend (true) or unsuspend (false) the specified users.

", + "location":"querystring", + "locationName":"suspend" + }, + "userIds":{ + "shape":"UserIds", + "documentation":"

A list of user IDs identifying the users whose suspend status will be toggled. Maximum 50 users per batch request.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique identifier for this request to ensure idempotency.

", + "idempotencyToken":true, + "location":"header", + "locationName":"X-Client-Token" + } + } + }, + "BatchToggleUserSuspendStatusResponse":{ + "type":"structure", + "members":{ + "message":{ + "shape":"GenericString", + "documentation":"

A message indicating the overall result of the batch suspend status toggle operation.

" + }, + "successful":{ + "shape":"BatchUserSuccessResponseItems", + "documentation":"

A list of user IDs whose suspend status was successfully toggled.

" + }, + "failed":{ + "shape":"BatchUserErrorResponseItems", + "documentation":"

A list of suspend status toggle attempts that failed, including error details explaining why each user's status could not be changed.

" + } + } + }, + "BatchUnameErrorResponseItem":{ + "type":"structure", + "required":["uname"], + "members":{ + "field":{ + "shape":"GenericString", + "documentation":"

The field that caused the error.

" + }, + "reason":{ + "shape":"GenericString", + "documentation":"

A description of why the username hash lookup failed.

" + }, + "uname":{ + "shape":"Uname", + "documentation":"

The username hash that failed to be looked up.

" + } + }, + "documentation":"

Contains error information for a username hash lookup that failed in a batch uname lookup request.

" + }, + "BatchUnameErrorResponseItems":{ + "type":"list", + "member":{"shape":"BatchUnameErrorResponseItem"} + }, + "BatchUnameSuccessResponseItem":{ + "type":"structure", + "required":[ + "uname", + "username" + ], + "members":{ + "uname":{ + "shape":"Uname", + "documentation":"

The username hash that was successfully resolved.

" + }, + "username":{ + "shape":"GenericString", + "documentation":"

The email address or username corresponding to the username hash.

" + } + }, + "documentation":"

Contains information about a username hash that was successfully resolved in a batch uname lookup operation.

" + }, + "BatchUnameSuccessResponseItems":{ + "type":"list", + "member":{"shape":"BatchUnameSuccessResponseItem"} + }, + "BatchUserErrorResponseItem":{ + "type":"structure", + "required":["userId"], + "members":{ + "field":{ + "shape":"GenericString", + "documentation":"

The field that caused the error.

" + }, + "reason":{ + "shape":"GenericString", + "documentation":"

A description of why the user operation failed.

" + }, + "userId":{ + "shape":"UserId", + "documentation":"

The user ID associated with the failed operation.

" + } + }, + "documentation":"

Contains error information for a user operation that failed in a batch user request.

" + }, + "BatchUserErrorResponseItems":{ + "type":"list", + "member":{"shape":"BatchUserErrorResponseItem"} + }, + "BatchUserSuccessResponseItem":{ + "type":"structure", + "required":["userId"], + "members":{ + "userId":{ + "shape":"UserId", + "documentation":"

The user ID that was successfully processed.

" + } + }, + "documentation":"

Contains information about a user that was successfully processed in a batch user operation.

" + }, + "BatchUserSuccessResponseItems":{ + "type":"list", + "member":{"shape":"BatchUserSuccessResponseItem"} + }, + "BlockedGuestUser":{ + "type":"structure", + "required":[ + "username", + "admin", + "modified", + "usernameHash" + ], + "members":{ + "username":{ + "shape":"GenericString", + "documentation":"

The username of the blocked guest user.

" + }, + "admin":{ + "shape":"GenericString", + "documentation":"

The username of the administrator who blocked this guest user.

" + }, + "modified":{ + "shape":"GenericString", + "documentation":"

The timestamp when the guest user was blocked or last modified.

" + }, + "usernameHash":{ + "shape":"GenericString", + "documentation":"

The unique username hash identifier for the blocked guest user.

" + } + }, + "documentation":"

Represents a guest user who has been blocked from accessing a Wickr network.

" + }, + "BlockedGuestUserList":{ + "type":"list", + "member":{"shape":"BlockedGuestUser"} + }, + "Boolean":{ + "type":"boolean", + "box":true + }, + "Bot":{ + "type":"structure", + "members":{ + "botId":{ + "shape":"GenericString", + "documentation":"

The unique identifier of the bot.

" + }, + "displayName":{ + "shape":"GenericString", + "documentation":"

The display name of the bot that is visible to users.

" + }, + "username":{ + "shape":"GenericString", + "documentation":"

The username of the bot.

" + }, + "uname":{ + "shape":"GenericString", + "documentation":"

The unique username hash identifier for the bot.

" + }, + "pubkey":{ + "shape":"GenericString", + "documentation":"

The public key of the bot used for encryption.

" + }, + "status":{ + "shape":"BotStatus", + "documentation":"

The current status of the bot (1 for pending, 2 for active).

" + }, + "groupId":{ + "shape":"GenericString", + "documentation":"

The ID of the security group to which the bot belongs.

" + }, + "hasChallenge":{ + "shape":"Boolean", + "documentation":"

Indicates whether the bot has a password set.

" + }, + "suspended":{ + "shape":"Boolean", + "documentation":"

Indicates whether the bot is currently suspended.

" + }, + "lastLogin":{ + "shape":"GenericString", + "documentation":"

The timestamp of the bot's last login.

" + } + }, + "documentation":"

Represents a bot account in a Wickr network with all its informational fields.

" + }, + "BotId":{ + "type":"string", + "max":10, + "min":1, + "pattern":"[0-9]+" + }, + "BotStatus":{ + "type":"integer", + "box":true + }, + "Bots":{ + "type":"list", + "member":{"shape":"Bot"} + }, + "CallingSettings":{ + "type":"structure", + "members":{ + "canStart11Call":{ + "shape":"Boolean", + "documentation":"

Specifies whether users can start one-to-one calls.

" + }, + "canVideoCall":{ + "shape":"Boolean", + "documentation":"

Specifies whether users can make video calls (as opposed to audio-only calls). Valid only when audio call (canStart11Call) is enabled.

" + }, + "forceTcpCall":{ + "shape":"Boolean", + "documentation":"

When enabled, forces all calls to use TCP protocol instead of UDP for network traversal.

" + } + }, + "documentation":"

Defines the calling feature permissions and settings for users in a security group, controlling what types of calls users can initiate and participate in.

" + }, + "ClientToken":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[a-zA-Z0-9-_:]+" + }, + "CreateBotRequest":{ + "type":"structure", + "required":[ + "networkId", + "username", + "groupId", + "challenge" + ], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network where the bot will be created.

", + "location":"uri", + "locationName":"networkId" + }, + "username":{ + "shape":"GenericString", + "documentation":"

The username for the bot. This must be unique within the network and follow the network's naming conventions.

" + }, + "displayName":{ + "shape":"GenericString", + "documentation":"

The display name for the bot that will be visible to users in the network.

" + }, + "groupId":{ + "shape":"GenericString", + "documentation":"

The ID of the security group to which the bot will be assigned.

" + }, + "challenge":{ + "shape":"SensitiveString", + "documentation":"

The password for the bot account.

" + } + } + }, + "CreateBotResponse":{ + "type":"structure", + "required":["botId"], + "members":{ + "message":{ + "shape":"GenericString", + "documentation":"

A message indicating the result of the bot creation operation.

" + }, + "botId":{ + "shape":"BotId", + "documentation":"

The unique identifier assigned to the newly created bot.

" + }, + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the network where the bot was created.

" + }, + "username":{ + "shape":"GenericString", + "documentation":"

The username of the newly created bot.

" + }, + "displayName":{ + "shape":"GenericString", + "documentation":"

The display name of the newly created bot.

" + }, + "groupId":{ + "shape":"GenericString", + "documentation":"

The ID of the security group to which the bot was assigned.

" + } + } + }, + "CreateDataRetentionBotChallengeRequest":{ + "type":"structure", + "required":["networkId"], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network containing the data retention bot.

", + "location":"uri", + "locationName":"networkId" + } + } + }, + "CreateDataRetentionBotChallengeResponse":{ + "type":"structure", + "required":["challenge"], + "members":{ + "challenge":{ + "shape":"SensitiveString", + "documentation":"

The newly generated challenge password for the data retention bot.

" + } + } + }, + "CreateDataRetentionBotRequest":{ + "type":"structure", + "required":["networkId"], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network where the data retention bot will be created.

", + "location":"uri", + "locationName":"networkId" + } + } + }, + "CreateDataRetentionBotResponse":{ + "type":"structure", + "members":{ + "message":{ + "shape":"GenericString", + "documentation":"

A message indicating that the data retention bot was successfully provisioned.

" + } + } + }, + "CreateNetworkRequest":{ + "type":"structure", + "required":[ + "networkName", + "accessLevel" + ], + "members":{ + "networkName":{ + "shape":"GenericString", + "documentation":"

The name for the new network. Must be between 1 and 20 characters.

" + }, + "accessLevel":{ + "shape":"AccessLevel", + "documentation":"

The access level for the network. Valid values are STANDARD or PREMIUM, which determine the features and capabilities available to network members.

" + }, + "enablePremiumFreeTrial":{ + "shape":"Boolean", + "documentation":"

Specifies whether to enable a premium free trial for the network. This parameter is optional and defaults to false. When set to true, the network starts with premium features for a limited trial period.

" + }, + "encryptionKeyArn":{ + "shape":"GenericString", + "documentation":"

The ARN of the Amazon Web Services KMS customer managed key to use for encrypting sensitive data in the network.

" + } + } + }, + "CreateNetworkResponse":{ + "type":"structure", + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The unique identifier assigned to the newly created network.

" + }, + "networkName":{ + "shape":"GenericString", + "documentation":"

The name of the newly created network.

" + }, + "encryptionKeyArn":{ + "shape":"GenericString", + "documentation":"

The ARN of the KMS key being used to encrypt sensitive data in the network.

" + } + } + }, + "CreateSecurityGroupRequest":{ + "type":"structure", + "required":[ + "networkId", + "name", + "securityGroupSettings" + ], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network where the security group will be created.

", + "location":"uri", + "locationName":"networkId" + }, + "name":{ + "shape":"GenericString", + "documentation":"

The name for the new security group.

" + }, + "securityGroupSettings":{ + "shape":"SecurityGroupSettingsRequest", + "documentation":"

The configuration settings for the security group, including permissions, federation settings, and feature controls.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique identifier for this request to ensure idempotency.

", + "idempotencyToken":true, + "location":"header", + "locationName":"X-Client-Token" + } + } + }, + "CreateSecurityGroupResponse":{ + "type":"structure", + "required":["securityGroup"], + "members":{ + "securityGroup":{ + "shape":"SecurityGroup", + "documentation":"

The details of the newly created security group, including its ID, name, and settings.

" + } + } + }, + "DataRetentionActionType":{ + "type":"string", + "enum":[ + "ENABLE", + "DISABLE", + "PUBKEY_MSG_ACK" + ] + }, + "DeleteBotRequest":{ + "type":"structure", + "required":[ + "networkId", + "botId" + ], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network from which the bot will be deleted.

", + "location":"uri", + "locationName":"networkId" + }, + "botId":{ + "shape":"BotId", + "documentation":"

The unique identifier of the bot to be deleted.

", + "location":"uri", + "locationName":"botId" + } + } + }, + "DeleteBotResponse":{ + "type":"structure", + "members":{ + "message":{ + "shape":"GenericString", + "documentation":"

A message indicating the result of the bot deletion operation.

" + } + } + }, + "DeleteDataRetentionBotRequest":{ + "type":"structure", + "required":["networkId"], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network from which the data retention bot will be deleted.

", + "location":"uri", + "locationName":"networkId" + } + } + }, + "DeleteDataRetentionBotResponse":{ + "type":"structure", + "members":{ + "message":{ + "shape":"GenericString", + "documentation":"

A message indicating that the data retention bot and all associated data were successfully deleted.

" + } + } + }, + "DeleteNetworkRequest":{ + "type":"structure", + "required":["networkId"], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network to delete.

", + "location":"uri", + "locationName":"networkId" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique identifier for this request to ensure idempotency. If you retry a request with the same client token, the service will return the same response without attempting to delete the network again.

", + "idempotencyToken":true, + "location":"header", + "locationName":"X-Client-Token" + } + } + }, + "DeleteNetworkResponse":{ + "type":"structure", + "members":{ + "message":{ + "shape":"GenericString", + "documentation":"

A message indicating that the network deletion has been initiated successfully.

" + } + } + }, + "DeleteSecurityGroupRequest":{ + "type":"structure", + "required":[ + "networkId", + "groupId" + ], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network from which the security group will be deleted.

", + "location":"uri", + "locationName":"networkId" + }, + "groupId":{ + "shape":"GenericString", + "documentation":"

The unique identifier of the security group to delete.

", + "location":"uri", + "locationName":"groupId" + } + } + }, + "DeleteSecurityGroupResponse":{ + "type":"structure", + "members":{ + "message":{ + "shape":"GenericString", + "documentation":"

A message indicating the result of the security group deletion operation.

" + }, + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the network from which the security group was deleted.

" + }, + "groupId":{ + "shape":"GenericString", + "documentation":"

The ID of the security group that was deleted.

" + } + } + }, + "Devices":{ + "type":"list", + "member":{"shape":"BasicDeviceObject"} + }, + "ErrorDetail":{ + "type":"structure", + "members":{ + "field":{ + "shape":"GenericString", + "documentation":"

The name of the field that contains an error or warning.

" + }, + "reason":{ + "shape":"GenericString", + "documentation":"

A detailed description of the error or warning.

" + } + }, + "documentation":"

Contains detailed error information explaining why an operation failed, including which field caused the error and the reason for the failure.

" + }, + "ErrorDetailList":{ + "type":"list", + "member":{"shape":"ErrorDetail"} + }, + "ForbiddenError":{ + "type":"structure", + "members":{ + "message":{ + "shape":"GenericString", + "documentation":"

A message explaining why access was denied and what permissions are required.

" + } + }, + "documentation":"

Access to the requested resource is forbidden. This error occurs when the authenticated user does not have the necessary permissions to perform the requested operation, even though they are authenticated.

", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "GenericString":{ + "type":"string", + "pattern":"[\\S\\s]*" + }, + "GetBotRequest":{ + "type":"structure", + "required":[ + "networkId", + "botId" + ], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network containing the bot.

", + "location":"uri", + "locationName":"networkId" + }, + "botId":{ + "shape":"BotId", + "documentation":"

The unique identifier of the bot to retrieve.

", + "location":"uri", + "locationName":"botId" + } + } + }, + "GetBotResponse":{ + "type":"structure", + "members":{ + "botId":{ + "shape":"GenericString", + "documentation":"

The unique identifier of the bot.

" + }, + "displayName":{ + "shape":"GenericString", + "documentation":"

The display name of the bot that is visible to users.

" + }, + "username":{ + "shape":"GenericString", + "documentation":"

The username of the bot.

" + }, + "uname":{ + "shape":"GenericString", + "documentation":"

The unique username hash identifier for the bot.

" + }, + "pubkey":{ + "shape":"GenericString", + "documentation":"

The public key of the bot used for encryption.

" + }, + "status":{ + "shape":"BotStatus", + "documentation":"

The current status of the bot (1 for pending, 2 for active).

" + }, + "groupId":{ + "shape":"GenericString", + "documentation":"

The ID of the security group to which the bot belongs.

" + }, + "hasChallenge":{ + "shape":"Boolean", + "documentation":"

Indicates whether the bot has a password set.

" + }, + "suspended":{ + "shape":"Boolean", + "documentation":"

Indicates whether the bot is currently suspended.

" + }, + "lastLogin":{ + "shape":"GenericString", + "documentation":"

The timestamp of the bot's last login.

" + } + } + }, + "GetBotsCountRequest":{ + "type":"structure", + "required":["networkId"], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network for which to retrieve bot counts.

", + "location":"uri", + "locationName":"networkId" + } + } + }, + "GetBotsCountResponse":{ + "type":"structure", + "required":[ + "pending", + "active", + "total" + ], + "members":{ + "pending":{ + "shape":"Integer", + "documentation":"

The number of bots with pending status (invited but not yet activated).

" + }, + "active":{ + "shape":"Integer", + "documentation":"

The number of bots with active status.

" + }, + "total":{ + "shape":"Integer", + "documentation":"

The total number of bots in the network (active and pending).

" + } + } + }, + "GetDataRetentionBotRequest":{ + "type":"structure", + "required":["networkId"], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network containing the data retention bot.

", + "location":"uri", + "locationName":"networkId" + } + } + }, + "GetDataRetentionBotResponse":{ + "type":"structure", + "members":{ + "botName":{ + "shape":"GenericString", + "documentation":"

The name of the data retention bot.

" + }, + "botExists":{ + "shape":"Boolean", + "documentation":"

Indicates whether a data retention bot exists in the network.

" + }, + "isBotActive":{ + "shape":"Boolean", + "documentation":"

Indicates whether the data retention bot is active and operational.

" + }, + "isDataRetentionBotRegistered":{ + "shape":"Boolean", + "documentation":"

Indicates whether the data retention bot has been registered with the network.

" + }, + "isDataRetentionServiceEnabled":{ + "shape":"Boolean", + "documentation":"

Indicates whether the data retention service is enabled for the network.

" + }, + "isPubkeyMsgAcked":{ + "shape":"Boolean", + "documentation":"

Indicates whether the public key message has been acknowledged by the bot.

" + } + } + }, + "GetGuestUserHistoryCountRequest":{ + "type":"structure", + "required":["networkId"], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network for which to retrieve guest user history.

", + "location":"uri", + "locationName":"networkId" + } + } + }, + "GetGuestUserHistoryCountResponse":{ + "type":"structure", + "required":["history"], + "members":{ + "history":{ + "shape":"GuestUserHistoryCountList", + "documentation":"

A list of historical guest user counts, organized by month and billing period.

" + } + } + }, + "GetNetworkRequest":{ + "type":"structure", + "required":["networkId"], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network to retrieve.

", + "location":"uri", + "locationName":"networkId" + } + } + }, + "GetNetworkResponse":{ + "type":"structure", + "required":[ + "networkId", + "networkName", + "accessLevel", + "awsAccountId", + "networkArn" + ], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The unique identifier of the network.

" + }, + "networkName":{ + "shape":"GenericString", + "documentation":"

The name of the network.

" + }, + "accessLevel":{ + "shape":"AccessLevel", + "documentation":"

The access level of the network (STANDARD or PREMIUM), which determines available features and capabilities.

" + }, + "awsAccountId":{ + "shape":"GenericString", + "documentation":"

The Amazon Web Services account ID that owns the network.

" + }, + "networkArn":{ + "shape":"GenericString", + "documentation":"

The Amazon Resource Name (ARN) of the network.

" + }, + "standing":{ + "shape":"Integer", + "documentation":"

The current standing or status of the network.

" + }, + "freeTrialExpiration":{ + "shape":"GenericString", + "documentation":"

The expiration date and time for the network's free trial period, if applicable.

" + }, + "migrationState":{ + "shape":"Integer", + "documentation":"

The SSO redirect URI migration state, managed by the SSO redirect migration wizard. Values: 0 (not started), 1 (in progress), or 2 (completed).

" + }, + "encryptionKeyArn":{ + "shape":"GenericString", + "documentation":"

The ARN of the Amazon Web Services KMS customer managed key used for encrypting sensitive data in the network.

" + } + } + }, + "GetNetworkSettingsRequest":{ + "type":"structure", + "required":["networkId"], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network whose settings will be retrieved.

", + "location":"uri", + "locationName":"networkId" + } + } + }, + "GetNetworkSettingsResponse":{ + "type":"structure", + "required":["settings"], + "members":{ + "settings":{ + "shape":"SettingsList", + "documentation":"

A list of network settings, where each setting includes a name, value, and type.

" + } + } + }, + "GetOidcInfoRequest":{ + "type":"structure", + "required":["networkId"], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network whose OIDC configuration will be retrieved.

", + "location":"uri", + "locationName":"networkId" + }, + "clientId":{ + "shape":"GenericString", + "documentation":"

The OAuth client ID for retrieving access tokens (optional).

", + "location":"querystring", + "locationName":"clientId" + }, + "code":{ + "shape":"GenericString", + "documentation":"

The authorization code for retrieving access tokens (optional).

", + "location":"querystring", + "locationName":"code" + }, + "grantType":{ + "shape":"GenericString", + "documentation":"

The OAuth grant type for retrieving access tokens (optional).

", + "location":"querystring", + "locationName":"grantType" + }, + "redirectUri":{ + "shape":"GenericString", + "documentation":"

The redirect URI for the OAuth flow (optional).

", + "location":"querystring", + "locationName":"redirectUri" + }, + "url":{ + "shape":"GenericString", + "documentation":"

The URL for the OIDC provider (optional).

", + "location":"querystring", + "locationName":"url" + }, + "clientSecret":{ + "shape":"SensitiveString", + "documentation":"

The OAuth client secret for retrieving access tokens (optional).

", + "location":"querystring", + "locationName":"clientSecret" + }, + "codeVerifier":{ + "shape":"GenericString", + "documentation":"

The PKCE code verifier for enhanced security in the OAuth flow (optional).

", + "location":"querystring", + "locationName":"codeVerifier" + }, + "certificate":{ + "shape":"GenericString", + "documentation":"

The CA certificate for secure communication with the OIDC provider (optional).

", + "location":"querystring", + "locationName":"certificate" + } + } + }, + "GetOidcInfoResponse":{ + "type":"structure", + "members":{ + "openidConnectInfo":{ + "shape":"OidcConfigInfo", + "documentation":"

The OpenID Connect configuration information for the network, including issuer, client ID, scopes, and other SSO settings.

" + }, + "tokenInfo":{ + "shape":"OidcTokenInfo", + "documentation":"

OAuth token information including access token, refresh token, and expiration details (only present if token parameters were provided in the request).

" + } + } + }, + "GetSecurityGroupRequest":{ + "type":"structure", + "required":[ + "networkId", + "groupId" + ], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network containing the security group.

", + "location":"uri", + "locationName":"networkId" + }, + "groupId":{ + "shape":"GenericString", + "documentation":"

The unique identifier of the security group to retrieve.

", + "location":"uri", + "locationName":"groupId" + } + } + }, + "GetSecurityGroupResponse":{ + "type":"structure", + "required":["securityGroup"], + "members":{ + "securityGroup":{ + "shape":"SecurityGroup", + "documentation":"

The detailed information about the security group, including all its settings and member counts.

" + } + } + }, + "GetUserRequest":{ + "type":"structure", + "required":[ + "networkId", + "userId" + ], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network containing the user.

", + "location":"uri", + "locationName":"networkId" + }, + "userId":{ + "shape":"UserId", + "documentation":"

The unique identifier of the user to retrieve.

", + "location":"uri", + "locationName":"userId" + }, + "startTime":{ + "shape":"SyntheticTimestamp_epoch_seconds", + "documentation":"

The start time for filtering the user's last activity. Only activity after this timestamp will be considered. Time is specified in epoch seconds.

", + "location":"querystring", + "locationName":"startTime" + }, + "endTime":{ + "shape":"SyntheticTimestamp_epoch_seconds", + "documentation":"

The end time for filtering the user's last activity. Only activity before this timestamp will be considered. Time is specified in epoch seconds.

", + "location":"querystring", + "locationName":"endTime" + } + } + }, + "GetUserResponse":{ + "type":"structure", + "required":["userId"], + "members":{ + "userId":{ + "shape":"UserId", + "documentation":"

The unique identifier of the user.

" + }, + "firstName":{ + "shape":"SensitiveString", + "documentation":"

The first name of the user.

" + }, + "lastName":{ + "shape":"SensitiveString", + "documentation":"

The last name of the user.

" + }, + "username":{ + "shape":"GenericString", + "documentation":"

The email address or username of the user.

" + }, + "isAdmin":{ + "shape":"Boolean", + "documentation":"

Indicates whether the user has administrator privileges in the network.

" + }, + "suspended":{ + "shape":"Boolean", + "documentation":"

Indicates whether the user is currently suspended.

" + }, + "status":{ + "shape":"Integer", + "documentation":"

The current status of the user (1 for pending, 2 for active).

" + }, + "lastActivity":{ + "shape":"Integer", + "documentation":"

The timestamp of the user's last activity in the network, specified in epoch seconds.

" + }, + "lastLogin":{ + "shape":"Integer", + "documentation":"

The timestamp of the user's last login to the network, specified in epoch seconds.

" + }, + "securityGroupIds":{ + "shape":"SecurityGroupIdList", + "documentation":"

A list of security group IDs to which the user belongs.

" + } + } + }, + "GetUsersCountRequest":{ + "type":"structure", + "required":["networkId"], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network for which to retrieve user counts.

", + "location":"uri", + "locationName":"networkId" + } + } + }, + "GetUsersCountResponse":{ + "type":"structure", + "required":[ + "pending", + "active", + "rejected", + "remaining", + "total" + ], + "members":{ + "pending":{ + "shape":"Integer", + "documentation":"

The number of users with pending status (invited but not yet accepted).

" + }, + "active":{ + "shape":"Integer", + "documentation":"

The number of users with active status in the network.

" + }, + "rejected":{ + "shape":"Integer", + "documentation":"

The number of users who have rejected network invitations.

" + }, + "remaining":{ + "shape":"Integer", + "documentation":"

The number of additional users that can be added to the network while maintaining premium free trial eligibility.

" + }, + "total":{ + "shape":"Integer", + "documentation":"

The total number of users in the network (active and pending combined).

" + } + } + }, + "GuestUser":{ + "type":"structure", + "required":[ + "billingPeriod", + "username", + "usernameHash" + ], + "members":{ + "billingPeriod":{ + "shape":"GenericString", + "documentation":"

The billing period when this guest user accessed the network (e.g., '2024-01').

" + }, + "username":{ + "shape":"GenericString", + "documentation":"

The username of the guest user.

" + }, + "usernameHash":{ + "shape":"GenericString", + "documentation":"

The unique username hash identifier for the guest user.

" + } + }, + "documentation":"

Represents a guest user who has accessed the network from a federated Wickr network.

" + }, + "GuestUserHistoryCount":{ + "type":"structure", + "required":[ + "month", + "count" + ], + "members":{ + "month":{ + "shape":"GenericString", + "documentation":"

The month and billing period in YYYY_MM format (e.g., '2024_01').

" + }, + "count":{ + "shape":"GenericString", + "documentation":"

The number of guest users who have communicated with your Wickr network during this billing period.

" + } + }, + "documentation":"

Contains the count of guest users for a specific billing period, used for tracking historical guest user activity.

" + }, + "GuestUserHistoryCountList":{ + "type":"list", + "member":{"shape":"GuestUserHistoryCount"} + }, + "GuestUserList":{ + "type":"list", + "member":{"shape":"GuestUser"} + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalServerError":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{ + "shape":"GenericString", + "documentation":"

A message describing the internal server error that occurred.

" + } + }, + "documentation":"

An unexpected error occurred on the server while processing the request. This indicates a problem with the Wickr service itself rather than with the request. If this error persists, contact Amazon Web Services Support.

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true + }, + "ListBlockedGuestUsersRequest":{ + "type":"structure", + "required":["networkId"], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network from which to list blocked guest users.

", + "location":"uri", + "locationName":"networkId" + }, + "maxResults":{ + "shape":"Integer", + "documentation":"

The maximum number of blocked guest users to return in a single page. Valid range is 1-100. Default is 10.

", + "location":"querystring", + "locationName":"maxResults" + }, + "sortDirection":{ + "shape":"SortDirection", + "documentation":"

The direction to sort results. Valid values are 'ASC' (ascending) or 'DESC' (descending). Default is 'DESC'.

", + "location":"querystring", + "locationName":"sortDirection" + }, + "sortFields":{ + "shape":"GenericString", + "documentation":"

The field to sort blocked guest users by. Accepted values include 'username', 'admin', and 'modified'.

", + "location":"querystring", + "locationName":"sortFields" + }, + "username":{ + "shape":"GenericString", + "documentation":"

Filter results to only include blocked guest users with usernames matching this value.

", + "location":"querystring", + "locationName":"username" + }, + "admin":{ + "shape":"GenericString", + "documentation":"

Filter results to only include blocked guest users that were blocked by this administrator.

", + "location":"querystring", + "locationName":"admin" + }, + "nextToken":{ + "shape":"GenericString", + "documentation":"

The token for retrieving the next page of results. This is returned from a previous request when there are more results available.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListBlockedGuestUsersResponse":{ + "type":"structure", + "required":["blocklist"], + "members":{ + "nextToken":{ + "shape":"GenericString", + "documentation":"

The token to use for retrieving the next page of results. If this is not present, there are no more results.

" + }, + "blocklist":{ + "shape":"BlockedGuestUserList", + "documentation":"

A list of blocked guest user objects within the current page.

" + } + } + }, + "ListBotsRequest":{ + "type":"structure", + "required":["networkId"], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network from which to list bots.

", + "location":"uri", + "locationName":"networkId" + }, + "nextToken":{ + "shape":"GenericString", + "documentation":"

The token for retrieving the next page of results. This is returned from a previous request when there are more results available.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"Integer", + "documentation":"

The maximum number of bots to return in a single page. Valid range is 1-100. Default is 10.

", + "location":"querystring", + "locationName":"maxResults" + }, + "sortFields":{ + "shape":"GenericString", + "documentation":"

The fields to sort bots by. Multiple fields can be specified by separating them with '+'. Accepted values include 'username', 'firstName', 'displayName', 'status', and 'groupId'.

", + "location":"querystring", + "locationName":"sortFields" + }, + "sortDirection":{ + "shape":"SortDirection", + "documentation":"

The direction to sort results. Valid values are 'ASC' (ascending) or 'DESC' (descending). Default is 'DESC'.

", + "location":"querystring", + "locationName":"sortDirection" + }, + "displayName":{ + "shape":"GenericString", + "documentation":"

Filter results to only include bots with display names matching this value.

", + "location":"querystring", + "locationName":"displayName" + }, + "username":{ + "shape":"GenericString", + "documentation":"

Filter results to only include bots with usernames matching this value.

", + "location":"querystring", + "locationName":"username" + }, + "status":{ + "shape":"BotStatus", + "documentation":"

Filter results to only include bots with this status (1 for pending, 2 for active).

", + "location":"querystring", + "locationName":"status" + }, + "groupId":{ + "shape":"GenericString", + "documentation":"

Filter results to only include bots belonging to this security group.

", + "location":"querystring", + "locationName":"groupId" + } + } + }, + "ListBotsResponse":{ + "type":"structure", + "required":["bots"], + "members":{ + "bots":{ + "shape":"Bots", + "documentation":"

A list of bot objects matching the specified filters and within the current page.

" + }, + "nextToken":{ + "shape":"GenericString", + "documentation":"

The token to use for retrieving the next page of results. If this is not present, there are no more results.

" + } + } + }, + "ListDevicesForUserRequest":{ + "type":"structure", + "required":[ + "networkId", + "userId" + ], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network containing the user.

", + "location":"uri", + "locationName":"networkId" + }, + "userId":{ + "shape":"UserId", + "documentation":"

The unique identifier of the user whose devices will be listed.

", + "location":"uri", + "locationName":"userId" + }, + "nextToken":{ + "shape":"GenericString", + "documentation":"

The token for retrieving the next page of results. This is returned from a previous request when there are more results available.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"Integer", + "documentation":"

The maximum number of devices to return in a single page. Valid range is 1-100. Default is 10.

", + "location":"querystring", + "locationName":"maxResults" + }, + "sortFields":{ + "shape":"GenericString", + "documentation":"

The fields to sort devices by. Multiple fields can be specified by separating them with '+'. Accepted values include 'lastlogin', 'type', 'suspend', and 'created'.

", + "location":"querystring", + "locationName":"sortFields" + }, + "sortDirection":{ + "shape":"SortDirection", + "documentation":"

The direction to sort results. Valid values are 'ASC' (ascending) or 'DESC' (descending). Default is 'DESC'.

", + "location":"querystring", + "locationName":"sortDirection" + } + } + }, + "ListDevicesForUserResponse":{ + "type":"structure", + "required":["devices"], + "members":{ + "nextToken":{ + "shape":"GenericString", + "documentation":"

The token to use for retrieving the next page of results. If this is not present, there are no more results.

" + }, + "devices":{ + "shape":"Devices", + "documentation":"

A list of device objects associated with the user within the current page.

" + } + } + }, + "ListGuestUsersRequest":{ + "type":"structure", + "required":["networkId"], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network from which to list guest users.

", + "location":"uri", + "locationName":"networkId" + }, + "maxResults":{ + "shape":"Integer", + "documentation":"

The maximum number of guest users to return in a single page. Valid range is 1-100. Default is 10.

", + "location":"querystring", + "locationName":"maxResults" + }, + "sortDirection":{ + "shape":"SortDirection", + "documentation":"

The direction to sort results. Valid values are 'ASC' (ascending) or 'DESC' (descending). Default is 'DESC'.

", + "location":"querystring", + "locationName":"sortDirection" + }, + "sortFields":{ + "shape":"GenericString", + "documentation":"

The field to sort guest users by. Accepted values include 'username' and 'billingPeriod'.

", + "location":"querystring", + "locationName":"sortFields" + }, + "username":{ + "shape":"GenericString", + "documentation":"

Filter results to only include guest users with usernames matching this value.

", + "location":"querystring", + "locationName":"username" + }, + "billingPeriod":{ + "shape":"GenericString", + "documentation":"

Filter results to only include guest users from this billing period (e.g., '2024-01').

", + "location":"querystring", + "locationName":"billingPeriod" + }, + "nextToken":{ + "shape":"GenericString", + "documentation":"

The token for retrieving the next page of results. This is returned from a previous request when there are more results available.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListGuestUsersResponse":{ + "type":"structure", + "required":["guestlist"], + "members":{ + "nextToken":{ + "shape":"GenericString", + "documentation":"

The token to use for retrieving the next page of results. If this is not present, there are no more results.

" + }, + "guestlist":{ + "shape":"GuestUserList", + "documentation":"

A list of guest user objects within the current page.

" + } + } + }, + "ListNetworksRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"Integer", + "documentation":"

The maximum number of networks to return in a single page. Valid range is 1-100. Default is 10.

", + "location":"querystring", + "locationName":"maxResults" + }, + "sortFields":{ + "shape":"GenericString", + "documentation":"

The field to sort networks by. Accepted values are 'networkId' and 'networkName'. Default is 'networkId'.

", + "location":"querystring", + "locationName":"sortFields" + }, + "sortDirection":{ + "shape":"SortDirection", + "documentation":"

The direction to sort results. Valid values are 'ASC' (ascending) or 'DESC' (descending). Default is 'DESC'.

", + "location":"querystring", + "locationName":"sortDirection" + }, + "nextToken":{ + "shape":"GenericString", + "documentation":"

The token for retrieving the next page of results. This is returned from a previous request when there are more results available.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListNetworksResponse":{ + "type":"structure", + "required":["networks"], + "members":{ + "networks":{ + "shape":"NetworkList", + "documentation":"

A list of network objects for the Amazon Web Services account.

" + }, + "nextToken":{ + "shape":"GenericString", + "documentation":"

The token to use for retrieving the next page of results. If this is not present, there are no more results.

" + } + } + }, + "ListSecurityGroupUsersRequest":{ + "type":"structure", + "required":[ + "networkId", + "groupId" + ], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network containing the security group.

", + "location":"uri", + "locationName":"networkId" + }, + "groupId":{ + "shape":"GenericString", + "documentation":"

The unique identifier of the security group whose users will be listed.

", + "location":"uri", + "locationName":"groupId" + }, + "nextToken":{ + "shape":"GenericString", + "documentation":"

The token for retrieving the next page of results. This is returned from a previous request when there are more results available.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"Integer", + "documentation":"

The maximum number of users to return in a single page. Valid range is 1-100. Default is 10.

", + "location":"querystring", + "locationName":"maxResults" + }, + "sortFields":{ + "shape":"GenericString", + "documentation":"

The field to sort users by. Multiple fields can be specified by separating them with '+'. Accepted values include 'username', 'firstName', and 'lastName'.

", + "location":"querystring", + "locationName":"sortFields" + }, + "sortDirection":{ + "shape":"SortDirection", + "documentation":"

The direction to sort results. Valid values are 'ASC' (ascending) or 'DESC' (descending). Default is 'DESC'.

", + "location":"querystring", + "locationName":"sortDirection" + } + } + }, + "ListSecurityGroupUsersResponse":{ + "type":"structure", + "required":["users"], + "members":{ + "users":{ + "shape":"Users", + "documentation":"

A list of user objects belonging to the security group within the current page.

" + }, + "nextToken":{ + "shape":"GenericString", + "documentation":"

The token to use for retrieving the next page of results. If this is not present, there are no more results.

" + } + } + }, + "ListSecurityGroupsRequest":{ + "type":"structure", + "required":["networkId"], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network from which to list security groups.

", + "location":"uri", + "locationName":"networkId" + }, + "nextToken":{ + "shape":"GenericString", + "documentation":"

The token for retrieving the next page of results. This is returned from a previous request when there are more results available.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"Integer", + "documentation":"

The maximum number of security groups to return in a single page. Valid range is 1-100. Default is 10.

", + "location":"querystring", + "locationName":"maxResults" + }, + "sortFields":{ + "shape":"GenericString", + "documentation":"

The field to sort security groups by. Accepted values include 'id' and 'name'.

", + "location":"querystring", + "locationName":"sortFields" + }, + "sortDirection":{ + "shape":"SortDirection", + "documentation":"

The direction to sort results. Valid values are 'ASC' (ascending) or 'DESC' (descending). Default is 'DESC'.

", + "location":"querystring", + "locationName":"sortDirection" + } + } + }, + "ListSecurityGroupsResponse":{ + "type":"structure", + "members":{ + "securityGroups":{ + "shape":"SecurityGroupList", + "documentation":"

A list of security group objects in the current page.

" + }, + "nextToken":{ + "shape":"GenericString", + "documentation":"

The token to use for retrieving the next page of results. If this is not present, there are no more results.

" + } + } + }, + "ListUsersRequest":{ + "type":"structure", + "required":["networkId"], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network from which to list users.

", + "location":"uri", + "locationName":"networkId" + }, + "nextToken":{ + "shape":"GenericString", + "documentation":"

The token for retrieving the next page of results. This is returned from a previous request when there are more results available.

", + "location":"querystring", + "locationName":"nextToken" + }, + "maxResults":{ + "shape":"Integer", + "documentation":"

The maximum number of users to return in a single page. Valid range is 1-100. Default is 10.

", + "location":"querystring", + "locationName":"maxResults" + }, + "sortFields":{ + "shape":"GenericString", + "documentation":"

The fields to sort users by. Multiple fields can be specified by separating them with '+'. Accepted values include 'username', 'firstName', 'lastName', 'status', and 'groupId'.

", + "location":"querystring", + "locationName":"sortFields" + }, + "sortDirection":{ + "shape":"SortDirection", + "documentation":"

The direction to sort results. Valid values are 'ASC' (ascending) or 'DESC' (descending). Default is 'DESC'.

", + "location":"querystring", + "locationName":"sortDirection" + }, + "firstName":{ + "shape":"SensitiveString", + "documentation":"

Filter results to only include users with first names matching this value.

", + "location":"querystring", + "locationName":"firstName" + }, + "lastName":{ + "shape":"SensitiveString", + "documentation":"

Filter results to only include users with last names matching this value.

", + "location":"querystring", + "locationName":"lastName" + }, + "username":{ + "shape":"GenericString", + "documentation":"

Filter results to only include users with usernames matching this value.

", + "location":"querystring", + "locationName":"username" + }, + "status":{ + "shape":"UserStatus", + "documentation":"

Filter results to only include users with this status (1 for pending, 2 for active).

", + "location":"querystring", + "locationName":"status" + }, + "groupId":{ + "shape":"GenericString", + "documentation":"

Filter results to only include users belonging to this security group.

", + "location":"querystring", + "locationName":"groupId" + } + } + }, + "ListUsersResponse":{ + "type":"structure", + "members":{ + "nextToken":{ + "shape":"GenericString", + "documentation":"

The token to use for retrieving the next page of results. If this is not present, there are no more results.

" + }, + "users":{ + "shape":"Users", + "documentation":"

A list of user objects matching the specified filters and within the current page.

" + } + } + }, + "Long":{ + "type":"long", + "box":true + }, + "Network":{ + "type":"structure", + "required":[ + "networkId", + "networkName", + "accessLevel", + "awsAccountId", + "networkArn" + ], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The unique identifier of the network.

" + }, + "networkName":{ + "shape":"GenericString", + "documentation":"

The name of the network.

" + }, + "accessLevel":{ + "shape":"AccessLevel", + "documentation":"

The access level of the network (STANDARD or PREMIUM), which determines available features and capabilities.

" + }, + "awsAccountId":{ + "shape":"GenericString", + "documentation":"

The Amazon Web Services account ID that owns the network.

" + }, + "networkArn":{ + "shape":"GenericString", + "documentation":"

The Amazon Resource Name (ARN) of the network.

" + }, + "standing":{ + "shape":"Integer", + "documentation":"

The current standing or status of the network.

" + }, + "freeTrialExpiration":{ + "shape":"GenericString", + "documentation":"

The expiration date and time for the network's free trial period, if applicable.

" + }, + "migrationState":{ + "shape":"Integer", + "documentation":"

The SSO redirect URI migration state, managed by the SSO redirect migration wizard. Values: 0 (not started), 1 (in progress), or 2 (completed).

" + }, + "encryptionKeyArn":{ + "shape":"GenericString", + "documentation":"

The ARN of the Amazon Web Services KMS customer managed key used for encrypting sensitive data in the network.

" + } + }, + "documentation":"

Represents a Wickr network with all its configuration and status information.

" + }, + "NetworkId":{ + "type":"string", + "max":8, + "min":8, + "pattern":"[0-9]{8}" + }, + "NetworkList":{ + "type":"list", + "member":{"shape":"Network"} + }, + "NetworkSettings":{ + "type":"structure", + "members":{ + "enableClientMetrics":{ + "shape":"Boolean", + "documentation":"

Allows Wickr clients to send anonymized performance and usage metrics to the Wickr backend server for service improvement and troubleshooting.

" + }, + "readReceiptConfig":{ + "shape":"ReadReceiptConfig", + "documentation":"

Configuration for read receipts at the network level, controlling the default behavior for whether senders can see when their messages have been read.

" + }, + "dataRetention":{ + "shape":"Boolean", + "documentation":"

Indicates whether the data retention feature is enabled for the network. When true, messages are captured by the data retention bot for compliance and archiving purposes.

" + } + }, + "documentation":"

Contains network-level configuration settings that apply to all users and security groups within a Wickr network.

" + }, + "OidcConfigInfo":{ + "type":"structure", + "required":[ + "companyId", + "scopes", + "issuer" + ], + "members":{ + "applicationName":{ + "shape":"GenericString", + "documentation":"

The name of the OIDC application as registered with the identity provider.

" + }, + "clientId":{ + "shape":"GenericString", + "documentation":"

The OAuth client ID assigned by the identity provider for authentication requests.

" + }, + "companyId":{ + "shape":"GenericString", + "documentation":"

Custom identifier your end users will use to sign in with SSO.

" + }, + "scopes":{ + "shape":"GenericString", + "documentation":"

The OAuth scopes requested from the identity provider, which determine what user information is accessible (e.g., 'openid profile email').

" + }, + "issuer":{ + "shape":"GenericString", + "documentation":"

The issuer URL of the identity provider, which serves as the base URL for OIDC endpoints and configuration discovery.

" + }, + "clientSecret":{ + "shape":"SensitiveString", + "documentation":"

The OAuth client secret used to authenticate the application with the identity provider.

" + }, + "secret":{ + "shape":"SensitiveString", + "documentation":"

An additional secret credential used by the identity provider for authentication.

" + }, + "redirectUrl":{ + "shape":"GenericString", + "documentation":"

The callback URL where the identity provider redirects users after successful authentication. This URL must be registered with the identity provider.

" + }, + "userId":{ + "shape":"GenericString", + "documentation":"

The claim field from the OIDC token to use as the unique user identifier (e.g., 'email', 'sub', or a custom claim).

" + }, + "customUsername":{ + "shape":"GenericString", + "documentation":"

A custom field mapping to extract the username from the OIDC token when the standard username claim is insufficient.

" + }, + "caCertificate":{ + "shape":"GenericString", + "documentation":"

The X.509 CA certificate for validating SSL/TLS connections to the identity provider when using self-signed or enterprise certificates.

" + }, + "applicationId":{ + "shape":"OidcConfigInfoApplicationIdInteger", + "documentation":"

The unique identifier for the registered OIDC application. Valid range is 1-10.

" + }, + "ssoTokenBufferMinutes":{ + "shape":"Integer", + "documentation":"

The grace period in minutes before the SSO token expires when the system should proactively refresh the token to maintain seamless user access.

" + }, + "extraAuthParams":{ + "shape":"GenericString", + "documentation":"

Additional authentication parameters to include in the OIDC authorization request as a query string. Useful for provider-specific extensions.

" + } + }, + "documentation":"

Contains the OpenID Connect (OIDC) configuration information for Single Sign-On (SSO) authentication, including identity provider settings and client credentials.

" + }, + "OidcConfigInfoApplicationIdInteger":{ + "type":"integer", + "box":true, + "max":10, + "min":1 + }, + "OidcTokenInfo":{ + "type":"structure", + "members":{ + "codeVerifier":{ + "shape":"GenericString", + "documentation":"

The PKCE (Proof Key for Code Exchange) code verifier, a cryptographically random string used to enhance security in the OAuth flow.

" + }, + "codeChallenge":{ + "shape":"GenericString", + "documentation":"

The PKCE code challenge, a transformed version of the code verifier sent during the authorization request for verification.

" + }, + "accessToken":{ + "shape":"GenericString", + "documentation":"

The OAuth access token that can be used to access protected resources on behalf of the authenticated user.

" + }, + "idToken":{ + "shape":"GenericString", + "documentation":"

The OpenID Connect ID token containing user identity information and authentication context as a signed JWT.

" + }, + "refreshToken":{ + "shape":"GenericString", + "documentation":"

The OAuth refresh token that can be used to obtain new access tokens without requiring the user to re-authenticate.

" + }, + "tokenType":{ + "shape":"GenericString", + "documentation":"

The type of access token issued, typically 'Bearer', which indicates how the token should be used in API requests.

" + }, + "expiresIn":{ + "shape":"Long", + "documentation":"

The lifetime of the access token in seconds, indicating when the token will expire and need to be refreshed.

" + } + }, + "documentation":"

Contains OAuth token information returned from the identity provider, including access tokens, ID tokens, and PKCE parameters used for secure authentication.

" + }, + "PasswordRequirements":{ + "type":"structure", + "members":{ + "lowercase":{ + "shape":"Integer", + "documentation":"

The minimum number of lowercase letters required in passwords.

" + }, + "minLength":{ + "shape":"Integer", + "documentation":"

The minimum password length in characters.

" + }, + "numbers":{ + "shape":"Integer", + "documentation":"

The minimum number of numeric characters required in passwords.

" + }, + "symbols":{ + "shape":"Integer", + "documentation":"

The minimum number of special symbol characters required in passwords.

" + }, + "uppercase":{ + "shape":"Integer", + "documentation":"

The minimum number of uppercase letters required in passwords.

" + } + }, + "documentation":"

Defines password complexity requirements for users in a security group, including minimum length and character type requirements.

" + }, + "PermittedNetworksList":{ + "type":"list", + "member":{"shape":"NetworkId"} + }, + "PermittedWickrEnterpriseNetwork":{ + "type":"structure", + "required":[ + "domain", + "networkId" + ], + "members":{ + "domain":{ + "shape":"GenericString", + "documentation":"

The domain identifier for the permitted Wickr enterprise network.

" + }, + "networkId":{ + "shape":"NetworkId", + "documentation":"

The network ID of the permitted Wickr enterprise network.

" + } + }, + "documentation":"

Identifies a Wickr enterprise network that is permitted for global federation, allowing users to communicate with members of the specified network.

" + }, + "PermittedWickrEnterpriseNetworksList":{ + "type":"list", + "member":{"shape":"PermittedWickrEnterpriseNetwork"} + }, + "RateLimitError":{ + "type":"structure", + "members":{ + "message":{ + "shape":"GenericString", + "documentation":"

A message indicating that the rate limit was exceeded and suggesting when to retry.

" + } + }, + "documentation":"

The request was throttled because too many requests were sent in a short period of time. Wait a moment and retry the request. Consider implementing exponential backoff in your application.

", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true + }, + "ReadReceiptConfig":{ + "type":"structure", + "members":{ + "status":{ + "shape":"Status", + "documentation":"

The read receipt status mode for the network.

" + } + }, + "documentation":"

Configuration for read receipts at the network level, controlling whether senders can see when their messages have been read.

" + }, + "RegisterOidcConfigRequest":{ + "type":"structure", + "required":[ + "networkId", + "companyId", + "issuer", + "scopes" + ], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network for which OIDC will be configured.

", + "location":"uri", + "locationName":"networkId" + }, + "companyId":{ + "shape":"GenericString", + "documentation":"

Custom identifier your end users will use to sign in with SSO.

" + }, + "customUsername":{ + "shape":"GenericString", + "documentation":"

A custom field mapping to extract the username from the OIDC token (optional).

The customUsername is only required if you use something other than email as the username field.

" + }, + "extraAuthParams":{ + "shape":"GenericString", + "documentation":"

Additional authentication parameters to include in the OIDC flow (optional).

" + }, + "issuer":{ + "shape":"GenericString", + "documentation":"

The issuer URL of the OIDC provider (e.g., 'https://login.example.com').

" + }, + "scopes":{ + "shape":"GenericString", + "documentation":"

The OAuth scopes to request from the OIDC provider (e.g., 'openid profile email').

" + }, + "secret":{ + "shape":"SensitiveString", + "documentation":"

The client secret for authenticating with the OIDC provider (optional).

" + }, + "ssoTokenBufferMinutes":{ + "shape":"Integer", + "documentation":"

The buffer time in minutes before the SSO token expires to refresh it (optional).

" + }, + "userId":{ + "shape":"GenericString", + "documentation":"

Unique identifier provided by your identity provider to authenticate the access request. Also referred to as clientID.

" + } + } + }, + "RegisterOidcConfigResponse":{ + "type":"structure", + "required":[ + "companyId", + "scopes", + "issuer" + ], + "members":{ + "applicationName":{ + "shape":"GenericString", + "documentation":"

The name of the registered OIDC application.

" + }, + "clientId":{ + "shape":"GenericString", + "documentation":"

The OAuth client ID assigned to the application.

" + }, + "companyId":{ + "shape":"GenericString", + "documentation":"

Custom identifier your end users will use to sign in with SSO.

" + }, + "scopes":{ + "shape":"GenericString", + "documentation":"

The OAuth scopes configured for the application.

" + }, + "issuer":{ + "shape":"GenericString", + "documentation":"

The issuer URL of the OIDC provider.

" + }, + "clientSecret":{ + "shape":"SensitiveString", + "documentation":"

The OAuth client secret for the application.

" + }, + "secret":{ + "shape":"SensitiveString", + "documentation":"

The client secret for authenticating with the OIDC provider.

" + }, + "redirectUrl":{ + "shape":"GenericString", + "documentation":"

The redirect URL configured for the OAuth flow.

" + }, + "userId":{ + "shape":"GenericString", + "documentation":"

The claim field being used as the user identifier.

" + }, + "customUsername":{ + "shape":"GenericString", + "documentation":"

The custom field mapping used for extracting the username.

" + }, + "caCertificate":{ + "shape":"GenericString", + "documentation":"

The CA certificate used for secure communication with the OIDC provider.

" + }, + "applicationId":{ + "shape":"RegisterOidcConfigResponseApplicationIdInteger", + "documentation":"

The unique identifier for the registered OIDC application.

" + }, + "ssoTokenBufferMinutes":{ + "shape":"Integer", + "documentation":"

The buffer time in minutes before the SSO token expires.

" + }, + "extraAuthParams":{ + "shape":"GenericString", + "documentation":"

The additional authentication parameters configured for the OIDC flow.

" + } + } + }, + "RegisterOidcConfigResponseApplicationIdInteger":{ + "type":"integer", + "box":true, + "max":10, + "min":1 + }, + "RegisterOidcConfigTestRequest":{ + "type":"structure", + "required":[ + "networkId", + "issuer", + "scopes" + ], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network for which the OIDC configuration will be tested.

", + "location":"uri", + "locationName":"networkId" + }, + "extraAuthParams":{ + "shape":"GenericString", + "documentation":"

Additional authentication parameters to include in the test (optional).

" + }, + "issuer":{ + "shape":"GenericString", + "documentation":"

The issuer URL of the OIDC provider to test.

" + }, + "scopes":{ + "shape":"GenericString", + "documentation":"

The OAuth scopes to test with the OIDC provider.

" + }, + "certificate":{ + "shape":"GenericString", + "documentation":"

The CA certificate for secure communication with the OIDC provider (optional).

" + } + } + }, + "RegisterOidcConfigTestResponse":{ + "type":"structure", + "members":{ + "tokenEndpoint":{ + "shape":"GenericString", + "documentation":"

The token endpoint URL discovered from the OIDC provider.

" + }, + "userinfoEndpoint":{ + "shape":"GenericString", + "documentation":"

The user info endpoint URL discovered from the OIDC provider.

" + }, + "responseTypesSupported":{ + "shape":"StringList", + "documentation":"

The OAuth response types supported by the OIDC provider.

" + }, + "scopesSupported":{ + "shape":"StringList", + "documentation":"

The OAuth scopes supported by the OIDC provider.

" + }, + "issuer":{ + "shape":"GenericString", + "documentation":"

The issuer URL confirmed by the OIDC provider.

" + }, + "authorizationEndpoint":{ + "shape":"GenericString", + "documentation":"

The authorization endpoint URL discovered from the OIDC provider.

" + }, + "endSessionEndpoint":{ + "shape":"GenericString", + "documentation":"

The end session endpoint URL for logging out users from the OIDC provider.

" + }, + "logoutEndpoint":{ + "shape":"GenericString", + "documentation":"

The logout endpoint URL for terminating user sessions.

" + }, + "grantTypesSupported":{ + "shape":"StringList", + "documentation":"

The OAuth grant types supported by the OIDC provider.

" + }, + "revocationEndpoint":{ + "shape":"GenericString", + "documentation":"

The token revocation endpoint URL for invalidating tokens.

" + }, + "tokenEndpointAuthMethodsSupported":{ + "shape":"StringList", + "documentation":"

The authentication methods supported by the token endpoint.

" + }, + "microsoftMultiRefreshToken":{ + "shape":"Boolean", + "documentation":"

Indicates whether the provider supports Microsoft multi-refresh tokens.

" + } + } + }, + "ResourceNotFoundError":{ + "type":"structure", + "members":{ + "message":{ + "shape":"GenericString", + "documentation":"

A message identifying which resource was not found.

" + } + }, + "documentation":"

The requested resource could not be found. This error occurs when you try to access or modify a network, user, bot, security group, or other resource that doesn't exist or has been deleted.

", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "SecurityGroup":{ + "type":"structure", + "required":[ + "activeMembers", + "botMembers", + "id", + "isDefault", + "name", + "modified", + "securityGroupSettings" + ], + "members":{ + "activeMembers":{ + "shape":"Integer", + "documentation":"

The number of active user members currently in the security group.

" + }, + "botMembers":{ + "shape":"Integer", + "documentation":"

The number of bot members currently in the security group.

" + }, + "activeDirectoryGuid":{ + "shape":"GenericString", + "documentation":"

The GUID of the Active Directory group associated with this security group, if synchronized with LDAP.

" + }, + "id":{ + "shape":"GenericString", + "documentation":"

The unique identifier of the security group.

" + }, + "isDefault":{ + "shape":"Boolean", + "documentation":"

Indicates whether this is the default security group for the network. Each network has only one default group.

" + }, + "name":{ + "shape":"GenericString", + "documentation":"

The human-readable name of the security group.

" + }, + "modified":{ + "shape":"Integer", + "documentation":"

The timestamp when the security group was last modified, specified in epoch seconds.

" + }, + "securityGroupSettings":{ + "shape":"SecurityGroupSettings", + "documentation":"

The comprehensive configuration settings that define capabilities and restrictions for members of this security group.

" + } + }, + "documentation":"

Represents a security group in a Wickr network, containing membership statistics, configuration, and all permission settings that apply to its members.

" + }, + "SecurityGroupId":{ + "type":"string", + "pattern":"[\\S]+" + }, + "SecurityGroupIdList":{ + "type":"list", + "member":{"shape":"SecurityGroupId"} + }, + "SecurityGroupList":{ + "type":"list", + "member":{"shape":"SecurityGroup"} + }, + "SecurityGroupSettings":{ + "type":"structure", + "members":{ + "alwaysReauthenticate":{ + "shape":"Boolean", + "documentation":"

Requires users to reauthenticate every time they return to the application, providing an additional layer of security.

" + }, + "atakPackageValues":{ + "shape":"SecurityGroupStringList", + "documentation":"

Configuration values for ATAK (Android Team Awareness Kit) package integration, when ATAK is enabled.

" + }, + "calling":{ + "shape":"CallingSettings", + "documentation":"

The calling feature permissions and settings that control what types of calls users can initiate and participate in.

" + }, + "checkForUpdates":{ + "shape":"Boolean", + "documentation":"

Enables automatic checking for Wickr client updates to ensure users stay current with the latest version.

" + }, + "enableAtak":{ + "shape":"Boolean", + "documentation":"

Enables ATAK (Android Team Awareness Kit) integration for tactical communication and situational awareness.

" + }, + "enableCrashReports":{ + "shape":"Boolean", + "documentation":"

Allow users to report crashes.

" + }, + "enableFileDownload":{ + "shape":"Boolean", + "documentation":"

Specifies whether users can download files from messages to their devices.

" + }, + "enableGuestFederation":{ + "shape":"Boolean", + "documentation":"

Allows users to communicate with guest users from other Wickr networks and federated external networks.

" + }, + "enableNotificationPreview":{ + "shape":"Boolean", + "documentation":"

Enables message preview text in push notifications, allowing users to see message content before opening the app.

" + }, + "enableOpenAccessOption":{ + "shape":"Boolean", + "documentation":"

Allow users to avoid censorship when they are geo-blocked or have network limitations.

" + }, + "enableRestrictedGlobalFederation":{ + "shape":"Boolean", + "documentation":"

Enables restricted global federation, limiting external communication to only specified permitted networks.

" + }, + "filesEnabled":{ + "shape":"Boolean", + "documentation":"

Enables file sharing capabilities, allowing users to send and receive files in conversations.

" + }, + "forceDeviceLockout":{ + "shape":"Integer", + "documentation":"

Defines the number of failed login attempts before data stored on the device is reset. Should be less than lockoutThreshold.

" + }, + "forceOpenAccess":{ + "shape":"Boolean", + "documentation":"

Automatically enable and enforce Wickr open access on all devices. Valid only if the enableOpenAccessOption setting is enabled.

" + }, + "forceReadReceipts":{ + "shape":"Boolean", + "documentation":"

Allow user approved bots to read messages in rooms without using a slash command.

" + }, + "globalFederation":{ + "shape":"Boolean", + "documentation":"

Allows users to communicate with users on other Wickr instances (Wickr Enterprise) outside the current network.

" + }, + "isAtoEnabled":{ + "shape":"Boolean", + "documentation":"

Enforces a two-factor authentication when a user adds a new device to their account.

" + }, + "isLinkPreviewEnabled":{ + "shape":"Boolean", + "documentation":"

Enables automatic preview of links shared in messages, showing webpage thumbnails and descriptions.

" + }, + "locationAllowMaps":{ + "shape":"Boolean", + "documentation":"

Allows map integration in location sharing, enabling users to view shared locations on interactive maps. Only allowed when location setting is enabled.

" + }, + "locationEnabled":{ + "shape":"Boolean", + "documentation":"

Enables location sharing features, allowing users to share their current location with others.

" + }, + "maxAutoDownloadSize":{ + "shape":"Long", + "documentation":"

The maximum file size in bytes that will be automatically downloaded without user confirmation. Only allowed if fileDownload is enabled. Valid Values [512000 (low_quality), 7340032 (high_quality) ]

" + }, + "maxBor":{ + "shape":"Integer", + "documentation":"

The maximum burn-on-read (BOR) time in seconds, which determines how long messages remain visible before auto-deletion after being read.

" + }, + "maxTtl":{ + "shape":"Long", + "documentation":"

The maximum time-to-live (TTL) in seconds for messages, after which they will be automatically deleted from all devices.

" + }, + "messageForwardingEnabled":{ + "shape":"Boolean", + "documentation":"

Enables message forwarding, allowing users to forward messages from one conversation to another.

" + }, + "passwordRequirements":{ + "shape":"PasswordRequirements", + "documentation":"

The password complexity requirements that users must follow when creating or changing passwords.

" + }, + "presenceEnabled":{ + "shape":"Boolean", + "documentation":"

Enables presence indicators that show whether users are online, away, or offline.

" + }, + "quickResponses":{ + "shape":"SecurityGroupStringList", + "documentation":"

A list of pre-defined quick response message templates that users can send with a single tap.

" + }, + "showMasterRecoveryKey":{ + "shape":"Boolean", + "documentation":"

Users will get a master recovery key that can be used to securely sign in to their Wickr account without having access to their primary device for authentication. Available in SSO enabled network.

" + }, + "shredder":{ + "shape":"ShredderSettings", + "documentation":"

The message shredder configuration that controls secure deletion of messages and files from devices.

" + }, + "ssoMaxIdleMinutes":{ + "shape":"Integer", + "documentation":"

The duration for which a user's SSO session remains inactive before automatically logging them out for security. Available in SSO enabled network.

" + }, + "federationMode":{ + "shape":"Integer", + "documentation":"

The local federation mode controlling how users can communicate with other networks. Values: 0 (none), 1 (federated), 2 (restricted).

" + }, + "lockoutThreshold":{ + "shape":"Integer", + "documentation":"

The number of failed password attempts before a user account is locked out.

" + }, + "permittedNetworks":{ + "shape":"PermittedNetworksList", + "documentation":"

A list of network IDs that are permitted for local federation when federation mode is set to restricted.

" + }, + "permittedWickrAwsNetworks":{ + "shape":"WickrAwsNetworksList", + "documentation":"

A list of permitted Wickr networks for global federation, restricting communication to specific approved networks.

" + }, + "permittedWickrEnterpriseNetworks":{ + "shape":"PermittedWickrEnterpriseNetworksList", + "documentation":"

A list of permitted Wickr Enterprise networks for global federation, restricting communication to specific approved networks.

" + } + }, + "documentation":"

Comprehensive configuration settings that define all user capabilities, restrictions, and features for members of a security group. These settings control everything from calling permissions to federation settings to security policies.

" + }, + "SecurityGroupSettingsRequest":{ + "type":"structure", + "members":{ + "lockoutThreshold":{ + "shape":"Integer", + "documentation":"

The number of failed password attempts before a user account is locked out.

" + }, + "permittedNetworks":{ + "shape":"PermittedNetworksList", + "documentation":"

A list of network IDs that are permitted for local federation when federation mode is set to restricted.

" + }, + "enableGuestFederation":{ + "shape":"Boolean", + "documentation":"

Guest users let you work with people outside your organization that only have limited access to Wickr. Only valid when federationMode is set to Global.

" + }, + "globalFederation":{ + "shape":"Boolean", + "documentation":"

Allow users to securely federate with all Amazon Web Services Wickr networks and Amazon Web Services Enterprise networks.

" + }, + "federationMode":{ + "shape":"Integer", + "documentation":"

The local federation mode. Values: 0 (none), 1 (federated - all networks), 2 (restricted - only permitted networks).

" + }, + "enableRestrictedGlobalFederation":{ + "shape":"Boolean", + "documentation":"

Enables restricted global federation to limit communication to specific permitted networks only. Requires globalFederation to be enabled.

" + }, + "permittedWickrAwsNetworks":{ + "shape":"WickrAwsNetworksList", + "documentation":"

A list of permitted Amazon Web Services Wickr networks for restricted global federation.

" + }, + "permittedWickrEnterpriseNetworks":{ + "shape":"PermittedWickrEnterpriseNetworksList", + "documentation":"

A list of permitted Wickr Enterprise networks for restricted global federation.

" + } + }, + "documentation":"

Contains the security group configuration settings that can be specified when creating or updating a security group. This is a subset of SecurityGroupSettings containing only the modifiable federation and security settings.

" + }, + "SecurityGroupStringList":{ + "type":"list", + "member":{"shape":"GenericString"} + }, + "SensitiveString":{ + "type":"string", + "pattern":"[\\S\\s]*", + "sensitive":true + }, + "Setting":{ + "type":"structure", + "required":[ + "optionName", + "value", + "type" + ], + "members":{ + "optionName":{ + "shape":"GenericString", + "documentation":"

The name of the network setting (e.g., 'enableClientMetrics', 'dataRetention').

" + }, + "value":{ + "shape":"GenericString", + "documentation":"

The current value of the setting as a string. Boolean values are represented as 'true' or 'false'.

" + }, + "type":{ + "shape":"GenericString", + "documentation":"

The data type of the setting value (e.g., 'boolean', 'string', 'number').

" + } + }, + "documentation":"

Represents a single network-level configuration setting with its name, value, and data type. Settings control network-wide behaviors and features.

" + }, + "SettingsList":{ + "type":"list", + "member":{"shape":"Setting"} + }, + "ShredderSettings":{ + "type":"structure", + "members":{ + "canProcessManually":{ + "shape":"Boolean", + "documentation":"

Specifies whether users can manually trigger the shredder to delete content.

" + }, + "intensity":{ + "shape":"Integer", + "documentation":"

Prevents Wickr data from being recovered by overwriting deleted Wickr data. Valid Values: Must be one of [0, 20, 60, 100]

" + } + }, + "documentation":"

Configuration for the message shredder feature, which securely deletes messages and files from devices to prevent data recovery.

" + }, + "SortDirection":{ + "type":"string", + "enum":[ + "ASC", + "DESC" + ] + }, + "Status":{ + "type":"string", + "enum":[ + "DISABLED", + "ENABLED", + "FORCE_ENABLED" + ] + }, + "StringList":{ + "type":"list", + "member":{"shape":"GenericString"} + }, + "SyntheticTimestamp_epoch_seconds":{ + "type":"timestamp", + "timestampFormat":"unixTimestamp" + }, + "Uname":{"type":"string"}, + "Unames":{ + "type":"list", + "member":{"shape":"GenericString"} + }, + "UnauthorizedError":{ + "type":"structure", + "members":{ + "message":{ + "shape":"GenericString", + "documentation":"

A message explaining why the authentication failed.

" + } + }, + "documentation":"

The request was not authenticated or the authentication credentials were invalid. This error occurs when the request lacks valid authentication credentials or the credentials have expired.

", + "error":{ + "httpStatusCode":401, + "senderFault":true + }, + "exception":true + }, + "UpdateBotRequest":{ + "type":"structure", + "required":[ + "networkId", + "botId" + ], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network containing the bot to update.

", + "location":"uri", + "locationName":"networkId" + }, + "botId":{ + "shape":"BotId", + "documentation":"

The unique identifier of the bot to update.

", + "location":"uri", + "locationName":"botId" + }, + "displayName":{ + "shape":"GenericString", + "documentation":"

The new display name for the bot.

" + }, + "groupId":{ + "shape":"GenericString", + "documentation":"

The ID of the new security group to assign the bot to.

" + }, + "challenge":{ + "shape":"SensitiveString", + "documentation":"

The new password for the bot account.

" + }, + "suspend":{ + "shape":"Boolean", + "documentation":"

Set to true to suspend the bot or false to unsuspend it. Omit this field for standard updates that don't affect suspension status.

" + } + } + }, + "UpdateBotResponse":{ + "type":"structure", + "members":{ + "message":{ + "shape":"GenericString", + "documentation":"

A message indicating the result of the bot update operation.

" + } + } + }, + "UpdateDataRetentionRequest":{ + "type":"structure", + "required":[ + "networkId", + "actionType" + ], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network containing the data retention bot.

", + "location":"uri", + "locationName":"networkId" + }, + "actionType":{ + "shape":"DataRetentionActionType", + "documentation":"

The action to perform. Valid values are 'ENABLE' (to enable the data retention service), 'DISABLE' (to disable the service), or 'PUBKEY_MSG_ACK' (to acknowledge the public key message).

" + } + } + }, + "UpdateDataRetentionResponse":{ + "type":"structure", + "members":{ + "message":{ + "shape":"GenericString", + "documentation":"

A message indicating the result of the update operation.

" + } + } + }, + "UpdateGuestUserRequest":{ + "type":"structure", + "required":[ + "networkId", + "usernameHash", + "block" + ], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network where the guest user's status will be updated.

", + "location":"uri", + "locationName":"networkId" + }, + "usernameHash":{ + "shape":"GenericString", + "documentation":"

The username hash (unique identifier) of the guest user to update.

", + "location":"uri", + "locationName":"usernameHash" + }, + "block":{ + "shape":"Boolean", + "documentation":"

Set to true to block the guest user or false to unblock them.

" + } + } + }, + "UpdateGuestUserResponse":{ + "type":"structure", + "members":{ + "message":{ + "shape":"GenericString", + "documentation":"

A message indicating the result of the update operation.

" + } + } + }, + "UpdateNetworkRequest":{ + "type":"structure", + "required":[ + "networkId", + "networkName" + ], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network to update.

", + "location":"uri", + "locationName":"networkId" + }, + "networkName":{ + "shape":"GenericString", + "documentation":"

The new name for the network. Must be between 1 and 20 characters.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique identifier for this request to ensure idempotency.

", + "idempotencyToken":true, + "location":"header", + "locationName":"X-Client-Token" + }, + "encryptionKeyArn":{ + "shape":"GenericString", + "documentation":"

The ARN of the Amazon Web Services KMS customer managed key to use for encrypting sensitive data in the network.

" + } + } + }, + "UpdateNetworkResponse":{ + "type":"structure", + "members":{ + "message":{ + "shape":"GenericString", + "documentation":"

A message indicating that the network was updated successfully.

" + } + } + }, + "UpdateNetworkSettingsRequest":{ + "type":"structure", + "required":[ + "networkId", + "settings" + ], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network whose settings will be updated.

", + "location":"uri", + "locationName":"networkId" + }, + "settings":{ + "shape":"NetworkSettings", + "documentation":"

A map of setting names to their new values. Each setting should be provided with its appropriate type (boolean, string, number, etc.).

" + } + } + }, + "UpdateNetworkSettingsResponse":{ + "type":"structure", + "required":["settings"], + "members":{ + "settings":{ + "shape":"SettingsList", + "documentation":"

A list of the updated network settings, showing the new values for each modified setting.

" + } + } + }, + "UpdateSecurityGroupRequest":{ + "type":"structure", + "required":[ + "networkId", + "groupId" + ], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network containing the security group to update.

", + "location":"uri", + "locationName":"networkId" + }, + "groupId":{ + "shape":"GenericString", + "documentation":"

The unique identifier of the security group to update.

", + "location":"uri", + "locationName":"groupId" + }, + "name":{ + "shape":"GenericString", + "documentation":"

The new name for the security group.

" + }, + "securityGroupSettings":{ + "shape":"SecurityGroupSettings", + "documentation":"

The updated configuration settings for the security group.

Federation mode - 0 (Local federation), 1 (Restricted federation), 2 (Global federation)

" + } + } + }, + "UpdateSecurityGroupResponse":{ + "type":"structure", + "required":["securityGroup"], + "members":{ + "securityGroup":{ + "shape":"SecurityGroup", + "documentation":"

The updated security group details, including the new settings.

" + } + } + }, + "UpdateUserDetails":{ + "type":"structure", + "members":{ + "firstName":{ + "shape":"SensitiveString", + "documentation":"

The new first name for the user.

" + }, + "lastName":{ + "shape":"SensitiveString", + "documentation":"

The new last name for the user.

" + }, + "username":{ + "shape":"GenericString", + "documentation":"

The new username or email address for the user.

" + }, + "securityGroupIds":{ + "shape":"SecurityGroupIdList", + "documentation":"

The updated list of security group IDs to which the user should belong.

" + }, + "inviteCode":{ + "shape":"GenericString", + "documentation":"

A new custom invite code for the user.

" + }, + "inviteCodeTtl":{ + "shape":"Integer", + "documentation":"

The new time-to-live for the invite code in days.

" + }, + "codeValidation":{ + "shape":"Boolean", + "documentation":"

Indicates whether the user can be verified through a custom invite code.

" + } + }, + "documentation":"

Contains the modifiable details for updating an existing user, including name, password, security group membership, and invitation settings.

A user can only be assigned to a single security group. Attempting to add a user to multiple security groups is not supported and will result in an error.

" + }, + "UpdateUserRequest":{ + "type":"structure", + "required":[ + "networkId", + "userId" + ], + "members":{ + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the Wickr network containing the user to update.

", + "location":"uri", + "locationName":"networkId" + }, + "userId":{ + "shape":"UserId", + "documentation":"

The unique identifier of the user to update.

" + }, + "userDetails":{ + "shape":"UpdateUserDetails", + "documentation":"

An object containing the user details to be updated, such as name, password, security groups, and invite code settings.

" + } + } + }, + "UpdateUserResponse":{ + "type":"structure", + "required":[ + "userId", + "networkId", + "suspended" + ], + "members":{ + "userId":{ + "shape":"UserId", + "documentation":"

The unique identifier of the updated user.

" + }, + "networkId":{ + "shape":"NetworkId", + "documentation":"

The ID of the network where the user was updated.

" + }, + "securityGroupIds":{ + "shape":"SecurityGroupIdList", + "documentation":"

The list of security group IDs to which the user now belongs after the update.

" + }, + "firstName":{ + "shape":"SensitiveString", + "documentation":"

The updated first name of the user.

" + }, + "lastName":{ + "shape":"SensitiveString", + "documentation":"

The updated last name of the user.

" + }, + "middleName":{ + "shape":"GenericString", + "documentation":"

The middle name of the user (currently not used).

" + }, + "suspended":{ + "shape":"Boolean", + "documentation":"

Indicates whether the user is suspended after the update.

" + }, + "modified":{ + "shape":"Integer", + "documentation":"

The timestamp when the user was last modified, specified in epoch seconds.

" + }, + "status":{ + "shape":"Integer", + "documentation":"

The user's status after the update.

" + }, + "inviteCode":{ + "shape":"GenericString", + "documentation":"

The updated invite code for the user, if applicable.

" + }, + "inviteExpiration":{ + "shape":"Integer", + "documentation":"

The expiration time of the user's invite code, specified in epoch seconds.

" + }, + "codeValidation":{ + "shape":"Boolean", + "documentation":"

Indicates whether the user can be verified through a custom invite code.

" + } + } + }, + "User":{ + "type":"structure", + "members":{ + "userId":{ + "shape":"UserId", + "documentation":"

The unique identifier for the user within the network.

" + }, + "firstName":{ + "shape":"SensitiveString", + "documentation":"

The first name of the user.

" + }, + "lastName":{ + "shape":"SensitiveString", + "documentation":"

The last name of the user.

" + }, + "username":{ + "shape":"GenericString", + "documentation":"

The email address or username of the user. For bots, this must end in 'bot'.

" + }, + "securityGroups":{ + "shape":"SecurityGroupIdList", + "documentation":"

A list of security group IDs to which the user is assigned, determining their permissions and feature access.

" + }, + "isAdmin":{ + "shape":"Boolean", + "documentation":"

Indicates whether the user has administrator privileges in the network.

" + }, + "suspended":{ + "shape":"Boolean", + "documentation":"

Indicates whether the user is currently suspended and unable to access the network.

" + }, + "status":{ + "shape":"Integer", + "documentation":"

The current status of the user (1 for pending invitation, 2 for active).

" + }, + "otpEnabled":{ + "shape":"Boolean", + "documentation":"

Indicates whether one-time password (OTP) authentication is enabled for the user.

" + }, + "scimId":{ + "shape":"GenericString", + "documentation":"

The SCIM (System for Cross-domain Identity Management) identifier for the user, used for identity synchronization. Currently not used.

" + }, + "type":{ + "shape":"GenericString", + "documentation":"

The descriptive type of the user account (e.g., 'user').

" + }, + "cell":{ + "shape":"GenericString", + "documentation":"

The phone number minus country code, used for cloud deployments.

" + }, + "countryCode":{ + "shape":"GenericString", + "documentation":"

The country code for the user's phone number, used for cloud deployments.

" + }, + "challengeFailures":{ + "shape":"Integer", + "documentation":"

The number of failed password attempts for enterprise deployments, used for account lockout policies.

" + }, + "isInviteExpired":{ + "shape":"Boolean", + "documentation":"

Indicates whether the user's email invitation code has expired, applicable to cloud deployments.

" + }, + "isUser":{ + "shape":"Boolean", + "documentation":"

Indicates whether this account is a user (as opposed to a bot or other account type).

" + }, + "inviteCode":{ + "shape":"GenericString", + "documentation":"

The invitation code for this user, used during registration to join the network.

" + }, + "codeValidation":{ + "shape":"Boolean", + "documentation":"

Indicates whether the user can be verified through a custom invite code.

" + }, + "uname":{ + "shape":"GenericString", + "documentation":"

The unique identifier for the user.

" + } + }, + "documentation":"

Represents a user account in a Wickr network with detailed profile information, status, security settings, and authentication details.

codeValidation, inviteCode and inviteCodeTtl are restricted to networks under preview only.

" + }, + "UserId":{ + "type":"string", + "max":10, + "min":1, + "pattern":"[0-9]+" + }, + "UserIds":{ + "type":"list", + "member":{"shape":"UserId"} + }, + "UserStatus":{ + "type":"integer", + "box":true + }, + "Users":{ + "type":"list", + "member":{"shape":"User"} + }, + "ValidationError":{ + "type":"structure", + "members":{ + "reasons":{ + "shape":"ErrorDetailList", + "documentation":"

A list of validation error details, where each item identifies a specific field that failed validation and explains the reason for the failure.

" + } + }, + "documentation":"

One or more fields in the request failed validation. This error provides detailed information about which fields were invalid and why, allowing you to correct the request and retry.

", + "error":{ + "httpStatusCode":422, + "senderFault":true + }, + "exception":true + }, + "WickrAwsNetworks":{ + "type":"structure", + "required":[ + "region", + "networkId" + ], + "members":{ + "region":{ + "shape":"GenericString", + "documentation":"

The Amazon Web Services region identifier where the network is hosted (e.g., 'us-east-1').

" + }, + "networkId":{ + "shape":"NetworkId", + "documentation":"

The network ID of the Wickr Amazon Web Services network.

" + } + }, + "documentation":"

Identifies an Amazon Web Services Wickr network by region and network ID, used for configuring permitted networks for global federation.

" + }, + "WickrAwsNetworksList":{ + "type":"list", + "member":{"shape":"WickrAwsNetworks"} + } + }, + "documentation":"

Welcome to the Amazon Web Services Wickr API Reference.

The Amazon Web Services Wickr application programming interface (API) is designed for administrators to perform key tasks, such as creating and managing Amazon Web Services Wickr networks, users, security groups, bots, and more. This guide provides detailed information about the Amazon Web Services Wickr API, including operations, types, inputs and outputs, and error codes. You can use an Amazon Web Services SDK, the Amazon Web Services Command Line Interface (Amazon Web Services CLI), or the REST API to make API calls for Amazon Web Services Wickr.

Using Amazon Web Services SDK

The SDK clients authenticate your requests by using access keys that you provide. For more information, see Authentication and access using Amazon Web Services SDKs and tools in the Amazon Web Services SDKs and Tools Reference Guide.

Using Amazon Web Services CLI

Use your access keys with the Amazon Web Services CLI to make API calls. For more information about setting up the Amazon Web Services CLI, see Getting started with the Amazon Web Services CLI in the Amazon Web Services Command Line Interface User Guide for Version 2.

Using REST APIs

If you use REST to make API calls, you must authenticate your request by providing a signature. Amazon Web Services Wickr supports Signature Version 4. For more information, see Amazon Web Services Signature Version 4 for API requests in the Amazon Web Services Identity and Access Management User Guide.

Access and permissions to the APIs can be controlled by Amazon Web Services Identity and Access Management. The managed policy AWSWickrFullAccess grants full administrative permission to the Amazon Web Services Wickr service APIs. For more information on restricting access to specific operations, see Identity and access management for Amazon Web Services Wickr in the Amazon Web Services Wickr Administration Guide.

Types of Errors:

The Amazon Web Services Wickr APIs provide an HTTP interface. HTTP defines ranges of HTTP Status Codes for different types of error responses.

  1. Client errors are indicated by HTTP Status Code class of 4xx

  2. Service errors are indicated by HTTP Status Code class of 5xx

In this reference guide, the documentation for each API has an Errors section that includes a brief discussion about HTTP status codes. We recommend looking there as part of your investigation when you get an error.

" +} diff --git a/awscli/botocore/data/wickr/2024-02-01/waiters-2.json b/awscli/botocore/data/wickr/2024-02-01/waiters-2.json new file mode 100644 index 000000000000..13f60ee66be6 --- /dev/null +++ b/awscli/botocore/data/wickr/2024-02-01/waiters-2.json @@ -0,0 +1,5 @@ +{ + "version": 2, + "waiters": { + } +} diff --git a/awscli/botocore/data/workspaces-web/2020-07-08/service-2.json b/awscli/botocore/data/workspaces-web/2020-07-08/service-2.json index e30864cab857..6be9fc27749d 100644 --- a/awscli/botocore/data/workspaces-web/2020-07-08/service-2.json +++ b/awscli/botocore/data/workspaces-web/2020-07-08/service-2.json @@ -359,6 +359,7 @@ "output":{"shape":"CreateUserSettingsResponse"}, "errors":[ {"shape":"InternalServerException"}, + {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"}, {"shape":"ThrottlingException"}, {"shape":"ServiceQuotaExceededException"}, @@ -751,7 +752,8 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Gets browser settings.

" + "documentation":"

Gets browser settings.

", + "readonly":true }, "GetDataProtectionSettings":{ "name":"GetDataProtectionSettings", @@ -769,7 +771,8 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Gets the data protection settings.

" + "documentation":"

Gets the data protection settings.

", + "readonly":true }, "GetIdentityProvider":{ "name":"GetIdentityProvider", @@ -787,7 +790,8 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Gets the identity provider.

" + "documentation":"

Gets the identity provider.

", + "readonly":true }, "GetIpAccessSettings":{ "name":"GetIpAccessSettings", @@ -805,7 +809,8 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Gets the IP access settings.

" + "documentation":"

Gets the IP access settings.

", + "readonly":true }, "GetNetworkSettings":{ "name":"GetNetworkSettings", @@ -823,7 +828,8 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Gets the network settings.

" + "documentation":"

Gets the network settings.

", + "readonly":true }, "GetPortal":{ "name":"GetPortal", @@ -841,7 +847,8 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Gets the web portal.

" + "documentation":"

Gets the web portal.

", + "readonly":true }, "GetPortalServiceProviderMetadata":{ "name":"GetPortalServiceProviderMetadata", @@ -859,7 +866,8 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Gets the service provider metadata.

" + "documentation":"

Gets the service provider metadata.

", + "readonly":true }, "GetSession":{ "name":"GetSession", @@ -877,7 +885,8 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Gets information for a secure browser session.

" + "documentation":"

Gets information for a secure browser session.

", + "readonly":true }, "GetSessionLogger":{ "name":"GetSessionLogger", @@ -895,7 +904,8 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Gets details about a specific session logger resource.

" + "documentation":"

Gets details about a specific session logger resource.

", + "readonly":true }, "GetTrustStore":{ "name":"GetTrustStore", @@ -913,7 +923,8 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Gets the trust store.

" + "documentation":"

Gets the trust store.

", + "readonly":true }, "GetTrustStoreCertificate":{ "name":"GetTrustStoreCertificate", @@ -931,7 +942,8 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Gets the trust store certificate.

" + "documentation":"

Gets the trust store certificate.

", + "readonly":true }, "GetUserAccessLoggingSettings":{ "name":"GetUserAccessLoggingSettings", @@ -949,7 +961,8 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Gets user access logging settings.

" + "documentation":"

Gets user access logging settings.

", + "readonly":true }, "GetUserSettings":{ "name":"GetUserSettings", @@ -967,7 +980,8 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Gets user settings.

" + "documentation":"

Gets user settings.

", + "readonly":true }, "ListBrowserSettings":{ "name":"ListBrowserSettings", @@ -984,7 +998,8 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Retrieves a list of browser settings.

" + "documentation":"

Retrieves a list of browser settings.

", + "readonly":true }, "ListDataProtectionSettings":{ "name":"ListDataProtectionSettings", @@ -1001,7 +1016,8 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Retrieves a list of data protection settings.

" + "documentation":"

Retrieves a list of data protection settings.

", + "readonly":true }, "ListIdentityProviders":{ "name":"ListIdentityProviders", @@ -1018,7 +1034,8 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Retrieves a list of identity providers for a specific web portal.

" + "documentation":"

Retrieves a list of identity providers for a specific web portal.

", + "readonly":true }, "ListIpAccessSettings":{ "name":"ListIpAccessSettings", @@ -1035,7 +1052,8 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Retrieves a list of IP access settings.

" + "documentation":"

Retrieves a list of IP access settings.

", + "readonly":true }, "ListNetworkSettings":{ "name":"ListNetworkSettings", @@ -1052,7 +1070,8 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Retrieves a list of network settings.

" + "documentation":"

Retrieves a list of network settings.

", + "readonly":true }, "ListPortals":{ "name":"ListPortals", @@ -1069,7 +1088,8 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Retrieves a list or web portals.

" + "documentation":"

Retrieves a list of web portals.

", + "readonly":true }, "ListSessionLoggers":{ "name":"ListSessionLoggers", @@ -1086,7 +1106,8 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Lists all available session logger resources.

" + "documentation":"

Lists all available session logger resources.

", + "readonly":true }, "ListSessions":{ "name":"ListSessions", @@ -1104,7 +1125,8 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Lists information for multiple secure browser sessions from a specific portal.

" + "documentation":"

Lists information for multiple secure browser sessions from a specific portal.

", + "readonly":true }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -1122,7 +1144,8 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Retrieves a list of tags for a resource.

" + "documentation":"

Retrieves a list of tags for a resource.

", + "readonly":true }, "ListTrustStoreCertificates":{ "name":"ListTrustStoreCertificates", @@ -1140,7 +1163,8 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Retrieves a list of trust store certificates.

" + "documentation":"

Retrieves a list of trust store certificates.

", + "readonly":true }, "ListTrustStores":{ "name":"ListTrustStores", @@ -1157,7 +1181,8 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Retrieves a list of trust stores.

" + "documentation":"

Retrieves a list of trust stores.

", + "readonly":true }, "ListUserAccessLoggingSettings":{ "name":"ListUserAccessLoggingSettings", @@ -1174,7 +1199,8 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Retrieves a list of user access logging settings.

" + "documentation":"

Retrieves a list of user access logging settings.

", + "readonly":true }, "ListUserSettings":{ "name":"ListUserSettings", @@ -1191,7 +1217,8 @@ {"shape":"ThrottlingException"}, {"shape":"ValidationException"} ], - "documentation":"

Retrieves a list of user settings.

" + "documentation":"

Retrieves a list of user settings.

", + "readonly":true }, "TagResource":{ "name":"TagResource", @@ -1756,6 +1783,110 @@ "max":100, "min":1 }, + "BrandingConfiguration":{ + "type":"structure", + "required":[ + "logo", + "wallpaper", + "favicon", + "localizedStrings", + "colorTheme" + ], + "members":{ + "logo":{ + "shape":"ImageMetadata", + "documentation":"

Metadata for the logo image file, including the MIME type, file extension, and upload timestamp.

" + }, + "wallpaper":{ + "shape":"ImageMetadata", + "documentation":"

Metadata for the wallpaper image file, including the MIME type, file extension, and upload timestamp.

" + }, + "favicon":{ + "shape":"ImageMetadata", + "documentation":"

Metadata for the favicon image file, including the MIME type, file extension, and upload timestamp.

" + }, + "localizedStrings":{ + "shape":"LocalizedBrandingStringMap", + "documentation":"

A map of localized text strings for different languages, allowing the portal to display content in the user's preferred language.

" + }, + "colorTheme":{ + "shape":"ColorTheme", + "documentation":"

The color theme for components on the web portal.

" + }, + "termsOfService":{ + "shape":"Markdown", + "documentation":"

The terms of service text in Markdown format that users must accept before accessing the portal.

" + } + }, + "documentation":"

The branding configuration output including custom images metadata, localized strings, color theme, and terms of service.

" + }, + "BrandingConfigurationCreateInput":{ + "type":"structure", + "required":[ + "logo", + "wallpaper", + "favicon", + "localizedStrings", + "colorTheme" + ], + "members":{ + "logo":{ + "shape":"IconImageInput", + "documentation":"

The logo image for the portal. Provide either a binary image file or an S3 URI pointing to the image file. Maximum 100 KB in JPEG, PNG, or ICO format.

" + }, + "wallpaper":{ + "shape":"WallpaperImageInput", + "documentation":"

The wallpaper image for the portal. Provide either a binary image file or an S3 URI pointing to the image file. Maximum 5 MB in JPEG or PNG format.

" + }, + "favicon":{ + "shape":"IconImageInput", + "documentation":"

The favicon image for the portal. Provide either a binary image file or an S3 URI pointing to the image file. Maximum 100 KB in JPEG, PNG, or ICO format.

" + }, + "localizedStrings":{ + "shape":"LocalizedBrandingStringMap", + "documentation":"

A map of localized text strings for different supported languages. Each locale must provide the required fields browserTabTitle and welcomeText.

" + }, + "colorTheme":{ + "shape":"ColorTheme", + "documentation":"

The color theme for components on the web portal. Choose Light if you upload a dark wallpaper, or Dark for a light wallpaper.

" + }, + "termsOfService":{ + "shape":"Markdown", + "documentation":"

The terms of service text in Markdown format. Users will be presented with the terms of service after successfully signing in.

" + } + }, + "documentation":"

The input configuration for creating branding settings.

" + }, + "BrandingConfigurationUpdateInput":{ + "type":"structure", + "members":{ + "logo":{ + "shape":"IconImageInput", + "documentation":"

The logo image for the portal. Provide either a binary image file or an S3 URI pointing to the image file. Maximum 100 KB in JPEG, PNG, or ICO format.

" + }, + "wallpaper":{ + "shape":"WallpaperImageInput", + "documentation":"

The wallpaper image for the portal. Provide either a binary image file or an S3 URI pointing to the image file. Maximum 5 MB in JPEG or PNG format.

" + }, + "favicon":{ + "shape":"IconImageInput", + "documentation":"

The favicon image for the portal. Provide either a binary image file or an S3 URI pointing to the image file. Maximum 100 KB in JPEG, PNG, or ICO format.

" + }, + "localizedStrings":{ + "shape":"LocalizedBrandingStringMap", + "documentation":"

A map of localized text strings for different supported languages. Each locale must provide the required fields browserTabTitle and welcomeText.

" + }, + "colorTheme":{ + "shape":"ColorTheme", + "documentation":"

The color theme for components on the web portal. Choose Light if you upload a dark wallpaper, or Dark for a light wallpaper.

" + }, + "termsOfService":{ + "shape":"Markdown", + "documentation":"

The terms of service text in Markdown format. To remove existing terms of service, provide an empty string.

" + } + }, + "documentation":"

The input configuration for updating branding settings. All fields are optional when updating existing branding.

" + }, "BrowserPolicy":{ "type":"string", "max":131072, @@ -1942,6 +2073,13 @@ "max":512, "min":1 }, + "ColorTheme":{ + "type":"string", + "enum":[ + "Light", + "Dark" + ] + }, "ConfidenceLevel":{ "type":"integer", "box":true, @@ -1968,6 +2106,10 @@ }, "exception":true }, + "ContactLinkUrl":{ + "type":"string", + "pattern":"(https://|mailto:).*" + }, "CookieDomain":{ "type":"string", "max":253, @@ -2472,6 +2614,14 @@ "toolbarConfiguration":{ "shape":"ToolbarConfiguration", "documentation":"

The configuration of the toolbar. This allows administrators to select the toolbar type and visual mode, set maximum display resolution for sessions, and choose which items are visible to end users during their sessions. If administrators do not modify these settings, end users retain control over their toolbar preferences.

" + }, + "brandingConfigurationInput":{ + "shape":"BrandingConfigurationCreateInput", + "documentation":"

The branding configuration input that customizes the appearance of the web portal for end users. This includes a custom logo, favicon, wallpaper, localized strings, color theme, and an optional terms of service.

" + }, + "webAuthnAllowed":{ + "shape":"EnabledType", + "documentation":"

Specifies whether the user can use WebAuthn redirection for passwordless login to websites within the streaming session.

" } } }, @@ -2591,8 +2741,7 @@ }, "DeleteBrowserSettingsResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteDataProtectionSettingsRequest":{ "type":"structure", @@ -2608,8 +2757,7 @@ }, "DeleteDataProtectionSettingsResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteIdentityProviderRequest":{ "type":"structure", @@ -2625,8 +2773,7 @@ }, "DeleteIdentityProviderResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteIpAccessSettingsRequest":{ "type":"structure", @@ -2642,8 +2789,7 @@ }, "DeleteIpAccessSettingsResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteNetworkSettingsRequest":{ "type":"structure", @@ -2659,8 +2805,7 @@ }, "DeleteNetworkSettingsResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeletePortalRequest":{ "type":"structure", @@ -2676,8 +2821,7 @@ }, "DeletePortalResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteSessionLoggerRequest":{ "type":"structure", @@ -2693,8 +2837,7 @@ }, "DeleteSessionLoggerResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteTrustStoreRequest":{ "type":"structure", @@ -2710,8 +2853,7 @@ }, "DeleteTrustStoreResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteUserAccessLoggingSettingsRequest":{ "type":"structure", @@ -2727,8 +2869,7 @@ }, "DeleteUserAccessLoggingSettingsResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DeleteUserSettingsRequest":{ "type":"structure", @@ -2744,8 +2885,7 @@ }, "DeleteUserSettingsResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "Description":{ "type":"string", @@ -2775,8 +2915,7 @@ }, "DisassociateBrowserSettingsResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DisassociateDataProtectionSettingsRequest":{ "type":"structure", @@ -2792,8 +2931,7 @@ }, "DisassociateDataProtectionSettingsResponse":{ "type":"structure", - "members":{ - } + "members":{} }, 
"DisassociateIpAccessSettingsRequest":{ "type":"structure", @@ -2809,8 +2947,7 @@ }, "DisassociateIpAccessSettingsResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DisassociateNetworkSettingsRequest":{ "type":"structure", @@ -2826,8 +2963,7 @@ }, "DisassociateNetworkSettingsResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DisassociateSessionLoggerRequest":{ "type":"structure", @@ -2843,8 +2979,7 @@ }, "DisassociateSessionLoggerResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DisassociateTrustStoreRequest":{ "type":"structure", @@ -2860,8 +2995,7 @@ }, "DisassociateTrustStoreResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DisassociateUserAccessLoggingSettingsRequest":{ "type":"structure", @@ -2877,8 +3011,7 @@ }, "DisassociateUserAccessLoggingSettingsResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DisassociateUserSettingsRequest":{ "type":"structure", @@ -2894,8 +3027,7 @@ }, "DisassociateUserSettingsResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "DisconnectTimeoutInMinutes":{ "type":"integer", @@ -2996,8 +3128,7 @@ }, "ExpireSessionResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "FieldName":{"type":"string"}, "FolderStructure":{ @@ -3318,6 +3449,26 @@ "type":"list", "member":{"shape":"ToolbarItem"} }, + "IconImage":{ + "type":"blob", + "max":102400, + "min":0 + }, + "IconImageInput":{ + "type":"structure", + "members":{ + "blob":{ + "shape":"IconImage", + "documentation":"

The image provided as a binary image file.

" + }, + "s3Uri":{ + "shape":"S3Uri", + "documentation":"

The S3 URI pointing to the image file. The URI must use the format s3://bucket-name/key-name. You must have read access to the S3 object.

" + } + }, + "documentation":"

The input for an icon image (logo or favicon). Provide either a binary image file or an S3 URI pointing to the image file. Maximum 100 KB in JPEG, PNG, or ICO format.

", + "union":true + }, "IdentityProvider":{ "type":"structure", "required":["identityProviderArn"], @@ -3394,6 +3545,29 @@ "max":60, "min":0 }, + "ImageMetadata":{ + "type":"structure", + "required":[ + "mimeType", + "fileExtension", + "lastUploadTimestamp" + ], + "members":{ + "mimeType":{ + "shape":"MimeType", + "documentation":"

The MIME type of the image.

" + }, + "fileExtension":{ + "shape":"StringType", + "documentation":"

The file extension of the image.

" + }, + "lastUploadTimestamp":{ + "shape":"Timestamp", + "documentation":"

The timestamp when the image was last uploaded.

" + } + }, + "documentation":"

Metadata information about an uploaded image file.

" + }, "InlineRedactionConfiguration":{ "type":"structure", "required":["inlineRedactionPatterns"], @@ -4035,6 +4209,111 @@ } } }, + "Locale":{ + "type":"string", + "enum":[ + "de-DE", + "en-US", + "es-ES", + "fr-FR", + "id-ID", + "it-IT", + "ja-JP", + "ko-KR", + "pt-BR", + "zh-CN", + "zh-TW" + ] + }, + "LocalizedBrandingStringMap":{ + "type":"map", + "key":{"shape":"Locale"}, + "value":{"shape":"LocalizedBrandingStrings"} + }, + "LocalizedBrandingStrings":{ + "type":"structure", + "required":[ + "browserTabTitle", + "welcomeText" + ], + "members":{ + "browserTabTitle":{ + "shape":"LocalizedBrandingStringsBrowserTabTitleString", + "documentation":"

The text displayed in the browser tab title.

" + }, + "welcomeText":{ + "shape":"LocalizedBrandingStringsWelcomeTextString", + "documentation":"

The welcome text displayed on the sign-in page.

" + }, + "loginTitle":{ + "shape":"LocalizedBrandingStringsLoginTitleString", + "documentation":"

The title text for the login section. This field is optional and defaults to \"Sign In\".

" + }, + "loginDescription":{ + "shape":"LocalizedBrandingStringsLoginDescriptionString", + "documentation":"

The description text for the login section. This field is optional and defaults to \"Sign in to your session\".

" + }, + "loginButtonText":{ + "shape":"LocalizedBrandingStringsLoginButtonTextString", + "documentation":"

The text displayed on the login button. This field is optional and defaults to \"Sign In\".

" + }, + "contactLink":{ + "shape":"ContactLinkUrl", + "documentation":"

A contact link URL. The URL must start with https:// or mailto:. If not provided, the contact button will be hidden from the web portal screen.

" + }, + "contactButtonText":{ + "shape":"LocalizedBrandingStringsContactButtonTextString", + "documentation":"

The text displayed on the contact button. This field is optional and defaults to \"Contact us\".

" + }, + "loadingText":{ + "shape":"LocalizedBrandingStringsLoadingTextString", + "documentation":"

The text displayed during session loading. This field is optional and defaults to \"Loading your session\".

" + } + }, + "documentation":"

Localized text strings for a specific language that customize the web portal.

" + }, + "LocalizedBrandingStringsBrowserTabTitleString":{ + "type":"string", + "max":25, + "min":0, + "pattern":"[^<>&'`~\\\\]*" + }, + "LocalizedBrandingStringsContactButtonTextString":{ + "type":"string", + "max":30, + "min":0, + "pattern":"[^<>&'`~\\\\]*" + }, + "LocalizedBrandingStringsLoadingTextString":{ + "type":"string", + "max":300, + "min":0, + "pattern":"[^<>&'`~\\\\]*" + }, + "LocalizedBrandingStringsLoginButtonTextString":{ + "type":"string", + "max":30, + "min":0, + "pattern":"[^<>&'`~\\\\]*" + }, + "LocalizedBrandingStringsLoginDescriptionString":{ + "type":"string", + "max":250, + "min":0, + "pattern":"[^<>&'`~\\\\]*" + }, + "LocalizedBrandingStringsLoginTitleString":{ + "type":"string", + "max":100, + "min":0, + "pattern":"[^<>&'`~\\\\]*" + }, + "LocalizedBrandingStringsWelcomeTextString":{ + "type":"string", + "max":150, + "min":0, + "pattern":"[^<>&'`~\\\\]*" + }, "LogConfiguration":{ "type":"structure", "members":{ @@ -4052,6 +4331,12 @@ "Json" ] }, + "Markdown":{ + "type":"string", + "max":153600, + "min":0, + "sensitive":true + }, "MaxConcurrentSessions":{ "type":"integer", "box":true, @@ -4076,6 +4361,14 @@ "box":true, "min":1 }, + "MimeType":{ + "type":"string", + "enum":[ + "image/png", + "image/jpeg", + "image/x-icon" + ] + }, "NetworkSettings":{ "type":"structure", "required":["networkSettingsArn"], @@ -4439,6 +4732,10 @@ }, "documentation":"

The S3 log configuration.

" }, + "S3Uri":{ + "type":"string", + "pattern":"s3://[a-z0-9][a-z0-9\\.\\-]{1,61}[a-z0-9]/.+" + }, "SamlMetadata":{ "type":"string", "max":204800, @@ -4735,8 +5032,7 @@ }, "TagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "TagValue":{ "type":"string", @@ -4858,8 +5154,7 @@ }, "Unit":{ "type":"structure", - "members":{ - } + "members":{} }, "UntagResourceRequest":{ "type":"structure", @@ -4884,8 +5179,7 @@ }, "UntagResourceResponse":{ "type":"structure", - "members":{ - } + "members":{} }, "UpdateBrowserSettingsRequest":{ "type":"structure", @@ -5269,6 +5563,14 @@ "toolbarConfiguration":{ "shape":"ToolbarConfiguration", "documentation":"

The configuration of the toolbar. This allows administrators to select the toolbar type and visual mode, set maximum display resolution for sessions, and choose which items are visible to end users during their sessions. If administrators do not modify these settings, end users retain control over their toolbar preferences.

" + }, + "brandingConfigurationInput":{ + "shape":"BrandingConfigurationUpdateInput", + "documentation":"

The branding configuration that customizes the appearance of the web portal for end users. When updating user settings without an existing branding configuration, all fields (logo, favicon, wallpaper, localized strings, and color theme) are required except for terms of service. When updating user settings with an existing branding configuration, all fields are optional.

" + }, + "webAuthnAllowed":{ + "shape":"EnabledType", + "documentation":"

Specifies whether the user can use WebAuthn redirection for passwordless login to websites within the streaming session.

" } } }, @@ -5390,6 +5692,14 @@ "toolbarConfiguration":{ "shape":"ToolbarConfiguration", "documentation":"

The configuration of the toolbar. This allows administrators to select the toolbar type and visual mode, set maximum display resolution for sessions, and choose which items are visible to end users during their sessions. If administrators do not modify these settings, end users retain control over their toolbar preferences.

" + }, + "brandingConfiguration":{ + "shape":"BrandingConfiguration", + "documentation":"

The branding configuration output that customizes the appearance of the web portal for end users.

" + }, + "webAuthnAllowed":{ + "shape":"EnabledType", + "documentation":"

Specifies whether the user can use WebAuthn redirection for passwordless login to websites within the streaming session.

" } }, "documentation":"

A user settings resource that can be associated with a web portal. Once associated with a web portal, user settings control how users can transfer data between a streaming session and the their local devices.

" @@ -5445,6 +5755,14 @@ "toolbarConfiguration":{ "shape":"ToolbarConfiguration", "documentation":"

The configuration of the toolbar. This allows administrators to select the toolbar type and visual mode, set maximum display resolution for sessions, and choose which items are visible to end users during their sessions. If administrators do not modify these settings, end users retain control over their toolbar preferences.

" + }, + "brandingConfiguration":{ + "shape":"BrandingConfiguration", + "documentation":"

The branding configuration output that customizes the appearance of the web portal for end users.

" + }, + "webAuthnAllowed":{ + "shape":"EnabledType", + "documentation":"

Specifies whether the user can use WebAuthn redirection for passwordless login to websites within the streaming session.

" } }, "documentation":"

The summary of user settings.

" @@ -5520,6 +5838,26 @@ "min":1, "pattern":"vpc-[0-9a-z]*" }, + "WallpaperImage":{ + "type":"blob", + "max":5242880, + "min":0 + }, + "WallpaperImageInput":{ + "type":"structure", + "members":{ + "blob":{ + "shape":"WallpaperImage", + "documentation":"

The image provided as a binary image file.

" + }, + "s3Uri":{ + "shape":"S3Uri", + "documentation":"

The S3 URI pointing to the image file. The URI must use the format s3://bucket-name/key-name. You must have read access to the S3 object.

" + } + }, + "documentation":"

The input for a wallpaper image. Provide the image as either a binary image file or an S3 URI. Maximum 5 MB in JPEG or PNG format.

", + "union":true + }, "WebContentFilteringPolicy":{ "type":"structure", "members":{ diff --git a/awscli/botocore/data/workspaces/2015-04-08/service-2.json b/awscli/botocore/data/workspaces/2015-04-08/service-2.json index 7e672a450d41..3f4e466c6c79 100644 --- a/awscli/botocore/data/workspaces/2015-04-08/service-2.json +++ b/awscli/botocore/data/workspaces/2015-04-08/service-2.json @@ -123,7 +123,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InvalidParameterValuesException"} ], - "documentation":"

Copies the specified image from the specified Region to the current Region. For more information about copying images, see Copy a Custom WorkSpaces Image.

In the China (Ningxia) Region, you can copy images only within the same Region.

In Amazon Web Services GovCloud (US), to copy images to and from other Regions, contact Amazon Web ServicesSupport.

Before copying a shared image, be sure to verify that it has been shared from the correct Amazon Web Services account. To determine if an image has been shared and to see the ID of the Amazon Web Services account that owns an image, use the DescribeWorkSpaceImages and DescribeWorkspaceImagePermissions API operations.

" + "documentation":"

Copies the specified image from the specified Region to the current Region. For more information about copying images, see Copy a Custom WorkSpaces Image.

In the China (Ningxia) Region, you can copy images only within the same Region.

In Amazon Web Services GovCloud (US), to copy images to and from other Regions, contact Amazon Web Services Support.

Before copying a shared image, be sure to verify that it has been shared from the correct Amazon Web Services account. To determine if an image has been shared and to see the ID of the Amazon Web Services account that owns an image, use the DescribeWorkSpaceImages and DescribeWorkspaceImagePermissions API operations.

" }, "CreateAccountLinkInvitation":{ "name":"CreateAccountLinkInvitation", @@ -1329,7 +1329,7 @@ }, "input":{"shape":"TerminateWorkspacesRequest"}, "output":{"shape":"TerminateWorkspacesResult"}, - "documentation":"

Terminates the specified WorkSpaces.

Terminating a WorkSpace is a permanent action and cannot be undone. The user's data is destroyed. If you need to archive any user data, contact Amazon Web ServicesSupport before terminating the WorkSpace.

You can terminate a WorkSpace that is in any state except SUSPENDED.

This operation is asynchronous and returns before the WorkSpaces have been completely terminated. After a WorkSpace is terminated, the TERMINATED state is returned only briefly before the WorkSpace directory metadata is cleaned up, so this state is rarely returned. To confirm that a WorkSpace is terminated, check for the WorkSpace ID by using DescribeWorkSpaces. If the WorkSpace ID isn't returned, then the WorkSpace has been successfully terminated.

Simple AD and AD Connector are made available to you free of charge to use with WorkSpaces. If there are no WorkSpaces being used with your Simple AD or AD Connector directory for 30 consecutive days, this directory will be automatically deregistered for use with Amazon WorkSpaces, and you will be charged for this directory as per the Directory Service pricing terms.

To delete empty directories, see Delete the Directory for Your WorkSpaces. If you delete your Simple AD or AD Connector directory, you can always create a new one when you want to start using WorkSpaces again.

" + "documentation":"

Terminates the specified WorkSpaces.

Terminating a WorkSpace is a permanent action and cannot be undone. The user's data is destroyed. If you need to archive any user data, contact Amazon Web Services Support before terminating the WorkSpace.

You can terminate a WorkSpace that is in any state except SUSPENDED.

This operation is asynchronous and returns before the WorkSpaces have been completely terminated. After a WorkSpace is terminated, the TERMINATED state is returned only briefly before the WorkSpace directory metadata is cleaned up, so this state is rarely returned. To confirm that a WorkSpace is terminated, check for the WorkSpace ID by using DescribeWorkSpaces. If the WorkSpace ID isn't returned, then the WorkSpace has been successfully terminated.

Simple AD and AD Connector are made available to you free of charge to use with WorkSpaces. If there are no WorkSpaces being used with your Simple AD or AD Connector directory for 30 consecutive days, this directory will be automatically deregistered for use with Amazon WorkSpaces, and you will be charged for this directory as per the Directory Service pricing terms.

To delete empty directories, see Delete the Directory for Your WorkSpaces. If you delete your Simple AD or AD Connector directory, you can always create a new one when you want to start using WorkSpaces again.

" }, "TerminateWorkspacesPool":{ "name":"TerminateWorkspacesPool", @@ -1448,7 +1448,7 @@ {"shape":"InvalidParameterValuesException"}, {"shape":"OperationNotSupportedException"} ], - "documentation":"

Shares or unshares an image with one account in the same Amazon Web Services Region by specifying whether that account has permission to copy the image. If the copy image permission is granted, the image is shared with that account. If the copy image permission is revoked, the image is unshared with the account.

After an image has been shared, the recipient account can copy the image to other Regions as needed.

In the China (Ningxia) Region, you can copy images only within the same Region.

In Amazon Web Services GovCloud (US), to copy images to and from other Regions, contact Amazon Web ServicesSupport.

For more information about sharing images, see Share or Unshare a Custom WorkSpaces Image.

" + "documentation":"

Shares or unshares an image with one account in the same Amazon Web Services Region by specifying whether that account has permission to copy the image. If the copy image permission is granted, the image is shared with that account. If the copy image permission is revoked, the image is unshared with the account.

After an image has been shared, the recipient account can copy the image to other Regions as needed.

In the China (Ningxia) Region, you can copy images only within the same Region.

In Amazon Web Services GovCloud (US), to copy images to and from other Regions, contact Amazon Web Services Support.

For more information about sharing images, see Share or Unshare a Custom WorkSpaces Image.

" }, "UpdateWorkspacesPool":{ "name":"UpdateWorkspacesPool", @@ -1889,7 +1889,8 @@ "ValidationError.InsufficientMemory", "ValidationError.UnsupportedOperatingSystem", "DeploymentError.InternalServerError", - "DeploymentError.WorkspaceUnreachable" + "DeploymentError.WorkspaceUnreachable", + "ValidationError.ApplicationOldVersionExists" ] }, "AssociationState":{ @@ -2814,6 +2815,16 @@ "enum":[ "PENDING", "IN_PROGRESS", + "PROCESSING_SOURCE_IMAGE", + "IMAGE_TESTING_START", + "UPDATING_OPERATING_SYSTEM", + "IMAGE_COMPATIBILITY_CHECKING", + "IMAGE_TESTING_GENERALIZATION", + "CREATING_TEST_INSTANCE", + "INSTALLING_COMPONENTS", + "GENERALIZING", + "VALIDATING", + "PUBLISHING", "COMPLETED", "ERROR" ] @@ -3498,6 +3509,14 @@ "shape":"CustomWorkspaceImageImportState", "documentation":"

The state of the WorkSpace image.

" }, + "StateMessage":{ + "shape":"WorkflowStateMessage", + "documentation":"

The state message of the WorkSpace image import workflow.

" + }, + "ProgressPercentage":{ + "shape":"Percentage", + "documentation":"

The estimated progress percentage of the WorkSpace image import workflow.

" + }, "Created":{ "shape":"Timestamp", "documentation":"

The timestamp when the WorkSpace image import was created.

" @@ -5316,6 +5335,11 @@ "type":"list", "member":{"shape":"PendingCreateStandbyWorkspacesRequest"} }, + "Percentage":{ + "type":"integer", + "max":100, + "min":0 + }, "Platform":{ "type":"string", "enum":["WINDOWS"] @@ -6523,6 +6547,11 @@ "type":"list", "member":{"shape":"WorkSpaceAssociatedResourceType"} }, + "WorkflowStateMessage":{ + "type":"string", + "max":1000, + "min":1 + }, "Workspace":{ "type":"structure", "members":{ diff --git a/awscli/botocore/httpsession.py b/awscli/botocore/httpsession.py index 95d99ac9bc9f..dcddba5ae90f 100644 --- a/awscli/botocore/httpsession.py +++ b/awscli/botocore/httpsession.py @@ -4,6 +4,7 @@ import socket import sys from base64 import b64encode +from concurrent.futures import CancelledError from urllib3 import PoolManager, Timeout, proxy_from_url from urllib3.exceptions import ( @@ -17,6 +18,7 @@ ) from urllib3.exceptions import ReadTimeoutError as URLLib3ReadTimeoutError from urllib3.exceptions import SSLError as URLLib3SSLError +from urllib3.poolmanager import PoolKey from urllib3.util.retry import Retry from urllib3.util.ssl_ import ( OP_NO_COMPRESSION, @@ -28,8 +30,6 @@ ) from urllib3.util.url import parse_url -from concurrent.futures import CancelledError - try: from urllib3.util.ssl_ import OP_NO_TICKET, PROTOCOL_TLS_CLIENT except ImportError: @@ -75,6 +75,15 @@ DEFAULT_TIMEOUT = 60 MAX_POOL_CONNECTIONS = 10 DEFAULT_CA_BUNDLE = os.path.join(os.path.dirname(__file__), 'cacert.pem') +BUFFER_SIZE = None +if hasattr(PoolKey, 'key_blocksize'): + # urllib3 2.0 implemented its own chunking logic and set + # a default blocksize of 16KB. This creates a noticeable + # performance bottleneck when transferring objects + # larger than 100MB. Based on experiments, a blocksize + # of 128KB significantly improves throughput before + # getting diminishing returns. 
+ BUFFER_SIZE = 1024 * 128 try: from certifi import where @@ -330,7 +339,6 @@ def _proxies_kwargs(self, **kwargs): def _get_pool_manager_kwargs(self, **extra_kwargs): pool_manager_kwargs = { - 'strict': True, 'timeout': self._timeout, 'maxsize': self._max_pool_connections, 'ssl_context': self._get_ssl_context(), @@ -338,6 +346,8 @@ def _get_pool_manager_kwargs(self, **extra_kwargs): 'cert_file': self._cert_file, 'key_file': self._key_file, } + if BUFFER_SIZE: + pool_manager_kwargs['blocksize'] = BUFFER_SIZE pool_manager_kwargs.update(**extra_kwargs) return pool_manager_kwargs diff --git a/awscli/botocore/serialize.py b/awscli/botocore/serialize.py index 55a536cf35c7..1b436c7b75c3 100644 --- a/awscli/botocore/serialize.py +++ b/awscli/botocore/serialize.py @@ -989,6 +989,10 @@ def serialize_to_request(self, parameters, operation_model): if input_shape is not None: self._serialize_payload(parameters, serialized, input_shape) + host_prefix = self._expand_host_prefix(parameters, operation_model) + if host_prefix is not None: + serialized['host_prefix'] = host_prefix + self._serialize_headers(serialized, operation_model) return serialized diff --git a/awscli/botocore/useragent.py b/awscli/botocore/useragent.py index 9c559cbdd32f..630d18d6a6ec 100644 --- a/awscli/botocore/useragent.py +++ b/awscli/botocore/useragent.py @@ -585,9 +585,10 @@ def _build_app_id(self): User-Agent header. 
""" if self._client_config and self._client_config.user_agent_appid: - return [ - UserAgentComponent('app', self._client_config.user_agent_appid) - ] + appid = sanitize_user_agent_string_component( + raw_str=self._client_config.user_agent_appid, allow_hash=True + ) + return [RawStringUserAgentComponent(f'app/{appid}')] else: return [] diff --git a/awscli/botocore/utils.py b/awscli/botocore/utils.py index 51a509ebe0a6..04d516c13030 100644 --- a/awscli/botocore/utils.py +++ b/awscli/botocore/utils.py @@ -4082,7 +4082,6 @@ def _get_bearer_env_var_name(signing_name): 'ds-data': 'directory-service-data', 'dynamodbstreams': 'dynamodb-streams', 'elasticbeanstalk': 'elastic-beanstalk', - 'elastictranscoder': 'elastic-transcoder', 'elb': 'elastic-load-balancing', 'elbv2': 'elastic-load-balancing-v2', 'es': 'elasticsearch-service', diff --git a/awscli/customizations/argrename.py b/awscli/customizations/argrename.py index f91215c76c22..aec333db1a29 100644 --- a/awscli/customizations/argrename.py +++ b/awscli/customizations/argrename.py @@ -24,7 +24,6 @@ 'ec2.create-image.no-no-reboot': 'reboot', 'ec2.*.no-egress': 'ingress', 'ec2.*.no-disable-api-termination': 'enable-api-termination', - 'elastictranscoder.*.output': 'job-output', 'swf.register-activity-type.version': 'activity-version', 'swf.register-workflow-type.version': 'workflow-version', 'datapipeline.*.query': 'objects-query', diff --git a/awscli/customizations/cloudtrail/validation.py b/awscli/customizations/cloudtrail/validation.py index f4229bade451..d5255f4b8438 100644 --- a/awscli/customizations/cloudtrail/validation.py +++ b/awscli/customizations/cloudtrail/validation.py @@ -274,9 +274,10 @@ def load_digest_keys_in_range(self, bucket, prefix, start_date, end_date): """ digests = [] marker = self._create_digest_key(start_date, prefix) + s3_digest_files_prefix = self._create_digest_prefix(start_date, prefix) client = self._client_provider.get_client(bucket) paginator = client.get_paginator('list_objects') - 
page_iterator = paginator.paginate(Bucket=bucket, Marker=marker) + page_iterator = paginator.paginate(Bucket=bucket, Marker=marker, Prefix=s3_digest_files_prefix) key_filter = page_iterator.search('Contents[*].Key') # Create a target start end end date target_start_date = format_date(normalize_date(start_date)) @@ -287,7 +288,7 @@ def load_digest_keys_in_range(self, bucket, prefix, start_date, end_date): # Ensure digests are from the same trail. digest_key_regex = re.compile(self._create_digest_key_regex(prefix)) for key in key_filter: - if digest_key_regex.match(key): + if key and digest_key_regex.match(key): # Use a lexicographic comparison to know when to stop. extracted_date = extract_digest_key_date(key) if extracted_date > target_end_date: @@ -358,6 +359,25 @@ def _create_digest_key(self, start_date, key_prefix): key = key_prefix + '/' + key return key + def _create_digest_prefix(self, start_date, key_prefix): + """Creates an S3 prefix to scope listing to trail's region. + + :return: Returns a prefix string to limit S3 listing scope. 
+ """ + template = 'AWSLogs/' + template_params = { + 'account_id': self.account_id, + 'source_region': self.trail_source_region + } + if self.organization_id: + template += '{organization_id}/' + template_params['organization_id'] = self.organization_id + template += '{account_id}/CloudTrail-Digest/{source_region}' + prefix = template.format(**template_params) + if key_prefix: + prefix = key_prefix + '/' + prefix + return prefix + def _create_digest_key_regex(self, key_prefix): """Creates a regular expression used to match against S3 keys""" template = 'AWSLogs/' diff --git a/awscli/customizations/ecs/expressgateway/color_utils.py b/awscli/customizations/ecs/expressgateway/color_utils.py index 038120df60ae..7335d22f0877 100644 --- a/awscli/customizations/ecs/expressgateway/color_utils.py +++ b/awscli/customizations/ecs/expressgateway/color_utils.py @@ -24,7 +24,7 @@ class ColorUtils: def __init__(self): # Initialize colorama - init(autoreset=True, strip=False) + init(autoreset=False, strip=False) def make_green(self, text, use_color=True): if not use_color: diff --git a/awscli/customizations/ecs/expressgateway/display_strategy.py b/awscli/customizations/ecs/expressgateway/display_strategy.py new file mode 100644 index 000000000000..a92ac9de6b7e --- /dev/null +++ b/awscli/customizations/ecs/expressgateway/display_strategy.py @@ -0,0 +1,238 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. 
+ +"""Display strategy implementations for ECS Express Gateway Service monitoring.""" + +import asyncio +import time + +from botocore.exceptions import ClientError +from colorama import Style + +from awscli.customizations.ecs.exceptions import MonitoringError +from awscli.customizations.ecs.expressgateway.stream_display import ( + StreamDisplay, +) +from awscli.customizations.utils import uni_print + + +class DisplayStrategy: + """Base class for display strategies. + + Each strategy controls its own execution model, timing, and output format. + """ + + def execute_monitoring(self, collector, start_time, timeout_minutes): + """Execute the monitoring loop. + + Args: + collector: ServiceViewCollector instance for data fetching + start_time: Start timestamp for timeout calculation + timeout_minutes: Maximum monitoring duration in minutes + """ + raise NotImplementedError + + +class InteractiveDisplayStrategy(DisplayStrategy): + """Interactive display strategy with async spinner and keyboard navigation. + + Uses dual async tasks: + - Data task: Polls ECS APIs every 5 seconds + - Spinner task: Updates display every 100ms with rotating spinner + """ + + def __init__(self, display, use_color): + """Initialize the interactive display strategy. 
+ + Args: + display: Display instance from prompt_toolkit_display module + providing the interactive terminal interface + use_color: Whether to use colored output + """ + self.display = display + self.use_color = use_color + + def execute_monitoring(self, collector, start_time, timeout_minutes): + """Execute async monitoring with spinner and keyboard controls.""" + try: + final_output, timed_out = asyncio.run( + self._execute_async(collector, start_time, timeout_minutes) + ) + if timed_out: + uni_print(final_output + "\nMonitoring timed out!\n") + else: + uni_print(final_output + "\nMonitoring Complete!\n") + finally: + uni_print(Style.RESET_ALL) + + async def _execute_async(self, collector, start_time, timeout_minutes): + """Async execution with dual tasks for data and spinner.""" + spinner_chars = "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏" + spinner_index = 0 + current_output = "Waiting for initial data" + timed_out = False + + async def update_data(): + nonlocal current_output, timed_out + while True: + current_time = time.time() + if current_time - start_time > timeout_minutes * 60: + timed_out = True + # Only exit if app is running to avoid "Application is not running" error + if self.display.app.is_running: + self.display.app.exit() + break + + try: + loop = asyncio.get_event_loop() + new_output = await loop.run_in_executor( + None, collector.get_current_view, "{SPINNER}" + ) + current_output = new_output + except ClientError as e: + if ( + e.response.get('Error', {}).get('Code') + == 'InvalidParameterException' + ): + error_message = e.response.get('Error', {}).get( + 'Message', '' + ) + if ( + "Cannot call DescribeServiceRevisions for a service that is INACTIVE" + in error_message + ): + current_output = "Service is inactive" + else: + raise + else: + raise + + await asyncio.sleep(5.0) + + async def update_spinner(): + nonlocal spinner_index + while True: + spinner_char = spinner_chars[spinner_index] + display_output = current_output.replace( + "{SPINNER}", spinner_char + ) + 
status_text = f"Getting updates... {spinner_char} | up/down to scroll, q to quit" + self.display.display(display_output, status_text) + spinner_index = (spinner_index + 1) % len(spinner_chars) + await asyncio.sleep(0.1) + + data_task = asyncio.create_task(update_data()) + spinner_task = asyncio.create_task(update_spinner()) + display_task = None + + try: + display_task = asyncio.create_task(self.display.run()) + + done, pending = await asyncio.wait( + [display_task, data_task], return_when=asyncio.FIRST_COMPLETED + ) + + if data_task in done: + # Retrieve and re-raise any exception from the task. + # asyncio.wait() doesn't retrieve exceptions itself. + exc = data_task.exception() + if exc: + raise exc + + # Cancel pending tasks + for task in pending: + task.cancel() + # Await cancelled task to ensure proper cleanup and prevent + # warnings about unawaited tasks + try: + await task + except asyncio.CancelledError: + pass + + finally: + # Ensure display app is properly shut down + # Only exit if app is running to avoid "Application is not running" error + if self.display.app.is_running: + self.display.app.exit() + spinner_task.cancel() + if display_task is not None and not display_task.done(): + display_task.cancel() + # Await cancelled task to ensure proper cleanup and prevent + # warnings about unawaited tasks + try: + await display_task + except asyncio.CancelledError: + pass + + return current_output.replace("{SPINNER}", ""), timed_out + + +class TextOnlyDisplayStrategy(DisplayStrategy): + """Text-only display strategy with diff detection and timestamped output. + + Uses synchronous polling loop with change detection to output only + individual resource changes with timestamps. 
+ """ + + def __init__(self, use_color): + self.stream_display = StreamDisplay(use_color) + + def execute_monitoring(self, collector, start_time, timeout_minutes): + """Execute synchronous monitoring with text output.""" + self.stream_display.show_startup_message() + + try: + while True: + current_time = time.time() + if current_time - start_time > timeout_minutes * 60: + self.stream_display.show_timeout_message() + break + + try: + collector.get_current_view("") + + # Extract cached result for diff detection + managed_resources, info = collector.cached_monitor_result + + self.stream_display.show_monitoring_data( + managed_resources, info + ) + + except ClientError as e: + if ( + e.response.get('Error', {}).get('Code') + == 'InvalidParameterException' + ): + error_message = e.response.get('Error', {}).get( + 'Message', '' + ) + if ( + "Cannot call DescribeServiceRevisions for a service that is INACTIVE" + in error_message + ): + self.stream_display.show_service_inactive_message() + break + else: + raise + else: + raise + + time.sleep(5.0) + + except KeyboardInterrupt: + self.stream_display.show_user_stop_message() + except MonitoringError as e: + self.stream_display.show_error_message(e) + finally: + self.stream_display.show_completion_message() + uni_print(Style.RESET_ALL) diff --git a/awscli/customizations/ecs/expressgateway/managedresource.py b/awscli/customizations/ecs/expressgateway/managedresource.py index f4b3303bf678..07bb8070723a 100644 --- a/awscli/customizations/ecs/expressgateway/managedresource.py +++ b/awscli/customizations/ecs/expressgateway/managedresource.py @@ -12,7 +12,7 @@ # language governing permissions and limitations under the License. 
import sys -from datetime import datetime +from datetime import datetime, timezone import dateutil.parser @@ -113,6 +113,58 @@ def get_status_string(self, spinner_char, depth=0, use_color=True): lines.append("") return '\n'.join(lines) + def get_stream_string(self, timestamp, use_color=True): + """Returns the resource information formatted for stream/text-only display. + + Args: + timestamp (str): Timestamp string to prefix the output + use_color (bool): Whether to use ANSI color codes (default: True) + + Returns: + str: Formatted string with timestamp prefix and bracket-enclosed status + """ + lines = [] + parts = [f"[{timestamp}]"] + + # If both resource_type and identifier are None, show a placeholder + if not self.resource_type and not self.identifier: + parts.append( + self.color_utils.make_cyan("Unknown Resource", use_color) + ) + else: + if self.resource_type: + parts.append( + self.color_utils.make_cyan(self.resource_type, use_color) + ) + + if self.identifier: + colored_id = self.color_utils.color_by_status( + self.identifier, self.status, use_color + ) + parts.append(colored_id) + + if self.status: + status_text = self.color_utils.color_by_status( + self.status, self.status, use_color + ) + parts.append(f"[{status_text}]") + + lines.append(" ".join(parts)) + + if self.reason: + lines.append(f" Reason: {self.reason}") + + if self.updated_at: + updated_time = datetime.fromtimestamp( + self.updated_at, tz=timezone.utc + ).strftime("%Y-%m-%d %H:%M:%SZ") + lines.append(f" Last Updated At: {updated_time}") + + if self.additional_info: + lines.append(f" Info: {self.additional_info}") + + return "\n".join(lines) + def combine(self, other_resource): """Returns the version of the resource which has the most up to date timestamp. @@ -130,22 +182,28 @@ def combine(self, other_resource): else other_resource ) - def diff(self, other_resource): - """Returns a tuple of (self_diff, other_diff) for resources that are different. 
+ def compare_properties(self, other_resource): + """Compares individual resource properties to detect changes. + + This compares properties like status, reason, updated_at, additional_info + to detect if a resource has changed between polls. Args: other_resource (ManagedResource): Resource to compare against Returns: - tuple: (self_diff, other_diff) where: - - self_diff (ManagedResource): This resource if different, None if same - - other_diff (ManagedResource): Other resource if different, None if same + bool: True if properties differ, False if same """ if not other_resource: - return (self, None) - if ( + # No previous resource means it's new/different + return True + + # Resources are different if any field differs + return ( self.resource_type != other_resource.resource_type or self.identifier != other_resource.identifier - ): - return (self, other_resource) - return (None, None) + or self.status != other_resource.status + or self.reason != other_resource.reason + or self.updated_at != other_resource.updated_at + or self.additional_info != other_resource.additional_info + ) diff --git a/awscli/customizations/ecs/expressgateway/managedresourcegroup.py b/awscli/customizations/ecs/expressgateway/managedresourcegroup.py index b5643bc3b355..1bf3080cd6b2 100644 --- a/awscli/customizations/ecs/expressgateway/managedresourcegroup.py +++ b/awscli/customizations/ecs/expressgateway/managedresourcegroup.py @@ -39,7 +39,7 @@ def __init__( ): self.resource_type = resource_type self.identifier = identifier - # maintain input ordering + # Maintain input ordering self.sorted_resource_keys = [ self._create_key(resource) for resource in resources ] @@ -57,6 +57,86 @@ def _create_key(self, resource): identifier = resource.identifier if resource.identifier else "" return resource_type + "/" + identifier + def get_stream_string(self, timestamp, use_color=True): + """Returns flattened stream strings for all resources in the group. 
+ + Args: + timestamp (str): Timestamp string to prefix each resource + use_color (bool): Whether to use ANSI color codes (default: True) + + Returns: + str: All flattened resources formatted for stream display, separated by newlines + """ + flat_resources = [] + + for resource in self.resource_mapping.values(): + if isinstance(resource, ManagedResourceGroup): + # Recursively flatten nested groups + nested = resource.get_stream_string(timestamp, use_color) + if nested: + flat_resources.append(nested) + elif isinstance(resource, ManagedResource): + # Get stream string for individual resource + flat_resources.append( + resource.get_stream_string(timestamp, use_color) + ) + + return "\n".join(flat_resources) + + def get_changed_resources(self, previous_resources_dict): + """Get flattened list of resources that have changed properties. + + Compares individual resource properties (status, reason, updated_at, etc.) + against previous state to detect changes. This is used for change detection + in TEXT-ONLY mode, NOT for DEPLOYMENT diff (use compare_resource_sets for that). + + Args: + previous_resources_dict: Dict of {(resource_type, identifier): ManagedResource} + from previous poll. Can be empty dict for first poll. 
+ + Returns: + tuple: (changed_resources, updated_dict, removed_keys) + - changed_resources: List of ManagedResource that changed or None if no changes + - updated_dict: Updated dict with current resources for next comparison + - removed_keys: Set of keys that were removed since last poll + """ + current_resources = self._flatten_to_list() + changed_resources = [] + updated_dict = {} + + for resource in current_resources: + resource_key = (resource.resource_type, resource.identifier) + previous_resource = previous_resources_dict.get(resource_key) + + if not previous_resource: + changed_resources.append(resource) + else: + if resource.compare_properties(previous_resource): + changed_resources.append(resource) + + updated_dict[resource_key] = resource + + current_keys = { + (r.resource_type, r.identifier) for r in current_resources + } + removed_keys = set(previous_resources_dict.keys()) - current_keys + + return ( + changed_resources if changed_resources else None, + updated_dict, + removed_keys, + ) + + def _flatten_to_list(self): + """Flatten this resource group into a list of individual resources.""" + flat_list = [] + for resource in self.resource_mapping.values(): + if isinstance(resource, ManagedResourceGroup): + flat_list.extend(resource._flatten_to_list()) + elif isinstance(resource, ManagedResource): + flat_list.append(resource) + return flat_list + def is_terminal(self): return not self.resource_mapping or all( [ @@ -188,8 +268,12 @@ def _combine_child_resources(self, resource_a, resource_b): else: return resource_b - def diff(self, other_resource_group): - """Returns two ManagedResourceGroups representing unique resources in each group. + def compare_resource_sets(self, other_resource_group): + """Compares resource SETS between two groups to find additions/removals. 
+ + This is used for DEPLOYMENT view to show which resources were added or removed + between service configurations, NOT for detecting property changes within + individual resources (that's compare_properties() in ManagedResource). Args: other_resource_group (ManagedResourceGroup): Resource group to compare against @@ -218,7 +302,7 @@ def diff(self, other_resource_group): common_keys = self_keys & other_keys common_diff = { - key: self.resource_mapping[key].diff( + key: self.resource_mapping[key].compare_resource_sets( other_resource_group.resource_mapping.get(key) ) for key in common_keys diff --git a/awscli/customizations/ecs/expressgateway/stream_display.py b/awscli/customizations/ecs/expressgateway/stream_display.py new file mode 100644 index 000000000000..821bdc8a7b10 --- /dev/null +++ b/awscli/customizations/ecs/expressgateway/stream_display.py @@ -0,0 +1,112 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. + +"""Stream display implementation for ECS Express Gateway Service monitoring.""" + +import time +from datetime import datetime, timezone + +from awscli.customizations.ecs.expressgateway.managedresourcegroup import ( + ManagedResourceGroup, +) +from awscli.customizations.utils import uni_print + + +class StreamDisplay: + """Stream display for monitoring that outputs changes to stdout. + + Provides text-based monitoring output suitable for non-interactive + environments, logging, or piping to other commands. 
+ """ + + def __init__(self, use_color=True): + self.previous_resources_by_key = {} + self.use_color = use_color + + def show_startup_message(self): + """Show startup message.""" + timestamp = self._get_timestamp() + uni_print(f"[{timestamp}] Starting monitoring...\n") + + def show_polling_message(self): + """Show polling message.""" + timestamp = self._get_timestamp() + uni_print(f"[{timestamp}] Polling for updates...\n") + + def show_monitoring_data(self, resource_group, info): + """Show monitoring data for resources with diff detection. + + Args: + resource_group: ManagedResourceGroup or None + info: Additional info text to display + """ + timestamp = self._get_timestamp() + + if resource_group: + ( + changed_resources, + updated_dict, + removed_keys, + ) = resource_group.get_changed_resources( + self.previous_resources_by_key + ) + + self.previous_resources_by_key = updated_dict + + if changed_resources: + self._print_flattened_resources_list( + changed_resources, timestamp + ) + + if info: + uni_print(f"[{timestamp}] {info}\n") + + def _print_flattened_resources_list(self, resources_list, timestamp): + """Print individual resources from a flat list as timestamped lines. 
+ + Args: + resources_list: List of ManagedResource objects to print + timestamp: Timestamp string to prefix each line + """ + for resource in resources_list: + output = resource.get_stream_string(timestamp, self.use_color) + uni_print(output + "\n") + + def show_timeout_message(self): + """Show timeout message.""" + timestamp = self._get_timestamp() + uni_print(f"[{timestamp}] Monitoring timeout reached!\n") + + def show_service_inactive_message(self): + """Show service inactive message.""" + timestamp = self._get_timestamp() + uni_print(f"[{timestamp}] Service is inactive\n") + + def show_completion_message(self): + """Show completion message.""" + timestamp = self._get_timestamp() + uni_print(f"[{timestamp}] Monitoring complete!\n") + + def show_user_stop_message(self): + """Show user stop message.""" + timestamp = self._get_timestamp() + uni_print(f"[{timestamp}] Monitoring stopped by user\n") + + def show_error_message(self, error): + """Show error message.""" + timestamp = self._get_timestamp() + uni_print(f"[{timestamp}] Error: {error}\n") + + def _get_timestamp(self): + """Get formatted timestamp.""" + return datetime.now(timezone.utc).strftime("%Y-%m-%d %H:%M:%SZ") diff --git a/awscli/customizations/ecs/monitorexpressgatewayservice.py b/awscli/customizations/ecs/monitorexpressgatewayservice.py index 3004fe69ecc9..66128f619b5e 100644 --- a/awscli/customizations/ecs/monitorexpressgatewayservice.py +++ b/awscli/customizations/ecs/monitorexpressgatewayservice.py @@ -18,10 +18,17 @@ allowing users to track resource creation progress, deployment status, and service health through an interactive command-line interface with live updates and visual indicators. -The module implements two primary monitoring modes: +The data collection logic is handled by ServiceViewCollector, which parses AWS resources and +formats monitoring output. This module focuses on display and user interaction. 
+ +The module implements two resource view modes: - RESOURCE: Displays all resources associated with the service - DEPLOYMENT: Shows resources that have changed in the most recent deployment +And two display modes: +- INTERACTIVE: Real-time display with spinner and keyboard navigation (requires TTY) +- TEXT-ONLY: Text output with timestamps and change detection (works without TTY) + Key Features: - Real-time progress monitoring with spinner animations - Diff-based resource tracking for deployment changes @@ -33,30 +40,24 @@ ECSExpressGatewayServiceWatcher: Core monitoring logic and resource tracking Usage: - aws ecs monitor-express-gateway-service --service-arn [--resource-view RESOURCE|DEPLOYMENT] + aws ecs monitor-express-gateway-service --service-arn [--resource-view RESOURCE|DEPLOYMENT] [--mode INTERACTIVE|TEXT-ONLY] """ -import asyncio import sys -import threading import time -from functools import reduce from botocore.exceptions import ClientError from awscli.customizations.commands import BasicCommand from awscli.customizations.ecs.exceptions import MonitoringError -from awscli.customizations.ecs.expressgateway.managedresource import ( - ManagedResource, -) -from awscli.customizations.ecs.expressgateway.managedresourcegroup import ( - ManagedResourceGroup, +from awscli.customizations.ecs.expressgateway.display_strategy import ( + InteractiveDisplayStrategy, + TextOnlyDisplayStrategy, ) from awscli.customizations.ecs.prompt_toolkit_display import Display +from awscli.customizations.ecs.serviceviewcollector import ServiceViewCollector from awscli.customizations.utils import uni_print -TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%SZ" - class ECSMonitorExpressGatewayService(BasicCommand): """AWS CLI command for monitoring ECS Express Gateway Service deployments. @@ -69,14 +70,16 @@ class ECSMonitorExpressGatewayService(BasicCommand): DESCRIPTION = ( "Monitors the progress of resource creation for an ECS Express Gateway Service. 
" - "This command provides real-time monitoring of service deployments with interactive " - "progress display, showing the status of load balancers, security groups, auto-scaling " + "This command provides real-time monitoring of service deployments showing the status " + "of load balancers, security groups, auto-scaling " "configurations, and other AWS resources as they are created or updated. " "Use ``--resource-view RESOURCE`` to view all service resources, or ``--resource-view DEPLOYMENT`` to track only " "resources that have changed in the most recent deployment. " - "The command requires a terminal (TTY) to run and the monitoring session continues " - "until manually stopped by the user or the specified timeout is reached. " - "Use keyboard shortcuts to navigate: up/down to scroll through resources, 'q' to quit monitoring." + "Choose ``--mode INTERACTIVE`` for real-time display with keyboard navigation (requires TTY), " + "or ``--mode TEXT-ONLY`` for text output with timestamps (works without TTY). " + "The monitoring session continues until manually stopped by the user or the specified timeout is reached. " + "In INTERACTIVE mode, use keyboard shortcuts: up/down to scroll through resources, 'q' to quit. " + "In TEXT-ONLY mode, press Ctrl+C to stop monitoring." ) ARG_TABLE = [ @@ -101,6 +104,16 @@ class ECSMonitorExpressGatewayService(BasicCommand): 'default': 'RESOURCE', 'choices': ['RESOURCE', 'DEPLOYMENT'], }, + { + 'name': 'mode', + 'help_text': ( + "Display mode for monitoring output. " + "INTERACTIVE (default if TTY available) - Real-time display with spinner and keyboard navigation. " + "TEXT-ONLY - Text output with timestamps and change detection (works without TTY)." 
+ ), + 'required': False, + 'choices': ['INTERACTIVE', 'TEXT-ONLY'], + }, { 'name': 'timeout', 'help_text': ( @@ -131,15 +144,12 @@ def _run_main(self, parsed_args, parsed_globals): parsed_globals: Global CLI configuration including region and endpoint """ try: - # Check if running in a TTY for interactive display - if not sys.stdout.isatty(): - uni_print( - "Error: This command requires a TTY. " - "Please run this command in a terminal.", - sys.stderr, - ) - return 1 + display_mode = self._determine_display_mode(parsed_args.mode) + except ValueError as e: + uni_print(f"aws: [ERROR]: {str(e)}", sys.stderr) + return 1 + try: self._client = self._session.create_client( 'ecs', region_name=parsed_globals.region, @@ -154,14 +164,52 @@ def _run_main(self, parsed_args, parsed_globals): self._client, parsed_args.service_arn, parsed_args.resource_view, + display_mode, timeout_minutes=parsed_args.timeout, use_color=use_color, ).exec() except MonitoringError as e: uni_print(f"Error monitoring service: {e}", sys.stderr) + return 1 + + def _determine_display_mode(self, requested_mode): + """Determine and validate the display mode. + + Args: + requested_mode: User-requested mode ('interactive', 'text-only', or None) + + Returns: + str: Validated display mode ('interactive' or 'text-only') + + Raises: + ValueError: If interactive mode is requested without TTY + """ + # Determine display mode with auto-detection + if requested_mode is None: + # Auto-detect: interactive if TTY available, else text-only + return 'INTERACTIVE' if sys.stdout.isatty() else 'TEXT-ONLY' + + # Validate requested mode + if requested_mode == 'INTERACTIVE': + if not sys.stdout.isatty(): + raise ValueError( + "Interactive mode requires a TTY (terminal). " + "Use --mode TEXT-ONLY for non-interactive environments." 
+ ) + return 'INTERACTIVE' + + # text-only mode doesn't require TTY + return requested_mode def _should_use_color(self, parsed_globals): - """Determine if color output should be used based on global settings.""" + """Determine if color output should be used based on global settings. + + Args: + parsed_globals: Global CLI configuration + + Returns: + bool: True if color should be used + """ if parsed_globals.color == 'on': return True elif parsed_globals.color == 'off': @@ -180,7 +228,8 @@ class ECSExpressGatewayServiceWatcher: Args: client: ECS client for API calls service_arn (str): ARN of the service to monitor - mode (str): Monitoring mode - 'RESOURCE' or 'DEPLOYMENT' + resource_view (str): Resource view mode - 'RESOURCE' or 'DEPLOYMENT' + display_mode (str): Display mode - 'INTERACTIVE' or 'TEXT-ONLY' timeout_minutes (int): Maximum monitoring time in minutes (default: 30) """ @@ -188,21 +237,24 @@ def __init__( self, client, service_arn, - mode, + resource_view, + display_mode, timeout_minutes=30, - display=None, + display_strategy=None, use_color=True, + collector=None, ): - self._client = client self.service_arn = service_arn - self.mode = mode + self.display_mode = display_mode self.timeout_minutes = timeout_minutes - self.last_described_gateway_service_response = None - self.last_execution_time = 0 - self.cached_monitor_result = None self.start_time = time.time() self.use_color = use_color - self.display = display or Display() + self.display_strategy = ( + display_strategy or self._create_display_strategy() + ) + self.collector = collector or ServiceViewCollector( + client, service_arn, resource_view, use_color + ) @staticmethod def is_monitoring_available(): @@ -210,620 +262,27 @@ def is_monitoring_available(): return sys.stdout.isatty() def exec(self): - """Start monitoring the express gateway service with progress display.""" - - def monitor_service(spinner_char): - return self._monitor_express_gateway_service( - spinner_char, self.service_arn, self.mode 
- ) - - asyncio.run(self._execute_with_progress_async(monitor_service, 100)) - - async def _execute_with_progress_async( - self, execution, progress_refresh_millis, execution_refresh_millis=5000 - ): - """Execute monitoring loop with animated progress display.""" - spinner_chars = "⠋⠙⠹⠸⠼⠴⠦⠧⠇⠏" - spinner_index = 0 - - # Initialize with basic service resource - service_resource = ManagedResource("Service", self.service_arn) - initial_output = service_resource.get_status_string( - spinner_char="{SPINNER}", use_color=self.use_color - ) - current_output = initial_output - - async def update_data(): - nonlocal current_output - while True: - current_time = time.time() - if current_time - self.start_time > self.timeout_minutes * 60: - break - try: - loop = asyncio.get_event_loop() - new_output = await loop.run_in_executor( - None, execution, "{SPINNER}" - ) - current_output = new_output - except ClientError as e: - if ( - e.response.get('Error', {}).get('Code') - == 'InvalidParameterException' - ): - error_message = e.response.get('Error', {}).get( - 'Message', '' - ) - if ( - "Cannot call DescribeServiceRevisions for a service that is INACTIVE" - in error_message - ): - current_output = "Service is inactive" - else: - raise - else: - raise - await asyncio.sleep(execution_refresh_millis / 1000.0) - - async def update_spinner(): - nonlocal spinner_index - while True: - spinner_char = spinner_chars[spinner_index] - display_output = current_output.replace( - "{SPINNER}", spinner_char - ) - status_text = f"Getting updates... 
{spinner_char} | up/down to scroll, q to quit" - self.display.display(display_output, status_text) - spinner_index = (spinner_index + 1) % len(spinner_chars) - await asyncio.sleep(progress_refresh_millis / 1000.0) - - # Start both tasks - data_task = asyncio.create_task(update_data()) - spinner_task = asyncio.create_task(update_spinner()) - - try: - await self.display.run() - finally: - data_task.cancel() - spinner_task.cancel() - final_output = current_output.replace("{SPINNER}", "") - uni_print(final_output + "\nMonitoring Complete!\n") - - def _monitor_express_gateway_service( - self, spinner_char, service_arn, mode, execution_refresh_millis=5000 - ): - """Monitor service status and return formatted output. - - Args: - spinner_char (char): Character to print representing progress (unused with single spinner) - execution_refresh_millis (int): Refresh interval in milliseconds - service_arn (str): Service ARN to monitor - mode (str): Monitoring mode ('RESOURCE' or 'DEPLOYMENT') - - Returns: - str: Formatted status output - """ - current_time = time.time() - - if ( - current_time - self.last_execution_time - >= execution_refresh_millis / 1000.0 - ): - try: - describe_gateway_service_response = ( - self._client.describe_express_gateway_service( - serviceArn=service_arn - ) - ) - if not describe_gateway_service_response: - self.cached_monitor_result = ( - None, - "Trying to describe gateway service", - ) - elif ( - not ( - service := describe_gateway_service_response.get( - "service" - ) - ) - or not service.get("serviceArn") - or not service.get("activeConfigurations") - ): - self.cached_monitor_result = ( - None, - "Trying to describe gateway service", - ) - else: - self.last_described_gateway_service_response = ( - describe_gateway_service_response - ) - described_gateway_service = ( - describe_gateway_service_response.get("service") - ) - - if mode == "DEPLOYMENT": - managed_resources, info = self._diff_service_view( - described_gateway_service - ) - else: - 
managed_resources, info = self._combined_service_view( - described_gateway_service - ) - - service_resources = [ - self._parse_cluster(described_gateway_service), - self._parse_service(described_gateway_service), - ] - if managed_resources: - service_resources.append(managed_resources) - service_resource = ManagedResourceGroup( - resources=service_resources - ) - self._update_cached_monitor_results(service_resource, info) - except ClientError as e: - if ( - e.response.get('Error', {}).get('Code') - == 'InvalidParameterException' - ): - error_message = e.response.get('Error', {}).get( - 'Message', '' - ) - if ( - "Cannot call DescribeServiceRevisions for a service that is INACTIVE" - in error_message - ): - empty_resource_group = ManagedResourceGroup() - self._update_cached_monitor_results( - empty_resource_group, "Service is inactive" - ) - else: - raise - else: - raise - - self.last_execution_time = current_time - - if not self.cached_monitor_result: - return "Waiting for initial data" - else: - # Generate the output every iteration. This allow the underlying resources to utilize spinners - service_resource, info = self.cached_monitor_result - status_string = ( - service_resource.get_status_string( - spinner_char=spinner_char, use_color=self.use_color - ) - if service_resource - else None - ) - - output = "\n".join([x for x in [status_string, info] if x]) - return output - - def _diff_service_view(self, describe_gateway_service_response): - """Generate diff view showing changes in the latest deployment. - - Computes differences between source and target service revisions to show - what resources are being updated or disassociated in the current deployment. 
- - Args: - describe_gateway_service_response (dict): Service description from API - - Returns: - tuple: (resources, info_output) where: - - resources (ManagedResourceGroup): Diff view of resources - - info_output (str): Informational messages - """ - service_arn = describe_gateway_service_response.get("serviceArn") - list_service_deployments_response = ( - self._client.list_service_deployments( - service=service_arn, maxResults=1 - ) + """Execute monitoring using the appropriate display strategy.""" + self.display_strategy.execute_monitoring( + collector=self.collector, + start_time=self.start_time, + timeout_minutes=self.timeout_minutes, ) - listed_service_deployments = self._validate_and_parse_response( - list_service_deployments_response, - "ListServiceDeployments", - expected_field="serviceDeployments", - ) - if ( - not listed_service_deployments - or "serviceDeploymentArn" not in listed_service_deployments[0] - ): - return ( - None, - "Waiting for a deployment to start", - ) - deployment_arn = listed_service_deployments[0].get( - "serviceDeploymentArn" - ) - - describe_service_deployments_response = ( - self._client.describe_service_deployments( - serviceDeploymentArns=[deployment_arn] - ) - ) - described_service_deployments = self._validate_and_parse_response( - describe_service_deployments_response, - "DescribeServiceDeployments", - expected_field="serviceDeployments", - eventually_consistent=True, - ) - described_service_deployment = described_service_deployments[0] - if ( - not described_service_deployment - or not described_service_deployment.get("targetServiceRevision") - ): - return ( - None, - "Waiting for a deployment to start", - ) - - target_sr = described_service_deployment.get( - "targetServiceRevision" - ).get("arn") - - target_sr_resources_list, described_target_sr_list = ( - self._describe_and_parse_service_revisions([target_sr]) - ) - if len(target_sr_resources_list) != 1: - return (None, "Trying to describe service revisions") - 
target_sr_resources = target_sr_resources_list[0] - described_target_sr = described_target_sr_list[0] - - task_def_arn = described_target_sr.get("taskDefinition") - if "sourceServiceRevisions" in described_service_deployment: - source_sr_resources, _ = ( - self._describe_and_parse_service_revisions( - [ - sr.get("arn") - for sr in described_service_deployment.get( - "sourceServiceRevisions" - ) - ] - ) - ) - if len(source_sr_resources) != len( - described_service_deployment.get("sourceServiceRevisions") - ): - return (None, "Trying to describe service revisions)") - source_sr_resources_combined = reduce( - lambda x, y: x.combine(y), source_sr_resources - ) - else: - source_sr_resources_combined = ManagedResourceGroup() - - updating_resources, disassociating_resources = ( - target_sr_resources.diff(source_sr_resources_combined) - ) - updating_resources.resource_type = "Updating" - disassociating_resources.resource_type = "Disassociating" - service_resources = ManagedResourceGroup( - resource_type="Deployment", - identifier=deployment_arn, - status=described_service_deployment.get("status"), - reason=described_service_deployment.get("statusReason"), - resources=[ - ManagedResource( - resource_type="TargetServiceRevision", identifier=target_sr - ), - ManagedResource( - resource_type="TaskDefinition", identifier=task_def_arn - ), - updating_resources, - disassociating_resources, - ], - ) - return service_resources, None - - def _combined_service_view(self, describe_gateway_service_response): - """Generate combined view of all active service resources. - - Extracts and combines resources from all active service configurations, - resolving conflicts by taking the version with the latest timestamp. - - Args: - describe_gateway_service_response (dict): Service description from API + def _create_display_strategy(self): + """Create display strategy based on display mode. 
Returns: - tuple: (resources, info_output) where: - - resources (ManagedResourceGroup): Combined view of all resources - - info_output (str): Informational messages - """ - service_revision_arns = [ - config.get("serviceRevisionArn") - for config in describe_gateway_service_response.get( - "activeConfigurations" - ) - ] - service_revision_resources, _ = ( - self._describe_and_parse_service_revisions(service_revision_arns) - ) - - if len(service_revision_resources) != len(service_revision_arns): - return (None, "Trying to describe service revisions") - - service_resource = reduce( - lambda x, y: x.combine(y), service_revision_resources - ) - - return service_resource, None - - def _update_cached_monitor_results(self, resource, info): - """Update cached monitoring results with new data. - - Args: - resource: New resource data (replaces existing if provided) - info: New info message (always replaces existing) - """ - if not self.cached_monitor_result: - self.cached_monitor_result = (resource, info) - else: - self.cached_monitor_result = ( - resource or self.cached_monitor_result[0], - info, - ) - - def _validate_and_parse_response( - self, - response, - operation_name, - expected_field=None, - eventually_consistent=False, - ): - """Validate API response and extract expected field. 
- - Args: - response: API response to validate - operation_name: Name of the operation for error messages - expected_field: Field to extract from response (optional) - eventually_consistent: Whether to filter out MISSING failures - - Returns: - Extracted field value or None if no expected_field specified + DisplayStrategy: Appropriate strategy for the selected mode Raises: - MonitoringError: If response is invalid or missing required fields + ValueError: If display mode is not 'INTERACTIVE' or 'TEXT-ONLY' """ - if not response: - raise MonitoringError(f"{operation_name} response is empty") - - self._parse_failures(response, operation_name, eventually_consistent) - - if not expected_field: - return None - - if response.get(expected_field) is None: - raise MonitoringError( - f"{operation_name} response is missing {expected_field}" + if self.display_mode == 'TEXT-ONLY': + return TextOnlyDisplayStrategy(use_color=self.use_color) + elif self.display_mode == 'INTERACTIVE': + return InteractiveDisplayStrategy( + display=Display(), use_color=self.use_color ) - return response.get(expected_field) - - def _parse_failures(self, response, operation_name, eventually_consistent): - """Parse and raise errors for API response failures. 
- - Args: - response: API response to check for failures - operation_name: Name of the operation for error messages - eventually_consistent: Whether to filter out MISSING failures for eventually consistent operations - - Raises: - MonitoringError: If failures are found in the response - """ - failures = response.get("failures") - - if not failures: - return - - if any(not f.get('arn') or not f.get('reason') for f in failures): - raise MonitoringError( - "Invalid failure response: missing arn or reason" - ) - - if eventually_consistent: - failures = [ - failure - for failure in failures - if failure.get("reason") != "MISSING" - ] - - if not failures: - return - - failure_msgs = [ - f"{f['arn']} failed with {f['reason']}" for f in failures - ] - joined_msgs = '\n'.join(failure_msgs) - raise MonitoringError(f"{operation_name}:\n{joined_msgs}") - - def _describe_and_parse_service_revisions(self, arns): - """Describe and parse service revisions into managed resources. - - Args: - arns (list): List of service revision ARNs to describe - - Returns: - tuple: (parsed_resources, described_revisions) where: - - parsed_resources (list): List of ManagedResourceGroup objects - - described_revisions (list): Raw API response data - """ - # API supports up to 20 arns, DescribeExpressGatewayService should never return more than 5 - describe_service_revisions_response = ( - self._client.describe_service_revisions(serviceRevisionArns=arns) - ) - described_service_revisions = self._validate_and_parse_response( - describe_service_revisions_response, - "DescribeServiceRevisions", - expected_field="serviceRevisions", - eventually_consistent=True, - ) - - return [ - self._parse_ecs_managed_resources(sr) - for sr in described_service_revisions - ], described_service_revisions - - def _parse_cluster(self, service): - return ManagedResource("Cluster", service.get("cluster")) - - def _parse_service(self, service): - service_arn = service.get("serviceArn") - cluster = service.get("cluster") - 
describe_service_response = self._client.describe_services( - cluster=cluster, services=[service_arn] - ) - described_service = self._validate_and_parse_response( - describe_service_response, "DescribeServices", "services" - )[0] - return ManagedResource( - "Service", - service.get("serviceArn"), - additional_info=described_service - and described_service.get("events")[0].get("message") - if described_service.get("events") - else None, - ) - - def _parse_ecs_managed_resources(self, service_revision): - managed_resources = service_revision.get("ecsManagedResources") - if not managed_resources: - return ManagedResourceGroup() - - parsed_resources = [] - if "ingressPaths" in managed_resources: - parsed_resources.append( - ManagedResourceGroup( - resource_type="IngressPaths", - resources=[ - self._parse_ingress_path_resources(ingress_path) - for ingress_path in managed_resources.get( - "ingressPaths" - ) - ], - ) - ) - if "autoScaling" in managed_resources: - parsed_resources.append( - self._parse_auto_scaling_configuration( - managed_resources.get("autoScaling") - ) - ) - if "metricAlarms" in managed_resources: - parsed_resources.append( - self._parse_metric_alarms( - managed_resources.get("metricAlarms") - ) - ) - if "serviceSecurityGroups" in managed_resources: - parsed_resources.append( - self._parse_service_security_groups( - managed_resources.get("serviceSecurityGroups") - ) - ) - if "logGroups" in managed_resources: - parsed_resources.append( - self._parse_log_groups(managed_resources.get("logGroups")) - ) - return ManagedResourceGroup(resources=parsed_resources) - - def _parse_ingress_path_resources(self, ingress_path): - resources = [] - if ingress_path.get("loadBalancer"): - resources.append( - self._parse_managed_resource( - ingress_path.get("loadBalancer"), "LoadBalancer" - ) - ) - if ingress_path.get("loadBalancerSecurityGroups"): - resources.extend( - self._parse_managed_resource_list( - ingress_path.get("loadBalancerSecurityGroups"), - 
"LoadBalancerSecurityGroup", - ) - ) - if ingress_path.get("certificate"): - resources.append( - self._parse_managed_resource( - ingress_path.get("certificate"), "Certificate" - ) - ) - if ingress_path.get("listener"): - resources.append( - self._parse_managed_resource( - ingress_path.get("listener"), "Listener" - ) - ) - if ingress_path.get("rule"): - resources.append( - self._parse_managed_resource(ingress_path.get("rule"), "Rule") - ) - if ingress_path.get("targetGroups"): - resources.extend( - self._parse_managed_resource_list( - ingress_path.get("targetGroups"), "TargetGroup" - ) - ) - return ManagedResourceGroup( - resource_type="IngressPath", - identifier=ingress_path.get("endpoint"), - resources=resources, - ) - - def _parse_auto_scaling_configuration(self, auto_scaling_configuration): - resources = [] - if auto_scaling_configuration.get("scalableTarget"): - resources.append( - self._parse_managed_resource( - auto_scaling_configuration.get("scalableTarget"), - "ScalableTarget", - ) - ) - if auto_scaling_configuration.get("applicationAutoScalingPolicies"): - resources.extend( - self._parse_managed_resource_list( - auto_scaling_configuration.get( - "applicationAutoScalingPolicies" - ), - "ApplicationAutoScalingPolicy", - ) - ) - return ManagedResourceGroup( - resource_type="AutoScalingConfiguration", resources=resources - ) - - def _parse_metric_alarms(self, metric_alarms): - return ManagedResourceGroup( - resource_type="MetricAlarms", - resources=self._parse_managed_resource_list( - metric_alarms, "MetricAlarm" - ), - ) - - def _parse_service_security_groups(self, service_security_groups): - return ManagedResourceGroup( - resource_type="ServiceSecurityGroups", - resources=self._parse_managed_resource_list( - service_security_groups, "SecurityGroup" - ), - ) - - def _parse_log_groups(self, logs_groups): - return ManagedResourceGroup( - resource_type="LogGroups", - resources=self._parse_managed_resource_list( - logs_groups, "LogGroup" - ), - ) - - def 
_parse_managed_resource(self, resource, resource_type): - return ManagedResource( - resource_type, - resource.get("arn"), - status=resource.get("status"), - updated_at=resource.get("updatedAt"), - reason=resource.get("statusReason"), - ) - - def _parse_managed_resource_list(self, data_list, resource_type): - return [ - self._parse_managed_resource(data, resource_type) - for data in data_list - ] + else: + raise ValueError(f"Invalid display mode: {self.display_mode}") diff --git a/awscli/customizations/ecs/monitormutatinggatewayservice.py b/awscli/customizations/ecs/monitormutatinggatewayservice.py index 682eb53e6678..9f055ee5dd94 100644 --- a/awscli/customizations/ecs/monitormutatinggatewayservice.py +++ b/awscli/customizations/ecs/monitormutatinggatewayservice.py @@ -64,7 +64,7 @@ def __call__(self, parser, namespace, values, option_string=None): class MonitoringResourcesArgument(CustomArgument): - """Custom CLI argument for enabling resource monitoring. + """Custom CLI argument for enabling resource monitoring with optional mode. Adds the --monitor-resources flag to gateway service commands, allowing users to opt into real-time monitoring of resource changes. @@ -74,14 +74,13 @@ def __init__(self, name): super().__init__( name, help_text=( - 'Enable live monitoring of service resource status. ' + 'Enable monitoring of service resource status. ' 'Specify ``DEPLOYMENT`` to show only resources that are being added or removed ' 'as part of the latest service deployment, or ``RESOURCE`` to show all resources ' 'from all active configurations of the service. ' 'Defaults based on operation type: create-express-gateway-service and ' 'update-express-gateway-service default to ``DEPLOYMENT`` mode. ' - 'delete-express-gateway-service defaults to ``RESOURCE`` mode. ' - 'Requires a terminal (TTY) to run.' + 'delete-express-gateway-service defaults to ``RESOURCE`` mode.' 
), choices=['DEPLOYMENT', 'RESOURCE'], nargs='?', @@ -90,6 +89,23 @@ def __init__(self, name): ) +class MonitoringModeArgument(CustomArgument): + """Custom CLI argument for monitor display mode. Only used when --monitor-resources is specified.""" + + def __init__(self): + super().__init__( + 'monitor-mode', + help_text=( + 'Display mode for monitoring output (requires ``--monitor-resources``). ' + 'INTERACTIVE (default if TTY available) - Real-time display with keyboard navigation. ' + 'TEXT-ONLY - Text output with timestamps, suitable for logging and non-interactive environments.' + ), + choices=['INTERACTIVE', 'TEXT-ONLY'], + nargs='?', + dest='monitor_mode', + ) + + class MonitorMutatingGatewayService: """Event handler for monitoring gateway service mutations. @@ -110,6 +126,7 @@ def __init__(self, api, default_resource_view, watcher_class=None): self.session = None self.parsed_globals = None self.effective_resource_view = None + self.effective_mode = None self._watcher_class = watcher_class or ECSExpressGatewayServiceWatcher def before_building_argument_table_parser(self, session, **kwargs): @@ -131,6 +148,7 @@ def building_argument_table(self, argument_table, session, **kwargs): argument_table['monitor-resources'] = MonitoringResourcesArgument( 'monitor-resources' ) + argument_table['monitor-mode'] = MonitoringModeArgument() def operation_args_parsed(self, parsed_args, parsed_globals, **kwargs): """Store monitoring flag state and globals after argument parsing. @@ -139,21 +157,71 @@ def operation_args_parsed(self, parsed_args, parsed_globals, **kwargs): parsed_args: Parsed command line arguments parsed_globals: Global CLI configuration """ + self._parse_and_validate_monitoring_args(parsed_args, parsed_globals) + + def _parse_and_validate_monitoring_args(self, parsed_args, parsed_globals): + """Parse and validate monitoring-related arguments. 
+ + Extracts monitor_resources and monitor_mode from parsed_args, + validates their combination, and sets effective_resource_view + and effective_mode. + + Args: + parsed_args: Parsed command line arguments + parsed_globals: Global CLI configuration + + Raises: + ValueError: If monitor-mode is used without monitor-resources + """ # Store parsed_globals for later use self.parsed_globals = parsed_globals - # Get monitor_resources value and determine actual monitoring mode + # Parse monitor_resources to determine if monitoring is enabled monitor_value = getattr(parsed_args, 'monitor_resources', None) + self.effective_resource_view = self._parse_monitor_resources( + monitor_value + ) + + # Validate monitor_mode + mode_value = getattr(parsed_args, 'monitor_mode', None) + self.effective_mode = self._validate_mode( + mode_value, self.effective_resource_view + ) + + def _parse_monitor_resources(self, monitor_value): + """Parse monitor_resources value to determine resource view. + Args: + monitor_value: Value from --monitor-resources flag + + Returns: + str or None: Resource view mode (DEPLOYMENT/RESOURCE) or None + """ if monitor_value is None: - # Not specified, no monitoring - self.effective_resource_view = None + return None elif monitor_value == '__DEFAULT__': - # Flag specified without value, use default based on operation - self.effective_resource_view = self.default_resource_view + return self.default_resource_view else: - # Explicit choice provided (DEPLOYMENT or RESOURCE) - self.effective_resource_view = monitor_value + return monitor_value + + def _validate_mode(self, mode_value, resource_view): + """Validate the monitor mode value. 
+ + Args: + mode_value: Value from --monitor-mode flag + resource_view: Effective resource view (None if not monitoring) + + Returns: + str: Display mode ('INTERACTIVE' or 'TEXT-ONLY') + + Raises: + ValueError: If mode is specified without resource monitoring + """ + if mode_value is not None and resource_view is None: + raise ValueError( + "--monitor-mode can only be used with --monitor-resources" + ) + return mode_value if mode_value else 'INTERACTIVE' def after_call(self, parsed, context, http_response, **kwargs): """Start monitoring after successful API call if flag is enabled. @@ -171,13 +239,20 @@ def after_call(self, parsed, context, http_response, **kwargs): ).get('serviceArn'): return - # Check monitoring availability - if not self._watcher_class.is_monitoring_available(): + # Interactive mode requires TTY, text-only does not + # Default to text-only if no TTY available + if self.effective_mode == 'INTERACTIVE' and not sys.stdout.isatty(): uni_print( - "Monitoring is not available (requires TTY). Skipping monitoring.\n", - out_file=sys.stderr, + "aws: [ERROR]: Interactive mode requires a TTY (terminal). " + "Monitoring skipped. 
Use --monitor-mode TEXT-ONLY for non-interactive environments.", + sys.stderr, ) return + elif self.effective_mode == 'INTERACTIVE' and sys.stdout.isatty(): + pass # Interactive mode with TTY - OK + elif not sys.stdout.isatty(): + # No TTY - force text-only mode + self.effective_mode = 'TEXT-ONLY' if not self.session or not self.parsed_globals: uni_print( @@ -199,20 +274,13 @@ def after_call(self, parsed, context, http_response, **kwargs): # Clear output when monitoring is invoked parsed.clear() - try: - self._watcher_class( - ecs_client, - service_arn, - self.effective_resource_view, - use_color=self._should_use_color(self.parsed_globals), - ).exec() - except Exception as e: - uni_print( - "Encountered an error, terminating monitoring\n" - + str(e) - + "\n", - out_file=sys.stderr, - ) + self._watcher_class( + ecs_client, + service_arn, + self.effective_resource_view, + self.effective_mode, + use_color=self._should_use_color(self.parsed_globals), + ).exec() def _should_use_color(self, parsed_globals): """Determine if color output should be used based on global settings.""" diff --git a/awscli/customizations/ecs/serviceviewcollector.py b/awscli/customizations/ecs/serviceviewcollector.py new file mode 100644 index 000000000000..ea799fff0dbe --- /dev/null +++ b/awscli/customizations/ecs/serviceviewcollector.py @@ -0,0 +1,541 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "license" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF +# ANY KIND, either express or implied. See the License for the specific +# language governing permissions and limitations under the License. + +"""Service view collector for ECS Express Gateway Service monitoring. 
+ +This module provides business logic for collecting and formatting +ECS Express Gateway Service monitoring data. +""" + +import time +from functools import reduce + +from botocore.exceptions import ClientError + +from awscli.customizations.ecs.exceptions import MonitoringError +from awscli.customizations.ecs.expressgateway.managedresource import ( + ManagedResource, +) +from awscli.customizations.ecs.expressgateway.managedresourcegroup import ( + ManagedResourceGroup, +) + + +class ServiceViewCollector: + """Collects and formats ECS Express Gateway Service monitoring data. + + Responsible for: + - Making ECS API calls + - Parsing resource data + - Formatting output strings + - Caching responses + + Args: + client: ECS client for API calls + service_arn (str): ARN of the service to monitor + mode (str): Monitoring mode - 'RESOURCE' or 'DEPLOYMENT' + use_color (bool): Whether to use color in output + """ + + def __init__(self, client, service_arn, mode, use_color=True): + self._client = client + self.service_arn = service_arn + self.mode = mode + self.use_color = use_color + self.last_described_gateway_service_response = None + self.last_execution_time = 0 + self.cached_monitor_result = None + + def get_current_view( + self, spinner_char="{SPINNER}", execution_refresh_millis=5000 + ): + """Get current monitoring view as formatted string. 
+ + Args: + spinner_char (str): Character for progress indication + execution_refresh_millis (int): Refresh interval in milliseconds + + Returns: + str: Formatted monitoring output + """ + current_time = time.time() + + if ( + current_time - self.last_execution_time + >= execution_refresh_millis / 1000.0 + ): + try: + describe_gateway_service_response = ( + self._client.describe_express_gateway_service( + serviceArn=self.service_arn + ) + ) + if not describe_gateway_service_response: + self.cached_monitor_result = ( + None, + "Trying to describe gateway service", + ) + elif ( + not ( + service := describe_gateway_service_response.get( + "service" + ) + ) + or not service.get("serviceArn") + or service.get("activeConfigurations") is None + ): + self.cached_monitor_result = ( + None, + "Trying to describe gateway service", + ) + else: + self.last_described_gateway_service_response = ( + describe_gateway_service_response + ) + described_gateway_service = ( + describe_gateway_service_response.get("service") + ) + + if self.mode == "DEPLOYMENT": + managed_resources, info = self._diff_service_view( + described_gateway_service + ) + else: + managed_resources, info = self._combined_service_view( + described_gateway_service + ) + + service_resources = [ + self._parse_cluster(described_gateway_service), + self._parse_service(described_gateway_service), + ] + if managed_resources: + service_resources.append(managed_resources) + service_resource = ManagedResourceGroup( + resources=service_resources + ) + self._update_cached_monitor_results(service_resource, info) + except ClientError as e: + if ( + e.response.get('Error', {}).get('Code') + == 'InvalidParameterException' + ): + error_message = e.response.get('Error', {}).get( + 'Message', '' + ) + if ( + "Cannot call DescribeServiceRevisions for a service that is INACTIVE" + in error_message + ): + empty_resource_group = ManagedResourceGroup() + self._update_cached_monitor_results( + empty_resource_group, "Service is inactive" 
+ ) + else: + raise + else: + raise + + self.last_execution_time = current_time + + if not self.cached_monitor_result: + return "Waiting for initial data" + else: + service_resource, info = self.cached_monitor_result + status_string = ( + service_resource.get_status_string( + spinner_char=spinner_char, use_color=self.use_color + ) + if service_resource + else None + ) + + output = "\n".join([x for x in [status_string, info] if x]) + return output + + def _diff_service_view(self, describe_gateway_service_response): + """Generate diff view showing changes in the latest deployment.""" + service_arn = describe_gateway_service_response.get("serviceArn") + list_service_deployments_response = ( + self._client.list_service_deployments( + service=service_arn, maxResults=1 + ) + ) + listed_service_deployments = self._validate_and_parse_response( + list_service_deployments_response, + "ListServiceDeployments", + expected_field="serviceDeployments", + ) + if ( + not listed_service_deployments + or "serviceDeploymentArn" not in listed_service_deployments[0] + ): + return ( + None, + "Waiting for a deployment to start", + ) + + deployment_arn = listed_service_deployments[0].get( + "serviceDeploymentArn" + ) + + describe_service_deployments_response = ( + self._client.describe_service_deployments( + serviceDeploymentArns=[deployment_arn] + ) + ) + described_service_deployments = self._validate_and_parse_response( + describe_service_deployments_response, + "DescribeServiceDeployments", + expected_field="serviceDeployments", + eventually_consistent=True, + ) + if not described_service_deployments: + return (None, "Waiting for a deployment to start") + + described_service_deployment = described_service_deployments[0] + if ( + not described_service_deployment + or not described_service_deployment.get("targetServiceRevision") + ): + return ( + None, + "Waiting for a deployment to start", + ) + + target_sr = described_service_deployment.get( + "targetServiceRevision" + ).get("arn") + + 
target_sr_resources_list, described_target_sr_list = ( + self._describe_and_parse_service_revisions([target_sr]) + ) + if len(target_sr_resources_list) != 1: + return (None, "Trying to describe service revisions") + target_sr_resources = target_sr_resources_list[0] + described_target_sr = described_target_sr_list[0] + + task_def_arn = described_target_sr.get("taskDefinition") + if "sourceServiceRevisions" in described_service_deployment: + source_sr_resources, _ = ( + self._describe_and_parse_service_revisions( + [ + sr.get("arn") + for sr in described_service_deployment.get( + "sourceServiceRevisions" + ) + ] + ) + ) + if len(source_sr_resources) != len( + described_service_deployment.get("sourceServiceRevisions") + ): + return (None, "Trying to describe service revisions") + source_sr_resources_combined = reduce( + lambda x, y: x.combine(y), source_sr_resources + ) + else: + source_sr_resources_combined = ManagedResourceGroup() + + updating_resources, disassociating_resources = ( + target_sr_resources.compare_resource_sets( + source_sr_resources_combined + ) + ) + updating_resources.resource_type = "Updating" + disassociating_resources.resource_type = "Disassociating" + service_resources = ManagedResourceGroup( + resource_type="Deployment", + identifier=deployment_arn, + status=described_service_deployment.get("status"), + reason=described_service_deployment.get("statusReason"), + resources=[ + ManagedResource( + resource_type="TargetServiceRevision", identifier=target_sr + ), + ManagedResource( + resource_type="TaskDefinition", identifier=task_def_arn + ), + updating_resources, + disassociating_resources, + ], + ) + return service_resources, None + + def _combined_service_view(self, describe_gateway_service_response): + """Generate combined view of all active service resources.""" + service_revision_arns = [ + config.get("serviceRevisionArn") + for config in describe_gateway_service_response.get( + "activeConfigurations" + ) + ] + service_revision_resources, _ = 
( + self._describe_and_parse_service_revisions(service_revision_arns) + ) + + # If empty, we're still waiting for active configurations + if not service_revision_resources or len( + service_revision_resources + ) != len(service_revision_arns): + return (None, "Trying to describe service revisions") + + service_resource = reduce( + lambda x, y: x.combine(y), service_revision_resources + ) + + return service_resource, None + + def _update_cached_monitor_results(self, resource, info): + """Update cached monitoring results with new data.""" + if not self.cached_monitor_result: + self.cached_monitor_result = (resource, info) + else: + self.cached_monitor_result = ( + resource or self.cached_monitor_result[0], + info, + ) + + def _validate_and_parse_response( + self, + response, + operation_name, + expected_field=None, + eventually_consistent=False, + ): + """Validate API response and extract expected field.""" + if not response: + raise MonitoringError(f"{operation_name} response is empty") + + self._parse_failures(response, operation_name, eventually_consistent) + + if not expected_field: + return None + + if response.get(expected_field) is None: + raise MonitoringError( + f"{operation_name} response is missing {expected_field}" + ) + return response.get(expected_field) + + def _parse_failures(self, response, operation_name, eventually_consistent): + """Parse and raise errors for API response failures.""" + failures = response.get("failures") + + if not failures: + return + + if any(not f.get('arn') or not f.get('reason') for f in failures): + raise MonitoringError( + "Invalid failure response: missing arn or reason" + ) + + if eventually_consistent: + failures = [ + failure + for failure in failures + if failure.get("reason") != "MISSING" + ] + + if not failures: + return + + failure_msgs = [ + f"{f['arn']} failed with {f['reason']}" for f in failures + ] + joined_msgs = '\n'.join(failure_msgs) + raise MonitoringError(f"{operation_name}:\n{joined_msgs}") + + def 
_describe_and_parse_service_revisions(self, arns): + """Describe and parse service revisions into managed resources.""" + describe_service_revisions_response = ( + self._client.describe_service_revisions(serviceRevisionArns=arns) + ) + described_service_revisions = self._validate_and_parse_response( + describe_service_revisions_response, + "DescribeServiceRevisions", + expected_field="serviceRevisions", + eventually_consistent=True, + ) + + return [ + self._parse_ecs_managed_resources(sr) + for sr in described_service_revisions + ], described_service_revisions + + def _parse_cluster(self, service): + return ManagedResource("Cluster", service.get("cluster")) + + def _parse_service(self, service): + service_arn = service.get("serviceArn") + cluster = service.get("cluster") + describe_service_response = self._client.describe_services( + cluster=cluster, services=[service_arn] + ) + described_service = self._validate_and_parse_response( + describe_service_response, "DescribeServices", "services" + )[0] + return ManagedResource( + "Service", + service.get("serviceArn"), + additional_info=described_service + and described_service.get("events")[0].get("message") + if described_service.get("events") + else None, + ) + + def _parse_ecs_managed_resources(self, service_revision): + managed_resources = service_revision.get("ecsManagedResources") + if not managed_resources: + return ManagedResourceGroup() + + parsed_resources = [] + if "ingressPaths" in managed_resources: + parsed_resources.append( + ManagedResourceGroup( + resource_type="IngressPaths", + resources=[ + self._parse_ingress_path_resources(ingress_path) + for ingress_path in managed_resources.get( + "ingressPaths" + ) + ], + ) + ) + if "autoScaling" in managed_resources: + parsed_resources.append( + self._parse_auto_scaling_configuration( + managed_resources.get("autoScaling") + ) + ) + if "metricAlarms" in managed_resources: + parsed_resources.append( + self._parse_metric_alarms( + 
managed_resources.get("metricAlarms") + ) + ) + if "serviceSecurityGroups" in managed_resources: + parsed_resources.append( + self._parse_service_security_groups( + managed_resources.get("serviceSecurityGroups") + ) + ) + if "logGroups" in managed_resources: + parsed_resources.append( + self._parse_log_groups(managed_resources.get("logGroups")) + ) + return ManagedResourceGroup(resources=parsed_resources) + + def _parse_ingress_path_resources(self, ingress_path): + resources = [] + if ingress_path.get("loadBalancer"): + resources.append( + self._parse_managed_resource( + ingress_path.get("loadBalancer"), "LoadBalancer" + ) + ) + if ingress_path.get("loadBalancerSecurityGroups"): + resources.extend( + self._parse_managed_resource_list( + ingress_path.get("loadBalancerSecurityGroups"), + "LoadBalancerSecurityGroup", + ) + ) + if ingress_path.get("certificate"): + resources.append( + self._parse_managed_resource( + ingress_path.get("certificate"), "Certificate" + ) + ) + if ingress_path.get("listener"): + resources.append( + self._parse_managed_resource( + ingress_path.get("listener"), "Listener" + ) + ) + if ingress_path.get("rule"): + resources.append( + self._parse_managed_resource(ingress_path.get("rule"), "Rule") + ) + if ingress_path.get("targetGroups"): + resources.extend( + self._parse_managed_resource_list( + ingress_path.get("targetGroups"), "TargetGroup" + ) + ) + return ManagedResourceGroup( + resource_type="IngressPath", + identifier=ingress_path.get("endpoint"), + resources=resources, + ) + + def _parse_auto_scaling_configuration(self, auto_scaling_configuration): + resources = [] + if auto_scaling_configuration.get("scalableTarget"): + resources.append( + self._parse_managed_resource( + auto_scaling_configuration.get("scalableTarget"), + "ScalableTarget", + ) + ) + if auto_scaling_configuration.get("applicationAutoScalingPolicies"): + resources.extend( + self._parse_managed_resource_list( + auto_scaling_configuration.get( + 
"applicationAutoScalingPolicies" + ), + "ApplicationAutoScalingPolicy", + ) + ) + return ManagedResourceGroup( + resource_type="AutoScalingConfiguration", resources=resources + ) + + def _parse_metric_alarms(self, metric_alarms): + return ManagedResourceGroup( + resource_type="MetricAlarms", + resources=self._parse_managed_resource_list( + metric_alarms, "MetricAlarm" + ), + ) + + def _parse_service_security_groups(self, service_security_groups): + return ManagedResourceGroup( + resource_type="ServiceSecurityGroups", + resources=self._parse_managed_resource_list( + service_security_groups, "SecurityGroup" + ), + ) + + def _parse_log_groups(self, logs_groups): + return ManagedResourceGroup( + resource_type="LogGroups", + resources=self._parse_managed_resource_list( + logs_groups, "LogGroup" + ), + ) + + def _parse_managed_resource(self, resource, resource_type): + return ManagedResource( + resource_type, + resource.get("arn"), + status=resource.get("status"), + updated_at=resource.get("updatedAt"), + reason=resource.get("statusReason"), + ) + + def _parse_managed_resource_list(self, data_list, resource_type): + return [ + self._parse_managed_resource(data, resource_type) + for data in data_list + ] diff --git a/awscli/customizations/emr/argumentschema.py b/awscli/customizations/emr/argumentschema.py index b3168857bb13..b5497e4c50d1 100644 --- a/awscli/customizations/emr/argumentschema.py +++ b/awscli/customizations/emr/argumentschema.py @@ -868,3 +868,47 @@ } }, } + +MONITORING_CONFIGURATION_SCHEMA = { + "type": "object", + "properties": { + "CloudWatchLogConfiguration": { + "type": "object", + "description": "CloudWatch log configuration settings and metadata that specify settings like log files to monitor and where to send them.", + "properties": { + "Enabled": { + "type": "boolean", + "description": "Specifies if CloudWatch logging is enabled.", + "required": True + }, + "LogGroupName": { + "type": "string", + "description": "The name of the CloudWatch log group 
where logs are published." + }, + "LogStreamNamePrefix": { + "type": "string", + "description": "The prefix of the log stream name." + }, + "EncryptionKeyArn": { + "type": "string", + "description": "The ARN of the encryption key used to encrypt the logs." + }, + "LogTypes": { + "type": "map", + "key": { + "type": "string", + "description": "Log type category" + }, + "value": { + "type": "array", + "items": { + "type": "string" + }, + "description": "File names (STDOUT or STDERR) for the log type" + }, + "description": "A map of log types to file names for publishing logs to the standard output or standard error streams for CloudWatch. Valid log types include STEP_LOGS, SPARK_DRIVER, and SPARK_EXECUTOR. Valid file names for each type include STDOUT and STDERR." + } + } + } + } +} diff --git a/awscli/customizations/emr/createcluster.py b/awscli/customizations/emr/createcluster.py index 4075c85a6303..c970d581f771 100644 --- a/awscli/customizations/emr/createcluster.py +++ b/awscli/customizations/emr/createcluster.py @@ -213,6 +213,11 @@ class CreateCluster(Command): 'schema': argumentschema.AUTO_TERMINATION_POLICY_SCHEMA, 'help_text': helptext.AUTO_TERMINATION_POLICY, }, + { + 'name': 'monitoring-configuration', + 'schema': argumentschema.MONITORING_CONFIGURATION_SCHEMA, + 'help_text': helptext.MONITORING_CONFIGURATION, + }, { 'name': 'extended-support', 'action': 'store_true', @@ -554,6 +559,13 @@ def _run_main_command(self, parsed_args, parsed_globals): parsed_args.auto_termination_policy, ) + if parsed_args.monitoring_configuration is not None: + emrutils.apply_dict( + params, + 'MonitoringConfiguration', + parsed_args.monitoring_configuration, + ) + self._validate_required_applications(parsed_args) run_job_flow_response = emrutils.call( diff --git a/awscli/customizations/emr/helptext.py b/awscli/customizations/emr/helptext.py index d88d0026386a..f58235c2162c 100755 --- a/awscli/customizations/emr/helptext.py +++ b/awscli/customizations/emr/helptext.py @@ -570,3 
+570,13 @@ ) EXTENDED_SUPPORT = '

Reserved.

' + +MONITORING_CONFIGURATION = ( + '

Monitoring configuration for an Amazon EMR cluster. ' + 'The configuration specifies CloudWatch logging settings for the cluster. ' + 'You can configure the CloudWatchLogConfiguration which includes ' + 'the Enabled flag (required), LogGroupName, LogStreamNamePrefix, ' + 'EncryptionKeyArn, and LogTypes. The LogTypes parameter is a map ' + 'of log type categories (e.g., "STEP_LOGS", "SPARK_DRIVER", ' + '"SPARK_EXECUTOR") to a list of file names (e.g., "STDOUT", "STDERR").

' +) diff --git a/awscli/customizations/login/login.py b/awscli/customizations/login/login.py index 4959b6d9c104..f102ab2ae795 100644 --- a/awscli/customizations/login/login.py +++ b/awscli/customizations/login/login.py @@ -20,6 +20,7 @@ RequiredInputValidator, ) from awscli.customizations.configure.writer import ConfigFileWriter +from awscli.customizations.exceptions import ConfigurationError from awscli.customizations.login.utils import ( CrossDeviceLoginTokenFetcher, LoginType, @@ -96,6 +97,10 @@ def _run_main(self, parsed_args, parsed_globals): if profile_name not in self._session.available_profiles: self._session._profile_map[profile_name] = {} + # Abort if the profile is already configured with a different style + # of credentials, since they'd still have precedence over login + self.ensure_profile_does_not_have_existing_credentials(profile_name) + config = botocore.config.Config( region_name=region, signature_version=botocore.UNSIGNED, @@ -177,6 +182,37 @@ def accept_change_to_existing_profile_if_needed( else: uni_print('Invalid response. Please enter "y" or "n"') + def ensure_profile_does_not_have_existing_credentials(self, profile_name): + """ + Raises an error if the specified profile is already + configured with a different style of credentials. 
+ """ + config = self._session.full_config['profiles'].get(profile_name, {}) + existing_credentials_style = None + + if 'web_identity_token_file' in config: + existing_credentials_style = 'Web Identity' + elif 'sso_role_name' in config or 'sso_account_id' in config: + existing_credentials_style = 'SSO' + elif 'aws_access_key_id' in config: + existing_credentials_style = 'Access Key' + elif 'role_arn' in config: + existing_credentials_style = 'Assume Role' + elif 'credential_process' in config: + existing_credentials_style = 'Credential Process' + + if existing_credentials_style: + raise ConfigurationError( + f'Profile \'{profile_name}\' is already configured ' + f'with {existing_credentials_style} credentials.\n\n' + f'You may run \'aws login --profile new-profile-name\' to ' + f'create a new profile with the specified name. Otherwise you ' + f'must first manually remove the existing credentials ' + f'from \'{profile_name}\'.\n' + ) + + return False + @staticmethod def resolve_sign_in_type(parsed_args): if parsed_args.remote: diff --git a/awscli/customizations/logs/startlivetail.py b/awscli/customizations/logs/startlivetail.py index 53cc3789c9c5..0e99bbebb855 100644 --- a/awscli/customizations/logs/startlivetail.py +++ b/awscli/customizations/logs/startlivetail.py @@ -620,6 +620,7 @@ def __init__( log_events, session_metadata: LiveTailSessionMetadata, app_output=None, + app_input=None, ) -> None: self._log_events = log_events self._session_metadata = session_metadata @@ -633,9 +634,9 @@ def __init__( self._session_metadata, self._keywords_to_highlight, ) - self._create_ui(app_output) + self._create_ui(app_output, app_input) - def _create_ui(self, app_output): + def _create_ui(self, app_output, app_input): prompt_buffer = Buffer() self._prompt_buffer_control = BufferControl(prompt_buffer) prompt_buffer_window = Window(self._prompt_buffer_control) @@ -677,6 +678,7 @@ def _create_ui(self, app_output): key_bindings=self._key_bindings, refresh_interval=1, 
output=app_output, + input=app_input, ) @property diff --git a/awscli/examples/apigateway/update-vpc-link.rst b/awscli/examples/apigateway/update-vpc-link.rst new file mode 100644 index 000000000000..19c86490b815 --- /dev/null +++ b/awscli/examples/apigateway/update-vpc-link.rst @@ -0,0 +1,47 @@ +**Example 1: To update an existing VPC link name** + +The following ``update-vpc-link`` example updates the name of the specified VPC link. :: + + aws apigateway update-vpc-link \ + --vpc-link-id ab3de6 \ + --patch-operations op=replace,path=/name,value=my-vpc-link + +Output:: + + { + "id": "ab3de6", + "name": "my-vpc-link", + "targetArns": [ + "arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/net/my-lb/12a456s89aaa12345" + ], + "status": "AVAILABLE", + "statusMessage": "Your vpc link is ready for use", + "tags": {} + } + +For more information, see `Updating existing VPC link `__ in the *AWS CLI Command Reference*. + + +**Example 2: To update an existing VPC link name and description** + +The following ``update-vpc-link`` example updates the name and description of the specified VPC link. :: + + aws apigateway update-vpc-link \ + --vpc-link-id ab3de6 \ + --patch-operations op=replace,path=/name,value=my-vpc-link op=replace,path=/description,value="My custom description" + +Output:: + + { + "id": "ab3de6", + "name": "my-vpc-link", + "description": "My custom description", + "targetArns": [ + "arn:aws:elasticloadbalancing:us-east-1:123456789012:loadbalancer/net/my-lb/12a456s89aaa12345" + ], + "status": "AVAILABLE", + "statusMessage": "Your vpc link is ready for use", + "tags": {} + } + +For more information, see `Updating existing VPC link `__ in the *AWS CLI Command Reference*. 
diff --git a/awscli/examples/cloudformation/create-generated-template.rst b/awscli/examples/cloudformation/create-generated-template.rst new file mode 100644 index 000000000000..9f321fcba5c1 --- /dev/null +++ b/awscli/examples/cloudformation/create-generated-template.rst @@ -0,0 +1,50 @@ +**To create a generated template from scanned resources** + +The following ``create-generated-template`` example creates a generated template named ``MyTemplate`` from scanned resources. :: + + aws cloudformation create-generated-template \ + --generated-template-name MyTemplate \ + --resources file://resources.json + +Contents of ``resources.json``:: + + [ + { + "ResourceType": "AWS::EKS::Cluster", + "LogicalResourceId":"MyCluster", + "ResourceIdentifier": { + "ClusterName": "MyAppClusterName" + } + }, + { + "ResourceType": "AWS::AutoScaling::AutoScalingGroup", + "LogicalResourceId":"MyASG", + "ResourceIdentifier": { + "AutoScalingGroupName": "MyAppASGName" + } + }, + { + "ResourceType": "AWS::EKS::Nodegroup", + "LogicalResourceId":"MyNodegroup", + "ResourceIdentifier": { + "NodegroupName": "MyAppNodegroupName" + } + }, + { + "ResourceType": "AWS::IAM::Role", + "LogicalResourceId":"MyRole", + "ResourceIdentifier": { + "RoleId": "arn:aws::iam::123456789012:role/MyAppIAMRole" + } + } + ] + +Output:: + + { + "Arn": + "arn:aws:cloudformation:us-east-1:123456789012:generatedtemplate/7fc8512c-d8cb-4e02-b266-d39c48344e48", + "Name": "MyTemplate" + } + +For more information, see `Create a CloudFormation template from resources scanned with IaC generator `__ in the *AWS CloudFormation User Guide*. 
diff --git a/awscli/examples/cloudformation/create-stack-refactor.rst b/awscli/examples/cloudformation/create-stack-refactor.rst new file mode 100644 index 000000000000..0e2d974df7d6 --- /dev/null +++ b/awscli/examples/cloudformation/create-stack-refactor.rst @@ -0,0 +1,16 @@ +**To create the stack definition for a stack refactor operation** + +The following ``create-stack-refactor`` example creates the stack definition for stack refactoring. :: + + aws cloudformation create-stack-refactor \ + --stack-definitions \ + StackName=Stack1,TemplateBody@=file://template1-updated.yaml \ + StackName=Stack2,TemplateBody@=file://template2-updated.yaml + +Output:: + + { + "StackRefactorId": "9c384f70-4e07-4ed7-a65d-fee5eb430841" + } + +For more information, see `Stack refactoring `__ in the *AWS CloudFormation User Guide*. diff --git a/awscli/examples/cloudformation/delete-generated-template.rst b/awscli/examples/cloudformation/delete-generated-template.rst new file mode 100644 index 000000000000..817305663b49 --- /dev/null +++ b/awscli/examples/cloudformation/delete-generated-template.rst @@ -0,0 +1,10 @@ +**To delete a generated template** + +The following ``delete-generated-template`` example deletes the specified template. :: + + aws cloudformation delete-generated-template \ + --generated-template-name MyTemplate + +This command produces no output. + +For more information, see `Generating templates from existing resources `__ in the *AWS CloudFormation User Guide*. diff --git a/awscli/examples/cloudformation/describe-generated-template.rst b/awscli/examples/cloudformation/describe-generated-template.rst new file mode 100644 index 000000000000..e66305302b62 --- /dev/null +++ b/awscli/examples/cloudformation/describe-generated-template.rst @@ -0,0 +1,62 @@ +**To describe a generated template** + +The following ``describe-generated-template`` example describes the specified template. 
:: + + aws cloudformation describe-generated-template \ + --generated-template-name MyTemplate + +Output:: + + { + "GeneratedTemplateId": "arn:aws:cloudformation:us-east-1:123456789012:generatedTemplate/7d881acf-f307-4ded-910e-f8fb49b96894", + "GeneratedTemplateName": "MyTemplate", + "Resources": [ + { + "ResourceType": "AWS::EC2::SecurityGroup", + "LogicalResourceId": "EC2SecurityGroup", + "ResourceIdentifier": { + "Id": "sg-1234567890abcdef0" + }, + "ResourceStatus": "COMPLETE", + "ResourceStatusReason": "Resource Template complete", + "Warnings": [] + }, + { + "ResourceType": "AWS::EC2::Instance", + "LogicalResourceId": "EC2Instance", + "ResourceIdentifier": { + "InstanceId": "i-1234567890abcdef0" + }, + "ResourceStatus": "COMPLETE", + "ResourceStatusReason": "Resource Template complete", + "Warnings": [] + }, + { + "ResourceType": "AWS::EC2::KeyPair", + "LogicalResourceId": "EC2KeyPairSshkeypair", + "ResourceIdentifier": { + "KeyName": "sshkeypair" + }, + "ResourceStatus": "COMPLETE", + "ResourceStatusReason": "Resource Template complete", + "Warnings": [] + } + ], + "Status": "COMPLETE", + "StatusReason": "All resources complete", + "CreationTime": "2025-09-23T19:38:06.435000+00:00", + "LastUpdatedTime": "2025-09-23T19:38:10.798000+00:00", + "Progress": { + "ResourcesSucceeded": 3, + "ResourcesFailed": 0, + "ResourcesProcessing": 0, + "ResourcesPending": 0 + }, + "TemplateConfiguration": { + "DeletionPolicy": "RETAIN", + "UpdateReplacePolicy": "RETAIN" + }, + "TotalWarnings": 0 + } + +For more information, see `Generating templates from existing resources `__ in the *AWS CloudFormation User Guide*. 
diff --git a/awscli/examples/cloudformation/describe-resource-scan.rst b/awscli/examples/cloudformation/describe-resource-scan.rst new file mode 100644 index 000000000000..bda035638add --- /dev/null +++ b/awscli/examples/cloudformation/describe-resource-scan.rst @@ -0,0 +1,38 @@ +**To describe a resource scan** + +The following ``describe-resource-scan`` example describes the resource scan with the specified scan ID. :: + + aws cloudformation describe-resource-scan \ + --resource-scan-id arn:aws:cloudformation:us-east-1:123456789012:resourceScan/0a699f15-489c-43ca-a3ef-3e6ecfa5da60 + +Output:: + + { + "ResourceScanId": "arn:aws:cloudformation:us-east-1:123456789012:resourceScan/0a699f15-489c-43ca-a3ef-3e6ecfa5da60", + "Status": "COMPLETE", + "StartTime": "2025-08-21T03:10:38.485000+00:00", + "EndTime": "2025-08-21T03:20:28.485000+00:00", + "PercentageCompleted": 100.0, + "ResourceTypes": [ + "AWS::CloudFront::CachePolicy", + "AWS::CloudFront::OriginRequestPolicy", + "AWS::EC2::DHCPOptions", + "AWS::EC2::InternetGateway", + "AWS::EC2::KeyPair", + "AWS::EC2::NetworkAcl", + "AWS::EC2::NetworkInsightsPath", + "AWS::EC2::NetworkInterface", + "AWS::EC2::PlacementGroup", + "AWS::EC2::Route", + "AWS::EC2::RouteTable", + "AWS::EC2::SecurityGroup", + "AWS::EC2::Subnet", + "AWS::EC2::SubnetCidrBlock", + "AWS::EC2::SubnetNetworkAclAssociation", + "AWS::EC2::SubnetRouteTableAssociation", + ... + ], + "ResourcesRead": 676 + } + +For more information, see `Generating templates from existing resources `__ in the *AWS CloudFormation User Guide*. 
diff --git a/awscli/examples/cloudformation/describe-stack-refactor.rst b/awscli/examples/cloudformation/describe-stack-refactor.rst new file mode 100644 index 000000000000..fa3612dd6982 --- /dev/null +++ b/awscli/examples/cloudformation/describe-stack-refactor.rst @@ -0,0 +1,20 @@ +**To describe a stack refactor operation** + +The following ``describe-stack-refactor`` example describes the stack refactor operation with the specified stack refactor ID. :: + + aws cloudformation describe-stack-refactor \ + --stack-refactor-id 9c384f70-4e07-4ed7-a65d-fee5eb430841 + +Output:: + + { + "StackRefactorId": "9c384f70-4e07-4ed7-a65d-fee5eb430841", + "StackIds": [ + "arn:aws:cloudformation:us-east-1:123456789012:stack/Stack1/3e6a1ff0-94b1-11f0-aa6f-0a88d2e03acf", + "arn:aws:cloudformation:us-east-1:123456789012:stack/Stack2/5da91650-94b1-11f0-81cf-0a23500e151b" + ], + "ExecutionStatus": "AVAILABLE", + "Status": "CREATE_COMPLETE" + } + +For more information, see `Stack refactoring `__ in the *AWS CloudFormation User Guide*. diff --git a/awscli/examples/cloudformation/execute-stack-refactor.rst b/awscli/examples/cloudformation/execute-stack-refactor.rst new file mode 100644 index 000000000000..e083bd3b913e --- /dev/null +++ b/awscli/examples/cloudformation/execute-stack-refactor.rst @@ -0,0 +1,10 @@ +**To complete a stack refactor operation** + +The following ``execute-stack-refactor`` example completes the stack refactor operation with the specified stack refactor ID. :: + + aws cloudformation execute-stack-refactor \ + --stack-refactor-id 9c384f70-4e07-4ed7-a65d-fee5eb430841 + +This command produces no output. + +For more information, see `Stack refactoring `__ in the *AWS CloudFormation User Guide*. 
diff --git a/awscli/examples/cloudformation/list-generated-templates.rst b/awscli/examples/cloudformation/list-generated-templates.rst new file mode 100644 index 000000000000..30ae27d0d65d --- /dev/null +++ b/awscli/examples/cloudformation/list-generated-templates.rst @@ -0,0 +1,41 @@ +**To list generated templates** + +The following ``list-generated-templates`` example lists all generated templates. :: + + aws cloudformation list-generated-templates + +Output:: + + { + "Summaries": [ + { + "GeneratedTemplateId": "arn:aws:cloudformation:us-east-1:123456789012:generatedtemplate/7fc8512c-d8cb-4e02-b266-d39c48344e48", + "GeneratedTemplateName": "MyTemplate", + "Status": "COMPLETE", + "StatusReason": "All resources complete", + "CreationTime": "2025-09-23T20:13:24.283000+00:00", + "LastUpdatedTime": "2025-09-23T20:13:28.610000+00:00", + "NumberOfResources": 4 + }, + { + "GeneratedTemplateId": "arn:aws:cloudformation:us-east-1:123456789012:generatedTemplate/f10dd1c4-edc6-4823-8153-ab6112b8d051", + "GeneratedTemplateName": "MyEC2InstanceTemplate", + "Status": "COMPLETE", + "StatusReason": "All resources complete", + "CreationTime": "2024-08-08T19:35:49.790000+00:00", + "LastUpdatedTime": "2024-08-08T19:35:52.207000+00:00", + "NumberOfResources": 3 + }, + { + "GeneratedTemplateId": "arn:aws:cloudformation:us-east-1:123456789012:generatedTemplate/e5a1c89f-7ce2-41bd-9bdf-75b7c852e3ca", + "GeneratedTemplateName": "MyEKSNodeGroupTemplate", + "Status": "COMPLETE", + "StatusReason": "All resources complete", + "CreationTime": "2024-07-16T20:39:27.883000+00:00", + "LastUpdatedTime": "2024-07-16T20:39:35.766000+00:00", + "NumberOfResources": 4 + } + ] + } + +For more information, see `Generating templates from existing resources `__ in the *AWS CloudFormation User Guide*. 
diff --git a/awscli/examples/cloudformation/list-resource-scan-related-resources.rst b/awscli/examples/cloudformation/list-resource-scan-related-resources.rst new file mode 100644 index 000000000000..1150d4799d30 --- /dev/null +++ b/awscli/examples/cloudformation/list-resource-scan-related-resources.rst @@ -0,0 +1,47 @@ +**To list related resources from a resource scan** + +The following ``list-resource-scan-related-resources`` example lists resources from the specified resource scan that are related to resources in ``resources.json``. :: + + aws cloudformation list-resource-scan-related-resources \ + --resource-scan-id arn:aws:cloudformation:us-east-1:123456789012:resourceScan/0a699f15-489c-43ca-a3ef-3e6ecfa5da60 \ + --resources file://resources.json + +Contents of ``resources.json``:: + + [ + { + "ResourceType": "AWS::EKS::Cluster", + "ResourceIdentifier": { + "ClusterName": "MyAppClusterName" + } + }, + { + "ResourceType": "AWS::AutoScaling::AutoScalingGroup", + "ResourceIdentifier": { + "AutoScalingGroupName": "MyAppASGName" + } + } + ] + +Output:: + + { + "RelatedResources": [ + { + "ResourceType": "AWS::EKS::Nodegroup", + "ResourceIdentifier": { + "NodegroupName": "MyAppNodegroupName" + }, + "ManagedByStack": false + }, + { + "ResourceType": "AWS::IAM::Role", + "ResourceIdentifier": { + "RoleId": "arn:aws::iam::123456789012:role/MyAppIAMRole" + }, + "ManagedByStack": false + } + ] + } + +For more information, see `Create a CloudFormation template from resources scanned with IaC generator `__ in the *AWS CloudFormation User Guide*. 
diff --git a/awscli/examples/cloudformation/list-resource-scan-resources.rst b/awscli/examples/cloudformation/list-resource-scan-resources.rst new file mode 100644 index 000000000000..1b072381806a --- /dev/null +++ b/awscli/examples/cloudformation/list-resource-scan-resources.rst @@ -0,0 +1,30 @@ +**To list resources from a resource scan** + +The following ``list-resource-scan-resources`` example lists resources from the specified resource scan, filtered by resource identifier. :: + + aws cloudformation list-resource-scan-resources \ + --resource-scan-id arn:aws:cloudformation:us-east-1:123456789012:resourceScan/0a699f15-489c-43ca-a3ef-3e6ecfa5da60 \ + --resource-identifier MyApp + +Output:: + + { + "Resources": [ + { + "ResourceType": "AWS::EKS::Cluster", + "ResourceIdentifier": { + "ClusterName": "MyAppClusterName" + }, + "ManagedByStack": false + }, + { + "ResourceType": "AWS::AutoScaling::AutoScalingGroup", + "ResourceIdentifier": { + "AutoScalingGroupName": "MyAppASGName" + }, + "ManagedByStack": false + } + ] + } + +For more information, see `Create a CloudFormation template from resources scanned with IaC generator `__ in the *AWS CloudFormation User Guide*. diff --git a/awscli/examples/cloudformation/list-stack-refactor-actions.rst b/awscli/examples/cloudformation/list-stack-refactor-actions.rst new file mode 100644 index 000000000000..1e0c4e06ae16 --- /dev/null +++ b/awscli/examples/cloudformation/list-stack-refactor-actions.rst @@ -0,0 +1,71 @@ +**To list actions for a stack refactor operation** + +The following ``list-stack-refactor-actions`` example lists actions for the stack refactor operation with the specified stack refactor ID. 
:: + + aws cloudformation list-stack-refactor-actions \ + --stack-refactor-id 9c384f70-4e07-4ed7-a65d-fee5eb430841 + +Output:: + + { + "StackRefactorActions": [ + { + "Action": "MOVE", + "Entity": "RESOURCE", + "PhysicalResourceId": "MyTestLambdaRole", + "Description": "No configuration changes detected.", + "Detection": "AUTO", + "TagResources": [], + "UntagResources": [], + "ResourceMapping": { + "Source": { + "StackName": "arn:aws:cloudformation:us-east-1:123456789012:stack/Stack1/3e6a1ff0-94b1-11f0-aa6f-0a88d2e03acf", + "LogicalResourceId": "MyLambdaRole" + }, + "Destination": { + "StackName": "arn:aws:cloudformation:us-east-1:123456789012:stack/Stack2/5da91650-94b1-11f0-81cf-0a23500e151b", + "LogicalResourceId": "MyLambdaRole" + } + } + }, + { + "Action": "MOVE", + "Entity": "RESOURCE", + "PhysicalResourceId": "MyTestFunction", + "Description": "Resource configuration changes will be validated during refactor execution.", + "Detection": "AUTO", + "TagResources": [ + { + "Key": "aws:cloudformation:stack-name", + "Value": "Stack2" + }, + { + "Key": "aws:cloudformation:logical-id", + "Value": "MyFunction" + }, + { + "Key": "aws:cloudformation:stack-id", + "Value": "arn:aws:cloudformation:us-east-1:123456789012:stack/Stack2/5da91650-94b1-11f0-81cf-0a23500e151b" + } + ], + "UntagResources": [ + "aws:cloudformation:stack-name", + "aws:cloudformation:logical-id", + "aws:cloudformation:stack-id" + ], + "ResourceMapping": { + "Source": { + "StackName": "arn:aws:cloudformation:us-east-1:123456789012:stack/Stack1/3e6a1ff0-94b1-11f0-aa6f-0a88d2e03acf", + "LogicalResourceId": "MyFunction" + }, + "Destination": { + "StackName": "arn:aws:cloudformation:us-east-1:123456789012:stack/Stack2/5da91650-94b1-11f0-81cf-0a23500e151b", + "LogicalResourceId": "MyFunction" + } + } + } + ] + } + + +For more information, see `Stack refactoring `__ in the *AWS CloudFormation User Guide*. 
diff --git a/awscli/examples/cloudformation/start-resource-scan.rst b/awscli/examples/cloudformation/start-resource-scan.rst new file mode 100644 index 000000000000..5b26692dd015 --- /dev/null +++ b/awscli/examples/cloudformation/start-resource-scan.rst @@ -0,0 +1,14 @@ +**To start a resource scan** + +The following ``start-resource-scan`` example starts a resource scan that scans all existing resources in the current account and Region. :: + + aws cloudformation start-resource-scan + +Output:: + + { + "ResourceScanId": + "arn:aws:cloudformation:us-east-1:123456789012:resourceScan/0a699f15-489c-43ca-a3ef-3e6ecfa5da60" + } + +For more information, see `Start a resource scan with CloudFormation IaC generator `__ in the *AWS CloudFormation User Guide*. diff --git a/awscli/examples/elastictranscoder/cancel-job.rst b/awscli/examples/elastictranscoder/cancel-job.rst deleted file mode 100644 index 4acd71e65e90..000000000000 --- a/awscli/examples/elastictranscoder/cancel-job.rst +++ /dev/null @@ -1,8 +0,0 @@ -**To cancel a job for ElasticTranscoder** - -This cancels the specified job for ElasticTranscoder. - -Command:: - - aws elastictranscoder cancel-job --id 3333333333333-abcde3 - diff --git a/awscli/examples/elastictranscoder/create-job.rst b/awscli/examples/elastictranscoder/create-job.rst deleted file mode 100644 index 6b10d1a28bbf..000000000000 --- a/awscli/examples/elastictranscoder/create-job.rst +++ /dev/null @@ -1,94 +0,0 @@ -**To create a job for ElasticTranscoder** - -The following ``create-job`` example creates a job for ElasticTranscoder. 
:: - - aws elastictranscoder create-job \ - --pipeline-id 1111111111111-abcde1 \ - --inputs file://inputs.json \ - --outputs file://outputs.json \ - --output-key-prefix "recipes/" \ - --user-metadata file://user-metadata.json - -Contents of ``inputs.json``:: - - [{ - "Key":"ETS_example_file.mp4", - "FrameRate":"auto", - "Resolution":"auto", - "AspectRatio":"auto", - "Interlaced":"auto", - "Container":"mp4" - }] - -Contents of outputs.json:: - - [ - { - "Key":"webm/ETS_example_file-kindlefirehd.webm", - "Rotate":"0", - "PresetId":"1351620000001-100250" - } - ] - -Contents of ``user-metadata.json``:: - - { - "Food type":"Italian", - "Cook book":"recipe notebook" - } - -Output:: - - { - "Job": { - "Status": "Submitted", - "Inputs": [ - { - "Container": "mp4", - "FrameRate": "auto", - "Key": "ETS_example_file.mp4", - "AspectRatio": "auto", - "Resolution": "auto", - "Interlaced": "auto" - } - ], - "Playlists": [], - "Outputs": [ - { - "Status": "Submitted", - "Rotate": "0", - "PresetId": "1351620000001-100250", - "Watermarks": [], - "Key": "webm/ETS_example_file-kindlefirehd.webm", - "Id": "1" - } - ], - "PipelineId": "3333333333333-abcde3", - "OutputKeyPrefix": "recipes/", - "UserMetadata": { - "Cook book": "recipe notebook", - "Food type": "Italian" - }, - "Output": { - "Status": "Submitted", - "Rotate": "0", - "PresetId": "1351620000001-100250", - "Watermarks": [], - "Key": "webm/ETS_example_file-kindlefirehd.webm", - "Id": "1" - }, - "Timing": { - "SubmitTimeMillis": 1533838012298 - }, - "Input": { - "Container": "mp4", - "FrameRate": "auto", - "Key": "ETS_example_file.mp4", - "AspectRatio": "auto", - "Resolution": "auto", - "Interlaced": "auto" - }, - "Id": "1533838012294-example", - "Arn": "arn:aws:elastictranscoder:us-west-2:123456789012:job/1533838012294-example" - } - } diff --git a/awscli/examples/elastictranscoder/create-pipeline.rst b/awscli/examples/elastictranscoder/create-pipeline.rst deleted file mode 100644 index 166bdd4c94f0..000000000000 --- 
a/awscli/examples/elastictranscoder/create-pipeline.rst +++ /dev/null @@ -1,94 +0,0 @@ -**To create a pipeline for ElasticTranscoder** - -The following ``create-pipeline`` example creates a pipeline for ElasticTranscoder. :: - - aws elastictranscoder create-pipeline \ - --name Default \ - --input-bucket salesoffice.example.com-source \ - --role arn:aws:iam::123456789012:role/Elastic_Transcoder_Default_Role \ - --notifications Progressing="",Completed="",Warning="",Error=arn:aws:sns:us-east-1:111222333444:ETS_Errors \ - --content-config file://content-config.json \ - --thumbnail-config file://thumbnail-config.json - -Contents of ``content-config.json``:: - - { - "Bucket":"salesoffice.example.com-public-promos", - "Permissions":[ - { - "GranteeType":"Email", - "Grantee":"marketing-promos@example.com", - "Access":[ - "FullControl" - ] - } - ], - "StorageClass":"Standard" - } - -Contents of ``thumbnail-config.json``:: - - { - "Bucket":"salesoffice.example.com-public-promos-thumbnails", - "Permissions":[ - { - "GranteeType":"Email", - "Grantee":"marketing-promos@example.com", - "Access":[ - "FullControl" - ] - } - ], - "StorageClass":"ReducedRedundancy" - } - -Output:: - - { - "Pipeline": { - "Status": "Active", - "ContentConfig": { - "Bucket": "salesoffice.example.com-public-promos", - "StorageClass": "Standard", - "Permissions": [ - { - "Access": [ - "FullControl" - ], - "Grantee": "marketing-promos@example.com", - "GranteeType": "Email" - } - ] - }, - "Name": "Default", - "ThumbnailConfig": { - "Bucket": "salesoffice.example.com-public-promos-thumbnails", - "StorageClass": "ReducedRedundancy", - "Permissions": [ - { - "Access": [ - "FullControl" - ], - "Grantee": "marketing-promos@example.com", - "GranteeType": "Email" - } - ] - }, - "Notifications": { - "Completed": "", - "Warning": "", - "Progressing": "", - "Error": "arn:aws:sns:us-east-1:123456789012:ETS_Errors" - }, - "Role": "arn:aws:iam::123456789012:role/Elastic_Transcoder_Default_Role", - "InputBucket": 
"salesoffice.example.com-source", - "Id": "1533765810590-example", - "Arn": "arn:aws:elastictranscoder:us-west-2:123456789012:pipeline/1533765810590-example" - }, - "Warnings": [ - { - "Message": "The SNS notification topic for Error events and the pipeline are in different regions, which increases processing time for jobs in the pipeline and can incur additional charges. To decrease processing time and prevent cross-regional charges, use the same region for the SNS notification topic and the pipeline.", - "Code": "6006" - } - ] - } diff --git a/awscli/examples/elastictranscoder/create-preset.rst b/awscli/examples/elastictranscoder/create-preset.rst deleted file mode 100644 index bbf482c49e84..000000000000 --- a/awscli/examples/elastictranscoder/create-preset.rst +++ /dev/null @@ -1,141 +0,0 @@ -**To create a preset for ElasticTranscoder** - -The following ``create-preset`` example creates a preset for ElasticTranscoder. :: - - aws elastictranscoder create-preset \ - --name DefaultPreset \ - --description "Use for published videos" \ - --container mp4 \ - --video file://video.json \ - --audio file://audio.json \ - --thumbnails file://thumbnails.json - -Contents of ``video.json``:: - - { - "Codec":"H.264", - "CodecOptions":{ - "Profile":"main", - "Level":"2.2", - "MaxReferenceFrames":"3", - "MaxBitRate":"", - "BufferSize":"", - "InterlacedMode":"Progressive", - "ColorSpaceConversionMode":"None" - }, - "KeyframesMaxDist":"240", - "FixedGOP":"false", - "BitRate":"1600", - "FrameRate":"auto", - "MaxFrameRate":"30", - "MaxWidth":"auto", - "MaxHeight":"auto", - "SizingPolicy":"Fit", - "PaddingPolicy":"Pad", - "DisplayAspectRatio":"auto", - "Watermarks":[ - { - "Id":"company logo", - "MaxWidth":"20%", - "MaxHeight":"20%", - "SizingPolicy":"ShrinkToFit", - "HorizontalAlign":"Right", - "HorizontalOffset":"10px", - "VerticalAlign":"Bottom", - "VerticalOffset":"10px", - "Opacity":"55.5", - "Target":"Content" - } - ] - } - -Contents of ``audio.json``:: - - { - "Codec":"AAC", - 
"CodecOptions":{ - "Profile":"AAC-LC" - }, - "SampleRate":"44100", - "BitRate":"96", - "Channels":"2" - } - -Contents of ``thumbnails.json``:: - - { - "Format":"png", - "Interval":"120", - "MaxWidth":"auto", - "MaxHeight":"auto", - "SizingPolicy":"Fit", - "PaddingPolicy":"Pad" - } - - -Output:: - - { - "Preset": { - "Thumbnails": { - "SizingPolicy": "Fit", - "MaxWidth": "auto", - "Format": "png", - "PaddingPolicy": "Pad", - "Interval": "120", - "MaxHeight": "auto" - }, - "Container": "mp4", - "Description": "Use for published videos", - "Video": { - "SizingPolicy": "Fit", - "MaxWidth": "auto", - "PaddingPolicy": "Pad", - "MaxFrameRate": "30", - "FrameRate": "auto", - "MaxHeight": "auto", - "KeyframesMaxDist": "240", - "FixedGOP": "false", - "Codec": "H.264", - "Watermarks": [ - { - "SizingPolicy": "ShrinkToFit", - "VerticalOffset": "10px", - "VerticalAlign": "Bottom", - "Target": "Content", - "MaxWidth": "20%", - "MaxHeight": "20%", - "HorizontalAlign": "Right", - "HorizontalOffset": "10px", - "Opacity": "55.5", - "Id": "company logo" - } - ], - "CodecOptions": { - "Profile": "main", - "MaxBitRate": "32", - "InterlacedMode": "Progressive", - "Level": "2.2", - "ColorSpaceConversionMode": "None", - "MaxReferenceFrames": "3", - "BufferSize": "5" - }, - "BitRate": "1600", - "DisplayAspectRatio": "auto" - }, - "Audio": { - "Channels": "2", - "CodecOptions": { - "Profile": "AAC-LC" - }, - "SampleRate": "44100", - "Codec": "AAC", - "BitRate": "96" - }, - "Type": "Custom", - "Id": "1533765290724-example" - "Arn": "arn:aws:elastictranscoder:us-west-2:123456789012:preset/1533765290724-example", - "Name": "DefaultPreset" - }, - "Warning": "" - } diff --git a/awscli/examples/elastictranscoder/delete-pipeline.rst b/awscli/examples/elastictranscoder/delete-pipeline.rst deleted file mode 100644 index 2ccfc95cdc29..000000000000 --- a/awscli/examples/elastictranscoder/delete-pipeline.rst +++ /dev/null @@ -1,13 +0,0 @@ -**To delete the specified ElasticTranscoder pipeline** - -This 
deletes the specified ElasticTranscoder pipeline. - -Command:: - - aws elastictranscoder delete-pipeline --id 1111111111111-abcde1 - -Output:: - - { - "Success":"true" - } diff --git a/awscli/examples/elastictranscoder/delete-preset.rst b/awscli/examples/elastictranscoder/delete-preset.rst deleted file mode 100644 index e6907a59e10d..000000000000 --- a/awscli/examples/elastictranscoder/delete-preset.rst +++ /dev/null @@ -1,8 +0,0 @@ -**To delete the specified ElasticTranscoder preset** - -This deletes the specified ElasticTranscoder preset. - -Command:: - - aws elastictranscoder delete-preset --id 5555555555555-abcde5 - diff --git a/awscli/examples/elastictranscoder/list-jobs-by-pipeline.rst b/awscli/examples/elastictranscoder/list-jobs-by-pipeline.rst deleted file mode 100644 index 7217beff9aed..000000000000 --- a/awscli/examples/elastictranscoder/list-jobs-by-pipeline.rst +++ /dev/null @@ -1,13 +0,0 @@ -**To retrieve a list of ElasticTranscoder jobs in the specified pipeline** - -This example retrieves a list of ElasticTranscoder jobs in the specified pipeline. - -Command:: - - aws elastictranscoder list-jobs-by-pipeline --pipeline-id 1111111111111-abcde1 - -Output:: - - { - "Jobs": [] - } diff --git a/awscli/examples/elastictranscoder/list-jobs-by-status.rst b/awscli/examples/elastictranscoder/list-jobs-by-status.rst deleted file mode 100644 index 84f62d272f01..000000000000 --- a/awscli/examples/elastictranscoder/list-jobs-by-status.rst +++ /dev/null @@ -1,14 +0,0 @@ -**To retrieve a list of ElasticTranscoder jobs with a status of Complete** - -This example retrieves a list of ElasticTranscoder jobs with a status of Complete. 
- -Command:: - - aws elastictranscoder list-jobs-by-status --status Complete - -Output:: - - { - "Jobs": [] - } - diff --git a/awscli/examples/elastictranscoder/list-pipelines.rst b/awscli/examples/elastictranscoder/list-pipelines.rst deleted file mode 100644 index f318f28505e3..000000000000 --- a/awscli/examples/elastictranscoder/list-pipelines.rst +++ /dev/null @@ -1,84 +0,0 @@ -**To retrieve a list of ElasticTranscoder pipelines** - -This example retrieves a list of ElasticTranscoder pipelines. - -Command:: - - aws elastictranscoder list-pipelines - -Output:: - - { - "Pipelines": [ - { - "Status": "Active", - "ContentConfig": { - "Bucket": "ets-example", - "Permissions": [] - }, - "Name": "example-pipeline", - "ThumbnailConfig": { - "Bucket": "ets-example", - "Permissions": [] - }, - "Notifications": { - "Completed": "arn:aws:sns:us-west-2:123456789012:ets_example", - "Warning": "", - "Progressing": "", - "Error": "" - }, - "Role": "arn:aws:iam::123456789012:role/Elastic_Transcoder_Default_Role", - "InputBucket": "ets-example", - "OutputBucket": "ets-example", - "Id": "3333333333333-abcde3", - "Arn": "arn:aws:elastictranscoder:us-west-2:123456789012:pipeline/3333333333333-abcde3" - }, - { - "Status": "Paused", - "ContentConfig": { - "Bucket": "ets-example", - "Permissions": [] - }, - "Name": "example-php-test", - "ThumbnailConfig": { - "Bucket": "ets-example", - "Permissions": [] - }, - "Notifications": { - "Completed": "", - "Warning": "", - "Progressing": "", - "Error": "" - }, - "Role": "arn:aws:iam::123456789012:role/Elastic_Transcoder_Default_Role", - "InputBucket": "ets-example", - "OutputBucket": "ets-example", - "Id": "3333333333333-abcde2", - "Arn": "arn:aws:elastictranscoder:us-west-2:123456789012:pipeline/3333333333333-abcde2" - }, - { - "Status": "Active", - "ContentConfig": { - "Bucket": "ets-west-output", - "Permissions": [] - }, - "Name": "pipeline-west", - "ThumbnailConfig": { - "Bucket": "ets-west-output", - "Permissions": [] - }, - 
"Notifications": { - "Completed": "arn:aws:sns:us-west-2:123456789012:ets-notifications", - "Warning": "", - "Progressing": "", - "Error": "" - }, - "Role": "arn:aws:iam::123456789012:role/Elastic_Transcoder_Default_Role", - "InputBucket": "ets-west-input", - "OutputBucket": "ets-west-output", - "Id": "3333333333333-abcde1", - "Arn": "arn:aws:elastictranscoder:us-west-2:123456789012:pipeline/3333333333333-abcde1" - } - ] - } - diff --git a/awscli/examples/elastictranscoder/list-presets.rst b/awscli/examples/elastictranscoder/list-presets.rst deleted file mode 100644 index 239cf9ab9239..000000000000 --- a/awscli/examples/elastictranscoder/list-presets.rst +++ /dev/null @@ -1,95 +0,0 @@ -**To retrieve a list of ElasticTranscoder presets** - -This example retrieves a list of ElasticTranscoder presets. - -Command:: - - aws elastictranscoder list-presets --max-items 2 - -Output:: - - { - "Presets": [ - { - "Container": "mp4", - "Name": "KindleFireHD-preset", - "Video": { - "Resolution": "1280x720", - "FrameRate": "30", - "KeyframesMaxDist": "90", - "FixedGOP": "false", - "Codec": "H.264", - "Watermarks": [], - "CodecOptions": { - "Profile": "main", - "MaxReferenceFrames": "3", - "ColorSpaceConversionMode": "None", - "InterlacedMode": "Progressive", - "Level": "4" - }, - "AspectRatio": "16:9", - "BitRate": "2200" - }, - "Audio": { - "Channels": "2", - "CodecOptions": { - "Profile": "AAC-LC" - }, - "SampleRate": "48000", - "Codec": "AAC", - "BitRate": "160" - }, - "Type": "Custom", - "Id": "3333333333333-abcde2", - "Arn": "arn:aws:elastictranscoder:us-west-2:123456789012:preset/3333333333333-abcde2", - "Thumbnails": { - "AspectRatio": "16:9", - "Interval": "60", - "Resolution": "192x108", - "Format": "png" - } - }, - { - "Thumbnails": { - "AspectRatio": "16:9", - "Interval": "60", - "Resolution": "192x108", - "Format": "png" - }, - "Container": "mp4", - "Description": "Custom preset for transcoding jobs", - "Video": { - "Resolution": "1280x720", - "FrameRate": "30", - 
"KeyframesMaxDist": "90", - "FixedGOP": "false", - "Codec": "H.264", - "Watermarks": [], - "CodecOptions": { - "Profile": "main", - "MaxReferenceFrames": "3", - "ColorSpaceConversionMode": "None", - "InterlacedMode": "Progressive", - "Level": "3.1" - }, - "AspectRatio": "16:9", - "BitRate": "2200" - }, - "Audio": { - "Channels": "2", - "CodecOptions": { - "Profile": "AAC-LC" - }, - "SampleRate": "44100", - "Codec": "AAC", - "BitRate": "160" - }, - "Type": "Custom", - "Id": "3333333333333-abcde3", - "Arn": "arn:aws:elastictranscoder:us-west-2:123456789012:preset/3333333333333-abcde3", - "Name": "Roman's Preset" - } - ], - "NextToken": "eyJQYWdlVG9rZW4iOiBudWxsLCAiYm90b190cnVuY2F0ZV9hbW91bnQiOiAyfQ==" - } - diff --git a/awscli/examples/elastictranscoder/read-job.rst b/awscli/examples/elastictranscoder/read-job.rst deleted file mode 100644 index dd2bfe1b84ab..000000000000 --- a/awscli/examples/elastictranscoder/read-job.rst +++ /dev/null @@ -1,65 +0,0 @@ -**To retrieve an ElasticTranscoder job** - -This example retrieves the specified ElasticTranscoder job. 
- -Command:: - - aws elastictranscoder read-job --id 1533838012294-example - -Output:: - - { - "Job": { - "Status": "Progressing", - "Inputs": [ - { - "Container": "mp4", - "FrameRate": "auto", - "Key": "ETS_example_file.mp4", - "AspectRatio": "auto", - "Resolution": "auto", - "Interlaced": "auto" - } - ], - "Playlists": [], - "Outputs": [ - { - "Status": "Progressing", - "Rotate": "0", - "PresetId": "1351620000001-100250", - "Watermarks": [], - "Key": "webm/ETS_example_file-kindlefirehd.webm", - "Id": "1" - } - ], - "PipelineId": "3333333333333-abcde3", - "OutputKeyPrefix": "recipes/", - "UserMetadata": { - "Cook book": "recipe notebook", - "Food type": "Italian" - }, - "Output": { - "Status": "Progressing", - "Rotate": "0", - "PresetId": "1351620000001-100250", - "Watermarks": [], - "Key": "webm/ETS_example_file-kindlefirehd.webm", - "Id": "1" - }, - "Timing": { - "SubmitTimeMillis": 1533838012298, - "StartTimeMillis": 1533838013786 - }, - "Input": { - "Container": "mp4", - "FrameRate": "auto", - "Key": "ETS_example_file.mp4", - "AspectRatio": "auto", - "Resolution": "auto", - "Interlaced": "auto" - }, - "Id": "1533838012294-example", - "Arn": "arn:aws:elastictranscoder:us-west-2:123456789012:job/1533838012294-example" - } - } - diff --git a/awscli/examples/elastictranscoder/read-pipeline.rst b/awscli/examples/elastictranscoder/read-pipeline.rst deleted file mode 100644 index 471180183c2d..000000000000 --- a/awscli/examples/elastictranscoder/read-pipeline.rst +++ /dev/null @@ -1,59 +0,0 @@ -**To retrieve an ElasticTranscoder pipeline** - -This example retrieves the specified ElasticTranscoder pipeline. 
- -Command:: - - aws elastictranscoder read-pipeline --id 3333333333333-abcde3 - -Output:: - - { - "Pipeline": { - "Status": "Active", - "ContentConfig": { - "Bucket": "ets-example", - "StorageClass": "Standard", - "Permissions": [ - { - "Access": [ - "FullControl" - ], - "Grantee": "marketing-promos@example.com", - "GranteeType": "Email" - } - ] - }, - "Name": "Default", - "ThumbnailConfig": { - "Bucket": "ets-example", - "StorageClass": "ReducedRedundancy", - "Permissions": [ - { - "Access": [ - "FullControl" - ], - "Grantee": "marketing-promos@example.com", - "GranteeType": "Email" - } - ] - }, - "Notifications": { - "Completed": "", - "Warning": "", - "Progressing": "", - "Error": "arn:aws:sns:us-east-1:123456789012:ETS_Errors" - }, - "Role": "arn:aws:iam::123456789012:role/Elastic_Transcoder_Default_Role", - "InputBucket": "ets-example", - "Id": "3333333333333-abcde3", - "Arn": "arn:aws:elastictranscoder:us-west-2:123456789012:pipeline/3333333333333-abcde3" - }, - "Warnings": [ - { - "Message": "The SNS notification topic for Error events and the pipeline are in different regions, which increases processing time for jobs in the pipeline and can incur additional charges. To decrease processing time and prevent cross-regional charges, use the same region for the SNS notification topic and the pipeline.", - "Code": "6006" - } - ] - } - diff --git a/awscli/examples/elastictranscoder/read-preset.rst b/awscli/examples/elastictranscoder/read-preset.rst deleted file mode 100644 index 8ab201c41372..000000000000 --- a/awscli/examples/elastictranscoder/read-preset.rst +++ /dev/null @@ -1,100 +0,0 @@ -**To retrieve an ElasticTranscoder preset** - -This example retrieves the specified ElasticTranscoder preset. 
- -Command:: - - aws elastictranscoder read-preset --id 1351620000001-500020 - -Output:: - - { - "Preset": { - "Thumbnails": { - "SizingPolicy": "ShrinkToFit", - "MaxWidth": "192", - "Format": "png", - "PaddingPolicy": "NoPad", - "Interval": "300", - "MaxHeight": "108" - }, - "Container": "fmp4", - "Description": "System preset: MPEG-Dash Video - 4.8M", - "Video": { - "SizingPolicy": "ShrinkToFit", - "MaxWidth": "1280", - "PaddingPolicy": "NoPad", - "FrameRate": "30", - "MaxHeight": "720", - "KeyframesMaxDist": "60", - "FixedGOP": "true", - "Codec": "H.264", - "Watermarks": [ - { - "SizingPolicy": "ShrinkToFit", - "VerticalOffset": "10%", - "VerticalAlign": "Top", - "Target": "Content", - "MaxWidth": "10%", - "MaxHeight": "10%", - "HorizontalAlign": "Left", - "HorizontalOffset": "10%", - "Opacity": "100", - "Id": "TopLeft" - }, - { - "SizingPolicy": "ShrinkToFit", - "VerticalOffset": "10%", - "VerticalAlign": "Top", - "Target": "Content", - "MaxWidth": "10%", - "MaxHeight": "10%", - "HorizontalAlign": "Right", - "HorizontalOffset": "10%", - "Opacity": "100", - "Id": "TopRight" - }, - { - "SizingPolicy": "ShrinkToFit", - "VerticalOffset": "10%", - "VerticalAlign": "Bottom", - "Target": "Content", - "MaxWidth": "10%", - "MaxHeight": "10%", - "HorizontalAlign": "Left", - "HorizontalOffset": "10%", - "Opacity": "100", - "Id": "BottomLeft" - }, - { - "SizingPolicy": "ShrinkToFit", - "VerticalOffset": "10%", - "VerticalAlign": "Bottom", - "Target": "Content", - "MaxWidth": "10%", - "MaxHeight": "10%", - "HorizontalAlign": "Right", - "HorizontalOffset": "10%", - "Opacity": "100", - "Id": "BottomRight" - } - ], - "CodecOptions": { - "Profile": "main", - "MaxBitRate": "4800", - "InterlacedMode": "Progressive", - "Level": "3.1", - "ColorSpaceConversionMode": "None", - "MaxReferenceFrames": "3", - "BufferSize": "9600" - }, - "BitRate": "4800", - "DisplayAspectRatio": "auto" - }, - "Type": "System", - "Id": "1351620000001-500020", - "Arn": 
"arn:aws:elastictranscoder:us-west-2:123456789012:preset/1351620000001-500020", - "Name": "System preset: MPEG-Dash Video - 4.8M" - } - } - diff --git a/awscli/examples/elastictranscoder/update-pipeline-notifications.rst b/awscli/examples/elastictranscoder/update-pipeline-notifications.rst deleted file mode 100644 index faf4a0061d96..000000000000 --- a/awscli/examples/elastictranscoder/update-pipeline-notifications.rst +++ /dev/null @@ -1,52 +0,0 @@ -**To update the notifications of an ElasticTranscoder pipeline** - -This example updates the notifications of the specified ElasticTranscoder pipeline. - -Command:: - - aws elastictranscoder update-pipeline-notifications --id 1111111111111-abcde1 --notifications Progressing=arn:aws:sns:us-west-2:0123456789012:my-topic,Completed=arn:aws:sns:us-west-2:0123456789012:my-topic,Warning=arn:aws:sns:us-west-2:0123456789012:my-topic,Error=arn:aws:sns:us-east-1:111222333444:ETS_Errors - -Output:: - - { - "Pipeline": { - "Status": "Active", - "ContentConfig": { - "Bucket": "ets-example", - "StorageClass": "Standard", - "Permissions": [ - { - "Access": [ - "FullControl" - ], - "Grantee": "marketing-promos@example.com", - "GranteeType": "Email" - } - ] - }, - "Name": "Default", - "ThumbnailConfig": { - "Bucket": "ets-example", - "StorageClass": "ReducedRedundancy", - "Permissions": [ - { - "Access": [ - "FullControl" - ], - "Grantee": "marketing-promos@example.com", - "GranteeType": "Email" - } - ] - }, - "Notifications": { - "Completed": "arn:aws:sns:us-west-2:0123456789012:my-topic", - "Warning": "arn:aws:sns:us-west-2:0123456789012:my-topic", - "Progressing": "arn:aws:sns:us-west-2:0123456789012:my-topic", - "Error": "arn:aws:sns:us-east-1:111222333444:ETS_Errors" - }, - "Role": "arn:aws:iam::123456789012:role/Elastic_Transcoder_Default_Role", - "InputBucket": "ets-example", - "Id": "1111111111111-abcde1", - "Arn": "arn:aws:elastictranscoder:us-west-2:123456789012:pipeline/1111111111111-abcde1" - } - } diff --git 
a/awscli/examples/elastictranscoder/update-pipeline-status.rst b/awscli/examples/elastictranscoder/update-pipeline-status.rst deleted file mode 100644 index b4587e6c2830..000000000000 --- a/awscli/examples/elastictranscoder/update-pipeline-status.rst +++ /dev/null @@ -1,53 +0,0 @@ -**To update the status of an ElasticTranscoder pipeline** - -This example updates the status of the specified ElasticTranscoder pipeline. - -Command:: - - aws elastictranscoder update-pipeline-status --id 1111111111111-abcde1 --status Paused - -Output:: - - { - "Pipeline": { - "Status": "Paused", - "ContentConfig": { - "Bucket": "ets-example", - "StorageClass": "Standard", - "Permissions": [ - { - "Access": [ - "FullControl" - ], - "Grantee": "marketing-promos@example.com", - "GranteeType": "Email" - } - ] - }, - "Name": "Default", - "ThumbnailConfig": { - "Bucket": "ets-example", - "StorageClass": "ReducedRedundancy", - "Permissions": [ - { - "Access": [ - "FullControl" - ], - "Grantee": "marketing-promos@example.com", - "GranteeType": "Email" - } - ] - }, - "Notifications": { - "Completed": "", - "Warning": "", - "Progressing": "", - "Error": "arn:aws:sns:us-east-1:803981987763:ETS_Errors" - }, - "Role": "arn:aws:iam::123456789012:role/Elastic_Transcoder_Default_Role", - "InputBucket": "ets-example", - "Id": "1111111111111-abcde1", - "Arn": "arn:aws:elastictranscoder:us-west-2:123456789012:pipeline/1111111111111-abcde1" - } - } - diff --git a/awscli/examples/elastictranscoder/update-pipeline.rst b/awscli/examples/elastictranscoder/update-pipeline.rst deleted file mode 100644 index c5b1e84f6a00..000000000000 --- a/awscli/examples/elastictranscoder/update-pipeline.rst +++ /dev/null @@ -1,95 +0,0 @@ -**To update an ElasticTranscoder pipeline** - -The following ``update-pipeline`` example updates the specified ElasticTranscoder pipeline. 
:: - - aws elastictranscoder update-pipeline \ - --id 1111111111111-abcde1 - --name DefaultExample \ - --input-bucket salesoffice.example.com-source \ - --role arn:aws:iam::123456789012:role/Elastic_Transcoder_Default_Role \ - --notifications Progressing="",Completed="",Warning="",Error=arn:aws:sns:us-east-1:111222333444:ETS_Errors \ - --content-config file://content-config.json \ - --thumbnail-config file://thumbnail-config.json - -Contents of ``content-config.json``:: - - { - "Bucket":"salesoffice.example.com-public-promos", - "Permissions":[ - { - "GranteeType":"Email", - "Grantee":"marketing-promos@example.com", - "Access":[ - "FullControl" - ] - } - ], - "StorageClass":"Standard" - } - -Contents of ``thumbnail-config.json``:: - - { - "Bucket":"salesoffice.example.com-public-promos-thumbnails", - "Permissions":[ - { - "GranteeType":"Email", - "Grantee":"marketing-promos@example.com", - "Access":[ - "FullControl" - ] - } - ], - "StorageClass":"ReducedRedundancy" - } - -Output:: - - { - "Pipeline": { - "Status": "Active", - "ContentConfig": { - "Bucket": "ets-example", - "StorageClass": "Standard", - "Permissions": [ - { - "Access": [ - "FullControl" - ], - "Grantee": "marketing-promos@example.com", - "GranteeType": "Email" - } - ] - }, - "Name": "DefaultExample", - "ThumbnailConfig": { - "Bucket": "ets-example", - "StorageClass": "ReducedRedundancy", - "Permissions": [ - { - "Access": [ - "FullControl" - ], - "Grantee": "marketing-promos@example.com", - "GranteeType": "Email" - } - ] - }, - "Notifications": { - "Completed": "", - "Warning": "", - "Progressing": "", - "Error": "arn:aws:sns:us-east-1:111222333444:ETS_Errors" - }, - "Role": "arn:aws:iam::123456789012:role/Elastic_Transcoder_Default_Role", - "InputBucket": "ets-example", - "Id": "3333333333333-abcde3", - "Arn": "arn:aws:elastictranscoder:us-west-2:123456789012:pipeline/3333333333333-abcde3" - }, - "Warnings": [ - { - "Message": "The SNS notification topic for Error events and the pipeline are in 
different regions, which increases processing time for jobs in the pipeline and can incur additional charges. To decrease processing time and prevent cross-regional charges, use the same region for the SNS notification topic and the pipeline.", - "Code": "6006" - } - ] - } diff --git a/awscli/examples/medical-imaging/create-datastore.rst b/awscli/examples/medical-imaging/create-datastore.rst index ca8d1e86cad3..9f1f7884a601 100644 --- a/awscli/examples/medical-imaging/create-datastore.rst +++ b/awscli/examples/medical-imaging/create-datastore.rst @@ -1,6 +1,7 @@ -**To create a data store** +**Example 1: To create a data store** -The following ``create-datastore`` code example creates a data store with the name ``my-datastore``. When you create a datastore without specifying a ``--lossless-storage-format``, AWS HealthImaging defaults to HTJ2K (High Throughput JPEG 2000). :: +The following ``create-datastore`` code example creates a data store with the name ``my-datastore``. +When you create a datastore without specifying a ``--lossless-storage-format``, AWS HealthImaging defaults to HTJ2K (High Throughput JPEG 2000). :: aws medical-imaging create-datastore \ --datastore-name "my-datastore" @@ -12,4 +13,20 @@ Output:: "datastoreStatus": "CREATING" } +**Example 2: To create a data store with JPEG 2000 Lossless storage format** + +A data store configured with JPEG 2000 Lossless storage format will transcode and persist lossless image frames in JPEG 2000 format. Image frames can then be retrieved in +JPEG 2000 Lossless without transcoding. The following ``create-datastore`` code example creates a data store configured for JPEG 2000 Lossless storage format with the name ``my-datastore``. 
:: + + aws medical-imaging create-datastore \ + --datastore-name "my-datastore" \ + --lossless-storage-format JPEG_2000_LOSSLESS + +Output:: + + { + "datastoreId": "12345678901234567890123456789012", + "datastoreStatus": "CREATING" + } + For more information, see `Creating a data store `__ in the *AWS HealthImaging Developer Guide*. diff --git a/awscli/examples/medical-imaging/get-datastore.rst b/awscli/examples/medical-imaging/get-datastore.rst index e74dcde8961e..6c3d924c45c8 100644 --- a/awscli/examples/medical-imaging/get-datastore.rst +++ b/awscli/examples/medical-imaging/get-datastore.rst @@ -1,4 +1,4 @@ -**To get a data store's properties** +**Example 1: To get a data store's properties** The following ``get-datastore`` code example gets a data store's properties. :: @@ -9,14 +9,36 @@ The following ``get-datastore`` code example gets a data store's properties. :: Output:: { - "datastoreProperties": { - "datastoreId": "12345678901234567890123456789012", - "datastoreName": "TestDatastore123", - "datastoreStatus": "ACTIVE", - "losslessStorageFormat": "JPEG_2000_LOSSLESS", - "datastoreArn": "arn:aws:medical-imaging:us-east-1:123456789012:datastore/12345678901234567890123456789012", - "createdAt": "2022-11-15T23:33:09.643000+00:00", - "updatedAt": "2022-11-15T23:33:09.643000+00:00" + "datastoreProperties": { + "datastoreId": "12345678901234567890123456789012", + "datastoreName": "TestDatastore123", + "datastoreStatus": "ACTIVE", + "losslessStorageFormat": "HTJ2K" + "datastoreArn": "arn:aws:medical-imaging:us-east-1:123456789012:datastore/12345678901234567890123456789012", + "createdAt": "2022-11-15T23:33:09.643000+00:00", + "updatedAt": "2022-11-15T23:33:09.643000+00:00" + } + } + +**Example 2: To get data store's properties configured for JPEG2000** + +The following ``get-datastore`` code example gets a data store's properties for a data store configured for JPEG 2000 Lossless storage format. 
:: + + aws medical-imaging get-datastore \ + --datastore-id 12345678901234567890123456789012 + + +Output:: + + { + "datastoreProperties": { + "datastoreId": "12345678901234567890123456789012", + "datastoreName": "TestDatastore123", + "datastoreStatus": "ACTIVE", + "losslessStorageFormat": "JPEG_2000_LOSSLESS", + "datastoreArn": "arn:aws:medical-imaging:us-east-1:123456789012:datastore/12345678901234567890123456789012", + "createdAt": "2022-11-15T23:33:09.643000+00:00", + "updatedAt": "2022-11-15T23:33:09.643000+00:00" } } diff --git a/awscli/topics/s3-config.rst b/awscli/topics/s3-config.rst index 2857691a514e..ad127f776ec9 100644 --- a/awscli/topics/s3-config.rst +++ b/awscli/topics/s3-config.rst @@ -15,6 +15,11 @@ are provided in the case where you need to modify one of these values, either for performance reasons or to account for the specific environment where these ``aws s3`` commands are being run. +.. note:: + S3 commands have an option to use a custom endpoint using ``--endpoint-url``. + This overrides the default endpoint the command will use. + Use caution when configuring this parameter as it can cause unintended behavior including S3 redirect issues. + See `Service-specific endpoints `_ page in the *AWS SDK reference guide* for more information. Configuration Values ==================== diff --git a/configure b/configure index a33b29fba330..a9a12e7605a7 100755 --- a/configure +++ b/configure @@ -1,6 +1,6 @@ #! /bin/sh # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.71 for awscli 2.32.7. +# Generated by GNU Autoconf 2.71 for awscli 2.32.31. # # # Copyright (C) 1992-1996, 1998-2017, 2020-2021 Free Software Foundation, @@ -607,8 +607,8 @@ MAKEFLAGS= # Identity of this package. 
PACKAGE_NAME='awscli' PACKAGE_TARNAME='awscli' -PACKAGE_VERSION='2.32.7' -PACKAGE_STRING='awscli 2.32.7' +PACKAGE_VERSION='2.32.31' +PACKAGE_STRING='awscli 2.32.31' PACKAGE_BUGREPORT='' PACKAGE_URL='' @@ -1255,7 +1255,7 @@ _ACEOF fi if $ac_init_version; then cat <<\_ACEOF -awscli configure 2.32.7 +awscli configure 2.32.31 generated by GNU Autoconf 2.71 Copyright (C) 2021 Free Software Foundation, Inc. @@ -1292,7 +1292,7 @@ cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. -It was created by awscli $as_me 2.32.7, which was +It was created by awscli $as_me 2.32.31, which was generated by GNU Autoconf 2.71. Invocation command line was $ $0$ac_configure_args_raw @@ -2668,7 +2668,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" -This file was extended by awscli $as_me 2.32.7, which was +This file was extended by awscli $as_me 2.32.31, which was generated by GNU Autoconf 2.71. Invocation command line was CONFIG_FILES = $CONFIG_FILES @@ -2723,7 +2723,7 @@ ac_cs_config_escaped=`printf "%s\n" "$ac_cs_config" | sed "s/^ //; s/'/'\\\\\\\\ cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config='$ac_cs_config_escaped' ac_cs_version="\\ -awscli config.status 2.32.7 +awscli config.status 2.32.31 configured by $0, generated by GNU Autoconf 2.71, with options \\"\$ac_cs_config\\" diff --git a/configure.ac b/configure.ac index 3b4e4d7a07d4..24687e60d31f 100644 --- a/configure.ac +++ b/configure.ac @@ -1,5 +1,5 @@ AC_CONFIG_MACRO_DIRS([m4]) -AC_INIT([awscli], [2.32.7]) +AC_INIT([awscli], [2.32.31]) AC_CONFIG_SRCDIR([bin/aws]) AM_PATH_PYTHON([3.8]) diff --git a/doc/source/conf.py b/doc/source/conf.py index 8f82db10f0f9..9c1a6b97af39 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -66,7 +66,7 @@ # The short X.Y version. 
version = '2.0' # The full version, including alpha/beta/rc tags. -release = '2.32.7' +release = '2.32.31' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -318,13 +318,6 @@ '', 1, ), - ( - 'reference/elastictranscoder/index', - 'aws-elastictranscoder', - 'Amazon Elastic Transcoder', - '', - 1, - ), ('reference/elb/index', 'aws-elb', 'Elastic Load Balancing', '', 1), ('reference/emr/index', 'aws-emr', 'Amazon Elastic MapReduce', '', 1), ( diff --git a/hssyoo.sh b/hssyoo.sh new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/pyproject.toml b/pyproject.toml index 708e1345626a..9a3d2b277bd1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -43,7 +43,7 @@ dependencies = [ "awscrt==0.29.1", "python-dateutil>=2.1,<=2.9.0", "jmespath>=0.7.1,<1.1.0", - "urllib3>=1.25.4,<1.27", + "urllib3>=1.25.4,<=2.6.3", ] dynamic = ["version"] diff --git a/requirements/download-deps/portable-exe-lock.txt b/requirements/download-deps/portable-exe-lock.txt index ab6ad0eac274..c70db7e10782 100644 --- a/requirements/download-deps/portable-exe-lock.txt +++ b/requirements/download-deps/portable-exe-lock.txt @@ -100,9 +100,9 @@ pyinstaller==6.11.1 \ --hash=sha256:ddc0fddd75f07f7e423da1f0822e389a42af011f9589e0269b87e0d89aa48c1f \ --hash=sha256:e21c7806e34f40181e7606926a14579f848bfb1dc52cbca7eea66eccccbfe977 # via -r requirements/portable-exe-extras.txt -pyinstaller-hooks-contrib==2025.10 \ - --hash=sha256:a1a737e5c0dccf1cf6f19a25e2efd109b9fec9ddd625f97f553dac16ee884881 \ - --hash=sha256:aa7a378518772846221f63a84d6306d9827299323243db890851474dfd1231a9 +pyinstaller-hooks-contrib==2025.11 \ + --hash=sha256:777e163e2942474aa41a8e6d31ac1635292d63422c3646c176d584d04d971c34 \ + --hash=sha256:dfe18632e06655fa88d218e0d768fd753e1886465c12a6d4bce04f1aaeec917d # via pyinstaller python-dateutil==2.9.0 \ --hash=sha256:78e73e19c63f5b20ffa567001531680d939dc042bf7850431877645523c66709 \ @@ -164,9 +164,9 @@ six==1.17.0 \ 
--hash=sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274 \ --hash=sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81 # via python-dateutil -urllib3==1.26.20 \ - --hash=sha256:0ed14ccfbf1c30a9072c7ca157e4319b70d65f623e91e7b32fadb2853431016e \ - --hash=sha256:40c2dc0c681e47eb8f90e7e27bf6ff7df2e677421fd46756da1161c39ca70d32 +urllib3==2.6.3 \ + --hash=sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed \ + --hash=sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4 # via awscli (pyproject.toml) wcwidth==0.2.14 \ --hash=sha256:4d478375d31bc5395a3c55c40ccdf3354688364cd61c4f6adacaa9215d0b3605 \ diff --git a/requirements/download-deps/portable-exe-win-lock.txt b/requirements/download-deps/portable-exe-win-lock.txt index ce4254839cbf..4ad0f9d035a6 100644 --- a/requirements/download-deps/portable-exe-win-lock.txt +++ b/requirements/download-deps/portable-exe-win-lock.txt @@ -98,9 +98,9 @@ pyinstaller==6.11.1 \ --hash=sha256:ddc0fddd75f07f7e423da1f0822e389a42af011f9589e0269b87e0d89aa48c1f \ --hash=sha256:e21c7806e34f40181e7606926a14579f848bfb1dc52cbca7eea66eccccbfe977 # via -r D:/a/aws-cli/aws-cli/requirements/portable-exe-extras.txt -pyinstaller-hooks-contrib==2025.10 \ - --hash=sha256:a1a737e5c0dccf1cf6f19a25e2efd109b9fec9ddd625f97f553dac16ee884881 \ - --hash=sha256:aa7a378518772846221f63a84d6306d9827299323243db890851474dfd1231a9 +pyinstaller-hooks-contrib==2025.11 \ + --hash=sha256:777e163e2942474aa41a8e6d31ac1635292d63422c3646c176d584d04d971c34 \ + --hash=sha256:dfe18632e06655fa88d218e0d768fd753e1886465c12a6d4bce04f1aaeec917d # via pyinstaller python-dateutil==2.9.0 \ --hash=sha256:78e73e19c63f5b20ffa567001531680d939dc042bf7850431877645523c66709 \ @@ -166,9 +166,9 @@ six==1.17.0 \ --hash=sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274 \ --hash=sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81 # via python-dateutil -urllib3==1.26.20 \ - 
--hash=sha256:0ed14ccfbf1c30a9072c7ca157e4319b70d65f623e91e7b32fadb2853431016e \ - --hash=sha256:40c2dc0c681e47eb8f90e7e27bf6ff7df2e677421fd46756da1161c39ca70d32 +urllib3==2.6.3 \ + --hash=sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed \ + --hash=sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4 # via awscli (D:/a/aws-cli/aws-cli/pyproject.toml) wcwidth==0.2.14 \ --hash=sha256:4d478375d31bc5395a3c55c40ccdf3354688364cd61c4f6adacaa9215d0b3605 \ diff --git a/requirements/download-deps/system-sandbox-lock.txt b/requirements/download-deps/system-sandbox-lock.txt index 547e9266d2ff..60187b1d1ece 100644 --- a/requirements/download-deps/system-sandbox-lock.txt +++ b/requirements/download-deps/system-sandbox-lock.txt @@ -126,9 +126,9 @@ six==1.17.0 \ --hash=sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274 \ --hash=sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81 # via python-dateutil -urllib3==1.26.20 \ - --hash=sha256:0ed14ccfbf1c30a9072c7ca157e4319b70d65f623e91e7b32fadb2853431016e \ - --hash=sha256:40c2dc0c681e47eb8f90e7e27bf6ff7df2e677421fd46756da1161c39ca70d32 +urllib3==2.6.3 \ + --hash=sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed \ + --hash=sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4 # via awscli (pyproject.toml) wcwidth==0.2.14 \ --hash=sha256:4d478375d31bc5395a3c55c40ccdf3354688364cd61c4f6adacaa9215d0b3605 \ diff --git a/requirements/download-deps/system-sandbox-win-lock.txt b/requirements/download-deps/system-sandbox-win-lock.txt index 2825e6a46773..0fa8278526cb 100644 --- a/requirements/download-deps/system-sandbox-win-lock.txt +++ b/requirements/download-deps/system-sandbox-win-lock.txt @@ -126,9 +126,9 @@ six==1.17.0 \ --hash=sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274 \ --hash=sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81 # via python-dateutil -urllib3==1.26.20 \ 
- --hash=sha256:0ed14ccfbf1c30a9072c7ca157e4319b70d65f623e91e7b32fadb2853431016e \ - --hash=sha256:40c2dc0c681e47eb8f90e7e27bf6ff7df2e677421fd46756da1161c39ca70d32 +urllib3==2.6.3 \ + --hash=sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed \ + --hash=sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4 # via awscli (D:/a/aws-cli/aws-cli/pyproject.toml) wcwidth==0.2.14 \ --hash=sha256:4d478375d31bc5395a3c55c40ccdf3354688364cd61c4f6adacaa9215d0b3605 \ diff --git a/tests/functional/botocore/endpoint-rules/account/endpoint-tests-1.json b/tests/functional/botocore/endpoint-rules/account/endpoint-tests-1.json index 02755aa5e185..7cf2e2f6dacf 100644 --- a/tests/functional/botocore/endpoint-rules/account/endpoint-tests-1.json +++ b/tests/functional/botocore/endpoint-rules/account/endpoint-tests-1.json @@ -202,85 +202,43 @@ } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-gov-west-1" - } - ] - }, - "url": "https://account-fips.us-gov-west-1.api.aws" - } - }, - "params": { - "Region": "us-gov-west-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eusc-de-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingRegion": "us-gov-west-1" + "signingRegion": "eusc-de-east-1" } ] }, - "url": "https://account-fips.us-gov-west-1.amazonaws.com" + "url": "https://account-fips.eusc-de-east-1.amazonaws.eu" } }, "params": { - "Region": "us-gov-west-1", + "Region": "eusc-de-east-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "properties": { - "authSchemes": [ - { - 
"name": "sigv4", - "signingRegion": "us-gov-west-1" - } - ] - }, - "url": "https://account.us-gov-west-1.api.aws" - } - }, - "params": { - "Region": "us-gov-west-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eusc-de-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingRegion": "us-gov-west-1" + "signingRegion": "eusc-de-east-1" } ] }, - "url": "https://account.us-gov-west-1.amazonaws.com" + "url": "https://account.eusc-de-east-1.amazonaws.eu" } }, "params": { - "Region": "us-gov-west-1", + "Region": "eusc-de-east-1", "UseFIPS": false, "UseDualStack": false } @@ -454,43 +412,85 @@ } }, { - "documentation": "For region eusc-de-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingRegion": "eusc-de-east-1" + "signingRegion": "us-gov-west-1" } ] }, - "url": "https://account-fips.eusc-de-east-1.amazonaws.eu" + "url": "https://account-fips.us-gov-west-1.api.aws" } }, "params": { - "Region": "eusc-de-east-1", + "Region": "us-gov-west-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://account-fips.us-gov-west-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-west-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region eusc-de-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { "properties": { 
"authSchemes": [ { "name": "sigv4", - "signingRegion": "eusc-de-east-1" + "signingRegion": "us-gov-west-1" } ] }, - "url": "https://account.eusc-de-east-1.amazonaws.eu" + "url": "https://account.us-gov-west-1.api.aws" } }, "params": { - "Region": "eusc-de-east-1", + "Region": "us-gov-west-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://account.us-gov-west-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-west-1", "UseFIPS": false, "UseDualStack": false } diff --git a/tests/functional/botocore/endpoint-rules/artifact/endpoint-tests-1.json b/tests/functional/botocore/endpoint-rules/artifact/endpoint-tests-1.json index 37819cee8835..69d7ecd43bd8 100644 --- a/tests/functional/botocore/endpoint-rules/artifact/endpoint-tests-1.json +++ b/tests/functional/botocore/endpoint-rules/artifact/endpoint-tests-1.json @@ -202,85 +202,43 @@ } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-gov-west-1" - } - ] - }, - "url": "https://artifact-fips.us-gov-west-1.api.aws" - } - }, - "params": { - "Region": "us-gov-west-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eusc-de-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingRegion": "us-gov-west-1" + "signingRegion": "eusc-de-east-1" } ] }, - "url": "https://artifact-fips.us-gov-west-1.amazonaws.com" + "url": "https://artifact-fips.eusc-de-east-1.amazonaws.eu" } }, "params": { - "Region": "us-gov-west-1", + 
"Region": "eusc-de-east-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "properties": { - "authSchemes": [ - { - "name": "sigv4", - "signingRegion": "us-gov-west-1" - } - ] - }, - "url": "https://artifact.us-gov-west-1.api.aws" - } - }, - "params": { - "Region": "us-gov-west-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eusc-de-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingRegion": "us-gov-west-1" + "signingRegion": "eusc-de-east-1" } ] }, - "url": "https://artifact.us-gov-west-1.amazonaws.com" + "url": "https://artifact.eusc-de-east-1.amazonaws.eu" } }, "params": { - "Region": "us-gov-west-1", + "Region": "eusc-de-east-1", "UseFIPS": false, "UseDualStack": false } @@ -454,43 +412,85 @@ } }, { - "documentation": "For region eusc-de-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingRegion": "eusc-de-east-1" + "signingRegion": "us-gov-west-1" } ] }, - "url": "https://artifact-fips.eusc-de-east-1.amazonaws.eu" + "url": "https://artifact-fips.us-gov-west-1.api.aws" } }, "params": { - "Region": "eusc-de-east-1", + "Region": "us-gov-west-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://artifact-fips.us-gov-west-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-west-1", "UseFIPS": true, 
"UseDualStack": false } }, { - "documentation": "For region eusc-de-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { "properties": { "authSchemes": [ { "name": "sigv4", - "signingRegion": "eusc-de-east-1" + "signingRegion": "us-gov-west-1" } ] }, - "url": "https://artifact.eusc-de-east-1.amazonaws.eu" + "url": "https://artifact.us-gov-west-1.api.aws" } }, "params": { - "Region": "eusc-de-east-1", + "Region": "us-gov-west-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingRegion": "us-gov-west-1" + } + ] + }, + "url": "https://artifact.us-gov-west-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-west-1", "UseFIPS": false, "UseDualStack": false } diff --git a/tests/functional/botocore/endpoint-rules/geo-places/endpoint-tests-1.json b/tests/functional/botocore/endpoint-rules/geo-places/endpoint-tests-1.json index 7e1ed3b5217b..9b9b4a403dd4 100644 --- a/tests/functional/botocore/endpoint-rules/geo-places/endpoint-tests-1.json +++ b/tests/functional/botocore/endpoint-rules/geo-places/endpoint-tests-1.json @@ -138,53 +138,27 @@ } }, { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://places.geo-fips.us-gov-west-1.api.aws/v2" - } - }, - "params": { - "Region": "us-gov-west-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eusc-de-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://places.geo-fips.us-gov-west-1.amazonaws.com/v2" + "url": "https://geo-places-fips.eusc-de-east-1.amazonaws.eu" } }, 
"params": { - "Region": "us-gov-west-1", + "Region": "eusc-de-east-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://places.geo.us-gov-west-1.api.aws/v2" - } - }, - "params": { - "Region": "us-gov-west-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eusc-de-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://places.geo.us-gov-west-1.amazonaws.com/v2" + "url": "https://geo-places.eusc-de-east-1.amazonaws.eu" } }, "params": { - "Region": "us-gov-west-1", + "Region": "eusc-de-east-1", "UseFIPS": false, "UseDualStack": false } @@ -294,27 +268,53 @@ } }, { - "documentation": "For region eusc-de-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://geo-places-fips.eusc-de-east-1.amazonaws.eu" + "url": "https://places.geo-fips.us-gov-west-1.api.aws/v2" } }, "params": { - "Region": "eusc-de-east-1", + "Region": "us-gov-west-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://places.geo-fips.us-gov-west-1.amazonaws.com/v2" + } + }, + "params": { + "Region": "us-gov-west-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region eusc-de-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://geo-places.eusc-de-east-1.amazonaws.eu" + "url": "https://places.geo.us-gov-west-1.api.aws/v2" } }, "params": { - "Region": "eusc-de-east-1", + "Region": "us-gov-west-1", 
+ "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://places.geo.us-gov-west-1.amazonaws.com/v2" + } + }, + "params": { + "Region": "us-gov-west-1", "UseFIPS": false, "UseDualStack": false } diff --git a/tests/functional/botocore/endpoint-rules/health/endpoint-tests-1.json b/tests/functional/botocore/endpoint-rules/health/endpoint-tests-1.json index 5d268ee9aac6..ea40a667ac79 100644 --- a/tests/functional/botocore/endpoint-rules/health/endpoint-tests-1.json +++ b/tests/functional/botocore/endpoint-rules/health/endpoint-tests-1.json @@ -308,6 +308,523 @@ "expect": { "error": "Invalid Configuration: Missing Region" } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://health.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://health.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://health-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://health-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region aws-global with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + 
"name": "sigv4", + "signingName": "health", + "signingRegion": "us-east-1" + } + ] + }, + "url": "https://global.health.amazonaws.com" + } + }, + "params": { + "Region": "aws-global", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://health.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://health.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://health-fips.cn-north-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://health-fips.cn-north-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-north-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region aws-cn-global with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "properties": { + "authSchemes": [ + { + "name": "sigv4", + "signingName": "health", + "signingRegion": "cn-northwest-1" + } + ] + }, + "url": "https://global.health.amazonaws.com.cn" + } + }, + "params": { + "Region": "aws-cn-global", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://health.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": 
"us-gov-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://health.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://health-fips.us-gov-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://health-fips.us-gov-east-1.api.aws" + } + }, + "params": { + "Region": "us-gov-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://health.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://health-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://health.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://health.us-iso-east-1.api.aws.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": 
false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://health-fips.us-iso-east-1.api.aws.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://health-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://health.us-isob-east-1.api.aws.scloud" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://health-fips.us-isob-east-1.api.aws.scloud" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://health.us-isof-south-1.csp.hci.ic.gov" + } + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://health.us-isof-south-1.api.aws.hci.ic.gov" + } + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://health-fips.us-isof-south-1.csp.hci.ic.gov" + } + }, + "params": { + "Region": 
"us-isof-south-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://health-fips.us-isof-south-1.api.aws.hci.ic.gov" + } + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://health.eu-isoe-west-1.cloud.adc-e.uk" + } + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://health.eu-isoe-west-1.api.cloud-aws.adc-e.uk" + } + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://health-fips.eu-isoe-west-1.cloud.adc-e.uk" + } + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://health-fips.eu-isoe-west-1.api.cloud-aws.adc-e.uk" + } + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region eusc-de-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://health.eusc-de-east-1.api.amazonwebservices.eu" + } + }, + "params": { + "Region": "eusc-de-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eusc-de-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": 
"https://health.eusc-de-east-1.api.amazonwebservices.eu" + } + }, + "params": { + "Region": "eusc-de-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region eusc-de-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://health-fips.eusc-de-east-1.api.amazonwebservices.eu" + } + }, + "params": { + "Region": "eusc-de-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region eusc-de-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://health-fips.eusc-de-east-1.api.amazonwebservices.eu" + } + }, + "params": { + "Region": "eusc-de-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "UseFIPS": false, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + }, + { + "documentation": "For custom endpoint with fips enabled and dualstack disabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false, + "Endpoint": "https://example.com" + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Region": "us-east-1", + 
"UseFIPS": false, + "UseDualStack": true, + "Endpoint": "https://example.com" + } } ], "version": "1.0" diff --git a/tests/functional/botocore/endpoint-rules/kafkaconnect/endpoint-tests-1.json b/tests/functional/botocore/endpoint-rules/kafkaconnect/endpoint-tests-1.json index 61032a9e6dfd..b8709ef1fe60 100644 --- a/tests/functional/botocore/endpoint-rules/kafkaconnect/endpoint-tests-1.json +++ b/tests/functional/botocore/endpoint-rules/kafkaconnect/endpoint-tests-1.json @@ -1,457 +1,322 @@ { "testCases": [ { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with region not set and fips disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-northeast-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-northeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect.ap-northeast-2.amazonaws.com" - } - }, - "params": { - "Region": "ap-northeast-2", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect.ap-south-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-south-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect.ap-southeast-1.amazonaws.com" + "url": "https://example.com" } }, "params": { - "Region": "ap-southeast-1", - "UseFIPS": false, - "UseDualStack": false + "Endpoint": "https://example.com", + "UseFIPS": false } }, { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with fips enabled", "expect": { - "endpoint": { - 
"url": "https://kafkaconnect.ap-southeast-2.amazonaws.com" - } + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" }, "params": { - "Region": "ap-southeast-2", - "UseFIPS": false, - "UseDualStack": false + "Endpoint": "https://example.com", + "UseFIPS": true } }, { - "documentation": "For region ca-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For custom endpoint with fips disabled and dualstack enabled", "expect": { - "endpoint": { - "url": "https://kafkaconnect.ca-central-1.amazonaws.com" - } + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" }, "params": { - "Region": "ca-central-1", + "Endpoint": "https://example.com", "UseFIPS": false, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region eu-central-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://kafkaconnect.eu-central-1.amazonaws.com" + "url": "https://kafkaconnect-fips.us-east-1.api.aws" } }, "params": { - "Region": "eu-central-1", - "UseFIPS": false, - "UseDualStack": false + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region eu-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect.eu-north-1.amazonaws.com" + "url": "https://kafkaconnect-fips.us-east-1.amazonaws.com" } }, "params": { - "Region": "eu-north-1", - "UseFIPS": false, + "Region": "us-east-1", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://kafkaconnect.eu-west-1.amazonaws.com" + "url": 
"https://kafkaconnect.us-east-1.api.aws" } }, "params": { - "Region": "eu-west-1", + "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region eu-west-2 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect.eu-west-2.amazonaws.com" + "url": "https://kafkaconnect.us-east-1.amazonaws.com" } }, "params": { - "Region": "eu-west-2", + "Region": "us-east-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region eu-west-3 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://kafkaconnect.eu-west-3.amazonaws.com" + "url": "https://kafkaconnect-fips.cn-northwest-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "eu-west-3", - "UseFIPS": false, - "UseDualStack": false + "Region": "cn-northwest-1", + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region sa-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect.sa-east-1.amazonaws.com" + "url": "https://kafkaconnect-fips.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "Region": "sa-east-1", - "UseFIPS": false, + "Region": "cn-northwest-1", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://kafkaconnect.us-east-1.amazonaws.com" + "url": "https://kafkaconnect.cn-northwest-1.api.amazonwebservices.com.cn" } }, "params": { - "Region": "us-east-1", + "Region": "cn-northwest-1", "UseFIPS": false, - 
"UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region us-east-2 with FIPS disabled and DualStack disabled", + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect.us-east-2.amazonaws.com" + "url": "https://kafkaconnect.cn-northwest-1.amazonaws.com.cn" } }, "params": { - "Region": "us-east-2", + "Region": "cn-northwest-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", + "documentation": "For region eusc-de-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect.us-west-1.amazonaws.com" + "url": "https://kafkaconnect-fips.eusc-de-east-1.amazonaws.eu" } }, "params": { - "Region": "us-west-1", - "UseFIPS": false, + "Region": "eusc-de-east-1", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", + "documentation": "For region eusc-de-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect.us-west-2.amazonaws.com" + "url": "https://kafkaconnect.eusc-de-east-1.amazonaws.eu" } }, "params": { - "Region": "us-west-2", + "Region": "eusc-de-east-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect-fips.us-east-1.api.aws" - } - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect-fips.us-east-1.amazonaws.com" + "url": "https://kafkaconnect-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { - 
"Region": "us-east-1", + "Region": "us-iso-east-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect.us-east-1.api.aws" + "url": "https://kafkaconnect.us-iso-east-1.c2s.ic.gov" } }, "params": { - "Region": "us-east-1", + "Region": "us-iso-east-1", "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect-fips.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": true, - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect-fips.cn-north-1.amazonaws.com.cn" + "url": "https://kafkaconnect-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "Region": "cn-north-1", + "Region": "us-isob-east-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect.cn-north-1.api.amazonwebservices.com.cn" - } - }, - "params": { - "Region": "cn-north-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect.cn-north-1.amazonaws.com.cn" + "url": "https://kafkaconnect.us-isob-east-1.sc2s.sgov.gov" } }, "params": { - "Region": "cn-north-1", + "Region": "us-isob-east-1", "UseFIPS": false, 
"UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect-fips.us-gov-east-1.api.aws" - } - }, - "params": { - "Region": "us-gov-east-1", - "UseFIPS": true, - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect-fips.us-gov-east-1.amazonaws.com" + "url": "https://kafkaconnect-fips.eu-isoe-west-1.cloud.adc-e.uk" } }, "params": { - "Region": "us-gov-east-1", + "Region": "eu-isoe-west-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect.us-gov-east-1.api.aws" + "url": "https://kafkaconnect.eu-isoe-west-1.cloud.adc-e.uk" } }, "params": { - "Region": "us-gov-east-1", - "UseFIPS": false, - "UseDualStack": true - } - }, - { - "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://kafkaconnect.us-gov-east-1.amazonaws.com" - } - }, - "params": { - "Region": "us-gov-east-1", + "Region": "eu-isoe-west-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://kafkaconnect-fips.us-isof-south-1.csp.hci.ic.gov" } }, "params": { - "Region": "us-iso-east-1", + "Region": "us-isof-south-1", "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For region 
us-iso-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect.us-iso-east-1.c2s.ic.gov" + "url": "https://kafkaconnect.us-isof-south-1.csp.hci.ic.gov" } }, "params": { - "Region": "us-iso-east-1", + "Region": "us-isof-south-1", "UseFIPS": false, "UseDualStack": false } }, { - "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://kafkaconnect-fips.us-isob-east-1.sc2s.sgov.gov" + "url": "https://kafkaconnect.us-gov-west-1.api.aws" } }, "params": { - "Region": "us-isob-east-1", + "Region": "us-gov-west-1", "UseFIPS": true, - "UseDualStack": false + "UseDualStack": true } }, { - "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://kafkaconnect.us-isob-east-1.sc2s.sgov.gov" + "url": "https://kafkaconnect.us-gov-west-1.amazonaws.com" } }, "params": { - "Region": "us-isob-east-1", - "UseFIPS": false, + "Region": "us-gov-west-1", + "UseFIPS": true, "UseDualStack": false } }, { - "documentation": "For custom endpoint with region set and fips disabled and dualstack disabled", + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://example.com" + "url": "https://kafkaconnect.us-gov-west-1.api.aws" } }, "params": { - "Region": "us-east-1", + "Region": "us-gov-west-1", "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" + "UseDualStack": true } }, { - "documentation": "For custom endpoint with region not set and fips disabled and dualstack disabled", + "documentation": "For region us-gov-west-1 with FIPS 
disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://example.com" + "url": "https://kafkaconnect.us-gov-west-1.amazonaws.com" } }, "params": { + "Region": "us-gov-west-1", "UseFIPS": false, - "UseDualStack": false, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "For custom endpoint with fips enabled and dualstack disabled", - "expect": { - "error": "Invalid Configuration: FIPS and custom endpoint are not supported" - }, - "params": { - "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": false, - "Endpoint": "https://example.com" - } - }, - { - "documentation": "For custom endpoint with fips disabled and dualstack enabled", - "expect": { - "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" - }, - "params": { - "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": true, - "Endpoint": "https://example.com" + "UseDualStack": false } }, { diff --git a/tests/functional/botocore/endpoint-rules/nova-act/endpoint-tests-1.json b/tests/functional/botocore/endpoint-rules/nova-act/endpoint-tests-1.json new file mode 100644 index 000000000000..5330867f72b8 --- /dev/null +++ b/tests/functional/botocore/endpoint-rules/nova-act/endpoint-tests-1.json @@ -0,0 +1,330 @@ +{ + "testCases": [ + { + "documentation": "For custom endpoint with region not set and fips disabled", + "expect": { + "endpoint": { + "url": "https://example.com" + } + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": false + } + }, + { + "documentation": "For custom endpoint with fips enabled", + "expect": { + "error": "Invalid Configuration: FIPS and custom endpoint are not supported" + }, + "params": { + "Endpoint": "https://example.com", + "UseFIPS": true + } + }, + { + "documentation": "For custom endpoint with fips disabled and dualstack enabled", + "expect": { + "error": "Invalid Configuration: Dualstack and custom endpoint are not supported" + }, + "params": { + "Endpoint": "https://example.com", + 
"UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://nova-act-fips.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://nova-act-fips.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://nova-act.us-east-1.api.aws" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://nova-act.us-east-1.amazonaws.com" + } + }, + "params": { + "Region": "us-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://nova-act-fips.cn-northwest-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-northwest-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region cn-northwest-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://nova-act-fips.cn-northwest-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-northwest-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://nova-act.cn-northwest-1.api.amazonwebservices.com.cn" + } + }, + "params": { + "Region": "cn-northwest-1", + "UseFIPS": false, + 
"UseDualStack": true + } + }, + { + "documentation": "For region cn-northwest-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://nova-act.cn-northwest-1.amazonaws.com.cn" + } + }, + "params": { + "Region": "cn-northwest-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region eusc-de-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://nova-act-fips.eusc-de-east-1.amazonaws.eu" + } + }, + "params": { + "Region": "eusc-de-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region eusc-de-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://nova-act.eusc-de-east-1.amazonaws.eu" + } + }, + "params": { + "Region": "eusc-de-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://nova-act-fips.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://nova-act.us-iso-east-1.c2s.ic.gov" + } + }, + "params": { + "Region": "us-iso-east-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://nova-act-fips.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://nova-act.us-isob-east-1.sc2s.sgov.gov" + } + }, + "params": { + "Region": "us-isob-east-1", + "UseFIPS": 
false, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://nova-act-fips.eu-isoe-west-1.cloud.adc-e.uk" + } + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region eu-isoe-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://nova-act.eu-isoe-west-1.cloud.adc-e.uk" + } + }, + "params": { + "Region": "eu-isoe-west-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://nova-act-fips.us-isof-south-1.csp.hci.ic.gov" + } + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-isof-south-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://nova-act.us-isof-south-1.csp.hci.ic.gov" + } + }, + "params": { + "Region": "us-isof-south-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://nova-act-fips.us-gov-west-1.api.aws" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseFIPS": true, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS enabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://nova-act-fips.us-gov-west-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseFIPS": true, + "UseDualStack": false + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack enabled", + "expect": { + "endpoint": { + "url": "https://nova-act.us-gov-west-1.api.aws" + } + }, + "params": { + "Region": "us-gov-west-1", + 
"UseFIPS": false, + "UseDualStack": true + } + }, + { + "documentation": "For region us-gov-west-1 with FIPS disabled and DualStack disabled", + "expect": { + "endpoint": { + "url": "https://nova-act.us-gov-west-1.amazonaws.com" + } + }, + "params": { + "Region": "us-gov-west-1", + "UseFIPS": false, + "UseDualStack": false + } + }, + { + "documentation": "Missing region", + "expect": { + "error": "Invalid Configuration: Missing Region" + } + } + ], + "version": "1.0" +} \ No newline at end of file diff --git a/tests/functional/botocore/endpoint-rules/s3/endpoint-tests-1.json b/tests/functional/botocore/endpoint-rules/s3/endpoint-tests-1.json index ebf86522fe2c..2f961d84bd02 100644 --- a/tests/functional/botocore/endpoint-rules/s3/endpoint-tests-1.json +++ b/tests/functional/botocore/endpoint-rules/s3/endpoint-tests-1.json @@ -7701,6 +7701,20 @@ "Accelerate": false } }, + { + "documentation": "S3 Outposts invalid bucket name", + "expect": { + "error": "Invalid Outposts Bucket alias - it must be a valid bucket name." 
+ }, + "params": { + "Region": "us-east-1", + "Bucket": "test-accessp-o0b1de75431d83bebd/8xz5w8ijx1qzlbp3i3kbeta0--op-s3", + "Endpoint": "https://example.amazonaws.com", + "UseFIPS": false, + "UseDualStack": false, + "Accelerate": false + } + }, { "documentation": "S3 Outposts bucketAlias Invalid hardware type", "expect": { diff --git a/tests/functional/botocore/endpoint-rules/elastictranscoder/endpoint-tests-1.json b/tests/functional/botocore/endpoint-rules/wickr/endpoint-tests-1.json similarity index 63% rename from tests/functional/botocore/endpoint-rules/elastictranscoder/endpoint-tests-1.json rename to tests/functional/botocore/endpoint-rules/wickr/endpoint-tests-1.json index f59a62c174fd..29d6df91fcd7 100644 --- a/tests/functional/botocore/endpoint-rules/elastictranscoder/endpoint-tests-1.json +++ b/tests/functional/botocore/endpoint-rules/wickr/endpoint-tests-1.json @@ -1,153 +1,62 @@ { "testCases": [ { - "documentation": "For region ap-northeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elastictranscoder.ap-northeast-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-northeast-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-south-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elastictranscoder.ap-south-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-south-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-southeast-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elastictranscoder.ap-southeast-1.amazonaws.com" - } - }, - "params": { - "Region": "ap-southeast-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region ap-southeast-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elastictranscoder.ap-southeast-2.amazonaws.com" - } - }, - 
"params": { - "Region": "ap-southeast-2", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region eu-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elastictranscoder.eu-west-1.amazonaws.com" - } - }, - "params": { - "Region": "eu-west-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://elastictranscoder.us-east-1.amazonaws.com" + "url": "https://admin.wickr-fips.us-east-1.api.aws" } }, "params": { "Region": "us-east-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-1 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elastictranscoder.us-west-1.amazonaws.com" - } - }, - "params": { - "Region": "us-west-1", - "UseFIPS": false, - "UseDualStack": false - } - }, - { - "documentation": "For region us-west-2 with FIPS disabled and DualStack disabled", - "expect": { - "endpoint": { - "url": "https://elastictranscoder.us-west-2.amazonaws.com" - } - }, - "params": { - "Region": "us-west-2", - "UseFIPS": false, - "UseDualStack": false + "UseFIPS": true, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elastictranscoder-fips.us-east-1.api.aws" + "url": "https://admin.wickr-fips.us-east-1.amazonaws.com" } }, "params": { "Region": "us-east-1", "UseFIPS": true, - "UseDualStack": true + "UseDualStack": false } }, { - "documentation": "For region us-east-1 with FIPS enabled and DualStack disabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": 
{ - "url": "https://elastictranscoder-fips.us-east-1.amazonaws.com" + "url": "https://admin.wickr.us-east-1.api.aws" } }, "params": { "Region": "us-east-1", - "UseFIPS": true, - "UseDualStack": false + "UseFIPS": false, + "UseDualStack": true } }, { - "documentation": "For region us-east-1 with FIPS disabled and DualStack enabled", + "documentation": "For region us-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elastictranscoder.us-east-1.api.aws" + "url": "https://admin.wickr.us-east-1.amazonaws.com" } }, "params": { "Region": "us-east-1", "UseFIPS": false, - "UseDualStack": true + "UseDualStack": false } }, { "documentation": "For region cn-north-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://elastictranscoder-fips.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://admin.wickr-fips.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { @@ -160,7 +69,7 @@ "documentation": "For region cn-north-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elastictranscoder-fips.cn-north-1.amazonaws.com.cn" + "url": "https://admin.wickr-fips.cn-north-1.amazonaws.com.cn" } }, "params": { @@ -173,7 +82,7 @@ "documentation": "For region cn-north-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://elastictranscoder.cn-north-1.api.amazonwebservices.com.cn" + "url": "https://admin.wickr.cn-north-1.api.amazonwebservices.com.cn" } }, "params": { @@ -186,7 +95,7 @@ "documentation": "For region cn-north-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elastictranscoder.cn-north-1.amazonaws.com.cn" + "url": "https://admin.wickr.cn-north-1.amazonaws.com.cn" } }, "params": { @@ -199,7 +108,7 @@ "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://elastictranscoder-fips.us-gov-east-1.api.aws" + "url": 
"https://admin.wickr-fips.us-gov-east-1.api.aws" } }, "params": { @@ -212,7 +121,7 @@ "documentation": "For region us-gov-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elastictranscoder-fips.us-gov-east-1.amazonaws.com" + "url": "https://admin.wickr-fips.us-gov-east-1.amazonaws.com" } }, "params": { @@ -225,7 +134,7 @@ "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack enabled", "expect": { "endpoint": { - "url": "https://elastictranscoder.us-gov-east-1.api.aws" + "url": "https://admin.wickr.us-gov-east-1.api.aws" } }, "params": { @@ -238,7 +147,7 @@ "documentation": "For region us-gov-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elastictranscoder.us-gov-east-1.amazonaws.com" + "url": "https://admin.wickr.us-gov-east-1.amazonaws.com" } }, "params": { @@ -251,7 +160,7 @@ "documentation": "For region us-iso-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elastictranscoder-fips.us-iso-east-1.c2s.ic.gov" + "url": "https://admin.wickr-fips.us-iso-east-1.c2s.ic.gov" } }, "params": { @@ -264,7 +173,7 @@ "documentation": "For region us-iso-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elastictranscoder.us-iso-east-1.c2s.ic.gov" + "url": "https://admin.wickr.us-iso-east-1.c2s.ic.gov" } }, "params": { @@ -277,7 +186,7 @@ "documentation": "For region us-isob-east-1 with FIPS enabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elastictranscoder-fips.us-isob-east-1.sc2s.sgov.gov" + "url": "https://admin.wickr-fips.us-isob-east-1.sc2s.sgov.gov" } }, "params": { @@ -290,7 +199,7 @@ "documentation": "For region us-isob-east-1 with FIPS disabled and DualStack disabled", "expect": { "endpoint": { - "url": "https://elastictranscoder.us-isob-east-1.sc2s.sgov.gov" + "url": "https://admin.wickr.us-isob-east-1.sc2s.sgov.gov" } }, "params": { diff 
--git a/tests/functional/botocore/models/endpoints.json b/tests/functional/botocore/models/endpoints.json index 2824180d62bd..fe37d5f6ac19 100644 --- a/tests/functional/botocore/models/endpoints.json +++ b/tests/functional/botocore/models/endpoints.json @@ -889,18 +889,6 @@ "us-west-2" : { } } }, - "elastictranscoder" : { - "endpoints" : { - "ap-northeast-1" : { }, - "ap-south-1" : { }, - "ap-southeast-1" : { }, - "ap-southeast-2" : { }, - "eu-west-1" : { }, - "us-east-1" : { }, - "us-west-1" : { }, - "us-west-2" : { } - } - }, "email" : { "endpoints" : { "eu-west-1" : { }, diff --git a/tests/functional/botocore/test_client_class_names.py b/tests/functional/botocore/test_client_class_names.py index d569124dc628..6c751bd44cfc 100644 --- a/tests/functional/botocore/test_client_class_names.py +++ b/tests/functional/botocore/test_client_class_names.py @@ -37,7 +37,6 @@ 'efs': 'EFS', 'elasticache': 'ElastiCache', 'elasticbeanstalk': 'ElasticBeanstalk', - 'elastictranscoder': 'ElasticTranscoder', 'elb': 'ElasticLoadBalancing', 'emr': 'EMR', 'glacier': 'Glacier', diff --git a/tests/functional/botocore/test_h2_required.py b/tests/functional/botocore/test_h2_required.py index 97aa73b58bd5..c0673d8116a7 100644 --- a/tests/functional/botocore/test_h2_required.py +++ b/tests/functional/botocore/test_h2_required.py @@ -19,8 +19,8 @@ 'qbusiness': ['Chat'], 'kinesis': ['SubscribeToShard'], 'lexv2-runtime': ['StartConversation'], - # Added only to keep a record of this feature being incompatible - 'bedrock-runtime': ['InvokeModelWithBidirectionalStream'], + # Added only to keep a record of this feature being incompatible + 'bedrock-runtime': ['InvokeModelWithBidirectionalStream'], } @@ -53,16 +53,21 @@ def _all_test_cases(): @pytest.mark.validates_models @pytest.mark.parametrize("h2_service", H2_SERVICES) -def test_all_uses_of_h2_are_known(h2_service): +def test_all_uses_of_h2_are_known(h2_service, record_property): # Validates that a service that requires HTTP 2 for all 
operations is known message = f'Found unknown HTTP 2 service: {h2_service}' + # Store the service name in a PyTest custom property + record_property('aws_service', h2_service) assert _KNOWN_SERVICES.get(h2_service) is _H2_REQUIRED, message @pytest.mark.validates_models @pytest.mark.parametrize("h2_service, operation", H2_OPERATIONS) -def test_all_h2_operations_are_known(h2_service, operation): +def test_all_h2_operations_are_known(h2_service, operation, record_property): # Validates that an operation that requires HTTP 2 is known known_operations = _KNOWN_SERVICES.get(h2_service, []) message = f'Found unknown HTTP 2 operation: {h2_service}.{operation}' + # Store the service name and operation in PyTest custom properties + record_property('aws_service', h2_service) + record_property('aws_operation', operation) assert operation in known_operations, message diff --git a/tests/functional/botocore/test_paginator_config.py b/tests/functional/botocore/test_paginator_config.py index 313486e7eb65..8bc38803e306 100644 --- a/tests/functional/botocore/test_paginator_config.py +++ b/tests/functional/botocore/test_paginator_config.py @@ -151,14 +151,23 @@ def _pagination_configs(): @pytest.mark.parametrize( "operation_name, page_config, service_model", _pagination_configs() ) -def test_lint_pagination_configs(operation_name, page_config, service_model): +def test_lint_pagination_configs( + operation_name, page_config, service_model, record_property +): + # Store common details of the operation + record_property('aws_service', service_model.service_name) + record_property('aws_operation', operation_name) _validate_known_pagination_keys(page_config) _validate_result_key_exists(page_config) _validate_referenced_operation_exists(operation_name, service_model) - _validate_operation_has_output(operation_name, service_model) + _validate_operation_has_output( + operation_name, service_model, record_property + ) _validate_input_keys_match(operation_name, page_config, service_model) 
_validate_output_keys_match(operation_name, page_config, service_model) - _validate_new_numeric_keys(operation_name, page_config, service_model) + _validate_new_numeric_keys( + operation_name, page_config, service_model, record_property + ) def _validate_known_pagination_keys(page_config): @@ -185,10 +194,14 @@ def _validate_referenced_operation_exists(operation_name, service_model): ) -def _validate_operation_has_output(operation_name, service_model): +def _validate_operation_has_output( + operation_name, service_model, record_property +): op_model = service_model.operation_model(operation_name) output = op_model.output_shape if output is None or not output.members: + if output: + record_property('shape', output.type_name) raise AssertionError( "Pagination config refers to operation " f"that does not have any output: {operation_name}" @@ -212,13 +225,9 @@ def _validate_input_keys_match(operation_name, page_config, service_model): limit_key = page_config['limit_key'] if limit_key not in valid_input_names: raise AssertionError( - "limit_key '{}' refers to a non existent " - "input member for operation: {}, valid keys: " - "{}".format( - limit_key, - operation_name, - ', '.join(list(valid_input_names)), - ) + f"limit_key '{limit_key}' refers to a non existent " + f"input member for operation: {operation_name}, valid keys: " + f"{', '.join(list(valid_input_names))}." 
) @@ -238,7 +247,8 @@ def _validate_output_keys_match(operation_name, page_config, service_model): else: if output_key not in output_members: raise AssertionError( - f"Pagination key '{key_name}' refers to an output " + f"Pagination key '{key_name}' for operation " + f"{operation_name} refers to an output " f"member that does not exist: {output_key}" ) output_members.remove(output_key) @@ -255,16 +265,15 @@ def _validate_output_keys_match(operation_name, page_config, service_model): f.write(f"'{key}',\n") raise AssertionError( "There are member names in the output shape of " - "{} that are not accounted for in the pagination " - "config for service {}: {}".format( - operation_name, - service_model.service_name, - ', '.join(output_members), - ) + f"{operation_name} that are not accounted for in the pagination " + f"config for service {service_model.service_name}: " + f"{', '.join(output_members)}" ) -def _validate_new_numeric_keys(operation_name, page_config, service_model): +def _validate_new_numeric_keys( + operation_name, page_config, service_model, record_property +): output_shape = service_model.operation_model(operation_name).output_shape for key in _get_list_value(page_config, 'result_key'): current_shape = output_shape @@ -279,6 +288,7 @@ def _validate_new_numeric_keys(operation_name, page_config, service_model): and (service_model.service_name, operation_name) not in KNOWN_PAGINATORS_WITH_INTEGER_OUTPUTS ): + record_property('shape', current_shape.name) raise AssertionError( f'There is a new operation {operation_name} for service ' f'{service_model.service_name} that is configured to sum ' diff --git a/tests/functional/botocore/test_regions.py b/tests/functional/botocore/test_regions.py index fa211c4738e2..78662f5cf29c 100644 --- a/tests/functional/botocore/test_regions.py +++ b/tests/functional/botocore/test_regions.py @@ -44,7 +44,6 @@ 'elasticbeanstalk': 'elasticbeanstalk.ap-northeast-1.amazonaws.com', 'elasticloadbalancing': 
'elasticloadbalancing.ap-northeast-1.amazonaws.com', 'elasticmapreduce': 'elasticmapreduce.ap-northeast-1.amazonaws.com', - 'elastictranscoder': 'elastictranscoder.ap-northeast-1.amazonaws.com', 'glacier': 'glacier.ap-northeast-1.amazonaws.com', 'iot': 'iot.ap-northeast-1.amazonaws.com', 'kinesis': 'kinesis.ap-northeast-1.amazonaws.com', @@ -79,7 +78,6 @@ 'elasticbeanstalk': 'elasticbeanstalk.ap-southeast-1.amazonaws.com', 'elasticloadbalancing': 'elasticloadbalancing.ap-southeast-1.amazonaws.com', 'elasticmapreduce': 'elasticmapreduce.ap-southeast-1.amazonaws.com', - 'elastictranscoder': 'elastictranscoder.ap-southeast-1.amazonaws.com', 'kinesis': 'kinesis.ap-southeast-1.amazonaws.com', 'kms': 'kms.ap-southeast-1.amazonaws.com', 'logs': 'logs.ap-southeast-1.amazonaws.com', @@ -207,7 +205,6 @@ 'elasticbeanstalk': 'elasticbeanstalk.eu-west-1.amazonaws.com', 'elasticloadbalancing': 'elasticloadbalancing.eu-west-1.amazonaws.com', 'elasticmapreduce': 'elasticmapreduce.eu-west-1.amazonaws.com', - 'elastictranscoder': 'elastictranscoder.eu-west-1.amazonaws.com', 'email': 'email.eu-west-1.amazonaws.com', 'glacier': 'glacier.eu-west-1.amazonaws.com', 'iot': 'iot.eu-west-1.amazonaws.com', @@ -283,7 +280,6 @@ 'elasticbeanstalk': 'elasticbeanstalk.us-east-1.amazonaws.com', 'elasticloadbalancing': 'elasticloadbalancing.us-east-1.amazonaws.com', 'elasticmapreduce': 'elasticmapreduce.us-east-1.amazonaws.com', - 'elastictranscoder': 'elastictranscoder.us-east-1.amazonaws.com', 'email': 'email.us-east-1.amazonaws.com', 'glacier': 'glacier.us-east-1.amazonaws.com', 'iam': 'iam.amazonaws.com', @@ -349,7 +345,6 @@ 'elasticbeanstalk': 'elasticbeanstalk.us-west-1.amazonaws.com', 'elasticloadbalancing': 'elasticloadbalancing.us-west-1.amazonaws.com', 'elasticmapreduce': 'elasticmapreduce.us-west-1.amazonaws.com', - 'elastictranscoder': 'elastictranscoder.us-west-1.amazonaws.com', 'glacier': 'glacier.us-west-1.amazonaws.com', 'kinesis': 'kinesis.us-west-1.amazonaws.com', 'kms': 
'kms.us-west-1.amazonaws.com', @@ -387,7 +382,6 @@ 'elasticfilesystem': 'elasticfilesystem.us-west-2.amazonaws.com', 'elasticloadbalancing': 'elasticloadbalancing.us-west-2.amazonaws.com', 'elasticmapreduce': 'elasticmapreduce.us-west-2.amazonaws.com', - 'elastictranscoder': 'elastictranscoder.us-west-2.amazonaws.com', 'email': 'email.us-west-2.amazonaws.com', 'glacier': 'glacier.us-west-2.amazonaws.com', 'iot': 'iot.us-west-2.amazonaws.com', @@ -454,18 +448,6 @@ def test_single_service_region_endpoint( assert result['endpoint_url'] == expected_endpoint -# Ensure that all S3 regions use s3v4 instead of v4 -def test_all_s3_endpoints_have_s3v4(patched_session): - session = patched_session - partitions = session.get_available_partitions() - resolver = session._get_internal_component('endpoint_resolver') - for partition_name in partitions: - for endpoint in session.get_available_regions('s3', partition_name): - resolved = resolver.construct_endpoint('s3', endpoint) - assert 's3v4' in resolved['signatureVersions'] - assert 'v4' not in resolved['signatureVersions'] - - @pytest.mark.parametrize( "service_name, expected_endpoint", KNOWN_AWS_PARTITION_WIDE.items() ) diff --git a/tests/functional/botocore/test_supported_protocols.py b/tests/functional/botocore/test_supported_protocols.py index f14bb9a2ec97..901202e47a88 100644 --- a/tests/functional/botocore/test_supported_protocols.py +++ b/tests/functional/botocore/test_supported_protocols.py @@ -51,9 +51,11 @@ def _single_protocol_test_cases(): _multi_protocol_test_cases(), ) def test_services_with_protocols_trait_have_supported_protocol( - service_name, supported_protocols + service_name, supported_protocols, record_property ): message = f"No protocols supported for service {service_name}" + # Store the service name in PyTest custom properties + record_property('aws_service', service_name) assert any( protocol in PRIORITY_ORDERED_SUPPORTED_PROTOCOLS for protocol in supported_protocols @@ -66,7 +68,9 @@ def 
test_services_with_protocols_trait_have_supported_protocol( _single_protocol_test_cases(), ) def test_services_without_protocols_trait_have_supported_protocol( - service_name, supported_protocol + service_name, supported_protocol, record_property ): message = f"Service protocol not supported for {service_name}" + # Store the service name in PyTest custom properties + record_property('aws_service', service_name) assert supported_protocol in PRIORITY_ORDERED_SUPPORTED_PROTOCOLS, message diff --git a/tests/functional/ecs/test_monitorexpressgatewayservice.py b/tests/functional/ecs/test_monitorexpressgatewayservice.py new file mode 100644 index 000000000000..0d9eab356aa0 --- /dev/null +++ b/tests/functional/ecs/test_monitorexpressgatewayservice.py @@ -0,0 +1,298 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ + +from unittest.mock import ANY, Mock, patch + +import pytest + +from awscli.customizations.ecs import inject_commands +from awscli.customizations.ecs.monitorexpressgatewayservice import ( + ECSMonitorExpressGatewayService, +) + + +@pytest.fixture +def mock_watcher_class(): + """Fixture that provides a mock watcher class.""" + return Mock() + + +@pytest.fixture +def mock_session(): + """Fixture that provides a mock session.""" + return Mock() + + +@pytest.fixture +def command(mock_session, mock_watcher_class): + """Fixture that provides an ECSMonitorExpressGatewayService command.""" + return ECSMonitorExpressGatewayService( + mock_session, watcher_class=mock_watcher_class + ) + + +@pytest.fixture +def command_with_mock_session(mock_session, mock_watcher_class): + """Fixture that provides command with mock session and client configured.""" + client = Mock() + mock_session.create_client.return_value = client + command = 
ECSMonitorExpressGatewayService( + mock_session, watcher_class=mock_watcher_class + ) + return command + + +class TestECSMonitorExpressGatewayService: + def test_init(self, command): + assert command.name == 'monitor-express-gateway-service' + assert command.DESCRIPTION.startswith('Monitors the progress') + + def test_add_arguments(self, command): + command._build_arg_table() + arg_table = command.arg_table + + assert 'service-arn' in arg_table + assert 'resource-view' in arg_table + assert 'timeout' in arg_table + assert 'mode' in arg_table + + # Verify resource-view argument has correct choices + resource_view_arg = arg_table['resource-view'] + assert resource_view_arg.choices == ['RESOURCE', 'DEPLOYMENT'] + + # Verify mode argument has correct choices + mode_arg = arg_table['mode'] + assert mode_arg.choices == ['INTERACTIVE', 'TEXT-ONLY'] + + @patch('sys.stdout.isatty', return_value=False) + def test_run_main_with_text_only_mode( + self, mock_isatty, command_with_mock_session, mock_watcher_class + ): + command = command_with_mock_session + mock_watcher = Mock() + mock_watcher_class.return_value = mock_watcher + + parsed_args = Mock( + service_arn='arn:aws:ecs:us-west-2:123456789012:service/cluster/service', + resource_view='RESOURCE', + timeout=30, + mode='TEXT-ONLY', + ) + parsed_globals = Mock( + region='us-west-2', + endpoint_url=None, + verify_ssl=True, + color='off', + ) + + command._run_main(parsed_args, parsed_globals) + + # Verify watcher was created with correct parameters (positional) + mock_watcher_class.assert_called_once_with( + ANY, + parsed_args.service_arn, + 'RESOURCE', + 'TEXT-ONLY', + timeout_minutes=30, + use_color=False, + ) + + # Verify watcher was executed + mock_watcher.exec.assert_called_once() + + @patch('sys.stdout.isatty', return_value=True) + def test_run_main_with_interactive_mode( + self, mock_isatty, command_with_mock_session, mock_watcher_class + ): + command = command_with_mock_session + mock_watcher = Mock() + 
mock_watcher_class.return_value = mock_watcher + + parsed_args = Mock( + service_arn='arn:aws:ecs:us-west-2:123456789012:service/cluster/service', + resource_view='DEPLOYMENT', + timeout=60, + mode='INTERACTIVE', + ) + parsed_globals = Mock( + region='us-west-2', + endpoint_url=None, + verify_ssl=True, + color='auto', + ) + + command._run_main(parsed_args, parsed_globals) + + # Verify watcher was created with correct mode + mock_watcher_class.assert_called_once_with( + ANY, + parsed_args.service_arn, + 'DEPLOYMENT', + 'INTERACTIVE', + timeout_minutes=60, + use_color=True, + ) + + @patch('sys.stdout.isatty', return_value=True) + def test_run_main_auto_mode_with_tty( + self, mock_isatty, command_with_mock_session, mock_watcher_class + ): + command = command_with_mock_session + mock_watcher = Mock() + mock_watcher_class.return_value = mock_watcher + + parsed_args = Mock( + service_arn='arn:aws:ecs:us-west-2:123456789012:service/cluster/service', + resource_view='RESOURCE', + timeout=30, + mode=None, # Auto mode + ) + parsed_globals = Mock( + region='us-west-2', + endpoint_url=None, + verify_ssl=True, + color='auto', + ) + + command._run_main(parsed_args, parsed_globals) + + # When mode is None and TTY is available, should use INTERACTIVE + args = mock_watcher_class.call_args[0] + assert args[3] == 'INTERACTIVE' + + @patch('sys.stdout.isatty', return_value=False) + def test_run_main_auto_mode_without_tty( + self, mock_isatty, command_with_mock_session, mock_watcher_class + ): + command = command_with_mock_session + mock_watcher = Mock() + mock_watcher_class.return_value = mock_watcher + + parsed_args = Mock( + service_arn='arn:aws:ecs:us-west-2:123456789012:service/cluster/service', + resource_view='RESOURCE', + timeout=30, + mode=None, # Auto mode + ) + parsed_globals = Mock( + region='us-west-2', + endpoint_url=None, + verify_ssl=True, + color='auto', + ) + + command._run_main(parsed_args, parsed_globals) + + # When mode is None and TTY is not available, should use 
TEXT-ONLY + args = mock_watcher_class.call_args[0] + assert args[3] == 'TEXT-ONLY' + + @patch('sys.stdout.isatty', return_value=False) + def test_run_main_with_color_on( + self, mock_isatty, command_with_mock_session, mock_watcher_class + ): + command = command_with_mock_session + mock_watcher = Mock() + mock_watcher_class.return_value = mock_watcher + + parsed_args = Mock( + service_arn='arn:aws:ecs:us-west-2:123456789012:service/cluster/service', + resource_view='RESOURCE', + timeout=30, + mode='TEXT-ONLY', + ) + parsed_globals = Mock( + region='us-west-2', + endpoint_url=None, + verify_ssl=True, + color='on', + ) + + command._run_main(parsed_args, parsed_globals) + + # Verify color setting is True when color='on' + call_kwargs = mock_watcher_class.call_args[1] + assert call_kwargs['use_color'] is True + + @patch('sys.stdout.isatty', return_value=False) + def test_run_main_creates_ecs_client( + self, + mock_isatty, + mock_session, + command_with_mock_session, + mock_watcher_class, + ): + command = command_with_mock_session + mock_watcher = Mock() + mock_watcher_class.return_value = mock_watcher + + parsed_args = Mock( + service_arn='arn:aws:ecs:us-west-2:123456789012:service/cluster/service', + resource_view='RESOURCE', + timeout=30, + mode='TEXT-ONLY', + ) + parsed_globals = Mock( + region='us-west-2', + endpoint_url=None, + verify_ssl=True, + color='off', + ) + + command._run_main(parsed_args, parsed_globals) + + # Verify ECS client was created with correct parameters + mock_session.create_client.assert_called_once_with( + 'ecs', + region_name='us-west-2', + endpoint_url=None, + verify=True, + ) + + # Verify client was passed to watcher + args = mock_watcher_class.call_args[0] + assert args[0] is not None # Client was created and passed + + @patch('sys.stdout.isatty', return_value=False) + def test_run_main_with_default_resource_view( + self, mock_isatty, command_with_mock_session, mock_watcher_class + ): + command = command_with_mock_session + mock_watcher = 
Mock() + mock_watcher_class.return_value = mock_watcher + + parsed_args = Mock( + service_arn='arn:aws:ecs:us-west-2:123456789012:service/cluster/service', + resource_view=None, # Not specified, should use default + timeout=30, + mode='TEXT-ONLY', + ) + parsed_globals = Mock( + region='us-west-2', + endpoint_url=None, + verify_ssl=True, + color='off', + ) + + command._run_main(parsed_args, parsed_globals) + + # Verify default resource view is passed + args = mock_watcher_class.call_args[0] + assert args[2] is None # Resource view is passed as-is + + +class TestCommandRegistration: + def test_inject_commands_registers_monitor_command(self, mock_session): + command_table = {} + + inject_commands(command_table, mock_session) + + # Verify monitor command is registered + assert 'monitor-express-gateway-service' in command_table + command = command_table['monitor-express-gateway-service'] + assert isinstance(command, ECSMonitorExpressGatewayService) diff --git a/tests/functional/ecs/test_monitormutatinggatewayservice.py b/tests/functional/ecs/test_monitormutatinggatewayservice.py index b4c34281e872..b3e693ab0a9b 100644 --- a/tests/functional/ecs/test_monitormutatinggatewayservice.py +++ b/tests/functional/ecs/test_monitormutatinggatewayservice.py @@ -11,8 +11,11 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
-from unittest.mock import Mock +from unittest.mock import Mock, patch +import pytest + +from awscli.customizations.ecs import inject_commands from awscli.customizations.ecs.monitormutatinggatewayservice import ( MUTATION_HANDLERS, MonitoringResourcesArgument, @@ -21,6 +24,30 @@ ) +@pytest.fixture +def mock_watcher_class(): + """Fixture that provides a mock watcher class.""" + watcher_class = Mock() + watcher_class.is_monitoring_available.return_value = True + return watcher_class + + +@pytest.fixture +def mock_session(): + """Fixture that provides a mock session.""" + return Mock() + + +@pytest.fixture +def handler(mock_watcher_class): + """Fixture that provides a MonitorMutatingGatewayService handler.""" + return MonitorMutatingGatewayService( + 'create-gateway-service', + 'DEPLOYMENT', + watcher_class=mock_watcher_class, + ) + + class TestMonitoringResourcesArgument: def test_add_to_parser(self): parser = Mock() @@ -94,16 +121,6 @@ def test_operation_args_parsed_with_monitor_resources_false(self): assert not self.handler.effective_resource_view - def test_operation_args_parsed_no_monitor_resources_attr(self): - parsed_args = Mock() - # Remove the attribute - del parsed_args.monitor_resources - parsed_globals = Mock() - - self.handler.operation_args_parsed(parsed_args, parsed_globals) - - assert not self.handler.effective_resource_view - def test_after_call_with_monitoring_enabled(self): # Setup mock_watcher_class = Mock() @@ -264,3 +281,353 @@ def test_register_monitor_mutating_gateway_service(self): assert ( 'after-call.ecs.CreateExpressGatewayService' in registered_events ) + + +class TestMonitorModeParameter: + """Tests for --monitor-mode parameter functionality.""" + + def test_monitor_mode_argument_added_to_table(self, handler): + """Test that --monitor-mode is added to argument table.""" + argument_table = {} + session = Mock() + + handler.building_argument_table(argument_table, session) + + assert 'monitor-mode' in argument_table + + 
@patch('sys.stdout.isatty', return_value=True) + def test_operation_args_parsed_with_monitor_mode_and_resources( + self, mock_isatty, handler, mock_session + ): + """Test operation_args_parsed with both --monitor-mode and --monitor-resources.""" + handler.session = mock_session + parsed_args = Mock() + parsed_args.monitor_resources = 'RESOURCE' + parsed_args.monitor_mode = 'TEXT-ONLY' + parsed_globals = Mock() + + # Should not raise + handler.operation_args_parsed(parsed_args, parsed_globals) + + assert handler.effective_resource_view == 'RESOURCE' + assert handler.effective_mode == 'TEXT-ONLY' + + @patch('sys.stdout.isatty', return_value=True) + def test_operation_args_parsed_with_monitor_mode_without_resources_raises( + self, mock_isatty, handler, mock_session + ): + """Test operation_args_parsed with --monitor-mode but no --monitor-resources raises ValueError.""" + handler.session = mock_session + parsed_args = Mock() + parsed_args.monitor_resources = None + parsed_args.monitor_mode = 'TEXT-ONLY' + parsed_globals = Mock() + + with pytest.raises(ValueError) as exc_info: + handler.operation_args_parsed(parsed_args, parsed_globals) + + assert ( + '--monitor-mode can only be used with --monitor-resources' + in str(exc_info.value) + ) + + @patch('sys.stdout.isatty', return_value=True) + def test_operation_args_parsed_defaults_mode_to_interactive( + self, mock_isatty, handler, mock_session + ): + """Test operation_args_parsed defaults mode to INTERACTIVE when not specified.""" + handler.session = mock_session + parsed_args = Mock() + parsed_args.monitor_resources = 'DEPLOYMENT' + parsed_args.monitor_mode = None + parsed_globals = Mock() + + handler.operation_args_parsed(parsed_args, parsed_globals) + + assert handler.effective_mode == 'INTERACTIVE' + + @patch('sys.stdout.isatty', return_value=True) + def test_operation_args_parsed_without_monitor_resources( + self, mock_isatty, handler, mock_session + ): + """Test operation_args_parsed disables monitoring when 
--monitor-resources not provided.""" + handler.session = mock_session + parsed_args = Mock() + parsed_args.monitor_resources = None + parsed_args.monitor_mode = None + parsed_globals = Mock() + + handler.operation_args_parsed(parsed_args, parsed_globals) + + assert handler.effective_resource_view is None + + @patch('sys.stdout.isatty', return_value=True) + def test_after_call_with_interactive_mode( + self, mock_isatty, mock_session, mock_watcher_class + ): + """Test monitoring starts with interactive mode when specified.""" + handler = MonitorMutatingGatewayService( + 'create-express-gateway-service', + 'DEPLOYMENT', + watcher_class=mock_watcher_class, + ) + + mock_watcher = Mock() + mock_watcher_class.return_value = mock_watcher + + mock_parsed_globals = Mock() + mock_parsed_globals.region = 'us-west-2' + mock_parsed_globals.endpoint_url = None + mock_parsed_globals.verify_ssl = True + mock_parsed_globals.color = 'auto' + + mock_ecs_client = Mock() + mock_session.create_client.return_value = mock_ecs_client + + handler.session = mock_session + handler.parsed_globals = mock_parsed_globals + handler.effective_resource_view = 'DEPLOYMENT' + handler.effective_mode = 'INTERACTIVE' + + parsed = { + 'service': { + 'serviceArn': 'arn:aws:ecs:us-west-2:123456789:service/test-service' + } + } + context = {} + http_response = Mock() + http_response.status_code = 200 + + handler.after_call(parsed, context, http_response) + + # Verify watcher was created with correct parameters + call_args = mock_watcher_class.call_args + assert call_args is not None + + # Check positional arguments + assert ( + call_args[0][1] + == 'arn:aws:ecs:us-west-2:123456789:service/test-service' + ) # service_arn + assert call_args[0][2] == 'DEPLOYMENT' # resource_view + assert call_args[0][3] == 'INTERACTIVE' + + @patch('sys.stdout.isatty', return_value=False) + def test_after_call_with_text_only_mode( + self, mock_isatty, mock_session, mock_watcher_class + ): + """Test monitoring starts with 
text-only mode when specified.""" + handler = MonitorMutatingGatewayService( + 'create-express-gateway-service', + 'DEPLOYMENT', + watcher_class=mock_watcher_class, + ) + + mock_watcher = Mock() + mock_watcher_class.return_value = mock_watcher + + mock_parsed_globals = Mock() + mock_parsed_globals.region = 'us-west-2' + mock_parsed_globals.endpoint_url = None + mock_parsed_globals.verify_ssl = True + mock_parsed_globals.color = 'off' + + mock_ecs_client = Mock() + mock_session.create_client.return_value = mock_ecs_client + + handler.session = mock_session + handler.parsed_globals = mock_parsed_globals + handler.effective_resource_view = 'RESOURCE' + handler.effective_mode = 'TEXT-ONLY' + + parsed = { + 'service': { + 'serviceArn': 'arn:aws:ecs:us-west-2:123456789:service/test-service' + } + } + context = {} + http_response = Mock() + http_response.status_code = 200 + + handler.after_call(parsed, context, http_response) + + # Verify watcher was created with correct parameters + call_args = mock_watcher_class.call_args + assert call_args is not None + + # Check positional arguments + assert ( + call_args[0][1] + == 'arn:aws:ecs:us-west-2:123456789:service/test-service' + ) # service_arn + assert call_args[0][2] == 'RESOURCE' # resource_view + assert call_args[0][3] == 'TEXT-ONLY' + + @patch('sys.stdout.isatty', return_value=True) + def test_after_call_with_color_on( + self, mock_isatty, mock_session, mock_watcher_class + ): + """Test use_color=True when color='on'.""" + handler = MonitorMutatingGatewayService( + 'create-express-gateway-service', + 'DEPLOYMENT', + watcher_class=mock_watcher_class, + ) + + mock_watcher = Mock() + mock_watcher_class.return_value = mock_watcher + + mock_parsed_globals = Mock() + mock_parsed_globals.region = 'us-west-2' + mock_parsed_globals.endpoint_url = None + mock_parsed_globals.verify_ssl = True + mock_parsed_globals.color = 'on' + + mock_ecs_client = Mock() + mock_session.create_client.return_value = mock_ecs_client + + 
handler.session = mock_session + handler.parsed_globals = mock_parsed_globals + handler.effective_resource_view = 'DEPLOYMENT' + handler.effective_mode = 'INTERACTIVE' + + parsed = { + 'service': { + 'serviceArn': 'arn:aws:ecs:us-west-2:123456789:service/test-service' + } + } + context = {} + http_response = Mock() + http_response.status_code = 200 + + handler.after_call(parsed, context, http_response) + + # Check keyword arguments + call_args = mock_watcher_class.call_args + assert call_args[1]['use_color'] is True + + @patch('sys.stdout.isatty', return_value=False) + def test_after_call_with_color_off( + self, mock_isatty, mock_session, mock_watcher_class + ): + """Test use_color=False when color='off'.""" + handler = MonitorMutatingGatewayService( + 'create-express-gateway-service', + 'DEPLOYMENT', + watcher_class=mock_watcher_class, + ) + + mock_watcher = Mock() + mock_watcher_class.return_value = mock_watcher + + mock_parsed_globals = Mock() + mock_parsed_globals.region = 'us-west-2' + mock_parsed_globals.endpoint_url = None + mock_parsed_globals.verify_ssl = True + mock_parsed_globals.color = 'off' + + mock_ecs_client = Mock() + mock_session.create_client.return_value = mock_ecs_client + + handler.session = mock_session + handler.parsed_globals = mock_parsed_globals + handler.effective_resource_view = 'DEPLOYMENT' + handler.effective_mode = 'TEXT-ONLY' + + parsed = { + 'service': { + 'serviceArn': 'arn:aws:ecs:us-west-2:123456789:service/test-service' + } + } + context = {} + http_response = Mock() + http_response.status_code = 200 + + handler.after_call(parsed, context, http_response) + + call_args = mock_watcher_class.call_args + assert call_args[1]['use_color'] is False + + @patch('sys.stdout.isatty', return_value=True) + def test_after_call_with_color_auto_with_tty( + self, mock_isatty, mock_session, mock_watcher_class + ): + """Test use_color=True when color='auto' with TTY.""" + handler = MonitorMutatingGatewayService( + 
'create-express-gateway-service', + 'DEPLOYMENT', + watcher_class=mock_watcher_class, + ) + + mock_watcher = Mock() + mock_watcher_class.return_value = mock_watcher + + mock_parsed_globals = Mock() + mock_parsed_globals.region = 'us-west-2' + mock_parsed_globals.endpoint_url = None + mock_parsed_globals.verify_ssl = True + mock_parsed_globals.color = 'auto' + + mock_ecs_client = Mock() + mock_session.create_client.return_value = mock_ecs_client + + handler.session = mock_session + handler.parsed_globals = mock_parsed_globals + handler.effective_resource_view = 'DEPLOYMENT' + handler.effective_mode = 'INTERACTIVE' + + parsed = { + 'service': { + 'serviceArn': 'arn:aws:ecs:us-west-2:123456789:service/test-service' + } + } + context = {} + http_response = Mock() + http_response.status_code = 200 + + handler.after_call(parsed, context, http_response) + + call_args = mock_watcher_class.call_args + assert call_args[1]['use_color'] is True + + @patch('sys.stdout.isatty', return_value=False) + def test_after_call_with_color_auto_without_tty( + self, mock_isatty, mock_session, mock_watcher_class + ): + """Test use_color=False when color='auto' without TTY.""" + handler = MonitorMutatingGatewayService( + 'create-express-gateway-service', + 'DEPLOYMENT', + watcher_class=mock_watcher_class, + ) + + mock_watcher = Mock() + mock_watcher_class.return_value = mock_watcher + + mock_parsed_globals = Mock() + mock_parsed_globals.region = 'us-west-2' + mock_parsed_globals.endpoint_url = None + mock_parsed_globals.verify_ssl = True + mock_parsed_globals.color = 'auto' + + mock_ecs_client = Mock() + mock_session.create_client.return_value = mock_ecs_client + + handler.session = mock_session + handler.parsed_globals = mock_parsed_globals + handler.effective_resource_view = 'DEPLOYMENT' + handler.effective_mode = 'TEXT-ONLY' + + parsed = { + 'service': { + 'serviceArn': 'arn:aws:ecs:us-west-2:123456789:service/test-service' + } + } + context = {} + http_response = Mock() + 
http_response.status_code = 200 + + handler.after_call(parsed, context, http_response) + + call_args = mock_watcher_class.call_args + assert call_args[1]['use_color'] is False diff --git a/tests/functional/login/test_login.py b/tests/functional/login/test_login.py index 29c870b72e51..a89fa0a31670 100644 --- a/tests/functional/login/test_login.py +++ b/tests/functional/login/test_login.py @@ -6,6 +6,7 @@ import pytest +from awscli.customizations.exceptions import ConfigurationError from awscli.customizations.login.login import LoginCommand DEFAULT_ARGS = Namespace(remote=False) @@ -230,3 +231,68 @@ def test_new_profile_without_region( }, 'configfile', ) + + +@pytest.mark.parametrize( + 'profile_config,expected_to_abort', + [ + pytest.param({}, False, id="Empty profile"), + pytest.param( + {'login_session': 'arn:aws:iam::0123456789012:user/Admin'}, + False, + id="Existing login profile", + ), + pytest.param( + {'web_identity_token_file': '/path'}, + True, + id="Web Identity Token profile", + ), + pytest.param({'sso_role_name': 'role'}, True, id="SSO profile"), + pytest.param( + {'aws_access_key_id': 'AKIAIOSFODNN7EXAMPLE'}, + True, + id="IAM access key profile", + ), + pytest.param( + {'role_arn': 'arn:aws:iam::123456789012:role/MyRole'}, + True, + id="Assume role profile", + ), + pytest.param( + {'credential_process': '/path/to/credential/process'}, + True, + id="Credential process profile", + ), + ], +) +@mock.patch('awscli.customizations.login.utils.get_base_sign_in_uri') +@mock.patch( + 'awscli.customizations.login.utils.SameDeviceLoginTokenFetcher.fetch_token' +) +def test_abort_if_profile_has_existing_credentials( + mock_token_fetcher, + mock_base_sign_in_uri, + mock_login_command, + mock_session, + mock_token_loader, + profile_config, + expected_to_abort, +): + mock_base_sign_in_uri.return_value = 'https://foo' + mock_token_fetcher.return_value = ( + { + 'accessToken': 'access_token', + 'idToken': SAMPLE_ID_TOKEN, + 'expiresIn': 3600, + }, + 
'arn:aws:iam::0123456789012:user/Admin', + ) + mock_session.full_config = {'profiles': {'profile-name': profile_config}} + + if expected_to_abort: + with pytest.raises(ConfigurationError): + mock_login_command._run_main(DEFAULT_ARGS, DEFAULT_GLOBAL_ARGS) + mock_token_fetcher.assert_not_called() + else: + mock_login_command._run_main(DEFAULT_ARGS, DEFAULT_GLOBAL_ARGS) + mock_token_fetcher.assert_called_once() diff --git a/tests/functional/test_no_event_streams.py b/tests/functional/test_no_event_streams.py index fd3fbcb457cc..1672d017ddfc 100644 --- a/tests/functional/test_no_event_streams.py +++ b/tests/functional/test_no_event_streams.py @@ -20,7 +20,7 @@ @pytest.mark.validates_models -def test_no_event_stream_unless_allowed(): +def test_no_event_stream_unless_allowed(record_property): driver = create_clidriver() help_command = driver.create_help_command() errors = [] @@ -31,18 +31,24 @@ def test_no_event_stream_unless_allowed(): op_help = sub_command.create_help_command() model = op_help.obj if isinstance(model, OperationModel): - full_command = '%s %s' % (command_name, sub_name) + full_command = f'{command_name} {sub_name}' if ( model.has_event_stream_input or model.has_event_stream_output ): if full_command in _ALLOWED_COMMANDS: continue + # Store the service and operation in + # PyTest custom properties + record_property( + 'aws_service', model.service_model.service_name + ) + record_property('aws_operation', model.name) supported_commands = '\n'.join(_ALLOWED_COMMANDS) errors.append( - 'The "%s" command uses event streams ' + f'The {full_command} command uses event streams ' 'which is only supported for these operations:\n' - '%s' % (full_command, supported_commands) + f'{supported_commands}' ) if errors: raise AssertionError('\n' + '\n'.join(errors)) diff --git a/tests/functional/test_shadowing.py b/tests/functional/test_shadowing.py index 34060573226b..ffa3d8016053 100644 --- a/tests/functional/test_shadowing.py +++ b/tests/functional/test_shadowing.py @@ 
-11,6 +11,7 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. import pytest +from botocore.model import OperationModel from awscli.clidriver import create_clidriver @@ -29,7 +30,9 @@ def _generate_command_tests(): @pytest.mark.parametrize( "command_name, command_table, builtins", _generate_command_tests() ) -def test_no_shadowed_builtins(command_name, command_table, builtins): +def test_no_shadowed_builtins( + command_name, command_table, builtins, record_property +): """Verify no command params are shadowed or prefixed by the built in param. The CLI parses all command line options into a single namespace. @@ -58,13 +61,21 @@ def test_no_shadowed_builtins(command_name, command_table, builtins): errors = [] for sub_name, sub_command in command_table.items(): op_help = sub_command.create_help_command() + model = op_help.obj arg_table = op_help.arg_table for arg_name in arg_table: if any(p.startswith(arg_name) for p in builtins): + if isinstance(model, OperationModel): + # Store the service and operation in + # PyTest custom properties + record_property( + 'aws_service', model.service_model.service_name + ) + record_property('aws_operation', model.name) # Then we are shadowing or prefixing a top level argument errors.append( 'Shadowing/Prefixing a top level option: ' - '%s.%s.%s' % (command_name, sub_name, arg_name) + f'{command_name}.{sub_name}.{arg_name}' ) if errors: raise AssertionError('\n' + '\n'.join(errors)) diff --git a/tests/integration/test_cli.py b/tests/integration/test_cli.py index 3fb10e086853..0b249db1e63c 100644 --- a/tests/integration/test_cli.py +++ b/tests/integration/test_cli.py @@ -126,11 +126,11 @@ def test_operation_help_with_required_option(self): self.assertEqual(p.stderr, '') def test_help_with_warning_blocks(self): - p = aws('elastictranscoder create-pipeline help') + p = aws('bedrock-runtime invoke-model help') self.assertEqual(p.rc, 0, p.stderr) # 
Check text that appears in the warning block to ensure # the block was actually rendered. - self.assertRegex(p.stdout, r'To\s+receive\s+notifications') + self.assertRegex(p.stdout, r"To\s+deny\s+all\s+inference\s+access") def test_param_shorthand(self): p = aws( diff --git a/tests/unit/botocore/response_parsing/json/errors/elastictranscoder-delete-pipeline.json b/tests/unit/botocore/response_parsing/json/errors/elastictranscoder-delete-pipeline.json deleted file mode 100644 index d28b03002d18..000000000000 --- a/tests/unit/botocore/response_parsing/json/errors/elastictranscoder-delete-pipeline.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "message": "1 validation error detected: Value 'foobar' at 'id' failed to satisfy constraint: Member must satisfy regular expression pattern: ^\\d{13}-\\w{6}$", - "__headers__": { - "x-amzn-requestid": "1234", - "content-length": "120", - "date": "Wed, 19 Feb 2014 22:13:13 GMT", - "nncoection": "close", - "content-type": "application/json", - "x-amzn-errortype": "ValidationException:" - } -} diff --git a/tests/unit/botocore/response_parsing/json/expected/elastictranscoder-delete-pipeline.json b/tests/unit/botocore/response_parsing/json/expected/elastictranscoder-delete-pipeline.json deleted file mode 100644 index 5249d826deb8..000000000000 --- a/tests/unit/botocore/response_parsing/json/expected/elastictranscoder-delete-pipeline.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "ResponseMetadata": {"RequestId": "1234"}, - "Error": { - "Message": "1 validation error detected: Value 'foobar' at 'id' failed to satisfy constraint: Member must satisfy regular expression pattern: ^\\d{13}-\\w{6}$", - "Code": "ValidationException" - } -} diff --git a/tests/unit/botocore/response_parsing/json/expected/elastictranscoder-list-pipelines.json b/tests/unit/botocore/response_parsing/json/expected/elastictranscoder-list-pipelines.json deleted file mode 100644 index 60c1ee05b568..000000000000 --- 
a/tests/unit/botocore/response_parsing/json/expected/elastictranscoder-list-pipelines.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "ResponseMetadata": {}, - "NextPageToken": null, - "Pipelines": [ - { - "ContentConfig": { - "Bucket": "pipeline-12345", - "StorageClass": "Standard", - "Permissions": [] - }, - "Status": "Active", - "Name": "test-pipeline", - "ThumbnailConfig": { - "Bucket": "12345", - "StorageClass": "Standard", - "Permissions": [] - }, - "Notifications": { - "Completed": "", - "Warning": "", - "Progressing": "", - "Error": "" - }, - "Role": "arn:aws:iam::12345:role/Elastic_Transcoder_Default_Role", - "InputBucket": "12345", - "OutputBucket": null, - "Id": "12345", - "Arn": "arn:aws:elastictranscoder:us-west-2:12345:pipeline/12345" - } - ] -} diff --git a/tests/unit/botocore/test_http_session.py b/tests/unit/botocore/test_http_session.py index e2202e8ad95a..1165f3fdcc53 100644 --- a/tests/unit/botocore/test_http_session.py +++ b/tests/unit/botocore/test_http_session.py @@ -1,4 +1,5 @@ import socket +from concurrent.futures import CancelledError import pytest from botocore.awsrequest import ( @@ -12,13 +13,13 @@ ProxyConnectionError, ) from botocore.httpsession import ( + BUFFER_SIZE, ProxyConfiguration, URLLib3Session, get_cert_path, mask_proxy_url, ) from urllib3.exceptions import NewConnectionError, ProtocolError, ProxyError -from concurrent.futures import CancelledError from tests import mock, unittest @@ -149,7 +150,6 @@ def assert_request_sent( def _assert_manager_call(self, manager, *assert_args, **assert_kwargs): call_kwargs = { - 'strict': True, 'maxsize': mock.ANY, 'timeout': mock.ANY, 'ssl_context': mock.ANY, @@ -157,6 +157,8 @@ def _assert_manager_call(self, manager, *assert_args, **assert_kwargs): 'cert_file': None, 'key_file': None, } + if BUFFER_SIZE: + call_kwargs['blocksize'] = BUFFER_SIZE call_kwargs.update(assert_kwargs) manager.assert_called_with(*assert_args, **call_kwargs) diff --git a/tests/unit/botocore/test_serialize.py 
b/tests/unit/botocore/test_serialize.py index c05805f2e957..f28228490221 100644 --- a/tests/unit/botocore/test_serialize.py +++ b/tests/unit/botocore/test_serialize.py @@ -614,3 +614,54 @@ def test_restxml_serializes_unicode(self): self.serialize_to_request(params) except UnicodeEncodeError: self.fail("RestXML serializer failed to serialize unicode text.") + +class TestRpcV2CBORHostPrefix(unittest.TestCase): + def setUp(self): + self.model = { + 'metadata': { + 'protocol': 'smithy-rpc-v2-cbor', + 'apiVersion': '2014-01-01', + 'serviceId': 'MyService', + 'targetPrefix': 'sampleservice', + 'documentation': '', + }, + 'operations': { + 'TestHostPrefixOperation': { + 'name': 'TestHostPrefixOperation', + 'input': {'shape': 'InputShape'}, + 'endpoint': {'hostPrefix': '{Foo}'}, + }, + 'TestNoHostPrefixOperation': { + 'name': 'TestNoHostPrefixOperation', + 'input': {'shape': 'InputShape'}, + }, + }, + 'shapes': { + 'InputShape': { + 'type': 'structure', + 'members': { + 'Foo': {'shape': 'StringType', 'hostLabel': True}, + }, + }, + 'StringType': {'type': 'string'}, + }, + } + self.service_model = ServiceModel(self.model) + + def test_host_prefix_added_to_serialized_request(self): + operation_model = self.service_model.operation_model('TestHostPrefixOperation') + serializer = serialize.create_serializer('smithy-rpc-v2-cbor') + + params = {'Foo': 'bound'} + serialized = serializer.serialize_to_request(params, operation_model) + + self.assertEqual(serialized['host_prefix'], 'bound') + + def test_no_host_prefix_when_not_configured(self): + operation_model = self.service_model.operation_model('TestNoHostPrefixOperation') + serializer = serialize.create_serializer('smithy-rpc-v2-cbor') + + params = {'Foo': 'bound'} + serialized = serializer.serialize_to_request(params, operation_model) + + self.assertNotIn('host_prefix', serialized) diff --git a/tests/unit/botocore/test_useragent.py b/tests/unit/botocore/test_useragent.py index 8b24425a385a..b6a238d90c1d 100644 --- 
a/tests/unit/botocore/test_useragent.py +++ b/tests/unit/botocore/test_useragent.py @@ -236,3 +236,27 @@ def test_non_positive_user_agent_component_size_config_raises(): with pytest.raises(ValueError) as excinfo: UserAgentComponentSizeConfig(-1, ',') assert 'Invalid `max_size_in_bytes`' in str(excinfo.value) + + +def test_hash_in_user_agent_appid(): + ua = UserAgentString( + platform_name='linux', + platform_version='1.2.3-foo', + platform_machine='x86_64', + python_version='3.8.20', + python_implementation='Dpython', + execution_env='AWS_Lambda_python3.8', + ).with_client_config(Config(user_agent_appid='fooapp#1.0.0')) + + actual = ua.to_string() + expected = ( + f'Botocore/{botocore_version} ' + 'ua/2.1 ' + 'os/linux#1.2.3-foo ' + 'md/arch#x86_64 ' + 'lang/python#3.8.20 ' + 'md/pyimpl#Dpython ' + 'exec-env/AWS_Lambda_python3.8 ' + 'app/fooapp#1.0.0' + ) + assert actual == expected \ No newline at end of file diff --git a/tests/unit/customizations/cloudtrail/test_validation.py b/tests/unit/customizations/cloudtrail/test_validation.py index 6c9a28599994..41a8f485cb4a 100644 --- a/tests/unit/customizations/cloudtrail/test_validation.py +++ b/tests/unit/customizations/cloudtrail/test_validation.py @@ -595,15 +595,16 @@ def test_calls_list_objects_correctly(self): mock_search = mock_paginate.return_value.search mock_search.return_value = [] provider = self._get_mock_provider(s3_client) - provider.load_digest_keys_in_range('1', 'prefix', START_DATE, END_DATE) - marker = ( - 'prefix/AWSLogs/{account}/CloudTrail-Digest/us-east-1/' - '2014/08/09/{account}_CloudTrail-Digest_us-east-1_foo_' - 'us-east-1_20140809T235900Z.json.gz' - ) + provider.load_digest_keys_in_range( + '1', 'prefix', START_DATE, END_DATE) + marker = ('prefix/AWSLogs/{account}/CloudTrail-Digest/us-east-1/' + '2014/08/09/{account}_CloudTrail-Digest_us-east-1_foo_' + 'us-east-1_20140809T235900Z.json.gz') + prefix = 'prefix/AWSLogs/{account}/CloudTrail-Digest/us-east-1' mock_paginate.assert_called_once_with( 
- Bucket='1', Marker=marker.format(account=TEST_ACCOUNT_ID) - ) + Bucket='1', + Marker=marker.format(account=TEST_ACCOUNT_ID), + Prefix=prefix.format(account=TEST_ACCOUNT_ID)) def test_calls_list_objects_correctly_org_trails(self): s3_client = mock.Mock() @@ -627,14 +628,62 @@ def test_calls_list_objects_correctly_org_trails(self): '2014/08/09/{member_account}_CloudTrail-Digest_us-east-1_foo_' 'us-east-1_20140809T235900Z.json.gz' ) + prefix = ( + 'prefix/AWSLogs/{organization_id}/{member_account}/' + 'CloudTrail-Digest/us-east-1' + ) mock_paginate.assert_called_once_with( Bucket='1', Marker=marker.format( member_account=TEST_ORGANIZATION_ACCOUNT_ID, - organization_id=TEST_ORGANIZATION_ID, + organization_id=TEST_ORGANIZATION_ID ), + Prefix=prefix.format( + member_account=TEST_ORGANIZATION_ACCOUNT_ID, + organization_id=TEST_ORGANIZATION_ID + ) ) + def test_create_digest_prefix_without_key_prefix(self): + mock_s3_client_provider = mock.Mock() + provider = DigestProvider( + mock_s3_client_provider, TEST_ACCOUNT_ID, 'foo', 'us-east-1') + prefix = provider._create_digest_prefix(START_DATE, None) + expected = 'AWSLogs/{account}/CloudTrail-Digest/us-east-1'.format( + account=TEST_ACCOUNT_ID) + self.assertEqual(expected, prefix) + + def test_create_digest_prefix_with_key_prefix(self): + mock_s3_client_provider = mock.Mock() + provider = DigestProvider( + mock_s3_client_provider, TEST_ACCOUNT_ID, 'foo', 'us-east-1') + prefix = provider._create_digest_prefix(START_DATE, 'my-prefix') + expected = 'my-prefix/AWSLogs/{account}/CloudTrail-Digest/us-east-1'.format( + account=TEST_ACCOUNT_ID) + self.assertEqual(expected, prefix) + + def test_create_digest_prefix_org_trail(self): + mock_s3_client_provider = mock.Mock() + provider = DigestProvider( + mock_s3_client_provider, TEST_ORGANIZATION_ACCOUNT_ID, + 'foo', 'us-east-1', 'us-east-1', TEST_ORGANIZATION_ID) + prefix = provider._create_digest_prefix(START_DATE, None) + expected = 
'AWSLogs/{org}/{account}/CloudTrail-Digest/us-east-1'.format( + org=TEST_ORGANIZATION_ID, + account=TEST_ORGANIZATION_ACCOUNT_ID) + self.assertEqual(expected, prefix) + + def test_create_digest_prefix_org_trail_with_key_prefix(self): + mock_s3_client_provider = mock.Mock() + provider = DigestProvider( + mock_s3_client_provider, TEST_ORGANIZATION_ACCOUNT_ID, + 'foo', 'us-east-1', 'us-east-1', TEST_ORGANIZATION_ID) + prefix = provider._create_digest_prefix(START_DATE, 'custom-prefix') + expected = 'custom-prefix/AWSLogs/{org}/{account}/CloudTrail-Digest/us-east-1'.format( + org=TEST_ORGANIZATION_ID, + account=TEST_ORGANIZATION_ACCOUNT_ID) + self.assertEqual(expected, prefix) + def test_ensures_digest_has_proper_metadata(self): out = BytesIO() f = gzip.GzipFile(fileobj=out, mode="wb") diff --git a/tests/unit/customizations/ecs/expressgateway/test_display_strategy.py b/tests/unit/customizations/ecs/expressgateway/test_display_strategy.py new file mode 100644 index 000000000000..9aa97cbb410c --- /dev/null +++ b/tests/unit/customizations/ecs/expressgateway/test_display_strategy.py @@ -0,0 +1,293 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. 
A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ + +import asyncio +import time +from unittest.mock import Mock, patch + +import pytest +from botocore.exceptions import ClientError +from prompt_toolkit.application import create_app_session +from prompt_toolkit.output import DummyOutput + +from awscli.customizations.ecs.expressgateway.display_strategy import ( + DisplayStrategy, + InteractiveDisplayStrategy, + TextOnlyDisplayStrategy, +) + + +class TestDisplayStrategy: + """Test base DisplayStrategy class.""" + + def test_base_strategy_not_implemented(self): + """Test base class raises NotImplementedError.""" + strategy = DisplayStrategy() + with pytest.raises(NotImplementedError): + strategy.execute_monitoring(None, None, None) + + +@pytest.fixture +def app_session(): + """Fixture that creates and manages an app session for prompt_toolkit.""" + with create_app_session(output=DummyOutput()) as session: + yield session + + +@pytest.fixture +def mock_display(): + """Fixture that creates a mock display for testing.""" + + async def mock_run_async(): + await asyncio.sleep(0.01) + + display = Mock() + display.display = Mock() + display.run = Mock(return_value=mock_run_async()) + return display + + +class TestInteractiveDisplayStrategy: + """Test InteractiveDisplayStrategy.""" + + @patch('time.sleep') + def test_execute_with_mock_display( + self, mock_sleep, app_session, mock_display + ): + """Test strategy executes with mocked display.""" + mock_collector = Mock() + mock_collector.get_current_view = Mock( + return_value="Test output {SPINNER}" + ) + + strategy = InteractiveDisplayStrategy( + display=mock_display, use_color=True + ) + + mock_sleep.side_effect = KeyboardInterrupt() + + start_time = time.time() + strategy.execute_monitoring( + mock_collector, start_time, timeout_minutes=1 + ) + + # Verify display was called + assert mock_display.display.called + assert mock_display.run.called + + def 
test_strategy_uses_provided_color_setting(self): + """Test strategy respects use_color parameter.""" + mock_display = Mock() + + strategy_with_color = InteractiveDisplayStrategy( + display=mock_display, use_color=True + ) + assert strategy_with_color.use_color is True + + strategy_no_color = InteractiveDisplayStrategy( + display=mock_display, use_color=False + ) + assert strategy_no_color.use_color is False + + @patch('time.sleep') + def test_completion_message_on_normal_exit( + self, mock_sleep, app_session, mock_display, capsys + ): + """Test displays completion message when monitoring completes normally.""" + mock_collector = Mock() + mock_collector.get_current_view = Mock(return_value="Resources ready") + + strategy = InteractiveDisplayStrategy( + display=mock_display, use_color=True + ) + + mock_sleep.side_effect = KeyboardInterrupt() + + start_time = time.time() + strategy.execute_monitoring( + mock_collector, start_time, timeout_minutes=1 + ) + + captured = capsys.readouterr() + assert "Monitoring Complete!" in captured.out + assert "Monitoring timed out!" 
not in captured.out + + @patch('time.sleep') + def test_collector_output_is_displayed( + self, mock_sleep, app_session, mock_display, capsys + ): + """Test that collector output appears in final output.""" + mock_collector = Mock() + unique_output = "LoadBalancer lb-12345 ACTIVE" + mock_collector.get_current_view = Mock(return_value=unique_output) + + strategy = InteractiveDisplayStrategy( + display=mock_display, use_color=True + ) + + mock_sleep.side_effect = KeyboardInterrupt() + + start_time = time.time() + strategy.execute_monitoring( + mock_collector, start_time, timeout_minutes=1 + ) + + captured = capsys.readouterr() + assert unique_output in captured.out + + @patch('time.sleep') + def test_execute_handles_service_inactive( + self, mock_sleep, app_session, mock_display, capsys + ): + """Test strategy handles service inactive error.""" + mock_collector = Mock() + error = ClientError( + error_response={ + 'Error': { + 'Code': 'InvalidParameterException', + 'Message': 'Cannot call DescribeServiceRevisions for a service that is INACTIVE', + } + }, + operation_name='DescribeServiceRevisions', + ) + mock_collector.get_current_view = Mock(side_effect=error) + + strategy = InteractiveDisplayStrategy( + display=mock_display, use_color=True + ) + + mock_sleep.side_effect = KeyboardInterrupt() + + start_time = time.time() + strategy.execute_monitoring( + mock_collector, start_time, timeout_minutes=1 + ) + + # Strategy should handle the error and set output to "Service is inactive" + captured = capsys.readouterr() + assert "Service is inactive" in captured.out + + @patch('time.sleep') + def test_execute_other_client_errors_propagate( + self, mock_sleep, app_session, mock_display + ): + """Test strategy propagates non-service-inactive ClientErrors.""" + mock_collector = Mock() + error = ClientError( + error_response={ + 'Error': { + 'Code': 'AccessDeniedException', + 'Message': 'Access denied', + } + }, + operation_name='DescribeServiceRevisions', + ) + 
mock_collector.get_current_view = Mock(side_effect=error) + + strategy = InteractiveDisplayStrategy( + display=mock_display, use_color=True + ) + + mock_sleep.side_effect = KeyboardInterrupt() + + start_time = time.time() + + # Other client errors should propagate + with pytest.raises(ClientError) as exc_info: + strategy.execute_monitoring( + mock_collector, start_time, timeout_minutes=1 + ) + + assert ( + exc_info.value.response['Error']['Code'] == 'AccessDeniedException' + ) + + @patch('time.sleep') + def test_display_cleanup_on_exception( + self, mock_sleep, app_session, mock_display + ): + """Test display app is properly shut down when exception occurs.""" + mock_collector = Mock() + error = ClientError( + error_response={'Error': {'Code': 'ThrottlingException'}}, + operation_name='DescribeServiceRevisions', + ) + mock_collector.get_current_view = Mock(side_effect=error) + + strategy = InteractiveDisplayStrategy( + display=mock_display, use_color=True + ) + mock_sleep.side_effect = KeyboardInterrupt() + + with pytest.raises(ClientError): + strategy.execute_monitoring( + mock_collector, time.time(), timeout_minutes=1 + ) + + # Verify app.exit() was called in finally block despite exception + mock_display.app.exit.assert_called() + + +class TestTextOnlyDisplayStrategy: + """Test TextOnlyDisplayStrategy.""" + + @patch('time.sleep') + def test_execute_with_mock_collector(self, mock_sleep, capsys): + """Test strategy executes sync loop with text output.""" + mock_collector = Mock() + mock_collector.get_current_view = Mock(return_value="Test output") + mock_collector.cached_monitor_result = (None, "Test info") + + strategy = TextOnlyDisplayStrategy(use_color=True) + + # Make sleep raise to exit loop after first iteration + mock_sleep.side_effect = KeyboardInterrupt() + + start_time = time.time() + strategy.execute_monitoring( + mock_collector, start_time, timeout_minutes=1 + ) + + output = capsys.readouterr().out + printed_output = output + assert "Starting 
monitoring" in printed_output + assert "stopped by user" in printed_output + assert "complete" in printed_output + + @patch('time.sleep') + @patch('time.time') + def test_execute_handles_timeout(self, mock_time, mock_sleep, capsys): + """Test strategy handles timeout correctly.""" + mock_collector = Mock() + mock_collector.get_current_view = Mock(return_value="Test output") + mock_collector.cached_monitor_result = (None, None) + + strategy = TextOnlyDisplayStrategy(use_color=True) + + # Simulate timeout after first poll + start_time = 1000.0 + mock_time.side_effect = [ + 1000.0, # First check - within timeout + 2000.0, # Second check - exceeded timeout + ] + + strategy.execute_monitoring( + mock_collector, start_time, timeout_minutes=1 + ) + + output = capsys.readouterr().out + printed_output = output + assert "timeout reached" in printed_output.lower() + + def test_strategy_uses_provided_color_setting(self): + """Test strategy respects use_color parameter.""" + strategy_with_color = TextOnlyDisplayStrategy(use_color=True) + assert strategy_with_color.stream_display.use_color is True + + strategy_no_color = TextOnlyDisplayStrategy(use_color=False) + assert strategy_no_color.stream_display.use_color is False diff --git a/tests/unit/customizations/ecs/expressgateway/test_managedresource.py b/tests/unit/customizations/ecs/expressgateway/test_managedresource.py index db8157e066a2..2b253d4e7317 100644 --- a/tests/unit/customizations/ecs/expressgateway/test_managedresource.py +++ b/tests/unit/customizations/ecs/expressgateway/test_managedresource.py @@ -1,4 +1,6 @@ -import unittest +import re + +import pytest from awscli.customizations.ecs.expressgateway.managedresource import ( TERMINAL_RESOURCE_STATUSES, @@ -6,33 +8,33 @@ ) -class TestManagedResource(unittest.TestCase): +class TestManagedResource: def test_is_terminal_active(self): resource = ManagedResource( "LoadBalancer", "lb-123", "ACTIVE", 1761230543.151 ) - self.assertTrue(resource.is_terminal()) + assert 
resource.is_terminal() def test_is_terminal_failed(self): resource = ManagedResource( "LoadBalancer", "lb-123", "FAILED", 1761230543.151 ) - self.assertTrue(resource.is_terminal()) + assert resource.is_terminal() def test_is_terminal_provisioning(self): resource = ManagedResource( "LoadBalancer", "lb-123", "PROVISIONING", 1761230543.151 ) - self.assertFalse(resource.is_terminal()) + assert not resource.is_terminal() def test_get_status_string_active(self): resource = ManagedResource( "LoadBalancer", "lb-123", "ACTIVE", 1761230543.151 ) status_string = resource.get_status_string("⠋") - self.assertIn("LoadBalancer", status_string) - self.assertIn("lb-123", status_string) - self.assertIn("ACTIVE", status_string) + assert "LoadBalancer" in status_string + assert "lb-123" in status_string + assert "ACTIVE" in status_string def test_get_status_string_failed_with_reason(self): resource = ManagedResource( @@ -43,8 +45,8 @@ def test_get_status_string_failed_with_reason(self): "Connection timeout", ) status_string = resource.get_status_string("⠋") - self.assertIn("FAILED", status_string) - self.assertIn("Connection timeout", status_string) + assert "FAILED" in status_string + assert "Connection timeout" in status_string def test_get_status_string_active_with_reason(self): resource = ManagedResource( @@ -55,8 +57,8 @@ def test_get_status_string_active_with_reason(self): "Load balancer ready", ) status_string = resource.get_status_string("⠋") - self.assertIn("ACTIVE", status_string) - self.assertIn("Load balancer ready", status_string) + assert "ACTIVE" in status_string + assert "Load balancer ready" in status_string def test_combine_newer_resource(self): older = ManagedResource( @@ -66,7 +68,7 @@ def test_combine_newer_resource(self): "LoadBalancer", "lb-123", "ACTIVE", 1761230600.151 ) result = older.combine(newer) - self.assertEqual(result, newer) + assert result == newer def test_combine_older_resource(self): older = ManagedResource( @@ -76,33 +78,33 @@ def 
test_combine_older_resource(self): "LoadBalancer", "lb-123", "ACTIVE", 1761230600.151 ) result = newer.combine(older) - self.assertEqual(result, newer) + assert result == newer def test_combine_with_none(self): resource = ManagedResource( "LoadBalancer", "lb-123", "ACTIVE", 1761230543.151 ) result = resource.combine(None) - self.assertEqual(result, resource) + assert result == resource def test_is_terminal_deleted_status(self): resource = ManagedResource("LoadBalancer", "lb-123", "DELETED") - self.assertTrue(resource.is_terminal()) + assert resource.is_terminal() def test_is_terminal_no_status(self): resource = ManagedResource("LoadBalancer", "lb-123", None) - self.assertFalse(resource.is_terminal()) + assert not resource.is_terminal() def test_init_with_string_timestamp(self): resource = ManagedResource( "LoadBalancer", "lb-123", "ACTIVE", "2025-11-05T18:00:00Z" ) - self.assertIsInstance(resource.updated_at, float) - self.assertGreater(resource.updated_at, 0) + assert isinstance(resource.updated_at, float) + assert resource.updated_at > 0 def test_init_with_none_timestamp(self): resource = ManagedResource("LoadBalancer", "lb-123", "ACTIVE", None) - self.assertIsNone(resource.updated_at) + assert resource.updated_at is None def test_combine_with_no_timestamp(self): resource1 = ManagedResource( @@ -112,7 +114,7 @@ def test_combine_with_no_timestamp(self): "LoadBalancer", "lb-123", "PROVISIONING", None ) result = resource1.combine(resource2) - self.assertEqual(result, resource1) + assert result == resource1 def test_combine_equal_timestamps(self): timestamp = 1761230543.151 @@ -123,7 +125,7 @@ def test_combine_equal_timestamps(self): "LoadBalancer", "lb-123", "PROVISIONING", timestamp ) result = resource1.combine(resource2) - self.assertEqual(result, resource1) + assert result == resource1 def test_get_status_string_with_depth(self): resource = ManagedResource( @@ -132,7 +134,7 @@ def test_get_status_string_with_depth(self): status_string = 
resource.get_status_string("⠋", depth=2) # Should have proper indentation lines = status_string.split('\n') - self.assertTrue(lines[0].startswith(" ")) # 2 spaces for depth=2 + assert lines[0].startswith(" ") # 2 spaces for depth=2 def test_get_status_string_with_additional_info(self): resource = ManagedResource( @@ -143,44 +145,159 @@ def test_get_status_string_with_additional_info(self): additional_info="Load balancer is healthy", ) status_string = resource.get_status_string("⠋") - self.assertIn("Load balancer is healthy", status_string) + assert "Load balancer is healthy" in status_string def test_get_status_string_no_identifier(self): resource = ManagedResource( "LoadBalancer", None, "ACTIVE", 1761230543.151 ) status_string = resource.get_status_string("⠋") - self.assertIn("LoadBalancer", status_string) - self.assertIn("ACTIVE", status_string) + assert "LoadBalancer" in status_string + assert "ACTIVE" in status_string def test_get_status_string_no_color(self): resource = ManagedResource( "LoadBalancer", "lb-123", "ACTIVE", 1761230543.151 ) status_string = resource.get_status_string("⠋", use_color=False) - self.assertIn("LoadBalancer", status_string) - self.assertIn("lb-123", status_string) - self.assertIn("ACTIVE", status_string) + assert "LoadBalancer" in status_string + assert "lb-123" in status_string + assert "ACTIVE" in status_string # Should not contain ANSI color codes - self.assertNotIn("\x1b[", status_string) + assert "\x1b[" not in status_string def test_get_status_string_with_color(self): resource = ManagedResource( "LoadBalancer", "lb-123", "ACTIVE", 1761230543.151 ) status_string = resource.get_status_string("⠋", use_color=True) - self.assertIn("LoadBalancer", status_string) - self.assertIn("lb-123", status_string) - self.assertIn("ACTIVE", status_string) + assert "LoadBalancer" in status_string + assert "lb-123" in status_string + assert "ACTIVE" in status_string # Should contain ANSI color codes - self.assertIn("\x1b[", status_string) + assert 
"\x1b[" in status_string + def test_get_stream_string_basic(self): + resource = ManagedResource( + "LoadBalancer", "lb-123", "ACTIVE", 1761230543.151 + ) + stream_string = resource.get_stream_string("2025-12-15 10:00:00") + assert "[2025-12-15 10:00:00]" in stream_string + assert "LoadBalancer" in stream_string + assert "lb-123" in stream_string + assert "ACTIVE" in stream_string -class TestConstants(unittest.TestCase): - def test_terminal_resource_statuses(self): - expected_statuses = ["ACTIVE", "DELETED", "FAILED"] - self.assertEqual(TERMINAL_RESOURCE_STATUSES, expected_statuses) + def test_get_stream_string_with_reason(self): + resource = ManagedResource( + "LoadBalancer", + "lb-123", + "PROVISIONING", + 1761230543.151, + "Waiting for DNS propagation", + ) + stream_string = resource.get_stream_string("2025-12-15 10:00:00") + assert "Reason: Waiting for DNS propagation" in stream_string + + def test_get_stream_string_with_additional_info(self): + resource = ManagedResource( + "LoadBalancer", + "lb-123", + "ACTIVE", + 1761230543.151, + additional_info="DNS: example.elb.amazonaws.com", + ) + stream_string = resource.get_stream_string("2025-12-15 10:00:00") + assert "Info: DNS: example.elb.amazonaws.com" in stream_string + + def test_get_stream_string_with_updated_at(self): + resource = ManagedResource( + "LoadBalancer", "lb-123", "ACTIVE", 1761230543.151 + ) + stream_string = resource.get_stream_string("2025-12-15 10:00:00") + assert "Last Updated At:" in stream_string + # Check timestamp format YYYY-MM-DD HH:MM:SS + assert re.search(r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}", stream_string) + def test_get_stream_string_all_fields(self): + resource = ManagedResource( + "TargetGroup", + "tg-456", + "PROVISIONING", + 1761230543.151, + "Registering targets", + "Health check interval: 30s", + ) + stream_string = resource.get_stream_string("2025-12-15 10:00:00") + assert "[2025-12-15 10:00:00]" in stream_string + assert "TargetGroup" in stream_string + assert "tg-456" in 
stream_string + assert "PROVISIONING" in stream_string + assert "Reason: Registering targets" in stream_string + assert "Last Updated At:" in stream_string + assert "Info: Health check interval: 30s" in stream_string -if __name__ == '__main__': - unittest.main() + def test_get_stream_string_no_identifier(self): + resource = ManagedResource( + "LoadBalancer", None, "ACTIVE", 1761230543.151 + ) + stream_string = resource.get_stream_string("2025-12-15 10:00:00") + assert "[2025-12-15 10:00:00]" in stream_string + assert "LoadBalancer" in stream_string + assert "ACTIVE" in stream_string + assert "None" not in stream_string + + def test_get_stream_string_no_status(self): + resource = ManagedResource( + "LoadBalancer", "lb-123", None, 1761230543.151 + ) + stream_string = resource.get_stream_string("2025-12-15 10:00:00") + assert "LoadBalancer" in stream_string + assert "lb-123" in stream_string + # Should not have status brackets when status is None + assert "[None]" not in stream_string + + def test_get_stream_string_no_color(self): + resource = ManagedResource( + "LoadBalancer", "lb-123", "ACTIVE", 1761230543.151 + ) + stream_string = resource.get_stream_string( + "2025-12-15 10:00:00", use_color=False + ) + assert "LoadBalancer" in stream_string + assert "lb-123" in stream_string + assert "ACTIVE" in stream_string + # Should not contain ANSI color codes + assert "\x1b[" not in stream_string + + def test_get_stream_string_with_color(self): + resource = ManagedResource( + "LoadBalancer", "lb-123", "ACTIVE", 1761230543.151 + ) + stream_string = resource.get_stream_string( + "2025-12-15 10:00:00", use_color=True + ) + assert "LoadBalancer" in stream_string + assert "lb-123" in stream_string + assert "ACTIVE" in stream_string + # Should contain ANSI color codes + assert "\x1b[" in stream_string + + def test_get_stream_string_failed_status(self): + resource = ManagedResource( + "LoadBalancer", + "lb-123", + "FAILED", + 1761230543.151, + "Connection timeout", + ) + 
stream_string = resource.get_stream_string("2025-12-15 10:00:00") + assert "FAILED" in stream_string + assert "Reason: Connection timeout" in stream_string + # Failed status should use color coding + assert "\x1b[" in stream_string + + +class TestConstants: + def test_terminal_resource_statuses(self): + expected_statuses = ["ACTIVE", "DELETED", "FAILED"] + assert TERMINAL_RESOURCE_STATUSES == expected_statuses diff --git a/tests/unit/customizations/ecs/expressgateway/test_managedresourcegroup.py b/tests/unit/customizations/ecs/expressgateway/test_managedresourcegroup.py index f6d99e6bf6f8..d32300e4c5a3 100644 --- a/tests/unit/customizations/ecs/expressgateway/test_managedresourcegroup.py +++ b/tests/unit/customizations/ecs/expressgateway/test_managedresourcegroup.py @@ -1,4 +1,4 @@ -import unittest +import pytest from awscli.customizations.ecs.expressgateway.managedresource import ( ManagedResource, @@ -8,172 +8,112 @@ ) -class TestManagedResourceGroup(unittest.TestCase): - def setUp(self): - self.resource1 = ManagedResource( - "LoadBalancer", "lb-123", "ACTIVE", 1761230543.151 - ) - self.resource2 = ManagedResource( - "Certificate", "cert-456", "PROVISIONING", 1761230543.151 - ) +@pytest.fixture +def resource1(): + return ManagedResource("LoadBalancer", "lb-123", "ACTIVE", 1761230543.151) + +@pytest.fixture +def resource2(): + return ManagedResource( + "Certificate", "cert-456", "PROVISIONING", 1761230543.151 + ) + + +class TestManagedResourceGroup: def test_is_terminal_all_terminal(self): terminal_resource = ManagedResource( "LoadBalancer", "lb-123", "ACTIVE", 1761230543.151 ) group = ManagedResourceGroup(resources=[terminal_resource]) - self.assertTrue(group.is_terminal()) + assert group.is_terminal() - def test_is_terminal_mixed(self): - group = ManagedResourceGroup( - resources=[self.resource1, self.resource2] - ) - self.assertFalse(group.is_terminal()) + def test_is_terminal_mixed(self, resource1, resource2): + group = 
ManagedResourceGroup(resources=[resource1, resource2]) + assert not group.is_terminal() def test_is_terminal_empty(self): group = ManagedResourceGroup() - self.assertTrue(group.is_terminal()) + assert group.is_terminal() - def test_get_status_string_with_header(self): + def test_get_status_string_with_header(self, resource1): group = ManagedResourceGroup( resource_type="IngressPaths", identifier="endpoint-1", - resources=[self.resource1], + resources=[resource1], ) status_string = group.get_status_string("⠋") - self.assertIn("IngressPaths", status_string) - self.assertIn("endpoint-1", status_string) - - def test_create_key(self): - group = ManagedResourceGroup() - key = group._create_key(self.resource1) - self.assertEqual(key, "LoadBalancer/lb-123") - - def test_get_status_string_empty_group(self): - group = ManagedResourceGroup(resource_type="EmptyGroup", resources=[]) - status_string = group.get_status_string("⠋") - self.assertIn("EmptyGroup", status_string) - self.assertIn("", status_string) + assert "IngressPaths" in status_string + assert "endpoint-1" in status_string - def test_combine_resource_groups(self): - group1 = ManagedResourceGroup(resources=[self.resource1]) - group2 = ManagedResourceGroup(resources=[self.resource2]) - combined = group1.combine(group2) - self.assertEqual(len(combined.resource_mapping), 2) + def test_compare_resource_sets_unique_resources( + self, resource1, resource2 + ): + # Test compare_resource_sets with completely different resources + group1 = ManagedResourceGroup(resources=[resource1]) # LoadBalancer + group2 = ManagedResourceGroup(resources=[resource2]) # Certificate - def test_combine_child_resources_both_none(self): - group = ManagedResourceGroup() - result = group._combine_child_resources(None, None) - self.assertIsNone(result) - - def test_combine_child_resources_first_none(self): - group = ManagedResourceGroup() - resource = ManagedResource("LoadBalancer", "lb-123", "ACTIVE") - result = 
group._combine_child_resources(None, resource) - self.assertEqual(result, resource) - - def test_combine_overlapping_resources(self): - older_resource = ManagedResource( - "LoadBalancer", "lb-123", "PROVISIONING", 1761230543.151 - ) - newer_resource = ManagedResource( - "LoadBalancer", "lb-123", "ACTIVE", 1761230600.151 - ) - - group1 = ManagedResourceGroup(resources=[older_resource]) - group2 = ManagedResourceGroup(resources=[newer_resource]) - - combined = group1.combine(group2) - - key = "LoadBalancer/lb-123" - self.assertIn(key, combined.resource_mapping) - self.assertEqual(combined.resource_mapping[key].status, "ACTIVE") - - def test_create_key_with_none_values(self): - group = ManagedResourceGroup() - resource = ManagedResource(None, None) - key = group._create_key(resource) - self.assertEqual(key, "/") - - def test_create_key_partial_none(self): - group = ManagedResourceGroup() - - resource1 = ManagedResource(None, "identifier") - key1 = group._create_key(resource1) - self.assertEqual(key1, "/identifier") - - resource2 = ManagedResource("ResourceType", None) - key2 = group._create_key(resource2) - self.assertEqual(key2, "ResourceType/") - - def test_diff_unique_resources(self): - # Test diff with completely different resources - group1 = ManagedResourceGroup( - resources=[self.resource1] - ) # LoadBalancer - group2 = ManagedResourceGroup( - resources=[self.resource2] - ) # Certificate - - diff1, diff2 = group1.diff(group2) + diff1, diff2 = group1.compare_resource_sets(group2) # Each group should contain its unique resource - self.assertEqual(len(diff1.resource_mapping), 1) - self.assertEqual(len(diff2.resource_mapping), 1) - self.assertIn("LoadBalancer/lb-123", diff1.resource_mapping) - self.assertIn("Certificate/cert-456", diff2.resource_mapping) - - def test_diff_overlapping_resources(self): - # Test diff with same resource type but different identifiers + assert len(diff1.resource_mapping) == 1 + assert len(diff2.resource_mapping) == 1 + assert 
"LoadBalancer/lb-123" in diff1.resource_mapping + assert "Certificate/cert-456" in diff2.resource_mapping + + def test_compare_resource_sets_overlapping_resources( + self, resource1, resource2 + ): + # Test compare_resource_sets with same resource type but different identifiers resource3 = ManagedResource( "LoadBalancer", "lb-456", "FAILED", 1761230600.151 ) group1 = ManagedResourceGroup( - resources=[self.resource1, self.resource2] + resources=[resource1, resource2] ) # lb-123, cert-456 group2 = ManagedResourceGroup( - resources=[self.resource2, resource3] + resources=[resource2, resource3] ) # cert-456, lb-456 - diff1, diff2 = group1.diff(group2) + diff1, diff2 = group1.compare_resource_sets(group2) # group1 unique: lb-123, group2 unique: lb-456, common: cert-456 (should not appear in diff) - self.assertEqual(len(diff1.resource_mapping), 1) - self.assertEqual(len(diff2.resource_mapping), 1) - self.assertIn("LoadBalancer/lb-123", diff1.resource_mapping) - self.assertIn("LoadBalancer/lb-456", diff2.resource_mapping) + assert len(diff1.resource_mapping) == 1 + assert len(diff2.resource_mapping) == 1 + assert "LoadBalancer/lb-123" in diff1.resource_mapping + assert "LoadBalancer/lb-456" in diff2.resource_mapping # Common resource should not be in either diff - self.assertNotIn("Certificate/cert-456", diff1.resource_mapping) - self.assertNotIn("Certificate/cert-456", diff2.resource_mapping) + assert "Certificate/cert-456" not in diff1.resource_mapping + assert "Certificate/cert-456" not in diff2.resource_mapping - def test_diff_identical_groups(self): - # Test diff with identical resource groups - group1 = ManagedResourceGroup( - resources=[self.resource1, self.resource2] - ) - group2 = ManagedResourceGroup( - resources=[self.resource1, self.resource2] - ) + def test_compare_resource_sets_identical_groups( + self, resource1, resource2 + ): + # Test compare_resource_sets with identical resource groups + group1 = ManagedResourceGroup(resources=[resource1, resource2]) + 
group2 = ManagedResourceGroup(resources=[resource1, resource2]) - diff1, diff2 = group1.diff(group2) + diff1, diff2 = group1.compare_resource_sets(group2) # No differences should be found - self.assertEqual(len(diff1.resource_mapping), 0) - self.assertEqual(len(diff2.resource_mapping), 0) + assert len(diff1.resource_mapping) == 0 + assert len(diff2.resource_mapping) == 0 - def test_diff_empty_groups(self): - # Test diff with empty groups - group1 = ManagedResourceGroup(resources=[self.resource1]) + def test_compare_resource_sets_empty_groups(self, resource1): + # Test compare_resource_sets with empty groups + group1 = ManagedResourceGroup(resources=[resource1]) group2 = ManagedResourceGroup(resources=[]) - diff1, diff2 = group1.diff(group2) + diff1, diff2 = group1.compare_resource_sets(group2) # group1 should contain its resource, group2 should be empty - self.assertEqual(len(diff1.resource_mapping), 1) - self.assertEqual(len(diff2.resource_mapping), 0) - self.assertIn("LoadBalancer/lb-123", diff1.resource_mapping) + assert len(diff1.resource_mapping) == 1 + assert len(diff2.resource_mapping) == 0 + assert "LoadBalancer/lb-123" in diff1.resource_mapping - def test_diff_excludes_matching_types_without_identifier(self): + def test_compare_resource_sets_excludes_matching_types_without_identifier( + self, + ): # Test that resources in other are excluded if self has same type without identifier resource_without_id = ManagedResource("LoadBalancer", None) resource_with_id = ManagedResource("LoadBalancer", "lb-456") @@ -185,79 +125,75 @@ def test_diff_excludes_matching_types_without_identifier(self): resources=[resource_with_id] ) # LoadBalancer/lb-456 - diff1, diff2 = group1.diff(group2) + diff1, diff2 = group1.compare_resource_sets(group2) # group1 should contain its resource without identifier - self.assertEqual(len(diff1.resource_mapping), 1) - self.assertIn("LoadBalancer/", diff1.resource_mapping) + assert len(diff1.resource_mapping) == 1 + assert "LoadBalancer/" in 
diff1.resource_mapping # group2 should be empty because LoadBalancer/lb-456 is excluded by LoadBalancer/ - self.assertEqual(len(diff2.resource_mapping), 0) + assert len(diff2.resource_mapping) == 0 def test_get_status_string_with_status(self): group = ManagedResourceGroup( resource_type="IngressPaths", identifier="test-id", status="ACTIVE" ) result = group.get_status_string("⠋") - self.assertIn("IngressPaths", result) - self.assertIn("test-id", result) - self.assertIn("✓", result) # Green checkmark for ACTIVE - self.assertIn("ACTIVE", result) + assert "IngressPaths" in result + assert "test-id" in result + assert "✓" in result # Green checkmark for ACTIVE + assert "ACTIVE" in result def test_get_status_string_without_status(self): group = ManagedResourceGroup( resource_type="IngressPaths", identifier="test-id" ) result = group.get_status_string("⠋") - self.assertIn("IngressPaths", result) - self.assertIn("test-id", result) - self.assertNotIn("✓", result) # No symbol when no status - self.assertNotIn("ACTIVE", result) + assert "IngressPaths" in result + assert "test-id" in result + assert "✓" not in result # No symbol when no status + assert "ACTIVE" not in result def test_get_status_string_status_without_identifier(self): group = ManagedResourceGroup( resource_type="IngressPaths", status="FAILED" ) result = group.get_status_string("⠋") - self.assertIn("IngressPaths", result) - self.assertIn("X", result) # Red X for FAILED - self.assertIn("FAILED", result) + assert "IngressPaths" in result + assert "X" in result # Red X for FAILED + assert "FAILED" in result - def test_get_status_string_no_color(self): + def test_get_status_string_no_color(self, resource1): group = ManagedResourceGroup( resource_type="IngressPaths", identifier="test-id", status="ACTIVE", - resources=[self.resource1], + resources=[resource1], ) result = group.get_status_string("⠋", use_color=False) - self.assertIn("IngressPaths", result) - self.assertIn("test-id", result) - self.assertIn("✓", result) 
# Checkmark should still be there - self.assertIn("ACTIVE", result) + assert "IngressPaths" in result + assert "test-id" in result + assert "✓" in result # Checkmark should still be there + assert "ACTIVE" in result # Should not contain ANSI color codes - self.assertNotIn("\x1b[", result) + assert "\x1b[" not in result - def test_get_status_string_with_color(self): + def test_get_status_string_with_color(self, resource1): group = ManagedResourceGroup( resource_type="IngressPaths", identifier="test-id", status="ACTIVE", - resources=[self.resource1], + resources=[resource1], ) result = group.get_status_string("⠋", use_color=True) - self.assertIn("IngressPaths", result) - self.assertIn("test-id", result) - self.assertIn("✓", result) # Checkmark should be there - self.assertIn("ACTIVE", result) + assert "IngressPaths" in result + assert "test-id" in result + assert "✓" in result # Checkmark should be there + assert "ACTIVE" in result # Should contain ANSI color codes - self.assertIn("\x1b[", result) + assert "\x1b[" in result def test_combine_prioritizes_resources_with_identifier(self): - from awscli.customizations.ecs.expressgateway.managedresource import ( - ManagedResource, - ) - resource_with_id = ManagedResource( "LoadBalancer", "lb-123", "ACTIVE", 1761230543.151 ) @@ -279,10 +215,232 @@ def test_combine_prioritizes_resources_with_identifier(self): result = group1.combine(group2) # Should only have the resource with identifier - self.assertEqual(len(result.resource_mapping), 1) + assert len(result.resource_mapping) == 1 combined_resource = list(result.resource_mapping.values())[0] - self.assertEqual(combined_resource.identifier, "lb-123") + assert combined_resource.identifier == "lb-123" + + def test_get_stream_string_empty_group(self): + """Test empty resource group returns empty string""" + group = ManagedResourceGroup() + result = group.get_stream_string("2025-12-15 10:00:00") + assert result == "" + + def test_get_stream_string_single_resource(self): + """Test 
resource group with single resource""" + resource = ManagedResource( + "LoadBalancer", "lb-123", "ACTIVE", 1761230543.151 + ) + group = ManagedResourceGroup(resources=[resource]) + result = group.get_stream_string("2025-12-15 10:00:00") + assert "[2025-12-15 10:00:00]" in result + assert "LoadBalancer" in result + assert "lb-123" in result + assert "ACTIVE" in result + + def test_get_stream_string_multiple_resources(self): + """Test resource group with multiple resources""" + resource1 = ManagedResource( + "LoadBalancer", "lb-123", "ACTIVE", 1761230543.151 + ) + resource2 = ManagedResource( + "TargetGroup", "tg-456", "PROVISIONING", 1761230543.151 + ) + group = ManagedResourceGroup(resources=[resource1, resource2]) + result = group.get_stream_string("2025-12-15 10:00:00") + + # Should have both resources + assert "LoadBalancer" in result + assert "lb-123" in result + assert "TargetGroup" in result + assert "tg-456" in result + + # Should have newline between resources + lines = result.split("\n") + assert len(lines) > 1 + + def test_get_stream_string_nested_groups(self): + """Test resource group with nested groups""" + resource1 = ManagedResource( + "LoadBalancer", "lb-123", "ACTIVE", 1761230543.151 + ) + resource2 = ManagedResource( + "TargetGroup", "tg-456", "ACTIVE", 1761230543.151 + ) + nested_group = ManagedResourceGroup(resources=[resource2]) + + group = ManagedResourceGroup(resources=[resource1, nested_group]) + result = group.get_stream_string("2025-12-15 10:00:00") + + # Should have flattened both resources + assert "LoadBalancer" in result + assert "lb-123" in result + assert "TargetGroup" in result + assert "tg-456" in result + + def test_get_stream_string_deeply_nested_groups(self): + """Test resource group with multiple levels of nesting""" + resource1 = ManagedResource( + "Cluster", "cluster-1", "ACTIVE", 1761230543.151 + ) + resource2 = ManagedResource( + "Service", "service-1", "ACTIVE", 1761230543.151 + ) + resource3 = ManagedResource( + 
"LoadBalancer", "lb-123", "ACTIVE", 1761230543.151 + ) + + nested_level2 = ManagedResourceGroup(resources=[resource3]) + nested_level1 = ManagedResourceGroup( + resources=[resource2, nested_level2] + ) + group = ManagedResourceGroup(resources=[resource1, nested_level1]) + + result = group.get_stream_string("2025-12-15 10:00:00") + + # Should have all resources flattened + assert "Cluster" in result + assert "Service" in result + assert "LoadBalancer" in result + + def test_get_stream_string_with_resource_details(self): + """Test that resource details are preserved in stream output""" + resource = ManagedResource( + "LoadBalancer", + "lb-123", + "PROVISIONING", + 1761230543.151, + "Waiting for DNS propagation", + "DNS: example.elb.amazonaws.com", + ) + group = ManagedResourceGroup(resources=[resource]) + result = group.get_stream_string("2025-12-15 10:00:00") + + assert "Reason: Waiting for DNS propagation" in result + assert "Info: DNS: example.elb.amazonaws.com" in result + assert "Last Updated At:" in result + + def test_get_stream_string_preserves_order(self): + """Test that resource order is preserved in output""" + resource1 = ManagedResource( + "Cluster", "cluster-1", "ACTIVE", 1761230543.151 + ) + resource2 = ManagedResource( + "Service", "service-1", "ACTIVE", 1761230543.151 + ) + resource3 = ManagedResource( + "LoadBalancer", "lb-123", "ACTIVE", 1761230543.151 + ) + + group = ManagedResourceGroup( + resources=[resource1, resource2, resource3] + ) + result = group.get_stream_string("2025-12-15 10:00:00") + + # Find positions of each resource type in output + cluster_pos = result.find("Cluster") + service_pos = result.find("Service") + lb_pos = result.find("LoadBalancer") + + # Order should be preserved + assert cluster_pos < service_pos + assert service_pos < lb_pos + + def test_get_stream_string_no_color(self): + """Test stream string without color codes""" + resource = ManagedResource( + "LoadBalancer", "lb-123", "ACTIVE", 1761230543.151 + ) + group = 
ManagedResourceGroup(resources=[resource]) + result = group.get_stream_string( + "2025-12-15 10:00:00", use_color=False + ) + + assert "LoadBalancer" in result + assert "lb-123" in result + # Should not contain ANSI color codes + assert "\x1b[" not in result + + def test_get_stream_string_with_color(self): + """Test stream string with color codes""" + resource = ManagedResource( + "LoadBalancer", "lb-123", "ACTIVE", 1761230543.151 + ) + group = ManagedResourceGroup(resources=[resource]) + result = group.get_stream_string("2025-12-15 10:00:00", use_color=True) + + assert "LoadBalancer" in result + assert "lb-123" in result + # Should contain ANSI color codes + assert "\x1b[" in result + + def test_get_stream_string_mixed_resource_types(self): + """Test group with various resource types and statuses""" + resources = [ + ManagedResource("Cluster", "cluster-1", "ACTIVE", 1761230543.151), + ManagedResource( + "Service", "service-1", "UPDATING", 1761230543.151 + ), + ManagedResource( + "LoadBalancer", + "lb-123", + "PROVISIONING", + 1761230543.151, + "Creating", + ), + ManagedResource( + "TargetGroup", "tg-456", "FAILED", 1761230543.151, "Error" + ), + ] + group = ManagedResourceGroup(resources=resources) + result = group.get_stream_string("2025-12-15 10:00:00") + + # All resources should be present + assert "Cluster" in result + assert "Service" in result + assert "LoadBalancer" in result + assert "TargetGroup" in result + + # All statuses should be present + assert "ACTIVE" in result + assert "UPDATING" in result + assert "PROVISIONING" in result + assert "FAILED" in result + + def test_get_stream_string_with_group_metadata(self): + """Test that group-level metadata doesn't affect stream output""" + resource = ManagedResource( + "LoadBalancer", "lb-123", "ACTIVE", 1761230543.151 + ) + group = ManagedResourceGroup( + resource_type="ManagedResources", + identifier="group-1", + resources=[resource], + status="ACTIVE", + reason="All resources healthy", + ) + result = 
group.get_stream_string("2025-12-15 10:00:00") + + # Should still show the actual resource, not group metadata + assert "LoadBalancer" in result + assert "lb-123" in result + + # Group metadata should not appear in stream output + # (stream output is for individual resources only) + assert "group-1" not in result + + def test_get_stream_string_empty_nested_group(self): + """Test nested group that is empty is handled correctly""" + resource = ManagedResource( + "LoadBalancer", "lb-123", "ACTIVE", 1761230543.151 + ) + empty_nested = ManagedResourceGroup() + group = ManagedResourceGroup(resources=[resource, empty_nested]) + result = group.get_stream_string("2025-12-15 10:00:00") -if __name__ == '__main__': - unittest.main() + # Should show the resource from non-empty group + assert "LoadBalancer" in result + # Empty nested group shouldn't add extra content + lines = [line for line in result.split("\n") if line.strip()] + # LoadBalancer resource produces multiple lines (timestamp line + optional detail lines) + assert len(lines) > 0 diff --git a/tests/unit/customizations/ecs/expressgateway/test_stream_display.py b/tests/unit/customizations/ecs/expressgateway/test_stream_display.py new file mode 100644 index 000000000000..4a86d3661ef7 --- /dev/null +++ b/tests/unit/customizations/ecs/expressgateway/test_stream_display.py @@ -0,0 +1,388 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. 
# A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/

import time

import pytest

from awscli.customizations.ecs.expressgateway.managedresource import (
    ManagedResource,
)
from awscli.customizations.ecs.expressgateway.managedresourcegroup import (
    ManagedResourceGroup,
)
from awscli.customizations.ecs.expressgateway.stream_display import (
    StreamDisplay,
)


@pytest.fixture
def display():
    """Provide a StreamDisplay instance with color output enabled."""
    return StreamDisplay(use_color=True)


class TestStreamDisplay:
    """Tests for StreamDisplay's text-based monitoring output."""

    def test_show_startup_message(self, display, capsys):
        """The startup banner carries a bracketed timestamp."""
        display.show_startup_message()

        captured = capsys.readouterr().out
        assert "Starting monitoring..." in captured
        assert "[" in captured

    def test_show_polling_message(self, display, capsys):
        """The polling notice is written with a timestamp."""
        display.show_polling_message()

        assert "Polling for updates..." in capsys.readouterr().out

    def test_show_monitoring_data_with_info(self, display, capsys):
        """An info-only call prints the message terminated by a newline."""
        display.show_monitoring_data(None, "Info message")

        captured = capsys.readouterr().out
        assert "Info message" in captured
        assert captured.endswith("\n")

    def test_show_monitoring_data_first_poll_shows_all(self, display, capsys):
        """Every resource is printed on the very first poll."""
        grp = ManagedResourceGroup(
            resources=[
                ManagedResource("LoadBalancer", "lb-123", "ACTIVE", None, None)
            ]
        )

        display.show_monitoring_data(grp, None)

        captured = capsys.readouterr().out
        assert "LoadBalancer" in captured
        assert "lb-123" in captured

    def test_show_monitoring_data_no_changes(self, display, capsys):
        """Nothing is printed when resources are unchanged between polls."""
        grp = ManagedResourceGroup(
            resources=[
                ManagedResource("LoadBalancer", "lb-123", "ACTIVE", None, None)
            ]
        )

        display.show_monitoring_data(grp, None)  # first poll prints everything
        capsys.readouterr()  # discard first-poll output

        display.show_monitoring_data(grp, None)  # identical second poll

        assert capsys.readouterr().out == ""

    def test_show_monitoring_data_with_new_resource(self, display, capsys):
        """A resource added between polls is printed on the next poll."""
        lb = ManagedResource("LoadBalancer", "lb-123", "ACTIVE", None, None)
        display.show_monitoring_data(
            ManagedResourceGroup(resources=[lb]), None
        )
        capsys.readouterr()  # drop the initial output

        tg = ManagedResource("TargetGroup", "tg-456", "ACTIVE", None, None)
        display.show_monitoring_data(
            ManagedResourceGroup(resources=[lb, tg]), None
        )

        assert "TargetGroup" in capsys.readouterr().out

    def test_show_timeout_message(self, display, capsys):
        """The timeout notice says the timeout was reached."""
        display.show_timeout_message()

        assert "timeout reached" in capsys.readouterr().out.lower()

    def test_show_service_inactive_message(self, display, capsys):
        """The inactive-service notice mentions inactivity."""
        display.show_service_inactive_message()

        assert "inactive" in capsys.readouterr().out.lower()

    def test_show_completion_message(self, display, capsys):
        """The completion notice mentions completion."""
        display.show_completion_message()

        assert "complete" in capsys.readouterr().out.lower()

    def test_show_user_stop_message(self, display, capsys):
        """The user-interrupt notice says monitoring was stopped by the user."""
        display.show_user_stop_message()

        assert "stopped by user" in capsys.readouterr().out.lower()

    def test_show_error_message(self, display, capsys):
        """Errors print an 'Error' label together with the message text."""
        display.show_error_message("Test error")

        captured = capsys.readouterr().out
        assert "Error" in captured
        assert "Test error" in captured

    def test_use_color_parameter(self):
        """The use_color flag is stored unchanged on the instance."""
        assert StreamDisplay(use_color=True).use_color is True
        assert StreamDisplay(use_color=False).use_color is False

    def test_print_flattened_resources_with_reason(self, display, capsys):
        """A resource's reason prints on its own line, after the resource."""
        res = ManagedResource(
            "LoadBalancer",
            "lb-123",
            "CREATING",
            None,
            "Waiting for DNS propagation",
        )
        display.show_monitoring_data(
            ManagedResourceGroup(resources=[res]), None
        )

        lines = capsys.readouterr().out.splitlines()
        lb_at = next(i for i, ln in enumerate(lines) if "LoadBalancer" in ln)
        reason_at = next(
            i
            for i, ln in enumerate(lines)
            if "Reason: Waiting for DNS propagation" in ln
        )

        # Reason must be a separate line, below the resource line.
        assert reason_at != lb_at
        assert reason_at > lb_at

    def test_print_flattened_resources_with_updated_at(self, display, capsys):
        """An updated_at timestamp prints on its own line, after the resource."""
        now = time.time()
        res = ManagedResource("LoadBalancer", "lb-123", "ACTIVE", now, None)
        display.show_monitoring_data(
            ManagedResourceGroup(resources=[res]), None
        )

        lines = capsys.readouterr().out.splitlines()
        lb_at = next(i for i, ln in enumerate(lines) if "LoadBalancer" in ln)
        updated_at = next(
            i for i, ln in enumerate(lines) if "Last Updated At:" in ln
        )

        # Timestamp must be a separate line, below the resource line.
        assert updated_at != lb_at
        assert updated_at > lb_at

    def test_print_flattened_resources_with_additional_info(
        self, display, capsys
    ):
        """Additional info prints on its own line, after the resource line."""
        res = ManagedResource("LoadBalancer", "lb-123", "ACTIVE", None, None)
        res.additional_info = "DNS: example.elb.amazonaws.com"
        display.show_monitoring_data(
            ManagedResourceGroup(resources=[res]), None
        )

        lines = capsys.readouterr().out.splitlines()
        lb_at = next(i for i, ln in enumerate(lines) if "LoadBalancer" in ln)
        info_at = next(
            i
            for i, ln in enumerate(lines)
            if "Info: DNS: example.elb.amazonaws.com" in ln
        )

        # NOTE(review): the original's closing assertions are truncated at the
        # view boundary; completed here following the exact pattern of the
        # reason/updated_at tests above — confirm against the full file.
        assert info_at != lb_at
        assert info_at > lb_at
info_line_idx > lb_line_idx + + def test_print_flattened_resources_complete_multi_line( + self, display, capsys + ): + """Test resource with all fields prints on multiple lines""" + resource = ManagedResource( + "LoadBalancer", + "lb-123", + "CREATING", + time.time(), + "Provisioning load balancer", + ) + resource.additional_info = "Type: application" + resource_group = ManagedResourceGroup(resources=[resource]) + + display.show_monitoring_data(resource_group, None) + + output = capsys.readouterr().out + lines = output.splitlines() + + # Find all the relevant lines + lb_line_idx = next( + i + for i, line in enumerate(lines) + if "LoadBalancer" in line and "lb-123" in line + ) + reason_line_idx = next( + i + for i, line in enumerate(lines) + if "Reason: Provisioning load balancer" in line + ) + updated_line_idx = next( + i for i, line in enumerate(lines) if "Last Updated At:" in line + ) + info_line_idx = next( + i + for i, line in enumerate(lines) + if "Info: Type: application" in line + ) + + # All detail lines should be different from the main line + assert reason_line_idx != lb_line_idx + assert updated_line_idx != lb_line_idx + assert info_line_idx != lb_line_idx + + # All detail lines should come after the main line + assert reason_line_idx > lb_line_idx + assert updated_line_idx > lb_line_idx + assert info_line_idx > lb_line_idx + + def test_diff_detects_status_change(self, display, capsys): + """Test diff detects when status changes""" + resource1 = ManagedResource( + "LoadBalancer", "lb-123", "CREATING", None, None + ) + resource_group1 = ManagedResourceGroup(resources=[resource1]) + + # First poll + display.show_monitoring_data(resource_group1, None) + capsys.readouterr() # Clear initial output to test status change detection + + # Second poll - same resource but status changed + resource2 = ManagedResource( + "LoadBalancer", "lb-123", "ACTIVE", None, None + ) + resource_group2 = ManagedResourceGroup(resources=[resource2]) + + 
display.show_monitoring_data(resource_group2, None) + + output = capsys.readouterr().out + assert "LoadBalancer" in output + assert "ACTIVE" in output + + def test_diff_detects_reason_change(self, display, capsys): + """Test diff detects when reason changes""" + resource1 = ManagedResource( + "LoadBalancer", "lb-123", "CREATING", None, "Creating resources" + ) + resource_group1 = ManagedResourceGroup(resources=[resource1]) + + # First poll + display.show_monitoring_data(resource_group1, None) + capsys.readouterr() # Clear initial output to test reason change detection + + # Second poll - same resource but reason changed + resource2 = ManagedResource( + "LoadBalancer", "lb-123", "CREATING", None, "Waiting for DNS" + ) + resource_group2 = ManagedResourceGroup(resources=[resource2]) + + display.show_monitoring_data(resource_group2, None) + + output = capsys.readouterr().out + assert "Waiting for DNS" in output + + def test_diff_detects_additional_info_change(self, display, capsys): + """Test diff detects when additional_info changes""" + resource1 = ManagedResource( + "LoadBalancer", "lb-123", "ACTIVE", None, None + ) + resource1.additional_info = "DNS: old.example.com" + resource_group1 = ManagedResourceGroup(resources=[resource1]) + + # First poll + display.show_monitoring_data(resource_group1, None) + capsys.readouterr() # Clear initial output to test additional_info change detection + + # Second poll - same resource but additional_info changed + resource2 = ManagedResource( + "LoadBalancer", "lb-123", "ACTIVE", None, None + ) + resource2.additional_info = "DNS: new.example.com" + resource_group2 = ManagedResourceGroup(resources=[resource2]) + + display.show_monitoring_data(resource_group2, None) + + output = capsys.readouterr().out + assert "new.example.com" in output + + def test_resource_with_none_type_shows_identifier(self, display, capsys): + """Test resources with resource_type=None show identifier without type""" + resource = ManagedResource( + None, + 
"mystery-resource-123", + "FAILED", + reason="Something went wrong", + ) + resource_group = ManagedResourceGroup( + resource_type="TestGroup", resources=[resource] + ) + + display.show_monitoring_data(resource_group, None) + + output = capsys.readouterr().out + assert "mystery-resource-123" in output + assert "FAILED" in output + + def test_resource_with_none_type_and_none_identifier( + self, display, capsys + ): + """Test resources with both resource_type=None and identifier=None show 'Unknown Resource' placeholder""" + resource = ManagedResource(None, None, "ACTIVE") + resource_group = ManagedResourceGroup( + resource_type="TestGroup", resources=[resource] + ) + + display.show_monitoring_data(resource_group, None) + + output = capsys.readouterr().out + assert "Unknown Resource" in output + assert "ACTIVE" in output diff --git a/tests/unit/customizations/ecs/test_monitorexpressgatewayservice.py b/tests/unit/customizations/ecs/test_monitorexpressgatewayservice.py index 37d9fa0a63e1..f6900905ad0e 100644 --- a/tests/unit/customizations/ecs/test_monitorexpressgatewayservice.py +++ b/tests/unit/customizations/ecs/test_monitorexpressgatewayservice.py @@ -19,12 +19,6 @@ ECSMonitorExpressGatewayService, ) -# Suppress thread exception warnings - tests use KeyboardInterrupt to exit monitoring loops, -# which causes expected exceptions in background threads -pytestmark = pytest.mark.filterwarnings( - "ignore::pytest.PytestUnhandledThreadExceptionWarning" -) - class TestECSMonitorExpressGatewayServiceCommand: """Test the command class through public interface""" @@ -83,315 +77,153 @@ def test_non_monitoring_error_bubbles_up(self, mock_isatty): @patch('sys.stdout.isatty') def test_interactive_mode_requires_tty(self, mock_isatty, capsys): - """Test command fails when not in TTY""" + """Test interactive mode fails without TTY""" # Not in TTY mock_isatty.return_value = False mock_session = Mock() + mock_client = Mock() + mock_session.create_client.return_value = mock_client + 
command = ECSMonitorExpressGatewayService(mock_session) parsed_args = Mock( - service_arn="test-arn", resource_view="RESOURCE", timeout=30 + service_arn="test-arn", + resource_view="RESOURCE", + timeout=30, + mode='INTERACTIVE', ) parsed_globals = Mock( - region="us-west-2", endpoint_url=None, verify_ssl=True + region="us-west-2", + endpoint_url=None, + verify_ssl=True, + color='auto', ) result = command._run_main(parsed_args, parsed_globals) captured = capsys.readouterr() + assert "Interactive mode requires a TTY" in captured.err + assert "aws: [ERROR]:" in captured.err assert result == 1 - assert "This command requires a TTY" in captured.err + @patch('sys.stdout.isatty') + def test_text_only_mode_without_tty(self, mock_isatty, capsys): + """Test command uses text-only mode when not in TTY""" + # Not in TTY + mock_isatty.return_value = False -class TestECSExpressGatewayServiceWatcher: - """Test the watcher class through public interface""" + mock_session = Mock() + mock_client = Mock() + mock_session.create_client.return_value = mock_client - @patch('sys.stdout.isatty') - def test_is_monitoring_available_with_tty(self, mock_isatty): - """Test is_monitoring_available returns True when TTY is available""" - mock_isatty.return_value = True - assert ( - ECSExpressGatewayServiceWatcher.is_monitoring_available() is True - ) + mock_watcher_class = Mock() + mock_watcher = Mock() + mock_watcher_class.return_value = mock_watcher - @patch('sys.stdout.isatty') - def test_is_monitoring_available_without_tty(self, mock_isatty): - """Test is_monitoring_available returns False when TTY is not available""" - mock_isatty.return_value = False - assert ( - ECSExpressGatewayServiceWatcher.is_monitoring_available() is False + command = ECSMonitorExpressGatewayService( + mock_session, watcher_class=mock_watcher_class ) - def setup_method(self): - self.app_session = create_app_session(output=DummyOutput()) - self.app_session.__enter__() - self.mock_client = Mock() - self.service_arn = ( 
- "arn:aws:ecs:us-west-2:123456789012:service/my-cluster/my-service" + parsed_args = Mock( + service_arn="test-arn", + resource_view="RESOURCE", + timeout=30, + mode=None, ) + parsed_globals = Mock( + region="us-west-2", endpoint_url=None, verify_ssl=True + ) + + command._run_main(parsed_args, parsed_globals) + + +@pytest.fixture +def watcher_app_session(): + """Fixture that creates and manages an app session for watcher tests.""" + with create_app_session(output=DummyOutput()) as session: + yield session - def teardown_method(self): - if hasattr(self, 'app_session'): - self.app_session.__exit__(None, None, None) - def _create_watcher_with_mocks(self, resource_view="RESOURCE", timeout=1): - """Helper to create watcher with mocked display""" - mock_display = Mock() - mock_display.has_terminal.return_value = True - mock_display._check_keypress.return_value = None - mock_display._restore_terminal.return_value = None - mock_display.display.return_value = None +@pytest.fixture +def service_arn(): + """Fixture that provides a test service ARN.""" + return "arn:aws:ecs:us-west-2:123456789012:service/my-cluster/my-service" + + +class TestECSExpressGatewayServiceWatcher: + """Test the watcher class through public interface""" + + def test_init_creates_collector_with_correct_parameters(self): + """Test watcher creates collector with correct client, service_arn, resource_view, use_color""" + mock_client = Mock() + service_arn = "arn:aws:ecs:us-west-2:123456789012:service/test-service" watcher = ECSExpressGatewayServiceWatcher( - self.mock_client, - self.service_arn, - resource_view, - timeout_minutes=timeout, - display=mock_display, + mock_client, + service_arn, + resource_view="DEPLOYMENT", + display_mode="TEXT-ONLY", + use_color=False, ) - # Mock exec to call the monitoring method once and print output - original_monitor = watcher._monitor_express_gateway_service - - def mock_exec(): - try: - output = original_monitor("⠋", self.service_arn, resource_view) - print(output) - 
print("Monitoring Complete!") - except Exception as e: - # Re-raise expected exceptions - if isinstance(e, (ClientError, MonitoringError)): - raise - # For other exceptions, just print and complete - print("Monitoring Complete!") - - watcher.exec = mock_exec - return watcher - - @patch('time.sleep') - def test_exec_successful_all_mode_monitoring(self, mock_sleep, capsys): - """Test successful monitoring in RESOURCE mode with resource parsing""" - watcher = self._create_watcher_with_mocks() - mock_sleep.side_effect = KeyboardInterrupt() - - self.mock_client.describe_express_gateway_service.return_value = { - "service": { - "serviceArn": self.service_arn, - "cluster": "my-cluster", - "activeConfigurations": [{"serviceRevisionArn": "rev-arn"}], - } - } - self.mock_client.describe_service_revisions.return_value = { - "serviceRevisions": [ - { - "arn": "rev-arn", - "ecsManagedResources": { - "ingressPaths": [ - { - "endpoint": "https://api.example.com", - "loadBalancer": { - "arn": "arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-lb/1234567890abcdef", - "status": "ACTIVE", - }, - "targetGroups": [ - { - "arn": "arn:aws:elasticloadbalancing:us-west-2:123456789012:targetgroup/my-tg/1234567890abcdef", - "status": "HEALTHY", - } - ], - } - ], - "serviceSecurityGroups": [ - { - "arn": "arn:aws:ec2:us-west-2:123456789012:security-group/sg-1234567890abcdef0", - "status": "ACTIVE", - } - ], - "logGroups": [ - { - "arn": "arn:aws:logs:us-west-2:123456789012:log-group:/aws/ecs/my-service", - "status": "ACTIVE", - } - ], - }, - } - ] - } - self.mock_client.describe_services.return_value = { - "services": [{"events": [{"message": "Running"}]}] - } + # Verify collector was created with correct parameters + assert watcher.collector is not None + assert watcher.collector._client == mock_client + assert watcher.collector.service_arn == service_arn + assert watcher.collector.mode == "DEPLOYMENT" + assert watcher.collector.use_color is False - watcher.exec() - 
captured = capsys.readouterr() - output_text = captured.out - - # Verify parsed resources appear in output - assert "Cluster" in output_text - assert "Service" in output_text - assert "IngressPath" in output_text - assert "LoadBalancer" in output_text - assert "TargetGroup" in output_text - assert "SecurityGroup" in output_text - assert "LogGroup" in output_text - - # Specific identifiers - assert "https://api.example.com" in output_text # IngressPath endpoint - assert "my-lb" in output_text # LoadBalancer identifier - assert "my-tg" in output_text # TargetGroup identifier - assert ( - "sg-1234567890abcdef0" in output_text - ) # SecurityGroup identifier - assert "/aws/ecs/my-service" in output_text # LogGroup identifier - - # Status values - assert "ACTIVE" in output_text # LoadBalancer and SecurityGroup status - assert "HEALTHY" in output_text # TargetGroup status + def test_init_uses_injected_collector(self): + """Test watcher uses injected collector instead of creating one""" + mock_collector = Mock() + mock_display_strategy = Mock() - @patch('time.sleep') - def test_exec_successful_delta_mode_with_deployment( - self, mock_sleep, capsys - ): - """Test DEPLOYMENT mode executes successfully""" - watcher = self._create_watcher_with_mocks() - mock_sleep.side_effect = KeyboardInterrupt() - - self.mock_client.describe_express_gateway_service.return_value = { - "service": { - "serviceArn": self.service_arn, - "cluster": "my-cluster", - "activeConfigurations": [], - } - } - self.mock_client.describe_services.return_value = { - "services": [{"events": [{"message": "Service running"}]}] - } + watcher = ECSExpressGatewayServiceWatcher( + Mock(), + "arn:aws:ecs:us-west-2:123456789012:service/test-service", + "RESOURCE", + "INTERACTIVE", + collector=mock_collector, + display_strategy=mock_display_strategy, + ) - watcher.exec() - captured = capsys.readouterr() + assert watcher.collector == mock_collector - # Verify DEPLOYMENT mode executes successfully - assert captured.out - 
- @patch('time.sleep') - def test_exec_combined_view_multiple_revisions(self, mock_sleep, capsys): - """Test RESOURCE mode combines multiple service revisions correctly""" - watcher = self._create_watcher_with_mocks() - mock_sleep.side_effect = KeyboardInterrupt() - - # Multiple active configurations (combined view) - self.mock_client.describe_express_gateway_service.return_value = { - "service": { - "serviceArn": self.service_arn, - "cluster": "my-cluster", - "activeConfigurations": [ - {"serviceRevisionArn": "rev-1"}, - {"serviceRevisionArn": "rev-2"}, - ], - } - } - - # Mock multiple revisions with different resources - self.mock_client.describe_service_revisions.return_value = { - "serviceRevisions": [ - { - "arn": "rev-1", - "ecsManagedResources": { - "ingressPaths": [ - { - "endpoint": "https://api.example.com", - "loadBalancer": { - "arn": "arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/api-lb/1234", - "status": "ACTIVE", - }, - } - ], - "serviceSecurityGroups": [ - { - "arn": "arn:aws:ec2:us-west-2:123456789012:security-group/sg-api123", - "status": "ACTIVE", - } - ], - }, - }, - { - "arn": "rev-2", - "ecsManagedResources": { - "ingressPaths": [ - { - "endpoint": "https://web.example.com", - "loadBalancer": { - "arn": "arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/web-lb/5678", - "status": "CREATING", - }, - } - ], - "logGroups": [ - { - "arn": "arn:aws:logs:us-west-2:123456789012:log-group:/aws/ecs/web-logs", - "status": "ACTIVE", - } - ], - }, - }, - ] - } - - self.mock_client.describe_services.return_value = { - "services": [ - {"events": [{"message": "Multiple revisions active"}]} - ] - } + def test_exec_calls_display_strategy_with_correct_parameters( + self, watcher_app_session + ): + """Test exec() calls display strategy with collector, start_time, and timeout""" + mock_collector = Mock() + mock_display_strategy = Mock() - watcher.exec() - captured = capsys.readouterr() - output_text = captured.out - - # 
Verify combined view shows resources from both revisions - # Resource types from both revisions - assert "IngressPath" in output_text - assert "LoadBalancer" in output_text - assert "SecurityGroup" in output_text # From rev-1 - assert "LogGroup" in output_text # From rev-2 - - # Specific identifiers from both revisions - assert "https://api.example.com" in output_text # From rev-1 - assert "https://web.example.com" in output_text # From rev-2 - assert "api-lb" in output_text # From rev-1 - assert "web-lb" in output_text # From rev-2 - assert "sg-api123" in output_text # From rev-1 - assert "/aws/ecs/web-logs" in output_text # From rev-2 - - # Status values from both revisions - assert "ACTIVE" in output_text # From both revisions - assert "CREATING" in output_text # From rev-2 - - @patch('time.sleep') - def test_exec_keyboard_interrupt_handling(self, mock_sleep, capsys): - watcher = self._create_watcher_with_mocks() - mock_sleep.side_effect = KeyboardInterrupt() - - self.mock_client.describe_express_gateway_service.return_value = { - "service": { - "serviceArn": self.service_arn, - "cluster": "my-cluster", - "activeConfigurations": [], - } - } + watcher = ECSExpressGatewayServiceWatcher( + Mock(), + "arn:aws:ecs:us-west-2:123456789012:service/test-service", + "RESOURCE", + "INTERACTIVE", + timeout_minutes=15, + display_strategy=mock_display_strategy, + collector=mock_collector, + ) watcher.exec() - captured = capsys.readouterr() - # Verify completion message is printed - assert "Monitoring Complete!" 
in captured.out + # Verify display strategy was called once + mock_display_strategy.execute_monitoring.assert_called_once() - @patch('time.sleep') - def test_exec_with_service_not_found_error(self, mock_sleep): - """Test exec() with service not found error bubbles up""" - watcher = self._create_watcher_with_mocks() - mock_sleep.side_effect = KeyboardInterrupt() + # Verify correct parameters were passed + call_args = mock_display_strategy.execute_monitoring.call_args + assert call_args.kwargs['collector'] == mock_collector + assert call_args.kwargs['start_time'] == watcher.start_time + assert call_args.kwargs['timeout_minutes'] == 15 - error = ClientError( + def test_exec_propagates_exceptions_from_display_strategy( + self, watcher_app_session + ): + """Test exec() propagates exceptions from display strategy""" + mock_display_strategy = Mock() + mock_display_strategy.execute_monitoring.side_effect = ClientError( error_response={ 'Error': { 'Code': 'ServiceNotFoundException', @@ -400,12 +232,19 @@ def test_exec_with_service_not_found_error(self, mock_sleep): }, operation_name='DescribeExpressGatewayService', ) - self.mock_client.describe_express_gateway_service.side_effect = error + + watcher = ECSExpressGatewayServiceWatcher( + Mock(), + "arn:aws:ecs:us-west-2:123456789012:service/test-service", + "RESOURCE", + "INTERACTIVE", + display_strategy=mock_display_strategy, + collector=Mock(), + ) with pytest.raises(ClientError) as exc_info: watcher.exec() - # Verify the specific error is raised assert ( exc_info.value.response['Error']['Code'] == 'ServiceNotFoundException' @@ -414,326 +253,6 @@ def test_exec_with_service_not_found_error(self, mock_sleep): exc_info.value.response['Error']['Message'] == 'Service not found' ) - @patch('time.sleep') - def test_exec_with_inactive_service_handled_gracefully( - self, mock_sleep, capsys - ): - """Test exec() handles inactive service gracefully""" - watcher = self._create_watcher_with_mocks() - mock_sleep.side_effect = 
KeyboardInterrupt() - - self.mock_client.describe_express_gateway_service.side_effect = ClientError( - error_response={ - 'Error': { - 'Code': 'InvalidParameterException', - 'Message': 'Cannot call DescribeServiceRevisions for a service that is INACTIVE', - } - }, - operation_name='DescribeExpressGatewayService', - ) - self.mock_client.describe_services.return_value = { - "services": [{"events": [{"message": "Service is inactive"}]}] - } - - watcher.exec() - captured = capsys.readouterr() - - # Verify inactive service is handled and appropriate message shown - assert "inactive" in captured.out.lower() - - @patch('time.sleep') - def test_exec_with_empty_resources(self, mock_sleep, capsys): - """Test parsing edge case: empty/null resources""" - watcher = self._create_watcher_with_mocks() - mock_sleep.side_effect = KeyboardInterrupt() - - self.mock_client.describe_express_gateway_service.return_value = { - "service": { - "serviceArn": self.service_arn, - "cluster": "my-cluster", - "activeConfigurations": [{"serviceRevisionArn": "rev-arn"}], - } - } - # Empty ecsManagedResources - self.mock_client.describe_service_revisions.return_value = { - "serviceRevisions": [{"arn": "rev-arn", "ecsManagedResources": {}}] - } - self.mock_client.describe_services.return_value = { - "services": [{"events": [{"message": "No resources"}]}] - } - - watcher.exec() - captured = capsys.readouterr() - output_text = captured.out - - # Should handle empty resources gracefully but still show basic structure - assert "Cluster" in output_text - assert "Service" in output_text - # Should NOT contain resource types since ecsManagedResources is empty - assert "IngressPath" not in output_text - assert "LoadBalancer" not in output_text - - @patch('time.sleep') - def test_exec_with_autoscaling_resources(self, mock_sleep, capsys): - """Test autoscaling resource parsing with scalableTarget and policies""" - watcher = self._create_watcher_with_mocks() - mock_sleep.side_effect = KeyboardInterrupt() - - 
self.mock_client.describe_express_gateway_service.return_value = { - "service": { - "serviceArn": self.service_arn, - "cluster": "my-cluster", - "activeConfigurations": [{"serviceRevisionArn": "rev-arn"}], - } - } - self.mock_client.describe_service_revisions.return_value = { - "serviceRevisions": [ - { - "arn": "rev-arn", - "ecsManagedResources": { - "autoScaling": { - "scalableTarget": { - "arn": "arn:aws:application-autoscaling:us-west-2:123456789012:scalable-target/1234567890abcdef", - "status": "ACTIVE", - }, - "applicationAutoScalingPolicies": [ - { - "arn": "arn:aws:application-autoscaling:us-west-2:123456789012:scaling-policy/cpu-policy", - "status": "ACTIVE", - }, - { - "arn": "arn:aws:application-autoscaling:us-west-2:123456789012:scaling-policy/memory-policy", - "status": "ACTIVE", - }, - ], - } - }, - } - ] - } - self.mock_client.describe_services.return_value = { - "services": [{"events": [{"message": "Autoscaling active"}]}] - } - - watcher.exec() - captured = capsys.readouterr() - output_text = captured.out - - assert "AutoScaling" in output_text - assert "ScalableTarget" in output_text - assert "AutoScalingPolicy" in output_text - # ScalableTarget identifier - assert "1234567890abcdef" in output_text - # Policy identifiers - assert "cpu-policy" in output_text - assert "memory-policy" in output_text - - @patch('time.sleep') - def test_exec_with_malformed_resource_data(self, mock_sleep, capsys): - """Test parsing edge case: malformed resource data""" - watcher = self._create_watcher_with_mocks() - mock_sleep.side_effect = KeyboardInterrupt() - - self.mock_client.describe_express_gateway_service.return_value = { - "service": { - "serviceArn": self.service_arn, - "cluster": "my-cluster", - "activeConfigurations": [{"serviceRevisionArn": "rev-arn"}], - } - } - # Malformed resources - missing required fields - self.mock_client.describe_service_revisions.return_value = { - "serviceRevisions": [ - { - "arn": "rev-arn", - "ecsManagedResources": { - 
"ingressPaths": [ - {"endpoint": "https://example.com"} - ], # Missing loadBalancer - "serviceSecurityGroups": [ - {"status": "ACTIVE"} - ], # Missing arn - }, - } - ] - } - self.mock_client.describe_services.return_value = { - "services": [{"events": [{"message": "Malformed data"}]}] - } - - watcher.exec() - captured = capsys.readouterr() - output_text = captured.out - - # Should handle malformed data gracefully and show what it can parse - assert "IngressPath" in output_text - assert "https://example.com" in output_text - # Should show SecurityGroup type even with missing arn - assert "SecurityGroup" in output_text - # Should NOT show LoadBalancer since it's missing from IngressPath - assert "LoadBalancer" not in output_text - - @patch('time.sleep') - def test_exec_eventually_consistent_missing_deployment( - self, mock_sleep, capsys - ): - """Test eventually consistent behavior: deployment missing after list""" - watcher = self._create_watcher_with_mocks() - mock_sleep.side_effect = KeyboardInterrupt() - - self.mock_client.describe_express_gateway_service.return_value = { - "service": { - "serviceArn": self.service_arn, - "cluster": "my-cluster", - "activeConfigurations": [], - } - } - # List shows deployment exists - self.mock_client.list_service_deployments.return_value = { - "serviceDeployments": [{"serviceDeploymentArn": "deploy-arn"}] - } - # But describe fails (eventually consistent) - self.mock_client.describe_service_deployments.return_value = { - "serviceDeployments": [], - "failures": [{"arn": "deploy-arn", "reason": "MISSING"}], - } - self.mock_client.describe_services.return_value = { - "services": [{"events": [{"message": "Eventually consistent"}]}] - } - - watcher.exec() - captured = capsys.readouterr() - output_text = captured.out - - # Should handle eventually consistent missing deployment gracefully - # Should show waiting state when deployment is missing - assert "Trying to describe gateway service" in output_text - assert "Monitoring Complete" 
in output_text - - @patch('time.sleep') - def test_exec_eventually_consistent_missing_revision( - self, mock_sleep, capsys - ): - """Test eventually consistent behavior: service revision missing after deployment describe""" - watcher = self._create_watcher_with_mocks() - mock_sleep.side_effect = KeyboardInterrupt() - - self.mock_client.describe_express_gateway_service.return_value = { - "service": { - "serviceArn": self.service_arn, - "cluster": "my-cluster", - "activeConfigurations": [], - } - } - self.mock_client.list_service_deployments.return_value = { - "serviceDeployments": [{"serviceDeploymentArn": "deploy-arn"}] - } - self.mock_client.describe_service_deployments.return_value = { - "serviceDeployments": [ - { - "serviceDeploymentArn": "deploy-arn", - "status": "IN_PROGRESS", - "targetServiceRevision": {"arn": "target-rev"}, - } - ] - } - # Service revision missing (eventually consistent) - self.mock_client.describe_service_revisions.return_value = { - "serviceRevisions": [], - "failures": [{"arn": "target-rev", "reason": "MISSING"}], - } - self.mock_client.describe_services.return_value = { - "services": [{"events": [{"message": "Revision missing"}]}] - } - - watcher.exec() - captured = capsys.readouterr() - output_text = captured.out - - # Should handle eventually consistent missing revision gracefully - # Should show waiting state when revision is missing - assert "Trying to describe gateway service" in output_text - assert "Monitoring Complete" in output_text - - @patch('time.sleep') - def test_exec_with_api_failures(self, mock_sleep): - """Test failure parsing: API returns failures""" - watcher = self._create_watcher_with_mocks() - mock_sleep.side_effect = KeyboardInterrupt() - - self.mock_client.describe_express_gateway_service.return_value = { - "service": { - "serviceArn": self.service_arn, - "cluster": "my-cluster", - "activeConfigurations": [{"serviceRevisionArn": "rev-arn"}], - } - } - # API returns failures - 
self.mock_client.describe_service_revisions.return_value = { - "serviceRevisions": [], - "failures": [{"arn": "rev-arn", "reason": "ServiceNotFound"}], - } - - with pytest.raises(MonitoringError) as exc_info: - watcher.exec() - - # Should raise MonitoringError with failure details - error_message = str(exc_info.value) - assert "DescribeServiceRevisions" in error_message - assert "rev-arn" in error_message - assert "ServiceNotFound" in error_message - - @patch('time.sleep') - def test_exec_with_malformed_api_failures(self, mock_sleep): - """Test failure parsing: malformed failure responses""" - watcher = self._create_watcher_with_mocks() - mock_sleep.side_effect = KeyboardInterrupt() - - self.mock_client.describe_express_gateway_service.return_value = { - "service": { - "serviceArn": self.service_arn, - "cluster": "my-cluster", - "activeConfigurations": [{"serviceRevisionArn": "rev-arn"}], - } - } - # Malformed failures - missing arn or reason - self.mock_client.describe_service_revisions.return_value = { - "serviceRevisions": [], - "failures": [{"reason": "ServiceNotFound"}], # Missing arn - } - - with pytest.raises(MonitoringError) as exc_info: - watcher.exec() - - # Should raise MonitoringError about invalid failure response - error_message = str(exc_info.value) - assert "Invalid failure response" in error_message - assert "missing arn or reason" in error_message - - @patch('time.sleep') - def test_exec_with_missing_response_fields(self, mock_sleep): - """Test response validation: missing required fields""" - watcher = self._create_watcher_with_mocks() - mock_sleep.side_effect = KeyboardInterrupt() - - self.mock_client.describe_express_gateway_service.return_value = { - "service": { - "serviceArn": self.service_arn, - "cluster": "my-cluster", - "activeConfigurations": [{"serviceRevisionArn": "rev-arn"}], - } - } - # Missing serviceRevisions field - self.mock_client.describe_service_revisions.return_value = {} - - with pytest.raises(MonitoringError) as exc_info: - 
watcher.exec() - - # Should raise MonitoringError about empty response - error_message = str(exc_info.value) - assert "DescribeServiceRevisions" in error_message - assert "empty" in error_message - class TestMonitoringError: """Test MonitoringError exception class""" @@ -748,14 +267,6 @@ def test_monitoring_error_creation(self): class TestColorSupport: """Test color support functionality""" - def setup_method(self): - self.app_session = create_app_session(output=DummyOutput()) - self.app_session.__enter__() - - def teardown_method(self): - if hasattr(self, 'app_session'): - self.app_session.__exit__(None, None, None) - def test_should_use_color_on(self): """Test _should_use_color returns True when color is 'on'""" command = ECSMonitorExpressGatewayService(Mock()) @@ -792,7 +303,7 @@ def test_should_use_color_auto_no_tty(self, mock_isatty): assert command._should_use_color(parsed_globals) is False - def test_watcher_accepts_use_color_parameter(self): + def test_watcher_accepts_use_color_parameter(self, watcher_app_session): """Test ECSExpressGatewayServiceWatcher accepts use_color parameter""" mock_client = Mock() @@ -801,15 +312,30 @@ def test_watcher_accepts_use_color_parameter(self): mock_client, "arn:aws:ecs:us-east-1:123456789012:service/test-service", "ALL", + "INTERACTIVE", use_color=True, ) - assert watcher.use_color is True + assert watcher.collector.use_color is True # Test with use_color=False watcher = ECSExpressGatewayServiceWatcher( mock_client, "arn:aws:ecs:us-east-1:123456789012:service/test-service", "ALL", + "INTERACTIVE", use_color=False, ) - assert watcher.use_color is False + assert watcher.collector.use_color is False + + def test_invalid_display_mode_raises_error(self): + """Test that invalid display mode raises ValueError""" + mock_client = Mock() + + with pytest.raises(ValueError) as exc_info: + ECSExpressGatewayServiceWatcher( + mock_client, + "arn:aws:ecs:us-east-1:123456789012:service/test-service", + "RESOURCE", + "INVALID_MODE", + ) + 
assert "Invalid display mode: INVALID_MODE" in str(exc_info.value) diff --git a/tests/unit/customizations/ecs/test_monitormutatinggatewayservice.py b/tests/unit/customizations/ecs/test_monitormutatinggatewayservice.py index 37afe0a78324..f65c5617eeb7 100644 --- a/tests/unit/customizations/ecs/test_monitormutatinggatewayservice.py +++ b/tests/unit/customizations/ecs/test_monitormutatinggatewayservice.py @@ -68,25 +68,34 @@ def test_call_with_explicit_value(self): assert namespace.monitor_resources == 'RESOURCE' +@pytest.fixture +def mock_watcher_class(): + """Fixture that provides a mock watcher class.""" + watcher_class = Mock() + watcher_class.is_monitoring_available.return_value = True + return watcher_class + + +@pytest.fixture +def handler(mock_watcher_class): + """Fixture that provides a MonitorMutatingGatewayService handler.""" + return MonitorMutatingGatewayService( + 'create-gateway-service', + 'DEPLOYMENT', + watcher_class=mock_watcher_class, + ) + + class TestMonitorMutatingGatewayService: """Test the event handler for monitoring gateway service mutations.""" - def setup_method(self): - self.mock_watcher_class = Mock() - self.mock_watcher_class.is_monitoring_available.return_value = True - self.handler = MonitorMutatingGatewayService( - 'create-gateway-service', - 'DEPLOYMENT', - watcher_class=self.mock_watcher_class, - ) - - def test_init(self): + def test_init(self, handler): """Test proper initialization of the handler.""" - assert self.handler.api == 'create-gateway-service' - assert self.handler.default_resource_view == 'DEPLOYMENT' - assert self.handler.api_pascal_case == 'CreateGatewayService' - assert self.handler.session is None - assert self.handler.parsed_globals is None + assert handler.api == 'create-gateway-service' + assert handler.default_resource_view == 'DEPLOYMENT' + assert handler.api_pascal_case == 'CreateGatewayService' + assert handler.session is None + assert handler.parsed_globals is None def test_pascal_case_conversion(self): 
"""Test API name conversion to PascalCase.""" @@ -102,82 +111,83 @@ def test_pascal_case_conversion(self): handler = MonitorMutatingGatewayService(api_name, 'RESOURCE') assert handler.api_pascal_case == expected_pascal - def test_before_building_argument_table_parser(self): + def test_before_building_argument_table_parser(self, handler): """Test storing session for later use.""" session = Mock() - self.handler.before_building_argument_table_parser(session) + handler.before_building_argument_table_parser(session) - assert self.handler.session == session + assert handler.session == session - def test_building_argument_table(self): + def test_building_argument_table(self, handler): """Test adding monitoring argument to the command's argument table.""" argument_table = {} session = Mock() - self.handler.building_argument_table(argument_table, session) + handler.building_argument_table(argument_table, session) assert 'monitor-resources' in argument_table assert isinstance( argument_table['monitor-resources'], MonitoringResourcesArgument ) - def test_operation_args_parsed_with_flag(self): + def test_operation_args_parsed_with_flag(self, handler): """Test storing monitoring flag when enabled with default.""" parsed_args = Mock() parsed_args.monitor_resources = '__DEFAULT__' parsed_globals = Mock() - self.handler.operation_args_parsed(parsed_args, parsed_globals) + handler.operation_args_parsed(parsed_args, parsed_globals) - assert self.handler.effective_resource_view == 'DEPLOYMENT' + assert handler.effective_resource_view == 'DEPLOYMENT' - def test_operation_args_parsed_with_explicit_choice(self): + def test_operation_args_parsed_with_explicit_choice(self, handler): """Test storing monitoring flag with explicit choice.""" parsed_args = Mock() parsed_args.monitor_resources = 'RESOURCE' parsed_globals = Mock() - self.handler.operation_args_parsed(parsed_args, parsed_globals) + handler.operation_args_parsed(parsed_args, parsed_globals) - assert 
self.handler.effective_resource_view == 'RESOURCE' + assert handler.effective_resource_view == 'RESOURCE' - def test_operation_args_parsed_without_flag(self): + def test_operation_args_parsed_without_flag(self, handler): """Test storing monitoring flag when disabled.""" parsed_args = Mock() parsed_args.monitor_resources = None + parsed_args.monitor_mode = None parsed_globals = Mock() - self.handler.operation_args_parsed(parsed_args, parsed_globals) + handler.operation_args_parsed(parsed_args, parsed_globals) - assert self.handler.effective_resource_view is None + assert handler.effective_resource_view is None - def test_operation_args_parsed_missing_attribute(self): + def test_operation_args_parsed_missing_attribute(self, handler): """Test handling missing monitor_resources attribute.""" # Mock without monitor_resources attribute parsed_args = Mock(spec=[]) parsed_globals = Mock() - self.handler.operation_args_parsed(parsed_args, parsed_globals) + handler.operation_args_parsed(parsed_args, parsed_globals) - assert self.handler.effective_resource_view is None + assert handler.effective_resource_view is None - def test_after_call_monitoring_disabled(self): + def test_after_call_monitoring_disabled(self, handler): """Test that monitoring is skipped when flag is disabled.""" - self.handler.effective_resource_view = None + handler.effective_resource_view = None parsed = {} context = Mock() http_response = Mock() http_response.status_code = 200 # Should return early without doing anything - self.handler.after_call(parsed, context, http_response) + handler.after_call(parsed, context, http_response) # No assertions needed - just verify no exceptions - def test_after_call_http_error(self): + def test_after_call_http_error(self, handler): """Test that monitoring is skipped on HTTP errors.""" - self.handler.effective_resource_view = 'DEPLOYMENT' + handler.effective_resource_view = 'DEPLOYMENT' parsed = { 'service': {'serviceArn': 'arn:aws:ecs:us-west-2:123:service/test'} } @@ 
-186,13 +196,13 @@ def test_after_call_http_error(self): http_response.status_code = 400 # Should return early without doing anything - self.handler.after_call(parsed, context, http_response) + handler.after_call(parsed, context, http_response) # No assertions needed - just verify no exceptions - def test_after_call_missing_service_arn(self): + def test_after_call_missing_service_arn(self, handler): """Test that monitoring is skipped when service ARN is missing.""" - self.handler.effective_resource_view = 'DEPLOYMENT' + handler.effective_resource_view = 'DEPLOYMENT' # Missing serviceArn parsed = {'service': {}} context = Mock() @@ -200,15 +210,15 @@ def test_after_call_missing_service_arn(self): http_response.status_code = 200 # Should return early without doing anything - self.handler.after_call(parsed, context, http_response) + handler.after_call(parsed, context, http_response) # No assertions needed - just verify no exceptions - def test_after_call_missing_session(self, capsys): + def test_after_call_missing_session(self, handler, capsys): """Test handling when session is not available.""" - self.handler.effective_resource_view = 'DEPLOYMENT' - self.handler.session = None - self.handler.parsed_globals = None + handler.effective_resource_view = 'DEPLOYMENT' + handler.session = None + handler.parsed_globals = None parsed = { 'service': {'serviceArn': 'arn:aws:ecs:us-west-2:123:service/test'} @@ -217,7 +227,7 @@ def test_after_call_missing_session(self, capsys): http_response = Mock() http_response.status_code = 200 - self.handler.after_call(parsed, context, http_response) + handler.after_call(parsed, context, http_response) captured = capsys.readouterr() assert ( @@ -238,6 +248,7 @@ def test_after_call_successful_monitoring(self): ) handler.monitor_resources = '__DEFAULT__' handler.effective_resource_view = 'DEPLOYMENT' + handler.effective_mode = 'TEXT-ONLY' mock_session = Mock() mock_parsed_globals = Mock() @@ -277,17 +288,20 @@ def 
test_after_call_successful_monitoring(self): mock_client, service_arn, 'DEPLOYMENT', + 'TEXT-ONLY', use_color=False, ) mock_watcher.exec.assert_called_once() # Verify parsed response was cleared assert parsed == {} - def test_after_call_monitoring_not_available(self, capsys): - """Test that monitoring is skipped when not available (no TTY).""" + @patch('sys.stdout.isatty', return_value=False) + def test_after_call_interactive_mode_without_tty( + self, mock_isatty, capsys + ): + """Test that interactive mode is skipped when TTY is not available.""" # Setup handler state mock_watcher_class = Mock() - mock_watcher_class.is_monitoring_available.return_value = False handler = MonitorMutatingGatewayService( 'create-gateway-service', @@ -295,6 +309,7 @@ def test_after_call_monitoring_not_available(self, capsys): watcher_class=mock_watcher_class, ) handler.effective_resource_view = 'DEPLOYMENT' + handler.effective_mode = 'INTERACTIVE' mock_session = Mock() mock_parsed_globals = Mock() @@ -314,7 +329,6 @@ def test_after_call_monitoring_not_available(self, capsys): # Setup call parameters service_arn = 'arn:aws:ecs:us-west-2:123456789012:service/test-service' parsed = {'service': {'serviceArn': service_arn}} - original_parsed = dict(parsed) context = Mock() http_response = Mock() http_response.status_code = 200 @@ -322,17 +336,19 @@ def test_after_call_monitoring_not_available(self, capsys): # Execute handler.after_call(parsed, context, http_response) - # Verify parsed response was not cleared - assert parsed == original_parsed - - # Verify warning message was printed + # Verify error message was printed captured = capsys.readouterr() assert ( - "Monitoring is not available (requires TTY). Skipping monitoring.\n" + "aws: [ERROR]: Interactive mode requires a TTY (terminal)." 
in captured.err ) + assert "Use --monitor-mode TEXT-ONLY" in captured.err - def test_after_call_exception_handling(self, capsys): + # Verify watcher was not called + mock_watcher_class.assert_not_called() + + @patch('sys.stdout.isatty', return_value=True) + def test_after_call_exception_handling(self, mock_isatty, capsys): """Test exception handling in after_call method.""" # Setup handler state mock_watcher_class = Mock() @@ -346,6 +362,7 @@ def test_after_call_exception_handling(self, capsys): watcher_class=mock_watcher_class, ) handler.effective_resource_view = 'DEPLOYMENT' + handler.effective_mode = 'INTERACTIVE' mock_session = Mock() mock_parsed_globals = Mock() @@ -369,16 +386,13 @@ def test_after_call_exception_handling(self, capsys): http_response = Mock() http_response.status_code = 200 - # Execute - should not raise exception - handler.after_call(parsed, context, http_response) - - captured = capsys.readouterr() - assert "Encountered an error, terminating monitoring" in captured.err - assert "Test exception" in captured.err + # Execute - exception should propagate + with pytest.raises(Exception, match="Test exception"): + handler.after_call(parsed, context, http_response) - def test_events(self): + def test_events(self, handler): """Test that correct events are returned for CLI integration.""" - events = self.handler.events() + events = handler.events() expected_events = [ "before-building-argument-table-parser.ecs.create-gateway-service", diff --git a/tests/unit/customizations/ecs/test_prompt_toolkit_display.py b/tests/unit/customizations/ecs/test_prompt_toolkit_display.py index 85517dafebb7..3c47a5cd86eb 100644 --- a/tests/unit/customizations/ecs/test_prompt_toolkit_display.py +++ b/tests/unit/customizations/ecs/test_prompt_toolkit_display.py @@ -13,7 +13,7 @@ class TestPromptToolkitDisplay: @pytest.fixture def display(self): with create_app_session(output=DummyOutput()): - return Display() + yield Display() def test_init(self, display): """Test Display 
initialization.""" diff --git a/tests/unit/customizations/ecs/test_serviceviewcollector.py b/tests/unit/customizations/ecs/test_serviceviewcollector.py new file mode 100644 index 000000000000..ebab9fefab83 --- /dev/null +++ b/tests/unit/customizations/ecs/test_serviceviewcollector.py @@ -0,0 +1,772 @@ +# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You +# may not use this file except in compliance with the License. A copy of +# the License is located at +# +# http://aws.amazon.com/apache2.0/ + +from unittest.mock import Mock + +import pytest +from botocore.exceptions import ClientError + +from awscli.customizations.ecs.exceptions import MonitoringError +from awscli.customizations.ecs.serviceviewcollector import ( + ServiceViewCollector, +) + + +class TestServiceViewCollector: + """Test ServiceViewCollector business logic""" + + def setup_method(self): + self.mock_client = Mock() + self.service_arn = ( + "arn:aws:ecs:us-west-2:123456789012:service/my-cluster/my-service" + ) + + def test_get_current_view_resource_mode(self): + """Test get_current_view in RESOURCE mode parses resources""" + collector = ServiceViewCollector( + self.mock_client, self.service_arn, "RESOURCE" + ) + + self.mock_client.describe_express_gateway_service.return_value = { + "service": { + "serviceArn": self.service_arn, + "cluster": "my-cluster", + "activeConfigurations": [{"serviceRevisionArn": "rev-arn"}], + } + } + self.mock_client.describe_service_revisions.return_value = { + "serviceRevisions": [ + { + "arn": "rev-arn", + "ecsManagedResources": { + "ingressPaths": [ + { + "endpoint": "https://api.example.com", + "loadBalancer": { + "arn": "arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-lb/1234567890abcdef", + "status": "ACTIVE", + }, + } + ], + }, + } + ] + } + self.mock_client.describe_services.return_value = { + "services": [{"events": [{"message": "Running"}]}] + } + + 
output = collector.get_current_view("⠋") + + assert "Cluster" in output + assert "Service" in output + assert "IngressPath" in output + assert "LoadBalancer" in output + assert "https://api.example.com" in output + assert "ACTIVE" in output + + def test_get_current_view_handles_inactive_service(self): + """Test get_current_view handles inactive service gracefully""" + collector = ServiceViewCollector( + self.mock_client, self.service_arn, "RESOURCE" + ) + + self.mock_client.describe_express_gateway_service.side_effect = ClientError( + error_response={ + 'Error': { + 'Code': 'InvalidParameterException', + 'Message': 'Cannot call DescribeServiceRevisions for a service that is INACTIVE', + } + }, + operation_name='DescribeExpressGatewayService', + ) + self.mock_client.describe_services.return_value = { + "services": [{"events": [{"message": "Service is inactive"}]}] + } + + output = collector.get_current_view("⠋") + + assert "inactive" in output.lower() + + def test_get_current_view_with_api_failures(self): + """Test get_current_view raises MonitoringError on API failures""" + collector = ServiceViewCollector( + self.mock_client, self.service_arn, "RESOURCE" + ) + + self.mock_client.describe_express_gateway_service.return_value = { + "service": { + "serviceArn": self.service_arn, + "cluster": "my-cluster", + "activeConfigurations": [{"serviceRevisionArn": "rev-arn"}], + } + } + self.mock_client.describe_service_revisions.return_value = { + "serviceRevisions": [], + "failures": [{"arn": "rev-arn", "reason": "ServiceNotFound"}], + } + + with pytest.raises(MonitoringError) as exc_info: + collector.get_current_view("⠋") + + error_message = str(exc_info.value) + assert "DescribeServiceRevisions" in error_message + assert "rev-arn" in error_message + assert "ServiceNotFound" in error_message + + def test_get_current_view_caches_results(self): + """Test get_current_view caches results within refresh interval""" + collector = ServiceViewCollector( + self.mock_client, 
self.service_arn, "RESOURCE" + ) + + self.mock_client.describe_express_gateway_service.return_value = { + "service": { + "serviceArn": self.service_arn, + "cluster": "my-cluster", + "activeConfigurations": [{"serviceRevisionArn": "rev-arn"}], + } + } + self.mock_client.describe_service_revisions.return_value = { + "serviceRevisions": [{"arn": "rev-arn", "ecsManagedResources": {}}] + } + self.mock_client.describe_services.return_value = { + "services": [{"events": [{"message": "Running"}]}] + } + + # First call + collector.get_current_view("⠋") + call_count_first = ( + self.mock_client.describe_express_gateway_service.call_count + ) + + # Second call within refresh interval (default 5000ms) + collector.get_current_view("⠙") + # Should use cached result, not call API again + call_count_second = ( + self.mock_client.describe_express_gateway_service.call_count + ) + assert call_count_first == call_count_second # Cached, no new API call + + def test_combined_view_multiple_revisions(self): + """Test RESOURCE mode combines multiple service revisions correctly""" + collector = ServiceViewCollector( + self.mock_client, self.service_arn, "RESOURCE" + ) + + # Multiple active configurations (combined view) + self.mock_client.describe_express_gateway_service.return_value = { + "service": { + "serviceArn": self.service_arn, + "cluster": "my-cluster", + "activeConfigurations": [ + {"serviceRevisionArn": "rev-1"}, + {"serviceRevisionArn": "rev-2"}, + ], + } + } + + # Mock multiple revisions with different resources + self.mock_client.describe_service_revisions.return_value = { + "serviceRevisions": [ + { + "arn": "rev-1", + "ecsManagedResources": { + "ingressPaths": [ + { + "endpoint": "https://api.example.com", + "loadBalancer": { + "arn": "arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/api-lb/1234", + "status": "ACTIVE", + }, + } + ], + "serviceSecurityGroups": [ + { + "arn": "arn:aws:ec2:us-west-2:123456789012:security-group/sg-api123", + "status": 
"ACTIVE", + } + ], + }, + }, + { + "arn": "rev-2", + "ecsManagedResources": { + "ingressPaths": [ + { + "endpoint": "https://web.example.com", + "loadBalancer": { + "arn": "arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/web-lb/5678", + "status": "CREATING", + }, + } + ], + "logGroups": [ + { + "arn": "arn:aws:logs:us-west-2:123456789012:log-group:/aws/ecs/web-logs", + "status": "ACTIVE", + } + ], + }, + }, + ] + } + + self.mock_client.describe_services.return_value = { + "services": [ + {"events": [{"message": "Multiple revisions active"}]} + ] + } + + output = collector.get_current_view("⠋") + + # Verify combined view shows resources from both revisions + assert "IngressPath" in output + assert "LoadBalancer" in output + assert "SecurityGroup" in output # From rev-1 + assert "LogGroup" in output # From rev-2 + assert "https://api.example.com" in output # From rev-1 + assert "https://web.example.com" in output # From rev-2 + assert "api-lb" in output # From rev-1 + assert "web-lb" in output # From rev-2 + assert "sg-api123" in output # From rev-1 + assert "/aws/ecs/web-logs" in output # From rev-2 + assert "ACTIVE" in output # From both revisions + assert "CREATING" in output # From rev-2 + + def test_get_current_view_with_empty_resources(self): + """Test parsing edge case: empty/null resources""" + collector = ServiceViewCollector( + self.mock_client, self.service_arn, "RESOURCE" + ) + + self.mock_client.describe_express_gateway_service.return_value = { + "service": { + "serviceArn": self.service_arn, + "cluster": "my-cluster", + "activeConfigurations": [{"serviceRevisionArn": "rev-arn"}], + } + } + # Empty ecsManagedResources + self.mock_client.describe_service_revisions.return_value = { + "serviceRevisions": [{"arn": "rev-arn", "ecsManagedResources": {}}] + } + self.mock_client.describe_services.return_value = { + "services": [{"events": [{"message": "No resources"}]}] + } + + output = collector.get_current_view("⠋") + + # Should handle 
empty resources gracefully but still show basic structure + assert "Cluster" in output + assert "Service" in output + # Should NOT contain resource types since ecsManagedResources is empty + assert "IngressPath" not in output + assert "LoadBalancer" not in output + + def test_get_current_view_with_autoscaling_resources(self): + """Test autoscaling resource parsing with scalableTarget and policies""" + collector = ServiceViewCollector( + self.mock_client, self.service_arn, "RESOURCE" + ) + + self.mock_client.describe_express_gateway_service.return_value = { + "service": { + "serviceArn": self.service_arn, + "cluster": "my-cluster", + "activeConfigurations": [{"serviceRevisionArn": "rev-arn"}], + } + } + self.mock_client.describe_service_revisions.return_value = { + "serviceRevisions": [ + { + "arn": "rev-arn", + "ecsManagedResources": { + "autoScaling": { + "scalableTarget": { + "arn": "arn:aws:application-autoscaling:us-west-2:123456789012:scalable-target/1234567890abcdef", + "status": "ACTIVE", + }, + "applicationAutoScalingPolicies": [ + { + "arn": "arn:aws:application-autoscaling:us-west-2:123456789012:scaling-policy/cpu-policy", + "status": "ACTIVE", + }, + { + "arn": "arn:aws:application-autoscaling:us-west-2:123456789012:scaling-policy/memory-policy", + "status": "ACTIVE", + }, + ], + } + }, + } + ] + } + self.mock_client.describe_services.return_value = { + "services": [{"events": [{"message": "Autoscaling active"}]}] + } + + output = collector.get_current_view("⠋") + + assert "AutoScaling" in output + assert "ScalableTarget" in output + assert "AutoScalingPolicy" in output + assert "1234567890abcdef" in output + assert "cpu-policy" in output + assert "memory-policy" in output + + def test_get_current_view_with_malformed_resource_data(self): + """Test parsing edge case: malformed resource data""" + collector = ServiceViewCollector( + self.mock_client, self.service_arn, "RESOURCE" + ) + + self.mock_client.describe_express_gateway_service.return_value = { + 
"service": { + "serviceArn": self.service_arn, + "cluster": "my-cluster", + "activeConfigurations": [{"serviceRevisionArn": "rev-arn"}], + } + } + # Malformed resources - missing required fields + self.mock_client.describe_service_revisions.return_value = { + "serviceRevisions": [ + { + "arn": "rev-arn", + "ecsManagedResources": { + "ingressPaths": [ + {"endpoint": "https://example.com"} + ], # Missing loadBalancer + "serviceSecurityGroups": [ + {"status": "ACTIVE"} + ], # Missing arn + }, + } + ] + } + self.mock_client.describe_services.return_value = { + "services": [{"events": [{"message": "Malformed data"}]}] + } + + output = collector.get_current_view("⠋") + + # Should handle malformed data gracefully and show what it can parse + assert "IngressPath" in output + assert "https://example.com" in output + # Should show SecurityGroup type even with missing arn + assert "SecurityGroup" in output + # Should NOT show LoadBalancer since it's missing from IngressPath + assert "LoadBalancer" not in output + + def test_eventually_consistent_missing_deployment(self): + """Test eventually consistent behavior: deployment missing after list""" + collector = ServiceViewCollector( + self.mock_client, self.service_arn, "DEPLOYMENT" + ) + + self.mock_client.describe_express_gateway_service.return_value = { + "service": { + "serviceArn": self.service_arn, + "cluster": "my-cluster", + "activeConfigurations": [], + } + } + # List shows deployment exists + self.mock_client.list_service_deployments.return_value = { + "serviceDeployments": [{"serviceDeploymentArn": "deploy-arn"}] + } + # But describe fails (eventually consistent) + self.mock_client.describe_service_deployments.return_value = { + "serviceDeployments": [], + "failures": [{"arn": "deploy-arn", "reason": "MISSING"}], + } + self.mock_client.describe_services.return_value = { + "services": [{"events": [{"message": "Eventually consistent"}]}] + } + + output = collector.get_current_view("⠋") + + # Should handle eventually 
consistent missing deployment gracefully + assert "Waiting for a deployment to start" in output + + def test_eventually_consistent_missing_revision(self): + """Test eventually consistent behavior: service revision missing""" + collector = ServiceViewCollector( + self.mock_client, self.service_arn, "DEPLOYMENT" + ) + + self.mock_client.describe_express_gateway_service.return_value = { + "service": { + "serviceArn": self.service_arn, + "cluster": "my-cluster", + "activeConfigurations": [], + } + } + self.mock_client.list_service_deployments.return_value = { + "serviceDeployments": [{"serviceDeploymentArn": "deploy-arn"}] + } + self.mock_client.describe_service_deployments.return_value = { + "serviceDeployments": [ + { + "serviceDeploymentArn": "deploy-arn", + "status": "IN_PROGRESS", + "targetServiceRevision": {"arn": "target-rev"}, + } + ] + } + # Service revision missing (eventually consistent) + self.mock_client.describe_service_revisions.return_value = { + "serviceRevisions": [], + "failures": [{"arn": "target-rev", "reason": "MISSING"}], + } + self.mock_client.describe_services.return_value = { + "services": [{"events": [{"message": "Revision missing"}]}] + } + + output = collector.get_current_view("⠋") + + # Should handle eventually consistent missing revision gracefully + assert "Trying to describe service revisions" in output + + def test_eventually_consistent_mixed_failures(self): + """Test eventually consistent behavior: filters MISSING but raises for other failures""" + collector = ServiceViewCollector( + self.mock_client, self.service_arn, "DEPLOYMENT" + ) + self.mock_client.describe_express_gateway_service.return_value = { + "service": { + "serviceArn": self.service_arn, + "cluster": "my-cluster", + "activeConfigurations": [], + } + } + self.mock_client.list_service_deployments.return_value = { + "serviceDeployments": [{"serviceDeploymentArn": "deploy-arn"}] + } + self.mock_client.describe_service_deployments.return_value = { + "serviceDeployments": [ + 
{ + "serviceDeploymentArn": "deploy-arn", + "status": "IN_PROGRESS", + "targetServiceRevision": {"arn": "target-rev"}, + } + ] + } + # Mixed failures: MISSING (should be filtered) and ServiceNotFound (should raise) + self.mock_client.describe_service_revisions.return_value = { + "serviceRevisions": [], + "failures": [ + {"arn": "target-rev", "reason": "MISSING"}, + {"arn": "other-rev", "reason": "ServiceNotFound"}, + ], + } + + # Should raise error for non-MISSING failure + with pytest.raises(MonitoringError) as exc_info: + collector.get_current_view("⠋") + + error_message = str(exc_info.value) + # Should include non-MISSING failure + assert "other-rev" in error_message + assert "ServiceNotFound" in error_message + # Should NOT include MISSING failure + assert "target-rev" not in error_message + + def test_with_malformed_api_failures(self): + """Test failure parsing: malformed failure responses""" + collector = ServiceViewCollector( + self.mock_client, self.service_arn, "RESOURCE" + ) + + self.mock_client.describe_express_gateway_service.return_value = { + "service": { + "serviceArn": self.service_arn, + "cluster": "my-cluster", + "activeConfigurations": [{"serviceRevisionArn": "rev-arn"}], + } + } + # Malformed failures - missing arn or reason + self.mock_client.describe_service_revisions.return_value = { + "serviceRevisions": [], + "failures": [{"reason": "ServiceNotFound"}], # Missing arn + } + + with pytest.raises(MonitoringError) as exc_info: + collector.get_current_view("⠋") + + # Should raise MonitoringError about invalid failure response + error_message = str(exc_info.value) + assert "Invalid failure response" in error_message + assert "missing arn or reason" in error_message + + def test_with_missing_response_fields(self): + """Test response validation: missing required fields""" + collector = ServiceViewCollector( + self.mock_client, self.service_arn, "RESOURCE" + ) + + self.mock_client.describe_express_gateway_service.return_value = { + "service": { + 
"serviceArn": self.service_arn, + "cluster": "my-cluster", + "activeConfigurations": [{"serviceRevisionArn": "rev-arn"}], + } + } + # Missing serviceRevisions field + self.mock_client.describe_service_revisions.return_value = {} + + with pytest.raises(MonitoringError) as exc_info: + collector.get_current_view("⠋") + + # Should raise MonitoringError about missing field + error_message = str(exc_info.value) + assert "DescribeServiceRevisions" in error_message + assert ( + "response is" in error_message + ) # "response is missing" or "response is empty" + + def test_deployment_mode_diff_view(self): + """Test DEPLOYMENT mode shows diff of target vs source revisions""" + collector = ServiceViewCollector( + self.mock_client, self.service_arn, "DEPLOYMENT" + ) + + self.mock_client.describe_express_gateway_service.return_value = { + "service": { + "serviceArn": self.service_arn, + "cluster": "my-cluster", + "activeConfigurations": [], + } + } + self.mock_client.list_service_deployments.return_value = { + "serviceDeployments": [{"serviceDeploymentArn": "deploy-arn"}] + } + self.mock_client.describe_service_deployments.return_value = { + "serviceDeployments": [ + { + "serviceDeploymentArn": "deploy-arn", + "status": "IN_PROGRESS", + "targetServiceRevision": {"arn": "target-rev"}, + "sourceServiceRevisions": [{"arn": "source-rev"}], + } + ] + } + self.mock_client.describe_service_revisions.return_value = { + "serviceRevisions": [ + { + "arn": "target-rev", + "taskDefinition": "task-def-arn", + "ecsManagedResources": { + "ingressPaths": [ + { + "endpoint": "https://new-api.example.com", + "loadBalancer": { + "arn": "arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/new-lb/1234", + "status": "CREATING", + }, + } + ], + }, + }, + { + "arn": "source-rev", + "ecsManagedResources": { + "ingressPaths": [ + { + "endpoint": "https://old-api.example.com", + "loadBalancer": { + "arn": "arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/old-lb/5678", 
+ "status": "ACTIVE", + }, + } + ], + }, + }, + ] + } + self.mock_client.describe_services.return_value = { + "services": [{"events": [{"message": "Deployment in progress"}]}] + } + + output = collector.get_current_view("⠋") + + # Should show deployment diff + # Initially will show "Trying to describe service revisions" due to mismatch + # But implementation still shows Cluster/Service + assert "Trying to describe service revisions" in output + + def test_waiting_for_deployment_to_start(self): + """Test DEPLOYMENT mode when no deployment exists yet""" + collector = ServiceViewCollector( + self.mock_client, self.service_arn, "DEPLOYMENT" + ) + + self.mock_client.describe_express_gateway_service.return_value = { + "service": { + "serviceArn": self.service_arn, + "cluster": "my-cluster", + "activeConfigurations": [], + } + } + # No deployments + self.mock_client.list_service_deployments.return_value = { + "serviceDeployments": [] + } + self.mock_client.describe_services.return_value = { + "services": [{"events": [{"message": "No deployment"}]}] + } + + output = collector.get_current_view("⠋") + + assert "Waiting for a deployment to start" in output + + def test_deployment_missing_target_revision(self): + """Test DEPLOYMENT mode when deployment is missing target revision""" + collector = ServiceViewCollector( + self.mock_client, self.service_arn, "DEPLOYMENT" + ) + + self.mock_client.describe_express_gateway_service.return_value = { + "service": { + "serviceArn": self.service_arn, + "cluster": "my-cluster", + "activeConfigurations": [], + } + } + self.mock_client.list_service_deployments.return_value = { + "serviceDeployments": [{"serviceDeploymentArn": "deploy-arn"}] + } + self.mock_client.describe_service_deployments.return_value = { + "serviceDeployments": [ + { + "serviceDeploymentArn": "deploy-arn", + "status": "IN_PROGRESS", + # Missing targetServiceRevision + } + ] + } + self.mock_client.describe_services.return_value = { + "services": [{"events": [{"message": 
"Deployment starting"}]}] + } + + output = collector.get_current_view("⠋") + + assert "Waiting for a deployment to start" in output + + def test_missing_service_in_response(self): + """Test handling when service field is missing""" + collector = ServiceViewCollector( + self.mock_client, self.service_arn, "RESOURCE" + ) + + self.mock_client.describe_express_gateway_service.return_value = {} + + output = collector.get_current_view("⠋") + + assert "Trying to describe gateway service" in output + + def test_service_missing_required_fields(self): + """Test handling when service is missing required fields""" + collector = ServiceViewCollector( + self.mock_client, self.service_arn, "RESOURCE" + ) + + # Missing activeConfigurations + self.mock_client.describe_express_gateway_service.return_value = { + "service": {"serviceArn": self.service_arn} + } + + output = collector.get_current_view("⠋") + + assert "Trying to describe gateway service" in output + + def test_parse_all_resource_types(self): + """Test parsing all supported resource types""" + collector = ServiceViewCollector( + self.mock_client, self.service_arn, "RESOURCE" + ) + + self.mock_client.describe_express_gateway_service.return_value = { + "service": { + "serviceArn": self.service_arn, + "cluster": "my-cluster", + "activeConfigurations": [{"serviceRevisionArn": "rev-arn"}], + } + } + self.mock_client.describe_service_revisions.return_value = { + "serviceRevisions": [ + { + "arn": "rev-arn", + "ecsManagedResources": { + "ingressPaths": [ + { + "endpoint": "https://api.example.com", + "loadBalancer": { + "arn": "lb-arn", + "status": "ACTIVE", + }, + "loadBalancerSecurityGroups": [ + {"arn": "lb-sg-arn", "status": "ACTIVE"} + ], + "certificate": { + "arn": "cert-arn", + "status": "ACTIVE", + }, + "listener": { + "arn": "listener-arn", + "status": "ACTIVE", + }, + "rule": { + "arn": "rule-arn", + "status": "ACTIVE", + }, + "targetGroups": [ + {"arn": "tg-arn", "status": "ACTIVE"} + ], + } + ], + "autoScaling": { + 
"scalableTarget": { + "arn": "st-arn", + "status": "ACTIVE", + }, + "applicationAutoScalingPolicies": [ + {"arn": "policy-arn", "status": "ACTIVE"} + ], + }, + "metricAlarms": [ + {"arn": "alarm-arn", "status": "ACTIVE"} + ], + "serviceSecurityGroups": [ + {"arn": "sg-arn", "status": "ACTIVE"} + ], + "logGroups": [{"arn": "log-arn", "status": "ACTIVE"}], + }, + } + ] + } + self.mock_client.describe_services.return_value = { + "services": [{"events": [{"message": "All resources"}]}] + } + + output = collector.get_current_view("⠋") + + # Verify all resource types are parsed + assert "IngressPath" in output + assert "LoadBalancer" in output + assert "LoadBalancerSecurityGroup" in output + assert "Certificate" in output + assert "Listener" in output + assert "Rule" in output + assert "TargetGroup" in output + assert "AutoScalingConfiguration" in output + assert "ScalableTarget" in output + assert "ApplicationAutoScalingPolicy" in output + assert "MetricAlarms" in output + assert "ServiceSecurityGroups" in output + assert "LogGroups" in output diff --git a/tests/unit/customizations/emr/test_create_cluster_release_label.py b/tests/unit/customizations/emr/test_create_cluster_release_label.py index df45de1b04fd..9b39cf54ca01 100644 --- a/tests/unit/customizations/emr/test_create_cluster_release_label.py +++ b/tests/unit/customizations/emr/test_create_cluster_release_label.py @@ -1828,6 +1828,39 @@ def test_create_cluster_with_auto_termination_policy(self): } self.assert_params_for_cmd(cmd, result) + def test_create_cluster_with_monitoring_configuration(self): + cmd = ( + self.prefix + + '--release-label emr-5.34.0 ' + + '--monitoring-configuration ' + + 'CloudWatchLogConfiguration={Enabled=true,LogGroupName=MyLogGroup,' + + 'LogStreamNamePrefix=MyPrefix,EncryptionKeyArn=arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012,' + + 'LogTypes={STEP_LOGS=[STDOUT,STDERR],SPARK_DRIVER=[STDOUT],SPARK_EXECUTOR=[STDERR]}} ' + + '--instance-groups ' + + 
DEFAULT_INSTANCE_GROUPS_ARG + ) + result = { + 'Name': DEFAULT_CLUSTER_NAME, + 'Instances': DEFAULT_INSTANCES, + 'ReleaseLabel': 'emr-5.34.0', + 'VisibleToAllUsers': True, + 'Tags': [], + 'MonitoringConfiguration': { + 'CloudWatchLogConfiguration': { + 'Enabled': True, + 'LogGroupName': 'MyLogGroup', + 'LogStreamNamePrefix': 'MyPrefix', + 'EncryptionKeyArn': 'arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012', + 'LogTypes': { + 'STEP_LOGS': ['STDOUT', 'STDERR'], + 'SPARK_DRIVER': ['STDOUT'], + 'SPARK_EXECUTOR': ['STDERR'], + }, + }, + }, + } + self.assert_params_for_cmd(cmd, result) + def test_create_cluster_with_log_encryption_kms_key_id(self): test_log_uri = 's3://test/logs' test_log_encryption_kms_key_id = 'valid_kms_key' diff --git a/tests/unit/customizations/logs/test_startlivetail.py b/tests/unit/customizations/logs/test_startlivetail.py index be9aadad9a08..6ff407eb2055 100644 --- a/tests/unit/customizations/logs/test_startlivetail.py +++ b/tests/unit/customizations/logs/test_startlivetail.py @@ -17,6 +17,7 @@ from prompt_toolkit.buffer import Buffer from prompt_toolkit.key_binding import KeyPressEvent from prompt_toolkit.output import DummyOutput +from prompt_toolkit.input import create_pipe_input from awscli.compat import StringIO from awscli.customizations.logs.startlivetail import ( @@ -607,7 +608,8 @@ def setUp(self) -> None: self.log_events = [] self.session_metadata = LiveTailSessionMetadata() self.ui = InteractiveUI( - self.log_events, self.session_metadata, app_output=DummyOutput() + self.log_events, self.session_metadata, app_output=DummyOutput(), + app_input=create_pipe_input() ) def test_update_toolbar(self): diff --git a/tests/unit/customizations/test_waiters.py b/tests/unit/customizations/test_waiters.py index 32f9a0f5c615..33e00cc1d60b 100644 --- a/tests/unit/customizations/test_waiters.py +++ b/tests/unit/customizations/test_waiters.py @@ -194,7 +194,7 @@ def test_dynamodb_table_exists(self): self.parsed_response = 
{'Table': {'TableStatus': 'ACTIVE'}} self.assert_params_for_cmd(cmdline, result) - def test_elastictranscoder_jobs_complete(self): + def test_rds_jobs_complete(self): cmdline = 'rds wait db-instance-available' cmdline += ' --db-instance-identifier abc' result = {'DBInstanceIdentifier': 'abc'}