Commit 0119570

Updated API models and rebuilt service gems.
1 parent: 6eaf989

97 files changed: +18695 −293 lines changed


README.md

Lines changed: 1 addition & 0 deletions
@@ -689,6 +689,7 @@ RubyGems.org page under "LINKS" section.
 | IAM Roles Anywhere | Aws::RolesAnywhere | aws-sdk-rolesanywhere | 2018-05-10 |
 | Inspector Scan | Aws::InspectorScan | aws-sdk-inspectorscan | 2023-08-08 |
 | Inspector2 | Aws::Inspector2 | aws-sdk-inspector2 | 2020-06-08 |
+| MailManager | Aws::MailManager | aws-sdk-mailmanager | 2023-10-17 |
 | Managed Streaming for Kafka | Aws::Kafka | aws-sdk-kafka | 2018-11-14 |
 | Managed Streaming for Kafka Connect | Aws::KafkaConnect | aws-sdk-kafkaconnect | 2021-09-14 |
 | Migration Hub Strategy Recommendations | Aws::MigrationHubStrategyRecommendations | aws-sdk-migrationhubstrategyrecommendations | 2020-02-19 |

apis/cloudfront/2020-05-31/api-2.json

Lines changed: 1 addition & 0 deletions
@@ -5,6 +5,7 @@
     "endpointPrefix":"cloudfront",
     "globalEndpoint":"cloudfront.amazonaws.com",
     "protocol":"rest-xml",
+    "protocols":["rest-xml"],
     "serviceAbbreviation":"CloudFront",
     "serviceFullName":"Amazon CloudFront",
     "serviceId":"CloudFront",

apis/glue/2017-03-31/api-2.json

Lines changed: 15 additions & 5 deletions
@@ -5,6 +5,7 @@
     "endpointPrefix":"glue",
     "jsonVersion":"1.1",
     "protocol":"json",
+    "protocols":["json"],
     "serviceFullName":"AWS Glue",
     "serviceId":"Glue",
     "signatureVersion":"v4",
@@ -5428,7 +5429,8 @@
         "WorkerType":{"shape":"WorkerType"},
         "CodeGenConfigurationNodes":{"shape":"CodeGenConfigurationNodes"},
         "ExecutionClass":{"shape":"ExecutionClass"},
-        "SourceControlDetails":{"shape":"SourceControlDetails"}
+        "SourceControlDetails":{"shape":"SourceControlDetails"},
+        "MaintenanceWindow":{"shape":"MaintenanceWindow"}
       }
     },
     "CreateJobResponse":{
@@ -9015,7 +9017,8 @@
         "GlueVersion":{"shape":"GlueVersionString"},
         "CodeGenConfigurationNodes":{"shape":"CodeGenConfigurationNodes"},
         "ExecutionClass":{"shape":"ExecutionClass"},
-        "SourceControlDetails":{"shape":"SourceControlDetails"}
+        "SourceControlDetails":{"shape":"SourceControlDetails"},
+        "MaintenanceWindow":{"shape":"MaintenanceWindow"}
       }
     },
     "JobBookmarkEntry":{
@@ -9098,7 +9101,8 @@
         "NotificationProperty":{"shape":"NotificationProperty"},
         "GlueVersion":{"shape":"GlueVersionString"},
         "DPUSeconds":{"shape":"NullableDouble"},
-        "ExecutionClass":{"shape":"ExecutionClass"}
+        "ExecutionClass":{"shape":"ExecutionClass"},
+        "MaintenanceWindow":{"shape":"MaintenanceWindow"}
       }
     },
     "JobRunList":{
@@ -9116,7 +9120,8 @@
         "FAILED",
         "TIMEOUT",
         "ERROR",
-        "WAITING"
+        "WAITING",
+        "EXPIRED"
       ]
     },
     "JobUpdate":{
@@ -9145,7 +9150,8 @@
         "GlueVersion":{"shape":"GlueVersionString"},
         "CodeGenConfigurationNodes":{"shape":"CodeGenConfigurationNodes"},
         "ExecutionClass":{"shape":"ExecutionClass"},
-        "SourceControlDetails":{"shape":"SourceControlDetails"}
+        "SourceControlDetails":{"shape":"SourceControlDetails"},
+        "MaintenanceWindow":{"shape":"MaintenanceWindow"}
       }
     },
     "Join":{
@@ -9781,6 +9787,10 @@
         "SSE-KMS"
       ]
     },
+    "MaintenanceWindow":{
+      "type":"string",
+      "pattern":"^(Sun|Mon|Tue|Wed|Thu|Fri|Sat):([01]?[0-9]|2[0-3])$"
+    },
     "ManyInputs":{
       "type":"list",
       "member":{"shape":"NodeId"},

apis/glue/2017-03-31/docs-2.json

Lines changed: 11 additions & 2 deletions
@@ -5418,6 +5418,15 @@
         "MLUserDataEncryption$MlUserDataEncryptionMode": "<p>The encryption mode applied to user data. Valid values are:</p> <ul> <li> <p>DISABLED: encryption is disabled</p> </li> <li> <p>SSEKMS: use of server-side encryption with Key Management Service (SSE-KMS) for user data stored in Amazon S3.</p> </li> </ul>"
       }
     },
+    "MaintenanceWindow": {
+      "base": null,
+      "refs": {
+        "CreateJobRequest$MaintenanceWindow": "<p>This field specifies a day of the week and hour for a maintenance window for streaming jobs. Glue periodically performs maintenance activities. During these maintenance windows, Glue will need to restart your streaming jobs.</p> <p>Glue will restart the job within 3 hours of the specified maintenance window. For instance, if you set up the maintenance window for Monday at 10:00AM GMT, your jobs will be restarted between 10:00AM GMT to 1:00PM GMT.</p>",
+        "Job$MaintenanceWindow": "<p>This field specifies a day of the week and hour for a maintenance window for streaming jobs. Glue periodically performs maintenance activities. During these maintenance windows, Glue will need to restart your streaming jobs.</p> <p>Glue will restart the job within 3 hours of the specified maintenance window. For instance, if you set up the maintenance window for Monday at 10:00AM GMT, your jobs will be restarted between 10:00AM GMT to 1:00PM GMT.</p>",
+        "JobRun$MaintenanceWindow": "<p>This field specifies a day of the week and hour for a maintenance window for streaming jobs. Glue periodically performs maintenance activities. During these maintenance windows, Glue will need to restart your streaming jobs.</p> <p>Glue will restart the job within 3 hours of the specified maintenance window. For instance, if you set up the maintenance window for Monday at 10:00AM GMT, your jobs will be restarted between 10:00AM GMT to 1:00PM GMT.</p>",
+        "JobUpdate$MaintenanceWindow": "<p>This field specifies a day of the week and hour for a maintenance window for streaming jobs. Glue periodically performs maintenance activities. During these maintenance windows, Glue will need to restart your streaming jobs.</p> <p>Glue will restart the job within 3 hours of the specified maintenance window. For instance, if you set up the maintenance window for Monday at 10:00AM GMT, your jobs will be restarted between 10:00AM GMT to 1:00PM GMT.</p>"
+      }
+    },
     "ManyInputs": {
       "base": null,
       "refs": {
@@ -6287,7 +6296,7 @@
         "GetMLTransformResponse$MaxCapacity": "<p>The number of Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the <a href=\"https://aws.amazon.com/glue/pricing/\">Glue pricing page</a>. </p> <p>When the <code>WorkerType</code> field is set to a value other than <code>Standard</code>, the <code>MaxCapacity</code> field is set automatically and becomes read-only.</p>",
         "Job$MaxCapacity": "<p>For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the <a href=\"https://aws.amazon.com/glue/pricing/\"> Glue pricing page</a>.</p> <p>For Glue version 2.0 or later jobs, you cannot specify a <code>Maximum capacity</code>. Instead, you should specify a <code>Worker type</code> and the <code>Number of workers</code>.</p> <p>Do not set <code>MaxCapacity</code> if using <code>WorkerType</code> and <code>NumberOfWorkers</code>.</p> <p>The value that can be allocated for <code>MaxCapacity</code> depends on whether you are running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job:</p> <ul> <li> <p>When you specify a Python shell job (<code>JobCommand.Name</code>=\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.</p> </li> <li> <p>When you specify an Apache Spark ETL job (<code>JobCommand.Name</code>=\"glueetl\") or Apache Spark streaming ETL job (<code>JobCommand.Name</code>=\"gluestreaming\"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.</p> </li> </ul>",
         "JobRun$MaxCapacity": "<p>For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the <a href=\"https://aws.amazon.com/glue/pricing/\"> Glue pricing page</a>.</p> <p>For Glue version 2.0+ jobs, you cannot specify a <code>Maximum capacity</code>. Instead, you should specify a <code>Worker type</code> and the <code>Number of workers</code>.</p> <p>Do not set <code>MaxCapacity</code> if using <code>WorkerType</code> and <code>NumberOfWorkers</code>.</p> <p>The value that can be allocated for <code>MaxCapacity</code> depends on whether you are running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job:</p> <ul> <li> <p>When you specify a Python shell job (<code>JobCommand.Name</code>=\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.</p> </li> <li> <p>When you specify an Apache Spark ETL job (<code>JobCommand.Name</code>=\"glueetl\") or Apache Spark streaming ETL job (<code>JobCommand.Name</code>=\"gluestreaming\"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.</p> </li> </ul>",
-        "JobRun$DPUSeconds": "<p>This field populates only for Auto Scaling job runs, and represents the total time each executor ran during the lifecycle of a job run in seconds, multiplied by a DPU factor (1 for <code>G.1X</code>, 2 for <code>G.2X</code>, or 0.25 for <code>G.025X</code> workers). This value may be different than the <code>executionEngineRuntime</code> * <code>MaxCapacity</code> as in the case of Auto Scaling jobs, as the number of executors running at a given time may be less than the <code>MaxCapacity</code>. Therefore, it is possible that the value of <code>DPUSeconds</code> is less than <code>executionEngineRuntime</code> * <code>MaxCapacity</code>.</p>",
+        "JobRun$DPUSeconds": "<p>This field can be set for either job runs with execution class <code>FLEX</code> or when Auto Scaling is enabled, and represents the total time each executor ran during the lifecycle of a job run in seconds, multiplied by a DPU factor (1 for <code>G.1X</code>, 2 for <code>G.2X</code>, or 0.25 for <code>G.025X</code> workers). This value may be different than the <code>executionEngineRuntime</code> * <code>MaxCapacity</code> as in the case of Auto Scaling jobs, as the number of executors running at a given time may be less than the <code>MaxCapacity</code>. Therefore, it is possible that the value of <code>DPUSeconds</code> is less than <code>executionEngineRuntime</code> * <code>MaxCapacity</code>.</p>",
         "JobUpdate$MaxCapacity": "<p>For Glue version 1.0 or earlier jobs, using the standard worker type, the number of Glue data processing units (DPUs) that can be allocated when this job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the <a href=\"https://aws.amazon.com/glue/pricing/\"> Glue pricing page</a>.</p> <p>For Glue version 2.0+ jobs, you cannot specify a <code>Maximum capacity</code>. Instead, you should specify a <code>Worker type</code> and the <code>Number of workers</code>.</p> <p>Do not set <code>MaxCapacity</code> if using <code>WorkerType</code> and <code>NumberOfWorkers</code>.</p> <p>The value that can be allocated for <code>MaxCapacity</code> depends on whether you are running a Python shell job, an Apache Spark ETL job, or an Apache Spark streaming ETL job:</p> <ul> <li> <p>When you specify a Python shell job (<code>JobCommand.Name</code>=\"pythonshell\"), you can allocate either 0.0625 or 1 DPU. The default is 0.0625 DPU.</p> </li> <li> <p>When you specify an Apache Spark ETL job (<code>JobCommand.Name</code>=\"glueetl\") or Apache Spark streaming ETL job (<code>JobCommand.Name</code>=\"gluestreaming\"), you can allocate from 2 to 100 DPUs. The default is 10 DPUs. This job type cannot have a fractional DPU allocation.</p> </li> </ul>",
         "MLTransform$MaxCapacity": "<p>The number of Glue data processing units (DPUs) that are allocated to task runs for this transform. You can allocate from 2 to 100 DPUs; the default is 10. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB of memory. For more information, see the <a href=\"http://aws.amazon.com/glue/pricing/\">Glue pricing page</a>. </p> <p> <code>MaxCapacity</code> is a mutually exclusive option with <code>NumberOfWorkers</code> and <code>WorkerType</code>.</p> <ul> <li> <p>If either <code>NumberOfWorkers</code> or <code>WorkerType</code> is set, then <code>MaxCapacity</code> cannot be set.</p> </li> <li> <p>If <code>MaxCapacity</code> is set then neither <code>NumberOfWorkers</code> or <code>WorkerType</code> can be set.</p> </li> <li> <p>If <code>WorkerType</code> is set, then <code>NumberOfWorkers</code> is required (and vice versa).</p> </li> <li> <p> <code>MaxCapacity</code> and <code>NumberOfWorkers</code> must both be at least 1.</p> </li> </ul> <p>When the <code>WorkerType</code> field is set to a value other than <code>Standard</code>, the <code>MaxCapacity</code> field is set automatically and becomes read-only.</p>",
         "Session$MaxCapacity": "<p>The number of Glue data processing units (DPUs) that can be allocated when the job runs. A DPU is a relative measure of processing power that consists of 4 vCPUs of compute capacity and 16 GB memory. </p>",
@@ -8400,7 +8409,7 @@
         "GetDataQualityRulesetEvaluationRunResponse$Timeout": "<p>The timeout for a run in minutes. This is the maximum time that a run can consume resources before it is terminated and enters <code>TIMEOUT</code> status. The default is 2,880 minutes (48 hours).</p>",
         "GetMLTransformResponse$Timeout": "<p>The timeout for a task run for this transform in minutes. This is the maximum time that a task run for this transform can consume resources before it is terminated and enters <code>TIMEOUT</code> status. The default is 2,880 minutes (48 hours).</p>",
         "Job$Timeout": "<p>The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters <code>TIMEOUT</code> status. The default is 2,880 minutes (48 hours).</p>",
-        "JobRun$Timeout": "<p>The <code>JobRun</code> timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters <code>TIMEOUT</code> status. This value overrides the timeout value set in the parent job.</p> <p>Streaming jobs do not have a timeout. The default for non-streaming jobs is 2,880 minutes (48 hours).</p>",
+        "JobRun$Timeout": "<p>The <code>JobRun</code> timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters <code>TIMEOUT</code> status. This value overrides the timeout value set in the parent job.</p> <p>The maximum value for timeout for batch jobs is 7 days or 10080 minutes. The default is 2880 minutes (48 hours) for batch jobs.</p> <p>Any existing Glue jobs that have a greater timeout value are defaulted to 7 days. For instance you have specified a timeout of 20 days for a batch job, it will be stopped on the 7th day.</p> <p>Streaming jobs must have timeout values less than 7 days or 10080 minutes. When the value is left blank, the job will be restarted after 7 days based if you have not setup a maintenance window. If you have setup maintenance window, it will be restarted during the maintenance window after 7 days.</p>",
         "JobUpdate$Timeout": "<p>The job timeout in minutes. This is the maximum time that a job run can consume resources before it is terminated and enters <code>TIMEOUT</code> status. The default is 2,880 minutes (48 hours).</p>",
         "MLTransform$Timeout": "<p>The timeout in minutes of the machine learning transform.</p>",
         "StartDataQualityRuleRecommendationRunRequest$Timeout": "<p>The timeout for a run in minutes. This is the maximum time that a run can consume resources before it is terminated and enters <code>TIMEOUT</code> status. The default is 2,880 minutes (48 hours).</p>",

apis/lightsail/2016-11-28/api-2.json

Lines changed: 7 additions & 3 deletions
@@ -5,6 +5,7 @@
     "endpointPrefix":"lightsail",
     "jsonVersion":"1.1",
     "protocol":"json",
+    "protocols":["json"],
     "serviceFullName":"Amazon Lightsail",
     "serviceId":"Lightsail",
     "signatureVersion":"v4",
@@ -6335,7 +6336,8 @@
       "type":"string",
       "enum":[
         "dualstack",
-        "ipv4"
+        "ipv4",
+        "ipv6"
       ]
     },
     "Ipv6Address":{
@@ -6786,7 +6788,8 @@
         "tcp",
         "all",
         "udp",
-        "icmp"
+        "icmp",
+        "icmpv6"
       ]
     },
     "NonEmptyString":{
@@ -7596,7 +7599,8 @@
       "members":{
         "resourceType":{"shape":"ResourceType"},
         "resourceName":{"shape":"ResourceName"},
-        "ipAddressType":{"shape":"IpAddressType"}
+        "ipAddressType":{"shape":"IpAddressType"},
+        "acceptBundleUpdate":{"shape":"boolean"}
       }
     },
     "SetIpAddressTypeResult":{
