Skip to content

Commit 80b99f4

Browse files
Daily Sync with Botocore v1.42.49 on 2026/02/16 (#363)
1 parent 17797d8 commit 80b99f4

File tree

4 files changed

+293
-8
lines changed

4 files changed

+293
-8
lines changed

sample/sagemaker/2017-07-24/service-2.json

Lines changed: 155 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -8937,6 +8937,12 @@
89378937
"Disable"
89388938
]
89398939
},
8940+
"ClusterDnsName":{
8941+
"type":"string",
8942+
"max":275,
8943+
"min":16,
8944+
"pattern":"((fs|fc)i?-[0-9a-f]{8,}\\..{4,253})"
8945+
},
89408946
"ClusterEbsVolumeConfig":{
89418947
"type":"structure",
89428948
"members":{
@@ -9076,6 +9082,49 @@
90769082
},
90779083
"documentation":"<p>A summary of an event in a HyperPod cluster.</p>"
90789084
},
9085+
"ClusterFsxLustreConfig":{
9086+
"type":"structure",
9087+
"required":[
9088+
"DnsName",
9089+
"MountName"
9090+
],
9091+
"members":{
9092+
"DnsName":{
9093+
"shape":"ClusterDnsName",
9094+
"documentation":"<p>The DNS name of the Amazon FSx for Lustre file system.</p>"
9095+
},
9096+
"MountName":{
9097+
"shape":"ClusterMountName",
9098+
"documentation":"<p>The mount name of the Amazon FSx for Lustre file system.</p>"
9099+
},
9100+
"MountPath":{
9101+
"shape":"ClusterFsxMountPath",
9102+
"documentation":"<p>The local path where the Amazon FSx for Lustre file system is mounted on instances.</p>"
9103+
}
9104+
},
9105+
"documentation":"<p>Defines the configuration for attaching an Amazon FSx for Lustre file system to instances in a SageMaker HyperPod cluster instance group.</p>"
9106+
},
9107+
"ClusterFsxMountPath":{
9108+
"type":"string",
9109+
"max":1024,
9110+
"min":1,
9111+
"pattern":"/[a-zA-Z0-9._/-]+"
9112+
},
9113+
"ClusterFsxOpenZfsConfig":{
9114+
"type":"structure",
9115+
"required":["DnsName"],
9116+
"members":{
9117+
"DnsName":{
9118+
"shape":"ClusterDnsName",
9119+
"documentation":"<p>The DNS name of the Amazon FSx for OpenZFS file system.</p>"
9120+
},
9121+
"MountPath":{
9122+
"shape":"ClusterFsxMountPath",
9123+
"documentation":"<p>The local path where the Amazon FSx for OpenZFS file system is mounted on instances.</p>"
9124+
}
9125+
},
9126+
"documentation":"<p>Defines the configuration for attaching an Amazon FSx for OpenZFS file system to instances in a SageMaker HyperPod cluster instance group.</p>"
9127+
},
90799128
"ClusterInstanceCount":{
90809129
"type":"integer",
90819130
"box":true,
@@ -9173,7 +9222,11 @@
91739222
"shape":"SoftwareUpdateStatus",
91749223
"documentation":"<p>Status of the last software update request.</p> <p>Status transitions follow these possible sequences:</p> <ul> <li> <p>Pending -&gt; InProgress -&gt; Succeeded</p> </li> <li> <p>Pending -&gt; InProgress -&gt; RollbackInProgress -&gt; RollbackComplete</p> </li> <li> <p>Pending -&gt; InProgress -&gt; RollbackInProgress -&gt; Failed</p> </li> </ul>"
91759224
},
9176-
"ActiveSoftwareUpdateConfig":{"shape":"DeploymentConfiguration"}
9225+
"ActiveSoftwareUpdateConfig":{"shape":"DeploymentConfiguration"},
9226+
"SlurmConfig":{
9227+
"shape":"ClusterSlurmConfigDetails",
9228+
"documentation":"<p>The Slurm configuration for the instance group.</p>"
9229+
}
91779230
},
91789231
"documentation":"<p>Details of an instance group in a SageMaker HyperPod cluster.</p>"
91799232
},
@@ -9253,6 +9306,10 @@
92539306
"shape":"ClusterKubernetesConfig",
92549307
"documentation":"<p>Specifies the Kubernetes configuration for the instance group. You describe what you want the labels and taints to look like, and the cluster works to reconcile the actual state with the declared state for nodes in this instance group. </p>"
92559308
},
9309+
"SlurmConfig":{
9310+
"shape":"ClusterSlurmConfig",
9311+
"documentation":"<p>Specifies the Slurm configuration for the instance group.</p>"
9312+
},
92569313
"CapacityRequirements":{
92579314
"shape":"ClusterCapacityRequirements",
92589315
"documentation":"<p>Specifies the capacity requirements for the instance group.</p>"
@@ -9325,6 +9382,14 @@
93259382
"EbsVolumeConfig":{
93269383
"shape":"ClusterEbsVolumeConfig",
93279384
"documentation":"<p>Defines the configuration for attaching additional Amazon Elastic Block Store (EBS) volumes to the instances in the SageMaker HyperPod cluster instance group. The additional EBS volume is attached to each instance within the SageMaker HyperPod cluster instance group and mounted to <code>/opt/sagemaker</code>.</p>"
9385+
},
9386+
"FsxLustreConfig":{
9387+
"shape":"ClusterFsxLustreConfig",
9388+
"documentation":"<p>Defines the configuration for attaching an Amazon FSx for Lustre file system to the instances in the SageMaker HyperPod cluster instance group.</p>"
9389+
},
9390+
"FsxOpenZfsConfig":{
9391+
"shape":"ClusterFsxOpenZfsConfig",
9392+
"documentation":"<p>Defines the configuration for attaching an Amazon FSx for OpenZFS file system to the instances in the SageMaker HyperPod cluster instance group.</p>"
93289393
}
93299394
},
93309395
"documentation":"<p>Defines the configuration for attaching additional storage to the instances in the SageMaker HyperPod cluster instance group. To learn more, see <a href=\"https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-hyperpod-release-notes.html#sagemaker-hyperpod-release-notes-20240620\">SageMaker HyperPod release notes: June 20, 2024</a>.</p>",
@@ -9333,7 +9398,7 @@
93339398
"ClusterInstanceStorageConfigs":{
93349399
"type":"list",
93359400
"member":{"shape":"ClusterInstanceStorageConfig"},
9336-
"max":2,
9401+
"max":4,
93379402
"min":0
93389403
},
93399404
"ClusterInstanceType":{
@@ -9624,6 +9689,12 @@
96249689
},
96259690
"documentation":"<p>Metadata information about a HyperPod cluster showing information about the cluster level operations, such as creating, updating, and deleting.</p>"
96269691
},
9692+
"ClusterMountName":{
9693+
"type":"string",
9694+
"max":8,
9695+
"min":1,
9696+
"pattern":"([A-Za-z0-9_-]{1,8})"
9697+
},
96279698
"ClusterName":{
96289699
"type":"string",
96299700
"max":63,
@@ -9826,6 +9897,10 @@
98269897
"Eks":{
98279898
"shape":"ClusterOrchestratorEksConfig",
98289899
"documentation":"<p>The Amazon EKS cluster used as the orchestrator for the SageMaker HyperPod cluster.</p>"
9900+
},
9901+
"Slurm":{
9902+
"shape":"ClusterOrchestratorSlurmConfig",
9903+
"documentation":"<p>The Slurm orchestrator configuration for the SageMaker HyperPod cluster.</p>"
98299904
}
98309905
},
98319906
"documentation":"<p>The type of orchestrator used for the SageMaker HyperPod cluster.</p>"
@@ -9841,6 +9916,28 @@
98419916
},
98429917
"documentation":"<p>The configuration settings for the Amazon EKS cluster used as the orchestrator for the SageMaker HyperPod cluster.</p>"
98439918
},
9919+
"ClusterOrchestratorSlurmConfig":{
9920+
"type":"structure",
9921+
"members":{
9922+
"SlurmConfigStrategy":{
9923+
"shape":"ClusterSlurmConfigStrategy",
9924+
"documentation":"<p>The strategy for managing partitions for the Slurm configuration. Valid values are <code>Managed</code>, <code>Overwrite</code>, and <code>Merge</code>.</p>"
9925+
}
9926+
},
9927+
"documentation":"<p>The configuration settings for the Slurm orchestrator used with the SageMaker HyperPod cluster.</p>"
9928+
},
9929+
"ClusterPartitionName":{
9930+
"type":"string",
9931+
"max":1024,
9932+
"min":0,
9933+
"pattern":"[a-zA-Z0-9](-*[a-zA-Z0-9])*"
9934+
},
9935+
"ClusterPartitionNames":{
9936+
"type":"list",
9937+
"member":{"shape":"ClusterPartitionName"},
9938+
"max":1,
9939+
"min":0
9940+
},
98449941
"ClusterPrivateDnsHostname":{
98459942
"type":"string",
98469943
"pattern":"ip-((25[0-5]|(2[0-4]|1\\d|[1-9]|)\\d)-?\\b){4}\\..*"
@@ -10035,6 +10132,52 @@
1003510132
"type":"string",
1003610133
"pattern":"[a-z0-9]([-a-z0-9]*[a-z0-9]){0,39}?"
1003710134
},
10135+
"ClusterSlurmConfig":{
10136+
"type":"structure",
10137+
"required":["NodeType"],
10138+
"members":{
10139+
"NodeType":{
10140+
"shape":"ClusterSlurmNodeType",
10141+
"documentation":"<p>The type of Slurm node for the instance group. Valid values are <code>Controller</code>, <code>Login</code>, and <code>Compute</code>.</p>"
10142+
},
10143+
"PartitionNames":{
10144+
"shape":"ClusterPartitionNames",
10145+
"documentation":"<p>The list of Slurm partition names that the instance group belongs to.</p>"
10146+
}
10147+
},
10148+
"documentation":"<p>The Slurm configuration for an instance group in a SageMaker HyperPod cluster.</p>"
10149+
},
10150+
"ClusterSlurmConfigDetails":{
10151+
"type":"structure",
10152+
"required":["NodeType"],
10153+
"members":{
10154+
"NodeType":{
10155+
"shape":"ClusterSlurmNodeType",
10156+
"documentation":"<p>The type of Slurm node for the instance group. Valid values are <code>Controller</code>, <code>Login</code>, and <code>Compute</code>.</p>"
10157+
},
10158+
"PartitionNames":{
10159+
"shape":"ClusterPartitionNames",
10160+
"documentation":"<p>The list of Slurm partition names that the instance group belongs to.</p>"
10161+
}
10162+
},
10163+
"documentation":"<p>The Slurm configuration details for an instance group in a SageMaker HyperPod cluster.</p>"
10164+
},
10165+
"ClusterSlurmConfigStrategy":{
10166+
"type":"string",
10167+
"enum":[
10168+
"Overwrite",
10169+
"Managed",
10170+
"Merge"
10171+
]
10172+
},
10173+
"ClusterSlurmNodeType":{
10174+
"type":"string",
10175+
"enum":[
10176+
"Controller",
10177+
"Login",
10178+
"Compute"
10179+
]
10180+
},
1003810181
"ClusterSortBy":{
1003910182
"type":"string",
1004010183
"enum":[
@@ -11301,7 +11444,7 @@
1130111444
},
1130211445
"Orchestrator":{
1130311446
"shape":"ClusterOrchestrator",
11304-
"documentation":"<p>The type of orchestrator to use for the SageMaker HyperPod cluster. Currently, the only supported value is <code>\"eks\"</code>, which is to use an Amazon Elastic Kubernetes Service cluster as the orchestrator.</p>"
11447+
"documentation":"<p>The type of orchestrator to use for the SageMaker HyperPod cluster. Currently, the supported values are <code>\"Eks\"</code> and <code>\"Slurm\"</code>, which specify whether to use an Amazon Elastic Kubernetes Service cluster or a Slurm cluster as the orchestrator.</p> <note> <p>If you specify the <code>Orchestrator</code> field, you must provide exactly one orchestrator configuration: either <code>Eks</code> or <code>Slurm</code>. Specifying both or providing an empty configuration returns a validation error.</p> </note>"
1130511448
},
1130611449
"NodeRecovery":{
1130711450
"shape":"ClusterNodeRecovery",
@@ -37478,7 +37621,13 @@
3747837621
"ml.r7i.16xlarge",
3747937622
"ml.r7i.24xlarge",
3748037623
"ml.r7i.48xlarge",
37481-
"ml.p5.4xlarge"
37624+
"ml.p5.4xlarge",
37625+
"ml.g7e.2xlarge",
37626+
"ml.g7e.4xlarge",
37627+
"ml.g7e.8xlarge",
37628+
"ml.g7e.12xlarge",
37629+
"ml.g7e.24xlarge",
37630+
"ml.g7e.48xlarge"
3748237631
]
3748337632
},
3748437633
"ProcessingJob":{
@@ -45528,7 +45677,8 @@
4552845677
"AutoScaling":{
4552945678
"shape":"ClusterAutoScalingConfig",
4553045679
"documentation":"<p>Updates the autoscaling configuration for the cluster. Use to enable or disable automatic node scaling.</p>"
45531-
}
45680+
},
45681+
"Orchestrator":{"shape":"ClusterOrchestrator"}
4553245682
}
4553345683
},
4553445684
"UpdateClusterResponse":{

src/sagemaker_core/main/code_injection/shape_dag.py

Lines changed: 50 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1718,6 +1718,21 @@
17181718
],
17191719
"type": "structure",
17201720
},
1721+
"ClusterFsxLustreConfig": {
1722+
"members": [
1723+
{"name": "DnsName", "shape": "ClusterDnsName", "type": "string"},
1724+
{"name": "MountName", "shape": "ClusterMountName", "type": "string"},
1725+
{"name": "MountPath", "shape": "ClusterFsxMountPath", "type": "string"},
1726+
],
1727+
"type": "structure",
1728+
},
1729+
"ClusterFsxOpenZfsConfig": {
1730+
"members": [
1731+
{"name": "DnsName", "shape": "ClusterDnsName", "type": "string"},
1732+
{"name": "MountPath", "shape": "ClusterFsxMountPath", "type": "string"},
1733+
],
1734+
"type": "structure",
1735+
},
17211736
"ClusterInstanceGroupDetails": {
17221737
"members": [
17231738
{"name": "CurrentCount", "shape": "ClusterNonNegativeInstanceCount", "type": "integer"},
@@ -1767,6 +1782,7 @@
17671782
"shape": "DeploymentConfiguration",
17681783
"type": "structure",
17691784
},
1785+
{"name": "SlurmConfig", "shape": "ClusterSlurmConfigDetails", "type": "structure"},
17701786
],
17711787
"type": "structure",
17721788
},
@@ -1799,6 +1815,7 @@
17991815
},
18001816
{"name": "ImageId", "shape": "ImageId", "type": "string"},
18011817
{"name": "KubernetesConfig", "shape": "ClusterKubernetesConfig", "type": "structure"},
1818+
{"name": "SlurmConfig", "shape": "ClusterSlurmConfig", "type": "structure"},
18021819
{
18031820
"name": "CapacityRequirements",
18041821
"shape": "ClusterCapacityRequirements",
@@ -1833,7 +1850,9 @@
18331850
},
18341851
"ClusterInstanceStorageConfig": {
18351852
"members": [
1836-
{"name": "EbsVolumeConfig", "shape": "ClusterEbsVolumeConfig", "type": "structure"}
1853+
{"name": "EbsVolumeConfig", "shape": "ClusterEbsVolumeConfig", "type": "structure"},
1854+
{"name": "FsxLustreConfig", "shape": "ClusterFsxLustreConfig", "type": "structure"},
1855+
{"name": "FsxOpenZfsConfig", "shape": "ClusterFsxOpenZfsConfig", "type": "structure"},
18371856
],
18381857
"type": "structure",
18391858
},
@@ -1970,13 +1989,27 @@
19701989
},
19711990
"ClusterOnDemandOptions": {"members": [], "type": "structure"},
19721991
"ClusterOrchestrator": {
1973-
"members": [{"name": "Eks", "shape": "ClusterOrchestratorEksConfig", "type": "structure"}],
1992+
"members": [
1993+
{"name": "Eks", "shape": "ClusterOrchestratorEksConfig", "type": "structure"},
1994+
{"name": "Slurm", "shape": "ClusterOrchestratorSlurmConfig", "type": "structure"},
1995+
],
19741996
"type": "structure",
19751997
},
19761998
"ClusterOrchestratorEksConfig": {
19771999
"members": [{"name": "ClusterArn", "shape": "EksClusterArn", "type": "string"}],
19782000
"type": "structure",
19792001
},
2002+
"ClusterOrchestratorSlurmConfig": {
2003+
"members": [
2004+
{"name": "SlurmConfigStrategy", "shape": "ClusterSlurmConfigStrategy", "type": "string"}
2005+
],
2006+
"type": "structure",
2007+
},
2008+
"ClusterPartitionNames": {
2009+
"member_shape": "ClusterPartitionName",
2010+
"member_type": "string",
2011+
"type": "list",
2012+
},
19802013
"ClusterRestrictedInstanceGroupDetails": {
19812014
"members": [
19822015
{"name": "CurrentCount", "shape": "ClusterNonNegativeInstanceCount", "type": "integer"},
@@ -2068,6 +2101,20 @@
20682101
"member_type": "structure",
20692102
"type": "list",
20702103
},
2104+
"ClusterSlurmConfig": {
2105+
"members": [
2106+
{"name": "NodeType", "shape": "ClusterSlurmNodeType", "type": "string"},
2107+
{"name": "PartitionNames", "shape": "ClusterPartitionNames", "type": "list"},
2108+
],
2109+
"type": "structure",
2110+
},
2111+
"ClusterSlurmConfigDetails": {
2112+
"members": [
2113+
{"name": "NodeType", "shape": "ClusterSlurmNodeType", "type": "string"},
2114+
{"name": "PartitionNames", "shape": "ClusterPartitionNames", "type": "list"},
2115+
],
2116+
"type": "structure",
2117+
},
20712118
"ClusterSpotOptions": {"members": [], "type": "structure"},
20722119
"ClusterSummaries": {
20732120
"member_shape": "ClusterSummary",
@@ -16574,6 +16621,7 @@
1657416621
},
1657516622
{"name": "ClusterRole", "shape": "RoleArn", "type": "string"},
1657616623
{"name": "AutoScaling", "shape": "ClusterAutoScalingConfig", "type": "structure"},
16624+
{"name": "Orchestrator", "shape": "ClusterOrchestrator", "type": "structure"},
1657716625
],
1657816626
"type": "structure",
1657916627
},

src/sagemaker_core/main/resources.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3314,7 +3314,7 @@ def create(
33143314
restricted_instance_groups: The specialized instance groups for training models like Amazon Nova to be created in the SageMaker HyperPod cluster.
33153315
vpc_config: Specifies the Amazon Virtual Private Cloud (VPC) that is associated with the Amazon SageMaker HyperPod cluster. You can control access to and from your resources by configuring your VPC. For more information, see Give SageMaker access to resources in your Amazon VPC. When your Amazon VPC and subnets support IPv6, network communications differ based on the cluster orchestration platform: Slurm-orchestrated clusters automatically configure nodes with dual IPv6 and IPv4 addresses, allowing immediate IPv6 network communications. In Amazon EKS-orchestrated clusters, nodes receive dual-stack addressing, but pods can only use IPv6 when the Amazon EKS cluster is explicitly IPv6-enabled. For information about deploying an IPv6 Amazon EKS cluster, see Amazon EKS IPv6 Cluster Deployment. Additional resources for IPv6 configuration: For information about adding IPv6 support to your VPC, see to IPv6 Support for VPC. For information about creating a new IPv6-compatible VPC, see Amazon VPC Creation Guide. To configure SageMaker HyperPod with a custom Amazon VPC, see Custom Amazon VPC Setup for SageMaker HyperPod.
33163316
tags: Custom tags for managing the SageMaker HyperPod cluster as an Amazon Web Services resource. You can add tags to your cluster in the same way you add them in other Amazon Web Services services that support tagging. To learn more about tagging Amazon Web Services resources in general, see Tagging Amazon Web Services Resources User Guide.
3317-
orchestrator: The type of orchestrator to use for the SageMaker HyperPod cluster. Currently, the only supported value is "eks", which is to use an Amazon Elastic Kubernetes Service cluster as the orchestrator.
3317+
orchestrator: The type of orchestrator to use for the SageMaker HyperPod cluster. Currently, the supported values are "Eks" and "Slurm", which specify whether to use an Amazon Elastic Kubernetes Service cluster or a Slurm cluster as the orchestrator. If you specify the Orchestrator field, you must provide exactly one orchestrator configuration: either Eks or Slurm. Specifying both or providing an empty configuration returns a validation error.
33183318
node_recovery: The node recovery mode for the SageMaker HyperPod cluster. When set to Automatic, SageMaker HyperPod will automatically reboot or replace faulty nodes when issues are detected. When set to None, cluster administrators will need to manually manage any faulty cluster instances.
33193319
tiered_storage_config: The configuration for managed tier checkpointing on the HyperPod cluster. When enabled, this feature uses a multi-tier storage approach for storing model checkpoints, providing faster checkpoint operations and improved fault tolerance across cluster nodes.
33203320
node_provisioning_mode: The mode for provisioning nodes in the cluster. You can specify the following modes: Continuous: Scaling behavior that enables 1) concurrent operation execution within instance groups, 2) continuous retry mechanisms for failed operations, 3) enhanced customer visibility into cluster events through detailed event streams, 4) partial provisioning capabilities. Your clusters and instance groups remain InService while scaling. This mode is only supported for EKS orchestrated clusters.
@@ -3479,6 +3479,7 @@ def update(
34793479
node_provisioning_mode: Optional[str] = Unassigned(),
34803480
cluster_role: Optional[str] = Unassigned(),
34813481
auto_scaling: Optional[shapes.ClusterAutoScalingConfig] = Unassigned(),
3482+
orchestrator: Optional[shapes.ClusterOrchestrator] = Unassigned(),
34823483
) -> Optional["Cluster"]:
34833484
"""
34843485
Update a Cluster resource
@@ -3517,6 +3518,7 @@ def update(
35173518
"NodeProvisioningMode": node_provisioning_mode,
35183519
"ClusterRole": cluster_role,
35193520
"AutoScaling": auto_scaling,
3521+
"Orchestrator": orchestrator,
35203522
}
35213523
logger.debug(f"Input request: {operation_input_args}")
35223524
# serialize the input request

0 commit comments

Comments
 (0)