// Estimator: type of resource estimator to be used in scale up.
@@ -840,17 +840,17 @@ type Pool struct {
 	// MaxSize: defines the maximum size of the pool. Note that this field is only used when autoscaling is enabled on the pool.
 	MaxSize uint32 `json:"max_size"`

-	// ContainerRuntime: customization of the container runtime is available for each pool. Note that `docker` has been deprecated since version 1.20 and will be removed by version 1.24.
+	// ContainerRuntime: customization of the container runtime is available for each pool.
 	// Default value: unknown_runtime
 	ContainerRuntime Runtime `json:"container_runtime"`

 	// Autohealing: defines whether the autohealing feature is enabled for the pool.
 	Autohealing bool `json:"autohealing"`

-	// Tags: tags associated with the pool.
+	// Tags: tags associated with the pool, see [managing tags](https://www.scaleway.com/en/docs/containers/kubernetes/api-cli/managing-tags).
 	Tags []string `json:"tags"`

-	// PlacementGroupID: placement group ID in which all the nodes of the pool will be created.
+	// PlacementGroupID: placement group ID in which all the nodes of the pool will be created, placement groups are limited to 20 instances.
 	// KubeletArgs: kubelet arguments to be used by this pool. Note that this feature is experimental.
@@ -862,7 +862,10 @@ type Pool struct {
 	// Zone: zone in which the pool's nodes will be spawned.
 	Zone scw.Zone `json:"zone"`

-	// RootVolumeType: defines the system volume disk type. Two different types of volume (`volume_type`) are provided: `l_ssd` is a local block storage which means your system is stored locally on your node's hypervisor. `b_ssd` is a remote block storage which means your system is stored on a centralized and resilient cluster.
+	// RootVolumeType: * `l_ssd` is a local block storage which means your system is stored locally on your node's hypervisor. This type is not available for all node types
+	// * `sbs-5k` is a remote block storage which means your system is stored on a centralized and resilient cluster with 5k IOPS limits
+	// * `sbs-15k` is a faster remote block storage which means your system is stored on a centralized and resilient cluster with 15k IOPS limits
+	// * `b_ssd` is the legacy remote block storage which means your system is stored on a centralized and resilient cluster. Consider using `sbs-5k` or `sbs-15k` instead.
 	// Estimator: type of resource estimator to be used in scale up.
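The `Pool` fields touched above can be read back through the SDK. A minimal sketch, assuming placeholder credentials and cluster ID; the client setup shown here is reused by the later sketches:

```go
package main

import (
	"fmt"
	"log"

	k8s "github.com/scaleway/scaleway-sdk-go/api/k8s/v1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

func main() {
	// Placeholder credentials; real values normally come from your Scaleway config.
	client, err := scw.NewClient(
		scw.WithAuth("SCW_ACCESS_KEY", "SCW_SECRET_KEY"),
		scw.WithDefaultRegion(scw.RegionFrPar),
	)
	if err != nil {
		log.Fatal(err)
	}
	api := k8s.NewAPI(client)

	// List the pools of a cluster and print the fields documented above.
	resp, err := api.ListPools(&k8s.ListPoolsRequest{
		ClusterID: "11111111-1111-1111-1111-111111111111", // placeholder cluster ID
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, pool := range resp.Pools {
		fmt.Println(pool.Name, pool.ContainerRuntime, pool.RootVolumeType, pool.Tags)
	}
}
```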
@@ -983,7 +986,7 @@ type CreateClusterRequestPoolConfig struct {
 	// NodeType: node type is the type of Scaleway Instance wanted for the pool. Nodes with insufficient memory are not eligible (DEV1-S, PLAY2-PICO, STARDUST). 'external' is a special node type used to provision instances from other cloud providers in a Kosmos Cluster.
 	NodeType string `json:"node_type"`

-	// PlacementGroupID: placement group ID in which all the nodes of the pool will be created.
+	// PlacementGroupID: placement group ID in which all the nodes of the pool will be created, placement groups are limited to 20 instances.
 	// Autoscaling: defines whether the autoscaling feature is enabled for the pool.
@@ -998,14 +1001,14 @@ type CreateClusterRequestPoolConfig struct {
 	// MaxSize: defines the maximum size of the pool. Note that this field is only used when autoscaling is enabled on the pool.
 	MaxSize *uint32 `json:"max_size"`

-	// ContainerRuntime: customization of the container runtime is available for each pool. Note that `docker` has been deprecated since version 1.20 and will be removed by version 1.24.
+	// ContainerRuntime: customization of the container runtime is available for each pool.
 	// Default value: unknown_runtime
 	ContainerRuntime Runtime `json:"container_runtime"`

 	// Autohealing: defines whether the autohealing feature is enabled for the pool.
 	Autohealing bool `json:"autohealing"`

-	// Tags: tags associated with the pool.
+	// Tags: tags associated with the pool, see [managing tags](https://www.scaleway.com/en/docs/containers/kubernetes/api-cli/managing-tags).
 	Tags []string `json:"tags"`

 	// KubeletArgs: kubelet arguments to be used by this pool. Note that this feature is experimental.
@@ -1017,7 +1020,10 @@ type CreateClusterRequestPoolConfig struct {
 	// Zone: zone in which the pool's nodes will be spawned.
 	Zone scw.Zone `json:"zone"`

-	// RootVolumeType: defines the system volume disk type. Two different types of volume (`volume_type`) are provided: `l_ssd` is a local block storage which means your system is stored locally on your node's hypervisor. `b_ssd` is a remote block storage which means your system is stored on a centralized and resilient cluster.
+	// RootVolumeType: * `l_ssd` is a local block storage which means your system is stored locally on your node's hypervisor. This type is not available for all node types
+	// * `sbs-5k` is a remote block storage which means your system is stored on a centralized and resilient cluster with 5k IOPS limits
+	// * `sbs-15k` is a faster remote block storage which means your system is stored on a centralized and resilient cluster with 15k IOPS limits
+	// * `b_ssd` is the legacy remote block storage which means your system is stored on a centralized and resilient cluster. Consider using `sbs-5k` or `sbs-15k` instead.
 	// Estimator: type of resource estimator to be used in scale up.
@@ -1356,7 +1362,7 @@ type CreateClusterRequest struct {
 	// Precisely one of ProjectID, OrganizationID must be set.
 	ProjectID *string `json:"project_id,omitempty"`

-	// Type: type of the cluster (possible values are kapsule, multicloud, kapsule-dedicated-8, kapsule-dedicated-16).
+	// Type: type of the cluster. See [list available cluster types](#list-available-cluster-types-for-a-cluster) for a list of valid types.
 	Type string `json:"type"`

 	// Name: cluster name.
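Taken together with the pool-config changes above, creating a cluster looks roughly like the sketch below, reusing the client from the first example. The cluster type, Kubernetes version, node type, and project ID are placeholder assumptions; pick real values from `ListClusterTypes` and `ListVersions`:

```go
package main

import (
	k8s "github.com/scaleway/scaleway-sdk-go/api/k8s/v1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

// createCluster is a sketch; the type, version, node type and project ID are
// placeholder assumptions.
func createCluster(api *k8s.API) (*k8s.Cluster, error) {
	return api.CreateCluster(&k8s.CreateClusterRequest{
		ProjectID: scw.StringPtr("11111111-1111-1111-1111-111111111111"), // placeholder project ID
		Type:      "kapsule", // pick a value returned by ListClusterTypes
		Name:      "my-cluster",
		Version:   "1.29.1", // placeholder; pick a version from ListVersions
		Cni:       k8s.CNICilium,
		Pools: []*k8s.CreateClusterRequestPoolConfig{
			{
				Name:             "default",
				NodeType:         "PRO2-S", // placeholder node type with enough memory
				Size:             3,
				Autoscaling:      true,
				MinSize:          scw.Uint32Ptr(1),
				MaxSize:          scw.Uint32Ptr(5),
				Autohealing:      true,
				ContainerRuntime: k8s.RuntimeContainerd,
				Tags:             []string{"team=platform"},
				// PlacementGroupID is optional; a placement group holds at most 20 instances.
			},
		},
	})
}
```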
@@ -1422,7 +1428,7 @@ type CreatePoolRequest struct {
 	// NodeType: node type is the type of Scaleway Instance wanted for the pool. Nodes with insufficient memory are not eligible (DEV1-S, PLAY2-PICO, STARDUST). 'external' is a special node type used to provision instances from other cloud providers in a Kosmos Cluster.
 	NodeType string `json:"node_type"`

-	// PlacementGroupID: placement group ID in which all the nodes of the pool will be created.
+	// PlacementGroupID: placement group ID in which all the nodes of the pool will be created, placement groups are limited to 20 instances.
 	// Autoscaling: defines whether the autoscaling feature is enabled for the pool.
@@ -1437,14 +1443,14 @@ type CreatePoolRequest struct {
 	// MaxSize: defines the maximum size of the pool. Note that this field is only used when autoscaling is enabled on the pool.
 	MaxSize *uint32 `json:"max_size,omitempty"`

-	// ContainerRuntime: customization of the container runtime is available for each pool. Note that `docker` has been deprecated since version 1.20 and will be removed by version 1.24.
+	// ContainerRuntime: customization of the container runtime is available for each pool.
 	// Default value: unknown_runtime
 	ContainerRuntime Runtime `json:"container_runtime"`

 	// Autohealing: defines whether the autohealing feature is enabled for the pool.
 	Autohealing bool `json:"autohealing"`

-	// Tags: tags associated with the pool.
+	// Tags: tags associated with the pool, see [managing tags](https://www.scaleway.com/en/docs/containers/kubernetes/api-cli/managing-tags).
 	Tags []string `json:"tags"`

 	// KubeletArgs: kubelet arguments to be used by this pool. Note that this feature is experimental.
@@ -1456,7 +1462,10 @@ type CreatePoolRequest struct {
 	// Zone: zone in which the pool's nodes will be spawned.
 	Zone scw.Zone `json:"zone"`

-	// RootVolumeType: defines the system volume disk type. Two different types of volume (`volume_type`) are provided: `l_ssd` is a local block storage which means your system is stored locally on your node's hypervisor. `b_ssd` is a remote block storage which means your system is stored on a centralized and resilient cluster.
+	// RootVolumeType: * `l_ssd` is a local block storage which means your system is stored locally on your node's hypervisor. This type is not available for all node types
+	// * `sbs-5k` is a remote block storage which means your system is stored on a centralized and resilient cluster with 5k IOPS limits
+	// * `sbs-15k` is a faster remote block storage which means your system is stored on a centralized and resilient cluster with 15k IOPS limits
+	// * `b_ssd` is the legacy remote block storage which means your system is stored on a centralized and resilient cluster. Consider using `sbs-5k` or `sbs-15k` instead.
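A sketch of creating a standalone pool with one of the root volume types listed above, reusing the client from the first example. The enum string passed for `sbs-5k` is an assumption; check the `PoolVolumeType` constants shipped with your SDK version:

```go
package main

import k8s "github.com/scaleway/scaleway-sdk-go/api/k8s/v1"

// addPool is a sketch reusing the client setup from the first example.
// The enum string for the root volume type is an assumption (the comments
// above spell it `sbs-5k`); prefer the generated PoolVolumeType constant if
// your SDK version ships one.
func addPool(api *k8s.API, clusterID string) (*k8s.Pool, error) {
	return api.CreatePool(&k8s.CreatePoolRequest{
		ClusterID:        clusterID,
		Name:             "workers",
		NodeType:         "PRO2-S", // placeholder node type
		Size:             2,
		ContainerRuntime: k8s.RuntimeContainerd,
		Autohealing:      true,
		Tags:             []string{"env=prod"},
		RootVolumeType:   k8s.PoolVolumeType("sbs_5k"), // assumed wire value for `sbs-5k`
	})
}
```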
-// DeleteCluster: Delete a specific Kubernetes cluster and all its associated pools and nodes. Note that this method will not delete any Load Balancer or Block Volume that are associated with the cluster.
+// DeleteCluster: Delete a specific Kubernetes cluster and all its associated pools and nodes, and possibly its associated Load Balancers or Block Volumes.
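A sketch of the delete call, assuming the `WithAdditionalResources` flag available in recent SDK versions is what opts into removing the associated Load Balancers and Block Volumes:

```go
package main

import k8s "github.com/scaleway/scaleway-sdk-go/api/k8s/v1"

// deleteCluster is a sketch reusing the client setup from the first example.
// WithAdditionalResources is assumed to be the flag that also removes the
// Load Balancers and Block Volumes mentioned in the new comment.
func deleteCluster(api *k8s.API, clusterID string) error {
	_, err := api.DeleteCluster(&k8s.DeleteClusterRequest{
		ClusterID:               clusterID,
		WithAdditionalResources: true,
	})
	return err
}
```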
-// SetClusterType: Change the type of a specific Kubernetes cluster. To see the possible values you can enter for the `type` field, [list available cluster types](#path-clusters-list-available-cluster-types-for-a-cluster).
+// SetClusterType: Change the type of a specific Kubernetes cluster. To see the possible values you can enter for the `type` field, [list available cluster types](#list-available-cluster-types-for-a-cluster).
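A sketch of switching the cluster type, assuming `ListClusterAvailableTypes` is the SDK method behind the endpoint linked above:

```go
package main

import (
	"fmt"

	k8s "github.com/scaleway/scaleway-sdk-go/api/k8s/v1"
)

// changeClusterType is a sketch reusing the client setup from the first
// example; ListClusterAvailableTypes is assumed to match the linked endpoint.
func changeClusterType(api *k8s.API, clusterID, newType string) error {
	// Show which types this cluster can be switched to.
	available, err := api.ListClusterAvailableTypes(&k8s.ListClusterAvailableTypesRequest{
		ClusterID: clusterID,
	})
	if err != nil {
		return err
	}
	for _, t := range available.ClusterTypes {
		fmt.Println(t.Name)
	}

	_, err = api.SetClusterType(&k8s.SetClusterTypeRequest{
		ClusterID: clusterID,
		Type:      newType, // must be one of the names printed above
	})
	return err
}
```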
-// ResetClusterAdminToken: Reset the admin token for a specific Kubernetes cluster. This will revoke the old admin token (which will not be usable afterwards) and create a new one. Note that you will need to download kubeconfig again to keep interacting with the cluster.
+// ResetClusterAdminToken: Reset the admin token for a specific Kubernetes cluster. This will revoke the old admin token (which will not be usable afterwards) and create a new one. Note that you will need to download the kubeconfig again to keep interacting with the cluster.
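A sketch of rotating the admin token and fetching a fresh kubeconfig; the `ResetClusterAdminToken` return signature and the `GetClusterKubeConfig` helper are assumptions about the current SDK:

```go
package main

import k8s "github.com/scaleway/scaleway-sdk-go/api/k8s/v1"

// rotateAdminToken is a sketch reusing the client setup from the first
// example. ResetClusterAdminToken is assumed to return only an error, and
// GetClusterKubeConfig is assumed to be the helper used to download the
// kubeconfig again, since the old one stops working after the reset.
func rotateAdminToken(api *k8s.API, clusterID string) (*k8s.Kubeconfig, error) {
	if err := api.ResetClusterAdminToken(&k8s.ResetClusterAdminTokenRequest{
		ClusterID: clusterID,
	}); err != nil {
		return nil, err
	}
	return api.GetClusterKubeConfig(&k8s.GetClusterKubeConfigRequest{
		ClusterID: clusterID,
	})
}
```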
 // MigrateClusterToSBSCSI: Enable the latest CSI compatible with Scaleway Block Storage (SBS) and migrate all existing PersistentVolumes/VolumeSnapshotContents to SBS.
+// Make sure to have the necessary Quota before running this command.
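A sketch of triggering the migration, assuming the call returns the updated cluster:

```go
package main

import k8s "github.com/scaleway/scaleway-sdk-go/api/k8s/v1"

// migrateToSBSCSI is a sketch reusing the client setup from the first
// example; the return type is an assumption. Check your Block Storage quota
// first, as the new comment advises.
func migrateToSBSCSI(api *k8s.API, clusterID string) (*k8s.Cluster, error) {
	return api.MigrateClusterToSBSCSI(&k8s.MigrateClusterToSBSCSIRequest{
		ClusterID: clusterID,
	})
}
```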
-// UpdatePool: Update the attributes of a specific pool, such as its desired size, autoscaling settings, and tags.
+// UpdatePool: Update the attributes of a specific pool, such as its desired size, autoscaling settings, and tags. To upgrade a pool, you will need to use the dedicated endpoint.
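A sketch of an in-place pool update, reusing the client from the first example; moving a pool to a new Kubernetes version goes through the dedicated `UpgradePool` endpoint instead:

```go
package main

import (
	k8s "github.com/scaleway/scaleway-sdk-go/api/k8s/v1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

// retagAndResizePool is a sketch reusing the client setup from the first
// example. Only mutable attributes go through UpdatePool; version upgrades
// use UpgradePool.
func retagAndResizePool(api *k8s.API, poolID string) (*k8s.Pool, error) {
	tags := []string{"env=prod", "team=platform"}
	return api.UpdatePool(&k8s.UpdatePoolRequest{
		PoolID:      poolID,
		Autoscaling: scw.BoolPtr(true),
		MinSize:     scw.Uint32Ptr(2),
		MaxSize:     scw.Uint32Ptr(6),
		Tags:        &tags,
	})
}
```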
@@ -3099,7 +3110,7 @@ func (s *API) GetNode(req *GetNodeRequest, opts ...scw.RequestOption) (*Node, er
 	return &resp, nil
 }

-// Deprecated: ReplaceNode: Replace a specific Node. The node will first be cordoned (scheduling will be disabled on it). The existing pods on the node will then be drained and rescheduled onto another schedulable node. Note that when there is not enough space to reschedule all the pods (such as in a one-node cluster), disruption of your applications can be expected.
+// Deprecated: ReplaceNode: Replace a specific Node. The node will first be drained and pods will be rescheduled onto another node. Note that when there is not enough space to reschedule all the pods (such as in a one-node cluster, or with specific constraints), disruption of your applications may occur.
-// RebootNode: Reboot a specific Node. The node will first be cordoned (scheduling will be disabled on it). The existing pods on the node will then be drained and rescheduled onto another schedulable node. Note that when there is not enough space to reschedule all the pods (such as in a one-node cluster), disruption of your applications can be expected.
+// RebootNode: Reboot a specific Node. The node will first be drained and pods will be rescheduled onto another node. Note that when there is not enough space to reschedule all the pods (such as in a one-node cluster, or with specific constraints), disruption of your applications may occur.
-// DeleteNode: Delete a specific Node. Note that when there is not enough space to reschedule all the pods (such as in a one-node cluster), disruption of your applications can be expected.
+// DeleteNode: Delete a specific Node. The node will first be drained and pods will be rescheduled onto another node. Note that when there is not enough space to reschedule all the pods (such as in a one-node cluster, or with specific constraints), disruption of your applications may occur.
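A sketch of rebooting a node and waiting for it to settle, reusing the client from the first example; `WaitForNode` and its `Timeout` field are assumed from the SDK's wait helpers:

```go
package main

import (
	"time"

	k8s "github.com/scaleway/scaleway-sdk-go/api/k8s/v1"
	"github.com/scaleway/scaleway-sdk-go/scw"
)

// rebootNode is a sketch reusing the client setup from the first example.
// The node gets drained first, so expect disruption if the cluster cannot
// reschedule every pod elsewhere. WaitForNode is assumed from the SDK's
// wait helpers.
func rebootNode(api *k8s.API, nodeID string) (*k8s.Node, error) {
	if _, err := api.RebootNode(&k8s.RebootNodeRequest{NodeID: nodeID}); err != nil {
		return nil, err
	}
	// Block until the node reaches a stable status again.
	return api.WaitForNode(&k8s.WaitForNodeRequest{
		NodeID:  nodeID,
		Timeout: scw.TimeDurationPtr(15 * time.Minute),
	})
}
```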