@@ -298,9 +298,12 @@ still an unsolved problem.
 //
 // The producer of these objects can decide which approach is more suitable.
 //
-// They are consumed by the kube-scheduler if the CSIStorageCapacity beta feature gate
-// is enabled there and a CSI driver opts into capacity-aware scheduling with
-// CSIDriver.StorageCapacity.
+// They are consumed by the kube-scheduler when a CSI driver opts into capacity-aware
+// scheduling with CSIDriverSpec.StorageCapacity. The scheduler compares the
+// MaximumVolumeSize against the requested size of pending volumes to filter
+// out unsuitable nodes. If MaximumVolumeSize is unset, it falls back to
+// a comparison against the less precise Capacity. If that is also unset,
+// the scheduler assumes that capacity is insufficient and tries some other node.
 type CSIStorageCapacity struct {
 	metav1.TypeMeta
 	// Standard object's metadata. The name has no particular meaning. It must be
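The added comment describes a three-step fallback. Here is a minimal sketch of that filtering logic in Go, an illustration rather than the actual kube-scheduler code; `capacityInfo` and `volumeFits` are made-up names, and only the two quantity fields are modeled:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// capacityInfo mirrors just the two CSIStorageCapacity fields that matter
// for the size check; the real object carries more (metadata, topology,
// storage class name).
type capacityInfo struct {
	Capacity          *resource.Quantity
	MaximumVolumeSize *resource.Quantity
}

// volumeFits applies the fallback described above: prefer the precise
// MaximumVolumeSize, fall back to the less precise Capacity, and treat
// "both unset" as insufficient so the scheduler tries some other node.
func volumeFits(requested resource.Quantity, c capacityInfo) bool {
	if c.MaximumVolumeSize != nil {
		return c.MaximumVolumeSize.Cmp(requested) >= 0
	}
	if c.Capacity != nil {
		return c.Capacity.Cmp(requested) >= 0
	}
	return false
}

func main() {
	req := resource.MustParse("5Gi")
	max := resource.MustParse("4Gi")
	total := resource.MustParse("100Gi")
	fmt.Println(volumeFits(req, capacityInfo{MaximumVolumeSize: &max})) // false: 5Gi > 4Gi
	fmt.Println(volumeFits(req, capacityInfo{Capacity: &total}))        // true, but less reliable
	fmt.Println(volumeFits(req, capacityInfo{}))                        // false: assume insufficient
}
```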
@@ -339,7 +342,7 @@ type CSIStorageCapacity struct {
 	// The semantic is currently (CSI spec 1.2) defined as:
 	// The available capacity, in bytes, of the storage that can be used
 	// to provision volumes. If not set, that information is currently
-	// unavailable and treated like zero capacity.
+	// unavailable.
 	//
 	// +optional
 	Capacity *resource.Quantity
@@ -354,6 +357,7 @@ type CSIStorageCapacity struct {
 	// create a volume with the same parameters as those in
 	// GetCapacityRequest. The corresponding value in the Kubernetes
 	// API is ResourceRequirements.Requests in a volume claim.
+	// Not all CSI drivers provide this information.
 	//
 	// +optional
 	MaximumVolumeSize *resource.Quantity
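On the producer side, these two fields correspond to `available_capacity` and the optional `maximum_volume_size` in the CSI GetCapacityResponse. A hedged sketch of that mapping, assuming the Go CSI bindings from github.com/container-storage-interface/spec at a spec revision that includes `maximum_volume_size`; the helper name `fromGetCapacity` is made up for illustration:

```go
import (
	"github.com/container-storage-interface/spec/lib/go/csi"

	"k8s.io/apimachinery/pkg/api/resource"
)

// fromGetCapacity sketches how a producer (for example, external-provisioner)
// could derive the two API fields from a driver's GetCapacityResponse.
// maximum_volume_size is optional in the CSI spec, so MaximumVolumeSize
// may legitimately stay nil, matching "Not all CSI drivers provide this
// information" above.
func fromGetCapacity(rsp *csi.GetCapacityResponse) (capacity, maximumVolumeSize *resource.Quantity) {
	capacity = resource.NewQuantity(rsp.AvailableCapacity, resource.BinarySI)
	if rsp.MaximumVolumeSize != nil {
		maximumVolumeSize = resource.NewQuantity(rsp.MaximumVolumeSize.GetValue(), resource.BinarySI)
	}
	return capacity, maximumVolumeSize
}
```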
@@ -1151,7 +1155,7 @@ such a scenario, the scheduler has to make those decisions based on
 outdated information, in particular when making one scheduling
 decision affects the next decision.
 
-[Scale testing](https://github.com/kubernetes-csi/csi-driver-host-path/blob/d6d9639077691986d676984827ea4dd7ee0c5cce/docs/storage-capacity-tracking.md)
+[Scale testing](https://github.com/kubernetes-csi/csi-driver-host-path/blob/f053a7b0c4b719a5808fc47fdb3eba9cdade2067/docs/storage-capacity-tracking.md)
 showed that this can occur for a fake workload that generates
 pods with generic ephemeral inline volumes as quickly as possible: publishing
 CSIStorageCapacity objects was sometimes too slow, so scheduling retries were