@@ -102,13 +102,19 @@ func (b *BindingInfo) StorageResource() *StorageResource {
 	}
 }
 
+// DynamicProvision represents a dynamically provisioned volume.
+type DynamicProvision struct {
+	PVC          *v1.PersistentVolumeClaim
+	NodeCapacity *storagev1.CSIStorageCapacity
+}
+
 // PodVolumes holds pod's volumes information used in volume scheduling.
 type PodVolumes struct {
 	// StaticBindings are binding decisions for PVCs which can be bound to
 	// pre-provisioned static PVs.
 	StaticBindings []*BindingInfo
 	// DynamicProvisions are PVCs that require dynamic provisioning
-	DynamicProvisions []*v1.PersistentVolumeClaim
+	DynamicProvisions []*DynamicProvision
 }
 
 // InTreeToCSITranslator contains methods required to check migratable status
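
Aside (not part of the change): a minimal, self-contained sketch of the reworked shape, showing how callers move from a bare PVC slice to DynamicProvision entries. DynamicProvision and PodVolumes below are trimmed local stand-ins for the package types, and "claim-a" is an invented name.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Local stand-ins for the volumebinding types, reduced to the fields shown above.
type DynamicProvision struct {
	PVC          *v1.PersistentVolumeClaim
	NodeCapacity *storagev1.CSIStorageCapacity
}

type PodVolumes struct {
	DynamicProvisions []*DynamicProvision
}

func main() {
	podVolumes := PodVolumes{
		DynamicProvisions: []*DynamicProvision{
			// NodeCapacity stays nil when CSI capacity checking is skipped.
			{PVC: &v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "claim-a"}}},
		},
	}
	for _, dp := range podVolumes.DynamicProvisions {
		// Callers that previously ranged over []*v1.PersistentVolumeClaim
		// now read the claim through the PVC field.
		fmt.Println(dp.PVC.Name, dp.NodeCapacity != nil)
	}
}
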
@@ -310,7 +316,7 @@ func (b *volumeBinder) FindPodVolumes(logger klog.Logger, pod *v1.Pod, podVolume
 
 	var (
 		staticBindings    []*BindingInfo
-		dynamicProvisions []*v1.PersistentVolumeClaim
+		dynamicProvisions []*DynamicProvision
 	)
 	defer func() {
 		// Although we do not distinguish nil from empty in this function, for
@@ -377,6 +383,16 @@ func (b *volumeBinder) FindPodVolumes(logger klog.Logger, pod *v1.Pod, podVolume
 	return
 }
 
+// convertDynamicProvisionsToPVCs converts a slice of *DynamicProvision into a
+// slice of *v1.PersistentVolumeClaim.
+func convertDynamicProvisionsToPVCs(dynamicProvisions []*DynamicProvision) []*v1.PersistentVolumeClaim {
+	pvcs := make([]*v1.PersistentVolumeClaim, 0, len(dynamicProvisions))
+	for _, dynamicProvision := range dynamicProvisions {
+		pvcs = append(pvcs, dynamicProvision.PVC)
+	}
+	return pvcs
+}
+
 // AssumePodVolumes will take the matching PVs and PVCs to provision in pod's
 // volume information for the chosen node, and:
 // 1. Update the pvCache with the new prebound PV.
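
For illustration only, a stand-alone run of an equivalent helper (the real convertDynamicProvisionsToPVCs is unexported in the volumebinding package; the claim names are invented):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Stand-in for the package type, reduced to the field the helper touches.
type DynamicProvision struct {
	PVC *v1.PersistentVolumeClaim
}

// Same body as the helper added above, operating on the stand-in type.
func convertDynamicProvisionsToPVCs(dynamicProvisions []*DynamicProvision) []*v1.PersistentVolumeClaim {
	pvcs := make([]*v1.PersistentVolumeClaim, 0, len(dynamicProvisions))
	for _, dynamicProvision := range dynamicProvisions {
		pvcs = append(pvcs, dynamicProvision.PVC)
	}
	return pvcs
}

func main() {
	provisions := []*DynamicProvision{
		{PVC: &v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "claim-a"}}},
		{PVC: &v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "claim-b"}}},
	}
	for _, pvc := range convertDynamicProvisionsToPVCs(provisions) {
		fmt.Println(pvc.Name) // claim-a, then claim-b
	}
}
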
@@ -423,20 +439,21 @@ func (b *volumeBinder) AssumePodVolumes(logger klog.Logger, assumedPod *v1.Pod,
 	}
 
 	// Assume PVCs
-	newProvisionedPVCs := []*v1.PersistentVolumeClaim{}
-	for _, claim := range podVolumes.DynamicProvisions {
+	newProvisionedPVCs := []*DynamicProvision{}
+	for _, dynamicProvision := range podVolumes.DynamicProvisions {
 		// The claims from method args can be pointing to watcher cache. We must not
 		// modify these, therefore create a copy.
-		claimClone := claim.DeepCopy()
+		claimClone := dynamicProvision.PVC.DeepCopy()
 		metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, volume.AnnSelectedNode, nodeName)
 		err = b.pvcCache.Assume(claimClone)
 		if err != nil {
+			pvcs := convertDynamicProvisionsToPVCs(newProvisionedPVCs)
 			b.revertAssumedPVs(newBindings)
-			b.revertAssumedPVCs(newProvisionedPVCs)
+			b.revertAssumedPVCs(pvcs)
 			return
 		}
 
-		newProvisionedPVCs = append(newProvisionedPVCs, claimClone)
+		newProvisionedPVCs = append(newProvisionedPVCs, &DynamicProvision{PVC: claimClone})
 	}
 
 	podVolumes.StaticBindings = newBindings
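
The loop above hinges on the copy-before-mutate rule for objects that may point into the watcher cache. Below is a stand-alone sketch of just that step, not taken from the patch; the annotation key literal is assumed to match volume.AnnSelectedNode.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Assumed value of volume.AnnSelectedNode; treat as illustrative.
const annSelectedNode = "volume.kubernetes.io/selected-node"

func main() {
	claim := &v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "claim-a"}}

	// Never mutate an object that may come from the watcher cache; annotate a copy.
	claimClone := claim.DeepCopy()
	metav1.SetMetaDataAnnotation(&claimClone.ObjectMeta, annSelectedNode, "node-1")

	fmt.Println(claim.Annotations == nil)                // true: the original is untouched
	fmt.Println(claimClone.Annotations[annSelectedNode]) // node-1
}
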
@@ -446,8 +463,9 @@ func (b *volumeBinder) AssumePodVolumes(logger klog.Logger, assumedPod *v1.Pod,
 
 // RevertAssumedPodVolumes will revert assumed PV and PVC cache.
 func (b *volumeBinder) RevertAssumedPodVolumes(podVolumes *PodVolumes) {
+	pvcs := convertDynamicProvisionsToPVCs(podVolumes.DynamicProvisions)
 	b.revertAssumedPVs(podVolumes.StaticBindings)
-	b.revertAssumedPVCs(podVolumes.DynamicProvisions)
+	b.revertAssumedPVCs(pvcs)
 }
 
 // BindPodVolumes gets the cached bindings and PVCs to provision in pod's volumes information,
@@ -464,7 +482,7 @@ func (b *volumeBinder) BindPodVolumes(ctx context.Context, assumedPod *v1.Pod, p
 	}()
 
 	bindings := podVolumes.StaticBindings
-	claimsToProvision := podVolumes.DynamicProvisions
+	claimsToProvision := convertDynamicProvisionsToPVCs(podVolumes.DynamicProvisions)
 
 	// Start API operations
 	err = b.bindAPIUpdate(ctx, assumedPod, bindings, claimsToProvision)
@@ -886,8 +904,8 @@ func (b *volumeBinder) findMatchingVolumes(logger klog.Logger, pod *v1.Pod, clai
 // checkVolumeProvisions checks given unbound claims (the claims have gone through func
 // findMatchingVolumes, and do not have matching volumes for binding), and return true
 // if all of the claims are eligible for dynamic provision.
-func (b *volumeBinder) checkVolumeProvisions(logger klog.Logger, pod *v1.Pod, claimsToProvision []*v1.PersistentVolumeClaim, node *v1.Node) (provisionSatisfied, sufficientStorage bool, dynamicProvisions []*v1.PersistentVolumeClaim, err error) {
-	dynamicProvisions = []*v1.PersistentVolumeClaim{}
+func (b *volumeBinder) checkVolumeProvisions(logger klog.Logger, pod *v1.Pod, claimsToProvision []*v1.PersistentVolumeClaim, node *v1.Node) (provisionSatisfied, sufficientStorage bool, dynamicProvisions []*DynamicProvision, err error) {
+	dynamicProvisions = []*DynamicProvision{}
 
 	// We return early with provisionedClaims == nil if a check
 	// fails or we encounter an error.
@@ -915,7 +933,7 @@ func (b *volumeBinder) checkVolumeProvisions(logger klog.Logger, pod *v1.Pod, cl
 		}
 
 		// Check storage capacity.
-		sufficient, err := b.hasEnoughCapacity(logger, provisioner, claim, class, node)
+		sufficient, capacity, err := b.hasEnoughCapacity(logger, provisioner, claim, class, node)
 		if err != nil {
 			return false, false, nil, err
 		}
@@ -924,8 +942,10 @@ func (b *volumeBinder) checkVolumeProvisions(logger klog.Logger, pod *v1.Pod, cl
 			return true, false, nil, nil
 		}
 
-		dynamicProvisions = append(dynamicProvisions, claim)
-
+		dynamicProvisions = append(dynamicProvisions, &DynamicProvision{
+			PVC:          claim,
+			NodeCapacity: capacity,
+		})
 	}
 	logger.V(4).Info("Provisioning for claims of pod that has no matching volumes...", "claimCount", len(claimsToProvision), "pod", klog.KObj(pod), "node", klog.KObj(node))
 
@@ -945,12 +965,12 @@ func (b *volumeBinder) revertAssumedPVCs(claims []*v1.PersistentVolumeClaim) {
 }
 
 // hasEnoughCapacity checks whether the provisioner has enough capacity left for a new volume of the given size
-// that is available from the node.
-func (b *volumeBinder) hasEnoughCapacity(logger klog.Logger, provisioner string, claim *v1.PersistentVolumeClaim, storageClass *storagev1.StorageClass, node *v1.Node) (bool, error) {
+// that is available from the node. This function returns the node capacity based on the PVC's storage class.
+func (b *volumeBinder) hasEnoughCapacity(logger klog.Logger, provisioner string, claim *v1.PersistentVolumeClaim, storageClass *storagev1.StorageClass, node *v1.Node) (bool, *storagev1.CSIStorageCapacity, error) {
 	quantity, ok := claim.Spec.Resources.Requests[v1.ResourceStorage]
 	if !ok {
 		// No capacity to check for.
-		return true, nil
+		return true, nil, nil
 	}
 
 	// Only enabled for CSI drivers which opt into it.
@@ -960,19 +980,19 @@ func (b *volumeBinder) hasEnoughCapacity(logger klog.Logger, provisioner string,
 			// Either the provisioner is not a CSI driver or the driver does not
 			// opt into storage capacity scheduling. Either way, skip
 			// capacity checking.
-			return true, nil
+			return true, nil, nil
 		}
-		return false, err
+		return false, nil, err
 	}
 	if driver.Spec.StorageCapacity == nil || !*driver.Spec.StorageCapacity {
-		return true, nil
+		return true, nil, nil
 	}
 
 	// Look for a matching CSIStorageCapacity object(s).
 	// TODO (for beta): benchmark this and potentially introduce some kind of lookup structure (https://github.com/kubernetes/enhancements/issues/1698#issuecomment-654356718).
 	capacities, err := b.csiStorageCapacityLister.List(labels.Everything())
 	if err != nil {
-		return false, err
+		return false, nil, err
 	}
 
 	sizeInBytes := quantity.Value()
@@ -981,15 +1001,15 @@ func (b *volumeBinder) hasEnoughCapacity(logger klog.Logger, provisioner string,
 			capacitySufficient(capacity, sizeInBytes) &&
 			b.nodeHasAccess(logger, node, capacity) {
 			// Enough capacity found.
-			return true, nil
+			return true, capacity, nil
 		}
 	}
 
 	// TODO (?): this doesn't give any information about which pools where considered and why
 	// they had to be rejected. Log that above? But that might be a lot of log output...
 	logger.V(4).Info("Node has no accessible CSIStorageCapacity with enough capacity for PVC",
 		"node", klog.KObj(node), "PVC", klog.KObj(claim), "size", sizeInBytes, "storageClass", klog.KObj(storageClass))
-	return false, nil
+	return false, nil, nil
 }
 
 func capacitySufficient(capacity *storagev1.CSIStorageCapacity, sizeInBytes int64) bool {
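
For context, hasEnoughCapacity decides sufficiency through the capacitySufficient helper whose signature appears above, and now also hands the matching CSIStorageCapacity object back to the caller. The sketch below is a stand-alone approximation of that check (my reading, not the verified upstream body; preferring MaximumVolumeSize over Capacity is an assumption).

package main

import (
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// capacitySufficient approximates the package helper: take the limit the
// driver reported and compare it with the requested size in bytes.
func capacitySufficient(capacity *storagev1.CSIStorageCapacity, sizeInBytes int64) bool {
	limit := capacity.Capacity // nil when the driver reported no usable capacity
	if capacity.MaximumVolumeSize != nil {
		// Assumption: a per-volume maximum, when present, is the binding limit.
		limit = capacity.MaximumVolumeSize
	}
	return limit != nil && limit.Value() >= sizeInBytes
}

func main() {
	tenGi := resource.MustParse("10Gi")
	reported := &storagev1.CSIStorageCapacity{Capacity: &tenGi}
	fmt.Println(capacitySufficient(reported, resource.MustParse("5Gi").Value()))  // true
	fmt.Println(capacitySufficient(reported, resource.MustParse("20Gi").Value())) // false
}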