Commit c4331b0

Author: zouyee (committed)
Move VolumeZone predicate to its Filter plugin
Signed-off-by: Zou Nengren <[email protected]>
1 parent 04d71d8 commit c4331b0
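
For context on the destination of this move: in the scheduler framework, a Filter plugin is a named object whose Filter method returns a *framework.Status instead of the legacy predicate's (bool, []PredicateFailureReason, error) triple. The sketch below is illustrative only — it assumes the v1alpha1 framework interfaces of this era and elides the zone-matching body (unchanged from the predicate removed in predicates.go below); it is not the verbatim code added by this commit.

package volumezone

import (
	"context"

	v1 "k8s.io/api/core/v1"
	corelisters "k8s.io/client-go/listers/core/v1"
	storagelisters "k8s.io/client-go/listers/storage/v1"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// VolumeZone carries the same listers the removed VolumeZoneChecker held.
type VolumeZone struct {
	pvLister  corelisters.PersistentVolumeLister
	pvcLister corelisters.PersistentVolumeClaimLister
	scLister  storagelisters.StorageClassLister
}

var _ framework.FilterPlugin = &VolumeZone{}

// Name is the name the plugin registers under.
const Name = "VolumeZone"

func (pl *VolumeZone) Name() string { return Name }

// Filter is the framework entry point that replaces the predicate function.
func (pl *VolumeZone) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
	if len(pod.Spec.Volumes) == 0 {
		return nil // fast path, as in the predicate; a nil Status means "fits"
	}
	if nodeInfo.Node() == nil {
		return framework.NewStatus(framework.Error, "node not found")
	}
	// ... per-volume zone-label matching, unchanged from the predicate body
	// shown in the predicates.go diff below; on a mismatch it would return
	// a status with code framework.UnschedulableAndUnresolvable.
	return nil
}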

File tree

6 files changed (+134, -503 lines)


pkg/scheduler/algorithm/predicates/BUILD

Lines changed: 0 additions & 1 deletion
@@ -34,7 +34,6 @@ go_library(
         "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
         "//staging/src/k8s.io/client-go/listers/storage/v1:go_default_library",
         "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
-        "//staging/src/k8s.io/cloud-provider/volume/helpers:go_default_library",
         "//staging/src/k8s.io/csi-translation-lib:go_default_library",
         "//staging/src/k8s.io/csi-translation-lib/plugins:go_default_library",
         "//vendor/k8s.io/klog:go_default_library",

pkg/scheduler/algorithm/predicates/predicates.go

Lines changed: 0 additions & 132 deletions
@@ -34,7 +34,6 @@ import (
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	corelisters "k8s.io/client-go/listers/core/v1"
 	storagelisters "k8s.io/client-go/listers/storage/v1"
-	volumehelpers "k8s.io/cloud-provider/volume/helpers"
 	csilibplugins "k8s.io/csi-translation-lib/plugins"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	"k8s.io/kubernetes/pkg/features"
@@ -535,137 +534,6 @@ var CinderVolumeFilter = VolumeFilter{
 	},
 }

-// VolumeZoneChecker contains information to check the volume zone for a predicate.
-type VolumeZoneChecker struct {
-	pvLister  corelisters.PersistentVolumeLister
-	pvcLister corelisters.PersistentVolumeClaimLister
-	scLister  storagelisters.StorageClassLister
-}
-
-// NewVolumeZonePredicate evaluates if a pod can fit due to the volumes it requests, given
-// that some volumes may have zone scheduling constraints. The requirement is that any
-// volume zone-labels must match the equivalent zone-labels on the node. It is OK for
-// the node to have more zone-label constraints (for example, a hypothetical replicated
-// volume might allow region-wide access)
-//
-// Currently this is only supported with PersistentVolumeClaims, and looks to the labels
-// only on the bound PersistentVolume.
-//
-// Working with volumes declared inline in the pod specification (i.e. not
-// using a PersistentVolume) is likely to be harder, as it would require
-// determining the zone of a volume during scheduling, and that is likely to
-// require calling out to the cloud provider. It seems that we are moving away
-// from inline volume declarations anyway.
-func NewVolumeZonePredicate(pvLister corelisters.PersistentVolumeLister, pvcLister corelisters.PersistentVolumeClaimLister, scLister storagelisters.StorageClassLister) FitPredicate {
-	c := &VolumeZoneChecker{
-		pvLister:  pvLister,
-		pvcLister: pvcLister,
-		scLister:  scLister,
-	}
-	return c.predicate
-}
-
-func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
-	// If a pod doesn't have any volume attached to it, the predicate will always be true.
-	// Thus we make a fast path for it, to avoid unnecessary computations in this case.
-	if len(pod.Spec.Volumes) == 0 {
-		return true, nil, nil
-	}
-
-	node := nodeInfo.Node()
-	if node == nil {
-		return false, nil, fmt.Errorf("node not found")
-	}
-
-	nodeConstraints := make(map[string]string)
-	for k, v := range node.ObjectMeta.Labels {
-		if k != v1.LabelZoneFailureDomain && k != v1.LabelZoneRegion {
-			continue
-		}
-		nodeConstraints[k] = v
-	}
-
-	if len(nodeConstraints) == 0 {
-		// The node has no zone constraints, so we're OK to schedule.
-		// In practice, when using zones, all nodes must be labeled with zone labels.
-		// We want to fast-path this case though.
-		return true, nil, nil
-	}
-
-	namespace := pod.Namespace
-	manifest := &(pod.Spec)
-	for i := range manifest.Volumes {
-		volume := &manifest.Volumes[i]
-		if volume.PersistentVolumeClaim == nil {
-			continue
-		}
-		pvcName := volume.PersistentVolumeClaim.ClaimName
-		if pvcName == "" {
-			return false, nil, fmt.Errorf("PersistentVolumeClaim had no name")
-		}
-		pvc, err := c.pvcLister.PersistentVolumeClaims(namespace).Get(pvcName)
-		if err != nil {
-			return false, nil, err
-		}
-
-		if pvc == nil {
-			return false, nil, fmt.Errorf("PersistentVolumeClaim was not found: %q", pvcName)
-		}
-
-		pvName := pvc.Spec.VolumeName
-		if pvName == "" {
-			scName := v1helper.GetPersistentVolumeClaimClass(pvc)
-			if len(scName) == 0 {
-				return false, nil, fmt.Errorf("PersistentVolumeClaim had no pv name and storageClass name")
-			}
-
-			class, _ := c.scLister.Get(scName)
-			if class == nil {
-				return false, nil, fmt.Errorf("StorageClass %q claimed by PersistentVolumeClaim %q not found",
-					scName, pvcName)
-
-			}
-			if class.VolumeBindingMode == nil {
-				return false, nil, fmt.Errorf("VolumeBindingMode not set for StorageClass %q", scName)
-			}
-			if *class.VolumeBindingMode == storage.VolumeBindingWaitForFirstConsumer {
-				// Skip unbound volumes
-				continue
-			}
-
-			return false, nil, fmt.Errorf("PersistentVolume had no name")
-		}
-
-		pv, err := c.pvLister.Get(pvName)
-		if err != nil {
-			return false, nil, err
-		}
-
-		if pv == nil {
-			return false, nil, fmt.Errorf("PersistentVolume was not found: %q", pvName)
-		}
-
-		for k, v := range pv.ObjectMeta.Labels {
-			if k != v1.LabelZoneFailureDomain && k != v1.LabelZoneRegion {
-				continue
-			}
-			nodeV, _ := nodeConstraints[k]
-			volumeVSet, err := volumehelpers.LabelZonesToSet(v)
-			if err != nil {
-				klog.Warningf("Failed to parse label for %q: %q. Ignoring the label. err=%v. ", k, v, err)
-				continue
-			}
-
-			if !volumeVSet.Has(nodeV) {
-				klog.V(10).Infof("Won't schedule pod %q onto node %q due to volume %q (mismatch on %q)", pod.Name, node.Name, pvName, k)
-				return false, []PredicateFailureReason{ErrVolumeZoneConflict}, nil
-			}
-		}
-	}
-
-	return true, nil, nil
-}
-
 // GetResourceRequest returns a *schedulernodeinfo.Resource that covers the largest
 // width in each resource dimension. Because init-containers run sequentially, we collect
 // the max in each dimension iteratively. In contrast, we sum the resource vectors for
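
The heart of the removed predicate is its final loop: a PersistentVolume's zone label may encode several zones, which volumehelpers.LabelZonesToSet expands into a set that the node's own zone label must belong to. Below is a minimal standalone illustration of that matching step — a sketch assuming the "__" multi-zone delimiter used by the cloud-provider volume helpers (e.g. for regional disks); the zone names are made up.

package main

import (
	"fmt"

	volumehelpers "k8s.io/cloud-provider/volume/helpers"
)

func main() {
	// A single-zone PV stores a plain zone name in its failure-domain label;
	// a multi-zone volume joins its zones with "__".
	zones, err := volumehelpers.LabelZonesToSet("us-central1-a__us-central1-b")
	if err != nil {
		panic(err)
	}
	fmt.Println(zones.Has("us-central1-a")) // true: a node in this zone fits
	fmt.Println(zones.Has("us-central1-f")) // false: would be ErrVolumeZoneConflict
}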
