Skip to content

Commit b52cdca

Browse files
authored
Merge pull request kubernetes#86638 from danielqsj/ServiceAntiAffinityPriority
Move ServiceAntiAffinityPriority to score plugin
2 parents 9d31c93 + f53c81f commit b52cdca

File tree

6 files changed

+110
-347
lines changed

6 files changed

+110
-347
lines changed

pkg/scheduler/algorithm/priorities/metadata.go

Lines changed: 4 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -56,9 +56,8 @@ func NewMetadataFactory(
5656

5757
// priorityMetadata is a type that is passed as metadata for priority functions
5858
type priorityMetadata struct {
59-
podSelector labels.Selector
60-
podFirstServiceSelector labels.Selector
61-
podTopologySpreadMap *podTopologySpreadMap
59+
podSelector labels.Selector
60+
podTopologySpreadMap *podTopologySpreadMap
6261
}
6362

6463
// PriorityMetadata is a MetadataProducer. Node info can be nil.
@@ -83,20 +82,11 @@ func (pmf *MetadataFactory) PriorityMetadata(
8382
return nil
8483
}
8584
return &priorityMetadata{
86-
podSelector: getSelector(pod, pmf.serviceLister, pmf.controllerLister, pmf.replicaSetLister, pmf.statefulSetLister),
87-
podFirstServiceSelector: getFirstServiceSelector(pod, pmf.serviceLister),
88-
podTopologySpreadMap: tpSpreadMap,
85+
podSelector: getSelector(pod, pmf.serviceLister, pmf.controllerLister, pmf.replicaSetLister, pmf.statefulSetLister),
86+
podTopologySpreadMap: tpSpreadMap,
8987
}
9088
}
9189

92-
// getFirstServiceSelector returns the selector of the first service matching the given pod.
93-
func getFirstServiceSelector(pod *v1.Pod, sl corelisters.ServiceLister) (firstServiceSelector labels.Selector) {
94-
if services, err := schedulerlisters.GetPodServices(sl, pod); err == nil && len(services) > 0 {
95-
return labels.SelectorFromSet(services[0].Spec.Selector)
96-
}
97-
return nil
98-
}
99-
10090
// getSelector returns a selector for the services, RCs, RSs, and SSs matching the given pod.
10191
func getSelector(pod *v1.Pod, sl corelisters.ServiceLister, cl corelisters.ReplicationControllerLister, rsl appslisters.ReplicaSetLister, ssl appslisters.StatefulSetLister) labels.Selector {
10292
labelSet := make(labels.Set)

pkg/scheduler/algorithm/priorities/selector_spreading.go

Lines changed: 0 additions & 117 deletions
Original file line numberDiff line numberDiff line change
@@ -154,23 +154,6 @@ func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interfa
154154
return nil
155155
}
156156

157-
// ServiceAntiAffinity contains information to calculate service anti-affinity priority.
158-
type ServiceAntiAffinity struct {
159-
podLister schedulerlisters.PodLister
160-
serviceLister corelisters.ServiceLister
161-
labels []string
162-
}
163-
164-
// NewServiceAntiAffinityPriority creates a ServiceAntiAffinity.
165-
func NewServiceAntiAffinityPriority(podLister schedulerlisters.PodLister, serviceLister corelisters.ServiceLister, labels []string) (PriorityMapFunction, PriorityReduceFunction) {
166-
antiAffinity := &ServiceAntiAffinity{
167-
podLister: podLister,
168-
serviceLister: serviceLister,
169-
labels: labels,
170-
}
171-
return antiAffinity.CalculateAntiAffinityPriorityMap, antiAffinity.CalculateAntiAffinityPriorityReduce
172-
}
173-
174157
// countMatchingPods counts pods based on namespace and matching all selectors
175158
func countMatchingPods(namespace string, selector labels.Selector, nodeInfo *schedulernodeinfo.NodeInfo) int {
176159
if nodeInfo.Pods() == nil || len(nodeInfo.Pods()) == 0 || selector.Empty() {
@@ -188,103 +171,3 @@ func countMatchingPods(namespace string, selector labels.Selector, nodeInfo *sch
188171
}
189172
return count
190173
}
191-
192-
// CalculateAntiAffinityPriorityMap spreads pods by minimizing the number of pods belonging to the same service
193-
// on given machine
194-
func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) {
195-
var firstServiceSelector labels.Selector
196-
197-
node := nodeInfo.Node()
198-
if node == nil {
199-
return framework.NodeScore{}, fmt.Errorf("node not found")
200-
}
201-
priorityMeta, ok := meta.(*priorityMetadata)
202-
if ok {
203-
firstServiceSelector = priorityMeta.podFirstServiceSelector
204-
} else {
205-
firstServiceSelector = getFirstServiceSelector(pod, s.serviceLister)
206-
}
207-
// Pods matched namespace,selector on current node.
208-
var selector labels.Selector
209-
if firstServiceSelector != nil {
210-
selector = firstServiceSelector
211-
} else {
212-
selector = labels.NewSelector()
213-
}
214-
score := countMatchingPods(pod.Namespace, selector, nodeInfo)
215-
216-
return framework.NodeScore{
217-
Name: node.Name,
218-
Score: int64(score),
219-
}, nil
220-
}
221-
222-
// CalculateAntiAffinityPriorityReduce computes each node score with the same value for a particular label.
223-
// The label to be considered is provided to the struct (ServiceAntiAffinity).
224-
func (s *ServiceAntiAffinity) CalculateAntiAffinityPriorityReduce(pod *v1.Pod, meta interface{}, sharedLister schedulerlisters.SharedLister, result framework.NodeScoreList) error {
225-
reduceResult := make([]float64, len(result))
226-
for _, label := range s.labels {
227-
if err := s.updateNodeScoresForLabel(sharedLister, result, reduceResult, label); err != nil {
228-
return err
229-
}
230-
}
231-
232-
// Update the result after all labels have been evaluated.
233-
for i, nodeScore := range reduceResult {
234-
result[i].Score = int64(nodeScore)
235-
}
236-
return nil
237-
}
238-
239-
// updateNodeScoresForLabel updates the node scores for a single label. Note it does not update the
240-
// original result from the map phase directly, but instead updates the reduceResult, which is used
241-
// to update the original result finally. This makes sure that each call to updateNodeScoresForLabel
242-
// receives the same mapResult to work with.
243-
// Why are we doing this? This is a workaround for the migration from priorities to score plugins.
244-
// Historically the priority is designed to handle only one label, and multiple priorities are configured
245-
// to work with multiple labels. Using multiple plugins is not allowed in the new framework. Therefore
246-
// we need to modify the old priority to be able to handle multiple labels so that it can be mapped
247-
// to a single plugin. This will be deprecated soon.
248-
func (s *ServiceAntiAffinity) updateNodeScoresForLabel(sharedLister schedulerlisters.SharedLister, mapResult framework.NodeScoreList, reduceResult []float64, label string) error {
249-
var numServicePods int64
250-
var labelValue string
251-
podCounts := map[string]int64{}
252-
labelNodesStatus := map[string]string{}
253-
maxPriorityFloat64 := float64(framework.MaxNodeScore)
254-
255-
for _, hostPriority := range mapResult {
256-
numServicePods += hostPriority.Score
257-
nodeInfo, err := sharedLister.NodeInfos().Get(hostPriority.Name)
258-
if err != nil {
259-
return err
260-
}
261-
if !labels.Set(nodeInfo.Node().Labels).Has(label) {
262-
continue
263-
}
264-
265-
labelValue = labels.Set(nodeInfo.Node().Labels).Get(label)
266-
labelNodesStatus[hostPriority.Name] = labelValue
267-
podCounts[labelValue] += hostPriority.Score
268-
}
269-
270-
//score int - scale of 0-maxPriority
271-
// 0 being the lowest priority and maxPriority being the highest
272-
for i, hostPriority := range mapResult {
273-
labelValue, ok := labelNodesStatus[hostPriority.Name]
274-
if !ok {
275-
continue
276-
}
277-
// initializing to the default/max node score of maxPriority
278-
fScore := maxPriorityFloat64
279-
if numServicePods > 0 {
280-
fScore = maxPriorityFloat64 * (float64(numServicePods-podCounts[labelValue]) / float64(numServicePods))
281-
}
282-
// The score of current label only accounts for 1/len(s.labels) of the total score.
283-
// The policy API definition only allows a single label to be configured, associated with a weight.
284-
// This is compensated by the fact that the total weight is the sum of all weights configured
285-
// in each policy config.
286-
reduceResult[i] += fScore / float64(len(s.labels))
287-
}
288-
289-
return nil
290-
}

pkg/scheduler/algorithm/priorities/selector_spreading_test.go

Lines changed: 0 additions & 190 deletions
Original file line numberDiff line numberDiff line change
@@ -607,196 +607,6 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
607607
}
608608
}
609609

610-
func TestZoneSpreadPriority(t *testing.T) {
611-
labels1 := map[string]string{
612-
"foo": "bar",
613-
"baz": "blah",
614-
}
615-
labels2 := map[string]string{
616-
"bar": "foo",
617-
"baz": "blah",
618-
}
619-
zone1 := map[string]string{
620-
"zone": "zone1",
621-
}
622-
zone2 := map[string]string{
623-
"zone": "zone2",
624-
}
625-
nozone := map[string]string{
626-
"name": "value",
627-
}
628-
zone0Spec := v1.PodSpec{
629-
NodeName: "machine01",
630-
}
631-
zone1Spec := v1.PodSpec{
632-
NodeName: "machine11",
633-
}
634-
zone2Spec := v1.PodSpec{
635-
NodeName: "machine21",
636-
}
637-
labeledNodes := map[string]map[string]string{
638-
"machine01": nozone, "machine02": nozone,
639-
"machine11": zone1, "machine12": zone1,
640-
"machine21": zone2, "machine22": zone2,
641-
}
642-
tests := []struct {
643-
pod *v1.Pod
644-
pods []*v1.Pod
645-
nodes map[string]map[string]string
646-
services []*v1.Service
647-
expectedList framework.NodeScoreList
648-
name string
649-
}{
650-
{
651-
pod: new(v1.Pod),
652-
nodes: labeledNodes,
653-
expectedList: []framework.NodeScore{{Name: "machine11", Score: framework.MaxNodeScore}, {Name: "machine12", Score: framework.MaxNodeScore},
654-
{Name: "machine21", Score: framework.MaxNodeScore}, {Name: "machine22", Score: framework.MaxNodeScore},
655-
{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
656-
name: "nothing scheduled",
657-
},
658-
{
659-
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
660-
pods: []*v1.Pod{{Spec: zone1Spec}},
661-
nodes: labeledNodes,
662-
expectedList: []framework.NodeScore{{Name: "machine11", Score: framework.MaxNodeScore}, {Name: "machine12", Score: framework.MaxNodeScore},
663-
{Name: "machine21", Score: framework.MaxNodeScore}, {Name: "machine22", Score: framework.MaxNodeScore},
664-
{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
665-
name: "no services",
666-
},
667-
{
668-
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
669-
pods: []*v1.Pod{{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}},
670-
nodes: labeledNodes,
671-
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
672-
expectedList: []framework.NodeScore{{Name: "machine11", Score: framework.MaxNodeScore}, {Name: "machine12", Score: framework.MaxNodeScore},
673-
{Name: "machine21", Score: framework.MaxNodeScore}, {Name: "machine22", Score: framework.MaxNodeScore},
674-
{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
675-
name: "different services",
676-
},
677-
{
678-
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
679-
pods: []*v1.Pod{
680-
{Spec: zone0Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
681-
{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
682-
{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
683-
},
684-
nodes: labeledNodes,
685-
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
686-
expectedList: []framework.NodeScore{{Name: "machine11", Score: framework.MaxNodeScore}, {Name: "machine12", Score: framework.MaxNodeScore},
687-
{Name: "machine21", Score: 0}, {Name: "machine22", Score: 0},
688-
{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
689-
name: "three pods, one service pod",
690-
},
691-
{
692-
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
693-
pods: []*v1.Pod{
694-
{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
695-
{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
696-
{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
697-
},
698-
nodes: labeledNodes,
699-
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
700-
expectedList: []framework.NodeScore{{Name: "machine11", Score: 50}, {Name: "machine12", Score: 50},
701-
{Name: "machine21", Score: 50}, {Name: "machine22", Score: 50},
702-
{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
703-
name: "three pods, two service pods on different machines",
704-
},
705-
{
706-
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: metav1.NamespaceDefault}},
707-
pods: []*v1.Pod{
708-
{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
709-
{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: metav1.NamespaceDefault}},
710-
{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
711-
{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
712-
},
713-
nodes: labeledNodes,
714-
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault}}},
715-
expectedList: []framework.NodeScore{{Name: "machine11", Score: 0}, {Name: "machine12", Score: 0},
716-
{Name: "machine21", Score: framework.MaxNodeScore}, {Name: "machine22", Score: framework.MaxNodeScore},
717-
{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
718-
name: "three service label match pods in different namespaces",
719-
},
720-
{
721-
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
722-
pods: []*v1.Pod{
723-
{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
724-
{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
725-
{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
726-
{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
727-
},
728-
nodes: labeledNodes,
729-
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
730-
expectedList: []framework.NodeScore{{Name: "machine11", Score: 66}, {Name: "machine12", Score: 66},
731-
{Name: "machine21", Score: 33}, {Name: "machine22", Score: 33},
732-
{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
733-
name: "four pods, three service pods",
734-
},
735-
{
736-
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
737-
pods: []*v1.Pod{
738-
{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
739-
{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
740-
{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
741-
},
742-
nodes: labeledNodes,
743-
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
744-
expectedList: []framework.NodeScore{{Name: "machine11", Score: 33}, {Name: "machine12", Score: 33},
745-
{Name: "machine21", Score: 66}, {Name: "machine22", Score: 66},
746-
{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
747-
name: "service with partial pod label matches",
748-
},
749-
{
750-
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
751-
pods: []*v1.Pod{
752-
{Spec: zone0Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
753-
{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
754-
{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
755-
{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
756-
},
757-
nodes: labeledNodes,
758-
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
759-
expectedList: []framework.NodeScore{{Name: "machine11", Score: 75}, {Name: "machine12", Score: 75},
760-
{Name: "machine21", Score: 50}, {Name: "machine22", Score: 50},
761-
{Name: "machine01", Score: 0}, {Name: "machine02", Score: 0}},
762-
name: "service pod on non-zoned node",
763-
},
764-
}
765-
// these local variables just make sure controllerLister/replicaSetLister/statefulSetLister are not nil
766-
// when construct metaDataProducer
767-
sss := []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}}
768-
rcs := []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}}
769-
rss := []*apps.ReplicaSet{{Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}}
770-
771-
for _, test := range tests {
772-
t.Run(test.name, func(t *testing.T) {
773-
nodes := makeLabeledNodeList(labeledNodes)
774-
snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes))
775-
zoneSpread := ServiceAntiAffinity{podLister: snapshot.Pods(), serviceLister: fakelisters.ServiceLister(test.services), labels: []string{"zone"}}
776-
777-
metaDataProducer := NewMetadataFactory(
778-
fakelisters.ServiceLister(test.services),
779-
fakelisters.ControllerLister(rcs),
780-
fakelisters.ReplicaSetLister(rss),
781-
fakelisters.StatefulSetLister(sss),
782-
1,
783-
)
784-
metaData := metaDataProducer(test.pod, nodes, snapshot)
785-
list, err := runMapReducePriority(zoneSpread.CalculateAntiAffinityPriorityMap, zoneSpread.CalculateAntiAffinityPriorityReduce, metaData, test.pod, snapshot, makeLabeledNodeList(test.nodes))
786-
if err != nil {
787-
t.Errorf("unexpected error: %v", err)
788-
}
789-
790-
// sort the two lists to avoid failures on account of different ordering
791-
sortNodeScoreList(test.expectedList)
792-
sortNodeScoreList(list)
793-
if !reflect.DeepEqual(test.expectedList, list) {
794-
t.Errorf("expected %#v, got %#v", test.expectedList, list)
795-
}
796-
})
797-
}
798-
}
799-
800610
func makeLabeledNodeList(nodeMap map[string]map[string]string) []*v1.Node {
801611
nodes := make([]*v1.Node, 0, len(nodeMap))
802612
for nodeName, labels := range nodeMap {

pkg/scheduler/framework/plugins/serviceaffinity/BUILD

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,6 @@ go_library(
77
visibility = ["//visibility:public"],
88
deps = [
99
"//pkg/scheduler/algorithm/predicates:go_default_library",
10-
"//pkg/scheduler/algorithm/priorities:go_default_library",
1110
"//pkg/scheduler/framework/plugins/migration:go_default_library",
1211
"//pkg/scheduler/framework/v1alpha1:go_default_library",
1312
"//pkg/scheduler/listers:go_default_library",

0 commit comments

Comments
 (0)