Skip to content

Commit 9844676

Browse files
authored
Merge pull request kubernetes#127409 from sanposhiho/cleanup-resourcefit
chore(noderesourcefit): correct tests and clean up comments
2 parents 048c853 + 4a81a85 commit 9844676

File tree

2 files changed

+37
-46
lines changed

2 files changed

+37
-46
lines changed

pkg/scheduler/framework/plugins/noderesources/fit.go

Lines changed: 20 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -268,14 +268,14 @@ func (f *Fit) EventsToRegister(_ context.Context) ([]framework.ClusterEventWithH
268268
}
269269

270270
return []framework.ClusterEventWithHint{
271-
{Event: framework.ClusterEvent{Resource: framework.Pod, ActionType: podActionType}, QueueingHintFn: f.isSchedulableAfterPodChange},
271+
{Event: framework.ClusterEvent{Resource: framework.Pod, ActionType: podActionType}, QueueingHintFn: f.isSchedulableAfterPodEvent},
272272
{Event: framework.ClusterEvent{Resource: framework.Node, ActionType: nodeActionType}, QueueingHintFn: f.isSchedulableAfterNodeChange},
273273
}, nil
274274
}
275275

276-
// isSchedulableAfterPodChange is invoked whenever a pod is deleted or updated. It checks whether
276+
// isSchedulableAfterPodEvent is invoked whenever a pod is deleted or scaled down. It checks whether
277277
// that change made a previously unschedulable pod schedulable.
278-
func (f *Fit) isSchedulableAfterPodChange(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) {
278+
func (f *Fit) isSchedulableAfterPodEvent(logger klog.Logger, pod *v1.Pod, oldObj, newObj interface{}) (framework.QueueingHint, error) {
279279
originalPod, modifiedPod, err := schedutil.As[*v1.Pod](oldObj, newObj)
280280
if err != nil {
281281
return framework.Queue, err
@@ -286,25 +286,24 @@ func (f *Fit) isSchedulableAfterPodChange(logger klog.Logger, pod *v1.Pod, oldOb
286286
logger.V(5).Info("the deleted pod was unscheduled and it wouldn't make the unscheduled pod schedulable", "pod", klog.KObj(pod), "deletedPod", klog.KObj(originalPod))
287287
return framework.QueueSkip, nil
288288
}
289+
290+
// any deletion event to a scheduled pod could make the unscheduled pod schedulable.
289291
logger.V(5).Info("another scheduled pod was deleted, and it may make the unscheduled pod schedulable", "pod", klog.KObj(pod), "deletedPod", klog.KObj(originalPod))
290292
return framework.Queue, nil
291293
}
292294

293295
if !f.enableInPlacePodVerticalScaling {
294-
// If InPlacePodVerticalScaling (KEP 1287) is disabled, it cannot free up resources.
296+
// If InPlacePodVerticalScaling (KEP 1287) is disabled, the pod scale down event cannot free up any resources.
295297
logger.V(5).Info("another pod was modified, but InPlacePodVerticalScaling is disabled, so it doesn't make the unscheduled pod schedulable", "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod))
296298
return framework.QueueSkip, nil
297299
}
298300

299-
// Modifications may or may not be relevant. We only care about modifications that
300-
// change the other pod's resource request and the resource is also requested by the
301-
// pod we are trying to schedule.
302-
if !f.isResourceScaleDown(pod, originalPod, modifiedPod) {
301+
if !f.isSchedulableAfterPodScaleDown(pod, originalPod, modifiedPod) {
303302
if loggerV := logger.V(10); loggerV.Enabled() {
304303
// Log more information.
305-
loggerV.Info("another Pod got modified, but the modification isn't related to the resource request", "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod), "diff", cmp.Diff(originalPod, modifiedPod))
304+
loggerV.Info("pod got scaled down, but the modification isn't related to the resource requests of the target pod", "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod), "diff", cmp.Diff(originalPod, modifiedPod))
306305
} else {
307-
logger.V(5).Info("another Pod got modified, but the modification isn't related to the resource request", "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod))
306+
logger.V(5).Info("pod got scaled down, but the modification isn't related to the resource requests of the target pod", "pod", klog.KObj(pod), "modifiedPod", klog.KObj(modifiedPod))
308307
}
309308
return framework.QueueSkip, nil
310309
}
@@ -313,12 +312,17 @@ func (f *Fit) isSchedulableAfterPodChange(logger klog.Logger, pod *v1.Pod, oldOb
313312
return framework.Queue, nil
314313
}
315314

316-
// isResourceScaleDown checks whether an update event may make the pod schedulable. Specifically:
317-
// - Returns true when an update event shows a scheduled pod's resource request got reduced.
318-
// - Returns true when an update event is for the unscheduled pod itself, and it shows the pod's resource request got reduced.
319-
func (f *Fit) isResourceScaleDown(targetPod, originalPod, modifiedPod *v1.Pod) bool {
320-
if modifiedPod.UID != targetPod.UID && modifiedPod.Spec.NodeName == "" {
321-
// If the update event is not for targetPod and a scheduled Pod,
315+
// isSchedulableAfterPodScaleDown checks whether the scale down event may make the target pod schedulable. Specifically:
316+
// - Returns true when the update event is for the target pod itself.
317+
// - Returns true when the update event shows a scheduled pod's resource request that the target pod also requests got reduced.
318+
func (f *Fit) isSchedulableAfterPodScaleDown(targetPod, originalPod, modifiedPod *v1.Pod) bool {
319+
if modifiedPod.UID == targetPod.UID {
320+
// If the scaling down event is for targetPod, it would make targetPod schedulable.
321+
return true
322+
}
323+
324+
if modifiedPod.Spec.NodeName == "" {
325+
// If the update event is for an unscheduled Pod,
322326
// it wouldn't make targetPod schedulable.
323327
return false
324328
}

pkg/scheduler/framework/plugins/noderesources/fit_test.go

Lines changed: 17 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -1171,51 +1171,38 @@ func Test_isSchedulableAfterPodChange(t *testing.T) {
11711171
expectedHint: framework.QueueSkip,
11721172
},
11731173
"skip-queue-on-disable-inplace-pod-vertical-scaling": {
1174-
pod: st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
1175-
oldObj: st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Node("fake").Obj(),
1174+
pod: st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
1175+
oldObj: st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Node("fake").Obj(),
1176+
// (Actually, this scale down cannot happen when InPlacePodVerticalScaling is disabled.)
11761177
newObj: st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Node("fake").Obj(),
11771178
enableInPlacePodVerticalScaling: false,
11781179
expectedHint: framework.QueueSkip,
11791180
},
11801181
"skip-queue-on-other-unscheduled-pod": {
1181-
pod: st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).UID("uid0").Obj(),
1182-
oldObj: st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).UID("uid1").Obj(),
1183-
newObj: st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).UID("uid1").Obj(),
1184-
enableInPlacePodVerticalScaling: true,
1185-
expectedHint: framework.QueueSkip,
1186-
},
1187-
"skip-queue-on-other-pod-non-resource-changes": {
1188-
pod: &v1.Pod{},
1189-
oldObj: st.MakePod().Name("pod2").Label("k", "v").Node("fake").Obj(),
1190-
newObj: st.MakePod().Name("pod2").Label("foo", "bar").Node("fake").Obj(),
1191-
enableInPlacePodVerticalScaling: true,
1192-
expectedHint: framework.QueueSkip,
1193-
},
1194-
"skip-queue-on-other-pod-unrelated-resource-changes": {
1195-
pod: st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
1196-
oldObj: st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceMemory: "2"}).Node("fake").Obj(),
1197-
newObj: st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceMemory: "1"}).Node("fake").Obj(),
1182+
pod: st.MakePod().Name("pod1").UID("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).UID("uid0").Obj(),
1183+
oldObj: st.MakePod().Name("pod2").UID("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).UID("uid1").Obj(),
1184+
newObj: st.MakePod().Name("pod2").UID("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).UID("uid1").Obj(),
11981185
enableInPlacePodVerticalScaling: true,
11991186
expectedHint: framework.QueueSkip,
12001187
},
1201-
"skip-queue-on-other-pod-resource-scale-up": {
1202-
pod: st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
1203-
oldObj: st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Node("fake").Obj(),
1204-
newObj: st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Node("fake").Obj(),
1188+
"skip-queue-on-other-pod-unrelated-resource-scaled-down": {
1189+
pod: st.MakePod().Name("pod1").UID("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
1190+
oldObj: st.MakePod().Name("pod2").UID("pod2").Req(map[v1.ResourceName]string{v1.ResourceMemory: "2"}).Node("fake").Obj(),
1191+
newObj: st.MakePod().Name("pod2").UID("pod2").Req(map[v1.ResourceName]string{v1.ResourceMemory: "1"}).Node("fake").Obj(),
12051192
enableInPlacePodVerticalScaling: true,
12061193
expectedHint: framework.QueueSkip,
12071194
},
12081195
"queue-on-other-pod-some-resource-scale-down": {
1209-
pod: st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
1210-
oldObj: st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Node("fake").Obj(),
1211-
newObj: st.MakePod().Name("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Node("fake").Obj(),
1196+
pod: st.MakePod().Name("pod1").UID("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
1197+
oldObj: st.MakePod().Name("pod2").UID("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Node("fake").Obj(),
1198+
newObj: st.MakePod().Name("pod2").UID("pod2").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Node("fake").Obj(),
12121199
enableInPlacePodVerticalScaling: true,
12131200
expectedHint: framework.Queue,
12141201
},
12151202
"queue-on-target-pod-some-resource-scale-down": {
1216-
pod: st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
1217-
oldObj: st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj(),
1218-
newObj: st.MakePod().Name("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
1203+
pod: st.MakePod().Name("pod1").UID("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
1204+
oldObj: st.MakePod().Name("pod1").UID("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj(),
1205+
newObj: st.MakePod().Name("pod1").UID("pod1").Req(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).Obj(),
12191206
enableInPlacePodVerticalScaling: true,
12201207
expectedHint: framework.Queue,
12211208
},
@@ -1230,7 +1217,7 @@ func Test_isSchedulableAfterPodChange(t *testing.T) {
12301217
if err != nil {
12311218
t.Fatal(err)
12321219
}
1233-
actualHint, err := p.(*Fit).isSchedulableAfterPodChange(logger, tc.pod, tc.oldObj, tc.newObj)
1220+
actualHint, err := p.(*Fit).isSchedulableAfterPodEvent(logger, tc.pod, tc.oldObj, tc.newObj)
12341221
if tc.expectedErr {
12351222
require.Error(t, err)
12361223
return

0 commit comments

Comments
 (0)