Skip to content

Commit aa9367f

Browse files
authored
Merge pull request kubernetes#3701 from yaroslava-serdiuk/DS_evict
add daemonset eviction for non-empty nodes
2 parents ad6c7b1 + dcf0970 commit aa9367f

File tree

8 files changed

+308
-208
lines changed

8 files changed

+308
-208
lines changed

cluster-autoscaler/core/scale_down.go

Lines changed: 29 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -952,7 +952,7 @@ func (sd *ScaleDown) TryToScaleDown(
952952
errors.InternalError, "failed to find node group for %s", toRemove.Node.Name)}
953953
return
954954
}
955-
result = sd.deleteNode(toRemove.Node, toRemove.PodsToReschedule, nodeGroup)
955+
result = sd.deleteNode(toRemove.Node, toRemove.PodsToReschedule, toRemove.DaemonSetPods, nodeGroup)
956956
if result.ResultType != status.NodeDeleteOk {
957957
klog.Errorf("Failed to delete %s: %v", toRemove.Node.Name, result.Err)
958958
return
@@ -1106,7 +1106,7 @@ func (sd *ScaleDown) scheduleDeleteEmptyNodes(emptyNodes []*apiv1.Node, client k
11061106
return deletedNodes, nil
11071107
}
11081108

1109-
func (sd *ScaleDown) deleteNode(node *apiv1.Node, pods []*apiv1.Pod,
1109+
func (sd *ScaleDown) deleteNode(node *apiv1.Node, pods []*apiv1.Pod, daemonSetPods []*apiv1.Pod,
11101110
nodeGroup cloudprovider.NodeGroup) status.NodeDeleteResult {
11111111
deleteSuccessful := false
11121112
drainSuccessful := false
@@ -1134,7 +1134,7 @@ func (sd *ScaleDown) deleteNode(node *apiv1.Node, pods []*apiv1.Pod,
11341134
sd.context.Recorder.Eventf(node, apiv1.EventTypeNormal, "ScaleDown", "marked the node as toBeDeleted/unschedulable")
11351135

11361136
// attempt drain
1137-
evictionResults, err := drainNode(node, pods, sd.context.ClientSet, sd.context.Recorder, sd.context.MaxGracefulTerminationSec, MaxPodEvictionTime, EvictionRetryTime, PodEvictionHeadroom)
1137+
evictionResults, err := drainNode(node, pods, daemonSetPods, sd.context.ClientSet, sd.context.Recorder, sd.context.MaxGracefulTerminationSec, MaxPodEvictionTime, EvictionRetryTime, PodEvictionHeadroom)
11381138
if err != nil {
11391139
return status.NodeDeleteResult{ResultType: status.NodeDeleteErrorFailedToEvictPods, Err: err, PodEvictionResults: evictionResults}
11401140
}
@@ -1154,7 +1154,7 @@ func (sd *ScaleDown) deleteNode(node *apiv1.Node, pods []*apiv1.Pod,
11541154
return status.NodeDeleteResult{ResultType: status.NodeDeleteOk}
11551155
}
11561156

1157-
func evictPod(podToEvict *apiv1.Pod, client kube_client.Interface, recorder kube_record.EventRecorder,
1157+
func evictPod(podToEvict *apiv1.Pod, isDaemonSetPod bool, client kube_client.Interface, recorder kube_record.EventRecorder,
11581158
maxGracefulTerminationSec int, retryUntil time.Time, waitBetweenRetries time.Duration) status.PodEvictionResult {
11591159
recorder.Eventf(podToEvict, apiv1.EventTypeNormal, "ScaleDown", "deleting pod for node scale down")
11601160

@@ -1184,38 +1184,54 @@ func evictPod(podToEvict *apiv1.Pod, client kube_client.Interface, recorder kube
11841184
return status.PodEvictionResult{Pod: podToEvict, TimedOut: false, Err: nil}
11851185
}
11861186
}
1187-
klog.Errorf("Failed to evict pod %s, error: %v", podToEvict.Name, lastError)
1188-
recorder.Eventf(podToEvict, apiv1.EventTypeWarning, "ScaleDownFailed", "failed to delete pod for ScaleDown")
1187+
if !isDaemonSetPod {
1188+
klog.Errorf("Failed to evict pod %s, error: %v", podToEvict.Name, lastError)
1189+
recorder.Eventf(podToEvict, apiv1.EventTypeWarning, "ScaleDownFailed", "failed to delete pod for ScaleDown")
1190+
}
11891191
return status.PodEvictionResult{Pod: podToEvict, TimedOut: true, Err: fmt.Errorf("failed to evict pod %s/%s within allowed timeout (last error: %v)", podToEvict.Namespace, podToEvict.Name, lastError)}
11901192
}
11911193

11921194
// Performs drain logic on the node. Marks the node as unschedulable and later removes all pods, giving
11931195
// them up to MaxGracefulTerminationTime to finish.
1194-
func drainNode(node *apiv1.Node, pods []*apiv1.Pod, client kube_client.Interface, recorder kube_record.EventRecorder,
1196+
func drainNode(node *apiv1.Node, pods []*apiv1.Pod, daemonSetPods []*apiv1.Pod, client kube_client.Interface, recorder kube_record.EventRecorder,
11951197
maxGracefulTerminationSec int, maxPodEvictionTime time.Duration, waitBetweenRetries time.Duration,
11961198
podEvictionHeadroom time.Duration) (evictionResults map[string]status.PodEvictionResult, err error) {
11971199

11981200
evictionResults = make(map[string]status.PodEvictionResult)
1199-
toEvict := len(pods)
12001201
retryUntil := time.Now().Add(maxPodEvictionTime)
1201-
confirmations := make(chan status.PodEvictionResult, toEvict)
1202+
confirmations := make(chan status.PodEvictionResult, len(pods))
1203+
daemonSetConfirmations := make(chan status.PodEvictionResult, len(daemonSetPods))
12021204
for _, pod := range pods {
12031205
evictionResults[pod.Name] = status.PodEvictionResult{Pod: pod, TimedOut: true, Err: nil}
12041206
go func(podToEvict *apiv1.Pod) {
1205-
confirmations <- evictPod(podToEvict, client, recorder, maxGracefulTerminationSec, retryUntil, waitBetweenRetries)
1207+
confirmations <- evictPod(podToEvict, false, client, recorder, maxGracefulTerminationSec, retryUntil, waitBetweenRetries)
12061208
}(pod)
12071209
}
12081210

1209-
for range pods {
1211+
// Perform eviction of DaemonSet pods. We don't want to raise an error if a DaemonSet pod wasn't evicted properly
1212+
for _, daemonSetPod := range daemonSetPods {
1213+
go func(podToEvict *apiv1.Pod) {
1214+
daemonSetConfirmations <- evictPod(podToEvict, true, client, recorder, maxGracefulTerminationSec, retryUntil, waitBetweenRetries)
1215+
}(daemonSetPod)
1216+
1217+
}
1218+
1219+
podsEvictionCounter := 0
1220+
for i := 0; i < len(pods)+len(daemonSetPods); i++ {
12101221
select {
12111222
case evictionResult := <-confirmations:
1223+
podsEvictionCounter++
12121224
evictionResults[evictionResult.Pod.Name] = evictionResult
12131225
if evictionResult.WasEvictionSuccessful() {
12141226
metrics.RegisterEvictions(1)
12151227
}
1228+
case <-daemonSetConfirmations:
12161229
case <-time.After(retryUntil.Sub(time.Now()) + 5*time.Second):
1217-
// All pods initially had results with TimedOut set to true, so the ones that didn't receive an actual result are correctly marked as timed out.
1218-
return evictionResults, errors.NewAutoscalerError(errors.ApiCallError, "Failed to drain node %s/%s: timeout when waiting for creating evictions", node.Namespace, node.Name)
1230+
if podsEvictionCounter < len(pods) {
1231+
// All pods initially had results with TimedOut set to true, so the ones that didn't receive an actual result are correctly marked as timed out.
1232+
return evictionResults, errors.NewAutoscalerError(errors.ApiCallError, "Failed to drain node %s/%s: timeout when waiting for creating evictions", node.Namespace, node.Name)
1233+
}
1234+
klog.Infof("Timeout when waiting for creating daemonSetPods eviction")
12191235
}
12201236
}
12211237

cluster-autoscaler/core/scale_down_test.go

Lines changed: 63 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -643,7 +643,7 @@ func TestDeleteNode(t *testing.T) {
643643
sd := NewScaleDown(&context, clusterStateRegistry)
644644

645645
// attempt delete
646-
result := sd.deleteNode(n1, pods, provider.GetNodeGroup("ng1"))
646+
result := sd.deleteNode(n1, pods, []*apiv1.Pod{}, provider.GetNodeGroup("ng1"))
647647

648648
// verify
649649
if scenario.expectedDeletion {
@@ -672,7 +672,9 @@ func TestDrainNode(t *testing.T) {
672672

673673
p1 := BuildTestPod("p1", 100, 0)
674674
p2 := BuildTestPod("p2", 300, 0)
675+
d1 := BuildTestPod("d1", 150, 0)
675676
n1 := BuildTestNode("n1", 1000, 1000)
677+
676678
SetNodeReadyState(n1, true, time.Time{})
677679

678680
fakeClient.Fake.AddReactor("get", "pods", func(action core.Action) (bool, runtime.Object, error) {
@@ -690,14 +692,17 @@ func TestDrainNode(t *testing.T) {
690692
deletedPods <- eviction.Name
691693
return true, nil, nil
692694
})
693-
_, err := drainNode(n1, []*apiv1.Pod{p1, p2}, fakeClient, kube_util.CreateEventRecorder(fakeClient), 20, 5*time.Second, 0*time.Second, PodEvictionHeadroom)
695+
_, err := drainNode(n1, []*apiv1.Pod{p1, p2}, []*apiv1.Pod{d1}, fakeClient, kube_util.CreateEventRecorder(fakeClient), 20, 5*time.Second, 0*time.Second, PodEvictionHeadroom)
694696
assert.NoError(t, err)
695697
deleted := make([]string, 0)
696698
deleted = append(deleted, utils.GetStringFromChan(deletedPods))
697699
deleted = append(deleted, utils.GetStringFromChan(deletedPods))
700+
deleted = append(deleted, utils.GetStringFromChan(deletedPods))
701+
698702
sort.Strings(deleted)
699-
assert.Equal(t, p1.Name, deleted[0])
700-
assert.Equal(t, p2.Name, deleted[1])
703+
assert.Equal(t, d1.Name, deleted[0])
704+
assert.Equal(t, p1.Name, deleted[1])
705+
assert.Equal(t, p2.Name, deleted[2])
701706
}
702707

703708
func TestDrainNodeWithRescheduled(t *testing.T) {
@@ -733,7 +738,7 @@ func TestDrainNodeWithRescheduled(t *testing.T) {
733738
deletedPods <- eviction.Name
734739
return true, nil, nil
735740
})
736-
_, err := drainNode(n1, []*apiv1.Pod{p1, p2}, fakeClient, kube_util.CreateEventRecorder(fakeClient), 20, 5*time.Second, 0*time.Second, PodEvictionHeadroom)
741+
_, err := drainNode(n1, []*apiv1.Pod{p1, p2}, []*apiv1.Pod{}, fakeClient, kube_util.CreateEventRecorder(fakeClient), 20, 5*time.Second, 0*time.Second, PodEvictionHeadroom)
737742
assert.NoError(t, err)
738743
deleted := make([]string, 0)
739744
deleted = append(deleted, utils.GetStringFromChan(deletedPods))
@@ -755,6 +760,7 @@ func TestDrainNodeWithRetries(t *testing.T) {
755760
p1 := BuildTestPod("p1", 100, 0)
756761
p2 := BuildTestPod("p2", 300, 0)
757762
p3 := BuildTestPod("p3", 300, 0)
763+
d1 := BuildTestPod("d1", 150, 0)
758764
n1 := BuildTestNode("n1", 1000, 1000)
759765
SetNodeReadyState(n1, true, time.Time{})
760766

@@ -782,16 +788,62 @@ func TestDrainNodeWithRetries(t *testing.T) {
782788
return true, nil, fmt.Errorf("too many concurrent evictions")
783789
}
784790
})
785-
_, err := drainNode(n1, []*apiv1.Pod{p1, p2, p3}, fakeClient, kube_util.CreateEventRecorder(fakeClient), 20, 5*time.Second, 0*time.Second, PodEvictionHeadroom)
791+
_, err := drainNode(n1, []*apiv1.Pod{p1, p2, p3}, []*apiv1.Pod{d1}, fakeClient, kube_util.CreateEventRecorder(fakeClient), 20, 5*time.Second, 0*time.Second, PodEvictionHeadroom)
786792
assert.NoError(t, err)
787793
deleted := make([]string, 0)
788794
deleted = append(deleted, utils.GetStringFromChan(deletedPods))
789795
deleted = append(deleted, utils.GetStringFromChan(deletedPods))
790796
deleted = append(deleted, utils.GetStringFromChan(deletedPods))
797+
deleted = append(deleted, utils.GetStringFromChan(deletedPods))
791798
sort.Strings(deleted)
792-
assert.Equal(t, p1.Name, deleted[0])
793-
assert.Equal(t, p2.Name, deleted[1])
794-
assert.Equal(t, p3.Name, deleted[2])
799+
assert.Equal(t, d1.Name, deleted[0])
800+
assert.Equal(t, p1.Name, deleted[1])
801+
assert.Equal(t, p2.Name, deleted[2])
802+
assert.Equal(t, p3.Name, deleted[3])
803+
}
804+
805+
func TestDrainNodeDaemonSetEvictionFailure(t *testing.T) {
806+
fakeClient := &fake.Clientset{}
807+
808+
p1 := BuildTestPod("p1", 100, 0)
809+
p2 := BuildTestPod("p2", 300, 0)
810+
d1 := BuildTestPod("d1", 150, 0)
811+
d2 := BuildTestPod("d2", 250, 0)
812+
n1 := BuildTestNode("n1", 1000, 1000)
813+
e1 := fmt.Errorf("eviction_error: d1")
814+
e2 := fmt.Errorf("eviction_error: d2")
815+
816+
fakeClient.Fake.AddReactor("get", "pods", func(action core.Action) (bool, runtime.Object, error) {
817+
return true, nil, errors.NewNotFound(apiv1.Resource("pod"), "whatever")
818+
})
819+
fakeClient.Fake.AddReactor("create", "pods", func(action core.Action) (bool, runtime.Object, error) {
820+
createAction := action.(core.CreateAction)
821+
if createAction == nil {
822+
return false, nil, nil
823+
}
824+
eviction := createAction.GetObject().(*policyv1.Eviction)
825+
if eviction == nil {
826+
return false, nil, nil
827+
}
828+
if eviction.Name == "d1" {
829+
return true, nil, e1
830+
}
831+
if eviction.Name == "d2" {
832+
return true, nil, e2
833+
}
834+
return true, nil, nil
835+
})
836+
evictionResults, err := drainNode(n1, []*apiv1.Pod{p1, p2}, []*apiv1.Pod{d1, d2}, fakeClient, kube_util.CreateEventRecorder(fakeClient), 20, 0*time.Second, 0*time.Second, PodEvictionHeadroom)
837+
assert.NoError(t, err)
838+
assert.Equal(t, 2, len(evictionResults))
839+
assert.Equal(t, p1, evictionResults["p1"].Pod)
840+
assert.Equal(t, p2, evictionResults["p2"].Pod)
841+
assert.NoError(t, evictionResults["p1"].Err)
842+
assert.NoError(t, evictionResults["p2"].Err)
843+
assert.False(t, evictionResults["p1"].TimedOut)
844+
assert.False(t, evictionResults["p2"].TimedOut)
845+
assert.True(t, evictionResults["p1"].WasEvictionSuccessful())
846+
assert.True(t, evictionResults["p2"].WasEvictionSuccessful())
795847
}
796848

797849
func TestDrainNodeEvictionFailure(t *testing.T) {
@@ -825,7 +877,7 @@ func TestDrainNodeEvictionFailure(t *testing.T) {
825877
return true, nil, nil
826878
})
827879

828-
evictionResults, err := drainNode(n1, []*apiv1.Pod{p1, p2, p3, p4}, fakeClient, kube_util.CreateEventRecorder(fakeClient), 20, 0*time.Second, 0*time.Second, PodEvictionHeadroom)
880+
evictionResults, err := drainNode(n1, []*apiv1.Pod{p1, p2, p3, p4}, []*apiv1.Pod{}, fakeClient, kube_util.CreateEventRecorder(fakeClient), 20, 0*time.Second, 0*time.Second, PodEvictionHeadroom)
829881
assert.Error(t, err)
830882
assert.Equal(t, 4, len(evictionResults))
831883
assert.Equal(t, *p1, *evictionResults["p1"].Pod)
@@ -874,7 +926,7 @@ func TestDrainNodeDisappearanceFailure(t *testing.T) {
874926
return true, nil, nil
875927
})
876928

877-
evictionResults, err := drainNode(n1, []*apiv1.Pod{p1, p2, p3, p4}, fakeClient, kube_util.CreateEventRecorder(fakeClient), 0, 0*time.Second, 0*time.Second, 0*time.Second)
929+
evictionResults, err := drainNode(n1, []*apiv1.Pod{p1, p2, p3, p4}, []*apiv1.Pod{}, fakeClient, kube_util.CreateEventRecorder(fakeClient), 0, 0*time.Second, 0*time.Second, 0*time.Second)
878930
assert.Error(t, err)
879931
assert.Equal(t, 4, len(evictionResults))
880932
assert.Equal(t, *p1, *evictionResults["p1"].Pod)

cluster-autoscaler/simulator/cluster.go

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -53,6 +53,7 @@ type NodeToBeRemoved struct {
5353
Node *apiv1.Node
5454
// PodsToReschedule contains pods on the node that should be rescheduled elsewhere.
5555
PodsToReschedule []*apiv1.Pod
56+
DaemonSetPods []*apiv1.Pod
5657
}
5758

5859
// UnremovableNode represents a node that can't be removed by CA.
@@ -147,6 +148,7 @@ candidateloop:
147148
klog.V(2).Infof("%s: %s for removal", evaluationType, nodeName)
148149

149150
var podsToRemove []*apiv1.Pod
151+
var daemonSetPods []*apiv1.Pod
150152
var blockingPod *drain.BlockingPod
151153

152154
if _, found := destinationMap[nodeName]; !found {
@@ -156,10 +158,10 @@ candidateloop:
156158
}
157159

158160
if fastCheck {
159-
podsToRemove, blockingPod, err = FastGetPodsToMove(nodeInfo, *skipNodesWithSystemPods, *skipNodesWithLocalStorage,
161+
podsToRemove, daemonSetPods, blockingPod, err = FastGetPodsToMove(nodeInfo, *skipNodesWithSystemPods, *skipNodesWithLocalStorage,
160162
podDisruptionBudgets)
161163
} else {
162-
podsToRemove, blockingPod, err = DetailedGetPodsForMove(nodeInfo, *skipNodesWithSystemPods, *skipNodesWithLocalStorage, listers, int32(*minReplicaCount),
164+
podsToRemove, daemonSetPods, blockingPod, err = DetailedGetPodsForMove(nodeInfo, *skipNodesWithSystemPods, *skipNodesWithLocalStorage, listers, int32(*minReplicaCount),
163165
podDisruptionBudgets)
164166
}
165167

@@ -180,6 +182,7 @@ candidateloop:
180182
result = append(result, NodeToBeRemoved{
181183
Node: nodeInfo.Node(),
182184
PodsToReschedule: podsToRemove,
185+
DaemonSetPods: daemonSetPods,
183186
})
184187
klog.V(2).Infof("%s: node %s may be removed", evaluationType, nodeName)
185188
if len(result) >= maxCount {
@@ -203,7 +206,7 @@ func FindEmptyNodesToRemove(snapshot ClusterSnapshot, candidates []string) []str
203206
continue
204207
}
205208
// Should block on all pods.
206-
podsToRemove, _, err := FastGetPodsToMove(nodeInfo, true, true, nil)
209+
podsToRemove, _, _, err := FastGetPodsToMove(nodeInfo, true, true, nil)
207210
if err == nil && len(podsToRemove) == 0 {
208211
result = append(result, node)
209212
}

cluster-autoscaler/simulator/cluster_test.go

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -308,10 +308,12 @@ func TestFindNodesToRemove(t *testing.T) {
308308
emptyNodeToRemove := NodeToBeRemoved{
309309
Node: emptyNode,
310310
PodsToReschedule: []*apiv1.Pod{},
311+
DaemonSetPods: []*apiv1.Pod{},
311312
}
312313
drainableNodeToRemove := NodeToBeRemoved{
313314
Node: drainableNode,
314315
PodsToReschedule: []*apiv1.Pod{pod1, pod2},
316+
DaemonSetPods: []*apiv1.Pod{},
315317
}
316318

317319
clusterSnapshot := NewBasicClusterSnapshot()

cluster-autoscaler/simulator/drain.go

Lines changed: 14 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -29,18 +29,18 @@ import (
2929
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
3030
)
3131

32-
// FastGetPodsToMove returns a list of pods that should be moved elsewhere if the node
32+
// FastGetPodsToMove returns a list of pods that should be moved elsewhere
33+
// and a list of DaemonSet pods that should be evicted if the node
3334
// is drained. Raises error if there is an unreplicated pod.
3435
// Based on kubectl drain code. It makes an assumption that RC, DS, Jobs and RS were deleted
3536
// along with their pods (no abandoned pods with dangling created-by annotation). Useful for fast
3637
// checks.
3738
func FastGetPodsToMove(nodeInfo *schedulerframework.NodeInfo, skipNodesWithSystemPods bool, skipNodesWithLocalStorage bool,
38-
pdbs []*policyv1.PodDisruptionBudget) ([]*apiv1.Pod, *drain.BlockingPod, error) {
39-
var pods []*apiv1.Pod
39+
pdbs []*policyv1.PodDisruptionBudget) (pods []*apiv1.Pod, daemonSetPods []*apiv1.Pod, blockingPod *drain.BlockingPod, err error) {
4040
for _, podInfo := range nodeInfo.Pods {
4141
pods = append(pods, podInfo.Pod)
4242
}
43-
pods, blockingPod, err := drain.GetPodsForDeletionOnNodeDrain(
43+
pods, daemonSetPods, blockingPod, err = drain.GetPodsForDeletionOnNodeDrain(
4444
pods,
4545
pdbs,
4646
skipNodesWithSystemPods,
@@ -51,27 +51,27 @@ func FastGetPodsToMove(nodeInfo *schedulerframework.NodeInfo, skipNodesWithSyste
5151
time.Now())
5252

5353
if err != nil {
54-
return pods, blockingPod, err
54+
return pods, daemonSetPods, blockingPod, err
5555
}
5656
if pdbBlockingPod, err := checkPdbs(pods, pdbs); err != nil {
57-
return []*apiv1.Pod{}, pdbBlockingPod, err
57+
return []*apiv1.Pod{}, []*apiv1.Pod{}, pdbBlockingPod, err
5858
}
5959

60-
return pods, nil, nil
60+
return pods, daemonSetPods, nil, nil
6161
}
6262

63-
// DetailedGetPodsForMove returns a list of pods that should be moved elsewhere if the node
63+
// DetailedGetPodsForMove returns a list of pods that should be moved elsewhere
64+
// and a list of DaemonSet pods that should be evicted if the node
6465
// is drained. Raises error if there is an unreplicated pod.
6566
// Based on kubectl drain code. It checks whether RC, DS, Jobs and RS that created these pods
6667
// still exist.
6768
func DetailedGetPodsForMove(nodeInfo *schedulerframework.NodeInfo, skipNodesWithSystemPods bool,
6869
skipNodesWithLocalStorage bool, listers kube_util.ListerRegistry, minReplicaCount int32,
69-
pdbs []*policyv1.PodDisruptionBudget) ([]*apiv1.Pod, *drain.BlockingPod, error) {
70-
var pods []*apiv1.Pod
70+
pdbs []*policyv1.PodDisruptionBudget) (pods []*apiv1.Pod, daemonSetPods []*apiv1.Pod, blockingPod *drain.BlockingPod, err error) {
7171
for _, podInfo := range nodeInfo.Pods {
7272
pods = append(pods, podInfo.Pod)
7373
}
74-
pods, blockingPod, err := drain.GetPodsForDeletionOnNodeDrain(
74+
pods, daemonSetPods, blockingPod, err = drain.GetPodsForDeletionOnNodeDrain(
7575
pods,
7676
pdbs,
7777
skipNodesWithSystemPods,
@@ -81,13 +81,13 @@ func DetailedGetPodsForMove(nodeInfo *schedulerframework.NodeInfo, skipNodesWith
8181
minReplicaCount,
8282
time.Now())
8383
if err != nil {
84-
return pods, blockingPod, err
84+
return pods, daemonSetPods, blockingPod, err
8585
}
8686
if pdbBlockingPod, err := checkPdbs(pods, pdbs); err != nil {
87-
return []*apiv1.Pod{}, pdbBlockingPod, err
87+
return []*apiv1.Pod{}, []*apiv1.Pod{}, pdbBlockingPod, err
8888
}
8989

90-
return pods, nil, nil
90+
return pods, daemonSetPods, nil, nil
9191
}
9292

9393
func checkPdbs(pods []*apiv1.Pod, pdbs []*policyv1.PodDisruptionBudget) (*drain.BlockingPod, error) {

0 commit comments

Comments
 (0)