
Commit 4558dc1

node-lifecycle-controller: improve processPod test-coverage
1 parent 06fb818 commit 4558dc1

2 files changed: +207 -2 lines

pkg/controller/nodelifecycle/node_lifecycle_controller_test.go

Lines changed: 151 additions & 2 deletions
@@ -85,6 +85,7 @@ type nodeLifecycleController struct {
 	leaseInformer     coordinformers.LeaseInformer
 	nodeInformer      coreinformers.NodeInformer
 	daemonSetInformer appsinformers.DaemonSetInformer
+	podInformer       coreinformers.PodInformer
 }
 
 func createNodeLease(nodeName string, renewTime metav1.MicroTime) *coordv1.Lease {
@@ -121,6 +122,15 @@ func (nc *nodeLifecycleController) syncNodeStore(fakeNodeHandler *testutil.FakeN
 	return nc.nodeInformer.Informer().GetStore().Replace(newElems, "newRV")
 }
 
+func (nc *nodeLifecycleController) syncPodStore(pod *v1.Pod) error {
+	if pod == nil {
+		return nil
+	}
+	newElems := make([]interface{}, 0, 1)
+	newElems = append(newElems, pod)
+	return nc.podInformer.Informer().GetStore().Replace(newElems, "newRV")
+}
+
 func newNodeLifecycleControllerFromClient(
 	ctx context.Context,
 	kubeClient clientset.Interface,
@@ -138,11 +148,12 @@ func newNodeLifecycleControllerFromClient(
 	leaseInformer := factory.Coordination().V1().Leases()
 	nodeInformer := factory.Core().V1().Nodes()
 	daemonSetInformer := factory.Apps().V1().DaemonSets()
+	podInformer := factory.Core().V1().Pods()
 
 	nc, err := NewNodeLifecycleController(
 		ctx,
 		leaseInformer,
-		factory.Core().V1().Pods(),
+		podInformer,
 		nodeInformer,
 		daemonSetInformer,
 		kubeClient,
@@ -163,7 +174,7 @@ func newNodeLifecycleControllerFromClient(
 	nc.nodeInformerSynced = alwaysReady
 	nc.daemonSetInformerSynced = alwaysReady
 
-	return &nodeLifecycleController{nc, leaseInformer, nodeInformer, daemonSetInformer}, nil
+	return &nodeLifecycleController{nc, leaseInformer, nodeInformer, daemonSetInformer, podInformer}, nil
 }
 
 func TestMonitorNodeHealth(t *testing.T) {
@@ -3557,3 +3568,141 @@ func Test_isNodeExcludedFromDisruptionChecks(t *testing.T) {
 		})
 	}
 }
+
+func TestProcessPodMarkPodNotReady(t *testing.T) {
+	fakeNow := metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC)
+
+	table := []struct {
+		desc                    string
+		fakeNodeHandler         *testutil.FakeNodeHandler
+		pod                     *v1.Pod
+		expectedPodStatusUpdate bool
+		monitorNodeHealth       bool
+	}{
+		{
+			desc: "Do not mark pod as NotReady when the scheduled node's healthy is not gathered yet",
+			fakeNodeHandler: &testutil.FakeNodeHandler{
+				Existing: []*v1.Node{
+					{
+						ObjectMeta: metav1.ObjectMeta{
+							Name:              "node0",
+							CreationTimestamp: fakeNow,
+						},
+						Status: v1.NodeStatus{
+							Conditions: []v1.NodeCondition{
+								{
+									Type:               v1.NodeReady,
+									Status:             v1.ConditionFalse,
+									LastHeartbeatTime:  fakeNow,
+									LastTransitionTime: fakeNow,
+								},
+							},
+						},
+					},
+				},
+				Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
+			},
+			pod:                     testutil.NewPod("pod0", "node0"),
+			monitorNodeHealth:       false,
+			expectedPodStatusUpdate: false,
+		},
+		{
+			desc: "Do not mark pod as NotReady when the scheduled node is ready",
+			fakeNodeHandler: &testutil.FakeNodeHandler{
+				Existing: []*v1.Node{
+					{
+						ObjectMeta: metav1.ObjectMeta{
+							Name:              "node0",
+							CreationTimestamp: fakeNow,
+						},
+						Status: v1.NodeStatus{
+							Conditions: []v1.NodeCondition{
+								{
+									Type:               v1.NodeReady,
+									Status:             v1.ConditionTrue,
+									LastHeartbeatTime:  fakeNow,
+									LastTransitionTime: fakeNow,
+								},
+							},
+						},
+					},
+				},
+				Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
+			},
+			pod:                     testutil.NewPod("pod0", "node0"),
+			monitorNodeHealth:       true,
+			expectedPodStatusUpdate: false,
+		},
+		{
+			desc: "Pod marked as NotReady when the scheduled node is not ready",
+			fakeNodeHandler: &testutil.FakeNodeHandler{
+				Existing: []*v1.Node{
+					{
+						ObjectMeta: metav1.ObjectMeta{
+							Name:              "node0",
+							CreationTimestamp: fakeNow,
+						},
+						Status: v1.NodeStatus{
+							Conditions: []v1.NodeCondition{
+								{
+									Type:               v1.NodeReady,
+									Status:             v1.ConditionFalse,
+									LastHeartbeatTime:  fakeNow,
+									LastTransitionTime: fakeNow,
+								},
+							},
+						},
+					},
+				},
+				Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
+			},
+			pod:                     testutil.NewPod("pod0", "node0"),
+			monitorNodeHealth:       true,
+			expectedPodStatusUpdate: true,
+		},
+	}
+
+	_, ctx := ktesting.NewTestContext(t)
+	for _, item := range table {
+		t.Run(item.desc, func(t *testing.T) {
+			nodeController, _ := newNodeLifecycleControllerFromClient(
+				ctx,
+				item.fakeNodeHandler,
+				testRateLimiterQPS,
+				testRateLimiterQPS,
+				testLargeClusterThreshold,
+				testUnhealthyThreshold,
+				testNodeMonitorGracePeriod,
+				testNodeStartupGracePeriod,
+				testNodeMonitorPeriod,
+			)
+			nodeController.now = func() metav1.Time { return fakeNow }
+			nodeController.recorder = testutil.NewFakeRecorder()
+			nodeController.getPodsAssignedToNode = fakeGetPodsAssignedToNode(item.fakeNodeHandler.Clientset)
+			if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
+				t.Errorf("unexpected error: %v", err)
+			}
+			if item.monitorNodeHealth {
+				if err := nodeController.monitorNodeHealth(ctx); err != nil {
+					t.Errorf("unexpected error: %v", err)
+				}
+			}
+
+			if err := nodeController.syncPodStore(item.pod); err != nil {
+				t.Errorf("unexpected error: %v", err)
+			}
+			nodeController.podUpdated(nil, item.pod)
+			nodeController.processPod(ctx, podUpdateItem{name: item.pod.Name, namespace: item.pod.Namespace})
+
+			podStatusUpdated := false
+			for _, action := range item.fakeNodeHandler.Actions() {
+				if action.GetVerb() == "update" && action.GetResource().Resource == "pods" && action.GetSubresource() == "status" {
+					podStatusUpdated = true
+				}
+			}
+			if podStatusUpdated != item.expectedPodStatusUpdate {
+				t.Errorf("expect pod status updated to be %v, but got %v", item.expectedPodStatusUpdate, podStatusUpdated)
+			}
+		})
+	}
+}
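For readers unfamiliar with the action-inspection idiom the new test leans on, here is a standalone sketch (not part of the commit) of how a fake clientset records a pods/status update; the pod name and namespace below are made up for illustration, and only well-known client-go fake APIs are used.

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// Seed a fake clientset with a pod, then update its status subresource.
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod0", Namespace: "default"}}
	client := fake.NewSimpleClientset(pod)

	pod.Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionFalse}}
	if _, err := client.CoreV1().Pods("default").UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{}); err != nil {
		panic(err)
	}

	// The fake records every call; a pods/status update shows up as verb "update"
	// on resource "pods" with subresource "status", which is exactly what the
	// new test scans for in the FakeNodeHandler's actions.
	updated := false
	for _, action := range client.Actions() {
		if action.GetVerb() == "update" && action.GetResource().Resource == "pods" && action.GetSubresource() == "status" {
			updated = true
		}
	}
	fmt.Println("pods/status update recorded:", updated)
}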

pkg/controller/nodelifecycle/scheduler/rate_limited_queue_test.go

Lines changed: 56 additions & 0 deletions
@@ -39,6 +39,56 @@ func CheckSetEq(lhs, rhs sets.String) bool {
 	return lhs.IsSuperset(rhs) && rhs.IsSuperset(lhs)
 }
 
+func TestUniqueQueueGet(t *testing.T) {
+	var tick int64
+	now = func() time.Time {
+		t := time.Unix(tick, 0)
+		tick++
+		return t
+	}
+
+	queue := UniqueQueue{
+		queue: TimedQueue{},
+		set:   sets.NewString(),
+	}
+	queue.Add(TimedValue{Value: "first", UID: "11111", AddedAt: now(), ProcessAt: now()})
+	queue.Add(TimedValue{Value: "second", UID: "22222", AddedAt: now(), ProcessAt: now()})
+	queue.Add(TimedValue{Value: "third", UID: "33333", AddedAt: now(), ProcessAt: now()})
+
+	queuePattern := []string{"first", "second", "third"}
+	if len(queue.queue) != len(queuePattern) {
+		t.Fatalf("Queue %v should have length %d", queue.queue, len(queuePattern))
+	}
+	if !CheckQueueEq(queuePattern, queue.queue) {
+		t.Errorf("Invalid queue. Got %v, expected %v", queue.queue, queuePattern)
+	}
+
+	setPattern := sets.NewString("first", "second", "third")
+	if len(queue.set) != len(setPattern) {
+		t.Fatalf("Map %v should have length %d", queue.set, len(setPattern))
+	}
+	if !CheckSetEq(setPattern, queue.set) {
+		t.Errorf("Invalid map. Got %v, expected %v", queue.set, setPattern)
+	}
+
+	queue.Get()
+	queuePattern = []string{"second", "third"}
+	if len(queue.queue) != len(queuePattern) {
+		t.Fatalf("Queue %v should have length %d", queue.queue, len(queuePattern))
+	}
+	if !CheckQueueEq(queuePattern, queue.queue) {
+		t.Errorf("Invalid queue. Got %v, expected %v", queue.queue, queuePattern)
+	}
+
+	setPattern = sets.NewString("second", "third")
+	if len(queue.set) != len(setPattern) {
+		t.Fatalf("Map %v should have length %d", queue.set, len(setPattern))
+	}
+	if !CheckSetEq(setPattern, queue.set) {
+		t.Errorf("Invalid map. Got %v, expected %v", queue.set, setPattern)
+	}
+}
+
 func TestAddNode(t *testing.T) {
 	evictor := NewRateLimitedTimedQueue(flowcontrol.NewFakeAlwaysRateLimiter())
 	evictor.Add("first", "11111")
@@ -306,6 +356,12 @@ func TestSwapLimiter(t *testing.T) {
 	if qps != createdQPS {
 		t.Fatalf("QPS does not match create one: %v instead of %v", qps, createdQPS)
 	}
+
+	prev := evictor.limiter
+	evictor.SwapLimiter(createdQPS)
+	if prev != evictor.limiter {
+		t.Fatalf("Limiter should not be swapped if the QPS is the same.")
+	}
 }
 
 func TestAddAfterTry(t *testing.T) {
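TestUniqueQueueGet above gets deterministic timestamps by reassigning the scheduler package's package-level now function variable. A minimal, self-contained sketch of that fake-clock pattern (illustrative names, not code from the repository):

package main

import (
	"fmt"
	"time"
)

// now is a package-level indirection over the clock, mirroring the variable the
// scheduler package exposes so its tests can substitute a deterministic clock.
var now = time.Now

type entry struct {
	value   string
	addedAt time.Time
}

// newEntry timestamps values via now(), so swapping now swaps the clock.
func newEntry(v string) entry {
	return entry{value: v, addedAt: now()}
}

func main() {
	// Replace the clock with one that ticks one second per call, as the test does.
	var tick int64
	now = func() time.Time {
		t := time.Unix(tick, 0)
		tick++
		return t
	}

	a, b := newEntry("first"), newEntry("second")
	fmt.Println(a.addedAt.Unix(), b.addedAt.Unix()) // prints: 0 1
}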
