@@ -85,6 +85,7 @@ type nodeLifecycleController struct {
 	leaseInformer     coordinformers.LeaseInformer
 	nodeInformer      coreinformers.NodeInformer
 	daemonSetInformer appsinformers.DaemonSetInformer
+	podInformer       coreinformers.PodInformer
 }
 
 func createNodeLease(nodeName string, renewTime metav1.MicroTime) *coordv1.Lease {
@@ -121,6 +122,15 @@ func (nc *nodeLifecycleController) syncNodeStore(fakeNodeHandler *testutil.FakeN
 	return nc.nodeInformer.Informer().GetStore().Replace(newElems, "newRV")
 }
 
+func (nc *nodeLifecycleController) syncPodStore(pod *v1.Pod) error {
+	if pod == nil {
+		return nil
+	}
+	newElems := make([]interface{}, 0, 1)
+	newElems = append(newElems, pod)
+	return nc.podInformer.Informer().GetStore().Replace(newElems, "newRV")
+}
+
 func newNodeLifecycleControllerFromClient(
 	ctx context.Context,
 	kubeClient clientset.Interface,
@@ -138,11 +148,12 @@ func newNodeLifecycleControllerFromClient(
 	leaseInformer := factory.Coordination().V1().Leases()
 	nodeInformer := factory.Core().V1().Nodes()
 	daemonSetInformer := factory.Apps().V1().DaemonSets()
+	podInformer := factory.Core().V1().Pods()
 
 	nc, err := NewNodeLifecycleController(
 		ctx,
 		leaseInformer,
-		factory.Core().V1().Pods(),
+		podInformer,
 		nodeInformer,
 		daemonSetInformer,
 		kubeClient,
@@ -163,7 +174,7 @@ func newNodeLifecycleControllerFromClient(
 	nc.nodeInformerSynced = alwaysReady
 	nc.daemonSetInformerSynced = alwaysReady
 
-	return &nodeLifecycleController{nc, leaseInformer, nodeInformer, daemonSetInformer}, nil
+	return &nodeLifecycleController{nc, leaseInformer, nodeInformer, daemonSetInformer, podInformer}, nil
 }
 
 func TestMonitorNodeHealth(t *testing.T) {
@@ -3557,3 +3568,141 @@ func Test_isNodeExcludedFromDisruptionChecks(t *testing.T) {
 		})
 	}
 }
+
+func TestProcessPodMarkPodNotReady(t *testing.T) {
+	fakeNow := metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC)
+
+	table := []struct {
+		desc                    string
+		fakeNodeHandler         *testutil.FakeNodeHandler
+		pod                     *v1.Pod
+		expectedPodStatusUpdate bool
+		monitorNodeHealth       bool
+	}{
+		{
+			desc: "Do not mark pod as NotReady when the scheduled node's health is not gathered yet",
+			fakeNodeHandler: &testutil.FakeNodeHandler{
+				Existing: []*v1.Node{
+					{
+						ObjectMeta: metav1.ObjectMeta{
+							Name:              "node0",
+							CreationTimestamp: fakeNow,
+						},
+						Status: v1.NodeStatus{
+							Conditions: []v1.NodeCondition{
+								{
+									Type:               v1.NodeReady,
+									Status:             v1.ConditionFalse,
+									LastHeartbeatTime:  fakeNow,
+									LastTransitionTime: fakeNow,
+								},
+							},
+						},
+					},
+				},
+				Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
+			},
+			pod:                     testutil.NewPod("pod0", "node0"),
+			monitorNodeHealth:       false,
+			expectedPodStatusUpdate: false,
+		},
+		{
+			desc: "Do not mark pod as NotReady when the scheduled node is ready",
+			fakeNodeHandler: &testutil.FakeNodeHandler{
+				Existing: []*v1.Node{
+					{
+						ObjectMeta: metav1.ObjectMeta{
+							Name:              "node0",
+							CreationTimestamp: fakeNow,
+						},
+						Status: v1.NodeStatus{
+							Conditions: []v1.NodeCondition{
+								{
+									Type:               v1.NodeReady,
+									Status:             v1.ConditionTrue,
+									LastHeartbeatTime:  fakeNow,
+									LastTransitionTime: fakeNow,
+								},
+							},
+						},
+					},
+				},
+				Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
+			},
+			pod:                     testutil.NewPod("pod0", "node0"),
+			monitorNodeHealth:       true,
+			expectedPodStatusUpdate: false,
+		},
+		{
+			desc: "Pod marked as NotReady when the scheduled node is not ready",
+			fakeNodeHandler: &testutil.FakeNodeHandler{
+				Existing: []*v1.Node{
+					{
+						ObjectMeta: metav1.ObjectMeta{
+							Name:              "node0",
+							CreationTimestamp: fakeNow,
+						},
+						Status: v1.NodeStatus{
+							Conditions: []v1.NodeCondition{
+								{
+									Type:               v1.NodeReady,
+									Status:             v1.ConditionFalse,
+									LastHeartbeatTime:  fakeNow,
+									LastTransitionTime: fakeNow,
+								},
+							},
+						},
+					},
+				},
+				Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
+			},
+			pod:                     testutil.NewPod("pod0", "node0"),
+			monitorNodeHealth:       true,
+			expectedPodStatusUpdate: true,
+		},
+	}
+
+	_, ctx := ktesting.NewTestContext(t)
+	for _, item := range table {
+		t.Run(item.desc, func(t *testing.T) {
+			nodeController, _ := newNodeLifecycleControllerFromClient(
+				ctx,
+				item.fakeNodeHandler,
+				testRateLimiterQPS,
+				testRateLimiterQPS,
+				testLargeClusterThreshold,
+				testUnhealthyThreshold,
+				testNodeMonitorGracePeriod,
+				testNodeStartupGracePeriod,
+				testNodeMonitorPeriod,
+			)
+			nodeController.now = func() metav1.Time { return fakeNow }
+			nodeController.recorder = testutil.NewFakeRecorder()
+			nodeController.getPodsAssignedToNode = fakeGetPodsAssignedToNode(item.fakeNodeHandler.Clientset)
+			if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
+				t.Errorf("unexpected error: %v", err)
+			}
+			if item.monitorNodeHealth {
+				if err := nodeController.monitorNodeHealth(ctx); err != nil {
+					t.Errorf("unexpected error: %v", err)
+				}
+			}
+
+			if err := nodeController.syncPodStore(item.pod); err != nil {
+				t.Errorf("unexpected error: %v", err)
+			}
+			nodeController.podUpdated(nil, item.pod)
+			nodeController.processPod(ctx, podUpdateItem{name: item.pod.Name, namespace: item.pod.Namespace})
+
+			podStatusUpdated := false
+			for _, action := range item.fakeNodeHandler.Actions() {
+				if action.GetVerb() == "update" && action.GetResource().Resource == "pods" && action.GetSubresource() == "status" {
+					podStatusUpdated = true
+				}
+			}
+			if podStatusUpdated != item.expectedPodStatusUpdate {
+				t.Errorf("expect pod status updated to be %v, but got %v", item.expectedPodStatusUpdate, podStatusUpdated)
+			}
+		})
+	}
+}