@@ -118,7 +118,7 @@ func SetPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeNa
 
 // MarkPodsNotReady updates ready status of given pods running on
 // given node from master return true if success
-func MarkPodsNotReady(kubeClient clientset.Interface, pods []*v1.Pod, nodeName string) error {
+func MarkPodsNotReady(kubeClient clientset.Interface, recorder record.EventRecorder, pods []*v1.Pod, nodeName string) error {
 	klog.V(2).Infof("Update ready status of pods on node [%v]", nodeName)
 
 	errMsg := []string{}
@@ -136,6 +136,7 @@ func MarkPodsNotReady(kubeClient clientset.Interface, pods []*v1.Pod, nodeName s
 			if !utilpod.UpdatePodCondition(&pod.Status, &cond) {
 				break
 			}
+
 			klog.V(2).Infof("Updating ready status of pod %v to false", pod.Name)
 			_, err := kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{})
 			if err != nil {
@@ -147,6 +148,8 @@ func MarkPodsNotReady(kubeClient clientset.Interface, pods []*v1.Pod, nodeName s
 				klog.Warningf("Failed to update status for pod %q: %v", format.Pod(pod), err)
 				errMsg = append(errMsg, fmt.Sprintf("%v", err))
 			}
+			// record NodeNotReady event after updateStatus to make sure pod still exists
+			recorder.Event(pod, v1.EventTypeWarning, "NodeNotReady", "Node is not ready")
 			break
 		}
 	}
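The change threads a record.EventRecorder through MarkPodsNotReady, so every caller must now supply one. Below is a minimal sketch, not part of this diff, of how a caller might construct that recorder using the standard client-go broadcaster pattern; the import path for MarkPodsNotReady, the wrapper function name, and the "node-controller" component name are assumptions for illustration.

package main

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"

	// Assumed location of MarkPodsNotReady, based on the function shown in this diff.
	nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
)

func markPodsNotReadyWithEvents(kubeClient clientset.Interface, pods []*v1.Pod, nodeName string) error {
	// Standard client-go wiring: a broadcaster fans recorded events out to the
	// API server through the core/v1 events client.
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "node-controller"})

	// With this change, each pod whose ready status is flipped to false also
	// gets a NodeNotReady warning event, emitted after UpdateStatus so the pod
	// is known to still exist.
	return nodeutil.MarkPodsNotReady(kubeClient, recorder, pods, nodeName)
}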