@@ -92,9 +92,13 @@ func (r *RestartDaemonConfig) String() string {
 // waitUp polls healthz of the daemon till it returns "ok" or the polling hits the pollTimeout
 func (r *RestartDaemonConfig) waitUp() {
 	framework.Logf("Checking if %v is up by polling for a 200 on its /healthz endpoint", r)
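+	// /dev/null does not exist on Windows; discard curl output to the NUL device there.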
+	nullDev := "/dev/null"
+	if framework.NodeOSDistroIs("windows") {
+		nullDev = "NUL"
+	}
 	healthzCheck := fmt.Sprintf(
-		"curl -s -o /dev/null -I -w \"%%{http_code}\" http://localhost:%v/healthz", r.healthzPort)
-
+		"curl -s -o %v -I -w \"%%{http_code}\" http://localhost:%v/healthz", nullDev, r.healthzPort)
 	err := wait.Poll(r.pollInterval, r.pollTimeout, func() (bool, error) {
 		result, err := e2essh.NodeExec(r.nodeName, healthzCheck, framework.TestContext.Provider)
 		framework.ExpectNoError(err)
@@ -115,8 +118,13 @@ func (r *RestartDaemonConfig) waitUp() {
 
 // kill sends a SIGTERM to the daemon
 func (r *RestartDaemonConfig) kill() {
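+	// kill's default signal is SIGTERM; Windows has no POSIX signals, so taskkill /f force-stops the process instead.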
+	killCmd := fmt.Sprintf("pgrep %v | xargs -I {} sudo kill {}", r.daemonName)
+	if framework.NodeOSDistroIs("windows") {
+		killCmd = fmt.Sprintf("taskkill /im %v.exe /f", r.daemonName)
+	}
 	framework.Logf("Killing %v", r)
-	_, err := e2essh.NodeExec(r.nodeName, fmt.Sprintf("pgrep %v | xargs -I {} sudo kill {}", r.daemonName), framework.TestContext.Provider)
+	_, err := e2essh.NodeExec(r.nodeName, killCmd, framework.TestContext.Provider)
 	framework.ExpectNoError(err)
 }
 
@@ -319,4 +326,20 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
 			framework.Failf("Net container restart count went from %v -> %v after kubelet restart on nodes %v \n\n %+v", preRestarts, postRestarts, badNodes, tracker)
 		}
 	})
+
+	ginkgo.It("Kube-proxy should recover after being killed accidentally", func() {
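+		// Gather each node's public IP; the kill and the healthz poll both run on the node over SSH.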
+		nodeIPs, err := e2enode.GetPublicIps(f.ClientSet)
+		if err != nil {
+			framework.Logf("Unexpected error occurred: %v", err)
+		}
+		for _, ip := range nodeIPs {
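+			// ports.ProxyHealthzPort is kube-proxy's default healthz port (10256).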
+			restarter := NewRestartConfig(
+				ip, "kube-proxy", ports.ProxyHealthzPort, restartPollInterval, restartTimeout)
+			// restart() kills the kube-proxy process and waits for it to recover;
+			// if it does not recover within the poll timeout, the test fails.
+			restarter.restart()
+		}
+	})
 })