Commit 78c56e6

Merge pull request kubernetes#84886 from YangLu1031/daemonRestartTest
Verify kubelet & kube-proxy recovery on Windows nodes
2 parents f3ebd95 + ba2cc99

File tree: 1 file changed (+24, -3)


test/e2e/apps/daemon_restart.go

Lines changed: 24 additions & 3 deletions
```diff
@@ -92,9 +92,12 @@ func (r *RestartDaemonConfig) String() string {
 // waitUp polls healthz of the daemon till it returns "ok" or the polling hits the pollTimeout
 func (r *RestartDaemonConfig) waitUp() {
 	framework.Logf("Checking if %v is up by polling for a 200 on its /healthz endpoint", r)
+	nullDev := "/dev/null"
+	if framework.NodeOSDistroIs("windows") {
+		nullDev = "NUL"
+	}
 	healthzCheck := fmt.Sprintf(
-		"curl -s -o /dev/null -I -w \"%%{http_code}\" http://localhost:%v/healthz", r.healthzPort)
-
+		"curl -s -o %v -I -w \"%%{http_code}\" http://localhost:%v/healthz", nullDev, r.healthzPort)
 	err := wait.Poll(r.pollInterval, r.pollTimeout, func() (bool, error) {
 		result, err := e2essh.NodeExec(r.nodeName, healthzCheck, framework.TestContext.Provider)
 		framework.ExpectNoError(err)
```
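For illustration, a minimal standalone sketch (not code from the PR) of the health-check command string this change renders on each OS. The port value 10256 (kube-proxy's default healthz port) is used here purely as an example:

```go
package main

import "fmt"

func main() {
	// Linux nodes discard curl's response body via /dev/null; Windows has no
	// /dev/null, so the change substitutes the NUL device instead.
	for _, nullDev := range []string{"/dev/null", "NUL"} {
		healthzCheck := fmt.Sprintf(
			"curl -s -o %v -I -w \"%%{http_code}\" http://localhost:%v/healthz", nullDev, 10256)
		fmt.Println(healthzCheck)
	}
}
```

On a Windows node this yields `curl -s -o NUL -I -w "%{http_code}" http://localhost:10256/healthz`, which works because curl ships on recent Windows Server images while `/dev/null` does not exist there.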
```diff
@@ -115,8 +118,12 @@ func (r *RestartDaemonConfig) waitUp() {
 
 // kill sends a SIGTERM to the daemon
 func (r *RestartDaemonConfig) kill() {
+	killCmd := fmt.Sprintf("pgrep %v | xargs -I {} sudo kill {}", r.daemonName)
+	if framework.NodeOSDistroIs("windows") {
+		killCmd = fmt.Sprintf("taskkill /im %v.exe /f", r.daemonName)
+	}
 	framework.Logf("Killing %v", r)
-	_, err := e2essh.NodeExec(r.nodeName, fmt.Sprintf("pgrep %v | xargs -I {} sudo kill {}", r.daemonName), framework.TestContext.Provider)
+	_, err := e2essh.NodeExec(r.nodeName, killCmd, framework.TestContext.Provider)
 	framework.ExpectNoError(err)
 }
 
```

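As a rough illustration (again a standalone sketch, not code from the PR), these are the commands kill() now issues for a given daemon name:

```go
package main

import "fmt"

func main() {
	daemonName := "kube-proxy"
	// Linux: find the daemon's PIDs with pgrep and send each one SIGTERM.
	fmt.Printf("pgrep %v | xargs -I {} sudo kill {}\n", daemonName)
	// Windows: terminate the process image forcibly with taskkill.
	fmt.Printf("taskkill /im %v.exe /f\n", daemonName)
}
```

Worth noting: `taskkill /f` is a forced termination rather than a graceful signal, so the "sends a SIGTERM" doc comment now strictly describes only the Linux path.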
```diff
@@ -319,4 +326,18 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
 			framework.Failf("Net container restart count went from %v -> %v after kubelet restart on nodes %v \n\n %+v", preRestarts, postRestarts, badNodes, tracker)
 		}
 	})
+
+	ginkgo.It("Kube-proxy should recover after being killed accidentally", func() {
+		nodeIPs, err := e2enode.GetPublicIps(f.ClientSet)
+		if err != nil {
+			framework.Logf("Unexpected error occurred: %v", err)
+		}
+		for _, ip := range nodeIPs {
+			restarter := NewRestartConfig(
+				ip, "kube-proxy", ports.ProxyHealthzPort, restartPollInterval, restartTimeout)
+			// restart method will kill the kube-proxy process and wait for recovery,
+			// if not able to recover, will throw test failure.
+			restarter.restart()
+		}
+	})
 })
```
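The `restart()` method itself is not part of this diff; going by the inline comment, its flow is presumably along these lines (a sketch against the file's `RestartDaemonConfig` type, not the file's actual code):

```go
// Sketch: check health, kill the daemon, then wait for it to recover.
// waitUp and kill are the methods shown in the hunks above.
func (r *RestartDaemonConfig) restart() {
	r.waitUp() // ensure the daemon is healthy before killing it
	r.kill()   // pgrep/kill on Linux, taskkill /f on Windows
	r.waitUp() // fails the test if the daemon does not recover in time
}
```

Because the new ginkgo.It block runs this against every node's public IP, the test verifies kube-proxy self-recovery cluster-wide, and the OS-conditional waitUp/kill changes above are what let the same test pass on Windows nodes.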
