
Commit 496d69e

Merge pull request kubernetes#84868 from oomichi/move-e2e-util-2
Move functions from e2e/framework/util.go Part-2
2 parents 9dfcc36 + eb9d1cb commit 496d69e

6 files changed: +221 -240 lines changed

test/e2e/apps/daemon_set.go

Lines changed: 32 additions & 2 deletions
@@ -38,6 +38,7 @@ import (
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
     "k8s.io/kubernetes/test/e2e/framework"
     e2enode "k8s.io/kubernetes/test/e2e/framework/node"
+    testutils "k8s.io/kubernetes/test/utils"
 
     "github.com/onsi/ginkgo"
     "github.com/onsi/gomega"
@@ -58,6 +59,35 @@ const (
 // node selectors labels to namespaces
 var NamespaceNodeSelectors = []string{"scheduler.alpha.kubernetes.io/node-selector"}
 
+type updateDSFunc func(*appsv1.DaemonSet)
+
+// updateDaemonSetWithRetries updates daemonsets with the given applyUpdate func
+// until it succeeds or a timeout expires.
+func updateDaemonSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateDSFunc) (ds *appsv1.DaemonSet, err error) {
+    daemonsets := c.AppsV1().DaemonSets(namespace)
+    var updateErr error
+    pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
+        if ds, err = daemonsets.Get(name, metav1.GetOptions{}); err != nil {
+            if testutils.IsRetryableAPIError(err) {
+                return false, nil
+            }
+            return false, err
+        }
+        // Apply the update, then attempt to push it to the apiserver.
+        applyUpdate(ds)
+        if ds, err = daemonsets.Update(ds); err == nil {
+            framework.Logf("Updating DaemonSet %s", name)
+            return true, nil
+        }
+        updateErr = err
+        return false, nil
+    })
+    if pollErr == wait.ErrWaitTimeout {
+        pollErr = fmt.Errorf("couldn't apply the provided updated to DaemonSet %q: %v", name, updateErr)
+    }
+    return ds, pollErr
+}
+
 // This test must be run in serial because it assumes the Daemon Set pods will
 // always get scheduled. If we run other tests in parallel, this may not
 // happen. In the future, running in parallel may work if we have an eviction
@@ -399,7 +429,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
         framework.Logf("Update the DaemonSet to trigger a rollout")
         // We use a nonexistent image here, so that we make sure it won't finish
         newImage := "foo:non-existent"
-        newDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *appsv1.DaemonSet) {
+        newDS, err := updateDaemonSetWithRetries(c, ns, ds.Name, func(update *appsv1.DaemonSet) {
            update.Spec.Template.Spec.Containers[0].Image = newImage
         })
         framework.ExpectNoError(err)
@@ -432,7 +462,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
         framework.ExpectNotEqual(len(newPods), 0)
 
         framework.Logf("Roll back the DaemonSet before rollout is complete")
-        rollbackDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *appsv1.DaemonSet) {
+        rollbackDS, err := updateDaemonSetWithRetries(c, ns, ds.Name, func(update *appsv1.DaemonSet) {
            update.Spec.Template.Spec.Containers[0].Image = image
         })
         framework.ExpectNoError(err)
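
For reference, a minimal usage sketch of the helper added above; it is not part of this commit. The function name exampleBumpDaemonSetImage and the parameters ns, dsName, and image are hypothetical, while clientset.Interface, appsv1.DaemonSet, and framework.Logf are already imported by this file. The applyUpdate closure is re-applied to a freshly fetched object on every retry, so it should stay idempotent.

// Sketch only (hypothetical helper, not part of this commit).
func exampleBumpDaemonSetImage(c clientset.Interface, ns, dsName, image string) error {
    updated, err := updateDaemonSetWithRetries(c, ns, dsName, func(update *appsv1.DaemonSet) {
        // The closure runs against a freshly fetched DaemonSet on each
        // retry, so keep the mutation idempotent.
        update.Spec.Template.Spec.Containers[0].Image = image
    })
    if err != nil {
        return err
    }
    framework.Logf("DaemonSet %s now specifies image %s", updated.Name, updated.Spec.Template.Spec.Containers[0].Image)
    return nil
}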

test/e2e/framework/util.go

Lines changed: 0 additions & 232 deletions
@@ -1363,16 +1363,6 @@ func AddOrUpdateLabelOnNode(c clientset.Interface, nodeName string, labelKey, la
     ExpectNoError(testutils.AddLabelsToNode(c, nodeName, map[string]string{labelKey: labelValue}))
 }
 
-// AddOrUpdateLabelOnNodeAndReturnOldValue adds the given label key and value to the given node or updates value and returns the old label value.
-func AddOrUpdateLabelOnNodeAndReturnOldValue(c clientset.Interface, nodeName string, labelKey, labelValue string) string {
-    var oldValue string
-    node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
-    ExpectNoError(err)
-    oldValue = node.Labels[labelKey]
-    ExpectNoError(testutils.AddLabelsToNode(c, nodeName, map[string]string{labelKey: labelValue}))
-    return oldValue
-}
-
 // ExpectNodeHasLabel expects that the given node has the given label pair.
 func ExpectNodeHasLabel(c clientset.Interface, nodeName string, labelKey string, labelValue string) {
     ginkgo.By("verifying the node has the label " + labelKey + " " + labelValue)
@@ -1435,67 +1425,6 @@ func NodeHasTaint(c clientset.Interface, nodeName string, taint *v1.Taint) (bool
     return true, nil
 }
 
-// AddOrUpdateAvoidPodOnNode adds avoidPods annotations to node, will override if it exists
-func AddOrUpdateAvoidPodOnNode(c clientset.Interface, nodeName string, avoidPods v1.AvoidPods) {
-    err := wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
-        node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
-        if err != nil {
-            if testutils.IsRetryableAPIError(err) {
-                return false, nil
-            }
-            return false, err
-        }
-
-        taintsData, err := json.Marshal(avoidPods)
-        ExpectNoError(err)
-
-        if node.Annotations == nil {
-            node.Annotations = make(map[string]string)
-        }
-        node.Annotations[v1.PreferAvoidPodsAnnotationKey] = string(taintsData)
-        _, err = c.CoreV1().Nodes().Update(node)
-        if err != nil {
-            if !apierrs.IsConflict(err) {
-                ExpectNoError(err)
-            } else {
-                Logf("Conflict when trying to add/update avoidPods %v to %v with error %v", avoidPods, nodeName, err)
-                return false, nil
-            }
-        }
-        return true, nil
-    })
-    ExpectNoError(err)
-}
-
-// RemoveAvoidPodsOffNode removes AvoidPods annotations from the node. It does not fail if no such annotation exists.
-func RemoveAvoidPodsOffNode(c clientset.Interface, nodeName string) {
-    err := wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
-        node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
-        if err != nil {
-            if testutils.IsRetryableAPIError(err) {
-                return false, nil
-            }
-            return false, err
-        }
-
-        if node.Annotations == nil {
-            return true, nil
-        }
-        delete(node.Annotations, v1.PreferAvoidPodsAnnotationKey)
-        _, err = c.CoreV1().Nodes().Update(node)
-        if err != nil {
-            if !apierrs.IsConflict(err) {
-                ExpectNoError(err)
-            } else {
-                Logf("Conflict when trying to remove avoidPods to %v", nodeName)
-                return false, nil
-            }
-        }
-        return true, nil
-    })
-    ExpectNoError(err)
-}
-
 // ScaleResource scales resource to the given size.
 func ScaleResource(
     clientset clientset.Interface,
@@ -1586,35 +1515,6 @@ func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns
     return nil
 }
 
-type updateDSFunc func(*appsv1.DaemonSet)
-
-// UpdateDaemonSetWithRetries updates daemonsets with the given applyUpdate func
-// until it succeeds or a timeout expires.
-func UpdateDaemonSetWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateDSFunc) (ds *appsv1.DaemonSet, err error) {
-    daemonsets := c.AppsV1().DaemonSets(namespace)
-    var updateErr error
-    pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
-        if ds, err = daemonsets.Get(name, metav1.GetOptions{}); err != nil {
-            if testutils.IsRetryableAPIError(err) {
-                return false, nil
-            }
-            return false, err
-        }
-        // Apply the update, then attempt to push it to the apiserver.
-        applyUpdate(ds)
-        if ds, err = daemonsets.Update(ds); err == nil {
-            Logf("Updating DaemonSet %s", name)
-            return true, nil
-        }
-        updateErr = err
-        return false, nil
-    })
-    if pollErr == wait.ErrWaitTimeout {
-        pollErr = fmt.Errorf("couldn't apply the provided updated to DaemonSet %q: %v", name, updateErr)
-    }
-    return ds, pollErr
-}
-
 // RunHostCmd runs the given cmd in the context of the given pod using `kubectl exec`
 // inside of a shell.
 func RunHostCmd(ns, name, cmd string) (string, error) {
@@ -1693,61 +1593,6 @@ func AllNodesReady(c clientset.Interface, timeout time.Duration) error {
     return nil
 }
 
-// ParseKVLines parses output that looks like lines containing "<key>: <val>"
-// and returns <val> if <key> is found. Otherwise, it returns the empty string.
-func ParseKVLines(output, key string) string {
-    delim := ":"
-    key = key + delim
-    for _, line := range strings.Split(output, "\n") {
-        pieces := strings.SplitAfterN(line, delim, 2)
-        if len(pieces) != 2 {
-            continue
-        }
-        k, v := pieces[0], pieces[1]
-        if k == key {
-            return strings.TrimSpace(v)
-        }
-    }
-    return ""
-}
-
-// RestartKubeProxy restarts kube-proxy on the given host.
-func RestartKubeProxy(host string) error {
-    // TODO: Make it work for all providers.
-    if !ProviderIs("gce", "gke", "aws") {
-        return fmt.Errorf("unsupported provider for RestartKubeProxy: %s", TestContext.Provider)
-    }
-    // kubelet will restart the kube-proxy since it's running in a static pod
-    Logf("Killing kube-proxy on node %v", host)
-    result, err := e2essh.SSH("sudo pkill kube-proxy", host, TestContext.Provider)
-    if err != nil || result.Code != 0 {
-        e2essh.LogResult(result)
-        return fmt.Errorf("couldn't restart kube-proxy: %v", err)
-    }
-    // wait for kube-proxy to come back up
-    sshCmd := "sudo /bin/sh -c 'pgrep kube-proxy | wc -l'"
-    err = wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
-        Logf("Waiting for kubeproxy to come back up with %v on %v", sshCmd, host)
-        result, err := e2essh.SSH(sshCmd, host, TestContext.Provider)
-        if err != nil {
-            return false, err
-        }
-        if result.Code != 0 {
-            e2essh.LogResult(result)
-            return false, fmt.Errorf("failed to run command, exited %d", result.Code)
-        }
-        if result.Stdout == "0\n" {
-            return false, nil
-        }
-        Logf("kube-proxy is back up.")
-        return true, nil
-    })
-    if err != nil {
-        return fmt.Errorf("kube-proxy didn't recover: %v", err)
-    }
-    return nil
-}
-
 // RestartKubelet restarts kubelet on the given host.
 func RestartKubelet(host string) error {
     // TODO: Make it work for all providers and distros.
@@ -1935,33 +1780,6 @@ func WaitForControllerManagerUp() error {
     return fmt.Errorf("waiting for controller-manager timed out")
 }
 
-// CheckForControllerManagerHealthy checks that the controller manager does not crash within "duration"
-func CheckForControllerManagerHealthy(duration time.Duration) error {
-    var PID string
-    cmd := "pidof kube-controller-manager"
-    for start := time.Now(); time.Since(start) < duration; time.Sleep(5 * time.Second) {
-        result, err := e2essh.SSH(cmd, net.JoinHostPort(GetMasterHost(), sshPort), TestContext.Provider)
-        if err != nil {
-            // We don't necessarily know that it crashed, pipe could just be broken
-            e2essh.LogResult(result)
-            return fmt.Errorf("master unreachable after %v", time.Since(start))
-        } else if result.Code != 0 {
-            e2essh.LogResult(result)
-            return fmt.Errorf("SSH result code not 0. actually: %v after %v", result.Code, time.Since(start))
-        } else if result.Stdout != PID {
-            if PID == "" {
-                PID = result.Stdout
-            } else {
-                //its dead
-                return fmt.Errorf("controller manager crashed, old PID: %s, new PID: %s", PID, result.Stdout)
-            }
-        } else {
-            Logf("kube-controller-manager still healthy after %v", time.Since(start))
-        }
-    }
-    return nil
-}
-
 // GenerateMasterRegexp returns a regex for matching master node name.
 func GenerateMasterRegexp(prefix string) string {
     return prefix + "(-...)?"
@@ -2175,56 +1993,6 @@ func UnblockNetwork(from string, to string) {
     }
 }
 
-// CheckConnectivityToHost launches a pod to test connectivity to the specified
-// host. An error will be returned if the host is not reachable from the pod.
-//
-// An empty nodeName will use the schedule to choose where the pod is executed.
-func CheckConnectivityToHost(f *Framework, nodeName, podName, host string, port, timeout int) error {
-    contName := fmt.Sprintf("%s-container", podName)
-
-    command := []string{
-        "nc",
-        "-vz",
-        "-w", strconv.Itoa(timeout),
-        host,
-        strconv.Itoa(port),
-    }
-
-    pod := &v1.Pod{
-        ObjectMeta: metav1.ObjectMeta{
-            Name: podName,
-        },
-        Spec: v1.PodSpec{
-            Containers: []v1.Container{
-                {
-                    Name:    contName,
-                    Image:   AgnHostImage,
-                    Command: command,
-                },
-            },
-            NodeName:      nodeName,
-            RestartPolicy: v1.RestartPolicyNever,
-        },
-    }
-    podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
-    _, err := podClient.Create(pod)
-    if err != nil {
-        return err
-    }
-    err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, podName, f.Namespace.Name)
-
-    if err != nil {
-        logs, logErr := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, contName)
-        if logErr != nil {
-            Logf("Warning: Failed to get logs from pod %q: %v", pod.Name, logErr)
-        } else {
-            Logf("pod %s/%s logs:\n%s", f.Namespace.Name, pod.Name, logs)
-        }
-    }
-
-    return err
-}
-
 // CoreDump SSHs to the master and all nodes and dumps their logs into dir.
 // It shells out to cluster/log-dump/log-dump.sh to accomplish this.
 func CoreDump(dir string) {
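
The two avoidPods helpers removed above share one retry pattern: re-fetch the Node on each attempt, mutate it, and retry only on a conflict or a retryable API error. Below is a condensed sketch of that pattern, not part of this commit; the function name setNodeAnnotationWithRetry and its parameters are hypothetical, while Poll, SingleCallTimeout, testutils.IsRetryableAPIError, and apierrs.IsConflict are the framework names used by the removed code.

// Sketch only (hypothetical helper, not part of this commit).
func setNodeAnnotationWithRetry(c clientset.Interface, nodeName, key, value string) error {
    return wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
        node, err := c.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
        if err != nil {
            if testutils.IsRetryableAPIError(err) {
                return false, nil // transient apiserver error, poll again
            }
            return false, err
        }
        if node.Annotations == nil {
            node.Annotations = make(map[string]string)
        }
        node.Annotations[key] = value
        if _, err = c.CoreV1().Nodes().Update(node); err != nil {
            if apierrs.IsConflict(err) {
                return false, nil // the Node changed underneath us, retry
            }
            return false, err
        }
        return true, nil
    })
}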
