Skip to content

Commit afac2ca

Browse files
authored
Merge pull request kubernetes#85008 from oomichi/move-utils-3
Move functions from e2e/framework/util.go Part-3
2 parents cfc596b + 94211f1 commit afac2ca

File tree

9 files changed

+202
-188
lines changed

9 files changed

+202
-188
lines changed

test/e2e/cloud/gcp/ha_master.go

Lines changed: 49 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -20,14 +20,18 @@ import (
2020
"fmt"
2121
"os/exec"
2222
"path"
23+
"regexp"
2324
"strconv"
2425
"strings"
2526
"time"
2627

2728
"github.com/onsi/ginkgo"
29+
v1 "k8s.io/api/core/v1"
30+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
2831
clientset "k8s.io/client-go/kubernetes"
2932
"k8s.io/kubernetes/test/e2e/common"
3033
"k8s.io/kubernetes/test/e2e/framework"
34+
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
3135
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
3236
)
3337

@@ -111,6 +115,48 @@ func removeZoneFromZones(zones []string, zone string) []string {
111115
return zones
112116
}
113117

118+
// generateMasterRegexp returns a regex for matching master node name.
119+
func generateMasterRegexp(prefix string) string {
120+
return prefix + "(-...)?"
121+
}
122+
123+
// waitForMasters waits until the cluster has the desired number of ready masters in it.
124+
func waitForMasters(masterPrefix string, c clientset.Interface, size int, timeout time.Duration) error {
125+
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
126+
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
127+
if err != nil {
128+
framework.Logf("Failed to list nodes: %v", err)
129+
continue
130+
}
131+
132+
// Filter out nodes that are not master replicas
133+
e2enode.Filter(nodes, func(node v1.Node) bool {
134+
res, err := regexp.Match(generateMasterRegexp(masterPrefix), ([]byte)(node.Name))
135+
if err != nil {
136+
framework.Logf("Failed to match regexp to node name: %v", err)
137+
return false
138+
}
139+
return res
140+
})
141+
142+
numNodes := len(nodes.Items)
143+
144+
// Filter out not-ready nodes.
145+
e2enode.Filter(nodes, func(node v1.Node) bool {
146+
return e2enode.IsConditionSetAsExpected(&node, v1.NodeReady, true)
147+
})
148+
149+
numReady := len(nodes.Items)
150+
151+
if numNodes == size && numReady == size {
152+
framework.Logf("Cluster has reached the desired number of masters %d", size)
153+
return nil
154+
}
155+
framework.Logf("Waiting for the number of masters %d, current %d, not ready master nodes %d", size, numNodes, numNodes-numReady)
156+
}
157+
return fmt.Errorf("timeout waiting %v for the number of masters to be %d", timeout, size)
158+
}
159+
114160
var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
115161
f := framework.NewDefaultFramework("ha-master")
116162
var c clientset.Interface
@@ -123,7 +169,7 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
123169
framework.SkipUnlessProviderIs("gce")
124170
c = f.ClientSet
125171
ns = f.Namespace.Name
126-
framework.ExpectNoError(framework.WaitForMasters(framework.TestContext.CloudConfig.MasterName, c, 1, 10*time.Minute))
172+
framework.ExpectNoError(waitForMasters(framework.TestContext.CloudConfig.MasterName, c, 1, 10*time.Minute))
127173
additionalReplicaZones = make([]string, 0)
128174
existingRCs = make([]string, 0)
129175
})
@@ -139,7 +185,7 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
139185
for _, zone := range additionalReplicaZones {
140186
removeMasterReplica(zone)
141187
}
142-
framework.ExpectNoError(framework.WaitForMasters(framework.TestContext.CloudConfig.MasterName, c, 1, 10*time.Minute))
188+
framework.ExpectNoError(waitForMasters(framework.TestContext.CloudConfig.MasterName, c, 1, 10*time.Minute))
143189
})
144190

145191
type Action int
@@ -167,7 +213,7 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
167213
framework.ExpectNoError(removeWorkerNodes(zone))
168214
additionalNodesZones = removeZoneFromZones(additionalNodesZones, zone)
169215
}
170-
framework.ExpectNoError(framework.WaitForMasters(framework.TestContext.CloudConfig.MasterName, c, len(additionalReplicaZones)+1, 10*time.Minute))
216+
framework.ExpectNoError(waitForMasters(framework.TestContext.CloudConfig.MasterName, c, len(additionalReplicaZones)+1, 10*time.Minute))
171217
framework.ExpectNoError(framework.AllNodesReady(c, 5*time.Minute))
172218

173219
// Verify that API server works correctly with HA master.

test/e2e/framework/framework.go

Lines changed: 40 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -24,7 +24,9 @@ package framework
2424
import (
2525
"bytes"
2626
"fmt"
27+
"io/ioutil"
2728
"math/rand"
29+
"path"
2830
"strings"
2931
"sync"
3032
"time"
@@ -276,6 +278,43 @@ func (f *Framework) BeforeEach() {
276278
f.flakeReport = NewFlakeReport()
277279
}
278280

281+
// printSummaries prints summaries of tests.
282+
func printSummaries(summaries []TestDataSummary, testBaseName string) {
283+
now := time.Now()
284+
for i := range summaries {
285+
Logf("Printing summary: %v", summaries[i].SummaryKind())
286+
switch TestContext.OutputPrintType {
287+
case "hr":
288+
if TestContext.ReportDir == "" {
289+
Logf(summaries[i].PrintHumanReadable())
290+
} else {
291+
// TODO: learn to extract test name and append it to the kind instead of timestamp.
292+
filePath := path.Join(TestContext.ReportDir, summaries[i].SummaryKind()+"_"+testBaseName+"_"+now.Format(time.RFC3339)+".txt")
293+
if err := ioutil.WriteFile(filePath, []byte(summaries[i].PrintHumanReadable()), 0644); err != nil {
294+
Logf("Failed to write file %v with test performance data: %v", filePath, err)
295+
}
296+
}
297+
case "json":
298+
fallthrough
299+
default:
300+
if TestContext.OutputPrintType != "json" {
301+
Logf("Unknown output type: %v. Printing JSON", TestContext.OutputPrintType)
302+
}
303+
if TestContext.ReportDir == "" {
304+
Logf("%v JSON\n%v", summaries[i].SummaryKind(), summaries[i].PrintJSON())
305+
Logf("Finished")
306+
} else {
307+
// TODO: learn to extract test name and append it to the kind instead of timestamp.
308+
filePath := path.Join(TestContext.ReportDir, summaries[i].SummaryKind()+"_"+testBaseName+"_"+now.Format(time.RFC3339)+".json")
309+
Logf("Writing to %s", filePath)
310+
if err := ioutil.WriteFile(filePath, []byte(summaries[i].PrintJSON()), 0644); err != nil {
311+
Logf("Failed to write file %v with test performance data: %v", filePath, err)
312+
}
313+
}
314+
}
315+
}
316+
}
317+
279318
// AfterEach deletes the namespace, after reading its events.
280319
func (f *Framework) AfterEach() {
281320
RemoveCleanupAction(f.cleanupHandle)
@@ -368,7 +407,7 @@ func (f *Framework) AfterEach() {
368407
f.flakeReport = nil
369408
}
370409

371-
PrintSummaries(f.TestSummaries, f.BaseName)
410+
printSummaries(f.TestSummaries, f.BaseName)
372411

373412
// Check whether all nodes are ready after the test.
374413
// This is explicitly done at the very end of the test, to avoid

test/e2e/framework/util.go

Lines changed: 0 additions & 175 deletions
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,6 @@ import (
3131
"os/exec"
3232
"path"
3333
"path/filepath"
34-
"regexp"
3534
"sort"
3635
"strconv"
3736
"strings"
@@ -1640,21 +1639,6 @@ func RestartKubelet(host string) error {
16401639
return nil
16411640
}
16421641

1643-
// WaitForKubeletUp waits for the kubelet on the given host to be up.
1644-
func WaitForKubeletUp(host string) error {
1645-
cmd := "curl http://localhost:" + strconv.Itoa(ports.KubeletReadOnlyPort) + "/healthz"
1646-
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
1647-
result, err := e2essh.SSH(cmd, host, TestContext.Provider)
1648-
if err != nil || result.Code != 0 {
1649-
e2essh.LogResult(result)
1650-
}
1651-
if result.Stdout == "ok" {
1652-
return nil
1653-
}
1654-
}
1655-
return fmt.Errorf("waiting for kubelet timed out")
1656-
}
1657-
16581642
// RestartApiserver restarts the kube-apiserver.
16591643
func RestartApiserver(cs clientset.Interface) error {
16601644
// TODO: Make it work for all providers.
@@ -1699,17 +1683,6 @@ func sshRestartMaster() error {
16991683
return nil
17001684
}
17011685

1702-
// WaitForApiserverUp waits for the kube-apiserver to be up.
1703-
func WaitForApiserverUp(c clientset.Interface) error {
1704-
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
1705-
body, err := c.CoreV1().RESTClient().Get().AbsPath("/healthz").Do().Raw()
1706-
if err == nil && string(body) == "ok" {
1707-
return nil
1708-
}
1709-
}
1710-
return fmt.Errorf("waiting for apiserver timed out")
1711-
}
1712-
17131686
// waitForApiserverRestarted waits until apiserver's restart count increased.
17141687
func waitForApiserverRestarted(c clientset.Interface, initialRestartCount int32) error {
17151688
for start := time.Now(); time.Since(start) < time.Minute; time.Sleep(5 * time.Second) {
@@ -1780,101 +1753,6 @@ func WaitForControllerManagerUp() error {
17801753
return fmt.Errorf("waiting for controller-manager timed out")
17811754
}
17821755

1783-
// GenerateMasterRegexp returns a regex for matching master node name.
1784-
func GenerateMasterRegexp(prefix string) string {
1785-
return prefix + "(-...)?"
1786-
}
1787-
1788-
// WaitForMasters waits until the cluster has the desired number of ready masters in it.
1789-
func WaitForMasters(masterPrefix string, c clientset.Interface, size int, timeout time.Duration) error {
1790-
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
1791-
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
1792-
if err != nil {
1793-
Logf("Failed to list nodes: %v", err)
1794-
continue
1795-
}
1796-
1797-
// Filter out nodes that are not master replicas
1798-
e2enode.Filter(nodes, func(node v1.Node) bool {
1799-
res, err := regexp.Match(GenerateMasterRegexp(masterPrefix), ([]byte)(node.Name))
1800-
if err != nil {
1801-
Logf("Failed to match regexp to node name: %v", err)
1802-
return false
1803-
}
1804-
return res
1805-
})
1806-
1807-
numNodes := len(nodes.Items)
1808-
1809-
// Filter out not-ready nodes.
1810-
e2enode.Filter(nodes, func(node v1.Node) bool {
1811-
return e2enode.IsConditionSetAsExpected(&node, v1.NodeReady, true)
1812-
})
1813-
1814-
numReady := len(nodes.Items)
1815-
1816-
if numNodes == size && numReady == size {
1817-
Logf("Cluster has reached the desired number of masters %d", size)
1818-
return nil
1819-
}
1820-
Logf("Waiting for the number of masters %d, current %d, not ready master nodes %d", size, numNodes, numNodes-numReady)
1821-
}
1822-
return fmt.Errorf("timeout waiting %v for the number of masters to be %d", timeout, size)
1823-
}
1824-
1825-
// GetHostExternalAddress gets the node for a pod and returns the first External
1826-
// address. Returns an error if the node the pod is on doesn't have an External
1827-
// address.
1828-
func GetHostExternalAddress(client clientset.Interface, p *v1.Pod) (externalAddress string, err error) {
1829-
node, err := client.CoreV1().Nodes().Get(p.Spec.NodeName, metav1.GetOptions{})
1830-
if err != nil {
1831-
return "", err
1832-
}
1833-
for _, address := range node.Status.Addresses {
1834-
if address.Type == v1.NodeExternalIP {
1835-
if address.Address != "" {
1836-
externalAddress = address.Address
1837-
break
1838-
}
1839-
}
1840-
}
1841-
if externalAddress == "" {
1842-
err = fmt.Errorf("No external address for pod %v on node %v",
1843-
p.Name, p.Spec.NodeName)
1844-
}
1845-
return
1846-
}
1847-
1848-
// GetHostAddress gets the node for a pod and returns the first
1849-
// address. Returns an error if the node the pod is on doesn't have an
1850-
// address.
1851-
func GetHostAddress(client clientset.Interface, p *v1.Pod) (string, error) {
1852-
node, err := client.CoreV1().Nodes().Get(p.Spec.NodeName, metav1.GetOptions{})
1853-
if err != nil {
1854-
return "", err
1855-
}
1856-
// Try externalAddress first
1857-
for _, address := range node.Status.Addresses {
1858-
if address.Type == v1.NodeExternalIP {
1859-
if address.Address != "" {
1860-
return address.Address, nil
1861-
}
1862-
}
1863-
}
1864-
// If no externalAddress found, try internalAddress
1865-
for _, address := range node.Status.Addresses {
1866-
if address.Type == v1.NodeInternalIP {
1867-
if address.Address != "" {
1868-
return address.Address, nil
1869-
}
1870-
}
1871-
}
1872-
1873-
// If not found, return error
1874-
return "", fmt.Errorf("No address for pod %v on node %v",
1875-
p.Name, p.Spec.NodeName)
1876-
}
1877-
18781756
type extractRT struct {
18791757
http.Header
18801758
}
@@ -2236,43 +2114,6 @@ func CreateEmptyFileOnPod(namespace string, podName string, filePath string) err
22362114
return err
22372115
}
22382116

2239-
// PrintSummaries prints summaries of tests.
2240-
func PrintSummaries(summaries []TestDataSummary, testBaseName string) {
2241-
now := time.Now()
2242-
for i := range summaries {
2243-
Logf("Printing summary: %v", summaries[i].SummaryKind())
2244-
switch TestContext.OutputPrintType {
2245-
case "hr":
2246-
if TestContext.ReportDir == "" {
2247-
Logf(summaries[i].PrintHumanReadable())
2248-
} else {
2249-
// TODO: learn to extract test name and append it to the kind instead of timestamp.
2250-
filePath := path.Join(TestContext.ReportDir, summaries[i].SummaryKind()+"_"+testBaseName+"_"+now.Format(time.RFC3339)+".txt")
2251-
if err := ioutil.WriteFile(filePath, []byte(summaries[i].PrintHumanReadable()), 0644); err != nil {
2252-
Logf("Failed to write file %v with test performance data: %v", filePath, err)
2253-
}
2254-
}
2255-
case "json":
2256-
fallthrough
2257-
default:
2258-
if TestContext.OutputPrintType != "json" {
2259-
Logf("Unknown output type: %v. Printing JSON", TestContext.OutputPrintType)
2260-
}
2261-
if TestContext.ReportDir == "" {
2262-
Logf("%v JSON\n%v", summaries[i].SummaryKind(), summaries[i].PrintJSON())
2263-
Logf("Finished")
2264-
} else {
2265-
// TODO: learn to extract test name and append it to the kind instead of timestamp.
2266-
filePath := path.Join(TestContext.ReportDir, summaries[i].SummaryKind()+"_"+testBaseName+"_"+now.Format(time.RFC3339)+".json")
2267-
Logf("Writing to %s", filePath)
2268-
if err := ioutil.WriteFile(filePath, []byte(summaries[i].PrintJSON()), 0644); err != nil {
2269-
Logf("Failed to write file %v with test performance data: %v", filePath, err)
2270-
}
2271-
}
2272-
}
2273-
}
2274-
}
2275-
22762117
// DumpDebugInfo dumps debug info of tests.
22772118
func DumpDebugInfo(c clientset.Interface, ns string) {
22782119
sl, _ := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
@@ -2326,22 +2167,6 @@ func DsFromManifest(url string) (*appsv1.DaemonSet, error) {
23262167
return &ds, nil
23272168
}
23282169

2329-
// WaitForPersistentVolumeClaimDeleted waits for a PersistentVolumeClaim to be removed from the system until timeout occurs, whichever comes first.
2330-
func WaitForPersistentVolumeClaimDeleted(c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error {
2331-
Logf("Waiting up to %v for PersistentVolumeClaim %s to be removed", timeout, pvcName)
2332-
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
2333-
_, err := c.CoreV1().PersistentVolumeClaims(ns).Get(pvcName, metav1.GetOptions{})
2334-
if err != nil {
2335-
if apierrs.IsNotFound(err) {
2336-
Logf("Claim %q in namespace %q doesn't exist in the system", pvcName, ns)
2337-
return nil
2338-
}
2339-
Logf("Failed to get claim %q in namespace %q, retrying in %v. Error: %v", pvcName, ns, Poll, err)
2340-
}
2341-
}
2342-
return fmt.Errorf("PersistentVolumeClaim %s is not removed from the system within %v", pvcName, timeout)
2343-
}
2344-
23452170
// GetClusterZones returns the values of zone label collected from all nodes.
23462171
func GetClusterZones(c clientset.Interface) (sets.String, error) {
23472172
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})

0 commit comments

Comments (0)