
Commit c5c19a6

Merge pull request kubernetes#129723 from carlory/NFSPersistentVolumes
remove unrunnable tests from NFSPersistentVolumes
2 parents 2056dbc + 7712d34

File tree

1 file changed (+1, -137 lines)


test/e2e/storage/nfs_persistent_volume-disruptive.go

Lines changed: 1 addition & 137 deletions
@@ -19,7 +19,6 @@ package storage
 import (
 	"context"
 	"fmt"
-	"net"
 	"time"
 
 	"github.com/onsi/ginkgo/v2"
@@ -28,15 +27,12 @@ import (
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
-	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2ekubesystem "k8s.io/kubernetes/test/e2e/framework/kubesystem"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
-	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 	admissionapi "k8s.io/pod-security-admission/api"
@@ -48,34 +44,7 @@ type disruptiveTest struct {
 	runTest testBody
 }
 
-// checkForControllerManagerHealthy checks that the controller manager does not crash within "duration"
-func checkForControllerManagerHealthy(ctx context.Context, duration time.Duration) error {
-	var PID string
-	cmd := "pidof kube-controller-manager"
-	for start := time.Now(); time.Since(start) < duration && ctx.Err() == nil; time.Sleep(5 * time.Second) {
-		result, err := e2essh.SSH(ctx, cmd, net.JoinHostPort(framework.APIAddress(), e2essh.SSHPort), framework.TestContext.Provider)
-		if err != nil {
-			// We don't necessarily know that it crashed, pipe could just be broken
-			e2essh.LogResult(result)
-			return fmt.Errorf("master unreachable after %v", time.Since(start))
-		} else if result.Code != 0 {
-			e2essh.LogResult(result)
-			return fmt.Errorf("SSH result code not 0. actually: %v after %v", result.Code, time.Since(start))
-		} else if result.Stdout != PID {
-			if PID == "" {
-				PID = result.Stdout
-			} else {
-				//its dead
-				return fmt.Errorf("controller manager crashed, old PID: %s, new PID: %s", PID, result.Stdout)
-			}
-		} else {
-			framework.Logf("kube-controller-manager still healthy after %v", time.Since(start))
-		}
-	}
-	return nil
-}
-
-var _ = utils.SIGDescribe("NFSPersistentVolumes", framework.WithDisruptive(), framework.WithFlaky(), func() {
+var _ = utils.SIGDescribe("NFSPersistentVolumes", framework.WithDisruptive(), func() {
 
 	f := framework.NewDefaultFramework("disruptive-pv")
 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
@@ -136,98 +105,6 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes", framework.WithDisruptive(), fr
 		}
 	})
 
-	ginkgo.Context("when kube-controller-manager restarts", func() {
-		var (
-			diskName1, diskName2 string
-			err                  error
-			pvConfig1, pvConfig2 e2epv.PersistentVolumeConfig
-			pv1, pv2             *v1.PersistentVolume
-			pvSource1, pvSource2 *v1.PersistentVolumeSource
-			pvc1, pvc2           *v1.PersistentVolumeClaim
-			clientPod            *v1.Pod
-		)
-
-		ginkgo.BeforeEach(func(ctx context.Context) {
-			e2eskipper.SkipUnlessProviderIs("gce")
-			e2eskipper.SkipUnlessSSHKeyPresent()
-
-			ginkgo.By("Initializing first PD with PVPVC binding")
-			pvSource1, diskName1 = createGCEVolume(ctx)
-			framework.ExpectNoError(err)
-			pvConfig1 = e2epv.PersistentVolumeConfig{
-				NamePrefix: "gce-",
-				Labels:     volLabel,
-				PVSource:   *pvSource1,
-				Prebind:    nil,
-			}
-			pv1, pvc1, err = e2epv.CreatePVPVC(ctx, c, f.Timeouts, pvConfig1, pvcConfig, ns, false)
-			framework.ExpectNoError(err)
-			framework.ExpectNoError(e2epv.WaitOnPVandPVC(ctx, c, f.Timeouts, ns, pv1, pvc1))
-
-			ginkgo.By("Initializing second PD with PVPVC binding")
-			pvSource2, diskName2 = createGCEVolume(ctx)
-			framework.ExpectNoError(err)
-			pvConfig2 = e2epv.PersistentVolumeConfig{
-				NamePrefix: "gce-",
-				Labels:     volLabel,
-				PVSource:   *pvSource2,
-				Prebind:    nil,
-			}
-			pv2, pvc2, err = e2epv.CreatePVPVC(ctx, c, f.Timeouts, pvConfig2, pvcConfig, ns, false)
-			framework.ExpectNoError(err)
-			framework.ExpectNoError(e2epv.WaitOnPVandPVC(ctx, c, f.Timeouts, ns, pv2, pvc2))
-
-			ginkgo.By("Attaching both PVC's to a single pod")
-			clientPod, err = e2epod.CreatePod(ctx, c, ns, nil, []*v1.PersistentVolumeClaim{pvc1, pvc2}, f.NamespacePodSecurityLevel, "")
-			framework.ExpectNoError(err)
-		})
-
-		ginkgo.AfterEach(func(ctx context.Context) {
-			// Delete client/user pod first
-			framework.ExpectNoError(e2epod.DeletePodWithWait(ctx, c, clientPod))
-
-			// Delete PV and PVCs
-			if errs := e2epv.PVPVCCleanup(ctx, c, ns, pv1, pvc1); len(errs) > 0 {
-				framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
-			}
-			pv1, pvc1 = nil, nil
-			if errs := e2epv.PVPVCCleanup(ctx, c, ns, pv2, pvc2); len(errs) > 0 {
-				framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
-			}
-			pv2, pvc2 = nil, nil
-
-			// Delete the actual disks
-			if diskName1 != "" {
-				framework.ExpectNoError(e2epv.DeletePDWithRetry(ctx, diskName1))
-			}
-			if diskName2 != "" {
-				framework.ExpectNoError(e2epv.DeletePDWithRetry(ctx, diskName2))
-			}
-		})
-
-		ginkgo.It("should delete a bound PVC from a clientPod, restart the kube-control-manager, and ensure the kube-controller-manager does not crash", func(ctx context.Context) {
-			e2eskipper.SkipUnlessSSHKeyPresent()
-
-			ginkgo.By("Deleting PVC for volume 2")
-			err = e2epv.DeletePersistentVolumeClaim(ctx, c, pvc2.Name, ns)
-			framework.ExpectNoError(err)
-			pvc2 = nil
-
-			ginkgo.By("Restarting the kube-controller-manager")
-			err = e2ekubesystem.RestartControllerManager(ctx)
-			framework.ExpectNoError(err)
-			err = e2ekubesystem.WaitForControllerManagerUp(ctx)
-			framework.ExpectNoError(err)
-			framework.Logf("kube-controller-manager restarted")
-
-			ginkgo.By("Observing the kube-controller-manager healthy for at least 2 minutes")
-			// Continue checking for 2 minutes to make sure kube-controller-manager is healthy
-			err = checkForControllerManagerHealthy(ctx, 2*time.Minute)
-			framework.ExpectNoError(err)
-		})
-
-	})
-
 	ginkgo.Context("when kubelet restarts", func() {
 		var (
 			clientPod *v1.Pod
@@ -277,19 +154,6 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes", framework.WithDisruptive(), fr
 	})
 })
 
-// createGCEVolume creates PersistentVolumeSource for GCEVolume.
-func createGCEVolume(ctx context.Context) (*v1.PersistentVolumeSource, string) {
-	diskName, err := e2epv.CreatePDWithRetry(ctx)
-	framework.ExpectNoError(err)
-	return &v1.PersistentVolumeSource{
-		GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
-			PDName:   diskName,
-			FSType:   "ext3",
-			ReadOnly: false,
-		},
-	}, diskName
-}
-
 // initTestCase initializes spec resources (pv, pvc, and pod) and returns pointers to be consumed
 // by the test.
 func initTestCase(ctx context.Context, f *framework.Framework, c clientset.Interface, pvConfig e2epv.PersistentVolumeConfig, pvcConfig e2epv.PersistentVolumeClaimConfig, ns, nodeName string) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
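
The deleted checkForControllerManagerHealthy helper encodes a generic crash-detection idiom: poll a process's PID on an interval and treat a PID change as evidence that the process died and was restarted. Below is a minimal standalone sketch of that idiom; unlike the removed e2e helper it runs pidof locally rather than over SSH, and the target process name and 2-minute window are illustrative assumptions, not part of the e2e framework.

package main

import (
	"fmt"
	"os/exec"
	"strings"
	"time"
)

// pollForCrash watches a process by name for the given duration and
// returns an error if its PID changes (the process died and restarted)
// or if pidof stops finding it. This mirrors the polling loop of the
// removed checkForControllerManagerHealthy, but invokes pidof locally
// instead of over SSH (an assumption for this sketch).
func pollForCrash(process string, duration time.Duration) error {
	var pid string
	for start := time.Now(); time.Since(start) < duration; time.Sleep(5 * time.Second) {
		out, err := exec.Command("pidof", process).Output()
		if err != nil {
			// pidof exits non-zero when no matching process exists.
			return fmt.Errorf("%s not running after %v: %w", process, time.Since(start), err)
		}
		current := strings.TrimSpace(string(out))
		switch {
		case pid == "":
			pid = current // first observation: remember the initial PID
		case current != pid:
			// A different PID means the original process died.
			return fmt.Errorf("%s crashed, old PID: %s, new PID: %s", process, pid, current)
		}
	}
	return nil
}

func main() {
	// Illustrative target and window; the removed test watched
	// kube-controller-manager for 2 minutes after a restart.
	if err := pollForCrash("kube-controller-manager", 2*time.Minute); err != nil {
		fmt.Println("unhealthy:", err)
	} else {
		fmt.Println("process stayed healthy")
	}
}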
