Skip to content

Commit efe3747

Browse files
committed
Wait for resources owned by pod to be cleaned up in ephemeral volume tests
sig-storage tests that delete pods need to wait for owned resources to also be cleaned up before returning when resources such as ephemeral inline volumes are in use. This was previously implemented by modifying the pod delete call of the e2e framework, which negatively impacted other tests. That change was reverted, and the logic has now been moved to StopPodAndDependents, which is local to the sig-storage tests. Signed-off-by: hasheddan <[email protected]>
1 parent c2b7aa0 commit efe3747

File tree

2 files changed

+36
-4
lines changed

2 files changed

+36
-4
lines changed

test/e2e/storage/testsuites/ephemeral.go

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -207,7 +207,7 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns
207207
storageutils.VerifyExecInPodSucceed(f, pod2, "[ ! -f /mnt/test-0/hello-world ]")
208208
}
209209

210-
defer StopPod(f.ClientSet, pod2)
210+
defer StopPodAndDependents(f.ClientSet, pod2)
211211
return nil
212212
}
213213

@@ -302,7 +302,7 @@ func (t EphemeralTest) TestEphemeral() {
302302
pod := StartInPodWithInlineVolume(client, t.Namespace, "inline-volume-tester", command, volumes, t.ReadOnly, t.Node)
303303
defer func() {
304304
// pod might be nil now.
305-
StopPod(client, pod)
305+
StopPodAndDependents(client, pod)
306306
}()
307307
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(client, pod.Name, pod.Namespace), "waiting for pod with inline volume")
308308
runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
@@ -315,7 +315,7 @@ func (t EphemeralTest) TestEphemeral() {
315315
runningPodData = t.RunningPodCheck(pod)
316316
}
317317

318-
StopPod(client, pod)
318+
StopPodAndDependents(client, pod)
319319
pod = nil // Don't stop twice.
320320

321321
// There should be no dangling PVCs in the namespace now. There might be for
@@ -446,7 +446,7 @@ func VolumeSourceEnabled(c clientset.Interface, ns string, volume v1.VolumeSourc
446446
switch {
447447
case err == nil:
448448
// Pod was created, feature supported.
449-
StopPod(c, pod)
449+
StopPodAndDependents(c, pod)
450450
return true, nil
451451
case apierrors.IsInvalid(err):
452452
// "Invalid" because it uses a feature that isn't supported.

test/e2e/storage/testsuites/provisioning.go

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -712,6 +712,38 @@ func StopPod(c clientset.Interface, pod *v1.Pod) {
712712
e2epod.DeletePodWithWait(c, pod)
713713
}
714714

715+
// StopPodAndDependents first tries to log the output of the pod's container,
716+
// then deletes the pod and waits for that to succeed. Also waits for all owned
717+
// resources to be deleted.
718+
func StopPodAndDependents(c clientset.Interface, pod *v1.Pod) {
719+
if pod == nil {
720+
return
721+
}
722+
body, err := c.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{}).Do(context.TODO()).Raw()
723+
if err != nil {
724+
framework.Logf("Error getting logs for pod %s: %v", pod.Name, err)
725+
} else {
726+
framework.Logf("Pod %s has the following logs: %s", pod.Name, body)
727+
}
728+
framework.Logf("Deleting pod %q in namespace %q", pod.Name, pod.Namespace)
729+
deletionPolicy := metav1.DeletePropagationForeground
730+
err = c.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name,
731+
metav1.DeleteOptions{
732+
// If the pod is the owner of some resources (like ephemeral inline volumes),
733+
// then we want to be sure that those are also gone before we return.
734+
// Blocking pod deletion via metav1.DeletePropagationForeground achieves that.
735+
PropagationPolicy: &deletionPolicy,
736+
})
737+
if err != nil {
738+
if apierrors.IsNotFound(err) {
739+
return // assume pod was already deleted
740+
}
741+
framework.Logf("pod Delete API error: %v", err)
742+
}
743+
framework.Logf("Wait up to %v for pod %q to be fully deleted", e2epod.PodDeleteTimeout, pod.Name)
744+
e2epod.WaitForPodNotFoundInNamespace(c, pod.Name, pod.Namespace, e2epod.PodDeleteTimeout)
745+
}
746+
715747
func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeClaim) {
716748
for _, claim := range pvcs {
717749
// Get new copy of the claim

0 commit comments

Comments
 (0)