
Commit a95c8ed

integration: check data dir after delete container or pod
The new containerd release should clean up the old pod's data dir after RemovePodSandbox.

Signed-off-by: Wei Fu <[email protected]>
1 parent ec759f5 commit a95c8ed
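In essence, the check this commit adds boils down to stat'ing the sandbox metadata directory under the CRI plugin's root dir after RemovePodSandbox and expecting os.IsNotExist. Below is a minimal standalone sketch of that assertion; the helper name and the hard-coded rootDir default are illustrative only and not part of this commit (the test itself resolves rootDir from the CRI runtime status via criRuntimeInfo, as shown in the diff):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// verifyPodDataDirRemoved reports an error if the sandbox metadata
// directory still exists after the pod sandbox has been removed.
func verifyPodDataDirRemoved(rootDir, sandboxID string) error {
	// The CRI plugin keeps per-sandbox metadata under <rootDir>/sandboxes/<id>.
	podDataDir := filepath.Join(rootDir, "sandboxes", sandboxID)
	if _, err := os.Stat(podDataDir); !os.IsNotExist(err) {
		return fmt.Errorf("pod data dir %s still exists (stat err: %v)", podDataDir, err)
	}
	return nil
}

func main() {
	// Illustrative default path; the integration test reads rootDir from
	// the CRI runtime status info instead of hard-coding it.
	rootDir := "/var/lib/containerd/io.containerd.grpc.v1.cri"
	if err := verifyPodDataDirRemoved(rootDir, "example-sandbox-id"); err != nil {
		fmt.Println("cleanup check failed:", err)
		return
	}
	fmt.Println("pod data dir cleaned up")
}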

File tree

1 file changed (+78, −6 lines)

integration/release_upgrade_linux_test.go

Lines changed: 78 additions & 6 deletions
@@ -19,6 +19,7 @@ package integration
 import (
 	"bufio"
 	"context"
+	"encoding/json"
 	"fmt"
 	"io"
 	"os"
@@ -60,6 +61,8 @@ func runUpgradeTestCase(
 	setupUpgradeVerifyCase func(t *testing.T, criRuntimeService cri.RuntimeService, criImageService cri.ImageManagerService) upgradeVerifyCaseFunc,
 ) func(t *testing.T) {
 	return func(t *testing.T) {
+		// NOTE: Using t.TempDir() here is to ensure there are no leaky
+		// mountpoint after test completed.
 		workDir := t.TempDir()

 		t.Log("Install config for previous release")
@@ -132,7 +135,7 @@ func shouldRecoverAllThePodsAfterUpgrade(t *testing.T, rSvc cri.RuntimeService,
 	)

 	secondPodCtx := newPodTCtx(t, rSvc, "stopped-pod", "sandbox")
-	secondPodCtx.stop()
+	secondPodCtx.stop(false)

 	return func(t *testing.T, rSvc cri.RuntimeService, _ cri.ImageManagerService) {
 		t.Log("List Pods")
@@ -164,7 +167,6 @@ func shouldRecoverAllThePodsAfterUpgrade(t *testing.T, rSvc cri.RuntimeService,
 				default:
 					t.Errorf("unexpected container %s in %s", cntr.Id, pod.Id)
 				}
-
 			}

 		case secondPodCtx.id:
@@ -259,6 +261,9 @@ func shouldManipulateContainersInPodAfterUpgrade(t *testing.T, rSvc cri.RuntimeS
 		WithCommand("sleep", "1d"))

 	return func(t *testing.T, rSvc cri.RuntimeService, _ cri.ImageManagerService) {
+		// TODO(fuweid): make svc re-connect to new socket
+		podCtx.rSvc = rSvc
+
 		t.Log("Manipulating containers in the previous pod")

 		// For the running container, we get status and stats of it,
@@ -285,6 +290,11 @@ func shouldManipulateContainersInPodAfterUpgrade(t *testing.T, rSvc cri.RuntimeS
 		require.NoError(t, rSvc.StopContainer(cntr1, 0))
 		checkContainerState(t, rSvc, cntr1, criruntime.ContainerState_CONTAINER_EXITED)

+		cntr1DataDir := podCtx.containerDataDir(cntr1)
+		t.Logf("Container %s's data dir %s should be remained until RemoveContainer", cntr1, cntr1DataDir)
+		_, err = os.Stat(cntr1DataDir)
+		require.NoError(t, err)
+
 		t.Logf("Starting created container %s", cntr2)
 		checkContainerState(t, rSvc, cntr2, criruntime.ContainerState_CONTAINER_CREATED)

@@ -297,14 +307,34 @@ func shouldManipulateContainersInPodAfterUpgrade(t *testing.T, rSvc cri.RuntimeS

 		t.Logf("Removing exited container %s", cntr3)
 		checkContainerState(t, rSvc, cntr3, criruntime.ContainerState_CONTAINER_EXITED)
+
+		cntr3DataDir := podCtx.containerDataDir(cntr3)
+		_, err = os.Stat(cntr3DataDir)
+		require.NoError(t, err)
+
 		require.NoError(t, rSvc.RemoveContainer(cntr3))

+		t.Logf("Container %s's data dir %s should be deleted after RemoveContainer", cntr3, cntr3DataDir)
+		_, err = os.Stat(cntr3DataDir)
+		require.True(t, os.IsNotExist(err))
+
 		// Create a new container in the previous pod, start, stop, and remove it
-		// TODO(fuweid): make svc re-connect to new socket
-		podCtx.rSvc = rSvc
 		podCtx.createContainer("runinpreviouspod", busyboxImage,
 			criruntime.ContainerState_CONTAINER_EXITED,
 			WithCommand("sleep", "1d"))
+
+		podCtx.stop(true)
+		podDataDir := podCtx.dataDir()
+
+		t.Logf("Pod %s's data dir %s should be deleted", podCtx.id, podDataDir)
+		_, err = os.Stat(podDataDir)
+		require.True(t, os.IsNotExist(err))
+
+		cntrDataDir := filepath.Dir(cntr3DataDir)
+		t.Logf("Containers data dir %s should be empty", cntrDataDir)
+		ents, err := os.ReadDir(cntrDataDir)
+		require.NoError(t, err)
+		require.Len(t, ents, 0, cntrDataDir)
 	}
 }

@@ -378,9 +408,51 @@ func (pCtx *podTCtx) createContainer(name, imageRef string, wantedState crirunti
 	return cnID
 }

+// containerDataDir returns container metadata dir maintained by CRI plugin.
+func (pCtx *podTCtx) containerDataDir(cntrID string) string {
+	t := pCtx.t
+
+	// check if container exists
+	status, err := pCtx.rSvc.ContainerStatus(cntrID)
+	require.NoError(t, err)
+
+	cfg := criRuntimeInfo(t, pCtx.rSvc)
+
+	rootDir := cfg["rootDir"].(string)
+	return filepath.Join(rootDir, "containers", status.Id)
+}
+
+// dataDir returns pod metadata dir maintained by CRI plugin.
+func (pCtx *podTCtx) dataDir() string {
+	t := pCtx.t
+
+	cfg := criRuntimeInfo(t, pCtx.rSvc)
+	rootDir := cfg["rootDir"].(string)
+	return filepath.Join(rootDir, "sandboxes", pCtx.id)
+}
+
 // stop stops that pod.
-func (pCtx *podTCtx) stop() {
-	require.NoError(pCtx.t, pCtx.rSvc.StopPodSandbox(pCtx.id))
+func (pCtx *podTCtx) stop(remove bool) {
+	t := pCtx.t
+
+	t.Logf("Stopping pod %s", pCtx.id)
+	require.NoError(t, pCtx.rSvc.StopPodSandbox(pCtx.id))
+	if remove {
+		t.Logf("Removing pod %s", pCtx.id)
+		require.NoError(t, pCtx.rSvc.RemovePodSandbox(pCtx.id))
+	}
+}
+
+// criRuntimeInfo dumps CRI config.
+func criRuntimeInfo(t *testing.T, svc cri.RuntimeService) map[string]interface{} {
+	resp, err := svc.Status()
+	require.NoError(t, err)
+
+	cfg := map[string]interface{}{}
+	err = json.Unmarshal([]byte(resp.GetInfo()["config"]), &cfg)
+	require.NoError(t, err)
+
+	return cfg
 }

 // checkContainerState checks container's state.
