
Commit c86c8a1

integration: add case related to ContainerStats in upgrade suite
It's used to check that the new containerd release can parse metric data from an existing shim created by the previous release.

Signed-off-by: Wei Fu <[email protected]>
1 parent acec60f commit c86c8a1
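
For context, the call under test is CRI's ContainerStats. Below is a minimal client-side sketch (not part of the commit; the endpoint and container ID are placeholder assumptions) showing the getter chain the new test case asserts on:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// Assumption: containerd's default CRI endpoint.
	conn, err := grpc.NewClient("unix:///run/containerd/containerd.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	rSvc := runtimeapi.NewRuntimeServiceClient(conn)

	// "<container-id>" is a placeholder for a real container ID.
	resp, err := rSvc.ContainerStats(ctx,
		&runtimeapi.ContainerStatsRequest{ContainerId: "<container-id>"})
	if err != nil {
		log.Fatal(err)
	}

	// usage_bytes is parsed by containerd from the shim's metric data; this
	// is the same getter chain the new test asserts on.
	fmt.Println("usage_bytes:", resp.GetStats().GetMemory().GetUsageBytes().GetValue())
}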

integration/release_upgrade_linux_test.go

Lines changed: 76 additions & 2 deletions
@@ -56,8 +56,7 @@ func TestUpgrade(t *testing.T) {
 	t.Run("exec", runUpgradeTestCase(previousReleaseBinDir, execToExistingContainer))
 	t.Run("manipulate", runUpgradeTestCase(previousReleaseBinDir, shouldManipulateContainersInPodAfterUpgrade))
 	t.Run("recover-images", runUpgradeTestCase(previousReleaseBinDir, shouldRecoverExistingImages))
-	// TODO:
-	// Add stats/stop-existing-running-pods/...
+	t.Run("metrics", runUpgradeTestCase(previousReleaseBinDir, shouldParseMetricDataCorrectly))
 }
 
 func runUpgradeTestCase(
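
The new case plugs into the suite's two-phase contract. Roughly, as a hypothetical exampleCase with only the signatures taken from this file: the setup half runs while the previous release's binaries are active, and the returned upgradeVerifyCaseFunc runs once the daemon has been upgraded; a nil beforeUpgradeHookFunc means there is nothing to do right before the binaries are swapped.

// Hypothetical illustration of the upgrade-case contract used by
// runUpgradeTestCase; only the signatures come from this file.
func exampleCase(t *testing.T,
	rSvc cri.RuntimeService, iSvc cri.ImageManagerService) (upgradeVerifyCaseFunc, beforeUpgradeHookFunc) {

	// Phase 1 (hypothetical): create pods/containers against the previous release.

	verify := func(t *testing.T, rSvc cri.RuntimeService, _ cri.ImageManagerService) {
		// Phase 2 (hypothetical): assert the upgraded daemon still sees that state.
	}
	return verify, nil // nil: no pre-upgrade hook needed
}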
@@ -397,6 +396,81 @@ func shouldRecoverExistingImages(t *testing.T,
 	}, nil
 }
 
+// shouldParseMetricDataCorrectly checks that the new containerd release can
+// parse metric data from an existing shim created by the previous release.
+func shouldParseMetricDataCorrectly(t *testing.T,
+	rSvc cri.RuntimeService, iSvc cri.ImageManagerService) (upgradeVerifyCaseFunc, beforeUpgradeHookFunc) {
+
+	imageName := images.Get(images.BusyBox)
+	pullImagesByCRI(t, iSvc, imageName)
+
+	scriptVolume := t.TempDir()
+	scriptInHost := filepath.Join(scriptVolume, "run.sh")
+
+	fileSize := 1024 * 1024 * 96 // 96 MiB
+	require.NoError(t, os.WriteFile(scriptInHost, []byte(fmt.Sprintf(`#!/bin/sh
+set -euo pipefail
+
+head -c %d </dev/urandom >/tmp/log
+
+# increase page cache usage ({1..10} is a bashism, so use seq under busybox sh)
+for i in $(seq 1 10); do
+	cat /tmp/log > /dev/null
+done
+
+echo "ready"
+
+while true; do
+	cat /tmp/log > /dev/null
+	sleep 1
+done
+`, fileSize,
+	),
+	), 0600))
+
+	podLogDir := t.TempDir()
+	podCtx := newPodTCtx(t, rSvc, "running", "sandbox", WithPodLogDirectory(podLogDir))
+
+	scriptInContainer := "/run.sh"
+	cntrLogName := "running#0.log"
+
+	cntr := podCtx.createContainer("app", imageName,
+		criruntime.ContainerState_CONTAINER_RUNNING,
+		WithCommand("sh", scriptInContainer),
+		WithVolumeMount(scriptInHost, scriptInContainer),
+		WithLogPath(cntrLogName),
+	)
+
+	return func(t *testing.T, rSvc cri.RuntimeService, _ cri.ImageManagerService) {
+		checkContainerState(t, rSvc, cntr, criruntime.ContainerState_CONTAINER_RUNNING)
+
+		logPath := filepath.Join(podLogDir, cntrLogName)
+
+		t.Log("Warm-up page cache")
+		isReady := false
+		for i := 0; i < 30 && !isReady; i++ {
+			data, err := os.ReadFile(logPath)
+			require.NoError(t, err)
+
+			isReady = strings.Contains(string(data), "ready")
+
+			time.Sleep(1 * time.Second)
+		}
+		require.True(t, isReady, "warm-up page cache")
+
+		stats, err := rSvc.ContainerStats(cntr)
+		require.NoError(t, err)
+
+		data, err := json.MarshalIndent(stats, "", "  ")
+		require.NoError(t, err)
+		t.Logf("Dump container %s's metrics:\n%s", cntr, string(data))
+
+		// NOTE: Just in case part of the inactive cache has been reclaimed.
+		expectedBytes := uint64(fileSize * 2 / 3)
+		require.True(t, stats.GetMemory().GetUsageBytes().GetValue() > expectedBytes)
+	}, nil
+}
+
 func newPodTCtx(t *testing.T, rSvc cri.RuntimeService,
 	name, ns string, opts ...PodSandboxOpts) *podTCtx {
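
Why the script warms the page cache: the memory usage containerd reports as usage_bytes includes the page cache charged to the container's cgroup, so repeatedly reading the 96 MiB file keeps usage comfortably above the asserted 2/3 threshold (64 MiB) even if some inactive cache is reclaimed. A standalone sketch of where that accounting is visible on a cgroup v2 host (reading the root cgroup here as an assumption; a container's cgroup lives in a subdirectory):

package main

import (
	"bufio"
	"fmt"
	"log"
	"os"
	"strings"
)

func main() {
	// Assumption: cgroup v2 unified hierarchy. The "file" field of
	// memory.stat is the page cache charged to this cgroup.
	f, err := os.Open("/sys/fs/cgroup/memory.stat")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	sc := bufio.NewScanner(f)
	for sc.Scan() {
		if v, ok := strings.CutPrefix(sc.Text(), "file "); ok {
			fmt.Println("page cache bytes:", v)
		}
	}
	if err := sc.Err(); err != nil {
		log.Fatal(err)
	}
}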
