Commit 77b0576

feat: add jailer performance test
The test measures jailer startup time. It is parametrized by the number of parallel jailers starting up and the number of bind mount points present in the system.

Signed-off-by: Egor Lazarchuk <[email protected]>
1 parent 25d1628 commit 77b0576
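
A note on the parametrization: the two stacked pytest parametrize decorators in the diff below cross their value lists, so a single run of this test covers 3 × 4 = 12 (parallel, mounts) configurations, each reported under its own metric dimensions. A minimal sketch of that cross product, reusing the values from the test:

# Illustration only: the combinations produced by stacking the two
# @pytest.mark.parametrize decorators used in the test below.
from itertools import product

parallel_values = [1, 5, 10]
mount_counts = [0, 100, 300, 500]

for parallel, mounts in product(parallel_values, mount_counts):
    print(f"parallel={parallel}, mounts={mounts}")  # 12 configurations in total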

1 file changed: 88 additions & 0 deletions
@@ -0,0 +1,88 @@
# Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Performance benchmark for the jailer."""

import os
import shutil
from concurrent.futures import ProcessPoolExecutor

import pytest

from framework import utils
from framework.jailer import DEFAULT_CHROOT_PATH, JailerContext
from framework.properties import global_props


def setup_bind_mounts(tmp_path, n):
    """
    Create bind mount points. Their exact location does not matter;
    they just need to exist.
    """
    mounts_paths = tmp_path / "mounts"
    os.makedirs(mounts_paths)
    for m in range(n):
        mount_path = f"{mounts_paths}/mount{m}"
        os.makedirs(mount_path)
        utils.check_output(f"mount --bind {mount_path} {mount_path}")


def clean_up_mounts(tmp_path):
    """Clean up mounts and jailer dirs"""
    mounts_paths = tmp_path / "mounts"
    for d in os.listdir(mounts_paths):
        utils.check_output(f"umount {mounts_paths}/{d}")


@pytest.mark.nonci
@pytest.mark.parametrize("parallel", [1, 5, 10])
@pytest.mark.parametrize("mounts", [0, 100, 300, 500])
def test_jailer_startup(
    jailer_time_bin, tmp_path, microvm_factory, parallel, mounts, metrics
):
    """
    Test the overhead of jailer startup with and without bind mounts,
    across different parallelism options.
    """

    jailer_binary = microvm_factory.jailer_binary_path

    setup_bind_mounts(tmp_path, mounts)

    metrics.set_dimensions(
        {
            "instance": global_props.instance,
            "cpu_model": global_props.cpu_model,
            "performance_test": "test_jailer_startup",
            "parallel": str(parallel),
            "mounts": str(mounts),
        }
    )

    cmds = []
    for i in range(500):
        jailer = JailerContext(
            jailer_id=f"fakefc{i}",
            exec_file=jailer_time_bin,
            # Don't daemonize to get the stdout
            daemonize=False,
        )
        jailer.setup()

        cmd = [str(jailer_binary), *jailer.construct_param_list()]
        cmds.append(cmd)

    with ProcessPoolExecutor(max_workers=parallel) as executor:
        # Submit all commands and get results
        results = executor.map(utils.check_output, cmds)

        # Collect the results in submission order
        for result in results:
            end_time, start_time = result.stdout.split()
            metrics.put_metric(
                "startup",
                int(end_time) - int(start_time),
                unit="Microseconds",
            )

    clean_up_mounts(tmp_path)
    shutil.rmtree(DEFAULT_CHROOT_PATH)
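
The test only relies on an output contract from jailer_time_bin: the jailed binary prints two whitespace-separated microsecond timestamps on stdout, end time first, and the test records their difference as the startup metric. The binary itself is not part of this diff, so the following is a purely hypothetical Python stand-in that honours the same contract, assuming the jailer exec()s into the target binary (exec() preserves the process start time, so "now minus process start time" approximates the time the jailer spent on setup):

#!/usr/bin/env python3
# Hypothetical stand-in for jailer_time_bin (not the binary used by this test).
# Prints "<end_us> <start_us>" so that test_jailer_startup can compute
# end - start in microseconds.
import os

CLK_TCK = os.sysconf("SC_CLK_TCK")

with open("/proc/self/stat", encoding="ascii") as stat:
    # Field 22 is the process start time in clock ticks since boot; split after
    # the parenthesised comm field so spaces in the process name cannot break it.
    start_ticks = int(stat.read().rsplit(")", 1)[1].split()[19])

with open("/proc/uptime", encoding="ascii") as uptime:
    # First field is seconds since boot, the same time base as the value above.
    seconds_since_boot = float(uptime.read().split()[0])

start_us = start_ticks * 1_000_000 // CLK_TCK
end_us = int(seconds_since_boot * 1_000_000)

print(end_us, start_us)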
