Skip to content

Commit cf7f340

Browse files
manuelh-devfidencio
authored and committed
tests: Read and overwrite kernel_verity_parameters
Read the kernel_verity_parameters from the shim config and adjust the root hash for the negative test. Further, improve some of the test logic by using shared functions. This especially ensures we don't read the full journalctl logs on a node but only the portion of the logs we are actually supposed to look at. Signed-off-by: Manuel Huber <manuelh@nvidia.com>
1 parent 7958be8 commit cf7f340

File tree

2 files changed

+58
-11
lines changed

2 files changed

+58
-11
lines changed

tests/integration/kubernetes/k8s-measured-rootfs.bats

Lines changed: 15 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,11 @@ load "${BATS_TEST_DIRNAME}/../../common.bash"
99
load "${BATS_TEST_DIRNAME}/lib.sh"
1010
load "${BATS_TEST_DIRNAME}/tests_common.sh"
1111

12+
# Currently only the Go runtime provides the config path used here.
13+
# If a Rust hypervisor runs this test, mirror the enabling_hypervisor
14+
# pattern in tests/common.bash to select the correct runtime-rs config.
15+
shim_config_file="/opt/kata/share/defaults/kata-containers/configuration-${KATA_HYPERVISOR}.toml"
16+
1217
check_and_skip() {
1318
case "${KATA_HYPERVISOR}" in
1419
qemu-tdx|qemu-coco-dev)
@@ -29,26 +34,29 @@ setup() {
2934
setup_common || die "setup_common failed"
3035
}
3136

32-
@test "Test cannnot launch pod with measured boot enabled and incorrect hash" {
37+
@test "Test cannot launch pod with measured boot enabled and incorrect hash" {
3338
pod_config="$(new_pod_config nginx "kata-${KATA_HYPERVISOR}")"
3439
auto_generate_policy "${pod_config_dir}" "${pod_config}"
3540

3641
incorrect_hash="1111111111111111111111111111111111111111111111111111111111111111"
3742

38-
# To avoid editing that file on the worker node, here it will be
39-
# enabled via pod annotations.
43+
# Read verity parameters from config, then override via annotations.
44+
kernel_verity_params=$(exec_host "$node" "sed -n 's/^kernel_verity_params = \"\\(.*\\)\"/\\1/p' ${shim_config_file}" || true)
45+
[ -n "${kernel_verity_params}" ] || die "Missing kernel_verity_params in ${shim_config_file}"
46+
47+
kernel_verity_params=$(printf '%s\n' "$kernel_verity_params" | sed -E "s/root_hash=[^,]*/root_hash=${incorrect_hash}/")
4048
set_metadata_annotation "$pod_config" \
41-
"io.katacontainers.config.hypervisor.kernel_params" \
42-
"rootfs_verity.scheme=dm-verity rootfs_verity.hash=$incorrect_hash"
49+
"io.katacontainers.config.hypervisor.kernel_verity_params" \
50+
"${kernel_verity_params}"
4351
# Run on a specific node so we know from where to inspect the logs
4452
set_node "$pod_config" "$node"
4553

4654
# For debug sake
4755
echo "Pod $pod_config file:"
4856
cat $pod_config
49-
kubectl apply -f $pod_config
5057

51-
waitForProcess "60" "3" "exec_host $node journalctl -t kata | grep \"verity: .* metadata block .* is corrupted\""
58+
assert_pod_container_creating "$pod_config"
59+
assert_logs_contain "$node" kata "${node_start_time}" "verity: .* metadata block .* is corrupted"
5260
}
5361

5462
teardown() {

tests/integration/kubernetes/lib.sh

Lines changed: 43 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -179,7 +179,7 @@ assert_pod_fail() {
179179
local container_config="$1"
180180
local duration="${2:-120}"
181181

182-
echo "In assert_pod_fail: $container_config"
182+
echo "In assert_pod_fail: ${container_config}"
183183
echo "Attempt to create the container but it should fail"
184184

185185
retry_kubectl_apply "${container_config}"
@@ -192,13 +192,13 @@ assert_pod_fail() {
192192
local sleep_time=5
193193
while true; do
194194
echo "Waiting for a container to fail"
195-
sleep ${sleep_time}
195+
sleep "${sleep_time}"
196196
elapsed_time=$((elapsed_time+sleep_time))
197197
if [[ $(kubectl get pod "${pod_name}" \
198198
-o jsonpath='{.status.containerStatuses[0].state.waiting.reason}') = *BackOff* ]]; then
199199
return 0
200200
fi
201-
if [ $elapsed_time -gt $duration ]; then
201+
if [[ "${elapsed_time}" -gt "${duration}" ]]; then
202202
echo "The container does not get into a failing state" >&2
203203
break
204204
fi
@@ -207,6 +207,46 @@ assert_pod_fail() {
207207

208208
}
209209

210+
# Create a pod then assert it remains in ContainerCreating.
211+
#
212+
# Parameters:
213+
# $1 - the pod configuration file.
214+
# $2 - the duration to wait (seconds). Defaults to 60. (optional)
215+
#
216+
assert_pod_container_creating() {
217+
local container_config="$1"
218+
local duration="${2:-60}"
219+
220+
echo "In assert_pod_container_creating: ${container_config}"
221+
echo "Attempt to create the container but it should stay in creating state"
222+
223+
retry_kubectl_apply "${container_config}"
224+
if ! pod_name=$(kubectl get pods -o jsonpath='{.items..metadata.name}'); then
225+
echo "Failed to create the pod"
226+
return 1
227+
fi
228+
229+
local elapsed_time=0
230+
local sleep_time=5
231+
while true; do
232+
sleep "${sleep_time}"
233+
elapsed_time=$((elapsed_time+sleep_time))
234+
reason=$(kubectl get pod "${pod_name}" -o jsonpath='{.status.containerStatuses[0].state.waiting.reason}' 2>/dev/null || true)
235+
phase=$(kubectl get pod "${pod_name}" -o jsonpath='{.status.phase}' 2>/dev/null || true)
236+
if [[ "${phase}" != "Pending" ]]; then
237+
echo "Expected pod to remain Pending, got phase: ${phase}" >&2
238+
return 1
239+
fi
240+
if [[ -n "${reason}" && "${reason}" != "ContainerCreating" ]]; then
241+
echo "Expected ContainerCreating, got: ${reason}" >&2
242+
return 1
243+
fi
244+
if [[ "${elapsed_time}" -ge "${duration}" ]]; then
245+
return 0
246+
fi
247+
done
248+
}
249+
210250
# Check the pulled rootfs on host for given node and sandbox_id
211251
#
212252
# Parameters:
@@ -381,4 +421,3 @@ get_node_kata_sandbox_id() {
381421
done
382422
echo $kata_sandbox_id
383423
}
384-

0 commit comments

Comments (0)