diff --git a/tests/framework/microvm.py b/tests/framework/microvm.py index 6c550e8c687..a46f742d0a2 100644 --- a/tests/framework/microvm.py +++ b/tests/framework/microvm.py @@ -313,10 +313,14 @@ def kill(self): if self.screen_pid: os.kill(self.screen_pid, signal.SIGKILL) except: - LOG.error( + msg = ( "Failed to kill Firecracker Process. Did it already die (or did the UFFD handler process die and take it down)?" + if self.uffd_handler + else "Failed to kill Firecracker Process. Did it already die?" ) - LOG.error(self.log_data) + + self._dump_debug_information(msg) + raise # if microvm was spawned then check if it gets killed @@ -1058,7 +1062,9 @@ def ssh_iface(self, iface_idx=0): user="root", host=guest_ip, control_path=Path(self.chroot()) / f"ssh-{iface_idx}.sock", - on_error=self._dump_debug_information, + on_error=lambda exc: self._dump_debug_information( + f"Failure executing command via SSH in microVM: {exc}" + ), ) self._connections.append(connection) return connection @@ -1080,17 +1086,18 @@ def thread_backtraces(self): ) return "\n".join(backtraces) - def _dump_debug_information(self, exc: Exception): + def _dump_debug_information(self, what: str): """ Dumps debug information about this microvm Used for example when running a command inside the guest via `SSHConnection.check_output` fails. """ - print( - f"Failure executing command via SSH in microVM: {exc}\n\n" - f"Firecracker logs:\n{self.log_data}\n" - f"Thread backtraces:\n{self.thread_backtraces}" - ) + LOG.error(what) + LOG.error("Firecracker logs:\n%s", self.log_data) + if self.uffd_handler: + LOG.error("Uffd logs:\n%s", self.uffd_handler.log_data) + if not self._killed: + LOG.error("Thread backtraces:\n%s", self.thread_backtraces) def wait_for_ssh_up(self): """Wait for guest running inside the microVM to come up and respond.""" diff --git a/tests/integration_tests/performance/test_snapshot_ab.py b/tests/integration_tests/performance/test_snapshot_ab.py index b4f1b8f15dc..0ce24903743 100644 --- a/tests/integration_tests/performance/test_snapshot_ab.py +++ b/tests/integration_tests/performance/test_snapshot_ab.py @@ -249,3 +249,29 @@ def test_population_latency( break else: raise RuntimeError("UFFD handler did not print population latency after 5s") + + +def test_snapshot_create_latency( + microvm_factory, + guest_kernel_linux_5_10, + rootfs, + metrics, +): + """Measure the latency of creating a Full snapshot""" + + vm = microvm_factory.build(guest_kernel_linux_5_10, rootfs, monitor_memory=False) + vm.spawn() + vm.basic_config(vcpu_count=2, mem_size_mib=512) + vm.start() + vm.pin_threads(0) + + metrics.set_dimensions( + {**vm.dimensions, "performance_test": "test_snapshot_create_latency"} + ) + + for _ in range(ITERATIONS): + vm.snapshot_full() + fc_metrics = vm.flush_metrics() + + value = fc_metrics["latencies_us"]["full_create_snapshot"] / USEC_IN_MSEC + metrics.put_metric("latency", value, "Milliseconds") diff --git a/tests/integration_tests/performance/test_snapshot_perf.py b/tests/integration_tests/performance/test_snapshot_perf.py deleted file mode 100644 index f1c13e7be25..00000000000 --- a/tests/integration_tests/performance/test_snapshot_perf.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. -# SPDX-License-Identifier: Apache-2.0 -"""Basic tests scenarios for snapshot save/restore.""" - -# How many latencies do we sample per test. 
-SAMPLE_COUNT = 3 -USEC_IN_MSEC = 1000 - - -def snapshot_create_producer(vm): - """Produce results for snapshot create tests.""" - vm.snapshot_full() - metrics = vm.flush_metrics() - - value = metrics["latencies_us"]["full_create_snapshot"] / USEC_IN_MSEC - - print(f"Latency {value} ms") - - return value - - -def test_snapshot_create_latency( - microvm_factory, - guest_kernel_linux_5_10, - rootfs, - metrics, -): - """Measure the latency of creating a Full snapshot""" - - vm = microvm_factory.build(guest_kernel_linux_5_10, rootfs, monitor_memory=False) - vm.spawn() - vm.basic_config(vcpu_count=2, mem_size_mib=512) - vm.start() - vm.pin_threads(0) - - metrics.set_dimensions( - {**vm.dimensions, "performance_test": "test_snapshot_create_latency"} - ) - for _ in range(SAMPLE_COUNT): - metrics.put_metric("latency", snapshot_create_producer(vm), "Milliseconds")
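
Note for reviewers: the `ssh_iface()` change above swaps `on_error=self._dump_debug_information` for a lambda because `_dump_debug_information` now takes a message string rather than the exception object. That relies on `SSHConnection.check_output` invoking the callback with the raised exception before propagating it, which is not visible in this diff. The sketch below is illustrative only; the transport and any names other than `check_output`/`on_error` are hypothetical.

```python
# Illustrative sketch of the on_error contract assumed by ssh_iface() above.
# SSHConnection internals are not part of this patch; the ssh subprocess
# transport shown here is a placeholder.
import subprocess
from typing import Callable, Optional


class SSHConnection:
    """Minimal stand-in showing when on_error fires."""

    def __init__(self, host: str, on_error: Optional[Callable[[Exception], None]] = None):
        self.host = host
        # microvm.py passes a lambda that formats the exception into a
        # message string for Microvm._dump_debug_information(what: str),
        # instead of handing the exception object over directly.
        self.on_error = on_error

    def check_output(self, cmd: str) -> str:
        try:
            res = subprocess.run(
                ["ssh", self.host, cmd],
                check=True,
                capture_output=True,
                text=True,
            )
            return res.stdout
        except Exception as exc:
            if self.on_error is not None:
                # Dump Firecracker/UFFD logs and thread backtraces while the
                # microVM state is still inspectable, then re-raise.
                self.on_error(exc)
            raise
```

Under this contract, the SSH path and the `kill()` path converge on the same `str`-based `_dump_debug_information`, which now also emits UFFD handler logs when a handler is attached and skips thread backtraces once the process has been killed.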