diff --git a/.buildkite/pipeline_perf.py b/.buildkite/pipeline_perf.py
index 66a9314f2d4..c0c6606f20b 100755
--- a/.buildkite/pipeline_perf.py
+++ b/.buildkite/pipeline_perf.py
@@ -65,6 +65,11 @@
         "tests": "integration_tests/performance/test_jailer.py",
         "devtool_opts": "-c 1-10 -m 0",
     },
+    "pmem": {
+        "label": "💿 Pmem Performance",
+        "tests": "integration_tests/performance/test_pmem.py",
+        "devtool_opts": "-c 1-10 -m 0",
+    },
 }
 
 REVISION_A = os.environ.get("REVISION_A")
diff --git a/tests/framework/utils_fio.py b/tests/framework/utils_fio.py
new file mode 100644
index 00000000000..8aee01232b3
--- /dev/null
+++ b/tests/framework/utils_fio.py
@@ -0,0 +1,200 @@
+# Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+"""File containing utility methods for fio-based performance tests"""
+
+import json
+import os
+from enum import Enum
+from pathlib import Path
+
+from framework.utils import CmdBuilder
+
+DEFAULT_RUNTIME_SEC = 30
+DEFAULT_WARMUP_SEC = 10
+
+
+class Mode(str, Enum):
+    """
+    Modes of fio operation
+    """
+
+    # Sequential reads.
+    READ = "read"
+    # Sequential writes.
+    WRITE = "write"
+    # Sequential trims (Linux block devices and SCSI character devices only).
+    TRIM = "trim"
+    # Random reads.
+    RANDREAD = "randread"
+    # Random writes.
+    RANDWRITE = "randwrite"
+    # Random trims (Linux block devices and SCSI character devices only).
+    RANDTRIM = "randtrim"
+    # Sequential mixed reads and writes.
+    READWRITE = "readwrite"
+    # Random mixed reads and writes.
+    RANDRW = "randrw"
+
+
+class Engine(str, Enum):
+    """
+    Fio backend engines
+    """
+
+    LIBAIO = "libaio"
+    PSYNC = "psync"
+
+
+def build_cmd(
+    file_path: str,
+    file_size_mb: int | None,
+    block_size: int,
+    mode: Mode,
+    num_jobs: int,
+    io_engine: Engine,
+    runtime: int | None = DEFAULT_RUNTIME_SEC,
+    warmup_time: int | None = DEFAULT_WARMUP_SEC,
+    write_logs: bool = True,
+) -> str:
+    """Build the fio command line"""
+
+    cmd = (
+        CmdBuilder("fio")
+        .with_arg(f"--name={mode.value}-{block_size}")
+        .with_arg(f"--filename={file_path}")
+    )
+
+    if file_size_mb:
+        cmd = cmd.with_arg(f"--size={file_size_mb}M")
+
+    cmd = cmd.with_arg(f"--bs={block_size}")
+
+    if runtime and warmup_time:
+        cmd = (
+            cmd.with_arg("--time_based=1")
+            .with_arg(f"--runtime={runtime}")
+            .with_arg(f"--ramp_time={warmup_time}")
+        )
+
+    cmd = (
+        cmd.with_arg(f"--rw={mode.value}")
+        .with_arg("--direct=1")
+        .with_arg("--randrepeat=0")
+        .with_arg(f"--ioengine={io_engine.value}")
+        .with_arg("--iodepth=32")
+        .with_arg(f"--numjobs={num_jobs}")
+        # Set affinity of the entire fio process to a set of vCPUs equal
+        # in size to the number of workers
+        .with_arg(f"--cpus_allowed={','.join(str(i) for i in range(num_jobs))}")
+        # Instruct fio to pin one worker per vCPU
+        .with_arg("--cpus_allowed_policy=split")
+        .with_arg("--output-format=json+")
+        .with_arg("--output=./fio.json")
+    )
+
+    if write_logs:
+        cmd = cmd.with_arg("--log_avg_msec=1000").with_arg(
+            f"--write_bw_log={mode.value}"
+        )
+        # Latency measurements only make sense for the psync engine
+        if io_engine == Engine.PSYNC:
+            cmd = cmd.with_arg(f"--write_lat_log={mode.value}")
+
+    return cmd.build()
+
+
+class LogType(Enum):
+    """Fio log types"""
+
+    BW = "_bw"
+    CLAT = "_clat"
+
+
+def process_log_files(root_dir: Path, log_type: LogType) -> ([[int]], [[int]]):
+    """
+    Parses fio logs which have the form:
+    1000, 2007920, 0, 0, 0
+    1000, 2005276, 1, 0, 0
+    2000, 1996240, 0, 0, 0
+    2000, 1993861, 1, 0, 0
+    ...
+    where the first column is the timestamp, the second is the bw/clat value
+    and the third is the direction
+
+    The logs directory will look something like this:
+    readwrite_bw.1.log
+    readwrite_bw.2.log
+    readwrite_clat.1.log
+    readwrite_clat.2.log
+    readwrite_lat.1.log
+    readwrite_lat.2.log
+    readwrite_slat.1.log
+    readwrite_slat.2.log
+
+       job0             job1
+    read  write      read  write
+    [..]  [..]       [..]  [..]
+      |     |          |     |
+      |   --|--------  -----
+      |   |      ------|    |
+    [[], []]        [[], []]
+     reads           writes
+
+    The output is 2 arrays: an array of reads and an array of writes
+    """
+    paths = []
+    for item in os.listdir(root_dir):
+        if item.endswith(".log") and log_type.value in item:
+            paths.append(Path(root_dir / item))
+
+    if not paths:
+        return [], []
+
+    reads = []
+    writes = []
+    for path in sorted(paths):
+        lines = path.read_text("UTF-8").splitlines()
+        read_values = []
+        write_values = []
+        for line in lines:
+            # See https://fio.readthedocs.io/en/latest/fio_doc.html#log-file-formats
+            _, value, direction, _ = line.split(",", maxsplit=3)
+            value = int(value.strip())
+
+            match direction.strip():
+                case "0":
+                    read_values.append(value)
+                case "1":
+                    write_values.append(value)
+                case _:
+                    assert False
+
+        reads.append(read_values)
+        writes.append(write_values)
+    return reads, writes
+
+
+def process_json_files(root_dir: Path) -> ([[int]], [[int]]):
+    """
+    Reads `bw_bytes` values from fio*.json files and
+    packs them into 2 arrays of bw_reads and bw_writes.
+    Each entry is itself an array of per-job values from one file.
+    """
+    paths = []
+    for item in os.listdir(root_dir):
+        if item.endswith(".json") and "fio" in item:
+            paths.append(Path(root_dir / item))
+
+    bw_reads = []
+    bw_writes = []
+    for path in sorted(paths):
+        data = json.loads(path.read_text("UTF-8"))
+        reads = []
+        writes = []
+        for job in data["jobs"]:
+            if "read" in job:
+                reads.append(job["read"]["bw_bytes"])
+            if "write" in job:
+                writes.append(job["write"]["bw_bytes"])
+        bw_reads.append(reads)
+        bw_writes.append(writes)
+    return bw_reads, bw_writes
diff --git a/tests/integration_tests/functional/test_pmem.py b/tests/integration_tests/functional/test_pmem.py
index 6e6ba865ee3..81c6acfced0 100644
--- a/tests/integration_tests/functional/test_pmem.py
+++ b/tests/integration_tests/functional/test_pmem.py
@@ -6,6 +6,7 @@
 import os
 
 import host_tools.drive as drive_tools
+from framework import utils
 
 ALIGNMENT = 2 << 20
 
@@ -140,3 +141,66 @@ def test_pmem_add_as_root_ro(uvm_plain_any, rootfs, microvm_factory):
     snapshot = vm.snapshot_full()
     restored_vm = microvm_factory.build_from_snapshot(snapshot)
     check_pmem_exist(restored_vm, 0, True, True, align(rootfs_size), "squashfs")
+
+
+def inside_buff_cache(vm) -> int:
+    """Get buffer/cache usage from inside the vm"""
+    _, stdout, _ = vm.ssh.check_output("free")
+    # Get the `buffer/cache` value of the `free` output, which represents
+    # the kernel page cache size
+    return int(stdout.splitlines()[1].split()[5])
+
+
+def outside_rssanon(vm) -> int:
+    """Get RssAnon usage from outside the vm"""
+    cmd = f"cat /proc/{vm.firecracker_pid}/status | grep RssAnon"
+    _, stdout, _ = utils.check_output(cmd)
+    return int(stdout.split()[1])
+
+
+def test_pmem_dax_memory_saving(
+    microvm_factory,
+    guest_kernel_acpi,
+    rootfs_rw,
+):
+    """
+    Test that booting from pmem with DAX enabled indeed saves memory in the
+    guest, because the guest no longer needs to use its page cache
+    """
+
+    # Boot from a block device
+    vm = microvm_factory.build(
+        guest_kernel_acpi, rootfs_rw, pci=True, monitor_memory=False
+    )
+    vm.spawn()
+    vm.basic_config()
+    vm.add_net_iface()
+    vm.start()
+
+    block_cache_usage = inside_buff_cache(vm)
+    block_rss_usage = outside_rssanon(vm)
+
+    # Boot from pmem with DAX enabled for the root device
+    vm_pmem = microvm_factory.build(
+        guest_kernel_acpi, rootfs_rw, pci=True, monitor_memory=False
+    )
+    vm_pmem.spawn()
+    vm_pmem.basic_config(
+        add_root_device=False,
+        boot_args="reboot=k panic=1 nomodule swiotlb=noforce console=ttyS0 rootflags=dax",
+    )
+    vm_pmem.add_net_iface()
+    vm_pmem.add_pmem("pmem", rootfs_rw, True, False)
+    vm_pmem.start()
+    pmem_cache_usage = inside_buff_cache(vm_pmem)
+    pmem_rss_usage = outside_rssanon(vm_pmem)
+
+    # The pmem cache usage should be much lower than the block drive cache usage.
+    # The 50% threshold is arbitrary, but it does provide a good guarantee
+    # that DAX is working
+    assert (
+        pmem_cache_usage < block_cache_usage * 0.5
+    ), f"{block_cache_usage} <= {pmem_cache_usage}"
+    # The RssAnon difference will be smaller, so no multiplier is applied
+    assert (
+        pmem_rss_usage < block_rss_usage
+    ), f"{block_rss_usage} <= {pmem_rss_usage}"
diff --git a/tests/integration_tests/performance/test_block.py b/tests/integration_tests/performance/test_block.py
index 8882ee0717c..7a9efc74609 100644
--- a/tests/integration_tests/performance/test_block.py
+++ b/tests/integration_tests/performance/test_block.py
@@ -3,14 +3,13 @@
 """Performance benchmark for block device emulation."""
 
 import concurrent
-import glob
 import os
-from pathlib import Path
 
 import pytest
 
+import framework.utils_fio as fio
 import host_tools.drive as drive_tools
-from framework.utils import CmdBuilder, check_output, track_cpu_utilization
+from framework.utils import check_output, track_cpu_utilization
 
 # size of the block device used in the test, in MB
 BLOCK_DEVICE_SIZE_MB = 2048
@@ -44,41 +43,21 @@ def prepare_microvm_for_test(microvm):
     check_output("echo 3 > /proc/sys/vm/drop_caches")
 
 
-def run_fio(microvm, mode, block_size, test_output_dir, fio_engine="libaio"):
+def run_fio(
+    microvm, mode: fio.Mode, block_size: int, test_output_dir, fio_engine: fio.Engine
+):
     """Run a fio test in the specified mode with block size bs."""
-    cmd = (
-        CmdBuilder("fio")
-        .with_arg(f"--name={mode}-{block_size}")
-        .with_arg(f"--numjobs={microvm.vcpus_count}")
-        .with_arg(f"--runtime={RUNTIME_SEC}")
-        .with_arg("--time_based=1")
-        .with_arg(f"--ramp_time={WARMUP_SEC}")
-        .with_arg("--filename=/dev/vdb")
-        .with_arg("--direct=1")
-        .with_arg(f"--rw={mode}")
-        .with_arg("--randrepeat=0")
-        .with_arg(f"--bs={block_size}")
-        .with_arg(f"--size={BLOCK_DEVICE_SIZE_MB}M")
-        .with_arg(f"--ioengine={fio_engine}")
-        .with_arg("--iodepth=32")
-        # Set affinity of the entire fio process to a set of vCPUs equal in size to number of workers
-        .with_arg(
-            f"--cpus_allowed={','.join(str(i) for i in range(microvm.vcpus_count))}"
-        )
-        # Instruct fio to pin one worker per vcpu
-        .with_arg("--cpus_allowed_policy=split")
-        .with_arg("--log_avg_msec=1000")
-        .with_arg(f"--write_bw_log={mode}")
-        .with_arg("--output-format=json+")
-        .with_arg("--output=/tmp/fio.json")
+    cmd = fio.build_cmd(
+        "/dev/vdb",
+        BLOCK_DEVICE_SIZE_MB,
+        block_size,
+        mode,
+        microvm.vcpus_count,
+        fio_engine,
+        RUNTIME_SEC,
+        WARMUP_SEC,
     )
-
-    # Latency measurements only make sence for psync engine
-    if fio_engine == "psync":
-        cmd = cmd.with_arg(f"--write_lat_log={mode}")
-
-    cmd = cmd.build()
-
     prepare_microvm_for_test(microvm)
 
     # Start the CPU load monitor.
@@ -101,65 +80,30 @@ def run_fio(microvm, mode, block_size, test_output_dir, fio_engine="libaio"):
     return cpu_load_future.result()
 
 
-def process_fio_log_files(root_dir, logs_glob):
-    """
-    Parses all fio log files in the root_dir matching the given glob and
-    yields tuples of same-timestamp read and write metrics
-    """
-    # We specify `root_dir` for `glob.glob` because otherwise it will
-    # struggle with directory with names like:
-    # test_block_performance[vmlinux-5.10.233-Sync-bs4096-randread-1vcpu]
-    data = [
-        Path(root_dir / pathname).read_text("UTF-8").splitlines()
-        for pathname in glob.glob(logs_glob, root_dir=root_dir)
-    ]
-
-    # If not data found, there is nothing to iterate over
-    if not data:
-        return [], []
-
-    for tup in zip(*data):
-        read_values = []
-        write_values = []
-
-        for line in tup:
-            # See https://fio.readthedocs.io/en/latest/fio_doc.html#log-file-formats
-            _, value, direction, _ = line.split(",", maxsplit=3)
-            value = int(value.strip())
-
-            match direction.strip():
-                case "0":
-                    read_values.append(value)
-                case "1":
-                    write_values.append(value)
-                case _:
-                    assert False
-
-        yield read_values, write_values
-
-
 def emit_fio_metrics(logs_dir, metrics):
-    """Parses the fio logs in `{logs_dir}/*_[clat|bw].*.log and emits their contents as CloudWatch metrics"""
-    for bw_read, bw_write in process_fio_log_files(logs_dir, "*_bw.*.log"):
-        if bw_read:
-            metrics.put_metric("bw_read", sum(bw_read), "Kilobytes/Second")
-        if bw_write:
-            metrics.put_metric("bw_write", sum(bw_write), "Kilobytes/Second")
-
-    for lat_read, lat_write in process_fio_log_files(logs_dir, "*_clat.*.log"):
-        # latency values in fio logs are in nanoseconds, but cloudwatch only supports
-        # microseconds as the more granular unit, so need to divide by 1000.
-        for value in lat_read:
+    """Parses the fio logs in `logs_dir` and emits their contents as CloudWatch metrics"""
+    bw_reads, bw_writes = fio.process_log_files(logs_dir, fio.LogType.BW)
+    for tup in zip(*bw_reads):
+        metrics.put_metric("bw_read", sum(tup), "Kilobytes/Second")
+    for tup in zip(*bw_writes):
+        metrics.put_metric("bw_write", sum(tup), "Kilobytes/Second")
+
+    clat_reads, clat_writes = fio.process_log_files(logs_dir, fio.LogType.CLAT)
+    # Latency values in fio logs are in nanoseconds, but CloudWatch only supports
+    # microseconds as the most granular unit, so we need to divide by 1000.
+    for tup in zip(*clat_reads):
+        for value in tup:
             metrics.put_metric("clat_read", value / 1000, "Microseconds")
-        for value in lat_write:
+    for tup in zip(*clat_writes):
+        for value in tup:
             metrics.put_metric("clat_write", value / 1000, "Microseconds")
 
 
 @pytest.mark.nonci
 @pytest.mark.parametrize("vcpus", [1, 2], ids=["1vcpu", "2vcpu"])
-@pytest.mark.parametrize("fio_mode", ["randread", "randwrite"])
+@pytest.mark.parametrize("fio_mode", [fio.Mode.RANDREAD, fio.Mode.RANDWRITE])
 @pytest.mark.parametrize("fio_block_size", [4096], ids=["bs4096"])
-@pytest.mark.parametrize("fio_engine", ["libaio", "psync"])
+@pytest.mark.parametrize("fio_engine", [fio.Engine.LIBAIO, fio.Engine.PSYNC])
 def test_block_performance(
     uvm_plain_acpi,
     vcpus,
@@ -208,7 +152,7 @@ def test_block_performance(
 
 @pytest.mark.nonci
 @pytest.mark.parametrize("vcpus", [1, 2], ids=["1vcpu", "2vcpu"])
-@pytest.mark.parametrize("fio_mode", ["randread"])
+@pytest.mark.parametrize("fio_mode", [fio.Mode.RANDREAD])
 @pytest.mark.parametrize("fio_block_size", [4096], ids=["bs4096"])
 def test_block_vhost_user_performance(
     uvm_plain_acpi,
@@ -246,7 +190,7 @@ def test_block_vhost_user_performance(
     next_cpu = vm.pin_threads(0)
     vm.disks_vhost_user["scratch"].pin(next_cpu)
 
-    cpu_util = run_fio(vm, fio_mode, fio_block_size, results_dir)
+    cpu_util = run_fio(vm, fio_mode, fio_block_size, results_dir, fio.Engine.LIBAIO)
 
     emit_fio_metrics(results_dir, metrics)
 
diff --git a/tests/integration_tests/performance/test_boottime.py b/tests/integration_tests/performance/test_boottime.py
index d80bf026a39..59a5ec56f3c 100644
--- a/tests/integration_tests/performance/test_boottime.py
+++ b/tests/integration_tests/performance/test_boottime.py
@@ -95,18 +95,37 @@ def to_ms(v, unit):
 
 
 def launch_vm_with_boot_timer(
-    microvm_factory, guest_kernel_acpi, rootfs_rw, vcpu_count, mem_size_mib, pci_enabled
+    microvm_factory,
+    guest_kernel_acpi,
+    rootfs_rw,
+    vcpu_count,
+    mem_size_mib,
+    pci_enabled,
+    boot_from_pmem,
 ):
     """Launches a microVM with guest-timer and returns the reported metrics for it"""
-    vm = microvm_factory.build(guest_kernel_acpi, rootfs_rw, pci=pci_enabled)
+    vm = microvm_factory.build(
+        guest_kernel_acpi, rootfs_rw, pci=pci_enabled, monitor_memory=False
+    )
     vm.jailer.extra_args.update({"boot-timer": None})
     vm.spawn()
-    vm.basic_config(
-        vcpu_count=vcpu_count,
-        mem_size_mib=mem_size_mib,
-        boot_args=DEFAULT_BOOT_ARGS + " init=/usr/local/bin/init",
-        enable_entropy_device=True,
-    )
+    if not boot_from_pmem:
+        vm.basic_config(
+            vcpu_count=vcpu_count,
+            mem_size_mib=mem_size_mib,
+            boot_args=DEFAULT_BOOT_ARGS + " init=/usr/local/bin/init",
+            enable_entropy_device=True,
+        )
+    else:
+        vm.basic_config(
+            add_root_device=False,
+            vcpu_count=vcpu_count,
+            mem_size_mib=mem_size_mib,
+            boot_args=DEFAULT_BOOT_ARGS + " init=/usr/local/bin/init rootflags=dax",
+            enable_entropy_device=True,
+        )
+        vm.add_pmem("pmem", rootfs_rw, True, True)
+
     vm.add_net_iface()
     vm.start()
     vm.pin_threads(0)
@@ -119,7 +138,7 @@ def launch_vm_with_boot_timer(
 def test_boot_timer(microvm_factory, guest_kernel_acpi, rootfs, pci_enabled):
     """Tests that the boot timer device works"""
     launch_vm_with_boot_timer(
-        microvm_factory, guest_kernel_acpi, rootfs, 1, 128, pci_enabled
+        microvm_factory, guest_kernel_acpi, rootfs, 1, 128, pci_enabled, False
     )
 
 
@@ -127,6 +146,7 @@
     "vcpu_count,mem_size_mib",
     [(1, 128), (1, 1024), (2, 2048), (4, 4096)],
 )
+@pytest.mark.parametrize("boot_from_pmem", [True, False], ids=["PmemBoot", "BlockBoot"])
 @pytest.mark.nonci
 def test_boottime(
     microvm_factory,
@@ -134,6 +154,7 @@ def test_boottime(
     rootfs_rw,
     vcpu_count,
     mem_size_mib,
+    boot_from_pmem,
     pci_enabled,
     metrics,
 ):
@@ -147,12 +168,14 @@ def test_boottime(
             vcpu_count,
             mem_size_mib,
             pci_enabled,
+            boot_from_pmem,
         )
 
         if i == 0:
            metrics.set_dimensions(
                 {
                     "performance_test": "test_boottime",
+                    "boot_from_pmem": str(boot_from_pmem),
                     **vm.dimensions,
                 }
             )
diff --git a/tests/integration_tests/performance/test_pmem.py b/tests/integration_tests/performance/test_pmem.py
new file mode 100644
index 00000000000..01c250f204d
--- /dev/null
+++ b/tests/integration_tests/performance/test_pmem.py
@@ -0,0 +1,201 @@
+# Copyright 2025 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+# SPDX-License-Identifier: Apache-2.0
+"""Performance benchmark for the pmem device"""
+
+import concurrent
+import os
+from pathlib import Path
+
+import pytest
+
+import framework.utils_fio as fio
+import host_tools.drive as drive_tools
+from framework.utils import track_cpu_utilization
+
+PMEM_DEVICE_SIZE_MB = 2048
+PMEM_DEVICE_SIZE_SINGLE_READ_MB = 512
+WARMUP_SEC = 10
+RUNTIME_SEC = 30
+GUEST_MEM_MIB = 1024
+
+
+def run_fio(
+    microvm, test_output_dir, mode: fio.Mode, block_size: int, fio_engine: fio.Engine
+):
+    """Run a normal fio test"""
+    cmd = fio.build_cmd(
+        "/dev/pmem0",
+        PMEM_DEVICE_SIZE_MB,
+        block_size,
+        mode,
+        microvm.vcpus_count,
+        fio_engine,
+        RUNTIME_SEC,
+        WARMUP_SEC,
+    )
+
+    with concurrent.futures.ThreadPoolExecutor() as executor:
+        cpu_load_future = executor.submit(
+            track_cpu_utilization,
+            microvm.firecracker_pid,
+            RUNTIME_SEC,
+            omit=WARMUP_SEC,
+        )
+
+        rc, _, stderr = microvm.ssh.run(f"cd /tmp; {cmd}")
+        assert rc == 0, stderr
+        assert stderr == ""
+
+        microvm.ssh.scp_get("/tmp/fio.json", test_output_dir)
+        microvm.ssh.scp_get("/tmp/*.log", test_output_dir)
+
+    return cpu_load_future.result()
+
+
+def emit_fio_metrics(logs_dir, metrics):
+    """Parses the fio logs and emits bandwidth and latency values as metrics"""
+    bw_reads, bw_writes = fio.process_log_files(logs_dir, fio.LogType.BW)
+    for tup in zip(*bw_reads):
+        metrics.put_metric("bw_read", sum(tup), "Kilobytes/Second")
+    for tup in zip(*bw_writes):
+        metrics.put_metric("bw_write", sum(tup), "Kilobytes/Second")
+
+    clat_reads, clat_writes = fio.process_log_files(logs_dir, fio.LogType.CLAT)
+    # Latency values in fio logs are in nanoseconds, but CloudWatch only supports
+    # microseconds as the most granular unit, so we need to divide by 1000.
+    for tup in zip(*clat_reads):
+        for value in tup:
+            metrics.put_metric("clat_read", value / 1000, "Microseconds")
+    for tup in zip(*clat_writes):
+        for value in tup:
+            metrics.put_metric("clat_write", value / 1000, "Microseconds")
+
+
+@pytest.mark.nonci
+@pytest.mark.parametrize("vcpus", [1, 2], ids=["1vcpu", "2vcpu"])
+@pytest.mark.parametrize("fio_mode", [fio.Mode.RANDREAD, fio.Mode.RANDWRITE])
+@pytest.mark.parametrize("fio_block_size", [4096], ids=["bs4096"])
+@pytest.mark.parametrize("fio_engine", [fio.Engine.LIBAIO, fio.Engine.PSYNC])
+def test_pmem_performance(
+    uvm_plain_acpi,
+    vcpus,
+    fio_mode,
+    fio_block_size,
+    fio_engine,
+    metrics,
+    results_dir,
+):
+    """
+    Measure performance of the pmem device
+    """
+    vm = uvm_plain_acpi
+    vm.memory_monitor = None
+    vm.spawn()
+    vm.basic_config(vcpu_count=vcpus, mem_size_mib=GUEST_MEM_MIB)
+    vm.add_net_iface()
+    # Add a secondary pmem device for benchmark tests.
+    fs = drive_tools.FilesystemFile(
+        os.path.join(vm.fsfiles, "scratch"), PMEM_DEVICE_SIZE_MB
+    )
+    vm.add_pmem("scratch", fs.path, False, False)
+    vm.start()
+    vm.pin_threads(0)
+
+    metrics.set_dimensions(
+        {
+            "performance_test": "test_pmem_performance",
+            "fio_mode": fio_mode,
+            "fio_block_size": str(fio_block_size),
+            "fio_engine": fio_engine,
+            **vm.dimensions,
+        }
+    )
+
+    # Do a full read run before benchmarking to deal with shadow page faults.
+    # The impact of shadow page faults is tested in another test.
+    run_fio_single_read(vm, 0, results_dir, fio_block_size)
+
+    cpu_util = run_fio(vm, results_dir, fio_mode, fio_block_size, fio_engine)
+    emit_fio_metrics(results_dir, metrics)
+    for thread_name, values in cpu_util.items():
+        for value in values:
+            metrics.put_metric(f"cpu_utilization_{thread_name}", value, "Percent")
+
+
+def run_fio_single_read(microvm, run_index, test_output_dir, block_size: int):
+    """
+    Run a single full read test with fio.
+    The test is single threaded and uses only `libaio`, since we just need
+    to test a sequential read of the device
+    """
+    cmd = fio.build_cmd(
+        "/dev/pmem0",
+        None,
+        block_size,
+        fio.Mode.READ,
+        1,
+        fio.Engine.LIBAIO,
+        None,
+        None,
+        False,
+    )
+
+    rc, _, stderr = microvm.ssh.run(f"cd /tmp; {cmd}")
+    assert rc == 0, stderr
+    assert stderr == ""
+
+    log_path = Path(test_output_dir) / f"fio_{run_index}.json"
+    microvm.ssh.scp_get("/tmp/fio.json", log_path)
+
+
+def emit_fio_single_read_metrics(logs_dir, metrics):
+    """Process the JSON output of the fio command and emit `read` metrics"""
+    bw_reads, _ = fio.process_json_files(logs_dir)
+    for reads in bw_reads:
+        metrics.put_metric("bw_read", sum(reads) / 1000, "Kilobytes/Second")
+
+
+@pytest.mark.nonci
+@pytest.mark.parametrize("fio_block_size", [4096], ids=["bs4096"])
+def test_pmem_first_read(
+    microvm_factory,
+    guest_kernel_acpi,
+    rootfs,
+    fio_block_size,
+    metrics,
+    results_dir,
+):
+    """
+    Measure the performance of the first full read from the pmem device.
+    Values should be lower than in the normal perf test, since the first
+    read of each page also triggers a KVM-internal page fault,
+    which slows things down.
+    """
+
+    for i in range(10):
+        vm = microvm_factory.build(
+            guest_kernel_acpi, rootfs, pci=True, monitor_memory=False
+        )
+        vm.spawn()
+        vm.basic_config(mem_size_mib=GUEST_MEM_MIB)
+        vm.add_net_iface()
+
+        fs = drive_tools.FilesystemFile(
+            os.path.join(vm.fsfiles, "scratch"),
+            PMEM_DEVICE_SIZE_SINGLE_READ_MB,
+        )
+        vm.add_pmem("scratch", fs.path, False, False)
+
+        vm.start()
+        vm.pin_threads(0)
+
+        metrics.set_dimensions(
+            {
+                "performance_test": "test_pmem_first_read",
+                "fio_block_size": str(fio_block_size),
+                **vm.dimensions,
+            }
+        )
+        run_fio_single_read(vm, i, results_dir, fio_block_size)
+
+    emit_fio_single_read_metrics(results_dir, metrics)
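
Usage sketch for reviewers (illustrative only, not part of the patch): a minimal example of how the new framework.utils_fio helpers are meant to compose in a test. The device path, working-set size, job count and results_dir below are assumptions, not values taken from the diff.

import framework.utils_fio as fio

# Build a single fio invocation string; the defaults give a 30 s timed run
# with a 10 s ramp and per-second bandwidth logging (--write_bw_log).
cmd = fio.build_cmd(
    file_path="/dev/pmem0",      # device under test (assumed)
    file_size_mb=2048,           # working-set size in MB (assumed)
    block_size=4096,
    mode=fio.Mode.RANDREAD,
    num_jobs=2,                  # one fio worker per guest vCPU (assumed)
    io_engine=fio.Engine.LIBAIO,
)
# A test would run `cmd` inside the guest, copy the produced *_bw.*.log files
# back into `results_dir`, and then aggregate per-timestamp bandwidth across jobs:
# reads, writes = fio.process_log_files(results_dir, fio.LogType.BW)
# totals_per_second = [sum(sample) for sample in zip(*reads)]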