feat(tests): add disk performance benchmark tests #324
base: master
Changes from 7 commits
Commits: 500f788, f43d5eb, 577eff2, e58c459, efa4b26, 377d577, df0b40f, 65157ae, 63e2cf5, 718b41f
@@ -0,0 +1,166 @@
import itertools
import logging
import os
import tempfile
import urllib.request
from urllib.parse import urlparse
from uuid import uuid4

import pytest

from lib.commands import SSHCommandFailed
from lib.common import PackageManagerEnum

from .helpers import load_results_from_csv, str_to_tuple

MAX_LENGTH = 64 * (1024**3)  # 64GiB


# TODO: use vhd, qcow2, raw... when image_format support becomes available
@pytest.fixture(scope="module", params=["vhd"])
def image_format(request):
    return request.param


@pytest.fixture(scope="module")
def running_unix_vm_with_fio(running_unix_vm):
    vm = running_unix_vm
    snapshot = vm.snapshot()

    package_cmds = {
        PackageManagerEnum.APT_GET.value: "apt-get update && apt-get install -y fio",
        PackageManagerEnum.RPM.value: "yum install -y fio",
        PackageManagerEnum.UNKNOWN.value: "apk add fio",  # fall back to apk (e.g. Alpine)
    }

    package_manager = vm.detect_package_manager().value
    try:
        logging.info(f">> Installing fio with {package_cmds[package_manager]}")
        vm.ssh(package_cmds[package_manager])
    except SSHCommandFailed:
        raise RuntimeError("Unsupported package manager: could not install fio")

    yield vm

    # teardown
    try:
        snapshot.revert()
    finally:
        snapshot.destroy()


@pytest.fixture(scope="module")
def vdi_on_local_sr(host, local_sr_on_hostA1, image_format):
    sr = local_sr_on_hostA1
    vdi = sr.create_vdi("testVDI", MAX_LENGTH)  # , image_format=image_format)
    logging.info(f">> Created VDI {vdi.uuid} of type {image_format}")

    yield vdi

    # teardown
    logging.info(f"<< Destroying VDI {vdi.uuid}")
    vdi.destroy()


@pytest.fixture(scope="module")
def plugged_vbd(vdi_on_local_sr, running_unix_vm_with_fio):
    vm = running_unix_vm_with_fio
    vdi = vdi_on_local_sr
    vbd = vm.create_vbd("autodetect", vdi.uuid)

    logging.info(f">> Plugging VDI {vdi.uuid} on VM {vm.uuid}")
    vbd.plug()

    yield vbd

    # teardown
    logging.info(f"<< Unplugging VDI {vdi.uuid} from VM {vm.uuid}")
    vbd.unplug()
    vbd.destroy()


@pytest.fixture(scope="module")
def local_temp_dir():
    with tempfile.TemporaryDirectory() as tmpdir:
        yield tmpdir


@pytest.fixture(scope="module")
def temp_dir(running_unix_vm_with_fio):
    vm = running_unix_vm_with_fio
    tempdir = vm.ssh("mktemp -d")

    yield tempdir

    # teardown
    vm.ssh(f"rm -r {tempdir}")


def pytest_addoption(parser):
    system_memory = os.sysconf("SC_PAGE_SIZE") * os.sysconf("SC_PHYS_PAGES")

    parser.addoption(
        "--prev-csv",
        action="store",
        default=None,
        help="Path/URI to previous CSV results file for comparison",
    )
    parser.addoption(
        "--block-sizes",
        action="store",
        type=lambda value: str_to_tuple(value, sep=","),
        default=("4k", "16k", "64k", "1M"),
        help="Comma separated values of block sizes to test in disk benchmarks",
    )
    parser.addoption(
        "--file-sizes",
        action="store",
        type=lambda value: str_to_tuple(value, sep=","),
        default=("1G", "4G", f"{int((system_memory // (1024.**3)) * 2)}G"),
        help="Comma separated values of file sizes to test in disk benchmarks",
    )

Review comment (on the --file-sizes default): Small comment, with a suggested change.
Reply: fixed in 65157ae
    parser.addoption(
        "--modes",
        action="store",
        type=lambda value: str_to_tuple(value, sep=","),
        default=("read", "randread", "write", "randwrite"),
        help="Comma separated values of rw_modes to test in disk benchmarks",
    )
    parser.addoption(
        "--numjobs",
        action="store",
        default=1,
        help="Mapped to fio's --numjobs",
    )
    parser.addoption(
        "--iodepth",
        action="store",
        default=1,
        help="Mapped to fio's --iodepth",
    )


def pytest_generate_tests(metafunc):
    if {"block_size", "file_size", "rw_mode"} <= set(metafunc.fixturenames):
        block_sizes = metafunc.config.getoption("block_sizes")
        file_sizes = metafunc.config.getoption("file_sizes")
        modes = metafunc.config.getoption("modes")

        test_cases = list(itertools.product(block_sizes, file_sizes, modes))
        metafunc.parametrize("block_size,file_size,rw_mode", test_cases)
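With the default option values, this cartesian product yields 4 × 3 × 4 = 48 (block_size, file_size, rw_mode) combinations. A standalone sketch of the same expansion (the third file size shown is illustrative; the real one is derived from host memory at runtime):

```python
import itertools

block_sizes = ("4k", "16k", "64k", "1M")
file_sizes = ("1G", "4G", "64G")  # "64G" stands in for the 2x-RAM value computed at runtime
modes = ("read", "randread", "write", "randwrite")

# Same product that pytest_generate_tests feeds to metafunc.parametrize()
test_cases = list(itertools.product(block_sizes, file_sizes, modes))
print(len(test_cases))  # 48
print(test_cases[0])    # ('4k', '1G', 'read')
```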


@pytest.fixture(scope="session")
def prev_results(pytestconfig):
    csv_uri = pytestconfig.getoption("--prev-csv")
    if not csv_uri:
        return {}
    csv_path = csv_uri
    if urlparse(csv_uri).scheme != "":
        logging.info("Detected CSV path as a URL")
        csv_path = f"/tmp/{uuid4()}.csv"
        logging.info(f"Fetching CSV file from {csv_uri} to {csv_path}")
        urllib.request.urlretrieve(csv_uri, csv_path)
    if not os.path.exists(csv_path):
        raise FileNotFoundError(csv_path)
    return load_results_from_csv(csv_path)
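For reference, a minimal sketch of how the options declared above could be exercised by invoking pytest programmatically; the test directory and the previous-results URL are placeholders, not part of this PR:

```python
import pytest

# Hypothetical invocation: the test path and CSV URL are placeholders
# for whatever the CI actually uses.
pytest.main([
    "tests/disk_benchmark",
    "--block-sizes=4k,64k",
    "--file-sizes=1G",
    "--modes=read,randwrite",
    "--numjobs=2",
    "--iodepth=8",
    "--prev-csv=https://example.com/previous_results.csv",
])
```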
@@ -0,0 +1,52 @@
import csv
import os
import statistics
from datetime import datetime

system_memory = os.sysconf("SC_PAGE_SIZE") * os.sysconf("SC_PHYS_PAGES")


def log_result_csv(test_type, rw_mode, result_json, csv_path):
    job = result_json["jobs"][0]
    op_data = job[rw_mode.replace("rand", "")]
    bw_kbps = op_data["bw"]
    iops = op_data["iops"]
    latency = op_data["lat_ns"]["mean"]

    result = {
        "timestamp": datetime.now().isoformat(),
        "test": test_type,
        "mode": rw_mode,
        "bw_MBps": round(bw_kbps / 1024, 2),
        "IOPS": round(iops, 2),
        "latency": round(latency, 2),
    }

    file_exists = os.path.exists(csv_path)
    with open(csv_path, "a", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=result.keys())
        if not file_exists:
            writer.writeheader()
        writer.writerow(result)

    return result
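To illustrate the CSV row produced per run, a minimal fio-style JSON stub (values purely illustrative) can be fed through log_result_csv:

```python
# Made-up numbers for illustration; fio reports "bw" in KiB/s.
fake_result = {
    "jobs": [{
        "read": {
            "bw": 204800,                 # 204800 KiB/s -> 200.0 after /1024
            "iops": 50000.0,
            "lat_ns": {"mean": 81234.5},  # mean latency in nanoseconds
        }
    }]
}

row = log_result_csv("4k-1G-randread-vhd", "randread", fake_result, "/tmp/demo.csv")
# row == {"timestamp": "...", "test": "4k-1G-randread-vhd", "mode": "randread",
#         "bw_MBps": 200.0, "IOPS": 50000.0, "latency": 81234.5}
```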


def load_results_from_csv(csv_path):
    results = {}
    with open(csv_path, newline="") as f:
        reader = csv.DictReader(f)
        for row in reader:
            key = (row["test"], row["mode"])
            if key not in results:
                results[key] = []
            results[key].append(row)
    return results
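The returned mapping groups rows by (test, mode), one list entry per logged sample; note that csv.DictReader yields strings, which is why mean() below converts with float(). Expected shape, continuing the illustrative example above:

```python
results = load_results_from_csv("/tmp/demo.csv")
# {
#     ("4k-1G-randread-vhd", "randread"): [
#         {"timestamp": "...", "test": "4k-1G-randread-vhd", "mode": "randread",
#          "bw_MBps": "200.0", "IOPS": "50000.0", "latency": "81234.5"},
#         # ...one dict per run of the same configuration
#     ],
# }
```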


def mean(data, key):
    return statistics.mean([float(x[key]) for x in data if key in x])


def str_to_tuple(value, sep=","):
    return tuple(item.strip() for item in value.split(sep))
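Quick behaviour check of the two small helpers:

```python
assert str_to_tuple("4k, 16k ,64k") == ("4k", "16k", "64k")

samples = [{"IOPS": "100"}, {"IOPS": "120"}, {"latency": "5"}]
assert mean(samples, "IOPS") == 110.0  # rows missing the key are skipped
```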
@@ -0,0 +1,128 @@
import json
import logging
import os
import statistics
from datetime import datetime

import pytest

from lib.commands import SSHCommandFailed

from .helpers import load_results_from_csv, log_result_csv, mean

# Tests default settings #

CSV_FILE = f"/tmp/results_{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}.csv"

DEFAULT_SAMPLES_NUM = 10
DEFAULT_SIZE = "1G"
DEFAULT_BS = "4k"
DEFAULT_IODEPTH = 1
DEFAULT_NUMJOBS = 1
DEFAULT_FILE = "fio-testfile"


def run_fio(
    vm,
    test_name,
    rw_mode,
    temp_dir,
    local_temp_dir,
    bs=DEFAULT_BS,
    iodepth=DEFAULT_IODEPTH,
    size=DEFAULT_SIZE,
    numjobs=DEFAULT_NUMJOBS,
    file_path="",
):
    json_output_path = os.path.join(temp_dir, f"{test_name}.json")
    local_json_path = os.path.join(local_temp_dir, f"{test_name}.json")
    if not file_path:
        file_path = os.path.join(temp_dir, DEFAULT_FILE)
    fio_cmd = [
        "fio",
        f"--name={test_name}",
        f"--rw={rw_mode}",
        f"--bs={bs}",
        f"--iodepth={iodepth}",
        f"--size={size}",
        f"--filename={file_path}",
        "--direct=1",
        "--end_fsync=1",
        "--fsync_on_close=1",
        f"--numjobs={numjobs}",
        "--group_reporting",
        "--output-format=json",
        f"--output={json_output_path}",
    ]
    logging.debug(f"Running {fio_cmd}")
    try:
        vm.ssh(fio_cmd, check=True)
    except SSHCommandFailed as e:
        raise RuntimeError(f"fio failed for {test_name}: {e}")
    vm.scp(json_output_path, local_json_path, local_dest=True)
    logging.debug(f"Stored json at {local_json_path}")
    with open(local_json_path) as f:
        return json.load(f)
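For a concrete picture of what run_fio executes, here is a hypothetical call (the VM object and temp directories are assumed to come from the fixtures above) and, roughly, the command it builds:

```python
# Hypothetical values: temp_dir/local_temp_dir would come from the
# temp_dir and local_temp_dir fixtures, vm from running_unix_vm_with_fio.
result = run_fio(
    vm,
    test_name="4k-1G-randread-vhd",
    rw_mode="randread",
    temp_dir="/tmp/tmp.X1",
    local_temp_dir="/tmp/local",
    bs="4k",
    size="1G",
)
# Runs on the guest, roughly:
#   fio --name=4k-1G-randread-vhd --rw=randread --bs=4k --iodepth=1 --size=1G \
#       --filename=/tmp/tmp.X1/fio-testfile --direct=1 --end_fsync=1 \
#       --fsync_on_close=1 --numjobs=1 --group_reporting \
#       --output-format=json --output=/tmp/tmp.X1/4k-1G-randread-vhd.json
# then copies the JSON report back and returns it parsed.
```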


def assert_performance_not_degraded(current, previous, threshold=10):
    diffs = {}
    for metric in ("bw_MBps", "IOPS", "latency"):
        try:
            curr = mean(current, metric)
            prev = mean(previous, metric)
        except statistics.StatisticsError:
            logging.info(f"Missing metric ({metric}), skipping comparison")
            continue
        # Positive diff means degradation: higher latency, or lower bw/IOPS.
        diff = (curr - prev if metric == "latency" else prev - curr) / prev * 100
        assert (
            diff <= threshold
        ), f"{metric} changed by {diff:.2f}% (allowed {threshold}%)"
        diffs[metric] = diff

    logging.info("Performance difference summary:")
    for k, v in diffs.items():
        sign = "+" if v < 0 else "-"
        logging.info(f"- {k}: {sign}{abs(v):.2f}%")
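A worked example of the comparison logic: a drop from 100 MB/s to 85 MB/s gives (100 - 85) / 100 * 100 = 15%, which exceeds the default 10% threshold and fails the assertion; for latency the sign is flipped so that an increase counts as degradation. A small sketch, assuming the helpers above are importable:

```python
import pytest

prev = [{"bw_MBps": "100.0"}, {"bw_MBps": "100.0"}]
curr = [{"bw_MBps": "85.0"}, {"bw_MBps": "85.0"}]

# 15% bandwidth drop > 10% threshold -> the assert fires.
with pytest.raises(AssertionError):
    assert_performance_not_degraded(curr, prev)

# IOPS and latency are absent here, so mean() raises StatisticsError
# for them and the function just logs and skips those metrics.
```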
Review comment: It may be interesting to have a condition that logs a special message when performance is above a threshold, to signal a perf improvement.
Reply: That could be interesting. Both thresholds (for failure, and for the future positive message) could be made configurable through flags too.
Reply: done in 63e2cf5


class TestDiskPerf:

    @pytest.mark.small_vm
    def test_disk_benchmark(
        self,
        pytestconfig,
        temp_dir,
        local_temp_dir,
        prev_results,
        block_size,
        file_size,
        rw_mode,
        running_unix_vm_with_fio,
        plugged_vbd,
        image_format,
    ):
        vm = running_unix_vm_with_fio
        vbd = plugged_vbd
        device = f"/dev/{vbd.param_get(param_name='device')}"
        test_type = "{}-{}-{}-{}".format(block_size, file_size, rw_mode, image_format)
Review comment: Do we want to add a prefix here, like "bench-fio-", so the names share a common part? If there are other tests later we could have something like "bench-newtool-".
Review comment: I know adding more to test_type could look heavy, but shouldn't we also add iodepth and numjobs to the test_type string?
Reply: We should indeed add the "bench-" prefix, as well as numjobs and iodepth. I forgot to add the latter when I added those options. I will fix that.
Reply: done in 63e2cf5
        for i in range(DEFAULT_SAMPLES_NUM):
            result = run_fio(
                vm,
                test_type,
                rw_mode,
                temp_dir,
                local_temp_dir,
                file_path=device,
                bs=block_size,
                size=file_size,
                iodepth=pytestconfig.getoption("iodepth"),
                numjobs=pytestconfig.getoption("numjobs"),
            )
            summary = log_result_csv(test_type, rw_mode, result, CSV_FILE)
            assert summary["IOPS"] > 0
        key = (test_type, rw_mode)
        if prev_results and key in prev_results:
            results = load_results_from_csv(CSV_FILE)
            assert_performance_not_degraded(results[key], prev_results[key])
Review comment (on the image_format comment): Should have a "TODO:" label in this case, otherwise "will" is incorrect. Otherwise we can block this PR as long as the desired code is not merged, I suppose.
Reply: added the TODO label in 718b41f
Reply: As for blocking the PR, I don't know, since support for image formats is not required to make this work for now. If we don't block this PR we should be careful not to lose track of this TODO (maybe make a card about it?).