diff --git a/README.md b/README.md index 69708bc61..63eab7be2 100644 --- a/README.md +++ b/README.md @@ -205,7 +205,7 @@ Here's an example of selection we can do thanks to the markers: ``` # Run storage driver tests that either need no VM at all, or advise to use a small VM. Exclude tests that reboot the hosts. -pytest tests/storage -m "(small_vm or no_vm) and not reboot" --hosts=ip_of_poolmaster1,ip_of_poolmaster2 --vm=http://path/to/a_small_vm.xva --sr-disk=auto +pytest tests/storage -m "(small_vm or no_vm) and not reboot" --hosts=ip_of_poolmaster1,ip_of_poolmaster2 --vm=http://path/to/a_small_vm.xva ``` Another example: diff --git a/conftest.py b/conftest.py index ccfbe2c6f..d4874d315 100644 --- a/conftest.py +++ b/conftest.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import pytest import itertools @@ -12,6 +14,8 @@ from lib import pxe from lib.common import ( callable_marker, + DiskDevName, + HostAddress, is_uuid, prefix_object_name, setup_formatted_and_mounted_disk, @@ -21,6 +25,7 @@ wait_for, ) from lib.netutil import is_ipv6 +from lib.host import Host from lib.pool import Pool from lib.sr import SR from lib.vm import VM, vm_cache_key_from_def @@ -31,7 +36,7 @@ # need to import them in the global conftest.py so that they are recognized as fixtures. from pkgfixtures import formatted_and_mounted_ext4_disk, sr_disk_wiped -from typing import Dict +from typing import Dict, Generator, Iterable # Do we cache VMs? try: @@ -80,19 +85,12 @@ def pytest_addoption(parser): help="Max lines to output in a ssh log (0 if no limit)" ) parser.addoption( - "--sr-disk", - action="append", - default=[], - help="Name of an available disk (sdb) or partition device (sdb2) to be formatted and used in storage tests. " - "Set it to 'auto' to let the fixtures auto-detect available disks." 
- ) - parser.addoption( - "--sr-disk-4k", + "--disks", action="append", default=[], - help="Name of an available disk (sdb) or partition device (sdb2) with " - "4KiB blocksize to be formatted and used in storage tests. " - "Set it to 'auto' to let the fixtures auto-detect available disks." + help="HOST:DISKS to authorize for use by tests. " + "DISKS is a possibly-empty comma-separated list. " + "No mention of a given host authorizes use of all its disks." ) def pytest_configure(config): @@ -116,8 +114,8 @@ def pytest_collection_modifyitems(items, config): 'windows_vm', 'hostA2', 'hostB1', - 'sr_disk', - 'sr_disk_4k' + 'unused_512B_disks', + 'unused_4k_disks', ] for item in items: @@ -157,7 +155,7 @@ def pytest_runtest_makereport(item, call): # fixtures @pytest.fixture(scope='session') -def hosts(pytestconfig): +def hosts(pytestconfig) -> Generator[list[Host]]: nested_list = [] def setup_host(hostname_or_ip, *, config=None): @@ -223,6 +221,14 @@ def cleanup_hosts(): cleanup_hosts() +@pytest.fixture(scope='session') +def pools_hosts_by_name_or_ip(hosts: list[Host]) -> Generator[dict[HostAddress, Host]]: + """All hosts of all pools, each indexed by their hostname_or_ip.""" + yield {host.hostname_or_ip: host + for pool_master in hosts + for host in pool_master.pool.hosts + } + @pytest.fixture(scope='session') def registered_xo_cli(): # The fixture is not responsible for establishing the connection. 
@@ -335,103 +341,71 @@ def local_sr_on_hostB1(hostB1): yield sr @pytest.fixture(scope='session') -def sr_disk(pytestconfig, host): - disks = pytestconfig.getoption("sr_disk") - if len(disks) != 1: - pytest.fail("This test requires exactly one --sr-disk parameter") - disk = disks[0] - if disk == "auto": - logging.info(">> Check for the presence of a free disk device on the master host") - disks = host.available_disks() - assert len(disks) > 0, "a free disk device is required on the master host" - disk = disks[0] - logging.info(f">> Found free disk device(s) on hostA1: {' '.join(disks)}. Using {disk}.") - else: - logging.info(f">> Check that disk or block device {disk} is available on the master host") - assert disk in host.available_disks(), \ - f"disk or block device {disk} is either not present or already used on master host" - yield disk +def disks(pytestconfig, pools_hosts_by_name_or_ip: dict[HostAddress, Host] + ) -> dict[Host, list[Host.BlockDeviceInfo]]: + """Dict identifying names of all disks on all hosts of all pools.""" + def _parse_disk_option(option_text: str) -> tuple[HostAddress, list[DiskDevName]]: + parsed = option_text.split(sep=":", maxsplit=1) + assert len(parsed) == 2, f"--disks option {option_text!r} is not HOST:[DISK[,DISK]*]" + host_address, disks_string = parsed + devices = disks_string.split(',') if disks_string else [] + return host_address, devices + + cli_disks = dict(_parse_disk_option(option_text) + for option_text in pytestconfig.getoption("disks")) + + def _host_disks(host: Host, hosts_cli_disks: list[DiskDevName] | None) -> Iterable[Host.BlockDeviceInfo]: + """Filter host disks according to list from `--disks` if given.""" + host_disks = host.disks() + # no disk specified = allow all + if hosts_cli_disks is None: + yield from host_disks + return + # check all disks in --disks=host:... 
exist + for cli_disk in hosts_cli_disks: + for disk in host_disks: + if disk['name'] == cli_disk: + yield disk + break # names are unique, don't expect another one + else: + raise Exception(f"no {cli_disk!r} disk on host {host.hostname_or_ip}, " + f"has {','.join(disk['name'] for disk in host_disks)}") + + ret = {host: list(_host_disks(host, cli_disks.get(host.hostname_or_ip))) + for host in pools_hosts_by_name_or_ip.values() + } + logging.debug("disks collected: %s", {host.hostname_or_ip: value for host, value in ret.items()}) + return ret @pytest.fixture(scope='session') -def sr_disk_4k(pytestconfig, host): - disks = pytestconfig.getoption("sr_disk_4k") - if len(disks) != 1: - pytest.fail("This test requires exactly one --sr-disks-4k parameter") - disk = disks[0] - if disk == "auto": - logging.info(">> Check for the presence of a free 4KiB block device on the master host") - disks = host.available_disks(4096) - assert len(disks) > 0, "a free 4KiB block device is required on the master host" - disk = disks[0] - logging.info(f">> Found free 4KiB block device(s) on hostA1: {' '.join(disks)}. 
Using {disk}.") - else: - logging.info(f">> Check that 4KiB block device {disk} is available on the master host") - assert disk in host.available_disks(4096), \ - f"4KiB block device {disk} must be available for use on master host" - yield disk +def unused_512B_disks(disks: dict[Host, list[Host.BlockDeviceInfo]] + ) -> dict[Host, list[Host.BlockDeviceInfo]]: + """Dict identifying names of all 512-bytes-blocks disks on all hosts of all pools.""" + ret = {host: [disk for disk in host_disks + if disk["log-sec"] == "512" and host.disk_is_available(disk["name"])] + for host, host_disks in disks.items() + } + logging.debug("available disks collected: %s", {host.hostname_or_ip: value for host, value in ret.items()}) + return ret @pytest.fixture(scope='session') -def sr_disk_for_all_hosts(pytestconfig, request, host): - disks = pytestconfig.getoption("sr_disk") - if len(disks) != 1: - pytest.fail("This test requires exactly one --sr-disk parameter") - disk = disks[0] - master_disks = host.available_disks() - assert len(master_disks) > 0, "a free disk device is required on the master host" - - if disk != "auto": - assert disk in master_disks, \ - f"disk or block device {disk} is either not present or already used on master host" - master_disks = [disk] - - candidates = list(master_disks) - for h in host.pool.hosts[1:]: - other_disks = h.available_disks() - candidates = [d for d in candidates if d in other_disks] - - if disk == "auto": - assert len(candidates) > 0, \ - f"a free disk device is required on all pool members. Pool master has: {' '.join(master_disks)}." - logging.info(f">> Found free disk device(s) on all pool hosts: {' '.join(candidates)}. 
Using {candidates[0]}.") - else: - assert len(candidates) > 0, \ - f"disk or block device {disk} was not found to be present and free on all hosts" - logging.info(f">> Disk or block device {disk} is present and free on all pool members") - yield candidates[0] +def unused_4k_disks(disks: dict[Host, list[Host.BlockDeviceInfo]] + ) -> dict[Host, list[Host.BlockDeviceInfo]]: + """Dict identifying names of all 4K-blocks disks on all hosts of all pools.""" + ret = {host: [disk for disk in host_disks + if disk["log-sec"] == "4096" and host.disk_is_available(disk["name"])] + for host, host_disks in disks.items() + } + logging.debug("available 4k disks collected: %s", {host.hostname_or_ip: value for host, value in ret.items()}) + return ret @pytest.fixture(scope='session') -def sr_disks_for_all_hosts(pytestconfig, request, host): - disks = pytestconfig.getoption("sr_disk") - assert len(disks) > 0, "This test requires at least one --sr-disk parameter" - # Fetch available disks on the master host - master_disks = host.available_disks() - assert len(master_disks) > 0, "a free disk device is required on the master host" - - if "auto" in disks: - candidates = list(master_disks) - else: - # Validate that all specified disks exist on the master host - for disk in disks: - assert disk in master_disks, \ - f"Disk or block device {disk} is either not present or already used on the master host" - candidates = list(disks) - - # Check if all disks are available on all hosts in the pool - for h in host.pool.hosts[1:]: - other_disks = h.available_disks() - candidates = [d for d in candidates if d in other_disks] - - if "auto" in disks: - # Automatically select disks if "auto" is passed - assert len(candidates) > 0, \ - f"Free disk devices are required on all pool members. Pool master has: {' '.join(master_disks)}." 
- logging.info(">> Using free disk device(s) on all pool hosts: %s.", candidates) - else: - # Ensure specified disks are free on all hosts - assert len(candidates) == len(disks), \ - f"Some specified disks ({', '.join(disks)}) are not free or available on all hosts." - logging.info(">> Disk(s) %s are present and free on all pool members", candidates) - yield candidates +def pool_with_unused_512B_disk(host: Host, unused_512B_disks: dict[Host, list[Host.BlockDeviceInfo]]) -> Pool: + """Returns the first pool, ensuring all hosts have at least one unused 512-bytes-blocks disk.""" + for h in host.pool.hosts: + assert h in unused_512B_disks + assert unused_512B_disks[h], f"host {h} does not have any unused 512B-block disk" + return host.pool @pytest.fixture(scope='module') def vm_ref(request): diff --git a/jobs.py b/jobs.py index 2f418687f..2c9f16405 100755 --- a/jobs.py +++ b/jobs.py @@ -20,7 +20,6 @@ "nb_pools": 2, "params": { "--vm": "single/small_vm", - "--sr-disk": "auto", }, "paths": [ "tests/misc", @@ -46,7 +45,6 @@ "nb_pools": 2, "params": { "--vm[]": "multi/all_unix", - "--sr-disk": "auto", }, "paths": ["tests/misc", "tests/migration"], "markers": "multi_vms and not flaky and not reboot", @@ -62,7 +60,6 @@ "nb_pools": 2, "params": { "--vm[]": "multi/all_windows", - "--sr-disk": "auto", }, "paths": ["tests/misc", "tests/migration"], "markers": "multi_vms and not flaky and not reboot", @@ -88,10 +85,9 @@ "nb_pools": 1, "params": { "--vm": "single/small_vm", - "--sr-disk": "auto", }, "paths": ["tests/storage"], - "markers": "(small_vm or no_vm) and not reboot and not quicktest and not sr_disk_4k", + "markers": "(small_vm or no_vm) and not reboot and not quicktest and not unused_4k_disks", "name_filter": "not migration and not linstor", }, "storage-migrations": { @@ -106,10 +102,9 @@ "nb_pools": 2, "params": { "--vm": "single/small_vm", - "--sr-disk": "auto", }, "paths": ["tests/storage"], - "markers": "not sr_disk_4k", + "markers": "not unused_4k_disks", 
"name_filter": "migration and not linstor", }, "storage-reboots": { @@ -123,10 +118,9 @@ "nb_pools": 1, "params": { "--vm": "single/small_vm", - "--sr-disk": "auto", }, "paths": ["tests/storage"], - "markers": "reboot and not flaky and not sr_disk_4k", + "markers": "reboot and not flaky and not unused_4k_disks", "name_filter": "not linstor", }, "storage-quicktest": { @@ -138,10 +132,9 @@ ], "nb_pools": 1, "params": { - "--sr-disk": "auto", }, "paths": ["tests/storage"], - "markers": "quicktest and not sr_disk_4k", + "markers": "quicktest and not unused_4k_disks", "name_filter": "not linstor and not zfsvol", }, "linstor-main": { @@ -154,7 +147,6 @@ "nb_pools": 1, "params": { "--vm": "single/small_vm", - "--sr-disk": "auto", }, "paths": ["tests/storage/linstor"], "markers": "(small_vm or no_vm) and not reboot and not quicktest", @@ -171,7 +163,6 @@ "nb_pools": 2, "params": { "--vm": "single/small_vm", - "--sr-disk": "auto", }, "paths": ["tests/storage/linstor"], "markers": "", @@ -187,7 +178,6 @@ "nb_pools": 1, "params": { "--vm": "single/small_vm", - "--sr-disk": "auto", }, "paths": ["tests/storage/linstor"], "markers": "reboot", @@ -200,7 +190,6 @@ ], "nb_pools": 1, "params": { - "--sr-disk": "auto", }, "paths": ["tests/storage/linstor"], "markers": "quicktest", @@ -215,10 +204,9 @@ "nb_pools": 1, "params": { "--vm": "single/small_vm", - "--sr-disk-4k": "auto", }, "paths": ["tests/storage"], - "markers": "(small_vm or no_vm) and sr_disk_4k and not reboot and not quicktest", + "markers": "(small_vm or no_vm) and unused_4k_disks and not reboot and not quicktest", "name_filter": "not migration", }, "largeblock-migrations": { @@ -232,10 +220,9 @@ "nb_pools": 2, "params": { "--vm": "single/small_vm", - "--sr-disk-4k": "auto", }, "paths": ["tests/storage"], - "markers": "sr_disk_4k", + "markers": "unused_4k_disks", "name_filter": "migration", }, "largeblock-reboots": { @@ -248,10 +235,9 @@ "nb_pools": 1, "params": { "--vm": "single/small_vm", - "--sr-disk-4k": "auto", }, 
"paths": ["tests/storage"], - "markers": "sr_disk_4k and reboot", + "markers": "unused_4k_disks and reboot", }, "largeblock-quicktest": { "description": "runs `quicktest` on the largeblock storage driver", @@ -261,10 +247,9 @@ ], "nb_pools": 1, "params": { - "--sr-disk-4k": "auto", }, "paths": ["tests/storage"], - "markers": "sr_disk_4k and quicktest", + "markers": "unused_4k_disks and quicktest", }, "sb-main": { "description": "tests uefistored/varstored and SecureBoot using a small unix VM (or no VM when none needed)", @@ -297,7 +282,6 @@ # nb_pools left to 1 so that the job can run on XCP-ng 8.2 with just one pool, but 2 are required in 8.3+ "nb_pools": 1, "params": { - "--sr-disk": "auto", "--vm": "single/small_vm_efitools", }, "paths": ["tests/uefi_sb/test_uefistored_cert_flow.py", "tests/uefi_sb/test_varstored_cert_flow.py"], @@ -420,7 +404,6 @@ "nb_pools": 1, "params": { "--vm": "single/small_vm", - "--sr-disk": "auto", }, "paths": ["tests"], "markers": "flaky", diff --git a/lib/commands.py b/lib/commands.py index 8c5d61d58..24ec1cb07 100644 --- a/lib/commands.py +++ b/lib/commands.py @@ -4,6 +4,7 @@ import subprocess import lib.config as config +from lib.common import HostAddress from lib.netutil import wrap_ip from typing import List, Literal, Union, overload @@ -138,27 +139,32 @@ def _ssh(hostname_or_ip, cmd, check, simple_output, suppress_fingerprint_warning # This function is kept short for shorter pytest traces upon SSH failures, which are common, # as pytest prints the whole function definition that raised the SSHCommandFailed exception @overload -def ssh(hostname_or_ip: str, cmd: Union[str, List[str]], *, check: bool = True, simple_output: Literal[True] = True, +def ssh(hostname_or_ip: HostAddress, cmd: Union[str, List[str]], *, check: bool = True, + simple_output: Literal[True] = True, suppress_fingerprint_warnings: bool = True, background: Literal[False] = False, decode: Literal[True] = True, options: List[str] = []) -> str: ... 
@overload -def ssh(hostname_or_ip: str, cmd: Union[str, List[str]], *, check: bool = True, simple_output: Literal[True] = True, +def ssh(hostname_or_ip: HostAddress, cmd: Union[str, List[str]], *, check: bool = True, + simple_output: Literal[True] = True, suppress_fingerprint_warnings: bool = True, background: Literal[False] = False, decode: Literal[False], options: List[str] = []) -> bytes: ... @overload -def ssh(hostname_or_ip: str, cmd: Union[str, List[str]], *, check: bool = True, simple_output: Literal[False], +def ssh(hostname_or_ip: HostAddress, cmd: Union[str, List[str]], *, check: bool = True, + simple_output: Literal[False], suppress_fingerprint_warnings: bool = True, background: Literal[False] = False, decode: bool = True, options: List[str] = []) -> SSHResult: ... @overload -def ssh(hostname_or_ip: str, cmd: Union[str, List[str]], *, check: bool = True, simple_output: Literal[False], +def ssh(hostname_or_ip: HostAddress, cmd: Union[str, List[str]], *, check: bool = True, + simple_output: Literal[False], suppress_fingerprint_warnings: bool = True, background: Literal[True], decode: bool = True, options: List[str] = []) -> None: ... 
@overload -def ssh(hostname_or_ip: str, cmd: Union[str, List[str]], *, check=True, simple_output: bool = True, +def ssh(hostname_or_ip: HostAddress, cmd: Union[str, List[str]], *, check=True, + simple_output: bool = True, suppress_fingerprint_warnings=True, background: bool = False, decode: bool = True, options: List[str] = []) \ -> Union[str, bytes, SSHResult, None]: diff --git a/lib/common.py b/lib/common.py index 85123faf0..101aa19a8 100644 --- a/lib/common.py +++ b/lib/common.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import pytest import getpass @@ -15,13 +17,16 @@ import lib.commands as commands -from typing import TYPE_CHECKING, Callable, Dict, Literal, Optional, TypeVar, Union, cast, overload +from typing import TYPE_CHECKING, Callable, Dict, Literal, Optional, TypeAlias, TypeVar, Union, cast, overload if TYPE_CHECKING: import lib.host T = TypeVar("T") +HostAddress: TypeAlias = str +DiskDevName: TypeAlias = str + class PackageManagerEnum(Enum): UNKNOWN = 1 RPM = 2 diff --git a/lib/host.py b/lib/host.py index be9d84363..123aae1cc 100644 --- a/lib/host.py +++ b/lib/host.py @@ -2,6 +2,7 @@ import logging import os +import re import shlex import tempfile import uuid @@ -11,12 +12,13 @@ import lib.commands as commands import lib.pif as pif -from typing import TYPE_CHECKING, Dict, List, Literal, Optional, Union, overload +from typing import TYPE_CHECKING, Dict, List, Literal, Optional, TypedDict, Union, overload if TYPE_CHECKING: - import lib.pool + from lib.pool import Pool from lib.common import ( + DiskDevName, _param_add, _param_clear, _param_get, @@ -50,8 +52,21 @@ def host_data(hostname_or_ip): class Host: xe_prefix = "host" + pool: Pool - def __init__(self, pool: 'lib.pool.Pool', hostname_or_ip): + # Data extraction is automatic, no conversion from str is done. 
+ BlockDeviceInfo = TypedDict('BlockDeviceInfo', {"name": str, + "kname": str, + "pkname": str, + "size": str, + "log-sec": str, + "type": str, + }) + BLOCK_DEVICES_FIELDS = ','.join(k.upper() for k in BlockDeviceInfo.__annotations__) + + block_devices_info: list[BlockDeviceInfo] + + def __init__(self, pool: Pool, hostname_or_ip): self.pool = pool self.hostname_or_ip = hostname_or_ip self.xo_srv_id: Optional[str] = None @@ -68,6 +83,8 @@ def __init__(self, pool: 'lib.pool.Pool', hostname_or_ip): self.xcp_version = version.parse(self.inventory['PRODUCT_VERSION']) self.xcp_version_short = f"{self.xcp_version.major}.{self.xcp_version.minor}" + self.rescan_block_devices_info() + def __str__(self): return self.hostname_or_ip @@ -546,21 +563,32 @@ def management_pif(self): uuid = self.xe('pif-list', {'management': True, 'host-uuid': self.uuid}, minimal=True) return pif.PIF(uuid, self) - def disks(self): - """ List of SCSI disks, e.g ['sda', 'sdb', 'nvme0n1']. """ - disks = self.ssh(['lsblk', '-nd', '-I', '8,259', '--output', 'NAME']).splitlines() - disks.sort() - return disks - - def raw_disk_is_available(self, disk: str) -> bool: + def rescan_block_devices_info(self) -> None: """ - Check if a raw disk (without any identifiable filesystem or partition label) is available. + Initialize static information about the disks. - It suggests the disk is "raw" and likely unformatted thus available. + Despite those being static, it can be necessary to rescan, + when we test how XCP-ng reacts to changes of hardware (or + reconfiguration of device blocksize), or after a reboot. 
""" - return self.ssh_with_result(['blkid', '/dev/' + disk]).returncode == 2 - - def disk_is_available(self, disk: str) -> bool: + output_string = self.ssh(["lsblk", "--pairs", "--bytes", + '-I', '8,259', # limit to: sd, blkext + "--output", Host.BLOCK_DEVICES_FIELDS]) + + self.block_devices_info = [ + Host.BlockDeviceInfo({key.lower(): value.strip('"') # type: ignore[misc] + for key, value in re.findall(r'(\S+)=(".*?"|\S+)', line)}) + for line in output_string.strip().splitlines() + ] + logging.debug("blockdevs found: %s", [disk["name"] for disk in self.block_devices_info]) + + def disks(self) -> list[Host.BlockDeviceInfo]: + """ List of BlockDeviceInfo for all disks. """ + # filter out partitions from block_devices + return sorted((disk for disk in self.block_devices_info if not disk["pkname"]), + key=lambda disk: disk["name"]) + + def disk_is_available(self, disk: DiskDevName) -> bool: """ Check if a disk is unmounted and appears available for use. @@ -570,24 +598,7 @@ def disk_is_available(self, disk: str) -> bool: Warn: This function may misclassify LVM_member disks (e.g. in XOSTOR, RAID, ZFS) as "available". Such disks may not have mountpoints but still be in use. """ - return len(self.ssh(['lsblk', '-n', '-o', 'MOUNTPOINT', '/dev/' + disk]).strip()) == 0 - - def available_disks(self, blocksize=512): - """ - Return a list of available disks for formatting, creating SRs or such. 
- - Returns a list of disk names (eg.: ['sdb', 'sdc']) that don't have any mountpoint in - the output of lsblk (including their children such as partitions or md RAID devices) - """ - avail_disks = [] - blk_output = self.ssh(['lsblk', '-nd', '-I', '8,259', '--output', 'NAME,LOG-SEC']).splitlines() - for line in blk_output: - line = line.split() - disk = line[0] - sec_size = line[1] - if sec_size == str(blocksize): - avail_disks.append(disk) - return [disk for disk in avail_disks if self.disk_is_available(disk)] + return len(self.ssh(['lsblk', '--noheadings', '-o', 'MOUNTPOINT', '/dev/' + disk]).strip()) == 0 def file_exists(self, filepath, regular_file=True): option = '-f' if regular_file else '-e' diff --git a/lib/pool.py b/lib/pool.py index 904192534..5cc1051c8 100644 --- a/lib/pool.py +++ b/lib/pool.py @@ -5,7 +5,7 @@ from packaging import version import lib.commands as commands -from lib.common import _param_get, _param_set, safe_split, wait_for, wait_for_not +from lib.common import HostAddress, _param_get, _param_set, safe_split, wait_for, wait_for_not from lib.host import Host from lib.sr import SR @@ -14,7 +14,7 @@ class Pool: xe_prefix = "pool" - def __init__(self, master_hostname_or_ip: str) -> None: + def __init__(self, master_hostname_or_ip: HostAddress) -> None: master = Host(self, master_hostname_or_ip) assert master.is_master(), f"Host {master_hostname_or_ip} is not a master host. Aborting." 
self.master = master diff --git a/pkgfixtures.py b/pkgfixtures.py index cbf889173..08cbd75e8 100644 --- a/pkgfixtures.py +++ b/pkgfixtures.py @@ -1,9 +1,17 @@ +from __future__ import annotations + import pytest +from typing import TYPE_CHECKING, Generator import logging from lib.common import setup_formatted_and_mounted_disk, teardown_formatted_and_mounted_disk +if TYPE_CHECKING: + from lib.common import DiskDevName + from lib.host import Host + from lib.pool import Pool + # Due to a bug in the way pytest handles the setup and teardown of package-scoped fixtures, # we moved the following fixtures out of the main conftest.py. # To workaround the bug, the fixture must be imported either in a package's own conftest.py, @@ -12,27 +20,32 @@ # package scope because previous test packages may have used the disk @pytest.fixture(scope='package') -def sr_disk_wiped(host, sr_disk): +def sr_disk_wiped(host: Host, unused_512B_disks: dict[Host, list[Host.BlockDeviceInfo]]) -> Generator[DiskDevName]: + """A disk on MASTER HOST OF FIRST POOL which we wipe.""" + sr_disk = unused_512B_disks[host][0]["name"] logging.info(">> wipe disk %s" % sr_disk) host.ssh(['wipefs', '-a', '/dev/' + sr_disk]) yield sr_disk # package scope so that the device is unmounted before tests from the next package is executed. 
@pytest.fixture(scope='package') -def formatted_and_mounted_ext4_disk(host, sr_disk): +def formatted_and_mounted_ext4_disk(host: Host, unused_512B_disks: dict[Host, list[Host.BlockDeviceInfo]] + ) -> Generator[str]: + """Mountpoint for newly-formatted disk on MASTER HOST OF FIRST POOL.""" mountpoint = '/var/tmp/sr_disk_mountpoint' + sr_disk = unused_512B_disks[host][0]["name"] setup_formatted_and_mounted_disk(host, sr_disk, 'ext4', mountpoint) yield mountpoint teardown_formatted_and_mounted_disk(host, mountpoint) @pytest.fixture(scope='package') -def host_with_saved_yum_state(host): +def host_with_saved_yum_state(host: Host) -> Generator[Host]: host.yum_save_state() yield host host.yum_restore_saved_state() @pytest.fixture(scope='package') -def pool_with_saved_yum_state(host): +def pool_with_saved_yum_state(host: Host) -> Generator[Pool]: for h in host.pool.hosts: h.yum_save_state() yield host.pool diff --git a/pytest.ini b/pytest.ini index 6886cce3e..8e78251d4 100644 --- a/pytest.ini +++ b/pytest.ini @@ -9,8 +9,8 @@ markers = # * Host-related markers, automatically set based on fixtures hostA2: a second member in the first pool. hostB1: a second pool. - sr_disk: the test needs a free disk or writable block device that it can erase. - sr_disk_4k: the test needs a free 4KiB block device that it can erase. + unused_512B_disks: the test needs one or more free 512B-blocks disk or writable block device that it can erase. + unused_4k_disks: the test needs a free 4KiB block device that it can erase. # * VM-related markers, automatically set based on fixtures no_vm: tests that do not require a VM to run. 
diff --git a/tests/misc/test_export.py b/tests/misc/test_export.py index b5054eda4..5b007f6f5 100644 --- a/tests/misc/test_export.py +++ b/tests/misc/test_export.py @@ -4,9 +4,7 @@ # Requirements: # From --hosts parameter: -# - host: a XCP-ng host -# From --sr-disk parameter: -# - an additional unused disk to store the exported VM +# - host: a XCP-ng host with an unused disk to store the exported VM # From --vm parameter: # - A VM to import and export diff --git a/tests/storage/ext/conftest.py b/tests/storage/ext/conftest.py index d4a8ac531..d0fb452a9 100644 --- a/tests/storage/ext/conftest.py +++ b/tests/storage/ext/conftest.py @@ -1,10 +1,19 @@ +from __future__ import annotations + import pytest import logging +from typing import TYPE_CHECKING, Generator + +if TYPE_CHECKING: + from lib.host import Host + from lib.sr import SR + @pytest.fixture(scope='package') -def ext_sr(host, sr_disk): +def ext_sr(host: Host, unused_512B_disks: dict[Host, list[Host.BlockDeviceInfo]]) -> Generator[SR]: """ An EXT SR on first host. 
""" + sr_disk = unused_512B_disks[host][0]["name"] sr = host.sr_create('ext', "EXT-local-SR-test", {'device': '/dev/' + sr_disk}) yield sr # teardown diff --git a/tests/storage/ext/test_ext_sr.py b/tests/storage/ext/test_ext_sr.py index 2de05e461..bb6bf33a5 100644 --- a/tests/storage/ext/test_ext_sr.py +++ b/tests/storage/ext/test_ext_sr.py @@ -1,8 +1,15 @@ +from __future__ import annotations + import pytest from lib.common import vm_image, wait_for from tests.storage import try_to_create_sr_with_missing_device, vdi_is_open +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from lib.host import Host + # Requirements: # - one XCP-ng host with an additional unused disk for the SR @@ -16,8 +23,9 @@ class TestEXTSRCreateDestroy: def test_create_sr_with_missing_device(self, host): try_to_create_sr_with_missing_device('ext', 'EXT-local-SR-test', host) - def test_create_and_destroy_sr(self, host, sr_disk): + def test_create_and_destroy_sr(self, host: Host, unused_512B_disks: dict[Host, list[Host.BlockDeviceInfo]]) -> None: # Create and destroy tested in the same test to leave the host as unchanged as possible + sr_disk = unused_512B_disks[host][0]["name"] sr = host.sr_create('ext', "EXT-local-SR-test", {'device': '/dev/' + sr_disk}, verify=True) # import a VM in order to detect vm import issues here rather than in the vm_on_xfs_fixture used in # the next tests, because errors in fixtures break teardown diff --git a/tests/storage/glusterfs/conftest.py b/tests/storage/glusterfs/conftest.py index 6c0e721f7..e1cae9c84 100644 --- a/tests/storage/glusterfs/conftest.py +++ b/tests/storage/glusterfs/conftest.py @@ -72,13 +72,14 @@ def pool_with_glusterfs(pool_without_glusterfs, pool_with_saved_yum_state): pool.exec_on_hosts_on_error_continue(_teardown_host_with_glusterfs) @pytest.fixture(scope='package') -def gluster_disk(host, sr_disk_for_all_hosts): - sr_disk = sr_disk_for_all_hosts +def gluster_disk(pool_with_unused_512B_disk, unused_512B_disks): + pool = 
pool_with_unused_512B_disk mountpoint = '/mnt/sr_disk' - for h in host.pool.hosts: + for h in pool.hosts: + sr_disk = unused_512B_disks[h][0]["name"] setup_formatted_and_mounted_disk(h, sr_disk, 'xfs', mountpoint) yield - host.pool.exec_on_hosts_on_error_continue( + pool.exec_on_hosts_on_error_continue( lambda h: teardown_formatted_and_mounted_disk(h, mountpoint) ) diff --git a/tests/storage/glusterfs/test_glusterfs_sr.py b/tests/storage/glusterfs/test_glusterfs_sr.py index 62d29427f..9188b753d 100644 --- a/tests/storage/glusterfs/test_glusterfs_sr.py +++ b/tests/storage/glusterfs/test_glusterfs_sr.py @@ -10,7 +10,7 @@ # - one XCP-ng host >= 8.2 with an additional unused disk for the SR # - access to XCP-ng RPM repository from the host -@pytest.mark.usefixtures("sr_disk_for_all_hosts") # don't even run the tests if there's no free disk +@pytest.mark.usefixtures("pool_with_unused_512B_disk") # don't even run the tests if there's no free disk class TestGlusterFSSRCreateDestroy: """ Tests that do not use fixtures that setup the SR or import VMs, @@ -40,7 +40,6 @@ def test_create_and_destroy_sr(self, host, glusterfs_device_config, pool_with_gl vm.destroy(verify=True) sr.destroy(verify=True) -@pytest.mark.usefixtures("sr_disk_for_all_hosts", "glusterfs_sr") class TestGlusterFSSR: @pytest.mark.quicktest def test_quicktest(self, glusterfs_sr): diff --git a/tests/storage/glusterfs/test_glusterfs_sr_crosspool_migration.py b/tests/storage/glusterfs/test_glusterfs_sr_crosspool_migration.py index 60af0e3b4..7bb66e244 100644 --- a/tests/storage/glusterfs/test_glusterfs_sr_crosspool_migration.py +++ b/tests/storage/glusterfs/test_glusterfs_sr_crosspool_migration.py @@ -13,7 +13,7 @@ @pytest.mark.small_vm # run with a small VM to test the features @pytest.mark.big_vm # and ideally with a big VM to test it scales -@pytest.mark.usefixtures("hostB1", "local_sr_on_hostB1", "sr_disk_for_all_hosts") +@pytest.mark.usefixtures("hostB1", "local_sr_on_hostB1") class Test: def 
test_cold_crosspool_migration(self, host, hostB1, vm_on_glusterfs_sr, local_sr_on_hostB1): cold_migration_then_come_back(vm_on_glusterfs_sr, host, hostB1, local_sr_on_hostB1) diff --git a/tests/storage/glusterfs/test_glusterfs_sr_intrapool_migration.py b/tests/storage/glusterfs/test_glusterfs_sr_intrapool_migration.py index 0cff132f9..4bd361620 100644 --- a/tests/storage/glusterfs/test_glusterfs_sr_intrapool_migration.py +++ b/tests/storage/glusterfs/test_glusterfs_sr_intrapool_migration.py @@ -13,7 +13,7 @@ @pytest.mark.small_vm # run with a small VM to test the features @pytest.mark.big_vm # and ideally with a big VM to test it scales -@pytest.mark.usefixtures("hostA2", "local_sr_on_hostA2", "sr_disk_for_all_hosts") +@pytest.mark.usefixtures("local_sr_on_hostA2") class Test: def test_live_intrapool_shared_migration(self, host, hostA2, vm_on_glusterfs_sr): sr = vm_on_glusterfs_sr.get_sr() diff --git a/tests/storage/iso/test_local_iso_sr.py b/tests/storage/iso/test_local_iso_sr.py index 96013876a..827a7bcca 100644 --- a/tests/storage/iso/test_local_iso_sr.py +++ b/tests/storage/iso/test_local_iso_sr.py @@ -12,9 +12,9 @@ # Requirements: # From --hosts parameter: -# - host: a XCP-ng host, with the default SR being either a shared SR, or a local SR on the master host -# From --sr-disk parameter: -# - an additional unused disk for the SR +# - host: a XCP-ng host, with: +# - the default SR being either a shared SR, or a local SR on the master host +# - an additional unused disk for the SR # From --vm parameter: # - A VM to import diff --git a/tests/storage/iso/test_local_iso_sr_reboot.py b/tests/storage/iso/test_local_iso_sr_reboot.py index 12db19cac..e896600f5 100644 --- a/tests/storage/iso/test_local_iso_sr_reboot.py +++ b/tests/storage/iso/test_local_iso_sr_reboot.py @@ -8,9 +8,9 @@ # Requirements: # From --hosts parameter: -# - host: a XCP-ng host, with the default SR being either a shared SR, or a local SR on the master host -# From --sr-disk parameter: -# - an 
additional unused disk for the SR +# - host: a XCP-ng host, with: +# - the default SR being either a shared SR, or a local SR on the master host +# - an additional unused disk for the SR # From --vm parameter: # - A VM to import diff --git a/tests/storage/largeblock/conftest.py b/tests/storage/largeblock/conftest.py index 17ea67174..58702a757 100644 --- a/tests/storage/largeblock/conftest.py +++ b/tests/storage/largeblock/conftest.py @@ -1,11 +1,20 @@ +from __future__ import annotations + import pytest import logging +from typing import TYPE_CHECKING, Generator + +if TYPE_CHECKING: + from lib.host import Host + from lib.sr import SR + @pytest.fixture(scope='package') -def largeblock_sr(host, sr_disk_4k): +def largeblock_sr(host: Host, unused_4k_disks: dict[Host, list[Host.BlockDeviceInfo]]) -> Generator[SR]: """ A LARGEBLOCK SR on first host. """ - sr = host.sr_create('largeblock', "LARGEBLOCK-local-SR-test", {'device': '/dev/' + sr_disk_4k}) + sr_disk = unused_4k_disks[host][0]["name"] + sr = host.sr_create('largeblock', "LARGEBLOCK-local-SR-test", {'device': '/dev/' + sr_disk}) yield sr # teardown sr.destroy() diff --git a/tests/storage/largeblock/test_largeblock_sr.py b/tests/storage/largeblock/test_largeblock_sr.py index 97ec815af..c2683283e 100644 --- a/tests/storage/largeblock/test_largeblock_sr.py +++ b/tests/storage/largeblock/test_largeblock_sr.py @@ -1,8 +1,15 @@ +from __future__ import annotations + import pytest from lib.common import vm_image, wait_for from tests.storage import try_to_create_sr_with_missing_device, vdi_is_open +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from lib.host import Host + # Requirements: # - one XCP-ng host with an additional unused 4KiB disk for the SR @@ -16,9 +23,10 @@ class TestLARGEBLOCKSRCreateDestroy: def test_create_sr_with_missing_device(self, host): try_to_create_sr_with_missing_device('largeblock', 'LARGEBLOCK-local-SR-test', host) - def test_create_and_destroy_sr(self, host, sr_disk_4k): + def 
test_create_and_destroy_sr(self, host: Host, unused_4k_disks: dict[Host, list[Host.BlockDeviceInfo]]) -> None: # Create and destroy tested in the same test to leave the host as unchanged as possible - sr = host.sr_create('largeblock', "LARGEBLOCK-local-SR-test", {'device': '/dev/' + sr_disk_4k}, verify=True) + sr_disk = unused_4k_disks[host][0]["name"] + sr = host.sr_create('largeblock', "LARGEBLOCK-local-SR-test", {'device': '/dev/' + sr_disk}, verify=True) # import a VM in order to detect vm import issues here rather than in the vm_on_xfs_fixture used in # the next tests, because errors in fixtures break teardown vm = host.import_vm(vm_image('mini-linux-x86_64-bios'), sr_uuid=sr.uuid) diff --git a/tests/storage/linstor/conftest.py b/tests/storage/linstor/conftest.py index acc965862..256d4c1e6 100644 --- a/tests/storage/linstor/conftest.py +++ b/tests/storage/linstor/conftest.py @@ -1,23 +1,49 @@ +from __future__ import annotations + import pytest +import functools import logging +import os import lib.commands as commands # explicit import for package-scope fixtures from pkgfixtures import pool_with_saved_yum_state +from typing import TYPE_CHECKING, Generator + +if TYPE_CHECKING: + from lib.host import Host + from lib.pool import Pool + GROUP_NAME = 'linstor_group' STORAGE_POOL_NAME = f'{GROUP_NAME}/thin_device' LINSTOR_RELEASE_PACKAGE = 'xcp-ng-release-linstor' LINSTOR_PACKAGE = 'xcp-ng-linstor' @pytest.fixture(scope='package') -def lvm_disks(host, sr_disks_for_all_hosts, provisioning_type): - devices = [f"/dev/{disk}" for disk in sr_disks_for_all_hosts] - hosts = host.pool.hosts +def lvm_disks(pool_with_unused_512B_disk: Pool, + unused_512B_disks: dict[Host, list[Host.BlockDeviceInfo]], + provisioning_type: str) -> Generator[None]: + """ + Common LVM PVs on which a LV is created on each host of the pool. + + On each host in the pool, create PV on each of those disks whose + DEVICE NAME exists ACROSS THE WHOLE POOL. 
Then make a VG out of + all those, then a LV taking up the whole VG space. + + Yields None: the VG/LV created are identified by the module-level + GROUP_NAME and STORAGE_POOL_NAME constants. + """ + hosts = pool_with_unused_512B_disk.hosts + + @functools.cache + def host_devices(host: Host) -> list[str]: + return [os.path.join("/dev", disk["name"]) for disk in unused_512B_disks[host][0:1]] for host in hosts: + devices = host_devices(host) for device in devices: try: host.ssh(['pvcreate', '-ff', '-y', device]) @@ -35,11 +61,12 @@ def lvm_disks(host, sr_disks_for_all_hosts, provisioning_type): if provisioning_type == 'thin': host.ssh(['lvcreate', '-l', '100%FREE', '-T', STORAGE_POOL_NAME]) - yield devices + # FIXME ought to provide storage_pool_name and get rid of that other fixture + yield None for host in hosts: host.ssh(['vgremove', '-f', GROUP_NAME]) - for device in devices: + for device in host_devices(host): host.ssh(['pvremove', device]) @pytest.fixture(scope="package") diff --git a/tests/storage/lvm/conftest.py b/tests/storage/lvm/conftest.py index ab2ffdfaa..fb6845f88 100644 --- a/tests/storage/lvm/conftest.py +++ b/tests/storage/lvm/conftest.py @@ -1,10 +1,19 @@ +from __future__ import annotations + import pytest import logging +from typing import TYPE_CHECKING, Generator + +if TYPE_CHECKING: + from lib.host import Host + from lib.sr import SR + @pytest.fixture(scope='package') -def lvm_sr(host, sr_disk): +def lvm_sr(host: Host, unused_512B_disks: dict[Host, list[Host.BlockDeviceInfo]]) -> Generator[SR]: """ An LVM SR on first host.
""" + sr_disk = unused_512B_disks[host][0]["name"] sr = host.sr_create('lvm', "LVM-local-SR-test", {'device': '/dev/' + sr_disk}) yield sr # teardown diff --git a/tests/storage/lvm/test_lvm_sr.py b/tests/storage/lvm/test_lvm_sr.py index 8877d26ef..7bbf0a38a 100644 --- a/tests/storage/lvm/test_lvm_sr.py +++ b/tests/storage/lvm/test_lvm_sr.py @@ -1,8 +1,15 @@ +from __future__ import annotations + import pytest from lib.common import vm_image, wait_for from tests.storage import try_to_create_sr_with_missing_device, vdi_is_open +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from lib.host import Host + # Requirements: # - one XCP-ng host with an additional unused disk for the SR @@ -16,7 +23,8 @@ class TestLVMSRCreateDestroy: def test_create_sr_with_missing_device(self, host): try_to_create_sr_with_missing_device('lvm', 'LVM-local-SR-test', host) - def test_create_and_destroy_sr(self, host, sr_disk): + def test_create_and_destroy_sr(self, host: Host, unused_512B_disks: dict[Host, list[Host.BlockDeviceInfo]]) -> None: + sr_disk = unused_512B_disks[host][0]["name"] # Create and destroy tested in the same test to leave the host as unchanged as possible sr = host.sr_create('lvm', "LVM-local-SR-test", {'device': '/dev/' + sr_disk}, verify=True) # import a VM in order to detect vm import issues here rather than in the vm_on_xfs_fixture used in diff --git a/tests/storage/xfs/conftest.py b/tests/storage/xfs/conftest.py index c6f73b574..806a5a8cb 100644 --- a/tests/storage/xfs/conftest.py +++ b/tests/storage/xfs/conftest.py @@ -1,7 +1,15 @@ +from __future__ import annotations + import pytest import logging +from typing import TYPE_CHECKING, Generator + +if TYPE_CHECKING: + from lib.host import Host + from lib.sr import SR + @pytest.fixture(scope='package') def host_with_xfsprogs(host): assert not host.file_exists('/usr/sbin/mkfs.xfs'), \ @@ -13,8 +21,10 @@ def host_with_xfsprogs(host): host.yum_restore_saved_state() @pytest.fixture(scope='package') -def xfs_sr(sr_disk, 
host_with_xfsprogs): +def xfs_sr(unused_512B_disks: dict[Host, list[Host.BlockDeviceInfo]], host_with_xfsprogs: Host + ) -> Generator[SR]: """ A XFS SR on first host. """ + sr_disk = unused_512B_disks[host_with_xfsprogs][0]["name"] sr = host_with_xfsprogs.sr_create('xfs', "XFS-local-SR-test", {'device': '/dev/' + sr_disk}) yield sr # teardown diff --git a/tests/storage/xfs/test_xfs_sr.py b/tests/storage/xfs/test_xfs_sr.py index a591319dc..2d567bee7 100644 --- a/tests/storage/xfs/test_xfs_sr.py +++ b/tests/storage/xfs/test_xfs_sr.py @@ -1,3 +1,5 @@ +from __future__ import annotations + import pytest import logging @@ -7,6 +9,11 @@ from lib.common import vm_image, wait_for from tests.storage import vdi_is_open +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from lib.host import Host + # Requirements: # - one XCP-ng host >= 8.2 with an additional unused disk for the SR # - access to XCP-ng RPM repository from the host @@ -18,10 +25,14 @@ class TestXFSSRCreateDestroy: and VM import. """ - def test_create_xfs_sr_without_xfsprogs(self, host, sr_disk): + def test_create_xfs_sr_without_xfsprogs(self, + host: Host, + unused_512B_disks: dict[Host, list[Host.BlockDeviceInfo]] + ) -> None: # This test must be the first in the series in this module assert not host.file_exists('/usr/sbin/mkfs.xfs'), \ "xfsprogs must not be installed on the host at the beginning of the tests" + sr_disk = unused_512B_disks[host][0]["name"] sr = None try: sr = host.sr_create('xfs', "XFS-local-SR-test", {'device': '/dev/' + sr_disk}) @@ -31,9 +42,13 @@ def test_create_xfs_sr_without_xfsprogs(self, host, sr_disk): sr.destroy() assert False, "SR creation should not have succeeded!" 
- def test_create_and_destroy_sr(self, sr_disk, host_with_xfsprogs): + def test_create_and_destroy_sr(self, + unused_512B_disks: dict[Host, list[Host.BlockDeviceInfo]], + host_with_xfsprogs: Host + ) -> None: # Create and destroy tested in the same test to leave the host as unchanged as possible host = host_with_xfsprogs + sr_disk = unused_512B_disks[host][0]["name"] sr = host.sr_create('xfs', "XFS-local-SR-test", {'device': '/dev/' + sr_disk}, verify=True) # import a VM in order to detect vm import issues here rather than in the vm_on_xfs fixture used in # the next tests, because errors in fixtures break teardown diff --git a/tests/uefi_sb/test_varstored_cert_flow.py b/tests/uefi_sb/test_varstored_cert_flow.py index 384402a01..6c067287c 100644 --- a/tests/uefi_sb/test_varstored_cert_flow.py +++ b/tests/uefi_sb/test_varstored_cert_flow.py @@ -15,10 +15,9 @@ # From --hosts parameter: # - host: XCP-ng host >= 8.3 # Master of a, at least, 2 hosts pool +# With a free disk # - hostB1: XCP-ng host >= 8.3 # This host will be joined and ejected from pool A, it means its state will be completely reinitialized from scratch -# From --sr-disk parameter: -# - a free disk on the first host. pytestmark = pytest.mark.default_vm('mini-linux-x86_64-uefi')