diff --git a/jobs.py b/jobs.py
index 2dc60f1d8..973bb5571 100755
--- a/jobs.py
+++ b/jobs.py
@@ -462,6 +462,19 @@
         "nb_pools": 2,
         "params": {},
         "paths": ["tests/misc/test_pool.py"],
+    },
+    "limit-tests": {
+        "description": "Tests verifying we can hit our supported limits",
+        "requirements": [
+            "1 XCP-ng host >= 8.2"
+        ],
+        "nb_pools": 1,
+        "params": {
+            # The test does not work on Alpine because of how it handles
+            # multiple interfaces on the same network, so use Debian instead
+            "--vm": "single/debian_uefi_vm",
+        },
+        "paths": ["tests/limits/test_vif_limit.py"],
     }
 }
 
@@ -653,7 +666,7 @@ def extract_tests(cmd):
     print("*** Checking that all tests that use VMs have VM target markers (small_vm, etc.)... ", end="")
     tests_missing_vm_markers = extract_tests(
-        ["pytest", "--collect-only", "-q", "-m", "not no_vm and not (small_vm or multi_vm or big_vm)"]
+        ["pytest", "--collect-only", "-q", "-m", "not no_vm and not (small_vm or multi_vm or big_vm or debian_uefi_vm)"]
     )
     if tests_missing_vm_markers:
         error = True
diff --git a/lib/vm.py b/lib/vm.py
index d5328c4e5..2641edd39 100644
--- a/lib/vm.py
+++ b/lib/vm.py
@@ -293,10 +293,11 @@ def create_vif(self, vif_num, *, network_uuid=None, network_name=None):
             network_uuid = self.host.pool.network_named(network_name)
         assert network_uuid, f"No UUID given, and network name {network_name!r} not found"
         logging.info("Create VIF %d to network %r on VM %s", vif_num, network_uuid, self.uuid)
-        self.host.xe('vif-create', {'vm-uuid': self.uuid,
-                                    'device': str(vif_num),
-                                    'network-uuid': network_uuid,
-                                    })
+        vif_uuid = self.host.xe('vif-create', {'vm-uuid': self.uuid,
+                                               'device': str(vif_num),
+                                               'network-uuid': network_uuid,
+                                               })
+        return VIF(vif_uuid, self)
 
     def is_running_on_host(self, host):
         return self.is_running() and self.param_get('resident-on') == host.uuid
diff --git a/pytest.ini b/pytest.ini
index 6886cce3e..c6a48f4da 100644
--- a/pytest.ini
+++ b/pytest.ini
@@ -30,6 +30,7 @@ markers =
     small_vm: tests that it is enough to run just once, using the smallest possible VM.
    big_vm: tests that it would be good to run with a big VM.
     multi_vms: tests that it would be good to run on a variety of VMs (includes `small_vm` but excludes `big_vm`).
+    debian_uefi_vm: tests that require a Debian UEFI VM
 
     # * Other markers
     reboot: tests that reboot one or more hosts.
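
Note on the lib/vm.py change: `xe vif-create` prints the UUID of the created VIF, so returning `VIF(vif_uuid, self)` gives callers a handle on the interface they just added instead of forcing another lookup. A minimal usage sketch follows; it is not part of this patch and assumes the suite's `imported_vm` fixture plus a `VIF` wrapper exposing `uuid` and `param_get`, as the `vm.vifs()[0].param_get(...)` call in the new test implies:

    import logging

    def test_create_vif_returns_handle(imported_vm):
        vm = imported_vm
        # Reuse the network of the VM's first VIF, like the new limit test does
        network_uuid = vm.vifs()[0].param_get('network-uuid')
        # Pick the next free device number so we do not clash with existing VIFs
        vif = vm.create_vif(len(vm.vifs()), network_uuid=network_uuid)
        logging.info("Created VIF %s", vif.uuid)
        assert vif.param_get('network-uuid') == network_uuid
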
diff --git a/tests/limits/__init__.py b/tests/limits/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/tests/limits/test_vif_limit.py b/tests/limits/test_vif_limit.py
new file mode 100644
index 000000000..956848679
--- /dev/null
+++ b/tests/limits/test_vif_limit.py
@@ -0,0 +1,93 @@
+from pkgfixtures import host_with_saved_yum_state
+import ipaddress
+import logging
+import os
+import pytest
+import tempfile
+
+# Requirements:
+# - one XCP-ng host (--host) >= 8.2
+# - a VM (--vm)
+# - the first network on the host can be used to reach the host
+
+vif_limit = 16
+interface_name = "enX"
+vcpus = '8'
+
+# There is a ResourceWarning due to background=True on an ssh call
+# We do ensure the processes are killed
+@pytest.mark.filterwarnings("ignore::ResourceWarning")
+@pytest.mark.debian_uefi_vm
+class TestVIFLimit:
+    def test_vif_limit(self, host_with_saved_yum_state, imported_vm):
+        host = host_with_saved_yum_state
+        vm = imported_vm
+        if vm.is_running():
+            logging.info("VM already running, shutting it down first")
+            vm.shutdown(verify=True)
+
+        network_uuid = vm.vifs()[0].param_get('network-uuid')
+        existing_vifs = len(vm.vifs())
+
+        logging.info(f'Get {vcpus} vCPUs for the VM')
+        vm.param_set('VCPUs-max', vcpus)
+        vm.param_set('VCPUs-at-startup', vcpus)
+
+        logging.info('Create VIFs before starting the VM')
+        for i in range(existing_vifs, vif_limit):
+            vm.create_vif(i, network_uuid=network_uuid)
+
+        vm.start()
+        vm.wait_for_os_booted()
+
+        logging.info('Verify the interfaces exist in the guest')
+        for i in range(0, vif_limit):
+            if vm.ssh_with_result([f'test -d /sys/class/net/{interface_name}{i}']).returncode != 0:
+                guest_error = vm.ssh_with_result(['dmesg | grep -B1 -A3 xen_netfront']).stdout
+                logging.error("dmesg:\n%s", guest_error)
+                assert False, "The interface does not exist in the guest, check dmesg output above for errors"
+
+        logging.info('Configure interfaces')
+        config = '\n'.join([f'iface {interface_name}{i} inet dhcp\n'
+                            f'auto {interface_name}{i}'
+                            for i in range(existing_vifs, vif_limit)])
+        vm.ssh([f'echo "{config}" >> /etc/network/interfaces'])
+
+        logging.info('Install iperf3 on VM and host')
+        if vm.ssh_with_result(['apt install iperf3 --assume-yes']).returncode != 0:
+            assert False, "Failed to install iperf3 on the VM"
+        host.yum_install(['iperf3'])
+
+        logging.info('Reconfigure VM networking')
+        if vm.ssh_with_result(['systemctl restart networking']).returncode != 0:
+            assert False, "Failed to configure networking"
+
+        # Test iperf on all interfaces in parallel
+        # Clean up on exceptions
+        try:
+            logging.info('Create separate iperf servers on the host')
+            with tempfile.NamedTemporaryFile('w') as host_script:
+                iperf_configs = [f'iperf3 -s -p {5100+i} &'
+                                 for i in range(0, vif_limit)]
+                host_script.write('\n'.join(iperf_configs))
+                host_script.flush()
+                host.scp(host_script.name, host_script.name)
+                host.ssh([f'nohup bash -c "bash {host_script.name}" < /dev/null &>/dev/null &'],
+                         background=True)
+
+            logging.info('Start multiple iperfs on separate interfaces on the VM')
+            with tempfile.NamedTemporaryFile('w') as vm_script:
+                iperf_configs = [f'iperf3 --no-delay -c {host.hostname_or_ip} '
+                                 f'-p {5100+i} --bind-dev {interface_name}{i} '
+                                 f'--interval 0 --parallel 1 --time 30 &'
+                                 for i in range(0, vif_limit)]
+                vm_script.write('\n'.join(iperf_configs))
+                vm_script.flush()
+                vm.scp(vm_script.name, vm_script.name)
+                stdout = vm.ssh([f'bash {vm_script.name}'])
+
+                # TODO: log this into some performance time series DB
+                logging.info(stdout)
+        finally:
+            # Cleanup must not raise even if iperf3 already exited on either side
+            vm.ssh(['pkill iperf3 || true'])
+            host.ssh(['killall iperf3 || true'])
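
The TODO about a performance time-series DB suggests an obvious follow-up: if the iperf3 clients were run with `--json` instead of the default text output, each run could be reduced to a single throughput figure before being stored. A rough parsing sketch, not part of this patch and only an assumption about that follow-up; it relies on iperf3's standard JSON layout, where TCP runs report `end.sum_sent.bits_per_second`:

    import json

    def iperf3_sent_mbps(json_output: str) -> float:
        """Return the sent throughput in Mbit/s from one `iperf3 --json` run."""
        result = json.loads(json_output)
        # 'end' holds the aggregated counters for the whole run
        return result['end']['sum_sent']['bits_per_second'] / 1e6
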