 import itertools
 
+import git
 import logging
+import os
 import pytest
 import tempfile
 
⋯
 
 import lib.config as global_config
 
+from lib import pxe
+from lib.common import callable_marker, shortened_nodeid, prefix_object_name
 from lib.common import wait_for, vm_image, is_uuid
 from lib.common import setup_formatted_and_mounted_disk, teardown_formatted_and_mounted_disk
 from lib.netutil import is_ipv6
 from lib.pool import Pool
-from lib.vm import VM
+from lib.sr import SR
+from lib.vm import VM, vm_cache_key_from_def
 from lib.xo import xo_cli
 
 # Import package-scoped fixtures. Although we need to define them in a separate file so that we can
⋯
 # pytest hooks
 
 def pytest_addoption(parser):
+    parser.addoption(
+        "--nest",
+        action="store",
+        default=None,
+        help="XCP-ng or XS master of pool to use for nesting hosts under test",
+    )
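+    # e.g. (illustrative values): pytest --nest=<nest-pool-master> --hosts=cache://<cached-host-image>
+    # "cache://" hosts are brought up as nested VMs on the --nest pool, see the hosts fixture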
     parser.addoption(
         "--hosts",
         action="append",
@@ -137,22 +148,73 @@ def pytest_runtest_makereport(item, call):
 
 # fixtures
 
-def setup_host(hostname_or_ip):
-    pool = Pool(hostname_or_ip)
-    h = pool.master
-    return h
-
 @pytest.fixture(scope='session')
 def hosts(pytestconfig):
+    nested_list = []
+
+    def setup_host(hostname_or_ip, *, config=None):
+        host_vm = None
+        if hostname_or_ip.startswith("cache://"):
+            if config is None:
+                raise RuntimeError("setup_host: a cache:// host requires --nest")
+            nest_hostname = config.getoption("nest")
+            if not nest_hostname:
+                pytest.fail("--hosts=cache://... requires --nest parameter")
+            nest = Pool(nest_hostname).master
+
+            protocol, rest = hostname_or_ip.split(":", 1)
+            host_vm = nest.import_vm(f"clone:{rest}", nest.main_sr_uuid(),
+                                     use_cache=True)
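+            # track the nested VM so cleanup_hosts() below can destroy it,
+            # both on setup failure and at session teardown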
+            nested_list.append(host_vm)
+
+            vif = host_vm.vifs()[0]
+            mac_address = vif.param_get('MAC')
+            logging.info("Nested host has MAC %s", mac_address)
+
+            host_vm.start()
+            wait_for(host_vm.is_running, "Wait for nested host VM running")
+
+            # catch host-vm IP address
+            wait_for(lambda: pxe.arp_addresses_for(mac_address),
+                     "Wait for DHCP server to see nested host in ARP tables",
+                     timeout_secs=10 * 60)
+            ips = pxe.arp_addresses_for(mac_address)
+            logging.info("Nested host has IPs %s", ips)
+            assert len(ips) == 1
+            host_vm.ip = ips[0]
+
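+            # os.system() returns the command's exit status (0 on success),
+            # so the negation becomes truthy once "nc" can reach port 22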
+            wait_for(lambda: not os.system(f"nc -zw5 {host_vm.ip} 22"),
+                     "Wait for ssh up on nested host", retry_delay_secs=5)
+
+            hostname_or_ip = host_vm.ip
+
+        pool = Pool(hostname_or_ip)
+        h = pool.master
+        return h
+
+    def cleanup_hosts():
+        for vm in nested_list:
+            logging.info("Destroying nested host VM %s", vm.uuid)
+            vm.destroy(verify=True)
+
     # a list of master hosts, each from a different pool
     hosts_args = pytestconfig.getoption("hosts")
     hosts_split = [hostlist.split(',') for hostlist in hosts_args]
     hostname_list = list(itertools.chain(*hosts_split))
-    host_list = [setup_host(hostname_or_ip) for hostname_or_ip in hostname_list]
+
+    try:
+        host_list = [setup_host(hostname_or_ip, config=pytestconfig)
+                     for hostname_or_ip in hostname_list]
+    except Exception:
+        cleanup_hosts()
+        raise
+
     if not host_list:
         pytest.fail("This test requires at least one --hosts parameter")
     yield host_list
 
+    cleanup_hosts()
+
 @pytest.fixture(scope='session')
 def registered_xo_cli():
     # The fixture is not responsible for establishing the connection.
@@ -410,6 +472,172 @@ def imported_vm(host, vm_ref):
     logging.info("<< Destroy VM")
     vm.destroy(verify=True)
 
+@pytest.fixture(scope="session")
+def tests_git_revision():
+    """
+    Get the git revision string for this tests repo.
+
+    Use of this fixture means impacted tests cannot run unless all
+    modifications are committed.
+    """
+    test_repo = git.Repo(".")
+    assert not test_repo.is_dirty(), "test repo must not be dirty"
+    yield test_repo.head.commit.hexsha
+
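+# Note: tests_git_revision is folded into the VM cache keys below (save_to_cache
+# in create_vms, and the lookup in _vm_from_cache), so cached images are expected
+# to be reused only for the tests commit that produced them.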
+@pytest.fixture(scope="function")
+def create_vms(request, host, tests_git_revision):
+    """
+    Returns list of VM objects created from `vm_definitions` marker.
+
+    The `vm_definitions` marker allows the test author to specify one or more
+    VMs, by giving for each VM one `dict`, or a callable taking fixtures as
+    arguments and returning such a `dict`.
+
+    Mandatory keys:
+    - `name`: name of the VM to create (str)
+    - `template`: name (or UUID) of the template to use (str)
+
+    Optional keys: see example below
+
+    Example:
+    -------
+    > @pytest.mark.vm_definitions(
+    >     dict(name="vm1", template="Other install media"),
+    >     dict(name="vm2",
+    >          template="CentOS 7",
+    >          params=(
+    >              dict(param_name="memory-static-max", value="4GiB"),
+    >              dict(param_name="HVM-boot-params", key="order", value="dcn"),
+    >          ),
+    >          vdis=[dict(name="vm 2 system disk",
+    >                     size="100GiB",
+    >                     device="xvda",
+    >                     userdevice="0",
+    >                     )],
+    >          cd_vbd=dict(device="xvdd", userdevice="3"),
+    >          vifs=(dict(index=0, network_name=NETWORKS["MGMT"]),
+    >                dict(index=1, network_uuid=NETWORKS["MYNET_UUID"]),
+    >                ),
+    >          ))
+    > def test_foo(create_vms):
+    >     ...
+
+    Example:
+    -------
+    > @pytest.mark.dependency(depends=["test_foo"])
+    > @pytest.mark.vm_definitions(dict(name="vm1", image_test="test_foo", image_vm="vm2"))
+    > def test_bar(create_vms):
+    >     ...
+
+    """
+    marker = request.node.get_closest_marker("vm_definitions")
+    if marker is None:
+        raise Exception("No vm_definitions marker specified.")
+
+    vm_defs = []
+    for vm_def in marker.args:
+        vm_def = callable_marker(vm_def, request)
+        assert "name" in vm_def
+        assert "template" in vm_def or "image_test" in vm_def
+        if "template" in vm_def:
+            assert "image_test" not in vm_def
+        # FIXME should check optional vdis contents
+        # FIXME should check for extra args
+        vm_defs.append(vm_def)
+
+    try:
+        vms = []
+        vdis = []
+        vbds = []
+        for vm_def in vm_defs:
+            if "template" in vm_def:
+                _create_vm(request, vm_def, host, vms, vdis, vbds)
+            elif "image_test" in vm_def:
+                _vm_from_cache(request, vm_def, host, vms, tests_git_revision)
+        yield vms
+
+        # request.node is an "item" because this fixture has "function" scope
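+        # PHASE_REPORT_KEY is expected to be filled in by the
+        # pytest_runtest_makereport hook defined earlier in this file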
+        report = request.node.stash.get(PHASE_REPORT_KEY, None)
+        if report is None:
+            # user interruption during setup
+            logging.warning("test setup result not available: not exporting VMs")
+        elif report["setup"].failed:
+            logging.warning("test setup failed or was skipped: not exporting VMs")
+        elif ("call" not in report) or report["call"].failed:
+            logging.warning("test call failed or was skipped: not exporting VMs")
+        else:
+            # record this state
+            for vm_def, vm in zip(vm_defs, vms):
+                nodeid = shortened_nodeid(request.node.nodeid)
+                vm.save_to_cache(f"{nodeid}-{vm_def['name']}-{tests_git_revision}")
+
+    except Exception:
+        logging.error("exception caught while creating VMs")
+        raise
+
+    finally:
+        for vbd in vbds:
+            logging.info("<< Destroy VBD %s", vbd.uuid)
+            vbd.destroy()
+        for vdi in vdis:
+            logging.info("<< Destroy VDI %s", vdi.uuid)
+            vdi.destroy()
+        for vm in vms:
+            logging.info("<< Destroy VM %s", vm.uuid)
+            vm.destroy(verify=True)
+
+def _vm_name(request, vm_def):
+    return f"{vm_def['name']} in {request.node.nodeid}"
+
+def _create_vm(request, vm_def, host, vms, vdis, vbds):
+    vm_name = _vm_name(request, vm_def)
+    vm_template = vm_def["template"]
+
+    logging.info("Installing VM %r from template %r", vm_name, vm_template)
+
+    vm = host.vm_from_template(vm_name, vm_template)
+
+    # VM is now created, make sure we clean it up on any subsequent failure
+    vms.append(vm)
+
+    if "vdis" in vm_def:
+        for vdi_def in vm_def["vdis"]:
+            sr = SR(host.main_sr_uuid(), host.pool)
+            vdi = sr.create_vdi(vdi_def["name"], vdi_def["size"])
+            vdis.append(vdi)
+            # connect to VM
+            vbd = vm.create_vbd(vdi_def["device"], vdi.uuid)
+            vbds.append(vbd)
+            vbd.param_set(param_name="userdevice", value=vdi_def["userdevice"])
+
+    if "cd_vbd" in vm_def:
+        vm.create_cd_vbd(**vm_def["cd_vbd"])
+
+    if "vifs" in vm_def:
+        for vif_def in vm_def["vifs"]:
+            vm.create_vif(vif_def["index"],
+                          network_uuid=vif_def.get("network_uuid", None),
+                          network_name=vif_def.get("network_name", None))
+
+    if "params" in vm_def:
+        for param_def in vm_def["params"]:
+            logging.info("Setting param %s", param_def)
+            vm.param_set(**param_def)
+
+def _vm_from_cache(request, vm_def, host, vms, tests_hexsha):
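+    # Fetch the base VM that a previous test exported to the cache
+    # (see save_to_cache() in the create_vms fixture above)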
+    base_vm = host.cached_vm(vm_cache_key_from_def(vm_def, request.node.nodeid, tests_hexsha),
+                             sr_uuid=host.main_sr_uuid())
+    if base_vm is None:
+        raise RuntimeError("No cache found")
+
+    # Clone the VM before running tests, so that the original VM remains untouched
+    logging.info("Cloning VM from cache")
+    vm = base_vm.clone(name=prefix_object_name(_vm_name(request, vm_def)))
+    # Remove the description, which may contain a cache identifier
+    vm.param_set('name-description', "")
+
+    vms.append(vm)
+
 @pytest.fixture(scope="module")
 def started_vm(imported_vm):
     vm = imported_vm