
Commit 21fd773

Merge pull request #270 from rushikeshjadhav/feat-storage-linstor
feat(storage/linstor): Enhance multi-disk support and provisioning flexibility in Linstor SR tests
2 parents 68dc796 + 7a83065 commit 21fd773

File tree

3 files changed: +107 -34 lines changed

  conftest.py
  tests/storage/linstor/conftest.py
  tests/storage/linstor/test_linstor_sr.py

conftest.py

Lines changed: 34 additions & 0 deletions

@@ -329,6 +329,40 @@ def sr_disk_for_all_hosts(pytestconfig, request, host):
     logging.info(f">> Disk or block device {disk} is present and free on all pool members")
     yield candidates[0]
 
+@pytest.fixture(scope='session')
+def sr_disks_for_all_hosts(pytestconfig, request, host):
+    disks = pytestconfig.getoption("sr_disk")
+    assert len(disks) > 0, "This test requires at least one --sr-disk parameter"
+    # Fetch available disks on the master host
+    master_disks = host.available_disks()
+    assert len(master_disks) > 0, "a free disk device is required on the master host"
+
+    if "auto" in disks:
+        candidates = list(master_disks)
+    else:
+        # Validate that all specified disks exist on the master host
+        for disk in disks:
+            assert disk in master_disks, \
+                f"Disk or block device {disk} is either not present or already used on the master host"
+        candidates = list(disks)
+
+    # Check if all disks are available on all hosts in the pool
+    for h in host.pool.hosts[1:]:
+        other_disks = h.available_disks()
+        candidates = [d for d in candidates if d in other_disks]
+
+    if "auto" in disks:
+        # Automatically select disks if "auto" is passed
+        assert len(candidates) > 0, \
+            f"Free disk devices are required on all pool members. Pool master has: {' '.join(master_disks)}."
+        logging.info(">> Using free disk device(s) on all pool hosts: %s.", candidates)
+    else:
+        # Ensure specified disks are free on all hosts
+        assert len(candidates) == len(disks), \
+            f"Some specified disks ({', '.join(disks)}) are not free or available on all hosts."
+        logging.info(">> Disk(s) %s are present and free on all pool members", candidates)
+    yield candidates
+
 @pytest.fixture(scope='module')
 def vm_ref(request):
     ref = request.param
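
A usage sketch (hypothetical test name and command lines, not part of this commit): a test depending on the new fixture receives the full list of disks that are free on every pool member and, assuming --sr-disk is a repeatable option (as the list handling above suggests), can be driven with explicit disks or with "auto".

# Hypothetical usage sketch, not part of this commit.
# Explicit disks:         pytest tests/storage/linstor --sr-disk=sdb --sr-disk=sdc
# Let the fixture choose: pytest tests/storage/linstor --sr-disk=auto
def test_uses_all_shared_free_disks(host, sr_disks_for_all_hosts):
    # The fixture yields disk names (e.g. ["sdb", "sdc"]) that are free on
    # every host of the pool, ready to be turned into /dev/... paths.
    devices = [f"/dev/{disk}" for disk in sr_disks_for_all_hosts]
    assert devices, "expected at least one disk shared by all pool members"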

tests/storage/linstor/conftest.py

Lines changed: 51 additions & 22 deletions

@@ -12,57 +12,86 @@
 LINSTOR_PACKAGE = 'xcp-ng-linstor'
 
 @pytest.fixture(scope='package')
-def lvm_disk(host, sr_disk_for_all_hosts):
-    device = '/dev/' + sr_disk_for_all_hosts
+def lvm_disks(host, sr_disks_for_all_hosts, provisioning_type):
+    devices = [f"/dev/{disk}" for disk in sr_disks_for_all_hosts]
     hosts = host.pool.hosts
 
     for host in hosts:
-        try:
-            host.ssh(['pvcreate', '-ff', '-y', device])
-        except commands.SSHCommandFailed as e:
-            if e.stdout.endswith('Mounted filesystem?'):
-                host.ssh(['vgremove', '-f', GROUP_NAME, '-y'])
+        for device in devices:
+            try:
                 host.ssh(['pvcreate', '-ff', '-y', device])
-            elif e.stdout.endswith('excluded by a filter.'):
-                host.ssh(['wipefs', '-a', device])
-                host.ssh(['pvcreate', '-ff', '-y', device])
-            else:
-                raise e
+            except commands.SSHCommandFailed as e:
+                if e.stdout.endswith('Mounted filesystem?'):
+                    host.ssh(['vgremove', '-f', GROUP_NAME, '-y'])
+                    host.ssh(['pvcreate', '-ff', '-y', device])
+                elif e.stdout.endswith('excluded by a filter.'):
+                    host.ssh(['wipefs', '-a', device])
+                    host.ssh(['pvcreate', '-ff', '-y', device])
+                else:
+                    raise e
 
-    host.ssh(['vgcreate', GROUP_NAME, device])
-    host.ssh(['lvcreate', '-l', '100%FREE', '-T', STORAGE_POOL_NAME])
+    host.ssh(['vgcreate', GROUP_NAME] + devices)
+    if provisioning_type == 'thin':
+        host.ssh(['lvcreate', '-l', '100%FREE', '-T', STORAGE_POOL_NAME])
 
-    yield device
+    yield devices
 
     for host in hosts:
         host.ssh(['vgremove', '-f', GROUP_NAME])
-        host.ssh(['pvremove', device])
+        for device in devices:
+            host.ssh(['pvremove', device])
+
+@pytest.fixture(scope="package")
+def storage_pool_name(provisioning_type):
+    return GROUP_NAME if provisioning_type == "thick" else STORAGE_POOL_NAME
+
+@pytest.fixture(params=["thin", "thick"], scope="session")
+def provisioning_type(request):
+    return request.param
 
 @pytest.fixture(scope='package')
-def pool_with_linstor(hostA2, lvm_disk, pool_with_saved_yum_state):
+def pool_with_linstor(hostA2, lvm_disks, pool_with_saved_yum_state):
+    import concurrent.futures
     pool = pool_with_saved_yum_state
-    for host in pool.hosts:
+
+    def check_linstor_installed(host):
         if host.is_package_installed(LINSTOR_PACKAGE):
             raise Exception(
                 f'{LINSTOR_PACKAGE} is already installed on host {host}. This should not be the case.'
             )
 
-    for host in pool.hosts:
+    with concurrent.futures.ThreadPoolExecutor() as executor:
+        executor.map(check_linstor_installed, pool.hosts)
+
+    def install_linstor(host):
+        logging.info(f"Installing {LINSTOR_PACKAGE} on host {host}...")
         host.yum_install([LINSTOR_RELEASE_PACKAGE])
         host.yum_install([LINSTOR_PACKAGE], enablerepo="xcp-ng-linstor-testing")
         # Needed because the linstor driver is not in the xapi sm-plugins list
         # before installing the LINSTOR packages.
         host.ssh(["systemctl", "restart", "multipathd"])
         host.restart_toolstack(verify=True)
 
+    with concurrent.futures.ThreadPoolExecutor() as executor:
+        executor.map(install_linstor, pool.hosts)
+
     yield pool
 
+    # Need to remove this package as we have separate run of `test_create_sr_without_linstor`
+    # for `thin` and `thick` `provisioning_type`.
+    def remove_linstor(host):
+        logging.info(f"Cleaning up python-linstor from host {host}...")
+        host.yum_remove(["python-linstor"])
+
+    with concurrent.futures.ThreadPoolExecutor() as executor:
+        executor.map(remove_linstor, pool.hosts)
+
 @pytest.fixture(scope='package')
-def linstor_sr(pool_with_linstor):
+def linstor_sr(pool_with_linstor, provisioning_type, storage_pool_name):
     sr = pool_with_linstor.master.sr_create('linstor', 'LINSTOR-SR-test', {
-        'group-name': STORAGE_POOL_NAME,
+        'group-name': storage_pool_name,
         'redundancy': str(min(len(pool_with_linstor.hosts), 3)),
-        'provisioning': 'thin'
+        'provisioning': provisioning_type
     }, shared=True)
     yield sr
     sr.destroy()
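
The new provisioning_type fixture is parametrized, so every fixture and test that depends on it is instantiated once per value; this is what makes the whole LINSTOR suite run for both thin and thick provisioning. A minimal self-contained sketch of that pytest mechanism (illustrative names, not from the repository):

import pytest

@pytest.fixture(params=["thin", "thick"], scope="session")
def provisioning_type(request):
    # One session-scoped instance per param; dependent fixtures such as
    # lvm_disks, storage_pool_name and linstor_sr are rebuilt for each value.
    return request.param

def test_provisioning_mode(provisioning_type):
    # Collected twice: test_provisioning_mode[thin] and test_provisioning_mode[thick].
    assert provisioning_type in ("thin", "thick")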

tests/storage/linstor/test_linstor_sr.py

Lines changed: 22 additions & 12 deletions

@@ -2,13 +2,13 @@
 import pytest
 import time
 
-from .conftest import STORAGE_POOL_NAME, LINSTOR_PACKAGE
+from .conftest import LINSTOR_PACKAGE
 from lib.commands import SSHCommandFailed
 from lib.common import wait_for, vm_image
 from tests.storage import vdi_is_open
 
 # Requirements:
-# - one XCP-ng host >= 8.2 with an additional unused disk for the SR
+# - two or more XCP-ng hosts >= 8.2 with additional unused disk(s) for the SR
 # - access to XCP-ng RPM repository from the host
 
 class TestLinstorSRCreateDestroy:
@@ -18,15 +18,15 @@ class TestLinstorSRCreateDestroy:
     and VM import.
     """
 
-    def test_create_sr_without_linstor(self, host, lvm_disk):
+    def test_create_sr_without_linstor(self, host, lvm_disks, provisioning_type, storage_pool_name):
         # This test must be the first in the series in this module
         assert not host.is_package_installed('python-linstor'), \
             "linstor must not be installed on the host at the beginning of the tests"
         try:
             sr = host.sr_create('linstor', 'LINSTOR-SR-test', {
-                'group-name': STORAGE_POOL_NAME,
+                'group-name': storage_pool_name,
                 'redundancy': '1',
-                'provisioning': 'thin'
+                'provisioning': provisioning_type
             }, shared=True)
             try:
                 sr.destroy()
@@ -36,13 +36,13 @@ def test_create_sr_without_linstor(self, host, lvm_disk):
         except SSHCommandFailed as e:
             logging.info("SR creation failed, as expected: {}".format(e))
 
-    def test_create_and_destroy_sr(self, pool_with_linstor):
+    def test_create_and_destroy_sr(self, pool_with_linstor, provisioning_type, storage_pool_name):
         # Create and destroy tested in the same test to leave the host as unchanged as possible
         master = pool_with_linstor.master
         sr = master.sr_create('linstor', 'LINSTOR-SR-test', {
-            'group-name': STORAGE_POOL_NAME,
+            'group-name': storage_pool_name,
             'redundancy': '1',
-            'provisioning': 'thin'
+            'provisioning': provisioning_type
         }, shared=True)
         # import a VM in order to detect vm import issues here rather than in the vm_on_linstor_sr fixture used in
         # the next tests, because errors in fixtures break teardown
@@ -53,8 +53,16 @@ def test_create_and_destroy_sr(self, pool_with_linstor):
 @pytest.mark.usefixtures("linstor_sr")
 class TestLinstorSR:
     @pytest.mark.quicktest
-    def test_quicktest(self, linstor_sr):
-        linstor_sr.run_quicktest()
+    def test_quicktest(self, linstor_sr, provisioning_type):
+        try:
+            linstor_sr.run_quicktest()
+        except Exception:
+            if provisioning_type == "thick":
+                pytest.xfail(reason="Known failure for thick provisioning")
+            raise # Let thin failures fail test
+        else:
+            if provisioning_type == "thick":
+                pytest.fail("Expected failure for thick provisioning did not occur (XPASS)")
 
     def test_vdi_is_not_open(self, vdi_on_linstor_sr):
         assert not vdi_is_open(vdi_on_linstor_sr)
@@ -147,7 +155,7 @@ def _ensure_resource_remain_diskless(host, controller_option, volume_name, diskl
 
 class TestLinstorDisklessResource:
     @pytest.mark.small_vm
-    def test_diskless_kept(self, host, linstor_sr, vm_on_linstor_sr):
+    def test_diskless_kept(self, host, linstor_sr, vm_on_linstor_sr, storage_pool_name):
         vm = vm_on_linstor_sr
         vdi_uuids = vm.vdi_uuids(sr_uuid=linstor_sr.uuid)
         vdi_uuid = vdi_uuids[0]
@@ -157,10 +165,12 @@ def test_diskless_kept(self, host, linstor_sr, vm_on_linstor_sr):
         for member in host.pool.hosts:
             controller_option += f"{member.hostname_or_ip},"
 
+        sr_group_name = "xcp-sr-" + storage_pool_name.replace("/", "_")
+
         # Get volume name from VDI uuid
         # "xcp/volume/{vdi_uuid}/volume-name": "{volume_name}"
         output = host.ssh([
-            "linstor-kv-tool", "--dump-volumes", "-g", "xcp-sr-linstor_group_thin_device",
+            "linstor-kv-tool", "--dump-volumes", "-g", sr_group_name,
             "|", "grep", "volume-name", "|", "grep", vdi_uuid
         ])
         volume_name = output.split(': ')[1].split('"')[1]
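
The previously hard-coded LINSTOR key-value store name is now derived from storage_pool_name. A small illustrative check of that derivation follows; the constant values below are assumptions inferred from the old hard-coded string "xcp-sr-linstor_group_thin_device", the real ones live in tests/storage/linstor/conftest.py.

# Assumed values, inferred from the old hard-coded KV store name.
GROUP_NAME = "linstor_group"                     # thick: plain volume group name
STORAGE_POOL_NAME = "linstor_group/thin_device"  # thin: VG/thin-pool name

def linstor_kv_group(storage_pool_name):
    # Mirrors the derivation in test_diskless_kept: add the "xcp-sr-" prefix
    # and flatten "/" to "_" so a VG/thin-pool name becomes a single token.
    return "xcp-sr-" + storage_pool_name.replace("/", "_")

assert linstor_kv_group(STORAGE_POOL_NAME) == "xcp-sr-linstor_group_thin_device"
assert linstor_kv_group(GROUP_NAME) == "xcp-sr-linstor_group"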
