Skip to content

Commit d726751

Browse files
committed
testscript: use self.assert*()-helpers more (raise exception)
Better to get more context when things fail.

On-behalf-of: SAP philipp.schuster@sap.com
Signed-off-by: Philipp Schuster <philipp.schuster@cyberus-technology.de>
1 parent 14911ce commit d726751

File tree

1 file changed

+24
-20
lines changed

1 file changed

+24
-20
lines changed

tests/testscript.py

Lines changed: 24 additions & 20 deletions
Original file line number | Diff line number | Diff line change
@@ -640,18 +640,18 @@ def test_numa_topology(self):
640640

641641
# Check that there are 2 NUMA nodes
642642
status, _ = ssh(controllerVM, "ls /sys/devices/system/node/node0")
643-
assert status == 0
643+
self.assertEqual(status, 0)
644644

645645
status, _ = ssh(controllerVM, "ls /sys/devices/system/node/node1")
646-
assert status == 0
646+
self.assertEqual(status, 0)
647647

648648
# Check that there are 2 CPU sockets and 2 threads per core
649649
status, out = ssh(controllerVM, "lscpu | grep Socket | awk '{print $2}'")
650-
assert status == 0, "cmd failed"
650+
self.assertEqual(status, 0)
651651
assert int(out) == 2, "Expect to find 2 sockets"
652652

653653
status, out = ssh(controllerVM, "lscpu | grep Thread\\( | awk '{print $4}'")
654-
assert status == 0, "cmd failed"
654+
self.assertEqual(status, 0)
655655
assert int(out) == 2, "Expect to find 2 threads per core"
656656

657657
def test_cirros_image(self):
@@ -711,10 +711,10 @@ def test_numa_hugepages(self):
711711

712712
# Check that there are 2 NUMA nodes
713713
status, _ = ssh(controllerVM, "ls /sys/devices/system/node/node0")
714-
assert status == 0
714+
self.assertEqual(status, 0)
715715

716716
status, _ = ssh(controllerVM, "ls /sys/devices/system/node/node1")
717-
assert status == 0
717+
self.assertEqual(status, 0)
718718

719719
# Check that we really use hugepages from the hugepage pool
720720
status, out = controllerVM.execute(
@@ -735,10 +735,10 @@ def test_numa_hugepages_prefault(self):
735735

736736
# Check that there are 2 NUMA nodes
737737
status, _ = ssh(controllerVM, "ls /sys/devices/system/node/node0")
738-
assert status == 0
738+
self.assertEqual(status, 0)
739739

740740
status, _ = ssh(controllerVM, "ls /sys/devices/system/node/node1")
741-
assert status == 0
741+
self.assertEqual(status, 0)
742742

743743
# Check that all huge pages are in use
744744
status, out = controllerVM.execute(
@@ -793,7 +793,7 @@ def test_managedsave(self):
793793
wait_for_ssh(controllerVM)
794794

795795
status, _ = ssh(controllerVM, "ls /tmp/foo")
796-
assert status == 0
796+
self.assertEqual(status, 0)
797797

798798
def test_shutdown(self):
799799
"""
@@ -1022,7 +1022,7 @@ def test_live_migration_virsh_non_blocking(self):
10221022

10231023
# Stress the CH VM in order to make the migration take longer
10241024
status, _ = ssh(controllerVM, "screen -dmS stress stress -m 4 --vm-bytes 400M")
1025-
assert status == 0
1025+
self.assertEqual(status, 0)
10261026

10271027
# Do migration in a screen session and detach
10281028
controllerVM.succeed(
@@ -1118,7 +1118,7 @@ def test_disk_resize_raw(self):
11181118
)
11191119
disk_size_host = controllerVM.succeed("ls /tmp/disk.img -l | awk '{print $5}'")
11201120

1121-
assert status == 0
1121+
self.assertEqual(status, 0)
11221122
assert int(disk_size_guest) == disk_size_bytes_100M
11231123
assert int(disk_size_host) == disk_size_bytes_100M
11241124

@@ -1132,7 +1132,7 @@ def test_disk_resize_raw(self):
11321132
)
11331133
disk_size_host = controllerVM.succeed("ls /tmp/disk.img -l | awk '{print $5}'")
11341134

1135-
assert status == 0
1135+
self.assertEqual(status, 0)
11361136
assert int(disk_size_guest) == disk_size_bytes_10M
11371137
assert int(disk_size_host) == disk_size_bytes_10M
11381138

@@ -1146,7 +1146,7 @@ def test_disk_resize_raw(self):
11461146
)
11471147
disk_size_host = controllerVM.succeed("ls /tmp/disk.img -l | awk '{print $5}'")
11481148

1149-
assert status == 0
1149+
self.assertEqual(status, 0)
11501150
assert int(disk_size_guest) == disk_size_bytes_200M
11511151
assert int(disk_size_host) == disk_size_bytes_200M
11521152

@@ -1160,7 +1160,7 @@ def test_disk_resize_raw(self):
11601160
)
11611161
disk_size_host = controllerVM.succeed("ls /tmp/disk.img -l | awk '{print $5}'")
11621162

1163-
assert status == 0
1163+
self.assertEqual(status, 0)
11641164
assert int(disk_size_guest) == disk_size_bytes_100M
11651165
assert int(disk_size_host) == disk_size_bytes_100M
11661166

@@ -1173,7 +1173,7 @@ def test_disk_resize_raw(self):
11731173
)
11741174
disk_size_host = controllerVM.succeed("ls /tmp/disk.img -l | awk '{print $5}'")
11751175

1176-
assert status == 0
1176+
self.assertEqual(status, 0)
11771177
assert int(disk_size_guest) == disk_size_bytes_100M
11781178
assert int(disk_size_host) == disk_size_bytes_100M
11791179

@@ -1234,7 +1234,7 @@ def test_disk_resize_qcow2(self):
12341234
controllerVM, "lsblk --raw -b /dev/vdb | awk '{print $4}' | tail -n1"
12351235
)
12361236

1237-
assert status == 0
1237+
self.assertEqual(status, 0)
12381238
assert int(disk_size_guest) == disk_size_bytes_100M
12391239

12401240
controllerVM.fail(
@@ -1335,7 +1335,7 @@ def test_live_migration_kill_chv_on_sender_side(self):
13351335

13361336
# Stress the CH VM in order to make the migration take longer
13371337
status, _ = ssh(controllerVM, "screen -dmS stress stress -m 4 --vm-bytes 400M")
1338-
assert status == 0
1338+
self.assertEqual(status, 0)
13391339

13401340
# Do migration in a screen session and detach
13411341
controllerVM.succeed(
@@ -1373,7 +1373,7 @@ def test_live_migration_kill_chv_on_receiver_side(self):
13731373

13741374
# Stress the CH VM in order to make the migration take longer
13751375
status, _ = ssh(controllerVM, "screen -dmS stress stress -m 4 --vm-bytes 400M")
1376-
assert status == 0
1376+
self.assertEqual(status, 0)
13771377

13781378
# Do migration in a screen session and detach
13791379
controllerVM.succeed(
@@ -2024,7 +2024,8 @@ def number_of_devices(machine: Machine, filter: str = "") -> int:
20242024
else:
20252025
cmd = f"lspci -n | grep {filter} | wc -l"
20262026
status, out = ssh(machine, cmd)
2027-
assert status == 0
2027+
if status != 0:
2028+
raise RuntimeError("failed to query the number of PCI devices from the guest")
20282029
return int(out)
20292030

20302031

@@ -2129,7 +2130,10 @@ def pci_devices_by_bdf(machine: Machine):
21292130
machine,
21302131
"lspci -n | awk '/^[0-9a-f]{2}:[0-9a-f]{2}\\.[0-9]/{bdf=$1}{class=$3} {print bdf \",\" class}'",
21312132
)
2132-
assert status == 0
2133+
if status != 0:
2134+
raise RuntimeError(
2135+
"failed to get PCI devices grouped by their BDF from the guest"
2136+
)
21332137
out = {}
21342138
for line in lines.splitlines():
21352139
bdf, device_class = line.split(",")

0 commit comments

Comments (0)