Commit ce8668a

Merge branch 'main' into move/675-node-to-node-encryption
2 parents: e3891f6 + 08b1eeb

1 file changed: Tests/iaas/volume-backup/volume-backup-tester.py (+118 -46 lines)
@@ -17,6 +17,7 @@
 import os
 import time
 import typing
+import logging
 
 import openstack
 
@@ -29,6 +30,21 @@
 WAIT_TIMEOUT = 60
 
 
+class ConformanceTestException(Exception):
+    pass
+
+
+def ensure(condition: bool, error_message: str):
+    """
+    Custom replacement for the `assert` statement that is not removed by the
+    -O optimization parameter.
+    If the condition does not evaluate to `True`, a ConformanceTestException
+    will be raised containing the specified error_message string.
+    """
+    if not condition:
+        raise ConformanceTestException(error_message)
+
+
 def connect(cloud_name: str, password: typing.Optional[str] = None
             ) -> openstack.connection.Connection:
     """Create a connection to an OpenStack cloud
@@ -64,133 +80,164 @@ def test_backup(conn: openstack.connection.Connection,
     """
 
     # CREATE VOLUME
-    print("Creating volume ...")
+    volume_name = f"{prefix}volume"
+    logging.info(f"Creating volume '{volume_name}' ...")
     volume = conn.block_storage.create_volume(
-        name=f"{prefix}volume",
+        name=volume_name,
         size=1
     )
-    assert volume is not None, (
-        "Initial volume creation failed"
+    ensure(
+        volume is not None,
+        f"Creation of initial volume '{volume_name}' failed"
     )
     volume_id = volume.id
-    assert conn.block_storage.get_volume(volume_id) is not None, (
-        "Retrieving initial volume by ID failed"
+    ensure(
+        conn.block_storage.get_volume(volume_id) is not None,
+        f"Retrieving initial volume by ID '{volume_id}' failed"
     )
 
-    print(
+    logging.info(
         f"↳ waiting for volume with ID '{volume_id}' to reach status "
         f"'available' ..."
     )
     seconds_waited = 0
     while conn.block_storage.get_volume(volume_id).status != "available":
         time.sleep(1.0)
         seconds_waited += 1
-        assert seconds_waited < timeout, (
+        ensure(
+            seconds_waited < timeout,
             f"Timeout reached while waiting for volume to reach status "
             f"'available' (volume id: {volume_id}) after {seconds_waited} "
             f"seconds"
         )
-    print("Create empty volume: PASS")
+    logging.info("Create empty volume: PASS")
 
     # CREATE BACKUP
-    print("Creating backup from volume ...")
+    logging.info("Creating backup from volume ...")
     backup = conn.block_storage.create_backup(
         name=f"{prefix}volume-backup",
         volume_id=volume_id
     )
-    assert backup is not None, (
+    ensure(
+        backup is not None,
         "Backup creation failed"
     )
     backup_id = backup.id
-    assert conn.block_storage.get_backup(backup_id) is not None, (
+    ensure(
+        conn.block_storage.get_backup(backup_id) is not None,
         "Retrieving backup by ID failed"
     )
 
-    print(f"↳ waiting for backup '{backup_id}' to become available ...")
+    logging.info(f"↳ waiting for backup '{backup_id}' to become available ...")
     seconds_waited = 0
     while conn.block_storage.get_backup(backup_id).status != "available":
         time.sleep(1.0)
         seconds_waited += 1
-        assert seconds_waited < timeout, (
+        ensure(
+            seconds_waited < timeout,
             f"Timeout reached while waiting for backup to reach status "
             f"'available' (backup id: {backup_id}) after {seconds_waited} "
             f"seconds"
         )
-    print("Create backup from volume: PASS")
+    logging.info("Create backup from volume: PASS")
 
     # RESTORE BACKUP
-    print("Restoring backup to volume ...")
     restored_volume_name = f"{prefix}restored-backup"
+    logging.info(f"Restoring backup to volume '{restored_volume_name}' ...")
     conn.block_storage.restore_backup(
         backup_id,
         name=restored_volume_name
     )
 
-    print(
+    logging.info(
         f"↳ waiting for restoration target volume '{restored_volume_name}' "
         f"to be created ..."
     )
     seconds_waited = 0
     while conn.block_storage.find_volume(restored_volume_name) is None:
         time.sleep(1.0)
         seconds_waited += 1
-        assert seconds_waited < timeout, (
+        ensure(
+            seconds_waited < timeout,
             f"Timeout reached while waiting for restored volume to be created "
             f"(volume name: {restored_volume_name}) after {seconds_waited} "
             f"seconds"
         )
     # wait for the volume restoration to finish
-    print(
+    logging.info(
         f"↳ waiting for restoration target volume '{restored_volume_name}' "
         f"to reach 'available' status ..."
     )
     volume_id = conn.block_storage.find_volume(restored_volume_name).id
     while conn.block_storage.get_volume(volume_id).status != "available":
         time.sleep(1.0)
         seconds_waited += 1
-        assert seconds_waited < timeout, (
+        ensure(
+            seconds_waited < timeout,
             f"Timeout reached while waiting for restored volume reach status "
             f"'available' (volume id: {volume_id}) after {seconds_waited} "
             f"seconds"
         )
-    print("Restore volume from backup: PASS")
+    logging.info("Restore volume from backup: PASS")
 
 
 def cleanup(conn: openstack.connection.Connection, prefix=DEFAULT_PREFIX,
-            timeout=WAIT_TIMEOUT):
+            timeout=WAIT_TIMEOUT) -> bool:
     """
     Looks up volume and volume backup resources matching the given prefix and
     deletes them.
+    Returns False if there were any errors during cleanup which might leave
+    resources behind. Otherwise returns True to indicate cleanup success.
     """
 
     def wait_for_resource(resource_type: str, resource_id: str,
-                          expected_status="available") -> None:
+                          expected_status=("available", )) -> None:
         seconds_waited = 0
         get_func = getattr(conn.block_storage, f"get_{resource_type}")
-        while get_func(resource_id).status != expected_status:
+        while get_func(resource_id).status not in expected_status:
             time.sleep(1.0)
             seconds_waited += 1
-            assert seconds_waited < timeout, (
+            ensure(
+                seconds_waited < timeout,
                 f"Timeout reached while waiting for {resource_type} during "
-                f"cleanup to be in status '{expected_status}' "
+                f"cleanup to be in status {expected_status} "
                 f"({resource_type} id: {resource_id}) after {seconds_waited} "
                 f"seconds"
             )
 
-    print(f"\nPerforming cleanup for resources with the "
-          f"'{prefix}' prefix ...")
+    logging.info(f"Performing cleanup for resources with the "
+                 f"'{prefix}' prefix ...")
 
+    cleanup_was_successful = True
     backups = conn.block_storage.backups()
     for backup in backups:
         if backup.name.startswith(prefix):
             try:
-                wait_for_resource("backup", backup.id)
+                wait_for_resource(
+                    "backup", backup.id,
+                    expected_status=("available", "error")
+                )
             except openstack.exceptions.ResourceNotFound:
                 # if the resource has vanished on
                 # its own in the meantime ignore it
                 continue
-            print(f"↳ deleting volume backup '{backup.id}' ...")
-            conn.block_storage.delete_backup(backup.id)
+            except ConformanceTestException as e:
+                # This exception happens if the backup state does not reach any
+                # of the desired ones specified above. We do not need to set
+                # cleanup_was_successful to False here since any remaining ones
+                # will be caught in the next loop down below anyway.
+                logging.warning(str(e))
+            else:
+                logging.info(f"↳ deleting volume backup '{backup.id}' ...")
+                # Setting ignore_missing to False here will make an exception
+                # bubble up in case the cinder-backup service is not present.
+                # Since we already catch ResourceNotFound for the backup above,
+                # the absence of the cinder-backup service is the only
+                # NotFoundException that is left to be thrown here.
+                # We treat this as a fatal due to the cinder-backup service
+                # being mandatory.
+                conn.block_storage.delete_backup(
+                    backup.id, ignore_missing=False)
 
     # wait for all backups to be cleaned up before attempting to remove volumes
     seconds_waited = 0
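All of the waiting loops in `test_backup` follow the same poll-with-timeout pattern: sleep one second, count, give up after `timeout` seconds. For reference, a generic sketch of that pattern as a standalone helper; this `wait_until` function is hypothetical and not part of the script:

    import time
    from typing import Callable


    def wait_until(predicate: Callable[[], bool], timeout: int,
                   error_message: str, interval: float = 1.0) -> None:
        """Poll `predicate` until it returns True; fail after `timeout` seconds."""
        seconds_waited = 0.0
        while not predicate():
            time.sleep(interval)
            seconds_waited += interval
            if seconds_waited >= timeout:
                raise TimeoutError(error_message)


    # e.g. the volume wait above could be expressed as:
    # wait_until(lambda: conn.block_storage.get_volume(volume_id).status == "available",
    #            timeout, f"volume {volume_id} did not become 'available' in time")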
@@ -200,22 +247,32 @@ def wait_for_resource(resource_type: str, resource_id: str,
     ) > 0:
         time.sleep(1.0)
         seconds_waited += 1
-        assert seconds_waited < timeout, (
-            f"Timeout reached while waiting for all backups with prefix "
-            f"'{prefix}' to finish deletion"
-        )
+        if seconds_waited >= timeout:
+            cleanup_was_successful = False
+            logging.warning(
+                f"Timeout reached while waiting for all backups with prefix "
+                f"'{prefix}' to finish deletion during cleanup after "
+                f"{seconds_waited} seconds"
+            )
+            break
 
     volumes = conn.block_storage.volumes()
     for volume in volumes:
         if volume.name.startswith(prefix):
             try:
-                wait_for_resource("volume", volume.id)
+                wait_for_resource("volume", volume.id, expected_status=("available", "error"))
             except openstack.exceptions.ResourceNotFound:
                 # if the resource has vanished on
                 # its own in the meantime ignore it
                 continue
-            print(f"↳ deleting volume '{volume.id}' ...")
-            conn.block_storage.delete_volume(volume.id)
+            except ConformanceTestException as e:
+                logging.warning(str(e))
+                cleanup_was_successful = False
+            else:
+                logging.info(f"↳ deleting volume '{volume.id}' ...")
+                conn.block_storage.delete_volume(volume.id)
+
+    return cleanup_was_successful
 
 
 def main():
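The cleanup loops rely on Python's `try`/`except`/`else`: the `else` branch runs only when the `try` body raised nothing, so deletion is attempted only for resources that actually reached one of the accepted states. A minimal standalone sketch of that control flow (hypothetical names, not the script's code):

    def classify(value: int) -> str:
        try:
            if value < 0:
                raise LookupError("resource vanished")   # stands in for ResourceNotFound
            if value == 0:
                raise RuntimeError("unexpected state")   # stands in for ConformanceTestException
        except LookupError:
            return "skipped"
        except RuntimeError as exc:
            return f"warned: {exc}"
        else:
            return "deleted"  # only reached if the try body raised nothing


    assert classify(-1) == "skipped"
    assert classify(0) == "warned: unexpected state"
    assert classify(1) == "deleted"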
@@ -257,25 +314,40 @@ def main():
     )
     args = parser.parse_args()
     openstack.enable_logging(debug=args.debug)
+    logging.basicConfig(
+        format="%(levelname)s: %(message)s",
+        level=logging.DEBUG if args.debug else logging.INFO,
+    )
 
     # parse cloud name for lookup in clouds.yaml
     cloud = os.environ.get("OS_CLOUD", None)
     if args.os_cloud:
         cloud = args.os_cloud
-    assert cloud, (
-        "You need to have the OS_CLOUD environment variable set to your "
-        "cloud name or pass it via --os-cloud"
-    )
+    if not cloud:
+        raise Exception(
+            "You need to have the OS_CLOUD environment variable set to your "
+            "cloud name or pass it via --os-cloud"
+        )
     conn = connect(
         cloud,
         password=getpass.getpass("Enter password: ") if args.ask else None
     )
+
+    if not cleanup(conn, prefix=args.prefix, timeout=args.timeout):
+        raise Exception(
+            f"Cleanup was not successful, there may be leftover resources "
+            f"with the '{args.prefix}' prefix"
+        )
     if args.cleanup_only:
-        cleanup(conn, prefix=args.prefix, timeout=args.timeout)
-    else:
-        cleanup(conn, prefix=args.prefix, timeout=args.timeout)
+        return
+    try:
         test_backup(conn, prefix=args.prefix, timeout=args.timeout)
-        cleanup(conn, prefix=args.prefix, timeout=args.timeout)
+    finally:
+        if not cleanup(conn, prefix=args.prefix, timeout=args.timeout):
+            logging.info(
+                f"There may be leftover resources with the "
+                f"'{args.prefix}' prefix that could not be cleaned up!"
+            )
 
 
 if __name__ == "__main__":
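With this restructuring, `main()` always performs an initial cleanup pass, and `try`/`finally` guarantees that cleanup is attempted even when `test_backup` raises, while the test's exception still propagates to the caller. A minimal standalone sketch of that guarantee (stub functions, not the script's code):

    import logging

    logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.INFO)


    def failing_test():
        raise RuntimeError("simulated test failure")


    def cleanup_stub() -> bool:
        logging.info("cleanup attempted")
        return False  # pretend some resources were left behind


    try:
        try:
            failing_test()
        finally:
            if not cleanup_stub():
                logging.info("there may be leftover resources")
    except RuntimeError as exc:
        logging.info(f"test failure still propagated: {exc}")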
