Skip to content

Commit 758f648

Browse files
Update Python files format
1 parent 402080c commit 758f648

17 files changed

+158
-152
lines changed

tests/integration/backups.py

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -171,9 +171,9 @@ async def pitr_operations(
171171
"restore", **{"backup-id": backup_id, "restore-to-time": "bad"}
172172
)
173173
await action.wait()
174-
assert (
175-
action.status == "failed"
176-
), "restore should fail with bad restore-to-time parameter, but it succeeded"
174+
assert action.status == "failed", (
175+
"restore should fail with bad restore-to-time parameter, but it succeeded"
176+
)
177177

178178
logger.info(f"Restoring backup {backup_id} with year_before restore-to-time parameter")
179179
await juju_.run_action(
@@ -183,9 +183,9 @@ async def pitr_operations(
183183
apps=[MYSQL_APPLICATION_NAME, S3_INTEGRATOR],
184184
timeout=TIMEOUT,
185185
)
186-
assert await check_test_data_existence(
187-
first_mysql_ip, should_not_exist=[td1, td2]
188-
), "test data should not exist"
186+
assert await check_test_data_existence(first_mysql_ip, should_not_exist=[td1, td2]), (
187+
"test data should not exist"
188+
)
189189

190190
logger.info(f"Restoring backup {backup_id} with year_after restore-to-time parameter")
191191
await juju_.run_action(
@@ -195,9 +195,9 @@ async def pitr_operations(
195195
apps=[MYSQL_APPLICATION_NAME, S3_INTEGRATOR],
196196
timeout=TIMEOUT,
197197
)
198-
assert await check_test_data_existence(
199-
first_mysql_ip, should_exist=[td1, td2]
200-
), "both test data should exist"
198+
assert await check_test_data_existence(first_mysql_ip, should_exist=[td1, td2]), (
199+
"both test data should exist"
200+
)
201201

202202
logger.info(f"Restoring backup {backup_id} with actual restore-to-time parameter")
203203
await juju_.run_action(
@@ -219,9 +219,9 @@ async def pitr_operations(
219219
apps=[MYSQL_APPLICATION_NAME, S3_INTEGRATOR],
220220
timeout=TIMEOUT,
221221
)
222-
assert await check_test_data_existence(
223-
first_mysql_ip, should_exist=[td1, td2]
224-
), "both test data should exist"
222+
assert await check_test_data_existence(first_mysql_ip, should_exist=[td1, td2]), (
223+
"both test data should exist"
224+
)
225225
clean_backups_from_buckets(cloud_configs, cloud_credentials)
226226

227227

tests/integration/helpers.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -120,9 +120,9 @@ async def get_relation_data(
120120
data = yaml.safe_load(raw_data)
121121
# Filter the data based on the relation name.
122122
relation_data = [v for v in data[unit_name]["relation-info"] if v["endpoint"] == relation_name]
123-
assert (
124-
relation_data
125-
), f"no relation data could be grabbed on relation with endpoint {relation_name}"
123+
assert relation_data, (
124+
f"no relation data could be grabbed on relation with endpoint {relation_name}"
125+
)
126126

127127
return relation_data
128128

@@ -408,9 +408,9 @@ async def get_process_pid(
408408
if return_code == 1:
409409
return None
410410

411-
assert (
412-
return_code == 0
413-
), f"Failed getting pid, unit={unit_name}, container={container_name}, process={process}"
411+
assert return_code == 0, (
412+
f"Failed getting pid, unit={unit_name}, container={container_name}, process={process}"
413+
)
414414

415415
stripped_pid = pid.strip()
416416
if not stripped_pid:

tests/integration/high_availability/high_availability_helpers.py

Lines changed: 12 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -337,9 +337,9 @@ async def send_signal_to_pod_container_process(
337337
)
338338
response.run_forever(timeout=5)
339339

340-
assert (
341-
response.returncode == 0
342-
), f"Failed to send {signal_code} signal, unit={unit_name}, container={container_name}, process={process}"
340+
assert response.returncode == 0, (
341+
f"Failed to send {signal_code} signal, unit={unit_name}, container={container_name}, process={process}"
342+
)
343343

344344

345345
async def get_process_stat(
@@ -362,9 +362,9 @@ async def get_process_stat(
362362
]
363363
return_code, stat, _ = await ops_test.juju(*get_stat_commands)
364364

365-
assert (
366-
return_code == 0
367-
), f"Failed to get STAT, unit_name={unit_name}, container_name={container_name}, process={process}"
365+
assert return_code == 0, (
366+
f"Failed to get STAT, unit_name={unit_name}, container_name={container_name}, process={process}"
367+
)
368368

369369
return stat
370370

@@ -498,9 +498,9 @@ async def ensure_all_units_continuous_writes_incrementing(
498498
ops_test, unit, credentials
499499
)
500500
logger.info(f"{max_written_value=} on unit {unit.name}")
501-
assert (
502-
max_written_value > last_max_written_value
503-
), "Continuous writes not incrementing"
501+
assert max_written_value > last_max_written_value, (
502+
"Continuous writes not incrementing"
503+
)
504504

505505
last_max_written_value = max_written_value
506506

@@ -573,9 +573,9 @@ async def ensure_process_not_running(
573573
get_pid_commands = ["ssh", "--container", container_name, unit_name, "pgrep", "-x", process]
574574
return_code, pid, _ = await ops_test.juju(*get_pid_commands)
575575

576-
assert (
577-
return_code != 0
578-
), f"Process {process} is still running with pid {pid} on unit {unit_name}, container {container_name}"
576+
assert return_code != 0, (
577+
f"Process {process} is still running with pid {pid} on unit {unit_name}, container {container_name}"
578+
)
579579

580580

581581
def get_sts_partition(ops_test: OpsTest, app_name: str) -> int:

tests/integration/high_availability/test_async_replication.py

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -250,9 +250,9 @@ async def test_standby_promotion(
250250
assert results[0] > 1, "No data was written to the database"
251251

252252
cluster_set_status = await get_cluster_status(leader_unit, cluster_set=True)
253-
assert (
254-
cluster_set_status["clusters"]["cuzco"]["clusterrole"] == "primary"
255-
), "standby not promoted to primary"
253+
assert cluster_set_status["clusters"]["cuzco"]["clusterrole"] == "primary", (
254+
"standby not promoted to primary"
255+
)
256256

257257

258258
@markers.juju3
@@ -278,12 +278,12 @@ async def test_failover(ops_test: OpsTest, first_model: Model, second_model: Mod
278278
)
279279

280280
cluster_set_status = await get_cluster_status(leader_unit, cluster_set=True)
281-
assert (
282-
cluster_set_status["clusters"]["lima"]["clusterrole"] == "primary"
283-
), "standby not promoted to primary"
284-
assert (
285-
cluster_set_status["clusters"]["cuzco"]["globalstatus"] == "invalidated"
286-
), "old primary not invalidated"
281+
assert cluster_set_status["clusters"]["lima"]["clusterrole"] == "primary", (
282+
"standby not promoted to primary"
283+
)
284+
assert cluster_set_status["clusters"]["cuzco"]["globalstatus"] == "invalidated", (
285+
"old primary not invalidated"
286+
)
287287

288288
# restore mysqld process
289289
for unit in second_model_units:

tests/integration/high_availability/test_k8s_endpoints.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -85,9 +85,9 @@ async def test_labeling_of_k8s_endpoints(ops_test: OpsTest, charm):
8585
)
8686

8787
for primary in cluster_one_primary_addresses:
88-
assert (
89-
primary in cluster_one_ips
90-
), f"{primary} (not belonging to cluster 1) should not be in cluster one addresses"
88+
assert primary in cluster_one_ips, (
89+
f"{primary} (not belonging to cluster 1) should not be in cluster one addresses"
90+
)
9191

9292
assert set(cluster_one_primary_addresses + cluster_one_replica_addresses) == set(
9393
cluster_one_ips
@@ -106,9 +106,9 @@ async def test_labeling_of_k8s_endpoints(ops_test: OpsTest, charm):
106106
)
107107

108108
for primary in cluster_two_primary_addresses:
109-
assert (
110-
primary in cluster_two_ips
111-
), f"{primary} (not belonging to cluster 2) should not be in cluster two addresses"
109+
assert primary in cluster_two_ips, (
110+
f"{primary} (not belonging to cluster 2) should not be in cluster two addresses"
111+
)
112112

113113
assert set(cluster_two_primary_addresses + cluster_two_replica_addresses) == set(
114114
cluster_two_ips

tests/integration/high_availability/test_log_rotation.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -86,9 +86,9 @@ async def test_log_rotation(
8686

8787
for file in log_files:
8888
# audit.log can be rotated and new file not created until access to db
89-
assert (
90-
file in ls_output or file == "audit.log"
91-
), f"❌ files other than log files exist {ls_output}"
89+
assert file in ls_output or file == "audit.log", (
90+
f"❌ files other than log files exist {ls_output}"
91+
)
9292

9393
logger.info("Dispatching custom event to rotate logs")
9494
await dispatch_custom_event_for_logrotate(ops_test, unit.name)

tests/integration/high_availability/test_node_drain.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -36,9 +36,9 @@ async def test_pod_eviction_and_pvc_deletion(
3636

3737
logger.info("Waiting until 3 mysql instances are online")
3838
# ensure all units in the cluster are online
39-
assert await ensure_n_online_mysql_members(
40-
ops_test, 3
41-
), "The deployed mysql application is not fully online"
39+
assert await ensure_n_online_mysql_members(ops_test, 3), (
40+
"The deployed mysql application is not fully online"
41+
)
4242

4343
logger.info("Ensuring all units have continuous writes incrementing")
4444
await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)
@@ -65,9 +65,9 @@ async def test_pod_eviction_and_pvc_deletion(
6565
)
6666

6767
logger.info("Waiting until 3 mysql instances are online")
68-
assert await ensure_n_online_mysql_members(
69-
ops_test, 3
70-
), "The deployed mysql application is not fully online after primary pod eviction"
68+
assert await ensure_n_online_mysql_members(ops_test, 3), (
69+
"The deployed mysql application is not fully online after primary pod eviction"
70+
)
7171

7272
logger.info("Ensuring all units have continuous writes incrementing")
7373
await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)

tests/integration/high_availability/test_replication_reelection.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -61,9 +61,9 @@ async def test_kill_primary_check_reelection(
6161
assert primary_name != new_primary_name
6262

6363
# wait (and retry) until the killed pod is back online in the mysql cluster
64-
assert await ensure_n_online_mysql_members(
65-
ops_test, 3
66-
), "Old primary has not come back online after being killed"
64+
assert await ensure_n_online_mysql_members(ops_test, 3), (
65+
"Old primary has not come back online after being killed"
66+
)
6767

6868
await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)
6969

tests/integration/high_availability/test_replication_scaling.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -54,9 +54,9 @@ async def test_scaling_without_data_loss(
5454

5555
# scale up the mysql application
5656
await scale_application(ops_test, mysql_application_name, 4)
57-
assert await ensure_n_online_mysql_members(
58-
ops_test, 4
59-
), "The cluster is not fully online after scaling up"
57+
assert await ensure_n_online_mysql_members(ops_test, 4), (
58+
"The cluster is not fully online after scaling up"
59+
)
6060

6161
# ensure value inserted before scale exists in all units
6262
for attempt in Retrying(stop=stop_after_delay(10), wait=wait_fixed(2)):
@@ -84,9 +84,9 @@ async def test_scaling_without_data_loss(
8484

8585
# scale down the mysql application
8686
await scale_application(ops_test, mysql_application_name, 3)
87-
assert await ensure_n_online_mysql_members(
88-
ops_test, 3
89-
), "The cluster is not fully online after scaling down"
87+
assert await ensure_n_online_mysql_members(ops_test, 3), (
88+
"The cluster is not fully online after scaling down"
89+
)
9090

9191
# ensure data written before scale down is persisted
9292
for unit in ops_test.model.applications[mysql_application_name].units:

tests/integration/high_availability/test_self_healing_network_cut.py

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -33,9 +33,9 @@ async def test_network_cut_affecting_an_instance(
3333
assert mysql_application_name, "mysql application name is not set"
3434

3535
logger.info("Ensuring that there are 3 online mysql members")
36-
assert await ensure_n_online_mysql_members(
37-
ops_test, 3
38-
), "The deployed mysql application does not have three online nodes"
36+
assert await ensure_n_online_mysql_members(ops_test, 3), (
37+
"The deployed mysql application does not have three online nodes"
38+
)
3939

4040
logger.info("Ensuring that all instances have incrementing continuous writes")
4141
await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)
@@ -102,9 +102,9 @@ async def test_network_cut_affecting_an_instance(
102102
assert isolated_primary_memberrole == "secondary"
103103

104104
logger.info("Ensure there are 3 online mysql members")
105-
assert await ensure_n_online_mysql_members(
106-
ops_test, 3
107-
), "The deployed mysql application does not have three online nodes"
105+
assert await ensure_n_online_mysql_members(ops_test, 3), (
106+
"The deployed mysql application does not have three online nodes"
107+
)
108108

109109
logger.info("Ensure all units have incrementing continuous writes")
110110
await ensure_all_units_continuous_writes_incrementing(ops_test, credentials=credentials)

0 commit comments

Comments (0)