
Commit ad77ae9

Update Python files format
Parent: 8961c8b

11 files changed (+96, -96 lines)
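
Each file receives the same mechanical change: multi-line assert statements are rewrapped so that the asserted condition stays on a single line and only the failure message is parenthesized and indented. This matches the assert wrapping produced by newer auto-formatter releases (e.g. recent Black/Ruff styles); the commit does not name the tool, so that attribution is an assumption. A minimal before/after sketch of the pattern, taken from the first hunk below:

    # Old style: the condition is split across lines and the message trails the closing paren
    assert (
        action.status == "failed"
    ), "restore should fail with bad restore-to-time parameter, but it succeeded"

    # New style: the condition stays on one line; only the message is wrapped in parentheses
    assert action.status == "failed", (
        "restore should fail with bad restore-to-time parameter, but it succeeded"
    )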

tests/integration/backups.py

Lines changed: 12 additions & 12 deletions
@@ -160,9 +160,9 @@ async def pitr_operations(
         "restore", **{"backup-id": backup_id, "restore-to-time": "bad"}
     )
     await action.wait()
-    assert (
-        action.status == "failed"
-    ), "restore should fail with bad restore-to-time parameter, but it succeeded"
+    assert action.status == "failed", (
+        "restore should fail with bad restore-to-time parameter, but it succeeded"
+    )
 
     logger.info(f"Restoring backup {backup_id} with year_before restore-to-time parameter")
     await juju_.run_action(
@@ -172,9 +172,9 @@ async def pitr_operations(
         apps=[MYSQL_APPLICATION_NAME, S3_INTEGRATOR],
         timeout=TIMEOUT,
     )
-    assert await check_test_data_existence(
-        primary_ip, should_not_exist=[td1, td2]
-    ), "test data should not exist"
+    assert await check_test_data_existence(primary_ip, should_not_exist=[td1, td2]), (
+        "test data should not exist"
+    )
 
     logger.info(f"Restoring backup {backup_id} with year_after restore-to-time parameter")
     await juju_.run_action(
@@ -184,9 +184,9 @@ async def pitr_operations(
         apps=[MYSQL_APPLICATION_NAME, S3_INTEGRATOR],
         timeout=TIMEOUT,
     )
-    assert await check_test_data_existence(
-        primary_ip, should_exist=[td1, td2]
-    ), "both test data should exist"
+    assert await check_test_data_existence(primary_ip, should_exist=[td1, td2]), (
+        "both test data should exist"
+    )
 
     logger.info(f"Restoring backup {backup_id} with actual restore-to-time parameter")
     await juju_.run_action(
@@ -208,9 +208,9 @@ async def pitr_operations(
         apps=[MYSQL_APPLICATION_NAME, S3_INTEGRATOR],
         timeout=TIMEOUT,
     )
-    assert await check_test_data_existence(
-        primary_ip, should_exist=[td1, td2]
-    ), "both test data should exist"
+    assert await check_test_data_existence(primary_ip, should_exist=[td1, td2]), (
+        "both test data should exist"
+    )
     clean_backups_from_buckets(cloud_configs, cloud_credentials)
 

tests/integration/high_availability/high_availability_helpers.py

Lines changed: 3 additions & 3 deletions
@@ -365,8 +365,8 @@ async def ensure_all_units_continuous_writes_incrementing(
             ops_test, unit, server_config_credentials
         )
         logger.info(f"{max_written_value=} on unit {unit.name}")
-        assert (
-            max_written_value > last_max_written_value
-        ), "Continuous writes not incrementing"
+        assert max_written_value > last_max_written_value, (
+            "Continuous writes not incrementing"
+        )
 
         last_max_written_value = max_written_value

tests/integration/high_availability/test_async_replication.py

Lines changed: 9 additions & 9 deletions
@@ -230,9 +230,9 @@ async def test_standby_promotion(
     assert results[0] > 1, "No data was written to the database"
 
     cluster_set_status = await get_cluster_status(leader_unit, cluster_set=True)
-    assert (
-        cluster_set_status["clusters"]["cuzco"]["clusterrole"] == "primary"
-    ), "standby not promoted to primary"
+    assert cluster_set_status["clusters"]["cuzco"]["clusterrole"] == "primary", (
+        "standby not promoted to primary"
+    )
 
 
 @juju3
@@ -267,12 +267,12 @@ async def test_failover(ops_test: OpsTest, first_model: Model, second_model: Mod
 
     cluster_set_status = await get_cluster_status(leader_unit, cluster_set=True)
     logger.info("Checking clusters statuses")
-    assert (
-        cluster_set_status["clusters"]["lima"]["clusterrole"] == "primary"
-    ), "standby not promoted to primary"
-    assert (
-        cluster_set_status["clusters"]["cuzco"]["globalstatus"] == "invalidated"
-    ), "old primary not invalidated"
+    assert cluster_set_status["clusters"]["lima"]["clusterrole"] == "primary", (
+        "standby not promoted to primary"
+    )
+    assert cluster_set_status["clusters"]["cuzco"]["globalstatus"] == "invalidated", (
+        "old primary not invalidated"
+    )
 
 
 @juju3

tests/integration/high_availability/test_replication_logs_rotation.py

Lines changed: 12 additions & 12 deletions
@@ -67,13 +67,13 @@ async def test_log_rotation(ops_test: OpsTest, highly_available_cluster) -> None
         ops_test, unit.name, f"{CHARMED_MYSQL_COMMON_DIRECTORY}/var/log/mysql/"
     )
 
-    assert len(ls_la_output) == len(
-        log_files
-    ), f"❌ files other than log files exist {ls_la_output}"
+    assert len(ls_la_output) == len(log_files), (
+        f"❌ files other than log files exist {ls_la_output}"
+    )
     directories = [line.split()[-1] for line in ls_la_output]
-    assert sorted(directories) == sorted(
-        log_files
-    ), f"❌ file other than logs files exist: {ls_la_output}"
+    assert sorted(directories) == sorted(log_files), (
+        f"❌ file other than logs files exist: {ls_la_output}"
+    )
 
     logger.info("Executing logrotate")
     return_code, stdout, _ = await ops_test.juju(
@@ -86,13 +86,13 @@ async def test_log_rotation(ops_test: OpsTest, highly_available_cluster) -> None
         ops_test, unit.name, f"{CHARMED_MYSQL_COMMON_DIRECTORY}/var/log/mysql/"
     )
 
-    assert len(ls_la_output) == len(
-        log_files + archive_directories
-    ), f"❌ unexpected files/directories in log directory: {ls_la_output}"
+    assert len(ls_la_output) == len(log_files + archive_directories), (
+        f"❌ unexpected files/directories in log directory: {ls_la_output}"
+    )
     directories = [line.split()[-1] for line in ls_la_output]
-    assert sorted(directories) == sorted(
-        log_files + archive_directories
-    ), f"❌ unexpected files/directories in log directory: {ls_la_output}"
+    assert sorted(directories) == sorted(log_files + archive_directories), (
+        f"❌ unexpected files/directories in log directory: {ls_la_output}"
+    )
 
     logger.info("Ensuring log files were rotated")
     for log in set(log_types):

tests/integration/high_availability/test_replication_reelection.py

Lines changed: 3 additions & 3 deletions
@@ -65,9 +65,9 @@ async def test_kill_primary_check_reelection(ops_test: OpsTest, highly_available
     await scale_application(ops_test, mysql_application_name, 3)
 
     # wait (and retry) until the killed pod is back online in the mysql cluster
-    assert await ensure_n_online_mysql_members(
-        ops_test, 3
-    ), "Old primary has not come back online after being killed"
+    assert await ensure_n_online_mysql_members(ops_test, 3), (
+        "Old primary has not come back online after being killed"
+    )
 
     await ensure_all_units_continuous_writes_incrementing(ops_test)
tests/integration/high_availability/test_self_healing_network_cut.py

Lines changed: 12 additions & 12 deletions
@@ -63,25 +63,25 @@ async def test_network_cut(ops_test: OpsTest, highly_available_cluster, continuo
     }
 
     # verify that connection is possible
-    assert is_connection_possible(
-        config
-    ), f"❌ Connection to host {primary_unit_ip} is not possible"
+    assert is_connection_possible(config), (
+        f"❌ Connection to host {primary_unit_ip} is not possible"
+    )
 
     logger.info(f"Cutting network for {primary_hostname}")
     cut_network_from_unit(primary_hostname)
 
     # verify machine is not reachable from peer units
     for unit in set(all_units) - {primary_unit}:
         hostname = await unit_hostname(ops_test, unit.name)
-        assert not is_machine_reachable_from(
-            hostname, primary_hostname
-        ), "❌ unit is reachable from peer"
+        assert not is_machine_reachable_from(hostname, primary_hostname), (
+            "❌ unit is reachable from peer"
+        )
 
     # verify machine is not reachable from controller
     controller = await get_controller_machine(ops_test)
-    assert not is_machine_reachable_from(
-        controller, primary_hostname
-    ), "❌ unit is reachable from controller"
+    assert not is_machine_reachable_from(controller, primary_hostname), (
+        "❌ unit is reachable from controller"
+    )
 
     # verify that connection is not possible
     assert not is_connection_possible(config), "❌ Connection is possible after network cut"
@@ -107,9 +107,9 @@ async def test_network_cut(ops_test: OpsTest, highly_available_cluster, continuo
     logger.debug(
         f"Waiting until connection possible after network restore on {new_unit_ip}"
     )
-    assert is_connection_possible(
-        new_unit_config
-    ), "❌ Connection is not possible after network restore"
+    assert is_connection_possible(new_unit_config), (
+        "❌ Connection is not possible after network restore"
+    )
 
     logger.info(f"Waiting for {primary_unit.name} to enter active")
     await ops_test.model.block_until(

tests/integration/high_availability/test_self_healing_process_killed.py

Lines changed: 3 additions & 3 deletions
@@ -40,9 +40,9 @@ async def test_kill_db_process(
     primary_unit = await get_primary_unit_wrapper(ops_test, mysql_application_name)
 
     # ensure all units in the cluster are online
-    assert await ensure_n_online_mysql_members(
-        ops_test, 3
-    ), "The deployed mysql application is not fully online"
+    assert await ensure_n_online_mysql_members(ops_test, 3), (
+        "The deployed mysql application is not fully online"
+    )
 
     # get running mysqld PID
     pid = await get_process_pid(ops_test, primary_unit.name, MYSQL_DAEMON)

tests/integration/high_availability/test_self_healing_restart_forceful.py

Lines changed: 3 additions & 3 deletions
@@ -97,9 +97,9 @@ async def test_sst_test(ops_test: OpsTest, highly_available_cluster, continuous_
 
     # verify instance is part of the cluster
     logger.info("Check if instance in cluster")
-    assert await is_unit_in_cluster(
-        primary_unit.name, new_primary_unit
-    ), "❌ Unit not online in the cluster"
+    assert await is_unit_in_cluster(primary_unit.name, new_primary_unit), (
+        "❌ Unit not online in the cluster"
+    )
 
     await ensure_all_units_continuous_writes_incrementing(ops_test)
tests/integration/high_availability/test_self_healing_stop_all.py

Lines changed: 3 additions & 3 deletions
@@ -64,9 +64,9 @@ async def test_cluster_pause(ops_test: OpsTest, highly_available_cluster, contin
     for unit in all_units:
         unit_ip = await get_unit_ip(ops_test, unit.name)
         config["host"] = unit_ip
-        assert not is_connection_possible(
-            config
-        ), f"❌ connection to unit {unit.name} is still possible"
+        assert not is_connection_possible(config), (
+            f"❌ connection to unit {unit.name} is still possible"
+        )
 
     # restart all instances
     logger.info("Starting all instances")

tests/integration/high_availability/test_self_healing_stop_primary.py

Lines changed: 9 additions & 9 deletions
@@ -56,9 +56,9 @@ async def test_replicate_data_on_restart(
     }
 
     # verify that connection is possible
-    assert is_connection_possible(
-        config
-    ), f"❌ Connection to host {primary_unit_ip} is not possible"
+    assert is_connection_possible(config), (
+        f"❌ Connection to host {primary_unit_ip} is not possible"
+    )
 
     # it's necessary to inhibit update-status-hook to stop the service
     # since the charm will restart the service on the hook
@@ -67,9 +67,9 @@ async def test_replicate_data_on_restart(
     await graceful_stop_server(ops_test, primary_unit.name)
 
     # verify that connection is gone
-    assert not is_connection_possible(
-        config
-    ), f"❌ Connection to host {primary_unit_ip} is possible"
+    assert not is_connection_possible(config), (
+        f"❌ Connection to host {primary_unit_ip} is possible"
+    )
 
     # get primary to write to it
     server_config_password = await get_system_user_password(primary_unit, SERVER_CONFIG_USERNAME)
@@ -89,9 +89,9 @@ async def test_replicate_data_on_restart(
     await ops_test.model.set_config({"update-status-hook-interval": "5m"})
 
     # verify/wait availability
-    assert is_connection_possible(
-        config, retry_if_not_possible=True
-    ), "❌ Connection not possible after restart"
+    assert is_connection_possible(config, retry_if_not_possible=True), (
+        "❌ Connection not possible after restart"
+    )
 
     # read and verify data
     select_data_sql = [

0 commit comments
