Commit b36400f

chore: capitalize comments in Python files
1 parent 6aa13a9 commit b36400f

91 files changed: 1402 additions, 1402 deletions
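The commit itself doesn't say how the change was produced. Purely as an illustration, here is a minimal sketch of a regex pass that could bulk-capitalize standalone `#` comments in a tree of Python files. The helper name `capitalize_comments` and the pattern are hypothetical, and a real pass would still need manual review for comments that intentionally start lowercase (code identifiers, directives such as `# noqa`):

import pathlib
import re

# Match a full-line comment: leading whitespace, "#", optional spaces, then a lowercase letter.
COMMENT_RE = re.compile(r"^(\s*#\s*)([a-z])")


def capitalize_comments(path: pathlib.Path) -> None:
    """Capitalize the first letter of standalone comments in one file (hypothetical helper)."""
    lines = path.read_text().splitlines(keepends=True)
    new_lines = [COMMENT_RE.sub(lambda m: m.group(1) + m.group(2).upper(), ln) for ln in lines]
    if new_lines != lines:
        path.write_text("".join(new_lines))


for py_file in pathlib.Path("cardano_node_tests").rglob("*.py"):
    capitalize_comments(py_file)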


cardano_node_tests/cardano_cli_coverage.py

Lines changed: 4 additions & 4 deletions
@@ -92,7 +92,7 @@ def merge_coverage(dict_a: dict, dict_b: dict) -> dict:
             dict_a[key] = sorted(new_list)
         elif key in dict_a and isinstance(value, addable) and isinstance(dict_a[key], addable):
             dict_a[key] += value
-        # skipped arguments and commands are not in the available commands dict
+        # Skipped arguments and commands are not in the available commands dict
         elif key not in dict_a:
             continue
         elif not isinstance(value, dict):
@@ -120,17 +120,17 @@ def parse_cmd_output(output: str) -> list[str]:
             section_start = True
             continue
         if section_start:
-            # skip line with wrapped description from previous command
+            # Skip line with wrapped description from previous command
            if line.startswith(" "):
                continue
-            # skip line with subsection description
+            # Skip line with subsection description
            if not line.startswith(" "):
                continue
         line_s = line.strip()
         if not line_s:
             continue
         item = line_s.split()[0]
-        # in case the item looks like "-h,--help", take only the long option
+        # In case the item looks like "-h,--help", take only the long option
         arg = item.split(",")[-1].strip()
         cli_args.append(arg)

cardano_node_tests/cluster_management/cache.py

Lines changed: 3 additions & 3 deletions
@@ -11,9 +11,9 @@ class ClusterManagerCache:
     Here goes only data that makes sense to reuse in multiple tests.
     """

-    # single `ClusterLib` instance can be used in multiple tests executed on the same worker
+    # Single `ClusterLib` instance can be used in multiple tests executed on the same worker
     cluster_obj: clusterlib.ClusterLib | None = None
-    # data for initialized cluster instance
+    # Data for initialized cluster instance
     test_data: dict = dataclasses.field(default_factory=dict)
     addrs_data: dict = dataclasses.field(default_factory=dict)
     last_checksum: str = ""
@@ -22,7 +22,7 @@ class ClusterManagerCache:
 class CacheManager:
     """Set of cache management methods."""

-    # every pytest worker has its own cache, i.e. this cache is local to single worker
+    # Every pytest worker has its own cache, i.e. this cache is local to single worker
     cache: tp.ClassVar[dict[int, ClusterManagerCache]] = {}

     @classmethod

cardano_node_tests/cluster_management/cluster_getter.py

Lines changed: 53 additions & 53 deletions
Large diffs are not rendered by default.

cardano_node_tests/cluster_management/manager.py

Lines changed: 12 additions & 12 deletions
@@ -36,7 +36,7 @@

 def _get_manager_fixture_line_str() -> str:
     """Get `filename#lineno` of current fixture, called from contextmanager."""
-    # get past `cache_fixture` and `contextmanager` to the fixture
+    # Get past `cache_fixture` and `contextmanager` to the fixture
     calling_frame = inspect.currentframe().f_back.f_back.f_back  # type: ignore
     assert calling_frame
     return helpers.get_line_str_from_frame(frame=calling_frame)
@@ -173,7 +173,7 @@ def stop_all_clusters(self) -> None:
         """Stop all cluster instances."""
         self.log("called `stop_all_clusters`")

-        # don't stop cluster if it was started outside of test framework
+        # Don't stop cluster if it was started outside of test framework
         if configuration.DEV_CLUSTER_RUNNING:
             LOGGER.warning("Ignoring request to stop clusters as 'DEV_CLUSTER_RUNNING' is set.")
             return
@@ -261,26 +261,26 @@ def on_test_stop(self) -> None:
         if not list(self.instance_dir.glob(f"{common.RESPIN_NEEDED_GLOB}_*")):
             logfiles.clean_ignore_rules(ignore_file_id=self.worker_id)

-        # remove resource locking files created by the worker, ignore resources that have mark
+        # Remove resource locking files created by the worker, ignore resources that have mark
         resource_locking_files = list(
             self.instance_dir.glob(f"{common.RESOURCE_LOCKED_GLOB}_@@*@@_{self.worker_id}")
         )
         for f in resource_locking_files:
             f.unlink()

-        # remove "resource in use" files created by the worker, ignore resources that have mark
+        # Remove "resource in use" files created by the worker, ignore resources that have mark
         resource_in_use_files = list(
             self.instance_dir.glob(f"{common.RESOURCE_IN_USE_GLOB}_@@*@@_{self.worker_id}")
         )
         for f in resource_in_use_files:
             f.unlink()

-        # remove file that indicates that a test is running on the worker
+        # Remove file that indicates that a test is running on the worker
         next(
             iter(self.instance_dir.glob(f"{common.TEST_RUNNING_GLOB}*_{self.worker_id}"))
         ).unlink(missing_ok=True)

-        # log names of tests that keep running on the cluster instance
+        # Log names of tests that keep running on the cluster instance
         tnames = [
             tf.read_text().strip()
             for tf in self.instance_dir.glob(f"{common.TEST_RUNNING_GLOB}*")
@@ -339,14 +339,14 @@ def _save_cli_coverage(self) -> None:
     def _reload_cluster_obj(self, state_dir: pl.Path) -> None:
         """Reload cluster instance data if necessary."""
         addrs_data_checksum = helpers.checksum(state_dir / cluster_nodes.ADDRS_DATA)
-        # the checksum will not match when cluster was respun
+        # The checksum will not match when cluster was respun
         if addrs_data_checksum == self.cache.last_checksum:
             return

-        # save CLI coverage collected by the old `cluster_obj` instance
+        # Save CLI coverage collected by the old `cluster_obj` instance
         self._save_cli_coverage()

-        # replace the old `cluster_obj` instance and reload data
+        # Replace the old `cluster_obj` instance and reload data
         self.cache.cluster_obj = cluster_nodes.get_cluster_type().get_cluster_obj()
         self.cache.test_data = {}
         self.cache.addrs_data = cluster_nodes.load_addrs_data()
@@ -367,7 +367,7 @@ def init(

         **IMPORTANT**: This method must be called before any other method of this class.
         """
-        # get number of initialized cluster instance once it is possible to start a test
+        # Get number of initialized cluster instance once it is possible to start a test
         instance_num = cluster_getter.ClusterGetter(
             worker_id=self.worker_id,
             pytest_config=self.pytest_config,
@@ -383,11 +383,11 @@ def init(
         )
         self._cluster_instance_num = instance_num

-        # reload cluster instance data if necessary
+        # Reload cluster instance data if necessary
         state_dir = cluster_nodes.get_cluster_env().state_dir
         self._reload_cluster_obj(state_dir=state_dir)

-        # initialize `cardano_clusterlib.ClusterLib` object
+        # Initialize `cardano_clusterlib.ClusterLib` object
         cluster_obj = self.cache.cluster_obj
         if not cluster_obj:
             msg = "`cluster_obj` not available, that cannot happen"

cardano_node_tests/pytest_plugins/xdist_scheduler.py

Lines changed: 8 additions & 8 deletions
@@ -53,7 +53,7 @@ def _split_scope(self, nodeid: str) -> str:
         Example:
             example/loadsuite/test/test_gamma.py::test_beta0[param]@group_name@long
         """
-        # check the index of ']' to avoid the case: parametrize mark value has '@'
+        # Check the index of ']' to avoid the case: parametrize mark value has '@'
         param_end_idx = nodeid.rfind("]")
         scope_start_idx = param_end_idx if param_end_idx != -1 else 0
@@ -105,11 +105,11 @@ def _assign_work_unit(self, node: workermanage.WorkerController) -> None:
         assigned_to_node = self.assigned_work.setdefault(node, collections.OrderedDict())
         scope, work_unit = None, None

-        # check if there are any long-running tests already pending
+        # Check if there are any long-running tests already pending
         long_pending = self._is_long_pending(assigned_to_node)

         if long_pending:
-            # try to find a work unit with no long-running test if there is already a long-running
+            # Try to find a work unit with no long-running test if there is already a long-running
             # test pending
             scope = self._get_short_scope()
             if scope:
@@ -121,14 +121,14 @@ def _assign_work_unit(self, node: workermanage.WorkerController) -> None:
         if scope:
             work_unit = self.workqueue.pop(scope)

-        # grab the first unit of work if none was grabbed above
+        # Grab the first unit of work if none was grabbed above
         if work_unit is None:
             scope, work_unit = self.workqueue.popitem(last=False)

-        # keep track of the assigned work
+        # Keep track of the assigned work
         assigned_to_node[scope] = work_unit

-        # ask the node to execute the workload
+        # Ask the node to execute the workload
         worker_collection = self.registered_collections[node]
         nodeids_indexes = [
             worker_collection.index(nodeid)
@@ -150,7 +150,7 @@ def pytest_collection_modifyitems(items: list) -> None:

         comps = [item.nodeid]

-        # add the group name to nodeid as suffix
+        # Add the group name to nodeid as suffix
         if group_marker:
             gname = (
                 group_marker.args[0]
@@ -159,7 +159,7 @@ def pytest_collection_modifyitems(items: list) -> None:
             )
             comps.append(gname)

-        # add "long" to nodeid as suffix
+        # Add "long" to nodeid as suffix
         if long_marker:
             comps.append(LONG_MARKER)
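A standalone illustration of the `_split_scope` hunk above (a sketch, not the plugin's actual implementation; only part of the method appears in this diff, and the "default" fallback is an assumption). Searching for the `@` scope separator only after the closing `]` of a parametrize value keeps an `@` inside the parameter itself from being mistaken for the separator:

def split_scope_sketch(nodeid: str) -> str:
    # Look for the "@" scope suffix only after the parametrize "]",
    # so a value such as "user@example.com" is not misread as a scope name.
    param_end_idx = nodeid.rfind("]")
    scope_start_idx = param_end_idx if param_end_idx != -1 else 0
    at_idx = nodeid.find("@", scope_start_idx)
    return nodeid[at_idx + 1 :] if at_idx != -1 else "default"


# "test/test_gamma.py::test_beta0[user@example.com]@group_name@long" -> "group_name@long"
# "test/test_gamma.py::test_beta0[param]"                            -> "default"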

cardano_node_tests/tests/common.py

Lines changed: 9 additions & 9 deletions
@@ -30,7 +30,7 @@
 _BLD_SKIP_REASON = "transaction era must be the same as node era"
 BUILD_UNUSABLE = bool(_BLD_SKIP_REASON)

-# common `skipif`s
+# Common `skipif`s
 SKIPIF_BUILD_UNUSABLE = pytest.mark.skipif(
     BUILD_UNUSABLE,
     reason=(
@@ -79,7 +79,7 @@
 )


-# common parametrization
+# Common parametrization
 PARAM_USE_BUILD_CMD = pytest.mark.parametrize(
     "use_build_cmd",
     (
@@ -118,16 +118,16 @@
 )


-# intervals for `wait_for_epoch_interval` (negative values are counted from the end of an epoch)
+# Intervals for `wait_for_epoch_interval` (negative values are counted from the end of an epoch)
 if cluster_nodes.get_cluster_type().type == cluster_nodes.ClusterType.LOCAL:
-    # time buffer at the end of an epoch, enough to do something that takes several transactions
+    # Time buffer at the end of an epoch, enough to do something that takes several transactions
     EPOCH_STOP_SEC_BUFFER = -40
-    # time when all ledger state info is available for the current epoch
+    # Time when all ledger state info is available for the current epoch
     EPOCH_START_SEC_LEDGER_STATE = -19
-    # time buffer at the end of an epoch after getting ledger state info
+    # Time buffer at the end of an epoch after getting ledger state info
     EPOCH_STOP_SEC_LEDGER_STATE = -15
 else:
-    # we can be more generous on testnets
+    # We can be more generous on testnets
     EPOCH_STOP_SEC_BUFFER = -200
     EPOCH_START_SEC_LEDGER_STATE = -300
     EPOCH_STOP_SEC_LEDGER_STATE = -200
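For context, a small worked example (a hypothetical helper, not code from this commit): with negative values counted from the end of an epoch, converting such an offset to an absolute second within the epoch is simple arithmetic once the epoch length is known.

def to_absolute_epoch_second(offset: int, epoch_length_sec: int) -> int:
    # Negative offsets count back from the end of the epoch,
    # e.g. -40 in a 1000-second epoch means second 960.
    return epoch_length_sec + offset if offset < 0 else offset


assert to_absolute_epoch_second(-40, 1000) == 960
assert to_absolute_epoch_second(-200, 1000) == 800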
@@ -166,7 +166,7 @@ def get_test_id(cluster_obj: clusterlib.ClusterLib) -> str:
         f"{curr_test.test_function}{curr_test.test_params}_ci{cluster_obj.cluster_id}_{rand_str}"
     )

-    # log test ID to cluster manager log file - getting test ID happens early
+    # Log test ID to cluster manager log file - getting test ID happens early
     # after the start of a test, so the log entry can be used for determining
     # time of the test start
     cm: cluster_management.ClusterManager = cluster_obj._cluster_manager  # type: ignore
@@ -294,7 +294,7 @@ def fail_on_fork(
         err_msg.append(f"Following nodes appear to be out of sync: {sorted(unsynced_nodes)}")

     if err_msg:
-        # the local cluster needs to be respun before it is usable again
+        # The local cluster needs to be respun before it is usable again
         cluster_manager.set_needs_respin()
         raise AssertionError("\n".join(err_msg))

cardano_node_tests/tests/conftest.py

Lines changed: 11 additions & 11 deletions
@@ -32,10 +32,10 @@
 LOGGER = logging.getLogger(__name__)
 INTERRUPTED_NAME = ".session_interrupted"

-# make sure there's enough time to stop all cluster instances at the end of session
+# Make sure there's enough time to stop all cluster instances at the end of session
 workermanage.NodeManager.EXIT_TIMEOUT = 30

-# use custom xdist scheduler
+# Use custom xdist scheduler
 pytest_plugins = ("cardano_node_tests.pytest_plugins.xdist_scheduler",)


@@ -67,7 +67,7 @@ def pytest_addoption(parser: tp.Any) -> None:


 def pytest_configure(config: tp.Any) -> None:
-    # don't bother collecting metadata if all tests are skipped
+    # Don't bother collecting metadata if all tests are skipped
     if config.getvalue("skipall"):
         return

@@ -144,7 +144,7 @@ def _skip_all_tests(config: tp.Any, items: list) -> bool:

 @pytest.hookimpl(tryfirst=True)
 def pytest_collection_modifyitems(config: tp.Any, items: list) -> None:  # noqa: C901
-    # prevent on slave nodes (xdist)
+    # Prevent on slave nodes (xdist)
     if hasattr(config, "slaveinput"):
         return

@@ -158,12 +158,12 @@ def _mark_needs_dbsync(item: tp.Any) -> None:
         if "needs_dbsync" not in item.keywords:
             return

-        # all tests marked with 'needs_dbsync' are db-sync tests, and should be marked
+        # All tests marked with 'needs_dbsync' are db-sync tests, and should be marked
         # with the 'dbsync' marker as well
         if "dbsync" not in item.keywords:
             item.add_marker(pytest.mark.dbsync)

-        # skip all tests that require db-sync when db-sync is not available
+        # Skip all tests that require db-sync when db-sync is not available
         if not configuration.HAS_DBSYNC:
             item.add_marker(skip_dbsync_marker)

@@ -218,7 +218,7 @@ def _save_all_cluster_instances_artifacts(
     """Save artifacts of all cluster instances after all tests are finished."""
     cluster_manager_obj.log("running `_save_all_cluster_instances_artifacts`")

-    # stop all cluster instances
+    # Stop all cluster instances
     with helpers.ignore_interrupt():
         cluster_manager_obj.save_all_clusters_artifacts()

@@ -227,7 +227,7 @@ def _stop_all_cluster_instances(cluster_manager_obj: cluster_management.ClusterM
     """Stop all cluster instances after all tests are finished."""
     cluster_manager_obj.log("running `_stop_all_cluster_instances`")

-    # stop all cluster instances
+    # Stop all cluster instances
     with helpers.ignore_interrupt():
         cluster_manager_obj.stop_all_clusters()

@@ -237,7 +237,7 @@ def _testnet_cleanup(pytest_root_tmp: pl.Path) -> None:
     if cluster_nodes.get_cluster_type().type != cluster_nodes.ClusterType.TESTNET:
         return

-    # there's only one cluster instance for testnets, so we don't need to use cluster manager
+    # There's only one cluster instance for testnets, so we don't need to use cluster manager
     cluster_obj = cluster_nodes.get_cluster_type().get_cluster_obj()

     destdir = pytest_root_tmp.parent / f"cleanup-{pytest_root_tmp.stem}-{helpers.get_rand_str(8)}"
@@ -339,7 +339,7 @@ def testfile_temp_dir() -> pl.Path:

     The dir is specific to a single test file.
     """
-    # get a dir path based on the test file running
+    # Get a dir path based on the test file running
     dir_path = (
         (os.environ.get("PYTEST_CURRENT_TEST") or "unknown")
         .split("::")[0]
@@ -368,7 +368,7 @@ def cluster_manager(
     request: FixtureRequest,
 ) -> tp.Generator[cluster_management.ClusterManager, None, None]:
     """Return instance of `cluster_management.ClusterManager`."""
-    # hide from traceback to make logs errors more readable
+    # Hide from traceback to make logs errors more readable
     __tracebackhide__ = True  # pylint: disable=unused-variable

     cluster_manager_obj = cluster_management.ClusterManager(
