Commit 4d483f1

Author: Alan Christie (committed)
fix: Failure when finding no tests now more obvious (and style change for typing)
1 parent 40ea6d5 commit 4d483f1

File tree: 1 file changed (+60 / -45 lines)

src/jote/jote.py

Lines changed: 60 additions & 45 deletions
@@ -12,7 +12,7 @@
 from stat import S_IRGRP, S_IRUSR, S_IWGRP, S_IWUSR
 import subprocess
 import sys
-from typing import Any, Dict, List, Optional, Tuple
+from typing import Any
 
 from munch import DefaultMunch
 import yaml
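
The import change above is the commit's "style change for typing": the typing aliases Dict, List, Optional and Tuple are replaced throughout the file by the built-in generics of PEP 585 and the X | None unions of PEP 604, so only Any still needs importing. A minimal before/after sketch (the function here is illustrative, not part of jote); the new spellings need Python 3.9+ for dict[...]/list[...] and 3.10+ for | None in annotations:

    from typing import Any, Dict, Optional  # old-style aliases

    def old_style(data: Optional[Dict[str, Any]]) -> Optional[str]:
        return None if data is None else str(len(data))

    # New style: built-in generics (3.9+) and "| None" unions (3.10+).
    def new_style(data: dict[str, Any] | None) -> str | None:
        return None if data is None else str(len(data))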
@@ -97,7 +97,7 @@ def _lint(definition_filename: str) -> bool:
     return True
 
 
-def _get_test_input_url_prefix(test_input_string: str) -> Optional[str]:
+def _get_test_input_url_prefix(test_input_string: str) -> str | None:
     """Gets the string's file prefix (e.g. "file://") from what's expected to be
     a test input string or None if there isn't one. If the prefix is "file://"
     this function returns "file://".
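
The docstring above is the whole contract of _get_test_input_url_prefix(). A minimal sketch of that behaviour, assuming prefixes look like "scheme://"; this is not jote's implementation, just an illustration:

    def prefix_of(test_input_string: str) -> str | None:
        # Return the "scheme://" prefix if present, otherwise None.
        if "://" in test_input_string:
            return test_input_string.split("://", 1)[0] + "://"
        return None

    assert prefix_of("file://input.sdf") == "file://"  # illustrative file name
    assert prefix_of("input.sdf") is None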
@@ -112,13 +112,13 @@ def _validate_schema(definition_filename: str) -> bool:
     """Checks the Job Definition against the decoder's schema."""
 
     with open(definition_filename, "rt", encoding="UTF-8") as definition_file:
-        job_def: Optional[Dict[str, Any]] = yaml.load(
+        job_def: dict[str, Any] | None = yaml.load(
             definition_file, Loader=yaml.FullLoader
         )
     assert job_def
 
     # If the decoder returns something there's been an error.
-    error: Optional[str] = decoder.validate_job_schema(job_def)
+    error: str | None = decoder.validate_job_schema(job_def)
     if error:
         print(
             f'! Job definition "{definition_filename}"' " does not comply with schema"
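
The dict[str, Any] | None annotation, and the assert that follows, exist because yaml.load() returns None for an empty document. A small illustration; "collection" is a key that does appear in job-definition files, the value here is made up:

    import yaml

    print(yaml.load("", Loader=yaml.FullLoader))                    # None
    print(yaml.load("collection: demo\n", Loader=yaml.FullLoader))  # {'collection': 'demo'}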
@@ -134,13 +134,13 @@ def _validate_manifest_schema(manifest_filename: str) -> bool:
     """Checks the Manifest against the decoder's schema."""
 
     with open(manifest_filename, "rt", encoding="UTF-8") as definition_file:
-        job_def: Optional[Dict[str, Any]] = yaml.load(
+        job_def: dict[str, Any] | None = yaml.load(
             definition_file, Loader=yaml.FullLoader
         )
     assert job_def
 
     # If the decoder returns something there's been an error.
-    error: Optional[str] = decoder.validate_manifest_schema(job_def)
+    error: str | None = decoder.validate_manifest_schema(job_def)
     if error:
         print(f'! Manifest "{manifest_filename}"' " does not comply with schema")
         print("! Full response follows:")
@@ -154,7 +154,7 @@ def _check_cwd() -> bool:
     """Checks the execution directory for sanity (cwd). Here we must find
     a data-manager directory
     """
-    expected_directories: List[str] = [_DEFINITION_DIRECTORY, _DATA_DIRECTORY]
+    expected_directories: list[str] = [_DEFINITION_DIRECTORY, _DATA_DIRECTORY]
     for expected_directory in expected_directories:
         if not os.path.isdir(expected_directory):
             print(f'! Expected directory "{expected_directory}"' " but it is not here")
@@ -167,10 +167,10 @@ def _add_grouped_test(
     jd_filename: str,
     job_collection: str,
     job_name: str,
-    job: List[DefaultMunch],
-    run_group_names: List[str],
-    test_groups: List[DefaultMunch],
-    grouped_job_definitions: Dict[str, Any],
+    job: list[DefaultMunch],
+    run_group_names: list[str],
+    test_groups: list[DefaultMunch],
+    grouped_job_definitions: dict[str, Any],
 ) -> None:
     """Adds a job definition to a test group for a job-definition file.
 
@@ -180,7 +180,7 @@ def _add_grouped_test(
 
     for run_group_name in run_group_names:
         # Find the test-group for this test
-        test_group_definition: Optional[DefaultMunch] = None
+        test_group_definition: DefaultMunch | None = None
        for test_group in test_groups:
            if test_group.name == run_group_name:
                test_group_definition = test_group
@@ -224,7 +224,7 @@ def _add_grouped_test(
 
 def _load(
     manifest_filename: str, skip_lint: bool
-) -> Tuple[List[DefaultMunch], Dict[str, Any], int]:
+) -> tuple[list[DefaultMunch], dict[str, Any], int]:
     """Loads definition files listed in the manifest
     and extracts the definitions that contain at least one test. The
     definition blocks for those that have tests (ignored or otherwise)
@@ -248,10 +248,10 @@ def _load(
         return [], {}, -1
 
     with open(manifest_path, "r", encoding="UTF-8") as manifest_file:
-        manifest: Dict[str, Any] = yaml.load(manifest_file, Loader=yaml.FullLoader)
-        manifest_munch: Optional[DefaultMunch] = None
+        manifest: dict[str, Any] = yaml.load(manifest_file, Loader=yaml.FullLoader)
+        manifest_munch: DefaultMunch | None = None
         if manifest:
             manifest_munch = DefaultMunch.fromDict(manifest)
     assert manifest_munch
 
     # Iterate through the named files.
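
The loaded manifest is wrapped in a DefaultMunch (note munch spells the classmethod fromDict) so later code can use attribute access, with missing keys resolving to the default rather than raising. A minimal sketch, assuming munch's usual default of None and an illustrative manifest:

    from munch import DefaultMunch

    manifest = {"job-definition-files": ["example-jobs.yaml"]}  # illustrative content
    manifest_munch = DefaultMunch.fromDict(manifest)

    print(manifest_munch["job-definition-files"])  # ['example-jobs.yaml']
    print(manifest_munch.missing_key)              # None, not a KeyError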
@@ -265,8 +265,8 @@ def _load(
     # <test compose file>
     # - <job-definition>
     #
-    job_definitions: List[DefaultMunch] = []
-    grouped_job_definitions: Dict[str, Any] = {}
+    job_definitions: list[DefaultMunch] = []
+    grouped_job_definitions: dict[str, Any] = {}
     num_tests: int = 0
 
     for jd_filename in manifest_munch["job-definition-files"]:
@@ -283,15 +283,15 @@ def _load(
 
         # Load the Job definitions optionally compiling a set of 'run-groups'
         with open(jd_path, "r", encoding="UTF-8") as jd_file:
-            job_def: Dict[str, Any] = yaml.load(jd_file, Loader=yaml.FullLoader)
+            job_def: dict[str, Any] = yaml.load(jd_file, Loader=yaml.FullLoader)
 
         if job_def:
             jd_munch: DefaultMunch = DefaultMunch.fromDict(job_def)
 
             jd_collection: str = jd_munch["collection"]
 
             # Test groups defined in this file...
-            test_groups: List[DefaultMunch] = []
+            test_groups: list[DefaultMunch] = []
             if "test-groups" in jd_munch:
                 for test_group in jd_munch["test-groups"]:
                     test_groups.append(test_group)
@@ -300,7 +300,7 @@ def _load(
             # It goes into 'job_definitions' if it has at least one non-grouped test,
             # and into 'grouped_job_definitions' if it has at least one grouped test.
             for jd_name in jd_munch.jobs:
-                test_run_group_names: List[str] = []
+                test_run_group_names: list[str] = []
                 if jd_munch.jobs[jd_name].tests:
                     # Job has some tests
                     num_tests += len(jd_munch.jobs[jd_name].tests)
@@ -340,7 +340,7 @@ def _load(
     return job_definitions, grouped_job_definitions, num_tests
 
 
-def _copy_inputs(test_inputs: List[str], project_path: str) -> bool:
+def _copy_inputs(test_inputs: list[str], project_path: str) -> bool:
     """Copies all the test files into the test project directory."""
 
     # The files are assumed to reside in the repo's 'data' directory.
@@ -448,7 +448,7 @@ def _check(
     assert t_compose
     assert isinstance(t_compose, Compose)
     assert output_checks
-    assert isinstance(output_checks, List)
+    assert isinstance(output_checks, list)
 
     print("# Checking...")
 
@@ -484,9 +484,9 @@ def _run_nextflow(
     command: str,
     project_path: str,
     nextflow_config_file: str,
-    test_environment: Optional[Dict[str, str]] = None,
+    test_environment: dict[str, str] | None = None,
     timeout_minutes: int = DEFAULT_TEST_TIMEOUT_M,
-) -> Tuple[int, str, str]:
+) -> tuple[int, str, str]:
     """Runs nextflow in the project directory returning the exit code,
     stdout and stderr.
     """
@@ -527,7 +527,7 @@ def _run_nextflow(
     # Inject an environment?
     # Yes if some variables are provided.
     # We copy the existing env and add those provided.
-    env: Optional[Dict[str, Any]] = None
+    env: dict[str, Any] | None = None
     if test_environment:
         env = os.environ.copy()
         env.update(test_environment)
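
The copy-then-update pattern above gives the child process the caller's environment plus the test-specific variables. A minimal sketch of the same pattern with subprocess (the variable name is made up, and printenv assumes a POSIX system):

    import os
    import subprocess

    test_environment = {"EXAMPLE_TOKEN": "1234"}

    env = os.environ.copy()        # inherit the current environment...
    env.update(test_environment)   # ...then overlay the injected variables

    result = subprocess.run(
        ["printenv", "EXAMPLE_TOKEN"],
        env=env, capture_output=True, text=True, check=False,
    )
    print(result.stdout.strip())   # 1234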
@@ -556,8 +556,8 @@ def _run_a_test(
     job_definition: DefaultMunch,
     test_group: str = "",
     test_group_ordinal: int = 0,
-    test_group_environment: Optional[Dict[str, Any]] = None,
-) -> Tuple[Optional[Compose], TestResult]:
+    test_group_environment: dict[str, Any] | None = None,
+) -> tuple[Compose | None, TestResult]:
     """Runs a single test printing a test group and non-zero optional ordinal,
     which is used for group test runs. If a test group is provided a valid ordinal
     (1..N) must also be used."""
@@ -600,7 +600,7 @@ def _run_a_test(
     # Render the command for this test.
 
     # First extract any variables and values from 'options' (if there are any).
-    job_variables: Dict[str, Any] = {}
+    job_variables: dict[str, Any] = {}
     if job_definition.tests[job_test_name].options:
         for variable in job_definition.tests[job_test_name].options:
             job_variables[variable] = job_definition.tests[job_test_name].options[
@@ -627,7 +627,7 @@ def _run_a_test(
     # A list of input files (relative to this directory)
     # We populate this with everything we find declared as an input
     # (unless it's of type 'molecules' and the input looks like a molecule)
-    input_files: List[str] = []
+    input_files: list[str] = []
 
     # Process every 'input'
     if job_definition.tests[job_test_name].inputs:
@@ -707,7 +707,7 @@ def _run_a_test(
             job_variables[variable] = ",".join(basename_values)
 
     decoded_command: str = ""
-    test_environment: Dict[str, str] = {}
+    test_environment: dict[str, str] = {}
 
     # Jote injects Job variables that are expected.
     # 'DM_' variables are injected by the Data Manager,
@@ -725,7 +725,7 @@ def _run_a_test(
         if test_group_environment and env_name in test_group_environment:
             # The environment variable is provided by the test group,
             # we don't need to go to the OS, we'll use what's provided.
-            env_value: Optional[str] = str(test_group_environment[env_name])
+            env_value: str | None = str(test_group_environment[env_name])
         else:
             env_value = os.environ.get(env_name, None)
             if env_value is None:
@@ -888,7 +888,7 @@ def _run_ungrouped_tests(
     collection: str,
     job: str,
     job_definition: DefaultMunch,
-) -> Tuple[int, int, int, int]:
+) -> tuple[int, int, int, int]:
     """Runs the tests for a specific Job definition returning the number
     of tests passed, skipped (due to run-level), ignored and failed.
     """
@@ -944,15 +944,16 @@ def _run_ungrouped_tests(
 
 def _run_grouped_tests(
     args: argparse.Namespace,
-    grouped_job_definitions: Dict[str, Any],
-) -> Tuple[int, int, int, int]:
+    grouped_job_definitions: dict[str, Any],
+) -> tuple[int, int, int, int, int]:
     """Runs grouped tests.
     Tests are provided indexed by job-definition file path.
     Here we run all the tests that belong to a group without resetting
     between the tests. At the end of each group we clean up.
     """
 
     # The test status, assume success
+    tests_found: int = 0
     tests_passed: int = 0
     tests_skipped: int = 0
     tests_ignored: int = 0
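
Widening the return type to a 5-tuple is why main() has to change in the same commit: unpacking five values into the old four names raises a ValueError. A tiny illustration with made-up counts:

    counts = (3, 2, 0, 0, 1)  # found, passed, skipped, ignored, failed
    try:
        passed, skipped, ignored, failed = counts
    except ValueError as exc:
        print(exc)            # too many values to unpack (expected 4)
    found, passed, skipped, ignored, failed = counts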
@@ -968,7 +969,7 @@ def _run_grouped_tests(
     #
     # See '_add_grouped_test()', which is used by _load() to build the map.
 
-    test_result: Optional[TestResult] = None
+    test_result: TestResult | None = None
     for jd_filename, grouped_tests in grouped_job_definitions.items():
         # The grouped definitions are indexed by JobDefinition filename
         # and for each there is a list of dictionaries (indexed by group name).
@@ -978,8 +979,8 @@ def _run_grouped_tests(
            # A specific group has been named
            # and this isn't it, so skip these tests.
            continue
-        group_struct: Dict[str, Any] = file_run_group["test-group"]
-        jobs: List[Tuple[str, str, DefaultMunch]] = file_run_group["jobs"]
+        group_struct: dict[str, Any] = file_run_group["test-group"]
+        jobs: list[tuple[str, str, DefaultMunch]] = file_run_group["jobs"]
 
         # We have a run-group structure (e.g. a name and optional compose file)
         # and a list of jobs (job definitions), each with at least one test in
@@ -1032,6 +1033,7 @@ def _run_grouped_tests(
                )
                tests_failed += 1
                return (
+                    tests_found,
                    tests_passed,
                    tests_skipped,
                    tests_ignored,
@@ -1059,11 +1061,12 @@ def _run_grouped_tests(
         # 1. Apply the group compose file (if there is one)
         # 2. run the tests (in ordinal order)
         # 3. stop the compose file
-        group_compose_file: Optional[str] = None
+        group_compose_file: str | None = None
         for index, grouped_test in enumerate(grouped_tests):
             # For each grouped test we have a test-group definition [at index 0],
             # an 'ordinal' [1], 'collection' [2], 'job name' [3], 'job test' [4]
             # and the 'job' definition [5]
+            tests_found += 1
 
             # Start the group compose file?
             if index == 0 and "compose" in grouped_test[0] and not args.dry_run:
10861089
break
10871090

10881091
# Does the test group define an environment?
1089-
test_group_environment: Dict[str, Any] = {}
1092+
test_group_environment: dict[str, Any] = {}
10901093
if grouped_test[0].environment:
10911094
for gt_env in grouped_test[0].environment:
10921095
key: str = list(gt_env.keys())[0]
@@ -1136,7 +1139,7 @@ def _run_grouped_tests(
11361139
if test_result == TestResult.FAILED and args.exit_on_failure:
11371140
break
11381141

1139-
return tests_passed, tests_skipped, tests_ignored, tests_failed
1142+
return tests_found, tests_passed, tests_skipped, tests_ignored, tests_failed
11401143

11411144

11421145
def _wipe() -> None:
@@ -1335,6 +1338,7 @@ def main() -> int:
13351338
arg_parser.error("Cannot use --run-groups and --test")
13361339

13371340
# Args are OK if we get here.
1341+
total_found_count: int = 0
13381342
total_passed_count: int = 0
13391343
total_skipped_count: int = 0
13401344
total_ignore_count: int = 0
@@ -1424,10 +1428,17 @@ def main() -> int:
14241428
# Success so far.
14251429
# Run grouped tests?
14261430
if grouped_job_definitions:
1427-
num_passed, num_skipped, num_ignored, num_failed = _run_grouped_tests(
1431+
(
1432+
num_found,
1433+
num_passed,
1434+
num_skipped,
1435+
num_ignored,
1436+
num_failed,
1437+
) = _run_grouped_tests(
14281438
args,
14291439
grouped_job_definitions,
14301440
)
1441+
total_found_count += num_found
14311442
total_passed_count += num_passed
14321443
total_skipped_count += num_skipped
14331444
total_ignore_count += num_ignored
@@ -1438,7 +1449,8 @@ def main() -> int:
14381449
print(" ---")
14391450
dry_run: str = "[DRY RUN]" if args.dry_run else ""
14401451
summary: str = (
1441-
f"passed={total_passed_count}"
1452+
f"found={total_found_count}"
1453+
f" passed={total_passed_count}"
14421454
f" skipped={total_skipped_count}"
14431455
f" ignored={total_ignore_count}"
14441456
f" failed={total_failed_count}"
@@ -1447,9 +1459,12 @@ def main() -> int:
14471459
if total_failed_count:
14481460
arg_parser.error(f"Done (FAILURE) {summary} {dry_run}")
14491461
failed = True
1462+
elif total_found_count == 0 and not args.allow_no_tests:
1463+
arg_parser.error(f"Done (FAILURE) {summary} (no tests were found) {dry_run}")
1464+
failed = True
14501465
elif total_passed_count == 0 and not args.allow_no_tests:
14511466
arg_parser.error(
1452-
f"Done (FAILURE) {summary}" f" (at least one test must pass)" f" {dry_run}"
1467+
f"Done (FAILURE) {summary} (at least one test must pass) {dry_run}"
14531468
)
14541469
failed = True
14551470
else:

0 commit comments

Comments
 (0)