Skip to content
This repository was archived by the owner on Jul 16, 2025. It is now read-only.

Commit 4be3f77

Browse files
Update --dry-run output (#272)
related to codecov/engineering-team#555 These changes update the output of the `--dry-run` option so that: 1. Users can get a list of the tests that are being skipped 2. Users have the option to parse the output as JSON > ⚠️ > > The JSON output doesn't include the runner options in the tests-to-run list. > Instead, there's a 'runner_options' key with that info. > This is to make sharding the list of tests easier. It's considerably more output, but given that we don't have clear guidelines on the best way to do things, I think it's better to provide all options. Eventually adoption will converge on one way or another, and we can prioritize that.
1 parent 6020fc0 commit 4be3f77

File tree

2 files changed

+96
-24
lines changed

2 files changed

+96
-24
lines changed

codecov_cli/commands/labelanalysis.py

Lines changed: 54 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
import json
12
import logging
23
import pathlib
34
import time
@@ -55,13 +56,23 @@
5556
@click.option(
5657
"--dry-run",
5758
"dry_run",
58-
help='Print list of tests to run and options that need to be added to the test runner as a space-separated list to stdout. Format is ATS_TESTS_TO_RUN="<options> <test_1> <test_2> ... <test_n>"',
59+
help=(
60+
"Print list of tests to run AND tests skipped (and options that need to be added to the test runner) to stdout. "
61+
+ "Also prints the same information in JSON format. "
62+
+ "JSON will have keys 'ats_tests_to_run', 'ats_tests_to_skip' and 'runner_options'. "
63+
+ "List of tests to run is prefixed with ATS_TESTS_TO_RUN= "
64+
+ "List of tests to skip is prefixed with ATS_TESTS_TO_SKIP="
65+
),
5966
is_flag=True,
6067
)
6168
@click.option(
6269
"--dry-run-output-path",
6370
"dry_run_output_path",
64-
help="Prints the dry-run list into dry_run_output_path (in addition to stdout)",
71+
help=(
72+
"Prints the dry-run list (ATS_TESTS_TO_RUN) into dry_run_output_path (in addition to stdout)\n"
73+
+ "AND prints ATS_TESTS_TO_SKIP into dry_run_output_path_skipped\n"
74+
+ "AND prints dry-run JSON output into dry_run_output_path.json"
75+
),
6576
type=pathlib.Path,
6677
default=None,
6778
)
@@ -313,18 +324,50 @@ def _dry_run_output(
313324
runner: LabelAnalysisRunnerInterface,
314325
dry_run_output_path: Optional[pathlib.Path],
315326
):
316-
labels_to_run = list(
317-
set(
318-
result.absent_labels
319-
+ result.global_level_labels
320-
+ result.present_diff_labels
321-
)
327+
labels_to_run = set(
328+
result.absent_labels + result.global_level_labels + result.present_diff_labels
329+
)
330+
labels_skipped = set(result.present_report_labels) - labels_to_run
331+
# If the test label can contain spaces and dashes the test runner might
332+
# interpret it as an option and not a label
333+
# So we wrap it in doublequotes just to be extra sure
334+
labels_run_wrapped_double_quotes = sorted(
335+
map(lambda l: '"' + l + '"', labels_to_run)
336+
)
337+
labels_skip_wrapped_double_quotes = sorted(
338+
map(lambda l: '"' + l + '"', labels_skipped)
339+
)
340+
341+
output_as_dict = dict(
342+
runner_options=runner.dry_run_runner_options,
343+
ats_tests_to_run=labels_run_wrapped_double_quotes,
344+
ats_tests_to_skip=labels_skip_wrapped_double_quotes,
322345
)
323-
output = runner.dry_run_runner_options + sorted(labels_to_run)
324346
if dry_run_output_path is not None:
325347
with open(dry_run_output_path, "w") as fd:
326-
fd.write(" ".join(output) + "\n")
327-
click.echo(f"ATS_TESTS_TO_RUN=\"{' '.join(output)}\"")
348+
fd.write(
349+
" ".join(
350+
runner.dry_run_runner_options + labels_run_wrapped_double_quotes
351+
)
352+
+ "\n"
353+
)
354+
with open(str(dry_run_output_path) + "_skipped", "w") as fd:
355+
fd.write(
356+
" ".join(
357+
runner.dry_run_runner_options + labels_skip_wrapped_double_quotes
358+
)
359+
+ "\n"
360+
)
361+
with open(str(dry_run_output_path) + ".json", "w") as fd:
362+
fd.write(json.dumps(output_as_dict) + "\n")
363+
364+
click.echo(json.dumps(output_as_dict))
365+
click.echo(
366+
f"ATS_TESTS_TO_RUN={' '.join(runner.dry_run_runner_options + labels_run_wrapped_double_quotes)}"
367+
)
368+
click.echo(
369+
f"ATS_TESTS_TO_SKIP={' '.join(runner.dry_run_runner_options + labels_skip_wrapped_double_quotes)}"
370+
)
328371

329372

330373
def _fallback_to_collected_labels(

tests/commands/test_invoke_labelanalysis.py

Lines changed: 42 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -123,13 +123,19 @@ def test_labelanalysis_help(self, mocker, fake_ci_provider):
123123
" --max-wait-time INTEGER Max time (in seconds) to wait for the label",
124124
" analysis result before falling back to running",
125125
" all tests. Default is to wait forever.",
126-
" --dry-run Print list of tests to run and options that need",
127-
" to be added to the test runner as a space-",
128-
" separated list to stdout. Format is",
129-
' ATS_TESTS_TO_RUN="<options> <test_1> <test_2>',
130-
' ... <test_n>"',
131-
" --dry-run-output-path PATH Prints the dry-run list into dry_run_output_path",
132-
" (in addition to stdout)",
126+
" --dry-run Print list of tests to run AND tests skipped",
127+
" (and options that need to be added to the test",
128+
" runner) to stdout. Also prints the same",
129+
" information in JSON format. JSON will have keys",
130+
" 'ats_tests_to_run', 'ats_tests_to_skip' and",
131+
" 'runner_options'. List of tests to run is",
132+
" prefixed with ATS_TESTS_TO_RUN= List of tests to",
133+
" skip is prefixed with ATS_TESTS_TO_SKIP=",
134+
" --dry-run-output-path PATH Prints the dry-run list (ATS_TESTS_TO_RUN) into",
135+
" dry_run_output_path (in addition to stdout) AND",
136+
" prints ATS_TESTS_TO_SKIP into",
137+
" dry_run_output_path_skipped AND prints dry-run",
138+
" JSON output into dry_run_output_path.json",
133139
" -h, --help Show this message and exit.",
134140
"",
135141
]
@@ -231,7 +237,7 @@ def test_invoke_label_analysis_dry_run(self, get_labelanalysis_deps, mocker):
231237
fake_runner = get_labelanalysis_deps["fake_runner"]
232238

233239
label_analysis_result = {
234-
"present_report_labels": ["test_present"],
240+
"present_report_labels": ["test_present", "test_in_diff", "test_global"],
235241
"absent_labels": ["test_absent"],
236242
"present_diff_labels": ["test_in_diff"],
237243
"global_level_labels": ["test_global"],
@@ -278,9 +284,14 @@ def test_invoke_label_analysis_dry_run(self, get_labelanalysis_deps, mocker):
278284
print(result.output)
279285
assert result.exit_code == 0
280286
assert (
281-
'ATS_TESTS_TO_RUN="--labels test_absent test_global test_in_diff'
287+
'{"runner_options": ["--labels"], "ats_tests_to_run": ["\\"test_absent\\"", "\\"test_global\\"", "\\"test_in_diff\\""], "ats_tests_to_skip": ["\\"test_present\\""]}'
282288
in result.output
283289
)
290+
assert (
291+
'ATS_TESTS_TO_RUN=--labels "test_absent" "test_global" "test_in_diff"'
292+
in result.output
293+
)
294+
assert 'ATS_TESTS_TO_SKIP=--labels "test_present"' in result.output
284295

285296
def test_invoke_label_analysis_dry_run_with_output_path(
286297
self, get_labelanalysis_deps, mocker
@@ -289,7 +300,7 @@ def test_invoke_label_analysis_dry_run_with_output_path(
289300
fake_runner = get_labelanalysis_deps["fake_runner"]
290301

291302
label_analysis_result = {
292-
"present_report_labels": ["test_present"],
303+
"present_report_labels": ["test_present", "test_in_diff", "test_global"],
293304
"absent_labels": ["test_absent"],
294305
"present_diff_labels": ["test_in_diff"],
295306
"global_level_labels": ["test_global"],
@@ -332,20 +343,38 @@ def test_invoke_label_analysis_dry_run_with_output_path(
332343
],
333344
obj={},
334345
)
335-
labels_file = Path("ats_output_path")
346+
print(result)
347+
print(result.output)
348+
ats_output_path = "ats_output_path"
349+
labels_file = Path(ats_output_path)
350+
skip_labels_file = Path(ats_output_path + "_skipped")
351+
json_output = Path(ats_output_path + ".json")
336352
assert labels_file.exists() and labels_file.is_file()
353+
assert skip_labels_file.exists() and skip_labels_file.is_file()
354+
assert json_output.exists() and json_output.is_file()
337355
with open(labels_file, "r") as fd:
338356
assert fd.readlines() == [
339-
"--labels test_absent test_global test_in_diff\n"
357+
'--labels "test_absent" "test_global" "test_in_diff"\n'
358+
]
359+
with open(skip_labels_file, "r") as fd:
360+
assert fd.readlines() == ['--labels "test_present"\n']
361+
with open(json_output, "r") as fd:
362+
assert fd.readlines() == [
363+
'{"runner_options": ["--labels"], "ats_tests_to_run": ["\\"test_absent\\"", "\\"test_global\\"", "\\"test_in_diff\\""], "ats_tests_to_skip": ["\\"test_present\\""]}\n'
340364
]
341365
mock_get_runner.assert_called()
342366
fake_runner.process_labelanalysis_result.assert_not_called()
343367
print(result.output)
344368
assert result.exit_code == 0
345369
assert (
346-
'ATS_TESTS_TO_RUN="--labels test_absent test_global test_in_diff'
370+
'{"runner_options": ["--labels"], "ats_tests_to_run": ["\\"test_absent\\"", "\\"test_global\\"", "\\"test_in_diff\\""], "ats_tests_to_skip": ["\\"test_present\\""]}'
371+
in result.output
372+
)
373+
assert (
374+
'ATS_TESTS_TO_RUN=--labels "test_absent" "test_global" "test_in_diff"'
347375
in result.output
348376
)
377+
assert 'ATS_TESTS_TO_SKIP=--labels "test_present"' in result.output
349378

350379
def test_fallback_to_collected_labels(self, mocker):
351380
mock_runner = mocker.MagicMock()

0 commit comments

Comments
 (0)