Skip to content
This repository was archived by the owner on Jul 16, 2025. It is now read-only.

Commit ee2889a

Browse files
Add --max-wait-time option for label analysis (#164)
This option allows customers to decide how long they want to wait for the label analysis result (e.g. the set of labels to run) before falling back to running the entire test suite. I'm adding this option because on one occasion I've seen it take more than 40min to get a response. Most likely some processing error happened and failed silently, or something like that, and the user's CI just waited until it too was killed (for taking too long). One brute-force solution is to have a configurable time limit. I'm also taking this opportunity to refactor label analysis a little and improve coverage.
1 parent c37f0fc commit ee2889a

File tree

2 files changed

+227
-50
lines changed

2 files changed

+227
-50
lines changed

codecov_cli/commands/labelanalysis.py

Lines changed: 46 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,16 @@
11
import logging
22
import time
3+
from typing import List
34

45
import click
56
import requests
67

78
from codecov_cli.fallbacks import CodecovOption, FallbackFieldEnum
89
from codecov_cli.runners import get_runner
9-
from codecov_cli.runners.types import LabelAnalysisRequestResult
10+
from codecov_cli.runners.types import (
11+
LabelAnalysisRequestResult,
12+
LabelAnalysisRunnerInterface,
13+
)
1014

1115
logger = logging.getLogger("codecovcli")
1216

@@ -35,13 +39,21 @@
3539
@click.option(
3640
"--runner-name", "--runner", "runner_name", help="Runner to use", default="python"
3741
)
42+
@click.option(
43+
"--max-wait-time",
44+
"max_wait_time",
45+
help="Max time (in seconds) to wait for the label analysis result before falling back to running all tests. Default is to wait forever.",
46+
default=None,
47+
type=int,
48+
)
3849
@click.pass_context
3950
def label_analysis(
4051
ctx: click.Context,
4152
token: str,
4253
head_commit_sha: str,
4354
base_commit_sha: str,
4455
runner_name: str,
56+
max_wait_time: str,
4557
):
4658
logger.debug(
4759
"Starting label analysis",
@@ -86,20 +98,8 @@ def label_analysis(
8698
"Sorry. Codecov is having problems",
8799
extra=dict(extra_log_attributes=dict(status_code=response.status_code)),
88100
)
89-
if requested_labels:
90-
logger.info(
91-
"Could not get set of tests to run. Falling back to running all collected tests."
92-
)
93-
fake_response = LabelAnalysisRequestResult(
94-
{
95-
"present_report_labels": [],
96-
"absent_labels": requested_labels,
97-
"present_diff_labels": [],
98-
"global_level_labels": [],
99-
}
100-
)
101-
return runner.process_labelanalysis_result(fake_response)
102-
raise click.ClickException("Sorry. Codecov is having problems")
101+
_fallback_to_collected_labels(requested_labels, runner)
102+
return
103103
if response.status_code >= 400:
104104
logger.warning(
105105
"Got a 4XX status code back from Codecov",
@@ -117,6 +117,7 @@ def label_analysis(
117117
eid = response.json()["external_id"]
118118
has_result = False
119119
logger.info("Label request sent. Waiting for result.")
120+
start_wait = time.monotonic()
120121
time.sleep(2)
121122
while not has_result:
122123
resp_data = requests.get(
@@ -134,17 +135,36 @@ def label_analysis(
134135
"Request had problems calculating",
135136
extra=dict(extra_log_attributes=dict(resp_json=resp_json)),
136137
)
137-
if requested_labels:
138-
logger.info("Using requested labels as tests to run")
139-
fake_response = LabelAnalysisRequestResult(
140-
{
141-
"present_report_labels": [],
142-
"absent_labels": requested_labels,
143-
"present_diff_labels": [],
144-
"global_level_labels": [],
145-
}
146-
)
147-
return runner.process_labelanalysis_result(fake_response)
138+
_fallback_to_collected_labels(
139+
collected_labels=requested_labels, runner=runner
140+
)
141+
return
142+
if max_wait_time and (time.monotonic() - start_wait) > max_wait_time:
143+
logger.error(
144+
f"Exceeded max waiting time of {max_wait_time} seconds",
145+
)
146+
_fallback_to_collected_labels(
147+
collected_labels=requested_labels, runner=runner
148+
)
148149
return
149150
logger.info("Waiting more time for result")
150151
time.sleep(5)
152+
153+
154+
def _fallback_to_collected_labels(
155+
collected_labels: List[str], runner: LabelAnalysisRunnerInterface
156+
) -> dict:
157+
logger.info("Trying to fallback on collected labels")
158+
if collected_labels:
159+
logger.info("Using collected labels as tests to run")
160+
fake_response = LabelAnalysisRequestResult(
161+
{
162+
"present_report_labels": [],
163+
"absent_labels": collected_labels,
164+
"present_diff_labels": [],
165+
"global_level_labels": [],
166+
}
167+
)
168+
return runner.process_labelanalysis_result(fake_response)
169+
logger.error("Cannot fallback to collected labels because no labels were collected")
170+
raise click.ClickException("Failed to get list of labels to run")

tests/commands/test_invoke_labelanalysis.py

Lines changed: 181 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,12 @@
1-
from unittest.mock import patch
2-
31
import pytest
42
import responses
53
from click.testing import CliRunner
64
from responses import matchers
75

6+
from codecov_cli.commands.labelanalysis import _fallback_to_collected_labels
7+
from codecov_cli.commands.labelanalysis import time as labelanalysis_time
88
from codecov_cli.main import cli
9+
from codecov_cli.runners.types import LabelAnalysisRequestResult
910
from tests.factory import FakeProvider, FakeRunner, FakeVersioningSystem
1011

1112

@@ -14,6 +15,35 @@ def fake_ci_provider():
1415
return FakeProvider()
1516

1617

18+
@pytest.fixture
19+
def get_labelanalysis_deps(mocker):
20+
fake_ci_provider = FakeProvider()
21+
fake_versioning_system = FakeVersioningSystem()
22+
collected_labels = [
23+
"test_present",
24+
"test_absent",
25+
"test_in_diff",
26+
"test_global",
27+
]
28+
fake_runner = FakeRunner(collect_tests_response=collected_labels)
29+
fake_runner.process_labelanalysis_result = mocker.MagicMock()
30+
31+
mocker.patch.object(labelanalysis_time, "sleep")
32+
mocker.patch("codecov_cli.main.get_ci_adapter", return_value=fake_ci_provider)
33+
mocker.patch(
34+
"codecov_cli.main.get_versioning_system",
35+
return_value=fake_versioning_system,
36+
)
37+
mock_get_runner = mocker.patch(
38+
"codecov_cli.commands.labelanalysis.get_runner", return_value=fake_runner
39+
)
40+
return {
41+
"mock_get_runner": mock_get_runner,
42+
"fake_runner": fake_runner,
43+
"collected_labels": collected_labels,
44+
}
45+
46+
1747
class TestLabelAnalysisCommand(object):
1848
def test_labelanalysis_help(self, mocker, fake_ci_provider):
1949
mocker.patch("codecov_cli.main.get_ci_adapter", return_value=fake_ci_provider)
@@ -31,6 +61,9 @@ def test_labelanalysis_help(self, mocker, fake_ci_provider):
3161
" --head-sha TEXT Commit SHA (with 40 chars) [required]",
3262
" --base-sha TEXT Commit SHA (with 40 chars) [required]",
3363
" --runner-name, --runner TEXT Runner to use",
64+
" --max-wait-time INTEGER Max time (in seconds) to wait for the label",
65+
" analysis result before falling back to running",
66+
" all tests. Default is to wait forever.",
3467
" --help Show this message and exit.",
3568
"",
3669
]
@@ -53,22 +86,10 @@ def test_invoke_label_analysis_missing_base_sha(self, mocker, fake_ci_provider):
5386
assert result.exit_code != 0
5487
assert "Error: Missing option '--base-sha'." in result.output
5588

56-
def test_invoke_label_analysis(self, mocker):
57-
fake_ci_provider = FakeProvider()
58-
fake_versioning_system = FakeVersioningSystem()
59-
fake_runner = FakeRunner(
60-
collect_tests_response=[
61-
"test_present",
62-
"test_absent",
63-
"test_in_diff",
64-
"test_global",
65-
]
66-
)
67-
mocker.patch("codecov_cli.main.get_ci_adapter", return_value=fake_ci_provider)
68-
mocker.patch(
69-
"codecov_cli.main.get_versioning_system",
70-
return_value=fake_versioning_system,
71-
)
89+
def test_invoke_label_analysis(self, get_labelanalysis_deps, mocker):
90+
mock_get_runner = get_labelanalysis_deps["mock_get_runner"]
91+
fake_runner = get_labelanalysis_deps["fake_runner"]
92+
collected_labels = get_labelanalysis_deps["collected_labels"]
7293

7394
label_analysis_result = {
7495
"present_report_labels": ["test_present"],
@@ -77,13 +98,94 @@ def test_invoke_label_analysis(self, mocker):
7798
"global_level_labels": ["test_global"],
7899
}
79100

80-
def side_effect(result):
81-
assert result == label_analysis_result
101+
with responses.RequestsMock() as rsps:
102+
rsps.add(
103+
responses.POST,
104+
"https://api.codecov.io/labels/labels-analysis",
105+
json={"external_id": "label-analysis-request-id"},
106+
status=201,
107+
match=[
108+
matchers.header_matcher({"Authorization": "Repotoken STATIC_TOKEN"})
109+
],
110+
)
111+
rsps.add(
112+
responses.GET,
113+
"https://api.codecov.io/labels/labels-analysis/label-analysis-request-id",
114+
json={"state": "finished", "result": label_analysis_result},
115+
)
116+
cli_runner = CliRunner()
117+
result = cli_runner.invoke(
118+
cli,
119+
["-v", "label-analysis", "--token=STATIC_TOKEN", "--base-sha=BASE_SHA"],
120+
obj={},
121+
)
122+
mock_get_runner.assert_called()
123+
fake_runner.process_labelanalysis_result.assert_called_with(
124+
label_analysis_result
125+
)
126+
print(result.output)
127+
assert result.exit_code == 0
82128

83-
fake_runner.process_labelanalysis_result = side_effect
84-
mock_get_runner = mocker.patch(
85-
"codecov_cli.commands.labelanalysis.get_runner", return_value=fake_runner
129+
def test_fallback_to_collected_labels(self, mocker):
130+
mock_runner = mocker.MagicMock()
131+
collected_labels = ["label_1", "label_2", "label_3"]
132+
fake_response = LabelAnalysisRequestResult(
133+
{
134+
"present_report_labels": [],
135+
"absent_labels": collected_labels,
136+
"present_diff_labels": [],
137+
"global_level_labels": [],
138+
}
86139
)
140+
_fallback_to_collected_labels(collected_labels, mock_runner)
141+
mock_runner.process_labelanalysis_result.assert_called_with(fake_response)
142+
143+
def test_fallback_to_collected_labels_no_labels(self, mocker):
144+
mock_runner = mocker.MagicMock()
145+
with pytest.raises(Exception) as exp:
146+
_fallback_to_collected_labels([], mock_runner)
147+
mock_runner.process_labelanalysis_result.assert_not_called()
148+
assert str(exp.value) == "Failed to get list of labels to run"
149+
150+
def test_fallback_collected_labels_covecov_500_error(
151+
self, get_labelanalysis_deps, mocker
152+
):
153+
mock_get_runner = get_labelanalysis_deps["mock_get_runner"]
154+
fake_runner = get_labelanalysis_deps["fake_runner"]
155+
collected_labels = get_labelanalysis_deps["collected_labels"]
156+
with responses.RequestsMock() as rsps:
157+
rsps.add(
158+
responses.POST,
159+
"https://api.codecov.io/labels/labels-analysis",
160+
status=500,
161+
match=[
162+
matchers.header_matcher({"Authorization": "Repotoken STATIC_TOKEN"})
163+
],
164+
)
165+
cli_runner = CliRunner()
166+
result = cli_runner.invoke(
167+
cli,
168+
["-v", "label-analysis", "--token=STATIC_TOKEN", "--base-sha=BASE_SHA"],
169+
obj={},
170+
)
171+
mock_get_runner.assert_called()
172+
fake_runner.process_labelanalysis_result.assert_called_with(
173+
{
174+
"present_report_labels": [],
175+
"absent_labels": collected_labels,
176+
"present_diff_labels": [],
177+
"global_level_labels": [],
178+
}
179+
)
180+
print(result.output)
181+
assert result.exit_code == 0
182+
183+
def test_fallback_collected_labels_codecov_error_processing_label_analysis(
184+
self, get_labelanalysis_deps, mocker
185+
):
186+
mock_get_runner = get_labelanalysis_deps["mock_get_runner"]
187+
fake_runner = get_labelanalysis_deps["fake_runner"]
188+
collected_labels = get_labelanalysis_deps["collected_labels"]
87189

88190
with responses.RequestsMock() as rsps:
89191
rsps.add(
@@ -98,7 +200,7 @@ def side_effect(result):
98200
rsps.add(
99201
responses.GET,
100202
"https://api.codecov.io/labels/labels-analysis/label-analysis-request-id",
101-
json={"state": "finished", "result": label_analysis_result},
203+
json={"state": "error"},
102204
)
103205
cli_runner = CliRunner()
104206
result = cli_runner.invoke(
@@ -107,5 +209,60 @@ def side_effect(result):
107209
obj={},
108210
)
109211
mock_get_runner.assert_called()
212+
fake_runner.process_labelanalysis_result.assert_called_with(
213+
{
214+
"present_report_labels": [],
215+
"absent_labels": collected_labels,
216+
"present_diff_labels": [],
217+
"global_level_labels": [],
218+
}
219+
)
110220
print(result.output)
111221
assert result.exit_code == 0
222+
223+
def test_fallback_collected_labels_codecov_max_wait_time_exceeded(
224+
self, get_labelanalysis_deps, mocker
225+
):
226+
mock_get_runner = get_labelanalysis_deps["mock_get_runner"]
227+
fake_runner = get_labelanalysis_deps["fake_runner"]
228+
collected_labels = get_labelanalysis_deps["collected_labels"]
229+
mocker.patch.object(labelanalysis_time, "monotonic", side_effect=[0, 6])
230+
231+
with responses.RequestsMock() as rsps:
232+
rsps.add(
233+
responses.POST,
234+
"https://api.codecov.io/labels/labels-analysis",
235+
json={"external_id": "label-analysis-request-id"},
236+
status=201,
237+
match=[
238+
matchers.header_matcher({"Authorization": "Repotoken STATIC_TOKEN"})
239+
],
240+
)
241+
rsps.add(
242+
responses.GET,
243+
"https://api.codecov.io/labels/labels-analysis/label-analysis-request-id",
244+
json={"state": "processing"},
245+
)
246+
cli_runner = CliRunner()
247+
result = cli_runner.invoke(
248+
cli,
249+
[
250+
"-v",
251+
"label-analysis",
252+
"--token=STATIC_TOKEN",
253+
"--base-sha=BASE_SHA",
254+
"--max-wait-time=5",
255+
],
256+
obj={},
257+
)
258+
print(result)
259+
assert result.exit_code == 0
260+
mock_get_runner.assert_called()
261+
fake_runner.process_labelanalysis_result.assert_called_with(
262+
{
263+
"present_report_labels": [],
264+
"absent_labels": collected_labels,
265+
"present_diff_labels": [],
266+
"global_level_labels": [],
267+
}
268+
)

0 commit comments

Comments
 (0)