
Commit 089d9ef

Handle disfunctional issue handlers properly in triage (#4476)
### Motivation
Fuchsia testcases were still referencing Monorail, whose API has been deprecated. This caused the cron job to fail prematurely, so no issues were being filed on google3. This PR adds extra logging so we can troubleshoot this kind of issue more easily. It also catches the ValueError in the triage cron job, gracefully handling the case where a testcase references an issue tracker that is not present in the issue tracker config.
1 parent ca619ef commit 089d9ef
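
In short, the triage path now degrades gracefully instead of crashing when the tracker lookup fails. A minimal, self-contained sketch of the pattern (the helper names, project names, and tracker table below are illustrative stand-ins, not the actual ClusterFuzz APIs):

```python
# Illustrative sketch only: lookup_issue_tracker and notify_issue_update stand
# in for the real ClusterFuzz helpers (issue_tracker_utils / issue_filer).

_TRACKER_CONFIG = {'example-project': 'google-issue-tracker'}


def lookup_issue_tracker(project_name):
  # Mirrors the real behaviour: an unknown project raises ValueError.
  if project_name not in _TRACKER_CONFIG:
    raise ValueError(f'Issue tracker for {project_name} does not exist')
  return _TRACKER_CONFIG[project_name]


def notify_issue_update(project_name):
  print(f'No issue tracker detected for {project_name}, publishing message.')


def triage_one(project_name):
  # The fix: a misconfigured tracker becomes "no tracker" instead of an
  # unhandled exception that aborts the whole cron run.
  try:
    issue_tracker = lookup_issue_tracker(project_name)
  except ValueError:
    issue_tracker = None

  if not issue_tracker:
    notify_issue_update(project_name)
    return
  print(f'Filing bug for {project_name} via {issue_tracker}')


triage_one('example-project')  # files a bug
triage_one('other-project')    # previously aborted the cron; now logs and skips
```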

File tree

3 files changed: +39, -3 lines changed

src/clusterfuzz/_internal/cron/grouper.py

Lines changed: 4 additions & 0 deletions
@@ -426,6 +426,10 @@ def group_testcases():
     try:
       issue_tracker = issue_tracker_utils.get_issue_tracker_for_testcase(
           testcase)
+      if issue_tracker:
+        logs.info(
+            f'Running grouping with issue tracker {issue_tracker.project}, '
+            f' for testcase {testcase_id}')
     except ValueError:
       logs.error('Couldn\'t get issue tracker for issue.')
       del testcase_map[testcase_id]

src/clusterfuzz/_internal/cron/triage.py

Lines changed: 25 additions & 1 deletion
@@ -353,37 +353,50 @@ def main():
       testcase = data_handler.get_testcase_by_id(testcase_id)
     except errors.InvalidTestcaseError:
       # Already deleted.
+      logs.info(
+          f'Skipping testcase {testcase_id}, since it was already deleted.')
       continue

     critical_tasks_completed = data_handler.critical_tasks_completed(testcase)

     # Skip if testcase's job is removed.
     if testcase.job_type not in all_jobs:
+      logs.info(f'Skipping testcase {testcase_id}, since its job was removed '
+                f' ({testcase.job_type})')
       continue

     # Skip if testcase's job is in exclusions list.
     if testcase.job_type in excluded_jobs:
+      logs.info(f'Skipping testcase {testcase_id}, since its job is in the'
+                f' exclusion list ({testcase.job_type})')
       continue

     # Emmit the metric for testcases that should be triaged.
     _emit_untriaged_testcase_age_metric(critical_tasks_completed, testcase)

     # Skip if we are running progression task at this time.
     if testcase.get_metadata('progression_pending'):
+      logs.info(f'Skipping testcase {testcase_id}, progression pending')
       continue

     # If the testcase has a bug filed already, no triage is needed.
     if _is_bug_filed(testcase):
+      logs.info(
+          f'Skipping testcase {testcase_id}, since a bug was already filed.')
       continue

     # Check if the crash is important, i.e. it is either a reproducible crash
     # or an unreproducible crash happening frequently.
     if not _is_crash_important(testcase):
+      logs.info(
+          f'Skipping testcase {testcase_id}, since the crash is not important.')
       continue

     # Require that all tasks like minimizaton, regression testing, etc have
     # finished.
     if not critical_tasks_completed:
+      logs.info(
+          f'Skipping testcase {testcase_id}, critical tasks still pending.')
       continue

     # For testcases that are not part of a group, wait an additional time to
@@ -398,29 +411,40 @@ def main():
     # metadata works well.
     if not testcase.group_id and not dates.time_has_expired(
         testcase.timestamp, hours=data_types.MIN_ELAPSED_TIME_SINCE_REPORT):
+      logs.info(f'Skipping testcase {testcase_id}, pending grouping.')
       continue

     if not testcase.get_metadata('ran_grouper'):
       # Testcase should be considered by the grouper first before filing.
+      logs.info(f'Skipping testcase {testcase_id}, pending grouping.')
       continue

     # If this project does not have an associated issue tracker, we cannot
     # file this crash anywhere.
-    issue_tracker = issue_tracker_utils.get_issue_tracker_for_testcase(testcase)
+    try:
+      issue_tracker = issue_tracker_utils.get_issue_tracker_for_testcase(
+          testcase)
+    except ValueError:
+      issue_tracker = None
     if not issue_tracker:
+      logs.info(f'No issue tracker detected for testcase {testcase_id}, '
+                'publishing message.')
       issue_filer.notify_issue_update(testcase, 'new')
       continue

     # If there are similar issues to this test case already filed or recently
     # closed, skip filing a duplicate bug.
     if _check_and_update_similar_bug(testcase, issue_tracker):
+      logs.info(f'Skipping testcase {testcase_id}, since a similar bug'
+                ' was already filed.')
       continue

     # Clean up old triage messages that would be not applicable now.
     testcase.delete_metadata(TRIAGE_MESSAGE_KEY, update_testcase=False)

     # File the bug first and then create filed bug metadata.
     if not _file_issue(testcase, issue_tracker, throttler):
+      logs.info(f'Issue filing failed for testcase id {testcase_id}')
       continue

     _create_filed_bug_metadata(testcase)

src/clusterfuzz/_internal/issue_management/issue_tracker_utils.py

Lines changed: 10 additions & 2 deletions
@@ -21,6 +21,7 @@
 from clusterfuzz._internal.issue_management import issue_tracker_policy
 from clusterfuzz._internal.issue_management import jira
 from clusterfuzz._internal.issue_management import monorail
+from clusterfuzz._internal.metrics import logs
 from clusterfuzz._internal.system import environment

 _ISSUE_TRACKER_CACHE_CAPACITY = 8
@@ -43,7 +44,10 @@ def _get_issue_tracker_project_name(testcase=None):
   """Return issue tracker project name given a testcase or default."""
   from clusterfuzz._internal.datastore import data_handler
   job_type = testcase.job_type if testcase else None
-  return data_handler.get_issue_tracker_name(job_type)
+  issue_tracker_name = data_handler.get_issue_tracker_name(job_type)
+  logs.info(
+      f'For testcase {testcase.key}, using issue tracker {issue_tracker_name}')
+  return issue_tracker_name


 def request_or_task_cache(func):
@@ -66,8 +70,12 @@ def get_issue_tracker(project_name=None):
   issue_project_config = issue_tracker_config.get(project_name)
   if not issue_project_config:
     raise ValueError('Issue tracker for {} does not exist'.format(project_name))
+  logs.info(f'Issue tracker = {project_name}, issue tracker config = '
+            f'{issue_project_config}')

-  constructor = _ISSUE_TRACKER_CONSTRUCTORS.get(issue_project_config['type'])
+  issue_tracker_type = issue_project_config['type']
+  constructor = _ISSUE_TRACKER_CONSTRUCTORS.get(issue_tracker_type)
+  logs.info(f'Using the issue tracker constructor for {issue_tracker_type}')
   if not constructor:
     raise ValueError('Invalid issue tracker type: ' +
                      issue_project_config['type'])
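
For reference, get_issue_tracker resolves the project's entry from the issue tracker config and then looks up a constructor in _ISSUE_TRACKER_CONSTRUCTORS by its 'type'; both the missing-config and unknown-type cases raise ValueError, which is what the triage cron now converts into issue_tracker = None. A toy sketch of that registry shape, with invented config values and constructors:

```python
# Toy sketch of the constructor-registry pattern behind get_issue_tracker().
# The constructors and config below are invented for illustration; the real
# ones come from clusterfuzz._internal.issue_management and the project's
# issue tracker configuration.

_ISSUE_TRACKER_CONSTRUCTORS = {
    'monorail': lambda project, config: f'<monorail tracker for {project}>',
    'jira': lambda project, config: f'<jira tracker for {project}>',
}

_ISSUE_TRACKER_CONFIG = {
    'example-project': {'type': 'monorail'},
}


def get_issue_tracker(project_name):
  issue_project_config = _ISSUE_TRACKER_CONFIG.get(project_name)
  if not issue_project_config:
    # Raised for projects with no config entry; the triage cron now catches
    # this instead of failing prematurely.
    raise ValueError(f'Issue tracker for {project_name} does not exist')

  issue_tracker_type = issue_project_config['type']
  constructor = _ISSUE_TRACKER_CONSTRUCTORS.get(issue_tracker_type)
  if not constructor:
    raise ValueError('Invalid issue tracker type: ' + issue_tracker_type)
  return constructor(project_name, issue_project_config)
```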
