Skip to content

Commit dbcbd27

Browse files
authored
LPID checks - ensure 'infra' query for extra_jobs has a completed task and parsed job log. (#8905)
1 parent 19fcda5 commit dbcbd27

File tree

2 files changed

+20
-14
lines changed

2 files changed

+20
-14
lines changed

tests/log_parser/test_store_failure_lines.py

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -243,6 +243,11 @@ def mock_full_log_parser(job_logs, mock_parser):
243243
try:
244244
# note: I was using parse_logs, but that is less deterministic
245245
for jl in job_logs:
246+
# if job is already parsed
247+
matching = JobLog.objects.filter(job_id=jl.job.id, name=jl.name, status__in=(1, 3))
248+
if len(matching) == 1:
249+
continue
250+
246251
store_failure_lines(jl)
247252
except:
248253
raise

treeherder/log_parser/intermittents.py

Lines changed: 15 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
import datetime
22

3-
from treeherder.model.models import Group, GroupStatus, Job, JobLog, Push
3+
from treeherder.model.models import Group, GroupStatus, Job, Push
44

55

66
def _check_and_mark_infra(current_job, job_ids, push_ids):
@@ -17,22 +17,23 @@ def _check_and_mark_infra(current_job, job_ids, push_ids):
1717
# look for all jobs in pushids matching current_job.job_type.name
1818
# if older are failing for "infra", then ensure same job is passing
1919
# if so mark as intermittent
20-
extra_jobs = JobLog.objects.filter(
21-
job__push__id__range=(push_ids[-1], push_ids[0]),
22-
job__push__repository__id=current_job.repository.id,
23-
job__job_type__name=current_job.job_type.name,
24-
job__failure_classification_id__in=[1, 6],
25-
status__in=(1, 2, 3), # ignore pending
26-
job__result__in=[
20+
extra_jobs = Job.objects.filter(
21+
push__id__range=(push_ids[-1], push_ids[0]),
22+
repository__id=current_job.repository.id,
23+
job_type__name=current_job.job_type.name,
24+
failure_classification_id__in=[1, 6],
25+
job_log__status__in=[1, 3], # ignore pending, failed
26+
state="completed", # ignore running/pending
27+
result__in=[
2728
"busted",
2829
"testfailed",
2930
"exception",
3031
"success",
3132
], # primarily ignore retry/usercancel
3233
).values(
33-
"job__id",
34-
"job__result",
35-
"job__failure_classification_id",
34+
"id",
35+
"result",
36+
"failure_classification_id",
3637
)
3738

3839
if len(extra_jobs) == 0:
@@ -43,7 +44,7 @@ def _check_and_mark_infra(current_job, job_ids, push_ids):
4344
# jobs without groups (like marionette) will still get tallied properly here
4445
extra_failed = []
4546
for job in extra_jobs:
46-
if job["job__id"] not in job_ids and job["job__result"] != "success":
47+
if job["id"] not in job_ids and job["result"] != "success":
4748
extra_failed.append(job)
4849

4950
# look for failure rate > 50% and exit early
@@ -54,8 +55,8 @@ def _check_and_mark_infra(current_job, job_ids, push_ids):
5455
# theoretically there could be many jobs here
5556
# mark extra_jobs as `intermittent_needs_classification`
5657
for job in extra_failed:
57-
if job["job__failure_classification_id"] not in [4, 8]:
58-
Job.objects.filter(id=job["job__id"]).update(failure_classification_id=8)
58+
if job["failure_classification_id"] not in [4, 8]:
59+
Job.objects.filter(id=job["id"]).update(failure_classification_id=8)
5960

6061

6162
def check_and_mark_intermittent(job_id):

0 commit comments

Comments (0)