Skip to content
This repository was archived by the owner on Jun 13, 2025. It is now read-only.
Merged
Show file tree
Hide file tree
Changes from 5 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
70 changes: 65 additions & 5 deletions graphql_api/tests/test_test_analytics.py
Original file line number Diff line number Diff line change
Expand Up @@ -119,6 +119,28 @@ def test_branch_filter_on_test_results(self) -> None:
)
assert res["testResults"] == {"edges": [{"node": {"name": test.name}}]}

def test_interval_filter_on_test_results(self) -> None:
    """An INTERVAL_1_DAY filter should return only tests whose rollups fall
    within the last day; older rollups are excluded from the results."""
    repo = RepositoryFactory(author=self.owner, active=True, private=True)
    stale_test = TestFactory(repository=repo)
    fresh_test = TestFactory(repository=repo)

    now = datetime.datetime.now()
    # Rollup from a week ago — outside the one-day interval, so its test
    # should not appear in the filtered results.
    DailyTestRollupFactory(
        test=stale_test,
        date=now - datetime.timedelta(days=7),
        repoid=repo.repoid,
        branch="main",
    )
    # Rollup from right now — inside the interval, so its test should appear.
    DailyTestRollupFactory(
        test=fresh_test,
        date=now,
        repoid=repo.repoid,
        branch="feature",
    )

    response = self.fetch_test_analytics(
        repo.name,
        """testResults(filters: { interval: INTERVAL_1_DAY }) { edges { node { name } } }""",
    )
    assert response["testResults"] == {
        "edges": [{"node": {"name": fresh_test.name}}]
    }

def test_flaky_filter_on_test_results(self) -> None:
repo = RepositoryFactory(author=self.owner, active=True, private=True)
test = TestFactory(repository=repo)
Expand Down Expand Up @@ -167,6 +189,7 @@ def test_failed_filter_on_test_results(self) -> None:
assert res["testResults"] == {"edges": [{"node": {"name": test2.name}}]}

def test_skipped_filter_on_test_results(self) -> None:
# note - this test guards against division by zero errors for the failure/flake rate
repo = RepositoryFactory(author=self.owner, active=True, private=True)
test = TestFactory(repository=repo)
test2 = TestFactory(repository=repo)
Expand Down Expand Up @@ -367,8 +390,8 @@ def test_last_duration_ordering_on_test_results(self) -> None:
)
assert res["testResults"] == {
"edges": [
{"node": {"name": test.name, "lastDuration": 0.0}},
{"node": {"name": test_2.name, "lastDuration": 0.0}},
{"node": {"name": test.name, "lastDuration": 2.0}},
{"node": {"name": test_2.name, "lastDuration": 3.0}},
]
}

Expand Down Expand Up @@ -402,8 +425,8 @@ def test_desc_last_duration_ordering_on_test_results(self) -> None:
)
assert res["testResults"] == {
"edges": [
{"node": {"name": test_2.name, "lastDuration": 0.0}},
{"node": {"name": test.name, "lastDuration": 0.0}},
{"node": {"name": test_2.name, "lastDuration": 3.0}},
{"node": {"name": test.name, "lastDuration": 2.0}},
]
}

Expand Down Expand Up @@ -753,6 +776,43 @@ def test_test_results_aggregates_no_history(self) -> None:
"totalSlowTestsPercentChange": None,
}

def test_test_results_aggregates_no_history_7_days(self) -> None:
    """Aggregates over INTERVAL_7_DAY when there is no preceding 7-day window:
    the totals are computed from the current window, and every percent-change
    field is None because there is no prior data to compare against."""
    repo = RepositoryFactory(
        author=self.owner, active=True, private=True, branch="main"
    )

    today = datetime.date.today()
    # Seed one rollup per day for the past week. Days 0, 3 and 6 record a
    # failure (days_ago % 3 == 0) and days 0 and 6 record a skip
    # (days_ago % 6 == 0); every day records one pass.
    for days_ago in range(7):
        DailyTestRollupFactory(
            test=TestFactory(repository=repo),
            repoid=repo.repoid,
            branch="main",
            fail_count=int(days_ago % 3 == 0),
            skip_count=int(days_ago % 6 == 0),
            pass_count=1,
            avg_duration_seconds=float(days_ago),
            last_duration_seconds=float(days_ago),
            date=today - datetime.timedelta(days=days_ago),
        )

    res = self.fetch_test_analytics(
        repo.name,
        """testResultsAggregates(interval: INTERVAL_7_DAY) { totalDuration, slowestTestsDuration, totalFails, totalSkips, totalSlowTests, totalDurationPercentChange, slowestTestsDurationPercentChange, totalFailsPercentChange, totalSkipsPercentChange, totalSlowTestsPercentChange }""",
    )

    assert res["testResultsAggregates"] == {
        "totalDuration": 30.0,
        "totalDurationPercentChange": None,
        "slowestTestsDuration": 12.0,
        "slowestTestsDurationPercentChange": None,
        "totalFails": 3,
        "totalFailsPercentChange": None,
        "totalSkips": 2,
        "totalSkipsPercentChange": None,
        "totalSlowTests": 1,
        "totalSlowTestsPercentChange": None,
    }

def test_flake_aggregates(self) -> None:
repo = RepositoryFactory(
author=self.owner, active=True, private=True, branch="main"
Expand Down Expand Up @@ -924,7 +984,7 @@ def test_flake_aggregates_7_days(self) -> None:

res = self.fetch_test_analytics(
repo.name,
"""flakeAggregates(history: INTERVAL_7_DAY) { flakeCount, flakeRate, flakeCountPercentChange, flakeRatePercentChange }""",
"""flakeAggregates(interval: INTERVAL_7_DAY) { flakeCount, flakeRate, flakeCountPercentChange, flakeRatePercentChange }""",
)

assert res["flakeAggregates"] == {
Expand Down
2 changes: 1 addition & 1 deletion graphql_api/tests/test_test_result.py
Original file line number Diff line number Diff line change
Expand Up @@ -231,7 +231,7 @@ def test_fetch_test_result_last_duration(self) -> None:
result["owner"]["repository"]["testAnalytics"]["testResults"]["edges"][0][
"node"
]["lastDuration"]
== 0.0
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

do you know why this guy changed?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

we stopped automatically setting the lastDuration value to 0; we had stopped computing it because we thought it was single-handedly ruining the performance of the query

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

ahhhh okay yeah i remember

== 1.0
)

def test_fetch_test_result_avg_duration(self) -> None:
Expand Down
2 changes: 1 addition & 1 deletion graphql_api/types/inputs/test_results_filters.graphql
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@ input TestResultsFilters {
parameter: TestResultsFilterParameter
test_suites: [String!]
flags: [String!]
history: MeasurementInterval
interval: MeasurementInterval
term: String
}

Expand Down
4 changes: 2 additions & 2 deletions graphql_api/types/test_analytics/test_analytics.graphql
Original file line number Diff line number Diff line change
Expand Up @@ -13,10 +13,10 @@ type TestAnalytics {
): TestResultConnection! @cost(complexity: 10, multipliers: ["first", "last"])

"Test results aggregates are analytics data totals across all tests"
testResultsAggregates(history: MeasurementInterval): TestResultsAggregates
testResultsAggregates(interval: MeasurementInterval): TestResultsAggregates

"Flake aggregates are flake totals across all tests"
flakeAggregates(history: MeasurementInterval): FlakeAggregates
flakeAggregates(interval: MeasurementInterval): FlakeAggregates
}

type TestResultConnection {
Expand Down
49 changes: 25 additions & 24 deletions graphql_api/types/test_analytics/test_analytics.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@

from codecov.db import sync_to_async
from core.models import Repository
from graphql_api.helpers.connection import queryset_to_connection
from graphql_api.types.enums import OrderingDirection, TestResultsFilterParameter
from graphql_api.types.enums.enum_types import MeasurementInterval
from utils.test_results import (
Expand All @@ -28,70 +27,72 @@ async def resolve_test_results(
info: GraphQLResolveInfo,
ordering=None,
filters=None,
**kwargs,
first: int | None = None,
after: str | None = None,
last: int | None = None,
before: str | None = None,
):
parameter = (
convert_test_results_filter_parameter(filters.get("parameter"))
if filters
else None
)
history = (
convert_history_to_timedelta(filters.get("history"))
interval = (
convert_interval_to_timedelta(filters.get("interval"))
if filters
else timedelta(days=30)
)

queryset = await sync_to_async(generate_test_results)(
repoid=repository.repoid,
history=history,
branch=filters.get("branch") if filters else None,
parameter=parameter,
testsuites=filters.get("test_suites") if filters else None,
flags=filters.get("flags") if filters else None,
term=filters.get("term") if filters else None,
)

return await queryset_to_connection(
queryset,
ordering=(
(ordering.get("parameter"), "name")
(ordering.get("parameter").value, "name")
if ordering
else ("avg_duration", "name")
),
ordering_direction=(
ordering.get("direction") if ordering else OrderingDirection.DESC
),
**kwargs,
repoid=repository.repoid,
interval=interval,
first=first,
after=after,
last=last,
before=before,
branch=filters.get("branch") if filters else None,
parameter=parameter,
testsuites=filters.get("test_suites") if filters else None,
flags=filters.get("flags") if filters else None,
term=filters.get("term") if filters else None,
)

return queryset


@test_analytics_bindable.field("testResultsAggregates")
async def resolve_test_results_aggregates(
repository: Repository,
info: GraphQLResolveInfo,
history: MeasurementInterval | None = None,
interval: MeasurementInterval | None = None,
**_,
):
history = convert_history_to_timedelta(history)
return await sync_to_async(generate_test_results_aggregates)(
repoid=repository.repoid, history=history
repoid=repository.repoid, interval=convert_interval_to_timedelta(interval)
)


@test_analytics_bindable.field("flakeAggregates")
async def resolve_flake_aggregates(
repository: Repository,
info: GraphQLResolveInfo,
history: MeasurementInterval | None = None,
interval: MeasurementInterval | None = None,
**_,
):
history = convert_history_to_timedelta(history)
return await sync_to_async(generate_flake_aggregates)(
repoid=repository.repoid, history=history
repoid=repository.repoid, interval=convert_interval_to_timedelta(interval)
)


def convert_history_to_timedelta(interval: MeasurementInterval | None) -> timedelta:
def convert_interval_to_timedelta(interval: MeasurementInterval | None) -> timedelta:
if interval is None:
return timedelta(days=30)

Expand Down
56 changes: 21 additions & 35 deletions graphql_api/types/test_results/test_results.py
Original file line number Diff line number Diff line change
@@ -1,72 +1,58 @@
from datetime import datetime
from typing import TypedDict

from ariadne import ObjectType
from graphql import GraphQLResolveInfo


class TestDict(TypedDict):
name: str
updated_at: datetime
commits_where_fail: int
failure_rate: float
avg_duration: float
last_duration: float
flake_rate: float
total_fail_count: int
total_skip_count: int
total_pass_count: int
computed_name: str | None

from utils.test_results import TestResultsRow

test_result_bindable = ObjectType("TestResult")


@test_result_bindable.field("name")
def resolve_name(test: TestDict, _: GraphQLResolveInfo) -> str:
return test.get("computed_name") or test["name"].replace("\x1f", " ")
def resolve_name(test: TestResultsRow, _: GraphQLResolveInfo) -> str:
return test.computed_name or test.name.replace("\x1f", " ")


@test_result_bindable.field("updatedAt")
def resolve_updated_at(test: TestDict, _: GraphQLResolveInfo) -> datetime:
return test["updated_at"]
def resolve_updated_at(test: TestResultsRow, _: GraphQLResolveInfo) -> datetime:
return test.updated_at


@test_result_bindable.field("commitsFailed")
def resolve_commits_failed(test: TestDict, _: GraphQLResolveInfo) -> int:
return test["commits_where_fail"]
def resolve_commits_failed(test: TestResultsRow, _: GraphQLResolveInfo) -> int:
return test.commits_where_fail


@test_result_bindable.field("failureRate")
def resolve_failure_rate(test: TestDict, _: GraphQLResolveInfo) -> float:
return test["failure_rate"]
def resolve_failure_rate(test: TestResultsRow, _: GraphQLResolveInfo) -> float:
return test.failure_rate


@test_result_bindable.field("flakeRate")
def resolve_flake_rate(test: TestDict, _: GraphQLResolveInfo) -> float:
return test["flake_rate"]
def resolve_flake_rate(test: TestResultsRow, _: GraphQLResolveInfo) -> float:
return test.flake_rate


@test_result_bindable.field("avgDuration")
def resolve_avg_duration(test: TestDict, _: GraphQLResolveInfo) -> float:
return test["avg_duration"]
def resolve_avg_duration(test: TestResultsRow, _: GraphQLResolveInfo) -> float:
return test.avg_duration


@test_result_bindable.field("lastDuration")
def resolve_last_duration(test: TestDict, _: GraphQLResolveInfo) -> float:
return test["last_duration"]
def resolve_last_duration(test: TestResultsRow, _: GraphQLResolveInfo) -> float:
return test.last_duration


@test_result_bindable.field("totalFailCount")
def resolve_total_fail_count(test: TestDict, _: GraphQLResolveInfo) -> int:
return test["total_fail_count"]
def resolve_total_fail_count(test: TestResultsRow, _: GraphQLResolveInfo) -> int:
return test.total_fail_count


@test_result_bindable.field("totalSkipCount")
def resolve_total_skip_count(test: TestDict, _: GraphQLResolveInfo) -> int:
return test["total_skip_count"]
def resolve_total_skip_count(test: TestResultsRow, _: GraphQLResolveInfo) -> int:
return test.total_skip_count


@test_result_bindable.field("totalPassCount")
def resolve_total_pass_count(test: TestDict, _: GraphQLResolveInfo) -> int:
return test["total_pass_count"]
def resolve_total_pass_count(test: TestResultsRow, _: GraphQLResolveInfo) -> int:
return test.total_pass_count
Loading
Loading