diff --git a/graphql_api/tests/test_test_result.py b/graphql_api/tests/test_test_result.py
index 0970030084..93e974e4fa 100644
--- a/graphql_api/tests/test_test_result.py
+++ b/graphql_api/tests/test_test_result.py
@@ -50,34 +50,6 @@ def setUp(self):
             flaky_fail_count=1,
         )
 
-    def test_fetch_test_result_name(self) -> None:
-        query = """
-            query {
-                owner(username: "%s") {
-                    repository(name: "%s") {
-                        ... on Repository {
-                            testAnalytics {
-                                testResults {
-                                    edges {
-                                        node {
-                                            name
-                                        }
-                                    }
-                                }
-                            }
-                        }
-                    }
-                }
-            }
-        """ % (self.owner.username, self.repository.name)
-
-        result = self.gql_request(query, owner=self.owner)
-
-        assert "errors" not in result
-        assert result["owner"]["repository"]["testAnalytics"]["testResults"]["edges"][
-            0
-        ]["node"]["name"] == self.test.name.replace("\x1f", " ")
-
     def test_fetch_test_result_name_with_computed_name(self) -> None:
         self.test.computed_name = "Computed Name"
         self.test.save()
@@ -92,281 +64,14 @@ def test_fetch_test_result_name_with_computed_name(self) -> None:
                                     edges {
                                         node {
                                             name
-                                        }
-                                    }
-                                }
-                            }
-                        }
-                    }
-                }
-            }
-        """ % (self.owner.username, self.repository.name)
-
-        result = self.gql_request(query, owner=self.owner)
-
-        assert "errors" not in result
-        assert (
-            result["owner"]["repository"]["testAnalytics"]["testResults"]["edges"][0][
-                "node"
-            ]["name"]
-            == self.test.computed_name
-        )
-
-    def test_fetch_test_result_updated_at(self) -> None:
-        query = """
-            query {
-                owner(username: "%s") {
-                    repository(name: "%s") {
-                        ... on Repository {
-                            testAnalytics {
-                                testResults {
-                                    edges {
-                                        node {
                                             updatedAt
-                                        }
-                                    }
-                                }
-                            }
-                        }
-                    }
-                }
-            }
-        """ % (self.owner.username, self.repository.name)
-
-        result = self.gql_request(query, owner=self.owner)
-
-        assert "errors" not in result
-        assert (
-            result["owner"]["repository"]["testAnalytics"]["testResults"]["edges"][0][
-                "node"
-            ]["updatedAt"]
-            == datetime.now(UTC).isoformat()
-        )
-
-    def test_fetch_test_result_commits_failed(self) -> None:
-        query = """
-            query {
-                owner(username: "%s") {
-                    repository(name: "%s") {
-                        ... on Repository {
-                            testAnalytics {
-                                testResults {
-                                    edges {
-                                        node {
                                             commitsFailed
-                                        }
-                                    }
-                                }
-                            }
-                        }
-                    }
-                }
-            }
-        """ % (self.owner.username, self.repository.name)
-
-        result = self.gql_request(query, owner=self.owner)
-
-        assert "errors" not in result
-        assert (
-            result["owner"]["repository"]["testAnalytics"]["testResults"]["edges"][0][
-                "node"
-            ]["commitsFailed"]
-            == 3
-        )
-
-    def test_fetch_test_result_failure_rate(self) -> None:
-        query = """
-            query {
-                owner(username: "%s") {
-                    repository(name: "%s") {
-                        ... on Repository {
-                            testAnalytics {
-                                testResults {
-                                    edges {
-                                        node {
                                             failureRate
-                                        }
-                                    }
-                                }
-                            }
-                        }
-                    }
-                }
-            }
-        """ % (self.owner.username, self.repository.name)
-
-        result = self.gql_request(query, owner=self.owner)
-
-        assert "errors" not in result
-        assert (
-            result["owner"]["repository"]["testAnalytics"]["testResults"]["edges"][0][
-                "node"
-            ]["failureRate"]
-            == 0.75
-        )
-
-    def test_fetch_test_result_last_duration(self) -> None:
-        query = """
-            query {
-                owner(username: "%s") {
-                    repository(name: "%s") {
-                        ... on Repository {
-                            testAnalytics {
-                                testResults {
-                                    edges {
-                                        node {
                                             lastDuration
-                                        }
-                                    }
-                                }
-                            }
-                        }
-                    }
-                }
-            }
-        """ % (self.owner.username, self.repository.name)
-
-        result = self.gql_request(query, owner=self.owner)
-
-        assert "errors" not in result
-        assert (
-            result["owner"]["repository"]["testAnalytics"]["testResults"]["edges"][0][
-                "node"
-            ]["lastDuration"]
-            == 1.0
-        )
-
-    def test_fetch_test_result_avg_duration(self) -> None:
-        query = """
-            query {
-                owner(username: "%s") {
-                    repository(name: "%s") {
-                        ... on Repository {
-                            testAnalytics {
-                                testResults {
-                                    edges {
-                                        node {
                                             avgDuration
-                                        }
-                                    }
-                                }
-                            }
-                        }
-                    }
-                }
-            }
-        """ % (self.owner.username, self.repository.name)
-
-        result = self.gql_request(query, owner=self.owner)
-
-        assert "errors" not in result
-        assert result["owner"]["repository"]["testAnalytics"]["testResults"]["edges"][
-            0
-        ]["node"]["avgDuration"] == (5.6 / 3)
-
-    def test_fetch_test_result_total_fail_count(self) -> None:
-        query = """
-            query {
-                owner(username: "%s") {
-                    repository(name: "%s") {
-                        ... on Repository {
-                            testAnalytics {
-                                testResults {
-                                    edges {
-                                        node {
                                             totalFailCount
-                                        }
-                                    }
-                                }
-                            }
-                        }
-                    }
-                }
-            }
-        """ % (self.owner.username, self.repository.name)
-
-        result = self.gql_request(query, owner=self.owner)
-
-        assert "errors" not in result
-        assert (
-            result["owner"]["repository"]["testAnalytics"]["testResults"]["edges"][0][
-                "node"
-            ]["totalFailCount"]
-            == 9
-        )
-
-    def test_fetch_test_result_total_skip_count(self) -> None:
-        query = """
-            query {
-                owner(username: "%s") {
-                    repository(name: "%s") {
-                        ... on Repository {
-                            testAnalytics {
-                                testResults {
-                                    edges {
-                                        node {
                                             totalSkipCount
-                                        }
-                                    }
-                                }
-                            }
-                        }
-                    }
-                }
-            }
-        """ % (self.owner.username, self.repository.name)
-
-        result = self.gql_request(query, owner=self.owner)
-
-        assert "errors" not in result
-        assert (
-            result["owner"]["repository"]["testAnalytics"]["testResults"]["edges"][0][
-                "node"
-            ]["totalSkipCount"]
-            == 6
-        )
-
-    def test_fetch_test_result_total_pass_count(self) -> None:
-        query = """
-            query {
-                owner(username: "%s") {
-                    repository(name: "%s") {
-                        ... on Repository {
-                            testAnalytics {
-                                testResults {
-                                    edges {
-                                        node {
                                             totalPassCount
-                                        }
-                                    }
-                                }
-                            }
-                        }
-                    }
-                }
-            }
-        """ % (self.owner.username, self.repository.name)
-
-        result = self.gql_request(query, owner=self.owner)
-
-        assert "errors" not in result
-        assert (
-            result["owner"]["repository"]["testAnalytics"]["testResults"]["edges"][0][
-                "node"
-            ]["totalPassCount"]
-            == 3
-        )
-
-    def test_fetch_test_result_total_flaky_fail_count(self) -> None:
-        query = """
-            query {
-                owner(username: "%s") {
-                    repository(name: "%s") {
-                        ... on Repository {
-                            testAnalytics {
-                                testResults {
-                                    edges {
-                                        node {
                                             totalFlakyFailCount
                                         }
                                     }
@@ -381,9 +86,17 @@ def test_fetch_test_result_total_flaky_fail_count(self) -> None:
         result = self.gql_request(query, owner=self.owner)
 
         assert "errors" not in result
-        assert (
-            result["owner"]["repository"]["testAnalytics"]["testResults"]["edges"][0][
-                "node"
-            ]["totalFlakyFailCount"]
-            == 2
-        )
+        assert result["owner"]["repository"]["testAnalytics"]["testResults"]["edges"][
+            0
+        ]["node"] == {
+            "name": self.test.computed_name,
+            "updatedAt": datetime.now(UTC).isoformat(),
+            "commitsFailed": 3,
+            "failureRate": 0.75,
+            "lastDuration": 1.0,
+            "avgDuration": (5.6 / 3),
+            "totalFailCount": 9,
+            "totalSkipCount": 6,
+            "totalPassCount": 3,
+            "totalFlakyFailCount": 2,
+        }
diff --git a/graphql_api/tests/test_test_results_headers.py b/graphql_api/tests/test_test_results_headers.py
index ceeb62a76a..41adc4155a 100644
--- a/graphql_api/tests/test_test_results_headers.py
+++ b/graphql_api/tests/test_test_results_headers.py
@@ -29,7 +29,7 @@ def setUp(self):
             branch="main",
         )
 
-    def test_fetch_test_result_total_runtime(self) -> None:
+    def test_fetch_test_result_aggregates(self) -> None:
         query = """
             query {
                 owner(username: "%s") {
@@ -38,119 +38,15 @@ def test_fetch_test_result_total_runtime(self) -> None:
                         testAnalytics {
                             testResultsAggregates {
                                 totalDuration
-                                }
-                            }
-                        }
-                    }
-                }
-            }
-        """ % (self.owner.username, self.repository.name)
-
-        result = self.gql_request(query, owner=self.owner)
-
-        assert "errors" not in result
-        assert (
-            result["owner"]["repository"]["testAnalytics"]["testResultsAggregates"][
-                "totalDuration"
-            ]
-            == 465.0
-        )
-
-    def test_fetch_test_result_slowest_tests_runtime(self) -> None:
-        query = """
-            query {
-                owner(username: "%s") {
-                    repository(name: "%s") {
-                        ... on Repository {
-                            testAnalytics {
-                                testResultsAggregates {
                                 slowestTestsDuration
-                                }
-                            }
-                        }
-                    }
-                }
-            }
-        """ % (self.owner.username, self.repository.name)
-
-        result = self.gql_request(query, owner=self.owner)
-
-        assert "errors" not in result
-        assert (
-            result["owner"]["repository"]["testAnalytics"]["testResultsAggregates"][
-                "slowestTestsDuration"
-            ]
-            == 30.0
-        )
-
-    def test_fetch_test_result_failed_tests(self) -> None:
-        query = """
-            query {
-                owner(username: "%s") {
-                    repository(name: "%s") {
-                        ... on Repository {
-                            testAnalytics {
-                                testResultsAggregates {
                                 totalFails
-                                }
-                            }
-                        }
-                    }
-                }
-            }
-        """ % (self.owner.username, self.repository.name)
-
-        result = self.gql_request(query, owner=self.owner)
-
-        assert "errors" not in result
-        assert (
-            result["owner"]["repository"]["testAnalytics"]["testResultsAggregates"][
-                "totalFails"
-            ]
-            == 30
-        )
-
-    def test_fetch_test_result_skipped_tests(self) -> None:
-        query = """
-            query {
-                owner(username: "%s") {
-                    repository(name: "%s") {
-                        ... on Repository {
-                            testAnalytics {
-                                testResultsAggregates {
                                 totalSkips
-                                }
-                            }
-                        }
-                    }
-                }
-            }
-        """ % (self.owner.username, self.repository.name)
-
-        result = self.gql_request(query, owner=self.owner)
-
-        assert "errors" not in result
-        assert (
-            result["owner"]["repository"]["testAnalytics"]["testResultsAggregates"][
-                "totalSkips"
-            ]
-            == 30
-        )
-
-    def test_fetch_test_result_slow_tests(self) -> None:
-        query = """
-            query {
-                owner(username: "%s") {
-                    repository(name: "%s") {
-                        ... on Repository {
-                            testAnalytics {
-                                testResultsAggregates {
                                 totalSlowTests
                             }
                         }
                     }
                 }
-            }
+            }
         }
         """ % (self.owner.username, self.repository.name)
 
@@ -158,8 +54,16 @@ def test_fetch_test_result_slow_tests(self) -> None:
         result = self.gql_request(query, owner=self.owner)
 
         assert "errors" not in result
         assert (
-            result["owner"]["repository"]["testAnalytics"]["testResultsAggregates"][
-                "totalSlowTests"
-            ]
-            == 1
+            result["owner"]["repository"]["testAnalytics"] is not None
+            and result["owner"]["repository"]["testAnalytics"]["testResultsAggregates"]
+            is not None
         )
+        assert result["owner"]["repository"]["testAnalytics"][
+            "testResultsAggregates"
+        ] == {
+            "totalDuration": 465.0,
+            "slowestTestsDuration": 30.0,
+            "totalFails": 30,
+            "totalSkips": 30,
+            "totalSlowTests": 1,
+        }
diff --git a/graphql_api/types/flake_aggregates/flake_aggregates.py b/graphql_api/types/flake_aggregates/flake_aggregates.py
index 0bb536fff5..9efd0c9607 100644
--- a/graphql_api/types/flake_aggregates/flake_aggregates.py
+++ b/graphql_api/types/flake_aggregates/flake_aggregates.py
@@ -1,37 +1,30 @@
-from typing import TypedDict
-
 from ariadne import ObjectType
 from graphql import GraphQLResolveInfo
 
-flake_aggregates_bindable = ObjectType("FlakeAggregates")
-
+from utils.test_results import FlakeAggregates
 
-class FlakeAggregate(TypedDict):
-    flake_count: int
-    flake_count_percent_change: float | None
-    flake_rate: float
-    flake_rate_percent_change: float | None
+flake_aggregates_bindable = ObjectType("FlakeAggregates")
 
 
 @flake_aggregates_bindable.field("flakeCount")
-def resolve_flake_count(obj: FlakeAggregate, _: GraphQLResolveInfo) -> int:
-    return obj["flake_count"]
+def resolve_flake_count(obj: FlakeAggregates, _: GraphQLResolveInfo) -> int:
+    return obj.flake_count
 
 
 @flake_aggregates_bindable.field("flakeCountPercentChange")
 def resolve_flake_count_percent_change(
-    obj: FlakeAggregate, _: GraphQLResolveInfo
+    obj: FlakeAggregates, _: GraphQLResolveInfo
 ) -> float | None:
-    return obj.get("flake_count_percent_change")
+    return obj.flake_count_percent_change
 
 
 @flake_aggregates_bindable.field("flakeRate")
-def resolve_flake_rate(obj: FlakeAggregate, _: GraphQLResolveInfo) -> float:
-    return obj["flake_rate"]
+def resolve_flake_rate(obj: FlakeAggregates, _: GraphQLResolveInfo) -> float:
+    return obj.flake_rate
 
 
 @flake_aggregates_bindable.field("flakeRatePercentChange")
def resolve_flake_rate_percent_change(
-    obj: FlakeAggregate, _: GraphQLResolveInfo
+    obj: FlakeAggregates, _: GraphQLResolveInfo
 ) -> float | None:
-    return obj.get("flake_rate_percent_change")
+    return obj.flake_rate_percent_change
diff --git a/graphql_api/types/test_analytics/test_analytics.py b/graphql_api/types/test_analytics/test_analytics.py
index 7625d3225a..089bbf4d3e 100644
--- a/graphql_api/types/test_analytics/test_analytics.py
+++ b/graphql_api/types/test_analytics/test_analytics.py
@@ -1,15 +1,21 @@
 import logging
-from datetime import timedelta
+from typing import Any, TypedDict
 
 from ariadne import ObjectType
 from graphql.type.definition import GraphQLResolveInfo
 
 from codecov.db import sync_to_async
 from core.models import Repository
-from graphql_api.types.enums import OrderingDirection, TestResultsFilterParameter
+from graphql_api.types.enums import (
+    OrderingDirection,
+    TestResultsFilterParameter,
+    TestResultsOrderingParameter,
+)
 from graphql_api.types.enums.enum_types import MeasurementInterval
 from utils.test_results import (
-    GENERATE_TEST_RESULT_PARAM,
+    FlakeAggregates,
+    TestResultConnection,
+    TestResultsAggregates,
     generate_flake_aggregates,
     generate_test_results,
     generate_test_results_aggregates,
@@ -19,6 +25,21 @@
 
 log = logging.getLogger(__name__)
 
+
+class TestResultsOrdering(TypedDict):
+    parameter: TestResultsOrderingParameter
+    direction: OrderingDirection
+
+
+class TestResultsFilters(TypedDict):
+    parameter: TestResultsFilterParameter | None
+    interval: MeasurementInterval
+    branch: str | None
+    test_suites: list[str] | None
+    flags: list[str] | None
+    term: str | None
+
+
 # Bindings for GraphQL types
 test_analytics_bindable: ObjectType = ObjectType("TestAnalytics")
 
@@ -27,37 +48,32 @@ async def resolve_test_results(
     repository: Repository,
     info: GraphQLResolveInfo,
-    ordering=None,
-    filters=None,
+    ordering: TestResultsOrdering | None = None,
+    filters: TestResultsFilters | None = None,
     first: int | None = None,
     after: str | None = None,
     last: int | None = None,
     before: str | None = None,
-):
-    parameter = (
-        convert_test_results_filter_parameter(filters.get("parameter"))
-        if filters
-        else None
-    )
-    interval = (
-        convert_interval_to_timedelta(filters.get("interval"))
-        if filters
-        else timedelta(days=30)
-    )
-
+) -> TestResultConnection:
     queryset = await sync_to_async(generate_test_results)(
-        ordering=ordering.get("parameter").value if ordering else "avg_duration",
-        ordering_direction=(
-            ordering.get("direction").name if ordering else OrderingDirection.DESC.name
-        ),
+        ordering=ordering.get("parameter", TestResultsOrderingParameter.AVG_DURATION)
+        if ordering
+        else TestResultsOrderingParameter.AVG_DURATION,
+        ordering_direction=ordering.get("direction", OrderingDirection.DESC)
+        if ordering
+        else OrderingDirection.DESC,
         repoid=repository.repoid,
-        interval=interval,
+        measurement_interval=filters.get(
+            "interval", MeasurementInterval.INTERVAL_30_DAY
+        )
+        if filters
+        else MeasurementInterval.INTERVAL_30_DAY,
         first=first,
         after=after,
         last=last,
         before=before,
         branch=filters.get("branch") if filters else None,
-        parameter=parameter,
+        parameter=filters.get("parameter") if filters else None,
         testsuites=filters.get("test_suites") if filters else None,
         flags=filters.get("flags") if filters else None,
         term=filters.get("term") if filters else None,
     )
@@ -71,10 +87,10 @@ async def resolve_test_results_aggregates(
     repository: Repository,
     info: GraphQLResolveInfo,
     interval: MeasurementInterval | None = None,
-    **_,
-):
+    **_: Any,
+) -> TestResultsAggregates | None:
     return await sync_to_async(generate_test_results_aggregates)(
-        repoid=repository.repoid, interval=convert_interval_to_timedelta(interval)
+        repoid=repository.repoid, interval=interval.value if interval else 30
     )
 
@@ -83,52 +99,22 @@ async def resolve_flake_aggregates(
     repository: Repository,
     info: GraphQLResolveInfo,
     interval: MeasurementInterval | None = None,
-    **_,
-):
+    **_: Any,
+) -> FlakeAggregates | None:
     return await sync_to_async(generate_flake_aggregates)(
-        repoid=repository.repoid, interval=convert_interval_to_timedelta(interval)
+        repoid=repository.repoid, interval=interval.value if interval else 30
     )
 
 
 @test_analytics_bindable.field("testSuites")
 async def resolve_test_suites(
-    repository: Repository, info: GraphQLResolveInfo, term: str | None = None, **_
-):
+    repository: Repository, info: GraphQLResolveInfo, term: str | None = None, **_: Any
+) -> list[str]:
     return await sync_to_async(get_test_suites)(repository.repoid, term)
 
 
 @test_analytics_bindable.field("flags")
 async def resolve_flags(
-    repository: Repository, info: GraphQLResolveInfo, term: str | None = None, **_
-):
+    repository: Repository, info: GraphQLResolveInfo, term: str | None = None, **_: Any
+) -> list[str]:
     return await sync_to_async(get_flags)(repository.repoid, term)
-
-
-def convert_interval_to_timedelta(interval: MeasurementInterval | None) -> timedelta:
-    if interval is None:
-        return timedelta(days=30)
-
-    match interval:
-        case MeasurementInterval.INTERVAL_1_DAY:
-            return timedelta(days=1)
-        case MeasurementInterval.INTERVAL_7_DAY:
-            return timedelta(days=7)
-        case MeasurementInterval.INTERVAL_30_DAY:
-            return timedelta(days=30)
-
-
-def convert_test_results_filter_parameter(
-    parameter: TestResultsFilterParameter | None,
-) -> GENERATE_TEST_RESULT_PARAM | None:
-    if parameter is None:
-        return None
-
-    match parameter:
-        case TestResultsFilterParameter.FLAKY_TESTS:
-            return GENERATE_TEST_RESULT_PARAM.FLAKY
-        case TestResultsFilterParameter.FAILED_TESTS:
-            return GENERATE_TEST_RESULT_PARAM.FAILED
-        case TestResultsFilterParameter.SLOWEST_TESTS:
-            return GENERATE_TEST_RESULT_PARAM.SLOWEST
-        case TestResultsFilterParameter.SKIPPED_TESTS:
-            return GENERATE_TEST_RESULT_PARAM.SKIPPED
diff --git a/graphql_api/types/test_results_aggregates/test_results_aggregates.py b/graphql_api/types/test_results_aggregates/test_results_aggregates.py
index 93b425f94e..7920af95a0 100644
--- a/graphql_api/types/test_results_aggregates/test_results_aggregates.py
+++ b/graphql_api/types/test_results_aggregates/test_results_aggregates.py
@@ -1,81 +1,68 @@
-from typing import TypedDict
-
 from ariadne import ObjectType
 from graphql import GraphQLResolveInfo
 
-test_results_aggregates_bindable = ObjectType("TestResultsAggregates")
-
+from utils.test_results import TestResultsAggregates
 
-class TestResultsAggregates(TypedDict):
-    total_duration: float
-    total_duration_percent_change: float | None
-    slowest_tests_duration: float
-    slowest_tests_duration_percent_change: float | None
-    total_slow_tests: int
-    total_slow_tests_percent_change: float | None
-    fails: int
-    fails_percent_change: float | None
-    skips: int
-    skips_percent_change: float | None
+test_results_aggregates_bindable = ObjectType("TestResultsAggregates")
 
 
 @test_results_aggregates_bindable.field("totalDuration")
 def resolve_total_duration(obj: TestResultsAggregates, _: GraphQLResolveInfo) -> float:
-    return obj["total_duration"]
+    return obj.total_duration
 
 
 @test_results_aggregates_bindable.field("totalDurationPercentChange")
 def resolve_total_duration_percent_change(
     obj: TestResultsAggregates, _: GraphQLResolveInfo
 ) -> float | None:
-    return obj.get("total_duration_percent_change")
+    return obj.total_duration_percent_change
 
 
 @test_results_aggregates_bindable.field("slowestTestsDuration")
 def resolve_slowest_tests_duration(
     obj: TestResultsAggregates, _: GraphQLResolveInfo
 ) -> float:
-    return obj["slowest_tests_duration"]
+    return obj.slowest_tests_duration
 
 
 @test_results_aggregates_bindable.field("slowestTestsDurationPercentChange")
 def resolve_slowest_tests_duration_percent_change(
     obj: TestResultsAggregates, _: GraphQLResolveInfo
 ) -> float | None:
-    return obj.get("slowest_tests_duration_percent_change")
+    return obj.slowest_tests_duration_percent_change
 
 
 @test_results_aggregates_bindable.field("totalSlowTests")
 def resolve_total_slow_tests(obj: TestResultsAggregates, _: GraphQLResolveInfo) -> int:
-    return obj["total_slow_tests"]
+    return obj.total_slow_tests
 
 
 @test_results_aggregates_bindable.field("totalSlowTestsPercentChange")
 def resolve_total_slow_tests_percent_change(
     obj: TestResultsAggregates, _: GraphQLResolveInfo
 ) -> float | None:
-    return obj.get("total_slow_tests_percent_change")
+    return obj.total_slow_tests_percent_change
 
 
 @test_results_aggregates_bindable.field("totalFails")
 def resolve_total_fails(obj: TestResultsAggregates, _: GraphQLResolveInfo) -> int:
-    return obj["fails"]
+    return obj.fails
 
 
 @test_results_aggregates_bindable.field("totalFailsPercentChange")
 def resolve_total_fails_percent_change(
     obj: TestResultsAggregates, _: GraphQLResolveInfo
 ) -> float | None:
-    return obj.get("fails_percent_change")
+    return obj.fails_percent_change
 
 
 @test_results_aggregates_bindable.field("totalSkips")
 def resolve_total_skips(obj: TestResultsAggregates, _: GraphQLResolveInfo) -> int:
-    return obj["skips"]
+    return obj.skips
 
 
 @test_results_aggregates_bindable.field("totalSkipsPercentChange")
 def resolve_total_skips_percent_change(
     obj: TestResultsAggregates, _: GraphQLResolveInfo
 ) -> float | None:
-    return obj.get("skips_percent_change")
+    return obj.skips_percent_change
diff --git a/utils/test_results.py b/utils/test_results.py
index e2108091b8..4f0e484992 100644
--- a/utils/test_results.py
+++ b/utils/test_results.py
@@ -22,6 +22,12 @@
 )
 from codecov.commands.exceptions import ValidationError
 
+from graphql_api.types.enums import (
+    OrderingDirection,
+    TestResultsFilterParameter,
+    TestResultsOrderingParameter,
+)
+from graphql_api.types.enums.enum_types import MeasurementInterval
 
 thirty_days_ago = dt.datetime.now(dt.UTC) - dt.timedelta(days=30)
 
@@ -36,13 +42,6 @@ def slow_test_threshold(total_tests: int) -> int:
     return min(max(slow_tests_to_return, 1), 100)
 
 
-class GENERATE_TEST_RESULT_PARAM:
-    FLAKY = "flaky"
-    FAILED = "failed"
-    SLOWEST = "slowest"
-    SKIPPED = "skipped"
-
-
 @dataclass
 class TestResultsQuery:
     query: str
@@ -67,7 +66,29 @@ class TestResultsRow:
 
 
 @dataclass
-class Connection:
+class TestResultsAggregates:
+    total_duration: float
+    total_duration_percent_change: float | None
+    slowest_tests_duration: float
+    slowest_tests_duration_percent_change: float | None
+    total_slow_tests: int
+    total_slow_tests_percent_change: float | None
+    fails: int
+    fails_percent_change: float | None
+    skips: int
+    skips_percent_change: float | None
+
+
+@dataclass
+class FlakeAggregates:
+    flake_count: int
+    flake_count_percent_change: float | None
+    flake_rate: float
+    flake_rate_percent_change: float | None
+
+
+@dataclass
+class TestResultConnection:
     edges: list[dict[str, str | TestResultsRow]]
     page_info: dict
     total_count: int
@@ -96,55 +117,46 @@ def decode_cursor(value: str | None) -> CursorValue | None:
     )
 
 
-def encode_cursor(row: TestResultsRow, ordering: str) -> str:
+def encode_cursor(row: TestResultsRow, ordering: TestResultsOrderingParameter) -> str:
     return b64encode(
-        DELIMITER.join([str(getattr(row, ordering)), str(row.name)]).encode("utf-8")
+        DELIMITER.join([str(getattr(row, ordering.value)), str(row.name)]).encode(
+            "utf-8"
+        )
     ).decode("ascii")
 
 
 def validate(
-    interval_num_days: int,
-    ordering: str,
-    ordering_direction: str,
+    interval: int,
+    ordering: TestResultsOrderingParameter,
+    ordering_direction: OrderingDirection,
     after: str | None,
     before: str | None,
     first: int | None,
     last: int | None,
-) -> ValidationError | None:
-    if interval_num_days not in {1, 7, 30}:
-        return ValidationError(f"Invalid interval: {interval_num_days}")
-
-    if ordering_direction not in {"ASC", "DESC"}:
-        return ValidationError(f"Invalid ordering direction: {ordering_direction}")
-
-    if ordering not in {
-        "name",
-        "computed_name",
-        "avg_duration",
-        "failure_rate",
-        "flake_rate",
-        "commits_where_fail",
-        "last_duration",
-        "updated_at",
-    }:
-        return ValidationError(f"Invalid ordering field: {ordering}")
+) -> None:
+    if interval not in {1, 7, 30}:
+        raise ValidationError(f"Invalid interval: {interval}")
+
+    if not isinstance(ordering_direction, OrderingDirection):
+        raise ValidationError(f"Invalid ordering direction: {ordering_direction}")
+
+    if not isinstance(ordering, TestResultsOrderingParameter):
+        raise ValidationError(f"Invalid ordering field: {ordering}")
 
     if first is not None and last is not None:
-        return ValidationError("First and last can not be used at the same time")
+        raise ValidationError("First and last can not be used at the same time")
 
     if after is not None and before is not None:
-        return ValidationError("After and before can not be used at the same time")
-
-    return None
 
 
 def generate_base_query(
     repoid: int,
-    ordering: str,
-    ordering_direction: str,
+    ordering: TestResultsOrderingParameter,
+    ordering_direction: OrderingDirection,
     should_reverse: bool,
     branch: str | None,
-    interval_num_days: int,
+    interval: int,
     testsuites: list[str] | None = None,
     term: str | None = None,
     test_ids: set[str] | None = None,
@@ -152,13 +164,19 @@ def generate_base_query(
     term_filter = f"%{term}%" if term else None
 
     if should_reverse:
-        ordering_direction = "DESC" if ordering_direction == "ASC" else "ASC"
+        ordering_direction = (
+            OrderingDirection.DESC
+            if ordering_direction == OrderingDirection.ASC
+            else OrderingDirection.ASC
+        )
 
-    order_by = f"with_cursor.{ordering} {ordering_direction}, with_cursor.name"
+    order_by = (
+        f"with_cursor.{ordering.value} {ordering_direction.name}, with_cursor.name"
+    )
 
     params: dict[str, int | str | tuple[str, ...] | None] = {
         "repoid": repoid,
-        "interval": f"{interval_num_days} days",
+        "interval": f"{interval} days",
         "branch": branch,
         "test_ids": convert_tuple_else_none(test_ids),
         "testsuites": convert_tuple_else_none(testsuites),
@@ -242,7 +260,7 @@ def search_base_query(
     rows: list[TestResultsRow],
-    ordering: str,
+    ordering: TestResultsOrderingParameter,
     cursor: CursorValue | None,
     descending: bool = False,
 ) -> list[TestResultsRow]:
@@ -266,11 +284,11 @@ def search_base_query(
     if not cursor:
         return rows
 
     def compare(row: TestResultsRow) -> int:
         # -1 means row value is to the left of the cursor value (search to the right)
         # 0 means row value is equal to cursor value
         # 1 means row value is to the right of the cursor value (search to the left)
-        row_value = getattr(row, ordering)
+        row_value = getattr(row, ordering.value)
         row_value_str = str(row_value)
         cursor_value_str = cursor.ordered_value
         row_is_greater = row_value_str > cursor_value_str
@@ -312,20 +330,20 @@ def get_relevant_totals(
 
 
 def generate_test_results(
-    ordering: str,
-    ordering_direction: str,
+    ordering: TestResultsOrderingParameter,
+    ordering_direction: OrderingDirection,
     repoid: int,
-    interval: dt.timedelta,
+    measurement_interval: MeasurementInterval,
     first: int | None = None,
     after: str | None = None,
     last: int | None = None,
     before: str | None = None,
     branch: str | None = None,
-    parameter: GENERATE_TEST_RESULT_PARAM | None = None,
+    parameter: TestResultsFilterParameter | None = None,
     testsuites: list[str] | None = None,
     flags: defaultdict[str, str] | None = None,
     term: str | None = None,
-) -> Connection | ValidationError:
+) -> TestResultConnection:
     """
     Function that retrieves aggregated information about all tests in a given repository, for a given time range, optionally filtered by branch name.
     The fields it calculates are: the test failure rate, commits where this test failed, last duration and average duration of the test.
 
@@ -342,14 +360,10 @@ def generate_test_results(
-    :returns: queryset object containing list of dictionaries of results
+    :returns: TestResultConnection containing a page of test result rows
     """
-    interval_num_days = interval.days
-
-    if validation_error := validate(
-        interval_num_days, ordering, ordering_direction, after, before, first, last
-    ):
-        return validation_error
+    interval = measurement_interval.value
+    validate(interval, ordering, ordering_direction, after, before, first, last)
 
-    since = dt.datetime.now(dt.UTC) - interval
+    since = dt.datetime.now(dt.UTC) - dt.timedelta(days=interval)
 
     test_ids: set[str] | None = None
@@ -365,14 +379,14 @@ def generate_test_results(
                 flag__flag_name__in=flags
             )
 
-            filtered_test_ids = set([bridge.test_id for bridge in bridges])
+            filtered_test_ids = set([bridge.test_id for bridge in bridges])  # type: ignore
 
             test_ids = test_ids & filtered_test_ids if test_ids else filtered_test_ids
 
     if parameter is not None:
         totals = get_relevant_totals(repoid, branch, since)
 
         match parameter:
-            case GENERATE_TEST_RESULT_PARAM.FLAKY:
+            case TestResultsFilterParameter.FLAKY_TESTS:
                 flaky_test_ids = (
                     totals.values("test")
                     .annotate(flaky_fail_count_sum=Sum("flaky_fail_count"))
@@ -384,7 +398,7 @@ def generate_test_results(
                 test_ids = (
                     test_ids & flaky_test_id_set if test_ids else flaky_test_id_set
                 )
-            case GENERATE_TEST_RESULT_PARAM.FAILED:
+            case TestResultsFilterParameter.FAILED_TESTS:
                 failed_test_ids = (
                     totals.values("test")
                     .annotate(fail_count_sum=Sum("fail_count"))
@@ -396,7 +410,7 @@ def generate_test_results(
                 test_ids = (
                     test_ids & failed_test_id_set if test_ids else failed_test_id_set
                 )
-            case GENERATE_TEST_RESULT_PARAM.SKIPPED:
+            case TestResultsFilterParameter.SKIPPED_TESTS:
                 skipped_test_ids = (
                     totals.values("test")
                     .annotate(
@@ -412,7 +426,7 @@ def generate_test_results(
                 test_ids = (
                     test_ids & skipped_test_id_set if test_ids else skipped_test_id_set
                 )
-            case GENERATE_TEST_RESULT_PARAM.SLOWEST:
+            case TestResultsFilterParameter.SLOWEST_TESTS:
                 num_tests = totals.distinct("test_id").count()
 
                 slowest_test_ids = (
@@ -441,7 +455,7 @@ def generate_test_results(
         ordering_direction=ordering_direction,
         should_reverse=should_reverse,
         branch=branch,
-        interval_num_days=interval_num_days,
+        interval=interval,
         testsuites=testsuites,
         term=term,
         test_ids=test_ids,
@@ -459,13 +473,13 @@ def generate_test_results(
     page_size: int = first or last or 20
     cursor_value = decode_cursor(after) if after else decode_cursor(before)
 
-    descending = ordering_direction == "DESC"
+    descending = ordering_direction == OrderingDirection.DESC
     search_rows = search_base_query(
         rows,
         ordering,
         cursor_value,
         descending=descending,
     )
 
     page: list[dict[str, str | TestResultsRow]] = [
         {"cursor": encode_cursor(row, ordering), "node": row}
@@ -473,7 +487,7 @@ def generate_test_results(
         if i < page_size
     ]
 
-    return Connection(
+    return TestResultConnection(
         edges=page,
         total_count=len(rows),
         page_info={
@@ -485,35 +499,103 @@ def generate_test_results(
     )
 
 
-def percent_diff(
-    current_value: int | float, past_value: int | float
-) -> int | float | None:
+def percent_diff(current_value: int | float, past_value: int | float) -> float | None:
     if past_value == 0:
         return None
 
     return round((current_value - past_value) / past_value * 100, 5)
 
 
-def get_percent_change(
-    fields: list[str],
-    curr_numbers: dict[str, int | float],
-    past_numbers: dict[str, int | float],
-) -> dict[str, int | float | None]:
-    percent_change_fields = {}
+@dataclass
+class TestResultsAggregateNumbers:
+    total_duration: float
+    slowest_tests_duration: float
+    skips: int
+    fails: int
+    total_slow_tests: int
+
+
+@dataclass
+class FlakeAggregateNumbers:
+    flake_count: int
+    flake_rate: float
 
-    percent_change_fields = {
-        f"{field}_percent_change": percent_diff(
-            curr_numbers[field], past_numbers[field]
+
+def test_results_aggregates_from_numbers(
+    curr_numbers: TestResultsAggregateNumbers | None,
+    past_numbers: TestResultsAggregateNumbers | None,
+) -> TestResultsAggregates | None:
+    if curr_numbers is None:
+        return None
+    if past_numbers is None:
+        return TestResultsAggregates(
+            total_duration=curr_numbers.total_duration,
+            total_duration_percent_change=None,
+            slowest_tests_duration=curr_numbers.slowest_tests_duration,
+            slowest_tests_duration_percent_change=None,
+            total_slow_tests=curr_numbers.total_slow_tests,
+            total_slow_tests_percent_change=None,
+            fails=curr_numbers.fails,
+            fails_percent_change=None,
+            skips=curr_numbers.skips,
+            skips_percent_change=None,
         )
-        for field in fields
-        if past_numbers.get(field)
-    }
+    else:
+        return TestResultsAggregates(
+            total_duration=curr_numbers.total_duration,
+            total_duration_percent_change=percent_diff(
+                curr_numbers.total_duration,
+                past_numbers.total_duration,
+            ),
+            slowest_tests_duration=curr_numbers.slowest_tests_duration,
+            slowest_tests_duration_percent_change=percent_diff(
+                curr_numbers.slowest_tests_duration,
+                past_numbers.slowest_tests_duration,
+            ),
+            skips=curr_numbers.skips,
+            skips_percent_change=percent_diff(
+                curr_numbers.skips,
+                past_numbers.skips,
+            ),
+            fails=curr_numbers.fails,
+            fails_percent_change=percent_diff(
+                curr_numbers.fails,
+                past_numbers.fails,
+            ),
+            total_slow_tests=curr_numbers.total_slow_tests,
+            total_slow_tests_percent_change=percent_diff(
+                curr_numbers.total_slow_tests,
+                past_numbers.total_slow_tests,
+            ),
+        )
+
+
+def flake_aggregates_from_numbers(
+    curr_numbers: FlakeAggregateNumbers | None,
+    past_numbers: FlakeAggregateNumbers | None,
+) -> FlakeAggregates | None:
+    if curr_numbers is None:
+        return None
 
-    return percent_change_fields
+    return FlakeAggregates(
+        flake_count=curr_numbers.flake_count,
+        flake_count_percent_change=percent_diff(
+            curr_numbers.flake_count, past_numbers.flake_count
+        )
+        if past_numbers
+        else None,
+        flake_rate=curr_numbers.flake_rate,
+        flake_rate_percent_change=percent_diff(
+            curr_numbers.flake_rate,
+            past_numbers.flake_rate,
+        )
+        if past_numbers
+        else None,
+    )
 
 
 def get_test_results_aggregate_numbers(
     repo: Repository, since: dt.datetime, until: dt.datetime | None = None
-) -> dict[str, float | int]:
+) -> TestResultsAggregateNumbers | None:
     totals = DailyTestRollup.objects.filter(
         repoid=repo.repoid, date__gte=since, branch=repo.branch
     )
@@ -554,37 +636,44 @@ def get_test_results_aggregate_numbers(
         total_slow_tests=Value(slow_test_threshold(num_tests)),
     )
 
-    return test_headers[0] if len(test_headers) > 0 else {}
+    if len(test_headers) == 0:
+        return None
+    else:
+        headers = test_headers[0]
+        return TestResultsAggregateNumbers(
+            total_duration=headers["total_duration"] or 0.0,
+            slowest_tests_duration=headers["slowest_tests_duration"] or 0.0,
+            skips=headers["skips"] or 0,
+            fails=headers["fails"] or 0,
+            total_slow_tests=headers["total_slow_tests"] or 0,
+        )
 
 
 def generate_test_results_aggregates(
-    repoid: int, interval: dt.timedelta = dt.timedelta(days=30)
-) -> dict[str, float | int | None] | None:
+    repoid: int, interval: int
+) -> TestResultsAggregates | None:
     repo = Repository.objects.get(repoid=repoid)
-    since = dt.datetime.now(dt.UTC) - interval
+    since = dt.datetime.now(dt.UTC) - dt.timedelta(days=interval)
 
     curr_numbers = get_test_results_aggregate_numbers(repo, since)
 
-    double_time_ago = since - interval
+    double_time_ago = since - dt.timedelta(days=interval)
 
     past_numbers = get_test_results_aggregate_numbers(repo, double_time_ago, since)
 
-    return curr_numbers | get_percent_change(
-        [
-            "total_duration",
-            "slowest_tests_duration",
-            "skips",
-            "fails",
-            "total_slow_tests",
-        ],
-        curr_numbers,
-        past_numbers,
+    aggregates_with_percentage: TestResultsAggregates | None = (
+        test_results_aggregates_from_numbers(
+            curr_numbers,
+            past_numbers,
+        )
     )
+
+    return aggregates_with_percentage
+
 
 def get_flake_aggregate_numbers(
     repo: Repository, since: dt.datetime, until: dt.datetime | None = None
-) -> dict[str, int | float]:
+) -> FlakeAggregateNumbers:
     if until is None:
         flakes = Flake.objects.filter(
             Q(repository_id=repo.repoid)
@@ -601,7 +690,7 @@ def get_flake_aggregate_numbers(
 
     flake_count = flakes.count()
 
-    test_ids = [flake.test_id for flake in flakes]
+    test_ids = [flake.test_id for flake in flakes]  # type: ignore
 
     test_rollups = DailyTestRollup.objects.filter(
         repoid=repo.repoid,
@@ -613,7 +702,7 @@ def get_flake_aggregate_numbers(
         test_rollups = test_rollups.filter(date__lt=until.date())
 
     if len(test_rollups) == 0:
-        return {"flake_count": 0, "flake_rate": 0}
+        return FlakeAggregateNumbers(flake_count=0, flake_rate=0.0)
 
     numerator = 0
     denominator = 0
@@ -626,26 +715,20 @@ def get_flake_aggregate_numbers(
     else:
         flake_rate = numerator / denominator
 
-    return {"flake_count": flake_count, "flake_rate": flake_rate}
+    return FlakeAggregateNumbers(flake_count=flake_count, flake_rate=flake_rate)
 
 
-def generate_flake_aggregates(
-    repoid: int, interval: dt.timedelta = dt.timedelta(days=30)
-) -> dict[str, int | float | None]:
+def generate_flake_aggregates(repoid: int, interval: int) -> FlakeAggregates | None:
     repo = Repository.objects.get(repoid=repoid)
 
-    since = dt.datetime.today() - interval
+    since = dt.datetime.today() - dt.timedelta(days=interval)
 
     curr_numbers = get_flake_aggregate_numbers(repo, since)
 
-    double_time_ago = since - interval
+    double_time_ago = since - dt.timedelta(days=interval)
 
     past_numbers = get_flake_aggregate_numbers(repo, double_time_ago, since)
 
-    return curr_numbers | get_percent_change(
-        ["flake_count", "flake_rate"],
-        curr_numbers,
-        past_numbers,
-    )
+    return flake_aggregates_from_numbers(curr_numbers, past_numbers)
 
 
 def get_test_suites(repoid: int, term: str | None = None) -> list[str]:
diff --git a/utils/tests/unit/test_cursor.py b/utils/tests/unit/test_cursor.py
index 69e8b23254..f5a30d322c 100644
--- a/utils/tests/unit/test_cursor.py
+++ b/utils/tests/unit/test_cursor.py
@@ -1,5 +1,6 @@
 from datetime import datetime
 
+from graphql_api.types.enums.enums import TestResultsOrderingParameter
 from utils.test_results import CursorValue, TestResultsRow, decode_cursor, encode_cursor
 
 
@@ -18,7 +19,7 @@ def test_cursor():
         total_skip_count=1,
         total_pass_count=1,
     )
-    cursor = encode_cursor(row, "updated_at")
+    cursor = encode_cursor(row, TestResultsOrderingParameter.UPDATED_AT)
     assert cursor == "MjAyNC0wMS0wMSAwMDowMDowMCswMDowMHx0ZXN0"
     decoded_cursor = decode_cursor(cursor)
     assert decoded_cursor == CursorValue(str(row.updated_at), "test")
diff --git a/utils/tests/unit/test_search_base_query.py b/utils/tests/unit/test_search_base_query.py
index 0387c0df7a..0b466f2006 100644
--- a/utils/tests/unit/test_search_base_query.py
+++ b/utils/tests/unit/test_search_base_query.py
@@ -1,5 +1,6 @@
 from datetime import datetime
 
+from graphql_api.types.enums.enums import TestResultsOrderingParameter
 from utils.test_results import CursorValue, TestResultsRow, search_base_query
 
 
@@ -22,14 +23,14 @@ def row_factory(name: str, failure_rate: float):
 
 def test_search_base_query_cursor_val_none():
     rows = [row_factory(str(i), float(i) * 0.1) for i in range(10)]
-    res = search_base_query(rows, "failure_rate", None)
+    res = search_base_query(rows, TestResultsOrderingParameter.FAILURE_RATE, None)
     assert res == rows
 
 
 def test_search_base_query_with_existing_cursor():
     rows = [row_factory(str(i), float(i) * 0.1) for i in range(10)]
     cursor = CursorValue(name="5", ordered_value="0.5")
-    res = search_base_query(rows, "failure_rate", cursor)
+    res = search_base_query(rows, TestResultsOrderingParameter.FAILURE_RATE, cursor)
     assert res == rows[6:]
 
 
@@ -39,7 +40,7 @@ def test_search_base_query_with_missing_cursor_high_name_low_failure_rate():
     # here's where the cursor is pointing at
     rows = [row_factory(str(i), float(i) * 0.1) for i in range(3)]
     cursor = CursorValue(name="111111", ordered_value="0.05")
-    res = search_base_query(rows, "failure_rate", cursor)
+    res = search_base_query(rows, TestResultsOrderingParameter.FAILURE_RATE, cursor)
     assert res == rows[1:]
 
 
@@ -49,7 +50,7 @@ def test_search_base_query_with_missing_cursor_low_name_high_failure_rate():
     # here's where the cursor is pointing at
     rows = [row_factory(str(i), float(i) * 0.1) for i in range(3)]
     cursor = CursorValue(name="0", ordered_value="0.15")
-    res = search_base_query(rows, "failure_rate", cursor)
+    res = search_base_query(rows, TestResultsOrderingParameter.FAILURE_RATE, cursor)
     assert res == rows[-1:]
 
 
@@ -59,5 +60,7 @@ def test_search_base_query_descending():
     # here's where the cursor is pointing at
     rows = [row_factory(str(i), float(i) * 0.1) for i in range(2, -1, -1)]
     cursor = CursorValue(name="0", ordered_value="0.15")
-    res = search_base_query(rows, "failure_rate", cursor, descending=True)
+    res = search_base_query(
+        rows, TestResultsOrderingParameter.FAILURE_RATE, cursor, descending=True
+    )
    assert res == rows[1:]
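
For context, here is a standalone sketch of the cursor round-trip that `test_cursor.py` exercises: the ordering enum's *value* names the row attribute, and the cursor is the base64 of that attribute joined to the row name. The `DELIMITER = "|"` and the simplified `Row`/`decode_cursor` shapes are assumptions for illustration (the base64 fixture in the test decodes to `2024-01-01 00:00:00+00:00|test`, which is consistent with a `|` delimiter); the real module's definitions may differ.

```python
# Minimal sketch of encode_cursor/decode_cursor from utils/test_results.py.
# DELIMITER and the Row/decode shapes are assumptions, not the real module.
from base64 import b64decode, b64encode
from dataclasses import dataclass
from enum import Enum

DELIMITER = "|"  # assumed; inferred from the base64 fixture in test_cursor.py


class TestResultsOrderingParameter(Enum):
    # stand-in for graphql_api.types.enums.enums.TestResultsOrderingParameter
    UPDATED_AT = "updated_at"
    AVG_DURATION = "avg_duration"
    FAILURE_RATE = "failure_rate"


@dataclass
class Row:
    name: str
    updated_at: str


def encode_cursor(row: Row, ordering: TestResultsOrderingParameter) -> str:
    # ordering.value names the attribute whose value leads the cursor
    return b64encode(
        DELIMITER.join([str(getattr(row, ordering.value)), row.name]).encode("utf-8")
    ).decode("ascii")


def decode_cursor(cursor: str) -> tuple[str, str]:
    # split back into (ordered_value, name); name is the tiebreaker
    ordered_value, name = b64decode(cursor).decode("utf-8").split(DELIMITER, 1)
    return ordered_value, name


row = Row(name="test", updated_at="2024-01-01 00:00:00+00:00")
cursor = encode_cursor(row, TestResultsOrderingParameter.UPDATED_AT)
assert cursor == "MjAyNC0wMS0wMSAwMDowMDowMCswMDowMHx0ZXN0"
assert decode_cursor(cursor) == ("2024-01-01 00:00:00+00:00", "test")
```

Passing the enum member rather than a raw string is what lets `validate` reject unknown ordering fields with a simple `isinstance` check instead of maintaining a hand-written set of attribute names.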
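Similarly, a runnable sketch of the percent-change computation that the `*_from_numbers` helpers now perform field by field, replacing the dict-based `get_percent_change`. The `percent_diff` body is taken from the diff; the dataclass shapes mirror `FlakeAggregateNumbers`/`FlakeAggregates`, and the example numbers are invented for illustration.

```python
# Sketch of flake_aggregates_from_numbers; percent_diff matches the diff,
# the input values below are made up.
from dataclasses import dataclass


def percent_diff(current_value: int | float, past_value: int | float) -> float | None:
    # None signals "no baseline": a zero past value makes the ratio undefined
    if past_value == 0:
        return None
    return round((current_value - past_value) / past_value * 100, 5)


@dataclass
class FlakeAggregateNumbers:
    flake_count: int
    flake_rate: float


@dataclass
class FlakeAggregates:
    flake_count: int
    flake_count_percent_change: float | None
    flake_rate: float
    flake_rate_percent_change: float | None


def flake_aggregates_from_numbers(
    curr: FlakeAggregateNumbers | None,
    past: FlakeAggregateNumbers | None,
) -> FlakeAggregates | None:
    if curr is None:
        return None
    return FlakeAggregates(
        flake_count=curr.flake_count,
        flake_count_percent_change=percent_diff(curr.flake_count, past.flake_count)
        if past
        else None,
        flake_rate=curr.flake_rate,
        flake_rate_percent_change=percent_diff(curr.flake_rate, past.flake_rate)
        if past
        else None,
    )


curr = FlakeAggregateNumbers(flake_count=6, flake_rate=0.12)
past = FlakeAggregateNumbers(flake_count=4, flake_rate=0.10)
result = flake_aggregates_from_numbers(curr, past)
assert result is not None
assert result.flake_count_percent_change == 50.0  # (6 - 4) / 4 * 100
assert result.flake_rate_percent_change == 20.0   # (0.12 - 0.10) / 0.10 * 100
```

The dataclasses make the "current window vs. the window twice as far back" comparison explicit in the type system: a missing current window yields `None` overall, while a missing baseline window yields populated values with `None` percent changes.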