Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 1 addition & 14 deletions tests/integration/api/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
"""Module for test configurations for the integration test directory."""

from logging import LogRecord
from typing import Iterator, Generator
from typing import Iterator
from unittest.mock import AsyncMock

import pytest
Expand All @@ -30,7 +30,6 @@
)
from merino.main import app
from merino.utils.log_data_creators import RequestSummaryLogDataModel
from merino.middleware import ScopeKey
from tests.integration.api.types import RequestSummaryLogDataFixture


Expand Down Expand Up @@ -70,18 +69,6 @@ def fixture_test_client() -> TestClient:
return TestClient(app)


@pytest.fixture(name="client_with_metrics")
def fixture_client_with_metrics() -> Iterator[TestClient]:
    """Yield a TestClient whose request scope carries a NoOpMetricsClient.

    The ASGI app is wrapped so that every request scope has
    ``ScopeKey.METRICS_CLIENT`` set to a ``NoOpMetricsClient`` before the
    real app handles it — presumably so handlers that read the metrics
    client from the scope do not emit real metrics during tests.

    Annotated as ``Iterator[TestClient]`` for consistency with the sibling
    ``client_with_events`` fixture in this module.
    """

    async def asgi_wrapper(scope, receive, send):
        # Inject the no-op metrics client, then delegate to the real app.
        scope[ScopeKey.METRICS_CLIENT] = NoOpMetricsClient()
        await app(scope, receive, send)

    # Enter the client as a context manager so app startup/shutdown
    # lifespan events run around the yielded client.
    with TestClient(asgi_wrapper) as client:
        yield client


Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Could this be the reason why AMO is also failing? Does the failing test also depend on this fixture?

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yup, very likely. I checked all the latest test failures; pretty much all of them were caused by log-entry assertion errors. Prior to this patch, 6 seemingly unrelated log entries were emitted from test_manifest.py — even when I ran this test file alone. There is probably something similar running from the curated recommendation tests, I guess.

DEBUG    {"Timestamp": 1739221032008496896, "Type": "merino.utils.metrics", "Logger": "merino",  "EnvVersion": "2.0", "Severity": 7, "Pid":      metrics.py:48
                             66200, "Fields": {"taskName": "Task-303", "data": "merino.recommendation.prior.update.timing:17|ms|#application:merino-py,deployment.canary:0", "msg": "sending metrics"},
                             "severity": 100}
DEBUG    {"Timestamp": 1739221032010868992, "Type": "merino.utils.metrics", "Logger": "merino", "EnvVersion": "2.0", "Severity": 7, "Pid":      metrics.py:48
                             66200, "Fields": {"taskName": "Task-303", "data": "merino.recommendation.engagement.update.timing:21|ms|#application:merino-py,deployment.canary:0", "msg": "sending
                             metrics"}, "severity": 100}
INFO     {"Timestamp": 1739221032013295104, "Type": "merino.utils.cron", "Logger": "merino", "EnvVersion": "2.0", "Severity": 6, "Pid": 66200,     cron.py:65
                             "Fields": {"taskName": "Task-314", "duration": 0.026813709002453834, "msg": "Cron: successfully ran task fetch_recommendation_fakespot"}, "severity": 200}
DEBUG    {"Timestamp": 1739221032089231104, "Type": "merino.utils.metrics", "Logger": "merino", "EnvVersion": "2.0", "Severity": 7, "Pid":      metrics.py:48
                             66200, "Fields": {"taskName": "Task-303", "data": "merino.response.status_codes.200:1|c|#application:merino-py,deployment.canary:0", "msg": "sending metrics"}, "severity":
                             100}
DEBUG    {"Timestamp": 1739221032091535104, "Type": "merino.utils.metrics", "Logger": "merino",  "EnvVersion": "2.0", "Severity": 7, "Pid":      metrics.py:48
                             66200, "Fields": {"taskName": "Task-303", "data": "merino.providers.initialize.manifest:57|ms|#application:merino-py,deployment.canary:0", "msg": "sending metrics"},
                             "severity": 100}

@pytest.fixture(name="client_with_events")
def fixture_test_client_with_events() -> Iterator[TestClient]:
"""Return a FastAPI TestClient instance.
Expand Down
10 changes: 4 additions & 6 deletions tests/integration/api/v1/manifest/test_manifest.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ async def cleanup_tasks(provider: Provider):


@pytest.mark.asyncio
async def test_get_manifest_success(client_with_metrics, gcp_uploader, mock_manifest, cleanup):
async def test_get_manifest_success(client, gcp_uploader, mock_manifest, cleanup):
"""Uploads a manifest to the gcs bucket and verifies that the endpoint returns the uploaded file."""
# initialize provider on startup
await init_provider()
Expand All @@ -44,7 +44,7 @@ async def test_get_manifest_success(client_with_metrics, gcp_uploader, mock_mani

cleanup(provider)

response = client_with_metrics.get("/api/v1/manifest")
response = client.get("/api/v1/manifest")
assert response.status_code == 200

manifest = ManifestData(**response.json())
Expand All @@ -55,9 +55,7 @@ async def test_get_manifest_success(client_with_metrics, gcp_uploader, mock_mani


@pytest.mark.asyncio
async def test_get_manifest_from_gcs_bucket_should_return_empty_manifest_file(
client_with_metrics, cleanup
):
async def test_get_manifest_from_gcs_bucket_should_return_empty_manifest_file(client, cleanup):
"""Does not upload any manifests to the gcs bucket. Should return none and a 404."""
await init_provider()

Expand All @@ -69,7 +67,7 @@ async def test_get_manifest_from_gcs_bucket_should_return_empty_manifest_file(

cleanup(provider)

response = client_with_metrics.get("/api/v1/manifest")
response = client.get("/api/v1/manifest")
assert response.status_code == 404

assert response.json()["domains"] == []
Expand Down
Loading