
Commit 258761b

dependencies update (#58)
* dependencies update
* e2e tests added
* ci optimizations
  - node22 -> node22:slim
  - npm install -> npm ci
  - added caching via build-push-action
* local registry for dependent builds
* added curl to frontend/e2e
Parent: 232759f · Commit: 258761b

30 files changed (+642, -878 lines)

.github/workflows/backend-ci.yml

Lines changed: 1 addition & 2 deletions
@@ -106,7 +106,6 @@ jobs:
         run: |
           cd backend
           uv run pytest tests/integration -v -rs \
-            --ignore=tests/integration/k8s \
             --cov=app \
             --cov-report=xml --cov-report=term

@@ -190,7 +189,7 @@ jobs:
           K8S_NAMESPACE: integr8scode
         run: |
           cd backend
-          uv run pytest tests/integration/k8s -v -rs \
+          uv run pytest tests/e2e -v -rs \
             --cov=app \
             --cov-report=xml --cov-report=term
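The change above drops the --ignore flag from the integration job (the k8s tests have moved out of tests/integration) and points the second job at the new tests/e2e suite. For a rough local approximation of the same two runs from the backend/ directory, pytest's Python entry point can be used; a minimal sketch with the coverage flags omitted, paths taken from this diff:

    import pytest

    # Integration suite: the k8s exclusion is gone because those tests
    # were moved out of tests/integration by this commit.
    pytest.main(["tests/integration", "-v", "-rs"])

    # New end-to-end suite, mirroring the updated second CI job.
    pytest.main(["tests/e2e", "-v", "-rs"])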

.github/workflows/frontend-ci.yml

Lines changed: 75 additions & 4 deletions
@@ -23,7 +23,7 @@ jobs:
       - uses: actions/checkout@v6

       - name: Setup Node.js
-        uses: actions/setup-node@v4
+        uses: actions/setup-node@v6
         with:
           node-version: '22'
           cache: 'npm'

@@ -51,11 +51,30 @@ jobs:
     name: E2E Tests
     needs: unit
     runs-on: ubuntu-latest
+
+    # Local registry for buildx to reference base image (docker-container driver is isolated)
+    services:
+      registry:
+        image: registry:2
+        ports:
+          - 5000:5000
+
+    env:
+      MONGO_IMAGE: mongo:8.0
+      REDIS_IMAGE: redis:7-alpine
+      KAFKA_IMAGE: apache/kafka:3.9.0
+      SCHEMA_REGISTRY_IMAGE: confluentinc/cp-schema-registry:7.5.0
+
     steps:
       - uses: actions/checkout@v6

+      - name: Cache and load Docker images
+        uses: ./.github/actions/docker-cache
+        with:
+          images: ${{ env.MONGO_IMAGE }} ${{ env.REDIS_IMAGE }} ${{ env.KAFKA_IMAGE }} ${{ env.SCHEMA_REGISTRY_IMAGE }}
+
       - name: Setup Node.js
-        uses: actions/setup-node@v4
+        uses: actions/setup-node@v6
         with:
           node-version: '22'
           cache: 'npm'

@@ -71,6 +90,8 @@ jobs:

       - name: Setup Docker Buildx
         uses: docker/setup-buildx-action@v3
+        with:
+          driver-opts: network=host

       - name: Setup Kubernetes (k3s)
         run: |

@@ -88,9 +109,59 @@ jobs:
           /home/runner/.kube/config > backend/kubeconfig.yaml
           chmod 644 backend/kubeconfig.yaml

-      - name: Build and start full stack
+      # Build images with GitHub Actions cache for faster subsequent builds
+      # Base image pushed to local registry so buildx can reference it
+      - name: Build and push base image
+        uses: docker/build-push-action@v6
+        with:
+          context: ./backend
+          file: ./backend/Dockerfile.base
+          push: true
+          tags: localhost:5000/integr8scode-base:latest
+          cache-from: type=gha,scope=backend-base
+          cache-to: type=gha,mode=max,scope=backend-base
+
+      # Pull base to Docker daemon (needed for docker-compose)
+      - name: Load base image to Docker daemon
+        run: |
+          docker pull localhost:5000/integr8scode-base:latest
+          docker tag localhost:5000/integr8scode-base:latest integr8scode-base:latest
+
+      - name: Build backend image
+        uses: docker/build-push-action@v6
+        with:
+          context: ./backend
+          file: ./backend/Dockerfile
+          load: true
+          tags: integr8scode-backend:latest
+          build-contexts: |
+            base=docker-image://localhost:5000/integr8scode-base:latest
+          cache-from: type=gha,scope=backend
+          cache-to: type=gha,mode=max,scope=backend
+
+      - name: Build cert-generator image
+        uses: docker/build-push-action@v6
+        with:
+          context: ./cert-generator
+          file: ./cert-generator/Dockerfile
+          load: true
+          tags: integr8scode-cert-generator:latest
+          cache-from: type=gha,scope=cert-generator
+          cache-to: type=gha,mode=max,scope=cert-generator
+
+      - name: Build frontend image
+        uses: docker/build-push-action@v6
+        with:
+          context: ./frontend
+          file: ./frontend/Dockerfile
+          load: true
+          tags: integr8scode-frontend:latest
+          cache-from: type=gha,scope=frontend
+          cache-to: type=gha,mode=max,scope=frontend
+
+      - name: Start full stack
         run: |
-          docker compose -f docker-compose.ci.yaml --profile full up -d --build --wait --wait-timeout 300
+          docker compose -f docker-compose.ci.yaml --profile full up -d --wait --wait-timeout 300
           docker compose -f docker-compose.ci.yaml ps

       - name: Seed test users
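The local registry service and the build-contexts entry exist because buildx's docker-container driver is isolated from the host Docker daemon: the base image is pushed to localhost:5000 (reachable thanks to driver-opts: network=host) and the dependent backend build resolves it from there. A rough local reproduction of that flow, assuming a registry:2 container is already running on port 5000 and a Buildx version that supports --build-context; the commands are inferred from the workflow above, not part of the commit:

    import subprocess

    def run(cmd: list[str]) -> None:
        """Echo and run a command, failing loudly on a non-zero exit."""
        print("+", " ".join(cmd))
        subprocess.run(cmd, check=True)

    # 1. Build the base image and push it to the local registry so an isolated
    #    buildx builder can reference it by name.
    run([
        "docker", "buildx", "build",
        "--file", "backend/Dockerfile.base",
        "--tag", "localhost:5000/integr8scode-base:latest",
        "--push",
        "backend",
    ])

    # 2. Build the backend image, resolving the "base" build context from the
    #    registry instead of the local daemon (the CLI equivalent of the
    #    build-contexts input used in the workflow).
    run([
        "docker", "buildx", "build",
        "--file", "backend/Dockerfile",
        "--build-context", "base=docker-image://localhost:5000/integr8scode-base:latest",
        "--tag", "integr8scode-backend:latest",
        "--load",
        "backend",
    ])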

backend/pyproject.toml

Lines changed: 6 additions & 5 deletions
@@ -16,7 +16,7 @@ dependencies = [
     "attrs==25.3.0",
     "avro-python3==1.10.2",
     "backoff==2.2.1",
-    "blinker==1.8.2",
+    "blinker==1.9.0",
     "Brotli==1.2.0",
     "cachetools==6.2.0",
     "certifi==2024.8.30",

@@ -30,9 +30,9 @@ dependencies = [
     "dishka==1.6.0",
     "dnspython==2.7.0",
     "durationpy==0.9",
-    "email_validator==2.2.0",
+    "email-validator==2.3.0",
     "exceptiongroup==1.2.2",
-    "fastapi==0.124.0",
+    "fastapi==0.128.0",
     "fastavro==1.12.1",
     "fonttools==4.61.1",
     "frozenlist==1.7.0",

@@ -46,7 +46,7 @@ dependencies = [
     "httpx==0.28.1",
     "idna==3.10",
     "importlib-metadata==6.11.0",
-    "importlib_resources==6.4.5",
+    "importlib-resources==6.5.2",
     "itsdangerous==2.2.0",
     "Jinja2==3.1.6",
     "kiwisolver==1.4.9",

@@ -88,7 +88,7 @@ dependencies = [
     "pyasn1==0.6.1",
     "pyasn1_modules==0.4.2",
     "pydantic==2.9.2",
-    "pydantic-avro==0.7.1",
+    "pydantic-avro==0.9.1",
     "pydantic-settings==2.5.2",
     "pydantic_core==2.23.4",
     "Pygments==2.19.2",

@@ -194,6 +194,7 @@ python_classes = ["Test*"]
 python_functions = ["test_*"]
 markers = [
     "integration: marks tests as integration tests",
+    "e2e: marks tests as end-to-end tests requiring full system",
     "unit: marks tests as unit tests",
     "slow: marks tests as slow running",
     "kafka: marks tests as requiring Kafka",

backend/tests/e2e/conftest.py

Lines changed: 31 additions & 0 deletions
@@ -0,0 +1,31 @@
+import pytest_asyncio
+import redis.asyncio as redis
+from beanie import init_beanie
+
+from app.core.database_context import Database
+from app.db.docs import ALL_DOCUMENTS
+
+
+@pytest_asyncio.fixture(autouse=True)
+async def _cleanup(db: Database, redis_client: redis.Redis):
+    """Clean DB and Redis before each E2E test.
+
+    Only pre-test cleanup - post-test cleanup causes event loop issues
+    when SSE/streaming tests hold connections across loop boundaries.
+
+    NOTE: With pytest-xdist, each worker uses a separate Redis database
+    (gw0→db0, gw1→db1, etc.), so flushdb() is safe and only affects
+    that worker's database. See tests/conftest.py for REDIS_DB setup.
+    """
+    collections = await db.list_collection_names()
+    for name in collections:
+        if not name.startswith("system."):
+            await db.drop_collection(name)
+
+    await redis_client.flushdb()
+
+    # Initialize Beanie with document models
+    await init_beanie(database=db, document_models=ALL_DOCUMENTS)
+
+    yield
+    # No post-test cleanup to avoid "Event loop is closed" errors
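The NOTE in the fixture's docstring points at a per-worker Redis database configured in tests/conftest.py, which is not part of this diff. Purely as an illustration of that pattern (the helper names, host, and defaults below are assumptions, not the project's actual code), the gw0→db0 mapping could be derived from pytest-xdist's worker id:

    import os
    import redis.asyncio as redis

    def redis_db_for_worker() -> int:
        # pytest-xdist exports the worker id (e.g. "gw0", "gw1") in this env var;
        # fall back to db 0 when running without xdist.
        worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
        return int(worker.removeprefix("gw") or 0)

    def make_redis_client() -> redis.Redis:
        # Hypothetical helper: each worker flushes only its own database.
        return redis.Redis(host="localhost", port=6379, db=redis_db_for_worker())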

backend/tests/integration/k8s/test_execution_routes.py renamed to backend/tests/e2e/test_execution_routes.py

Lines changed: 9 additions & 8 deletions
@@ -13,8 +13,9 @@
     ResourceUsage
 )

+pytestmark = [pytest.mark.e2e, pytest.mark.k8s]
+

-@pytest.mark.k8s
 class TestExecution:
     """Test execution endpoints against real backend."""

@@ -104,13 +105,13 @@ async def test_get_execution_result(self, client: AsyncClient, test_user: Dict[s
         # Immediately fetch result - no waiting
         result_response = await client.get(f"/api/v1/result/{execution_id}")
         assert result_response.status_code == 200
-
+
         result_data = result_response.json()
         execution_result = ExecutionResult(**result_data)
         assert execution_result.execution_id == execution_id
         assert execution_result.status in [e.value for e in ExecutionStatusEnum]
         assert execution_result.lang == "python"
-
+
         # Execution might be in any state - that's fine
         # If completed, validate output; if not, that's valid too
         if execution_result.status == ExecutionStatusEnum.COMPLETED:

@@ -140,7 +141,7 @@ async def test_execute_with_error(self, client: AsyncClient, test_user: Dict[str
         assert exec_response.status_code == 200

         execution_id = exec_response.json()["execution_id"]
-
+
         # No waiting - execution was accepted, error will be processed asynchronously

     @pytest.mark.asyncio

@@ -172,7 +173,7 @@ async def test_execute_with_resource_tracking(self, client: AsyncClient, test_us
         assert exec_response.status_code == 200

         execution_id = exec_response.json()["execution_id"]
-
+
         # No waiting - execution was accepted, error will be processed asynchronously

         # Fetch result and validate resource usage if present

@@ -245,7 +246,7 @@ async def test_execute_with_large_output(self, client: AsyncClient, test_user: D
         assert exec_response.status_code == 200

         execution_id = exec_response.json()["execution_id"]
-
+
         # No waiting - execution was accepted, error will be processed asynchronously
         # Validate output from result endpoint (best-effort)
         result_response = await client.get(f"/api/v1/result/{execution_id}")

@@ -299,7 +300,7 @@ async def test_cancel_running_execution(self, client: AsyncClient, test_user: Di
             pytest.skip("Cancellation not wired; backend returned 5xx")
         # Should succeed or fail if already completed
         assert cancel_response.status_code in [200, 400, 404]
-
+
         # Cancel response of 200 means cancellation was accepted

     @pytest.mark.asyncio

@@ -335,7 +336,7 @@ async def test_execution_with_timeout(self, client: AsyncClient, test_user: Dict
         assert exec_response.status_code == 200

         execution_id = exec_response.json()["execution_id"]
-
+
         # Just verify the execution was created - it will run forever until timeout
         # No need to wait or observe states

backend/tests/integration/k8s/test_k8s_worker_create_pod.py renamed to backend/tests/e2e/test_k8s_worker_create_pod.py

Lines changed: 1 addition & 1 deletion
@@ -13,7 +13,7 @@
 from app.services.k8s_worker.worker import KubernetesWorker
 from kubernetes.client.rest import ApiException

-pytestmark = [pytest.mark.integration, pytest.mark.k8s]
+pytestmark = [pytest.mark.e2e, pytest.mark.k8s]

 _test_logger = logging.getLogger("test.k8s.worker_create_pod")

backend/tests/integration/k8s/test_resource_cleaner_k8s.py renamed to backend/tests/e2e/test_resource_cleaner_k8s.py

Lines changed: 6 additions & 7 deletions
@@ -7,7 +7,7 @@
 from app.services.result_processor.resource_cleaner import ResourceCleaner


-pytestmark = [pytest.mark.integration, pytest.mark.k8s]
+pytestmark = [pytest.mark.e2e, pytest.mark.k8s]

 _test_logger = logging.getLogger("test.k8s.resource_cleaner_k8s")

@@ -36,11 +36,11 @@ async def test_cleanup_orphaned_resources_dry_run() -> None:
 async def test_cleanup_nonexistent_pod() -> None:
     rc = ResourceCleaner(logger=_test_logger)
     await rc.initialize()
-
+
     # Attempt to delete a pod that doesn't exist - should complete without errors
     namespace = os.environ.get("K8S_NAMESPACE", "default")
     nonexistent_pod = "integr8s-test-nonexistent-pod"
-
+
     # Should complete within timeout and not raise any exceptions
     start_time = asyncio.get_event_loop().time()
     await rc.cleanup_pod_resources(

@@ -50,15 +50,14 @@ async def test_cleanup_nonexistent_pod() -> None:
         timeout=5,
     )
     elapsed = asyncio.get_event_loop().time() - start_time
-
+
     # Verify it completed quickly (not waiting full timeout for non-existent resources)
     assert elapsed < 5, f"Cleanup took {elapsed}s, should be quick for non-existent resources"
-
+
     # Verify no resources exist with this name (should be empty/zero)
     usage = await rc.get_resource_usage(namespace=namespace)
-
+
     # usage returns counts (int), not lists
     # Just check that we got a valid usage report
     assert isinstance(usage.get("pods", 0), int)
     assert isinstance(usage.get("configmaps", 0), int)
-

backend/tests/integration/k8s/test_resource_cleaner_integration.py renamed to backend/tests/e2e/test_resource_cleaner_orphan.py

Lines changed: 2 additions & 2 deletions
@@ -8,9 +8,9 @@
 from app.services.result_processor.resource_cleaner import ResourceCleaner
 from tests.helpers.eventually import eventually

-pytestmark = [pytest.mark.integration, pytest.mark.k8s]
+pytestmark = [pytest.mark.e2e, pytest.mark.k8s]

-_test_logger = logging.getLogger("test.k8s.resource_cleaner_integration")
+_test_logger = logging.getLogger("test.k8s.resource_cleaner_orphan")


 def _ensure_kubeconfig():

backend/tests/integration/dlq/test_dlq_discard_policy.py

Lines changed: 7 additions & 2 deletions
@@ -2,6 +2,7 @@
 import json
 import logging
 import os
+import uuid
 from datetime import datetime, timezone

 import pytest

@@ -15,7 +16,10 @@
 from tests.helpers import make_execution_requested_event
 from tests.helpers.eventually import eventually

-pytestmark = [pytest.mark.integration, pytest.mark.kafka, pytest.mark.mongodb]
+# xdist_group: DLQ tests share a Kafka consumer group. When running in parallel,
+# different workers' managers consume each other's messages and apply wrong policies.
+# Serial execution ensures each test's manager processes only its own messages.
+pytestmark = [pytest.mark.integration, pytest.mark.kafka, pytest.mark.mongodb, pytest.mark.xdist_group("dlq")]

 _test_logger = logging.getLogger("test.dlq.discard_policy")

@@ -28,7 +32,8 @@ async def test_dlq_manager_discards_with_manual_policy(db) -> None:  # type: ign
     topic = f"{prefix}{str(KafkaTopic.EXECUTION_EVENTS)}"
     manager.set_retry_policy(topic, RetryPolicy(topic=topic, strategy=RetryStrategy.MANUAL))

-    ev = make_execution_requested_event(execution_id="exec-dlq-discard")
+    # Use unique execution_id to avoid conflicts with parallel test workers
+    ev = make_execution_requested_event(execution_id=f"exec-dlq-discard-{uuid.uuid4().hex[:8]}")

     payload = {
         "event": ev.to_dict(),
