diff --git a/backend/app/api/routes/admin/events.py b/backend/app/api/routes/admin/events.py
index 1e22a218..3620a8be 100644
--- a/backend/app/api/routes/admin/events.py
+++ b/backend/app/api/routes/admin/events.py
@@ -1,4 +1,3 @@
-from dataclasses import asdict
from datetime import datetime
from typing import Annotated
@@ -206,8 +205,8 @@ async def get_replay_status(session_id: str, service: FromDishka[AdminEventsServ
execution_results = status.execution_results
return EventReplayStatusResponse(
**{
- **asdict(session),
- "status": session.status.value,
+ **session.model_dump(),
+ "status": session.status,
"estimated_completion": estimated_completion,
"execution_results": execution_results,
}
diff --git a/backend/app/api/routes/events.py b/backend/app/api/routes/events.py
index 7cfea823..ac1e33b1 100644
--- a/backend/app/api/routes/events.py
+++ b/backend/app/api/routes/events.py
@@ -12,6 +12,7 @@
from app.core.utils import get_client_ip
from app.domain.enums.common import SortOrder
from app.domain.events.event_models import EventFilter
+from app.domain.events.typed import BaseEvent
from app.infrastructure.kafka.events.metadata import AvroEventMetadata as EventMetadata
from app.schemas_pydantic.events import (
DeleteEventResponse,
@@ -296,7 +297,7 @@ async def delete_event(
"admin_email": admin.email,
"event_type": result.event_type,
"aggregate_id": result.aggregate_id,
- "correlation_id": result.correlation_id,
+ "correlation_id": result.metadata.correlation_id,
},
)
@@ -345,9 +346,12 @@ async def replay_aggregate_events(
service_version=settings.SERVICE_VERSION,
user_id=admin.user_id,
)
+ # Extract payload fields (exclude base event fields + event_type discriminator)
+ base_fields = set(BaseEvent.model_fields.keys()) | {"event_type"}
+ extra_fields = {k: v for k, v in event.model_dump().items() if k not in base_fields}
await kafka_event_service.publish_event(
event_type=event.event_type,
- payload=event.payload,
+ payload=extra_fields,
aggregate_id=aggregate_id,
correlation_id=replay_correlation_id,
metadata=meta,
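
A minimal sketch of the payload-extraction idiom introduced above, assuming a Pydantic v2 BaseEvent with shared envelope fields; the classes and field names here are illustrative, not the project's real definitions.

from datetime import datetime, timezone
from typing import Literal

from pydantic import BaseModel, Field


class BaseEventSketch(BaseModel):
    # Shared envelope fields (illustrative stand-in for app.domain.events.typed.BaseEvent).
    event_id: str
    timestamp: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))


class ExecutionStartedSketch(BaseEventSketch):
    event_type: Literal["execution.started"] = "execution.started"  # discriminator
    execution_id: str                                                # payload field
    pod_name: str | None = None                                      # payload field


event = ExecutionStartedSketch(event_id="e-1", execution_id="x-42", pod_name="runner-0")

# Payload = everything that is neither an envelope field nor the discriminator.
base_fields = set(BaseEventSketch.model_fields.keys()) | {"event_type"}
payload = {k: v for k, v in event.model_dump().items() if k not in base_fields}
assert payload == {"execution_id": "x-42", "pod_name": "runner-0"}
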
diff --git a/backend/app/api/routes/health.py b/backend/app/api/routes/health.py
index 8bf19ebd..aecbd1e2 100644
--- a/backend/app/api/routes/health.py
+++ b/backend/app/api/routes/health.py
@@ -15,7 +15,7 @@ class LivenessResponse(BaseModel):
status: str = Field(description="Health status")
uptime_seconds: int = Field(description="Server uptime in seconds")
- timestamp: str = Field(description="ISO timestamp of health check")
+ timestamp: datetime = Field(description="Timestamp of health check")
class ReadinessResponse(BaseModel):
@@ -31,7 +31,7 @@ async def liveness() -> LivenessResponse:
return LivenessResponse(
status="ok",
uptime_seconds=int(time.time() - _START_TIME),
- timestamp=datetime.now(timezone.utc).isoformat(),
+ timestamp=datetime.now(timezone.utc),
)
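
Dropping the manual isoformat() call works because Pydantic (and therefore FastAPI response models) serializes datetime fields to ISO 8601 strings when dumping to JSON; a quick check, with illustrative values:

from datetime import datetime, timezone

from pydantic import BaseModel


class LivenessSketch(BaseModel):
    status: str
    uptime_seconds: int
    timestamp: datetime


resp = LivenessSketch(status="ok", uptime_seconds=5, timestamp=datetime.now(timezone.utc))
print(resp.model_dump_json())
# e.g. {"status":"ok","uptime_seconds":5,"timestamp":"2025-01-01T12:00:00.000000Z"}  (shape only)
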
diff --git a/backend/app/api/routes/replay.py b/backend/app/api/routes/replay.py
index f9919cb5..fc83c4a1 100644
--- a/backend/app/api/routes/replay.py
+++ b/backend/app/api/routes/replay.py
@@ -1,5 +1,3 @@
-from dataclasses import asdict
-
from dishka import FromDishka
from dishka.integrations.fastapi import DishkaRoute
from fastapi import APIRouter, Depends, Query
@@ -65,7 +63,7 @@ async def list_replay_sessions(
limit: int = Query(100, ge=1, le=1000),
) -> list[SessionSummary]:
return [
- SessionSummary.model_validate({**asdict(s), **asdict(s)["config"]})
+ SessionSummary.model_validate({**s.model_dump(), **s.model_dump()["config"]})
for s in service.list_sessions(status=status, limit=limit)
]
diff --git a/backend/app/db/docs/event.py b/backend/app/db/docs/event.py
index 87f2b2d6..f5a65609 100644
--- a/backend/app/db/docs/event.py
+++ b/backend/app/db/docs/event.py
@@ -1,37 +1,20 @@
from datetime import datetime, timedelta, timezone
-from typing import Any
from uuid import uuid4
import pymongo
from beanie import Document, Indexed
-from pydantic import BaseModel, ConfigDict, Field
+from pydantic import ConfigDict, Field
from pymongo import ASCENDING, DESCENDING, IndexModel
-from app.domain.enums.common import Environment
from app.domain.enums.events import EventType
-
-
-# Pydantic model required here because Beanie embedded documents must be Pydantic BaseModel subclasses.
-# This is NOT an API schema - it defines the MongoDB subdocument structure.
-class EventMetadata(BaseModel):
- """Event metadata embedded document for Beanie storage."""
-
- model_config = ConfigDict(from_attributes=True)
-
- service_name: str
- service_version: str
- correlation_id: str = Field(default_factory=lambda: str(uuid4()))
- user_id: str | None = None
- ip_address: str | None = None
- user_agent: str | None = None
- environment: Environment = Environment.PRODUCTION
+from app.domain.events.typed import EventMetadata
class EventDocument(Document):
"""Event document for event browsing/admin system.
- Uses payload dict for flexible event data storage.
- This is separate from EventStoreDocument which uses flat structure for Kafka events.
+    Uses extra='allow' for flexible event data storage - event-specific fields
+    are stored directly at the document level (no payload wrapper needed).
"""
event_id: Indexed(str, unique=True) = Field(default_factory=lambda: str(uuid4())) # type: ignore[valid-type]
@@ -40,10 +23,12 @@ class EventDocument(Document):
timestamp: Indexed(datetime) = Field(default_factory=lambda: datetime.now(timezone.utc)) # type: ignore[valid-type]
aggregate_id: Indexed(str) | None = None # type: ignore[valid-type]
metadata: EventMetadata
- payload: dict[str, Any] = Field(default_factory=dict)
stored_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
ttl_expires_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc) + timedelta(days=30))
+ # Most event types have execution_id (sparse-indexed)
+ execution_id: str | None = None
+
model_config = ConfigDict(from_attributes=True, extra="allow")
class Settings:
@@ -56,9 +41,9 @@ class Settings:
IndexModel([("metadata.correlation_id", ASCENDING)], name="idx_meta_correlation"),
IndexModel([("metadata.user_id", ASCENDING), ("timestamp", DESCENDING)], name="idx_meta_user_ts"),
IndexModel([("metadata.service_name", ASCENDING), ("timestamp", DESCENDING)], name="idx_meta_service_ts"),
- # Payload sparse indexes
- IndexModel([("payload.execution_id", ASCENDING)], name="idx_payload_execution", sparse=True),
- IndexModel([("payload.pod_name", ASCENDING)], name="idx_payload_pod", sparse=True),
+ # Event-specific field indexes (sparse - only exist on relevant event types)
+ IndexModel([("execution_id", ASCENDING)], name="idx_execution_id", sparse=True),
+ IndexModel([("pod_name", ASCENDING)], name="idx_pod_name", sparse=True),
# TTL index (expireAfterSeconds=0 means use ttl_expires_at value directly)
IndexModel([("ttl_expires_at", ASCENDING)], name="idx_ttl", expireAfterSeconds=0),
# Additional compound indexes for query optimization
@@ -77,7 +62,7 @@ class Settings:
("event_type", pymongo.TEXT),
("metadata.service_name", pymongo.TEXT),
("metadata.user_id", pymongo.TEXT),
- ("payload", pymongo.TEXT),
+ ("execution_id", pymongo.TEXT),
],
name="idx_text_search",
language_override="none",
@@ -90,6 +75,7 @@ class EventArchiveDocument(Document):
"""Archived event with deletion metadata.
Mirrors EventDocument structure with additional archive metadata.
+ Uses extra='allow' for event-specific fields.
"""
event_id: Indexed(str, unique=True) # type: ignore[valid-type]
@@ -98,7 +84,6 @@ class EventArchiveDocument(Document):
timestamp: Indexed(datetime) # type: ignore[valid-type]
aggregate_id: str | None = None
metadata: EventMetadata
- payload: dict[str, Any] = Field(default_factory=dict)
stored_at: datetime | None = None
ttl_expires_at: datetime | None = None
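
How extra='allow' keeps event-specific fields at the top level of the stored document: a sketch with a plain Pydantic model standing in for the Beanie EventDocument (Beanie documents are Pydantic models; collection and index settings omitted here).

from datetime import datetime, timezone

from pydantic import BaseModel, ConfigDict, Field


class EventDocumentSketch(BaseModel):
    # Stand-in for EventDocument: undeclared fields are kept, not dropped.
    model_config = ConfigDict(extra="allow")

    event_id: str
    event_type: str
    timestamp: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
    execution_id: str | None = None  # declared explicitly because most event types carry it


doc = EventDocumentSketch(
    event_id="e-1",
    event_type="pod.created",
    execution_id="x-42",
    pod_name="runner-0",  # undeclared: lands at the top level thanks to extra="allow"
)
assert doc.model_dump()["pod_name"] == "runner-0"
# The sparse indexes on execution_id / pod_name can therefore index these fields directly.
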
diff --git a/backend/app/db/repositories/admin/admin_events_repository.py b/backend/app/db/repositories/admin/admin_events_repository.py
index 6c5ce164..f5802681 100644
--- a/backend/app/db/repositories/admin/admin_events_repository.py
+++ b/backend/app/db/repositories/admin/admin_events_repository.py
@@ -1,4 +1,3 @@
-from dataclasses import asdict
from datetime import datetime, timedelta, timezone
from typing import Any
@@ -14,9 +13,8 @@
from app.domain.admin import ReplaySessionData, ReplaySessionStatusDetail
from app.domain.admin.replay_updates import ReplaySessionUpdate
from app.domain.enums.replay import ReplayStatus
-from app.domain.events import EventMetadata as DomainEventMetadata
-from app.domain.events.event_models import (
- Event,
+from app.domain.events import (
+ DomainEvent,
EventBrowseResult,
EventDetail,
EventExportRow,
@@ -25,9 +23,10 @@
EventSummary,
HourlyEventCount,
UserEventCount,
+ domain_event_adapter,
)
from app.domain.events.query_builders import EventStatsAggregation
-from app.domain.replay.models import ReplayConfig, ReplayFilter, ReplaySessionState
+from app.domain.replay.models import ReplayFilter, ReplaySessionState
class AdminEventsRepository:
@@ -58,15 +57,7 @@ async def browse_events(
total = await query.count()
docs = await query.sort([(sort_by, sort_order)]).skip(skip).limit(limit).to_list()
- events = [
- Event(
- **{
- **d.model_dump(exclude={"id", "revision_id"}),
- "metadata": DomainEventMetadata(**d.metadata.model_dump()),
- }
- )
- for d in docs
- ]
+ events = [domain_event_adapter.validate_python(d, from_attributes=True) for d in docs]
return EventBrowseResult(events=events, total=total, skip=skip, limit=limit)
@@ -75,12 +66,7 @@ async def get_event_detail(self, event_id: str) -> EventDetail | None:
if not doc:
return None
- event = Event(
- **{
- **doc.model_dump(exclude={"id", "revision_id"}),
- "metadata": DomainEventMetadata(**doc.metadata.model_dump()),
- }
- )
+ event = domain_event_adapter.validate_python(doc, from_attributes=True)
related_query = {"metadata.correlation_id": doc.metadata.correlation_id, "event_id": {"$ne": event_id}}
related_docs = await (
@@ -178,7 +164,7 @@ async def export_events_csv(self, event_filter: EventFilter) -> list[EventExport
EventExportRow(
event_id=doc.event_id,
event_type=str(doc.event_type),
- timestamp=doc.timestamp.isoformat(),
+ timestamp=doc.timestamp,
correlation_id=doc.metadata.correlation_id or "",
aggregate_id=doc.aggregate_id or "",
user_id=doc.metadata.user_id or "",
@@ -189,17 +175,9 @@ async def export_events_csv(self, event_filter: EventFilter) -> list[EventExport
for doc in docs
]
- async def archive_event(self, event: Event, deleted_by: str) -> bool:
+ async def archive_event(self, event: DomainEvent, deleted_by: str) -> bool:
archive_doc = EventArchiveDocument(
- event_id=event.event_id,
- event_type=event.event_type,
- event_version=event.event_version,
- timestamp=event.timestamp,
- aggregate_id=event.aggregate_id,
- metadata=event.metadata,
- payload=event.payload,
- stored_at=event.stored_at,
- ttl_expires_at=event.ttl_expires_at,
+ **event.model_dump(),
deleted_at=datetime.now(timezone.utc),
deleted_by=deleted_by,
)
@@ -207,9 +185,7 @@ async def archive_event(self, event: Event, deleted_by: str) -> bool:
return True
async def create_replay_session(self, session: ReplaySessionState) -> str:
- data = asdict(session)
- data["config"] = session.config.model_dump()
- doc = ReplaySessionDocument(**data)
+ doc = ReplaySessionDocument(**session.model_dump())
await doc.insert()
return session.session_id
@@ -217,19 +193,15 @@ async def get_replay_session(self, session_id: str) -> ReplaySessionState | None
doc = await ReplaySessionDocument.find_one({"session_id": session_id})
if not doc:
return None
- data = doc.model_dump(exclude={"id", "revision_id"})
- data["config"] = ReplayConfig.model_validate(data["config"])
- return ReplaySessionState(**data)
+ return ReplaySessionState.model_validate(doc, from_attributes=True)
async def update_replay_session(self, session_id: str, updates: ReplaySessionUpdate) -> bool:
- update_dict = {k: (v.value if hasattr(v, "value") else v) for k, v in asdict(updates).items() if v is not None}
+ update_dict = updates.model_dump(exclude_none=True)
if not update_dict:
return False
-
doc = await ReplaySessionDocument.find_one({"session_id": session_id})
if not doc:
return False
-
await doc.set(update_dict)
return True
@@ -276,11 +248,7 @@ async def get_replay_status_with_progress(self, session_id: str) -> ReplaySessio
original_query = doc.config.filter.custom_query
original_events = await EventDocument.find(original_query).limit(10).to_list()
- execution_ids = set()
- for event in original_events:
- exec_id = event.payload.get("execution_id") or event.aggregate_id
- if exec_id:
- execution_ids.add(exec_id)
+ execution_ids = {event.execution_id for event in original_events if event.execution_id}
for exec_id in list(execution_ids)[:10]:
exec_doc = await ExecutionDocument.find_one({"execution_id": exec_id})
@@ -288,7 +256,7 @@ async def get_replay_status_with_progress(self, session_id: str) -> ReplaySessio
execution_results.append(
{
"execution_id": exec_doc.execution_id,
- "status": exec_doc.status.value if exec_doc.status else None,
+ "status": exec_doc.status if exec_doc.status else None,
"stdout": exec_doc.stdout,
"stderr": exec_doc.stderr,
"exit_code": exec_doc.exit_code,
@@ -300,9 +268,7 @@ async def get_replay_status_with_progress(self, session_id: str) -> ReplaySessio
)
# Convert document to domain
- data = doc.model_dump(exclude={"id", "revision_id"})
- data["config"] = ReplayConfig.model_validate(data["config"])
- session = ReplaySessionState(**data)
+ session = ReplaySessionState.model_validate(doc, from_attributes=True)
return ReplaySessionStatusDetail(
session=session,
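
domain_event_adapter and the DomainEvent union come from app.domain.events.typed, which is not part of this diff; a plausible minimal sketch of such an adapter is a Pydantic TypeAdapter over a discriminated union, which the repositories call with from_attributes=True so both Beanie documents and raw dicts validate. The event classes below are illustrative only.

from typing import Annotated, Literal, Union

from pydantic import BaseModel, Field, TypeAdapter


class ExecutionStartedSketch(BaseModel):
    event_type: Literal["execution.started"]
    event_id: str
    execution_id: str


class PodCreatedSketch(BaseModel):
    event_type: Literal["pod.created"]
    event_id: str
    pod_name: str


DomainEventSketch = Annotated[
    Union[ExecutionStartedSketch, PodCreatedSketch],
    Field(discriminator="event_type"),
]
domain_event_adapter: TypeAdapter[DomainEventSketch] = TypeAdapter(DomainEventSketch)

event = domain_event_adapter.validate_python(
    {"event_type": "pod.created", "event_id": "e-1", "pod_name": "runner-0"}
)
assert isinstance(event, PodCreatedSketch)
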
diff --git a/backend/app/db/repositories/admin/admin_user_repository.py b/backend/app/db/repositories/admin/admin_user_repository.py
index 400d84a6..1c2d2678 100644
--- a/backend/app/db/repositories/admin/admin_user_repository.py
+++ b/backend/app/db/repositories/admin/admin_user_repository.py
@@ -1,5 +1,4 @@
import re
-from dataclasses import asdict
from datetime import datetime, timezone
from beanie.odm.operators.find import BaseFindOperator
@@ -24,9 +23,9 @@ def __init__(self, security_service: SecurityService) -> None:
self.security_service = security_service
async def create_user(self, create_data: DomainUserCreate) -> User:
- doc = UserDocument(**asdict(create_data))
+ doc = UserDocument(**create_data.model_dump())
await doc.insert()
- return User(**doc.model_dump(exclude={"id", "revision_id"}))
+ return User.model_validate(doc, from_attributes=True)
async def list_users(
self, limit: int = 100, offset: int = 0, search: str | None = None, role: UserRole | None = None
@@ -48,19 +47,19 @@ async def list_users(
query = UserDocument.find(*conditions)
total = await query.count()
docs = await query.skip(offset).limit(limit).to_list()
- users = [User(**doc.model_dump(exclude={"id", "revision_id"})) for doc in docs]
+ users = [User.model_validate(doc, from_attributes=True) for doc in docs]
return UserListResult(users=users, total=total, offset=offset, limit=limit)
async def get_user_by_id(self, user_id: str) -> User | None:
doc = await UserDocument.find_one({"user_id": user_id})
- return User(**doc.model_dump(exclude={"id", "revision_id"})) if doc else None
+ return User.model_validate(doc, from_attributes=True) if doc else None
async def update_user(self, user_id: str, update_data: UserUpdate) -> User | None:
doc = await UserDocument.find_one({"user_id": user_id})
if not doc:
return None
- update_dict = {k: v for k, v in asdict(update_data).items() if v is not None}
+ update_dict = update_data.model_dump(exclude_none=True)
# Handle password hashing
if "password" in update_dict:
update_dict["hashed_password"] = self.security_service.get_password_hash(update_dict.pop("password"))
@@ -68,7 +67,7 @@ async def update_user(self, user_id: str, update_data: UserUpdate) -> User | Non
if update_dict:
update_dict["updated_at"] = datetime.now(timezone.utc)
await doc.set(update_dict)
- return User(**doc.model_dump(exclude={"id", "revision_id"}))
+ return User.model_validate(doc, from_attributes=True)
async def delete_user(self, user_id: str, cascade: bool = True) -> dict[str, int]:
deleted_counts = {}
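
The partial-update idiom used throughout these repositories: model_dump(exclude_none=True) replaces the old asdict-plus-filter dance. A sketch with an illustrative update model; note that exclude_none also drops fields a caller explicitly set to None, so the idiom assumes None always means "leave unchanged".

from pydantic import BaseModel


class UserUpdateSketch(BaseModel):
    # Illustrative partial-update model: unset fields default to None and are skipped.
    email: str | None = None
    role: str | None = None
    password: str | None = None


update = UserUpdateSketch(role="admin")
update_dict = update.model_dump(exclude_none=True)
assert update_dict == {"role": "admin"}

# Field-specific handling still happens after the generic dump, e.g. password hashing:
if "password" in update_dict:
    update_dict["hashed_password"] = "<hash of " + update_dict.pop("password") + ">"  # stand-in hasher
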
diff --git a/backend/app/db/repositories/event_repository.py b/backend/app/db/repositories/event_repository.py
index b7cbc245..998a863d 100644
--- a/backend/app/db/repositories/event_repository.py
+++ b/backend/app/db/repositories/event_repository.py
@@ -1,5 +1,4 @@
import logging
-from dataclasses import asdict
from datetime import datetime, timedelta, timezone
from typing import Any, Mapping
@@ -10,13 +9,14 @@
from app.core.tracing.utils import add_span_attributes
from app.db.docs import EventArchiveDocument, EventDocument
from app.domain.enums.events import EventType
-from app.domain.events import Event
-from app.domain.events.event_models import (
+from app.domain.events import (
ArchivedEvent,
+ DomainEvent,
EventAggregationResult,
EventListResult,
EventReplayInfo,
EventStatistics,
+ domain_event_adapter,
)
@@ -36,13 +36,9 @@ def _build_time_filter(self, start_time: datetime | None, end_time: datetime | N
"""Build time filter dict for aggregation pipelines."""
return {key: value for key, value in {"$gte": start_time, "$lte": end_time}.items() if value is not None}
- async def store_event(self, event: Event) -> str:
- data = asdict(event)
- if not data.get("stored_at"):
- data["stored_at"] = datetime.now(timezone.utc)
- # Remove None values so EventDocument defaults can apply (e.g., ttl_expires_at)
- data = {k: v for k, v in data.items() if v is not None}
-
+ async def store_event(self, event: DomainEvent) -> str:
+ data = event.model_dump(exclude_none=True)
+ data.setdefault("stored_at", datetime.now(timezone.utc))
doc = EventDocument(**data)
add_span_attributes(
**{
@@ -55,28 +51,25 @@ async def store_event(self, event: Event) -> str:
self.logger.debug(f"Stored event {event.event_id} of type {event.event_type}")
return event.event_id
- async def store_events_batch(self, events: list[Event]) -> list[str]:
+ async def store_events_batch(self, events: list[DomainEvent]) -> list[str]:
if not events:
return []
now = datetime.now(timezone.utc)
docs = []
for event in events:
- data = asdict(event)
- if not data.get("stored_at"):
- data["stored_at"] = now
- # Remove None values so EventDocument defaults can apply
- data = {k: v for k, v in data.items() if v is not None}
+ data = event.model_dump(exclude_none=True)
+ data.setdefault("stored_at", now)
docs.append(EventDocument(**data))
await EventDocument.insert_many(docs)
add_span_attributes(**{"events.batch.count": len(events)})
self.logger.info(f"Stored {len(events)} events in batch")
return [event.event_id for event in events]
- async def get_event(self, event_id: str) -> Event | None:
+ async def get_event(self, event_id: str) -> DomainEvent | None:
doc = await EventDocument.find_one({"event_id": event_id})
if not doc:
return None
- return Event(**doc.model_dump(exclude={"id", "revision_id"}))
+ return domain_event_adapter.validate_python(doc, from_attributes=True)
async def get_events_by_type(
self,
@@ -85,7 +78,7 @@ async def get_events_by_type(
end_time: datetime | None = None,
limit: int = 100,
skip: int = 0,
- ) -> list[Event]:
+ ) -> list[DomainEvent]:
conditions = [
EventDocument.event_type == event_type,
*self._time_conditions(start_time, end_time),
@@ -97,26 +90,26 @@ async def get_events_by_type(
.limit(limit)
.to_list()
)
- return [Event(**d.model_dump(exclude={"id", "revision_id"})) for d in docs]
+ return [domain_event_adapter.validate_python(d, from_attributes=True) for d in docs]
async def get_events_by_aggregate(
self, aggregate_id: str, event_types: list[EventType] | None = None, limit: int = 100
- ) -> list[Event]:
+ ) -> list[DomainEvent]:
conditions = [
EventDocument.aggregate_id == aggregate_id,
- In(EventDocument.event_type, [t.value for t in event_types]) if event_types else None,
+ In(EventDocument.event_type, list(event_types)) if event_types else None,
]
conditions = [c for c in conditions if c is not None]
docs = (
await EventDocument.find(*conditions).sort([("timestamp", SortDirection.ASCENDING)]).limit(limit).to_list()
)
- return [Event(**d.model_dump(exclude={"id", "revision_id"})) for d in docs]
+ return [domain_event_adapter.validate_python(d, from_attributes=True) for d in docs]
async def get_events_by_correlation(self, correlation_id: str, limit: int = 100, skip: int = 0) -> EventListResult:
query = EventDocument.find(EventDocument.metadata.correlation_id == correlation_id)
total_count = await query.count()
docs = await query.sort([("timestamp", SortDirection.ASCENDING)]).skip(skip).limit(limit).to_list()
- events = [Event(**d.model_dump(exclude={"id", "revision_id"})) for d in docs]
+ events = [domain_event_adapter.validate_python(d, from_attributes=True) for d in docs]
return EventListResult(
events=events,
total=total_count,
@@ -133,7 +126,7 @@ async def get_events_by_user(
end_time: datetime | None = None,
limit: int = 100,
skip: int = 0,
- ) -> list[Event]:
+ ) -> list[DomainEvent]:
conditions = [
EventDocument.metadata.user_id == user_id,
In(EventDocument.event_type, event_types) if event_types else None,
@@ -147,14 +140,14 @@ async def get_events_by_user(
.limit(limit)
.to_list()
)
- return [Event(**d.model_dump(exclude={"id", "revision_id"})) for d in docs]
+ return [domain_event_adapter.validate_python(d, from_attributes=True) for d in docs]
async def get_execution_events(
self, execution_id: str, limit: int = 100, skip: int = 0, exclude_system_events: bool = False
) -> EventListResult:
conditions: list[Any] = [
Or(
- EventDocument.payload["execution_id"] == execution_id,
+ {"execution_id": execution_id},
EventDocument.aggregate_id == execution_id,
),
Not(RegEx(EventDocument.metadata.service_name, "^system-")) if exclude_system_events else None,
@@ -163,7 +156,7 @@ async def get_execution_events(
query = EventDocument.find(*conditions)
total_count = await query.count()
docs = await query.sort([("timestamp", SortDirection.ASCENDING)]).skip(skip).limit(limit).to_list()
- events = [Event(**d.model_dump(exclude={"id", "revision_id"})) for d in docs]
+ events = [domain_event_adapter.validate_python(d, from_attributes=True) for d in docs]
return EventListResult(
events=events,
total=total_count,
@@ -281,7 +274,7 @@ async def get_user_events_paginated(
total_count = await query.count()
sort_direction = SortDirection.DESCENDING if sort_order == "desc" else SortDirection.ASCENDING
docs = await query.sort([("timestamp", sort_direction)]).skip(skip).limit(limit).to_list()
- events = [Event(**d.model_dump(exclude={"id", "revision_id"})) for d in docs]
+ events = [domain_event_adapter.validate_python(d, from_attributes=True) for d in docs]
return EventListResult(
events=events,
total=total_count,
@@ -304,7 +297,7 @@ async def query_events(
cursor = EventDocument.find(query)
total_count = await cursor.count()
docs = await cursor.sort([(sort_field, SortDirection.DESCENDING)]).skip(skip).limit(limit).to_list()
- events = [Event(**d.model_dump(exclude={"id", "revision_id"})) for d in docs]
+ events = [domain_event_adapter.validate_python(d, from_attributes=True) for d in docs]
return EventListResult(
events=events, total=total_count, skip=skip, limit=limit, has_more=(skip + limit) < total_count
)
@@ -337,30 +330,13 @@ async def delete_event_with_archival(
return None
deleted_at = datetime.now(timezone.utc)
- archived_doc = EventArchiveDocument(
- event_id=doc.event_id,
- event_type=doc.event_type,
- event_version=doc.event_version,
- timestamp=doc.timestamp,
- metadata=doc.metadata,
- payload=doc.payload,
- aggregate_id=doc.aggregate_id,
- stored_at=doc.stored_at,
- ttl_expires_at=doc.ttl_expires_at,
- deleted_at=deleted_at,
- deleted_by=deleted_by,
- deletion_reason=deletion_reason,
- )
+ archive_fields = {"deleted_at": deleted_at, "deleted_by": deleted_by, "deletion_reason": deletion_reason}
+ archived_doc = EventArchiveDocument.model_validate(doc, from_attributes=True).model_copy(update=archive_fields)
await archived_doc.insert()
await doc.delete()
- return ArchivedEvent(
- **doc.model_dump(exclude={"id", "revision_id"}),
- deleted_at=deleted_at,
- deleted_by=deleted_by,
- deletion_reason=deletion_reason,
- )
+ return ArchivedEvent.model_validate(doc, from_attributes=True).model_copy(update=archive_fields)
- async def get_aggregate_events_for_replay(self, aggregate_id: str, limit: int = 10000) -> list[Event]:
+ async def get_aggregate_events_for_replay(self, aggregate_id: str, limit: int = 10000) -> list[DomainEvent]:
return await self.get_events_by_aggregate(aggregate_id=aggregate_id, limit=limit)
async def get_aggregate_replay_info(self, aggregate_id: str) -> EventReplayInfo | None:
@@ -381,7 +357,7 @@ async def get_aggregate_replay_info(self, aggregate_id: str) -> EventReplayInfo
]
async for doc in EventDocument.aggregate(pipeline):
- events = [Event(**e) for e in doc["events"]]
+ events = [domain_event_adapter.validate_python(e) for e in doc["events"]]
return EventReplayInfo(
events=events,
event_count=doc["event_count"],
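
The archival idiom in delete_event_with_archival above: validate the target model from the source document's attributes, then layer the archive-only fields on with model_copy(update=...), which copies without re-validating (acceptable here, since the archive fields are built locally). Simplified stand-in models:

from datetime import datetime, timezone

from pydantic import BaseModel, ConfigDict


class StoredEventSketch(BaseModel):
    model_config = ConfigDict(from_attributes=True)

    event_id: str
    event_type: str


class ArchivedEventSketch(StoredEventSketch):
    deleted_at: datetime | None = None
    deleted_by: str | None = None
    deletion_reason: str | None = None


source = StoredEventSketch(event_id="e-1", event_type="pod.created")
archive_fields = {
    "deleted_at": datetime.now(timezone.utc),
    "deleted_by": "admin@example.com",
    "deletion_reason": "user data deletion request",
}
archived = ArchivedEventSketch.model_validate(source, from_attributes=True).model_copy(update=archive_fields)
assert archived.deleted_by == "admin@example.com"
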
diff --git a/backend/app/db/repositories/execution_repository.py b/backend/app/db/repositories/execution_repository.py
index e7b62e67..66406501 100644
--- a/backend/app/db/repositories/execution_repository.py
+++ b/backend/app/db/repositories/execution_repository.py
@@ -1,11 +1,10 @@
import logging
-from dataclasses import asdict
from datetime import datetime, timezone
from typing import Any
from beanie.odm.enums import SortDirection
-from app.db.docs import ExecutionDocument, ResourceUsage
+from app.db.docs import ExecutionDocument
from app.domain.execution import (
DomainExecution,
DomainExecutionCreate,
@@ -19,11 +18,11 @@ def __init__(self, logger: logging.Logger):
self.logger = logger
async def create_execution(self, create_data: DomainExecutionCreate) -> DomainExecution:
- doc = ExecutionDocument(**asdict(create_data))
+ doc = ExecutionDocument(**create_data.model_dump())
self.logger.info("Inserting execution into MongoDB", extra={"execution_id": doc.execution_id})
await doc.insert()
self.logger.info("Inserted execution", extra={"execution_id": doc.execution_id})
- return DomainExecution(**doc.model_dump(exclude={"id"}))
+ return DomainExecution.model_validate(doc, from_attributes=True)
async def get_execution(self, execution_id: str) -> DomainExecution | None:
self.logger.info("Searching for execution in MongoDB", extra={"execution_id": execution_id})
@@ -33,16 +32,14 @@ async def get_execution(self, execution_id: str) -> DomainExecution | None:
return None
self.logger.info("Found execution in MongoDB", extra={"execution_id": execution_id})
- return DomainExecution(**doc.model_dump(exclude={"id"}))
+ return DomainExecution.model_validate(doc, from_attributes=True)
async def update_execution(self, execution_id: str, update_data: DomainExecutionUpdate) -> bool:
doc = await ExecutionDocument.find_one({"execution_id": execution_id})
if not doc:
return False
- update_dict = {k: v for k, v in asdict(update_data).items() if v is not None}
- if "resource_usage" in update_dict:
- update_dict["resource_usage"] = ResourceUsage.model_validate(update_data.resource_usage)
+ update_dict = update_data.model_dump(exclude_none=True)
if update_dict:
update_dict["updated_at"] = datetime.now(timezone.utc)
await doc.set(update_dict)
@@ -60,7 +57,7 @@ async def write_terminal_result(self, result: ExecutionResultDomain) -> bool:
"exit_code": result.exit_code,
"stdout": result.stdout,
"stderr": result.stderr,
- "resource_usage": ResourceUsage.model_validate(result.resource_usage),
+ "resource_usage": result.resource_usage.model_dump() if result.resource_usage else None,
"error_type": result.error_type,
"updated_at": datetime.now(timezone.utc),
}
@@ -78,7 +75,7 @@ async def get_executions(
]
find_query = find_query.sort(beanie_sort)
docs = await find_query.skip(skip).limit(limit).to_list()
- return [DomainExecution(**doc.model_dump(exclude={"id"})) for doc in docs]
+ return [DomainExecution.model_validate(doc, from_attributes=True) for doc in docs]
async def count_executions(self, query: dict[str, Any]) -> int:
return await ExecutionDocument.find(query).count()
diff --git a/backend/app/db/repositories/notification_repository.py b/backend/app/db/repositories/notification_repository.py
index e7c261f9..51d2bb68 100644
--- a/backend/app/db/repositories/notification_repository.py
+++ b/backend/app/db/repositories/notification_repository.py
@@ -1,5 +1,4 @@
import logging
-from dataclasses import asdict
from datetime import UTC, datetime, timedelta
from typing import Any
@@ -23,9 +22,9 @@ def __init__(self, logger: logging.Logger):
self.logger = logger
async def create_notification(self, create_data: DomainNotificationCreate) -> DomainNotification:
- doc = NotificationDocument(**asdict(create_data))
+ doc = NotificationDocument(**create_data.model_dump())
await doc.insert()
- return DomainNotification(**doc.model_dump(exclude={"id"}))
+ return DomainNotification.model_validate(doc, from_attributes=True)
async def update_notification(
self, notification_id: str, user_id: str, update_data: DomainNotificationUpdate
@@ -33,7 +32,7 @@ async def update_notification(
doc = await NotificationDocument.find_one({"notification_id": notification_id, "user_id": user_id})
if not doc:
return False
- update_dict = {k: v for k, v in asdict(update_data).items() if v is not None}
+ update_dict = update_data.model_dump(exclude_none=True)
if update_dict:
await doc.set(update_dict)
return True
@@ -42,7 +41,7 @@ async def get_notification(self, notification_id: str, user_id: str) -> DomainNo
doc = await NotificationDocument.find_one({"notification_id": notification_id, "user_id": user_id})
if not doc:
return None
- return DomainNotification(**doc.model_dump(exclude={"id"}))
+ return DomainNotification.model_validate(doc, from_attributes=True)
async def mark_as_read(self, notification_id: str, user_id: str) -> bool:
doc = await NotificationDocument.find_one({"notification_id": notification_id, "user_id": user_id})
@@ -89,7 +88,7 @@ async def list_notifications(
.limit(limit)
.to_list()
)
- return [DomainNotification(**doc.model_dump(exclude={"id"})) for doc in docs]
+ return [DomainNotification.model_validate(doc, from_attributes=True) for doc in docs]
async def count_notifications(self, user_id: str, *additional_conditions: Any) -> int:
conditions = [NotificationDocument.user_id == user_id, *additional_conditions]
@@ -129,7 +128,7 @@ async def find_pending_notifications(self, batch_size: int = 10) -> list[DomainN
.limit(batch_size)
.to_list()
)
- return [DomainNotification(**doc.model_dump(exclude={"id"})) for doc in docs]
+ return [DomainNotification.model_validate(doc, from_attributes=True) for doc in docs]
async def find_scheduled_notifications(self, batch_size: int = 10) -> list[DomainNotification]:
now = datetime.now(UTC)
@@ -142,7 +141,7 @@ async def find_scheduled_notifications(self, batch_size: int = 10) -> list[Domai
.limit(batch_size)
.to_list()
)
- return [DomainNotification(**doc.model_dump(exclude={"id"})) for doc in docs]
+ return [DomainNotification.model_validate(doc, from_attributes=True) for doc in docs]
async def cleanup_old_notifications(self, days: int = 30) -> int:
cutoff = datetime.now(UTC) - timedelta(days=days)
@@ -160,18 +159,18 @@ async def get_subscription(
if not doc:
# Default: enabled=True for new users (consistent with get_all_subscriptions)
return DomainNotificationSubscription(user_id=user_id, channel=channel, enabled=True)
- return DomainNotificationSubscription(**doc.model_dump(exclude={"id"}))
+ return DomainNotificationSubscription.model_validate(doc, from_attributes=True)
async def upsert_subscription(
self, user_id: str, channel: NotificationChannel, update_data: DomainSubscriptionUpdate
) -> DomainNotificationSubscription:
existing = await NotificationSubscriptionDocument.find_one({"user_id": user_id, "channel": channel})
- update_dict = {k: v for k, v in asdict(update_data).items() if v is not None}
+ update_dict = update_data.model_dump(exclude_none=True)
update_dict["updated_at"] = datetime.now(UTC)
if existing:
await existing.set(update_dict)
- return DomainNotificationSubscription(**existing.model_dump(exclude={"id"}))
+ return DomainNotificationSubscription.model_validate(existing, from_attributes=True)
else:
doc = NotificationSubscriptionDocument(
user_id=user_id,
@@ -179,14 +178,14 @@ async def upsert_subscription(
**update_dict,
)
await doc.insert()
- return DomainNotificationSubscription(**doc.model_dump(exclude={"id"}))
+ return DomainNotificationSubscription.model_validate(doc, from_attributes=True)
async def get_all_subscriptions(self, user_id: str) -> dict[NotificationChannel, DomainNotificationSubscription]:
subs: dict[NotificationChannel, DomainNotificationSubscription] = {}
for channel in NotificationChannel:
doc = await NotificationSubscriptionDocument.find_one({"user_id": user_id, "channel": channel})
if doc:
- subs[channel] = DomainNotificationSubscription(**doc.model_dump(exclude={"id"}))
+ subs[channel] = DomainNotificationSubscription.model_validate(doc, from_attributes=True)
else:
subs[channel] = DomainNotificationSubscription(user_id=user_id, channel=channel, enabled=True)
return subs
@@ -198,7 +197,7 @@ async def get_users_by_roles(self, roles: list[UserRole]) -> list[str]:
UserDocument.is_active == True, # noqa: E712
).to_list()
user_ids = [doc.user_id for doc in docs if doc.user_id]
- self.logger.info(f"Found {len(user_ids)} users with roles {[r.value for r in roles]}")
+ self.logger.info(f"Found {len(user_ids)} users with roles {list(roles)}")
return user_ids
async def get_active_users(self, days: int = 30) -> list[str]:
diff --git a/backend/app/db/repositories/replay_repository.py b/backend/app/db/repositories/replay_repository.py
index 387f489a..1ec73b0d 100644
--- a/backend/app/db/repositories/replay_repository.py
+++ b/backend/app/db/repositories/replay_repository.py
@@ -1,5 +1,4 @@
import logging
-from dataclasses import asdict
from datetime import datetime
from typing import Any, AsyncIterator
@@ -18,7 +17,7 @@ def __init__(self, logger: logging.Logger) -> None:
async def save_session(self, session: ReplaySessionState) -> None:
existing = await ReplaySessionDocument.find_one({"session_id": session.session_id})
- doc = ReplaySessionDocument(**asdict(session))
+ doc = ReplaySessionDocument(**session.model_dump())
if existing:
doc.id = existing.id
await doc.save()
@@ -27,7 +26,7 @@ async def get_session(self, session_id: str) -> ReplaySessionState | None:
doc = await ReplaySessionDocument.find_one({"session_id": session_id})
if not doc:
return None
- return ReplaySessionState(**doc.model_dump(exclude={"id", "revision_id"}))
+ return ReplaySessionState.model_validate(doc, from_attributes=True)
async def list_sessions(
self, status: ReplayStatus | None = None, user_id: str | None = None, limit: int = 100, skip: int = 0
@@ -44,7 +43,7 @@ async def list_sessions(
.limit(limit)
.to_list()
)
- return [ReplaySessionState(**doc.model_dump(exclude={"id", "revision_id"})) for doc in docs]
+ return [ReplaySessionState.model_validate(doc, from_attributes=True) for doc in docs]
async def update_session_status(self, session_id: str, status: ReplayStatus) -> bool:
doc = await ReplaySessionDocument.find_one({"session_id": session_id})
@@ -70,7 +69,7 @@ async def count_sessions(self, *conditions: Any) -> int:
return await ReplaySessionDocument.find(*conditions).count()
async def update_replay_session(self, session_id: str, updates: ReplaySessionUpdate) -> bool:
- update_dict = {k: (v.value if hasattr(v, "value") else v) for k, v in asdict(updates).items() if v is not None}
+ update_dict = updates.model_dump(exclude_none=True)
if not update_dict:
return False
doc = await ReplaySessionDocument.find_one({"session_id": session_id})
@@ -91,9 +90,7 @@ async def fetch_events(
batch = []
async for doc in cursor:
- # Merge payload to top level for schema_registry deserialization
- d = doc.model_dump(exclude={"id", "revision_id", "stored_at", "ttl_expires_at"})
- batch.append({**{k: v for k, v in d.items() if k != "payload"}, **d.get("payload", {})})
+ batch.append(doc.model_dump(exclude={"id", "revision_id", "stored_at", "ttl_expires_at"}))
if len(batch) >= batch_size:
yield batch
batch = []
diff --git a/backend/app/db/repositories/resource_allocation_repository.py b/backend/app/db/repositories/resource_allocation_repository.py
index c2d5e79c..9a103cf6 100644
--- a/backend/app/db/repositories/resource_allocation_repository.py
+++ b/backend/app/db/repositories/resource_allocation_repository.py
@@ -1,4 +1,3 @@
-from dataclasses import asdict
from datetime import datetime, timezone
from uuid import uuid4
@@ -13,10 +12,10 @@ async def count_active(self, language: str) -> int:
async def create_allocation(self, create_data: DomainResourceAllocationCreate) -> DomainResourceAllocation:
doc = ResourceAllocationDocument(
allocation_id=str(uuid4()),
- **asdict(create_data),
+ **create_data.model_dump(),
)
await doc.insert()
- return DomainResourceAllocation(**doc.model_dump(exclude={"id"}))
+ return DomainResourceAllocation.model_validate(doc, from_attributes=True)
async def release_allocation(self, allocation_id: str) -> bool:
doc = await ResourceAllocationDocument.find_one({"allocation_id": allocation_id})
diff --git a/backend/app/db/repositories/saga_repository.py b/backend/app/db/repositories/saga_repository.py
index 5eaf1169..95527dc9 100644
--- a/backend/app/db/repositories/saga_repository.py
+++ b/backend/app/db/repositories/saga_repository.py
@@ -1,4 +1,3 @@
-from dataclasses import asdict
from datetime import datetime, timezone
from typing import Any
@@ -30,7 +29,7 @@ def _filter_conditions(self, saga_filter: SagaFilter) -> list[BaseFindOperator]:
async def upsert_saga(self, saga: Saga) -> bool:
existing = await SagaDocument.find_one({"saga_id": saga.saga_id})
- doc = SagaDocument(**asdict(saga))
+ doc = SagaDocument(**saga.model_dump())
if existing:
doc.id = existing.id
await doc.save()
@@ -41,11 +40,11 @@ async def get_saga_by_execution_and_name(self, execution_id: str, saga_name: str
SagaDocument.execution_id == execution_id,
SagaDocument.saga_name == saga_name,
)
- return Saga(**doc.model_dump(exclude={"id"})) if doc else None
+ return Saga.model_validate(doc, from_attributes=True) if doc else None
async def get_saga(self, saga_id: str) -> Saga | None:
doc = await SagaDocument.find_one({"saga_id": saga_id})
- return Saga(**doc.model_dump(exclude={"id"})) if doc else None
+ return Saga.model_validate(doc, from_attributes=True) if doc else None
async def get_sagas_by_execution(
self, execution_id: str, state: SagaState | None = None, limit: int = 100, skip: int = 0
@@ -60,7 +59,7 @@ async def get_sagas_by_execution(
total = await query.count()
docs = await query.sort([("created_at", SortDirection.DESCENDING)]).skip(skip).limit(limit).to_list()
return SagaListResult(
- sagas=[Saga(**d.model_dump(exclude={"id"})) for d in docs],
+ sagas=[Saga.model_validate(d, from_attributes=True) for d in docs],
total=total,
skip=skip,
limit=limit,
@@ -72,7 +71,7 @@ async def list_sagas(self, saga_filter: SagaFilter, limit: int = 100, skip: int
total = await query.count()
docs = await query.sort([("created_at", SortDirection.DESCENDING)]).skip(skip).limit(limit).to_list()
return SagaListResult(
- sagas=[Saga(**d.model_dump(exclude={"id"})) for d in docs],
+ sagas=[Saga.model_validate(d, from_attributes=True) for d in docs],
total=total,
skip=skip,
limit=limit,
@@ -116,7 +115,7 @@ async def find_timed_out_sagas(
.limit(limit)
.to_list()
)
- return [Saga(**d.model_dump(exclude={"id"})) for d in docs]
+ return [Saga.model_validate(d, from_attributes=True) for d in docs]
async def get_saga_statistics(self, saga_filter: SagaFilter | None = None) -> dict[str, Any]:
conditions = self._filter_conditions(saga_filter) if saga_filter else []
diff --git a/backend/app/db/repositories/saved_script_repository.py b/backend/app/db/repositories/saved_script_repository.py
index a29cbb4f..af46101d 100644
--- a/backend/app/db/repositories/saved_script_repository.py
+++ b/backend/app/db/repositories/saved_script_repository.py
@@ -1,5 +1,3 @@
-from dataclasses import asdict
-
from beanie.operators import Eq
from app.db.docs import SavedScriptDocument
@@ -8,16 +6,16 @@
class SavedScriptRepository:
async def create_saved_script(self, create_data: DomainSavedScriptCreate, user_id: str) -> DomainSavedScript:
- doc = SavedScriptDocument(**asdict(create_data), user_id=user_id)
+ doc = SavedScriptDocument(**create_data.model_dump(), user_id=user_id)
await doc.insert()
- return DomainSavedScript(**doc.model_dump(exclude={"id", "revision_id"}))
+ return DomainSavedScript.model_validate(doc, from_attributes=True)
async def get_saved_script(self, script_id: str, user_id: str) -> DomainSavedScript | None:
doc = await SavedScriptDocument.find_one(
Eq(SavedScriptDocument.script_id, script_id),
Eq(SavedScriptDocument.user_id, user_id),
)
- return DomainSavedScript(**doc.model_dump(exclude={"id", "revision_id"})) if doc else None
+ return DomainSavedScript.model_validate(doc, from_attributes=True) if doc else None
async def update_saved_script(
self,
@@ -32,9 +30,9 @@ async def update_saved_script(
if not doc:
return None
- update_dict = {k: v for k, v in asdict(update_data).items() if v is not None}
+ update_dict = update_data.model_dump(exclude_none=True)
await doc.set(update_dict)
- return DomainSavedScript(**doc.model_dump(exclude={"id", "revision_id"}))
+ return DomainSavedScript.model_validate(doc, from_attributes=True)
async def delete_saved_script(self, script_id: str, user_id: str) -> bool:
doc = await SavedScriptDocument.find_one(
@@ -48,4 +46,4 @@ async def delete_saved_script(self, script_id: str, user_id: str) -> bool:
async def list_saved_scripts(self, user_id: str) -> list[DomainSavedScript]:
docs = await SavedScriptDocument.find(Eq(SavedScriptDocument.user_id, user_id)).to_list()
- return [DomainSavedScript(**d.model_dump(exclude={"id", "revision_id"})) for d in docs]
+ return [DomainSavedScript.model_validate(d, from_attributes=True) for d in docs]
diff --git a/backend/app/db/repositories/sse_repository.py b/backend/app/db/repositories/sse_repository.py
index 0979842d..339dc72a 100644
--- a/backend/app/db/repositories/sse_repository.py
+++ b/backend/app/db/repositories/sse_repository.py
@@ -13,11 +13,11 @@ async def get_execution_status(self, execution_id: str) -> SSEExecutionStatusDom
return SSEExecutionStatusDomain(
execution_id=execution_id,
status=doc.status,
- timestamp=datetime.now(timezone.utc).isoformat(),
+ timestamp=datetime.now(timezone.utc),
)
async def get_execution(self, execution_id: str) -> DomainExecution | None:
doc = await ExecutionDocument.find_one({"execution_id": execution_id})
if not doc:
return None
- return DomainExecution(**doc.model_dump(exclude={"id", "revision_id"}))
+ return DomainExecution.model_validate(doc, from_attributes=True)
diff --git a/backend/app/db/repositories/user_repository.py b/backend/app/db/repositories/user_repository.py
index 7f3e928f..4af4d41c 100644
--- a/backend/app/db/repositories/user_repository.py
+++ b/backend/app/db/repositories/user_repository.py
@@ -1,5 +1,4 @@
import re
-from dataclasses import asdict
from datetime import datetime, timezone
from beanie.odm.operators.find import BaseFindOperator
@@ -13,16 +12,16 @@
class UserRepository:
async def get_user(self, username: str) -> User | None:
doc = await UserDocument.find_one({"username": username})
- return User(**doc.model_dump(exclude={"id", "revision_id"})) if doc else None
+ return User.model_validate(doc, from_attributes=True) if doc else None
async def create_user(self, create_data: DomainUserCreate) -> User:
- doc = UserDocument(**asdict(create_data))
+ doc = UserDocument(**create_data.model_dump())
await doc.insert()
- return User(**doc.model_dump(exclude={"id", "revision_id"}))
+ return User.model_validate(doc, from_attributes=True)
async def get_user_by_id(self, user_id: str) -> User | None:
doc = await UserDocument.find_one({"user_id": user_id})
- return User(**doc.model_dump(exclude={"id", "revision_id"})) if doc else None
+ return User.model_validate(doc, from_attributes=True) if doc else None
async def list_users(
self, limit: int = 100, offset: int = 0, search: str | None = None, role: UserRole | None = None
@@ -45,7 +44,7 @@ async def list_users(
total = await query.count()
docs = await query.skip(offset).limit(limit).to_list()
return UserListResult(
- users=[User(**d.model_dump(exclude={"id", "revision_id"})) for d in docs],
+ users=[User.model_validate(d, from_attributes=True) for d in docs],
total=total,
offset=offset,
limit=limit,
@@ -56,11 +55,11 @@ async def update_user(self, user_id: str, update_data: DomainUserUpdate) -> User
if not doc:
return None
- update_dict = {k: v for k, v in asdict(update_data).items() if v is not None}
+ update_dict = update_data.model_dump(exclude_none=True)
if update_dict:
update_dict["updated_at"] = datetime.now(timezone.utc)
await doc.set(update_dict)
- return User(**doc.model_dump(exclude={"id", "revision_id"}))
+ return User.model_validate(doc, from_attributes=True)
async def delete_user(self, user_id: str) -> bool:
doc = await UserDocument.find_one({"user_id": user_id})
diff --git a/backend/app/db/repositories/user_settings_repository.py b/backend/app/db/repositories/user_settings_repository.py
index 3cf2c844..ee05bd4f 100644
--- a/backend/app/db/repositories/user_settings_repository.py
+++ b/backend/app/db/repositories/user_settings_repository.py
@@ -1,14 +1,12 @@
import logging
-from dataclasses import asdict
from datetime import datetime
-from typing import List
from beanie.odm.enums import SortDirection
from beanie.operators import GT, LTE, In
from app.db.docs import EventDocument, UserSettingsDocument, UserSettingsSnapshotDocument
from app.domain.enums.events import EventType
-from app.domain.user.settings_models import DomainUserSettings
+from app.domain.user.settings_models import DomainUserSettings, DomainUserSettingsChangedEvent
class UserSettingsRepository:
@@ -19,11 +17,11 @@ async def get_snapshot(self, user_id: str) -> DomainUserSettings | None:
doc = await UserSettingsDocument.find_one({"user_id": user_id})
if not doc:
return None
- return DomainUserSettings(**doc.model_dump(exclude={"id", "revision_id"}))
+ return DomainUserSettings.model_validate(doc, from_attributes=True)
async def create_snapshot(self, settings: DomainUserSettings) -> None:
existing = await UserSettingsDocument.find_one({"user_id": settings.user_id})
- doc = UserSettingsDocument(**asdict(settings))
+ doc = UserSettingsDocument(**settings.model_dump())
if existing:
doc.id = existing.id
await doc.save()
@@ -32,11 +30,11 @@ async def create_snapshot(self, settings: DomainUserSettings) -> None:
async def get_settings_events(
self,
user_id: str,
- event_types: List[EventType],
+ event_types: list[EventType],
since: datetime | None = None,
until: datetime | None = None,
limit: int | None = None,
- ) -> List[EventDocument]:
+ ) -> list[DomainUserSettingsChangedEvent]:
aggregate_id = f"user_settings_{user_id}"
conditions = [
EventDocument.aggregate_id == aggregate_id,
@@ -50,7 +48,13 @@ async def get_settings_events(
if limit:
find_query = find_query.limit(limit)
- return await find_query.to_list()
+ docs = await find_query.to_list()
+ return [
+ DomainUserSettingsChangedEvent.model_validate(e, from_attributes=True).model_copy(
+ update={"correlation_id": e.metadata.correlation_id}
+ )
+ for e in docs
+ ]
async def count_events_since_snapshot(self, user_id: str) -> int:
aggregate_id = f"user_settings_{user_id}"
diff --git a/backend/app/dlq/manager.py b/backend/app/dlq/manager.py
index fdbb729c..2fad0295 100644
--- a/backend/app/dlq/manager.py
+++ b/backend/app/dlq/manager.py
@@ -148,7 +148,7 @@ def _kafka_msg_to_message(self, msg: Message) -> DLQMessage:
async def _on_start(self) -> None:
"""Start DLQ manager."""
- topic_name = f"{self.settings.KAFKA_TOPIC_PREFIX}{str(self.dlq_topic)}"
+ topic_name = f"{self.settings.KAFKA_TOPIC_PREFIX}{self.dlq_topic}"
self.consumer.subscribe([topic_name])
# Start processing tasks
@@ -219,7 +219,7 @@ def _extract_headers(self, msg: Message) -> dict[str, str]:
async def _record_message_metrics(self, dlq_message: DLQMessage) -> None:
"""Record metrics for received DLQ message."""
- self.metrics.record_dlq_message_received(dlq_message.original_topic, str(dlq_message.event_type))
+ self.metrics.record_dlq_message_received(dlq_message.original_topic, dlq_message.event_type)
self.metrics.record_dlq_message_age(dlq_message.age_seconds)
async def _process_message_with_tracing(self, msg: Message, dlq_message: DLQMessage) -> None:
@@ -233,9 +233,9 @@ async def _process_message_with_tracing(self, msg: Message, dlq_message: DLQMess
context=ctx,
kind=SpanKind.CONSUMER,
attributes={
- str(EventAttributes.KAFKA_TOPIC): str(self.dlq_topic),
- str(EventAttributes.EVENT_TYPE): str(dlq_message.event_type),
- str(EventAttributes.EVENT_ID): dlq_message.event_id or "",
+ EventAttributes.KAFKA_TOPIC: self.dlq_topic,
+ EventAttributes.EVENT_TYPE: dlq_message.event_type,
+ EventAttributes.EVENT_ID: dlq_message.event_id or "",
},
):
await self._process_dlq_message(dlq_message)
@@ -324,9 +324,7 @@ async def _retry_message(self, message: DLQMessage) -> None:
"dlq_retry_timestamp": datetime.now(timezone.utc).isoformat(),
}
hdrs = inject_trace_context(hdrs)
- from typing import cast
-
- kafka_headers = cast(list[tuple[str, str | bytes]], [(k, v.encode()) for k, v in hdrs.items()])
+ kafka_headers: list[tuple[str, str | bytes]] = [(k, v.encode()) for k, v in hdrs.items()]
# Get the original event
event = message.event
@@ -352,7 +350,7 @@ async def _retry_message(self, message: DLQMessage) -> None:
await asyncio.to_thread(self.producer.flush, timeout=5)
# Update metrics
- self.metrics.record_dlq_message_retried(message.original_topic, str(message.event_type), "success")
+ self.metrics.record_dlq_message_retried(message.original_topic, message.event_type, "success")
# Update status
await self._update_message_status(
@@ -371,7 +369,7 @@ async def _retry_message(self, message: DLQMessage) -> None:
async def _discard_message(self, message: DLQMessage, reason: str) -> None:
# Update metrics
- self.metrics.record_dlq_message_discarded(message.original_topic, str(message.event_type), reason)
+ self.metrics.record_dlq_message_discarded(message.original_topic, message.event_type, reason)
# Update status
await self._update_message_status(
@@ -454,7 +452,7 @@ async def retry_message_manually(self, event_id: str) -> bool:
# Guard against invalid states
if doc.status in {DLQMessageStatus.DISCARDED, DLQMessageStatus.RETRIED}:
- self.logger.info("Skipping manual retry", extra={"event_id": event_id, "status": str(doc.status)})
+ self.logger.info("Skipping manual retry", extra={"event_id": event_id, "status": doc.status})
return False
message = self._doc_to_message(doc)
@@ -490,6 +488,30 @@ async def retry_messages_batch(self, event_ids: list[str]) -> DLQBatchRetryResul
return DLQBatchRetryResult(total=len(event_ids), successful=successful, failed=failed, details=details)
+ async def discard_message_manually(self, event_id: str, reason: str) -> bool:
+ """Manually discard a DLQ message with state validation.
+
+ Args:
+ event_id: The event ID to discard
+ reason: Reason for discarding
+
+ Returns:
+            True if discarded, False if not found or already in a terminal state
+ """
+ doc = await DLQMessageDocument.find_one({"event_id": event_id})
+ if not doc:
+ self.logger.error("Message not found in DLQ", extra={"event_id": event_id})
+ return False
+
+ # Guard against invalid states (terminal states)
+ if doc.status in {DLQMessageStatus.DISCARDED, DLQMessageStatus.RETRIED}:
+ self.logger.info("Skipping manual discard", extra={"event_id": event_id, "status": doc.status})
+ return False
+
+ message = self._doc_to_message(doc)
+ await self._discard_message(message, reason)
+ return True
+
def create_dlq_manager(
settings: Settings,
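
A hypothetical call site for the new discard_message_manually helper (the manager class name and the surrounding wiring are assumptions, not part of this diff):

async def discard_stuck_message(manager, event_id: str) -> bool:
    # `manager` is the DLQ manager instance built by create_dlq_manager (class name not shown in this hunk).
    ok = await manager.discard_message_manually(event_id, reason="stuck for over 7 days, operator decision")
    if not ok:
        # Not found, or already RETRIED / DISCARDED (terminal states) - nothing to do.
        pass
    return ok
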
diff --git a/backend/app/domain/admin/overview_models.py b/backend/app/domain/admin/overview_models.py
index 23f91408..ae805616 100644
--- a/backend/app/domain/admin/overview_models.py
+++ b/backend/app/domain/admin/overview_models.py
@@ -5,7 +5,7 @@
from pydantic.dataclasses import dataclass
-from app.domain.events import Event, EventStatistics
+from app.domain.events import DomainEvent, EventStatistics
from app.domain.user import User as DomainAdminUser
@@ -31,4 +31,4 @@ class AdminUserOverviewDomain:
stats: EventStatistics
derived_counts: DerivedCountsDomain
rate_limit_summary: RateLimitSummaryDomain
- recent_events: List[Event] = field(default_factory=list)
+ recent_events: List[DomainEvent] = field(default_factory=list)
diff --git a/backend/app/domain/admin/replay_updates.py b/backend/app/domain/admin/replay_updates.py
index c326565b..24c034d3 100644
--- a/backend/app/domain/admin/replay_updates.py
+++ b/backend/app/domain/admin/replay_updates.py
@@ -1,16 +1,15 @@
-"""Domain models for replay session updates."""
-
from datetime import datetime
-from pydantic.dataclasses import dataclass
+from pydantic import BaseModel, ConfigDict
from app.domain.enums.replay import ReplayStatus
-@dataclass
-class ReplaySessionUpdate:
+class ReplaySessionUpdate(BaseModel):
"""Domain model for replay session updates."""
+ model_config = ConfigDict(from_attributes=True)
+
status: ReplayStatus | None = None
total_events: int | None = None
replayed_events: int | None = None
diff --git a/backend/app/domain/events/__init__.py b/backend/app/domain/events/__init__.py
index 9216b541..6d02d07d 100644
--- a/backend/app/domain/events/__init__.py
+++ b/backend/app/domain/events/__init__.py
@@ -1,8 +1,8 @@
-from app.domain.events.event_metadata import EventMetadata
from app.domain.events.event_models import (
- ArchivedEvent,
- Event,
EventAggregationResult,
+ EventBrowseResult,
+ EventDetail,
+ EventExportRow,
EventFilter,
EventListResult,
EventProjection,
@@ -10,20 +10,169 @@
EventReplayInfo,
EventSortOrder,
EventStatistics,
+ EventSummary,
ExecutionEventsResult,
+ HourlyEventCount,
+ UserEventCount,
+)
+from app.domain.events.typed import (
+ # Saga Command Events
+ AllocateResourcesCommandEvent,
+ # Base types
+ ArchivedEvent,
+ # Security Events
+ AuthFailedEvent,
+ BaseEvent,
+ CreatePodCommandEvent,
+ DeletePodCommandEvent,
+ DomainEvent,
+ EventMetadata,
+ # Execution Events
+ ExecutionAcceptedEvent,
+ ExecutionCancelledEvent,
+ ExecutionCompletedEvent,
+ ExecutionFailedEvent,
+ ExecutionQueuedEvent,
+ ExecutionRequestedEvent,
+ ExecutionRunningEvent,
+ ExecutionStartedEvent,
+ ExecutionTimeoutEvent,
+ # Notification Events
+ NotificationClickedEvent,
+ NotificationCreatedEvent,
+ NotificationDeliveredEvent,
+ NotificationFailedEvent,
+ NotificationPreferencesUpdatedEvent,
+ NotificationReadEvent,
+ NotificationSentEvent,
+ # Pod Events
+ PodCreatedEvent,
+ PodDeletedEvent,
+ PodFailedEvent,
+ PodRunningEvent,
+ PodScheduledEvent,
+ PodSucceededEvent,
+ PodTerminatedEvent,
+ # Resource Events
+ QuotaExceededEvent,
+ RateLimitExceededEvent,
+ ReleaseResourcesCommandEvent,
+ ResourceLimitExceededEvent,
+ # Result Events
+ ResultFailedEvent,
+ ResultStoredEvent,
+ # Saga Events
+ SagaCancelledEvent,
+ SagaCompensatedEvent,
+ SagaCompensatingEvent,
+ SagaCompletedEvent,
+ SagaFailedEvent,
+ SagaStartedEvent,
+ # Script Events
+ ScriptDeletedEvent,
+ ScriptSavedEvent,
+ ScriptSharedEvent,
+ SecurityViolationEvent,
+ # System Events
+ ServiceRecoveredEvent,
+ ServiceUnhealthyEvent,
+ SystemErrorEvent,
+ # User Events
+ UserDeletedEvent,
+ UserLoggedInEvent,
+ UserLoggedOutEvent,
+ UserLoginEvent,
+ UserRegisteredEvent,
+ UserSettingsUpdatedEvent,
+ UserUpdatedEvent,
+ domain_event_adapter,
)
__all__ = [
- "ArchivedEvent",
- "Event",
+ # Query/filter/result types
"EventAggregationResult",
+ "EventBrowseResult",
+ "EventDetail",
+ "EventExportRow",
"EventFilter",
"EventListResult",
- "EventMetadata",
"EventProjection",
"EventQuery",
"EventReplayInfo",
"EventSortOrder",
"EventStatistics",
+ "EventSummary",
"ExecutionEventsResult",
+ "HourlyEventCount",
+ "UserEventCount",
+ # Base types
+ "ArchivedEvent",
+ "BaseEvent",
+ "DomainEvent",
+ "EventMetadata",
+ "domain_event_adapter",
+ # Execution Events
+ "ExecutionRequestedEvent",
+ "ExecutionAcceptedEvent",
+ "ExecutionQueuedEvent",
+ "ExecutionStartedEvent",
+ "ExecutionRunningEvent",
+ "ExecutionCompletedEvent",
+ "ExecutionFailedEvent",
+ "ExecutionTimeoutEvent",
+ "ExecutionCancelledEvent",
+ # Pod Events
+ "PodCreatedEvent",
+ "PodScheduledEvent",
+ "PodRunningEvent",
+ "PodSucceededEvent",
+ "PodFailedEvent",
+ "PodTerminatedEvent",
+ "PodDeletedEvent",
+ # Result Events
+ "ResultStoredEvent",
+ "ResultFailedEvent",
+ # User Events
+ "UserSettingsUpdatedEvent",
+ "UserRegisteredEvent",
+ "UserLoginEvent",
+ "UserLoggedInEvent",
+ "UserLoggedOutEvent",
+ "UserUpdatedEvent",
+ "UserDeletedEvent",
+ # Notification Events
+ "NotificationCreatedEvent",
+ "NotificationSentEvent",
+ "NotificationDeliveredEvent",
+ "NotificationFailedEvent",
+ "NotificationReadEvent",
+ "NotificationClickedEvent",
+ "NotificationPreferencesUpdatedEvent",
+ # Saga Events
+ "SagaStartedEvent",
+ "SagaCompletedEvent",
+ "SagaFailedEvent",
+ "SagaCancelledEvent",
+ "SagaCompensatingEvent",
+ "SagaCompensatedEvent",
+ # Saga Command Events
+ "CreatePodCommandEvent",
+ "DeletePodCommandEvent",
+ "AllocateResourcesCommandEvent",
+ "ReleaseResourcesCommandEvent",
+ # Script Events
+ "ScriptSavedEvent",
+ "ScriptDeletedEvent",
+ "ScriptSharedEvent",
+ # Security Events
+ "SecurityViolationEvent",
+ "RateLimitExceededEvent",
+ "AuthFailedEvent",
+ # Resource Events
+ "ResourceLimitExceededEvent",
+ "QuotaExceededEvent",
+ # System Events
+ "SystemErrorEvent",
+ "ServiceUnhealthyEvent",
+ "ServiceRecoveredEvent",
]
diff --git a/backend/app/domain/events/event_metadata.py b/backend/app/domain/events/event_metadata.py
deleted file mode 100644
index c3a57440..00000000
--- a/backend/app/domain/events/event_metadata.py
+++ /dev/null
@@ -1,25 +0,0 @@
-from dataclasses import field, replace
-from uuid import uuid4
-
-from pydantic.dataclasses import dataclass
-
-from app.domain.enums.common import Environment
-
-
-@dataclass
-class EventMetadata:
- """Domain event metadata for auditing and tracing."""
-
- service_name: str
- service_version: str
- correlation_id: str = field(default_factory=lambda: str(uuid4()))
- user_id: str | None = None
- ip_address: str | None = None
- user_agent: str | None = None
- environment: Environment = Environment.PRODUCTION
-
- def with_correlation(self, correlation_id: str) -> "EventMetadata":
- return replace(self, correlation_id=correlation_id)
-
- def with_user(self, user_id: str) -> "EventMetadata":
- return replace(self, user_id=user_id)
diff --git a/backend/app/domain/events/event_models.py b/backend/app/domain/events/event_models.py
index 3dc57627..e3a6f913 100644
--- a/backend/app/domain/events/event_models.py
+++ b/backend/app/domain/events/event_models.py
@@ -2,11 +2,11 @@
from datetime import datetime
from typing import Any
+from pydantic import BaseModel, ConfigDict
from pydantic.dataclasses import dataclass
from app.core.utils import StringEnum
-from app.domain.enums.events import EventType
-from app.domain.events.event_metadata import EventMetadata
+from app.domain.events.typed import DomainEvent
MongoQueryValue = str | dict[str, str | list[str] | float | datetime]
MongoQuery = dict[str, MongoQueryValue]
@@ -40,27 +40,6 @@ class CollectionNames(StringEnum):
DLQ_MESSAGES = "dlq_messages"
-@dataclass
-class Event:
- """Domain model for an event."""
-
- event_id: str
- event_type: EventType
- event_version: str
- timestamp: datetime
- metadata: EventMetadata
- payload: dict[str, Any]
- aggregate_id: str | None = None
- stored_at: datetime | None = None
- ttl_expires_at: datetime | None = None
- status: str | None = None
- error: str | None = None
-
- @property
- def correlation_id(self) -> str | None:
- return self.metadata.correlation_id
-
-
@dataclass
class EventSummary:
"""Lightweight event summary for lists and previews."""
@@ -71,10 +50,11 @@ class EventSummary:
aggregate_id: str | None = None
-@dataclass
-class EventFilter:
+class EventFilter(BaseModel):
"""Filter criteria for querying events."""
+ model_config = ConfigDict(from_attributes=True)
+
event_types: list[str] | None = None
aggregate_id: str | None = None
correlation_id: str | None = None
@@ -104,7 +84,7 @@ def get_sort_direction(self) -> int:
class EventListResult:
"""Result of event list query."""
- events: list[Event]
+ events: list[DomainEvent]
total: int
skip: int
limit: int
@@ -115,7 +95,7 @@ class EventListResult:
class EventBrowseResult:
"""Result for event browsing."""
- events: list[Event]
+ events: list[DomainEvent]
total: int
skip: int
limit: int
@@ -125,7 +105,7 @@ class EventBrowseResult:
class EventDetail:
"""Detailed event information with related events."""
- event: Event
+ event: DomainEvent
related_events: list[EventSummary] = field(default_factory=list)
timeline: list[EventSummary] = field(default_factory=list)
@@ -170,20 +150,11 @@ class EventProjection:
last_updated: datetime | None = None
-@dataclass
-class ArchivedEvent(Event):
- """Archived event with deletion metadata."""
-
- deleted_at: datetime | None = None
- deleted_by: str | None = None
- deletion_reason: str | None = None
-
-
@dataclass
class EventReplayInfo:
"""Information for event replay."""
- events: list[Event]
+ events: list[DomainEvent]
event_count: int
event_types: list[str]
start_time: datetime
@@ -194,11 +165,11 @@ class EventReplayInfo:
class ExecutionEventsResult:
"""Result of execution events query."""
- events: list[Event]
+ events: list[DomainEvent]
access_allowed: bool
include_system_events: bool
- def get_filtered_events(self) -> list[Event]:
+ def get_filtered_events(self) -> list[DomainEvent]:
"""Get events filtered based on access and system event settings."""
if not self.access_allowed:
return []
@@ -210,13 +181,14 @@ def get_filtered_events(self) -> list[Event]:
return events
-@dataclass
-class EventExportRow:
+class EventExportRow(BaseModel):
"""Event export row for CSV."""
+ model_config = ConfigDict(from_attributes=True)
+
event_id: str
event_type: str
- timestamp: str
+ timestamp: datetime
correlation_id: str
aggregate_id: str
user_id: str
diff --git a/backend/app/domain/events/typed.py b/backend/app/domain/events/typed.py
new file mode 100644
index 00000000..92efe7df
--- /dev/null
+++ b/backend/app/domain/events/typed.py
@@ -0,0 +1,610 @@
+from datetime import datetime, timedelta, timezone
+from typing import Annotated, Literal
+from uuid import uuid4
+
+from pydantic import BaseModel, ConfigDict, Discriminator, Field, TypeAdapter
+
+from app.domain.enums.auth import LoginMethod
+from app.domain.enums.common import Environment
+from app.domain.enums.events import EventType
+from app.domain.enums.notification import NotificationChannel, NotificationSeverity
+from app.domain.enums.storage import ExecutionErrorType, StorageType
+from app.domain.execution import ResourceUsageDomain
+
+
+class EventMetadata(BaseModel):
+ """Event metadata - embedded in all events."""
+
+ model_config = ConfigDict(from_attributes=True)
+
+ service_name: str
+ service_version: str
+ correlation_id: str = Field(default_factory=lambda: str(uuid4()))
+ user_id: str | None = None
+ ip_address: str | None = None
+ user_agent: str | None = None
+ environment: Environment = Environment.PRODUCTION
+
+
+class BaseEvent(BaseModel):
+ """Base fields for all domain events."""
+
+ model_config = ConfigDict(from_attributes=True)
+
+ event_id: str = Field(default_factory=lambda: str(uuid4()))
+ event_version: str = "1.0"
+ timestamp: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
+ aggregate_id: str | None = None
+ metadata: EventMetadata
+ stored_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
+ ttl_expires_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc) + timedelta(days=30))
+
+
+# --- Execution Events ---
+
+
+class ExecutionRequestedEvent(BaseEvent):
+ event_type: Literal[EventType.EXECUTION_REQUESTED] = EventType.EXECUTION_REQUESTED
+ execution_id: str
+ script: str
+ language: str
+ language_version: str
+ runtime_image: str
+ runtime_command: list[str]
+ runtime_filename: str
+ timeout_seconds: int
+ cpu_limit: str
+ memory_limit: str
+ cpu_request: str
+ memory_request: str
+ priority: int = 5
+
+
+class ExecutionAcceptedEvent(BaseEvent):
+ event_type: Literal[EventType.EXECUTION_ACCEPTED] = EventType.EXECUTION_ACCEPTED
+ execution_id: str
+ queue_position: int
+ estimated_wait_seconds: float | None = None
+ priority: int = 5
+
+
+class ExecutionQueuedEvent(BaseEvent):
+ event_type: Literal[EventType.EXECUTION_QUEUED] = EventType.EXECUTION_QUEUED
+ execution_id: str
+ position_in_queue: int | None = None
+ estimated_start_time: datetime | None = None
+
+
+class ExecutionStartedEvent(BaseEvent):
+ event_type: Literal[EventType.EXECUTION_STARTED] = EventType.EXECUTION_STARTED
+ execution_id: str
+ pod_name: str
+ node_name: str | None = None
+ container_id: str | None = None
+
+
+class ExecutionRunningEvent(BaseEvent):
+ event_type: Literal[EventType.EXECUTION_RUNNING] = EventType.EXECUTION_RUNNING
+ execution_id: str
+ pod_name: str
+ progress_percentage: int | None = None
+
+
+class ExecutionCompletedEvent(BaseEvent):
+ event_type: Literal[EventType.EXECUTION_COMPLETED] = EventType.EXECUTION_COMPLETED
+ execution_id: str
+ exit_code: int
+ resource_usage: ResourceUsageDomain | None = None
+ stdout: str = ""
+ stderr: str = ""
+
+
+class ExecutionFailedEvent(BaseEvent):
+ event_type: Literal[EventType.EXECUTION_FAILED] = EventType.EXECUTION_FAILED
+ execution_id: str
+ exit_code: int
+ error_type: ExecutionErrorType | None = None
+ error_message: str = ""
+ resource_usage: ResourceUsageDomain | None = None
+ stdout: str = ""
+ stderr: str = ""
+
+
+class ExecutionTimeoutEvent(BaseEvent):
+ event_type: Literal[EventType.EXECUTION_TIMEOUT] = EventType.EXECUTION_TIMEOUT
+ execution_id: str
+ timeout_seconds: int
+ resource_usage: ResourceUsageDomain | None = None
+ stdout: str = ""
+ stderr: str = ""
+
+
+class ExecutionCancelledEvent(BaseEvent):
+ event_type: Literal[EventType.EXECUTION_CANCELLED] = EventType.EXECUTION_CANCELLED
+ execution_id: str
+ reason: str
+ cancelled_by: str | None = None
+ force_terminated: bool = False
+
+
+# --- Pod Events ---
+
+
+class PodCreatedEvent(BaseEvent):
+ event_type: Literal[EventType.POD_CREATED] = EventType.POD_CREATED
+ execution_id: str
+ pod_name: str
+ namespace: str = "default"
+
+
+class PodScheduledEvent(BaseEvent):
+ event_type: Literal[EventType.POD_SCHEDULED] = EventType.POD_SCHEDULED
+ execution_id: str
+ pod_name: str
+ node_name: str = ""
+
+
+class PodRunningEvent(BaseEvent):
+ event_type: Literal[EventType.POD_RUNNING] = EventType.POD_RUNNING
+ execution_id: str
+ pod_name: str
+ container_statuses: str = ""
+
+
+class PodSucceededEvent(BaseEvent):
+ event_type: Literal[EventType.POD_SUCCEEDED] = EventType.POD_SUCCEEDED
+ execution_id: str
+ pod_name: str
+ exit_code: int = 0
+ stdout: str | None = None
+ stderr: str | None = None
+
+
+class PodFailedEvent(BaseEvent):
+ event_type: Literal[EventType.POD_FAILED] = EventType.POD_FAILED
+ execution_id: str
+ pod_name: str
+ exit_code: int = 1
+ reason: str | None = None
+ message: str | None = None
+ stdout: str | None = None
+ stderr: str | None = None
+
+
+class PodTerminatedEvent(BaseEvent):
+ event_type: Literal[EventType.POD_TERMINATED] = EventType.POD_TERMINATED
+ execution_id: str
+ pod_name: str
+ exit_code: int = 0
+ reason: str | None = None
+ message: str | None = None
+
+
+class PodDeletedEvent(BaseEvent):
+ event_type: Literal[EventType.POD_DELETED] = EventType.POD_DELETED
+ execution_id: str
+ pod_name: str
+ reason: str | None = None
+
+
+# --- Result Events ---
+
+
+class ResultStoredEvent(BaseEvent):
+ event_type: Literal[EventType.RESULT_STORED] = EventType.RESULT_STORED
+ execution_id: str
+ storage_type: StorageType | None = None
+ storage_path: str = ""
+ size_bytes: int = 0
+
+
+class ResultFailedEvent(BaseEvent):
+ event_type: Literal[EventType.RESULT_FAILED] = EventType.RESULT_FAILED
+ execution_id: str
+ error: str = ""
+ storage_type: StorageType | None = None
+
+
+# --- User Events ---
+
+
+class UserSettingsUpdatedEvent(BaseEvent):
+ event_type: Literal[EventType.USER_SETTINGS_UPDATED] = EventType.USER_SETTINGS_UPDATED
+ user_id: str
+ changed_fields: list[str] = Field(default_factory=list)
+ reason: str | None = None
+
+
+class UserRegisteredEvent(BaseEvent):
+ event_type: Literal[EventType.USER_REGISTERED] = EventType.USER_REGISTERED
+ user_id: str
+ username: str
+ email: str
+
+
+class UserLoginEvent(BaseEvent):
+ event_type: Literal[EventType.USER_LOGIN] = EventType.USER_LOGIN
+ user_id: str
+ login_method: LoginMethod
+ ip_address: str | None = None
+ user_agent: str | None = None
+
+
+class UserLoggedInEvent(BaseEvent):
+ event_type: Literal[EventType.USER_LOGGED_IN] = EventType.USER_LOGGED_IN
+ user_id: str
+ login_method: LoginMethod
+ ip_address: str | None = None
+ user_agent: str | None = None
+
+
+class UserLoggedOutEvent(BaseEvent):
+ event_type: Literal[EventType.USER_LOGGED_OUT] = EventType.USER_LOGGED_OUT
+ user_id: str
+ logout_reason: str | None = None
+
+
+class UserUpdatedEvent(BaseEvent):
+ event_type: Literal[EventType.USER_UPDATED] = EventType.USER_UPDATED
+ user_id: str
+ updated_fields: list[str] = Field(default_factory=list)
+ updated_by: str | None = None
+
+
+class UserDeletedEvent(BaseEvent):
+ event_type: Literal[EventType.USER_DELETED] = EventType.USER_DELETED
+ user_id: str
+ deleted_by: str | None = None
+ reason: str | None = None
+
+
+# --- Notification Events ---
+
+
+class NotificationCreatedEvent(BaseEvent):
+ event_type: Literal[EventType.NOTIFICATION_CREATED] = EventType.NOTIFICATION_CREATED
+ notification_id: str
+ user_id: str
+ subject: str
+ body: str
+ severity: NotificationSeverity
+ tags: list[str] = Field(default_factory=list)
+ channels: list[NotificationChannel] = Field(default_factory=list)
+
+
+class NotificationSentEvent(BaseEvent):
+ event_type: Literal[EventType.NOTIFICATION_SENT] = EventType.NOTIFICATION_SENT
+ notification_id: str
+ user_id: str
+ channel: NotificationChannel
+ sent_at: datetime
+
+
+class NotificationDeliveredEvent(BaseEvent):
+ event_type: Literal[EventType.NOTIFICATION_DELIVERED] = EventType.NOTIFICATION_DELIVERED
+ notification_id: str
+ user_id: str
+ channel: NotificationChannel
+ delivered_at: datetime
+
+
+class NotificationFailedEvent(BaseEvent):
+ event_type: Literal[EventType.NOTIFICATION_FAILED] = EventType.NOTIFICATION_FAILED
+ notification_id: str
+ user_id: str
+ channel: NotificationChannel
+ error: str
+ retry_count: int = 0
+
+
+class NotificationReadEvent(BaseEvent):
+ event_type: Literal[EventType.NOTIFICATION_READ] = EventType.NOTIFICATION_READ
+ notification_id: str
+ user_id: str
+ read_at: datetime
+
+
+class NotificationClickedEvent(BaseEvent):
+ event_type: Literal[EventType.NOTIFICATION_CLICKED] = EventType.NOTIFICATION_CLICKED
+ notification_id: str
+ user_id: str
+ clicked_at: datetime
+ action: str | None = None
+
+
+class NotificationPreferencesUpdatedEvent(BaseEvent):
+ event_type: Literal[EventType.NOTIFICATION_PREFERENCES_UPDATED] = EventType.NOTIFICATION_PREFERENCES_UPDATED
+ user_id: str
+ changed_fields: list[str] = Field(default_factory=list)
+
+
+# --- Saga Events ---
+
+
+class SagaStartedEvent(BaseEvent):
+ event_type: Literal[EventType.SAGA_STARTED] = EventType.SAGA_STARTED
+ saga_id: str
+ saga_name: str
+ execution_id: str
+ initial_event_id: str
+
+
+class SagaCompletedEvent(BaseEvent):
+ event_type: Literal[EventType.SAGA_COMPLETED] = EventType.SAGA_COMPLETED
+ saga_id: str
+ saga_name: str
+ execution_id: str
+ completed_steps: list[str] = Field(default_factory=list)
+
+
+class SagaFailedEvent(BaseEvent):
+ event_type: Literal[EventType.SAGA_FAILED] = EventType.SAGA_FAILED
+ saga_id: str
+ saga_name: str
+ execution_id: str
+ failed_step: str
+ error: str
+
+
+class SagaCancelledEvent(BaseEvent):
+ event_type: Literal[EventType.SAGA_CANCELLED] = EventType.SAGA_CANCELLED
+ saga_id: str
+ saga_name: str
+ execution_id: str
+ reason: str
+ completed_steps: list[str] = Field(default_factory=list)
+ compensated_steps: list[str] = Field(default_factory=list)
+ cancelled_at: datetime | None = None
+ cancelled_by: str | None = None
+
+
+class SagaCompensatingEvent(BaseEvent):
+ event_type: Literal[EventType.SAGA_COMPENSATING] = EventType.SAGA_COMPENSATING
+ saga_id: str
+ saga_name: str
+ execution_id: str
+ compensating_step: str
+
+
+class SagaCompensatedEvent(BaseEvent):
+ event_type: Literal[EventType.SAGA_COMPENSATED] = EventType.SAGA_COMPENSATED
+ saga_id: str
+ saga_name: str
+ execution_id: str
+ compensated_steps: list[str] = Field(default_factory=list)
+
+
+# --- Saga Command Events ---
+
+
+class CreatePodCommandEvent(BaseEvent):
+ event_type: Literal[EventType.CREATE_POD_COMMAND] = EventType.CREATE_POD_COMMAND
+ saga_id: str
+ execution_id: str
+ script: str
+ language: str
+ language_version: str
+ runtime_image: str
+ runtime_command: list[str] = Field(default_factory=list)
+ runtime_filename: str
+ timeout_seconds: int
+ cpu_limit: str
+ memory_limit: str
+ cpu_request: str
+ memory_request: str
+ priority: int = 5
+
+
+class DeletePodCommandEvent(BaseEvent):
+ event_type: Literal[EventType.DELETE_POD_COMMAND] = EventType.DELETE_POD_COMMAND
+ saga_id: str
+ execution_id: str
+ reason: str
+ pod_name: str | None = None
+ namespace: str | None = None
+
+
+class AllocateResourcesCommandEvent(BaseEvent):
+ event_type: Literal[EventType.ALLOCATE_RESOURCES_COMMAND] = EventType.ALLOCATE_RESOURCES_COMMAND
+ execution_id: str
+ cpu_request: str
+ memory_request: str
+
+
+class ReleaseResourcesCommandEvent(BaseEvent):
+ event_type: Literal[EventType.RELEASE_RESOURCES_COMMAND] = EventType.RELEASE_RESOURCES_COMMAND
+ execution_id: str
+ cpu_request: str
+ memory_request: str
+
+
+# --- Script Events ---
+
+
+class ScriptSavedEvent(BaseEvent):
+ event_type: Literal[EventType.SCRIPT_SAVED] = EventType.SCRIPT_SAVED
+ script_id: str
+ user_id: str
+ title: str
+ language: str
+
+
+class ScriptDeletedEvent(BaseEvent):
+ event_type: Literal[EventType.SCRIPT_DELETED] = EventType.SCRIPT_DELETED
+ script_id: str
+ user_id: str
+ deleted_by: str | None = None
+
+
+class ScriptSharedEvent(BaseEvent):
+ event_type: Literal[EventType.SCRIPT_SHARED] = EventType.SCRIPT_SHARED
+ script_id: str
+ shared_by: str
+ shared_with: list[str] = Field(default_factory=list)
+ permissions: str
+
+
+# --- Security Events ---
+
+
+class SecurityViolationEvent(BaseEvent):
+ event_type: Literal[EventType.SECURITY_VIOLATION] = EventType.SECURITY_VIOLATION
+ user_id: str | None = None
+ violation_type: str
+ details: str
+ ip_address: str | None = None
+
+
+class RateLimitExceededEvent(BaseEvent):
+ event_type: Literal[EventType.RATE_LIMIT_EXCEEDED] = EventType.RATE_LIMIT_EXCEEDED
+ user_id: str | None = None
+ endpoint: str
+ limit: int
+ window_seconds: int
+
+
+class AuthFailedEvent(BaseEvent):
+ event_type: Literal[EventType.AUTH_FAILED] = EventType.AUTH_FAILED
+ username: str | None = None
+ reason: str
+ ip_address: str | None = None
+
+
+# --- Resource Events ---
+
+
+class ResourceLimitExceededEvent(BaseEvent):
+ event_type: Literal[EventType.RESOURCE_LIMIT_EXCEEDED] = EventType.RESOURCE_LIMIT_EXCEEDED
+ resource_type: str
+ limit: int
+ requested: int
+ user_id: str | None = None
+
+
+class QuotaExceededEvent(BaseEvent):
+ event_type: Literal[EventType.QUOTA_EXCEEDED] = EventType.QUOTA_EXCEEDED
+ quota_type: str
+ limit: int
+ current_usage: int
+ user_id: str
+
+
+# --- System Events ---
+
+
+class SystemErrorEvent(BaseEvent):
+ event_type: Literal[EventType.SYSTEM_ERROR] = EventType.SYSTEM_ERROR
+ error_type: str
+ message: str
+ service_name: str
+ stack_trace: str | None = None
+
+
+class ServiceUnhealthyEvent(BaseEvent):
+ event_type: Literal[EventType.SERVICE_UNHEALTHY] = EventType.SERVICE_UNHEALTHY
+ service_name: str
+ health_check: str
+ reason: str
+
+
+class ServiceRecoveredEvent(BaseEvent):
+ event_type: Literal[EventType.SERVICE_RECOVERED] = EventType.SERVICE_RECOVERED
+ service_name: str
+ health_check: str
+ downtime_seconds: int
+
+
+# --- Archived Event (for deleted events) ---
+
+
+class ArchivedEvent(BaseModel):
+ """Archived event with deletion metadata. Wraps the original event data."""
+
+ model_config = ConfigDict(from_attributes=True)
+
+ event_id: str
+ event_type: EventType
+ event_version: str = "1.0"
+ timestamp: datetime
+ aggregate_id: str | None = None
+ metadata: EventMetadata
+ stored_at: datetime | None = None
+ ttl_expires_at: datetime | None = None
+ # Archive-specific fields
+ deleted_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
+ deleted_by: str | None = None
+ deletion_reason: str | None = None
+
+
+# --- Discriminated Union: TYPE SYSTEM handles dispatch ---
+
+DomainEvent = Annotated[
+ # Execution Events
+ ExecutionRequestedEvent
+ | ExecutionAcceptedEvent
+ | ExecutionQueuedEvent
+ | ExecutionStartedEvent
+ | ExecutionRunningEvent
+ | ExecutionCompletedEvent
+ | ExecutionFailedEvent
+ | ExecutionTimeoutEvent
+ | ExecutionCancelledEvent
+ # Pod Events
+ | PodCreatedEvent
+ | PodScheduledEvent
+ | PodRunningEvent
+ | PodSucceededEvent
+ | PodFailedEvent
+ | PodTerminatedEvent
+ | PodDeletedEvent
+ # Result Events
+ | ResultStoredEvent
+ | ResultFailedEvent
+ # User Events
+ | UserSettingsUpdatedEvent
+ | UserRegisteredEvent
+ | UserLoginEvent
+ | UserLoggedInEvent
+ | UserLoggedOutEvent
+ | UserUpdatedEvent
+ | UserDeletedEvent
+ # Notification Events
+ | NotificationCreatedEvent
+ | NotificationSentEvent
+ | NotificationDeliveredEvent
+ | NotificationFailedEvent
+ | NotificationReadEvent
+ | NotificationClickedEvent
+ | NotificationPreferencesUpdatedEvent
+ # Saga Events
+ | SagaStartedEvent
+ | SagaCompletedEvent
+ | SagaFailedEvent
+ | SagaCancelledEvent
+ | SagaCompensatingEvent
+ | SagaCompensatedEvent
+ # Saga Command Events
+ | CreatePodCommandEvent
+ | DeletePodCommandEvent
+ | AllocateResourcesCommandEvent
+ | ReleaseResourcesCommandEvent
+ # Script Events
+ | ScriptSavedEvent
+ | ScriptDeletedEvent
+ | ScriptSharedEvent
+ # Security Events
+ | SecurityViolationEvent
+ | RateLimitExceededEvent
+ | AuthFailedEvent
+ # Resource Events
+ | ResourceLimitExceededEvent
+ | QuotaExceededEvent
+ # System Events
+ | SystemErrorEvent
+ | ServiceUnhealthyEvent
+ | ServiceRecoveredEvent,
+ Discriminator("event_type"),
+]
+
+# TypeAdapter for polymorphic loading - validates raw data into the correct typed event
+domain_event_adapter: TypeAdapter[DomainEvent] = TypeAdapter(DomainEvent)
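A minimal usage sketch of the discriminated union above (field values are illustrative, not taken from the repository): the event_type discriminator selects the concrete model during validation.

    from app.domain.enums.events import EventType
    from app.domain.events.typed import ExecutionCompletedEvent, domain_event_adapter

    # Raw dict as it might come from storage; defaults fill event_id, timestamp, etc.
    raw = {
        "event_type": EventType.EXECUTION_COMPLETED,
        "execution_id": "exec-123",
        "exit_code": 0,
        "metadata": {"service_name": "executor", "service_version": "1.0.0"},
    }
    event = domain_event_adapter.validate_python(raw)
    assert isinstance(event, ExecutionCompletedEvent)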
diff --git a/backend/app/domain/execution/models.py b/backend/app/domain/execution/models.py
index 2bd30956..2a46c8ea 100644
--- a/backend/app/domain/execution/models.py
+++ b/backend/app/domain/execution/models.py
@@ -1,66 +1,70 @@
from __future__ import annotations
-from dataclasses import field
from datetime import datetime, timezone
from typing import Any, Optional
from uuid import uuid4
-from pydantic.dataclasses import dataclass
+from pydantic import BaseModel, ConfigDict, Field
from app.domain.enums.execution import ExecutionStatus
from app.domain.enums.storage import ExecutionErrorType
-@dataclass
-class ResourceUsageDomain:
+class ResourceUsageDomain(BaseModel):
+ model_config = ConfigDict(from_attributes=True)
+
execution_time_wall_seconds: float = 0.0
cpu_time_jiffies: int = 0
clk_tck_hertz: int = 0
peak_memory_kb: int = 0
-@dataclass
-class DomainExecution:
- execution_id: str = field(default_factory=lambda: str(uuid4()))
+class DomainExecution(BaseModel):
+ model_config = ConfigDict(from_attributes=True)
+
+ execution_id: str = Field(default_factory=lambda: str(uuid4()))
script: str = ""
status: ExecutionStatus = ExecutionStatus.QUEUED
stdout: Optional[str] = None
stderr: Optional[str] = None
lang: str = "python"
lang_version: str = "3.11"
- created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
- updated_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
+ created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
+ updated_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
resource_usage: Optional[ResourceUsageDomain] = None
user_id: Optional[str] = None
exit_code: Optional[int] = None
error_type: Optional[ExecutionErrorType] = None
-@dataclass
-class ExecutionResultDomain:
+class ExecutionResultDomain(BaseModel):
+ model_config = ConfigDict(from_attributes=True)
+
execution_id: str
status: ExecutionStatus
exit_code: int
stdout: str
stderr: str
resource_usage: ResourceUsageDomain | None = None
- created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
- metadata: dict[str, Any] = field(default_factory=dict)
+ created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
+ metadata: dict[str, Any] = Field(default_factory=dict)
error_type: ExecutionErrorType | None = None
-@dataclass
-class LanguageInfoDomain:
+class LanguageInfoDomain(BaseModel):
"""Language runtime information."""
+ model_config = ConfigDict(from_attributes=True)
+
versions: list[str]
file_ext: str
-@dataclass
-class ResourceLimitsDomain:
+class ResourceLimitsDomain(BaseModel):
"""K8s resource limits configuration."""
+ model_config = ConfigDict(from_attributes=True)
+
cpu_limit: str
memory_limit: str
cpu_request: str
@@ -69,10 +73,11 @@ class ResourceLimitsDomain:
supported_runtimes: dict[str, LanguageInfoDomain]
-@dataclass
-class DomainExecutionCreate:
+class DomainExecutionCreate(BaseModel):
"""Execution creation data for repository."""
+ model_config = ConfigDict(from_attributes=True)
+
script: str
user_id: str
lang: str = "python"
@@ -80,10 +85,11 @@ class DomainExecutionCreate:
status: ExecutionStatus = ExecutionStatus.QUEUED
-@dataclass
-class DomainExecutionUpdate:
+class DomainExecutionUpdate(BaseModel):
"""Execution update data for repository."""
+ model_config = ConfigDict(from_attributes=True)
+
status: Optional[ExecutionStatus] = None
stdout: Optional[str] = None
stderr: Optional[str] = None
diff --git a/backend/app/domain/notification/models.py b/backend/app/domain/notification/models.py
index 8a1bac45..0c849281 100644
--- a/backend/app/domain/notification/models.py
+++ b/backend/app/domain/notification/models.py
@@ -1,11 +1,10 @@
from __future__ import annotations
-from dataclasses import field
from datetime import UTC, datetime
from typing import Any
from uuid import uuid4
-from pydantic.dataclasses import dataclass
+from pydantic import BaseModel, ConfigDict, Field
from app.domain.enums.notification import (
NotificationChannel,
@@ -14,9 +13,10 @@
)
-@dataclass
-class DomainNotification:
- notification_id: str = field(default_factory=lambda: str(uuid4()))
+class DomainNotification(BaseModel):
+ model_config = ConfigDict(from_attributes=True)
+
+ notification_id: str = Field(default_factory=lambda: str(uuid4()))
user_id: str = ""
channel: NotificationChannel = NotificationChannel.IN_APP
severity: NotificationSeverity = NotificationSeverity.MEDIUM
@@ -25,9 +25,9 @@ class DomainNotification:
subject: str = ""
body: str = ""
action_url: str | None = None
- tags: list[str] = field(default_factory=list)
+ tags: list[str] = Field(default_factory=list)
- created_at: datetime = field(default_factory=lambda: datetime.now(UTC))
+ created_at: datetime = Field(default_factory=lambda: datetime.now(UTC))
scheduled_for: datetime | None = None
sent_at: datetime | None = None
delivered_at: datetime | None = None
@@ -39,20 +39,21 @@ class DomainNotification:
max_retries: int = 3
error_message: str | None = None
- metadata: dict[str, Any] = field(default_factory=dict)
+ metadata: dict[str, Any] = Field(default_factory=dict)
webhook_url: str | None = None
webhook_headers: dict[str, str] | None = None
-@dataclass
-class DomainNotificationSubscription:
+class DomainNotificationSubscription(BaseModel):
+ model_config = ConfigDict(from_attributes=True)
+
user_id: str
channel: NotificationChannel
enabled: bool = True
- severities: list[NotificationSeverity] = field(default_factory=list)
- include_tags: list[str] = field(default_factory=list)
- exclude_tags: list[str] = field(default_factory=list)
+ severities: list[NotificationSeverity] = Field(default_factory=list)
+ include_tags: list[str] = Field(default_factory=list)
+ exclude_tags: list[str] = Field(default_factory=list)
webhook_url: str | None = None
slack_webhook: str | None = None
@@ -62,38 +63,41 @@ class DomainNotificationSubscription:
timezone: str = "UTC"
batch_interval_minutes: int = 60
- created_at: datetime = field(default_factory=lambda: datetime.now(UTC))
- updated_at: datetime = field(default_factory=lambda: datetime.now(UTC))
+ created_at: datetime = Field(default_factory=lambda: datetime.now(UTC))
+ updated_at: datetime = Field(default_factory=lambda: datetime.now(UTC))
+
+class DomainNotificationListResult(BaseModel):
+ model_config = ConfigDict(from_attributes=True)
-@dataclass
-class DomainNotificationListResult:
notifications: list[DomainNotification]
total: int
unread_count: int
-@dataclass
-class DomainNotificationCreate:
+class DomainNotificationCreate(BaseModel):
"""Data for creating a notification."""
+ model_config = ConfigDict(from_attributes=True)
+
user_id: str
channel: NotificationChannel
subject: str
body: str
severity: NotificationSeverity = NotificationSeverity.MEDIUM
action_url: str | None = None
- tags: list[str] = field(default_factory=list)
+ tags: list[str] = Field(default_factory=list)
scheduled_for: datetime | None = None
webhook_url: str | None = None
webhook_headers: dict[str, str] | None = None
- metadata: dict[str, Any] = field(default_factory=dict)
+ metadata: dict[str, Any] = Field(default_factory=dict)
-@dataclass
-class DomainNotificationUpdate:
+class DomainNotificationUpdate(BaseModel):
"""Data for updating a notification."""
+ model_config = ConfigDict(from_attributes=True)
+
status: NotificationStatus | None = None
sent_at: datetime | None = None
delivered_at: datetime | None = None
@@ -104,10 +108,11 @@ class DomainNotificationUpdate:
error_message: str | None = None
-@dataclass
-class DomainSubscriptionUpdate:
+class DomainSubscriptionUpdate(BaseModel):
"""Data for updating a subscription."""
+ model_config = ConfigDict(from_attributes=True)
+
enabled: bool | None = None
severities: list[NotificationSeverity] | None = None
include_tags: list[str] | None = None
diff --git a/backend/app/domain/replay/models.py b/backend/app/domain/replay/models.py
index 429df321..30d2a185 100644
--- a/backend/app/domain/replay/models.py
+++ b/backend/app/domain/replay/models.py
@@ -1,10 +1,8 @@
-from dataclasses import field
from datetime import datetime, timezone
from typing import Any, Dict, List
from uuid import uuid4
-from pydantic import BaseModel, Field, PrivateAttr
-from pydantic.dataclasses import dataclass
+from pydantic import BaseModel, ConfigDict, Field, PrivateAttr
from app.domain.enums.events import EventType
from app.domain.enums.replay import ReplayStatus, ReplayTarget, ReplayType
@@ -53,7 +51,7 @@ def to_mongo_query(self) -> Dict[str, Any]:
query["event_id"] = {"$in": self.event_ids}
if self.execution_id:
- query["payload.execution_id"] = str(self.execution_id)
+ query["execution_id"] = str(self.execution_id)
if self.correlation_id:
query["metadata.correlation_id"] = self.correlation_id
@@ -118,10 +116,11 @@ def get_progress_callback(self) -> Any:
return self._progress_callback
-@dataclass
-class ReplaySessionState:
+class ReplaySessionState(BaseModel):
"""Domain replay session model used by services and repository."""
+ model_config = ConfigDict(from_attributes=True)
+
session_id: str
config: ReplayConfig
status: ReplayStatus = ReplayStatus.CREATED
@@ -131,30 +130,32 @@ class ReplaySessionState:
failed_events: int = 0
skipped_events: int = 0
- created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
+ created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
started_at: datetime | None = None
completed_at: datetime | None = None
last_event_at: datetime | None = None
- errors: list[dict[str, Any]] = field(default_factory=list)
+ errors: list[dict[str, Any]] = Field(default_factory=list)
# Tracking and admin fields
- correlation_id: str = field(default_factory=lambda: str(uuid4()))
+ correlation_id: str = Field(default_factory=lambda: str(uuid4()))
created_by: str | None = None
target_service: str | None = None
dry_run: bool = False
- triggered_executions: list[str] = field(default_factory=list)
+ triggered_executions: list[str] = Field(default_factory=list)
error: str | None = None
-@dataclass
-class ReplayOperationResult:
+class ReplayOperationResult(BaseModel):
+ model_config = ConfigDict(from_attributes=True)
+
session_id: str
status: ReplayStatus
message: str
-@dataclass
-class CleanupResult:
+class CleanupResult(BaseModel):
+ model_config = ConfigDict(from_attributes=True)
+
removed_sessions: int
message: str
diff --git a/backend/app/domain/saga/models.py b/backend/app/domain/saga/models.py
index a885c3bd..f95434be 100644
--- a/backend/app/domain/saga/models.py
+++ b/backend/app/domain/saga/models.py
@@ -1,36 +1,37 @@
-from dataclasses import field
from datetime import datetime, timezone
from typing import Any
from uuid import uuid4
-from pydantic.dataclasses import dataclass
+from pydantic import BaseModel, ConfigDict, Field
from app.domain.enums.saga import SagaState
-@dataclass
-class Saga:
+class Saga(BaseModel):
"""Domain model for saga."""
+ model_config = ConfigDict(from_attributes=True)
+
saga_id: str
saga_name: str
execution_id: str
state: SagaState
current_step: str | None = None
- completed_steps: list[str] = field(default_factory=list)
- compensated_steps: list[str] = field(default_factory=list)
- context_data: dict[str, Any] = field(default_factory=dict)
+ completed_steps: list[str] = Field(default_factory=list)
+ compensated_steps: list[str] = Field(default_factory=list)
+ context_data: dict[str, Any] = Field(default_factory=dict)
error_message: str | None = None
- created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
- updated_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
+ created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
+ updated_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
completed_at: datetime | None = None
retry_count: int = 0
-@dataclass
-class SagaFilter:
+class SagaFilter(BaseModel):
"""Filter criteria for saga queries."""
+ model_config = ConfigDict(from_attributes=True)
+
state: SagaState | None = None
execution_ids: list[str] | None = None
user_id: str | None = None
@@ -40,10 +41,11 @@ class SagaFilter:
error_status: bool | None = None
-@dataclass
-class SagaQuery:
+class SagaQuery(BaseModel):
"""Query parameters for saga search."""
+ model_config = ConfigDict(from_attributes=True)
+
filter: SagaFilter
sort_by: str = "created_at"
sort_order: str = "desc"
@@ -51,47 +53,51 @@ class SagaQuery:
skip: int = 0
-@dataclass
-class SagaListResult:
+class SagaListResult(BaseModel):
"""Result of saga list query."""
+ model_config = ConfigDict(from_attributes=True)
+
sagas: list[Saga]
total: int
skip: int
limit: int
- has_more: bool = field(init=False)
- def __post_init__(self) -> None:
- """Calculate has_more after initialization."""
- self.has_more = (self.skip + len(self.sagas)) < self.total
+ @property
+ def has_more(self) -> bool:
+ """Calculate has_more."""
+ return (self.skip + len(self.sagas)) < self.total
-@dataclass
-class SagaDetail:
+class SagaDetail(BaseModel):
"""Detailed saga information."""
+ model_config = ConfigDict(from_attributes=True)
+
saga: Saga
execution_details: dict[str, Any] | None = None
- step_details: list[dict[str, Any]] = field(default_factory=list)
+ step_details: list[dict[str, Any]] = Field(default_factory=list)
-@dataclass
-class SagaStatistics:
+class SagaStatistics(BaseModel):
"""Saga statistics."""
+ model_config = ConfigDict(from_attributes=True)
+
total_sagas: int
- sagas_by_state: dict[str, int] = field(default_factory=dict)
- sagas_by_name: dict[str, int] = field(default_factory=dict)
+ sagas_by_state: dict[str, int] = Field(default_factory=dict)
+ sagas_by_name: dict[str, int] = Field(default_factory=dict)
average_duration_seconds: float = 0.0
success_rate: float = 0.0
failure_rate: float = 0.0
compensation_rate: float = 0.0
-@dataclass
-class SagaConfig:
+class SagaConfig(BaseModel):
"""Configuration for saga orchestration (domain)."""
+ model_config = ConfigDict(from_attributes=True)
+
name: str
timeout_seconds: int = 300
max_retries: int = 3
@@ -104,29 +110,31 @@ class SagaConfig:
publish_commands: bool = False
-@dataclass
-class SagaInstance:
+class SagaInstance(BaseModel):
"""Runtime instance of a saga execution (domain)."""
+ model_config = ConfigDict(from_attributes=True)
+
saga_name: str
execution_id: str
state: SagaState = SagaState.CREATED
- saga_id: str = field(default_factory=lambda: str(uuid4()))
+ saga_id: str = Field(default_factory=lambda: str(uuid4()))
current_step: str | None = None
- completed_steps: list[str] = field(default_factory=list)
- compensated_steps: list[str] = field(default_factory=list)
- context_data: dict[str, Any] = field(default_factory=dict)
+ completed_steps: list[str] = Field(default_factory=list)
+ compensated_steps: list[str] = Field(default_factory=list)
+ context_data: dict[str, Any] = Field(default_factory=dict)
error_message: str | None = None
- created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
- updated_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
+ created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
+ updated_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
completed_at: datetime | None = None
retry_count: int = 0
-@dataclass
-class DomainResourceAllocation:
+class DomainResourceAllocation(BaseModel):
"""Domain model for resource allocation."""
+ model_config = ConfigDict(from_attributes=True)
+
allocation_id: str
execution_id: str
language: str
@@ -135,14 +143,15 @@ class DomainResourceAllocation:
cpu_limit: str
memory_limit: str
status: str = "active"
- allocated_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
+ allocated_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
released_at: datetime | None = None
-@dataclass
-class DomainResourceAllocationCreate:
+class DomainResourceAllocationCreate(BaseModel):
"""Data for creating a resource allocation."""
+ model_config = ConfigDict(from_attributes=True)
+
execution_id: str
language: str
cpu_request: str
diff --git a/backend/app/domain/saved_script/models.py b/backend/app/domain/saved_script/models.py
index 08622426..d62328f2 100644
--- a/backend/app/domain/saved_script/models.py
+++ b/backend/app/domain/saved_script/models.py
@@ -1,41 +1,39 @@
from __future__ import annotations
-from dataclasses import field
from datetime import datetime, timezone
-from pydantic.dataclasses import dataclass
+from pydantic import BaseModel, ConfigDict, Field
-@dataclass
-class DomainSavedScriptBase:
+class DomainSavedScriptBase(BaseModel):
+ model_config = ConfigDict(from_attributes=True)
+
name: str
script: str
-@dataclass
class DomainSavedScriptCreate(DomainSavedScriptBase):
lang: str = "python"
lang_version: str = "3.11"
description: str | None = None
-@dataclass
class DomainSavedScript(DomainSavedScriptBase):
script_id: str
user_id: str
- # Optional/defaultable fields must come after non-defaults
lang: str = "python"
lang_version: str = "3.11"
description: str | None = None
- created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
- updated_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
+ created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
+ updated_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
+
+class DomainSavedScriptUpdate(BaseModel):
+ model_config = ConfigDict(from_attributes=True)
-@dataclass
-class DomainSavedScriptUpdate:
name: str | None = None
script: str | None = None
lang: str | None = None
lang_version: str | None = None
description: str | None = None
- updated_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
+ updated_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
diff --git a/backend/app/domain/sse/models.py b/backend/app/domain/sse/models.py
index c8a59e8c..c585af05 100644
--- a/backend/app/domain/sse/models.py
+++ b/backend/app/domain/sse/models.py
@@ -2,15 +2,16 @@
from datetime import datetime
-from pydantic.dataclasses import dataclass
+from pydantic import BaseModel, ConfigDict
from app.domain.enums.execution import ExecutionStatus
-@dataclass
-class ShutdownStatus:
+class ShutdownStatus(BaseModel):
"""Status of SSE shutdown process."""
+ model_config = ConfigDict(from_attributes=True)
+
phase: str
initiated: bool
complete: bool
@@ -19,8 +20,9 @@ class ShutdownStatus:
duration: float | None = None
-@dataclass
-class SSEHealthDomain:
+class SSEHealthDomain(BaseModel):
+ model_config = ConfigDict(from_attributes=True)
+
status: str
kafka_enabled: bool
active_connections: int
@@ -31,14 +33,16 @@ class SSEHealthDomain:
timestamp: datetime
-@dataclass
-class SSEExecutionStatusDomain:
+class SSEExecutionStatusDomain(BaseModel):
+ model_config = ConfigDict(from_attributes=True)
+
execution_id: str
status: ExecutionStatus
- timestamp: str
+ timestamp: datetime
+
+class SSEEventDomain(BaseModel):
+ model_config = ConfigDict(from_attributes=True)
-@dataclass
-class SSEEventDomain:
aggregate_id: str
timestamp: datetime
diff --git a/backend/app/domain/user/__init__.py b/backend/app/domain/user/__init__.py
index 54601b31..2df59ec8 100644
--- a/backend/app/domain/user/__init__.py
+++ b/backend/app/domain/user/__init__.py
@@ -12,9 +12,9 @@
CachedSettings,
DomainEditorSettings,
DomainNotificationSettings,
- DomainSettingsEvent,
DomainSettingsHistoryEntry,
DomainUserSettings,
+ DomainUserSettingsChangedEvent,
DomainUserSettingsUpdate,
)
from .user_models import (
@@ -37,7 +37,7 @@
"CSRFValidationError",
"DomainEditorSettings",
"DomainNotificationSettings",
- "DomainSettingsEvent",
+ "DomainUserSettingsChangedEvent",
"DomainSettingsHistoryEntry",
"DomainUserCreate",
"DomainUserSettings",
diff --git a/backend/app/domain/user/settings_models.py b/backend/app/domain/user/settings_models.py
index 10a730d2..09af6217 100644
--- a/backend/app/domain/user/settings_models.py
+++ b/backend/app/domain/user/settings_models.py
@@ -1,27 +1,28 @@
from __future__ import annotations
-from dataclasses import field
from datetime import datetime, timezone
-from typing import Any, Dict, List, Optional
+from typing import Any
-from pydantic.dataclasses import dataclass
+from pydantic import BaseModel, ConfigDict, Field
from app.domain.enums.common import Theme
from app.domain.enums.events import EventType
from app.domain.enums.notification import NotificationChannel
-@dataclass
-class DomainNotificationSettings:
+class DomainNotificationSettings(BaseModel):
+ model_config = ConfigDict(from_attributes=True)
+
execution_completed: bool = True
execution_failed: bool = True
system_updates: bool = True
security_alerts: bool = True
- channels: List[NotificationChannel] = field(default_factory=list)
+ channels: list[NotificationChannel] = Field(default_factory=list)
+
+class DomainEditorSettings(BaseModel):
+ model_config = ConfigDict(from_attributes=True)
-@dataclass
-class DomainEditorSettings:
theme: str = "auto"
font_size: int = 14
tab_size: int = 4
@@ -30,64 +31,81 @@ class DomainEditorSettings:
show_line_numbers: bool = True
-@dataclass
-class DomainUserSettings:
+class DomainUserSettings(BaseModel):
+ model_config = ConfigDict(from_attributes=True)
+
user_id: str
theme: Theme = Theme.AUTO
timezone: str = "UTC"
date_format: str = "YYYY-MM-DD"
time_format: str = "24h"
- notifications: DomainNotificationSettings = field(default_factory=DomainNotificationSettings)
- editor: DomainEditorSettings = field(default_factory=DomainEditorSettings)
- custom_settings: Dict[str, Any] = field(default_factory=dict)
+ notifications: DomainNotificationSettings = Field(default_factory=DomainNotificationSettings)
+ editor: DomainEditorSettings = Field(default_factory=DomainEditorSettings)
+ custom_settings: dict[str, Any] = Field(default_factory=dict)
version: int = 1
- created_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
- updated_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
+ created_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
+ updated_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
+
+
+class DomainUserSettingsUpdate(BaseModel):
+ model_config = ConfigDict(from_attributes=True)
+ theme: Theme | None = None
+ timezone: str | None = None
+ date_format: str | None = None
+ time_format: str | None = None
+ notifications: DomainNotificationSettings | None = None
+ editor: DomainEditorSettings | None = None
+ custom_settings: dict[str, Any] | None = None
-@dataclass
-class DomainUserSettingsUpdate:
- theme: Optional[Theme] = None
- timezone: Optional[str] = None
- date_format: Optional[str] = None
- time_format: Optional[str] = None
- notifications: Optional[DomainNotificationSettings] = None
- editor: Optional[DomainEditorSettings] = None
- custom_settings: Optional[Dict[str, Any]] = None
+class DomainSettingChange(BaseModel):
+ model_config = ConfigDict(from_attributes=True)
-@dataclass
-class DomainSettingChange:
field_path: str
old_value: Any
new_value: Any
- changed_at: datetime = field(default_factory=lambda: datetime.now(timezone.utc))
- change_reason: Optional[str] = None
+ changed_at: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
+ change_reason: str | None = None
-@dataclass
-class DomainSettingsEvent:
+class DomainUserSettingsChangedEvent(BaseModel):
+ """Well-typed domain event for user settings changes."""
+
+ model_config = ConfigDict(from_attributes=True, extra="ignore")
+
+ event_id: str
event_type: EventType
timestamp: datetime
- payload: Dict[str, Any]
- correlation_id: Optional[str] = None
+ user_id: str
+ changed_fields: list[str]
+ theme: Theme | None = None
+ timezone: str | None = None
+ date_format: str | None = None
+ time_format: str | None = None
+ notifications: DomainNotificationSettings | None = None
+ editor: DomainEditorSettings | None = None
+ reason: str | None = None
+ correlation_id: str | None = None
-@dataclass
-class DomainSettingsHistoryEntry:
+class DomainSettingsHistoryEntry(BaseModel):
+ model_config = ConfigDict(from_attributes=True)
+
timestamp: datetime
event_type: EventType
field: str
old_value: Any
new_value: Any
- reason: Optional[str] = None
- correlation_id: Optional[str] = None
+ reason: str | None = None
+ correlation_id: str | None = None
-@dataclass
-class CachedSettings:
+class CachedSettings(BaseModel):
"""Wrapper for cached user settings with expiration time."""
+ model_config = ConfigDict(from_attributes=True)
+
settings: DomainUserSettings
expires_at: datetime
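A rough sketch of validating the new well-typed settings event (values are illustrative); extra="ignore" is what lets unknown keys from a wider event document pass through silently.

    from datetime import datetime, timezone

    from app.domain.enums.common import Theme
    from app.domain.enums.events import EventType
    from app.domain.user import DomainUserSettingsChangedEvent

    evt = DomainUserSettingsChangedEvent.model_validate({
        "event_id": "evt-1",
        "event_type": EventType.USER_SETTINGS_UPDATED,
        "timestamp": datetime.now(timezone.utc),
        "user_id": "user-1",
        "changed_fields": ["theme"],
        "theme": Theme.AUTO,
        "legacy_field": "dropped",  # ignored thanks to extra="ignore"
    })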
diff --git a/backend/app/domain/user/user_models.py b/backend/app/domain/user/user_models.py
index fa34d066..07c5576d 100644
--- a/backend/app/domain/user/user_models.py
+++ b/backend/app/domain/user/user_models.py
@@ -2,7 +2,7 @@
from datetime import datetime
from typing import List
-from pydantic.dataclasses import dataclass
+from pydantic import BaseModel, ConfigDict
from app.core.utils import StringEnum
from app.domain.enums.user import UserRole
@@ -32,18 +32,20 @@ class UserFilterType(StringEnum):
ROLE = "role"
-@dataclass
-class UserSearchFilter:
+class UserSearchFilter(BaseModel):
"""User search filter criteria."""
+ model_config = ConfigDict(from_attributes=True)
+
search_text: str | None = None
role: UserRole | None = None
-@dataclass
-class User:
+class User(BaseModel):
"""User domain model."""
+ model_config = ConfigDict(from_attributes=True)
+
user_id: str
username: str
email: str
@@ -55,10 +57,11 @@ class User:
updated_at: datetime
-@dataclass
-class UserUpdate:
+class UserUpdate(BaseModel):
"""User update domain model."""
+ model_config = ConfigDict(from_attributes=True)
+
username: str | None = None
email: str | None = None
role: UserRole | None = None
@@ -77,20 +80,22 @@ def has_updates(self) -> bool:
)
-@dataclass
-class UserListResult:
+class UserListResult(BaseModel):
"""Result of listing users."""
+ model_config = ConfigDict(from_attributes=True)
+
users: List[User]
total: int
offset: int
limit: int
-@dataclass
-class PasswordReset:
+class PasswordReset(BaseModel):
"""Password reset domain model."""
+ model_config = ConfigDict(from_attributes=True)
+
user_id: str
new_password: str
@@ -98,10 +103,11 @@ def is_valid(self) -> bool:
return bool(self.user_id and self.new_password and len(self.new_password) >= 8)
-@dataclass
-class UserCreation:
+class UserCreation(BaseModel):
"""User creation domain model (API-facing, with plain password)."""
+ model_config = ConfigDict(from_attributes=True)
+
username: str
email: str
password: str
@@ -120,10 +126,11 @@ def is_valid(self) -> bool:
)
-@dataclass
-class DomainUserCreate:
+class DomainUserCreate(BaseModel):
"""User creation data for repository (with hashed password)."""
+ model_config = ConfigDict(from_attributes=True)
+
username: str
email: str
hashed_password: str
@@ -132,10 +139,11 @@ class DomainUserCreate:
is_superuser: bool = False
-@dataclass
-class DomainUserUpdate:
+class DomainUserUpdate(BaseModel):
"""User update data for repository (with hashed password)."""
+ model_config = ConfigDict(from_attributes=True)
+
username: str | None = None
email: str | None = None
role: UserRole | None = None
diff --git a/backend/app/events/admin_utils.py b/backend/app/events/admin_utils.py
index ea924ade..5e133291 100644
--- a/backend/app/events/admin_utils.py
+++ b/backend/app/events/admin_utils.py
@@ -27,7 +27,8 @@ def admin_client(self) -> AdminClient:
async def check_topic_exists(self, topic: str) -> bool:
"""Check if topic exists."""
try:
- metadata = self._admin.list_topics(timeout=5.0)
+ loop = asyncio.get_running_loop()
+ metadata = await loop.run_in_executor(None, lambda: self._admin.list_topics(timeout=5.0))
return topic in metadata.topics
except Exception as e:
self.logger.error(f"Failed to check topic {topic}: {e}")
@@ -37,10 +38,11 @@ async def create_topic(self, topic: str, num_partitions: int = 1, replication_fa
"""Create a single topic."""
try:
new_topic = NewTopic(topic, num_partitions=num_partitions, replication_factor=replication_factor)
- futures = self._admin.create_topics([new_topic], operation_timeout=30.0)
-
- # Wait for result - result() returns None on success, raises exception on failure
- await asyncio.get_running_loop().run_in_executor(None, lambda: futures[topic].result(timeout=30.0))
+ loop = asyncio.get_running_loop()
+ futures = await loop.run_in_executor(
+ None, lambda: self._admin.create_topics([new_topic], operation_timeout=30.0)
+ )
+ await loop.run_in_executor(None, lambda: futures[topic].result(timeout=30.0))
self.logger.info(f"Topic {topic} created successfully")
return True
except Exception as e:
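The pattern behind these changes, sketched in isolation (the helper below is illustrative, not the repository's class): confluent-kafka's AdminClient calls block inside librdkafka, so they are offloaded to the default thread-pool executor to keep the event loop responsive.

    import asyncio

    from confluent_kafka.admin import AdminClient

    async def topic_exists(admin: AdminClient, topic: str) -> bool:
        # list_topics() is a blocking call; run it off the event loop.
        loop = asyncio.get_running_loop()
        metadata = await loop.run_in_executor(None, lambda: admin.list_topics(timeout=5.0))
        return topic in metadata.topics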
diff --git a/backend/app/events/consumer_group_monitor.py b/backend/app/events/consumer_group_monitor.py
index 3ce95770..21338dd9 100644
--- a/backend/app/events/consumer_group_monitor.py
+++ b/backend/app/events/consumer_group_monitor.py
@@ -416,7 +416,7 @@ def get_health_summary(self, status: ConsumerGroupStatus) -> Dict[str, Any]:
"""Get a health summary for a consumer group."""
return {
"group_id": status.group_id,
- "health": status.health.value,
+ "health": status.health,
"health_message": status.health_message,
"state": status.state,
"members": status.member_count,
diff --git a/backend/app/events/core/consumer.py b/backend/app/events/core/consumer.py
index ab5656d5..3d0a1355 100644
--- a/backend/app/events/core/consumer.py
+++ b/backend/app/events/core/consumer.py
@@ -216,7 +216,7 @@ def consumer(self) -> Consumer | None:
def get_status(self) -> ConsumerStatus:
return ConsumerStatus(
- state=self._state.value,
+ state=self._state,
is_running=self.is_running,
group_id=self._config.group_id,
client_id=self._config.client_id,
@@ -226,10 +226,8 @@ def get_status(self) -> ConsumerStatus:
consumer_lag=self._metrics.consumer_lag,
commit_failures=self._metrics.commit_failures,
processing_errors=self._metrics.processing_errors,
- last_message_time=(
- self._metrics.last_message_time.isoformat() if self._metrics.last_message_time else None
- ),
- last_updated=self._metrics.last_updated.isoformat() if self._metrics.last_updated else None,
+ last_message_time=self._metrics.last_message_time,
+ last_updated=self._metrics.last_updated,
),
)
diff --git a/backend/app/events/core/dispatcher.py b/backend/app/events/core/dispatcher.py
index b2f527ca..cd7e7d4f 100644
--- a/backend/app/events/core/dispatcher.py
+++ b/backend/app/events/core/dispatcher.py
@@ -60,7 +60,7 @@ async def handle_execution(event: ExecutionRequestedEvent) -> None:
"""
def decorator(handler: Callable[[T], Awaitable[None]]) -> Callable[[T], Awaitable[None]]:
- self.logger.info(f"Registering handler '{handler.__name__}' for event type '{event_type.value}'")
+ self.logger.info(f"Registering handler '{handler.__name__}' for event type '{event_type}'")
# Safe: dispatch() routes by event_type, guaranteeing correct types at runtime
self._handlers[event_type].append(handler) # type: ignore[arg-type]
return handler
@@ -75,7 +75,7 @@ def register_handler(self, event_type: EventType, handler: EventHandler) -> None
event_type: The event type this handler processes
handler: The async handler function
"""
- self.logger.info(f"Registering handler '{handler.__name__}' for event type '{event_type.value}'")
+ self.logger.info(f"Registering handler '{handler.__name__}' for event type '{event_type}'")
self._handlers[event_type].append(handler)
def remove_handler(self, event_type: EventType, handler: EventHandler) -> bool:
@@ -91,7 +91,7 @@ def remove_handler(self, event_type: EventType, handler: EventHandler) -> bool:
"""
if event_type in self._handlers and handler in self._handlers[event_type]:
self._handlers[event_type].remove(handler)
- self.logger.info(f"Removed handler '{handler.__name__}' for event type '{event_type.value}'")
+ self.logger.info(f"Removed handler '{handler.__name__}' for event type '{event_type}'")
# Clean up empty lists
if not self._handlers[event_type]:
del self._handlers[event_type]
@@ -114,10 +114,10 @@ async def dispatch(self, event: BaseEvent) -> None:
if not handlers:
self._event_metrics[event_type]["skipped"] += 1
- self.logger.debug(f"No handlers registered for event type {event_type.value}")
+ self.logger.debug(f"No handlers registered for event type {event_type}")
return
- self.logger.debug(f"Dispatching {event_type.value} to {len(handlers)} handler(s)")
+ self.logger.debug(f"Dispatching {event_type} to {len(handlers)} handler(s)")
# Run handlers concurrently for better performance
tasks = []
@@ -168,7 +168,7 @@ def get_topics_for_registered_handlers(self) -> set[str]:
def get_metrics(self) -> dict[str, dict[str, int]]:
"""Get processing metrics for all event types."""
- return {event_type.value: metrics for event_type, metrics in self._event_metrics.items()}
+ return {event_type: metrics for event_type, metrics in self._event_metrics.items()}
def clear_handlers(self) -> None:
"""Clear all registered handlers (useful for testing)."""
diff --git a/backend/app/events/core/producer.py b/backend/app/events/core/producer.py
index defc65f9..d3f68b83 100644
--- a/backend/app/events/core/producer.py
+++ b/backend/app/events/core/producer.py
@@ -127,7 +127,7 @@ async def _on_start(self) -> None:
def get_status(self) -> dict[str, Any]:
return {
- "state": self._state.value,
+ "state": self._state,
"running": self.is_running,
"config": {
"bootstrap_servers": self._config.bootstrap_servers,
diff --git a/backend/app/events/core/types.py b/backend/app/events/core/types.py
index 259622d7..33b8e3b8 100644
--- a/backend/app/events/core/types.py
+++ b/backend/app/events/core/types.py
@@ -2,6 +2,8 @@
from datetime import datetime, timezone
from typing import Any
+from pydantic import BaseModel, ConfigDict
+
from app.core.utils import StringEnum
@@ -144,23 +146,25 @@ def __post_init__(self) -> None:
self.last_updated = self.last_updated or datetime.now(timezone.utc)
-@dataclass(slots=True)
-class ConsumerMetricsSnapshot:
+class ConsumerMetricsSnapshot(BaseModel):
"""Snapshot of consumer metrics for status reporting."""
+ model_config = ConfigDict(from_attributes=True)
+
messages_consumed: int
bytes_consumed: int
consumer_lag: int
commit_failures: int
processing_errors: int
- last_message_time: str | None
- last_updated: str | None
+ last_message_time: datetime | None
+ last_updated: datetime | None
-@dataclass(slots=True)
-class ConsumerStatus:
+class ConsumerStatus(BaseModel):
"""Consumer status information."""
+ model_config = ConfigDict(from_attributes=True)
+
state: str
is_running: bool
group_id: str
diff --git a/backend/app/events/event_store.py b/backend/app/events/event_store.py
index 10007c01..fe6ce8b3 100644
--- a/backend/app/events/event_store.py
+++ b/backend/app/events/event_store.py
@@ -15,16 +15,6 @@
from app.events.schema.schema_registry import SchemaRegistryManager
from app.infrastructure.kafka.events.base import BaseEvent
-# Base fields stored at document level (everything else goes into payload)
-_BASE_FIELDS = {"event_id", "event_type", "event_version", "timestamp", "aggregate_id", "metadata"}
-_EXCLUDE_FIELDS = {"id", "revision_id", "stored_at", "ttl_expires_at"}
-
-
-def _flatten_doc(doc: "EventDocument") -> dict[str, Any]:
- """Flatten EventDocument payload to top level for schema registry deserialization."""
- d = doc.model_dump(exclude=_EXCLUDE_FIELDS)
- return {**{k: v for k, v in d.items() if k != "payload"}, **d.get("payload", {})}
-
class EventStore:
def __init__(
@@ -57,10 +47,8 @@ async def store_event(self, event: BaseEvent) -> bool:
start = asyncio.get_running_loop().time()
try:
now = datetime.now(timezone.utc)
- data = event.model_dump(exclude={"topic"})
- payload = {k: data.pop(k) for k in list(data) if k not in _BASE_FIELDS}
ttl = now + timedelta(days=self.ttl_days)
- doc = EventDocument(**data, payload=payload, stored_at=now, ttl_expires_at=ttl)
+ doc = EventDocument(**event.model_dump(exclude_none=True), stored_at=now, ttl_expires_at=ttl)
await doc.insert()
add_span_attributes(
@@ -92,11 +80,7 @@ async def store_batch(self, events: list[BaseEvent]) -> dict[str, int]:
now = datetime.now(timezone.utc)
ttl = now + timedelta(days=self.ttl_days)
try:
- docs = []
- for e in events:
- data = e.model_dump(exclude={"topic"})
- payload = {k: data.pop(k) for k in list(data) if k not in _BASE_FIELDS}
- docs.append(EventDocument(**data, payload=payload, stored_at=now, ttl_expires_at=ttl))
+ docs = [EventDocument(**e.model_dump(exclude_none=True), stored_at=now, ttl_expires_at=ttl) for e in events]
try:
await EventDocument.insert_many(docs)
@@ -130,7 +114,7 @@ async def get_event(self, event_id: str) -> BaseEvent | None:
if not doc:
return None
- event = self.schema_registry.deserialize_json(_flatten_doc(doc))
+ event = self.schema_registry.deserialize_json(doc.model_dump())
duration = asyncio.get_running_loop().time() - start
self.metrics.record_event_query_duration(duration, "get_by_id", "event_store")
@@ -156,7 +140,7 @@ async def get_events_by_type(
.limit(limit)
.to_list()
)
- events = [self.schema_registry.deserialize_json(_flatten_doc(doc)) for doc in docs]
+ events = [self.schema_registry.deserialize_json(doc.model_dump()) for doc in docs]
duration = asyncio.get_running_loop().time() - start
self.metrics.record_event_query_duration(duration, "get_by_type", "event_store")
@@ -168,12 +152,12 @@ async def get_execution_events(
event_types: list[EventType] | None = None,
) -> list[BaseEvent]:
start = asyncio.get_running_loop().time()
- query: dict[str, Any] = {"$or": [{"payload.execution_id": execution_id}, {"aggregate_id": execution_id}]}
+ query: dict[str, Any] = {"$or": [{"execution_id": execution_id}, {"aggregate_id": execution_id}]}
if event_types:
query["event_type"] = {"$in": event_types}
docs = await EventDocument.find(query).sort([("timestamp", SortDirection.ASCENDING)]).to_list()
- events = [self.schema_registry.deserialize_json(_flatten_doc(doc)) for doc in docs]
+ events = [self.schema_registry.deserialize_json(doc.model_dump()) for doc in docs]
duration = asyncio.get_running_loop().time() - start
self.metrics.record_event_query_duration(duration, "get_execution_events", "event_store")
@@ -195,7 +179,7 @@ async def get_user_events(
query["timestamp"] = tr
docs = await EventDocument.find(query).sort([("timestamp", SortDirection.DESCENDING)]).limit(limit).to_list()
- events = [self.schema_registry.deserialize_json(_flatten_doc(doc)) for doc in docs]
+ events = [self.schema_registry.deserialize_json(doc.model_dump()) for doc in docs]
duration = asyncio.get_running_loop().time() - start
self.metrics.record_event_query_duration(duration, "get_user_events", "event_store")
@@ -216,7 +200,7 @@ async def get_security_events(
query["timestamp"] = tr
docs = await EventDocument.find(query).sort([("timestamp", SortDirection.DESCENDING)]).limit(limit).to_list()
- events = [self.schema_registry.deserialize_json(_flatten_doc(doc)) for doc in docs]
+ events = [self.schema_registry.deserialize_json(doc.model_dump()) for doc in docs]
duration = asyncio.get_running_loop().time() - start
self.metrics.record_event_query_duration(duration, "get_security_events", "event_store")
@@ -229,7 +213,7 @@ async def get_correlation_chain(self, correlation_id: str) -> list[BaseEvent]:
.sort([("timestamp", SortDirection.ASCENDING)])
.to_list()
)
- events = [self.schema_registry.deserialize_json(_flatten_doc(doc)) for doc in docs]
+ events = [self.schema_registry.deserialize_json(doc.model_dump()) for doc in docs]
duration = asyncio.get_running_loop().time() - start
self.metrics.record_event_query_duration(duration, "get_correlation_chain", "event_store")
@@ -253,7 +237,7 @@ async def replay_events(
query["event_type"] = {"$in": event_types}
async for doc in EventDocument.find(query).sort([("timestamp", SortDirection.ASCENDING)]):
- event = self.schema_registry.deserialize_json(_flatten_doc(doc))
+ event = self.schema_registry.deserialize_json(doc.model_dump())
if callback:
await callback(event)
count += 1
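
For context on dropping _flatten_doc: the old documents nested event-specific fields under payload, so they had to be merged back to the top level before deserialization; with the new flat EventDocument, doc.model_dump() already has that shape. A small illustration of the merge the helper used to perform (field names are illustrative):

    nested = {"event_id": "e1", "event_type": "user_logged_in", "payload": {"user_id": "u1"}}
    flat = {k: v for k, v in nested.items() if k != "payload"} | nested["payload"]
    assert flat == {"event_id": "e1", "event_type": "user_logged_in", "user_id": "u1"}
    # once documents are stored flat, model_dump() returns this shape directly
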
diff --git a/backend/app/events/schema/schema_registry.py b/backend/app/events/schema/schema_registry.py
index fcc73eed..fbbead06 100644
--- a/backend/app/events/schema/schema_registry.py
+++ b/backend/app/events/schema/schema_registry.py
@@ -192,7 +192,7 @@ def deserialize_json(self, data: dict[str, Any]) -> BaseEvent:
return event_class.model_validate(data)
- def set_compatibility(self, subject: str, mode: str) -> None:
+ async def set_compatibility(self, subject: str, mode: str) -> None:
"""
Set compatibility for a subject via REST API.
Valid: BACKWARD, FORWARD, FULL, NONE, BACKWARD_TRANSITIVE, FORWARD_TRANSITIVE, FULL_TRANSITIVE
@@ -210,7 +210,8 @@ def set_compatibility(self, subject: str, mode: str) -> None:
raise ValueError(f"Invalid compatibility mode: {mode}")
url = f"{self.url}/config/{subject}"
- response = httpx.put(url, json={"compatibility": mode})
+ async with httpx.AsyncClient() as client:
+ response = await client.put(url, json={"compatibility": mode})
response.raise_for_status()
self.logger.info(f"Set {subject} compatibility to {mode}")
@@ -222,7 +223,7 @@ async def initialize_schemas(self) -> None:
for event_class in _get_all_event_classes():
# Use event class name with optional prefix for per-run isolation in tests
subject = f"{self.subject_prefix}{event_class.__name__}-value"
- self.set_compatibility(subject, "FORWARD")
+ await self.set_compatibility(subject, "FORWARD")
self.register_schema(subject, event_class)
self._initialized = True
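
The set_compatibility change swaps the blocking httpx.put call for an AsyncClient so schema initialization no longer blocks the event loop. A minimal sketch of the same pattern in isolation (registry URL and subject are placeholders):

    import asyncio
    import httpx

    async def set_compatibility(base_url: str, subject: str, mode: str) -> None:
        # short-lived client per call, mirroring the change above
        async with httpx.AsyncClient() as client:
            response = await client.put(f"{base_url}/config/{subject}", json={"compatibility": mode})
            response.raise_for_status()

    # asyncio.run(set_compatibility("http://localhost:8081", "MyEvent-value", "FORWARD"))
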
diff --git a/backend/app/infrastructure/kafka/events/__init__.py b/backend/app/infrastructure/kafka/events/__init__.py
index 9539d470..8f0aad85 100644
--- a/backend/app/infrastructure/kafka/events/__init__.py
+++ b/backend/app/infrastructure/kafka/events/__init__.py
@@ -16,6 +16,7 @@
NotificationCreatedEvent,
NotificationDeliveredEvent,
NotificationFailedEvent,
+ NotificationPreferencesUpdatedEvent,
NotificationReadEvent,
NotificationSentEvent,
)
@@ -59,6 +60,7 @@
UserDeletedEvent,
UserLoggedInEvent,
UserLoggedOutEvent,
+ UserLoginEvent,
UserRegisteredEvent,
UserSettingsUpdatedEvent,
UserUpdatedEvent,
@@ -88,6 +90,7 @@
"PodDeletedEvent",
# User
"UserRegisteredEvent",
+ "UserLoginEvent",
"UserLoggedInEvent",
"UserLoggedOutEvent",
"UserUpdatedEvent",
@@ -100,6 +103,7 @@
"NotificationFailedEvent",
"NotificationReadEvent",
"NotificationClickedEvent",
+ "NotificationPreferencesUpdatedEvent",
# Script
"ScriptSavedEvent",
"ScriptDeletedEvent",
diff --git a/backend/app/infrastructure/kafka/events/notification.py b/backend/app/infrastructure/kafka/events/notification.py
index 1659a0ed..197e4fa2 100644
--- a/backend/app/infrastructure/kafka/events/notification.py
+++ b/backend/app/infrastructure/kafka/events/notification.py
@@ -1,5 +1,8 @@
+from datetime import datetime
from typing import ClassVar, Literal
+from pydantic import Field
+
from app.domain.enums.events import EventType
from app.domain.enums.kafka import KafkaTopic
from app.domain.enums.notification import NotificationChannel, NotificationSeverity
@@ -24,7 +27,7 @@ class NotificationSentEvent(BaseEvent):
notification_id: str
user_id: str
channel: NotificationChannel
- sent_at: str
+ sent_at: datetime
class NotificationDeliveredEvent(BaseEvent):
@@ -33,7 +36,7 @@ class NotificationDeliveredEvent(BaseEvent):
notification_id: str
user_id: str
channel: NotificationChannel
- delivered_at: str
+ delivered_at: datetime
class NotificationFailedEvent(BaseEvent):
@@ -51,7 +54,7 @@ class NotificationReadEvent(BaseEvent):
topic: ClassVar[KafkaTopic] = KafkaTopic.NOTIFICATION_EVENTS
notification_id: str
user_id: str
- read_at: str
+ read_at: datetime
class NotificationClickedEvent(BaseEvent):
@@ -59,5 +62,12 @@ class NotificationClickedEvent(BaseEvent):
topic: ClassVar[KafkaTopic] = KafkaTopic.NOTIFICATION_EVENTS
notification_id: str
user_id: str
- clicked_at: str
+ clicked_at: datetime
action: str | None = None
+
+
+class NotificationPreferencesUpdatedEvent(BaseEvent):
+ event_type: Literal[EventType.NOTIFICATION_PREFERENCES_UPDATED] = EventType.NOTIFICATION_PREFERENCES_UPDATED
+ topic: ClassVar[KafkaTopic] = KafkaTopic.NOTIFICATION_EVENTS
+ user_id: str
+ changed_fields: list[str] = Field(default_factory=list)
diff --git a/backend/app/infrastructure/kafka/events/user.py b/backend/app/infrastructure/kafka/events/user.py
index 3443019f..0a6e6495 100644
--- a/backend/app/infrastructure/kafka/events/user.py
+++ b/backend/app/infrastructure/kafka/events/user.py
@@ -37,6 +37,15 @@ class UserRegisteredEvent(BaseEvent):
email: str
+class UserLoginEvent(BaseEvent):
+ event_type: Literal[EventType.USER_LOGIN] = EventType.USER_LOGIN
+ topic: ClassVar[KafkaTopic] = KafkaTopic.USER_EVENTS
+ user_id: str
+ login_method: LoginMethod
+ ip_address: str | None = None
+ user_agent: str | None = None
+
+
class UserLoggedInEvent(BaseEvent):
event_type: Literal[EventType.USER_LOGGED_IN] = EventType.USER_LOGGED_IN
topic: ClassVar[KafkaTopic] = KafkaTopic.USER_EVENTS
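
New event classes such as UserLoginEvent follow the existing pattern: a Literal event_type acting as the discriminator, a ClassVar topic that stays out of the serialized fields, and optional payload fields with defaults. A stripped-down sketch with string literals standing in for the project enums:

    from typing import ClassVar, Literal
    from pydantic import BaseModel

    class BaseEvent(BaseModel):            # simplified stand-in for the project's BaseEvent
        event_id: str
        event_type: str

    class UserLoginEvent(BaseEvent):
        event_type: Literal["user_login"] = "user_login"   # stands in for EventType.USER_LOGIN
        topic: ClassVar[str] = "user_events"               # ClassVar: not a model field
        user_id: str
        ip_address: str | None = None

    evt = UserLoginEvent(event_id="e1", user_id="u1")
    assert evt.event_type == "user_login"
    assert "topic" not in evt.model_dump()
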
diff --git a/backend/app/infrastructure/kafka/mappings.py b/backend/app/infrastructure/kafka/mappings.py
index b1dcfe98..5a056eef 100644
--- a/backend/app/infrastructure/kafka/mappings.py
+++ b/backend/app/infrastructure/kafka/mappings.py
@@ -5,6 +5,7 @@
from app.domain.enums.kafka import KafkaTopic
from app.infrastructure.kafka.events.base import BaseEvent
from app.infrastructure.kafka.events.execution import (
+ ExecutionAcceptedEvent,
ExecutionCancelledEvent,
ExecutionCompletedEvent,
ExecutionFailedEvent,
@@ -19,6 +20,7 @@
NotificationCreatedEvent,
NotificationDeliveredEvent,
NotificationFailedEvent,
+ NotificationPreferencesUpdatedEvent,
NotificationReadEvent,
NotificationSentEvent,
)
@@ -62,6 +64,7 @@
UserDeletedEvent,
UserLoggedInEvent,
UserLoggedOutEvent,
+ UserLoginEvent,
UserRegisteredEvent,
UserSettingsUpdatedEvent,
UserUpdatedEvent,
@@ -74,6 +77,7 @@ def get_event_class_for_type(event_type: EventType) -> Type[BaseEvent] | None:
event_map: dict[EventType, Type[BaseEvent]] = {
# Execution events
EventType.EXECUTION_REQUESTED: ExecutionRequestedEvent,
+ EventType.EXECUTION_ACCEPTED: ExecutionAcceptedEvent,
EventType.EXECUTION_QUEUED: ExecutionQueuedEvent,
EventType.EXECUTION_STARTED: ExecutionStartedEvent,
EventType.EXECUTION_RUNNING: ExecutionRunningEvent,
@@ -91,6 +95,7 @@ def get_event_class_for_type(event_type: EventType) -> Type[BaseEvent] | None:
EventType.POD_DELETED: PodDeletedEvent,
# User events
EventType.USER_REGISTERED: UserRegisteredEvent,
+ EventType.USER_LOGIN: UserLoginEvent,
EventType.USER_LOGGED_IN: UserLoggedInEvent,
EventType.USER_LOGGED_OUT: UserLoggedOutEvent,
EventType.USER_UPDATED: UserUpdatedEvent,
@@ -103,6 +108,7 @@ def get_event_class_for_type(event_type: EventType) -> Type[BaseEvent] | None:
EventType.NOTIFICATION_FAILED: NotificationFailedEvent,
EventType.NOTIFICATION_READ: NotificationReadEvent,
EventType.NOTIFICATION_CLICKED: NotificationClickedEvent,
+ EventType.NOTIFICATION_PREFERENCES_UPDATED: NotificationPreferencesUpdatedEvent,
# Script events
EventType.SCRIPT_SAVED: ScriptSavedEvent,
EventType.SCRIPT_DELETED: ScriptDeletedEvent,
diff --git a/backend/app/schemas_pydantic/sse.py b/backend/app/schemas_pydantic/sse.py
index c420b209..4a4b80da 100644
--- a/backend/app/schemas_pydantic/sse.py
+++ b/backend/app/schemas_pydantic/sse.py
@@ -28,7 +28,7 @@ class SSEExecutionEventData(BaseModel):
execution_id: str = Field(description="Execution ID this event relates to")
# Present in most events
- timestamp: str | None = Field(default=None, description="ISO 8601 timestamp")
+ timestamp: datetime | None = Field(default=None, description="Event timestamp")
# Present in business events from Kafka
event_id: str | None = Field(default=None, description="Unique event identifier")
@@ -74,7 +74,7 @@ class SSENotificationEventData(BaseModel):
# Present in control events (connected, heartbeat)
user_id: str | None = Field(default=None, description="User ID for the notification stream")
- timestamp: str | None = Field(default=None, description="ISO 8601 timestamp")
+ timestamp: datetime | None = Field(default=None, description="Event timestamp")
message: str | None = Field(default=None, description="Human-readable message")
# Present only in notification events
@@ -85,7 +85,7 @@ class SSENotificationEventData(BaseModel):
subject: str | None = Field(default=None, description="Notification subject/title")
body: str | None = Field(default=None, description="Notification body content")
action_url: str | None = Field(default=None, description="Optional action URL")
- created_at: str | None = Field(default=None, description="ISO 8601 creation timestamp")
+ created_at: datetime | None = Field(default=None, description="Creation timestamp")
class RedisNotificationMessage(BaseModel):
@@ -98,7 +98,7 @@ class RedisNotificationMessage(BaseModel):
subject: str = Field(description="Notification subject/title")
body: str = Field(description="Notification body content")
action_url: str = Field(default="", description="Optional action URL")
- created_at: str = Field(description="ISO 8601 creation timestamp")
+ created_at: datetime = Field(description="Creation timestamp")
class ShutdownStatusResponse(BaseModel):
diff --git a/backend/app/services/admin/admin_events_service.py b/backend/app/services/admin/admin_events_service.py
index bbe6e442..150524d0 100644
--- a/backend/app/services/admin/admin_events_service.py
+++ b/backend/app/services/admin/admin_events_service.py
@@ -1,7 +1,7 @@
import csv
import json
import logging
-from dataclasses import asdict, dataclass
+from dataclasses import dataclass
from datetime import datetime, timezone
from io import StringIO
from typing import Any, Dict, List
@@ -25,16 +25,18 @@
def _export_row_to_dict(row: EventExportRow) -> dict[str, str]:
"""Convert EventExportRow to dict with display names."""
+ # Use mode="json" to auto-convert datetime to ISO string
+ data = row.model_dump(mode="json")
return {
- "Event ID": row.event_id,
- "Event Type": row.event_type,
- "Timestamp": row.timestamp,
- "Correlation ID": row.correlation_id,
- "Aggregate ID": row.aggregate_id,
- "User ID": row.user_id,
- "Service": row.service,
- "Status": row.status,
- "Error": row.error,
+ "Event ID": data["event_id"],
+ "Event Type": data["event_type"],
+ "Timestamp": data["timestamp"],
+ "Correlation ID": data["correlation_id"],
+ "Aggregate ID": data["aggregate_id"],
+ "User ID": data["user_id"],
+ "Service": data["service"],
+ "Status": data["status"],
+ "Error": data["error"],
}
@@ -234,32 +236,19 @@ async def export_events_json_content(self, *, event_filter: EventFilter, limit:
result = await self._repo.browse_events(
event_filter=event_filter, skip=0, limit=limit, sort_by="timestamp", sort_order=SortDirection.DESCENDING
)
- events_data: list[dict[str, Any]] = []
- for event in result.events:
- event_dict = asdict(event)
- for fld in ["timestamp", "created_at", "updated_at", "stored_at", "ttl_expires_at"]:
- if fld in event_dict and isinstance(event_dict[fld], datetime):
- event_dict[fld] = event_dict[fld].isoformat()
- events_data.append(event_dict)
+ # mode="json" auto-converts datetime fields to ISO strings
+ events_data = [event.model_dump(mode="json") for event in result.events]
export_data: dict[str, Any] = {
"export_metadata": {
"exported_at": datetime.now(timezone.utc).isoformat(),
"total_events": len(events_data),
- "filters_applied": {
- "event_types": event_filter.event_types,
- "aggregate_id": event_filter.aggregate_id,
- "correlation_id": event_filter.correlation_id,
- "user_id": event_filter.user_id,
- "service_name": event_filter.service_name,
- "start_time": event_filter.start_time.isoformat() if event_filter.start_time else None,
- "end_time": event_filter.end_time.isoformat() if event_filter.end_time else None,
- },
+ "filters_applied": event_filter.model_dump(mode="json"),
"export_limit": limit,
},
"events": events_data,
}
- json_content = json.dumps(export_data, indent=2, default=str)
+ json_content = json.dumps(export_data, indent=2)
filename = f"events_export_{datetime.now(timezone.utc).strftime('%Y%m%d_%H%M%S')}.json"
self.logger.info(
"Exported events JSON",
@@ -279,12 +268,13 @@ async def delete_event(self, *, event_id: str, deleted_by: str) -> bool:
await self._repo.archive_event(detail.event, deleted_by)
deleted = await self._repo.delete_event(event_id)
if deleted:
+ correlation_id = detail.event.metadata.correlation_id
self.logger.info(
"Event deleted",
extra={
"event_id": event_id,
"event_type": detail.event.event_type,
- "correlation_id": detail.event.correlation_id,
+ "correlation_id": correlation_id,
"deleted_by": deleted_by,
},
)
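
The export helpers now lean on model_dump(mode="json") instead of hand-rolled isoformat() conversion, which is why default=str can be dropped from json.dumps. A minimal sketch with a trimmed-down row model:

    import json
    from datetime import datetime, timezone
    from pydantic import BaseModel

    class EventExportRow(BaseModel):     # illustrative subset of the real export row
        event_id: str
        timestamp: datetime

    row = EventExportRow(event_id="e1", timestamp=datetime(2024, 1, 1, tzinfo=timezone.utc))
    data = row.model_dump(mode="json")
    assert isinstance(data["timestamp"], str)   # ISO 8601, e.g. "2024-01-01T00:00:00Z"
    json.dumps(data)                            # already JSON-native, no default=str needed
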
diff --git a/backend/app/services/event_bus.py b/backend/app/services/event_bus.py
index a1ea1107..50a361b2 100644
--- a/backend/app/services/event_bus.py
+++ b/backend/app/services/event_bus.py
@@ -9,6 +9,7 @@
from confluent_kafka import Consumer, KafkaError, Producer
from fastapi import Request
+from pydantic import BaseModel, ConfigDict
from app.core.lifecycle import LifecycleEnabled
from app.core.metrics.context import get_connection_metrics
@@ -16,13 +17,14 @@
from app.settings import Settings
-@dataclass
-class EventBusEvent:
+class EventBusEvent(BaseModel):
"""Represents an event on the event bus."""
+ model_config = ConfigDict(from_attributes=True)
+
id: str
event_type: str
- timestamp: str
+ timestamp: datetime
payload: dict[str, Any]
@@ -134,7 +136,7 @@ async def publish(self, event_type: str, data: dict[str, Any]) -> None:
if self.producer:
try:
# Serialize and send message asynchronously
- value = json.dumps(vars(event)).encode("utf-8")
+ value = event.model_dump_json().encode("utf-8")
key = event_type.encode("utf-8") if event_type else None
# Use executor to avoid blocking
@@ -157,7 +159,7 @@ def _create_event(self, event_type: str, data: dict[str, Any]) -> EventBusEvent:
return EventBusEvent(
id=str(uuid4()),
event_type=event_type,
- timestamp=datetime.now(timezone.utc).isoformat(),
+ timestamp=datetime.now(timezone.utc),
payload=data,
)
@@ -285,14 +287,9 @@ async def _kafka_listener(self) -> None:
continue
try:
- # Deserialize message
+ # Deserialize message - Pydantic parses timestamp string to datetime
event_dict = json.loads(msg.value().decode("utf-8"))
- event = EventBusEvent(
- id=event_dict.get("id", ""),
- event_type=event_dict.get("event_type", ""),
- timestamp=event_dict.get("timestamp", ""),
- payload=event_dict.get("payload", {}),
- )
+ event = EventBusEvent.model_validate(event_dict)
await self._distribute_event(event.event_type, event)
except Exception as e:
self.logger.error(f"Error processing Kafka message: {e}")
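
With EventBusEvent as a BaseModel, the producer/consumer path becomes a plain Pydantic JSON round trip; model_validate_json could even fold the separate json.loads step into validation. A minimal sketch of the wire format, assuming the same four fields:

    from datetime import datetime, timezone
    from typing import Any
    from uuid import uuid4
    from pydantic import BaseModel

    class EventBusEvent(BaseModel):
        id: str
        event_type: str
        timestamp: datetime
        payload: dict[str, Any]

    event = EventBusEvent(
        id=str(uuid4()),
        event_type="user.settings.updated",
        timestamp=datetime.now(timezone.utc),
        payload={"user_id": "u1"},
    )
    wire = event.model_dump_json().encode("utf-8")        # producer side
    roundtrip = EventBusEvent.model_validate_json(wire)   # consumer side: parse + validate in one step
    assert roundtrip.payload == event.payload and roundtrip.timestamp.tzinfo is not None
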
diff --git a/backend/app/services/event_service.py b/backend/app/services/event_service.py
index e7dce14f..d44d9d7c 100644
--- a/backend/app/services/event_service.py
+++ b/backend/app/services/event_service.py
@@ -5,7 +5,8 @@
from app.domain.enums.events import EventType
from app.domain.enums.user import UserRole
from app.domain.events import (
- Event,
+ ArchivedEvent,
+ DomainEvent,
EventAggregationResult,
EventFilter,
EventListResult,
@@ -76,7 +77,7 @@ async def get_execution_events(
owner = None
for e in result.events:
- if e.metadata and e.metadata.user_id:
+ if e.metadata.user_id:
owner = e.metadata.user_id
break
@@ -150,7 +151,7 @@ async def get_events_by_correlation(
) -> EventListResult:
result = await self.repository.get_events_by_correlation(correlation_id=correlation_id, limit=limit, skip=skip)
if not include_all_users or user_role != UserRole.ADMIN:
- filtered = [e for e in result.events if (e.metadata and e.metadata.user_id == user_id)]
+ filtered = [e for e in result.events if e.metadata.user_id == user_id]
return EventListResult(
events=filtered,
total=result.total,
@@ -180,7 +181,7 @@ async def get_event(
event_id: str,
user_id: str,
user_role: UserRole,
- ) -> Event | None:
+ ) -> DomainEvent | None:
event = await self.repository.get_event(event_id)
if not event:
return None
@@ -218,7 +219,7 @@ async def delete_event_with_archival(
event_id: str,
deleted_by: str,
deletion_reason: str = "Admin deletion via API",
- ) -> Event | None:
+ ) -> ArchivedEvent | None:
return await self.repository.delete_event_with_archival(
event_id=event_id,
deleted_by=deleted_by,
@@ -233,7 +234,7 @@ async def get_events_by_aggregate(
aggregate_id: str,
event_types: list[EventType] | None = None,
limit: int = 100,
- ) -> list[Event]:
+ ) -> list[DomainEvent]:
return await self.repository.get_events_by_aggregate(
aggregate_id=aggregate_id,
event_types=event_types,
diff --git a/backend/app/services/execution_service.py b/backend/app/services/execution_service.py
index 09ca7922..370adc90 100644
--- a/backend/app/services/execution_service.py
+++ b/backend/app/services/execution_service.py
@@ -409,7 +409,7 @@ def _build_user_query(
query: ExecutionQuery = {"user_id": str(user_id)}
if status:
- query["status"] = status.value
+ query["status"] = status
if lang:
query["lang"] = lang
diff --git a/backend/app/services/kafka_event_service.py b/backend/app/services/kafka_event_service.py
index 83fe6d20..d2ce2456 100644
--- a/backend/app/services/kafka_event_service.py
+++ b/backend/app/services/kafka_event_service.py
@@ -11,8 +11,8 @@
from app.core.tracing.utils import inject_trace_context
from app.db.repositories.event_repository import EventRepository
from app.domain.enums.events import EventType
-from app.domain.events import Event
from app.domain.events import EventMetadata as DomainEventMetadata
+from app.domain.events import domain_event_adapter
from app.events.core import UnifiedProducer
from app.infrastructure.kafka.events.base import BaseEvent
from app.infrastructure.kafka.events.metadata import AvroEventMetadata
@@ -78,30 +78,32 @@ async def publish_event(
event_id = str(uuid4())
timestamp = datetime.now(timezone.utc)
- # Convert to domain metadata for storage (only include defined fields)
+ # Convert to domain metadata for storage
domain_metadata = DomainEventMetadata(
- **avro_metadata.model_dump(include=set(DomainEventMetadata.__dataclass_fields__))
+ **avro_metadata.model_dump(include=set(DomainEventMetadata.model_fields.keys()))
)
- event = Event(
- event_id=event_id,
- event_type=event_type,
- event_version="1.0",
- timestamp=timestamp,
- aggregate_id=aggregate_id,
- metadata=domain_metadata,
- payload=payload,
- )
- _ = await self.event_repository.store_event(event)
+ # Create typed domain event via discriminated union adapter
+ event_data = {
+ "event_id": event_id,
+ "event_type": event_type,
+ "event_version": "1.0",
+ "timestamp": timestamp,
+ "aggregate_id": aggregate_id,
+ "metadata": domain_metadata,
+ **payload,
+ }
+ domain_event = domain_event_adapter.validate_python(event_data)
+ _ = await self.event_repository.store_event(domain_event)
# Get event class and create proper event instance
event_class = get_event_class_for_type(event_type)
if not event_class:
raise ValueError(f"No event class found for event type: {event_type}")
- # Create proper event instance with all required fields
- event_data = {
- "event_id": event.event_id,
+ # Create proper Kafka event instance with all required fields
+ kafka_event_data = {
+ "event_id": domain_event.event_id,
"event_type": event_type,
"event_version": "1.0",
"timestamp": timestamp,
@@ -110,13 +112,13 @@ async def publish_event(
**payload, # Include event-specific payload fields
}
- # Create the typed event instance
- kafka_event = event_class(**event_data)
+ # Create the typed Kafka event instance
+ kafka_event = event_class(**kafka_event_data)
# Prepare headers (all values must be strings for UnifiedProducer)
headers: Dict[str, str] = {
"event_type": event_type,
- "correlation_id": event.correlation_id or "",
+ "correlation_id": domain_metadata.correlation_id or "",
"service": avro_metadata.service_name,
}
@@ -227,7 +229,7 @@ async def publish_base_event(self, event: BaseEvent, key: str | None = None) ->
Event ID of published event
"""
with tracer.start_as_current_span("publish_base_event") as span:
- span.set_attribute("event.type", str(event.event_type))
+ span.set_attribute("event.type", event.event_type)
if event.aggregate_id:
span.set_attribute("aggregate.id", event.aggregate_id)
@@ -238,23 +240,24 @@ async def publish_base_event(self, event: BaseEvent, key: str | None = None) ->
# Build payload from event attributes (exclude base fields)
base_fields = {"event_id", "event_type", "event_version", "timestamp", "aggregate_id", "metadata", "topic"}
- payload = {k: v for k, v in vars(event).items() if k not in base_fields and not k.startswith("_")}
-
- # Create domain event for storage
- domain_event = Event(
- event_id=event.event_id,
- event_type=event.event_type,
- event_version=event.event_version,
- timestamp=event.timestamp,
- aggregate_id=event.aggregate_id,
- metadata=domain_metadata,
- payload=payload,
- )
+ payload = {k: v for k, v in event.model_dump().items() if k not in base_fields}
+
+ # Create typed domain event via discriminated union adapter
+ event_data = {
+ "event_id": event.event_id,
+ "event_type": event.event_type,
+ "event_version": event.event_version,
+ "timestamp": event.timestamp,
+ "aggregate_id": event.aggregate_id,
+ "metadata": domain_metadata,
+ **payload,
+ }
+ domain_event = domain_event_adapter.validate_python(event_data)
await self.event_repository.store_event(domain_event)
# Prepare headers
headers: Dict[str, str] = {
- "event_type": str(event.event_type),
+ "event_type": event.event_type,
"correlation_id": event.metadata.correlation_id or "",
"service": event.metadata.service_name,
}
@@ -282,7 +285,7 @@ async def publish_base_event(self, event: BaseEvent, key: str | None = None) ->
self.logger.info(
"Base event published",
extra={
- "event_type": str(event.event_type),
+ "event_type": event.event_type,
"event_id": event.event_id,
"aggregate_id": event.aggregate_id,
},
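
domain_event_adapter.validate_python presumably routes the merged dict to the correct typed domain event via a discriminated union keyed on event_type. A minimal sketch of that mechanism with two invented event shapes (the real union lives in app.domain.events):

    from typing import Annotated, Literal, Union
    from pydantic import BaseModel, Field, TypeAdapter

    class UserLoggedIn(BaseModel):
        event_type: Literal["user_logged_in"] = "user_logged_in"
        user_id: str

    class UserLoggedOut(BaseModel):
        event_type: Literal["user_logged_out"] = "user_logged_out"
        user_id: str

    DomainEvent = Annotated[Union[UserLoggedIn, UserLoggedOut], Field(discriminator="event_type")]
    domain_event_adapter = TypeAdapter(DomainEvent)   # hypothetical equivalent of the imported adapter

    evt = domain_event_adapter.validate_python({"event_type": "user_logged_in", "user_id": "u1"})
    assert isinstance(evt, UserLoggedIn)
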
diff --git a/backend/app/services/notification_service.py b/backend/app/services/notification_service.py
index 4d709b31..7a2c0a61 100644
--- a/backend/app/services/notification_service.py
+++ b/backend/app/services/notification_service.py
@@ -371,7 +371,7 @@ async def worker(uid: str) -> str:
self.logger.info(
"System notification completed",
extra={
- "severity": str(cfg.severity),
+ "severity": cfg.severity,
"title": title,
"total_users": len(users),
"created": created,
@@ -800,7 +800,7 @@ async def _publish_notification_sse(self, notification: DomainNotification) -> N
subject=notification.subject,
body=notification.body,
action_url=notification.action_url or "",
- created_at=notification.created_at.isoformat(),
+ created_at=notification.created_at,
)
await self.sse_bus.publish_notification(notification.user_id, message)
@@ -840,8 +840,8 @@ async def _deliver_notification(self, notification: DomainNotification) -> None:
extra={
"notification_id": str(notification.notification_id),
"user_id": notification.user_id,
- "channel": str(notification.channel),
- "severity": str(notification.severity),
+ "channel": notification.channel,
+ "severity": notification.severity,
"tags": list(notification.tags or []),
},
)
@@ -885,21 +885,21 @@ async def _deliver_notification(self, notification: DomainNotification) -> None:
f"Successfully delivered notification {notification.notification_id}",
extra={
"notification_id": str(notification.notification_id),
- "channel": str(notification.channel),
+ "channel": notification.channel,
"delivery_time_ms": int(delivery_time * 1000),
},
)
# Metrics (use tag string or severity)
self.metrics.record_notification_sent(
- str(notification.severity), channel=str(notification.channel), severity=str(notification.severity)
+ notification.severity, channel=notification.channel, severity=notification.severity
)
- self.metrics.record_notification_delivery_time(delivery_time, str(notification.severity))
+ self.metrics.record_notification_delivery_time(delivery_time, notification.severity)
except Exception as e:
error_details = {
"notification_id": str(notification.notification_id),
- "channel": str(notification.channel),
+ "channel": notification.channel,
"error_type": type(e).__name__,
"error_message": str(e),
"retry_count": notification.retry_count,
diff --git a/backend/app/services/pod_monitor/monitor.py b/backend/app/services/pod_monitor/monitor.py
index 095d8449..69028f25 100644
--- a/backend/app/services/pod_monitor/monitor.py
+++ b/backend/app/services/pod_monitor/monitor.py
@@ -196,16 +196,16 @@ async def _watch_pods(self) -> None:
case 410: # Gone - resource version too old
self.logger.warning("Resource version expired, resetting watch")
self._last_resource_version = None
- self._metrics.record_pod_monitor_watch_error(str(ErrorType.RESOURCE_VERSION_EXPIRED.value))
+ self._metrics.record_pod_monitor_watch_error(ErrorType.RESOURCE_VERSION_EXPIRED)
case _:
self.logger.error(f"API error in watch: {e}")
- self._metrics.record_pod_monitor_watch_error(str(ErrorType.API_ERROR.value))
+ self._metrics.record_pod_monitor_watch_error(ErrorType.API_ERROR)
await self._handle_watch_error()
except Exception as e:
self.logger.error(f"Unexpected error in watch: {e}", exc_info=True)
- self._metrics.record_pod_monitor_watch_error(str(ErrorType.UNEXPECTED.value))
+ self._metrics.record_pod_monitor_watch_error(ErrorType.UNEXPECTED)
await self._handle_watch_error()
async def _watch_pod_events(self) -> None:
@@ -273,7 +273,7 @@ async def _process_raw_event(self, raw_event: KubeEvent) -> None:
except (KeyError, ValueError) as e:
self.logger.error(f"Invalid event format: {e}")
- self._metrics.record_pod_monitor_watch_error(str(ErrorType.PROCESSING_ERROR.value))
+ self._metrics.record_pod_monitor_watch_error(ErrorType.PROCESSING_ERROR)
async def _process_pod_event(self, event: PodEvent) -> None:
"""Process a pod event."""
@@ -310,18 +310,18 @@ async def _process_pod_event(self, event: PodEvent) -> None:
# Log event
if app_events:
self.logger.info(
- f"Processed {event.event_type.value} event for pod {pod_name} "
+ f"Processed {event.event_type} event for pod {pod_name} "
f"(phase: {pod_phase or 'Unknown'}), "
f"published {len(app_events)} events"
)
# Update metrics
duration = time.time() - start_time
- self._metrics.record_pod_monitor_event_processing_duration(duration, str(event.event_type.value))
+ self._metrics.record_pod_monitor_event_processing_duration(duration, event.event_type)
except Exception as e:
self.logger.error(f"Error processing pod event: {e}", exc_info=True)
- self._metrics.record_pod_monitor_watch_error(str(ErrorType.PROCESSING_ERROR.value))
+ self._metrics.record_pod_monitor_watch_error(ErrorType.PROCESSING_ERROR)
async def _publish_event(self, event: BaseEvent, pod: k8s_client.V1Pod) -> None:
"""Publish event to Kafka and store in events collection."""
@@ -336,7 +336,7 @@ async def _publish_event(self, event: BaseEvent, pod: k8s_client.V1Pod) -> None:
await self._kafka_event_service.publish_base_event(event=event, key=key)
phase = pod.status.phase if pod.status else "Unknown"
- self._metrics.record_pod_monitor_event_published(str(event.event_type), phase)
+ self._metrics.record_pod_monitor_event_published(event.event_type, phase)
except Exception as e:
self.logger.error(f"Error publishing event: {e}", exc_info=True)
@@ -445,7 +445,7 @@ def _log_reconciliation_result(self, result: ReconciliationResult) -> None:
async def get_status(self) -> StatusDict:
"""Get monitor status."""
return {
- "state": self._state.value,
+ "state": self._state,
"tracked_pods": len(self._tracked_pods),
"reconnect_attempts": self._reconnect_attempts,
"last_resource_version": self._last_resource_version,
diff --git a/backend/app/services/rate_limit_service.py b/backend/app/services/rate_limit_service.py
index cc98a4c1..d28204de 100644
--- a/backend/app/services/rate_limit_service.py
+++ b/backend/app/services/rate_limit_service.py
@@ -26,11 +26,11 @@
def _rule_to_dict(rule: RateLimitRule) -> Dict[str, Any]:
return {
"endpoint_pattern": rule.endpoint_pattern,
- "group": rule.group.value,
+ "group": rule.group,
"requests": rule.requests,
"window_seconds": rule.window_seconds,
"burst_multiplier": rule.burst_multiplier,
- "algorithm": rule.algorithm.value,
+ "algorithm": rule.algorithm,
"priority": rule.priority,
"enabled": rule.enabled,
}
@@ -156,11 +156,11 @@ def _labels(self, ctx: "RateLimitService._Context") -> dict[str, str]:
labels = {
"authenticated": str(ctx.authenticated).lower(),
"endpoint": ctx.normalized_endpoint,
- "algorithm": ctx.algorithm.value,
+ "algorithm": ctx.algorithm,
}
if ctx.rule is not None:
labels.update(
- {"group": ctx.rule.group.value, "priority": str(ctx.rule.priority), "multiplier": str(ctx.multiplier)}
+ {"group": ctx.rule.group, "priority": str(ctx.rule.priority), "multiplier": str(ctx.multiplier)}
)
return labels
@@ -261,18 +261,18 @@ async def check_rate_limit(
{
"authenticated": str(ctx.authenticated).lower(),
"endpoint": ctx.normalized_endpoint,
- "algorithm": rule.algorithm.value,
+ "algorithm": rule.algorithm,
},
)
# Record window size
self.metrics.window_size.record(
- rule.window_seconds, {"endpoint": ctx.normalized_endpoint, "algorithm": rule.algorithm.value}
+ rule.window_seconds, {"endpoint": ctx.normalized_endpoint, "algorithm": rule.algorithm}
)
# Check rate limit based on algorithm (avoid duplicate branches)
timer_attrs = {
- "algorithm": rule.algorithm.value,
+ "algorithm": rule.algorithm,
"endpoint": ctx.normalized_endpoint,
"authenticated": str(ctx.authenticated).lower(),
}
@@ -302,7 +302,7 @@ async def check_rate_limit(
"rate_limit.allowed": status.allowed,
"rate_limit.limit": status.limit,
"rate_limit.remaining": status.remaining,
- "rate_limit.algorithm": status.algorithm.value,
+ "rate_limit.algorithm": status.algorithm,
}
)
return status
diff --git a/backend/app/services/result_processor/processor.py b/backend/app/services/result_processor/processor.py
index 45ed07ca..6a709787 100644
--- a/backend/app/services/result_processor/processor.py
+++ b/backend/app/services/result_processor/processor.py
@@ -303,6 +303,6 @@ async def _publish_result_failed(self, execution_id: str, error_message: str) ->
async def get_status(self) -> dict[str, Any]:
"""Get processor status."""
return {
- "state": self._state.value,
+ "state": self._state,
"consumer_active": self._consumer is not None,
}
diff --git a/backend/app/services/saga/execution_saga.py b/backend/app/services/saga/execution_saga.py
index 81f705da..4ea62100 100644
--- a/backend/app/services/saga/execution_saga.py
+++ b/backend/app/services/saga/execution_saga.py
@@ -179,7 +179,7 @@ async def execute(self, context: SagaContext, event: ExecutionRequestedEvent) ->
metadata=EventMetadata(
service_name="saga-orchestrator",
service_version="1.0.0",
- user_id=event.metadata.user_id if event.metadata else "system",
+ user_id=event.metadata.user_id or "system",
),
)
diff --git a/backend/app/services/saga/saga_orchestrator.py b/backend/app/services/saga/saga_orchestrator.py
index 6a6f0987..ad84a235 100644
--- a/backend/app/services/saga/saga_orchestrator.py
+++ b/backend/app/services/saga/saga_orchestrator.py
@@ -417,7 +417,7 @@ async def cancel_saga(self, saga_id: str) -> bool:
if saga_instance.state not in [SagaState.RUNNING, SagaState.CREATED]:
self.logger.warning(
"Cannot cancel saga in current state. Only RUNNING or CREATED sagas can be cancelled.",
- extra={"saga_id": saga_id, "state": saga_instance.state.value},
+ extra={"saga_id": saga_id, "state": saga_instance.state},
)
return False
diff --git a/backend/app/services/sse/redis_bus.py b/backend/app/services/sse/redis_bus.py
index 03123138..ce323708 100644
--- a/backend/app/services/sse/redis_bus.py
+++ b/backend/app/services/sse/redis_bus.py
@@ -74,6 +74,7 @@ async def open_subscription(self, execution_id: str) -> SSERedisSubscription:
pubsub = self._redis.pubsub()
channel = self._exec_channel(execution_id)
await pubsub.subscribe(channel)
+ await pubsub.get_message(timeout=1.0)
return SSERedisSubscription(pubsub, channel, self.logger)
async def publish_notification(self, user_id: str, notification: RedisNotificationMessage) -> None:
@@ -84,4 +85,5 @@ async def open_notification_subscription(self, user_id: str) -> SSERedisSubscrip
pubsub = self._redis.pubsub()
channel = self._notif_channel(user_id)
await pubsub.subscribe(channel)
+ await pubsub.get_message(timeout=1.0)
return SSERedisSubscription(pubsub, channel, self.logger)
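
The added get_message(timeout=1.0) after each subscribe most likely drains the initial subscribe confirmation so the first message handed to the SSE stream is real data; redis-py's async PubSub otherwise yields that control message first. A minimal sketch of the pattern under that assumption:

    import redis.asyncio as redis

    async def open_subscription(client: redis.Redis, channel: str):
        pubsub = client.pubsub()
        await pubsub.subscribe(channel)
        # consume the subscribe confirmation so later reads only see published data
        await pubsub.get_message(timeout=1.0)
        return pubsub
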
diff --git a/backend/app/services/sse/sse_service.py b/backend/app/services/sse/sse_service.py
index 19055a7b..3feed4c4 100644
--- a/backend/app/services/sse/sse_service.py
+++ b/backend/app/services/sse/sse_service.py
@@ -58,7 +58,7 @@ async def create_execution_stream(self, execution_id: str, user_id: str) -> Asyn
SSEExecutionEventData(
event_type=SSEControlEvent.ERROR,
execution_id=execution_id,
- timestamp=datetime.now(timezone.utc).isoformat(),
+ timestamp=datetime.now(timezone.utc),
error="Server is shutting down",
)
)
@@ -72,7 +72,7 @@ async def create_execution_stream(self, execution_id: str, user_id: str) -> Asyn
SSEExecutionEventData(
event_type=SSEControlEvent.CONNECTED,
execution_id=execution_id,
- timestamp=datetime.now(timezone.utc).isoformat(),
+ timestamp=datetime.now(timezone.utc),
connection_id=connection_id,
)
)
@@ -87,7 +87,7 @@ async def create_execution_stream(self, execution_id: str, user_id: str) -> Asyn
SSEExecutionEventData(
event_type=SSEControlEvent.SUBSCRIBED,
execution_id=execution_id,
- timestamp=datetime.now(timezone.utc).isoformat(),
+ timestamp=datetime.now(timezone.utc),
message="Redis subscription established",
)
)
@@ -132,7 +132,7 @@ async def _stream_events_redis(
SSEExecutionEventData(
event_type=SSEControlEvent.SHUTDOWN,
execution_id=execution_id,
- timestamp=datetime.now(timezone.utc).isoformat(),
+ timestamp=datetime.now(timezone.utc),
message="Server is shutting down",
grace_period=30,
)
@@ -145,7 +145,7 @@ async def _stream_events_redis(
SSEExecutionEventData(
event_type=SSEControlEvent.HEARTBEAT,
execution_id=execution_id,
- timestamp=now.isoformat(),
+ timestamp=now,
message="SSE connection active",
)
)
@@ -204,7 +204,7 @@ async def create_notification_stream(self, user_id: str) -> AsyncGenerator[Dict[
SSENotificationEventData(
event_type=SSENotificationEvent.CONNECTED,
user_id=user_id,
- timestamp=datetime.now(timezone.utc).isoformat(),
+ timestamp=datetime.now(timezone.utc),
message="Connected to notification stream",
)
)
@@ -217,7 +217,7 @@ async def create_notification_stream(self, user_id: str) -> AsyncGenerator[Dict[
SSENotificationEventData(
event_type=SSENotificationEvent.SUBSCRIBED,
user_id=user_id,
- timestamp=datetime.now(timezone.utc).isoformat(),
+ timestamp=datetime.now(timezone.utc),
message="Redis subscription established",
)
)
@@ -231,7 +231,7 @@ async def create_notification_stream(self, user_id: str) -> AsyncGenerator[Dict[
SSENotificationEventData(
event_type=SSENotificationEvent.HEARTBEAT,
user_id=user_id,
- timestamp=now.isoformat(),
+ timestamp=now,
message="Notification stream active",
)
)
diff --git a/backend/app/services/user_settings_service.py b/backend/app/services/user_settings_service.py
index 34f996cf..20074aa4 100644
--- a/backend/app/services/user_settings_service.py
+++ b/backend/app/services/user_settings_service.py
@@ -1,6 +1,6 @@
import logging
from datetime import datetime, timedelta, timezone
-from typing import Any, List
+from typing import Any
from cachetools import TTLCache
from pydantic import TypeAdapter
@@ -11,15 +11,14 @@
from app.domain.user import (
DomainEditorSettings,
DomainNotificationSettings,
- DomainSettingsEvent,
DomainSettingsHistoryEntry,
DomainUserSettings,
+ DomainUserSettingsChangedEvent,
DomainUserSettingsUpdate,
)
from app.services.event_bus import EventBusEvent, EventBusManager
from app.services.kafka_event_service import KafkaEventService
-# TypeAdapters for dict-based settings updates
_settings_adapter = TypeAdapter(DomainUserSettings)
_update_adapter = TypeAdapter(DomainUserSettingsUpdate)
@@ -31,7 +30,6 @@ def __init__(
self.repository = repository
self.event_service = event_service
self.logger = logger
- # TTL+LRU cache for settings
self._cache_ttl = timedelta(minutes=5)
self._max_cache_size = 1000
self._cache: TTLCache[str, DomainUserSettings] = TTLCache(
@@ -72,12 +70,13 @@ async def get_user_settings_fresh(self, user_id: str) -> DomainUserSettings:
snapshot = await self.repository.get_snapshot(user_id)
settings: DomainUserSettings
+ event_types = [EventType.USER_SETTINGS_UPDATED]
if snapshot:
settings = snapshot
- events = await self._get_settings_events(user_id, since=snapshot.updated_at)
+ events = await self.repository.get_settings_events(user_id, event_types, since=snapshot.updated_at)
else:
settings = DomainUserSettings(user_id=user_id)
- events = await self._get_settings_events(user_id)
+ events = await self.repository.get_settings_events(user_id, event_types)
for event in events:
settings = self._apply_event(settings, event)
@@ -91,25 +90,20 @@ async def update_user_settings(
"""Upsert provided fields into current settings, publish minimal event, and cache."""
current = await self.get_user_settings(user_id)
- # Get only fields that were explicitly set (non-None)
changes = _update_adapter.dump_python(updates, exclude_none=True)
if not changes:
return current
- # Merge current settings with changes and update metadata
current_dict = _settings_adapter.dump_python(current)
merged = {**current_dict, **changes}
merged["version"] = (current.version or 0) + 1
merged["updated_at"] = datetime.now(timezone.utc)
- # Reconstruct settings object (TypeAdapter handles nested dict → dataclass)
new_settings = _settings_adapter.validate_python(merged)
- # Publish event with JSON-serializable payload (enums → strings)
changes_json = _update_adapter.dump_python(updates, exclude_none=True, mode="json")
await self._publish_settings_event(user_id, changes_json, reason)
- # Notify event bus for cache invalidation
if self._event_bus_manager is not None:
bus = await self._event_bus_manager.get_event_bus()
await bus.publish("user.settings.updated", {"user_id": user_id})
@@ -166,21 +160,20 @@ async def update_custom_setting(self, user_id: str, key: str, value: Any) -> Dom
reason=f"Custom setting '{key}' updated",
)
- async def get_settings_history(self, user_id: str, limit: int = 50) -> List[DomainSettingsHistoryEntry]:
+ async def get_settings_history(self, user_id: str, limit: int = 50) -> list[DomainSettingsHistoryEntry]:
"""Get history from changed fields recorded in events."""
- events = await self._get_settings_events(user_id, limit=limit)
+ events = await self.repository.get_settings_events(user_id, [EventType.USER_SETTINGS_UPDATED], limit=limit)
history: list[DomainSettingsHistoryEntry] = []
for event in events:
- changed_fields = event.payload.get("changed_fields", [])
- for field in changed_fields:
+ for fld in event.changed_fields:
history.append(
DomainSettingsHistoryEntry(
timestamp=event.timestamp,
event_type=event.event_type,
- field=f"/{field}",
+ field=f"/{fld}",
old_value=None,
- new_value=event.payload.get(field),
- reason=event.payload.get("reason"),
+ new_value=event.model_dump().get(fld),
+ reason=event.reason,
correlation_id=event.correlation_id,
)
)
@@ -188,19 +181,15 @@ async def get_settings_history(self, user_id: str, limit: int = 50) -> List[Doma
async def restore_settings_to_point(self, user_id: str, timestamp: datetime) -> DomainUserSettings:
"""Restore settings to a specific point in time"""
- # Get all events up to the timestamp
- events = await self._get_settings_events(user_id, until=timestamp)
+ events = await self.repository.get_settings_events(user_id, [EventType.USER_SETTINGS_UPDATED], until=timestamp)
- # Rebuild settings from events
settings = DomainUserSettings(user_id=user_id)
for event in events:
settings = self._apply_event(settings, event)
- # Save as current settings
await self.repository.create_snapshot(settings)
self._add_to_cache(user_id, settings)
- # Publish restoration event (marker only, no field changes)
await self.event_service.publish_event(
event_type=EventType.USER_SETTINGS_UPDATED,
aggregate_id=f"user_settings_{user_id}",
@@ -214,34 +203,12 @@ async def restore_settings_to_point(self, user_id: str, timestamp: datetime) ->
return settings
- async def _get_settings_events(
- self, user_id: str, since: datetime | None = None, until: datetime | None = None, limit: int | None = None
- ) -> List[DomainSettingsEvent]:
- """Get settings-related events for a user."""
- raw = await self.repository.get_settings_events(
- user_id=user_id,
- event_types=[EventType.USER_SETTINGS_UPDATED],
- since=since,
- until=until,
- limit=limit,
- )
- return [
- DomainSettingsEvent(
- event_type=EventType.USER_SETTINGS_UPDATED,
- timestamp=e.timestamp,
- payload=e.payload,
- correlation_id=e.metadata.correlation_id if e.metadata else None,
- )
- for e in raw
- ]
-
- # Fields that are stored directly in event payload (not in nested 'changes')
_settings_fields = {"theme", "timezone", "date_format", "time_format", "notifications", "editor"}
- def _apply_event(self, settings: DomainUserSettings, event: DomainSettingsEvent) -> DomainUserSettings:
+ def _apply_event(self, settings: DomainUserSettings, event: DomainUserSettingsChangedEvent) -> DomainUserSettings:
"""Apply a settings update event using TypeAdapter merge."""
- # Extract changes from typed fields in payload
- changes = {k: v for k, v in event.payload.items() if k in self._settings_fields and v is not None}
+ event_dict = event.model_dump()
+ changes = {k: v for k, v in event_dict.items() if k in self._settings_fields and v is not None}
if not changes:
return settings
@@ -273,8 +240,5 @@ def get_cache_stats(self) -> dict[str, Any]:
async def reset_user_settings(self, user_id: str) -> None:
"""Reset user settings by deleting all data and cache."""
await self.invalidate_cache(user_id)
-
- # Delete from database
await self.repository.delete_user_settings(user_id)
-
self.logger.info(f"Reset settings for user {user_id}")
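
The update path above dumps the current settings, overlays the changed fields, bumps the version, and re-validates through the TypeAdapter. A condensed sketch of that merge with an invented two-field settings model:

    from pydantic import BaseModel, TypeAdapter

    class Settings(BaseModel):       # illustrative stand-in for DomainUserSettings
        user_id: str
        theme: str = "light"
        version: int = 0

    adapter = TypeAdapter(Settings)
    current = Settings(user_id="u1")
    changes = {"theme": "dark"}
    merged = {**adapter.dump_python(current), **changes, "version": current.version + 1}
    new_settings = adapter.validate_python(merged)
    assert new_settings.theme == "dark" and new_settings.version == 1
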
diff --git a/backend/scripts/create_topics.py b/backend/scripts/create_topics.py
index 6b473e25..de69c55f 100755
--- a/backend/scripts/create_topics.py
+++ b/backend/scripts/create_topics.py
@@ -47,7 +47,7 @@ async def create_topics(settings: Settings) -> None:
for topic in all_topics:
# Apply topic prefix for consistency with consumers/producers
- topic_name = f"{topic_prefix}{topic.value}"
+ topic_name = f"{topic_prefix}{topic}"
if topic_name not in existing_topics:
# Get config from topic_configs
config = topic_configs.get(
diff --git a/backend/tests/conftest.py b/backend/tests/conftest.py
index 7b7c1a61..8c5aa57a 100644
--- a/backend/tests/conftest.py
+++ b/backend/tests/conftest.py
@@ -130,9 +130,8 @@ async def db(scope: AsyncContainer) -> AsyncGenerator[Database, None]:
@pytest_asyncio.fixture
async def redis_client(scope: AsyncContainer) -> AsyncGenerator[redis.Redis, None]:
- # Don't close here - Dishka's RedisProvider handles cleanup when scope exits
- client: redis.Redis = await scope.get(redis.Redis)
- yield client
+ # Dishka's RedisProvider handles cleanup when scope exits
+ yield await scope.get(redis.Redis)
# ===== Authenticated client fixtures =====
diff --git a/backend/tests/e2e/conftest.py b/backend/tests/e2e/conftest.py
index b753b352..648dfaef 100644
--- a/backend/tests/e2e/conftest.py
+++ b/backend/tests/e2e/conftest.py
@@ -1,10 +1,9 @@
-"""E2E tests conftest - with infrastructure cleanup."""
from collections.abc import AsyncGenerator
import pytest_asyncio
import redis.asyncio as redis
-
from app.core.database_context import Database
+
from tests.helpers.cleanup import cleanup_db_and_redis
diff --git a/backend/tests/e2e/test_execution_routes.py b/backend/tests/e2e/test_execution_routes.py
index dd0bfa2f..67ed582c 100644
--- a/backend/tests/e2e/test_execution_routes.py
+++ b/backend/tests/e2e/test_execution_routes.py
@@ -2,14 +2,13 @@
from uuid import UUID
import pytest
-from httpx import AsyncClient
-
from app.domain.enums.execution import ExecutionStatus as ExecutionStatusEnum
from app.schemas_pydantic.execution import (
ExecutionResponse,
ExecutionResult,
ResourceUsage,
)
+from httpx import AsyncClient
pytestmark = [pytest.mark.e2e, pytest.mark.k8s]
@@ -91,7 +90,7 @@ async def test_get_execution_result(self, test_user: AsyncClient) -> None:
result_data = result_response.json()
execution_result = ExecutionResult(**result_data)
assert execution_result.execution_id == execution_id
- assert execution_result.status in [e.value for e in ExecutionStatusEnum]
+ assert execution_result.status in list(ExecutionStatusEnum)
assert execution_result.lang == "python"
# Execution might be in any state - that's fine
@@ -114,7 +113,7 @@ async def test_execute_with_error(self, test_user: AsyncClient) -> None:
exec_response = await test_user.post("/api/v1/execute", json=execution_request)
assert exec_response.status_code == 200
- execution_id = exec_response.json()["execution_id"]
+ exec_response.json()["execution_id"]
# No waiting - execution was accepted, error will be processed asynchronously
@@ -270,7 +269,7 @@ async def test_execution_with_timeout(self, test_user: AsyncClient) -> None:
exec_response = await test_user.post("/api/v1/execute", json=execution_request)
assert exec_response.status_code == 200
- execution_id = exec_response.json()["execution_id"]
+ exec_response.json()["execution_id"]
# Just verify the execution was created - it will run forever until timeout
# No need to wait or observe states
diff --git a/backend/tests/e2e/test_resource_cleaner_k8s.py b/backend/tests/e2e/test_resource_cleaner_k8s.py
index 33e57386..805aa785 100644
--- a/backend/tests/e2e/test_resource_cleaner_k8s.py
+++ b/backend/tests/e2e/test_resource_cleaner_k8s.py
@@ -3,10 +3,8 @@
import os
import pytest
-
from app.services.result_processor.resource_cleaner import ResourceCleaner
-
pytestmark = [pytest.mark.e2e, pytest.mark.k8s]
_test_logger = logging.getLogger("test.k8s.resource_cleaner_k8s")
diff --git a/backend/tests/e2e/test_resource_cleaner_orphan.py b/backend/tests/e2e/test_resource_cleaner_orphan.py
index 2cfb199b..bb75c911 100644
--- a/backend/tests/e2e/test_resource_cleaner_orphan.py
+++ b/backend/tests/e2e/test_resource_cleaner_orphan.py
@@ -1,11 +1,11 @@
-import asyncio
import logging
-from datetime import datetime, timedelta, timezone
+from datetime import datetime
import pytest
-from kubernetes import client as k8s_client, config as k8s_config
-
from app.services.result_processor.resource_cleaner import ResourceCleaner
+from kubernetes import client as k8s_client
+from kubernetes import config as k8s_config
+
from tests.helpers.eventually import eventually
pytestmark = [pytest.mark.e2e, pytest.mark.k8s]
@@ -38,7 +38,7 @@ async def test_cleanup_orphaned_configmaps_dry_run() -> None:
try:
cleaner = ResourceCleaner(logger=_test_logger)
# Force as orphaned by using a large cutoff
- cleaned = await cleaner.cleanup_orphaned_resources(namespace=ns, max_age_hours=0, dry_run=True)
+ await cleaner.cleanup_orphaned_resources(namespace=ns, max_age_hours=0, dry_run=True)
# We expect our configmap to be a candidate; poll the response
async def _has_cm() -> None:
diff --git a/backend/tests/helpers/cleanup.py b/backend/tests/helpers/cleanup.py
index 7dcccaa8..760b48da 100644
--- a/backend/tests/helpers/cleanup.py
+++ b/backend/tests/helpers/cleanup.py
@@ -1,6 +1,4 @@
-"""Shared cleanup utilities for integration and E2E tests."""
import redis.asyncio as redis
-
from app.core.database_context import Database
diff --git a/backend/tests/helpers/k8s_fakes.py b/backend/tests/helpers/k8s_fakes.py
index 7104dc4f..d45f0895 100644
--- a/backend/tests/helpers/k8s_fakes.py
+++ b/backend/tests/helpers/k8s_fakes.py
@@ -45,7 +45,12 @@ def __init__(self, reason: str, message: str | None = None) -> None:
class State:
- def __init__(self, terminated: Terminated | None = None, waiting: Waiting | None = None, running: Any | None = None) -> None:
+ def __init__(
+ self,
+ terminated: Terminated | None = None,
+ waiting: Waiting | None = None,
+ running: Any | None = None,
+ ) -> None:
self.terminated = terminated
self.waiting = waiting
self.running = running
@@ -89,7 +94,13 @@ def __init__(
annotations: dict[str, str] | None = None,
resource_version: str | None = None,
) -> None:
- self.metadata = Meta(name, namespace=namespace, labels=labels, annotations=annotations, resource_version=resource_version)
+ self.metadata = Meta(
+ name,
+ namespace=namespace,
+ labels=labels,
+ annotations=annotations,
+ resource_version=resource_version,
+ )
self.status = Status(phase, reason, msg, cs)
self.spec = Spec(adl)
diff --git a/backend/tests/helpers/kafka.py b/backend/tests/helpers/kafka.py
index 531e0bc9..944eca58 100644
--- a/backend/tests/helpers/kafka.py
+++ b/backend/tests/helpers/kafka.py
@@ -1,10 +1,9 @@
from collections.abc import Awaitable, Callable
import pytest
-from dishka import AsyncContainer
-
from app.events.core import UnifiedProducer
from app.infrastructure.kafka.events.base import BaseEvent
+from dishka import AsyncContainer
@pytest.fixture(scope="function")
diff --git a/backend/tests/integration/app/test_main_app.py b/backend/tests/integration/app/test_main_app.py
index 529df5f2..c178fe14 100644
--- a/backend/tests/integration/app/test_main_app.py
+++ b/backend/tests/integration/app/test_main_app.py
@@ -1,19 +1,10 @@
from importlib import import_module
import pytest
+from app.settings import Settings
from fastapi import FastAPI
-from starlette.middleware.cors import CORSMiddleware
from starlette.routing import Route
-from app.core.correlation import CorrelationMiddleware
-from app.core.middlewares import (
- CacheControlMiddleware,
- MetricsMiddleware,
- RateLimitMiddleware,
- RequestSizeLimitMiddleware,
-)
-from app.settings import Settings
-
pytestmark = pytest.mark.integration
diff --git a/backend/tests/integration/conftest.py b/backend/tests/integration/conftest.py
index 4ae85086..0d824014 100644
--- a/backend/tests/integration/conftest.py
+++ b/backend/tests/integration/conftest.py
@@ -1,10 +1,9 @@
-"""Integration tests conftest - with infrastructure cleanup."""
from collections.abc import AsyncGenerator
import pytest_asyncio
import redis.asyncio as redis
-
from app.core.database_context import Database
+
from tests.helpers.cleanup import cleanup_db_and_redis
diff --git a/backend/tests/integration/core/test_container.py b/backend/tests/integration/core/test_container.py
index e6b6a1ca..85ef5122 100644
--- a/backend/tests/integration/core/test_container.py
+++ b/backend/tests/integration/core/test_container.py
@@ -1,8 +1,7 @@
import pytest
-from dishka import AsyncContainer
-
from app.core.database_context import Database
from app.services.event_service import EventService
+from dishka import AsyncContainer
pytestmark = [pytest.mark.integration, pytest.mark.mongodb]
diff --git a/backend/tests/integration/db/repositories/test_admin_settings_repository.py b/backend/tests/integration/db/repositories/test_admin_settings_repository.py
index ecbe6b30..1f61ce95 100644
--- a/backend/tests/integration/db/repositories/test_admin_settings_repository.py
+++ b/backend/tests/integration/db/repositories/test_admin_settings_repository.py
@@ -1,9 +1,8 @@
import pytest
-from dishka import AsyncContainer
-
from app.core.database_context import Database
from app.db.repositories.admin.admin_settings_repository import AdminSettingsRepository
from app.domain.admin import SystemSettings
+from dishka import AsyncContainer
pytestmark = pytest.mark.integration
diff --git a/backend/tests/integration/db/repositories/test_saved_script_repository.py b/backend/tests/integration/db/repositories/test_saved_script_repository.py
index 92d26699..58ebfd90 100644
--- a/backend/tests/integration/db/repositories/test_saved_script_repository.py
+++ b/backend/tests/integration/db/repositories/test_saved_script_repository.py
@@ -1,8 +1,7 @@
import pytest
-from dishka import AsyncContainer
-
from app.db.repositories.saved_script_repository import SavedScriptRepository
from app.domain.saved_script import DomainSavedScriptCreate, DomainSavedScriptUpdate
+from dishka import AsyncContainer
pytestmark = pytest.mark.integration
diff --git a/backend/tests/integration/dlq/test_dlq_discard.py b/backend/tests/integration/dlq/test_dlq_discard.py
index 0549fd7f..0cbcd5f0 100644
--- a/backend/tests/integration/dlq/test_dlq_discard.py
+++ b/backend/tests/integration/dlq/test_dlq_discard.py
@@ -3,13 +3,13 @@
from datetime import datetime, timezone
import pytest
-from dishka import AsyncContainer
-
from app.db.docs import DLQMessageDocument
from app.db.repositories.dlq_repository import DLQRepository
from app.dlq.models import DLQMessageStatus
from app.domain.enums.events import EventType
from app.domain.enums.kafka import KafkaTopic
+from dishka import AsyncContainer
+
from tests.helpers import make_execution_requested_event
pytestmark = [pytest.mark.integration, pytest.mark.mongodb]
@@ -51,7 +51,7 @@ async def test_dlq_repository_marks_message_discarded(scope: AsyncContainer) ->
# Create a DLQ document
event_id = f"dlq-discard-{uuid.uuid4().hex[:8]}"
- doc = await _create_dlq_document(event_id=event_id, status=DLQMessageStatus.PENDING)
+ await _create_dlq_document(event_id=event_id, status=DLQMessageStatus.PENDING)
# Discard the message
reason = "max_retries_exceeded"
@@ -148,7 +148,7 @@ async def test_dlq_stats_reflect_discarded_messages(scope: AsyncContainer) -> No
# Capture count before to ensure our discard is what increments the stat
stats_before = await repository.get_dlq_stats()
- count_before = stats_before.by_status.get(DLQMessageStatus.DISCARDED.value, 0)
+ count_before = stats_before.by_status.get(DLQMessageStatus.DISCARDED, 0)
# Create and discard a message
event_id = f"dlq-stats-{uuid.uuid4().hex[:8]}"
@@ -157,5 +157,5 @@ async def test_dlq_stats_reflect_discarded_messages(scope: AsyncContainer) -> No
# Get stats after - verify the count incremented by exactly 1
stats_after = await repository.get_dlq_stats()
- count_after = stats_after.by_status.get(DLQMessageStatus.DISCARDED.value, 0)
+ count_after = stats_after.by_status.get(DLQMessageStatus.DISCARDED, 0)
assert count_after == count_before + 1
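
Note on the recurring pattern in these test hunks: `.value` is dropped wherever enum members are used as dict keys, query parameters, or comparison targets (here `by_status.get(DLQMessageStatus.DISCARDED, 0)`). That round-trips cleanly only if the enums are string-valued. A minimal sketch of that assumption, using a hypothetical `StrEnum` stand-in rather than the project's actual classes:

```python
from enum import StrEnum


class DLQStatus(StrEnum):  # hypothetical stand-in for DLQMessageStatus
    PENDING = "pending"
    DISCARDED = "discarded"


stats_by_status = {"discarded": 3}  # e.g. counts keyed by raw strings from the DB

# StrEnum members are str instances that compare and hash like their value,
# so the member itself works as a dict key, in equality checks, and in f-strings.
assert stats_by_status.get(DLQStatus.DISCARDED, 0) == 3
assert f"status={DLQStatus.DISCARDED}" == "status=discarded"
```

If any of these enums were plain `Enum` subclasses instead, the lookups and interpolations above would change meaning, which is presumably why the patch updates every call site in one pass.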
diff --git a/backend/tests/integration/dlq/test_dlq_manager.py b/backend/tests/integration/dlq/test_dlq_manager.py
index 63b69b0a..6615f248 100644
--- a/backend/tests/integration/dlq/test_dlq_manager.py
+++ b/backend/tests/integration/dlq/test_dlq_manager.py
@@ -4,14 +4,14 @@
from datetime import datetime, timezone
import pytest
-from confluent_kafka import Producer
-
from app.core.database_context import Database
from app.db.docs import DLQMessageDocument
from app.dlq.manager import create_dlq_manager
from app.domain.enums.kafka import KafkaTopic
from app.events.schema.schema_registry import create_schema_registry_manager
from app.settings import Settings
+from confluent_kafka import Producer
+
from tests.helpers import make_execution_requested_event
from tests.helpers.eventually import eventually
diff --git a/backend/tests/integration/dlq/test_dlq_retry.py b/backend/tests/integration/dlq/test_dlq_retry.py
index cde2ac8c..77ad8dcf 100644
--- a/backend/tests/integration/dlq/test_dlq_retry.py
+++ b/backend/tests/integration/dlq/test_dlq_retry.py
@@ -124,7 +124,7 @@ async def test_dlq_stats_reflect_retried_messages(scope: AsyncContainer) -> None
# Capture count before to ensure our retry is what increments the stat
stats_before = await repository.get_dlq_stats()
- count_before = stats_before.by_status.get(DLQMessageStatus.RETRIED.value, 0)
+ count_before = stats_before.by_status.get(DLQMessageStatus.RETRIED, 0)
# Create and retry a message
event_id = f"dlq-stats-retry-{uuid.uuid4().hex[:8]}"
@@ -133,7 +133,7 @@ async def test_dlq_stats_reflect_retried_messages(scope: AsyncContainer) -> None
# Get stats after - verify the count incremented by exactly 1
stats_after = await repository.get_dlq_stats()
- count_after = stats_after.by_status.get(DLQMessageStatus.RETRIED.value, 0)
+ count_after = stats_after.by_status.get(DLQMessageStatus.RETRIED, 0)
assert count_after == count_before + 1
diff --git a/backend/tests/integration/events/test_consume_roundtrip.py b/backend/tests/integration/events/test_consume_roundtrip.py
index 830a950b..9d007594 100644
--- a/backend/tests/integration/events/test_consume_roundtrip.py
+++ b/backend/tests/integration/events/test_consume_roundtrip.py
@@ -3,8 +3,6 @@
import uuid
import pytest
-from dishka import AsyncContainer
-
from app.domain.enums.events import EventType
from app.domain.enums.kafka import KafkaTopic
from app.events.core import UnifiedConsumer, UnifiedProducer
@@ -13,6 +11,8 @@
from app.events.schema.schema_registry import SchemaRegistryManager, initialize_event_schemas
from app.infrastructure.kafka.events.base import BaseEvent
from app.settings import Settings
+from dishka import AsyncContainer
+
from tests.helpers import make_execution_requested_event
# xdist_group: Kafka consumer creation can crash librdkafka when multiple workers
diff --git a/backend/tests/integration/events/test_consumer_group_monitor_real.py b/backend/tests/integration/events/test_consumer_group_monitor_real.py
index 233d0239..457ac87c 100644
--- a/backend/tests/integration/events/test_consumer_group_monitor_real.py
+++ b/backend/tests/integration/events/test_consumer_group_monitor_real.py
@@ -25,7 +25,7 @@ async def test_consumer_group_status_error_path_and_summary(test_settings: Setti
assert status.state in ("ERROR", "DEAD", "UNKNOWN")
assert status.health is ConsumerGroupHealth.UNHEALTHY
summary = monitor.get_health_summary(status)
- assert summary["group_id"] == gid and summary["health"] == ConsumerGroupHealth.UNHEALTHY.value
+ assert summary["group_id"] == gid and summary["health"] == ConsumerGroupHealth.UNHEALTHY
def test_assess_group_health_branches(test_settings: Settings) -> None:
diff --git a/backend/tests/integration/events/test_consumer_lifecycle.py b/backend/tests/integration/events/test_consumer_lifecycle.py
index f1628142..01833c19 100644
--- a/backend/tests/integration/events/test_consumer_lifecycle.py
+++ b/backend/tests/integration/events/test_consumer_lifecycle.py
@@ -2,12 +2,11 @@
from uuid import uuid4
import pytest
-from dishka import AsyncContainer
-
from app.domain.enums.kafka import KafkaTopic
from app.events.core import ConsumerConfig, EventDispatcher, UnifiedConsumer
from app.events.schema.schema_registry import SchemaRegistryManager
from app.settings import Settings
+from dishka import AsyncContainer
# xdist_group: Kafka consumer creation can crash librdkafka when multiple workers
# instantiate Consumer() objects simultaneously. Serial execution prevents this.
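
The `xdist_group` comment above refers to pytest-xdist's group-based scheduling: tests in the same group are sent to a single worker so they never run concurrently. A short sketch of how such tests are typically pinned, assuming pytest-xdist is invoked with `--dist loadgroup` (the marker list and group name below are illustrative, not necessarily the project's exact configuration):

```python
import pytest

# Pin every test in this module that instantiates a librdkafka Consumer to one
# xdist worker, so Consumer() objects are never created in parallel.
pytestmark = [
    pytest.mark.integration,
    pytest.mark.kafka,
    pytest.mark.xdist_group(name="kafka_consumer"),
]
```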
diff --git a/backend/tests/integration/events/test_dlq_handler.py b/backend/tests/integration/events/test_dlq_handler.py
index eb930b17..de3584a1 100644
--- a/backend/tests/integration/events/test_dlq_handler.py
+++ b/backend/tests/integration/events/test_dlq_handler.py
@@ -1,12 +1,11 @@
import logging
import pytest
-from dishka import AsyncContainer
-
from app.events.core import UnifiedProducer, create_dlq_error_handler, create_immediate_dlq_handler
from app.infrastructure.kafka.events.base import BaseEvent
from app.infrastructure.kafka.events.metadata import AvroEventMetadata
from app.infrastructure.kafka.events.saga import SagaStartedEvent
+from dishka import AsyncContainer
pytestmark = [pytest.mark.integration, pytest.mark.kafka]
@@ -18,7 +17,9 @@ async def test_dlq_handler_with_retries(scope: AsyncContainer, monkeypatch: pyte
p: UnifiedProducer = await scope.get(UnifiedProducer)
calls: list[tuple[str | None, str, str, int]] = []
- async def _record_send_to_dlq(original_event: BaseEvent, original_topic: str, error: Exception, retry_count: int) -> None:
+ async def _record_send_to_dlq(
+ original_event: BaseEvent, original_topic: str, error: Exception, retry_count: int
+ ) -> None:
calls.append((original_event.event_id, original_topic, str(error), retry_count))
monkeypatch.setattr(p, "send_to_dlq", _record_send_to_dlq)
@@ -45,7 +46,9 @@ async def test_immediate_dlq_handler(scope: AsyncContainer, monkeypatch: pytest.
p: UnifiedProducer = await scope.get(UnifiedProducer)
calls: list[tuple[str | None, str, str, int]] = []
- async def _record_send_to_dlq(original_event: BaseEvent, original_topic: str, error: Exception, retry_count: int) -> None:
+ async def _record_send_to_dlq(
+ original_event: BaseEvent, original_topic: str, error: Exception, retry_count: int
+ ) -> None:
calls.append((original_event.event_id, original_topic, str(error), retry_count))
monkeypatch.setattr(p, "send_to_dlq", _record_send_to_dlq)
diff --git a/backend/tests/integration/events/test_event_dispatcher.py b/backend/tests/integration/events/test_event_dispatcher.py
index 0195acbf..244930f6 100644
--- a/backend/tests/integration/events/test_event_dispatcher.py
+++ b/backend/tests/integration/events/test_event_dispatcher.py
@@ -3,8 +3,6 @@
import uuid
import pytest
-from dishka import AsyncContainer
-
from app.domain.enums.events import EventType
from app.domain.enums.kafka import KafkaTopic
from app.events.core import UnifiedConsumer, UnifiedProducer
@@ -13,6 +11,7 @@
from app.events.schema.schema_registry import SchemaRegistryManager, initialize_event_schemas
from app.infrastructure.kafka.events.base import BaseEvent
from app.settings import Settings
+from dishka import AsyncContainer
from tests.helpers import make_execution_requested_event
diff --git a/backend/tests/integration/events/test_event_store.py b/backend/tests/integration/events/test_event_store.py
index 45b18304..6fc68869 100644
--- a/backend/tests/integration/events/test_event_store.py
+++ b/backend/tests/integration/events/test_event_store.py
@@ -3,12 +3,12 @@
from datetime import datetime, timedelta, timezone
import pytest
-from dishka import AsyncContainer
-
from app.db.docs import EventDocument
from app.domain.enums.events import EventType
from app.events.event_store import EventStore
from app.infrastructure.kafka.events.base import BaseEvent
+from dishka import AsyncContainer
+
from tests.helpers import make_execution_requested_event
pytestmark = [pytest.mark.integration, pytest.mark.mongodb]
diff --git a/backend/tests/integration/events/test_schema_registry_roundtrip.py b/backend/tests/integration/events/test_schema_registry_roundtrip.py
index ffef9953..914982d0 100644
--- a/backend/tests/integration/events/test_schema_registry_roundtrip.py
+++ b/backend/tests/integration/events/test_schema_registry_roundtrip.py
@@ -1,10 +1,10 @@
import logging
import pytest
-from dishka import AsyncContainer
-
from app.events.schema.schema_registry import MAGIC_BYTE, SchemaRegistryManager
from app.settings import Settings
+from dishka import AsyncContainer
+
from tests.helpers import make_execution_requested_event
pytestmark = [pytest.mark.integration]
diff --git a/backend/tests/integration/idempotency/test_consumer_idempotent.py b/backend/tests/integration/idempotency/test_consumer_idempotent.py
index f1554b50..b98db675 100644
--- a/backend/tests/integration/idempotency/test_consumer_idempotent.py
+++ b/backend/tests/integration/idempotency/test_consumer_idempotent.py
@@ -3,8 +3,6 @@
import uuid
import pytest
-from dishka import AsyncContainer
-
from app.domain.enums.events import EventType
from app.domain.enums.kafka import KafkaTopic
from app.events.core import ConsumerConfig, EventDispatcher, UnifiedConsumer, UnifiedProducer
@@ -14,6 +12,8 @@
from app.services.idempotency.idempotency_manager import IdempotencyManager
from app.services.idempotency.middleware import IdempotentConsumerWrapper
from app.settings import Settings
+from dishka import AsyncContainer
+
from tests.helpers import make_execution_requested_event
from tests.helpers.eventually import eventually
diff --git a/backend/tests/integration/idempotency/test_decorator_idempotent.py b/backend/tests/integration/idempotency/test_decorator_idempotent.py
index 305a3500..ffaa51ab 100644
--- a/backend/tests/integration/idempotency/test_decorator_idempotent.py
+++ b/backend/tests/integration/idempotency/test_decorator_idempotent.py
@@ -1,11 +1,11 @@
import logging
import pytest
-from dishka import AsyncContainer
-
from app.infrastructure.kafka.events.base import BaseEvent
from app.services.idempotency.idempotency_manager import IdempotencyManager
from app.services.idempotency.middleware import idempotent_handler
+from dishka import AsyncContainer
+
from tests.helpers import make_execution_requested_event
_test_logger = logging.getLogger("test.idempotency.decorator_idempotent")
diff --git a/backend/tests/integration/idempotency/test_idempotency.py b/backend/tests/integration/idempotency/test_idempotency.py
index b93cdd79..5011c110 100644
--- a/backend/tests/integration/idempotency/test_idempotency.py
+++ b/backend/tests/integration/idempotency/test_idempotency.py
@@ -8,13 +8,12 @@
import pytest
import redis.asyncio as redis
-
from app.domain.idempotency import IdempotencyRecord, IdempotencyStatus
from app.infrastructure.kafka.events.base import BaseEvent
-from app.infrastructure.kafka.events.execution import ExecutionRequestedEvent
from app.services.idempotency.idempotency_manager import IdempotencyConfig, IdempotencyManager
from app.services.idempotency.middleware import IdempotentEventHandler, idempotent_handler
from app.services.idempotency.redis_repository import RedisIdempotencyRepository
+
from tests.helpers import make_execution_requested_event
pytestmark = [pytest.mark.integration, pytest.mark.redis]
diff --git a/backend/tests/integration/idempotency/test_idempotent_handler.py b/backend/tests/integration/idempotency/test_idempotent_handler.py
index a7872c8b..551c9d2c 100644
--- a/backend/tests/integration/idempotency/test_idempotent_handler.py
+++ b/backend/tests/integration/idempotency/test_idempotent_handler.py
@@ -1,11 +1,11 @@
import logging
import pytest
-from dishka import AsyncContainer
-
from app.infrastructure.kafka.events.base import BaseEvent
from app.services.idempotency.idempotency_manager import IdempotencyManager
from app.services.idempotency.middleware import IdempotentEventHandler
+from dishka import AsyncContainer
+
from tests.helpers import make_execution_requested_event
pytestmark = [pytest.mark.integration]
diff --git a/backend/tests/integration/notifications/test_notification_sse.py b/backend/tests/integration/notifications/test_notification_sse.py
index 5beabd4f..b8145075 100644
--- a/backend/tests/integration/notifications/test_notification_sse.py
+++ b/backend/tests/integration/notifications/test_notification_sse.py
@@ -1,14 +1,12 @@
-import asyncio
-import json
from uuid import uuid4
import pytest
-from dishka import AsyncContainer
-
from app.domain.enums.notification import NotificationChannel, NotificationSeverity
from app.schemas_pydantic.sse import RedisNotificationMessage
from app.services.notification_service import NotificationService
from app.services.sse.redis_bus import SSERedisBus
+from dishka import AsyncContainer
+
from tests.helpers.eventually import eventually
pytestmark = [pytest.mark.integration, pytest.mark.redis]
@@ -27,7 +25,7 @@ async def test_in_app_notification_published_to_sse(scope: AsyncContainer) -> No
await svc.update_subscription(user_id, NotificationChannel.IN_APP, True)
# Create notification via service (IN_APP channel triggers SSE publish)
- n = await svc.create_notification(
+ await svc.create_notification(
user_id=user_id,
subject="Hello",
body="World",
diff --git a/backend/tests/integration/result_processor/test_result_processor.py b/backend/tests/integration/result_processor/test_result_processor.py
index cb897bbe..de4eaaae 100644
--- a/backend/tests/integration/result_processor/test_result_processor.py
+++ b/backend/tests/integration/result_processor/test_result_processor.py
@@ -3,8 +3,6 @@
import uuid
import pytest
-from dishka import AsyncContainer
-
from app.core.database_context import Database
from app.db.repositories.execution_repository import ExecutionRepository
from app.domain.enums.events import EventType
@@ -22,6 +20,7 @@
from app.services.idempotency import IdempotencyManager
from app.services.result_processor.processor import ResultProcessor
from app.settings import Settings
+from dishka import AsyncContainer
from tests.helpers.eventually import eventually
@@ -117,7 +116,7 @@ async def _stored(_event: ResultStoredEvent) -> None:
async def _persisted() -> None:
doc = await db.get_collection("executions").find_one({"execution_id": execution_id})
assert doc is not None
- assert doc.get("status") == ExecutionStatus.COMPLETED.value
+ assert doc.get("status") == ExecutionStatus.COMPLETED
await eventually(_persisted, timeout=12.0, interval=0.2)
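
The call `await eventually(_persisted, timeout=12.0, interval=0.2)` polls an async assertion until it passes or times out. The real helper lives in `tests/helpers/eventually.py`; the following is only a sketch of the retry-until-assertion-passes pattern it implements, with parameter names mirroring the call site (the actual implementation may differ):

```python
import asyncio
import time
from collections.abc import Awaitable, Callable


async def eventually(
    check: Callable[[], Awaitable[None]],
    timeout: float = 5.0,
    interval: float = 0.1,
) -> None:
    """Re-run an assertion coroutine until it passes or the timeout expires."""
    deadline = time.monotonic() + timeout
    while True:
        try:
            await check()
            return
        except AssertionError:
            if time.monotonic() >= deadline:
                raise
            await asyncio.sleep(interval)
```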
diff --git a/backend/tests/integration/services/admin/test_admin_user_service.py b/backend/tests/integration/services/admin/test_admin_user_service.py
index ed6b3dca..b9ea3d98 100644
--- a/backend/tests/integration/services/admin/test_admin_user_service.py
+++ b/backend/tests/integration/services/admin/test_admin_user_service.py
@@ -1,11 +1,10 @@
from datetime import datetime, timezone
import pytest
-from dishka import AsyncContainer
-
from app.core.database_context import Database
from app.domain.enums.user import UserRole
from app.services.admin import AdminUserService
+from dishka import AsyncContainer
pytestmark = [pytest.mark.integration, pytest.mark.mongodb]
@@ -18,7 +17,7 @@ async def test_get_user_overview_basic(scope: AsyncContainer) -> None:
"user_id": "u1",
"username": "bob",
"email": "b@b.com",
- "role": UserRole.USER.value,
+ "role": UserRole.USER,
"is_active": True,
"is_superuser": False,
"hashed_password": "h",
diff --git a/backend/tests/integration/services/coordinator/test_execution_coordinator.py b/backend/tests/integration/services/coordinator/test_execution_coordinator.py
index 6043ede7..7b4cbcce 100644
--- a/backend/tests/integration/services/coordinator/test_execution_coordinator.py
+++ b/backend/tests/integration/services/coordinator/test_execution_coordinator.py
@@ -1,7 +1,6 @@
import pytest
-from dishka import AsyncContainer
-
from app.services.coordinator.coordinator import ExecutionCoordinator
+from dishka import AsyncContainer
from tests.helpers import make_execution_requested_event
pytestmark = pytest.mark.integration
diff --git a/backend/tests/integration/services/events/test_event_bus.py b/backend/tests/integration/services/events/test_event_bus.py
index bc0d453f..dc74fdce 100644
--- a/backend/tests/integration/services/events/test_event_bus.py
+++ b/backend/tests/integration/services/events/test_event_bus.py
@@ -1,7 +1,6 @@
import pytest
-from dishka import AsyncContainer
-
from app.services.event_bus import EventBusEvent, EventBusManager
+from dishka import AsyncContainer
from tests.helpers.eventually import eventually
pytestmark = pytest.mark.integration
diff --git a/backend/tests/integration/services/events/test_kafka_event_service.py b/backend/tests/integration/services/events/test_kafka_event_service.py
index 42fac8ec..2463d5c4 100644
--- a/backend/tests/integration/services/events/test_kafka_event_service.py
+++ b/backend/tests/integration/services/events/test_kafka_event_service.py
@@ -1,10 +1,9 @@
import pytest
-from dishka import AsyncContainer
-
from app.db.repositories import EventRepository
from app.domain.enums.events import EventType
from app.domain.enums.execution import ExecutionStatus
from app.services.kafka_event_service import KafkaEventService
+from dishka import AsyncContainer
pytestmark = [pytest.mark.integration, pytest.mark.kafka, pytest.mark.mongodb]
diff --git a/backend/tests/integration/services/execution/test_execution_service.py b/backend/tests/integration/services/execution/test_execution_service.py
index 1fffd3be..c3e689e9 100644
--- a/backend/tests/integration/services/execution/test_execution_service.py
+++ b/backend/tests/integration/services/execution/test_execution_service.py
@@ -1,8 +1,7 @@
import pytest
-from dishka import AsyncContainer
-
from app.domain.execution import ResourceLimitsDomain
from app.services.execution_service import ExecutionService
+from dishka import AsyncContainer
pytestmark = pytest.mark.integration
diff --git a/backend/tests/integration/services/idempotency/test_redis_repository.py b/backend/tests/integration/services/idempotency/test_redis_repository.py
index f9442539..6537ee8e 100644
--- a/backend/tests/integration/services/idempotency/test_redis_repository.py
+++ b/backend/tests/integration/services/idempotency/test_redis_repository.py
@@ -3,8 +3,6 @@
import pytest
import redis.asyncio as redis
-from pymongo.errors import DuplicateKeyError
-
from app.domain.idempotency import IdempotencyRecord, IdempotencyStatus
from app.services.idempotency.redis_repository import (
RedisIdempotencyRepository,
@@ -12,7 +10,7 @@
_json_default,
_parse_iso_datetime,
)
-
+from pymongo.errors import DuplicateKeyError
pytestmark = [pytest.mark.integration, pytest.mark.redis]
@@ -142,9 +140,15 @@ async def test_aggregate_status_counts(
repository: RedisIdempotencyRepository, redis_client: redis.Redis
) -> None:
# Seed few keys directly using repository
- for i, status in enumerate((IdempotencyStatus.PROCESSING, IdempotencyStatus.PROCESSING, IdempotencyStatus.COMPLETED)):
+ statuses = (IdempotencyStatus.PROCESSING, IdempotencyStatus.PROCESSING, IdempotencyStatus.COMPLETED)
+ for i, status in enumerate(statuses):
rec = IdempotencyRecord(
- key=f"k{i}", status=status, event_type="t", event_id=f"e{i}", created_at=datetime.now(timezone.utc), ttl_seconds=60
+ key=f"k{i}",
+ status=status,
+ event_type="t",
+ event_id=f"e{i}",
+ created_at=datetime.now(timezone.utc),
+ ttl_seconds=60,
)
await repository.insert_processing(rec)
if status != IdempotencyStatus.PROCESSING:
diff --git a/backend/tests/integration/services/notifications/test_notification_service.py b/backend/tests/integration/services/notifications/test_notification_service.py
index 1d93b259..e8440ad9 100644
--- a/backend/tests/integration/services/notifications/test_notification_service.py
+++ b/backend/tests/integration/services/notifications/test_notification_service.py
@@ -1,10 +1,9 @@
import pytest
-from dishka import AsyncContainer
-
from app.db.repositories import NotificationRepository
from app.domain.enums.notification import NotificationChannel, NotificationSeverity
from app.domain.notification import DomainNotificationCreate
from app.services.notification_service import NotificationService
+from dishka import AsyncContainer
pytestmark = [pytest.mark.integration, pytest.mark.mongodb]
@@ -15,7 +14,14 @@ async def test_notification_service_crud_and_subscription(scope: AsyncContainer)
repo: NotificationRepository = await scope.get(NotificationRepository)
# Create a notification via repository and then use service to mark/delete
- n = DomainNotificationCreate(user_id="u1", severity=NotificationSeverity.MEDIUM, tags=["x"], channel=NotificationChannel.IN_APP, subject="s", body="b")
+ n = DomainNotificationCreate(
+ user_id="u1",
+ severity=NotificationSeverity.MEDIUM,
+ tags=["x"],
+ channel=NotificationChannel.IN_APP,
+ subject="s",
+ body="b",
+ )
created = await repo.create_notification(n)
got = await repo.get_notification(created.notification_id, "u1")
assert got is not None
diff --git a/backend/tests/integration/services/rate_limit/test_rate_limit_service.py b/backend/tests/integration/services/rate_limit/test_rate_limit_service.py
index 4ce27ecf..942b2a37 100644
--- a/backend/tests/integration/services/rate_limit/test_rate_limit_service.py
+++ b/backend/tests/integration/services/rate_limit/test_rate_limit_service.py
@@ -5,8 +5,6 @@
from uuid import uuid4
import pytest
-from dishka import AsyncContainer
-
from app.domain.rate_limit import (
EndpointGroup,
RateLimitAlgorithm,
@@ -15,6 +13,7 @@
UserRateLimit,
)
from app.services.rate_limit_service import RateLimitService
+from dishka import AsyncContainer
pytestmark = [pytest.mark.integration, pytest.mark.redis]
@@ -155,7 +154,11 @@ async def test_ip_based_rate_limiting(scope: AsyncContainer) -> None:
async def test_get_config_roundtrip(scope: AsyncContainer) -> None:
svc: RateLimitService = await scope.get(RateLimitService)
svc.prefix = f"{svc.prefix}{uuid4().hex[:6]}:"
- cfg = RateLimitConfig(default_rules=[RateLimitRule(endpoint_pattern=r"^/z", group=EndpointGroup.API, requests=1, window_seconds=1)])
+ cfg = RateLimitConfig(
+ default_rules=[
+ RateLimitRule(endpoint_pattern=r"^/z", group=EndpointGroup.API, requests=1, window_seconds=1)
+ ]
+ )
await svc.update_config(cfg)
got = await svc._get_config()
assert isinstance(got, RateLimitConfig)
@@ -167,7 +170,17 @@ async def test_sliding_window_edge(scope: AsyncContainer) -> None:
svc.prefix = f"{svc.prefix}{uuid4().hex[:6]}:"
svc.settings.RATE_LIMIT_ENABLED = True # Enable rate limiting for this test
# Configure a tight window and ensure behavior is consistent
- cfg = RateLimitConfig(default_rules=[RateLimitRule(endpoint_pattern=r"^/edge", group=EndpointGroup.API, requests=1, window_seconds=1, algorithm=RateLimitAlgorithm.SLIDING_WINDOW)])
+ cfg = RateLimitConfig(
+ default_rules=[
+ RateLimitRule(
+ endpoint_pattern=r"^/edge",
+ group=EndpointGroup.API,
+ requests=1,
+ window_seconds=1,
+ algorithm=RateLimitAlgorithm.SLIDING_WINDOW,
+ )
+ ]
+ )
await svc.update_config(cfg)
ok = await svc.check_rate_limit("u", "/edge")
assert ok.allowed is True
diff --git a/backend/tests/integration/services/replay/test_replay_service.py b/backend/tests/integration/services/replay/test_replay_service.py
index 0705062e..730a12c9 100644
--- a/backend/tests/integration/services/replay/test_replay_service.py
+++ b/backend/tests/integration/services/replay/test_replay_service.py
@@ -1,9 +1,8 @@
import pytest
-from dishka import AsyncContainer
-
from app.domain.enums.replay import ReplayTarget, ReplayType
from app.services.event_replay import ReplayConfig, ReplayFilter
from app.services.replay_service import ReplayService
+from dishka import AsyncContainer
pytestmark = pytest.mark.integration
diff --git a/backend/tests/integration/services/saga/test_saga_service.py b/backend/tests/integration/services/saga/test_saga_service.py
index b0d1a1a1..74780056 100644
--- a/backend/tests/integration/services/saga/test_saga_service.py
+++ b/backend/tests/integration/services/saga/test_saga_service.py
@@ -1,11 +1,10 @@
from datetime import datetime, timezone
import pytest
-from dishka import AsyncContainer
-
from app.domain.enums.user import UserRole
from app.schemas_pydantic.user import User
from app.services.saga.saga_service import SagaService
+from dishka import AsyncContainer
pytestmark = [pytest.mark.integration, pytest.mark.mongodb]
diff --git a/backend/tests/integration/services/saved_script/test_saved_script_service.py b/backend/tests/integration/services/saved_script/test_saved_script_service.py
index c016146f..3eb23eb6 100644
--- a/backend/tests/integration/services/saved_script/test_saved_script_service.py
+++ b/backend/tests/integration/services/saved_script/test_saved_script_service.py
@@ -1,8 +1,7 @@
import pytest
-from dishka import AsyncContainer
-
from app.domain.saved_script import DomainSavedScriptCreate, DomainSavedScriptUpdate, SavedScriptNotFoundError
from app.services.saved_script_service import SavedScriptService
+from dishka import AsyncContainer
pytestmark = [pytest.mark.integration, pytest.mark.mongodb]
@@ -20,7 +19,9 @@ async def test_crud_saved_script(scope: AsyncContainer) -> None:
got = await service.get_saved_script(str(created.script_id), "u1")
assert got and got.script_id == created.script_id
- out = await service.update_saved_script(str(created.script_id), "u1", DomainSavedScriptUpdate(name="new", script="p"))
+ out = await service.update_saved_script(
+ str(created.script_id), "u1", DomainSavedScriptUpdate(name="new", script="p")
+ )
assert out and out.name == "new"
lst = await service.list_saved_scripts("u1")
diff --git a/backend/tests/integration/services/sse/test_partitioned_event_router.py b/backend/tests/integration/services/sse/test_partitioned_event_router.py
index 18bc3f86..cc8ab4b8 100644
--- a/backend/tests/integration/services/sse/test_partitioned_event_router.py
+++ b/backend/tests/integration/services/sse/test_partitioned_event_router.py
@@ -3,7 +3,6 @@
import pytest
import redis.asyncio as redis
-
from app.core.metrics.events import EventMetrics
from app.events.core import EventDispatcher
from app.events.schema.schema_registry import SchemaRegistryManager
@@ -11,6 +10,7 @@
from app.services.sse.kafka_redis_bridge import SSEKafkaRedisBridge
from app.services.sse.redis_bus import SSERedisBus
from app.settings import Settings
+
from tests.helpers import make_execution_requested_event
from tests.helpers.eventually import eventually
diff --git a/backend/tests/integration/services/sse/test_redis_bus.py b/backend/tests/integration/services/sse/test_redis_bus.py
index 74c05691..c2148c7c 100644
--- a/backend/tests/integration/services/sse/test_redis_bus.py
+++ b/backend/tests/integration/services/sse/test_redis_bus.py
@@ -1,10 +1,10 @@
import asyncio
import logging
+from datetime import datetime, timezone
from typing import Any, ClassVar, cast
import pytest
import redis.asyncio as redis_async
-
from app.domain.enums.events import EventType
from app.domain.enums.kafka import KafkaTopic
from app.domain.enums.notification import NotificationSeverity, NotificationStatus
@@ -127,7 +127,7 @@ async def test_notifications_channels() -> None:
subject="test",
body="body",
action_url="",
- created_at="2025-01-01T00:00:00Z",
+ created_at=datetime(2025, 1, 1, tzinfo=timezone.utc),
)
await bus.publish_notification("user-1", notif)
ch, payload = r.published[-1]
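
Here `created_at` switches from a hard-coded ISO string to a timezone-aware `datetime`. Assuming `RedisNotificationMessage` is a Pydantic model, both forms validate to the same value and the field still goes over the wire as an ISO 8601 string; a small sketch with a hypothetical stand-in model:

```python
from datetime import datetime, timezone

from pydantic import BaseModel


class Msg(BaseModel):  # hypothetical stand-in for RedisNotificationMessage
    created_at: datetime


m = Msg(created_at=datetime(2025, 1, 1, tzinfo=timezone.utc))

# Pydantic parses ISO strings into datetimes, so the old literal validates
# to the same value the new datetime argument produces.
assert Msg(created_at="2025-01-01T00:00:00Z").created_at == m.created_at

# On serialization the datetime is emitted as an ISO 8601 timestamp again
# (e.g. 2025-01-01T00:00:00Z), keeping the published payload string-typed.
print(m.model_dump_json())
```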
diff --git a/backend/tests/integration/services/user_settings/test_user_settings_service.py b/backend/tests/integration/services/user_settings/test_user_settings_service.py
index d3a15d54..1acb9d2e 100644
--- a/backend/tests/integration/services/user_settings/test_user_settings_service.py
+++ b/backend/tests/integration/services/user_settings/test_user_settings_service.py
@@ -1,8 +1,6 @@
from datetime import datetime, timezone
import pytest
-from dishka import AsyncContainer
-
from app.domain.enums import Theme
from app.domain.user.settings_models import (
DomainEditorSettings,
@@ -10,6 +8,7 @@
DomainUserSettingsUpdate,
)
from app.services.user_settings_service import UserSettingsService
+from dishka import AsyncContainer
pytestmark = [pytest.mark.integration, pytest.mark.mongodb]
diff --git a/backend/tests/integration/test_admin_routes.py b/backend/tests/integration/test_admin_routes.py
index 1626589a..5141986e 100644
--- a/backend/tests/integration/test_admin_routes.py
+++ b/backend/tests/integration/test_admin_routes.py
@@ -64,7 +64,6 @@ async def test_update_and_reset_settings(self, test_admin: AsyncClient) -> None:
# Get original settings
original_response = await test_admin.get("/api/v1/admin/settings/")
assert original_response.status_code == 200
- original_settings = original_response.json()
# Update settings
updated_settings = {
diff --git a/backend/tests/integration/test_alertmanager.py b/backend/tests/integration/test_alertmanager.py
index f7b7dcd6..d2e3f21d 100644
--- a/backend/tests/integration/test_alertmanager.py
+++ b/backend/tests/integration/test_alertmanager.py
@@ -1,7 +1,7 @@
-import httpx
-import pytest
from datetime import datetime, timezone
+import httpx
+import pytest
pytestmark = pytest.mark.integration
diff --git a/backend/tests/integration/test_auth_routes.py b/backend/tests/integration/test_auth_routes.py
index 07df6472..467619f2 100644
--- a/backend/tests/integration/test_auth_routes.py
+++ b/backend/tests/integration/test_auth_routes.py
@@ -1,10 +1,9 @@
from uuid import uuid4
import pytest
-from httpx import AsyncClient
-
from app.domain.enums.user import UserRole as UserRoleEnum
from app.schemas_pydantic.user import UserResponse
+from httpx import AsyncClient
@pytest.mark.integration
@@ -122,7 +121,7 @@ async def test_duplicate_email_registration(self, client: AsyncClient) -> None:
duplicate_response = await client.post("/api/v1/auth/register", json=duplicate_data)
# Backend might allow duplicate emails but not duplicate usernames
- # If it allows the registration, that's also valid behavior
+ # If it allows the registration, that's also valid behavior
assert duplicate_response.status_code in [200, 201, 400, 409]
@pytest.mark.asyncio
diff --git a/backend/tests/integration/test_dlq_routes.py b/backend/tests/integration/test_dlq_routes.py
index 50b0f71b..8d29b929 100644
--- a/backend/tests/integration/test_dlq_routes.py
+++ b/backend/tests/integration/test_dlq_routes.py
@@ -2,8 +2,6 @@
from typing import TypedDict
import pytest
-from httpx import AsyncClient
-
from app.dlq import DLQMessageStatus
from app.schemas_pydantic.dlq import (
DLQBatchRetryResponse,
@@ -15,6 +13,7 @@
)
from app.schemas_pydantic.user import MessageResponse
from app.settings import Settings
+from httpx import AsyncClient
class _RetryRequest(TypedDict):
@@ -68,7 +67,7 @@ async def test_get_dlq_statistics(self, test_user: AsyncClient) -> None:
assert isinstance(topic_stat["count"], int)
assert topic_stat["count"] >= 0
- # Check event type stats
+ # Check event type stats
for event_type_stat in stats.by_event_type:
assert "event_type" in event_type_stat
assert "count" in event_type_stat
diff --git a/backend/tests/integration/test_events_routes.py b/backend/tests/integration/test_events_routes.py
index 992fbbc4..b5de5950 100644
--- a/backend/tests/integration/test_events_routes.py
+++ b/backend/tests/integration/test_events_routes.py
@@ -141,8 +141,8 @@ async def test_query_events_advanced(self, test_user: AsyncClient) -> None:
# Query events with multiple filters
query_request = {
"event_types": [
- EventType.EXECUTION_REQUESTED.value,
- EventType.EXECUTION_COMPLETED.value
+ EventType.EXECUTION_REQUESTED,
+ EventType.EXECUTION_COMPLETED
],
"start_time": (datetime.now(timezone.utc) - timedelta(days=7)).isoformat(),
"end_time": datetime.now(timezone.utc).isoformat(),
@@ -298,7 +298,7 @@ async def test_publish_custom_event_requires_admin(self, test_user: AsyncClient)
"""Test that publishing custom events requires admin privileges."""
# Try to publish custom event (logged in as regular user via fixture)
publish_request = {
- "event_type": EventType.SYSTEM_ERROR.value,
+ "event_type": EventType.SYSTEM_ERROR,
"payload": {
"test": "data",
"value": 123
@@ -317,7 +317,7 @@ async def test_publish_custom_event_as_admin(self, test_admin: AsyncClient) -> N
# Publish custom event (requires Kafka); skip if not available
aggregate_id = str(uuid4())
publish_request = {
- "event_type": EventType.SYSTEM_ERROR.value,
+ "event_type": EventType.SYSTEM_ERROR,
"payload": {
"error_type": "test_error",
"message": "Admin test system error",
diff --git a/backend/tests/integration/test_notifications_routes.py b/backend/tests/integration/test_notifications_routes.py
index 9cb9764a..5eea763c 100644
--- a/backend/tests/integration/test_notifications_routes.py
+++ b/backend/tests/integration/test_notifications_routes.py
@@ -53,10 +53,10 @@ async def test_list_user_notifications(self, test_user: AsyncClient) -> None:
# If there are notifications, validate their structure per schema
for n in notifications_response.notifications:
assert n.notification_id
- assert n.channel in [c.value for c in NotificationChannel]
+ assert n.channel in list(NotificationChannel)
assert n.severity in ["low","medium","high","urgent"]
assert isinstance(n.tags, list)
- assert n.status in [s.value for s in NotificationStatus]
+ assert n.status in list(NotificationStatus)
assert n.subject is not None
assert n.body is not None
assert n.created_at is not None
@@ -66,9 +66,9 @@ async def test_filter_notifications_by_status(self, test_user: AsyncClient) -> N
"""Test filtering notifications by status."""
# Test different status filters
statuses = [
- NotificationStatus.READ.value,
- NotificationStatus.DELIVERED.value,
- NotificationStatus.SKIPPED.value,
+ NotificationStatus.READ,
+ NotificationStatus.DELIVERED,
+ NotificationStatus.SKIPPED,
]
for status in statuses:
response = await test_user.get(f"/api/v1/notifications?status={status}&limit=5")
@@ -102,7 +102,7 @@ async def test_mark_notification_as_read(self, test_user: AsyncClient) -> None:
"""Test marking a notification as read."""
# Get an unread notification
notifications_response = await test_user.get(
- f"/api/v1/notifications?status={NotificationStatus.DELIVERED.value}&limit=1")
+ f"/api/v1/notifications?status={NotificationStatus.DELIVERED}&limit=1")
assert notifications_response.status_code == 200
notifications_data = notifications_response.json()
@@ -201,7 +201,7 @@ async def test_get_notification_subscriptions(self, test_user: AsyncClient) -> N
# Check each subscription
for subscription in subscriptions_response.subscriptions:
assert isinstance(subscription, NotificationSubscription)
- assert subscription.channel in [c.value for c in NotificationChannel]
+ assert subscription.channel in list(NotificationChannel)
assert isinstance(subscription.enabled, bool)
assert subscription.user_id is not None
diff --git a/backend/tests/integration/test_replay_routes.py b/backend/tests/integration/test_replay_routes.py
index 4cd74755..219e6697 100644
--- a/backend/tests/integration/test_replay_routes.py
+++ b/backend/tests/integration/test_replay_routes.py
@@ -234,11 +234,11 @@ async def test_filter_sessions_by_status(self, test_admin: AsyncClient) -> None:
"""Test filtering replay sessions by status."""
# Test different status filters
for status in [
- ReplayStatus.CREATED.value,
- ReplayStatus.RUNNING.value,
- ReplayStatus.COMPLETED.value,
- ReplayStatus.FAILED.value,
- ReplayStatus.CANCELLED.value,
+ ReplayStatus.CREATED,
+ ReplayStatus.RUNNING,
+ ReplayStatus.COMPLETED,
+ ReplayStatus.FAILED,
+ ReplayStatus.CANCELLED,
]:
response = await test_admin.get(f"/api/v1/replay/sessions?status={status}&limit=5")
assert response.status_code in [200, 404]
diff --git a/backend/tests/integration/test_saga_routes.py b/backend/tests/integration/test_saga_routes.py
index b084dc90..cc015115 100644
--- a/backend/tests/integration/test_saga_routes.py
+++ b/backend/tests/integration/test_saga_routes.py
@@ -55,7 +55,7 @@ async def test_get_execution_sagas_with_state_filter(self, test_user: AsyncClien
execution_id = str(uuid.uuid4())
response = await test_user.get(
f"/api/v1/sagas/execution/{execution_id}",
- params={"state": SagaState.RUNNING.value}
+ params={"state": SagaState.RUNNING}
)
# Access denied for non-owned execution is valid
assert response.status_code in [200, 403]
@@ -91,7 +91,7 @@ async def test_list_sagas_with_state_filter(self, test_user: AsyncClient) -> Non
# List completed sagas
response = await test_user.get(
"/api/v1/sagas/",
- params={"state": SagaState.COMPLETED.value, "limit": 5}
+ params={"state": SagaState.COMPLETED, "limit": 5}
)
assert response.status_code == 200
@@ -182,7 +182,7 @@ async def test_get_saga_with_details(self, test_user: AsyncClient) -> None:
if response.status_code == 200:
saga_status = SagaStatusResponse(**response.json())
assert saga_status.saga_id == saga_id
- assert saga_status.state in [s.value for s in SagaState]
+ assert saga_status.state in list(SagaState)
@pytest.mark.asyncio
async def test_list_sagas_with_offset(self, test_user: AsyncClient) -> None:
@@ -216,7 +216,7 @@ async def test_cancel_saga_invalid_state(self, test_user: AsyncClient) -> None:
# Try to find a completed saga to cancel
response = await test_user.get(
"/api/v1/sagas/",
- params={"state": SagaState.COMPLETED.value, "limit": 1}
+ params={"state": SagaState.COMPLETED, "limit": 1}
)
assert response.status_code == 200
saga_list = SagaListResponse(**response.json())
@@ -238,7 +238,7 @@ async def test_get_execution_sagas_multiple_states(self, test_user: AsyncClient)
SagaState.FAILED, SagaState.CANCELLED]:
response = await test_user.get(
f"/api/v1/sagas/execution/{execution_id}",
- params={"state": state.value}
+ params={"state": state}
)
assert response.status_code in [200, 403]
if response.status_code == 403:
diff --git a/backend/tests/integration/test_sse_routes.py b/backend/tests/integration/test_sse_routes.py
index e7c2ff8f..9339219e 100644
--- a/backend/tests/integration/test_sse_routes.py
+++ b/backend/tests/integration/test_sse_routes.py
@@ -1,14 +1,22 @@
-import asyncio
+"""SSE integration tests - precise verification of Redis pub/sub and stream behavior."""
+
import json
from contextlib import aclosing
+from datetime import datetime, timezone
from typing import Any
from uuid import uuid4
import pytest
+from app.domain.enums.events import EventType
from app.domain.enums.notification import NotificationSeverity, NotificationStatus
+from app.domain.enums.sse import SSEControlEvent, SSENotificationEvent
from app.infrastructure.kafka.events.metadata import AvroEventMetadata
from app.infrastructure.kafka.events.pod import PodCreatedEvent
-from app.schemas_pydantic.sse import RedisNotificationMessage, SSEHealthResponse
+from app.schemas_pydantic.sse import (
+ RedisNotificationMessage,
+ RedisSSEMessage,
+ SSEHealthResponse,
+)
from app.services.sse.redis_bus import SSERedisBus
from app.services.sse.sse_service import SSEService
from dishka import AsyncContainer
@@ -16,153 +24,246 @@
@pytest.mark.integration
-class TestSSERoutes:
- """SSE routes tested with deterministic event-driven reads (no polling)."""
+class TestSSEAuth:
+ """SSE endpoints require authentication."""
+
+ @pytest.mark.asyncio
+ async def test_notification_stream_requires_auth(self, client: AsyncClient) -> None:
+ assert (await client.get("/api/v1/events/notifications/stream")).status_code == 401
+
+ @pytest.mark.asyncio
+ async def test_execution_stream_requires_auth(self, client: AsyncClient) -> None:
+ assert (await client.get(f"/api/v1/events/executions/{uuid4()}")).status_code == 401
@pytest.mark.asyncio
- async def test_sse_requires_authentication(self, client: AsyncClient) -> None:
- r = await client.get("/api/v1/events/notifications/stream")
- assert r.status_code == 401
- detail = r.json().get("detail", "").lower()
- assert any(x in detail for x in ("not authenticated", "unauthorized", "login"))
+ async def test_health_requires_auth(self, client: AsyncClient) -> None:
+ assert (await client.get("/api/v1/events/health")).status_code == 401
- exec_id = str(uuid4())
- r = await client.get(f"/api/v1/events/executions/{exec_id}")
- assert r.status_code == 401
- r = await client.get("/api/v1/events/health")
- assert r.status_code == 401
+@pytest.mark.integration
+class TestSSEHealth:
+ """SSE health endpoint."""
@pytest.mark.asyncio
- async def test_sse_health_status(self, test_user: AsyncClient) -> None:
+ async def test_returns_valid_health_status(self, test_user: AsyncClient) -> None:
r = await test_user.get("/api/v1/events/health")
assert r.status_code == 200
- health = SSEHealthResponse(**r.json())
+ health = SSEHealthResponse.model_validate(r.json())
assert health.status in ("healthy", "degraded", "unhealthy", "draining")
- assert isinstance(health.active_connections, int) and health.active_connections >= 0
+ assert health.active_connections >= 0
+
+
+@pytest.mark.integration
+class TestRedisPubSubExecution:
+ """Redis pub/sub for execution events - verifies message structure and delivery."""
@pytest.mark.asyncio
- async def test_notification_stream_service(self, scope: AsyncContainer, test_user: AsyncClient) -> None:
- """Test SSE notification stream directly through service (httpx doesn't support SSE streaming)."""
- sse_service: SSEService = await scope.get(SSEService)
+ async def test_publish_wraps_event_in_redis_message(self, scope: AsyncContainer) -> None:
+ """publish_event wraps BaseEvent in RedisSSEMessage with correct structure."""
bus: SSERedisBus = await scope.get(SSERedisBus)
- user_id = f"user-{uuid4().hex[:8]}"
+ exec_id = f"exec-{uuid4().hex[:8]}"
- events: list[dict[str, Any]] = []
- notification_received = False
+ subscription = await bus.open_subscription(exec_id)
- async with aclosing(sse_service.create_notification_stream(user_id)) as stream:
- try:
- async with asyncio.timeout(3.0):
- async for event in stream:
- if "data" not in event:
- continue
- data = json.loads(event["data"])
- events.append(data)
-
- # Wait for "subscribed" event - Redis subscription is now ready
- if data.get("event_type") == "subscribed":
- notification = RedisNotificationMessage(
- notification_id=f"notif-{uuid4().hex[:8]}",
- severity=NotificationSeverity.MEDIUM,
- status=NotificationStatus.PENDING,
- tags=[],
- subject="Hello",
- body="World",
- action_url="",
- created_at="2024-01-01T00:00:00Z",
- )
- await bus.publish_notification(user_id, notification)
-
- # Stop when we receive the notification
- if data.get("event_type") == "notification" and data.get("subject") == "Hello":
- notification_received = True
- break
- except TimeoutError:
- pass
-
- assert notification_received, f"Expected notification, got events: {events}"
+ event = PodCreatedEvent(
+ execution_id=exec_id,
+ pod_name="test-pod",
+ namespace="test-ns",
+ metadata=AvroEventMetadata(service_name="test", service_version="1.0"),
+ )
+ await bus.publish_event(exec_id, event)
+
+ # Verify the wrapper structure
+ received: RedisSSEMessage | None = await subscription.get(RedisSSEMessage)
+ await subscription.close()
+
+ assert received is not None
+ assert received.event_type == EventType.POD_CREATED
+ assert received.execution_id == exec_id
+ assert received.data["pod_name"] == "test-pod"
+ assert received.data["namespace"] == "test-ns"
@pytest.mark.asyncio
- async def test_execution_event_stream_service(self, scope: AsyncContainer, test_user: AsyncClient) -> None:
- """Test SSE execution stream directly through service (httpx doesn't support SSE streaming)."""
- exec_id = f"e-{uuid4().hex[:8]}"
- user_id = f"user-{uuid4().hex[:8]}"
+ async def test_channel_isolation(self, scope: AsyncContainer) -> None:
+ """Different execution_ids use isolated channels."""
+ bus: SSERedisBus = await scope.get(SSERedisBus)
+ exec_a, exec_b = f"exec-a-{uuid4().hex[:6]}", f"exec-b-{uuid4().hex[:6]}"
- sse_service: SSEService = await scope.get(SSEService)
+ sub_a = await bus.open_subscription(exec_a)
+ sub_b = await bus.open_subscription(exec_b)
+
+ event = PodCreatedEvent(
+ execution_id=exec_a,
+ pod_name="pod-a",
+ namespace="default",
+ metadata=AvroEventMetadata(service_name="test", service_version="1"),
+ )
+ await bus.publish_event(exec_a, event)
+
+ received_a = await sub_a.get(RedisSSEMessage)
+ received_b = await sub_b.get(RedisSSEMessage)
+
+ await sub_a.close()
+ await sub_b.close()
+
+ assert received_a is not None
+ assert received_a.data["pod_name"] == "pod-a"
+ assert received_b is None # B should not receive A's message
+
+
+@pytest.mark.integration
+class TestRedisPubSubNotification:
+ """Redis pub/sub for notifications - verifies message structure and delivery."""
+
+ @pytest.mark.asyncio
+ async def test_publish_sends_notification_directly(self, scope: AsyncContainer) -> None:
+ """publish_notification sends RedisNotificationMessage JSON directly."""
bus: SSERedisBus = await scope.get(SSERedisBus)
+ user_id = f"user-{uuid4().hex[:8]}"
- events: list[dict[str, Any]] = []
- pod_event_received = False
+ subscription = await bus.open_notification_subscription(user_id)
- async with aclosing(sse_service.create_execution_stream(exec_id, user_id)) as stream:
- try:
- async with asyncio.timeout(3.0):
- async for event in stream:
- if "data" not in event:
- continue
- data = json.loads(event["data"])
- events.append(data)
-
- # Wait for "subscribed" event - Redis subscription is now ready
- if data.get("event_type") == "subscribed":
- ev = PodCreatedEvent(
- execution_id=exec_id,
- pod_name=f"executor-{exec_id}",
- namespace="default",
- metadata=AvroEventMetadata(service_name="tests", service_version="1"),
- )
- await bus.publish_event(exec_id, ev)
-
- # Stop when we receive the pod event
- if data.get("event_type") == "pod_created":
- pod_event_received = True
- break
- except TimeoutError:
- pass
-
- assert pod_event_received, f"Expected pod_created event, got events: {events}"
+ notification = RedisNotificationMessage(
+ notification_id="notif-123",
+ severity=NotificationSeverity.HIGH,
+ status=NotificationStatus.PENDING,
+ tags=["urgent", "system"],
+ subject="Test Alert",
+ body="This is a test notification",
+ action_url="https://example.com/action",
+ created_at=datetime(2024, 6, 15, 12, 0, 0, tzinfo=timezone.utc),
+ )
+ await bus.publish_notification(user_id, notification)
+
+ received: RedisNotificationMessage | None = await subscription.get(RedisNotificationMessage)
+ await subscription.close()
+
+ assert received is not None
+ assert received.notification_id == "notif-123"
+ assert received.severity == NotificationSeverity.HIGH
+ assert received.status == NotificationStatus.PENDING
+ assert received.tags == ["urgent", "system"]
+ assert received.subject == "Test Alert"
+ assert received.body == "This is a test notification"
+ assert received.action_url == "https://example.com/action"
@pytest.mark.asyncio
- async def test_sse_route_requires_auth(self, client: AsyncClient) -> None:
- """Test that SSE routes require authentication (HTTP-level test only)."""
- r = await client.get("/api/v1/events/notifications/stream")
- assert r.status_code == 401
+ async def test_user_channel_isolation(self, scope: AsyncContainer) -> None:
+ """Different user_ids use isolated channels."""
+ bus: SSERedisBus = await scope.get(SSERedisBus)
+ user_a, user_b = f"user-a-{uuid4().hex[:6]}", f"user-b-{uuid4().hex[:6]}"
+
+ sub_a = await bus.open_notification_subscription(user_a)
+ sub_b = await bus.open_notification_subscription(user_b)
+
+ notification = RedisNotificationMessage(
+ notification_id="for-user-a",
+ severity=NotificationSeverity.LOW,
+ status=NotificationStatus.PENDING,
+ tags=[],
+ subject="Private",
+ body="For user A only",
+ action_url="",
+ created_at=datetime.now(timezone.utc),
+ )
+ await bus.publish_notification(user_a, notification)
+
+ received_a = await sub_a.get(RedisNotificationMessage)
+ received_b = await sub_b.get(RedisNotificationMessage)
- exec_id = str(uuid4())
- r = await client.get(f"/api/v1/events/executions/{exec_id}")
- assert r.status_code == 401
+ await sub_a.close()
+ await sub_b.close()
+
+ assert received_a is not None
+ assert received_a.notification_id == "for-user-a"
+ assert received_b is None # B should not receive A's notification
+
+
+@pytest.mark.integration
+class TestSSEStreamEvents:
+ """SSE stream control events - verifies event structure without pub/sub."""
@pytest.mark.asyncio
- async def test_sse_endpoint_returns_correct_headers(self, test_user: AsyncClient) -> None:
- """Test SSE health endpoint works (streaming tests done via service)."""
- r = await test_user.get("/api/v1/events/health")
- assert r.status_code == 200
- assert isinstance(r.json(), dict)
+ async def test_notification_stream_yields_connected_then_subscribed(self, scope: AsyncContainer) -> None:
+ """Notification stream yields CONNECTED and SUBSCRIBED with correct fields."""
+ sse_service: SSEService = await scope.get(SSEService)
+ user_id = f"user-{uuid4().hex[:8]}"
+
+ events: list[dict[str, Any]] = []
+ async with aclosing(sse_service.create_notification_stream(user_id)) as stream:
+ async for raw in stream:
+ if "data" in raw:
+ events.append(json.loads(raw["data"]))
+ if len(events) >= 2:
+ break
+
+ # Verify CONNECTED event structure
+ connected = events[0]
+ assert connected["event_type"] == SSENotificationEvent.CONNECTED
+ assert connected["user_id"] == user_id
+ assert "timestamp" in connected
+ assert connected["message"] == "Connected to notification stream"
+
+ # Verify SUBSCRIBED event structure
+ subscribed = events[1]
+ assert subscribed["event_type"] == SSENotificationEvent.SUBSCRIBED
+ assert subscribed["user_id"] == user_id
+ assert "timestamp" in subscribed
+ assert subscribed["message"] == "Redis subscription established"
@pytest.mark.asyncio
- async def test_multiple_concurrent_sse_service_connections(
- self, scope: AsyncContainer, test_user: AsyncClient
- ) -> None:
- """Test multiple concurrent SSE connections through the service."""
+ async def test_execution_stream_yields_connected_then_subscribed(self, scope: AsyncContainer) -> None:
+ """Execution stream yields CONNECTED and SUBSCRIBED with correct fields."""
sse_service: SSEService = await scope.get(SSEService)
+ exec_id = f"exec-{uuid4().hex[:8]}"
+ user_id = f"user-{uuid4().hex[:8]}"
- async def create_and_verify_stream(user_id: str) -> bool:
- async with aclosing(sse_service.create_notification_stream(user_id)) as stream:
- async for event in stream:
- if "data" in event:
- data = json.loads(event["data"])
- if data.get("event_type") == "connected":
- return True
+ events: list[dict[str, Any]] = []
+ async with aclosing(sse_service.create_execution_stream(exec_id, user_id)) as stream:
+ async for raw in stream:
+ if "data" in raw:
+ events.append(json.loads(raw["data"]))
+ if len(events) >= 2:
break
- return False
- results = await asyncio.gather(
- create_and_verify_stream("user1"),
- create_and_verify_stream("user2"),
- create_and_verify_stream("user3"),
- return_exceptions=True,
+ # Verify CONNECTED event structure
+ connected = events[0]
+ assert connected["event_type"] == SSEControlEvent.CONNECTED
+ assert connected["execution_id"] == exec_id
+ assert "connection_id" in connected
+ assert connected["connection_id"].startswith(f"sse_{exec_id}_")
+ assert "timestamp" in connected
+
+ # Verify SUBSCRIBED event structure
+ subscribed = events[1]
+ assert subscribed["event_type"] == SSEControlEvent.SUBSCRIBED
+ assert subscribed["execution_id"] == exec_id
+ assert "timestamp" in subscribed
+ assert subscribed["message"] == "Redis subscription established"
+
+ @pytest.mark.asyncio
+ async def test_concurrent_streams_get_unique_connection_ids(self, scope: AsyncContainer) -> None:
+ """Each stream connection gets a unique connection_id."""
+        import asyncio  # local import: only this test needs asyncio now that the module-level import was removed
+
+ sse_service: SSEService = await scope.get(SSEService)
+ exec_id = f"exec-{uuid4().hex[:8]}"
+
+ async def get_connection_id(user_id: str) -> str:
+ async with aclosing(sse_service.create_execution_stream(exec_id, user_id)) as stream:
+ async for raw in stream:
+ if "data" in raw:
+ data = json.loads(raw["data"])
+ if data.get("event_type") == SSEControlEvent.CONNECTED:
+ return str(data["connection_id"])
+ return ""
+
+ conn_ids = await asyncio.gather(
+ get_connection_id("user-1"),
+ get_connection_id("user-2"),
+ get_connection_id("user-3"),
)
- successful = sum(1 for r in results if r is True)
- assert successful >= 2
+ # All connection IDs should be unique
+ assert len(set(conn_ids)) == 3
+ assert all(cid.startswith(f"sse_{exec_id}_") for cid in conn_ids)
diff --git a/backend/tests/integration/test_user_settings_routes.py b/backend/tests/integration/test_user_settings_routes.py
index a780cd25..024e01ce 100644
--- a/backend/tests/integration/test_user_settings_routes.py
+++ b/backend/tests/integration/test_user_settings_routes.py
@@ -4,6 +4,7 @@
import pytest
from app.schemas_pydantic.user_settings import SettingsHistoryResponse, UserSettings
from httpx import AsyncClient
+
from tests.helpers.eventually import eventually
diff --git a/backend/tests/load/cli.py b/backend/tests/load/cli.py
index 807f5777..b6228859 100644
--- a/backend/tests/load/cli.py
+++ b/backend/tests/load/cli.py
@@ -18,7 +18,8 @@ async def _run(cfg: LoadConfig) -> int:
# Brief run configuration summary to stdout for easier troubleshooting
print(
f"Load config: base_url={cfg.base_url} api_prefix={cfg.api_prefix} "
- f"mode={cfg.mode} clients={cfg.clients} concurrency={cfg.concurrency} duration={cfg.duration_seconds}s verify_tls={cfg.verify_tls}"
+ f"mode={cfg.mode} clients={cfg.clients} concurrency={cfg.concurrency} "
+ f"duration={cfg.duration_seconds}s verify_tls={cfg.verify_tls}"
)
# Quick preflight to catch prefix/port mistakes early
pre_stats = StatsCollector()
@@ -46,7 +47,10 @@ async def _run(cfg: LoadConfig) -> int:
stats.save(stats_path)
# Print concise summary
summary = stats.finalize()
- print(f"Load run complete: mode={cfg.mode} requests={summary['total_requests']} errors={summary['total_errors']} runtime={summary['runtime_seconds']}s")
+ print(
+ f"Load run complete: mode={cfg.mode} requests={summary['total_requests']} "
+ f"errors={summary['total_errors']} runtime={summary['runtime_seconds']}s"
+ )
print(f"Report saved to: {stats_path}")
# Optional plots
if getattr(cfg, "generate_plots", False):
@@ -86,7 +90,7 @@ def main(argv: list[str] | None = None) -> int:
cfg.duration_seconds = args.duration
# Pass plots flag through cfg (without changing dataclass fields)
- setattr(cfg, "generate_plots", bool(args.plots))
+ cfg.generate_plots = bool(args.plots)
return asyncio.run(_run(cfg))
diff --git a/backend/tests/load/config.py b/backend/tests/load/config.py
index 1f29fc42..b82189fe 100644
--- a/backend/tests/load/config.py
+++ b/backend/tests/load/config.py
@@ -5,7 +5,6 @@
from pydantic import Field
from pydantic_settings import BaseSettings, SettingsConfigDict
-
Mode = Literal["monkey", "user", "both"]
diff --git a/backend/tests/load/monkey_runner.py b/backend/tests/load/monkey_runner.py
index 21e14e97..e44d9586 100644
--- a/backend/tests/load/monkey_runner.py
+++ b/backend/tests/load/monkey_runner.py
@@ -3,8 +3,9 @@
import asyncio
import json
import random
-import string
import secrets
+import string
+import time
from typing import Any
from .config import LoadConfig
@@ -63,9 +64,6 @@ def build_monkey_catalog(cfg: LoadConfig) -> list[tuple[str, str]]:
return out
-import time
-
-
async def run_monkey_swarm(cfg: LoadConfig, stats: StatsCollector, clients: int) -> None:
catalog = build_monkey_catalog(cfg)
sem = asyncio.Semaphore(cfg.concurrency)
diff --git a/backend/tests/load/strategies.py b/backend/tests/load/strategies.py
index ba7e34a6..da6af712 100644
--- a/backend/tests/load/strategies.py
+++ b/backend/tests/load/strategies.py
@@ -4,7 +4,6 @@
from hypothesis import strategies as st
-
# Type alias for JSON values
type JsonValue = None | bool | int | float | str | list[JsonValue] | dict[str, JsonValue]
diff --git a/backend/tests/load/user_runner.py b/backend/tests/load/user_runner.py
index a3780404..8c619023 100644
--- a/backend/tests/load/user_runner.py
+++ b/backend/tests/load/user_runner.py
@@ -2,6 +2,7 @@
import asyncio
import random
+import time
from collections.abc import Awaitable
from dataclasses import dataclass
from typing import Callable
@@ -82,9 +83,6 @@ async def _flow_settings_and_notifications(c: APIClient) -> None:
await c.mark_all_read()
-import time
-
-
async def run_user_swarm(cfg: LoadConfig, stats: StatsCollector, clients: int) -> None:
tasks: list[asyncio.Task[None]] = []
sem = asyncio.Semaphore(cfg.concurrency)
diff --git a/backend/tests/unit/conftest.py b/backend/tests/unit/conftest.py
index 517ae021..ea7bab9f 100644
--- a/backend/tests/unit/conftest.py
+++ b/backend/tests/unit/conftest.py
@@ -1,14 +1,8 @@
-"""Unit test configuration.
-
-Unit tests should NOT access real infrastructure (DB, Redis, HTTP).
-These fixtures raise errors to catch accidental usage.
-"""
import logging
from collections.abc import Generator
from typing import NoReturn
import pytest
-
from app.core.metrics.connections import ConnectionMetrics
from app.core.metrics.context import MetricsContext
from app.core.metrics.coordinator import CoordinatorMetrics
diff --git a/backend/tests/unit/core/metrics/test_base_metrics.py b/backend/tests/unit/core/metrics/test_base_metrics.py
index 24a36601..ba4cdfde 100644
--- a/backend/tests/unit/core/metrics/test_base_metrics.py
+++ b/backend/tests/unit/core/metrics/test_base_metrics.py
@@ -1,9 +1,7 @@
import pytest
-
from app.core.metrics.base import BaseMetrics
from app.settings import Settings
-
pytestmark = pytest.mark.unit
diff --git a/backend/tests/unit/core/metrics/test_connections_and_coordinator_metrics.py b/backend/tests/unit/core/metrics/test_connections_and_coordinator_metrics.py
index 0610913d..fab6f368 100644
--- a/backend/tests/unit/core/metrics/test_connections_and_coordinator_metrics.py
+++ b/backend/tests/unit/core/metrics/test_connections_and_coordinator_metrics.py
@@ -1,5 +1,4 @@
import pytest
-
from app.core.metrics.connections import ConnectionMetrics
from app.core.metrics.coordinator import CoordinatorMetrics
from app.settings import Settings
diff --git a/backend/tests/unit/core/metrics/test_database_and_dlq_metrics.py b/backend/tests/unit/core/metrics/test_database_and_dlq_metrics.py
index d0be021d..691d05aa 100644
--- a/backend/tests/unit/core/metrics/test_database_and_dlq_metrics.py
+++ b/backend/tests/unit/core/metrics/test_database_and_dlq_metrics.py
@@ -1,5 +1,4 @@
import pytest
-
from app.core.metrics.database import DatabaseMetrics
from app.core.metrics.dlq import DLQMetrics
from app.settings import Settings
diff --git a/backend/tests/unit/core/metrics/test_execution_and_events_metrics.py b/backend/tests/unit/core/metrics/test_execution_and_events_metrics.py
index 7fce126b..2eda95a8 100644
--- a/backend/tests/unit/core/metrics/test_execution_and_events_metrics.py
+++ b/backend/tests/unit/core/metrics/test_execution_and_events_metrics.py
@@ -1,11 +1,9 @@
import pytest
-
-from app.core.metrics.execution import ExecutionMetrics
from app.core.metrics.events import EventMetrics
+from app.core.metrics.execution import ExecutionMetrics
from app.domain.enums.execution import ExecutionStatus
from app.settings import Settings
-
pytestmark = pytest.mark.unit
diff --git a/backend/tests/unit/core/metrics/test_health_and_rate_limit_metrics.py b/backend/tests/unit/core/metrics/test_health_and_rate_limit_metrics.py
index 710ce31d..e22a3bff 100644
--- a/backend/tests/unit/core/metrics/test_health_and_rate_limit_metrics.py
+++ b/backend/tests/unit/core/metrics/test_health_and_rate_limit_metrics.py
@@ -1,5 +1,4 @@
import pytest
-
from app.core.metrics.health import HealthMetrics
from app.settings import Settings
diff --git a/backend/tests/unit/core/metrics/test_kubernetes_and_notifications_metrics.py b/backend/tests/unit/core/metrics/test_kubernetes_and_notifications_metrics.py
index dda78599..061eed0e 100644
--- a/backend/tests/unit/core/metrics/test_kubernetes_and_notifications_metrics.py
+++ b/backend/tests/unit/core/metrics/test_kubernetes_and_notifications_metrics.py
@@ -1,10 +1,8 @@
import pytest
-
from app.core.metrics.kubernetes import KubernetesMetrics
from app.core.metrics.notifications import NotificationMetrics
from app.settings import Settings
-
pytestmark = pytest.mark.unit
@@ -46,14 +44,21 @@ def test_notification_metrics_methods(test_settings: Settings) -> None:
m.record_notification_status_change("n1", "pending", "queued")
m.record_notification_read("welcome", 2.0)
m.record_notification_clicked("welcome")
- m.update_unread_count("u1", 5); m.update_unread_count("u1", 2)
- m.record_notification_throttled("welcome", "u1"); m.record_throttle_window_hit("u1")
- m.record_notification_retry("welcome", 1, False); m.record_notification_retry("welcome", 2, True)
+ m.update_unread_count("u1", 5)
+ m.update_unread_count("u1", 2)
+ m.record_notification_throttled("welcome", "u1")
+ m.record_throttle_window_hit("u1")
+ m.record_notification_retry("welcome", 1, False)
+ m.record_notification_retry("welcome", 2, True)
m.record_batch_processed(10, 1.2, notification_type="welcome")
- m.record_template_render(0.2, "tmpl", success=True); m.record_template_render(0.1, "tmpl", success=False)
+ m.record_template_render(0.2, "tmpl", success=True)
+ m.record_template_render(0.1, "tmpl", success=False)
m.record_webhook_delivery(0.3, 200, "/hooks/*")
m.record_slack_delivery(0.4, "#general", False, error_type="rate_limited")
- m.update_active_subscriptions("u1", 3); m.update_active_subscriptions("u1", 1)
+ m.update_active_subscriptions("u1", 3)
+ m.update_active_subscriptions("u1", 1)
m.record_subscription_change("u1", "welcome", "subscribe")
- m.increment_pending_notifications(); m.decrement_pending_notifications()
- m.increment_queued_notifications(); m.decrement_queued_notifications()
+ m.increment_pending_notifications()
+ m.decrement_pending_notifications()
+ m.increment_queued_notifications()
+ m.decrement_queued_notifications()
diff --git a/backend/tests/unit/core/metrics/test_metrics_classes.py b/backend/tests/unit/core/metrics/test_metrics_classes.py
index fc620462..542a4a6a 100644
--- a/backend/tests/unit/core/metrics/test_metrics_classes.py
+++ b/backend/tests/unit/core/metrics/test_metrics_classes.py
@@ -1,7 +1,5 @@
import pytest
-
from app.core.metrics.connections import ConnectionMetrics
-from app.domain.enums.execution import ExecutionStatus
from app.core.metrics.coordinator import CoordinatorMetrics
from app.core.metrics.database import DatabaseMetrics
from app.core.metrics.dlq import DLQMetrics
@@ -13,6 +11,7 @@
from app.core.metrics.rate_limit import RateLimitMetrics
from app.core.metrics.replay import ReplayMetrics
from app.core.metrics.security import SecurityMetrics
+from app.domain.enums.execution import ExecutionStatus
from app.settings import Settings
pytestmark = pytest.mark.unit
diff --git a/backend/tests/unit/core/metrics/test_metrics_context.py b/backend/tests/unit/core/metrics/test_metrics_context.py
index c5cf6e50..5f24a999 100644
--- a/backend/tests/unit/core/metrics/test_metrics_context.py
+++ b/backend/tests/unit/core/metrics/test_metrics_context.py
@@ -1,7 +1,6 @@
import logging
import pytest
-
from app.core.metrics.context import (
get_connection_metrics,
get_coordinator_metrics,
diff --git a/backend/tests/unit/core/metrics/test_replay_and_security_metrics.py b/backend/tests/unit/core/metrics/test_replay_and_security_metrics.py
index 5c3cf4f1..09462600 100644
--- a/backend/tests/unit/core/metrics/test_replay_and_security_metrics.py
+++ b/backend/tests/unit/core/metrics/test_replay_and_security_metrics.py
@@ -1,10 +1,8 @@
import pytest
-
from app.core.metrics.replay import ReplayMetrics
from app.core.metrics.security import SecurityMetrics
from app.settings import Settings
-
pytestmark = pytest.mark.unit
@@ -40,17 +38,26 @@ def test_security_metrics_methods(test_settings: Settings) -> None:
m.record_security_event("scan_started", severity="high", source="scanner")
m.record_security_violation("csrf", user_id="u1", ip_address="127.0.0.1")
m.record_authentication_attempt("password", False, user_id="u1", duration_seconds=0.2)
- m.update_active_sessions(2); m.increment_active_sessions(); m.decrement_active_sessions()
- m.record_token_generated("access", 3600); m.record_token_refreshed("access"); m.record_token_revoked("access", "logout")
+ m.update_active_sessions(2)
+ m.increment_active_sessions()
+ m.decrement_active_sessions()
+ m.record_token_generated("access", 3600)
+ m.record_token_refreshed("access")
+ m.record_token_revoked("access", "logout")
m.record_token_validation_failure("access", "expired")
m.record_authorization_check("/admin", "GET", False, user_role="user")
m.record_permission_check("write", True, user_id="u1")
- m.record_csrf_token_generated(); m.record_csrf_validation_failure("missing")
+ m.record_csrf_token_generated()
+ m.record_csrf_validation_failure("missing")
m.record_network_policy_violation("np1", "pod1", violation_type="egress")
m.record_privilege_escalation_attempt("u1", "admin", True)
- m.record_rate_limit_hit("/api"); m.record_rate_limit_violation("/api", limit=100)
- m.record_api_key_created("kid"); m.record_api_key_revoked("kid", "compromised"); m.record_api_key_usage("kid", "/api")
- m.record_audit_event("config_change", "u1", resource="system"); m.record_password_change("u1", True)
+ m.record_rate_limit_hit("/api")
+ m.record_rate_limit_violation("/api", limit=100)
+ m.record_api_key_created("kid")
+ m.record_api_key_revoked("kid", "compromised")
+ m.record_api_key_usage("kid", "/api")
+ m.record_audit_event("config_change", "u1", resource="system")
+ m.record_password_change("u1", True)
m.record_password_reset_request("u1", method="email")
m.record_weak_password_attempt("u1", "common_password")
m.record_brute_force_attempt("1.2.3.4", target_user="u1", action_taken="blocked")
diff --git a/backend/tests/unit/core/test_adaptive_sampling.py b/backend/tests/unit/core/test_adaptive_sampling.py
index 9250822c..c3c8d65d 100644
--- a/backend/tests/unit/core/test_adaptive_sampling.py
+++ b/backend/tests/unit/core/test_adaptive_sampling.py
@@ -2,7 +2,6 @@
from unittest.mock import MagicMock, patch
import pytest
-
from app.core.adaptive_sampling import AdaptiveSampler, create_adaptive_sampler
from app.settings import Settings
diff --git a/backend/tests/unit/core/test_csrf.py b/backend/tests/unit/core/test_csrf.py
index 4c303cfd..eb5e3816 100644
--- a/backend/tests/unit/core/test_csrf.py
+++ b/backend/tests/unit/core/test_csrf.py
@@ -1,5 +1,6 @@
import pytest
from app.core.security import SecurityService
+from app.domain.user.exceptions import CSRFValidationError
from app.settings import Settings
from starlette.requests import Request
@@ -28,7 +29,7 @@ def test_csrf_skips_on_get(test_settings: Settings) -> None:
def test_csrf_missing_header_raises_when_authenticated(test_settings: Settings) -> None:
security_service = SecurityService(test_settings)
req = make_request("POST", "/api/v1/items", cookies={"access_token": "tok", "csrf_token": "abc"})
- with pytest.raises(Exception):
+ with pytest.raises(CSRFValidationError):
security_service.validate_csrf_from_request(req)
diff --git a/backend/tests/unit/core/test_security.py b/backend/tests/unit/core/test_security.py
index cb0ed703..5a11f5d4 100644
--- a/backend/tests/unit/core/test_security.py
+++ b/backend/tests/unit/core/test_security.py
@@ -4,11 +4,10 @@
import jwt
import pytest
-from jwt.exceptions import InvalidTokenError
-
from app.core.security import SecurityService
from app.domain.enums.user import UserRole
from app.settings import Settings
+from jwt.exceptions import InvalidTokenError
class TestPasswordHashing:
@@ -131,7 +130,7 @@ def test_create_access_token_with_roles(
data = {
"sub": "admin_user",
"user_id": user_id,
- "role": UserRole.ADMIN.value
+ "role": UserRole.ADMIN
}
expires_delta = timedelta(minutes=security_service.settings.ACCESS_TOKEN_EXPIRE_MINUTES)
@@ -143,11 +142,11 @@ def test_create_access_token_with_roles(
algorithms=[security_service.settings.ALGORITHM]
)
- assert decoded["role"] == UserRole.ADMIN.value
+ assert decoded["role"] == UserRole.ADMIN
assert decoded["user_id"] == user_id
def test_token_contains_expected_claims(self, security_service: SecurityService) -> None:
- data = {"sub": "testuser", "user_id": str(uuid4()), "role": UserRole.USER.value}
+ data = {"sub": "testuser", "user_id": str(uuid4()), "role": UserRole.USER}
token = security_service.create_access_token(
data, expires_delta=timedelta(minutes=security_service.settings.ACCESS_TOKEN_EXPIRE_MINUTES)
)
@@ -156,7 +155,7 @@ def test_token_contains_expected_claims(self, security_service: SecurityService)
)
assert decoded["sub"] == "testuser"
assert decoded["user_id"] == data["user_id"]
- assert decoded["role"] == UserRole.USER.value
+ assert decoded["role"] == UserRole.USER
def test_decode_token_expired(
self,
@@ -271,7 +270,7 @@ async def create_token(username: str) -> str:
def test_token_has_only_expected_claims(self, security_service: SecurityService) -> None:
user_id = str(uuid4())
- data = {"sub": "testuser", "user_id": user_id, "role": UserRole.USER.value, "extra_field": "x"}
+ data = {"sub": "testuser", "user_id": user_id, "role": UserRole.USER, "extra_field": "x"}
token = security_service.create_access_token(
data, expires_delta=timedelta(minutes=security_service.settings.ACCESS_TOKEN_EXPIRE_MINUTES)
)
@@ -280,7 +279,7 @@ def test_token_has_only_expected_claims(self, security_service: SecurityService)
)
assert decoded["sub"] == "testuser"
assert decoded["user_id"] == user_id
- assert decoded["role"] == UserRole.USER.value
+ assert decoded["role"] == UserRole.USER
assert "extra_field" in decoded # Claims are carried as provided
def test_password_context_configuration(self, test_settings: Settings) -> None:
diff --git a/backend/tests/unit/core/test_utils.py b/backend/tests/unit/core/test_utils.py
index ee386718..feefc04d 100644
--- a/backend/tests/unit/core/test_utils.py
+++ b/backend/tests/unit/core/test_utils.py
@@ -1,6 +1,5 @@
-from starlette.requests import Request
-
from app.core.utils import StringEnum, get_client_ip
+from starlette.requests import Request
class E(StringEnum):
diff --git a/backend/tests/unit/domain/events/test_event_schema_coverage.py b/backend/tests/unit/domain/events/test_event_schema_coverage.py
new file mode 100644
index 00000000..46a23a98
--- /dev/null
+++ b/backend/tests/unit/domain/events/test_event_schema_coverage.py
@@ -0,0 +1,186 @@
+"""
+Validates complete correspondence between EventType enum and event classes.
+
+This test ensures that:
+1. Every EventType has a corresponding domain event class (in DomainEvent union)
+2. Every EventType has a corresponding Kafka event class (BaseEvent subclass)
+3. No orphan event classes exist (classes without matching EventType)
+
+Run this test to catch missing event implementations early.
+"""
+
+from typing import get_args
+
+from app.domain.enums.events import EventType
+from app.domain.events.typed import BaseEvent as DomainBaseEvent
+from app.domain.events.typed import DomainEvent, domain_event_adapter
+from app.events.schema.schema_registry import _get_event_type_to_class_mapping
+from app.infrastructure.kafka.events.base import BaseEvent as KafkaBaseEvent
+
+
+def get_domain_event_classes() -> dict[EventType, type]:
+ """Extract EventType -> class mapping from DomainEvent union."""
+ mapping: dict[EventType, type] = {}
+ union_types = get_args(DomainEvent)
+ # First element is the actual union, need to get its args
+ if union_types:
+ inner = union_types[0]
+ if hasattr(inner, "__args__"):
+ event_classes = inner.__args__
+ else:
+ # Python 3.10+ union syntax
+ event_classes = get_args(inner) or [inner]
+ if not event_classes:
+ event_classes = list(union_types[:-1]) # Exclude Discriminator
+ else:
+ event_classes = []
+
+ # Fallback: iterate through all DomainBaseEvent subclasses
+ if not event_classes:
+ event_classes = []
+ for cls in DomainBaseEvent.__subclasses__():
+ if hasattr(cls, "model_fields") and "event_type" in cls.model_fields:
+ event_classes.append(cls)
+
+ for cls in event_classes:
+ if hasattr(cls, "model_fields") and "event_type" in cls.model_fields:
+ field = cls.model_fields["event_type"]
+ if field.default is not None:
+ mapping[field.default] = cls
+
+ return mapping
+
+
+def get_kafka_event_classes() -> dict[EventType, type]:
+ """Extract EventType -> class mapping from Kafka BaseEvent subclasses."""
+ return _get_event_type_to_class_mapping()
+
+
+class TestEventSchemaCoverage:
+ """Ensure complete correspondence between EventType and event classes."""
+
+ def test_all_event_types_have_domain_event_class(self) -> None:
+ """Every EventType must have a corresponding domain event class."""
+ domain_mapping = get_domain_event_classes()
+ all_types = set(EventType)
+ covered_types = set(domain_mapping.keys())
+ missing = all_types - covered_types
+
+ assert not missing, (
+ f"Missing domain event classes for {len(missing)} EventType(s):\n"
+ + "\n".join(f" - {et.value}: needs a class in typed.py" for et in sorted(missing, key=lambda x: x.value))
+ )
+
+ def test_all_event_types_have_kafka_event_class(self) -> None:
+ """Every EventType must have a corresponding Kafka event class."""
+ kafka_mapping = get_kafka_event_classes()
+ all_types = set(EventType)
+ covered_types = set(kafka_mapping.keys())
+ missing = all_types - covered_types
+
+ assert not missing, (
+ f"Missing Kafka event classes for {len(missing)} EventType(s):\n"
+ + "\n".join(
+ f" - {et.value}: needs a class in infrastructure/kafka/events/"
+ for et in sorted(missing, key=lambda x: x.value)
+ )
+ )
+
+ def test_domain_event_adapter_covers_all_types(self) -> None:
+ """The domain_event_adapter TypeAdapter must handle all EventTypes."""
+ errors: list[str] = []
+
+ for et in EventType:
+ try:
+ # Validation will fail due to missing required fields, but that's OK
+ # We just want to confirm the type IS in the union (not "unknown discriminator")
+ domain_event_adapter.validate_python({"event_type": et})
+ except Exception as e:
+ error_str = str(e).lower()
+ # "validation error" means type IS recognized but fields are missing - that's fine
+ # "no match" or "discriminator" means type is NOT in union - that's a failure
+ if "no match" in error_str or "unable to extract" in error_str:
+ errors.append(f" - {et.value}: not in DomainEvent union")
+
+ assert not errors, f"domain_event_adapter missing {len(errors)} type(s):\n" + "\n".join(errors)
+
+ def test_no_orphan_domain_event_classes(self) -> None:
+ """All domain event classes must have a corresponding EventType."""
+ orphans: list[str] = []
+
+ for cls in DomainBaseEvent.__subclasses__():
+ # Skip test fixtures/mocks (private classes starting with _)
+ if cls.__name__.startswith("_"):
+ continue
+ if not hasattr(cls, "model_fields"):
+ continue
+ field = cls.model_fields.get("event_type")
+ if field is None:
+ continue
+ if field.default is None:
+ orphans.append(f" - {cls.__name__}: event_type field has no default")
+ elif not isinstance(field.default, EventType):
+ orphans.append(f" - {cls.__name__}: event_type default is not an EventType")
+
+ assert not orphans, "Orphan domain event classes:\n" + "\n".join(orphans)
+
+ def test_no_orphan_kafka_event_classes(self) -> None:
+ """All Kafka event classes must have a corresponding EventType."""
+ orphans: list[str] = []
+
+ for cls in KafkaBaseEvent.__subclasses__():
+ # Skip test fixtures/mocks (private classes starting with _)
+ if cls.__name__.startswith("_"):
+ continue
+ if not hasattr(cls, "model_fields"):
+ continue
+ field = cls.model_fields.get("event_type")
+ if field is None:
+ orphans.append(f" - {cls.__name__}: missing event_type field")
+ elif field.default is None:
+ orphans.append(f" - {cls.__name__}: event_type field has no default")
+ elif not isinstance(field.default, EventType):
+ orphans.append(f" - {cls.__name__}: event_type default is not an EventType")
+
+ assert not orphans, "Orphan Kafka event classes:\n" + "\n".join(orphans)
+
+ def test_domain_and_kafka_event_names_match(self) -> None:
+ """Domain and Kafka event classes for same EventType should have same name."""
+ domain_mapping = get_domain_event_classes()
+ kafka_mapping = get_kafka_event_classes()
+
+ mismatches: list[str] = []
+ for et in EventType:
+ domain_cls = domain_mapping.get(et)
+ kafka_cls = kafka_mapping.get(et)
+
+ if domain_cls and kafka_cls:
+ if domain_cls.__name__ != kafka_cls.__name__:
+ mismatches.append(
+ f" - {et.value}: domain={domain_cls.__name__}, kafka={kafka_cls.__name__}"
+ )
+
+ assert not mismatches, (
+ f"Event class name mismatches for {len(mismatches)} type(s):\n" + "\n".join(mismatches)
+ )
+
+
+class TestEventSchemaConsistency:
+ """Additional consistency checks between domain and Kafka event schemas."""
+
+ def test_event_type_count_sanity(self) -> None:
+ """Sanity check: we should have a reasonable number of event types."""
+ count = len(EventType)
+ assert count >= 50, f"Expected at least 50 EventTypes, got {count}"
+
+ def test_all_event_types_are_lowercase_snake_case(self) -> None:
+ """All EventType values should be lowercase snake_case."""
+ violations: list[str] = []
+ for et in EventType:
+ value = et.value
+ if value != value.lower():
+ violations.append(f" - {et.name}: '{value}' contains uppercase")
+ if " " in value or "-" in value:
+ violations.append(f" - {et.name}: '{value}' contains spaces or hyphens")
+
+ assert not violations, "EventType naming violations:\n" + "\n".join(violations)
diff --git a/backend/tests/unit/events/core/test_consumer_config.py b/backend/tests/unit/events/core/test_consumer_config.py
index 455cef0f..99e1a6bf 100644
--- a/backend/tests/unit/events/core/test_consumer_config.py
+++ b/backend/tests/unit/events/core/test_consumer_config.py
@@ -1,4 +1,3 @@
-import pytest
from app.events.core.types import ConsumerConfig, ProducerConfig
diff --git a/backend/tests/unit/events/test_event_dispatcher.py b/backend/tests/unit/events/test_event_dispatcher.py
index 5933df45..344a5a9f 100644
--- a/backend/tests/unit/events/test_event_dispatcher.py
+++ b/backend/tests/unit/events/test_event_dispatcher.py
@@ -3,6 +3,7 @@
from app.domain.enums.events import EventType
from app.events.core import EventDispatcher
from app.infrastructure.kafka.events.base import BaseEvent
+
from tests.helpers import make_execution_requested_event
_test_logger = logging.getLogger("test.events.event_dispatcher")
@@ -56,5 +57,5 @@ async def handler(_: BaseEvent) -> None:
metrics = disp.get_metrics()
assert called["n"] == 1
- assert metrics[EventType.EXECUTION_REQUESTED.value]["processed"] >= 1
- assert metrics[EventType.EXECUTION_FAILED.value]["skipped"] >= 1
+ assert metrics[EventType.EXECUTION_REQUESTED]["processed"] >= 1
+ assert metrics[EventType.EXECUTION_FAILED]["skipped"] >= 1
diff --git a/backend/tests/unit/events/test_mappings_and_types.py b/backend/tests/unit/events/test_mappings_and_types.py
index 6a2dedc4..62477f63 100644
--- a/backend/tests/unit/events/test_mappings_and_types.py
+++ b/backend/tests/unit/events/test_mappings_and_types.py
@@ -9,7 +9,18 @@
def test_producer_config_mapping() -> None:
- cfg = ProducerConfig(bootstrap_servers="kafka:29092", client_id="cid", batch_size=123, linger_ms=7, compression_type="gzip", request_timeout_ms=1111, retries=2, enable_idempotence=True, acks="all", max_in_flight_requests_per_connection=3)
+ cfg = ProducerConfig(
+ bootstrap_servers="kafka:29092",
+ client_id="cid",
+ batch_size=123,
+ linger_ms=7,
+ compression_type="gzip",
+ request_timeout_ms=1111,
+ retries=2,
+ enable_idempotence=True,
+ acks="all",
+ max_in_flight_requests_per_connection=3,
+ )
conf = cfg.to_producer_config()
assert conf["bootstrap.servers"] == "kafka:29092"
assert conf["client.id"] == "cid"
@@ -20,7 +31,19 @@ def test_producer_config_mapping() -> None:
def test_consumer_config_mapping() -> None:
- cfg = ConsumerConfig(bootstrap_servers="kafka:29092", group_id="g", client_id="c", auto_offset_reset="latest", enable_auto_commit=False, session_timeout_ms=12345, heartbeat_interval_ms=999, max_poll_interval_ms=555000, fetch_min_bytes=10, fetch_max_wait_ms=777, statistics_interval_ms=60000)
+ cfg = ConsumerConfig(
+ bootstrap_servers="kafka:29092",
+ group_id="g",
+ client_id="c",
+ auto_offset_reset="latest",
+ enable_auto_commit=False,
+ session_timeout_ms=12345,
+ heartbeat_interval_ms=999,
+ max_poll_interval_ms=555000,
+ fetch_min_bytes=10,
+ fetch_max_wait_ms=777,
+ statistics_interval_ms=60000,
+ )
conf = cfg.to_consumer_config()
assert conf["bootstrap.servers"] == "kafka:29092"
assert conf["group.id"] == "g"
diff --git a/backend/tests/unit/events/test_schema_registry_manager.py b/backend/tests/unit/events/test_schema_registry_manager.py
index 5b8ddd1e..9a5c0a2e 100644
--- a/backend/tests/unit/events/test_schema_registry_manager.py
+++ b/backend/tests/unit/events/test_schema_registry_manager.py
@@ -1,7 +1,6 @@
import logging
import pytest
-
from app.events.schema.schema_registry import SchemaRegistryManager
from app.infrastructure.kafka.events.execution import ExecutionRequestedEvent
from app.settings import Settings
diff --git a/backend/tests/unit/schemas_pydantic/test_events_schemas.py b/backend/tests/unit/schemas_pydantic/test_events_schemas.py
index d055a488..38d17179 100644
--- a/backend/tests/unit/schemas_pydantic/test_events_schemas.py
+++ b/backend/tests/unit/schemas_pydantic/test_events_schemas.py
@@ -1,7 +1,6 @@
import pytest
-
-from app.schemas_pydantic.events import EventFilterRequest
from app.domain.enums.common import SortOrder
+from app.schemas_pydantic.events import EventFilterRequest
def test_event_filter_request_sort_validator_accepts_allowed_fields() -> None:
diff --git a/backend/tests/unit/schemas_pydantic/test_execution_schemas.py b/backend/tests/unit/schemas_pydantic/test_execution_schemas.py
index 70c48bab..3d219e38 100644
--- a/backend/tests/unit/schemas_pydantic/test_execution_schemas.py
+++ b/backend/tests/unit/schemas_pydantic/test_execution_schemas.py
@@ -1,7 +1,5 @@
-from datetime import datetime, timezone
import pytest
-
from app.schemas_pydantic.execution import ExecutionRequest
diff --git a/backend/tests/unit/schemas_pydantic/test_notification_schemas.py b/backend/tests/unit/schemas_pydantic/test_notification_schemas.py
index dd274180..b50603f1 100644
--- a/backend/tests/unit/schemas_pydantic/test_notification_schemas.py
+++ b/backend/tests/unit/schemas_pydantic/test_notification_schemas.py
@@ -1,7 +1,6 @@
from datetime import UTC, datetime, timedelta
import pytest
-
from app.domain.enums.notification import NotificationChannel, NotificationSeverity, NotificationStatus
from app.schemas_pydantic.notification import Notification, NotificationBatch
diff --git a/backend/tests/unit/services/coordinator/test_queue_manager.py b/backend/tests/unit/services/coordinator/test_queue_manager.py
index a43f81ca..b5b88220 100644
--- a/backend/tests/unit/services/coordinator/test_queue_manager.py
+++ b/backend/tests/unit/services/coordinator/test_queue_manager.py
@@ -1,9 +1,9 @@
import logging
import pytest
-
from app.infrastructure.kafka.events.execution import ExecutionRequestedEvent
from app.services.coordinator.queue_manager import QueueManager, QueuePriority
+
from tests.helpers import make_execution_requested_event
_test_logger = logging.getLogger("test.services.coordinator.queue_manager")
diff --git a/backend/tests/unit/services/coordinator/test_resource_manager.py b/backend/tests/unit/services/coordinator/test_resource_manager.py
index 5e1df687..1cea9f82 100644
--- a/backend/tests/unit/services/coordinator/test_resource_manager.py
+++ b/backend/tests/unit/services/coordinator/test_resource_manager.py
@@ -1,7 +1,6 @@
import logging
import pytest
-
from app.services.coordinator.resource_manager import ResourceManager
_test_logger = logging.getLogger("test.services.coordinator.resource_manager")
diff --git a/backend/tests/unit/services/idempotency/__init__.py b/backend/tests/unit/services/idempotency/__init__.py
index 05dd5682..e69de29b 100644
--- a/backend/tests/unit/services/idempotency/__init__.py
+++ b/backend/tests/unit/services/idempotency/__init__.py
@@ -1 +0,0 @@
-# Idempotency service unit tests
\ No newline at end of file
diff --git a/backend/tests/unit/services/idempotency/test_idempotency_manager.py b/backend/tests/unit/services/idempotency/test_idempotency_manager.py
index df1b2092..62227363 100644
--- a/backend/tests/unit/services/idempotency/test_idempotency_manager.py
+++ b/backend/tests/unit/services/idempotency/test_idempotency_manager.py
@@ -1,15 +1,14 @@
import logging
from unittest.mock import MagicMock
-import pytest
+import pytest
from app.infrastructure.kafka.events.base import BaseEvent
from app.services.idempotency.idempotency_manager import (
IdempotencyConfig,
- IdempotencyManager,
IdempotencyKeyStrategy,
+ IdempotencyManager,
)
-
pytestmark = pytest.mark.unit
# Test logger
diff --git a/backend/tests/unit/services/idempotency/test_middleware.py b/backend/tests/unit/services/idempotency/test_middleware.py
index 475e75ac..4b1125e0 100644
--- a/backend/tests/unit/services/idempotency/test_middleware.py
+++ b/backend/tests/unit/services/idempotency/test_middleware.py
@@ -1,15 +1,13 @@
import logging
from unittest.mock import AsyncMock, MagicMock
-import pytest
+import pytest
+from app.domain.idempotency import IdempotencyStatus
from app.infrastructure.kafka.events.base import BaseEvent
from app.services.idempotency.idempotency_manager import IdempotencyManager, IdempotencyResult
from app.services.idempotency.middleware import (
IdempotentEventHandler,
- idempotent_handler,
- IdempotentConsumerWrapper,
)
-from app.domain.idempotency import IdempotencyStatus
_test_logger = logging.getLogger("test.services.idempotency.middleware")
@@ -36,7 +34,9 @@ def event(self) -> MagicMock:
return event
@pytest.fixture
- def idempotent_event_handler(self, mock_handler: AsyncMock, mock_idempotency_manager: AsyncMock) -> IdempotentEventHandler:
+ def idempotent_event_handler(
+ self, mock_handler: AsyncMock, mock_idempotency_manager: AsyncMock
+ ) -> IdempotentEventHandler:
return IdempotentEventHandler(
handler=mock_handler,
idempotency_manager=mock_idempotency_manager,
@@ -47,7 +47,9 @@ def idempotent_event_handler(self, mock_handler: AsyncMock, mock_idempotency_man
)
@pytest.mark.asyncio
- async def test_call_with_fields(self, mock_handler: AsyncMock, mock_idempotency_manager: AsyncMock, event: MagicMock) -> None:
+ async def test_call_with_fields(
+ self, mock_handler: AsyncMock, mock_idempotency_manager: AsyncMock, event: MagicMock
+ ) -> None:
# Setup with specific fields
fields = {"field1", "field2"}
@@ -80,7 +82,13 @@ async def test_call_with_fields(self, mock_handler: AsyncMock, mock_idempotency_
)
@pytest.mark.asyncio
- async def test_call_handler_exception(self, idempotent_event_handler: IdempotentEventHandler, mock_idempotency_manager: AsyncMock, mock_handler: AsyncMock, event: MagicMock) -> None:
+ async def test_call_handler_exception(
+ self,
+ idempotent_event_handler: IdempotentEventHandler,
+ mock_idempotency_manager: AsyncMock,
+ mock_handler: AsyncMock,
+ event: MagicMock,
+ ) -> None:
# Setup: Handler raises exception
idempotency_result = IdempotencyResult(
is_duplicate=False,
diff --git a/backend/tests/unit/services/pod_monitor/test_config_and_init.py b/backend/tests/unit/services/pod_monitor/test_config_and_init.py
index 75723aea..66e8a89b 100644
--- a/backend/tests/unit/services/pod_monitor/test_config_and_init.py
+++ b/backend/tests/unit/services/pod_monitor/test_config_and_init.py
@@ -1,11 +1,8 @@
import importlib
-import types
import pytest
-
from app.services.pod_monitor.config import PodMonitorConfig
-
pytestmark = pytest.mark.unit
diff --git a/backend/tests/unit/services/pod_monitor/test_event_mapper.py b/backend/tests/unit/services/pod_monitor/test_event_mapper.py
index ccce2787..8d848937 100644
--- a/backend/tests/unit/services/pod_monitor/test_event_mapper.py
+++ b/backend/tests/unit/services/pod_monitor/test_event_mapper.py
@@ -2,7 +2,7 @@
import logging
import pytest
-
+from app.domain.enums.events import EventType
from app.domain.enums.storage import ExecutionErrorType
from app.infrastructure.kafka.events.execution import (
ExecutionCompletedEvent,
@@ -12,30 +12,44 @@
from app.infrastructure.kafka.events.metadata import AvroEventMetadata
from app.infrastructure.kafka.events.pod import PodRunningEvent
from app.services.pod_monitor.event_mapper import PodContext, PodEventMapper
+
from tests.helpers.k8s_fakes import (
ContainerStatus,
FakeApi,
- Meta,
Pod,
- Spec,
State,
- Status,
Terminated,
Waiting,
)
-
pytestmark = pytest.mark.unit
_test_logger = logging.getLogger("test.services.pod_monitor.event_mapper")
def _ctx(pod: Pod, event_type: str = "ADDED") -> PodContext:
- return PodContext(pod=pod, execution_id="e1", metadata=AvroEventMetadata(service_name="t", service_version="1"), phase=pod.status.phase or "", event_type=event_type)
+ return PodContext(
+ pod=pod,
+ execution_id="e1",
+ metadata=AvroEventMetadata(service_name="t", service_version="1"),
+ phase=pod.status.phase or "",
+ event_type=event_type,
+ )
def test_pending_running_and_succeeded_mapping() -> None:
- pem = PodEventMapper(k8s_api=FakeApi(json.dumps({"stdout": "ok", "stderr": "", "exit_code": 0, "resource_usage": {"execution_time_wall_seconds": 0, "cpu_time_jiffies": 0, "clk_tck_hertz": 0, "peak_memory_kb": 0}})), logger=_test_logger)
+ logs_json = json.dumps({
+ "stdout": "ok",
+ "stderr": "",
+ "exit_code": 0,
+ "resource_usage": {
+ "execution_time_wall_seconds": 0,
+ "cpu_time_jiffies": 0,
+ "clk_tck_hertz": 0,
+ "peak_memory_kb": 0,
+ },
+ })
+ pem = PodEventMapper(k8s_api=FakeApi(logs_json), logger=_test_logger)
# Pending -> scheduled (set execution-id label and PodScheduled condition)
pend = Pod("p", "Pending")
@@ -49,7 +63,7 @@ def __init__(self, t: str, s: str) -> None:
pend.status.conditions = [Cond("PodScheduled", "True")]
pend.spec.node_name = "n"
evts = pem.map_pod_event(pend, "ADDED")
- assert any(e.event_type.value == "pod_scheduled" for e in evts)
+ assert any(e.event_type == EventType.POD_SCHEDULED for e in evts)
# Running -> running, includes container statuses JSON
cs = [ContainerStatus(State(waiting=Waiting("Init"))), ContainerStatus(State(terminated=Terminated(2)))]
@@ -57,10 +71,10 @@ def __init__(self, t: str, s: str) -> None:
run.metadata.labels = {"execution-id": "e1"}
evts = pem.map_pod_event(run, "MODIFIED")
# Print for debugging if test fails
- if not any(e.event_type.value == "pod_running" for e in evts):
- print(f"Events returned: {[e.event_type.value for e in evts]}")
- assert any(e.event_type.value == "pod_running" for e in evts)
- pr = [e for e in evts if e.event_type.value == "pod_running"][0]
+ if not any(e.event_type == EventType.POD_RUNNING for e in evts):
+ print(f"Events returned: {[e.event_type for e in evts]}")
+ assert any(e.event_type == EventType.POD_RUNNING for e in evts)
+ pr = [e for e in evts if e.event_type == EventType.POD_RUNNING][0]
assert isinstance(pr, PodRunningEvent)
statuses = json.loads(pr.container_statuses)
assert any("waiting" in s["state"] for s in statuses) and any("terminated" in s["state"] for s in statuses)
@@ -70,7 +84,7 @@ def __init__(self, t: str, s: str) -> None:
suc = Pod("p", "Succeeded", cs=[term])
suc.metadata.labels = {"execution-id": "e1"}
evts = pem.map_pod_event(suc, "MODIFIED")
- comp = [e for e in evts if e.event_type.value == "execution_completed"][0]
+ comp = [e for e in evts if e.event_type == EventType.EXECUTION_COMPLETED][0]
assert isinstance(comp, ExecutionCompletedEvent)
assert comp.exit_code == 0 and comp.stdout == "ok"
@@ -80,11 +94,14 @@ def test_failed_timeout_and_deleted() -> None:
pem = PodEventMapper(k8s_api=FakeApi(valid_logs), logger=_test_logger)
# Timeout via DeadlineExceeded
- pod_to = Pod("p", "Failed", cs=[ContainerStatus(State(terminated=Terminated(137)))], reason="DeadlineExceeded", adl=5)
+ pod_to = Pod(
+ "p", "Failed", cs=[ContainerStatus(State(terminated=Terminated(137)))],
+ reason="DeadlineExceeded", adl=5,
+ )
pod_to.metadata.labels = {"execution-id": "e1"}
ev = pem.map_pod_event(pod_to, "MODIFIED")[0]
assert isinstance(ev, ExecutionTimeoutEvent)
- assert ev.event_type.value == "execution_timeout" and ev.timeout_seconds == 5
+ assert ev.event_type == EventType.EXECUTION_TIMEOUT and ev.timeout_seconds == 5
# Failed: terminated exit_code nonzero, message used as stderr, error type defaults to SCRIPT_ERROR
# Note: ExecutionFailedEvent can have None resource_usage when logs extraction fails
@@ -93,7 +110,7 @@ def test_failed_timeout_and_deleted() -> None:
pod_fail.metadata.labels = {"execution-id": "e2"}
evf = pem_no_logs.map_pod_event(pod_fail, "MODIFIED")[0]
assert isinstance(evf, ExecutionFailedEvent)
- assert evf.event_type.value == "execution_failed" and evf.error_type in {ExecutionErrorType.SCRIPT_ERROR}
+ assert evf.event_type == EventType.EXECUTION_FAILED and evf.error_type in {ExecutionErrorType.SCRIPT_ERROR}
# Deleted -> terminated when container terminated present (exit code 0 returns completed for DELETED)
valid_logs_0 = json.dumps({"stdout": "", "stderr": "", "exit_code": 0, "resource_usage": {}})
@@ -102,7 +119,7 @@ def test_failed_timeout_and_deleted() -> None:
pod_del.metadata.labels = {"execution-id": "e3"}
evd = pem_completed.map_pod_event(pod_del, "DELETED")[0]
# For DELETED event with exit code 0, it returns execution_completed, not pod_terminated
- assert evd.event_type.value == "execution_completed"
+ assert evd.event_type == EventType.EXECUTION_COMPLETED
def test_extract_id_and_metadata_priority_and_duplicates() -> None:
@@ -111,7 +128,6 @@ def test_extract_id_and_metadata_priority_and_duplicates() -> None:
# From label
p = Pod("any", "Pending")
p.metadata.labels = {"execution-id": "L1", "user-id": "u", "correlation-id": "corrL"}
- ctx = _ctx(p)
md = pem._create_metadata(p)
assert pem._extract_execution_id(p) == "L1" and md.user_id == "u" and md.correlation_id == "corrL"
@@ -212,7 +228,7 @@ def test_all_containers_succeeded_and_cache_behavior() -> None:
pod.metadata.labels = {"execution-id": "e1"}
# When all succeeded, failed mapping returns completed instead of failed
ev = pem.map_pod_event(pod, "MODIFIED")[0]
- assert ev.event_type.value == "execution_completed"
+ assert ev.event_type == EventType.EXECUTION_COMPLETED
# Cache prevents duplicate for same phase unless event type changes
p2 = Pod("p2", "Running")
diff --git a/backend/tests/unit/services/pod_monitor/test_monitor.py b/backend/tests/unit/services/pod_monitor/test_monitor.py
index 1520eb12..6f33d44b 100644
--- a/backend/tests/unit/services/pod_monitor/test_monitor.py
+++ b/backend/tests/unit/services/pod_monitor/test_monitor.py
@@ -8,7 +8,7 @@
from app.core import k8s_clients as k8s_clients_module
from app.core.k8s_clients import K8sClients
from app.db.repositories.event_repository import EventRepository
-from app.domain.events import Event
+from app.domain.events import DomainEvent
from app.domain.execution.models import ResourceUsageDomain
from app.events.core import UnifiedProducer
from app.infrastructure.kafka.events.base import BaseEvent
@@ -27,6 +27,7 @@
)
from app.settings import Settings
from kubernetes.client.rest import ApiException
+
from tests.helpers.k8s_fakes import (
FakeApi,
FakeV1Api,
@@ -50,9 +51,9 @@ class FakeEventRepository(EventRepository):
def __init__(self) -> None:
super().__init__(_test_logger)
- self.stored_events: list[Event] = []
+ self.stored_events: list[DomainEvent] = []
- async def store_event(self, event: Event) -> str:
+ async def store_event(self, event: DomainEvent) -> str:
self.stored_events.append(event)
return event.event_id
@@ -162,7 +163,8 @@ async def _quick_watch() -> None:
assert pm.state == MonitorState.RUNNING
await pm.aclose()
- assert pm.state.value == MonitorState.STOPPED.value
+ final_state: MonitorState = pm.state
+ assert final_state == MonitorState.STOPPED
assert spy.cleared is True
@@ -559,7 +561,8 @@ def mock_create_clients(
async with create_pod_monitor(cfg, service, _test_logger) as monitor:
assert monitor.state == MonitorState.RUNNING
- assert monitor.state.value == MonitorState.STOPPED.value
+ final_state: MonitorState = monitor.state
+ assert final_state == MonitorState.STOPPED
@pytest.mark.asyncio
@@ -587,7 +590,8 @@ async def test_create_pod_monitor_with_injected_k8s_clients() -> None:
assert monitor._clients is mock_k8s_clients
assert monitor._v1 is mock_v1
- assert monitor.state.value == MonitorState.STOPPED.value
+ final_state: MonitorState = monitor.state
+ assert final_state == MonitorState.STOPPED
@pytest.mark.asyncio
diff --git a/backend/tests/unit/services/result_processor/__init__.py b/backend/tests/unit/services/result_processor/__init__.py
index 27a3238d..e69de29b 100644
--- a/backend/tests/unit/services/result_processor/__init__.py
+++ b/backend/tests/unit/services/result_processor/__init__.py
@@ -1 +0,0 @@
-# Result processor unit tests
\ No newline at end of file
diff --git a/backend/tests/unit/services/saga/test_execution_saga_steps.py b/backend/tests/unit/services/saga/test_execution_saga_steps.py
index 982bbc6c..47327538 100644
--- a/backend/tests/unit/services/saga/test_execution_saga_steps.py
+++ b/backend/tests/unit/services/saga/test_execution_saga_steps.py
@@ -1,5 +1,4 @@
import pytest
-
from app.db.repositories.resource_allocation_repository import ResourceAllocationRepository
from app.domain.saga import DomainResourceAllocation, DomainResourceAllocationCreate
from app.events.core import UnifiedProducer
@@ -16,6 +15,7 @@
ValidateExecutionStep,
)
from app.services.saga.saga_step import SagaContext
+
from tests.helpers import make_execution_requested_event
pytestmark = pytest.mark.unit
@@ -204,4 +204,4 @@ def __init__(self) -> None:
steps = s.get_steps()
# CreatePod step should be configured and present
cps = [st for st in steps if isinstance(st, CreatePodStep)][0]
- assert getattr(cps, "publish_commands") is True
+ assert cps.publish_commands is True
diff --git a/backend/tests/unit/services/saga/test_saga_comprehensive.py b/backend/tests/unit/services/saga/test_saga_comprehensive.py
index 027ec634..4c7c48f1 100644
--- a/backend/tests/unit/services/saga/test_saga_comprehensive.py
+++ b/backend/tests/unit/services/saga/test_saga_comprehensive.py
@@ -6,7 +6,6 @@
"""
import pytest
-
from app.domain.enums.events import EventType
from app.domain.enums.saga import SagaState
from app.domain.saga.models import Saga
@@ -14,8 +13,8 @@
from app.infrastructure.kafka.events.execution import ExecutionRequestedEvent
from app.services.saga.execution_saga import ExecutionSaga
from app.services.saga.saga_step import CompensationStep, SagaContext, SagaStep
-from tests.helpers import make_execution_requested_event
+from tests.helpers import make_execution_requested_event
pytestmark = pytest.mark.unit
diff --git a/backend/tests/unit/services/saga/test_saga_orchestrator_unit.py b/backend/tests/unit/services/saga/test_saga_orchestrator_unit.py
index 0621caf8..77ed1084 100644
--- a/backend/tests/unit/services/saga/test_saga_orchestrator_unit.py
+++ b/backend/tests/unit/services/saga/test_saga_orchestrator_unit.py
@@ -4,8 +4,6 @@
from unittest.mock import MagicMock
import pytest
-from pydantic import Field
-
from app.db.repositories.resource_allocation_repository import ResourceAllocationRepository
from app.db.repositories.saga_repository import SagaRepository
from app.domain.enums.events import EventType
@@ -14,14 +12,15 @@
from app.domain.saga.models import Saga, SagaConfig
from app.events.core import UnifiedProducer
from app.events.event_store import EventStore
+from app.events.schema.schema_registry import SchemaRegistryManager
from app.infrastructure.kafka.events import BaseEvent
from app.infrastructure.kafka.events.metadata import AvroEventMetadata
from app.services.idempotency.idempotency_manager import IdempotencyManager
from app.services.saga.base_saga import BaseSaga
from app.services.saga.saga_orchestrator import SagaOrchestrator
from app.services.saga.saga_step import CompensationStep, SagaContext, SagaStep
-from app.events.schema.schema_registry import SchemaRegistryManager
from app.settings import Settings
+from pydantic import Field
pytestmark = pytest.mark.unit
@@ -65,7 +64,9 @@ class _FakeProd(UnifiedProducer):
def __init__(self) -> None:
pass # Skip parent __init__
- async def produce(self, event_to_produce: BaseEvent, key: str | None = None, headers: dict[str, str] | None = None) -> None:
+ async def produce(
+ self, event_to_produce: BaseEvent, key: str | None = None, headers: dict[str, str] | None = None
+ ) -> None:
return None
diff --git a/backend/tests/unit/services/sse/test_shutdown_manager.py b/backend/tests/unit/services/sse/test_shutdown_manager.py
index 7025f15b..69c9d9f5 100644
--- a/backend/tests/unit/services/sse/test_shutdown_manager.py
+++ b/backend/tests/unit/services/sse/test_shutdown_manager.py
@@ -2,7 +2,6 @@
import logging
import pytest
-
from app.core.lifecycle import LifecycleEnabled
from app.services.sse.sse_shutdown_manager import SSEShutdownManager
@@ -47,7 +46,9 @@ async def on_shutdown(event: asyncio.Event, cid: str) -> None:
@pytest.mark.asyncio
async def test_shutdown_force_close_calls_router_stop_and_rejects_new() -> None:
- mgr = SSEShutdownManager(drain_timeout=0.01, notification_timeout=0.01, force_close_timeout=0.01, logger=_test_logger)
+ mgr = SSEShutdownManager(
+ drain_timeout=0.01, notification_timeout=0.01, force_close_timeout=0.01, logger=_test_logger
+ )
router = _FakeRouter()
mgr.set_router(router)
diff --git a/backend/tests/unit/services/sse/test_sse_service.py b/backend/tests/unit/services/sse/test_sse_service.py
index b6526da4..5aa59e21 100644
--- a/backend/tests/unit/services/sse/test_sse_service.py
+++ b/backend/tests/unit/services/sse/test_sse_service.py
@@ -69,7 +69,7 @@ async def get_execution_status(self, execution_id: str) -> SSEExecutionStatusDom
return SSEExecutionStatusDomain(
execution_id=execution_id,
status=ExecutionStatus.RUNNING,
- timestamp=datetime.now(timezone.utc).isoformat(),
+ timestamp=datetime.now(timezone.utc),
)
async def get_execution(self, execution_id: str) -> DomainExecution | None: # noqa: ARG002
@@ -151,7 +151,7 @@ async def test_execution_stream_closes_on_failed_event() -> None:
# Push a failed event and ensure stream ends after yielding it
await bus.exec_sub.push({"event_type": EventType.EXECUTION_FAILED, "execution_id": "exec-1", "data": {}})
failed = await agen.__anext__()
- assert _decode(failed)["event_type"] == str(EventType.EXECUTION_FAILED)
+ assert _decode(failed)["event_type"] == EventType.EXECUTION_FAILED
with pytest.raises(StopAsyncIteration):
await agen.__anext__()
@@ -169,7 +169,9 @@ async def test_execution_stream_result_stored_includes_result_payload() -> None:
stderr="",
lang="python",
lang_version="3.11",
- resource_usage=ResourceUsageDomain(0.1, 1, 100, 64),
+ resource_usage=ResourceUsageDomain(
+ execution_time_wall_seconds=0.1, cpu_time_jiffies=1, clk_tck_hertz=100, peak_memory_kb=64
+ ),
user_id="u1",
exit_code=0,
)
@@ -186,7 +188,7 @@ async def test_execution_stream_result_stored_includes_result_payload() -> None:
await bus.exec_sub.push({"event_type": EventType.RESULT_STORED, "execution_id": "exec-2", "data": {}})
evt = await agen.__anext__()
data = _decode(evt)
- assert data["event_type"] == str(EventType.RESULT_STORED)
+ assert data["event_type"] == EventType.RESULT_STORED
assert "result" in data and data["result"]["execution_id"] == "exec-2"
with pytest.raises(StopAsyncIteration):
diff --git a/backend/tests/unit/services/sse/test_sse_shutdown_manager.py b/backend/tests/unit/services/sse/test_sse_shutdown_manager.py
index f97350c2..46d28026 100644
--- a/backend/tests/unit/services/sse/test_sse_shutdown_manager.py
+++ b/backend/tests/unit/services/sse/test_sse_shutdown_manager.py
@@ -4,6 +4,7 @@
import pytest
from app.core.lifecycle import LifecycleEnabled
from app.services.sse.sse_shutdown_manager import SSEShutdownManager
+
from tests.helpers.eventually import eventually
pytestmark = pytest.mark.unit
diff --git a/backend/tests/unit/services/test_pod_builder.py b/backend/tests/unit/services/test_pod_builder.py
index cd271631..45c267e7 100644
--- a/backend/tests/unit/services/test_pod_builder.py
+++ b/backend/tests/unit/services/test_pod_builder.py
@@ -1,12 +1,11 @@
from uuid import uuid4
import pytest
-from kubernetes import client as k8s_client
-
from app.infrastructure.kafka.events.metadata import AvroEventMetadata
from app.infrastructure.kafka.events.saga import CreatePodCommandEvent
from app.services.k8s_worker.config import K8sWorkerConfig
from app.services.k8s_worker.pod_builder import PodBuilder
+from kubernetes import client as k8s_client
class TestPodBuilder:
diff --git a/deploy.sh b/deploy.sh
index 6819b109..6d24f356 100755
--- a/deploy.sh
+++ b/deploy.sh
@@ -348,7 +348,8 @@ cmd_openapi() {
uv run python -c "
import json
-from app.main import app
+from app.main import create_app
+app = create_app()
schema = app.openapi()
print(json.dumps(schema, indent=2))
" > "../$OUTPUT"
diff --git a/docs/architecture/event-system-design.md b/docs/architecture/event-system-design.md
new file mode 100644
index 00000000..604a0f7e
--- /dev/null
+++ b/docs/architecture/event-system-design.md
@@ -0,0 +1,165 @@
+# Event system design
+
+This document explains how events flow through the system, why there are multiple event representations, and how they work together. If you've looked at the codebase and wondered why we have both domain events and Kafka events that look almost identical, this is where that question gets answered.
+
+## The three layers
+
+Events in Integr8sCode exist in three forms:
+
+```mermaid
+graph LR
+ subgraph "Source of Truth"
+ ET[EventType enum]
+ end
+
+ subgraph "Domain Layer"
+ DE[Domain Events<br/>typed.py]
+ end
+
+ subgraph "Infrastructure Layer"
+ KE[Kafka Events<br/>kafka/events/]
+ end
+
+ ET --> DE
+ ET --> KE
+ DE -.->|"same event_type"| KE
+```
+
+The `EventType` enum defines all possible event types as strings. Domain events are Pydantic models used for storage in MongoDB and deserialization from the event store. Kafka events are Avro-compatible models used for serialization to Kafka topics. Both reference the same `EventType` values, ensuring consistency.
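+
+For orientation, the enum is just a string-valued enum and both event layers point back at its members. A minimal sketch follows; the real enum in `domain/enums/events.py` has far more values, and the exact base class shown here is an assumption:
+
+```python
+class EventType(StringEnum):  # assumed str-backed enum base
+    EXECUTION_REQUESTED = "execution_requested"
+    EXECUTION_COMPLETED = "execution_completed"
+    POD_SCHEDULED = "pod_scheduled"
+    # ... roughly 50 more values
+```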
+
+This might look like unnecessary duplication, but it's actually a deliberate architectural choice rooted in Domain-Driven Design.
+
+## Why two event classes?
+
+In DDD terminology, what we call "domain events" and "Kafka events" map to two different concepts: domain events and integration events.
+
+Domain events are internal to the bounded context. They carry whatever information the domain needs, including storage-related fields like `stored_at` and `ttl_expires_at`. These events get stored in MongoDB and replayed during event sourcing operations.
+
+Integration events cross bounded context boundaries. They flow through Kafka to other services or workers. They need to be serializable to Avro, which means they can't contain arbitrary Python objects. They carry routing information like the `topic` ClassVar.
+
+```mermaid
+graph TB
+ subgraph "Bounded Context: Backend"
+ API[API Handler] --> DS[Domain Service]
+ DS --> DomainEvent[Domain Event]
+ DomainEvent --> MongoDB[(MongoDB)]
+ DomainEvent --> Transform[Transform]
+ Transform --> KafkaEvent[Kafka Event]
+ end
+
+ KafkaEvent --> Kafka[(Kafka)]
+
+ subgraph "Other Contexts"
+ Kafka --> Worker1[Saga Orchestrator]
+ Kafka --> Worker2[Pod Monitor]
+ Kafka --> Worker3[Result Processor]
+ end
+```
+
+The transformation between domain and Kafka events happens in `KafkaEventService`. When you call `publish_event()`, the service stores the domain event in MongoDB and publishes the corresponding Kafka event to the appropriate topic.
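+
+A rough caller-side sketch of that flow; the parameter names and payload fields below are illustrative rather than the exact service API:
+
+```python
+# Illustrative only -- the real method lives in services/kafka_event_service.py
+await kafka_event_service.publish_event(
+    event_type=EventType.EXECUTION_COMPLETED,
+    payload={"execution_id": execution_id, "exit_code": 0},  # hypothetical payload fields
+    aggregate_id=execution_id,
+    metadata=metadata,
+)
+```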
+
+## How discriminated unions work
+
+When events come back from MongoDB, we need to deserialize them into the correct Python class. A document with `event_type: "execution_completed"` should become an `ExecutionCompletedEvent` instance, not a generic dict.
+
+Pydantic's discriminated unions handle this. Each event class declares its event type using a `Literal` type:
+
+```python
+class ExecutionCompletedEvent(BaseEvent):
+ event_type: Literal[EventType.EXECUTION_COMPLETED] = EventType.EXECUTION_COMPLETED
+ execution_id: str
+ exit_code: int
+ # ...
+```
+
+The `DomainEvent` type is a union of all event classes with a discriminator on `event_type`:
+
+```python
+DomainEvent = Annotated[
+ ExecutionRequestedEvent
+ | ExecutionCompletedEvent
+ | ExecutionFailedEvent
+ | ...,  # all 53 event types
+ Discriminator("event_type"),
+]
+```
+
+The `domain_event_adapter` TypeAdapter validates incoming data against this union. When it sees `{"event_type": "execution_completed", ...}`, it knows to instantiate an `ExecutionCompletedEvent`.
+
+```mermaid
+sequenceDiagram
+ participant DB as MongoDB
+ participant Repo as EventStore
+ participant TA as TypeAdapter
+ participant Event as ExecutionCompletedEvent
+
+ DB->>Repo: {event_type: "execution_completed", ...}
+ Repo->>TA: validate_python(doc)
+ TA->>TA: Check discriminator field
+ TA->>Event: Instantiate correct class
+ Event->>Repo: Typed event instance
+```
+
+This approach is more performant than trying each union member until one validates. The discriminator tells Pydantic exactly which class to use.
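+
+A quick usage sketch: the extra fields in the document below are illustrative, and the real class may require more (in which case `validate_python` raises a `ValidationError` listing them):
+
+```python
+from app.domain.events.typed import domain_event_adapter
+
+doc = {"event_type": "execution_completed", "execution_id": "exec-1", "exit_code": 0}
+event = domain_event_adapter.validate_python(doc)
+assert type(event).__name__ == "ExecutionCompletedEvent"
+```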
+
+## Keeping things in sync
+
+With three representations of each event, there's a risk of drift. You might add a new `EventType` value but forget to create the corresponding domain or Kafka event class. Or you might create a Kafka event but forget to add it to the `DomainEvent` union.
+
+The `test_event_schema_coverage.py` test suite catches these problems:
+
+```python
+--8<-- "backend/tests/unit/domain/events/test_event_schema_coverage.py:59:72"
+```
+
+The test runs in CI and fails if any `EventType` value lacks a corresponding event class. It also checks the reverse: that no orphan event classes exist without matching enum values.
+
+When adding a new event type, the workflow is:
+
+1. Add the value to `EventType` enum
+2. Create the domain event class in `typed.py`
+3. Add it to the `DomainEvent` union
+4. Create the Kafka event class in `kafka/events/`
+5. Export it from `kafka/events/__init__.py`
+
+If you miss a step, the test tells you exactly what's missing.
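+
+As a sketch of steps 1 through 3, with a hypothetical `user_archived` event used purely to illustrate the shape:
+
+```python
+# 1. domain/enums/events.py -- add the enum value:
+#        USER_ARCHIVED = "user_archived"
+
+# 2. domain/events/typed.py -- add the domain event class
+class UserArchivedEvent(BaseEvent):
+    event_type: Literal[EventType.USER_ARCHIVED] = EventType.USER_ARCHIVED
+    user_id: str
+
+# 3. Add UserArchivedEvent to the DomainEvent union so the discriminated
+#    TypeAdapter can deserialize it from stored documents.
+```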
+
+## The Avro connection
+
+Kafka events inherit from `AvroBase` (via `pydantic-avro`), which enables automatic Avro schema generation. The schema registry stores these schemas and validates that producers and consumers agree on the format.
+
+```python
+--8<-- "backend/app/infrastructure/kafka/events/base.py:13:27"
+```
+
+Each Kafka event class also declares its target topic as a class variable. The producer uses this to route events to the correct topic without external mapping tables.
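+
+The shape looks roughly like this; the topic member and payload fields are assumptions for illustration:
+
+```python
+class ExecutionCompletedEvent(BaseEvent):  # Kafka-side BaseEvent from kafka/events/base.py
+    topic: ClassVar[KafkaTopic] = KafkaTopic.EXECUTION_EVENTS  # hypothetical topic member
+    event_type: EventType = EventType.EXECUTION_COMPLETED
+    execution_id: str
+    exit_code: int
+```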
+
+## Why not just one event class?
+
+You could theoretically use the same class for both domain and Kafka purposes. The domain-specific fields (`stored_at`, `ttl_expires_at`) could be excluded from Avro serialization with `exclude=True`. The `topic` ClassVar wouldn't serialize anyway.
+
+This is a valid simplification if your domain and integration events have identical payloads. But there are reasons to keep them separate:
+
+The domain layer shouldn't know about Kafka topics. Adding `topic: ClassVar[KafkaTopic]` to a domain event couples it to infrastructure concerns. DDD purists would argue this violates the dependency rule.
+
+Avro has constraints that don't apply to MongoDB. Avro schemas don't support arbitrary nested dicts, certain datetime formats, or MongoDB-specific types like ObjectId. Keeping Kafka events separate means you can optimize them for wire format without affecting domain logic.
+
+The two layers can evolve independently. If you need to change how events are stored in MongoDB, you don't have to worry about breaking Kafka consumers. If you need to add a field to Kafka events for a new consumer, you can do so without touching the domain layer.
+
+That said, if your events are simple and you want less code to maintain, unifying them is a reasonable choice. The current architecture prioritizes separation of concerns over minimizing duplication.
+
+## Key files
+
+| File | Purpose |
+|------|---------|
+| [`domain/enums/events.py`](https://github.com/HardMax71/Integr8sCode/blob/main/backend/app/domain/enums/events.py) | `EventType` enum with all event type values |
+| [`domain/events/typed.py`](https://github.com/HardMax71/Integr8sCode/blob/main/backend/app/domain/events/typed.py) | Domain event classes and `DomainEvent` union |
+| [`infrastructure/kafka/events/`](https://github.com/HardMax71/Integr8sCode/blob/main/backend/app/infrastructure/kafka/events/) | Kafka event classes organized by domain |
+| [`services/kafka_event_service.py`](https://github.com/HardMax71/Integr8sCode/blob/main/backend/app/services/kafka_event_service.py) | Publishes events to both MongoDB and Kafka |
+| [`tests/unit/domain/events/test_event_schema_coverage.py`](https://github.com/HardMax71/Integr8sCode/blob/main/backend/tests/unit/domain/events/test_event_schema_coverage.py) | Validates correspondence between enum and event classes |
+
+## Related docs
+
+- [Event Storage](event-storage.md) — how events are stored in MongoDB with the payload pattern
+- [Kafka Topics](kafka-topic-architecture.md) — topic naming conventions and partitioning strategy
+- [User Settings Events](user-settings-events.md) — event sourcing pattern with TypeAdapter merging
diff --git a/mkdocs.yml b/mkdocs.yml
index 45aa85c0..eae6b2ec 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -118,6 +118,7 @@ nav:
- Pydantic Dataclasses: architecture/pydantic-dataclasses.md
- Model Conversion: architecture/model-conversion.md
- Event Storage: architecture/event-storage.md
+ - Event System Design: architecture/event-system-design.md
- User Settings Events: architecture/user-settings-events.md
- Frontend Build: architecture/frontend-build.md
- Svelte 5 Migration: architecture/svelte5-migration.md