diff --git a/python-test-samples/cns427-testable-serverless-architecture/.dockerignore b/python-test-samples/cns427-testable-serverless-architecture/.dockerignore new file mode 100644 index 00000000..cd43b954 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/.dockerignore @@ -0,0 +1,15 @@ +cdk.out +cdk-outputs.json +.mypy_cache +.ruff_cache +.pytest_cache +__pycache__ +.venv +.git +tests +infrastructure +docs +scripts +ai-dlc +*.md + diff --git a/python-test-samples/cns427-testable-serverless-architecture/.gitignore b/python-test-samples/cns427-testable-serverless-architecture/.gitignore new file mode 100644 index 00000000..c65b0878 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/.gitignore @@ -0,0 +1,23 @@ +.venv +.coverage +.coverage.* +htmlcov/ +coverage.xml +*.cover +*cache +.github +__pycache__/ +junit.xml + +# Python build artifacts +*.egg-info/ +*.egg +dist/ +build/ + +# CDK output +cdk.out/ +cdk-outputs.json +*.js +*.d.ts +node_modules/ diff --git a/python-test-samples/cns427-testable-serverless-architecture/Makefile b/python-test-samples/cns427-testable-serverless-architecture/Makefile new file mode 100644 index 00000000..d3b598f0 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/Makefile @@ -0,0 +1,147 @@ +# CNS427 Task API - Makefile for test automation + +.PHONY: help install test test-unit test-integration test-e2e test-all coverage lint format type-check cdk-nag cdk-nag-report check deploy deploy-test-infra destroy-test-infra check-test-infra clean + +# Default target +help: + @echo "CNS427 Task API - Available Commands:" + @echo "" + @echo "Setup:" + @echo " install Install dependencies with Poetry" + @echo " validate-setup Validate EventBridge testing setup" + @echo "" + @echo "Testing:" + @echo " test Run all available tests" + @echo " test-unit Run unit tests only (fast, no AWS)" + @echo " test-property Run property-based tests (Hypothesis, complex algorithms)" + @echo " test-integration Run integration tests (includes EventBridge)" + @echo " test-e2e Run end-to-end tests" + @echo " test-all Run complete test suite" + @echo " coverage Generate coverage report" + @echo "" + @echo "Test Infrastructure:" + @echo " deploy-test-infra Deploy EventBridge test infrastructure" + @echo " destroy-test-infra Destroy EventBridge test infrastructure" + @echo " check-test-infra Check test infrastructure status" + @echo "" + @echo "Code Quality:" + @echo " lint Run linting checks" + @echo " format Format code with ruff" + @echo " type-check Run type checking with mypy" + @echo " cdk-nag Run CDK Nag security checks (blocks on violations)" + @echo " cdk-nag-report Generate CDK Nag security report (non-blocking)" + @echo " check Run all quality checks (lint + type-check + cdk-nag)" + @echo "" + @echo "Deployment:" + @echo " deploy Deploy main application to AWS" + @echo " destroy Destroy main application" + @echo " clean Clean build artifacts" + +# Setup +install: + @echo "Installing dependencies..." + poetry install + +validate-setup: + @echo "Validating EventBridge testing setup..." + poetry run validate-setup + +# Testing targets +test: + @echo "Running all available tests..." + poetry run test-all + +test-unit: + @echo "Running unit tests..." + poetry run test-unit + +test-property: + @echo "Running property-based tests..." + poetry run pytest tests/property_based/ -v + +test-integration: + @echo "Running integration tests (including EventBridge)..." 
+ poetry run test-integration + +test-e2e: + @echo "Running end-to-end tests..." + poetry run test-e2e + +test-all: + @echo "Running complete test suite..." + poetry run test-all + +coverage: + @echo "Generating coverage report..." + poetry run pytest tests/ --cov=services --cov=shared --cov-report=html --cov-report=term + @echo "Coverage report available at: htmlcov/index.html" + +# Test Infrastructure Management +deploy-test-infra: + @echo "Deploying EventBridge test infrastructure..." + cd infrastructure/test_harness && poetry run cdk deploy + +destroy-test-infra: + @echo "Destroying EventBridge test infrastructure..." + cd infrastructure/test_harness && poetry run cdk destroy + +check-test-infra: + @echo "Checking test infrastructure status..." + @aws dynamodb describe-table --table-name cns427-task-api-test-results > /dev/null 2>&1 && echo "βœ… Test infrastructure is deployed" || echo "❌ Test infrastructure not found" + +# Code quality +lint: + @echo "Running linting checks..." + poetry run lint + +format: + @echo "Formatting code..." + poetry run format + +type-check: + @echo "Running type checking..." + poetry run type-check + +cdk-nag: + @echo "Running CDK Nag security checks..." + @echo "This will fail if security violations are found." + ENABLE_CDK_NAG=true poetry run cdk synth --quiet > /dev/null + @echo "βœ… CDK Nag checks passed - no security violations found" + +cdk-nag-report: + @echo "Generating CDK Nag security report..." + ENABLE_CDK_NAG=true CDK_NAG_REPORT=true poetry run cdk synth --quiet > /dev/null + @echo "βœ… CDK Nag report generated" + @echo "πŸ“Š Reports available at:" + @echo " - cdk.out/AwsSolutions-cns427-task-api-core.csv" + @echo " - cdk.out/AwsSolutions-cns427-task-api-api.csv" + @echo " - cdk.out/AwsSolutions-cns427-task-api-monitoring.csv" + @echo " - cdk.out/*.json (JSON format)" + +check: lint type-check cdk-nag + @echo "βœ… All quality checks passed!" + +# Deployment +deploy: + @echo "Deploying main application to AWS..." + poetry run deploy + +destroy: + @echo "Destroying main application..." + poetry run destroy + +# Cleanup +clean: + @echo "Cleaning build artifacts..." + rm -rf .pytest_cache/ + rm -rf htmlcov/ + rm -rf .coverage + rm -rf coverage.xml + rm -rf cdk.out/ + rm -rf infrastructure/test_harness/cdk.out/ + rm -rf cdk-outputs.json + rm -rf .mypy_cache/ + rm -rf .ruff_cache/ + rm -rf .venv/ + find . -type d -name "__pycache__" -exec rm -rf {} + + find . -type f -name "*.pyc" -delete \ No newline at end of file diff --git a/python-test-samples/cns427-testable-serverless-architecture/README.md b/python-test-samples/cns427-testable-serverless-architecture/README.md new file mode 100644 index 00000000..3950b4f8 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/README.md @@ -0,0 +1,499 @@ +# CNS427 Task Management API + +> **Companion code for AWS re:Invent 2025 talk CNS427:** +> *"Supercharge Serverless Testing: Accelerate Development with Kiro"* + +A serverless Task Management API demonstrating best practices for building testable, maintainable serverless applications using hexagonal architecture and comprehensive testing strategies. 
+ +## 🎯 What this project demonstrates + +This codebase showcases how to build serverless applications that are: +- **Easy to test** at multiple levels (unit, integration, end-to-end) +- **Maintainable** through clean architecture and separation of concerns +- **Production-ready** with proper error handling and observability +- **AI-friendly** designed using AI-Driven Development Lifecycle (AI-DLC) methodology + +## πŸ“ Architecture: Hexagonal design for testability + +### The problem with traditional serverless code + +``` +❌ BEFORE: Monolithic Lambda (Hard to Test) +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ lambda_handler() β”‚ +β”‚ β”œβ”€ HTTP parsing β”‚ +β”‚ β”œβ”€ Business logic β”‚ +β”‚ β”œβ”€ DynamoDB calls β”‚ +β”‚ β”œβ”€ EventBridge calls β”‚ +β”‚ └─ HTTP response β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + +Problems: +β€’ Everything mixed together +β€’ Can't test without AWS +β€’ Complex test setup +β€’ Slow test execution +``` + +### Solution: Monorepo service pattern + Hexagonal architecture + +``` +βœ… AFTER: Service-Based Architecture (Easy to Test, Optimized Packaging) + +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ TASK SERVICE (Complete Hexagon) β”‚ +β”‚ β”‚ +β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ +β”‚ β”‚ Handler (handler.py) β”‚ β”‚ +β”‚ β”‚ β€’ HTTP parsing only β”‚ β”‚ +β”‚ β”‚ β€’ Delegates to domain β”‚ β”‚ +β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ +β”‚ β”‚ β”‚ +β”‚ β–Ό β”‚ +β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ +β”‚ β”‚ Domain (domain/) β”‚ β”‚ +β”‚ β”‚ β€’ Pure business logic β”‚ β”‚ +β”‚ β”‚ β€’ No AWS dependencies β”‚ β”‚ +β”‚ β”‚ β€’ Uses protocol interfaces β”‚ β”‚ +β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ +β”‚ β”‚ β”‚ +β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ +β”‚ β”‚ Models (models/) β”‚ β”‚ +β”‚ β”‚ β€’ Task domain models β”‚ β”‚ +β”‚ β”‚ β€’ API contracts β”‚ β”‚ +β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + β”‚ + β”‚ Uses shared adapters + β–Ό +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ SHARED INFRASTRUCTURE β”‚ +β”‚ 
β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ +β”‚ β”‚ Integration (shared/integration/) β”‚ β”‚ +β”‚ β”‚ β€’ DynamoDB adapter β”‚ β”‚ +β”‚ β”‚ β€’ EventBridge adapter β”‚ β”‚ +β”‚ β”‚ β€’ Protocol interfaces β”‚ β”‚ +β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + β”‚ + β”‚ Uses shared adapters + β–Ό +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ NOTIFICATION SERVICE (Complete Hexagon) β”‚ +β”‚ β”‚ +β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ +β”‚ β”‚ Handler (handler.py) β”‚ β”‚ +β”‚ β”‚ β€’ Event parsing β”‚ β”‚ +β”‚ β”‚ β€’ Delegates to domain β”‚ β”‚ +β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ +β”‚ β”‚ β”‚ +β”‚ β–Ό β”‚ +β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ +β”‚ β”‚ Domain (domain/) β”‚ β”‚ +β”‚ β”‚ β€’ Event processing logic β”‚ β”‚ +β”‚ β”‚ β€’ No AWS dependencies β”‚ β”‚ +β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + +Benefits: +βœ“ Each service is independent +βœ“ Optimized Lambda packages (30-40% smaller) +βœ“ Test without AWS (unit tests) +βœ“ Clear service boundaries +βœ“ Fast execution (milliseconds) +``` + +## πŸ§ͺ Testing strategy: The honeycomb model + +Unlike traditional applications, serverless apps benefit from an **inverted testing pyramid** - the "honeycomb" model: + +``` + E2E Tests (10%) + / Critical flows \ + / \ + / Integration Tests (60%) \ + | Service Boundaries. | + | Real AWS Services | + | Error Handling, Scale | + \ / + \ / + \ Unit Tests (30%) / + Pure Business Logic + Fast, Isolated +``` + +### Why honeycomb for serverless? 
+
+**Traditional Pyramid** (unit-heavy) doesn't work well for serverless because:
+- Most bugs occur at service boundaries (DynamoDB, EventBridge, API Gateway)
+- AWS SDK behavior is complex and hard to mock accurately
+- Integration issues are the primary source of production failures
+
+**Honeycomb Model** (integration-heavy) is better because:
+- βœ… Tests real AWS service behavior
+- βœ… Catches integration bugs early
+- βœ… Validates error handling and retries
+- βœ… Tests at the right level of abstraction
+
+### Test distribution
+
+```
+Integration Tests:
+β”œβ”€ DynamoDB: Real AWS + error fakes
+β”œβ”€ EventBridge: Real AWS with test harness
+└─ API Gateway: Real Lambda handler
+
+Unit Tests:
+β”œβ”€ Domain logic: Pure business rules
+β”œβ”€ Handler: HTTP contracts
+└─ Models: Validation logic
+
+E2E Tests:
+└─ Critical user workflows
+```
+
+## πŸ—οΈ How this codebase implements best practices
+
+### 1. **Dependency injection for testability**
+
+```python
+# services/task_service/domain/task_service.py
+from shared.integration.interfaces import (
+    TaskRepositoryProtocol,
+    EventPublisherProtocol
+)
+
+class TaskService:
+    def __init__(
+        self,
+        repository: TaskRepositoryProtocol,
+        event_publisher: EventPublisherProtocol
+    ):
+        self.repository = repository
+        self.event_publisher = event_publisher
+```
+
+**Benefits:**
+- Inject real AWS adapters in production
+- Inject in-memory fakes for unit tests
+- Inject error-simulating fakes for error testing
+
+### 2. **Protocol-based interfaces**
+
+```python
+# shared/integration/interfaces.py
+from typing import Optional, Protocol
+
+from services.task_service.models.task import Task
+
+class TaskRepositoryProtocol(Protocol):
+    def create_task(self, task: Task) -> Task: ...
+    def get_task(self, task_id: str) -> Optional[Task]: ...
+    def update_task(self, task: Task) -> Task: ...
+    def delete_task(self, task_id: str) -> None: ...
+```
+
+**Benefits:**
+- Swap implementations without changing domain code
+- Multiple implementations (real, fake, mock)
+- Type-safe with IDE support
+
+### 3. **In-memory fakes over mocks**
+
+```python
+# tests/shared/fakes/in_memory_repository.py
+from typing import Dict
+
+from services.task_service.models.task import Task
+
+class InMemoryTaskRepository:
+    def __init__(self):
+        self._tasks: Dict[str, Task] = {}
+
+    def create_task(self, task: Task) -> Task:
+        self._tasks[task.task_id] = task
+        return task
+```
+
+**Benefits:**
+- Realistic behavior without AWS
+- No complex mock setup
+- Easy to debug
+- Fast execution
+
+### 4. **Real AWS integration tests**
+
+```python
+# tests/integration/test_dynamodb_integration.py
+import boto3
+
+from shared.integration.dynamodb_adapter import DynamoDBTaskRepository
+from services.task_service.models.task import Task
+
+def test_create_task_persists_to_dynamodb():
+    # Uses real DynamoDB table
+    repository = DynamoDBTaskRepository(table_name="test-table")
+
+    # Real AWS SDK calls
+    task = repository.create_task(Task(title="Test"))
+
+    # Verify directly in DynamoDB
+    table = boto3.resource("dynamodb").Table("test-table")
+    response = table.get_item(Key={"task_id": task.task_id})
+    assert response["Item"]["title"] == "Test"
+```
+
+**Benefits:**
+- Tests real AWS behavior
+- Catches SDK quirks
+- Validates IAM permissions
+- Tests error handling
+
+### 5.
**EventBridge test harness** + +```python +# Test harness captures events for verification +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ Lambda │───▢│ EventBridge │───▢│ Test Lambda β”‚ +β”‚ (publishes) β”‚ β”‚ (TEST-* events) β”‚ β”‚ (captures) β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + β”‚ + β–Ό + β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” + β”‚ DynamoDB β”‚ + β”‚ (test data) β”‚ + β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +``` + +**Benefits:** +- Tests real EventBridge publishing +- Verifies event content and timing +- No production side effects +- Isolated test environment + +## πŸ€– AI-Driven development lifecycle (AI-DLC) + +This codebase was developed using the **AI-DLC methodology** - a structured approach to using AI for software development that follows the software development lifecycle phases. + +### AI-DLC phases + +``` +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ INCEPTION PHASE β”‚ +β”‚ Requirements & Architecture β”‚ +β”‚ β”‚ +β”‚ πŸ€– AI validates architecture decisions β”‚ +β”‚ βœ… Layer separation β”‚ +β”‚ βœ… Pure business logic β”‚ +β”‚ β†’ Ready for Construction β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + β”‚ + β–Ό +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ CONSTRUCTION PHASE β”‚ +β”‚ Implementation & Testing β”‚ +β”‚ β”‚ +β”‚ πŸ€– AI validates testing strategy β”‚ +β”‚ βœ… 62% unit, 35% integration, 3% E2E (honeycomb) β”‚ +β”‚ βœ… Layer-appropriate testing β”‚ +β”‚ β†’ Ready for Operation β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + β”‚ + β–Ό +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ OPERATION PHASE β”‚ +β”‚ Deploy, Monitor, Optimize β”‚ +β”‚ β”‚ +β”‚ πŸ€– AI analyzes production bugs β”‚ +β”‚ πŸ”₯ 75% bugs in integration layer β”‚ +β”‚ 🎯 Prioritized roadmap β”‚ +β”‚ πŸ“ˆ Measurable targets β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +``` + +### Learn more about AI-DLC + +- πŸ“„ **[AI-DLC whitepaper](https://prod.d13rzhkk8cj2z0.amplifyapp.com/)** - Comprehensive guide to the methodology +- πŸ“Š **[Visual flow guide](docs/visual-flow-guide.md)** - Diagrams showing AI-DLC phases +- πŸ“ **[AI-DLC implementation guide](docs/ai-dlc.md)** - How we applied AI-DLC to this project + +## πŸ“š Documentation + +### Getting started +- **[Deployment guide](docs/deployment.md)** - Setup, deployment, and operations +- **[Configuration guide](docs/configuration.md)** - Infrastructure configuration and overrides + +### Architecture & design +- **[Architecture guide](docs/architecture.md)** - Hexagonal architecture deep dive +- **[Testing 
guide](docs/testing-guide.md)** - Comprehensive testing strategies
+
+### Methodology
+- **[AI-DLC implementation guide](docs/ai-dlc.md)** - How we applied AI-DLC to this project
+
+## πŸš€ Quick start
+
+### Prerequisites
+- Python 3.13+
+- Poetry
+- Node.js 18+ (for AWS CDK)
+- AWS CLI v2
+- Docker (or alternatives like [Finch](https://github.com/runfinch))
+
+### AWS configuration
+
+**Configure AWS credentials and region before running any commands:**
+
+```bash
+# Configure AWS credentials and default region
+aws configure
+
+# Or set environment variables
+export AWS_ACCESS_KEY_ID=your_access_key
+export AWS_SECRET_ACCESS_KEY=your_secret_key
+export AWS_DEFAULT_REGION=us-west-2
+```
+
+All commands (deployment, testing, infrastructure management) will use these credentials and region. For detailed configuration options, see the **[Deployment guide](docs/deployment.md)**.
+
+### Installation
+
+```bash
+# Install dependencies
+poetry install
+
+# Verify setup
+poetry run validate-setup
+```
+
+### Run unit tests (no AWS required)
+
+```bash
+# Fast, isolated tests
+make test-unit
+
+# Runs in milliseconds
+# Tests pure business logic
+# Uses in-memory fakes
+```
+
+### Deploy and run integration tests
+
+```bash
+# 1. Deploy main application
+make deploy
+
+# 2. Deploy test infrastructure
+make deploy-test-infra
+
+# 3. Verify test infrastructure
+make check-test-infra
+
+# 4. Run integration tests (requires AWS)
+make test-integration
+
+# 5. Run end-to-end tests (requires AWS)
+make test-e2e
+```
+
+## πŸŽ“ Key takeaways
+
+### For serverless developers
+
+1. **Architecture**: Hexagonal architecture makes serverless apps testable
+2. **Honeycomb > Pyramid**: Focus on integration tests for serverless
+3. **Real AWS tests**: Test against real services, not just mocks
+4. **Fast unit tests**: Use in-memory fakes for business logic
+5. **Test harness**: Build infrastructure to support testing asynchronous integrations
+
+### For AI-assisted development
+
+1. **Structured approach**: AI-DLC provides a framework for AI collaboration
+2. **Validation at each phase**: AI validates architecture, testing, and operations
+3. **Measurable outcomes**: Track metrics at each phase
+4. **Continuous improvement**: Feedback loops inform the next iteration
+
+## πŸ“Š Project structure
+
+This project uses a **monorepo service pattern** where each service is a complete hexagon with its own domain, models, and handler. Only infrastructure adapters are shared. The sketch below shows how a service handler wires those shared adapters in; the full layout follows.
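+
+As a minimal sketch, here is how a service handler can act as the composition root, wiring shared adapters into its domain service. Module paths follow the examples in this README, but the `TABLE_NAME` environment variable, the `EventBridgePublisher` constructor, and the response serialization are assumptions for illustration, not the exact code:
+
+```python
+# Hypothetical wiring in services/task_service/handler.py
+import json
+import os
+
+from services.task_service.domain.task_service import TaskService
+from shared.integration.dynamodb_adapter import DynamoDBTaskRepository
+from shared.integration.eventbridge_adapter import EventBridgePublisher
+
+# Compose once at import time so warm invocations reuse the adapters
+task_service = TaskService(
+    repository=DynamoDBTaskRepository(table_name=os.environ["TABLE_NAME"]),  # assumed env var
+    event_publisher=EventBridgePublisher(),  # assumed no-arg constructor
+)
+
+
+def lambda_handler(event, context):
+    # Handler layer: parse HTTP, delegate to the domain, format the response
+    body = json.loads(event["body"])
+    task = task_service.create_task(title=body["title"])
+    # Naive serialization, simplified for the sketch
+    return {"statusCode": 201, "body": json.dumps(task.__dict__, default=str)}
+```
+
+In tests, the same `TaskService` is constructed with `InMemoryTaskRepository` and a fake publisher instead, which is what keeps the unit suite AWS-free.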
+ +``` +cns427-task-api/ +β”œβ”€β”€ services/ # Microservices (hexagonal architecture) +β”‚ β”œβ”€β”€ task_service/ # Task management service +β”‚ β”‚ β”œβ”€β”€ handler.py # Lambda entry point +β”‚ β”‚ β”œβ”€β”€ domain/ # Business logic +β”‚ β”‚ β”‚ β”œβ”€β”€ task_service.py +β”‚ β”‚ β”‚ β”œβ”€β”€ business_rules.py +β”‚ β”‚ β”‚ └── exceptions.py +β”‚ β”‚ β”œβ”€β”€ models/ # Domain models +β”‚ β”‚ β”‚ β”œβ”€β”€ task.py +β”‚ β”‚ β”‚ └── api.py +β”‚ β”‚ └── requirements.txt # Service dependencies +β”‚ └── notification_service/ # Event processing service +β”‚ β”œβ”€β”€ handler.py # Lambda entry point +β”‚ β”œβ”€β”€ domain/ # Business logic +β”‚ β”‚ └── notification_service.py +β”‚ └── requirements.txt # Service dependencies +β”œβ”€β”€ shared/ # Shared infrastructure code +β”‚ └── integration/ # AWS adapters (reusable) +β”‚ β”œβ”€β”€ dynamodb_adapter.py +β”‚ β”œβ”€β”€ eventbridge_adapter.py +β”‚ └── interfaces.py +β”œβ”€β”€ tests/ # Test suites +β”‚ β”œβ”€β”€ unit/ # Unit tests (fast, isolated) +β”‚ β”œβ”€β”€ integration/ # Integration tests (real AWS) +β”‚ β”œβ”€β”€ property_based/ # Property-based tests (Hypothesis) +β”‚ β”œβ”€β”€ e2e/ # End-to-end tests +β”‚ └── shared/ # Test utilities and fakes +β”œβ”€β”€ infrastructure/ # CDK infrastructure code +β”‚ β”œβ”€β”€ core/ # Main application stacks +β”‚ β”œβ”€β”€ test_harness/ # Test infrastructure +β”‚ └── config.py # Centralized configuration +└── docs/ # Documentation + β”œβ”€β”€ architecture.md # Architecture deep dive + β”œβ”€β”€ testing-guide.md # Testing strategies + β”œβ”€β”€ deployment.md # Deployment guide + β”œβ”€β”€ configuration.md # Configuration guide + β”œβ”€β”€ ai-dlc.md # AI-DLC methodology + β”œβ”€β”€ visual-flow-guide.md # Visual flow diagrams + └── cdk-nag-guide.md # CDK validation guide +``` + +### Service-Oriented organization + +**Why services?** +- Each service is a complete, independent hexagon +- Clear bounded contexts and responsibilities +- Optimized Lambda packaging (30-40% smaller) +- Independent deployment and scaling + +**Task service** owns: +- Task CRUD operations +- Business rules and validation +- Task domain models +- API contracts + +**Notification service** owns: +- Event processing +- Notification routing +- Event-specific logic + +**Shared infrastructure** provides: +- Reusable AWS adapters +- Protocol interfaces +- Common utilities + +For more details, see the **[Architecture Guide](docs/architecture.md)**. + +## 🀝 Contributing + +This is a demonstration project for the re:Invent talk. Feel free to use it as a reference for your own serverless applications! + +## πŸ“„ License + +MIT-0 - See LICENSE file for details. 
+ +## πŸ”— Resources + +- **AWS re:Invent 2025 Session CNS427**: [Session Details](#) +- **AI-DLC Whitepaper**: [aws.amazon.com/ai-dlc](#) +- **AWS Lambda Best Practices**: [docs.aws.amazon.com/lambda](https://docs.aws.amazon.com/lambda) +- **Hexagonal Architecture**: [alistair.cockburn.us/hexagonal-architecture](https://alistair.cockburn.us/hexagonal-architecture/) + +--- + +**Built with ❀️ using AI-Driven Development Lifecycle (AI-DLC)** diff --git a/python-test-samples/cns427-testable-serverless-architecture/app.py b/python-test-samples/cns427-testable-serverless-architecture/app.py new file mode 100644 index 00000000..a6dffb90 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/app.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 +"""CDK app entry point for CNS427 Task Management API.""" + +import os + +import aws_cdk as cdk +from cdk_nag import AwsSolutionsChecks, NagReportFormat + +from infrastructure.core.task_api_stack import TaskApiCoreStack, TaskApiMonitoringStack, TaskApiStack + +app = cdk.App() + +# Environment configuration +env = cdk.Environment(account=app.node.try_get_context('account'), region=app.node.try_get_context('region') or 'us-east-1') + +# Stack naming +stack_prefix = app.node.try_get_context('stack_prefix') or 'cns427-task-api' + +# Core infrastructure stack +core_stack = TaskApiCoreStack(app, f'{stack_prefix}-core', env=env, description='Core infrastructure for CNS427 Task Management API') + +# API stack +api_stack = TaskApiStack(app, f'{stack_prefix}-api', core_stack=core_stack, env=env, description='API infrastructure for CNS427 Task Management API') + +# Monitoring stack +monitoring_stack = TaskApiMonitoringStack( + app, f'{stack_prefix}-monitoring', api_stack=api_stack, env=env, description='Monitoring and observability for CNS427 Task Management API' +) + +# Add dependencies +api_stack.add_dependency(core_stack) +monitoring_stack.add_dependency(api_stack) + +# CDK Nag Security Checks (Conditional) +# Enable with: ENABLE_CDK_NAG=true cdk synth +# Or via context: cdk synth -c enable-cdk-nag=true +enable_cdk_nag = os.getenv('ENABLE_CDK_NAG', '').lower() == 'true' or app.node.try_get_context('enable-cdk-nag') +enable_reports = os.getenv('CDK_NAG_REPORT', '').lower() == 'true' or app.node.try_get_context('cdk-nag-report') + +if enable_cdk_nag: + print('πŸ”’ CDK Nag: Enabled - Running AwsSolutions security checks...') + + # Configure report formats if requested + report_formats = [] + if enable_reports: + report_formats = [NagReportFormat.CSV, NagReportFormat.JSON] + print('πŸ“Š CDK Nag: Report generation enabled (CSV + JSON)') + + # Apply AwsSolutions checks to all stacks + cdk.Aspects.of(app).add(AwsSolutionsChecks(verbose=True, reports=enable_reports, report_formats=report_formats if report_formats else None)) +else: + print('ℹ️ CDK Nag: Disabled (set ENABLE_CDK_NAG=true to enable)') + +app.synth() diff --git a/python-test-samples/cns427-testable-serverless-architecture/cdk.json b/python-test-samples/cns427-testable-serverless-architecture/cdk.json new file mode 100644 index 00000000..4345bb45 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/cdk.json @@ -0,0 +1,61 @@ +{ + "app": "python3 app.py", + "watch": { + "include": [ + "**" + ], + "exclude": [ + "README.md", + "cdk*.json", + "requirements*.txt", + "source.bat", + "**/__pycache__", + "**/.venv" + ] + }, + "context": { + "project_name": "cns427-task-api", + "environment": "dev", + "region": "us-west-2", + "@aws-cdk/aws-lambda:recognizeLayerVersion": 
true, + "@aws-cdk/core:checkSecretUsage": true, + "@aws-cdk/core:target-partitions": [ + "aws", + "aws-cn" + ], + "@aws-cdk-containers/ecs-service-extensions:enableDefaultLogDriver": true, + "@aws-cdk/aws-ec2:uniqueImdsv2TemplateName": true, + "@aws-cdk/aws-ecs:arnFormatIncludesClusterName": true, + "@aws-cdk/aws-iam:minimizePolicies": true, + "@aws-cdk/core:validateSnapshotRemovalPolicy": true, + "@aws-cdk/aws-codepipeline:crossAccountKeyAliasStackSafeResourceName": true, + "@aws-cdk/aws-s3:createDefaultLoggingPolicy": true, + "@aws-cdk/aws-sns-subscriptions:restrictSqsDescryption": true, + "@aws-cdk/aws-apigateway:disableCloudWatchRole": false, + "@aws-cdk/core:enablePartitionLiterals": true, + "@aws-cdk/aws-events:eventsTargetQueueSameAccount": true, + "@aws-cdk/aws-iam:standardizedServicePrincipals": true, + "@aws-cdk/aws-ecs:disableExplicitDeploymentControllerForCircuitBreaker": true, + "@aws-cdk/aws-iam:importedRoleStackSafeDefaultPolicyName": true, + "@aws-cdk/aws-s3:serverAccessLogsUseBucketPolicy": true, + "@aws-cdk/aws-route53-patters:useCertificate": true, + "@aws-cdk/customresources:installLatestAwsSdkDefault": false, + "@aws-cdk/aws-rds:databaseProxyUniqueResourceName": true, + "@aws-cdk/aws-codedeploy:removeAlarmsFromDeploymentGroup": true, + "@aws-cdk/aws-apigateway:authorizerChangeDeploymentLogicalId": true, + "@aws-cdk/aws-ec2:launchTemplateDefaultUserData": true, + "@aws-cdk/aws-secretsmanager:useAttachedSecretResourcePolicyForSecretTargetAttachments": true, + "@aws-cdk/aws-redshift:columnId": true, + "@aws-cdk/aws-stepfunctions-tasks:enableLoggingConfiguration": true, + "@aws-cdk/aws-ec2:restrictDefaultSecurityGroup": true, + "@aws-cdk/aws-apigateway:requestValidatorUniqueId": true, + "@aws-cdk/aws-kms:aliasNameRef": true, + "@aws-cdk/aws-autoscaling:generateLaunchTemplateInsteadOfLaunchConfig": true, + "@aws-cdk/core:includePrefixInUniqueNameGeneration": true, + "@aws-cdk/aws-efs:denyAnonymousAccess": true, + "@aws-cdk/aws-opensearchservice:enableLogging": true, + "@aws-cdk/aws-nordicapis-apigateway:authorizerChangeDeploymentLogicalId": true, + "@aws-cdk/aws-lambda:automaticAsyncInvocation": true, + "@aws-cdk/aws-ecs:removeDefaultDeploymentAlarm": false + } +} \ No newline at end of file diff --git a/python-test-samples/cns427-testable-serverless-architecture/docs/ai-dlc.md b/python-test-samples/cns427-testable-serverless-architecture/docs/ai-dlc.md new file mode 100644 index 00000000..b392808e --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/docs/ai-dlc.md @@ -0,0 +1,570 @@ +# AI-DLC Implementation Guide + +This document explains how the AI-Driven Development Lifecycle (AI-DLC) methodology was applied to develop this serverless application, including lessons learned and best practices. + +## Table of Contents + +- [What is AI-DLC](#what-is-ai-dlc) +- [Understanding AI Validation Scores](#understanding-ai-validation-scores) +- [Why AI-DLC for This Project](#why-ai-dlc-for-this-project) +- [The Three Phases](#the-three-phases) +- [How We Applied AI-DLC](#how-we-applied-ai-dlc) +- [Key Outcomes](#key-outcomes) +- [Lessons Learned](#lessons-learned) +- [Best Practices](#best-practices) + +## What is AI-DLC + +**AI-Driven Development Lifecycle (AI-DLC)** is a structured methodology for using AI assistants throughout the software development lifecycle. 
It provides a framework for: + +- **Structured AI collaboration** at each phase of development +- **Validation checkpoints** to ensure quality +- **Measurable outcomes** at each stage +- **Continuous improvement** through feedback loops + +### Core Principles + +1. **Phase-Based Approach**: Follow traditional SDLC phases (Inception, Construction, Operation) +2. **AI as Validator**: Use AI to validate decisions, not just generate code +3. **Measurable Metrics**: Track concrete metrics at each phase +4. **Feedback Loops**: Use insights from later phases to inform earlier ones + +### How AI Scoring Works + +At each phase, the AI agent evaluates the work against **best practices provided by the development team**. The scoring is not arbitrary - it's based on specific criteria: + +**Inception Phase Scoring (Architecture)**: +- Layer separation (handler, domain, integration) +- Pure business logic (no AWS dependencies in domain) +- Testability (can inject fakes/mocks) +- Protocol-based interfaces +- Dependency injection support + +**Construction Phase Scoring (Testing)**: +- Test distribution (honeycomb model: 60% integration, 30% unit, 10% e2e) +- Layer-appropriate testing +- Real AWS integration tests +- Error simulation coverage +- Test execution speed + +**Operation Phase Scoring (Bug Analysis)**: +- Bug distribution by component +- Bug severity analysis +- Testing gap identification +- Risk-based prioritization +- Measurable improvement targets + +The AI agent receives these best practices as context and evaluates the codebase against them, providing a score (e.g., 7-8/10) with specific feedback on what's working and what needs improvement. + +## Understanding AI Validation Scores + +Throughout this document, you'll see scores like "7-8/10" or "8/10". These are **not arbitrary ratings** - they're based on specific best practices we provided to the AI agent. + +### How Scoring Works + +1. **We define the criteria**: The development team provides best practices for each phase +2. **AI evaluates against criteria**: The agent checks the codebase against these standards +3. **Score reflects compliance**: Higher scores mean better adherence to best practices +4. 
**Feedback is specific**: AI explains what's working and what needs improvement + +### Scoring Criteria by Phase + +**Inception (Architecture) - Out of 10 points:** +- Layer separation (2 points): Handler, domain, integration clearly separated +- Pure business logic (2 points): Domain has no AWS dependencies +- Testability (2 points): Can inject fakes/mocks for testing +- Protocol interfaces (2 points): Uses protocols for contracts +- Dependency injection (2 points): Supports DI for testing + +**Construction (Testing) - Out of 10 points:** +- Test distribution (3 points): Follows honeycomb model (60/30/10) +- Layer-appropriate tests (2 points): Right tests for each layer +- Real AWS integration (2 points): Tests against real services +- Error coverage (2 points): Simulates AWS errors +- Test speed (1 point): Unit tests run in milliseconds + +**Operation (Bug Analysis) - Out of 10 points:** +- Pattern identification (3 points): Finds bug clusters +- Risk assessment (2 points): Prioritizes by severity and impact +- Architecture validation (2 points): Bugs validate design decisions +- Testing gap analysis (2 points): Identifies missing tests +- Actionable roadmap (1 point): Clear next steps + +### Example: Inception Phase Score + +**Score: 7-8/10** + +Breakdown: +- βœ… Layer separation: 2/2 (clean separation achieved) +- βœ… Pure business logic: 2/2 (no AWS in domain) +- βœ… Testability: 2/2 (can inject fakes) +- βœ… Protocol interfaces: 1/2 (mostly using protocols) +- ⚠️ Dependency injection: 0-1/2 (domain creates own dependencies) + +**Interpretation**: Ready for Construction phase. Minor coupling issue doesn't block progress. + +## Why AI-DLC for This Project + +This project demonstrates serverless testing best practices, making it ideal for AI-DLC because: + +1. **Complex Architecture**: Hexagonal architecture requires careful design +2. **Testing Strategy**: Honeycomb model needs validation +3. **Multiple Concerns**: Architecture, testing, and operations must align +4. 
**Demonstrable Results**: Can show concrete improvements + +### Traditional Approach Problems + +Without AI-DLC, developers often: +- ❌ Mix architecture concerns +- ❌ Over-rely on unit tests +- ❌ Miss integration issues +- ❌ Lack systematic validation + +### AI-DLC Solution + +With AI-DLC, we: +- βœ… Validate architecture decisions +- βœ… Confirm testing strategy +- βœ… Analyze production patterns +- βœ… Measure improvements + +## The Three Phases + +``` +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ INCEPTION PHASE β”‚ +β”‚ Requirements & Architecture β”‚ +β”‚ β”‚ +β”‚ πŸ€– AI validates architecture decisions β”‚ +β”‚ βœ… Layer separation β”‚ +β”‚ βœ… Pure business logic β”‚ +β”‚ Score: 7-8/10 β†’ Ready for Construction β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + β”‚ + β–Ό +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ CONSTRUCTION PHASE β”‚ +β”‚ Implementation & Testing β”‚ +β”‚ β”‚ +β”‚ πŸ€– AI validates testing strategy β”‚ +β”‚ βœ… 35% integration (honeycomb validated) β”‚ +β”‚ βœ… Layer-appropriate testing β”‚ +β”‚ Score: 8/10 β†’ Ready for Operation β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + β”‚ + β–Ό +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ OPERATION PHASE β”‚ +β”‚ Deploy, Monitor, Optimize β”‚ +β”‚ β”‚ +β”‚ πŸ€– AI analyzes production bugs β”‚ +β”‚ πŸ”₯ 75% bugs in integration layer β”‚ +β”‚ 🎯 Prioritized roadmap β”‚ +β”‚ πŸ“ˆ Measurable targets β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +``` + +## How We Applied AI-DLC + +### Phase 1: Inception (Architecture Validation) + +**Goal**: Transform monolithic Lambda into testable hexagonal architecture + +**AI Role**: Validate architecture decisions and identify issues + +#### Before: Monolithic Lambda + +```python +# ❌ Everything mixed together +def lambda_handler(event, context): + # HTTP parsing + body = json.loads(event['body']) + + # Business logic + if not body.get('title'): + return {'statusCode': 400} + + # DynamoDB operations + dynamodb = boto3.resource('dynamodb') + table.put_item(Item={...}) + + # EventBridge publishing + events = boto3.client('events') + events.put_events(...) 
+ + # HTTP response + return {'statusCode': 201} +``` + +**Problems Identified by AI**: +- Mixed concerns (HTTP, business logic, AWS) +- Hard to test (requires extensive mocking) +- Tight coupling to AWS services +- Business logic not reusable + +#### After: Hexagonal Architecture + +```python +# βœ… Clean separation +# Handler Layer +def lambda_handler(event, context): + body = json.loads(event['body']) + task = task_service.create_task(title=body['title']) + return {'statusCode': 201, 'body': json.dumps(task)} + +# Domain Layer +class TaskService: + def create_task(self, title: str) -> Task: + task = Task(title=title) + self.repository.create_task(task) + self.event_publisher.publish_task_created(task) + return task + +# Integration Layer +class DynamoDBTaskRepository: + def create_task(self, task: Task) -> Task: + self.table.put_item(Item={...}) + return task +``` + +**AI Validation Results**: +- βœ… Layer separation achieved +- βœ… Business logic is pure (no AWS dependencies) +- βœ… Tests 10x faster (milliseconds vs seconds) +- ⚠️ Domain still creates repositories internally (minor coupling) +- **Score: 7-8/10** β†’ Ready for Construction + +#### Key Decisions + +1. **Hexagonal Architecture**: Separate handler, domain, and integration layers +2. **Protocol Interfaces**: Use Python protocols for dependency contracts +3. **Self-Initializing Services**: Services create their own dependencies but allow injection +4. **In-Memory Fakes**: Use fakes instead of mocks for testing + +### Phase 2: Construction (Testing Strategy Validation) + +**Goal**: Implement comprehensive testing following honeycomb model + +**AI Role**: Validate testing distribution and identify gaps + +#### Testing Strategy + +``` +βœ… HONEYCOMB MODEL (Better for serverless) + + E2E Tests (10%) + / Critical flows \ + / \ + / Integration Tests (60%) \ + | Service Boundaries. | + | Real AWS Services | + | Error Handling, Scale | + \ / + \ / + \ Unit Tests (30%) / + Pure Business Logic + Fast, Isolated +``` + +#### Actual Test Distribution + +``` +Integration Tests: 35% (37 tests) +β”œβ”€ Handler Tests: 17 tests (API Gateway boundary) +β”œβ”€ Event Contracts: 4 tests (EventBridge boundary) +└─ AWS Services: 16 tests (DynamoDB + EventBridge) + +Unit Tests: 62% (66 tests) +β”œβ”€ Domain Logic: Pure business rules +└─ Data Models: Validation + +E2E Tests: 3% (3 tests) +``` + +**AI Validation Results**: +- βœ… Honeycomb distribution validated (35% integration, 62% unit, 3% E2E) +- βœ… Layer-appropriate testing +- βœ… Real AWS + fakes strategy working +- βœ… Test harness for EventBridge validation +- ⚠️ Missing: concurrency tests, pagination tests +- **Score: 8/10** β†’ Ready for Operation + +#### Key Decisions + +1. **Honeycomb Over Pyramid**: Focus on integration tests (35% vs traditional 20%) +2. **Real AWS Testing**: Test against real DynamoDB and EventBridge +3. **Test Harness**: Build infrastructure to capture and verify events +4. **Error Simulation**: Use fakes to simulate AWS errors predictably +5. 
**In-Memory Fakes**: Fast unit tests with realistic behavior + +### Phase 3: Operation (Bug Analysis & Prioritization) + +**Goal**: Analyze production bugs and prioritize improvements + +**AI Role**: Identify patterns and create risk-based roadmap + +#### How We Used AI for Bug Analysis + +We provided the AI agent with a structured dataset of 20 production bugs containing: +- Bug ID, title, and description +- Severity level (critical, high, medium, low) +- Component and file location +- Bug type (validation_error, logic_error, data_integrity, etc.) +- Root cause analysis +- Testing gaps identified +- Whether it represents a layer violation + +The AI agent analyzed this data to: +1. **Identify patterns** - Which components have the most bugs? +2. **Assess risk** - Which bugs pose the highest risk? +3. **Validate architecture** - Do bug patterns support our design decisions? +4. **Prioritize fixes** - What should we fix first? +5. **Identify testing gaps** - What tests would have caught these bugs? + +#### Bug Distribution Analysis + +``` +Production Bugs: 20 total + +By Severity: +πŸ”΄ Critical: 1 bug (5%) +🟠 High: 7 bugs (35%) +🟑 Medium: 11 bugs (55%) +🟒 Low: 1 bug (5%) + +By Component: +πŸ”₯ integrations.py: 8 bugs (40%) ← CRITICAL RISK +πŸ”₯ domain_logic.py: 7 bugs (35%) ← HIGH RISK +⚠️ task_handler.py: 3 bugs (15%) ← MEDIUM RISK +βœ… models.py: 2 bugs (10%) ← LOW RISK + +By Bug Type: +β€’ Error handling: 2 bugs (silent failures, timeouts) +β€’ Data integrity: 3 bugs (orphaned refs, race conditions) +β€’ Validation: 5 bugs (input, business rules) +β€’ Performance: 3 bugs (memory leaks, cold starts) +β€’ Architecture: 2 bugs (layer violations, unused code) +β€’ Other: 5 bugs (serialization, observability, etc.) +``` + +**Key AI Insights**: +- **75% of bugs in integration layer** (integrations.py + domain_logic.py service boundaries) +- This validates our 35% integration test focus on boundaries +- Integration layer is highest risk area +- Honeycomb model is correct for serverless +- Most critical bug (BUG-005): Silent EventBridge failures +- Testing gaps: 8 unit test gaps, 6 integration test gaps + +#### Example Bugs Analyzed + +**BUG-005 (Critical)**: EventBridge silent failures +```json +{ + "severity": "critical", + "component": "EventPublisher.publish_event", + "file": "integrations.py", + "root_cause": "Exception caught but not re-raised, causing silent failures", + "testing_gap": "No integration tests for EventBridge error conditions" +} +``` + +**BUG-007 (High)**: Race condition in concurrent updates +```json +{ + "severity": "high", + "component": "TaskRepository.update_task", + "file": "integrations.py", + "root_cause": "No optimistic locking or conditional updates", + "testing_gap": "No concurrency testing" +} +``` + +**BUG-003 (High)**: Missing dependency validation +```json +{ + "severity": "high", + "component": "TaskService.create_task", + "file": "domain_logic.py", + "root_cause": "_validate_dependencies only checks circular deps, not existence", + "testing_gap": "Missing integration tests for dependency existence validation" +} +``` + +> **Note**: The complete bug dataset with all 20 bugs is available in `ai-dlc/bugs-data.json`. This structured data was used by the AI agent to perform the analysis and generate the prioritized roadmap. + +#### AI-Generated Roadmap + +``` +IMMEDIATE (Week 1) - Critical & High: +1. EventBridge silent failure detection (BUG-005) +2. DynamoDB optimistic locking (BUG-007) +3. 
Dependency existence validation (BUG-003) + +HIGH PRIORITY (Week 2-3) - High & Medium: +4. DynamoDB pagination (BUG-011) +5. Handler Content-Type validation (BUG-010) +6. Business rule usage validation (BUG-006) + +STANDARD (Week 4+) - Medium & Low: +7-9. Performance, edge cases, minor issues +``` + +**AI Validation Results**: +- βœ… Bug patterns validate honeycomb model +- βœ… Risk-based prioritization +- βœ… Measurable targets +- βœ… Actionable roadmap +- **Outcome**: 50% bug reduction target in 4 weeks + +## Key Outcomes + +### Measurable Results + +| Metric | Before | After | Improvement | +|--------|--------|-------|-------------| +| **Architecture Score** | 3/10 | 7-8/10 | +133% | +| **Test Speed** | Seconds | Milliseconds | 10x faster | +| **Integration Test Coverage** | 20% | 35% | +75% | +| **Bug Detection** | Post-production | Pre-production | Shift-left | +| **Time to Validate** | Hours (manual) | Minutes (AI) | 10x faster | + +### Qualitative Improvements + +1. **Architecture**: Clean separation enables independent testing +2. **Testing**: Honeycomb model catches real issues +3. **Confidence**: Real AWS tests validate actual behavior +4. **Maintainability**: Changes isolated to specific layers +5. **Velocity**: Fast unit tests enable rapid iteration + +## Lessons Learned + +### What Worked Well + +1. **Structured Validation**: AI validation at each phase caught issues early +2. **Measurable Metrics**: Concrete numbers showed progress +3. **Honeycomb Validation**: Bug data proved honeycomb > pyramid for serverless +4. **Real AWS Testing**: Caught issues mocks would miss +5. **Iterative Approach**: Each phase informed the next + +### Challenges + +1. **Initial Resistance**: Team initially skeptical of honeycomb model +2. **Test Infrastructure**: Building test harness took time +3. **AWS Costs**: Real AWS testing has costs (mitigated with cleanup) +4. **Learning Curve**: New patterns required team training +5. **Tooling**: Had to build custom test utilities + +### Surprises + +1. **Bug Distribution**: 75% integration bugs validated our approach +2. **Test Speed**: In-memory fakes were faster than expected +3. **AI Accuracy**: AI validation was highly accurate +4. **Team Adoption**: Team embraced approach after seeing results +5. **Maintainability**: Code became easier to change, not harder + +## Best Practices + +### For AI-DLC Adoption + +1. **Start with Inception**: Don't skip architecture validation +2. **Use Metrics**: Track concrete numbers at each phase +3. **Validate Early**: Catch issues before construction +4. **Iterate**: Use feedback loops between phases +5. **Document**: Keep records of AI interactions and decisions + +### For Serverless Testing + +1. **Embrace Honeycomb**: Focus on integration tests +2. **Test Real AWS**: Don't rely solely on mocks +3. **Build Test Infrastructure**: Invest in test harness +4. **Use Fakes for Errors**: Simulate AWS errors predictably +5. **Fast Unit Tests**: Keep business logic tests fast + +### For Architecture + +1. **Separate Concerns**: Handler, domain, integration layers +2. **Protocol Interfaces**: Define contracts, not implementations +3. **Dependency Injection**: Enable testing with fakes +4. **Self-Initializing**: Services work in production without config +5. **Pure Business Logic**: No AWS dependencies in domain + +### For Team Adoption + +1. **Show Results**: Demonstrate improvements with metrics +2. **Start Small**: Begin with one feature or service +3. **Train Team**: Provide examples and documentation +4. 
**Iterate**: Refine approach based on feedback +5. **Celebrate Wins**: Highlight successes + +## Applying AI-DLC to Your Project + +### Step 1: Inception Phase + +1. **Define Current State**: Document existing architecture +2. **Identify Problems**: What makes testing hard? +3. **Propose Solution**: Design new architecture +4. **AI Validation**: Have AI review and score +5. **Iterate**: Refine until score > 7/10 + +### Step 2: Construction Phase + +1. **Implement Architecture**: Build new structure +2. **Write Tests**: Follow honeycomb model +3. **Measure Distribution**: Calculate test percentages +4. **AI Validation**: Have AI review test strategy +5. **Iterate**: Add missing test types + +### Step 3: Operation Phase + +1. **Deploy**: Release to production +2. **Collect Data**: Track bugs and issues +3. **Analyze Patterns**: Where do bugs occur? +4. **AI Analysis**: Have AI create roadmap +5. **Prioritize**: Focus on high-risk areas + +### Step 4: Continuous Improvement + +1. **Review Metrics**: Compare before/after +2. **Identify Gaps**: What's still missing? +3. **Feed Back**: Use insights for next iteration +4. **Refine Process**: Improve AI-DLC application +5. **Share Learnings**: Document and teach + +## Resources + +### Official AI-DLC Resources + +- **[AI-DLC Whitepaper](https://prod.d13rzhkk8cj2z0.amplifyapp.com/)** - Comprehensive methodology guide +- **[Visual Flow Guide](visual-flow-guide.md)** - Diagrams of AI-DLC phases + +### Related Documentation + +- **[Architecture Guide](architecture.md)** - Hexagonal architecture details +- **[Testing Guide](testing-guide.md)** - Honeycomb testing strategy +- **[Deployment Guide](deployment.md)** - Deployment procedures + +### External References + +- [Hexagonal Architecture](https://alistair.cockburn.us/hexagonal-architecture/) - Original pattern +- [Testing Honeycomb](https://engineering.atspotify.com/2018/01/testing-of-microservices/) - Spotify's approach +- [AWS Lambda Best Practices](https://docs.aws.amazon.com/lambda/latest/dg/best-practices.html) - AWS guidance + +## Conclusion + +AI-DLC provided a structured approach to transforming a monolithic serverless application into a well-architected, thoroughly tested system. The key insights: + +1. **Architecture matters**: Hexagonal architecture enables testing +2. **Honeycomb works**: 35% integration tests focus on boundaries where bugs occur +3. **AI validation helps**: Caught issues early, saved time +4. **Metrics prove value**: Concrete numbers show improvement +5. **Feedback loops work**: Each phase informed the next + +The result is a serverless application that is: +- βœ… Easy to test at multiple levels +- βœ… Maintainable through clean architecture +- βœ… Production-ready with proper error handling +- βœ… Validated by AI at each phase + +--- + +**Want to learn more?** Check out the [Visual Flow Guide](visual-flow-guide.md) for detailed diagrams of how AI-DLC was applied to this project. diff --git a/python-test-samples/cns427-testable-serverless-architecture/docs/architecture.md b/python-test-samples/cns427-testable-serverless-architecture/docs/architecture.md new file mode 100644 index 00000000..b2a47b26 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/docs/architecture.md @@ -0,0 +1,997 @@ +# Architecture Guide + +This document provides a deep dive into the hexagonal architecture pattern used in this serverless application and explains how it enables comprehensive testing. 
+ +## Table of Contents + +- [Overview](#overview) +- [Monorepo Service Pattern](#monorepo-service-pattern) +- [The Problem: Monolithic Lambda](#the-problem-monolithic-lambda) +- [The Solution: Hexagonal Architecture](#the-solution-hexagonal-architecture) +- [Layer Details](#layer-details) +- [Dependency Flow](#dependency-flow) +- [Benefits for Testing](#benefits-for-testing) +- [Implementation Patterns](#implementation-patterns) + +## Overview + +This application uses a **monorepo service pattern** where each service implements **hexagonal architecture** (also known as ports and adapters). This combination provides: + +- **Service Boundaries**: Each service is a complete, independent hexagon +- **Testable**: Business logic can be tested without AWS +- **Maintainable**: Changes to AWS services don't affect business logic +- **Flexible**: Easy to swap implementations (real AWS, fakes, mocks) +- **Clear**: Each layer has a single responsibility +- **Optimized Packaging**: Each Lambda only includes its service code + +## Monorepo Service Pattern + +### Why Services, Not Shared Modules? + +Each service is a **complete hexagon** with its own domain, models, and handler. Only infrastructure adapters are shared. + +``` +services/ +β”œβ”€β”€ task_service/ # Complete hexagon #1 +β”‚ β”œβ”€β”€ handler.py # Entry point +β”‚ β”œβ”€β”€ domain/ # Business logic +β”‚ β”‚ β”œβ”€β”€ task_service.py +β”‚ β”‚ β”œβ”€β”€ business_rules.py +β”‚ β”‚ └── exceptions.py +β”‚ β”œβ”€β”€ models/ # Domain models +β”‚ β”‚ β”œβ”€β”€ task.py +β”‚ β”‚ └── api.py +β”‚ └── requirements.txt # Service dependencies +β”‚ +β”œβ”€β”€ notification_service/ # Complete hexagon #2 +β”‚ β”œβ”€β”€ handler.py # Entry point +β”‚ β”œβ”€β”€ domain/ # Business logic +β”‚ β”‚ └── notification_service.py +β”‚ └── requirements.txt # Service dependencies +β”‚ +shared/ # Shared infrastructure only +└── integration/ # Reusable adapters + β”œβ”€β”€ dynamodb_adapter.py + └── eventbridge_adapter.py +``` + +### Service Boundaries Diagram + +``` +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ TASK SERVICE HEXAGON β”‚ +β”‚ β”‚ +β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ +β”‚ β”‚ handler.py β”‚ β”‚ +β”‚ β”‚ (Lambda Entry Point) β”‚ β”‚ +β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ +β”‚ β”‚ β”‚ +β”‚ β–Ό β”‚ +β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ +β”‚ β”‚ domain/ β”‚ β”‚ +β”‚ β”‚ β€’ task_service.py (orchestration) β”‚ β”‚ +β”‚ β”‚ β€’ business_rules.py (validation) β”‚ β”‚ +β”‚ β”‚ β€’ exceptions.py (domain errors) β”‚ β”‚ +β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ +β”‚ β”‚ β”‚ +β”‚ β–Ό β”‚ +β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ +β”‚ β”‚ models/ β”‚ β”‚ +β”‚ β”‚ β€’ task.py (Task, TaskStatus, events) 
β”‚ β”‚ +β”‚ β”‚ β€’ api.py (API request/response models) β”‚ β”‚ +β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ +β”‚ β”‚ β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + β”‚ + β”‚ Uses shared adapters + β–Ό +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ SHARED INFRASTRUCTURE β”‚ +β”‚ β”‚ +β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ +β”‚ β”‚ shared/integration/ β”‚ β”‚ +β”‚ β”‚ β€’ dynamodb_adapter.py (persistence) β”‚ β”‚ +β”‚ β”‚ β€’ eventbridge_adapter.py (events) β”‚ β”‚ +β”‚ β”‚ β€’ interfaces.py (protocols) β”‚ β”‚ +β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + β”‚ + β”‚ Uses shared adapters + β–Ό +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ NOTIFICATION SERVICE HEXAGON β”‚ +β”‚ β”‚ +β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ +β”‚ β”‚ handler.py β”‚ β”‚ +β”‚ β”‚ (Lambda Entry Point) β”‚ β”‚ +β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ +β”‚ β”‚ β”‚ +β”‚ β–Ό β”‚ +β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ +β”‚ β”‚ domain/ β”‚ β”‚ +β”‚ β”‚ β€’ notification_service.py (event processing) β”‚ β”‚ +β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +``` + +### Why Each Service is a Complete Hexagon + +**Task Service** owns: +- Task creation, updates, deletion logic +- Task validation and business rules +- Task domain models and events +- API request/response contracts + +**Notification Service** owns: +- Event processing logic +- Notification routing +- Event-specific business rules + +**Shared Infrastructure** provides: +- DynamoDB adapter (used by both services) +- EventBridge adapter (used by both services) +- Protocol interfaces + +### Example Imports for Each Service + +**Task Service** (`services/task_service/handler.py`): +```python +# Service-specific imports 
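+# (each Lambda package contains only its own service's domain and models,
+#  plus the shared adapters imported below)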
+from services.task_service.domain.task_service import TaskService +from services.task_service.models.task import Task, TaskStatus +from services.task_service.models.api import CreateTaskRequest + +# Shared infrastructure imports +from shared.integration.dynamodb_adapter import DynamoDBTaskRepository +from shared.integration.eventbridge_adapter import EventBridgePublisher +``` + +**Notification Service** (`services/notification_service/handler.py`): +```python +# Service-specific imports +from services.notification_service.domain.notification_service import NotificationService + +# Shared infrastructure imports (if needed) +from shared.integration.eventbridge_adapter import EventBridgePublisher +``` + +**Shared Integration** (`shared/integration/dynamodb_adapter.py`): +```python +# Can import from any service's models +from services.task_service.models.task import Task, TaskStatus + +# Implements shared protocols +from shared.integration.interfaces import TaskRepositoryProtocol +``` + +### Shared Infrastructure vs Service-Specific Code + +**Shared Infrastructure** (`shared/`): +- Infrastructure adapters (DynamoDB, EventBridge) +- Protocol interfaces +- AWS SDK interactions +- Error handling utilities +- **Reason**: Reusable across services, no business logic + +**Service-Specific Code** (`services/*/`): +- Domain logic and business rules +- Domain models and events +- API contracts +- Service orchestration +- **Reason**: Belongs to bounded context, service-specific + +### Benefits of This Pattern + +1. **Independent Deployment** + - Each service can be deployed separately + - Changes to one service don't affect others + - Faster CI/CD pipelines + +2. **Optimized Lambda Packages** + - Task service Lambda: ~30% smaller (no notification code) + - Notification service Lambda: ~40% smaller (no API/models) + - Faster cold starts + +3. **Clear Bounded Contexts** + - Each service has clear responsibilities + - Domain models belong to their service + - Easy to understand ownership + +4. **Scalable Testing** + - Test services in isolation + - Shared adapters tested once + - Integration tests verify service boundaries + +5. **Team Scalability** + - Different teams can own different services + - Clear interfaces between services + - Reduced merge conflicts + +### Cross-Service Communication + +Services communicate through **events**, not by sharing domain models. This maintains loose coupling while enabling collaboration. + +**Pattern: Event-Based Communication** + +``` +Task Service (Publisher) Notification Service (Subscriber) + β”‚ β”‚ + β”‚ 1. Creates TaskEvent β”‚ + β”‚ (owns the model) β”‚ + β”‚ β”‚ + β”‚ 2. to_eventbridge_entry() β”‚ + β”‚ ↓ β”‚ + β”‚ EventBridge β”‚ + β”‚ ↓ β”‚ + β”‚ 3. from_eventbridge_event() + β”‚ β”‚ + β”‚ 4. 
Processes event β”‚ + β”‚ (doesn't need Task model) +``` + +**Key Principle: Domain Ownership** + +- **Task models stay in Task Service** - They belong to that bounded context +- **Events are the contract** - Services communicate via event structure, not domain models +- **Loose coupling** - Notification Service doesn't depend on Task internals + +**Example: TaskEvent Model** (`services/task_service/models/task.py`): + +```python +@dataclass +class TaskEvent: + """Event model with bidirectional conversion.""" + + def to_eventbridge_entry(self, event_bus_name: str = None) -> Dict[str, Any]: + """Convert to EventBridge format for publishing.""" + return { + 'Source': self.source, + 'DetailType': self.event_type, + 'Detail': json.dumps(self.task_data), + 'EventBusName': event_bus_name + } + + @classmethod + def from_eventbridge_event(cls, event: Dict[str, Any]) -> 'TaskEvent': + """Parse EventBridge event for consuming.""" + return cls( + event_type=event.get('detail-type', '').replace('TEST-', ''), + task_data=event.get('detail', {}), + source=event.get('source', 'cns427-task-api') + ) +``` + +**Usage in Notification Service**: + +```python +# Notification handler doesn't need Task model +from services.task_service.models.task import TaskEvent # Only the event! + +def lambda_handler(event, context): + # Parse event using model's utility method + task_event = TaskEvent.from_eventbridge_event(event) + + # Process event data (just a dict, not Task model) + notification_service.process_task_event( + task_event.event_type, + task_event.task_data # Dict, not Task object + ) +``` + +**Why This Works**: + +βœ… **Domain Ownership**: Task Service owns Task model and events +βœ… **Loose Coupling**: Notification Service only depends on event structure +βœ… **Utility Methods**: Event model provides parsing/serialization helpers +βœ… **No Shared Models**: Services don't share domain models, only event contracts +βœ… **Testability**: Easy to test event parsing independently + +**What NOT to Do**: + +❌ **Don't move models to shared/** - Models belong to their service's bounded context +❌ **Don't import Task in Notification Service** - Use event data (dict) instead +❌ **Don't create shared domain models** - Each service owns its domain + +## The Problem: Monolithic Lambda + +### Before: Everything Mixed Together + +```python +# ❌ BAD: Monolithic Lambda Handler +def lambda_handler(event, context): + # HTTP parsing + body = json.loads(event['body']) + task_id = event['pathParameters']['id'] + + # Business logic + if not body.get('title'): + return {'statusCode': 400, 'body': 'Title required'} + + # DynamoDB operations + dynamodb = boto3.resource('dynamodb') + table = dynamodb.Table('tasks-table') + table.put_item(Item={ + 'task_id': task_id, + 'title': body['title'], + 'status': 'pending' + }) + + # EventBridge publishing + events = boto3.client('events') + events.put_events(Entries=[{ + 'Source': 'task-api', + 'DetailType': 'TaskCreated', + 'Detail': json.dumps({'task_id': task_id}) + }]) + + # HTTP response + return { + 'statusCode': 201, + 'body': json.dumps({'task_id': task_id}) + } +``` + +### Problems with This Approach + +1. **Hard to Test** + - Requires mocking boto3 clients + - Complex mock setup for each test + - Mocks don't behave like real AWS services + - Tests are brittle and break easily + +2. **Slow Tests** + - Must mock AWS SDK for every test + - Mock setup adds overhead + - Can't test business logic in isolation + +3. 
**Tight Coupling** + - Business logic mixed with AWS code + - Can't change AWS services without changing business logic + - Hard to understand what the code does + +4. **Poor Maintainability** + - Changes ripple across layers + - Hard to add new features + - Difficult to debug + +## The Solution: Hexagonal Architecture + +### After: Layered Separation + +``` +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ HANDLER LAYER β”‚ +β”‚ (task_handler.py) β”‚ +β”‚ β”‚ +β”‚ Responsibilities: β”‚ +β”‚ β€’ Parse HTTP requests (API Gateway events) β”‚ +β”‚ β€’ Validate request format β”‚ +β”‚ β€’ Delegate to domain layer β”‚ +β”‚ β€’ Format HTTP responses β”‚ +β”‚ β€’ Handle HTTP errors (400, 404, 500) β”‚ +β”‚ β”‚ +β”‚ Dependencies: Domain services only β”‚ +β”‚ Testing: Mock API Gateway events, inject fake services β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + β”‚ + β”‚ Calls + β–Ό +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ DOMAIN LAYER β”‚ +β”‚ (task_service.py, business_rules.py) β”‚ +β”‚ β”‚ +β”‚ Responsibilities: β”‚ +β”‚ β€’ Pure business logic β”‚ +β”‚ β€’ Business rules and validation β”‚ +β”‚ β€’ Orchestrate operations β”‚ +β”‚ β€’ No AWS dependencies β”‚ +β”‚ β€’ No HTTP knowledge β”‚ +β”‚ β”‚ +β”‚ Dependencies: Protocol interfaces only β”‚ +β”‚ Testing: In-memory fakes, no AWS required β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + β”‚ + β”‚ Uses protocols + β–Ό +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ INTEGRATION LAYER β”‚ +β”‚ (dynamodb_adapter.py, eventbridge_adapter.py) β”‚ +β”‚ β”‚ +β”‚ Responsibilities: β”‚ +β”‚ β€’ Implement protocol interfaces β”‚ +β”‚ β€’ AWS SDK operations β”‚ +β”‚ β€’ Error handling and retries β”‚ +β”‚ β€’ Data transformation (domain ↔ AWS) β”‚ +β”‚ β”‚ +β”‚ Dependencies: AWS SDK (boto3) β”‚ +β”‚ Testing: Real AWS services or error-simulating fakes β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +``` + +## Layer Details + +### Handler Layer + +**Location**: `services/*/handler.py` (one per service) + +**Purpose**: Entry point for each Lambda function + +**Example - Task Service**: +```python +# services/task_service/handler.py +from services.task_service.domain.task_service import TaskService + +# Initialize service (self-initializing pattern) +task_service = TaskService() + +def lambda_handler(event: Dict[str, Any], context: Any) -> Dict[str, Any]: + """Handle API Gateway requests.""" + try: + # Parse HTTP request + http_method = event['httpMethod'] + path = event['path'] + + # Route to appropriate handler + if http_method == 'POST' and path == '/tasks': + return handle_create_task(event) + elif http_method == 'GET' and '/tasks/' in path: + return handle_get_task(event) + # 
... more routes + + except Exception as e: + return { + 'statusCode': 500, + 'body': json.dumps({'error': str(e)}) + } + +def handle_create_task(event: Dict[str, Any]) -> Dict[str, Any]: + """Handle task creation.""" + # Parse request body + body = json.loads(event['body']) + + # Delegate to domain + task = task_service.create_task( + title=body['title'], + description=body.get('description'), + priority=body.get('priority', 'medium') + ) + + # Format response + return { + 'statusCode': 201, + 'body': json.dumps({ + 'task_id': task.task_id, + 'title': task.title, + 'status': task.status + }) + } +``` + +**Key Characteristics**: +- βœ… Only knows about HTTP (API Gateway events) +- βœ… Delegates all business logic to domain +- βœ… No AWS SDK calls (except through domain) +- βœ… Simple request/response transformation + +### Domain Layer + +**Location**: `services/*/domain/` (service-specific) + +**Purpose**: Pure business logic with no infrastructure dependencies + +**Example - Task Service Domain**: +```python +# services/task_service/domain/task_service.py +from shared.integration.interfaces import ( + TaskRepositoryProtocol, + EventPublisherProtocol +) +from services.task_service.models.task import Task, TaskStatus + +class TaskService: + """Domain service for task operations.""" + + def __init__( + self, + repository: Optional[TaskRepositoryProtocol] = None, + event_publisher: Optional[EventPublisherProtocol] = None + ): + """Initialize with dependencies (self-initializing pattern).""" + from shared.integration.dynamodb_adapter import DynamoDBTaskRepository + from shared.integration.eventbridge_adapter import EventBridgePublisher + + self.repository = repository or DynamoDBTaskRepository() + self.event_publisher = event_publisher or EventBridgePublisher() + + def create_task( + self, + title: str, + description: Optional[str] = None, + priority: str = 'medium' + ) -> Task: + """Create a new task.""" + # Business logic: Create task + task = Task( + title=title, + description=description, + priority=priority, + status=TaskStatus.PENDING + ) + + # Business rule: Validate task + if not task.title or len(task.title) < 3: + raise ValueError("Title must be at least 3 characters") + + # Persist task + created_task = self.repository.create_task(task) + + # Publish event + self.event_publisher.publish_task_created(created_task) + + return created_task +``` + +**Key Characteristics**: +- βœ… Pure business logic +- βœ… No AWS SDK imports +- βœ… Uses protocol interfaces +- βœ… Self-initializing with dependency injection support +- βœ… Easy to test with fakes + +### Integration Layer + +**Location**: `shared/integration/` (shared across services) + +**Purpose**: Implement protocol interfaces using AWS services + +**Example - Protocol Definition**: +```python +# shared/integration/interfaces.py +from typing import Protocol, Optional, List +from services.task_service.models.task import Task + +class TaskRepositoryProtocol(Protocol): + """Protocol for task persistence.""" + + def create_task(self, task: Task) -> Task: + """Create a new task.""" + ... + + def get_task(self, task_id: str) -> Optional[Task]: + """Get task by ID.""" + ... + + def update_task(self, task: Task) -> Task: + """Update existing task.""" + ... + + def delete_task(self, task_id: str) -> None: + """Delete task by ID.""" + ... + + def list_tasks(self, limit: int = 100) -> List[Task]: + """List all tasks.""" + ... 
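+
+# The domain service shown earlier also depends on an event-publisher
+# port from this same module. Its full definition is not reproduced in
+# this guide, so the following is a sketch inferred from how
+# TaskService uses it; the real protocol may declare more methods:
+class EventPublisherProtocol(Protocol):
+    """Protocol for publishing task domain events."""
+
+    def publish_task_created(self, task: Task) -> None:
+        """Publish a task-created event."""
+        ...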
+```
+
+**Example - DynamoDB Implementation**:
+```python
+# shared/integration/dynamodb_adapter.py
+import os
+
+import boto3
+from botocore.exceptions import ClientError
+from typing import Optional, List
+
+from services.task_service.models.task import Task
+from shared.integration.interfaces import TaskRepositoryProtocol
+# RepositoryError (raised below) is this project's adapter-level error type
+
+class DynamoDBTaskRepository:
+    """DynamoDB implementation of task repository."""
+
+    def __init__(self, table_name: Optional[str] = None):
+        """Initialize with DynamoDB table."""
+        self.table_name = table_name or os.getenv('TASKS_TABLE_NAME')
+        self.dynamodb = boto3.resource('dynamodb')
+        self.table = self.dynamodb.Table(self.table_name)
+
+    def create_task(self, task: Task) -> Task:
+        """Create task in DynamoDB."""
+        try:
+            self.table.put_item(
+                Item={
+                    'task_id': task.task_id,
+                    'title': task.title,
+                    'description': task.description,
+                    'priority': task.priority,
+                    'status': task.status,
+                    'version': task.version,
+                    'created_at': task.created_at.isoformat(),
+                    'updated_at': task.updated_at.isoformat()
+                }
+            )
+            return task
+        except ClientError as e:
+            # Translate AWS SDK errors into the domain-facing error type
+            raise RepositoryError(f"Failed to create task: {e}")
+
+    # ... other methods
+```
+
+**Key Characteristics**:
+- βœ… Implements protocol interfaces
+- βœ… Contains all AWS SDK code
+- βœ… Handles AWS-specific errors
+- βœ… Transforms between domain models and AWS formats
+
+## Dependency Flow
+
+### Production Flow
+
+```
+API Gateway Event
+         β”‚
+         β–Ό
+β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”
+β”‚ Lambda Handler   β”‚  ← Entry point
+β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜
+         β”‚
+         β”‚ Creates/uses
+         β–Ό
+β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”
+β”‚ TaskService      β”‚  ← Domain logic
+β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜
+         β”‚
+         β”‚ Uses protocols
+         β–Ό
+β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”     β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”
+β”‚ DynamoDB Adapter β”‚     β”‚ EventBridge      β”‚
+β”‚ (Real AWS)       β”‚     β”‚ Adapter          β”‚
+β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜     β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜
+         β”‚                         β”‚
+         β–Ό                         β–Ό
+β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”     β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”
+β”‚ DynamoDB Table   β”‚     β”‚ EventBridge Bus  β”‚
+β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜     β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜
+```
+
+### Unit Test Flow
+
+```
+Test Code
+         β”‚
+         β–Ό
+β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”
+β”‚ TaskService      β”‚  ← Same domain logic
+β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜
+         β”‚
+         β”‚ Inject fakes
+         β–Ό
+β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”     β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”
+β”‚ InMemory         β”‚     β”‚ InMemory         β”‚
+β”‚ Repository       β”‚     β”‚ Publisher        β”‚
+β”‚ (Fake)           β”‚     β”‚ (Fake)           β”‚
+β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜     β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜
+         β”‚                         β”‚
+         β–Ό                         β–Ό
+     Dictionary           List (in memory)
+```
+
+### Integration Test Flow
+
+```
+Test Code
+         β”‚
+         β–Ό
+β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”
+β”‚ Lambda Handler   β”‚  ← Real handler
+β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜
+         β”‚
+         β–Ό
+β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”
+β”‚ TaskService      β”‚  ← Real domain
+β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜
+         β”‚
+         β”‚ Real adapters
+         β–Ό
+β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”     β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”
+β”‚ DynamoDB Adapter β”‚     β”‚ EventBridge      β”‚
+β”‚ (Real AWS)       β”‚     β”‚ Adapter          β”‚
+β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜     β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜
+         β”‚                         β”‚
+         β–Ό                         β–Ό
+β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”     β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”
+β”‚ Test DynamoDB    β”‚     β”‚ Test EventBridge β”‚
+β”‚ Table            β”‚     β”‚ Bus              β”‚
+β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜     β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜
+```
+
+## Benefits for Testing
+
+### 1. Fast Unit Tests
+
+**Without Hexagonal Architecture**:
+```python
+# ❌ Must mock boto3 for every test
+@patch('boto3.resource')
+@patch('boto3.client')
+def test_create_task(mock_client, mock_resource):
+    # Complex mock setup
+    mock_table = Mock()
+    mock_resource.return_value.Table.return_value = mock_table
+    # ... more mock setup
+
+    # Test
+    result = lambda_handler(event, context)
+
+    # Verify mocks were called correctly
+    mock_table.put_item.assert_called_once_with(...)
+```
+
+**With Hexagonal Architecture**:
+```python
+# βœ… Simple fake injection
+def test_create_task():
+    # Simple setup
+    repository = InMemoryTaskRepository()
+    publisher = InMemoryEventPublisher()
+    service = TaskService(repository, publisher)
+
+    # Test
+    task = service.create_task(title="Test Task")
+
+    # Simple assertions
+    assert task.title == "Test Task"
+    assert repository.count() == 1
+    assert publisher.count() == 1
+```
+
+### 2. Realistic Integration Tests
+
+```python
+# βœ… Test against real AWS
+def test_create_task_integration():
+    # Real DynamoDB adapter
+    repository = DynamoDBTaskRepository(table_name="test-table")
+
+    # Real AWS SDK calls
+    task = repository.create_task(Task(title="Test"))
+
+    # Verify in real DynamoDB
+    response = boto3.client('dynamodb').get_item(
+        TableName="test-table",
+        Key={'task_id': {'S': task.task_id}}
+    )
+    assert response['Item']['title']['S'] == "Test"
+```
+
+### 3. Error Simulation
+
+```python
+# βœ… Simulate AWS errors without AWS
+class ErrorSimulatingRepository:
+    """Fake that simulates DynamoDB failures.
+
+    The real adapter translates botocore's ClientError (for example,
+    ProvisionedThroughputExceededException) into RepositoryError, so a
+    fake standing in for the adapter raises RepositoryError directly.
+    """
+
+    def create_task(self, task: Task) -> Task:
+        # Simulate throttling as the adapter would surface it
+        raise RepositoryError(
+            "Simulated throttling: ProvisionedThroughputExceededException"
+        )
+
+def test_handles_throttling():
+    repository = ErrorSimulatingRepository()
+    service = TaskService(repository)
+
+    with pytest.raises(RepositoryError):
+        service.create_task(title="Test")
+```
+
+### 4. 
Layer-Specific Testing + +Each layer can be tested independently: + +- **Handler Layer**: Test HTTP parsing and response formatting +- **Domain Layer**: Test business logic in isolation +- **Integration Layer**: Test AWS SDK operations + +## Implementation Patterns + +### Self-Initializing Services + +Services initialize their own dependencies but allow injection for testing: + +```python +class TaskService: + def __init__( + self, + repository: Optional[TaskRepositoryProtocol] = None, + event_publisher: Optional[EventPublisherProtocol] = None + ): + """Initialize with optional dependency injection.""" + # Production: Use real implementations + if repository is None: + from shared.integration.dynamodb_adapter import DynamoDBTaskRepository + repository = DynamoDBTaskRepository() + + if event_publisher is None: + from shared.integration.eventbridge_adapter import EventBridgePublisher + event_publisher = EventBridgePublisher() + + self.repository = repository + self.event_publisher = event_publisher +``` + +**Benefits**: +- βœ… Works in production without configuration +- βœ… Easy to inject fakes for testing +- βœ… No dependency injection container needed + +### Protocol-Based Interfaces + +Use Python protocols instead of abstract base classes: + +```python +from typing import Protocol + +class TaskRepositoryProtocol(Protocol): + """Protocol defines the contract.""" + def create_task(self, task: Task) -> Task: ... +``` + +**Benefits**: +- βœ… Structural typing (duck typing with type safety) +- βœ… No inheritance required +- βœ… IDE autocomplete and type checking +- βœ… Easy to create multiple implementations + +### In-Memory Fakes + +Create realistic fakes with in-memory storage: + +```python +class InMemoryTaskRepository: + """Fake repository with realistic behavior.""" + + def __init__(self): + self._tasks: Dict[str, Task] = {} + + def create_task(self, task: Task) -> Task: + if task.task_id in self._tasks: + raise ValueError("Task already exists") + self._tasks[task.task_id] = task + return task + + # Helper methods for testing + def count(self) -> int: + return len(self._tasks) + + def clear(self) -> None: + self._tasks.clear() +``` + +**Benefits**: +- βœ… Realistic behavior (not just mocks) +- βœ… Fast execution (in-memory) +- βœ… Easy to debug +- βœ… Helper methods for test assertions + +## Comparison: Before vs After + +| Aspect | Before (Monolithic) | After (Hexagonal) | +|--------|---------------------|-------------------| +| **Testability** | Hard - requires extensive mocking | Easy - inject fakes | +| **Test Speed** | Slow - mock setup overhead | Fast - in-memory operations | +| **Maintainability** | Poor - changes ripple across layers | Good - isolated changes | +| **Flexibility** | Low - tightly coupled to AWS | High - swap implementations | +| **Clarity** | Low - mixed concerns | High - clear separation | +| **AWS Changes** | Breaks business logic tests | Only affects integration layer | +| **Business Logic** | Mixed with infrastructure | Pure and isolated | +| **Test Confidence** | Low - mocks don't match reality | High - real AWS tests | + +## Lambda Packaging Benefits + +### Before: Monolithic Packaging + +``` +task_handler.zip (100%) +β”œβ”€β”€ task_api/ +β”‚ β”œβ”€β”€ handlers/ +β”‚ β”‚ β”œβ”€β”€ task_handler.py βœ“ Used +β”‚ β”‚ └── notification_handler.py βœ— Not used +β”‚ β”œβ”€β”€ domain/ +β”‚ β”‚ β”œβ”€β”€ task_service.py βœ“ Used +β”‚ β”‚ └── notification_service.py βœ— Not used +β”‚ β”œβ”€β”€ models/ βœ“ Used +β”‚ └── integration/ βœ“ Used +└── dependencies/ βœ“ Used 
+ +notification_handler.zip (100%) +β”œβ”€β”€ task_api/ +β”‚ β”œβ”€β”€ handlers/ +β”‚ β”‚ β”œβ”€β”€ task_handler.py βœ— Not used +β”‚ β”‚ └── notification_handler.py βœ“ Used +β”‚ β”œβ”€β”€ domain/ +β”‚ β”‚ β”œβ”€β”€ task_service.py βœ— Not used +β”‚ β”‚ └── notification_service.py βœ“ Used +β”‚ β”œβ”€β”€ models/ βœ— Not used (mostly) +β”‚ └── integration/ βœ“ Used +└── dependencies/ βœ“ Used +``` + +### After: Service-Based Packaging + +``` +task_service.zip (~70% of before) +β”œβ”€β”€ services/ +β”‚ └── task_service/ βœ“ All used +β”‚ β”œβ”€β”€ handler.py +β”‚ β”œβ”€β”€ domain/ +β”‚ └── models/ +β”œβ”€β”€ shared/ +β”‚ └── integration/ βœ“ All used +└── dependencies/ βœ“ All used + +notification_service.zip (~60% of before) +β”œβ”€β”€ services/ +β”‚ └── notification_service/ βœ“ All used +β”‚ β”œβ”€β”€ handler.py +β”‚ └── domain/ +β”œβ”€β”€ shared/ +β”‚ └── integration/ βœ“ All used +└── dependencies/ βœ“ All used +``` + +### Packaging Improvements + +| Metric | Before | After | Improvement | +|--------|--------|-------|-------------| +| **Task Service Package** | 100% | ~70% | 30% reduction | +| **Notification Service Package** | 100% | ~60% | 40% reduction | +| **Unused Code in Package** | 30-40% | 0% | Eliminated | +| **Cold Start Time** | Baseline | Faster | Smaller package | +| **Deployment Speed** | Baseline | Faster | Smaller upload | + +### How CDK Bundles Services + +```python +# infrastructure/core/task_api_stack.py + +# Task Service Lambda +task_handler = PythonFunction( + self, "TaskHandler", + entry="services/task_service", # Entry point + index="handler.py", # Handler file + handler="lambda_handler", # Handler function + # CDK automatically: + # 1. Starts from services/task_service/ + # 2. Follows imports to include: + # - services/task_service/domain/ + # - services/task_service/models/ + # - shared/integration/ (via imports) + # 3. Excludes services/notification_service/ (not imported) +) + +# Notification Service Lambda +notification_handler = PythonFunction( + self, "NotificationHandler", + entry="services/notification_service", # Entry point + index="handler.py", # Handler file + handler="lambda_handler", # Handler function + # CDK automatically: + # 1. Starts from services/notification_service/ + # 2. Follows imports to include: + # - services/notification_service/domain/ + # - shared/integration/ (via imports) + # 3. Excludes services/task_service/ (not imported) +) +``` + +## Next Steps + +- **[Testing Guide](testing-guide.md)** - Learn how to test each layer +- **[Deployment Guide](deployment.md)** - Deploy the application +- **[Configuration Guide](configuration.md)** - Configure infrastructure + +## References + +- [Hexagonal Architecture](https://alistair.cockburn.us/hexagonal-architecture/) by Alistair Cockburn +- [Clean Architecture](https://blog.cleancoder.com/uncle-bob/2012/08/13/the-clean-architecture.html) by Robert C. 
Martin +- [Ports and Adapters Pattern](https://herbertograca.com/2017/09/14/ports-adapters-architecture/) +- [Domain-Driven Design](https://martinfowler.com/bliki/BoundedContext.html) - Bounded Contexts by Martin Fowler diff --git a/python-test-samples/cns427-testable-serverless-architecture/docs/cdk-nag-guide.md b/python-test-samples/cns427-testable-serverless-architecture/docs/cdk-nag-guide.md new file mode 100644 index 00000000..8e861e81 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/docs/cdk-nag-guide.md @@ -0,0 +1,355 @@ +# CDK Nag Security Checks Guide + +## Overview + +This project uses [CDK Nag](https://github.com/cdklabs/cdk-nag) to validate infrastructure code against AWS security best practices. CDK Nag is integrated conditionally, so it doesn't slow down normal development but can be enabled for security reviews. + +### Implementation Status + +βœ… **Complete** - CDK Nag is fully integrated and ready to use + +**What was implemented**: +- Conditional CDK Nag in `app.py` (disabled by default) +- Makefile targets: `make cdk-nag` and `make cdk-nag-report` +- Stack-level suppressions for demo-appropriate violations +- Fixed IAM and API Gateway security issues +- Comprehensive documentation + +**Current violations**: 0 (all addressed through fixes or documented suppressions) + +## Quick Start + +### Run Security Checks (Blocking) + +```bash +make cdk-nag +``` + +This will: +- Run AwsSolutions security checks +- **Fail if violations are found** +- Display violations in the console +- Exit with non-zero code (suitable for CI/CD) + +### Generate Security Report (Non-Blocking) + +```bash +make cdk-nag-report +``` + +This will: +- Run AwsSolutions security checks +- Generate CSV and JSON reports +- **Not fail the build** (violations are reported only) +- Save reports to `cdk.out/` directory + +### Run All Quality Checks + +```bash +make check +``` + +This runs: +1. Linting (ruff) +2. Type checking (mypy) +3. CDK Nag security checks + +## How It Works + +### Conditional Activation + +CDK Nag is **disabled by default** to keep development fast. It's enabled via environment variables: + +```bash +# Enable CDK Nag +ENABLE_CDK_NAG=true cdk synth + +# Enable CDK Nag + Reports +ENABLE_CDK_NAG=true CDK_NAG_REPORT=true cdk synth + +# Or via CDK context +cdk synth -c enable-cdk-nag=true +``` + +### Normal Development (CDK Nag Disabled) + +```bash +# These commands run WITHOUT CDK Nag (fast) +cdk synth +cdk deploy +make deploy +``` + +### Security Review (CDK Nag Enabled) + +```bash +# These commands run WITH CDK Nag (thorough) +make cdk-nag +make cdk-nag-report +make check +``` + +## Understanding CDK Nag Output + +### Violation Format + +``` +[Error] AwsSolutions-IAM4: The IAM user, role, or group uses AWS managed policies + - Stack: cns427-task-api-core + - Resource: LambdaExecutionRole/Resource + - Severity: Medium + - Recommendation: Use customer managed policies instead +``` + +### Severity Levels + +- **Error**: Must be fixed or suppressed +- **Warning**: Should be reviewed +- **Info**: Informational only + +## Common Violations and Fixes + +### IAM4: AWS Managed Policies + +**Violation**: Lambda uses AWS managed policy `AWSLambdaBasicExecutionRole` + +**Fix Options**: +1. Create custom policy with only needed permissions +2. Suppress if AWS managed policy is acceptable for demo + +### IAM5: Wildcard Permissions + +**Violation**: IAM policy uses wildcard (`*`) in actions or resources + +**Fix Options**: +1. Specify exact resources and actions +2. 
Suppress if wildcard is necessary (e.g., CloudWatch Logs) + +### L1: Lambda Reserved Concurrency + +**Violation**: Lambda function doesn't have reserved concurrency + +**Fix Options**: +1. Set `reserved_concurrent_executions` in CDK +2. Suppress if using account-level limits + +### DDB3: Point-in-Time Recovery + +**Violation**: DynamoDB table doesn't have PITR enabled + +**Fix**: Already enabled in our code βœ… + +## Suppressing Violations + +If a violation is acceptable (e.g., for demo/educational purposes), you can suppress it: + +### Stack-Level Suppression + +```python +from cdk_nag import NagSuppressions + +# In your stack +NagSuppressions.add_stack_suppressions( + self, + [ + { + 'id': 'AwsSolutions-IAM4', + 'reason': 'AWS managed policies acceptable for demo purposes' + } + ] +) +``` + +### Resource-Level Suppression + +```python +NagSuppressions.add_resource_suppressions( + lambda_function, + [ + { + 'id': 'AwsSolutions-L1', + 'reason': 'Reserved concurrency not needed for demo workload' + } + ] +) +``` + +## Report Files + +When you run `make cdk-nag-report`, reports are generated in `cdk.out/`: + +### CSV Reports +- `AwsSolutions-cns427-task-api-core.csv` +- `AwsSolutions-cns427-task-api-api.csv` +- `AwsSolutions-cns427-task-api-monitoring.csv` + +### JSON Reports +- `AwsSolutions-cns427-task-api-core.json` +- `AwsSolutions-cns427-task-api-api.json` +- `AwsSolutions-cns427-task-api-monitoring.json` + +### Report Contents + +Each report includes: +- Rule ID (e.g., AwsSolutions-IAM4) +- Resource path +- Compliance status +- Severity level +- Recommendation + +## CI/CD Integration + +### GitHub Actions Example + +```yaml +- name: Run CDK Nag Security Checks + run: make cdk-nag + env: + ENABLE_CDK_NAG: true +``` + +### GitLab CI Example + +```yaml +cdk-nag: + script: + - make cdk-nag + variables: + ENABLE_CDK_NAG: "true" +``` + +## Rule Packs + +Currently using: **AwsSolutions** (AWS Solutions best practices) + +Other available rule packs: +- `HIPAASecurityChecks` - HIPAA compliance +- `NIST80053R5Checks` - NIST 800-53 Rev 5 +- `PCIDSS321Checks` - PCI DSS 3.2.1 + +To change rule pack, edit `app.py`: + +```python +from cdk_nag import HIPAASecurityChecks + +cdk.Aspects.of(app).add(HIPAASecurityChecks(verbose=True)) +``` + +## Best Practices + +1. **Run before commits**: `make cdk-nag` before committing infrastructure changes +2. **Generate reports regularly**: Track security posture over time +3. **Document suppressions**: Always provide clear reasons for suppressions +4. **Review in PRs**: Include CDK Nag output in pull request reviews +5. **Enable in CI/CD**: Block deployments on security violations + +## Troubleshooting + +### CDK Nag Not Running + +Check that it's enabled: +```bash +# Should see: "πŸ”’ CDK Nag: Enabled" +ENABLE_CDK_NAG=true cdk synth +``` + +### Too Many Violations + +Start with reports to understand scope: +```bash +make cdk-nag-report +# Review reports in cdk.out/ +``` + +Then fix or suppress violations incrementally. + +### Performance Issues + +CDK Nag adds ~10-30 seconds to synth time. This is why it's disabled by default for development. + +## Resources + +- [CDK Nag GitHub](https://github.com/cdklabs/cdk-nag) +- [AwsSolutions Rules](https://github.com/cdklabs/cdk-nag/blob/main/RULES.md) +- [CDK Nag Workshop](https://catalog.workshops.aws/cdk-nag) +- [AWS Security Best Practices](https://docs.aws.amazon.com/wellarchitected/latest/security-pillar/welcome.html) + +## Implementation Details + +### Files Modified + +1. 
**`app.py`** - Added conditional CDK Nag integration + - Disabled by default (no performance impact) + - Enable with `ENABLE_CDK_NAG=true` + - Supports report generation with `CDK_NAG_REPORT=true` + +2. **`Makefile`** - Added CDK Nag targets + - `make cdk-nag` - Run security checks (blocking) + - `make cdk-nag-report` - Generate reports (non-blocking) + - `make check` - Now includes CDK Nag + +3. **`infrastructure/core/task_api_stack.py`** - Security improvements + - Replaced AWS managed policies with inline policies + - Added API Gateway request validation + - Added stack-level suppressions with justifications + +### Suppressions + +**Suppressed with Justification (8 violations)**: +1. βœ… **AwsSolutions-COG4** (Γ—5) - Using IAM auth instead of Cognito (appropriate for demo) + - IAM auth with SigV4 signing provides sufficient security + - Cognito adds unnecessary complexity and cost for demo +2. βœ… **AwsSolutions-APIG1** (Γ—1) - Access logging disabled (cost reduction for demo) + - Lambda CloudWatch Logs provide sufficient observability +3. βœ… **AwsSolutions-APIG3** (Γ—1) - WAF not configured (cost reduction for demo) + - IAM auth already restricts access + - WAF would add ~$5-10/month without substantial benefit for demo +4. βœ… **AwsSolutions-APIG6** (Γ—1) - Stage logging disabled (Lambda logs sufficient for demo) + - Lambda logs provide sufficient observability + - Stage logging would duplicate information + +All suppressions include detailed justifications and are documented in the code. + +### Integration with Existing Workflow + +**Before**: +```bash +make check # lint + type-check +``` + +**After**: +```bash +make check # lint + type-check + cdk-nag +``` + +CDK Nag is now part of your quality checks! + +### Performance Impact + +- **Normal development**: No impact (CDK Nag disabled by default) +- **Security checks**: Adds ~10-30 seconds to synth time +- **CI/CD**: One-time cost per pipeline run + +### Testing the Implementation + +```bash +# Test 1: Verify CDK Nag is disabled by default +cdk synth +# Should see: "ℹ️ CDK Nag: Disabled" + +# Test 2: Verify CDK Nag can be enabled +ENABLE_CDK_NAG=true cdk synth +# Should see: "πŸ”’ CDK Nag: Enabled" + +# Test 3: Run security checks (should pass with 0 violations) +make cdk-nag + +# Test 4: Generate reports +make cdk-nag-report +# Should create files in cdk.out/ +``` + +--- + +**Status**: βœ… Implementation Complete - 0 violations +**Next Steps**: Run `make cdk-nag` to verify security posture! diff --git a/python-test-samples/cns427-testable-serverless-architecture/docs/configuration.md b/python-test-samples/cns427-testable-serverless-architecture/docs/configuration.md new file mode 100644 index 00000000..5f52f878 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/docs/configuration.md @@ -0,0 +1,469 @@ +# Configuration Guide + +This guide explains the centralized configuration system used for infrastructure resource naming and how to customize it for different environments. + +## Table of Contents + +- [Overview](#overview) +- [Configuration Module](#configuration-module) +- [Default Configuration](#default-configuration) +- [Using Configuration in Stacks](#using-configuration-in-stacks) +- [Overriding Configuration](#overriding-configuration) +- [Available Configuration Methods](#available-configuration-methods) +- [Best Practices](#best-practices) + +## Overview + +The project uses a centralized configuration module (`infrastructure/config.py`) for all resource naming. 
This provides: + +- **Consistent naming patterns** across all resources +- **Type-safe configuration** with IDE autocomplete +- **Environment-specific overrides** via CDK context +- **Easy multi-environment deployments** +- **Single source of truth** for resource names + +## Configuration Module + +### Location + +`infrastructure/config.py` + +### Key Features + +1. **Dataclass-based**: Uses Python dataclasses for type safety +2. **Computed names**: Resource names are computed from base configuration +3. **CDK context integration**: Loads values from `cdk.json` or CLI overrides +4. **Protocol-based**: Provides methods for each resource type + +### Basic Structure + +```python +from dataclasses import dataclass +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from constructs import Node + +@dataclass +class InfrastructureConfig: + """Configuration for infrastructure resource naming.""" + + project_name: str = "cns427-task-api" + environment: str = "dev" + region: str = "us-west-2" + + def tasks_table_name(self) -> str: + """Get DynamoDB table name for tasks.""" + return f"{self.project_name}-tasks" + + # ... more methods + + @classmethod + def from_cdk_context(cls, node: 'Node') -> 'InfrastructureConfig': + """Create configuration from CDK context.""" + return cls( + project_name=node.try_get_context('project_name') or cls.project_name, + environment=node.try_get_context('environment') or cls.environment, + region=node.try_get_context('region') or cls.region, + ) +``` + +## Default Configuration + +Default values are defined in `cdk.json`: + +```json +{ + "app": "python app.py", + "context": { + "project_name": "cns427-task-api", + "environment": "dev", + "region": "us-west-2" + } +} +``` + +### Configuration Parameters + +| Parameter | Default | Description | +|-----------|---------|-------------| +| `project_name` | `cns427-task-api` | Base name for all resources | +| `environment` | `dev` | Environment identifier (dev, staging, prod) | +| `region` | `us-west-2` | AWS region for deployment | + +## Using Configuration in Stacks + +### Loading Configuration + +In every CDK stack, load the configuration from context: + +```python +from infrastructure.config import InfrastructureConfig + +class MyStack(Stack): + def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None: + super().__init__(scope, construct_id, **kwargs) + + # Load configuration from CDK context + config = InfrastructureConfig.from_cdk_context(self.node) +``` + +### Example 1: Core Stack (TaskApiCoreStack) + +```python +from infrastructure.config import InfrastructureConfig + +class TaskApiCoreStack(Stack): + def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None: + super().__init__(scope, construct_id, **kwargs) + + # Load configuration from CDK context + config = InfrastructureConfig.from_cdk_context(self.node) + + # DynamoDB table with configured name + self.tasks_table = dynamodb.Table( + self, + 'TasksTable', + table_name=config.tasks_table_name(), # Returns: cns427-task-api-tasks + partition_key=dynamodb.Attribute(name='task_id', type=dynamodb.AttributeType.STRING), + billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST, + # ... 
+ ) + + # EventBridge custom event bus with configured name + self.event_bus = events.EventBus( + self, + 'TaskEventBus', + event_bus_name=config.event_bus_name() # Returns: cns427-task-api-core-task-events + ) + + # CloudWatch log group with configured name + self.app_log_group = logs.LogGroup( + self, + 'AppLogGroup', + log_group_name=config.log_group_name(), # Returns: /aws/lambda/cns427-task-api + retention=logs.RetentionDays.ONE_WEEK, + ) +``` + +### Example 2: API Stack (TaskApiStack) + +```python +class TaskApiStack(Stack): + def __init__(self, scope: Construct, construct_id: str, core_stack: TaskApiCoreStack, **kwargs) -> None: + super().__init__(scope, construct_id, **kwargs) + + # Load configuration + config = InfrastructureConfig.from_cdk_context(self.node) + + # Lambda function with configured name + self.task_handler = PythonFunction( + self, + 'TaskHandler', + function_name=config.task_handler_function_name(), # Returns: cns427-task-api-task-handler + entry='task_api/handlers', + # ... + ) + + # API Gateway with configured name + self.api = apigateway.RestApi( + self, + 'TaskApi', + rest_api_name=config.api_name(), # Returns: cns427-task-api-api + description='CNS427 Task Management API', + ) + + # EventBridge rule with configured name + task_event_rule = events.Rule( + self, + 'TaskEventRule', + rule_name=config.task_event_rule_name(), # Returns: cns427-task-api-task-event-rule + event_bus=self.event_bus, + ) +``` + +### Example 3: Test Harness Stack (TestInfrastructureStack) + +```python +class TestInfrastructureStack(Stack): + def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None: + super().__init__(scope, construct_id, **kwargs) + + # Load configuration + config = InfrastructureConfig.from_cdk_context(self.node) + + # Test results table with configured name + self.test_results_table = dynamodb.Table( + self, + "TestResultsTable", + table_name=config.test_results_table_name(), # Returns: cns427-task-api-test-results + # ... + ) + + # Test subscriber Lambda with configured name + self.test_subscriber_lambda = lambda_.Function( + self, + 'TestHarnessLambda', + function_name=config.test_subscriber_function_name(), # Returns: cns427-task-api-test-subscriber + # ... + ) + + # Test event rule with configured name + self.test_event_rule = events.Rule( + self, + "TestEventRule", + rule_name=config.test_event_rule_name(), # Returns: cns427-task-api-test-rule + # ... 
+ ) +``` + +## Overriding Configuration + +### Method 1: Edit cdk.json + +Update the context values in `cdk.json`: + +```json +{ + "context": { + "project_name": "my-custom-project", + "environment": "prod", + "region": "us-east-1" + } +} +``` + +**When to use:** +- Permanent configuration changes +- Team-wide defaults +- Version-controlled settings + +### Method 2: CLI Context Parameters + +Override values at deployment time: + +```bash +# Deploy to production +poetry run cdk deploy -c environment=prod -c project_name=my-app-prod + +# Deploy to different region +poetry run cdk deploy -c region=eu-west-1 + +# Multiple overrides +poetry run cdk deploy -c environment=staging -c project_name=my-app-staging -c region=ap-southeast-1 +``` + +**When to use:** +- One-time deployments +- Testing different configurations +- CI/CD pipelines with dynamic values +- Personal development environments + +### Method 3: Environment Variables (for Application Code) + +Application code can read configuration from environment variables: + +```python +import os + +# Lambda environment variables set by CDK +TASKS_TABLE_NAME = os.getenv('TASKS_TABLE_NAME') +EVENT_BUS_NAME = os.getenv('EVENT_BUS_NAME') +AWS_REGION = os.getenv('AWS_REGION') +``` + +## Available Configuration Methods + +### Core Infrastructure + +| Method | Returns | Example Output | +|--------|---------|----------------| +| `tasks_table_name()` | DynamoDB tasks table name | `cns427-task-api-tasks` | +| `event_bus_name()` | EventBridge custom event bus name | `cns427-task-api-core-task-events` | +| `task_handler_function_name()` | Task CRUD Lambda function name | `cns427-task-api-task-handler` | +| `notification_handler_function_name()` | Notification Lambda function name | `cns427-task-api-notification-handler` | +| `api_name()` | API Gateway REST API name | `cns427-task-api-api` | +| `task_event_rule_name()` | EventBridge rule name | `cns427-task-api-task-event-rule` | +| `log_group_name()` | CloudWatch log group name | `/aws/lambda/cns427-task-api` | +| `dashboard_name()` | CloudWatch dashboard name | `cns427-task-api-dashboard` | + +### Test Harness Infrastructure + +| Method | Returns | Example Output | +|--------|---------|----------------| +| `test_results_table_name()` | Test results DynamoDB table name | `cns427-task-api-test-results` | +| `test_subscriber_function_name()` | Test subscriber Lambda function name | `cns427-task-api-test-subscriber` | +| `test_event_rule_name()` | Test EventBridge rule name | `cns427-task-api-test-rule` | +| `test_dlq_name()` | Test dead-letter queue name | `cns427-task-api-test-events-dlq` | +| `test_execution_role_name()` | Test IAM role name | `cns427-task-api-test-execution-role` | +| `test_dashboard_name()` | Test monitoring dashboard name | `cns427-task-api-test-monitoring` | + +## Best Practices + +### 1. Always Use Configuration Methods + +```python +# βœ… DO: Use configuration methods +table_name = config.tasks_table_name() + +# ❌ DON'T: Hardcode resource names +table_name = "cns427-task-api-tasks" +``` + +### 2. Load Configuration Once Per Stack + +```python +# βœ… DO: Load once in __init__ +class MyStack(Stack): + def __init__(self, scope, construct_id, **kwargs): + super().__init__(scope, construct_id, **kwargs) + config = InfrastructureConfig.from_cdk_context(self.node) + # Use config throughout the stack + +# ❌ DON'T: Load multiple times +def create_table(self): + config = InfrastructureConfig.from_cdk_context(self.node) # Wasteful +``` + +### 3. 
Use Descriptive Project Names + +```python +# βœ… DO: Use descriptive names +project_name = "my-company-task-api-prod" + +# ❌ DON'T: Use generic names +project_name = "app" +``` + +### 4. Include Environment in Project Name for Production + +```bash +# βœ… DO: Separate production resources +poetry run cdk deploy -c project_name=task-api-prod -c environment=prod + +# ❌ DON'T: Use same name for all environments +poetry run cdk deploy -c project_name=task-api +``` + +### 5. Document Custom Configurations + +If you add custom configuration methods, document them: + +```python +def custom_resource_name(self) -> str: + """ + Get custom resource name. + + Returns: + Resource name in format: {project_name}-custom-resource + + Example: + >>> config = InfrastructureConfig(project_name="my-app") + >>> config.custom_resource_name() + 'my-app-custom-resource' + """ + return f"{self.project_name}-custom-resource" +``` + +## Common Scenarios + +### Scenario 1: Deploy to Multiple Environments + +```bash +# Development +poetry run cdk deploy --all -c environment=dev + +# Staging +poetry run cdk deploy --all \ + -c environment=staging \ + -c project_name=cns427-task-api-staging + +# Production +poetry run cdk deploy --all \ + -c environment=prod \ + -c project_name=cns427-task-api-prod \ + -c region=us-east-1 +``` + +### Scenario 2: Deploy to Multiple Regions + +```bash +# US West +poetry run cdk deploy --all \ + -c region=us-west-2 \ + -c project_name=task-api-west + +# EU West +poetry run cdk deploy --all \ + -c region=eu-west-1 \ + -c project_name=task-api-eu +``` + +### Scenario 3: Personal Development Environment + +```bash +# Use your name in project name +poetry run cdk deploy --all \ + -c project_name=task-api-john-dev \ + -c environment=dev +``` + +### Scenario 4: CI/CD Pipeline + +```bash +# Use environment variables in CI/CD +export PROJECT_NAME="task-api-${CI_ENVIRONMENT_NAME}" +export ENVIRONMENT="${CI_ENVIRONMENT_NAME}" +export REGION="${AWS_REGION}" + +poetry run cdk deploy --all \ + -c project_name="${PROJECT_NAME}" \ + -c environment="${ENVIRONMENT}" \ + -c region="${REGION}" +``` + +## Troubleshooting + +### Issue: Resources Not Found + +**Problem:** Can't find deployed resources + +**Solution:** Check that you're using the same configuration values: +```bash +# List stacks to see what's deployed +poetry run cdk list + +# Check context values +cat cdk.json | grep -A 5 "context" +``` + +### Issue: Resource Name Conflicts + +**Problem:** `Resource already exists` error + +**Solution:** Use a different project name: +```bash +poetry run cdk deploy -c project_name=my-unique-name +``` + +### Issue: Configuration Not Applied + +**Problem:** Changes to `cdk.json` not taking effect + +**Solution:** Clear CDK cache and redeploy: +```bash +rm -rf cdk.out/ +poetry run cdk deploy --all +``` + +## Next Steps + +- **[Deployment Guide](deployment.md)** - Deploy with custom configuration +- **[Architecture Guide](architecture.md)** - Understand how configuration is used +- **[Testing Guide](testing-guide.md)** - Test with different configurations + +## References + +- [AWS CDK Context](https://docs.aws.amazon.com/cdk/v2/guide/context.html) +- [Python Dataclasses](https://docs.python.org/3/library/dataclasses.html) +- [Infrastructure as Code Best Practices](https://docs.aws.amazon.com/prescriptive-guidance/latest/strategy-infrastructure-as-code/welcome.html) diff --git a/python-test-samples/cns427-testable-serverless-architecture/docs/deployment.md 
b/python-test-samples/cns427-testable-serverless-architecture/docs/deployment.md new file mode 100644 index 00000000..5d910f28 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/docs/deployment.md @@ -0,0 +1,811 @@ +# Deployment Guide + +This guide covers everything you need to deploy and operate the CNS427 Task Management API, including prerequisites, AWS setup, deployment procedures, and troubleshooting. + +## Table of Contents + +- [Prerequisites](#prerequisites) +- [AWS Account Setup](#aws-account-setup) +- [Installation](#installation) +- [Deployment](#deployment) +- [Testing After Deployment](#testing-after-deployment) +- [Cleanup](#cleanup) +- [Troubleshooting](#troubleshooting) +- [Examples](#examples) + +## Prerequisites + +Before you begin, ensure you have the following installed: + +### Required Tools + +1. **Python 3.13+** + ```bash + python3 --version # Should be 3.13 or higher + ``` + +> **Note**: +It is common for Linux distros to use the executable name python3 for Python 3.x, and have python refer to a Python 2.x installation. Some distros have an optional package you can install that makes the python command refer to Python 3. + +2. **Poetry** (Python dependency management) + ```bash + curl -sSL https://install.python-poetry.org | python3 - + # Or via pip + pip install poetry + ``` + +3. **Node.js 18+** (required for AWS CDK) + ```bash + node --version # Should be 18 or higher + npm --version + ``` + +4. **AWS CDK CLI** + ```bash + npm install -g aws-cdk + cdk --version + ``` + +5. **AWS CLI v2** + ```bash + # macOS + brew install awscli + + # Linux + curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" + unzip awscliv2.zip + sudo ./aws/install + + # Verify installation + aws --version + ``` + +6. **Docker** (required for CDK Lambda bundling) + ```bash + docker --version + ``` + - Download from: https://www.docker.com/products/docker-desktop + +### Docker Alternatives + +CDK supports using alternative container runtimes through the `CDK_DOCKER` environment variable. This is useful if you prefer tools like **Finch** (AWS's open-source Docker alternative) or other OCI-compliant container runtimes. + +**Using Finch:** + +1. **Install Finch** + ```bash + # macOS (via Homebrew) + brew install --cask finch + + # Or download from: https://github.com/runfinch/finch + ``` + +2. **Initialize Finch** + ```bash + finch vm init + finch vm start + ``` + +3. **Configure CDK to use Finch** + ```bash + # Set for current session + export CDK_DOCKER=finch + + # Or add to your shell profile (~/.zshrc, ~/.bashrc) + echo 'export CDK_DOCKER=finch' >> ~/.zshrc + source ~/.zshrc + ``` + +4. **Verify Finch is working** + ```bash + finch version + finch ps # Should show running containers (if any) + ``` + +**Using other container runtimes:** + +You can use any OCI-compliant container runtime by setting `CDK_DOCKER`: + +```bash +# Podman +export CDK_DOCKER=podman + +# Rancher Desktop +export CDK_DOCKER=nerdctl + +# Custom path +export CDK_DOCKER=/path/to/your/container-runtime +``` + +**Note:** The container runtime must support the same command-line interface as Docker (build, run, etc.) for CDK bundling to work correctly. 
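+
+As a quick sanity check after switching runtimes, run a synth and watch which binary CDK shells out to during bundling. The commands below use Finch as the example runtime; substitute your own:
+
+```bash
+# Point CDK at the alternative runtime for this shell session
+export CDK_DOCKER=finch
+
+# Confirm the runtime itself responds
+finch version
+
+# Synthesize; Lambda bundling should now run through finch, not docker
+cdk synth
+```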
+
+### ARM64 Lambda Architecture and Cross-Platform Building
+
+This project deploys Lambda functions using **ARM64 architecture** (AWS Graviton2 processors) for better price-performance:
+- **20% lower cost** compared to x86_64
+- **Up to 34% better performance** for many workloads
+- **Lower latency** and better energy efficiency
+
+#### Building ARM64 on Different Platforms
+
+**macOS (Apple Silicon - M1/M2/M3):**
+- βœ… Native ARM64 support - no additional setup needed
+- Docker builds ARM64 images natively
+
+**macOS (Intel):**
+- βœ… Docker Desktop includes QEMU and buildx pre-configured
+- ARM64 builds work out of the box through emulation
+
+**Linux x86_64 (EC2, Ubuntu, etc.):**
+- ⚠️ Requires QEMU and Docker buildx for cross-compilation
+- Follow the setup instructions below
+
+#### Setting up ARM64 Cross-Compilation on Linux x86_64
+
+If you're deploying from a Linux x86_64 machine (like an EC2 instance), you need to enable multi-platform builds:
+
+**1. Register QEMU (ARM64 emulator):**
+
+```bash
+# Register QEMU binfmt handlers so the host can emulate ARM64 binaries
+docker run --rm --privileged multiarch/qemu-user-static --reset -p yes
+```
+
+**2. Enable Docker buildx:**
+
+```bash
+# Create a new builder instance
+docker buildx create --name multiarch --driver docker-container --use
+
+# Bootstrap the builder
+docker buildx inspect --bootstrap
+
+# Verify ARM64 support
+docker buildx ls
+# Should show: linux/amd64, linux/arm64, linux/arm/v7, etc.
+```
+
+**3. Test ARM64 build:**
+
+```bash
+# Test building an ARM64 image (assumes a Dockerfile in the current directory)
+docker buildx build --platform linux/arm64 -t test-arm64 .
+
+# Or test with CDK synth
+cdk synth
+```
+
+#### Alternative: Use x86_64 Architecture
+
+If cross-compilation is problematic, you can change the Lambda architecture to x86_64:
+
+**In `infrastructure/core/task_api_stack.py`:**
+
+```python
+# Change from:
+architecture=lambda_.Architecture.ARM_64,
+
+# To:
+architecture=lambda_.Architecture.X86_64,
+```
+
+**Trade-offs:**
+- βœ… No cross-compilation needed on x86_64 systems
+- βœ… Simpler build process
+- ❌ ~20% higher Lambda costs
+- ❌ Slightly lower performance
+
+#### Troubleshooting ARM64 Builds
+
+**Error: "exec format error" or "cannot execute binary file"**
+- QEMU is not installed or not registered
+- Run: `docker run --rm --privileged multiarch/qemu-user-static --reset -p yes`
+
+**Error: "multiple platforms feature is currently not supported"**
+- Docker buildx is not enabled
+- Follow the buildx setup steps above
+
+**Slow builds on x86_64:**
+- ARM64 emulation is slower than native builds
+- Consider using an ARM64 build machine (like AWS Graviton EC2) for faster builds
+- Or switch to x86_64 architecture if build speed is critical
+
+## AWS Account Setup
+
+### 1. AWS Account
+
+You need an AWS account with appropriate permissions to create:
+- Lambda functions
+- DynamoDB tables
+- API Gateway APIs
+- EventBridge event buses and rules
+- IAM roles and policies
+- CloudWatch log groups
+
+### 2. 
Configure AWS Credentials + +Set up your AWS credentials using one of these methods: + +**Option A: AWS CLI Configuration (Recommended)** +```bash +aws configure +# Enter your: +# - AWS Access Key ID +# - AWS Secret Access Key +# - Default region (e.g., us-west-2) +# - Default output format (json) +``` + +**Option B: Environment Variables** +```bash +export AWS_ACCESS_KEY_ID=your_access_key_id +export AWS_SECRET_ACCESS_KEY=your_secret_access_key +export AWS_SESSION_TOKEN=your_session_token # If using temporary credentials +export AWS_DEFAULT_REGION=us-west-2 +``` + +**Option C: AWS SSO** +```bash +aws sso login --profile your-profile +export AWS_PROFILE=your-profile +``` + +### 3. Bootstrap CDK (First-time only) + +If this is your first time using CDK in your AWS account/region: + +```bash +cdk bootstrap aws://ACCOUNT-ID/REGION + +# Example: +cdk bootstrap aws://123456789012/us-west-2 +``` + +## Installation + +1. **Clone the repository** (if not already done) + ```bash + git clone + cd cns427-task-api + ``` + +2. **Install dependencies** + ```bash + poetry install + ``` + +3. **Verify installation** + ```bash + poetry run validate-setup + ``` + +## Security Checks (Optional but Recommended) + +Before deploying, you can run CDK Nag security checks to validate your infrastructure against AWS best practices: + +```bash +# Run security checks (will fail if violations found) +make cdk-nag + +# Or generate a security report (non-blocking) +make cdk-nag-report +``` + +**What CDK Nag checks**: +- IAM policies and roles +- API Gateway configuration +- Lambda security settings +- Encryption and logging + +**Current status**: βœ… All violations addressed (0 violations) + +For detailed information about CDK Nag, see the [CDK Nag Guide](cdk-nag-guide.md). + +## Deployment + +> **Important:** This project has **two separate CDK applications**: +> 1. **Main Application** (required) - The Task API itself +> 2. **Test Harness** (optional) - Infrastructure for EventBridge integration testing +> +> They must be deployed separately and cannot be deployed with a single `cdk deploy --all` command. + +### Step 1: Deploy Main Application Stacks + +Deploy the main application infrastructure (core + API + monitoring): + +```bash +# Using Make (recommended) +make deploy + +# Or using Poetry script +poetry run deploy + +# Or using CDK directly from project root +poetry run cdk deploy --all +``` + +This deploys **three stacks** that together form the main application: +- **cns427-task-api-core**: Foundation resources (DynamoDB table, EventBridge bus, IAM roles, CloudWatch logs) +- **cns427-task-api-api**: Compute and API resources (Lambda functions, API Gateway, EventBridge rules) +- **cns427-task-api-monitoring**: Observability resources (CloudWatch dashboards and alarms) + +**Note:** These stacks have dependencies (API depends on Core, Monitoring depends on API) and CDK deploys them in the correct order automatically. + +### Step 2: Deploy Test Harness Stack (Optional) + +The test harness is **optional** and only needed if you want to run EventBridge integration tests. It's a **separate CDK application** located in `infrastructure/test_harness/`. + +Deploy the EventBridge testing infrastructure: + +```bash +# Using Make (recommended) +make deploy-test-infra + +# Or using CDK directly (must change directory) +cd infrastructure/test_harness +poetry run cdk deploy +cd ../.. 
# Return to project root +``` + +This deploys **one stack**: +- **cns427-task-api-test-harness**: Test infrastructure (DynamoDB table for test results, Lambda subscriber, EventBridge rule) + +**Verify test infrastructure deployment:** +```bash +make check-test-infra +# Should output: βœ… Test infrastructure is deployed +``` + +**Why separate CDK apps?** +- Test infrastructure is optional (not needed for production) +- Keeps test resources isolated from production resources +- Allows independent lifecycle management +- Simpler to tear down test infrastructure without affecting main app + +### Environment-Specific Deployments + +**Development (default):** +```bash +poetry run cdk deploy --all +``` + +**Production:** +```bash +poetry run cdk deploy --all -c environment=prod -c project_name=cns427-task-api-prod +``` + +**Staging:** +```bash +poetry run cdk deploy --all -c environment=staging -c project_name=cns427-task-api-staging +``` + +**Custom Region:** +```bash +poetry run cdk deploy --all -c region=eu-west-1 +``` + +### Deployment Output + +After successful deployment, CDK will output: +- API Gateway endpoint URL +- Lambda function ARNs +- DynamoDB table names +- EventBridge bus ARN + +Save these outputs for testing and verification. + +## Testing After Deployment + +### Unit Tests (No Deployment Required) + +Unit tests can be run locally without any AWS infrastructure: + +```bash +# Using Make +make test-unit + +# Or using Poetry +poetry run test-unit +``` + +### Integration Tests (Requires Deployment) + +**Important:** Integration, DynamoDB, and EventBridge tests require both core application and test harness infrastructure to be deployed first. + +**Prerequisites:** +1. Deploy core application: `make deploy` +2. Deploy test harness: `make deploy-test-infra` +3. Verify test infrastructure: `make check-test-infra` + +**Run Integration Tests:** +```bash +# DynamoDB integration tests +make test-integration +# Or: poetry run test-integration + +# EventBridge integration tests +make test-eventbridge +# Or: poetry run test-eventbridge +``` + +### Run All Tests + +```bash +# Using Make (runs unit + integration + eventbridge) +make test + +# Or using Poetry +poetry run test-all +``` + +**Note:** This will fail if infrastructure is not deployed. + +### Generate Coverage Report + +```bash +make coverage +# View report at: htmlcov/all/index.html +``` + +## Cleanup + +### Important: Deletion Order + +> **Critical:** Because the main app and test harness are **separate CDK applications**, you **cannot** use `cdk destroy --all` to destroy everything. You must destroy them separately in the correct order. + +**You must destroy the test harness stack BEFORE destroying the main application stacks.** The test harness has an EventBridge rule that references the core event bus. AWS will not allow you to delete an event bus with active rules attached. + +### Recommended: Destroy Using Make + +The Makefile handles the correct order: + +```bash +# Step 1: Destroy test harness (if deployed) +make destroy-test-infra + +# Step 2: Destroy main application +make destroy +``` + +### Manual Destruction (Correct Order) + +If destroying manually, you **must** follow this order: + +**Step 1: Destroy Test Infrastructure First** +```bash +# Change to test harness directory +cd infrastructure/test_harness + +# Destroy test harness stack +poetry run cdk destroy --force + +# Return to project root +cd ../.. 
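+
+# Optional sanity check before destroying the main app. The stack name below
+# assumes the default project name ({project_name}-test-harness, see
+# infrastructure/config.py); adjust it if you overrode project_name.
+aws cloudformation describe-stacks \
+  --stack-name cns427-task-api-test-harness >/dev/null 2>&1 \
+  && echo "⚠️  Test harness stack still exists" \
+  || echo "βœ… Test harness stack deleted"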
+``` + +**Step 2: Destroy Main Application Stacks** +```bash +# From project root +poetry run cdk destroy --all --force +``` + +### What Happens If You Destroy in Wrong Order? + +If you try to destroy the main stacks before the test harness, you'll get an error: + +``` +Cannot delete event bus 'cns427-task-api-core-task-events' because it has rules attached +``` + +**Solution:** Destroy the test harness first, then retry destroying the main stacks. + +### Why Can't `cdk destroy --all` Handle Both? + +- The main app and test harness are **separate CDK applications** with different `app.py` files +- `cdk destroy --all` only sees stacks in the current CDK app +- From project root: only sees main app stacks (core, api, monitoring) +- From `infrastructure/test_harness/`: only sees test harness stack +- This separation is intentional to keep test infrastructure isolated + +### Clean Build Artifacts + +```bash +make clean +``` + +This removes: +- `.pytest_cache/` +- `htmlcov/` +- `.coverage` +- `cdk.out/` +- `.mypy_cache/` +- `.ruff_cache/` +- `.venv/` +- `__pycache__/` directories +- `*.pyc` files + +## Troubleshooting + +### Common Issues + +#### 1. CDK Bootstrap Required + +**Error:** `Policy contains a statement with one or more invalid principals` + +**Solution:** +```bash +cdk bootstrap aws://ACCOUNT-ID/REGION +``` + +#### 2. Docker Not Running + +**Error:** `Cannot connect to the Docker daemon` + +**Solution:** +- Start Docker Desktop +- Verify: `docker ps` + +#### 3. AWS Credentials Not Configured + +**Error:** `Unable to locate credentials` + +**Solution:** +```bash +aws configure +# Or set environment variables +export AWS_ACCESS_KEY_ID=your_key +export AWS_SECRET_ACCESS_KEY=your_secret +``` + +#### 4. Region Mismatch + +**Error:** Resources not found in expected region + +**Solution:** +- Check `cdk.json` context region setting +- Verify AWS CLI default region: `aws configure get region` +- Override at deployment: `poetry run cdk deploy -c region=us-west-2` + +#### 5. Poetry Dependencies Not Installed + +**Error:** `ModuleNotFoundError: No module named 'aws_cdk'` + +**Solution:** +```bash +poetry install +``` + +#### 6. CDK Synthesis Fails + +**Error:** `jsii.errors.JSIIError` + +**Solution:** +- Check Python version: `python --version` (must be 3.13+) +- Reinstall dependencies: `poetry install --no-cache` +- Clear CDK cache: `rm -rf cdk.out/` + +#### 7. Test Infrastructure Not Found + +**Error:** `❌ Test infrastructure not found` + +**Solution:** +```bash +make deploy-test-infra +``` + +#### 8. Lambda Function Timeout + +**Error:** Task timed out after X seconds + +**Solution:** +- Check CloudWatch logs: `aws logs tail /aws/lambda/cns427-task-api-task-handler --follow` +- Increase timeout in stack definition if needed + +#### 9. DynamoDB Table Already Exists + +**Error:** `Table already exists` + +**Solution:** +- Use different project name: `poetry run cdk deploy -c project_name=my-unique-name` +- Or destroy existing stack first: `make destroy` + +#### 10. EventBridge Events Not Captured + +**Error:** Test events not appearing in test results table + +**Solution:** +- Verify test infrastructure is deployed: `make check-test-infra` +- Check EventBridge rule is enabled: + ```bash + aws events describe-rule \ + --name cns427-task-api-test-rule \ + --event-bus-name cns427-task-api-core-task-events \ + --region us-west-2 + ``` +- Check Lambda logs for errors: + ```bash + aws logs tail /aws/lambda/cns427-task-api-test-subscriber --follow + ``` + +#### 11. 
Log Group Already Exists After CDK Changes
+
+**Error:** `Resource of type 'AWS::Logs::LogGroup' with identifier '/aws/lambda/...' already exists`
+
+**Why this happens:**
+
+When you modify CDK code to change how log groups are created (e.g., switching from the `log_retention` parameter to explicit `LogGroup` resources), CloudFormation tries to create a log group whose name already exists from the previous deployment.
+
+**Background:**
+
+Originally, log groups were created implicitly using the `log_retention` parameter on Lambda functions. This approach was deprecated by AWS CDK. The modern approach is to create explicit `LogGroup` resources with a `retention` property. However, when you make this change, CloudFormation sees the explicit log group as a new resource and tries to create it, conflicting with the existing one.
+
+**Solution:**
+
+You need to manually delete the existing log groups before redeploying:
+
+```bash
+# 1. List all log groups for your project
+aws logs describe-log-groups \
+  --log-group-name-prefix /aws/lambda/cns427-task-api \
+  --region us-west-2
+
+# 2. Delete each log group manually
+aws logs delete-log-group \
+  --log-group-name /aws/lambda/cns427-task-api-task-handler \
+  --region us-west-2
+
+aws logs delete-log-group \
+  --log-group-name /aws/lambda/cns427-task-api-notification-handler \
+  --region us-west-2
+
+aws logs delete-log-group \
+  --log-group-name /aws/lambda/cns427-task-api-test-subscriber \
+  --region us-west-2
+
+# 3. Redeploy with the updated CDK code
+make deploy
+make deploy-test-infra
+```
+
+**Alternative (if you have many log groups):**
+
+```bash
+# Delete all log groups for your project at once
+# (xargs appends each name after --log-group-name, one invocation per name)
+aws logs describe-log-groups \
+  --log-group-name-prefix /aws/lambda/cns427-task-api \
+  --region us-west-2 \
+  --query 'logGroups[*].logGroupName' \
+  --output text | \
+  xargs -n 1 aws logs delete-log-group --region us-west-2 --log-group-name
+```
+
+**Important:** This will delete your existing logs. 
If you need to preserve them, export them first: + +```bash +# Export logs before deletion (optional) +aws logs create-export-task \ + --log-group-name /aws/lambda/cns427-task-api-task-handler \ + --from $(date -u -d '30 days ago' +%s)000 \ + --to $(date -u +%s)000 \ + --destination your-s3-bucket \ + --destination-prefix lambda-logs/ +``` + +### Getting Help + +- Check CloudWatch logs for Lambda errors +- Review CDK synthesis output: `poetry run cdk synth` +- Enable verbose logging: `poetry run cdk deploy --verbose` +- Check AWS service quotas in your account + +### Useful Commands + +```bash +# List all CDK stacks +poetry run cdk list + +# Show differences before deployment +poetry run cdk diff + +# Synthesize CloudFormation templates +poetry run cdk synth + +# View CloudWatch logs +aws logs tail /aws/lambda/FUNCTION-NAME --follow + +# Describe CloudFormation stack +aws cloudformation describe-stacks --stack-name STACK-NAME + +# Check AWS service quotas +aws service-quotas list-service-quotas --service-code lambda +``` + +## Examples + +### Example 1: Deploy to Multiple Environments + +```bash +# Development +poetry run cdk deploy --all -c environment=dev + +# Staging +poetry run cdk deploy --all -c environment=staging -c project_name=cns427-task-api-staging + +# Production +poetry run cdk deploy --all -c environment=prod -c project_name=cns427-task-api-prod -c region=us-east-1 +``` + +### Example 2: Custom Project Name + +```bash +# Deploy with custom project name +poetry run cdk deploy --all -c project_name=my-company-task-api + +# This creates resources like: +# - my-company-task-api-tasks (DynamoDB table) +# - my-company-task-api-task-handler (Lambda function) +# - my-company-task-api-core-task-events (EventBridge bus) +``` + +### Example 3: Multi-Region Deployment + +```bash +# Deploy to us-west-2 +poetry run cdk deploy --all -c region=us-west-2 -c project_name=task-api-west + +# Deploy to eu-west-1 +poetry run cdk deploy --all -c region=eu-west-1 -c project_name=task-api-eu +``` + +### Example 4: Development Workflow + +```bash +# 1. Install dependencies +poetry install + +# 2. Run unit tests locally (no deployment needed) +make test-unit + +# 3. Deploy core application +make deploy + +# 4. Deploy test infrastructure +make deploy-test-infra + +# 5. Verify test infrastructure +make check-test-infra + +# 6. Run integration tests (requires deployment) +make test-integration + +# 7. Run EventBridge tests (requires deployment) +make test-eventbridge + +# 8. Make code changes +# ... edit code ... + +# 9. Run unit tests again +make test-unit + +# 10. Deploy updates +make deploy + +# 11. Run all tests +make test + +# 12. 
Cleanup when done +make destroy-test-infra +make destroy +``` + +## Next Steps + +- **[Configuration Guide](configuration.md)** - Learn about infrastructure configuration +- **[Testing Guide](testing-guide.md)** - Understand the testing strategy +- **[Architecture Guide](architecture.md)** - Deep dive into the architecture + +## References + +- [AWS CDK Documentation](https://docs.aws.amazon.com/cdk/) +- [AWS Lambda Best Practices](https://docs.aws.amazon.com/lambda/latest/dg/best-practices.html) +- [AWS CLI Configuration](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-quickstart.html) diff --git a/python-test-samples/cns427-testable-serverless-architecture/docs/testing-guide.md b/python-test-samples/cns427-testable-serverless-architecture/docs/testing-guide.md new file mode 100644 index 00000000..bbb5a50a --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/docs/testing-guide.md @@ -0,0 +1,945 @@ +# Testing Guide + +This guide explains the comprehensive testing strategy used in this serverless application, including the honeycomb model, testing patterns, and best practices. + +## Table of Contents + +- [Testing Philosophy](#testing-philosophy) +- [The Honeycomb Model](#the-honeycomb-model) +- [Test Types](#test-types) +- [Testing Patterns](#testing-patterns) +- [Running Tests](#running-tests) +- [Writing Tests](#writing-tests) +- [Troubleshooting](#troubleshooting) + +## Testing Philosophy + +### Core Principles + +1. **Test at the Right Level**: Test each layer appropriately +2. **Real AWS Over Mocks**: Use real AWS services for integration tests +3. **Fakes Over Mocks**: Use in-memory fakes for unit tests +4. **Fast Feedback**: Unit tests run in milliseconds +5. **Confidence**: Integration tests validate real behavior + +### Why This Matters for Serverless + +Serverless applications have unique testing challenges: +- Most bugs occur at service boundaries (DynamoDB, EventBridge, API Gateway) +- AWS SDK behavior is complex and hard to mock accurately +- Integration issues are the primary source of production failures +- Mocking AWS services doesn't catch real-world issues + +## The Honeycomb Model + +### Traditional Pyramid vs Honeycomb + +``` +❌ TRADITIONAL PYRAMID (Doesn't work well for serverless) + /\ + / \ E2E Tests (10%) + /____\ + / \ Integration Tests (20%) + /________\ + / \ Unit Tests (70%) + /____________\ + +Problems for Serverless: +β€’ Most bugs are at service boundaries +β€’ Mocking AWS is complex and unreliable +β€’ Unit tests give false confidence +``` + +``` +βœ… HONEYCOMB MODEL (Better for serverless) + + E2E Tests (10%) + / Critical flows \ + / \ + / Integration Tests (60%) \ + | Service Boundaries. 
| + | Real AWS Services | + | Error Handling, Scale | + \ / + \ / + \ Unit Tests (30%) / + Pure Business Logic + Fast, Isolated +``` + +### Our Test Distribution + +``` +Total: 102 tests + +Integration Tests: 35% (36 tests) +β”œβ”€ Handler Tests: 17 tests (API Gateway boundary) +β”‚ β”œβ”€ Request/response contracts +β”‚ β”œβ”€ HTTP status codes +β”‚ └─ End-to-end handler flow +β”œβ”€ Event Contract Tests: 4 tests (EventBridge boundary) +β”‚ β”œβ”€ Schema validation +β”‚ └─ Consumer contracts +└─ AWS Service Tests: 15 tests + β”œβ”€ DynamoDB integration (7 tests) + └─ EventBridge integration (8 tests) + +Unit Tests: 56% (57 tests) +β”œβ”€ Domain Logic: Pure business rules +β”œβ”€ Data Models: Validation and serialization +└─ Helper Utilities: Test support functions + +Property-Based Tests: 6% (6 tests) +└─ Complex Algorithms: Circular dependency detection + +E2E Tests: 3% (3 tests) +└─ Critical user workflows +``` + +**Key Insight**: Handler and event contract tests are classified as **integration tests** because they test boundaries (API Gateway, EventBridge), not pure business logic. This gives us a true honeycomb distribution. + +### Why This Distribution Works + +**Most serverless bugs occur at integration boundaries** (API contracts, event schemas, AWS services), validating our 35% integration test focus. Combined with 56% unit tests for domain logic, 6% property-based tests for complex algorithms, and 3% E2E for critical workflows, this proves the honeycomb model is correct for serverless applications. + +## Test Types + +### 1. Unit Tests (Fast, Isolated) + +**Purpose**: Test business logic without AWS dependencies + +**Location**: `tests/unit/` + +**Characteristics**: +- βœ… No AWS SDK calls +- βœ… In-memory fakes +- βœ… Fast execution (< 1 second total) +- βœ… No network calls +- βœ… Deterministic results + +**What to Test**: +- Pure business logic +- Domain rules and validation +- HTTP request/response formatting +- Model validation +- Error handling logic + +**Example**: +```python +def test_create_task_validates_title(): + """Test business rule: title must be at least 3 characters.""" + # GIVEN in-memory fakes + repository = InMemoryTaskRepository() + publisher = InMemoryEventPublisher() + service = TaskService(repository, publisher) + + # WHEN creating task with short title + with pytest.raises(ValueError, match="at least 3 characters"): + service.create_task(title="ab") + + # THEN no task created + assert repository.count() == 0 + assert publisher.count() == 0 +``` + +**Key Pattern**: Use **in-memory fakes** instead of mocks +```python +# ❌ DON'T: Complex mock setup +@patch('boto3.resource') +def test_with_mocks(mock_resource): + mock_table = Mock() + mock_resource.return_value.Table.return_value = mock_table + # ... complex setup + +# βœ… DO: Simple fake injection +def test_with_fakes(): + repository = InMemoryTaskRepository() + service = TaskService(repository) + # ... simple and clear +``` + +### 2. 
Integration Tests (Real AWS)
+
+**Purpose**: Test complete flows with real AWS services
+
+**Location**: `tests/integration/`
+
+**Characteristics**:
+- βœ… Real AWS SDK calls
+- βœ… Real DynamoDB operations
+- βœ… Real EventBridge publishing
+- βœ… Validates actual AWS behavior
+- ⚠️ Requires AWS credentials
+- ⚠️ Slower than unit tests (seconds)
+
+**What to Test**:
+- CRUD operations with real DynamoDB
+- Event publishing to real EventBridge
+- Error handling with real AWS errors
+- Pagination and limits
+- Concurrent operations
+- IAM permissions
+
+**Example - DynamoDB Integration**:
+```python
+@pytest.mark.integration
+def test_create_task_persists_to_dynamodb():
+    """Test task creation with real DynamoDB."""
+    # GIVEN real DynamoDB adapter
+    repository = DynamoDBTaskRepository(
+        table_name=os.getenv('TEST_TASKS_TABLE_NAME')
+    )
+    created_task = None
+
+    try:
+        # WHEN creating task
+        task = Task(title="Integration Test Task")
+        created_task = repository.create_task(task)
+
+        # THEN verify in DynamoDB
+        retrieved_task = repository.get_task(created_task.task_id)
+        assert retrieved_task is not None
+        assert retrieved_task.title == "Integration Test Task"
+
+    finally:
+        # ALWAYS cleanup; guarded so a failure before creation
+        # doesn't raise NameError here (see Pattern 4 below)
+        if created_task:
+            repository.delete_task(created_task.task_id)
+```
+
+**Example - EventBridge Integration**:
+```python
+@pytest.mark.integration
+def test_task_created_event_published_to_eventbridge():
+    """Test event publishing with real EventBridge."""
+    # GIVEN real EventBridge publisher
+    publisher = EventBridgePublisher(
+        event_bus_name=os.getenv('TEST_EVENT_BUS_NAME')
+    )
+
+    # WHEN publishing event
+    task = Task(title="Test Task")
+    publisher.publish_task_created(task)
+
+    # THEN verify event captured by test harness
+    # (Test harness Lambda captures events to DynamoDB)
+    time.sleep(2)  # Allow event processing
+
+    test_results = query_test_results_table(task.task_id)
+    assert len(test_results) == 1
+    assert test_results[0]['detail_type'] == 'TaskCreated'
+```
+
+### 3. Error Simulation Tests
+
+**Purpose**: Test error handling without affecting real AWS resources
+
+**Location**: `tests/integration/` (marked with error simulation)
+
+**Characteristics**:
+- βœ… Simulates AWS errors
+- βœ… Fast execution (no real AWS calls)
+- βœ… Predictable error conditions
+- βœ… No AWS costs
+- βœ… Safe to run anytime
+
+**What to Test**:
+- DynamoDB throttling
+- Access denied errors
+- Resource not found
+- Validation errors
+- Service unavailable
+- Network timeouts
+
+**Example**:
+```python
+class ThrottlingRepository:
+    """Fake that simulates DynamoDB throttling."""
+
+    def create_task(self, task: Task) -> Task:
+        raise ClientError(
+            {
+                'Error': {
+                    'Code': 'ProvisionedThroughputExceededException',
+                    'Message': 'Rate exceeded'
+                }
+            },
+            'PutItem'
+        )
+
+def test_handles_dynamodb_throttling():
+    """Test graceful handling of DynamoDB throttling."""
+    # GIVEN throttling repository
+    repository = ThrottlingRepository()
+    service = TaskService(repository)
+
+    # WHEN creating task
+    with pytest.raises(RepositoryError) as exc_info:
+        service.create_task(title="Test")
+
+    # THEN appropriate error raised
+    assert "throttling" in str(exc_info.value).lower()
+```
+
+### 4. 
End-to-End Tests (Planned) + +**Purpose**: Test critical user workflows through the entire system + +**Location**: `tests/e2e/` + +**Characteristics**: +- βœ… Full system integration +- βœ… Real API Gateway endpoints +- βœ… Real user workflows +- ⚠️ Slowest tests +- ⚠️ Most expensive + +**What to Test**: +- Critical user journeys +- Multi-step workflows +- Cross-service interactions +- Authentication flows +- Error recovery + +## Testing Patterns + +### Pattern 1: Given/When/Then + +All tests follow this structure: + +```python +def test_behavior_description(): + # GIVEN - Setup test data and conditions + repository = InMemoryTaskRepository() + service = TaskService(repository) + + # WHEN - Execute the behavior being tested + task = service.create_task(title="Test Task") + + # THEN - Assert expected outcomes + assert task.title == "Test Task" + assert task.status == TaskStatus.PENDING + + # AND - Verify side effects (optional) + assert repository.count() == 1 +``` + +### Pattern 2: Dependency Injection for Testing + +```python +# Production: Self-initializing +service = TaskService() # Uses real AWS adapters + +# Testing: Inject fakes +repository = InMemoryTaskRepository() +publisher = InMemoryEventPublisher() +service = TaskService(repository, publisher) +``` + +### Pattern 3: Test Data Factories + +```python +def create_test_task(**overrides) -> Task: + """Create task with sensible defaults.""" + defaults = { + 'title': 'Test Task', + 'description': 'Test description', + 'priority': TaskPriority.MEDIUM, + 'status': TaskStatus.PENDING + } + defaults.update(overrides) + return Task(**defaults) + +# Usage +task = create_test_task(priority=TaskPriority.HIGH) +``` + +### Pattern 4: Cleanup in Finally Blocks + +```python +@pytest.mark.integration +def test_with_real_aws(): + """Always cleanup, even if test fails.""" + repository = DynamoDBTaskRepository() + task = None + + try: + # Test code + task = repository.create_task(Task(title="Test")) + assert task is not None + + finally: + # Cleanup always runs + if task: + repository.delete_task(task.task_id) +``` + +### Pattern 5: Socket Blocking for Unit Tests + +```python +# tests/unit/conftest.py +import pytest +import socket + +@pytest.fixture(scope='session', autouse=True) +def block_network(): + """Block network calls in unit tests.""" + def guard(*args, **kwargs): + raise RuntimeError("Network call blocked in unit test") + + socket.socket = guard +``` + +## Running Tests + +### Environment Setup + +Before running integration tests, ensure your environment is configured correctly: + +```bash +# Set AWS region (defaults to us-west-2 if not set) +export AWS_DEFAULT_REGION=us-west-2 + +# Or use AWS_REGION +export AWS_REGION=us-west-2 + +# Optional: Override test infrastructure stack name +export TEST_INFRASTRUCTURE_STACK_NAME=CNS427TaskApiTestInfrastructure +``` + +**Note**: The test scripts default to `us-west-2` to match the deployed infrastructure. If you deploy to a different region, set the environment variable accordingly. 
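+
+If you'd rather have integration tests skip cleanly than fail with confusing AWS errors when this setup is missing, a guard fixture can check the environment up front. A minimal sketch (the fixture name and message are illustrative, not part of this project):
+
+```python
+# tests/integration/conftest.py (sketch)
+import os
+
+import pytest
+
+
+@pytest.fixture(autouse=True)
+def _require_test_infrastructure():
+    """Skip integration tests when test infrastructure is not configured."""
+    if not os.getenv('TEST_TASKS_TABLE_NAME'):
+        pytest.skip('TEST_TASKS_TABLE_NAME not set - run: make deploy-test-infra')
+```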
+ +### All Tests + +```bash +# Run all tests (unit + integration) +make test + +# Or with Poetry +poetry run test-all +``` + +### Unit Tests Only + +```bash +# Fast, no AWS required +make test-unit + +# Or with Poetry +poetry run test-unit + +# Or with pytest directly +poetry run pytest tests/unit -v +``` + +### Integration Tests Only + +```bash +# Requires AWS credentials and deployed infrastructure +make test-integration + +# Or with Poetry +poetry run test-integration + +# Or with pytest directly +poetry run pytest tests/integration -m integration -v +``` + +### EventBridge Tests + +```bash +# Requires test harness deployed +make test-eventbridge + +# Or with Poetry +poetry run test-eventbridge + +# Or with pytest directly +poetry run pytest tests/integration/test_eventbridge_integration.py -v +``` + +### Coverage Report + +```bash +# Generate HTML coverage report +make coverage + +# View report +open htmlcov/all/index.html +``` + +### Specific Tests + +```bash +# Run specific test file +poetry run pytest tests/unit/test_task_service.py -v + +# Run specific test +poetry run pytest tests/unit/test_task_service.py::test_create_task -v + +# Run tests matching pattern +poetry run pytest -k "create_task" -v +``` + +### Debug Mode + +```bash +# Verbose output with print statements +poetry run pytest tests/unit -v -s + +# Stop on first failure +poetry run pytest tests/unit -x + +# Drop into debugger on failure +poetry run pytest tests/unit --pdb +``` + +## Writing Tests + +### Unit Test Template + +```python +# tests/unit/test_my_feature.py +import pytest +from task_api.domain.task_service import TaskService +from tests.shared.fakes.in_memory_repository import InMemoryTaskRepository +from tests.shared.fakes.in_memory_publisher import InMemoryEventPublisher + +class TestMyFeature: + """Test suite for my feature.""" + + @pytest.fixture + def repository(self): + """Create fresh repository for each test.""" + return InMemoryTaskRepository() + + @pytest.fixture + def publisher(self): + """Create fresh publisher for each test.""" + return InMemoryEventPublisher() + + @pytest.fixture + def service(self, repository, publisher): + """Create service with test dependencies.""" + return TaskService(repository, publisher) + + def test_happy_path(self, service): + """Test successful operation.""" + # GIVEN + # ... setup + + # WHEN + result = service.do_something() + + # THEN + assert result is not None + + def test_error_case(self, service): + """Test error handling.""" + # GIVEN + # ... setup error condition + + # WHEN/THEN + with pytest.raises(ValueError): + service.do_something_invalid() +``` + +### Integration Test Template + +```python +# tests/integration/test_my_integration.py +import pytest +import os +from task_api.integration.dynamodb_adapter import DynamoDBTaskRepository + +@pytest.mark.integration +class TestMyIntegration: + """Integration tests with real AWS.""" + + @pytest.fixture + def repository(self): + """Create real DynamoDB repository.""" + return DynamoDBTaskRepository( + table_name=os.getenv('TEST_TASKS_TABLE_NAME') + ) + + def test_with_real_aws(self, repository): + """Test with real AWS service.""" + task = None + + try: + # GIVEN + # ... 
setup + + # WHEN + task = repository.create_task(Task(title="Test")) + + # THEN + assert task is not None + + # Verify in AWS + retrieved = repository.get_task(task.task_id) + assert retrieved.title == "Test" + + finally: + # ALWAYS cleanup + if task: + repository.delete_task(task.task_id) +``` + +### Test Naming Conventions + +```python +# File names +test_task_service.py # Unit tests for TaskService +test_dynamodb_integration.py # Integration tests for DynamoDB + +# Class names +class TestTaskService: # Test suite for TaskService +class TestCreateTask: # Test suite for create_task method + +# Method names +def test_create_task_returns_task(): # Happy path +def test_create_task_with_invalid_title_raises(): # Error case +def test_create_task_publishes_event(): # Side effect +``` + +## What to Focus On + +### Unit Tests: Focus on Business Logic + +βœ… **DO Test**: +- Business rules and validation +- Domain logic +- Error conditions +- Edge cases +- Data transformations + +❌ **DON'T Test**: +- AWS SDK behavior +- Network calls +- Database operations +- External service integration + +### Integration Tests: Focus on Service Boundaries + +βœ… **DO Test**: +- Real AWS operations +- Error handling with real errors +- Data persistence +- Event publishing +- Pagination and limits +- Concurrent operations + +❌ **DON'T Test**: +- Business logic (that's unit tests) +- Every edge case (too slow) +- UI/presentation logic + +### What to Look For in Failures + +**Unit Test Failures**: +- Business logic bugs +- Validation errors +- Domain rule violations +- Logic errors + +**Integration Test Failures**: +- AWS configuration issues +- IAM permission problems +- Service limits +- Network issues +- Data format mismatches +- Event schema changes + +### 5. Property-Based Tests (Hypothesis) + +**Purpose**: Test complex algorithms with automatically generated test cases + +**Location**: `tests/property_based/` + +**Characteristics**: +- βœ… Generates hundreds of test cases automatically +- βœ… Tests mathematical properties and invariants +- βœ… Finds edge cases you might not think of +- βœ… Best for complex algorithms (graph traversal, parsing, etc.) 
+
+- ⚠️ Slower than unit tests (runs 100 examples per test)
+- ⚠️ Not suitable for integration tests or specific data flows
+
+**What to Test**:
+- Complex algorithms with clear properties
+- Graph algorithms (cycle detection, traversal)
+- Data structure invariants
+- Parsing and serialization
+- Mathematical properties
+
+**What NOT to Test**:
+- Integration with external systems
+- End-to-end workflows
+- Tests requiring specific data values
+- UI interactions
+
+**Example**:
+```python
+from hypothesis import given
+from hypothesis import strategies as st
+
+@given(tasks=st.lists(task_ids, min_size=4, max_size=10, unique=True))
+def test_long_chain_cycle(self, tasks):
+    """Property: Cycles of any length are detected."""
+    # Create chain: tasks[0]β†’tasks[1]β†’...β†’tasks[n-1]
+    graph = {}
+    for i in range(len(tasks) - 1):
+        graph[tasks[i]] = [tasks[i + 1]]
+
+    # Check if tasks[n-1]β†’tasks[0] completes the cycle
+    result = has_circular_dependency(tasks[-1], tasks[0], graph)
+
+    assert result is True
+```
+
+**Running Property-Based Tests**:
+```bash
+# Run property-based tests only
+make test-property
+
+# Run with statistics
+poetry run pytest tests/property_based/ -v --hypothesis-show-statistics
+
+# Re-run with a fresh random seed (Hypothesis runs 100 examples per test by
+# default; to change the count, register a settings profile and select it
+# with --hypothesis-profile)
+poetry run pytest tests/property_based/ -v --hypothesis-seed=random
+```
+
+**Further Reading**: See `tests/property_based/README.md` for comprehensive documentation on property-based testing concepts, when to use PBT, and interpreting Hypothesis statistics.
+
+## Troubleshooting
+
+### Common Issues
+
+**1. Unit Tests Making Network Calls**
+```
+Error: RuntimeError: Network call blocked in unit test
+
+Solution: Check that you're using fakes, not real adapters
+```
+
+**2. Integration Tests Failing - No AWS Credentials**
+```
+Error: NoCredentialsError: Unable to locate credentials
+
+Solution: Configure AWS credentials
+aws configure
+```
+
+**3. Integration Tests Failing - Table Not Found**
+```
+Error: ResourceNotFoundException: Table not found
+
+Solution: Set TEST_TASKS_TABLE_NAME environment variable
+export TEST_TASKS_TABLE_NAME=cns427-task-api-test-tasks
+```
+
+**4. EventBridge Tests Failing - No Events Captured**
+```
+Error: No events found in test results table
+
+Solution:
+1. Deploy test harness: make deploy-test-infra
+2. Verify deployment: make check-test-infra
+3. Check EventBridge rule is enabled
+```
+
+**5. Tests Are Slow**
+```
+Problem: Unit tests taking > 1 second
+
+Solution:
+1. Check for real AWS calls (should use fakes)
+2. Check for network calls (should be blocked)
+3. Reduce test data size
+4. Use pytest-xdist for parallel execution
+```
+
+### Debug Techniques
+
+**Print Debugging**:
+```python
+def test_something(service):
+    result = service.do_something()
+    print(f"Result: {result}")  # Use -s flag to see output
+    assert result is not None
+```
+
+**Debugger**:
+```python
+def test_something(service):
+    import pdb; pdb.set_trace()  # Breakpoint
+    result = service.do_something()
+```
+
+**Verbose Assertions**:
+```python
+# ❌ Hard to debug
+assert response == expected
+
+# βœ… Clear error message
+assert response['statusCode'] == 201, \
+    f"Expected 201, got {response['statusCode']}"
+```
+
+**Changing Log Level for Debug Logs**:
+
+AWS Lambda Powertools Logger respects the `LOG_LEVEL` environment variable. 
To see DEBUG logs: + +```bash +# Option 1: Set environment variable in Lambda console +# Go to Lambda β†’ Configuration β†’ Environment variables +# Add: LOG_LEVEL = DEBUG + +# Option 2: Update CDK stack temporarily +# infrastructure/core/task_api_stack.py +environment={ + 'LOG_LEVEL': 'DEBUG', # Change from 'INFO' to 'DEBUG' + ... +} + +# Option 3: Set locally for integration tests +export LOG_LEVEL=DEBUG +poetry run pytest tests/integration/ + +# Option 4: Use AWS CLI to update Lambda +aws lambda update-function-configuration \ + --function-name cns427-task-api-task-handler \ + --environment Variables={LOG_LEVEL=DEBUG,TASKS_TABLE_NAME=cns427-task-api-tasks,...} \ + --region us-west-2 +``` + +**What You'll See with DEBUG Level**: +``` +# INFO level (default) +INFO create_task:45 Creating task with ID: abc-123 +INFO create_task:97 Created task: abc-123 + +# DEBUG level (more detail) +INFO create_task:45 Creating task with ID: abc-123 +DEBUG create_task:48 Validating dependencies: [] +DEBUG update_task:98 Existing task version: 1763172928050, Request version: 1763172928050 +DEBUG publish_event:55 Publishing event: {"Source": "cns427-task-api", "DetailType": "TaskCreated"} +INFO create_task:97 Created task: abc-123 +``` + +**Tip**: Remember to set `LOG_LEVEL` back to `INFO` in production to avoid excessive logging costs! + +### Understanding Structured Logs + +AWS Lambda Powertools Logger provides structured JSON logs with automatic module name inclusion. + +**Log Format**: +```json +{ + "level": "INFO", + "location": "create_task:45", + "message": "Creating task with ID: 751073b5-5a3c-46cd-9d04-f477aa209b9d", + "timestamp": "2025-11-15 02:15:28,050+0000", + "service": "task-api", + "module": "task_service" +} +``` + +**Module Names by File**: +| File | Module Name in Logs | +|------|---------------------| +| `services/task_service/domain/task_service.py` | `task_service` | +| `services/notification_service/domain/notification_service.py` | `notification_service` | +| `shared/integration/dynamodb_adapter.py` | `dynamodb_adapter` | +| `shared/integration/eventbridge_adapter.py` | `eventbridge_adapter` | +| `services/task_service/handler.py` | `handler` | + +**Querying Logs with CloudWatch Logs Insights**: + +```sql +# Find all logs from task_service domain +fields @timestamp, level, location, message +| filter module = "task_service" +| sort @timestamp desc +| limit 100 + +# Find all update operations +fields @timestamp, level, location, message +| filter message like /Updating task/ +| sort @timestamp desc + +# Find version conflicts +fields @timestamp, level, location, message +| filter message like /Version conflict/ +| sort @timestamp desc + +# Trace a specific task through the system +fields @timestamp, level, location, message +| filter message like /751073b5-5a3c-46cd-9d04-f477aa209b9d/ +| sort @timestamp asc +``` + +**Example: Tracing a Request**: +``` +1. Handler receives request + INFO lambda_handler:25 Processing POST /tasks request + +2. Domain service creates task + INFO create_task:45 Creating task with ID: abc-123 + DEBUG create_task:48 Validating dependencies: [] + +3. Repository persists task + INFO create_task:97 Created task: abc-123 + +4. EventBridge publishes event + DEBUG publish_event:55 Publishing event: TaskCreated + INFO publish_event:75 Published event: TaskCreated + +5. 
Handler returns response + INFO lambda_handler:35 Task created successfully: abc-123 +``` + +The `location` field (e.g., `create_task:45`) shows the function name and line number, making it easy to find the exact code that logged the message. + +## Best Practices + +### 1. Test Independence +- Each test should be completely independent +- Use fresh fixtures for each test +- No shared state between tests +- Clean up after yourself + +### 2. Clear Test Intent +- Use descriptive test names +- Follow Given/When/Then structure +- One behavior per test +- Clear assertions + +### 3. Realistic Test Data +- Use realistic data +- Test with various combinations +- Include edge cases +- Use test data factories + +### 4. Fast Feedback +- Unit tests should be fast (< 1 second total) +- Run unit tests frequently during development +- Run integration tests before committing +- Use watch mode for TDD + +### 5. Maintainable Tests +- Keep tests simple +- Avoid complex setup +- Use fixtures appropriately +- Document unusual patterns + +## Next Steps + +- **[Architecture Guide](architecture.md)** - Understand the architecture that enables testing +- **[Deployment Guide](deployment.md)** - Deploy infrastructure for integration tests +- **[Configuration Guide](configuration.md)** - Configure test infrastructure + +## References + +- [Testing Honeycomb](https://engineering.atspotify.com/2018/01/testing-of-microservices/) - Spotify Engineering +- [AWS Lambda Testing Best Practices](https://docs.aws.amazon.com/lambda/latest/dg/testing-guide.html) +- [Hexagonal Architecture Testing](https://alistair.cockburn.us/hexagonal-architecture/) diff --git a/python-test-samples/cns427-testable-serverless-architecture/docs/visual-flow-guide.md b/python-test-samples/cns427-testable-serverless-architecture/docs/visual-flow-guide.md new file mode 100644 index 00000000..f97e9379 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/docs/visual-flow-guide.md @@ -0,0 +1,374 @@ +# Visual Flow Guide - AI Audit Demo + +## Complete AI-DLC Cycle Visualization + +``` +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ AI-DLC CONTINUOUS CYCLE β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + + β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” + β”‚ INCEPTION PHASE (Demo01) β”‚ + β”‚ Requirements & Architecture β”‚ + β”‚ β”‚ + β”‚ Problem: Monolithic Lambda (demo00) β”‚ + β”‚ ↓ β”‚ + β”‚ AI-Guided Refactoring β”‚ + β”‚ ↓ β”‚ + β”‚ Hexagonal Architecture (demo01) β”‚ + β”‚ ↓ β”‚ + β”‚ πŸ€– AI AUDIT: Validate Architecture β”‚ + β”‚ βœ… Layer separation β”‚ + β”‚ βœ… Pure business logic β”‚ + β”‚ ❌ Some coupling remains β”‚ + β”‚ Score: 7-8/10 β†’ Ready for Construction β”‚ + β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + β”‚ + β–Ό + β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” + β”‚ CONSTRUCTION PHASE (Demo02) β”‚ + β”‚ Implementation & Testing β”‚ 
+ β”‚ β”‚ + β”‚ Build Comprehensive Tests β”‚ + β”‚ ↓ β”‚ + β”‚ Testing Honeycomb Model β”‚ + β”‚ β€’ 60% Integration β”‚ + β”‚ β€’ 30% Unit β”‚ + β”‚ β€’ 10% E2E β”‚ + β”‚ ↓ β”‚ + β”‚ πŸ€– AI AUDIT: Validate Testing Strategy β”‚ + β”‚ βœ… 74% integration (honeycomb validated) β”‚ + β”‚ βœ… Layer-appropriate testing β”‚ + β”‚ ❌ Missing concurrency tests β”‚ + β”‚ Score: 8/10 β†’ Ready for Operation β”‚ + β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + β”‚ + β–Ό + β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” + β”‚ OPERATION PHASE (Demo03) β”‚ + β”‚ Deploy, Monitor, Optimize β”‚ + β”‚ β”‚ + β”‚ Deploy to Production β”‚ + β”‚ ↓ β”‚ + β”‚ Collect Bug Data (20 bugs) β”‚ + β”‚ ↓ β”‚ + β”‚ πŸ€– AI ANALYSIS: Risk-Based Strategy β”‚ + β”‚ πŸ”₯ integrations.py: 40% bugs (critical) β”‚ + β”‚ πŸ”₯ 75% bugs in integration layer β”‚ + β”‚ 🎯 Prioritized roadmap β”‚ + β”‚ πŸ“ˆ Measurable targets β”‚ + β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + β”‚ + β”‚ Feedback Loop + β”‚ (Insights inform next Inception) + β”‚ + └──────────────────────────────────┐ + β”‚ + β–Ό + β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” + β”‚ CONTINUOUS β”‚ + β”‚ IMPROVEMENT β”‚ + β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +``` + +--- + +## Demo01: Architecture Validation Flow + +``` +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ DEMO00: Monolithic Lambda (Before) β”‚ +β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ +β”‚ β”‚ +β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ +β”‚ β”‚ lambda_handler() β”‚ β”‚ +β”‚ β”‚ β”œβ”€ HTTP parsing β”‚ β”‚ +β”‚ β”‚ β”œβ”€ Business logic β”‚ β”‚ +β”‚ β”‚ β”œβ”€ DynamoDB calls β”‚ β”‚ +β”‚ β”‚ β”œβ”€ EventBridge calls β”‚ β”‚ +β”‚ β”‚ └─ HTTP response β”‚ β”‚ +β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ +β”‚ β”‚ +β”‚ Problems: β”‚ +β”‚ ❌ Everything mixed together β”‚ +β”‚ ❌ Can't test without AWS β”‚ +β”‚ ❌ Complex test setup β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + β”‚ + β”‚ AI-Guided Refactoring + β–Ό +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ DEMO01: Hexagonal Architecture (After) β”‚ +β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ +β”‚ β”‚ +β”‚ 
β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ +β”‚ β”‚ Handler Layer (task_handler.py) β”‚ β”‚ +β”‚ β”‚ β€’ HTTP parsing only β”‚ β”‚ +β”‚ β”‚ β€’ Delegates to domain β”‚ β”‚ +β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ +β”‚ β”‚ β”‚ +β”‚ β–Ό β”‚ +β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ +β”‚ β”‚ Domain Layer (domain_logic.py) β”‚ β”‚ +β”‚ β”‚ β€’ Pure business logic β”‚ β”‚ +β”‚ β”‚ β€’ No AWS dependencies β”‚ β”‚ +β”‚ β”‚ β€’ TaskRepositoryProtocol β”‚ β”‚ +β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ +β”‚ β”‚ β”‚ +β”‚ β–Ό β”‚ +β”‚ β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” β”‚ +β”‚ β”‚ Integration Layer (integrations.py) β”‚ β”‚ +β”‚ β”‚ β€’ DynamoDB adapter β”‚ β”‚ +β”‚ β”‚ β€’ EventBridge adapter β”‚ β”‚ +β”‚ β”‚ β€’ Implements protocols β”‚ β”‚ +β”‚ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β”‚ +β”‚ β”‚ +β”‚ πŸ€– AI AUDIT RESULTS: β”‚ +β”‚ βœ… Layer separation achieved β”‚ +β”‚ βœ… Business logic is pure β”‚ +β”‚ βœ… Tests 10x faster β”‚ +β”‚ ❌ Domain still creates repositories internally β”‚ +β”‚ Score: 7-8/10 β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +``` + +--- + +## Demo02: Testing Strategy Validation Flow + +``` +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ TESTING HONEYCOMB MODEL FOR SERVERLESS β”‚ +β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ +β”‚ β”‚ +| E2E Tests (10%) β”‚ +| / Critical flows \ β”‚ +| / \ β”‚ +| / Integration Tests (60%) \ β”‚ +| | Service Boundaries. 
| β”‚ +| | Real AWS Services | β”‚ +| | Error Handling, Scale | β”‚ +| \ / β”‚ +| \ / β”‚ +| \ Unit Tests (30%) / β”‚ +| Pure Business Logic β”‚ + Fast, Isolated β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ ACTUAL TEST DISTRIBUTION (Demo02) β”‚ +β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ +β”‚ β”‚ +β”‚ Integration Tests: 14 tests (74%) βœ… β”‚ +β”‚ β”œβ”€ DynamoDB: 7 tests β”‚ +β”‚ β”‚ β”œβ”€ Real AWS: 4 tests (success paths) β”‚ +β”‚ β”‚ └─ Fakes: 3 tests (error simulation) β”‚ +β”‚ └─ EventBridge: 7 tests β”‚ +β”‚ β”œβ”€ Real AWS: 3 tests (with test harness) β”‚ +β”‚ └─ Fakes: 4 tests (error simulation) β”‚ +β”‚ β”‚ +β”‚ Unit Tests: 5 tests (26%) βœ… β”‚ +β”‚ β”œβ”€ Domain: 3 tests (circular dependency) β”‚ +β”‚ └─ Handler: 2 tests (HTTP contracts) β”‚ +β”‚ β”‚ +β”‚ E2E Tests: 0 tests (0%) ⚠️ β”‚ +β”‚ β”‚ +β”‚ πŸ€– AI AUDIT RESULTS: β”‚ +β”‚ βœ… Honeycomb distribution validated (74% integration) β”‚ +β”‚ βœ… Layer-appropriate testing β”‚ +β”‚ βœ… Real AWS + fakes strategy working β”‚ +β”‚ ❌ Missing: concurrency, pagination tests β”‚ +β”‚ Score: 8/10 β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +``` + +--- + +## Demo03: Risk-Based Analysis Flow + +``` +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ PRODUCTION BUG DATA (20 bugs) β”‚ +β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ +β”‚ β”‚ +β”‚ Severity Distribution: β”‚ +β”‚ πŸ”΄ Critical: 1 bug (5%) β”‚ +β”‚ 🟠 High: 7 bugs (35%) β”‚ +β”‚ 🟑 Medium: 11 bugs (55%) β”‚ +β”‚ 🟒 Low: 1 bug (5%) β”‚ +β”‚ β”‚ +β”‚ Component Distribution: β”‚ +β”‚ πŸ”₯ integrations.py: 8 bugs (40%) ← CRITICAL RISK β”‚ +β”‚ πŸ”₯ domain_logic.py: 7 bugs (35%) ← HIGH RISK β”‚ +β”‚ ⚠️ task_handler.py: 3 bugs (15%) ← MEDIUM RISK β”‚ +β”‚ βœ… models.py: 2 bugs (10%) ← LOW RISK β”‚ +β”‚ β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + β”‚ + β”‚ AI Analysis + β–Ό +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ πŸ€– AI RISK HEAT MAP β”‚ +β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ +β”‚ β”‚ +β”‚ πŸ”₯ CRITICAL RISK: integrations.py β”‚ +β”‚ β”œβ”€ BUG-005 (Critical): EventBridge silent failures β”‚ +β”‚ β”œβ”€ BUG-007 (High): DynamoDB race conditions β”‚ +β”‚ └─ BUG-011 (High): Unpaginated scans β”‚ +β”‚ β”‚ +β”‚ πŸ”₯ HIGH 
RISK: domain_logic.py β”‚ +β”‚ β”œβ”€ BUG-003 (High): Missing dependency validation β”‚ +β”‚ └─ BUG-006 (Medium): Status transitions not validated β”‚ +β”‚ β”‚ +β”‚ ⚠️ MEDIUM RISK: task_handler.py β”‚ +β”‚ └─ BUG-010 (Medium): No Content-Type validation β”‚ +β”‚ β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + β”‚ + β”‚ Prioritization + β–Ό +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ 🎯 PRIORITIZED TESTING ROADMAP β”‚ +β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ +β”‚ β”‚ +β”‚ IMMEDIATE (Week 1) - Critical & High: β”‚ +β”‚ 1. EventBridge silent failure detection (BUG-005) β”‚ +β”‚ 2. DynamoDB optimistic locking (BUG-007) β”‚ +β”‚ 3. Dependency existence validation (BUG-003) β”‚ +β”‚ β”‚ +β”‚ HIGH PRIORITY (Week 2-3) - High & Medium: β”‚ +β”‚ 4. DynamoDB pagination (BUG-011) β”‚ +β”‚ 5. Handler Content-Type validation (BUG-010) β”‚ +β”‚ 6. Business rule usage validation (BUG-006) β”‚ +β”‚ β”‚ +β”‚ STANDARD (Week 4+) - Medium & Low: β”‚ +β”‚ 7-9. Performance, edge cases, minor issues β”‚ +β”‚ β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + β”‚ + β”‚ Metrics + β–Ό +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ πŸ“ˆ MEASURABLE TARGETS β”‚ +β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ +β”‚ β”‚ +β”‚ Before: After 4 Weeks: β”‚ +β”‚ β€’ 20 bugs total β€’ <10 bugs total β”‚ +β”‚ β€’ 1 critical β€’ 0 critical βœ… β”‚ +β”‚ β€’ 7 high severity β€’ <3 high severity βœ… β”‚ +β”‚ β€’ 8 unit test gaps β€’ <4 unit test gaps β”‚ +β”‚ β€’ 6 integration gaps β€’ <3 integration gaps β”‚ +β”‚ β”‚ +β”‚ Success Metric: 90% historical bug patterns covered β”‚ +β”‚ β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +``` + +--- + +## Honeycomb Validation Visualization + +``` +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ BUG DISTRIBUTION VALIDATES HONEYCOMB MODEL β”‚ +β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ +β”‚ β”‚ +β”‚ Integration Layer Bugs: 75% β”‚ +β”‚ β”œβ”€ integrations.py: 40% β”‚ +β”‚ └─ domain_logic.py (service boundaries): 35% β”‚ +β”‚ β”‚ +β”‚ Business Logic Bugs: 15% β”‚ +β”‚ └─ domain_logic.py (pure logic): 15% β”‚ +β”‚ β”‚ +β”‚ Handler Bugs: 10% β”‚ +β”‚ └─ task_handler.py: 10% β”‚ +β”‚ β”‚ +β”‚ βœ… CONCLUSION: 75% integration bugs validates β”‚ +β”‚ 74% integration test focus from Demo02 β”‚ +β”‚ 
β”‚ +β”‚ This proves: Honeycomb > Pyramid for serverless β”‚ +β”‚ β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +``` + +--- + +## Feedback Loop Visualization + +``` +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ CONTINUOUS IMPROVEMENT CYCLE β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + + Operation Phase (Demo03) + Collect Production Data + β”‚ + β”‚ Bug patterns reveal + β”‚ architectural issues + β–Ό + β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” + β”‚ FEEDBACK TO β”‚ + β”‚ INCEPTION β”‚ + β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + β”‚ + β”‚ Insights inform + β”‚ next architecture + β–Ό + Inception Phase (Demo01) + Design Better Architecture + β”‚ + β”‚ Clean architecture + β”‚ enables better testing + β–Ό + Construction Phase (Demo02) + Build Comprehensive Tests + β”‚ + β”‚ Tests catch bugs + β”‚ before production + β–Ό + Operation Phase (Demo03) + Fewer Production Bugs + β”‚ + β”‚ Continuous + β”‚ improvement + └──────────────────────────────────────┐ + β”‚ + β–Ό + [Cycle Repeats] +``` + +--- + +## Key Metrics Dashboard + +``` +β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +β”‚ AI-DLC TRANSFORMATION METRICS β”‚ +β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€ +β”‚ β”‚ +β”‚ INCEPTION (Demo01): β”‚ +β”‚ β€’ Architecture Score: 7-8/10 β”‚ +β”‚ β€’ Test Speed: 10x faster (ms vs seconds) β”‚ +β”‚ β€’ Layer Separation: βœ… Achieved β”‚ +β”‚ β”‚ +β”‚ CONSTRUCTION (Demo02): β”‚ +β”‚ β€’ Test Distribution: 74% integration βœ… β”‚ +β”‚ β€’ Honeycomb Alignment: βœ… Validated β”‚ +β”‚ β€’ Anti-Patterns: 0 detected βœ… β”‚ +β”‚ β”‚ +β”‚ OPERATION (Demo03): β”‚ +β”‚ β€’ Bugs Analyzed: 20 β”‚ +β”‚ β€’ Risk Components: 2 (integrations, domain) β”‚ +β”‚ β€’ Target Reduction: 50% in 4 weeks β”‚ +β”‚ β”‚ +β”‚ OVERALL: β”‚ +β”‚ β€’ Time to Validate: Minutes (vs hours manually) β”‚ +β”‚ β€’ Consistency: 100% (AI applies same standards) β”‚ +β”‚ β€’ Actionability: High (specific recommendations) β”‚ +β”‚ β”‚ +β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +``` + +--- + +**Use these visualizations in your slides or draw them on a whiteboard during the demo to help the audience understand the flow!** diff --git a/python-test-samples/cns427-testable-serverless-architecture/infrastructure/__init__.py b/python-test-samples/cns427-testable-serverless-architecture/infrastructure/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/python-test-samples/cns427-testable-serverless-architecture/infrastructure/config.py b/python-test-samples/cns427-testable-serverless-architecture/infrastructure/config.py new file mode 100644 index 00000000..14e64e5d --- /dev/null +++ 
b/python-test-samples/cns427-testable-serverless-architecture/infrastructure/config.py @@ -0,0 +1,196 @@ +""" +Centralized configuration for infrastructure resource naming. + +Provides consistent naming patterns for all AWS resources with support +for environment-specific overrides via CDK context. +""" + +from dataclasses import dataclass +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from constructs import Node + + +@dataclass +class InfrastructureConfig: + """ + Configuration for infrastructure resource naming. + + Provides type-safe, computed resource names with support for + environment-specific overrides via CDK context. + + Attributes: + project_name: Base name for all resources (default: cns427-task-api) + environment: Environment identifier (default: dev) + region: AWS region for deployment (default: us-west-2) + """ + + project_name: str = 'cns427-task-api' + environment: str = 'dev' + region: str = 'us-west-2' + + # Core Infrastructure Resource Names + + def tasks_table_name(self) -> str: + """ + Get DynamoDB table name for tasks. + + Returns: + Table name in format: {project_name}-tasks + """ + return f'{self.project_name}-tasks' + + def event_bus_name(self) -> str: + """ + Get EventBridge custom event bus name. + + Returns: + Event bus name in format: {project_name}-core-task-events + """ + return f'{self.project_name}-core-task-events' + + def task_handler_function_name(self) -> str: + """ + Get Lambda function name for task CRUD operations. + + Returns: + Function name in format: {project_name}-task-handler + """ + return f'{self.project_name}-task-handler' + + def notification_handler_function_name(self) -> str: + """ + Get Lambda function name for notification processing. + + Returns: + Function name in format: {project_name}-notification-handler + """ + return f'{self.project_name}-notification-handler' + + def api_name(self) -> str: + """ + Get API Gateway REST API name. + + Returns: + API name in format: {project_name}-api + """ + return f'{self.project_name}-api' + + def task_event_rule_name(self) -> str: + """ + Get EventBridge rule name for task events. + + Returns: + Rule name in format: {project_name}-task-event-rule + """ + return f'{self.project_name}-task-event-rule' + + def log_group_name(self) -> str: + """ + Get CloudWatch log group name. + + Returns: + Log group name in format: /aws/lambda/{project_name} + """ + return f'/aws/lambda/{self.project_name}' + + # Test Harness Infrastructure Resource Names + + def test_results_table_name(self) -> str: + """ + Get DynamoDB table name for test results. + + Returns: + Table name in format: {project_name}-test-results + """ + return f'{self.project_name}-test-results' + + def test_subscriber_function_name(self) -> str: + """ + Get Lambda function name for test event subscriber. + + Returns: + Function name in format: {project_name}-test-subscriber + """ + return f'{self.project_name}-test-subscriber' + + def test_event_rule_name(self) -> str: + """ + Get EventBridge rule name for test events. + + Returns: + Rule name in format: {project_name}-test-rule + """ + return f'{self.project_name}-test-rule' + + def test_dlq_name(self) -> str: + """ + Get SQS queue name for test event DLQ. + + Returns: + Queue name in format: {project_name}-test-events-dlq + """ + return f'{self.project_name}-test-events-dlq' + + def test_execution_role_name(self) -> str: + """ + Get IAM role name for test execution. 
+ + Returns: + Role name in format: {project_name}-test-execution-role + """ + return f'{self.project_name}-test-execution-role' + + def test_dashboard_name(self) -> str: + """ + Get CloudWatch dashboard name for test monitoring. + + Returns: + Dashboard name in format: {project_name}-test-monitoring + """ + return f'{self.project_name}-test-monitoring' + + def test_harness_stack_name(self) -> str: + """ + Get CloudFormation stack name for test harness infrastructure. + + Returns: + Stack name in format: {project_name}-test-harness + """ + return f'{self.project_name}-test-harness' + + # Monitoring Infrastructure Resource Names + + def dashboard_name(self) -> str: + """ + Get CloudWatch dashboard name for main application. + + Returns: + Dashboard name in format: {project_name}-dashboard + """ + return f'{self.project_name}-dashboard' + + @classmethod + def from_cdk_context(cls, node: 'Node') -> 'InfrastructureConfig': + """ + Create configuration from CDK context. + + Loads configuration from cdk.json context or uses defaults. + Supports CLI overrides via -c flag. + + Args: + node: CDK construct node with context access + + Returns: + InfrastructureConfig instance with values from context or defaults + + Example: + >>> config = InfrastructureConfig.from_cdk_context(self.node) + >>> table_name = config.tasks_table_name() + """ + return cls( + project_name=node.try_get_context('project_name') or cls.project_name, + environment=node.try_get_context('environment') or cls.environment, + region=node.try_get_context('region') or cls.region, + ) diff --git a/python-test-samples/cns427-testable-serverless-architecture/infrastructure/core/__init__.py b/python-test-samples/cns427-testable-serverless-architecture/infrastructure/core/__init__.py new file mode 100644 index 00000000..eb069ae4 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/infrastructure/core/__init__.py @@ -0,0 +1,13 @@ +"""Core infrastructure for Task API.""" + +from infrastructure.core.task_api_stack import ( + TaskApiCoreStack, + TaskApiMonitoringStack, + TaskApiStack, +) + +__all__ = [ + 'TaskApiCoreStack', + 'TaskApiStack', + 'TaskApiMonitoringStack', +] diff --git a/python-test-samples/cns427-testable-serverless-architecture/infrastructure/core/task_api_stack.py b/python-test-samples/cns427-testable-serverless-architecture/infrastructure/core/task_api_stack.py new file mode 100644 index 00000000..f856c73a --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/infrastructure/core/task_api_stack.py @@ -0,0 +1,406 @@ +"""CDK stack for CNS427 Task Management API.""" + +from aws_cdk import ( + Duration, + RemovalPolicy, + Stack, +) +from aws_cdk import ( + aws_dynamodb as dynamodb, +) +from aws_cdk import ( + aws_events as events, +) +from aws_cdk import ( + aws_iam as iam, +) +from aws_cdk import ( + aws_logs as logs, +) +from cdk_nag import NagSuppressions +from constructs import Construct + +from infrastructure.config import InfrastructureConfig + + +class TaskApiCoreStack(Stack): + """Core infrastructure stack for Task API.""" + + def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None: + super().__init__(scope, construct_id, **kwargs) + + # Load configuration + config = InfrastructureConfig.from_cdk_context(self.node) + + # DynamoDB table for tasks + self.tasks_table = dynamodb.Table( + self, + 'TasksTable', + table_name=config.tasks_table_name(), + partition_key=dynamodb.Attribute(name='task_id', type=dynamodb.AttributeType.STRING), + 
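+            # On-demand billing: no capacity planning needed for demo-scale traffic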
billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST, + encryption=dynamodb.TableEncryption.AWS_MANAGED, + removal_policy=RemovalPolicy.DESTROY, # For demo purposes + point_in_time_recovery_specification=dynamodb.PointInTimeRecoverySpecification(point_in_time_recovery_enabled=True), + ) + + # EventBridge custom event bus + self.event_bus = events.EventBus(self, 'TaskEventBus', event_bus_name=config.event_bus_name()) + + # CloudWatch log group for application logs + self.app_log_group = logs.LogGroup( + self, + 'AppLogGroup', + log_group_name=config.log_group_name(), + retention=logs.RetentionDays.ONE_WEEK, + removal_policy=RemovalPolicy.DESTROY, + ) + + # IAM role for Lambda functions + # Using inline policies instead of AWS managed policies for better security (CDK Nag: AwsSolutions-IAM4) + self.lambda_execution_role = iam.Role( + self, + 'LambdaExecutionRole', + assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'), + description='Execution role for Task API Lambda functions', + ) + + # Add CloudWatch Logs permissions (inline policy instead of AWSLambdaBasicExecutionRole) + # Scoped to specific log groups to avoid wildcards (CDK Nag: AwsSolutions-IAM5) + self.lambda_execution_role.add_to_policy( + iam.PolicyStatement( + effect=iam.Effect.ALLOW, + actions=[ + 'logs:CreateLogGroup', + 'logs:CreateLogStream', + 'logs:PutLogEvents', + ], + resources=[ + f'arn:aws:logs:{self.region}:{self.account}:log-group:/aws/lambda/{config.task_handler_function_name()}:*', + f'arn:aws:logs:{self.region}:{self.account}:log-group:/aws/lambda/{config.notification_handler_function_name()}:*', + ], + ) + ) + + # Grant DynamoDB permissions + self.tasks_table.grant_read_write_data(self.lambda_execution_role) + + # Grant EventBridge permissions + self.event_bus.grant_put_events_to(self.lambda_execution_role) + + # Grant CloudWatch Logs permissions for application log group + self.app_log_group.grant_write(self.lambda_execution_role) + + # Add X-Ray tracing permissions + # Note: X-Ray requires wildcard resources as traces can be sent to any region + self.lambda_execution_role.add_to_policy( + iam.PolicyStatement( + effect=iam.Effect.ALLOW, + actions=['xray:PutTraceSegments', 'xray:PutTelemetryRecords'], + resources=['*'], # X-Ray requires wildcard - this is acceptable + ) + ) + + # Suppress CDK Nag warnings for acceptable wildcards + NagSuppressions.add_resource_suppressions( + self.lambda_execution_role, + [ + { + 'id': 'AwsSolutions-IAM5', + 'reason': 'X-Ray tracing requires wildcard permissions to send traces to any region. This is an AWS service requirement and is documented in AWS X-Ray documentation.', + 'appliesTo': ['Resource::*'], + }, + { + 'id': 'AwsSolutions-IAM5', + 'reason': 'CloudWatch Logs requires wildcard suffix (:*) to allow log stream creation within the log group. ' + 'The log group itself is scoped to specific Lambda functions. 
This is standard AWS practice for Lambda logging.', + 'appliesTo': [ + f'Resource::arn:aws:logs:{self.region}::log-group:/aws/lambda/{config.task_handler_function_name()}:*', + f'Resource::arn:aws:logs:{self.region}::log-group:/aws/lambda/{config.notification_handler_function_name()}:*', + ], + }, + ], + apply_to_children=True, + ) + + +class TaskApiStack(Stack): + """API infrastructure stack for Task API.""" + + def __init__(self, scope: Construct, construct_id: str, core_stack: TaskApiCoreStack, **kwargs) -> None: + super().__init__(scope, construct_id, **kwargs) + + # Load configuration + config = InfrastructureConfig.from_cdk_context(self.node) + + # Import core resources + self.tasks_table = core_stack.tasks_table + self.event_bus = core_stack.event_bus + self.lambda_role = core_stack.lambda_execution_role + + # Lambda function for task CRUD operations + from aws_cdk import BundlingOptions + from aws_cdk import aws_lambda as lambda_ + + # Create log group for task handler + task_handler_log_group = logs.LogGroup( + self, + 'TaskHandlerLogGroup', + log_group_name=f'/aws/lambda/{config.task_handler_function_name()}', + retention=logs.RetentionDays.ONE_WEEK, + removal_policy=RemovalPolicy.DESTROY, + ) + + self.task_handler = lambda_.Function( + self, + 'TaskHandler', + function_name=config.task_handler_function_name(), + runtime=lambda_.Runtime.PYTHON_3_13, + architecture=lambda_.Architecture.ARM_64, + handler='services.task_service.handler.lambda_handler', + code=lambda_.Code.from_asset( + '.', + bundling=BundlingOptions( + image=lambda_.Runtime.PYTHON_3_13.bundling_image, + platform='linux/arm64', + command=[ + 'bash', + '-c', + 'pip install -r services/task_service/requirements.txt -t /asset-output && ' + + 'cp -r services /asset-output/ && ' + + 'cp -r shared /asset-output/', + ], + ), + ), + role=self.lambda_role, + timeout=Duration.seconds(30), + memory_size=512, + tracing=lambda_.Tracing.ACTIVE, + log_group=task_handler_log_group, + description='Handles task CRUD operations via API Gateway with DynamoDB persistence and EventBridge publishing', + environment={ + 'TASKS_TABLE_NAME': self.tasks_table.table_name, + 'EVENT_BUS_NAME': self.event_bus.event_bus_name, + 'POWERTOOLS_SERVICE_NAME': 'task-api', + 'POWERTOOLS_METRICS_NAMESPACE': 'CNS427/TaskAPI', + 'LOG_LEVEL': 'INFO', + }, + ) + + # Lambda function for notification processing + # Create log group for notification handler + notification_handler_log_group = logs.LogGroup( + self, + 'NotificationHandlerLogGroup', + log_group_name=f'/aws/lambda/{config.notification_handler_function_name()}', + retention=logs.RetentionDays.ONE_WEEK, + removal_policy=RemovalPolicy.DESTROY, + ) + + self.notification_handler = lambda_.Function( + self, + 'NotificationHandler', + function_name=config.notification_handler_function_name(), + runtime=lambda_.Runtime.PYTHON_3_13, + architecture=lambda_.Architecture.ARM_64, + handler='services.notification_service.handler.lambda_handler', + code=lambda_.Code.from_asset( + '.', + bundling=BundlingOptions( + image=lambda_.Runtime.PYTHON_3_13.bundling_image, + platform='linux/arm64', + command=[ + 'bash', + '-c', + 'pip install -r services/notification_service/requirements.txt -t /asset-output && ' + + 'cp -r services /asset-output/ && ' + + 'cp -r shared /asset-output/', + ], + ), + ), + role=self.lambda_role, + timeout=Duration.seconds(30), + memory_size=256, + tracing=lambda_.Tracing.ACTIVE, + log_group=notification_handler_log_group, + description='Processes task events from EventBridge and handles 
notification logic', + environment={'POWERTOOLS_SERVICE_NAME': 'task-notifications', 'POWERTOOLS_METRICS_NAMESPACE': 'CNS427/TaskAPI', 'LOG_LEVEL': 'INFO'}, + ) + + # API Gateway + from aws_cdk import CfnOutput + from aws_cdk import aws_apigateway as apigateway + + self.api = apigateway.RestApi( + self, + 'TaskApi', + rest_api_name=config.api_name(), + description='CNS427 Task Management API', + cloud_watch_role=False, # Disable automatic role creation to avoid account-level conflicts + default_cors_preflight_options=apigateway.CorsOptions( + allow_origins=apigateway.Cors.ALL_ORIGINS, + allow_methods=apigateway.Cors.ALL_METHODS, + allow_headers=['Content-Type', 'Authorization', 'X-Amz-Date', 'X-Amz-Security-Token'], + ), + ) + + # Add request validator for basic input validation (CDK Nag: AwsSolutions-APIG2) + request_validator = self.api.add_request_validator( + 'RequestValidator', + validate_request_body=True, + validate_request_parameters=True, + ) + + # Lambda integration + task_integration = apigateway.LambdaIntegration(self.task_handler, proxy=True) + + # API Gateway resources and methods with IAM authorization and request validation + tasks_resource = self.api.root.add_resource('tasks') + tasks_resource.add_method( + 'GET', + task_integration, + authorization_type=apigateway.AuthorizationType.IAM, + request_validator=request_validator, + ) # List tasks + tasks_resource.add_method( + 'POST', + task_integration, + authorization_type=apigateway.AuthorizationType.IAM, + request_validator=request_validator, + ) # Create task + + task_resource = tasks_resource.add_resource('{id}') + task_resource.add_method( + 'GET', + task_integration, + authorization_type=apigateway.AuthorizationType.IAM, + request_validator=request_validator, + ) # Get task + task_resource.add_method( + 'PUT', + task_integration, + authorization_type=apigateway.AuthorizationType.IAM, + request_validator=request_validator, + ) # Update task + task_resource.add_method( + 'DELETE', + task_integration, + authorization_type=apigateway.AuthorizationType.IAM, + request_validator=request_validator, + ) # Delete task + + # Output API endpoint for E2E tests + CfnOutput(self, 'ApiEndpoint', value=self.api.url, description='API Gateway endpoint URL', export_name=f'{config.project_name}-api-endpoint') + + # EventBridge rule for task events + task_event_rule = events.Rule( + self, + 'TaskEventRule', + rule_name=config.task_event_rule_name(), + event_bus=self.event_bus, + event_pattern=events.EventPattern(source=['cns427-task-api'], detail_type=['TaskCreated', 'TaskUpdated', 'TaskDeleted']), + ) + + # Add notification handler as target + from aws_cdk import aws_events_targets as targets + + task_event_rule.add_target(targets.LambdaFunction(self.notification_handler)) + + # CDK Nag Suppressions for API Gateway + # These are acceptable for demo/educational purposes to reduce cost and complexity + NagSuppressions.add_stack_suppressions( + self, + [ + { + 'id': 'AwsSolutions-COG4', + 'reason': 'Demo/Educational Code: Using IAM authentication instead of Cognito. ' + 'IAM auth with SigV4 signing is appropriate for this demonstration and provides ' + 'sufficient security without the added complexity and cost of Cognito user pools. ' + 'All API methods require valid AWS credentials.', + }, + { + 'id': 'AwsSolutions-APIG1', + 'reason': 'Demo/Educational Code: API Gateway access logging disabled to reduce costs. ' + 'CloudWatch Logs from Lambda functions provide sufficient observability for demonstration purposes. 
' + 'For production use, enable access logging with: deploy_options={"access_log_destination": ...}', + }, + { + 'id': 'AwsSolutions-APIG3', + 'reason': 'Demo/Educational Code: AWS WAF not configured to reduce costs. ' + 'IAM authentication already restricts API access to authorized AWS principals only. ' + 'WAF would add significant cost (~$5-10/month minimum) without substantial benefit for a demo API. ' + 'For production use, consider adding WAF for additional protection against web exploits.', + }, + { + 'id': 'AwsSolutions-APIG6', + 'reason': 'Demo/Educational Code: CloudWatch logging at API Gateway stage level disabled to reduce costs. ' + 'Lambda function logs (enabled) provide sufficient observability for demonstration purposes. ' + 'API Gateway stage logging would duplicate information already captured in Lambda logs. ' + 'For production use, enable stage logging for detailed API Gateway metrics.', + }, + ], + ) + + +class TaskApiMonitoringStack(Stack): + """Monitoring and observability stack for Task API.""" + + def __init__(self, scope: Construct, construct_id: str, api_stack: TaskApiStack, **kwargs) -> None: + super().__init__(scope, construct_id, **kwargs) + + # Load configuration + config = InfrastructureConfig.from_cdk_context(self.node) + + # Import resources from API stack + self.task_handler = api_stack.task_handler + self.notification_handler = api_stack.notification_handler + self.api = api_stack.api + + from aws_cdk import aws_cloudwatch as cloudwatch + + # CloudWatch Dashboard + self.dashboard = cloudwatch.Dashboard(self, 'TaskApiDashboard', dashboard_name=config.dashboard_name()) + + # Lambda metrics + task_handler_errors = cloudwatch.Metric( + namespace='AWS/Lambda', metric_name='Errors', dimensions_map={'FunctionName': self.task_handler.function_name}, statistic='Sum' + ) + + task_handler_duration = cloudwatch.Metric( + namespace='AWS/Lambda', metric_name='Duration', dimensions_map={'FunctionName': self.task_handler.function_name}, statistic='Average' + ) + + # API Gateway metrics + api_4xx_errors = cloudwatch.Metric( + namespace='AWS/ApiGateway', metric_name='4XXError', dimensions_map={'ApiName': self.api.rest_api_name}, statistic='Sum' + ) + + api_5xx_errors = cloudwatch.Metric( + namespace='AWS/ApiGateway', metric_name='5XXError', dimensions_map={'ApiName': self.api.rest_api_name}, statistic='Sum' + ) + + # Add widgets to dashboard + self.dashboard.add_widgets( + cloudwatch.GraphWidget(title='Lambda Errors', left=[task_handler_errors], width=12, height=6), + cloudwatch.GraphWidget(title='Lambda Duration', left=[task_handler_duration], width=12, height=6), + cloudwatch.GraphWidget(title='API Gateway Errors', left=[api_4xx_errors, api_5xx_errors], width=24, height=6), + ) + + # CloudWatch Alarms + cloudwatch.Alarm( + self, + 'TaskHandlerErrorAlarm', + metric=task_handler_errors, + threshold=5, + evaluation_periods=2, + alarm_description='Task handler error rate is too high', + ) + + cloudwatch.Alarm( + self, + 'ApiGateway5xxAlarm', + metric=api_5xx_errors, + threshold=10, + evaluation_periods=2, + alarm_description='API Gateway 5xx error rate is too high', + ) diff --git a/python-test-samples/cns427-testable-serverless-architecture/infrastructure/setup.py b/python-test-samples/cns427-testable-serverless-architecture/infrastructure/setup.py new file mode 100644 index 00000000..c81b42cb --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/infrastructure/setup.py @@ -0,0 +1,13 @@ +"""Setup for infrastructure package.""" + +from setuptools 
import find_packages, setup
+
+setup(
+    name='cns427-infrastructure',
+    version='1.0.0',
+    packages=find_packages(),
+    install_requires=[
+        'aws-cdk-lib>=2.100.0',
+        'constructs>=10.0.0',
+    ],
+)
diff --git a/python-test-samples/cns427-testable-serverless-architecture/infrastructure/test_harness/README.md b/python-test-samples/cns427-testable-serverless-architecture/infrastructure/test_harness/README.md
new file mode 100644
index 00000000..6d654884
--- /dev/null
+++ b/python-test-samples/cns427-testable-serverless-architecture/infrastructure/test_harness/README.md
@@ -0,0 +1,58 @@
+# EventBridge Test Infrastructure
+
+Simple setup for the EventBridge integration-testing infrastructure. Resources deploy to the region set in `cdk.json` (**us-west-2** by default).
+
+## Quick Start
+
+1. **Set AWS credentials** (use the export commands below):
+   ```bash
+   export AWS_ACCESS_KEY_ID=your_access_key
+   export AWS_SECRET_ACCESS_KEY=your_secret_key
+   export AWS_SESSION_TOKEN=your_session_token
+   ```
+
+2. **Deploy infrastructure** (from project root):
+   ```bash
+   make deploy-test-infra
+   ```
+
+3. **Run EventBridge tests**:
+   ```bash
+   poetry run pytest tests/integration/test_eventbridge_integration.py -v
+   ```
+
+## Commands (from project root)
+
+- **Deploy**: `make deploy-test-infra` - Deploy test infrastructure
+- **Check**: `make check-test-infra` - Check infrastructure status
+- **Destroy**: `make destroy-test-infra` - Clean up infrastructure
+
+Or run directly from this directory:
+- **Deploy**: `poetry run ./deploy.sh`
+- **Check**: `poetry run ./check.sh`
+- **Destroy**: `poetry run ./destroy.sh`
+
+## What Gets Deployed
+
+- **DynamoDB Table**: `cns427-task-api-test-results` - Stores captured test events
+- **Lambda Function**: `cns427-task-api-test-subscriber` - Captures EventBridge events
+- **EventBridge Rule**: `cns427-task-api-test-rule` - Routes TEST-* events to Lambda
+
+**Note**: All resources are deployed to the region configured via CDK context; override it with `cdk deploy -c region=<region>`.
+
+## Files
+
+- `test_infrastructure_stack.py` - CDK stack definition
+- `test_event_subscriber.py` - Lambda function code
+- `deploy.sh` - Deployment script
+- `destroy.sh` - Cleanup script
+- `check.sh` - Status check script
+
+## Troubleshooting
+
+1. **CDK not found**: Install with `npm install -g aws-cdk`
+2. **Python dependencies**: Run `pip install aws-cdk-lib constructs boto3`
+3. **AWS credentials**: Export valid credentials (see Quick Start)
+4. **Synthesis fails**: Check `test_infrastructure_stack.py` for errors
+
+The deployment scripts use the `app.py` and `cdk.json` files checked in to this directory.
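+
+## Publish-and-Verify Example
+
+A minimal sketch of the flow this harness enables, assuming the default resource names from `infrastructure/config.py` and credentials/region pointing at the deployed stacks (the real tests live under `tests/integration/`):
+
+```python
+import json
+import time
+import uuid
+
+import boto3
+from boto3.dynamodb.conditions import Key
+
+events_client = boto3.client('events')
+results_table = boto3.resource('dynamodb').Table('cns427-task-api-test-results')
+
+# The subscriber reads detail['title'] as the test run id, so it must start with 'TEST-'.
+test_run_id = f'TEST-{uuid.uuid4()}'
+
+# Publish to the application's custom bus; the test rule only matches
+# source 'TEST-cns427-task-api' and detail-types prefixed with 'TEST-'.
+events_client.put_events(
+    Entries=[{
+        'EventBusName': 'cns427-task-api-core-task-events',
+        'Source': 'TEST-cns427-task-api',
+        'DetailType': 'TEST-TaskCreated',
+        'Detail': json.dumps({'title': test_run_id}),
+    }]
+)
+
+# Poll the results table until the subscriber has stored the event
+# (the item's TTL cleans it up automatically after an hour).
+for _ in range(10):
+    items = results_table.query(KeyConditionExpression=Key('test_run_id').eq(test_run_id))['Items']
+    if items:
+        print('Captured event type:', items[0]['event_type'])  # 'TaskCreated'
+        break
+    time.sleep(1)
+```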
\ No newline at end of file diff --git a/python-test-samples/cns427-testable-serverless-architecture/infrastructure/test_harness/__init__.py b/python-test-samples/cns427-testable-serverless-architecture/infrastructure/test_harness/__init__.py new file mode 100644 index 00000000..1f550dae --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/infrastructure/test_harness/__init__.py @@ -0,0 +1,5 @@ +"""Test harness infrastructure for EventBridge integration testing.""" + +from infrastructure.test_harness.test_infrastructure_stack import TestInfrastructureStack + +__all__ = ['TestInfrastructureStack'] diff --git a/python-test-samples/cns427-testable-serverless-architecture/infrastructure/test_harness/app.py b/python-test-samples/cns427-testable-serverless-architecture/infrastructure/test_harness/app.py new file mode 100644 index 00000000..4d3abce5 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/infrastructure/test_harness/app.py @@ -0,0 +1,17 @@ +#!/usr/bin/env python3 +import sys +from pathlib import Path + +# Add project root to Python path +project_root = Path(__file__).parent.parent.parent +sys.path.insert(0, str(project_root)) + +import aws_cdk as cdk + +from infrastructure.config import InfrastructureConfig +from infrastructure.test_harness.test_infrastructure_stack import TestInfrastructureStack + +app = cdk.App() +config = InfrastructureConfig.from_cdk_context(app.node) +TestInfrastructureStack(app, config.test_harness_stack_name(), env=cdk.Environment(region=config.region)) +app.synth() diff --git a/python-test-samples/cns427-testable-serverless-architecture/infrastructure/test_harness/cdk.json b/python-test-samples/cns427-testable-serverless-architecture/infrastructure/test_harness/cdk.json new file mode 100644 index 00000000..2173a51d --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/infrastructure/test_harness/cdk.json @@ -0,0 +1,10 @@ +{ + "app": "poetry run python app.py", + "context": { + "project_name": "cns427-task-api", + "environment": "dev", + "region": "us-west-2", + "@aws-cdk/aws-lambda:recognizeLayerVersion": true, + "@aws-cdk/core:checkSecretUsage": true + } +} diff --git a/python-test-samples/cns427-testable-serverless-architecture/infrastructure/test_harness/test_event_subscriber.py b/python-test-samples/cns427-testable-serverless-architecture/infrastructure/test_harness/test_event_subscriber.py new file mode 100644 index 00000000..c477b35d --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/infrastructure/test_harness/test_event_subscriber.py @@ -0,0 +1,81 @@ +""" +Test Subscriber Lambda Function + +Captures EventBridge test events and stores them in the test results table +for integration test verification. +""" + +import json +import os +import time +from typing import Any, Dict + +import boto3 +from botocore.exceptions import ClientError + +# Get configuration from environment +table_name = os.environ.get('TEST_RESULTS_TABLE_NAME', 'cns427-task-api-test-results') +AWS_REGION = os.getenv('AWS_REGION', 'us-east-1') + +# Initialize DynamoDB client +dynamodb = boto3.resource('dynamodb', region_name=AWS_REGION) +table = dynamodb.Table(table_name) + + +def handler(event: Dict[str, Any], context: Any) -> Dict[str, Any]: + """ + EventBridge test event subscriber handler. + + Captures TEST-* events and stores them in the test results table + for integration test verification. 
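+
+    Expected event shape (as delivered by the EventBridge rule):
+        source: 'TEST-cns427-task-api'
+        detail-type: 'TEST-<EventType>' (prefix-matched)
+        detail.title: the test run id; must itself start with 'TEST-'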
+ """ + print(f'Received EventBridge event: {json.dumps(event, default=str)}') + + try: + # Extract event details directly from EventBridge event + source = event.get('source', '') + detail_type = event.get('detail-type', '') + detail = event.get('detail', {}) + + # Only process TEST events + if not source.startswith('TEST-') or not detail_type.startswith('TEST-'): + print(f'Skipping non-test event: source={source}, detail_type={detail_type}') + return {'statusCode': 200, 'body': json.dumps({'message': 'Skipped non-test event', 'processed_count': 0})} + + # Title is test run ID + test_run_id = detail.get('title', '') + if not test_run_id.startswith('TEST-'): + print(f'Skipping event without test run id: {test_run_id}') + return {'statusCode': 200, 'body': json.dumps({'message': 'Skipped event without test run id', 'processed_count': 0})} + + # Create timestamp for sorting + event_timestamp = str(time.time()) + + # Store event in test results table + item = { + 'test_run_id': test_run_id, + 'event_timestamp': event_timestamp, + 'event_type': detail_type.replace('TEST-', ''), # Remove TEST- prefix + 'event_data': json.dumps(detail), + 'source': source, + 'ttl': int(time.time()) + 3600, # Auto-cleanup after 1 hour + } + + table.put_item(Item=item) + + print(f'Stored test event: test_run_id={test_run_id}, event_type={item["event_type"]}') + + return { + 'statusCode': 200, + 'body': json.dumps( + {'message': 'Processed test event successfully', 'processed_count': 1, 'test_run_id': test_run_id, 'event_type': item['event_type']} + ), + } + + except ClientError as e: + print(f'DynamoDB error: {e}') + return {'statusCode': 500, 'body': json.dumps({'error': f'DynamoDB error: {str(e)}'})} + + except Exception as e: + print(f'Unexpected error: {e}') + return {'statusCode': 500, 'body': json.dumps({'error': f'Unexpected error: {str(e)}'})} diff --git a/python-test-samples/cns427-testable-serverless-architecture/infrastructure/test_harness/test_infrastructure_stack.py b/python-test-samples/cns427-testable-serverless-architecture/infrastructure/test_harness/test_infrastructure_stack.py new file mode 100644 index 00000000..d0e649e5 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/infrastructure/test_harness/test_infrastructure_stack.py @@ -0,0 +1,232 @@ +""" +Test Infrastructure Stack + +CDK stack for EventBridge integration testing infrastructure including +test results table, test subscriber Lambda, and EventBridge rules. 
+""" + +from aws_cdk import ( + CfnOutput, + Duration, + RemovalPolicy, + Stack, +) +from aws_cdk import ( + aws_cloudwatch as cloudwatch, +) +from aws_cdk import ( + aws_dynamodb as dynamodb, +) +from aws_cdk import ( + aws_events as events, +) +from aws_cdk import ( + aws_events_targets as targets, +) +from aws_cdk import ( + aws_iam as iam, +) +from aws_cdk import ( + aws_lambda as lambda_, +) +from aws_cdk import ( + aws_logs as logs, +) +from aws_cdk import ( + aws_sqs as sqs, +) +from constructs import Construct + +from infrastructure.config import InfrastructureConfig + + +class TestInfrastructureStack(Stack): + """CDK Stack for EventBridge integration test infrastructure.""" + + def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None: + super().__init__(scope, construct_id, **kwargs) + + # Load configuration + config = InfrastructureConfig.from_cdk_context(self.node) + + # Test Results DynamoDB Table + self.test_results_table = dynamodb.Table( + self, + 'TestResultsTable', + table_name=config.test_results_table_name(), + partition_key=dynamodb.Attribute(name='test_run_id', type=dynamodb.AttributeType.STRING), + sort_key=dynamodb.Attribute(name='event_timestamp', type=dynamodb.AttributeType.STRING), + time_to_live_attribute='ttl', + billing_mode=dynamodb.BillingMode.PAY_PER_REQUEST, + removal_policy=RemovalPolicy.DESTROY, + point_in_time_recovery_specification=dynamodb.PointInTimeRecoverySpecification( + point_in_time_recovery_enabled=False + ), # Not needed for test data + ) + + # CloudWatch Log Group for Test Subscriber Lambda + test_subscriber_log_group = logs.LogGroup( + self, + 'TestSubscriberLogGroup', + log_group_name=f'/aws/lambda/{config.test_subscriber_function_name()}', + retention=logs.RetentionDays.ONE_WEEK, + removal_policy=RemovalPolicy.DESTROY, + ) + + # Test Subscriber Lambda Function + self.test_subscriber_lambda = lambda_.Function( + self, + 'TestHarnessLambda', + function_name=config.test_subscriber_function_name(), + runtime=lambda_.Runtime.PYTHON_3_13, + architecture=lambda_.Architecture.ARM_64, + code=lambda_.Code.from_asset( + '.', exclude=['*.md', 'cdk.out', '*.sh', 'app.py', 'cdk.json', 'test_infrastructure_stack.py', '__pycache__'] + ), + handler='test_event_subscriber.handler', + environment={'TEST_RESULTS_TABLE_NAME': self.test_results_table.table_name, 'LOG_LEVEL': 'INFO'}, + timeout=Duration.seconds(30), + memory_size=256, + log_group=test_subscriber_log_group, + description='Captures EventBridge test events for integration testing', + ) + + # Grant Lambda permissions to write to test results table + self.test_results_table.grant_write_data(self.test_subscriber_lambda) + + # Import the custom event bus + custom_event_bus = events.EventBus.from_event_bus_name(self, 'CustomEventBus', config.event_bus_name()) + + # EventBridge Rule for Test Events on Custom Bus + self.test_event_rule = events.Rule( + self, + 'TestEventRule', + rule_name=config.test_event_rule_name(), + description='Routes TEST-* events to test subscriber Lambda', + event_bus=custom_event_bus, + event_pattern=events.EventPattern(source=['TEST-cns427-task-api'], detail_type=events.Match.prefix('TEST-')), + enabled=True, + ) + + # Add Lambda as target for the rule + self.test_event_rule.add_target(targets.LambdaFunction(self.test_subscriber_lambda, retry_attempts=2, max_event_age=Duration.minutes(5))) + + # Grant EventBridge permission to invoke Lambda + self.test_subscriber_lambda.add_permission( + 'AllowEventBridgeInvoke', + 
principal=iam.ServicePrincipal('events.amazonaws.com'), + action='lambda:InvokeFunction', + source_arn=self.test_event_rule.rule_arn, + ) + + # Dead Letter Queue for failed test events + self.test_dlq = sqs.Queue( + self, 'TestEventDLQ', queue_name=config.test_dlq_name(), retention_period=Duration.days(7), visibility_timeout=Duration.seconds(300) + ) + + # IAM Role for integration tests + self.test_execution_role = iam.Role( + self, + 'TestExecutionRole', + role_name=config.test_execution_role_name(), + assumed_by=iam.ServicePrincipal('lambda.amazonaws.com'), + managed_policies=[iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AWSLambdaBasicExecutionRole')], + inline_policies={ + 'EventBridgeTestPolicy': iam.PolicyDocument( + statements=[ + iam.PolicyStatement( + effect=iam.Effect.ALLOW, + actions=['events:PutEvents'], + resources=['*'], + conditions={'StringEquals': {'events:source': 'TEST-cns427-task-api'}}, + ), + iam.PolicyStatement( + effect=iam.Effect.ALLOW, + actions=['dynamodb:Query', 'dynamodb:GetItem', 'dynamodb:PutItem', 'dynamodb:DeleteItem'], + resources=[self.test_results_table.table_arn], + ), + ] + ) + }, + ) + + # CloudWatch Dashboard for test monitoring + self.test_dashboard = cloudwatch.Dashboard( + self, + 'TestDashboard', + dashboard_name=config.test_dashboard_name(), + widgets=[ + [ + cloudwatch.GraphWidget( + title='Test Subscriber Lambda Metrics', + left=[ + self.test_subscriber_lambda.metric_invocations(), + self.test_subscriber_lambda.metric_errors(), + self.test_subscriber_lambda.metric_duration(), + ], + width=12, + height=6, + ) + ], + [ + cloudwatch.GraphWidget( + title='Test Results Table Metrics', + left=[ + self.test_results_table.metric_consumed_read_capacity_units(), + self.test_results_table.metric_consumed_write_capacity_units(), + ], + width=12, + height=6, + ) + ], + [ + cloudwatch.SingleValueWidget( + title='Test Events DLQ Messages', metrics=[self.test_dlq.metric_approximate_number_of_messages_visible()], width=6, height=3 + ), + cloudwatch.SingleValueWidget( + title='EventBridge Rule Invocations', + metrics=[ + cloudwatch.Metric( + namespace='AWS/Events', + metric_name='SuccessfulInvocations', + dimensions_map={'RuleName': self.test_event_rule.rule_name}, + ) + ], + width=6, + height=3, + ), + ], + ], + ) + + # Output important ARNs and names for test configuration + CfnOutput(self, 'TestResultsTableName', value=self.test_results_table.table_name, description='DynamoDB table name for test results') + + CfnOutput(self, 'TestSubscriberLambdaArn', value=self.test_subscriber_lambda.function_arn, description='Test subscriber Lambda function ARN') + + CfnOutput(self, 'TestEventRuleArn', value=self.test_event_rule.rule_arn, description='EventBridge rule ARN for test events') + + CfnOutput(self, 'TestExecutionRoleArn', value=self.test_execution_role.role_arn, description='IAM role ARN for test execution') + + +# Helper function to create test infrastructure app +def create_test_infrastructure_app(): + """Create CDK app with test infrastructure stack.""" + from aws_cdk import App + + app = App() + + TestInfrastructureStack( + app, + 'CNS427TaskApiTestInfrastructure', + description='EventBridge integration test infrastructure for CNS427 Task API', + env={'account': app.node.try_get_context('account'), 'region': app.node.try_get_context('region') or 'us-east-1'}, + ) + + return app + + +if __name__ == '__main__': + # For direct execution + app = create_test_infrastructure_app() + app.synth() diff --git 
a/python-test-samples/cns427-testable-serverless-architecture/mypy.ini b/python-test-samples/cns427-testable-serverless-architecture/mypy.ini new file mode 100644 index 00000000..c1b91399 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/mypy.ini @@ -0,0 +1,56 @@ +[mypy] +# Python version +python_version = 3.13 + +# Import discovery +mypy_path = . +namespace_packages = False +explicit_package_bases = False +exclude = (^tests/|^infrastructure/|^\.venv/|^cdk\.out/) + +# Strictness - Relaxed for pragmatic development +warn_return_any = False +warn_unused_configs = True +disallow_untyped_defs = False +disallow_incomplete_defs = False +check_untyped_defs = False +disallow_untyped_calls = False + +# Allow implicit optional (common pattern with default None) +no_implicit_optional = False + +# Ignore missing imports for third-party libraries without stubs +ignore_missing_imports = True + +# Don't require return type annotations +disallow_any_unimported = False +disallow_any_expr = False +disallow_any_decorated = False +disallow_any_explicit = False + +# Allow subclassing Any +disallow_subclassing_any = False + +# Warnings +warn_redundant_casts = True +warn_unused_ignores = False +warn_no_return = False + +# Error messages +show_error_context = True +show_column_numbers = True +show_error_codes = True +pretty = True + +# Per-module options for external libraries +[mypy-boto3.*] +ignore_missing_imports = True + +[mypy-botocore.*] +ignore_missing_imports = True + +[mypy-aws_lambda_powertools.*] +ignore_missing_imports = True + +[mypy-moto.*] +ignore_missing_imports = True diff --git a/python-test-samples/cns427-testable-serverless-architecture/poetry.lock b/python-test-samples/cns427-testable-serverless-architecture/poetry.lock new file mode 100644 index 00000000..7ddada5d --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/poetry.lock @@ -0,0 +1,1865 @@ +# This file is automatically @generated by Poetry 2.2.0 and should not be changed by hand. 
+ +[[package]] +name = "annotated-types" +version = "0.7.0" +description = "Reusable constraint types to use with typing.Annotated" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, + {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, +] + +[[package]] +name = "attrs" +version = "25.3.0" +description = "Classes Without Boilerplate" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3"}, + {file = "attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b"}, +] + +[package.extras] +benchmark = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +cov = ["cloudpickle ; platform_python_implementation == \"CPython\"", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +dev = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier"] +tests = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\""] + +[[package]] +name = "aws-cdk-asset-awscli-v1" +version = "2.2.242" +description = "A library that contains the AWS CLI for use in Lambda Layers" +optional = false +python-versions = "~=3.9" +groups = ["dev"] +files = [ + {file = "aws_cdk_asset_awscli_v1-2.2.242-py3-none-any.whl", hash = "sha256:d1001bf56a12f7d1162d4211003d1e8f72a213159465e2d0e1c598cc0ea44aad"}, + {file = "aws_cdk_asset_awscli_v1-2.2.242.tar.gz", hash = "sha256:a957d679a118f4375307ed90b9aed7127c5c1402989438060eae4ab29ab0d13f"}, +] + +[package.dependencies] +jsii = ">=1.112.0,<2.0.0" +publication = ">=0.0.3" +typeguard = ">=2.13.3,<4.3.0" + +[[package]] +name = "aws-cdk-asset-node-proxy-agent-v6" +version = "2.1.0" +description = "@aws-cdk/asset-node-proxy-agent-v6" +optional = false +python-versions = "~=3.8" +groups = ["dev"] +files = [ + {file = "aws_cdk.asset_node_proxy_agent_v6-2.1.0-py3-none-any.whl", 
hash = "sha256:24a388b69a44d03bae6dbf864c4e25ba650d4b61c008b4568b94ffbb9a69e40e"}, + {file = "aws_cdk_asset_node_proxy_agent_v6-2.1.0.tar.gz", hash = "sha256:1f292c0631f86708ba4ee328b3a2b229f7e46ea1c79fbde567ee9eb119c2b0e2"}, +] + +[package.dependencies] +jsii = ">=1.103.1,<2.0.0" +publication = ">=0.0.3" +typeguard = ">=2.13.3,<5.0.0" + +[[package]] +name = "aws-cdk-cloud-assembly-schema" +version = "48.10.0" +description = "Schema for the protocol between CDK framework and CDK CLI" +optional = false +python-versions = "~=3.9" +groups = ["dev"] +files = [ + {file = "aws_cdk_cloud_assembly_schema-48.10.0-py3-none-any.whl", hash = "sha256:3eaddae345481fbdc0a24f618cdea2efefc3670308e2292be9ed8c039a965c1e"}, + {file = "aws_cdk_cloud_assembly_schema-48.10.0.tar.gz", hash = "sha256:c2c3836d5b1b5655a71226b8f6e1339b6db5f36e9c6533b8e572658d20676d9a"}, +] + +[package.dependencies] +jsii = ">=1.114.1,<2.0.0" +publication = ">=0.0.3" +typeguard = ">=2.13.3,<4.3.0" + +[[package]] +name = "aws-cdk-lib" +version = "2.215.0" +description = "Version 2 of the AWS Cloud Development Kit library" +optional = false +python-versions = "~=3.9" +groups = ["dev"] +files = [ + {file = "aws_cdk_lib-2.215.0-py3-none-any.whl", hash = "sha256:f609a88a6fe252dc5a0629393567942ec7f0c2aef1ef350c911cd9165b50bee5"}, + {file = "aws_cdk_lib-2.215.0.tar.gz", hash = "sha256:82da916b55070f7fc8a11a28f47afb6f7e34b452be7d7a4d7fdbf8e4e77ea768"}, +] + +[package.dependencies] +"aws-cdk.asset-awscli-v1" = "2.2.242" +"aws-cdk.asset-node-proxy-agent-v6" = ">=2.1.0,<3.0.0" +"aws-cdk.cloud-assembly-schema" = ">=48.6.0,<49.0.0" +constructs = ">=10.0.0,<11.0.0" +jsii = ">=1.113.0,<2.0.0" +publication = ">=0.0.3" +typeguard = ">=2.13.3,<4.3.0" + +[[package]] +name = "aws-lambda-env-modeler" +version = "1.0.7" +description = "AWS-Lambda-Env-Modeler is a Python library designed to simplify the process of managing and validating environment variables in your AWS Lambda functions." +optional = false +python-versions = "<4.0.0,>=3.8.17" +groups = ["main"] +files = [ + {file = "aws_lambda_env_modeler-1.0.7-py3-none-any.whl", hash = "sha256:9246b9400fba2bca0e09b5b3c03516fdefc1a8412949c4b9dda154b0ec551cee"}, + {file = "aws_lambda_env_modeler-1.0.7.tar.gz", hash = "sha256:16e16f17603476023b0ef7774791bee5812033220ddc615cf321c662a96087ce"}, +] + +[package.dependencies] +pydantic = ">=2.0.0,<3.0.0" + +[[package]] +name = "aws-lambda-powertools" +version = "2.43.1" +description = "Powertools for AWS Lambda (Python) is a developer toolkit to implement Serverless best practices and increase developer velocity." 
+optional = false +python-versions = "<4.0.0,>=3.8" +groups = ["main"] +files = [ + {file = "aws_lambda_powertools-2.43.1-py3-none-any.whl", hash = "sha256:48116250c1771c7b8d4977ad2d475271074d86964107ccfd3fc6775e51984d88"}, + {file = "aws_lambda_powertools-2.43.1.tar.gz", hash = "sha256:5c371a0c0430cf7bca1696748cb0d85079aac2c51056cbee10e5435029b35ca4"}, +] + +[package.dependencies] +aws-xray-sdk = {version = ">=2.8.0,<3.0.0", optional = true, markers = "extra == \"tracer\" or extra == \"all\""} +jmespath = ">=1.0.1,<2.0.0" +typing-extensions = ">=4.11.0,<5.0.0" + +[package.extras] +all = ["aws-xray-sdk (>=2.8.0,<3.0.0)", "fastjsonschema (>=2.14.5,<3.0.0)", "pydantic (>=1.8.2,<2.0.0)"] +aws-sdk = ["boto3 (>=1.26.164,<2.0.0)"] +datadog = ["datadog-lambda (>=4.77,<7.0)"] +datamasking = ["aws-encryption-sdk (>=3.1.1,<4.0.0)", "jsonpath-ng (>=1.6.0,<2.0.0)"] +parser = ["pydantic (>=1.8.2,<2.0.0)"] +redis = ["redis (>=4.4,<6.0)"] +tracer = ["aws-xray-sdk (>=2.8.0,<3.0.0)"] +validation = ["fastjsonschema (>=2.14.5,<3.0.0)"] + +[[package]] +name = "aws-xray-sdk" +version = "2.14.0" +description = "The AWS X-Ray SDK for Python (the SDK) enables Python developers to record and emit information from within their applications to the AWS X-Ray service." +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "aws_xray_sdk-2.14.0-py2.py3-none-any.whl", hash = "sha256:cfbe6feea3d26613a2a869d14c9246a844285c97087ad8f296f901633554ad94"}, + {file = "aws_xray_sdk-2.14.0.tar.gz", hash = "sha256:aab843c331af9ab9ba5cefb3a303832a19db186140894a523edafc024cc0493c"}, +] + +[package.dependencies] +botocore = ">=1.11.3" +wrapt = "*" + +[[package]] +name = "boto3" +version = "1.40.35" +description = "The AWS SDK for Python" +optional = false +python-versions = ">=3.9" +groups = ["main", "dev"] +files = [ + {file = "boto3-1.40.35-py3-none-any.whl", hash = "sha256:f4c1b01dd61e7733b453bca38b004ce030e26ee36e7a3d4a9e45a730b67bc38d"}, + {file = "boto3-1.40.35.tar.gz", hash = "sha256:d718df3591c829bcca4c498abb7b09d64d1eecc4e5a2b6cef14b476501211b8a"}, +] + +[package.dependencies] +botocore = ">=1.40.35,<1.41.0" +jmespath = ">=0.7.1,<2.0.0" +s3transfer = ">=0.14.0,<0.15.0" + +[package.extras] +crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] + +[[package]] +name = "botocore" +version = "1.40.35" +description = "Low-level, data-driven core of boto 3." +optional = false +python-versions = ">=3.9" +groups = ["main", "dev"] +files = [ + {file = "botocore-1.40.35-py3-none-any.whl", hash = "sha256:c545de2cbbce161f54ca589fbb677bae14cdbfac7d5f1a27f6a620cb057c26f4"}, + {file = "botocore-1.40.35.tar.gz", hash = "sha256:67e062752ff579c8cc25f30f9c3a84c72d692516a41a9ee1cf17735767ca78be"}, +] + +[package.dependencies] +jmespath = ">=0.7.1,<2.0.0" +python-dateutil = ">=2.1,<3.0.0" +urllib3 = {version = ">=1.25.4,<2.2.0 || >2.2.0,<3", markers = "python_version >= \"3.10\""} + +[package.extras] +crt = ["awscrt (==0.27.6)"] + +[[package]] +name = "cattrs" +version = "25.2.0" +description = "Composable complex class support for attrs and dataclasses." 
+optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "cattrs-25.2.0-py3-none-any.whl", hash = "sha256:539d7eedee7d2f0706e4e109182ad096d608ba84633c32c75ef3458f1d11e8f1"}, + {file = "cattrs-25.2.0.tar.gz", hash = "sha256:f46c918e955db0177be6aa559068390f71988e877c603ae2e56c71827165cc06"}, +] + +[package.dependencies] +attrs = ">=24.3.0" +typing-extensions = ">=4.12.2" + +[package.extras] +bson = ["pymongo (>=4.4.0)"] +cbor2 = ["cbor2 (>=5.4.6)"] +msgpack = ["msgpack (>=1.0.5)"] +msgspec = ["msgspec (>=0.19.0) ; implementation_name == \"cpython\""] +orjson = ["orjson (>=3.10.7) ; implementation_name == \"cpython\""] +pyyaml = ["pyyaml (>=6.0)"] +tomlkit = ["tomlkit (>=0.11.8)"] +ujson = ["ujson (>=5.10.0)"] + +[[package]] +name = "cdk-nag" +version = "2.37.33" +description = "Check CDK v2 applications for best practices using a combination on available rule packs." +optional = false +python-versions = "~=3.9" +groups = ["dev"] +files = [ + {file = "cdk_nag-2.37.33-py3-none-any.whl", hash = "sha256:0a34f303c31c56b91724604e8aac590ceb2d4265593b950a479bb7429864213a"}, + {file = "cdk_nag-2.37.33.tar.gz", hash = "sha256:9d2de9d169747f3afae7d02e2eb45d43bb9325571a952313ae2e189f54907c57"}, +] + +[package.dependencies] +aws-cdk-lib = ">=2.176.0,<3.0.0" +constructs = ">=10.0.5,<11.0.0" +jsii = ">=1.114.1,<2.0.0" +publication = ">=0.0.3" +typeguard = ">=2.13.3,<4.3.0" + +[[package]] +name = "certifi" +version = "2025.8.3" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "certifi-2025.8.3-py3-none-any.whl", hash = "sha256:f6c12493cfb1b06ba2ff328595af9350c65d6644968e5d3a2ffd78699af217a5"}, + {file = "certifi-2025.8.3.tar.gz", hash = "sha256:e564105f78ded564e3ae7c923924435e1daa7463faeab5bb932bc53ffae63407"}, +] + +[[package]] +name = "cffi" +version = "2.0.0" +description = "Foreign Function Interface for Python calling C code." 
+optional = false +python-versions = ">=3.9" +groups = ["dev"] +markers = "platform_python_implementation != \"PyPy\"" +files = [ + {file = "cffi-2.0.0-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:0cf2d91ecc3fcc0625c2c530fe004f82c110405f101548512cce44322fa8ac44"}, + {file = "cffi-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f73b96c41e3b2adedc34a7356e64c8eb96e03a3782b535e043a986276ce12a49"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:53f77cbe57044e88bbd5ed26ac1d0514d2acf0591dd6bb02a3ae37f76811b80c"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3e837e369566884707ddaf85fc1744b47575005c0a229de3327f8f9a20f4efeb"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:5eda85d6d1879e692d546a078b44251cdd08dd1cfb98dfb77b670c97cee49ea0"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9332088d75dc3241c702d852d4671613136d90fa6881da7d770a483fd05248b4"}, + {file = "cffi-2.0.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc7de24befaeae77ba923797c7c87834c73648a05a4bde34b3b7e5588973a453"}, + {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cf364028c016c03078a23b503f02058f1814320a56ad535686f90565636a9495"}, + {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e11e82b744887154b182fd3e7e8512418446501191994dbf9c9fc1f32cc8efd5"}, + {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8ea985900c5c95ce9db1745f7933eeef5d314f0565b27625d9a10ec9881e1bfb"}, + {file = "cffi-2.0.0-cp310-cp310-win32.whl", hash = "sha256:1f72fb8906754ac8a2cc3f9f5aaa298070652a0ffae577e0ea9bd480dc3c931a"}, + {file = "cffi-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:b18a3ed7d5b3bd8d9ef7a8cb226502c6bf8308df1525e1cc676c3680e7176739"}, + {file = "cffi-2.0.0-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:b4c854ef3adc177950a8dfc81a86f5115d2abd545751a304c5bcf2c2c7283cfe"}, + {file = "cffi-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2de9a304e27f7596cd03d16f1b7c72219bd944e99cc52b84d0145aefb07cbd3c"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:baf5215e0ab74c16e2dd324e8ec067ef59e41125d3eade2b863d294fd5035c92"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:730cacb21e1bdff3ce90babf007d0a0917cc3e6492f336c2f0134101e0944f93"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:6824f87845e3396029f3820c206e459ccc91760e8fa24422f8b0c3d1731cbec5"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9de40a7b0323d889cf8d23d1ef214f565ab154443c42737dfe52ff82cf857664"}, + {file = "cffi-2.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8941aaadaf67246224cee8c3803777eed332a19d909b47e29c9842ef1e79ac26"}, + {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a05d0c237b3349096d3981b727493e22147f934b20f6f125a3eba8f994bec4a9"}, + {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:94698a9c5f91f9d138526b48fe26a199609544591f859c870d477351dc7b2414"}, + {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5fed36fccc0612a53f1d4d9a816b50a36702c28a2aa880cb8a122b3466638743"}, + 
{file = "cffi-2.0.0-cp311-cp311-win32.whl", hash = "sha256:c649e3a33450ec82378822b3dad03cc228b8f5963c0c12fc3b1e0ab940f768a5"}, + {file = "cffi-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:66f011380d0e49ed280c789fbd08ff0d40968ee7b665575489afa95c98196ab5"}, + {file = "cffi-2.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:c6638687455baf640e37344fe26d37c404db8b80d037c3d29f58fe8d1c3b194d"}, + {file = "cffi-2.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d"}, + {file = "cffi-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037"}, + {file = "cffi-2.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba"}, + {file = "cffi-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94"}, + {file = "cffi-2.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187"}, + {file = "cffi-2.0.0-cp312-cp312-win32.whl", hash = "sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18"}, + {file = "cffi-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5"}, + {file = "cffi-2.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6"}, + {file = "cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb"}, + {file = "cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3"}, + {file = "cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26"}, + {file = "cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c"}, + {file = 
"cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b"}, + {file = "cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27"}, + {file = "cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75"}, + {file = "cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91"}, + {file = "cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5"}, + {file = "cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef"}, + {file = "cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775"}, + {file = "cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205"}, + {file = "cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1"}, + {file = "cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f"}, + {file = "cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25"}, + {file = "cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad"}, + {file = "cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9"}, + {file = "cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc"}, + {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592"}, + {file = "cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512"}, + {file = "cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4"}, + {file = "cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e"}, + {file = 
"cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6"}, + {file = "cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9"}, + {file = "cffi-2.0.0-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:fe562eb1a64e67dd297ccc4f5addea2501664954f2692b69a76449ec7913ecbf"}, + {file = "cffi-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:de8dad4425a6ca6e4e5e297b27b5c824ecc7581910bf9aee86cb6835e6812aa7"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:4647afc2f90d1ddd33441e5b0e85b16b12ddec4fca55f0d9671fef036ecca27c"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3f4d46d8b35698056ec29bca21546e1551a205058ae1a181d871e278b0b28165"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:e6e73b9e02893c764e7e8d5bb5ce277f1a009cd5243f8228f75f842bf937c534"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:cb527a79772e5ef98fb1d700678fe031e353e765d1ca2d409c92263c6d43e09f"}, + {file = "cffi-2.0.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:61d028e90346df14fedc3d1e5441df818d095f3b87d286825dfcbd6459b7ef63"}, + {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0f6084a0ea23d05d20c3edcda20c3d006f9b6f3fefeac38f59262e10cef47ee2"}, + {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1cd13c99ce269b3ed80b417dcd591415d3372bcac067009b6e0f59c7d4015e65"}, + {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:89472c9762729b5ae1ad974b777416bfda4ac5642423fa93bd57a09204712322"}, + {file = "cffi-2.0.0-cp39-cp39-win32.whl", hash = "sha256:2081580ebb843f759b9f617314a24ed5738c51d2aee65d31e02f6f7a2b97707a"}, + {file = "cffi-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:b882b3df248017dba09d6b16defe9b5c407fe32fc7c65a9c69798e6175601be9"}, + {file = "cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529"}, +] + +[package.dependencies] +pycparser = {version = "*", markers = "implementation_name != \"PyPy\""} + +[[package]] +name = "cfgv" +version = "3.4.0" +description = "Validate configuration and produce human readable error messages." +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9"}, + {file = "cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"}, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.3" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
+optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "charset_normalizer-3.4.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:fb7f67a1bfa6e40b438170ebdc8158b78dc465a5a67b6dde178a46987b244a72"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cc9370a2da1ac13f0153780040f465839e6cccb4a1e44810124b4e22483c93fe"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:07a0eae9e2787b586e129fdcbe1af6997f8d0e5abaa0bc98c0e20e124d67e601"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:74d77e25adda8581ffc1c720f1c81ca082921329452eba58b16233ab1842141c"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d0e909868420b7049dafd3a31d45125b31143eec59235311fc4c57ea26a4acd2"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:c6f162aabe9a91a309510d74eeb6507fab5fff92337a15acbe77753d88d9dcf0"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:4ca4c094de7771a98d7fbd67d9e5dbf1eb73efa4f744a730437d8a3a5cf994f0"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:02425242e96bcf29a49711b0ca9f37e451da7c70562bc10e8ed992a5a7a25cc0"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:78deba4d8f9590fe4dae384aeff04082510a709957e968753ff3c48399f6f92a"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-win32.whl", hash = "sha256:d79c198e27580c8e958906f803e63cddb77653731be08851c7df0b1a14a8fc0f"}, + {file = "charset_normalizer-3.4.3-cp310-cp310-win_amd64.whl", hash = "sha256:c6e490913a46fa054e03699c70019ab869e990270597018cef1d8562132c2669"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:b256ee2e749283ef3ddcff51a675ff43798d92d746d1a6e4631bf8c707d22d0b"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:13faeacfe61784e2559e690fc53fa4c5ae97c6fcedb8eb6fb8d0a15b475d2c64"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:00237675befef519d9af72169d8604a067d92755e84fe76492fef5441db05b91"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:585f3b2a80fbd26b048a0be90c5aae8f06605d3c92615911c3a2b03a8a3b796f"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0e78314bdc32fa80696f72fa16dc61168fda4d6a0c014e0380f9d02f0e5d8a07"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:96b2b3d1a83ad55310de8c7b4a2d04d9277d5591f40761274856635acc5fcb30"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:939578d9d8fd4299220161fdd76e86c6a251987476f5243e8864a7844476ba14"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:fd10de089bcdcd1be95a2f73dbe6254798ec1bda9f450d5828c96f93e2536b9c"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = 
"sha256:1e8ac75d72fa3775e0b7cb7e4629cec13b7514d928d15ef8ea06bca03ef01cae"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-win32.whl", hash = "sha256:6cf8fd4c04756b6b60146d98cd8a77d0cdae0e1ca20329da2ac85eed779b6849"}, + {file = "charset_normalizer-3.4.3-cp311-cp311-win_amd64.whl", hash = "sha256:31a9a6f775f9bcd865d88ee350f0ffb0e25936a7f930ca98995c05abf1faf21c"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e28e334d3ff134e88989d90ba04b47d84382a828c061d0d1027b1b12a62b39b1"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0cacf8f7297b0c4fcb74227692ca46b4a5852f8f4f24b3c766dd94a1075c4884"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c6fd51128a41297f5409deab284fecbe5305ebd7e5a1f959bee1c054622b7018"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3cfb2aad70f2c6debfbcb717f23b7eb55febc0bb23dcffc0f076009da10c6392"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1606f4a55c0fd363d754049cdf400175ee96c992b1f8018b993941f221221c5f"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:027b776c26d38b7f15b26a5da1044f376455fb3766df8fc38563b4efbc515154"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:42e5088973e56e31e4fa58eb6bd709e42fc03799c11c42929592889a2e54c491"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:cc34f233c9e71701040d772aa7490318673aa7164a0efe3172b2981218c26d93"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:320e8e66157cc4e247d9ddca8e21f427efc7a04bbd0ac8a9faf56583fa543f9f"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-win32.whl", hash = "sha256:fb6fecfd65564f208cbf0fba07f107fb661bcd1a7c389edbced3f7a493f70e37"}, + {file = "charset_normalizer-3.4.3-cp312-cp312-win_amd64.whl", hash = "sha256:86df271bf921c2ee3818f0522e9a5b8092ca2ad8b065ece5d7d9d0e9f4849bcc"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:14c2a87c65b351109f6abfc424cab3927b3bdece6f706e4d12faaf3d52ee5efe"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:41d1fc408ff5fdfb910200ec0e74abc40387bccb3252f3f27c0676731df2b2c8"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:1bb60174149316da1c35fa5233681f7c0f9f514509b8e399ab70fea5f17e45c9"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:30d006f98569de3459c2fc1f2acde170b7b2bd265dc1943e87e1a4efe1b67c31"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:416175faf02e4b0810f1f38bcb54682878a4af94059a1cd63b8747244420801f"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6aab0f181c486f973bc7262a97f5aca3ee7e1437011ef0c2ec04b5a11d16c927"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:fdabf8315679312cfa71302f9bd509ded4f2f263fb5b765cf1433b39106c3cc9"}, + 
{file = "charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:bd28b817ea8c70215401f657edef3a8aa83c29d447fb0b622c35403780ba11d5"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:18343b2d246dc6761a249ba1fb13f9ee9a2bcd95decc767319506056ea4ad4dc"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-win32.whl", hash = "sha256:6fb70de56f1859a3f71261cbe41005f56a7842cc348d3aeb26237560bfa5e0ce"}, + {file = "charset_normalizer-3.4.3-cp313-cp313-win_amd64.whl", hash = "sha256:cf1ebb7d78e1ad8ec2a8c4732c7be2e736f6e5123a4146c5b89c9d1f585f8cef"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3cd35b7e8aedeb9e34c41385fda4f73ba609e561faedfae0a9e75e44ac558a15"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b89bc04de1d83006373429975f8ef9e7932534b8cc9ca582e4db7d20d91816db"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2001a39612b241dae17b4687898843f254f8748b796a2e16f1051a17078d991d"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:8dcfc373f888e4fb39a7bc57e93e3b845e7f462dacc008d9749568b1c4ece096"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:18b97b8404387b96cdbd30ad660f6407799126d26a39ca65729162fd810a99aa"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ccf600859c183d70eb47e05a44cd80a4ce77394d1ac0f79dbd2dd90a69a3a049"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:53cd68b185d98dde4ad8990e56a58dea83a4162161b1ea9272e5c9182ce415e0"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:30a96e1e1f865f78b030d65241c1ee850cdf422d869e9028e2fc1d5e4db73b92"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d716a916938e03231e86e43782ca7878fb602a125a91e7acb8b5112e2e96ac16"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-win32.whl", hash = "sha256:c6dbd0ccdda3a2ba7c2ecd9d77b37f3b5831687d8dc1b6ca5f56a4880cc7b7ce"}, + {file = "charset_normalizer-3.4.3-cp314-cp314-win_amd64.whl", hash = "sha256:73dc19b562516fc9bcf6e5d6e596df0b4eb98d87e4f79f3ae71840e6ed21361c"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:0f2be7e0cf7754b9a30eb01f4295cc3d4358a479843b31f328afd210e2c7598c"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c60e092517a73c632ec38e290eba714e9627abe9d301c8c8a12ec32c314a2a4b"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:252098c8c7a873e17dd696ed98bbe91dbacd571da4b87df3736768efa7a792e4"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3653fad4fe3ed447a596ae8638b437f827234f01a8cd801842e43f3d0a6b281b"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8999f965f922ae054125286faf9f11bc6932184b93011d138925a1773830bbe9"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-musllinux_1_2_aarch64.whl", hash = 
"sha256:d95bfb53c211b57198bb91c46dd5a2d8018b3af446583aab40074bf7988401cb"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:5b413b0b1bfd94dbf4023ad6945889f374cd24e3f62de58d6bb102c4d9ae534a"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:b5e3b2d152e74e100a9e9573837aba24aab611d39428ded46f4e4022ea7d1942"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:a2d08ac246bb48479170408d6c19f6385fa743e7157d716e144cad849b2dd94b"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-win32.whl", hash = "sha256:ec557499516fc90fd374bf2e32349a2887a876fbf162c160e3c01b6849eaf557"}, + {file = "charset_normalizer-3.4.3-cp38-cp38-win_amd64.whl", hash = "sha256:5d8d01eac18c423815ed4f4a2ec3b439d654e55ee4ad610e153cf02faf67ea40"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:70bfc5f2c318afece2f5838ea5e4c3febada0be750fcf4775641052bbba14d05"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:23b6b24d74478dc833444cbd927c338349d6ae852ba53a0d02a2de1fce45b96e"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:34a7f768e3f985abdb42841e20e17b330ad3aaf4bb7e7aeeb73db2e70f077b99"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:fb731e5deb0c7ef82d698b0f4c5bb724633ee2a489401594c5c88b02e6cb15f7"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:257f26fed7d7ff59921b78244f3cd93ed2af1800ff048c33f624c87475819dd7"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1ef99f0456d3d46a50945c98de1774da86f8e992ab5c77865ea8b8195341fc19"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:2c322db9c8c89009a990ef07c3bcc9f011a3269bc06782f916cd3d9eed7c9312"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:511729f456829ef86ac41ca78c63a5cb55240ed23b4b737faca0eb1abb1c41bc"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:88ab34806dea0671532d3f82d82b85e8fc23d7b2dd12fa837978dad9bb392a34"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-win32.whl", hash = "sha256:16a8770207946ac75703458e2c743631c79c59c5890c80011d536248f8eaa432"}, + {file = "charset_normalizer-3.4.3-cp39-cp39-win_amd64.whl", hash = "sha256:d22dbedd33326a4a5190dd4fe9e9e693ef12160c77382d9e87919bce54f3d4ca"}, + {file = "charset_normalizer-3.4.3-py3-none-any.whl", hash = "sha256:ce571ab16d890d23b5c278547ba694193a45011ff86a9162a71307ed9f86759a"}, + {file = "charset_normalizer-3.4.3.tar.gz", hash = "sha256:6fce4b8500244f6fcb71465d4a4930d132ba9ab8e71a7859e6a5d59851068d14"}, +] + +[[package]] +name = "cns427-infrastructure" +version = "1.0.0" +description = "" +optional = false +python-versions = "*" +groups = ["dev"] +files = [] +develop = true + +[package.dependencies] +aws-cdk-lib = ">=2.100.0" +constructs = ">=10.0.0" + +[package.source] +type = "directory" +url = "infrastructure" + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." 
+optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["dev"] +markers = "sys_platform == \"win32\"" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "constructs" +version = "10.4.2" +description = "A programming model for software-defined state" +optional = false +python-versions = "~=3.8" +groups = ["dev"] +files = [ + {file = "constructs-10.4.2-py3-none-any.whl", hash = "sha256:1f0f59b004edebfde0f826340698b8c34611f57848139b7954904c61645f13c1"}, + {file = "constructs-10.4.2.tar.gz", hash = "sha256:ce54724360fffe10bab27d8a081844eb81f5ace7d7c62c84b719c49f164d5307"}, +] + +[package.dependencies] +jsii = ">=1.102.0,<2.0.0" +publication = ">=0.0.3" +typeguard = ">=2.13.3,<2.14.0" + +[[package]] +name = "coverage" +version = "7.10.6" +description = "Code coverage measurement for Python" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "coverage-7.10.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:70e7bfbd57126b5554aa482691145f798d7df77489a177a6bef80de78860a356"}, + {file = "coverage-7.10.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e41be6f0f19da64af13403e52f2dec38bbc2937af54df8ecef10850ff8d35301"}, + {file = "coverage-7.10.6-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:c61fc91ab80b23f5fddbee342d19662f3d3328173229caded831aa0bd7595460"}, + {file = "coverage-7.10.6-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:10356fdd33a7cc06e8051413140bbdc6f972137508a3572e3f59f805cd2832fd"}, + {file = "coverage-7.10.6-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:80b1695cf7c5ebe7b44bf2521221b9bb8cdf69b1f24231149a7e3eb1ae5fa2fb"}, + {file = "coverage-7.10.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:2e4c33e6378b9d52d3454bd08847a8651f4ed23ddbb4a0520227bd346382bbc6"}, + {file = "coverage-7.10.6-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:c8a3ec16e34ef980a46f60dc6ad86ec60f763c3f2fa0db6d261e6e754f72e945"}, + {file = "coverage-7.10.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7d79dabc0a56f5af990cc6da9ad1e40766e82773c075f09cc571e2076fef882e"}, + {file = "coverage-7.10.6-cp310-cp310-win32.whl", hash = "sha256:86b9b59f2b16e981906e9d6383eb6446d5b46c278460ae2c36487667717eccf1"}, + {file = "coverage-7.10.6-cp310-cp310-win_amd64.whl", hash = "sha256:e132b9152749bd33534e5bd8565c7576f135f157b4029b975e15ee184325f528"}, + {file = "coverage-7.10.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c706db3cabb7ceef779de68270150665e710b46d56372455cd741184f3868d8f"}, + {file = "coverage-7.10.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8e0c38dc289e0508ef68ec95834cb5d2e96fdbe792eaccaa1bccac3966bbadcc"}, + {file = "coverage-7.10.6-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:752a3005a1ded28f2f3a6e8787e24f28d6abe176ca64677bcd8d53d6fe2ec08a"}, + {file = "coverage-7.10.6-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:689920ecfd60f992cafca4f5477d55720466ad2c7fa29bb56ac8d44a1ac2b47a"}, + {file = "coverage-7.10.6-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:ec98435796d2624d6905820a42f82149ee9fc4f2d45c2c5bc5a44481cc50db62"}, + {file = "coverage-7.10.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b37201ce4a458c7a758ecc4efa92fa8ed783c66e0fa3c42ae19fc454a0792153"}, + {file = "coverage-7.10.6-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:2904271c80898663c810a6b067920a61dd8d38341244a3605bd31ab55250dad5"}, + {file = "coverage-7.10.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5aea98383463d6e1fa4e95416d8de66f2d0cb588774ee20ae1b28df826bcb619"}, + {file = "coverage-7.10.6-cp311-cp311-win32.whl", hash = "sha256:e3fb1fa01d3598002777dd259c0c2e6d9d5e10e7222976fc8e03992f972a2cba"}, + {file = "coverage-7.10.6-cp311-cp311-win_amd64.whl", hash = "sha256:f35ed9d945bece26553d5b4c8630453169672bea0050a564456eb88bdffd927e"}, + {file = "coverage-7.10.6-cp311-cp311-win_arm64.whl", hash = "sha256:99e1a305c7765631d74b98bf7dbf54eeea931f975e80f115437d23848ee8c27c"}, + {file = "coverage-7.10.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5b2dd6059938063a2c9fee1af729d4f2af28fd1a545e9b7652861f0d752ebcea"}, + {file = "coverage-7.10.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:388d80e56191bf846c485c14ae2bc8898aa3124d9d35903fef7d907780477634"}, + {file = "coverage-7.10.6-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:90cb5b1a4670662719591aa92d0095bb41714970c0b065b02a2610172dbf0af6"}, + {file = "coverage-7.10.6-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:961834e2f2b863a0e14260a9a273aff07ff7818ab6e66d2addf5628590c628f9"}, + {file = "coverage-7.10.6-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bf9a19f5012dab774628491659646335b1928cfc931bf8d97b0d5918dd58033c"}, + {file = "coverage-7.10.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:99c4283e2a0e147b9c9cc6bc9c96124de9419d6044837e9799763a0e29a7321a"}, + {file = "coverage-7.10.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:282b1b20f45df57cc508c1e033403f02283adfb67d4c9c35a90281d81e5c52c5"}, + {file = "coverage-7.10.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8cdbe264f11afd69841bd8c0d83ca10b5b32853263ee62e6ac6a0ab63895f972"}, + {file = "coverage-7.10.6-cp312-cp312-win32.whl", hash = "sha256:a517feaf3a0a3eca1ee985d8373135cfdedfbba3882a5eab4362bda7c7cf518d"}, + {file = "coverage-7.10.6-cp312-cp312-win_amd64.whl", hash = "sha256:856986eadf41f52b214176d894a7de05331117f6035a28ac0016c0f63d887629"}, + {file = "coverage-7.10.6-cp312-cp312-win_arm64.whl", hash = "sha256:acf36b8268785aad739443fa2780c16260ee3fa09d12b3a70f772ef100939d80"}, + {file = "coverage-7.10.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ffea0575345e9ee0144dfe5701aa17f3ba546f8c3bb48db62ae101afb740e7d6"}, + {file = "coverage-7.10.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:95d91d7317cde40a1c249d6b7382750b7e6d86fad9d8eaf4fa3f8f44cf171e80"}, + {file = "coverage-7.10.6-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3e23dd5408fe71a356b41baa82892772a4cefcf758f2ca3383d2aa39e1b7a003"}, + {file = "coverage-7.10.6-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:0f3f56e4cb573755e96a16501a98bf211f100463d70275759e73f3cbc00d4f27"}, + {file = "coverage-7.10.6-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:db4a1d897bbbe7339946ffa2fe60c10cc81c43fab8b062d3fcb84188688174a4"}, + {file = 
"coverage-7.10.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d8fd7879082953c156d5b13c74aa6cca37f6a6f4747b39538504c3f9c63d043d"}, + {file = "coverage-7.10.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:28395ca3f71cd103b8c116333fa9db867f3a3e1ad6a084aa3725ae002b6583bc"}, + {file = "coverage-7.10.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:61c950fc33d29c91b9e18540e1aed7d9f6787cc870a3e4032493bbbe641d12fc"}, + {file = "coverage-7.10.6-cp313-cp313-win32.whl", hash = "sha256:160c00a5e6b6bdf4e5984b0ef21fc860bc94416c41b7df4d63f536d17c38902e"}, + {file = "coverage-7.10.6-cp313-cp313-win_amd64.whl", hash = "sha256:628055297f3e2aa181464c3808402887643405573eb3d9de060d81531fa79d32"}, + {file = "coverage-7.10.6-cp313-cp313-win_arm64.whl", hash = "sha256:df4ec1f8540b0bcbe26ca7dd0f541847cc8a108b35596f9f91f59f0c060bfdd2"}, + {file = "coverage-7.10.6-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:c9a8b7a34a4de3ed987f636f71881cd3b8339f61118b1aa311fbda12741bff0b"}, + {file = "coverage-7.10.6-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:8dd5af36092430c2b075cee966719898f2ae87b636cefb85a653f1d0ba5d5393"}, + {file = "coverage-7.10.6-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:b0353b0f0850d49ada66fdd7d0c7cdb0f86b900bb9e367024fd14a60cecc1e27"}, + {file = "coverage-7.10.6-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:d6b9ae13d5d3e8aeca9ca94198aa7b3ebbc5acfada557d724f2a1f03d2c0b0df"}, + {file = "coverage-7.10.6-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:675824a363cc05781b1527b39dc2587b8984965834a748177ee3c37b64ffeafb"}, + {file = "coverage-7.10.6-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:692d70ea725f471a547c305f0d0fc6a73480c62fb0da726370c088ab21aed282"}, + {file = "coverage-7.10.6-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:851430a9a361c7a8484a36126d1d0ff8d529d97385eacc8dfdc9bfc8c2d2cbe4"}, + {file = "coverage-7.10.6-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:d9369a23186d189b2fc95cc08b8160ba242057e887d766864f7adf3c46b2df21"}, + {file = "coverage-7.10.6-cp313-cp313t-win32.whl", hash = "sha256:92be86fcb125e9bda0da7806afd29a3fd33fdf58fba5d60318399adf40bf37d0"}, + {file = "coverage-7.10.6-cp313-cp313t-win_amd64.whl", hash = "sha256:6b3039e2ca459a70c79523d39347d83b73f2f06af5624905eba7ec34d64d80b5"}, + {file = "coverage-7.10.6-cp313-cp313t-win_arm64.whl", hash = "sha256:3fb99d0786fe17b228eab663d16bee2288e8724d26a199c29325aac4b0319b9b"}, + {file = "coverage-7.10.6-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:6008a021907be8c4c02f37cdc3ffb258493bdebfeaf9a839f9e71dfdc47b018e"}, + {file = "coverage-7.10.6-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:5e75e37f23eb144e78940b40395b42f2321951206a4f50e23cfd6e8a198d3ceb"}, + {file = "coverage-7.10.6-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:0f7cb359a448e043c576f0da00aa8bfd796a01b06aa610ca453d4dde09cc1034"}, + {file = "coverage-7.10.6-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:c68018e4fc4e14b5668f1353b41ccf4bc83ba355f0e1b3836861c6f042d89ac1"}, + {file = "coverage-7.10.6-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cd4b2b0707fc55afa160cd5fc33b27ccbf75ca11d81f4ec9863d5793fc6df56a"}, + {file = "coverage-7.10.6-cp314-cp314-musllinux_1_2_aarch64.whl", hash = 
"sha256:4cec13817a651f8804a86e4f79d815b3b28472c910e099e4d5a0e8a3b6a1d4cb"}, + {file = "coverage-7.10.6-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:f2a6a8e06bbda06f78739f40bfb56c45d14eb8249d0f0ea6d4b3d48e1f7c695d"}, + {file = "coverage-7.10.6-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:081b98395ced0d9bcf60ada7661a0b75f36b78b9d7e39ea0790bb4ed8da14747"}, + {file = "coverage-7.10.6-cp314-cp314-win32.whl", hash = "sha256:6937347c5d7d069ee776b2bf4e1212f912a9f1f141a429c475e6089462fcecc5"}, + {file = "coverage-7.10.6-cp314-cp314-win_amd64.whl", hash = "sha256:adec1d980fa07e60b6ef865f9e5410ba760e4e1d26f60f7e5772c73b9a5b0713"}, + {file = "coverage-7.10.6-cp314-cp314-win_arm64.whl", hash = "sha256:a80f7aef9535442bdcf562e5a0d5a5538ce8abe6bb209cfbf170c462ac2c2a32"}, + {file = "coverage-7.10.6-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:0de434f4fbbe5af4fa7989521c655c8c779afb61c53ab561b64dcee6149e4c65"}, + {file = "coverage-7.10.6-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:6e31b8155150c57e5ac43ccd289d079eb3f825187d7c66e755a055d2c85794c6"}, + {file = "coverage-7.10.6-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:98cede73eb83c31e2118ae8d379c12e3e42736903a8afcca92a7218e1f2903b0"}, + {file = "coverage-7.10.6-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f863c08f4ff6b64fa8045b1e3da480f5374779ef187f07b82e0538c68cb4ff8e"}, + {file = "coverage-7.10.6-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2b38261034fda87be356f2c3f42221fdb4171c3ce7658066ae449241485390d5"}, + {file = "coverage-7.10.6-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:0e93b1476b79eae849dc3872faeb0bf7948fd9ea34869590bc16a2a00b9c82a7"}, + {file = "coverage-7.10.6-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:ff8a991f70f4c0cf53088abf1e3886edcc87d53004c7bb94e78650b4d3dac3b5"}, + {file = "coverage-7.10.6-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:ac765b026c9f33044419cbba1da913cfb82cca1b60598ac1c7a5ed6aac4621a0"}, + {file = "coverage-7.10.6-cp314-cp314t-win32.whl", hash = "sha256:441c357d55f4936875636ef2cfb3bee36e466dcf50df9afbd398ce79dba1ebb7"}, + {file = "coverage-7.10.6-cp314-cp314t-win_amd64.whl", hash = "sha256:073711de3181b2e204e4870ac83a7c4853115b42e9cd4d145f2231e12d670930"}, + {file = "coverage-7.10.6-cp314-cp314t-win_arm64.whl", hash = "sha256:137921f2bac5559334ba66122b753db6dc5d1cf01eb7b64eb412bb0d064ef35b"}, + {file = "coverage-7.10.6-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:90558c35af64971d65fbd935c32010f9a2f52776103a259f1dee865fe8259352"}, + {file = "coverage-7.10.6-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8953746d371e5695405806c46d705a3cd170b9cc2b9f93953ad838f6c1e58612"}, + {file = "coverage-7.10.6-cp39-cp39-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:c83f6afb480eae0313114297d29d7c295670a41c11b274e6bca0c64540c1ce7b"}, + {file = "coverage-7.10.6-cp39-cp39-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:7eb68d356ba0cc158ca535ce1381dbf2037fa8cb5b1ae5ddfc302e7317d04144"}, + {file = "coverage-7.10.6-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5b15a87265e96307482746d86995f4bff282f14b027db75469c446da6127433b"}, + {file = "coverage-7.10.6-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:fc53ba868875bfbb66ee447d64d6413c2db91fddcfca57025a0e7ab5b07d5862"}, + {file = 
"coverage-7.10.6-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:efeda443000aa23f276f4df973cb82beca682fd800bb119d19e80504ffe53ec2"}, + {file = "coverage-7.10.6-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9702b59d582ff1e184945d8b501ffdd08d2cee38d93a2206aa5f1365ce0b8d78"}, + {file = "coverage-7.10.6-cp39-cp39-win32.whl", hash = "sha256:2195f8e16ba1a44651ca684db2ea2b2d4b5345da12f07d9c22a395202a05b23c"}, + {file = "coverage-7.10.6-cp39-cp39-win_amd64.whl", hash = "sha256:f32ff80e7ef6a5b5b606ea69a36e97b219cd9dc799bcf2963018a4d8f788cfbf"}, + {file = "coverage-7.10.6-py3-none-any.whl", hash = "sha256:92c4ecf6bf11b2e85fd4d8204814dc26e6a19f0c9d938c207c5cb0eadfcabbe3"}, + {file = "coverage-7.10.6.tar.gz", hash = "sha256:f644a3ae5933a552a29dbb9aa2f90c677a875f80ebea028e5a52a4f429044b90"}, +] + +[package.extras] +toml = ["tomli ; python_full_version <= \"3.11.0a6\""] + +[[package]] +name = "cryptography" +version = "46.0.1" +description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." +optional = false +python-versions = "!=3.9.0,!=3.9.1,>=3.8" +groups = ["dev"] +files = [ + {file = "cryptography-46.0.1-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:1cd6d50c1a8b79af1a6f703709d8973845f677c8e97b1268f5ff323d38ce8475"}, + {file = "cryptography-46.0.1-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0ff483716be32690c14636e54a1f6e2e1b7bf8e22ca50b989f88fa1b2d287080"}, + {file = "cryptography-46.0.1-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9873bf7c1f2a6330bdfe8621e7ce64b725784f9f0c3a6a55c3047af5849f920e"}, + {file = "cryptography-46.0.1-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:0dfb7c88d4462a0cfdd0d87a3c245a7bc3feb59de101f6ff88194f740f72eda6"}, + {file = "cryptography-46.0.1-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e22801b61613ebdebf7deb18b507919e107547a1d39a3b57f5f855032dd7cfb8"}, + {file = "cryptography-46.0.1-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:757af4f6341ce7a1e47c326ca2a81f41d236070217e5fbbad61bbfe299d55d28"}, + {file = "cryptography-46.0.1-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f7a24ea78de345cfa7f6a8d3bde8b242c7fac27f2bd78fa23474ca38dfaeeab9"}, + {file = "cryptography-46.0.1-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:9e8776dac9e660c22241b6587fae51a67b4b0147daa4d176b172c3ff768ad736"}, + {file = "cryptography-46.0.1-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:9f40642a140c0c8649987027867242b801486865277cbabc8c6059ddef16dc8b"}, + {file = "cryptography-46.0.1-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:449ef2b321bec7d97ef2c944173275ebdab78f3abdd005400cc409e27cd159ab"}, + {file = "cryptography-46.0.1-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2dd339ba3345b908fa3141ddba4025568fa6fd398eabce3ef72a29ac2d73ad75"}, + {file = "cryptography-46.0.1-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:7411c910fb2a412053cf33cfad0153ee20d27e256c6c3f14d7d7d1d9fec59fd5"}, + {file = "cryptography-46.0.1-cp311-abi3-win32.whl", hash = "sha256:cbb8e769d4cac884bb28e3ff620ef1001b75588a5c83c9c9f1fdc9afbe7f29b0"}, + {file = "cryptography-46.0.1-cp311-abi3-win_amd64.whl", hash = "sha256:92e8cfe8bd7dd86eac0a677499894862cd5cc2fd74de917daa881d00871ac8e7"}, + {file = "cryptography-46.0.1-cp311-abi3-win_arm64.whl", hash = "sha256:db5597a4c7353b2e5fb05a8e6cb74b56a4658a2b7bf3cb6b1821ae7e7fd6eaa0"}, + {file = "cryptography-46.0.1-cp314-cp314t-macosx_10_9_universal2.whl", hash = 
"sha256:4c49eda9a23019e11d32a0eb51a27b3e7ddedde91e099c0ac6373e3aacc0d2ee"}, + {file = "cryptography-46.0.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9babb7818fdd71394e576cf26c5452df77a355eac1a27ddfa24096665a27f8fd"}, + {file = "cryptography-46.0.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9f2c4cc63be3ef43c0221861177cee5d14b505cd4d4599a89e2cd273c4d3542a"}, + {file = "cryptography-46.0.1-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:41c281a74df173876da1dc9a9b6953d387f06e3d3ed9284e3baae3ab3f40883a"}, + {file = "cryptography-46.0.1-cp314-cp314t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:0a17377fa52563d730248ba1f68185461fff36e8bc75d8787a7dd2e20a802b7a"}, + {file = "cryptography-46.0.1-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:0d1922d9280e08cde90b518a10cd66831f632960a8d08cb3418922d83fce6f12"}, + {file = "cryptography-46.0.1-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:af84e8e99f1a82cea149e253014ea9dc89f75b82c87bb6c7242203186f465129"}, + {file = "cryptography-46.0.1-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:ef648d2c690703501714588b2ba640facd50fd16548133b11b2859e8655a69da"}, + {file = "cryptography-46.0.1-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:e94eb5fa32a8a9f9bf991f424f002913e3dd7c699ef552db9b14ba6a76a6313b"}, + {file = "cryptography-46.0.1-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:534b96c0831855e29fc3b069b085fd185aa5353033631a585d5cd4dd5d40d657"}, + {file = "cryptography-46.0.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:f9b55038b5c6c47559aa33626d8ecd092f354e23de3c6975e4bb205df128a2a0"}, + {file = "cryptography-46.0.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:ec13b7105117dbc9afd023300fb9954d72ca855c274fe563e72428ece10191c0"}, + {file = "cryptography-46.0.1-cp314-cp314t-win32.whl", hash = "sha256:504e464944f2c003a0785b81668fe23c06f3b037e9cb9f68a7c672246319f277"}, + {file = "cryptography-46.0.1-cp314-cp314t-win_amd64.whl", hash = "sha256:c52fded6383f7e20eaf70a60aeddd796b3677c3ad2922c801be330db62778e05"}, + {file = "cryptography-46.0.1-cp314-cp314t-win_arm64.whl", hash = "sha256:9495d78f52c804b5ec8878b5b8c7873aa8e63db9cd9ee387ff2db3fffe4df784"}, + {file = "cryptography-46.0.1-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:d84c40bdb8674c29fa192373498b6cb1e84f882889d21a471b45d1f868d8d44b"}, + {file = "cryptography-46.0.1-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9ed64e5083fa806709e74fc5ea067dfef9090e5b7a2320a49be3c9df3583a2d8"}, + {file = "cryptography-46.0.1-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:341fb7a26bc9d6093c1b124b9f13acc283d2d51da440b98b55ab3f79f2522ead"}, + {file = "cryptography-46.0.1-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:6ef1488967e729948d424d09c94753d0167ce59afba8d0f6c07a22b629c557b2"}, + {file = "cryptography-46.0.1-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:7823bc7cdf0b747ecfb096d004cc41573c2f5c7e3a29861603a2871b43d3ef32"}, + {file = "cryptography-46.0.1-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:f736ab8036796f5a119ff8211deda416f8c15ce03776db704a7a4e17381cb2ef"}, + {file = "cryptography-46.0.1-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:e46710a240a41d594953012213ea8ca398cd2448fbc5d0f1be8160b5511104a0"}, + {file = "cryptography-46.0.1-cp38-abi3-manylinux_2_34_aarch64.whl", hash = 
"sha256:84ef1f145de5aee82ea2447224dc23f065ff4cc5791bb3b506615957a6ba8128"}, + {file = "cryptography-46.0.1-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:9394c7d5a7565ac5f7d9ba38b2617448eba384d7b107b262d63890079fad77ca"}, + {file = "cryptography-46.0.1-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:ed957044e368ed295257ae3d212b95456bd9756df490e1ac4538857f67531fcc"}, + {file = "cryptography-46.0.1-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:f7de12fa0eee6234de9a9ce0ffcfa6ce97361db7a50b09b65c63ac58e5f22fc7"}, + {file = "cryptography-46.0.1-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:7fab1187b6c6b2f11a326f33b036f7168f5b996aedd0c059f9738915e4e8f53a"}, + {file = "cryptography-46.0.1-cp38-abi3-win32.whl", hash = "sha256:45f790934ac1018adeba46a0f7289b2b8fe76ba774a88c7f1922213a56c98bc1"}, + {file = "cryptography-46.0.1-cp38-abi3-win_amd64.whl", hash = "sha256:7176a5ab56fac98d706921f6416a05e5aff7df0e4b91516f450f8627cda22af3"}, + {file = "cryptography-46.0.1-cp38-abi3-win_arm64.whl", hash = "sha256:efc9e51c3e595267ff84adf56e9b357db89ab2279d7e375ffcaf8f678606f3d9"}, + {file = "cryptography-46.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:fd4b5e2ee4e60425711ec65c33add4e7a626adef79d66f62ba0acfd493af282d"}, + {file = "cryptography-46.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:48948940d0ae00483e85e9154bb42997d0b77c21e43a77b7773c8c80de532ac5"}, + {file = "cryptography-46.0.1-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b9c79af2c3058430d911ff1a5b2b96bbfe8da47d5ed961639ce4681886614e70"}, + {file = "cryptography-46.0.1-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:0ca4be2af48c24df689a150d9cd37404f689e2968e247b6b8ff09bff5bcd786f"}, + {file = "cryptography-46.0.1-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:13e67c4d3fb8b6bc4ef778a7ccdd8df4cd15b4bcc18f4239c8440891a11245cc"}, + {file = "cryptography-46.0.1-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:15b5fd9358803b0d1cc42505a18d8bca81dabb35b5cfbfea1505092e13a9d96d"}, + {file = "cryptography-46.0.1-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:e34da95e29daf8a71cb2841fd55df0511539a6cdf33e6f77c1e95e44006b9b46"}, + {file = "cryptography-46.0.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:34f04b7311174469ab3ac2647469743720f8b6c8b046f238e5cb27905695eb2a"}, + {file = "cryptography-46.0.1.tar.gz", hash = "sha256:ed570874e88f213437f5cf758f9ef26cbfc3f336d889b1e592ee11283bb8d1c7"}, +] + +[package.dependencies] +cffi = {version = ">=2.0.0", markers = "python_full_version >= \"3.9.0\" and platform_python_implementation != \"PyPy\""} + +[package.extras] +docs = ["sphinx (>=5.3.0)", "sphinx-inline-tabs", "sphinx-rtd-theme (>=3.0.0)"] +docstest = ["pyenchant (>=3)", "readme-renderer (>=30.0)", "sphinxcontrib-spelling (>=7.3.1)"] +nox = ["nox[uv] (>=2024.4.15)"] +pep8test = ["check-sdist", "click (>=8.0.1)", "mypy (>=1.14)", "ruff (>=0.11.11)"] +sdist = ["build (>=1.0.0)"] +ssh = ["bcrypt (>=3.1.5)"] +test = ["certifi (>=2024)", "cryptography-vectors (==46.0.1)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"] +test-randomorder = ["pytest-randomly"] + +[[package]] +name = "distlib" +version = "0.4.0" +description = "Distribution utilities" +optional = false +python-versions = "*" +groups = ["dev"] +files = [ + {file = "distlib-0.4.0-py2.py3-none-any.whl", hash = "sha256:9659f7d87e46584a30b5780e43ac7a2143098441670ff0a49d5f9034c54a6c16"}, + {file = "distlib-0.4.0.tar.gz", 
hash = "sha256:feec40075be03a04501a973d81f633735b4b69f98b05450592310c0f401a4e0d"}, +] + +[[package]] +name = "filelock" +version = "3.19.1" +description = "A platform independent file lock." +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "filelock-3.19.1-py3-none-any.whl", hash = "sha256:d38e30481def20772f5baf097c122c3babc4fcdb7e14e57049eb9d88c6dc017d"}, + {file = "filelock-3.19.1.tar.gz", hash = "sha256:66eda1888b0171c998b35be2bcc0f6d75c388a7ce20c3f3f37aa8e96c2dddf58"}, +] + +[[package]] +name = "hypothesis" +version = "6.148.2" +description = "The property-based testing library for Python" +optional = false +python-versions = ">=3.10.2" +groups = ["dev"] +files = [ + {file = "hypothesis-6.148.2-py3-none-any.whl", hash = "sha256:bf8ddc829009da73b321994b902b1964bcc3e5c3f0ed9a1c1e6a1631ab97c5fa"}, + {file = "hypothesis-6.148.2.tar.gz", hash = "sha256:07e65d34d687ddff3e92a3ac6b43966c193356896813aec79f0a611c5018f4b1"}, +] + +[package.dependencies] +sortedcontainers = ">=2.1.0,<3.0.0" + +[package.extras] +all = ["black (>=20.8b0)", "click (>=7.0)", "crosshair-tool (>=0.0.97)", "django (>=4.2)", "dpcontracts (>=0.4)", "hypothesis-crosshair (>=0.0.25)", "lark (>=0.10.1)", "libcst (>=0.3.16)", "numpy (>=1.21.6)", "pandas (>=1.1)", "pytest (>=4.6)", "python-dateutil (>=1.4)", "pytz (>=2014.1)", "redis (>=3.0.0)", "rich (>=9.0.0)", "tzdata (>=2025.2) ; sys_platform == \"win32\" or sys_platform == \"emscripten\"", "watchdog (>=4.0.0)"] +cli = ["black (>=20.8b0)", "click (>=7.0)", "rich (>=9.0.0)"] +codemods = ["libcst (>=0.3.16)"] +crosshair = ["crosshair-tool (>=0.0.97)", "hypothesis-crosshair (>=0.0.25)"] +dateutil = ["python-dateutil (>=1.4)"] +django = ["django (>=4.2)"] +dpcontracts = ["dpcontracts (>=0.4)"] +ghostwriter = ["black (>=20.8b0)"] +lark = ["lark (>=0.10.1)"] +numpy = ["numpy (>=1.21.6)"] +pandas = ["pandas (>=1.1)"] +pytest = ["pytest (>=4.6)"] +pytz = ["pytz (>=2014.1)"] +redis = ["redis (>=3.0.0)"] +watchdog = ["watchdog (>=4.0.0)"] +zoneinfo = ["tzdata (>=2025.2) ; sys_platform == \"win32\" or sys_platform == \"emscripten\""] + +[[package]] +name = "identify" +version = "2.6.14" +description = "File identification library for Python" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "identify-2.6.14-py2.py3-none-any.whl", hash = "sha256:11a073da82212c6646b1f39bb20d4483bfb9543bd5566fec60053c4bb309bf2e"}, + {file = "identify-2.6.14.tar.gz", hash = "sha256:663494103b4f717cb26921c52f8751363dc89db64364cd836a9bf1535f53cd6a"}, +] + +[package.extras] +license = ["ukkonen"] + +[[package]] +name = "idna" +version = "3.10" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.6" +groups = ["dev"] +files = [ + {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, + {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, +] + +[package.extras] +all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"] + +[[package]] +name = "importlib-resources" +version = "6.5.2" +description = "Read resources from Python packages" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "importlib_resources-6.5.2-py3-none-any.whl", hash = "sha256:789cfdc3ed28c78b67a06acb8126751ced69a3d5f79c095a98298cd8a760ccec"}, + {file = "importlib_resources-6.5.2.tar.gz", hash = 
"sha256:185f87adef5bcc288449d98fb4fba07cea78bc036455dd44c5fc4a2fe78fed2c"}, +] + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["jaraco.test (>=5.4)", "pytest (>=6,!=8.1.*)", "zipp (>=3.17)"] +type = ["pytest-mypy"] + +[[package]] +name = "iniconfig" +version = "2.1.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"}, + {file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"}, +] + +[[package]] +name = "jinja2" +version = "3.1.6" +description = "A very fast and expressive template engine." +optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"}, + {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + +[[package]] +name = "jmespath" +version = "1.0.1" +description = "JSON Matching Expressions" +optional = false +python-versions = ">=3.7" +groups = ["main", "dev"] +files = [ + {file = "jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980"}, + {file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"}, +] + +[[package]] +name = "jsii" +version = "1.114.1" +description = "Python client for jsii runtime" +optional = false +python-versions = "~=3.9" +groups = ["dev"] +files = [ + {file = "jsii-1.114.1-py3-none-any.whl", hash = "sha256:e8e6a2fb6117af734803b709cbbb74745548e4d70248417f14724c9f6d4945dd"}, + {file = "jsii-1.114.1.tar.gz", hash = "sha256:bd3a9ab7aa3f3971aea638e02ae079599f70ff2fe80a0bd8a6e38759f1235d78"}, +] + +[package.dependencies] +attrs = ">=21.2,<26.0" +cattrs = ">=1.8,<25.3" +importlib_resources = ">=5.2.0" +publication = ">=0.0.3" +python-dateutil = "*" +typeguard = ">=2.13.3,<4.5.0" +typing_extensions = ">=3.8,<5.0" + +[[package]] +name = "markupsafe" +version = "3.0.2" +description = "Safely add untrusted strings to HTML/XML markup." 
+optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7e94c425039cde14257288fd61dcfb01963e658efbc0ff54f5306b06054700f8"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9e2d922824181480953426608b81967de705c3cef4d1af983af849d7bd619158"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38a9ef736c01fccdd6600705b09dc574584b89bea478200c5fbf112a6b0d5579"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bbcb445fa71794da8f178f0f6d66789a28d7319071af7a496d4d507ed566270d"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:57cb5a3cf367aeb1d316576250f65edec5bb3be939e9247ae594b4bcbc317dfb"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:3809ede931876f5b2ec92eef964286840ed3540dadf803dd570c3b7e13141a3b"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e07c3764494e3776c602c1e78e298937c3315ccc9043ead7e685b7f2b8d47b3c"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b424c77b206d63d500bcb69fa55ed8d0e6a3774056bdc4839fc9298a7edca171"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win32.whl", hash = "sha256:fcabf5ff6eea076f859677f5f0b6b5c1a51e70a376b0579e0eadef8db48c6b50"}, + {file = "MarkupSafe-3.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:6af100e168aa82a50e186c82875a5893c5597a0c1ccdb0d8b40240b1f28b969a"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9025b4018f3a1314059769c7bf15441064b2207cb3f065e6ea1e7359cb46db9d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:93335ca3812df2f366e80509ae119189886b0f3c2b81325d39efdb84a1e2ae93"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2cb8438c3cbb25e220c2ab33bb226559e7afb3baec11c4f218ffa7308603c832"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a123e330ef0853c6e822384873bef7507557d8e4a082961e1defa947aa59ba84"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e084f686b92e5b83186b07e8a17fc09e38fff551f3602b249881fec658d3eca"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8213e09c917a951de9d09ecee036d5c7d36cb6cb7dbaece4c71a60d79fb9798"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:5b02fb34468b6aaa40dfc198d813a641e3a63b98c2b05a16b9f80b7ec314185e"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0bff5e0ae4ef2e1ae4fdf2dfd5b76c75e5c2fa4132d05fc1b0dabcd20c7e28c4"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win32.whl", hash = "sha256:6c89876f41da747c8d3677a2b540fb32ef5715f97b66eeb0c6b66f5e3ef6f59d"}, + {file = "MarkupSafe-3.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:70a87b411535ccad5ef2f1df5136506a10775d267e197e4cf531ced10537bd6b"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225"}, + {file = 
"MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30"}, + {file = "MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ba9527cdd4c926ed0760bc301f6728ef34d841f405abf9d4f959c478421e4efd"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f8b3d067f2e40fe93e1ccdd6b2e1d16c43140e76f02fb1319a05cf2b79d99430"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:569511d3b58c8791ab4c2e1285575265991e6d8f8700c7be0e88f86cb0672094"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15ab75ef81add55874e7ab7055e9c397312385bd9ced94920f2802310c930396"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f3818cb119498c0678015754eba762e0d61e5b52d34c8b13d770f0719f7b1d79"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:cdb82a876c47801bb54a690c5ae105a46b392ac6099881cdfb9f6e95e4014c6a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:cabc348d87e913db6ab4aa100f01b08f481097838bdddf7c7a84b7575b7309ca"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:444dcda765c8a838eaae23112db52f1efaf750daddb2d9ca300bcae1039adc5c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win32.whl", hash = "sha256:bcf3e58998965654fdaff38e58584d8937aa3096ab5354d493c77d1fdd66d7a1"}, + {file = "MarkupSafe-3.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:e6a2a455bd412959b57a172ce6328d2dd1f01cb2135efda2e4576e8a23fa3b0f"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:b5a6b3ada725cea8a5e634536b1b01c30bcdcd7f9c6fff4151548d5bf6b3a36c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:a904af0a6162c73e3edcb969eeeb53a63ceeb5d8cf642fade7d39e7963a22ddb"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4aa4e5faecf353ed117801a068ebab7b7e09ffb6e1d5e412dc852e0da018126c"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0ef13eaeee5b615fb07c9a7dadb38eac06a0608b41570d8ade51c56539e509d"}, + {file = 
"MarkupSafe-3.0.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d16a81a06776313e817c951135cf7340a3e91e8c1ff2fac444cfd75fffa04afe"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6381026f158fdb7c72a168278597a5e3a5222e83ea18f543112b2662a9b699c5"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:3d79d162e7be8f996986c064d1c7c817f6df3a77fe3d6859f6f9e7be4b8c213a"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:131a3c7689c85f5ad20f9f6fb1b866f402c445b220c19fe4308c0b147ccd2ad9"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win32.whl", hash = "sha256:ba8062ed2cf21c07a9e295d5b8a2a5ce678b913b45fdf68c32d95d6c1291e0b6"}, + {file = "MarkupSafe-3.0.2-cp313-cp313t-win_amd64.whl", hash = "sha256:e444a31f8db13eb18ada366ab3cf45fd4b31e4db1236a4448f68778c1d1a5a2f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:eaa0a10b7f72326f1372a713e73c3f739b524b3af41feb43e4921cb529f5929a"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:48032821bbdf20f5799ff537c7ac3d1fba0ba032cfc06194faffa8cda8b560ff"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a9d3f5f0901fdec14d8d2f66ef7d035f2157240a433441719ac9a3fba440b13"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88b49a3b9ff31e19998750c38e030fc7bb937398b1f78cfa599aaef92d693144"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cfad01eed2c2e0c01fd0ecd2ef42c492f7f93902e39a42fc9ee1692961443a29"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:1225beacc926f536dc82e45f8a4d68502949dc67eea90eab715dea3a21c1b5f0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3169b1eefae027567d1ce6ee7cae382c57fe26e82775f460f0b2778beaad66c0"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:eb7972a85c54febfb25b5c4b4f3af4dcc731994c7da0d8a0b4a6eb0640e1d178"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win32.whl", hash = "sha256:8c4e8c3ce11e1f92f6536ff07154f9d49677ebaaafc32db9db4620bc11ed480f"}, + {file = "MarkupSafe-3.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:6e296a513ca3d94054c2c881cc913116e90fd030ad1c656b3869762b754f5f8a"}, + {file = "markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0"}, +] + +[[package]] +name = "moto" +version = "5.1.12" +description = "A library that allows you to easily mock out tests based on AWS infrastructure" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "moto-5.1.12-py3-none-any.whl", hash = "sha256:c9f1119ab57819ce4b88f793f51c6ca0361b6932a90c59865fd71022acfc5582"}, + {file = "moto-5.1.12.tar.gz", hash = "sha256:6eca3a020cb89c188b763610c27c969c32b832205712d3bdcb1a6031a4005187"}, +] + +[package.dependencies] +boto3 = ">=1.9.201" +botocore = ">=1.20.88,<1.35.45 || >1.35.45,<1.35.46 || >1.35.46" +cryptography = ">=35.0.0" +Jinja2 = ">=2.10.1" +python-dateutil = ">=2.1,<3.0.0" +requests = ">=2.5" +responses = ">=0.15.0,<0.25.5 || >0.25.5" +werkzeug = ">=0.5,<2.2.0 || >2.2.0,<2.2.1 || >2.2.1" +xmltodict = "*" + +[package.extras] +all = ["PyYAML (>=5.1)", "antlr4-python3-runtime", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "graphql-core", 
"joserfc (>=0.9.0)", "jsonpath_ng", "jsonschema", "multipart", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.6.1)", "pyparsing (>=3.0.7)", "setuptools"] +apigateway = ["PyYAML (>=5.1)", "joserfc (>=0.9.0)", "openapi-spec-validator (>=0.5.0)"] +apigatewayv2 = ["PyYAML (>=5.1)", "openapi-spec-validator (>=0.5.0)"] +appsync = ["graphql-core"] +awslambda = ["docker (>=3.0.0)"] +batch = ["docker (>=3.0.0)"] +cloudformation = ["PyYAML (>=5.1)", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "graphql-core", "joserfc (>=0.9.0)", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.6.1)", "pyparsing (>=3.0.7)", "setuptools"] +cognitoidp = ["joserfc (>=0.9.0)"] +dynamodb = ["docker (>=3.0.0)", "py-partiql-parser (==0.6.1)"] +dynamodbstreams = ["docker (>=3.0.0)", "py-partiql-parser (==0.6.1)"] +events = ["jsonpath_ng"] +glue = ["pyparsing (>=3.0.7)"] +proxy = ["PyYAML (>=5.1)", "antlr4-python3-runtime", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=2.5.1)", "graphql-core", "joserfc (>=0.9.0)", "jsonpath_ng", "multipart", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.6.1)", "pyparsing (>=3.0.7)", "setuptools"] +quicksight = ["jsonschema"] +resourcegroupstaggingapi = ["PyYAML (>=5.1)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "graphql-core", "joserfc (>=0.9.0)", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.6.1)", "pyparsing (>=3.0.7)"] +s3 = ["PyYAML (>=5.1)", "py-partiql-parser (==0.6.1)"] +s3crc32c = ["PyYAML (>=5.1)", "crc32c", "py-partiql-parser (==0.6.1)"] +server = ["PyYAML (>=5.1)", "antlr4-python3-runtime", "aws-xray-sdk (>=0.93,!=0.96)", "cfn-lint (>=0.40.0)", "docker (>=3.0.0)", "flask (!=2.2.0,!=2.2.1)", "flask-cors", "graphql-core", "joserfc (>=0.9.0)", "jsonpath_ng", "openapi-spec-validator (>=0.5.0)", "py-partiql-parser (==0.6.1)", "pyparsing (>=3.0.7)", "setuptools"] +ssm = ["PyYAML (>=5.1)"] +stepfunctions = ["antlr4-python3-runtime", "jsonpath_ng"] +xray = ["aws-xray-sdk (>=0.93,!=0.96)", "setuptools"] + +[[package]] +name = "mypy" +version = "1.18.2" +description = "Optional static typing for Python" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "mypy-1.18.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c1eab0cf6294dafe397c261a75f96dc2c31bffe3b944faa24db5def4e2b0f77c"}, + {file = "mypy-1.18.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7a780ca61fc239e4865968ebc5240bb3bf610ef59ac398de9a7421b54e4a207e"}, + {file = "mypy-1.18.2-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:448acd386266989ef11662ce3c8011fd2a7b632e0ec7d61a98edd8e27472225b"}, + {file = "mypy-1.18.2-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f9e171c465ad3901dc652643ee4bffa8e9fef4d7d0eece23b428908c77a76a66"}, + {file = "mypy-1.18.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:592ec214750bc00741af1f80cbf96b5013d81486b7bb24cb052382c19e40b428"}, + {file = "mypy-1.18.2-cp310-cp310-win_amd64.whl", hash = "sha256:7fb95f97199ea11769ebe3638c29b550b5221e997c63b14ef93d2e971606ebed"}, + {file = "mypy-1.18.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:807d9315ab9d464125aa9fcf6d84fde6e1dc67da0b6f80e7405506b8ac72bc7f"}, + {file = "mypy-1.18.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:776bb00de1778caf4db739c6e83919c1d85a448f71979b6a0edd774ea8399341"}, + {file = "mypy-1.18.2-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", 
hash = "sha256:1379451880512ffce14505493bd9fe469e0697543717298242574882cf8cdb8d"}, + {file = "mypy-1.18.2-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1331eb7fd110d60c24999893320967594ff84c38ac6d19e0a76c5fd809a84c86"}, + {file = "mypy-1.18.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3ca30b50a51e7ba93b00422e486cbb124f1c56a535e20eff7b2d6ab72b3b2e37"}, + {file = "mypy-1.18.2-cp311-cp311-win_amd64.whl", hash = "sha256:664dc726e67fa54e14536f6e1224bcfce1d9e5ac02426d2326e2bb4e081d1ce8"}, + {file = "mypy-1.18.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:33eca32dd124b29400c31d7cf784e795b050ace0e1f91b8dc035672725617e34"}, + {file = "mypy-1.18.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a3c47adf30d65e89b2dcd2fa32f3aeb5e94ca970d2c15fcb25e297871c8e4764"}, + {file = "mypy-1.18.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d6c838e831a062f5f29d11c9057c6009f60cb294fea33a98422688181fe2893"}, + {file = "mypy-1.18.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:01199871b6110a2ce984bde85acd481232d17413868c9807e95c1b0739a58914"}, + {file = "mypy-1.18.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a2afc0fa0b0e91b4599ddfe0f91e2c26c2b5a5ab263737e998d6817874c5f7c8"}, + {file = "mypy-1.18.2-cp312-cp312-win_amd64.whl", hash = "sha256:d8068d0afe682c7c4897c0f7ce84ea77f6de953262b12d07038f4d296d547074"}, + {file = "mypy-1.18.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:07b8b0f580ca6d289e69209ec9d3911b4a26e5abfde32228a288eb79df129fcc"}, + {file = "mypy-1.18.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:ed4482847168439651d3feee5833ccedbf6657e964572706a2adb1f7fa4dfe2e"}, + {file = "mypy-1.18.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c3ad2afadd1e9fea5cf99a45a822346971ede8685cc581ed9cd4d42eaf940986"}, + {file = "mypy-1.18.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a431a6f1ef14cf8c144c6b14793a23ec4eae3db28277c358136e79d7d062f62d"}, + {file = "mypy-1.18.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7ab28cc197f1dd77a67e1c6f35cd1f8e8b73ed2217e4fc005f9e6a504e46e7ba"}, + {file = "mypy-1.18.2-cp313-cp313-win_amd64.whl", hash = "sha256:0e2785a84b34a72ba55fb5daf079a1003a34c05b22238da94fcae2bbe46f3544"}, + {file = "mypy-1.18.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:62f0e1e988ad41c2a110edde6c398383a889d95b36b3e60bcf155f5164c4fdce"}, + {file = "mypy-1.18.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:8795a039bab805ff0c1dfdb8cd3344642c2b99b8e439d057aba30850b8d3423d"}, + {file = "mypy-1.18.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6ca1e64b24a700ab5ce10133f7ccd956a04715463d30498e64ea8715236f9c9c"}, + {file = "mypy-1.18.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d924eef3795cc89fecf6bedc6ed32b33ac13e8321344f6ddbf8ee89f706c05cb"}, + {file = "mypy-1.18.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:20c02215a080e3a2be3aa50506c67242df1c151eaba0dcbc1e4e557922a26075"}, + {file = "mypy-1.18.2-cp314-cp314-win_amd64.whl", hash = "sha256:749b5f83198f1ca64345603118a6f01a4e99ad4bf9d103ddc5a3200cc4614adf"}, + {file = "mypy-1.18.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:25a9c8fb67b00599f839cf472713f54249a62efd53a54b565eb61956a7e3296b"}, + {file = 
"mypy-1.18.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c2b9c7e284ee20e7598d6f42e13ca40b4928e6957ed6813d1ab6348aa3f47133"}, + {file = "mypy-1.18.2-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d6985ed057513e344e43a26cc1cd815c7a94602fb6a3130a34798625bc2f07b6"}, + {file = "mypy-1.18.2-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:22f27105f1525ec024b5c630c0b9f36d5c1cc4d447d61fe51ff4bd60633f47ac"}, + {file = "mypy-1.18.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:030c52d0ea8144e721e49b1f68391e39553d7451f0c3f8a7565b59e19fcb608b"}, + {file = "mypy-1.18.2-cp39-cp39-win_amd64.whl", hash = "sha256:aa5e07ac1a60a253445797e42b8b2963c9675563a94f11291ab40718b016a7a0"}, + {file = "mypy-1.18.2-py3-none-any.whl", hash = "sha256:22a1748707dd62b58d2ae53562ffc4d7f8bcc727e8ac7cbc69c053ddc874d47e"}, + {file = "mypy-1.18.2.tar.gz", hash = "sha256:06a398102a5f203d7477b2923dda3634c36727fa5c237d8f859ef90c42a9924b"}, +] + +[package.dependencies] +mypy_extensions = ">=1.0.0" +pathspec = ">=0.9.0" +typing_extensions = ">=4.6.0" + +[package.extras] +dmypy = ["psutil (>=4.0)"] +faster-cache = ["orjson"] +install-types = ["pip"] +mypyc = ["setuptools (>=50)"] +reports = ["lxml"] + +[[package]] +name = "mypy-boto3-dynamodb" +version = "1.40.20" +description = "Type annotations for boto3 DynamoDB 1.40.20 service generated with mypy-boto3-builder 8.11.0" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "mypy_boto3_dynamodb-1.40.20-py3-none-any.whl", hash = "sha256:276f12132b19e5cd9c8c2e551899128acfad39fb56fc77c7e0b005b29d86c844"}, + {file = "mypy_boto3_dynamodb-1.40.20.tar.gz", hash = "sha256:2f424315d299ccf0b3b4e3b2b2300f90f09cd0b7c69fc473d0cdc0b6be6c30d6"}, +] + +[[package]] +name = "mypy-boto3-events" +version = "1.40.0" +description = "Type annotations for boto3 EventBridge 1.40.0 service generated with mypy-boto3-builder 8.11.0" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "mypy_boto3_events-1.40.0-py3-none-any.whl", hash = "sha256:3d99a5bf9a357347a3e0a28b5f5f0a288a691fca765a3330ae0db5907a0a7a6f"}, + {file = "mypy_boto3_events-1.40.0.tar.gz", hash = "sha256:3627db3a067c434a149ca72122dd3846030c0c15cc8b024908a0e66b1de3e04b"}, +] + +[[package]] +name = "mypy-extensions" +version = "1.1.0" +description = "Type system extensions for programs checked with the mypy type checker." 
+optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505"}, + {file = "mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558"}, +] + +[[package]] +name = "nodeenv" +version = "1.9.1" +description = "Node.js virtual environment builder" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["dev"] +files = [ + {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, + {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, +] + +[[package]] +name = "packaging" +version = "25.0" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"}, + {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, +] + +[[package]] +name = "pathspec" +version = "0.12.1" +description = "Utility library for gitignore style pattern matching of file paths." +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, + {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, +] + +[[package]] +name = "platformdirs" +version = "4.4.0" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "platformdirs-4.4.0-py3-none-any.whl", hash = "sha256:abd01743f24e5287cd7a5db3752faf1a2d65353f38ec26d98e25a6db65958c85"}, + {file = "platformdirs-4.4.0.tar.gz", hash = "sha256:ca753cf4d81dc309bc67b0ea38fd15dc97bc30ce419a7f58d13eb3bf14c4febf"}, +] + +[package.extras] +docs = ["furo (>=2024.8.6)", "proselint (>=0.14)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=8.3.4)", "pytest-cov (>=6)", "pytest-mock (>=3.14)"] +type = ["mypy (>=1.14.1)"] + +[[package]] +name = "pluggy" +version = "1.6.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746"}, + {file = "pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["coverage", "pytest", "pytest-benchmark"] + +[[package]] +name = "pre-commit" +version = "4.3.0" +description = "A framework for managing and maintaining multi-language pre-commit hooks." 
+optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "pre_commit-4.3.0-py2.py3-none-any.whl", hash = "sha256:2b0747ad7e6e967169136edffee14c16e148a778a54e4f967921aa1ebf2308d8"}, + {file = "pre_commit-4.3.0.tar.gz", hash = "sha256:499fe450cc9d42e9d58e606262795ecb64dd05438943c62b66f6a8673da30b16"}, +] + +[package.dependencies] +cfgv = ">=2.0.0" +identify = ">=1.0.0" +nodeenv = ">=0.11.1" +pyyaml = ">=5.1" +virtualenv = ">=20.10.0" + +[[package]] +name = "publication" +version = "0.0.3" +description = "Publication helps you maintain public-api-friendly modules by preventing unintentional access to private implementation details via introspection." +optional = false +python-versions = "*" +groups = ["dev"] +files = [ + {file = "publication-0.0.3-py2.py3-none-any.whl", hash = "sha256:0248885351febc11d8a1098d5c8e3ab2dabcf3e8c0c96db1e17ecd12b53afbe6"}, + {file = "publication-0.0.3.tar.gz", hash = "sha256:68416a0de76dddcdd2930d1c8ef853a743cc96c82416c4e4d3b5d901c6276dc4"}, +] + +[[package]] +name = "pycparser" +version = "2.23" +description = "C parser in Python" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +markers = "platform_python_implementation != \"PyPy\" and implementation_name != \"PyPy\"" +files = [ + {file = "pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934"}, + {file = "pycparser-2.23.tar.gz", hash = "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2"}, +] + +[[package]] +name = "pydantic" +version = "2.11.9" +description = "Data validation using Python type hints" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pydantic-2.11.9-py3-none-any.whl", hash = "sha256:c42dd626f5cfc1c6950ce6205ea58c93efa406da65f479dcb4029d5934857da2"}, + {file = "pydantic-2.11.9.tar.gz", hash = "sha256:6b8ffda597a14812a7975c90b82a8a2e777d9257aba3453f973acd3c032a18e2"}, +] + +[package.dependencies] +annotated-types = ">=0.6.0" +pydantic-core = "2.33.2" +typing-extensions = ">=4.12.2" +typing-inspection = ">=0.4.0" + +[package.extras] +email = ["email-validator (>=2.0.0)"] +timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] + +[[package]] +name = "pydantic-core" +version = "2.33.2" +description = "Core functionality for Pydantic validation and serialization" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"}, + {file = "pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0069c9acc3f3981b9ff4cdfaf088e98d83440a4c7ea1bc07460af3d4dc22e72d"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d53b22f2032c42eaaf025f7c40c2e3b94568ae077a606f006d206a463bc69572"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0405262705a123b7ce9f0b92f123334d67b70fd1f20a9372b907ce1080c7ba02"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b25d91e288e2c4e0662b8038a28c6a07eaac3e196cfc4ff69de4ea3db992a1b"}, + {file = 
"pydantic_core-2.33.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bdfe4b3789761f3bcb4b1ddf33355a71079858958e3a552f16d5af19768fef2"}, + {file = "pydantic_core-2.33.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:efec8db3266b76ef9607c2c4c419bdb06bf335ae433b80816089ea7585816f6a"}, + {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:031c57d67ca86902726e0fae2214ce6770bbe2f710dc33063187a68744a5ecac"}, + {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:f8de619080e944347f5f20de29a975c2d815d9ddd8be9b9b7268e2e3ef68605a"}, + {file = "pydantic_core-2.33.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:73662edf539e72a9440129f231ed3757faab89630d291b784ca99237fb94db2b"}, + {file = "pydantic_core-2.33.2-cp310-cp310-win32.whl", hash = "sha256:0a39979dcbb70998b0e505fb1556a1d550a0781463ce84ebf915ba293ccb7e22"}, + {file = "pydantic_core-2.33.2-cp310-cp310-win_amd64.whl", hash = "sha256:b0379a2b24882fef529ec3b4987cb5d003b9cda32256024e6fe1586ac45fc640"}, + {file = "pydantic_core-2.33.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4c5b0a576fb381edd6d27f0a85915c6daf2f8138dc5c267a57c08a62900758c7"}, + {file = "pydantic_core-2.33.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e799c050df38a639db758c617ec771fd8fb7a5f8eaaa4b27b101f266b216a246"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dc46a01bf8d62f227d5ecee74178ffc448ff4e5197c756331f71efcc66dc980f"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a144d4f717285c6d9234a66778059f33a89096dfb9b39117663fd8413d582dcc"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:73cf6373c21bc80b2e0dc88444f41ae60b2f070ed02095754eb5a01df12256de"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3dc625f4aa79713512d1976fe9f0bc99f706a9dee21dfd1810b4bbbf228d0e8a"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:881b21b5549499972441da4758d662aeea93f1923f953e9cbaff14b8b9565aef"}, + {file = "pydantic_core-2.33.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bdc25f3681f7b78572699569514036afe3c243bc3059d3942624e936ec93450e"}, + {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:fe5b32187cbc0c862ee201ad66c30cf218e5ed468ec8dc1cf49dec66e160cc4d"}, + {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:bc7aee6f634a6f4a95676fcb5d6559a2c2a390330098dba5e5a5f28a2e4ada30"}, + {file = "pydantic_core-2.33.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:235f45e5dbcccf6bd99f9f472858849f73d11120d76ea8707115415f8e5ebebf"}, + {file = "pydantic_core-2.33.2-cp311-cp311-win32.whl", hash = "sha256:6368900c2d3ef09b69cb0b913f9f8263b03786e5b2a387706c5afb66800efd51"}, + {file = "pydantic_core-2.33.2-cp311-cp311-win_amd64.whl", hash = "sha256:1e063337ef9e9820c77acc768546325ebe04ee38b08703244c1309cccc4f1bab"}, + {file = "pydantic_core-2.33.2-cp311-cp311-win_arm64.whl", hash = "sha256:6b99022f1d19bc32a4c2a0d544fc9a76e3be90f0b3f4af413f87d38749300e65"}, + {file = "pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc"}, + {file = "pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1"}, + {file = "pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b"}, + {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1"}, + {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6"}, + {file = "pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea"}, + {file = "pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290"}, + {file = "pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2"}, + {file = "pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab"}, + {file = "pydantic_core-2.33.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1082dd3e2d7109ad8b7da48e1d4710c8d06c253cbc4a27c1cff4fbcaa97a9e3f"}, + {file = "pydantic_core-2.33.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:f517ca031dfc037a9c07e748cefd8d96235088b83b4f4ba8939105d20fa1dcd6"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a9f2c9dd19656823cb8250b0724ee9c60a82f3cdf68a080979d13092a3b0fef"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2b0a451c263b01acebe51895bfb0e1cc842a5c666efe06cdf13846c7418caa9a"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ea40a64d23faa25e62a70ad163571c0b342b8bf66d5fa612ac0dec4f069d916"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0fb2d542b4d66f9470e8065c5469ec676978d625a8b7a363f07d9a501a9cb36a"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdac5d6ffa1b5a83bca06ffe7583f5576555e6c8b3a91fbd25ea7780f825f7d"}, + {file = "pydantic_core-2.33.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:04a1a413977ab517154eebb2d326da71638271477d6ad87a769102f7c2488c56"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:c8e7af2f4e0194c22b5b37205bfb293d166a7344a5b0d0eaccebc376546d77d5"}, + {file = 
"pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:5c92edd15cd58b3c2d34873597a1e20f13094f59cf88068adb18947df5455b4e"}, + {file = "pydantic_core-2.33.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:65132b7b4a1c0beded5e057324b7e16e10910c106d43675d9bd87d4f38dde162"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win32.whl", hash = "sha256:52fb90784e0a242bb96ec53f42196a17278855b0f31ac7c3cc6f5c1ec4811849"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win_amd64.whl", hash = "sha256:c083a3bdd5a93dfe480f1125926afcdbf2917ae714bdb80b36d34318b2bec5d9"}, + {file = "pydantic_core-2.33.2-cp313-cp313-win_arm64.whl", hash = "sha256:e80b087132752f6b3d714f041ccf74403799d3b23a72722ea2e6ba2e892555b9"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:61c18fba8e5e9db3ab908620af374db0ac1baa69f0f32df4f61ae23f15e586ac"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95237e53bb015f67b63c91af7518a62a8660376a6a0db19b89acc77a4d6199f5"}, + {file = "pydantic_core-2.33.2-cp313-cp313t-win_amd64.whl", hash = "sha256:c2fc0a768ef76c15ab9238afa6da7f69895bb5d1ee83aeea2e3509af4472d0b9"}, + {file = "pydantic_core-2.33.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:a2b911a5b90e0374d03813674bf0a5fbbb7741570dcd4b4e85a2e48d17def29d"}, + {file = "pydantic_core-2.33.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6fa6dfc3e4d1f734a34710f391ae822e0a8eb8559a85c6979e14e65ee6ba2954"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c54c939ee22dc8e2d545da79fc5381f1c020d6d3141d3bd747eab59164dc89fb"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:53a57d2ed685940a504248187d5685e49eb5eef0f696853647bf37c418c538f7"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:09fb9dd6571aacd023fe6aaca316bd01cf60ab27240d7eb39ebd66a3a15293b4"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0e6116757f7959a712db11f3e9c0a99ade00a5bbedae83cb801985aa154f071b"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d55ab81c57b8ff8548c3e4947f119551253f4e3787a7bbc0b6b3ca47498a9d3"}, + {file = "pydantic_core-2.33.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c20c462aa4434b33a2661701b861604913f912254e441ab8d78d30485736115a"}, + {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:44857c3227d3fb5e753d5fe4a3420d6376fa594b07b621e220cd93703fe21782"}, + {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:eb9b459ca4df0e5c87deb59d37377461a538852765293f9e6ee834f0435a93b9"}, + {file = "pydantic_core-2.33.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9fcd347d2cc5c23b06de6d3b7b8275be558a0c90549495c699e379a80bf8379e"}, + {file = "pydantic_core-2.33.2-cp39-cp39-win32.whl", hash = "sha256:83aa99b1285bc8f038941ddf598501a86f1536789740991d7d8756e34f1e74d9"}, + {file = "pydantic_core-2.33.2-cp39-cp39-win_amd64.whl", hash = "sha256:f481959862f57f29601ccced557cc2e817bce7533ab8e01a797a48b49c9692b3"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5c4aa4e82353f65e548c476b37e64189783aa5384903bfea4f41580f255fddfa"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = 
"sha256:d946c8bf0d5c24bf4fe333af284c59a19358aa3ec18cb3dc4370080da1e8ad29"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:87b31b6846e361ef83fedb187bb5b4372d0da3f7e28d85415efa92d6125d6e6d"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa9d91b338f2df0508606f7009fde642391425189bba6d8c653afd80fd6bb64e"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2058a32994f1fde4ca0480ab9d1e75a0e8c87c22b53a3ae66554f9af78f2fe8c"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:0e03262ab796d986f978f79c943fc5f620381be7287148b8010b4097f79a39ec"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1a8695a8d00c73e50bff9dfda4d540b7dee29ff9b8053e38380426a85ef10052"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:fa754d1850735a0b0e03bcffd9d4b4343eb417e47196e4485d9cca326073a42c"}, + {file = "pydantic_core-2.33.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a11c8d26a50bfab49002947d3d237abe4d9e4b5bdc8846a63537b6488e197808"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:dd14041875d09cc0f9308e37a6f8b65f5585cf2598a53aa0123df8b129d481f8"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:d87c561733f66531dced0da6e864f44ebf89a8fba55f31407b00c2f7f9449593"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2f82865531efd18d6e07a04a17331af02cb7a651583c418df8266f17a63c6612"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bfb5112df54209d820d7bf9317c7a6c9025ea52e49f46b6a2060104bba37de7"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:64632ff9d614e5eecfb495796ad51b0ed98c453e447a76bcbeeb69615079fc7e"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:f889f7a40498cc077332c7ab6b4608d296d852182211787d4f3ee377aaae66e8"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:de4b83bb311557e439b9e186f733f6c645b9417c84e2eb8203f3f820a4b988bf"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:82f68293f055f51b51ea42fafc74b6aad03e70e191799430b90c13d643059ebb"}, + {file = "pydantic_core-2.33.2-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:329467cecfb529c925cf2bbd4d60d2c509bc2fb52a20c1045bf09bb70971a9c1"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:87acbfcf8e90ca885206e98359d7dca4bcbb35abdc0ff66672a293e1d7a19101"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:7f92c15cd1e97d4b12acd1cc9004fa092578acfa57b67ad5e43a197175d01a64"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3f26877a748dc4251cfcfda9dfb5f13fcb034f5308388066bcfe9031b63ae7d"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dac89aea9af8cd672fa7b510e7b8c33b0bba9a43186680550ccf23020f32d535"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:970919794d126ba8645f3837ab6046fb4e72bbc057b3709144066204c19a455d"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:3eb3fe62804e8f859c49ed20a8451342de53ed764150cb14ca71357c765dc2a6"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:3abcd9392a36025e3bd55f9bd38d908bd17962cc49bc6da8e7e96285336e2bca"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:3a1c81334778f9e3af2f8aeb7a960736e5cab1dfebfb26aabca09afd2906c039"}, + {file = "pydantic_core-2.33.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2807668ba86cb38c6817ad9bc66215ab8584d1d304030ce4f0887336f28a5e27"}, + {file = "pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc"}, +] + +[package.dependencies] +typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" + +[[package]] +name = "pygments" +version = "2.19.2" +description = "Pygments is a syntax highlighting package written in Python." +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b"}, + {file = "pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887"}, +] + +[package.extras] +windows-terminal = ["colorama (>=0.4.6)"] + +[[package]] +name = "pytest" +version = "8.4.2" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79"}, + {file = "pytest-8.4.2.tar.gz", hash = "sha256:86c0d0b93306b961d58d62a4db4879f27fe25513d4b969df351abdddb3c30e01"}, +] + +[package.dependencies] +colorama = {version = ">=0.4", markers = "sys_platform == \"win32\""} +iniconfig = ">=1" +packaging = ">=20" +pluggy = ">=1.5,<2" +pygments = ">=2.7.2" + +[package.extras] +dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-asyncio" +version = "1.2.0" +description = "Pytest support for asyncio" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "pytest_asyncio-1.2.0-py3-none-any.whl", hash = "sha256:8e17ae5e46d8e7efe51ab6494dd2010f4ca8dae51652aa3c8d55acf50bfb2e99"}, + {file = "pytest_asyncio-1.2.0.tar.gz", hash = "sha256:c609a64a2a8768462d0c99811ddb8bd2583c33fd33cf7f21af1c142e824ffb57"}, +] + +[package.dependencies] +pytest = ">=8.2,<9" + +[package.extras] +docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1)"] +testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"] + +[[package]] +name = "pytest-cov" +version = "7.0.0" +description = "Pytest plugin for measuring coverage." 
+optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "pytest_cov-7.0.0-py3-none-any.whl", hash = "sha256:3b8e9558b16cc1479da72058bdecf8073661c7f57f7d3c5f22a1c23507f2d861"}, + {file = "pytest_cov-7.0.0.tar.gz", hash = "sha256:33c97eda2e049a0c5298e91f519302a1334c26ac65c1a483d6206fd458361af1"}, +] + +[package.dependencies] +coverage = {version = ">=7.10.6", extras = ["toml"]} +pluggy = ">=1.2" +pytest = ">=7" + +[package.extras] +testing = ["process-tests", "pytest-xdist", "virtualenv"] + +[[package]] +name = "pytest-html" +version = "4.1.1" +description = "pytest plugin for generating HTML reports" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "pytest_html-4.1.1-py3-none-any.whl", hash = "sha256:c8152cea03bd4e9bee6d525573b67bbc6622967b72b9628dda0ea3e2a0b5dd71"}, + {file = "pytest_html-4.1.1.tar.gz", hash = "sha256:70a01e8ae5800f4a074b56a4cb1025c8f4f9b038bba5fe31e3c98eb996686f07"}, +] + +[package.dependencies] +jinja2 = ">=3.0.0" +pytest = ">=7.0.0" +pytest-metadata = ">=2.0.0" + +[package.extras] +docs = ["pip-tools (>=6.13.0)"] +test = ["assertpy (>=1.1)", "beautifulsoup4 (>=4.11.1)", "black (>=22.1.0)", "flake8 (>=4.0.1)", "pre-commit (>=2.17.0)", "pytest-mock (>=3.7.0)", "pytest-rerunfailures (>=11.1.2)", "pytest-xdist (>=2.4.0)", "selenium (>=4.3.0)", "tox (>=3.24.5)"] + +[[package]] +name = "pytest-metadata" +version = "3.1.1" +description = "pytest plugin for test session metadata" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "pytest_metadata-3.1.1-py3-none-any.whl", hash = "sha256:c8e0844db684ee1c798cfa38908d20d67d0463ecb6137c72e91f418558dd5f4b"}, + {file = "pytest_metadata-3.1.1.tar.gz", hash = "sha256:d2a29b0355fbc03f168aa96d41ff88b1a3b44a3b02acbe491801c98a048017c8"}, +] + +[package.dependencies] +pytest = ">=7.0.0" + +[package.extras] +test = ["black (>=22.1.0)", "flake8 (>=4.0.1)", "pre-commit (>=2.17.0)", "tox (>=3.24.5)"] + +[[package]] +name = "pytest-mock" +version = "3.15.1" +description = "Thin-wrapper around the mock package for easier use with pytest" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "pytest_mock-3.15.1-py3-none-any.whl", hash = "sha256:0a25e2eb88fe5168d535041d09a4529a188176ae608a6d249ee65abc0949630d"}, + {file = "pytest_mock-3.15.1.tar.gz", hash = "sha256:1849a238f6f396da19762269de72cb1814ab44416fa73a8686deac10b0d87a0f"}, +] + +[package.dependencies] +pytest = ">=6.2.5" + +[package.extras] +dev = ["pre-commit", "pytest-asyncio", "tox"] + +[[package]] +name = "pytest-socket" +version = "0.7.0" +description = "Pytest Plugin to disable socket calls during tests" +optional = false +python-versions = ">=3.8,<4.0" +groups = ["dev"] +files = [ + {file = "pytest_socket-0.7.0-py3-none-any.whl", hash = "sha256:7e0f4642177d55d317bbd58fc68c6bd9048d6eadb2d46a89307fa9221336ce45"}, + {file = "pytest_socket-0.7.0.tar.gz", hash = "sha256:71ab048cbbcb085c15a4423b73b619a8b35d6a307f46f78ea46be51b1b7e11b3"}, +] + +[package.dependencies] +pytest = ">=6.2.5" + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main", "dev"] +files = [ + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = 
"sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "pyyaml" +version = "6.0.2" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, + {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8824b5a04a04a047e72eea5cec3bc266db09e35de6bdfe34c9436ac5ee27d237"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7c36280e6fb8385e520936c3cb3b8042851904eba0e58d277dca80a5cfed590b"}, + {file = "PyYAML-6.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec031d5d2feb36d1d1a24380e4db6d43695f3748343d99434e6f5f9156aaa2ed"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:936d68689298c36b53b29f23c6dbb74de12b4ac12ca6cfe0e047bedceea56180"}, + {file = "PyYAML-6.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:23502f431948090f597378482b4812b0caae32c22213aecf3b55325e049a6c68"}, + {file = "PyYAML-6.0.2-cp310-cp310-win32.whl", hash = "sha256:2e99c6826ffa974fe6e27cdb5ed0021786b03fc98e5ee3c5bfe1fd5015f42b99"}, + {file = "PyYAML-6.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:a4d3091415f010369ae4ed1fc6b79def9416358877534caf6a0fdd2146c87a3e"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cc1c1159b3d456576af7a3e4d1ba7e6924cb39de8f67111c735f6fc832082774"}, + {file = "PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1e2120ef853f59c7419231f3bf4e7021f1b936f6ebd222406c3b60212205d2ee"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d225db5a45f21e78dd9358e58a98702a0302f2659a3c6cd320564b75b86f47c"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ac9328ec4831237bec75defaf839f7d4564be1e6b25ac710bd1a96321cc8317"}, + {file = "PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ad2a3decf9aaba3d29c8f537ac4b243e36bef957511b4766cb0057d32b0be85"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ff3824dc5261f50c9b0dfb3be22b4567a6f938ccce4587b38952d85fd9e9afe4"}, + {file = "PyYAML-6.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:797b4f722ffa07cc8d62053e4cff1486fa6dc094105d13fea7b1de7d8bf71c9e"}, + {file = "PyYAML-6.0.2-cp311-cp311-win32.whl", hash = "sha256:11d8f3dd2b9c1207dcaf2ee0bbbfd5991f571186ec9cc78427ba5bd32afae4b5"}, + {file = "PyYAML-6.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10ce637b18caea04431ce14fabcf5c64a1c61ec9c56b071a4b7ca131ca52d44"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab"}, + {file = "PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425"}, + {file = "PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48"}, + {file = "PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b"}, + {file = "PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4"}, + {file = "PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba"}, + {file = "PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484"}, + {file = "PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc"}, + {file = "PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652"}, + {file = "PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183"}, + {file = "PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563"}, + {file = "PyYAML-6.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:24471b829b3bf607e04e88d79542a9d48bb037c2267d7927a874e6c205ca7e9a"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7fded462629cfa4b685c5416b949ebad6cec74af5e2d42905d41e257e0869f5"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d84a1718ee396f54f3a086ea0a66d8e552b2ab2017ef8b420e92edbc841c352d"}, + {file = "PyYAML-6.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9056c1ecd25795207ad294bcf39f2db3d845767be0ea6e6a34d856f006006083"}, + {file = "PyYAML-6.0.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:82d09873e40955485746739bcb8b4586983670466c23382c19cffecbf1fd8706"}, + {file = "PyYAML-6.0.2-cp38-cp38-win32.whl", hash = "sha256:43fa96a3ca0d6b1812e01ced1044a003533c47f6ee8aca31724f78e93ccc089a"}, + {file = "PyYAML-6.0.2-cp38-cp38-win_amd64.whl", hash = "sha256:01179a4a8559ab5de078078f37e5c1a30d76bb88519906844fd7bdea1b7729ff"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:688ba32a1cffef67fd2e9398a2efebaea461578b0923624778664cc1c914db5d"}, + {file = "PyYAML-6.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8786accb172bd8afb8be14490a16625cbc387036876ab6ba70912730faf8e1f"}, + {file = 
"PyYAML-6.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8e03406cac8513435335dbab54c0d385e4a49e4945d2909a581c83647ca0290"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f753120cb8181e736c57ef7636e83f31b9c0d1722c516f7e86cf15b7aa57ff12"}, + {file = "PyYAML-6.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3b1fdb9dc17f5a7677423d508ab4f243a726dea51fa5e70992e59a7411c89d19"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0b69e4ce7a131fe56b7e4d770c67429700908fc0752af059838b1cfb41960e4e"}, + {file = "PyYAML-6.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:a9f8c2e67970f13b16084e04f134610fd1d374bf477b17ec1599185cf611d725"}, + {file = "PyYAML-6.0.2-cp39-cp39-win32.whl", hash = "sha256:6395c297d42274772abc367baaa79683958044e5d3835486c16da75d2a694631"}, + {file = "PyYAML-6.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:39693e1f8320ae4f43943590b49779ffb98acb81f788220ea932a6b6c51004d8"}, + {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"}, +] + +[[package]] +name = "requests" +version = "2.32.5" +description = "Python HTTP for Humans." +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6"}, + {file = "requests-2.32.5.tar.gz", hash = "sha256:dbba0bac56e100853db0ea71b82b4dfd5fe2bf6d3754a8893c3af500cec7d7cf"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset_normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "responses" +version = "0.25.8" +description = "A utility library for mocking out the `requests` Python library." +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "responses-0.25.8-py3-none-any.whl", hash = "sha256:0c710af92def29c8352ceadff0c3fe340ace27cf5af1bbe46fb71275bcd2831c"}, + {file = "responses-0.25.8.tar.gz", hash = "sha256:9374d047a575c8f781b94454db5cab590b6029505f488d12899ddb10a4af1cf4"}, +] + +[package.dependencies] +pyyaml = "*" +requests = ">=2.30.0,<3.0" +urllib3 = ">=1.25.10,<3.0" + +[package.extras] +tests = ["coverage (>=6.0.0)", "flake8", "mypy", "pytest (>=7.0.0)", "pytest-asyncio", "pytest-cov", "pytest-httpserver", "tomli ; python_version < \"3.11\"", "tomli-w", "types-PyYAML", "types-requests"] + +[[package]] +name = "ruff" +version = "0.13.1" +description = "An extremely fast Python linter and code formatter, written in Rust." 
+optional = false +python-versions = ">=3.7" +groups = ["dev"] +files = [ + {file = "ruff-0.13.1-py3-none-linux_armv6l.whl", hash = "sha256:b2abff595cc3cbfa55e509d89439b5a09a6ee3c252d92020bd2de240836cf45b"}, + {file = "ruff-0.13.1-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:4ee9f4249bf7f8bb3984c41bfaf6a658162cdb1b22e3103eabc7dd1dc5579334"}, + {file = "ruff-0.13.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:5c5da4af5f6418c07d75e6f3224e08147441f5d1eac2e6ce10dcce5e616a3bae"}, + {file = "ruff-0.13.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80524f84a01355a59a93cef98d804e2137639823bcee2931f5028e71134a954e"}, + {file = "ruff-0.13.1-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ff7f5ce8d7988767dd46a148192a14d0f48d1baea733f055d9064875c7d50389"}, + {file = "ruff-0.13.1-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c55d84715061f8b05469cdc9a446aa6c7294cd4bd55e86a89e572dba14374f8c"}, + {file = "ruff-0.13.1-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:ac57fed932d90fa1624c946dc67a0a3388d65a7edc7d2d8e4ca7bddaa789b3b0"}, + {file = "ruff-0.13.1-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c366a71d5b4f41f86a008694f7a0d75fe409ec298685ff72dc882f882d532e36"}, + {file = "ruff-0.13.1-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4ea9d1b5ad3e7a83ee8ebb1229c33e5fe771e833d6d3dcfca7b77d95b060d38"}, + {file = "ruff-0.13.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b0f70202996055b555d3d74b626406476cc692f37b13bac8828acff058c9966a"}, + {file = "ruff-0.13.1-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:f8cff7a105dad631085d9505b491db33848007d6b487c3c1979dd8d9b2963783"}, + {file = "ruff-0.13.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:9761e84255443316a258dd7dfbd9bfb59c756e52237ed42494917b2577697c6a"}, + {file = "ruff-0.13.1-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:3d376a88c3102ef228b102211ef4a6d13df330cb0f5ca56fdac04ccec2a99700"}, + {file = "ruff-0.13.1-py3-none-musllinux_1_2_i686.whl", hash = "sha256:cbefd60082b517a82c6ec8836989775ac05f8991715d228b3c1d86ccc7df7dae"}, + {file = "ruff-0.13.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:dd16b9a5a499fe73f3c2ef09a7885cb1d97058614d601809d37c422ed1525317"}, + {file = "ruff-0.13.1-py3-none-win32.whl", hash = "sha256:55e9efa692d7cb18580279f1fbb525146adc401f40735edf0aaeabd93099f9a0"}, + {file = "ruff-0.13.1-py3-none-win_amd64.whl", hash = "sha256:3a3fb595287ee556de947183489f636b9f76a72f0fa9c028bdcabf5bab2cc5e5"}, + {file = "ruff-0.13.1-py3-none-win_arm64.whl", hash = "sha256:c0bae9ffd92d54e03c2bf266f466da0a65e145f298ee5b5846ed435f6a00518a"}, + {file = "ruff-0.13.1.tar.gz", hash = "sha256:88074c3849087f153d4bb22e92243ad4c1b366d7055f98726bc19aa08dc12d51"}, +] + +[[package]] +name = "s3transfer" +version = "0.14.0" +description = "An Amazon S3 Transfer Manager" +optional = false +python-versions = ">=3.9" +groups = ["main", "dev"] +files = [ + {file = "s3transfer-0.14.0-py3-none-any.whl", hash = "sha256:ea3b790c7077558ed1f02a3072fb3cb992bbbd253392f4b6e9e8976941c7d456"}, + {file = "s3transfer-0.14.0.tar.gz", hash = "sha256:eff12264e7c8b4985074ccce27a3b38a485bb7f7422cc8046fee9be4983e4125"}, +] + +[package.dependencies] +botocore = ">=1.37.4,<2.0a.0" + +[package.extras] +crt = ["botocore[crt] (>=1.37.4,<2.0a.0)"] + +[[package]] +name = "six" +version = "1.17.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = 
"!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main", "dev"] +files = [ + {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, + {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, +] + +[[package]] +name = "sortedcontainers" +version = "2.4.0" +description = "Sorted Containers -- Sorted List, Sorted Dict, Sorted Set" +optional = false +python-versions = "*" +groups = ["dev"] +files = [ + {file = "sortedcontainers-2.4.0-py2.py3-none-any.whl", hash = "sha256:a163dcaede0f1c021485e957a39245190e74249897e2ae4b2aa38595db237ee0"}, + {file = "sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88"}, +] + +[[package]] +name = "typeguard" +version = "2.13.3" +description = "Run-time type checker for Python" +optional = false +python-versions = ">=3.5.3" +groups = ["dev"] +files = [ + {file = "typeguard-2.13.3-py3-none-any.whl", hash = "sha256:5e3e3be01e887e7eafae5af63d1f36c849aaa94e3a0112097312aabfa16284f1"}, + {file = "typeguard-2.13.3.tar.gz", hash = "sha256:00edaa8da3a133674796cf5ea87d9f4b4c367d77476e185e80251cc13dfbb8c4"}, +] + +[package.extras] +doc = ["sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] +test = ["mypy ; platform_python_implementation != \"PyPy\"", "pytest", "typing-extensions"] + +[[package]] +name = "typing-extensions" +version = "4.15.0" +description = "Backported and Experimental Type Hints for Python 3.9+" +optional = false +python-versions = ">=3.9" +groups = ["main", "dev"] +files = [ + {file = "typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548"}, + {file = "typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466"}, +] + +[[package]] +name = "typing-inspection" +version = "0.4.1" +description = "Runtime typing introspection tools" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "typing_inspection-0.4.1-py3-none-any.whl", hash = "sha256:389055682238f53b04f7badcb49b989835495a96700ced5dab2d8feae4b26f51"}, + {file = "typing_inspection-0.4.1.tar.gz", hash = "sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28"}, +] + +[package.dependencies] +typing-extensions = ">=4.12.0" + +[[package]] +name = "urllib3" +version = "2.5.0" +description = "HTTP library with thread-safe connection pooling, file post, and more." 
+optional = false +python-versions = ">=3.9" +groups = ["main", "dev"] +files = [ + {file = "urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc"}, + {file = "urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "virtualenv" +version = "20.34.0" +description = "Virtual Python Environment builder" +optional = false +python-versions = ">=3.8" +groups = ["dev"] +files = [ + {file = "virtualenv-20.34.0-py3-none-any.whl", hash = "sha256:341f5afa7eee943e4984a9207c025feedd768baff6753cd660c857ceb3e36026"}, + {file = "virtualenv-20.34.0.tar.gz", hash = "sha256:44815b2c9dee7ed86e387b842a84f20b93f7f417f95886ca1996a72a4138eb1a"}, +] + +[package.dependencies] +distlib = ">=0.3.7,<1" +filelock = ">=3.12.2,<4" +platformdirs = ">=3.9.1,<5" + +[package.extras] +docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] +test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8) ; platform_python_implementation == \"PyPy\" or platform_python_implementation == \"GraalVM\" or platform_python_implementation == \"CPython\" and sys_platform == \"win32\" and python_version >= \"3.13\"", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10) ; platform_python_implementation == \"CPython\""] + +[[package]] +name = "werkzeug" +version = "3.1.3" +description = "The comprehensive WSGI web application library." +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "werkzeug-3.1.3-py3-none-any.whl", hash = "sha256:54b78bf3716d19a65be4fceccc0d1d7b89e608834989dfae50ea87564639213e"}, + {file = "werkzeug-3.1.3.tar.gz", hash = "sha256:60723ce945c19328679790e3282cc758aa4a6040e4bb330f53d30fa546d44746"}, +] + +[package.dependencies] +MarkupSafe = ">=2.1.1" + +[package.extras] +watchdog = ["watchdog (>=2.3)"] + +[[package]] +name = "wrapt" +version = "1.17.3" +description = "Module for decorators, wrappers and monkey patching." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "wrapt-1.17.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:88bbae4d40d5a46142e70d58bf664a89b6b4befaea7b2ecc14e03cedb8e06c04"}, + {file = "wrapt-1.17.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e6b13af258d6a9ad602d57d889f83b9d5543acd471eee12eb51f5b01f8eb1bc2"}, + {file = "wrapt-1.17.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd341868a4b6714a5962c1af0bd44f7c404ef78720c7de4892901e540417111c"}, + {file = "wrapt-1.17.3-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f9b2601381be482f70e5d1051a5965c25fb3625455a2bf520b5a077b22afb775"}, + {file = "wrapt-1.17.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:343e44b2a8e60e06a7e0d29c1671a0d9951f59174f3709962b5143f60a2a98bd"}, + {file = "wrapt-1.17.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:33486899acd2d7d3066156b03465b949da3fd41a5da6e394ec49d271baefcf05"}, + {file = "wrapt-1.17.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e6f40a8aa5a92f150bdb3e1c44b7e98fb7113955b2e5394122fa5532fec4b418"}, + {file = "wrapt-1.17.3-cp310-cp310-win32.whl", hash = "sha256:a36692b8491d30a8c75f1dfee65bef119d6f39ea84ee04d9f9311f83c5ad9390"}, + {file = "wrapt-1.17.3-cp310-cp310-win_amd64.whl", hash = "sha256:afd964fd43b10c12213574db492cb8f73b2f0826c8df07a68288f8f19af2ebe6"}, + {file = "wrapt-1.17.3-cp310-cp310-win_arm64.whl", hash = "sha256:af338aa93554be859173c39c85243970dc6a289fa907402289eeae7543e1ae18"}, + {file = "wrapt-1.17.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:273a736c4645e63ac582c60a56b0acb529ef07f78e08dc6bfadf6a46b19c0da7"}, + {file = "wrapt-1.17.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5531d911795e3f935a9c23eb1c8c03c211661a5060aab167065896bbf62a5f85"}, + {file = "wrapt-1.17.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0610b46293c59a3adbae3dee552b648b984176f8562ee0dba099a56cfbe4df1f"}, + {file = "wrapt-1.17.3-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:b32888aad8b6e68f83a8fdccbf3165f5469702a7544472bdf41f582970ed3311"}, + {file = "wrapt-1.17.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8cccf4f81371f257440c88faed6b74f1053eef90807b77e31ca057b2db74edb1"}, + {file = "wrapt-1.17.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d8a210b158a34164de8bb68b0e7780041a903d7b00c87e906fb69928bf7890d5"}, + {file = "wrapt-1.17.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:79573c24a46ce11aab457b472efd8d125e5a51da2d1d24387666cd85f54c05b2"}, + {file = "wrapt-1.17.3-cp311-cp311-win32.whl", hash = "sha256:c31eebe420a9a5d2887b13000b043ff6ca27c452a9a22fa71f35f118e8d4bf89"}, + {file = "wrapt-1.17.3-cp311-cp311-win_amd64.whl", hash = "sha256:0b1831115c97f0663cb77aa27d381237e73ad4f721391a9bfb2fe8bc25fa6e77"}, + {file = "wrapt-1.17.3-cp311-cp311-win_arm64.whl", hash = "sha256:5a7b3c1ee8265eb4c8f1b7d29943f195c00673f5ab60c192eba2d4a7eae5f46a"}, + {file = "wrapt-1.17.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:ab232e7fdb44cdfbf55fc3afa31bcdb0d8980b9b95c38b6405df2acb672af0e0"}, + {file = "wrapt-1.17.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:9baa544e6acc91130e926e8c802a17f3b16fbea0fd441b5a60f5cf2cc5c3deba"}, + {file = "wrapt-1.17.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6b538e31eca1a7ea4605e44f81a48aa24c4632a277431a6ed3f328835901f4fd"}, + {file = 
"wrapt-1.17.3-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:042ec3bb8f319c147b1301f2393bc19dba6e176b7da446853406d041c36c7828"}, + {file = "wrapt-1.17.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3af60380ba0b7b5aeb329bc4e402acd25bd877e98b3727b0135cb5c2efdaefe9"}, + {file = "wrapt-1.17.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0b02e424deef65c9f7326d8c19220a2c9040c51dc165cddb732f16198c168396"}, + {file = "wrapt-1.17.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:74afa28374a3c3a11b3b5e5fca0ae03bef8450d6aa3ab3a1e2c30e3a75d023dc"}, + {file = "wrapt-1.17.3-cp312-cp312-win32.whl", hash = "sha256:4da9f45279fff3543c371d5ababc57a0384f70be244de7759c85a7f989cb4ebe"}, + {file = "wrapt-1.17.3-cp312-cp312-win_amd64.whl", hash = "sha256:e71d5c6ebac14875668a1e90baf2ea0ef5b7ac7918355850c0908ae82bcb297c"}, + {file = "wrapt-1.17.3-cp312-cp312-win_arm64.whl", hash = "sha256:604d076c55e2fdd4c1c03d06dc1a31b95130010517b5019db15365ec4a405fc6"}, + {file = "wrapt-1.17.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a47681378a0439215912ef542c45a783484d4dd82bac412b71e59cf9c0e1cea0"}, + {file = "wrapt-1.17.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:54a30837587c6ee3cd1a4d1c2ec5d24e77984d44e2f34547e2323ddb4e22eb77"}, + {file = "wrapt-1.17.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:16ecf15d6af39246fe33e507105d67e4b81d8f8d2c6598ff7e3ca1b8a37213f7"}, + {file = "wrapt-1.17.3-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:6fd1ad24dc235e4ab88cda009e19bf347aabb975e44fd5c2fb22a3f6e4141277"}, + {file = "wrapt-1.17.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0ed61b7c2d49cee3c027372df5809a59d60cf1b6c2f81ee980a091f3afed6a2d"}, + {file = "wrapt-1.17.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:423ed5420ad5f5529db9ce89eac09c8a2f97da18eb1c870237e84c5a5c2d60aa"}, + {file = "wrapt-1.17.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e01375f275f010fcbf7f643b4279896d04e571889b8a5b3f848423d91bf07050"}, + {file = "wrapt-1.17.3-cp313-cp313-win32.whl", hash = "sha256:53e5e39ff71b3fc484df8a522c933ea2b7cdd0d5d15ae82e5b23fde87d44cbd8"}, + {file = "wrapt-1.17.3-cp313-cp313-win_amd64.whl", hash = "sha256:1f0b2f40cf341ee8cc1a97d51ff50dddb9fcc73241b9143ec74b30fc4f44f6cb"}, + {file = "wrapt-1.17.3-cp313-cp313-win_arm64.whl", hash = "sha256:7425ac3c54430f5fc5e7b6f41d41e704db073309acfc09305816bc6a0b26bb16"}, + {file = "wrapt-1.17.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:cf30f6e3c077c8e6a9a7809c94551203c8843e74ba0c960f4a98cd80d4665d39"}, + {file = "wrapt-1.17.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:e228514a06843cae89621384cfe3a80418f3c04aadf8a3b14e46a7be704e4235"}, + {file = "wrapt-1.17.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:5ea5eb3c0c071862997d6f3e02af1d055f381b1d25b286b9d6644b79db77657c"}, + {file = "wrapt-1.17.3-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:281262213373b6d5e4bb4353bc36d1ba4084e6d6b5d242863721ef2bf2c2930b"}, + {file = "wrapt-1.17.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:dc4a8d2b25efb6681ecacad42fca8859f88092d8732b170de6a5dddd80a1c8fa"}, + {file = "wrapt-1.17.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:373342dd05b1d07d752cecbec0c41817231f29f3a89aa8b8843f7b95992ed0c7"}, + {file = 
"wrapt-1.17.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:d40770d7c0fd5cbed9d84b2c3f2e156431a12c9a37dc6284060fb4bec0b7ffd4"}, + {file = "wrapt-1.17.3-cp314-cp314-win32.whl", hash = "sha256:fbd3c8319de8e1dc79d346929cd71d523622da527cca14e0c1d257e31c2b8b10"}, + {file = "wrapt-1.17.3-cp314-cp314-win_amd64.whl", hash = "sha256:e1a4120ae5705f673727d3253de3ed0e016f7cd78dc463db1b31e2463e1f3cf6"}, + {file = "wrapt-1.17.3-cp314-cp314-win_arm64.whl", hash = "sha256:507553480670cab08a800b9463bdb881b2edeed77dc677b0a5915e6106e91a58"}, + {file = "wrapt-1.17.3-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:ed7c635ae45cfbc1a7371f708727bf74690daedc49b4dba310590ca0bd28aa8a"}, + {file = "wrapt-1.17.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:249f88ed15503f6492a71f01442abddd73856a0032ae860de6d75ca62eed8067"}, + {file = "wrapt-1.17.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:5a03a38adec8066d5a37bea22f2ba6bbf39fcdefbe2d91419ab864c3fb515454"}, + {file = "wrapt-1.17.3-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:5d4478d72eb61c36e5b446e375bbc49ed002430d17cdec3cecb36993398e1a9e"}, + {file = "wrapt-1.17.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:223db574bb38637e8230eb14b185565023ab624474df94d2af18f1cdb625216f"}, + {file = "wrapt-1.17.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e405adefb53a435f01efa7ccdec012c016b5a1d3f35459990afc39b6be4d5056"}, + {file = "wrapt-1.17.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:88547535b787a6c9ce4086917b6e1d291aa8ed914fdd3a838b3539dc95c12804"}, + {file = "wrapt-1.17.3-cp314-cp314t-win32.whl", hash = "sha256:41b1d2bc74c2cac6f9074df52b2efbef2b30bdfe5f40cb78f8ca22963bc62977"}, + {file = "wrapt-1.17.3-cp314-cp314t-win_amd64.whl", hash = "sha256:73d496de46cd2cdbdbcce4ae4bcdb4afb6a11234a1df9c085249d55166b95116"}, + {file = "wrapt-1.17.3-cp314-cp314t-win_arm64.whl", hash = "sha256:f38e60678850c42461d4202739f9bf1e3a737c7ad283638251e79cc49effb6b6"}, + {file = "wrapt-1.17.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:70d86fa5197b8947a2fa70260b48e400bf2ccacdcab97bb7de47e3d1e6312225"}, + {file = "wrapt-1.17.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:df7d30371a2accfe4013e90445f6388c570f103d61019b6b7c57e0265250072a"}, + {file = "wrapt-1.17.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:caea3e9c79d5f0d2c6d9ab96111601797ea5da8e6d0723f77eabb0d4068d2b2f"}, + {file = "wrapt-1.17.3-cp38-cp38-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:758895b01d546812d1f42204bd443b8c433c44d090248bf22689df673ccafe00"}, + {file = "wrapt-1.17.3-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:02b551d101f31694fc785e58e0720ef7d9a10c4e62c1c9358ce6f63f23e30a56"}, + {file = "wrapt-1.17.3-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:656873859b3b50eeebe6db8b1455e99d90c26ab058db8e427046dbc35c3140a5"}, + {file = "wrapt-1.17.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:a9a2203361a6e6404f80b99234fe7fb37d1fc73487b5a78dc1aa5b97201e0f22"}, + {file = "wrapt-1.17.3-cp38-cp38-win32.whl", hash = "sha256:55cbbc356c2842f39bcc553cf695932e8b30e30e797f961860afb308e6b1bb7c"}, + {file = "wrapt-1.17.3-cp38-cp38-win_amd64.whl", hash = "sha256:ad85e269fe54d506b240d2d7b9f5f2057c2aa9a2ea5b32c66f8902f768117ed2"}, + {file = "wrapt-1.17.3-cp39-cp39-macosx_10_9_universal2.whl", hash = 
"sha256:30ce38e66630599e1193798285706903110d4f057aab3168a34b7fdc85569afc"}, + {file = "wrapt-1.17.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:65d1d00fbfb3ea5f20add88bbc0f815150dbbde3b026e6c24759466c8b5a9ef9"}, + {file = "wrapt-1.17.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a7c06742645f914f26c7f1fa47b8bc4c91d222f76ee20116c43d5ef0912bba2d"}, + {file = "wrapt-1.17.3-cp39-cp39-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:7e18f01b0c3e4a07fe6dfdb00e29049ba17eadbc5e7609a2a3a4af83ab7d710a"}, + {file = "wrapt-1.17.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0f5f51a6466667a5a356e6381d362d259125b57f059103dd9fdc8c0cf1d14139"}, + {file = "wrapt-1.17.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:59923aa12d0157f6b82d686c3fd8e1166fa8cdfb3e17b42ce3b6147ff81528df"}, + {file = "wrapt-1.17.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:46acc57b331e0b3bcb3e1ca3b421d65637915cfcd65eb783cb2f78a511193f9b"}, + {file = "wrapt-1.17.3-cp39-cp39-win32.whl", hash = "sha256:3e62d15d3cfa26e3d0788094de7b64efa75f3a53875cdbccdf78547aed547a81"}, + {file = "wrapt-1.17.3-cp39-cp39-win_amd64.whl", hash = "sha256:1f23fa283f51c890eda8e34e4937079114c74b4c81d2b2f1f1d94948f5cc3d7f"}, + {file = "wrapt-1.17.3-cp39-cp39-win_arm64.whl", hash = "sha256:24c2ed34dc222ed754247a2702b1e1e89fdbaa4016f324b4b8f1a802d4ffe87f"}, + {file = "wrapt-1.17.3-py3-none-any.whl", hash = "sha256:7171ae35d2c33d326ac19dd8facb1e82e5fd04ef8c6c0e394d7af55a55051c22"}, + {file = "wrapt-1.17.3.tar.gz", hash = "sha256:f66eb08feaa410fe4eebd17f2a2c8e2e46d3476e9f8c783daa8e09e0faa666d0"}, +] + +[[package]] +name = "xmltodict" +version = "1.0.2" +description = "Makes working with XML feel like you are working with JSON" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "xmltodict-1.0.2-py3-none-any.whl", hash = "sha256:62d0fddb0dcbc9f642745d8bbf4d81fd17d6dfaec5a15b5c1876300aad92af0d"}, + {file = "xmltodict-1.0.2.tar.gz", hash = "sha256:54306780b7c2175a3967cad1db92f218207e5bc1aba697d887807c0fb68b7649"}, +] + +[package.extras] +test = ["pytest", "pytest-cov"] + +[metadata] +lock-version = "2.1" +python-versions = "^3.13.0" +content-hash = "a1687ea6464a13942de93d6f865663a0f922ebe43dc73899b56dd28c75fd45fe" diff --git a/python-test-samples/cns427-testable-serverless-architecture/poetry.toml b/python-test-samples/cns427-testable-serverless-architecture/poetry.toml new file mode 100644 index 00000000..efa46ec0 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/poetry.toml @@ -0,0 +1,2 @@ +[virtualenvs] +in-project = true \ No newline at end of file diff --git a/python-test-samples/cns427-testable-serverless-architecture/pyproject.toml b/python-test-samples/cns427-testable-serverless-architecture/pyproject.toml new file mode 100644 index 00000000..3ad6c99c --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/pyproject.toml @@ -0,0 +1,121 @@ +[build-system] +requires = ["poetry>=2.0.1"] +build-backend = "poetry.core.masonry.api" + +[tool.poetry] +name = "cns427-task-api" +version = "1.0.0" +description = "CNS427 Task Management API - Serverless testing demonstration" +authors = ["CNS427 Demo"] +classifiers = [ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "License :: OSI Approved :: MIT License", + "Natural Language :: English", + "Programming Language :: Python :: 3.13", +] +readme = "README.md" +keywords = ["serverless", 
"testing", "aws", "lambda"] +license = "MIT-0" +packages = [ + {include = "services"}, + {include = "shared"} +] + +[tool.poetry.dependencies] +python = "^3.13.0" +pydantic = { version = "^2.0.3" } +aws-lambda-powertools = { extras = ["tracer"], version = "^2.20.0" } +mypy-boto3-dynamodb = "*" +mypy-boto3-events = "*" +boto3 = "^1.26.125" +aws-lambda-env-modeler = { version = "^1.0.5" } + +[tool.poetry.group.dev.dependencies] +# CDK +cns427-infrastructure = { path = "infrastructure", develop = true } +aws-cdk-lib = ">=2.100.0" +constructs = ">=10.0.0" +cdk-nag = ">2.0.0" +# Testing +pytest = "*" +pytest-mock = "*" +pytest-cov = "*" +pytest-html = "*" +pytest-asyncio = "*" +pytest-socket = "*" +moto = "*" +requests = "*" # For E2E tests with API Gateway +hypothesis = "*" +# Development +ruff = "*" +mypy = "*" +pre-commit = "*" + +[tool.poetry.scripts] +# Setup validation +validate-setup = "scripts.validate_setup:validate_setup" +# Main application deployment +deploy = "scripts.deploy:main" +destroy = "scripts.destroy:main" +# Test infrastructure management (using bash scripts) +# Use: make deploy-test-infra, make destroy-test-infra, make check-test-infra +# Testing commands +test-unit = "scripts.testing:run_unit_tests" +test-integration = "scripts.testing:run_integration_tests" +test-e2e = "scripts.testing:run_e2e_tests" +test-all = "scripts.testing:run_all_tests" +# Development commands +lint = "scripts.dev:lint" +format = "scripts.dev:format" +type-check = "scripts.dev:type_check" + +[tool.pytest.ini_options] +testpaths = ["tests"] +python_files = ["test_*.py"] +python_classes = ["Test*"] +python_functions = ["test_*"] +addopts = [ + "--verbose", + "--tb=short", + "--strict-markers", +] +markers = [ + "unit: Unit tests (no external dependencies)", + "integration: Integration tests (with AWS services)", + "e2e: End-to-end tests (full workflow)", + "slow: Slow running tests" +] + +[tool.ruff] +exclude = [ + ".git", + ".mypy_cache", + ".ruff_cache", + ".venv", + "venv", + "cdk.out", + ".pytest_cache", +] +line-length = 150 +indent-width = 4 +target-version = "py313" + +[tool.ruff.lint] +select = [ + "E", # pycodestyle errors + "W", # pycodestyle warnings + "F", # pyflakes + "I", # isort + "C", # flake8-comprehensions + "B", # flake8-bugbear +] +ignore = ["E203", "E266", "E501", "W191"] + +[tool.ruff.format] +quote-style = "single" +indent-style = "space" +line-ending = "auto" + +[tool.ruff.lint.isort] +known-third-party = ["pydantic", "aws_lambda_powertools"] \ No newline at end of file diff --git a/python-test-samples/cns427-testable-serverless-architecture/scripts/__init__.py b/python-test-samples/cns427-testable-serverless-architecture/scripts/__init__.py new file mode 100644 index 00000000..49ce1bd3 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/scripts/__init__.py @@ -0,0 +1 @@ +# Scripts package for Poetry command integration diff --git a/python-test-samples/cns427-testable-serverless-architecture/scripts/deploy.py b/python-test-samples/cns427-testable-serverless-architecture/scripts/deploy.py new file mode 100644 index 00000000..1d79349a --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/scripts/deploy.py @@ -0,0 +1,66 @@ +""" +Main Application Deployment Scripts + +Poetry-integrated commands for deploying the main CNS427 Task API application. 
+""" + +import subprocess +import sys +from pathlib import Path + + +def print_status(message: str) -> None: + """Print status message in blue.""" + print(f'\033[0;34m[INFO]\033[0m {message}') + + +def print_success(message: str) -> None: + """Print success message in green.""" + print(f'\033[0;32m[SUCCESS]\033[0m {message}') + + +def print_error(message: str) -> None: + """Print error message in red.""" + print(f'\033[0;31m[ERROR]\033[0m {message}') + + +def main() -> None: + """Deploy the main CNS427 Task API application.""" + print_status('Deploying CNS427 Task API application...') + + # Check if we're in the correct directory + if not Path('pyproject.toml').exists() or not Path('services').exists(): + print_error('Please run this command from the cns427-task-api root directory') + sys.exit(1) + + try: + # # Clean up CDK output and cache directories to prevent path length issues + # print_status('Cleaning up CDK output and cache directories...') + # import shutil + + # cleanup_dirs = ['cdk.out', '.mypy_cache', '.ruff_cache', '.pytest_cache'] + # for dir_name in cleanup_dirs: + # dir_path = Path(dir_name) + # if dir_path.exists(): + # try: + # shutil.rmtree(dir_path) + # print_status(f'Removed {dir_name}/') + # except Exception as e: + # print_status(f'Could not remove {dir_name}/: {e}') + + # Run CDK deploy with outputs file + # Using list form with shell=False for security + subprocess.run(['cdk', 'deploy', '--all', '--outputs-file', 'cdk-outputs.json', '--require-approval', 'never'], check=True, shell=False) + print_success('CNS427 Task API deployed successfully!') + print_status('Stack outputs saved to: cdk-outputs.json') + except subprocess.CalledProcessError as e: + print_error(f'Deployment failed with exit code {e.returncode}') + sys.exit(1) + except FileNotFoundError: + print_error('CDK not found. Please install AWS CDK:') + print(' npm install -g aws-cdk') + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/python-test-samples/cns427-testable-serverless-architecture/scripts/destroy.py b/python-test-samples/cns427-testable-serverless-architecture/scripts/destroy.py new file mode 100644 index 00000000..015d4a83 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/scripts/destroy.py @@ -0,0 +1,100 @@ +""" +Main Application Destruction Scripts + +Poetry-integrated commands for destroying the main CNS427 Task API application. +""" + +import subprocess +import sys +from pathlib import Path + + +def print_status(message: str) -> None: + """Print status message in blue.""" + print(f'\033[0;34m[INFO]\033[0m {message}') + + +def print_success(message: str) -> None: + """Print success message in green.""" + print(f'\033[0;32m[SUCCESS]\033[0m {message}') + + +def print_error(message: str) -> None: + """Print error message in red.""" + print(f'\033[0;31m[ERROR]\033[0m {message}') + + +def main() -> None: + """Destroy the main CNS427 Task API application.""" + print_status('Destroying CNS427 Task API application...') + + # Check if we're in the correct directory + if not Path('pyproject.toml').exists() or not Path('services').exists(): + print_error('Please run this command from the cns427-task-api root directory') + sys.exit(1) + + # Confirm destruction + response = input('Are you sure you want to destroy the CNS427 Task API application? 
(y/N): ')
+    if response.lower() != 'y':
+        print_status('Destruction cancelled.')
+        return
+
+    try:
+        # Run CDK destroy
+        # Using list form with shell=False for security
+        # Note: This may take a minute as CDK synthesizes the stack before destroying
+        print_status('Synthesizing stack (this may take a minute)...')
+        subprocess.run(['cdk', 'destroy', '--all', '--force'], check=True, shell=False)
+        print_success('CNS427 Task API destroyed successfully!')
+
+        # Clean up any leftover CDK-generated log groups
+        print_status('Cleaning up CDK-generated log groups...')
+        try:
+            # List all log groups with the project prefix
+            result = subprocess.run(
+                [
+                    'aws',
+                    'logs',
+                    'describe-log-groups',
+                    '--log-group-name-prefix',
+                    '/aws/lambda/cns427-task-api',
+                    '--query',
+                    'logGroups[*].logGroupName',
+                    '--output',
+                    'text',
+                ],
+                capture_output=True,
+                text=True,
+                check=True,
+                shell=False,
+            )
+
+            log_groups = result.stdout.strip().split()
+            if log_groups and log_groups[0]:  # Check if any log groups exist
+                for log_group in log_groups:
+                    try:
+                        # Use the default region here too, so the delete targets
+                        # the same region as the describe-log-groups call above
+                        subprocess.run(
+                            ['aws', 'logs', 'delete-log-group', '--log-group-name', log_group],
+                            check=True,
+                            shell=False,
+                            capture_output=True,
+                        )
+                        print_status(f'Deleted log group: {log_group}')
+                    except subprocess.CalledProcessError:
+                        print_status(f'Could not delete log group: {log_group} (may not exist)')
+            else:
+                print_status('No leftover log groups found')
+        except subprocess.CalledProcessError:
+            print_status('Could not check for leftover log groups (AWS CLI may not be configured)')
+
+    except subprocess.CalledProcessError as e:
+        print_error(f'Destruction failed with exit code {e.returncode}')
+        sys.exit(1)
+    except FileNotFoundError:
+        print_error('CDK not found. Please install AWS CDK:')
+        print('    npm install -g aws-cdk')
+        sys.exit(1)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/python-test-samples/cns427-testable-serverless-architecture/scripts/dev.py b/python-test-samples/cns427-testable-serverless-architecture/scripts/dev.py
new file mode 100644
index 00000000..b9fa7da2
--- /dev/null
+++ b/python-test-samples/cns427-testable-serverless-architecture/scripts/dev.py
@@ -0,0 +1,102 @@
+"""
+Development Scripts
+
+Poetry-integrated commands for development tasks like linting, formatting, and type checking.
+"""
+
+import subprocess
+import sys
+
+
+def print_status(message: str) -> None:
+    """Print status message in blue."""
+    print(f'\033[0;34m[INFO]\033[0m {message}')
+
+
+def print_success(message: str) -> None:
+    """Print success message in green."""
+    print(f'\033[0;32m[SUCCESS]\033[0m {message}')
+
+
+def print_error(message: str) -> None:
+    """Print error message in red."""
+    print(f'\033[0;31m[ERROR]\033[0m {message}')
+
+
+def run_command(command: list[str], description: str) -> bool:
+    """Run a command and return success status.
+
+    This function follows Python's recommended security practices for subprocess execution:
+
+    1. Uses subprocess.run() with shell=False (REQUIRED for security)
+       - Arguments are passed directly to the executable without shell interpretation
+       - Prevents command injection even if arguments contain special characters
+       - This is the approach recommended by Python documentation and OWASP
+
+    2. 
Input validation as defense-in-depth + - Validates arguments against shell metacharacters before execution + - Provides an additional security layer beyond shell=False + - Helps catch potential issues early with clear error messages + + References: + - Python subprocess docs: https://docs.python.org/3/library/subprocess.html#security-considerations + + Args: + command: List of command arguments (validated for safety) + description: Description of the command + + Returns: + True if command succeeded, False otherwise + """ + print_status(f'Running {description}...') + + # Validate command arguments to prevent injection + # Only allow safe arguments (no shell metacharacters) + for arg in command: + if any(char in arg for char in ['&', '|', ';', '`', '$', '(', ')', '<', '>', '\n', '\r']): + print_error(f'Rejected unsafe command argument: {arg}') + return False + + try: + # Explicitly disable shell for security + subprocess.run(command, check=True, shell=False) + print_success(f'{description} completed successfully!') + return True + except subprocess.CalledProcessError as e: + print_error(f'{description} failed with exit code {e.returncode}') + return False + + +def lint() -> None: + """Run linting with ruff.""" + success = run_command(['ruff', 'check', '.'], 'linting') + sys.exit(0 if success else 1) + + +def format() -> None: + """Format code with ruff.""" + success = run_command(['ruff', 'format', '.'], 'code formatting') + sys.exit(0 if success else 1) + + +def type_check() -> None: + """Run type checking with mypy.""" + success = run_command(['mypy', 'services', 'shared'], 'type checking') + sys.exit(0 if success else 1) + + +if __name__ == '__main__': + if len(sys.argv) > 1: + command = sys.argv[1] + if command == 'lint': + lint() + elif command == 'format': + format() + elif command == 'type-check': + type_check() + else: + print_error(f'Unknown dev command: {command}') + sys.exit(1) + else: + print_error('Please specify a dev command: lint, format, or type-check') + sys.exit(1) diff --git a/python-test-samples/cns427-testable-serverless-architecture/scripts/test_harness/check.sh b/python-test-samples/cns427-testable-serverless-architecture/scripts/test_harness/check.sh new file mode 100755 index 00000000..94247b9c --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/scripts/test_harness/check.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +# Check EventBridge Test Infrastructure Status +# Run this from anywhere in the project + +echo "πŸ” Checking EventBridge Test Infrastructure..." + +# Set region to us-east-1 (required) +export AWS_DEFAULT_REGION=us-east-1 +echo "πŸ“ Using region: us-east-1" + +# Check AWS credentials +if ! 
aws sts get-caller-identity &> /dev/null; then + echo "❌ Error: AWS credentials not configured" + exit 1 +fi + +echo "πŸ“Š Infrastructure Status:" + +# Check DynamoDB table +if aws dynamodb describe-table --table-name cns427-task-api-test-results > /dev/null 2>&1; then + echo "βœ… DynamoDB table: cns427-task-api-test-results" +else + echo "❌ DynamoDB table: cns427-task-api-test-results (not found)" +fi + +# Check Lambda function +if aws lambda get-function --function-name cns427-task-api-test-subscriber > /dev/null 2>&1; then + echo "βœ… Lambda function: cns427-task-api-test-subscriber" +else + echo "❌ Lambda function: cns427-task-api-test-subscriber (not found)" +fi + +# Check EventBridge rule +if aws events list-rules --name-prefix cns427-task-api-test | grep -q "cns427-task-api-test-rule"; then + echo "βœ… EventBridge rule: cns427-task-api-test-rule" +else + echo "❌ EventBridge rule: cns427-task-api-test-rule (not found)" +fi + +echo "" +echo "πŸ’‘ To deploy infrastructure:" +echo " make deploy-test-infra" +echo "" +echo "πŸ’‘ To run EventBridge tests:" +echo " poetry run pytest tests/integration/test_eventbridge_integration.py -v" \ No newline at end of file diff --git a/python-test-samples/cns427-testable-serverless-architecture/scripts/test_harness/deploy.sh b/python-test-samples/cns427-testable-serverless-architecture/scripts/test_harness/deploy.sh new file mode 100755 index 00000000..6939ea0d --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/scripts/test_harness/deploy.sh @@ -0,0 +1,78 @@ +#!/bin/bash + +# Simple EventBridge Test Infrastructure Deployment +# Run this from anywhere in the project (via make deploy-test-infra) + +set -e + +echo "πŸš€ Deploying EventBridge Test Infrastructure..." + +# Get script directory and change to infrastructure/test_harness +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +PROJECT_ROOT="$( cd "$SCRIPT_DIR/../.." && pwd )" +cd "$PROJECT_ROOT/infrastructure/test_harness" + +# Check if CDK is installed +if ! command -v cdk &> /dev/null; then + echo "❌ Error: CDK is not installed. Install with: npm install -g aws-cdk" + exit 1 +fi + +# Set region to us-east-1 (required) +export AWS_DEFAULT_REGION=us-east-1 +echo "πŸ“ Using region: us-east-1" + +# Check AWS credentials +if ! aws sts get-caller-identity &> /dev/null; then + echo "❌ Error: AWS credentials not configured" + echo "Set your credentials using the export commands provided" + exit 1 +fi + +echo "πŸ“¦ Installing CDK dependencies..." +pip install aws-cdk-lib constructs boto3 > /dev/null 2>&1 || true + +echo "πŸ” Testing CDK synthesis..." +if ! cdk synth 2>&1; then + echo "❌ CDK synthesis failed. Showing error details above." + echo "πŸ’‘ Common fixes:" + echo " - Install CDK dependencies: pip install aws-cdk-lib constructs boto3" + echo " - Check Python version (needs 3.13+)" + echo " - Verify stack file syntax" + exit 1 +fi + +echo "☁️ Deploying to AWS..." +if cdk deploy --require-approval never; then + echo "βœ… Test infrastructure deployed successfully!" + + # Verify deployment + echo "πŸ” Verifying resources..." 
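+    # The names checked below are the defaults created by the test harness
+    # stack; adjust these checks if your stack uses different resource names.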
+ + if aws dynamodb describe-table --table-name cns427-task-api-test-results > /dev/null 2>&1; then + echo "βœ… DynamoDB table created" + else + echo "⚠️ DynamoDB table not found" + fi + + if aws lambda get-function --function-name cns427-task-api-test-subscriber > /dev/null 2>&1; then + echo "βœ… Lambda function created" + else + echo "⚠️ Lambda function not found" + fi + + if aws events list-rules --name-prefix cns427-task-api-test | grep -q "cns427-task-api-test-rule"; then + echo "βœ… EventBridge rule created" + else + echo "⚠️ EventBridge rule not found" + fi + + echo "" + echo "πŸŽ‰ Setup complete! You can now run:" + echo " cd ../.." + echo " poetry run pytest tests/integration/test_eventbridge_integration.py -v" + +else + echo "❌ Deployment failed" + exit 1 +fi \ No newline at end of file diff --git a/python-test-samples/cns427-testable-serverless-architecture/scripts/test_harness/destroy.sh b/python-test-samples/cns427-testable-serverless-architecture/scripts/test_harness/destroy.sh new file mode 100755 index 00000000..addc2427 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/scripts/test_harness/destroy.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +# Destroy EventBridge Test Infrastructure +# Run this from anywhere in the project (via make destroy-test-infra) + +set -e + +echo "πŸ—‘οΈ Destroying EventBridge Test Infrastructure..." + +# Get script directory and change to infrastructure/test_harness +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +PROJECT_ROOT="$( cd "$SCRIPT_DIR/../.." && pwd )" +cd "$PROJECT_ROOT/infrastructure/test_harness" + +# Set region to us-east-1 (required) +export AWS_DEFAULT_REGION=us-east-1 +echo "πŸ“ Using region: us-east-1" + +# Check if CDK is installed +if ! command -v cdk &> /dev/null; then + echo "❌ Error: CDK is not installed" + exit 1 +fi + +echo "πŸ” Synthesizing CDK stack..." +if ! cdk synth > /dev/null 2>&1; then + echo "⚠️ CDK synthesis failed, but continuing with destroy..." +fi + +echo "☁️ Destroying AWS resources..." +if cdk destroy --force; then + echo "βœ… Test infrastructure destroyed successfully!" + + # Clean up CDK output + rm -rf cdk.out + +else + echo "❌ Destruction failed" + exit 1 +fi \ No newline at end of file diff --git a/python-test-samples/cns427-testable-serverless-architecture/scripts/test_infrastructure.py b/python-test-samples/cns427-testable-serverless-architecture/scripts/test_infrastructure.py new file mode 100644 index 00000000..c1a17cd4 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/scripts/test_infrastructure.py @@ -0,0 +1,49 @@ +"""Test infrastructure management utilities.""" + +import os +from typing import Dict + +import boto3 + + +def verify_deployment() -> Dict[str, bool]: + """ + Verify test infrastructure deployment status. + + Returns: + Dictionary with deployment status of each component. 
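+
+    Environment variables read below (with fallbacks):
+        AWS_DEFAULT_REGION / AWS_REGION: region to query (default: us-west-2)
+        TEST_INFRASTRUCTURE_STACK_NAME: CloudFormation stack name to check
+            (default: CNS427TaskApiTestInfrastructure)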
+ """ + region = os.environ.get('AWS_DEFAULT_REGION', os.environ.get('AWS_REGION', 'us-west-2')) + stack_name = os.environ.get('TEST_INFRASTRUCTURE_STACK_NAME', 'CNS427TaskApiTestInfrastructure') + cfn = boto3.client('cloudformation', region_name=region) + + try: + response = cfn.describe_stacks(StackName=stack_name) + stack_status = response['Stacks'][0]['StackStatus'] + + # Stack exists and is in a good state + stack_deployed = stack_status in ['CREATE_COMPLETE', 'UPDATE_COMPLETE', 'UPDATE_ROLLBACK_COMPLETE'] + + return { + 'test_infrastructure_stack': stack_deployed, + 'test_results_table': stack_deployed, # Exists if stack exists + 'test_subscriber_lambda': stack_deployed, # Exists if stack exists + 'test_event_rule': stack_deployed, # Exists if stack exists + } + except cfn.exceptions.ClientError: + # Stack doesn't exist + return { + 'test_infrastructure_stack': False, + 'test_results_table': False, + 'test_subscriber_lambda': False, + 'test_event_rule': False, + } + except Exception as e: + # Other error (credentials, network, etc.) + print(f'Error checking test infrastructure: {e}') + return { + 'test_infrastructure_stack': False, + 'test_results_table': False, + 'test_subscriber_lambda': False, + 'test_event_rule': False, + } diff --git a/python-test-samples/cns427-testable-serverless-architecture/scripts/testing.py b/python-test-samples/cns427-testable-serverless-architecture/scripts/testing.py new file mode 100644 index 00000000..2fb4b02f --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/scripts/testing.py @@ -0,0 +1,224 @@ +""" +Testing Scripts + +Poetry-integrated commands for running different types of tests. +""" + +import subprocess +import sys + + +def print_status(message: str) -> None: + """Print status message in blue.""" + print(f'\033[0;34m[INFO]\033[0m {message}') + + +def print_success(message: str) -> None: + """Print success message in green.""" + print(f'\033[0;32m[SUCCESS]\033[0m {message}') + + +def print_error(message: str) -> None: + """Print error message in red.""" + print(f'\033[0;31m[ERROR]\033[0m {message}') + + +def run_pytest(args: list[str], description: str) -> bool: + """Run pytest with given arguments. + + This function follows Python's recommended security practices for subprocess execution: + + 1. Uses subprocess.run() with shell=False (REQUIRED for security) + - Arguments are passed directly to the executable without shell interpretation + - Prevents command injection even if arguments contain special characters + - This is the approach recommended by Python documentation and OWASP + + 2. 
Input validation as defense-in-depth + - Validates arguments against shell metacharacters before execution + - Provides an additional security layer beyond shell=False + - Helps catch potential issues early with clear error messages + + References: + - Python subprocess docs: https://docs.python.org/3/library/subprocess.html#security-considerations + + Args: + args: List of pytest arguments (validated internally, only accepts safe paths and flags) + description: Description of the test run + + Returns: + True if tests passed, False otherwise + """ + print_status(f'Running {description}...') + + # Validate arguments to prevent command injection + # Only allow safe pytest arguments (paths, flags, no shell metacharacters) + safe_args = [] + for arg in args: + # Reject any arguments with shell metacharacters + if any(char in arg for char in ['&', '|', ';', '`', '$', '(', ')', '<', '>', '\n', '\r']): + print_error(f'Rejected unsafe argument: {arg}') + return False + safe_args.append(arg) + + # Build command as a list with validated arguments + command = ['pytest'] + safe_args + + try: + subprocess.run(command, check=True, shell=False) + print_success(f'{description} completed successfully!') + return True + except subprocess.CalledProcessError as e: + print_error(f'{description} failed with exit code {e.returncode}') + return False + + +def run_unit_tests() -> None: + """Run unit tests only.""" + args = ['tests/unit', '-v'] + + success = run_pytest(args, 'unit tests') + sys.exit(0 if success else 1) + + +def run_integration_tests() -> None: + """Run all integration tests including EventBridge tests if infrastructure is available.""" + # Check if EventBridge test infrastructure is deployed + from scripts.test_infrastructure import verify_deployment + + print_status('Checking EventBridge test infrastructure...') + status = verify_deployment() + eventbridge_available = all(status.values()) + + if eventbridge_available: + print_success('EventBridge test infrastructure is ready!') + + # Run all integration tests including EventBridge + args = [ + 'tests/integration', + '-v', + ] + success = run_pytest(args, 'integration tests (including EventBridge)') + else: + missing_components = [name for name, deployed in status.items() if not deployed] + print_status('EventBridge test infrastructure not fully deployed') + print_status(f'Missing components: {", ".join(missing_components)}') + print_status('Running DynamoDB integration tests only...') + + # Run only DynamoDB integration tests + args = [ + 'tests/integration', + '-v', + '--ignore=tests/integration/test_eventbridge_integration.py', + ] + success = run_pytest(args, 'integration tests (DynamoDB only)') + + print_status('\nTo run EventBridge tests, deploy test infrastructure:') + print(' make deploy-test-infra') + + sys.exit(0 if success else 1) + + +def run_e2e_tests() -> None: + """Run end-to-end tests.""" + print_status('Running end-to-end tests...') + print_status('Note: E2E tests require deployed infrastructure') + + # Run E2E tests + e2e_args = [ + 'tests/e2e', + '-v', + '-s', # Don't capture output for better debugging + ] + + success = run_pytest(e2e_args, 'end-to-end tests') + + if success: + print_success('E2E tests completed successfully!') + sys.exit(0) + else: + print_error('E2E tests failed') + sys.exit(1) + + +def run_all_tests() -> None: + """Run all tests in sequence.""" + print_status('Running complete test suite...') + + # Run unit tests first + unit_args = ['tests/unit', '-v'] + unit_success = run_pytest(unit_args, 'unit tests') + 
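+    # Note: a unit-test failure does not abort the suite; every stage still
+    # runs, and per-stage results are aggregated in the summary below.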
+
+    # Check if EventBridge infrastructure is available
+    from scripts.test_infrastructure import verify_deployment
+
+    eventbridge_status = verify_deployment()
+    eventbridge_available = all(eventbridge_status.values())
+
+    integration_success = True
+    if eventbridge_available:
+        print_success('EventBridge test infrastructure detected!')
+
+        # Run all integration tests including EventBridge
+        integration_args = [
+            'tests/integration',
+            '-v',
+        ]
+        integration_success = run_pytest(integration_args, 'integration tests (including EventBridge)')
+    else:
+        print_status('EventBridge test infrastructure not available, running DynamoDB tests only')
+
+        # Run only DynamoDB integration tests
+        integration_args = [
+            'tests/integration',
+            '-v',
+            '--ignore=tests/integration/test_eventbridge_integration.py',
+        ]
+        integration_success = run_pytest(integration_args, 'integration tests (DynamoDB only)')
+
+        print_status('To run EventBridge tests, deploy test infrastructure:')
+        print('  make deploy-test-infra')
+
+    # Run E2E tests
+    print_status('Running end-to-end tests...')
+    e2e_args = [
+        'tests/e2e',
+        '-v',
+        '-s',  # Don't capture output for better debugging
+    ]
+    e2e_success = run_pytest(e2e_args, 'end-to-end tests')
+
+    # Summary
+    print_status('\nTest Suite Summary:')
+    print(f'  Unit Tests: {"βœ“" if unit_success else "βœ—"}')
+    if eventbridge_available:
+        print(f'  Integration Tests: {"βœ“" if integration_success else "βœ—"}')
+    else:
+        print(f'  Integration Tests (DynamoDB only): {"βœ“" if integration_success else "βœ—"}')
+    print(f'  E2E Tests: {"βœ“" if e2e_success else "βœ—"}')
+
+    overall_success = unit_success and integration_success and e2e_success
+
+    if overall_success:
+        print_success('All available tests completed successfully!')
+        sys.exit(0)
+    else:
+        print_error('Some tests failed')
+        sys.exit(1)
+
+
+if __name__ == '__main__':
+    if len(sys.argv) > 1:
+        command = sys.argv[1]
+        if command == 'unit':
+            run_unit_tests()
+        elif command == 'integration':
+            run_integration_tests()
+        elif command == 'e2e':
+            run_e2e_tests()
+        elif command == 'all':
+            run_all_tests()
+        else:
+            print_error(f'Unknown test command: {command}')
+            sys.exit(1)
+    else:
+        run_all_tests()
diff --git a/python-test-samples/cns427-testable-serverless-architecture/scripts/validate_setup.py b/python-test-samples/cns427-testable-serverless-architecture/scripts/validate_setup.py
new file mode 100644
index 00000000..7a7a48d6
--- /dev/null
+++ b/python-test-samples/cns427-testable-serverless-architecture/scripts/validate_setup.py
@@ -0,0 +1,234 @@
+"""
+Setup Validation Script
+
+Validates that all Poetry commands and the test infrastructure are working correctly.
+"""
+
+import subprocess
+import sys
+from pathlib import Path
+
+
+def print_status(message: str) -> None:
+    """Print status message in blue."""
+    print(f'\033[0;34m[INFO]\033[0m {message}')
+
+
+def print_success(message: str) -> None:
+    """Print success message in green."""
+    print(f'\033[0;32m[SUCCESS]\033[0m {message}')
+
+
+def print_warning(message: str) -> None:
+    """Print warning message in yellow."""
+    print(f'\033[1;33m[WARNING]\033[0m {message}')
+
+
+def print_error(message: str) -> None:
+    """Print error message in red."""
+    print(f'\033[0;31m[ERROR]\033[0m {message}')
+
+
+def check_command_exists(command: str) -> bool:
+    """Check if a command exists in PATH. 
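+
+    Note: detection runs '<command> --version', so a tool that is installed
+    but does not respond to --version is reported as missing.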
+ + Args: + command: Command name to check (validated as single string) + + Returns: + True if command exists, False otherwise + """ + try: + # Use list form with shell=False for security + subprocess.run([command, '--version'], capture_output=True, check=True, shell=False) + return True + except (subprocess.CalledProcessError, FileNotFoundError): + return False + + +def check_poetry_scripts() -> dict[str, bool]: + """Check which Poetry scripts are defined in pyproject.toml. + + Returns: + Dictionary mapping script names to availability (True if defined) + """ + try: + import tomllib + except ImportError: + import tomli as tomllib + + try: + with open('pyproject.toml', 'rb') as f: + pyproject = tomllib.load(f) + + scripts = pyproject.get('tool', {}).get('poetry', {}).get('scripts', {}) + return {name: True for name in scripts.keys()} + except Exception: + return {} + + +def validate_setup() -> None: + """Validate the complete project setup.""" + print_status('Validating project setup...') + + errors = [] + warnings = [] + + # Check basic prerequisites + print_status('Checking prerequisites...') + + if not Path('pyproject.toml').exists(): + errors.append('pyproject.toml not found - run from project root') + else: + print_success('βœ“ Project root directory') + + if not Path('services').exists(): + errors.append('services directory not found') + else: + print_success('βœ“ Services source code') + + if not Path('shared').exists(): + errors.append('shared directory not found') + else: + print_success('βœ“ Shared source code') + + # Check external dependencies + if not check_command_exists('poetry'): + errors.append('Poetry not installed') + else: + print_success('βœ“ Poetry installed') + + if not check_command_exists('cdk'): + warnings.append('AWS CDK not installed - needed for infrastructure deployment') + else: + print_success('βœ“ AWS CDK installed') + + if not check_command_exists('aws'): + warnings.append('AWS CLI not installed - needed for infrastructure management') + else: + print_success('βœ“ AWS CLI installed') + + # Check Poetry scripts + print_status('Checking Poetry scripts...') + + available_scripts = check_poetry_scripts() + + # All Poetry scripts that should be defined (from Makefile targets that use poetry run) + required_commands = [ + 'validate-setup', + 'test-unit', + 'test-integration', + 'test-e2e', + 'test-all', + 'lint', + 'format', + 'type-check', + 'deploy', + 'destroy', + ] + + for command in required_commands: + if command in available_scripts: + print_success(f'βœ“ poetry run {command}') + else: + errors.append(f'Poetry script not defined in pyproject.toml: {command}') + + # Check non-Poetry commands used by Makefile + print_status('Checking additional tools...') + + optional_tools = [ + ('ruff', 'Code linting and formatting'), + ('mypy', 'Type checking'), + ('pytest', 'Test runner'), + ] + + for tool, description in optional_tools: + if check_command_exists(tool): + print_success(f'βœ“ {tool} ({description})') + else: + warnings.append(f'{tool} not installed - {description}') + + # Note: cdk-nag is a Python package, not a CLI tool, so we don't check for it here + # It's checked when running: ENABLE_CDK_NAG=true poetry run cdk synth + + # Check test directories + print_status('Checking test structure...') + + test_dirs = [ + 'tests/unit', + 'tests/unit/domain', + 'tests/unit/handlers', + 'tests/unit/models', + 'tests/integration', + 'tests/e2e', + 'tests/shared/fakes', + 'tests/shared/helpers', + ] + + for test_dir in test_dirs: + if Path(test_dir).exists(): + 
print_success(f'βœ“ {test_dir}/') + else: + errors.append(f'Test directory missing: {test_dir}') + + # Check key test files + print_status('Checking key test files...') + + test_files = [ + 'tests/unit/conftest.py', + 'tests/unit/domain/test_task_service.py', + 'tests/unit/handlers/test_task_handler.py', + 'tests/unit/models/test_event_contracts.py', + 'tests/integration/test_dynamodb_integration.py', + 'tests/integration/test_eventbridge_integration.py', + 'tests/e2e/test_task_lifecycle_e2e.py', + ] + + for test_file in test_files: + if Path(test_file).exists(): + print_success(f'βœ“ {test_file}') + else: + warnings.append(f'Test file missing: {test_file}') + + # Check infrastructure + print_status('Checking infrastructure...') + + infra_paths = [ + ('infrastructure/core', 'Main infrastructure'), + ('infrastructure/test_harness/app.py', 'Test harness'), + ('infrastructure/setup.py', 'Infrastructure package'), + ] + + for infra_path, description in infra_paths: + if Path(infra_path).exists(): + print_success(f'βœ“ {description}: {infra_path}') + else: + warnings.append(f'{description} missing: {infra_path}') + + # Summary + print_status('\nValidation Summary:') + + if errors: + print_error(f'Found {len(errors)} errors:') + for error in errors: + print_error(f' - {error}') + + if warnings: + print_warning(f'Found {len(warnings)} warnings:') + for warning in warnings: + print_warning(f' - {warning}') + + if not errors and not warnings: + print_success('βœ“ All checks passed! Project setup is ready.') + elif not errors: + print_success('βœ“ Setup is functional with minor warnings.') + print_status('\nYou can proceed with:') + print('1. Run tests: poetry run test-all') + print('2. Deploy application: poetry run deploy') + else: + print_error('βœ— Setup has errors that need to be fixed before proceeding.') + sys.exit(1) + + +if __name__ == '__main__': + validate_setup() diff --git a/python-test-samples/cns427-testable-serverless-architecture/services/__init__.py b/python-test-samples/cns427-testable-serverless-architecture/services/__init__.py new file mode 100644 index 00000000..53932f28 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/services/__init__.py @@ -0,0 +1,5 @@ +"""Services package for task API. + +This package contains service-oriented Lambda functions following the Monorepo Service Pattern. +Each service is a complete hexagon with its own domain, models, and handlers. +""" diff --git a/python-test-samples/cns427-testable-serverless-architecture/services/notification_service/__init__.py b/python-test-samples/cns427-testable-serverless-architecture/services/notification_service/__init__.py new file mode 100644 index 00000000..ccb1687e --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/services/notification_service/__init__.py @@ -0,0 +1,5 @@ +"""Notification service package. + +Complete hexagon for notification processing. +Contains handler (input adapter) and domain logic (core). 
+""" diff --git a/python-test-samples/cns427-testable-serverless-architecture/services/notification_service/domain/__init__.py b/python-test-samples/cns427-testable-serverless-architecture/services/notification_service/domain/__init__.py new file mode 100644 index 00000000..5f7e4bb5 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/services/notification_service/domain/__init__.py @@ -0,0 +1 @@ +"""Notification service domain layer.""" diff --git a/python-test-samples/cns427-testable-serverless-architecture/services/notification_service/domain/notification_service.py b/python-test-samples/cns427-testable-serverless-architecture/services/notification_service/domain/notification_service.py new file mode 100644 index 00000000..425efdcb --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/services/notification_service/domain/notification_service.py @@ -0,0 +1,64 @@ +"""Domain logic for notification processing.""" + +from typing import Any, Dict + +from aws_lambda_powertools import Logger + +logger = Logger() + + +class NotificationService: + """Pure business logic for notification processing.""" + + def process_task_event(self, event_type: str, task_data: Dict[str, Any]) -> None: + """Process a task event and generate appropriate notification.""" + logger.info(f'Processing task event: {event_type}') + logger.debug(f'Event data: {task_data}') + + if event_type == 'TaskCreated': + self._handle_task_created(task_data) + elif event_type == 'TaskUpdated': + self._handle_task_updated(task_data) + elif event_type == 'TaskDeleted': + self._handle_task_deleted(task_data) + else: + logger.warning(f'Unknown event type: {event_type}') + + def _handle_task_created(self, task_data: Dict[str, Any]) -> None: + """Handle task created notification.""" + task_id = task_data.get('task_id') + title = task_data.get('title') + priority = task_data.get('priority') + + logger.debug(f'Handling TaskCreated: task_id={task_id}, priority={priority}') + logger.info( + f'Task created notification: {title}', + extra={'task_id': task_id, 'event_type': 'TaskCreated'}, + ) + + def _handle_task_updated(self, task_data: Dict[str, Any]) -> None: + """Handle task updated notification.""" + task_id = task_data.get('task_id') + title = task_data.get('title') + status = task_data.get('status') + version = task_data.get('version') + + logger.debug(f'Handling TaskUpdated: task_id={task_id}, status={status}, version={version}') + logger.info( + f'Task updated notification: {title} (status: {status})', + extra={ + 'task_id': task_id, + 'event_type': 'TaskUpdated', + 'new_status': status, + }, + ) + + def _handle_task_deleted(self, task_data: Dict[str, Any]) -> None: + """Handle task deleted notification.""" + task_id = task_data.get('task_id') + + logger.debug(f'Handling TaskDeleted: task_id={task_id}') + logger.info( + f'Task deleted notification: {task_id}', + extra={'task_id': task_id, 'event_type': 'TaskDeleted'}, + ) diff --git a/python-test-samples/cns427-testable-serverless-architecture/services/notification_service/handler.py b/python-test-samples/cns427-testable-serverless-architecture/services/notification_service/handler.py new file mode 100644 index 00000000..6caac0f6 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/services/notification_service/handler.py @@ -0,0 +1,92 @@ +"""Lambda handler for processing task events from EventBridge.""" + +import json +from typing import Any, Dict, Optional + +from aws_lambda_powertools import Logger +from 
aws_lambda_powertools.logging import correlation_paths + +from services.notification_service.domain.notification_service import NotificationService +from services.task_service.models.task import TaskEvent + +logger = Logger() + +# Dependencies - injected at runtime +notification_service: Optional[NotificationService] = None + + +def _initialize_dependencies(): + """Initialize dependencies with dependency injection.""" + global notification_service + + if notification_service is None: + notification_service = NotificationService() + + +@logger.inject_lambda_context(correlation_id_path=correlation_paths.EVENT_BRIDGE) +def lambda_handler(event: Dict[str, Any], context) -> Dict[str, Any]: + """ + Lambda handler entry point for EventBridge events. + + EventBridge Event Structure: + { + "version": "0", + "id": "event-id", + "detail-type": "TaskCreated", + "source": "cns427-task-api", + "account": "123456789012", + "time": "2025-11-15T02:15:28Z", + "region": "us-west-2", + "resources": [], + "detail": { + "task_id": "abc-123", + "title": "My Task", + ... + } + } + """ + _initialize_dependencies() + + logger.info('Notification handler invoked') + logger.debug(f'Event: {json.dumps(event)}') + + try: + # Extract EventBridge event fields + detail = event.get('detail', {}) + detail_type = event.get('detail-type', '') + source = event.get('source', '') + + if not detail: + logger.warning('No event detail found to process') + return {'statusCode': 200, 'processedEvents': 0} + + if not detail_type: + logger.warning('No detail-type found in event') + return {'statusCode': 200, 'processedEvents': 0} + + logger.debug(f'Processing EventBridge event: source={source}, detail_type={detail_type}') + + # Parse EventBridge event into TaskEvent model + task_event = TaskEvent.from_eventbridge_event(event) + + logger.debug(f'Event type: {task_event.event_type} (original: {detail_type})') + + # Extract task_id for logging + task_id = task_event.task_data.get('task_id', 'unknown') + + logger.info(f'Processing {task_event.event_type} event for task: {task_id}') + + # Delegate to domain service + if notification_service is None: + raise RuntimeError('Notification service not initialized') + notification_service.process_task_event(task_event.event_type, task_event.task_data) + + logger.info(f'Successfully processed {task_event.event_type} event for task: {task_id}') + return {'statusCode': 200, 'processedEvents': 1} + + except Exception as e: + # Log as warning since we're re-raising for Lambda to handle + # Lambda runtime will log the error and trigger retry mechanism + logger.warning(f'Handler encountered error, re-raising for Lambda retry: {e}', exc_info=True) + # Re-raise to trigger Lambda retry mechanism + raise diff --git a/python-test-samples/cns427-testable-serverless-architecture/services/notification_service/requirements.txt b/python-test-samples/cns427-testable-serverless-architecture/services/notification_service/requirements.txt new file mode 100644 index 00000000..b4ed8332 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/services/notification_service/requirements.txt @@ -0,0 +1,2 @@ +aws-lambda-powertools[tracer]>=2.20.0 +pydantic>=2.0.3 diff --git a/python-test-samples/cns427-testable-serverless-architecture/services/task_service/__init__.py b/python-test-samples/cns427-testable-serverless-architecture/services/task_service/__init__.py new file mode 100644 index 00000000..9a23d1fb --- /dev/null +++ 
b/python-test-samples/cns427-testable-serverless-architecture/services/task_service/__init__.py @@ -0,0 +1,5 @@ +"""Task service package. + +Complete hexagon for task management operations. +Contains handler (input adapter), domain logic (core), and models (core). +""" diff --git a/python-test-samples/cns427-testable-serverless-architecture/services/task_service/domain/__init__.py b/python-test-samples/cns427-testable-serverless-architecture/services/task_service/domain/__init__.py new file mode 100644 index 00000000..d066ff77 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/services/task_service/domain/__init__.py @@ -0,0 +1 @@ +"""Task service domain layer.""" diff --git a/python-test-samples/cns427-testable-serverless-architecture/services/task_service/domain/business_rules.py b/python-test-samples/cns427-testable-serverless-architecture/services/task_service/domain/business_rules.py new file mode 100644 index 00000000..803b0d27 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/services/task_service/domain/business_rules.py @@ -0,0 +1,113 @@ +"""Business rules for task management.""" + +from typing import Dict, List + +from services.task_service.models.task import TaskStatus + + +def is_valid_version_token(version_token: str) -> bool: + """ + Business Rule: Version token format validation. + + Rules: + - Version tokens must be non-empty strings + - Version tokens should be numeric (timestamp milliseconds) + - Version tokens must be positive integers when converted + """ + if not version_token or not isinstance(version_token, str): + return False + + try: + # Version tokens are timestamp milliseconds, so should be positive integers + timestamp = int(version_token) + return timestamp > 0 + except ValueError: + return False + + +def compare_version_tokens(token1: str, token2: str) -> int: + """ + Business Rule: Version token comparison for conflict detection. + + Returns: + - -1 if token1 is older than token2 + - 0 if tokens are equal + - 1 if token1 is newer than token2 + + Raises ValueError if tokens are invalid. + """ + if not is_valid_version_token(token1) or not is_valid_version_token(token2): + raise ValueError('Invalid version token format') + + timestamp1 = int(token1) + timestamp2 = int(token2) + + if timestamp1 < timestamp2: + return -1 + elif timestamp1 > timestamp2: + return 1 + else: + return 0 + + +def validate_version_token_for_update(version_token: str) -> None: + """ + Business Rule: Version token validation for update operations. + + Rules: + - Version token is required for all update operations + - Version token must be in valid format + """ + if not version_token: + raise ValueError('version_token is required for update operations') + + if not is_valid_version_token(version_token): + raise ValueError('Invalid version_token format. Must be a positive integer string.') + + +def can_transition_to(current_status: TaskStatus, new_status: TaskStatus) -> bool: + """ + Business Rule: Tasks can only transition through valid states. 
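+
+    Example (consistent with the transition table below):
+        can_transition_to(TaskStatus.PENDING, TaskStatus.COMPLETED)    # True
+        can_transition_to(TaskStatus.IN_PROGRESS, TaskStatus.PENDING)  # False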
+ + Valid transitions: + - pending -> in_progress -> completed + - pending -> completed (skip in_progress for simple tasks) + - completed -> pending (reopen task) + - NOT allowed: in_progress -> pending (can't go backwards without completing) + """ + valid_transitions = { + TaskStatus.PENDING: [TaskStatus.IN_PROGRESS, TaskStatus.COMPLETED], + TaskStatus.IN_PROGRESS: [TaskStatus.COMPLETED], + TaskStatus.COMPLETED: [TaskStatus.PENDING], # Reopen task + } + return new_status in valid_transitions.get(current_status, []) + + +def has_circular_dependency(task_id: str, dependency_id: str, all_dependencies: Dict[str, List[str]]) -> bool: + """ + Business Rule: Prevent circular dependencies. + + Rules: + - Adding a dependency should not create a cycle + - Use depth-first search to detect cycles + """ + if task_id == dependency_id: + return True # Self-dependency is circular + + visited = set() + + def check_cycle(current_id: str) -> bool: + if current_id in visited: + return True + visited.add(current_id) + + for dep_id in all_dependencies.get(current_id, []): + if dep_id == task_id: # Would create cycle back to original task + return True + if check_cycle(dep_id): + return True + + visited.remove(current_id) + return False + + return check_cycle(dependency_id) diff --git a/python-test-samples/cns427-testable-serverless-architecture/services/task_service/domain/exceptions.py b/python-test-samples/cns427-testable-serverless-architecture/services/task_service/domain/exceptions.py new file mode 100644 index 00000000..238a5818 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/services/task_service/domain/exceptions.py @@ -0,0 +1,39 @@ +"""Domain exceptions for task management.""" + +from typing import Dict, Optional + + +class CircularDependencyError(Exception): + """Raised when a circular dependency is detected.""" + + pass + + +class ConflictError(Exception): + """Raised when a task update conflicts with concurrent modifications.""" + + def __init__(self, message: str, current_task: Optional[Dict] = None): + super().__init__(message) + self.message = message + self.current_task = current_task + + def __str__(self) -> str: + return self.message + + +class ThrottlingError(Exception): + """Raised when service capacity is exceeded (e.g., DynamoDB throttling).""" + + pass + + +class ResourceNotFoundError(Exception): + """Raised when a required resource is not found (e.g., DynamoDB table missing).""" + + pass + + +class RepositoryError(Exception): + """Raised when repository operations fail.""" + + pass diff --git a/python-test-samples/cns427-testable-serverless-architecture/services/task_service/domain/task_service.py b/python-test-samples/cns427-testable-serverless-architecture/services/task_service/domain/task_service.py new file mode 100644 index 00000000..264fb6f3 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/services/task_service/domain/task_service.py @@ -0,0 +1,202 @@ +"""Domain logic for task management operations.""" + +from datetime import UTC, datetime +from typing import List, Optional, Tuple +from uuid import uuid4 + +from aws_lambda_powertools import Logger + +from services.task_service.domain.business_rules import can_transition_to, has_circular_dependency +from services.task_service.domain.exceptions import CircularDependencyError, ConflictError +from services.task_service.models.api import CreateTaskRequest, UpdateTaskRequest +from services.task_service.models.task import Task, TaskStatus +from shared.integration.interfaces 
import EventPublisher, TaskRepository
+
+# Initialize logger at module level - will include module name in logs
+logger = Logger()
+
+
+class TaskService:
+    """Pure business logic for task operations."""
+
+    def __init__(self, repository: Optional[TaskRepository] = None, event_publisher: Optional[EventPublisher] = None):
+        """Initialize service with dependencies."""
+        if repository is None or event_publisher is None:
+            import os
+
+            from shared.integration.dynamodb_adapter import DynamoDBTaskRepository
+            from shared.integration.eventbridge_adapter import EventBridgePublisher
+
+            self.repository = repository or DynamoDBTaskRepository(table_name=os.environ.get('TASKS_TABLE_NAME', 'tasks'))
+            self.event_publisher = event_publisher or EventBridgePublisher(event_bus_name=os.environ.get('EVENT_BUS_NAME', 'TaskEvents'))
+        else:
+            self.repository = repository
+            self.event_publisher = event_publisher
+
+    def create_task(self, request: CreateTaskRequest) -> Task:
+        """Create a new task from request data."""
+        # Generate task ID and timestamp for version
+        task_id = str(uuid4())
+        now = datetime.now(UTC)
+        version = int(now.timestamp() * 1000)  # UTC timestamp in milliseconds
+
+        logger.info(f'Creating task with ID: {task_id}')
+
+        # Validate dependencies for circular references
+        if request.dependencies:
+            logger.debug(f'Validating dependencies: {request.dependencies}')
+            self._validate_dependencies(task_id, request.dependencies)
+
+        # Create task entity
+        task = Task(
+            task_id=task_id,
+            title=request.title,
+            description=request.description,
+            priority=request.priority,
+            dependencies=request.dependencies,
+            status=TaskStatus.PENDING,
+            created_at=now,
+            updated_at=now,
+            version=version,
+        )
+
+        # Persist task
+        created_task = self.repository.create_task(task)
+
+        # Publish event
+        from services.task_service.models.task import TaskCreatedEvent
+
+        event = TaskCreatedEvent(created_task)
+        self.event_publisher.publish_event(event)
+
+        logger.info(f'Task created successfully: {task_id}')
+        return created_task
+
+    def get_task(self, task_id: str) -> Task:
+        """Retrieve a task by ID."""
+        task = self.repository.get_task(task_id)
+        if task is None:
+            raise ValueError(f'Task not found: {task_id}')
+        return task
+
+    def list_tasks(self, limit: Optional[int] = None, next_token: Optional[str] = None) -> Tuple[List[Task], Optional[str]]:
+        """List tasks with pagination."""
+        validated_limit = self._validate_pagination_params(limit)
+        return self.repository.list_tasks(validated_limit, next_token)
+
+    def update_task(self, task_id: str, request: UpdateTaskRequest) -> Task:
+        """Update an existing task with optimistic concurrency control."""
+        logger.info(f'Updating task: {task_id}, request_version: {request.version}')
+
+        # Get existing task
+        existing_task = self.repository.get_task(task_id)
+        if existing_task is None:
+            raise ValueError(f'Task not found: {task_id}')
+
+        logger.debug(f'Existing task version: {existing_task.version}, Request version: {request.version}')
+
+        # Validate version for optimistic locking
+        if existing_task.version != request.version:
+            # Conflict detected - version mismatch
+            logger.debug(f'Version mismatch! 
Existing: {existing_task.version}, Request: {request.version}') + raise ConflictError(f'Task {task_id} was modified by another process', current_task=existing_task.model_dump()) + + # Validate dependencies if being updated + if request.dependencies is not None: + self._validate_dependencies(task_id, request.dependencies) + + # Validate status transition if status is being updated + if request.status is not None: + new_status = TaskStatus(request.status) + if not can_transition_to(existing_task.status, new_status): + raise ValueError(f'Invalid status transition from {existing_task.status.value} to {new_status.value}') + + # Create updated task with new values + # Domain logic: Generate new version + now = datetime.now(UTC) + new_version = int(now.timestamp() * 1000) # UTC timestamp in milliseconds + + # Keep track of old version for optimistic locking + old_version = existing_task.version + + logger.debug(f'Generating new version: {new_version} (old: {old_version})') + + updated_data = existing_task.model_dump() + updated_data['updated_at'] = now + updated_data['version'] = new_version + + if request.title is not None: + updated_data['title'] = request.title + if request.description is not None: + updated_data['description'] = request.description + if request.status is not None: + updated_data['status'] = TaskStatus(request.status) + logger.debug(f'Updating status to: {request.status}') + if request.priority is not None: + updated_data['priority'] = request.priority + if request.dependencies is not None: + updated_data['dependencies'] = request.dependencies + + updated_task = Task(**updated_data) + + logger.debug(f'Calling repository.update_task with expected_version={old_version}, new_version={new_version}') + + # Persist updated task with old version for conditional check + saved_task = self.repository.update_task(updated_task, expected_version=old_version) + + logger.debug(f'Repository update successful, saved version: {saved_task.version}') + + # Publish event + from services.task_service.models.task import TaskUpdatedEvent + + event = TaskUpdatedEvent(saved_task) + self.event_publisher.publish_event(event) + + return saved_task + + def delete_task(self, task_id: str) -> None: + """Delete a task.""" + # Get task to validate existence and get version + existing_task = self.repository.get_task(task_id) + if existing_task is None: + raise ValueError(f'Task not found: {task_id}') + + # Delete task + self.repository.delete_task(task_id, existing_task.version) + + # Publish event + from services.task_service.models.task import TaskDeletedEvent + + event = TaskDeletedEvent(task_id) + self.event_publisher.publish_event(event) + + def _validate_dependencies(self, task_id: str, dependencies: list[str]) -> None: + """Validate task dependencies for circular references.""" + if not dependencies: + return + + # Get all dependencies from repository + all_dependencies = self._get_all_dependencies() + + for dep_id in dependencies: + if has_circular_dependency(task_id, dep_id, all_dependencies): + raise CircularDependencyError(f'Circular dependency detected with task {dep_id}') + + def _get_all_dependencies(self) -> dict[str, list[str]]: + """Get all task dependencies from repository.""" + tasks, _ = self.repository.list_tasks(limit=1000) + dependencies = {} + for task in tasks: + if task.dependencies: + dependencies[task.task_id] = task.dependencies + return dependencies + + def _validate_pagination_params(self, limit: Optional[int] = None) -> int: + """Validate and normalize pagination parameters.""" + 
if limit is None: + return 50 + if limit < 1: + raise ValueError('Limit must be greater than 0') + if limit > 100: + raise ValueError('Limit cannot exceed 100') + return limit diff --git a/python-test-samples/cns427-testable-serverless-architecture/services/task_service/handler.py b/python-test-samples/cns427-testable-serverless-architecture/services/task_service/handler.py new file mode 100644 index 00000000..61bf4e68 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/services/task_service/handler.py @@ -0,0 +1,224 @@ +"""Lambda handler for task CRUD operations.""" + +from typing import Any, Dict, Optional + +from aws_lambda_powertools import Logger +from aws_lambda_powertools.event_handler import APIGatewayRestResolver +from aws_lambda_powertools.event_handler.exceptions import BadRequestError, InternalServerError, NotFoundError, ServiceError +from aws_lambda_powertools.logging import correlation_paths +from botocore.exceptions import ClientError +from pydantic import ValidationError + +from services.task_service.domain.exceptions import CircularDependencyError, ConflictError, RepositoryError, ResourceNotFoundError, ThrottlingError +from services.task_service.domain.task_service import TaskService +from services.task_service.models.api import CreateTaskRequest, ErrorResponse, PaginationInfo, TaskResponse, UpdateTaskRequest + +logger = Logger() +app = APIGatewayRestResolver() + +# Domain service - injected at runtime +task_service: Optional[TaskService] = None + + +def _initialize_dependencies(): + """Initialize dependencies with dependency injection.""" + global task_service + + if task_service is None: + task_service = TaskService() + + +def _handle_common_exceptions(e: Exception, operation: str = 'operation', task_id: Optional[str] = None): + """ + Handle common exceptions across all endpoints. + + Args: + e: The exception to handle + operation: Description of the operation (e.g., "creating task", "updating task") + task_id: Optional task ID for logging context + + Raises: + Appropriate Powertools exception or returns error response tuple + """ + extra_context = {'task_id': task_id} if task_id else {} + + # Domain exceptions + if isinstance(e, ThrottlingError): + logger.warning(f'Throttling error during {operation}: {str(e)}', extra={'retry_recommended': True, **extra_context}) + raise ServiceError(503, 'Service temporarily unavailable due to high load. Please retry after a few seconds.') + + if isinstance(e, ResourceNotFoundError): + logger.error(f'Resource not found during {operation}: {str(e)}', extra={'error_type': 'configuration', **extra_context}) + raise InternalServerError('Service configuration error. Please contact support.') + + if isinstance(e, RepositoryError): + logger.error(f'Repository error during {operation}: {str(e)}', extra=extra_context) + raise InternalServerError('Database error occurred. 
Please try again later.')
+
+    if isinstance(e, ConflictError):
+        logger.warning(f'Conflict during {operation}: {str(e)}', extra={'conflict_reason': 'version_mismatch', **extra_context})
+        # Serialize current_task to JSON-compatible format
+        current_task_serialized = None
+        if e.current_task:
+            import json
+            from datetime import datetime
+
+            # Convert datetime objects to ISO format strings
+            current_task_serialized = json.loads(json.dumps(e.current_task, default=lambda o: o.isoformat() if isinstance(o, datetime) else str(o)))
+        return {'error': 'Conflict', 'message': str(e), 'current_task': current_task_serialized}, 409
+
+    if isinstance(e, CircularDependencyError):
+        logger.warning(f'Circular dependency error during {operation}: {str(e)}', extra=extra_context)
+        error = ErrorResponse(error='BusinessError', message=str(e), details=None)
+        raise BadRequestError(error.model_dump_json())
+
+    # AWS SDK exceptions
+    if isinstance(e, ClientError):
+        logger.error(f'AWS service error during {operation}: {e}', extra=extra_context)
+        error = ErrorResponse(error='InternalError', message=f'Failed {operation}', details=None)
+        return error.model_dump(), 500
+
+    # Validation exceptions
+    if isinstance(e, ValidationError):
+        logger.warning(f'Validation error during {operation}: {e}', extra=extra_context)
+        error = ErrorResponse(error='ValidationError', message='Invalid request data', details={'errors': e.errors()})
+        raise BadRequestError(error.model_dump_json())
+
+    # Business logic exceptions
+    if isinstance(e, ValueError):
+        logger.warning(f'Business logic error during {operation}: {e}', extra=extra_context)
+        error_msg = str(e).lower()
+
+        if 'not found' in error_msg:
+            error = ErrorResponse(error='NotFound', message=str(e), details=None)
+            raise NotFoundError(error.model_dump_json())
+        elif 'version' in error_msg:
+            error = ErrorResponse(error='ConflictError', message=str(e), details=None)
+            return error.model_dump(), 409
+        else:
+            error = ErrorResponse(error='BusinessError', message=str(e), details=None)
+            raise BadRequestError(error.model_dump_json())
+
+    # Catch-all for unexpected exceptions
+    logger.error(f'Unexpected error during {operation}: {str(e)}', extra=extra_context)
+    raise InternalServerError('Internal server error')
+
+
+@logger.inject_lambda_context(correlation_id_path=correlation_paths.API_GATEWAY_REST)
+def lambda_handler(event: Dict[str, Any], context) -> Dict[str, Any]:
+    """Lambda handler entry point."""
+    _initialize_dependencies()
+    return app.resolve(event, context)
+
+
+@app.post('/tasks')
+def create_task():
+    """Create a new task."""
+    try:
+        # Parse and validate request
+        request_data = app.current_event.json_body
+        create_request = CreateTaskRequest.model_validate(request_data)
+
+        # Delegate to domain service
+        created_task = task_service.create_task(create_request)
+
+        # Return response
+        logger.info(f'Created task: {created_task.task_id}')
+        return TaskResponse.from_task(created_task).model_dump(), 201
+
+    except Exception as e:
+        result = _handle_common_exceptions(e, 'creating task')
+        if result:
+            return result
+        raise
+
+
+@app.get('/tasks/<task_id>')
+def get_task(task_id: str):
+    """Retrieve a task by ID."""
+    try:
+        if task_service is None:
+            raise RuntimeError('Task service not initialized')
+        task = task_service.get_task(task_id)
+
+        logger.info(f'Retrieved task: {task_id}')
+        return TaskResponse.from_task(task).model_dump()
+
+    except Exception as e:
+        result = _handle_common_exceptions(e, 'retrieving task', task_id)
+        if result:
+            return result
+        raise
+
+
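+# NOTE: each route in this module follows the same error-handling pattern.
+# _handle_common_exceptions either raises a Powertools HTTP exception
+# (BadRequestError -> 400, NotFoundError -> 404, ServiceError -> 503,
+# InternalServerError -> 500) or returns a (body, status_code) tuple
+# (409 for conflicts, 500 for AWS SDK errors). The trailing `raise` in each
+# route is defensive only: with the branches above, the helper never
+# returns None, so a route always either returns the tuple or re-raises.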
+@app.get('/tasks')
+def list_tasks():
+    """List tasks with pagination."""
+    try:
+        # Parse query parameters
+        limit = app.current_event.query_string_parameters.get('limit')
+        next_token = app.current_event.query_string_parameters.get('next_token')
+
+        # Delegate to domain service
+        tasks, next_page_token = task_service.list_tasks(int(limit) if limit else None, next_token)
+
+        # Build response
+        task_responses = [TaskResponse.from_task(task).model_dump() for task in tasks]
+        pagination = PaginationInfo(limit=len(tasks), next_token=next_page_token)
+
+        logger.info(f'Listed {len(tasks)} tasks')
+        return {'tasks': task_responses, 'pagination': pagination.model_dump()}
+
+    except Exception as e:
+        result = _handle_common_exceptions(e, 'listing tasks')
+        if result:
+            return result
+        raise
+
+
+@app.put('/tasks/<task_id>')
+def update_task(task_id: str):
+    """Update an existing task."""
+    try:
+        # Parse and validate request
+        request_data = app.current_event.json_body
+        update_request = UpdateTaskRequest.model_validate(request_data)
+
+        logger.debug(f'Update request for task {task_id}: {update_request.model_dump()}')
+
+        # Delegate to domain service
+        if task_service is None:
+            raise RuntimeError('Task service not initialized')
+        updated_task = task_service.update_task(task_id, update_request)
+
+        logger.debug(f'Update successful for task {task_id}, new version: {updated_task.version}')
+
+        # Return response
+        logger.info(f'Updated task: {task_id}')
+        return TaskResponse.from_task(updated_task).model_dump()
+
+    except Exception as e:
+        logger.debug(f'Update failed for task {task_id}: {type(e).__name__}: {str(e)}')
+        result = _handle_common_exceptions(e, 'updating task', task_id)
+        if result:
+            return result
+        raise
+
+
+@app.delete('/tasks/<task_id>')
+def delete_task(task_id: str):
+    """Delete a task."""
+    try:
+        # Delegate to domain service
+        if task_service is None:
+            raise RuntimeError('Task service not initialized')
+        task_service.delete_task(task_id)
+
+        logger.info(f'Deleted task: {task_id}')
+        return '', 204
+
+    except Exception as e:
+        result = _handle_common_exceptions(e, 'deleting task', task_id)
+        if result:
+            return result
+        raise
diff --git a/python-test-samples/cns427-testable-serverless-architecture/services/task_service/models/__init__.py b/python-test-samples/cns427-testable-serverless-architecture/services/task_service/models/__init__.py
new file mode 100644
index 00000000..8a02a550
--- /dev/null
+++ b/python-test-samples/cns427-testable-serverless-architecture/services/task_service/models/__init__.py
@@ -0,0 +1 @@
+"""Task service models layer."""
diff --git a/python-test-samples/cns427-testable-serverless-architecture/services/task_service/models/api.py b/python-test-samples/cns427-testable-serverless-architecture/services/task_service/models/api.py
new file mode 100644
index 00000000..fab498b5
--- /dev/null
+++ b/python-test-samples/cns427-testable-serverless-architecture/services/task_service/models/api.py
@@ -0,0 +1,109 @@
+"""API request and response models."""
+
+from typing import List, Optional
+
+from pydantic import BaseModel, Field, field_validator
+
+from services.task_service.models.task import Task, TaskPriority
+
+
+class CreateTaskRequest(BaseModel):
+    """Request model for creating a new task."""
+
+    title: str = Field(..., min_length=1, max_length=100, description='Task title')
+
+    @field_validator('title')
+    @classmethod
+    def validate_title(cls, v: str) -> str:
+        """Validate title is not just whitespace."""
+        if not v.strip():
+            raise ValueError('Title cannot be empty or
whitespace') + return v.strip() + + description: Optional[str] = Field(None, max_length=500, description='Task description') + priority: TaskPriority = Field(default=TaskPriority.MEDIUM, description='Task priority') + dependencies: list[str] = Field(default_factory=list, description='List of task IDs this task depends on') + + +class UpdateTaskRequest(BaseModel): + """Request model for updating an existing task.""" + + title: Optional[str] = Field(None, min_length=1, max_length=100, description='Task title') + + @field_validator('title') + @classmethod + def validate_title(cls, v: Optional[str]) -> Optional[str]: + """Validate title is not just whitespace.""" + if v is not None and not v.strip(): + raise ValueError('Title cannot be empty or whitespace') + return v.strip() if v else v + + description: Optional[str] = Field(None, max_length=500, description='Task description') + status: Optional[str] = Field(None, description='Task status') + + @field_validator('status') + @classmethod + def validate_status(cls, v: Optional[str]) -> Optional[str]: + """Validate status is a valid TaskStatus value.""" + if v is not None: + from services.task_service.models.task import TaskStatus + + valid_statuses = [status.value for status in TaskStatus] + if v not in valid_statuses: + raise ValueError(f'Invalid status. Must be one of: {valid_statuses}') + return v + + priority: Optional[TaskPriority] = Field(None, description='Task priority') + dependencies: Optional[list[str]] = Field(None, description='List of task IDs this task depends on') + version: int = Field(..., description='Current version for optimistic locking') + + +class TaskResponse(BaseModel): + """Response model for task operations.""" + + task_id: str = Field(..., description='Unique task identifier') + title: str = Field(..., description='Task title') + description: Optional[str] = Field(None, description='Task description') + status: str = Field(..., description='Task status') + priority: str = Field(..., description='Task priority') + dependencies: list[str] = Field(default_factory=list, description='List of task IDs this task depends on') + created_at: str = Field(..., description='Creation timestamp (ISO 8601)') + updated_at: str = Field(..., description='Last update timestamp (ISO 8601)') + version: int = Field(..., description='Current version') + + @classmethod + def from_task(cls, task: Task) -> 'TaskResponse': + """Create TaskResponse from Task entity.""" + return cls( + task_id=task.task_id, + title=task.title, + description=task.description, + status=task.status.value, + priority=task.priority.value, + dependencies=task.dependencies, + created_at=task.created_at.isoformat(), + updated_at=task.updated_at.isoformat(), + version=task.version, + ) + + +class PaginationInfo(BaseModel): + """Pagination information for list responses.""" + + next_token: Optional[str] = Field(None, description='Token for next page') + limit: int = Field(..., description='Number of items per page') + + +class ListTasksResponse(BaseModel): + """Response model for listing tasks.""" + + tasks: List[TaskResponse] = Field(..., description='List of tasks') + pagination: PaginationInfo = Field(..., description='Pagination information') + + +class ErrorResponse(BaseModel): + """Standard error response model.""" + + error: str = Field(..., description='Error type') + message: str = Field(..., description='Error message') + details: Optional[dict] = Field(None, description='Additional error details') diff --git 
a/python-test-samples/cns427-testable-serverless-architecture/services/task_service/models/task.py b/python-test-samples/cns427-testable-serverless-architecture/services/task_service/models/task.py new file mode 100644 index 00000000..f2d6aa82 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/services/task_service/models/task.py @@ -0,0 +1,162 @@ +"""Task entity models and validation.""" + +import json +import os +from dataclasses import dataclass +from datetime import UTC, datetime +from enum import Enum +from typing import Any, Dict, Optional +from uuid import uuid4 + +from pydantic import BaseModel, Field, field_validator + + +class TaskStatus(str, Enum): + """Task status enumeration.""" + + PENDING = 'pending' + IN_PROGRESS = 'in_progress' + COMPLETED = 'completed' + + +class TaskPriority(str, Enum): + """Task priority enumeration.""" + + LOW = 'low' + MEDIUM = 'medium' + HIGH = 'high' + + +class Task(BaseModel): + """Task entity with validation.""" + + task_id: str = Field(default_factory=lambda: str(uuid4()), description='Unique task identifier') + title: str = Field(..., min_length=1, max_length=100, description='Task title') + description: Optional[str] = Field(None, max_length=500, description='Task description') + status: TaskStatus = Field(default=TaskStatus.PENDING, description='Task status') + priority: TaskPriority = Field(default=TaskPriority.MEDIUM, description='Task priority') + dependencies: list[str] = Field(default_factory=list, description='List of task IDs this task depends on') + created_at: datetime = Field(default_factory=lambda: datetime.now(UTC), description='Creation timestamp') + updated_at: datetime = Field(default_factory=lambda: datetime.now(UTC), description='Last update timestamp') + version: int = Field(default=1, description='Version for optimistic locking') + + @field_validator('title') + @classmethod + def validate_title(cls, v: str) -> str: + """Validate title is not empty after stripping whitespace.""" + if not v.strip(): + raise ValueError('Title cannot be empty') + return v.strip() + + @field_validator('description') + @classmethod + def validate_description(cls, v: Optional[str]) -> Optional[str]: + """Validate and clean description.""" + if v is not None: + return v.strip() if v.strip() else None + return v + + def model_post_init(self, __context) -> None: + """Post-initialization to ensure updated_at is set.""" + if self.updated_at == self.created_at and hasattr(self, '_updating'): + self.updated_at = datetime.now(UTC) + + +class TaskEventType(str, Enum): + """Task event types for EventBridge.""" + + TASK_CREATED = 'TaskCreated' + TASK_UPDATED = 'TaskUpdated' + TASK_DELETED = 'TaskDeleted' + + +@dataclass +class TaskEvent: + """Base class for all task events.""" + + event_type: str + task_data: Dict[str, Any] + source: str = 'cns427-task-api' + detail_type_prefix: str = '' + + def __init__(self, event_type: str, task_data: Dict[str, Any], source: str = None, detail_type_prefix: str = None): + self.event_type = event_type + self.task_data = task_data + if source is not None: + self.source = source + if detail_type_prefix is not None: + self.detail_type_prefix = detail_type_prefix + + def to_eventbridge_entry(self, event_bus_name: str = None) -> Dict[str, Any]: + """Convert to EventBridge entry format.""" + detail_type = f'{self.detail_type_prefix}{self.event_type}' if self.detail_type_prefix else self.event_type + bus_name = event_bus_name or os.environ.get('EVENT_BUS_NAME', 'TaskEvents') + + return { + 'Source': 
self.source, + 'DetailType': detail_type, + 'Detail': json.dumps(self.task_data, default=str), # default=str handles datetime serialization + 'EventBusName': bus_name, + } + + @classmethod + def from_eventbridge_event(cls, event: Dict[str, Any]) -> 'TaskEvent': + """ + Create TaskEvent from EventBridge event structure. + + Args: + event: EventBridge event with structure: + { + "version": "0", + "detail-type": "TaskCreated", + "source": "cns427-task-api", + "detail": { task_data } + } + + Returns: + TaskEvent instance + """ + detail_type = event.get('detail-type', '') + source = event.get('source', 'cns427-task-api') + detail = event.get('detail', {}) + + # Remove TEST- prefix if present + event_type = detail_type.replace('TEST-', '') + + return cls(event_type=event_type, task_data=detail, source=source) + + +@dataclass +class TaskCreatedEvent(TaskEvent): + """Task created event.""" + + def __init__(self, task: Task, source: str = None, detail_type_prefix: str = None): + # Convert Task to dict + task_dict = task.model_dump(mode='json') + # Convert datetime objects to ISO format strings + task_dict['created_at'] = task.created_at.isoformat() + task_dict['updated_at'] = task.updated_at.isoformat() + + super().__init__('TaskCreated', task_dict, source=source, detail_type_prefix=detail_type_prefix) + + +@dataclass +class TaskUpdatedEvent(TaskEvent): + """Task updated event.""" + + def __init__(self, task: Task, source: str = None, detail_type_prefix: str = None): + # Convert Task to dict + task_dict = task.model_dump(mode='json') + # Convert datetime objects to ISO format strings + task_dict['created_at'] = task.created_at.isoformat() + task_dict['updated_at'] = task.updated_at.isoformat() + + super().__init__('TaskUpdated', task_dict, source=source, detail_type_prefix=detail_type_prefix) + + +@dataclass +class TaskDeletedEvent(TaskEvent): + """Task deleted event.""" + + def __init__(self, task_id: str, source: str = None, detail_type_prefix: str = None): + super().__init__('TaskDeleted', {'task_id': task_id}, source=source, detail_type_prefix=detail_type_prefix) diff --git a/python-test-samples/cns427-testable-serverless-architecture/services/task_service/requirements.txt b/python-test-samples/cns427-testable-serverless-architecture/services/task_service/requirements.txt new file mode 100644 index 00000000..b4ed8332 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/services/task_service/requirements.txt @@ -0,0 +1,2 @@ +aws-lambda-powertools[tracer]>=2.20.0 +pydantic>=2.0.3 diff --git a/python-test-samples/cns427-testable-serverless-architecture/shared/__init__.py b/python-test-samples/cns427-testable-serverless-architecture/shared/__init__.py new file mode 100644 index 00000000..c7c9929c --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/shared/__init__.py @@ -0,0 +1,6 @@ +""" +Shared infrastructure components. + +This package contains ONLY infrastructure adapters that are reusable across services. +It does NOT contain domain logic or models - those belong to specific services. +""" diff --git a/python-test-samples/cns427-testable-serverless-architecture/shared/integration/__init__.py b/python-test-samples/cns427-testable-serverless-architecture/shared/integration/__init__.py new file mode 100644 index 00000000..6d535d5b --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/shared/integration/__init__.py @@ -0,0 +1,9 @@ +""" +Infrastructure integration adapters. 
+
+This module contains adapters for external infrastructure services:
+- DynamoDB adapter for data persistence
+- EventBridge adapter for event publishing
+
+These adapters implement the output (driven) ports of the hexagonal architecture.
+"""
diff --git a/python-test-samples/cns427-testable-serverless-architecture/shared/integration/dynamodb_adapter.py b/python-test-samples/cns427-testable-serverless-architecture/shared/integration/dynamodb_adapter.py
new file mode 100644
index 00000000..7aa4f659
--- /dev/null
+++ b/python-test-samples/cns427-testable-serverless-architecture/shared/integration/dynamodb_adapter.py
@@ -0,0 +1,247 @@
+"""DynamoDB adapter for task persistence."""
+
+import json
+import os
+from datetime import datetime
+from typing import List, Optional
+
+import boto3
+from aws_lambda_powertools import Logger
+from boto3.dynamodb.types import TypeDeserializer, TypeSerializer
+from botocore.exceptions import ClientError
+
+from services.task_service.domain.exceptions import ConflictError, RepositoryError, ResourceNotFoundError, ThrottlingError
+from services.task_service.models.task import Task
+from shared.integration.interfaces import TaskRepository
+
+logger = Logger()
+
+# Initialize AWS clients at module level
+AWS_REGION = os.environ.get('AWS_REGION', 'us-west-2')
+dynamodb = boto3.client('dynamodb', region_name=AWS_REGION)
+
+# Type serializer/deserializer for DynamoDB client
+_serializer = TypeSerializer()
+_deserializer = TypeDeserializer()
+
+
+# Helper functions
+def python_to_dynamo(python_object: dict) -> dict:
+    """Convert Python dict to DynamoDB format."""
+    return {k: _serializer.serialize(v) for k, v in python_object.items()}
+
+
+def dynamo_to_python(dynamo_object: dict) -> dict:
+    """Convert DynamoDB format to Python dict."""
+    return {k: _deserializer.deserialize(v) for k, v in dynamo_object.items()}
+
+
+def _handle_dynamodb_error(e: ClientError, operation: str, current_task: Optional[dict] = None) -> None:
+    """Convert DynamoDB ClientError to domain exceptions."""
+    error_code = e.response['Error']['Code']
+
+    if error_code in ['ProvisionedThroughputExceededException', 'ThrottlingException']:
+        raise ThrottlingError(f'DynamoDB capacity exceeded during {operation}: {error_code}') from e
+    elif error_code == 'ResourceNotFoundException':
+        raise ResourceNotFoundError(f'DynamoDB table not found during {operation}') from e
+    elif error_code in ['AccessDeniedException', 'UnrecognizedClientException']:
+        raise RepositoryError(f'Permission error accessing DynamoDB during {operation}: {error_code}') from e
+    elif error_code in ['RequestTimeout', 'RequestTimeoutException']:
+        raise ThrottlingError(f'DynamoDB request timeout during {operation}: {error_code}') from e
+    elif error_code == 'ConditionalCheckFailedException':
+        raise ConflictError('The resource has been updated by another process.
Please refresh and try again.', current_task=current_task) from e + else: + raise RepositoryError(f'Database error during {operation}: {error_code}') from e + + +class DynamoDBTaskRepository(TaskRepository): + """DynamoDB implementation of TaskRepository.""" + + def __init__(self, table_name: str): + """Initialize DynamoDB repository.""" + self.table_name = table_name + self.dynamodb = dynamodb + + def create_task(self, task: Task) -> Task: + """Create a new task in DynamoDB.""" + try: + # Convert Task to dict with enum values as strings + item = task.model_dump(mode='json') + # Convert datetime objects to ISO format strings + item['created_at'] = task.created_at.isoformat() + item['updated_at'] = task.updated_at.isoformat() + + dynamo_item = python_to_dynamo(item) + + # Use condition to prevent overwriting existing task + self.dynamodb.put_item(TableName=self.table_name, Item=dynamo_item, ConditionExpression='attribute_not_exists(task_id)') + + logger.info(f'Created task: {task.task_id}') + return task + + except ClientError as e: + _handle_dynamodb_error(e, 'create_task') + raise # This line is unreachable but satisfies type checker + + def get_task(self, task_id: str) -> Optional[Task]: + """Retrieve a task by ID from DynamoDB.""" + try: + key = python_to_dynamo({'task_id': task_id}) + response = self.dynamodb.get_item(TableName=self.table_name, Key=key) + + if 'Item' not in response: + logger.info(f'Task not found: {task_id}') + return None + + item = dynamo_to_python(response['Item']) + # Convert ISO strings back to datetime objects + item['created_at'] = datetime.fromisoformat(item['created_at']) + item['updated_at'] = datetime.fromisoformat(item['updated_at']) + return Task(**item) + + except ClientError as e: + _handle_dynamodb_error(e, 'get_task') + raise # This line is unreachable but satisfies type checker + + def list_tasks(self, limit: int = 50, next_token: Optional[str] = None) -> tuple[List[Task], Optional[str]]: + """List tasks with pagination.""" + try: + scan_kwargs = {'TableName': self.table_name, 'Limit': limit} + + if next_token: + scan_kwargs['ExclusiveStartKey'] = json.loads(next_token) + + response = self.dynamodb.scan(**scan_kwargs) + + tasks = [] + for dynamo_item in response.get('Items', []): + item = dynamo_to_python(dynamo_item) + # Convert ISO strings back to datetime objects + item['created_at'] = datetime.fromisoformat(item['created_at']) + item['updated_at'] = datetime.fromisoformat(item['updated_at']) + tasks.append(Task(**item)) + + # Handle pagination + next_token_result: Optional[str] = None + if 'LastEvaluatedKey' in response: + next_token_result = json.dumps(response['LastEvaluatedKey']) + + logger.info(f'Listed {len(tasks)} tasks') + return tasks, next_token_result + + except ClientError as e: + _handle_dynamodb_error(e, 'list_tasks') + raise # This line is unreachable but satisfies type checker + + def update_task(self, task: Task, expected_version: int) -> Task: + """ + Update an existing task with optimistic locking using conditional update. 
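+
+        A sketch of the mechanics (implemented below): the item is written with
+        ConditionExpression '#version = :expected_version', so if a concurrent
+        writer already bumped the version, DynamoDB raises
+        ConditionalCheckFailedException and this adapter surfaces it as a
+        ConflictError carrying the current item state.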
+ + Args: + task: The task object with updated fields and NEW version + expected_version: The OLD version to check against (for optimistic locking) + + Returns: + The updated task + """ + try: + logger.debug(f'DynamoDB update_task called: task_id={task.task_id}, expected_version={expected_version}, new_version={task.version}') + + # Build update expression dynamically + update_expression_parts = [] + expression_attribute_values: dict = {} + expression_attribute_names: dict = {} + + # Use the version and updated_at from the task object (set by domain service) + # Always update these fields + update_expression_parts.append('#updated_at = :updated_at') + expression_attribute_names['#updated_at'] = 'updated_at' + expression_attribute_values[':updated_at'] = task.updated_at.isoformat() + + update_expression_parts.append('#version = :new_version') + expression_attribute_names['#version'] = 'version' + expression_attribute_values[':new_version'] = task.version # NEW version from domain + + logger.debug(f'Setting new version in DynamoDB: {task.version}') + + # Update other fields + update_expression_parts.append('title = :title') + expression_attribute_values[':title'] = task.title + + if task.description is not None: + update_expression_parts.append('description = :description') + expression_attribute_values[':description'] = task.description + + # Status is a reserved keyword + update_expression_parts.append('#status = :status') + expression_attribute_names['#status'] = 'status' + expression_attribute_values[':status'] = task.status.value + + update_expression_parts.append('priority = :priority') + expression_attribute_values[':priority'] = task.priority.value + + update_expression_parts.append('dependencies = :dependencies') + expression_attribute_values[':dependencies'] = task.dependencies + + # Build the complete update expression + update_expression = 'SET ' + ', '.join(update_expression_parts) + + # Condition: check that current version matches expected version (OLD version) + condition_expression = '#version = :expected_version' + expression_attribute_values[':expected_version'] = expected_version + + logger.debug(f'Condition: version must equal {expected_version}') + logger.debug(f'Update expression: {update_expression}') + + # Convert to DynamoDB format + key = python_to_dynamo({'task_id': task.task_id}) + dynamo_expression_values = python_to_dynamo(expression_attribute_values) + + logger.debug(f'Calling DynamoDB update_item for task {task.task_id}') + + self.dynamodb.update_item( + TableName=self.table_name, + Key=key, + UpdateExpression=update_expression, + ConditionExpression=condition_expression, + ExpressionAttributeNames=expression_attribute_names, + ExpressionAttributeValues=dynamo_expression_values, + ) + + logger.info(f'Updated task: {task.task_id}') + logger.debug(f'DynamoDB update successful for task {task.task_id}') + + # Task already has the new version and updated_at from domain service + return task + + except ClientError as e: + # For conflict errors, fetch current task state + current_task = None + if e.response['Error']['Code'] == 'ConditionalCheckFailedException': + try: + current = self.get_task(task.task_id) + if current: + current_task = current.model_dump() + except Exception: + pass # If we can't get current task, proceed without it + _handle_dynamodb_error(e, 'update_task', current_task=current_task) + raise # This line is unreachable but satisfies type checker + + def delete_task(self, task_id: str, version: int) -> None: + """Delete a task with version 
check.""" + try: + key = python_to_dynamo({'task_id': task_id}) + expression_values = python_to_dynamo({':version': version}) + + self.dynamodb.delete_item( + TableName=self.table_name, + Key=key, + ConditionExpression='#version = :version', + ExpressionAttributeNames={'#version': 'version'}, + ExpressionAttributeValues=expression_values, + ) + + logger.info(f'Deleted task: {task_id}') + + except ClientError as e: + _handle_dynamodb_error(e, 'delete_task') diff --git a/python-test-samples/cns427-testable-serverless-architecture/shared/integration/eventbridge_adapter.py b/python-test-samples/cns427-testable-serverless-architecture/shared/integration/eventbridge_adapter.py new file mode 100644 index 00000000..b9c4afad --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/shared/integration/eventbridge_adapter.py @@ -0,0 +1,87 @@ +"""EventBridge adapter for publishing task events.""" + +import json +import os +import time + +import boto3 +from aws_lambda_powertools import Logger +from botocore.exceptions import ClientError + +from services.task_service.domain.exceptions import RepositoryError +from services.task_service.models.task import TaskEvent +from shared.integration.interfaces import EventPublisher + +logger = Logger() + +# Initialize AWS clients at module level +AWS_REGION = os.environ.get('AWS_REGION', 'us-west-2') +events_client = boto3.client('events', region_name=AWS_REGION) + + +def _handle_eventbridge_error(e: ClientError, operation: str) -> None: + """Convert EventBridge ClientError to domain exceptions.""" + error_code = e.response['Error']['Code'] + + # All EventBridge errors are internal system issues, not client-facing capacity issues + # So we use RepositoryError (500) instead of ThrottlingError (503) + if error_code == 'ThrottlingException': + raise RepositoryError(f'EventBridge capacity exceeded during {operation}: {error_code}') from e + elif error_code in ['AccessDeniedException', 'UnauthorizedException']: + raise RepositoryError(f'Permission error accessing EventBridge during {operation}: {error_code}') from e + elif error_code in ['InternalException', 'ServiceUnavailableException']: + raise RepositoryError(f'EventBridge service error during {operation}: {error_code}') from e + else: + raise RepositoryError(f'EventBridge error during {operation}: {error_code}') from e + + +class EventBridgePublisher(EventPublisher): + """EventBridge implementation of EventPublisher.""" + + def __init__(self, event_bus_name: str = 'default'): + """Initialize EventBridge publisher.""" + self.event_bus_name = event_bus_name + self.events_client = events_client + self.max_retries = 3 + self.base_delay = 0.1 + + def publish_event(self, event: TaskEvent) -> None: + """Publish a task event to EventBridge with retry logic.""" + for attempt in range(self.max_retries): + try: + # Use the event's to_eventbridge_entry method + entry = event.to_eventbridge_entry(event_bus_name=self.event_bus_name) + logger.debug(f'Publishing event: {json.dumps(entry)}') + + response = self.events_client.put_events(Entries=[entry]) + + # Check for failed entries + if response.get('FailedEntryCount', 0) > 0: + failed_entries = response.get('Entries', []) + error_msg = f'Failed to publish event: {failed_entries}' + logger.error(error_msg) + raise RuntimeError(error_msg) + + logger.info(f'Published event: {event.event_type}') + return + + except ClientError as e: + error_code = e.response['Error']['Code'] + + # Don't retry on certain errors + if error_code in ['ValidationException', 
'InvalidParameterException']: + logger.error(f'Non-retryable error publishing event: {e}') + _handle_eventbridge_error(e, 'publish_event') + + # Retry on throttling and service errors + if attempt < self.max_retries - 1: + delay = self.base_delay * (2**attempt) # Exponential backoff + logger.warning(f'Retrying event publish in {delay}s (attempt {attempt + 1})') + time.sleep(delay) + else: + logger.error(f'Failed to publish event after {self.max_retries} attempts: {e}') + _handle_eventbridge_error(e, 'publish_event') + + except Exception as e: + logger.error(f'Unexpected error publishing event: {e}') + raise diff --git a/python-test-samples/cns427-testable-serverless-architecture/shared/integration/interfaces.py b/python-test-samples/cns427-testable-serverless-architecture/shared/integration/interfaces.py new file mode 100644 index 00000000..8246248f --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/shared/integration/interfaces.py @@ -0,0 +1,53 @@ +"""Core interfaces for the task management API.""" + +from abc import ABC, abstractmethod +from typing import List, Optional + +from services.task_service.models.task import Task, TaskEvent + + +class TaskRepository(ABC): + """Interface for task data persistence.""" + + @abstractmethod + def create_task(self, task: Task) -> Task: + """Create a new task.""" + pass + + @abstractmethod + def get_task(self, task_id: str) -> Optional[Task]: + """Retrieve a task by ID.""" + pass + + @abstractmethod + def list_tasks(self, limit: int = 50, next_token: Optional[str] = None) -> tuple[List[Task], Optional[str]]: + """List tasks with pagination.""" + pass + + @abstractmethod + def update_task(self, task: Task, expected_version: int) -> Task: + """ + Update an existing task with optimistic locking. + + Args: + task: The task with updated fields and new version + expected_version: The old version to check against + + Returns: + The updated task + """ + pass + + @abstractmethod + def delete_task(self, task_id: str, version: int) -> None: + """Delete a task.""" + pass + + +class EventPublisher(ABC): + """Interface for publishing task events.""" + + @abstractmethod + def publish_event(self, event: TaskEvent) -> None: + """Publish a task event.""" + pass diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/__init__.py b/python-test-samples/cns427-testable-serverless-architecture/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/e2e/README.md b/python-test-samples/cns427-testable-serverless-architecture/tests/e2e/README.md new file mode 100644 index 00000000..d008d2ab --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/tests/e2e/README.md @@ -0,0 +1,351 @@ +# End-to-End Tests + +This directory contains end-to-end tests that validate complete user workflows through the real API Gateway endpoint. + +## Overview + +E2E tests validate the entire system working together: +- **API Gateway** with IAM authorization +- **Lambda functions** (cold/warm starts) +- **DynamoDB** persistence +- **EventBridge** event delivery +- **Async processing** (notification Lambda) +- **Business rules** (circular dependencies, status transitions) + +## Prerequisites + +### 1. 
Deploy Infrastructure
+
+E2E tests require the main application to be deployed; the test harness is optional and only needed for event verification:
+
+```bash
+# Deploy main application
+make deploy
+
+# Deploy test harness (optional, for event verification)
+make deploy-test-infra
+```
+
+### 2. AWS Credentials
+
+E2E tests use your AWS credentials to sign requests with SigV4:
+
+```bash
+# Ensure credentials are configured
+aws configure
+
+# Or use environment variables
+export AWS_ACCESS_KEY_ID=your_key
+export AWS_SECRET_ACCESS_KEY=your_secret
+export AWS_DEFAULT_REGION=us-west-2
+```
+
+### 3. Get API Endpoint
+
+The tests automatically retrieve the API endpoint from CloudFormation stack outputs. Alternatively, set it manually:
+
+```bash
+export API_ENDPOINT="https://your-api-id.execute-api.us-west-2.amazonaws.com/prod"
+```
+
+## Running E2E Tests
+
+### All E2E Tests
+
+```bash
+# Using Make
+make test-e2e
+
+# Or using pytest directly
+poetry run pytest tests/e2e -v -m e2e
+
+# Or using test script
+poetry run test-e2e
+```
+
+### Specific Test
+
+```bash
+# Run specific test file
+poetry run pytest tests/e2e/test_task_lifecycle_e2e.py -v
+
+# Run specific test method
+poetry run pytest tests/e2e/test_task_lifecycle_e2e.py::TestTaskLifecycleE2E::test_complete_task_lifecycle_with_dependencies -v
+```
+
+### With Verbose Output
+
+```bash
+# See detailed test output
+poetry run pytest tests/e2e -v -s
+```
+
+## Test Scenarios
+
+### 1. Complete Task Lifecycle with Dependencies
+
+**Test**: `test_complete_task_lifecycle_with_dependencies`
+
+**Flow**:
+1. Create parent task via API Gateway
+2. Verify parent task in DynamoDB
+3. Create child task with dependency on parent
+4. Verify child task with correct dependencies
+5. Wait for async EventBridge processing
+6. Verify events were published
+7. Update parent task status
+8. Verify update propagated
+9. Retrieve tasks via API
+10. Cleanup test data
+
+**Validates**:
+- API Gateway IAM authorization (SigV4 signing)
+- Lambda invocation through API Gateway
+- DynamoDB CRUD operations
+- EventBridge event publishing
+- Async notification processing
+- Dependency management
+- Status updates
+- Error handling
+
+### 2. Circular Dependency Prevention
+
+**Test**: `test_circular_dependency_prevention_e2e`
+
+**Flow**:
+1. Create task A
+2. Create task B with dependency on A
+3. Try to update A to depend on B (should fail)
+4. Verify error response
+5. Cleanup
+
+**Validates**:
+- Business rule enforcement (circular dependencies)
+- Error handling across API β†’ Lambda β†’ Domain
+- Proper HTTP error codes
+- Error message formatting
+
+## IAM Authorization
+
+The API Gateway uses IAM authorization, which means:
+
+1. **Requests must be signed** with AWS SigV4
+2. **Credentials required**: AWS access key and secret key
+3. 
**Permissions needed**: `execute-api:Invoke` on the API + +### How Signing Works + +The tests use `botocore.auth.SigV4Auth` to sign requests: + +```python +from botocore.auth import SigV4Auth +from botocore.awsrequest import AWSRequest + +# Create request +request = AWSRequest( + method='POST', + url='https://api-id.execute-api.region.amazonaws.com/prod/tasks', + data=json.dumps({'title': 'My Task'}), + headers={'Content-Type': 'application/json'} +) + +# Sign with your credentials +credentials = boto3.Session().get_credentials() +SigV4Auth(credentials, 'execute-api', 'us-west-2').add_auth(request) + +# Make request with signed headers +response = requests.post(url, headers=dict(request.headers), json=body) +``` + +### Required IAM Permissions + +Your AWS credentials need these permissions: + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "execute-api:Invoke", + "Resource": "arn:aws:execute-api:region:account:api-id/*" + } + ] +} +``` + +## Test Data Management + +### Automatic Cleanup + +All E2E tests include cleanup in `finally` blocks to ensure test data is removed even if tests fail. + +### Manual Cleanup + +If tests fail and leave data: + +```bash +# List tasks in DynamoDB +aws dynamodb scan --table-name cns427-task-api-tasks --select COUNT + +# Delete specific task +aws dynamodb delete-item \ + --table-name cns427-task-api-tasks \ + --key '{"task_id":{"S":"your-task-id"}}' +``` + +## Troubleshooting + +### Issue: API Endpoint Not Found + +**Error**: `Could not get API endpoint` + +**Solution**: +```bash +# Set manually +export API_ENDPOINT=$(aws cloudformation describe-stacks \ + --stack-name cns427-task-api-api \ + --query 'Stacks[0].Outputs[?OutputKey==`ApiEndpoint`].OutputValue' \ + --output text) + +# Or check CloudFormation console for the output +``` + +### Issue: 403 Forbidden + +**Error**: `403 Forbidden` when calling API + +**Solution**: +- Check AWS credentials are configured: `aws sts get-caller-identity` +- Verify IAM permissions for `execute-api:Invoke` +- Ensure API Gateway has IAM authorization enabled + +### Issue: Signature Mismatch + +**Error**: `SignatureDoesNotMatch` + +**Solution**: +- Check system clock is synchronized +- Verify AWS region matches API region +- Ensure credentials are valid + +### Issue: 502 Bad Gateway + +**Error**: `502 Bad Gateway` or `Internal server error` + +**Solution**: +1. **Check Lambda logs** (most common cause): + ```bash + aws logs tail /aws/lambda/cns427-task-api-task-handler --follow --region us-west-2 + ``` + +2. **Wait after deployment**: Lambda may need time to initialize + ```bash + # After deploying, wait 30-60 seconds before running E2E tests + make deploy + sleep 60 + make test-e2e + ``` + +3. **Verify environment variables**: + ```bash + aws lambda get-function-configuration \ + --function-name cns427-task-api-task-handler \ + --query 'Environment.Variables' + ``` + +4. **Check IAM permissions**: Lambda needs DynamoDB and EventBridge permissions + +5. 
**Verify API Gateway integration**: + ```bash + aws apigateway get-rest-apis --query 'items[?name==`cns427-task-api-api`]' + ``` + +### Issue: Tasks Not Found in DynamoDB + +**Error**: Task not found after creation + +**Solution**: +- Increase wait time for eventual consistency +- Check DynamoDB table name is correct +- Verify Lambda has permissions to write to DynamoDB + +### Issue: Events Not Captured in Test Harness + +**This is expected behavior for E2E tests!** + +E2E tests use **real events** (source: `cns427-task-api`, detailType: `TaskCreated`), which are NOT captured by the test harness. + +The test harness only captures **TEST-* events** (source: `TEST-cns427-task-api`, detailType: `TEST-TaskCreated`) used by integration tests. + +**Why?** +- **E2E tests**: Verify production flow with real events +- **Integration tests**: Use TEST- prefix for isolated testing +- **Test harness**: Only captures TEST- events to avoid mixing test and production data + +**To verify EventBridge in E2E tests**: +- Check CloudWatch metrics for notification Lambda invocations +- Check CloudWatch logs for notification Lambda +- Verify the full workflow completes successfully (which proves events were delivered) + +## Performance + +E2E tests are slower than unit/integration tests: + +- **Unit tests**: < 1 second (in-memory) +- **Integration tests**: 1-5 seconds (real AWS, single service) +- **E2E tests**: 5-15 seconds (full workflow, multiple services, async processing) + +This is expected and acceptable for E2E tests. + +## Best Practices + +1. **Run E2E tests less frequently**: Before commits, not on every save +2. **Use descriptive test run IDs**: Makes debugging easier +3. **Always cleanup**: Use `finally` blocks +4. **Wait for async operations**: EventBridge processing takes time +5. **Check CloudWatch logs**: If tests fail, check Lambda logs +6. 
**Isolate test data**: Use unique IDs to avoid conflicts
+
+## CI/CD Integration
+
+### GitHub Actions Example
+
+```yaml
+name: E2E Tests
+
+on:
+  push:
+    branches: [main]
+  pull_request:
+
+jobs:
+  e2e:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+
+      - name: Configure AWS Credentials
+        uses: aws-actions/configure-aws-credentials@v2
+        with:
+          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
+          aws-region: us-west-2
+
+      - name: Setup Python
+        uses: actions/setup-python@v4
+        with:
+          python-version: '3.13'
+
+      - name: Install Poetry
+        run: pipx install poetry
+
+      - name: Install dependencies
+        run: poetry install
+
+      - name: Run E2E tests
+        run: make test-e2e
+```
+
+## Next Steps
+
+- **[Testing Guide](../../docs/testing-guide.md)** - Overall testing strategy
+- **[Deployment Guide](../../docs/deployment.md)** - Deploy infrastructure
+- **[Architecture Guide](../../docs/architecture.md)** - Understand the architecture
diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/e2e/__init__.py b/python-test-samples/cns427-testable-serverless-architecture/tests/e2e/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/e2e/test_task_lifecycle_e2e.py b/python-test-samples/cns427-testable-serverless-architecture/tests/e2e/test_task_lifecycle_e2e.py
new file mode 100644
index 00000000..806f94f1
--- /dev/null
+++ b/python-test-samples/cns427-testable-serverless-architecture/tests/e2e/test_task_lifecycle_e2e.py
@@ -0,0 +1,502 @@
+"""
+End-to-End Test: Complete Task Lifecycle with Dependencies
+
+Tests the full user workflow through real API Gateway:
+1. User creates parent task via API Gateway
+2. User creates child task with dependency on parent
+3. Task persisted to DynamoDB
+4. Events published to EventBridge (real events, not TEST- prefix)
+5. Notification Lambda processes events
+6. 
Verify entire flow completed successfully + +This validates: +- API Gateway IAM authorization +- Lambda cold/warm starts +- DynamoDB persistence +- EventBridge event delivery (real production flow) +- Async notification processing +- Business rules (circular dependencies) +- Error handling across services + +Note on EventBridge Verification: +- E2E tests use REAL events (source: "cns427-task-api", detailType: "TaskCreated") +- Test harness only captures TEST-* events (source: "TEST-cns427-task-api") +- This is intentional - E2E tests verify production flow, not test flow +- Integration tests use TEST- prefix for isolated testing +""" + +import json +import os +import time +import uuid + +import boto3 +import pytest +import requests +from botocore.auth import SigV4Auth +from botocore.awsrequest import AWSRequest + +# Get configuration from environment with defaults +from infrastructure.config import InfrastructureConfig + +config = InfrastructureConfig() +API_ENDPOINT = os.environ.get('API_ENDPOINT') +AWS_REGION = os.environ.get('AWS_DEFAULT_REGION', os.environ.get('AWS_REGION', 'us-west-2')) +TASKS_TABLE_NAME = os.environ.get('TASKS_TABLE_NAME', config.tasks_table_name()) +TEST_HARNESS_TABLE = os.environ.get('TEST_HARNESS_TABLE', config.test_results_table_name()) + + +@pytest.fixture(scope='module') +def api_endpoint(): + """Get API endpoint from environment, CDK outputs file, or CloudFormation stack.""" + if API_ENDPOINT: + return API_ENDPOINT.rstrip('/') + + # Try to get from CDK outputs file (created by deploy script) + try: + import json + from pathlib import Path + + outputs_file = Path('cdk-outputs.json') + if outputs_file.exists(): + with open(outputs_file) as f: + outputs = json.load(f) + # Look for ApiEndpoint in the API stack + # Structure: {"cns427-task-api-api": {"ApiEndpoint": "https://..."}} + api_stack_name = 'cns427-task-api-api' + if api_stack_name in outputs and 'ApiEndpoint' in outputs[api_stack_name]: + endpoint = outputs[api_stack_name]['ApiEndpoint'].rstrip('/') + print(f'\n[E2E] β„Ή Using API endpoint from cdk-outputs.json: {endpoint}') + return endpoint + else: + print('\n[E2E] ⚠ ApiEndpoint not found in cdk-outputs.json') + print(f'[E2E] Available stacks: {list(outputs.keys())}') + except Exception as e: + print(f'\n[E2E] ⚠ Could not read cdk-outputs.json: {e}') + + # Try to get from CloudFormation stack outputs + try: + import boto3 + + cfn = boto3.client('cloudformation', region_name=AWS_REGION) + stack_name = 'cns427-task-api-api' + + response = cfn.describe_stacks(StackName=stack_name) + outputs = response['Stacks'][0]['Outputs'] + + for output in outputs: + if output['OutputKey'] == 'ApiEndpoint': + endpoint = output['OutputValue'].rstrip('/') + print(f'\n[E2E] β„Ή Using API endpoint from CloudFormation: {endpoint}') + return endpoint + except Exception as e: + print(f'\n[E2E] ⚠ Could not get API endpoint from CloudFormation: {e}') + + # If we get here, we couldn't find the endpoint + pytest.skip( + 'API_ENDPOINT not set and could not retrieve from CDK outputs or CloudFormation.\n' + 'Please either:\n' + ' 1. Deploy with: make deploy (saves outputs to cdk-outputs.json)\n' + ' 2. 
Set API_ENDPOINT environment variable:\n' + ' export API_ENDPOINT=$(aws cloudformation describe-stacks --stack-name cns427-task-api-api ' + "--query 'Stacks[0].Outputs[?OutputKey==`ApiEndpoint`].OutputValue' --output text --region us-west-2)" + ) + + +@pytest.fixture +def aws_session(): + """Create AWS session for signing requests.""" + return boto3.Session(region_name=AWS_REGION) + + +@pytest.fixture +def dynamodb_client(): + """DynamoDB client for verification.""" + return boto3.client('dynamodb', region_name=AWS_REGION) + + +@pytest.fixture +def test_run_id(): + """Generate unique test run ID.""" + return f'e2e-{uuid.uuid4()}' + + +def sign_request(method, url, body=None, session=None): + """ + Sign HTTP request with AWS SigV4 for IAM authorization. + + Args: + method: HTTP method (GET, POST, PUT, DELETE) + url: Full URL to request + body: Request body (dict or None) + session: boto3 Session for credentials + + Returns: + dict: Headers with AWS signature + """ + if session is None: + session = boto3.Session(region_name=AWS_REGION) + + credentials = session.get_credentials() + + # Prepare request + request_body = json.dumps(body) if body else '' + request = AWSRequest( + method=method, + url=url, + data=request_body, + headers={ + 'Content-Type': 'application/json', + 'Host': url.split('/')[2], # Extract host from URL + }, + ) + + # Sign request + SigV4Auth(credentials, 'execute-api', AWS_REGION).add_auth(request) + + return dict(request.headers) + + +def make_api_request(method, url, body=None, session=None, timeout=30): + """ + Make signed API request to API Gateway. + + Args: + method: HTTP method + url: Full URL + body: Request body dict + session: boto3 Session + timeout: Request timeout in seconds (default: 30) + + Returns: + requests.Response + """ + headers = sign_request(method, url, body, session) + + if method == 'GET': + return requests.get(url, headers=headers, timeout=timeout) + elif method == 'POST': + return requests.post(url, headers=headers, json=body, timeout=timeout) + elif method == 'PUT': + return requests.put(url, headers=headers, json=body, timeout=timeout) + elif method == 'DELETE': + return requests.delete(url, headers=headers, timeout=timeout) + else: + raise ValueError(f'Unsupported method: {method}') + + +@pytest.mark.e2e +class TestTaskLifecycleE2E: + """End-to-end tests for complete task lifecycle.""" + + def test_complete_task_lifecycle_with_dependencies(self, api_endpoint, aws_session, dynamodb_client, test_run_id): + """ + E2E: Create tasks with dependencies, verify persistence and async processing. + + Flow: + 1. Create parent task via API Gateway + 2. Verify parent task in DynamoDB + 3. Create child task with dependency on parent + 4. Verify child task in DynamoDB with correct dependency + 5. Wait for async EventBridge processing + 6. Verify events were published and processed + 7. Update parent task status + 8. Verify update propagated correctly + 9. 
Cleanup all test data + """ + parent_task_id = None + child_task_id = None + + try: + # Step 1: Create parent task via API Gateway + print('\n[E2E] Step 1: Creating parent task via API Gateway') + parent_task_data = { + 'title': f'E2E Parent Task - {test_run_id}', + 'description': 'Parent task for E2E testing', + 'priority': 'high', + 'status': 'pending', + } + + response = make_api_request('POST', f'{api_endpoint}/tasks', body=parent_task_data, session=aws_session) + + if response.status_code != 201: + print('\n[E2E] ❌ API request failed!') + print(f'[E2E] Status Code: {response.status_code}') + print(f'[E2E] Response: {response.text}') + print(f'[E2E] Headers: {response.headers}') + print('\n[E2E] Troubleshooting:') + print('[E2E] 1. Check Lambda logs:') + print('[E2E] aws logs tail /aws/lambda/cns427-task-api-task-handler --follow') + print('[E2E] 2. Check if Lambda has correct environment variables') + print('[E2E] 3. Check if Lambda has permissions to DynamoDB and EventBridge') + print('[E2E] 4. Verify API Gateway is deployed: aws apigateway get-rest-apis') + + assert response.status_code == 201, f'Failed to create parent task: {response.text}' + parent_task = response.json() + parent_task_id = parent_task['task_id'] + + print(f'[E2E] βœ“ Parent task created: {parent_task_id}') + assert parent_task['title'] == parent_task_data['title'] + assert parent_task['priority'] == 'high' + assert parent_task['status'] == 'pending' + + # Step 2: Verify parent task in DynamoDB + print('[E2E] Step 2: Verifying parent task in DynamoDB') + time.sleep(1) # Allow for eventual consistency + + db_response = dynamodb_client.get_item(TableName=TASKS_TABLE_NAME, Key={'task_id': {'S': parent_task_id}}) + + assert 'Item' in db_response, 'Parent task not found in DynamoDB' + assert db_response['Item']['title']['S'] == parent_task_data['title'] + print('[E2E] βœ“ Parent task verified in DynamoDB') + + # Step 3: Create child task with dependency on parent + print('[E2E] Step 3: Creating child task with dependency') + child_task_data = { + 'title': f'E2E Child Task - {test_run_id}', + 'description': 'Child task depends on parent', + 'priority': 'medium', + 'status': 'pending', + 'dependencies': [parent_task_id], + } + + response = make_api_request('POST', f'{api_endpoint}/tasks', body=child_task_data, session=aws_session) + + assert response.status_code == 201, f'Failed to create child task: {response.text}' + child_task = response.json() + child_task_id = child_task['task_id'] + + print(f'[E2E] βœ“ Child task created: {child_task_id}') + assert child_task['title'] == child_task_data['title'] + assert parent_task_id in child_task.get('dependencies', []) + + # Step 4: Verify child task in DynamoDB with correct dependency + print('[E2E] Step 4: Verifying child task dependencies in DynamoDB') + time.sleep(1) + + db_response = dynamodb_client.get_item(TableName=TASKS_TABLE_NAME, Key={'task_id': {'S': child_task_id}}) + + assert 'Item' in db_response, 'Child task not found in DynamoDB' + db_dependencies = db_response['Item'].get('dependencies', {}).get('L', []) + db_dependency_ids = [dep['S'] for dep in db_dependencies] + assert parent_task_id in db_dependency_ids, 'Dependency not stored correctly' + print('[E2E] βœ“ Child task dependencies verified in DynamoDB') + + # Step 5: Wait for async EventBridge processing + print('[E2E] Step 5: Waiting for async EventBridge processing') + time.sleep(5) # Allow time for EventBridge β†’ Lambda β†’ DynamoDB + + # Step 6: Verify notification Lambda was triggered by EventBridge + 
print('[E2E] Step 6: Verifying notification Lambda was triggered') + + # Check CloudWatch Logs for notification Lambda invocations + logs_client = boto3.client('logs', region_name=AWS_REGION) + notification_lambda_name = 'cns427-task-api-notification-handler' + log_group_name = f'/aws/lambda/{notification_lambda_name}' + + try: + # Get recent log streams (last 5 minutes) + end_time = int(time.time() * 1000) + start_time = end_time - (5 * 60 * 1000) # 5 minutes ago + + # Query logs for our task IDs + response = logs_client.filter_log_events( + logGroupName=log_group_name, startTime=start_time, endTime=end_time, filterPattern=f'"{parent_task_id}"' + ) + + if response.get('events'): + print('[E2E] βœ“ Notification Lambda was triggered for parent task') + print(f'[E2E] βœ“ Found {len(response["events"])} log entries') + print('[E2E] βœ“ EventBridge β†’ Notification Lambda flow verified') + else: + print('[E2E] ⚠ No notification Lambda logs found for parent task') + print("[E2E] β„Ή This may be normal if notification Lambda hasn't processed yet") + print(f'[E2E] β„Ή Check logs manually: aws logs tail {log_group_name} --follow') + + except Exception as e: + print(f'[E2E] ⚠ Could not verify notification Lambda: {e}') + print('[E2E] β„Ή This is not a test failure - notification Lambda may not be deployed') + + print('[E2E] β„Ή Note: E2E tests use real events (not TEST- prefix)') + print('[E2E] β„Ή Test harness only captures TEST-* events for integration tests') + + # Step 7: Update parent task status + print('[E2E] Step 7: Updating parent task status') + + # First, get the latest version of the task + response = make_api_request('GET', f'{api_endpoint}/tasks/{parent_task_id}', session=aws_session) + assert response.status_code == 200, f'Failed to get parent task: {response.text}' + current_parent = response.json() + + print(f'[E2E] Current parent version from GET: {current_parent["version"]}') + print(f'[E2E] Current parent data: {json.dumps(current_parent, indent=2)}') + + # Small delay to ensure no race condition + time.sleep(0.5) + + # Now update with the current version + update_data = { + 'status': 'in_progress', + 'version': current_parent['version'], # Use current version for optimistic locking + } + + print(f'[E2E] Sending update with version: {update_data["version"]}') + + response = make_api_request('PUT', f'{api_endpoint}/tasks/{parent_task_id}', body=update_data, session=aws_session) + + # If we get a conflict, retry once with fresh version + if response.status_code == 409: + print('[E2E] Got conflict, retrying with fresh version...') + time.sleep(0.5) + + # Get fresh version + response = make_api_request('GET', f'{api_endpoint}/tasks/{parent_task_id}', session=aws_session) + assert response.status_code == 200 + current_parent = response.json() + + # Retry update + update_data['version'] = current_parent['version'] + print(f'[E2E] Retrying with version: {update_data["version"]}') + + response = make_api_request('PUT', f'{api_endpoint}/tasks/{parent_task_id}', body=update_data, session=aws_session) + + if response.status_code != 200: + print(f'[E2E] Update failed with status {response.status_code}') + print(f'[E2E] Response: {response.text}') + + assert response.status_code == 200, f'Failed to update parent task: {response.text}' + updated_task = response.json() + assert updated_task['status'] == 'in_progress' + print('[E2E] βœ“ Parent task status updated') + + # Step 8: Verify update propagated correctly + print('[E2E] Step 8: Verifying update in DynamoDB') + time.sleep(1) + + 
db_response = dynamodb_client.get_item(TableName=TASKS_TABLE_NAME, Key={'task_id': {'S': parent_task_id}}) + + assert db_response['Item']['status']['S'] == 'in_progress' + print('[E2E] βœ“ Update verified in DynamoDB') + + # Step 9: Verify we can retrieve tasks via API + print('[E2E] Step 9: Retrieving tasks via API') + + # Get parent task + response = make_api_request('GET', f'{api_endpoint}/tasks/{parent_task_id}', session=aws_session) + assert response.status_code == 200 + retrieved_parent = response.json() + assert retrieved_parent['task_id'] == parent_task_id + assert retrieved_parent['status'] == 'in_progress' + + # Get child task + response = make_api_request('GET', f'{api_endpoint}/tasks/{child_task_id}', session=aws_session) + assert response.status_code == 200 + retrieved_child = response.json() + assert retrieved_child['task_id'] == child_task_id + assert parent_task_id in retrieved_child.get('dependencies', []) + + print('[E2E] βœ“ Tasks retrieved successfully via API') + + print('\n[E2E] βœ… Complete task lifecycle test PASSED') + print('[E2E] Summary:') + print(f' - Parent task: {parent_task_id}') + print(f' - Child task: {child_task_id}') + print(' - Dependency validated: βœ“') + print(' - DynamoDB persistence: βœ“') + print(' - API Gateway IAM auth: βœ“') + print(' - Status updates: βœ“') + + finally: + # Step 10: Cleanup - Delete test tasks + print('\n[E2E] Cleanup: Deleting test tasks') + + if child_task_id: + try: + response = make_api_request('DELETE', f'{api_endpoint}/tasks/{child_task_id}', session=aws_session) + if response.status_code == 204: + print(f'[E2E] βœ“ Child task deleted: {child_task_id}') + except Exception as e: + print(f'[E2E] ⚠ Failed to delete child task: {e}') + + if parent_task_id: + try: + response = make_api_request('DELETE', f'{api_endpoint}/tasks/{parent_task_id}', session=aws_session) + if response.status_code == 204: + print(f'[E2E] βœ“ Parent task deleted: {parent_task_id}') + except Exception as e: + print(f'[E2E] ⚠ Failed to delete parent task: {e}') + + def test_circular_dependency_prevention_e2e(self, api_endpoint, aws_session, test_run_id): + """ + E2E: Verify circular dependency detection works through API. + + Flow: + 1. Create task A + 2. Create task B with dependency on A + 3. Try to update A to depend on B (should fail) + 4. 
Verify error handling + """ + task_a_id = None + task_b_id = None + + try: + # Create task A + print('\n[E2E] Creating task A') + response = make_api_request( + 'POST', + f'{api_endpoint}/tasks', + body={'title': f'E2E Task A - {test_run_id}', 'description': 'Task A for circular dependency test'}, + session=aws_session, + ) + + assert response.status_code == 201 + task_a = response.json() + task_a_id = task_a['task_id'] + print(f'[E2E] βœ“ Task A created: {task_a_id}') + + # Create task B with dependency on A + print('[E2E] Creating task B with dependency on A') + response = make_api_request( + 'POST', + f'{api_endpoint}/tasks', + body={'title': f'E2E Task B - {test_run_id}', 'description': 'Task B depends on A', 'dependencies': [task_a_id]}, + session=aws_session, + ) + + assert response.status_code == 201 + task_b = response.json() + task_b_id = task_b['task_id'] + print(f'[E2E] βœ“ Task B created: {task_b_id}') + + # Try to create circular dependency (should fail) + print('[E2E] Attempting to create circular dependency') + response = make_api_request( + 'PUT', + f'{api_endpoint}/tasks/{task_a_id}', + body={ + 'dependencies': [task_b_id], + 'version': task_a['version'], # Required for optimistic locking + }, + session=aws_session, + ) + + # Should fail with 400 or 409 + assert response.status_code in [400, 409], f'Expected error for circular dependency, got {response.status_code}' + + error_response = response.json() + assert 'error' in error_response or 'message' in error_response + print('[E2E] βœ“ Circular dependency correctly prevented') + print(f'[E2E] Error message: {error_response}') + + print('\n[E2E] βœ… Circular dependency prevention test PASSED') + + finally: + # Cleanup + print('\n[E2E] Cleanup: Deleting test tasks') + for task_id in [task_b_id, task_a_id]: + if task_id: + try: + make_api_request('DELETE', f'{api_endpoint}/tasks/{task_id}', session=aws_session) + print(f'[E2E] βœ“ Task deleted: {task_id}') + except Exception as e: + print(f'[E2E] ⚠ Failed to delete task: {e}') diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/integration/README.md b/python-test-samples/cns427-testable-serverless-architecture/tests/integration/README.md new file mode 100644 index 00000000..ce35a5ec --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/tests/integration/README.md @@ -0,0 +1,179 @@ +# Integration Tests + +This directory contains integration tests that test the full Task API flow using real AWS services. 
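+
+The tests pair real AWS calls with in-process stubs. As a rough sketch of the capture-style EventBridge stub described in the next section (the class name and shape here are illustrative, not the repo's actual fixture):
+
+```python
+class CapturingEventBridgePublisher:
+    """Illustrative stub: records events in memory instead of publishing them."""
+
+    def __init__(self):
+        self.captured = []
+
+    def put_events(self, **kwargs):
+        # Mirror the real client's success response shape so callers need no changes
+        self.captured.extend(kwargs.get('Entries', []))
+        return {'FailedEntryCount': 0, 'Entries': []}
+```
+
+Tests can then assert on `stub.captured` to verify event content without touching a real event bus.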
+ +## Test Strategy + +### DynamoDB Integration Tests (`test_dynamodb_integration.py`) +- **Purpose**: Test complete CRUD flow through API Gateway β†’ Lambda β†’ DynamoDB +- **AWS Services**: Uses real DynamoDB table for data persistence, fake EventBridge for event capture +- **EventBridge**: Stubbed to capture events without publishing +- **Verification**: Direct DynamoDB queries to verify data persistence + +### Error Simulation Tests (`test_task_api_error_simulation.py`) +- **Purpose**: Test error handling scenarios without affecting real AWS resources +- **DynamoDB**: Uses error-simulating fake repository +- **EventBridge**: Stubbed to capture events +- **Error Types**: Throttling, access denied, resource not found, validation errors, service unavailable + +## Prerequisites + +### AWS Credentials +Integration tests require AWS credentials configured: +```bash +aws configure +# OR +export AWS_ACCESS_KEY_ID=your-access-key +export AWS_SECRET_ACCESS_KEY=your-secret-key +export AWS_DEFAULT_REGION=us-east-1 +``` + +### DynamoDB Table +Tests require a DynamoDB table for testing. Set the table name: +```bash +export TEST_TASKS_TABLE_NAME=cns427-task-api-test +``` + +**Table Schema:** +- **Table Name**: `cns427-task-api-test` (or your custom name) +- **Partition Key**: `task_id` (String) +- **Billing Mode**: Pay-per-request (recommended for testing) + +### IAM Permissions +The AWS credentials need the following DynamoDB permissions: +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "dynamodb:GetItem", + "dynamodb:PutItem", + "dynamodb:UpdateItem", + "dynamodb:DeleteItem", + "dynamodb:Scan" + ], + "Resource": "arn:aws:dynamodb:*:*:table/cns427-task-api-test" + } + ] +} +``` + +## Running Tests + +### All Integration Tests +```bash +make test-integration +# OR +./scripts/run-tests.sh --type integration +# OR +poetry run pytest tests/integration -m integration +``` + +### Specific Test Files +```bash +# DynamoDB integration tests +poetry run pytest tests/integration/test_dynamodb_integration.py -v + +# Error simulation tests +poetry run pytest tests/integration/test_task_api_error_simulation.py -v +``` + +### With Custom Table Name +```bash +TEST_TASKS_TABLE_NAME=my-test-table poetry run pytest tests/integration -m integration +``` + +## Test Data Management + +### Automatic Cleanup +- Each test cleans up its own test data +- Uses `try/finally` blocks to ensure cleanup even if tests fail +- Helper functions: `cleanup_task_from_dynamodb(task_id)` + +### Manual Cleanup +If tests fail and leave test data: +```bash +# List items in test table +aws dynamodb scan --table-name cns427-task-api-test --select COUNT + +# Delete specific item +aws dynamodb delete-item --table-name cns427-task-api-test --key '{"task_id":{"S":"your-task-id"}}' +``` + +## Test Structure + +### Happy Path Tests +- `test_create_task_end_to_end`: Complete task creation flow +- `test_get_task_end_to_end`: Task retrieval with DynamoDB verification +- `test_update_task_end_to_end`: Task update with version control +- `test_delete_task_end_to_end`: Task deletion with cleanup verification +- `test_list_tasks_end_to_end`: Task listing with pagination + +### Error Handling Tests +- `test_get_nonexistent_task_returns_404`: 404 error handling +- `test_create_task_validation_error_returns_400`: Input validation + +### Error Simulation Tests +- `test_create_task_throttling_error_returns_500`: DynamoDB throttling +- `test_create_task_access_denied_error_returns_500`: Permission errors +- 
`test_get_task_resource_not_found_error_returns_500`: Resource errors +- `test_update_task_validation_error_returns_500`: Validation errors +- `test_list_tasks_service_unavailable_error_returns_500`: Service errors + +## Benefits + +### Real AWS Integration +- βœ… Tests actual AWS SDK behavior +- βœ… Validates real DynamoDB operations +- βœ… Tests actual data persistence and retrieval +- βœ… No LocalStack complexity or version mismatches + +### Error Simulation +- βœ… Predictable error conditions +- βœ… Fast execution (no network calls for errors) +- βœ… Comprehensive error scenario coverage +- βœ… No risk to real AWS resources + +### EventBridge Stubbing +- βœ… Captures events for verification +- βœ… No unintended side effects +- βœ… Tests event content and timing +- βœ… Fast and reliable + +## Troubleshooting + +### Common Issues + +**AWS Credentials Not Found** +``` +Error: AWS credentials not configured +Solution: Run 'aws configure' or set environment variables +``` + +**DynamoDB Table Not Found** +``` +Error: ResourceNotFoundException +Solution: Create the test table or check TEST_TASKS_TABLE_NAME +``` + +**Permission Denied** +``` +Error: AccessDeniedException +Solution: Check IAM permissions for DynamoDB operations +``` + +### Debug Mode +Run tests with verbose output: +```bash +poetry run pytest tests/integration -v -s +``` + +### Test Isolation +Each test is isolated and cleans up its own data. If you need to inspect test data, add a breakpoint or sleep before cleanup: +```python +# In test code +import time +time.sleep(30) # Inspect DynamoDB table manually +``` \ No newline at end of file diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/integration/__init__.py b/python-test-samples/cns427-testable-serverless-architecture/tests/integration/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/integration/fakes/__init__.py b/python-test-samples/cns427-testable-serverless-architecture/tests/integration/fakes/__init__.py new file mode 100644 index 00000000..12bfb68e --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/tests/integration/fakes/__init__.py @@ -0,0 +1 @@ +"""Integration test fakes for stubbing external dependencies.""" diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/integration/fakes/dynamodb_fake.py b/python-test-samples/cns427-testable-serverless-architecture/tests/integration/fakes/dynamodb_fake.py new file mode 100644 index 00000000..ec63d4b9 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/tests/integration/fakes/dynamodb_fake.py @@ -0,0 +1,162 @@ +""" +DynamoDB Fake for Error Simulation + +Used only for testing AWS service errors without affecting real data. +Enhanced with conditional update simulation and version token tracking. +""" + +from typing import Any, Dict, Optional + +from boto3.dynamodb.types import TypeDeserializer +from botocore.exceptions import ClientError + + +class DynamoDBFake: + """Fake DynamoDB client for error simulation and conflict testing.""" + + def __init__(self, error_type: str = None): + """ + Initialize fake with specific error type. 
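+        When error_type is None, operations succeed against the in-memory task_store.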
+ + Args: + error_type: Type of error to simulate (throttling, iam, timeout, conditional_check) + """ + self.error_type = error_type + self.table_name = 'cns427-task-api-core-tasks' + self.task_store = {} # In-memory task storage for conflict testing + self._deserializer = TypeDeserializer() + + def _deserialize_item(self, dynamo_item: Dict[str, Any]) -> Dict[str, Any]: + """Convert DynamoDB format to Python dict.""" + return {k: self._deserializer.deserialize(v) for k, v in dynamo_item.items()} + + def put_item(self, **kwargs) -> Dict[str, Any]: + """Simulate put_item with errors and data storage.""" + # Simple error simulation (but NOT conditional_check - that's only for updates) + if self.error_type == 'throttling': + raise ClientError({'Error': {'Code': 'ProvisionedThroughputExceededException', 'Message': 'Rate exceeded'}}, 'PutItem') + elif self.error_type == 'iam': + raise ClientError({'Error': {'Code': 'AccessDeniedException', 'Message': 'Access denied'}}, 'PutItem') + elif self.error_type == 'timeout': + raise ClientError({'Error': {'Code': 'RequestTimeout', 'Message': 'Request timeout'}}, 'PutItem') + # Note: conditional_check errors only apply to update_item, not put_item + + # Store item in fake data store for conflict testing + if 'Item' in kwargs: + item = self._deserialize_item(kwargs['Item']) + task_id = item.get('task_id') + if task_id: + self.task_store[task_id] = item + + return {} + + def get_item(self, **kwargs) -> Dict[str, Any]: + """Simulate get_item with errors and data retrieval.""" + # Simple error simulation + if self.error_type == 'throttling': + raise ClientError({'Error': {'Code': 'ProvisionedThroughputExceededException', 'Message': 'Rate exceeded'}}, 'GetItem') + elif self.error_type == 'iam': + raise ClientError({'Error': {'Code': 'AccessDeniedException', 'Message': 'Access denied'}}, 'GetItem') + elif self.error_type == 'timeout': + raise ClientError({'Error': {'Code': 'RequestTimeout', 'Message': 'Request timeout'}}, 'GetItem') + + # Retrieve item from fake data store + if 'Key' in kwargs: + key = self._deserialize_item(kwargs['Key']) + task_id = key.get('task_id') + if task_id and task_id in self.task_store: + # Convert back to DynamoDB format for response + from boto3.dynamodb.types import TypeSerializer + + serializer = TypeSerializer() + item = {k: serializer.serialize(v) for k, v in self.task_store[task_id].items()} + return {'Item': item} + + return {} # Item not found + + def update_item(self, **kwargs) -> Dict[str, Any]: + """Simulate update_item with errors and conditional update logic.""" + # Simple error simulation + if self.error_type == 'throttling': + raise ClientError({'Error': {'Code': 'ProvisionedThroughputExceededException', 'Message': 'Rate exceeded'}}, 'UpdateItem') + elif self.error_type == 'iam': + raise ClientError({'Error': {'Code': 'AccessDeniedException', 'Message': 'Access denied'}}, 'UpdateItem') + elif self.error_type == 'timeout': + raise ClientError({'Error': {'Code': 'RequestTimeout', 'Message': 'Request timeout'}}, 'UpdateItem') + + # Handle conditional updates for conflict testing + if 'ConditionExpression' in kwargs and 'Key' in kwargs: + key = self._deserialize_item(kwargs['Key']) + task_id = key.get('task_id') + + if task_id and task_id in self.task_store: + current_item = self.task_store[task_id] + + # Check version condition + if 'ExpressionAttributeValues' in kwargs: + expression_values = self._deserialize_item(kwargs['ExpressionAttributeValues']) + expected_version = expression_values.get(':expected_version') + 
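+                    # Optimistic locking: mimic DynamoDB's ConditionExpression by
+                    # comparing the caller's expected version with the stored version.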
current_version = current_item.get('version') + + # Simulate conditional check failure if versions don't match + if expected_version and current_version and expected_version != current_version: + raise ClientError( + {'Error': {'Code': 'ConditionalCheckFailedException', 'Message': 'The conditional request failed'}}, 'UpdateItem' + ) + + # Update the item with new values + if ':new_version' in expression_values: + current_item['version'] = expression_values[':new_version'] + if ':updated_at' in expression_values: + current_item['updated_at'] = expression_values[':updated_at'] + + # Apply other updates based on expression values + for key, value in expression_values.items(): + if key.startswith(':') and key not in [':expected_version', ':new_version', ':updated_at']: + field_name = key[1:] # Remove ':' prefix + current_item[field_name] = value + elif self.error_type == 'conditional_check': + # Force conditional check failure for testing + raise ClientError({'Error': {'Code': 'ConditionalCheckFailedException', 'Message': 'Condition not met'}}, 'UpdateItem') + + return {} + + def scan(self, **kwargs) -> Dict[str, Any]: + """Simulate scan operation for listing all items.""" + # Simple error simulation + if self.error_type == 'throttling': + raise ClientError({'Error': {'Code': 'ProvisionedThroughputExceededException', 'Message': 'Rate exceeded'}}, 'Scan') + elif self.error_type == 'iam': + raise ClientError({'Error': {'Code': 'AccessDeniedException', 'Message': 'Access denied'}}, 'Scan') + elif self.error_type == 'timeout': + raise ClientError({'Error': {'Code': 'RequestTimeout', 'Message': 'Request timeout'}}, 'Scan') + + # Return all items from task store + from boto3.dynamodb.types import TypeSerializer + + serializer = TypeSerializer() + + items = [] + for task_data in self.task_store.values(): + # Convert back to DynamoDB format for response + item = {k: serializer.serialize(v) for k, v in task_data.items()} + items.append(item) + + return {'Items': items} + + def set_task_data(self, task_id: str, task_data: Dict[str, Any]) -> None: + """Set task data in the fake store for testing.""" + self.task_store[task_id] = task_data.copy() + + def get_task_data(self, task_id: str) -> Optional[Dict[str, Any]]: + """Get task data from the fake store.""" + return self.task_store.get(task_id) + + def clear_task_store(self) -> None: + """Clear all task data from the fake store.""" + self.task_store.clear() + + def simulate_concurrent_access(self, task_id: str, version: int) -> None: + """Simulate concurrent access by updating version.""" + if task_id in self.task_store: + self.task_store[task_id]['version'] = version diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/integration/fakes/eventbridge_fake.py b/python-test-samples/cns427-testable-serverless-architecture/tests/integration/fakes/eventbridge_fake.py new file mode 100644 index 00000000..fdc6219a --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/tests/integration/fakes/eventbridge_fake.py @@ -0,0 +1,23 @@ +""" +EventBridge Fake for Error Simulation + +Simulates AWS EventBridge service errors without affecting real events. 
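+All simulated failures are raised as botocore ClientError, matching the real client's error behavior.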
+""" + +from botocore.exceptions import ClientError + + +class EventBridgeFake: + """Fake EventBridge client for simulating AWS service errors.""" + + def __init__(self, error_type: str): + self.error_type = error_type + + def put_events(self, **kwargs): + """Simulate put_events with various error types.""" + if self.error_type == 'throttling': + raise ClientError({'Error': {'Code': 'ThrottlingException', 'Message': 'Rate exceeded'}}, 'PutEvents') + elif self.error_type == 'permission': + raise ClientError({'Error': {'Code': 'AccessDeniedException', 'Message': 'Not authorized'}}, 'PutEvents') + elif self.error_type == 'service': + raise ClientError({'Error': {'Code': 'InternalException', 'Message': 'Service error'}}, 'PutEvents') diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/integration/test_dynamodb_integration.py b/python-test-samples/cns427-testable-serverless-architecture/tests/integration/test_dynamodb_integration.py new file mode 100644 index 00000000..9e787e77 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/tests/integration/test_dynamodb_integration.py @@ -0,0 +1,330 @@ +""" +DynamoDB Integration Tests - Real AWS DynamoDB + +Tests TaskRepository against actual DynamoDB table. +Focuses on create task flow with real data persistence. +""" + +import json +import os +import time +import uuid +from datetime import UTC, datetime +from unittest.mock import Mock + +import pytest + +# Set table name for integration tests from config +from infrastructure.config import InfrastructureConfig +from services.task_service.domain.exceptions import ConflictError +from services.task_service.domain.task_service import TaskService +from services.task_service.models.task import Task, TaskPriority, TaskStatus +from shared.integration.dynamodb_adapter import DynamoDBTaskRepository +from tests.integration.fakes.dynamodb_fake import DynamoDBFake +from tests.unit.test_helpers import create_api_gateway_event, create_test_context + +config = InfrastructureConfig() +os.environ['TASKS_TABLE_NAME'] = config.tasks_table_name() + + +@pytest.fixture +def task_repository(): + """Create TaskRepository instance for testing.""" + return DynamoDBTaskRepository(table_name=os.environ['TASKS_TABLE_NAME']) + + +@pytest.fixture +def test_task_id(): + """Generate unique task ID for testing.""" + return f'test-{uuid.uuid4()}' + + +@pytest.fixture +def cleanup_task(task_repository): + """Cleanup fixture to delete test tasks after each test.""" + task_ids = [] + + def register(task_id): + task_ids.append(task_id) + + yield register + + # Cleanup after test + for task_id in task_ids: + try: + # Get the task to retrieve its current version + task = task_repository.get_task(task_id) + if task: + task_repository.delete_task(task_id, task.version) + else: + print(f'[Cleanup] Task {task_id} not found, skipping delete') + except Exception as e: + # Log warning but don't fail the test + print(f'[Cleanup] Warning: Failed to delete task {task_id}: {e}') + # Continue with other cleanups + + +@pytest.fixture +def mock_event_publisher(): + """Create a mock event publisher for testing.""" + return Mock() + + +@pytest.fixture +def fake_task_service_with_error(request): + """Fixture to create TaskService with fake DynamoDB for error simulation.""" + error_type = request.param if hasattr(request, 'param') else 'throttling' + + # Create fake DynamoDB client with error simulation + fake_dynamodb = DynamoDBFake(error_type=error_type) + + # Create real repository but inject fake 
DynamoDB client + fake_repo = DynamoDBTaskRepository(table_name=os.environ['TASKS_TABLE_NAME']) + fake_repo.dynamodb = fake_dynamodb + + # Create service with fake repo and mock publisher + mock_publisher = Mock() + fake_service = TaskService(fake_repo, mock_publisher) + + # Inject fake service into handler module + import services.task_service.handler as handler_module + + handler_module.task_service = fake_service + + yield fake_service + + +class TestDynamoDBIntegration: + """Test TaskRepository with real DynamoDB.""" + + def test_create_and_retrieve_task(self, task_repository, test_task_id, cleanup_task): + """Test creating and retrieving a task from real DynamoDB.""" + # GIVEN a new task + now = datetime.now(UTC) + task = Task( + task_id=test_task_id, + title='Integration Test Task', + description='Testing real DynamoDB', + dependencies=[], + status=TaskStatus.PENDING, + priority=TaskPriority.MEDIUM, + created_at=now, + updated_at=now, + version=int(now.timestamp() * 1000), + ) + cleanup_task(test_task_id) + + # WHEN saving to DynamoDB + task_repository.create_task(task) + + # THEN should be able to retrieve it + retrieved = task_repository.get_task(test_task_id) + assert retrieved is not None + assert retrieved.task_id == test_task_id + assert retrieved.title == 'Integration Test Task' + assert retrieved.description == 'Testing real DynamoDB' + + def test_create_task_with_dependencies(self, task_repository, cleanup_task): + """Test creating task with dependencies.""" + # GIVEN two tasks with dependency relationship + task1_id = f'test-{uuid.uuid4()}' + task2_id = f'test-{uuid.uuid4()}' + cleanup_task(task1_id) + cleanup_task(task2_id) + + now = datetime.now(UTC) + task1 = Task( + task_id=task1_id, + title='Task 1', + dependencies=[], + status=TaskStatus.PENDING, + priority=TaskPriority.MEDIUM, + created_at=now, + updated_at=now, + version=int(now.timestamp() * 1000), + ) + + task2 = Task( + task_id=task2_id, + title='Task 2', + dependencies=[task1_id], + status=TaskStatus.PENDING, + priority=TaskPriority.MEDIUM, + created_at=now, + updated_at=now, + version=int(now.timestamp() * 1000), + ) + + # WHEN saving both tasks + task_repository.create_task(task1) + task_repository.create_task(task2) + + # THEN should retrieve task with dependencies + retrieved = task_repository.get_task(task2_id) + assert retrieved is not None + assert retrieved.dependencies == [task1_id] + + def test_conditional_update(self, task_repository, test_task_id, cleanup_task): + """Test conflict resolution with real DynamoDB conditional updates.""" + + # GIVEN a task in real DynamoDB + now = datetime.now(UTC) + task = Task( + task_id=test_task_id, + title='Conflict Resolution Test', + description='Testing real DynamoDB conditional updates', + status=TaskStatus.PENDING, + priority=TaskPriority.MEDIUM, + dependencies=[], + created_at=now, + updated_at=now, + version=int(now.timestamp() * 1000), + ) + cleanup_task(test_task_id) + + # Save initial task + task_repository.create_task(task) + original_version = task.version + + # WHEN attempting update with correct version + time.sleep(0.001) # Ensure different timestamp + task.title = 'Updated Title' + # Generate new version (simulating domain service behavior) + new_version = int(datetime.now(UTC).timestamp() * 1000) + task.version = new_version + result = task_repository.update_task(task, expected_version=original_version) + + # THEN update should succeed + assert result is not None + assert result.title == 'Updated Title' + assert result.version == new_version + 
assert result.version != original_version + + # WHEN attempting another update with stale version + task.title = 'Should Fail' + another_new_version = int(datetime.now(UTC).timestamp() * 1000) + task.version = another_new_version + + # THEN second update should raise ConflictError (using stale expected_version) + with pytest.raises(ConflictError) as exc_info: + task_repository.update_task(task, expected_version=original_version) # Wrong! Should be new_version + + # AND error should include current task state + assert exc_info.value.current_task is not None + assert exc_info.value.current_task['title'] == 'Updated Title' + + +class TestDynamoDBErrorSimulation: + """Test error handling with DynamoDB fake through full handler integration.""" + + @pytest.mark.parametrize('fake_task_service_with_error', ['throttling'], indirect=True) + def test_throttling_error_returns_503_with_retry_info(self, fake_task_service_with_error): + """Test that DynamoDB throttling returns 503 Service Unavailable with retry guidance.""" + # GIVEN a system experiencing DynamoDB throttling (injected via fixture) + # WHEN client tries to create a task + event = create_api_gateway_event(method='POST', path='/tasks', body={'title': 'New Task', 'description': 'Test'}) + + lambda_context = create_test_context() + from services.task_service.handler import lambda_handler + + response = lambda_handler(event, lambda_context) + + # THEN should return 503 Service Unavailable + assert response['statusCode'] == 503, f'Expected 503 but got {response["statusCode"]}' + + # AND response should include retry guidance in message + body = json.loads(response['body']) + message = body.get('message', '').lower() + assert 'retry' in message or 'unavailable' in message or 'capacity' in message, ( + f'Expected retry guidance in message but got: {body.get("message")}' + ) + + @pytest.mark.parametrize('fake_task_service_with_error', ['iam'], indirect=True) + def test_iam_error_returns_500_internal_error(self, fake_task_service_with_error): + """Test that IAM/permission errors return 500 Internal Server Error (not exposed to client).""" + # GIVEN a system with IAM permission issues (injected via fixture) + # WHEN client tries to create a task + event = create_api_gateway_event(method='POST', path='/tasks', body={'title': 'New Task', 'description': 'Test'}) + + lambda_context = create_test_context() + from services.task_service.handler import lambda_handler + + response = lambda_handler(event, lambda_context) + + # THEN should return 500 Internal Server Error + assert response['statusCode'] == 500, f'Expected 500 but got {response["statusCode"]}' + + # AND response should NOT expose IAM/permission details to client + body = json.loads(response['body']) + message = body.get('message', '').lower() + assert 'permission' not in message and 'access' not in message, f'Should not expose permission details to client: {body.get("message")}' + assert 'internal' in message or 'error' in message, f'Expected generic internal error message but got: {body.get("message")}' + + @pytest.mark.parametrize('fake_task_service_with_error', ['timeout'], indirect=True) + def test_timeout_error_returns_503_with_retry_info(self, fake_task_service_with_error): + """Test that timeout errors return 503 with retry guidance.""" + # GIVEN a system experiencing timeouts (injected via fixture) + # WHEN client tries to create a task + event = create_api_gateway_event(method='POST', path='/tasks', body={'title': 'New Task', 'description': 'Test'}) + + lambda_context = 
create_test_context() + from services.task_service.handler import lambda_handler + + response = lambda_handler(event, lambda_context) + + # THEN should return 503 Service Unavailable + assert response['statusCode'] == 503, f'Expected 503 but got {response["statusCode"]}' + + # AND response should include retry guidance + body = json.loads(response['body']) + message = body.get('message', '').lower() + assert 'retry' in message or 'unavailable' in message or 'timeout' in message, ( + f'Expected retry guidance in message but got: {body.get("message")}' + ) + + @pytest.mark.parametrize('fake_task_service_with_error', ['conditional_check'], indirect=True) + def test_conditional_check_failure_returns_409_conflict(self, fake_task_service_with_error): + """Test that conditional check failures return 409 Conflict with refresh guidance.""" + # GIVEN a system with conditional check failures and an existing task + # Pre-populate the fake DynamoDB with a task + task_id = 'test-task-123' + now = datetime.now(UTC) + fake_task_data = { + 'task_id': task_id, + 'title': 'Original Task', + 'description': 'Test', + 'status': 'pending', + 'priority': 'medium', + 'dependencies': [], + 'created_at': now.isoformat(), + 'updated_at': now.isoformat(), + 'version': int(now.timestamp() * 1000), + } + # Access the fake DynamoDB through the repository + fake_repo = fake_task_service_with_error.repository + fake_repo.dynamodb.set_task_data(task_id, fake_task_data) + + # WHEN client tries to update the task (will trigger conditional check failure) + update_event = create_api_gateway_event( + method='PUT', + path=f'/tasks/{task_id}', + body={ + 'title': 'Updated Task', + 'version': 1234567890123, # Valid format but will trigger conditional check failure + }, + ) + + lambda_context = create_test_context() + from services.task_service.handler import lambda_handler + + response = lambda_handler(update_event, lambda_context) + + # THEN should return 409 Conflict + assert response['statusCode'] == 409, f'Expected 409 but got {response["statusCode"]}' + + # AND response should indicate resource was updated and suggest refresh + body = json.loads(response['body']) + message = body.get('message', '').lower() + assert 'updated' in message or 'modified' in message or 'refresh' in message or 'process' in message, ( + f'Expected message about stale data/refresh but got: {body.get("message")}' + ) diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/integration/test_eventbridge_integration.py b/python-test-samples/cns427-testable-serverless-architecture/tests/integration/test_eventbridge_integration.py new file mode 100644 index 00000000..2f04c4f5 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/tests/integration/test_eventbridge_integration.py @@ -0,0 +1,257 @@ +""" +EventBridge Integration Tests + +Tests EventPublisher against real EventBridge with test harness verification. +Uses fakes only for error simulation. 
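+Requires the deployed EventBridge test harness (make deploy-test-infra).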
+""" + +import json +import os +import time +import uuid +from datetime import UTC, datetime +from unittest.mock import Mock + +import boto3 +import pytest + +from infrastructure.config import InfrastructureConfig +from services.task_service.domain.task_service import TaskService +from services.task_service.models.task import Task, TaskPriority, TaskStatus +from shared.integration.dynamodb_adapter import DynamoDBTaskRepository +from shared.integration.eventbridge_adapter import EventBridgePublisher +from tests.integration.fakes.eventbridge_fake import EventBridgeFake +from tests.unit.test_helpers import create_api_gateway_event, create_test_context + +# Real AWS resources from config +config = InfrastructureConfig() +EVENT_BUS_NAME = os.environ.get('EVENT_BUS_NAME', config.event_bus_name()) +TEST_HARNESS_TABLE = os.environ.get('TEST_HARNESS_TABLE', config.test_results_table_name()) +AWS_REGION = os.environ.get('AWS_DEFAULT_REGION', os.environ.get('AWS_REGION', 'us-west-2')) + + +@pytest.fixture +def test_run_id(): + """Generate unique test run ID for event correlation.""" + return str(uuid.uuid4()) + + +@pytest.fixture +def dynamodb_client(): + """DynamoDB client for test harness verification.""" + return boto3.client('dynamodb', region_name=AWS_REGION) + + +@pytest.fixture +def test_task(test_run_id): + """Create test task for event publishing.""" + now = datetime.now(UTC) + return Task( + task_id=str(uuid.uuid4()), + title=f'TEST-{test_run_id}', + description='Task for EventBridge testing', + dependencies=[], + status=TaskStatus.PENDING, + priority=TaskPriority.MEDIUM, + created_at=now, + updated_at=now, + version=int(now.timestamp() * 1000), + ) + + +def wait_for_event_processing(seconds=5): + """Wait for EventBridge β†’ Lambda β†’ DynamoDB pipeline.""" + time.sleep(seconds) + + +def query_test_harness(dynamodb_client, test_run_id: str): + """Query test harness table by test_run_id set to TEST-{test_run_id}.""" + response = dynamodb_client.query( + TableName=TEST_HARNESS_TABLE, + KeyConditionExpression='test_run_id = :test_run_id', + ExpressionAttributeValues={':test_run_id': {'S': f'TEST-{test_run_id}'}}, + ) + return response.get('Items', []) + + +class TestEventBridgeIntegration: + """Test EventPublisher with real EventBridge and test harness verification.""" + + def test_publish_task_created_event(self, test_run_id, test_task, dynamodb_client): + """Test task created event is published and appears in test harness.""" + # GIVEN EventPublisher with test event bus + from services.task_service.models.task import TaskCreatedEvent + + publisher = EventBridgePublisher(event_bus_name=EVENT_BUS_NAME) + + # WHEN publishing TaskCreated event with TEST- prefix for test harness + event = TaskCreatedEvent(test_task, source='TEST-cns427-task-api', detail_type_prefix='TEST-') + publisher.publish_event(event) + + # Wait for event processing + wait_for_event_processing() + + # THEN event should appear in test harness table + events = query_test_harness(dynamodb_client, test_run_id) + assert len(events) > 0, 'Event not found in test harness' + + # Verify event data + event_item = events[0] + assert event_item['test_run_id']['S'] == f'TEST-{test_run_id}' + assert 'event_timestamp' in event_item + + def test_publish_event_with_dependencies(self, test_run_id, dynamodb_client): + """Test event with task dependencies is published correctly.""" + # GIVEN task with dependencies + from services.task_service.models.task import TaskCreatedEvent + + now = datetime.now(UTC) + task = Task( + 
task_id=str(uuid.uuid4()), + title=f'TEST-{test_run_id}', + description='Task with dependencies', + dependencies=['task-1', 'task-2'], + status=TaskStatus.PENDING, + priority=TaskPriority.MEDIUM, + created_at=now, + updated_at=now, + version=int(now.timestamp() * 1000), + ) + publisher = EventBridgePublisher(event_bus_name=EVENT_BUS_NAME) + + # WHEN publishing event with TEST- prefix for test harness + event = TaskCreatedEvent(task, source='TEST-cns427-task-api', detail_type_prefix='TEST-') + publisher.publish_event(event) + + # Wait for event processing + wait_for_event_processing() + + # THEN event should be in test harness + events = query_test_harness(dynamodb_client, test_run_id) + assert len(events) > 0 + + def test_publish_multiple_events(self, test_run_id, dynamodb_client): + """Test multiple events are published and tracked separately.""" + # GIVEN multiple tasks with same test_run_id in title for tracking + from services.task_service.models.task import TaskCreatedEvent + + now = datetime.now(UTC) + tasks = [ + Task( + task_id=str(uuid.uuid4()), + title=f'TEST-{test_run_id}', + description=f'Test task {i}', + dependencies=[], + status=TaskStatus.PENDING, + priority=TaskPriority.MEDIUM, + created_at=now, + updated_at=now, + version=int(now.timestamp() * 1000), + ) + for i in range(3) + ] + publisher = EventBridgePublisher(event_bus_name=EVENT_BUS_NAME) + + # WHEN publishing multiple events with TEST- prefix for test harness + for task in tasks: + event = TaskCreatedEvent(task, source='TEST-cns427-task-api', detail_type_prefix='TEST-') + publisher.publish_event(event) + + wait_for_event_processing() + + # THEN all events should be in test harness + events = query_test_harness(dynamodb_client, test_run_id) + assert len(events) >= 3 + + +@pytest.fixture +def fake_task_service_with_eventbridge_error(request): + """Fixture to inject fake TaskService with EventBridge error simulation.""" + error_type = request.param if hasattr(request, 'param') else 'throttling' + + # Create mock repository (no errors) + fake_repo = Mock() + + # Create fake event publisher with error + fake_publisher = EventBridgePublisher(event_bus_name=EVENT_BUS_NAME) + fake_publisher.events_client = EventBridgeFake(error_type) + + # Create service with fake repo and fake publisher + fake_service = TaskService(fake_repo, fake_publisher) + + # Inject fake service into handler module + import services.task_service.handler as handler_module + + handler_module.task_service = fake_service + + yield fake_service + + +class TestEventBridgeErrorSimulation: + """Test EventBridge error handling through full handler integration.""" + + @pytest.mark.parametrize('fake_task_service_with_eventbridge_error', ['throttling'], indirect=True) + def test_throttling_error_returns_500_internal_error(self, fake_task_service_with_eventbridge_error): + """Test that EventBridge throttling returns 500 Internal Server Error.""" + # GIVEN a system experiencing EventBridge throttling (injected via fixture) + + # WHEN client tries to create a task (which triggers event publishing) + event = create_api_gateway_event(method='POST', path='/tasks', body={'title': 'New Task', 'description': 'Test'}) + + lambda_context = create_test_context() + from services.task_service.handler import lambda_handler + + response = lambda_handler(event, lambda_context) + + # THEN should return 500 Internal Server Error + assert response['statusCode'] == 500, f'Expected 500 but got {response["statusCode"]}' + + # AND response should include generic error message (not expose 
EventBridge details) + body = json.loads(response['body']) + message = body.get('message', '').lower() + assert 'internal' in message or 'error' in message, f'Expected generic internal error message but got: {body.get("message")}' + # Ensure EventBridge-specific details are not exposed + assert 'eventbridge' not in message and 'event bus' not in message, f'Should not expose EventBridge details: {body.get("message")}' + + @pytest.mark.parametrize('fake_task_service_with_eventbridge_error', ['permission'], indirect=True) + def test_permission_error_returns_500_internal_error(self, fake_task_service_with_eventbridge_error): + """Test that EventBridge permission errors return 500 Internal Server Error.""" + # GIVEN a system with EventBridge permission issues (injected via fixture) + + # WHEN client tries to create a task + event = create_api_gateway_event(method='POST', path='/tasks', body={'title': 'New Task', 'description': 'Test'}) + + lambda_context = create_test_context() + from services.task_service.handler import lambda_handler + + response = lambda_handler(event, lambda_context) + + # THEN should return 500 Internal Server Error + assert response['statusCode'] == 500, f'Expected 500 but got {response["statusCode"]}' + + # AND response should NOT expose permission details to client + body = json.loads(response['body']) + message = body.get('message', '').lower() + assert 'permission' not in message and 'access' not in message, f'Should not expose permission details to client: {body.get("message")}' + assert 'internal' in message or 'error' in message, f'Expected generic internal error message but got: {body.get("message")}' + + @pytest.mark.parametrize('fake_task_service_with_eventbridge_error', ['service'], indirect=True) + def test_service_error_returns_500_internal_error(self, fake_task_service_with_eventbridge_error): + """Test that EventBridge service errors return 500 Internal Server Error.""" + # GIVEN a system experiencing EventBridge service errors (injected via fixture) + + # WHEN client tries to create a task + event = create_api_gateway_event(method='POST', path='/tasks', body={'title': 'New Task', 'description': 'Test'}) + + lambda_context = create_test_context() + from services.task_service.handler import lambda_handler + + response = lambda_handler(event, lambda_context) + + # THEN should return 500 Internal Server Error + assert response['statusCode'] == 500, f'Expected 500 but got {response["statusCode"]}' + + # AND response should include generic error message + body = json.loads(response['body']) + message = body.get('message', '').lower() + assert 'internal' in message or 'error' in message, f'Expected generic internal error message but got: {body.get("message")}' diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/property_based/README.md b/python-test-samples/cns427-testable-serverless-architecture/tests/property_based/README.md new file mode 100644 index 00000000..91d2e2b6 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/tests/property_based/README.md @@ -0,0 +1,1044 @@ +# Property-Based Tests for Circular Dependency Detection + +## Overview + +This directory contains property-based tests for the circular dependency detection logic in the Task API. These tests were generated from formal requirements documents using Kiro's correctness testing capability and the Hypothesis library. + +## What is Property-Based Testing? 
+ +Property-based testing is a testing approach where you define **properties** (invariants) that should always be true, and the testing framework automatically generates hundreds of test cases to verify those properties. + +### Traditional Testing vs Property-Based Testing + +**Traditional Example-Based Test:** +```python +def test_self_dependency(): + assert has_circular_dependency('task-1', 'task-1', {}) == True +``` + +**Property-Based Test:** +```python +@given(task_id=st.text(min_size=1)) +def test_self_dependency_always_circular(task_id): + # Property: ANY task depending on itself is circular + assert has_circular_dependency(task_id, task_id, {}) == True +``` + +The property-based test automatically generates hundreds of different task IDs to verify the property holds universally. + +## Test Organization + +Tests are organized by requirement: + +- **test_self_dependency_properties.py** - Requirement 1: Self-dependency detection +- **test_direct_cycle_properties.py** - Requirement 2: Direct two-task cycles +- **test_complex_cycle_properties.py** - Requirement 3: Complex multi-task cycles +- **test_linear_chain_properties.py** - Requirement 4: Valid linear chains +- **test_empty_graph_properties.py** - Requirement 5: Empty graph handling +- **test_validation_properties.py** - Requirement 6: Validation integration + +## Properties Being Tested + +### 1. Self-Dependency Properties +- **Reflexivity**: `has_circular(A, A, graph) == True` for any A and any graph +- **Independence**: Self-dependencies are detected regardless of graph state + +### 2. Direct Cycle Properties +- **Symmetry**: If Aβ†’B exists, then Bβ†’A creates a cycle +- **Bidirectionality**: Aβ†’Bβ†’A is always circular + +### 3. Complex Cycle Properties +- **Transitivity**: Aβ†’Bβ†’Cβ†’...β†’A is circular for any chain length +- **Graph Traversal**: DFS correctly identifies cycles in complex graphs + +### 4. Linear Chain Properties +- **Non-circularity**: Linear chains without loops return false +- **Extensibility**: Linear chains can be extended indefinitely +- **Distinction**: System correctly distinguishes linear from circular + +### 5. Empty Graph Properties +- **Permissiveness**: Empty graphs allow any dependency between different tasks +- **Self-dependency Exception**: Self-dependencies are still detected in empty graphs + +### 6. Validation Properties +- **Error Handling**: CircularDependencyError raised for cycles +- **Error Messages**: Error messages include problematic dependency IDs +- **Valid Cases**: Validation passes for non-circular dependencies + +## Running the Tests + +### Run all property-based tests: +```bash +poetry run pytest tests/property_based/ -v +``` + +### Run specific test file: +```bash +poetry run pytest tests/property_based/test_self_dependency_properties.py -v +``` + +### Run with Hypothesis statistics: +```bash +poetry run pytest tests/property_based/ -v --hypothesis-show-statistics +``` + +### Run with more examples (default is 100): +```bash +poetry run pytest tests/property_based/ -v --hypothesis-seed=random +``` + +## Test Generation Process + +These tests were generated using the following process: + +1. **Requirements Analysis** - Formal requirements written in EARS format +2. **Property Identification** - Identified invariants and properties from requirements +3. **Test Generation** - Created property-based tests using Hypothesis +4. **Verification** - Ran tests to ensure comprehensive coverage + +## Benefits of Property-Based Testing + +### 1. 
Comprehensive Coverage +- Tests hundreds of input combinations automatically +- Finds edge cases you might not think of manually +- Verifies properties hold universally, not just for specific examples + +### 2. Better Bug Detection +- Hypothesis automatically finds minimal failing examples +- Shrinks complex failing inputs to simplest form +- Reveals assumptions and edge cases + +### 3. Living Documentation +- Properties serve as executable specifications +- Tests document what should always be true +- Requirements directly map to test properties + +### 4. Regression Prevention +- Once a bug is found, Hypothesis remembers it +- Prevents regression by testing the same edge case +- Builds confidence in refactoring + +## Example: How Hypothesis Finds Bugs + +If there's a bug in the circular dependency detection, Hypothesis will: + +1. **Generate** hundreds of random test cases +2. **Detect** the failing case +3. **Shrink** the input to the minimal failing example +4. **Report** the simplest case that breaks the property + +Example output: +``` +Falsifying example: test_three_task_cycle( + task_a='550e8400-e29b-41d4-a716-446655440000', + task_b='6ba7b810-9dad-11d1-80b4-00c04fd430c8', + task_c='6ba7b814-9dad-11d1-80b4-00c04fd430c8' +) +``` + +## Realistic Test Data + +Our property-based tests use the same data types as production code: + +```python +# Production code (models.py) +id=str(uuid.uuid4()) # e.g., "550e8400-e29b-41d4-a716-446655440000" + +# Property-based tests +task_ids = st.uuids().map(str) # Generates same format as production +``` + +**Benefits:** +- Tests use realistic UUIDs instead of arbitrary strings +- More representative of actual production usage +- Hypothesis can still shrink UUIDs to simpler examples when bugs are found +- Ensures validation logic works with real UUID formats + +## Integration with Requirements + +Each test file maps directly to requirements: + +| Test File | Requirements | Properties Tested | +|-----------|-------------|-------------------| +| test_self_dependency_properties.py | 1.1, 1.2, 1.3 | Reflexivity, Independence | +| test_direct_cycle_properties.py | 2.1, 2.2, 2.3 | Symmetry, Bidirectionality | +| test_complex_cycle_properties.py | 3.1, 3.2, 3.3, 3.4 | Transitivity, DFS traversal | +| test_linear_chain_properties.py | 4.1, 4.2, 4.3 | Non-circularity, Distinction | +| test_empty_graph_properties.py | 5.1, 5.2, 5.3 | Permissiveness, Exceptions | +| test_validation_properties.py | 6.1-6.5 | Error handling, Integration | + +## Further Reading + +- [Hypothesis Documentation](https://hypothesis.readthedocs.io/) +- [Property-Based Testing Introduction](https://hypothesis.works/articles/what-is-property-based-testing/) +- [Kiro Correctness Testing](https://kiro.dev/docs/specs/correctness/) + + +## Interpreting Hypothesis Statistics + +When you run tests with `--hypothesis-show-statistics`, you get detailed information about how Hypothesis generated and executed test cases. 
Here's how to interpret the output: + +### Example Output + +``` +tests/property_based/test_validation_properties.py::TestValidationIntegrationProperties::test_validation_with_complex_graph: + +- during generate phase (0.10 seconds): + - Typical runtimes: < 1ms, of which < 1ms in data generation + - 100 passing examples, 0 failing examples, 138 invalid examples + - Events: + * 35.71%, invalid because: failed to satisfy assume() in test_validation_with_complex_graph (line 154) + * 8.40%, invalid because: failed to satisfy assume() in test_validation_with_complex_graph (line 157) + * 6.72%, invalid because: failed to satisfy assume() in test_validation_with_complex_graph (line 155) + * 5.04%, invalid because: failed to satisfy assume() in test_validation_with_complex_graph (line 156) + - Stopped because settings.max_examples=100 +``` + +### Breaking Down Each Section + +#### 1. Generate Phase Duration +``` +during generate phase (0.10 seconds) +``` +- **What it means**: Total time spent generating random inputs and running test cases +- **Good**: < 1 second per test +- **Acceptable**: 1-5 seconds per test +- **Slow**: > 5 seconds per test (may need optimization) + +#### 2. Runtime Performance +``` +Typical runtimes: < 1ms, of which < 1ms in data generation +``` +- **First value**: Time to run each individual test case +- **Second value**: Time to generate the random input data +- **Good sign**: Both values < 1ms means very efficient tests + +#### 3. Test Results Summary +``` +100 passing examples, 0 failing examples, 138 invalid examples +``` + +| Metric | Meaning | Interpretation | +|--------|---------|----------------| +| **Passing examples** | Test cases that met all constraints and passed assertions | βœ… This is your target (default: 100) | +| **Failing examples** | Test cases that failed assertions (found bugs) | βœ… 0 means no bugs found | +| **Invalid examples** | Test cases rejected by `assume()` statements | ⚠️ Normal, but high numbers may indicate inefficiency | + +#### 4. Invalid Examples Breakdown +``` +Events: +* 35.71%, invalid because: failed to satisfy assume() in test_validation_with_complex_graph (line 154) +* 8.40%, invalid because: failed to satisfy assume() in test_validation_with_complex_graph (line 157) +``` + +This shows **why** examples were rejected. Common reasons: +- `assume()` statements filtering out unwanted inputs +- Constraints that are hard to satisfy randomly +- Overlapping or conflicting requirements + +### Understanding Invalid Examples + +#### What Causes Invalid Examples? + +In the example above, the test has these constraints: +```python +assume(len({task_a, task_b, task_c}) == 3) # Line 154: All 3 tasks must be different +assume(task_a not in other_deps) # Line 155: task_a not in other_deps list +assume(task_b not in other_deps) # Line 156: task_b not in other_deps list +assume(task_c not in other_deps) # Line 157: task_c not in other_deps list +``` + +Hypothesis generates random inputs, but many don't meet these constraints: +- **35.71% rejected at line 154**: Generated duplicate task IDs (e.g., task_a='x', task_b='x') +- **8.40% rejected at line 157**: task_c appeared in other_deps list +- And so on... + +#### Is This a Problem? 
+ +**Usually no!** Here's when to worry: + +| Rejection Rate | Status | Action | +|----------------|--------|--------| +| < 50% | βœ… Good | No action needed | +| 50-80% | ⚠️ Acceptable | Consider optimization if slow | +| 80-95% | ⚠️ Inefficient | Should optimize strategies | +| > 95% | ❌ Problem | Must optimize - Hypothesis will warn | + +#### When to Optimize + +Optimize if you see: +- **Very high rejection rate** (>90%) - Hypothesis struggles to find valid inputs +- **Slow generation** (>5 seconds) - Too much time wasted on invalid examples +- **Warning messages** - Hypothesis explicitly warns about health check failures + +### Optimization Strategies + +#### Before: Using `assume()` (Less Efficient) +```python +@given(task_a=task_ids, task_b=task_ids, task_c=task_ids) +def test_something(self, task_a, task_b, task_c): + assume(len({task_a, task_b, task_c}) == 3) # Rejects duplicates + # Test logic... +``` +**Problem**: Generates many duplicate combinations that get rejected + +#### After: Using Specific Strategies (More Efficient) +```python +@given(tasks=st.lists(task_ids, min_size=3, max_size=3, unique=True)) +def test_something(self, tasks): + task_a, task_b, task_c = tasks # Guaranteed to be different + # Test logic... +``` +**Benefit**: Only generates valid inputs, no rejections! + +### Real-World Example Analysis + +Let's analyze the example output: + +``` +100 passing examples, 0 failing examples, 138 invalid examples +``` + +**Calculations:** +- Total attempts: 100 + 138 = 238 +- Rejection rate: 138/238 = 58% +- Success rate: 100/238 = 42% +- Time per attempt: 0.10s / 238 = 0.0004s (very fast!) + +**Verdict:** βœ… Acceptable +- Got all 100 passing examples needed +- Only took 0.10 seconds despite 58% rejection rate +- No optimization needed unless you want to show best practices + +### Quick Reference: What's Good vs Bad + +#### βœ… Good Signs +- 100+ passing examples +- 0 failing examples (unless you're testing for bugs) +- Generation time < 1 second +- Runtime per test < 1ms +- Rejection rate < 50% + +#### ⚠️ Warning Signs +- Rejection rate > 80% +- Generation time > 5 seconds +- Hypothesis warnings about health checks +- Very few passing examples despite long runtime + +#### ❌ Problem Signs +- Rejection rate > 95% +- Generation time > 30 seconds +- Hypothesis errors or test timeouts +- "Unable to satisfy assumptions" errors + +### Stopping Conditions + +``` +Stopped because settings.max_examples=100 +``` + +Hypothesis stops when it reaches one of these conditions: +- **max_examples reached**: Successfully ran target number of valid tests (default: 100) +- **Deadline exceeded**: Took too long (default: 200ms per test) +- **Health check failed**: Too many invalid examples (>90% rejection) +- **Bug found**: Failing example discovered + +### Advanced: Configuring Hypothesis + +You can adjust settings in `pytest.ini` or per-test: + +```python +from hypothesis import settings + +@settings(max_examples=1000) # Run 1000 examples instead of 100 +@given(task_id=task_ids) +def test_something(self, task_id): + ... +``` + +Common settings: +- `max_examples`: Number of valid test cases to run (default: 100) +- `deadline`: Max time per test case (default: 200ms) +- `suppress_health_check`: Disable specific health checks (use carefully!) + +### Summary Checklist + +When reviewing Hypothesis statistics, check: + +- [ ] **Passing examples**: Did you get 100+ valid tests? +- [ ] **Failing examples**: Are there 0 (or expected failures)? +- [ ] **Generation time**: Is it < 1 second? 
+- [ ] **Rejection rate**: Is it < 80%? +- [ ] **Runtime**: Is each test < 1ms? +- [ ] **Warnings**: Are there any Hypothesis warnings? + +If all checks pass, your property-based tests are working efficiently! πŸŽ‰ + + +## When to Use Property-Based Testing + +### Best Use Cases for PBT + +Property-based testing shines in specific scenarios. Here's a practical guide: + +#### βœ… Excellent Use Cases + +**1. Pure Business Logic / Domain Logic** +```python +# Perfect for PBT - pure functions with clear invariants +def has_circular_dependency(task_id, dependency_id, graph): + # No external dependencies, deterministic, testable properties + ... +``` + +**Why it works:** +- Pure functions with no side effects +- Clear mathematical properties (reflexivity, transitivity, symmetry) +- Input/output relationships that should hold universally +- No dependency on specific data values + +**Examples:** +- Validation logic (email, phone, data formats) +- State machines (valid transitions) +- Data transformations (parsing, serialization) +- Algorithm correctness (sorting, searching, graph algorithms) +- Business rules (pricing, discounts, eligibility) + +**2. Parsers and Serializers** +```python +@given(data=st.dictionaries(st.text(), st.integers())) +def test_json_roundtrip(data): + # Property: serialize then deserialize should equal original + assert json.loads(json.dumps(data)) == data +``` + +**3. Data Structure Invariants** +```python +@given(items=st.lists(st.integers())) +def test_sorted_list_invariant(items): + sorted_items = sorted(items) + # Property: each element <= next element + for i in range(len(sorted_items) - 1): + assert sorted_items[i] <= sorted_items[i + 1] +``` + +**4. Encoding/Decoding** +```python +@given(text=st.text()) +def test_base64_roundtrip(text): + # Property: encode then decode should equal original + encoded = base64.b64encode(text.encode()) + decoded = base64.b64decode(encoded).decode() + assert decoded == text +``` + +#### ⚠️ Limited Use Cases + +**1. Integration Tests with External Systems** + +**Problem:** Random data doesn't work with real external systems +```python +# ❌ BAD: Can't use random event sources with real EventBridge +@given(source=st.text(), detail_type=st.text()) +def test_eventbridge_integration(source, detail_type): + # EventBridge test harness requires source='TEST-...' + # Random data won't match test harness expectations + publisher.publish_event(source, detail_type) +``` + +**Solution:** Use traditional example-based tests for integration +```python +# βœ… GOOD: Specific data for integration tests +def test_eventbridge_integration(): + # Test harness expects specific format + publisher.publish_event( + source='TEST-cns427-task-api', + detail_type='TEST-TaskCreated' + ) +``` + +**When to use PBT for integration:** +- Testing error handling with various invalid inputs +- Testing retry logic with different failure scenarios +- Testing idempotency with repeated operations + +**2. 
End-to-End Tests** + +**Problem:** E2E tests require specific data flows and state +```python +# ❌ BAD: E2E requires specific user journey +@given(user_action=st.text()) +def test_user_workflow(user_action): + # Can't test "user logs in β†’ creates task β†’ completes task" + # with random actions +``` + +**Solution:** Use traditional E2E tests with specific scenarios +```python +# βœ… GOOD: Specific user journey +def test_complete_task_workflow(): + user = login_user("test@example.com") + task = create_task("Complete report") + complete_task(task.id) + assert task.status == "completed" +``` + +**3. Tests Requiring Specific Data** + +**Problem:** Some tests need exact data values +```python +# ❌ BAD: Test requires specific test harness data +@given(task_id=st.text()) +def test_task_appears_in_test_harness(task_id): + # Test harness only captures tasks with title starting with "TEST-" + # Random task_id won't work +``` + +**Solution:** Use example-based tests +```python +# βœ… GOOD: Specific test data +def test_task_appears_in_test_harness(): + task_id = f"TEST-{uuid.uuid4()}" + create_task(task_id, title=f"TEST-{task_id}") + # Now test harness will capture it +``` + +#### 🎯 Decision Matrix + +| Test Type | Use PBT? | Reason | +|-----------|----------|--------| +| **Pure business logic** | βœ… Yes | Clear properties, no dependencies | +| **Validation functions** | βœ… Yes | Universal rules, deterministic | +| **Data transformations** | βœ… Yes | Roundtrip properties, invariants | +| **Algorithm correctness** | βœ… Yes | Mathematical properties | +| **Unit tests (no mocks)** | βœ… Yes | Fast, isolated, deterministic | +| **Integration tests** | ⚠️ Limited | Use for error cases, not happy path | +| **E2E tests** | ❌ No | Requires specific data flows | +| **Tests with external APIs** | ❌ No | Can't use random data with real systems | +| **Tests requiring specific state** | ❌ No | Need controlled test data | +| **UI tests** | ❌ No | Requires specific user interactions | + +### Hybrid Approach: Best of Both Worlds + +**Recommended Strategy:** + +```python +# 1. Property-based tests for business logic +@given(task_id=st.text(min_size=1), dependency_id=st.text(min_size=1)) +def test_circular_dependency_properties(task_id, dependency_id): + # Test universal properties + ... + +# 2. Example-based tests for integration +def test_eventbridge_integration_with_test_harness(): + # Use specific TEST- prefixed data + event = TaskCreatedEvent( + task, + source='TEST-cns427-task-api', + detail_type_prefix='TEST-' + ) + ... + +# 3. Example-based tests for E2E +def test_complete_user_workflow(): + # Specific user journey with known data + ... +``` + +## Performance and CI/CD Considerations + +### Question: Should PBT Run on Every Commit? + +**Short Answer:** It depends on your test suite organization and performance requirements. + +### Performance Comparison + +Let's compare actual performance from this project: + +```bash +# Traditional unit tests (no mocks) +poetry run pytest tests/unit/test_domain_logic.py -v +# Result: 14 tests in 0.06s (0.004s per test) + +# Property-based tests +poetry run pytest tests/property_based/ -v +# Result: 33 tests in 3.43s (0.104s per test) +``` + +**Analysis:** +- **Traditional tests:** 0.004s per test +- **Property-based tests:** 0.104s per test (26x slower) +- **But:** PBT runs 100 examples per test vs 1 example in traditional + +**Per-example comparison:** +- Traditional: 0.004s for 1 example +- PBT: 0.104s for 100 examples = 0.001s per example (actually faster!) 
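+
+The same trade-off is easy to reproduce locally. A hedged sketch (a generic property, not taken from this project): scaling `max_examples` scales wall time roughly linearly, which is why per-example cost is the fairer comparison.
+
+```python
+from hypothesis import given, settings, strategies as st
+
+# Same property under two example budgets; runtime grows ~linearly with max_examples
+@settings(max_examples=100)
+@given(xs=st.lists(st.integers()))
+def test_sort_is_idempotent_100(xs):
+    assert sorted(sorted(xs)) == sorted(xs)
+
+@settings(max_examples=1000)
+@given(xs=st.lists(st.integers()))
+def test_sort_is_idempotent_1000(xs):
+    assert sorted(sorted(xs)) == sorted(xs)
+```
+
+Timing both with `pytest --durations=0` should show the 10x example budget costing roughly 10x the wall time, while the per-example cost stays flat.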
+ +### CI/CD Strategies + +#### Strategy 1: Run Everything on Every Commit (Small Projects) + +```yaml +# .github/workflows/test.yml +- name: Run all tests + run: | + poetry run pytest tests/ -v +``` + +**When to use:** +- Small test suites (< 5 minutes total) +- Fast CI runners +- High confidence requirements + +**Pros:** +- Maximum confidence +- Catch bugs early +- Simple configuration + +**Cons:** +- Slower CI pipeline +- May slow down development + +#### Strategy 2: Separate Fast and Slow Tests (Recommended) + +```yaml +# .github/workflows/test.yml +jobs: + fast-tests: + runs-on: ubuntu-latest + steps: + - name: Run unit tests + run: poetry run pytest tests/unit/ -v + + property-tests: + runs-on: ubuntu-latest + steps: + - name: Run property-based tests + run: poetry run pytest tests/property_based/ -v + + integration-tests: + runs-on: ubuntu-latest + steps: + - name: Run integration tests + run: poetry run pytest tests/integration/ -v +``` + +**When to use:** +- Medium to large projects +- Want fast feedback on unit tests +- Can parallelize test execution + +**Pros:** +- Fast feedback from unit tests +- Parallel execution saves time +- Can fail fast on unit test failures + +**Cons:** +- More complex CI configuration +- Need to manage multiple jobs + +#### Strategy 3: Tiered Testing (Large Projects) + +```yaml +# Run on every commit +on: [push] +jobs: + unit-tests: + run: poetry run pytest tests/unit/ -v + +# Run on PR +on: [pull_request] +jobs: + property-tests: + run: poetry run pytest tests/property_based/ -v + +# Run on merge to main +on: + push: + branches: [main] +jobs: + all-tests: + run: poetry run pytest tests/ -v +``` + +**When to use:** +- Large projects with slow test suites +- High commit frequency +- Want to optimize developer experience + +**Pros:** +- Very fast feedback on commits +- Comprehensive testing before merge +- Optimized for developer productivity + +**Cons:** +- Most complex configuration +- Bugs might not be caught until PR +- Requires discipline + +#### Strategy 4: Reduced Examples for CI (Pragmatic) + +```python +# conftest.py +import os +from hypothesis import settings + +# Use fewer examples in CI for faster feedback +if os.getenv('CI'): + settings.register_profile('ci', max_examples=20) + settings.load_profile('ci') +else: + settings.register_profile('dev', max_examples=100) + settings.load_profile('dev') +``` + +```yaml +# .github/workflows/test.yml +- name: Run property-based tests (reduced) + env: + CI: true + run: poetry run pytest tests/property_based/ -v + # Runs 20 examples instead of 100 (5x faster) + +# Nightly comprehensive run +on: + schedule: + - cron: '0 0 * * *' +jobs: + comprehensive-tests: + run: poetry run pytest tests/property_based/ -v + # Runs full 100 examples +``` + +**When to use:** +- Want PBT on every commit but faster +- Can accept slightly less coverage in CI +- Run comprehensive tests nightly + +**Pros:** +- Balance between speed and coverage +- Still get PBT benefits +- Comprehensive testing happens regularly + +**Cons:** +- Might miss rare edge cases in CI +- Need to maintain two configurations + +### Recommended Approach for This Project + +Based on the performance numbers: + +```yaml +# .github/workflows/test.yml +name: Tests + +on: [push, pull_request] + +jobs: + # Fast feedback (< 1 second) + unit-tests: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Run unit tests + run: poetry run pytest tests/unit/ -v + # 14 tests in 0.06s + + # Medium speed (< 5 seconds) + property-tests: + runs-on: 
ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - name: Run property-based tests
+        run: poetry run pytest tests/property_based/ -v
+        # 33 tests in 3.43s
+
+  # Slower (depends on AWS)
+  integration-tests:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - name: Run integration tests
+        run: poetry run pytest tests/integration/ -v
+        # Variable timing
+```
+
+**Why this works:**
+- All tests run in parallel
+- Fast feedback from unit tests (< 1s)
+- Property tests complete quickly (< 5s)
+- Total wall time ≈ slowest job (integration tests)
+
+### Performance Optimization Tips
+
+**1. Use Hypothesis Profiles**
+```python
+# conftest.py
+from hypothesis import settings
+
+settings.register_profile('ci', max_examples=20, deadline=None)
+settings.register_profile('dev', max_examples=100, deadline=None)
+settings.register_profile('thorough', max_examples=1000, deadline=None)
+```
+
+**2. Mark Slow Tests**
+```python
+import pytest
+from hypothesis import given
+from hypothesis import strategies as st
+
+@pytest.mark.slow
+@given(tasks=st.lists(task_ids, min_size=10, max_size=100))
+def test_very_large_graphs(tasks):
+    ...
+```
+
+```bash
+# Run fast tests only
+poetry run pytest -m "not slow"
+
+# Run all tests
+poetry run pytest
+```
+
+**3. Use Pytest-xdist for Parallelization**
+```bash
+poetry add --group dev pytest-xdist
+
+# Run tests in parallel
+poetry run pytest -n auto tests/property_based/
+```
+
+### Summary: When to Run PBT
+
+| Scenario | Run PBT? | Configuration |
+|----------|----------|---------------|
+| **Local development** | ✅ Yes | Full 100 examples |
+| **Pre-commit hook** | ⚠️ Optional | 20 examples (fast) |
+| **Every commit (CI)** | ✅ Yes | 20-50 examples |
+| **Pull request** | ✅ Yes | 100 examples |
+| **Merge to main** | ✅ Yes | 100 examples |
+| **Nightly builds** | ✅ Yes | 1000 examples (thorough) |
+| **Release builds** | ✅ Yes | 1000+ examples |
+
+### Bottom Line
+
+**For this project:**
+- ✅ Run PBT on every commit (only 3.43s)
+- ✅ Run in parallel with other tests
+- ✅ Use for pure business logic (circular dependency detection)
+- ❌ Don't use for integration tests requiring specific data
+- ⚠️ Consider reduced examples (20-50) for faster CI feedback
+
+**The 3.43s runtime is acceptable for CI/CD**, especially when run in parallel with other test suites. The comprehensive coverage (3,300+ test cases) is worth the extra time.
+
+
+## Debugging Invalid Examples
+
+### What Causes Invalid Examples?
+
+Invalid examples occur when Hypothesis generates data that doesn't meet your test's constraints. Common causes:
+
+1. **Strategy constraints** - The strategy itself rejects some values
+2. **assume() statements** - Your test explicitly rejects certain inputs
+3. **UUID collisions** - Even random UUIDs occasionally collide in generated data, largely because Hypothesis deliberately repeats earlier draws (see below)
+
+### Example: Why test_self_dependency_returns_boolean_true Has 7 Invalid Examples
+
+```bash
+poetry run pytest tests/property_based/test_self_dependency_properties.py::TestSelfDependencyProperties::test_self_dependency_returns_boolean_true --hypothesis-show-statistics
+```
+
+Output shows:
+```
+100 passing examples, 0 failing examples, 7 invalid examples
+```
+
+**Why?** The test generates:
+- A `task_id` (UUID)
+- A `graph` (dictionary with UUID keys and UUID list values)
+
+Occasionally, the generated `task_id` matches a key in the generated `graph`, and the test's `assume()` rejects that example. This is rare (~1-2% of examples) and expected; as explained below, it comes mostly from how Hypothesis draws values rather than from pure chance.
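+
+A hedged reconstruction of the relevant setup (the real strategies live in the test module; `dependency_graphs` is our assumption based on the description above, while `task_ids` matches the strategy used throughout these tests):
+
+```python
+from hypothesis import assume, given
+from hypothesis import strategies as st
+
+from services.task_service.domain.business_rules import has_circular_dependency
+
+task_ids = st.uuids().map(str)
+dependency_graphs = st.dictionaries(
+    keys=task_ids,
+    values=st.lists(task_ids, max_size=10),
+    max_size=20,
+)
+
+@given(task_id=task_ids, graph=dependency_graphs)
+def test_self_dependency_returns_boolean_true(task_id, graph):
+    # Whenever the drawn task_id happens to equal a key already in the
+    # drawn graph, assume() rejects the example -- one "invalid example".
+    assume(task_id not in graph)
+    assert has_circular_dependency(task_id, task_id, graph) is True
+```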
+
+### How to See What Data Was Rejected
+
+#### Method 1: Run the Debug Test
+
+We've created a dedicated debug test that shows exactly what gets rejected:
+
+```bash
+poetry run pytest tests/property_based/test_show_rejections.py::test_demonstrate_uuid_collision_probability -v -s
+```
+
+This will show:
+- Total number of collisions
+- Collision rate percentage
+- Example collisions with actual UUIDs
+- Explanation of why it happens
+
+Example output:
+```
+UUID COLLISION PROBABILITY DEMONSTRATION
+
+Results:
+  Total generations: 1000
+  Collisions found: 17
+  Collision rate: 1.70%
+
+Example collisions:
+  Example 1:
+    task_id: c2a7af9e-ab79-b005-6dd1-77d2c700d84c
+    graph_size: 7
+    collision_type: task_id is a key in graph
+```
+
+#### Method 2: Use Hypothesis Verbosity
+
+Add verbosity to any test to see all generated examples:
+
+```python
+from hypothesis import settings, Verbosity
+
+@settings(verbosity=Verbosity.verbose, max_examples=10)
+@given(task_id=task_ids, graph=dependency_graphs)
+def test_something(self, task_id, graph):
+    # Your test code
+    ...
+```
+
+Run with the `-s` flag to see output:
+```bash
+poetry run pytest tests/property_based/test_your_test.py -v -s
+```
+
+#### Method 3: Use event() to Track Patterns
+
+Add `event()` calls to track what's happening:
+
+```python
+from hypothesis import event
+
+@given(task_id=task_ids, graph=dependency_graphs)
+def test_something(self, task_id, graph):
+    if task_id in graph:
+        event("task_id IS in graph")
+    else:
+        event("task_id NOT in graph")
+
+    # Your test code
+    ...
+```
+
+Run with `--hypothesis-show-statistics` to see the event distribution:
+```bash
+poetry run pytest tests/property_based/test_your_test.py --hypothesis-show-statistics
+```
+
+Output shows:
+```
+Events:
+  * 95%, task_id NOT in graph
+  * 5%, task_id IS in graph
+```
+
+### Understanding the Numbers
+
+For `test_self_dependency_returns_boolean_true`:
+- **100 passing examples** - Successfully tested 100 valid cases ✅
+- **0 failing examples** - No bugs found ✅
+- **7 invalid examples** - 7 UUID collisions occurred (normal)
+
+**Calculation:**
+- Total attempts: 100 + 7 = 107
+- Success rate: 100/107 = 93.5%
+- Rejection rate: 7/107 = 6.5%
+
+**Is this good?** Yes! A 6.5% rejection rate is excellent and has minimal performance impact.
+
+### When to Worry About Invalid Examples
+
+| Rejection Rate | Status | Action |
+|----------------|--------|--------|
+| 0-10% | ✅ Excellent | No action needed |
+| 10-30% | ✅ Good | No action needed |
+| 30-50% | ⚠️ Acceptable | Consider optimization if slow |
+| 50-80% | ⚠️ Inefficient | Should optimize |
+| 80%+ | ❌ Problem | Must optimize |
+
+### Why UUID Collisions Happen More Often Than You'd Expect
+
+At first glance these collisions look like the birthday paradox, but the numbers don't support that. A version-4 UUID carries 122 random bits, so there are about 5.3 x 10^36 possible values. Even at ~200 UUIDs per test case (1 `task_id`, plus a graph with 0-20 keys and 0-10 values per key), the uniform-random birthday bound puts the collision probability around 10^-33 - effectively zero, not the 1-2% we observe.
+
+The observed rate comes from how Hypothesis generates data: its engine deliberately re-uses previously drawn values more often than uniform sampling would, because equal values are disproportionately good at exposing bugs (self-references, aliasing, duplicate keys). The collisions are a feature of the generator, not blind chance - which is exactly why they are worth tolerating rather than filtering away. A quick sanity check of the uniform baseline follows below.
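+
+A minimal sketch of that uniform baseline, using the standard birthday-bound approximation (pure math, nothing project-specific):
+
+```python
+import math
+
+def birthday_collision_probability(n: int, space: int) -> float:
+    """Approximate P(at least one collision) among n uniform draws from `space` values."""
+    return 1.0 - math.exp(-n * (n - 1) / (2.0 * space))
+
+# ~200 UUIDs per test case, 2**122 possible UUID4 values
+print(birthday_collision_probability(200, 2**122))  # ~3.7e-33 -- negligible
+
+# The classic birthday paradox for comparison: 23 people, 365 birthdays
+print(birthday_collision_probability(23, 365))      # ~0.5
+```
+
+If duplicates were purely random, we would essentially never see them; the 1-2% rate is the generator's doing.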
+ +### Reducing Invalid Examples (If Needed) + +If you want to reduce invalid examples, you can: + +#### Option 1: Use More Specific Strategies + +Instead of generating independently and filtering: +```python +# Less efficient - generates then filters +@given(task_id=task_ids, graph=dependency_graphs) +def test_something(task_id, graph): + assume(task_id not in graph) # Causes rejections +``` + +Generate with constraints built-in: +```python +# More efficient - only generates valid data +@given(data=st.data()) +def test_something(data): + task_id = data.draw(task_ids) + # Generate graph that doesn't include task_id + graph = data.draw(st.dictionaries( + keys=task_ids.filter(lambda x: x != task_id), + values=st.lists(task_ids.filter(lambda x: x != task_id)) + )) +``` + +#### Option 2: Accept the Rejections + +For low rejection rates (< 10%), it's often better to accept them: +- Simpler code +- Easier to understand +- Minimal performance impact +- More realistic data generation + +**Our approach:** We accept the small number of UUID collisions because: +- Rejection rate is very low (1-7%) +- Code is simpler without complex filtering +- Performance impact is negligible +- Collisions are realistic (can happen in production too!) + +### Summary + +**Invalid examples are normal and expected** when using property-based testing. They occur due to: +1. Random data generation (UUID collisions) +2. Strategy constraints +3. Test assumptions + +**For our tests:** +- 0-7 invalid examples per test is excellent +- Rejection rate of 0-7% is very good +- No optimization needed +- Tests run fast (< 0.3s per test) + +**To debug invalid examples:** +1. Run `test_show_rejections.py` to see collision examples +2. Use `--hypothesis-show-statistics` to see rejection rates +3. Add `event()` calls to track patterns +4. Use `verbosity=Verbosity.verbose` to see all examples + +The small number of invalid examples demonstrates that our UUID strategy is working well! diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/property_based/__init__.py b/python-test-samples/cns427-testable-serverless-architecture/tests/property_based/__init__.py new file mode 100644 index 00000000..4e6d4cb4 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/tests/property_based/__init__.py @@ -0,0 +1,7 @@ +""" +Property-Based Tests for Circular Dependency Detection + +This package contains property-based tests generated from requirements +using Hypothesis library. These tests verify invariants and properties +that should always hold true for the circular dependency detection logic. +""" diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/property_based/test_complex_cycle_properties.py b/python-test-samples/cns427-testable-serverless-architecture/tests/property_based/test_complex_cycle_properties.py new file mode 100644 index 00000000..5e18257c --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/tests/property_based/test_complex_cycle_properties.py @@ -0,0 +1,183 @@ +""" +Property-Based Tests: Complex Circular Dependency Detection + +Generated from Requirement 3: Complex Circular Dependency Detection +Tests cycles of any length (3+ tasks). 
+""" +import pytest +from hypothesis import given, assume +from hypothesis import strategies as st +from services.task_service.domain.business_rules import has_circular_dependency + + +# Strategy for generating valid task IDs (matching production: str(uuid.uuid4())) +task_ids = st.uuids().map(str) + + +class TestComplexCircularDependencyProperties: + """Property-based tests for complex circular dependencies (Requirement 3).""" + + @given(task_a=task_ids, task_b=task_ids, task_c=task_ids) + def test_three_task_cycle(self, task_a, task_b, task_c): + """ + Property: Three-task cycles are detected (Aβ†’Bβ†’Cβ†’A). + + Requirement 3.1: WHEN a dependency chain exists from Task A through + multiple tasks back to Task A, THE System SHALL identify this as + a circular dependency. + """ + assume(len({task_a, task_b, task_c}) == 3) # All different + + # Create cycle: task_aβ†’task_bβ†’task_c + graph = { + task_a: [task_b], + task_b: [task_c] + } + + # Check if task_cβ†’task_a would complete the cycle + result = has_circular_dependency(task_c, task_a, graph) + + assert result is True, f"Three-task cycle {task_a}β†’{task_b}β†’{task_c}β†’{task_a} should be detected" + + @given(tasks=st.lists(task_ids, min_size=4, max_size=10, unique=True)) + def test_long_chain_cycle(self, tasks): + """ + Property: Cycles of any length are detected. + + Requirement 3.2: THE System SHALL traverse the entire dependency graph + to detect cycles of any length. + """ + assume(len(tasks) >= 4) + + # Create chain: tasks[0]β†’tasks[1]β†’tasks[2]β†’...β†’tasks[n-1] + graph = {} + for i in range(len(tasks) - 1): + graph[tasks[i]] = [tasks[i + 1]] + + # Check if tasks[n-1]β†’tasks[0] would complete the cycle + result = has_circular_dependency(tasks[-1], tasks[0], graph) + + assert result is True, f"Cycle of length {len(tasks)} should be detected" + + @given(task_a=task_ids, task_b=task_ids, task_c=task_ids, task_d=task_ids) + def test_four_task_cycle(self, task_a, task_b, task_c, task_d): + """ + Property: Four-task cycles are detected (Aβ†’Bβ†’Cβ†’Dβ†’A). + + Requirement 3.4: THE System SHALL handle dependency chains of three + or more tasks. + """ + assume(len({task_a, task_b, task_c, task_d}) == 4) # All different + + # Create cycle: task_aβ†’task_bβ†’task_cβ†’task_d + graph = { + task_a: [task_b], + task_b: [task_c], + task_c: [task_d] + } + + # Check if task_dβ†’task_a would complete the cycle + result = has_circular_dependency(task_d, task_a, graph) + + assert result is True, f"Four-task cycle should be detected" + + @given(tasks=st.lists(task_ids, min_size=3, max_size=8, unique=True)) + def test_cycle_detection_uses_dfs(self, tasks): + """ + Property: Cycle detection traverses the graph (DFS behavior). + + Requirement 3.3: THE System SHALL use depth-first search to efficiently + detect cycles in the dependency graph. 
+ """ + assume(len(tasks) >= 3) + + # Create a chain + graph = {} + for i in range(len(tasks) - 1): + graph[tasks[i]] = [tasks[i + 1]] + + # Adding back-edge creates cycle + result_with_cycle = has_circular_dependency(tasks[-1], tasks[0], graph) + + # Not adding back-edge means no cycle + result_without_cycle = has_circular_dependency(tasks[-1], tasks[-1] + "_new", graph) + + assert result_with_cycle is True, "Should detect cycle when back-edge exists" + assert result_without_cycle is False, "Should not detect cycle without back-edge" + + @given( + main_chain=st.lists(task_ids, min_size=3, max_size=5, unique=True), + branch_chain=st.lists(task_ids, min_size=2, max_size=4, unique=True), + branch_point=st.integers(min_value=0, max_value=10) + ) + def test_cycle_in_graph_with_branches(self, main_chain, branch_chain, branch_point): + """ + Property: Cycles are detected when nodes have multiple dependencies (branches). + + Requirement 3.2: THE System SHALL traverse the entire dependency graph. + + Tests a graph where a node has multiple dependencies, requiring DFS to + explore multiple paths to find the cycle. + + Example structure: + main_chain[0] β†’ [main_chain[1], branch_chain[0]] (branches to two paths) + main_chain[1] β†’ main_chain[2] + branch_chain[0] β†’ branch_chain[1] + main_chain[2] β†’ main_chain[0] (creates cycle back to start) + + Visual: + β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” + ↓ β”‚ + main[0] | + / \\ | + / \\ | + main[1] branch[0] | + | | | + main[2] branch[1] | + β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ + """ + assume(len(main_chain) >= 3) + assume(len(branch_chain) >= 2) + assume(not set(main_chain).intersection(set(branch_chain))) + + graph = {} + + # Create main chain + for i in range(len(main_chain) - 1): + graph[main_chain[i]] = [main_chain[i + 1]] + + # Create branch chain + for i in range(len(branch_chain) - 1): + graph[branch_chain[i]] = [branch_chain[i + 1]] + + # Pick random point in main chain to add the branch + branch_idx = branch_point % len(main_chain) + existing_deps = graph.get(main_chain[branch_idx], []) + graph[main_chain[branch_idx]] = existing_deps + [branch_chain[0]] + + # Target end of main chain to beginning so we are forced to traverse branch + target = main_chain[0] + + # Check if completing cycle on main chain returns True + result = has_circular_dependency(main_chain[-1], target, graph) + + assert result is True, "Cycle should be detected in graph with branches" + + @given(tasks=st.lists(task_ids, min_size=5, max_size=10, unique=True)) + def test_partial_cycle_not_detected(self, tasks): + """ + Property: Partial chains that don't loop back are not cycles. + + Requirement 3.1: Only when chain loops back to starting task. 
+ """ + assume(len(tasks) >= 5) + + # Create chain: tasks[0]β†’tasks[1]β†’tasks[2]β†’tasks[3] + graph = {} + for i in range(3): + graph[tasks[i]] = [tasks[i + 1]] + + # Check if tasks[3]β†’tasks[4] creates cycle (it shouldn't - doesn't loop back) + result = has_circular_dependency(tasks[3], tasks[4], graph) + + assert result is False, "Partial chain without loop back should not be circular" diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/shared/__init__.py b/python-test-samples/cns427-testable-serverless-architecture/tests/shared/__init__.py new file mode 100644 index 00000000..b3e1d11a --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/tests/shared/__init__.py @@ -0,0 +1 @@ +"""Shared test utilities for unit and integration tests.""" diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/shared/fakes/__init__.py b/python-test-samples/cns427-testable-serverless-architecture/tests/shared/fakes/__init__.py new file mode 100644 index 00000000..bcdc7a54 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/tests/shared/fakes/__init__.py @@ -0,0 +1,7 @@ +"""Shared fake implementations for testing.""" + +from .in_memory_event_publisher import InMemoryEventPublisher +from .in_memory_notification_service import InMemoryNotificationService +from .in_memory_task_repository import InMemoryTaskRepository + +__all__ = ['InMemoryEventPublisher', 'InMemoryTaskRepository', 'InMemoryNotificationService'] diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/shared/fakes/in_memory_event_publisher.py b/python-test-samples/cns427-testable-serverless-architecture/tests/shared/fakes/in_memory_event_publisher.py new file mode 100644 index 00000000..b33f9eea --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/tests/shared/fakes/in_memory_event_publisher.py @@ -0,0 +1,46 @@ +"""In-memory fake implementation of EventPublisher for all test types.""" + +from typing import List + +from task_api.models.interfaces import EventPublisher +from task_api.models.task import TaskEvent + + +class InMemoryEventPublisher(EventPublisher): + """ + In-memory implementation of EventPublisher for testing. + + This fake captures events in memory instead of publishing to EventBridge, + making it suitable for both unit tests (complete isolation) and integration + tests (real DynamoDB + fake EventBridge). 
+ """ + + def __init__(self): + """Initialize with empty event storage.""" + self.published_events: List[TaskEvent] = [] + + def publish_event(self, event: TaskEvent) -> None: + """Store event in memory instead of publishing to EventBridge.""" + self.published_events.append(event) + + def clear(self) -> None: + """Clear all published events (useful for test cleanup).""" + self.published_events.clear() + + def count(self) -> int: + """Get total number of published events.""" + return len(self.published_events) + + def get_events_by_type(self, event_type) -> List[TaskEvent]: + """Get events filtered by type.""" + return [event for event in self.published_events if event.event_type == event_type] + + def get_latest_event(self) -> TaskEvent: + """Get the most recently published event.""" + if not self.published_events: + raise ValueError('No events have been published') + return self.published_events[-1] + + def has_event_with_task_id(self, task_id: str) -> bool: + """Check if any event exists for the given task ID.""" + return any(event.task_id == task_id for event in self.published_events) diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/shared/fakes/in_memory_notification_service.py b/python-test-samples/cns427-testable-serverless-architecture/tests/shared/fakes/in_memory_notification_service.py new file mode 100644 index 00000000..1cbb4418 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/tests/shared/fakes/in_memory_notification_service.py @@ -0,0 +1,70 @@ +"""In-memory fake implementation of NotificationService for all test types.""" + +from typing import List + +from task_api.models.task import TaskEvent, TaskEventType + + +class InMemoryNotificationService: + """ + In-memory implementation of NotificationService for testing. + + This fake captures processed events in memory instead of logging, + making it suitable for unit tests that need to verify notification + processing behavior. 
+ """ + + def __init__(self): + """Initialize with empty notification storage.""" + self.processed_events: List[TaskEvent] = [] + self.created_notifications: List[TaskEvent] = [] + self.updated_notifications: List[TaskEvent] = [] + self.deleted_notifications: List[TaskEvent] = [] + + def process_task_event(self, task_event: TaskEvent) -> None: + """Process a task event and store notification instead of logging.""" + self.processed_events.append(task_event) + + if task_event.event_type == TaskEventType.TASK_CREATED: + self._handle_task_created(task_event) + elif task_event.event_type == TaskEventType.TASK_UPDATED: + self._handle_task_updated(task_event) + elif task_event.event_type == TaskEventType.TASK_DELETED: + self._handle_task_deleted(task_event) + + def _handle_task_created(self, task_event: TaskEvent) -> None: + """Handle task created notification by storing it.""" + self.created_notifications.append(task_event) + + def _handle_task_updated(self, task_event: TaskEvent) -> None: + """Handle task updated notification by storing it.""" + self.updated_notifications.append(task_event) + + def _handle_task_deleted(self, task_event: TaskEvent) -> None: + """Handle task deleted notification by storing it.""" + self.deleted_notifications.append(task_event) + + def clear(self) -> None: + """Clear all stored notifications (useful for test cleanup).""" + self.processed_events.clear() + self.created_notifications.clear() + self.updated_notifications.clear() + self.deleted_notifications.clear() + + def count_total(self) -> int: + """Get total number of processed events.""" + return len(self.processed_events) + + def count_by_type(self, event_type: TaskEventType) -> int: + """Get count of events by type.""" + return len([e for e in self.processed_events if e.event_type == event_type]) + + def get_latest_event(self) -> TaskEvent: + """Get the most recently processed event.""" + if not self.processed_events: + raise ValueError('No events have been processed') + return self.processed_events[-1] + + def has_event_with_task_id(self, task_id: str) -> bool: + """Check if any event exists for the given task ID.""" + return any(event.task_id == task_id for event in self.processed_events) diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/shared/fakes/in_memory_task_repository.py b/python-test-samples/cns427-testable-serverless-architecture/tests/shared/fakes/in_memory_task_repository.py new file mode 100644 index 00000000..6bf324c8 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/tests/shared/fakes/in_memory_task_repository.py @@ -0,0 +1,102 @@ +"""In-memory fake implementation of TaskRepository for all test types.""" + +from typing import Dict, List, Optional + +from task_api.models.interfaces import TaskRepository +from task_api.models.task import Task + + +class InMemoryTaskRepository(TaskRepository): + """ + In-memory implementation of TaskRepository for testing. + + This fake stores tasks in memory instead of DynamoDB, making it suitable + for unit tests that need complete isolation from external dependencies. 
+ """ + + def __init__(self): + """Initialize with empty task storage.""" + self._tasks: Dict[str, Task] = {} + + def create_task(self, task: Task) -> Task: + """Create a new task in memory.""" + if task.task_id in self._tasks: + raise ValueError(f'Task already exists: {task.task_id}') + + self._tasks[task.task_id] = task + return task + + def get_task(self, task_id: str) -> Optional[Task]: + """Retrieve a task by ID from memory.""" + return self._tasks.get(task_id) + + def list_tasks(self, limit: int = 50, next_token: Optional[str] = None) -> tuple[List[Task], Optional[str]]: + """List tasks with pagination from memory.""" + tasks = list(self._tasks.values()) + + # Simple pagination simulation + start_index = 0 + if next_token: + try: + start_index = int(next_token) + except (ValueError, TypeError): + start_index = 0 + + end_index = start_index + limit + page_tasks = tasks[start_index:end_index] + + # Generate next token if there are more tasks + next_page_token = None + if end_index < len(tasks): + next_page_token = str(end_index) + + return page_tasks, next_page_token + + def update_task(self, task: Task, expected_version: int) -> Task: + """Update an existing task with optimistic locking.""" + existing_task = self._tasks.get(task.task_id) + if existing_task is None: + raise ValueError(f'Task not found: {task.task_id}') + + # Check version for optimistic locking + if existing_task.version != expected_version: + from services.task_service.domain.exceptions import ConflictError + + raise ConflictError(f'Version conflict for task: {task.task_id}', current_task=existing_task.model_dump()) + + self._tasks[task.task_id] = task + return task + + def delete_task(self, task_id: str, version: int) -> None: + """Delete a task with version check.""" + existing_task = self._tasks.get(task_id) + if existing_task is None: + raise ValueError(f'Task not found: {task_id}') + + # Check version for optimistic locking + if existing_task.version != version: + raise ValueError(f'Version conflict for task: {task_id}') + + del self._tasks[task_id] + + def clear(self) -> None: + """Clear all tasks (useful for test cleanup).""" + self._tasks.clear() + + def count(self) -> int: + """Get total number of tasks.""" + return len(self._tasks) + + def get_all_tasks(self) -> List[Task]: + """Get all tasks (useful for test verification).""" + return list(self._tasks.values()) + + def simulate_failure(self, should_fail: bool = True, message: str = 'Simulated repository failure'): + """Configure the fake to simulate failures for error testing.""" + self.should_fail = should_fail + self.failure_message = message + + def _check_failure(self): + """Check if failure should be simulated.""" + if getattr(self, 'should_fail', False): + raise RuntimeError(getattr(self, 'failure_message', 'Simulated repository failure')) diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/shared/helpers/__init__.py b/python-test-samples/cns427-testable-serverless-architecture/tests/shared/helpers/__init__.py new file mode 100644 index 00000000..1f7402ae --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/tests/shared/helpers/__init__.py @@ -0,0 +1,56 @@ +"""Shared test helpers for dependency injection and test setup.""" + +from .api_gateway_helpers import ( + create_api_gateway_event, + create_invalid_json_event, + create_invalid_method_event, + create_missing_body_event, + create_task_creation_event, + create_task_delete_event, + create_task_get_event, + create_task_list_event, + 
create_task_update_event, +) +from .dependency_injection import domain_unit_dependencies, dynamodb_integration_dependencies, eventbridge_integration_dependencies +from .eventbridge_helpers import ( + check_test_infrastructure, + cleanup_test_events, + create_eventbridge_event, + create_test_infrastructure_summary, + extract_test_run_id_from_correlation_id, + format_event_summary, + generate_test_run_id, + verify_event_structure, + verify_task_event_data, + wait_for_event_with_retries, + wait_for_test_events, +) + +__all__ = [ + # Dependency injection + 'dynamodb_integration_dependencies', + 'eventbridge_integration_dependencies', + 'domain_unit_dependencies', + # API Gateway helpers + 'create_api_gateway_event', + 'create_task_creation_event', + 'create_task_update_event', + 'create_task_get_event', + 'create_task_delete_event', + 'create_task_list_event', + 'create_invalid_method_event', + 'create_invalid_json_event', + 'create_missing_body_event', + # EventBridge helpers + 'generate_test_run_id', + 'wait_for_test_events', + 'cleanup_test_events', + 'verify_event_structure', + 'verify_task_event_data', + 'check_test_infrastructure', + 'create_test_infrastructure_summary', + 'wait_for_event_with_retries', + 'extract_test_run_id_from_correlation_id', + 'format_event_summary', + 'create_eventbridge_event', +] diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/shared/helpers/api_gateway_helpers.py b/python-test-samples/cns427-testable-serverless-architecture/tests/shared/helpers/api_gateway_helpers.py new file mode 100644 index 00000000..37849b8a --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/tests/shared/helpers/api_gateway_helpers.py @@ -0,0 +1,115 @@ +""" +API Gateway Test Helpers + +Utility functions for creating API Gateway events for both unit and integration testing. +These helpers create realistic API Gateway events that can be used across all test types. +""" + +import json +from typing import Any, Dict, Optional + + +def create_api_gateway_event( + method: str = 'GET', + path: str = '/tasks', + body: Optional[Dict] = None, + query_params: Optional[Dict] = None, + path_params: Optional[Dict] = None, + request_id: str = 'test-request-123', +) -> Dict[str, Any]: + """ + Create a realistic API Gateway event for testing. 
+ + Args: + method: HTTP method (GET, POST, PUT, DELETE) + path: Request path + body: Request body as dictionary + query_params: Query string parameters + path_params: Path parameters + request_id: Request ID for tracing (useful for test isolation) + + Returns: + API Gateway event dictionary + """ + return { + 'httpMethod': method, + 'path': path, + 'pathParameters': path_params or {}, + 'queryStringParameters': query_params or {}, + 'body': json.dumps(body) if body else None, + 'headers': {'Content-Type': 'application/json', 'Accept': 'application/json'}, + 'requestContext': { + 'requestId': request_id, + 'stage': 'test', + 'accountId': '123456789012', + 'resourceId': 'test-resource', + 'httpMethod': method, + 'resourcePath': path, + 'protocol': 'HTTP/1.1', + 'requestTime': '09/Apr/2015:12:34:56 +0000', + 'requestTimeEpoch': 1428582896000, + 'identity': {'sourceIp': '127.0.0.1', 'userAgent': 'Custom User Agent String'}, + }, + 'isBase64Encoded': False, + } + + +def create_task_creation_event( + title: str, priority: str = 'medium', description: Optional[str] = None, request_id: str = 'test-create-123' +) -> Dict[str, Any]: + """Create API Gateway event for task creation.""" + body = {'title': title, 'priority': priority} + if description: + body['description'] = description + + return create_api_gateway_event(method='POST', path='/tasks', body=body, request_id=request_id) + + +def create_task_update_event( + task_id: str, title: str, priority: str = 'medium', status: Optional[str] = None, version: int = 1, request_id: str = 'test-update-123' +) -> Dict[str, Any]: + """Create API Gateway event for task update.""" + body = {'title': title, 'priority': priority, 'version': version} + if status: + body['status'] = status + + return create_api_gateway_event(method='PUT', path=f'/tasks/{task_id}', path_params={'task_id': task_id}, body=body, request_id=request_id) + + +def create_task_get_event(task_id: str, request_id: str = 'test-get-123') -> Dict[str, Any]: + """Create API Gateway event for getting a task.""" + return create_api_gateway_event(method='GET', path=f'/tasks/{task_id}', path_params={'task_id': task_id}, request_id=request_id) + + +def create_task_delete_event(task_id: str, request_id: str = 'test-delete-123') -> Dict[str, Any]: + """Create API Gateway event for task deletion.""" + return create_api_gateway_event(method='DELETE', path=f'/tasks/{task_id}', path_params={'task_id': task_id}, request_id=request_id) + + +def create_task_list_event(limit: Optional[int] = None, next_token: Optional[str] = None, request_id: str = 'test-list-123') -> Dict[str, Any]: + """Create API Gateway event for listing tasks.""" + query_params = {} + if limit: + query_params['limit'] = str(limit) + if next_token: + query_params['next_token'] = next_token + + return create_api_gateway_event(method='GET', path='/tasks', query_params=query_params if query_params else None, request_id=request_id) + + +def create_invalid_method_event(request_id: str = 'test-invalid-123') -> Dict[str, Any]: + """Create API Gateway event with unsupported HTTP method.""" + return create_api_gateway_event(method='PATCH', path='/tasks', request_id=request_id) + + +def create_invalid_json_event(request_id: str = 'test-invalid-json-123') -> Dict[str, Any]: + """Create API Gateway event with invalid JSON body.""" + event = create_api_gateway_event(method='POST', path='/tasks', request_id=request_id) + # Override body with invalid JSON + event['body'] = 'invalid json content' + return event + + +def 
create_missing_body_event(request_id: str = 'test-missing-body-123') -> Dict[str, Any]: + """Create API Gateway event with missing body for POST request.""" + return create_api_gateway_event(method='POST', path='/tasks', body=None, request_id=request_id) diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/shared/helpers/dependency_injection.py b/python-test-samples/cns427-testable-serverless-architecture/tests/shared/helpers/dependency_injection.py new file mode 100644 index 00000000..721336f4 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/tests/shared/helpers/dependency_injection.py @@ -0,0 +1,123 @@ +"""Dependency injection helpers for different test scenarios.""" + +from contextlib import contextmanager + +from task_api.domain.task_service import TaskService +from task_api.integration.factory import create_task_repository + +from tests.shared.fakes import InMemoryEventPublisher, InMemoryTaskRepository + + +@contextmanager +def dynamodb_integration_dependencies(): + """ + Context manager for DynamoDB integration tests with mixed dependencies: + - Real DynamoDB repository (tests actual data persistence) + - Fake EventBridge publisher (avoids external calls, enables verification) + + Usage: + with dynamodb_integration_dependencies() as event_publisher: + response = lambda_handler(event, context) + # Verify real DynamoDB persistence + # Verify captured events in event_publisher + """ + # Create real repository (uses actual DynamoDB table) + real_repository = create_task_repository() + + # Create fake publisher (captures events in memory) + fake_publisher = InMemoryEventPublisher() + + # Create TaskService with mixed dependencies + mixed_service = TaskService(real_repository, fake_publisher) + + # Import handler module + import task_api.handlers.task_handler as handler_module + + # Store original service + original_service = getattr(handler_module, 'task_service', None) + + try: + # Inject mixed service directly into handler + handler_module.task_service = mixed_service + + yield fake_publisher # Return publisher for event verification + finally: + # Restore original state for test isolation + handler_module.task_service = original_service + + +@contextmanager +def eventbridge_integration_dependencies(): + """ + Context manager for EventBridge integration tests with mixed dependencies: + - Fake DynamoDB repository (avoids database dependencies) + - Real EventBridge publisher (tests actual event publishing) + + Usage: + with eventbridge_integration_dependencies() as (repository, test_run_id): + response = lambda_handler(event, context) + # Verify fake repository state + # Verify real EventBridge events using test_run_id + """ + import os + import uuid + from unittest.mock import patch + + # Generate unique test run ID for event isolation + test_run_id = f'test-{uuid.uuid4()}' + + # Create fake repository (avoids DynamoDB dependencies) + fake_repository = InMemoryTaskRepository() + + # Create real publisher (tests actual EventBridge publishing) + from task_api.integration.factory import create_event_publisher + + real_publisher = create_event_publisher() + + # Create TaskService with mixed dependencies + mixed_service = TaskService(fake_repository, real_publisher) + + # Import handler module + import task_api.handlers.task_handler as handler_module + + # Store original service + original_service = getattr(handler_module, 'task_service', None) + + # Test configuration + TEST_EVENT_BUS_NAME = os.environ.get('TEST_EVENT_BUS_NAME', 
'cns427-task-api-test-bus') + AWS_REGION = os.environ.get('AWS_DEFAULT_REGION', 'us-east-1') + + try: + # Set environment for test isolation + with patch.dict(os.environ, {'EVENT_BUS_NAME': TEST_EVENT_BUS_NAME, 'AWS_DEFAULT_REGION': AWS_REGION}): + # Inject mixed service directly into handler + handler_module.task_service = mixed_service + + yield fake_repository, test_run_id + finally: + # Restore original state for test isolation + handler_module.task_service = original_service + + +@contextmanager +def domain_unit_dependencies(): + """ + Context manager for domain unit tests with all fake dependencies: + - Fake task repository (complete isolation from DynamoDB) + - Fake event publisher (complete isolation from EventBridge) + + Usage: + with domain_unit_dependencies() as (repository, publisher): + service = TaskService(repository, publisher) + # Test pure business logic + """ + # Create all fake dependencies + fake_repository = InMemoryTaskRepository() + fake_publisher = InMemoryEventPublisher() + + try: + yield fake_repository, fake_publisher + finally: + # Clean up for test isolation + fake_repository.clear() + fake_publisher.clear() diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/shared/helpers/eventbridge_helpers.py b/python-test-samples/cns427-testable-serverless-architecture/tests/shared/helpers/eventbridge_helpers.py new file mode 100644 index 00000000..89a3e075 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/tests/shared/helpers/eventbridge_helpers.py @@ -0,0 +1,322 @@ +""" +EventBridge Test Helpers + +Utility functions for EventBridge testing including event verification, +cleanup, and infrastructure management. These helpers can be used across +both unit and integration tests. +""" + +import json +import time +import uuid +from typing import Any, Dict, List, Optional + +import boto3 +from boto3.dynamodb.conditions import Key +from botocore.exceptions import ClientError + + +def generate_test_run_id() -> str: + """Generate a unique test run ID for event isolation.""" + return f'test-{uuid.uuid4()}' + + +def wait_for_test_events(test_run_id: str, expected_count: int, timeout: int = 30, table_name: str = 'cns427-task-api-test-results') -> List[Dict]: + """ + Poll test results table for expected events with timeout. 
+ + Args: + test_run_id: Unique test run identifier + expected_count: Number of events to wait for + timeout: Maximum time to wait in seconds + table_name: DynamoDB table name for test results + + Returns: + List of captured events sorted by timestamp + + Raises: + TimeoutError: If expected events not received within timeout + """ + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + table = dynamodb.Table(table_name) + + start_time = time.time() + events = [] + + while time.time() - start_time < timeout: + try: + response = table.query( + KeyConditionExpression=Key('test_run_id').eq(test_run_id), + ScanIndexForward=False, # Latest events first + ) + + events = response.get('Items', []) + if len(events) >= expected_count: + return sorted(events, key=lambda x: x['event_timestamp']) + + except ClientError as e: + # Table might not exist yet or other transient errors + print(f'Warning: Error querying test results table: {e}') + except Exception as e: + print(f'Unexpected error querying test results: {e}') + + time.sleep(1) + + raise TimeoutError(f"Expected {expected_count} events for test_run_id '{test_run_id}', got {len(events)} after {timeout}s") + + +def cleanup_test_events(test_run_id: str, table_name: str = 'cns427-task-api-test-results') -> int: + """ + Clean up test events from results table. + + Args: + test_run_id: Unique test run identifier + table_name: DynamoDB table name for test results + + Returns: + Number of events cleaned up + """ + try: + dynamodb = boto3.resource('dynamodb', region_name='us-east-1') + table = dynamodb.Table(table_name) + + # Query all events for this test run + response = table.query(KeyConditionExpression=Key('test_run_id').eq(test_run_id)) + + events = response.get('Items', []) + + # Delete each event + deleted_count = 0 + for item in events: + try: + table.delete_item(Key={'test_run_id': item['test_run_id'], 'event_timestamp': item['event_timestamp']}) + deleted_count += 1 + except Exception as e: + print(f'Warning: Failed to delete event {item["event_timestamp"]}: {e}') + + return deleted_count + + except Exception as e: + print(f'Warning: Failed to cleanup test events for {test_run_id}: {e}') + return 0 + + +def verify_event_structure(event_data: Dict, expected_event_type: str) -> bool: + """ + Verify that captured event has expected structure and content. + + Args: + event_data: Parsed event data from test results + expected_event_type: Expected event type (e.g., 'TaskCreated') + + Returns: + True if event structure is valid + """ + required_fields = ['event_type', 'task_id', 'correlation_id'] + + # Check required fields + for field in required_fields: + if field not in event_data: + print(f'Missing required field: {field}') + return False + + # Check event type + if event_data['event_type'] != expected_event_type: + print(f"Expected event_type '{expected_event_type}', got '{event_data['event_type']}'") + return False + + # Check correlation_id format (should start with TEST-) + correlation_id = event_data.get('correlation_id', '') + if not correlation_id.startswith('TEST-'): + print(f'Invalid correlation_id format: {correlation_id}') + return False + + return True + + +def verify_task_event_data(event_data: Dict, expected_task_data: Dict) -> bool: + """ + Verify that task event contains expected task data. 
+ + Args: + event_data: Parsed event data from test results + expected_task_data: Expected task data fields + + Returns: + True if task data matches expectations + """ + task_data = event_data.get('task_data', {}) + + for key, expected_value in expected_task_data.items(): + if key not in task_data: + print(f'Missing task data field: {key}') + return False + + if task_data[key] != expected_value: + print(f"Task data mismatch for {key}: expected '{expected_value}', got '{task_data[key]}'") + return False + + return True + + +def check_test_infrastructure() -> Dict[str, bool]: + """ + Check if test infrastructure is properly deployed. + + Returns: + Dictionary with infrastructure component status + """ + status = {'test_results_table': False, 'eventbridge_rule': False, 'test_subscriber_lambda': False} + + try: + # Check DynamoDB table + dynamodb = boto3.client('dynamodb', region_name='us-east-1') + try: + dynamodb.describe_table(TableName='cns427-task-api-test-results') + status['test_results_table'] = True + except ClientError as e: + if e.response['Error']['Code'] != 'ResourceNotFoundException': + print(f'Error checking test results table: {e}') + + # Check EventBridge rule + events = boto3.client('events', region_name='us-east-1') + try: + response = events.list_rules(NamePrefix='cns427-task-api-test') + if response.get('Rules'): + status['eventbridge_rule'] = True + except Exception as e: + print(f'Error checking EventBridge rule: {e}') + + # Check Lambda function + lambda_client = boto3.client('lambda', region_name='us-east-1') + try: + lambda_client.get_function(FunctionName='cns427-task-api-test-subscriber') + status['test_subscriber_lambda'] = True + except ClientError as e: + if e.response['Error']['Code'] != 'ResourceNotFoundException': + print(f'Error checking test subscriber Lambda: {e}') + + except Exception as e: + print(f'Error checking test infrastructure: {e}') + + return status + + +def create_test_infrastructure_summary() -> str: + """ + Create a summary of test infrastructure status for debugging. + + Returns: + Formatted string with infrastructure status + """ + status = check_test_infrastructure() + + summary = 'EventBridge Test Infrastructure Status:\n' + summary += f' βœ“ Test Results Table: {"βœ“" if status["test_results_table"] else "βœ—"}\n' + summary += f' βœ“ EventBridge Rule: {"βœ“" if status["eventbridge_rule"] else "βœ—"}\n' + summary += f' βœ“ Test Subscriber Lambda: {"βœ“" if status["test_subscriber_lambda"] else "βœ—"}\n' + + if not all(status.values()): + summary += '\nTo deploy test infrastructure:\n' + summary += ' poetry run cdk deploy --context environment=test\n' + + return summary + + +def wait_for_event_with_retries(test_run_id: str, expected_count: int, max_retries: int = 3, base_timeout: int = 30) -> List[Dict]: + """ + Wait for events with exponential backoff retries. 
+
+    Args:
+        test_run_id: Unique test run identifier
+        expected_count: Number of events to wait for
+        max_retries: Maximum number of retry attempts
+        base_timeout: Base timeout for each attempt
+
+    Returns:
+        List of captured events
+
+    Raises:
+        TimeoutError: If events not received after all retries
+    """
+    for attempt in range(max_retries):
+        timeout = base_timeout * (2**attempt)  # Exponential backoff
+
+        try:
+            return wait_for_test_events(test_run_id, expected_count, timeout)
+        except TimeoutError as e:
+            if attempt == max_retries - 1:
+                # Last attempt, re-raise the error
+                raise e
+
+            print(f'Attempt {attempt + 1} failed, retrying with longer timeout...')
+            time.sleep(2)  # Brief pause between retries
+
+    # This should never be reached due to the re-raise above
+    raise TimeoutError(f'Failed to receive events after {max_retries} attempts')
+
+
+def extract_test_run_id_from_correlation_id(correlation_id: str) -> Optional[str]:
+    """
+    Extract the test run ID from a correlation ID.
+
+    Args:
+        correlation_id: Correlation ID in format TEST-{test_run_id}-{original_id}
+
+    Returns:
+        Test run ID or None if format is invalid
+    """
+    import re
+
+    # Run IDs produced by generate_test_run_id() above look like 'test-<uuid4>'.
+    # They contain hyphens themselves, so a naive split('-') would return only
+    # the literal 'test' fragment; match the full run ID shape instead.
+    match = re.match(
+        r'^TEST-(test-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})',
+        correlation_id,
+    )
+    if match is None:
+        return None
+
+    return match.group(1)
+
+
+def format_event_summary(events: List[Dict]) -> str:
+    """
+    Format a summary of captured events for debugging.
+
+    Args:
+        events: List of captured events
+
+    Returns:
+        Formatted string with event summary
+    """
+    if not events:
+        return 'No events captured'
+
+    summary = f'Captured {len(events)} events:\n'
+
+    for i, event in enumerate(events, 1):
+        event_data = json.loads(event['event_data'])
+        summary += f'  {i}. {event_data["event_type"]} - Task: {event_data["task_id"]}\n'
+        summary += f'     Timestamp: {event["event_timestamp"]}\n'
+
+        if 'task_data' in event_data:
+            task_data = event_data['task_data']
+            summary += f'     Title: {task_data.get("title", "N/A")}\n'
+            summary += f'     Priority: {task_data.get("priority", "N/A")}\n'
+
+    return summary
+
+
+def create_eventbridge_event(
+    source: str = 'task-api', detail_type: str = 'Task Event', detail: Optional[Dict] = None, event_bus_name: str = 'default'
+) -> Dict[str, Any]:
+    """
+    Create an EventBridge event for testing.
+
+    Args:
+        source: Event source
+        detail_type: Event detail type
+        detail: Event detail payload
+        event_bus_name: EventBridge bus name
+
+    Returns:
+        EventBridge event dictionary
+    """
+    return {'Source': source, 'DetailType': detail_type, 'Detail': json.dumps(detail or {}), 'EventBusName': event_bus_name, 'Time': time.time()}
diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/unit/README.md b/python-test-samples/cns427-testable-serverless-architecture/tests/unit/README.md
new file mode 100644
index 00000000..a15c12ca
--- /dev/null
+++ b/python-test-samples/cns427-testable-serverless-architecture/tests/unit/README.md
@@ -0,0 +1,822 @@
+# Unit Tests
+
+This directory contains unit tests that exercise individual components in isolation using **in-memory fakes** instead of mocks. This approach provides realistic behavior while keeping execution fast and debugging easy.
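+
+As a quick taste of the approach (a hedged sketch -- the fakes are the ones under `tests/shared/fakes/`, while the `TaskService` wiring and the `create_task` signature shown here are illustrative):
+
+```python
+from task_api.domain.task_service import TaskService
+from tests.shared.fakes import InMemoryEventPublisher, InMemoryTaskRepository
+
+def test_create_task_persists_and_publishes():
+    repository = InMemoryTaskRepository()
+    publisher = InMemoryEventPublisher()
+    service = TaskService(repository, publisher)  # real domain logic, fake edges
+
+    task = service.create_task(title='Write report', priority='high')  # signature assumed
+
+    # Assertions read as facts about state, not mock call counts
+    assert repository.get_task(task.task_id) is not None
+    assert publisher.has_event_with_task_id(task.task_id)
+```
+
+Because the fakes hold real state, a failing test shows you actual data (the stored task, the captured event) instead of a mismatched mock expectation.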
+ +## Overview + +Unit tests validate individual components without external dependencies: +- **Domain Logic** - Business rules and validation +- **Lambda Handlers** - Request/response processing +- **Data Models** - Validation and serialization +- **Event Contracts** - Schema compliance and consumer requirements +- **In-Memory Fakes** - Realistic behavior without AWS services + +## Test Philosophy + +### In-Memory Fakes Over Mocks +- **Real Implementations**: Use actual business logic with in-memory storage +- **No Mock Setup**: Avoid complex mock configuration and verification +- **Realistic Behavior**: Fakes behave like real dependencies +- **Easy Debugging**: Real data flow makes issues easier to trace + +## Test Structure + +``` +tests/unit/ +β”œβ”€β”€ conftest.py # Shared fixtures +β”œβ”€β”€ domain/ # Domain logic tests +β”‚ β”œβ”€β”€ test_business_rules.py # Business rule validation +β”‚ β”œβ”€β”€ test_notification_service.py # Notification domain logic +β”‚ └── test_task_service.py # Task domain service +β”œβ”€β”€ handlers/ # Lambda handler tests +β”‚ β”œβ”€β”€ test_notification_handler.py # Event processing handler +β”‚ └── test_task_handler.py # API request handler +β”œβ”€β”€ models/ # Data model tests +β”‚ β”œβ”€β”€ test_event_contracts.py # Event schema validation +β”‚ └── test_task_model.py # Task domain model +└── test_helpers.py # Test utility functions +``` + +## Test Categories + +### 1. Domain Tests (`domain/`) + +#### Business Rules (`test_business_rules.py`) +**Purpose**: Test business rule validation in isolation + +**Coverage**: +- βœ… Circular dependency detection +- βœ… Status transition validation +- βœ… Priority validation +- βœ… Dependency chain validation +- βœ… Business constraint enforcement + +#### Task Service (`test_task_service.py`) +**Purpose**: Test core domain service logic + +**Coverage**: +- βœ… CRUD operations +- βœ… Business rule enforcement +- βœ… Event publishing +- βœ… Error handling +- βœ… Data transformations + +#### Notification Service (`test_notification_service.py`) +**Purpose**: Test notification domain logic + +**Coverage**: +- βœ… Event processing +- βœ… Notification formatting +- βœ… Error handling +- βœ… Event type routing + +### 2. 
Handler Tests (`handlers/`) + +#### Task Handler (`test_task_handler.py`) +**Purpose**: Test Lambda handler with realistic API Gateway events + +**Approach**: +- Use real `lambda_handler` function with actual API Gateway event structure +- Inject in-memory fakes for repository and event publisher +- Test complete request β†’ domain β†’ response flow + +**Coverage**: +- βœ… HTTP status codes (201, 200, 204, 400, 404, 500) +- βœ… Request parsing and validation +- βœ… Response formatting +- βœ… Error handling +- βœ… Data persistence verification +- βœ… Event publishing verification + +**Example Test**: +```python +def test_create_task_returns_201(self, task_service, lambda_context): + # GIVEN realistic API Gateway event + event = create_api_gateway_event( + method="POST", + path="/tasks", + body={"title": "Test Task", "priority": "high"} + ) + + # WHEN processing through real handler + with patch('services.task_service.handler.task_service', task_service): + response = lambda_handler(event, lambda_context) + + # THEN verify HTTP response + assert response["statusCode"] == 201 + body = json.loads(response["body"]) + assert body["title"] == "Test Task" + assert body["priority"] == "high" +``` + +#### Notification Handler (`test_notification_handler.py`) +**Purpose**: Test event processing handler + +**Coverage**: +- βœ… EventBridge event parsing +- βœ… Notification processing logic +- βœ… Error handling +- βœ… Event type routing + +### 3. Model Tests (`models/`) + +#### Task Model (`test_task_model.py`) +**Purpose**: Test task domain model and validation + +**Coverage**: +- βœ… Model creation and validation +- βœ… Field constraints and validation rules +- βœ… Serialization/deserialization +- βœ… Model methods and properties +- βœ… Status and priority enums +- βœ… Dependency management + +#### Event Contracts (`test_event_contracts.py`) +**Purpose**: Test event schema compliance and consumer requirements + +**What are Consumer-Driven Contract Tests?** + +Consumer-driven contract tests are owned by the **consumer** (subscriber) of events, not the producer (publisher). They validate that: +1. Events contain all fields the consumer needs +2. Field types and formats match consumer expectations +3. EventBridge rule patterns correctly route events +4. 
Schema changes don't break downstream consumers + +**When to Use Contract Tests:** + +Contract tests become critical in **event-driven architectures** with: +- **Multiple teams** managing different publishers and subscribers +- **Unknown subscribers** where the publisher doesn't know all consumers +- **Rapidly evolving schemas** where producers change events frequently +- **Loose coupling** where consumers may not use all event fields + +**When NOT to Use Contract Tests:** + +For this Task API application, contract tests are **less critical** because: +- **Single team ownership**: Task service and notification service owned by same team +- **Known consumers**: Publisher knows all subscribers +- **Coordinated changes**: Schema changes can be synchronized across services +- **Business impact**: Breaking changes have low business impact + +In real-world scenarios, evaluate: +- **Team boundaries**: Different teams = higher value for contract tests +- **Business impact**: Critical consumers = invest in contract tests +- **Change frequency**: Rapid evolution = contract tests prevent breakage +- **Coupling**: Tight coupling = less need for contract tests + +**Running Contract Tests in CI/CD:** + +Contract tests should run in the **producer's pipeline** to catch breaking changes before deployment: + +```yaml +# Producer pipeline (Task Service) +- name: Run Contract Tests + run: poetry run pytest tests/unit/models/test_event_contracts.py -v + +- name: Validate EventBridge Rule Patterns + run: poetry run pytest tests/unit/models/test_event_contracts.py::TestEventContracts::test_eventbridge_rule_pattern_matching -v +``` + +**Coverage**: +- βœ… Schema validation using Pydantic +- βœ… Consumer-specific field requirements +- βœ… EventBridge rule pattern matching +- βœ… Schema drift detection +- βœ… Breaking change prevention + +**Test Scenarios**: + +1. **Schema Validation**: Ensures events match expected structure + ```python + def test_schema_validation(self): + # Validates TaskCreated event has all required fields + # with correct types (string, enum, list, etc.) + ``` + +2. **Consumer Contracts**: Validates consumer-specific needs + ```python + def test_consumer_contract_notification_service(self): + # Ensures notification service gets task_id, title, status + ``` + +3. **Rule Pattern Matching**: Validates EventBridge routing + ```python + def test_eventbridge_rule_pattern_matching(self): + # Ensures events match rule patterns for correct routing + ``` + +4. **Contract Violation Detection**: Catches breaking changes + ```python + def test_contract_violation_detection(self): + # Detects missing fields, wrong types, invalid enums + ``` + +## In-Memory Fakes + +Located in `tests/shared/fakes/`, these provide realistic behavior without AWS dependencies. + +### `InMemoryTaskRepository` +Simulates DynamoDB operations with in-memory dictionary storage. + +**Features**: +- Full CRUD operations +- Optimistic locking with version checking +- Pagination simulation +- Error conditions (duplicates, version conflicts) +- Helper methods for test verification + +**Usage**: +```python +from tests.shared.fakes.in_memory_task_repository import InMemoryTaskRepository + +@pytest.fixture +def repository(): + return InMemoryTaskRepository() + +def test_create_task(self, repository): + task = Task(title="Test Task") + result = repository.create_task(task) + + assert result == task + assert len(repository.tasks) == 1 +``` + +### `InMemoryEventPublisher` +Simulates EventBridge publishing by capturing events in memory. 
+ +**Features**: +- Captures published events in list +- Event filtering and verification methods +- No actual EventBridge calls +- Helper methods for assertions + +**Usage**: +```python +from tests.shared.fakes.in_memory_event_publisher import InMemoryEventPublisher + +@pytest.fixture +def publisher(): + return InMemoryEventPublisher() + +def test_event_publishing(self, publisher): + publisher.publish_event(event_data) + + assert len(publisher.events) == 1 + assert publisher.events[0]['DetailType'] == 'TaskCreated' +``` + +### `InMemoryNotificationService` +Simulates notification processing by storing notifications in memory. + +**Features**: +- Captures processed events and notifications +- Categorizes notifications by type +- Verification methods for test assertions + +**Usage**: +```python +from tests.shared.fakes.in_memory_notification_service import InMemoryNotificationService + +@pytest.fixture +def notification_service(): + return InMemoryNotificationService() + +def test_notification(self, notification_service): + notification_service.process_event(event) + + assert len(notification_service.notifications) == 1 +``` + +## Running Tests + +### All Unit Tests +```bash +# Using Make +make test-unit + +# Or using pytest directly +poetry run pytest tests/unit -v + +# Or using test script +poetry run python scripts/testing.py --type unit +``` + +### Specific Test Categories +```bash +# Domain tests +poetry run pytest tests/unit/domain -v + +# Handler tests +poetry run pytest tests/unit/handlers -v + +# Model tests +poetry run pytest tests/unit/models -v +``` + +### Specific Test Files +```bash +# Business rules +poetry run pytest tests/unit/domain/test_business_rules.py -v + +# Task service +poetry run pytest tests/unit/domain/test_task_service.py -v + +# Task handler +poetry run pytest tests/unit/handlers/test_task_handler.py -v + +# Event contracts +poetry run pytest tests/unit/models/test_event_contracts.py -v + +# Task model +poetry run pytest tests/unit/models/test_task_model.py -v +``` + +### Specific Test Methods +```bash +# Run specific test method +poetry run pytest tests/unit/models/test_event_contracts.py::TestEventContracts::test_schema_validation -v + +# Run tests matching pattern +poetry run pytest tests/unit -k "contract" -v +``` + +### With Coverage +```bash +# Generate coverage report +poetry run pytest tests/unit --cov=services --cov=shared --cov-report=html + +# View coverage report +open htmlcov/index.html + +# Show missing lines +poetry run pytest tests/unit --cov=services --cov=shared --cov-report=term-missing +``` + +### With Verbose Output +```bash +# See detailed test output +poetry run pytest tests/unit -v -s + +# Stop on first failure +poetry run pytest tests/unit -v -x +``` + +## Test Patterns + +### Test Structure +All tests follow the **Given/When/Then** pattern: + +```python +def test_behavior_description(self, fixtures): + # GIVEN - Setup test data and conditions + setup_data = create_test_data() + + # WHEN - Execute the behavior being tested + result = component_under_test.method(setup_data) + + # THEN - Assert expected outcomes + assert result.status == expected_status + assert result.data == expected_data + + # AND - Verify side effects (optional) + assert fake_dependency.was_called_correctly() +``` + +### Test Naming Convention +- **Files**: `test_{component_name}.py` +- **Classes**: `Test{ComponentName}` +- **Methods**: `test_{behavior}_returns_{expected_result}` + +**Examples**: +- `test_create_task_returns_201` +- 
`test_get_nonexistent_task_returns_404`
+- `test_validation_error_returns_400`
+- `test_update_task_with_version_conflict_raises_error`
+
+### Fixture Usage
+```python
+@pytest.fixture
+def repository():
+    """Create fresh in-memory repository for each test."""
+    return InMemoryTaskRepository()
+
+@pytest.fixture
+def publisher():
+    """Create fresh in-memory publisher for each test."""
+    return InMemoryEventPublisher()
+
+@pytest.fixture
+def task_service(repository, publisher):
+    """Create task service with in-memory dependencies."""
+    return TaskService(repository, publisher)
+```
+
+## Test Data Creation
+
+### Helper Functions
+```python
+def create_api_gateway_event(
+    method: str = "GET",
+    path: str = "/tasks",
+    body: Optional[Dict] = None,
+    query_parameters: Optional[Dict] = None,
+    path_parameters: Optional[Dict] = None
+) -> Dict[str, Any]:
+    """Create realistic API Gateway event for testing."""
+    return {
+        "httpMethod": method,
+        "path": path,
+        "pathParameters": path_parameters or {},
+        "queryStringParameters": query_parameters or {},
+        "body": json.dumps(body) if body else None,
+        "headers": {"Content-Type": "application/json"},
+        "requestContext": {"requestId": "test-123", "stage": "test"}
+    }
+```
+
+### Test Data Factories
+```python
+def create_test_task(title: str = "Test Task", **kwargs) -> Task:
+    """Create a test task with default values."""
+    defaults = {
+        "title": title,
+        "description": "Test description",
+        "priority": TaskPriority.MEDIUM,
+        "status": TaskStatus.PENDING
+    }
+    defaults.update(kwargs)
+    return Task(**defaults)
+```
+
+## Coverage Requirements
+
+### Minimum Coverage
+- **Overall**: 80% minimum (enforced by pytest configuration)
+- **Per File**: Aim for 90%+ coverage on critical components
+- **Branches**: Include both positive and negative test cases
+
+### Coverage Report
+```bash
+# Generate detailed coverage report
+poetry run pytest tests/unit --cov=services --cov=shared --cov-report=html --cov-report=term-missing
+
+# View missing lines
+poetry run pytest tests/unit --cov=services --cov=shared --cov-report=term-missing
+```
+
+## Best Practices
+
+### 1. Test Independence
+- Each test should be completely independent
+- Use fresh fixtures for each test
+- No shared state between tests
+- Clean setup and teardown
+
+### 2. Clear Test Intent
+- Use descriptive test names that explain the behavior
+- Follow Given/When/Then structure
+- Test one behavior per test method
+- Clear assertions with meaningful error messages
+
+### 3. Comprehensive Testing
+- Test both happy path and error scenarios
+- Include edge cases and boundary conditions
+- Verify side effects and state changes
+- Test validation and error messages
+
+### 4. 
Realistic Test Data +- Use realistic data that represents actual usage +- Test with various data combinations +- Include both valid and invalid scenarios +- Use consistent test data creation patterns + +## Debugging Tests + +### Common Issues + +**Test Isolation Problems** +```python +# Problem: Shared state between tests +class TestExample: + shared_data = [] # DON'T DO THIS + + def test_one(self): + self.shared_data.append("item") + assert len(self.shared_data) == 1 + + def test_two(self): + # This might fail if test_one ran first + assert len(self.shared_data) == 0 + +# Solution: Use fixtures +@pytest.fixture +def fresh_data(): + return [] + +def test_one(fresh_data): + fresh_data.append("item") + assert len(fresh_data) == 1 +``` + +**Assertion Errors** +```python +# Problem: Unclear assertion errors +assert response == expected # Hard to debug when it fails + +# Solution: Specific assertions with messages +assert response["statusCode"] == 201, f"Expected 201, got {response['statusCode']}" +assert response["body"]["title"] == "Test Task" +``` + +### Debug Mode +```bash +# Run with verbose output and stop on first failure +poetry run pytest tests/unit -v -x + +# Run with pdb debugger on failures +poetry run pytest tests/unit --pdb + +# Run specific test with output +poetry run pytest tests/unit/test_task_handler.py::TestTaskHandler::test_create_task_returns_201 -v -s +``` + +## Performance + +### Test Execution Speed +- **Target**: < 1 second for all unit tests +- **Actual**: ~100-500ms typical execution time +- **Factors**: In-memory operations, no network calls, minimal setup + +### Optimization Tips +- Use fixtures efficiently (session, module, function scope) +- Avoid unnecessary test data creation +- Keep fakes simple and focused +- Run tests in parallel when possible + +## Troubleshooting + +### Common Issues + +**Import Errors** +``` +Error: ModuleNotFoundError: No module named 'services' +Solution: Ensure you're running from project root and dependencies are installed +``` + +```bash +# Install dependencies +poetry install + +# Run from project root +cd /path/to/project +poetry run pytest tests/unit +``` + +**Fixture Not Found** +``` +Error: fixture 'task_service' not found +Solution: Check conftest.py for fixture definitions +``` + +```bash +# Check available fixtures +poetry run pytest tests/unit --fixtures + +# Verify fixture scope and location +cat tests/unit/conftest.py +``` + +**Pydantic Validation Errors in Contract Tests** +``` +Error: ValidationError: 1 validation error for TaskDetailSchema +Solution: Event schema doesn't match contract - this is expected behavior for drift detection +``` + +This is intentional for `test_contract_violation_detection` - it validates that schema changes are caught. 
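+
+As a rough illustration, a drift-detection test of this kind asserts that `ValidationError` *is* raised for a malformed event. The sketch below is hypothetical (the real schemas live in `test_event_contracts.py`) and assumes a minimal `TaskCreatedDetailSchema` with two required fields:
+
+```python
+import pytest
+from pydantic import BaseModel, ValidationError
+
+
+class TaskCreatedDetailSchema(BaseModel):
+    """Hypothetical, trimmed-down contract for TaskCreated events."""
+
+    task_id: str
+    title: str
+
+
+def test_contract_violation_is_detected():
+    # GIVEN an event payload that drifted from the contract (missing task_id)
+    drifted_payload = {'title': 'Test Task'}
+
+    # THEN schema validation should fail loudly rather than pass silently
+    with pytest.raises(ValidationError):
+        TaskCreatedDetailSchema(**drifted_payload)
+```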
+
+**Test Isolation Issues**
+```
+Error: Test passes alone but fails when run with others
+Solution: Ensure proper fixture cleanup and no shared state
+```
+
+```python
+# Use fresh fixtures for each test
+@pytest.fixture
+def repository():
+    return InMemoryTaskRepository()  # New instance each time
+
+# Avoid class-level shared state
+class TestExample:
+    shared_data = []  # DON'T DO THIS
+```
+
+### Performance Issues
+```bash
+# Check slow tests
+poetry run pytest tests/unit --durations=10
+
+# Run tests in parallel (requires pytest-xdist)
+poetry run pytest tests/unit -n auto
+```
+
+## Contributing
+
+### Adding New Tests
+1. **Choose the right category**: domain, handlers, or models
+2. **Follow naming conventions**: `test_{behavior}_returns_{expected_result}`
+3. **Use appropriate fixtures**: Defined in `conftest.py`
+4. **Include both positive and negative cases**
+5. **Add docstrings** explaining test purpose
+6. **Ensure test independence**: No shared state
+
+### Adding Event Contract Tests
+When adding new event types:
+
+1. **Define Pydantic schema** in `test_event_contracts.py`:
+   ```python
+   class NewEventDetailSchema(BaseModel):
+       field1: str
+       field2: int
+   ```
+
+2. **Update validation function** to handle new event type
+
+3. **Add schema validation test**:
+   ```python
+   def test_new_event_schema_validation(self):
+       # Test event matches schema
+   ```
+
+4. **Add consumer contract test** if needed:
+   ```python
+   def test_consumer_contract_for_new_event(self):
+       # Test consumer-specific requirements
+   ```
+
+5. **Update rule pattern test** if routing changes
+
+### Updating In-Memory Fakes
+When updating fakes in `tests/shared/fakes/`:
+
+1. **Keep behavior consistent** with real AWS services
+2. **Update all affected tests**
+3. **Add tests for new fake functionality**
+4. **Document any behavior differences**
+5. 
**Maintain simplicity** - fakes should be simple and focused + +### Test Review Checklist +- [ ] Tests are independent and isolated +- [ ] Clear test names and documentation +- [ ] Both positive and negative cases covered +- [ ] Appropriate use of fixtures +- [ ] Realistic test data +- [ ] Clear assertions with good error messages +- [ ] No external dependencies +- [ ] Fast execution (< 1 second total) +- [ ] Contract tests run in producer pipeline (if applicable) + +## When Contract Tests Matter + +### High Value Scenarios +Contract tests provide significant value when: + +1. **Multiple Teams**: Different teams own publishers and subscribers + - Team A owns Task Service (publisher) + - Team B owns Analytics Service (subscriber) + - Team C owns Audit Service (subscriber) + - Contract tests prevent Team A from breaking Teams B and C + +2. **Unknown Subscribers**: Publisher doesn't know all consumers + - Event bus pattern with dynamic subscription + - Third-party integrations + - Future services not yet built + - Contract tests document what consumers can depend on + +3. **Rapid Evolution**: Producer changes events frequently + - Adding new fields (usually safe) + - Removing fields (breaking change) + - Changing field types (breaking change) + - Contract tests catch breaking changes before deployment + +4. **High Business Impact**: Breaking changes affect critical systems + - Payment processing + - Security auditing + - Compliance reporting + - Contract tests prevent production incidents + +### Low Value Scenarios +Contract tests provide less value when: + +1. **Single Team Ownership**: Same team owns publisher and subscribers + - This Task API application (task service + notification service) + - Changes can be coordinated across services + - Team knows all consumers and their requirements + +2. **Known Consumers**: Publisher knows all subscribers + - Direct service-to-service communication + - Tightly coupled services + - Small number of well-known consumers + +3. **Stable Schemas**: Events rarely change + - Mature APIs with stable contracts + - Backward compatibility maintained + - Versioned events + +4. **Low Business Impact**: Breaking changes have minimal consequences + - Internal development tools + - Non-critical notifications + - Easily recoverable failures + +### Decision Framework + +Ask these questions to decide if contract tests are worth the investment: + +1. **Who owns the services?** + - Same team β†’ Lower priority + - Different teams β†’ Higher priority + +2. **How many consumers?** + - 1-2 known consumers β†’ Lower priority + - Many or unknown consumers β†’ Higher priority + +3. **How often do schemas change?** + - Rarely β†’ Lower priority + - Frequently β†’ Higher priority + +4. **What's the business impact of breakage?** + - Low impact β†’ Lower priority + - High impact β†’ Higher priority + +5. **Can changes be coordinated?** + - Yes, easily β†’ Lower priority + - No, difficult β†’ Higher priority + +### Implementation Strategy + +If contract tests are valuable for your use case: + +1. **Consumer-owned tests**: Each consumer writes their own contract tests +2. **Run in producer pipeline**: Catch breaking changes before deployment +3. **Version events**: Use semantic versioning for event schemas +4. **Document contracts**: Clear documentation of what consumers can depend on +5. 
**Gradual rollout**: Add contract tests incrementally, starting with critical consumers
+
+## Next Steps
+
+- **[Integration Tests](../integration/README.md)** - Test with real AWS services
+- **[E2E Tests](../e2e/README.md)** - Test complete workflows
+- **[Testing Guide](../../docs/testing-guide.md)** - Overall testing strategy
+- **[Architecture Guide](../../docs/architecture.md)** - Understand the system design
\ No newline at end of file
diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/unit/__init__.py b/python-test-samples/cns427-testable-serverless-architecture/tests/unit/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/unit/conftest.py b/python-test-samples/cns427-testable-serverless-architecture/tests/unit/conftest.py
new file mode 100644
index 00000000..f134cd91
--- /dev/null
+++ b/python-test-samples/cns427-testable-serverless-architecture/tests/unit/conftest.py
@@ -0,0 +1,223 @@
+"""Unit test configuration - automatically disables socket connections."""
+
+import pytest
+
+
+@pytest.fixture(scope='session', autouse=True)
+def disable_socket_for_unit_tests():
+    """Automatically disable socket connections for all unit tests."""
+    import pytest_socket
+
+    pytest_socket.disable_socket()
+    yield
+    pytest_socket.enable_socket()
+
+
+@pytest.fixture
+def socket_disabled():
+    """Fixture that indicates sockets are disabled.
+
+    This fixture doesn't do anything itself - the actual socket disabling
+    is handled by the disable_socket_for_unit_tests fixture above.
+    This fixture is used in test signatures to make it explicit that
+    the test requires socket disabling.
+ """ + pass + + +"""Test configuration and fixtures.""" +from datetime import UTC, datetime + +import pytest + +from services.task_service.domain.exceptions import CircularDependencyError, ConflictError +from services.task_service.models.api import CreateTaskRequest, UpdateTaskRequest +from services.task_service.models.task import Task, TaskPriority, TaskStatus + + +class FakeTaskService: + """Fake TaskService for testing HTTP behavior.""" + + def __init__(self): + self.should_raise_circular_dependency = False + self.should_raise_conflict_error = False + self.should_raise_generic_error = False + self.should_raise_value_error = False + self.return_task = None + self.return_none = False + self.value_error_message = 'Task not found' + + def create_task(self, request: CreateTaskRequest) -> Task: + """Create a task from CreateTaskRequest.""" + if self.should_raise_generic_error: + raise Exception('Database connection failed') + + if self.should_raise_circular_dependency: + raise CircularDependencyError('Circular dependency detected') + + # Return fake task + task = Task( + task_id='test-task-123', + title=request.title, + description=request.description, + priority=request.priority, + dependencies=request.dependencies, + status=TaskStatus.PENDING, + created_at=datetime.now(UTC), + updated_at=datetime.now(UTC), + version=int(datetime.now(UTC).timestamp() * 1000), + ) + return task + + def get_task(self, task_id: str) -> Task: + """Get a task by ID.""" + if self.should_raise_generic_error: + raise Exception('Database connection failed') + + if self.should_raise_value_error: + raise ValueError(self.value_error_message) + + if self.return_none: + raise ValueError(f'Task not found: {task_id}') + + if self.return_task: + return self.return_task + + # Return fake task + task = Task( + task_id=task_id, + title='Retrieved Task', + description='Test task', + priority=TaskPriority.MEDIUM, + dependencies=[], + status=TaskStatus.PENDING, + created_at=datetime.now(UTC), + updated_at=datetime.now(UTC), + version=int(datetime.now(UTC).timestamp() * 1000), + ) + return task + + def list_tasks(self, limit: int = 50, next_token: str = None): + """List tasks with pagination.""" + tasks = [ + Task( + task_id='task-1', + title='Task 1', + description='First task', + priority=TaskPriority.MEDIUM, + dependencies=[], + status=TaskStatus.PENDING, + created_at=datetime.now(UTC), + updated_at=datetime.now(UTC), + version=int(datetime.now(UTC).timestamp() * 1000), + ), + Task( + task_id='task-2', + title='Task 2', + description='Second task', + priority=TaskPriority.HIGH, + dependencies=[], + status=TaskStatus.IN_PROGRESS, + created_at=datetime.now(UTC), + updated_at=datetime.now(UTC), + version=int(datetime.now(UTC).timestamp() * 1000), + ), + ] + return tasks, None # Return tasks and next_token + + def update_task(self, task_id: str, request: UpdateTaskRequest) -> Task: + """Update a task.""" + if self.should_raise_value_error: + raise ValueError(self.value_error_message) + + if self.should_raise_circular_dependency: + raise CircularDependencyError('Circular dependency detected') + + if self.should_raise_conflict_error: + now = datetime.now(UTC) + current_task_dict = { + 'task_id': task_id, + 'title': 'Current Task', + 'description': 'Current description', + 'priority': 'medium', + 'dependencies': [], + 'status': 'pending', + 'created_at': now.isoformat(), + 'updated_at': now.isoformat(), + 'version': 9876543210, + } + raise ConflictError('Task was modified by another process', current_task=current_task_dict) + + # 
Return updated task + task = Task( + task_id=task_id, + title=request.title or 'Updated Task', + description=request.description, + priority=request.priority or TaskPriority.MEDIUM, + dependencies=request.dependencies or [], + status=TaskStatus(request.status) if request.status else TaskStatus.PENDING, + created_at=datetime.now(UTC), + updated_at=datetime.now(UTC), + version=int(datetime.now(UTC).timestamp() * 1000), + ) + return task + + def delete_task(self, task_id: str) -> None: + """Delete a task.""" + if self.should_raise_value_error: + raise ValueError(self.value_error_message) + + if self.return_none: + raise ValueError(f'Task not found: {task_id}') + + # Success - no return value + + +@pytest.fixture +def fake_task_service(): + """Create fake task service and inject it into handler.""" + fake_service = FakeTaskService() + + # Inject fake service into handler module + import services.task_service.handler as handler + + original_service = handler.task_service + handler.task_service = fake_service + + yield fake_service + + # Restore original service + handler.task_service = original_service + + +class FakeNotificationService: + """Fake NotificationService for testing HTTP behavior.""" + + def __init__(self): + self.should_raise_error = False + self.processed_events = [] + + def process_task_event(self, event_type: str, task_data: dict) -> None: + """Process a task event and track it.""" + if self.should_raise_error: + raise Exception('Notification processing failed') + + # Track processed events for verification + self.processed_events.append({'event_type': event_type, 'task_data': task_data}) + + +@pytest.fixture +def fake_notification_service(): + """Create fake notification service and inject it into handler.""" + fake_service = FakeNotificationService() + + # Inject fake service into handler module + import services.notification_service.handler as handler + + original_service = handler.notification_service + handler.notification_service = fake_service + + yield fake_service + + # Restore original service + handler.notification_service = original_service diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/unit/domain/__init__.py b/python-test-samples/cns427-testable-serverless-architecture/tests/unit/domain/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/unit/domain/test_business_rules.py b/python-test-samples/cns427-testable-serverless-architecture/tests/unit/domain/test_business_rules.py new file mode 100644 index 00000000..ce7bb2e6 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/tests/unit/domain/test_business_rules.py @@ -0,0 +1,187 @@ +"""Unit tests for business rules - pure logic, no mocks needed. + +These tests verify the core business logic in isolation. +No external dependencies, no mocks, just pure functions and assertions. 
+""" + +import pytest + +from services.task_service.domain.business_rules import ( + can_transition_to, + compare_version_tokens, + has_circular_dependency, + is_valid_version_token, + validate_version_token_for_update, +) +from services.task_service.models.task import TaskStatus + + +class TestVersionTokenValidation: + """Test version token validation rules.""" + + def test_valid_version_token_positive_integer(self): + """Valid version tokens are positive integer strings.""" + assert is_valid_version_token('1234567890') is True + assert is_valid_version_token('1') is True + assert is_valid_version_token('999999999999') is True + + def test_invalid_version_token_zero(self): + """Zero is not a valid version token.""" + assert is_valid_version_token('0') is False + + def test_invalid_version_token_negative(self): + """Negative numbers are not valid version tokens.""" + assert is_valid_version_token('-1') is False + assert is_valid_version_token('-123') is False + + def test_invalid_version_token_empty(self): + """Empty string is not a valid version token.""" + assert is_valid_version_token('') is False + + def test_invalid_version_token_non_numeric(self): + """Non-numeric strings are not valid version tokens.""" + assert is_valid_version_token('abc') is False + assert is_valid_version_token('12.34') is False + assert is_valid_version_token('12a34') is False + + def test_invalid_version_token_none(self): + """None is not a valid version token.""" + assert is_valid_version_token(None) is False + + def test_compare_version_tokens_older(self): + """Older token should return -1.""" + assert compare_version_tokens('100', '200') == -1 + assert compare_version_tokens('1', '999') == -1 + + def test_compare_version_tokens_newer(self): + """Newer token should return 1.""" + assert compare_version_tokens('200', '100') == 1 + assert compare_version_tokens('999', '1') == 1 + + def test_compare_version_tokens_equal(self): + """Equal tokens should return 0.""" + assert compare_version_tokens('123', '123') == 0 + assert compare_version_tokens('1', '1') == 0 + + def test_compare_version_tokens_invalid_raises_error(self): + """Comparing invalid tokens should raise ValueError.""" + with pytest.raises(ValueError, match='Invalid version token'): + compare_version_tokens('invalid', '123') + + with pytest.raises(ValueError, match='Invalid version token'): + compare_version_tokens('123', 'invalid') + + def test_validate_version_token_for_update_valid(self): + """Valid version token should not raise error.""" + validate_version_token_for_update('123456') # Should not raise + + def test_validate_version_token_for_update_empty_raises_error(self): + """Empty version token should raise ValueError.""" + with pytest.raises(ValueError, match='required for update'): + validate_version_token_for_update('') + + def test_validate_version_token_for_update_invalid_format_raises_error(self): + """Invalid format should raise ValueError.""" + with pytest.raises(ValueError, match='Invalid version_token format'): + validate_version_token_for_update('invalid') + + with pytest.raises(ValueError, match='Invalid version_token format'): + validate_version_token_for_update('0') + + +class TestStatusTransitions: + """Test task status transition rules.""" + + def test_pending_to_in_progress_allowed(self): + """PENDING -> IN_PROGRESS is allowed.""" + assert can_transition_to(TaskStatus.PENDING, TaskStatus.IN_PROGRESS) is True + + def test_pending_to_completed_allowed(self): + """PENDING -> COMPLETED is allowed (skip in_progress).""" + assert 
can_transition_to(TaskStatus.PENDING, TaskStatus.COMPLETED) is True + + def test_pending_to_pending_not_allowed(self): + """PENDING -> PENDING is not allowed (no-op).""" + assert can_transition_to(TaskStatus.PENDING, TaskStatus.PENDING) is False + + def test_in_progress_to_completed_allowed(self): + """IN_PROGRESS -> COMPLETED is allowed.""" + assert can_transition_to(TaskStatus.IN_PROGRESS, TaskStatus.COMPLETED) is True + + def test_in_progress_to_pending_not_allowed(self): + """IN_PROGRESS -> PENDING is not allowed (can't go backwards).""" + assert can_transition_to(TaskStatus.IN_PROGRESS, TaskStatus.PENDING) is False + + def test_in_progress_to_in_progress_not_allowed(self): + """IN_PROGRESS -> IN_PROGRESS is not allowed (no-op).""" + assert can_transition_to(TaskStatus.IN_PROGRESS, TaskStatus.IN_PROGRESS) is False + + def test_completed_to_pending_allowed(self): + """COMPLETED -> PENDING is allowed (reopen task).""" + assert can_transition_to(TaskStatus.COMPLETED, TaskStatus.PENDING) is True + + def test_completed_to_in_progress_not_allowed(self): + """COMPLETED -> IN_PROGRESS is not allowed.""" + assert can_transition_to(TaskStatus.COMPLETED, TaskStatus.IN_PROGRESS) is False + + def test_completed_to_completed_not_allowed(self): + """COMPLETED -> COMPLETED is not allowed (no-op).""" + assert can_transition_to(TaskStatus.COMPLETED, TaskStatus.COMPLETED) is False + + +class TestCircularDependencies: + """Test circular dependency detection rules.""" + + def test_self_dependency_is_circular(self): + """Task depending on itself is circular.""" + assert has_circular_dependency('task-1', 'task-1', {}) is True + + def test_no_dependencies_not_circular(self): + """Task with no dependencies is not circular.""" + assert has_circular_dependency('task-1', 'task-2', {}) is False + + def test_simple_circular_dependency(self): + """Simple cycle: A -> B -> A.""" + dependencies = {'task-B': ['task-A']} + assert has_circular_dependency('task-A', 'task-B', dependencies) is True + + def test_three_way_circular_dependency(self): + """Three-way cycle: A -> B -> C -> A.""" + dependencies = {'task-B': ['task-C'], 'task-C': ['task-A']} + assert has_circular_dependency('task-A', 'task-B', dependencies) is True + + def test_complex_circular_dependency(self): + """Complex cycle: A -> B -> C -> D -> B.""" + dependencies = {'task-B': ['task-C'], 'task-C': ['task-D'], 'task-D': ['task-B']} + assert has_circular_dependency('task-A', 'task-B', dependencies) is True + + def test_linear_dependency_not_circular(self): + """Linear chain: A -> B -> C -> D (no cycle).""" + dependencies = {'task-B': ['task-C'], 'task-C': ['task-D'], 'task-D': []} + assert has_circular_dependency('task-A', 'task-B', dependencies) is False + + def test_diamond_dependency_not_circular(self): + """Diamond pattern: A -> B,C -> D (no cycle).""" + dependencies = {'task-B': ['task-D'], 'task-C': ['task-D'], 'task-D': []} + assert has_circular_dependency('task-A', 'task-B', dependencies) is False + assert has_circular_dependency('task-A', 'task-C', dependencies) is False + + def test_multiple_paths_one_circular(self): + """Multiple paths where one creates a cycle.""" + dependencies = { + 'task-B': ['task-C', 'task-D'], + 'task-C': ['task-E'], + 'task-D': ['task-A'], # This creates the cycle + 'task-E': [], + } + assert has_circular_dependency('task-A', 'task-B', dependencies) is True + + def test_deep_dependency_chain_not_circular(self): + """Deep chain without cycle.""" + dependencies = {'task-B': ['task-C'], 'task-C': ['task-D'], 'task-D': 
['task-E'], 'task-E': ['task-F'], 'task-F': []} + assert has_circular_dependency('task-A', 'task-B', dependencies) is False + + def test_dependency_on_unrelated_task(self): + """Dependency on task not in the chain is not circular.""" + dependencies = {'task-B': ['task-X'], 'task-X': ['task-Y'], 'task-Y': []} + assert has_circular_dependency('task-A', 'task-B', dependencies) is False diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/unit/domain/test_notification_service.py b/python-test-samples/cns427-testable-serverless-architecture/tests/unit/domain/test_notification_service.py new file mode 100644 index 00000000..37f6c8eb --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/tests/unit/domain/test_notification_service.py @@ -0,0 +1,92 @@ +"""Unit tests for notification service domain logic.""" + +import pytest + +from services.notification_service.domain.notification_service import NotificationService +from tests.unit.test_helpers import create_task_event_detail + + +class TestNotificationService: + """Unit tests for notification service pure business logic using fakes.""" + + @pytest.fixture(autouse=True) + def setup_env(self, monkeypatch): + """Set up environment variables for tests.""" + monkeypatch.setenv('POWERTOOLS_SERVICE_NAME', 'notification-service-test') + monkeypatch.setenv('LOG_LEVEL', 'INFO') + + @pytest.fixture + def notification_service(self): + """Create notification service instance.""" + return NotificationService() + + def test_process_task_created_event(self, notification_service, caplog): + """Test processing TaskCreated event logs correct information.""" + # GIVEN a TaskCreated event data + event_data = create_task_event_detail(task_id='test-task-123', title='Test Task', status='pending', priority='high') + + # WHEN processing the event + notification_service.process_task_event('TaskCreated', event_data) + + # THEN should log task created notification + assert 'Processing task event: TaskCreated' in caplog.text + assert 'Task created notification: Test Task' in caplog.text + # task_id is in structured logging extra fields, not in caplog.text + assert len(caplog.records) >= 2 + + def test_process_task_updated_event(self, notification_service, caplog): + """Test processing TaskUpdated event logs status change.""" + # GIVEN a TaskUpdated event data + event_data = create_task_event_detail(task_id='updated-task-456', title='Updated Task', status='completed', priority='medium') + + # WHEN processing the event + notification_service.process_task_event('TaskUpdated', event_data) + + # THEN should log task updated notification with status + assert 'Processing task event: TaskUpdated' in caplog.text + assert 'Task updated notification: Updated Task (status: completed)' in caplog.text + # task_id is in structured logging extra fields, not in caplog.text + assert len(caplog.records) >= 2 + + def test_process_task_deleted_event(self, notification_service, caplog): + """Test processing TaskDeleted event logs deletion.""" + # GIVEN a TaskDeleted event data (only task_id) + event_data = {'task_id': 'deleted-task-789'} + + # WHEN processing the event + notification_service.process_task_event('TaskDeleted', event_data) + + # THEN should log task deleted notification + assert 'Task deleted notification: deleted-task-789' in caplog.text + assert 'deleted-task-789' in caplog.text + + def test_process_unknown_event_type_ignored(self, notification_service, caplog): + """Test that unknown event types log a warning.""" + # GIVEN an 
event with unknown type + event_data = {'task_id': 'unknown-task-id'} + + # WHEN processing the unknown event + notification_service.process_task_event('UnknownEventType', event_data) + + # THEN should log a warning about unknown event type + assert 'Processing task event: UnknownEventType' in caplog.text + assert 'Unknown event type: UnknownEventType' in caplog.text + + def test_process_multiple_events_different_types(self, notification_service, caplog): + """Test processing multiple events of different types.""" + # GIVEN multiple events of different types + created_event_data = create_task_event_detail(task_id='task-1', title='Task 1', status='pending', priority='medium') + + updated_event_data = create_task_event_detail(task_id='task-2', title='Task 2', status='in_progress', priority='high') + + deleted_event_data = {'task_id': 'task-3'} + + # WHEN processing all events + notification_service.process_task_event('TaskCreated', created_event_data) + notification_service.process_task_event('TaskUpdated', updated_event_data) + notification_service.process_task_event('TaskDeleted', deleted_event_data) + + # THEN all should log appropriate notifications + assert 'Task created notification: Task 1' in caplog.text + assert 'Task updated notification: Task 2' in caplog.text + assert 'Task deleted notification: task-3' in caplog.text diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/unit/domain/test_task_service.py b/python-test-samples/cns427-testable-serverless-architecture/tests/unit/domain/test_task_service.py new file mode 100644 index 00000000..36ae2512 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/tests/unit/domain/test_task_service.py @@ -0,0 +1,275 @@ +"""Unit tests for task service business logic. + +Focus: Test actual business rules and behavior, not pass-through operations. 
+
+- Version conflict detection
+- Circular dependency validation
+- Status transition validation
+- Pagination validation
+- Error handling
+- Repository/publisher interaction patterns
+"""
+
+from datetime import UTC, datetime
+from unittest.mock import Mock
+
+import pytest
+
+from services.task_service.domain.exceptions import CircularDependencyError, ConflictError
+from services.task_service.domain.task_service import TaskService
+from services.task_service.models.api import CreateTaskRequest, UpdateTaskRequest
+from services.task_service.models.task import Task, TaskEventType, TaskPriority, TaskStatus
+
+
+class TestTaskService:
+    """Unit tests for task service business logic."""
+
+    @pytest.fixture
+    def repository(self):
+        """Create mock repository for each test."""
+        return Mock()
+
+    @pytest.fixture
+    def publisher(self):
+        """Create mock publisher for each test."""
+        return Mock()
+
+    @pytest.fixture
+    def service(self, repository, publisher):
+        """Create task service with mocks."""
+        return TaskService(repository, publisher)
+
+    def test_create_task_calls_repository_and_publisher(self, service, repository, publisher):
+        """Test that create_task calls repository and publisher correctly."""
+        # GIVEN a task creation request
+        request = CreateTaskRequest(title='Test Task', priority=TaskPriority.HIGH)
+
+        # Mock repository to return a proper Task object
+        now = datetime.now(UTC)
+        created_task = Task(
+            task_id='test-id',
+            title='Test Task',
+            priority=TaskPriority.HIGH,
+            status=TaskStatus.PENDING,
+            dependencies=[],
+            created_at=now,
+            updated_at=now,
+            version=int(now.timestamp() * 1000),
+        )
+        repository.create_task.return_value = created_task
+
+        # WHEN creating the task
+        service.create_task(request)
+
+        # THEN repository should be called with a Task object
+        repository.create_task.assert_called_once()
+        passed_task = repository.create_task.call_args[0][0]
+        assert isinstance(passed_task, Task)
+        assert passed_task.title == 'Test Task'
+        assert passed_task.priority == TaskPriority.HIGH
+        assert passed_task.status == TaskStatus.PENDING
+
+        # AND event should be published
+        publisher.publish_event.assert_called_once()
+        published_event = publisher.publish_event.call_args[0][0]
+        assert published_event.event_type == TaskEventType.TASK_CREATED
+
+    def test_update_task_calls_repository_and_publisher(self, service, repository, publisher):
+        """Test that update_task calls repository and publisher correctly."""
+        # GIVEN existing task
+        now = datetime.now(UTC)
+        existing_task = Task(
+            task_id='test-id',
+            title='Original',
+            status=TaskStatus.PENDING,
+            priority=TaskPriority.MEDIUM,
+            dependencies=[],
+            created_at=now,
+            updated_at=now,
+            version=int(now.timestamp() * 1000),
+        )
+        repository.get_task.return_value = existing_task
+
+        # Mock update to return a proper Task object
+        updated_task = Task(
+            task_id='test-id',
+            title='Updated',
+            status=TaskStatus.PENDING,
+            priority=TaskPriority.MEDIUM,
+            dependencies=[],
+            created_at=now,
+            updated_at=datetime.now(UTC),
+            version=int(datetime.now(UTC).timestamp() * 1000),
+        )
+        repository.update_task.return_value = updated_task
+
+        # WHEN updating task
+        update_request = UpdateTaskRequest(title='Updated', version=existing_task.version)
+        service.update_task('test-id', update_request)
+
+        # THEN repository should be called
+        repository.get_task.assert_called_once_with('test-id')
+        repository.update_task.assert_called_once()
+
+        # AND update event should be published
+        
publisher.publish_event.assert_called_once() + update_event = publisher.publish_event.call_args[0][0] + assert update_event.event_type == TaskEventType.TASK_UPDATED + + def test_update_task_version_mismatch_raises_conflict_error(self, service, repository): + """BUSINESS RULE: Version mismatch should raise ConflictError.""" + # GIVEN existing task with specific version + now = datetime.now(UTC) + existing_task = Task( + task_id='test-id', + title='Test', + status=TaskStatus.PENDING, + priority=TaskPriority.MEDIUM, + dependencies=[], + created_at=now, + updated_at=now, + version=int(now.timestamp() * 1000), + ) + repository.get_task.return_value = existing_task + + # WHEN attempting to update with wrong version + update_request = UpdateTaskRequest(title='Updated', version=999) # Wrong version + + # THEN should raise ConflictError + with pytest.raises(ConflictError) as exc_info: + service.update_task('test-id', update_request) + + assert 'modified by another process' in str(exc_info.value).lower() + # Repository update should NOT be called + repository.update_task.assert_not_called() + + def test_update_task_invalid_status_transition_raises_error(self, service, repository): + """BUSINESS RULE: Invalid status transitions should be rejected.""" + # GIVEN task in COMPLETED status + now = datetime.now(UTC) + existing_task = Task( + task_id='test-id', + title='Test', + status=TaskStatus.COMPLETED, + priority=TaskPriority.MEDIUM, + dependencies=[], + created_at=now, + updated_at=now, + version=int(now.timestamp() * 1000), + ) + repository.get_task.return_value = existing_task + + # WHEN attempting invalid transition (COMPLETED -> IN_PROGRESS not allowed) + update_request = UpdateTaskRequest(status='in_progress', version=existing_task.version) + + # THEN should raise ValueError + with pytest.raises(ValueError) as exc_info: + service.update_task('test-id', update_request) + + assert 'invalid status transition' in str(exc_info.value).lower() + # Repository update should NOT be called + repository.update_task.assert_not_called() + + def test_update_task_with_circular_dependency_raises_error(self, service, repository): + """BUSINESS RULE: Circular dependencies should be detected.""" + # GIVEN existing task + now = datetime.now(UTC) + existing_task = Task( + task_id='test-id', + title='Test', + status=TaskStatus.PENDING, + priority=TaskPriority.MEDIUM, + dependencies=[], + created_at=now, + updated_at=now, + version=int(now.timestamp() * 1000), + ) + repository.get_task.return_value = existing_task + + # Mock repository to return tasks that would create circular dependency + repository.list_tasks.return_value = ([Mock(task_id='dep-1', dependencies=['test-id'])], None) + + # WHEN attempting to add dependency that creates cycle + update_request = UpdateTaskRequest(dependencies=['dep-1'], version=existing_task.version) + + # THEN should raise CircularDependencyError + with pytest.raises(CircularDependencyError): + service.update_task('test-id', update_request) + + # Repository update should NOT be called + repository.update_task.assert_not_called() + + def test_get_task_not_found_raises_error(self, service, repository): + """BUSINESS RULE: Getting non-existent task should raise ValueError.""" + # GIVEN repository returns None for non-existent task + repository.get_task.return_value = None + + # WHEN getting non-existent task + # THEN should raise ValueError + with pytest.raises(ValueError) as exc_info: + service.get_task('non-existent-id') + + assert 'not found' in str(exc_info.value).lower() + 
repository.get_task.assert_called_once_with('non-existent-id') + + def test_delete_task_calls_repository_and_publisher(self, service, repository, publisher): + """Test that delete_task calls repository and publisher correctly.""" + # GIVEN existing task + now = datetime.now(UTC) + existing_task = Task( + task_id='test-id', + title='Task to Delete', + status=TaskStatus.PENDING, + priority=TaskPriority.MEDIUM, + dependencies=[], + created_at=now, + updated_at=now, + version=int(now.timestamp() * 1000), + ) + repository.get_task.return_value = existing_task + + # WHEN deleting task + service.delete_task('test-id') + + # THEN repository delete should be called with version + repository.get_task.assert_called_once_with('test-id') + repository.delete_task.assert_called_once_with('test-id', existing_task.version) + + # AND delete event should be published + publisher.publish_event.assert_called_once() + delete_event = publisher.publish_event.call_args[0][0] + assert delete_event.event_type == TaskEventType.TASK_DELETED + + def test_delete_task_not_found_raises_error(self, service, repository): + """BUSINESS RULE: Deleting non-existent task should raise ValueError.""" + # GIVEN repository returns None for non-existent task + repository.get_task.return_value = None + + # WHEN deleting non-existent task + # THEN should raise ValueError + with pytest.raises(ValueError) as exc_info: + service.delete_task('non-existent-id') + + assert 'not found' in str(exc_info.value).lower() + # Repository delete should NOT be called + repository.delete_task.assert_not_called() + + def test_validate_pagination_params_default(self, service): + """Test default pagination parameters.""" + result = service._validate_pagination_params(None) + assert result == 50 + + def test_validate_pagination_params_valid(self, service): + """Test valid pagination parameters.""" + result = service._validate_pagination_params(25) + assert result == 25 + + def test_validate_pagination_params_too_small(self, service): + """Test pagination limit too small raises error.""" + with pytest.raises(ValueError): + service._validate_pagination_params(0) + + def test_validate_pagination_params_too_large(self, service): + """Test pagination limit too large raises error.""" + with pytest.raises(ValueError): + service._validate_pagination_params(101) diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/unit/handlers/__init__.py b/python-test-samples/cns427-testable-serverless-architecture/tests/unit/handlers/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/unit/handlers/test_notification_handler.py b/python-test-samples/cns427-testable-serverless-architecture/tests/unit/handlers/test_notification_handler.py new file mode 100644 index 00000000..2181ebd4 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/tests/unit/handlers/test_notification_handler.py @@ -0,0 +1,75 @@ +"""Unit tests for notification handler focusing on event processing and Lambda integration.""" + +import pytest + +from services.notification_service.handler import lambda_handler +from tests.unit.test_helpers import create_eventbridge_event, create_test_context + + +@pytest.fixture +def lambda_context(): + """Fixture for Lambda context.""" + return create_test_context() + + +class TestNotificationHandler: + """Unit tests for notification handler focusing on event processing and Lambda integration.""" + + def 
test_lambda_handler_direct_eventbridge_event(self, fake_notification_service, lambda_context): + """Test lambda handler with direct EventBridge event returns correct response.""" + # GIVEN direct EventBridge event + event = create_eventbridge_event(detail_type='TaskCreated', task_id='test-task-id') + + # WHEN processing through lambda handler + result = lambda_handler(event, lambda_context) + + # THEN should return success response + assert result['statusCode'] == 200 + assert result['processedEvents'] == 1 + + # AND should have processed one event + assert len(fake_notification_service.processed_events) == 1 + + def test_lambda_handler_task_updated_event(self, fake_notification_service, lambda_context): + """Test lambda handler processing TaskUpdated event.""" + # GIVEN TaskUpdated EventBridge event + event = create_eventbridge_event(detail_type='TaskUpdated', task_id='task-2', title='Updated Task', status='completed') + + # WHEN processing through lambda handler + result = lambda_handler(event, lambda_context) + + # THEN should return success with correct processed count + assert result['statusCode'] == 200 + assert result['processedEvents'] == 1 + + # AND should have processed the event + assert len(fake_notification_service.processed_events) == 1 + assert fake_notification_service.processed_events[0]['event_type'] == 'TaskUpdated' + + def test_lambda_handler_no_events(self, fake_notification_service, lambda_context): + """Test lambda handler with no events returns zero count.""" + # GIVEN empty event + event = {} + + # WHEN processing through lambda handler + result = lambda_handler(event, lambda_context) + + # THEN should return success with zero processed count + assert result['statusCode'] == 200 + assert result['processedEvents'] == 0 + + # AND should not have processed any events + assert len(fake_notification_service.processed_events) == 0 + + def test_lambda_handler_processing_error_propagates(self, fake_notification_service, lambda_context): + """Test lambda handler propagates processing errors.""" + # GIVEN event that will cause processing error + event = create_eventbridge_event(detail_type='TaskCreated', task_id='error-task') + + # Configure fake service to raise an error + fake_notification_service.should_raise_error = True + + # WHEN processing event that causes error + # THEN should propagate the exception + with pytest.raises(Exception, match='Notification processing failed'): + lambda_handler(event, lambda_context) diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/unit/handlers/test_task_handler.py b/python-test-samples/cns427-testable-serverless-architecture/tests/unit/handlers/test_task_handler.py new file mode 100644 index 00000000..3708a5e2 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/tests/unit/handlers/test_task_handler.py @@ -0,0 +1,200 @@ +"""Unit tests for task handler focusing on HTTP response handling.""" + +import json + +import pytest + +from services.task_service.handler import lambda_handler +from tests.unit.test_helpers import create_api_gateway_event, create_test_context + + +@pytest.fixture +def lambda_context(): + """Fixture for Lambda context.""" + return create_test_context() + + +# fake_task_service fixture defined in conftest.py +class TestTaskHandler: + """Unit tests for task handler focusing on HTTP response handling and status codes.""" + + def test_create_task_returns_201(self, fake_task_service, lambda_context): + """Test successful task creation returns 201 status code.""" + 
event = create_api_gateway_event( + method='POST', path='/tasks', body={'title': 'New Task', 'description': 'Test task', 'priority': 'medium', 'dependencies': []} + ) + + # GIVEN task service returns task data + # WHEN calling create task endpoint + response = lambda_handler(event, lambda_context) + + # THEN should return 201 with task data + assert response['statusCode'] == 201 + body = json.loads(response['body']) + assert body['title'] == 'New Task' + assert body['task_id'] == 'test-task-123' + + def test_get_task_returns_200(self, fake_task_service, lambda_context): + """Test successful task retrieval returns 200 status code.""" + # GIVEN fake service will return a task + # WHEN getting task + event = create_api_gateway_event(method='GET', path='/tasks/test-id', path_parameters={'task_id': 'test-id'}) + response = lambda_handler(event, lambda_context) + + # THEN should return 200 with task data + assert response['statusCode'] == 200 + body = json.loads(response['body']) + assert body['task_id'] == 'test-id' + assert body['title'] == 'Retrieved Task' + + def test_get_task_not_found_returns_404(self, fake_task_service, lambda_context): + """Test getting non-existent task returns 404.""" + # GIVEN service will raise ValueError for not found + fake_task_service.should_raise_value_error = True + fake_task_service.value_error_message = 'Task not found: non-existent-id' + + # WHEN getting non-existent task + event = create_api_gateway_event(method='GET', path='/tasks/non-existent-id', path_parameters={'task_id': 'non-existent-id'}) + response = lambda_handler(event, lambda_context) + + # THEN should return 404 + assert response['statusCode'] == 404 + body = json.loads(response['body']) + assert 'not found' in body['message'].lower() + + def test_list_tasks_returns_200(self, fake_task_service, lambda_context): + """Test task listing returns 200 status code.""" + # GIVEN fake service returns task list + # WHEN listing tasks + event = create_api_gateway_event(method='GET', path='/tasks') + response = lambda_handler(event, lambda_context) + + # THEN should return 200 with task list + assert response['statusCode'] == 200 + body = json.loads(response['body']) + assert 'tasks' in body + assert 'pagination' in body + assert len(body['tasks']) == 2 + assert body['tasks'][0]['title'] == 'Task 1' + + def test_list_tasks_with_pagination(self, fake_task_service, lambda_context): + """Test task listing with pagination parameters.""" + # GIVEN fake service returns paginated results + # WHEN listing tasks with pagination + event = create_api_gateway_event(method='GET', path='/tasks', query_parameters={'limit': '10'}) + response = lambda_handler(event, lambda_context) + + # THEN should return 200 with pagination info + assert response['statusCode'] == 200 + body = json.loads(response['body']) + assert 'pagination' in body + # Fake service returns None for next_token + assert body['pagination']['next_token'] is None + + def test_update_task_returns_200(self, fake_task_service, lambda_context): + """Test successful task update returns 200 status code.""" + # GIVEN fake service returns updated task + # WHEN updating task + event = create_api_gateway_event( + method='PUT', path='/tasks/test-id', path_parameters={'task_id': 'test-id'}, body={'title': 'Updated Task', 'version': 1} + ) + response = lambda_handler(event, lambda_context) + + # THEN should return 200 with updated data + assert response['statusCode'] == 200 + body = json.loads(response['body']) + assert body['title'] == 'Updated Task' + assert 'version' 
in body + + def test_update_task_version_conflict_returns_409(self, fake_task_service, lambda_context): + """Test version conflict during update returns 409.""" + # GIVEN service raises ConflictError for version mismatch + fake_task_service.should_raise_conflict_error = True + + # WHEN updating with wrong version + event = create_api_gateway_event( + method='PUT', path='/tasks/test-id', path_parameters={'task_id': 'test-id'}, body={'title': 'Updated Task', 'version': 1} + ) + response = lambda_handler(event, lambda_context) + + # THEN should return 409 for conflict + assert response['statusCode'] == 409 + + def test_delete_task_returns_204(self, fake_task_service, lambda_context): + """Test successful task deletion returns 204 status code.""" + # GIVEN fake service deletes successfully + # WHEN deleting task + event = create_api_gateway_event(method='DELETE', path='/tasks/test-id', path_parameters={'task_id': 'test-id'}) + response = lambda_handler(event, lambda_context) + + # THEN should return 204 with empty body + assert response['statusCode'] == 204 + assert response['body'] == '' + + def test_delete_task_not_found_returns_404(self, fake_task_service, lambda_context): + """Test deleting non-existent task returns 404.""" + # GIVEN service raises ValueError for not found + fake_task_service.should_raise_value_error = True + fake_task_service.value_error_message = 'Task not found: non-existent' + + # WHEN deleting non-existent task + event = create_api_gateway_event(method='DELETE', path='/tasks/non-existent', path_parameters={'task_id': 'non-existent'}) + response = lambda_handler(event, lambda_context) + + # THEN should return 404 + assert response['statusCode'] == 404 + + def test_create_task_validation_error_returns_400(self, fake_task_service, lambda_context): + """Test validation error returns 400 status code.""" + # GIVEN invalid request with empty title + event = create_api_gateway_event( + method='POST', + path='/tasks', + body={'title': ''}, # Invalid empty title + ) + + # WHEN creating task with invalid data + response = lambda_handler(event, lambda_context) + + # THEN should return 400 status code + assert response['statusCode'] == 400 + + # Verify response has error information + assert 'body' in response + body_str = response['body'] + assert body_str is not None and body_str != '' + assert 'validation' in body_str.lower() or 'invalid' in body_str.lower() or 'error' in body_str.lower() + + def test_create_task_missing_body_returns_400(self, fake_task_service, lambda_context): + """Test missing request body returns an error status (400 or higher).""" + # GIVEN request with no body + event = create_api_gateway_event(method='POST', path='/tasks', body=None) + + # WHEN creating task without body + response = lambda_handler(event, lambda_context) + + # THEN should return 400 or higher error status + assert response['statusCode'] >= 400 + + def test_unsupported_method_returns_404(self, fake_task_service, lambda_context): + """Test unsupported HTTP method returns 404 (route-not-found from the resolver).""" + # GIVEN request with unsupported method (PATCH) + event = create_api_gateway_event(method='PATCH', path='/tasks') + + # WHEN processing unsupported method + response = lambda_handler(event, lambda_context) + + # THEN should return 404 (API Gateway resolver behavior for unsupported methods) + assert response['statusCode'] == 404 + + def test_internal_error_returns_500(self, fake_task_service, lambda_context): + """Test internal service error returns 500.""" + # GIVEN service raises unexpected exception +
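        # NOTE (assumption about the conftest fake, which is not shown here):
        # setting this flag makes every service method raise a plain
        # Exception, and the handler's catch-all except block is what turns
        # that into the 500 response asserted below.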
fake_task_service.should_raise_generic_error = True + + # WHEN processing request that causes internal error + event = create_api_gateway_event(method='GET', path='/tasks/test-id', path_parameters={'task_id': 'test-id'}) + response = lambda_handler(event, lambda_context) + + # THEN should return 500 + assert response['statusCode'] == 500 diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/unit/models/__init__.py b/python-test-samples/cns427-testable-serverless-architecture/tests/unit/models/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/unit/models/test_event_contracts.py b/python-test-samples/cns427-testable-serverless-architecture/tests/unit/models/test_event_contracts.py new file mode 100644 index 00000000..e4646fbd --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/tests/unit/models/test_event_contracts.py @@ -0,0 +1,184 @@ +""" +Event Contract Tests using Pydantic + +Demonstrates 4 key contract testing concepts: +1. Schema validation - Event structure compliance using Pydantic +2. Consumer contracts - Consumer-specific field requirements +3. Rule patterns - EventBridge routing validation +4. Drift detection - Checks breaking schema changes are caught +""" + +import json +from typing import List, Literal + +from pydantic import BaseModel, ValidationError + +from services.task_service.models.task import Task, TaskCreatedEvent, TaskDeletedEvent, TaskUpdatedEvent + + +# Pydantic Contract Schemas +class TaskDetailSchema(BaseModel): + """Contract schema for TaskCreated/TaskUpdated events.""" + + task_id: str + title: str + description: str | None + status: Literal['pending', 'in_progress', 'completed'] + priority: Literal['low', 'medium', 'high'] + dependencies: List[str] + created_at: str + updated_at: str + version: int + + +class TaskDeletedDetailSchema(BaseModel): + """Contract schema for TaskDeleted events.""" + + task_id: str + + +class EventBridgeEnvelopeSchema(BaseModel): + """Contract schema for EventBridge envelope.""" + + Source: Literal['cns427-task-api'] + DetailType: Literal['TaskCreated', 'TaskUpdated', 'TaskDeleted'] + Detail: str # JSON string + EventBusName: str + + +# Validation Functions +def validate_against_contract(event: dict, message_name: str) -> List[str]: + """Validate event against Pydantic contract schemas.""" + errors = [] + + try: + # Validate EventBridge envelope + EventBridgeEnvelopeSchema.model_validate(event) + + # Parse and validate Detail content + detail = json.loads(event['Detail']) + + if message_name in ['TaskCreated', 'TaskUpdated']: + TaskDetailSchema.model_validate(detail) + elif message_name == 'TaskDeleted': + TaskDeletedDetailSchema.model_validate(detail) + else: + errors.append(f'Unknown message type: {message_name}') + + except ValidationError as e: + for error in e.errors(): + field_path = ' -> '.join(str(loc) for loc in error['loc']) + errors.append(f'Validation failed at {field_path}: {error["msg"]}') + except json.JSONDecodeError: + errors.append('Detail field is not valid JSON') + except Exception as e: + errors.append(f'Validation error: {str(e)}') + + return errors + + +class TestEventContracts: + """ + Event contract tests using Pydantic validation. 
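    A quick sketch of the intended usage: build a real event with the Event
    models, then assert that validate_against_contract(event, 'TaskCreated')
    returns an empty list; any entries name the exact field that broke the
    contract.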
+ + Pydantic automatically validates: + - String fields are strings + - Status is one of the allowed literals + - Priority is one of the allowed literals + - Dependencies is a list of strings + - All required fields are present + """ + + def test_schema_validation(self): + """1. Schema Validation - Event structure compliance using Pydantic.""" + # GIVEN a task created through domain logic + task = Task(title='Sync Test', description='Testing schema sync') + + # WHEN creating event using Event model + created_event_model = TaskCreatedEvent(task) + created_event = created_event_model.to_eventbridge_entry() + + # THEN event should pass Pydantic validation + errors = validate_against_contract(created_event, 'TaskCreated') + assert errors == [], f'TaskCreated schema out of sync: {errors}' + + # AND should have correct EventBridge structure + assert created_event['Source'] == 'cns427-task-api' + assert created_event['DetailType'] == 'TaskCreated' + + # AND detail should have required fields with correct types + detail = json.loads(created_event['Detail']) + required_fields = ['task_id', 'title', 'status', 'priority', 'version'] + + for field in required_fields: + assert field in detail, f'Missing required field: {field}' + assert detail[field] is not None, f'Field {field} cannot be null' + + def test_consumer_contract_notification_service(self): + """2. Consumer Contract - Notification service requirements.""" + # GIVEN notification service needs: task_id, title, status + task = Task(title='Notification Test', description='Demo') + + # WHEN creating event using Event model + event_model = TaskCreatedEvent(task) + event = event_model.to_eventbridge_entry() + + # THEN notification service required fields should be present + detail = json.loads(event['Detail']) + notification_fields = ['task_id', 'title', 'status'] + for field in notification_fields: + assert field in detail, f'Notification service requires: {field}' + assert isinstance(detail[field], str), f'Field {field} must be string for notifications' + + def test_eventbridge_rule_pattern_matching(self): + """3. Rule Pattern - EventBridge routing validation.""" + # GIVEN EventBridge rule pattern for notification service + rule_pattern = {'source': ['cns427-task-api'], 'detail-type': ['TaskCreated', 'TaskUpdated', 'TaskDeleted']} + + # WHEN creating different event types using Event models + task = Task(title='Rule Pattern Test') + test_cases = [TaskCreatedEvent(task), TaskUpdatedEvent(task), TaskDeletedEvent('task-123')] + + for event_model in test_cases: + event = event_model.to_eventbridge_entry() + + # THEN event should match rule pattern + assert event['Source'] in rule_pattern['source'], f'Source mismatch for {event_model.event_type}' + assert event['DetailType'] in rule_pattern['detail-type'], f'DetailType mismatch for {event_model.event_type}' + + def test_contract_violation_detection(self): + """4. 
Schema drift detection: Pydantic catches contract violations immediately.""" + # GIVEN changes to domain logic + # WHEN publishing events with new schema + invalid_events = [ + # Missing required fields + { + 'Source': 'cns427-task-api', + 'DetailType': 'TaskCreated', + 'Detail': json.dumps({'task_id': 'task-123', 'title': 'Invalid'}), + 'EventBusName': 'test', + }, + # Wrong field types + { + 'Source': 'cns427-task-api', + 'DetailType': 'TaskCreated', + 'Detail': json.dumps( + { + 'task_id': 'task-123', + 'title': 'Invalid', + 'status': 'invalid_status', # Not in allowed literals + 'priority': 'urgent', # Not in allowed literals + 'dependencies': 'not_a_list', # Wrong type + 'created_at': '2023-01-01', + 'updated_at': '2023-01-01', + 'version': 123, + } + ), + 'EventBusName': 'test', + }, + ] + + # THEN Pydantic should catch all violations + for invalid_event in invalid_events: + errors = validate_against_contract(invalid_event, 'TaskCreated') + assert len(errors) > 0, 'Should detect contract violations' diff --git a/python-test-samples/cns427-testable-serverless-architecture/tests/unit/models/test_task_model.py b/python-test-samples/cns427-testable-serverless-architecture/tests/unit/models/test_task_model.py new file mode 100644 index 00000000..7d897a49 --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/tests/unit/models/test_task_model.py @@ -0,0 +1,159 @@ +"""Unit tests for task model validation.""" + +from datetime import datetime + +import pytest +from pydantic import ValidationError + +from services.task_service.models.task import Task, TaskCreatedEvent, TaskDeletedEvent, TaskPriority, TaskStatus, TaskUpdatedEvent + + +class TestTask: + """Test cases for Task model.""" + + def test_create_task_with_minimal_data(self): + """Test creating a task with only required fields.""" + task = Task(title='Test Task') + + assert task.title == 'Test Task' + assert task.description is None + assert task.status == TaskStatus.PENDING + assert task.priority == TaskPriority.MEDIUM + assert task.version == 1 + assert isinstance(task.created_at, datetime) + assert isinstance(task.updated_at, datetime) + assert len(task.task_id) == 36 # UUID length + + def test_create_task_with_all_fields(self): + """Test creating a task with all fields specified.""" + task = Task(title='Complete Task', description='A detailed description', status=TaskStatus.IN_PROGRESS, priority=TaskPriority.HIGH) + + assert task.title == 'Complete Task' + assert task.description == 'A detailed description' + assert task.status == TaskStatus.IN_PROGRESS + assert task.priority == TaskPriority.HIGH + + def test_title_validation_empty_string(self): + """Test that empty title raises validation error.""" + with pytest.raises(ValidationError) as exc_info: + Task(title='') + + assert 'String should have at least 1 character' in str(exc_info.value) + + def test_title_validation_whitespace_only(self): + """Test that whitespace-only title raises validation error.""" + with pytest.raises(ValidationError) as exc_info: + Task(title=' ') + + assert 'Title cannot be empty' in str(exc_info.value) + + def test_title_validation_strips_whitespace(self): + """Test that title whitespace is stripped.""" + task = Task(title=' Test Task ') + assert task.title == 'Test Task' + + def test_title_too_long(self): + """Test that title exceeding max length raises validation error.""" + long_title = 'x' * 101 + with pytest.raises(ValidationError) as exc_info: + Task(title=long_title) + + assert 'String should have at most 100 characters' 
in str(exc_info.value) + + def test_description_validation_strips_whitespace(self): + """Test that description whitespace is stripped.""" + task = Task(title='Test', description=' Description ') + assert task.description == 'Description' + + def test_description_validation_empty_becomes_none(self): + """Test that empty description becomes None.""" + task = Task(title='Test', description=' ') + assert task.description is None + + def test_description_too_long(self): + """Test that description exceeding max length raises validation error.""" + long_description = 'x' * 501 + with pytest.raises(ValidationError) as exc_info: + Task(title='Test', description=long_description) + + assert 'String should have at most 500 characters' in str(exc_info.value) + + def test_status_enum_validation(self): + """Test that invalid status raises validation error.""" + with pytest.raises(ValidationError): + Task(title='Test', status='invalid_status') + + def test_priority_enum_validation(self): + """Test that invalid priority raises validation error.""" + with pytest.raises(ValidationError): + Task(title='Test', priority='invalid_priority') + + def test_missing_title(self): + """Test that missing title raises validation error.""" + with pytest.raises(ValidationError): + Task() + + +class TestTaskEvent: + """Test cases for TaskEvent model.""" + + def test_create_task_created_event(self): + """Test creating a TaskCreated event.""" + task = Task(title='Test Task') + event = TaskCreatedEvent(task) + + assert event.event_type == 'TaskCreated' + assert event.task_data['task_id'] == task.task_id + assert event.task_data['title'] == 'Test Task' + assert event.source == 'cns427-task-api' + assert event.detail_type_prefix == '' + + def test_create_task_created_event_with_test_prefix(self): + """Test creating a TaskCreated event with TEST prefix.""" + task = Task(title='Test Task') + event = TaskCreatedEvent(task, source='TEST-cns427-task-api', detail_type_prefix='TEST-') + + assert event.event_type == 'TaskCreated' + assert event.source == 'TEST-cns427-task-api' + assert event.detail_type_prefix == 'TEST-' + + # Test EventBridge entry format + entry = event.to_eventbridge_entry() + assert entry['Source'] == 'TEST-cns427-task-api' + assert entry['DetailType'] == 'TEST-TaskCreated' + + def test_create_task_updated_event(self): + """Test creating a TaskUpdated event.""" + task = Task(title='Updated Task') + event = TaskUpdatedEvent(task) + + assert event.event_type == 'TaskUpdated' + assert event.task_data['task_id'] == task.task_id + assert event.source == 'cns427-task-api' + + def test_create_task_deleted_event(self): + """Test creating a TaskDeleted event with no task data.""" + event = TaskDeletedEvent('test-task-id') + + assert event.event_type == 'TaskDeleted' + assert event.task_data['task_id'] == 'test-task-id' + assert event.source == 'cns427-task-api' + + def test_event_to_eventbridge_entry(self): + """Test converting event to EventBridge entry format.""" + task = Task(title='Test Task') + event = TaskCreatedEvent(task) + + entry = event.to_eventbridge_entry(event_bus_name='TestBus') + + assert entry['Source'] == 'cns427-task-api' + assert entry['DetailType'] == 'TaskCreated' + assert entry['EventBusName'] == 'TestBus' + assert 'Detail' in entry + + # Verify Detail is valid JSON string + import json + + detail = json.loads(entry['Detail']) + assert detail['task_id'] == task.task_id + assert detail['title'] == 'Test Task' diff --git 
a/python-test-samples/cns427-testable-serverless-architecture/tests/unit/test_helpers.py b/python-test-samples/cns427-testable-serverless-architecture/tests/unit/test_helpers.py new file mode 100644 index 00000000..ee29355b --- /dev/null +++ b/python-test-samples/cns427-testable-serverless-architecture/tests/unit/test_helpers.py @@ -0,0 +1,115 @@ +""" +Test Helper Functions - Simple utilities for demo +""" + +import json +from typing import Any, Dict, Optional + + +def create_api_gateway_event( + method: str, + path: str, + body: Optional[Dict[str, Any]] = None, + query_parameters: Optional[Dict[str, str]] = None, + path_parameters: Optional[Dict[str, str]] = None, +) -> Dict[str, Any]: + """ + Create API Gateway event for testing. + + Simple helper that reduces duplication in tests. + """ + event = { + 'httpMethod': method, + 'path': path, + 'pathParameters': path_parameters, + 'queryStringParameters': query_parameters or {}, + 'body': json.dumps(body) if body else None, + 'headers': {'Content-Type': 'application/json'} if body else {}, + 'requestContext': {'requestId': 'test-request-id', 'stage': 'test'}, + } + return event + + +def create_test_context(): + """Create a mock Lambda context for testing.""" + + class MockContext: + def __init__(self): + self.function_name = 'test-function' + self.function_version = '1' + self.invoked_function_arn = 'arn:aws:lambda:us-east-1:123456789012:function:test-function' + self.memory_limit_in_mb = 128 + self.remaining_time_in_millis = lambda: 30000 + self.aws_request_id = 'test-request-id' + + return MockContext() + + +def create_task_event_detail( + task_id: str = 'test-task-id', + title: str = 'Test Task', + status: str = 'pending', + priority: str = 'medium', +) -> Dict[str, Any]: + """Create a task event detail for testing using Task model.""" + from datetime import UTC, datetime + + from services.task_service.models.task import Task, TaskCreatedEvent, TaskPriority, TaskStatus + + # Create a Task model instance + task = Task( + task_id=task_id, + title=title, + description=None, + status=TaskStatus(status), + priority=TaskPriority(priority), + dependencies=[], + created_at=datetime.now(UTC), + updated_at=datetime.now(UTC), + version=1, + ) + + # Use TaskCreatedEvent to generate the event data + event = TaskCreatedEvent(task) + + # Return the task_data from the event (which is already a dict) + return event.task_data + + +def create_eventbridge_event( + detail_type: str = 'TaskCreated', + task_id: str = 'test-task-id', + title: str = 'Test Task', + status: str = 'pending', + priority: str = 'medium', +) -> Dict[str, Any]: + """Create a full EventBridge event structure for testing.""" + from datetime import UTC, datetime + + from services.task_service.models.task import Task, TaskCreatedEvent, TaskDeletedEvent, TaskPriority, TaskStatus, TaskUpdatedEvent + + # For TaskDeleted, only need task_id + if detail_type in ['TaskDeleted', 'TEST-TaskDeleted']: + event = TaskDeletedEvent(task_id) + return {'detail-type': detail_type, 'detail': event.task_data} + + # For TaskCreated and TaskUpdated, create full task + task = Task( + task_id=task_id, + title=title, + description=None, + status=TaskStatus(status), + priority=TaskPriority(priority), + dependencies=[], + created_at=datetime.now(UTC), + updated_at=datetime.now(UTC), + version=1, + ) + + # Choose the right event type + if 'Updated' in detail_type: + event = TaskUpdatedEvent(task) + else: + event = TaskCreatedEvent(task) + + return {'detail-type': detail_type, 'detail': event.task_data}
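

# --- Illustrative usage (sketch; not part of the test suite) ---
# Running this module directly prints the event shape the helpers produce,
# which is handy when wiring up new handler tests. Everything below uses
# only the pure-Python helpers, so it runs without the services package
# or any AWS credentials.
if __name__ == '__main__':
    sample_event = create_api_gateway_event(
        method='POST',
        path='/tasks',
        body={'title': 'Demo Task'},  # hypothetical payload for illustration
    )
    print(json.dumps(sample_event, indent=2))

    context = create_test_context()
    print(f'{context.function_name} ({context.aws_request_id})')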