From e85c9500d9d9b19168365e3db8c6065a96253667 Mon Sep 17 00:00:00 2001 From: Mihai Criveti Date: Mon, 1 Sep 2025 16:38:13 +0100 Subject: [PATCH 01/49] Multitenancy support Signed-off-by: Mihai Criveti --- .dockerignore | 1 + .env.example | 112 + .gitignore | 3 + .pre-commit-config.yaml | 2 +- AGENTS.md | 2 +- CHANGELOG.md | 257 + CLAUDE.md | 4 +- Makefile | 3 +- README.md | 17 +- docker-compose.yml | 2 +- docs/docs/architecture/multitenancy.md | 933 +++ docs/docs/architecture/plugins.md | 417 +- docs/docs/deployment/container.md | 2 + docs/docs/deployment/google-cloud-run.md | 2 +- docs/docs/deployment/ibm-code-engine.md | 4 +- docs/docs/deployment/local.md | 2 +- docs/docs/development/developer-onboarding.md | 2 +- docs/docs/development/github.md | 2 +- docs/docs/development/index.md | 2 +- .../mcp-developer-guide-json-rpc.md | 10 +- docs/docs/development/review.md | 2 +- docs/docs/faq/index.md | 2 +- docs/docs/index.md | 10 +- docs/docs/manage/.pages | 5 + docs/docs/manage/export-import-reference.md | 2 +- docs/docs/manage/securing.md | 51 +- docs/docs/manage/sso-github-tutorial.md | 382 + docs/docs/manage/sso-google-tutorial.md | 399 + docs/docs/manage/sso-ibm-tutorial.md | 425 ++ docs/docs/manage/sso-okta-tutorial.md | 469 ++ docs/docs/manage/sso.md | 662 ++ docs/docs/testing/acceptance.md | 4 +- docs/docs/testing/basic.md | 2 +- docs/docs/using/clients/continue.md | 2 +- docs/docs/using/clients/copilot.md | 4 +- docs/docs/using/clients/mcp-cli.md | 4 +- docs/docs/using/clients/mcp-inspector.md | 2 +- mcpgateway/admin.py | 3085 +++++++- ...a0fb2_consolidated_multiuser_team_rbac_.py | 510 ++ mcpgateway/auth.py | 228 + mcpgateway/bootstrap_db.py | 200 + mcpgateway/cache/resource_cache.py | 2 +- mcpgateway/cache/session_registry.py | 46 +- mcpgateway/config.py | 142 + mcpgateway/db.py | 1524 +++- mcpgateway/main.py | 641 +- mcpgateway/middleware/__init__.py | 5 + mcpgateway/middleware/rbac.py | 460 ++ mcpgateway/middleware/security_headers.py | 27 +- mcpgateway/middleware/token_scoping.py | 304 + mcpgateway/models.py | 151 + mcpgateway/plugins/framework/loader/config.py | 24 +- mcpgateway/reverse_proxy.py | 2 +- mcpgateway/routers/auth.py | 141 + mcpgateway/routers/email_auth.py | 628 ++ mcpgateway/routers/rbac.py | 477 ++ mcpgateway/routers/sso.py | 566 ++ mcpgateway/routers/teams.py | 949 +++ mcpgateway/routers/tokens.py | 586 ++ mcpgateway/schemas.py | 1516 +++- mcpgateway/services/a2a_service.py | 82 +- mcpgateway/services/argon2_service.py | 304 + mcpgateway/services/email_auth_service.py | 732 ++ mcpgateway/services/gateway_service.py | 92 +- mcpgateway/services/logging_service.py | 95 + mcpgateway/services/permission_service.py | 503 ++ mcpgateway/services/personal_team_service.py | 245 + mcpgateway/services/prompt_service.py | 94 +- mcpgateway/services/resource_service.py | 149 +- mcpgateway/services/role_service.py | 539 ++ mcpgateway/services/server_service.py | 85 +- mcpgateway/services/sso_service.py | 656 ++ .../services/team_invitation_service.py | 446 ++ .../services/team_management_service.py | 799 ++ mcpgateway/services/token_catalog_service.py | 678 ++ mcpgateway/services/tool_service.py | 95 +- mcpgateway/static/admin.js | 2530 ++++++- mcpgateway/static/logo.png | Bin 0 -> 247320 bytes mcpgateway/templates/admin.html | 6463 ++++++++++++----- mcpgateway/templates/login.html | 610 ++ .../transports/streamablehttp_transport.py | 4 + mcpgateway/utils/create_jwt_token.py | 30 +- mcpgateway/utils/metadata_capture.py | 6 +- mcpgateway/utils/sso_bootstrap.py | 171 + mcpgateway/utils/token_scoping.py | 125 + mcpgateway/utils/verify_credentials.py | 250 +- plugins/deny_filter/README.md | 4 +- plugins/pii_filter/README.md | 2 +- pyproject.toml | 41 +- tests/conftest.py | 12 + tests/e2e/test_admin_apis.py | 84 +- tests/e2e/test_main_apis.py | 588 +- tests/integration/test_integration.py | 87 +- .../integration/test_metadata_integration.py | 202 +- tests/integration/test_tag_endpoints.py | 42 +- tests/security/test_rpc_api.py | 2 +- .../middleware/test_token_scoping.py | 312 + .../services/test_email_auth_basic.py | 297 + .../services/test_permission_fallback.py | 157 + .../services/test_sso_admin_assignment.py | 136 + .../services/test_sso_approval_workflow.py | 189 + .../services/test_team_invitation_service.py | 954 +++ .../services/test_team_management_service.py | 764 ++ tests/unit/mcpgateway/test_admin.py | 83 +- tests/unit/mcpgateway/test_main.py | 133 +- tests/unit/mcpgateway/test_main_extended.py | 52 + .../test_streamable_closedresource_filter.py | 55 + tests/unit/mcpgateway/test_ui_version.py | 1 + .../mcpgateway/utils/test_create_jwt_token.py | 14 +- .../unit/mcpgateway/utils/test_proxy_auth.py | 1 + .../utils/test_verify_credentials.py | 27 +- tests/utils/__init__.py | 2 + tests/utils/rbac_mocks.py | 397 + 113 files changed, 33540 insertions(+), 3301 deletions(-) create mode 100644 docs/docs/architecture/multitenancy.md create mode 100644 docs/docs/manage/sso-github-tutorial.md create mode 100644 docs/docs/manage/sso-google-tutorial.md create mode 100644 docs/docs/manage/sso-ibm-tutorial.md create mode 100644 docs/docs/manage/sso-okta-tutorial.md create mode 100644 docs/docs/manage/sso.md create mode 100644 mcpgateway/alembic/versions/cfc3d6aa0fb2_consolidated_multiuser_team_rbac_.py create mode 100644 mcpgateway/auth.py create mode 100644 mcpgateway/middleware/rbac.py create mode 100644 mcpgateway/middleware/token_scoping.py create mode 100644 mcpgateway/routers/auth.py create mode 100644 mcpgateway/routers/email_auth.py create mode 100644 mcpgateway/routers/rbac.py create mode 100644 mcpgateway/routers/sso.py create mode 100644 mcpgateway/routers/teams.py create mode 100644 mcpgateway/routers/tokens.py create mode 100644 mcpgateway/services/argon2_service.py create mode 100644 mcpgateway/services/email_auth_service.py create mode 100644 mcpgateway/services/permission_service.py create mode 100644 mcpgateway/services/personal_team_service.py create mode 100644 mcpgateway/services/role_service.py create mode 100644 mcpgateway/services/sso_service.py create mode 100644 mcpgateway/services/team_invitation_service.py create mode 100644 mcpgateway/services/team_management_service.py create mode 100644 mcpgateway/services/token_catalog_service.py create mode 100644 mcpgateway/static/logo.png create mode 100644 mcpgateway/templates/login.html create mode 100644 mcpgateway/utils/sso_bootstrap.py create mode 100644 mcpgateway/utils/token_scoping.py create mode 100644 tests/unit/mcpgateway/middleware/test_token_scoping.py create mode 100644 tests/unit/mcpgateway/services/test_email_auth_basic.py create mode 100644 tests/unit/mcpgateway/services/test_permission_fallback.py create mode 100644 tests/unit/mcpgateway/services/test_sso_admin_assignment.py create mode 100644 tests/unit/mcpgateway/services/test_sso_approval_workflow.py create mode 100644 tests/unit/mcpgateway/services/test_team_invitation_service.py create mode 100644 tests/unit/mcpgateway/services/test_team_management_service.py create mode 100644 tests/unit/mcpgateway/test_streamable_closedresource_filter.py create mode 100644 tests/utils/__init__.py create mode 100644 tests/utils/rbac_mocks.py diff --git a/.dockerignore b/.dockerignore index e9a71f900..96bc08a95 100644 --- a/.dockerignore +++ b/.dockerignore @@ -20,6 +20,7 @@ test/ attic/ *.md .benchmarks/ +.claude # Development environment directories .devcontainer/ diff --git a/.env.example b/.env.example index a8ec71364..5386658f1 100644 --- a/.env.example +++ b/.env.example @@ -13,6 +13,12 @@ DATABASE_URL=sqlite:///./mcp.db # DATABASE_URL=postgresql://postgres:mysecretpassword@localhost:5432/mcp # DATABASE_URL=mysql+pymysql://mysql:changeme@localhost:3306/mcp +# Database Connection Pool Configuration (for performance optimization) +# DB_POOL_SIZE=50 # Maximum number of persistent connections (default: 200, SQLite capped at 50) +# DB_MAX_OVERFLOW=20 # Additional connections beyond pool_size (default: 10, SQLite capped at 20) +# DB_POOL_TIMEOUT=30 # Seconds to wait for connection before timeout (default: 30) +# DB_POOL_RECYCLE=3600 # Seconds before recreating connection (default: 3600) + # Cache Configuration CACHE_TYPE=database # CACHE_TYPE=redis @@ -45,6 +51,10 @@ BASIC_AUTH_USER=admin BASIC_AUTH_PASSWORD=changeme AUTH_REQUIRED=true +# Bootstrap Admin API Key (for initial setup and automation) +# PRODUCTION: Generate a secure random key and store in secure vault +# BOOTSTRAP_ADMIN_API_KEY=bootstrap_admin_token_xyz + # Secret used to sign JWTs (use long random value in prod) # PRODUCTION: Use a strong, random secret (minimum 32 characters) JWT_SECRET_KEY=my-test-key @@ -52,10 +62,49 @@ JWT_SECRET_KEY=my-test-key # Algorithm used to sign JWTs (e.g., HS256) JWT_ALGORITHM=HS256 +# JWT Audience and Issuer claims for token validation +# PRODUCTION: Set these to your service-specific values +JWT_AUDIENCE=mcpgateway-api +JWT_ISSUER=mcpgateway + # Expiry time for generated JWT tokens (in minutes; e.g. 7 days) TOKEN_EXPIRY=10080 REQUIRE_TOKEN_EXPIRATION=false +##################################### +# Email-Based Authentication (Epic 001) +##################################### + +# Enable email-based authentication system +EMAIL_AUTH_ENABLED=true + +# Platform admin user (bootstrap from environment) +# PRODUCTION: Change these to your actual admin credentials! +PLATFORM_ADMIN_EMAIL=admin@example.com +PLATFORM_ADMIN_PASSWORD=changeme +PLATFORM_ADMIN_FULL_NAME=Platform Administrator + +# Argon2id Password Hashing Configuration +# Time cost (iterations) - higher = more secure but slower +ARGON2ID_TIME_COST=3 +# Memory cost (KB) - higher = more secure but uses more RAM +ARGON2ID_MEMORY_COST=65536 +# Parallelism (threads) - typically 1 for web apps +ARGON2ID_PARALLELISM=1 + +# Password Policy Configuration +PASSWORD_MIN_LENGTH=8 +PASSWORD_REQUIRE_UPPERCASE=false +PASSWORD_REQUIRE_LOWERCASE=false +PASSWORD_REQUIRE_NUMBERS=false +PASSWORD_REQUIRE_SPECIAL=false + +# Account Security Configuration +# Maximum failed login attempts before account lockout +MAX_FAILED_LOGIN_ATTEMPTS=5 +# Account lockout duration in minutes +ACCOUNT_LOCKOUT_DURATION_MINUTES=30 + # MCP Client Authentication MCP_CLIENT_AUTH_ENABLED=true TRUST_PROXY_AUTH=false @@ -65,6 +114,69 @@ PROXY_USER_HEADER=X-Authenticated-User # Must be a non-empty string (e.g. passphrase or random secret) AUTH_ENCRYPTION_SECRET=my-test-salt +# Bootstrap Admin API Key (for initial setup and automation) +# PRODUCTION: Generate a secure random key and store in secure vault +# Uncomment and set a secure value for production use +# BOOTSTRAP_ADMIN_API_KEY=your-secure-bootstrap-key-here + +# ============================================================================== +# SSO (Single Sign-On) Configuration +# ============================================================================== + +# Master SSO switch - enable Single Sign-On authentication +SSO_ENABLED=false + +# GitHub OAuth Configuration +SSO_GITHUB_ENABLED=false +# SSO_GITHUB_CLIENT_ID=your-github-client-id +# SSO_GITHUB_CLIENT_SECRET=your-github-client-secret + +# Google OAuth Configuration +SSO_GOOGLE_ENABLED=false +# SSO_GOOGLE_CLIENT_ID=your-google-client-id.googleusercontent.com +# SSO_GOOGLE_CLIENT_SECRET=your-google-client-secret + +# IBM Security Verify OIDC Configuration +SSO_IBM_VERIFY_ENABLED=false +# SSO_IBM_VERIFY_CLIENT_ID=your-ibm-verify-client-id +# SSO_IBM_VERIFY_CLIENT_SECRET=your-ibm-verify-client-secret +# SSO_IBM_VERIFY_ISSUER=https://your-tenant.verify.ibm.com/oidc/endpoint/default + +# Okta OIDC Configuration +SSO_OKTA_ENABLED=false +# SSO_OKTA_CLIENT_ID=your-okta-client-id +# SSO_OKTA_CLIENT_SECRET=your-okta-client-secret +# SSO_OKTA_ISSUER=https://your-okta-domain.okta.com + +# SSO General Settings +SSO_AUTO_CREATE_USERS=true +SSO_TRUSTED_DOMAINS=[] # JSON array of trusted email domains, e.g., ["example.com", "company.org"] +SSO_PRESERVE_ADMIN_AUTH=true # Keep local admin authentication when SSO is enabled + +# SSO Admin Assignment Settings +SSO_AUTO_ADMIN_DOMAINS=[] # Email domains that automatically get admin privileges, e.g., ["yourcompany.com"] +SSO_GITHUB_ADMIN_ORGS=[] # GitHub organizations whose members get admin privileges, e.g., ["your-org", "partner-org"] +SSO_GOOGLE_ADMIN_DOMAINS=[] # Google Workspace domains that get admin privileges, e.g., ["company.com"] +SSO_REQUIRE_ADMIN_APPROVAL=false # Require admin approval for new SSO registrations + +##################################### +# Personal Teams Configuration (Epic 002) +##################################### + +# Enable automatic personal team creation for new users +AUTO_CREATE_PERSONAL_TEAMS=true + +# Personal team naming prefix +PERSONAL_TEAM_PREFIX=personal + +# Team Limits +MAX_TEAMS_PER_USER=50 +MAX_MEMBERS_PER_TEAM=100 + +# Team Invitation Settings +INVITATION_EXPIRY_DAYS=7 +REQUIRE_EMAIL_VERIFICATION_FOR_INVITES=true + ##################################### # Admin UI and API Toggles ##################################### diff --git a/.gitignore b/.gitignore index 14d5ac5a5..018af30f4 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,6 @@ +*cookies*txt +cookies* +cookies.txt .claude mcpgateway-export* mutants diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 97062cd5f..4bcf8ce37 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -368,7 +368,7 @@ repos: description: Verifies test files in tests/ directories start with `test_`. language: python files: (^|/)tests/.+\.py$ - exclude: ^tests/(.*/)?(pages|helpers|fuzzers|scripts|fixtures|migration)/.*\.py$|^tests/migration/.*\.py$ # Exclude page object, helper, fuzzer, script, fixture, and migration files + exclude: ^tests/(.*/)?(pages|helpers|fuzzers|scripts|fixtures|migration|utils)/.*\.py$|^tests/migration/.*\.py$ # Exclude page object, helper, fuzzer, script, fixture, util, and migration files args: [--pytest-test-first] # `test_.*\.py` # - repo: https://github.com/pycqa/flake8 diff --git a/AGENTS.md b/AGENTS.md index 14ee35423..a61c5d3fa 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -29,7 +29,7 @@ - `make clean`: Remove caches, build artefacts, venv, coverage, docs, certs. MCP helpers -- JWT token: `python -m mcpgateway.utils.create_jwt_token --username admin --exp 10080 --secret KEY`. +- JWT token: `python -m mcpgateway.utils.create_jwt_token --username admin@example.com --exp 10080 --secret KEY`. - Expose stdio server: `python -m mcpgateway.translate --stdio "uvx mcp-server-git" --port 9000`. ## Coding Style & Naming Conventions diff --git a/CHANGELOG.md b/CHANGELOG.md index a1981001d..cab51dca4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,263 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/) --- +## [Unreleased] - Enterprise Multi-Tenancy System + +### Overview + +**This major release implements [EPIC #860]: Complete Enterprise Multi-Tenancy System with Team-Based Resource Scoping**, transforming MCP Gateway from a single-tenant system into a **production-ready enterprise multi-tenant platform** with team-based resource scoping, comprehensive authentication, and enterprise SSO integration. + +**Impact:** Complete architectural transformation enabling secure team collaboration, enterprise SSO integration, and scalable multi-tenant deployments. + +### Added + +#### **🔐 Authentication & Authorization System** +* **Email-based Authentication** (#544) - Complete user authentication system with Argon2id password hashing replacing basic auth +* **Complete RBAC System** (#283) - Platform Admin, Team Owner, Team Member roles with full multi-tenancy support +* **Enhanced JWT Tokens** (#87) - JWT tokens with team context, scoped permissions, and per-user expiry +* **Password Policy Engine** (#426) - Configurable security requirements with password complexity rules +* **Multi-Provider SSO Framework** (#220, #278, #859) - GitHub, Google, and IBM Security Verify integration +* **Per-Virtual-Server API Keys** (#282) - Scoped access tokens for individual virtual servers + +#### **👥 Team Management System** +* **Personal Teams Auto-Creation** - Every user automatically gets a personal team on registration +* **Multi-Team Membership** - Users can belong to multiple teams with different roles (owner/member) +* **Team Invitation System** - Email-based invitations with secure tokens and expiration +* **Team Visibility Controls** - Private/Public team discovery and cross-team collaboration +* **Team Administration** - Complete team lifecycle management via API and Admin UI + +#### **🔒 Resource Scoping & Visibility** +* **Three-Tier Resource Visibility System**: + - **Private**: Owner-only access + - **Team**: Team member access + - **Public**: Cross-team access for collaboration +* **Applied to All Resource Types**: Tools, Servers, Resources, Prompts, A2A Agents +* **Team-Scoped API Endpoints** with proper access validation and filtering +* **Cross-Team Resource Discovery** for public resources + +#### **🏗️ Platform Administration** +* **Platform Admin Role** separate from team roles for system-wide management +* **Domain-Based Auto-Assignment** via SSO (SSO_AUTO_ADMIN_DOMAINS) +* **Enterprise Domain Trust** (SSO_TRUSTED_DOMAINS) for controlled access +* **System-Wide Team Management** for administrators + +#### **🗄️ Database & Infrastructure** +* **Complete Multi-Tenant Database Schema** with proper indexing and performance optimization +* **Team-Based Query Filtering** for performance and security +* **Automated Migration Strategy** from single-tenant to multi-tenant with rollback support +* **All APIs Redesigned** to be team-aware with backward compatibility + +#### **🔧 Configuration & Security** +* **Database Connection Pool Configuration** - Optimized settings for multi-tenant workloads: + ```bash + # New .env.example settings for performance: + DB_POOL_SIZE=50 # Maximum persistent connections (default: 200, SQLite capped at 50) + DB_MAX_OVERFLOW=20 # Additional connections beyond pool_size (default: 10, SQLite capped at 20) + DB_POOL_TIMEOUT=30 # Seconds to wait for connection before timeout (default: 30) + DB_POOL_RECYCLE=3600 # Seconds before recreating connection (default: 3600) + ``` +* **Bootstrap Admin API Key** - Secure initial setup and automation support: + ```bash + # New bootstrap admin configuration: + BOOTSTRAP_ADMIN_API_KEY=bootstrap_admin_token_xyz # For initial setup and automation + ``` +* **Enhanced JWT Configuration** - Audience, issuer claims, and improved token validation: + ```bash + # New JWT configuration options: + JWT_AUDIENCE=mcpgateway-api # JWT audience claim for token validation + JWT_ISSUER=mcpgateway # JWT issuer claim for token validation + ``` +* **Account Security Configuration** - Lockout policies and failed login attempt limits: + ```bash + # New security policy settings: + MAX_FAILED_LOGIN_ATTEMPTS=5 # Maximum failed attempts before lockout + ACCOUNT_LOCKOUT_DURATION_MINUTES=30 # Account lockout duration in minutes + ``` + +### Changed + +#### **🔄 Authentication Migration** +* **Username to Email Migration** - All authentication now uses email addresses instead of usernames + ```bash + # OLD (v0.6.0 and earlier): + python3 -m mcpgateway.utils.create_jwt_token --username admin --exp 10080 --secret my-test-key + + # NEW (v0.7.0+): + python3 -m mcpgateway.utils.create_jwt_token --username admin@example.com --exp 10080 --secret my-test-key + ``` +* **JWT Token Format Enhanced** - Tokens now include team context and scoped permissions +* **API Authentication Updated** - All examples and documentation updated to use email-based authentication + +#### **📊 Database Schema Evolution** +* **New Multi-Tenant Tables**: email_users, email_teams, email_team_members, email_team_invitations +* **Extended Resource Tables** - All resource tables now include team_id, owner_email, visibility columns +* **Performance Indexing** - Strategic indexes on team_id, owner_email, visibility for optimal query performance + +#### **🚀 API Enhancements** +* **New Authentication Endpoints** - Email registration/login and SSO provider integration +* **New Team Management Endpoints** - Complete CRUD operations for teams and memberships +* **Enhanced Resource Endpoints** - All resource endpoints support team-scoping parameters +* **Backward Compatibility** - Existing API endpoints remain functional with feature flags + +### Security + +* **Data Isolation** - Team-scoped queries prevent cross-tenant data access +* **Resource Ownership** - Every resource has owner_email and team_id validation +* **Visibility Enforcement** - Private/Team/Public visibility strictly enforced +* **Secure Tokens** - Invitation tokens with expiration and single-use validation +* **Domain Restrictions** - Corporate domain enforcement via SSO_TRUSTED_DOMAINS +* **MFA Support** - Automatic enforcement of SSO provider MFA policies + +### Documentation + +* **Architecture Documentation** - `docs/docs/architecture/multitenancy.md` - Complete multi-tenancy architecture guide +* **SSO Integration Tutorials**: + - `docs/docs/manage/sso.md` - General SSO configuration guide + - `docs/docs/manage/sso-github-tutorial.md` - GitHub SSO integration tutorial + - `docs/docs/manage/sso-google-tutorial.md` - Google SSO integration tutorial + - `docs/docs/manage/sso-ibm-tutorial.md` - IBM Security Verify integration tutorial + - `docs/docs/manage/sso-okta-tutorial.md` - Okta SSO integration tutorial +* **Configuration Reference** - Complete environment variable documentation with examples +* **Migration Guide** - Single-tenant to multi-tenant upgrade path with troubleshooting +* **API Reference** - Team-scoped endpoint documentation with usage examples + +### Infrastructure + +* **Team-Based Indexing** - Optimized database queries for multi-tenant workloads +* **Connection Pooling** - Enhanced configuration for enterprise scale +* **Migration Scripts** - Automated Alembic migrations with rollback support +* **Performance Monitoring** - Team-scoped metrics and observability + +### Migration Guide + +#### **Environment Configuration Updates** +Update your `.env` file with the new multi-tenancy settings: + +```bash +##################################### +# Email-Based Authentication (Epic 001) +##################################### + +# Enable email-based authentication system +EMAIL_AUTH_ENABLED=true + +# Platform admin user (bootstrap from environment) +PLATFORM_ADMIN_EMAIL=admin@example.com +PLATFORM_ADMIN_PASSWORD=changeme +PLATFORM_ADMIN_FULL_NAME=Platform Administrator + +# Argon2id Password Hashing Configuration +ARGON2ID_TIME_COST=3 +ARGON2ID_MEMORY_COST=65536 +ARGON2ID_PARALLELISM=1 + +# Password Policy Configuration +PASSWORD_MIN_LENGTH=8 +PASSWORD_REQUIRE_UPPERCASE=false +PASSWORD_REQUIRE_LOWERCASE=false +PASSWORD_REQUIRE_NUMBERS=false +PASSWORD_REQUIRE_SPECIAL=false + +##################################### +# Personal Teams Configuration (Epic 002) +##################################### + +# Enable automatic personal team creation for new users +AUTO_CREATE_PERSONAL_TEAMS=true + +# Personal team naming prefix +PERSONAL_TEAM_PREFIX=personal + +# Team Limits +MAX_TEAMS_PER_USER=50 +MAX_MEMBERS_PER_TEAM=100 + +# Team Invitation Settings +INVITATION_EXPIRY_DAYS=7 +REQUIRE_EMAIL_VERIFICATION_FOR_INVITES=true + +##################################### +# SSO Configuration (Optional) +##################################### + +# Master SSO switch - enable Single Sign-On authentication +SSO_ENABLED=false + +# GitHub OAuth Configuration +SSO_GITHUB_ENABLED=false +# SSO_GITHUB_CLIENT_ID=your-github-client-id +# SSO_GITHUB_CLIENT_SECRET=your-github-client-secret + +# Google OAuth Configuration +SSO_GOOGLE_ENABLED=false +# SSO_GOOGLE_CLIENT_ID=your-google-client-id.googleusercontent.com +# SSO_GOOGLE_CLIENT_SECRET=your-google-client-secret + +# IBM Security Verify OIDC Configuration +SSO_IBM_VERIFY_ENABLED=false +# SSO_IBM_VERIFY_CLIENT_ID=your-ibm-verify-client-id +# SSO_IBM_VERIFY_CLIENT_SECRET=your-ibm-verify-client-secret +# SSO_IBM_VERIFY_ISSUER=https://your-tenant.verify.ibm.com/oidc/endpoint/default +``` + +#### **Database Migration** +Database migrations run automatically on startup: +```bash +# Backup your database first +cp mcp.db mcp.db.backup + +# Migrations run automatically when you start the server +make dev # Migrations execute automatically, then server starts + +# Or for production +make serve # Migrations execute automatically, then production server starts +``` + +#### **JWT Token Generation Updates** +All JWT token generation now uses email addresses: +```bash +# Generate development tokens +export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token \ + --username admin@example.com --exp 10080 --secret my-test-key) + +# For API testing +curl -s -H "Authorization: Bearer $MCPGATEWAY_BEARER_TOKEN" \ + http://127.0.0.1:4444/version | jq +``` + +### Breaking Changes + +* **Database Schema** - New tables and extended resource tables (backward compatible with feature flags) +* **Authentication System** - Migration from username to email-based authentication + - **Action Required**: Update JWT token generation to use email addresses instead of usernames + - **Action Required**: Update `.env` with new authentication configuration +* **API Changes** - New endpoints added, existing endpoints enhanced with team parameters + - **Backward Compatible**: Existing endpoints work with new team-scoping parameters +* **Configuration** - New required environment variables for multi-tenancy features + - **Action Required**: Copy updated `.env.example` to `.env` and configure multi-tenancy settings + +### Issues Closed + +**Primary Epic:** +- Closes #860 - [EPIC]: Complete Enterprise Multi-Tenancy System with Team-Based Resource Scoping + +**Core Security & Authentication:** +- Closes #544 - Database-Backed User Authentication with Argon2id (replace BASIC auth) +- Closes #283 - Role-Based Access Control (RBAC) - User/Team/Global Scopes for full multi-tenancy support +- Closes #426 - Configurable Password and Secret Policy Engine +- Closes #87 - Epic: Secure JWT Token Catalog with Per-User Expiry and Revocation +- Closes #282 - Per-Virtual-Server API Keys with Scoped Access + +**SSO Integration:** +- Closes #220 - Authentication & Authorization - SSO + Identity-Provider Integration +- Closes #278 - Authentication & Authorization - Google SSO Integration Tutorial +- Closes #859 - Authentication & Authorization - IBM Security Verify Enterprise SSO Integration + +**Future Foundation:** +- Provides foundation for #706 - ABAC Virtual Server Support (RBAC foundation implemented) + +--- + ## [0.6.0] - 2025-08-22 - Security, Scale & Smart Automation ### Overview diff --git a/CLAUDE.md b/CLAUDE.md index 8b0746586..3a47ff2bd 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -145,10 +145,10 @@ LOG_FOLDER=logs ### Authentication & Tokens ```bash # Generate JWT bearer token -python3 -m mcpgateway.utils.create_jwt_token --username admin --exp 10080 --secret my-test-key +python3 -m mcpgateway.utils.create_jwt_token --username admin@example.com --exp 10080 --secret my-test-key # Export for API calls -export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token --username admin --exp 0 --secret my-test-key) +export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token --username admin@example.com --exp 0 --secret my-test-key) ``` ### Working with MCP Servers diff --git a/Makefile b/Makefile index 5768734e0..15df9106b 100644 --- a/Makefile +++ b/Makefile @@ -1383,7 +1383,8 @@ install-web-linters: @npm install --no-save \ htmlhint \ stylelint stylelint-config-standard @stylistic/stylelint-config stylelint-order \ - eslint eslint-config-standard \ + eslint eslint-config-standard eslint-plugin-import eslint-plugin-n eslint-plugin-promise \ + eslint-plugin-prettier eslint-config-prettier \ retire \ prettier \ jshint \ diff --git a/README.md b/README.md index ceb06feac..b20b6dfae 100644 --- a/README.md +++ b/README.md @@ -270,7 +270,7 @@ BASIC_AUTH_PASSWORD=pass JWT_SECRET_KEY=my-test-key \ # 3️⃣ Generate a bearer token & smoke-test the API export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token \ - --username admin --exp 10080 --secret my-test-key) + --username admin@example.com --exp 10080 --secret my-test-key) curl -s -H "Authorization: Bearer $MCPGATEWAY_BEARER_TOKEN" \ http://127.0.0.1:4444/version | jq @@ -300,7 +300,7 @@ mcpgateway.exe --host 0.0.0.0 --port 4444 # 4️⃣ Bearer token and smoke-test $Env:MCPGATEWAY_BEARER_TOKEN = python3 -m mcpgateway.utils.create_jwt_token ` - --username admin --exp 10080 --secret my-test-key + --username admin@example.com --exp 10080 --secret my-test-key curl -s -H "Authorization: Bearer $Env:MCPGATEWAY_BEARER_TOKEN" ` http://127.0.0.1:4444/version | jq @@ -452,7 +452,7 @@ docker logs -f mcpgateway # Generating an API key docker run --rm -it ghcr.io/ibm/mcp-context-forge:0.6.0 \ - python3 -m mcpgateway.utils.create_jwt_token --username admin --exp 0 --secret my-test-key + python3 -m mcpgateway.utils.create_jwt_token --username admin@example.com --exp 0 --secret my-test-key ``` Browse to **[http://localhost:4444/admin](http://localhost:4444/admin)** (user `admin` / pass `changeme`). @@ -569,7 +569,7 @@ podman run -d --name mcpgateway \ * **JWT tokens** - Generate one in the running container: ```bash - docker exec mcpgateway python3 -m mcpgateway.utils.create_jwt_token -u admin -e 10080 --secret my-test-key + docker exec mcpgateway python3 -m mcpgateway.utils.create_jwt_token -u admin@example.com -e 10080 --secret my-test-key ``` * **Upgrades** - Stop, remove, and rerun with the same `-v $(pwd)/data:/data` mount; your DB and config stay intact. @@ -600,7 +600,7 @@ The `mcpgateway.wrapper` lets you connect to the gateway over **stdio** while ke ```bash # Set environment variables -export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token --username admin --exp 10080 --secret my-test-key) +export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token --username admin@example.com --exp 10080 --secret my-test-key) export MCP_AUTH=${MCPGATEWAY_BEARER_TOKEN} export MCP_SERVER_URL='http://localhost:4444/servers/UUID_OF_SERVER_1/mcp' export MCP_TOOL_CALL_TIMEOUT=120 @@ -1019,9 +1019,12 @@ You can get started by copying the provided [.env.example](.env.example) to `.en | --------------------- | ---------------------------------------------------------------- | ------------- | ---------- | | `BASIC_AUTH_USER` | Username for Admin UI login and HTTP Basic authentication | `admin` | string | | `BASIC_AUTH_PASSWORD` | Password for Admin UI login and HTTP Basic authentication | `changeme` | string | +| `PLATFORM_ADMIN_EMAIL` | Email for bootstrap platform admin user (auto-created with admin privileges) | `admin@example.com` | string | | `AUTH_REQUIRED` | Require authentication for all API routes | `true` | bool | | `JWT_SECRET_KEY` | Secret key used to **sign JWT tokens** for API access | `my-test-key` | string | | `JWT_ALGORITHM` | Algorithm used to sign the JWTs (`HS256` is default, HMAC-based) | `HS256` | PyJWT algs | +| `JWT_AUDIENCE` | JWT audience claim for token validation | `mcpgateway-api` | string | +| `JWT_ISSUER` | JWT issuer claim for token validation | `mcpgateway` | string | | `TOKEN_EXPIRY` | Expiry of generated JWTs in minutes | `10080` | int > 0 | | `AUTH_ENCRYPTION_SECRET` | Passphrase used to derive AES key for encrypting tool auth headers | `my-test-salt` | string | @@ -1036,7 +1039,7 @@ You can get started by copying the provided [.env.example](.env.example) to `.en > * Generate tokens via: > > ```bash -> export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token --username admin --exp 0 --secret my-test-key) +> export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token --username admin@example.com --exp 0 --secret my-test-key) > echo $MCPGATEWAY_BEARER_TOKEN > ``` > * Tokens allow non-interactive API clients to authenticate securely. @@ -1480,7 +1483,7 @@ Generate an API Bearer token, and test the various API endpoints. ```bash # Generate a bearer token using the configured secret key (use the same as your .env) -export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token -u admin --secret my-test-key) +export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token -u admin@example.com --secret my-test-key) echo ${MCPGATEWAY_BEARER_TOKEN} # Quickly confirm that authentication works and the gateway is healthy diff --git a/docker-compose.yml b/docker-compose.yml index c95557a8b..608dcc577 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -358,7 +358,7 @@ services: done echo "Generating JWT token..." - export MCPGATEWAY_BEARER_TOKEN=$$(python3 -m mcpgateway.utils.create_jwt_token -u admin --secret my-test-key) + export MCPGATEWAY_BEARER_TOKEN=$$(python3 -m mcpgateway.utils.create_jwt_token -u admin@example.com --secret my-test-key) echo "Registering fast_time_server with gateway..." RESPONSE=$$(curl -s -X POST http://gateway:4444/gateways \ diff --git a/docs/docs/architecture/multitenancy.md b/docs/docs/architecture/multitenancy.md new file mode 100644 index 000000000..717ddf3c4 --- /dev/null +++ b/docs/docs/architecture/multitenancy.md @@ -0,0 +1,933 @@ +# Multi-Tenancy Architecture + +The MCP Gateway implements a comprehensive multi-tenant architecture that provides secure isolation, flexible resource sharing, and granular access control. This document describes the complete multi-tenancy design, user lifecycle, team management, and resource scoping mechanisms. + +## Overview + +The multi-tenancy system is built around **teams as the primary organizational unit**, with users belonging to one or more teams, and all resources scoped to teams with configurable visibility levels. + +### Core Principles + +1. **Team-Centric**: Teams are the fundamental organizational unit for resource ownership and access control +2. **User Flexibility**: Users can belong to multiple teams with different roles in each team +3. **Resource Isolation**: Resources are scoped to teams with explicit sharing controls +4. **Invitation-Based**: Team membership is controlled through invitation workflows +5. **Role-Based Access**: Users have roles (Owner, Member) within teams that determine their capabilities +6. **Platform Administration**: Separate platform-level administration for system management + +--- + +## User Lifecycle & Authentication + +### User Authentication Flow + +```mermaid +sequenceDiagram + participant U as User + participant G as Gateway + participant SSO as SSO Provider + participant DB as Database + participant E as Email Service + + alt Email Authentication + U->>G: POST /auth/email/login + G->>DB: Validate email/password + DB-->>G: User record + G-->>U: JWT token + session + else SSO Authentication + U->>G: GET /auth/sso/login/github + G->>SSO: OAuth redirect + U->>SSO: Authorize application + SSO->>G: OAuth callback with code + G->>SSO: Exchange code for token + SSO-->>G: User profile data + G->>DB: Create/update user + G->>DB: Create personal team + G-->>U: JWT token + session + end + + Note over G,DB: Personal team auto-created for new users +``` + +### User Creation & Personal Teams + +Every user gets an automatically created **Personal Team** upon registration: + +```mermaid +flowchart TD + A[New User Registration] --> B{Authentication Method} + + B -->|Email| C[Email Registration] + B -->|SSO| D[SSO Registration] + + C --> E[Create EmailUser Record] + D --> F[Create SSO User Record] + + E --> G[Create Personal Team] + F --> G + + G --> H[Set User as Team Owner] + H --> I[User Can Access System] + + subgraph "Personal Team Properties" + J[Name: user@email.com or Full Name] + K[Type: personal] + L[Owner: User] + M[Members: User only] + N[Visibility: private] + end + + G --> J + G --> K + G --> L + G --> M + G --> N + + style G fill:#e1f5fe + style J fill:#f3e5f5 + style K fill:#f3e5f5 + style L fill:#f3e5f5 + style M fill:#f3e5f5 + style N fill:#f3e5f5 +``` + +--- + +## Team Architecture & Management + +### Team Structure & Roles + +```mermaid +erDiagram + EmailTeam ||--o{ EmailTeamMember : has + EmailUser ||--o{ EmailTeamMember : belongs_to + EmailTeam ||--o{ EmailTeamInvitation : has_pending + EmailUser ||--o{ EmailTeamInvitation : invited_by + + EmailTeam { + uuid id PK + string name + string description + enum type "personal|organizational" + enum visibility "private|public" + string owner_email FK + timestamp created_at + timestamp updated_at + } + + EmailUser { + string email PK + string password_hash + string full_name + boolean is_admin + timestamp created_at + } + + EmailTeamMember { + uuid id PK + uuid team_id FK + string user_email FK + enum role "owner|member" + timestamp joined_at + } + + EmailTeamInvitation { + uuid id PK + uuid team_id FK + string invited_email + string invited_by_email FK + enum role "owner|member" + string token + timestamp expires_at + enum status "pending|accepted|declined|expired" + } +``` + +### Team Visibility & Access Model + +```mermaid +flowchart TB + subgraph "Team Visibility Types" + T1[Private Team
Not discoverable; invite-only] + T2[Public Team
Discoverable; membership by invite/request] + end + + subgraph "Team Roles" + R1[Owner
- Full team control
- Invite/remove members
- Manage resources
- Delete team] + R2[Member
- Access team resources
- Create resources
- No member management] + end + + subgraph "Team Membership Flow" + A[User Exists] --> B{Team Type} + B -->|Private| C[Requires Invitation] + B -->|Public| D[Discover and Request Join] + + C --> E[Owner Sends Invite] + E --> F[Pending Invitation] + F --> G[User Accepts/Declines] + + D --> H[User Joins Team] + G -->|Accept| H + H --> I[Team Member] + end + + style T1 fill:#ffebee + style T2 fill:#e8f5e8 + style R1 fill:#fff3e0 + style R2 fill:#f3e5f5 +``` + +#### Team Role Semantics (Design) + +- Owner: + - Manage team settings (name, description, visibility) and lifecycle (cannot delete personal teams). + - Manage membership (invite, accept, change roles, remove members). + - Full control over team resources (create/update/delete), subject to platform policies. + +- Member: + - Access and use team resources; can create resources by default unless policies restrict it. + - Cannot manage team membership or team‑level settings. + +Platform Admin is a global role (not a team role) with system‑wide oversight. + +### Team Invitation Workflow + +```mermaid +sequenceDiagram + participant O as Team Owner + participant G as Gateway + participant DB as Database + participant E as Email Service + participant I as Invited User + + Note over O,I: Invitation Process + O->>G: POST /teams/{team_id}/invitations + Note right of O: {email, role, expires_in} + + G->>DB: Check team ownership + DB-->>G: Owner confirmed + + G->>DB: Create invitation record + DB-->>G: Invitation token generated + + alt User exists on platform + G->>DB: User found + Note right of G: Internal notification + else User not on platform + G->>E: Send invitation email + E-->>I: Email with invitation link + end + + G-->>O: Invitation created + + Note over I,G: Acceptance Process + I->>G: GET /teams/invitations/{token} + G->>DB: Validate token + DB-->>G: Invitation details + G-->>I: Invitation info page + + I->>G: POST /teams/invitations/{token}/accept + G->>DB: Create team membership + G->>DB: Update invitation status + G-->>I: Welcome to team + + Note over O,G: Owner notification + G->>O: Member joined notification +``` + +--- + +## Visibility Semantics + +This section clarifies what Private and Public mean for teams, and what Private/Team/Public mean for resources across the system. + +### Team Visibility (Design) + +- Private: + - Discoverability: Not listed to non‑members; only visible to members/owner. + - Membership: By invitation from a team owner (request‑to‑join is not exposed to non‑members). + - API/UI: Team shows up only in the current user's teams list; direct deep links require membership. + +- Public: + - Discoverability: Listed in public team discovery views for all authenticated users. + - Membership: Still requires an invitation or explicit approval of a join request. + - API/UI: Limited metadata may be visible without membership; all management and resource operations still require membership. + +Note: Platform Admin is a global role and is not a team role. Admins can view/manage teams for operational purposes irrespective of team visibility. + +### Resource Visibility (Design) + +Applies to Tools, Servers, Resources, Prompts, and A2A Agents. All resources are owned by a team (team_id) and created by a user (owner_email). + +- Private: + - Who sees it: Only the resource owner (owner_email). + - Team members cannot see or use it unless they are the owner. + - Mutations: Owner and Platform Admin can update/delete; team owners may be allowed by policy (see Enhancements). + +- Team: + - Who sees it: All members of the owning team (owners and members). + - Mutations: Owner can update/delete; team owners can administratively manage; Platform Admin can override. + +- Public: + - Who sees it: All authenticated users across the platform (cross‑team visibility). + - Mutations: Only the resource owner, team owners, or Platform Admins can modify/delete. + +Enforcement summary: +- Listing queries include resources where (a) owner_email == user.email, (b) team_id ∈ user_teams with visibility ∈ {team, public}, and (c) visibility == public. +- Read follows the same rules as list; write operations require ownership or delegated/team administrative rights. + +--- + +## Resource Scoping & Visibility + +### Resource Architecture + +All resources in the MCP Gateway are scoped to teams with three visibility levels: + +```mermaid +flowchart TD + subgraph "Resource Types" + A[MCP Servers] + B[Virtual Servers] + C[Tools] + D[Resources] + E[Prompts] + F[A2A Agents] + end + + subgraph "Team Scoping" + G[team_id: UUID] + H[owner_email: string] + I[visibility: enum] + end + + subgraph "Visibility Levels" + J[Private
Owner only] + K[Team
Team members] + L[Public
All users] + end + + A --> G + B --> G + C --> G + D --> G + E --> G + F --> G + + G --> I + H --> I + + I --> J + I --> K + I --> L + + style J fill:#ffebee + style K fill:#e3f2fd + style L fill:#e8f5e8 +``` + +### Resource Visibility Matrix + +```mermaid +flowchart LR + subgraph "User Access to Resources" + U1[User A
Team 1 Member
Team 2 Owner] + U2[User B
Team 1 Owner
Team 3 Member] + U3[User C
No team membership] + end + + subgraph "Resource Visibility" + R1[Resource 1
Team 1, Private
Owner: User B] + R2[Resource 2
Team 1, Team
Owner: User A] + R3[Resource 3
Team 2, Public
Owner: User A] + R4[Resource 4
Team 3, Team
Owner: User B] + end + + U1 -.->|❌ No Access| R1 + U1 -->|✅ Team Member| R2 + U1 -->|✅ Owner & Public| R3 + U1 -.->|❌ Not Team Member| R4 + + U2 -->|✅ Owner & Private| R1 + U2 -->|✅ Team Member| R2 + U2 -->|✅ Public| R3 + U2 -->|✅ Team Member| R4 + + U3 -.->|❌ No Access| R1 + U3 -.->|❌ No Access| R2 + U3 -->|✅ Public| R3 + U3 -.->|❌ No Access| R4 + + style U1 fill:#e1f5fe + style U2 fill:#f3e5f5 + style U3 fill:#fff3e0 +``` + +### Resource Access Control Logic + +```mermaid +flowchart TD + A[User requests resource access] --> B{Resource visibility} + + B -->|Private| C{User owns resource?} + B -->|Team| D{User in resource team?} + B -->|Public| E[✅ Allow access] + + C -->|Yes| F[✅ Allow access] + C -->|No| G[❌ Deny access] + + D -->|Yes| H[✅ Allow access] + D -->|No| I[❌ Deny access] + + style F fill:#e8f5e8 + style H fill:#e8f5e8 + style E fill:#e8f5e8 + style G fill:#ffebee + style I fill:#ffebee +``` + +--- + +## Platform Administration + +### Administrator Hierarchy + +```mermaid +flowchart TD + subgraph "Platform Roles" + A[Platform Administrator
- System-wide access
- User management
- SSO configuration
- Global settings] + B[Team Owner
- Team management
- Team resource control
- Member management] + C[Team Member
- Team resource access
- Limited team visibility] + D[Regular User
- Personal team only
- Public resource access] + end + + subgraph "Domain Restrictions" + E[Admin Domain Whitelist
SSO_AUTO_ADMIN_DOMAINS] + F[Trusted Domains
SSO_TRUSTED_DOMAINS] + G[Manual Assignment
Platform admin approval] + end + + A --> E + A --> G + B --> F + + subgraph "Access Hierarchy" + H[Platform Admin] --> I[All Teams & Resources] + J[Team Owner] --> K[Team Resources & Members] + L[Team Member] --> M[Team Resources Only] + N[Regular User] --> O[Personal + Public Resources] + end + + style A fill:#ff8a80 + style B fill:#ffb74d + style C fill:#81c784 + style D fill:#90caf9 +``` + +### Administrator Assignment Flow + +```mermaid +sequenceDiagram + participant U as New User + participant G as Gateway + participant SSO as SSO Provider + participant DB as Database + participant A as Platform Admin + + Note over U,A: SSO Registration with Domain Check + U->>G: SSO Login (user@company.com) + G->>SSO: OAuth flow + SSO-->>G: User profile + + G->>G: Check SSO_AUTO_ADMIN_DOMAINS + Note right of G: company.com in whitelist? + + alt Auto-Admin Domain + G->>DB: Create user with is_admin=true + G-->>U: Admin access granted + else Trusted Domain + G->>DB: Create user with is_admin=false + G->>DB: Auto-approve user + G-->>U: Regular user access + else Unknown Domain + G->>DB: Create pending user + G->>A: Admin approval required + A->>G: Approve/deny + admin assignment + alt Approved as Admin + G->>DB: Set is_admin=true + G-->>U: Admin access granted + else Approved as User + G->>DB: Set is_admin=false + G-->>U: Regular user access + else Denied + G-->>U: Access denied + end + end +``` + +### Admin vs User Experience + +- Admin team view: Administrators can view and manage all non-personal teams across the platform in Teams Management. Personal teams remain protected from cross-tenant edits and deletion. +- User team view: Non-admin users see only teams they belong to. Team owners manage membership via invitations; members can access team resources per visibility rules. +- UI access: The main dashboard supports both admins and regular users. Admin-only actions remain protected by RBAC; non-admins can access their teams, resources, and tokens but cannot perform admin functions. + +### Default Visibility & Sharing + +- Default on create: New resources (including MCP Servers, Tools, Resources, Prompts, and A2A Agents) default to `visibility="private"` unless a different value is explicitly provided by an allowed actor. For servers created via the UI, the visibility is enforced to `private` by default. +- Team assignment: When a user creates a server and does not specify `team_id`, the server is automatically assigned to the user's personal team. +- Sharing workflow: + - Private → Team: Make the resource visible to the owning team by setting `visibility="team"`. + - Private/Team → Public: Make the resource visible to all authenticated users by setting `visibility="public"`. + - Cross-team: To have a resource under a different team, create it in that team or move/clone it per policy; cross-team "share" is by visibility, not multi-team ownership. + +--- + +## Complete Multi-Tenancy Flow + +### End-to-End Resource Access + +```mermaid +sequenceDiagram + participant U as User + participant G as Gateway + participant Auth as Authentication + participant Team as Team Service + participant Res as Resource Service + participant DB as Database + + Note over U,DB: Complete Access Flow + U->>G: Request resource list + G->>Auth: Validate JWT token + Auth-->>G: User identity confirmed + + G->>Team: Get user teams + Team->>DB: Query team memberships + DB-->>Team: User team list + Team-->>G: Teams with roles + + G->>Res: List resources for user + Res->>DB: Query with team filtering + Note right of Res: WHERE (owner_email = user
OR (team_id IN user_teams AND visibility IN ('team', 'public'))
OR visibility = 'public') + + DB-->>Res: Filtered resource list + Res-->>G: User-accessible resources + G-->>U: Resource list response + + Note over U,DB: Resource Creation + U->>G: Create new resource + G->>Auth: Validate permissions + G->>Team: Verify team membership + Team-->>G: Team access confirmed + + G->>Res: Create resource + Res->>DB: INSERT with team_id, owner_email, visibility + DB-->>Res: Resource created + Res-->>G: Creation confirmed + G-->>U: Resource created successfully +``` + +### Team-Based Resource Filtering + +```mermaid +flowchart TD + A[User Request] --> B[Extract User Identity] + B --> C[Get User Team Memberships] + + C --> D[Build Filter Criteria] + + D --> E{Resource Query} + E --> F[Owner-Owned Resources
owner_email = user.email] + E --> G[Team Resources
team_id IN user.teams
AND visibility IN ('team', 'public')] + E --> H[Public Resources
visibility = 'public'] + + F --> I[Combine Results] + G --> I + H --> I + + I --> J[Apply Additional Filters] + J --> K[Return Filtered Resources] + + subgraph "Filter Logic" + L[Personal: User owns directly] + M[Team: User is team member] + N[Public: Available to all] + end + + style F fill:#e1f5fe + style G fill:#e3f2fd + style H fill:#e8f5e8 +``` + +--- + +## Database Schema Design + +### Complete Multi-Tenant Schema + +```mermaid +erDiagram + %% User Management + EmailUser ||--o{ EmailTeamMember : belongs_to + EmailUser ||--o{ EmailTeamInvitation : invites + EmailUser ||--o{ EmailTeam : owns + + %% Team Management + EmailTeam ||--o{ EmailTeamMember : has + EmailTeam ||--o{ EmailTeamInvitation : has_pending + EmailTeam ||--o{ Tool : owns + EmailTeam ||--o{ Server : owns + EmailTeam ||--o{ Resource : owns + EmailTeam ||--o{ Prompt : owns + EmailTeam ||--o{ A2AAgent : owns + + %% Resources + Tool ||--o{ ToolExecution : executions + Server ||--o{ ServerConnection : connections + A2AAgent ||--o{ A2AInteraction : interactions + + EmailUser { + string email PK + string password_hash + string full_name + boolean is_admin + timestamp created_at + timestamp updated_at + } + + EmailTeam { + uuid id PK + string name + text description + enum type "personal|organizational" + enum visibility "private|public" + string owner_email FK + jsonb settings + timestamp created_at + timestamp updated_at + } + + EmailTeamMember { + uuid id PK + uuid team_id FK + string user_email FK + enum role "owner|member" + jsonb permissions + timestamp joined_at + timestamp updated_at + } + + EmailTeamInvitation { + uuid id PK + uuid team_id FK + string invited_email + string invited_by_email FK + enum role "owner|member" + string token + text message + timestamp expires_at + enum status "pending|accepted|declined|expired" + timestamp created_at + } + + Tool { + uuid id PK + string name + text description + uuid team_id FK + string owner_email FK + enum visibility "private|team|public" + jsonb schema + jsonb tags + timestamp created_at + timestamp updated_at + } + + Server { + uuid id PK + string name + text description + uuid team_id FK + string owner_email FK + enum visibility "private|team|public" + jsonb config + jsonb tags + timestamp created_at + timestamp updated_at + } + + Resource { + uuid id PK + string name + text description + uuid team_id FK + string owner_email FK + enum visibility "private|team|public" + string uri + string mime_type + jsonb tags + timestamp created_at + timestamp updated_at + } + + Prompt { + uuid id PK + string name + text description + uuid team_id FK + string owner_email FK + enum visibility "private|team|public" + text content + jsonb arguments + jsonb tags + timestamp created_at + timestamp updated_at + } + + A2AAgent { + uuid id PK + string name + text description + uuid team_id FK + string owner_email FK + enum visibility "private|team|public" + string endpoint_url + jsonb config + jsonb tags + timestamp created_at + timestamp updated_at + } +``` + +--- + +## API Design Patterns + +### Team-Scoped Endpoints + +All resource endpoints follow consistent team-scoping patterns: + +```mermaid +flowchart TD + subgraph "API Endpoint Patterns" + A[GET /tools?team_id=uuid&visibility=team] + B[POST /tools
{name, team_id, visibility}] + C[GET /tools/{id}] + D[PUT /tools/{id}
{team_id, visibility}] + E[DELETE /tools/{id}] + end + + subgraph "Request Processing" + F[Extract User Identity] --> G[Validate Team Access] + G --> H[Apply Team Filters] + H --> I[Execute Query] + I --> J[Return Results] + end + + subgraph "Access Control Checks" + K[User Team Membership] + L[Resource Ownership] + M[Visibility Level] + N[Operation Permissions] + end + + A --> F + B --> F + C --> F + D --> F + E --> F + + G --> K + G --> L + G --> M + G --> N + + style A fill:#e1f5fe + style B fill:#f3e5f5 + style C fill:#fff3e0 + style D fill:#e8f5e8 + style E fill:#ffebee +``` + +### Resource Creation Flow + +```mermaid +sequenceDiagram + participant C as Client + participant G as Gateway + participant A as Auth Middleware + participant T as Team Service + participant R as Resource Service + participant DB as Database + + C->>G: POST /tools + Note right of C: {name, team_id, visibility} + + G->>A: Validate request + A->>A: Extract user from JWT + A->>T: Check team membership + T->>DB: Query team_members + DB-->>T: Membership confirmed + T-->>A: Access granted + A-->>G: User authorized + + G->>R: Create resource + R->>R: Validate team_id ownership + R->>DB: INSERT resource + Note right of R: team_id, owner_email, visibility + DB-->>R: Resource created + R-->>G: Creation response + G-->>C: 201 Created +``` + +--- + +## Configuration & Environment + +### Multi-Tenancy Configuration + +```bash +##################################### +# Multi-Tenancy Configuration +##################################### + +# Team Settings +AUTO_CREATE_PERSONAL_TEAMS=true +PERSONAL_TEAM_PREFIX=personal +MAX_TEAMS_PER_USER=50 +MAX_MEMBERS_PER_TEAM=100 + +# Team Invitation Settings +INVITATION_EXPIRY_DAYS=7 +REQUIRE_EMAIL_VERIFICATION_FOR_INVITES=true + +# Visibility +# NOTE: Resources default to 'private' (not configurable via env today) +# Allowed visibility values: private | team | public + +# Platform Administration +PLATFORM_ADMIN_EMAIL=admin@company.com +PLATFORM_ADMIN_PASSWORD=changeme +PLATFORM_ADMIN_FULL_NAME="Platform Administrator" + +# SSO (enable + trust and admin mapping) +SSO_ENABLED=true +SSO_TRUSTED_DOMAINS=["company.com","trusted-partner.com"] +SSO_AUTO_ADMIN_DOMAINS=["company.com"] +SSO_GITHUB_ADMIN_ORGS=["your-org"] +SSO_GOOGLE_ADMIN_DOMAINS=["your-google-workspace-domain.com"] +SSO_REQUIRE_ADMIN_APPROVAL=false + +# Public team self-join flows are planned; no env toggles yet +``` + +--- + +## Security Considerations + +### Multi-Tenant Security Model + +```mermaid +flowchart TD + subgraph "Security Layers" + A[Authentication Layer
- JWT validation
- Session management] + B[Authorization Layer
- Team membership
- Resource ownership
- Visibility checks] + C[Data Isolation Layer
- Team-scoped queries
- Owner validation
- Access logging] + end + + subgraph "Security Controls" + D[Input Validation
- Team ID validation
- Email format
- Role validation] + E[Rate Limiting
- Per-user limits
- Per-team limits
- API quotas] + F[Audit Logging
- Access attempts
- Resource changes
- Team modifications] + end + + subgraph "Attack Prevention" + G[Team Enumeration
- UUID team IDs
- Access validation] + H[Resource Access
- Ownership checks
- Visibility enforcement] + I[Privilege Escalation
- Role validation
- Permission boundaries] + end + + A --> B --> C + D --> E --> F + G --> H --> I + + style A fill:#ffcdd2 + style B fill:#f8bbd9 + style C fill:#e1bee7 + style D fill:#c8e6c9 + style E fill:#dcedc8 + style F fill:#f0f4c3 +``` + +### Access Control Matrix + +| User Role | Team Access | Resource Creation | Member Management | Team Settings | Platform Admin | +|-----------|-------------|-------------------|-------------------|---------------|----------------| +| Platform Admin | All teams | All resources | All teams | All settings | Full access | +| Team Owner | Owned teams | Team resources | Team members | Team settings | No access | +| Team Member | Member teams | Team resources | No access | No access | No access | +| Regular User | Personal team | Personal resources | Personal team | Personal settings | No access | + +--- + +## Implementation Verification + +### Key Requirements Checklist + +- [x] **User Authentication**: Email and SSO authentication implemented +- [x] **Personal Teams**: Auto-created for every user +- [x] **Team Roles**: Owner and Member roles (platform Admin is global) +- [x] **Team Visibility**: Private and Public team types +- [x] **Resource Scoping**: All resources scoped to teams with visibility controls +- [x] **Invitation System**: Email-based invitations with token management +- [x] **Platform Administration**: Separate admin role with domain restrictions +- [x] **Access Control**: Team-based filtering for all resources +- [x] **Database Design**: Complete multi-tenant schema +- [x] **API Patterns**: Consistent team-scoped endpoints + +### Critical Implementation Points + +1. **Team ID Validation**: Every resource operation must validate team membership +2. **Visibility Enforcement**: Resource visibility (private/team/public) strictly enforced; team visibility (private/public) per design +3. **Owner Permissions**: Only team owners can manage members and settings +4. **Personal Team Protection**: Personal teams cannot be deleted or transferred +5. **Invitation Security**: Invitation tokens with expiration and single-use +6. **Platform Admin Isolation**: Platform admin access separate from team access +7. **Cross-Team Access**: Public resources accessible across team boundaries +8. **Audit Trail**: Permission checks and auth events audited; extended operation audit planned + +--- + +## Gaps & Issues + +- Team roles: Owner and Member only (platform Admin is global) — consistent across ERD, APIs, and UI. +- Team visibility: Private and Public. +- Resource visibility: `private|team|public` — enforced as designed. +- Public team discovery/join: Join‑request/self‑join flows to be implemented. +- Default resource visibility: Defaults to "private"; not configurable via env. +- SSO admin mapping: Domain/org lists supported; provider‑specific org checks may require provider API calls in production. + +--- + +## Enhancements & Roadmap (Part of the Design) + +- Public Team Discovery & Join Requests: + - Add endpoints and UI to request membership on public teams; owner approval workflow; optional auto‑approve policy. + - Admin toggles/policies to restrict who can create public teams and who can approve joins. + +- Unified Operation Audit: + - System‑wide audit log for create/update/delete across teams, tools, servers, resources, prompts, agents with export/reporting. + +- Role Automation: + - Auto‑assign default RBAC roles on resource creation (e.g., owner gets manager role in team scope; members get viewer). + - Optional per‑team policies defining who may create public resources. + +- ABAC for Virtual Servers: + - Attribute‑based conditions layered on top of RBAC (tenant tags, data classifications, environment, time windows, client IP). + +- Team/Resource Quotas and Policies: + - Per‑team limits (tools/servers/resources/agents); per‑team defaults for resource visibility and creation rights. + +- Public Resource Access Controls: + - Fine‑grained cross‑tenant rate limits and opt‑in masking for metadata shown to non‑members. + +This architecture provides a robust, secure, and scalable multi-tenant system that supports complex organizational structures while maintaining strict data isolation and flexible resource sharing capabilities. diff --git a/docs/docs/architecture/plugins.md b/docs/docs/architecture/plugins.md index 356a0d6f5..994ff1e22 100644 --- a/docs/docs/architecture/plugins.md +++ b/docs/docs/architecture/plugins.md @@ -1,23 +1,13 @@ # Plugin Framework Architecture -The MCP Context Forge Gateway implements a comprehensive, platform-agnostic plugin framework for AI safety middleware, security processing, and extensible gateway capabilities. This document provides a detailed architectural overview of the plugin system implementation, focusing on both **self-contained plugins** (running in-process) and **external/remote plugins** (as MCP servers) through a unified, reusable interface. +The MCP Context Forge Gateway implements a comprehensive plugin framework for AI safety middleware, security processing, and extensible gateway capabilities. This document provides a detailed architectural overview of the plugin system implementation. ## Overview -The plugin framework is designed as a **standalone, platform-agnostic ecosystem** that can be embedded in any application requiring extensible middleware processing. It enables both **self-contained plugins** (running in-process) and **external plugin integrations** (remote MCP servers) through a unified interface. This hybrid approach balances performance, security, and operational requirements while providing maximum flexibility for deployment across different environments and platforms. - -### Key Design Principles - -- **Platform Agnostic**: Framework can be integrated into any Python application -- **Protocol Neutral**: Supports multiple transport mechanisms (HTTP, WebSocket, STDIO, SSE) -- **MCP Native**: Remote plugins are fully compliant MCP servers -- **Security First**: Comprehensive timeout protection, input validation, and isolation -- **Production Ready**: Built for high-throughput, low-latency enterprise environments +The plugin framework enables both **self-contained plugins** (running in-process) and **external middleware service integrations** (calling external AI safety services) through a unified interface. This hybrid approach balances performance, security, and operational requirements. ## Architecture Components -The plugin framework is built around a modular, extensible architecture that supports multiple deployment patterns and integration scenarios. - ### Core Framework Structure ``` @@ -36,100 +26,9 @@ mcpgateway/plugins/framework/ └── mcp/ # MCP external service integration ├── client.py # MCP client for external plugin communication └── server/ # MCP server runtime for plugin hosting - ├── server.py # MCP server implementation - └── runtime.py # Plugin runtime management ``` -### Plugin Types and Deployment Patterns - -The framework supports three distinct plugin deployment patterns: - -#### 1. **Self-Contained Plugins** (In-Process) -- Execute within the main application process -- Written in Python and extend the base `Plugin` class -- Fastest execution with shared memory access -- Examples: regex filters, simple transforms, validation - -#### 2. **External Plugins** (Remote MCP Servers) -- Standalone MCP servers implementing plugin logic -- Can be written in any language (Python, TypeScript, Go, Rust, etc.) -- Communicate via MCP protocol (HTTP, WebSocket, STDIO) -- Examples: LlamaGuard, OpenAI Moderation, custom AI services - -#### 3. **Hybrid Plugins** (Platform Integration) -- Combine self-contained and external patterns -- Self-contained wrapper that orchestrates external services -- Enables complex workflows and service composition - -## Plugin System Architecture - -The plugin framework implements a sophisticated execution pipeline designed for enterprise-grade performance, security, and reliability. - -### Architectural Overview - -```mermaid -flowchart TB - subgraph "Request Lifecycle" - Client["🧑‍💻 Client Request"] --> Gateway["🌐 MCP Gateway"] - Gateway --> PM["🔌 Plugin Manager"] - PM --> Pipeline["⚡ Execution Pipeline"] - Pipeline --> Response["📤 Response"] - end - - subgraph "Plugin Manager Components" - PM --> Registry["📋 Plugin Registry"] - PM --> Config["⚙️ Configuration Loader"] - PM --> Executor["🔄 Plugin Executor"] - PM --> Context["📊 Context Manager"] - end - - subgraph "Plugin Types" - SelfContained["📦 Self-Contained\\n(In-Process)"] - External["🌍 External/Remote\\n(MCP Servers)"] - Hybrid["🔗 Hybrid\\n(Orchestration)"] - end - - subgraph "Hook Points" - PPF["🔍 prompt_pre_fetch"] - PPO["✅ prompt_post_fetch"] - TPI["🛠️ tool_pre_invoke"] - TPO["✅ tool_post_invoke"] - RPF["📄 resource_pre_fetch"] - RPO["✅ resource_post_fetch"] - end - - subgraph "External Integration" - MCP["📡 MCP Protocol"] - HTTP["🌐 HTTP/REST"] - WS["⚡ WebSocket"] - STDIO["💻 STDIO"] - SSE["📡 Server-Sent Events"] - end - - Registry --> SelfContained - Registry --> External - Registry --> Hybrid - - Executor --> PPF - Executor --> PPO - Executor --> TPI - Executor --> TPO - Executor --> RPF - Executor --> RPO - - External --> MCP - MCP --> HTTP - MCP --> WS - MCP --> STDIO - MCP --> SSE - - style Client fill:#e1f5fe - style Gateway fill:#f3e5f5 - style PM fill:#fff3e0 - style SelfContained fill:#e8f5e8 - style External fill:#fff8e1 - style Hybrid fill:#fce4ec -``` +## Plugin Architecture ### 1. Base Plugin Classes @@ -734,310 +633,12 @@ FEDERATION_POST_SYNC = "federation_post_sync" # Post-federation processing ### External Service Integrations -#### Current Integrations - -- ✅ **LlamaGuard:** Content safety classification and filtering -- ✅ **OpenAI Moderation API:** Commercial content moderation -- ✅ **Custom MCP Servers:** Any language, any protocol - -#### Planned Integrations (Phase 2-3) - -- 🔄 **HashiCorp Vault:** Secret management for plugin configurations -- 🔄 **Open Policy Agent (OPA):** Policy-as-code enforcement engine -- 🔄 **SPIFFE/SPIRE:** Workload identity and attestation -- 📋 **AWS GuardDuty:** Cloud security monitoring integration -- 📋 **Azure Cognitive Services:** Enterprise AI services -- 📋 **Google Cloud AI:** ML model integration -- 📋 **Kubernetes Operators:** Native K8s plugin deployment -- 📋 **Istio/Envoy:** Service mesh integration - -## Platform-Agnostic Design - -The plugin framework is designed as a **reusable, standalone ecosystem** that can be embedded in any application requiring extensible middleware processing. - -### Framework Portability - -```mermaid -flowchart TD - subgraph "Core Framework (Portable)" - Framework["🔌 Plugin Framework\\n(Python Package)"] - Interface["📋 Plugin Interface\\n(Language Agnostic)"] - Protocol["📡 MCP Protocol\\n(Cross-Platform)"] - end - - subgraph "Host Applications" - MCPGateway["🌐 MCP Gateway\\n(Primary Use Case)"] - WebFramework["🕷️ FastAPI/Flask App"] - CLITool["💻 CLI Application"] - Microservice["⚙️ Microservice"] - DataPipeline["📊 Data Pipeline"] - end - - Framework --> Interface - Interface --> Protocol - - Framework --> MCPGateway - Framework --> WebFramework - Framework --> CLITool - Framework --> Microservice - Framework --> DataPipeline - - style Framework fill:#fff3e0 - style Protocol fill:#e8f5e8 - style MCPGateway fill:#e3f2fd -``` - -### Integration Patterns - -#### Framework as Python Package - -```python -# Any Python application can embed the plugin framework -from mcpgateway.plugins import PluginManager, PluginConfig - -class MyApplication: - def __init__(self): - self.plugin_manager = PluginManager( - config_path="/path/to/plugins.yaml", - timeout=30 - ) - - async def process_request(self, request): - payload = RequestPayload(data=request.data) - context = GlobalContext(request_id=request.id) - - # Pre-processing with plugins - result, _ = await self.plugin_manager.custom_pre_hook( - payload, context - ) - - if not result.continue_processing: - return ErrorResponse(result.violation.description) - - # Your application logic here - response = await self.process_business_logic( - result.modified_payload or payload - ) - - return response -``` - -### Language Interoperability - -The MCP-based external plugin system enables **true polyglot development**: - -```yaml -# Multi-language plugin deployment -plugins: - # Python self-contained plugin - - name: "FastValidation" - kind: "internal.validators.FastValidator" - - # TypeScript/Node.js plugin - - name: "OpenAIModerationTS" - kind: "external" - mcp: - proto: "STREAMABLEHTTP" - url: "http://nodejs-plugin:3000/mcp" - - # Go plugin - - name: "HighPerformanceFilter" - kind: "external" - mcp: - proto: "STDIO" - script: "/opt/plugins/go-filter" - - # Rust plugin - - name: "CryptoValidator" - kind: "external" - mcp: - proto: "STREAMABLEHTTP" - url: "http://rust-plugin:8080/mcp" -``` - -## Remote Plugin MCP Server Integration - -External plugins communicate with the gateway using the Model Context Protocol (MCP), enabling language-agnostic plugin development. - -### MCP Plugin Protocol Flow - -```mermaid -sequenceDiagram - participant Gateway as MCP Gateway - participant Client as External Plugin Client - participant Server as Remote MCP Server - participant Service as External AI Service - - Note over Gateway,Service: Plugin Initialization - Gateway->>Client: Initialize External Plugin - Client->>Server: MCP Connection (HTTP/WS/STDIO) - Server-->>Client: Connection Established - Client->>Server: get_plugin_config(plugin_name) - Server-->>Client: Plugin Configuration - Client-->>Gateway: Plugin Ready - - Note over Gateway,Service: Request Processing - Gateway->>Client: tool_pre_invoke(payload, context) - Client->>Server: MCP Tool Call: tool_pre_invoke - - alt Self-Processing - Server->>Server: Process Internally - else External Service Call - Server->>Service: API Call (OpenAI, LlamaGuard, etc.) - Service-->>Server: Service Response - end - - Server-->>Client: MCP Response - Client-->>Gateway: PluginResult -``` - -### MCP Plugin Server Tools - -Remote plugin servers must implement standard MCP tools: - -```python -# Standard MCP Tools for Plugin Servers -REQUIRED_TOOLS = [ - "get_plugin_config", # Return plugin configuration - "prompt_pre_fetch", # Process prompt before fetching - "prompt_post_fetch", # Process prompt after rendering - "tool_pre_invoke", # Process tool before invocation - "tool_post_invoke", # Process tool after invocation - "resource_pre_fetch", # Process resource before fetching - "resource_post_fetch", # Process resource after fetching -] -``` - -### External Plugin Example (TypeScript) - -```typescript -// TypeScript/Node.js external plugin example -import { MCPServer, Tool } from '@modelcontextprotocol/sdk'; -import OpenAI from 'openai'; - -class OpenAIModerationPlugin { - private openai: OpenAI; - - constructor() { - this.openai = new OpenAI({ - apiKey: process.env.OPENAI_API_KEY - }); - } - - @Tool('tool_pre_invoke') - async handleToolPreInvoke(params: any) { - const { payload, context } = params; - - const content = Object.values(payload.args || {}) - .filter(v => typeof v === 'string') - .join(' '); - - if (!content.trim()) { - return { continue_processing: true }; - } - - try { - const moderation = await this.openai.moderations.create({ - input: content, - model: 'text-moderation-stable' - }); - - const result = moderation.results[0]; - - if (result.flagged) { - const flaggedCategories = Object.entries(result.categories) - .filter(([_, flagged]) => flagged) - .map(([category, _]) => category); - - return { - continue_processing: false, - violation: { - reason: 'Content policy violation', - description: `OpenAI Moderation flagged: ${flaggedCategories.join(', ')}`, - code: 'OPENAI_MODERATION_FLAGGED', - details: { - categories: result.categories, - flagged_categories: flaggedCategories - } - } - }; - } - - return { - continue_processing: true, - metadata: { - openai_moderation_score: Math.max(...Object.values(result.category_scores)) - } - }; - - } catch (error) { - return { - continue_processing: true, - metadata: { moderation_error: error.message } - }; - } - } - - @Tool('get_plugin_config') - async getPluginConfig(params: { name: string }) { - return { - name: params.name, - description: 'OpenAI Content Moderation', - version: '1.0.0', - hooks: ['tool_pre_invoke', 'prompt_pre_fetch'], - tags: ['openai', 'moderation', 'content-safety'], - mode: 'enforce', - priority: 30 - }; - } -} - -const server = new MCPServer(); -const plugin = new OpenAIModerationPlugin(); -server.registerPlugin(plugin); -server.listen({ transport: 'stdio' }); -``` - -## Related Issues and References - -### GitHub Issues - -- **Issue #773**: [Feature] Add support for external plugins - - ✅ **Status**: Completed - - **Impact**: Enables polyglot plugin development and service integration - -- **Issue #673**: [ARCHITECTURE] Identify Next Steps for Plugin Development - - 🔄 **Status**: In Progress - - **Impact**: Defines framework evolution and enterprise features - -- **Issue #720**: [Feature] Add CLI for authoring and packaging plugins - - 🔄 **Status**: In Progress - - **Impact**: Streamlines plugin development and deployment - -- **Issue #319**: [Feature Request] AI Middleware Integration / Plugin Framework - - ✅ **Status**: Completed (Core Framework) - - **Impact**: Enables extensible gateway capabilities and AI safety integration - -### Architecture Decisions - -1. **Hybrid Plugin Model**: Support both self-contained and external plugins -2. **MCP Protocol**: Enable language-agnostic plugin development -3. **Priority-Based Execution**: Sequential execution with deterministic behavior -4. **Singleton Manager**: Consistent state and resource management -5. **Context Isolation**: Per-request isolation with automatic cleanup -6. **Security First**: Timeout protection, input validation, and audit logging +- **LlamaGuard:** Content safety classification and filtering +- **OpenAI Moderation API:** Commercial content moderation +- **HashiCorp Vault:** Secret management for plugin configurations +- **Open Policy Agent (OPA):** Policy-as-code enforcement engine +- **SPIFFE/SPIRE:** Workload identity and attestation --- -## Summary - -The MCP Context Forge plugin framework provides a **production-ready, platform-agnostic foundation** for extensible middleware processing. The architecture successfully balances: - -✅ **Performance**: Sub-millisecond latency for self-contained plugins, optimized external plugin communication -✅ **Flexibility**: Support for any programming language via MCP protocol -✅ **Security**: Comprehensive protection mechanisms and compliance features -✅ **Scalability**: Horizontal scaling for self-contained, vertical scaling for external plugins -✅ **Developer Experience**: Simple APIs, comprehensive testing, and CLI tooling -✅ **Enterprise Ready**: Multi-tenant support, audit logging, and integration capabilities - -The framework supports both **immediate security needs** through self-contained plugins and **future enterprise AI safety integrations** through the external plugin ecosystem. With its platform-agnostic design, the framework can be embedded in any application requiring middleware processing capabilities. +This plugin framework provides the foundation for comprehensive AI safety middleware while maintaining high performance and operational simplicity. The architecture supports both immediate security needs through self-contained plugins and future enterprise AI safety integrations through external service support. diff --git a/docs/docs/deployment/container.md b/docs/docs/deployment/container.md index bd2c65909..1c6518c88 100644 --- a/docs/docs/deployment/container.md +++ b/docs/docs/deployment/container.md @@ -13,6 +13,8 @@ docker run -d --name mcpgateway \ -p 4444:4444 \ -e HOST=0.0.0.0 \ -e JWT_SECRET_KEY=my-test-key \ + -e JWT_AUDIENCE=mcpgateway-api \ + -e JWT_ISSUER=mcpgateway \ -e BASIC_AUTH_USER=admin \ -e BASIC_AUTH_PASSWORD=changeme \ -e AUTH_REQUIRED=true \ diff --git a/docs/docs/deployment/google-cloud-run.md b/docs/docs/deployment/google-cloud-run.md index 36ffe2ac6..8e16fb471 100644 --- a/docs/docs/deployment/google-cloud-run.md +++ b/docs/docs/deployment/google-cloud-run.md @@ -337,7 +337,7 @@ Use the MCP Gateway container to generate a JWT token: ```bash docker run -it --rm ghcr.io/ibm/mcp-context-forge:0.6.0 \ - python3 -m mcpgateway.utils.create_jwt_token -u admin --secret jwt-secret-key + python3 -m mcpgateway.utils.create_jwt_token -u admin@example.com --secret jwt-secret-key ``` Export the token as an environment variable: diff --git a/docs/docs/deployment/ibm-code-engine.md b/docs/docs/deployment/ibm-code-engine.md index 9b5f0c466..e2526fe5d 100644 --- a/docs/docs/deployment/ibm-code-engine.md +++ b/docs/docs/deployment/ibm-code-engine.md @@ -81,7 +81,7 @@ To access the APIs you need to generate your JWT token using the same `JWT_SECRE ```bash # Generate a one-off token for the default admin user -export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token -u admin) +export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token -u admin@example.com) echo ${MCPGATEWAY_BEARER_TOKEN} # Check that the key was generated ``` @@ -224,7 +224,7 @@ Test the API endpoints with the generated `MCPGATEWAY_BEARER_TOKEN`: ```bash # Generate a one-off token for the default admin user -export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token -u admin) +export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token -u admin@example.com) # Call a protected endpoint. Since there are not tools, initially this just returns `[]` curl -H "Authorization: Bearer ${MCPGATEWAY_BEARER_TOKEN}" \ diff --git a/docs/docs/deployment/local.md b/docs/docs/deployment/local.md index a90c06ac6..33442faf6 100644 --- a/docs/docs/deployment/local.md +++ b/docs/docs/deployment/local.md @@ -63,6 +63,6 @@ Visit [http://localhost:4444/admin](http://localhost:4444/admin) and login using ## 🔁 Quick JWT Setup ```bash -export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token -u admin) +export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token -u admin@example.com) curl -H "Authorization: Bearer $MCPGATEWAY_BEARER_TOKEN" http://localhost:4444/tools ``` diff --git a/docs/docs/development/developer-onboarding.md b/docs/docs/development/developer-onboarding.md index ad540ddf6..a972a943e 100644 --- a/docs/docs/development/developer-onboarding.md +++ b/docs/docs/development/developer-onboarding.md @@ -92,7 +92,7 @@ ???+ check "Generate and use a Bearer token" - [ ] Export a token with: ```bash - export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token --username admin --exp 0 --secret my-test-key) + export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token --username admin@example.com --exp 0 --secret my-test-key) ``` - [ ] Verify authenticated API access: diff --git a/docs/docs/development/github.md b/docs/docs/development/github.md index c5cb3ce16..7ff3106a1 100644 --- a/docs/docs/development/github.md +++ b/docs/docs/development/github.md @@ -214,7 +214,7 @@ make compose-up Quickly confirm that authentication works and the gateway is healthy: ```bash -export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token -u admin --secret my-test-key) +export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token -u admin@example.com --secret my-test-key) curl -s -k -H "Authorization: Bearer $MCPGATEWAY_BEARER_TOKEN" https://localhost:4444/health ``` diff --git a/docs/docs/development/index.md b/docs/docs/development/index.md index 030d225fa..262f0b571 100644 --- a/docs/docs/development/index.md +++ b/docs/docs/development/index.md @@ -96,7 +96,7 @@ Admin UI and API are protected by Basic Auth or JWT. To generate a JWT token: ```bash -export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token --username admin --exp 0 --secret my-test-key) +export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token --username admin@example.com --exp 0 --secret my-test-key) echo $MCPGATEWAY_BEARER_TOKEN ``` diff --git a/docs/docs/development/mcp-developer-guide-json-rpc.md b/docs/docs/development/mcp-developer-guide-json-rpc.md index bc11db720..c67c7258c 100644 --- a/docs/docs/development/mcp-developer-guide-json-rpc.md +++ b/docs/docs/development/mcp-developer-guide-json-rpc.md @@ -22,7 +22,7 @@ MCP Gateway uses JWT Bearer tokens for authentication. Generate a token before m ```bash # Generate authentication token export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token \ - --username admin --exp 10080 --secret my-test-key) + --username admin@example.com --exp 10080 --secret my-test-key) # Verify the token was generated echo "Token: ${MCPGATEWAY_BEARER_TOKEN}" @@ -506,7 +506,7 @@ echo '{"jsonrpc":"2.0","id":2,"method":"tools/list"}' | python3 -m mcpgateway.wr # Setup export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token \ - --username admin --exp 10080 --secret my-test-key) + --username admin@example.com --exp 10080 --secret my-test-key) # Function to make authenticated JSON-RPC calls make_call() { @@ -625,7 +625,7 @@ echo "=== Session Complete ===" # Setup export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token \ - --username admin --exp 10080 --secret my-test-key) + --username admin@example.com --exp 10080 --secret my-test-key) echo "=== Starting SSE Session ===" @@ -771,7 +771,7 @@ MCP follows JSON-RPC 2.0 error handling standards: ```bash # Verify token generation export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token \ - --username admin --exp 10080 --secret my-test-key) + --username admin@example.com --exp 10080 --secret my-test-key) # Test token validity curl -s -H "Authorization: Bearer $MCPGATEWAY_BEARER_TOKEN" \ @@ -1090,7 +1090,7 @@ async function main() { try { // Generate authentication token const authToken = execSync( - 'python3 -m mcpgateway.utils.create_jwt_token --username admin --exp 10080 --secret my-test-key', + 'python3 -m mcpgateway.utils.create_jwt_token --username admin@example.com --exp 10080 --secret my-test-key', { encoding: 'utf8' } ).trim(); diff --git a/docs/docs/development/review.md b/docs/docs/development/review.md index 32a6749c4..d28147616 100644 --- a/docs/docs/development/review.md +++ b/docs/docs/development/review.md @@ -58,7 +58,7 @@ make compose-up # spins up the Docker Compose stack # Test the basics curl -k https://localhost:4444/health` # {"status":"healthy"} -export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token --username admin --exp 0 --secret my-test-key) +export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token --username admin@example.com --exp 0 --secret my-test-key) curl -sk -H "Authorization: Bearer $MCPGATEWAY_BEARER_TOKEN" http://localhost:4444/version | jq -c '.database, .redis' # Add an MCP server to http://localhost:4444 then check logs: diff --git a/docs/docs/faq/index.md b/docs/docs/faq/index.md index 008e5a74e..1138dae20 100644 --- a/docs/docs/faq/index.md +++ b/docs/docs/faq/index.md @@ -122,7 +122,7 @@ ???+ example "🔑 How do I generate and use a JWT token?" ```bash - export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token -u admin -exp 0 --secret my-test-key) + export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token -u admin@example.com -exp 0 --secret my-test-key) curl -H "Authorization: Bearer $MCPGATEWAY_BEARER_TOKEN" https://localhost:4444/tools ``` diff --git a/docs/docs/index.md b/docs/docs/index.md index 4829395d0..b7db2de12 100644 --- a/docs/docs/index.md +++ b/docs/docs/index.md @@ -300,7 +300,7 @@ docker logs -f mcpgateway # Generating an API key docker run --rm -it ghcr.io/ibm/mcp-context-forge:0.6.0 \ - python3 -m mcpgateway.utils.create_jwt_token --username admin --exp 0 --secret my-test-key + python3 -m mcpgateway.utils.create_jwt_token --username admin@example.com --exp 0 --secret my-test-key ``` Browse to **[http://localhost:4444/admin](http://localhost:4444/admin)** (user `admin` / pass `changeme`). @@ -415,7 +415,7 @@ podman run -d --name mcpgateway \ * **JWT tokens** - Generate one in the running container: ```bash - docker exec mcpgateway python3 -m mcpgateway.utils.create_jwt_token -u admin -e 10080 --secret my-test-key + docker exec mcpgateway python3 -m mcpgateway.utils.create_jwt_token -u admin@example.com -e 10080 --secret my-test-key ``` * **Upgrades** - Stop, remove, and rerun with the same `-v $(pwd)/data:/data` mount; your DB and config stay intact. @@ -438,7 +438,7 @@ podman run -d --name mcpgateway \ ```bash # Set environment variables - export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token --username admin --exp 10080 --secret my-test-key) + export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token --username admin@example.com --exp 10080 --secret my-test-key) export MCP_AUTH=${MCPGATEWAY_BEARER_TOKEN} export MCP_SERVER_URL='http://localhost:4444/servers/UUID_OF_SERVER_1/mcp' export MCP_TOOL_CALL_TIMEOUT=120 @@ -836,7 +836,7 @@ You can get started by copying the provided [.env.example](.env.example) to `.en > * Generate tokens via: > > ```bash -> export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token --username admin --exp 0 --secret my-test-key) +> export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token --username admin@example.com --exp 0 --secret my-test-key) > echo $MCPGATEWAY_BEARER_TOKEN > ``` > * Tokens allow non-interactive API clients to authenticate securely. @@ -1280,7 +1280,7 @@ Generate an API Bearer token, and test the various API endpoints. ```bash # Generate a bearer token using the configured secret key (use the same as your .env) -export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token -u admin --secret my-test-key) +export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token -u admin@example.com --secret my-test-key) echo ${MCPGATEWAY_BEARER_TOKEN} # Quickly confirm that authentication works and the gateway is healthy diff --git a/docs/docs/manage/.pages b/docs/docs/manage/.pages index d31e68cb4..7a0686abe 100644 --- a/docs/docs/manage/.pages +++ b/docs/docs/manage/.pages @@ -13,6 +13,11 @@ nav: - proxy.md - oauth.md - securing.md + - sso.md + - sso-github-tutorial.md + - sso-google-tutorial.md + - sso-ibm-tutorial.md + - sso-okta-tutorial.md - tuning.md - ui-customization.md - upgrade.md diff --git a/docs/docs/manage/export-import-reference.md b/docs/docs/manage/export-import-reference.md index 408198552..66f35e27f 100644 --- a/docs/docs/manage/export-import-reference.md +++ b/docs/docs/manage/export-import-reference.md @@ -181,7 +181,7 @@ mcpgateway import backup.json --include "tools:*" ### "Authentication Error" ```bash -export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token --username admin --exp 0 --secret my-test-key) +export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token --username admin@example.com --exp 0 --secret my-test-key) ``` ### "Gateway Connection Failed" diff --git a/docs/docs/manage/securing.md b/docs/docs/manage/securing.md index bee80dafc..219dd502e 100644 --- a/docs/docs/manage/securing.md +++ b/docs/docs/manage/securing.md @@ -35,6 +35,10 @@ MCPGATEWAY_AUTH_ENABLED=true MCPGATEWAY_AUTH_USERNAME=custom-username # Change from default MCPGATEWAY_AUTH_PASSWORD=strong-password-here # Use secrets manager +# Platform admin user (auto-created during bootstrap) +PLATFORM_ADMIN_EMAIL=admin@yourcompany.com # Change from default +PLATFORM_ADMIN_PASSWORD=secure-admin-password # Use secrets manager + # Set environment for security defaults ENVIRONMENT=production @@ -49,7 +53,52 @@ COOKIE_SAMESITE=strict CORS_ALLOW_CREDENTIALS=true ``` -### 3. Network Security +#### Platform Admin Security Notes + +The platform admin user (`PLATFORM_ADMIN_EMAIL`) is automatically created during database bootstrap with full administrative privileges. This user: + +- Has access to all RBAC-protected endpoints +- Can manage users, teams, and system configuration +- Is recognized by both database-persisted and virtual authentication flows +- Should use a strong, unique email and password in production + +### 3. Token Scoping Security + +The gateway supports fine-grained token scoping to restrict token access to specific servers, permissions, IP ranges, and time windows. This provides defense-in-depth security for API access. + +#### Server-Scoped Tokens + +Server-scoped tokens are restricted to specific MCP servers and cannot access admin endpoints: + +```bash +# Generate server-scoped token (example) +python3 -m mcpgateway.utils.create_jwt_token \ + --username user@example.com \ + --scopes '{"server_id": "my-specific-server"}' +``` + +**Security Features:** +- Server-scoped tokens **cannot access `/admin`** endpoints (security hardening) +- Only truly public endpoints (`/health`, `/metrics`, `/docs`) bypass server restrictions +- RBAC permission checks still apply to all endpoints + +#### Permission-Scoped Tokens + +Tokens can be restricted to specific permission sets: + +```bash +# Generate permission-scoped token +python3 -m mcpgateway.utils.create_jwt_token \ + --username user@example.com \ + --scopes '{"permissions": ["tools.read", "resources.read"]}' +``` + +**Canonical Permissions Used:** +- `tools.create`, `tools.read`, `tools.update`, `tools.delete`, `tools.execute` +- `resources.create`, `resources.read`, `resources.update`, `resources.delete` +- `admin.system_config`, `admin.user_management`, `admin.security_audit` + +### 4. Network Security - [ ] Configure TLS/HTTPS with valid certificates - [ ] Implement firewall rules and network policies diff --git a/docs/docs/manage/sso-github-tutorial.md b/docs/docs/manage/sso-github-tutorial.md new file mode 100644 index 000000000..d71da2914 --- /dev/null +++ b/docs/docs/manage/sso-github-tutorial.md @@ -0,0 +1,382 @@ +# GitHub SSO Setup Tutorial + +This tutorial walks you through setting up GitHub Single Sign-On (SSO) authentication for MCP Gateway, allowing users to log in with their GitHub accounts. + +## Prerequisites + +- MCP Gateway installed and running +- GitHub account with admin access to create OAuth apps +- Access to your gateway's environment configuration + +## Step 1: Create GitHub OAuth Application + +### 1.1 Navigate to GitHub Settings + +1. Log into GitHub and go to **Settings** (click your profile picture → Settings) +2. In the left sidebar, click **Developer settings** +3. Click **OAuth Apps** +4. Click **New OAuth App** + +### 1.2 Configure OAuth Application + +Fill out the OAuth application form: + +**Application name**: `MCP Gateway - [Your Organization]` +- Example: `MCP Gateway - Acme Corp` + +**Homepage URL**: Your gateway's public URL +- Production: `https://gateway.yourcompany.com` +- Development (port 8000): `http://localhost:8000` +- Development (make serve, port 4444): `http://localhost:4444` + +**Application description** (optional): +``` +Model Context Protocol Gateway SSO Authentication +``` + +**Authorization callback URL**: **This is critical - must be exact** +``` +# Production +https://gateway.yourcompany.com/auth/sso/callback/github + +# Development (port 8000) +http://localhost:8000/auth/sso/callback/github + +# Development (make serve, port 4444) +http://localhost:4444/auth/sso/callback/github +``` + +**Important**: The callback URL must match your gateway's actual port and protocol exactly. + +### 1.3 Generate Client Secret + +1. Click **Register application** +2. Note the **Client ID** (visible immediately) +3. Click **Generate a new client secret** +4. **Important**: Copy the client secret immediately - you won't see it again +5. Store both Client ID and Client Secret securely + +## Step 2: Configure MCP Gateway Environment + +### 2.1 Update Environment Variables + +Add these variables to your `.env` file: + +```bash +# Enable SSO System +SSO_ENABLED=true + +# GitHub OAuth Configuration +SSO_GITHUB_ENABLED=true +SSO_GITHUB_CLIENT_ID=Iv1.a1b2c3d4e5f6g7h8 +SSO_GITHUB_CLIENT_SECRET=ghp_1234567890abcdef1234567890abcdef12345678 + +# Optional: Auto-create users on first login +SSO_AUTO_CREATE_USERS=true + +# Optional: Restrict to specific email domains +SSO_TRUSTED_DOMAINS=["yourcompany.com", "contractor.org"] + +# Optional: Preserve local admin authentication +SSO_PRESERVE_ADMIN_AUTH=true +``` + +### 2.2 Example Production Configuration + +```bash +# Production GitHub SSO Setup +SSO_ENABLED=true +SSO_GITHUB_ENABLED=true +SSO_GITHUB_CLIENT_ID=Iv1.real-client-id-from-github +SSO_GITHUB_CLIENT_SECRET=ghp_real-secret-from-github + +# Security settings +SSO_AUTO_CREATE_USERS=true +SSO_TRUSTED_DOMAINS=["yourcompany.com"] +SSO_PRESERVE_ADMIN_AUTH=true + +# Optional: GitHub organization team mapping +GITHUB_ORG_TEAM_MAPPING={"your-github-org": "dev-team-uuid"} +``` + +### 2.3 Development Configuration + +```bash +# Development GitHub SSO Setup +SSO_ENABLED=true +SSO_GITHUB_ENABLED=true +SSO_GITHUB_CLIENT_ID=Iv1.dev-client-id +SSO_GITHUB_CLIENT_SECRET=ghp_dev-secret + +# More permissive for testing +SSO_AUTO_CREATE_USERS=true +SSO_PRESERVE_ADMIN_AUTH=true +``` + +## Step 3: Restart and Verify Gateway + +### 3.1 Restart the Gateway + +```bash +# Development +make dev + +# Or directly with uvicorn +uvicorn mcpgateway.main:app --reload --host 0.0.0.0 --port 8000 + +# Production +make serve +``` + +### 3.2 Verify SSO is Enabled + +Test that SSO endpoints are accessible: + +```bash +# For development server (port 8000) +curl -X GET http://localhost:8000/auth/sso/providers + +# For production server (port 4444, make serve) +curl -X GET http://localhost:4444/auth/sso/providers + +# Should return GitHub provider: +[ + { + "id": "github", + "name": "github", + "display_name": "GitHub", + "authorization_url": null + } +] +``` + +**Troubleshooting**: +- **404 error**: Check that `SSO_ENABLED=true` in your environment and restart gateway +- **Empty array `[]`**: SSO is enabled but GitHub provider not created - restart gateway to auto-bootstrap +- **Connection refused**: Gateway not running or wrong port + +## Step 4: Test GitHub SSO Login + +### 4.1 Access Login Page + +1. Navigate to your gateway's login page: + - Development (port 8000): `http://localhost:8000/admin/login` + - Development (make serve, port 4444): `http://localhost:4444/admin/login` + - Production: `https://gateway.yourcompany.com/admin/login` + +2. You should see a "Continue with GitHub" button + +### 4.2 Test Authentication Flow + +1. Click **Continue with GitHub** +2. You'll be redirected to GitHub's authorization page +3. Click **Authorize** to grant access +4. You'll be redirected back to the gateway admin panel +5. You should be logged in successfully + +### 4.3 Verify User Creation + +Check that a user was created in the gateway: + +```bash +# Using the admin API (requires admin token) +curl -H "Authorization: Bearer YOUR_ADMIN_TOKEN" \ + http://localhost:8000/auth/users + +# Look for your GitHub email in the user list +``` + +## Step 5: Advanced Configuration (Optional) + +### 5.1 GitHub Organization Team Mapping + +Map GitHub organizations to gateway teams: + +```bash +# Environment variable format +GITHUB_ORG_TEAM_MAPPING={"your-github-org": "dev-team-uuid", "admin-org": "admin-team-uuid"} +``` + +Create teams first using the admin API: + +```bash +# Create a team +curl -X POST http://localhost:8000/teams \ + -H "Authorization: Bearer YOUR_ADMIN_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "GitHub Developers", + "description": "Users from GitHub organization" + }' +``` + +### 5.2 Custom OAuth Scopes + +Request additional GitHub permissions: + +```bash +# Add to .env +SSO_GITHUB_SCOPE="user:email read:org" +``` + +### 5.3 Trusted Domains Restriction + +Only allow users from specific email domains: + +```bash +SSO_TRUSTED_DOMAINS=["yourcompany.com", "contractor.com"] +``` + +Users with emails from other domains will be blocked. + +## Step 6: Production Deployment Checklist + +### 6.1 Security Requirements + +- [ ] Use HTTPS for all callback URLs +- [ ] Store client secrets in secure vault/secret management +- [ ] Set restrictive `SSO_TRUSTED_DOMAINS` +- [ ] Enable audit logging +- [ ] Regular secret rotation schedule + +### 6.2 Callback URL Verification + +Ensure callback URLs match exactly: + +**GitHub OAuth App**: `https://gateway.yourcompany.com/auth/sso/callback/github` +**Gateway Config**: Gateway must be accessible at `https://gateway.yourcompany.com` + +### 6.3 Firewall and Network + +- [ ] Gateway accessible from internet (for GitHub callbacks) +- [ ] HTTPS certificates valid and auto-renewing +- [ ] CDN/load balancer configured if needed + +## Troubleshooting + +### Error: "SSO authentication is disabled" + +**Problem**: SSO endpoints return 404 +**Solution**: Set `SSO_ENABLED=true` and restart gateway + +```bash +# Check environment +echo $SSO_ENABLED + +# Should output: true +``` + +### Error: "The redirect_uri is not associated with this application" + +**Problem**: GitHub OAuth app callback URL doesn't match your gateway's actual URL +**Solution**: Update GitHub OAuth app settings to match your gateway's port and protocol + +```bash +# For make serve (port 4444): +Homepage URL: http://localhost:4444 +Authorization callback URL: http://localhost:4444/auth/sso/callback/github + +# For development server (port 8000): +Homepage URL: http://localhost:8000 +Authorization callback URL: http://localhost:8000/auth/sso/callback/github + +# Common mistakes: +http://localhost:4444/auth/sso/callback/github/ # Extra slash +http://localhost:8000/auth/sso/callback/github # Wrong port (when using 4444) +https://localhost:4444/auth/sso/callback/github # HTTPS on localhost +``` + +### Error: Missing query parameters (code, state) + +**Problem**: Direct access to callback URL without OAuth flow +**Solution**: Don't navigate directly to `/auth/sso/callback/github` - use the "Continue with GitHub" button + +### Error: "User creation failed" + +**Problem**: User's email domain not in trusted domains +**Solution**: Add domain to `SSO_TRUSTED_DOMAINS` or remove restriction + +```bash +# Add user's domain +SSO_TRUSTED_DOMAINS=["yourcompany.com", "user-domain.com"] + +# Or remove restriction entirely +SSO_TRUSTED_DOMAINS=[] +``` + +### Error: No GitHub button appears + +**Problem**: JavaScript fails to load SSO providers +**Solution**: Check browser console and Content Security Policy + +```bash +# Check if providers endpoint works +curl http://localhost:8000/auth/sso/providers + +# Check browser console for CSP violations +``` + +### GitHub Authorization Returns Error + +**Problem**: GitHub shows "Application suspended" or similar +**Solution**: Check GitHub OAuth app status and limits + +1. Go to GitHub Settings → Developer settings → OAuth Apps +2. Check if your app is suspended or has issues +3. Verify callback URL is correct +4. Check if you've exceeded rate limits + +### Users Can't Access After Login + +**Problem**: User logs in successfully but has no permissions +**Solution**: Assign users to teams or roles + +```bash +# List users to find the GitHub user +curl -H "Authorization: Bearer ADMIN_TOKEN" \ + http://localhost:8000/auth/users + +# Assign user to a team +curl -X POST http://localhost:8000/teams/TEAM_ID/members \ + -H "Authorization: Bearer ADMIN_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{"user_id": "USER_ID", "role": "member"}' +``` + +## Testing Checklist + +- [ ] GitHub OAuth app created and configured +- [ ] Environment variables set correctly +- [ ] Gateway restarted with new config +- [ ] `/auth/sso/providers` returns GitHub provider +- [ ] Login page shows "Continue with GitHub" button +- [ ] Clicking GitHub button redirects to GitHub +- [ ] GitHub authorization redirects back successfully +- [ ] User is logged into gateway admin panel +- [ ] User appears in gateway user list + +## Next Steps + +After GitHub SSO is working: + +1. **Set up additional providers** (Google, Okta, IBM Verify) +2. **Configure team mappings** for automatic role assignment +3. **Set up monitoring** for authentication failures +4. **Configure backup authentication** methods +5. **Document user onboarding** process for your organization + +## Related Documentation + +- [Complete SSO Guide](sso.md) - Full SSO documentation +- [Team Management](teams.md) - Managing teams and roles +- [RBAC Configuration](rbac.md) - Role-based access control +- [Security Best Practices](../architecture/security-features.md) + +## Support + +If you encounter issues: + +1. Check the [Troubleshooting section](#troubleshooting) above +2. Enable debug logging: `LOG_LEVEL=DEBUG` +3. Review gateway logs for SSO-related errors +4. Verify GitHub OAuth app configuration matches exactly diff --git a/docs/docs/manage/sso-google-tutorial.md b/docs/docs/manage/sso-google-tutorial.md new file mode 100644 index 000000000..12191769b --- /dev/null +++ b/docs/docs/manage/sso-google-tutorial.md @@ -0,0 +1,399 @@ +# Google OAuth/OIDC Setup Tutorial + +This tutorial walks you through setting up Google Single Sign-On (SSO) authentication for MCP Gateway, allowing users to log in with their Google accounts. + +## Prerequisites + +- MCP Gateway installed and running +- Google account with access to Google Cloud Console +- Access to your gateway's environment configuration + +## Step 1: Create Google OAuth Application + +### 1.1 Access Google Cloud Console + +1. Go to [Google Cloud Console](https://console.cloud.google.com/) +2. Select or create a project for your MCP Gateway +3. In the left sidebar, navigate to **APIs & Services** → **Credentials** + +### 1.2 Enable Required APIs + +Before creating credentials, enable the necessary APIs: + +1. Go to **APIs & Services** → **Library** +2. Search for and enable: + - **Google Identity Service** (for user authentication) + - **Google People API** (for user profile information) + - **Google Identity and Access Management (IAM) API** (optional, for advanced features) + +### 1.3 Configure OAuth Consent Screen + +1. Go to **APIs & Services** → **OAuth consent screen** +2. Choose **External** (for general use) or **Internal** (for Google Workspace) +3. Fill out the required fields: + +**App name**: `MCP Gateway - [Your Organization]` + +**User support email**: Your support email + +**Application home page**: Your gateway URL +- Example: `https://gateway.yourcompany.com` + +**Authorized domains**: Add your domain +- Example: `yourcompany.com` + +**Developer contact information**: Your email + +4. Click **Save and Continue** +5. Add scopes (optional for basic auth): + - `userinfo.email` + - `userinfo.profile` + - `openid` + +### 1.4 Create OAuth Client ID + +1. Go to **APIs & Services** → **Credentials** +2. Click **Create Credentials** → **OAuth client ID** +3. Choose **Web application** +4. Configure the client: + +**Name**: `MCP Gateway OAuth Client` + +**Authorized JavaScript origins**: Your gateway domain +- Production: `https://gateway.yourcompany.com` +- Development: `http://localhost:8000` + +**Authorized redirect URIs**: **Critical - must be exact** +- Production: `https://gateway.yourcompany.com/auth/sso/callback/google` +- Development: `http://localhost:8000/auth/sso/callback/google` + +5. Click **Create** +6. **Important**: Copy the Client ID and Client Secret immediately + +## Step 2: Configure MCP Gateway Environment + +### 2.1 Update Environment Variables + +Add these variables to your `.env` file: + +```bash +# Enable SSO System +SSO_ENABLED=true + +# Google OAuth Configuration +SSO_GOOGLE_ENABLED=true +SSO_GOOGLE_CLIENT_ID=123456789012-abcdefghijklmnopqrstuvwxyz123456.apps.googleusercontent.com +SSO_GOOGLE_CLIENT_SECRET=GOCSPX-1234567890abcdefghijklmnop + +# Optional: Auto-create users on first login +SSO_AUTO_CREATE_USERS=true + +# Optional: Restrict to Google Workspace domain +SSO_TRUSTED_DOMAINS=["yourcompany.com"] + +# Optional: Preserve local admin authentication +SSO_PRESERVE_ADMIN_AUTH=true +``` + +### 2.2 Example Production Configuration + +```bash +# Production Google SSO Setup +SSO_ENABLED=true +SSO_GOOGLE_ENABLED=true +SSO_GOOGLE_CLIENT_ID=123456789012-realclientid.apps.googleusercontent.com +SSO_GOOGLE_CLIENT_SECRET=GOCSPX-realsecretfromgoogle + +# Security settings for Google Workspace +SSO_AUTO_CREATE_USERS=true +SSO_TRUSTED_DOMAINS=["yourcompany.com"] # Only company emails +SSO_PRESERVE_ADMIN_AUTH=true + +# Optional: Custom OAuth scopes +SSO_GOOGLE_SCOPE="openid profile email" +``` + +### 2.3 Development Configuration + +```bash +# Development Google SSO Setup +SSO_ENABLED=true +SSO_GOOGLE_ENABLED=true +SSO_GOOGLE_CLIENT_ID=123456789012-devtest.apps.googleusercontent.com +SSO_GOOGLE_CLIENT_SECRET=GOCSPX-devtestsecret + +# More permissive for testing +SSO_AUTO_CREATE_USERS=true +SSO_PRESERVE_ADMIN_AUTH=true +# SSO_TRUSTED_DOMAINS=[] # Allow any email for testing +``` + +### 2.4 Google Workspace Domain Restriction + +For organizations using Google Workspace: + +```bash +# Restrict to your organization's domain +SSO_TRUSTED_DOMAINS=["yourcompany.com"] + +# Allow multiple domains +SSO_TRUSTED_DOMAINS=["yourcompany.com", "subsidiary.com", "contractor.org"] +``` + +## Step 3: Restart and Verify Gateway + +### 3.1 Restart the Gateway + +```bash +# Development +make dev + +# Or directly with uvicorn +uvicorn mcpgateway.main:app --reload --host 0.0.0.0 --port 8000 + +# Production +make serve +``` + +### 3.2 Verify Google SSO is Enabled + +Test that Google appears in SSO providers: + +```bash +# For development server (port 8000) +curl -X GET http://localhost:8000/auth/sso/providers + +# For production server (port 4444, make serve) +curl -X GET http://localhost:4444/auth/sso/providers + +# Should return Google in the list: +[ + { + "id": "google", + "name": "google", + "display_name": "Google", + "authorization_url": null + } +] +``` + +**Troubleshooting**: +- **404 error**: Check that `SSO_ENABLED=true` in your environment and restart gateway +- **Empty array `[]`**: SSO is enabled but Google provider not created - restart gateway to auto-bootstrap + +## Step 4: Test Google SSO Login + +### 4.1 Access Login Page + +1. Navigate to your gateway's login page: + - Development (port 8000): `http://localhost:8000/admin/login` + - Development (make serve, port 4444): `http://localhost:4444/admin/login` + - Production: `https://gateway.yourcompany.com/admin/login` + +2. You should see a "Continue with Google" button + +### 4.2 Test Authentication Flow + +1. Click **Continue with Google** +2. You'll be redirected to Google's sign-in page +3. Enter your Google credentials +4. Grant permissions if prompted +5. You'll be redirected back to the gateway admin panel +6. You should be logged in successfully + +### 4.3 Verify User Creation + +Check that a user was created: + +```bash +# Using the admin API (requires admin token) +curl -H "Authorization: Bearer YOUR_ADMIN_TOKEN" \ + http://localhost:8000/auth/users + +# Look for your Google email in the user list +``` + +## Step 5: Google Workspace Integration (Advanced) + +### 5.1 Google Workspace Domain Verification + +For Google Workspace organizations: + +1. In Google Cloud Console, go to **Domain verification** +2. Verify ownership of your domain +3. This allows stricter domain controls + +### 5.2 Google Groups Integration + +Map Google Groups to gateway teams: + +```bash +# Custom configuration (requires additional API setup) +GOOGLE_GROUPS_MAPPING={"group1@yourcompany.com": "team-uuid-1", "admins@yourcompany.com": "admin-team-uuid"} +``` + +**Note**: This requires additional Google Groups API setup and custom development. + +### 5.3 Advanced OAuth Scopes + +Request additional Google permissions: + +```bash +# Extended scopes for Google Workspace +SSO_GOOGLE_SCOPE="openid profile email https://www.googleapis.com/auth/admin.directory.group.readonly" +``` + +Common useful scopes: +- `openid profile email` - Basic user info (default) +- `https://www.googleapis.com/auth/admin.directory.user.readonly` - Read user directory +- `https://www.googleapis.com/auth/admin.directory.group.readonly` - Read group memberships + +## Step 6: Production Deployment Checklist + +### 6.1 Security Requirements + +- [ ] Use HTTPS for all redirect URIs +- [ ] Store client secrets securely (vault/secret management) +- [ ] Set restrictive `SSO_TRUSTED_DOMAINS` for Google Workspace +- [ ] Configure OAuth consent screen properly +- [ ] Regular secret rotation + +### 6.2 Google Cloud Configuration + +- [ ] OAuth consent screen configured +- [ ] Authorized domains added +- [ ] Required APIs enabled +- [ ] Redirect URIs match exactly +- [ ] Client ID and secret copied securely + +### 6.3 DNS and Certificates + +- [ ] Gateway accessible from internet +- [ ] HTTPS certificates valid +- [ ] Domain verification completed (for Workspace) + +## Troubleshooting + +### Error: "SSO authentication is disabled" + +**Problem**: SSO endpoints return 404 +**Solution**: Set `SSO_ENABLED=true` and restart gateway + +### Error: "redirect_uri_mismatch" + +**Problem**: Google OAuth redirect URI doesn't match +**Solution**: Verify exact URL match in Google Cloud Console + +```bash +# Google Cloud Console authorized redirect URIs must exactly match: +https://your-domain.com/auth/sso/callback/google + +# Common mistakes: +https://your-domain.com/auth/sso/callback/google/ # Extra slash +http://your-domain.com/auth/sso/callback/google # HTTP instead of HTTPS +https://www.your-domain.com/auth/sso/callback/google # Wrong subdomain +``` + +### Error: "Access blocked: This app's request is invalid" + +**Problem**: OAuth consent screen not configured properly +**Solution**: Complete OAuth consent screen configuration + +1. Go to Google Cloud Console → OAuth consent screen +2. Fill in all required fields +3. Add your domain to authorized domains +4. Publish the app (for external users) + +### Error: "User creation failed" + +**Problem**: User's email domain not in trusted domains +**Solution**: Add domain to trusted domains or remove restriction + +```bash +# For Google Workspace - add your domain +SSO_TRUSTED_DOMAINS=["yourcompany.com"] + +# For consumer Google accounts - remove restriction +SSO_TRUSTED_DOMAINS=[] +``` + +### Google Sign-in Shows "This app isn't verified" + +**Problem**: App verification required for production use +**Solution**: For internal use, users can click "Advanced" → "Go to [App Name] (unsafe)" + +For production apps with external users: +1. Go through Google's app verification process +2. Or limit to internal users only (Google Workspace) + +### Error: "invalid_client" + +**Problem**: Wrong client ID or secret +**Solution**: Verify credentials from Google Cloud Console + +```bash +# Double-check these values match Google Cloud Console +SSO_GOOGLE_CLIENT_ID=your-actual-client-id.apps.googleusercontent.com +SSO_GOOGLE_CLIENT_SECRET=GOCSPX-your-actual-client-secret +``` + +## Testing Checklist + +- [ ] Google Cloud project created +- [ ] OAuth consent screen configured +- [ ] OAuth client ID created with correct redirect URI +- [ ] Client ID and secret added to environment +- [ ] Gateway restarted with new config +- [ ] `/auth/sso/providers` returns Google provider +- [ ] Login page shows "Continue with Google" button +- [ ] Clicking Google button redirects to Google sign-in +- [ ] Google sign-in redirects back successfully +- [ ] User is logged into gateway admin panel +- [ ] User appears in gateway user list + +## Google Workspace Specific Setup + +### Admin Console Configuration + +If using Google Workspace: + +1. Go to [Google Admin Console](https://admin.google.com) +2. Navigate to **Security** → **API controls** +3. Click **MANAGE THIRD-PARTY APP ACCESS** +4. Configure app access for your MCP Gateway OAuth app + +### Domain-Wide Delegation (Advanced) + +For service account access (advanced use cases): + +1. Create a service account in Google Cloud Console +2. Enable domain-wide delegation +3. In Google Admin Console, configure API scopes +4. Use service account for server-to-server authentication + +## Next Steps + +After Google SSO is working: + +1. **Test with different user types** (admin, regular users) +2. **Set up team mappings** for automatic role assignment +3. **Configure additional SSO providers** for redundancy +4. **Monitor authentication logs** for issues +5. **Document user onboarding** process + +## Related Documentation + +- [Complete SSO Guide](sso.md) - Full SSO documentation +- [GitHub SSO Tutorial](sso-github-tutorial.md) - GitHub setup guide +- [Team Management](teams.md) - Managing teams and roles +- [RBAC Configuration](rbac.md) - Role-based access control + +## Support + +If you encounter issues: + +1. Check Google Cloud Console for error messages +2. Enable debug logging: `LOG_LEVEL=DEBUG` +3. Review gateway logs for Google OAuth errors +4. Verify all Google Cloud Console settings match tutorial +5. Test with a simple curl command to isolate issues diff --git a/docs/docs/manage/sso-ibm-tutorial.md b/docs/docs/manage/sso-ibm-tutorial.md new file mode 100644 index 000000000..b3b31d1e2 --- /dev/null +++ b/docs/docs/manage/sso-ibm-tutorial.md @@ -0,0 +1,425 @@ +# IBM Security Verify Setup Tutorial + +This tutorial walks you through setting up IBM Security Verify (formerly IBM Cloud Identity) SSO authentication for MCP Gateway, enabling enterprise-grade identity management. + +## Prerequisites + +- MCP Gateway installed and running +- IBM Security Verify tenant with admin access +- Access to your gateway's environment configuration + +## Step 1: Configure IBM Security Verify Application + +### 1.1 Access IBM Security Verify Admin Console + +1. Navigate to your IBM Security Verify admin console + - URL format: `https://[tenant-name].verify.ibm.com` +2. Log in with your administrator credentials +3. Go to **Applications** in the left sidebar + +### 1.2 Create New Application + +1. Click **Add application** +2. Choose **Custom Application** +3. Select **OpenID Connect** as the sign-on method + +### 1.3 Configure Application Settings + +**General Settings**: +- **Application name**: `MCP Gateway` +- **Description**: `Model Context Protocol Gateway SSO Authentication` +- **Application URL**: Your gateway's public URL + - Example: `https://gateway.yourcompany.com` + +**Sign-on Settings**: +- **Application type**: `Web` +- **Grant types**: Select `Authorization Code` +- **Redirect URIs**: **Critical - must be exact** + - Production: `https://gateway.yourcompany.com/auth/sso/callback/ibm_verify` + - Development: `http://localhost:8000/auth/sso/callback/ibm_verify` + +### 1.4 Configure Advanced Settings + +**Token Settings**: +- **Access token lifetime**: 3600 seconds (1 hour) +- **Refresh token lifetime**: 86400 seconds (24 hours) +- **ID token lifetime**: 3600 seconds (1 hour) + +**Scopes**: +- Select `openid` (required) +- Select `profile` (recommended) +- Select `email` (required) + +### 1.5 Obtain Client Credentials + +After saving the application: + +1. Go to the **Sign-on** tab +2. Note the **Client ID** +3. Click **Generate secret** to create a client secret +4. **Important**: Copy the client secret immediately - you won't see it again +5. Note the **Discovery endpoint** URL (usually `https://[tenant].verify.ibm.com/oidc/endpoint/default/.well-known/openid_configuration`) + +## Step 2: Configure MCP Gateway Environment + +### 2.1 Find Your IBM Security Verify Endpoints + +Before configuring, you need your tenant's OIDC endpoints: + +```bash +# Replace [tenant-name] with your actual tenant name +curl https://[tenant-name].verify.ibm.com/oidc/endpoint/default/.well-known/openid-configuration + +# This returns endpoint URLs you'll need +``` + +### 2.2 Update Environment Variables + +Add these variables to your `.env` file: + +```bash +# Enable SSO System +SSO_ENABLED=true + +# IBM Security Verify OIDC Configuration +SSO_IBM_VERIFY_ENABLED=true +SSO_IBM_VERIFY_CLIENT_ID=your-client-id-from-ibm-verify +SSO_IBM_VERIFY_CLIENT_SECRET=your-client-secret-from-ibm-verify +SSO_IBM_VERIFY_ISSUER=https://[tenant-name].verify.ibm.com/oidc/endpoint/default + +# Optional: Auto-create users on first login +SSO_AUTO_CREATE_USERS=true + +# Optional: Restrict to corporate email domains +SSO_TRUSTED_DOMAINS=["yourcompany.com"] + +# Optional: Preserve local admin authentication +SSO_PRESERVE_ADMIN_AUTH=true +``` + +### 2.3 Example Production Configuration + +```bash +# Production IBM Security Verify SSO Setup +SSO_ENABLED=true +SSO_IBM_VERIFY_ENABLED=true +SSO_IBM_VERIFY_CLIENT_ID=12345678-abcd-1234-efgh-123456789012 +SSO_IBM_VERIFY_CLIENT_SECRET=AbCdEfGhIjKlMnOpQrStUvWxYz123456 +SSO_IBM_VERIFY_ISSUER=https://acmecorp.verify.ibm.com/oidc/endpoint/default + +# Enterprise security settings +SSO_AUTO_CREATE_USERS=true +SSO_TRUSTED_DOMAINS=["acmecorp.com"] +SSO_PRESERVE_ADMIN_AUTH=true + +# Optional: Custom scopes for additional user attributes +SSO_IBM_VERIFY_SCOPE="openid profile email" +``` + +### 2.4 Development Configuration + +```bash +# Development IBM Security Verify SSO Setup +SSO_ENABLED=true +SSO_IBM_VERIFY_ENABLED=true +SSO_IBM_VERIFY_CLIENT_ID=dev-client-id +SSO_IBM_VERIFY_CLIENT_SECRET=dev-client-secret +SSO_IBM_VERIFY_ISSUER=https://dev-tenant.verify.ibm.com/oidc/endpoint/default + +# More permissive for testing +SSO_AUTO_CREATE_USERS=true +SSO_PRESERVE_ADMIN_AUTH=true +``` + +### 2.5 Advanced Configuration Options + +```bash +# Custom OAuth scopes for enterprise features +SSO_IBM_VERIFY_SCOPE="openid profile email groups" + +# Custom user attribute mappings (if needed) +IBM_VERIFY_USER_MAPPING={"preferred_username": "username", "family_name": "last_name"} + +# Group/role mapping for automatic team assignment +IBM_VERIFY_GROUP_MAPPING={"CN=Developers,OU=Groups": "dev-team-uuid", "CN=Administrators,OU=Groups": "admin-team-uuid"} +``` + +## Step 3: Configure User Access in IBM Security Verify + +### 3.1 Assign Users to Application + +1. In IBM Security Verify admin console, go to **Applications** +2. Find your MCP Gateway application +3. Go to **Access** tab +4. Click **Assign access** +5. Choose assignment method: + - **Users**: Assign specific users + - **Groups**: Assign entire groups (recommended) + - **Everyone**: Allow all users (not recommended for production) + +### 3.2 Configure Group-Based Access (Recommended) + +1. Create or use existing groups in IBM Security Verify +2. Assign the application to appropriate groups: + - `MCP_Gateway_Users` - Regular users + - `MCP_Gateway_Admins` - Administrative users +3. Add users to these groups as needed + +## Step 4: Restart and Verify Gateway + +### 4.1 Restart the Gateway + +```bash +# Development +make dev + +# Or directly with uvicorn +uvicorn mcpgateway.main:app --reload --host 0.0.0.0 --port 8000 + +# Production +make serve +``` + +### 4.2 Verify IBM Security Verify SSO is Enabled + +Test that IBM Security Verify appears in SSO providers: + +```bash +# Check if IBM Security Verify is listed +curl -X GET http://localhost:8000/auth/sso/providers + +# Should return IBM Security Verify in the list: +[ + { + "id": "ibm_verify", + "name": "ibm_verify", + "display_name": "IBM Security Verify" + } +] +``` + +## Step 5: Test IBM Security Verify SSO Login + +### 5.1 Access Login Page + +1. Navigate to your gateway's login page: + - Development: `http://localhost:8000/admin/login` + - Production: `https://gateway.yourcompany.com/admin/login` + +2. You should see a "Continue with IBM Security Verify" button + +### 5.2 Test Authentication Flow + +1. Click **Continue with IBM Security Verify** +2. You'll be redirected to IBM Security Verify's login page +3. Enter your corporate credentials +4. Complete any multi-factor authentication if required +5. Grant consent if prompted +6. You'll be redirected back to the gateway admin panel +7. You should be logged in successfully + +### 5.3 Verify User Creation + +Check that a user was created: + +```bash +# Using the admin API (requires admin token) +curl -H "Authorization: Bearer YOUR_ADMIN_TOKEN" \ + http://localhost:8000/auth/users + +# Look for your IBM Security Verify email in the user list +``` + +## Step 6: Enterprise Features (Advanced) + +### 6.1 Multi-Factor Authentication (MFA) + +IBM Security Verify MFA is handled automatically: + +1. Configure MFA policies in IBM Security Verify admin console +2. Go to **Security** → **Multi-factor authentication** +3. Set up policies for your MCP Gateway application +4. Users will be prompted for MFA during login + +### 6.2 Conditional Access + +Configure access policies based on conditions: + +1. In IBM Security Verify, go to **Security** → **Access policies** +2. Create policies for your MCP Gateway application +3. Configure conditions: + - Device compliance + - Location-based access + - Risk-based authentication + - Time-based restrictions + +### 6.3 User Lifecycle Management + +Configure automatic user provisioning: + +1. Set up SCIM provisioning (if supported) +2. Configure user attribute synchronization +3. Set up automatic de-provisioning for terminated users + +### 6.4 Audit and Compliance + +Enable comprehensive audit logging: + +1. In IBM Security Verify, configure audit settings +2. Enable logging for: + - Authentication events + - Authorization decisions + - User provisioning actions + - Administrative changes + +## Step 7: Production Deployment Checklist + +### 7.1 Security Requirements + +- [ ] HTTPS enforced for all redirect URIs +- [ ] Client secrets stored in secure vault +- [ ] MFA policies configured +- [ ] Conditional access policies set +- [ ] Audit logging enabled +- [ ] Regular security reviews scheduled + +### 7.2 IBM Security Verify Configuration + +- [ ] Application created with correct settings +- [ ] Redirect URIs match exactly +- [ ] Appropriate users/groups assigned access +- [ ] MFA policies configured +- [ ] Audit logging enabled + +### 7.3 Network and Infrastructure + +- [ ] Gateway accessible from corporate network +- [ ] IBM Security Verify endpoints reachable +- [ ] HTTPS certificates valid +- [ ] Load balancer configured (if needed) + +## Troubleshooting + +### Error: "SSO authentication is disabled" + +**Problem**: SSO endpoints return 404 +**Solution**: Set `SSO_ENABLED=true` and restart gateway + +### Error: "invalid_redirect_uri" + +**Problem**: IBM Security Verify redirect URI doesn't match +**Solution**: Verify exact URL match in IBM Security Verify application settings + +```bash +# IBM Security Verify redirect URI must exactly match: +https://your-domain.com/auth/sso/callback/ibm_verify + +# Common mistakes: +https://your-domain.com/auth/sso/callback/ibm_verify/ # Extra slash +http://your-domain.com/auth/sso/callback/ibm_verify # HTTP instead of HTTPS +https://your-domain.com/auth/sso/callback/ibm-verify # Wrong provider ID +``` + +### Error: "invalid_client" + +**Problem**: Wrong client ID or client secret +**Solution**: Verify credentials from IBM Security Verify application + +```bash +# Double-check these values match IBM Security Verify +SSO_IBM_VERIFY_CLIENT_ID=your-actual-client-id +SSO_IBM_VERIFY_CLIENT_SECRET=your-actual-client-secret +``` + +### Error: "User not authorized" + +**Problem**: User not assigned access to the application +**Solution**: Assign user or their group to the MCP Gateway application + +1. In IBM Security Verify admin console, go to Applications +2. Find MCP Gateway application → Access tab +3. Assign access to the user or their group + +### Error: "Issuer mismatch" + +**Problem**: Wrong issuer URL configured +**Solution**: Verify issuer URL matches your IBM Security Verify tenant + +```bash +# Get the correct issuer from the well-known configuration +curl https://[tenant-name].verify.ibm.com/oidc/endpoint/default/.well-known/openid-configuration + +# Look for "issuer" field in response +``` + +### MFA Not Working + +**Problem**: Multi-factor authentication not triggered +**Solution**: Check MFA policies in IBM Security Verify + +1. Go to Security → Multi-factor authentication +2. Ensure policies are enabled for your application +3. Check user enrollment status +4. Verify policy conditions are met + +## Testing Checklist + +- [ ] IBM Security Verify application created +- [ ] Client ID and secret generated +- [ ] Redirect URI configured correctly +- [ ] Users/groups assigned access to application +- [ ] Environment variables set correctly +- [ ] Gateway restarted with new config +- [ ] `/auth/sso/providers` returns IBM Security Verify provider +- [ ] Login page shows "Continue with IBM Security Verify" button +- [ ] Authentication flow completes successfully +- [ ] User appears in gateway user list +- [ ] MFA working (if configured) + +## Enterprise Integration + +### Active Directory Integration + +If IBM Security Verify is connected to Active Directory: + +1. User attributes sync automatically +2. Group memberships are available +3. Configure group-based access in IBM Security Verify +4. Map AD groups to gateway teams + +### SAML Federation (Alternative) + +For environments preferring SAML over OIDC: + +1. Configure SAML application in IBM Security Verify +2. Use custom SAML integration (requires additional development) +3. Configure SAML assertions and attribute mapping + +## Next Steps + +After IBM Security Verify SSO is working: + +1. **Configure MFA policies** for enhanced security +2. **Set up conditional access** based on risk factors +3. **Integrate with existing AD/LDAP** if needed +4. **Configure audit logging** for compliance +5. **Train users** on the new login process +6. **Set up monitoring** for authentication failures + +## Related Documentation + +- [Complete SSO Guide](sso.md) - Full SSO documentation +- [GitHub SSO Tutorial](sso-github-tutorial.md) - GitHub setup guide +- [Google SSO Tutorial](sso-google-tutorial.md) - Google setup guide +- [Team Management](teams.md) - Managing teams and roles +- [RBAC Configuration](rbac.md) - Role-based access control + +## Support + +If you encounter issues: + +1. Check IBM Security Verify admin console for error messages +2. Enable debug logging: `LOG_LEVEL=DEBUG` +3. Review gateway logs for IBM Security Verify errors +4. Verify all IBM Security Verify settings match tutorial +5. Contact IBM Security Verify support for tenant-specific issues diff --git a/docs/docs/manage/sso-okta-tutorial.md b/docs/docs/manage/sso-okta-tutorial.md new file mode 100644 index 000000000..edcdb7b8e --- /dev/null +++ b/docs/docs/manage/sso-okta-tutorial.md @@ -0,0 +1,469 @@ +# Okta OIDC Setup Tutorial + +This tutorial walks you through setting up Okta Single Sign-On (SSO) authentication for MCP Gateway, enabling enterprise identity management with Okta's comprehensive platform. + +## Prerequisites + +- MCP Gateway installed and running +- Okta account with admin access (Developer or Enterprise edition) +- Access to your gateway's environment configuration + +## Step 1: Create Okta Application Integration + +### 1.1 Access Okta Admin Console + +1. Navigate to your Okta admin console + - URL format: `https://[org-name].okta.com` or `https://[org-name].oktapreview.com` +2. Log in with your administrator credentials +3. Go to **Applications** → **Applications** in the left sidebar + +### 1.2 Create New App Integration + +1. Click **Create App Integration** +2. Choose **OIDC - OpenID Connect** as the sign-in method +3. Choose **Web Application** as the application type +4. Click **Next** + +### 1.3 Configure General Settings + +**App integration name**: `MCP Gateway` + +**App logo**: Upload your organization's logo (optional) + +**Grant type**: Select **Authorization Code** (should be pre-selected) + +### 1.4 Configure Sign-in Settings + +**Sign-in redirect URIs**: **Critical - must be exact** +- Production: `https://gateway.yourcompany.com/auth/sso/callback/okta` +- Development: `http://localhost:8000/auth/sso/callback/okta` +- Click **Add URI** if you need both + +**Sign-out redirect URIs** (optional): +- Production: `https://gateway.yourcompany.com/admin/login` +- Development: `http://localhost:8000/admin/login` + +**Controlled access**: Choose appropriate option: +- **Allow everyone in your organization to access** (most common) +- **Limit access to selected groups** (recommended for production) +- **Skip group assignment for now** (development only) + +### 1.5 Save and Obtain Credentials + +1. Click **Save** +2. After creation, you'll see the **Client Credentials**: + - **Client ID**: Copy this value + - **Client secret**: Copy this value (click to reveal) +3. Note your **Okta domain** (e.g., `https://dev-12345.okta.com`) + +## Step 2: Configure Okta Application Settings + +### 2.1 Configure Token Settings (Optional) + +1. In your application, go to the **General** tab +2. Scroll to **General Settings** → **Edit** +3. Configure token lifetimes: + - **Access token lifetime**: 1 hour (default) + - **Refresh token lifetime**: 90 days (default) + - **ID token lifetime**: 1 hour (default) + +### 2.2 Configure Claims (Advanced) + +1. Go to the **Sign On** tab +2. Scroll to **OpenID Connect ID Token** +3. Configure claims if you need custom user attributes: + - `groups` - User's group memberships + - `department` - User's department + - `title` - User's job title + +Example custom claim configuration: +- **Name**: `groups` +- **Include in token type**: ID Token, Always +- **Value type**: Groups +- **Filter**: Matches regex `.*` (for all groups) + +## Step 3: Configure User and Group Access + +### 3.1 Assign Users to Application + +1. Go to the **Assignments** tab in your application +2. Click **Assign** → **Assign to People** +3. Select users who should have access +4. Click **Assign** for each user +5. Click **Save and Go Back** + +### 3.2 Assign Groups to Application (Recommended) + +1. Click **Assign** → **Assign to Groups** +2. Select groups that should have access: + - `Everyone` - All users (not recommended for production) + - `MCP Gateway Users` - Custom group for gateway access + - `IT Admins` - Administrative access +3. For each group, you can set a custom **Application username** +4. Click **Assign** and **Done** + +### 3.3 Create Custom Groups (Optional) + +If you want specific groups for MCP Gateway: + +1. Go to **Directory** → **Groups** +2. Click **Add Group** +3. Create groups like: + - **Name**: `MCP Gateway Users` + - **Description**: `Users with access to MCP Gateway` +4. Add appropriate users to these groups + +## Step 4: Configure MCP Gateway Environment + +### 4.1 Update Environment Variables + +Add these variables to your `.env` file: + +```bash +# Enable SSO System +SSO_ENABLED=true + +# Okta OIDC Configuration +SSO_OKTA_ENABLED=true +SSO_OKTA_CLIENT_ID=0oa1b2c3d4e5f6g7h8i9 +SSO_OKTA_CLIENT_SECRET=AbCdEfGhIjKlMnOpQrStUvWxYz1234567890abcdef +SSO_OKTA_ISSUER=https://dev-12345.okta.com + +# Optional: Auto-create users on first login +SSO_AUTO_CREATE_USERS=true + +# Optional: Restrict to corporate email domains +SSO_TRUSTED_DOMAINS=["yourcompany.com"] + +# Optional: Preserve local admin authentication +SSO_PRESERVE_ADMIN_AUTH=true +``` + +### 4.2 Example Production Configuration + +```bash +# Production Okta SSO Setup +SSO_ENABLED=true +SSO_OKTA_ENABLED=true +SSO_OKTA_CLIENT_ID=0oa1b2c3d4e5f6g7h8i9 +SSO_OKTA_CLIENT_SECRET=AbCdEfGhIjKlMnOpQrStUvWxYz1234567890abcdef +SSO_OKTA_ISSUER=https://acmecorp.okta.com + +# Enterprise security settings +SSO_AUTO_CREATE_USERS=true +SSO_TRUSTED_DOMAINS=["acmecorp.com"] +SSO_PRESERVE_ADMIN_AUTH=true + +# Optional: Custom scopes for additional user attributes +SSO_OKTA_SCOPE="openid profile email groups" +``` + +### 4.3 Development Configuration + +```bash +# Development Okta SSO Setup +SSO_ENABLED=true +SSO_OKTA_ENABLED=true +SSO_OKTA_CLIENT_ID=0oa_dev_client_id +SSO_OKTA_CLIENT_SECRET=dev_client_secret +SSO_OKTA_ISSUER=https://dev-12345.oktapreview.com + +# More permissive for testing +SSO_AUTO_CREATE_USERS=true +SSO_PRESERVE_ADMIN_AUTH=true +``` + +### 4.4 Advanced Configuration Options + +```bash +# Custom OAuth scopes for enhanced user data +SSO_OKTA_SCOPE="openid profile email groups address phone" + +# Group mapping for automatic team assignment +OKTA_GROUP_MAPPING={"MCP Gateway Admins": "admin-team-uuid", "MCP Gateway Users": "user-team-uuid"} + +# Custom authorization server (if using custom Okta authorization server) +SSO_OKTA_ISSUER=https://dev-12345.okta.com/oauth2/custom-auth-server-id +``` + +## Step 5: Restart and Verify Gateway + +### 5.1 Restart the Gateway + +```bash +# Development +make dev + +# Or directly with uvicorn +uvicorn mcpgateway.main:app --reload --host 0.0.0.0 --port 8000 + +# Production +make serve +``` + +### 5.2 Verify Okta SSO is Enabled + +Test that Okta appears in SSO providers: + +```bash +# Check if Okta is listed +curl -X GET http://localhost:8000/auth/sso/providers + +# Should return Okta in the list: +[ + { + "id": "okta", + "name": "okta", + "display_name": "Okta" + } +] +``` + +## Step 6: Test Okta SSO Login + +### 6.1 Access Login Page + +1. Navigate to your gateway's login page: + - Development: `http://localhost:8000/admin/login` + - Production: `https://gateway.yourcompany.com/admin/login` + +2. You should see a "Continue with Okta" button + +### 6.2 Test Authentication Flow + +1. Click **Continue with Okta** +2. You'll be redirected to Okta's sign-in page +3. Enter your Okta credentials +4. Complete any multi-factor authentication if required +5. Grant consent for the application if prompted +6. You'll be redirected back to the gateway admin panel +7. You should be logged in successfully + +### 6.3 Verify User Creation + +Check that a user was created: + +```bash +# Using the admin API (requires admin token) +curl -H "Authorization: Bearer YOUR_ADMIN_TOKEN" \ + http://localhost:8000/auth/users + +# Look for your Okta email in the user list +``` + +## Step 7: Okta Advanced Features (Enterprise) + +### 7.1 Multi-Factor Authentication (MFA) + +Configure MFA policies in Okta: + +1. Go to **Security** → **Multifactor** +2. Set up MFA policies for your MCP Gateway application +3. Configure factors (SMS, Email, Okta Verify app, etc.) +4. Users will be prompted for MFA during login + +### 7.2 Adaptive Authentication + +Configure risk-based authentication: + +1. Go to **Security** → **Authentication** → **Sign On** +2. Create policies with conditions: + - Device trust + - Network location + - User risk level + - Time-based restrictions + +### 7.3 Universal Directory Integration + +Sync user attributes from external directories: + +1. Go to **Directory** → **Directory Integrations** +2. Configure integration with: + - Active Directory + - LDAP + - HR systems (Workday, BambooHR, etc.) +3. Map attributes for automatic user provisioning + +### 7.4 API Access Management + +For programmatic API access: + +1. Create a custom authorization server +2. Configure API scopes and claims +3. Issue API tokens for service-to-service authentication + +## Step 8: Production Deployment Checklist + +### 8.1 Security Requirements + +- [ ] HTTPS enforced for all redirect URIs +- [ ] Client secrets stored securely (vault/secret management) +- [ ] MFA policies configured appropriately +- [ ] Adaptive authentication policies set +- [ ] Password policies enforced +- [ ] Session management configured + +### 8.2 Okta Configuration + +- [ ] Application created with correct settings +- [ ] Appropriate users/groups assigned access +- [ ] Custom claims configured if needed +- [ ] Token lifetimes set appropriately +- [ ] Sign-out redirect URIs configured + +### 8.3 Monitoring and Compliance + +- [ ] System Log monitoring enabled +- [ ] Audit trail configured +- [ ] Compliance reporting set up (if required) +- [ ] Regular access reviews scheduled + +## Troubleshooting + +### Error: "SSO authentication is disabled" + +**Problem**: SSO endpoints return 404 +**Solution**: Set `SSO_ENABLED=true` and restart gateway + +### Error: "invalid_client" + +**Problem**: Wrong client ID or client secret +**Solution**: Verify credentials from Okta application settings + +```bash +# Double-check these values match your Okta application +SSO_OKTA_CLIENT_ID=your-actual-client-id +SSO_OKTA_CLIENT_SECRET=your-actual-client-secret +``` + +### Error: "redirect_uri_mismatch" + +**Problem**: Okta redirect URI doesn't match +**Solution**: Verify exact URL match in Okta application settings + +```bash +# Okta redirect URI must exactly match: +https://your-domain.com/auth/sso/callback/okta + +# Common mistakes: +https://your-domain.com/auth/sso/callback/okta/ # Extra slash +http://your-domain.com/auth/sso/callback/okta # HTTP instead of HTTPS +https://your-domain.com/auth/sso/callback/oauth # Wrong provider ID +``` + +### Error: "User is not assigned to the client application" + +**Problem**: User doesn't have access to the application +**Solution**: Assign user to the application + +1. In Okta admin console, go to Applications → [Your App] +2. Go to Assignments tab +3. Assign the user or their group to the application + +### Error: "The issuer specified in the request is invalid" + +**Problem**: Wrong Okta domain or issuer URL +**Solution**: Verify issuer URL matches your Okta domain + +```bash +# Get the correct issuer from Okta's well-known configuration +curl https://[your-okta-domain].okta.com/.well-known/openid_configuration + +# Use the "issuer" field value +``` + +### MFA Bypass Issues + +**Problem**: Users not prompted for MFA +**Solution**: Check MFA policies and user enrollment + +1. Verify MFA policies are active for your application +2. Check user MFA enrollment status +3. Ensure policy conditions are met (device, location, etc.) + +### Token Validation Errors + +**Problem**: JWT tokens failing validation +**Solution**: Check token configuration and clock sync + +1. Verify token lifetime settings +2. Check server clock synchronization +3. Validate JWT signature verification + +## Testing Checklist + +- [ ] Okta application integration created +- [ ] Client ID and secret configured +- [ ] Redirect URIs set correctly +- [ ] Users/groups assigned to application +- [ ] Environment variables configured +- [ ] Gateway restarted with new config +- [ ] `/auth/sso/providers` returns Okta provider +- [ ] Login page shows "Continue with Okta" button +- [ ] Authentication flow completes successfully +- [ ] User appears in gateway user list +- [ ] MFA working (if configured) +- [ ] Group claims included in tokens (if configured) + +## Okta API Integration (Advanced) + +### Programmatic User Management + +Use Okta APIs for advanced user management: + +```python +# Example: Sync Okta groups with Gateway teams +import requests + +def sync_okta_groups(): + okta_token = "your-okta-api-token" + okta_domain = "https://dev-12345.okta.com" + + # Get user's groups from Okta + response = requests.get( + f"{okta_domain}/api/v1/users/{user_id}/groups", + headers={"Authorization": f"SSWS {okta_token}"} + ) + + groups = response.json() + return [group['profile']['name'] for group in groups] +``` + +### Custom Authorization Server + +For advanced API access patterns: + +1. Create custom authorization server in Okta +2. Define custom scopes for MCP Gateway APIs +3. Configure audience restrictions +4. Use for service-to-service authentication + +## Next Steps + +After Okta SSO is working: + +1. **Configure MFA policies** for enhanced security +2. **Set up adaptive authentication** based on risk +3. **Integrate with existing directories** (AD/LDAP) +4. **Configure custom user attributes** and claims +5. **Set up automated user provisioning/deprovisioning** +6. **Monitor authentication patterns** for security insights + +## Related Documentation + +- [Complete SSO Guide](sso.md) - Full SSO documentation +- [GitHub SSO Tutorial](sso-github-tutorial.md) - GitHub setup guide +- [Google SSO Tutorial](sso-google-tutorial.md) - Google setup guide +- [IBM Security Verify Tutorial](sso-ibm-tutorial.md) - IBM setup guide +- [Team Management](teams.md) - Managing teams and roles +- [RBAC Configuration](rbac.md) - Role-based access control + +## Support + +If you encounter issues: + +1. Check Okta System Log for authentication errors +2. Enable debug logging: `LOG_LEVEL=DEBUG` +3. Review gateway logs for Okta-specific errors +4. Verify all Okta settings match tutorial exactly +5. Use Okta's support resources and community forums diff --git a/docs/docs/manage/sso.md b/docs/docs/manage/sso.md new file mode 100644 index 000000000..b61d4e998 --- /dev/null +++ b/docs/docs/manage/sso.md @@ -0,0 +1,662 @@ +# Single Sign-On (SSO) Authentication + +MCP Gateway supports enterprise Single Sign-On authentication through OAuth2 and OpenID Connect (OIDC) providers. This enables seamless integration with existing identity providers while maintaining backward compatibility with local authentication. + +## Overview + +The SSO system provides: + +- **Multi-Provider Support**: GitHub, Google, IBM Security Verify, and Okta +- **Hybrid Authentication**: SSO alongside preserved local admin authentication +- **Automatic User Provisioning**: Creates users on first SSO login +- **Security Best Practices**: PKCE, CSRF protection, encrypted secrets +- **Team Integration**: Automatic team assignment and inheritance +- **Admin Management**: Full CRUD API for provider configuration + +## Architecture + +### Authentication Flows + +```mermaid +sequenceDiagram + participant U as User + participant G as Gateway + participant P as SSO Provider + participant D as Database + + U->>G: GET /auth/sso/login/github + G->>D: Create auth session + G->>U: Redirect to provider with PKCE + U->>P: Authenticate with provider + P->>G: Callback with auth code + G->>P: Exchange code for tokens (PKCE) + P->>G: Access token + user info + G->>D: Create/update user + G->>U: Set JWT cookie + redirect +``` + +### Database Schema + +**SSOProvider Table**: +- Provider configuration (OAuth endpoints, client credentials) +- Encrypted client secrets using Fernet encryption +- Trusted domains and team mapping rules + +**SSOAuthSession Table**: +- Temporary session tracking during OAuth flow +- CSRF state parameters and PKCE verifiers +- 10-minute expiration for security + +## Supported Providers + +### GitHub OAuth + +Perfect for developer-focused organizations with GitHub repositories. + +**Features**: +- GitHub organization mapping to teams +- Repository access integration +- Developer-friendly onboarding + +### Google OAuth/OIDC + +Ideal for Google Workspace organizations. + +**Features**: +- Google Workspace domain verification +- GSuite organization mapping +- Professional email verification + +### IBM Security Verify + +Enterprise-grade identity provider with advanced security features. + +**Features**: +- Enterprise SSO compliance +- Advanced user attributes +- Corporate directory integration + +### Okta + +Popular enterprise identity provider with extensive integrations. + +**Features**: +- Enterprise directory synchronization +- Multi-factor authentication support +- Custom user attributes + +## Quick Start + +### 1. Enable SSO + +Set the master SSO switch in your environment: + +```bash +# Enable SSO system +SSO_ENABLED=true + +# Optional: Keep local admin authentication (recommended) +SSO_PRESERVE_ADMIN_AUTH=true +``` + +### 2. Configure GitHub OAuth (Example) + +#### Register OAuth App + +1. Go to GitHub → Settings → Developer settings → OAuth Apps +2. Click "New OAuth App" +3. Set **Authorization callback URL**: `https://your-gateway.com/auth/sso/callback/github` +4. Note the **Client ID** and **Client Secret** + +#### Environment Configuration + +```bash +# GitHub OAuth Configuration +SSO_GITHUB_ENABLED=true +SSO_GITHUB_CLIENT_ID=your-github-client-id +SSO_GITHUB_CLIENT_SECRET=your-github-client-secret + +# Optional: Auto-create users and trusted domains +SSO_AUTO_CREATE_USERS=true +SSO_TRUSTED_DOMAINS=["yourcompany.com", "github.com"] +``` + +#### Start Gateway + +```bash +# Restart gateway to load SSO configuration +make dev +# or +docker-compose restart gateway +``` + +### 3. Test SSO Flow + +#### List Available Providers + +```bash +curl -X GET http://localhost:8000/auth/sso/providers +``` + +Response: +```json +[ + { + "id": "github", + "name": "github", + "display_name": "GitHub" + } +] +``` + +#### Initiate SSO Login + +```bash +curl -X GET "http://localhost:8000/auth/sso/login/github?redirect_uri=https://yourapp.com/callback" +``` + +Response: +```json +{ + "authorization_url": "https://github.com/login/oauth/authorize?client_id=...", + "state": "csrf-protection-token" +} +``` + +## Provider Configuration + +### GitHub OAuth Setup + +#### 1. Create OAuth App + +1. **GitHub Settings** → **Developer settings** → **OAuth Apps** +2. **New OAuth App**: + - **Application name**: `MCP Gateway - YourOrg` + - **Homepage URL**: `https://your-gateway.com` + - **Authorization callback URL**: `https://your-gateway.com/auth/sso/callback/github` + +#### 2. Environment Variables + +```bash +# GitHub OAuth Configuration +SSO_GITHUB_ENABLED=true +SSO_GITHUB_CLIENT_ID=Iv1.a1b2c3d4e5f6g7h8 +SSO_GITHUB_CLIENT_SECRET=1234567890abcdef1234567890abcdef12345678 + +# Organization-based team mapping (optional) +GITHUB_ORG_TEAM_MAPPING={"your-github-org": "developers-team-id"} +``` + +#### 3. Team Mapping (Advanced) + +Map GitHub organizations to Gateway teams: + +```json +{ + "team_mapping": { + "your-github-org": { + "team_id": "dev-team-uuid", + "role": "member" + }, + "admin-github-org": { + "team_id": "admin-team-uuid", + "role": "owner" + } + } +} +``` + +### Google OAuth Setup + +#### 1. Google Cloud Console Setup + +1. **Google Cloud Console** → **APIs & Services** → **Credentials** +2. **Create Credentials** → **OAuth client ID** +3. **Application type**: Web application +4. **Authorized redirect URIs**: `https://your-gateway.com/auth/sso/callback/google` + +#### 2. Environment Variables + +```bash +# Google OAuth Configuration +SSO_GOOGLE_ENABLED=true +SSO_GOOGLE_CLIENT_ID=123456789012-abcdefghijklmnop.apps.googleusercontent.com +SSO_GOOGLE_CLIENT_SECRET=GOCSPX-1234567890abcdefghijklmnop + +# Google Workspace domain restrictions +SSO_TRUSTED_DOMAINS=["yourcompany.com"] +``` + +### IBM Security Verify Setup + +#### 1. IBM Security Verify Configuration + +1. **IBM Security Verify Admin Console** → **Applications** +2. **Add application** → **Custom Application** +3. **Sign-on** → **Open ID Connect** +4. **Redirect URI**: `https://your-gateway.com/auth/sso/callback/ibm_verify` + +#### 2. Environment Variables + +```bash +# IBM Security Verify OIDC Configuration +SSO_IBM_VERIFY_ENABLED=true +SSO_IBM_VERIFY_CLIENT_ID=your-client-id +SSO_IBM_VERIFY_CLIENT_SECRET=your-client-secret +SSO_IBM_VERIFY_ISSUER=https://your-tenant.verify.ibm.com/oidc/endpoint/default +``` + +### Okta Setup + +#### 1. Okta Admin Console + +1. **Applications** → **Create App Integration** +2. **OIDC - OpenID Connect** → **Web Application** +3. **Sign-in redirect URIs**: `https://your-gateway.com/auth/sso/callback/okta` + +#### 2. Environment Variables + +```bash +# Okta OIDC Configuration +SSO_OKTA_ENABLED=true +SSO_OKTA_CLIENT_ID=0oa1b2c3d4e5f6g7h8i9 +SSO_OKTA_CLIENT_SECRET=1234567890abcdef1234567890abcdef12345678 +SSO_OKTA_ISSUER=https://your-company.okta.com +``` + +## Advanced Configuration + +### Trusted Domains + +Restrict SSO access to specific email domains: + +```bash +# JSON array of trusted domains +SSO_TRUSTED_DOMAINS=["yourcompany.com", "partner.org", "contractor.net"] +``` + +Only users with email addresses from these domains can authenticate via SSO. + +### Auto User Creation + +Control automatic user provisioning: + +```bash +# Enable automatic user creation (default: true) +SSO_AUTO_CREATE_USERS=true + +# Disable to manually approve SSO users +SSO_AUTO_CREATE_USERS=false +``` + +### Team Mapping Rules + +Configure automatic team assignment based on SSO provider attributes: + +```json +{ + "team_mapping": { + "github_org_name": { + "team_id": "uuid-of-gateway-team", + "role": "member", + "conditions": { + "email_domain": "company.com" + } + }, + "google_workspace_domain": { + "team_id": "uuid-of-workspace-team", + "role": "owner", + "conditions": { + "email_verified": true + } + } + } +} +``` + +## API Reference + +### Public Endpoints + +#### List Available Providers + +```http +GET /auth/sso/providers +``` + +Response: +```json +[ + { + "id": "github", + "name": "github", + "display_name": "GitHub" + } +] +``` + +#### Initiate SSO Login + +```http +GET /auth/sso/login/{provider_id}?redirect_uri={callback_url}&scopes={oauth_scopes} +``` + +Parameters: +- `provider_id`: Provider identifier (`github`, `google`, `ibm_verify`, `okta`) +- `redirect_uri`: Callback URL after authentication +- `scopes`: Optional space-separated OAuth scopes + +Response: +```json +{ + "authorization_url": "https://provider.com/oauth/authorize?...", + "state": "csrf-protection-token" +} +``` + +#### Handle SSO Callback + +```http +GET /auth/sso/callback/{provider_id}?code={auth_code}&state={csrf_token} +``` + +This endpoint is called by the SSO provider after user authentication. + +Response: +```json +{ + "access_token": "jwt-session-token", + "token_type": "bearer", + "expires_in": 604800, + "user": { + "email": "user@example.com", + "full_name": "John Doe", + "provider": "github" + } +} +``` + +### Admin Endpoints + +All admin endpoints require `admin.sso_providers` permissions. + +#### Create SSO Provider + +```http +POST /auth/sso/admin/providers +Authorization: Bearer +Content-Type: application/json + +{ + "id": "custom_provider", + "name": "custom_provider", + "display_name": "Custom Provider", + "provider_type": "oidc", + "client_id": "client-id", + "client_secret": "client-secret", + "authorization_url": "https://provider.com/oauth/authorize", + "token_url": "https://provider.com/oauth/token", + "userinfo_url": "https://provider.com/oauth/userinfo", + "issuer": "https://provider.com", + "scope": "openid profile email", + "trusted_domains": ["company.com"], + "auto_create_users": true +} +``` + +#### List All Providers + +```http +GET /auth/sso/admin/providers +Authorization: Bearer +``` + +#### Update Provider + +```http +PUT /auth/sso/admin/providers/{provider_id} +Authorization: Bearer +Content-Type: application/json + +{ + "display_name": "Updated Provider Name", + "is_enabled": false +} +``` + +#### Delete Provider + +```http +DELETE /auth/sso/admin/providers/{provider_id} +Authorization: Bearer +``` + +## Security Considerations + +### Client Secret Encryption + +Client secrets are encrypted using Fernet (AES 128) before database storage: + +```python +# Automatic encryption in SSOService +provider_data["client_secret_encrypted"] = self._encrypt_secret(client_secret) +``` + +### PKCE Protection + +All OAuth flows use PKCE (Proof Key for Code Exchange) for enhanced security: + +```python +# Automatic PKCE generation +code_verifier, code_challenge = self.generate_pkce_challenge() +``` + +### CSRF Protection + +OAuth state parameters prevent cross-site request forgery: + +```python +# Cryptographically secure state generation +state = secrets.token_urlsafe(32) +``` + +### Session Security + +- **HTTP-only cookies** prevent XSS attacks +- **Secure flag** for HTTPS deployments +- **SameSite=Lax** protection +- **10-minute OAuth session** expiration + +## Troubleshooting + +### Common Issues + +#### SSO Endpoints Return 404 + +**Problem**: SSO routes not available +**Solution**: Ensure `SSO_ENABLED=true` and restart gateway + +```bash +# Check SSO status +curl -I http://localhost:8000/auth/sso/providers +# Should return 200 if enabled, 404 if disabled +``` + +#### OAuth Callback Errors + +**Problem**: Invalid redirect URI +**Solution**: Verify callback URL matches provider configuration exactly + +```bash +# Correct format +https://your-gateway.com/auth/sso/callback/github + +# Common mistakes +https://your-gateway.com/auth/sso/callback/github/ # Extra slash +http://your-gateway.com/auth/sso/callback/github # HTTP instead of HTTPS +``` + +#### User Creation Fails + +**Problem**: Email domain not trusted +**Solution**: Add domain to trusted domains list + +```bash +SSO_TRUSTED_DOMAINS=["company.com", "contractor.org"] +``` + +### Debug Mode + +Enable verbose SSO logging: + +```bash +LOG_LEVEL=DEBUG +SSO_DEBUG=true +``` + +Check logs for detailed OAuth flow information: + +```bash +tail -f logs/gateway.log | grep -i sso +``` + +### Health Checks + +Verify SSO provider connectivity: + +```bash +# Test provider endpoints +curl -I https://github.com/login/oauth/authorize +curl -I https://github.com/login/oauth/access_token +curl -I https://api.github.com/user +``` + +## Migration Guide + +### From Local Auth Only + +1. **Enable SSO** alongside existing authentication: + ```bash + SSO_ENABLED=true + SSO_PRESERVE_ADMIN_AUTH=true # Keep local admin login + ``` + +2. **Configure first provider** (e.g., GitHub) + +3. **Test SSO flow** with test users + +4. **Gradually migrate** production users + +5. **Optional**: Disable local auth after full migration + +### Adding New Providers + +1. **Implement provider-specific** user info normalization in `SSOService._normalize_user_info` + +2. **Add environment variables** in `config.py` + +3. **Update bootstrap utilities** in `sso_bootstrap.py` + +4. **Test integration** thoroughly + +## Best Practices + +### Production Deployment + +1. **Use HTTPS** for all SSO callbacks +2. **Secure client secrets** in vault/secret management +3. **Monitor failed authentications** +4. **Regular secret rotation** +5. **Audit SSO access logs** + +### User Experience + +1. **Clear provider labeling** (GitHub, Google, etc.) +2. **Graceful error handling** for auth failures +3. **Fallback to local auth** if SSO unavailable +4. **User session management** + +### Security Hardening + +1. **Restrict trusted domains** to organization emails +2. **Enable audit logging** for admin operations +3. **Regular provider configuration** reviews +4. **Monitor unusual auth patterns** + +## Integration Examples + +### Frontend Integration + +```javascript +// Check available SSO providers +const providers = await fetch('/auth/sso/providers').then(r => r.json()); + +// Initiate SSO login +const redirectUrl = `${window.location.origin}/dashboard`; +const ssoResponse = await fetch( + `/auth/sso/login/github?redirect_uri=${encodeURIComponent(redirectUrl)}` +).then(r => r.json()); + +// Redirect user to SSO provider +window.location.href = ssoResponse.authorization_url; +``` + +### CLI Integration + +```bash +#!/bin/bash +# CLI SSO authentication helper + +GATEWAY_URL="https://your-gateway.com" +PROVIDER="github" + +# Get authorization URL +AUTH_RESPONSE=$(curl -s "$GATEWAY_URL/auth/sso/login/$PROVIDER?redirect_uri=urn:ietf:wg:oauth:2.0:oob") +AUTH_URL=$(echo "$AUTH_RESPONSE" | jq -r '.authorization_url') + +echo "Open this URL in your browser:" +echo "$AUTH_URL" + +echo "Enter the authorization code:" +read -r AUTH_CODE + +# Exchange code for token (manual callback simulation) +# Note: In practice, this would be handled by the callback endpoint +``` + +### API Client Integration + +```python +import requests +import webbrowser +from urllib.parse import urlparse, parse_qs + +# SSO authentication for API clients +class SSOAuthenticator: + def __init__(self, gateway_url, provider): + self.gateway_url = gateway_url + self.provider = provider + + def authenticate(self): + # Get authorization URL + response = requests.get( + f"{self.gateway_url}/auth/sso/login/{self.provider}", + params={"redirect_uri": "http://localhost:8080/callback"} + ) + auth_data = response.json() + + # Open browser for user authentication + webbrowser.open(auth_data["authorization_url"]) + + # Wait for callback (implement callback server) + # Return JWT token for API access + return self.handle_callback() +``` + +## Related Documentation + +- [Authentication Overview](../manage/securing.md) +- [Team Management](../manage/teams.md) +- [RBAC Configuration](../manage/rbac.md) +- [Environment Variables](../deployment/index.md#environment-variables) +- [Security Best Practices](../architecture/security-features.md) diff --git a/docs/docs/testing/acceptance.md b/docs/docs/testing/acceptance.md index 8895d6b32..c724a3b32 100644 --- a/docs/docs/testing/acceptance.md +++ b/docs/docs/testing/acceptance.md @@ -81,7 +81,7 @@ graph TB |---------|-------------|---------|-----------------|--------|-------| | Set Gateway URL | `export GW_URL=http://localhost:4444` | Set base URL (can be remote) | Variable exported | ☐ | Change to your gateway URL if remote | | Install Gateway Package | `pip install mcp-contextforge-gateway` | Install the gateway package for utilities | Successfully installed | ☐ | Needed for JWT token creation and wrapper testing | -| Generate JWT Token | `export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token -u admin --secret my-test-key)` | Generate auth token using installed package | Token generated and exported | ☐ | Default expiry 10080 (7 days) | +| Generate JWT Token | `export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token -u admin@example.com --secret my-test-key)` | Generate auth token using installed package | Token generated and exported | ☐ | Default expiry 10080 (7 days) | | Verify Health | `curl -s $GW_URL/health` | GET request (no auth required) | `{"status":"ok"}` | ☐ | Basic connectivity check | | Verify Ready | `curl -s $GW_URL/ready` | GET request (no auth required) | `{"ready":true,"database":"ok","redis":"ok"}` | ☐ | All subsystems ready | | Test Auth Required | `curl -s $GW_URL/version` | GET without auth | `{"detail":"Not authenticated"}` | ☐ | Confirms auth is enforced | @@ -382,7 +382,7 @@ MCPGATEWAY_ADMIN_API_ENABLED=true - **Gateway Base URL**: Set `export GW_URL=http://your-gateway:4444` for remote gateways - **Authentication**: Use Bearer token in format: `Authorization: Bearer $MCPGATEWAY_BEARER_TOKEN` -- **JWT Token Generation**: Can also be done inside Docker container: `docker exec mcpgateway python3 -m mcpgateway.utils.create_jwt_token -u admin -e 10080 --secret my-test-key` +- **JWT Token Generation**: Can also be done inside Docker container: `docker exec mcpgateway python3 -m mcpgateway.utils.create_jwt_token -u admin@example.com -e 10080 --secret my-test-key` - **Time Servers**: The time server gateways are used throughout testing as reference implementations - **Gateway Tool Separator**: Default is `__` (double underscore) between gateway name and tool name, but newer versions may use `-` - **Status Column**: Check ☐ when test passes, add ✗ if test fails with failure reason diff --git a/docs/docs/testing/basic.md b/docs/docs/testing/basic.md index c0cac3843..7bd832ca2 100644 --- a/docs/docs/testing/basic.md +++ b/docs/docs/testing/basic.md @@ -38,7 +38,7 @@ Gateway will listen on: #### Gateway JWT (for local API access) ```bash -export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token -u admin) +export MCPGATEWAY_BEARER_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token -u admin@example.com) curl -s -k -H "Authorization: Bearer $MCPGATEWAY_BEARER_TOKEN" https://localhost:4444/health ``` diff --git a/docs/docs/using/clients/continue.md b/docs/docs/using/clients/continue.md index c5a66957b..e6f313e31 100644 --- a/docs/docs/using/clients/continue.md +++ b/docs/docs/using/clients/continue.md @@ -57,7 +57,7 @@ There are **two ways** to attach Continue to a gateway: *Generate a token*: ```bash -export MCP_AUTH=$(python3 -m mcpgateway.utils.create_jwt_token -u admin --secret my-test-key) +export MCP_AUTH=$(python3 -m mcpgateway.utils.create_jwt_token -u admin@example.com --secret my-test-key) ``` ### Option B - Local stdio bridge (`mcpgateway.wrapper`) diff --git a/docs/docs/using/clients/copilot.md b/docs/docs/using/clients/copilot.md index 8cb305706..f47939007 100644 --- a/docs/docs/using/clients/copilot.md +++ b/docs/docs/using/clients/copilot.md @@ -44,7 +44,7 @@ HTTP or require local stdio, you can insert the bundled **`mcpgateway.wrapper`** > **Tip - generate a token** ```bash -python3 -m mcpgateway.utils.create_jwt_token -u admin --exp 10080 --secret my-test-key +python3 -m mcpgateway.utils.create_jwt_token -u admin@example.com --exp 10080 --secret my-test-key ``` ## 🔗 Option 2 - Streamable HTTP (best for prod / remote) @@ -142,7 +142,7 @@ Copilot routes the call → Gateway → tool, and prints the reply. * **Use SSE for production**, stdio for local/offline. * You can manage servers, tools and prompts from the Gateway **Admin UI** (`/admin`). * Need a bearer quickly? - `export MCP_AUTH=$(python3 -m mcpgateway.utils.create_jwt_token -u admin --secret my-test-key)` + `export MCP_AUTH=$(python3 -m mcpgateway.utils.create_jwt_token -u admin@example.com --secret my-test-key)` --- diff --git a/docs/docs/using/clients/mcp-cli.md b/docs/docs/using/clients/mcp-cli.md index 8d5b714c6..1d2c7cf85 100644 --- a/docs/docs/using/clients/mcp-cli.md +++ b/docs/docs/using/clients/mcp-cli.md @@ -124,7 +124,7 @@ Create a `server_config.json` file to define your MCP Context Forge Gateway conn ```bash # From your mcp-context-forge directory -python3 -m mcpgateway.utils.create_jwt_token -u admin --exp 10080 --secret my-test-key +python3 -m mcpgateway.utils.create_jwt_token -u admin@example.com --exp 10080 --secret my-test-key ``` > **⚠️ Important Notes** @@ -489,7 +489,7 @@ docker run -d --name mcpgateway \ ghcr.io/ibm/mcp-context-forge:0.6.0 # Generate token -export MCPGATEWAY_BEARER_TOKEN=$(docker exec mcpgateway python3 -m mcpgateway.utils.create_jwt_token --username admin --exp 10080 --secret my-secret-key) +export MCPGATEWAY_BEARER_TOKEN=$(docker exec mcpgateway python3 -m mcpgateway.utils.create_jwt_token --username admin@example.com --exp 10080 --secret my-secret-key) # Test connection curl -H "Authorization: Bearer $MCPGATEWAY_BEARER_TOKEN" http://localhost:4444/tools diff --git a/docs/docs/using/clients/mcp-inspector.md b/docs/docs/using/clients/mcp-inspector.md index 729b6c9a9..2083f223b 100644 --- a/docs/docs/using/clients/mcp-inspector.md +++ b/docs/docs/using/clients/mcp-inspector.md @@ -34,7 +34,7 @@ Most wrappers / servers will need at least: ```bash export MCP_SERVER_URL=http://localhost:4444/servers/UUID_OF_SERVER_1 # one or many -export MCP_AUTH=$(python3 -m mcpgateway.utils.create_jwt_token -u admin --secret my-test-key) +export MCP_AUTH=$(python3 -m mcpgateway.utils.create_jwt_token -u admin@example.com --secret my-test-key) ``` If you point Inspector **directly** at a Gateway SSE stream, pass the header: diff --git a/mcpgateway/admin.py b/mcpgateway/admin.py index dd4cdb292..320e588c5 100644 --- a/mcpgateway/admin.py +++ b/mcpgateway/admin.py @@ -20,19 +20,22 @@ # Standard from collections import defaultdict import csv -from datetime import datetime +from datetime import datetime, timedelta, timezone from functools import wraps +import html import io import json from pathlib import Path import time from typing import Any, cast, Dict, List, Optional, Union +import urllib.parse import uuid # Third-Party from fastapi import APIRouter, Depends, HTTPException, Request, Response from fastapi.responses import FileResponse, HTMLResponse, JSONResponse, RedirectResponse, StreamingResponse import httpx +import jwt from pydantic import ValidationError from pydantic_core import ValidationError as CoreValidationError from sqlalchemy.exc import IntegrityError @@ -42,6 +45,7 @@ from mcpgateway.config import settings from mcpgateway.db import get_db, GlobalConfig from mcpgateway.db import Tool as DbTool +from mcpgateway.middleware.rbac import get_current_user_with_permissions, require_permission from mcpgateway.models import LogLevel from mcpgateway.schemas import ( A2AAgentCreate, @@ -88,8 +92,6 @@ from mcpgateway.utils.oauth_encryption import get_oauth_encryption from mcpgateway.utils.passthrough_headers import PassthroughHeadersError from mcpgateway.utils.retry_manager import ResilientHttpClient -from mcpgateway.utils.security_cookies import set_auth_cookie -from mcpgateway.utils.verify_credentials import require_auth, require_basic_auth # Import the shared logging service from main # This will be set by main.py when it imports admin_router @@ -115,6 +117,10 @@ def set_logging_service(service: LoggingService): logging_service = LoggingService() LOGGER = logging_service.get_logger("mcpgateway.admin") + +# Removed duplicate function definition - using the more comprehensive version below + + # Initialize services server_service: ServerService = ServerService() tool_service: ToolService = ToolService() @@ -197,6 +203,39 @@ async def wrapper(*args, request: Request = None, **kwargs): return decorator +def get_user_email(user) -> str: + """Extract user email from JWT payload consistently. + + Args: + user: User object from JWT token (from get_current_user_with_permissions) + + Returns: + str: User email address + """ + if isinstance(user, dict): + # Standard JWT format - try 'sub' first, then 'email' + return user.get("sub") or user.get("email", "unknown") + if hasattr(user, "email"): + # User object with email attribute + return user.email + # Fallback to string representation + return str(user) if user else "unknown" + + +def serialize_datetime(obj): + """Convert datetime objects to ISO format strings for JSON serialization. + + Args: + obj: Object to serialize, potentially a datetime + + Returns: + str: ISO format string if obj is datetime, otherwise returns obj unchanged + """ + if isinstance(obj, datetime): + return obj.isoformat() + return obj + + admin_router = APIRouter(prefix="/admin", tags=["Admin UI"]) #################### @@ -208,7 +247,7 @@ async def wrapper(*args, request: Request = None, **kwargs): @rate_limit(requests_per_minute=30) # Lower limit for config endpoints async def get_global_passthrough_headers( db: Session = Depends(get_db), - _user: str = Depends(require_auth), + _user=Depends(get_current_user_with_permissions), ) -> GlobalConfigRead: """Get the global passthrough headers configuration. @@ -243,7 +282,7 @@ async def update_global_passthrough_headers( request: Request, # pylint: disable=unused-argument config_update: GlobalConfigUpdate, db: Session = Depends(get_db), - _user: str = Depends(require_auth), + _user=Depends(get_current_user_with_permissions), ) -> GlobalConfigRead: """Update the global passthrough headers configuration. @@ -278,15 +317,13 @@ async def update_global_passthrough_headers( config.passthrough_headers = config_update.passthrough_headers db.commit() return GlobalConfigRead(passthrough_headers=config.passthrough_headers) - except Exception as e: + except (IntegrityError, ValidationError, PassthroughHeadersError) as e: + db.rollback() if isinstance(e, IntegrityError): - db.rollback() raise HTTPException(status_code=409, detail="Passthrough headers conflict") if isinstance(e, ValidationError): - db.rollback() raise HTTPException(status_code=422, detail="Invalid passthrough headers format") if isinstance(e, PassthroughHeadersError): - db.rollback() raise HTTPException(status_code=500, detail=str(e)) @@ -294,7 +331,7 @@ async def update_global_passthrough_headers( async def admin_list_servers( include_inactive: bool = False, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> List[Dict[str, Any]]: """ List servers for the admin UI with an option to include inactive servers. @@ -314,7 +351,7 @@ async def admin_list_servers( >>> >>> # Mock dependencies >>> mock_db = MagicMock() - >>> mock_user = "test_user" + >>> mock_user = {"email": "test_user", "db": mock_db} >>> >>> # Mock server service >>> from datetime import datetime, timezone @@ -342,9 +379,9 @@ async def admin_list_servers( ... metrics=mock_metrics ... ) >>> - >>> # Mock the server_service.list_servers method - >>> original_list_servers = server_service.list_servers - >>> server_service.list_servers = AsyncMock(return_value=[mock_server]) + >>> # Mock the server_service.list_servers_for_user method + >>> original_list_servers_for_user = server_service.list_servers_for_user + >>> server_service.list_servers_for_user = AsyncMock(return_value=[mock_server]) >>> >>> # Test the function >>> async def test_admin_list_servers(): @@ -360,10 +397,10 @@ async def admin_list_servers( True >>> >>> # Restore original method - >>> server_service.list_servers = original_list_servers + >>> server_service.list_servers_for_user = original_list_servers_for_user >>> >>> # Additional test for empty server list - >>> server_service.list_servers = AsyncMock(return_value=[]) + >>> server_service.list_servers_for_user = AsyncMock(return_value=[]) >>> async def test_admin_list_servers_empty(): ... result = await admin_list_servers( ... include_inactive=True, @@ -373,13 +410,13 @@ async def admin_list_servers( ... return result == [] >>> asyncio.run(test_admin_list_servers_empty()) True - >>> server_service.list_servers = original_list_servers + >>> server_service.list_servers_for_user = original_list_servers_for_user >>> >>> # Additional test for exception handling >>> import pytest >>> from fastapi import HTTPException >>> async def test_admin_list_servers_exception(): - ... server_service.list_servers = AsyncMock(side_effect=Exception("Test error")) + ... server_service.list_servers_for_user = AsyncMock(side_effect=Exception("Test error")) ... try: ... await admin_list_servers(False, mock_db, mock_user) ... except Exception as e: @@ -387,13 +424,14 @@ async def admin_list_servers( >>> asyncio.run(test_admin_list_servers_exception()) True """ - LOGGER.debug(f"User {user} requested server list") - servers = await server_service.list_servers(db, include_inactive=include_inactive) + LOGGER.debug(f"User {get_user_email(user)} requested server list") + user_email = get_user_email(user) + servers = await server_service.list_servers_for_user(db, user_email, include_inactive=include_inactive) return [server.model_dump(by_alias=True) for server in servers] @admin_router.get("/servers/{server_id}", response_model=ServerRead) -async def admin_get_server(server_id: str, db: Session = Depends(get_db), user: str = Depends(require_auth)) -> Dict[str, Any]: +async def admin_get_server(server_id: str, db: Session = Depends(get_db), user=Depends(get_current_user_with_permissions)) -> Dict[str, Any]: """ Retrieve server details for the admin UI. @@ -418,7 +456,7 @@ async def admin_get_server(server_id: str, db: Session = Depends(get_db), user: >>> >>> # Mock dependencies >>> mock_db = MagicMock() - >>> mock_user = "test_user" + >>> mock_user = {"email": "test_user", "db": mock_db} >>> server_id = "test-server-1" >>> >>> # Mock server response @@ -486,7 +524,7 @@ async def admin_get_server(server_id: str, db: Session = Depends(get_db), user: >>> server_service.get_server = original_get_server """ try: - LOGGER.debug(f"User {user} requested details for server ID {server_id}") + LOGGER.debug(f"User {get_user_email(user)} requested details for server ID {server_id}") server = await server_service.get_server(db, server_id) return server.model_dump(by_alias=True) except ServerNotFoundError as e: @@ -497,7 +535,7 @@ async def admin_get_server(server_id: str, db: Session = Depends(get_db), user: @admin_router.post("/servers", response_model=ServerRead) -async def admin_add_server(request: Request, db: Session = Depends(get_db), user: str = Depends(require_auth)) -> JSONResponse: +async def admin_add_server(request: Request, db: Session = Depends(get_db), user=Depends(get_current_user_with_permissions)) -> JSONResponse: """ Add a new server via the admin UI. @@ -509,9 +547,9 @@ async def admin_add_server(request: Request, db: Session = Depends(get_db), user - name (required): The name of the server - description (optional): A description of the server's purpose - icon (optional): URL or path to the server's icon - - associatedTools (optional, comma-separated): Tools associated with this server - - associatedResources (optional, comma-separated): Resources associated with this server - - associatedPrompts (optional, comma-separated): Prompts associated with this server + - associatedTools (optional, multiple values): Tools associated with this server + - associatedResources (optional, multiple values): Resources associated with this server + - associatedPrompts (optional, multiple values): Prompts associated with this server Args: request (Request): FastAPI request containing form data. @@ -535,7 +573,7 @@ async def admin_add_server(request: Request, db: Session = Depends(get_db), user >>> timestamp = datetime.now().strftime("%Y%m%d%H%M%S") >>> short_uuid = str(uuid.uuid4())[:8] >>> unq_ext = f"{timestamp}-{short_uuid}" - >>> mock_user = "test_user_" + unq_ext + >>> mock_user = {"email": "test_user_" + unq_ext, "db": mock_db} >>> # Mock form data for successful server creation >>> form_data = FormData([ ... ("name", "Test-Server-"+unq_ext ), @@ -544,7 +582,9 @@ async def admin_add_server(request: Request, db: Session = Depends(get_db), user ... ("associatedTools", "tool1"), ... ("associatedTools", "tool2"), ... ("associatedResources", "resource1"), + ... ("associatedResources", "resource2"), ... ("associatedPrompts", "prompt1"), + ... ("associatedPrompts", "prompt2"), ... ("is_inactive_checked", "false") ... ]) >>> @@ -620,15 +660,15 @@ async def admin_add_server(request: Request, db: Session = Depends(get_db), user tags: list[str] = [tag.strip() for tag in tags_str.split(",") if tag.strip()] if tags_str else [] try: - LOGGER.debug(f"User {user} is adding a new server with name: {form['name']}") + LOGGER.debug(f"User {get_user_email(user)} is adding a new server with name: {form['name']}") server = ServerCreate( id=form.get("id") or None, name=form.get("name"), description=form.get("description"), icon=form.get("icon"), associated_tools=",".join(form.getlist("associatedTools")), - associated_resources=form.get("associatedResources"), - associated_prompts=form.get("associatedPrompts"), + associated_resources=",".join(form.getlist("associatedResources")), + associated_prompts=",".join(form.getlist("associatedPrompts")), tags=tags, ) except KeyError as e: @@ -636,7 +676,22 @@ async def admin_add_server(request: Request, db: Session = Depends(get_db), user return JSONResponse(content={"message": f"Missing required field: {e}", "success": False}, status_code=422) try: - await server_service.register_server(db, server) + user_email = get_user_email(user) + # Determine personal team for default assignment + team_id = None + try: + # First-Party + from mcpgateway.services.team_management_service import TeamManagementService # pylint: disable=import-outside-toplevel + + team_service = TeamManagementService(db) + user_teams = await team_service.get_user_teams(user_email, include_personal=True) + personal_team = next((t for t in user_teams if getattr(t, "is_personal", False)), None) + team_id = personal_team.id if personal_team else None + except Exception: + team_id = None + + # Ensure default visibility is private and assign to personal team when available + await server_service.register_server(db, server, created_by=user_email, team_id=team_id, visibility="private") return JSONResponse( content={"message": "Server created successfully!", "success": True}, status_code=200, @@ -661,7 +716,7 @@ async def admin_edit_server( server_id: str, request: Request, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> JSONResponse: """ Edit an existing server via the admin UI. @@ -675,9 +730,9 @@ async def admin_edit_server( - name (optional): The updated name of the server - description (optional): An updated description of the server's purpose - icon (optional): Updated URL or path to the server's icon - - associatedTools (optional, comma-separated): Updated list of tools associated with this server - - associatedResources (optional, comma-separated): Updated list of resources associated with this server - - associatedPrompts (optional, comma-separated): Updated list of prompts associated with this server + - associatedTools (optional, multiple values): Updated list of tools associated with this server + - associatedResources (optional, multiple values): Updated list of resources associated with this server + - associatedPrompts (optional, multiple values): Updated list of prompts associated with this server Args: server_id (str): The ID of the server to edit @@ -696,7 +751,7 @@ async def admin_edit_server( >>> from starlette.datastructures import FormData >>> >>> mock_db = MagicMock() - >>> mock_user = "test_user" + >>> mock_user = {"email": "test_user", "db": mock_db} >>> server_id = "server-to-edit" >>> >>> # Happy path: Edit server with new name @@ -778,15 +833,15 @@ async def admin_edit_server( tags_str = str(form.get("tags", "")) tags: list[str] = [tag.strip() for tag in tags_str.split(",") if tag.strip()] if tags_str else [] try: - LOGGER.debug(f"User {user} is editing server ID {server_id} with name: {form.get('name')}") + LOGGER.debug(f"User {get_user_email(user)} is editing server ID {server_id} with name: {form.get('name')}") server = ServerUpdate( id=form.get("id"), name=form.get("name"), description=form.get("description"), icon=form.get("icon"), associated_tools=",".join(form.getlist("associatedTools")), - associated_resources=form.get("associatedResources"), - associated_prompts=form.get("associatedPrompts"), + associated_resources=",".join(form.getlist("associatedResources")), + associated_prompts=",".join(form.getlist("associatedPrompts")), tags=tags, ) await server_service.update_server(db, server_id, server) @@ -817,7 +872,7 @@ async def admin_toggle_server( server_id: str, request: Request, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> RedirectResponse: """ Toggle a server's active status via the admin UI. @@ -845,7 +900,7 @@ async def admin_toggle_server( >>> from starlette.datastructures import FormData >>> >>> mock_db = MagicMock() - >>> mock_user = "test_user" + >>> mock_user = {"email": "test_user", "db": mock_db} >>> server_id = "server-to-toggle" >>> >>> # Happy path: Activate server @@ -903,7 +958,7 @@ async def admin_toggle_server( >>> server_service.toggle_server_status = original_toggle_server_status """ form = await request.form() - LOGGER.debug(f"User {user} is toggling server ID {server_id} with activate: {form.get('activate')}") + LOGGER.debug(f"User {get_user_email(user)} is toggling server ID {server_id} with activate: {form.get('activate')}") activate = str(form.get("activate", "true")).lower() == "true" is_inactive_checked = str(form.get("is_inactive_checked", "false")) try: @@ -918,7 +973,7 @@ async def admin_toggle_server( @admin_router.post("/servers/{server_id}/delete") -async def admin_delete_server(server_id: str, request: Request, db: Session = Depends(get_db), user: str = Depends(require_auth)) -> RedirectResponse: +async def admin_delete_server(server_id: str, request: Request, db: Session = Depends(get_db), user=Depends(get_current_user_with_permissions)) -> RedirectResponse: """ Delete a server via the admin UI. @@ -943,7 +998,7 @@ async def admin_delete_server(server_id: str, request: Request, db: Session = De >>> from starlette.datastructures import FormData >>> >>> mock_db = MagicMock() - >>> mock_user = "test_user" + >>> mock_user = {"email": "test_user", "db": mock_db} >>> server_id = "server-to-delete" >>> >>> # Happy path: Delete server @@ -989,7 +1044,7 @@ async def admin_delete_server(server_id: str, request: Request, db: Session = De >>> server_service.delete_server = original_delete_server """ try: - LOGGER.debug(f"User {user} is deleting server ID {server_id}") + LOGGER.debug(f"User {get_user_email(user)} is deleting server ID {server_id}") await server_service.delete_server(db, server_id) except Exception as e: LOGGER.error(f"Error deleting server: {e}") @@ -1007,7 +1062,7 @@ async def admin_delete_server(server_id: str, request: Request, db: Session = De async def admin_list_resources( include_inactive: bool = False, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> List[Dict[str, Any]]: """ List resources for the admin UI with an option to include inactive resources. @@ -1031,7 +1086,7 @@ async def admin_list_resources( >>> from datetime import datetime, timezone >>> >>> mock_db = MagicMock() - >>> mock_user = "test_user" + >>> mock_user = {"email": "test_user", "db": mock_db} >>> >>> # Mock resource data >>> mock_resource = ResourceRead( @@ -1052,9 +1107,9 @@ async def admin_list_resources( ... tags=[] ... ) >>> - >>> # Mock the resource_service.list_resources method - >>> original_list_resources = resource_service.list_resources - >>> resource_service.list_resources = AsyncMock(return_value=[mock_resource]) + >>> # Mock the resource_service.list_resources_for_user method + >>> original_list_resources_for_user = resource_service.list_resources_for_user + >>> resource_service.list_resources_for_user = AsyncMock(return_value=[mock_resource]) >>> >>> # Test listing active resources >>> async def test_admin_list_resources_active(): @@ -1075,7 +1130,7 @@ async def admin_list_resources( ... avg_response_time=0.0, last_execution_time=None), ... tags=[] ... ) - >>> resource_service.list_resources = AsyncMock(return_value=[mock_resource, mock_inactive_resource]) + >>> resource_service.list_resources_for_user = AsyncMock(return_value=[mock_resource, mock_inactive_resource]) >>> async def test_admin_list_resources_all(): ... result = await admin_list_resources(include_inactive=True, db=mock_db, user=mock_user) ... return len(result) == 2 and not result[1]['isActive'] @@ -1084,7 +1139,7 @@ async def admin_list_resources( True >>> >>> # Test empty list - >>> resource_service.list_resources = AsyncMock(return_value=[]) + >>> resource_service.list_resources_for_user = AsyncMock(return_value=[]) >>> async def test_admin_list_resources_empty(): ... result = await admin_list_resources(include_inactive=False, db=mock_db, user=mock_user) ... return result == [] @@ -1093,7 +1148,7 @@ async def admin_list_resources( True >>> >>> # Test exception handling - >>> resource_service.list_resources = AsyncMock(side_effect=Exception("Resource list error")) + >>> resource_service.list_resources_for_user = AsyncMock(side_effect=Exception("Resource list error")) >>> async def test_admin_list_resources_exception(): ... try: ... await admin_list_resources(False, mock_db, mock_user) @@ -1105,10 +1160,11 @@ async def admin_list_resources( True >>> >>> # Restore original method - >>> resource_service.list_resources = original_list_resources + >>> resource_service.list_resources_for_user = original_list_resources_for_user """ - LOGGER.debug(f"User {user} requested resource list") - resources = await resource_service.list_resources(db, include_inactive=include_inactive) + LOGGER.debug(f"User {get_user_email(user)} requested resource list") + user_email = get_user_email(user) + resources = await resource_service.list_resources_for_user(db, user_email, include_inactive=include_inactive) return [resource.model_dump(by_alias=True) for resource in resources] @@ -1116,7 +1172,7 @@ async def admin_list_resources( async def admin_list_prompts( include_inactive: bool = False, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> List[Dict[str, Any]]: """ List prompts for the admin UI with an option to include inactive prompts. @@ -1140,7 +1196,7 @@ async def admin_list_prompts( >>> from datetime import datetime, timezone >>> >>> mock_db = MagicMock() - >>> mock_user = "test_user" + >>> mock_user = {"email": "test_user", "db": mock_db} >>> >>> # Mock prompt data >>> mock_prompt = PromptRead( @@ -1160,9 +1216,9 @@ async def admin_list_prompts( ... tags=[] ... ) >>> - >>> # Mock the prompt_service.list_prompts method - >>> original_list_prompts = prompt_service.list_prompts - >>> prompt_service.list_prompts = AsyncMock(return_value=[mock_prompt]) + >>> # Mock the prompt_service.list_prompts_for_user method + >>> original_list_prompts_for_user = prompt_service.list_prompts_for_user + >>> prompt_service.list_prompts_for_user = AsyncMock(return_value=[mock_prompt]) >>> >>> # Test listing active prompts >>> async def test_admin_list_prompts_active(): @@ -1183,7 +1239,7 @@ async def admin_list_prompts( ... ), ... tags=[] ... ) - >>> prompt_service.list_prompts = AsyncMock(return_value=[mock_prompt, mock_inactive_prompt]) + >>> prompt_service.list_prompts_for_user = AsyncMock(return_value=[mock_prompt, mock_inactive_prompt]) >>> async def test_admin_list_prompts_all(): ... result = await admin_list_prompts(include_inactive=True, db=mock_db, user=mock_user) ... return len(result) == 2 and not result[1]['isActive'] @@ -1192,7 +1248,7 @@ async def admin_list_prompts( True >>> >>> # Test empty list - >>> prompt_service.list_prompts = AsyncMock(return_value=[]) + >>> prompt_service.list_prompts_for_user = AsyncMock(return_value=[]) >>> async def test_admin_list_prompts_empty(): ... result = await admin_list_prompts(include_inactive=False, db=mock_db, user=mock_user) ... return result == [] @@ -1201,7 +1257,7 @@ async def admin_list_prompts( True >>> >>> # Test exception handling - >>> prompt_service.list_prompts = AsyncMock(side_effect=Exception("Prompt list error")) + >>> prompt_service.list_prompts_for_user = AsyncMock(side_effect=Exception("Prompt list error")) >>> async def test_admin_list_prompts_exception(): ... try: ... await admin_list_prompts(False, mock_db, mock_user) @@ -1213,10 +1269,11 @@ async def admin_list_prompts( True >>> >>> # Restore original method - >>> prompt_service.list_prompts = original_list_prompts + >>> prompt_service.list_prompts_for_user = original_list_prompts_for_user """ - LOGGER.debug(f"User {user} requested prompt list") - prompts = await prompt_service.list_prompts(db, include_inactive=include_inactive) + LOGGER.debug(f"User {get_user_email(user)} requested prompt list") + user_email = get_user_email(user) + prompts = await prompt_service.list_prompts_for_user(db, user_email, include_inactive=include_inactive) return [prompt.model_dump(by_alias=True) for prompt in prompts] @@ -1224,7 +1281,7 @@ async def admin_list_prompts( async def admin_list_gateways( include_inactive: bool = False, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> List[Dict[str, Any]]: """ List gateways for the admin UI with an option to include inactive gateways. @@ -1248,7 +1305,7 @@ async def admin_list_gateways( >>> from datetime import datetime, timezone >>> >>> mock_db = MagicMock() - >>> mock_user = "test_user" + >>> mock_user = {"email": "test_user", "db": mock_db} >>> >>> # Mock gateway data >>> mock_gateway = GatewayRead( @@ -1321,7 +1378,7 @@ async def admin_list_gateways( >>> # Restore original method >>> gateway_service.list_gateways = original_list_gateways """ - LOGGER.debug(f"User {user} requested gateway list") + LOGGER.debug(f"User {get_user_email(user)} requested gateway list") gateways = await gateway_service.list_gateways(db, include_inactive=include_inactive) return [gateway.model_dump(by_alias=True) for gateway in gateways] @@ -1331,7 +1388,7 @@ async def admin_toggle_gateway( gateway_id: str, request: Request, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> RedirectResponse: """ Toggle the active status of a gateway via the admin UI. @@ -1358,7 +1415,7 @@ async def admin_toggle_gateway( >>> from starlette.datastructures import FormData >>> >>> mock_db = MagicMock() - >>> mock_user = "test_user" + >>> mock_user = {"email": "test_user", "db": mock_db} >>> gateway_id = "gateway-to-toggle" >>> >>> # Happy path: Activate gateway @@ -1415,202 +1472,2263 @@ async def admin_toggle_gateway( >>> # Restore original method >>> gateway_service.toggle_gateway_status = original_toggle_gateway_status """ - LOGGER.debug(f"User {user} is toggling gateway ID {gateway_id}") - form = await request.form() - activate = str(form.get("activate", "true")).lower() == "true" - is_inactive_checked = str(form.get("is_inactive_checked", "false")) + LOGGER.debug(f"User {get_user_email(user)} is toggling gateway ID {gateway_id}") + form = await request.form() + activate = str(form.get("activate", "true")).lower() == "true" + is_inactive_checked = str(form.get("is_inactive_checked", "false")) + + try: + await gateway_service.toggle_gateway_status(db, gateway_id, activate) + except Exception as e: + LOGGER.error(f"Error toggling gateway status: {e}") + + root_path = request.scope.get("root_path", "") + if is_inactive_checked.lower() == "true": + return RedirectResponse(f"{root_path}/admin/?include_inactive=true#gateways", status_code=303) + return RedirectResponse(f"{root_path}/admin#gateways", status_code=303) + + +@admin_router.get("/", name="admin_home", response_class=HTMLResponse) +async def admin_ui( + request: Request, + include_inactive: bool = False, + db: Session = Depends(get_db), + user=Depends(get_current_user_with_permissions), + _jwt_token: str = Depends(get_jwt_token), +) -> Any: + """ + Render the admin dashboard HTML page. + + This endpoint serves as the main entry point to the admin UI. It fetches data for + servers, tools, resources, prompts, gateways, and roots from their respective + services, then renders the admin dashboard template with this data. + + The endpoint also sets a JWT token as a cookie for authentication in subsequent + requests. This token is HTTP-only for security reasons. + + Args: + request (Request): FastAPI request object. + include_inactive (bool): Whether to include inactive items in all listings. + db (Session): Database session dependency. + user (dict): Authenticated user context with permissions. + + Returns: + Any: Rendered HTML template for the admin dashboard. + + Examples: + >>> import asyncio + >>> from unittest.mock import AsyncMock, MagicMock, patch + >>> from fastapi import Request + >>> from fastapi.responses import HTMLResponse + >>> from mcpgateway.schemas import ServerRead, ToolRead, ResourceRead, PromptRead, GatewayRead, ServerMetrics, ToolMetrics, ResourceMetrics, PromptMetrics + >>> from datetime import datetime, timezone + >>> + >>> mock_db = MagicMock() + >>> mock_user = {"email": "admin_user", "db": mock_db} + >>> + >>> # Mock services to return empty lists for simplicity in doctest + >>> original_list_servers_for_user = server_service.list_servers_for_user + >>> original_list_tools_for_user = tool_service.list_tools_for_user + >>> original_list_resources_for_user = resource_service.list_resources_for_user + >>> original_list_prompts_for_user = prompt_service.list_prompts_for_user + >>> original_list_gateways = gateway_service.list_gateways + >>> original_list_roots = root_service.list_roots + >>> + >>> server_service.list_servers_for_user = AsyncMock(return_value=[]) + >>> tool_service.list_tools_for_user = AsyncMock(return_value=[]) + >>> resource_service.list_resources_for_user = AsyncMock(return_value=[]) + >>> prompt_service.list_prompts_for_user = AsyncMock(return_value=[]) + >>> gateway_service.list_gateways = AsyncMock(return_value=[]) + >>> root_service.list_roots = AsyncMock(return_value=[]) + >>> + >>> # Mock request and template rendering + >>> mock_request = MagicMock(spec=Request, scope={"root_path": "/admin_prefix"}) + >>> mock_request.app.state.templates = MagicMock() + >>> mock_template_response = HTMLResponse("Admin UI") + >>> mock_request.app.state.templates.TemplateResponse.return_value = mock_template_response + >>> + >>> # Test basic rendering + >>> async def test_admin_ui_basic_render(): + ... response = await admin_ui(mock_request, False, mock_db, mock_user) + ... return isinstance(response, HTMLResponse) and response.status_code == 200 + >>> + >>> asyncio.run(test_admin_ui_basic_render()) + True + >>> + >>> # Test with include_inactive=True + >>> async def test_admin_ui_include_inactive(): + ... response = await admin_ui(mock_request, True, mock_db, mock_user) + ... # Verify list methods were called with include_inactive=True + ... server_service.list_servers_for_user.assert_called_with(mock_db, mock_user["email"], include_inactive=True) + ... return isinstance(response, HTMLResponse) + >>> + >>> asyncio.run(test_admin_ui_include_inactive()) + True + >>> + >>> # Test with populated data (mocking a few items) + >>> mock_server = ServerRead(id="s1", name="S1", description="d", created_at=datetime.now(timezone.utc), updated_at=datetime.now(timezone.utc), is_active=True, associated_tools=[], associated_resources=[], associated_prompts=[], icon="i", metrics=ServerMetrics(total_executions=0, successful_executions=0, failed_executions=0, failure_rate=0.0, min_response_time=0.0, max_response_time=0.0, avg_response_time=0.0, last_execution_time=None)) + >>> mock_tool = ToolRead( + ... id="t1", name="T1", original_name="T1", url="http://t1.com", description="d", + ... created_at=datetime.now(timezone.utc), updated_at=datetime.now(timezone.utc), + ... enabled=True, reachable=True, gateway_slug="default", custom_name_slug="t1", + ... request_type="GET", integration_type="MCP", headers={}, input_schema={}, + ... annotations={}, jsonpath_filter=None, auth=None, execution_count=0, + ... metrics=ToolMetrics( + ... total_executions=0, successful_executions=0, failed_executions=0, + ... failure_rate=0.0, min_response_time=0.0, max_response_time=0.0, + ... avg_response_time=0.0, last_execution_time=None + ... ), + ... gateway_id=None, + ... customName="T1", + ... tags=[] + ... ) + >>> server_service.list_servers_for_user = AsyncMock(return_value=[mock_server]) + >>> tool_service.list_tools_for_user = AsyncMock(return_value=[mock_tool]) + >>> + >>> async def test_admin_ui_with_data(): + ... response = await admin_ui(mock_request, False, mock_db, mock_user) + ... # Check if template context was populated (indirectly via mock calls) + ... assert mock_request.app.state.templates.TemplateResponse.call_count >= 1 + ... context = mock_request.app.state.templates.TemplateResponse.call_args[0][2] + ... return len(context['servers']) == 1 and len(context['tools']) == 1 + >>> + >>> asyncio.run(test_admin_ui_with_data()) + True + >>> + >>> # Test exception handling during data fetching + >>> server_service.list_servers_for_user = AsyncMock(side_effect=Exception("DB error")) + >>> async def test_admin_ui_exception_handled(): + ... try: + ... response = await admin_ui(mock_request, False, mock_db, mock_user) + ... return False # Should not reach here if exception is properly raised + ... except Exception as e: + ... return str(e) == "DB error" + >>> + >>> asyncio.run(test_admin_ui_exception_handled()) + True + >>> + >>> # Restore original methods + >>> server_service.list_servers_for_user = original_list_servers_for_user + >>> tool_service.list_tools_for_user = original_list_tools_for_user + >>> resource_service.list_resources_for_user = original_list_resources_for_user + >>> prompt_service.list_prompts_for_user = original_list_prompts_for_user + >>> gateway_service.list_gateways = original_list_gateways + >>> root_service.list_roots = original_list_roots + """ + LOGGER.debug(f"User {get_user_email(user)} accessed the admin UI") + user_email = get_user_email(user) + + # Use team-filtered methods to show only resources the user can access + tools = [ + tool.model_dump(by_alias=True) + for tool in sorted(await tool_service.list_tools_for_user(db, user_email, include_inactive=include_inactive), key=lambda t: ((t.url or "").lower(), (t.original_name or "").lower())) + ] + servers = [server.model_dump(by_alias=True) for server in await server_service.list_servers_for_user(db, user_email, include_inactive=include_inactive)] + resources = [resource.model_dump(by_alias=True) for resource in await resource_service.list_resources_for_user(db, user_email, include_inactive=include_inactive)] + prompts = [prompt.model_dump(by_alias=True) for prompt in await prompt_service.list_prompts_for_user(db, user_email, include_inactive=include_inactive)] + gateways_raw = await gateway_service.list_gateways(db, include_inactive=include_inactive) + gateways = [gateway.model_dump(by_alias=True) for gateway in gateways_raw] + + roots = [root.model_dump(by_alias=True) for root in await root_service.list_roots()] + + # Load A2A agents if enabled + a2a_agents = [] + if a2a_service and settings.mcpgateway_a2a_enabled: + a2a_agents_raw = await a2a_service.list_agents(db, include_inactive=include_inactive) + a2a_agents = [agent.model_dump(by_alias=True) for agent in a2a_agents_raw] + + root_path = settings.app_root_path + max_name_length = settings.validation_max_name_length + + # Get user teams for team selector + user_teams = [] + if getattr(settings, "email_auth_enabled", False): + try: + # First-Party + from mcpgateway.services.team_management_service import TeamManagementService # pylint: disable=import-outside-toplevel # pylint: disable=import-outside-toplevel + + team_service = TeamManagementService(db) + user_email = get_user_email(user) + if user_email and "@" in user_email: + raw_teams = await team_service.get_user_teams(user_email) + user_teams = [] + for team in raw_teams: + try: + team_dict = { + "id": str(team.id) if team.id else "", + "name": str(team.name) if team.name else "", + "type": str(getattr(team, "type", "organization")), + "is_personal": bool(getattr(team, "is_personal", False)), + "member_count": len(getattr(team, "members", []) or []), + } + user_teams.append(team_dict) + except Exception as team_error: + LOGGER.warning(f"Failed to serialize team {getattr(team, 'id', 'unknown')}: {team_error}") + continue + except Exception as e: + LOGGER.warning(f"Failed to load user teams: {e}") + user_teams = [] + + response = request.app.state.templates.TemplateResponse( + request, + "admin.html", + { + "request": request, + "servers": servers, + "tools": tools, + "resources": resources, + "prompts": prompts, + "gateways": gateways, + "a2a_agents": a2a_agents, + "roots": roots, + "include_inactive": include_inactive, + "root_path": root_path, + "max_name_length": max_name_length, + "gateway_tool_name_separator": settings.gateway_tool_name_separator, + "bulk_import_max_tools": settings.mcpgateway_bulk_import_max_tools, + "a2a_enabled": settings.mcpgateway_a2a_enabled, + "current_user": get_user_email(user), + "email_auth_enabled": getattr(settings, "email_auth_enabled", False), + "is_admin": bool(user.get("is_admin") if isinstance(user, dict) else False), + "user_teams": user_teams, + }, + ) + + # Set JWT token cookie for HTMX requests if email auth is enabled + if getattr(settings, "email_auth_enabled", False): + try: + # JWT library is imported at top level as jwt + + # Determine the admin user email + admin_email = get_user_email(user) + is_admin_flag = bool(user.get("is_admin") if isinstance(user, dict) else True) + + # Generate a comprehensive JWT token that matches the email auth format + now = datetime.now(timezone.utc) + payload = { + "sub": admin_email, + "iss": settings.jwt_issuer, + "aud": settings.jwt_audience, + "iat": int(now.timestamp()), + "exp": int((now + timedelta(minutes=settings.token_expiry)).timestamp()), + "jti": str(uuid.uuid4()), + "user": {"email": admin_email, "full_name": getattr(settings, "platform_admin_full_name", "Platform User"), "is_admin": is_admin_flag, "auth_provider": "local"}, + "teams": [], # Teams populated downstream when needed + "namespaces": [f"user:{admin_email}", "public"], + "scopes": {"server_id": None, "permissions": ["*"], "ip_restrictions": [], "time_restrictions": {}}, + } + + token = jwt.encode(payload, settings.jwt_secret_key, algorithm=settings.jwt_algorithm) + + # Set HTTP-only cookie for security + response.set_cookie( + key="jwt_token", + value=token, + httponly=True, + secure=getattr(settings, "secure_cookies", False), + samesite=getattr(settings, "cookie_samesite", "lax"), + max_age=settings.token_expiry * 60, # Convert minutes to seconds + path="/", # Make cookie available for all paths + ) + LOGGER.debug(f"Set comprehensive JWT token cookie for user: {admin_email}") + except Exception as e: + LOGGER.warning(f"Failed to set JWT token cookie for user {user}: {e}") + + return response + + +@admin_router.get("/login") +async def admin_login_page(request: Request) -> HTMLResponse: + """ + Render the admin login page. + + This endpoint serves the login form for email-based authentication. + If email auth is disabled, redirects to the main admin page. + + Args: + request (Request): FastAPI request object. + + Returns: + HTMLResponse: Rendered HTML login page. + + Examples: + >>> from fastapi import Request + >>> from fastapi.responses import HTMLResponse + >>> from unittest.mock import MagicMock + >>> + >>> # Mock request + >>> mock_request = MagicMock(spec=Request) + >>> mock_request.scope = {"root_path": "/test"} + >>> mock_request.app.state.templates = MagicMock() + >>> mock_response = HTMLResponse("Login") + >>> mock_request.app.state.templates.TemplateResponse.return_value = mock_response + >>> + >>> import asyncio + >>> async def test_login_page(): + ... response = await admin_login_page(mock_request) + ... return isinstance(response, HTMLResponse) + >>> + >>> asyncio.run(test_login_page()) + True + """ + # Check if email auth is enabled + if not getattr(settings, "email_auth_enabled", False): + root_path = request.scope.get("root_path", "") + return RedirectResponse(url=f"{root_path}/admin", status_code=303) + + root_path = settings.app_root_path + + # Use external template file + return request.app.state.templates.TemplateResponse("login.html", {"request": request, "root_path": root_path}) + + +@admin_router.post("/login") +async def admin_login_handler(request: Request, db: Session = Depends(get_db)) -> RedirectResponse: + """ + Handle admin login form submission. + + This endpoint processes the email/password login form, authenticates the user, + sets the JWT cookie, and redirects to the admin panel or back to login with error. + + Args: + request (Request): FastAPI request object. + db (Session): Database session dependency. + + Returns: + RedirectResponse: Redirect to admin panel on success or login page on failure. + + Examples: + >>> from fastapi import Request + >>> from fastapi.responses import RedirectResponse + >>> from unittest.mock import MagicMock, AsyncMock + >>> + >>> # Mock request with form data + >>> mock_request = MagicMock(spec=Request) + >>> mock_request.scope = {"root_path": "/test"} + >>> mock_form = {"email": "admin@example.com", "password": "changeme"} + >>> mock_request.form = AsyncMock(return_value=mock_form) + >>> + >>> mock_db = MagicMock() + >>> + >>> import asyncio + >>> async def test_login_handler(): + ... try: + ... response = await admin_login_handler(mock_request, mock_db) + ... return isinstance(response, RedirectResponse) + ... except Exception: + ... return True # Expected due to mocked dependencies + >>> + >>> asyncio.run(test_login_handler()) + True + """ + if not getattr(settings, "email_auth_enabled", False): + root_path = request.scope.get("root_path", "") + return RedirectResponse(url=f"{root_path}/admin", status_code=303) + + try: + form = await request.form() + email = form.get("email") + password = form.get("password") + + if not email or not password: + root_path = request.scope.get("root_path", "") + return RedirectResponse(url=f"{root_path}/admin/login?error=missing_fields", status_code=303) + + # Authenticate using the email auth service + # First-Party + from mcpgateway.services.email_auth_service import EmailAuthService # pylint: disable=import-outside-toplevel # pylint: disable=import-outside-toplevel + + auth_service = EmailAuthService(db) + + try: + # Authenticate user + LOGGER.debug(f"Attempting authentication for {email}") + user = await auth_service.authenticate_user(email, password) + LOGGER.debug(f"Authentication result: {user}") + + if not user: + LOGGER.warning(f"Authentication failed for {email} - user is None") + root_path = request.scope.get("root_path", "") + return RedirectResponse(url=f"{root_path}/admin/login?error=invalid_credentials", status_code=303) + + # Create JWT token with proper audience and issuer claims + # First-Party + from mcpgateway.routers.email_auth import create_access_token # pylint: disable=import-outside-toplevel + + token, _ = create_access_token(user) # expires_seconds not needed here + + # Create redirect response + root_path = request.scope.get("root_path", "") + response = RedirectResponse(url=f"{root_path}/admin", status_code=303) + + # Set JWT token as secure cookie + # First-Party + from mcpgateway.utils.security_cookies import set_auth_cookie # pylint: disable=import-outside-toplevel + + set_auth_cookie(response, token, remember_me=False) + + LOGGER.info(f"Admin user {email} logged in successfully") + return response + + except Exception as e: + LOGGER.warning(f"Login failed for {email}: {e}") + root_path = request.scope.get("root_path", "") + return RedirectResponse(url=f"{root_path}/admin/login?error=invalid_credentials", status_code=303) + + except Exception as e: + LOGGER.error(f"Login handler error: {e}") + root_path = request.scope.get("root_path", "") + return RedirectResponse(url=f"{root_path}/admin/login?error=server_error", status_code=303) + + +@admin_router.post("/logout") +async def admin_logout(request: Request) -> RedirectResponse: + """ + Handle admin logout by clearing authentication cookies. + + This endpoint clears the JWT authentication cookie and redirects + the user to a login page or back to the admin page (which will + trigger authentication). + + Args: + request (Request): FastAPI request object. + + Returns: + RedirectResponse: Redirect to admin page with cleared cookies. + + Examples: + >>> from fastapi import Request + >>> from fastapi.responses import RedirectResponse + >>> from unittest.mock import MagicMock + >>> + >>> # Mock request + >>> mock_request = MagicMock(spec=Request) + >>> mock_request.scope = {"root_path": "/test"} + >>> + >>> import asyncio + >>> async def test_logout(): + ... response = await admin_logout(mock_request) + ... return isinstance(response, RedirectResponse) and response.status_code == 303 + >>> + >>> asyncio.run(test_logout()) + True + """ + LOGGER.info("Admin user logging out") + root_path = request.scope.get("root_path", "") + + # Create redirect response to login page + response = RedirectResponse(url=f"{root_path}/admin/login", status_code=303) + + # Clear JWT token cookie + response.delete_cookie("jwt_token", path="/", secure=True, httponly=True, samesite="lax") + + return response + + +# ============================================================================ # +# TEAM ADMIN ROUTES # +# ============================================================================ # + + +async def _generate_unified_teams_view(team_service, current_user, root_path): # pylint: disable=unused-argument + """Generate unified team view with relationship badges. + + Args: + team_service: Service for team operations + current_user: Current authenticated user + root_path: Application root path + + Returns: + HTML string containing the unified teams view + """ + # Get user's teams (owned + member) + user_teams = await team_service.get_user_teams(current_user.email) + + # Get public teams user can join + public_teams = await team_service.discover_public_teams(current_user.email) + + # Combine teams with relationship information + all_teams = [] + + # Add user's teams (owned and member) + for team in user_teams: + user_role = await team_service.get_user_role_in_team(current_user.email, team.id) + relationship = "owner" if user_role == "owner" else "member" + all_teams.append({"team": team, "relationship": relationship, "member_count": len(team.members) if team.members else 0}) + + # Add public teams user can join - check for pending requests + for team in public_teams: + # Check if user has a pending join request + user_requests = await team_service.get_user_join_requests(current_user.email, team.id) + pending_request = next((req for req in user_requests if req.status == "pending"), None) + + relationship_data = {"team": team, "relationship": "join", "member_count": len(team.members) if team.members else 0, "pending_request": pending_request} + all_teams.append(relationship_data) + + # Generate HTML for unified team view + teams_html = "" + for item in all_teams: + team = item["team"] + relationship = item["relationship"] + member_count = item["member_count"] + pending_request = item.get("pending_request") + + # Relationship badge - special handling for personal teams + if team.is_personal: + badge_html = 'PERSONAL' + elif relationship == "owner": + badge_html = ( + 'OWNER' + ) + elif relationship == "member": + badge_html = ( + 'MEMBER' + ) + else: # join + badge_html = 'CAN JOIN' + + # Visibility badge + visibility_badge = ( + f'{team.visibility.upper()}' + ) + + # Subtitle based on relationship - special handling for personal teams + if team.is_personal: + subtitle = "Your personal team • Private workspace" + elif relationship == "owner": + subtitle = "You own this team" + elif relationship == "member": + subtitle = f"You are a member • Owner: {team.created_by}" + else: # join + subtitle = f"Public team • Owner: {team.created_by}" + + # Escape team name for safe HTML attributes + safe_team_name = html.escape(team.name) + + # Actions based on relationship - special handling for personal teams + actions_html = "" + if team.is_personal: + # Personal teams have no management actions - they're private workspaces + actions_html = """ +
+ + Personal workspace - no actions available + +
+ """ + elif relationship == "owner": + delete_button = f'' + join_requests_button = ( + f'' + if team.visibility == "public" + else "" + ) + actions_html = f""" +
+ + + {join_requests_button} + {delete_button} +
+ """ + elif relationship == "member": + leave_button = f'' + actions_html = f""" +
+ {leave_button} +
+ """ + else: # join + if pending_request: + # Show "Requested to Join [Cancel Request]" state + actions_html = f""" +
+ + ⏳ Requested to Join + + +
+ """ + else: + # Show "Request to Join" button + actions_html = f""" +
+ +
+ """ + + # Truncated description (properly escaped) + description_text = "" + if team.description: + safe_description = html.escape(team.description) + truncated = safe_description[:80] + "..." if len(safe_description) > 80 else safe_description + description_text = f'

{truncated}

' + + teams_html += f""" +
+
+
+
+

🏢 {safe_team_name}

+ {badge_html} + {visibility_badge} + {member_count} members +
+

{subtitle}

+ {description_text} +
+
+ {actions_html} +
+ """ + + if not teams_html: + teams_html = '

No teams found. Create your first team using the button above.

' + + return HTMLResponse(content=teams_html) + + +@admin_router.get("/teams") +@require_permission("teams.read") +async def admin_list_teams( + request: Request, + db: Session = Depends(get_db), + user=Depends(get_current_user_with_permissions), + unified: bool = False, +) -> HTMLResponse: + """List teams for admin UI via HTMX. + + Args: + request: FastAPI request object + db: Database session + user: Authenticated admin user + unified: If True, return unified team view with relationship badges + + Returns: + HTML response with teams list + + Raises: + HTTPException: If email auth is disabled or user not found + """ + if not getattr(settings, "email_auth_enabled", False): + return HTMLResponse(content='

Email authentication is disabled. Teams feature requires email auth.

', status_code=200) + + try: + # First-Party + from mcpgateway.services.email_auth_service import EmailAuthService # pylint: disable=import-outside-toplevel + from mcpgateway.services.team_management_service import TeamManagementService # pylint: disable=import-outside-toplevel + + auth_service = EmailAuthService(db) + team_service = TeamManagementService(db) + + # Get current user + user_email = get_user_email(user) + current_user = await auth_service.get_user_by_email(user_email) + if not current_user: + return HTMLResponse(content='

User not found

', status_code=200) + + root_path = request.scope.get("root_path", "") + + if unified: + # Generate unified team view + return await _generate_unified_teams_view(team_service, current_user, root_path) + + # Generate traditional admin view + if current_user.is_admin: + teams, _ = await team_service.list_teams() + else: + teams = await team_service.get_user_teams(current_user.email) + + # Generate HTML for teams (traditional view) + teams_html = "" + for team in teams: + member_count = len(team.members) if team.members else 0 + teams_html += f""" +
+
+
+

{team.name}

+

Slug: {team.slug}

+

Visibility: {team.visibility}

+

Members: {member_count}

+ {f'

{team.description}

' if team.description else ''} +
+
+ + + {f'' if not team.is_personal and not current_user.is_admin else ''} + {f'' if not team.is_personal else ''} +
+
+
+
+ """ + + if not teams_html: + teams_html = '

No teams found. Create your first team above.

' + + return HTMLResponse(content=teams_html) + + except Exception as e: + LOGGER.error(f"Error listing teams for admin {user}: {e}") + return HTMLResponse(content=f'

Error loading teams: {str(e)}

', status_code=200) + + +@admin_router.post("/teams") +@require_permission("teams.create") +async def admin_create_team( + request: Request, + db: Session = Depends(get_db), + user=Depends(get_current_user_with_permissions), +) -> HTMLResponse: + """Create team via admin UI form submission. + + Args: + request: FastAPI request object + db: Database session + user: Authenticated admin user + + Returns: + HTML response with new team or error message + + Raises: + HTTPException: If email auth is disabled or validation fails + """ + if not getattr(settings, "email_auth_enabled", False): + return HTMLResponse(content='
Email authentication is disabled
', status_code=403) + + try: + form = await request.form() + name = form.get("name") + slug = form.get("slug") or None + description = form.get("description") or None + visibility = form.get("visibility", "private") + + if not name: + return HTMLResponse(content='
Team name is required
', status_code=400) + + # Create team + # First-Party + from mcpgateway.schemas import TeamCreateRequest # pylint: disable=import-outside-toplevel + from mcpgateway.services.team_management_service import TeamManagementService # pylint: disable=import-outside-toplevel # pylint: disable=import-outside-toplevel + + team_service = TeamManagementService(db) + + team_data = TeamCreateRequest(name=name, slug=slug, description=description, visibility=visibility) + + # Extract user email from user dict + user_email = get_user_email(user) + + team = await team_service.create_team(name=team_data.name, description=team_data.description, created_by=user_email, visibility=team_data.visibility) + + # Return HTML for the new team + member_count = 1 # Creator is automatically a member + team_html = f""" +
+
+
+

{team.name}

+

Slug: {team.slug}

+

Visibility: {team.visibility}

+

Members: {member_count}

+ {f'

{team.description}

' if team.description else ''} +
+
+ + {'' if not team.is_personal else ''} +
+
+
+
+ + """ + + return HTMLResponse(content=team_html, status_code=201) + + except IntegrityError as e: + LOGGER.error(f"Error creating team for admin {user}: {e}") + if "UNIQUE constraint failed: email_teams.slug" in str(e): + return HTMLResponse(content='
A team with this name already exists. Please choose a different name.
', status_code=400) + + return HTMLResponse(content=f'
Database error creating team: {str(e)}
', status_code=400) + except Exception as e: + LOGGER.error(f"Error creating team for admin {user}: {e}") + return HTMLResponse(content=f'
Error creating team: {str(e)}
', status_code=400) + + +@admin_router.get("/teams/{team_id}/members") +@require_permission("teams.read") +async def admin_view_team_members( + team_id: str, + request: Request, + db: Session = Depends(get_db), + user=Depends(get_current_user_with_permissions), +) -> HTMLResponse: + """View team members via admin UI. + + Args: + team_id: ID of the team to view members for + request: FastAPI request object + db: Database session + user: Current authenticated user context + + Returns: + HTMLResponse: Rendered team members view + """ + if not settings.email_auth_enabled: + return HTMLResponse(content='
Email authentication is disabled
', status_code=403) + + try: + # Get root_path from request + root_path = request.scope.get("root_path", "") + + # Get current user context for logging and authorization + user_email = get_user_email(user) + LOGGER.info(f"User {user_email} viewing members for team {team_id}") + + # First-Party + from mcpgateway.services.email_auth_service import EmailAuthService # pylint: disable=import-outside-toplevel # pylint: disable=import-outside-toplevel + from mcpgateway.services.team_management_service import TeamManagementService # pylint: disable=import-outside-toplevel # pylint: disable=import-outside-toplevel + + team_service = TeamManagementService(db) + + # Get team details + team = await team_service.get_team_by_id(team_id) + if not team: + return HTMLResponse(content='
Team not found
', status_code=404) + + # Get team members + members = await team_service.get_team_members(team_id) + + members_html = "" + for member_user, membership in members: + role_display = membership.role.replace("_", " ").title() if membership.role else "Member" + members_html += f""" +
+
+ {member_user.email} + {role_display} +
+
+ + Joined: {membership.joined_at.strftime('%Y-%m-%d') if membership.joined_at else 'Unknown'} + + +
+
+ """ + + if not members_html: + members_html = '
No members found
' + + # Add member management interface + management_html = f""" +
+
+

Manage Members: {team.name}

+ +
""" + + # Only show Add Member for public teams + if team.visibility == "public": + management_html += f""" +
+
+ +
+ +
""" + else: + management_html += """ +
+
+ + + + Private Team - Invitation Only +
+

+ Private teams require invitations to add new members. Use the team invitation system instead. +

+
""" + + management_html += """ +
+ """ + + return HTMLResponse(content=f'{management_html}
{members_html}
') + + except Exception as e: + LOGGER.error(f"Error viewing team members {team_id}: {e}") + return HTMLResponse(content=f'
Error loading members: {str(e)}
', status_code=500) + + +@admin_router.get("/teams/{team_id}/edit") +@require_permission("teams.update") +async def admin_get_team_edit( + team_id: str, + _request: Request, + db: Session = Depends(get_db), + _user=Depends(get_current_user_with_permissions), +) -> HTMLResponse: + """Get team edit form via admin UI. + + Args: + team_id: ID of the team to edit + db: Database session + + Returns: + HTMLResponse: Rendered team edit form + """ + if not settings.email_auth_enabled: + return HTMLResponse(content='
Email authentication is disabled
', status_code=403) + + try: + # First-Party + from mcpgateway.services.team_management_service import TeamManagementService # pylint: disable=import-outside-toplevel # pylint: disable=import-outside-toplevel + + team_service = TeamManagementService(db) + + team = await team_service.get_team_by_id(team_id) + if not team: + return HTMLResponse(content='
Team not found
', status_code=404) + + edit_form = f""" +
+

Edit Team

+
+
+ + +
+
+ + +

Slug cannot be changed

+
+
+ + +
+
+ + +
+
+ + +
+
+
+ """ + return HTMLResponse(content=edit_form) + + except Exception as e: + LOGGER.error(f"Error getting team edit form for {team_id}: {e}") + return HTMLResponse(content=f'
Error loading team: {str(e)}
', status_code=500) + + +@admin_router.post("/teams/{team_id}/update") +@require_permission("teams.update") +async def admin_update_team( + team_id: str, + request: Request, + db: Session = Depends(get_db), + user=Depends(get_current_user_with_permissions), +) -> HTMLResponse: + """Update team via admin UI. + + Args: + team_id: ID of the team to update + request: FastAPI request object + db: Database session + user: Current authenticated user context + + Returns: + HTMLResponse: Result of team update operation + """ + if not settings.email_auth_enabled: + return HTMLResponse(content='
Email authentication is disabled
', status_code=403) + + try: + # First-Party + from mcpgateway.services.team_management_service import TeamManagementService # pylint: disable=import-outside-toplevel # pylint: disable=import-outside-toplevel + + team_service = TeamManagementService(db) + + form = await request.form() + name = form.get("name") + description = form.get("description") or None + visibility = form.get("visibility", "private") + + if not name: + is_htmx = request.headers.get("HX-Request") == "true" + if is_htmx: + return HTMLResponse(content='
Team name is required
', status_code=400) + error_msg = urllib.parse.quote("Team name is required") + return RedirectResponse(url=f"/admin/?error={error_msg}#teams", status_code=303) + + # Update team + user_email = getattr(user, "email", None) or str(user) + await team_service.update_team(team_id=team_id, name=name, description=description, visibility=visibility, updated_by=user_email) + + # Check if this is an HTMX request + is_htmx = request.headers.get("HX-Request") == "true" + + if is_htmx: + # Return success message with auto-close and refresh for HTMX + success_html = """ +
+

Team updated successfully

+ +
+ """ + return HTMLResponse(content=success_html) + # For regular form submission, redirect to admin page with teams section + return RedirectResponse(url="/admin/#teams", status_code=303) + + except Exception as e: + LOGGER.error(f"Error updating team {team_id}: {e}") + + # Check if this is an HTMX request for error handling too + is_htmx = request.headers.get("HX-Request") == "true" + + if is_htmx: + return HTMLResponse(content=f'
Error updating team: {str(e)}
', status_code=400) + # For regular form submission, redirect to admin page with error parameter + error_msg = urllib.parse.quote(f"Error updating team: {str(e)}") + return RedirectResponse(url=f"/admin/?error={error_msg}#teams", status_code=303) + + +@admin_router.delete("/teams/{team_id}") +@require_permission("teams.delete") +async def admin_delete_team( + team_id: str, + _request: Request, + db: Session = Depends(get_db), + user=Depends(get_current_user_with_permissions), +) -> HTMLResponse: + """Delete team via admin UI. + + Args: + team_id: ID of the team to delete + db: Database session + user: Current authenticated user context + + Returns: + HTMLResponse: Success message or error response + """ + if not settings.email_auth_enabled: + return HTMLResponse(content='
Email authentication is disabled
', status_code=403) + + try: + # First-Party + from mcpgateway.services.team_management_service import TeamManagementService # pylint: disable=import-outside-toplevel # pylint: disable=import-outside-toplevel + + team_service = TeamManagementService(db) + + # Get team name for success message + team = await team_service.get_team_by_id(team_id) + team_name = team.name if team else "Unknown" + + # Delete team (get user email from JWT payload) + user_email = get_user_email(user) + await team_service.delete_team(team_id, deleted_by=user_email) + + # Return success message with script to refresh teams list + success_html = f""" +
+

Team "{team_name}" deleted successfully

+ +
+ """ + return HTMLResponse(content=success_html) + + except Exception as e: + LOGGER.error(f"Error deleting team {team_id}: {e}") + return HTMLResponse(content=f'
Error deleting team: {str(e)}
', status_code=400) + + +@admin_router.post("/teams/{team_id}/add-member") +@require_permission("admin.user_management") +async def admin_add_team_member( + team_id: str, + request: Request, + db: Session = Depends(get_db), + user=Depends(get_current_user_with_permissions), +) -> HTMLResponse: + """Add member to team via admin UI. + + Args: + team_id: ID of the team to add member to + request: FastAPI request object + db: Database session + user: Current authenticated user context + + Returns: + HTMLResponse: Success message or error response + """ + if not settings.email_auth_enabled: + return HTMLResponse(content='
Email authentication is disabled
', status_code=403) + + try: + # First-Party + from mcpgateway.services.email_auth_service import EmailAuthService # pylint: disable=import-outside-toplevel # pylint: disable=import-outside-toplevel + from mcpgateway.services.team_management_service import TeamManagementService # pylint: disable=import-outside-toplevel # pylint: disable=import-outside-toplevel + + team_service = TeamManagementService(db) + auth_service = EmailAuthService(db) + + # Check if team exists and validate visibility + team = await team_service.get_team_by_id(team_id) + if not team: + return HTMLResponse(content='
Team not found
', status_code=404) + + # Private teams cannot have members added directly - they need invitations + if team.visibility == "private": + return HTMLResponse(content='
Cannot add members to private teams. Use the invitation system instead.
', status_code=403) + + form = await request.form() + user_email = form.get("user_email") + role = form.get("role", "member") + + if not user_email: + return HTMLResponse(content='
User email is required
', status_code=400) + + # Check if user exists + target_user = await auth_service.get_user_by_email(user_email) + if not target_user: + return HTMLResponse(content=f'
User {user_email} not found
', status_code=400) + + # Add member to team + user_email_from_jwt = get_user_email(user) + await team_service.add_member_to_team(team_id=team_id, user_email=user_email, role=role, invited_by=user_email_from_jwt) + + # Return success message with script to refresh modal + success_html = f""" +
+

Member {user_email} added successfully

+ +
+ """ + return HTMLResponse(content=success_html) + + except Exception as e: + LOGGER.error(f"Error adding member to team {team_id}: {e}") + return HTMLResponse(content=f'
Error adding member: {str(e)}
', status_code=400) + + +@admin_router.post("/teams/{team_id}/update-member-role") +@require_permission("admin.user_management") +async def admin_update_team_member_role( + team_id: str, + request: Request, + db: Session = Depends(get_db), + user=Depends(get_current_user_with_permissions), +) -> HTMLResponse: + """Update team member role via admin UI. + + Args: + team_id: ID of the team containing the member + request: FastAPI request object + db: Database session + user: Current authenticated user context + + Returns: + HTMLResponse: Success message or error response + """ + if not settings.email_auth_enabled: + return HTMLResponse(content='
Email authentication is disabled
', status_code=403) + + try: + # First-Party + from mcpgateway.services.team_management_service import TeamManagementService # pylint: disable=import-outside-toplevel + + team_service = TeamManagementService(db) + + form = await request.form() + user_email = form.get("user_email") + new_role = form.get("role", "member") + + if not user_email: + return HTMLResponse(content='
User email is required
', status_code=400) + + if not new_role: + return HTMLResponse(content='
Role is required
', status_code=400) + + # Update member role + user_email_from_jwt = get_user_email(user) + await team_service.update_member_role(team_id=team_id, user_email=user_email, new_role=new_role, updated_by=user_email_from_jwt) + + # Return success message with auto-close and refresh + success_html = f""" +
+

Role updated successfully for {user_email}

+ +
+ """ + return HTMLResponse(content=success_html) + + except Exception as e: + LOGGER.error(f"Error updating member role in team {team_id}: {e}") + return HTMLResponse(content=f'
Error updating role: {str(e)}
', status_code=400) + + +@admin_router.post("/teams/{team_id}/remove-member") +@require_permission("admin.user_management") +async def admin_remove_team_member( + team_id: str, + request: Request, + db: Session = Depends(get_db), + user=Depends(get_current_user_with_permissions), +) -> HTMLResponse: + """Remove member from team via admin UI. + + Args: + team_id: ID of the team to remove member from + request: FastAPI request object + db: Database session + user: Current authenticated user context + + Returns: + HTMLResponse: Success message or error response + """ + if not settings.email_auth_enabled: + return HTMLResponse(content='
Email authentication is disabled
', status_code=403) + + try: + # First-Party + # First-Party + from mcpgateway.services.team_management_service import TeamManagementService # pylint: disable=import-outside-toplevel + + team_service = TeamManagementService(db) + + form = await request.form() + user_email = form.get("user_email") + + if not user_email: + return HTMLResponse(content='
User email is required
', status_code=400) + + # Remove member from team + user_email_from_jwt = get_user_email(user) + + try: + success = await team_service.remove_member_from_team(team_id=team_id, user_email=user_email, removed_by=user_email_from_jwt) + if not success: + return HTMLResponse(content='
Failed to remove member from team
', status_code=400) + except ValueError as e: + # Handle specific business logic errors (like last owner) + return HTMLResponse(content=f'
{str(e)}
', status_code=400) + + # Return success message with script to refresh modal + success_html = f""" +
+

Member {user_email} removed successfully

+ +
+ """ + return HTMLResponse(content=success_html) + + except Exception as e: + LOGGER.error(f"Error removing member from team {team_id}: {e}") + return HTMLResponse(content=f'
Error removing member: {str(e)}
', status_code=400) + + +# ============================================================================ # +# TEAM JOIN REQUEST ADMIN ROUTES # +# ============================================================================ # + + +@admin_router.post("/teams/{team_id}/join-request") +@require_permission("teams.join") +async def admin_create_join_request( + team_id: str, + request: Request, + db: Session = Depends(get_db), + user=Depends(get_current_user_with_permissions), +) -> HTMLResponse: + """Create a join request for a team via admin UI. + + Args: + team_id: ID of the team to request to join + request: FastAPI request object + db: Database session + user: Authenticated user + + Returns: + HTML response with success message or error + """ + if not getattr(settings, "email_auth_enabled", False): + return HTMLResponse(content='
Email authentication is disabled
', status_code=403) + + try: + # First-Party + from mcpgateway.services.team_management_service import TeamManagementService # pylint: disable=import-outside-toplevel + + team_service = TeamManagementService(db) + user_email = get_user_email(user) + + # Get team to verify it's public + team = await team_service.get_team_by_id(team_id) + if not team: + return HTMLResponse(content='
Team not found
', status_code=404) + + if team.visibility != "public": + return HTMLResponse(content='
Can only request to join public teams
', status_code=400) + + # Check if user is already a member + user_role = await team_service.get_user_role_in_team(user_email, team_id) + if user_role: + return HTMLResponse(content='
You are already a member of this team
', status_code=400) + + # Check if user already has a pending request + existing_requests = await team_service.get_user_join_requests(user_email, team_id) + pending_request = next((req for req in existing_requests if req.status == "pending"), None) + if pending_request: + return HTMLResponse( + content=f""" +
+

You already have a pending request to join this team.

+ +
+ """, + status_code=200, + ) + + # Get form data for optional message + form = await request.form() + message = form.get("message", "") + + # Create join request + join_request = await team_service.create_join_request(team_id=team_id, user_email=user_email, message=message) + + return HTMLResponse( + content=f""" +
+

Join request submitted successfully!

+ +
+ """, + status_code=201, + ) + + except Exception as e: + LOGGER.error(f"Error creating join request for team {team_id}: {e}") + return HTMLResponse(content=f'
Error creating join request: {str(e)}
', status_code=400) + + +@admin_router.delete("/teams/{team_id}/join-request/{request_id}") +@require_permission("teams.join") +async def admin_cancel_join_request( + team_id: str, + request_id: str, + db: Session = Depends(get_db), + user=Depends(get_current_user_with_permissions), +) -> HTMLResponse: + """Cancel a join request via admin UI. + + Args: + team_id: ID of the team + request_id: ID of the join request to cancel + db: Database session + user: Authenticated user + + Returns: + HTML response with updated button state + """ + if not getattr(settings, "email_auth_enabled", False): + return HTMLResponse(content='
Email authentication is disabled
', status_code=403) + + try: + # First-Party + from mcpgateway.services.team_management_service import TeamManagementService # pylint: disable=import-outside-toplevel + + team_service = TeamManagementService(db) + user_email = get_user_email(user) + + # Cancel the join request + success = await team_service.cancel_join_request(request_id, user_email) + if not success: + return HTMLResponse(content='
Failed to cancel join request
', status_code=400) + + # Return the "Request to Join" button + return HTMLResponse( + content=f""" + + """, + status_code=200, + ) + + except Exception as e: + LOGGER.error(f"Error canceling join request {request_id}: {e}") + return HTMLResponse(content=f'
Error canceling join request: {str(e)}
', status_code=400) + + +@admin_router.get("/teams/{team_id}/join-requests") +@require_permission("teams.manage_members") +async def admin_list_join_requests( + team_id: str, + request: Request, + db: Session = Depends(get_db), + user=Depends(get_current_user_with_permissions), +) -> HTMLResponse: + """List join requests for a team via admin UI. + + Args: + team_id: ID of the team + request: FastAPI request object + db: Database session + user: Authenticated user + + Returns: + HTML response with join requests list + """ + if not getattr(settings, "email_auth_enabled", False): + return HTMLResponse(content='
Email authentication is disabled
', status_code=403) + + try: + # First-Party + from mcpgateway.services.team_management_service import TeamManagementService # pylint: disable=import-outside-toplevel + + team_service = TeamManagementService(db) + user_email = get_user_email(user) + request.scope.get("root_path", "") + + # Get team and verify ownership + team = await team_service.get_team_by_id(team_id) + if not team: + return HTMLResponse(content='
Team not found
', status_code=404) + + user_role = await team_service.get_user_role_in_team(user_email, team_id) + if user_role != "owner": + return HTMLResponse(content='
Only team owners can view join requests
', status_code=403) + + # Get join requests + join_requests = await team_service.list_join_requests(team_id) + + if not join_requests: + return HTMLResponse( + content=""" +
+

No pending join requests

+
+ """, + status_code=200, + ) + + requests_html = "" + for req in join_requests: + requests_html += f""" +
+
+

{req.user_email}

+

Requested: {req.requested_at.strftime('%Y-%m-%d %H:%M') if req.requested_at else 'Unknown'}

+ {f'

Message: {req.message}

' if req.message else ''} + {req.status.upper()} +
+
+ + +
+
+ """ + + return HTMLResponse( + content=f""" +
+

Join Requests for {team.name}

+ {requests_html} +
+ """, + status_code=200, + ) + + except Exception as e: + LOGGER.error(f"Error listing join requests for team {team_id}: {e}") + return HTMLResponse(content=f'
Error loading join requests: {str(e)}
', status_code=400) + + +@admin_router.post("/teams/{team_id}/join-requests/{request_id}/approve") +@require_permission("teams.manage_members") +async def admin_approve_join_request( + team_id: str, + request_id: str, + db: Session = Depends(get_db), + user=Depends(get_current_user_with_permissions), +) -> HTMLResponse: + """Approve a join request via admin UI. + + Args: + team_id: ID of the team + request_id: ID of the join request to approve + db: Database session + user: Authenticated user + + Returns: + HTML response with success message + """ + if not getattr(settings, "email_auth_enabled", False): + return HTMLResponse(content='
Email authentication is disabled
', status_code=403) + + try: + # First-Party + from mcpgateway.services.team_management_service import TeamManagementService # pylint: disable=import-outside-toplevel + + team_service = TeamManagementService(db) + user_email = get_user_email(user) + + # Verify team ownership + user_role = await team_service.get_user_role_in_team(user_email, team_id) + if user_role != "owner": + return HTMLResponse(content='
Only team owners can approve join requests
', status_code=403) + + # Approve join request + member = await team_service.approve_join_request(request_id, approved_by=user_email) + if not member: + return HTMLResponse(content='
Join request not found
', status_code=404) + + return HTMLResponse( + content=f""" +
+

Join request approved! {member.user_email} is now a team member.

+ +
+ """, + status_code=200, + ) + + except Exception as e: + LOGGER.error(f"Error approving join request {request_id}: {e}") + return HTMLResponse(content=f'
Error approving join request: {str(e)}
', status_code=400) + + +@admin_router.post("/teams/{team_id}/join-requests/{request_id}/reject") +@require_permission("teams.manage_members") +async def admin_reject_join_request( + team_id: str, + request_id: str, + db: Session = Depends(get_db), + user=Depends(get_current_user_with_permissions), +) -> HTMLResponse: + """Reject a join request via admin UI. + + Args: + team_id: ID of the team + request_id: ID of the join request to reject + db: Database session + user: Authenticated user + + Returns: + HTML response with success message + """ + if not getattr(settings, "email_auth_enabled", False): + return HTMLResponse(content='
Email authentication is disabled
', status_code=403) + + try: + # First-Party + from mcpgateway.services.team_management_service import TeamManagementService # pylint: disable=import-outside-toplevel + + team_service = TeamManagementService(db) + user_email = get_user_email(user) + + # Verify team ownership + user_role = await team_service.get_user_role_in_team(user_email, team_id) + if user_role != "owner": + return HTMLResponse(content='
Only team owners can reject join requests
', status_code=403) + + # Reject join request + success = await team_service.reject_join_request(request_id, rejected_by=user_email) + if not success: + return HTMLResponse(content='
Join request not found
', status_code=404) + + return HTMLResponse( + content=f""" +
+

Join request rejected.

+ +
+ """, + status_code=200, + ) + + except Exception as e: + LOGGER.error(f"Error rejecting join request {request_id}: {e}") + return HTMLResponse(content=f'
Error rejecting join request: {str(e)}
', status_code=400) + + +# ============================================================================ # +# USER MANAGEMENT ADMIN ROUTES # +# ============================================================================ # + + +@admin_router.get("/users") +@require_permission("admin.user_management") +async def admin_list_users( + request: Request, + db: Session = Depends(get_db), + user=Depends(get_current_user_with_permissions), +) -> HTMLResponse: + """List users for admin UI via HTMX. + + Args: + request: FastAPI request object + db: Database session + user: Current authenticated user context + + Returns: + HTMLResponse: HTML response with users list + """ + try: + if not settings.email_auth_enabled: + return HTMLResponse(content='

Email authentication is disabled. User management requires email auth.

', status_code=200) + + # Get root_path from request + root_path = request.scope.get("root_path", "") + + # First-Party + from mcpgateway.services.email_auth_service import EmailAuthService # pylint: disable=import-outside-toplevel + + auth_service = EmailAuthService(db) + + # List all users (admin endpoint) + users = await auth_service.list_users() + + # Check if JSON response is requested (for dropdown population) + accept_header = request.headers.get("accept", "") + is_json_request = "application/json" in accept_header or request.query_params.get("format") == "json" + + if is_json_request: + # Return JSON for dropdown population + users_data = [] + for user_obj in users: + users_data.append({"email": user_obj.email, "full_name": user_obj.full_name, "is_active": user_obj.is_active, "is_admin": user_obj.is_admin}) + return JSONResponse(content={"users": users_data}) + + # Generate HTML for users + users_html = "" + for user_obj in users: + status_class = "text-green-600" if user_obj.is_active else "text-red-600" + status_text = "Active" if user_obj.is_active else "Inactive" + admin_badge = 'Admin' if user_obj.is_admin else "" + + users_html += f""" +
+
+
+
+

{user_obj.full_name or 'N/A'}

+ {admin_badge} + {status_text} +
+

📧 {user_obj.email}

+

🔐 Provider: {user_obj.auth_provider}

+

📅 Created: {user_obj.created_at.strftime('%Y-%m-%d %H:%M')}

+
+
+ + {('') if not user_obj.is_active else ( + '')} + +
+
+
+ """ + + if not users_html: + users_html = '

No users found.

' + + return HTMLResponse(content=users_html) + + except Exception as e: + LOGGER.error(f"Error listing users for admin {user}: {e}") + return HTMLResponse(content=f'

Error loading users: {str(e)}

', status_code=200) + + +@admin_router.post("/users") +@require_permission("admin.user_management") +async def admin_create_user( + request: Request, + db: Session = Depends(get_db), + user=Depends(get_current_user_with_permissions), +) -> HTMLResponse: + """Create a new user via admin UI. + + Args: + request: FastAPI request object + db: Database session + user: Current authenticated user context + + Returns: + HTMLResponse: Success message or error response + """ + try: + form = await request.form() + + # First-Party + from mcpgateway.services.email_auth_service import EmailAuthService # pylint: disable=import-outside-toplevel + + auth_service = EmailAuthService(db) + + # Create new user + new_user = await auth_service.create_user( + email=str(form.get("email", "")), password=str(form.get("password", "")), full_name=str(form.get("full_name", "")), is_admin=form.get("is_admin") == "on", auth_provider="local" + ) + + LOGGER.info(f"Admin {user} created user: {new_user.email}") + + # Generate HTML for the new user + status_class = "text-green-600" + status_text = "Active" + admin_badge = 'Admin' if new_user.is_admin else "" + + user_html = f""" +
+
+
+
+

{new_user.full_name or 'N/A'}

+ {admin_badge} + {status_text} +
+

📧 {new_user.email}

+

🔐 Provider: {new_user.auth_provider}

+

📅 Created: {new_user.created_at.strftime('%Y-%m-%d %H:%M')}

+
+
+ + +
+
+
+ """ + + return HTMLResponse(content=user_html, status_code=201) + + except Exception as e: + LOGGER.error(f"Error creating user by admin {user}: {e}") + return HTMLResponse(content=f'
Error creating user: {str(e)}
', status_code=400) + + +@admin_router.get("/users/{user_email}/edit") +@require_permission("admin.user_management") +async def admin_get_user_edit( + user_email: str, + _request: Request, + db: Session = Depends(get_db), + _user=Depends(get_current_user_with_permissions), +) -> HTMLResponse: + """Get user edit form via admin UI. + + Args: + user_email: Email of user to edit + db: Database session + + Returns: + HTMLResponse: User edit form HTML + """ + if not settings.email_auth_enabled: + return HTMLResponse(content='
Email authentication is disabled
', status_code=403) + + try: + # First-Party + from mcpgateway.services.email_auth_service import EmailAuthService # pylint: disable=import-outside-toplevel # pylint: disable=import-outside-toplevel + + auth_service = EmailAuthService(db) + + # URL decode the email + + decoded_email = urllib.parse.unquote(user_email) + + user_obj = await auth_service.get_user_by_email(decoded_email) + if not user_obj: + return HTMLResponse(content='
User not found
', status_code=404) + + # Create edit form HTML + edit_form = f""" +
+

Edit User

+
+
+ + +
+
+ + +
+
+ +
+
+ + +
+
+ + +
+
+
+ """ + return HTMLResponse(content=edit_form) + + except Exception as e: + LOGGER.error(f"Error getting user edit form for {user_email}: {e}") + return HTMLResponse(content=f'
Error loading user: {str(e)}
', status_code=500) + + +@admin_router.post("/users/{user_email}/update") +@require_permission("admin.user_management") +async def admin_update_user( + user_email: str, + request: Request, + db: Session = Depends(get_db), + _user=Depends(get_current_user_with_permissions), +) -> HTMLResponse: + """Update user via admin UI. + + Args: + user_email: Email of user to update + request: FastAPI request object + db: Database session + + Returns: + HTMLResponse: Success message or error response + """ + if not settings.email_auth_enabled: + return HTMLResponse(content='
Email authentication is disabled
', status_code=403) + + try: + # First-Party + from mcpgateway.services.email_auth_service import EmailAuthService # pylint: disable=import-outside-toplevel # pylint: disable=import-outside-toplevel + + auth_service = EmailAuthService(db) + + # URL decode the email + + decoded_email = urllib.parse.unquote(user_email) + + form = await request.form() + full_name = form.get("full_name") + is_admin = form.get("is_admin") == "on" + password = form.get("password") + + # Check if trying to demote platform admin + user_obj = await auth_service.get_user_by_email(decoded_email) + if user_obj and user_obj.email == settings.platform_admin_email and user_obj.is_admin and not is_admin: + return HTMLResponse(content='
Cannot remove administrator privileges from platform admin
', status_code=400) + + # Update user + await auth_service.update_user(email=decoded_email, full_name=full_name, is_admin=is_admin, password=password if password else None) + + # Return success message with auto-close and refresh + success_html = """ +
+

User updated successfully

+ +
+ """ + return HTMLResponse(content=success_html) + + except Exception as e: + LOGGER.error(f"Error updating user {user_email}: {e}") + return HTMLResponse(content=f'
Error updating user: {str(e)}
', status_code=400) + + +@admin_router.post("/users/{user_email}/activate") +@require_permission("admin.user_management") +async def admin_activate_user( + user_email: str, + _request: Request, + db: Session = Depends(get_db), + user=Depends(get_current_user_with_permissions), +) -> HTMLResponse: + """Activate user via admin UI. + + Args: + user_email: Email of user to activate + db: Database session + user: Current authenticated user context + + Returns: + HTMLResponse: Success message or error response + """ + if not settings.email_auth_enabled: + return HTMLResponse(content='
Email authentication is disabled
', status_code=403) try: - await gateway_service.toggle_gateway_status(db, gateway_id, activate) - except Exception as e: - LOGGER.error(f"Error toggling gateway status: {e}") + # First-Party + from mcpgateway.services.email_auth_service import EmailAuthService # pylint: disable=import-outside-toplevel # pylint: disable=import-outside-toplevel + + auth_service = EmailAuthService(db) + + # URL decode the email + + decoded_email = urllib.parse.unquote(user_email) + + # Get current user email from JWT (used for logging purposes) + get_user_email(user) + + user_obj = await auth_service.activate_user(decoded_email) + user_html = f""" +
+
+
+
+

{user_obj.full_name}

+ Active +
+

📧 {user_obj.email}

+

🔐 Provider: {user_obj.auth_provider}

+

📅 Created: {user_obj.created_at.strftime('%Y-%m-%d %H:%M') if user_obj.created_at else 'Unknown'}

+
+
+ + + +
+
+
+ """ + return HTMLResponse(content=user_html) - root_path = request.scope.get("root_path", "") - if is_inactive_checked.lower() == "true": - return RedirectResponse(f"{root_path}/admin/?include_inactive=true#gateways", status_code=303) - return RedirectResponse(f"{root_path}/admin#gateways", status_code=303) + except Exception as e: + LOGGER.error(f"Error activating user {user_email}: {e}") + return HTMLResponse(content=f'
Error activating user: {str(e)}
', status_code=400) -@admin_router.get("/", name="admin_home", response_class=HTMLResponse) -async def admin_ui( - request: Request, - include_inactive: bool = False, +@admin_router.post("/users/{user_email}/deactivate") +@require_permission("admin.user_management") +async def admin_deactivate_user( + user_email: str, + _request: Request, db: Session = Depends(get_db), - user: str = Depends(require_basic_auth), - jwt_token: str = Depends(get_jwt_token), -) -> Any: + user=Depends(get_current_user_with_permissions), +) -> HTMLResponse: + """Deactivate user via admin UI. + + Args: + user_email: Email of user to deactivate + db: Database session + user: Current authenticated user context + + Returns: + HTMLResponse: Success message or error response """ - Render the admin dashboard HTML page. + if not settings.email_auth_enabled: + return HTMLResponse(content='
Email authentication is disabled
', status_code=403) - This endpoint serves as the main entry point to the admin UI. It fetches data for - servers, tools, resources, prompts, gateways, and roots from their respective - services, then renders the admin dashboard template with this data. + try: + # First-Party + from mcpgateway.services.email_auth_service import EmailAuthService # pylint: disable=import-outside-toplevel # pylint: disable=import-outside-toplevel + + auth_service = EmailAuthService(db) + + # URL decode the email + + decoded_email = urllib.parse.unquote(user_email) + + # Get current user email from JWT + current_user_email = get_user_email(user) + + # Protect platform admin + if decoded_email == settings.platform_admin_email: + return HTMLResponse(content='
Cannot deactivate the platform administrator
', status_code=400) + + # Prevent self-deactivation + if decoded_email == current_user_email: + return HTMLResponse(content='
Cannot deactivate your own account
', status_code=400) + + user_obj = await auth_service.deactivate_user(decoded_email) + user_html = f""" +
+
+
+
+

{user_obj.full_name}

+ Inactive +
+

📧 {user_obj.email}

+

🔐 Provider: {user_obj.auth_provider}

+

📅 Created: {user_obj.created_at.strftime('%Y-%m-%d %H:%M') if user_obj.created_at else 'Unknown'}

+
+
+ + + +
+
+
+ """ + return HTMLResponse(content=user_html) - The endpoint also sets a JWT token as a cookie for authentication in subsequent - requests. This token is HTTP-only for security reasons. + except Exception as e: + LOGGER.error(f"Error deactivating user {user_email}: {e}") + return HTMLResponse(content=f'
Error deactivating user: {str(e)}
', status_code=400) + + +@admin_router.delete("/users/{user_email}") +@require_permission("admin.user_management") +async def admin_delete_user( + user_email: str, + _request: Request, + db: Session = Depends(get_db), + user=Depends(get_current_user_with_permissions), +) -> HTMLResponse: + """Delete user via admin UI. Args: - request (Request): FastAPI request object. - include_inactive (bool): Whether to include inactive items in all listings. - db (Session): Database session dependency. - user (str): Authenticated user from basic auth dependency. - jwt_token (str): JWT token for authentication. + user_email: Email address of user to delete + _request: FastAPI request object (unused) + db: Database session + user: Current authenticated user context Returns: - Any: Rendered HTML template for the admin dashboard. - - Examples: - >>> import asyncio - >>> from unittest.mock import AsyncMock, MagicMock, patch - >>> from fastapi import Request - >>> from fastapi.responses import HTMLResponse - >>> from mcpgateway.schemas import ServerRead, ToolRead, ResourceRead, PromptRead, GatewayRead, ServerMetrics, ToolMetrics, ResourceMetrics, PromptMetrics - >>> from datetime import datetime, timezone - >>> - >>> mock_db = MagicMock() - >>> mock_user = "admin_user" - >>> mock_jwt = "fake.jwt.token" - >>> - >>> # Mock services to return empty lists for simplicity in doctest - >>> original_list_servers = server_service.list_servers - >>> original_list_tools = tool_service.list_tools - >>> original_list_resources = resource_service.list_resources - >>> original_list_prompts = prompt_service.list_prompts - >>> original_list_gateways = gateway_service.list_gateways - >>> original_list_roots = root_service.list_roots - >>> - >>> server_service.list_servers = AsyncMock(return_value=[]) - >>> tool_service.list_tools = AsyncMock(return_value=[]) - >>> resource_service.list_resources = AsyncMock(return_value=[]) - >>> prompt_service.list_prompts = AsyncMock(return_value=[]) - >>> gateway_service.list_gateways = AsyncMock(return_value=[]) - >>> root_service.list_roots = AsyncMock(return_value=[]) - >>> - >>> # Mock request and template rendering - >>> mock_request = MagicMock(spec=Request, scope={"root_path": "/admin_prefix"}) - >>> mock_request.app.state.templates = MagicMock() - >>> mock_template_response = HTMLResponse("Admin UI") - >>> mock_request.app.state.templates.TemplateResponse.return_value = mock_template_response - >>> - >>> # Test basic rendering - >>> async def test_admin_ui_basic_render(): - ... response = await admin_ui(mock_request, False, mock_db, mock_user, mock_jwt) - ... return isinstance(response, HTMLResponse) and response.status_code == 200 and "jwt_token" in response.headers.get("set-cookie", "") - >>> - >>> asyncio.run(test_admin_ui_basic_render()) - True - >>> - >>> # Test with include_inactive=True - >>> async def test_admin_ui_include_inactive(): - ... response = await admin_ui(mock_request, True, mock_db, mock_user, mock_jwt) - ... # Verify list methods were called with include_inactive=True - ... server_service.list_servers.assert_called_with(mock_db, include_inactive=True) - ... return isinstance(response, HTMLResponse) - >>> - >>> asyncio.run(test_admin_ui_include_inactive()) - True - >>> - >>> # Test with populated data (mocking a few items) - >>> mock_server = ServerRead(id="s1", name="S1", description="d", created_at=datetime.now(timezone.utc), updated_at=datetime.now(timezone.utc), is_active=True, associated_tools=[], associated_resources=[], associated_prompts=[], icon="i", metrics=ServerMetrics(total_executions=0, successful_executions=0, failed_executions=0, failure_rate=0.0, min_response_time=0.0, max_response_time=0.0, avg_response_time=0.0, last_execution_time=None)) - >>> mock_tool = ToolRead( - ... id="t1", name="T1", original_name="T1", url="http://t1.com", description="d", - ... created_at=datetime.now(timezone.utc), updated_at=datetime.now(timezone.utc), - ... enabled=True, reachable=True, gateway_slug="default", custom_name_slug="t1", - ... request_type="GET", integration_type="MCP", headers={}, input_schema={}, - ... annotations={}, jsonpath_filter=None, auth=None, execution_count=0, - ... metrics=ToolMetrics( - ... total_executions=0, successful_executions=0, failed_executions=0, - ... failure_rate=0.0, min_response_time=0.0, max_response_time=0.0, - ... avg_response_time=0.0, last_execution_time=None - ... ), - ... gateway_id=None, - ... customName="T1", - ... tags=[] - ... ) - >>> server_service.list_servers = AsyncMock(return_value=[mock_server]) - >>> tool_service.list_tools = AsyncMock(return_value=[mock_tool]) - >>> - >>> async def test_admin_ui_with_data(): - ... response = await admin_ui(mock_request, False, mock_db, mock_user, mock_jwt) - ... # Check if template context was populated (indirectly via mock calls) - ... assert mock_request.app.state.templates.TemplateResponse.call_count >= 1 - ... context = mock_request.app.state.templates.TemplateResponse.call_args[0][2] - ... return len(context['servers']) == 1 and len(context['tools']) == 1 - >>> - >>> asyncio.run(test_admin_ui_with_data()) - True - >>> - >>> # Test exception handling during data fetching - >>> server_service.list_servers = AsyncMock(side_effect=Exception("DB error")) - >>> async def test_admin_ui_exception_handled(): - ... try: - ... response = await admin_ui(mock_request, False, mock_db, mock_user, mock_jwt) - ... return False # Should not reach here if exception is properly raised - ... except Exception as e: - ... return str(e) == "DB error" - >>> - >>> asyncio.run(test_admin_ui_exception_handled()) - True - >>> - >>> # Restore original methods - >>> server_service.list_servers = original_list_servers - >>> tool_service.list_tools = original_list_tools - >>> resource_service.list_resources = original_list_resources - >>> prompt_service.list_prompts = original_list_prompts - >>> gateway_service.list_gateways = original_list_gateways - >>> root_service.list_roots = original_list_roots + HTMLResponse: Success/error message """ - LOGGER.debug(f"User {user} accessed the admin UI") - tools = [ - tool.model_dump(by_alias=True) for tool in sorted(await tool_service.list_tools(db, include_inactive=include_inactive), key=lambda t: ((t.url or "").lower(), (t.original_name or "").lower())) - ] - servers = [server.model_dump(by_alias=True) for server in await server_service.list_servers(db, include_inactive=include_inactive)] - resources = [resource.model_dump(by_alias=True) for resource in await resource_service.list_resources(db, include_inactive=include_inactive)] - prompts = [prompt.model_dump(by_alias=True) for prompt in await prompt_service.list_prompts(db, include_inactive=include_inactive)] - gateways_raw = await gateway_service.list_gateways(db, include_inactive=include_inactive) - gateways = [gateway.model_dump(by_alias=True) for gateway in gateways_raw] + if not settings.email_auth_enabled: + return HTMLResponse(content='
Email authentication is disabled
', status_code=403) - roots = [root.model_dump(by_alias=True) for root in await root_service.list_roots()] + try: + # First-Party + from mcpgateway.services.email_auth_service import EmailAuthService # pylint: disable=import-outside-toplevel # pylint: disable=import-outside-toplevel - # Load A2A agents if enabled - a2a_agents = [] - if a2a_service and settings.mcpgateway_a2a_enabled: - a2a_agents_raw = await a2a_service.list_agents(db, include_inactive=include_inactive) - a2a_agents = [agent.model_dump(by_alias=True) for agent in a2a_agents_raw] + auth_service = EmailAuthService(db) - root_path = settings.app_root_path - max_name_length = settings.validation_max_name_length - response = request.app.state.templates.TemplateResponse( - request, - "admin.html", - { - "request": request, - "servers": servers, - "tools": tools, - "resources": resources, - "prompts": prompts, - "gateways": gateways, - "a2a_agents": a2a_agents, - "roots": roots, - "include_inactive": include_inactive, - "root_path": root_path, - "max_name_length": max_name_length, - "gateway_tool_name_separator": settings.gateway_tool_name_separator, - "bulk_import_max_tools": settings.mcpgateway_bulk_import_max_tools, - "a2a_enabled": settings.mcpgateway_a2a_enabled, - }, - ) + # URL decode the email - # Use secure cookie utility for proper security attributes - set_auth_cookie(response, jwt_token, remember_me=False) - return response + decoded_email = urllib.parse.unquote(user_email) + + # Get current user email from JWT + current_user_email = get_user_email(user) + + # Protect platform admin + if decoded_email == settings.platform_admin_email: + return HTMLResponse(content='
Cannot delete the platform administrator
', status_code=400) + + # Prevent self-deletion + if decoded_email == current_user_email: + return HTMLResponse(content='
Cannot delete your own account
', status_code=400) + + await auth_service.delete_user(decoded_email) + + # Return empty content to remove the user from the list + return HTMLResponse(content="", status_code=200) + + except Exception as e: + LOGGER.error(f"Error deleting user {user_email}: {e}") + return HTMLResponse(content=f'
Error deleting user: {str(e)}
', status_code=400) @admin_router.get("/tools", response_model=List[ToolRead]) async def admin_list_tools( include_inactive: bool = False, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> List[Dict[str, Any]]: """ List tools for the admin UI with an option to include inactive tools. @@ -1634,7 +3752,7 @@ async def admin_list_tools( >>> from datetime import datetime, timezone >>> >>> mock_db = MagicMock() - >>> mock_user = "test_user" + >>> mock_user = {"email": "test_user", "db": mock_db} >>> >>> # Mock tool data >>> mock_tool = ToolRead( @@ -1667,9 +3785,9 @@ async def admin_list_tools( ... tags=[] ... ) # Added gateway_id=None >>> - >>> # Mock the tool_service.list_tools method - >>> original_list_tools = tool_service.list_tools - >>> tool_service.list_tools = AsyncMock(return_value=[mock_tool]) + >>> # Mock the tool_service.list_tools_for_user method + >>> original_list_tools_for_user = tool_service.list_tools_for_user + >>> tool_service.list_tools_for_user = AsyncMock(return_value=[mock_tool]) >>> >>> # Test listing active tools >>> async def test_admin_list_tools_active(): @@ -1695,7 +3813,7 @@ async def admin_list_tools( ... customName="Inactive Tool", ... tags=[] ... ) - >>> tool_service.list_tools = AsyncMock(return_value=[mock_tool, mock_inactive_tool]) + >>> tool_service.list_tools_for_user = AsyncMock(return_value=[mock_tool, mock_inactive_tool]) >>> async def test_admin_list_tools_all(): ... result = await admin_list_tools(include_inactive=True, db=mock_db, user=mock_user) ... return len(result) == 2 and not result[1]['enabled'] @@ -1704,7 +3822,7 @@ async def admin_list_tools( True >>> >>> # Test empty list - >>> tool_service.list_tools = AsyncMock(return_value=[]) + >>> tool_service.list_tools_for_user = AsyncMock(return_value=[]) >>> async def test_admin_list_tools_empty(): ... result = await admin_list_tools(include_inactive=False, db=mock_db, user=mock_user) ... return result == [] @@ -1713,7 +3831,7 @@ async def admin_list_tools( True >>> >>> # Test exception handling - >>> tool_service.list_tools = AsyncMock(side_effect=Exception("Tool list error")) + >>> tool_service.list_tools_for_user = AsyncMock(side_effect=Exception("Tool list error")) >>> async def test_admin_list_tools_exception(): ... try: ... await admin_list_tools(False, mock_db, mock_user) @@ -1725,16 +3843,17 @@ async def admin_list_tools( True >>> >>> # Restore original method - >>> tool_service.list_tools = original_list_tools + >>> tool_service.list_tools_for_user = original_list_tools_for_user """ - LOGGER.debug(f"User {user} requested tool list") - tools = await tool_service.list_tools(db, include_inactive=include_inactive) + LOGGER.debug(f"User {get_user_email(user)} requested tool list") + user_email = get_user_email(user) + tools = await tool_service.list_tools_for_user(db, user_email, include_inactive=include_inactive) return [tool.model_dump(by_alias=True) for tool in tools] @admin_router.get("/tools/{tool_id}", response_model=ToolRead) -async def admin_get_tool(tool_id: str, db: Session = Depends(get_db), user: str = Depends(require_auth)) -> Dict[str, Any]: +async def admin_get_tool(tool_id: str, db: Session = Depends(get_db), user=Depends(get_current_user_with_permissions)) -> Dict[str, Any]: """ Retrieve specific tool details for the admin UI. @@ -1763,7 +3882,7 @@ async def admin_get_tool(tool_id: str, db: Session = Depends(get_db), user: str >>> from fastapi import HTTPException >>> >>> mock_db = MagicMock() - >>> mock_user = "test_user" + >>> mock_user = {"email": "test_user", "db": mock_db} >>> tool_id = "test-tool-id" >>> >>> # Mock tool data @@ -1822,7 +3941,7 @@ async def admin_get_tool(tool_id: str, db: Session = Depends(get_db), user: str >>> # Restore original method >>> tool_service.get_tool = original_get_tool """ - LOGGER.debug(f"User {user} requested details for tool ID {tool_id}") + LOGGER.debug(f"User {get_user_email(user)} requested details for tool ID {tool_id}") try: tool = await tool_service.get_tool(db, tool_id) return tool.model_dump(by_alias=True) @@ -1839,7 +3958,7 @@ async def admin_get_tool(tool_id: str, db: Session = Depends(get_db), user: str async def admin_add_tool( request: Request, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> JSONResponse: """ Add a tool via the admin UI with error handling. @@ -1882,7 +4001,7 @@ async def admin_add_tool( >>> import json >>> mock_db = MagicMock() - >>> mock_user = "test_user" + >>> mock_user = {"email": "test_user", "db": mock_db} >>> # Happy path: Add a new tool successfully >>> form_data_success = FormData([ @@ -1961,7 +4080,7 @@ async def admin_add_tool( >>> tool_service.register_tool = original_register_tool """ - LOGGER.debug(f"User {user} is adding a new tool") + LOGGER.debug(f"User {get_user_email(user)} is adding a new tool") form = await request.form() LOGGER.debug(f"Received form data: {dict(form)}") @@ -2041,7 +4160,7 @@ async def admin_edit_tool( tool_id: str, request: Request, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> Response: """ Edit a tool via the admin UI. @@ -2091,7 +4210,7 @@ async def admin_edit_tool( >>> import json >>> mock_db = MagicMock() - >>> mock_user = "test_user" + >>> mock_user = {"email": "test_user", "db": mock_db} >>> tool_id = "tool-to-edit" >>> # Happy path: Edit tool successfully @@ -2215,7 +4334,7 @@ async def admin_edit_tool( >>> tool_service.update_tool = original_update_tool """ - LOGGER.debug(f"User {user} is editing tool ID {tool_id}") + LOGGER.debug(f"User {get_user_email(user)} is editing tool ID {tool_id}") form = await request.form() # Parse tags from comma-separated string tags_str = str(form.get("tags", "")) @@ -2282,7 +4401,7 @@ async def admin_edit_tool( @admin_router.post("/tools/{tool_id}/delete") -async def admin_delete_tool(tool_id: str, request: Request, db: Session = Depends(get_db), user: str = Depends(require_auth)) -> RedirectResponse: +async def admin_delete_tool(tool_id: str, request: Request, db: Session = Depends(get_db), user=Depends(get_current_user_with_permissions)) -> RedirectResponse: """ Delete a tool via the admin UI. @@ -2308,7 +4427,7 @@ async def admin_delete_tool(tool_id: str, request: Request, db: Session = Depend >>> from starlette.datastructures import FormData >>> >>> mock_db = MagicMock() - >>> mock_user = "test_user" + >>> mock_user = {"email": "test_user", "db": mock_db} >>> tool_id = "tool-to-delete" >>> >>> # Happy path: Delete tool @@ -2353,7 +4472,7 @@ async def admin_delete_tool(tool_id: str, request: Request, db: Session = Depend >>> # Restore original method >>> tool_service.delete_tool = original_delete_tool """ - LOGGER.debug(f"User {user} is deleting tool ID {tool_id}") + LOGGER.debug(f"User {get_user_email(user)} is deleting tool ID {tool_id}") try: await tool_service.delete_tool(db, tool_id) except Exception as e: @@ -2373,7 +4492,7 @@ async def admin_toggle_tool( tool_id: str, request: Request, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> RedirectResponse: """ Toggle a tool's active status via the admin UI. @@ -2401,7 +4520,7 @@ async def admin_toggle_tool( >>> from starlette.datastructures import FormData >>> >>> mock_db = MagicMock() - >>> mock_user = "test_user" + >>> mock_user = {"email": "test_user", "db": mock_db} >>> tool_id = "tool-to-toggle" >>> >>> # Happy path: Activate tool @@ -2458,7 +4577,7 @@ async def admin_toggle_tool( >>> # Restore original method >>> tool_service.toggle_tool_status = original_toggle_tool_status """ - LOGGER.debug(f"User {user} is toggling tool ID {tool_id}") + LOGGER.debug(f"User {get_user_email(user)} is toggling tool ID {tool_id}") form = await request.form() activate = str(form.get("activate", "true")).lower() == "true" is_inactive_checked = str(form.get("is_inactive_checked", "false")) @@ -2474,7 +4593,7 @@ async def admin_toggle_tool( @admin_router.get("/gateways/{gateway_id}", response_model=GatewayRead) -async def admin_get_gateway(gateway_id: str, db: Session = Depends(get_db), user: str = Depends(require_auth)) -> Dict[str, Any]: +async def admin_get_gateway(gateway_id: str, db: Session = Depends(get_db), user=Depends(get_current_user_with_permissions)) -> Dict[str, Any]: """Get gateway details for the admin UI. Args: @@ -2498,7 +4617,7 @@ async def admin_get_gateway(gateway_id: str, db: Session = Depends(get_db), user >>> from fastapi import HTTPException >>> >>> mock_db = MagicMock() - >>> mock_user = "test_user" + >>> mock_user = {"email": "test_user", "db": mock_db} >>> gateway_id = "test-gateway-id" >>> >>> # Mock gateway data @@ -2550,7 +4669,7 @@ async def admin_get_gateway(gateway_id: str, db: Session = Depends(get_db), user >>> # Restore original method >>> gateway_service.get_gateway = original_get_gateway """ - LOGGER.debug(f"User {user} requested details for gateway ID {gateway_id}") + LOGGER.debug(f"User {get_user_email(user)} requested details for gateway ID {gateway_id}") try: gateway = await gateway_service.get_gateway(db, gateway_id) return gateway.model_dump(by_alias=True) @@ -2562,7 +4681,7 @@ async def admin_get_gateway(gateway_id: str, db: Session = Depends(get_db), user @admin_router.post("/gateways") -async def admin_add_gateway(request: Request, db: Session = Depends(get_db), user: str = Depends(require_auth)) -> JSONResponse: +async def admin_add_gateway(request: Request, db: Session = Depends(get_db), user=Depends(get_current_user_with_permissions)) -> JSONResponse: """Add a gateway via the admin UI. Expects form fields: @@ -2592,7 +4711,7 @@ async def admin_add_gateway(request: Request, db: Session = Depends(get_db), use >>> import json # Added import for json.loads >>> >>> mock_db = MagicMock() - >>> mock_user = "test_user" + >>> mock_user = {"email": "test_user", "db": mock_db} >>> >>> # Happy path: Add a new gateway successfully with basic auth details >>> form_data_success = FormData([ @@ -2672,7 +4791,7 @@ async def admin_add_gateway(request: Request, db: Session = Depends(get_db), use >>> # Restore original method >>> gateway_service.register_gateway = original_register_gateway """ - LOGGER.debug(f"User {user} is adding a new gateway") + LOGGER.debug(f"User {get_user_email(user)} is adding a new gateway") form = await request.form() try: # Parse tags from comma-separated string @@ -2792,7 +4911,7 @@ async def admin_edit_gateway( gateway_id: str, request: Request, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> JSONResponse: """Edit a gateway via the admin UI. @@ -2820,7 +4939,7 @@ async def admin_edit_gateway( >>> from pydantic import ValidationError >>> >>> mock_db = MagicMock() - >>> mock_user = "test_user" + >>> mock_user = {"email": "test_user", "db": mock_db} >>> gateway_id = "gateway-to-edit" >>> >>> # Happy path: Edit gateway successfully @@ -2893,7 +5012,7 @@ async def admin_edit_gateway( >>> # Restore original method >>> gateway_service.update_gateway = original_update_gateway """ - LOGGER.debug(f"User {user} is editing gateway ID {gateway_id}") + LOGGER.debug(f"User {get_user_email(user)} is editing gateway ID {gateway_id}") form = await request.form() try: # Parse tags from comma-separated string @@ -2971,7 +5090,7 @@ async def admin_edit_gateway( @admin_router.post("/gateways/{gateway_id}/delete") -async def admin_delete_gateway(gateway_id: str, request: Request, db: Session = Depends(get_db), user: str = Depends(require_auth)) -> RedirectResponse: +async def admin_delete_gateway(gateway_id: str, request: Request, db: Session = Depends(get_db), user=Depends(get_current_user_with_permissions)) -> RedirectResponse: """ Delete a gateway via the admin UI. @@ -2997,7 +5116,7 @@ async def admin_delete_gateway(gateway_id: str, request: Request, db: Session = >>> from starlette.datastructures import FormData >>> >>> mock_db = MagicMock() - >>> mock_user = "test_user" + >>> mock_user = {"email": "test_user", "db": mock_db} >>> gateway_id = "gateway-to-delete" >>> >>> # Happy path: Delete gateway @@ -3042,7 +5161,7 @@ async def admin_delete_gateway(gateway_id: str, request: Request, db: Session = >>> # Restore original method >>> gateway_service.delete_gateway = original_delete_gateway """ - LOGGER.debug(f"User {user} is deleting gateway ID {gateway_id}") + LOGGER.debug(f"User {get_user_email(user)} is deleting gateway ID {gateway_id}") try: await gateway_service.delete_gateway(db, gateway_id) except Exception as e: @@ -3058,7 +5177,7 @@ async def admin_delete_gateway(gateway_id: str, request: Request, db: Session = @admin_router.get("/resources/{uri:path}") -async def admin_get_resource(uri: str, db: Session = Depends(get_db), user: str = Depends(require_auth)) -> Dict[str, Any]: +async def admin_get_resource(uri: str, db: Session = Depends(get_db), user=Depends(get_current_user_with_permissions)) -> Dict[str, Any]: """Get resource details for the admin UI. Args: @@ -3082,7 +5201,7 @@ async def admin_get_resource(uri: str, db: Session = Depends(get_db), user: str >>> from fastapi import HTTPException >>> >>> mock_db = MagicMock() - >>> mock_user = "test_user" + >>> mock_user = {"email": "test_user", "db": mock_db} >>> resource_uri = "test://resource/get" >>> >>> # Mock resource data @@ -3141,7 +5260,7 @@ async def admin_get_resource(uri: str, db: Session = Depends(get_db), user: str >>> resource_service.get_resource_by_uri = original_get_resource_by_uri >>> resource_service.read_resource = original_read_resource """ - LOGGER.debug(f"User {user} requested details for resource URI {uri}") + LOGGER.debug(f"User {get_user_email(user)} requested details for resource URI {uri}") try: resource = await resource_service.get_resource_by_uri(db, uri) content = await resource_service.read_resource(db, uri) @@ -3154,7 +5273,7 @@ async def admin_get_resource(uri: str, db: Session = Depends(get_db), user: str @admin_router.post("/resources") -async def admin_add_resource(request: Request, db: Session = Depends(get_db), user: str = Depends(require_auth)) -> Response: +async def admin_add_resource(request: Request, db: Session = Depends(get_db), user=Depends(get_current_user_with_permissions)) -> Response: """ Add a resource via the admin UI. @@ -3181,7 +5300,7 @@ async def admin_add_resource(request: Request, db: Session = Depends(get_db), us >>> from starlette.datastructures import FormData >>> >>> mock_db = MagicMock() - >>> mock_user = "test_user" + >>> mock_user = {"email": "test_user", "db": mock_db} >>> form_data = FormData([ ... ("uri", "test://resource1"), ... ("name", "Test Resource"), @@ -3204,7 +5323,7 @@ async def admin_add_resource(request: Request, db: Session = Depends(get_db), us True >>> resource_service.register_resource = original_register_resource """ - LOGGER.debug(f"User {user} is adding a new resource") + LOGGER.debug(f"User {get_user_email(user)} is adding a new resource") form = await request.form() # Parse tags from comma-separated string @@ -3256,7 +5375,7 @@ async def admin_edit_resource( uri: str, request: Request, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> JSONResponse: """ Edit a resource via the admin UI. @@ -3284,7 +5403,7 @@ async def admin_edit_resource( >>> from starlette.datastructures import FormData >>> >>> mock_db = MagicMock() - >>> mock_user = "test_user" + >>> mock_user = {"email": "test_user", "db": mock_db} >>> form_data = FormData([ ... ("name", "Updated Resource"), ... ("description", "Updated description"), @@ -3342,7 +5461,7 @@ async def admin_edit_resource( >>> # Reset mock >>> resource_service.update_resource = original_update_resource """ - LOGGER.debug(f"User {user} is editing resource URI {uri}") + LOGGER.debug(f"User {get_user_email(user)} is editing resource URI {uri}") form = await request.form() # Parse tags from comma-separated string @@ -3376,7 +5495,7 @@ async def admin_edit_resource( @admin_router.post("/resources/{uri:path}/delete") -async def admin_delete_resource(uri: str, request: Request, db: Session = Depends(get_db), user: str = Depends(require_auth)) -> RedirectResponse: +async def admin_delete_resource(uri: str, request: Request, db: Session = Depends(get_db), user=Depends(get_current_user_with_permissions)) -> RedirectResponse: """ Delete a resource via the admin UI. @@ -3402,7 +5521,7 @@ async def admin_delete_resource(uri: str, request: Request, db: Session = Depend >>> from starlette.datastructures import FormData >>> >>> mock_db = MagicMock() - >>> mock_user = "test_user" + >>> mock_user = {"email": "test_user", "db": mock_db} >>> mock_request = MagicMock(spec=Request) >>> form_data = FormData([("is_inactive_checked", "false")]) >>> mock_request.form = AsyncMock(return_value=form_data) @@ -3423,15 +5542,15 @@ async def admin_delete_resource(uri: str, request: Request, db: Session = Depend >>> mock_request.form = AsyncMock(return_value=form_data_inactive) >>> >>> async def test_admin_delete_resource_inactive(): - ... response = await admin_delete_resource("test://resource1", mock_request, mock_user) + ... response = await admin_delete_resource("test://resource1", mock_request, mock_db, mock_user) ... return isinstance(response, RedirectResponse) and "include_inactive=true" in response.headers["location"] >>> >>> asyncio.run(test_admin_delete_resource_inactive()) True >>> resource_service.delete_resource = original_delete_resource """ - LOGGER.debug(f"User {user} is deleting resource URI {uri}") - await resource_service.delete_resource(db, uri) + LOGGER.debug(f"User {get_user_email(user)} is deleting resource URI {uri}") + await resource_service.delete_resource(user["db"] if isinstance(user, dict) else db, uri) form = await request.form() is_inactive_checked: str = str(form.get("is_inactive_checked", "false")) root_path = request.scope.get("root_path", "") @@ -3445,7 +5564,7 @@ async def admin_toggle_resource( resource_id: int, request: Request, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> RedirectResponse: """ Toggle a resource's active status via the admin UI. @@ -3473,7 +5592,7 @@ async def admin_toggle_resource( >>> from starlette.datastructures import FormData >>> >>> mock_db = MagicMock() - >>> mock_user = "test_user" + >>> mock_user = {"email": "test_user", "db": mock_db} >>> mock_request = MagicMock(spec=Request) >>> form_data = FormData([ ... ("activate", "true"), @@ -3536,7 +5655,7 @@ async def admin_toggle_resource( True >>> resource_service.toggle_resource_status = original_toggle_resource_status """ - LOGGER.debug(f"User {user} is toggling resource ID {resource_id}") + LOGGER.debug(f"User {get_user_email(user)} is toggling resource ID {resource_id}") form = await request.form() activate = str(form.get("activate", "true")).lower() == "true" is_inactive_checked = str(form.get("is_inactive_checked", "false")) @@ -3552,7 +5671,7 @@ async def admin_toggle_resource( @admin_router.get("/prompts/{name}") -async def admin_get_prompt(name: str, db: Session = Depends(get_db), user: str = Depends(require_auth)) -> Dict[str, Any]: +async def admin_get_prompt(name: str, db: Session = Depends(get_db), user=Depends(get_current_user_with_permissions)) -> Dict[str, Any]: """Get prompt details for the admin UI. Args: @@ -3576,7 +5695,7 @@ async def admin_get_prompt(name: str, db: Session = Depends(get_db), user: str = >>> from fastapi import HTTPException >>> >>> mock_db = MagicMock() - >>> mock_user = "test_user" + >>> mock_user = {"email": "test_user", "db": mock_db} >>> prompt_name = "test-prompt" >>> >>> # Mock prompt details @@ -3639,7 +5758,7 @@ async def admin_get_prompt(name: str, db: Session = Depends(get_db), user: str = >>> >>> prompt_service.get_prompt_details = original_get_prompt_details """ - LOGGER.debug(f"User {user} requested details for prompt name {name}") + LOGGER.debug(f"User {get_user_email(user)} requested details for prompt name {name}") try: prompt_details = await prompt_service.get_prompt_details(db, name) prompt = PromptRead.model_validate(prompt_details) @@ -3652,7 +5771,7 @@ async def admin_get_prompt(name: str, db: Session = Depends(get_db), user: str = @admin_router.post("/prompts") -async def admin_add_prompt(request: Request, db: Session = Depends(get_db), user: str = Depends(require_auth)) -> JSONResponse: +async def admin_add_prompt(request: Request, db: Session = Depends(get_db), user=Depends(get_current_user_with_permissions)) -> JSONResponse: """Add a prompt via the admin UI. Expects form fields: @@ -3677,7 +5796,7 @@ async def admin_add_prompt(request: Request, db: Session = Depends(get_db), user >>> from starlette.datastructures import FormData >>> >>> mock_db = MagicMock() - >>> mock_user = "test_user" + >>> mock_user = {"email": "test_user", "db": mock_db} >>> form_data = FormData([ ... ("name", "Test Prompt"), ... ("description", "A test prompt"), @@ -3700,7 +5819,7 @@ async def admin_add_prompt(request: Request, db: Session = Depends(get_db), user >>> prompt_service.register_prompt = original_register_prompt """ - LOGGER.debug(f"User {user} is adding a new prompt") + LOGGER.debug(f"User {get_user_email(user)} is adding a new prompt") form = await request.form() # Parse tags from comma-separated string @@ -3754,7 +5873,7 @@ async def admin_edit_prompt( name: str, request: Request, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> Response: """Edit a prompt via the admin UI. @@ -3781,7 +5900,7 @@ async def admin_edit_prompt( >>> from starlette.datastructures import FormData >>> >>> mock_db = MagicMock() - >>> mock_user = "test_user" + >>> mock_user = {"email": "test_user", "db": mock_db} >>> prompt_name = "test-prompt" >>> form_data = FormData([ ... ("name", "Updated Prompt"), @@ -3821,7 +5940,7 @@ async def admin_edit_prompt( True >>> prompt_service.update_prompt = original_update_prompt """ - LOGGER.debug(f"User {user} is editing prompt name {name}") + LOGGER.debug(f"User {get_user_email(user)} is editing prompt name {name}") form = await request.form() args_json: str = str(form.get("arguments")) or "[]" @@ -3861,7 +5980,7 @@ async def admin_edit_prompt( @admin_router.post("/prompts/{name}/delete") -async def admin_delete_prompt(name: str, request: Request, db: Session = Depends(get_db), user: str = Depends(require_auth)) -> RedirectResponse: +async def admin_delete_prompt(name: str, request: Request, db: Session = Depends(get_db), user=Depends(get_current_user_with_permissions)) -> RedirectResponse: """ Delete a prompt via the admin UI. @@ -3887,7 +6006,7 @@ async def admin_delete_prompt(name: str, request: Request, db: Session = Depends >>> from starlette.datastructures import FormData >>> >>> mock_db = MagicMock() - >>> mock_user = "test_user" + >>> mock_user = {"email": "test_user", "db": mock_db} >>> mock_request = MagicMock(spec=Request) >>> form_data = FormData([("is_inactive_checked", "false")]) >>> mock_request.form = AsyncMock(return_value=form_data) @@ -3915,7 +6034,7 @@ async def admin_delete_prompt(name: str, request: Request, db: Session = Depends True >>> prompt_service.delete_prompt = original_delete_prompt """ - LOGGER.debug(f"User {user} is deleting prompt name {name}") + LOGGER.debug(f"User {get_user_email(user)} is deleting prompt name {name}") await prompt_service.delete_prompt(db, name) form = await request.form() is_inactive_checked: str = str(form.get("is_inactive_checked", "false")) @@ -3930,7 +6049,7 @@ async def admin_toggle_prompt( prompt_id: int, request: Request, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> RedirectResponse: """ Toggle a prompt's active status via the admin UI. @@ -3958,7 +6077,7 @@ async def admin_toggle_prompt( >>> from starlette.datastructures import FormData >>> >>> mock_db = MagicMock() - >>> mock_user = "test_user" + >>> mock_user = {"email": "test_user", "db": mock_db} >>> mock_request = MagicMock(spec=Request) >>> form_data = FormData([ ... ("activate", "true"), @@ -4021,7 +6140,7 @@ async def admin_toggle_prompt( True >>> prompt_service.toggle_prompt_status = original_toggle_prompt_status """ - LOGGER.debug(f"User {user} is toggling prompt ID {prompt_id}") + LOGGER.debug(f"User {get_user_email(user)} is toggling prompt ID {prompt_id}") form = await request.form() activate: bool = str(form.get("activate", "true")).lower() == "true" is_inactive_checked: str = str(form.get("is_inactive_checked", "false")) @@ -4037,7 +6156,7 @@ async def admin_toggle_prompt( @admin_router.post("/roots") -async def admin_add_root(request: Request, user: str = Depends(require_auth)) -> RedirectResponse: +async def admin_add_root(request: Request, user=Depends(get_current_user_with_permissions)) -> RedirectResponse: """Add a new root via the admin UI. Expects form fields: @@ -4058,7 +6177,8 @@ async def admin_add_root(request: Request, user: str = Depends(require_auth)) -> >>> from fastapi.responses import RedirectResponse >>> from starlette.datastructures import FormData >>> - >>> mock_user = "test_user" + >>> mock_db = MagicMock() + >>> mock_user = {"email": "test_user", "db": mock_db} >>> mock_request = MagicMock(spec=Request) >>> form_data = FormData([ ... ("uri", "test://root1"), @@ -4078,7 +6198,7 @@ async def admin_add_root(request: Request, user: str = Depends(require_auth)) -> True >>> root_service.add_root = original_add_root """ - LOGGER.debug(f"User {user} is adding a new root") + LOGGER.debug(f"User {get_user_email(user)} is adding a new root") form = await request.form() uri = str(form["uri"]) name_value = form.get("name") @@ -4091,7 +6211,7 @@ async def admin_add_root(request: Request, user: str = Depends(require_auth)) -> @admin_router.post("/roots/{uri:path}/delete") -async def admin_delete_root(uri: str, request: Request, user: str = Depends(require_auth)) -> RedirectResponse: +async def admin_delete_root(uri: str, request: Request, user=Depends(get_current_user_with_permissions)) -> RedirectResponse: """ Delete a root via the admin UI. @@ -4115,7 +6235,8 @@ async def admin_delete_root(uri: str, request: Request, user: str = Depends(requ >>> from fastapi.responses import RedirectResponse >>> from starlette.datastructures import FormData >>> - >>> mock_user = "test_user" + >>> mock_db = MagicMock() + >>> mock_user = {"email": "test_user", "db": mock_db} >>> mock_request = MagicMock(spec=Request) >>> form_data = FormData([("is_inactive_checked", "false")]) >>> mock_request.form = AsyncMock(return_value=form_data) @@ -4143,7 +6264,7 @@ async def admin_delete_root(uri: str, request: Request, user: str = Depends(requ True >>> root_service.remove_root = original_remove_root """ - LOGGER.debug(f"User {user} is deleting root URI {uri}") + LOGGER.debug(f"User {get_user_email(user)} is deleting root URI {uri}") await root_service.remove_root(uri) form = await request.form() root_path = request.scope.get("root_path", "") @@ -4160,7 +6281,7 @@ async def admin_delete_root(uri: str, request: Request, user: str = Depends(requ # @admin_router.get("/metrics", response_model=MetricsDict) # async def admin_get_metrics( # db: Session = Depends(get_db), -# user: str = Depends(require_auth), +# user=Depends(get_current_user_with_permissions), # ) -> MetricsDict: # """ # Retrieve aggregate metrics for all entity types via the admin UI. @@ -4180,7 +6301,7 @@ async def admin_delete_root(uri: str, request: Request, user: str = Depends(requ # resources, servers, and prompts. Each value is a Pydantic model instance # specific to the entity type. # """ -# LOGGER.debug(f"User {user} requested aggregate metrics") +# LOGGER.debug(f"User {get_user_email(user)} requested aggregate metrics") # tool_metrics = await tool_service.aggregate_metrics(db) # resource_metrics = await resource_service.aggregate_metrics(db) # server_metrics = await server_service.aggregate_metrics(db) @@ -4198,7 +6319,7 @@ async def admin_delete_root(uri: str, request: Request, user: str = Depends(requ @admin_router.get("/metrics") async def get_aggregated_metrics( db: Session = Depends(get_db), - _user: str = Depends(require_auth), + _user=Depends(get_current_user_with_permissions), ) -> Dict[str, Any]: """Retrieve aggregated metrics and top performers for all entity types. @@ -4235,7 +6356,7 @@ async def get_aggregated_metrics( @admin_router.post("/metrics/reset", response_model=Dict[str, object]) -async def admin_reset_metrics(db: Session = Depends(get_db), user: str = Depends(require_auth)) -> Dict[str, object]: +async def admin_reset_metrics(db: Session = Depends(get_db), user=Depends(get_current_user_with_permissions)) -> Dict[str, object]: """ Reset all metrics for tools, resources, servers, and prompts. Each service must implement its own reset_metrics method. @@ -4252,7 +6373,7 @@ async def admin_reset_metrics(db: Session = Depends(get_db), user: str = Depends >>> from unittest.mock import AsyncMock, MagicMock >>> >>> mock_db = MagicMock() - >>> mock_user = "test_user" + >>> mock_user = {"email": "test_user", "db": mock_db} >>> >>> original_reset_metrics_tool = tool_service.reset_metrics >>> original_reset_metrics_resource = resource_service.reset_metrics @@ -4276,7 +6397,7 @@ async def admin_reset_metrics(db: Session = Depends(get_db), user: str = Depends >>> server_service.reset_metrics = original_reset_metrics_server >>> prompt_service.reset_metrics = original_reset_metrics_prompt """ - LOGGER.debug(f"User {user} requested to reset all metrics") + LOGGER.debug(f"User {get_user_email(user)} requested to reset all metrics") await tool_service.reset_metrics(db) await resource_service.reset_metrics(db) await server_service.reset_metrics(db) @@ -4285,7 +6406,7 @@ async def admin_reset_metrics(db: Session = Depends(get_db), user: str = Depends @admin_router.post("/gateways/test", response_model=GatewayTestResponse) -async def admin_test_gateway(request: GatewayTestRequest, user: str = Depends(require_auth)) -> GatewayTestResponse: +async def admin_test_gateway(request: GatewayTestRequest, user=Depends(get_current_user_with_permissions)) -> GatewayTestResponse: """ Test a gateway by sending a request to its URL. This endpoint allows administrators to test the connectivity and response @@ -4304,7 +6425,8 @@ async def admin_test_gateway(request: GatewayTestRequest, user: str = Depends(re >>> from fastapi import Request >>> import httpx >>> - >>> mock_user = "test_user" + >>> mock_db = MagicMock() + >>> mock_user = {"email": "test_user", "db": mock_db} >>> mock_request = GatewayTestRequest( ... base_url="https://api.example.com", ... path="/test", @@ -4425,7 +6547,7 @@ async def admin_test_gateway(request: GatewayTestRequest, user: str = Depends(re """ full_url = str(request.base_url).rstrip("/") + "/" + request.path.lstrip("/") full_url = full_url.rstrip("/") - LOGGER.debug(f"User {user} testing server at {request.base_url}.") + LOGGER.debug(f"User {get_user_email(user)} testing server at {request.base_url}.") try: start_time: float = time.monotonic() async with ResilientHttpClient(client_args={"timeout": settings.federation_timeout, "verify": not settings.skip_ssl_verify}) as client: @@ -4454,7 +6576,7 @@ async def admin_list_tags( entity_types: Optional[str] = None, include_entities: bool = False, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> List[Dict[str, Any]]: """ List all unique tags with statistics for the admin UI. @@ -4534,7 +6656,7 @@ async def admin_list_tags( async def admin_import_tools( request: Request, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> JSONResponse: """Bulk import multiple tools in a single request. @@ -4701,7 +6823,7 @@ async def admin_get_logs( limit: int = 100, offset: int = 0, order: str = "desc", - user: str = Depends(require_auth), # pylint: disable=unused-argument + user=Depends(get_current_user_with_permissions), # pylint: disable=unused-argument ) -> Dict[str, Any]: """Get filtered log entries from the in-memory buffer. @@ -4785,7 +6907,7 @@ async def admin_stream_logs( entity_type: Optional[str] = None, entity_id: Optional[str] = None, level: Optional[str] = None, - user: str = Depends(require_auth), # pylint: disable=unused-argument + user=Depends(get_current_user_with_permissions), # pylint: disable=unused-argument ): """Stream real-time log updates via Server-Sent Events. @@ -4868,7 +6990,7 @@ async def generate(): @admin_router.get("/logs/file") async def admin_get_log_file( filename: Optional[str] = None, - user: str = Depends(require_auth), # pylint: disable=unused-argument + user=Depends(get_current_user_with_permissions), # pylint: disable=unused-argument ): """Download log file. @@ -4986,7 +7108,7 @@ async def admin_export_logs( end_time: Optional[str] = None, request_id: Optional[str] = None, search: Optional[str] = None, - user: str = Depends(require_auth), # pylint: disable=unused-argument + user=Depends(get_current_user_with_permissions), # pylint: disable=unused-argument ): """Export filtered logs in JSON or CSV format. @@ -5113,7 +7235,7 @@ async def admin_export_configuration( include_inactive: bool = False, include_dependencies: bool = True, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ): """ Export gateway configuration via Admin UI. @@ -5180,7 +7302,7 @@ async def admin_export_configuration( @admin_router.post("/export/selective") -async def admin_export_selective(request: Request, db: Session = Depends(get_db), user: str = Depends(require_auth)): +async def admin_export_selective(request: Request, db: Session = Depends(get_db), user=Depends(get_current_user_with_permissions)): """ Export selected entities via Admin UI with entity selection. @@ -5240,7 +7362,7 @@ async def admin_export_selective(request: Request, db: Session = Depends(get_db) @admin_router.post("/import/configuration") -async def admin_import_configuration(request: Request, db: Session = Depends(get_db), user: str = Depends(require_auth)): +async def admin_import_configuration(request: Request, db: Session = Depends(get_db), user=Depends(get_current_user_with_permissions)): """ Import configuration via Admin UI. @@ -5302,7 +7424,7 @@ async def admin_import_configuration(request: Request, db: Session = Depends(get @admin_router.get("/import/status/{import_id}") -async def admin_get_import_status(import_id: str, user: str = Depends(require_auth)): +async def admin_get_import_status(import_id: str, user=Depends(get_current_user_with_permissions)): """Get import status via Admin UI. Args: @@ -5325,7 +7447,7 @@ async def admin_get_import_status(import_id: str, user: str = Depends(require_au @admin_router.get("/import/status") -async def admin_list_import_statuses(user: str = Depends(require_auth)): +async def admin_list_import_statuses(user=Depends(get_current_user_with_permissions)): """List all import statuses via Admin UI. Args: @@ -5350,7 +7472,7 @@ async def admin_list_a2a_agents( include_inactive: bool = False, tags: Optional[str] = None, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> HTMLResponse: """List A2A agents for admin UI. @@ -5483,7 +7605,7 @@ async def admin_list_a2a_agents( async def admin_add_a2a_agent( request: Request, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> RedirectResponse: """Add a new A2A agent via admin UI. @@ -5565,7 +7687,7 @@ async def admin_toggle_a2a_agent( agent_id: str, request: Request, db: Session = Depends(get_db), - user: str = Depends(require_auth), # pylint: disable=unused-argument + user=Depends(get_current_user_with_permissions), # pylint: disable=unused-argument ) -> RedirectResponse: """Toggle A2A agent status via admin UI. @@ -5608,7 +7730,7 @@ async def admin_delete_a2a_agent( agent_id: str, request: Request, # pylint: disable=unused-argument db: Session = Depends(get_db), - user: str = Depends(require_auth), # pylint: disable=unused-argument + user=Depends(get_current_user_with_permissions), # pylint: disable=unused-argument ) -> RedirectResponse: """Delete A2A agent via admin UI. @@ -5648,7 +7770,7 @@ async def admin_test_a2a_agent( agent_id: str, request: Request, # pylint: disable=unused-argument db: Session = Depends(get_db), - user: str = Depends(require_auth), # pylint: disable=unused-argument + user=Depends(get_current_user_with_permissions), # pylint: disable=unused-argument ) -> JSONResponse: """Test A2A agent via admin UI. @@ -5690,3 +7812,276 @@ async def admin_test_a2a_agent( except Exception as e: LOGGER.error(f"Error testing A2A agent {agent_id}: {e}") return JSONResponse(content={"success": False, "error": str(e), "agent_id": agent_id}, status_code=500) + + +# Team-scoped resource section endpoints +@admin_router.get("/sections/tools") +@require_permission("admin") +async def get_tools_section( + team_id: Optional[str] = None, + db: Session = Depends(get_db), + user=Depends(get_current_user_with_permissions), +): + """Get tools data filtered by team. + + Args: + team_id: Optional team ID to filter by + db: Database session + user: Current authenticated user context + + Returns: + JSONResponse: Tools data with team filtering applied + """ + try: + local_tool_service = ToolService() + user_email = get_user_email(user) + + # Get team-filtered tools + tools_list = await local_tool_service.list_tools_for_user(db, user_email, team_id=team_id, include_inactive=True) + + # Convert to JSON-serializable format + tools = [] + for tool in tools_list: + tool_dict = ( + tool.model_dump(by_alias=True) + if hasattr(tool, "model_dump") + else { + "id": tool.id, + "name": tool.name, + "description": tool.description, + "tags": tool.tags or [], + "isActive": tool.isActive, + "team_id": getattr(tool, "team_id", None), + "visibility": getattr(tool, "visibility", "private"), + } + ) + tools.append(tool_dict) + + return JSONResponse(content={"tools": tools, "team_id": team_id}) + + except Exception as e: + LOGGER.error(f"Error loading tools section: {e}") + return JSONResponse(content={"error": str(e)}, status_code=500) + + +@admin_router.get("/sections/resources") +@require_permission("admin") +async def get_resources_section( + team_id: Optional[str] = None, + db: Session = Depends(get_db), + user=Depends(get_current_user_with_permissions), +): + """Get resources data filtered by team. + + Args: + team_id: Optional team ID to filter by + db: Database session + user: Current authenticated user context + + Returns: + JSONResponse: Resources data with team filtering applied + """ + try: + local_resource_service = ResourceService() + user_email = get_user_email(user) + LOGGER.debug(f"User {user_email} requesting resources section with team_id={team_id}") + + # Get all resources and filter by team + resources_list = await local_resource_service.list_resources(db, include_inactive=True) + + # Apply team filtering if specified + if team_id: + resources_list = [r for r in resources_list if getattr(r, "team_id", None) == team_id] + + # Convert to JSON-serializable format + resources = [] + for resource in resources_list: + resource_dict = ( + resource.model_dump(by_alias=True) + if hasattr(resource, "model_dump") + else { + "id": resource.id, + "name": resource.name, + "description": resource.description, + "uri": resource.uri, + "tags": resource.tags or [], + "isActive": resource.isActive, + "team_id": getattr(resource, "team_id", None), + "visibility": getattr(resource, "visibility", "private"), + } + ) + resources.append(resource_dict) + + return JSONResponse(content={"resources": resources, "team_id": team_id}) + + except Exception as e: + LOGGER.error(f"Error loading resources section: {e}") + return JSONResponse(content={"error": str(e)}, status_code=500) + + +@admin_router.get("/sections/prompts") +@require_permission("admin") +async def get_prompts_section( + team_id: Optional[str] = None, + db: Session = Depends(get_db), + user=Depends(get_current_user_with_permissions), +): + """Get prompts data filtered by team. + + Args: + team_id: Optional team ID to filter by + db: Database session + user: Current authenticated user context + + Returns: + JSONResponse: Prompts data with team filtering applied + """ + try: + local_prompt_service = PromptService() + user_email = get_user_email(user) + LOGGER.debug(f"User {user_email} requesting prompts section with team_id={team_id}") + + # Get all prompts and filter by team + prompts_list = await local_prompt_service.list_prompts(db, include_inactive=True) + + # Apply team filtering if specified + if team_id: + prompts_list = [p for p in prompts_list if getattr(p, "team_id", None) == team_id] + + # Convert to JSON-serializable format + prompts = [] + for prompt in prompts_list: + prompt_dict = ( + prompt.model_dump(by_alias=True) + if hasattr(prompt, "model_dump") + else { + "id": prompt.id, + "name": prompt.name, + "description": prompt.description, + "arguments": prompt.arguments or [], + "tags": prompt.tags or [], + "isActive": prompt.isActive, + "team_id": getattr(prompt, "team_id", None), + "visibility": getattr(prompt, "visibility", "private"), + } + ) + prompts.append(prompt_dict) + + return JSONResponse(content={"prompts": prompts, "team_id": team_id}) + + except Exception as e: + LOGGER.error(f"Error loading prompts section: {e}") + return JSONResponse(content={"error": str(e)}, status_code=500) + + +@admin_router.get("/sections/servers") +@require_permission("admin") +async def get_servers_section( + team_id: Optional[str] = None, + db: Session = Depends(get_db), + user=Depends(get_current_user_with_permissions), +): + """Get servers data filtered by team. + + Args: + team_id: Optional team ID to filter by + db: Database session + user: Current authenticated user context + + Returns: + JSONResponse: Servers data with team filtering applied + """ + try: + local_server_service = ServerService() + user_email = get_user_email(user) + LOGGER.debug(f"User {user_email} requesting servers section with team_id={team_id}") + + # Get all servers and filter by team + servers_list = await local_server_service.list_servers(db, include_inactive=True) + + # Apply team filtering if specified + if team_id: + servers_list = [s for s in servers_list if getattr(s, "team_id", None) == team_id] + + # Convert to JSON-serializable format + servers = [] + for server in servers_list: + server_dict = ( + server.model_dump(by_alias=True) + if hasattr(server, "model_dump") + else { + "id": server.id, + "name": server.name, + "description": server.description, + "tags": server.tags or [], + "isActive": server.isActive, + "team_id": getattr(server, "team_id", None), + "visibility": getattr(server, "visibility", "private"), + } + ) + servers.append(server_dict) + + return JSONResponse(content={"servers": servers, "team_id": team_id}) + + except Exception as e: + LOGGER.error(f"Error loading servers section: {e}") + return JSONResponse(content={"error": str(e)}, status_code=500) + + +@admin_router.get("/sections/gateways") +@require_permission("admin") +async def get_gateways_section( + team_id: Optional[str] = None, + db: Session = Depends(get_db), + user=Depends(get_current_user_with_permissions), +): + """Get gateways data filtered by team. + + Args: + team_id: Optional team ID to filter by + db: Database session + user: Current authenticated user context + + Returns: + JSONResponse: Gateways data with team filtering applied + """ + try: + local_gateway_service = GatewayService() + get_user_email(user) + + # Get all gateways and filter by team + gateways_list = await local_gateway_service.list_gateways(db, include_inactive=True) + + # Apply team filtering if specified + if team_id: + gateways_list = [g for g in gateways_list if getattr(g, "team_id", None) == team_id] + + # Convert to JSON-serializable format + gateways = [] + for gateway in gateways_list: + if hasattr(gateway, "model_dump"): + # Get dict and serialize datetime objects + gateway_dict = gateway.model_dump(by_alias=True) + # Convert datetime objects to strings + for key, value in gateway_dict.items(): + gateway_dict[key] = serialize_datetime(value) + else: + gateway_dict = { + "id": gateway.id, + "name": gateway.name, + "host": gateway.host, + "port": gateway.port, + "tags": gateway.tags or [], + "isActive": gateway.isActive, + "team_id": getattr(gateway, "team_id", None), + "visibility": getattr(gateway, "visibility", "private"), + "created_at": serialize_datetime(getattr(gateway, "created_at", None)), + "updated_at": serialize_datetime(getattr(gateway, "updated_at", None)), + } + gateways.append(gateway_dict) + + return JSONResponse(content={"gateways": gateways, "team_id": team_id}) + + except Exception as e: + LOGGER.error(f"Error loading gateways section: {e}") + return JSONResponse(content={"error": str(e)}, status_code=500) diff --git a/mcpgateway/alembic/versions/cfc3d6aa0fb2_consolidated_multiuser_team_rbac_.py b/mcpgateway/alembic/versions/cfc3d6aa0fb2_consolidated_multiuser_team_rbac_.py new file mode 100644 index 000000000..0fe34bc7c --- /dev/null +++ b/mcpgateway/alembic/versions/cfc3d6aa0fb2_consolidated_multiuser_team_rbac_.py @@ -0,0 +1,510 @@ +# -*- coding: utf-8 -*- +# pylint: disable=no-member,not-callable +"""consolidated_multiuser_team_rbac_migration + +Revision ID: cfc3d6aa0fb2 +Revises: 733159a4fa74 +Create Date: 2025-08-29 22:50:14.315471 + +This migration consolidates all multi-user, team scoping, RBAC, and authentication +features into a single migration for clean deployment. +""" + +# Standard +from typing import Sequence, Union + +# Third-Party +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision: str = "cfc3d6aa0fb2" +down_revision: Union[str, Sequence[str], None] = "733159a4fa74" +branch_labels: Union[str, Sequence[str], None] = None +depends_on: Union[str, Sequence[str], None] = None + + +def upgrade() -> None: + """Consolidated upgrade schema for multi-user, team, and RBAC features.""" + + def safe_create_index(index_name: str, table_name: str, columns: list): + """Helper function to safely create indexes, ignoring if they already exist. + + Args: + index_name: Name of the index to create + table_name: Name of the table to create index on + columns: List of column names for the index + """ + try: + op.create_index(index_name, table_name, columns) + except Exception: + pass # Index might already exist + + # =============================== + # STEP 1: Core User Authentication + # =============================== + + # Check if email_users table exists + conn = op.get_bind() + inspector = sa.inspect(conn) + existing_tables = inspector.get_table_names() + + if "email_users" not in existing_tables: + # Create email_users table + op.create_table( + "email_users", + sa.Column("email", sa.String(255), primary_key=True, index=True), + sa.Column("password_hash", sa.String(255), nullable=False), + sa.Column("full_name", sa.String(255), nullable=True), + sa.Column("is_admin", sa.Boolean, default=False, nullable=False), + sa.Column("is_active", sa.Boolean, default=True, nullable=False), + sa.Column("email_verified_at", sa.DateTime(timezone=True), nullable=True), + sa.Column("auth_provider", sa.String(50), default="local", nullable=False), + sa.Column("password_hash_type", sa.String(20), default="argon2id", nullable=False), + sa.Column("failed_login_attempts", sa.Integer, default=0, nullable=False), + sa.Column("locked_until", sa.DateTime(timezone=True), nullable=True), + sa.Column("created_at", sa.DateTime(timezone=True), nullable=False, server_default=sa.func.now()), + sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False, server_default=sa.func.now()), + sa.Column("last_login", sa.DateTime(timezone=True), nullable=True), + ) + + safe_create_index(op.f("ix_email_users_email"), "email_users", ["email"]) + + if "email_auth_events" not in existing_tables: + # Create email_auth_events table + op.create_table( + "email_auth_events", + sa.Column("id", sa.Integer, primary_key=True, autoincrement=True), + sa.Column("timestamp", sa.DateTime(timezone=True), nullable=False, server_default=sa.func.now()), + sa.Column("user_email", sa.String(255), nullable=True, index=True), + sa.Column("event_type", sa.String(50), nullable=False), + sa.Column("success", sa.Boolean, nullable=False), + sa.Column("ip_address", sa.String(45), nullable=True), # IPv6 compatible + sa.Column("user_agent", sa.Text, nullable=True), + sa.Column("failure_reason", sa.String(255), nullable=True), + sa.Column("details", sa.Text, nullable=True), # JSON string + ) + safe_create_index(op.f("ix_email_auth_events_user_email"), "email_auth_events", ["user_email"]) + safe_create_index(op.f("ix_email_auth_events_timestamp"), "email_auth_events", ["timestamp"]) + + # =============================== + # STEP 2: Team Management + # =============================== + + if "email_teams" not in existing_tables: + # Create email_teams table + op.create_table( + "email_teams", + sa.Column("id", sa.String(36), nullable=False), + sa.Column("name", sa.String(255), nullable=False), + sa.Column("slug", sa.String(255), nullable=False), + sa.Column("description", sa.Text(), nullable=True), + sa.Column("created_by", sa.String(255), nullable=False), + sa.Column("is_personal", sa.Boolean(), nullable=False, default=False), + sa.Column("visibility", sa.String(20), nullable=False, default="private"), + sa.Column("max_members", sa.Integer(), nullable=True), + sa.Column("created_at", sa.DateTime(timezone=True), nullable=False), + sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False), + sa.Column("is_active", sa.Boolean(), nullable=False, default=True), + sa.PrimaryKeyConstraint("id"), + sa.UniqueConstraint("slug"), + sa.CheckConstraint("visibility IN ('private', 'public')", name="ck_email_teams_visibility"), + ) + else: + # Add visibility constraint to existing email_teams table if it doesn't exist + try: + # Use batch mode for SQLite compatibility + with op.batch_alter_table("email_teams", schema=None) as batch_op: + batch_op.create_check_constraint("ck_email_teams_visibility", "visibility IN ('private', 'public')") + except Exception: + # Constraint might already exist, ignore + pass + + if "email_team_members" not in existing_tables: + # Create email_team_members table + op.create_table( + "email_team_members", + sa.Column("id", sa.String(36), nullable=False), + sa.Column("team_id", sa.String(36), nullable=False), + sa.Column("user_email", sa.String(255), nullable=False), + sa.Column("role", sa.String(50), nullable=False, default="member"), + sa.Column("joined_at", sa.DateTime(timezone=True), nullable=False), + sa.Column("invited_by", sa.String(255), nullable=True), + sa.Column("is_active", sa.Boolean(), nullable=False, default=True), + sa.PrimaryKeyConstraint("id"), + sa.UniqueConstraint("team_id", "user_email", name="uq_team_member"), + ) + + if "email_team_invitations" not in existing_tables: + # Create email_team_invitations table + op.create_table( + "email_team_invitations", + sa.Column("id", sa.String(36), nullable=False), + sa.Column("team_id", sa.String(36), nullable=False), + sa.Column("email", sa.String(255), nullable=False), + sa.Column("role", sa.String(50), nullable=False, default="member"), + sa.Column("invited_by", sa.String(255), nullable=False), + sa.Column("invited_at", sa.DateTime(timezone=True), nullable=False), + sa.Column("expires_at", sa.DateTime(timezone=True), nullable=False), + sa.Column("token", sa.String(500), nullable=False), + sa.Column("is_active", sa.Boolean(), nullable=False, default=True), + sa.PrimaryKeyConstraint("id"), + sa.UniqueConstraint("token"), + ) + + # =============================== + # STEP 3: JWT Token Management + # =============================== + + if "email_api_tokens" not in existing_tables: + # Create email_api_tokens table + op.create_table( + "email_api_tokens", + sa.Column("id", sa.String(36), nullable=False, comment="Unique token ID"), + sa.Column("user_email", sa.String(255), nullable=False, comment="Owner email address"), + sa.Column("name", sa.String(255), nullable=False, comment="Human-readable token name"), + sa.Column("jti", sa.String(36), nullable=False, comment="JWT ID for revocation tracking"), + sa.Column("token_hash", sa.String(255), nullable=False, comment="Hashed token value"), + # Scoping fields + sa.Column("server_id", sa.String(36), nullable=True, comment="Limited to specific server (NULL = global)"), + sa.Column("resource_scopes", sa.Text(), nullable=True, comment="JSON array of resource permissions"), + sa.Column("ip_restrictions", sa.Text(), nullable=True, comment="JSON array of allowed IP addresses/CIDR"), + sa.Column("time_restrictions", sa.Text(), nullable=True, comment="JSON object of time-based restrictions"), + sa.Column("usage_limits", sa.Text(), nullable=True, comment="JSON object of usage limits"), + # Lifecycle fields + sa.Column("created_at", sa.DateTime(timezone=True), nullable=False, server_default=sa.text("CURRENT_TIMESTAMP"), comment="Token creation timestamp"), + sa.Column("expires_at", sa.DateTime(timezone=True), nullable=True, comment="Token expiry timestamp"), + sa.Column("last_used", sa.DateTime(timezone=True), nullable=True, comment="Last usage timestamp"), + sa.Column("is_active", sa.Boolean(), nullable=False, server_default=sa.text("true"), comment="Active status flag"), + # Metadata fields + sa.Column("description", sa.Text(), nullable=True, comment="Token description"), + sa.Column("tags", sa.Text(), nullable=True, comment="JSON array of tags"), + sa.Column("team_id", sa.String(length=36), nullable=True), # Team scoping + # Constraints + sa.PrimaryKeyConstraint("id"), + sa.UniqueConstraint("jti", name="uq_email_api_tokens_jti"), + sa.UniqueConstraint("user_email", "name", name="uq_email_api_tokens_user_email_name"), + ) + + # Create indexes for email_api_tokens + safe_create_index("idx_email_api_tokens_user_email", "email_api_tokens", ["user_email"]) + safe_create_index("idx_email_api_tokens_server_id", "email_api_tokens", ["server_id"]) + safe_create_index("idx_email_api_tokens_is_active", "email_api_tokens", ["is_active"]) + safe_create_index("idx_email_api_tokens_expires_at", "email_api_tokens", ["expires_at"]) + safe_create_index("idx_email_api_tokens_last_used", "email_api_tokens", ["last_used"]) + safe_create_index(op.f("ix_email_api_tokens_team_id"), "email_api_tokens", ["team_id"]) + + if "token_revocations" not in existing_tables: + # Create token_revocations table (blacklist) + op.create_table( + "token_revocations", + sa.Column("jti", sa.String(36), nullable=False, comment="JWT ID of revoked token"), + sa.Column("revoked_at", sa.DateTime(timezone=True), nullable=False, server_default=sa.text("CURRENT_TIMESTAMP"), comment="Revocation timestamp"), + sa.Column("revoked_by", sa.String(255), nullable=True, comment="Email of user who revoked token"), + sa.Column("reason", sa.String(255), nullable=True, comment="Reason for revocation"), + # Constraints + sa.PrimaryKeyConstraint("jti"), + ) + + # Create indexes for token_revocations + safe_create_index("idx_token_revocations_revoked_at", "token_revocations", ["revoked_at"]) + safe_create_index("idx_token_revocations_revoked_by", "token_revocations", ["revoked_by"]) + + # =============================== + # STEP 4: RBAC System + # =============================== + + if "roles" not in existing_tables: + # Create RBAC roles table + op.create_table( + "roles", + sa.Column("id", sa.String(length=36), nullable=False), + sa.Column("name", sa.String(length=255), nullable=False), + sa.Column("description", sa.Text(), nullable=True), + sa.Column("scope", sa.String(length=20), nullable=False), + sa.Column("permissions", sa.JSON(), nullable=False), + sa.Column("inherits_from", sa.String(length=36), nullable=True), + sa.Column("created_by", sa.String(length=255), nullable=False), + sa.Column("is_system_role", sa.Boolean(), nullable=False), + sa.Column("is_active", sa.Boolean(), nullable=False), + sa.Column("created_at", sa.DateTime(timezone=True), nullable=False), + sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False), + sa.PrimaryKeyConstraint("id"), + comment="Roles for RBAC permission system", + ) + + if "user_roles" not in existing_tables: + # Create RBAC user_roles table + op.create_table( + "user_roles", + sa.Column("id", sa.String(length=36), nullable=False), + sa.Column("user_email", sa.String(length=255), nullable=False), + sa.Column("role_id", sa.String(length=36), nullable=False), + sa.Column("scope", sa.String(length=20), nullable=False), + sa.Column("scope_id", sa.String(length=36), nullable=True), + sa.Column("granted_by", sa.String(length=255), nullable=False), + sa.Column("granted_at", sa.DateTime(timezone=True), nullable=False), + sa.Column("expires_at", sa.DateTime(timezone=True), nullable=True), + sa.Column("is_active", sa.Boolean(), nullable=False), + sa.PrimaryKeyConstraint("id"), + comment="User role assignments for RBAC system", + ) + + # Create indexes for performance + safe_create_index("idx_user_roles_user_email", "user_roles", ["user_email"]) + safe_create_index("idx_user_roles_role_id", "user_roles", ["role_id"]) + safe_create_index("idx_user_roles_scope", "user_roles", ["scope"]) + safe_create_index("idx_user_roles_scope_id", "user_roles", ["scope_id"]) + + if "permission_audit_log" not in existing_tables: + # Create RBAC permission_audit_log table + op.create_table( + "permission_audit_log", + sa.Column("id", sa.Integer(), nullable=False, autoincrement=True), + sa.Column("timestamp", sa.DateTime(timezone=True), nullable=False), + sa.Column("user_email", sa.String(length=255), nullable=True), + sa.Column("permission", sa.String(length=100), nullable=False), + sa.Column("resource_type", sa.String(length=50), nullable=True), + sa.Column("resource_id", sa.String(length=255), nullable=True), + sa.Column("team_id", sa.String(length=36), nullable=True), + sa.Column("granted", sa.Boolean(), nullable=False), + sa.Column("roles_checked", sa.JSON(), nullable=True), + sa.Column("ip_address", sa.String(length=45), nullable=True), + sa.Column("user_agent", sa.Text(), nullable=True), + sa.PrimaryKeyConstraint("id"), + comment="Permission audit log for RBAC compliance", + ) + + safe_create_index("idx_permission_audit_log_user_email", "permission_audit_log", ["user_email"]) + safe_create_index("idx_permission_audit_log_timestamp", "permission_audit_log", ["timestamp"]) + safe_create_index("idx_permission_audit_log_permission", "permission_audit_log", ["permission"]) + + # =============================== + # STEP 5: User Approval System + # =============================== + + if "pending_user_approvals" not in existing_tables: + op.create_table( + "pending_user_approvals", + sa.Column("id", sa.String(length=36), nullable=False), + sa.Column("email", sa.String(length=255), nullable=False), + sa.Column("full_name", sa.String(length=255), nullable=False), + sa.Column("auth_provider", sa.String(length=50), nullable=False), + sa.Column("sso_metadata", sa.JSON(), nullable=True), + sa.Column("requested_at", sa.DateTime(timezone=True), nullable=False), + sa.Column("expires_at", sa.DateTime(timezone=True), nullable=False), + sa.Column("approved_by", sa.String(length=255), nullable=True), + sa.Column("approved_at", sa.DateTime(timezone=True), nullable=True), + sa.Column("status", sa.String(length=20), nullable=False), + sa.Column("rejection_reason", sa.Text(), nullable=True), + sa.Column("admin_notes", sa.Text(), nullable=True), + sa.PrimaryKeyConstraint("id"), + sa.UniqueConstraint("email"), + ) + + # =============================== + # STEP 6: Add Team Scoping to Existing Tables + # =============================== + + # Check which columns already exist before adding them + def add_team_columns_if_not_exists(table_name: str): + """Add team_id and owner_email columns to a table if they don't already exist. + + Args: + table_name: Name of the table to add columns to. + """ + columns = inspector.get_columns(table_name) + existing_column_names = [col["name"] for col in columns] + + if "team_id" not in existing_column_names: + op.add_column(table_name, sa.Column("team_id", sa.String(length=36), nullable=True)) + + if "owner_email" not in existing_column_names: + op.add_column(table_name, sa.Column("owner_email", sa.String(length=255), nullable=True)) + + if "visibility" not in existing_column_names: + op.add_column(table_name, sa.Column("visibility", sa.String(length=20), nullable=False, server_default="private")) + + # Add team scoping to existing resource tables if they exist + resource_tables = ["prompts", "resources", "servers", "tools", "gateways", "a2a_agents"] + + for table_name in resource_tables: + if table_name in existing_tables: + add_team_columns_if_not_exists(table_name) + + # =============================== + # STEP 8: SSO Provider Management + # =============================== + + if "sso_providers" not in existing_tables: + # Create sso_providers table + op.create_table( + "sso_providers", + sa.Column("id", sa.String(50), primary_key=True), + sa.Column("name", sa.String(100), nullable=False, unique=True), + sa.Column("display_name", sa.String(100), nullable=False), + sa.Column("provider_type", sa.String(20), nullable=False), + sa.Column("is_enabled", sa.Boolean, nullable=False, default=True), + sa.Column("client_id", sa.String(255), nullable=False), + sa.Column("client_secret_encrypted", sa.Text, nullable=False), + sa.Column("authorization_url", sa.String(500), nullable=False), + sa.Column("token_url", sa.String(500), nullable=False), + sa.Column("userinfo_url", sa.String(500), nullable=False), + sa.Column("issuer", sa.String(500), nullable=True), + sa.Column("trusted_domains", sa.JSON, nullable=False, default="[]"), + sa.Column("scope", sa.String(200), nullable=False, default="openid profile email"), + sa.Column("auto_create_users", sa.Boolean, nullable=False, default=True), + sa.Column("team_mapping", sa.JSON, nullable=False, default="{}"), + sa.Column("created_at", sa.DateTime(timezone=True), nullable=False, server_default=sa.func.now()), + sa.Column("updated_at", sa.DateTime(timezone=True), nullable=False, server_default=sa.func.now()), + ) + + if "email_team_join_requests" not in existing_tables: + # Create email_team_join_requests table + op.create_table( + "email_team_join_requests", + sa.Column("id", sa.String(36), nullable=False), + sa.Column("team_id", sa.String(36), nullable=False), + sa.Column("user_email", sa.String(255), nullable=False), + sa.Column("message", sa.Text, nullable=True), + sa.Column("status", sa.String(20), nullable=False, default="pending"), + sa.Column("requested_at", sa.DateTime(timezone=True), nullable=False, server_default=sa.func.now()), + sa.Column("expires_at", sa.DateTime(timezone=True), nullable=False), + sa.Column("reviewed_at", sa.DateTime(timezone=True), nullable=True), + sa.Column("reviewed_by", sa.String(255), nullable=True), + sa.Column("notes", sa.Text, nullable=True), + sa.PrimaryKeyConstraint("id"), + sa.UniqueConstraint("team_id", "user_email", name="uq_team_join_request"), + ) + + if "sso_auth_sessions" not in existing_tables: + # Create sso_auth_sessions table + op.create_table( + "sso_auth_sessions", + sa.Column("id", sa.String(36), primary_key=True), + sa.Column("provider_id", sa.String(50), nullable=False), + sa.Column("state", sa.String(255), nullable=False, unique=True), + sa.Column("code_verifier", sa.String(255), nullable=True), + sa.Column("nonce", sa.String(255), nullable=True), + sa.Column("redirect_uri", sa.String(500), nullable=False), + sa.Column("expires_at", sa.DateTime(timezone=True), nullable=False), + sa.Column("user_email", sa.String(255), nullable=True), + sa.Column("created_at", sa.DateTime(timezone=True), nullable=False, server_default=sa.func.now()), + ) + + if "pending_user_approvals" not in existing_tables: + # Create pending_user_approvals table + op.create_table( + "pending_user_approvals", + sa.Column("id", sa.String(36), primary_key=True), + sa.Column("email", sa.String(255), nullable=False, index=True), + sa.Column("provider_id", sa.String(50), nullable=False), + sa.Column("provider_user_id", sa.String(255), nullable=True), + sa.Column("full_name", sa.String(255), nullable=True), + sa.Column("status", sa.String(20), nullable=False, default="pending"), + sa.Column("requested_at", sa.DateTime(timezone=True), nullable=False, server_default=sa.func.now()), + sa.Column("reviewed_at", sa.DateTime(timezone=True), nullable=True), + sa.Column("reviewed_by", sa.String(255), nullable=True), + sa.Column("expires_at", sa.DateTime(timezone=True), nullable=False), + sa.Column("notes", sa.Text, nullable=True), + sa.UniqueConstraint("email", "provider_id", name="uq_pending_approval"), + ) + + # Note: Foreign key constraints are intentionally omitted for SQLite compatibility + # The ORM models handle the relationships properly + + +def downgrade() -> None: + """Consolidated downgrade schema for multi-user, team, and RBAC features.""" + + def safe_drop_index(index_name: str, table_name: str): + """Helper function to safely drop indexes, ignoring if they don't exist. + + Args: + index_name: Name of the index to drop + table_name: Name of the table containing the index + """ + try: + op.drop_index(index_name, table_name) + except Exception: + pass # Index might not exist + + # Get current tables to check what exists + conn = op.get_bind() + inspector = sa.inspect(conn) + existing_tables = inspector.get_table_names() + + # Remove team scoping columns from resource tables + resource_tables = ["tools", "servers", "resources", "prompts", "gateways", "a2a_agents"] + + for table_name in resource_tables: + if table_name in existing_tables: + columns = inspector.get_columns(table_name) + existing_column_names = [col["name"] for col in columns] + + # SQLite has issues dropping columns with foreign key constraints + # Use safe column dropping that ignores errors + if "visibility" in existing_column_names: + try: + op.drop_column(table_name, "visibility") + except Exception: + pass # SQLite constraint issues + if "owner_email" in existing_column_names: + try: + op.drop_column(table_name, "owner_email") + except Exception: + pass # SQLite constraint issues + if "team_id" in existing_column_names: + try: + op.drop_column(table_name, "team_id") + except Exception: + pass # SQLite constraint issues + + # Drop new tables in reverse order + tables_to_drop = [ + "sso_auth_sessions", + "sso_providers", + "email_team_join_requests", + "pending_user_approvals", + "permission_audit_log", + "user_roles", + "roles", + "token_revocations", + "email_api_tokens", + "email_team_invitations", + "email_team_members", + "email_teams", + "email_auth_events", + "email_users", + ] + + for table_name in tables_to_drop: + if table_name in existing_tables: + # Drop indexes first if they exist + if table_name == "email_api_tokens": + safe_drop_index("ix_email_api_tokens_team_id", table_name) + safe_drop_index("idx_email_api_tokens_last_used", table_name) + safe_drop_index("idx_email_api_tokens_expires_at", table_name) + safe_drop_index("idx_email_api_tokens_is_active", table_name) + safe_drop_index("idx_email_api_tokens_server_id", table_name) + safe_drop_index("idx_email_api_tokens_user_email", table_name) + elif table_name == "token_revocations": + safe_drop_index("idx_token_revocations_revoked_by", table_name) + safe_drop_index("idx_token_revocations_revoked_at", table_name) + elif table_name == "user_roles": + safe_drop_index("idx_user_roles_scope_id", table_name) + safe_drop_index("idx_user_roles_scope", table_name) + safe_drop_index("idx_user_roles_role_id", table_name) + safe_drop_index("idx_user_roles_user_email", table_name) + elif table_name == "permission_audit_log": + safe_drop_index("idx_permission_audit_log_permission", table_name) + safe_drop_index("idx_permission_audit_log_timestamp", table_name) + safe_drop_index("idx_permission_audit_log_user_email", table_name) + elif table_name == "email_auth_events": + safe_drop_index(op.f("ix_email_auth_events_timestamp"), table_name) + safe_drop_index(op.f("ix_email_auth_events_user_email"), table_name) + elif table_name == "email_users": + safe_drop_index(op.f("ix_email_users_email"), table_name) + + # Drop the table + op.drop_table(table_name) diff --git a/mcpgateway/auth.py b/mcpgateway/auth.py new file mode 100644 index 000000000..b9a0b9f81 --- /dev/null +++ b/mcpgateway/auth.py @@ -0,0 +1,228 @@ +# -*- coding: utf-8 -*- +"""Shared authentication utilities. + +This module provides common authentication functions that can be shared +across different parts of the application without creating circular imports. +""" + +# Standard +from datetime import datetime, timezone +import hashlib +import logging +from typing import Optional + +# Third-Party +from fastapi import Depends, HTTPException, status +from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer +import jwt +from sqlalchemy.orm import Session + +# First-Party +from mcpgateway.config import settings +from mcpgateway.db import EmailUser, SessionLocal + +# Security scheme +bearer_scheme = HTTPBearer(auto_error=False) + + +def get_db(): + """Database dependency. + + Yields: + Session: SQLAlchemy database session + """ + db = SessionLocal() + try: + yield db + finally: + db.close() + + +async def get_current_user(credentials: Optional[HTTPAuthorizationCredentials] = Depends(bearer_scheme), db: Session = Depends(get_db)) -> EmailUser: + """Get current authenticated user from JWT token with revocation checking. + + Args: + credentials: HTTP authorization credentials + db: Database session + + Returns: + EmailUser: Authenticated user + + Raises: + HTTPException: If authentication fails + """ + logger = logging.getLogger(__name__) + + if not credentials: + logger.debug("No credentials provided") + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Authentication required", + headers={"WWW-Authenticate": "Bearer"}, + ) + + logger.debug("Attempting authentication with token: %s...", credentials.credentials[:20]) + email = None + + try: + # Try JWT token first + logger.debug("Attempting JWT token validation") + payload = jwt.decode(credentials.credentials, settings.jwt_secret_key, algorithms=[settings.jwt_algorithm], audience=settings.jwt_audience, issuer=settings.jwt_issuer) + + logger.debug("JWT token validated successfully") + # Extract user identifier (support both new and legacy token formats) + email = payload.get("sub") + if email is None: + # Try legacy format + email = payload.get("email") + + if email is None: + logger.debug("No email/sub found in JWT payload") + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Invalid token", + headers={"WWW-Authenticate": "Bearer"}, + ) + + logger.debug("JWT authentication successful for email: %s", email) + + # Check for token revocation if JTI is present (new format) + jti = payload.get("jti") + if jti: + try: + # First-Party + from mcpgateway.services.token_catalog_service import TokenCatalogService # pylint: disable=import-outside-toplevel + + token_service = TokenCatalogService(db) + is_revoked = await token_service.is_token_revoked(jti) + if is_revoked: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Token has been revoked", + headers={"WWW-Authenticate": "Bearer"}, + ) + except Exception as revoke_check_error: + # Log the error but don't fail authentication for admin tokens + logger.warning(f"Token revocation check failed for JTI {jti}: {revoke_check_error}") + + except jwt.ExpiredSignatureError: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Token expired", + headers={"WWW-Authenticate": "Bearer"}, + ) + except jwt.PyJWTError as jwt_error: + # JWT validation failed, try database API token + logger.debug("JWT validation failed with error: %s, trying database API token", jwt_error) + try: + # First-Party + from mcpgateway.services.token_catalog_service import TokenCatalogService # pylint: disable=import-outside-toplevel + + token_service = TokenCatalogService(db) + token_hash = hashlib.sha256(credentials.credentials.encode()).hexdigest() + logger.debug("Generated token hash: %s", token_hash) + + # Find active API token by hash + # Third-Party + from sqlalchemy import select + + # First-Party + from mcpgateway.db import EmailApiToken + + result = db.execute(select(EmailApiToken).where(EmailApiToken.token_hash == token_hash, EmailApiToken.is_active.is_(True))) + api_token = result.scalar_one_or_none() + logger.debug(f"Database lookup result: {api_token is not None}") + + if api_token: + logger.debug(f"Found API token for user: {api_token.user_email}") + # Check if token is expired + if api_token.expires_at and api_token.expires_at < datetime.now(timezone.utc): + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="API token expired", + headers={"WWW-Authenticate": "Bearer"}, + ) + + # Check if token is revoked + is_revoked = await token_service.is_token_revoked(api_token.jti) + if is_revoked: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="API token has been revoked", + headers={"WWW-Authenticate": "Bearer"}, + ) + + # Use the email from the API token + email = api_token.user_email + logger.debug(f"API token authentication successful for email: {email}") + + # Update last_used timestamp + # First-Party + from mcpgateway.db import utc_now + + api_token.last_used = utc_now() + db.commit() + else: + logger.debug("API token not found in database, checking bootstrap admin key") + # Check for bootstrap admin API key + if settings.bootstrap_admin_api_key and credentials.credentials == settings.bootstrap_admin_api_key: + # Bootstrap admin key - create virtual admin user + email = settings.platform_admin_email + logger.debug("Bootstrap admin key authentication successful") + else: + logger.debug("No valid authentication method found") + # Neither JWT nor API token nor bootstrap key worked + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Invalid authentication credentials", + headers={"WWW-Authenticate": "Bearer"}, + ) + except HTTPException: + # Re-raise HTTP exceptions + raise + except Exception as e: + # Neither JWT nor API token validation worked + logger.debug(f"Database API token validation failed with exception: {e}") + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Invalid authentication credentials", + headers={"WWW-Authenticate": "Bearer"}, + ) + + # Get user from database + # First-Party + from mcpgateway.services.email_auth_service import EmailAuthService # pylint: disable=import-outside-toplevel + + auth_service = EmailAuthService(db) + user = await auth_service.get_user_by_email(email) + + if user is None: + # Special case for platform admin - if user doesn't exist but token is valid + # and email matches platform admin, create a virtual admin user object + if email == getattr(settings, "platform_admin_email", "admin@example.com"): + # Create a virtual admin user for authentication purposes + user = EmailUser( + email=email, + password_hash="", # Not used for JWT authentication + full_name=getattr(settings, "platform_admin_full_name", "Platform Administrator"), + is_admin=True, + is_active=True, + is_email_verified=True, + created_at=datetime.now(timezone.utc), + updated_at=datetime.now(timezone.utc), + ) + else: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="User not found", + headers={"WWW-Authenticate": "Bearer"}, + ) + + if not user.is_active: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Account disabled", + headers={"WWW-Authenticate": "Bearer"}, + ) + + return user diff --git a/mcpgateway/bootstrap_db.py b/mcpgateway/bootstrap_db.py index 22807e1b6..a60f5ee06 100644 --- a/mcpgateway/bootstrap_db.py +++ b/mcpgateway/bootstrap_db.py @@ -50,12 +50,201 @@ logger = logging_service.get_logger(__name__) +async def bootstrap_admin_user() -> None: + """ + Bootstrap the platform admin user from environment variables. + + Creates the admin user if email authentication is enabled and the user doesn't exist. + Also creates a personal team for the admin user if auto-creation is enabled. + """ + if not settings.email_auth_enabled: + logger.info("Email authentication disabled - skipping admin user bootstrap") + return + + try: + # Import services here to avoid circular imports + # First-Party + from mcpgateway.db import SessionLocal # pylint: disable=import-outside-toplevel + from mcpgateway.services.email_auth_service import EmailAuthService # pylint: disable=import-outside-toplevel + + with SessionLocal() as db: + auth_service = EmailAuthService(db) + + # Check if admin user already exists + existing_user = await auth_service.get_user_by_email(settings.platform_admin_email) + if existing_user: + logger.info(f"Admin user {settings.platform_admin_email} already exists - skipping creation") + return + + # Create admin user + logger.info(f"Creating platform admin user: {settings.platform_admin_email}") + admin_user = await auth_service.create_user( + email=settings.platform_admin_email, + password=settings.platform_admin_password, + full_name=settings.platform_admin_full_name, + is_admin=True, + ) + + # Mark admin user as email verified + # First-Party + from mcpgateway.db import utc_now # pylint: disable=import-outside-toplevel + + admin_user.email_verified_at = utc_now() + db.commit() + + # Personal team is automatically created during user creation if enabled + if settings.auto_create_personal_teams: + logger.info("Personal team automatically created for admin user") + + db.commit() + logger.info(f"Platform admin user created successfully: {settings.platform_admin_email}") + + except Exception as e: + logger.error(f"Failed to bootstrap admin user: {e}") + # Don't fail the entire bootstrap process if admin user creation fails + return + + +async def bootstrap_default_roles() -> None: + """Bootstrap default system roles and assign them to admin user. + + Creates essential RBAC roles and assigns administrative privileges + to the platform admin user. + """ + if not settings.email_auth_enabled: + logger.info("Email authentication disabled - skipping default roles bootstrap") + return + + try: + # First-Party + from mcpgateway.db import get_db # pylint: disable=import-outside-toplevel + from mcpgateway.services.email_auth_service import EmailAuthService # pylint: disable=import-outside-toplevel + from mcpgateway.services.role_service import RoleService # pylint: disable=import-outside-toplevel + + # Get database session + db_gen = get_db() + db = next(db_gen) + + try: + role_service = RoleService(db) + auth_service = EmailAuthService(db) + + # Check if admin user exists + admin_user = await auth_service.get_user_by_email(settings.platform_admin_email) + if not admin_user: + logger.info("Admin user not found - skipping role assignment") + return + + # Default system roles to create + default_roles = [ + {"name": "platform_admin", "description": "Platform administrator with all permissions", "scope": "global", "permissions": ["*"], "is_system_role": True}, # All permissions + { + "name": "team_admin", + "description": "Team administrator with team management permissions", + "scope": "team", + "permissions": ["teams.read", "teams.update", "teams.manage_members", "tools.read", "tools.execute", "resources.read", "prompts.read"], + "is_system_role": True, + }, + { + "name": "developer", + "description": "Developer with tool and resource access", + "scope": "team", + "permissions": ["tools.read", "tools.execute", "resources.read", "prompts.read"], + "is_system_role": True, + }, + {"name": "viewer", "description": "Read-only access to resources", "scope": "team", "permissions": ["tools.read", "resources.read", "prompts.read"], "is_system_role": True}, + ] + + # Create default roles + created_roles = [] + for role_def in default_roles: + try: + # Check if role already exists + existing_role = await role_service.get_role_by_name(role_def["name"], role_def["scope"]) + if existing_role: + logger.info(f"System role {role_def['name']} already exists - skipping") + created_roles.append(existing_role) + continue + + # Create the role + role = await role_service.create_role( + name=role_def["name"], + description=role_def["description"], + scope=role_def["scope"], + permissions=role_def["permissions"], + created_by=settings.platform_admin_email, + is_system_role=role_def["is_system_role"], + ) + created_roles.append(role) + logger.info(f"Created system role: {role.name}") + + except Exception as e: + logger.error(f"Failed to create role {role_def['name']}: {e}") + continue + + # Assign platform_admin role to admin user + platform_admin_role = next((r for r in created_roles if r.name == "platform_admin"), None) + if platform_admin_role: + try: + # Check if assignment already exists + existing_assignment = await role_service.get_user_role_assignment(user_email=admin_user.email, role_id=platform_admin_role.id, scope="global", scope_id=None) + + if not existing_assignment or not existing_assignment.is_active: + await role_service.assign_role_to_user(user_email=admin_user.email, role_id=platform_admin_role.id, scope="global", scope_id=None, granted_by="system") + logger.info(f"Assigned platform_admin role to {admin_user.email}") + else: + logger.info("Admin user already has platform_admin role") + + except Exception as e: + logger.error(f"Failed to assign platform_admin role: {e}") + + logger.info("Default RBAC roles bootstrap completed successfully") + + finally: + db.close() + + except Exception as e: + logger.error(f"Failed to bootstrap default roles: {e}") + # Don't fail the entire bootstrap process if role creation fails + return + + +def normalize_team_visibility() -> int: + """Normalize team visibility values to the supported set {private, public}. + + Any team with an unsupported visibility (e.g., 'team') is set to 'private'. + + Returns: + int: Number of teams updated + """ + try: + # First-Party + from mcpgateway.db import EmailTeam, SessionLocal # pylint: disable=import-outside-toplevel + + with SessionLocal() as db: + # Find teams with invalid visibility + invalid = db.query(EmailTeam).filter(EmailTeam.visibility.notin_(["private", "public"])) + count = 0 + for team in invalid.all(): + old = team.visibility + team.visibility = "private" + count += 1 + logger.info(f"Normalized team visibility: id={team.id} {old} -> private") + if count: + db.commit() + return count + except Exception as e: + logger.error(f"Failed to normalize team visibility: {e}") + return 0 + + async def main() -> None: """ Bootstrap or upgrade the database schema, then log readiness. Runs `create_all()` + `alembic stamp head` on an empty DB, otherwise just executes `alembic upgrade head`, leaving application data intact. + Also creates the platform admin user if email authentication is enabled. Args: None @@ -78,8 +267,19 @@ async def main() -> None: else: command.upgrade(cfg, "head") + # Post-upgrade normalization passes + updated = normalize_team_visibility() + if updated: + logger.info(f"Normalized {updated} team record(s) to supported visibility values") + logger.info("Database ready") + # Bootstrap admin user after database is ready + await bootstrap_admin_user() + + # Bootstrap default RBAC roles after admin user is created + await bootstrap_default_roles() + if __name__ == "__main__": asyncio.run(main()) diff --git a/mcpgateway/cache/resource_cache.py b/mcpgateway/cache/resource_cache.py index 8247692c9..4f6214fcd 100644 --- a/mcpgateway/cache/resource_cache.py +++ b/mcpgateway/cache/resource_cache.py @@ -18,7 +18,7 @@ >>> cache.get('a') 1 >>> import time - >>> time.sleep(1.5) # Use 1.5s to ensure expiration + >>> time.sleep(1.5) # Use 1.5s to ensure reliable expiration >>> cache.get('a') is None True >>> cache.set('a', 1) diff --git a/mcpgateway/cache/session_registry.py b/mcpgateway/cache/session_registry.py index 239d42f78..ef2da23c0 100644 --- a/mcpgateway/cache/session_registry.py +++ b/mcpgateway/cache/session_registry.py @@ -50,13 +50,18 @@ # Standard import asyncio +from datetime import datetime, timezone import json import logging import time +import traceback from typing import Any, Dict, Optional +from urllib.parse import urlparse +import uuid # Third-Party from fastapi import HTTPException, status +import jwt # First-Party from mcpgateway import __version__ @@ -1291,22 +1296,59 @@ async def generate_response(self, message: Dict[str, Any], transport: SSETranspo "params": params, "id": req_id, } - headers = {"Authorization": f"Bearer {user['token']}", "Content-Type": "application/json"} - rpc_url = base_url + "/rpc" + # Get the token from the current authentication context + # The user object doesn't contain the token directly, we need to reconstruct it + # Since we don't have access to the original headers here, we need a different approach + # We'll extract the token from the session or create a new admin token + token = None + if hasattr(user, "get") and "auth_token" in user: + token = user["auth_token"] + else: + # Fallback: create an admin token for internal RPC calls + now = datetime.now(timezone.utc) + payload = { + "sub": user.get("email", "system"), + "iss": settings.jwt_issuer, + "aud": settings.jwt_audience, + "iat": int(now.timestamp()), + "jti": str(uuid.uuid4()), + "user": { + "email": user.get("email", "system"), + "full_name": user.get("full_name", "System"), + "is_admin": True, # Internal calls should have admin access + "auth_provider": "internal", + }, + } + token = jwt.encode(payload, settings.jwt_secret_key, algorithm=settings.jwt_algorithm) + + headers = {"Authorization": f"Bearer {token}", "Content-Type": "application/json"} + # Extract root URL from base_url (remove /servers/{id} path) + parsed_url = urlparse(base_url) + root_url = f"{parsed_url.scheme}://{parsed_url.netloc}" + rpc_url = root_url + "/rpc" + + logger.info(f"SSE RPC: Making call to {rpc_url} with method={method}, params={params}") + async with ResilientHttpClient(client_args={"timeout": settings.federation_timeout, "verify": not settings.skip_ssl_verify}) as client: + logger.info(f"SSE RPC: Sending request to {rpc_url}") rpc_response = await client.post( url=rpc_url, json=rpc_input, headers=headers, ) + logger.info(f"SSE RPC: Got response status {rpc_response.status_code}") result = rpc_response.json() + logger.info(f"SSE RPC: Response content: {result}") result = result.get("result", {}) response = {"jsonrpc": "2.0", "result": result, "id": req_id} except JSONRPCError as e: + logger.error(f"SSE RPC: JSON-RPC error: {e}") result = e.to_dict() response = {"jsonrpc": "2.0", "error": result["error"], "id": req_id} except Exception as e: + logger.error(f"SSE RPC: Exception during RPC call: {type(e).__name__}: {e}") + logger.error(f"SSE RPC: Traceback: {traceback.format_exc()}") result = {"code": -32000, "message": "Internal error", "data": str(e)} response = {"jsonrpc": "2.0", "error": result, "id": req_id} diff --git a/mcpgateway/config.py b/mcpgateway/config.py index cc7c87956..da63e4954 100644 --- a/mcpgateway/config.py +++ b/mcpgateway/config.py @@ -77,6 +77,41 @@ logger = logging.getLogger(__name__) +def _normalize_env_list_vars() -> None: + """Normalize list-typed env vars to valid JSON arrays. + + Ensures env values parse cleanly when providers expect JSON for complex types. + If a value is empty or CSV, convert to a JSON array string. + """ + keys = [ + "SSO_TRUSTED_DOMAINS", + "SSO_AUTO_ADMIN_DOMAINS", + "SSO_GITHUB_ADMIN_ORGS", + "SSO_GOOGLE_ADMIN_DOMAINS", + ] + for key in keys: + raw = os.environ.get(key) + if raw is None: + continue + s = raw.strip() + if not s: + os.environ[key] = "[]" + continue + if s.startswith("["): + # Already JSON-like, keep as is + try: + json.loads(s) + continue + except Exception: + pass + # Convert CSV to JSON array + items = [item.strip() for item in s.split(",") if item.strip()] + os.environ[key] = json.dumps(items) + + +_normalize_env_list_vars() + + class Settings(BaseSettings): """ MCP Gateway configuration settings. @@ -130,11 +165,47 @@ class Settings(BaseSettings): basic_auth_password: str = "changeme" jwt_secret_key: str = "my-test-key" jwt_algorithm: str = "HS256" + jwt_audience: str = "mcpgateway-api" + jwt_issuer: str = "mcpgateway" auth_required: bool = True token_expiry: int = 10080 # minutes + # Bootstrap Admin API Key (for initial setup and automation) + bootstrap_admin_api_key: Optional[str] = Field(default=None, description="Master admin API key for initial setup and automation. Set in production for secure admin access.") + require_token_expiration: bool = Field(default=False, description="Require all JWT tokens to have expiration claims") # Default to flexible mode for backward compatibility + # SSO Configuration + sso_enabled: bool = Field(default=False, description="Enable Single Sign-On authentication") + sso_github_enabled: bool = Field(default=False, description="Enable GitHub OAuth authentication") + sso_github_client_id: Optional[str] = Field(default=None, description="GitHub OAuth client ID") + sso_github_client_secret: Optional[str] = Field(default=None, description="GitHub OAuth client secret") + + sso_google_enabled: bool = Field(default=False, description="Enable Google OAuth authentication") + sso_google_client_id: Optional[str] = Field(default=None, description="Google OAuth client ID") + sso_google_client_secret: Optional[str] = Field(default=None, description="Google OAuth client secret") + + sso_ibm_verify_enabled: bool = Field(default=False, description="Enable IBM Security Verify OIDC authentication") + sso_ibm_verify_client_id: Optional[str] = Field(default=None, description="IBM Security Verify client ID") + sso_ibm_verify_client_secret: Optional[str] = Field(default=None, description="IBM Security Verify client secret") + sso_ibm_verify_issuer: Optional[str] = Field(default=None, description="IBM Security Verify OIDC issuer URL") + + sso_okta_enabled: bool = Field(default=False, description="Enable Okta OIDC authentication") + sso_okta_client_id: Optional[str] = Field(default=None, description="Okta client ID") + sso_okta_client_secret: Optional[str] = Field(default=None, description="Okta client secret") + sso_okta_issuer: Optional[str] = Field(default=None, description="Okta issuer URL") + + # SSO Settings + sso_auto_create_users: bool = Field(default=True, description="Automatically create users from SSO providers") + sso_trusted_domains: Annotated[list[str], NoDecode()] = Field(default_factory=list, description="Trusted email domains (CSV or JSON list)") + sso_preserve_admin_auth: bool = Field(default=True, description="Preserve local admin authentication when SSO is enabled") + + # SSO Admin Assignment Settings + sso_auto_admin_domains: Annotated[list[str], NoDecode()] = Field(default_factory=list, description="Admin domains (CSV or JSON list)") + sso_github_admin_orgs: Annotated[list[str], NoDecode()] = Field(default_factory=list, description="GitHub orgs granting admin (CSV/JSON)") + sso_google_admin_domains: Annotated[list[str], NoDecode()] = Field(default_factory=list, description="Google admin domains (CSV/JSON)") + sso_require_admin_approval: bool = Field(default=False, description="Require admin approval for new SSO registrations") + # MCP Client Authentication mcp_client_auth_enabled: bool = Field(default=True, description="Enable JWT authentication for MCP client operations") trust_proxy_auth: bool = Field( @@ -150,6 +221,36 @@ class Settings(BaseSettings): oauth_request_timeout: int = Field(default=30, description="OAuth request timeout in seconds") oauth_max_retries: int = Field(default=3, description="Maximum retries for OAuth token requests") + # Email-Based Authentication (Epic 001) + email_auth_enabled: bool = Field(default=True, description="Enable email-based authentication") + platform_admin_email: str = Field(default="admin@example.com", description="Platform administrator email address") + platform_admin_password: str = Field(default="changeme", description="Platform administrator password") + platform_admin_full_name: str = Field(default="Platform Administrator", description="Platform administrator full name") + + # Argon2id Password Hashing Configuration + argon2id_time_cost: int = Field(default=3, description="Argon2id time cost (number of iterations)") + argon2id_memory_cost: int = Field(default=65536, description="Argon2id memory cost in KiB") + argon2id_parallelism: int = Field(default=1, description="Argon2id parallelism (number of threads)") + + # Password Policy Configuration + password_min_length: int = Field(default=8, description="Minimum password length") + password_require_uppercase: bool = Field(default=False, description="Require uppercase letters in passwords") + password_require_lowercase: bool = Field(default=False, description="Require lowercase letters in passwords") + password_require_numbers: bool = Field(default=False, description="Require numbers in passwords") + password_require_special: bool = Field(default=False, description="Require special characters in passwords") + + # Account Security Configuration + max_failed_login_attempts: int = Field(default=5, description="Maximum failed login attempts before account lockout") + account_lockout_duration_minutes: int = Field(default=30, description="Account lockout duration in minutes") + + # Personal Teams Configuration (Epic 002) + auto_create_personal_teams: bool = Field(default=True, description="Enable automatic personal team creation for new users") + personal_team_prefix: str = Field(default="personal", description="Personal team naming prefix") + max_teams_per_user: int = Field(default=50, description="Maximum number of teams a user can belong to") + max_members_per_team: int = Field(default=100, description="Maximum number of members per team") + invitation_expiry_days: int = Field(default=7, description="Number of days before team invitations expire") + require_email_verification_for_invites: bool = Field(default=True, description="Require email verification for team invitations") + # UI/Admin Feature Flags mcpgateway_ui_enabled: bool = False mcpgateway_admin_api_enabled: bool = False @@ -505,6 +606,47 @@ def _auto_enable_security_txt(cls, v, info): return bool(info.data["well_known_security_txt"].strip()) return v + # ------------------------------- + # Flexible list parsing for envs + # ------------------------------- + @field_validator( + "sso_trusted_domains", + "sso_auto_admin_domains", + "sso_github_admin_orgs", + "sso_google_admin_domains", + mode="before", + ) + @classmethod + def _parse_list_from_env(cls, v): # type: ignore[override] + """Parse list fields from environment values. + + Accepts either JSON arrays (e.g. '["a","b"]') or comma-separated + strings (e.g. 'a,b'). Empty or None becomes an empty list. + + Args: + v: The value to parse, can be None, list, or string. + + Returns: + list: Parsed list of values. + """ + if v is None: + return [] + if isinstance(v, list): + return v + if isinstance(v, str): + s = v.strip() + if not s: + return [] + if s.startswith("["): + try: + parsed = json.loads(s) + return parsed if isinstance(parsed, list) else [] + except Exception: + logger.warning("Invalid JSON list in env for list field; falling back to CSV parsing") + # CSV fallback + return [item.strip() for item in s.split(",") if item.strip()] + return v + @property def api_key(self) -> str: """ diff --git a/mcpgateway/db.py b/mcpgateway/db.py index 9aa0fca4c..4a42dc4a6 100644 --- a/mcpgateway/db.py +++ b/mcpgateway/db.py @@ -22,26 +22,36 @@ """ # Standard -from datetime import datetime, timezone -from typing import Any, Dict, Generator, List, Optional +from datetime import datetime, timedelta, timezone +import logging +from typing import Any, Dict, Generator, List, Optional, TYPE_CHECKING import uuid # Third-Party import jsonschema -from sqlalchemy import Boolean, Column, create_engine, DateTime, event, Float, ForeignKey, func, Integer, JSON, make_url, select, String, Table, Text, UniqueConstraint +from sqlalchemy import BigInteger, Boolean, Column, create_engine, DateTime, event, Float, ForeignKey, func, Index, Integer, JSON, make_url, select, String, Table, Text, UniqueConstraint from sqlalchemy.event import listen from sqlalchemy.exc import SQLAlchemyError from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column, relationship, Session, sessionmaker from sqlalchemy.orm.attributes import get_history +from sqlalchemy.pool import QueuePool # First-Party from mcpgateway.config import settings -from mcpgateway.models import ResourceContent from mcpgateway.utils.create_slug import slugify from mcpgateway.utils.db_isready import wait_for_db_ready from mcpgateway.validators import SecurityValidator +logger = logging.getLogger(__name__) + +if TYPE_CHECKING: + # First-Party + from mcpgateway.models import ResourceContent + +# ResourceContent will be imported locally where needed to avoid circular imports +# EmailUser models moved to this file to avoid circular imports + # --------------------------------------------------------------------------- # 1. Parse the URL so we can inspect backend ("postgresql", "sqlite", ...) # and the specific driver ("psycopg2", "asyncpg", empty string = default). @@ -80,10 +90,25 @@ # 5. Build the Engine with a single, clean connect_args mapping. # --------------------------------------------------------------------------- if backend == "sqlite": - # SQLite doesn't support pool overflow/timeout parameters + # SQLite supports connection pooling with proper configuration + # For SQLite, we use a smaller pool size since it's file-based + sqlite_pool_size = min(settings.db_pool_size, 50) # Cap at 50 for SQLite + sqlite_max_overflow = min(settings.db_max_overflow, 20) # Cap at 20 for SQLite + + logger.info("Configuring SQLite with pool_size=%s, max_overflow=%s", sqlite_pool_size, sqlite_max_overflow) + engine = create_engine( settings.database_url, + pool_pre_ping=True, # quick liveness check per checkout + pool_size=sqlite_pool_size, + max_overflow=sqlite_max_overflow, + pool_timeout=settings.db_pool_timeout, + pool_recycle=settings.db_pool_recycle, + # SQLite specific optimizations + poolclass=QueuePool, # Explicit pool class connect_args=connect_args, + # Log pool events in debug mode + echo_pool=settings.log_level == "DEBUG", ) else: # Other databases support full pooling configuration @@ -160,6 +185,1038 @@ class Base(DeclarativeBase): """Base class for all models.""" +# --------------------------------------------------------------------------- +# RBAC Models (Epic 004) - SQLAlchemy Database Models +# --------------------------------------------------------------------------- + + +class Role(Base): + """Role model for RBAC system.""" + + __tablename__ = "roles" + + # Primary key + id: Mapped[str] = mapped_column(String(36), primary_key=True, default=lambda: str(uuid.uuid4())) + + # Role metadata + name: Mapped[str] = mapped_column(String(255), nullable=False) + description: Mapped[Optional[str]] = mapped_column(Text, nullable=True) + scope: Mapped[str] = mapped_column(String(20), nullable=False) # 'global', 'team', 'personal' + + # Permissions and inheritance + permissions: Mapped[List[str]] = mapped_column(JSON, nullable=False, default=list) + inherits_from: Mapped[Optional[str]] = mapped_column(String(36), ForeignKey("roles.id"), nullable=True) + + # Metadata + created_by: Mapped[str] = mapped_column(String(255), ForeignKey("email_users.email"), nullable=False) + is_system_role: Mapped[bool] = mapped_column(Boolean, nullable=False, default=False) + is_active: Mapped[bool] = mapped_column(Boolean, nullable=False, default=True) + + # Timestamps + created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), nullable=False, default=utc_now) + updated_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), nullable=False, default=utc_now, onupdate=utc_now) + + # Relationships + parent_role: Mapped[Optional["Role"]] = relationship("Role", remote_side=[id], backref="child_roles") + user_assignments: Mapped[List["UserRole"]] = relationship("UserRole", back_populates="role", cascade="all, delete-orphan") + + def get_effective_permissions(self) -> List[str]: + """Get all permissions including inherited ones. + + Returns: + List of permission strings including inherited permissions + """ + effective_permissions = set(self.permissions) + if self.parent_role: + effective_permissions.update(self.parent_role.get_effective_permissions()) + return sorted(list(effective_permissions)) + + +class UserRole(Base): + """User role assignment model.""" + + __tablename__ = "user_roles" + + # Primary key + id: Mapped[str] = mapped_column(String(36), primary_key=True, default=lambda: str(uuid.uuid4())) + + # Assignment details + user_email: Mapped[str] = mapped_column(String(255), ForeignKey("email_users.email"), nullable=False) + role_id: Mapped[str] = mapped_column(String(36), ForeignKey("roles.id"), nullable=False) + scope: Mapped[str] = mapped_column(String(20), nullable=False) # 'global', 'team', 'personal' + scope_id: Mapped[Optional[str]] = mapped_column(String(36), nullable=True) # Team ID if team-scoped + + # Grant metadata + granted_by: Mapped[str] = mapped_column(String(255), ForeignKey("email_users.email"), nullable=False) + granted_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), nullable=False, default=utc_now) + expires_at: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True), nullable=True) + is_active: Mapped[bool] = mapped_column(Boolean, nullable=False, default=True) + + # Relationships + role: Mapped["Role"] = relationship("Role", back_populates="user_assignments") + + def is_expired(self) -> bool: + """Check if the role assignment has expired. + + Returns: + True if assignment has expired, False otherwise + """ + if not self.expires_at: + return False + return utc_now() > self.expires_at + + +class PermissionAuditLog(Base): + """Permission audit log model.""" + + __tablename__ = "permission_audit_log" + + # Primary key + id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True) + + # Audit metadata + timestamp: Mapped[datetime] = mapped_column(DateTime(timezone=True), nullable=False, default=utc_now) + user_email: Mapped[Optional[str]] = mapped_column(String(255), nullable=True) + + # Permission details + permission: Mapped[str] = mapped_column(String(100), nullable=False) + resource_type: Mapped[Optional[str]] = mapped_column(String(50), nullable=True) + resource_id: Mapped[Optional[str]] = mapped_column(String(255), nullable=True) + team_id: Mapped[Optional[str]] = mapped_column(String(36), nullable=True) + + # Result + granted: Mapped[bool] = mapped_column(Boolean, nullable=False) + roles_checked: Mapped[Optional[Dict]] = mapped_column(JSON, nullable=True) + + # Request metadata + ip_address: Mapped[Optional[str]] = mapped_column(String(45), nullable=True) # IPv6 max length + user_agent: Mapped[Optional[str]] = mapped_column(Text, nullable=True) + + +# Permission constants for the system +class Permissions: + """System permission constants.""" + + # User permissions + USERS_CREATE = "users.create" + USERS_READ = "users.read" + USERS_UPDATE = "users.update" + USERS_DELETE = "users.delete" + USERS_INVITE = "users.invite" + + # Team permissions + TEAMS_CREATE = "teams.create" + TEAMS_READ = "teams.read" + TEAMS_UPDATE = "teams.update" + TEAMS_DELETE = "teams.delete" + TEAMS_MANAGE_MEMBERS = "teams.manage_members" + + # Tool permissions + TOOLS_CREATE = "tools.create" + TOOLS_READ = "tools.read" + TOOLS_UPDATE = "tools.update" + TOOLS_DELETE = "tools.delete" + TOOLS_EXECUTE = "tools.execute" + + # Resource permissions + RESOURCES_CREATE = "resources.create" + RESOURCES_READ = "resources.read" + RESOURCES_UPDATE = "resources.update" + RESOURCES_DELETE = "resources.delete" + RESOURCES_SHARE = "resources.share" + + # Prompt permissions + PROMPTS_CREATE = "prompts.create" + PROMPTS_READ = "prompts.read" + PROMPTS_UPDATE = "prompts.update" + PROMPTS_DELETE = "prompts.delete" + PROMPTS_EXECUTE = "prompts.execute" + + # Server permissions + SERVERS_CREATE = "servers.create" + SERVERS_READ = "servers.read" + SERVERS_UPDATE = "servers.update" + SERVERS_DELETE = "servers.delete" + SERVERS_MANAGE = "servers.manage" + + # Token permissions + TOKENS_CREATE = "tokens.create" + TOKENS_READ = "tokens.read" + TOKENS_REVOKE = "tokens.revoke" + TOKENS_SCOPE = "tokens.scope" + + # Admin permissions + ADMIN_SYSTEM_CONFIG = "admin.system_config" + ADMIN_USER_MANAGEMENT = "admin.user_management" + ADMIN_SECURITY_AUDIT = "admin.security_audit" + + # Special permissions + ALL_PERMISSIONS = "*" # Wildcard for all permissions + + @classmethod + def get_all_permissions(cls) -> List[str]: + """Get list of all defined permissions. + + Returns: + List of all permission strings defined in the class + """ + permissions = [] + for attr_name in dir(cls): + if not attr_name.startswith("_") and attr_name.isupper() and attr_name != "ALL_PERMISSIONS": + attr_value = getattr(cls, attr_name) + if isinstance(attr_value, str) and "." in attr_value: + permissions.append(attr_value) + return sorted(permissions) + + @classmethod + def get_permissions_by_resource(cls) -> Dict[str, List[str]]: + """Get permissions organized by resource type. + + Returns: + Dictionary mapping resource types to their permissions + """ + resource_permissions = {} + for permission in cls.get_all_permissions(): + resource_type = permission.split(".")[0] + if resource_type not in resource_permissions: + resource_permissions[resource_type] = [] + resource_permissions[resource_type].append(permission) + return resource_permissions + + +# --------------------------------------------------------------------------- +# Email-based User Authentication Models (Epic 001) +# --------------------------------------------------------------------------- + + +class EmailUser(Base): + """Email-based user model for authentication. + + This model provides email-based authentication as the foundation + for all multi-user features. Users are identified by email addresses + instead of usernames. + + Attributes: + email (str): Primary key, unique email identifier + password_hash (str): Argon2id hashed password + full_name (str): Optional display name for professional appearance + is_admin (bool): Admin privileges flag + is_active (bool): Account status flag + auth_provider (str): Authentication provider ('local', 'github', etc.) + password_hash_type (str): Type of password hash used + failed_login_attempts (int): Count of failed login attempts + locked_until (datetime): Account lockout expiration + created_at (datetime): Account creation timestamp + updated_at (datetime): Last account update timestamp + last_login (datetime): Last successful login timestamp + email_verified_at (datetime): Email verification timestamp + + Examples: + >>> user = EmailUser( + ... email="alice@example.com", + ... password_hash="$argon2id$v=19$m=65536,t=3,p=1$...", + ... full_name="Alice Smith", + ... is_admin=False + ... ) + >>> user.email + 'alice@example.com' + >>> user.is_email_verified() + False + >>> user.is_account_locked() + False + """ + + __tablename__ = "email_users" + + # Core identity fields + email: Mapped[str] = mapped_column(String(255), primary_key=True, index=True) + password_hash: Mapped[str] = mapped_column(String(255), nullable=False) + full_name: Mapped[Optional[str]] = mapped_column(String(255), nullable=True) + is_admin: Mapped[bool] = mapped_column(Boolean, default=False, nullable=False) + + # Status fields + is_active: Mapped[bool] = mapped_column(Boolean, default=True, nullable=False) + email_verified_at: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True), nullable=True) + + # Security fields + auth_provider: Mapped[str] = mapped_column(String(50), default="local", nullable=False) + password_hash_type: Mapped[str] = mapped_column(String(20), default="argon2id", nullable=False) + failed_login_attempts: Mapped[int] = mapped_column(Integer, default=0, nullable=False) + locked_until: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True), nullable=True) + + # Timestamps + created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=utc_now, nullable=False) + updated_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=utc_now, onupdate=utc_now, nullable=False) + last_login: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True), nullable=True) + + def __repr__(self) -> str: + """String representation of the user. + + Returns: + str: String representation of EmailUser instance + """ + return f"" + + def is_email_verified(self) -> bool: + """Check if the user's email is verified. + + Returns: + bool: True if email is verified, False otherwise + + Examples: + >>> user = EmailUser(email="test@example.com") + >>> user.is_email_verified() + False + >>> user.email_verified_at = utc_now() + >>> user.is_email_verified() + True + """ + return self.email_verified_at is not None + + def is_account_locked(self) -> bool: + """Check if the account is currently locked. + + Returns: + bool: True if account is locked, False otherwise + + Examples: + >>> from datetime import timedelta + >>> user = EmailUser(email="test@example.com") + >>> user.is_account_locked() + False + >>> user.locked_until = utc_now() + timedelta(hours=1) + >>> user.is_account_locked() + True + """ + if self.locked_until is None: + return False + return utc_now() < self.locked_until + + def get_display_name(self) -> str: + """Get the user's display name. + + Returns the full_name if available, otherwise extracts + the local part from the email address. + + Returns: + str: Display name for the user + + Examples: + >>> user = EmailUser(email="john@example.com", full_name="John Doe") + >>> user.get_display_name() + 'John Doe' + >>> user_no_name = EmailUser(email="jane@example.com") + >>> user_no_name.get_display_name() + 'jane' + """ + if self.full_name: + return self.full_name + return self.email.split("@")[0] + + def reset_failed_attempts(self) -> None: + """Reset failed login attempts counter. + + Called after successful authentication to reset the + failed attempts counter and clear any account lockout. + + Examples: + >>> user = EmailUser(email="test@example.com", failed_login_attempts=3) + >>> user.reset_failed_attempts() + >>> user.failed_login_attempts + 0 + >>> user.locked_until is None + True + """ + self.failed_login_attempts = 0 + self.locked_until = None + self.last_login = utc_now() + + def increment_failed_attempts(self, max_attempts: int = 5, lockout_duration_minutes: int = 30) -> bool: + """Increment failed login attempts and potentially lock account. + + Args: + max_attempts: Maximum allowed failed attempts before lockout + lockout_duration_minutes: Duration of lockout in minutes + + Returns: + bool: True if account is now locked, False otherwise + + Examples: + >>> user = EmailUser(email="test@example.com", password_hash="test", failed_login_attempts=0) + >>> user.increment_failed_attempts(max_attempts=3) + False + >>> user.failed_login_attempts + 1 + >>> for _ in range(2): + ... user.increment_failed_attempts(max_attempts=3) + False + True + >>> user.is_account_locked() + True + """ + self.failed_login_attempts += 1 + + if self.failed_login_attempts >= max_attempts: + self.locked_until = utc_now() + timedelta(minutes=lockout_duration_minutes) + return True + + return False + + # Team relationships + team_memberships: Mapped[List["EmailTeamMember"]] = relationship("EmailTeamMember", foreign_keys="EmailTeamMember.user_email", back_populates="user") + created_teams: Mapped[List["EmailTeam"]] = relationship("EmailTeam", foreign_keys="EmailTeam.created_by", back_populates="creator") + sent_invitations: Mapped[List["EmailTeamInvitation"]] = relationship("EmailTeamInvitation", foreign_keys="EmailTeamInvitation.invited_by", back_populates="inviter") + + # API token relationships + api_tokens: Mapped[List["EmailApiToken"]] = relationship("EmailApiToken", back_populates="user", cascade="all, delete-orphan") + + def get_teams(self) -> List["EmailTeam"]: + """Get all teams this user is a member of. + + Returns: + List[EmailTeam]: List of teams the user belongs to + + Examples: + >>> user = EmailUser(email="user@example.com") + >>> teams = user.get_teams() + >>> isinstance(teams, list) + True + """ + return [membership.team for membership in self.team_memberships if membership.is_active] + + def get_personal_team(self) -> Optional["EmailTeam"]: + """Get the user's personal team. + + Returns: + EmailTeam: The user's personal team or None if not found + + Examples: + >>> user = EmailUser(email="user@example.com") + >>> personal_team = user.get_personal_team() + """ + for team in self.created_teams: + if team.is_personal and team.is_active: + return team + return None + + def is_team_member(self, team_id: str) -> bool: + """Check if user is a member of the specified team. + + Args: + team_id: ID of the team to check + + Returns: + bool: True if user is a member, False otherwise + + Examples: + >>> user = EmailUser(email="user@example.com") + >>> user.is_team_member("team-123") + False + """ + return any(membership.team_id == team_id and membership.is_active for membership in self.team_memberships) + + def get_team_role(self, team_id: str) -> Optional[str]: + """Get user's role in a specific team. + + Args: + team_id: ID of the team to check + + Returns: + str: User's role or None if not a member + + Examples: + >>> user = EmailUser(email="user@example.com") + >>> role = user.get_team_role("team-123") + """ + for membership in self.team_memberships: + if membership.team_id == team_id and membership.is_active: + return membership.role + return None + + +class EmailAuthEvent(Base): + """Authentication event logging for email users. + + This model tracks all authentication attempts for auditing, + security monitoring, and compliance purposes. + + Attributes: + id (int): Primary key + timestamp (datetime): Event timestamp + user_email (str): Email of the user + event_type (str): Type of authentication event + success (bool): Whether the authentication was successful + ip_address (str): Client IP address + user_agent (str): Client user agent string + failure_reason (str): Reason for authentication failure + details (dict): Additional event details as JSON + + Examples: + >>> event = EmailAuthEvent( + ... user_email="alice@example.com", + ... event_type="login", + ... success=True, + ... ip_address="192.168.1.100" + ... ) + >>> event.event_type + 'login' + >>> event.success + True + """ + + __tablename__ = "email_auth_events" + + # Primary key + id: Mapped[int] = mapped_column(Integer, primary_key=True, autoincrement=True) + + # Event details + timestamp: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=utc_now, nullable=False) + user_email: Mapped[Optional[str]] = mapped_column(String(255), nullable=True, index=True) + event_type: Mapped[str] = mapped_column(String(50), nullable=False) + success: Mapped[bool] = mapped_column(Boolean, nullable=False) + + # Client information + ip_address: Mapped[Optional[str]] = mapped_column(String(45), nullable=True) # IPv6 compatible + user_agent: Mapped[Optional[str]] = mapped_column(Text, nullable=True) + + # Failure information + failure_reason: Mapped[Optional[str]] = mapped_column(String(255), nullable=True) + details: Mapped[Optional[str]] = mapped_column(Text, nullable=True) # JSON string + + def __repr__(self) -> str: + """String representation of the auth event. + + Returns: + str: String representation of EmailAuthEvent instance + """ + return f"" + + @classmethod + def create_login_attempt(cls, user_email: str, success: bool, ip_address: str = None, user_agent: str = None, failure_reason: str = None) -> "EmailAuthEvent": + """Create a login attempt event. + + Args: + user_email: Email address of the user + success: Whether the login was successful + ip_address: Client IP address + user_agent: Client user agent + failure_reason: Reason for failure (if applicable) + + Returns: + EmailAuthEvent: New authentication event + + Examples: + >>> event = EmailAuthEvent.create_login_attempt( + ... user_email="user@example.com", + ... success=True, + ... ip_address="192.168.1.1" + ... ) + >>> event.event_type + 'login' + >>> event.success + True + """ + return cls(user_email=user_email, event_type="login", success=success, ip_address=ip_address, user_agent=user_agent, failure_reason=failure_reason) + + @classmethod + def create_registration_event(cls, user_email: str, success: bool, ip_address: str = None, user_agent: str = None, failure_reason: str = None) -> "EmailAuthEvent": + """Create a registration event. + + Args: + user_email: Email address of the user + success: Whether the registration was successful + ip_address: Client IP address + user_agent: Client user agent + failure_reason: Reason for failure (if applicable) + + Returns: + EmailAuthEvent: New authentication event + """ + return cls(user_email=user_email, event_type="registration", success=success, ip_address=ip_address, user_agent=user_agent, failure_reason=failure_reason) + + @classmethod + def create_password_change_event(cls, user_email: str, success: bool, ip_address: str = None, user_agent: str = None) -> "EmailAuthEvent": + """Create a password change event. + + Args: + user_email: Email address of the user + success: Whether the password change was successful + ip_address: Client IP address + user_agent: Client user agent + + Returns: + EmailAuthEvent: New authentication event + """ + return cls(user_email=user_email, event_type="password_change", success=success, ip_address=ip_address, user_agent=user_agent) + + +class EmailTeam(Base): + """Email-based team model for multi-team collaboration. + + This model represents teams that users can belong to, with automatic + personal team creation and role-based access control. + + Attributes: + id (str): Primary key UUID + name (str): Team display name + slug (str): URL-friendly team identifier + description (str): Team description + created_by (str): Email of the user who created the team + is_personal (bool): Whether this is a personal team + visibility (str): Team visibility (private, public) + max_members (int): Maximum number of team members allowed + created_at (datetime): Team creation timestamp + updated_at (datetime): Last update timestamp + is_active (bool): Whether the team is active + + Examples: + >>> team = EmailTeam( + ... name="Engineering Team", + ... slug="engineering-team", + ... created_by="admin@example.com", + ... is_personal=False + ... ) + >>> team.name + 'Engineering Team' + >>> team.is_personal + False + """ + + __tablename__ = "email_teams" + + # Primary key + id: Mapped[str] = mapped_column(String(36), primary_key=True, default=lambda: uuid.uuid4().hex) + + # Basic team information + name: Mapped[str] = mapped_column(String(255), nullable=False) + slug: Mapped[str] = mapped_column(String(255), unique=True, nullable=False) + description: Mapped[Optional[str]] = mapped_column(Text, nullable=True) + created_by: Mapped[str] = mapped_column(String(255), ForeignKey("email_users.email"), nullable=False) + + # Team settings + is_personal: Mapped[bool] = mapped_column(Boolean, default=False, nullable=False) + visibility: Mapped[str] = mapped_column(String(20), default="private", nullable=False) + max_members: Mapped[Optional[int]] = mapped_column(Integer, nullable=True) + + # Timestamps + created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=utc_now, nullable=False) + updated_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=utc_now, onupdate=utc_now, nullable=False) + is_active: Mapped[bool] = mapped_column(Boolean, default=True, nullable=False) + + # Relationships + members: Mapped[List["EmailTeamMember"]] = relationship("EmailTeamMember", back_populates="team", cascade="all, delete-orphan") + invitations: Mapped[List["EmailTeamInvitation"]] = relationship("EmailTeamInvitation", back_populates="team", cascade="all, delete-orphan") + api_tokens: Mapped[List["EmailApiToken"]] = relationship("EmailApiToken", back_populates="team", cascade="all, delete-orphan") + creator: Mapped["EmailUser"] = relationship("EmailUser", foreign_keys=[created_by]) + + def __repr__(self) -> str: + """String representation of the team. + + Returns: + str: String representation of EmailTeam instance + """ + return f"" + + def get_member_count(self) -> int: + """Get the current number of team members. + + Returns: + int: Number of active team members + + Examples: + >>> team = EmailTeam(name="Test Team", slug="test-team", created_by="admin@example.com") + >>> team.get_member_count() + 0 + """ + return len([m for m in self.members if m.is_active]) + + def is_member(self, user_email: str) -> bool: + """Check if a user is a member of this team. + + Args: + user_email: Email address to check + + Returns: + bool: True if user is an active member, False otherwise + + Examples: + >>> team = EmailTeam(name="Test Team", slug="test-team", created_by="admin@example.com") + >>> team.is_member("admin@example.com") + False + """ + return any(m.user_email == user_email and m.is_active for m in self.members) + + def get_member_role(self, user_email: str) -> Optional[str]: + """Get the role of a user in this team. + + Args: + user_email: Email address to check + + Returns: + str: User's role or None if not a member + + Examples: + >>> team = EmailTeam(name="Test Team", slug="test-team", created_by="admin@example.com") + >>> team.get_member_role("admin@example.com") + """ + for member in self.members: + if member.user_email == user_email and member.is_active: + return member.role + return None + + +class EmailTeamMember(Base): + """Team membership model linking users to teams with roles. + + This model represents the many-to-many relationship between users and teams + with additional role information and audit trails. + + Attributes: + id (str): Primary key UUID + team_id (str): Foreign key to email_teams + user_email (str): Foreign key to email_users + role (str): Member role (owner, member) + joined_at (datetime): When the user joined the team + invited_by (str): Email of the user who invited this member + is_active (bool): Whether the membership is active + + Examples: + >>> member = EmailTeamMember( + ... team_id="team-123", + ... user_email="user@example.com", + ... role="member", + ... invited_by="admin@example.com" + ... ) + >>> member.role + 'member' + """ + + __tablename__ = "email_team_members" + + # Primary key + id: Mapped[str] = mapped_column(String(36), primary_key=True, default=lambda: uuid.uuid4().hex) + + # Foreign keys + team_id: Mapped[str] = mapped_column(String(36), ForeignKey("email_teams.id"), nullable=False) + user_email: Mapped[str] = mapped_column(String(255), ForeignKey("email_users.email"), nullable=False) + + # Membership details + role: Mapped[str] = mapped_column(String(50), default="member", nullable=False) + joined_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=utc_now, nullable=False) + invited_by: Mapped[Optional[str]] = mapped_column(String(255), ForeignKey("email_users.email"), nullable=True) + is_active: Mapped[bool] = mapped_column(Boolean, default=True, nullable=False) + + # Relationships + team: Mapped["EmailTeam"] = relationship("EmailTeam", back_populates="members") + user: Mapped["EmailUser"] = relationship("EmailUser", foreign_keys=[user_email]) + inviter: Mapped[Optional["EmailUser"]] = relationship("EmailUser", foreign_keys=[invited_by]) + + # Unique constraint to prevent duplicate memberships + __table_args__ = (UniqueConstraint("team_id", "user_email", name="uq_team_member"),) + + def __repr__(self) -> str: + """String representation of the team member. + + Returns: + str: String representation of EmailTeamMember instance + """ + return f"" + + +class EmailTeamInvitation(Base): + """Team invitation model for managing team member invitations. + + This model tracks invitations sent to users to join teams, including + expiration dates and invitation tokens. + + Attributes: + id (str): Primary key UUID + team_id (str): Foreign key to email_teams + email (str): Email address of the invited user + role (str): Role the user will have when they accept + invited_by (str): Email of the user who sent the invitation + invited_at (datetime): When the invitation was sent + expires_at (datetime): When the invitation expires + token (str): Unique invitation token + is_active (bool): Whether the invitation is still active + + Examples: + >>> invitation = EmailTeamInvitation( + ... team_id="team-123", + ... email="newuser@example.com", + ... role="member", + ... invited_by="admin@example.com" + ... ) + >>> invitation.role + 'member' + """ + + __tablename__ = "email_team_invitations" + + # Primary key + id: Mapped[str] = mapped_column(String(36), primary_key=True, default=lambda: uuid.uuid4().hex) + + # Foreign keys + team_id: Mapped[str] = mapped_column(String(36), ForeignKey("email_teams.id"), nullable=False) + + # Invitation details + email: Mapped[str] = mapped_column(String(255), nullable=False) + role: Mapped[str] = mapped_column(String(50), default="member", nullable=False) + invited_by: Mapped[str] = mapped_column(String(255), ForeignKey("email_users.email"), nullable=False) + + # Timing + invited_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=utc_now, nullable=False) + expires_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), nullable=False) + + # Security + token: Mapped[str] = mapped_column(String(500), unique=True, nullable=False) + is_active: Mapped[bool] = mapped_column(Boolean, default=True, nullable=False) + + # Relationships + team: Mapped["EmailTeam"] = relationship("EmailTeam", back_populates="invitations") + inviter: Mapped["EmailUser"] = relationship("EmailUser", foreign_keys=[invited_by]) + + def __repr__(self) -> str: + """String representation of the team invitation. + + Returns: + str: String representation of EmailTeamInvitation instance + """ + return f"" + + def is_expired(self) -> bool: + """Check if the invitation has expired. + + Returns: + bool: True if the invitation has expired, False otherwise + + Examples: + >>> from datetime import timedelta + >>> invitation = EmailTeamInvitation( + ... team_id="team-123", + ... email="user@example.com", + ... role="member", + ... invited_by="admin@example.com", + ... expires_at=utc_now() + timedelta(days=7) + ... ) + >>> invitation.is_expired() + False + """ + now = utc_now() + expires_at = self.expires_at + + # Handle timezone awareness mismatch + if now.tzinfo is not None and expires_at.tzinfo is None: + expires_at = expires_at.replace(tzinfo=timezone.utc) + elif now.tzinfo is None and expires_at.tzinfo is not None: + now = now.replace(tzinfo=timezone.utc) + + return now > expires_at + + def is_valid(self) -> bool: + """Check if the invitation is valid (active and not expired). + + Returns: + bool: True if the invitation is valid, False otherwise + + Examples: + >>> from datetime import timedelta + >>> invitation = EmailTeamInvitation( + ... team_id="team-123", + ... email="user@example.com", + ... role="member", + ... invited_by="admin@example.com", + ... expires_at=utc_now() + timedelta(days=7), + ... is_active=True + ... ) + >>> invitation.is_valid() + True + """ + return self.is_active and not self.is_expired() + + +class EmailTeamJoinRequest(Base): + """Team join request model for managing public team join requests. + + This model tracks user requests to join public teams, including + approval workflow and expiration dates. + + Attributes: + id (str): Primary key UUID + team_id (str): Foreign key to email_teams + user_email (str): Email of the user requesting to join + message (str): Optional message from the user + status (str): Request status (pending, approved, rejected, expired) + requested_at (datetime): When the request was made + expires_at (datetime): When the request expires + reviewed_at (datetime): When the request was reviewed + reviewed_by (str): Email of user who reviewed the request + notes (str): Optional admin notes + """ + + __tablename__ = "email_team_join_requests" + + # Primary key + id: Mapped[str] = mapped_column(String(36), primary_key=True, default=lambda: uuid.uuid4().hex) + + # Foreign keys + team_id: Mapped[str] = mapped_column(String(36), ForeignKey("email_teams.id"), nullable=False) + user_email: Mapped[str] = mapped_column(String(255), ForeignKey("email_users.email"), nullable=False) + + # Request details + message: Mapped[Optional[str]] = mapped_column(Text, nullable=True) + status: Mapped[str] = mapped_column(String(20), default="pending", nullable=False) + + # Timing + requested_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=utc_now, nullable=False) + expires_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), nullable=False) + reviewed_at: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True), nullable=True) + reviewed_by: Mapped[Optional[str]] = mapped_column(String(255), ForeignKey("email_users.email"), nullable=True) + notes: Mapped[Optional[str]] = mapped_column(Text, nullable=True) + + # Relationships + team: Mapped["EmailTeam"] = relationship("EmailTeam") + user: Mapped["EmailUser"] = relationship("EmailUser", foreign_keys=[user_email]) + reviewer: Mapped[Optional["EmailUser"]] = relationship("EmailUser", foreign_keys=[reviewed_by]) + + # Unique constraint to prevent duplicate requests + __table_args__ = (UniqueConstraint("team_id", "user_email", name="uq_team_join_request"),) + + def __repr__(self) -> str: + """String representation of the team join request. + + Returns: + str: String representation of the team join request. + """ + return f"" + + def is_expired(self) -> bool: + """Check if the join request has expired. + + Returns: + bool: True if the request has expired, False otherwise. + """ + return utc_now() > self.expires_at + + def is_pending(self) -> bool: + """Check if the join request is still pending. + + Returns: + bool: True if the request is pending and not expired, False otherwise. + """ + return self.status == "pending" and not self.is_expired() + + +class PendingUserApproval(Base): + """Model for pending SSO user registrations awaiting admin approval. + + This model stores information about users who have authenticated via SSO + but require admin approval before their account is fully activated. + + Attributes: + id (str): Primary key + email (str): Email address of the pending user + full_name (str): Full name from SSO provider + auth_provider (str): SSO provider (github, google, etc.) + sso_metadata (dict): Additional metadata from SSO provider + requested_at (datetime): When the approval was requested + expires_at (datetime): When the approval request expires + approved_by (str): Email of admin who approved (if approved) + approved_at (datetime): When the approval was granted + status (str): Current status (pending, approved, rejected, expired) + rejection_reason (str): Reason for rejection (if applicable) + admin_notes (str): Notes from admin review + + Examples: + >>> from datetime import timedelta + >>> approval = PendingUserApproval( + ... email="newuser@example.com", + ... full_name="New User", + ... auth_provider="github", + ... expires_at=utc_now() + timedelta(days=30), + ... status="pending" + ... ) + >>> approval.status + 'pending' + """ + + __tablename__ = "pending_user_approvals" + + # Primary key + id: Mapped[str] = mapped_column(String(36), primary_key=True, default=lambda: str(uuid.uuid4())) + + # User details + email: Mapped[str] = mapped_column(String(255), nullable=False, unique=True) + full_name: Mapped[str] = mapped_column(String(255), nullable=False) + auth_provider: Mapped[str] = mapped_column(String(50), nullable=False) + sso_metadata: Mapped[Optional[Dict]] = mapped_column(JSON, nullable=True) + + # Request details + requested_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=utc_now, nullable=False) + expires_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), nullable=False) + + # Approval details + approved_by: Mapped[Optional[str]] = mapped_column(String(255), ForeignKey("email_users.email"), nullable=True) + approved_at: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True), nullable=True) + status: Mapped[str] = mapped_column(String(20), default="pending", nullable=False) # pending, approved, rejected, expired + rejection_reason: Mapped[Optional[str]] = mapped_column(Text, nullable=True) + admin_notes: Mapped[Optional[str]] = mapped_column(Text, nullable=True) + + # Relationships + approver: Mapped[Optional["EmailUser"]] = relationship("EmailUser", foreign_keys=[approved_by]) + + def __repr__(self) -> str: + """String representation of the pending approval. + + Returns: + str: String representation of PendingUserApproval instance + """ + return f"" + + def is_expired(self) -> bool: + """Check if the approval request has expired. + + Returns: + bool: True if the approval request has expired + """ + now = utc_now() + expires_at = self.expires_at + + # Handle timezone awareness mismatch + if now.tzinfo is not None and expires_at.tzinfo is None: + expires_at = expires_at.replace(tzinfo=timezone.utc) + elif now.tzinfo is None and expires_at.tzinfo is not None: + now = now.replace(tzinfo=timezone.utc) + + return now > expires_at + + def approve(self, admin_email: str, notes: Optional[str] = None) -> None: + """Approve the user registration. + + Args: + admin_email: Email of the admin approving the request + notes: Optional admin notes + """ + self.status = "approved" + self.approved_by = admin_email + self.approved_at = utc_now() + self.admin_notes = notes + + def reject(self, admin_email: str, reason: str, notes: Optional[str] = None) -> None: + """Reject the user registration. + + Args: + admin_email: Email of the admin rejecting the request + reason: Reason for rejection + notes: Optional admin notes + """ + self.status = "rejected" + self.approved_by = admin_email + self.approved_at = utc_now() + self.rejection_reason = reason + self.admin_notes = notes + + # Association table for servers and tools server_tool_association = Table( "server_tool_association", @@ -427,6 +1484,11 @@ class Tool(Base): # Relationship with ToolMetric records metrics: Mapped[List["ToolMetric"]] = relationship("ToolMetric", back_populates="tool", cascade="all, delete-orphan") + # Team scoping fields for resource organization + team_id: Mapped[Optional[str]] = mapped_column(String(36), ForeignKey("email_teams.id"), nullable=True) + owner_email: Mapped[Optional[str]] = mapped_column(String(255), nullable=True) + visibility: Mapped[str] = mapped_column(String(20), nullable=False, default="private") + # @property # def gateway_slug(self) -> str: # return self.gateway.slug @@ -632,6 +1694,11 @@ def metrics_summary(self) -> Dict[str, Any]: "last_execution_time": self.last_execution_time, } + # Team scoping fields for resource organization + team_id: Mapped[Optional[str]] = mapped_column(String(36), ForeignKey("email_teams.id"), nullable=True) + owner_email: Mapped[Optional[str]] = mapped_column(String(255), nullable=True) + visibility: Mapped[str] = mapped_column(String(20), nullable=False, default="private") + class Resource(Base): """ @@ -689,7 +1756,7 @@ class Resource(Base): servers: Mapped[List["Server"]] = relationship("Server", secondary=server_resource_association, back_populates="resources") @property - def content(self) -> ResourceContent: + def content(self) -> "ResourceContent": """ Returns the resource content in the appropriate format. @@ -726,6 +1793,10 @@ def content(self) -> ResourceContent: 'Resource has no content' """ + # Local import to avoid circular import + # First-Party + from mcpgateway.models import ResourceContent # pylint: disable=import-outside-toplevel + if self.text_content is not None: return ResourceContent( type="resource", @@ -839,6 +1910,11 @@ def last_execution_time(self) -> Optional[datetime]: return None return max(m.timestamp for m in self.metrics) + # Team scoping fields for resource organization + team_id: Mapped[Optional[str]] = mapped_column(String(36), ForeignKey("email_teams.id"), nullable=True) + owner_email: Mapped[Optional[str]] = mapped_column(String(255), nullable=True) + visibility: Mapped[str] = mapped_column(String(20), nullable=False, default="private") + class ResourceSubscription(Base): """Tracks subscriptions to resource updates.""" @@ -940,7 +2016,7 @@ def validate_arguments(self, args: Dict[str, str]) -> None: try: jsonschema.validate(args, self.argument_schema) except jsonschema.exceptions.ValidationError as e: - raise ValueError(f"Invalid prompt arguments: {str(e)}") + raise ValueError(f"Invalid prompt arguments: {str(e)}") from e @property def execution_count(self) -> int: @@ -1039,6 +2115,11 @@ def last_execution_time(self) -> Optional[datetime]: return None return max(m.timestamp for m in self.metrics) + # Team scoping fields for resource organization + team_id: Mapped[Optional[str]] = mapped_column(String(36), ForeignKey("email_teams.id"), nullable=True) + owner_email: Mapped[Optional[str]] = mapped_column(String(255), nullable=True) + visibility: Mapped[str] = mapped_column(String(20), nullable=False, default="private") + class Server(Base): """ @@ -1091,6 +2172,9 @@ class Server(Base): prompts: Mapped[List["Prompt"]] = relationship("Prompt", secondary=server_prompt_association, back_populates="servers") a2a_agents: Mapped[List["A2AAgent"]] = relationship("A2AAgent", secondary=server_a2a_association, back_populates="servers") + # API token relationships + scoped_tokens: Mapped[List["EmailApiToken"]] = relationship("EmailApiToken", back_populates="server") + @property def execution_count(self) -> int: """ @@ -1200,6 +2284,11 @@ def last_execution_time(self) -> Optional[datetime]: return None return max(m.timestamp for m in self.metrics) + # Team scoping fields for resource organization + team_id: Mapped[Optional[str]] = mapped_column(String(36), ForeignKey("email_teams.id"), nullable=True) + owner_email: Mapped[Optional[str]] = mapped_column(String(255), nullable=True) + visibility: Mapped[str] = mapped_column(String(20), nullable=False, default="private") + class Gateway(Base): """ORM model for a federated peer Gateway.""" @@ -1263,6 +2352,11 @@ class Gateway(Base): # OAuth configuration oauth_config: Mapped[Optional[Dict[str, Any]]] = mapped_column(JSON, nullable=True, comment="OAuth 2.0 configuration including grant_type, client_id, encrypted client_secret, URLs, and scopes") + # Team scoping fields for resource organization + team_id: Mapped[Optional[str]] = mapped_column(String(36), ForeignKey("email_teams.id"), nullable=True) + owner_email: Mapped[Optional[str]] = mapped_column(String(255), nullable=True) + visibility: Mapped[str] = mapped_column(String(20), nullable=False, default="private") + # Relationship with OAuth tokens oauth_tokens: Mapped[List["OAuthToken"]] = relationship("OAuthToken", back_populates="gateway", cascade="all, delete-orphan") @@ -1355,6 +2449,11 @@ class A2AAgent(Base): federation_source: Mapped[Optional[str]] = mapped_column(String, nullable=True) version: Mapped[int] = mapped_column(Integer, default=1, nullable=False) + # Team scoping fields for resource organization + team_id: Mapped[Optional[str]] = mapped_column(String(36), ForeignKey("email_teams.id"), nullable=True) + owner_email: Mapped[Optional[str]] = mapped_column(String(255), nullable=True) + visibility: Mapped[str] = mapped_column(String(20), nullable=False, default="private") + # Relationships servers: Mapped[List["Server"]] = relationship("Server", secondary=server_a2a_association, back_populates="a2a_agents") metrics: Mapped[List["A2AAgentMetric"]] = relationship("A2AAgentMetric", back_populates="a2a_agent", cascade="all, delete-orphan") @@ -1480,6 +2579,399 @@ class OAuthToken(Base): gateway: Mapped["Gateway"] = relationship("Gateway", back_populates="oauth_tokens") +class EmailApiToken(Base): + """Email user API token model for token catalog management. + + This model provides comprehensive API token management with scoping, + revocation, and usage tracking for email-based users. + + Attributes: + id (str): Unique token identifier + user_email (str): Owner's email address + team_id (str): Team the token is associated with (required for team-based access) + name (str): Human-readable token name + jti (str): JWT ID for revocation checking + token_hash (str): Hashed token value for security + server_id (str): Optional server scope limitation + resource_scopes (List[str]): Permission scopes like ['tools.read'] + ip_restrictions (List[str]): IP address/CIDR restrictions + time_restrictions (dict): Time-based access restrictions + usage_limits (dict): Rate limiting and usage quotas + created_at (datetime): Token creation timestamp + expires_at (datetime): Optional expiry timestamp + last_used (datetime): Last usage timestamp + is_active (bool): Active status flag + description (str): Token description + tags (List[str]): Organizational tags + + Examples: + >>> token = EmailApiToken( + ... user_email="alice@example.com", + ... name="Production API Access", + ... server_id="prod-server-123", + ... resource_scopes=["tools.read", "resources.read"], + ... description="Read-only access to production tools" + ... ) + >>> token.is_scoped_to_server("prod-server-123") + True + >>> token.has_permission("tools.read") + True + """ + + __tablename__ = "email_api_tokens" + + # Core identity fields + id: Mapped[str] = mapped_column(String(36), primary_key=True, default=lambda: str(uuid.uuid4())) + user_email: Mapped[str] = mapped_column(String(255), ForeignKey("email_users.email", ondelete="CASCADE"), nullable=False, index=True) + team_id: Mapped[Optional[str]] = mapped_column(String(36), ForeignKey("email_teams.id", ondelete="CASCADE"), nullable=True, index=True) + name: Mapped[str] = mapped_column(String(255), nullable=False) + jti: Mapped[str] = mapped_column(String(36), unique=True, nullable=False, default=lambda: str(uuid.uuid4())) + token_hash: Mapped[str] = mapped_column(String(255), nullable=False) + + # Scoping fields + server_id: Mapped[Optional[str]] = mapped_column(String(36), ForeignKey("servers.id", ondelete="CASCADE"), nullable=True) + resource_scopes: Mapped[Optional[List[str]]] = mapped_column(JSON, nullable=True, default=list) + ip_restrictions: Mapped[Optional[List[str]]] = mapped_column(JSON, nullable=True, default=list) + time_restrictions: Mapped[Optional[dict]] = mapped_column(JSON, nullable=True, default=dict) + usage_limits: Mapped[Optional[dict]] = mapped_column(JSON, nullable=True, default=dict) + + # Lifecycle fields + created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=utc_now, nullable=False) + expires_at: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True), nullable=True) + last_used: Mapped[Optional[datetime]] = mapped_column(DateTime(timezone=True), nullable=True) + is_active: Mapped[bool] = mapped_column(Boolean, default=True, nullable=False) + + # Metadata fields + description: Mapped[Optional[str]] = mapped_column(Text, nullable=True) + tags: Mapped[Optional[List[str]]] = mapped_column(JSON, nullable=True, default=list) + + # Unique constraint for user+name combination + __table_args__ = ( + UniqueConstraint("user_email", "name", name="uq_email_api_tokens_user_name"), + Index("idx_email_api_tokens_user_email", "user_email"), + Index("idx_email_api_tokens_jti", "jti"), + Index("idx_email_api_tokens_expires_at", "expires_at"), + Index("idx_email_api_tokens_is_active", "is_active"), + ) + + # Relationships + user: Mapped["EmailUser"] = relationship("EmailUser", back_populates="api_tokens") + team: Mapped[Optional["EmailTeam"]] = relationship("EmailTeam", back_populates="api_tokens") + server: Mapped[Optional["Server"]] = relationship("Server", back_populates="scoped_tokens") + + def is_scoped_to_server(self, server_id: str) -> bool: + """Check if token is scoped to a specific server. + + Args: + server_id: Server ID to check against. + + Returns: + bool: True if token is scoped to the server, False otherwise. + """ + return self.server_id == server_id if self.server_id else False + + def has_permission(self, permission: str) -> bool: + """Check if token has a specific permission. + + Args: + permission: Permission string to check for. + + Returns: + bool: True if token has the permission, False otherwise. + """ + return permission in (self.resource_scopes or []) + + def is_team_token(self) -> bool: + """Check if this is a team-based token. + + Returns: + bool: True if token is associated with a team, False otherwise. + """ + return self.team_id is not None + + def get_effective_permissions(self) -> List[str]: + """Get effective permissions for this token. + + For team tokens, this should inherit team permissions. + For personal tokens, this uses the resource_scopes. + + Returns: + List[str]: List of effective permissions for this token. + """ + if self.is_team_token() and self.team: + # For team tokens, we would inherit team permissions + # This would need to be implemented based on your RBAC system + return self.resource_scopes or [] + return self.resource_scopes or [] + + def is_expired(self) -> bool: + """Check if token is expired. + + Returns: + bool: True if token is expired, False otherwise. + """ + if not self.expires_at: + return False + return utc_now() > self.expires_at + + def is_valid(self) -> bool: + """Check if token is valid (active and not expired). + + Returns: + bool: True if token is valid, False otherwise. + """ + return self.is_active and not self.is_expired() + + +class TokenUsageLog(Base): + """Token usage logging for analytics and security monitoring. + + This model tracks every API request made with email API tokens + for security auditing and usage analytics. + + Attributes: + id (int): Auto-incrementing log ID + token_jti (str): Token JWT ID reference + user_email (str): Token owner's email + timestamp (datetime): Request timestamp + endpoint (str): API endpoint accessed + method (str): HTTP method used + ip_address (str): Client IP address + user_agent (str): Client user agent + status_code (int): HTTP response status + response_time_ms (int): Response time in milliseconds + blocked (bool): Whether request was blocked + block_reason (str): Reason for blocking if applicable + + Examples: + >>> log = TokenUsageLog( + ... token_jti="token-uuid-123", + ... user_email="alice@example.com", + ... endpoint="/tools", + ... method="GET", + ... ip_address="192.168.1.100", + ... status_code=200, + ... response_time_ms=45 + ... ) + """ + + __tablename__ = "token_usage_logs" + + # Primary key + id: Mapped[int] = mapped_column(BigInteger, primary_key=True, autoincrement=True) + + # Token reference + token_jti: Mapped[str] = mapped_column(String(36), nullable=False, index=True) + user_email: Mapped[str] = mapped_column(String(255), nullable=False, index=True) + + # Timestamp + timestamp: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=utc_now, nullable=False, index=True) + + # Request details + endpoint: Mapped[Optional[str]] = mapped_column(String(255), nullable=True) + method: Mapped[Optional[str]] = mapped_column(String(10), nullable=True) + ip_address: Mapped[Optional[str]] = mapped_column(String(45), nullable=True) # IPv6 max length + user_agent: Mapped[Optional[str]] = mapped_column(Text, nullable=True) + + # Response details + status_code: Mapped[Optional[int]] = mapped_column(Integer, nullable=True) + response_time_ms: Mapped[Optional[int]] = mapped_column(Integer, nullable=True) + + # Security fields + blocked: Mapped[bool] = mapped_column(Boolean, default=False, nullable=False) + block_reason: Mapped[Optional[str]] = mapped_column(String(255), nullable=True) + + # Indexes for performance + __table_args__ = ( + Index("idx_token_usage_logs_token_jti_timestamp", "token_jti", "timestamp"), + Index("idx_token_usage_logs_user_email_timestamp", "user_email", "timestamp"), + ) + + +class TokenRevocation(Base): + """Token revocation blacklist for immediate token invalidation. + + This model maintains a blacklist of revoked JWT tokens to provide + immediate token invalidation capabilities. + + Attributes: + jti (str): JWT ID (primary key) + revoked_at (datetime): Revocation timestamp + revoked_by (str): Email of user who revoked the token + reason (str): Optional reason for revocation + + Examples: + >>> revocation = TokenRevocation( + ... jti="token-uuid-123", + ... revoked_by="admin@example.com", + ... reason="Security compromise" + ... ) + """ + + __tablename__ = "token_revocations" + + # JWT ID as primary key + jti: Mapped[str] = mapped_column(String(36), primary_key=True) + + # Revocation details + revoked_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=utc_now, nullable=False) + revoked_by: Mapped[str] = mapped_column(String(255), ForeignKey("email_users.email"), nullable=False) + reason: Mapped[Optional[str]] = mapped_column(String(255), nullable=True) + + # Relationship + revoker: Mapped["EmailUser"] = relationship("EmailUser") + + +class SSOProvider(Base): + """SSO identity provider configuration for OAuth2/OIDC authentication. + + Stores configuration and credentials for external identity providers + like GitHub, Google, IBM Security Verify, and Okta. + + Attributes: + id (str): Unique provider ID (e.g., 'github', 'google', 'ibm_verify') + name (str): Human-readable provider name + display_name (str): Display name for UI + provider_type (str): Protocol type ('oauth2', 'oidc') + is_enabled (bool): Whether provider is active + client_id (str): OAuth client ID + client_secret_encrypted (str): Encrypted client secret + authorization_url (str): OAuth authorization endpoint + token_url (str): OAuth token endpoint + userinfo_url (str): User info endpoint + issuer (str): OIDC issuer (optional) + trusted_domains (List[str]): Auto-approved email domains + scope (str): OAuth scope string + auto_create_users (bool): Auto-create users on first login + team_mapping (dict): Organization/domain to team mapping rules + created_at (datetime): Provider creation timestamp + updated_at (datetime): Last configuration update + + Examples: + >>> provider = SSOProvider( + ... id="github", + ... name="github", + ... display_name="GitHub", + ... provider_type="oauth2", + ... client_id="gh_client_123", + ... authorization_url="https://github.com/login/oauth/authorize", + ... token_url="https://github.com/login/oauth/access_token", + ... userinfo_url="https://api.github.com/user", + ... scope="user:email" + ... ) + """ + + __tablename__ = "sso_providers" + + # Provider identification + id: Mapped[str] = mapped_column(String(50), primary_key=True) # github, google, ibm_verify, okta + name: Mapped[str] = mapped_column(String(100), nullable=False, unique=True) + display_name: Mapped[str] = mapped_column(String(100), nullable=False) + provider_type: Mapped[str] = mapped_column(String(20), nullable=False) # oauth2, oidc + is_enabled: Mapped[bool] = mapped_column(Boolean, default=True, nullable=False) + + # OAuth2/OIDC Configuration + client_id: Mapped[str] = mapped_column(String(255), nullable=False) + client_secret_encrypted: Mapped[str] = mapped_column(Text, nullable=False) # Encrypted storage + authorization_url: Mapped[str] = mapped_column(String(500), nullable=False) + token_url: Mapped[str] = mapped_column(String(500), nullable=False) + userinfo_url: Mapped[str] = mapped_column(String(500), nullable=False) + issuer: Mapped[Optional[str]] = mapped_column(String(500), nullable=True) # For OIDC + + # Provider Settings + trusted_domains: Mapped[List[str]] = mapped_column(JSON, default=list, nullable=False) + scope: Mapped[str] = mapped_column(String(200), default="openid profile email", nullable=False) + auto_create_users: Mapped[bool] = mapped_column(Boolean, default=True, nullable=False) + team_mapping: Mapped[dict] = mapped_column(JSON, default=dict, nullable=False) + + # Metadata + created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=utc_now, nullable=False) + updated_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=utc_now, onupdate=utc_now, nullable=False) + + def __repr__(self): + """String representation of SSO provider. + + Returns: + String representation of the SSO provider instance + """ + return f"" + + +class SSOAuthSession(Base): + """Tracks SSO authentication sessions and state. + + Maintains OAuth state parameters and callback information during + the SSO authentication flow for security and session management. + + Attributes: + id (str): Unique session ID (UUID) + provider_id (str): Reference to SSO provider + state (str): OAuth state parameter for CSRF protection + code_verifier (str): PKCE code verifier (for OAuth 2.1) + nonce (str): OIDC nonce parameter + redirect_uri (str): OAuth callback URI + expires_at (datetime): Session expiration time + user_email (str): User email after successful auth (optional) + created_at (datetime): Session creation timestamp + + Examples: + >>> session = SSOAuthSession( + ... provider_id="github", + ... state="csrf-state-token", + ... redirect_uri="https://gateway.example.com/auth/sso-callback/github" + ... ) + """ + + __tablename__ = "sso_auth_sessions" + + # Session identification + id: Mapped[str] = mapped_column(String(36), primary_key=True, default=lambda: str(uuid.uuid4())) + provider_id: Mapped[str] = mapped_column(String(50), ForeignKey("sso_providers.id"), nullable=False) + + # OAuth/OIDC parameters + state: Mapped[str] = mapped_column(String(128), nullable=False, unique=True) # CSRF protection + code_verifier: Mapped[Optional[str]] = mapped_column(String(128), nullable=True) # PKCE + nonce: Mapped[Optional[str]] = mapped_column(String(128), nullable=True) # OIDC + redirect_uri: Mapped[str] = mapped_column(String(500), nullable=False) + + # Session lifecycle + expires_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=lambda: utc_now() + timedelta(minutes=10), nullable=False) # 10-minute expiration + user_email: Mapped[Optional[str]] = mapped_column(String(255), ForeignKey("email_users.email"), nullable=True) + created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=utc_now, nullable=False) + + # Relationships + provider: Mapped["SSOProvider"] = relationship("SSOProvider") + user: Mapped[Optional["EmailUser"]] = relationship("EmailUser") + + @property + def is_expired(self) -> bool: + """Check if SSO auth session has expired. + + Returns: + True if the session has expired, False otherwise + """ + now = utc_now() + expires = self.expires_at + + # Handle timezone mismatch by converting naive datetime to UTC if needed + if expires.tzinfo is None: + # expires_at is timezone-naive, assume it's UTC + expires = expires.replace(tzinfo=timezone.utc) + elif now.tzinfo is None: + # now is timezone-naive (shouldn't happen with utc_now, but just in case) + now = now.replace(tzinfo=timezone.utc) + + return now > expires + + def __repr__(self): + """String representation of SSO auth session. + + Returns: + str: String representation of the session object + """ + return f"" + + # Event listeners for validation def validate_tool_schema(mapper, connection, target): """ @@ -1500,7 +2992,7 @@ def validate_tool_schema(mapper, connection, target): try: jsonschema.Draft7Validator.check_schema(target.input_schema) except jsonschema.exceptions.SchemaError as e: - raise ValueError(f"Invalid tool input schema: {str(e)}") + raise ValueError(f"Invalid tool input schema: {str(e)}") from e def validate_tool_name(mapper, connection, target): @@ -1522,7 +3014,7 @@ def validate_tool_name(mapper, connection, target): try: SecurityValidator.validate_tool_name(target.name) except ValueError as e: - raise ValueError(f"Invalid tool name: {str(e)}") + raise ValueError(f"Invalid tool name: {str(e)}") from e def validate_prompt_schema(mapper, connection, target): @@ -1544,7 +3036,7 @@ def validate_prompt_schema(mapper, connection, target): try: jsonschema.Draft7Validator.check_schema(target.argument_schema) except jsonschema.exceptions.SchemaError as e: - raise ValueError(f"Invalid prompt argument schema: {str(e)}") + raise ValueError(f"Invalid prompt argument schema: {str(e)}") from e # Register validation listeners @@ -1628,6 +3120,18 @@ def set_a2a_agent_slug(_mapper, _conn, target): target.slug = slugify(target.name) +@event.listens_for(EmailTeam, "before_insert") +def set_email_team_slug(_mapper, _conn, target): + """Set the slug for an EmailTeam before insert. + + Args: + _mapper: Mapper + _conn: Connection + target: Target EmailTeam instance + """ + target.slug = slugify(target.name) + + @event.listens_for(Tool, "before_insert") @event.listens_for(Tool, "before_update") def set_custom_name_and_slug(mapper, connection, target): # pylint: disable=unused-argument diff --git a/mcpgateway/main.py b/mcpgateway/main.py index 768ac28e3..96dd718f9 100644 --- a/mcpgateway/main.py +++ b/mcpgateway/main.py @@ -29,13 +29,14 @@ import asyncio from contextlib import asynccontextmanager import json +import os as _os # local alias to avoid collisions import time from typing import Any, AsyncIterator, Dict, List, Optional, Union from urllib.parse import urlparse, urlunparse import uuid # Third-Party -from fastapi import APIRouter, Body, Depends, FastAPI, HTTPException, Request, status, WebSocket, WebSocketDisconnect +from fastapi import APIRouter, Body, Depends, FastAPI, HTTPException, Query, Request, status, WebSocket, WebSocketDisconnect from fastapi.background import BackgroundTasks from fastapi.exception_handlers import request_validation_exception_handler as fastapi_default_validation_handler from fastapi.exceptions import RequestValidationError @@ -53,6 +54,7 @@ # First-Party from mcpgateway import __version__ from mcpgateway.admin import admin_router, set_logging_service +from mcpgateway.auth import get_current_user from mcpgateway.bootstrap_db import main as bootstrap_db from mcpgateway.cache import ResourceCache, SessionRegistry from mcpgateway.config import jsonpath_modifier, settings @@ -60,8 +62,10 @@ from mcpgateway.db import PromptMetric, refresh_slugs_on_startup, SessionLocal from mcpgateway.db import Tool as DbTool from mcpgateway.handlers.sampling import SamplingHandler +from mcpgateway.middleware.rbac import get_current_user_with_permissions, require_permission from mcpgateway.middleware.security_headers import SecurityHeadersMiddleware -from mcpgateway.models import InitializeResult, ListResourceTemplatesResult, LogLevel, ResourceContent, Root +from mcpgateway.middleware.token_scoping import token_scoping_middleware +from mcpgateway.models import InitializeResult, ListResourceTemplatesResult, LogLevel, Root from mcpgateway.observability import init_telemetry from mcpgateway.plugins.framework import PluginManager, PluginViolationError from mcpgateway.routers.well_known import router as well_known_router @@ -112,7 +116,7 @@ from mcpgateway.utils.passthrough_headers import set_global_passthrough_headers from mcpgateway.utils.redis_isready import wait_for_redis_ready from mcpgateway.utils.retry_manager import ResilientHttpClient -from mcpgateway.utils.verify_credentials import require_auth, require_auth_override, verify_jwt_token +from mcpgateway.utils.verify_credentials import require_auth, require_docs_auth_override, verify_jwt_token from mcpgateway.validation.jsonrpc import JSONRPCError # Import the admin routes from the new module @@ -139,8 +143,15 @@ else: loop.create_task(bootstrap_db()) -# Initialize plugin manager as a singleton. -plugin_manager: PluginManager | None = PluginManager(settings.plugin_config_file) if settings.plugins_enabled else None +# Initialize plugin manager as a singleton (honor env overrides for tests) +_env_flag = _os.getenv("PLUGINS_ENABLED") +if _env_flag is not None: + _env_enabled = _env_flag.strip().lower() in {"1", "true", "yes", "on"} + _PLUGINS_ENABLED = _env_enabled +else: + _PLUGINS_ENABLED = settings.plugins_enabled +_config_file = _os.getenv("PLUGIN_CONFIG_FILE", settings.plugin_config_file) +plugin_manager: PluginManager | None = PluginManager(_config_file) if _PLUGINS_ENABLED else None # Initialize services tool_service = ToolService() @@ -173,6 +184,22 @@ message_ttl=settings.message_ttl, ) + +# Helper function for authentication compatibility +def get_user_email(user): + """Extract email from user object, handling both string and dict formats. + + Args: + user: User object, can be either a dict (new RBAC format) or string (legacy format) + + Returns: + str: User email address or 'unknown' if not available + """ + if isinstance(user, dict): + return user.get("email", "unknown") + return str(user) if user else "unknown" + + # Initialize cache resource_cache = ResourceCache(max_size=settings.resource_cache_size, ttl=settings.resource_cache_ttl) @@ -234,6 +261,17 @@ async def lifespan(_app: FastAPI) -> AsyncIterator[None]: await streamable_http_session.initialize() refresh_slugs_on_startup() + # Bootstrap SSO providers from environment configuration + if settings.sso_enabled: + try: + # First-Party + from mcpgateway.utils.sso_bootstrap import bootstrap_sso_providers # pylint: disable=import-outside-toplevel + + bootstrap_sso_providers() + logger.info("SSO providers bootstrapped successfully") + except Exception as e: + logger.warning(f"Failed to bootstrap SSO providers: {e}") + logger.info("All services initialized successfully") # Reconfigure uvicorn loggers after startup to capture access logs in dual output @@ -454,8 +492,8 @@ async def dispatch(self, request: Request, call_next): token = request.headers.get("Authorization") cookie_token = request.cookies.get("jwt_token") - # Simulate what Depends(require_auth) would do - await require_auth_override(token, cookie_token) + # Use dedicated docs authentication that bypasses global auth settings + await require_docs_auth_override(token, cookie_token) except HTTPException as e: return JSONResponse(status_code=e.status_code, content={"detail": e.detail}, headers=e.headers if e.headers else None) @@ -562,6 +600,10 @@ async def __call__(self, scope, receive, send): # Add security headers middleware app.add_middleware(SecurityHeadersMiddleware) +# Add token scoping middleware (only when email auth is enabled) +if settings.email_auth_enabled: + app.add_middleware(BaseHTTPMiddleware, dispatch=token_scoping_middleware) + # Add custom DocsAuthMiddleware app.add_middleware(DocsAuthMiddleware) @@ -733,7 +775,7 @@ def update_url_protocol(request: Request) -> str: # Protocol APIs # @protocol_router.post("/initialize") -async def initialize(request: Request, user: str = Depends(require_auth)) -> InitializeResult: +async def initialize(request: Request, user=Depends(get_current_user)) -> InitializeResult: """ Initialize a protocol. @@ -765,7 +807,7 @@ async def initialize(request: Request, user: str = Depends(require_auth)) -> Ini @protocol_router.post("/ping") -async def ping(request: Request, user: str = Depends(require_auth)) -> JSONResponse: +async def ping(request: Request, user=Depends(get_current_user)) -> JSONResponse: """ Handle a ping request according to the MCP specification. @@ -801,7 +843,7 @@ async def ping(request: Request, user: str = Depends(require_auth)) -> JSONRespo @protocol_router.post("/notifications") -async def handle_notification(request: Request, user: str = Depends(require_auth)) -> None: +async def handle_notification(request: Request, user=Depends(get_current_user)) -> None: """ Handles incoming notifications from clients. Depending on the notification method, different actions are taken (e.g., logging initialization, cancellation, or messages). @@ -829,7 +871,7 @@ async def handle_notification(request: Request, user: str = Depends(require_auth @protocol_router.post("/completion/complete") -async def handle_completion(request: Request, db: Session = Depends(get_db), user: str = Depends(require_auth)): +async def handle_completion(request: Request, db: Session = Depends(get_db), user=Depends(get_current_user_with_permissions)): """ Handles the completion of tasks by processing a completion request. @@ -842,12 +884,12 @@ async def handle_completion(request: Request, db: Session = Depends(get_db), use The result of the completion process. """ body = await request.json() - logger.debug(f"User {user} sent a completion request") + logger.debug(f"User {user['email']} sent a completion request") return await completion_service.handle_completion(db, body) @protocol_router.post("/sampling/createMessage") -async def handle_sampling(request: Request, db: Session = Depends(get_db), user: str = Depends(require_auth)): +async def handle_sampling(request: Request, db: Session = Depends(get_db), user=Depends(get_current_user_with_permissions)): """ Handles the creation of a new message for sampling. @@ -859,7 +901,7 @@ async def handle_sampling(request: Request, db: Session = Depends(get_db), user: Returns: The result of the message creation process. """ - logger.debug(f"User {user} sent a sampling request") + logger.debug(f"User {user['email']} sent a sampling request") body = await request.json() return await sampling_handler.create_message(db, body) @@ -869,35 +911,50 @@ async def handle_sampling(request: Request, db: Session = Depends(get_db), user: ############### @server_router.get("", response_model=List[ServerRead]) @server_router.get("/", response_model=List[ServerRead]) +@require_permission("servers.read") async def list_servers( include_inactive: bool = False, tags: Optional[str] = None, + team_id: Optional[str] = None, + visibility: Optional[str] = None, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> List[ServerRead]: """ - Lists all servers in the system, optionally including inactive ones. + Lists servers accessible to the user, with team filtering support. Args: include_inactive (bool): Whether to include inactive servers in the response. tags (Optional[str]): Comma-separated list of tags to filter by. + team_id (Optional[str]): Filter by specific team ID. + visibility (Optional[str]): Filter by visibility (private, team, public). db (Session): The database session used to interact with the data store. user (str): The authenticated user making the request. Returns: - List[ServerRead]: A list of server objects. + List[ServerRead]: A list of server objects the user has access to. """ # Parse tags parameter if provided tags_list = None if tags: tags_list = [tag.strip() for tag in tags.split(",") if tag.strip()] - - logger.debug(f"User {user} requested server list with tags={tags_list}") - return await server_service.list_servers(db, include_inactive=include_inactive, tags=tags_list) + # Get user email for team filtering + user_email = get_user_email(user) + # Use team-filtered server listing + if team_id or visibility: + data = await server_service.list_servers_for_user(db=db, user_email=user_email, team_id=team_id, visibility=visibility, include_inactive=include_inactive) + # Apply tag filtering to team-filtered results if needed + if tags_list: + data = [server for server in data if any(tag in server.tags for tag in tags_list)] + else: + # Use existing method for backward compatibility when no team filtering + data = await server_service.list_servers(db, include_inactive=include_inactive, tags=tags_list) + return data @server_router.get("/{server_id}", response_model=ServerRead) -async def get_server(server_id: str, db: Session = Depends(get_db), user: str = Depends(require_auth)) -> ServerRead: +@require_permission("servers.read") +async def get_server(server_id: str, db: Session = Depends(get_db), user=Depends(get_current_user_with_permissions)) -> ServerRead: """ Retrieves a server by its ID. @@ -921,16 +978,21 @@ async def get_server(server_id: str, db: Session = Depends(get_db), user: str = @server_router.post("", response_model=ServerRead, status_code=201) @server_router.post("/", response_model=ServerRead, status_code=201) +@require_permission("servers.create") async def create_server( server: ServerCreate, + team_id: Optional[str] = Body(None, description="Team ID to assign server to"), + visibility: str = Body("private", description="Server visibility: private, team, public"), db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> ServerRead: """ Creates a new server. Args: server (ServerCreate): The data for the new server. + team_id (Optional[str]): Team ID to assign the server to. + visibility (str): Server visibility level (private, team, public). db (Session): The database session used to interact with the data store. user (str): The authenticated user making the request. @@ -941,8 +1003,21 @@ async def create_server( HTTPException: If there is a conflict with the server name or other errors. """ try: - logger.debug(f"User {user} is creating a new server") - return await server_service.register_server(db, server) + # Get user email and handle team assignment + user_email = get_user_email(user) + + # If no team specified, get user's personal team + if not team_id: + # First-Party + from mcpgateway.services.team_management_service import TeamManagementService # pylint: disable=import-outside-toplevel + + team_service = TeamManagementService(db) + user_teams = await team_service.get_user_teams(user_email, include_personal=True) + personal_team = next((team for team in user_teams if team.is_personal), None) + team_id = personal_team.id if personal_team else None + + logger.debug(f"User {user_email} is creating a new server for team {team_id}") + return await server_service.register_server(db, server, created_by=user_email, team_id=team_id, owner_email=user_email, visibility=visibility) except ServerNameConflictError as e: raise HTTPException(status_code=409, detail=str(e)) except ServerError as e: @@ -956,11 +1031,12 @@ async def create_server( @server_router.put("/{server_id}", response_model=ServerRead) +@require_permission("servers.update") async def update_server( server_id: str, server: ServerUpdate, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> ServerRead: """ Updates the information of an existing server. @@ -995,11 +1071,12 @@ async def update_server( @server_router.post("/{server_id}/toggle", response_model=ServerRead) +@require_permission("servers.update") async def toggle_server_status( server_id: str, activate: bool = True, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> ServerRead: """ Toggles the status of a server (activate or deactivate). @@ -1026,7 +1103,8 @@ async def toggle_server_status( @server_router.delete("/{server_id}", response_model=Dict[str, str]) -async def delete_server(server_id: str, db: Session = Depends(get_db), user: str = Depends(require_auth)) -> Dict[str, str]: +@require_permission("servers.delete") +async def delete_server(server_id: str, db: Session = Depends(get_db), user=Depends(get_current_user_with_permissions)) -> Dict[str, str]: """ Deletes a server by its ID. @@ -1055,7 +1133,8 @@ async def delete_server(server_id: str, db: Session = Depends(get_db), user: str @server_router.get("/{server_id}/sse") -async def sse_endpoint(request: Request, server_id: str, user: str = Depends(require_auth)): +@require_permission("servers.use") +async def sse_endpoint(request: Request, server_id: str, user=Depends(get_current_user_with_permissions)): """ Establishes a Server-Sent Events (SSE) connection for real-time updates about a server. @@ -1093,7 +1172,8 @@ async def sse_endpoint(request: Request, server_id: str, user: str = Depends(req @server_router.post("/{server_id}/message") -async def message_endpoint(request: Request, server_id: str, user: str = Depends(require_auth)): +@require_permission("servers.use") +async def message_endpoint(request: Request, server_id: str, user=Depends(get_current_user_with_permissions)): """ Handles incoming messages for a specific server. @@ -1134,11 +1214,12 @@ async def message_endpoint(request: Request, server_id: str, user: str = Depends @server_router.get("/{server_id}/tools", response_model=List[ToolRead]) +@require_permission("servers.read") async def server_get_tools( server_id: str, include_inactive: bool = False, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> List[ToolRead]: """ List tools for the server with an option to include inactive tools. @@ -1162,11 +1243,12 @@ async def server_get_tools( @server_router.get("/{server_id}/resources", response_model=List[ResourceRead]) +@require_permission("servers.read") async def server_get_resources( server_id: str, include_inactive: bool = False, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> List[ResourceRead]: """ List resources for the server with an option to include inactive resources. @@ -1190,11 +1272,12 @@ async def server_get_resources( @server_router.get("/{server_id}/prompts", response_model=List[PromptRead]) +@require_permission("servers.read") async def server_get_prompts( server_id: str, include_inactive: bool = False, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> List[PromptRead]: """ List prompts for the server with an option to include inactive prompts. @@ -1222,35 +1305,47 @@ async def server_get_prompts( ################## @a2a_router.get("", response_model=List[A2AAgentRead]) @a2a_router.get("/", response_model=List[A2AAgentRead]) +@require_permission("a2a.read") async def list_a2a_agents( include_inactive: bool = False, tags: Optional[str] = None, + team_id: Optional[str] = Query(None, description="Filter by team ID"), + visibility: Optional[str] = Query(None, description="Filter by visibility (private, team, public)"), + skip: int = Query(0, ge=0, description="Number of agents to skip for pagination"), + limit: int = Query(100, ge=1, le=1000, description="Maximum number of agents to return"), db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> List[A2AAgentRead]: """ - Lists all A2A agents in the system, optionally including inactive ones. + Lists A2A agents user has access to with team filtering. Args: include_inactive (bool): Whether to include inactive agents in the response. tags (Optional[str]): Comma-separated list of tags to filter by. + team_id (Optional[str]): Team ID to filter by. + visibility (Optional[str]): Visibility level to filter by. + skip (int): Number of agents to skip for pagination. + limit (int): Maximum number of agents to return. db (Session): The database session used to interact with the data store. user (str): The authenticated user making the request. Returns: - List[A2AAgentRead]: A list of A2A agent objects. + List[A2AAgentRead]: A list of A2A agent objects the user has access to. """ - # Parse tags parameter if provided + # Parse tags parameter if provided (keeping for backward compatibility) tags_list = None if tags: tags_list = [tag.strip() for tag in tags.split(",") if tag.strip()] - logger.debug(f"User {user} requested A2A agent list with tags={tags_list}") - return await a2a_service.list_agents(db, include_inactive=include_inactive, tags=tags_list) + logger.debug(f"User {user} requested A2A agent list with team_id={team_id}, visibility={visibility}, tags={tags_list}") + + # Use team-aware filtering + return await a2a_service.list_agents_for_user(db, user_email=user, team_id=team_id, visibility=visibility, include_inactive=include_inactive, skip=skip, limit=limit) @a2a_router.get("/{agent_id}", response_model=A2AAgentRead) -async def get_a2a_agent(agent_id: str, db: Session = Depends(get_db), user: str = Depends(require_auth)) -> A2AAgentRead: +@require_permission("a2a.read") +async def get_a2a_agent(agent_id: str, db: Session = Depends(get_db), user=Depends(get_current_user_with_permissions)) -> A2AAgentRead: """ Retrieves an A2A agent by its ID. @@ -1274,11 +1369,14 @@ async def get_a2a_agent(agent_id: str, db: Session = Depends(get_db), user: str @a2a_router.post("", response_model=A2AAgentRead, status_code=201) @a2a_router.post("/", response_model=A2AAgentRead, status_code=201) +@require_permission("a2a.create") async def create_a2a_agent( agent: A2AAgentCreate, request: Request, + team_id: Optional[str] = Body(None, description="Team ID to assign agent to"), + visibility: str = Body("private", description="Agent visibility: private, team, public"), db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> A2AAgentRead: """ Creates a new A2A agent. @@ -1286,6 +1384,8 @@ async def create_a2a_agent( Args: agent (A2AAgentCreate): The data for the new agent. request (Request): The FastAPI request object for metadata extraction. + team_id (Optional[str]): Team ID to assign the agent to. + visibility (str): Agent visibility level (private, team, public). db (Session): The database session used to interact with the data store. user (str): The authenticated user making the request. @@ -1296,10 +1396,23 @@ async def create_a2a_agent( HTTPException: If there is a conflict with the agent name or other errors. """ try: - logger.debug(f"User {user} is creating a new A2A agent") # Extract metadata from request metadata = MetadataCapture.extract_creation_metadata(request, user) + # Get user email and handle team assignment + user_email = get_user_email(user) + + # If no team specified, get user's personal team + if not team_id: + # First-Party + from mcpgateway.services.team_management_service import TeamManagementService # pylint: disable=import-outside-toplevel + + team_service = TeamManagementService(db) + user_teams = await team_service.get_user_teams(user_email, include_personal=True) + personal_team = next((team for team in user_teams if team.is_personal), None) + team_id = personal_team.id if personal_team else None + + logger.debug(f"User {user_email} is creating a new A2A agent for team {team_id}") return await a2a_service.register_agent( db, agent, @@ -1309,6 +1422,9 @@ async def create_a2a_agent( created_user_agent=metadata["created_user_agent"], import_batch_id=metadata["import_batch_id"], federation_source=metadata["federation_source"], + team_id=team_id, + owner_email=user_email, + visibility=visibility, ) except A2AAgentNameConflictError as e: raise HTTPException(status_code=409, detail=str(e)) @@ -1323,12 +1439,13 @@ async def create_a2a_agent( @a2a_router.put("/{agent_id}", response_model=A2AAgentRead) +@require_permission("a2a.update") async def update_a2a_agent( agent_id: str, agent: A2AAgentUpdate, request: Request, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> A2AAgentRead: """ Updates the information of an existing A2A agent. @@ -1375,11 +1492,12 @@ async def update_a2a_agent( @a2a_router.post("/{agent_id}/toggle", response_model=A2AAgentRead) +@require_permission("a2a.update") async def toggle_a2a_agent_status( agent_id: str, activate: bool = True, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> A2AAgentRead: """ Toggles the status of an A2A agent (activate or deactivate). @@ -1406,7 +1524,8 @@ async def toggle_a2a_agent_status( @a2a_router.delete("/{agent_id}", response_model=Dict[str, str]) -async def delete_a2a_agent(agent_id: str, db: Session = Depends(get_db), user: str = Depends(require_auth)) -> Dict[str, str]: +@require_permission("a2a.delete") +async def delete_a2a_agent(agent_id: str, db: Session = Depends(get_db), user=Depends(get_current_user_with_permissions)) -> Dict[str, str]: """ Deletes an A2A agent by its ID. @@ -1435,12 +1554,13 @@ async def delete_a2a_agent(agent_id: str, db: Session = Depends(get_db), user: s @a2a_router.post("/{agent_name}/invoke", response_model=Dict[str, Any]) +@require_permission("a2a.invoke") async def invoke_a2a_agent( agent_name: str, parameters: Dict[str, Any] = Body(default_factory=dict), interaction_type: str = Body(default="query"), db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> Dict[str, Any]: """ Invokes an A2A agent with the specified parameters. @@ -1472,23 +1592,28 @@ async def invoke_a2a_agent( ############# @tool_router.get("", response_model=Union[List[ToolRead], List[Dict], Dict, List]) @tool_router.get("/", response_model=Union[List[ToolRead], List[Dict], Dict, List]) +@require_permission("tools.read") async def list_tools( cursor: Optional[str] = None, include_inactive: bool = False, tags: Optional[str] = None, + team_id: Optional[str] = Query(None, description="Filter by team ID"), + visibility: Optional[str] = Query(None, description="Filter by visibility: private, team, public"), db: Session = Depends(get_db), apijsonpath: JsonPathModifier = Body(None), - _: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> Union[List[ToolRead], List[Dict], Dict]: - """List all registered tools with pagination support. + """List all registered tools with team-based filtering and pagination support. Args: cursor: Pagination cursor for fetching the next set of results include_inactive: Whether to include inactive tools in the results tags: Comma-separated list of tags to filter by (e.g., "api,data") + team_id: Optional team ID to filter tools by specific team + visibility: Optional visibility filter (private, team, public) db: Database session apijsonpath: JSON path modifier to filter or transform the response - _: Authenticated user + user: Authenticated user with permissions Returns: List of tools or modified result based on jsonpath @@ -1499,8 +1624,19 @@ async def list_tools( if tags: tags_list = [tag.strip() for tag in tags.split(",") if tag.strip()] - # For now just pass the cursor parameter even if not used - data = await tool_service.list_tools(db, cursor=cursor, include_inactive=include_inactive, tags=tags_list) + # Get user email for team filtering + user_email = get_user_email(user) + + # Use team-filtered tool listing + if team_id or visibility: + data = await tool_service.list_tools_for_user(db=db, user_email=user_email, team_id=team_id, visibility=visibility, include_inactive=include_inactive) + + # Apply tag filtering to team-filtered results if needed + if tags_list: + data = [tool for tool in data if any(tag in tool.tags for tag in tags_list)] + else: + # Use existing method for backward compatibility when no team filtering + data = await tool_service.list_tools(db, cursor=cursor, include_inactive=include_inactive, tags=tags_list) if apijsonpath is None: return data @@ -1512,15 +1648,25 @@ async def list_tools( @tool_router.post("", response_model=ToolRead) @tool_router.post("/", response_model=ToolRead) -async def create_tool(tool: ToolCreate, request: Request, db: Session = Depends(get_db), user: str = Depends(require_auth)) -> ToolRead: +@require_permission("tools.create") +async def create_tool( + tool: ToolCreate, + request: Request, + team_id: Optional[str] = Body(None, description="Team ID to assign tool to"), + visibility: str = Body("private", description="Tool visibility: private, team, public"), + db: Session = Depends(get_db), + user=Depends(get_current_user_with_permissions), +) -> ToolRead: """ - Creates a new tool in the system. + Creates a new tool in the system with team assignment support. Args: tool (ToolCreate): The data needed to create the tool. request (Request): The FastAPI request object for metadata extraction. + team_id (Optional[str]): Team ID to assign the tool to. + visibility (str): Tool visibility (private, team, public). db (Session): The database session dependency. - user (str): The authenticated user making the request. + user: The authenticated user making the request. Returns: ToolRead: The created tool data. @@ -1532,7 +1678,20 @@ async def create_tool(tool: ToolCreate, request: Request, db: Session = Depends( # Extract metadata from request metadata = MetadataCapture.extract_creation_metadata(request, user) - logger.debug(f"User {user} is creating a new tool") + # Get user email and handle team assignment + user_email = get_user_email(user) + + # If no team specified, get user's personal team + if not team_id: + # First-Party + from mcpgateway.services.team_management_service import TeamManagementService # pylint: disable=import-outside-toplevel + + team_service = TeamManagementService(db) + user_teams = await team_service.get_user_teams(user_email, include_personal=True) + personal_team = next((team for team in user_teams if team.is_personal), None) + team_id = personal_team.id if personal_team else None + + logger.debug(f"User {user_email} is creating a new tool for team {team_id}") return await tool_service.register_tool( db, tool, @@ -1542,6 +1701,9 @@ async def create_tool(tool: ToolCreate, request: Request, db: Session = Depends( created_user_agent=metadata["created_user_agent"], import_batch_id=metadata["import_batch_id"], federation_source=metadata["federation_source"], + team_id=team_id, + owner_email=user_email, + visibility=visibility, ) except Exception as ex: logger.error(f"Error while creating tool: {ex}") @@ -1565,10 +1727,11 @@ async def create_tool(tool: ToolCreate, request: Request, db: Session = Depends( @tool_router.get("/{tool_id}", response_model=Union[ToolRead, Dict]) +@require_permission("tools.read") async def get_tool( tool_id: str, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), apijsonpath: JsonPathModifier = Body(None), ) -> Union[ToolRead, Dict]: """ @@ -1601,12 +1764,13 @@ async def get_tool( @tool_router.put("/{tool_id}", response_model=ToolRead) +@require_permission("tools.update") async def update_tool( tool_id: str, tool: ToolUpdate, request: Request, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> ToolRead: """ Updates an existing tool with new data. @@ -1658,7 +1822,8 @@ async def update_tool( @tool_router.delete("/{tool_id}") -async def delete_tool(tool_id: str, db: Session = Depends(get_db), user: str = Depends(require_auth)) -> Dict[str, str]: +@require_permission("tools.delete") +async def delete_tool(tool_id: str, db: Session = Depends(get_db), user=Depends(get_current_user_with_permissions)) -> Dict[str, str]: """ Permanently deletes a tool by ID. @@ -1682,11 +1847,12 @@ async def delete_tool(tool_id: str, db: Session = Depends(get_db), user: str = D @tool_router.post("/{tool_id}/toggle") +@require_permission("tools.update") async def toggle_tool_status( tool_id: str, activate: bool = True, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> Dict[str, Any]: """ Activates or deactivates a tool. @@ -1720,9 +1886,10 @@ async def toggle_tool_status( ################# # --- Resource templates endpoint - MUST come before variable paths --- @resource_router.get("/templates/list", response_model=ListResourceTemplatesResult) +@require_permission("resources.read") async def list_resource_templates( db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> ListResourceTemplatesResult: """ List all available resource templates. @@ -1741,11 +1908,12 @@ async def list_resource_templates( @resource_router.post("/{resource_id}/toggle") +@require_permission("resources.update") async def toggle_resource_status( resource_id: int, activate: bool = True, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> Dict[str, Any]: """ Activate or deactivate a resource by its ID. @@ -1776,47 +1944,64 @@ async def toggle_resource_status( @resource_router.get("", response_model=List[ResourceRead]) @resource_router.get("/", response_model=List[ResourceRead]) +@require_permission("resources.read") async def list_resources( cursor: Optional[str] = None, include_inactive: bool = False, tags: Optional[str] = None, + team_id: Optional[str] = None, + visibility: Optional[str] = None, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> List[ResourceRead]: """ - Retrieve a list of resources. + Retrieve a list of resources accessible to the user, with team filtering support. Args: cursor (Optional[str]): Optional cursor for pagination. include_inactive (bool): Whether to include inactive resources. tags (Optional[str]): Comma-separated list of tags to filter by. + team_id (Optional[str]): Filter by specific team ID. + visibility (Optional[str]): Filter by visibility (private, team, public). db (Session): Database session. user (str): Authenticated user. Returns: - List[ResourceRead]: List of resources. + List[ResourceRead]: List of resources the user has access to. """ # Parse tags parameter if provided tags_list = None if tags: tags_list = [tag.strip() for tag in tags.split(",") if tag.strip()] - - logger.debug(f"User {user} requested resource list with cursor {cursor}, include_inactive={include_inactive}, tags={tags_list}") - if cached := resource_cache.get("resource_list"): - return cached - # Pass the cursor parameter - resources = await resource_service.list_resources(db, include_inactive=include_inactive, tags=tags_list) - resource_cache.set("resource_list", resources) - return resources + # Get user email for team filtering + user_email = get_user_email(user) + + # Use team-filtered resource listing + if team_id or visibility: + data = await resource_service.list_resources_for_user(db=db, user_email=user_email, team_id=team_id, visibility=visibility, include_inactive=include_inactive) + # Apply tag filtering to team-filtered results if needed + if tags_list: + data = [resource for resource in data if any(tag in resource.tags for tag in tags_list)] + else: + # Use existing method for backward compatibility when no team filtering + logger.debug(f"User {user_email} requested resource list with cursor {cursor}, include_inactive={include_inactive}, tags={tags_list}") + if cached := resource_cache.get("resource_list"): + return cached + data = await resource_service.list_resources(db, include_inactive=include_inactive, tags=tags_list) + resource_cache.set("resource_list", data) + return data @resource_router.post("", response_model=ResourceRead) @resource_router.post("/", response_model=ResourceRead) +@require_permission("resources.create") async def create_resource( resource: ResourceCreate, request: Request, + team_id: Optional[str] = Body(None, description="Team ID to assign resource to"), + visibility: str = Body("private", description="Resource visibility: private, team, public"), db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> ResourceRead: """ Create a new resource. @@ -1824,6 +2009,8 @@ async def create_resource( Args: resource (ResourceCreate): Data for the new resource. request (Request): FastAPI request object for metadata extraction. + team_id (Optional[str]): Team ID to assign the resource to. + visibility (str): Resource visibility level (private, team, public). db (Session): Database session. user (str): Authenticated user. @@ -1833,10 +2020,24 @@ async def create_resource( Raises: HTTPException: On conflict or validation errors or IntegrityError. """ - logger.debug(f"User {user} is creating a new resource") try: + # Extract metadata from request metadata = MetadataCapture.extract_creation_metadata(request, user) + # Get user email and handle team assignment + user_email = get_user_email(user) + + # If no team specified, get user's personal team + if not team_id: + # First-Party + from mcpgateway.services.team_management_service import TeamManagementService # pylint: disable=import-outside-toplevel + + team_service = TeamManagementService(db) + user_teams = await team_service.get_user_teams(user_email, include_personal=True) + personal_team = next((team for team in user_teams if team.is_personal), None) + team_id = personal_team.id if personal_team else None + + logger.debug(f"User {user_email} is creating a new resource for team {team_id}") return await resource_service.register_resource( db, resource, @@ -1846,6 +2047,9 @@ async def create_resource( created_user_agent=metadata["created_user_agent"], import_batch_id=metadata["import_batch_id"], federation_source=metadata["federation_source"], + team_id=team_id, + owner_email=user_email, + visibility=visibility, ) except ResourceURIConflictError as e: raise HTTPException(status_code=409, detail=str(e)) @@ -1861,7 +2065,8 @@ async def create_resource( @resource_router.get("/{uri:path}") -async def read_resource(uri: str, request: Request, db: Session = Depends(get_db), user: str = Depends(require_auth)) -> ResourceContent: +@require_permission("resources.read") +async def read_resource(uri: str, request: Request, db: Session = Depends(get_db), user=Depends(get_current_user_with_permissions)) -> Any: """ Read a resource by its URI with plugin support. @@ -1872,7 +2077,7 @@ async def read_resource(uri: str, request: Request, db: Session = Depends(get_db user (str): Authenticated user. Returns: - ResourceContent: The content of the resource. + Any: The content of the resource. Raises: HTTPException: If the resource cannot be found or read. @@ -1889,21 +2094,47 @@ async def read_resource(uri: str, request: Request, db: Session = Depends(get_db try: # Call service with context for plugin support - content: ResourceContent = await resource_service.read_resource(db, uri, request_id=request_id, user=user, server_id=server_id) + content = await resource_service.read_resource(db, uri, request_id=request_id, user=user, server_id=server_id) except (ResourceNotFoundError, ResourceError) as exc: # Translate to FastAPI HTTP error raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(exc)) from exc resource_cache.set(uri, content) - return content + # Ensure a plain JSON-serializable structure + try: + # First-Party + from mcpgateway.models import ResourceContent # pylint: disable=import-outside-toplevel + from mcpgateway.models import TextContent # pylint: disable=import-outside-toplevel + + # If already a ResourceContent, serialize directly + if isinstance(content, ResourceContent): + return content.model_dump() + + # If TextContent, wrap into resource envelope with text + if isinstance(content, TextContent): + return {"type": "resource", "uri": uri, "text": content.text} + except Exception: + pass + + if isinstance(content, bytes): + return {"type": "resource", "uri": uri, "blob": content.decode("utf-8", errors="ignore")} + if isinstance(content, str): + return {"type": "resource", "uri": uri, "text": content} + + # Objects with a 'text' attribute (e.g., mocks) – best-effort mapping + if hasattr(content, "text"): + return {"type": "resource", "uri": uri, "text": getattr(content, "text")} + + return {"type": "resource", "uri": uri, "text": str(content)} @resource_router.put("/{uri:path}", response_model=ResourceRead) +@require_permission("resources.update") async def update_resource( uri: str, resource: ResourceUpdate, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> ResourceRead: """ Update a resource identified by its URI. @@ -1936,7 +2167,8 @@ async def update_resource( @resource_router.delete("/{uri:path}") -async def delete_resource(uri: str, db: Session = Depends(get_db), user: str = Depends(require_auth)) -> Dict[str, str]: +@require_permission("resources.delete") +async def delete_resource(uri: str, db: Session = Depends(get_db), user=Depends(get_current_user_with_permissions)) -> Dict[str, str]: """ Delete a resource by its URI. @@ -1963,7 +2195,8 @@ async def delete_resource(uri: str, db: Session = Depends(get_db), user: str = D @resource_router.post("/subscribe/{uri:path}") -async def subscribe_resource(uri: str, user: str = Depends(require_auth)) -> StreamingResponse: +@require_permission("resources.read") +async def subscribe_resource(uri: str, user=Depends(get_current_user_with_permissions)) -> StreamingResponse: """ Subscribe to server-sent events (SSE) for a specific resource. @@ -1982,11 +2215,12 @@ async def subscribe_resource(uri: str, user: str = Depends(require_auth)) -> Str # Prompt APIs # ############### @prompt_router.post("/{prompt_id}/toggle") +@require_permission("prompts.update") async def toggle_prompt_status( prompt_id: int, activate: bool = True, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> Dict[str, Any]: """ Toggle the activation status of a prompt. @@ -2017,42 +2251,61 @@ async def toggle_prompt_status( @prompt_router.get("", response_model=List[PromptRead]) @prompt_router.get("/", response_model=List[PromptRead]) +@require_permission("prompts.read") async def list_prompts( cursor: Optional[str] = None, include_inactive: bool = False, tags: Optional[str] = None, + team_id: Optional[str] = None, + visibility: Optional[str] = None, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> List[PromptRead]: """ - List prompts with optional pagination and inclusion of inactive items. + List prompts accessible to the user, with team filtering support. Args: cursor: Cursor for pagination. include_inactive: Include inactive prompts. tags: Comma-separated list of tags to filter by. + team_id: Filter by specific team ID. + visibility: Filter by visibility (private, team, public). db: Database session. user: Authenticated user. Returns: - List of prompt records. + List of prompt records the user has access to. """ # Parse tags parameter if provided tags_list = None if tags: tags_list = [tag.strip() for tag in tags.split(",") if tag.strip()] - - logger.debug(f"User: {user} requested prompt list with include_inactive={include_inactive}, cursor={cursor}, tags={tags_list}") - return await prompt_service.list_prompts(db, cursor=cursor, include_inactive=include_inactive, tags=tags_list) + # Get user email for team filtering + user_email = get_user_email(user) + + # Use team-filtered prompt listing + if team_id or visibility: + data = await prompt_service.list_prompts_for_user(db=db, user_email=user_email, team_id=team_id, visibility=visibility, include_inactive=include_inactive) + # Apply tag filtering to team-filtered results if needed + if tags_list: + data = [prompt for prompt in data if any(tag in prompt.tags for tag in tags_list)] + else: + # Use existing method for backward compatibility when no team filtering + logger.debug(f"User: {user_email} requested prompt list with include_inactive={include_inactive}, cursor={cursor}, tags={tags_list}") + data = await prompt_service.list_prompts(db, cursor=cursor, include_inactive=include_inactive, tags=tags_list) + return data @prompt_router.post("", response_model=PromptRead) @prompt_router.post("/", response_model=PromptRead) +@require_permission("prompts.create") async def create_prompt( prompt: PromptCreate, request: Request, + team_id: Optional[str] = Body(None, description="Team ID to assign prompt to"), + visibility: str = Body("private", description="Prompt visibility: private, team, public"), db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> PromptRead: """ Create a new prompt. @@ -2060,6 +2313,8 @@ async def create_prompt( Args: prompt (PromptCreate): Payload describing the prompt to create. request (Request): The FastAPI request object for metadata extraction. + team_id (Optional[str]): Team ID to assign the prompt to. + visibility (str): Prompt visibility level (private, team, public). db (Session): Active SQLAlchemy session. user (str): Authenticated username. @@ -2071,11 +2326,24 @@ async def create_prompt( * **400 Bad Request** - validation or persistence error raised by :pyclass:`~mcpgateway.services.prompt_service.PromptService`. """ - logger.debug(f"User: {user} requested to create prompt: {prompt}") try: # Extract metadata from request metadata = MetadataCapture.extract_creation_metadata(request, user) + # Get user email and handle team assignment + user_email = get_user_email(user) + + # If no team specified, get user's personal team + if not team_id: + # First-Party + from mcpgateway.services.team_management_service import TeamManagementService # pylint: disable=import-outside-toplevel + + team_service = TeamManagementService(db) + user_teams = await team_service.get_user_teams(user_email, include_personal=True) + personal_team = next((team for team in user_teams if team.is_personal), None) + team_id = personal_team.id if personal_team else None + + logger.debug(f"User {user_email} is creating a new prompt for team {team_id}") return await prompt_service.register_prompt( db, prompt, @@ -2085,6 +2353,9 @@ async def create_prompt( created_user_agent=metadata["created_user_agent"], import_batch_id=metadata["import_batch_id"], federation_source=metadata["federation_source"], + team_id=team_id, + owner_email=user_email, + visibility=visibility, ) except Exception as e: if isinstance(e, PromptNameConflictError): @@ -2107,11 +2378,12 @@ async def create_prompt( @prompt_router.post("/{name}") +@require_permission("prompts.read") async def get_prompt( name: str, args: Dict[str, str] = Body({}), db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> Any: """Get a prompt by name with arguments. @@ -2175,10 +2447,11 @@ async def get_prompt( @prompt_router.get("/{name}") +@require_permission("prompts.read") async def get_prompt_no_args( name: str, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> Any: """Get a prompt by name without arguments. @@ -2229,11 +2502,12 @@ async def get_prompt_no_args( @prompt_router.put("/{name}", response_model=PromptRead) +@require_permission("prompts.update") async def update_prompt( name: str, prompt: PromptUpdate, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> PromptRead: """ Update (overwrite) an existing prompt definition. @@ -2276,7 +2550,8 @@ async def update_prompt( @prompt_router.delete("/{name}") -async def delete_prompt(name: str, db: Session = Depends(get_db), user: str = Depends(require_auth)) -> Dict[str, str]: +@require_permission("prompts.delete") +async def delete_prompt(name: str, db: Session = Depends(get_db), user=Depends(get_current_user_with_permissions)) -> Dict[str, str]: """ Delete a prompt by name. @@ -2313,11 +2588,12 @@ async def delete_prompt(name: str, db: Session = Depends(get_db), user: str = De # Gateway APIs # ################ @gateway_router.post("/{gateway_id}/toggle") +@require_permission("gateways.update") async def toggle_gateway_status( gateway_id: str, activate: bool = True, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> Dict[str, Any]: """ Toggle the activation status of a gateway. @@ -2352,10 +2628,11 @@ async def toggle_gateway_status( @gateway_router.get("", response_model=List[GatewayRead]) @gateway_router.get("/", response_model=List[GatewayRead]) +@require_permission("gateways.read") async def list_gateways( include_inactive: bool = False, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> List[GatewayRead]: """ List all gateways. @@ -2374,11 +2651,12 @@ async def list_gateways( @gateway_router.post("", response_model=GatewayRead) @gateway_router.post("/", response_model=GatewayRead) +@require_permission("gateways.create") async def register_gateway( gateway: GatewayCreate, request: Request, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> GatewayRead: """ Register a new gateway. @@ -2397,6 +2675,23 @@ async def register_gateway( # Extract metadata from request metadata = MetadataCapture.extract_creation_metadata(request, user) + # Get user email and handle team assignment + user_email = get_user_email(user) + team_id = gateway.team_id + visibility = gateway.visibility + + # If no team specified, get user's personal team + if not team_id: + # First-Party + from mcpgateway.services.team_management_service import TeamManagementService # pylint: disable=import-outside-toplevel + + team_service = TeamManagementService(db) + user_teams = await team_service.get_user_teams(user_email, include_personal=True) + personal_team = next((team for team in user_teams if team.is_personal), None) + team_id = personal_team.id if personal_team else None + + logger.debug(f"User {user_email} is creating a new gateway for team {team_id}") + return await gateway_service.register_gateway( db, gateway, @@ -2404,6 +2699,9 @@ async def register_gateway( created_from_ip=metadata["created_from_ip"], created_via=metadata["created_via"], created_user_agent=metadata["created_user_agent"], + team_id=team_id, + owner_email=user_email, + visibility=visibility, ) except Exception as ex: if isinstance(ex, GatewayConnectionError): @@ -2422,7 +2720,8 @@ async def register_gateway( @gateway_router.get("/{gateway_id}", response_model=GatewayRead) -async def get_gateway(gateway_id: str, db: Session = Depends(get_db), user: str = Depends(require_auth)) -> GatewayRead: +@require_permission("gateways.read") +async def get_gateway(gateway_id: str, db: Session = Depends(get_db), user=Depends(get_current_user_with_permissions)) -> GatewayRead: """ Retrieve a gateway by ID. @@ -2439,11 +2738,12 @@ async def get_gateway(gateway_id: str, db: Session = Depends(get_db), user: str @gateway_router.put("/{gateway_id}", response_model=GatewayRead) +@require_permission("gateways.update") async def update_gateway( gateway_id: str, gateway: GatewayUpdate, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> GatewayRead: """ Update a gateway. @@ -2479,7 +2779,8 @@ async def update_gateway( @gateway_router.delete("/{gateway_id}") -async def delete_gateway(gateway_id: str, db: Session = Depends(get_db), user: str = Depends(require_auth)) -> Dict[str, str]: +@require_permission("gateways.delete") +async def delete_gateway(gateway_id: str, db: Session = Depends(get_db), user=Depends(get_current_user_with_permissions)) -> Dict[str, str]: """ Delete a gateway by ID. @@ -2502,7 +2803,7 @@ async def delete_gateway(gateway_id: str, db: Session = Depends(get_db), user: s @root_router.get("", response_model=List[Root]) @root_router.get("/", response_model=List[Root]) async def list_roots( - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> List[Root]: """ Retrieve a list of all registered roots. @@ -2521,7 +2822,7 @@ async def list_roots( @root_router.post("/", response_model=Root) async def add_root( root: Root, # Accept JSON body using the Root model from models.py - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> Root: """ Add a new root. @@ -2540,7 +2841,7 @@ async def add_root( @root_router.delete("/{uri:path}") async def remove_root( uri: str, - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> Dict[str, str]: """ Remove a registered root by URI. @@ -2559,7 +2860,7 @@ async def remove_root( @root_router.get("/changes") async def subscribe_roots_changes( - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> StreamingResponse: """ Subscribe to real-time changes in root list via Server-Sent Events (SSE). @@ -2579,19 +2880,27 @@ async def subscribe_roots_changes( ################## @utility_router.post("/rpc/") @utility_router.post("/rpc") -async def handle_rpc(request: Request, db: Session = Depends(get_db), user: str = Depends(require_auth)): # revert this back +async def handle_rpc(request: Request, db: Session = Depends(get_db), user=Depends(require_auth)): """Handle RPC requests. Args: request (Request): The incoming FastAPI request. db (Session): Database session. - user (str): The authenticated user. + user: The authenticated user (dict with RBAC context). Returns: Response with the RPC result or error. """ try: - logger.debug(f"User {user} made an RPC request") + # Extract user identifier from either RBAC user object or JWT payload + if hasattr(user, "email"): + user_id = user.email # RBAC user object + elif isinstance(user, dict): + user_id = user.get("sub") or user.get("email") or user.get("username", "unknown") # JWT payload + else: + user_id = str(user) # String username from basic auth + + logger.debug(f"User {user_id} made an RPC request") body = await request.json() method = body["method"] req_id = body.get("id") if "body" in locals() else None @@ -2634,7 +2943,7 @@ async def handle_rpc(request: Request, db: Session = Depends(get_db), user: str request_id = params.get("requestId", None) if not uri: raise JSONRPCError(-32602, "Missing resource URI in parameters", params) - result = await resource_service.read_resource(db, uri, request_id=request_id, user=user) + result = await resource_service.read_resource(db, uri, request_id=request_id, user=get_user_email(user)) if hasattr(result, "model_dump"): result = {"contents": [result.model_dump(by_alias=True, exclude_none=True)]} else: @@ -2802,7 +3111,8 @@ async def websocket_endpoint(websocket: WebSocket): @utility_router.get("/sse") -async def utility_sse_endpoint(request: Request, user: str = Depends(require_auth)): +@require_permission("tools.invoke") +async def utility_sse_endpoint(request: Request, user=Depends(get_current_user_with_permissions)): """ Establish a Server-Sent Events (SSE) connection for real-time updates. @@ -2839,7 +3149,8 @@ async def utility_sse_endpoint(request: Request, user: str = Depends(require_aut @utility_router.post("/message") -async def utility_message_endpoint(request: Request, user: str = Depends(require_auth)): +@require_permission("tools.invoke") +async def utility_message_endpoint(request: Request, user=Depends(get_current_user_with_permissions)): """ Handle a JSON-RPC message directed to a specific SSE session. @@ -2882,7 +3193,8 @@ async def utility_message_endpoint(request: Request, user: str = Depends(require @utility_router.post("/logging/setLevel") -async def set_log_level(request: Request, user: str = Depends(require_auth)) -> None: +@require_permission("admin.system_config") +async def set_log_level(request: Request, user=Depends(get_current_user_with_permissions)) -> None: """ Update the server's log level at runtime. @@ -2904,7 +3216,8 @@ async def set_log_level(request: Request, user: str = Depends(require_auth)) -> # Metrics # #################### @metrics_router.get("", response_model=dict) -async def get_metrics(db: Session = Depends(get_db), user: str = Depends(require_auth)) -> dict: +@require_permission("admin.metrics") +async def get_metrics(db: Session = Depends(get_db), user=Depends(get_current_user_with_permissions)) -> dict: """ Retrieve aggregated metrics for all entity types (Tools, Resources, Servers, Prompts, A2A Agents). @@ -2937,7 +3250,8 @@ async def get_metrics(db: Session = Depends(get_db), user: str = Depends(require @metrics_router.post("/reset", response_model=dict) -async def reset_metrics(entity: Optional[str] = None, entity_id: Optional[int] = None, db: Session = Depends(get_db), user: str = Depends(require_auth)) -> dict: +@require_permission("admin.metrics") +async def reset_metrics(entity: Optional[str] = None, entity_id: Optional[int] = None, db: Session = Depends(get_db), user=Depends(get_current_user_with_permissions)) -> dict: """ Reset metrics for a specific entity type and optionally a specific entity ID, or perform a global reset if no entity is specified. @@ -3033,11 +3347,12 @@ async def readiness_check(db: Session = Depends(get_db)): @tag_router.get("", response_model=List[TagInfo]) @tag_router.get("/", response_model=List[TagInfo]) +@require_permission("tags.read") async def list_tags( entity_types: Optional[str] = None, include_entities: bool = False, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> List[TagInfo]: """ Retrieve all unique tags across specified entity types. @@ -3072,11 +3387,12 @@ async def list_tags( @tag_router.get("/{tag_name}/entities", response_model=List[TaggedEntity]) +@require_permission("tags.read") async def get_entities_by_tag( tag_name: str, entity_types: Optional[str] = None, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> List[TaggedEntity]: """ Get all entities that have a specific tag. @@ -3116,6 +3432,7 @@ async def get_entities_by_tag( @export_import_router.get("/export", response_model=Dict[str, Any]) +@require_permission("admin.export") async def export_configuration( export_format: str = "json", # pylint: disable=unused-argument types: Optional[str] = None, @@ -3124,7 +3441,7 @@ async def export_configuration( include_inactive: bool = False, include_dependencies: bool = True, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> Dict[str, Any]: """ Export gateway configuration to JSON format. @@ -3161,8 +3478,8 @@ async def export_configuration( if tags: tags_list = [t.strip() for t in tags.split(",") if t.strip()] - # Extract username from user (which could be string or dict with token) - username = user if isinstance(user, str) else user.get("username", "unknown") + # Extract username from user (which is now an EmailUser object) + username = user.email # Perform export export_data = await export_service.export_configuration( @@ -3180,8 +3497,9 @@ async def export_configuration( @export_import_router.post("/export/selective", response_model=Dict[str, Any]) +@require_permission("admin.export") async def export_selective_configuration( - entity_selections: Dict[str, List[str]] = Body(...), include_dependencies: bool = True, db: Session = Depends(get_db), user: str = Depends(require_auth) + entity_selections: Dict[str, List[str]] = Body(...), include_dependencies: bool = True, db: Session = Depends(get_db), user=Depends(get_current_user_with_permissions) ) -> Dict[str, Any]: """ Export specific entities by their IDs/names. @@ -3208,8 +3526,8 @@ async def export_selective_configuration( try: logger.info(f"User {user} requested selective configuration export") - # Extract username from user (which could be string or dict with token) - username = user if isinstance(user, str) else user.get("username", "unknown") + # Extract username from user (which is now an EmailUser object) + username = user.email export_data = await export_service.export_selective(db=db, entity_selections=entity_selections, include_dependencies=include_dependencies, exported_by=username) @@ -3224,6 +3542,7 @@ async def export_selective_configuration( @export_import_router.post("/import", response_model=Dict[str, Any]) +@require_permission("admin.import") async def import_configuration( import_data: Dict[str, Any] = Body(...), conflict_strategy: str = "update", @@ -3231,7 +3550,7 @@ async def import_configuration( rekey_secret: Optional[str] = None, selected_entities: Optional[Dict[str, List[str]]] = None, db: Session = Depends(get_db), - user: str = Depends(require_auth), + user=Depends(get_current_user_with_permissions), ) -> Dict[str, Any]: """ Import configuration data with conflict resolution. @@ -3260,8 +3579,8 @@ async def import_configuration( except ValueError: raise HTTPException(status_code=400, detail=f"Invalid conflict strategy. Must be one of: {[s.value for s in ConflictStrategy]}") - # Extract username from user (which could be string or dict with token) - username = user if isinstance(user, str) else user.get("username", "unknown") + # Extract username from user (which is now an EmailUser object) + username = user.email # Perform import import_status = await import_service.import_configuration( @@ -3285,7 +3604,8 @@ async def import_configuration( @export_import_router.get("/import/status/{import_id}", response_model=Dict[str, Any]) -async def get_import_status(import_id: str, user: str = Depends(require_auth)) -> Dict[str, Any]: +@require_permission("admin.import") +async def get_import_status(import_id: str, user=Depends(get_current_user_with_permissions)) -> Dict[str, Any]: """ Get the status of an import operation. @@ -3309,7 +3629,8 @@ async def get_import_status(import_id: str, user: str = Depends(require_auth)) - @export_import_router.get("/import/status", response_model=List[Dict[str, Any]]) -async def list_import_statuses(user: str = Depends(require_auth)) -> List[Dict[str, Any]]: +@require_permission("admin.import") +async def list_import_statuses(user=Depends(get_current_user_with_permissions)) -> List[Dict[str, Any]]: """ List all import operation statuses. @@ -3326,7 +3647,8 @@ async def list_import_statuses(user: str = Depends(require_auth)) -> List[Dict[s @export_import_router.post("/import/cleanup", response_model=Dict[str, Any]) -async def cleanup_import_statuses(max_age_hours: int = 24, user: str = Depends(require_auth)) -> Dict[str, Any]: +@require_permission("admin.import") +async def cleanup_import_statuses(max_age_hours: int = 24, user=Depends(get_current_user_with_permissions)) -> Dict[str, Any]: """ Clean up completed import statuses older than specified age. @@ -3369,6 +3691,73 @@ async def cleanup_import_statuses(max_age_hours: int = 24, user: str = Depends(r app.include_router(well_known_router) +# Include Email Authentication router if enabled +if settings.email_auth_enabled: + try: + # First-Party + from mcpgateway.routers.auth import auth_router + from mcpgateway.routers.email_auth import email_auth_router + + app.include_router(email_auth_router, prefix="/auth/email", tags=["Email Authentication"]) + app.include_router(auth_router, tags=["Main Authentication"]) + logger.info("Authentication routers included - Auth enabled") + + # Include SSO router if enabled + if settings.sso_enabled: + try: + # First-Party + from mcpgateway.routers.sso import sso_router + + app.include_router(sso_router, tags=["SSO Authentication"]) + logger.info("SSO router included - SSO authentication enabled") + except ImportError as e: + logger.error(f"SSO router not available: {e}") + else: + logger.info("SSO router not included - SSO authentication disabled") + except ImportError as e: + logger.error(f"Authentication routers not available: {e}") +else: + logger.info("Email authentication router not included - Email auth disabled") + +# Include Team Management router if email auth is enabled +if settings.email_auth_enabled: + try: + # First-Party + from mcpgateway.routers.teams import teams_router + + app.include_router(teams_router, prefix="/teams", tags=["Teams"]) + logger.info("Team management router included - Teams enabled with email auth") + except ImportError as e: + logger.error(f"Team management router not available: {e}") +else: + logger.info("Team management router not included - Email auth disabled") + +# Include JWT Token Catalog router if email auth is enabled +if settings.email_auth_enabled: + try: + # First-Party + from mcpgateway.routers.tokens import router as tokens_router + + app.include_router(tokens_router, tags=["JWT Token Catalog"]) + logger.info("JWT Token Catalog router included - Token management enabled with email auth") + except ImportError as e: + logger.error(f"JWT Token Catalog router not available: {e}") +else: + logger.info("JWT Token Catalog router not included - Email auth disabled") + +# Include RBAC router if email auth is enabled +if settings.email_auth_enabled: + try: + # First-Party + from mcpgateway.routers.rbac import router as rbac_router + + app.include_router(rbac_router, tags=["RBAC"]) + logger.info("RBAC router included - Role-based access control enabled") + except ImportError as e: + logger.error(f"RBAC router not available: {e}") +else: + logger.info("RBAC router not included - Email auth disabled") + # Include OAuth router try: # First-Party diff --git a/mcpgateway/middleware/__init__.py b/mcpgateway/middleware/__init__.py index 04c1af9a0..f06afe625 100644 --- a/mcpgateway/middleware/__init__.py +++ b/mcpgateway/middleware/__init__.py @@ -5,4 +5,9 @@ Authors: Mihai Criveti Middleware package for MCP Gateway. +Contains various middleware components for request processing. """ + +from mcpgateway.middleware.token_scoping import TokenScopingMiddleware, token_scoping_middleware + +__all__ = ["TokenScopingMiddleware", "token_scoping_middleware"] diff --git a/mcpgateway/middleware/rbac.py b/mcpgateway/middleware/rbac.py new file mode 100644 index 000000000..c921092a0 --- /dev/null +++ b/mcpgateway/middleware/rbac.py @@ -0,0 +1,460 @@ +# -*- coding: utf-8 -*- +"""RBAC Permission Checking Middleware. + +This module provides middleware for FastAPI to enforce role-based access control +on API endpoints. It includes permission decorators and dependency injection +functions for protecting routes. +""" + +# Standard +from functools import wraps +import logging +from typing import Callable, Generator, List, Optional + +# Third-Party +from fastapi import Cookie, Depends, HTTPException, Request, status +from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer +from sqlalchemy.orm import Session + +# First-Party +from mcpgateway.auth import get_current_user +from mcpgateway.db import SessionLocal +from mcpgateway.services.permission_service import PermissionService + +logger = logging.getLogger(__name__) + +# HTTP Bearer security scheme for token extraction +security = HTTPBearer(auto_error=False) + + +def get_db() -> Generator[Session, None, None]: + """Get database session for dependency injection. + + Yields: + Session: SQLAlchemy database session + """ + db = SessionLocal() + try: + yield db + finally: + db.close() + + +async def get_permission_service(db: Session = Depends(get_db)) -> PermissionService: + """Get permission service instance for dependency injection. + + Args: + db: Database session + + Returns: + PermissionService: Permission checking service instance + """ + return PermissionService(db) + + +async def get_current_user_with_permissions( + request: Request, credentials: Optional[HTTPAuthorizationCredentials] = Depends(security), jwt_token: Optional[str] = Cookie(default=None), db: Session = Depends(get_db) +): + """Extract current user from JWT token and prepare for permission checking. + + Args: + request: FastAPI request object for IP/user-agent extraction + credentials: HTTP Bearer credentials + jwt_token: JWT token from cookie + db: Database session + + Returns: + dict: User information with permission checking context + + Raises: + HTTPException: If authentication fails + + Examples: + Use as FastAPI dependency:: + + @app.get("/protected-endpoint") + async def protected_route(user = Depends(get_current_user_with_permissions)): + return {"user": user["email"]} + """ + # Try multiple sources for the token, prioritizing manual cookie reading + token = None + + # 1. First try manual cookie reading (most reliable) + if request.cookies: + # Try both jwt_token and access_token cookie names + manual_token = request.cookies.get("jwt_token") or request.cookies.get("access_token") + if manual_token: + token = manual_token + + # 2. Then try Authorization header + if not token and credentials and credentials.credentials: + token = credentials.credentials + + # 3. Finally try FastAPI Cookie dependency (fallback) + if not token and jwt_token: + token = jwt_token + + if not token: + # For browser requests (HTML Accept header or HTMX), redirect to login + accept_header = request.headers.get("accept", "") + is_htmx = request.headers.get("hx-request") == "true" + if "text/html" in accept_header or is_htmx: + raise HTTPException(status_code=status.HTTP_302_FOUND, detail="Authentication required", headers={"Location": "/admin/login"}) + raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Authorization token required") + + try: + # Create credentials object if we got token from cookie + if not credentials: + credentials = HTTPAuthorizationCredentials(scheme="Bearer", credentials=token) + + # Extract user from token using the email auth function + user = await get_current_user(credentials, db) + + # Add request context for permission auditing + return { + "email": user.email, + "full_name": user.full_name, + "is_admin": user.is_admin, + "ip_address": request.client.host if request.client else None, + "user_agent": request.headers.get("user-agent"), + "db": db, + } + except Exception as e: + logger.error(f"Authentication failed: {type(e).__name__}: {e}") + + # For browser requests (HTML Accept header or HTMX), redirect to login + accept_header = request.headers.get("accept", "") + is_htmx = request.headers.get("hx-request") == "true" + if "text/html" in accept_header or is_htmx: + raise HTTPException(status_code=status.HTTP_302_FOUND, detail="Authentication required", headers={"Location": "/admin/login"}) + + raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid authentication credentials") + + +def require_permission(permission: str, resource_type: Optional[str] = None): + """Decorator to require specific permission for accessing an endpoint. + + Args: + permission: Required permission (e.g., 'tools.create') + resource_type: Optional resource type for resource-specific permissions + + Returns: + Callable: Decorated function that enforces the permission requirement + + Examples: + Protect tool creation endpoint:: + + @require_permission("tools.create", "tools") + @app.post("/tools") + async def create_tool(user = Depends(get_current_user_with_permissions)): + return {"message": "Tool created"} + + Protect admin endpoint:: + + @require_permission("admin.user_management") + @app.get("/admin/users") + async def list_users(user = Depends(get_current_user_with_permissions)): + return {"users": []} + """ + + def decorator(func: Callable) -> Callable: + """Decorator function that wraps the original function with permission checking. + + Args: + func: The function to be decorated + + Returns: + Callable: The wrapped function with permission checking + """ + + @wraps(func) + async def wrapper(*args, **kwargs): + """Async wrapper function that performs permission check before calling original function. + + Args: + *args: Positional arguments passed to the wrapped function + **kwargs: Keyword arguments passed to the wrapped function + + Returns: + Any: Result from the wrapped function if permission check passes + + Raises: + HTTPException: If user authentication or permission check fails + """ + # Extract user context from kwargs + user_context = None + for _, value in kwargs.items(): + if isinstance(value, dict) and "email" in value and "db" in value: + user_context = value + break + + if not user_context: + raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="User authentication required") + + # Create permission service and check permission + permission_service = PermissionService(user_context["db"]) + + # Extract team_id from path parameters if available + team_id = kwargs.get("team_id") + + # Check permission + granted = await permission_service.check_permission( + user_email=user_context["email"], + permission=permission, + resource_type=resource_type, + team_id=team_id, + ip_address=user_context.get("ip_address"), + user_agent=user_context.get("user_agent"), + ) + + if not granted: + logger.warning(f"Permission denied: user={user_context['email']}, " f"permission={permission}, resource_type={resource_type}") + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=f"Insufficient permissions. Required: {permission}") + + # Permission granted, execute the original function + return await func(*args, **kwargs) + + return wrapper + + return decorator + + +def require_admin_permission(): + """Decorator to require admin permissions for accessing an endpoint. + + Returns: + Callable: Decorated function that enforces admin permission requirement + + Examples: + Protect admin endpoint:: + + @require_admin_permission() + @app.get("/admin/system-config") + async def get_system_config(user = Depends(get_current_user_with_permissions)): + return {"config": "system settings"} + """ + + def decorator(func: Callable) -> Callable: + """Decorator function that wraps the original function with admin permission checking. + + Args: + func: The function to be decorated + + Returns: + Callable: The wrapped function with admin permission checking + """ + + @wraps(func) + async def wrapper(*args, **kwargs): + """Async wrapper function that performs admin permission check before calling original function. + + Args: + *args: Positional arguments passed to the wrapped function + **kwargs: Keyword arguments passed to the wrapped function + + Returns: + Any: Result from the wrapped function if admin permission check passes + + Raises: + HTTPException: If user authentication or admin permission check fails + """ + # Extract user context from kwargs + user_context = None + for _, value in kwargs.items(): + if isinstance(value, dict) and "email" in value and "db" in value: + user_context = value + break + + if not user_context: + raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="User authentication required") + + # Create permission service and check admin permissions + permission_service = PermissionService(user_context["db"]) + + has_admin_permission = await permission_service.check_admin_permission(user_context["email"]) + + if not has_admin_permission: + logger.warning(f"Admin permission denied: user={user_context['email']}") + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Admin permissions required") + + # Admin permission granted, execute the original function + return await func(*args, **kwargs) + + return wrapper + + return decorator + + +def require_any_permission(permissions: List[str], resource_type: Optional[str] = None): + """Decorator to require any of the specified permissions for accessing an endpoint. + + Args: + permissions: List of permissions, user needs at least one + resource_type: Optional resource type for resource-specific permissions + + Returns: + Callable: Decorated function that enforces the permission requirements + + Examples: + Require any of multiple permissions:: + + @require_any_permission(["tools.read", "tools.execute"], "tools") + @app.get("/tools/{tool_id}") + async def get_tool(tool_id: str, user = Depends(get_current_user_with_permissions)): + return {"tool_id": tool_id} + """ + + def decorator(func: Callable) -> Callable: + """Decorator function that wraps the original function with any-permission checking. + + Args: + func: The function to be decorated + + Returns: + Callable: The wrapped function with any-permission checking + """ + + @wraps(func) + async def wrapper(*args, **kwargs): + """Async wrapper function that performs any-permission check before calling original function. + + Args: + *args: Positional arguments passed to the wrapped function + **kwargs: Keyword arguments passed to the wrapped function + + Returns: + Any: Result from the wrapped function if any-permission check passes + + Raises: + HTTPException: If user authentication or any-permission check fails + """ + # Extract user context from kwargs + user_context = None + for _, value in kwargs.items(): + if isinstance(value, dict) and "email" in value and "db" in value: + user_context = value + break + + if not user_context: + raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="User authentication required") + + # Create permission service + permission_service = PermissionService(user_context["db"]) + + # Extract team_id from path parameters if available + team_id = kwargs.get("team_id") + + # Check if user has any of the required permissions + granted = False + for permission in permissions: + if await permission_service.check_permission( + user_email=user_context["email"], + permission=permission, + resource_type=resource_type, + team_id=team_id, + ip_address=user_context.get("ip_address"), + user_agent=user_context.get("user_agent"), + ): + granted = True + break + + if not granted: + logger.warning(f"Permission denied: user={user_context['email']}, " f"permissions={permissions}, resource_type={resource_type}") + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=f"Insufficient permissions. Required one of: {', '.join(permissions)}") + + # Permission granted, execute the original function + return await func(*args, **kwargs) + + return wrapper + + return decorator + + +class PermissionChecker: + """Context manager for manual permission checking. + + Useful for complex permission logic that can't be handled by decorators. + + Examples: + >>> async def complex_endpoint(user = Depends(get_current_user_with_permissions)): + ... checker = PermissionChecker(user) + ... + ... # Check multiple permissions + ... if await checker.has_permission("tools.read"): + ... tools = get_tools() + ... + ... if await checker.has_admin_permission(): + ... admin_data = get_admin_data() + ... + ... return {"data": "result"} + """ + + def __init__(self, user_context: dict): + """Initialize permission checker with user context. + + Args: + user_context: User context from get_current_user_with_permissions + """ + self.user_context = user_context + self.permission_service = PermissionService(user_context["db"]) + + async def has_permission(self, permission: str, resource_type: Optional[str] = None, resource_id: Optional[str] = None, team_id: Optional[str] = None) -> bool: + """Check if user has specific permission. + + Args: + permission: Permission to check + resource_type: Optional resource type + resource_id: Optional resource ID + team_id: Optional team context + + Returns: + bool: True if user has permission + """ + return await self.permission_service.check_permission( + user_email=self.user_context["email"], + permission=permission, + resource_type=resource_type, + resource_id=resource_id, + team_id=team_id, + ip_address=self.user_context.get("ip_address"), + user_agent=self.user_context.get("user_agent"), + ) + + async def has_admin_permission(self) -> bool: + """Check if user has admin permissions. + + Returns: + bool: True if user has admin permissions + """ + return await self.permission_service.check_admin_permission(self.user_context["email"]) + + async def has_any_permission(self, permissions: List[str], resource_type: Optional[str] = None, team_id: Optional[str] = None) -> bool: + """Check if user has any of the specified permissions. + + Args: + permissions: List of permissions to check + resource_type: Optional resource type + team_id: Optional team context + + Returns: + bool: True if user has at least one permission + """ + for permission in permissions: + if await self.has_permission(permission, resource_type, team_id=team_id): + return True + return False + + async def require_permission(self, permission: str, resource_type: Optional[str] = None, resource_id: Optional[str] = None, team_id: Optional[str] = None) -> None: + """Require specific permission, raise HTTPException if not granted. + + Args: + permission: Required permission + resource_type: Optional resource type + resource_id: Optional resource ID + team_id: Optional team context + + Raises: + HTTPException: If permission is not granted + """ + if not await self.has_permission(permission, resource_type, resource_id, team_id): + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=f"Insufficient permissions. Required: {permission}") diff --git a/mcpgateway/middleware/security_headers.py b/mcpgateway/middleware/security_headers.py index eedecc10e..0cdec732f 100644 --- a/mcpgateway/middleware/security_headers.py +++ b/mcpgateway/middleware/security_headers.py @@ -75,10 +75,10 @@ async def dispatch(self, request: Request, call_next) -> Response: # This CSP is designed to work with the Admin UI while providing security csp_directives = [ "default-src 'self'", - "script-src 'self' 'unsafe-inline' 'unsafe-eval' https://cdnjs.cloudflare.com https://cdn.tailwindcss.com https://cdn.jsdelivr.net", + "script-src 'self' 'unsafe-inline' 'unsafe-eval' https://cdnjs.cloudflare.com https://cdn.tailwindcss.com https://cdn.jsdelivr.net https://unpkg.com", "style-src 'self' 'unsafe-inline' https://cdnjs.cloudflare.com https://cdn.jsdelivr.net", "img-src 'self' data: https:", - "font-src 'self' data:", + "font-src 'self' data: https://cdnjs.cloudflare.com", "connect-src 'self' ws: wss: https:", "frame-ancestors 'none'", ] @@ -98,4 +98,27 @@ async def dispatch(self, request: Request, call_next) -> Response: if "Server" in response.headers: del response.headers["Server"] + # Lightweight dynamic CORS reflection based on current settings + origin = request.headers.get("Origin") + if origin: + allow = False + if settings.environment != "production": + # In non-production, honor allowed_origins dynamically + allow = (not settings.allowed_origins) or (origin in settings.allowed_origins) + else: + # In production, require explicit allow-list + allow = origin in settings.allowed_origins + if allow: + response.headers["Access-Control-Allow-Origin"] = origin + # Standard CORS helpers + if settings.cors_allow_credentials: + response.headers["Access-Control-Allow-Credentials"] = "true" + # Expose common headers for clients + exposed = ["Content-Length", "X-Request-ID"] + response.headers["Access-Control-Expose-Headers"] = ", ".join(exposed) + # Ensure caches vary on Origin + existing_vary = response.headers.get("Vary") + vary_val = "Origin" if not existing_vary else (existing_vary + ", Origin") + response.headers["Vary"] = vary_val + return response diff --git a/mcpgateway/middleware/token_scoping.py b/mcpgateway/middleware/token_scoping.py new file mode 100644 index 000000000..de762c183 --- /dev/null +++ b/mcpgateway/middleware/token_scoping.py @@ -0,0 +1,304 @@ +# -*- coding: utf-8 -*- +"""Location: ./mcpgateway/middleware/token_scoping.py +Copyright 2025 +SPDX-License-Identifier: Apache-2.0 +Authors: Mihai Criveti + +Token Scoping Middleware. +This middleware enforces token scoping restrictions at the API level, +including server_id restrictions, IP restrictions, permission checks, +and time-based restrictions. +""" + +# Standard +from datetime import datetime +import ipaddress +import re +from typing import Optional + +# Third-Party +from fastapi import HTTPException, Request, status +from fastapi.security import HTTPBearer +import jwt + +# First-Party +from mcpgateway.config import settings +from mcpgateway.db import Permissions + +# Security scheme +bearer_scheme = HTTPBearer(auto_error=False) + + +class TokenScopingMiddleware: + """Middleware to enforce token scoping restrictions.""" + + def __init__(self): + """Initialize token scoping middleware.""" + + def _extract_token_scopes(self, request: Request) -> Optional[dict]: + """Extract token scopes from JWT in request. + + Args: + request: FastAPI request object + + Returns: + Dict containing token scopes or None if no valid token + """ + # Get authorization header + auth_header = request.headers.get("Authorization") + if not auth_header or not auth_header.startswith("Bearer "): + return None + + token = auth_header.split(" ", 1)[1] + + try: + # Decode JWT with signature verification but skip audience/issuer checks for scope extraction + # (full verification including audience/issuer is handled by the auth system) + payload = jwt.decode(token, settings.jwt_secret_key, algorithms=[settings.jwt_algorithm], options={"verify_aud": False, "verify_iss": False}) + return payload.get("scopes") + except jwt.PyJWTError: + return None + + def _get_client_ip(self, request: Request) -> str: + """Extract client IP address from request. + + Args: + request: FastAPI request object + + Returns: + str: Client IP address + """ + # Check for X-Forwarded-For header (proxy/load balancer) + forwarded_for = request.headers.get("X-Forwarded-For") + if forwarded_for: + return forwarded_for.split(",")[0].strip() + + # Check for X-Real-IP header + real_ip = request.headers.get("X-Real-IP") + if real_ip: + return real_ip + + # Fall back to direct client IP + return request.client.host if request.client else "unknown" + + def _check_ip_restrictions(self, client_ip: str, ip_restrictions: list) -> bool: + """Check if client IP is allowed by restrictions. + + Args: + client_ip: Client's IP address + ip_restrictions: List of allowed IP addresses/CIDR ranges + + Returns: + bool: True if IP is allowed, False otherwise + """ + if not ip_restrictions: + return True # No restrictions + + try: + client_ip_obj = ipaddress.ip_address(client_ip) + + for restriction in ip_restrictions: + try: + # Check if it's a CIDR range + if "/" in restriction: + network = ipaddress.ip_network(restriction, strict=False) + if client_ip_obj in network: + return True + else: + # Single IP address + if client_ip_obj == ipaddress.ip_address(restriction): + return True + except (ValueError, ipaddress.AddressValueError): + continue + + except (ValueError, ipaddress.AddressValueError): + return False + + return False + + def _check_time_restrictions(self, time_restrictions: dict) -> bool: + """Check if current time is allowed by restrictions. + + Args: + time_restrictions: Dict containing time-based restrictions + + Returns: + bool: True if current time is allowed, False otherwise + """ + if not time_restrictions: + return True # No restrictions + + now = datetime.utcnow() + + # Check business hours restriction + if time_restrictions.get("business_hours_only"): + # Assume business hours are 9 AM to 5 PM UTC + # This could be made configurable + if not 9 <= now.hour < 17: + return False + + # Check day of week restrictions + weekdays_only = time_restrictions.get("weekdays_only") + if weekdays_only and now.weekday() >= 5: # Saturday=5, Sunday=6 + return False + + return True + + def _check_server_restriction(self, request_path: str, server_id: Optional[str]) -> bool: + """Check if request path matches server restriction. + + Args: + request_path: The request path/URL + server_id: Required server ID (None means no restriction) + + Returns: + bool: True if request is allowed, False otherwise + """ + if not server_id: + return True # No server restriction + + # Extract server ID from path patterns: + # /servers/{server_id}/... + # /sse/{server_id} + # /ws/{server_id} + # Using segment-aware patterns for precise matching + server_path_patterns = [ + r"^/servers/([^/]+)(?:$|/)", + r"^/sse/([^/?]+)(?:$|\?)", + r"^/ws/([^/?]+)(?:$|\?)", + ] + + for pattern in server_path_patterns: + match = re.search(pattern, request_path) + if match: + path_server_id = match.group(1) + return path_server_id == server_id + + # If no server ID found in path, allow general endpoints + general_endpoints = ["/health", "/metrics", "/openapi.json", "/docs", "/redoc"] + + # Check exact root path separately + if request_path == "/": + return True + + for endpoint in general_endpoints: + if request_path.startswith(endpoint): + return True + + # Default deny for unmatched paths with server restrictions + return False + + def _check_permission_restrictions(self, request_path: str, request_method: str, permissions: list) -> bool: + """Check if request is allowed by permission restrictions. + + Args: + request_path: The request path/URL + request_method: HTTP method (GET, POST, etc.) + permissions: List of allowed permissions + + Returns: + bool: True if request is allowed, False otherwise + """ + if not permissions or "*" in permissions: + return True # No restrictions or full access + + # Map HTTP methods and paths to permission requirements + # Using canonical permissions from mcpgateway.db.Permissions + # Segment-aware patterns to avoid accidental early matches + permission_map = { + # Tools permissions + ("GET", r"^/tools(?:$|/)"): Permissions.TOOLS_READ, + ("POST", r"^/tools(?:$|/)"): Permissions.TOOLS_CREATE, + ("PUT", r"^/tools/[^/]+(?:$|/)"): Permissions.TOOLS_UPDATE, + ("DELETE", r"^/tools/[^/]+(?:$|/)"): Permissions.TOOLS_DELETE, + ("GET", r"^/servers/[^/]+/tools(?:$|/)"): Permissions.TOOLS_READ, + ("POST", r"^/servers/[^/]+/tools/[^/]+/call(?:$|/)"): Permissions.TOOLS_EXECUTE, + # Resources permissions + ("GET", r"^/resources(?:$|/)"): Permissions.RESOURCES_READ, + ("POST", r"^/resources(?:$|/)"): Permissions.RESOURCES_CREATE, + ("PUT", r"^/resources/[^/]+(?:$|/)"): Permissions.RESOURCES_UPDATE, + ("DELETE", r"^/resources/[^/]+(?:$|/)"): Permissions.RESOURCES_DELETE, + ("GET", r"^/servers/[^/]+/resources(?:$|/)"): Permissions.RESOURCES_READ, + # Prompts permissions + ("GET", r"^/prompts(?:$|/)"): Permissions.PROMPTS_READ, + ("POST", r"^/prompts(?:$|/)"): Permissions.PROMPTS_CREATE, + ("PUT", r"^/prompts/[^/]+(?:$|/)"): Permissions.PROMPTS_UPDATE, + ("DELETE", r"^/prompts/[^/]+(?:$|/)"): Permissions.PROMPTS_DELETE, + # Server management permissions + ("GET", r"^/servers(?:$|/)"): Permissions.SERVERS_READ, + ("POST", r"^/servers(?:$|/)"): Permissions.SERVERS_CREATE, + ("PUT", r"^/servers/[^/]+(?:$|/)"): Permissions.SERVERS_UPDATE, + ("DELETE", r"^/servers/[^/]+(?:$|/)"): Permissions.SERVERS_DELETE, + # Admin permissions + ("GET", r"^/admin(?:$|/)"): Permissions.ADMIN_USER_MANAGEMENT, + ("POST", r"^/admin/[^/]+(?:$|/)"): Permissions.ADMIN_USER_MANAGEMENT, + ("PUT", r"^/admin/[^/]+(?:$|/)"): Permissions.ADMIN_USER_MANAGEMENT, + ("DELETE", r"^/admin/[^/]+(?:$|/)"): Permissions.ADMIN_USER_MANAGEMENT, + } + + # Check each permission mapping + for (method, path_pattern), required_permission in permission_map.items(): + if request_method == method and re.match(path_pattern, request_path): + return required_permission in permissions + + # Default allow for unmatched paths + return True + + async def __call__(self, request: Request, call_next): + """Middleware function to check token scoping. + + Args: + request: FastAPI request object + call_next: Next middleware/handler in chain + + Returns: + Response from next handler or HTTPException + + Raises: + HTTPException: If token scoping restrictions are violated + """ + # Skip scoping for certain paths (truly public endpoints only) + skip_paths = ["/health", "/metrics", "/openapi.json", "/docs", "/redoc", "/auth/email/login", "/auth/email/register", "/.well-known/"] + + # Check exact root path separately + if request.url.path == "/": + return await call_next(request) + + if any(request.url.path.startswith(path) for path in skip_paths): + return await call_next(request) + + # Extract token scopes + scopes = self._extract_token_scopes(request) + + # If no scopes, continue (regular auth will handle this) + if not scopes: + return await call_next(request) + + # Check server ID restriction + server_id = scopes.get("server_id") + if not self._check_server_restriction(request.url.path, server_id): + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=f"Token not authorized for this server. Required: {server_id}") + + # Check IP restrictions + ip_restrictions = scopes.get("ip_restrictions", []) + if ip_restrictions: + client_ip = self._get_client_ip(request) + if not self._check_ip_restrictions(client_ip, ip_restrictions): + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=f"Request from IP {client_ip} not allowed by token restrictions") + + # Check time restrictions + time_restrictions = scopes.get("time_restrictions", {}) + if not self._check_time_restrictions(time_restrictions): + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Request not allowed at this time by token restrictions") + + # Check permission restrictions + permissions = scopes.get("permissions", []) + if not self._check_permission_restrictions(request.url.path, request.method, permissions): + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Insufficient permissions for this operation") + + # All scoping checks passed, continue to next handler + return await call_next(request) + + +# Create middleware instance +token_scoping_middleware = TokenScopingMiddleware() diff --git a/mcpgateway/models.py b/mcpgateway/models.py index e5d30c0cc..d78994cb5 100644 --- a/mcpgateway/models.py +++ b/mcpgateway/models.py @@ -827,3 +827,154 @@ class Gateway(BaseModel): url: AnyHttpUrl capabilities: ServerCapabilities last_seen: Optional[datetime] = None + + +# ===== RBAC Models (Epic 004) ===== + + +class RBACRole(BaseModel): + """Role model for RBAC system. + + Represents roles that can be assigned to users with specific permissions. + Supports global, team, and personal scopes with role inheritance. + + Attributes: + id: Unique role identifier + name: Human-readable role name + description: Role description and purpose + scope: Role scope ('global', 'team', 'personal') + permissions: List of permission strings + inherits_from: Parent role ID for inheritance + created_by: Email of user who created the role + is_system_role: Whether this is a system-defined role + is_active: Whether the role is currently active + created_at: Role creation timestamp + updated_at: Role last modification timestamp + + Examples: + >>> from datetime import datetime + >>> role = RBACRole( + ... id="role-123", + ... name="team_admin", + ... description="Team administrator with member management rights", + ... scope="team", + ... permissions=["teams.manage_members", "resources.create"], + ... created_by="admin@example.com", + ... created_at=datetime(2023, 1, 1), + ... updated_at=datetime(2023, 1, 1) + ... ) + >>> role.name + 'team_admin' + >>> "teams.manage_members" in role.permissions + True + """ + + id: str = Field(..., description="Unique role identifier") + name: str = Field(..., description="Human-readable role name") + description: Optional[str] = Field(None, description="Role description and purpose") + scope: str = Field(..., description="Role scope", pattern="^(global|team|personal)$") + permissions: List[str] = Field(..., description="List of permission strings") + inherits_from: Optional[str] = Field(None, description="Parent role ID for inheritance") + created_by: str = Field(..., description="Email of user who created the role") + is_system_role: bool = Field(False, description="Whether this is a system-defined role") + is_active: bool = Field(True, description="Whether the role is currently active") + created_at: datetime = Field(..., description="Role creation timestamp") + updated_at: datetime = Field(..., description="Role last modification timestamp") + + +class UserRoleAssignment(BaseModel): + """User role assignment model. + + Represents the assignment of roles to users in specific scopes (global, team, personal). + Includes metadata about who granted the role and when it expires. + + Attributes: + id: Unique assignment identifier + user_email: Email of the user assigned the role + role_id: ID of the assigned role + scope: Assignment scope ('global', 'team', 'personal') + scope_id: Team ID if team-scoped, None otherwise + granted_by: Email of user who granted this role + granted_at: Timestamp when role was granted + expires_at: Optional expiration timestamp + is_active: Whether the assignment is currently active + + Examples: + >>> from datetime import datetime + >>> user_role = UserRoleAssignment( + ... id="assignment-123", + ... user_email="user@example.com", + ... role_id="team-admin-123", + ... scope="team", + ... scope_id="team-engineering-456", + ... granted_by="admin@example.com", + ... granted_at=datetime(2023, 1, 1) + ... ) + >>> user_role.scope + 'team' + >>> user_role.is_active + True + """ + + id: str = Field(..., description="Unique assignment identifier") + user_email: str = Field(..., description="Email of the user assigned the role") + role_id: str = Field(..., description="ID of the assigned role") + scope: str = Field(..., description="Assignment scope", pattern="^(global|team|personal)$") + scope_id: Optional[str] = Field(None, description="Team ID if team-scoped, None otherwise") + granted_by: str = Field(..., description="Email of user who granted this role") + granted_at: datetime = Field(..., description="Timestamp when role was granted") + expires_at: Optional[datetime] = Field(None, description="Optional expiration timestamp") + is_active: bool = Field(True, description="Whether the assignment is currently active") + + +class PermissionAudit(BaseModel): + """Permission audit log model. + + Records all permission checks for security auditing and compliance. + Includes details about the user, permission, resource, and result. + + Attributes: + id: Unique audit log entry identifier + timestamp: When the permission check occurred + user_email: Email of user being checked + permission: Permission being checked (e.g., 'tools.create') + resource_type: Type of resource (e.g., 'tools', 'teams') + resource_id: Specific resource ID if applicable + team_id: Team context if applicable + granted: Whether permission was granted + roles_checked: JSON of roles that were checked + ip_address: IP address of the request + user_agent: User agent string + + Examples: + >>> from datetime import datetime + >>> audit_log = PermissionAudit( + ... id=1, + ... timestamp=datetime(2023, 1, 1), + ... user_email="user@example.com", + ... permission="tools.create", + ... resource_type="tools", + ... granted=True, + ... roles_checked={"roles": ["team_admin"]} + ... ) + >>> audit_log.granted + True + >>> audit_log.permission + 'tools.create' + """ + + id: int = Field(..., description="Unique audit log entry identifier") + timestamp: datetime = Field(..., description="When the permission check occurred") + user_email: Optional[str] = Field(None, description="Email of user being checked") + permission: str = Field(..., description="Permission being checked") + resource_type: Optional[str] = Field(None, description="Type of resource") + resource_id: Optional[str] = Field(None, description="Specific resource ID if applicable") + team_id: Optional[str] = Field(None, description="Team context if applicable") + granted: bool = Field(..., description="Whether permission was granted") + roles_checked: Optional[Dict] = Field(None, description="JSON of roles that were checked") + ip_address: Optional[str] = Field(None, description="IP address of the request") + user_agent: Optional[str] = Field(None, description="User agent string") + + +# Permission constants are imported from db.py to avoid duplication +# Use Permissions class from mcpgateway.db instead of duplicate SystemPermissions diff --git a/mcpgateway/plugins/framework/loader/config.py b/mcpgateway/plugins/framework/loader/config.py index 12608256f..a64a0815e 100644 --- a/mcpgateway/plugins/framework/loader/config.py +++ b/mcpgateway/plugins/framework/loader/config.py @@ -16,7 +16,7 @@ import yaml # First-Party -from mcpgateway.plugins.framework.models import Config +from mcpgateway.plugins.framework.models import Config, PluginSettings class ConfigLoader: @@ -72,12 +72,16 @@ def load_config(config: str, use_jinja: bool = True) -> Config: ... os.unlink(temp_path) 60 """ - with open(os.path.normpath(config), "r", encoding="utf-8") as file: - template = file.read() - if use_jinja: - jinja_env = jinja2.Environment(loader=jinja2.BaseLoader(), autoescape=True) - rendered_template = jinja_env.from_string(template).render(env=os.environ) - else: - rendered_template = template - config_data = yaml.safe_load(rendered_template) - return Config(**config_data) + try: + with open(os.path.normpath(config), "r", encoding="utf-8") as file: + template = file.read() + if use_jinja: + jinja_env = jinja2.Environment(loader=jinja2.BaseLoader(), autoescape=True) + rendered_template = jinja_env.from_string(template).render(env=os.environ) + else: + rendered_template = template + config_data = yaml.safe_load(rendered_template) or {} + return Config(**config_data) + except FileNotFoundError: + # Graceful fallback for tests and minimal environments without plugin config + return Config(plugins=[], plugin_dirs=[], plugin_settings=PluginSettings()) diff --git a/mcpgateway/reverse_proxy.py b/mcpgateway/reverse_proxy.py index 25c7c0f84..826cee5a0 100644 --- a/mcpgateway/reverse_proxy.py +++ b/mcpgateway/reverse_proxy.py @@ -22,7 +22,7 @@ Example: $ export REVERSE_PROXY_GATEWAY=https://gateway.example.com - $ export REVERSE_PROXY_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token --username admin --exp 10080 --secret key) + $ export REVERSE_PROXY_TOKEN=$(python3 -m mcpgateway.utils.create_jwt_token --username admin@example.com --exp 10080 --secret key) $ python3 -m mcpgateway.reverse_proxy --local-stdio "uvx mcp-server-git" """ diff --git a/mcpgateway/routers/auth.py b/mcpgateway/routers/auth.py new file mode 100644 index 000000000..b02eee6c1 --- /dev/null +++ b/mcpgateway/routers/auth.py @@ -0,0 +1,141 @@ +# -*- coding: utf-8 -*- +"""Location: ./mcpgateway/routers/auth.py +Copyright 2025 +SPDX-License-Identifier: Apache-2.0 +Authors: Mihai Criveti + +Main Authentication Router. +This module provides simplified authentication endpoints for both session and API key management. +It serves as the primary entry point for authentication workflows. +""" + +# Standard +from typing import Optional + +# Third-Party +from fastapi import APIRouter, Depends, HTTPException, Request, status +from pydantic import BaseModel, EmailStr +from sqlalchemy.orm import Session + +# First-Party +from mcpgateway.db import SessionLocal +from mcpgateway.routers.email_auth import create_access_token, get_client_ip, get_user_agent +from mcpgateway.schemas import AuthenticationResponse, EmailUserResponse +from mcpgateway.services.email_auth_service import EmailAuthService +from mcpgateway.services.logging_service import LoggingService + +# Initialize logging +logging_service = LoggingService() +logger = logging_service.get_logger(__name__) + +# Create router +auth_router = APIRouter(prefix="/auth", tags=["Authentication"]) + + +def get_db(): + """Database dependency. + + Yields: + Session: SQLAlchemy database session + """ + db = SessionLocal() + try: + yield db + finally: + db.close() + + +class LoginRequest(BaseModel): + """Login request supporting both email and username formats. + + Attributes: + email: User email address (can also accept 'username' field for compatibility) + password: User password + """ + + email: Optional[EmailStr] = None + username: Optional[str] = None # For compatibility + password: str + + def get_email(self) -> str: + """Get email from either email or username field. + + Returns: + str: Email address to use for authentication + + Raises: + ValueError: If neither email nor username is provided + """ + if self.email: + return str(self.email) + elif self.username: + # Support both email format and plain username + if "@" in self.username: + return self.username + else: + # If it's a plain username, we can't authenticate + # (since we're email-based system) + raise ValueError("Username format not supported. Please use email address.") + else: + raise ValueError("Either email or username must be provided") + + +@auth_router.post("/login", response_model=AuthenticationResponse) +async def login(login_request: LoginRequest, request: Request, db: Session = Depends(get_db)): + """Authenticate user and return session JWT token. + + This endpoint provides Tier 1 authentication for session-based access. + The returned JWT token should be used for UI access and API key management. + + Args: + login_request: Login credentials (email/username + password) + request: FastAPI request object + db: Database session + + Returns: + AuthenticationResponse: Session JWT token and user info + + Raises: + HTTPException: If authentication fails + + Examples: + Email format (recommended): + { + "email": "admin@example.com", + "password": "ChangeMe_12345678$" + } + + Username format (compatibility): + { + "username": "admin@example.com", + "password": "ChangeMe_12345678$" + } + """ + auth_service = EmailAuthService(db) + ip_address = get_client_ip(request) + user_agent = get_user_agent(request) + + try: + # Extract email from request + email = login_request.get_email() + + # Authenticate user + user = await auth_service.authenticate_user(email=email, password=login_request.password, ip_address=ip_address, user_agent=user_agent) + + if not user: + raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid email or password") + + # Create session JWT token (Tier 1 authentication) + access_token, expires_in = create_access_token(user) + + logger.info(f"User {email} authenticated successfully") + + # Return session token for UI access and API key management + return AuthenticationResponse(access_token=access_token, token_type="bearer", expires_in=expires_in, user=EmailUserResponse.from_email_user(user)) + + except ValueError as e: + logger.warning(f"Login validation error: {e}") + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e)) + except Exception as e: + logger.error(f"Login error for {login_request.email or login_request.username}: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Authentication service error") diff --git a/mcpgateway/routers/email_auth.py b/mcpgateway/routers/email_auth.py new file mode 100644 index 000000000..db9c922d9 --- /dev/null +++ b/mcpgateway/routers/email_auth.py @@ -0,0 +1,628 @@ +# -*- coding: utf-8 -*- +"""Location: ./mcpgateway/routers/email_auth.py +Copyright 2025 +SPDX-License-Identifier: Apache-2.0 +Authors: Mihai Criveti + +Email Authentication Router. +This module provides FastAPI routes for email-based authentication +including login, registration, password management, and user profile endpoints. + +Examples: + >>> from fastapi import FastAPI + >>> from mcpgateway.routers.email_auth import email_auth_router + >>> app = FastAPI() + >>> app.include_router(email_auth_router, prefix="/auth/email", tags=["Email Auth"]) +""" + +# Standard +from datetime import datetime, timedelta +from typing import Optional + +# Third-Party +from fastapi import APIRouter, Depends, HTTPException, Request, status +from fastapi.security import HTTPBearer +import jwt +from sqlalchemy.orm import Session + +# First-Party +from mcpgateway.auth import get_current_user +from mcpgateway.config import settings +from mcpgateway.db import EmailUser, SessionLocal +from mcpgateway.middleware.rbac import require_permission +from mcpgateway.schemas import ( + AuthenticationResponse, + AuthEventResponse, + ChangePasswordRequest, + EmailLoginRequest, + EmailRegistrationRequest, + EmailUserResponse, + SuccessResponse, + UserListResponse, +) +from mcpgateway.services.email_auth_service import AuthenticationError, EmailAuthService, EmailValidationError, PasswordValidationError, UserExistsError +from mcpgateway.services.logging_service import LoggingService + +# Initialize logging +logging_service = LoggingService() +logger = logging_service.get_logger(__name__) + +# Create router +email_auth_router = APIRouter() + +# Security scheme +bearer_scheme = HTTPBearer(auto_error=False) + + +def get_db(): + """Database dependency. + + Yields: + Session: SQLAlchemy database session + """ + db = SessionLocal() + try: + yield db + finally: + db.close() + + +def get_client_ip(request: Request) -> str: + """Extract client IP address from request. + + Args: + request: FastAPI request object + + Returns: + str: Client IP address + """ + # Check for X-Forwarded-For header (proxy/load balancer) + forwarded_for = request.headers.get("X-Forwarded-For") + if forwarded_for: + return forwarded_for.split(",")[0].strip() + + # Check for X-Real-IP header + real_ip = request.headers.get("X-Real-IP") + if real_ip: + return real_ip + + # Fall back to direct client IP + return request.client.host if request.client else "unknown" + + +def get_user_agent(request: Request) -> str: + """Extract user agent from request. + + Args: + request: FastAPI request object + + Returns: + str: User agent string + """ + return request.headers.get("User-Agent", "unknown") + + +def create_access_token(user: EmailUser, token_scopes: Optional[dict] = None, jti: Optional[str] = None) -> tuple[str, int]: + """Create JWT access token for user with enhanced scoping. + + Args: + user: EmailUser instance + token_scopes: Optional token scoping information + jti: Optional JWT ID for revocation tracking + + Returns: + Tuple of (token_string, expires_in_seconds) + """ + now = datetime.utcnow() + expires_delta = timedelta(minutes=settings.token_expiry) + expire = now + expires_delta + + # Get user's teams for namespace information + teams = user.get_teams() + + # Create enhanced JWT payload with team and namespace information + payload = { + # Standard JWT claims + "sub": user.email, + "iss": settings.jwt_issuer, + "aud": settings.jwt_audience, + "iat": int(now.timestamp()), + "exp": int(expire.timestamp()), + "jti": jti or str(__import__("uuid").uuid4()), + # User profile information + "user": { + "email": user.email, + "full_name": user.full_name, + "is_admin": user.is_admin, + "auth_provider": user.auth_provider, + }, + # Team memberships for authorization + "teams": [ + {"id": team.id, "name": team.name, "slug": team.slug, "is_personal": team.is_personal, "role": next((m.role for m in user.team_memberships if m.team_id == team.id), "member")} + for team in teams + ], + # Namespace access (backwards compatible) + "namespaces": [f"user:{user.email}", *[f"team:{team.slug}" for team in teams], "public"], + # Token scoping (if provided) + "scopes": token_scopes or {"server_id": None, "permissions": ["*"], "ip_restrictions": [], "time_restrictions": {}}, # Full access for regular user tokens + } + + # Generate token + token = jwt.encode(payload, settings.jwt_secret_key, algorithm=settings.jwt_algorithm) + + return token, int(expires_delta.total_seconds()) + + +def create_legacy_access_token(user: EmailUser) -> tuple[str, int]: + """Create legacy JWT access token for backwards compatibility. + + Args: + user: EmailUser instance + + Returns: + Tuple of (token_string, expires_in_seconds) + """ + now = datetime.utcnow() + expires_delta = timedelta(minutes=settings.token_expiry) + expire = now + expires_delta + + # Create simple JWT payload (original format) + payload = { + "sub": user.email, + "email": user.email, + "full_name": user.full_name, + "is_admin": user.is_admin, + "auth_provider": user.auth_provider, + "iat": int(now.timestamp()), + "exp": int(expire.timestamp()), + "iss": settings.jwt_issuer, + "aud": settings.jwt_audience, + } + + # Generate token + token = jwt.encode(payload, settings.jwt_secret_key, algorithm=settings.jwt_algorithm) + + return token, int(expires_delta.total_seconds()) + + +@email_auth_router.post("/login", response_model=AuthenticationResponse) +async def login(login_request: EmailLoginRequest, request: Request, db: Session = Depends(get_db)): + """Authenticate user with email and password. + + Args: + login_request: Login credentials + request: FastAPI request object + db: Database session + + Returns: + AuthenticationResponse: Access token and user info + + Raises: + HTTPException: If authentication fails + + Examples: + Request JSON: + { + "email": "user@example.com", + "password": "secure_password" + } + """ + auth_service = EmailAuthService(db) + ip_address = get_client_ip(request) + user_agent = get_user_agent(request) + + try: + # Authenticate user + user = await auth_service.authenticate_user(email=login_request.email, password=login_request.password, ip_address=ip_address, user_agent=user_agent) + + if not user: + raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid email or password") + + # Create access token + access_token, expires_in = create_access_token(user) + + # Return authentication response + return AuthenticationResponse(access_token=access_token, token_type="bearer", expires_in=expires_in, user=EmailUserResponse.from_email_user(user)) + + except Exception as e: + logger.error(f"Login error for {login_request.email}: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Authentication service error") + + +@email_auth_router.post("/register", response_model=AuthenticationResponse) +async def register(registration_request: EmailRegistrationRequest, request: Request, db: Session = Depends(get_db)): + """Register a new user account. + + Args: + registration_request: Registration information + request: FastAPI request object + db: Database session + + Returns: + AuthenticationResponse: Access token and user info + + Raises: + HTTPException: If registration fails + + Examples: + Request JSON: + { + "email": "new@example.com", + "password": "secure_password", + "full_name": "New User" + } + """ + auth_service = EmailAuthService(db) + get_client_ip(request) + get_user_agent(request) + + try: + # Create new user + user = await auth_service.create_user( + email=registration_request.email, + password=registration_request.password, + full_name=registration_request.full_name, + is_admin=False, # Regular users cannot self-register as admin + auth_provider="local", + ) + + # Create access token + access_token, expires_in = create_access_token(user) + + logger.info(f"New user registered: {user.email}") + + return AuthenticationResponse(access_token=access_token, token_type="bearer", expires_in=expires_in, user=EmailUserResponse.from_email_user(user)) + + except EmailValidationError as e: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e)) + except PasswordValidationError as e: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e)) + except UserExistsError as e: + raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail=str(e)) + except Exception as e: + logger.error(f"Registration error for {registration_request.email}: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Registration service error") + + +@email_auth_router.post("/change-password", response_model=SuccessResponse) +async def change_password(password_request: ChangePasswordRequest, request: Request, current_user: EmailUser = Depends(get_current_user), db: Session = Depends(get_db)): + """Change user's password. + + Args: + password_request: Old and new passwords + request: FastAPI request object + current_user: Currently authenticated user + db: Database session + + Returns: + SuccessResponse: Success confirmation + + Raises: + HTTPException: If password change fails + + Examples: + Request JSON (with Bearer token in Authorization header): + { + "old_password": "current_password", + "new_password": "new_secure_password" + } + """ + auth_service = EmailAuthService(db) + ip_address = get_client_ip(request) + user_agent = get_user_agent(request) + + try: + # Change password + success = await auth_service.change_password( + email=current_user.email, old_password=password_request.old_password, new_password=password_request.new_password, ip_address=ip_address, user_agent=user_agent + ) + + if success: + return SuccessResponse(success=True, message="Password changed successfully") + else: + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to change password") + + except AuthenticationError as e: + raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail=str(e)) + except PasswordValidationError as e: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e)) + except Exception as e: + logger.error(f"Password change error for {current_user.email}: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Password change service error") + + +@email_auth_router.get("/me", response_model=EmailUserResponse) +async def get_current_user_profile(current_user: EmailUser = Depends(get_current_user)): + """Get current user's profile information. + + Args: + current_user: Currently authenticated user + + Returns: + EmailUserResponse: User profile information + + Raises: + HTTPException: If user authentication fails + + Examples: + >>> # GET /auth/email/me + >>> # Headers: Authorization: Bearer + """ + return EmailUserResponse.from_email_user(current_user) + + +@email_auth_router.get("/events", response_model=list[AuthEventResponse]) +async def get_auth_events(limit: int = 50, offset: int = 0, current_user: EmailUser = Depends(get_current_user), db: Session = Depends(get_db)): + """Get authentication events for the current user. + + Args: + limit: Maximum number of events to return + offset: Number of events to skip + current_user: Currently authenticated user + db: Database session + + Returns: + List[AuthEventResponse]: Authentication events + + Raises: + HTTPException: If user authentication fails + + Examples: + >>> # GET /auth/email/events?limit=10&offset=0 + >>> # Headers: Authorization: Bearer + """ + auth_service = EmailAuthService(db) + + try: + events = await auth_service.get_auth_events(email=current_user.email, limit=limit, offset=offset) + + return [AuthEventResponse.model_validate(event) for event in events] + + except Exception as e: + logger.error(f"Error getting auth events for {current_user.email}: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to retrieve authentication events") + + +# Admin-only endpoints +@email_auth_router.get("/admin/users", response_model=UserListResponse) +@require_permission("admin.user_management") +async def list_users(limit: int = 100, offset: int = 0, current_user: EmailUser = Depends(get_current_user), db: Session = Depends(get_db)): + """List all users (admin only). + + Args: + limit: Maximum number of users to return + offset: Number of users to skip + current_user: Currently authenticated user + db: Database session + + Returns: + UserListResponse: List of users with pagination + + Raises: + HTTPException: If user is not admin + + Examples: + >>> # GET /auth/email/admin/users?limit=10&offset=0 + >>> # Headers: Authorization: Bearer + """ + + auth_service = EmailAuthService(db) + + try: + users = await auth_service.list_users(limit=limit, offset=offset) + total_count = await auth_service.count_users() + + return UserListResponse(users=[EmailUserResponse.from_email_user(user) for user in users], total_count=total_count, limit=limit, offset=offset) + + except Exception as e: + logger.error(f"Error listing users: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to retrieve user list") + + +@email_auth_router.get("/admin/events", response_model=list[AuthEventResponse]) +@require_permission("admin.user_management") +async def list_all_auth_events(limit: int = 100, offset: int = 0, user_email: Optional[str] = None, current_user: EmailUser = Depends(get_current_user), db: Session = Depends(get_db)): + """List authentication events for all users (admin only). + + Args: + limit: Maximum number of events to return + offset: Number of events to skip + user_email: Filter events by specific user email + current_user: Currently authenticated user + db: Database session + + Returns: + List[AuthEventResponse]: Authentication events + + Raises: + HTTPException: If user is not admin + + Examples: + >>> # GET /auth/email/admin/events?limit=50&user_email=user@example.com + >>> # Headers: Authorization: Bearer + """ + + auth_service = EmailAuthService(db) + + try: + events = await auth_service.get_auth_events(email=user_email, limit=limit, offset=offset) + + return [AuthEventResponse.model_validate(event) for event in events] + + except Exception as e: + logger.error(f"Error getting auth events: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to retrieve authentication events") + + +@email_auth_router.post("/admin/users", response_model=EmailUserResponse, status_code=status.HTTP_201_CREATED) +@require_permission("admin.user_management") +async def create_user(user_request: EmailRegistrationRequest, current_user: EmailUser = Depends(get_current_user), db: Session = Depends(get_db)): + """Create a new user account (admin only). + + Args: + user_request: User creation information + current_user: Currently authenticated admin user + db: Database session + + Returns: + EmailUserResponse: Created user information + + Raises: + HTTPException: If user creation fails + + Examples: + Request JSON: + { + "email": "newuser@example.com", + "password": "secure_password", + "full_name": "New User", + "is_admin": false + } + """ + auth_service = EmailAuthService(db) + + try: + # Create new user with admin privileges + user = await auth_service.create_user( + email=user_request.email, + password=user_request.password, + full_name=user_request.full_name, + is_admin=getattr(user_request, "is_admin", False), + auth_provider="local", + ) + + logger.info(f"Admin {current_user.email} created user: {user.email}") + + return EmailUserResponse.from_email_user(user) + + except EmailValidationError as e: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e)) + except PasswordValidationError as e: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e)) + except UserExistsError as e: + raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail=str(e)) + except Exception as e: + logger.error(f"Admin user creation error: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="User creation failed") + + +@email_auth_router.get("/admin/users/{user_email}", response_model=EmailUserResponse) +@require_permission("admin.user_management") +async def get_user(user_email: str, current_user: EmailUser = Depends(get_current_user), db: Session = Depends(get_db)): + """Get user by email (admin only). + + Args: + user_email: Email of user to retrieve + current_user: Currently authenticated admin user + db: Database session + + Returns: + EmailUserResponse: User information + + Raises: + HTTPException: If user not found + """ + auth_service = EmailAuthService(db) + + try: + user = await auth_service.get_user_by_email(user_email) + if not user: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="User not found") + + return EmailUserResponse.from_email_user(user) + + except Exception as e: + logger.error(f"Error retrieving user {user_email}: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to retrieve user") + + +@email_auth_router.put("/admin/users/{user_email}", response_model=EmailUserResponse) +@require_permission("admin.user_management") +async def update_user(user_email: str, user_request: EmailRegistrationRequest, current_user: EmailUser = Depends(get_current_user), db: Session = Depends(get_db)): + """Update user information (admin only). + + Args: + user_email: Email of user to update + user_request: Updated user information + current_user: Currently authenticated admin user + db: Database session + + Returns: + EmailUserResponse: Updated user information + + Raises: + HTTPException: If user not found or update fails + """ + auth_service = EmailAuthService(db) + + try: + # Get existing user + user = await auth_service.get_user_by_email(user_email) + if not user: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="User not found") + + # Update user fields + user.full_name = user_request.full_name + user.is_admin = getattr(user_request, "is_admin", user.is_admin) + + # Update password if provided + if user_request.password: + await auth_service.change_password( + email=user_email, + old_password=None, # Admin can change without old password + new_password=user_request.password, + ip_address="admin_update", + user_agent="admin_panel", + skip_old_password_check=True, + ) + + db.commit() + db.refresh(user) + + logger.info(f"Admin {current_user.email} updated user: {user.email}") + + return EmailUserResponse.from_email_user(user) + + except Exception as e: + logger.error(f"Error updating user {user_email}: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to update user") + + +@email_auth_router.delete("/admin/users/{user_email}", response_model=SuccessResponse) +@require_permission("admin.user_management") +async def delete_user(user_email: str, current_user: EmailUser = Depends(get_current_user), db: Session = Depends(get_db)): + """Delete/deactivate user (admin only). + + Args: + user_email: Email of user to delete + current_user: Currently authenticated admin user + db: Database session + + Returns: + SuccessResponse: Success confirmation + + Raises: + HTTPException: If user not found or deletion fails + """ + auth_service = EmailAuthService(db) + + try: + # Prevent admin from deleting themselves + if user_email == current_user.email: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Cannot delete your own account") + + user = await auth_service.get_user_by_email(user_email) + if not user: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="User not found") + + # Soft delete by deactivating + user.is_active = False + db.commit() + + logger.info(f"Admin {current_user.email} deactivated user: {user.email}") + + return SuccessResponse(success=True, message=f"User {user_email} has been deactivated") + + except Exception as e: + logger.error(f"Error deleting user {user_email}: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to delete user") diff --git a/mcpgateway/routers/rbac.py b/mcpgateway/routers/rbac.py new file mode 100644 index 000000000..fec6f4155 --- /dev/null +++ b/mcpgateway/routers/rbac.py @@ -0,0 +1,477 @@ +# -*- coding: utf-8 -*- +"""RBAC API Router. + +This module provides REST API endpoints for Role-Based Access Control (RBAC) +management including roles, user role assignments, and permission checking. +""" + +# Standard +from datetime import datetime +import logging +from typing import Generator, List, Optional + +# Third-Party +from fastapi import APIRouter, Depends, HTTPException, Query, status +from sqlalchemy.orm import Session + +# First-Party +from mcpgateway.db import Permissions, SessionLocal +from mcpgateway.middleware.rbac import get_current_user_with_permissions, require_admin_permission, require_permission +from mcpgateway.schemas import PermissionCheckRequest, PermissionCheckResponse, PermissionListResponse, RoleCreateRequest, RoleResponse, RoleUpdateRequest, UserRoleAssignRequest, UserRoleResponse +from mcpgateway.services.permission_service import PermissionService +from mcpgateway.services.role_service import RoleService + +logger = logging.getLogger(__name__) + +router = APIRouter(prefix="/rbac", tags=["RBAC"]) + + +def get_db() -> Generator[Session, None, None]: + """Get database session for dependency injection. + + Yields: + Session: SQLAlchemy database session + """ + db = SessionLocal() + try: + yield db + finally: + db.close() + + +# ===== Role Management Endpoints ===== + + +@router.post("/roles", response_model=RoleResponse) +@require_admin_permission() +async def create_role(role_data: RoleCreateRequest, user=Depends(get_current_user_with_permissions), db: Session = Depends(get_db)): + """Create a new role. + + Requires admin permissions to create roles. + + Args: + role_data: Role creation data + user: Current authenticated user + db: Database session + + Returns: + RoleResponse: Created role details + + Raises: + HTTPException: If role creation fails + """ + try: + role_service = RoleService(db) + role = await role_service.create_role( + name=role_data.name, + description=role_data.description, + scope=role_data.scope, + permissions=role_data.permissions, + inherits_from=role_data.inherits_from, + created_by=user["email"], + is_system_role=role_data.is_system_role or False, + ) + + logger.info(f"Role created: {role.id} by {user['email']}") + return RoleResponse.from_orm(role) + + except ValueError as e: + logger.error(f"Role creation validation error: {e}") + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e)) + except Exception as e: + logger.error(f"Role creation failed: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to create role") + + +@router.get("/roles", response_model=List[RoleResponse]) +@require_permission("admin.user_management") +async def list_roles( + scope: Optional[str] = Query(None, description="Filter by scope"), + active_only: bool = Query(True, description="Show only active roles"), + user=Depends(get_current_user_with_permissions), + db: Session = Depends(get_db), +): + """List all roles. + + Args: + scope: Optional scope filter + active_only: Whether to show only active roles + user: Current authenticated user + db: Database session + + Returns: + List[RoleResponse]: List of roles + + Raises: + HTTPException: If user lacks required permissions + """ + try: + role_service = RoleService(db) + roles = await role_service.list_roles(scope=scope, active_only=active_only) + + return [RoleResponse.from_orm(role) for role in roles] + + except Exception as e: + logger.error(f"Failed to list roles: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to retrieve roles") + + +@router.get("/roles/{role_id}", response_model=RoleResponse) +@require_permission("admin.user_management") +async def get_role(role_id: str, user=Depends(get_current_user_with_permissions), db: Session = Depends(get_db)): + """Get role details by ID. + + Args: + role_id: Role identifier + user: Current authenticated user + db: Database session + + Returns: + RoleResponse: Role details + + Raises: + HTTPException: If role not found + """ + try: + role_service = RoleService(db) + role = await role_service.get_role_by_id(role_id) + + if not role: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Role not found") + + return RoleResponse.from_orm(role) + + except HTTPException: + raise + except Exception as e: + logger.error(f"Failed to get role {role_id}: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to retrieve role") + + +@router.put("/roles/{role_id}", response_model=RoleResponse) +@require_admin_permission() +async def update_role(role_id: str, role_data: RoleUpdateRequest, user=Depends(get_current_user_with_permissions), db: Session = Depends(get_db)): + """Update an existing role. + + Args: + role_id: Role identifier + role_data: Role update data + user: Current authenticated user + db: Database session + + Returns: + RoleResponse: Updated role details + + Raises: + HTTPException: If role not found or update fails + """ + try: + role_service = RoleService(db) + role = await role_service.update_role(role_id, **role_data.dict(exclude_unset=True)) + + if not role: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Role not found") + + logger.info(f"Role updated: {role_id} by {user['email']}") + return RoleResponse.from_orm(role) + + except HTTPException: + raise + except ValueError as e: + logger.error(f"Role update validation error: {e}") + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e)) + except Exception as e: + logger.error(f"Role update failed: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to update role") + + +@router.delete("/roles/{role_id}") +@require_admin_permission() +async def delete_role(role_id: str, user=Depends(get_current_user_with_permissions), db: Session = Depends(get_db)): + """Delete a role. + + Args: + role_id: Role identifier + user: Current authenticated user + db: Database session + + Returns: + dict: Success message + + Raises: + HTTPException: If role not found or deletion fails + """ + try: + role_service = RoleService(db) + success = await role_service.delete_role(role_id) + + if not success: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Role not found") + + logger.info(f"Role deleted: {role_id} by {user['email']}") + return {"message": "Role deleted successfully"} + + except HTTPException: + raise + except Exception as e: + logger.error(f"Role deletion failed: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to delete role") + + +# ===== User Role Assignment Endpoints ===== + + +@router.post("/users/{user_email}/roles", response_model=UserRoleResponse) +@require_permission("admin.user_management") +async def assign_role_to_user(user_email: str, assignment_data: UserRoleAssignRequest, user=Depends(get_current_user_with_permissions), db: Session = Depends(get_db)): + """Assign a role to a user. + + Args: + user_email: User email address + assignment_data: Role assignment data + user: Current authenticated user + db: Database session + + Returns: + UserRoleResponse: Created role assignment + + Raises: + HTTPException: If assignment fails + """ + try: + role_service = RoleService(db) + user_role = await role_service.assign_role_to_user( + user_email=user_email, role_id=assignment_data.role_id, scope=assignment_data.scope, scope_id=assignment_data.scope_id, granted_by=user["email"], expires_at=assignment_data.expires_at + ) + + logger.info(f"Role assigned: {assignment_data.role_id} to {user_email} by {user['email']}") + return UserRoleResponse.from_orm(user_role) + + except ValueError as e: + logger.error(f"Role assignment validation error: {e}") + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e)) + except Exception as e: + logger.error(f"Role assignment failed: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to assign role") + + +@router.get("/users/{user_email}/roles", response_model=List[UserRoleResponse]) +@require_permission("admin.user_management") +async def get_user_roles( + user_email: str, + scope: Optional[str] = Query(None, description="Filter by scope"), + active_only: bool = Query(True, description="Show only active assignments"), + user=Depends(get_current_user_with_permissions), + db: Session = Depends(get_db), +): + """Get roles assigned to a user. + + Args: + user_email: User email address + scope: Optional scope filter + active_only: Whether to show only active assignments + user: Current authenticated user + db: Database session + + Returns: + List[UserRoleResponse]: User's role assignments + + Raises: + HTTPException: If role retrieval fails + """ + try: + permission_service = PermissionService(db) + user_roles = await permission_service.get_user_roles(user_email=user_email, scope=scope, include_expired=not active_only) + + return [UserRoleResponse.from_orm(user_role) for user_role in user_roles] + + except Exception as e: + logger.error(f"Failed to get user roles for {user_email}: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to retrieve user roles") + + +@router.delete("/users/{user_email}/roles/{role_id}") +@require_permission("admin.user_management") +async def revoke_user_role( + user_email: str, + role_id: str, + scope: Optional[str] = Query(None, description="Scope filter"), + scope_id: Optional[str] = Query(None, description="Scope ID filter"), + user=Depends(get_current_user_with_permissions), + db: Session = Depends(get_db), +): + """Revoke a role from a user. + + Args: + user_email: User email address + role_id: Role identifier + scope: Optional scope filter + scope_id: Optional scope ID filter + user: Current authenticated user + db: Database session + + Returns: + dict: Success message + + Raises: + HTTPException: If revocation fails + """ + try: + role_service = RoleService(db) + success = await role_service.revoke_role_from_user(user_email=user_email, role_id=role_id, scope=scope, scope_id=scope_id) + + if not success: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Role assignment not found") + + logger.info(f"Role revoked: {role_id} from {user_email} by {user['email']}") + return {"message": "Role revoked successfully"} + + except HTTPException: + raise + except Exception as e: + logger.error(f"Role revocation failed: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to revoke role") + + +# ===== Permission Checking Endpoints ===== + + +@router.post("/permissions/check", response_model=PermissionCheckResponse) +@require_permission("admin.security_audit") +async def check_permission(check_data: PermissionCheckRequest, user=Depends(get_current_user_with_permissions), db: Session = Depends(get_db)): + """Check if a user has specific permission. + + Args: + check_data: Permission check request + user: Current authenticated user + db: Database session + + Returns: + PermissionCheckResponse: Permission check result + + Raises: + HTTPException: If permission check fails + """ + try: + permission_service = PermissionService(db) + granted = await permission_service.check_permission( + user_email=check_data.user_email, + permission=check_data.permission, + resource_type=check_data.resource_type, + resource_id=check_data.resource_id, + team_id=check_data.team_id, + ip_address=user.get("ip_address"), + user_agent=user.get("user_agent"), + ) + + return PermissionCheckResponse(user_email=check_data.user_email, permission=check_data.permission, granted=granted, checked_at=datetime.utcnow(), checked_by=user["email"]) + + except Exception as e: + logger.error(f"Permission check failed: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to check permission") + + +@router.get("/permissions/user/{user_email}", response_model=List[str]) +@require_permission("admin.security_audit") +async def get_user_permissions(user_email: str, team_id: Optional[str] = Query(None, description="Team context"), user=Depends(get_current_user_with_permissions), db: Session = Depends(get_db)): + """Get all effective permissions for a user. + + Args: + user_email: User email address + team_id: Optional team context + user: Current authenticated user + db: Database session + + Returns: + List[str]: User's effective permissions + + Raises: + HTTPException: If retrieving user permissions fails + """ + try: + permission_service = PermissionService(db) + permissions = await permission_service.get_user_permissions(user_email=user_email, team_id=team_id) + + return sorted(list(permissions)) + + except Exception as e: + logger.error(f"Failed to get user permissions for {user_email}: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to retrieve user permissions") + + +@router.get("/permissions/available", response_model=PermissionListResponse) +async def get_available_permissions(user=Depends(get_current_user_with_permissions)): + """Get all available permissions in the system. + + Args: + user: Current authenticated user + + Returns: + PermissionListResponse: Available permissions organized by resource type + + Raises: + HTTPException: If retrieving available permissions fails + """ + try: + all_permissions = Permissions.get_all_permissions() + permissions_by_resource = Permissions.get_permissions_by_resource() + + return PermissionListResponse(all_permissions=all_permissions, permissions_by_resource=permissions_by_resource, total_count=len(all_permissions)) + + except Exception as e: + logger.error(f"Failed to get available permissions: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to retrieve available permissions") + + +# ===== Self-Service Endpoints ===== + + +@router.get("/my/roles", response_model=List[UserRoleResponse]) +async def get_my_roles(user=Depends(get_current_user_with_permissions), db: Session = Depends(get_db)): + """Get current user's role assignments. + + Args: + user: Current authenticated user + db: Database session + + Returns: + List[UserRoleResponse]: Current user's role assignments + + Raises: + HTTPException: If retrieving user roles fails + """ + try: + permission_service = PermissionService(db) + user_roles = await permission_service.get_user_roles(user_email=user["email"], include_expired=False) + + return [UserRoleResponse.from_orm(user_role) for user_role in user_roles] + + except Exception as e: + logger.error(f"Failed to get my roles for {user['email']}: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to retrieve your roles") + + +@router.get("/my/permissions", response_model=List[str]) +async def get_my_permissions(team_id: Optional[str] = Query(None, description="Team context"), user=Depends(get_current_user_with_permissions), db: Session = Depends(get_db)): + """Get current user's effective permissions. + + Args: + team_id: Optional team context + user: Current authenticated user + db: Database session + + Returns: + List[str]: Current user's effective permissions + + Raises: + HTTPException: If retrieving user permissions fails + """ + try: + permission_service = PermissionService(db) + permissions = await permission_service.get_user_permissions(user_email=user["email"], team_id=team_id) + + return sorted(list(permissions)) + + except Exception as e: + logger.error(f"Failed to get my permissions for {user['email']}: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to retrieve your permissions") diff --git a/mcpgateway/routers/sso.py b/mcpgateway/routers/sso.py new file mode 100644 index 000000000..fdf65f2b5 --- /dev/null +++ b/mcpgateway/routers/sso.py @@ -0,0 +1,566 @@ +# -*- coding: utf-8 -*- +"""Location: ./mcpgateway/routers/sso.py +Copyright 2025 +SPDX-License-Identifier: Apache-2.0 +Authors: Mihai Criveti + +Single Sign-On (SSO) authentication routes for OAuth2/OIDC providers. +Handles SSO login flows, provider configuration, and callback handling. +""" + +# Standard +from typing import Dict, List, Optional + +# Third-Party +from fastapi import APIRouter, Depends, HTTPException, Query, Request, Response +from pydantic import BaseModel +from sqlalchemy.orm import Session + +# First-Party +from mcpgateway.config import settings +from mcpgateway.db import get_db +from mcpgateway.middleware.rbac import get_current_user_with_permissions, require_permission +from mcpgateway.services.sso_service import SSOService + + +class SSOProviderCreateRequest(BaseModel): + """Request to create SSO provider.""" + + id: str + name: str + display_name: str + provider_type: str # oauth2, oidc + client_id: str + client_secret: str + authorization_url: str + token_url: str + userinfo_url: str + issuer: Optional[str] = None + scope: str = "openid profile email" + trusted_domains: List[str] = [] + auto_create_users: bool = True + team_mapping: Dict = {} + + +class SSOProviderUpdateRequest(BaseModel): + """Request to update SSO provider.""" + + name: Optional[str] = None + display_name: Optional[str] = None + provider_type: Optional[str] = None + client_id: Optional[str] = None + client_secret: Optional[str] = None + authorization_url: Optional[str] = None + token_url: Optional[str] = None + userinfo_url: Optional[str] = None + issuer: Optional[str] = None + scope: Optional[str] = None + trusted_domains: Optional[List[str]] = None + auto_create_users: Optional[bool] = None + team_mapping: Optional[Dict] = None + is_enabled: Optional[bool] = None + + +# Create router +sso_router = APIRouter(prefix="/auth/sso", tags=["SSO Authentication"]) + + +class SSOProviderResponse(BaseModel): + """SSO provider information for client.""" + + id: str + name: str + display_name: str + authorization_url: Optional[str] = None # Only provided when initiating login + + +class SSOLoginResponse(BaseModel): + """SSO login initiation response.""" + + authorization_url: str + state: str + + +class SSOCallbackResponse(BaseModel): + """SSO authentication callback response.""" + + access_token: str + token_type: str = "bearer" + expires_in: int + user: Dict + + +@sso_router.get("/providers", response_model=List[SSOProviderResponse]) +async def list_sso_providers( + db: Session = Depends(get_db), +) -> List[SSOProviderResponse]: + """List available SSO providers for login. + + Args: + db: Database session + + Returns: + List of enabled SSO providers with basic information. + + Raises: + HTTPException: If SSO authentication is disabled + + Examples: + curl -X GET http://localhost:8000/auth/sso/providers + """ + if not settings.sso_enabled: + raise HTTPException(status_code=404, detail="SSO authentication is disabled") + + sso_service = SSOService(db) + providers = sso_service.list_enabled_providers() + + return [SSOProviderResponse(id=provider.id, name=provider.name, display_name=provider.display_name) for provider in providers] + + +@sso_router.get("/login/{provider_id}", response_model=SSOLoginResponse) +async def initiate_sso_login( + provider_id: str, + redirect_uri: str = Query(..., description="Callback URI after authentication"), + scopes: Optional[str] = Query(None, description="Space-separated OAuth scopes"), + db: Session = Depends(get_db), +) -> SSOLoginResponse: + """Initiate SSO authentication flow. + + Args: + provider_id: SSO provider identifier (e.g., 'github', 'google') + redirect_uri: Callback URI after successful authentication + scopes: Optional custom OAuth scopes (space-separated) + db: Database session + + Returns: + Authorization URL and state parameter for redirect. + + Raises: + HTTPException: If SSO is disabled or provider not found + + Examples: + curl -X GET "http://localhost:8000/auth/sso/login/github?redirect_uri=https://app.com/callback" + """ + if not settings.sso_enabled: + raise HTTPException(status_code=404, detail="SSO authentication is disabled") + + sso_service = SSOService(db) + scope_list = scopes.split() if scopes else None + + auth_url = sso_service.get_authorization_url(provider_id, redirect_uri, scope_list) + if not auth_url: + raise HTTPException(status_code=404, detail=f"SSO provider '{provider_id}' not found or disabled") + + # Extract state from URL for client reference + # Standard + import urllib.parse + + parsed = urllib.parse.urlparse(auth_url) + params = urllib.parse.parse_qs(parsed.query) + state = params.get("state", [""])[0] + + return SSOLoginResponse(authorization_url=auth_url, state=state) + + +@sso_router.get("/callback/{provider_id}") +async def handle_sso_callback( + provider_id: str, + code: str = Query(..., description="Authorization code from SSO provider"), + state: str = Query(..., description="CSRF state parameter"), + request: Request = None, + response: Response = None, + db: Session = Depends(get_db), +): + """Handle SSO authentication callback. + + Args: + provider_id: SSO provider identifier + code: Authorization code from provider + state: CSRF state parameter for validation + request: FastAPI request object + response: FastAPI response object + db: Database session + + Returns: + JWT access token and user information. + + Raises: + HTTPException: If SSO is disabled or authentication fails + + Examples: + # This is typically called by the SSO provider, not directly by clients + curl -X GET "http://localhost:8000/auth/sso/callback/github?code=abc123&state=xyz789" + """ + if not settings.sso_enabled: + raise HTTPException(status_code=404, detail="SSO authentication is disabled") + + sso_service = SSOService(db) + + # Handle OAuth callback + user_info = await sso_service.handle_oauth_callback(provider_id, code, state) + if not user_info: + # Redirect back to login with error + # Third-Party + from fastapi.responses import RedirectResponse + + return RedirectResponse(url="/admin/login?error=sso_failed", status_code=302) + + # Authenticate or create user + access_token = await sso_service.authenticate_or_create_user(user_info) + if not access_token: + # Redirect back to login with error + # Third-Party + from fastapi.responses import RedirectResponse + + return RedirectResponse(url="/admin/login?error=user_creation_failed", status_code=302) + + # Create redirect response + # Third-Party + from fastapi.responses import RedirectResponse + + redirect_response = RedirectResponse(url="/admin", status_code=302) + + # Set secure HTTP-only cookie using the same method as email auth + # First-Party + from mcpgateway.utils.security_cookies import set_auth_cookie + + set_auth_cookie(redirect_response, access_token, remember_me=False) + + return redirect_response + + +# Admin endpoints for SSO provider management +@sso_router.post("/admin/providers", response_model=Dict) +@require_permission("admin.sso_providers:create") +async def create_sso_provider( + provider_data: SSOProviderCreateRequest, + db: Session = Depends(get_db), + user=Depends(get_current_user_with_permissions), +) -> Dict: + """Create new SSO provider configuration (Admin only). + + Args: + provider_data: SSO provider configuration + db: Database session + user: Current authenticated user + + Returns: + Created provider information. + + Raises: + HTTPException: If provider already exists or creation fails + """ + sso_service = SSOService(db) + + # Check if provider already exists + existing = sso_service.get_provider(provider_data.id) + if existing: + raise HTTPException(status_code=409, detail=f"SSO provider '{provider_data.id}' already exists") + + provider = sso_service.create_provider(provider_data.dict()) + + return { + "id": provider.id, + "name": provider.name, + "display_name": provider.display_name, + "provider_type": provider.provider_type, + "is_enabled": provider.is_enabled, + "created_at": provider.created_at, + } + + +@sso_router.get("/admin/providers", response_model=List[Dict]) +@require_permission("admin.sso_providers:read") +async def list_all_sso_providers( + db: Session = Depends(get_db), + user=Depends(get_current_user_with_permissions), +) -> List[Dict]: + """List all SSO providers including disabled ones (Admin only). + + Args: + db: Database session + user: Current authenticated user + + Returns: + List of all SSO providers with configuration details. + """ + # Third-Party + from sqlalchemy import select + + # First-Party + from mcpgateway.db import SSOProvider + + stmt = select(SSOProvider) + result = db.execute(stmt) + providers = result.scalars().all() + + return [ + { + "id": provider.id, + "name": provider.name, + "display_name": provider.display_name, + "provider_type": provider.provider_type, + "is_enabled": provider.is_enabled, + "trusted_domains": provider.trusted_domains, + "auto_create_users": provider.auto_create_users, + "created_at": provider.created_at, + "updated_at": provider.updated_at, + } + for provider in providers + ] + + +@sso_router.get("/admin/providers/{provider_id}", response_model=Dict) +@require_permission("admin.sso_providers:read") +async def get_sso_provider( + provider_id: str, + db: Session = Depends(get_db), + user=Depends(get_current_user_with_permissions), +) -> Dict: + """Get SSO provider details (Admin only). + + Args: + provider_id: Provider identifier + db: Database session + user: Current authenticated user + + Returns: + Provider configuration details. + + Raises: + HTTPException: If provider not found + """ + sso_service = SSOService(db) + provider = sso_service.get_provider(provider_id) + + if not provider: + raise HTTPException(status_code=404, detail=f"SSO provider '{provider_id}' not found") + + return { + "id": provider.id, + "name": provider.name, + "display_name": provider.display_name, + "provider_type": provider.provider_type, + "client_id": provider.client_id, + "authorization_url": provider.authorization_url, + "token_url": provider.token_url, + "userinfo_url": provider.userinfo_url, + "issuer": provider.issuer, + "scope": provider.scope, + "trusted_domains": provider.trusted_domains, + "auto_create_users": provider.auto_create_users, + "team_mapping": provider.team_mapping, + "is_enabled": provider.is_enabled, + "created_at": provider.created_at, + "updated_at": provider.updated_at, + } + + +@sso_router.put("/admin/providers/{provider_id}", response_model=Dict) +@require_permission("admin.sso_providers:update") +async def update_sso_provider( + provider_id: str, + provider_data: SSOProviderUpdateRequest, + db: Session = Depends(get_db), + user=Depends(get_current_user_with_permissions), +) -> Dict: + """Update SSO provider configuration (Admin only). + + Args: + provider_id: Provider identifier + provider_data: Updated provider configuration + db: Database session + user: Current authenticated user + + Returns: + Updated provider information. + + Raises: + HTTPException: If provider not found or update fails + """ + sso_service = SSOService(db) + + # Filter out None values + update_data = {k: v for k, v in provider_data.dict().items() if v is not None} + if not update_data: + raise HTTPException(status_code=400, detail="No update data provided") + + provider = sso_service.update_provider(provider_id, update_data) + if not provider: + raise HTTPException(status_code=404, detail=f"SSO provider '{provider_id}' not found") + + return { + "id": provider.id, + "name": provider.name, + "display_name": provider.display_name, + "provider_type": provider.provider_type, + "is_enabled": provider.is_enabled, + "updated_at": provider.updated_at, + } + + +@sso_router.delete("/admin/providers/{provider_id}") +@require_permission("admin.sso_providers:delete") +async def delete_sso_provider( + provider_id: str, + db: Session = Depends(get_db), + user=Depends(get_current_user_with_permissions), +) -> Dict: + """Delete SSO provider configuration (Admin only). + + Args: + provider_id: Provider identifier + db: Database session + user: Current authenticated user + + Returns: + Deletion confirmation. + + Raises: + HTTPException: If provider not found + """ + sso_service = SSOService(db) + + if not sso_service.delete_provider(provider_id): + raise HTTPException(status_code=404, detail=f"SSO provider '{provider_id}' not found") + + return {"message": f"SSO provider '{provider_id}' deleted successfully"} + + +# --------------------------------------------------------------------------- +# SSO User Approval Management Endpoints +# --------------------------------------------------------------------------- + + +class PendingUserApprovalResponse(BaseModel): + """Response model for pending user approval.""" + + id: str + email: str + full_name: str + auth_provider: str + requested_at: str + expires_at: str + status: str + sso_metadata: Optional[Dict] = None + + +class ApprovalActionRequest(BaseModel): + """Request model for approval actions.""" + + action: str # "approve" or "reject" + reason: Optional[str] = None # Required for rejection + notes: Optional[str] = None + + +@sso_router.get("/pending-approvals", response_model=List[PendingUserApprovalResponse]) +@require_permission("admin.user_management") +async def list_pending_approvals( + include_expired: bool = Query(False, description="Include expired approval requests"), + db: Session = Depends(get_db), + user=Depends(get_current_user_with_permissions), +) -> List[PendingUserApprovalResponse]: + """List pending SSO user approval requests (Admin only). + + Args: + include_expired: Whether to include expired requests + db: Database session + user: Current authenticated admin user + + Returns: + List of pending approval requests + """ + # Third-Party + from sqlalchemy import select + + # First-Party + from mcpgateway.db import PendingUserApproval + + query = select(PendingUserApproval) + + if not include_expired: + # First-Party + from mcpgateway.db import utc_now + + query = query.where(PendingUserApproval.expires_at > utc_now()) + + # Filter by status + query = query.where(PendingUserApproval.status == "pending") + query = query.order_by(PendingUserApproval.requested_at.desc()) + + result = db.execute(query) + pending_approvals = result.scalars().all() + + return [ + PendingUserApprovalResponse( + id=approval.id, + email=approval.email, + full_name=approval.full_name, + auth_provider=approval.auth_provider, + requested_at=approval.requested_at.isoformat(), + expires_at=approval.expires_at.isoformat(), + status=approval.status, + sso_metadata=approval.sso_metadata, + ) + for approval in pending_approvals + ] + + +@sso_router.post("/pending-approvals/{approval_id}/action") +@require_permission("admin.user_management") +async def handle_approval_request( + approval_id: str, + request: ApprovalActionRequest, + db: Session = Depends(get_db), + user=Depends(get_current_user_with_permissions), +) -> Dict: + """Approve or reject a pending SSO user registration (Admin only). + + Args: + approval_id: ID of the approval request + request: Approval action (approve/reject) with optional reason/notes + db: Database session + user: Current authenticated admin user + + Returns: + Action confirmation message + + Raises: + HTTPException: If approval not found or invalid action + """ + # Third-Party + from sqlalchemy import select + + # First-Party + from mcpgateway.db import PendingUserApproval + + # Get pending approval + approval = db.execute(select(PendingUserApproval).where(PendingUserApproval.id == approval_id)).scalar_one_or_none() + + if not approval: + raise HTTPException(status_code=404, detail="Approval request not found") + + if approval.status != "pending": + raise HTTPException(status_code=400, detail=f"Approval request is already {approval.status}") + + if approval.is_expired(): + approval.status = "expired" + db.commit() + raise HTTPException(status_code=400, detail="Approval request has expired") + + admin_email = user["email"] + + if request.action == "approve": + approval.approve(admin_email, request.notes) + db.commit() + return {"message": f"User {approval.email} approved successfully"} + + elif request.action == "reject": + if not request.reason: + raise HTTPException(status_code=400, detail="Rejection reason is required") + approval.reject(admin_email, request.reason, request.notes) + db.commit() + return {"message": f"User {approval.email} rejected"} + + else: + raise HTTPException(status_code=400, detail="Invalid action. Must be 'approve' or 'reject'") diff --git a/mcpgateway/routers/teams.py b/mcpgateway/routers/teams.py new file mode 100644 index 000000000..a170d334d --- /dev/null +++ b/mcpgateway/routers/teams.py @@ -0,0 +1,949 @@ +# -*- coding: utf-8 -*- +"""Location: ./mcpgateway/routers/teams.py +Copyright 2025 +SPDX-License-Identifier: Apache-2.0 +Authors: Mihai Criveti + +Team Management Router. +This module provides FastAPI routes for team management including +team creation, member management, and invitation handling. + +Examples: + >>> from fastapi import FastAPI + >>> from mcpgateway.routers.teams import teams_router + >>> app = FastAPI() + >>> app.include_router(teams_router, prefix="/teams", tags=["Teams"]) +""" + +# Standard +from typing import List + +# Third-Party +from fastapi import APIRouter, Depends, HTTPException, Query, status +from sqlalchemy.orm import Session + +# First-Party +from mcpgateway.auth import get_current_user +from mcpgateway.db import get_db +from mcpgateway.middleware.rbac import get_current_user_with_permissions, require_permission +from mcpgateway.schemas import ( + EmailUserResponse, + SuccessResponse, + TeamCreateRequest, + TeamDiscoveryResponse, + TeamInvitationResponse, + TeamInviteRequest, + TeamJoinRequest, + TeamJoinRequestResponse, + TeamListResponse, + TeamMemberResponse, + TeamMemberUpdateRequest, + TeamResponse, + TeamUpdateRequest, +) +from mcpgateway.services.logging_service import LoggingService +from mcpgateway.services.team_invitation_service import TeamInvitationService +from mcpgateway.services.team_management_service import TeamManagementService + +# Initialize logging +logging_service = LoggingService() +logger = logging_service.get_logger(__name__) + +# Create router +teams_router = APIRouter() + + +# --------------------------------------------------------------------------- +# Team CRUD Operations +# --------------------------------------------------------------------------- + + +@teams_router.post("/", response_model=TeamResponse, status_code=status.HTTP_201_CREATED) +@require_permission("teams.create") +async def create_team(request: TeamCreateRequest, current_user_ctx: dict = Depends(get_current_user_with_permissions)) -> TeamResponse: + """Create a new team. + + Args: + request: Team creation request data + current_user_ctx: Currently authenticated user context + + Returns: + TeamResponse: Created team data + + Raises: + HTTPException: If team creation fails + """ + try: + db = current_user_ctx["db"] + service = TeamManagementService(db) + team = await service.create_team(name=request.name, description=request.description, created_by=current_user_ctx["email"], visibility=request.visibility, max_members=request.max_members) + + return TeamResponse( + id=team.id, + name=team.name, + slug=team.slug, + description=team.description, + created_by=team.created_by, + is_personal=team.is_personal, + visibility=team.visibility, + max_members=team.max_members, + member_count=team.get_member_count(), + created_at=team.created_at, + updated_at=team.updated_at, + is_active=team.is_active, + ) + except ValueError as e: + logger.error(f"Team creation failed: {e}") + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e)) + except Exception as e: + logger.error(f"Unexpected error creating team: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to create team") + + +@teams_router.get("/", response_model=TeamListResponse) +@require_permission("teams.read") +async def list_teams( + skip: int = Query(0, ge=0, description="Number of teams to skip"), + limit: int = Query(50, ge=1, le=100, description="Number of teams to return"), + current_user_ctx: dict = Depends(get_current_user_with_permissions), +) -> TeamListResponse: + """List teams visible to the caller. + + - Administrators see all non-personal teams (paginated) + - Regular users see only teams they are a member of (paginated client-side) + + Args: + skip: Number of teams to skip for pagination + limit: Maximum number of teams to return + current_user_ctx: Current user context with permissions and database session + + Returns: + TeamListResponse: List of teams and total count + + Raises: + HTTPException: If there's an error listing teams + """ + try: + db = current_user_ctx["db"] + service = TeamManagementService(db) + + if current_user_ctx.get("is_admin"): + teams, total = await service.list_teams(limit=limit, offset=skip) + else: + # Fallback to user teams and apply pagination locally + user_teams = await service.get_user_teams(current_user_ctx["email"], include_personal=True) + total = len(user_teams) + teams = user_teams[skip : skip + limit] + + team_responses = [ + TeamResponse( + id=team.id, + name=team.name, + slug=team.slug, + description=team.description, + created_by=team.created_by, + is_personal=team.is_personal, + visibility=team.visibility, + max_members=team.max_members, + member_count=team.get_member_count(), + created_at=team.created_at, + updated_at=team.updated_at, + is_active=team.is_active, + ) + for team in teams + ] + + return TeamListResponse(teams=team_responses, total=total) + except Exception as e: + logger.error(f"Error listing teams: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to list teams") + + +@teams_router.get("/{team_id}", response_model=TeamResponse) +@require_permission("teams.read") +async def get_team(team_id: str, current_user: EmailUserResponse = Depends(get_current_user), db: Session = Depends(get_db)) -> TeamResponse: + """Get a specific team by ID. + + Args: + team_id: Team UUID + current_user: Currently authenticated user + db: Database session + + Returns: + TeamResponse: Team data + + Raises: + HTTPException: If team not found or access denied + """ + try: + service = TeamManagementService(db) + team = await service.get_team_by_id(team_id) + + if not team: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Team not found") + + # Check if user has access to the team + user_role = await service.get_user_role_in_team(current_user.email, team_id) + if not user_role: + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Access denied to team") + + return TeamResponse( + id=team.id, + name=team.name, + slug=team.slug, + description=team.description, + created_by=team.created_by, + is_personal=team.is_personal, + visibility=team.visibility, + max_members=team.max_members, + member_count=team.get_member_count(), + created_at=team.created_at, + updated_at=team.updated_at, + is_active=team.is_active, + ) + except HTTPException: + raise + except Exception as e: + logger.error(f"Error getting team {team_id}: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to get team") + + +@teams_router.put("/{team_id}", response_model=TeamResponse) +@require_permission("teams.update") +async def update_team(team_id: str, request: TeamUpdateRequest, current_user: EmailUserResponse = Depends(get_current_user), db: Session = Depends(get_db)) -> TeamResponse: + """Update a team. + + Args: + team_id: Team UUID + request: Team update request data + current_user: Currently authenticated user + db: Database session + + Returns: + TeamResponse: Updated team data + + Raises: + HTTPException: If team not found, access denied, or update fails + """ + try: + service = TeamManagementService(db) + + # Check if user is team owner + role = await service.get_user_role_in_team(current_user.email, team_id) + if role != "owner": + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Insufficient permissions") + + team = await service.update_team(team_id=team_id, name=request.name, description=request.description, visibility=request.visibility, max_members=request.max_members) + + if not team: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Team not found") + + return TeamResponse( + id=team.id, + name=team.name, + slug=team.slug, + description=team.description, + created_by=team.created_by, + is_personal=team.is_personal, + visibility=team.visibility, + max_members=team.max_members, + member_count=team.get_member_count(), + created_at=team.created_at, + updated_at=team.updated_at, + is_active=team.is_active, + ) + except HTTPException: + raise + except ValueError as e: + logger.error(f"Team update failed: {e}") + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e)) + except Exception as e: + logger.error(f"Error updating team {team_id}: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to update team") + + +@teams_router.delete("/{team_id}", response_model=SuccessResponse) +@require_permission("teams.delete") +async def delete_team(team_id: str, current_user: EmailUserResponse = Depends(get_current_user), db: Session = Depends(get_db)) -> SuccessResponse: + """Delete a team. + + Args: + team_id: Team UUID + current_user: Currently authenticated user + db: Database session + + Returns: + SuccessResponse: Success confirmation + + Raises: + HTTPException: If team not found, access denied, or deletion fails + """ + try: + service = TeamManagementService(db) + + # Check if user is team owner + role = await service.get_user_role_in_team(current_user.email, team_id) + if role != "owner": + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Only team owners can delete teams") + + success = await service.delete_team(team_id, current_user.email) + if not success: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Team not found") + + return SuccessResponse(message="Team deleted successfully") + except HTTPException: + raise + except Exception as e: + logger.error(f"Error deleting team {team_id}: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to delete team") + + +# --------------------------------------------------------------------------- +# Team Member Management +# --------------------------------------------------------------------------- + + +@teams_router.get("/{team_id}/members", response_model=List[TeamMemberResponse]) +@require_permission("teams.read") +async def list_team_members(team_id: str, current_user: EmailUserResponse = Depends(get_current_user), db: Session = Depends(get_db)) -> List[TeamMemberResponse]: + """List team members. + + Args: + team_id: Team UUID + current_user: Currently authenticated user + db: Database session + + Returns: + List[TeamMemberResponse]: List of team members + + Raises: + HTTPException: If team not found or access denied + """ + try: + service = TeamManagementService(db) + + # Check if user has access to the team + user_role = await service.get_user_role_in_team(current_user.email, team_id) + if not user_role: + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Access denied to team") + + members = await service.get_team_members(team_id) + + member_responses = [] + for member in members: + member_responses.append( + TeamMemberResponse( + id=member.id, team_id=member.team_id, user_email=member.user_email, role=member.role, joined_at=member.joined_at, invited_by=member.invited_by, is_active=member.is_active + ) + ) + + return member_responses + except HTTPException: + raise + except Exception as e: + logger.error(f"Error listing team members for team {team_id}: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to list team members") + + +@teams_router.put("/{team_id}/members/{user_email}", response_model=TeamMemberResponse) +@require_permission("teams.manage_members") +async def update_team_member( + team_id: str, user_email: str, request: TeamMemberUpdateRequest, current_user: EmailUserResponse = Depends(get_current_user), db: Session = Depends(get_db) +) -> TeamMemberResponse: + """Update a team member's role. + + Args: + team_id: Team UUID + user_email: Email of the member to update + request: Member update request data + current_user: Currently authenticated user + db: Database session + + Returns: + TeamMemberResponse: Updated member data + + Raises: + HTTPException: If member not found, access denied, or update fails + """ + try: + service = TeamManagementService(db) + + # Check if user is team owner + role = await service.get_user_role_in_team(current_user.email, team_id) + if role != "owner": + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Insufficient permissions") + + member = await service.update_member_role(team_id, user_email, request.role) + if not member: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Team member not found") + + return TeamMemberResponse( + id=member.id, team_id=member.team_id, user_email=member.user_email, role=member.role, joined_at=member.joined_at, invited_by=member.invited_by, is_active=member.is_active + ) + except HTTPException: + raise + except ValueError as e: + logger.error(f"Member update failed: {e}") + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e)) + except Exception as e: + logger.error(f"Error updating team member {user_email} in team {team_id}: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to update team member") + + +@teams_router.delete("/{team_id}/members/{user_email}", response_model=SuccessResponse) +@require_permission("teams.manage_members") +async def remove_team_member(team_id: str, user_email: str, current_user: EmailUserResponse = Depends(get_current_user), db: Session = Depends(get_db)) -> SuccessResponse: + """Remove a team member. + + Args: + team_id: Team UUID + user_email: Email of the member to remove + current_user: Currently authenticated user + db: Database session + + Returns: + SuccessResponse: Success confirmation + + Raises: + HTTPException: If member not found, access denied, or removal fails + """ + try: + service = TeamManagementService(db) + + # Users can remove themselves, or owners can remove others + current_user_role = await service.get_user_role_in_team(current_user.email, team_id) + if current_user.email != user_email and current_user_role != "owner": + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Insufficient permissions") + + success = await service.remove_member_from_team(team_id, user_email) + if not success: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Team member not found") + + return SuccessResponse(message="Team member removed successfully") + except HTTPException: + raise + except Exception as e: + logger.error(f"Error removing team member {user_email} from team {team_id}: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to remove team member") + + +# --------------------------------------------------------------------------- +# Team Invitations +# --------------------------------------------------------------------------- + + +@teams_router.post("/{team_id}/invitations", response_model=TeamInvitationResponse, status_code=status.HTTP_201_CREATED) +@require_permission("teams.manage_members") +async def invite_team_member(team_id: str, request: TeamInviteRequest, current_user: EmailUserResponse = Depends(get_current_user), db: Session = Depends(get_db)) -> TeamInvitationResponse: + """Invite a user to join a team. + + Args: + team_id: Team UUID + request: Invitation request data + current_user: Currently authenticated user + db: Database session + + Returns: + TeamInvitationResponse: Created invitation data + + Raises: + HTTPException: If team not found, access denied, or invitation fails + """ + try: + team_service = TeamManagementService(db) + invitation_service = TeamInvitationService(db) + + # Check if user is team owner + role = await team_service.get_user_role_in_team(current_user.email, team_id) + if role != "owner": + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Insufficient permissions") + + invitation = await invitation_service.create_invitation(team_id=team_id, email=str(request.email), role=request.role, invited_by=current_user.email) + + # Get team name for response + team = await team_service.get_team_by_id(team_id) + team_name = team.name if team else "Unknown Team" + + return TeamInvitationResponse( + id=invitation.id, + team_id=invitation.team_id, + team_name=team_name, + email=invitation.email, + role=invitation.role, + invited_by=invitation.invited_by, + invited_at=invitation.invited_at, + expires_at=invitation.expires_at, + token=invitation.token, + is_active=invitation.is_active, + is_expired=invitation.is_expired(), + ) + except HTTPException: + raise + except ValueError as e: + logger.error(f"Team invitation failed: {e}") + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e)) + except Exception as e: + logger.error(f"Error creating team invitation for team {team_id}: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to create invitation") + + +@teams_router.get("/{team_id}/invitations", response_model=List[TeamInvitationResponse]) +@require_permission("teams.read") +async def list_team_invitations(team_id: str, current_user: EmailUserResponse = Depends(get_current_user), db: Session = Depends(get_db)) -> List[TeamInvitationResponse]: + """List team invitations. + + Args: + team_id: Team UUID + current_user: Currently authenticated user + db: Database session + + Returns: + List[TeamInvitationResponse]: List of team invitations + + Raises: + HTTPException: If team not found or access denied + """ + try: + team_service = TeamManagementService(db) + invitation_service = TeamInvitationService(db) + + # Check if user is team owner + role = await team_service.get_user_role_in_team(current_user.email, team_id) + if role != "owner": + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Insufficient permissions") + + invitations = await invitation_service.get_team_invitations(team_id) + + # Get team name for responses + team = await team_service.get_team_by_id(team_id) + team_name = team.name if team else "Unknown Team" + + invitation_responses = [] + for invitation in invitations: + invitation_responses.append( + TeamInvitationResponse( + id=invitation.id, + team_id=invitation.team_id, + team_name=team_name, + email=invitation.email, + role=invitation.role, + invited_by=invitation.invited_by, + invited_at=invitation.invited_at, + expires_at=invitation.expires_at, + token=invitation.token, + is_active=invitation.is_active, + is_expired=invitation.is_expired(), + ) + ) + + return invitation_responses + except HTTPException: + raise + except Exception as e: + logger.error(f"Error listing team invitations for team {team_id}: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to list invitations") + + +@teams_router.post("/invitations/{token}/accept", response_model=TeamMemberResponse) +@require_permission("teams.read") +async def accept_team_invitation(token: str, current_user: EmailUserResponse = Depends(get_current_user), db: Session = Depends(get_db)) -> TeamMemberResponse: + """Accept a team invitation. + + Args: + token: Invitation token + current_user: Currently authenticated user + db: Database session + + Returns: + TeamMemberResponse: New team member data + + Raises: + HTTPException: If invitation not found, expired, or acceptance fails + """ + try: + invitation_service = TeamInvitationService(db) + + member = await invitation_service.accept_invitation(token, current_user.email) + if not member: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Invalid or expired invitation") + + return TeamMemberResponse( + id=member.id, team_id=member.team_id, user_email=member.user_email, role=member.role, joined_at=member.joined_at, invited_by=member.invited_by, is_active=member.is_active + ) + except HTTPException: + raise + except ValueError as e: + logger.error(f"Invitation acceptance failed: {e}") + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e)) + except Exception as e: + logger.error(f"Error accepting invitation {token}: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to accept invitation") + + +@teams_router.delete("/invitations/{invitation_id}", response_model=SuccessResponse) +@require_permission("teams.manage_members") +async def cancel_team_invitation(invitation_id: str, current_user: EmailUserResponse = Depends(get_current_user), db: Session = Depends(get_db)) -> SuccessResponse: + """Cancel a team invitation. + + Args: + invitation_id: Invitation UUID + current_user: Currently authenticated user + db: Database session + + Returns: + SuccessResponse: Success confirmation + + Raises: + HTTPException: If invitation not found, access denied, or cancellation fails + """ + try: + team_service = TeamManagementService(db) + invitation_service = TeamInvitationService(db) + + # Get invitation to check team permissions + # First-Party + from mcpgateway.db import EmailTeamInvitation + + invitation = db.query(EmailTeamInvitation).filter(EmailTeamInvitation.id == invitation_id).first() + if not invitation: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Invitation not found") + + # Check if user is team owner or the inviter + role = await team_service.get_user_role_in_team(current_user.email, invitation.team_id) + if role != "owner" and current_user.email != invitation.invited_by: + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Insufficient permissions") + + success = await invitation_service.revoke_invitation(invitation_id, current_user.email) + if not success: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Invitation not found") + + return SuccessResponse(message="Team invitation cancelled successfully") + except HTTPException: + raise + except Exception as e: + logger.error(f"Error cancelling invitation {invitation_id}: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to cancel invitation") + + +@teams_router.get("/discover", response_model=List[TeamDiscoveryResponse]) +@require_permission("teams.read") +async def discover_public_teams( + skip: int = Query(0, ge=0, description="Number of teams to skip"), + limit: int = Query(50, ge=1, le=100, description="Number of teams to return"), + current_user_ctx: dict = Depends(get_current_user_with_permissions), +) -> List[TeamDiscoveryResponse]: + """Discover public teams that can be joined. + + Returns public teams that are discoverable to all authenticated users. + Only shows teams where the current user is not already a member. + + Args: + skip: Number of teams to skip for pagination + limit: Maximum number of teams to return + current_user_ctx: Current user context with permissions and database session + + Returns: + List[TeamDiscoveryResponse]: List of discoverable public teams + + Raises: + HTTPException: If there's an error discovering teams + """ + try: + db = current_user_ctx["db"] + team_service = TeamManagementService(db) + + # Get public teams where user is not already a member + public_teams = await team_service.discover_public_teams(current_user_ctx["email"], skip=skip, limit=limit) + + discovery_responses = [] + for team in public_teams: + discovery_responses.append( + TeamDiscoveryResponse( + id=team.id, + name=team.name, + description=team.description, + member_count=team.get_member_count(), + created_at=team.created_at, + is_joinable=True, # All returned teams are joinable + ) + ) + + return discovery_responses + except Exception as e: + logger.error(f"Error discovering public teams: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to discover teams") + + +@teams_router.post("/{team_id}/join", response_model=TeamJoinRequestResponse) +async def request_to_join_team( + team_id: str, + join_request: TeamJoinRequest, + current_user: EmailUserResponse = Depends(get_current_user), + db: Session = Depends(get_db), +) -> TeamJoinRequestResponse: + """Request to join a public team. + + Allows users to request membership in public teams. The request will be + pending until approved by a team owner. + + Args: + team_id: ID of the team to join + join_request: Join request details including optional message + current_user: Currently authenticated user + db: Database session + + Returns: + TeamJoinRequestResponse: Created join request details + + Raises: + HTTPException: If team not found, not public, user already member, or request fails + """ + try: + team_service = TeamManagementService(db) + + # Validate team exists and is public + team = await team_service.get_team_by_id(team_id) + if not team: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Team not found") + + if team.visibility != "public": + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Can only request to join public teams") + + # Check if user is already a member + user_role = await team_service.get_user_role_in_team(current_user.email, team_id) + if user_role: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="User is already a member of this team") + + # Create join request + join_req = await team_service.create_join_request(team_id=team_id, user_email=current_user.email, message=join_request.message) + + return TeamJoinRequestResponse( + id=join_req.id, + team_id=join_req.team_id, + team_name=team.name, + user_email=join_req.user_email, + message=join_req.message, + status=join_req.status, + requested_at=join_req.requested_at, + expires_at=join_req.expires_at, + ) + except HTTPException: + raise + except Exception as e: + logger.error(f"Error creating join request for team {team_id}: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to create join request") + + +@teams_router.delete("/{team_id}/leave", response_model=SuccessResponse) +async def leave_team( + team_id: str, + current_user: EmailUserResponse = Depends(get_current_user), + db: Session = Depends(get_db), +) -> SuccessResponse: + """Leave a team. + + Allows users to remove themselves from a team. Cannot leave personal teams + or if they are the last owner of a team. + + Args: + team_id: ID of the team to leave + current_user: Currently authenticated user + db: Database session + + Returns: + SuccessResponse: Confirmation of leaving the team + + Raises: + HTTPException: If team not found, user not member, cannot leave personal team, or last owner + """ + try: + team_service = TeamManagementService(db) + + # Validate team exists + team = await team_service.get_team_by_id(team_id) + if not team: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Team not found") + + # Cannot leave personal team + if team.is_personal: + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Cannot leave personal team") + + # Check if user is member + user_role = await team_service.get_user_role_in_team(current_user.email, team_id) + if not user_role: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="User is not a member of this team") + + # Remove user from team + success = await team_service.remove_member_from_team(team_id, current_user.email, removed_by=current_user.email) + if not success: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Cannot leave team - you may be the last owner") + + return SuccessResponse(message="Successfully left the team") + except HTTPException: + raise + except Exception as e: + logger.error(f"Error leaving team {team_id}: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to leave team") + + +@teams_router.get("/{team_id}/join-requests", response_model=List[TeamJoinRequestResponse]) +@require_permission("teams.manage_members") +async def list_team_join_requests( + team_id: str, + current_user: EmailUserResponse = Depends(get_current_user), + db: Session = Depends(get_db), +) -> List[TeamJoinRequestResponse]: + """List pending join requests for a team. + + Only team owners can view join requests for their teams. + + Args: + team_id: ID of the team + current_user: Currently authenticated user + db: Database session + + Returns: + List[TeamJoinRequestResponse]: List of pending join requests + + Raises: + HTTPException: If team not found or user not authorized + """ + try: + team_service = TeamManagementService(db) + + # Validate team exists and user is owner + team = await team_service.get_team_by_id(team_id) + if not team: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Team not found") + + user_role = await team_service.get_user_role_in_team(current_user.email, team_id) + if user_role != "owner": + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Only team owners can view join requests") + + # Get join requests + join_requests = await team_service.list_join_requests(team_id) + + return [ + TeamJoinRequestResponse( + id=req.id, + team_id=req.team_id, + team_name=team.name, + user_email=req.user_email, + message=req.message, + status=req.status, + requested_at=req.requested_at, + expires_at=req.expires_at, + ) + for req in join_requests + ] + except HTTPException: + raise + except Exception as e: + logger.error(f"Error listing join requests for team {team_id}: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to list join requests") + + +@teams_router.post("/{team_id}/join-requests/{request_id}/approve", response_model=TeamMemberResponse) +@require_permission("teams.manage_members") +async def approve_join_request( + team_id: str, + request_id: str, + current_user: EmailUserResponse = Depends(get_current_user), + db: Session = Depends(get_db), +) -> TeamMemberResponse: + """Approve a team join request. + + Only team owners can approve join requests for their teams. + + Args: + team_id: ID of the team + request_id: ID of the join request + current_user: Currently authenticated user + db: Database session + + Returns: + TeamMemberResponse: New team member data + + Raises: + HTTPException: If request not found or user not authorized + """ + try: + team_service = TeamManagementService(db) + + # Validate team exists and user is owner + team = await team_service.get_team_by_id(team_id) + if not team: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Team not found") + + user_role = await team_service.get_user_role_in_team(current_user.email, team_id) + if user_role != "owner": + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Only team owners can approve join requests") + + # Approve join request + member = await team_service.approve_join_request(request_id, approved_by=current_user.email) + if not member: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Join request not found") + + return TeamMemberResponse( + id=member.id, + team_id=member.team_id, + user_email=member.user_email, + role=member.role, + joined_at=member.joined_at, + invited_by=member.invited_by, + is_active=member.is_active, + ) + except HTTPException: + raise + except Exception as e: + logger.error(f"Error approving join request {request_id}: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to approve join request") + + +@teams_router.delete("/{team_id}/join-requests/{request_id}", response_model=SuccessResponse) +@require_permission("teams.manage_members") +async def reject_join_request( + team_id: str, + request_id: str, + current_user: EmailUserResponse = Depends(get_current_user), + db: Session = Depends(get_db), +) -> SuccessResponse: + """Reject a team join request. + + Only team owners can reject join requests for their teams. + + Args: + team_id: ID of the team + request_id: ID of the join request + current_user: Currently authenticated user + db: Database session + + Returns: + SuccessResponse: Confirmation of rejection + + Raises: + HTTPException: If request not found or user not authorized + """ + try: + team_service = TeamManagementService(db) + + # Validate team exists and user is owner + team = await team_service.get_team_by_id(team_id) + if not team: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Team not found") + + user_role = await team_service.get_user_role_in_team(current_user.email, team_id) + if user_role != "owner": + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Only team owners can reject join requests") + + # Reject join request + success = await team_service.reject_join_request(request_id, rejected_by=current_user.email) + if not success: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Join request not found") + + return SuccessResponse(message="Join request rejected successfully") + except HTTPException: + raise + except Exception as e: + logger.error(f"Error rejecting join request {request_id}: {e}") + raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail="Failed to reject join request") diff --git a/mcpgateway/routers/tokens.py b/mcpgateway/routers/tokens.py new file mode 100644 index 000000000..08b45a84d --- /dev/null +++ b/mcpgateway/routers/tokens.py @@ -0,0 +1,586 @@ +# -*- coding: utf-8 -*- +"""Location: ./mcpgateway/routers/tokens.py +Copyright 2025 +SPDX-License-Identifier: Apache-2.0 +Authors: Mihai Criveti + +JWT Token Catalog API endpoints. +Provides comprehensive API token management with scoping, revocation, and analytics. +""" + +# Standard +from typing import Optional + +# Third-Party +from fastapi import APIRouter, Depends, HTTPException, status +from sqlalchemy.orm import Session + +# First-Party +from mcpgateway.db import get_db +from mcpgateway.middleware.rbac import get_current_user_with_permissions, require_permission +from mcpgateway.schemas import ( + TokenCreateRequest, + TokenCreateResponse, + TokenListResponse, + TokenResponse, + TokenRevokeRequest, + TokenUpdateRequest, + TokenUsageStatsResponse, +) +from mcpgateway.services.token_catalog_service import TokenCatalogService, TokenScope + +router = APIRouter(prefix="/tokens", tags=["tokens"]) + + +@router.post("", response_model=TokenCreateResponse, status_code=status.HTTP_201_CREATED) +@require_permission("tokens.create") +async def create_token( + request: TokenCreateRequest, + current_user=Depends(get_current_user_with_permissions), + db: Session = Depends(get_db), +) -> TokenCreateResponse: + """Create a new API token for the current user. + + Args: + request: Token creation request with name, description, scoping, etc. + current_user: Authenticated user from JWT + db: Database session + + Returns: + TokenCreateResponse: Created token details with raw token + + Raises: + HTTPException: If token name already exists or validation fails + """ + service = TokenCatalogService(db) + + # Convert request to TokenScope if provided + scope = None + if request.scope: + scope = TokenScope( + server_id=request.scope.server_id, + permissions=request.scope.permissions, + ip_restrictions=request.scope.ip_restrictions, + time_restrictions=request.scope.time_restrictions, + usage_limits=request.scope.usage_limits, + ) + + try: + token_record, raw_token = await service.create_token( + user_email=current_user["email"], + name=request.name, + description=request.description, + scope=scope, + expires_in_days=request.expires_in_days, + tags=request.tags, + team_id=getattr(request, "team_id", None), + ) + + # Create TokenResponse for the token info + token_response = TokenResponse( + id=token_record.id, + name=token_record.name, + description=token_record.description, + user_email=token_record.user_email, + team_id=token_record.team_id, + server_id=token_record.server_id, + resource_scopes=token_record.resource_scopes or [], + ip_restrictions=token_record.ip_restrictions or [], + time_restrictions=token_record.time_restrictions or {}, + usage_limits=token_record.usage_limits or {}, + created_at=token_record.created_at, + expires_at=token_record.expires_at, + last_used=token_record.last_used, + is_active=token_record.is_active, + tags=token_record.tags or [], + ) + + return TokenCreateResponse( + token=token_response, + access_token=raw_token, + ) + except ValueError as e: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e)) + + +@router.get("", response_model=TokenListResponse) +@require_permission("tokens.read") +async def list_tokens( + include_inactive: bool = False, + limit: int = 50, + offset: int = 0, + db: Session = Depends(get_db), + current_user=Depends(get_current_user_with_permissions), +) -> TokenListResponse: + """List API tokens for the current user. + + Args: + include_inactive: Include inactive/expired tokens + limit: Maximum number of tokens to return (default 50) + offset: Number of tokens to skip for pagination + current_user: Authenticated user from JWT + db: Database session + + Returns: + TokenListResponse: List of user's API tokens + """ + service = TokenCatalogService(db) + tokens = await service.list_user_tokens( + user_email=current_user["email"], + include_inactive=include_inactive, + limit=limit, + offset=offset, + ) + + token_responses = [] + for token in tokens: + # Check if token is revoked + revocation_info = await service.get_token_revocation(token.jti) + + token_responses.append( + TokenResponse( + id=token.id, + name=token.name, + description=token.description, + user_email=token.user_email, + team_id=token.team_id, + created_at=token.created_at, + expires_at=token.expires_at, + last_used=token.last_used, + is_active=token.is_active, + is_revoked=revocation_info is not None, + revoked_at=revocation_info.revoked_at if revocation_info else None, + revoked_by=revocation_info.revoked_by if revocation_info else None, + revocation_reason=revocation_info.reason if revocation_info else None, + tags=token.tags, + server_id=token.server_id, + resource_scopes=token.resource_scopes, + ip_restrictions=token.ip_restrictions, + time_restrictions=token.time_restrictions, + usage_limits=token.usage_limits, + ) + ) + + return TokenListResponse(tokens=token_responses, total=len(token_responses), limit=limit, offset=offset) + + +@router.get("/{token_id}", response_model=TokenResponse) +async def get_token( + token_id: str, + current_user=Depends(get_current_user_with_permissions), + db: Session = Depends(get_db), +) -> TokenResponse: + """Get details of a specific token. + + Args: + token_id: Token ID to retrieve + current_user: Authenticated user from JWT + db: Database session + + Returns: + TokenResponse: Token details + + Raises: + HTTPException: If token not found or not owned by user + """ + service = TokenCatalogService(db) + token = await service.get_token(token_id, current_user["email"]) + + if not token: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Token not found") + + return TokenResponse( + id=token.id, + name=token.name, + description=token.description, + user_email=token.user_email, + team_id=token.team_id, + created_at=token.created_at, + expires_at=token.expires_at, + last_used=token.last_used, + is_active=token.is_active, + tags=token.tags, + server_id=token.server_id, + resource_scopes=token.resource_scopes, + ip_restrictions=token.ip_restrictions, + time_restrictions=token.time_restrictions, + usage_limits=token.usage_limits, + ) + + +@router.put("/{token_id}", response_model=TokenResponse) +async def update_token( + token_id: str, + request: TokenUpdateRequest, + current_user=Depends(get_current_user_with_permissions), + db: Session = Depends(get_db), +) -> TokenResponse: + """Update an existing token. + + Args: + token_id: Token ID to update + request: Token update request + current_user: Authenticated user from JWT + db: Database session + + Returns: + TokenResponse: Updated token details + + Raises: + HTTPException: If token not found or validation fails + """ + service = TokenCatalogService(db) + + # Convert request to TokenScope if provided + scope = None + if request.scope: + scope = TokenScope( + server_id=request.scope.server_id, + permissions=request.scope.permissions, + ip_restrictions=request.scope.ip_restrictions, + time_restrictions=request.scope.time_restrictions, + usage_limits=request.scope.usage_limits, + ) + + try: + token = await service.update_token( + token_id=token_id, + user_email=current_user["email"], + name=request.name, + description=request.description, + scope=scope, + tags=request.tags, + ) + + if not token: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Token not found") + + return TokenResponse( + id=token.id, + name=token.name, + description=token.description, + user_email=token.user_email, + team_id=token.team_id, + created_at=token.created_at, + expires_at=token.expires_at, + last_used=token.last_used, + is_active=token.is_active, + tags=token.tags, + server_id=token.server_id, + resource_scopes=token.resource_scopes, + ip_restrictions=token.ip_restrictions, + time_restrictions=token.time_restrictions, + usage_limits=token.usage_limits, + ) + except ValueError as e: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e)) + + +@router.delete("/{token_id}", status_code=status.HTTP_204_NO_CONTENT) +async def revoke_token( + token_id: str, + request: Optional[TokenRevokeRequest] = None, + current_user=Depends(get_current_user_with_permissions), + db: Session = Depends(get_db), +) -> None: + """Revoke (delete) a token. + + Args: + token_id: Token ID to revoke + request: Optional revocation request with reason + current_user: Authenticated user from JWT + db: Database session + + Raises: + HTTPException: If token not found + """ + service = TokenCatalogService(db) + + reason = request.reason if request else "Revoked by user" + success = await service.revoke_token(token_id=token_id, revoked_by=current_user["email"], reason=reason) + + if not success: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Token not found") + + +@router.get("/{token_id}/usage", response_model=TokenUsageStatsResponse) +async def get_token_usage_stats( + token_id: str, + days: int = 30, + current_user=Depends(get_current_user_with_permissions), + db: Session = Depends(get_db), +) -> TokenUsageStatsResponse: + """Get usage statistics for a specific token. + + Args: + token_id: Token ID to get stats for + days: Number of days to analyze (default 30) + current_user: Authenticated user from JWT + db: Database session + + Returns: + TokenUsageStatsResponse: Token usage statistics + + Raises: + HTTPException: If token not found or not owned by user + """ + service = TokenCatalogService(db) + + # Verify token ownership + token = await service.get_token(token_id, current_user["email"]) + if not token: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Token not found") + + stats = await service.get_token_usage_stats(user_email=current_user["email"], token_id=token_id, days=days) + + return TokenUsageStatsResponse(**stats) + + +# Admin endpoints for token oversight +@router.get("/admin/all", response_model=TokenListResponse, tags=["admin"]) +async def list_all_tokens( + user_email: Optional[str] = None, + include_inactive: bool = False, + limit: int = 100, + offset: int = 0, + current_user=Depends(get_current_user_with_permissions), + db: Session = Depends(get_db), +) -> TokenListResponse: + """Admin endpoint to list all tokens or tokens for a specific user. + + Args: + user_email: Filter tokens by user email (admin only) + include_inactive: Include inactive/expired tokens + limit: Maximum number of tokens to return + offset: Number of tokens to skip + current_user: Authenticated admin user + db: Database session + + Returns: + TokenListResponse: List of tokens + + Raises: + HTTPException: If user is not admin + """ + if not current_user["is_admin"]: + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Admin access required") + + service = TokenCatalogService(db) + + if user_email: + # Get tokens for specific user + tokens = await service.list_user_tokens( + user_email=user_email, + include_inactive=include_inactive, + limit=limit, + offset=offset, + ) + else: + # This would need a new method in service for all tokens + # For now, return empty list - can implement later if needed + tokens = [] + + token_responses = [] + for token in tokens: + # Check if token is revoked + revocation_info = await service.get_token_revocation(token.jti) + + token_responses.append( + TokenResponse( + id=token.id, + name=token.name, + description=token.description, + user_email=token.user_email, + team_id=token.team_id, + created_at=token.created_at, + expires_at=token.expires_at, + last_used=token.last_used, + is_active=token.is_active, + is_revoked=revocation_info is not None, + revoked_at=revocation_info.revoked_at if revocation_info else None, + revoked_by=revocation_info.revoked_by if revocation_info else None, + revocation_reason=revocation_info.reason if revocation_info else None, + tags=token.tags, + server_id=token.server_id, + resource_scopes=token.resource_scopes, + ip_restrictions=token.ip_restrictions, + time_restrictions=token.time_restrictions, + usage_limits=token.usage_limits, + ) + ) + + return TokenListResponse(tokens=token_responses, total=len(token_responses), limit=limit, offset=offset) + + +@router.delete("/admin/{token_id}", status_code=status.HTTP_204_NO_CONTENT, tags=["admin"]) +async def admin_revoke_token( + token_id: str, + request: Optional[TokenRevokeRequest] = None, + current_user=Depends(get_current_user_with_permissions), + db: Session = Depends(get_db), +) -> None: + """Admin endpoint to revoke any token. + + Args: + token_id: Token ID to revoke + request: Optional revocation request with reason + current_user: Authenticated admin user + db: Database session + + Raises: + HTTPException: If user is not admin or token not found + """ + if not current_user["is_admin"]: + raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Admin access required") + + service = TokenCatalogService(db) + admin_email = current_user["email"] + reason = request.reason if request else f"Revoked by admin {admin_email}" + + success = await service.revoke_token(token_id=token_id, revoked_by=current_user["email"], reason=reason) + + if not success: + raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Token not found") + + +# Team-based token endpoints +@router.post("/teams/{team_id}", response_model=TokenCreateResponse, status_code=status.HTTP_201_CREATED) +async def create_team_token( + team_id: str, + request: TokenCreateRequest, + current_user=Depends(get_current_user_with_permissions), + db: Session = Depends(get_db), +) -> TokenCreateResponse: + """Create a new API token for a team (only team owners can do this). + + Args: + team_id: Team ID to create token for + request: Token creation request with name, description, scoping, etc. + current_user: Authenticated user (must be team owner) + db: Database session + + Returns: + TokenCreateResponse: Created token details with raw token + + Raises: + HTTPException: If user is not team owner or validation fails + """ + service = TokenCatalogService(db) + + # Convert request to TokenScope if provided + scope = None + if request.scope: + scope = TokenScope( + server_id=request.scope.server_id, + permissions=request.scope.permissions, + ip_restrictions=request.scope.ip_restrictions, + time_restrictions=request.scope.time_restrictions, + usage_limits=request.scope.usage_limits, + ) + + try: + token_record, raw_token = await service.create_token( + user_email=current_user["email"], + name=request.name, + description=request.description, + scope=scope, + expires_in_days=request.expires_in_days, + tags=request.tags, + team_id=team_id, # This will validate team ownership + ) + + # Create TokenResponse for the token info + token_response = TokenResponse( + id=token_record.id, + name=token_record.name, + description=token_record.description, + user_email=token_record.user_email, + team_id=token_record.team_id, + server_id=token_record.server_id, + resource_scopes=token_record.resource_scopes or [], + ip_restrictions=token_record.ip_restrictions or [], + time_restrictions=token_record.time_restrictions or {}, + usage_limits=token_record.usage_limits or {}, + created_at=token_record.created_at, + expires_at=token_record.expires_at, + last_used=token_record.last_used, + is_active=token_record.is_active, + tags=token_record.tags or [], + ) + + return TokenCreateResponse( + token=token_response, + access_token=raw_token, + ) + except ValueError as e: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e)) + + +@router.get("/teams/{team_id}", response_model=TokenListResponse) +async def list_team_tokens( + team_id: str, + include_inactive: bool = False, + limit: int = 50, + offset: int = 0, + current_user=Depends(get_current_user_with_permissions), + db: Session = Depends(get_db), +) -> TokenListResponse: + """List API tokens for a team (only team owners can do this). + + Args: + team_id: Team ID to list tokens for + include_inactive: Include inactive/expired tokens + limit: Maximum number of tokens to return (default 50) + offset: Number of tokens to skip for pagination + current_user: Authenticated user (must be team owner) + db: Database session + + Returns: + TokenListResponse: List of teams API tokens + + Raises: + HTTPException: If user is not team owner + """ + service = TokenCatalogService(db) + + try: + tokens = await service.list_team_tokens( + team_id=team_id, + user_email=current_user["email"], # This will validate team ownership + include_inactive=include_inactive, + limit=limit, + offset=offset, + ) + + token_responses = [] + for token in tokens: + # Check if token is revoked + revocation_info = await service.get_token_revocation(token.jti) + + token_responses.append( + TokenResponse( + id=token.id, + name=token.name, + description=token.description, + user_email=token.user_email, + team_id=token.team_id, + created_at=token.created_at, + expires_at=token.expires_at, + last_used=token.last_used, + is_active=token.is_active, + is_revoked=revocation_info is not None, + revoked_at=revocation_info.revoked_at if revocation_info else None, + revoked_by=revocation_info.revoked_by if revocation_info else None, + revocation_reason=revocation_info.reason if revocation_info else None, + tags=token.tags, + server_id=token.server_id, + resource_scopes=token.resource_scopes, + ip_restrictions=token.ip_restrictions, + time_restrictions=token.time_restrictions, + usage_limits=token.usage_limits, + ) + ) + + return TokenListResponse(tokens=token_responses, total=len(token_responses), limit=limit, offset=offset) + except ValueError as e: + raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=str(e)) diff --git a/mcpgateway/schemas.py b/mcpgateway/schemas.py index 743424f3a..a341b0b46 100644 --- a/mcpgateway/schemas.py +++ b/mcpgateway/schemas.py @@ -29,7 +29,7 @@ from typing import Any, Dict, List, Literal, Optional, Self, Union # Third-Party -from pydantic import AnyHttpUrl, BaseModel, ConfigDict, Field, field_serializer, field_validator, model_validator, ValidationInfo +from pydantic import AnyHttpUrl, BaseModel, ConfigDict, EmailStr, Field, field_serializer, field_validator, model_validator, ValidationInfo # First-Party from mcpgateway.config import settings @@ -348,6 +348,11 @@ class ToolCreate(BaseModel): gateway_id: Optional[str] = Field(None, description="id of gateway for the tool") tags: Optional[List[str]] = Field(default_factory=list, description="Tags for categorizing the tool") + # Team scoping fields + team_id: Optional[str] = Field(None, description="Team ID for resource organization") + owner_email: Optional[str] = Field(None, description="Email of the tool owner") + visibility: str = Field(default="private", description="Visibility level (private, team, public)") + @field_validator("tags") @classmethod def validate_tags(cls, v: Optional[List[str]]) -> List[str]: @@ -964,6 +969,11 @@ class ToolRead(BaseModelWithConfigDict): federation_source: Optional[str] = Field(None, description="Source gateway for federated entities") version: Optional[int] = Field(1, description="Entity version for change tracking") + # Team scoping fields + team_id: Optional[str] = Field(None, description="ID of the team that owns this resource") + owner_email: Optional[str] = Field(None, description="Email of the user who owns this resource") + visibility: str = Field(default="private", description="Visibility level: private, team, or public") + class ToolInvocation(BaseModelWithConfigDict): """Schema for tool invocation requests. @@ -1147,6 +1157,11 @@ class ResourceCreate(BaseModel): content: Union[str, bytes] = Field(..., description="Resource content (text or binary)") tags: Optional[List[str]] = Field(default_factory=list, description="Tags for categorizing the resource") + # Team scoping fields + team_id: Optional[str] = Field(None, description="Team ID for resource organization") + owner_email: Optional[str] = Field(None, description="Email of the resource owner") + visibility: str = Field(default="private", description="Visibility level (private, team, public)") + @field_validator("tags") @classmethod def validate_tags(cls, v: Optional[List[str]]) -> List[str]: @@ -1267,6 +1282,11 @@ class ResourceUpdate(BaseModelWithConfigDict): content: Optional[Union[str, bytes]] = Field(None, description="Resource content (text or binary)") tags: Optional[List[str]] = Field(None, description="Tags for categorizing the resource") + # Team scoping fields + team_id: Optional[str] = Field(None, description="Team ID for resource organization") + owner_email: Optional[str] = Field(None, description="Email of the resource owner") + visibility: Optional[str] = Field(None, description="Visibility level (private, team, public)") + @field_validator("tags") @classmethod def validate_tags(cls, v: Optional[List[str]]) -> List[str]: @@ -1401,6 +1421,11 @@ class ResourceRead(BaseModelWithConfigDict): federation_source: Optional[str] = Field(None, description="Source gateway for federated entities") version: Optional[int] = Field(1, description="Entity version for change tracking") + # Team scoping fields + team_id: Optional[str] = Field(None, description="ID of the team that owns this resource") + owner_email: Optional[str] = Field(None, description="Email of the user who owns this resource") + visibility: str = Field(default="private", description="Visibility level: private, team, or public") + class ResourceSubscription(BaseModelWithConfigDict): """Schema for resource subscriptions. @@ -1646,6 +1671,11 @@ class PromptCreate(BaseModel): arguments: List[PromptArgument] = Field(default_factory=list, description="List of arguments for the template") tags: Optional[List[str]] = Field(default_factory=list, description="Tags for categorizing the prompt") + # Team scoping fields + team_id: Optional[str] = Field(None, description="Team ID for resource organization") + owner_email: Optional[str] = Field(None, description="Email of the prompt owner") + visibility: str = Field(default="private", description="Visibility level (private, team, public)") + @field_validator("tags") @classmethod def validate_tags(cls, v: Optional[List[str]]) -> List[str]: @@ -1761,6 +1791,11 @@ class PromptUpdate(BaseModelWithConfigDict): tags: Optional[List[str]] = Field(None, description="Tags for categorizing the prompt") + # Team scoping fields + team_id: Optional[str] = Field(None, description="Team ID for resource organization") + owner_email: Optional[str] = Field(None, description="Email of the prompt owner") + visibility: Optional[str] = Field(None, description="Visibility level (private, team, public)") + @field_validator("tags") @classmethod def validate_tags(cls, v: Optional[List[str]]) -> List[str]: @@ -1873,6 +1908,11 @@ class PromptRead(BaseModelWithConfigDict): federation_source: Optional[str] = Field(None, description="Source gateway for federated entities") version: Optional[int] = Field(1, description="Entity version for change tracking") + # Team scoping fields + team_id: Optional[str] = Field(None, description="ID of the team that owns this resource") + owner_email: Optional[str] = Field(None, description="Email of the user who owns this resource") + visibility: str = Field(default="private", description="Visibility level: private, team, or public") + class PromptInvocation(BaseModelWithConfigDict): """Schema for prompt invocation requests. @@ -1973,6 +2013,11 @@ class GatewayCreate(BaseModel): auth_value: Optional[str] = Field(None, validate_default=True) tags: Optional[List[str]] = Field(default_factory=list, description="Tags for categorizing the gateway") + # Team scoping fields for resource organization + team_id: Optional[str] = Field(None, description="Team ID this gateway belongs to") + owner_email: Optional[str] = Field(None, description="Email of the gateway owner") + visibility: str = Field(default="public", description="Gateway visibility: private, team, or public") + @field_validator("tags") @classmethod def validate_tags(cls, v: Optional[List[str]]) -> List[str]: @@ -2211,6 +2256,11 @@ class GatewayUpdate(BaseModelWithConfigDict): tags: Optional[List[str]] = Field(None, description="Tags for categorizing the gateway") + # Team scoping fields for resource organization + team_id: Optional[str] = Field(None, description="Team ID this gateway belongs to") + owner_email: Optional[str] = Field(None, description="Email of the gateway owner") + visibility: Optional[str] = Field(None, description="Gateway visibility: private, team, or public") + @field_validator("tags") @classmethod def validate_tags(cls, v: Optional[List[str]]) -> List[str]: @@ -2447,6 +2497,11 @@ class GatewayRead(BaseModelWithConfigDict): auth_header_value: Optional[str] = Field(None, description="vallue for custom headers authentication") tags: List[str] = Field(default_factory=list, description="Tags for categorizing the gateway") + # Team scoping fields for resource organization + team_id: Optional[str] = Field(None, description="Team ID this gateway belongs to") + owner_email: Optional[str] = Field(None, description="Email of the gateway owner") + visibility: str = Field(default="private", description="Gateway visibility: private, team, or public") + # Comprehensive metadata for audit tracking created_by: Optional[str] = Field(None, description="Username who created this entity") created_from_ip: Optional[str] = Field(None, description="IP address of creator") @@ -2899,6 +2954,11 @@ def validate_id(cls, v: Optional[str]) -> Optional[str]: associated_prompts: Optional[List[str]] = Field(None, description="Comma-separated prompt IDs") associated_a2a_agents: Optional[List[str]] = Field(None, description="Comma-separated A2A agent IDs") + # Team scoping fields + team_id: Optional[str] = Field(None, description="Team ID for resource organization") + owner_email: Optional[str] = Field(None, description="Email of the server owner") + visibility: str = Field(default="private", description="Visibility level (private, team, public)") + @field_validator("name") @classmethod def validate_name(cls, v: str) -> str: @@ -2963,6 +3023,39 @@ def split_comma_separated(cls, v): return [item.strip() for item in v.split(",") if item.strip()] return v + @field_validator("visibility") + @classmethod + def validate_visibility(cls, v: str) -> str: + """Validate visibility level. + + Args: + v: Visibility value to validate + + Returns: + Validated visibility value + + Raises: + ValueError: If visibility is invalid + """ + if v not in ["private", "team", "public"]: + raise ValueError("Visibility must be one of: private, team, public") + return v + + @field_validator("team_id") + @classmethod + def validate_team_id(cls, v: Optional[str]) -> Optional[str]: + """Validate team ID format. + + Args: + v: Team ID to validate + + Returns: + Validated team ID + """ + if v is not None: + return SecurityValidator.validate_uuid(v, "team_id") + return v + class ServerUpdate(BaseModelWithConfigDict): """Schema for updating an existing server. @@ -2976,6 +3069,11 @@ class ServerUpdate(BaseModelWithConfigDict): icon: Optional[str] = Field(None, description="URL for the server's icon") tags: Optional[List[str]] = Field(None, description="Tags for categorizing the server") + # Team scoping fields + team_id: Optional[str] = Field(None, description="Team ID for resource organization") + owner_email: Optional[str] = Field(None, description="Email of the server owner") + visibility: Optional[str] = Field(None, description="Visibility level (private, team, public)") + @field_validator("tags") @classmethod def validate_tags(cls, v: Optional[List[str]]) -> List[str]: @@ -3128,6 +3226,11 @@ class ServerRead(BaseModelWithConfigDict): federation_source: Optional[str] = Field(None, description="Source gateway for federated entities") version: Optional[int] = Field(1, description="Entity version for change tracking") + # Team scoping fields + team_id: Optional[str] = Field(None, description="ID of the team that owns this resource") + owner_email: Optional[str] = Field(None, description="Email of the user who owns this resource") + visibility: str = Field(default="private", description="Visibility level: private, team, or public") + @model_validator(mode="before") @classmethod def populate_associated_ids(cls, values): @@ -3259,6 +3362,8 @@ class A2AAgentCreate(BaseModel): auth_type (Optional[str]): Type of authentication ("api_key", "oauth", "bearer", etc.). auth_value (Optional[str]): Authentication credentials (will be encrypted). tags (List[str]): Tags for categorizing the agent. + team_id (Optional[str]): Team ID for resource organization. + visibility (str): Visibility level ("private", "team", "public"). """ model_config = ConfigDict(str_strip_whitespace=True) @@ -3274,6 +3379,11 @@ class A2AAgentCreate(BaseModel): auth_value: Optional[str] = Field(None, description="Authentication credentials") tags: List[str] = Field(default_factory=list, description="Tags for categorizing the agent") + # Team scoping fields + team_id: Optional[str] = Field(None, description="Team ID for resource organization") + owner_email: Optional[str] = Field(None, description="Email of the agent owner") + visibility: str = Field(default="private", description="Visibility level (private, team, public)") + @field_validator("tags") @classmethod def validate_tags(cls, v: Optional[List[str]]) -> List[str]: @@ -3347,6 +3457,39 @@ def validate_json_fields(cls, v: Dict[str, Any]) -> Dict[str, Any]: SecurityValidator.validate_json_depth(v) return v + @field_validator("visibility") + @classmethod + def validate_visibility(cls, v: str) -> str: + """Validate visibility level. + + Args: + v: Visibility value to validate + + Returns: + Validated visibility value + + Raises: + ValueError: If visibility is invalid + """ + if v not in ["private", "team", "public"]: + raise ValueError("Visibility must be one of: private, team, public") + return v + + @field_validator("team_id") + @classmethod + def validate_team_id(cls, v: Optional[str]) -> Optional[str]: + """Validate team ID format. + + Args: + v: Team ID to validate + + Returns: + Validated team ID + """ + if v is not None: + return SecurityValidator.validate_uuid(v, "team_id") + return v + class A2AAgentUpdate(BaseModelWithConfigDict): """Schema for updating an existing A2A agent. @@ -3365,6 +3508,11 @@ class A2AAgentUpdate(BaseModelWithConfigDict): auth_value: Optional[str] = Field(None, description="Authentication credentials") tags: Optional[List[str]] = Field(None, description="Tags for categorizing the agent") + # Team scoping fields + team_id: Optional[str] = Field(None, description="Team ID for resource organization") + owner_email: Optional[str] = Field(None, description="Email of the agent owner") + visibility: Optional[str] = Field(None, description="Visibility level (private, team, public)") + @field_validator("tags") @classmethod def validate_tags(cls, v: Optional[List[str]]) -> Optional[List[str]]: @@ -3442,6 +3590,39 @@ def validate_json_fields(cls, v: Optional[Dict[str, Any]]) -> Optional[Dict[str, SecurityValidator.validate_json_depth(v) return v + @field_validator("visibility") + @classmethod + def validate_visibility(cls, v: Optional[str]) -> Optional[str]: + """Validate visibility level. + + Args: + v: Visibility value to validate + + Returns: + Validated visibility value + + Raises: + ValueError: If visibility is invalid + """ + if v is not None and v not in ["private", "team", "public"]: + raise ValueError("Visibility must be one of: private, team, public") + return v + + @field_validator("team_id") + @classmethod + def validate_team_id(cls, v: Optional[str]) -> Optional[str]: + """Validate team ID format. + + Args: + v: Team ID to validate + + Returns: + Validated team ID + """ + if v is not None: + return SecurityValidator.validate_uuid(v, "team_id") + return v + class A2AAgentRead(BaseModelWithConfigDict): """Schema for reading A2A agent information. @@ -3488,6 +3669,11 @@ class A2AAgentRead(BaseModelWithConfigDict): federation_source: Optional[str] = Field(None, description="Source gateway for federated entities") version: Optional[int] = Field(1, description="Entity version for change tracking") + # Team scoping fields + team_id: Optional[str] = Field(None, description="ID of the team that owns this resource") + owner_email: Optional[str] = Field(None, description="Email of the user who owns this resource") + visibility: str = Field(default="private", description="Visibility level: private, team, or public") + class A2AAgentInvocation(BaseModelWithConfigDict): """Schema for A2A agent invocation requests. @@ -3531,3 +3717,1331 @@ def validate_parameters(cls, v: Dict[str, Any]) -> Dict[str, Any]: """ SecurityValidator.validate_json_depth(v) return v + + +# --------------------------------------------------------------------------- +# Email-Based Authentication Schemas (Epic 001) +# --------------------------------------------------------------------------- + + +class EmailLoginRequest(BaseModel): + """Request schema for email login. + + Attributes: + email: User's email address + password: User's password + + Examples: + >>> request = EmailLoginRequest(email="user@example.com", password="secret123") + >>> request.email + 'user@example.com' + >>> request.password + 'secret123' + """ + + model_config = ConfigDict(str_strip_whitespace=True) + + email: EmailStr = Field(..., description="User's email address") + password: str = Field(..., min_length=1, description="User's password") + + +class EmailRegistrationRequest(BaseModel): + """Request schema for user registration. + + Attributes: + email: User's email address + password: User's password + full_name: Optional full name for display + + Examples: + >>> request = EmailRegistrationRequest( + ... email="new@example.com", + ... password="secure123", + ... full_name="New User" + ... ) + >>> request.email + 'new@example.com' + >>> request.full_name + 'New User' + """ + + model_config = ConfigDict(str_strip_whitespace=True) + + email: EmailStr = Field(..., description="User's email address") + password: str = Field(..., min_length=8, description="User's password") + full_name: Optional[str] = Field(None, max_length=255, description="User's full name") + + @field_validator("password") + @classmethod + def validate_password(cls, v: str) -> str: + """Validate password meets minimum requirements. + + Args: + v: Password string to validate + + Returns: + str: Validated password + + Raises: + ValueError: If password doesn't meet requirements + """ + if len(v) < 8: + raise ValueError("Password must be at least 8 characters long") + return v + + +class ChangePasswordRequest(BaseModel): + """Request schema for password change. + + Attributes: + old_password: Current password for verification + new_password: New password to set + + Examples: + >>> request = ChangePasswordRequest( + ... old_password="old_secret", + ... new_password="new_secure_password" + ... ) + >>> request.old_password + 'old_secret' + >>> request.new_password + 'new_secure_password' + """ + + model_config = ConfigDict(str_strip_whitespace=True) + + old_password: str = Field(..., min_length=1, description="Current password") + new_password: str = Field(..., min_length=8, description="New password") + + @field_validator("new_password") + @classmethod + def validate_new_password(cls, v: str) -> str: + """Validate new password meets minimum requirements. + + Args: + v: New password string to validate + + Returns: + str: Validated new password + + Raises: + ValueError: If new password doesn't meet requirements + """ + if len(v) < 8: + raise ValueError("New password must be at least 8 characters long") + return v + + +class EmailUserResponse(BaseModel): + """Response schema for user information. + + Attributes: + email: User's email address + full_name: User's full name + is_admin: Whether user has admin privileges + is_active: Whether account is active + auth_provider: Authentication provider used + created_at: Account creation timestamp + last_login: Last successful login timestamp + email_verified: Whether email is verified + + Examples: + >>> user = EmailUserResponse( + ... email="user@example.com", + ... full_name="Test User", + ... is_admin=False, + ... is_active=True, + ... auth_provider="local", + ... created_at=datetime.now(), + ... last_login=None, + ... email_verified=False + ... ) + >>> user.email + 'user@example.com' + >>> user.is_admin + False + """ + + model_config = ConfigDict(from_attributes=True) + + email: str = Field(..., description="User's email address") + full_name: Optional[str] = Field(None, description="User's full name") + is_admin: bool = Field(..., description="Whether user has admin privileges") + is_active: bool = Field(..., description="Whether account is active") + auth_provider: str = Field(..., description="Authentication provider") + created_at: datetime = Field(..., description="Account creation timestamp") + last_login: Optional[datetime] = Field(None, description="Last successful login") + email_verified: bool = Field(False, description="Whether email is verified") + + @classmethod + def from_email_user(cls, user) -> "EmailUserResponse": + """Create response from EmailUser model. + + Args: + user: EmailUser model instance + + Returns: + EmailUserResponse: Response schema instance + """ + return cls( + email=user.email, + full_name=user.full_name, + is_admin=user.is_admin, + is_active=user.is_active, + auth_provider=user.auth_provider, + created_at=user.created_at, + last_login=user.last_login, + email_verified=user.is_email_verified(), + ) + + +class AuthenticationResponse(BaseModel): + """Response schema for successful authentication. + + Attributes: + access_token: JWT token for API access + token_type: Type of token (always 'bearer') + expires_in: Token expiration time in seconds + user: User information + + Examples: + >>> from datetime import datetime + >>> response = AuthenticationResponse( + ... access_token="jwt.token.here", + ... token_type="bearer", + ... expires_in=3600, + ... user=EmailUserResponse( + ... email="user@example.com", + ... full_name="Test User", + ... is_admin=False, + ... is_active=True, + ... auth_provider="local", + ... created_at=datetime.now(), + ... last_login=None, + ... email_verified=False + ... ) + ... ) + >>> response.token_type + 'bearer' + >>> response.user.email + 'user@example.com' + """ + + access_token: str = Field(..., description="JWT access token") + token_type: str = Field(default="bearer", description="Token type") + expires_in: int = Field(..., description="Token expiration in seconds") + user: EmailUserResponse = Field(..., description="User information") + + +class AuthEventResponse(BaseModel): + """Response schema for authentication events. + + Attributes: + id: Event ID + timestamp: Event timestamp + user_email: User's email address + event_type: Type of authentication event + success: Whether the event was successful + ip_address: Client IP address + failure_reason: Reason for failure (if applicable) + + Examples: + >>> from datetime import datetime + >>> event = AuthEventResponse( + ... id=1, + ... timestamp=datetime.now(), + ... user_email="user@example.com", + ... event_type="login", + ... success=True, + ... ip_address="192.168.1.1", + ... failure_reason=None + ... ) + >>> event.event_type + 'login' + >>> event.success + True + """ + + model_config = ConfigDict(from_attributes=True) + + id: int = Field(..., description="Event ID") + timestamp: datetime = Field(..., description="Event timestamp") + user_email: Optional[str] = Field(None, description="User's email address") + event_type: str = Field(..., description="Type of authentication event") + success: bool = Field(..., description="Whether the event was successful") + ip_address: Optional[str] = Field(None, description="Client IP address") + failure_reason: Optional[str] = Field(None, description="Reason for failure") + + +class UserListResponse(BaseModel): + """Response schema for user list. + + Attributes: + users: List of users + total_count: Total number of users + limit: Request limit + offset: Request offset + + Examples: + >>> user_list = UserListResponse( + ... users=[], + ... total_count=0, + ... limit=10, + ... offset=0 + ... ) + >>> user_list.total_count + 0 + >>> len(user_list.users) + 0 + """ + + users: list[EmailUserResponse] = Field(..., description="List of users") + total_count: int = Field(..., description="Total number of users") + limit: int = Field(..., description="Request limit") + offset: int = Field(..., description="Request offset") + + +class AdminUserCreateRequest(BaseModel): + """Request schema for admin user creation. + + Attributes: + email: User's email address + password: User's password + full_name: Optional full name + is_admin: Whether user should have admin privileges + + Examples: + >>> request = AdminUserCreateRequest( + ... email="admin@example.com", + ... password="admin_password", + ... full_name="Admin User", + ... is_admin=True + ... ) + >>> request.email + 'admin@example.com' + >>> request.is_admin + True + """ + + model_config = ConfigDict(str_strip_whitespace=True) + + email: EmailStr = Field(..., description="User's email address") + password: str = Field(..., min_length=8, description="User's password") + full_name: Optional[str] = Field(None, max_length=255, description="User's full name") + is_admin: bool = Field(default=False, description="Whether user has admin privileges") + + +class AdminUserUpdateRequest(BaseModel): + """Request schema for admin user updates. + + Attributes: + full_name: User's full name + is_admin: Whether user has admin privileges + is_active: Whether account is active + + Examples: + >>> request = AdminUserUpdateRequest( + ... full_name="Updated Name", + ... is_admin=True, + ... is_active=True + ... ) + >>> request.full_name + 'Updated Name' + >>> request.is_admin + True + """ + + model_config = ConfigDict(str_strip_whitespace=True) + + full_name: Optional[str] = Field(None, max_length=255, description="User's full name") + is_admin: Optional[bool] = Field(None, description="Whether user has admin privileges") + is_active: Optional[bool] = Field(None, description="Whether account is active") + + +class ErrorResponse(BaseModel): + """Standard error response schema. + + Attributes: + error: Error type + message: Human-readable error message + details: Additional error details + + Examples: + >>> error = ErrorResponse( + ... error="authentication_failed", + ... message="Invalid email or password", + ... details=None + ... ) + >>> error.error + 'authentication_failed' + >>> error.message + 'Invalid email or password' + """ + + error: str = Field(..., description="Error type") + message: str = Field(..., description="Human-readable error message") + details: Optional[dict] = Field(None, description="Additional error details") + + +class SuccessResponse(BaseModel): + """Standard success response schema. + + Attributes: + success: Whether operation was successful + message: Human-readable success message + + Examples: + >>> response = SuccessResponse( + ... success=True, + ... message="Password changed successfully" + ... ) + >>> response.success + True + >>> response.message + 'Password changed successfully' + """ + + success: bool = Field(True, description="Operation success status") + message: str = Field(..., description="Human-readable success message") + + +# --------------------------------------------------------------------------- +# Team Management Schemas (Epic 002) +# --------------------------------------------------------------------------- + + +class TeamCreateRequest(BaseModel): + """Schema for creating a new team. + + Attributes: + name: Team display name + slug: URL-friendly team identifier (optional, auto-generated if not provided) + description: Team description + visibility: Team visibility level + max_members: Maximum number of members allowed + + Examples: + >>> request = TeamCreateRequest( + ... name="Engineering Team", + ... description="Software development team" + ... ) + >>> request.name + 'Engineering Team' + """ + + name: str = Field(..., min_length=1, max_length=255, description="Team display name") + slug: Optional[str] = Field(None, min_length=2, max_length=255, pattern="^[a-z0-9-]+$", description="URL-friendly team identifier") + description: Optional[str] = Field(None, max_length=1000, description="Team description") + visibility: Literal["private", "public"] = Field("private", description="Team visibility level") + max_members: Optional[int] = Field(None, ge=1, le=1000, description="Maximum number of team members") + + @field_validator("name") + @classmethod + def validate_name(cls, v: str) -> str: + """Validate team name. + + Args: + v: Team name to validate + + Returns: + str: Validated and stripped team name + + Raises: + ValueError: If team name is empty + """ + if not v.strip(): + raise ValueError("Team name cannot be empty") + return v.strip() + + @field_validator("slug") + @classmethod + def validate_slug(cls, v: Optional[str]) -> Optional[str]: + """Validate team slug. + + Args: + v: Team slug to validate + + Returns: + Optional[str]: Validated and formatted slug or None + + Raises: + ValueError: If slug format is invalid + """ + if v is None: + return v + v = v.strip().lower() + if not re.match(r"^[a-z0-9-]+$", v): + raise ValueError("Slug must contain only lowercase letters, numbers, and hyphens") + if v.startswith("-") or v.endswith("-"): + raise ValueError("Slug cannot start or end with hyphens") + return v + + +class TeamUpdateRequest(BaseModel): + """Schema for updating a team. + + Attributes: + name: Team display name + description: Team description + visibility: Team visibility level + max_members: Maximum number of members allowed + + Examples: + >>> request = TeamUpdateRequest( + ... name="Updated Engineering Team", + ... description="Updated description" + ... ) + >>> request.name + 'Updated Engineering Team' + """ + + name: Optional[str] = Field(None, min_length=1, max_length=255, description="Team display name") + description: Optional[str] = Field(None, max_length=1000, description="Team description") + visibility: Optional[Literal["private", "public"]] = Field(None, description="Team visibility level") + max_members: Optional[int] = Field(None, ge=1, le=1000, description="Maximum number of team members") + + @field_validator("name") + @classmethod + def validate_name(cls, v: Optional[str]) -> Optional[str]: + """Validate team name. + + Args: + v: Team name to validate + + Returns: + Optional[str]: Validated and stripped team name or None + + Raises: + ValueError: If team name is empty + """ + if v is not None: + if not v.strip(): + raise ValueError("Team name cannot be empty") + return v.strip() + return v + + +class TeamResponse(BaseModel): + """Schema for team response data. + + Attributes: + id: Team UUID + name: Team display name + slug: URL-friendly team identifier + description: Team description + created_by: Email of team creator + is_personal: Whether this is a personal team + visibility: Team visibility level + max_members: Maximum number of members allowed + member_count: Current number of team members + created_at: Team creation timestamp + updated_at: Last update timestamp + is_active: Whether the team is active + + Examples: + >>> team = TeamResponse( + ... id="team-123", + ... name="Engineering Team", + ... slug="engineering-team", + ... created_by="admin@example.com", + ... is_personal=False, + ... visibility="private", + ... member_count=5, + ... created_at=datetime.now(timezone.utc), + ... updated_at=datetime.now(timezone.utc), + ... is_active=True + ... ) + >>> team.name + 'Engineering Team' + """ + + id: str = Field(..., description="Team UUID") + name: str = Field(..., description="Team display name") + slug: str = Field(..., description="URL-friendly team identifier") + description: Optional[str] = Field(None, description="Team description") + created_by: str = Field(..., description="Email of team creator") + is_personal: bool = Field(..., description="Whether this is a personal team") + visibility: str = Field(..., description="Team visibility level") + max_members: Optional[int] = Field(None, description="Maximum number of members allowed") + member_count: int = Field(..., description="Current number of team members") + created_at: datetime = Field(..., description="Team creation timestamp") + updated_at: datetime = Field(..., description="Last update timestamp") + is_active: bool = Field(..., description="Whether the team is active") + + +class TeamMemberResponse(BaseModel): + """Schema for team member response data. + + Attributes: + id: Member UUID + team_id: Team UUID + user_email: Member email address + role: Member role in the team + joined_at: When the member joined + invited_by: Email of user who invited this member + is_active: Whether the membership is active + + Examples: + >>> member = TeamMemberResponse( + ... id="member-123", + ... team_id="team-123", + ... user_email="user@example.com", + ... role="member", + ... joined_at=datetime.now(timezone.utc), + ... is_active=True + ... ) + >>> member.role + 'member' + """ + + id: str = Field(..., description="Member UUID") + team_id: str = Field(..., description="Team UUID") + user_email: str = Field(..., description="Member email address") + role: str = Field(..., description="Member role in the team") + joined_at: datetime = Field(..., description="When the member joined") + invited_by: Optional[str] = Field(None, description="Email of user who invited this member") + is_active: bool = Field(..., description="Whether the membership is active") + + +class TeamInviteRequest(BaseModel): + """Schema for inviting users to a team. + + Attributes: + email: Email address of user to invite + role: Role to assign to the user + + Examples: + >>> invite = TeamInviteRequest( + ... email="newuser@example.com", + ... role="member" + ... ) + >>> invite.email + 'newuser@example.com' + """ + + email: EmailStr = Field(..., description="Email address of user to invite") + role: Literal["owner", "member"] = Field("member", description="Role to assign to the user") + + +class TeamInvitationResponse(BaseModel): + """Schema for team invitation response data. + + Attributes: + id: Invitation UUID + team_id: Team UUID + team_name: Team display name + email: Email address of invited user + role: Role the user will have when they accept + invited_by: Email of user who sent the invitation + invited_at: When the invitation was sent + expires_at: When the invitation expires + token: Invitation token + is_active: Whether the invitation is active + is_expired: Whether the invitation has expired + + Examples: + >>> invitation = TeamInvitationResponse( + ... id="invite-123", + ... team_id="team-123", + ... team_name="Engineering Team", + ... email="newuser@example.com", + ... role="member", + ... invited_by="admin@example.com", + ... invited_at=datetime.now(timezone.utc), + ... expires_at=datetime.now(timezone.utc), + ... token="invitation-token", + ... is_active=True, + ... is_expired=False + ... ) + >>> invitation.role + 'member' + """ + + id: str = Field(..., description="Invitation UUID") + team_id: str = Field(..., description="Team UUID") + team_name: str = Field(..., description="Team display name") + email: str = Field(..., description="Email address of invited user") + role: str = Field(..., description="Role the user will have when they accept") + invited_by: str = Field(..., description="Email of user who sent the invitation") + invited_at: datetime = Field(..., description="When the invitation was sent") + expires_at: datetime = Field(..., description="When the invitation expires") + token: str = Field(..., description="Invitation token") + is_active: bool = Field(..., description="Whether the invitation is active") + is_expired: bool = Field(..., description="Whether the invitation has expired") + + +class TeamMemberUpdateRequest(BaseModel): + """Schema for updating a team member's role. + + Attributes: + role: New role for the team member + + Examples: + >>> update = TeamMemberUpdateRequest(role="member") + >>> update.role + 'member' + """ + + role: Literal["owner", "member"] = Field(..., description="New role for the team member") + + +class TeamListResponse(BaseModel): + """Schema for team list response. + + Attributes: + teams: List of teams + total: Total number of teams + + Examples: + >>> response = TeamListResponse(teams=[], total=0) + >>> response.total + 0 + """ + + teams: List[TeamResponse] = Field(..., description="List of teams") + total: int = Field(..., description="Total number of teams") + + +class TeamDiscoveryResponse(BaseModel): + """Schema for public team discovery response. + + Provides limited metadata about public teams for discovery purposes. + + Attributes: + id: Team ID + name: Team name + description: Team description + member_count: Number of members + created_at: Team creation timestamp + is_joinable: Whether the current user can join this team + """ + + id: str = Field(..., description="Team ID") + name: str = Field(..., description="Team name") + description: Optional[str] = Field(None, description="Team description") + member_count: int = Field(..., description="Number of team members") + created_at: datetime = Field(..., description="Team creation timestamp") + is_joinable: bool = Field(..., description="Whether the current user can join this team") + + +class TeamJoinRequest(BaseModel): + """Schema for requesting to join a public team. + + Attributes: + message: Optional message to team owners + """ + + message: Optional[str] = Field(None, description="Optional message to team owners", max_length=500) + + +class TeamJoinRequestResponse(BaseModel): + """Schema for team join request response. + + Attributes: + id: Join request ID + team_id: Target team ID + team_name: Target team name + user_email: Requesting user email + message: Request message + status: Request status (pending, approved, rejected) + requested_at: Request timestamp + expires_at: Request expiration timestamp + """ + + id: str = Field(..., description="Join request ID") + team_id: str = Field(..., description="Target team ID") + team_name: str = Field(..., description="Target team name") + user_email: str = Field(..., description="Requesting user email") + message: Optional[str] = Field(None, description="Request message") + status: str = Field(..., description="Request status") + requested_at: datetime = Field(..., description="Request timestamp") + expires_at: datetime = Field(..., description="Request expiration") + + +# API Token Management Schemas + + +class TokenScopeRequest(BaseModel): + """Schema for token scoping configuration. + + Attributes: + server_id: Optional server ID limitation + permissions: List of permission scopes + ip_restrictions: List of IP address/CIDR restrictions + time_restrictions: Time-based access limitations + usage_limits: Rate limiting and quota settings + + Examples: + >>> scope = TokenScopeRequest( + ... server_id="server-123", + ... permissions=["tools.read", "resources.read"], + ... ip_restrictions=["192.168.1.0/24"] + ... ) + >>> scope.server_id + 'server-123' + """ + + server_id: Optional[str] = Field(None, description="Limit token to specific server") + permissions: List[str] = Field(default_factory=list, description="Permission scopes") + ip_restrictions: List[str] = Field(default_factory=list, description="IP address restrictions") + time_restrictions: Dict[str, Any] = Field(default_factory=dict, description="Time-based restrictions") + usage_limits: Dict[str, Any] = Field(default_factory=dict, description="Usage limits and quotas") + + +class TokenCreateRequest(BaseModel): + """Schema for creating a new API token. + + Attributes: + name: Human-readable token name + description: Optional token description + expires_in_days: Optional expiry in days + scope: Optional token scoping configuration + tags: Optional organizational tags + + Examples: + >>> request = TokenCreateRequest( + ... name="Production Access", + ... description="Read-only production access", + ... expires_in_days=30, + ... tags=["production", "readonly"] + ... ) + >>> request.name + 'Production Access' + """ + + name: str = Field(..., description="Human-readable token name", min_length=1, max_length=255) + description: Optional[str] = Field(None, description="Token description", max_length=1000) + expires_in_days: Optional[int] = Field(None, description="Expiry in days", ge=1, le=365) + scope: Optional[TokenScopeRequest] = Field(None, description="Token scoping configuration") + tags: List[str] = Field(default_factory=list, description="Organizational tags") + team_id: Optional[str] = Field(None, description="Team ID for team-scoped tokens") + + +class TokenUpdateRequest(BaseModel): + """Schema for updating an existing API token. + + Attributes: + name: New token name + description: New token description + scope: New token scoping configuration + tags: New organizational tags + + Examples: + >>> request = TokenUpdateRequest( + ... name="Updated Token Name", + ... description="Updated description" + ... ) + >>> request.name + 'Updated Token Name' + """ + + name: Optional[str] = Field(None, description="New token name", min_length=1, max_length=255) + description: Optional[str] = Field(None, description="New token description", max_length=1000) + scope: Optional[TokenScopeRequest] = Field(None, description="New token scoping configuration") + tags: Optional[List[str]] = Field(None, description="New organizational tags") + + +class TokenResponse(BaseModel): + """Schema for API token response. + + Attributes: + id: Token ID + name: Token name + description: Token description + server_id: Server scope limitation + resource_scopes: Permission scopes + ip_restrictions: IP restrictions + time_restrictions: Time-based restrictions + usage_limits: Usage limits + created_at: Creation timestamp + expires_at: Expiry timestamp + last_used: Last usage timestamp + is_active: Active status + tags: Organizational tags + + Examples: + >>> from datetime import datetime + >>> token = TokenResponse( + ... id="token-123", + ... name="Test Token", + ... description="Test description", + ... user_email="test@example.com", + ... server_id=None, + ... resource_scopes=["tools.read"], + ... ip_restrictions=[], + ... time_restrictions={}, + ... usage_limits={}, + ... created_at=datetime.now(), + ... expires_at=None, + ... last_used=None, + ... is_active=True, + ... tags=[] + ... ) + >>> token.name + 'Test Token' + """ + + model_config = ConfigDict(from_attributes=True) + + id: str = Field(..., description="Token ID") + name: str = Field(..., description="Token name") + description: Optional[str] = Field(None, description="Token description") + user_email: str = Field(..., description="Token creator's email") + team_id: Optional[str] = Field(None, description="Team ID for team-scoped tokens") + server_id: Optional[str] = Field(None, description="Server scope limitation") + resource_scopes: List[str] = Field(..., description="Permission scopes") + ip_restrictions: List[str] = Field(..., description="IP restrictions") + time_restrictions: Dict[str, Any] = Field(..., description="Time-based restrictions") + usage_limits: Dict[str, Any] = Field(..., description="Usage limits") + created_at: datetime = Field(..., description="Creation timestamp") + expires_at: Optional[datetime] = Field(None, description="Expiry timestamp") + last_used: Optional[datetime] = Field(None, description="Last usage timestamp") + is_active: bool = Field(..., description="Active status") + is_revoked: bool = Field(False, description="Whether token is revoked") + revoked_at: Optional[datetime] = Field(None, description="Revocation timestamp") + revoked_by: Optional[str] = Field(None, description="Email of user who revoked token") + revocation_reason: Optional[str] = Field(None, description="Reason for revocation") + tags: List[str] = Field(..., description="Organizational tags") + + +class TokenCreateResponse(BaseModel): + """Schema for token creation response. + + Attributes: + token: Token information + access_token: The actual token string (only returned on creation) + + Examples: + >>> from datetime import datetime + >>> token_info = TokenResponse( + ... id="token-123", name="Test Token", description=None, + ... user_email="test@example.com", server_id=None, resource_scopes=[], ip_restrictions=[], + ... time_restrictions={}, usage_limits={}, created_at=datetime.now(), + ... expires_at=None, last_used=None, is_active=True, tags=[] + ... ) + >>> response = TokenCreateResponse( + ... token=token_info, + ... access_token="abc123xyz" + ... ) + >>> response.access_token + 'abc123xyz' + """ + + token: TokenResponse = Field(..., description="Token information") + access_token: str = Field(..., description="The actual token string") + + +class TokenListResponse(BaseModel): + """Schema for token list response. + + Attributes: + tokens: List of tokens + total: Total number of tokens + limit: Request limit + offset: Request offset + + Examples: + >>> response = TokenListResponse( + ... tokens=[], + ... total=0, + ... limit=10, + ... offset=0 + ... ) + >>> response.total + 0 + """ + + tokens: List[TokenResponse] = Field(..., description="List of tokens") + total: int = Field(..., description="Total number of tokens") + limit: int = Field(..., description="Request limit") + offset: int = Field(..., description="Request offset") + + +class TokenRevokeRequest(BaseModel): + """Schema for token revocation. + + Attributes: + reason: Optional reason for revocation + + Examples: + >>> request = TokenRevokeRequest(reason="Security incident") + >>> request.reason + 'Security incident' + """ + + reason: Optional[str] = Field(None, description="Reason for revocation", max_length=255) + + +class TokenUsageStatsResponse(BaseModel): + """Schema for token usage statistics. + + Attributes: + period_days: Number of days analyzed + total_requests: Total number of requests + successful_requests: Number of successful requests + blocked_requests: Number of blocked requests + success_rate: Success rate percentage + average_response_time_ms: Average response time + top_endpoints: Most accessed endpoints + + Examples: + >>> stats = TokenUsageStatsResponse( + ... period_days=30, + ... total_requests=100, + ... successful_requests=95, + ... blocked_requests=5, + ... success_rate=0.95, + ... average_response_time_ms=150.5, + ... top_endpoints=[("/tools", 50), ("/resources", 30)] + ... ) + >>> stats.success_rate + 0.95 + """ + + period_days: int = Field(..., description="Number of days analyzed") + total_requests: int = Field(..., description="Total number of requests") + successful_requests: int = Field(..., description="Number of successful requests") + blocked_requests: int = Field(..., description="Number of blocked requests") + success_rate: float = Field(..., description="Success rate (0-1)") + average_response_time_ms: float = Field(..., description="Average response time in milliseconds") + top_endpoints: List[tuple[str, int]] = Field(..., description="Most accessed endpoints with counts") + + +# ===== RBAC Schemas (Epic 004) ===== + + +class RoleCreateRequest(BaseModel): + """Schema for creating a new role. + + Attributes: + name: Unique role name + description: Role description + scope: Role scope (global, team, personal) + permissions: List of permission strings + inherits_from: Optional parent role ID + is_system_role: Whether this is a system role + + Examples: + >>> request = RoleCreateRequest( + ... name="team_admin", + ... description="Team administrator with member management", + ... scope="team", + ... permissions=["teams.manage_members", "resources.create"] + ... ) + >>> request.name + 'team_admin' + """ + + name: str = Field(..., description="Unique role name", max_length=255) + description: Optional[str] = Field(None, description="Role description") + scope: str = Field(..., description="Role scope", pattern="^(global|team|personal)$") + permissions: List[str] = Field(..., description="List of permission strings") + inherits_from: Optional[str] = Field(None, description="Parent role ID for inheritance") + is_system_role: Optional[bool] = Field(False, description="Whether this is a system role") + + +class RoleUpdateRequest(BaseModel): + """Schema for updating an existing role. + + Attributes: + name: Optional new name + description: Optional new description + permissions: Optional new permissions list + inherits_from: Optional new parent role + is_active: Optional active status + + Examples: + >>> request = RoleUpdateRequest( + ... description="Updated role description", + ... permissions=["new.permission"] + ... ) + >>> request.description + 'Updated role description' + """ + + name: Optional[str] = Field(None, description="Role name", max_length=255) + description: Optional[str] = Field(None, description="Role description") + permissions: Optional[List[str]] = Field(None, description="List of permission strings") + inherits_from: Optional[str] = Field(None, description="Parent role ID for inheritance") + is_active: Optional[bool] = Field(None, description="Whether role is active") + + +class RoleResponse(BaseModel): + """Schema for role response. + + Attributes: + id: Role identifier + name: Role name + description: Role description + scope: Role scope + permissions: List of permissions + effective_permissions: All permissions including inherited + inherits_from: Parent role ID + created_by: Creator email + is_system_role: Whether system role + is_active: Whether role is active + created_at: Creation timestamp + updated_at: Update timestamp + + Examples: + >>> role = RoleResponse( + ... id="role-123", + ... name="admin", + ... scope="global", + ... permissions=["*"], + ... effective_permissions=["*"], + ... created_by="admin@example.com", + ... is_system_role=True, + ... is_active=True, + ... created_at=datetime.now(), + ... updated_at=datetime.now() + ... ) + >>> role.name + 'admin' + """ + + model_config = ConfigDict(from_attributes=True) + + id: str = Field(..., description="Role identifier") + name: str = Field(..., description="Role name") + description: Optional[str] = Field(None, description="Role description") + scope: str = Field(..., description="Role scope") + permissions: List[str] = Field(..., description="Direct permissions") + effective_permissions: Optional[List[str]] = Field(None, description="All permissions including inherited") + inherits_from: Optional[str] = Field(None, description="Parent role ID") + created_by: str = Field(..., description="Creator email") + is_system_role: bool = Field(..., description="Whether system role") + is_active: bool = Field(..., description="Whether role is active") + created_at: datetime = Field(..., description="Creation timestamp") + updated_at: datetime = Field(..., description="Update timestamp") + + +class UserRoleAssignRequest(BaseModel): + """Schema for assigning a role to a user. + + Attributes: + role_id: Role to assign + scope: Assignment scope + scope_id: Team ID if team-scoped + expires_at: Optional expiration timestamp + + Examples: + >>> request = UserRoleAssignRequest( + ... role_id="role-123", + ... scope="team", + ... scope_id="team-456" + ... ) + >>> request.scope + 'team' + """ + + role_id: str = Field(..., description="Role ID to assign") + scope: str = Field(..., description="Assignment scope", pattern="^(global|team|personal)$") + scope_id: Optional[str] = Field(None, description="Team ID if team-scoped") + expires_at: Optional[datetime] = Field(None, description="Optional expiration timestamp") + + +class UserRoleResponse(BaseModel): + """Schema for user role assignment response. + + Attributes: + id: Assignment identifier + user_email: User email + role_id: Role identifier + role_name: Role name for convenience + scope: Assignment scope + scope_id: Team ID if applicable + granted_by: Who granted the role + granted_at: When role was granted + expires_at: Optional expiration + is_active: Whether assignment is active + + Examples: + >>> user_role = UserRoleResponse( + ... id="assignment-123", + ... user_email="user@example.com", + ... role_id="role-456", + ... role_name="team_admin", + ... scope="team", + ... scope_id="team-789", + ... granted_by="admin@example.com", + ... granted_at=datetime.now(), + ... is_active=True + ... ) + >>> user_role.scope + 'team' + """ + + model_config = ConfigDict(from_attributes=True) + + id: str = Field(..., description="Assignment identifier") + user_email: str = Field(..., description="User email") + role_id: str = Field(..., description="Role identifier") + role_name: Optional[str] = Field(None, description="Role name for convenience") + scope: str = Field(..., description="Assignment scope") + scope_id: Optional[str] = Field(None, description="Team ID if applicable") + granted_by: str = Field(..., description="Who granted the role") + granted_at: datetime = Field(..., description="When role was granted") + expires_at: Optional[datetime] = Field(None, description="Optional expiration") + is_active: bool = Field(..., description="Whether assignment is active") + + +class PermissionCheckRequest(BaseModel): + """Schema for permission check request. + + Attributes: + user_email: User to check + permission: Permission to verify + resource_type: Optional resource type + resource_id: Optional resource ID + team_id: Optional team context + + Examples: + >>> request = PermissionCheckRequest( + ... user_email="user@example.com", + ... permission="tools.create", + ... resource_type="tools" + ... ) + >>> request.permission + 'tools.create' + """ + + user_email: str = Field(..., description="User email to check") + permission: str = Field(..., description="Permission to verify") + resource_type: Optional[str] = Field(None, description="Resource type") + resource_id: Optional[str] = Field(None, description="Resource ID") + team_id: Optional[str] = Field(None, description="Team context") + + +class PermissionCheckResponse(BaseModel): + """Schema for permission check response. + + Attributes: + user_email: User checked + permission: Permission checked + granted: Whether permission was granted + checked_at: When check was performed + checked_by: Who performed the check + + Examples: + >>> response = PermissionCheckResponse( + ... user_email="user@example.com", + ... permission="tools.create", + ... granted=True, + ... checked_at=datetime.now(), + ... checked_by="admin@example.com" + ... ) + >>> response.granted + True + """ + + user_email: str = Field(..., description="User email checked") + permission: str = Field(..., description="Permission checked") + granted: bool = Field(..., description="Whether permission was granted") + checked_at: datetime = Field(..., description="When check was performed") + checked_by: str = Field(..., description="Who performed the check") + + +class PermissionListResponse(BaseModel): + """Schema for available permissions list. + + Attributes: + all_permissions: List of all available permissions + permissions_by_resource: Permissions grouped by resource type + total_count: Total number of permissions + + Examples: + >>> response = PermissionListResponse( + ... all_permissions=["users.create", "tools.read"], + ... permissions_by_resource={"users": ["users.create"], "tools": ["tools.read"]}, + ... total_count=2 + ... ) + >>> response.total_count + 2 + """ + + all_permissions: List[str] = Field(..., description="All available permissions") + permissions_by_resource: Dict[str, List[str]] = Field(..., description="Permissions by resource type") + total_count: int = Field(..., description="Total number of permissions") + + +# ============================================================================== +# SSO Authentication Schemas +# ============================================================================== + + +class SSOProviderResponse(BaseModelWithConfigDict): + """Response schema for SSO provider information. + + Attributes: + id: Provider identifier (e.g., 'github', 'google') + name: Provider name + display_name: Human-readable display name + provider_type: Type of provider ('oauth2', 'oidc') + is_enabled: Whether provider is currently enabled + authorization_url: OAuth authorization URL (optional) + + Examples: + >>> provider = SSOProviderResponse( + ... id="github", + ... name="github", + ... display_name="GitHub", + ... provider_type="oauth2", + ... is_enabled=True + ... ) + >>> provider.id + 'github' + """ + + id: str = Field(..., description="Provider identifier") + name: str = Field(..., description="Provider name") + display_name: str = Field(..., description="Human-readable display name") + provider_type: Optional[str] = Field(None, description="Provider type (oauth2, oidc)") + is_enabled: Optional[bool] = Field(None, description="Whether provider is enabled") + authorization_url: Optional[str] = Field(None, description="OAuth authorization URL") + + +class SSOLoginResponse(BaseModelWithConfigDict): + """Response schema for SSO login initiation. + + Attributes: + authorization_url: URL to redirect user for authentication + state: CSRF state parameter for validation + + Examples: + >>> login = SSOLoginResponse( + ... authorization_url="https://github.com/login/oauth/authorize?...", + ... state="csrf-token-123" + ... ) + >>> "github.com" in login.authorization_url + True + """ + + authorization_url: str = Field(..., description="OAuth authorization URL") + state: str = Field(..., description="CSRF state parameter") + + +class SSOCallbackResponse(BaseModelWithConfigDict): + """Response schema for SSO authentication callback. + + Attributes: + access_token: JWT access token for authenticated user + token_type: Token type (always 'bearer') + expires_in: Token expiration time in seconds + user: User information from SSO provider + + Examples: + >>> callback = SSOCallbackResponse( + ... access_token="jwt.token.here", + ... token_type="bearer", + ... expires_in=3600, + ... user={"email": "user@example.com", "full_name": "User"} + ... ) + >>> callback.token_type + 'bearer' + """ + + access_token: str = Field(..., description="JWT access token") + token_type: str = Field(default="bearer", description="Token type") + expires_in: int = Field(..., description="Token expiration in seconds") + user: Dict[str, Any] = Field(..., description="User information") diff --git a/mcpgateway/services/a2a_service.py b/mcpgateway/services/a2a_service.py index 6a0645c83..6965ba443 100644 --- a/mcpgateway/services/a2a_service.py +++ b/mcpgateway/services/a2a_service.py @@ -17,7 +17,7 @@ # Third-Party import httpx -from sqlalchemy import case, delete, desc, func, select +from sqlalchemy import and_, case, delete, desc, func, or_, select from sqlalchemy.orm import Session # First-Party @@ -25,6 +25,7 @@ from mcpgateway.db import A2AAgentMetric from mcpgateway.schemas import A2AAgentCreate, A2AAgentMetrics, A2AAgentRead, A2AAgentUpdate from mcpgateway.services.logging_service import LoggingService +from mcpgateway.services.team_management_service import TeamManagementService # Initialize logging service first logging_service = LoggingService() @@ -96,6 +97,9 @@ async def register_agent( created_user_agent: Optional[str] = None, import_batch_id: Optional[str] = None, federation_source: Optional[str] = None, + team_id: Optional[str] = None, + owner_email: Optional[str] = None, + visibility: str = "private", ) -> A2AAgentRead: """Register a new A2A agent. @@ -108,6 +112,9 @@ async def register_agent( created_user_agent: User agent of creation request. import_batch_id: UUID of bulk import batch. federation_source: Source gateway for federated entities. + team_id (Optional[str]): Team ID to assign the agent to. + owner_email (Optional[str]): Email of the user who owns this agent. + visibility (str): Agent visibility level (private, team, public). Returns: The created agent data. @@ -134,6 +141,10 @@ async def register_agent( auth_type=agent_data.auth_type, auth_value=agent_data.auth_value, # This should be encrypted in practice tags=agent_data.tags, + # Team scoping fields - use schema values if provided, otherwise fallback to parameters + team_id=getattr(agent_data, "team_id", None) or team_id, + owner_email=getattr(agent_data, "owner_email", None) or owner_email or created_by, + visibility=getattr(agent_data, "visibility", None) or visibility, created_by=created_by, created_from_ip=created_from_ip, created_via=created_via, @@ -180,6 +191,75 @@ async def list_agents(self, db: Session, cursor: Optional[str] = None, include_i agents = db.execute(query).scalars().all() return [self._db_to_schema(agent) for agent in agents] + async def list_agents_for_user( + self, db: Session, user_email: str, team_id: Optional[str] = None, visibility: Optional[str] = None, include_inactive: bool = False, skip: int = 0, limit: int = 100 + ) -> List[A2AAgentRead]: + """ + List A2A agents user has access to with team filtering. + + Args: + db: Database session + user_email: Email of the user requesting agents + team_id: Optional team ID to filter by specific team + visibility: Optional visibility filter (private, team, public) + include_inactive: Whether to include inactive agents + skip: Number of agents to skip for pagination + limit: Maximum number of agents to return + + Returns: + List[A2AAgentRead]: A2A agents the user has access to + """ + + # Build query following existing patterns from list_agents() + query = select(DbA2AAgent) + + # Apply active/inactive filter + if not include_inactive: + query = query.where(DbA2AAgent.enabled.is_(True)) + + if team_id: + # Filter by specific team + query = query.where(DbA2AAgent.team_id == team_id) + + # Validate user has access to team + team_service = TeamManagementService(db) + user_teams = await team_service.get_user_teams(user_email) + team_ids = [team.id for team in user_teams] + + if team_id not in team_ids: + return [] # No access to team + else: + # Get user's accessible teams + team_service = TeamManagementService(db) + user_teams = await team_service.get_user_teams(user_email) + team_ids = [team.id for team in user_teams] + + # Build access conditions following existing patterns + access_conditions = [] + + # 1. User's personal resources (owner_email matches) + access_conditions.append(DbA2AAgent.owner_email == user_email) + + # 2. Team resources where user is member + if team_ids: + access_conditions.append(and_(DbA2AAgent.team_id.in_(team_ids), DbA2AAgent.visibility.in_(["team", "public"]))) + + # 3. Public resources (if visibility allows) + access_conditions.append(DbA2AAgent.visibility == "public") + + query = query.where(or_(*access_conditions)) + + # Apply visibility filter if specified + if visibility: + query = query.where(DbA2AAgent.visibility == visibility) + + # Apply pagination following existing patterns + query = query.order_by(desc(DbA2AAgent.created_at)) + query = query.offset(skip).limit(limit) + + agents = db.execute(query).scalars().all() + return [self._db_to_schema(agent) for agent in agents] + async def get_agent(self, db: Session, agent_id: str) -> A2AAgentRead: """Retrieve an A2A agent by ID. diff --git a/mcpgateway/services/argon2_service.py b/mcpgateway/services/argon2_service.py new file mode 100644 index 000000000..a496bce5e --- /dev/null +++ b/mcpgateway/services/argon2_service.py @@ -0,0 +1,304 @@ +# -*- coding: utf-8 -*- +"""Location: ./mcpgateway/services/argon2_service.py +Copyright 2025 +SPDX-License-Identifier: Apache-2.0 +Authors: Mihai Criveti + +Argon2id Password Hashing Service. +This module provides secure password hashing and verification using Argon2id, +the winner of the Password Hashing Competition and recommended by OWASP. + +Examples: + >>> from mcpgateway.services.argon2_service import Argon2PasswordService + >>> service = Argon2PasswordService() + >>> hash = service.hash_password("test123") + >>> service.verify_password("test123", hash) + True + >>> service.verify_password("wrong", hash) + False +""" + +# Standard +from typing import Optional + +# Third-Party +from argon2 import PasswordHasher +from argon2.exceptions import HashingError, InvalidHash, VerifyMismatchError + +# First-Party +from mcpgateway.config import settings +from mcpgateway.services.logging_service import LoggingService + +# Initialize logging +logging_service = LoggingService() +logger = logging_service.get_logger(__name__) + + +class Argon2PasswordService: + """Service for Argon2id password hashing and verification. + + This service provides secure password hashing using Argon2id with + configurable parameters for time cost, memory cost, and parallelism. + It follows OWASP recommendations for password storage. + + Attributes: + hasher (PasswordHasher): Configured Argon2 password hasher + + Examples: + >>> service = Argon2PasswordService() + >>> password = "secure_password_123" + >>> hash_value = service.hash_password(password) + >>> service.verify_password(password, hash_value) + True + >>> service.verify_password("wrong_password", hash_value) + False + """ + + def __init__(self, time_cost: Optional[int] = None, memory_cost: Optional[int] = None, parallelism: Optional[int] = None, hash_len: int = 32, salt_len: int = 16): + """Initialize the Argon2 password service. + + Args: + time_cost: Number of iterations (default from settings) + memory_cost: Memory usage in KiB (default from settings) + parallelism: Number of threads (default from settings) + hash_len: Length of the hash in bytes + salt_len: Length of the salt in bytes + + Examples: + >>> service = Argon2PasswordService() + >>> isinstance(service.hasher, PasswordHasher) + True + >>> custom_service = Argon2PasswordService(time_cost=2, memory_cost=32768) + >>> isinstance(custom_service.hasher, PasswordHasher) + True + """ + # Use settings values or provided defaults + self.time_cost = time_cost or getattr(settings, "argon2id_time_cost", 3) + self.memory_cost = memory_cost or getattr(settings, "argon2id_memory_cost", 65536) + self.parallelism = parallelism or getattr(settings, "argon2id_parallelism", 1) + + # Initialize Argon2 password hasher with configured parameters + self.hasher = PasswordHasher(time_cost=self.time_cost, memory_cost=self.memory_cost, parallelism=self.parallelism, hash_len=hash_len, salt_len=salt_len) + + logger.info(f"Initialized Argon2PasswordService with time_cost={self.time_cost}, " f"memory_cost={self.memory_cost}, parallelism={self.parallelism}") + + def hash_password(self, password: str) -> str: + """Hash a password using Argon2id. + + Args: + password: The plain text password to hash + + Returns: + str: The Argon2id hash string + + Raises: + ValueError: If password is empty or None + HashingError: If hashing fails + + Examples: + >>> service = Argon2PasswordService() + >>> hash_value = service.hash_password("test123") + >>> hash_value.startswith("$argon2id$") + True + >>> len(hash_value) > 50 + True + >>> service.hash_password("test123") != service.hash_password("test123") + True + """ + if not password: + raise ValueError("Password cannot be empty or None") + + try: + hash_value = self.hasher.hash(password) + logger.debug("Successfully hashed password for user authentication") + return hash_value + except HashingError as e: + logger.error(f"Failed to hash password: {e}") + raise HashingError(f"Password hashing failed: {e}") from e + + def verify_password(self, password: str, hash_value: str) -> bool: + """Verify a password against its Argon2id hash. + + Args: + password: The plain text password to verify + hash_value: The stored Argon2id hash + + Returns: + bool: True if password matches hash, False otherwise + + Examples: + >>> service = Argon2PasswordService() + >>> hash_val = service.hash_password("correct_password") + >>> service.verify_password("correct_password", hash_val) + True + >>> service.verify_password("wrong_password", hash_val) + False + >>> service.verify_password("", hash_val) + False + """ + if not password or not hash_value: + return False + + try: + # verify() raises VerifyMismatchError if password doesn't match + self.hasher.verify(hash_value, password) + logger.debug("Password verification successful") + return True + except VerifyMismatchError: + logger.debug("Password verification failed - password mismatch") + return False + except (InvalidHash, ValueError) as e: + logger.warning(f"Invalid hash format during verification: {e}") + return False + except Exception as e: + logger.error(f"Unexpected error during password verification: {e}") + return False + + def needs_rehash(self, hash_value: str) -> bool: + """Check if a hash needs to be rehashed due to parameter changes. + + This is useful for gradually updating password hashes when you + change Argon2 parameters (e.g., increasing time_cost for security). + + Args: + hash_value: The stored Argon2id hash to check + + Returns: + bool: True if hash should be updated, False otherwise + + Examples: + >>> service = Argon2PasswordService() + >>> hash_val = service.hash_password("test") + >>> service.needs_rehash(hash_val) + False + >>> service_new = Argon2PasswordService(time_cost=5) + >>> service_new.needs_rehash(hash_val) + True + """ + if not hash_value: + return True + + try: + return self.hasher.check_needs_rehash(hash_value) + except (InvalidHash, ValueError) as e: + logger.warning(f"Invalid hash format when checking rehash need: {e}") + return True + except Exception as e: + logger.error(f"Unexpected error checking rehash need: {e}") + return True + + def get_hash_info(self, hash_value: str) -> Optional[dict]: + """Extract information from an Argon2 hash. + + Args: + hash_value: The Argon2id hash to analyze + + Returns: + dict: Hash parameters or None if invalid + + Examples: + >>> service = Argon2PasswordService() + >>> hash_val = service.hash_password("test") + >>> info = service.get_hash_info(hash_val) + >>> info is not None + True + >>> 'time_cost' in info + True + >>> 'memory_cost' in info + True + """ + if not hash_value: + return None + + try: + # Parse the hash to extract parameters + # Argon2 hash format: $argon2id$v=19$m=65536,t=3,p=1$salt$hash + parts = hash_value.split("$") + if len(parts) < 4 or parts[1] != "argon2id": + return None + + params_part = parts[3] # m=65536,t=3,p=1 + params = {} + + for param in params_part.split(","): + key, value = param.split("=") + if key == "m": + params["memory_cost"] = int(value) + elif key == "t": + params["time_cost"] = int(value) + elif key == "p": + params["parallelism"] = int(value) + + params["variant"] = "argon2id" + if len(parts) > 2: + params["version"] = parts[2] + + return params + except (ValueError, IndexError) as e: + logger.warning(f"Failed to parse Argon2 hash info: {e}") + return None + + def __repr__(self) -> str: + """String representation of the service. + + Returns: + str: String representation of Argon2PasswordService instance + """ + return f"Argon2PasswordService(time_cost={self.time_cost}, " f"memory_cost={self.memory_cost}, parallelism={self.parallelism})" + + +# Global instance for use throughout the application +password_service = Argon2PasswordService() + + +def hash_password(password: str) -> str: + """Hash a password using the global Argon2 service. + + Convenience function for password hashing. + + Args: + password: The password to hash + + Returns: + str: The hashed password + + Examples: + >>> hash_val = hash_password("test123") + >>> hash_val.startswith("$argon2id$") + True + """ + return password_service.hash_password(password) + + +def verify_password(password: str, hash_value: str) -> bool: + """Verify a password using the global Argon2 service. + + Convenience function for password verification. + + Args: + password: The password to verify + hash_value: The stored hash + + Returns: + bool: True if password matches + + Examples: + >>> hash_val = hash_password("test123") + >>> verify_password("test123", hash_val) + True + >>> verify_password("wrong", hash_val) + False + """ + return password_service.verify_password(password, hash_value) + + +def needs_rehash(hash_value: str) -> bool: + """Check if a hash needs rehashing using the global service. + + Args: + hash_value: The hash to check + + Returns: + bool: True if rehash is needed + """ + return password_service.needs_rehash(hash_value) diff --git a/mcpgateway/services/email_auth_service.py b/mcpgateway/services/email_auth_service.py new file mode 100644 index 000000000..e34e76955 --- /dev/null +++ b/mcpgateway/services/email_auth_service.py @@ -0,0 +1,732 @@ +# -*- coding: utf-8 -*- +"""Location: ./mcpgateway/services/email_auth_service.py +Copyright 2025 +SPDX-License-Identifier: Apache-2.0 +Authors: Mihai Criveti + +Email Authentication Service. +This module provides email-based user authentication services including +user creation, authentication, password management, and security features. + +Examples: + Basic usage (requires async context): + from mcpgateway.services.email_auth_service import EmailAuthService + from mcpgateway.db import SessionLocal + + with SessionLocal() as db: + service = EmailAuthService(db) + # Use in async context: + # user = await service.create_user("test@example.com", "password123") + # authenticated = await service.authenticate_user("test@example.com", "password123") +""" + +# Standard +from datetime import datetime, timezone +import re +from typing import Optional + +# Third-Party +from sqlalchemy import delete, select +from sqlalchemy.exc import IntegrityError +from sqlalchemy.orm import Session + +# First-Party +from mcpgateway.config import settings +from mcpgateway.db import EmailAuthEvent, EmailUser +from mcpgateway.services.argon2_service import Argon2PasswordService +from mcpgateway.services.logging_service import LoggingService + +# Initialize logging +logging_service = LoggingService() +logger = logging_service.get_logger(__name__) + + +class EmailValidationError(Exception): + """Raised when email format is invalid.""" + + +class PasswordValidationError(Exception): + """Raised when password doesn't meet policy requirements.""" + + +class UserExistsError(Exception): + """Raised when attempting to create a user that already exists.""" + + +class AuthenticationError(Exception): + """Raised when authentication fails.""" + + +class EmailAuthService: + """Service for email-based user authentication. + + This service handles user registration, authentication, password management, + and security features like account lockout and failed attempt tracking. + + Attributes: + db (Session): Database session + password_service (Argon2PasswordService): Password hashing service + + Examples: + >>> from mcpgateway.db import SessionLocal + >>> with SessionLocal() as db: + ... service = EmailAuthService(db) + ... # Service is ready to use + """ + + def __init__(self, db: Session): + """Initialize the email authentication service. + + Args: + db: SQLAlchemy database session + """ + self.db = db + self.password_service = Argon2PasswordService() + logger.debug("EmailAuthService initialized") + + def validate_email(self, email: str) -> bool: + """Validate email address format. + + Args: + email: Email address to validate + + Returns: + bool: True if email is valid + + Raises: + EmailValidationError: If email format is invalid + + Examples: + >>> service = EmailAuthService(None) + >>> service.validate_email("user@example.com") + True + >>> try: + ... service.validate_email("invalid-email") + ... except EmailValidationError as e: + ... str(e) + 'Invalid email format' + """ + if not email or not isinstance(email, str): + raise EmailValidationError("Email is required and must be a string") + + # Basic email regex pattern + email_pattern = r"^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$" + + if not re.match(email_pattern, email): + raise EmailValidationError("Invalid email format") + + if len(email) > 255: + raise EmailValidationError("Email address too long (max 255 characters)") + + return True + + def validate_password(self, password: str) -> bool: + """Validate password against policy requirements. + + Args: + password: Password to validate + + Returns: + bool: True if password meets policy + + Raises: + PasswordValidationError: If password doesn't meet requirements + + Examples: + >>> service = EmailAuthService(None) + >>> service.validate_password("password123") + True + """ + if not password: + raise PasswordValidationError("Password is required") + + # Get password policy settings + min_length = getattr(settings, "password_min_length", 8) + require_uppercase = getattr(settings, "password_require_uppercase", False) + require_lowercase = getattr(settings, "password_require_lowercase", False) + require_numbers = getattr(settings, "password_require_numbers", False) + require_special = getattr(settings, "password_require_special", False) + + if len(password) < min_length: + raise PasswordValidationError(f"Password must be at least {min_length} characters long") + + if require_uppercase and not re.search(r"[A-Z]", password): + raise PasswordValidationError("Password must contain at least one uppercase letter") + + if require_lowercase and not re.search(r"[a-z]", password): + raise PasswordValidationError("Password must contain at least one lowercase letter") + + if require_numbers and not re.search(r"[0-9]", password): + raise PasswordValidationError("Password must contain at least one number") + + if require_special and not re.search(r'[!@#$%^&*(),.?":{}|<>]', password): + raise PasswordValidationError("Password must contain at least one special character") + + return True + + async def get_user_by_email(self, email: str) -> Optional[EmailUser]: + """Get user by email address. + + Args: + email: Email address to look up + + Returns: + EmailUser or None if not found + + Examples: + # Assuming database has user "test@example.com" + # user = await service.get_user_by_email("test@example.com") + # user.email if user else None # Returns: 'test@example.com' + """ + try: + stmt = select(EmailUser).where(EmailUser.email == email.lower()) + result = self.db.execute(stmt) + return result.scalar_one_or_none() + except Exception as e: + logger.error(f"Error getting user by email {email}: {e}") + return None + + async def create_user(self, email: str, password: str, full_name: Optional[str] = None, is_admin: bool = False, auth_provider: str = "local") -> EmailUser: + """Create a new user with email authentication. + + Args: + email: User's email address (primary key) + password: Plain text password (will be hashed) + full_name: Optional full name for display + is_admin: Whether user has admin privileges + auth_provider: Authentication provider ('local', 'github', etc.) + + Returns: + EmailUser: The created user object + + Raises: + EmailValidationError: If email format is invalid + PasswordValidationError: If password doesn't meet policy + UserExistsError: If user already exists + + Examples: + # user = await service.create_user( + # email="new@example.com", + # password="secure123", + # full_name="New User" + # ) + # user.email # Returns: 'new@example.com' + # user.full_name # Returns: 'New User' + """ + # Normalize email to lowercase + email = email.lower().strip() + + # Validate inputs + self.validate_email(email) + self.validate_password(password) + + # Check if user already exists + existing_user = await self.get_user_by_email(email) + if existing_user: + raise UserExistsError(f"User with email {email} already exists") + + # Hash the password + password_hash = self.password_service.hash_password(password) + + # Create new user + user = EmailUser(email=email, password_hash=password_hash, full_name=full_name, is_admin=is_admin, auth_provider=auth_provider) + + try: + self.db.add(user) + self.db.commit() + self.db.refresh(user) + + logger.info(f"Created new user: {email}") + + # Create personal team if enabled + if getattr(settings, "auto_create_personal_teams", True): + try: + # Import here to avoid circular imports + # First-Party + from mcpgateway.services.personal_team_service import PersonalTeamService # pylint: disable=import-outside-toplevel + + personal_team_service = PersonalTeamService(self.db) + personal_team = await personal_team_service.create_personal_team(user) + logger.info(f"Created personal team '{personal_team.name}' for user {email}") + except Exception as e: + logger.warning(f"Failed to create personal team for {email}: {e}") + # Don't fail user creation if personal team creation fails + + # Log registration event + registration_event = EmailAuthEvent.create_registration_event(user_email=email, success=True) + self.db.add(registration_event) + self.db.commit() + + return user + + except IntegrityError as e: + self.db.rollback() + logger.error(f"Database error creating user {email}: {e}") + raise UserExistsError(f"User with email {email} already exists") from e + except Exception as e: + self.db.rollback() + logger.error(f"Unexpected error creating user {email}: {e}") + + # Log failed registration + registration_event = EmailAuthEvent.create_registration_event(user_email=email, success=False, failure_reason=str(e)) + self.db.add(registration_event) + self.db.commit() + + raise + + async def authenticate_user(self, email: str, password: str, ip_address: Optional[str] = None, user_agent: Optional[str] = None) -> Optional[EmailUser]: + """Authenticate a user with email and password. + + Args: + email: User's email address + password: Plain text password + ip_address: Client IP address for logging + user_agent: Client user agent for logging + + Returns: + EmailUser if authentication successful, None otherwise + + Examples: + # user = await service.authenticate_user("user@example.com", "correct_password") + # user.email if user else None # Returns: 'user@example.com' + # await service.authenticate_user("user@example.com", "wrong_password") # Returns: None + """ + email = email.lower().strip() + + # Get user from database + user = await self.get_user_by_email(email) + + # Track authentication attempt + auth_success = False + failure_reason = None + + try: + if not user: + failure_reason = "User not found" + logger.info(f"Authentication failed for {email}: user not found") + return None + + if not user.is_active: + failure_reason = "Account is disabled" + logger.info(f"Authentication failed for {email}: account disabled") + return None + + if user.is_account_locked(): + failure_reason = "Account is locked" + logger.info(f"Authentication failed for {email}: account locked") + return None + + # Verify password + if not self.password_service.verify_password(password, user.password_hash): + failure_reason = "Invalid password" + + # Increment failed attempts + max_attempts = getattr(settings, "max_failed_login_attempts", 5) + lockout_duration = getattr(settings, "account_lockout_duration_minutes", 30) + + is_locked = user.increment_failed_attempts(max_attempts, lockout_duration) + + if is_locked: + logger.warning(f"Account locked for {email} after {max_attempts} failed attempts") + failure_reason = "Account locked due to too many failed attempts" + + self.db.commit() + logger.info(f"Authentication failed for {email}: invalid password") + return None + + # Authentication successful + user.reset_failed_attempts() + self.db.commit() + + auth_success = True + logger.info(f"Authentication successful for {email}") + + return user + + finally: + # Log authentication event + auth_event = EmailAuthEvent.create_login_attempt(user_email=email, success=auth_success, ip_address=ip_address, user_agent=user_agent, failure_reason=failure_reason) + self.db.add(auth_event) + self.db.commit() + + async def change_password(self, email: str, old_password: str, new_password: str, ip_address: Optional[str] = None, user_agent: Optional[str] = None) -> bool: + """Change a user's password. + + Args: + email: User's email address + old_password: Current password for verification + new_password: New password to set + ip_address: Client IP address for logging + user_agent: Client user agent for logging + + Returns: + bool: True if password changed successfully + + Raises: + AuthenticationError: If old password is incorrect + PasswordValidationError: If new password doesn't meet policy + Exception: If database operation fails + + Examples: + # success = await service.change_password( + # "user@example.com", + # "old_password", + # "new_secure_password" + # ) + # success # Returns: True + """ + # First authenticate with old password + user = await self.authenticate_user(email, old_password, ip_address, user_agent) + if not user: + raise AuthenticationError("Current password is incorrect") + + # Validate new password + self.validate_password(new_password) + + # Check if new password is same as old (optional policy) + if self.password_service.verify_password(new_password, user.password_hash): + raise PasswordValidationError("New password must be different from current password") + + success = False + try: + # Hash new password and update + new_password_hash = self.password_service.hash_password(new_password) + user.password_hash = new_password_hash + + self.db.commit() + success = True + + logger.info(f"Password changed successfully for {email}") + + except Exception as e: + self.db.rollback() + logger.error(f"Error changing password for {email}: {e}") + raise + finally: + # Log password change event + password_event = EmailAuthEvent.create_password_change_event(user_email=email, success=success, ip_address=ip_address, user_agent=user_agent) + self.db.add(password_event) + self.db.commit() + + return success + + async def create_platform_admin(self, email: str, password: str, full_name: Optional[str] = None) -> EmailUser: + """Create or update the platform administrator user. + + This method is used during system bootstrap to create the initial + admin user from environment variables. + + Args: + email: Admin email address + password: Admin password + full_name: Admin full name + + Returns: + EmailUser: The admin user + + Examples: + # admin = await service.create_platform_admin( + # "admin@example.com", + # "admin_password", + # "Platform Administrator" + # ) + # admin.is_admin # Returns: True + """ + # Check if admin user already exists + existing_admin = await self.get_user_by_email(email) + + if existing_admin: + # Update existing admin if password or name changed + if full_name and existing_admin.full_name != full_name: + existing_admin.full_name = full_name + + # Check if password needs update (verify current password first) + if not self.password_service.verify_password(password, existing_admin.password_hash): + existing_admin.password_hash = self.password_service.hash_password(password) + + # Ensure admin status + existing_admin.is_admin = True + existing_admin.is_active = True + + self.db.commit() + logger.info(f"Updated platform admin user: {email}") + return existing_admin + + # Create new admin user + admin_user = await self.create_user(email=email, password=password, full_name=full_name, is_admin=True, auth_provider="local") + + logger.info(f"Created platform admin user: {email}") + return admin_user + + async def update_last_login(self, email: str) -> None: + """Update the last login timestamp for a user. + + Args: + email: User's email address + """ + user = await self.get_user_by_email(email) + if user: + user.reset_failed_attempts() # This also updates last_login + self.db.commit() + + async def list_users(self, limit: int = 100, offset: int = 0) -> list[EmailUser]: + """List all users with pagination. + + Args: + limit: Maximum number of users to return + offset: Number of users to skip + + Returns: + List of EmailUser objects + + Examples: + # users = await service.list_users(limit=10) + # len(users) <= 10 # Returns: True + """ + try: + stmt = select(EmailUser).offset(offset).limit(limit) + result = self.db.execute(stmt) + return list(result.scalars().all()) + except Exception as e: + logger.error(f"Error listing users: {e}") + return [] + + async def get_all_users(self) -> list[EmailUser]: + """Get all users without pagination. + + Returns: + List of all EmailUser objects + + Examples: + # users = await service.get_all_users() + # isinstance(users, list) # Returns: True + """ + return await self.list_users(limit=10000) # Large limit to get all users + + async def count_users(self) -> int: + """Count total number of users. + + Returns: + int: Total user count + """ + try: + stmt = select(EmailUser) + result = self.db.execute(stmt) + return len(list(result.scalars().all())) + except Exception as e: + logger.error(f"Error counting users: {e}") + return 0 + + async def get_auth_events(self, email: Optional[str] = None, limit: int = 100, offset: int = 0) -> list[EmailAuthEvent]: + """Get authentication events for auditing. + + Args: + email: Filter by specific user email (optional) + limit: Maximum number of events to return + offset: Number of events to skip + + Returns: + List of EmailAuthEvent objects + """ + try: + stmt = select(EmailAuthEvent) + if email: + stmt = stmt.where(EmailAuthEvent.user_email == email) + stmt = stmt.order_by(EmailAuthEvent.timestamp.desc()).offset(offset).limit(limit) + + result = self.db.execute(stmt) + return list(result.scalars().all()) + except Exception as e: + logger.error(f"Error getting auth events: {e}") + return [] + + async def update_user(self, email: str, full_name: Optional[str] = None, is_admin: Optional[bool] = None, password: Optional[str] = None) -> EmailUser: + """Update user information. + + Args: + email: User's email address (primary key) + full_name: New full name (optional) + is_admin: New admin status (optional) + password: New password (optional, will be hashed) + + Returns: + EmailUser: Updated user object + + Raises: + ValueError: If user doesn't exist + PasswordValidationError: If password doesn't meet policy + """ + try: + # Get existing user + stmt = select(EmailUser).where(EmailUser.email == email) + result = self.db.execute(stmt) + user = result.scalar_one_or_none() + + if not user: + raise ValueError(f"User {email} not found") + + # Update fields if provided + if full_name is not None: + user.full_name = full_name + + if is_admin is not None: + user.is_admin = is_admin + + if password is not None: + if not self.validate_password(password): + raise ValueError("Password does not meet security requirements") + user.password_hash = self.password_service.hash_password(password) + + user.updated_at = datetime.now(timezone.utc) + + self.db.commit() + return user + + except Exception as e: + self.db.rollback() + logger.error(f"Error updating user {email}: {e}") + raise + + async def activate_user(self, email: str) -> EmailUser: + """Activate a user account. + + Args: + email: User's email address + + Returns: + EmailUser: Updated user object + + Raises: + ValueError: If user doesn't exist + """ + try: + stmt = select(EmailUser).where(EmailUser.email == email) + result = self.db.execute(stmt) + user = result.scalar_one_or_none() + + if not user: + raise ValueError(f"User {email} not found") + + user.is_active = True + user.updated_at = datetime.now(timezone.utc) + + self.db.commit() + logger.info(f"User {email} activated") + return user + + except Exception as e: + self.db.rollback() + logger.error(f"Error activating user {email}: {e}") + raise + + async def deactivate_user(self, email: str) -> EmailUser: + """Deactivate a user account. + + Args: + email: User's email address + + Returns: + EmailUser: Updated user object + + Raises: + ValueError: If user doesn't exist + """ + try: + stmt = select(EmailUser).where(EmailUser.email == email) + result = self.db.execute(stmt) + user = result.scalar_one_or_none() + + if not user: + raise ValueError(f"User {email} not found") + + user.is_active = False + user.updated_at = datetime.now(timezone.utc) + + self.db.commit() + logger.info(f"User {email} deactivated") + return user + + except Exception as e: + self.db.rollback() + logger.error(f"Error deactivating user {email}: {e}") + raise + + async def delete_user(self, email: str) -> bool: + """Delete a user account permanently. + + Args: + email: User's email address + + Returns: + bool: True if user was deleted + + Raises: + ValueError: If user doesn't exist + ValueError: If user owns teams that cannot be transferred + """ + try: + stmt = select(EmailUser).where(EmailUser.email == email) + result = self.db.execute(stmt) + user = result.scalar_one_or_none() + + if not user: + raise ValueError(f"User {email} not found") + + # Check if user owns any teams + # First-Party + from mcpgateway.db import EmailTeam, EmailTeamMember # pylint: disable=import-outside-toplevel + + teams_owned_stmt = select(EmailTeam).where(EmailTeam.created_by == email) + teams_owned = self.db.execute(teams_owned_stmt).scalars().all() + + if teams_owned: + # For each team, try to transfer ownership to another owner + for team in teams_owned: + # Find other team owners who can take ownership + potential_owners_stmt = ( + select(EmailTeamMember).where(EmailTeamMember.team_id == team.id, EmailTeamMember.user_email != email, EmailTeamMember.role == "owner").order_by(EmailTeamMember.role.desc()) + ) + + potential_owners = self.db.execute(potential_owners_stmt).scalars().all() + + if potential_owners: + # Transfer ownership to the first available owner + new_owner = potential_owners[0] + team.created_by = new_owner.user_email + logger.info(f"Transferred team '{team.name}' ownership from {email} to {new_owner.user_email}") + else: + # No other owners available - check if it's a single-user team + all_members_stmt = select(EmailTeamMember).where(EmailTeamMember.team_id == team.id) + all_members = self.db.execute(all_members_stmt).scalars().all() + + if len(all_members) == 1 and all_members[0].user_email == email: + # This is a single-user personal team - cascade delete it + logger.info(f"Deleting personal team '{team.name}' (single member: {email})") + # Delete team members first (should be just the owner) + delete_team_members_stmt = delete(EmailTeamMember).where(EmailTeamMember.team_id == team.id) + self.db.execute(delete_team_members_stmt) + # Delete the team + self.db.delete(team) + else: + # Multi-member team with no other owners - cannot delete user + raise ValueError(f"Cannot delete user {email}: owns team '{team.name}' with {len(all_members)} members but no other owners to transfer ownership to") + + # Delete related auth events first + auth_events_stmt = delete(EmailAuthEvent).where(EmailAuthEvent.user_email == email) + self.db.execute(auth_events_stmt) + + # Remove user from all team memberships + team_members_stmt = delete(EmailTeamMember).where(EmailTeamMember.user_email == email) + self.db.execute(team_members_stmt) + + # Delete the user + self.db.delete(user) + self.db.commit() + + logger.info(f"User {email} deleted permanently") + return True + + except Exception as e: + self.db.rollback() + logger.error(f"Error deleting user {email}: {e}") + raise diff --git a/mcpgateway/services/gateway_service.py b/mcpgateway/services/gateway_service.py index af8f9607e..f18cd7116 100644 --- a/mcpgateway/services/gateway_service.py +++ b/mcpgateway/services/gateway_service.py @@ -54,7 +54,7 @@ from mcp import ClientSession from mcp.client.sse import sse_client from mcp.client.streamable_http import streamablehttp_client -from sqlalchemy import select +from sqlalchemy import and_, or_, select from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import Session @@ -401,6 +401,9 @@ async def register_gateway( created_from_ip: Optional[str] = None, created_via: Optional[str] = None, created_user_agent: Optional[str] = None, + team_id: Optional[str] = None, + owner_email: Optional[str] = None, + visibility: Optional[str] = None, ) -> GatewayRead: """Register a new gateway. @@ -411,6 +414,9 @@ async def register_gateway( created_from_ip: IP address of creator created_via: Creation method (ui, api, federation) created_user_agent: User agent of creation request + team_id (Optional[str]): Team ID to assign the gateway to. + owner_email (Optional[str]): Email of the user who owns this gateway. + visibility (Optional[str]): Gateway visibility level (private, team, public). Returns: Created gateway information @@ -488,6 +494,10 @@ async def register_gateway( created_user_agent=created_user_agent, federation_source=gateway.name, version=1, + # Inherit team assignment from gateway + team_id=team_id, + owner_email=owner_email, + visibility="public", # Federated tools should be public for discovery ) for tool in tools ] @@ -507,6 +517,10 @@ async def register_gateway( created_user_agent=created_user_agent, federation_source=gateway.name, version=1, + # Inherit team assignment from gateway + team_id=team_id, + owner_email=owner_email, + visibility="public", # Federated tools should be public for discovery ) for resource in resources ] @@ -525,6 +539,10 @@ async def register_gateway( created_user_agent=created_user_agent, federation_source=gateway.name, version=1, + # Inherit team assignment from gateway + team_id=team_id, + owner_email=owner_email, + visibility="public", # Federated tools should be public for discovery ) for prompt in prompts ] @@ -551,6 +569,10 @@ async def register_gateway( created_via=created_via or "api", created_user_agent=created_user_agent, version=1, + # Team scoping fields + team_id=team_id, + owner_email=owner_email, + visibility="public" if visibility != "private" else visibility, # Default to public for federation unless explicitly private ) # Add to DB @@ -749,6 +771,74 @@ async def list_gateways(self, db: Session, include_inactive: bool = False) -> Li return [GatewayRead.model_validate(g).masked() for g in gateways] + async def list_gateways_for_user( + self, db: Session, user_email: str, team_id: Optional[str] = None, visibility: Optional[str] = None, include_inactive: bool = False, skip: int = 0, limit: int = 100 + ) -> List[GatewayRead]: + """ + List gateways user has access to with team filtering. + + Args: + db: Database session + user_email: Email of the user requesting gateways + team_id: Optional team ID to filter by specific team + visibility: Optional visibility filter (private, team, public) + include_inactive: Whether to include inactive gateways + skip: Number of gateways to skip for pagination + limit: Maximum number of gateways to return + + Returns: + List[GatewayRead]: Gateways the user has access to + """ + # Build query following existing patterns from list_gateways() + query = select(DbGateway) + + # Apply active/inactive filter + if not include_inactive: + query = query.where(DbGateway.enabled.is_(True)) + + if team_id: + # Filter by specific team + query = query.where(DbGateway.team_id == team_id) + # Validate user has access to team + # First-Party + from mcpgateway.services.team_management_service import TeamManagementService # pylint: disable=import-outside-toplevel + + team_service = TeamManagementService(db) + user_teams = await team_service.get_user_teams(user_email) + team_ids = [team.id for team in user_teams] + if team_id not in team_ids: + return [] # No access to team + else: + # Get user's accessible teams + # First-Party + from mcpgateway.services.team_management_service import TeamManagementService # pylint: disable=import-outside-toplevel + + team_service = TeamManagementService(db) + user_teams = await team_service.get_user_teams(user_email) + team_ids = [team.id for team in user_teams] + + # Build access conditions following existing patterns + access_conditions = [] + # 1. User's personal resources (owner_email matches) + access_conditions.append(DbGateway.owner_email == user_email) + # 2. Team resources where user is member + if team_ids: + access_conditions.append(and_(DbGateway.team_id.in_(team_ids), DbGateway.visibility.in_(["team", "public"]))) + # 3. Public resources (if visibility allows) + access_conditions.append(DbGateway.visibility == "public") + + query = query.where(or_(*access_conditions)) + + # Apply visibility filter if specified + if visibility: + query = query.where(DbGateway.visibility == visibility) + + # Apply pagination following existing patterns + query = query.offset(skip).limit(limit) + + gateways = db.execute(query).scalars().all() + return [GatewayRead.model_validate(g).masked() for g in gateways] + async def update_gateway(self, db: Session, gateway_id: str, gateway_update: GatewayUpdate, include_inactive: bool = True) -> GatewayRead: """Update a gateway. diff --git a/mcpgateway/services/logging_service.py b/mcpgateway/services/logging_service.py index 4770a7d74..1f3fc522e 100644 --- a/mcpgateway/services/logging_service.py +++ b/mcpgateway/services/logging_service.py @@ -20,6 +20,13 @@ # Third-Party from pythonjsonlogger import jsonlogger # You may need to install python-json-logger package +try: + # Optional import; only used for filtering a known benign upstream error + # Third-Party + from anyio import ClosedResourceError as AnyioClosedResourceError # type: ignore # pylint: disable=invalid-name +except Exception: # pragma: no cover - environment without anyio + AnyioClosedResourceError = None # pylint: disable=invalid-name # fallback if anyio is not present + # First-Party from mcpgateway.config import settings from mcpgateway.models import LogLevel @@ -224,6 +231,9 @@ async def initialize(self) -> None: logging.info("Logging service initialized") + # Suppress noisy upstream logs for normal stream closures in MCP streamable HTTP + self._install_closedresourceerror_filter() + async def shutdown(self) -> None: """Shutdown logging service. @@ -237,6 +247,91 @@ async def shutdown(self) -> None: self._subscribers.clear() logging.info("Logging service shutdown") + def _install_closedresourceerror_filter(self) -> None: + """Install a filter to drop benign ClosedResourceError logs from upstream MCP. + + The MCP streamable HTTP server logs an ERROR when the in-memory channel is + closed during normal client disconnects, raising ``anyio.ClosedResourceError``. + This filter suppresses those specific records to keep logs clean. + + Examples: + >>> # Initialize service (installs filter) + >>> import asyncio, logging, anyio + >>> service = LoggingService() + >>> asyncio.run(service.initialize()) + >>> # Locate the installed filter on the target logger + >>> target = logging.getLogger('mcp.server.streamable_http') + >>> flts = [f for f in target.filters if f.__class__.__name__.endswith('SuppressClosedResourceErrorFilter')] + >>> len(flts) >= 1 + True + >>> filt = flts[0] + >>> # Non-target logger should pass through even if message matches + >>> rec_other = logging.makeLogRecord({'name': 'other.logger', 'msg': 'ClosedResourceError'}) + >>> filt.filter(rec_other) + True + >>> # Target logger with message containing ClosedResourceError should be suppressed + >>> rec_target_msg = logging.makeLogRecord({'name': 'mcp.server.streamable_http', 'msg': 'ClosedResourceError in normal shutdown'}) + >>> filt.filter(rec_target_msg) + False + >>> # Target logger with ClosedResourceError in exc_info should be suppressed + >>> try: + ... raise anyio.ClosedResourceError + ... except anyio.ClosedResourceError as e: + ... rec_target_exc = logging.makeLogRecord({ + ... 'name': 'mcp.server.streamable_http', + ... 'msg': 'Error in message router', + ... 'exc_info': (e.__class__, e, None), + ... }) + >>> filt.filter(rec_target_exc) + False + >>> # Cleanup + >>> asyncio.run(service.shutdown()) + """ + + class _SuppressClosedResourceErrorFilter(logging.Filter): + """Filter to suppress ClosedResourceError exceptions from MCP streamable HTTP logger. + + This filter prevents noisy ClosedResourceError exceptions from the upstream + MCP streamable HTTP implementation from cluttering the logs. These errors + are typically harmless connection cleanup events. + """ + + def filter(self, record: logging.LogRecord) -> bool: # noqa: D401 + """Filter log records to suppress ClosedResourceError exceptions. + + Args: + record: The log record to evaluate + + Returns: + True to allow the record through, False to suppress it + """ + # Apply only to upstream MCP streamable HTTP logger + if not record.name.startswith("mcp.server.streamable_http"): + return True + + # If exception info is present, check its type + exc_info = getattr(record, "exc_info", None) + if exc_info and AnyioClosedResourceError is not None: + exc_type, exc, _tb = exc_info + try: + if isinstance(exc, AnyioClosedResourceError) or (getattr(exc_type, "__name__", "") == "ClosedResourceError"): + return False + except Exception: + # Be permissive if anything goes wrong, don't drop logs accidentally + return True + + # Fallback: drop if message text clearly indicates ClosedResourceError + try: + msg = record.getMessage() + if "ClosedResourceError" in msg: + return False + except Exception: + pass + return True + + target_logger = logging.getLogger("mcp.server.streamable_http") + target_logger.addFilter(_SuppressClosedResourceErrorFilter()) + def get_logger(self, name: str) -> logging.Logger: """Get or create logger instance. diff --git a/mcpgateway/services/permission_service.py b/mcpgateway/services/permission_service.py new file mode 100644 index 000000000..4e41fe22f --- /dev/null +++ b/mcpgateway/services/permission_service.py @@ -0,0 +1,503 @@ +# -*- coding: utf-8 -*- +"""Permission Service for RBAC System. + +This module provides the core permission checking logic for the RBAC system. +It handles role-based permission validation, permission auditing, and caching. +""" + +# Standard +from datetime import datetime +import logging +from typing import Dict, List, Optional, Set + +# Third-Party +from sqlalchemy import and_, or_, select +from sqlalchemy.orm import Session + +# First-Party +from mcpgateway.db import PermissionAuditLog, Permissions, Role, UserRole, utc_now + +logger = logging.getLogger(__name__) + + +class PermissionService: + """Service for checking and managing user permissions. + + Provides role-based permission checking with caching, auditing, + and support for global, team, and personal scopes. + + Attributes: + db: Database session + audit_enabled: Whether to log permission checks + cache_ttl: Permission cache TTL in seconds + + Examples: + Example usage:: + + service = PermissionService(db_session) + granted = await service.check_permission( + user_email="user@example.com", + permission="tools.create", + resource_type="tools" + ) + # granted -> True or False + """ + + def __init__(self, db: Session, audit_enabled: bool = True): + """Initialize permission service. + + Args: + db: Database session + audit_enabled: Whether to enable permission auditing + """ + self.db = db + self.audit_enabled = audit_enabled + self._permission_cache: Dict[str, Set[str]] = {} + self._cache_timestamps: Dict[str, datetime] = {} + self.cache_ttl = 300 # 5 minutes + + async def check_permission( + self, + user_email: str, + permission: str, + resource_type: Optional[str] = None, + resource_id: Optional[str] = None, + team_id: Optional[str] = None, + ip_address: Optional[str] = None, + user_agent: Optional[str] = None, + ) -> bool: + """Check if user has specific permission. + + Checks user's roles across all applicable scopes (global, team, personal) + and returns True if any role grants the required permission. + + Args: + user_email: Email of the user to check + permission: Permission to check (e.g., 'tools.create') + resource_type: Type of resource being accessed + resource_id: Specific resource ID if applicable + team_id: Team context for the permission check + ip_address: IP address for audit logging + user_agent: User agent for audit logging + + Returns: + bool: True if permission is granted, False otherwise + + Examples: + Check global permission:: + + service = PermissionService(db) + granted = await service.check_permission("user@example.com", "users.read") + # granted -> True + + Check team-scoped permission:: + + granted = await service.check_permission( + "user@example.com", + "teams.manage_members", + team_id="team-123" + ) + # granted -> False + """ + try: + # First check if user is admin (bypass all permission checks) + if await self._is_user_admin(user_email): + return True + + # Get user's effective permissions from roles + user_permissions = await self.get_user_permissions(user_email, team_id) + + # Check if user has the specific permission or wildcard + granted = permission in user_permissions or Permissions.ALL_PERMISSIONS in user_permissions + + # If no explicit permissions found, check fallback permissions for team operations + if not granted and permission.startswith("teams."): + granted = await self._check_team_fallback_permissions(user_email, permission, team_id) + + # Log the permission check if auditing is enabled + if self.audit_enabled: + await self._log_permission_check( + user_email=user_email, + permission=permission, + resource_type=resource_type, + resource_id=resource_id, + team_id=team_id, + granted=granted, + roles_checked=await self._get_roles_for_audit(user_email, team_id), + ip_address=ip_address, + user_agent=user_agent, + ) + + logger.debug(f"Permission check: user={user_email}, permission={permission}, " f"team={team_id}, granted={granted}") + + return granted + + except Exception as e: + logger.error(f"Error checking permission for {user_email}: {e}") + # Default to deny on error + return False + + async def get_user_permissions(self, user_email: str, team_id: Optional[str] = None) -> Set[str]: + """Get all effective permissions for a user. + + Collects permissions from all user's roles across applicable scopes. + Includes role inheritance and handles permission caching. + + Args: + user_email: Email of the user + team_id: Optional team context + + Returns: + Set[str]: All effective permissions for the user + + Examples: + Get user permissions:: + + service = PermissionService(db) + permissions = await service.get_user_permissions("admin@example.com") + # "admin.system_config" in permissions -> True + """ + # Check cache first + cache_key = f"{user_email}:{team_id or 'global'}" + if self._is_cache_valid(cache_key): + return self._permission_cache[cache_key] + + permissions = set() + + # Get all active roles for the user + user_roles = await self._get_user_roles(user_email, team_id) + + # Collect permissions from all roles + for user_role in user_roles: + role_permissions = user_role.role.get_effective_permissions() + permissions.update(role_permissions) + + # Cache the result + self._permission_cache[cache_key] = permissions + self._cache_timestamps[cache_key] = utc_now() + + return permissions + + async def get_user_roles(self, user_email: str, scope: Optional[str] = None, team_id: Optional[str] = None, include_expired: bool = False) -> List[UserRole]: + """Get user's role assignments. + + Args: + user_email: Email of the user + scope: Filter by scope ('global', 'team', 'personal') + team_id: Filter by team ID + include_expired: Whether to include expired roles + + Returns: + List[UserRole]: User's role assignments + + Examples: + Get user roles:: + + service = PermissionService(db) + roles = await service.get_user_roles("user@example.com", scope="team") + # len(roles) > 0 -> True + """ + query = select(UserRole).join(Role).where(and_(UserRole.user_email == user_email, UserRole.is_active.is_(True), Role.is_active.is_(True))) + + if scope: + query = query.where(UserRole.scope == scope) + + if team_id: + query = query.where(UserRole.scope_id == team_id) + + if not include_expired: + now = utc_now() + query = query.where((UserRole.expires_at.is_(None)) | (UserRole.expires_at > now)) + + result = self.db.execute(query) + return result.scalars().all() + + async def has_permission_on_resource(self, user_email: str, permission: str, resource_type: str, resource_id: str, team_id: Optional[str] = None) -> bool: + """Check if user has permission on a specific resource. + + This method can be extended to include resource-specific + permission logic (e.g., resource ownership, sharing rules). + + Args: + user_email: Email of the user + permission: Permission to check + resource_type: Type of resource + resource_id: Specific resource ID + team_id: Team context + + Returns: + bool: True if user has permission on the resource + + Examples: + Check resource permission:: + + service = PermissionService(db) + granted = await service.has_permission_on_resource( + "user@example.com", + "tools.delete", + "tools", + "tool-123" + ) + # granted -> True + """ + # Basic permission check + if not await self.check_permission(user_email=user_email, permission=permission, resource_type=resource_type, resource_id=resource_id, team_id=team_id): + return False + + # NOTE: Add resource-specific logic here in future enhancement + # For example: + # - Check resource ownership + # - Check resource sharing permissions + # - Check resource team membership + + return True + + async def check_admin_permission(self, user_email: str) -> bool: + """Check if user has any admin permissions. + + Args: + user_email: Email of the user + + Returns: + bool: True if user has admin permissions + + Examples: + Check admin permission:: + + service = PermissionService(db) + is_admin = await service.check_admin_permission("admin@example.com") + # is_admin -> True + """ + # First check if user is admin (handles platform admin virtual user) + if await self._is_user_admin(user_email): + return True + + admin_permissions = [Permissions.ADMIN_SYSTEM_CONFIG, Permissions.ADMIN_USER_MANAGEMENT, Permissions.ADMIN_SECURITY_AUDIT, Permissions.ALL_PERMISSIONS] + + user_permissions = await self.get_user_permissions(user_email) + return any(perm in user_permissions for perm in admin_permissions) + + def clear_user_cache(self, user_email: str) -> None: + """Clear cached permissions for a user. + + Should be called when user's roles change. + + Args: + user_email: Email of the user + + Examples: + Clear user cache:: + + service = PermissionService(db) + service.clear_user_cache("user@example.com") + """ + keys_to_remove = [key for key in self._permission_cache if key.startswith(f"{user_email}:")] + + for key in keys_to_remove: + self._permission_cache.pop(key, None) + self._cache_timestamps.pop(key, None) + + logger.debug(f"Cleared permission cache for user: {user_email}") + + def clear_cache(self) -> None: + """Clear all cached permissions. + + Examples: + Clear all cache:: + + service = PermissionService(db) + service.clear_cache() + """ + self._permission_cache.clear() + self._cache_timestamps.clear() + logger.debug("Cleared all permission cache") + + async def _get_user_roles(self, user_email: str, team_id: Optional[str] = None) -> List[UserRole]: + """Get user roles for permission checking. + + Includes global roles and team-specific roles if team_id is provided. + + Args: + user_email: Email address of the user + team_id: Optional team ID to include team-specific roles + + Returns: + List[UserRole]: List of active roles for the user + """ + query = select(UserRole).join(Role).where(and_(UserRole.user_email == user_email, UserRole.is_active.is_(True), Role.is_active.is_(True))) + + # Include global roles and team-specific roles + scope_conditions = [UserRole.scope == "global", UserRole.scope == "personal"] + + if team_id: + scope_conditions.append(and_(UserRole.scope == "team", UserRole.scope_id == team_id)) + + query = query.where(or_(*scope_conditions)) + + # Filter out expired roles + now = utc_now() + query = query.where((UserRole.expires_at.is_(None)) | (UserRole.expires_at > now)) + + result = self.db.execute(query) + return result.scalars().all() + + async def _log_permission_check( + self, + user_email: str, + permission: str, + resource_type: Optional[str], + resource_id: Optional[str], + team_id: Optional[str], + granted: bool, + roles_checked: Dict, + ip_address: Optional[str], + user_agent: Optional[str], + ) -> None: + """Log permission check for auditing. + + Args: + user_email: Email address of the user + permission: Permission being checked + resource_type: Type of resource being accessed + resource_id: ID of specific resource + team_id: ID of team context + granted: Whether permission was granted + roles_checked: Dictionary of roles that were checked + ip_address: IP address of request + user_agent: User agent of request + """ + audit_log = PermissionAuditLog( + user_email=user_email, + permission=permission, + resource_type=resource_type, + resource_id=resource_id, + team_id=team_id, + granted=granted, + roles_checked=roles_checked, + ip_address=ip_address, + user_agent=user_agent, + ) + + self.db.add(audit_log) + self.db.commit() + + async def _get_roles_for_audit(self, user_email: str, team_id: Optional[str]) -> Dict: + """Get role information for audit logging. + + Args: + user_email: Email address of the user + team_id: Optional team ID for context + + Returns: + Dict: Role information for audit logging + """ + user_roles = await self._get_user_roles(user_email, team_id) + return {"roles": [{"id": ur.role_id, "name": ur.role.name, "scope": ur.scope, "permissions": ur.role.permissions} for ur in user_roles]} + + def _is_cache_valid(self, cache_key: str) -> bool: + """Check if cached permissions are still valid. + + Args: + cache_key: Cache key to check validity for + + Returns: + bool: True if cache is valid, False otherwise + """ + if cache_key not in self._permission_cache: + return False + + if cache_key not in self._cache_timestamps: + return False + + age = utc_now() - self._cache_timestamps[cache_key] + return age.total_seconds() < self.cache_ttl + + async def _is_user_admin(self, user_email: str) -> bool: + """Check if user is admin by looking up user record directly. + + Args: + user_email: Email address of the user + + Returns: + bool: True if user is admin + """ + # First-Party + from mcpgateway.config import settings # pylint: disable=import-outside-toplevel + from mcpgateway.db import EmailUser # pylint: disable=import-outside-toplevel + + # Special case for platform admin (virtual user) + if user_email == getattr(settings, "platform_admin_email", ""): + return True + + user = self.db.execute(select(EmailUser).where(EmailUser.email == user_email)).scalar_one_or_none() + return bool(user and user.is_admin) + + async def _check_team_fallback_permissions(self, user_email: str, permission: str, team_id: Optional[str]) -> bool: + """Check fallback team permissions for users without explicit RBAC roles. + + This provides basic team management permissions for authenticated users on teams they belong to. + + Args: + user_email: Email address of the user + permission: Permission being checked + team_id: Team ID context + + Returns: + bool: True if user has fallback permission + """ + if not team_id: + # For global team operations, allow authenticated users to read their teams and create new teams + if permission in ["teams.create", "teams.read"]: + return True + return False + + # Check if user is a member of this team + if not await self._is_team_member(user_email, team_id): + return False + + # Get user's role in the team + user_role = await self._get_user_team_role(user_email, team_id) + + # Define fallback permissions based on team role + if user_role == "owner": + # Team owners get full permissions on their teams + return permission in ["teams.read", "teams.update", "teams.delete", "teams.manage_members", "teams.create"] + if user_role in ["member"]: + # Team members get basic read permissions + return permission in ["teams.read"] + + return False + + async def _is_team_member(self, user_email: str, team_id: str) -> bool: + """Check if user is a member of the specified team. + + Args: + user_email: Email address of the user + team_id: Team ID + + Returns: + bool: True if user is a team member + """ + # First-Party + from mcpgateway.db import EmailTeamMember # pylint: disable=import-outside-toplevel + + member = self.db.execute(select(EmailTeamMember).where(and_(EmailTeamMember.user_email == user_email, EmailTeamMember.team_id == team_id, EmailTeamMember.is_active))).scalar_one_or_none() + + return member is not None + + async def _get_user_team_role(self, user_email: str, team_id: str) -> Optional[str]: + """Get user's role in the specified team. + + Args: + user_email: Email address of the user + team_id: Team ID + + Returns: + Optional[str]: User's role in the team or None if not a member + """ + # First-Party + from mcpgateway.db import EmailTeamMember # pylint: disable=import-outside-toplevel + + member = self.db.execute(select(EmailTeamMember).where(and_(EmailTeamMember.user_email == user_email, EmailTeamMember.team_id == team_id, EmailTeamMember.is_active))).scalar_one_or_none() + + return member.role if member else None diff --git a/mcpgateway/services/personal_team_service.py b/mcpgateway/services/personal_team_service.py new file mode 100644 index 000000000..b5257dead --- /dev/null +++ b/mcpgateway/services/personal_team_service.py @@ -0,0 +1,245 @@ +# -*- coding: utf-8 -*- +"""Location: ./mcpgateway/services/personal_team_service.py +Copyright 2025 +SPDX-License-Identifier: Apache-2.0 +Authors: Mihai Criveti + +Personal Team Service. +This module provides automatic personal team creation and management +for email-based user authentication system. + +Examples: + >>> from mcpgateway.services.personal_team_service import PersonalTeamService + >>> from mcpgateway.db import SessionLocal + >>> db = SessionLocal() + >>> service = PersonalTeamService(db) + >>> # Service handles personal team creation automatically +""" + +# Standard +from typing import Optional + +# Third-Party +from sqlalchemy.orm import Session + +# First-Party +from mcpgateway.db import EmailTeam, EmailTeamMember, EmailUser, utc_now +from mcpgateway.services.logging_service import LoggingService + +# Initialize logging +logging_service = LoggingService() +logger = logging_service.get_logger(__name__) + + +class PersonalTeamService: + """Service for managing personal teams. + + This service handles automatic creation of personal teams for users + and manages team membership for personal workspaces. + + Attributes: + db (Session): SQLAlchemy database session + + Examples: + >>> from mcpgateway.services.personal_team_service import PersonalTeamService + >>> from mcpgateway.db import SessionLocal + >>> db = SessionLocal() + >>> service = PersonalTeamService(db) + >>> service.db is not None + True + """ + + def __init__(self, db: Session): + """Initialize the personal team service. + + Args: + db: SQLAlchemy database session + + Examples: + >>> from mcpgateway.db import SessionLocal + >>> db = SessionLocal() + >>> service = PersonalTeamService(db) + >>> isinstance(service.db, Session) + True + """ + self.db = db + + async def create_personal_team(self, user: EmailUser) -> EmailTeam: + """Create a personal team for a user. + + Args: + user: EmailUser instance for whom to create personal team + + Returns: + EmailTeam: The created personal team + + Raises: + ValueError: If user already has a personal team + Exception: If team creation fails + + Examples: + Personal team creation is handled automatically during user registration. + The team name is derived from the user's full name or email. + """ + try: + # Check if user already has a personal team + existing_team = self.db.query(EmailTeam).filter(EmailTeam.created_by == user.email, EmailTeam.is_personal.is_(True), EmailTeam.is_active.is_(True)).first() + + if existing_team: + logger.warning(f"User {user.email} already has a personal team: {existing_team.id}") + raise ValueError(f"User {user.email} already has a personal team") + + # Generate team name from user's display name + display_name = user.get_display_name() + team_name = f"{display_name}'s Team" + + # Create team slug from email to ensure uniqueness + email_slug = user.email.replace("@", "-").replace(".", "-").lower() + team_slug = f"personal-{email_slug}" + + # Create the personal team + team = EmailTeam( + name=team_name, + slug=team_slug, # Will be auto-generated by event listener if not set + description=f"Personal workspace for {user.email}", + created_by=user.email, + is_personal=True, + visibility="private", + is_active=True, + ) + + self.db.add(team) + self.db.flush() # Get the team ID + + # Add the user as the owner of their personal team + membership = EmailTeamMember(team_id=team.id, user_email=user.email, role="owner", joined_at=utc_now(), is_active=True) + + self.db.add(membership) + self.db.commit() + + logger.info(f"Created personal team '{team.name}' for user {user.email}") + return team + + except Exception as e: + self.db.rollback() + logger.error(f"Failed to create personal team for {user.email}: {e}") + raise + + async def get_personal_team(self, user_email: str) -> Optional[EmailTeam]: + """Get the personal team for a user. + + Args: + user_email: Email address of the user + + Returns: + EmailTeam: The user's personal team or None if not found + + Examples: + Personal team retrieval for accessing user's private workspace. + """ + try: + team = self.db.query(EmailTeam).filter(EmailTeam.created_by == user_email, EmailTeam.is_personal.is_(True), EmailTeam.is_active.is_(True)).first() + + return team + + except Exception as e: + logger.error(f"Failed to get personal team for {user_email}: {e}") + return None + + async def ensure_personal_team(self, user: EmailUser) -> EmailTeam: + """Ensure a user has a personal team, creating one if needed. + + Args: + user: EmailUser instance + + Returns: + EmailTeam: The user's personal team (existing or newly created) + + Raises: + Exception: If team creation or retrieval fails + + Examples: + Used during user login or registration to ensure personal team exists. + """ + try: + # Try to get existing personal team + team = await self.get_personal_team(user.email) + + if team is None: + # Create personal team if it doesn't exist + logger.info(f"Creating missing personal team for user {user.email}") + team = await self.create_personal_team(user) + + return team + + except ValueError: + # User already has a team, get it + team = await self.get_personal_team(user.email) + if team is None: + raise Exception(f"Failed to get or create personal team for {user.email}") + return team + + def is_personal_team(self, team_id: str) -> bool: + """Check if a team is a personal team. + + Args: + team_id: ID of the team to check + + Returns: + bool: True if the team is a personal team, False otherwise + + Examples: + # service = PersonalTeamService(db) + # Check if team deletion should be prevented + # is_personal = service.is_personal_team("team-123") + """ + try: + team = self.db.query(EmailTeam).filter(EmailTeam.id == team_id, EmailTeam.is_active.is_(True)).first() + + return team is not None and team.is_personal + + except Exception as e: + logger.error(f"Failed to check if team {team_id} is personal: {e}") + return False + + async def delete_personal_team(self, team_id: str) -> bool: + """Delete a personal team (not allowed). + + Personal teams cannot be deleted, only deactivated. + + Args: + team_id: ID of the team to delete + + Returns: + bool: False (personal teams cannot be deleted) + + Raises: + ValueError: Always, as personal teams cannot be deleted + + Examples: + Personal teams are protected from deletion to maintain user workspaces. + """ + if self.is_personal_team(team_id): + raise ValueError("Personal teams cannot be deleted") + return False + + async def get_personal_team_owner(self, team_id: str) -> Optional[str]: + """Get the owner email of a personal team. + + Args: + team_id: ID of the personal team + + Returns: + str: Owner email address or None if not found + + Examples: + Used for access control and team management operations. + """ + try: + team = self.db.query(EmailTeam).filter(EmailTeam.id == team_id, EmailTeam.is_personal.is_(True), EmailTeam.is_active.is_(True)).first() + + return team.created_by if team else None + + except Exception as e: + logger.error(f"Failed to get personal team owner for {team_id}: {e}") + return None diff --git a/mcpgateway/services/prompt_service.py b/mcpgateway/services/prompt_service.py index 9e2bb6380..b7b775193 100644 --- a/mcpgateway/services/prompt_service.py +++ b/mcpgateway/services/prompt_service.py @@ -17,6 +17,7 @@ # Standard import asyncio from datetime import datetime, timezone +import os from string import Formatter import time from typing import Any, AsyncGenerator, Dict, List, Optional, Set @@ -121,7 +122,15 @@ def __init__(self) -> None: """ self._event_subscribers: List[asyncio.Queue] = [] self._jinja_env = Environment(autoescape=select_autoescape(["html", "xml"]), trim_blocks=True, lstrip_blocks=True) - self._plugin_manager: PluginManager | None = PluginManager() if settings.plugins_enabled else None + # Initialize plugin manager with env overrides for testability + env_flag = os.getenv("PLUGINS_ENABLED") + if env_flag is not None: + env_enabled = env_flag.strip().lower() in {"1", "true", "yes", "on"} + plugins_enabled = env_enabled + else: + plugins_enabled = settings.plugins_enabled + config_file = os.getenv("PLUGIN_CONFIG_FILE", getattr(settings, "plugin_config_file", "plugins/config.yaml")) + self._plugin_manager: PluginManager | None = PluginManager(config_file) if plugins_enabled else None async def initialize(self) -> None: """Initialize the service.""" @@ -250,6 +259,9 @@ async def register_prompt( created_user_agent: Optional[str] = None, import_batch_id: Optional[str] = None, federation_source: Optional[str] = None, + team_id: Optional[str] = None, + owner_email: Optional[str] = None, + visibility: str = "private", ) -> PromptRead: """Register a new prompt template. @@ -262,6 +274,9 @@ async def register_prompt( created_user_agent: User agent of creation request import_batch_id: UUID for bulk import operations federation_source: Source gateway for federated prompts + team_id (Optional[str]): Team ID to assign the prompt to. + owner_email (Optional[str]): Email of the user who owns this prompt. + visibility (str): Prompt visibility level (private, team, public). Returns: Created prompt information @@ -322,6 +337,10 @@ async def register_prompt( import_batch_id=import_batch_id, federation_source=federation_source, version=1, + # Team scoping fields - use schema values if provided, otherwise fallback to parameters + team_id=getattr(prompt, "team_id", None) or team_id, + owner_email=getattr(prompt, "owner_email", None) or owner_email or created_by, + visibility=getattr(prompt, "visibility", None) or visibility, ) # Add to DB @@ -396,6 +415,79 @@ async def list_prompts(self, db: Session, include_inactive: bool = False, cursor prompts = db.execute(query).scalars().all() return [PromptRead.model_validate(self._convert_db_prompt(p)) for p in prompts] + async def list_prompts_for_user( + self, db: Session, user_email: str, team_id: Optional[str] = None, visibility: Optional[str] = None, include_inactive: bool = False, skip: int = 0, limit: int = 100 + ) -> List[PromptRead]: + """ + List prompts user has access to with team filtering. + + Args: + db: Database session + user_email: Email of the user requesting prompts + team_id: Optional team ID to filter by specific team + visibility: Optional visibility filter (private, team, public) + include_inactive: Whether to include inactive prompts + skip: Number of prompts to skip for pagination + limit: Maximum number of prompts to return + + Returns: + List[PromptRead]: Prompts the user has access to + """ + # First-Party + from mcpgateway.services.team_management_service import TeamManagementService # pylint: disable=import-outside-toplevel + + # Build query following existing patterns from list_prompts() + query = select(DbPrompt) + + # Apply active/inactive filter + if not include_inactive: + query = query.where(DbPrompt.is_active) + + if team_id: + # Filter by specific team + query = query.where(DbPrompt.team_id == team_id) + + # Validate user has access to team + team_service = TeamManagementService(db) + user_teams = await team_service.get_user_teams(user_email) + team_ids = [team.id for team in user_teams] + + if team_id not in team_ids: + return [] # No access to team + else: + # Get user's accessible teams + team_service = TeamManagementService(db) + user_teams = await team_service.get_user_teams(user_email) + team_ids = [team.id for team in user_teams] + + # Build access conditions following existing patterns + # Third-Party + from sqlalchemy import and_, or_ # pylint: disable=import-outside-toplevel + + access_conditions = [] + + # 1. User's personal resources (owner_email matches) + access_conditions.append(DbPrompt.owner_email == user_email) + + # 2. Team resources where user is member + if team_ids: + access_conditions.append(and_(DbPrompt.team_id.in_(team_ids), DbPrompt.visibility.in_(["team", "public"]))) + + # 3. Public resources (if visibility allows) + access_conditions.append(DbPrompt.visibility == "public") + + query = query.where(or_(*access_conditions)) + + # Apply visibility filter if specified + if visibility: + query = query.where(DbPrompt.visibility == visibility) + + # Apply pagination following existing patterns + query = query.offset(skip).limit(limit) + + prompts = db.execute(query).scalars().all() + return [PromptRead.model_validate(self._convert_db_prompt(p)) for p in prompts] + async def list_server_prompts(self, db: Session, server_id: str, include_inactive: bool = False, cursor: Optional[str] = None) -> List[PromptRead]: """ Retrieve a list of prompt templates from the database. diff --git a/mcpgateway/services/resource_service.py b/mcpgateway/services/resource_service.py index fe5bd0bd0..9ad1460a7 100644 --- a/mcpgateway/services/resource_service.py +++ b/mcpgateway/services/resource_service.py @@ -113,13 +113,26 @@ def __init__(self) -> None: self._event_subscribers: Dict[str, List[asyncio.Queue]] = {} self._template_cache: Dict[str, ResourceTemplate] = {} - # Initialize plugin manager if plugins are enabled + # Initialize plugin manager if plugins are enabled in settings self._plugin_manager = None - if PLUGINS_AVAILABLE and os.getenv("PLUGINS_ENABLED", "false").lower() == "true": + if PLUGINS_AVAILABLE: try: - config_file = os.getenv("PLUGIN_CONFIG_FILE", "plugins/config.yaml") - self._plugin_manager = PluginManager(config_file) - logger.info(f"Plugin manager initialized for ResourceService with config: {config_file}") + # First-Party + from mcpgateway.config import settings # pylint: disable=import-outside-toplevel + + # Support env overrides for testability without reloading settings + env_flag = os.getenv("PLUGINS_ENABLED") + if env_flag is not None: + env_enabled = env_flag.strip().lower() in {"1", "true", "yes", "on"} + plugins_enabled = env_enabled + else: + plugins_enabled = settings.plugins_enabled + + config_file = os.getenv("PLUGIN_CONFIG_FILE", settings.plugin_config_file) + + if plugins_enabled: + self._plugin_manager = PluginManager(config_file) + logger.info(f"Plugin manager initialized for ResourceService with config: {config_file}") except Exception as e: logger.warning(f"Plugin manager initialization failed in ResourceService: {e}") self._plugin_manager = None @@ -229,6 +242,9 @@ async def register_resource( created_user_agent: Optional[str] = None, import_batch_id: Optional[str] = None, federation_source: Optional[str] = None, + team_id: Optional[str] = None, + owner_email: Optional[str] = None, + visibility: str = "private", ) -> ResourceRead: """Register a new resource. @@ -241,6 +257,9 @@ async def register_resource( created_user_agent: User agent of the creator import_batch_id: Optional batch ID for bulk imports federation_source: Optional source of the resource if federated + team_id (Optional[str]): Team ID to assign the resource to. + owner_email (Optional[str]): Email of the user who owns this resource. + visibility (str): Resource visibility level (private, team, public). Returns: Created resource information @@ -294,6 +313,10 @@ async def register_resource( import_batch_id=import_batch_id, federation_source=federation_source, version=1, + # Team scoping fields - use schema values if provided, otherwise fallback to parameters + team_id=getattr(resource, "team_id", None) or team_id, + owner_email=getattr(resource, "owner_email", None) or owner_email or created_by, + visibility=getattr(resource, "visibility", None) or visibility, ) # Add to DB @@ -361,6 +384,79 @@ async def list_resources(self, db: Session, include_inactive: bool = False, tags resources = db.execute(query).scalars().all() return [self._convert_resource_to_read(r) for r in resources] + async def list_resources_for_user( + self, db: Session, user_email: str, team_id: Optional[str] = None, visibility: Optional[str] = None, include_inactive: bool = False, skip: int = 0, limit: int = 100 + ) -> List[ResourceRead]: + """ + List resources user has access to with team filtering. + + Args: + db: Database session + user_email: Email of the user requesting resources + team_id: Optional team ID to filter by specific team + visibility: Optional visibility filter (private, team, public) + include_inactive: Whether to include inactive resources + skip: Number of resources to skip for pagination + limit: Maximum number of resources to return + + Returns: + List[ResourceRead]: Resources the user has access to + """ + # First-Party + from mcpgateway.services.team_management_service import TeamManagementService # pylint: disable=import-outside-toplevel + + # Build query following existing patterns from list_resources() + query = select(DbResource) + + # Apply active/inactive filter + if not include_inactive: + query = query.where(DbResource.is_active) + + if team_id: + # Filter by specific team + query = query.where(DbResource.team_id == team_id) + + # Validate user has access to team + team_service = TeamManagementService(db) + user_teams = await team_service.get_user_teams(user_email) + team_ids = [team.id for team in user_teams] + + if team_id not in team_ids: + return [] # No access to team + else: + # Get user's accessible teams + team_service = TeamManagementService(db) + user_teams = await team_service.get_user_teams(user_email) + team_ids = [team.id for team in user_teams] + + # Build access conditions following existing patterns + # Third-Party + from sqlalchemy import and_, or_ # pylint: disable=import-outside-toplevel + + access_conditions = [] + + # 1. User's personal resources (owner_email matches) + access_conditions.append(DbResource.owner_email == user_email) + + # 2. Team resources where user is member + if team_ids: + access_conditions.append(and_(DbResource.team_id.in_(team_ids), DbResource.visibility.in_(["team", "public"]))) + + # 3. Public resources (if visibility allows) + access_conditions.append(DbResource.visibility == "public") + + query = query.where(or_(*access_conditions)) + + # Apply visibility filter if specified + if visibility: + query = query.where(DbResource.visibility == visibility) + + # Apply pagination following existing patterns + query = query.offset(skip).limit(limit) + + resources = db.execute(query).scalars().all() + return [self._convert_resource_to_read(r) for r in resources] + async def list_server_resources(self, db: Session, server_id: str, include_inactive: bool = False) -> List[ResourceRead]: """ Retrieve a list of registered resources from the database. @@ -419,13 +515,14 @@ async def read_resource(self, db: Session, uri: str, request_id: Optional[str] = Examples: >>> from mcpgateway.services.resource_service import ResourceService >>> from unittest.mock import MagicMock + >>> from mcpgateway.models import ResourceContent >>> service = ResourceService() >>> db = MagicMock() >>> uri = 'http://example.com/resource.txt' >>> db.execute.return_value.scalar_one_or_none.return_value = MagicMock(content='test') >>> import asyncio >>> result = asyncio.run(service.read_resource(db, uri)) - >>> result == 'test' + >>> isinstance(result, ResourceContent) True """ start_time = time.monotonic() @@ -450,7 +547,8 @@ async def read_resource(self, db: Session, uri: str, request_id: Optional[str] = contexts = None # Call pre-fetch hooks if plugin manager is available - if self._plugin_manager and PLUGINS_AVAILABLE: + plugin_eligible = bool(self._plugin_manager and PLUGINS_AVAILABLE and ("://" in uri)) + if plugin_eligible: # Initialize plugin manager if needed # pylint: disable=protected-access if not self._plugin_manager._initialized: @@ -458,7 +556,18 @@ async def read_resource(self, db: Session, uri: str, request_id: Optional[str] = # pylint: enable=protected-access # Create plugin context - global_context = GlobalContext(request_id=request_id, user=user, server_id=server_id) + # Normalize user to an identifier string if provided + user_id = None + if user is not None: + if isinstance(user, dict) and "email" in user: + user_id = user.get("email") + elif isinstance(user, str): + user_id = user + else: + # Attempt to fallback to attribute access + user_id = getattr(user, "email", None) + + global_context = GlobalContext(request_id=request_id, user=user_id, server_id=server_id) # Create pre-fetch payload pre_payload = ResourcePreFetchPayload(uri=uri, metadata={}) @@ -505,7 +614,7 @@ async def read_resource(self, db: Session, uri: str, request_id: Optional[str] = content = resource.content # Call post-fetch hooks if plugin manager is available - if self._plugin_manager and PLUGINS_AVAILABLE: + if plugin_eligible: # Create post-fetch payload post_payload = ResourcePostFetchPayload(uri=original_uri, content=content) @@ -542,8 +651,26 @@ async def read_resource(self, db: Session, uri: str, request_id: Optional[str] = if content: span.set_attribute("content.size", len(str(content))) - # Return content - return content + # Return standardized content without breaking callers that expect passthrough + # Prefer returning first-class content models or objects with content-like attributes. + # ResourceContent and TextContent already imported at top level + + # If content is already a Pydantic content model, return as-is + if isinstance(content, (ResourceContent, TextContent)): + return content + + # If content is any object that quacks like content (e.g., MagicMock with .text/.blob), return as-is + if hasattr(content, "text") or hasattr(content, "blob"): + return content + + # Normalize primitive types to ResourceContent + if isinstance(content, bytes): + return ResourceContent(type="resource", uri=original_uri, blob=content) + if isinstance(content, str): + return ResourceContent(type="resource", uri=original_uri, text=content) + + # Fallback to stringified content + return ResourceContent(type="resource", uri=original_uri, text=str(content)) async def toggle_resource_status(self, db: Session, resource_id: int, activate: bool) -> ResourceRead: """ diff --git a/mcpgateway/services/role_service.py b/mcpgateway/services/role_service.py new file mode 100644 index 000000000..4136e538f --- /dev/null +++ b/mcpgateway/services/role_service.py @@ -0,0 +1,539 @@ +# -*- coding: utf-8 -*- +"""Role Management Service for RBAC System. + +This module provides CRUD operations for roles and user role assignments. +It handles role creation, assignment, revocation, and validation. +""" + +# Standard +from datetime import datetime +import logging +from typing import List, Optional + +# Third-Party +from sqlalchemy import and_, select +from sqlalchemy.orm import Session + +# First-Party +from mcpgateway.db import Permissions, Role, UserRole, utc_now + +logger = logging.getLogger(__name__) + + +class RoleService: + """Service for managing roles and role assignments. + + Provides comprehensive role management including creation, assignment, + revocation, and validation with support for role inheritance. + + Attributes: + Database session + + Examples: + Create a role:: + + service = RoleService(db_session) + role = await service.create_role( + name="team_admin", + description="Team administrator", + scope="team", + permissions=["teams.manage_members"], + created_by="admin@example.com" + ) + # role.name -> 'team_admin' + """ + + def __init__(self, db: Session): + """Initialize role service. + + Args: + db: Database session + """ + self.db = db + + async def create_role(self, name: str, description: str, scope: str, permissions: List[str], created_by: str, inherits_from: Optional[str] = None, is_system_role: bool = False) -> Role: + """Create a new role. + + Args: + name: Role name (must be unique within scope) + description: Role description + scope: Role scope ('global', 'team', 'personal') + permissions: List of permission strings + created_by: Email of user creating the role + inherits_from: ID of parent role for inheritance + is_system_role: Whether this is a system-defined role + + Returns: + Role: The created role + + Raises: + ValueError: If role name already exists or invalid parameters + + Examples: + service = RoleService(db) + role = await service.create_role( + ... name="developer", + ... description="Software developer role", + ... scope="team", + ... permissions=["tools.read", "tools.execute"], + ... created_by="admin@example.com" + ... ) + role.scope + 'team' + """ + # Validate scope + if scope not in ["global", "team", "personal"]: + raise ValueError(f"Invalid scope: {scope}") + + # Check for duplicate name within scope + existing = await self.get_role_by_name(name, scope) + if existing: + raise ValueError(f"Role '{name}' already exists in scope '{scope}'") + + # Validate permissions + valid_permissions = Permissions.get_all_permissions() + valid_permissions.append(Permissions.ALL_PERMISSIONS) # Allow wildcard + + invalid_perms = [p for p in permissions if p not in valid_permissions] + if invalid_perms: + raise ValueError(f"Invalid permissions: {invalid_perms}") + + # Validate inheritance + parent_role = None + if inherits_from: + parent_role = await self.get_role_by_id(inherits_from) + if not parent_role: + raise ValueError(f"Parent role not found: {inherits_from}") + + # Check for circular inheritance + if await self._would_create_cycle(inherits_from, None): + raise ValueError("Role inheritance would create a cycle") + + # Create the role + role = Role(name=name, description=description, scope=scope, permissions=permissions, created_by=created_by, inherits_from=inherits_from, is_system_role=is_system_role) + + self.db.add(role) + self.db.commit() + self.db.refresh(role) + + logger.info(f"Created role: {role.name} (scope: {role.scope}, id: {role.id})") + return role + + async def get_role_by_id(self, role_id: str) -> Optional[Role]: + """Get role by ID. + + Args: + role_id: Role ID to lookup + + Returns: + Optional[Role]: The role if found, None otherwise + + Examples: + service = RoleService(db) + role = await service.get_role_by_id("role-123") + role.name if role else None + 'admin' + """ + result = self.db.execute(select(Role).where(Role.id == role_id)) + return result.scalar_one_or_none() + + async def get_role_by_name(self, name: str, scope: str) -> Optional[Role]: + """Get role by name and scope. + + Args: + name: Role name + scope: Role scope + + Returns: + Optional[Role]: The role if found, None otherwise + + Examples: + service = RoleService(db) + role = await service.get_role_by_name("admin", "global") + role.scope if role else None + 'global' + """ + result = self.db.execute(select(Role).where(and_(Role.name == name, Role.scope == scope, Role.is_active.is_(True)))) + return result.scalar_one_or_none() + + async def list_roles(self, scope: Optional[str] = None, include_system: bool = True, include_inactive: bool = False) -> List[Role]: + """List roles with optional filtering. + + Args: + scope: Filter by scope ('global', 'team', 'personal') + include_system: Whether to include system roles + include_inactive: Whether to include inactive roles + + Returns: + List[Role]: List of matching roles + + Examples: + service = RoleService(db) + team_roles = await service.list_roles(scope="team") + len(team_roles) >= 0 + True + """ + query = select(Role) + + conditions = [] + + if scope: + conditions.append(Role.scope == scope) + + if not include_system: + conditions.append(Role.is_system_role.is_(False)) + + if not include_inactive: + conditions.append(Role.is_active.is_(True)) + + if conditions: + query = query.where(and_(*conditions)) + + query = query.order_by(Role.scope, Role.name) + + result = self.db.execute(query) + return result.scalars().all() + + async def update_role( + self, + role_id: str, + name: Optional[str] = None, + description: Optional[str] = None, + permissions: Optional[List[str]] = None, + inherits_from: Optional[str] = None, + is_active: Optional[bool] = None, + ) -> Optional[Role]: + """Update an existing role. + + Args: + role_id: ID of role to update + name: New role name + description: New role description + permissions: New permissions list + inherits_from: New parent role ID + is_active: New active status + + Returns: + Optional[Role]: Updated role or None if not found + + Raises: + ValueError: If update would create invalid state + + Examples: + service = RoleService(db) + role = await service.update_role( + ... role_id="role-123", + ... permissions=["tools.read", "tools.write"] + ... ) + "tools.write" in role.permissions if role else False + True + """ + role = await self.get_role_by_id(role_id) + if not role: + return None + + # Prevent modification of system roles + if role.is_system_role: + raise ValueError("Cannot modify system roles") + + # Validate new name if provided + if name and name != role.name: + existing = await self.get_role_by_name(name, role.scope) + if existing and existing.id != role_id: + raise ValueError(f"Role '{name}' already exists in scope '{role.scope}'") + role.name = name + + # Update description + if description is not None: + role.description = description + + # Validate and update permissions + if permissions is not None: + valid_permissions = Permissions.get_all_permissions() + valid_permissions.append(Permissions.ALL_PERMISSIONS) + + invalid_perms = [p for p in permissions if p not in valid_permissions] + if invalid_perms: + raise ValueError(f"Invalid permissions: {invalid_perms}") + + role.permissions = permissions + + # Validate and update inheritance + if inherits_from is not None: + if inherits_from != role.inherits_from: + if inherits_from: + parent_role = await self.get_role_by_id(inherits_from) + if not parent_role: + raise ValueError(f"Parent role not found: {inherits_from}") + + # Check for circular inheritance + if await self._would_create_cycle(inherits_from, role_id): + raise ValueError("Role inheritance would create a cycle") + + role.inherits_from = inherits_from + + # Update active status + if is_active is not None: + role.is_active = is_active + + # Update timestamp + role.updated_at = utc_now() + + self.db.commit() + self.db.refresh(role) + + logger.info(f"Updated role: {role.name} (id: {role.id})") + return role + + async def delete_role(self, role_id: str) -> bool: + """Delete a role. + + Soft deletes the role by setting is_active to False. + Also deactivates all user role assignments. + + Args: + role_id: ID of role to delete + + Returns: + bool: True if role was deleted, False if not found + + Raises: + ValueError: If trying to delete a system role + + Examples: + service = RoleService(db) + success = await service.delete_role("role-123") + success + True + """ + role = await self.get_role_by_id(role_id) + if not role: + return False + + if role.is_system_role: + raise ValueError("Cannot delete system roles") + + # Soft delete the role + role.is_active = False + role.updated_at = utc_now() + + # Deactivate all user assignments of this role + self.db.execute(select(UserRole).where(UserRole.role_id == role_id)).update({"is_active": False}) + + self.db.commit() + + logger.info(f"Deleted role: {role.name} (id: {role.id})") + return True + + async def assign_role_to_user(self, user_email: str, role_id: str, scope: str, scope_id: Optional[str], granted_by: str, expires_at: Optional[datetime] = None) -> UserRole: + """Assign a role to a user. + + Args: + user_email: Email of user to assign role to + role_id: ID of role to assign + scope: Scope of assignment ('global', 'team', 'personal') + scope_id: Team ID if team-scoped + granted_by: Email of user granting the role + expires_at: Optional expiration datetime + + Returns: + UserRole: The role assignment + + Raises: + ValueError: If invalid parameters or assignment already exists + + Examples: + service = RoleService(db) + user_role = await service.assign_role_to_user( + ... user_email="user@example.com", + ... role_id="role-123", + ... scope="team", + ... scope_id="team-456", + ... granted_by="admin@example.com" + ... ) + user_role.user_email + 'user@example.com' + """ + # Validate role exists and is active + role = await self.get_role_by_id(role_id) + if not role or not role.is_active: + raise ValueError(f"Role not found or inactive: {role_id}") + + # Validate scope consistency + if role.scope != scope: + raise ValueError(f"Role scope '{role.scope}' doesn't match assignment scope '{scope}'") + + # Validate scope_id requirements + if scope == "team" and not scope_id: + raise ValueError("scope_id required for team-scoped assignments") + if scope in ["global", "personal"] and scope_id: + raise ValueError(f"scope_id not allowed for {scope} assignments") + + # Check for existing active assignment + existing = await self.get_user_role_assignment(user_email, role_id, scope, scope_id) + if existing and existing.is_active and not existing.is_expired(): + raise ValueError("User already has this role assignment") + + # Create the assignment + user_role = UserRole(user_email=user_email, role_id=role_id, scope=scope, scope_id=scope_id, granted_by=granted_by, expires_at=expires_at) + + self.db.add(user_role) + self.db.commit() + self.db.refresh(user_role) + + logger.info(f"Assigned role {role.name} to {user_email} " f"(scope: {scope}, scope_id: {scope_id})") + return user_role + + async def revoke_role_from_user(self, user_email: str, role_id: str, scope: str, scope_id: Optional[str]) -> bool: + """Revoke a role from a user. + + Args: + user_email: Email of user + role_id: ID of role to revoke + scope: Scope of assignment + scope_id: Team ID if team-scoped + + Returns: + bool: True if role was revoked, False if not found + + Examples: + service = RoleService(db) + success = await service.revoke_role_from_user( + ... user_email="user@example.com", + ... role_id="role-123", + ... scope="team", + ... scope_id="team-456" + ... ) + success + True + """ + user_role = await self.get_user_role_assignment(user_email, role_id, scope, scope_id) + + if not user_role or not user_role.is_active: + return False + + user_role.is_active = False + self.db.commit() + + logger.info(f"Revoked role {role_id} from {user_email} " f"(scope: {scope}, scope_id: {scope_id})") + return True + + async def get_user_role_assignment(self, user_email: str, role_id: str, scope: str, scope_id: Optional[str]) -> Optional[UserRole]: + """Get a specific user role assignment. + + Args: + user_email: Email of user + role_id: ID of role + scope: Scope of assignment + scope_id: Team ID if team-scoped + + Returns: + Optional[UserRole]: The role assignment if found + + Examples: + service = RoleService(db) + user_role = await service.get_user_role_assignment( + ... "user@example.com", "role-123", "global", None + ... ) + user_role.scope if user_role else None + 'global' + """ + conditions = [UserRole.user_email == user_email, UserRole.role_id == role_id, UserRole.scope == scope] + + if scope_id: + conditions.append(UserRole.scope_id == scope_id) + else: + conditions.append(UserRole.scope_id.is_(None)) + + result = self.db.execute(select(UserRole).where(and_(*conditions))) + return result.scalar_one_or_none() + + async def list_user_roles(self, user_email: str, scope: Optional[str] = None, include_expired: bool = False) -> List[UserRole]: + """List all role assignments for a user. + + Args: + user_email: Email of user + scope: Filter by scope + include_expired: Whether to include expired roles + + Returns: + List[UserRole]: User's role assignments + + Examples: + service = RoleService(db) + roles = await service.list_user_roles("user@example.com") + len(roles) >= 0 + True + """ + query = select(UserRole).join(Role).where(and_(UserRole.user_email == user_email, UserRole.is_active.is_(True), Role.is_active.is_(True))) + + if scope: + query = query.where(UserRole.scope == scope) + + if not include_expired: + now = utc_now() + query = query.where((UserRole.expires_at.is_(None)) | (UserRole.expires_at > now)) + + query = query.order_by(UserRole.scope, Role.name) + + result = self.db.execute(query) + return result.scalars().all() + + async def list_role_assignments(self, role_id: str, scope: Optional[str] = None, include_expired: bool = False) -> List[UserRole]: + """List all user assignments for a role. + + Args: + role_id: ID of role + scope: Filter by scope + include_expired: Whether to include expired assignments + + Returns: + List[UserRole]: Role assignments + + Examples: + service = RoleService(db) + assignments = await service.list_role_assignments("role-123") + len(assignments) >= 0 + True + """ + query = select(UserRole).where(and_(UserRole.role_id == role_id, UserRole.is_active.is_(True))) + + if scope: + query = query.where(UserRole.scope == scope) + + if not include_expired: + now = utc_now() + query = query.where((UserRole.expires_at.is_(None)) | (UserRole.expires_at > now)) + + query = query.order_by(UserRole.user_email) + + result = self.db.execute(query) + return result.scalars().all() + + async def _would_create_cycle(self, parent_id: str, child_id: Optional[str]) -> bool: + """Check if setting parent_id as parent of child_id would create a cycle. + + Args: + parent_id: ID of the proposed parent role + child_id: ID of the proposed child role + + Returns: + True if setting this relationship would create a cycle, False otherwise + """ + if not child_id: + return False + + visited = set() + current = parent_id + + while current and current not in visited: + if current == child_id: + return True + + visited.add(current) + + # Get parent of current role + result = self.db.execute(select(Role.inherits_from).where(Role.id == current)) + current = result.scalar_one_or_none() + + return False diff --git a/mcpgateway/services/server_service.py b/mcpgateway/services/server_service.py index 492635352..9054dbd11 100644 --- a/mcpgateway/services/server_service.py +++ b/mcpgateway/services/server_service.py @@ -261,7 +261,9 @@ def _assemble_associated_items( "a2a_agents": a2a_agents or [], } - async def register_server(self, db: Session, server_in: ServerCreate) -> ServerRead: + async def register_server( + self, db: Session, server_in: ServerCreate, created_by: Optional[str] = None, team_id: Optional[str] = None, owner_email: Optional[str] = None, visibility: str = "private" + ) -> ServerRead: """ Register a new server in the catalog and validate that all associated items exist. @@ -279,6 +281,10 @@ async def register_server(self, db: Session, server_in: ServerCreate) -> ServerR db (Session): The SQLAlchemy database session. server_in (ServerCreate): The server creation schema containing server details and lists of associated tool, resource, and prompt IDs (as strings). + created_by (Optional[str]): Email of the user creating the server, used for ownership tracking. + team_id (Optional[str]): Team ID to assign the server to. + owner_email (Optional[str]): Email of the user who owns this server. + visibility (str): Server visibility level (private, team, public). Returns: ServerRead: The newly created server, with associated item IDs. @@ -314,6 +320,10 @@ async def register_server(self, db: Session, server_in: ServerCreate) -> ServerR icon=server_in.icon, is_active=True, tags=server_in.tags or [], + # Team scoping fields - use schema values if provided, otherwise fallback to parameters + team_id=getattr(server_in, "team_id", None) or team_id, + owner_email=getattr(server_in, "owner_email", None) or owner_email or created_by, + visibility=getattr(server_in, "visibility", None) or visibility, ) # Set custom UUID if provided @@ -438,6 +448,79 @@ async def list_servers(self, db: Session, include_inactive: bool = False, tags: servers = db.execute(query).scalars().all() return [self._convert_server_to_read(s) for s in servers] + async def list_servers_for_user( + self, db: Session, user_email: str, team_id: Optional[str] = None, visibility: Optional[str] = None, include_inactive: bool = False, skip: int = 0, limit: int = 100 + ) -> List[ServerRead]: + """ + List servers user has access to with team filtering. + + Args: + db: Database session + user_email: Email of the user requesting servers + team_id: Optional team ID to filter by specific team + visibility: Optional visibility filter (private, team, public) + include_inactive: Whether to include inactive servers + skip: Number of servers to skip for pagination + limit: Maximum number of servers to return + + Returns: + List[ServerRead]: Servers the user has access to + """ + # First-Party + from mcpgateway.services.team_management_service import TeamManagementService # pylint: disable=import-outside-toplevel + + # Build query following existing patterns from list_servers() + query = select(DbServer) + + # Apply active/inactive filter + if not include_inactive: + query = query.where(DbServer.is_active) + + if team_id: + # Filter by specific team + query = query.where(DbServer.team_id == team_id) + + # Validate user has access to team + team_service = TeamManagementService(db) + user_teams = await team_service.get_user_teams(user_email) + team_ids = [team.id for team in user_teams] + + if team_id not in team_ids: + return [] # No access to team + else: + # Get user's accessible teams + team_service = TeamManagementService(db) + user_teams = await team_service.get_user_teams(user_email) + team_ids = [team.id for team in user_teams] + + # Build access conditions following existing patterns + # Third-Party + from sqlalchemy import and_, or_ # pylint: disable=import-outside-toplevel + + access_conditions = [] + + # 1. User's personal resources (owner_email matches) + access_conditions.append(DbServer.owner_email == user_email) + + # 2. Team resources where user is member + if team_ids: + access_conditions.append(and_(DbServer.team_id.in_(team_ids), DbServer.visibility.in_(["team", "public"]))) + + # 3. Public resources (if visibility allows) + access_conditions.append(DbServer.visibility == "public") + + query = query.where(or_(*access_conditions)) + + # Apply visibility filter if specified + if visibility: + query = query.where(DbServer.visibility == visibility) + + # Apply pagination following existing patterns + query = query.offset(skip).limit(limit) + + servers = db.execute(query).scalars().all() + return [self._convert_server_to_read(s) for s in servers] + async def get_server(self, db: Session, server_id: str) -> ServerRead: """Retrieve server details by ID. diff --git a/mcpgateway/services/sso_service.py b/mcpgateway/services/sso_service.py new file mode 100644 index 000000000..f7026e1fd --- /dev/null +++ b/mcpgateway/services/sso_service.py @@ -0,0 +1,656 @@ +# -*- coding: utf-8 -*- +"""Location: ./mcpgateway/services/sso_service.py +Copyright 2025 +SPDX-License-Identifier: Apache-2.0 +Authors: Mihai Criveti + +Single Sign-On (SSO) authentication service for OAuth2 and OIDC providers. +Handles provider management, OAuth flows, and user authentication. +""" + +# Future +from __future__ import annotations + +# Standard +import base64 +from datetime import timedelta +import hashlib +import logging +import secrets +import string +from typing import Any, Dict, List, Optional, Tuple +import urllib.parse + +# Third-Party +from cryptography.fernet import Fernet +from cryptography.hazmat.primitives import hashes +from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC +import httpx +from sqlalchemy import and_, select +from sqlalchemy.orm import Session + +# First-Party +from mcpgateway.config import settings +from mcpgateway.db import PendingUserApproval, SSOAuthSession, SSOProvider, utc_now +from mcpgateway.services.email_auth_service import EmailAuthService +from mcpgateway.utils.create_jwt_token import create_jwt_token + +# Logger +logger = logging.getLogger(__name__) + + +class SSOService: + """Service for managing SSO authentication flows and providers. + + Handles OAuth2/OIDC authentication flows, provider configuration, + and integration with the local user system. + + Examples: + >>> # sso_service = SSOService(db_session) + >>> # providers = sso_service.list_enabled_providers() + >>> # len(providers) >= 0 + >>> # auth_url = sso_service.get_authorization_url("github", "https://app.com/callback") + >>> # "github.com" in auth_url or auth_url is None # None if provider not configured + >>> True + True + """ + + def __init__(self, db: Session): + """Initialize SSO service with database session. + + Args: + db: SQLAlchemy database session + """ + self.db = db + self.auth_service = EmailAuthService(db) + self._encryption_key = self._get_or_create_encryption_key() + + def _get_or_create_encryption_key(self) -> bytes: + """Get or create encryption key for client secrets. + + Returns: + Encryption key bytes + """ + # Use the same encryption secret as the auth service + key = settings.auth_encryption_secret + if not key: + # Generate a new key - in production, this should be persisted + key = Fernet.generate_key() + # Derive a proper Fernet key from the secret + + if isinstance(key, str): + key = key.encode() + + # Derive a 32-byte key using PBKDF2 + kdf = PBKDF2HMAC( + algorithm=hashes.SHA256(), + length=32, + salt=b"sso_salt", # Static salt for consistency + iterations=100000, + ) + derived_key = base64.urlsafe_b64encode(kdf.derive(key)) + return derived_key + + def _encrypt_secret(self, secret: str) -> str: + """Encrypt a client secret for secure storage. + + Args: + secret: Plain text client secret + + Returns: + Encrypted secret string + """ + fernet = Fernet(self._encryption_key) + return fernet.encrypt(secret.encode()).decode() + + def _decrypt_secret(self, encrypted_secret: str) -> str: + """Decrypt a client secret for use. + + Args: + encrypted_secret: Encrypted secret string + + Returns: + Plain text client secret + """ + fernet = Fernet(self._encryption_key) + return fernet.decrypt(encrypted_secret.encode()).decode() + + def list_enabled_providers(self) -> List[SSOProvider]: + """Get list of enabled SSO providers. + + Returns: + List of enabled SSO providers + + Examples: + # >>> service = SSOService(db_session) + # >>> providers = service.list_enabled_providers() + # >>> all(p.is_enabled for p in providers) + # True + """ + stmt = select(SSOProvider).where(SSOProvider.is_enabled.is_(True)) + result = self.db.execute(stmt) + return list(result.scalars().all()) + + def get_provider(self, provider_id: str) -> Optional[SSOProvider]: + """Get SSO provider by ID. + + Args: + provider_id: Provider identifier (e.g., 'github', 'google') + + Returns: + SSO provider or None if not found + """ + stmt = select(SSOProvider).where(SSOProvider.id == provider_id) + result = self.db.execute(stmt) + return result.scalar_one_or_none() + + def get_provider_by_name(self, provider_name: str) -> Optional[SSOProvider]: + """Get SSO provider by name. + + Args: + provider_name: Provider name (e.g., 'github', 'google') + + Returns: + SSO provider or None if not found + """ + stmt = select(SSOProvider).where(SSOProvider.name == provider_name) + result = self.db.execute(stmt) + return result.scalar_one_or_none() + + def create_provider(self, provider_data: Dict[str, Any]) -> SSOProvider: + """Create new SSO provider configuration. + + Args: + provider_data: Provider configuration data + + Returns: + Created SSO provider + + Examples: + # >>> service = SSOService(db_session) + # >>> data = { + # ... "id": "github", + # ... "name": "github", + # ... "display_name": "GitHub", + # ... "provider_type": "oauth2", + # ... "client_id": "client_123", + # ... "client_secret": "secret_456", + # ... "authorization_url": "https://github.com/login/oauth/authorize", + # ... "token_url": "https://github.com/login/oauth/access_token", + # ... "userinfo_url": "https://api.github.com/user" + # ... } + # >>> provider = service.create_provider(data) + # >>> provider.id == "github" + # True + """ + # Encrypt client secret + client_secret = provider_data.pop("client_secret") + provider_data["client_secret_encrypted"] = self._encrypt_secret(client_secret) + + provider = SSOProvider(**provider_data) + self.db.add(provider) + self.db.commit() + self.db.refresh(provider) + return provider + + def update_provider(self, provider_id: str, provider_data: Dict[str, Any]) -> Optional[SSOProvider]: + """Update existing SSO provider configuration. + + Args: + provider_id: Provider identifier + provider_data: Updated provider data + + Returns: + Updated SSO provider or None if not found + """ + provider = self.get_provider(provider_id) + if not provider: + return None + + # Handle client secret encryption if provided + if "client_secret" in provider_data: + client_secret = provider_data.pop("client_secret") + provider_data["client_secret_encrypted"] = self._encrypt_secret(client_secret) + + for key, value in provider_data.items(): + if hasattr(provider, key): + setattr(provider, key, value) + + provider.updated_at = utc_now() + self.db.commit() + self.db.refresh(provider) + return provider + + def delete_provider(self, provider_id: str) -> bool: + """Delete SSO provider configuration. + + Args: + provider_id: Provider identifier + + Returns: + True if deleted, False if not found + """ + provider = self.get_provider(provider_id) + if not provider: + return False + + self.db.delete(provider) + self.db.commit() + return True + + def generate_pkce_challenge(self) -> Tuple[str, str]: + """Generate PKCE code verifier and challenge for OAuth 2.1. + + Returns: + Tuple of (code_verifier, code_challenge) + + Examples: + # >>> service = SSOService(db_session) + # >>> verifier, challenge = service.generate_pkce_challenge() + # >>> len(verifier) >= 43 + # True + # >>> len(challenge) == 43 # Base64URL encoded SHA256 hash + # True + """ + # Generate cryptographically random code verifier + code_verifier = base64.urlsafe_b64encode(secrets.token_bytes(32)).decode("utf-8").rstrip("=") + + # Generate code challenge using SHA256 + code_challenge = base64.urlsafe_b64encode(hashlib.sha256(code_verifier.encode("utf-8")).digest()).decode("utf-8").rstrip("=") + + return code_verifier, code_challenge + + def get_authorization_url(self, provider_id: str, redirect_uri: str, scopes: Optional[List[str]] = None) -> Optional[str]: + """Generate OAuth authorization URL for provider. + + Args: + provider_id: Provider identifier + redirect_uri: Callback URI after authorization + scopes: Optional custom scopes (uses provider default if None) + + Returns: + Authorization URL or None if provider not found + + Examples: + # >>> service = SSOService(db_session) + # >>> url = service.get_authorization_url("github", "https://app.com/callback") + # >>> url is None or "github.com" in url + # True + """ + provider = self.get_provider(provider_id) + if not provider or not provider.is_enabled: + return None + + # Generate PKCE parameters + code_verifier, code_challenge = self.generate_pkce_challenge() + + # Generate CSRF state + state = secrets.token_urlsafe(32) + + # Generate OIDC nonce if applicable + nonce = secrets.token_urlsafe(16) if provider.provider_type == "oidc" else None + + # Create auth session + auth_session = SSOAuthSession(provider_id=provider_id, state=state, code_verifier=code_verifier, nonce=nonce, redirect_uri=redirect_uri) + self.db.add(auth_session) + self.db.commit() + + # Build authorization URL + params = { + "client_id": provider.client_id, + "response_type": "code", + "redirect_uri": redirect_uri, + "state": state, + "scope": " ".join(scopes) if scopes else provider.scope, + "code_challenge": code_challenge, + "code_challenge_method": "S256", + } + + if nonce: + params["nonce"] = nonce + + return f"{provider.authorization_url}?{urllib.parse.urlencode(params)}" + + async def handle_oauth_callback(self, provider_id: str, code: str, state: str) -> Optional[Dict[str, Any]]: + """Handle OAuth callback and exchange code for tokens. + + Args: + provider_id: Provider identifier + code: Authorization code from callback + state: CSRF state parameter + + Returns: + User info dict or None if authentication failed + + Examples: + # >>> service = SSOService(db_session) + # >>> import asyncio + # >>> result = asyncio.run(service.handle_oauth_callback("github", "code123", "state456")) + # >>> result is None or "email" in result + # True + """ + # Validate auth session + stmt = select(SSOAuthSession).where(SSOAuthSession.state == state, SSOAuthSession.provider_id == provider_id) + auth_session = self.db.execute(stmt).scalar_one_or_none() + + if not auth_session or auth_session.is_expired: + return None + + provider = auth_session.provider + if not provider or not provider.is_enabled: + return None + + try: + # Exchange authorization code for tokens + logger.info(f"Starting token exchange for provider {provider_id}") + token_data = await self._exchange_code_for_tokens(provider, auth_session, code) + if not token_data: + logger.error(f"Failed to exchange code for tokens for provider {provider_id}") + return None + logger.info(f"Token exchange successful for provider {provider_id}") + + # Get user info from provider + user_info = await self._get_user_info(provider, token_data["access_token"]) + if not user_info: + logger.error(f"Failed to get user info for provider {provider_id}") + return None + + # Clean up auth session + self.db.delete(auth_session) + self.db.commit() + + return user_info + + except Exception as e: + # Clean up auth session on error + logger.error(f"OAuth callback failed for provider {provider_id}: {type(e).__name__}: {str(e)}") + logger.exception("Full traceback for OAuth callback failure:") + self.db.delete(auth_session) + self.db.commit() + return None + + async def _exchange_code_for_tokens(self, provider: SSOProvider, auth_session: SSOAuthSession, code: str) -> Optional[Dict[str, Any]]: + """Exchange authorization code for access tokens. + + Args: + provider: SSO provider configuration + auth_session: Auth session with PKCE parameters + code: Authorization code + + Returns: + Token response dict or None if failed + """ + token_params = { + "client_id": provider.client_id, + "client_secret": self._decrypt_secret(provider.client_secret_encrypted), + "code": code, + "grant_type": "authorization_code", + "redirect_uri": auth_session.redirect_uri, + "code_verifier": auth_session.code_verifier, + } + + async with httpx.AsyncClient() as client: + response = await client.post(provider.token_url, data=token_params, headers={"Accept": "application/json"}) + + if response.status_code == 200: + return response.json() + logger.error(f"Token exchange failed for {provider.name}: HTTP {response.status_code} - {response.text}") + + return None + + async def _get_user_info(self, provider: SSOProvider, access_token: str) -> Optional[Dict[str, Any]]: + """Get user information from provider using access token. + + Args: + provider: SSO provider configuration + access_token: OAuth access token + + Returns: + User info dict or None if failed + """ + async with httpx.AsyncClient() as client: + response = await client.get(provider.userinfo_url, headers={"Authorization": f"Bearer {access_token}"}) + + if response.status_code == 200: + user_data = response.json() + + # For GitHub, also fetch organizations if admin assignment is configured + if provider.id == "github" and settings.sso_github_admin_orgs: + try: + orgs_response = await client.get("https://api.github.com/user/orgs", headers={"Authorization": f"Bearer {access_token}"}) + if orgs_response.status_code == 200: + orgs_data = orgs_response.json() + user_data["organizations"] = [org["login"] for org in orgs_data] + else: + logger.warning(f"Failed to fetch GitHub organizations: HTTP {orgs_response.status_code}") + user_data["organizations"] = [] + except Exception as e: + logger.warning(f"Error fetching GitHub organizations: {e}") + user_data["organizations"] = [] + + # Normalize user info across providers + return self._normalize_user_info(provider, user_data) + logger.error(f"User info request failed for {provider.name}: HTTP {response.status_code} - {response.text}") + + return None + + def _normalize_user_info(self, provider: SSOProvider, user_data: Dict[str, Any]) -> Dict[str, Any]: + """Normalize user info from different providers to common format. + + Args: + provider: SSO provider configuration + user_data: Raw user data from provider + + Returns: + Normalized user info dict + """ + # Handle GitHub provider + if provider.id == "github": + return { + "email": user_data.get("email"), + "full_name": user_data.get("name") or user_data.get("login"), + "avatar_url": user_data.get("avatar_url"), + "provider_id": user_data.get("id"), + "username": user_data.get("login"), + "provider": "github", + "organizations": user_data.get("organizations", []), + } + + # Handle Google provider + if provider.id == "google": + return { + "email": user_data.get("email"), + "full_name": user_data.get("name"), + "avatar_url": user_data.get("picture"), + "provider_id": user_data.get("sub"), + "username": user_data.get("email", "").split("@")[0], + "provider": "google", + } + + # Handle IBM Verify provider + if provider.id == "ibm_verify": + return { + "email": user_data.get("email"), + "full_name": user_data.get("name"), + "avatar_url": user_data.get("picture"), + "provider_id": user_data.get("sub"), + "username": user_data.get("preferred_username") or user_data.get("email", "").split("@")[0], + "provider": "ibm_verify", + } + + # Handle Okta provider + if provider.id == "okta": + return { + "email": user_data.get("email"), + "full_name": user_data.get("name"), + "avatar_url": user_data.get("picture"), + "provider_id": user_data.get("sub"), + "username": user_data.get("preferred_username") or user_data.get("email", "").split("@")[0], + "provider": "okta", + } + + # Generic OIDC format for all other providers + return { + "email": user_data.get("email"), + "full_name": user_data.get("name"), + "avatar_url": user_data.get("picture"), + "provider_id": user_data.get("sub"), + "username": user_data.get("preferred_username") or user_data.get("email", "").split("@")[0], + "provider": provider.id, + } + + async def authenticate_or_create_user(self, user_info: Dict[str, Any]) -> Optional[str]: + """Authenticate existing user or create new user from SSO info. + + Args: + user_info: Normalized user info from SSO provider + + Returns: + JWT token for authenticated user or None if failed + """ + email = user_info.get("email") + if not email: + return None + + # Check if user exists + user = await self.auth_service.get_user_by_email(email) + + if user: + # Update user info from SSO + if user_info.get("full_name") and user_info["full_name"] != user.full_name: + user.full_name = user_info["full_name"] + + # Update auth provider if changed + if user.auth_provider == "local" or user.auth_provider != user_info.get("provider"): + user.auth_provider = user_info.get("provider", "sso") + + # Mark email as verified for SSO users + user.email_verified = True + user.last_login = utc_now() + + self.db.commit() + else: + # Auto-create user if enabled + provider = self.get_provider(user_info.get("provider")) + if not provider or not provider.auto_create_users: + return None + + # Check trusted domains if configured + if provider.trusted_domains: + domain = email.split("@")[1].lower() + if domain not in [d.lower() for d in provider.trusted_domains]: + return None + + # Check if admin approval is required + if settings.sso_require_admin_approval: + # Check if user is already pending approval + + pending = self.db.execute(select(PendingUserApproval).where(PendingUserApproval.email == email)).scalar_one_or_none() + + if pending: + if pending.status == "pending" and not pending.is_expired(): + return None # Still waiting for approval + if pending.status == "rejected": + return None # User was rejected + if pending.status == "approved": + # User was approved, create account now + pass # Continue with user creation below + else: + # Create pending approval request + + pending = PendingUserApproval( + email=email, + full_name=user_info.get("full_name", email), + auth_provider=user_info.get("provider", "sso"), + sso_metadata=user_info, + expires_at=utc_now() + timedelta(days=30), # 30-day approval window + ) + self.db.add(pending) + self.db.commit() + logger.info(f"Created pending approval request for SSO user: {email}") + return None # No token until approved + + # Create new user (either no approval required, or approval already granted) + # Generate a secure random password for SSO users (they won't use it) + + random_password = "".join(secrets.choice(string.ascii_letters + string.digits + string.punctuation) for _ in range(32)) + + # Determine if user should be admin based on domain/organization + is_admin = self._should_user_be_admin(email, user_info, provider) + + user = await self.auth_service.create_user( + email=email, + password=random_password, # Random password for SSO users (not used) + full_name=user_info.get("full_name", email), + is_admin=is_admin, + auth_provider=user_info.get("provider", "sso"), + ) + if not user: + return None + + # If user was created from approved request, mark request as used + if settings.sso_require_admin_approval: + + pending = self.db.execute(select(PendingUserApproval).where(and_(PendingUserApproval.email == email, PendingUserApproval.status == "approved"))).scalar_one_or_none() + if pending: + # Mark as used (we could delete or keep for audit trail) + pending.status = "completed" + self.db.commit() + + # Generate JWT token for user + token_data = { + "sub": user.email, + "email": user.email, + "full_name": user.full_name, + "auth_provider": user.auth_provider, + "iat": int(utc_now().timestamp()), + "user": {"email": user.email, "full_name": user.full_name, "is_admin": user.is_admin, "auth_provider": user.auth_provider}, + } + + # Add user teams to token + teams = user.get_teams() + token_data["teams"] = [{"id": team.id, "name": team.name, "slug": team.slug, "is_personal": team.is_personal, "role": user.get_team_role(team.id)} for team in teams] + + # Add namespaces for RBAC + namespaces = [f"user:{user.email}"] + namespaces.extend([f"team:{team['slug']}" for team in token_data["teams"]]) + namespaces.append("public") + token_data["namespaces"] = namespaces + + # Add scopes + token_data["scopes"] = {"server_id": None, "permissions": ["*"] if user.is_admin else [], "ip_restrictions": [], "time_restrictions": {}} + + # Create JWT token + token = await create_jwt_token(token_data, expires_in_minutes=settings.token_expiry) + return token + + def _should_user_be_admin(self, email: str, user_info: Dict[str, Any], provider: SSOProvider) -> bool: + """Determine if SSO user should be granted admin privileges. + + Args: + email: User's email address + user_info: Normalized user info from SSO provider + provider: SSO provider configuration + + Returns: + True if user should be admin, False otherwise + """ + # Check domain-based admin assignment + domain = email.split("@")[1].lower() + if domain in [d.lower() for d in settings.sso_auto_admin_domains]: + return True + + # Check provider-specific admin assignment + if provider.id == "github" and settings.sso_github_admin_orgs: + # For GitHub, we'd need to fetch user's organizations + # This is a placeholder - in production, you'd make API calls to get orgs + github_orgs = user_info.get("organizations", []) + if any(org.lower() in [o.lower() for o in settings.sso_github_admin_orgs] for org in github_orgs): + return True + + if provider.id == "google" and settings.sso_google_admin_domains: + # Check if user's domain is in admin domains + if domain in [d.lower() for d in settings.sso_google_admin_domains]: + return True + + return False diff --git a/mcpgateway/services/team_invitation_service.py b/mcpgateway/services/team_invitation_service.py new file mode 100644 index 000000000..678c5976d --- /dev/null +++ b/mcpgateway/services/team_invitation_service.py @@ -0,0 +1,446 @@ +# -*- coding: utf-8 -*- +"""Location: ./mcpgateway/services/team_invitation_service.py +Copyright 2025 +SPDX-License-Identifier: Apache-2.0 +Authors: Mihai Criveti + +Team Invitation Service. +This module provides team invitation creation, management, and acceptance +for the multi-team collaboration system. + +Examples: + >>> from mcpgateway.services.team_invitation_service import TeamInvitationService + >>> from mcpgateway.db import SessionLocal + >>> db = SessionLocal() + >>> service = TeamInvitationService(db) + >>> # Service handles team invitation lifecycle +""" + +# Standard +from datetime import timedelta +import secrets +from typing import List, Optional + +# Third-Party +from sqlalchemy.orm import Session + +# First-Party +from mcpgateway.config import settings +from mcpgateway.db import EmailTeam, EmailTeamInvitation, EmailTeamMember, EmailUser, utc_now +from mcpgateway.services.logging_service import LoggingService + +# Initialize logging +logging_service = LoggingService() +logger = logging_service.get_logger(__name__) + + +class TeamInvitationService: + """Service for team invitation management. + + This service handles invitation creation, validation, acceptance, + and cleanup for team membership management. + + Attributes: + db (Session): SQLAlchemy database session + + Examples: + >>> from mcpgateway.services.team_invitation_service import TeamInvitationService + >>> from mcpgateway.db import SessionLocal + >>> db = SessionLocal() + >>> service = TeamInvitationService(db) + >>> service.db is not None + True + """ + + def __init__(self, db: Session): + """Initialize the team invitation service. + + Args: + db: SQLAlchemy database session + + Examples: + >>> from mcpgateway.db import SessionLocal + >>> db = SessionLocal() + >>> service = TeamInvitationService(db) + >>> isinstance(service.db, Session) + True + """ + self.db = db + + def _generate_invitation_token(self) -> str: + """Generate a secure invitation token. + + Returns: + str: A cryptographically secure random token + + Examples: + Tokens are used for secure invitation acceptance. + """ + return secrets.token_urlsafe(32) + + async def create_invitation(self, team_id: str, email: str, role: str, invited_by: str, expiry_days: Optional[int] = None) -> Optional[EmailTeamInvitation]: + """Create a team invitation. + + Args: + team_id: ID of the team + email: Email address to invite + role: Role to assign (owner, member) + invited_by: Email of user sending the invitation + expiry_days: Days until invitation expires (default from settings) + + Returns: + EmailTeamInvitation: The created invitation or None if failed + + Raises: + ValueError: If invitation parameters are invalid + Exception: If invitation creation fails + + Examples: + Team owners can send invitations to new members. + """ + try: + # Validate role + valid_roles = ["owner", "member"] + if role not in valid_roles: + raise ValueError(f"Invalid role. Must be one of: {', '.join(valid_roles)}") + + # Check if team exists + team = self.db.query(EmailTeam).filter(EmailTeam.id == team_id, EmailTeam.is_active.is_(True)).first() + + if not team: + logger.warning(f"Team {team_id} not found") + return None + + # Prevent invitations to personal teams + if team.is_personal: + logger.warning(f"Cannot send invitations to personal team {team_id}") + raise ValueError("Cannot send invitations to personal teams") + + # Check if inviter exists and is a team member + inviter = self.db.query(EmailUser).filter(EmailUser.email == invited_by).first() + if not inviter: + logger.warning(f"Inviter {invited_by} not found") + return None + + # Check if inviter is a member of the team with appropriate permissions + inviter_membership = self.db.query(EmailTeamMember).filter(EmailTeamMember.team_id == team_id, EmailTeamMember.user_email == invited_by, EmailTeamMember.is_active.is_(True)).first() + + if not inviter_membership: + logger.warning(f"Inviter {invited_by} is not a member of team {team_id}") + raise ValueError("Only team members can send invitations") + + # Only owners can send invitations + if inviter_membership.role != "owner": + logger.warning(f"User {invited_by} does not have permission to invite to team {team_id}") + raise ValueError("Only team owners can send invitations") + + # Check if user is already a team member + existing_member = self.db.query(EmailTeamMember).filter(EmailTeamMember.team_id == team_id, EmailTeamMember.user_email == email, EmailTeamMember.is_active.is_(True)).first() + + if existing_member: + logger.warning(f"User {email} is already a member of team {team_id}") + raise ValueError(f"User {email} is already a member of this team") + + # Check for existing active invitations + existing_invitation = self.db.query(EmailTeamInvitation).filter(EmailTeamInvitation.team_id == team_id, EmailTeamInvitation.email == email, EmailTeamInvitation.is_active.is_(True)).first() + + if existing_invitation and not existing_invitation.is_expired(): + logger.warning(f"Active invitation already exists for {email} to team {team_id}") + raise ValueError(f"An active invitation already exists for {email}") + + # Check team member limit + if team.max_members: + current_member_count = self.db.query(EmailTeamMember).filter(EmailTeamMember.team_id == team_id, EmailTeamMember.is_active.is_(True)).count() + + pending_invitation_count = self.db.query(EmailTeamInvitation).filter(EmailTeamInvitation.team_id == team_id, EmailTeamInvitation.is_active.is_(True)).count() + + if (current_member_count + pending_invitation_count) >= team.max_members: + logger.warning(f"Team {team_id} has reached maximum member limit") + raise ValueError(f"Team has reached maximum member limit of {team.max_members}") + + # Deactivate any existing invitations for this email/team combination + if existing_invitation: + existing_invitation.is_active = False + + # Set expiry + if expiry_days is None: + expiry_days = getattr(settings, "invitation_expiry_days", 7) + expires_at = utc_now() + timedelta(days=expiry_days) + + # Create the invitation + invitation = EmailTeamInvitation( + team_id=team_id, email=email, role=role, invited_by=invited_by, invited_at=utc_now(), expires_at=expires_at, token=self._generate_invitation_token(), is_active=True + ) + + self.db.add(invitation) + self.db.commit() + + logger.info(f"Created invitation for {email} to team {team_id} by {invited_by}") + return invitation + + except Exception as e: + self.db.rollback() + logger.error(f"Failed to create invitation for {email} to team {team_id}: {e}") + raise + + async def get_invitation_by_token(self, token: str) -> Optional[EmailTeamInvitation]: + """Get an invitation by its token. + + Args: + token: The invitation token + + Returns: + EmailTeamInvitation: The invitation or None if not found + + Examples: + Used for invitation acceptance and validation. + """ + try: + invitation = self.db.query(EmailTeamInvitation).filter(EmailTeamInvitation.token == token).first() + + return invitation + + except Exception as e: + logger.error(f"Failed to get invitation by token: {e}") + return None + + async def accept_invitation(self, token: str, accepting_user_email: Optional[str] = None) -> bool: + """Accept a team invitation. + + Args: + token: The invitation token + accepting_user_email: Email of user accepting (for validation) + + Returns: + bool: True if invitation was accepted successfully, False otherwise + + Raises: + ValueError: If invitation is invalid or expired + Exception: If acceptance fails + + Examples: + Users can accept invitations to join teams. + """ + try: + # Get the invitation + invitation = await self.get_invitation_by_token(token) + if not invitation: + logger.warning("Invitation not found for token") + raise ValueError("Invitation not found") + + # Check if invitation is valid + if not invitation.is_valid(): + logger.warning(f"Invalid or expired invitation for {invitation.email}") + raise ValueError("Invitation is invalid or expired") + + # Validate accepting user email if provided + if accepting_user_email and accepting_user_email != invitation.email: + logger.warning(f"Email mismatch: invitation for {invitation.email}, accepting as {accepting_user_email}") + raise ValueError("Email address does not match invitation") + + # Check if user exists (if email provided, they must exist) + if accepting_user_email: + user = self.db.query(EmailUser).filter(EmailUser.email == accepting_user_email).first() + if not user: + logger.warning(f"User {accepting_user_email} not found") + raise ValueError("User account not found") + + # Check if team still exists + team = self.db.query(EmailTeam).filter(EmailTeam.id == invitation.team_id, EmailTeam.is_active.is_(True)).first() + + if not team: + logger.warning(f"Team {invitation.team_id} not found or inactive") + raise ValueError("Team not found or inactive") + + # Check if user is already a member + existing_member = ( + self.db.query(EmailTeamMember).filter(EmailTeamMember.team_id == invitation.team_id, EmailTeamMember.user_email == invitation.email, EmailTeamMember.is_active.is_(True)).first() + ) + + if existing_member: + logger.warning(f"User {invitation.email} is already a member of team {invitation.team_id}") + # Deactivate the invitation since they're already a member + invitation.is_active = False + self.db.commit() + raise ValueError("User is already a member of this team") + + # Check team member limit + if team.max_members: + current_member_count = self.db.query(EmailTeamMember).filter(EmailTeamMember.team_id == invitation.team_id, EmailTeamMember.is_active.is_(True)).count() + if current_member_count >= team.max_members: + logger.warning(f"Team {invitation.team_id} has reached maximum member limit") + raise ValueError(f"Team has reached maximum member limit of {team.max_members}") + + # Create team membership + membership = EmailTeamMember(team_id=invitation.team_id, user_email=invitation.email, role=invitation.role, joined_at=utc_now(), invited_by=invitation.invited_by, is_active=True) + + self.db.add(membership) + + # Deactivate the invitation + invitation.is_active = False + + self.db.commit() + + logger.info(f"User {invitation.email} accepted invitation to team {invitation.team_id}") + return True + + except Exception as e: + self.db.rollback() + logger.error(f"Failed to accept invitation: {e}") + raise + + async def decline_invitation(self, token: str, declining_user_email: Optional[str] = None) -> bool: + """Decline a team invitation. + + Args: + token: The invitation token + declining_user_email: Email of user declining (for validation) + + Returns: + bool: True if invitation was declined successfully, False otherwise + + Examples: + Users can decline invitations they don't want to accept. + """ + try: + # Get the invitation + invitation = await self.get_invitation_by_token(token) + if not invitation: + logger.warning("Invitation not found for token") + return False + + # Validate declining user email if provided + if declining_user_email and declining_user_email != invitation.email: + logger.warning(f"Email mismatch: invitation for {invitation.email}, declining as {declining_user_email}") + return False + + # Deactivate the invitation + invitation.is_active = False + self.db.commit() + + logger.info(f"User {invitation.email} declined invitation to team {invitation.team_id}") + return True + + except Exception as e: + self.db.rollback() + logger.error(f"Failed to decline invitation: {e}") + return False + + async def revoke_invitation(self, invitation_id: str, revoked_by: str) -> bool: + """Revoke a team invitation. + + Args: + invitation_id: ID of the invitation to revoke + revoked_by: Email of user revoking the invitation + + Returns: + bool: True if invitation was revoked successfully, False otherwise + + Examples: + Team owners can revoke pending invitations. + """ + try: + # Get the invitation + invitation = self.db.query(EmailTeamInvitation).filter(EmailTeamInvitation.id == invitation_id, EmailTeamInvitation.is_active.is_(True)).first() + + if not invitation: + logger.warning(f"Active invitation {invitation_id} not found") + return False + + # Check if revoker has permission + revoker_membership = ( + self.db.query(EmailTeamMember).filter(EmailTeamMember.team_id == invitation.team_id, EmailTeamMember.user_email == revoked_by, EmailTeamMember.is_active.is_(True)).first() + ) + + if not revoker_membership or revoker_membership.role != "owner": + logger.warning(f"User {revoked_by} does not have permission to revoke invitation {invitation_id}") + return False + + # Revoke the invitation + invitation.is_active = False + self.db.commit() + + logger.info(f"Invitation {invitation_id} revoked by {revoked_by}") + return True + + except Exception as e: + self.db.rollback() + logger.error(f"Failed to revoke invitation {invitation_id}: {e}") + return False + + async def get_team_invitations(self, team_id: str, active_only: bool = True) -> List[EmailTeamInvitation]: + """Get all invitations for a team. + + Args: + team_id: ID of the team + active_only: Whether to return only active invitations + + Returns: + List[EmailTeamInvitation]: List of team invitations + + Examples: + Team management interface showing pending invitations. + """ + try: + query = self.db.query(EmailTeamInvitation).filter(EmailTeamInvitation.team_id == team_id) + + if active_only: + query = query.filter(EmailTeamInvitation.is_active.is_(True)) + + invitations = query.order_by(EmailTeamInvitation.invited_at.desc()).all() + return invitations + + except Exception as e: + logger.error(f"Failed to get invitations for team {team_id}: {e}") + return [] + + async def get_user_invitations(self, email: str, active_only: bool = True) -> List[EmailTeamInvitation]: + """Get all invitations for a user. + + Args: + email: Email address of the user + active_only: Whether to return only active invitations + + Returns: + List[EmailTeamInvitation]: List of invitations for the user + + Examples: + User dashboard showing pending team invitations. + """ + try: + query = self.db.query(EmailTeamInvitation).filter(EmailTeamInvitation.email == email) + + if active_only: + query = query.filter(EmailTeamInvitation.is_active.is_(True)) + + invitations = query.order_by(EmailTeamInvitation.invited_at.desc()).all() + return invitations + + except Exception as e: + logger.error(f"Failed to get invitations for user {email}: {e}") + return [] + + async def cleanup_expired_invitations(self) -> int: + """Clean up expired invitations. + + Returns: + int: Number of invitations cleaned up + + Examples: + Periodic cleanup task to remove expired invitations. + """ + try: + now = utc_now() + expired_count = self.db.query(EmailTeamInvitation).filter(EmailTeamInvitation.expires_at < now, EmailTeamInvitation.is_active.is_(True)).update({"is_active": False}) + + self.db.commit() + + if expired_count > 0: + logger.info(f"Cleaned up {expired_count} expired invitations") + + return expired_count + + except Exception as e: + self.db.rollback() + logger.error(f"Failed to cleanup expired invitations: {e}") + return 0 diff --git a/mcpgateway/services/team_management_service.py b/mcpgateway/services/team_management_service.py new file mode 100644 index 000000000..57dd69832 --- /dev/null +++ b/mcpgateway/services/team_management_service.py @@ -0,0 +1,799 @@ +# -*- coding: utf-8 -*- +"""Location: ./mcpgateway/services/team_management_service.py +Copyright 2025 +SPDX-License-Identifier: Apache-2.0 +Authors: Mihai Criveti + +Team Management Service. +This module provides team creation, management, and membership operations +for the multi-team collaboration system. + +Examples: + >>> from mcpgateway.services.team_management_service import TeamManagementService + >>> from mcpgateway.db import SessionLocal + >>> db = SessionLocal() + >>> service = TeamManagementService(db) + >>> # Service handles team CRUD operations +""" + +# Standard +from datetime import timedelta +from typing import List, Optional, Tuple + +# Third-Party +from sqlalchemy.orm import Session + +# First-Party +from mcpgateway.config import settings +from mcpgateway.db import EmailTeam, EmailTeamJoinRequest, EmailTeamMember, EmailUser, utc_now +from mcpgateway.services.logging_service import LoggingService + +# Initialize logging +logging_service = LoggingService() +logger = logging_service.get_logger(__name__) + + +class TeamManagementService: + """Service for team management operations. + + This service handles team creation, membership management, + role assignments, and team access control. + + Attributes: + db (Session): SQLAlchemy database session + + Examples: + >>> from mcpgateway.services.team_management_service import TeamManagementService + >>> from mcpgateway.db import SessionLocal + >>> db = SessionLocal() + >>> service = TeamManagementService(db) + >>> service.db is not None + True + """ + + def __init__(self, db: Session): + """Initialize the team management service. + + Args: + db: SQLAlchemy database session + + Examples: + >>> from mcpgateway.db import SessionLocal + >>> db = SessionLocal() + >>> service = TeamManagementService(db) + >>> isinstance(service.db, Session) + True + """ + self.db = db + + async def create_team(self, name: str, description: Optional[str], created_by: str, visibility: str = "private", max_members: Optional[int] = None) -> EmailTeam: + """Create a new team. + + Args: + name: Team name + description: Team description + created_by: Email of the user creating the team + visibility: Team visibility (private, team, public) + max_members: Maximum number of team members allowed + + Returns: + EmailTeam: The created team + + Raises: + ValueError: If team name is taken or invalid + Exception: If team creation fails + + Examples: + Team creation with automatic owner membership assignment. + """ + try: + # Validate visibility + valid_visibilities = ["private", "public"] + if visibility not in valid_visibilities: + raise ValueError(f"Invalid visibility. Must be one of: {', '.join(valid_visibilities)}") + + # Apply default max members from settings + if max_members is None: + max_members = getattr(settings, "max_members_per_team", 100) + + # Create the team (slug will be auto-generated by event listener) + team = EmailTeam(name=name, description=description, created_by=created_by, is_personal=False, visibility=visibility, max_members=max_members, is_active=True) + + self.db.add(team) + self.db.flush() # Get the team ID + + # Add the creator as owner + membership = EmailTeamMember(team_id=team.id, user_email=created_by, role="owner", joined_at=utc_now(), is_active=True) + + self.db.add(membership) + self.db.commit() + + logger.info(f"Created team '{team.name}' by {created_by}") + return team + + except Exception as e: + self.db.rollback() + logger.error(f"Failed to create team '{name}': {e}") + raise + + async def get_team_by_id(self, team_id: str) -> Optional[EmailTeam]: + """Get a team by ID. + + Args: + team_id: Team ID to lookup + + Returns: + EmailTeam: The team or None if not found + + Examples: + Used for team lookup and validation operations. + """ + try: + team = self.db.query(EmailTeam).filter(EmailTeam.id == team_id, EmailTeam.is_active.is_(True)).first() + + return team + + except Exception as e: + logger.error(f"Failed to get team by ID {team_id}: {e}") + return None + + async def get_team_by_slug(self, slug: str) -> Optional[EmailTeam]: + """Get a team by slug. + + Args: + slug: Team slug to lookup + + Returns: + EmailTeam: The team or None if not found + + Examples: + Used for URL-friendly team access and routing. + """ + try: + team = self.db.query(EmailTeam).filter(EmailTeam.slug == slug, EmailTeam.is_active.is_(True)).first() + + return team + + except Exception as e: + logger.error(f"Failed to get team by slug {slug}: {e}") + return None + + async def update_team( + self, team_id: str, name: Optional[str] = None, description: Optional[str] = None, visibility: Optional[str] = None, max_members: Optional[int] = None, updated_by: Optional[str] = None + ) -> bool: + """Update team information. + + Args: + team_id: ID of the team to update + name: New team name + description: New team description + visibility: New visibility setting + max_members: New maximum member limit + updated_by: Email of user making the update + + Returns: + bool: True if update succeeded, False otherwise + + Raises: + ValueError: If visibility setting is invalid + + Examples: + Update team settings and configuration. + """ + try: + team = await self.get_team_by_id(team_id) + if not team: + logger.warning(f"Team {team_id} not found for update") + return False + + # Prevent updating personal teams + if team.is_personal: + logger.warning(f"Cannot update personal team {team_id}") + return False + + # Update fields if provided + if name is not None: + team.name = name + # Slug will be updated by event listener if name changes + + if description is not None: + team.description = description + + if visibility is not None: + valid_visibilities = ["private", "public"] + if visibility not in valid_visibilities: + raise ValueError(f"Invalid visibility. Must be one of: {', '.join(valid_visibilities)}") + team.visibility = visibility + + if max_members is not None: + team.max_members = max_members + + team.updated_at = utc_now() + self.db.commit() + + logger.info(f"Updated team {team_id} by {updated_by}") + return True + + except Exception as e: + self.db.rollback() + logger.error(f"Failed to update team {team_id}: {e}") + return False + + async def delete_team(self, team_id: str, deleted_by: str) -> bool: + """Delete a team (soft delete). + + Args: + team_id: ID of the team to delete + deleted_by: Email of user performing deletion + + Returns: + bool: True if deletion succeeded, False otherwise + + Raises: + ValueError: If attempting to delete a personal team + + Examples: + Team deletion with membership cleanup. + """ + try: + team = await self.get_team_by_id(team_id) + if not team: + logger.warning(f"Team {team_id} not found for deletion") + return False + + # Prevent deleting personal teams + if team.is_personal: + logger.warning(f"Cannot delete personal team {team_id}") + raise ValueError("Personal teams cannot be deleted") + + # Soft delete the team + team.is_active = False + team.updated_at = utc_now() + + # Deactivate all memberships + self.db.query(EmailTeamMember).filter(EmailTeamMember.team_id == team_id).update({"is_active": False}) + + self.db.commit() + + logger.info(f"Deleted team {team_id} by {deleted_by}") + return True + + except Exception as e: + self.db.rollback() + logger.error(f"Failed to delete team {team_id}: {e}") + return False + + async def add_member_to_team(self, team_id: str, user_email: str, role: str = "member", invited_by: Optional[str] = None) -> bool: + """Add a member to a team. + + Args: + team_id: ID of the team + user_email: Email of the user to add + role: Role to assign (owner, member) + invited_by: Email of user who added this member + + Returns: + bool: True if member was added successfully, False otherwise + + Raises: + ValueError: If role is invalid or team member limit exceeded + + Examples: + Team membership management with role assignment. + """ + try: + # Validate role + valid_roles = ["owner", "member"] + if role not in valid_roles: + raise ValueError(f"Invalid role. Must be one of: {', '.join(valid_roles)}") + + # Check if team exists + team = await self.get_team_by_id(team_id) + if not team: + logger.warning(f"Team {team_id} not found") + return False + + # Check if user exists + user = self.db.query(EmailUser).filter(EmailUser.email == user_email).first() + if not user: + logger.warning(f"User {user_email} not found") + return False + + # Check if user is already a member + existing_membership = self.db.query(EmailTeamMember).filter(EmailTeamMember.team_id == team_id, EmailTeamMember.user_email == user_email).first() + + if existing_membership and existing_membership.is_active: + logger.warning(f"User {user_email} is already a member of team {team_id}") + return False + + # Check team member limit + if team.max_members: + current_member_count = self.db.query(EmailTeamMember).filter(EmailTeamMember.team_id == team_id, EmailTeamMember.is_active.is_(True)).count() + + if current_member_count >= team.max_members: + logger.warning(f"Team {team_id} has reached maximum member limit") + raise ValueError(f"Team has reached maximum member limit of {team.max_members}") + + # Add or reactivate membership + if existing_membership: + existing_membership.is_active = True + existing_membership.role = role + existing_membership.joined_at = utc_now() + existing_membership.invited_by = invited_by + else: + membership = EmailTeamMember(team_id=team_id, user_email=user_email, role=role, joined_at=utc_now(), invited_by=invited_by, is_active=True) + self.db.add(membership) + + self.db.commit() + + logger.info(f"Added {user_email} to team {team_id} with role {role}") + return True + + except Exception as e: + self.db.rollback() + logger.error(f"Failed to add {user_email} to team {team_id}: {e}") + return False + + async def remove_member_from_team(self, team_id: str, user_email: str, removed_by: Optional[str] = None) -> bool: + """Remove a member from a team. + + Args: + team_id: ID of the team + user_email: Email of the user to remove + removed_by: Email of user performing the removal + + Returns: + bool: True if member was removed successfully, False otherwise + + Raises: + ValueError: If attempting to remove the last owner + + Examples: + Team membership management with role-based access control. + """ + try: + team = await self.get_team_by_id(team_id) + if not team: + logger.warning(f"Team {team_id} not found") + return False + + # Prevent removing members from personal teams + if team.is_personal: + logger.warning(f"Cannot remove members from personal team {team_id}") + return False + + # Find the membership + membership = self.db.query(EmailTeamMember).filter(EmailTeamMember.team_id == team_id, EmailTeamMember.user_email == user_email, EmailTeamMember.is_active.is_(True)).first() + + if not membership: + logger.warning(f"User {user_email} is not a member of team {team_id}") + return False + + # Prevent removing the last owner + if membership.role == "owner": + owner_count = self.db.query(EmailTeamMember).filter(EmailTeamMember.team_id == team_id, EmailTeamMember.role == "owner", EmailTeamMember.is_active.is_(True)).count() + + if owner_count <= 1: + logger.warning(f"Cannot remove the last owner from team {team_id}") + raise ValueError("Cannot remove the last owner from a team") + + # Remove membership (soft delete) + membership.is_active = False + self.db.commit() + + logger.info(f"Removed {user_email} from team {team_id} by {removed_by}") + return True + + except Exception as e: + self.db.rollback() + logger.error(f"Failed to remove {user_email} from team {team_id}: {e}") + return False + + async def update_member_role(self, team_id: str, user_email: str, new_role: str, updated_by: Optional[str] = None) -> bool: + """Update a team member's role. + + Args: + team_id: ID of the team + user_email: Email of the user whose role to update + new_role: New role to assign + updated_by: Email of user making the change + + Returns: + bool: True if role was updated successfully, False otherwise + + Raises: + ValueError: If role is invalid or removing last owner role + + Examples: + Role management within teams for access control. + """ + try: + # Validate role + valid_roles = ["owner", "member"] + if new_role not in valid_roles: + raise ValueError(f"Invalid role. Must be one of: {', '.join(valid_roles)}") + + team = await self.get_team_by_id(team_id) + if not team: + logger.warning(f"Team {team_id} not found") + return False + + # Prevent updating roles in personal teams + if team.is_personal: + logger.warning(f"Cannot update roles in personal team {team_id}") + return False + + # Find the membership + membership = self.db.query(EmailTeamMember).filter(EmailTeamMember.team_id == team_id, EmailTeamMember.user_email == user_email, EmailTeamMember.is_active.is_(True)).first() + + if not membership: + logger.warning(f"User {user_email} is not a member of team {team_id}") + return False + + # Prevent changing the role of the last owner to non-owner + if membership.role == "owner" and new_role != "owner": + owner_count = self.db.query(EmailTeamMember).filter(EmailTeamMember.team_id == team_id, EmailTeamMember.role == "owner", EmailTeamMember.is_active.is_(True)).count() + + if owner_count <= 1: + logger.warning(f"Cannot remove owner role from the last owner of team {team_id}") + raise ValueError("Cannot remove owner role from the last owner of a team") + + # Update the role + membership.role = new_role + self.db.commit() + + logger.info(f"Updated role of {user_email} in team {team_id} to {new_role} by {updated_by}") + return True + + except Exception as e: + self.db.rollback() + logger.error(f"Failed to update role of {user_email} in team {team_id}: {e}") + return False + + async def get_user_teams(self, user_email: str, include_personal: bool = True) -> List[EmailTeam]: + """Get all teams a user belongs to. + + Args: + user_email: Email of the user + include_personal: Whether to include personal teams + + Returns: + List[EmailTeam]: List of teams the user belongs to + + Examples: + User dashboard showing team memberships. + """ + try: + query = self.db.query(EmailTeam).join(EmailTeamMember).filter(EmailTeamMember.user_email == user_email, EmailTeamMember.is_active.is_(True), EmailTeam.is_active.is_(True)) + + if not include_personal: + query = query.filter(EmailTeam.is_personal.is_(False)) + + teams = query.all() + return teams + + except Exception as e: + logger.error(f"Failed to get teams for user {user_email}: {e}") + return [] + + async def get_team_members(self, team_id: str) -> List[Tuple[EmailUser, EmailTeamMember]]: + """Get all members of a team. + + Args: + team_id: ID of the team + + Returns: + List[Tuple[EmailUser, EmailTeamMember]]: List of (user, membership) tuples + + Examples: + Team member management and role display. + """ + try: + members = ( + self.db.query(EmailUser, EmailTeamMember) + .join(EmailTeamMember, EmailUser.email == EmailTeamMember.user_email) + .filter(EmailTeamMember.team_id == team_id, EmailTeamMember.is_active.is_(True)) + .all() + ) + + return members + + except Exception as e: + logger.error(f"Failed to get members for team {team_id}: {e}") + return [] + + async def get_user_role_in_team(self, user_email: str, team_id: str) -> Optional[str]: + """Get a user's role in a specific team. + + Args: + user_email: Email of the user + team_id: ID of the team + + Returns: + str: User's role or None if not a member + + Examples: + Access control and permission checking. + """ + try: + membership = self.db.query(EmailTeamMember).filter(EmailTeamMember.user_email == user_email, EmailTeamMember.team_id == team_id, EmailTeamMember.is_active.is_(True)).first() + + return membership.role if membership else None + + except Exception as e: + logger.error(f"Failed to get role for {user_email} in team {team_id}: {e}") + return None + + async def list_teams(self, limit: int = 100, offset: int = 0, visibility_filter: Optional[str] = None) -> Tuple[List[EmailTeam], int]: + """List teams with pagination. + + Args: + limit: Maximum number of teams to return + offset: Number of teams to skip + visibility_filter: Filter by visibility (private, team, public) + + Returns: + Tuple[List[EmailTeam], int]: (teams, total_count) + + Examples: + Team discovery and administration. + """ + try: + query = self.db.query(EmailTeam).filter(EmailTeam.is_active.is_(True), EmailTeam.is_personal.is_(False)) # Exclude personal teams from listings + + if visibility_filter: + query = query.filter(EmailTeam.visibility == visibility_filter) + + total_count = query.count() + teams = query.offset(offset).limit(limit).all() + + return teams, total_count + + except Exception as e: + logger.error(f"Failed to list teams: {e}") + return [], 0 + + async def discover_public_teams(self, user_email: str, skip: int = 0, limit: int = 50) -> List[EmailTeam]: + """Discover public teams that user can join. + + Args: + user_email: Email of the user discovering teams + skip: Number of teams to skip for pagination + limit: Maximum number of teams to return + + Returns: + List[EmailTeam]: List of public teams user can join + + Raises: + Exception: If discovery fails + """ + try: + # Get teams where user is not already a member + user_team_ids = [result[0] for result in self.db.query(EmailTeamMember.team_id).filter(EmailTeamMember.user_email == user_email, EmailTeamMember.is_active.is_(True)).all()] + + query = self.db.query(EmailTeam).filter(EmailTeam.visibility == "public", EmailTeam.is_active.is_(True), EmailTeam.is_personal.is_(False)) + + if user_team_ids: + query = query.filter(EmailTeam.id.notin_(user_team_ids)) + + return query.offset(skip).limit(limit).all() + + except Exception as e: + logger.error(f"Failed to discover public teams for {user_email}: {e}") + return [] + + async def create_join_request(self, team_id: str, user_email: str, message: Optional[str] = None) -> "EmailTeamJoinRequest": + """Create a request to join a public team. + + Args: + team_id: ID of the team to join + user_email: Email of the user requesting to join + message: Optional message to team owners + + Returns: + EmailTeamJoinRequest: Created join request + + Raises: + ValueError: If team not found, not public, or user already member/has pending request + """ + try: + + # Validate team + team = await self.get_team_by_id(team_id) + if not team: + raise ValueError("Team not found") + + if team.visibility != "public": + raise ValueError("Can only request to join public teams") + + # Check if user is already a member + existing_member = self.db.query(EmailTeamMember).filter(EmailTeamMember.team_id == team_id, EmailTeamMember.user_email == user_email, EmailTeamMember.is_active.is_(True)).first() + + if existing_member: + raise ValueError("User is already a member of this team") + + # Check for existing pending request + existing_request = ( + self.db.query(EmailTeamJoinRequest).filter(EmailTeamJoinRequest.team_id == team_id, EmailTeamJoinRequest.user_email == user_email, EmailTeamJoinRequest.status == "pending").first() + ) + + if existing_request and not existing_request.is_expired(): + raise ValueError("User already has a pending join request for this team") + + # Create join request + join_request = EmailTeamJoinRequest(team_id=team_id, user_email=user_email, message=message, expires_at=utc_now() + timedelta(days=7)) # 7 day expiry + + self.db.add(join_request) + self.db.commit() + self.db.refresh(join_request) + + logger.info(f"Created join request for user {user_email} to team {team_id}") + return join_request + + except Exception as e: + self.db.rollback() + logger.error(f"Failed to create join request: {e}") + raise + + async def list_join_requests(self, team_id: str) -> List["EmailTeamJoinRequest"]: + """List pending join requests for a team. + + Args: + team_id: ID of the team + + Returns: + List[EmailTeamJoinRequest]: List of pending join requests + """ + try: + return ( + self.db.query(EmailTeamJoinRequest).filter(EmailTeamJoinRequest.team_id == team_id, EmailTeamJoinRequest.status == "pending").order_by(EmailTeamJoinRequest.requested_at.desc()).all() + ) + + except Exception as e: + logger.error(f"Failed to list join requests for team {team_id}: {e}") + return [] + + async def approve_join_request(self, request_id: str, approved_by: str) -> Optional[EmailTeamMember]: + """Approve a team join request. + + Args: + request_id: ID of the join request + approved_by: Email of the user approving the request + + Returns: + EmailTeamMember: New team member or None if request not found + + Raises: + ValueError: If request not found, expired, or already processed + """ + try: + # Get join request + join_request = self.db.query(EmailTeamJoinRequest).filter(EmailTeamJoinRequest.id == request_id, EmailTeamJoinRequest.status == "pending").first() + + if not join_request: + raise ValueError("Join request not found or already processed") + + if join_request.is_expired(): + join_request.status = "expired" + self.db.commit() + raise ValueError("Join request has expired") + + # Add user to team + member = EmailTeamMember(team_id=join_request.team_id, user_email=join_request.user_email, role="member", invited_by=approved_by, joined_at=utc_now()) # New joiners are always members + + self.db.add(member) + + # Update join request status + join_request.status = "approved" + join_request.reviewed_at = utc_now() + join_request.reviewed_by = approved_by + + self.db.commit() + self.db.refresh(member) + + logger.info(f"Approved join request {request_id}: user {join_request.user_email} joined team {join_request.team_id}") + return member + + except Exception as e: + self.db.rollback() + logger.error(f"Failed to approve join request {request_id}: {e}") + raise + + async def reject_join_request(self, request_id: str, rejected_by: str) -> bool: + """Reject a team join request. + + Args: + request_id: ID of the join request + rejected_by: Email of the user rejecting the request + + Returns: + bool: True if request was rejected successfully + + Raises: + ValueError: If request not found or already processed + """ + try: + # Get join request + join_request = self.db.query(EmailTeamJoinRequest).filter(EmailTeamJoinRequest.id == request_id, EmailTeamJoinRequest.status == "pending").first() + + if not join_request: + raise ValueError("Join request not found or already processed") + + # Update join request status + join_request.status = "rejected" + join_request.reviewed_at = utc_now() + join_request.reviewed_by = rejected_by + + self.db.commit() + + logger.info(f"Rejected join request {request_id}: user {join_request.user_email} for team {join_request.team_id}") + return True + + except Exception as e: + self.db.rollback() + logger.error(f"Failed to reject join request {request_id}: {e}") + raise + + async def get_user_join_requests(self, user_email: str, team_id: Optional[str] = None) -> List["EmailTeamJoinRequest"]: + """Get join requests made by a user. + + Args: + user_email: Email of the user + team_id: Optional team ID to filter requests + + Returns: + List[EmailTeamJoinRequest]: List of join requests made by the user + + Examples: + Get all requests made by a user or for a specific team. + """ + try: + query = self.db.query(EmailTeamJoinRequest).filter(EmailTeamJoinRequest.user_email == user_email) + + if team_id: + query = query.filter(EmailTeamJoinRequest.team_id == team_id) + + requests = query.all() + return requests + + except Exception as e: + logger.error(f"Failed to get join requests for user {user_email}: {e}") + return [] + + async def cancel_join_request(self, request_id: str, user_email: str) -> bool: + """Cancel a join request. + + Args: + request_id: ID of the join request to cancel + user_email: Email of the user canceling the request + + Returns: + bool: True if canceled successfully, False otherwise + + Examples: + Allow users to cancel their pending join requests. + """ + try: + # Get the join request + join_request = ( + self.db.query(EmailTeamJoinRequest).filter(EmailTeamJoinRequest.id == request_id, EmailTeamJoinRequest.user_email == user_email, EmailTeamJoinRequest.status == "pending").first() + ) + + if not join_request: + logger.warning(f"Join request {request_id} not found for user {user_email} or not pending") + return False + + # Update join request status + join_request.status = "cancelled" + join_request.reviewed_at = utc_now() + join_request.reviewed_by = user_email + + self.db.commit() + + logger.info(f"Cancelled join request {request_id} by user {user_email}") + return True + + except Exception as e: + self.db.rollback() + logger.error(f"Failed to cancel join request {request_id}: {e}") + return False diff --git a/mcpgateway/services/token_catalog_service.py b/mcpgateway/services/token_catalog_service.py new file mode 100644 index 000000000..b465fbe28 --- /dev/null +++ b/mcpgateway/services/token_catalog_service.py @@ -0,0 +1,678 @@ +# -*- coding: utf-8 -*- +"""Location: ./mcpgateway/services/token_catalog_service.py +Copyright 2025 +SPDX-License-Identifier: Apache-2.0 +Authors: Mihai Criveti + +Token Catalog Service. +This module provides comprehensive API token management with scoping, +revocation, usage tracking, and analytics for email-based users. + +Examples: + >>> from mcpgateway.services.token_catalog_service import TokenCatalogService + >>> service = TokenCatalogService(None) # Mock database for doctest + >>> # Service provides full token lifecycle management +""" + +# Standard +from datetime import datetime, timedelta, timezone +import hashlib +from typing import List, Optional +import uuid + +# Third-Party +import jwt +from sqlalchemy import and_, or_, select +from sqlalchemy.orm import Session + +# First-Party +from mcpgateway.config import settings +from mcpgateway.db import EmailApiToken, EmailUser, TokenRevocation, TokenUsageLog, utc_now +from mcpgateway.services.logging_service import LoggingService + +# Initialize logging +logging_service = LoggingService() +logger = logging_service.get_logger(__name__) + + +class TokenScope: + """Token scoping configuration for fine-grained access control. + + This class encapsulates token scoping parameters including + server restrictions, permissions, IP limitations, and usage quotas. + + Attributes: + server_id (Optional[str]): Limit token to specific server + permissions (List[str]): Specific permission scopes + ip_restrictions (List[str]): IP address/CIDR restrictions + time_restrictions (dict): Time-based access limitations + usage_limits (dict): Rate limiting and quota settings + + Examples: + >>> scope = TokenScope( + ... server_id="prod-server-123", + ... permissions=["tools.read", "resources.read"], + ... ip_restrictions=["192.168.1.0/24"], + ... time_restrictions={"business_hours_only": True} + ... ) + >>> scope.is_server_scoped() + True + >>> scope.has_permission("tools.read") + True + """ + + def __init__( + self, + server_id: Optional[str] = None, + permissions: Optional[List[str]] = None, + ip_restrictions: Optional[List[str]] = None, + time_restrictions: Optional[dict] = None, + usage_limits: Optional[dict] = None, + ): + """Initialize TokenScope with specified restrictions and limits. + + Args: + server_id: Optional server ID to scope token to specific server + permissions: List of permissions granted to this token + ip_restrictions: List of IP addresses/ranges allowed to use token + time_restrictions: Dictionary of time-based access restrictions + usage_limits: Dictionary of usage limits for the token + """ + self.server_id = server_id + self.permissions = permissions or [] + self.ip_restrictions = ip_restrictions or [] + self.time_restrictions = time_restrictions or {} + self.usage_limits = usage_limits or {} + + def is_server_scoped(self) -> bool: + """Check if token is scoped to a specific server. + + Returns: + bool: True if scoped to a server, False otherwise. + """ + return self.server_id is not None + + def has_permission(self, permission: str) -> bool: + """Check if scope includes specific permission. + + Args: + permission: Permission string to check for. + + Returns: + bool: True if permission is included, False otherwise. + """ + return permission in self.permissions + + def to_dict(self) -> dict: + """Convert scope to dictionary for JSON storage. + + Returns: + dict: Dictionary representation of the token scope. + """ + return {"server_id": self.server_id, "permissions": self.permissions, "ip_restrictions": self.ip_restrictions, "time_restrictions": self.time_restrictions, "usage_limits": self.usage_limits} + + @classmethod + def from_dict(cls, data: dict) -> "TokenScope": + """Create TokenScope from dictionary. + + Args: + data: Dictionary containing scope configuration. + + Returns: + TokenScope: New TokenScope instance. + """ + return cls( + server_id=data.get("server_id"), + permissions=data.get("permissions", []), + ip_restrictions=data.get("ip_restrictions", []), + time_restrictions=data.get("time_restrictions", {}), + usage_limits=data.get("usage_limits", {}), + ) + + +class TokenCatalogService: + """Service for managing user API token catalogs. + + This service provides comprehensive token lifecycle management including + creation, scoping, revocation, usage tracking, and analytics. + + Attributes: + db (Session): SQLAlchemy database session + + Examples: + >>> from mcpgateway.services.token_catalog_service import TokenCatalogService + >>> service = TokenCatalogService(None) # Mock database for doctest + >>> service.db is None + True + """ + + def __init__(self, db: Session): + """Initialize TokenCatalogService with database session. + + Args: + db: SQLAlchemy database session for token operations + """ + self.db = db + + def _generate_token(self, user_email: str, team_id: Optional[str] = None, expires_at: Optional[datetime] = None, scope: Optional["TokenScope"] = None, user: Optional[object] = None) -> str: + """Generate a JWT token for API access. + + Args: + user_email: User's email address for the token subject + team_id: Optional team ID for team-scoped tokens + expires_at: Optional expiration datetime + scope: Optional token scope information + user: Optional user object to extract admin privileges + + Returns: + str: JWT token string + + Examples: + >>> service = TokenCatalogService(None) + >>> token = service._generate_token("user@example.com") + >>> isinstance(token, str) and len(token) > 100 + True + """ + now = datetime.now(timezone.utc) + + # Build JWT payload with required claims + payload = { + "sub": user_email, # Subject (user email) + "iss": settings.jwt_issuer, # Issuer + "aud": settings.jwt_audience, # Audience + "iat": int(now.timestamp()), # Issued at + "jti": str(uuid.uuid4()), # JWT ID for uniqueness + "user": {"email": user_email, "full_name": "API Token User", "is_admin": user.is_admin if user else False, "auth_provider": "api_token"}, # Use actual admin status if user provided + "teams": [team_id] if team_id else [], + "namespaces": [f"user:{user_email}", "public"] + ([f"team:{team_id}"] if team_id else []), + } + + # Add expiration if specified + if expires_at: + payload["exp"] = int(expires_at.timestamp()) + + # Add scoping information if available + if scope: + payload["scopes"] = { + "server_id": scope.server_id, + "permissions": scope.permissions or ["*"], + "ip_restrictions": scope.ip_restrictions or [], + "time_restrictions": scope.time_restrictions or {}, + } + else: + payload["scopes"] = { + "server_id": None, + "permissions": ["*"], + "ip_restrictions": [], + "time_restrictions": {}, + } + + # Generate JWT token + return jwt.encode(payload, settings.jwt_secret_key, algorithm=settings.jwt_algorithm) + + def _hash_token(self, token: str) -> str: + """Create secure hash of token for storage. + + Args: + token: Raw token string + + Returns: + str: SHA-256 hash of token + + Examples: + >>> service = TokenCatalogService(None) + >>> hash_val = service._hash_token("test_token") + >>> len(hash_val) == 64 + True + """ + return hashlib.sha256(token.encode()).hexdigest() + + async def create_token( + self, + user_email: str, + name: str, + description: Optional[str] = None, + scope: Optional[TokenScope] = None, + expires_in_days: Optional[int] = None, + tags: Optional[List[str]] = None, + team_id: Optional[str] = None, + ) -> tuple[EmailApiToken, str]: + """Create a new API token for user or team. + + Args: + user_email: Owner's email address + name: Human-readable token name + description: Optional token description + scope: Token scoping configuration + expires_in_days: Optional expiry in days + tags: Optional organizational tags + team_id: Optional team ID for team-scoped tokens + + Returns: + Tuple of (EmailApiToken, raw_token_string) + + Raises: + ValueError: If user not found, token name exists, or team access denied + + Examples: + >>> # This method requires database operations, shown for reference + >>> service = TokenCatalogService(None) # Would use real DB session + >>> # token, raw_token = await service.create_token(...) + >>> # Returns (EmailApiToken, raw_token_string) tuple + """ + # Validate user exists + user = self.db.execute(select(EmailUser).where(EmailUser.email == user_email)).scalar_one_or_none() + + if not user: + raise ValueError(f"User not found: {user_email}") + + # Validate team access if team_id is provided + if team_id: + # First-Party + from mcpgateway.db import EmailTeam, EmailTeamMember # pylint: disable=import-outside-toplevel + + # Check if team exists + team = self.db.execute(select(EmailTeam).where(EmailTeam.id == team_id)).scalar_one_or_none() + if not team: + raise ValueError(f"Team not found: {team_id}") + + # Check if user is a team OWNER + membership = self.db.execute( + select(EmailTeamMember).where(and_(EmailTeamMember.team_id == team_id, EmailTeamMember.user_email == user_email, EmailTeamMember.role == "owner", EmailTeamMember.is_active.is_(True))) + ).scalar_one_or_none() + + if not membership: + raise ValueError(f"Only team owners can create API keys for team {team_id}") + + # Check for duplicate token name (scoped by user and team) + name_check_conditions = [EmailApiToken.user_email == user_email, EmailApiToken.name == name, EmailApiToken.is_active.is_(True)] + + if team_id: + name_check_conditions.append(EmailApiToken.team_id == team_id) + else: + name_check_conditions.append(EmailApiToken.team_id.is_(None)) + + existing_token = self.db.execute(select(EmailApiToken).where(and_(*name_check_conditions))).scalar_one_or_none() + + if existing_token: + scope_desc = f" for team {team_id}" if team_id else "" + raise ValueError(f"Token name '{name}' already exists{scope_desc}") + + # Calculate expiry + expires_at = None + if expires_in_days: + expires_at = utc_now() + timedelta(days=expires_in_days) + + # Generate JWT token with proper claims and user admin status + raw_token = self._generate_token(user_email=user_email, team_id=team_id, expires_at=expires_at, scope=scope, user=user) + token_hash = self._hash_token(raw_token) + + # Create token record + api_token = EmailApiToken( + user_email=user_email, + name=name, + token_hash=token_hash, + description=description, + expires_at=expires_at, + tags=tags or [], + team_id=team_id, + server_id=scope.server_id if scope else None, + resource_scopes=scope.permissions if scope else [], + ip_restrictions=scope.ip_restrictions if scope else [], + time_restrictions=scope.time_restrictions if scope else {}, + usage_limits=scope.usage_limits if scope else {}, + ) + + self.db.add(api_token) + self.db.commit() + self.db.refresh(api_token) + + scope_desc = f" for team {team_id}" if team_id else "" + logger.info(f"Created API token '{name}' for user {user_email}{scope_desc}") + + return api_token, raw_token + + async def list_user_tokens(self, user_email: str, include_inactive: bool = False, limit: int = 100, offset: int = 0) -> List[EmailApiToken]: + """List API tokens for a user. + + Args: + user_email: User's email address + include_inactive: Include inactive/expired tokens + limit: Maximum tokens to return + offset: Number of tokens to skip + + Returns: + List[EmailApiToken]: User's API tokens + + Examples: + >>> service = TokenCatalogService(None) # Would use real DB session + >>> # Returns List[EmailApiToken] for user + """ + # Validate parameters + if limit <= 0 or limit > 1000: + limit = 50 # Use default + offset = max(offset, 0) # Use default + query = select(EmailApiToken).where(EmailApiToken.user_email == user_email) + + if not include_inactive: + query = query.where(and_(EmailApiToken.is_active.is_(True), or_(EmailApiToken.expires_at.is_(None), EmailApiToken.expires_at > utc_now()))) + + query = query.order_by(EmailApiToken.created_at.desc()).limit(limit).offset(offset) + + result = self.db.execute(query) + return result.scalars().all() + + async def list_team_tokens(self, team_id: str, user_email: str, include_inactive: bool = False, limit: int = 100, offset: int = 0) -> List[EmailApiToken]: + """List API tokens for a team (only accessible by team owners). + + Args: + team_id: Team ID to list tokens for + user_email: User's email (must be team owner) + include_inactive: Include inactive/expired tokens + limit: Maximum tokens to return + offset: Number of tokens to skip + + Returns: + List[EmailApiToken]: Team's API tokens + + Raises: + ValueError: If user is not a team owner + """ + # Validate user is team owner + # First-Party + from mcpgateway.db import EmailTeamMember # pylint: disable=import-outside-toplevel + + membership = self.db.execute( + select(EmailTeamMember).where(and_(EmailTeamMember.team_id == team_id, EmailTeamMember.user_email == user_email, EmailTeamMember.role == "owner", EmailTeamMember.is_active.is_(True))) + ).scalar_one_or_none() + + if not membership: + raise ValueError(f"Only team owners can view team tokens for {team_id}") + + # Validate parameters + if limit <= 0 or limit > 1000: + limit = 50 + offset = max(offset, 0) + + query = select(EmailApiToken).where(EmailApiToken.team_id == team_id) + + if not include_inactive: + query = query.where(and_(EmailApiToken.is_active.is_(True), or_(EmailApiToken.expires_at.is_(None), EmailApiToken.expires_at > utc_now()))) + + query = query.order_by(EmailApiToken.created_at.desc()).limit(limit).offset(offset) + result = self.db.execute(query) + return result.scalars().all() + + async def get_token(self, token_id: str, user_email: Optional[str] = None) -> Optional[EmailApiToken]: + """Get a specific token by ID. + + Args: + token_id: Token ID + user_email: Optional user email filter for security + + Returns: + Optional[EmailApiToken]: Token if found and authorized + + Examples: + >>> service = TokenCatalogService(None) # Would use real DB session + >>> # Returns Optional[EmailApiToken] if found and authorized + """ + query = select(EmailApiToken).where(EmailApiToken.id == token_id) + + if user_email: + query = query.where(EmailApiToken.user_email == user_email) + + result = self.db.execute(query) + return result.scalar_one_or_none() + + async def update_token( + self, token_id: str, user_email: str, name: Optional[str] = None, description: Optional[str] = None, scope: Optional[TokenScope] = None, tags: Optional[List[str]] = None + ) -> Optional[EmailApiToken]: + """Update an existing token. + + Args: + token_id: Token ID to update + user_email: Owner's email for security + name: New token name + description: New description + scope: New scoping configuration + tags: New tags + + Returns: + Optional[EmailApiToken]: Updated token if found + + Raises: + ValueError: If token not found or name conflicts + + Examples: + >>> service = TokenCatalogService(None) # Would use real DB session + >>> # Returns Optional[EmailApiToken] if updated successfully + """ + token = await self.get_token(token_id, user_email) + if not token: + raise ValueError("Token not found or not authorized") + + # Check for duplicate name if changing + if name and name != token.name: + existing = self.db.execute( + select(EmailApiToken).where(and_(EmailApiToken.user_email == user_email, EmailApiToken.name == name, EmailApiToken.id != token_id, EmailApiToken.is_active.is_(True))) + ).scalar_one_or_none() + + if existing: + raise ValueError(f"Token name '{name}' already exists") + + token.name = name + + if description is not None: + token.description = description + + if tags is not None: + token.tags = tags + + if scope: + token.server_id = scope.server_id + token.resource_scopes = scope.permissions + token.ip_restrictions = scope.ip_restrictions + token.time_restrictions = scope.time_restrictions + token.usage_limits = scope.usage_limits + + self.db.commit() + self.db.refresh(token) + + logger.info(f"Updated token '{token.name}' for user {user_email}") + + return token + + async def revoke_token(self, token_id: str, revoked_by: str, reason: Optional[str] = None) -> bool: + """Revoke a token immediately. + + Args: + token_id: Token ID to revoke + revoked_by: Email of user revoking the token + reason: Optional reason for revocation + + Returns: + bool: True if token was revoked + + Examples: + >>> service = TokenCatalogService(None) # Would use real DB session + >>> # Returns bool: True if token was revoked successfully + """ + token = await self.get_token(token_id) + if not token: + return False + + # Mark token as inactive + token.is_active = False + + # Add to blacklist + revocation = TokenRevocation(jti=token.jti, revoked_by=revoked_by, reason=reason) + + self.db.add(revocation) + self.db.commit() + + logger.info(f"Revoked token '{token.name}' (JTI: {token.jti}) by {revoked_by}") + + return True + + async def is_token_revoked(self, jti: str) -> bool: + """Check if a token JTI is revoked. + + Args: + jti: JWT ID to check + + Returns: + bool: True if token is revoked + + Examples: + >>> service = TokenCatalogService(None) # Would use real DB session + >>> # Returns bool: True if token is revoked + """ + revocation = self.db.execute(select(TokenRevocation).where(TokenRevocation.jti == jti)).scalar_one_or_none() + + return revocation is not None + + async def log_token_usage( + self, + jti: str, + user_email: str, + endpoint: Optional[str] = None, + method: Optional[str] = None, + ip_address: Optional[str] = None, + user_agent: Optional[str] = None, + status_code: Optional[int] = None, + response_time_ms: Optional[int] = None, + blocked: bool = False, + block_reason: Optional[str] = None, + ) -> None: + """Log token usage for analytics and security. + + Args: + jti: JWT ID of token used + user_email: Token owner's email + endpoint: API endpoint accessed + method: HTTP method + ip_address: Client IP address + user_agent: Client user agent + status_code: HTTP response status + response_time_ms: Response time in milliseconds + blocked: Whether request was blocked + block_reason: Reason for blocking + + Examples: + >>> service = TokenCatalogService(None) # Would use real DB session + >>> # Logs token usage for analytics - no return value + """ + usage_log = TokenUsageLog( + token_jti=jti, + user_email=user_email, + endpoint=endpoint, + method=method, + ip_address=ip_address, + user_agent=user_agent, + status_code=status_code, + response_time_ms=response_time_ms, + blocked=blocked, + block_reason=block_reason, + ) + + self.db.add(usage_log) + self.db.commit() + + # Update token last_used timestamp + token = self.db.execute(select(EmailApiToken).where(EmailApiToken.jti == jti)).scalar_one_or_none() + + if token: + token.last_used = utc_now() + self.db.commit() + + async def get_token_usage_stats(self, user_email: str, token_id: Optional[str] = None, days: int = 30) -> dict: + """Get token usage statistics. + + Args: + user_email: User's email address + token_id: Optional specific token ID + days: Number of days to analyze + + Returns: + dict: Usage statistics + + Examples: + >>> service = TokenCatalogService(None) # Would use real DB session + >>> # Returns dict with usage statistics + """ + start_date = utc_now() - timedelta(days=days) + + query = select(TokenUsageLog).where(and_(TokenUsageLog.user_email == user_email, TokenUsageLog.timestamp >= start_date)) + + if token_id: + # Get JTI for the token + token = await self.get_token(token_id, user_email) + if token: + query = query.where(TokenUsageLog.token_jti == token.jti) + + usage_logs = self.db.execute(query).scalars().all() + + # Calculate statistics + total_requests = len(usage_logs) + successful_requests = sum(1 for log in usage_logs if log.status_code and log.status_code < 400) + blocked_requests = sum(1 for log in usage_logs if log.blocked) + + # Average response time + response_times = [log.response_time_ms for log in usage_logs if log.response_time_ms] + avg_response_time = sum(response_times) / len(response_times) if response_times else 0 + + # Most accessed endpoints + endpoint_counts = {} + for log in usage_logs: + if log.endpoint: + endpoint_counts[log.endpoint] = endpoint_counts.get(log.endpoint, 0) + 1 + + top_endpoints = sorted(endpoint_counts.items(), key=lambda x: x[1], reverse=True)[:5] + + return { + "period_days": days, + "total_requests": total_requests, + "successful_requests": successful_requests, + "blocked_requests": blocked_requests, + "success_rate": successful_requests / total_requests if total_requests > 0 else 0, + "average_response_time_ms": round(avg_response_time, 2), + "top_endpoints": top_endpoints, + } + + async def get_token_revocation(self, jti: str) -> Optional[TokenRevocation]: + """Get token revocation information by JTI. + + Args: + jti: JWT token ID + + Returns: + Optional[TokenRevocation]: Revocation info if token is revoked + + Examples: + >>> service = TokenCatalogService(None) # Would use real DB session + >>> # Returns Optional[TokenRevocation] if token is revoked + """ + result = self.db.execute(select(TokenRevocation).where(TokenRevocation.jti == jti)) + return result.scalar_one_or_none() + + async def cleanup_expired_tokens(self) -> int: + """Clean up expired tokens. + + Returns: + int: Number of tokens cleaned up + + Examples: + >>> service = TokenCatalogService(None) # Would use real DB session + >>> # Returns int: Number of tokens cleaned up + """ + expired_tokens = self.db.execute(select(EmailApiToken).where(and_(EmailApiToken.expires_at < utc_now(), EmailApiToken.is_active.is_(True)))).scalars().all() + + for token in expired_tokens: + token.is_active = False + + self.db.commit() + + logger.info(f"Cleaned up {len(expired_tokens)} expired tokens") + + return len(expired_tokens) diff --git a/mcpgateway/services/tool_service.py b/mcpgateway/services/tool_service.py index 37ac2c5e4..1707c6174 100644 --- a/mcpgateway/services/tool_service.py +++ b/mcpgateway/services/tool_service.py @@ -19,6 +19,7 @@ import base64 from datetime import datetime, timezone import json +import os import re import time from typing import Any, AsyncGenerator, Dict, List, Optional @@ -30,7 +31,7 @@ from mcp import ClientSession from mcp.client.sse import sse_client from mcp.client.streamable_http import streamablehttp_client -from sqlalchemy import case, delete, desc, Float, func, not_, select +from sqlalchemy import and_, case, delete, desc, Float, func, not_, or_, select from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import Session @@ -47,6 +48,7 @@ from mcpgateway.schemas import ToolCreate, ToolRead, ToolUpdate, TopPerformer from mcpgateway.services.logging_service import LoggingService from mcpgateway.services.oauth_manager import OAuthManager +from mcpgateway.services.team_management_service import TeamManagementService from mcpgateway.utils.create_slug import slugify from mcpgateway.utils.display_name import generate_display_name from mcpgateway.utils.metrics_common import build_top_performers @@ -170,7 +172,15 @@ def __init__(self) -> None: """ self._event_subscribers: List[asyncio.Queue] = [] self._http_client = ResilientHttpClient(client_args={"timeout": settings.federation_timeout, "verify": not settings.skip_ssl_verify}) - self._plugin_manager: PluginManager | None = PluginManager() if settings.plugins_enabled else None + # Initialize plugin manager with env overrides to ease testing + env_flag = os.getenv("PLUGINS_ENABLED") + if env_flag is not None: + env_enabled = env_flag.strip().lower() in {"1", "true", "yes", "on"} + plugins_enabled = env_enabled + else: + plugins_enabled = settings.plugins_enabled + config_file = os.getenv("PLUGIN_CONFIG_FILE", getattr(settings, "plugin_config_file", "plugins/config.yaml")) + self._plugin_manager: PluginManager | None = PluginManager(config_file) if plugins_enabled else None self.oauth_manager = OAuthManager( request_timeout=int(settings.oauth_request_timeout if hasattr(settings, "oauth_request_timeout") else 30), max_retries=int(settings.oauth_max_retries if hasattr(settings, "oauth_max_retries") else 3), @@ -331,8 +341,11 @@ async def register_tool( created_user_agent: Optional[str] = None, import_batch_id: Optional[str] = None, federation_source: Optional[str] = None, + team_id: Optional[str] = None, + owner_email: Optional[str] = None, + visibility: str = "private", ) -> ToolRead: - """Register a new tool. + """Register a new tool with team support. Args: db: Database session. @@ -343,6 +356,9 @@ async def register_tool( created_user_agent: User agent of creation request. import_batch_id: UUID for bulk import operations. federation_source: Source gateway for federated tools. + team_id: Optional team ID to assign tool to. + owner_email: Optional owner email for tool ownership. + visibility: Tool visibility (private, team, public). Returns: Created tool information. @@ -407,6 +423,10 @@ async def register_tool( import_batch_id=import_batch_id, federation_source=federation_source, version=1, + # Team scoping fields + team_id=team_id, + owner_email=owner_email or created_by, + visibility=visibility, ) db.add(db_tool) db.commit() @@ -510,6 +530,75 @@ async def list_server_tools(self, db: Session, server_id: str, include_inactive: tools = db.execute(query).scalars().all() return [self._convert_tool_to_read(t) for t in tools] + async def list_tools_for_user( + self, db: Session, user_email: str, team_id: Optional[str] = None, visibility: Optional[str] = None, include_inactive: bool = False, skip: int = 0, limit: int = 100 + ) -> List[ToolRead]: + """ + List tools user has access to with team filtering. + + Args: + db: Database session + user_email: Email of the user requesting tools + team_id: Optional team ID to filter by specific team + visibility: Optional visibility filter (private, team, public) + include_inactive: Whether to include inactive tools + skip: Number of tools to skip for pagination + limit: Maximum number of tools to return + + Returns: + List[ToolRead]: Tools the user has access to + """ + + # Build query following existing patterns from list_tools() + query = select(DbTool) + + # Apply active/inactive filter + if not include_inactive: + query = query.where(DbTool.enabled.is_(True)) + + if team_id: + # Filter by specific team + query = query.where(DbTool.team_id == team_id) + + # Validate user has access to team + team_service = TeamManagementService(db) + user_teams = await team_service.get_user_teams(user_email) + team_ids = [team.id for team in user_teams] + + if team_id not in team_ids: + return [] # No access to team + else: + # Get user's accessible teams + team_service = TeamManagementService(db) + user_teams = await team_service.get_user_teams(user_email) + team_ids = [team.id for team in user_teams] + + # Build access conditions following existing patterns + + access_conditions = [] + + # 1. User's personal resources (owner_email matches) + access_conditions.append(DbTool.owner_email == user_email) + + # 2. Team resources where user is member + if team_ids: + access_conditions.append(and_(DbTool.team_id.in_(team_ids), DbTool.visibility.in_(["team", "public"]))) + + # 3. Public resources (if visibility allows) + access_conditions.append(DbTool.visibility == "public") + + query = query.where(or_(*access_conditions)) + + # Apply visibility filter if specified + if visibility: + query = query.where(DbTool.visibility == visibility) + + # Apply pagination following existing patterns + query = query.offset(skip).limit(limit) + + tools = db.execute(query).scalars().all() + return [self._convert_tool_to_read(t) for t in tools] + async def get_tool(self, db: Session, tool_id: str) -> ToolRead: """ Retrieve a tool by its ID. diff --git a/mcpgateway/static/admin.js b/mcpgateway/static/admin.js index 3f5b13d47..184133292 100644 --- a/mcpgateway/static/admin.js +++ b/mcpgateway/static/admin.js @@ -568,6 +568,8 @@ function closeModal(modalId, clearId = null) { cleanupGatewayTestModal(); } else if (modalId === "tool-test-modal") { cleanupToolTestModal(); // ADD THIS LINE + } else if (modalId === "prompt-test-modal") { + cleanupPromptTestModal(); } modal.classList.add("hidden"); @@ -3379,27 +3381,87 @@ async function viewServer(serverId) { if (serverDetailsDiv) { const container = document.createElement("div"); container.className = - "space-y-2 dark:bg-gray-900 dark:text-gray-100"; + "space-y-4 dark:bg-gray-900 dark:text-gray-100"; + + // Header section with server name and icon + const headerDiv = document.createElement("div"); + headerDiv.className = + "flex items-center space-x-3 pb-4 border-b border-gray-200 dark:border-gray-600"; + + if (server.icon) { + const iconImg = document.createElement("img"); + iconImg.src = server.icon; + iconImg.alt = `${server.name} icon`; + iconImg.className = "w-12 h-12 rounded-lg object-cover"; + iconImg.onerror = function () { + this.style.display = "none"; + }; + headerDiv.appendChild(iconImg); + } + + const headerTextDiv = document.createElement("div"); + const serverTitle = document.createElement("h2"); + serverTitle.className = + "text-xl font-bold text-gray-900 dark:text-gray-100"; + serverTitle.textContent = server.name; + headerTextDiv.appendChild(serverTitle); + + if (server.description) { + const serverDesc = document.createElement("p"); + serverDesc.className = + "text-sm text-gray-600 dark:text-gray-400 mt-1"; + serverDesc.textContent = server.description; + headerTextDiv.appendChild(serverDesc); + } + + headerDiv.appendChild(headerTextDiv); + container.appendChild(headerDiv); + + // Basic information section + const basicInfoDiv = document.createElement("div"); + basicInfoDiv.className = "space-y-2"; + + const basicInfoTitle = document.createElement("strong"); + basicInfoTitle.textContent = "Basic Information:"; + basicInfoTitle.className = + "block text-gray-900 dark:text-gray-100 mb-3"; + basicInfoDiv.appendChild(basicInfoTitle); const fields = [ - { label: "Name", value: server.name }, - { label: "URL", value: server.url }, - { label: "Description", value: server.description || "N/A" }, + { label: "Server ID", value: server.id }, + { label: "URL", value: server.url || "N/A" }, + { label: "Type", value: "Virtual Server" }, ]; fields.forEach((field) => { const p = document.createElement("p"); + p.className = "text-sm"; const strong = document.createElement("strong"); strong.textContent = field.label + ": "; + strong.className = + "font-medium text-gray-700 dark:text-gray-300"; p.appendChild(strong); - p.appendChild(document.createTextNode(field.value)); - container.appendChild(p); + const valueSpan = document.createElement("span"); + valueSpan.textContent = field.value; + valueSpan.className = "text-gray-600 dark:text-gray-400"; + p.appendChild(valueSpan); + basicInfoDiv.appendChild(p); }); + container.appendChild(basicInfoDiv); + + // Tags and Status section + const tagsStatusDiv = document.createElement("div"); + tagsStatusDiv.className = + "flex items-center justify-between space-y-2"; + // Tags section const tagsP = document.createElement("p"); + tagsP.className = "text-sm"; const tagsStrong = document.createElement("strong"); tagsStrong.textContent = "Tags: "; + tagsStrong.className = + "font-medium text-gray-700 dark:text-gray-300"; tagsP.appendChild(tagsStrong); if (server.tags && server.tags.length > 0) { @@ -3411,25 +3473,229 @@ async function viewServer(serverId) { tagsP.appendChild(tagSpan); }); } else { - tagsP.appendChild(document.createTextNode("None")); + const noneSpan = document.createElement("span"); + noneSpan.textContent = "None"; + noneSpan.className = "text-gray-500 dark:text-gray-400"; + tagsP.appendChild(noneSpan); } - container.appendChild(tagsP); - // Status + // Status section const statusP = document.createElement("p"); + statusP.className = "text-sm"; const statusStrong = document.createElement("strong"); statusStrong.textContent = "Status: "; + statusStrong.className = + "font-medium text-gray-700 dark:text-gray-300"; statusP.appendChild(statusStrong); const statusSpan = document.createElement("span"); statusSpan.className = `px-2 inline-flex text-xs leading-5 font-semibold rounded-full ${ server.isActive - ? "bg-green-100 text-green-800" - : "bg-red-100 text-red-800" + ? "bg-green-100 text-green-800 dark:bg-green-900 dark:text-green-300" + : "bg-red-100 text-red-800 dark:bg-red-900 dark:text-red-300" }`; statusSpan.textContent = server.isActive ? "Active" : "Inactive"; statusP.appendChild(statusSpan); - container.appendChild(statusP); + + tagsStatusDiv.appendChild(tagsP); + tagsStatusDiv.appendChild(statusP); + container.appendChild(tagsStatusDiv); + + // Associated Tools, Resources, and Prompts section + const associatedDiv = document.createElement("div"); + associatedDiv.className = "mt-6 border-t pt-4"; + + const associatedTitle = document.createElement("strong"); + associatedTitle.textContent = "Associated Items:"; + associatedDiv.appendChild(associatedTitle); + + // Tools section + if (server.associatedTools && server.associatedTools.length > 0) { + const toolsSection = document.createElement("div"); + toolsSection.className = "mt-3"; + + const toolsLabel = document.createElement("p"); + const toolsStrong = document.createElement("strong"); + toolsStrong.textContent = "Tools: "; + toolsLabel.appendChild(toolsStrong); + + const toolsList = document.createElement("div"); + toolsList.className = "mt-1 space-y-1"; + + server.associatedTools.forEach((toolId) => { + const toolItem = document.createElement("div"); + toolItem.className = "flex items-center space-x-2"; + + const toolBadge = document.createElement("span"); + toolBadge.className = + "inline-block bg-green-100 text-green-800 text-xs px-2 py-1 rounded-full dark:bg-green-900 dark:text-green-200"; + toolBadge.textContent = + window.toolMapping && window.toolMapping[toolId] + ? window.toolMapping[toolId] + : toolId; + + const toolIdSpan = document.createElement("span"); + toolIdSpan.className = + "text-xs text-gray-500 dark:text-gray-400"; + toolIdSpan.textContent = `(${toolId})`; + + toolItem.appendChild(toolBadge); + toolItem.appendChild(toolIdSpan); + toolsList.appendChild(toolItem); + }); + + toolsLabel.appendChild(toolsList); + toolsSection.appendChild(toolsLabel); + associatedDiv.appendChild(toolsSection); + } + + // Resources section + if ( + server.associatedResources && + server.associatedResources.length > 0 + ) { + const resourcesSection = document.createElement("div"); + resourcesSection.className = "mt-3"; + + const resourcesLabel = document.createElement("p"); + const resourcesStrong = document.createElement("strong"); + resourcesStrong.textContent = "Resources: "; + resourcesLabel.appendChild(resourcesStrong); + + const resourcesList = document.createElement("div"); + resourcesList.className = "mt-1 space-y-1"; + + server.associatedResources.forEach((resourceId) => { + const resourceItem = document.createElement("div"); + resourceItem.className = "flex items-center space-x-2"; + + const resourceBadge = document.createElement("span"); + resourceBadge.className = + "inline-block bg-blue-100 text-blue-800 text-xs px-2 py-1 rounded-full dark:bg-blue-900 dark:text-blue-200"; + resourceBadge.textContent = + window.resourceMapping && + window.resourceMapping[resourceId] + ? window.resourceMapping[resourceId] + : `Resource ${resourceId}`; + + const resourceIdSpan = document.createElement("span"); + resourceIdSpan.className = + "text-xs text-gray-500 dark:text-gray-400"; + resourceIdSpan.textContent = `(${resourceId})`; + + resourceItem.appendChild(resourceBadge); + resourceItem.appendChild(resourceIdSpan); + resourcesList.appendChild(resourceItem); + }); + + resourcesLabel.appendChild(resourcesList); + resourcesSection.appendChild(resourcesLabel); + associatedDiv.appendChild(resourcesSection); + } + + // Prompts section + if ( + server.associatedPrompts && + server.associatedPrompts.length > 0 + ) { + const promptsSection = document.createElement("div"); + promptsSection.className = "mt-3"; + + const promptsLabel = document.createElement("p"); + const promptsStrong = document.createElement("strong"); + promptsStrong.textContent = "Prompts: "; + promptsLabel.appendChild(promptsStrong); + + const promptsList = document.createElement("div"); + promptsList.className = "mt-1 space-y-1"; + + server.associatedPrompts.forEach((promptId) => { + const promptItem = document.createElement("div"); + promptItem.className = "flex items-center space-x-2"; + + const promptBadge = document.createElement("span"); + promptBadge.className = + "inline-block bg-purple-100 text-purple-800 text-xs px-2 py-1 rounded-full dark:bg-purple-900 dark:text-purple-200"; + promptBadge.textContent = + window.promptMapping && window.promptMapping[promptId] + ? window.promptMapping[promptId] + : `Prompt ${promptId}`; + + const promptIdSpan = document.createElement("span"); + promptIdSpan.className = + "text-xs text-gray-500 dark:text-gray-400"; + promptIdSpan.textContent = `(${promptId})`; + + promptItem.appendChild(promptBadge); + promptItem.appendChild(promptIdSpan); + promptsList.appendChild(promptItem); + }); + + promptsLabel.appendChild(promptsList); + promptsSection.appendChild(promptsLabel); + associatedDiv.appendChild(promptsSection); + } + + // A2A Agents section + if ( + server.associatedA2aAgents && + server.associatedA2aAgents.length > 0 + ) { + const agentsSection = document.createElement("div"); + agentsSection.className = "mt-3"; + + const agentsLabel = document.createElement("p"); + const agentsStrong = document.createElement("strong"); + agentsStrong.textContent = "A2A Agents: "; + agentsLabel.appendChild(agentsStrong); + + const agentsList = document.createElement("div"); + agentsList.className = "mt-1 space-y-1"; + + server.associatedA2aAgents.forEach((agentId) => { + const agentItem = document.createElement("div"); + agentItem.className = "flex items-center space-x-2"; + + const agentBadge = document.createElement("span"); + agentBadge.className = + "inline-block bg-orange-100 text-orange-800 text-xs px-2 py-1 rounded-full dark:bg-orange-900 dark:text-orange-200"; + agentBadge.textContent = `Agent ${agentId}`; + + const agentIdSpan = document.createElement("span"); + agentIdSpan.className = + "text-xs text-gray-500 dark:text-gray-400"; + agentIdSpan.textContent = `(${agentId})`; + + agentItem.appendChild(agentBadge); + agentItem.appendChild(agentIdSpan); + agentsList.appendChild(agentItem); + }); + + agentsLabel.appendChild(agentsList); + agentsSection.appendChild(agentsLabel); + associatedDiv.appendChild(agentsSection); + } + + // Show message if no associated items + if ( + (!server.associatedTools || + server.associatedTools.length === 0) && + (!server.associatedResources || + server.associatedResources.length === 0) && + (!server.associatedPrompts || + server.associatedPrompts.length === 0) && + (!server.associatedA2aAgents || + server.associatedA2aAgents.length === 0) + ) { + const noItemsP = document.createElement("p"); + noItemsP.className = + "mt-2 text-sm text-gray-500 dark:text-gray-400"; + noItemsP.textContent = + "No tools, resources, prompts, or A2A agents are currently associated with this server."; + associatedDiv.appendChild(noItemsP); + } + + container.appendChild(associatedDiv); // Add metadata section const metadataDiv = document.createElement("div"); @@ -3445,31 +3711,45 @@ async function viewServer(serverId) { const metadataFields = [ { label: "Created By", - value: server.createdBy || "Legacy Entity", + value: server.created_by || "Legacy Entity", }, { label: "Created At", - value: server.createdAt - ? new Date(server.createdAt).toLocaleString() + value: server.created_at + ? new Date(server.created_at).toLocaleString() : "Pre-metadata", }, { - label: "Created From", - value: server.createdFromIp || "Unknown", + label: "Created From IP", + value: server.created_from_ip || "Unknown", + }, + { + label: "Created Via", + value: server.created_via || "Unknown", }, - { label: "Created Via", value: server.createdVia || "Unknown" }, { label: "Last Modified By", - value: server.modifiedBy || "N/A", + value: server.modified_by || "N/A", }, { label: "Last Modified At", - value: server.modifiedAt - ? new Date(server.modifiedAt).toLocaleString() + value: server.updated_at + ? new Date(server.updated_at).toLocaleString() : "N/A", }, + { + label: "Modified From IP", + value: server.modified_from_ip || "N/A", + }, + { + label: "Modified Via", + value: server.modified_via || "N/A", + }, { label: "Version", value: server.version || "1" }, - { label: "Import Batch", value: server.importBatchId || "N/A" }, + { + label: "Import Batch", + value: server.import_batch_id || "N/A", + }, ]; metadataFields.forEach((field) => { @@ -3570,7 +3850,135 @@ async function editServer(serverId) { tagsField.value = server.tags ? server.tags.join(", ") : ""; } + // Set icon field + const iconField = safeGetElement("edit-server-icon"); + if (iconField) { + iconField.value = server.icon || ""; + } + + // Store server data for modal population + window.currentEditingServer = server; + openModal("server-edit-modal"); + + // Use multiple approaches to ensure checkboxes get set + setEditServerAssociations(server); + setTimeout(() => setEditServerAssociations(server), 100); + setTimeout(() => setEditServerAssociations(server), 300); + + // Set associated items after modal is opened + setTimeout(() => { + console.log("Setting associated items for server:", server.id); + console.log("Associated tools:", server.associatedTools); + console.log("Associated resources:", server.associatedResources); + console.log("Associated prompts:", server.associatedPrompts); + + // Set associated tools checkboxes + const toolCheckboxes = document.querySelectorAll( + 'input[name="associatedTools"]', + ); + console.log("Found", toolCheckboxes.length, "tool checkboxes"); + + toolCheckboxes.forEach((checkbox) => { + const isChecked = + server.associatedTools && + server.associatedTools.includes(checkbox.value); + console.log( + `Tool ${checkbox.value}: ${isChecked ? "CHECKED" : "unchecked"}`, + ); + checkbox.checked = isChecked; + }); + + // Set associated resources checkboxes + const resourceCheckboxes = document.querySelectorAll( + 'input[name="associatedResources"]', + ); + console.log( + "Found", + resourceCheckboxes.length, + "resource checkboxes", + ); + + resourceCheckboxes.forEach((checkbox) => { + const checkboxValue = parseInt(checkbox.value); + const isChecked = + server.associatedResources && + server.associatedResources.includes(checkboxValue); + console.log( + `Resource ${checkboxValue}: ${isChecked ? "CHECKED" : "unchecked"}`, + ); + checkbox.checked = isChecked; + }); + + // Set associated prompts checkboxes + const promptCheckboxes = document.querySelectorAll( + 'input[name="associatedPrompts"]', + ); + console.log("Found", promptCheckboxes.length, "prompt checkboxes"); + + promptCheckboxes.forEach((checkbox) => { + const checkboxValue = parseInt(checkbox.value); + const isChecked = + server.associatedPrompts && + server.associatedPrompts.includes(checkboxValue); + console.log( + `Prompt ${checkboxValue}: ${isChecked ? "CHECKED" : "unchecked"}`, + ); + checkbox.checked = isChecked; + }); + + // Manually trigger the selector update functions to refresh pills + setTimeout(() => { + // Find and trigger existing tool selector update + const toolContainer = + document.getElementById("edit-server-tools"); + if (toolContainer) { + const firstToolCheckbox = toolContainer.querySelector( + 'input[type="checkbox"]', + ); + if (firstToolCheckbox) { + const changeEvent = new Event("change", { + bubbles: true, + }); + firstToolCheckbox.dispatchEvent(changeEvent); + } + } + + // Trigger resource selector update + const resourceContainer = document.getElementById( + "edit-server-resources", + ); + if (resourceContainer) { + const firstResourceCheckbox = + resourceContainer.querySelector( + 'input[type="checkbox"]', + ); + if (firstResourceCheckbox) { + const changeEvent = new Event("change", { + bubbles: true, + }); + firstResourceCheckbox.dispatchEvent(changeEvent); + } + } + + // Trigger prompt selector update + const promptContainer = document.getElementById( + "edit-server-prompts", + ); + if (promptContainer) { + const firstPromptCheckbox = promptContainer.querySelector( + 'input[type="checkbox"]', + ); + if (firstPromptCheckbox) { + const changeEvent = new Event("change", { + bubbles: true, + }); + firstPromptCheckbox.dispatchEvent(changeEvent); + } + } + }, 50); + }, 200); + console.log("✓ Server edit modal loaded successfully"); } catch (error) { console.error("Error fetching server for editing:", error); @@ -3579,6 +3987,78 @@ async function editServer(serverId) { } } +// Helper function to set edit server associations +function setEditServerAssociations(server) { + // Set associated tools checkboxes + const toolCheckboxes = document.querySelectorAll( + 'input[name="associatedTools"]', + ); + + if (toolCheckboxes.length === 0) { + return; + } + + toolCheckboxes.forEach((checkbox) => { + let isChecked = false; + if (server.associatedTools && window.toolMapping) { + // Get the tool name for this checkbox UUID + const toolName = window.toolMapping[checkbox.value]; + + // Check if this tool name is in the associated tools array + isChecked = toolName && server.associatedTools.includes(toolName); + } + + checkbox.checked = isChecked; + }); + + // Set associated resources checkboxes + const resourceCheckboxes = document.querySelectorAll( + 'input[name="associatedResources"]', + ); + + resourceCheckboxes.forEach((checkbox) => { + const checkboxValue = parseInt(checkbox.value); + const isChecked = + server.associatedResources && + server.associatedResources.includes(checkboxValue); + checkbox.checked = isChecked; + }); + + // Set associated prompts checkboxes + const promptCheckboxes = document.querySelectorAll( + 'input[name="associatedPrompts"]', + ); + + promptCheckboxes.forEach((checkbox) => { + const checkboxValue = parseInt(checkbox.value); + const isChecked = + server.associatedPrompts && + server.associatedPrompts.includes(checkboxValue); + checkbox.checked = isChecked; + }); + + // Force update the pill displays by triggering change events + setTimeout(() => { + const allCheckboxes = [ + ...document.querySelectorAll( + '#edit-server-tools input[type="checkbox"]', + ), + ...document.querySelectorAll( + '#edit-server-resources input[type="checkbox"]', + ), + ...document.querySelectorAll( + '#edit-server-prompts input[type="checkbox"]', + ), + ]; + + allCheckboxes.forEach((checkbox) => { + if (checkbox.checked) { + checkbox.dispatchEvent(new Event("change", { bubbles: true })); + } + }); + }, 50); +} + // =================================================================== // ENHANCED TAB HANDLING with Better Error Management // =================================================================== @@ -3651,12 +4131,51 @@ function showTab(tabName) { } } + if (tabName === "teams") { + // Load Teams list if not already loaded + const teamsList = safeGetElement("teams-list"); + if (teamsList) { + // Check if it's still showing the loading message or is empty + const hasLoadingMessage = + teamsList.innerHTML.includes("Loading teams..."); + const isEmpty = teamsList.innerHTML.trim() === ""; + if (hasLoadingMessage || isEmpty) { + // Trigger HTMX load manually if HTMX is available + if (window.htmx && window.htmx.trigger) { + window.htmx.trigger(teamsList, "load"); + } + } + } + } + + if (tabName === "tokens") { + // Load Tokens list and set up form handling + const tokensList = safeGetElement("tokens-list"); + if (tokensList) { + const hasLoadingMessage = + tokensList.innerHTML.includes("Loading tokens..."); + const isEmpty = tokensList.innerHTML.trim() === ""; + if (hasLoadingMessage || isEmpty) { + loadTokensList(); + } + } + + // Set up create token form if not already set up + const createForm = safeGetElement("create-token-form"); + if (createForm && !createForm.hasAttribute("data-setup")) { + setupCreateTokenForm(); + createForm.setAttribute("data-setup", "true"); + } + } + if (tabName === "a2a-agents") { // Load A2A agents list if not already loaded const agentsList = safeGetElement("a2a-agents-list"); if (agentsList && agentsList.innerHTML.trim() === "") { - // Trigger HTMX load manually - window.htmx.trigger(agentsList, "load"); + // Trigger HTMX load manually if HTMX is available + if (window.htmx && window.htmx.trigger) { + window.htmx.trigger(agentsList, "load"); + } } } @@ -3718,6 +4237,30 @@ function showTab(tabName) { } } } + + if (tabName === "permissions") { + // Initialize permissions panel when tab is shown + if (!panel.classList.contains("hidden")) { + console.log("🔄 Initializing permissions tab content"); + try { + // Check if initializePermissionsPanel function exists + if ( + typeof initializePermissionsPanel === "function" + ) { + initializePermissionsPanel(); + } else { + console.warn( + "initializePermissionsPanel function not found", + ); + } + } catch (error) { + console.error( + "Error initializing permissions panel:", + error, + ); + } + } + } } catch (error) { console.error( `Error in tab ${tabName} content loading:`, @@ -4233,58 +4776,196 @@ function initToolSelect( checkboxes.forEach((cb) => cb.addEventListener("change", update)); } -// =================================================================== -// INACTIVE ITEMS HANDLING -// =================================================================== +function initResourceSelect( + selectId, + pillsId, + warnId, + max = 10, + selectBtnId = null, + clearBtnId = null, +) { + const container = document.getElementById(selectId); + const pillsBox = document.getElementById(pillsId); + const warnBox = document.getElementById(warnId); + const clearBtn = clearBtnId ? document.getElementById(clearBtnId) : null; + const selectBtn = selectBtnId ? document.getElementById(selectBtnId) : null; -function toggleInactiveItems(type) { - const checkbox = safeGetElement(`show-inactive-${type}`); - if (!checkbox) { + if (!container || !pillsBox || !warnBox) { + console.warn( + `Resource select elements not found: ${selectId}, ${pillsId}, ${warnId}`, + ); return; } - const url = new URL(window.location); - if (checkbox.checked) { - url.searchParams.set("include_inactive", "true"); - } else { - url.searchParams.delete("include_inactive"); - } - window.location = url; -} + const checkboxes = container.querySelectorAll('input[type="checkbox"]'); + const pillClasses = + "inline-block px-3 py-1 text-xs font-semibold text-blue-700 bg-blue-100 rounded-full shadow dark:text-blue-300 dark:bg-blue-900"; -function handleToggleSubmit(event, type) { - event.preventDefault(); + function update() { + try { + const checked = Array.from(checkboxes).filter((cb) => cb.checked); + const count = checked.length; - const isInactiveCheckedBool = isInactiveChecked(type); - const form = event.target; - const hiddenField = document.createElement("input"); - hiddenField.type = "hidden"; - hiddenField.name = "is_inactive_checked"; - hiddenField.value = isInactiveCheckedBool; + // Rebuild pills safely + pillsBox.innerHTML = ""; + checked.forEach((cb) => { + const span = document.createElement("span"); + span.className = pillClasses; + span.textContent = + cb.nextElementSibling?.textContent?.trim() || "Unnamed"; + pillsBox.appendChild(span); + }); - form.appendChild(hiddenField); - form.submit(); -} + // Warning when > max + if (count > max) { + warnBox.textContent = `Selected ${count} resources. Selecting more than ${max} resources can degrade agent performance with the server.`; + } else { + warnBox.textContent = ""; + } + } catch (error) { + console.error("Error updating resource select:", error); + } + } -function handleSubmitWithConfirmation(event, type) { - event.preventDefault(); + if (clearBtn) { + clearBtn.addEventListener("click", () => { + checkboxes.forEach((cb) => (cb.checked = false)); + update(); + }); + } - const confirmationMessage = `Are you sure you want to permanently delete this ${type}? (Deactivation is reversible, deletion is permanent)`; - const confirmation = confirm(confirmationMessage); - if (!confirmation) { - return false; + if (selectBtn) { + selectBtn.addEventListener("click", () => { + checkboxes.forEach((cb) => (cb.checked = true)); + update(); + }); } - return handleToggleSubmit(event, type); + update(); // Initial render + checkboxes.forEach((cb) => cb.addEventListener("change", update)); } -// =================================================================== -// ENHANCED TOOL TESTING with Safe State Management -// =================================================================== +function initPromptSelect( + selectId, + pillsId, + warnId, + max = 8, + selectBtnId = null, + clearBtnId = null, +) { + const container = document.getElementById(selectId); + const pillsBox = document.getElementById(pillsId); + const warnBox = document.getElementById(warnId); + const clearBtn = clearBtnId ? document.getElementById(clearBtnId) : null; + const selectBtn = selectBtnId ? document.getElementById(selectBtnId) : null; -// Track active tool test requests globally -const toolTestState = { - activeRequests: new Map(), // toolId -> AbortController + if (!container || !pillsBox || !warnBox) { + console.warn( + `Prompt select elements not found: ${selectId}, ${pillsId}, ${warnId}`, + ); + return; + } + + const checkboxes = container.querySelectorAll('input[type="checkbox"]'); + const pillClasses = + "inline-block px-3 py-1 text-xs font-semibold text-purple-700 bg-purple-100 rounded-full shadow dark:text-purple-300 dark:bg-purple-900"; + + function update() { + try { + const checked = Array.from(checkboxes).filter((cb) => cb.checked); + const count = checked.length; + + // Rebuild pills safely + pillsBox.innerHTML = ""; + checked.forEach((cb) => { + const span = document.createElement("span"); + span.className = pillClasses; + span.textContent = + cb.nextElementSibling?.textContent?.trim() || "Unnamed"; + pillsBox.appendChild(span); + }); + + // Warning when > max + if (count > max) { + warnBox.textContent = `Selected ${count} prompts. Selecting more than ${max} prompts can degrade agent performance with the server.`; + } else { + warnBox.textContent = ""; + } + } catch (error) { + console.error("Error updating prompt select:", error); + } + } + + if (clearBtn) { + clearBtn.addEventListener("click", () => { + checkboxes.forEach((cb) => (cb.checked = false)); + update(); + }); + } + + if (selectBtn) { + selectBtn.addEventListener("click", () => { + checkboxes.forEach((cb) => (cb.checked = true)); + update(); + }); + } + + update(); // Initial render + checkboxes.forEach((cb) => cb.addEventListener("change", update)); +} + +// =================================================================== +// INACTIVE ITEMS HANDLING +// =================================================================== + +function toggleInactiveItems(type) { + const checkbox = safeGetElement(`show-inactive-${type}`); + if (!checkbox) { + return; + } + + const url = new URL(window.location); + if (checkbox.checked) { + url.searchParams.set("include_inactive", "true"); + } else { + url.searchParams.delete("include_inactive"); + } + window.location = url; +} + +function handleToggleSubmit(event, type) { + event.preventDefault(); + + const isInactiveCheckedBool = isInactiveChecked(type); + const form = event.target; + const hiddenField = document.createElement("input"); + hiddenField.type = "hidden"; + hiddenField.name = "is_inactive_checked"; + hiddenField.value = isInactiveCheckedBool; + + form.appendChild(hiddenField); + form.submit(); +} + +function handleSubmitWithConfirmation(event, type) { + event.preventDefault(); + + const confirmationMessage = `Are you sure you want to permanently delete this ${type}? (Deactivation is reversible, deletion is permanent)`; + const confirmation = confirm(confirmationMessage); + if (!confirmation) { + return false; + } + + return handleToggleSubmit(event, type); +} + +// =================================================================== +// ENHANCED TOOL TESTING with Safe State Management +// =================================================================== + +// Track active tool test requests globally +const toolTestState = { + activeRequests: new Map(), // toolId -> AbortController lastRequestTime: new Map(), // toolId -> timestamp debounceDelay: 1000, // Increased from 500ms requestTimeout: 30000, // Increased from 10000ms @@ -4405,7 +5086,8 @@ async function testTool(toolId) { } if (descElement) { if (tool.description) { - descElement.innerHTML = tool.description.replace( + // Escape HTML and then replace newlines with
tags + descElement.innerHTML = escapeHtml(tool.description).replace( /\n/g, "
", ); @@ -4804,6 +5486,10 @@ async function runToolTest() { "Content-Type": "application/json", }; + // Authentication will be handled automatically by the JWT cookie + // that was set when the admin UI loaded. The 'credentials: "include"' + // in the fetch request ensures the cookie is sent with the request. + const passthroughHeadersField = document.getElementById( "test-passthrough-headers", ); @@ -4982,131 +5668,507 @@ function cleanupToolTestModal() { } // =================================================================== -// ENHANCED GATEWAY TEST FUNCTIONALITY +// PROMPT TEST FUNCTIONALITY // =================================================================== -let gatewayTestHeadersEditor = null; -let gatewayTestBodyEditor = null; -let gatewayTestFormHandler = null; -let gatewayTestCloseHandler = null; +// State management for prompt testing +const promptTestState = { + lastRequestTime: new Map(), + activeRequests: new Set(), + currentTestPrompt: null, +}; -async function testGateway(gatewayURL) { +/** + * Test a prompt by opening the prompt test modal + */ +async function testPrompt(promptName) { try { - console.log("Opening gateway test modal for:", gatewayURL); + console.log(`Testing prompt: ${promptName}`); - // Validate URL - const urlValidation = validateUrl(gatewayURL); - if (!urlValidation.valid) { - showErrorMessage(`Invalid gateway URL: ${urlValidation.error}`); + // Debouncing to prevent rapid clicking + const now = Date.now(); + const lastRequest = + promptTestState.lastRequestTime.get(promptName) || 0; + const timeSinceLastRequest = now - lastRequest; + const debounceDelay = 1000; + + if (timeSinceLastRequest < debounceDelay) { + console.log(`Prompt ${promptName} test request debounced`); return; } - // Clean up any existing event listeners first - cleanupGatewayTestModal(); - - // Open the modal - openModal("gateway-test-modal"); + // Check if modal is already active + if (AppState.isModalActive("prompt-test-modal")) { + console.warn("Prompt test modal is already active"); + return; + } - // Initialize CodeMirror editors if they don't exist - if (!gatewayTestHeadersEditor) { - const headersElement = safeGetElement("gateway-test-headers"); - if (headersElement && window.CodeMirror) { - gatewayTestHeadersEditor = window.CodeMirror.fromTextArea( - headersElement, - { - mode: "application/json", - lineNumbers: true, - lineWrapping: true, - }, + // Update button state + const testButton = document.querySelector( + `[onclick*="testPrompt('${promptName}')"]`, + ); + if (testButton) { + if (testButton.disabled) { + console.log( + "Test button already disabled, request in progress", ); - gatewayTestHeadersEditor.setSize(null, 100); - console.log("✓ Initialized gateway test headers editor"); + return; } + testButton.disabled = true; + testButton.textContent = "Loading..."; + testButton.classList.add("opacity-50", "cursor-not-allowed"); } - if (!gatewayTestBodyEditor) { - const bodyElement = safeGetElement("gateway-test-body"); - if (bodyElement && window.CodeMirror) { - gatewayTestBodyEditor = window.CodeMirror.fromTextArea( - bodyElement, - { - mode: "application/json", - lineNumbers: true, - lineWrapping: true, + // Record request time and mark as active + promptTestState.lastRequestTime.set(promptName, now); + promptTestState.activeRequests.add(promptName); + + // Fetch prompt details + const controller = new AbortController(); + const timeoutId = setTimeout(() => controller.abort(), 30000); + + try { + // Fetch prompt details from the prompts endpoint (view mode) + const response = await fetch( + `/admin/prompts/${encodeURIComponent(promptName)}`, + { + method: "GET", + headers: { + Accept: "application/json", }, + credentials: "include", + signal: controller.signal, + }, + ); + + clearTimeout(timeoutId); + + if (!response.ok) { + throw new Error( + `Failed to fetch prompt details: ${response.status} ${response.statusText}`, ); - gatewayTestBodyEditor.setSize(null, 100); - console.log("✓ Initialized gateway test body editor"); } - } - // Set form action and URL - const form = safeGetElement("gateway-test-form"); - const urlInput = safeGetElement("gateway-test-url"); + const prompt = await response.json(); + promptTestState.currentTestPrompt = prompt; - if (form) { - form.action = `${window.ROOT_PATH}/admin/gateways/test`; - } - if (urlInput) { - urlInput.value = urlValidation.value; - } + // Set modal title and description + const titleElement = safeGetElement("prompt-test-modal-title"); + const descElement = safeGetElement("prompt-test-modal-description"); - // Set up form submission handler - if (form) { - gatewayTestFormHandler = async (e) => { - await handleGatewayTestSubmit(e); - }; - form.addEventListener("submit", gatewayTestFormHandler); - } + if (titleElement) { + titleElement.textContent = `Test Prompt: ${prompt.name || promptName}`; + } + if (descElement) { + if (prompt.description) { + // Escape HTML and then replace newlines with
tags + descElement.innerHTML = escapeHtml( + prompt.description, + ).replace(/\n/g, "
"); + } else { + descElement.textContent = "No description available."; + } + } - // Set up close button handler - const closeButton = safeGetElement("gateway-test-close"); - if (closeButton) { - gatewayTestCloseHandler = () => { - handleGatewayTestClose(); - }; - closeButton.addEventListener("click", gatewayTestCloseHandler); + // Build form fields based on prompt arguments + buildPromptTestForm(prompt); + + // Open the modal + openModal("prompt-test-modal"); + } catch (error) { + clearTimeout(timeoutId); + + if (error.name === "AbortError") { + console.warn("Request was cancelled (timeout or user action)"); + showErrorMessage("Request timed out. Please try again."); + } else { + console.error("Error fetching prompt details:", error); + const errorMessage = + error.message || "Failed to load prompt details"; + showErrorMessage(`Error testing prompt: ${errorMessage}`); + } } } catch (error) { - console.error("Error setting up gateway test modal:", error); - showErrorMessage("Failed to open gateway test modal"); + console.error("Error in testPrompt:", error); + showErrorMessage(`Error testing prompt: ${error.message}`); + } finally { + // Always restore button state + const testButton = document.querySelector( + `[onclick*="testPrompt('${promptName}')"]`, + ); + if (testButton) { + testButton.disabled = false; + testButton.textContent = "Test"; + testButton.classList.remove("opacity-50", "cursor-not-allowed"); + } + + // Clean up state + promptTestState.activeRequests.delete(promptName); } } -async function handleGatewayTestSubmit(e) { - e.preventDefault(); +/** + * Build the form fields for prompt testing based on prompt arguments + */ +function buildPromptTestForm(prompt) { + const fieldsContainer = safeGetElement("prompt-test-form-fields"); + if (!fieldsContainer) { + console.error("Prompt test form fields container not found"); + return; + } - const loading = safeGetElement("gateway-test-loading"); - const responseDiv = safeGetElement("gateway-test-response-json"); - const resultDiv = safeGetElement("gateway-test-result"); - const testButton = safeGetElement("gateway-test-submit"); + // Clear existing fields + fieldsContainer.innerHTML = ""; - try { - // Show loading - if (loading) { - loading.classList.remove("hidden"); - } - if (resultDiv) { - resultDiv.classList.add("hidden"); - } - if (testButton) { - testButton.disabled = true; - testButton.textContent = "Testing..."; - } + if (!prompt.arguments || prompt.arguments.length === 0) { + fieldsContainer.innerHTML = ` +
+ This prompt has no arguments - it will render as-is. +
+ `; + return; + } - const form = e.target; - const url = form.action; + // Create fields for each prompt argument + prompt.arguments.forEach((arg, index) => { + const fieldDiv = document.createElement("div"); + fieldDiv.className = "space-y-2"; - // Get form data with validation - const formData = new FormData(form); - const baseUrl = formData.get("url"); - const method = formData.get("method"); - const path = formData.get("path"); + const label = document.createElement("label"); + label.className = + "block text-sm font-medium text-gray-700 dark:text-gray-300"; + label.textContent = `${arg.name}${arg.required ? " *" : ""}`; - // Validate URL - const urlValidation = validateUrl(baseUrl); - if (!urlValidation.valid) { - throw new Error(`Invalid URL: ${urlValidation.error}`); + const input = document.createElement("input"); + input.type = "text"; + input.id = `prompt-arg-${index}`; + input.name = `arg-${arg.name}`; + input.className = + "mt-1 block w-full rounded-md border-gray-300 shadow-sm focus:border-indigo-500 focus:ring-indigo-500 dark:bg-gray-700 dark:border-gray-600 dark:text-gray-300"; + + if (arg.description) { + input.placeholder = arg.description; + } + + if (arg.required) { + input.required = true; + } + + fieldDiv.appendChild(label); + if (arg.description) { + const description = document.createElement("div"); + description.className = "text-xs text-gray-500 dark:text-gray-400"; + description.textContent = arg.description; + fieldDiv.appendChild(description); + } + fieldDiv.appendChild(input); + + fieldsContainer.appendChild(fieldDiv); + }); +} + +/** + * Run the prompt test by calling the API with the provided arguments + */ +async function runPromptTest() { + const form = safeGetElement("prompt-test-form"); + const loadingElement = safeGetElement("prompt-test-loading"); + const resultContainer = safeGetElement("prompt-test-result"); + const runButton = document.querySelector( + 'button[onclick="runPromptTest()"]', + ); + + if (!form || !promptTestState.currentTestPrompt) { + console.error("Prompt test form or current prompt not found"); + showErrorMessage("Prompt test form not available"); + return; + } + + // Prevent multiple concurrent test runs + if (runButton && runButton.disabled) { + console.log("Prompt test already running"); + return; + } + + try { + // Disable button and show loading + if (runButton) { + runButton.disabled = true; + runButton.textContent = "Rendering..."; + } + if (loadingElement) { + loadingElement.classList.remove("hidden"); + } + if (resultContainer) { + resultContainer.innerHTML = ` +
+ Rendering prompt... +
+ `; + } + + // Collect form data (prompt arguments) + const formData = new FormData(form); + const args = {}; + + // Parse the form data into arguments object + for (const [key, value] of formData.entries()) { + if (key.startsWith("arg-")) { + const argName = key.substring(4); // Remove 'arg-' prefix + args[argName] = value; + } + } + + // Call the prompt API endpoint + const response = await fetch( + `/prompts/${encodeURIComponent(promptTestState.currentTestPrompt.name)}`, + { + method: "POST", + headers: { + "Content-Type": "application/json", + }, + credentials: "include", + body: JSON.stringify(args), + }, + ); + + if (!response.ok) { + let errorMessage; + try { + const errorData = await response.json(); + errorMessage = + errorData.message || + `HTTP ${response.status}: ${response.statusText}`; + + // Show more detailed error information + if (errorData.details) { + errorMessage += `\nDetails: ${errorData.details}`; + } + } catch { + errorMessage = `HTTP ${response.status}: ${response.statusText}`; + } + throw new Error(errorMessage); + } + + const result = await response.json(); + + // Display the result + if (resultContainer) { + let resultHtml = ""; + + if (result.messages && Array.isArray(result.messages)) { + result.messages.forEach((message, index) => { + resultHtml += ` +
+
+ Message ${index + 1} (${message.role || "unknown"}) +
+
${escapeHtml(message.content?.text || JSON.stringify(message.content) || "")}
+
+ `; + }); + } else { + resultHtml = ` +
${escapeHtml(JSON.stringify(result, null, 2))}
+ `; + } + + resultContainer.innerHTML = resultHtml; + } + + console.log("Prompt rendered successfully"); + } catch (error) { + console.error("Error rendering prompt:", error); + + if (resultContainer) { + resultContainer.innerHTML = ` +
+ Error: ${escapeHtml(error.message)} +
+ `; + } + + showErrorMessage(`Failed to render prompt: ${error.message}`); + } finally { + // Hide loading and restore button + if (loadingElement) { + loadingElement.classList.add("hidden"); + } + if (runButton) { + runButton.disabled = false; + runButton.textContent = "Render Prompt"; + } + } +} + +/** + * Clean up prompt test modal state + */ +function cleanupPromptTestModal() { + try { + // Clear current test prompt + promptTestState.currentTestPrompt = null; + + // Reset form + const form = safeGetElement("prompt-test-form"); + if (form) { + form.reset(); + } + + // Clear form fields + const fieldsContainer = safeGetElement("prompt-test-form-fields"); + if (fieldsContainer) { + fieldsContainer.innerHTML = ""; + } + + // Clear result container + const resultContainer = safeGetElement("prompt-test-result"); + if (resultContainer) { + resultContainer.innerHTML = ` +
+ Click "Render Prompt" to see the rendered output +
+ `; + } + + // Hide loading + const loadingElement = safeGetElement("prompt-test-loading"); + if (loadingElement) { + loadingElement.classList.add("hidden"); + } + + console.log("✓ Prompt test modal cleaned up"); + } catch (error) { + console.error("Error cleaning up prompt test modal:", error); + } +} + +// =================================================================== +// ENHANCED GATEWAY TEST FUNCTIONALITY +// =================================================================== + +let gatewayTestHeadersEditor = null; +let gatewayTestBodyEditor = null; +let gatewayTestFormHandler = null; +let gatewayTestCloseHandler = null; + +async function testGateway(gatewayURL) { + try { + console.log("Opening gateway test modal for:", gatewayURL); + + // Validate URL + const urlValidation = validateUrl(gatewayURL); + if (!urlValidation.valid) { + showErrorMessage(`Invalid gateway URL: ${urlValidation.error}`); + return; + } + + // Clean up any existing event listeners first + cleanupGatewayTestModal(); + + // Open the modal + openModal("gateway-test-modal"); + + // Initialize CodeMirror editors if they don't exist + if (!gatewayTestHeadersEditor) { + const headersElement = safeGetElement("gateway-test-headers"); + if (headersElement && window.CodeMirror) { + gatewayTestHeadersEditor = window.CodeMirror.fromTextArea( + headersElement, + { + mode: "application/json", + lineNumbers: true, + lineWrapping: true, + }, + ); + gatewayTestHeadersEditor.setSize(null, 100); + console.log("✓ Initialized gateway test headers editor"); + } + } + + if (!gatewayTestBodyEditor) { + const bodyElement = safeGetElement("gateway-test-body"); + if (bodyElement && window.CodeMirror) { + gatewayTestBodyEditor = window.CodeMirror.fromTextArea( + bodyElement, + { + mode: "application/json", + lineNumbers: true, + lineWrapping: true, + }, + ); + gatewayTestBodyEditor.setSize(null, 100); + console.log("✓ Initialized gateway test body editor"); + } + } + + // Set form action and URL + const form = safeGetElement("gateway-test-form"); + const urlInput = safeGetElement("gateway-test-url"); + + if (form) { + form.action = `${window.ROOT_PATH}/admin/gateways/test`; + } + if (urlInput) { + urlInput.value = urlValidation.value; + } + + // Set up form submission handler + if (form) { + gatewayTestFormHandler = async (e) => { + await handleGatewayTestSubmit(e); + }; + form.addEventListener("submit", gatewayTestFormHandler); + } + + // Set up close button handler + const closeButton = safeGetElement("gateway-test-close"); + if (closeButton) { + gatewayTestCloseHandler = () => { + handleGatewayTestClose(); + }; + closeButton.addEventListener("click", gatewayTestCloseHandler); + } + } catch (error) { + console.error("Error setting up gateway test modal:", error); + showErrorMessage("Failed to open gateway test modal"); + } +} + +async function handleGatewayTestSubmit(e) { + e.preventDefault(); + + const loading = safeGetElement("gateway-test-loading"); + const responseDiv = safeGetElement("gateway-test-response-json"); + const resultDiv = safeGetElement("gateway-test-result"); + const testButton = safeGetElement("gateway-test-submit"); + + try { + // Show loading + if (loading) { + loading.classList.remove("hidden"); + } + if (resultDiv) { + resultDiv.classList.add("hidden"); + } + if (testButton) { + testButton.disabled = true; + testButton.textContent = "Testing..."; + } + + const form = e.target; + const url = form.action; + + // Get form data with validation + const formData = new FormData(form); + const baseUrl = formData.get("url"); + const method = formData.get("method"); + const path = formData.get("path"); + + // Validate URL + const urlValidation = validateUrl(baseUrl); + if (!urlValidation.valid) { + throw new Error(`Invalid URL: ${urlValidation.error}`); } // Get CodeMirror content safely @@ -5567,7 +6629,7 @@ async function viewTool(toolId) { tagsElement.innerHTML = tool.tags .map( (tag) => - `${tag}`, + `${escapeHtml(tag)}`, ) .join(""); } else { @@ -6854,6 +7916,26 @@ function initializeToolSelects() { "selectAllEditToolsBtn", "clearAllEditToolsBtn", ); + + // Initialize resource selector + initResourceSelect( + "edit-server-resources", + "selectedEditResourcesPills", + "selectedEditResourcesWarning", + 10, + "selectAllEditResourcesBtn", + "clearAllEditResourcesBtn", + ); + + // Initialize prompt selector + initPromptSelect( + "edit-server-prompts", + "selectedEditPromptsPills", + "selectedEditPromptsWarning", + 8, + "selectAllEditPromptsBtn", + "clearAllEditPromptsBtn", + ); } function initializeEventListeners() { @@ -6883,7 +7965,11 @@ function setupTabNavigation() { ]; tabs.forEach((tabName) => { - const tabElement = safeGetElement(`tab-${tabName}`); + // Suppress warnings for optional tabs that might not be enabled + const optionalTabs = ["roots", "logs", "export-import", "version-info"]; + const suppressWarning = optionalTabs.includes(tabName); + + const tabElement = safeGetElement(`tab-${tabName}`, suppressWarning); if (tabElement) { tabElement.addEventListener("click", () => showTab(tabName)); } @@ -7060,23 +8146,81 @@ function setupFormHandlers() { } }); } + + // Setup search functionality for selectors + setupSelectorSearch(); } -function handleAuthTypeChange() { - const authType = this.value; - const basicFields = safeGetElement("auth-basic-fields-gw"); - const bearerFields = safeGetElement("auth-bearer-fields-gw"); - const headersFields = safeGetElement("auth-headers-fields-gw"); - const oauthFields = safeGetElement("auth-oauth-fields-gw"); +/** + * Setup search functionality for multi-select dropdowns + */ +function setupSelectorSearch() { + // Tools search + const searchTools = safeGetElement("searchTools", true); + if (searchTools) { + searchTools.addEventListener("input", function () { + filterItems(this.value, ".tool-item", ["span"]); + }); + } - // Hide all auth sections first - if (basicFields) { - basicFields.style.display = "none"; + // Resources search + const searchResources = safeGetElement("searchResources", true); + if (searchResources) { + searchResources.addEventListener("input", function () { + filterItems(this.value, ".resource-item", ["span", ".text-xs"]); + }); } - if (bearerFields) { - bearerFields.style.display = "none"; + + // Prompts search + const searchPrompts = safeGetElement("searchPrompts", true); + if (searchPrompts) { + searchPrompts.addEventListener("input", function () { + filterItems(this.value, ".prompt-item", ["span", ".text-xs"]); + }); } - if (headersFields) { +} + +/** + * Generic function to filter items in multi-select dropdowns + */ +function filterItems(searchText, itemSelector, textSelectors) { + const items = document.querySelectorAll(itemSelector); + const search = searchText.toLowerCase(); + + items.forEach((item) => { + let textContent = ""; + + // Collect text from all specified selectors within the item + textSelectors.forEach((selector) => { + const elements = item.querySelectorAll(selector); + elements.forEach((el) => { + textContent += " " + el.textContent; + }); + }); + + if (textContent.toLowerCase().includes(search)) { + item.style.display = ""; + } else { + item.style.display = "none"; + } + }); +} + +function handleAuthTypeChange() { + const authType = this.value; + const basicFields = safeGetElement("auth-basic-fields-gw"); + const bearerFields = safeGetElement("auth-bearer-fields-gw"); + const headersFields = safeGetElement("auth-headers-fields-gw"); + const oauthFields = safeGetElement("auth-oauth-fields-gw"); + + // Hide all auth sections first + if (basicFields) { + basicFields.style.display = "none"; + } + if (bearerFields) { + bearerFields.style.display = "none"; + } + if (headersFields) { headersFields.style.display = "none"; } if (oauthFields) { @@ -7311,6 +8455,8 @@ window.editGateway = editGateway; window.viewServer = viewServer; window.editServer = editServer; window.runToolTest = runToolTest; +window.testPrompt = testPrompt; +window.runPromptTest = runPromptTest; window.closeModal = closeModal; window.testGateway = testGateway; @@ -8193,9 +9339,7 @@ function setupBulkImportModal() { const modal = safeGetElement(modalId, true); if (!openBtn || !modal) { - console.warn( - "Bulk Import modal wiring skipped (missing button or modal).", - ); + // Bulk import feature not available - skip silently return; } @@ -8492,7 +9636,7 @@ async function handleExportAll() { const response = await fetch(`/admin/export/configuration?${params}`, { method: "GET", headers: { - Authorization: `Bearer ${getCookie("jwt_token")}`, + Authorization: `Bearer ${await getAuthToken()}`, }, }); @@ -8798,14 +9942,17 @@ async function handleImport(dryRun = false) { rekey_secret: rekeySecret, }; - const response = await fetch("/admin/import/configuration", { - method: "POST", - headers: { - "Content-Type": "application/json", - Authorization: `Bearer ${getCookie("jwt_token")}`, + const response = await fetch( + (window.ROOT_PATH || "") + "/admin/import/configuration", + { + method: "POST", + headers: { + "Content-Type": "application/json", + Authorization: `Bearer ${await getAuthToken()}`, + }, + body: JSON.stringify(requestData), }, - body: JSON.stringify(requestData), - }); + ); if (!response.ok) { const errorData = await response.json(); @@ -8947,11 +10094,14 @@ function showImportProgress(show) { */ async function loadRecentImports() { try { - const response = await fetch("/admin/import/status", { - headers: { - Authorization: `Bearer ${getCookie("jwt_token")}`, + const response = await fetch( + (window.ROOT_PATH || "") + "/admin/import/status", + { + headers: { + Authorization: `Bearer ${await getAuthToken()}`, + }, }, - }); + ); if (response.ok) { const imports = await response.json(); @@ -9050,20 +10200,8 @@ async function testA2AAgent(agentId, agentName, endpointUrl) { '
🔄 Testing agent...
'; testResult.classList.remove("hidden"); - // Get auth token from cookie or local storage - let token = getCookie("jwt_token"); - - // Try alternative cookie names if primary not found - if (!token) { - token = getCookie("access_token") || getCookie("auth_token"); - } - - // Try to get from localStorage as fallback - if (!token) { - token = - localStorage.getItem("jwt_token") || - localStorage.getItem("auth_token"); - } + // Get auth token using the robust getAuthToken function + const token = await getAuthToken(); // Debug logging console.log("Available cookies:", document.cookie); @@ -9157,3 +10295,993 @@ async function testA2AAgent(agentId, agentName, endpointUrl) { // Expose A2A test function to global scope window.testA2AAgent = testA2AAgent; + +/** + * Token Management Functions + */ + +/** + * Load tokens list from API + */ +async function loadTokensList() { + const tokensList = safeGetElement("tokens-list"); + if (!tokensList) { + return; + } + + try { + tokensList.innerHTML = + '

Loading tokens...

'; + + const response = await fetchWithTimeout(`${window.ROOT_PATH}/tokens`, { + headers: { + Authorization: `Bearer ${await getAuthToken()}`, + "Content-Type": "application/json", + }, + }); + + if (!response.ok) { + throw new Error(`Failed to load tokens: ${response.status}`); + } + + const data = await response.json(); + displayTokensList(data.tokens); + } catch (error) { + console.error("Error loading tokens:", error); + tokensList.innerHTML = `
Error loading tokens: ${escapeHtml(error.message)}
`; + } +} + +/** + * Display tokens list in the UI + */ +function displayTokensList(tokens) { + const tokensList = safeGetElement("tokens-list"); + if (!tokensList) { + return; + } + + if (!tokens || tokens.length === 0) { + tokensList.innerHTML = + '

No tokens found. Create your first token above.

'; + return; + } + + let tokensHTML = ""; + tokens.forEach((token) => { + const expiresText = token.expires_at + ? new Date(token.expires_at).toLocaleDateString() + : "Never"; + const lastUsedText = token.last_used + ? new Date(token.last_used).toLocaleDateString() + : "Never"; + const statusBadge = token.is_active + ? 'Active' + : 'Inactive'; + + tokensHTML += ` +
+
+
+
+

${escapeHtml(token.name)}

+ ${statusBadge} +
+ ${token.description ? `

${escapeHtml(token.description)}

` : ""} +
+
+ Created: ${new Date(token.created_at).toLocaleDateString()} +
+
+ Expires: ${expiresText} +
+
+ Last Used: ${lastUsedText} +
+
+ ${token.server_id ? `
Scoped to Server: ${escapeHtml(token.server_id)}
` : ""} + ${token.resource_scopes && token.resource_scopes.length > 0 ? `
Permissions: ${token.resource_scopes.map((p) => escapeHtml(p)).join(", ")}
` : ""} +
+
+ + +
+
+
+ `; + }); + + tokensList.innerHTML = tokensHTML; +} + +/** + * Set up create token form handling + */ +function setupCreateTokenForm() { + const form = safeGetElement("create-token-form"); + if (!form) { + return; + } + + form.addEventListener("submit", async (e) => { + e.preventDefault(); + await createToken(form); + }); +} + +/** + * Create a new API token + */ +async function createToken(form) { + const formData = new FormData(form); + const submitButton = form.querySelector('button[type="submit"]'); + const originalText = submitButton.textContent; + + try { + submitButton.textContent = "Creating..."; + submitButton.disabled = true; + + // Build request payload + const payload = { + name: formData.get("name"), + description: formData.get("description") || null, + expires_in_days: formData.get("expires_in_days") + ? parseInt(formData.get("expires_in_days")) + : null, + }; + + // Add scoping if provided + const scope = {}; + if (formData.get("server_id")) { + scope.server_id = formData.get("server_id"); + } + if (formData.get("ip_restrictions")) { + scope.ip_restrictions = [formData.get("ip_restrictions")]; + } + if (formData.get("permissions")) { + scope.permissions = formData + .get("permissions") + .split(",") + .map((p) => p.trim()); + } + + if (Object.keys(scope).length > 0) { + payload.scope = scope; + } + + const response = await fetchWithTimeout(`${window.ROOT_PATH}/tokens`, { + method: "POST", + headers: { + Authorization: `Bearer ${await getAuthToken()}`, + "Content-Type": "application/json", + }, + body: JSON.stringify(payload), + }); + + if (!response.ok) { + const error = await response.json(); + throw new Error( + error.detail || `Failed to create token: ${response.status}`, + ); + } + + const result = await response.json(); + + // Show the new token to the user (this is the only time they'll see it) + showTokenCreatedModal(result); + + // Reset form and reload tokens list + form.reset(); + await loadTokensList(); + + showNotification("Token created successfully", "success"); + } catch (error) { + console.error("Error creating token:", error); + showNotification(`Error creating token: ${error.message}`, "error"); + } finally { + submitButton.textContent = originalText; + submitButton.disabled = false; + } +} + +/** + * Show modal with new token (one-time display) + */ +function showTokenCreatedModal(tokenData) { + const modal = document.createElement("div"); + modal.className = + "fixed inset-0 bg-gray-600 bg-opacity-50 overflow-y-auto h-full w-full z-50"; + modal.innerHTML = ` +
+
+
+

Token Created Successfully

+ +
+ +
+
+
+ + + +
+
+

+ Important: Save your token now! +

+
+ This is the only time you will be able to see this token. Make sure to save it in a secure location. +
+
+
+
+ +
+ +
+ + +
+
+ +
+ Token Name: ${escapeHtml(tokenData.token.name || "Unnamed Token")}
+ Expires: ${tokenData.token.expires_at ? new Date(tokenData.token.expires_at).toLocaleDateString() : "Never"} +
+ +
+ +
+
+
+ `; + + document.body.appendChild(modal); + + // Focus the token input for easy selection + const tokenInput = modal.querySelector("#new-token-value"); + tokenInput.focus(); + tokenInput.select(); +} + +/** + * Copy text to clipboard + */ +function copyToClipboard(elementId) { + const element = document.getElementById(elementId); + if (element) { + element.select(); + document.execCommand("copy"); + showNotification("Token copied to clipboard", "success"); + } +} + +/** + * Revoke a token + */ +async function revokeToken(tokenId, tokenName) { + if ( + !confirm( + `Are you sure you want to revoke the token "${tokenName}"? This action cannot be undone.`, + ) + ) { + return; + } + + try { + const response = await fetchWithTimeout( + `${window.ROOT_PATH}/tokens/${tokenId}`, + { + method: "DELETE", + headers: { + Authorization: `Bearer ${await getAuthToken()}`, + "Content-Type": "application/json", + }, + body: JSON.stringify({ + reason: "Revoked by user via admin interface", + }), + }, + ); + + if (!response.ok) { + const error = await response.json(); + throw new Error( + error.detail || `Failed to revoke token: ${response.status}`, + ); + } + + showNotification("Token revoked successfully", "success"); + await loadTokensList(); + } catch (error) { + console.error("Error revoking token:", error); + showNotification(`Error revoking token: ${error.message}`, "error"); + } +} + +/** + * View token usage statistics + */ +async function viewTokenUsage(tokenId) { + try { + const response = await fetchWithTimeout( + `${window.ROOT_PATH}/tokens/${tokenId}/usage`, + { + headers: { + Authorization: `Bearer ${await getAuthToken()}`, + "Content-Type": "application/json", + }, + }, + ); + + if (!response.ok) { + throw new Error(`Failed to load usage stats: ${response.status}`); + } + + const stats = await response.json(); + showUsageStatsModal(stats); + } catch (error) { + console.error("Error loading usage stats:", error); + showNotification( + `Error loading usage stats: ${error.message}`, + "error", + ); + } +} + +/** + * Show usage statistics modal + */ +function showUsageStatsModal(stats) { + const modal = document.createElement("div"); + modal.className = + "fixed inset-0 bg-gray-600 bg-opacity-50 overflow-y-auto h-full w-full z-50"; + modal.innerHTML = ` +
+
+

Token Usage Statistics (Last ${stats.period_days} Days)

+ +
+ +
+
+
${stats.total_requests}
+
Total Requests
+
+
+
${stats.successful_requests}
+
Successful
+
+
+
${stats.blocked_requests}
+
Blocked
+
+
+
${Math.round(stats.success_rate * 100)}%
+
Success Rate
+
+
+ +
+

Average Response Time

+
${stats.average_response_time_ms}ms
+
+ + ${ + stats.top_endpoints && stats.top_endpoints.length > 0 + ? ` +
+

Top Endpoints

+
+ ${stats.top_endpoints + .map( + ([endpoint, count]) => ` +
+ ${escapeHtml(endpoint)} + ${count} requests +
+ `, + ) + .join("")} +
+
+ ` + : "" + } + +
+ +
+
+ `; + + document.body.appendChild(modal); +} + +/** + * Get auth token from storage or user input + */ +async function getAuthToken() { + // Use the same authentication method as the rest of the admin interface + let token = getCookie("jwt_token"); + + // Try alternative cookie names if primary not found + if (!token) { + token = getCookie("token"); + } + + // Fallback to localStorage for compatibility + if (!token) { + token = localStorage.getItem("auth_token"); + } + + return token || ""; +} + +// Expose token management functions to global scope +window.loadTokensList = loadTokensList; +window.setupCreateTokenForm = setupCreateTokenForm; +window.createToken = createToken; +window.revokeToken = revokeToken; +window.viewTokenUsage = viewTokenUsage; +window.copyToClipboard = copyToClipboard; + +// =================================================================== +// USER MANAGEMENT FUNCTIONS +// =================================================================== + +/** + * Show user edit modal and load edit form + */ +function showUserEditModal(userEmail) { + const modal = document.getElementById("user-edit-modal"); + if (modal) { + modal.style.display = "block"; + modal.classList.remove("hidden"); + } +} + +/** + * Hide user edit modal + */ +function hideUserEditModal() { + const modal = document.getElementById("user-edit-modal"); + if (modal) { + modal.style.display = "none"; + modal.classList.add("hidden"); + } +} + +/** + * Close modal when clicking outside of it + */ +document.addEventListener("DOMContentLoaded", function () { + const userModal = document.getElementById("user-edit-modal"); + if (userModal) { + userModal.addEventListener("click", function (event) { + if (event.target === userModal) { + hideUserEditModal(); + } + }); + } + + const teamModal = document.getElementById("team-edit-modal"); + if (teamModal) { + teamModal.addEventListener("click", function (event) { + if (event.target === teamModal) { + hideTeamEditModal(); + } + }); + } + + // Handle HTMX events to show/hide modal + document.body.addEventListener("htmx:afterRequest", function (event) { + if ( + event.detail.pathInfo.requestPath.includes("/admin/users/") && + event.detail.pathInfo.requestPath.includes("/edit") + ) { + showUserEditModal(); + } + }); +}); + +// Expose user modal functions to global scope +window.showUserEditModal = showUserEditModal; +window.hideUserEditModal = hideUserEditModal; + +// Team edit modal functions +async function showTeamEditModal(teamId) { + // Get the root path by extracting it from the current pathname + let rootPath = window.location.pathname; + const adminIndex = rootPath.lastIndexOf("/admin"); + if (adminIndex !== -1) { + rootPath = rootPath.substring(0, adminIndex); + } else { + rootPath = ""; + } + + // Construct the full URL - ensure it starts with / + const url = (rootPath || "") + "/admin/teams/" + teamId + "/edit"; + + // Load the team edit form via HTMX + fetch(url, { + method: "GET", + headers: { + Authorization: "Bearer " + (await getAuthToken()), + }, + }) + .then((response) => response.text()) + .then((html) => { + document.getElementById("team-edit-modal-content").innerHTML = html; + document + .getElementById("team-edit-modal") + .classList.remove("hidden"); + }) + .catch((error) => { + console.error("Error loading team edit form:", error); + }); +} + +function hideTeamEditModal() { + document.getElementById("team-edit-modal").classList.add("hidden"); +} + +// Expose team modal functions to global scope +window.showTeamEditModal = showTeamEditModal; +window.hideTeamEditModal = hideTeamEditModal; + +// Team member management functions +function showAddMemberForm(teamId) { + const form = document.getElementById("add-member-form-" + teamId); + if (form) { + form.classList.remove("hidden"); + } +} + +function hideAddMemberForm(teamId) { + const form = document.getElementById("add-member-form-" + teamId); + if (form) { + form.classList.add("hidden"); + // Reset form + const formElement = form.querySelector("form"); + if (formElement) { + formElement.reset(); + } + } +} + +// Expose team member management functions to global scope +window.showAddMemberForm = showAddMemberForm; +window.hideAddMemberForm = hideAddMemberForm; + +// Logs refresh function +function refreshLogs() { + const logsSection = document.getElementById("logs"); + if (logsSection && typeof window.htmx !== "undefined") { + // Trigger HTMX refresh on the logs section + window.htmx.trigger(logsSection, "refresh"); + } +} + +// Expose logs functions to global scope +window.refreshLogs = refreshLogs; + +// User edit modal functions (already defined above) +// Functions are already exposed to global scope + +// Team permissions functions are implemented in the admin.html template +// Remove placeholder functions to avoid overriding template functionality + +function initializePermissionsPanel() { + // Load team data if available + if (window.USER_TEAMS && window.USER_TEAMS.length > 0) { + const membersList = document.getElementById("team-members-list"); + const rolesList = document.getElementById("role-assignments-list"); + + if (membersList) { + membersList.innerHTML = + '
Use the Teams Management tab to view and manage team members.
'; + } + + if (rolesList) { + rolesList.innerHTML = + '
Use the Teams Management tab to assign roles to team members.
'; + } + } +} + +// Permission functions are implemented in admin.html template - don't override them +window.initializePermissionsPanel = initializePermissionsPanel; + +// =================================================================== +// TEAM DISCOVERY AND SELF-SERVICE FUNCTIONS +// =================================================================== + +/** + * Load and display public teams that the user can join + */ +async function loadPublicTeams() { + const container = safeGetElement("public-teams-list"); + if (!container) { + console.error("Public teams list container not found"); + return; + } + + // Show loading state + container.innerHTML = + '
Loading public teams...
'; + + try { + const response = await fetchWithTimeout( + `${window.ROOT_PATH || ""}/teams/discover`, + { + headers: { + Authorization: `Bearer ${await getAuthToken()}`, + "Content-Type": "application/json", + }, + }, + ); + if (!response.ok) { + throw new Error(`Failed to load teams: ${response.status}`); + } + + const teams = await response.json(); + displayPublicTeams(teams); + } catch (error) { + console.error("Error loading public teams:", error); + container.innerHTML = ` +
+
+
+ + + +
+
+

+ Failed to load public teams +

+
+ ${escapeHtml(error.message)} +
+
+
+
+ `; + } +} + +/** + * Display public teams in the UI + * @param {Array} teams - Array of team objects + */ +function displayPublicTeams(teams) { + const container = safeGetElement("public-teams-list"); + if (!container) { + return; + } + + if (!teams || teams.length === 0) { + container.innerHTML = ` +
+ + + +

No public teams found

+

There are no public teams available to join at the moment.

+
+ `; + return; + } + + // Create teams grid + const teamsHtml = teams + .map( + (team) => ` +
+
+

+ ${escapeHtml(team.name)} +

+ + Public + +
+ + ${ + team.description + ? ` +

+ ${escapeHtml(team.description)} +

+ ` + : "" + } + +
+
+ + + + ${team.member_count} members +
+ +
+
+ `, + ) + .join(""); + + container.innerHTML = ` +
+ ${teamsHtml} +
+ `; +} + +/** + * Request to join a public team + * @param {string} teamId - ID of the team to join + */ +async function requestToJoinTeam(teamId) { + if (!teamId) { + console.error("Team ID is required"); + return; + } + + // Show confirmation dialog + const message = prompt("Optional: Enter a message to the team owners:"); + + try { + const response = await fetchWithTimeout( + `${window.ROOT_PATH || ""}/teams/${teamId}/join`, + { + method: "POST", + headers: { + Authorization: `Bearer ${await getAuthToken()}`, + "Content-Type": "application/json", + }, + body: JSON.stringify({ + message: message || null, + }), + }, + ); + + if (!response.ok) { + const errorData = await response.json().catch(() => null); + throw new Error( + errorData?.detail || + `Failed to request join: ${response.status}`, + ); + } + + const result = await response.json(); + + // Show success message + showSuccessMessage( + `Join request sent to ${result.team_name}! Team owners will review your request.`, + ); + + // Refresh the public teams list + setTimeout(loadPublicTeams, 1000); + } catch (error) { + console.error("Error requesting to join team:", error); + showErrorMessage(`Failed to send join request: ${error.message}`); + } +} + +/** + * Leave a team + * @param {string} teamId - ID of the team to leave + * @param {string} teamName - Name of the team (for confirmation) + */ +async function leaveTeam(teamId, teamName) { + if (!teamId) { + console.error("Team ID is required"); + return; + } + + // Show confirmation dialog + const confirmed = confirm( + `Are you sure you want to leave the team "${teamName}"? This action cannot be undone.`, + ); + if (!confirmed) { + return; + } + + try { + const response = await fetchWithTimeout( + `${window.ROOT_PATH || ""}/teams/${teamId}/leave`, + { + method: "DELETE", + headers: { + Authorization: `Bearer ${await getAuthToken()}`, + "Content-Type": "application/json", + }, + }, + ); + + if (!response.ok) { + const errorData = await response.json().catch(() => null); + throw new Error( + errorData?.detail || `Failed to leave team: ${response.status}`, + ); + } + + await response.json(); + + // Show success message + showSuccessMessage(`Successfully left ${teamName}`); + + // Refresh teams list + const teamsList = safeGetElement("teams-list"); + if (teamsList && window.htmx) { + window.htmx.trigger(teamsList, "load"); + } + + // Refresh team selector if available + if (typeof updateTeamContext === "function") { + // Force reload teams data + setTimeout(() => { + window.location.reload(); + }, 1500); + } + } catch (error) { + console.error("Error leaving team:", error); + showErrorMessage(`Failed to leave team: ${error.message}`); + } +} + +/** + * Approve a join request + * @param {string} teamId - ID of the team + * @param {string} requestId - ID of the join request + */ +async function approveJoinRequest(teamId, requestId) { + if (!teamId || !requestId) { + console.error("Team ID and request ID are required"); + return; + } + + try { + const response = await fetchWithTimeout( + `${window.ROOT_PATH || ""}/teams/${teamId}/join-requests/${requestId}/approve`, + { + method: "POST", + headers: { + Authorization: `Bearer ${await getAuthToken()}`, + "Content-Type": "application/json", + }, + }, + ); + + if (!response.ok) { + const errorData = await response.json().catch(() => null); + throw new Error( + errorData?.detail || + `Failed to approve join request: ${response.status}`, + ); + } + + const result = await response.json(); + + // Show success message + showSuccessMessage( + `Join request approved! ${result.user_email} is now a member.`, + ); + + // Refresh teams list + const teamsList = safeGetElement("teams-list"); + if (teamsList && window.htmx) { + window.htmx.trigger(teamsList, "load"); + } + } catch (error) { + console.error("Error approving join request:", error); + showErrorMessage(`Failed to approve join request: ${error.message}`); + } +} + +/** + * Reject a join request + * @param {string} teamId - ID of the team + * @param {string} requestId - ID of the join request + */ +async function rejectJoinRequest(teamId, requestId) { + if (!teamId || !requestId) { + console.error("Team ID and request ID are required"); + return; + } + + const confirmed = confirm( + "Are you sure you want to reject this join request?", + ); + if (!confirmed) { + return; + } + + try { + const response = await fetchWithTimeout( + `${window.ROOT_PATH || ""}/teams/${teamId}/join-requests/${requestId}`, + { + method: "DELETE", + headers: { + Authorization: `Bearer ${await getAuthToken()}`, + "Content-Type": "application/json", + }, + }, + ); + + if (!response.ok) { + const errorData = await response.json().catch(() => null); + throw new Error( + errorData?.detail || + `Failed to reject join request: ${response.status}`, + ); + } + + // Show success message + showSuccessMessage("Join request rejected."); + + // Refresh teams list + const teamsList = safeGetElement("teams-list"); + if (teamsList && window.htmx) { + window.htmx.trigger(teamsList, "load"); + } + } catch (error) { + console.error("Error rejecting join request:", error); + showErrorMessage(`Failed to reject join request: ${error.message}`); + } +} + +// Expose team functions to global scope +window.loadPublicTeams = loadPublicTeams; +window.requestToJoinTeam = requestToJoinTeam; +window.leaveTeam = leaveTeam; +window.approveJoinRequest = approveJoinRequest; +window.rejectJoinRequest = rejectJoinRequest; diff --git a/mcpgateway/static/logo.png b/mcpgateway/static/logo.png new file mode 100644 index 0000000000000000000000000000000000000000..3cdce054d6a5cc5989b6d2819f11e498cd5491ac GIT binary patch literal 247320 zcmV)eK&HQmP)s+jUzA)&WMg>3ZpVsVneVoGolWH zg_&`5!~!U!U?7CSqyUt*Y?WC&(n#->cSpYfnIb#^CyoYJ> z9=rGK#muf57d`yw<6oZay5ns#XLhZ19e17QdCx^rxNz~Jg|iFAl31}~#i9!@x^Ug% zMT^nZ)dk0O0OpkUN&olTZoJB8&J?e!*6iO3q1Kd7aafM1^4yp3+o?3c%+5=id?b63 z_nYJ?sCF=(k6EEUU6xM73RI-OoJt7!zP$ zTW#!eZmNAQ#N<2J9?xX+$?0qNX+;@=AEI)uq{%u8P{7{pESt-o&KNtu82h#3IM2zi@4fH%pD|}ncXL~7 z3uPXa^^U927Fr%n4uMKi@dpO-|6c}j zyzPHI13AuCje#6%Tu1xt1%h5J-6* zXc&TnF$Y_>ZO8Y&_XG4D8AdvtM#;~wbX{k~wryMQx&QtLb{=a^eqWk5)e9NJEC4Sn z194sVp_lW1d-m*oLB5}}=A6?nzv7B(+S}TZ$z&iP;4%+k6u@=d3EGEhzkTNMhxYr~ zUJCR1UA3w9`K63)D!;9I%}bb0dA3n;PxaZ9eypxN)@7Q(+1J{?l3I)(`f!*-*U0~$ zaKe1N``z!v?%jLv)RRv?`q|HYwyvY2^Pc+px)lIkHU=_d`t*6d2M+ws7}C@kh~q2+ zaL3E}aAOX8x!Tvie(Q$St5>70t{#w#!aPT-qpst{pZs5KzmLT25AFX-o8sR4t!&ZB zA7V6#m!iqp=Pc4UVOrbaBBD=oHEAqbJRc{Yvha+Bi|23n@sED`JLoL4mi{WeMK@hS1Y-|hb{JN`p^DQz+% z-T$@yVwg$9!wKIphoR&LSh;cqPCDtN75{zL-J{bwIuHEtjyu2lJLu}m-ln{kr>|Oh z&0`y%xZyY3ovF9K`pYY?eee7KVg2k`vyscy#-9r@(aH6@Xj5fYFIO*r*O~{j&CeC> zXnvGx{*G#Xk(ct@s@GMWuZbkDTHjx+zh~m}&5b6C8@#F`T1?u{GG9QIPV*{eTOq3W zS*GZ0Pd=+ID#@Fs@4o!-aW-uc(>xb{n5`qJmy+S`fxa>12T+-i(Dzjf_HRJ`J+YiZ}G{iz}`SKdz? z<2xy<8Hw$9UT|0fY+;Gir$8*M{IjFor=ouo`YQB&Cb2m)STd!hWgD!)|( zq7q?O{)wm}MV9y2{_(`;RwH!P*7)d1CHq-N(>9LwyRsf=^{e_`tF9+hd#-gy#-`90 zz5gn17mti3dlY)^CLgu+si<6e{d=`3=_M?;w$uy+OFkp!P~jqX1cL)st~Ci97yeSf z+RMkfR-S!5G3FdmKLwBJLS0=w)~!4DQh8M&pU2_DNA^syE7Zsb()$M=$fWHxmt5R@ z$t9Qk;7xCO)4ImSCNRf|iL@&TVA&RVsf`|LqP;} zMILCbkG-SKEIi`6LleyeF(%(334;7xu*OfcVXB=uYEwUm@2`cE$*!$NR75b*NlOl#egu3;?ET05Qy~NlD~B}H*RTKj*KeD$S&K2Y6u|Gyg!R9gf&9tqUw7?4-FV~r8C_k7 zxFWc=B&D{|i`083G;vG@A}mR+Qlbl@2t~hy;o%V?ZpA`@h>sxvWiLl*Nv@*~mE7Ep z{)~2)V?W=H8?fIw*@2HG4e^Ta&2!};bDZ+!XU;@)iu{ZY8#}sh$);HjjS=y9Ja7J`*$ynu{s`QW8eKq@tf@Q(ud6(}Kg61$$VG&Z`;T(~CMG<%u z(ck9zk?D*6OkPt>fT8cSun^HbBKp0&x6n+vKxae|Sgz+Uc1VX2Ss0*#*XXx?;GT{>?krFI#^4 zl%!b4aq6dX-EXQL2Lll=Dthw8w$I#f{cUf%>Z(6)Y;2%uc3cRwPv;&n5MdA=TS21^L`Zol>W$}m@$53K;mC2Qe)IT8Bpt>KM3Z5Cu#HqHi3tn? zaTGJA{*{?BEfbhN>_*5Mi0$k_2qxzEI4e+<|I{TxhLRLa<3YbJzq^ ztNb0;gxVG|9LK4+E_qhqM-`uGb4bNbHgI*h#Yjkz@8+BcZfQS`GBMeZR30jZB^i~T zXD-;p2y-VnEAcE<9H?qQfXM(2Ol!ErU{G#LT248AEs#F?G~nU9%sn;~eXqK=eh60&Fg7}56jbrYD6 zW~{Inkuvy}Gg$eZB=B$)Gjz1JW!6X?0S-rai1Zw!9ghSd5k%qYjbdNDW#P}HWT3S%bnE3IoY^jd3g+tW2CJ~&FPLizqg_H zKrhZ%eeR?O&iVaeAivpm$;B5p@7lF{>GjuNw|>>?)0On>XztD`-Jmb6QJjcbY_0^_ z5F7{l`tj6LPhrQ7-FW1YN3eU}UL5N0Cuvk}ILXZ9j&!6N%WZqNGc65rH7}N?lt_|_N!W)daRISd+7udl8B1)AqX{jG9p=jFH<92cbIN0f1h4d5 zN#-8I_bqLZTLx!chMixD3D?nCqLWp?eBbdf^Woe*zXkB zG-axM`l^*5dTiqppZZO7V$q_7XJ2&TMdx4p(T}cgZfm8_$|knTZzg9cl~+%^O45sv zbBvGY@%R%@;_=5f;_;17;JN3XM?PPqL#`xUk2Db?QbK9%LL_!_RI|4q193H#X5_YG zW6hIUh&2Xbk>E+3x6rokV@ZdsO0v1h`%z`Rc#!O)}%z7 zqo{&0R2|KPtQV7xC-K1?YbRRYF;VePX9Ags?^~Jh@F?*YacjUN41$9gDJlN+uz-gB8n{;q=ujaORn-Fte)zjvK|=KW+1jvv$Lf^ zGwoYSv^GYJ+-RJV9{u7M_iniUhR^-wmMt&*ugRq~Wd?G_>Xp|%`uGzsTjE30zWT3U zx$2~oPQ3Z-HER|6N6HwEb4;UEbq10gC}Bx$l~)esi@4>Rx8c@XZ^6L8FiDZF<09pz z=-ClBgwL~-H0^sJlL01WAVwADj8FVdi)hC(0mYbUDIx&lZ7^(5aFp#O4IYkwiYjp-vjQ||w8C5diR`aPUG)`H{%gLRuQ(`LS zzR5izJAQ1wOS9-#YG**?`xSmjO5DmShIpsbIW#BTzvJw>*b z%oB}mqQSC!PFa=nQ295vO#>{7$KqIt8I`7_w8|h-PX4;OEY_WSCf@&#SEIe9MQPmv z2>cS5)J}|P(`YJ3VIYWoN+Q;_ptqLPj#4R)=+&vGt$z8;F)y)A*@QJ!ne%wsr#|(m zA69>M|=#6469CXsVo!#eI#46(thZ;K(>0d;AIf^sb-dk;gWor{@5IAfVJI z>6A;9h|3qPW1OidmW<~lu@D9iFD!WDgaap%F~ty9 zO$3cJ#S9EKHmmUXr=o0D>_fzq7FFR8c5NG*MuQ3M^DvG+GY(@KNY0(!>|hBTQaFiE z!#Ky8NNjmRly#74^F$>|t4#QHlnIT)X2#;EOwIT`ghn(aS{uqV1Qt7sc@RMT3Y>e+ z*=TF70}}z6x)Y!T2O}CVG5Cv@`D~P+!89_B>+R)-A(~rT@Z9syiW{!~>_6Og+rNKd z`t)hp0|yQkC$7eCr%m02wR+{XkG+f<$f}hqul>=TKU&{0t&{%k==cs>PO8?qqoq1` ztiR==d5(iX0RQ=4x8sJ-egTILA11qQDxE0{KvRxWX9CWjNT!oWa}HyPZQME(js-b2 zu-63to;-)?03rj$q%pFIPOxeLT;{q7>c1sDC8ivWY1f3)WeM?P71F+qC^4up1FIw- z#U#%Z8g@Whl_e>2k=V)Y`*V>H{4(vQiUZXi|3s%Zt4f%T31yBYzX~9@6<5Nnc!8=~ z3Ag4<6C+n6%&PTiVq=Nv&Z3a^{nQ6+l9-cn$yyGDGggiY5~6I6s^(}P#ni<>LN_@R zfg8!3P1LyDinXYWUzvEX_?8oxr5e9=KRsRCXxib#yi>f4<5b*F>$;ORKDmZwYg@6V z58GtO327+zj$a%jhx=c>_0MtDU;YI$=`;aRqNqsDagbS2#X6Ni)6F?CdyFRx#Fl4V z*CjNh>!z@K*KWM>m9P4pY9qfBC;Ez0Pg`;Qr?0=LqoX4s5V1sl{O)q6LfV#pj};2I z=f3;!{U7`g8#X)$Kj3gvS#r2hkX)HJtNIho##c;1Ga8-mUmmbjDn;qyrD~aSJ$NZk zK|&n{&r7MkISySX-(63+VI|RFo=cB)os>elXeg;vi>0<9-)WOsH>`qACR#-pD*V4}B|4}~K@16<#>*vAmPm$jUdR!t8J1M@jW{VR zcrGmFBFE}FPV#)t%QO%UoMRo3{;ZMaBJ96tbh=)rV3mszkv-uUOfSs}Mlp7{|W#oeq|xy<*}Uw1B`c@qpQP zCE>=zc<^Y#1&QgAmzgAyAbaU7K*P};*QNdET2Suzdf9htZ^%6#g%QnVsT9BuLL!!h zLS94bMa;xioQpNDwiZ!^fzT{fF%`PhKL+QCGLe6I#bFe@#i+xU_-kaxcQ9ru0YBOh7UIel6gF=1Lt5>CPd zrmBI!$U__`1wMZCv!CJm&)`}V_;k^%F`p2(5h+?jCy1S05Yf~E~6sHv?%TWd4g z+S}3G(15zSI@H$I5yJo-4l8wh5@xF3M1mS|tk3lIVU!?D?4OnsN*!3oM7hmZl-pC2 znJB-`v5alW;ox+E2~M+p6P$<}Csmt^i4J{I`C~H;)7b%>_)s&Jn92-7Do$v}v=A+j z*m|{Q7E63Ohz*ly7S%qh=6tQZz8b8w>|WJ;G*zYciuydsW~(GeW(A3(9>6T~?Tef?~O!i(pwBBxvo7&wLo5(KKBQqZfZsYP3B zE3%m^2~B?BV|ZkUV!Mi^q7sNO@qAa$pOt8+4p*AB%Oau^mtFR!xZ=;>jHadrFdh)n zfy}9<|JY^$Y$r$T?6W1>_|FM#GL9$7&GXxK;fgEYif{bezhQJVk8~;x zP#lRmEGUEBR&L|u6DjpbrZID77tTNbl~}%fITkHm2&pkOH#H-j&cuh&(HmN6Do(r! znGSBU72||pCg>xf)14^0IP_Mp^e`GiV-B_^I!Nr;$(iAa#KK|`DNd85UX}*Nl1(d_ zH)lGh5Yu!=$70a*ISj&8<~fR3rfXapL;RVeV@Bdw6;SXMv=l6KHi0B|LBtG)*_sqF zCRPDhu!9~^Mo>lNnC+MOP9)N`jd)3DO*xaC_?cR^6UPgQin$d?<}?=a%y+y}J5|DkV}T(^ zD~h!v1$$4Jk_xOG8XiXfz#u{ABO_$-P({aU;Ud%#@+8qmocwlT?%Rh8XM~I%2&K%-JU&ruxN1uWewy{ z-|+gY|Lc1{_`-{Q(A9tcH}`$;Lmyq&($W$mC=>xQ63p=#t_-bY4zOcSFTVPXZ{UY_ z{s_ZkW9r@}YS@Y)5pgtb!Yqs_L*A^et`^g$PeXTiH)hV7g%p{Y5<9H0f&q=xOb%T( zQBrkHYHdpzGUK*$1jTb9tn?kcn)E%*-`2GUP zcr+D>ot1`Qc;%9sW5J!l5;9mV9l9{l$dx9l_`$YKSi}q%l|GOvsGQhLSH-7N*@u{P zi=(Z<)KaEY*Oct_G4nmS_Jk>*_RNK`bc|}giz?~d*~ESd=1%;~UDW+Ocxk63m$1iA*L&Clb&z9UMyXUaS+@7LAUh5Gt>6)&SF#%RHE z%u*wfukm65#bOa*saS4Stw1$0UcPTw%y|V)J9P!Fe9PrnbJj{gY9qpjqxV~+GYb%1 zV99VmJqmTXi_V0fG+hGt$2IJY*10F^PvbRR80w+GBKE$N5jemec2d@9lG^0 zF_8GGW;PWD0*h#Iph^tM z(HY3G=HbOK5albIIL>NN;A-ZyDKn62>&TJ_W}AW7TAO<0OJ^Wu$tz9c$rwUy!ir64 zmj6#OlDNwidqQdjei)Exsb~Lw0=sa^rN!V2-~-oOgLhx~M!035diB5Znj?tE>=FY| zeV1pvB()ZV_@Do|2UoxMLqv#)SvX2aY;0`AtXZ=t>fX?zgn=aIJ%XV7$@49l-T2rz z{hM-vIn2toU~41ET7uF7sl+nZec~gy@D=O8gA!a?M~*NCw}kPaNMM=*1Cbhuoaawp z_nG5L$SB)+YtLzpqUfpz9(*{i`Ly!1Wz8#BET;@4|9Z=96Jk%^^Ve5BF=eXVHDlVm zp5B8mDu89vGaH{d<&^0 zh!ZXn>DlpIw6(Qk-n{u(xbOtj)@I;9N1|%Gudp(oCWuHQu_v}luA{SQ430^OU;@V2 z1Z~4G5PR`$g2E~gQm7zC@d1s$J30fg3k;Dt!C{FRO(cE@N?K<$5urD44BtJ9VbLJC z95)#QF_6k2zpAUL$H0%uK$wZ1w-2_(K)80kPtHKpvutO#?Fdbv%BBw!83>E1Lv;pX zGcn_A_gq({+t8YDIr=RjzUJ919=0H7H0>_Ntixi@rIJ!B39?}zF~r%*9hHoQ#oA<9 zt#3&{g#kQhMy;Lv6pz7J43x0fN1ln;w_pgR>ymWucpjx~!6ZHqIaaBGNUh_A7ha%u zxp_EagrfZ7KV6SMdG!UzN(LfqGg>^|WwT<3Z<;wJ=ePrG+p!xL{qg@p|KJcrm}KUa zS9Q(oBAO7JpDm4*5<*HQp(DkiO_8LX6id`+$pk5rw9y)v)j_Mz#@|_&OXF1+oR2Fm z`!md$*+nsCLIkve+-4x+=nTXNx2A<*h`)U6+t)qy)HBCr&YCfOTDJE%A;DAjftlZ&Z!>HqxK!i5a!JvVu@@4JGE>#EF#K=@VOhGOn_w$ES^U`We;%fMd<1 z3BYp_?MVAzBmsyWn(|!|T&_SpA!S1isXS-EIRdVMjQY2dc?htw#Edo(tGcxIMo4rC zYu$Au4?&R?vdpm{)s9XEXp$V7B!G!kjcKHnN@N#B3i-s5II%@VK$ocKpeXCI^36ER zDI1@fMkhY#QUH*Lm=jafY3dn^6ZETl=sBj@w-H@*%FyJyi{GLdLj)mN$FXvEu0 z8L(?>8u0D!-HE{?LlBYn1?uyc9~&qCW~L?=J9q58A;&{|DB?O!L+u4G<&hO5m(9_= zRP9S& zxfwUzbQ3u9kjiBda-BeqmD|=QkSzwZx3%Mh#fvBdXLCynGTAJBrR=&q4vs1m3o4C) z<06}>LAoZ5vHW;)D|vWarUTEol1lU`9UU6R=;$bF>vE{CZy=}Kv4^SAPP{x>d`#rz zLV3wR1rIcnkH+OR|HkKz< zeRz177>s;{k#Z_FG&G>Ctp&9;^;A}vvhS7!?I9+FuuNS{Oq8XqsUXK=oI=9R5oIJ> zO&B9e_@|tda?X9bL|VYm&>;Hy`e>fAnH;8fb;Wya;($TEgOQODBA)qto(Q$v1Im^*(y zjYqI5*>1`Cs@BCPM_!aVrb6NrA%0z@bAJ7#P6d z&=5%ysg#Gt=4Q;CJqzhfHl8b?6AKap2}2r#JX1}LO=xXzM_sxh-kp_65MQz#@k=v*dEeV1#USmIb2E3I6n%DoloK-qLQg_^oLf&{DNhZg)gig4t}VKlTf zBjtI-%q21A6%cK%g%o@2j45W>*w~2L+FA_sA0b$JYX_|~`n3tOJs zf*U`79kMA$2N+i`z&2SEA~7*ajqk%Bx@NtkAK&?}@BhZoSyN^po|is*s)bAb=+cX? zTEG62H@CEPDYFEx?&FvYsAw$QCMk0$I0nak+;YpUxap>^09u=&#W9kaC^uio=S7+j zH#Ie(qq7s!r%y*?Lj!WT4ALn_ZBXH8s9_3yeTVR)JMKjP;Ufr3J_RDSw|C%#MGLWF z^(rE^o~5)YNuH7E>(G%ycxvMlIMCCJpy&fBkF=Zf&%b~+BdPt`$!v@fQpPYDYllaN zv3K`w3=H%^L?N15ny_fmVx%%Dm02~^(dY~okx(-zH@YMynRJ>GyhTB1r5@)5WA^`j??lRV2~#f*)264M!2X^dc#ezqwl*w2R3}>!U-?|A{SF*Hf}xR7oO;>{q%vtOKqyWiwZuZPNW^_SKTc~SJJQnHiWxJe z!^`F-R0bu;G8pHBg8!qHl{)EEF9K6D7fL&Na>aq6xVB4^H;hgmb{(DTS| zqnUv+eTC@j@5j*4P<&?P-VUkOhy$OrI88AubC7dXN>YusD2!8GQyoI#EBYdmd&ub> z8_#2)zaNcFO(dvDja>dJ3_^^LjbUVH5V>5In1sq2ty3?NX6eEeg3OlFrgdU;co-vN zMLJ8w0OPCy8j&yezw7Dj3m%?&>REj3lb^;l@4p&NjWx8N44vUeiB&UZYw{Wymw-?j z`QQERyVh^o^sC#S-?H_&W6h7_I#&WXE}+!Qu7R9->aw-}^2L9;dDg7?iojYa@~5VM zXs0TXWyO5%zV88Ccij!xwQCocn@Vb4z*aSQ@*piuNGY_VqXUi2O+>&CA3lij@ey=& z%_8U<)9DQyt>%CG-~JN={R5N=f`9{v(A(34p57ijyZKrC)!W}r&#zP?$HvBS_uY4+ z|8Sp5Elmq3uDaVer65Ajjgl7#kVIij^yo z&ZKFhRsLLR15Z5h1orOTi&C+OTrP*T>(0fD88fInrg7kq!;k<6d$D)VUZjMIO_qmJ zlGaioz^^uMMqP6gnp;}ILPtlA3pi;94K0nx)Yg!cC|||#0zBU%Eg>xW7%U89cK?&9FVgW5^UDHzLjJ*4KO(1a#MpyL`C%K7@-=3f&*G#gH?=Sin5B`p>enusLk z&u`g+u$YIZ5onSKZr{Fx_MFs2ObUMao!p<2C_Vkm(-`g_ihVJ}7-#A| zc?PB3VR~-`g#tD{@-Rk*N0r@O2iNW1u@gsT&&I0LSHtx(YTQx{j0B$Ev>AJM?uvh5TarB{;dv(Y5Z5SUNfn$7jT+Oc&aqQi`6|<*LL%Oz3t%d{mjALkc z0EZ9hG(kG;OkeLI^dB0;&aFGJ=#*2?+S#s{H3LRQMzLetHqzLFFd$7^oefR}&mtip z%95D8xMmwkr4W#KyUi6`fng$G|L(mQX&gpvT`h98H7EqCh}G!OFlk{U!^321ON2I9KTPmXoX&0qefdFQ?2l^bTw znyX-KmENsmHmQzBBn3;t;tL1g`~DB{@lSpVW21SbQfcK7t=vhb#)8fq3YxeKLGz?a zy>t6k6pJO=KvKF}ym$ni9c_prpW;MZ&%^V-{x$lK^r0q`0YV;QvQ6afkt2QR>FL4T z+4HP9amhLP%rj4;|46?Iq|-%&e0dmBDQw%i4dQdjW%egGi-vv~}}^66F_5Wpha9a_BjD2%%pji45A=R?6q`(;xmAZ+*{S)3_t5 zslox*!QkMaGQ|WD9I|~eM1Dw8>ArpYFn9KBwYJt2N(sCF@F9ey0ueRi-ga%AC{JkIs%xT3=z6 z1(r-(Iha){du8fS(&@<97-qT-#WX+%Vv zd}<@=nj1)p&*!B;QlK$;godh4ETmSZv{g)6&iB|Y@vSv9Hpc5|r;LeB;5@?U-~dL4 z2Z>~dE@-WDsT7J~frcctKR*b_99ZAbfNUlc3y2Cp;vr3@K?fb}t*EQ7p?ysLEh4)j zj*Wr=zmVC{MJW`x>C3m0nf-$EUqST-T$I<7KjvtyiE3(Un*HGYKNoA(9zE@emr5N| zOPTPSH*dz$rAvu8aI5UCRJ+NgE)Oduj#Tv^7N5Kbrz}4SXPLnaCmDwT@YB@Qx+I7ywbp6Pihoq@G3hH4roxN$CX-Cskl z40*4wuPaNPV*Er>>X-Y|@pK+B$2O6w{w-Ouge1vQ;M2bF$fOw&Q?OHJnZ%swRGOGU z6j@#ZuARG9G)G*i#`tY9w1X!ZB1>Q zhDQoRv4c2~R@ z3S*V~c?DH2RJz1xzVOfZ#fJOwiEBSdz8ewv$30dBmQZH=FMi>(_r2%p_nkM{wG8aI zQMSiXLW>>MJAdH?=dU|$pjuv{V=lb$d?+~RBO9N@4WIcuf{4NO(lKM1fIza= zToRNy^X6dH>Q#jL=;_%<6X>N}N_Hj*MaosScgJ&eH9U2Yo0^*8rBmeRNX)FPtX}uD z&S_=O=N7b1O0k}CL^6^F8&T#9L5xumU3n0Ddwb*O*;CZm*hq&*crKaA^2Kq?T`&)qzUE?l`eWDP zsw*zXs#8uxYeNlkE|B5@vS9S8_tL0!J=D1_+H+}~GOru2x!^op{hoK>qaU~iXRkg3 z2qF|m$1rqc0RH%RY<4g&M<3eJp%I!ZNjM}ilyY@*LxVEgShXQYAdLi>uAB<(gA_y8 zfs=OOcn(R^5z|>-H36icRh^n8j3Nw=j*??jX&cInBiAiqAmt{4!gyX6RMJtx6t$d1 zeN!XtAA>MsYV#?L)Fi{R60S;Vdz#cDE#wvnBwh+_ZEa|5X(m&R%{)xnoo?-)2sP@> z+_R&zgT`-9u+Uiqnwy%@Sl3WC7Rjh`H8pW*VDoY`B9H>lJeA8$u1z+braW(SPGbCC zB0nu%z64%cRrw2qAOOJV=s1A|SUL?il|r_*9-$P<RQ?G?15+dzQrT0&zxr~^qHjg(pG+^l|r+|mPf+ov- z7X}#UAHtEtM@VZ^LZezw$^)hy<_&fAm_KJO?dQ0BvRTox2P^+>u^8duhd1J;n{J_M zpm0*hN3wM@j{gfUc;&ip-*)T3WY^vQ%ZH9TxbXOUQ>86kx@7Gqulx9V=)>AM0Vax8fTxo78ktw zVsv%SK~sAN<}6r*wrMj^+t`e>JUpokkjbE~sTpxtn&XkJxog&Jv~+ZmGgOdc%OP_( zL0J6=i%(cg70u*9mVF5YCrLzZg!Z;hGG$3d#~cryo1&9hTa&{D7hQltah!^#kyJ-f zrJQV=w6I}BjA`D21#r?HRU#8Qy~yl2GcluUI++u={6>U22a$R;wKgH*fyxa^pM^xg zX3d&O6CQFaSCX!4zG%r(G`6>5T6ZVTJpU}b_3c;S@80ze%xas4l$3Z`3La8ar_FIv z+8m=&SIdJg=OBh(a!#r67o0E`m;c3Oc>U{Ni^jSdL}R0`)SjZy$7^2m8nm{zQCbcuj7T0N zSGBWi23D+E89Uq+ZQ)d=cG}DtXzA#Pp`?_|Q`hUB+ucn<6PH?}oP4QmL16iklT~o1 ztIr?xpA#{xudj)NU!m;bm_2(oiiHwFsl7z<#ZlYPfHTiJ8?)xjK_Lh+nlEAh;XWK0 z8>juB&SX*BREG~<`yu@D;rsFVFa8T=bkCpzMB?%BjWStbfAkPg;kznV# zX?n@=c_tN$t5U|(OEVO1X?sVrnA={IX35xuN};Z1M$;?fs}&3;l8Z_+fFa|0n9(%@ zU;En4`01TL#0TI1K4h{f6^BD{UoMLIJej)t2Zu2_I!0zLk9lB`5}cIQo1)Z69j&dZ zH(Y3VGEZL0BSN+_5vFj*4}XpifAljL7zyYca)L`TbryJ%J(^d6{|kd`6avMdh$YLH zG%sJaWbMmA_i;Q5;kj$hYJT9shX<>Dw{yqyLLRKR`w4d!CkdI=W`yRTsSqC10U=R6K&vd4nP<`8hDykFFWbxbn}h zz-cF)f+jDc?C&PORssetUo9>a(PI(vr4YrCt27aX07D~r^!E>7XkY*jJ@_Ct+`j>% zBSZ8%zYtKGn8wytoV;Q=+Gox{!}hJ);_-KNb>ZYCOOVcG;{BJrGh$HVBV*|4*^B)6IJ`^> z&CSi|p4F{11S?r%S@FUsBpB_%g9pi+Raak+){bd#Qm!go6sZVk^?Nm6w60-<=bzh* zLkA8Lqm<`m_RLwtbdtoD`s8VI)}~F*VB5Cs3bP}bZptGc+qvhhC85J}yclXL8S$3q zpT~jTUNQ?$pFSN8^^J7Te4&8h@o|idkKxdf0Ua+Cp?yXhKK;=T;hfV>B_B!TIM}l5 z06uZu=Wy@6zaVoLCZwuQ0#P}Okgm&O(V_)dIJ+Bj=gh{HZ+Zhdni?%wazb0mfO*x3 z46~_?if;e;z4zn2@Bbir`UWvx49HLCNR3^XxDbV|j2I_DBXT%iebI&Z;*DQGM_nyM z!AFz=Zoc)KxOV+@a9FA=|C?H0PG;cg?Hy=pY=q;AIQWCetwng2CAwmqrLL=Z`}5-f4$|lFISnd<7!i8 zAm^NY*1TUn^l+JtY|S}mo&WKVufO}8bIu{PR3!-0P9cW4Xc4M15oJ4-61y)CxL|nb zu_y5Mcf1Q@FZ%$zk7ue#)7LRyj4RcaJEf?QP((*xCYE-2bM z;xWgjtd}M_SZ5J(c^vXbz73PYfqtWV#CgI5xE1LwnU{QKgo%;k%t{j>`#_LQB4!~G zA8wgY94FD@?BAOEDeVoEm*=UCPZ0V>&h_iw@Op|rk@pNkl_Emu`US-(L?H}OERN%Y z|L|^{z4SD=oFl7icydp~D3Qq+i0o%T@ZON5(n7I>@q7uxqvM2#q8vtC5Ci$y&wi%t z`;JHRnySfR(efo|n=uU@Jg`t{Vx*bLqs=*fa>t!G+|vV;0$PM6r?156XPg1;8dBGq zVnW)a>3GUC7@4fTM9djVK?L6f7wQoi#FEhI%DGO&XVSSynZ?vxkZUtII7E9c<+xJVfLTM!?q*LuD9!L3+#O=A7iEC7jW;k)VR}t#Ky~J0; zgo=>PkB!rHHMO-0;Z)I>&M*)JK7RSj2T?2*(Ad(76BaE(U1I|r9j}zlq)4lmV>W;k zIUYG5K@d{O=TfOi0?NqPD8+|_+QK0B?*~5kK3s9>CCE^0ijwkWPsd8YJO1|l`1yuk zBGR^ed0zz@;9x06n)9n)bv`bA^~G4Sa2}aalSmB_!{31l;Z!@v%zhvfwE^)|Fc5hc zxo3M19mY?8aX-HJueV^|?!Dv$FBJo%Tqlm3A?%_Hw6-)z1`dQLm?HV|$j=Q2K^XD%>TYHl#D8-b(3G(T3+Fu6`93Wcd`@S|~Ccy|a zQk}a`YT>Va?JMx%5Bwv>hX>&yBw^IRcq7ZlT8)92Y{hbafALEWx@98Zo;J)(oRagA=yWhDko6hQJ-$b%7QO*@+Yx8aLZz6^ce)aq| zeDvd=!v5YvD)7rr<58{So{Ks2=V8?ur=v-#gR$?0#b^fTqBL^XFl9_Z%!(xDZQLoQnA;EQpym(-Ho0G_{&=TB8yVQ4D-CEBQfyq93XtQ=klK zBPxN>pNT6E0e4vSOt|mv$bLEI;6HFNqPXGXHo{>Lw$!;uOENC>dQ z9y6H?y+%extyDm)Vx1jb8rQ+L?OUln%IcLXRDn7vATovGDoc|iM43r1PV=LYeydll zST{5@bo=;N{;}PA_KsCM?o(+~W*{@BPs{c69*8xNYp!|U6Yu%Ezk73ATRYkOE0Nks zMZV-7Mv62d16lhR!`21+j) zVHB9dU}cMCNzp1EC$`eaFY_CR*?z3p7h&5I%bAC5@Ui-;|1aXY8YXpwVkmY>>ZE9m zmZCHi*`&xYq|dIY0ob*`flNAsnwlD9a#@uIBCcnx;I|=DPC4$lm^Ei6-g^0EX!TOc z#~>85j2t7Om{Pfwq}WNbxf|C5XwD6+tmXlO;tGj#8+62ce!wp#>Ai zP8(7ol~pnjlkkiJH+2A+a2+B_o~sZwuH%+x7}i|gPV?=08FJ84TBLa06ug@YiGPx!1?g(%gjQt4_xczW*aaiwb7z?po{R5!EB3Dn?lDiZG8xk zNoP@C-$dCI49G+M+?Y=i;Zg3(jT@gpds{0`KIKG(0F+E5&Vk3ueu`MmVP$bkaxc%F zH*eXGe*Cl1z5DjR=oIi%W*|26m_2*ey!MWc?3I7<)^(nj(XkH|L^Rn$WdpIZhf3zr zJCw&YA6}15n>K6plWV83O1EtOOL-od4jNjTaq4NOVd3IMBo)GSRhkaRiS4N50GL2$ zzw&fEMLdkJK~$tD*TI6!s@WZVZU>8F1}fCzPMb!DJE+vV=i#9RQ^WhI;2Q z>5J%oam`~Ms)%vdO^WddOG87Ilo~qWotZOpt#K?y8bs?yKmd=3T0V;K$CqA=(`L^^ z%C3Q)1oX!X#O8c%3Jfm(n3NWEyy1G2CHfMx{xKec@c%*Ago1$St znfoJw7$^~&eC+&VP((7;{4_&x1k(&f*DWz=thmrjq@00h_b466KnU#Evk&|C_F`}} zPgXcNUb(8C;}JoQp9L1*JPt5-bgFbNz#>6zKa+`7Wv2~D(o{aQNE403RxVTpXoA=h|#B#+UtBAG0G?oy$rz#qrir?14P zKKvn^GG{Ik*;sRCmNxCGQW9I9_o_RT$Qf%#}%vNKznN&THD(2$m368 zyy%mLB6Q4*t2O+<4=_4Bid?3KQtU*upFG+ET&Ml1t*t>mU$E$ut!JsS#|$26!Ow2q zgjw_EV)nc_6nv`^h6+uWc$u1Oa_=!zBr6pHEiJ8BdD^ng8ypu1^^D_M{)Sb5lUMfgdzQ|uC@WSH8n(l zi-iKl^F@r0`6!H!p;Q>BfI+`728a@R41348?6()OHzW~{$XTu zwP>pG;8r>-Fi~rmxJiM#?)n)%a_uKkD5^alS<<`I=N2xUi*Ma}i>j;P2x4Lc=XX`` zuu6*QXu$!o&>(02@ZhLQTjL5a9>zWe6BFf^hKYeOLXc$uH;sLLNAQu4d;<6U&;9f{ zcLa^i7S6nshncfyqjOp((T^w!RB^0O)i05~FBA$CSStmsYHO%atIE*%?svZfZ+_h+ z1OSRcpBSRxiKbx-&2B{qgrie9$!C528{gcpYxmyw9xs%4${L8>zVn^$Y-nxk(79k( z^^Ce@9|)7VDy9;0p+5YvPgB^w=cULOAY$`Y;xKm|wY`=uT}p`%YwGHVX)s-}4iqS6 z%nT`7pnN`$U;Sz`_V3$A5!g)~ZAjPTP}25s!q0ORK&4_mlB}=LQK5{LtN+Tuts>M! zTzpAXK0Xdvy{^ zmvUI;3qFce0<=I(cx-GGrNS6eEI@rOgPz_)IC!K_YlrAR)P|N5MX1? z^_mO}4B%Iro}~-{!vllJcxg1XG%KgQ*?-o7bX|`GmEGI7qc}7IM+8WdnS{d+eH`xV z!@Pwjz@=bBrP?q@`ROE6jhG8xxELvwFgQ9!saf*l1&oj9Q7rhB%dF(9pi~})3Yn#| z3scm+h6ZyDAmKU)F!qI+S{}v}s2MT0vjrQIEMo|TVu+H7jE@y4Ws1~zWS8ZV@SuSA zz2{wc*IO?`eI^4puCQnU3v5)9)^Ri-9Q8F>)TA863!W%6kF560Tfb_YbZP8 z5gvK$5j_6*(}*IE;z^((&2&y8jx)|W9j`cd4Vl|d zWTG+Ma*ODvyc7XpC?RE>ywP?M==%1|#S+PUW>Z%@^vK3fRg&_fwd2l(wS4*Vwe#mMIL==gHB2Y}6nniG6g}asZQF(DJP$VS>3a#Zn61}h_Pf|a#JHkn__ry7|%ZQEH*y=I0|EVxal+< zkeY^iOq)IpbLY&(qJ;}7Lnt{41xLyxIJ0~o9y$0FG)`Dm?2rgXGaGK5=;|vRf1R@c&Ryx(il?kF|T_L z8tWQypr;2LpWKeaLj{7nhA9`!OHsCf#`=1K!e&w#CGYYi1$(@xruIhS0!$W~P-lViLlqGa z6Z5kg$#I@?#sY1~e_86!!+b#|2UjBS>{v6sXOn@wpp5j$6L*@A&7N zZkC%oad7IEN~N)F+g5nqVlrD1f-owt<|xP6+S*E4numsls?4qSkP3#+-f$zXzwTr3 z!T`KjP&7ucm}w?f3Ksi9%zVr9C7-`w{(SuR&+q={qb^{TcHA1sJ$L{7fsW43#>DoT z*gWH*Rk2xWAR45S_SmN9@b0VMhp~~ok{GzLaciW2t0bW2_BNb;=9z>VNoUgJJV#uE zgXxXO(APJB&CmV{k38@YcD%5S+$^4xCgzk&XW($4IGV@aUAsxD+`VfTnZcTyn>99E z6Mxwi9%68C09&4aj-au<`+HCrA1A^frA$fSNlg@q8G?KcLgkyH%$ts@?fuG(#8g%U z2n(=oCAH9V#$uJ)q0}47&k%%4YvKk$A#7}hL2apiIKm+*U7=`HmQz0n5xB}e|CTqr z9__grBF;wWcC3oDa!ey(gr}Z=9s{FeL`W!UDy2gg2~5!e0?x2?#~!?}bqDI}8_7I1 zG&n?w=A6h!trNkE#^Hz}+S;e%iKl->&R3y*Lq_5+7JZD2<&nu`ka zu0Wx$7Z;zk3|C)uIbMC<8l1Co87?^Ybo|LBm!LhD!RE&w#E!ka7#&ik>K~v? zs45mn!6hLQ=P&^Q!cYh8@tC*>U3|&pGVAZ_r{Bw$&pFyVJ1Do7)Ie+$mGMzYVmv-N zh9moXDH2<07Ceg(rO5YOCP(nm#~*wc+n#<3BL{lm4-cU@I*fwU z42l@vwJUE14yeOp#GE1I+-a35kOaF|E&pit%6Fb3FQqc0K zVU!u_Z28&tjfzu%xstLZvBWmrA1~;_IaF zDK#QtYWZkCM%rl;~dh%(!>VgZv98>fvD!Wq8c5-(i;&qU7QCm}s-k#p@!G|Bc z?RfaXlo`nC6{ns5`RlLSan8DRjifK=xCAO(gQVy_vtX!pGaG3Cp(D8Ro$tc#y*)@# z0wBAjCnKrb^*n?OICJf}Xl!UiE>}aJR|al5sT5_peEO+puyflEjF07s@KDYx<|?PO zPJ?00AxsC-Y~8k%GJsB-J{{Ry4g4q|NvC)JK79Av|A}8e^DK66+kqX=zkn@UU!YQ( z^XAVZqNG+qId3H~&if@4{1Tpg`f2Rgxsx_|Q?p7>K@sOtdLl^`awJID2N=fO#(-QphyK1{l!_s8we?_`EEb=#1oIc2 zfOI-bxywAt!=|2_O=t1r@BI*C2lgQy72rvNrSx8v8rI|zAi*F^Dn~}ji5x{iY@#v{ zVZ$_(G|e=KhXN(yz`&qX0xty=%o&zK@*~ci*R6|!IW(6$x9>nHUx4F>@blvkzK>hJ z@-JB2-Gvm_MZH*JtxXUI<}}-(#TL`RB#v={Y=BTjCBH;WQ)Mqu5P0P?Na~r$0WVs( zm@*Zgv*t{kf5AGu_O%z|&6oW@tXZ=Pj*v_>P{18w_Bx9-c5dJdEXEShK9Rx$Wi8ME z9YUH>R%tosMKWJ;p|lR;Ea#e$N^K+~X%!DKZ*Di<@TZsI`RBLcg{|ADYAUS_OW?G_ zBO{ckk?O^WWPZR*N+U+dt#m3yX;LQYc#QV=p+g7J)YO0#E0$B{3zJf(T(D0HicuK_ zId&ZnAT`t0mbN+H`p$Qct9HngO<1*cwM*B&@)b1L1YcheXngZ=@u09F-YIm5P0^w-?V-{e^z2e@q9Pam`sBmD-^S zGcyHY_UzhC8D^#WHhcaY^dC8dpWc2u!a@N~ppZ2YbLgNQ+TV+x-T7mj_sR=mGlub# zLFV1IwC zjk$4Mdxp{BA=+a_p))v_N*L+uqcapSmn3p|9`3$-15R8pmxLEKxkYx;R?SqUZlj8Q zNx~=JeGPmuL2;1|m@@?1cvYo#(a?}aQ%jDKthu}|l5q@; zj-crK7%LW$&emXhQ!AmgxMnO4q;}$JPN)(=O1cw-Dj}ts`xGMKDA+Dn;FF01S4F7$ zj`Y#FkOGRa_KS*8hPt{sGA$1c4C=~=iD^onao6#1`w#EHg%`a7H8u4p1Orxqx{0%I z0&3Mq%9qyO-il9t^n;r}{PF8fnd*8@iGlq6d;VIiTy;9yJ30x*dJH~M){3Pfvgr&F z%wInC4F2s~-z8|Qa&q!GtAYI6OM57W0nS`=HsPEJ4dtk$fKpXw8f7H4jNStW@%)w- zFf=+!2UBVhk*@;u1ow3@7!e20FkQD>=LxXYG(2J$`BDkHckiMx4-5>DdOp3aLq$PnGK7ku622n9-9NdD zh_5S@qb+iQ`t}Z7aLFYY9m^{-RK!tVUk}N9NZE4~F@PP<0HCLTfIOs1~GVG9JRtn*xL&h4x!#1L5p7mW;G*U--)jJT7sn-21+Sf zpf$NDM35iHNdIB7r@I0eIdlYvwwJK#-LHom=1G#|A%}2FaF}E!1K@={Nag#$#_RFs zGfu=~k8Q??m!n}tOln^RD(*xzxll4~o1ogeZXB)dX%mgqIvingAF2DKsAMY#vRMdTN_>CXyg)>siY^6& z26Iy$2`zD$1!Vyalu*Hx1RQZ+r($uyDDW{&1V^5)(D#YCG86M;+4l>@A|V-B7$EI> zlq$oCLXy1Qvm5D&^H>pxHmPj8Ip)+Rl|*|;ZE0;>BEmbd;wPgyHJ~cgycf8|Wxc7q} zKqjL~p4|b@#qPU3KccSTjyrDO_1w|tHh$qt-}t$hd4Dc4#ee+JN7!}OT~C|vMSyk7 zuZa%nJmif!er^(f_E-0yS@RP>ufBrz_c)BOZR-|lYCXRHAinqYZ{nL@`YIlI=wWJV zE7?wl%ECOx$H&q1LReO&O`&2cSg-A7s0MAhj$# zB`r_H+F;PU0l|(rm~BxVTpD8qfn)S_=EQMQ0%R6bJqU0j?~@VU(~mq(=iT&Mx(TRi zsWQ6F7Yc-ckVY(*&(il(r%vI)2Oq!-$6mnM@o_9xDx{DYcv{0EN#DV6?)(Jirf10D zsv)PCW`7sw<}o!nt!kl!s^Vx+SxS`~C_9l+tc;qZp#>-C;#{pt@Y+P;H`bHMNwktc zq8Fp+EL8Xb8Bru9IsSRzoBxHcc!U z2GpdtV*xAXWOyA5*f7$Es5uWFHUK7bSRaV%5Wylu(4=d&QNz_+`mnku3nK~%`DAJ| zO3-Sdvaq0}Y-xT)>2^084VnrTEa}|k3zce9!;8EYq z^I8;qbZ%C8Hm>W!(KIV;84NEed08fdWdM$;04jNcSjXu(ON0KVH@z7@efVLz-k;YV zB!{7qRnn0L)aUa^RI5&34f@__G{~X(EuYqVr+&I&CVy5XYLl5XV`FNT?F9Ll)Wi5_Q{_t@zUCKaZ%ihfF3%)1ezStfTMpoNM(uCa0%J zuV8+mLhoq`t#wQoZw^bQfy{)H$tzuFebdPx$cJDU*s*ID{?luI88&0Y|6}S2ahN$! zS3yL2?P|`en3J^W_rCodyy`WtCPU1%Vd(AarHmpeCj^=TXgT!-!N1 zr2&vmCz&OQ4JGM%5t$rgq`J3u(`I^4`P#Xq3YyZ;wOUY2T`ZkHkk8JKW7L~Xg>>Ue z>X$R5pCw=F`5_h?9;RlNP;dEQwh1eWFjO#b<641VSqt=Cb$sn47nxd-*vk?9gF}+gkEIdFulU4(U^z8B}umHn@S-j$< z*Wk>z9!68x=~TUZ9$u}AdTj~0LK)omusAn|>f$^(LurtEdV8^Y%@_eI9M>lL5}83n zVMK6Is=MnZHEm?eR5}?lGZBgg$B!aZrl(P>)+h++9U7$R;;iGsA_nPXwn?NlZlWrW zJiO*LcjCSuFr0YqFoiB|E<+Lkt5&VT;Hm)>@;T}?1inuV^mG?7HG6uG(W_kAjyI6sfKnGwVkJ7 zYDb~aWo@QlrX{E?1#r+2deXZ=N3oCA6f;#1Pi6=@+U8xxw0(Y{KB`rH05T$q@IWoK0;Kn-J) zvdOLw*MOLMt*RU*2~ruzWLyI8DDT7i*#yXHhFLoq#Th+imJ$@G7`p$Qer+T0&9Z?2nDufH+pc#l!*Kao9S8K3k zq(VkLrUw`|V?6M~AEIw$4XX73pMM8QcYpJ@v3vIx6!Mugz4Bv$ zxy2=npFf9(4(!J_{^L71eS91l(kTrn*DgLNLeFb<>!d?`ptDea1k4AN4V0q?qs&g*+y$YFiI$ z(f&46$wN(Vdjy?Lc$$hdyrtl~N=!(L@p~bPg%arxwEP8%&=wXJQT2S7ev2Gt#7t_+ z6Wljey%t~o+%SpQFVUEn&&3Y|iWZk@O)NE=sQH{|_vH)hxQVMa_Ti50Srq5b!-{HP zPDCU|(vjNP67szxM18>`4(8~9qiY+hC`m^CRL)>rL1HZihul!8hrJAyfp8y8quUaA zkz*U!xnTr(%Rp0DdQVKc9o|7bv5wR;!djd7iIOKxv6bu^T&D z2Pf(F$n0}${d$b7SwldQ@zW9-itE;`0}CQ@cJgg{dizl7>!UdoX4tgPA|&?P zB%}0S{`l?q^5;K`%eQYPs4-hUW005v)`r8Mo0`JebLS~zAMEWzE~9z`O721H!&p|D z`6)rBNL8v}kyRlI92r0yXB<5A6l^=E z`%psr8lkmlG2cqm(&GY&OryGzh=VEw z|NiB#;V<9*`?|!b*}(ZlqS_Zx-S9Ys#W>#l=3m9boI zah1BQP}p3#d9I>@*Z@BHna{&(dFfCfYo9%N+)Oru+it&|Xwbb@4o;yUb2>FJ#27_J zp)_W`hi9HXgxJo)&E#l7g=U29J2v92Z~7J7c-iF`EftV(>l(TgWt$AS{vM3=m#}N| zTHJ8mUVQoM-^NdWa35-O^9oIcHcVAP?HF=Sfutz=2L>@bIzk3nMx!#bpb`dW;6{w= z_g;ac&mTh2vIy0p)SV29+H(E9xbfC~FmpM?EuW$^xybwb`)T+NV5tMC)oau|beVb4 zJq>B_Nf}Ziq2mGidduW=pV)tZ2EJsJ)i*dmkkO4>w!sS{ih$(;T3V019wM&D zL<~lR_JC);3{mLAg}JJ3V0Y5QsFIGMg?t7_N69#Zp${P!ezQV`lg9y=!8Iz(2V!YZ z)zmE+Zz}@#YZw{G!j-R!FWw~5z=%eJ$QfHaa$je7=wx1bLh}GlTi*SvZ2x0B6|pu{2*nWp)M`#z-cZcRCPq9tsXF z1Cqr>U)8@EqwGRDUy~wn#g<^uMh_Jdk2Gf-D%;3&m%)>wH&ORCQz%fNVcW_OC-gS7 z-=c`FuTUu9$}6wLo;`*Z%F5CMiL{2jysnqLMtv}&Y9 z;0XsEO`^-SJ62w;VCYedzWyFUkP>-m2jhAAK0@HW zpZo;-_Fa$buiB&fN?sE5{j5U)ltzYz@z%Gz`R~5|jc>o}>Z`82XL54lrxTOY7jb6& zOb7C+SH1dHiUx%C)kb?L<O7VoGV*=N%{NiHE*Ebmo5S84 zZ^A=A_&)5kl*uSs7fz1)FunlkZp$0KW_SSazvs`fZlDhtn0m@Lp@&lhXtwUT$e6P~ zlg0Y65#05vJ8|uvEAZhD{x4K!=cxvt2*M?)a5m@Q@;#T+qAcX{)XWRJ7%zd)Z&7pW zrW;?1pZw^4WQ%2R$HB<@^%xx)#kE&mMepsk8f56mv6)KDh!#ZXlV+w&i^EXQB_v28 zZG?rMk&#hE5C)1gF^cGSw`|>v{@z|3f8hkdO)uYbMLMM{BX64;yd2e5iyCz!J!**3 z(E2Kh5HU(=4O&eP!ivxeLn0=OBSxqc;=Kw(#lq`zSaFCRUdNpq2e8Xpf<1o@j^pXf z3xJzd1ay5v7_Ub-H?;^uM%9Vxn|`(i4K^z0ZDljW)SHo+LDNSNHVD*U^jcuFQX?R4 z0AULv@?nZ7ZQaUe7kMp3i{G&{$?)ku4%ZFvL={r$P`B6YQniN4Vui|!VZe3SAW<%K z$gmF%4Us%(QeS4&2Qx@_oe|uW)37Er-vvidt5c*uHaY@73JG)ppo^PLewWcFL71hH z=jc$a1ob3LLkSX5R!C_xNCr8ILMl7REF3}w1i79bf*V_wL-1&Mvjd{GNv5&6x|iHY zD+?;SQipVw^4g@6x$Lsb@c0u?>4|q;>R_O`7-F4zIlu+pJQHVFIU4}~fGBOOT`yP&;K8cThTWXk8Iv!K2k1}B2Z8zik zy;tF}Cy&rXdK9%#$T9qv-~BDPmOvQz)X5P7Y1UOP=BZTskN@<)@IT)353t<=iL$i` zsT6UkuJ`NmQ(A0G0UYS0UAJsnvsuTX;ZgaMfGJ7lLLo;ybgBepErY`ew0z)i|NdTl z=93>r(Q!zBClFdUL=O)F=wiJ{L3MIlgaK~5<)wfAna_UyFP?bv(7Ud^`pSELVF&Vt zH@yCyo}S*0&G@41G+l=Q6H}Ec-u>S96YVnz`7d+EQ50j_wr$w5WowFtkpJCy^G!H+ z@&wMGdO;zUs83>J?Z(Y8?E>Mf1wkeY_|xC{Eo|=VgAun46G0`lqccTYjGj6rGC7Dj zHEQMNx%;)R#V0=XNivjXXKj#yD8XC4kD_g>UQHA$0=ncixXPYaY3=kRy=4it0}QMh9>(aJ zwcw_Kz}IsD(pbctaOnh@jslF2pT$6_hsuhoCM(kPR(=(81!^qIYi{z0`b#p%X$Fc! zF%UvgW+Ox?n8&NO%eicSgI^wcvS|%@L(7r5POLICd@d}y&AxX8KTI<#9R%bxVX8=bi>** z6Wx%DnFI`J3=Ip5l_k`w4P@M0dp&6wrF>Q+Bf~mc*X#kBMrZZ9wKQW9c>yw{f1)UI zXbli`zfUFGRP-mM!-1yXkE!PpYx)rA2EIj(UFo1ZW*f$v6koh%lZFiy8?XBJT&_T0 zwr!{T)v}e52{nT*(`*GoYtKmM;(4)Bu;`%5PvM@dt9AgxQP1tY@B3Tymh-U0#U$Fv zUmKy1#p)7e(3z}5pbHrQ$$MHPAc?EXHLQ*BWENUsjKj}AkB@%xQ+VK^hY>UbQp;3Q z)FzmY6x5a!ZDYLb4CMc(=jZVUfBavud)IEf@|Abt?z`?nKI0M}p6C$zQiO#eKKM`n z6My#h|BlC=I7|WCfBnPX!`AgV4 zRZ;i$9lLRQ{5+PL0g9_euxsyLnArlMeq1Mmk%1E4`;I@ujXSp@D>QUgN9vtA8b}P2 zp(w~>!BMGJsV5>Memj%FmTlW{^vLrRnRMbKsK)C17#kg<2uY^K6x}D3?2-Pi*@NXm zb8{J(CI=6j5TQ@a{gm7;DKTU+*tL5nYPG7$)0jazLi<6a>>S^w5#=y7@wZ z@{tgMA)l?;IR6P`pltO`Azr7CiY(?E-?K%kXFa z@NXEOZK$#q(>X)3jx|9sGj_qq;@D>oYsDU20mVm&Q7LcNT z5gVAA4Dr!#9>CGr7DB^GhcVS>*sz@}i7ok_k5;puX3|_{M1~q(&E@h`9+FwHG`_iP zR>`mGja8{suzkmNaAN@-Ka>265)pX@!;};V7lyFH?wtp+T$~8CLgCC~$>H zMa`6vmKlJ&?}5zn^zdk;oA;3>-Alm}T**6!VNB3xnRziN)SUD-TX*h8E?>|$ST~kw zsL-b1APNID-+_evR}YJJ7*a*a!?3VW!9t~?2Ld&nrJ>aNUj2&O@t1${M~X$aqlw;$ z^Ox7KSglfEBF~{*ER(wA+BK`Oed}iQ_Lkr}7G-n_Rc4Yd%8%|pfP3Ek_xRr*{Wy*t zKSiw4;A(nbX!hVVe%AtPBGvI>U0(@fMHF*PP0iuK2Oq{4zVJnyyD*7suGveUoiN!F zO3buqc=_&E-GytfzY1^pwbx_Uwv8$f41M)pQKIh?QAX7Yd@|aFVhOK)&E0tXiN|pM z`~}6{oD|6tNUk1kroC;dOqdHLD%Z&?n4kk?kWDjfRI=5KhmXS_^CPa3-cI{TiPNvs9N^6mF!3@1YqqO?e3AHQ_-UR<$ZjVc$!O1g&83hc-X6HO1s33nv_TdY^G z)TpXWB%{YT#_CnW*tTU$8g+H5MAfP_(m9rU(+fkrsg*EPz?I=nI5%llf%}BGG0CYJ z8YQEM{@QP^=;?F0EJYV?##Pg%aubu{Lj6CM=+1X4moYUT!xEJ{Cb~!(O%1g@ zdEzw5&x$nbNQm~7Z)3V(P1DlM0uNOILc|PK7$O_CFdQ%8mqsk?HYbqtC*j&Mv#<$y z0?UEnxMYM)GXs_%#IdOcj-5P9b$6BwTXZl8Cyhkgw3Li$$AFue%i+)q6KFL<IwiML_Nk7IgtRjw_so}8`lkk|`MRb_zAjA=jI7Bw<5ZqgpK$t2Yx)~y`lg~ij zn?|FF^XJbKQiu#A7{SqddV8pUrD)lqwL7#}69-n0ts%e%*8u=5gUBWaR}Is~PnmG_ z7t)D%PzIm^lSG6}%|^&|8U#K9UuDD6P9O|&(m7445uq7lhOZgyy1;-lfN5A zY9K_%7iF}qCK;M?v4m9v10=gUwt6*6s5QVJtvh&BYS>;dZS5uOa9$$oBnp+$l>P{-%L6UtT>Zo z7BVz4tPonAdLLR`iVQ$140W-XkI-nR1aqCuxY%>WWw0%ioCiforb+UkkNosOSWIE^ zRR*HY#c>@nUQAD~5(ZwT{Y>+fCX?-5AW9WghLNJan3|f#-o4kS3nUS_BAN&uQ;omc zsM9?UDOxrm0t39&L?K+nsF=Y^dYib4T|h6NMJ8iHWF5qgqx3s0L9l2li5NEQ!7aG= z+lO%AnUgfpo9Ju^*53U|FF+dad_Jpcvlc^S2pEQg+WZ1G4wkXH$AQ_Z0A7<=0*qLu zlrg{^G?yIQ`;|j@^z;&9JEQr%Sh^Yo?0 zU@Dna&-V!UL4l3l?9zCbN+rb%O!_5liDLP0#X^yIrfB`j$kG6|?c9O2>(`~FD?=Oe zHZParuTs<3K`L>a>VPDa|A{ymR}EklxWw>lE!3+G#mFl_DuY9-*HA_$I(C7~wEQT> z@<^;`dwmd8@)iA%Xi1G6msN_qqbgj=q%_hS{e}6Cj z>i_ve6tXUDc%w7W(f3|nQ?*uAQ{mc?xsD4r<51%~>#F^fQ1#E%YWT$GzJR}b{|B)! zSEKo^T4dN4Ax_k6nF zM7BbVz|cLlI2D$Yd(8Jk+SAeo3=NIqi(me_Vx;LnNu~XdWIlr${{}HqmwI{#oKYwh zh^@DOaFBHP3&nz71F;^E&_Ew1v*jiQ+}+ozY^-Xz2t=(0Uh(qVRjwj9t?Tr)0^M+6 zy3c|{v*f>*i#>Sq(6gABoJ#v2%~tD~x4!Z9|1>=}SGi;lWbfV^)Mmm;jgLz|FuG92 z@NZxIGEE*UdS!*6F%5;A_k9n$FWcRo_JtANH;~m5{$}l#ds0D^>JL)A7P|1iL70PRdRrQ2HbWja+T4xjM#W2qeKT*ZWtHh zGQRxX{jhQc4KZb14esRHR1;IpDTHAN8x{?`aU;f}aPgjh{wn_4>u$s?TT3uR0K>J^ z{7XX`+6?Z0>=b@<>@?zRM)xa|2$d_jSh)^N2G%W!h8nw8095VBCQFzED4ASNC6-35NeeObJOqAB8B(qlKrA)Ynhg}(Gnqx8^{B(lxMRItwtF{v z%0Fh_-4XUC=6J!LDhgK4n2p*o_q!`x%zTK=hdqu(qYX0#*#<-3<0W@F2Et+lryVJ~hauYwpcv6Rp657X{?xO%B9C3q4^A zqro(`l|1w|C()Z}qEvKYViUU#Xx}??a)dm+JG$Mj$3y=|D_Sa3s=s3ntTAWD6x46bwUU*i3qc zmNJssz$VPTsZ6y1uDS9`LNXa0VsgpP)`{BzH3DXa_1}Wy45ml4aDtkldxp~-VU4rl?vvUn)su) z{}rXxmKM56ddRfk%X!i>Cprk30}yS5X3rI>%;jsuOr^2_@DNepqt;kd^8!4^whf!K zSdXT}PZCtYQ*8n1oSH#^>A5*fOirWWwX}A(jId*BKn78Z`W6nAUO0;RqKYz7eFOcT z0A^=qh*o5vzdtQSk>}PhTdIfi2un+gv~e)8th`{R_t^N=B9SqK~l z86n`x$l6Hy5$%K1&ob1YQ(2196(v#$rRCH8$hZn2#ChkY7t}8(I5u85dIbOQcYljl zyy{ihbLAddplV35MF=7@PKYf-yy|XO`_PE!qAX|@<17O-USuEWyVNl91|jx z>mq}=(2oPB8hG#LK8q*L)(|mQWgjV*k?2~c^0Ttp964_#1Vwl+1@|>vDr1CB56%Jj z$b%CYdTJhdO0PNQn@!C6G3s(NL(N1`gb1{0E%Y`S$O1QPT7}_k4Nka-^?g2m@0T~> z)i2qIpFVmH-+$sXPE2SvYLX^NfW`R`*-Q=@xw#qOQNYegT3P7dN!A9< z>PSNmRpW-9Vvm=>cR@|@FztURojLVvg=Tp*R7accnXl1OFd*9o9sKM(o5<=VFmrFrgi6My;_@5DF0`$NTVCDaIj(h<;x zo^_76clJ8EkCjyTI*np`PbWisOzH8sLSuLQ7c7q7VEHhlUMpCZYEMCXX<@-+2?#xF(8I>OLsx}qLPnFQ?d3kS=XWH zz%n`=qy7*cW8%VjtQr_hHK$2$UK=Afk6`E8eti1-hj8G`94fvC!*Vt01yA)6rNNt>m`1bK zz`&|iD3nW7PnHoo(~WhJ5Hc!d@3h~|KCKd*g&J_*X zYo;M1>44z!iY(I7OcIRNQcg=mw^}Xq_Vr-P`ZWsfs~9o0l; zk7X#NWHwj8Pk;O%?)%Y$aEx}=!;`*dJ8@Siu>?80Ji}b!?@76;<6qNhb0OO6FqHxp zDstn(#0Nh3Vcc}%UaTr*(_kbovwznm0K+oN(6)_)<})c<@w^sBh6ix#%WlE9zWH4+ zOH~|*c_UK1XY_ZXSdq4`Nc~t-J{K>DQHDfM*@W3N@!)srQTqXxIY88`b&!Sl9p=owYW45J%uhY~tE}GlWu`4x#C#EMcJ3ohd!zWaU zDi1_jK2p&m7~VNF{{R4h07*naR3b#|)i59jg;<*X@^oKvjFWr5ojt4|n4P2x{a7s#3ruC+w zOd#vfnMj_cf-nlLA)*G$o_^v0G0d9kf zJOE)pp1+!+7hm|+^Z4+$9>n=(jM#9}b2qee=czGiA8sbA;IoXA7*%a?k+do(>M|`1 zkNTt^-BT{2uiS%~sVUTJbrqZl1t04yk7x_tHC*Oc7D^cxMaO^_<~(zB;@9tnwlaOff0;R^E2`T7<#f@=u9JlMH)JbXb*|C zOCQ|^jwh!M)@mVc)v;x)gqL5l2^)$5)|6c|rkdFQU{#+Nrtv0)H zr-~v=dNCb8!wODAt@G}hGFx5-W8J|ETge$21};p_;Lrc+9{kh${|8DLhhFd4HkFVY z&4w=fT#P?rrrba3zSn9s6iQ{h?RWn(?tkbZRF|sC=Gi7u5M^9f@sHUy1&&@bz>#C8 zXnI{{mr>Bb+0)}R)Jk(8y3eApot+=rzEASKO2s1P7v@(igCBnG2o4`ThO2jOqURdb zJyps~?pepZ=**J+mWN&2wqUvQz)B9}m9M<>R^q7oML64uj$Gi>)GWU8jc<{-yJ`@j zEd?n!)~;VmNTyKNMiu%;@z^larrK=~MR2n@ROS|l*oD$@M-d7dv8n?<`P{R3#cemi z&e&=YPe)%e>Y1FI!Q$c)W*6oN&(D~Ov=Vd+F|tY{5-g&(HIGs5qbG_m!?Q5_h9<{L zHoQP*Yo%JP;*m!l!Oge4l&;6LEPB7=$4}z)iBkkAwuC_>hn8*9hMu07z=4PNRE)YJoU$DTimQ^$@|zrtlEu?7dt7WVJo zk5|9u)pUMbmpp9Z4>GB=V#usV&lH?FdJLcZ@JDd{^?R{n%Qg(ID#JA+6j_K850KBs zDCJ6U`|>c0MF`Ucw{qZi3~soXm}udlC(hz45B&&_oS8#pWFZVkIby-OBF4`D6UUJO zg2raDHc1dK&CIKFBgm_|=Gn<9j1CW@w_HZXb*OhUK0c1KXU}#S;C6$w{TWKc%S0~c zq9<#j1Rrs$0uuquBsdT%47Jg~<*RM{&6_X7HxG{EYmZ*Qg;o}g$nSc8>4-B{mZqUk z9Y29hTQ_UYEKAS1sOUN-W@NPUp>!YU0ltI{l}bhOdMyYZ0^TfMea#@=_M3NO&4wX3 z(#ge(fIkh)`?z|~Fs|Re1|Rs>hj3^j#AJ)3*wceTsYs$kG83Ws4lZc0)HE%UZI;<- z5C&<@+w)uSqb3m{%KP=Im*0+$ee^@fEmId2DHku03@{qna2WjJdW76#jYfk4HARb~ zoCu+<((;W#d}@FB*YC#ne)MB1on7=SJ1P{3sb+fJq|VqdJAMZp#ERXKc@jLyHAVN} zMf$nwnHc!t0|)S>Z+r*;`HlaHV9fG&)~;TQ1PMnYb|*7Ca&DR z8IRrn146~+vZ{w^*$#~mxF#oPInme~8EoBsEtVRb%A+#!klA>%)xg>{>u83FYYlvw zgXxefrZT&h02mk?Bs8b!kpE5AZlmGh1NVLipZ@5*$d`IhuQzF+b9t`7wB1hEEn_I> zExh8F?)c#Me(?Q2?tJY<4&>H-x8C}TIuBYMv4O9B`@5K(Uqs2tq;Y2Qu%rPS8Cy*k zL&7ms`;Cdn&9NPndy9%5!nP3GCR%bc*-BSIm_~Y0(f~em-~c9n?TyIvm8p3|G!~%} zf|;0^PIZKdlF9`+rX=bkQnA2Csb0Z)QOA%OAPzk=WfbRoT2@j=Z7bY{nVA`ilzaR8 zsC*?2lr(6zX^{qq%$5QZG?|-3!ZYV5sJCFOFi1mm^PCZBpJgiYG!p)07e~K9yM`(^ z^J1k!6r8qnzM|c9m7BLwZ=kQgpP-5fJv8ZwSSF>!aXJvqu{gVk?|nu#VE@ ziyf}xqF5}DkWQ_#sHeA)>Y_8DGD6E_RE8rWO2@|f&Fc_}023Fc6r-|UZ3!!DN?t8M z<$`i)5apbSjEq2HPj?xhhCHck5`)D8!iX?Bkii>o-Aw3}58rp1_|{(Rkl9G7R3gb> z+jUcJG5NVNLsh!wjMl$2=BXY{DzO_QG7ObvdlPuY-d?=p5AQ@i9|C4W^Lj<(oRBx+ ztjghv%QLup?-rci{{k+wJWPAjsMc#lWs}L~G~z0zs*K4a3eFg38fvbZ|6vqR_9gFv zV_LX;=S~z8{gU3F;LE3~_n0uMT~loZCU;mblh6zOn?i5N_yr9c8}as1+>u+qtW`@SFJ zci;HBu759yI z#)!}m3P50xFrKP7OifQ=cxXhy_%8t{NQPQ1+onLYwp6_+36Ku=x#y4K%=ru0G_nen znOS14Ku7OLXsCH-r&8{d$ZueHXz;}|kUQ_Zi7@#xBy}x^oKDus5O)&q&^?luCk^%Ng(;jG zA4k?*M~yej;P4_3^{UbpXf~Qk@-vKJlTNrE{)r-p&_mvv#X8JmP}JedG~6;3k5-_X zm#NA@C&Ph0i71XRH$P9MIQf1&igE2V*Wt{u6EtY3$`&GxKIPA;amv)tnW66q=e&(l z6i6GuWGHBHB2`mNjA0UiCA&9YE@8$siSj{}0~t;6nuPAMwcIF6H;Y_Yxm*GD>XIgt zQ)LC|z{9|&=TxaRaR2_t@%W)<(3`bT%9to;E$U?iK}Ze3dNaUMlM@8C#k1fUo7hp2 zpl(>&Gref>B$2E%X4za8hGWw?2Xa%%=rmSNgJuLFsDYW!=M(}73_+v>leTQ#N({$m z$Iq(k(*uLa{Zf4*gUX?+2KrI3>ZIXqgfSS4R2(f-f!Hv(UZ*j!^!8^pbCAd@?)y#fsI$dAeQe}y#wX+4)kB~nLye6JLcmVavBHaQR81?q|6J>n9r>q5? zG<;eHMMBNb+^}O1fBM#!B7;S68HEU;slP)th0Hw~*ev#%xcR2-_`&g6T$pN5>9OW} z1pVwA7(l)}fJi1Ck;;-I%>|Z-g9V-s7RkM#q3(h!7&Y3c#_lJlBl%7ppHAds;#gDx zUBP;}Ac^J9HDKxt*m1M?+`oN}vLBW}jXP&0I`&)}Zkw1Bo}wHRktt$H*I{(RWC5+T z$TaU6^o*2|Vemx=PX-bljrxQoLTJG^d2!DjK7!Meli0L+Oj#dk$BHE^%+1rB1d=v` zfbNs2B_%U0+qO}uEF$7-sE0&BMW|@l>rJ0*pIM*7x*i9vYrqmc#03_U+M$s`rsaN@ zLH5kdG=_#&A?s$+VdMnT)OMiiE1S)sx>QpGv>i9kq#l-!hYmb~?Z5guluAX+&DOPI zWBW;_%IxVV10!*^TW`Gno%z>!5dLJys{X|G5+Xl02680zZQ7=S)xre5MqbN=n zKr%xR)IHpP|3lckZ4;W!Ix;rkM?O(pw^}XmNbLeLEMX`exne8z8n9Y(SZ~&_&Thbr zJuq&;v~n;b7rq}+-Mb^ESDzulm%&gr~Dl_juH36XhCYOc!LJu7k!t`#wM!pWf1Tb!?9vGc?onar{*wmgt2ys zOBf1tBG$Or3sGdgKggc42l*(sw*r6=|DB_qx)5SA1u7DqTzr5!#O# zv_~x3Hc?MebQHnU`$vI-gF}a&0p9#NQaW9nH;M6}i*31MC#kGN zAse=CSdC8SLHp>sYp=dXeyv_Ta?9@ByH{KlKdYoAFX+KX9z(0vAP15ltI`xt2oiT{ zHS3t2n!@?<)2PhP(GXuU*C20_%uo!=B3NMzj_YpNOSB9L4G7nfyW=`|;HM8^{N!0m z^JvmK@yi2%77yz1_|h-XO}^9Y>uC+@uSl^xDQM{5S7jBM??^|<<)>tJOvYLFWS z>`Z~QGN>EFEX4Y|{9=|v=rPC)!!-3^siD4p(RIQY4d2PHJRw0G1#^vsk_b+XKcj3! zejgYZf?+!p8B%#lC|Ymd3yIrkbaa&JzDx^~B~~w+&5(oHym=EvyarA0cfdiJo-}2h z_gYIfG7i)5(VK7L|GsM*UVrr%a(;v6FVt&|^h-X+h4}?C5J_38UaP6O0A{oe4P&Hr zGyeSZICJC(jy(M=q98`h4R|dd>v~M=THlLUgj#j718_i$#TMeFP+^COiMWNql9|HH zkw%OH(~}eM>UCJu<5Bu5u@04mR^Uu0yLEk>`gy;gzB#&f2-i%wTrVApjMAVlxesy; z{osc`CN_vf*Ss^(V9e;E;pw9F(TaqoRwJa|lVy=khO{Gve4Zw`GZ{DS6Smb1P5s^~ zO6j*FW&9>2>bhvF)Am51Y1Z`swP` zt5p=r%yvrpa~#sAe)BuuMmb-g4bGXyaqCFFSj=Np{{WtP`YD`w;RN+7a`hV4Y}^Rb zR#e%ga)H#~f|wIRz#v+NQQT50;qc+ZN{a!CGl+$p1f1USm-pbW{`$XR^>8n|dJXlZ zhSuuPf-NzF-)h3DO<}DQVSQd8A9{q|l2Nq7E%;!F%YAs_&`~s^0L)MWZ6b|?(B%lF zcdsrjVQOj;<(?i|B(_Fm+;q#$*mv7)gd&nR+#srw0>LU}bSfh*c?}NfX{!M`t|5I) z({~g5sbFm=DpQlk#Zn@pYqY5l7~p2}c=_!wSCjReQ$K-g?kzR6W$J~&VORzPXV|=T z3pQ`sf|>a_OwY|yROHXi!LVH_xsgDXUPTsLR-0ZC!1&@v0+mcC%H{RjhDklATqZ+V zY9X7$`b`^g=JXjPs^v0^8Wp6R4f)L zAR!|pBY9Kx(qJnnw7d`WesZI8RZ=r~tRjA$y4!Y$(efhx_*I*+cgt$r`@N@dWHFTC zhnjuSG&O<=qA=NU*&Y%flaZ@o87XBw!JUcXMS)2+ZrF_R$r(cGct`8!Nsq_<+#>;;02P2CUFWU(Rgj1-j=(C_Ar($+M?1Jhm43-T`n+&DR)A zz-u%V9g3-&{lqXGYv$jylJhfINW`h+?`aAa+sPnT z1PNVHiB9vLG2zhryG8)vw-C2H#EqJgAztZ7=tfM2MRE{%0&$!;_5y-dLm}9h3a}R{ z3m6?8A%s`pwRE*Z!MZd#OXdw&{5|b^3I>Cbn8be8fc8fGS5m|TG8)iF9yrlDB6jZyb>|Loj6&P`5Ikl1!Z-K}9`S>};A zicT4A`nQx8#Z2`rl3qZvziejc$$wQ|)DN6~p5w7%yAY~u#RO6(qL57{W@hGbeqsVU zHf~U*BSR4x5?v9PezuNsh>R(_6AFUj+Llea{vr%gXa{{KxV(X^!C;0>u%H1N@iL4R zn*=Gs5RCR}ZgCNFQ`7M44FV>l&w?@4|K*$lsClo|T~ekJx0)q+>Y2mXyJsf>Srq9b z)AxsIZFi(iUMn6A&?14bD}oh|Es@S-#$6M!RY5s7QTq)d1G7 zTZiLEk3mG7m^JB!MgosM{3u>>-8EP{GOVa5xT=NoFo0_q=(QQ_I8?)kx&|*Fd=oju zxe^vo#z+74>o`8Sh)|d?tj+t43cGN?Yi&6CiVC}XO%EM$COYWi%N<**UKrRN)g zrH z;58qn$JDT!7r_rWWoCRCT7l+_w4j>bY#?ZPunIXc)vbm{J;79#ngWejPswwo1F^y? zyOfU+x@h&>kz@GV_uY$=6LVC~T|c%OcfN8T?%4NIaONTEh;Jt+XVI(&aNWEHcXS6N zD-_V%S+D4BtfxH^-882FAZ55^?e{ZO<}LJlBt)pv=E$#zhEuzDis}OkRFLXBisBGX)**GplV{GOUiApJ%wtu0ER@UGz3Xz~v;r&wY!IJpYVX93cmV5Q&++?7{Ow<+@5&K#gO|I`)(fbRu$w`%l2)pQQn9kgue(rgM@-h{r^_S11?Z zS{Z$>t@QV~<_n^!?sUq#Bl_hA!5#&($ib^}MXWb#l5JXunT!(cR9UZ`X|`9kNSGI5 zl@&_EO4%c1fZ(H;Y2Yn4jlpqq_~)NIj|-xZqK6Ekfv@AtnUmPDX$!Vpb_D@E$~|Rl z+OZL|FT4xb#!=Y(&;*=u(FRpZPf`Ih6DW#C{P%A*Z(1h(X#9B5_X1so^ z=e&jP*=cSnZs;J5`Y}uBDP+KWj<8lE*f$Y6bb$eQs;qZ$?)u`DZ29W1e)Id-fAA3P z1)1qRUGXtKHAjT3SyQ3uKsSLrrTNsZe1;@|eP(bV#cI z>p)~B)dQXmak8{5tbzoCkcnQam7WXTKi0NmVa#!MW||TyB|@k6-DA%@i_d)SE0~#H zLOzqHAx)a;G7QS>-}|2TXiXl~KVV6DOv_7fBZ6acK-xtsM%eI`eI!&)M^<4h%f^Hf zK9^a%uwoUIn_2-jjv@QjP$wPTo{nnCD$+M}z*bm;M?FgCgd zyLWD+?1YI((K5&kNNytJ&;&ybs)5GsERKxN;H%&KGLD@(OT%%Rbx^O;i_^%Y)@o=P z630o59M^KcLWvVe$7Aqz9Z@$!J1QEq2-T#uLFH{lR?1eq|0%yu$6o7%?U}4QOG-At za=xXN4kL}=RjJCgY^)s{MTLejD@vYK*eU!r)_tLNn!Q57yyM;(THD>SEz;+9U0Vt1 zNONZ}Evlo)`}~$iEWpsSF_x(uwESH~3Zl~fu><*wiew zN{b`*O)OMB1uhT?WU`}Np;Z=Jo`(_*kFpflLGsc`wD}9~_U=Re_R*uqsF!CTgb)^C zI~Nvfn3$fyrqO;RP}^8T;4Mo-lrK>~YnQ83=56S#{ZglzaF030&mMs8gu5y}NtX<+ zBA^6P*dt3@k;FjJ*`I-i1j~$}oG;QkmySIqy&?nClT%b)H?20^e>d0F1;OeS@rz{mAhUjEJpA3fXx@Gll&z%3iA28T5aEor1&;z4u- zC{fH&sn+oH(@#@G&NMA`f^d-wbnUuzG?{7Gz%RY>PTEMVmQO$L9~e}gR6aaQRh3Oc zc~hz}h9RcrrbuQ~o?Fh%VD0*iIDPz-nko-GGHOxK!dJfdc|5#&7}s5YHP)^fMNiR% z&jhT%fGa;7GTLPrKQoE_k3Npa4?RmIr-(LSbkPP}e&lj_&4z2I{RV}~;VQxg(5zNb ztJmok_4E%=c9oIoVJs91gexawcQDVIRMt`sC5l4*ppCA?o@)vh=sYIYa8|kKViHuM zxo;h%Gm0jK5M6vZoePL-oeARQp=n{6MOid2)6-MH+O?zj$A5Sa=4aXB<%%i0BYf_Na=F>K8?bu z85PSUR84H#a2<_WOPY#ujd2r|**QG^;Df1Q$P{weeECjn-nmU>)iN?r)RosH4Re3FSM_9LOQV>yUOiWuA@zD4 z<#LJEb;3A6?-?r^_aXw6#S${S0v6}U*(g=UK)D&czPs*qhXZvoF7l-!Nxchi-0FCZ1&o2Eh2 z#_9og(2%6klmZhI=kdT#ehL=o_bX&j>hHzAeYc@p?opo=E9qN^PUJ?LG;XEiQc;?^ zTLzUQpbZ?06(5ct1}jEu^4UaTL=-QTg$0~DcaEm^EyKXJ%dbGs>JbEtlc3GZZ@!tH zfy_$6x4jkTr)DuRGlS25_H+2c=RXggmeLGW^3#hhlXGagoOHuECmofRSEmRmo6S>s ziqJGVI#c!cn2c*m_W?$ImN*R1Y{kd~A)!i0NXs%i%5WB;iOyX(2QbBNC2yDSwQ%_0 z{gm;n-Mj_Nh|}_2_s~`QNa3a;xmS9}%JU56?>jURA4fiz#nF@Zaof%^j!pL9!_Usa zcZ#GrLWWNO^@T+oI`ky2z4jX9TqlKvcCIV+7lx2C%rDl#gC3Z1pdAPU>H)+;3vsOl zJCg%!1`$T2G&kR#DTc>i`pwQ>U1Rmfkl-v?wfHU|6pFflPtXfo4Otj$U3lPwEKJFi#n zTboD7iWUk^jL;H9{jZc6H48?{5_rkmPw@R>sYoc;`uwaaN5_iWOumbf;kLWxyP$(~ z8Gs)+Z~$+6^P5Po+AW{=((uiPi7#!rC#(E3-8M33RyBg0|NtUej$Bj=dEgg zcIV%?R{4AC`R5QcTCgoA)h339jhU8(zP^4{YJpbx;v$tI4TWm{$c@VM0G%|kxm*U{ z{mR#H`kBKp$r%NZnep_QGuXIpD^`zy(@#Bxg~=(R+TcRPq0c@046Yv@h93&zvXa+s$?V3k zX@Yyra6iJZg)Lh*kin0IR=Hu&xO_HJdc36o)D@c1jKz8f8o( zxV#11U&2t4p_tEOeD?q2>^-0@%g*w^Z|@UsdiTCiuU@JgI_IPgYIV0HlrfS3fh4em zBnAl!i!opr5M~6Pg%`#!crA~NJz!>$Ocu#NtCdjfq*lkO?yBm_uU^hKowWC?{r`KP zbMC8G-GZxCs_GZ+J?HGb|M>mi*TsBNM*^SbR?;*>5@oDc;Ry^IqvH7t>9(-$O(Ks= zDbS#4YbB9-9#)qYaNz2zC=*t)g0{I1U7nsTLI+pQ8L%mieu5%FpGrz*e`%p;dc76) z@Xp(2@TJqMIM&b5b6(2se{=uGj~&NNH(ZBm=#iJQRGB;_Y%f7cAIlp(^m`)+JgW5| z?e?Jh5dkcM;Q+GgvCOTBh-D2g&8_0>S_fLxF9UlxN*5t)KPOcr&ZUK<@{zE6p=@1M zPW^ucx{1py3jbKDfw8d`lB7fLQms|6fBzJ|>$~59;YP(~Afn*?D!7ez_!#y|nbrs- z171kx*!Qd;SFvJIKcb(PmX(?S4@rWhg+)?^G&YTiLwCOR9X`3ZguH9b4y0bMqr4@| zvogm1!3Q6};^I0k&Msnet3&YZBumi{F-FG57@}{rz{%02ilE{lx){#d7z!d1Vbea( z&{TY%)mdGJrUxCEv#qd!+uG_Hoe>f6OqmN(NY~v`#+#tQoI$}Da{65p%vY-oT$*3O z>iRlHY89%KbUK@Kjzn25yBy$ZjY?1zn(Myj;rzLCc;f5dpiSe0 z*+oq>oTf3>*OqZ;ast;~e+{_uB4KsBOiI!@IeY#bHS)}+a3~U0tNRa9CPf<9;ICU(~&eDHf-i@UBGL(=Ku@4x&Ke(Q6mu%WB) z{0cECr*V&Uz%`R}jx>jO@KUgFQNP!tiEBc@IRu)?azaMR`GnF9(9RSYCpU;W*v8$4 z1RAu+gu#a$Zlw8y3e7^uyxMYh|AT2~U&rXPPBc`&faXJ*J?UY%Sr>?DIE2_OBpW${0 zRbwA>$`B=}bgwdnsmTdUPVU3i*O?&JCtIvKCqNRz%Tg*)Hl)I| zUPIH5kRgTifvQw!1b)753^mp}lqZS_{3J>d1u^{8H;c@@EyYv~E(oqvL-cx`T&u<1 zzt$4nUV^#BMT{KUPqaWC3tVzYzT$boq8SARoeeLR@k|c?5GELs+coy3V9*j-Br%8Kp~zUP4( zar)vG{^8gnHu@6dGX`C>eQG0~=aiy94hYs;tl#NK< z9}?gG41~K^$^C{bObM+bh{+(v8xD=)>!;T7L?=Vr59sHl)h;*t`ND+@G#9aJ*O+aZ zvzSntF*|gYXw0r8I5{67XvApBK9@_Y9zgxF07wM-ZH>7_jlX&3B0l^40%BRQm*>z$ z#SP_=If}*CrNHF!t^Us#LkFXj?!W(D+;QjaRCiC(9%ZUYl7Mxzlto|gGYo>j4g>CR z=4V36xuf1uDOITGizoXX5QNrmaay7#H$6GljWH+6SPY=AYQtc3m0Z_^R-t&sfa;9 zQrfcU_I?rn9fmc^P!1nHj7qI}iJIu6G;H)wdzp*6cyX2n{HUpHsU+OPm!3!bSFry! zPNUsn&24jVQfe-NxbR6|A&yb2*WnQ=BBb7ike0nvaM{9Gy2A(eVW`G^h~fRy7#({% z9{Kzum|wWa4Y!gOgto+Y+ndM(&ZGR0u>EWN-)7VMVL(l($?5B)a`Jsu1|uAATnuy8jwLZ9>q{oCg^unDzsF@+0rS@E?5zfAH9G zbo&89kul+7S7B*>fu8Bq^fX4BRVI=21?gW%(y$1zdukF#uDc$?V`FrvStnXriB-vX z_km1L7bT^EO^)1=TYRxycLhqjJr6-e;Ht3%AAjvN_>W&aj%N}um5tz;CSz2$wmLZW z!i%`>x~nlhv5Nr%D&e6oEf8yo@UzII|2 zUwCm5$2MYY_%)t?5j^-QO0*ij7v|7elNG8grdE#08yTo!BYjKx$>pW&pM}RF`oAPa z618czA&Dbf?#-B}GIv}aIm_lyb3g^Jc+jpw0LZ|@(9jU=*MW2Etc!`*t5wqGaKWpj z2EM>m*Ir8ju?d`r7*!Xrb2m%nVD#p`dnS7&;7;)gt~_Thb8>0|N3S|U;|QLu7PQ-4 zDwCTFNsXhlil3fhqWe*u;b?pTd()HfvJR@|dIK(dD=PPDcohl1At9>)GExs~qZ)7q zU{+H}ePtk1si#7e`Mz&|R_2by4ck%`N3!r8jV4%lXqxh@)#64gWj>^O~ zY9jmSS?X6|&)z+l+P(W<`DURV1;9UL+n6_+yEKP5N~tkg)-tDNZI)qVq;O1Yi_3WO ziN{HMJBnkBjg8S_o0QfpzFA0GojArl_uP&7lc%t9ahBO9JwXGB`}a-bz`;F)7^zho zWcj9NX7JYU{1^D!zxyk!%v~aeQTI~9ji!`4m^*7K?(`zaqvuoKq}dw6%+w@mUSOp> zIb1Tq)YSA8aeQf;4VVv21v`goHKd=BLgo!mb}nvJRFS&p=$suI`l5VQ+LBcI3}r3# z1f?zE)$4U?%+YYDW+aixe9YkrYgMRTM2=lp9c;f+rM`vdiCq1xOzP+w{?f7mm#gkd z2)SBbzkC*2DLP*oTm&+w7=KY$nK7xCHS z%TyN9=0h^$WP4a!UnBP0TD^kNW)tuDo_F#(4OLMM1EP{iU}pktDaowOU{<8BKn<#M z6_4U1760T?tA_S6o(QM;-UK%{Gu*eYg%@91LM&@kPUNQ|n0!-ZxNzwbhDL^{3`dY& zDG1@SzPgEcYZDJ&T*7l_syH}S#qL@KRbL`fz}jXX=jYdPZoP|+sE~F9H3cnt@Jdbe zo$qb%<-h|;QlingtZcHgAv12f;`zjcO*5Fiv{YiTg(^F&s4enu_rz}ct|XL}&PNQl zf!CSZ=Q6ZbagGlhI6w>*ga8BgNsb)8ik(NlpAQ&1xP3V!bWFGAuWr;QAv?DRT`3(_ z!)V{W{bZ{=WAp{9J$5>6d!J=n!itiijxm(AF^x5hSNae#fd~Zrs)i#D&?eaQxXfyj zOOK3Jgld&Z5|wAO=p9gTvK06dFe%FjX-8#0>hpSo{9%?2n)^@bWqaVlrFquEH<^lY zqy$Cw^Uo;XAB9d?0ufe7LU*{;O8qdXU~+QmRW}Q^d1rG$bMuRYqJq4 zg+AB8Y4^^|;CiV=Zmpjte@CAE%R>RheUHNRBAW0f?o*; zHPos#;F|~S$$WCxMcPM>96@)pjm3op#7RoD=7Hy7xKYQhkr8;BG59Hm>(uOvlZcFp z)WspnvaCR6<#QVOP_&h@WXV}`Z57F~n1raYnMUko)O(tknWWE3vZT<0CiF-Z*WYjh zUVQdB#5A~;qSYM1p=++?$$7=J@H9~A+0vgiv?lrvrFAA{+iSI%g&mS~Q^Bo~7DjjP zB3f&Y^sW=!aC8Rmeb3vFrCq8E)BRiLDI6rVqOfaR<9%=s@6ymlHijV z5%<~zV_xaTI5oFU<(tqLSyFNg5Ni*S*QEXH!MD@+(lY)IT1eODkrW)r+;~&)sIxpH zRj`?t0MYcPwr8^40_bgjW1wx%pOwUMJ_TJ?TXf`nnKk_5<722)Jj7)5{LAbPMbAo8 zW+ij-vy#A?%_1sch^q);XNOX0wgoH^AviOGS8!svdM&xCgvlh8N{JN!y6E2u%`j^% z8KiRP=phOe%o`*bOAl{sY?ii(&Yy=CDFXB`>}_DIvIReC1A!t&4OJ7wz+ik%M&(5Y z{ZRHSm;&Eq%9#aONUg7+{h{+}K&JRv#Zlj~P-KwRD)SB7$iVReIvV^O0Vue)Jio9) z_RpMsUxoyq7u!}`l-R|%9F`!I35M&zLw>DRN2Af)dB=#$zwEvhWf{($yNJLG^Cp%q zrCfo}@`}jgQT9ta8BVg-%Xks#od63JK2x}F*?oHp3>ts+VNO)A!BPLEi zB$ietBuvp78o@i>@h%LHP2d|}|2jcSwK1OBQD9`0>c?hNi4a0oS!l=eNrWe;R#5c> zR%d6aaW9bNLvSX#srnv5pUK!6mE+}{Q(;)a?RVZm2dC5PQr+7)LSrC^&@Y!=l}H8S z?lYB8pFyYY&l4MV1L2KMfsMt$f3^vv&pCu^Id!xS+Qaw~~) zEPTGKS%&Lwya7k9zKT#nK@gy2)<5tOo7CP!G?IH*B-5%>TfLF+E%m%8Z4Ff#%*rz- z+i0#h<>>#4Ue1 zg0H=_3K0Z^#*-%A@Ain!p&C}u8X6+rA(KT*a4EtIeI_p?CIm&nMy4_r{mN|3nVd!7 ztVWS_IA>vdd&PB1_4bZ zyJez~iBQ*)X@M+@!iBj@NOwN=m)Qkv5pXP)#m7PEEVmHMt+ee|t)tpKu?u(IeLKGP zwZ}^!IFZX$rD?*!fYLm02*pYWIoY9LOsAa}bJN>emK`0Jxal0ZJ*o2t0MD-b#`-#y zli3E@aMLp4QlRDuJaGSOX+TtFDcx`yCz!oBZ+ltR5v$Aw3K@p|4vvmz2;~Zd7gHc3 zt0uD!tc;^|Ze&RUD$~;h5B0(#o78CIcQ}jPwiwigv%Fs_&ZxAeQnts6WOr)}m{0|5 z-K@X)lc>1l6a-}&3yW(63JA1E0dyR9uuU1*{x4s7h;sTpllbG-y5^eqR(<;R0dG$!j99=3EQ%8EwmQV+&@a>3aIm#;YD zAcNnJ25{K!S(}YYeTOWgXX=0oL8FGr`V6JAdAgY?f&+&RRkux4hN4g796S%Z#>aDw z2;mitN|cQfcJ6mx>6KaIVV*^%QiMM9h8d%N!|o}(`N122WQFiw98K9exALsoa|}u9 zjS3>jaO;s7eC_!;io}J_BzGp#i~D_|P49F%wAWl2R+Q@8`S6zFx-*rb3GVCa;#asS zhkn`Kgmn>=azjrZJ6O`&kI+ap?%Owo!)I5px@97zqPnl09=+B$e|{E+j~u0b6dMD5 z*+VwLLSP;DD~)KBhw}3I39n#HaC-aFA`zt%ShA3EVHsQs>2Sm-aZ)|R1*RByR%1V*z&Jg-f zc=o+G@)!D5X6o%G=Ac~Cvh{mCnt{^p8RjKQE=`%hsM@OG)@!aIMLw5u8+*30vOcgi zoaT-vGK_l(c8e}g<(ptDa8d|0n{nA7l$=OFB%UBG9*HPQXzeoV7!`5a8_2C+2A(5q08GNZ6Q%t0k7U(cC(u7~H)dpBS zU+v{0AakHsR@V^tUa8qz{G8bYF^Q*WiYiPb+Ua+wxz!pO!>-BQbm-mhhF*{}PC8VH z+gT>UH-?kTH+&@hAfRc?oQ2clgI23nNn4`V-lX?85d&uc#&Zeuje)CFsA1htV`^3f zRS$L9LQQ4ZssxlBYQmZsLQO;; zCmBcBQdk4*6^2JM=G@C2XYq_vSIo7|GcZ`1lpM2?B5#&wK*(8nwWq1ZE8X*^VLnh+ zNA)6I^gW!mh$lJHXMQ1DRwyYank=f{i_mU&tcsvU(vQvYAZqlRuRlnfEJUmK6mA*Ea zM@m!SzKDS+p#ecH2crQ|ZkU&d&JcC`Xm57VY_&N1cLnca%ahY(_E#p4Of3p-`(SCABiZdsvsau$t%e~Fwpj(n zTD3;y;;0{!TAxZdSh#TEB0l*GpThM=4`F(8n$D0zKlS?&*4H;EFlo29&~9(yCqDjT z7^^qP(1kM0htsy}06`U+;h7aKy7%teF*e%5@@hNh5-aQmE0S)tJH+HLJlf>_VEb;$ zQkx(z&qyveq2R#ED~?2_$>5gEvEWKcq>{693+P3$eILccu#Q*E1g5^k&9~l+>Cs`* zRS}Zcetok|4BJXn2vw?V;46?J5IyV)B8-SObDL=ascJZRwug?4A#_9mlad}^i&vrh zH8(Vu&f)ll6)59SN*U2ByS>*Rd`YF;IK+D2X`I~2U{KN^2-*Gdw z7o39=LYErqq5vMbpppPNA^b*TC?C@Kc3$qb`dNxgbMvndoDOn|*N291|LY&5lrhyn zFN#RRg2inlvx17E#`#JwrnqoU#is6(|6K_~7^i5Wy*g=Q5alpMN@*ISnE|oEBf-X_aI~E0<^DL!= z0ORbj*i75wX4YKj_xd@qDN9mlUn3A2(=+3MIYhpMCNxk+IVZ8;DhpjsQ)0L^;iAl{ zO#31&^2~vuEK6||Ma26PMLn#ptuabXF_6LcB@P@pLQr09r=^?q5J0!|K(;s0Um?_2 zHk;ic)XlZ&v_LKEl=&K?^T&ViM+nLJ`qa}U9@x?Da4rv|6_9JiZd3Jt* zA^t;k`|T`O{&OWQKBbCkJ4#%6DTU>p>-8fnFE0~<%MIlco3#xVjG;}0KwoG0jyJvm zb&m;I-5V}1EfduaQMD+m9YTGn6jjm3cwNI&UFu<&5NoU3z$bt6VLZ0tvt%+s@H3mk zG347gjaY_=brrJ4>YJJH^0C%IWx=Ae>si89;?VMa)NF9avugM1I@Ql{$m<%ts~ z@z85-BQ^_9`d75R^qJB*A&rrd7JNo2xcc9=hri=H@i$GhvAMM|xK<^K2I+g8+PT`K z6m;MKc0h^0shO-;cY!nvoL4I;x4H63V$yga^WZG6uNwnlB{-22sPRn+Prwg7+hF28 z1``Eq_+bULW)p9D%Nq&1zS-GAltrYoo%9qQedNpbkVw)>uz3V6NUma5lBAV6Y_3o? zkVR5}{GpbHctkFzYNLABKcFbf4jGC<@k~420XMj}!x#;-XLO3h1wwWS_njOAAYaPYz1jX{9Ifs(o{Zk2%R+T!S{Qp z%SNe$B?>s{08F%as=~soQEswtpO;Ylyu6e-xBN814dY`pK-N~Q>F=t}$|f#me>wiB zX1zgk3tQ{!>{zX#ZPXfM=&Q9F;)Hp@VvCf@kLga97YwX0ktNMw347?>wxqcVkHn5- zP{BG@(T}@i*l4!NV{sxA9OZk{Mxee8-g^B6r!URn@Sc5kqo=4=s*s(nf;OX6@zVp1 zcnd+*)&(pMiRrz|WPX}K(tgoP;+pXq#4ehy+H3vV+iy-`8vZS z#k9AD5%d6bsN`iXb!98Sne`AW-UM@%Q81KQ($&B^LR+a&wj5@oe^CnJ#5SYv9Eb@L z8uc2N+Py-@rkMG$S-U3&1~{dybxj7>3jo5QOsEiB{z(mX}8Ywqg^2$4 zjpY7OifqeMsKu*fAZ?vlmA+~sUEgDQO`*At>J#`Oql_k1j&8+@LIojux#_IY8TCE|{X{O4Q^sum=$^|3+jQzOqJ05Cnpn_-UHJLx!ng9}|rX5`8;M<5pwa-WKJ5>%o#0tOHS{ zuMv_>@g6hlw3QYO?TC zni5?_97j~Ij^miK_$+0jK6||;YjFHxzRyilIwO+TS6k=dS-vG>v_^)SsMjh)cd@** zXeoNQ9HeD2@r<0cfr&p;k>ROlUcejgyvv#cO(p1BwMw$9WdxREjV%nCraKl-#q*bT0Q+veupPnTaLv>N4XQG1-y@Xe%P+r7!3mL%5`t4wfHfv!+%%SA z++T;RP|XZ!PvE(im$4Ol+z%5>=fi=Tm#bNlin8!Z)e2EKL8sC#3Xv>r|F2Q2(OGmI z2vK&|C0%v%Z|aT6vZ$O54g|2cxI_+7}U3SJwm?Jn?sCL5gr*af`BS!OMSJDkV_?9WTWjQo{1iMai0uZqgLhbm48v% zGL|Q)f8kRkl@PEX(>_UpdRH8azx<_2TOl8wq?GuTCyUJ{X>S$fBI%m8$ftIvLu=cpF&%r90xzCgfO_yY$V$wEqBe@8tUxD@-O4(T z2qFn^X{F6n=TclgL9I0LJh|!QBH?E?)1jdTMXaGxm}pfoTsJ{UpE5koeO{$>I}j2V z*#?U~4LTBS(Q}?z*DS%FRs|=PdWhlWoJOKVKV>qswbfOOjg1ny=Z&}A0v3hkGD}25 zq8ZH~nLm}ZLOm0L%qFb;pOSR3V|7pi@Yy^ow*$U}qVvi#5iUb#DTi*igY}Iygh6$O z2T`0dZ4LBek3NpU~RqkD$AfT zH@|?DjSV7a9c~Tht?ruwT*RbuCCb(_3|}$r7r^c=oCNx@sHt# zefv?#vV4a`{TMI3bdrWbQ_bKVR=Q-$l)Bf$uAu~BkXTTWfXW13ys(5$Ry4J>R9ArK zmYNzn_2|nX4<~snL{44tqp(%*`lwAbjim;v~USPd$bCg?S!g(m<=(!0w3&+;Hlsn=1hHHp_s6J(Xw#6+3K;h6hkWKC&_eV@|LV^2RrX}S5< z{=ZCGlwnircg;l4&&juiB zFo}Vl#>Pg9;};en{0asOx!vM%U%Af?-p==*P66%H=x!G4)Mvg>Eg>nUYV2U~42UhLV3LHr* z_iM81eS7!O-ZsWW*?vo2GPI~O_uru(;OVEnh1HF9Obw4%*tueZB3PChu4I}6Ul{Ic z9G*@&foOJQf0}tTcZ+-(t0s#}p2y1aKUYy0F zk3Py^2C@MexqB*KBF5f&8?AZ*>1D1#fi7zJ+*6O!-VeWo0!-CjbSj zrJJJev^yBvH9mMlzC<@=NN-@pp%`N=&{uuTUAlzP;ZYin6tXCxnAfrAp2rK%Jevtb$x4)tao`wtwXl8TBoj=y*kfABkhj4X~UI!2MWR&CcPv6>Ri0*Yb6S9*1N zJ$(HuUj~KucGltL6E9H)aL?U$QPk!-Z76lLz{yh`wEmo8v>cMWfU(?LkA03snL zLgJ{YaUv0EU+6W7myR#u;zkcLY~?bxxni9CTyu>>W5XDqnk1+*Ib+BH%Jdp~*gw&N z^wy1KCeg9WpRT$*{YTOFFcFt5bM@$=mcW4{|GqRo9``U+uh8J67*O_9ERJTQ!Hta# z%C?17VpL+#M^gX9<3U@RkFt<&qVadCjZ)fZm~*PUk!<(6L_vE2E45(ui3v!;&`1}{~L*M;wMmiA>lVs%94uuJ)L8I)v3mIS) z;a%_gPCWd@f5g^G&vHOrw$t&O>A}AbPqaH zq!bCg9h;+=NYbGPB@a68+Pe?G`13!9-L)EI!ZyJCP^vOK{p>UJ-bDLBnFJeIPiq8e z2Um|J2!fa)us+ESVxcYY(n1??tyKt^Ie54Q0qjH?c!xrDaQiiiL zV*!XeuTK)-mi2gtIUd^IZX`kTa3>8PGI5_C7{n(|L=A-Ngvpkm7MXi{4jjW zt71I%>cTu8`RmUR?Y6L7Nm^xCKX6ShjL#7dkKNCmKLaFP_$npnbgKGDHxpdCbO8si zy_PY}o*=qAIrIQ$+oJKcFNjqTv z(jscX5E(@>Oh^b+&|Pn%v%G@9s}M@9yy4A3Fq@dzJWBP@xr?(vC$ddN+Y8Aw&YgT2 z2lwqKsC1HLxucWRc#bJ^@nE@qGy@rovx(D$o<#;%c3@alV-6bJ<`iE&^TFj{pg=DAs;xR< zX0=P~Y>%+nEK$5?JX7d?MZ_5<1CN+l_*r`Png^d-lCn077lwJGlz|S4vtME}mR176 z%cqt}za1L=ZWo=kb^6;}aV4x^cytu9Qsb(Et$Q5e3CNmy*M3`Ji6D~_BFG7 z7}f+0ky02NY2qUv`~ZIExBe%UJ>x93#E`aGtZn~L6>~Yn7nLOFZET{uwN;c{Ed#b= z+$Q=UU0gE-YhWe;Vf$o>$?PO>?R7`+E5Gf2zPf6H)-s<%Bj|gY(OAkWH#17TP68GM<|!~l zPv?xc#Y^t!LYT&Ub90TFNlFAob7xyKSCLX!fUiFCFgBN$c!tyjQ%WF(q*OR?nTK%0 zP~g#gWQ=eCZ;mWW5v2(Y`lS%qTwlY5i?cX)<}5CqKaaTE<4u;N{O}zn-2P9vR9pkA zi;M7Yx*pAXoenS|{512+ld(09OJtS=ko{w?pI~um8M7B>$tYfT?RAv;_xpWpZf-E8 zh@1b=AoLh-eiIo9tgUTfePx3ot0LF2B}ZyLC8Tg9BzkAfc_PmxkOR?}UtY%ES`{G? zr?Fv(HoH_-%Q-fg{uEgpV{2;*1Dj<6Q9N>zV`vV?YbI9 z{4P{KMQ2(3>oBBw8u%3| zd6@k<8TN4BRlCvjJLIf+3i)!iH*C-OTm#)UzTF%Ou?lN#*?27iOLGI={k~a|ebpLj zPTf#QJF&0xW5^9=h4sx%Dz~}snXItaiVSGZLAl1xYiW z4@pv$i!QmdqLT=3e&_)lIdB;N<iM+qMF4#FcfA8Y`-vaLwR@-FnQYnPY|3OQ4?prSKKHrLQs5PaHJhkP zc)Ci@uc5YZO?3m!AcAKCA!53yW2xK3|M={S=!Zj;kvO9+tkyV54|qP)(WnH4jDx0t zk#F9&5PP1#s?}=Led~ zVtYsWveZwMi1vilk*AWG(u7R)cXTwb@N$W5Of<$YZSgQM}j2m2vd3bMzG{`XbjPp=D10;e9f+{F05uw7N5#Z=_1D@ArsIuh%%DdTDYCc+iG$9KR z5^VpD4yDb0$vNi%A0za#G{sQELtS{N2#+XYvTb5jZlHWP|M$P)mw)DG5T>fs zsORUDTKNKco-{nmwK}b3zQ;2eas0}!{svB*I>+Zhys}#7;5A)&^ezV! zxLHkKB9%;v`SFi^1V8n!egxy8k2KPDy@_9!AirPv>Q^aKRQVvJN5PP%GlYoIRO^@y zyYRe>(SpQh)xcMtUdH*ZhqPMB(M>MH6t=uhC1TAQJPvM~XIa7u@7Y(qUZ=C$>-Q+Q zFwVnd4Ox;QO}PIfbc3jUXj0t;^*IxFaefE4xuCLrnmdr49-u1ZGDPeu zy)GL+f-aU9G7qt+CK=MOhoa#y%iM!Rv$EgrshJ!(;vTTMjn>#OONo*MEUSH2hR3mM zVg}D2JC3vGPNTiP3D2aWl!Zx)`H^;Ybf7^jkfXSG?}%Q%O7@4FvQKlub!=ND-MSL;)K92Pm)>t)~4YWo_sFb=qx5Ondh6KR#CBo_`_8+_&f%NgU zix=@&r;SXeh*W}3EU}qYk=6oekx;Zx5x#{C3+YqjX0Dlr!HrWkd#WEnoAvWFQC$U9 z4?}(*_Z*$U%&?-8iEug;mW)#6@V_FqL4n;B1#_in`R4^=zp@pe2B@ zph8*^I<#Y{T3cr%87^&0hk{_#5>I^N>sY)vOA3x0ou$;%8y*{_l94Yx%0T;@9XeA~ zO4-)Bl%9u;^%d?-rY@ioR8EU1vu7$0>6oe|Bbkih*|lpI14vA^kR>><{{WVjmMo3E z96$(>2E6bwT&>`L{?VV}LqGU_TsJX;&<{}w0t%M$zDLHhL~)#=z12Y_tg<|@$A&YY zATP#&z0>&RfB%2t&p-3m_|rf83|Dm=&z{0%^ANB0DhBWDN_!zAF|!oSp(=jhU%nSV z_py&)%z|8Mfpu(BV|{HMfAv>?MSTxP&ge?GoK44Q=?zR{344Y)>p@V1tPbPY=@`H9 zCyybi1YGtiPv)0;O2if*d~C&uJ!3@%;L35Z{i$lTN;am~?NRoTB&oeeZl+UNKFd(A zR^f;Kj;6Mt$2u9l;F`Q`ukj=u>lj!o?L;t=2c&$X`y*C*}?Y8 z6OQI)vowJYe6&VKvAMcn8!?g=Y1i}&ruOc!nM_fyGlu!V!2@K_=FYrKP2KUC19;6t zZ^hi|1~uW+-WEKiQRb^udF0PX0wpvN^<~NpPX)xnNzGv-m_Pn37Eip0Z+HP@YY6ZD z!23ueBj*#cd(Sgzn)EO+IgYE29AZHw%9>L`&6s2LhS$FVr(QnAQ>sadTBU|P2M%K9 zzSjJ|jgEY>HO+-!1K9Z=18v3YKeAF5>c+HRu%2wQi_Clzz!Vb)axaL>eTU~V8 zXE1;15)NN;7E}{MAM_ANY}UErwR2sC{k3ipy^Pyl;wU`OUl}6X@VmY6L@ZR z6&+VTwB;~eLfUlN9V&yaEG=T*WSKUDqy8I!wY3$VYf%cZN)WI2(Ht70EQ%;ybaq*W z5QHIX?j$MoSIo;~`d3|bHI6_1OacEE1;g!uLx*gXosr=;w=`t3)z&aJ+6CmBoTudb zOjC`1*2BktIU%AL{k}eZhnr=lgV}(Lp3svW-XacDaFW0 z4L|y^58|N*@5kT#%_I2ASHFVwjZF*AkOK;Xq6DTG$W(+L*q_E>8V8mMfuq-4jSsy4 zd-476d^^U6YN$ET2Q91F^BJCa>9mzt6_BaUR+w)#M1H#73@7xVa8Zw=zAvp-a?c{*gZ9cy%W=v zC5(-Yk^wwFKZ|F+`82xgJq%SsWLcjh(8L(02!7b0Cg*eCc#4_S=tNz_X+-c&p;IcM#r<{o z(JJ2h+MDp6x4#b8965-QkzsfuL1%LVFTHdYzx(H3z*kPJqwhD-S24{ln9L?q9)0%K zRtM)!yo|>6*P~^en%zuUQqVC=3&o`ccQt3bKGIFD<`mD8qFZyCs+B7Bf28jd^Jd;# zHU}qU{xPMpL0%@_2IN5ED1}ND=^)&B=baQWc|vlt*j{Ixbjk4g7K+*Kw6PT>jIjso zp7QN>wUn>Yqg1{%GG@so6{YZ5YNA|bG9^@sj!_dE_^#L9fri`ybiZ&E(jrRSkS9vR zgW4wKGUF_{HiU3|4=}q3p|*h3=V2=4MpgtwTCTP7#J(T{6XTXW6L{p(DpmxSeT)Hg z{01C3f(Z0hmzJrqO|r+9ahLimVPJJFG}3;O8^*9!0YXV6qf)rxFilyE`MurK)9A-N z3TCQ-&-WJtciwg5>{n2j>;HR zlhv55dfUx6;?7&{!1>t^;FF*FMLhB7lL%@xG{catJ;^M?gJwDH91LW>5U{{tlydbU zi!)44jp0B3U;hs8eEVBbvpmcM@<<|H8^K2Q??3x6zV_9}?2Mbp#nd=6@u^Q3Bq=7d zMcg#4FkV+kQ;B%Bi$K(H^VR!sbWez_jRYG?T77X?#3-Zim<*B$A&qm0>P_@JJv#ti z^cT&ktW~SDU;9xzXB1}QQ=5U# zZ+B?zN-uo{B{ywLSjG$Q*>@1{`=KAkxzp#cy14;Ssp9C3H`C;|tNl5$K0ol#?l85& zzN>GbRCr~pgZcRds{4`Qk^$9syGe@HSPLKd=m+q?y?0^X^bAI*j_c((1hWk;%q`*z zpZzj^|M&h7XHK3Vn67WnNgDSFm;S=zPvF+OZbPL}Wg6_zhevZ8A(aEPF;>)=N%E4L zvNHf>XPttFk&L5U+uT666O%#pGEko-?abr59=a7D{lL3$)&6k|g~rJ;1-7Rns4&KX z)--NE^1b-X7mwi&9)1!h*D`FGZEuIB$o>g|jg>WAx-g4_heHHmg?q!cw$22IdVt)n zT#6zFS&J~s8*e4F7Th4Fhg{oMvCge!z07!M}1^{p8h3u97IsIDVX|OWC?VC39o)wW(dyc?(wLsV+WqL>EJ#CF`WDpA0rVDa@;j2kK`_Q(k5 zGExm1%y8^eGe-IRne&$8$;-0>QsWeYCa{x!T=ZG2s@(k#K7bdVeGco3%hWrgy)HFs z^#+a{Jwk+*G6-@VkTlI`g8RS4{PutQG)|s6jW2!qD|qUuZ{fs=6X zSGa>AxCI}`6q7+0_w23W&`=jD-J~F<)~q4Z5f1Ke;kW+NkK+IN^|qm0IiMMI<(DckzwuW0@rr{dul!O11@usqnpHv|jcxjxBy>~t;St<$*EpK3 zQFPOc*fx_SEud$DYxpW}Y-;oqg(%UOpIgAz<|_5zNuvTL(h_KmkK>nq@#pcjdu~CH zCI}!%w}Yqm87(6`fuZR!?EAoX_)`Z*c>WEYOp&>}yt}drXuDs`d8c812n2;$A|ivp{D^I!k!VYp=(z{=$!7 zY^1{KU*L$>qbQN!Uhs4e<6#Fs{FbZnx?8Tor+)kI@wro7Lasr_RN5g_j{?q|Iz^Ji zyLQ{T7SEwCwx^|oFGpFhy>TY2lAE^Wj2z9Tw7pklrD2Obu>JRq1CG)Jl`No!Whd&> z*9d;CFaLSi!PD08Fn#+X^#Lq3jHRk>4mB|`IYoy@SvmzWT2c{mO=*F^M9BRO>>o<- z;eT-(D*h&-)5c#9Y zPN$})aqm6%5)tC7vf}?efYJzV!Jo;PEHFNdZ?7RH%=}Xl>z` zJ8Ap)CbLafamPLn2Zy?-p-W|^Oe$3BK0$w>Hqr8W_{s0P6KChv@cHMK(3cI;A>tuD z;abGxxXg1iYZrt8=@-EcnVJl{Q44KqpEAoX1hH2HQgHd%mUP3gLi?dw4G7AeP)|h^ z=X_@qu;Xhx+w&{FgytT|KP3loZ|_;58iEo_j?AbDZ48BR21!CqLK6wmw7ch-V~*vG zHM+T+8N2k}6t&n-iPIFlUP4gN)wOj7;F0Vx>-$dZox~?U`BQkq&DWwqTv{F@4OCIJ z^U26&2t7_MuYmRWd5n(lqO{UDH>aWCdCUvN z)2s!ojqX!gEpR*kkS6Ti*H$;NzPdrW7CPx7jaKoU4_u3%`os@lY|Lg0ewBd&Duq;i zGE5{Vsd$D&Mawu^tK;Y1eJ{@c)|YW|wMPu%I^zT2(S)vt+4C2u0cs9+mS`(kQao9Q zavP|z`Z-6<%rHV9*cx{xjiis(eE;6fCUvB-GY%)<6A}S?%af~Vv zUveE@m#9C)Hk1g>%6S1=tr6_mD~V5w^q!?pHUDb8#uNEY z@2BYB7)-NQ!XU6nvXDU!;7jmD$fz-4_Gdx>9^))udg&$n&ENhl_2{UV<_pTo;l$Bw zq@JY$H9tl@Tfu!Z!0qEb49f)BMh8i^jiG(JptC*?u?oq`%)=f0@+aPkU;o|5@i$*P zgB2B0hUj!f1iT!M8+smUwGj0xlOsyoo1g((!80*#NINvnro|#^oOOX_d4z?~Q_uih zM%l6xle05q%!6f-3N*vEbEu^X6gP)K899(ac;TzP3do{sHj4p4diRG2-F&m4b#ew; zr8x;6rE)IHbF#8|LyvVT(j>v!>Ka22Re^L-oxQd!0hZI;#8yJWk|Z0@WJnY1AhJ|KZv1v6$vrRnrqHz=tyKfR2CB< zpfDRCv!^oYBhV?1G&O$wU)+L!|A(K)LL{KH$AU|WpVh*pIh?3garnp)HZo4shWAR~dKpZu|p;ivxh ze~nrYAcz%X2?%P=@70NVIDPIMHrrb?2jY7&-(W&&95}F-0tB5&GJyH6Qwk|kLRFeG z9L1a1*z6z-7pPRWZ|^iFcaI?uDV6-RVyzStz*Yi@p`Z%gsOD%z74^B-Ku#^7AkR34 zUfjpx;sTz0`dK{v^wY!y5C&EFwK_qUWe%p0dD+x6r&UDQ@3--~YpS?rEW%LML9*5- zN7tO(&6DSn`?#r{Fc&Jq-iaQ5_1`{#+i%>DpZ>3(Md}YhmC8UWOR=@pMl}dP?N`r3 z#Sf@VOy}1Prdoz@S}CbHAh7*Qhv;KqgDt{tw^MeIaHp3pwo3l`rE?h6Hq@x|eC*zwEGcA}ON%6y6-&iGB zuCj1p8E^#p{`Y?m?z!c93>ibHEd19^C+DoGCACAEI<;bgIzpvY!`pxGU*dOv=~oca z@CPFx_*yd(`!O4@*{IWJGD1Y#0jJ`9&1To4An!BLcQ5YZ(%eNzSw$!{RDTmUU%wBZ z_~GxzaJ|kZKzo)sG_Z@6L5m1Tg{3mLd`RYk&>hDMaXD_9ET%=RF-RqdP6iQDT^S;8rUMTS5)h- zP8}q48X5DEU68|*P;0X^&*5c0A6il(G>0>U$eZK#ZHPw0hoAItXGP=SwUc=5Vh?|C zdJ!)~8P@AVi0Rp-j4smLuq9wXr-v-+L$ro5TB*Yqo`piYL+DD4%t#w#oFr(syLjQmi}>spzko}ZW~m`wtyGB8J|%>f zoz!LmId*uO2W_kICiaJGc;HA2hgxma(MQ~g(Cv3HG};8JzA-}7i&4^tFu|yZA<$NeGB}sM6l+0dDhuqHj%De&{6CmQb_%$W+1Qh%ICBO@*fBJ zVxyvg+;NnDLRj<(8x%?2eSLL}&{oVi%1y$JOMu|N7lk}*1LfiWtGST zwH6{F2pN^6iW=(&Z@dl@WV32n@-FSj%g&pUt&(GH8`>*dnf6qo;gpMdV2( zo+QQG+$Bqj*Q1DPYOIET`|%HB@APgPb$g}KiH$1UTtSvVlCex^x>hF%R3{_kRW@UOV}2Svk*~K7$)>9nOPcVU^lI*`IA3dXiw3aj#E`e$w~R8XqHs zOJZ4OsS#(Kv*%l_YrE}Ao8(HRQX+J-e~fWUl9ZI+di@@;<5t6v=)-B2#BOR5E3y-@ z@FYF|*s8^)HX0Tt-AEMy6v#N}6k&H62Y03T-gn)I_rCD}8tE)V(jg?D%q$V2)rWwc ziZ0<6^L`!wHy_+o1`v%g$mSGO8UvvRgbg&0-iY3XIe0{C;;7u+!Eu!5uDoU`IIKiM zCF)=*PVmM?4To;thtDsy@s~@RI4451%`?t*=8NCcQHrSD!B)6Q_3vt}LOa_lO zCa1UI0Z-4fXtRuQ{+Z(ilf|yfe^aA`NCOd+lEi}M`6KgIalenX^>zH;pZ;yU^BwQN zfvE|`4p3h#p*31rUZ!bwo|PyjogI0jrxY3$55tWLR^ph3-@JUlPY?E|Y?Htt*32F< zS^dh&D#6`HM@KO=HAzl|B%W>NEBidb-|6?Twzh#wm*#Nl< zMRo`)b{TR~xfowQaKO8#nsvfs|^%(rF# z?HGnE3Ca0*)z;#-|0>UO6r~C4plCj$xKp)_yivF^l)aN)w@V(*EH6O^nJ)sbx&Jj7 zZ`KLbH1PYncqd`AnoNSG!F!PC3ZM^(l;C(C5}D!tx4j9^J@zOz&YeYQrBdmo8G*FE zv5rQgic08}u5$ZT+pj5w^_5l3U%13da-KveG+uY-^?2~E>zO`crnCvx(5M6X%a&+bz36qMe5CL{2j;DdIF=dHL80oICw; zey@8E9>n$6UPq8&8opF^5Yh7pa;o}0^enx!ATUPE^A?%1tiK9rdlM~h3$MR*0zdYB z_u#;;Ey!pVLZm=!GdZFv_u(Wswa|lWC`!d#XG>Lv7!A}9-v$It3Pw`1ZlVS`bqHd( z1+lhKH1CuLPcf08y?K7lu(GSdZc^%L^{`7u7{eA0&J1HD9KxTSS;1+qhE2a_C!;Oz zm~jlAN16*OOH1TXhewAoI@YwAaB;3gslF|>)yc8zqdfs>9I)UF*+kx%*;^@tS88D; zmwzvxAA)ylM*NuQH;Q9)lL)`@pZ_zSd*ONf+|T?p4vdde88z<3*le$1b$umYGdCwf zlrx-HQ7O=5i)v{Sv>RfbJVN!t04tG z{a&BsjW;*j+&@eBcdFGYd@msB*eVHZarK1!1(ne~d-GgI_jDiitc{^;6E{tUcyLCd zB3DrH1hQTqSvNwZUV-0g@B^13F9PP718L=(-BU#x?!h1a^*3>0wFj>n(ppgVRTj{5 zJw`&2Nz1UyEXm}={Sbjq>TDcxCMg4Wgbg;d-uXO!8^DqB>s4n0_2vKRo5lHd5yK*{ zuxeTqM(QNg{4AWuwqH`*L|a~3!?71m5mRQ=XSUDL-QzTvcjGNL*+yNVy=#m&lfZE# z8OV^D@ORvLBRLvRs{v$H@Z|Y( z*pAQCmR@^lX$e}5VAsefye4N+`F)zmO0uBL()>J59($1|jD41`3}uQpzwUO_yqKw< z^G#bUL5dW_fJ^4oS5066j~f_ve|1a=Epgu$E5MNI1_M z(GcQjKgRoqhjHJH`|a@=< zTsVj2#d(rjCdVy2qKCiX=GzFB=2vP&%qa;)s2KneMR@L;&#;3CLIPh*PEMjRRA-XL z+_3qCYPvK#%LJSN`nroR|Kpdjv9^Iv{K${s_FHd2l6DZMefYj000(L8*gs4_JjS5u44B%98ruVa3|)zlrmWoIpx_i+Z++8yf4leG-^#^iUNMLKy(P45BY-?kCvY zg!ULt&0Ie&7*Q0&ki!mZ*plP;^zS`^|NiHX1C>$Rw<@W&ih_W}v$nF^?IEFS_K4fc zBd$Veb&~`=gI+J@H7Dlmip@M_-WMBSPj&!@D`g=ji?s7w3>s2gjoJQVD69h+uz$IH62T8nV!Vy0es=Je}|+U(F5RxlW4ClW42bo%IXqc|E4$5nKbXw z@Bjaty$O(A*?AuJopbK?w%!}v*n!zG`wWMqC{f~~P!dTqQrS*y(ovL@%OyvVEmh={ z%eE|~6gw44<*KNpSdOhovZ;l#mQqa7Cbe+iXDH6b8O{J0?C1u1f9qY(NuB>ccX^El zIKx|o0S0*e-hJnu^RM6keF<)EZZhwZL)@f#g4XN|p1l8FN}Ggepe5YrXNry}E~Od1 zwb#eyaE$RdaYFfAvyLQ?CnNYe#dwn8%wvz@8z1|m%ZKuPYpXG`nNXf!Zf2IaXexdl zo26u$=+QSf)^YCDZ_)fh&}#AF@dGS(3Osi23YD{TZd1`7s~k47FB3!O17!UhP@^5F zbOce@*C%8QeMt0g?7^Gw0O<%Pqb}ZkW(8|sy@XLkqgX!|dq2-V|2)n-^dLcBsb3=W~sO2Gab7a=e$g*r(LJlg}tZ5()!3@?UA#30n zjWFH8@o^P*&=@8 z!~Yqdet8e6nsJo(ih~T%XqKU_)bJW;{u=M(Ih zruw56m7bf;@>0tLQ}%lLnszL2q!#58i6Hc=G57tLez`v)XnaJsSEtjp_5!Pfb3`YZ zqso`qxVeGJV2G9%aAXJ9EYCH>1h{$mDxUe;(|GD_-^CTkJSSXu$!O~<0qsk)7kX$% zJ~xF8drn2XT0jPkZFu92QM~&!41gK4Im!EAYpyB*3g| z<4wm3{LBa6j(bncU{-FCXcMT{B)Ow&yasPiv(MpZ*hSsHs=kZXD3n1V zd?fanuigg)hZsg_Z=!G<4WeMA&B7}5$};5M1t2ldiBIKKD0Uo$WGFWg1R9UfeIBtI zdybA=vg0;|6GAc$Vm`!=918L1%2E8;wH^HBS|8^{7sJRW=xqr;DH7Ig*xuSg8mFuY z;v9$#ylloJNuu;7#-kAl{*2<7?jfLZlEBUNb?z6`fPC7^XJ37lDgoLlF6yEnlFD?F zASfxtwG$;X8ka9##xH&N*YL0Z%`f8cY7bGMU@~j`3QD4heKx(6R z?bRCN1PU?qGY}r>zx$Iu+UXjeT1@e_yIMHhP7#_6M*1+Gr2hE0zlWgZ!<(C}06Guh+MAV*FO3p{_Go@ki7-hW6|JIGF9O>s=2u`kZ~0hK^%Mk({Mb7L%ATyW7@7yo?esIE``f;ze%Mxc9daRPG7P zw?a&8^dSX&7oFKnmI#3xIq+9EZs6?B9)@`e->{Ab2wmlPh_W0(oFepcKxHVDhfX)b z!tw&P_I3}Dfi;ePMKhDH@+vPs9*?lGxruB%W)3DBg;H6MDeGmHVopT}vZ13DQ1De7 zA|IDHXKt1cZb0vDLyUJx3!FmQG=s?4fce5lI!LgvvPk`#G#=ylOb^HBdbqf=<7mRC z@7X@k;b1^I+NJ@MCHfNjTrRc{a!n~j?Ji|b+G!n!esD`cl!pj`$s~47(u4aTaPvHQ zp3#P$gP`G?|bQ2U^VN93!EKjwFW42e^AC#|OUq9=!j%j^Nb%1jyHb zG-kt6(l#4eH^pBs8l*8`m01Hbe-fQzXI!Lb8mfn;!meu>q4mHysD45K0Du5VL_t); zn4GzdT4jr1Wpi+xUHP&vghgA_3Jox`fi3jI~G>!u&08; zDk2p0o)q2dT9SZZyh;*)p^4;?G?$p+rEk4}pZ@88jQ{ih_-DBH)FI5yXymD(euT7+ z(HH22K6<`@CsTMu4&_Jq_$R)E3&kwn@s9VRZH;`AQY7tBnJQz{opr8S(e(WEfPjnC z%V~Ew!pJ5^mP3k?vlb;IF~5%{_HLlC-!0w3{jFU*dQ{_)1)wW#QotfeLq2zX3Io3# z(2S1FtSPASojoaQpVWLRAiNgRd=@E)V)hbzUK5JktnBg{FpLn5HBGgMg7?xs*ghK+a>oldYm2*4$=FZALuJ zFDzo^$YGp6dlsG-63eV-BRk{4%L^iZn3D!UHiC#_u7`^e)#*(#A&4k+ieht{=;TF_ zvgB`(!XyU9IuHtKLZrh9Mmv27KSwBlxzNXvUKbMEw}D!7E!8?+FQ_+~)*2^Hp2X(H z4Q#Dlr|;{BEu4AyQNqISO@pwZnFc%8^nu2AP}_ zv3p{1d71F}xe&;boRC>Wji?it$u*og8sZm!@~`2(*%)5Fiy|9BXr7lrHAjS6UiJ>x zEu}b78*sE{G4trV0L(dOp&jCc=`<={ckA3v-4A*An}~NVAu4!(8$w$#6tv97ZHzB{ zw|A>jD?`);rqqAnWES8f#)^)y-170{{Y&`5P8Yv^b`9rMi0zg{;t9w?(eRuIoG!go zTpH23LIl~*v(7*}rU$wwA+WKvIkhIzJr5xj!CH;$-_eq01u0V!qxOFLK(&4i>7{t) z>)*gX{U^VG-}?{0hA`}4VOC&QZ^KtPW+H=LAmJ+~^5iKz_u^Ij^w0iZn9Lu+3$L!@ z$N%2{h|a7>!9yI62}PBp6WV7s;x1I9W(-sPSv#6|L(OJ1O6CqwAZ*fT*bxw%J)?#_ z4HS+Cm=_v%x5jw?gCUN$hY*tu;;fTV$JvSuQin7GAOauoed=v#*B~f+0oJJ--r$g8 z34ikE7x3ZV_&nm}lhh9}jk-;Vl6#|9*$cG4;w0w%)d1uOCo92cQzD`=x|=m|LVBg* z>n-Qvjj$V+fq%p5ykRXoQ~hFry?H?O_Uhb^Ru; z^e0FPrqWhgRG4xa4x?WM0u<@RA{f$>1AST;#0K zFh`KM0}+p$?hjR{UCkwu3>t}u5S@Gz;4eTI3ex0G3q5D&VRThmpu^v4q4l=6qJM1% zL4TLEXv!H9$)F0NBPkp-)EXAA-d94_Fx9G-;UMQMK4sq{MWC*7C@_Y}iPMaF<*4Z$dEUSLnXX*)_*%1y3Y7JDY z9l^KnpSx$OB#9LHw2_-I8*au+;ydFxkZ*qTMf}*0{WyO9=l)kbbl<&bDGepY2*em3 zHI)IA+H>E=hkx}yAZsrHiHDE>=_l~qbKk&E{Jp=A#~yv0_E)cyV>0mw`4}fY;z~Xf?c=MRw%iX8u;mcBggft)xYh0vujeJQoLMIXb<7sJ?|}6~H`=G1%CmegquCs&2H^ zq+{dph-jycbD&xXgxxM4|E{-ToTh{`-?_1lq`yrW&@cn84~9sFV}#=YqpPwUW-x)? znn0%-svSah1=UYUSH;Pp5=L(X46h})_Tn0rJ3WNs6fzU!Bs*OR&t$iipQQPZLgVVS zt5{fGbaIxW+GLQL01JXBxpRuz2(98VCy$L_=wLKtHp@x;&wTc?uUq7&J3E8Nm!F`| zC*d&7yhzBH;yabh?wO2JOvZh9<4btcVTFJGKfE3HtO)q|geUV&22e?lfe?l?{6&Sl zd*c~2N3mIJ4&D3S3@klJ{({4foabvq={U+#R#9i$$5+vM@?nf`ei5z21_^R{qTLz} zrgwFSuNqxg3%Z;7_iCW7sOm~eC{{TJGMu z-7c1vm$7+cy;?Os_lin(b8~YTjR#bAe)jnn@RL9J)A+;R{r6Z|h#2UCjQS3KVfT0k z|KgYbEk615t1y0-B72<|xO(9#{^>vYd8`~eg7?1X-FVOUzXQulv*^uqDcT;5W3*cV z`hx+)j=*rl1dplxmBm!1EnMHy_kGH2gD^ta?h=Y@$&Yb3Uc+%O#_=#gS7z{#AQ=zQ z3l&s0fCwbeWn^0(3vBH8_~h5u@sTfIM5pWFAN}=5@zw_x5r%@%fRfkA223{LvF_ZUyMKS| zDXRY-c$!L0ddDhUzRt^v(lhB>>D&)o04IfN4ZiXihwKm#5T=5Am73Z%!VHu`Fh7rX zeE&P>#*>V>lyax_Ohaq#0NJ%%n6rx@NJGP>jMSQpC&&je)^;W^-h={pyZPf380&)Q zQf%}@0z?5DP~$L9Fe$RhE)IE$2C|Y|LnH;&Ye(ah=(PRHo7WL-$q{8Y=Oi9u9LGeX zV%*TAmJ)u{fsz4wen=U`*04`T4kE9z`n zIu{vFIJ+4Iof&wMk4c=P7!Kg=Uco!=5%@_g+Y?$)Qrv%Q!r(ND!Y%1MPoiFqxU$qtYRJSpFbC5G37v6( zXNpRPy}an`Iz(mpgVuH2k;>#wXE8VW+p_oy(;Pgj-JYk2*qjUu57&vB3IaTbV^iwU zXuwPpqy}j1NP@K+H=)~Yc)}wiloQJ*v#}PAwjyv&jm6@)=g<)b-N=LV1)(pLQJ87B zasJsCP-Geb4uHFr!Ze1p*{ddAqj76Dzwh~wzCvfFhaE4Vs89I;wk8sv|LR%%tv4S5 zlADw@U}Ce26l$VjCJ8|m)tmepI*i!Z+@uD1s})TJF~p8r4ndXCc3z-}bA0S$e}>(soPwzCc7{jui|fg*Ae{q-+qEvh`qw_dfWGk z$Ib}Z!xmJTZwbABT`cWx!xNNQNoW-Px=Sp-oN2|#3U}*5a zy!9^p_O~|hv9(=nbvlS?=kkQI@g#PJW5lnV#o?7hSfhbvsO+7_h6AP}B~X$!3TW#ySTA8z~l@ zvcI_WY${DbV`^q@;}CWn7nf8*wd4b9JL%hGAjSn%(@BNbqzrgHSM5t3gso{v_G6A4 zo&}Yw@)Ra^^Drzn?WpgYNL^76JWg%) z{d<-torxhh=pZ=p2%^IeqL4i(qy(MjCb{1C?DxGFbC9zW2o!MlG0glo@5S)<|1-jk z34C2lH^`@0-$i9Od2&lvpJ56{X0bu2OIu|_p1-O=OOJ7%$?#M6FW`x#8T{s}Yk1Za zH2XvdE$bPiL>lMV+}NXRb-vpoM4V9EUo2cNfst9l4eL62Tvw-OaVB;Y+G)X{v0w6> zDRFEN*XeYKBW-h2AReda`vU*_hkpe-*EjK_f9-Af!$0@~{HuTaN4PPRD7-EOdNqJx zzr%#h(UhObc-sA;&CrhLx95Io*E$So-+<`5`{^ z&wm>~_oLsB_q}-uq8MV5_3)b?eFne!iF1fm*GYVveSo3qNUKT8T21ad8XS8@mG5!Z z(QeV;u5@Qh5$BbMZcD)ra)g1;vH@Dacnp!Z?^8MXvZB(slY_E!DoA=|AFkx*ax+qm zTQBKLnzLc#>`GYN8?7Uks2by_x$AbSTPV$lhw4U4xU_4-D>w+h;WAcXP!Y!2`|0&& zcqquxaoEsROk%8m>n!F^-UA>)Bz46^%Jp#06B4Z{NVJEv1%adUbLa(%2iBw{bVs+{ z#ijF?X)?Oy2heHknncr4ZJ9aQh@Ieb(sLU`QG>l8x&cL60OX&;HVQGI@0m)4b7_cQ z`2A1gpZ&mNI6B-#FVEmqiQe0k? z)GG;@Y)#h{ap9PtsY&nq)!c$T*-f5BYJQkHuh};s z*}V8~wVOV-nTUXT@FZsc=6kXI@lQct+eVNS=-D+Yo2CM_;8Z!-r5viOT*LC#wV9G^ zg^L=??<;MV;1^gZcJYG?Gtl?1;@opr;T>MVC@ZLhl~WbdqjKZs#tyn+L@2>bXH-we zREFK`A(<*7B|wCk9tbY)lbg4q+CQXiE+rNg7O%;EwS!;zVV^O`fG1Fo{Cpb6%v9C}4M5^PBikXdUz zM7=o-Hg{l1-vbzVExcwFKJtxA`0tM{V4=SYFCVc$oyW|n+V~_dIvaJ+32mgzl-l&! z#J*|MbA?efi_dS3@TcdmV*Te5_h*{-@O^4r`$0XON+LhRbtEgY!%{wz+`828 z1E$)RB&ek;`Bya9V2x=+*EA$3;De)}nO=`sd$qoWBFAQMd6A)rQvw^jXD{G zPthiR@FW%v{ntpo@e1@e&LX+C%Mf3o8yQ3y)$`j-9mLXsh-sTzqO!^fYI1^_x(4VJ z3Er{L!Vezp;p5vI5Wx%$v1ygDc#>hr_3myT3-dFq3tr?d3l*-vq^9)Xxw4_TQb2m| z+C|j+czhdGQ{|w!?oDBTI!|f9vDNhlo ztO?Ru-({nh(Y$#nNU(*{wSptvEXha=r5{g7f1D>g1;GzVm?a32W(gT-ljp?fIo{mB zg=fD8)$3s{>Qdu}mAV2-IgKa~3l?PRdBAe7Lquryu!K?();ntV@b7;6_i*9)vj{by zDgE=f`CUs7)ypYZKr1f{Mf9Xx=0$`ADQp}|#?+%yQ>)!0<83nUTH_ z^=j(X!9-DMD`=MyG|z#kvjh@#5e$Z3J%sSx3&3L!Ao=XqF#h6mXlpJZYTA#I`#5Ed zyrMJ}jXp?CuR;a8f)(_tQkL)>*u+4o3=7cshmYTd%`bfuUmTCH9f7oOjLIpvuxE5< zXOA3vryDsXJ2tvr6v4N9)-W{GNQQ|*S7ahp_EVNAi!^scnVt*PBxRdk=+WMrTU@}# z_BI84I?JF%#Ovi$08J(9u+J``=7g%AyU;EoW(cwu1SDlmI{7k3Zqq<#xJ3ra0CG1# zsPlqYw}sJYbrfVbf(Zl>hwf~Q5yx)$PAO+7qu~{wwLRFXi+H@@*3An$insPOeu!}GcAE_xEWs(#b_s@_8rWD z)NfcX)UB?!PJLbd+dibiU}N(J_O|zkB(4*6aOCJw_))83=hZYr;UOqIilDiKq$?K7 zVlqPi+*#b1TS63eA(j>({T8!MlzfG511xqs=qR5yyHN_cR8ZYTeC9J>z-K=3DTG8# zTpTn`amyu1TOg$e7X-vlLQc@8)rrojk-ojLgZ1;TA@2{ML_)ao-07rm8HNAv@=Xkk zzz;7i;)EU|5;@_{tpiczuoNMr1fih{>rBG{L%)kJ7ZE=CwU_W>KS8XV{=@Bk)Xy^G>7nq z-iFi@82!~N@b(fyBe7#DZc&PxLIMyicscm)3=#`(RK$Dbi~yT9gX6^zKlQ{JB+s0~ zR|<^;0=b*Mwx%gbVr*^hK&nM}?SM+H(pOZj%_vX(*R?`0`gmlen-TbNzd@H>)FYQ!wZQ6_!I>d96}Mp2ib%ID#nd0GFrf5^QM#Z;+O zI!1^CH_+?4JWn|p1P(fs$Iry~q+={L_3)y7@uof-q-6dl16;mx1v7K= zIC)~$Ic_qc)Bu0&>{U&ZyySC}tHF)$!YV;T@l;t3vl=pkCWBkffzEiaIq@W$o_l@iOk zw{ejkiRycb%4DsG5nIajSkf%RB#tX>b;?w2HZ$152Oe3%2j6uDtz?}wyKR!nxCB#jd=?ji7Gne-s$@how^qY7XaC2eS2iL_yke*C|>v3uLYo- zvK3MC6Vl6OvxjC8{Pp)Bc=Hp;zVKHVeeIj*rsZecTI;B>CZfSFR?=jY^yuZVStA>R zL6Gg?O(MomJaiO;XD{Lv5utBxZp;g|QDq=kv7*1OJE+T6+y>L`ER?})1DN1IP}Lxz1Mhgi z!JJm#?kYL@n?K1ZCpWaU1SV-hPNvoE;EP}S68c-))ZdV{#@2W@^jq-?I`-QJR z4YPNHNY6Z%?%9lMG8&>YJMYRN5~IAp7q|QP#?AnTZ>;0~-VBbmUHlEyR;Xbq4reJ()=K@=xMxgi7@ z1VU?3--L%HZPr443SE%!)5@Vkgb2BQ?K)*&Ldwdtds0@~(c{S&7l@{1 z;%HCGbri`2V(&VBQ%Dj$!au~_rr9}bc$}O!gE~)Dr{ut2k=%; z!aMyK(CRtlk8!!Fu5>#Zm4r8ViRnL=RLAbksCyGh;M5`bi|-&^`Tk$Mf?g&evICCx zLA|%Kq*gWHZ>^;98l~_t!9!hv_n(->jkO8-E~Vzu1xMJU$q2gxh1Hc6Y+k*B^>gQ; zWXNS4Xl!1(iYw>Og zU82%k87-IKV9ItfAn5UcAG6MC%4%%DBh~GP?v%%U%Y~T$N9F}KFP%rP-Nwv7vxw4u~eKN1aS2P_l~)FTae@a7bmI za`ugIJw`(XJ`L%WkD=s!vC(ysq&!z37>PlY$)aFlIYRS{VFn4F|4)C2xx4Pcq5B`e z%IVWE-8L@8L%h7Pj-AWb@Y*+Cz~IJph+fr3^(9iyNU3Xi#*KZ ze7t>aoTbDZs=Y3Z@p-6{C>3-a0h#I0x%|Jt4_N81@mQ>pwi~Tad{nynfx`8uXXa;^ zQbnU`TvHvn719*5%^oLlS}AiUX-Yj8O(l+uezk%SYKgN_=7!=Z-R`d-8Q@({+=T~D z%t1`9QublugyDyh1(_UqZ`E^YZuV(s(ikFiuO$}TAcD7g8o@me!2})3dZc-Ikf@R+`@cge@W7*5T>3~uH!Pk4^pP=%@Wwc zQwsfl->C%_Rq2%(n?MD&i2{pH+pFxNAjt`(L`x?n zR3M-ORx?$)1Ow;;K^elSJ>HHaREU}mQ8otxr*_T(mqEpWRZ#Cxk}c$nT_H&mj7DQB zHwDE5Y7ga+fM9FV> zOG@4(d*pc#LgCeTuc?ATPU0V$I>1nI5*E+a|;5QKccLNCgfB zU^ikIg-DYzeQ|PZg+WV2@I{0y899C8@oB(q$*b+>XJfPFIJ9eqGFq9er>@G$P-775)7D1MK*qs2D|Gwp_3e~u!Ss7 zsMf6zaC!+vOrs=GYq-hnff;eFks1rGeU5~1bl;-t9T_ijUaY1RVajrjNVKUNzU5VD zL-p#zhgWD=QyP!UX|C>jWOWtSF207K)h5QyEYmczVU5Aj!$&xZYKTb@ua~2qNz)vY zc#CP^b1qBKglonFcU35h76v#p-Ev&O8CMx-~~tsrl{Zj!LQ7s!UV` zPqiaaaG4fmeNLykbW~F(#q|Jfcnw|ElIiuHbIvO>h~D>hM0cOSjgNj3t*e{p<*b;f zX)0S)dsgh*b3~SCs!#j%V#rWl2eqzHGB#uSG>KkN##Xa z7A2jm9e-S$r0@xX96LuTYol(=6ik#xP81rF$C6NtMcG4Xf-VHAfW_|3Q8L2p!YqQ& z$Dz(4_&Wi~)h0<&k^a>z650h;83Hqghb?$&jf9;@-ZkS$Eq918YaTq$N%|@YyS#be z+El4T9H&U~gi3XgVM7S7o|`ZgI2rz~NKM!%yuiCzX_-WzmP_1zEix*z2&^9)Oe)7pDacFtDB11e#9WH7`=yW?IW@E!%t}CGB8MR3aEj(8du^PJ|82#PB; zt*Oo6hy4LMvojd%^%){-zo!l8bdKIkk0pi+E>uu|%MYrEeJUQc8Nu8fD~7t8B62;? zV^Y!mMz!n{2!Yv|St5MY&Oq5cDWxKwyu@kYm)IgtSCI%PhAv6+XCDKJxR;7y{^ zj^JNTvqQU}R+zFtqX;btqmVm+ul*eZ0qm3HMg!r*vpUV5RWoBT4H;<&dCQP5TxBK< zefs{&DMgY)-**wK0|fe$39hWIQ@J>vB-q~Ft0im=qg%`Dje_woyyd;_bcss$$uxqb zgZ8$mGLL40;n@fN?M6Bdx+wUSHJvsaeWo{&Zt%|j|6tI^sZ*yg*c)Pf?IuMT)+ty6 z6SX|tb>Ce$d}tXuO&oO2BjaQv=D=4piEOhBd(hfZizFQT=wn6Q3j;q|oj~@I!$#&K$Z!QN(8r?wLm(Ce9$` zdvtKt*VeGNyIXy-sMe1ogTm}fpc{L5zd1FH>Pks7zp%J~rR60zgjsHinlaWM_@l>; zVfF9|2@lzgxw?9Yg=TaekvE-)aQ&Rn_h}6T2FO(kDFTw+i~=)KgGLPr=Am zH83}wK^q(h0y6G(W#7Q=H5<4PRYeO@M3jLPI-w|7P8B^(eWSNH08Pe5Ig}8Yl}-Sh zUP1SR??W=%!`>JF3NyouN{yz9@&Ulynu*ZW&=GO+yrr?SSD0nE#}Dwm^By*@kFldd zX0j%*NU77!QR>^?{Z35!BjUK~4+m6!Jayl_1boOd&XyM!mk_mEh$q9U245oU9ys#= z1s-`(!a6lU+FQLY`rA7-Czz2%?p}4vA=umRwcQd6fJCz$9%?=$Ah>HJ@Jxd zqB$s8HshErS@HlMX~xM~%h?4OxzWVfO^DM*#z>OPE+cFO0eamo3494R@1oO;PAd9`>i3Fpwu-!awWcIBsT?|Ix=E!CiNybi(ZDT#%=wK>*AUyuQeQj42 z^UoFHauT>Qfcwhh(+}QMQ^EGVZS-hAXK`_n4Pav`z3eGjr{0__K6>;hcQbTRW5p}2 zb5n^(iHf#GA_@ceb8{3GhDs6Zs~<>*$m3?5k`8VvZJ+>>SY7Zm9zC`IOs+uXHkxGZ zhAP$G*OzWGlw#j9l!--AXiQ9i@Sgjij-RBGgmxv%TYqZ*UpD#;?hqN1A-7q9eIL_C z5FP_$m*`|*c3?~&TEummwX9bd<;si)b-CR~k@~HHP#SVJK==dSgU|rGpL+%~#QsuD zF=C6UX`}-(9Jfbx`Tl~lsjkfM*sO<7-6*iFb0;ZnDgs5D;g6FT>zkWcT|I^ePu+z{ zoFFwCvE91E0T1y>MJ1-E-trW_`Qr1~-r6MNX3y>Y58RKVM-Nkf#AYufFpT-!~d1smx|<1}G&mwD9#;Z(vOOt_((|o9JlRgu3U% z&dlS&DJi7-QZ@Q={CZ_PS1x1;UZ#T4lp)#IhfzdjT^ksatcWpG*0;{2FnPTf!7%ET zrNfw=on_^RJR|D#e4HasQ)JFzG%&dQjU5_mYA+rXeZpnET22dNI~Iall21=j_& zw%xnAar2hQ@AX`_GDg%yThuI@OwjGN8?ucBdb2#&F*_TUl!T6^gR>XzbIK|`o1N)E zojQRe9@D@LMcGXKS~?Vp$W1)uoF&e=nv&ktmczXKog|H#QwJ@w7vv zhEd>?V^qcs7HQVhNRu3I`R=!3^ZGiXpoO{l1^8h|2P?@^juf<@9!Z+=;NIH$I-!(Y zU<}zP#ovi8y?(fY)+z{mYM9$7rPu3CX(F3DunbsdM$_V?FW}}+8qUQuYTjgk1S7Y6 z=@EflTZQE8JvW?IR4J~@dK~=Ow<&g(;GRSCSo9?i3g$Mbm#h!{`WUbI{!qgwZB&~^ z5rVtVz(0LIfPhM!?0ZY&+sq2~Q8|>gLg65|lmlikq5~<9BalY`%=3FGn9SW zP$p)kVIgW^D}#pck^5|cYHXjMTh5vi*0;{JfcL!}Z3*mr>S@emn(Evnav2DaE~WvpC!hadnteR#27T1b<6HSMBT&NoG-GWbjhj%Q-qhgqd!a zVY`K<64pKU-HYw*1%lFcIvt`6w?7B4kP(+{Bn4IT3-fsVsi&~BwT;PWMAZX(t2UEa zSX`u84qen%o@jt*BAlcCLx&F0eQw;`ph#O8jf>|lgV=fbx(vPDWnUOiT4gH9ZZ>m%S6j2eW)Z- z>r5pFgwpdV__Ax=Znr9$5)N3HY25P_GckY%O^I2z+oNiS{k$UM{%`0jbUG2kcMVGm zWUiD>RUbb)t zoCIi^rm+&q?M=a3}K4qW6ZP54q~W!H;3^qQD8+lX-2T_@fwqGEtI-E{;(ya|yeB%Ecx5Rp9+qRd8VKF`&d zG9jp0ckA00(1h_%k$Vd2p1Tp8c@!YM1nHXCvK|a;)@Cbxk2D57&r8u`Kvq$)vwe>6OT{%{ZRXhLNspBzKL*2x3O(h~Rps5SAygN)dx zn^XlsZVc1-yuX!kz=B%>7*i=b!U1cf170Xc`(uOIDrP9pm}}r$R}(-ng}PwrTpLWb zqJWhXwF_2Csu*F=>9r7qUbSaDpV@eI!ScSMn&1~?KUxU5Br1rQ2~v8*NGyGif@}>Y z_0*!CATJy!fiffBIE*wbt2d5Ii?SvZ&aT(9AfhUV8&~q@A=2+)kWd^L)sx>S(<#j` zJfmyFCDEuFG`YqMss^1YYi~|YR5*PnAtRvSk z4bBxt5+6=EqAwc>iMH89rvoho0xyC$+d(+LfOc<=N*Ixl2LyfGr^twdTu?@3od@5K zCi(>4OnQpWMG8^rmh%{cBw7aQsh&E8cobu6Yl{agb3R-Zw7Oa!x8Vf$I_yCqd0iLH z%=Bh3J2OKD`M@Xk27waIvfphlaZrvg;;~Qh?Y7r}xw&=Cke_#T?+ z&HnQ12q~2GoK&qWySn=yQ1yP&)EY3xfxvs7#`l~)jL)s_VbdB)F$Ejoo;yH|bme(i zTwWrhRbFYvq=KcvH&Za@+h64wvyKRjQe#;PtzPE>5hCs+nhsOVf!KW=^;&rN;fHYT z$`uTDb{)#kIHT!;9XEJNniwwIwb@W9hD7(HBw@2LMT` zKQ=P(smgCPEM^*wEterl!3{wBFMcb+>dDh2@fps|qqDq<*8DuKUOb0*FsV3qjEmAF zDaPi=l|W}<9w*N{h`ISigi!~vFOmBJhF7nVC#MEgqw!}Ck>?2~`cg;R!fgiKs5zA+ zlQe)>(luA#)uA*f8snb{!w9RZt5oZpBr$PQ6}q6qz=1t8X%b_nl$E2k_vd>)(TB8J zOe;Y&%xby`-$at^=e9A%rTP2%4W=XjyPcw%X};h7)TVxsuvt|;!Cmt$9O@`Sgqa5Y zdI=8|NhK4w3OWKIe8s^nw$Ta=AZ z7Gre9*o(#p0cEW2eAWb^MMZ0l+XO`iwg$*qX@clW2Tn20F=)Z_RCQOX{H|hZjfML= zn;nz^881bx`$D-R{eOc=0@J$brTr`a&U0|(YO6_6k&Br|8NW#xUua!rerWQbHiJc% z`WrWmE-Nmqnj5X!Y?b<~WH<$Z z{s)zq6RnY0`*w#h4GrqjAiA}ws-T95E@d{s6Q@?O;A^O~sIoCk z?~Zyul)vqNontpJL5S$Vhk$kuI?WxE`#x)W%l)dVFJep%of`D_B<5G20>m-QGgxWTZgBsCxm~5w?G^S${R zC1iu$ocoQ`peAW#No!p>6$i5?zd`TE*(%MvxOdQE|KoN=Oj9!e!P&QxdrvgxKg2#N z6MA}0K`F;kCYTHBJHYVX<`Qmk&3<;meGhSe@#A*q6lPy?BWkFwP0Nk%LH2rRwYxaJ zdK5>}GuXU%0k6Jv0mhp!D zy>5{g^y|R;EI-B7gv(w<;Sf@UmU1-f&FBeD^JxvP1R>XQox>La%LaQ2CXUI2k`qxL zHO>je<_F}gMZ+=k(3K-I+!yIJuhDRb-JM1LOsKr;nh-PD+QFBz*g>>+77BaJ@q^C2iwp$2fjxkA; z$^#A}&sYrh`bfqTgd~T$|4C?C=Om6PE4JB5o@XR_)SFwNeOQ!=zmiL=qhU_ZzUHTI zn%Fu6H4U4QbH^k*nRihPT6^W1|8LbOIh4Eh5M)sUdaY$NJDUeK_vn&O1o zUmdBR#qIYV8YLQC_WoQ6+c@@#gPP4vso3VnR`Y~zHl(B)(WTyttkq6YmdKpeigh~n zOp{Y_k_d`kud89(=I8J4E)e)f3FNn>iZN5`Qc2eCe@)3$EvDDbBaRw-_-?}mwf_>N zd!gJUu#2Xhc$0SJCqjfvNdiK-(Fi@CYJihuLg zapMMxG$ERC>xrXIi_jMZvFmy;2&ncKD1l6I6RsWS*o$4m+zAm8?-SK5J-`nU%r0VJ zH2R)n5Vu>yZ(*jl2+iE=#Fa%4M3#qDVlWsXPBUT&CU~AZ+{PhDbY9@K^XIXCeT~Xc z_WR8(%wuV3iN0gu(*L^g#)2^LxEyHm9R1xL(lL+Y3@gWuVQF!hC(cRglC@H(neW~w z&l2J#+TGr!2(Z_k!--R;=<~`lgK={P_M?(C!OrF;)~{T}WHO}pZnau;kBdu71T(ae zpks7$q>YM<#+56VNkcm=a*EhnVTjqe4(8{(2#l)5Xf!jaW@&<+E^uVI2Pv4Ao(4sY zsPqS_{Z6%fN5OD_t+V$z30vqs`V_SExq(dw$*(!1s`r80=p{h0AcWRv0;upI%=8{a zjy6TCbR4Sjiv5R7?xQ%_7qQaI0|>DU5p{s{3X1eKD6vi8j5>obHQK9DF)n$kP_Sa4 zxcexUfAC%C|Nf__O$wXkJLokZ?B>02z~SWhC=2i;&w8jFm{kQ%&ISlBr5K~3hFI}G zxoP}d6KlYgix+T0p2FhdA@_VZ%d~c9QgkIp=?&&Y7z597=~C_oW;WYY^D9~5dfqh3Xf~ot`ahg=09AdyrDI=7UuEz zqmLn(OwbJ?L_Hvsf@B#+Nz72+(sU6)Yf&*qbBV1Ef+`X21E7C2kD?UjmkzPa!C)6A zcjahb(V6J=dW3WmN;zu$x_8T$CMU~O>xvL#1Xa#+gfgOx!k*uN8r>lAF9PADIn7Uz z0T%+k--1sBi*Dvba>haXfG7YMfFuZn;vAY16;iQ@#H5m>E-C@I+K#*5MU7e$x`5PK z6{t^Lj!3aN@04dbYbc2~T*MMIG0JlY&Bkq29Iw2tAB9Lp6TI}|3s}FgPLvswaZJdB zg+oht{D~(>&8)mAb%Vr9&%S`GS1vvymS#FEy#gc z<1reKF}ie_NZRhb?_REvmbCDWc<}o5YsAbNg>A%1PA+yNV~PkBj-b~Hi4;*(b$A9r z*v&t`Fb7?%yZ(o)G7xhc;kJX~!$b(Q4y{7WED_w4RS);giWu3Lu52{SAuyV>(dj(^ z1jh+YbzlT}U{>^caK<(|Yytjps#530OHg8$Td7lUaB<+bs}kM7Ks|Op{J(ex#cs^N zfMx(+I!CUfH0BpY-R3bo_h8q)6NOBasnqo8b^r)&2s1!qJm}-ng^LJ-78aKlnSwu0 zu(h>;*Is!Q@nl?;R3X`po`=_H=wVlX5*%|pn_R|tcUf*l3|G)uB2 zGD}pXh}IY{E-z!{=y42(eI(TUC!rChvG+Z?2XZ1(x}9(xtSRpIeZvlhv|5Pbdt5&kWDZIpf6AuWGNGw_&)?S< zG4yli&Yfd6+iJ?ym4XzPU~FXn+PU-0T3WCw+-PSD7tfs|O8mh0xCbK3VIgKqy>jU? z{S2qOTmxQTTO&jl(?q+vtNYyT?VYNOW}~0-`{|MjA!^ZK;*HDEja}c}!GI1;9LL1r zMD^xEA)Vy7@Y*Fzl9XYcPV~nR&r{jca&`563tBUu?%P9^)2{=7B8=ca@Bl@+BpO24 zM6S;}oFeZ>5{k+g+DU6%Aju*G;e&vG8Ze!ze)h(9{p~K{#ArG|a1_$N7X<>h2a38O zwa*!e68=t25WyqHGa=0BBUE-OoB2YPR2>yX6$jCbF=d#!a&-4fQ1X)_Mu0vIUWs=Izm6~2K z#27O*z-xcY-9-BSQfrl|QzWh`@>aX2!yq8p=4uwy{Y>c~>=|ENTEN1>Jk4L&XuhPE zuZxJLI;%kx;?U{}G27bL#gj<`PG7T+(>e1z4y>$E_Y{fE*&wYy7-4635B>fCyXEhD zefqO~V15vg1B89O??ZSVMzPJlu2W?u%L;;5Z|?5k($%X-vYcKceUF@orq5J_9vzGm z+}zrwfl&MN>+3g&n9`V9uDa^8x~fi9%0FP^i?Tn8@);XeB`ts1e{;i+%89u#3hNG@ z=}yN*XFLfhyt3!fXq0J#EJ-;_Pzu-9*NF2)x>L)nmZ0V^As#fB!bnub^UxoSk*5Xx zGRmS1JaChtjJ6=Olr>gJ>OzPSH26fwhX&TbxVe?ms9Wd5ild^63GLyqKU;3xn&Y3d z5a|#@J3G5rUOq$*#X7R2$Y~&OZEcMj-l2UD(LhrO?nl@Ntn6VZ<(iQM308D`NBsek z@tD>~x$=Si+`a7$6j_YGZ}USFbuWj~|19PEc7sLs;EYp7a@oyJ&A*bY&p94A4jq!? zE!czF|L=5X9Gb>axsZG}WBaAke71+hIOwQ$h>a}GDATmjtJH?pp=a8luT!n)XL+-gyrNA1q zLc3W5l6_)cC&j@BL-P4E?Y>On^#5n?Pv9*%%JX3Ssp{^t-DU3Vnnk10&PbLKl8nIE z7K_0cFu@jHf@3>)``gAFwgU+e2*Jb)7~_}#vB5yt!TEz7jLjlIAOR8xkc1?(kEGG; zcfV)t?yB!sZ@pDr-KWo;JCaEJeNO3jbnl$or%!iv)w@3L^P<^omu8;mLD-X_|`eE}=c1u~95FbL{({5Qv zI4cIN3pyjSM~!iqP|Mk! zJTtaz*+MVly=X2R%$f7c346LvhiKyGYb~u1eO1IbTgEMJHqh^NEmoW@earYM+4BJN z0@<;9_a0(o4Wpbcl)iKKE}Jk9DPCYn-_F>%3nx!55b8#80VpBugvP%8XJK}3jzSDN z%jsE>;K{jIOV}c;lK0^=|FZl{MLGE)7zQe*XMwm$iEf>960zzm8e!{PCbL0D5?bs- zbLxDH7a_U`Prvt=4&u(@(prc%WO4zrtOu=625h<_=o+DYyHSOR+U!bY+q5?K??&E~ zloXDdahQcIt#=WigNp00gqpVh3f7G-2*$c==3X)aVXD&!7z|k8n0=v@hvX_2{GE2k zLVb-9m|L0_?YOZ#r<{!_n7uMJ)uFnGEK{uS?D@?akAy^-QqUSOTefbc{b4>&#DPgf zEg;ORHPs4O6VFtsze}8y7ajKNt)s%sw|aH@-e6tOOg@D;Ouj5MK!NaNJlviOvk+F&V*? z9Ei6bs~P>Ewt~Vjk@TSn*NspzhO9F+Mew^QN{HHHZFQ{(D=6C-f6LZw!~mOcjNY}A z?6g|8I$c_BEMxG-%>=V^vuLzh=#P4Ie9E-c6owLZG8@eXl9?$Cj|r++wdb_)L84*r zbRdltZGLCXD>(-dHDU{g6Xl$#v%1bX|2#bI;)|>`pp-4>iz2j=7@KEjsB_VkG*rG+GM69g$A!P%5_U=Y^bp=Nse#i=5m7+a0jms{(oGdK| z-trrR%2l17!eg)mNE>Rq$MrqH?V0LzzdeFnSfU%!FbI)$1g8^>4?H0QB zoD%P|_@2jKg3k05Zy2{B?Q<~-W7nQDadLhUM~@yQB`hO>ef#!d|NgTm{6W?U`b4*H zHoT>XG0IWc#H-8+NX=$;k*5m15P@>ynXj4Lq1H5BAcShLtB_B7Mku+4}( zj}SG_fovTB(BUwpq;)U(YaUA_A{Yj2hY|Wa5H~MDKD-?fj&a*l#cN8FASe=<{B(q+ zO~LjAoA&QOdf!O`FledE;1_R2f;p~az;R={aw?fabRwu&+Md~A*Gdna7u0{-D6zqC zfVsJCM2lnQJEG?pQIh=L+i%0t;sV3k?WQnRy79!RPYy5j(P%(mjE9n@Hyn)TkQ&1< z8{+KqF1FXq2`HN^*F?Xud-q=Q;?*d%R3}o?xpwW|Nx2q|Ior=S5=_m`*w9WSk@UL6 zua{{>iemSkJ(iwYUo#SyaM@gGA|`Dg6jbgaa2}j1{w4dp%rs`K35}|9-NPw9W|Lsf<#LfQ&HW0O1w5-XK(KP{m`=ls8Lg7&{hgM@BQIKd|UaHREd2HQ~>dB19vkg(> z4CJE|MD-y>+aex;LLeIPEp0CbQXpv#bXsWd-GSc2%-T$f1%*Z9h6acJWRL{4f<+nQ zU?h;yevE4OxS30{k7$i4g}F^z$kL|}PGpN@XJ%$``IX;KmhHmAg8!`93sX}wLs%}hojZ15Zqr=BpCw>J zG2D!7T=ZDMZ#6T)O0H> z>@${rW>IKcw`{?1t;b1lTcl{tZ|-By?!D-ArY+elY)k4)!51Ni!_1d4vNvtnOrDO? z)neMpG8HZ?iL%$^S{sb{E7d%u?Ti}x1ltCqSelE-to5&JPp-B06~e~KJ#g_wn4X=+ zqFI1iQ}5)d>1mv`|19RxDK6Lu#aIeF^{H1;LaEWTW&Y%rvsaXhS&H|G3Tv%HU`58t z7Zw5GXd%cF&m@vYli_~uYNb~g!i7_+p%!kDIbz~CNlCh};9@qn9RALt)Uv(d{nyn99d176_^ynwHqVse{ITs{p38`0`dfGI!q%NTYra_f z;2aSaNb_Q>)P+S|dVhInjeaq3YZmI5{#kGmV+qSu&y#)Uat?PxJ2A0efju zhAh+N;aI9%yddKx5L27kQ*y$TzYJ7QFz`YyjL#Q~Q(5but^G64+=u=9_OqZCUJ*Ic zMbG4`HAcgn;-!rix1i)>ZOm6u2+Ak|n*g@0Wg{>om*SI=-KN|h-4f_q_c3lX_yO1) zf#zzFoRf)Rt1<7}D4TdCcRjbt!&R002{sSY)*Jc08Y)wqjIyvki;%F7K>6e^Dme$u ztwtH;K*=q5G?YKa#b}wTVAG~eq`0V;ys^G&l%S(taY?mp#`+nJkdIRGQS;Mb+&Mj6`HD*AmD1xUuI@d3H@%3UZwn$>vnsPM2|9-shGm zwFr$#M(r7caFC@)b#6n3oPbxk@`9WSbAx^4bI8yI*M{pkqNGsObj zD9~6~iW)j+uF!6@v3Kv8SXy49_Zo3T?SzeiY!p$JyKs`7FXJ-IZrzL&Ie}Lk*ufa& zTQ+YdG}-Aacg0NFq(EXuZ`Jk9VF_(#Q+alen4qhtF$DLmFdAEOX+CO@T(&T$NJi%> z_iZh;E*5klIJp9x6G#?8lLgV2V9^RoAeLrU@HydQ7HDo3ZCoUcwJ4|_gsbe9C_D%D zI_R@mMr~77R1^uxz)*GxCl|tk5826j%8y4loChgcmY^ zJD>%@+2GrA!B?SKQRr%gIG8jqhK{zO(cTZJ4hh_xO(*%-5iV?a*a(VIwljd(0tsV* zrxy97iiD-2_rVl=xeZHg-;8LcNp(|E@cu!8o#Ss~2q@`xn{nGNSre7lBd(|Pdj zv<`R?NjA!uN`v+ZaZDuvRh?qc3s=Fj@HYY}V_=RnpUG7z#;&wA!gTPw!`S>r+5Oj@ z_t3C&6fDpbjL4bv)ZTkx$OtY94uhl00dXeOD;2arxizM|nJ#?cUgo&*xRQM|9NNE| zLvtIsa4nvR&f#%4NeVf^rZX+jXf-I*O-`K38Og@a%H20#$`}P$WeFLjlp*6h z*?N=8KUp|=<_n|g6eB`5^l)tVVRtriTO(PQ3xThD2q)ZqEP(?ui6E#*8NW2~q|Q$>74%DZ8^4pNnNPB{V@M^NVDX%3Z*Jd#6k z1*_IJsctJ|nZiTm0dk>?U~mgZRDJZo^E|? zzLmBJHeK|vR@!o3Q6C4DTRI9|36R3Ey(+3Pq1#Wj?@3PAK=49~9?Mk9t$G!;_0_t8 zu8#2Vd>6Ug1tC^hSSXHO*dj-1EWs#YL=bFE1NB$1wJLcZh*T3gYf*)_QYqcN;A=MD zl}B?BavKosM5Z=blwC0SVatDq6q3yB?9+nY^O_tls;gT;01I%x8}Bn>nw3OPGYWAC zEmF&FOEmU5bUw17Bx;h9Qa=wwJ;-DQ$>v$;!LluQm5rkKF+e&Bj`M5oZ$VYp?HD83 z!BUzL9eTs8d$%rSk?fD6n8K1&Tfs2SG)uYSnLWBlbEjD8X~J#9BC@an7lwsxn3Zx1 zYQVDg3kM5#wyUxzpy6+d7)F4Gw4 z6pp8e8Z48s?s9l`atR_I{1@JuE}LfFvnWk@WDW2v9Rf|okQ+I+2P_;(u^(ppznHxl zH#lZEi3+?r9D}N(G@v+QKAy0#9*QinKyG9Zfm>MN89+RRgN{nqr(9VvDlKI?bxe~9 z)2zg{NtF$_+MXmv+xGj^UqHwNiCOYmqM_}g&xUvbza zYfpEIuMDj%7ZPxZefM`GOkKVPy8Jn#JTfkbbX;GylbZXyeJ@Ga^%iV(N(U#lF>>1@ zLej>ju%MI}8)>8R`Ls#(``_O_U$i zMzf_M7>i?`Z_^0KxZtnSdWH%wQ?&*ul~er|z$+aYdqxN|`e~F&o}=458>^jV#H|KL z>bL-u3-2gzARk~DX_s5S>EH3wBfDMP+`DcJCP{H3L+Co zrERwxCxSc^*KgaVZX7eV@auG7))B3tqL4RobUn0oVxkt&dbIwccAwThW#*rR=8*E~ zdi#RIR%#5$!3lj)+y}VYUic>%ID4Y8O3;)2ywxdd3uAq3uefzstB8?NC1iX?(Q1pFD1vDHPbi43IAMsWQP#0+MW}9jvm2Jr6|AzLAYM~*+4;y?L`)R zF&I(UGc`kz$ajsBcus`k;y~$>`>U(A^3^<_(P%{2aB4SNRUjyOK^KijP?JL^0vnYoHTrBPMa1I|WKa%>+p^L8Co~Mq>_9(y>+WdCo=-2tq83wa*Yq*Gt7H zKFM7j;p6JD-!COR1%oomZIKo7%nG%1WapdmPaNZyxg>tFZf_BzQ4d)@z$oo`HdE@d zDFtyO32GaV5`cmlC@rrbJ-z@j$|!_q1$IFz?$$&S6-A;V;yfV|AZp{jRb_d@f*ese zhNBTgY7?<*T<8iW?sM~DmxLP2cf!GzgXzeEmdIcuBaUM#B8(#oc~4?V)!vO}6G_ta zYsgKsb|Fo_U$lpoD=s?DIiXxymol)xMX^UT3U*#yq8@Puf&ENgY+E~DGHh*I++@Vi zS)$xG_ZWCU1-Qj-l=1os_Z77PF`tieVR|YHB0&cqi2$b%D8Iw^ITofmtsyv%nzLd? zDF-3~_k}zVZg3~KRdU?vbRJPzmEW8}+f*NaGuCIC67{mTT(vFVa`Q;t1-UN7#t>}| zW0a-z*`=jL^t!I|nPqG=o5fgT8KG@rwC28rg|#&@+R{Nr-T%zqTsm|RF$oMou~*4R zk3`1kFD>CiZ+{!&MoccY*}BGZ>aMQgj<4N>X>+|<=KBcv9C9Ra-~IPf#b=z@q9W-9 zX#oImN}6Ds1Zyqu{z}v!ID(BSeaAoB>C0@AEK9vIFpJBz5Cto7qI);j-|c!nA?JY< zaKrk@a%eS`pot|@t0r7i%b7ENyik%9`C>eS2L?GJxgCNV7r7c8g0ON(Bj->mRlAz| zF!C&q5U&E!in7IaWC^Yo-xN6odnBMzFSMszz}qhS1{tem()@`1vQfN)Md4Oj~EowuA_DIcmW_Fevzi%vr#ov0$ z+7Y7!j53hUHO@8q!yyT})s+>peg?wuAi#RhB;zOei#c|iJp6Q)}n6M zSOW9&^H^G3^qe|iK_LP!#VyE&QBYatU1$5$g|9+aV~Z+qL#3^{ci}(9u*5JqE?ut? zt^J~?1* z5cl=Y*sxfDTojRT@4j*+t0Rn*5H+F9veMHWbEW)~;P%kU((Y*?#`EvrjS-lx_IU;~ z8ld#7pv$Rd!X2%lg0L%y?Y0re%+O%j8XQbM&$AM!CURth{GJoU87H@m!VsJwS7GaD zK3|AZ^f?s8HBiC64ENPg4E?iTRjybuqC!j}U7Vev`mdrW*H6;;v>7GaACSS_@AbSf z+38GMPp#ST&|RbSjG?%;yi9wb(P&_HW)s!7xSWwi-4%*_U0EGCZL-2i;*k!s+ann%eX))I0$d#IB+5l z)ptoU88fYfmTcLA6r{@&2%tb;WXjKl?2M4AG`k`80o?am0X|kKs6I6mL0m_IwPK46_=C} z0)qs?C0RuYiB}jcGRV#do9ocr*FhKHn7SCf)DHSw=q^$H0(&>((^9txlng>ryhg{4 z$=PF3l$rfxwo5^-(t$W24lM;$Qe+m)qtO zn3991NBa1A5FX(D>Uzi9QB>fUayFuD9Ts0HxMsH);qm^s(z2}*9tO{EO!=zQ2oM0J zOzSC+tGpA`Ic#)T=S7SqeXAbt#LsW|n`#E5=f11Xjz+m=E%^7C5y9ZlAcr+MN#N^_G=iGCt&@<&+CY3L6 zNY5ycOj#bTmE~nJ+O}=mR=O@%AY3;uB6t;b9&Ub%@Ktey@GyFFi?JM=G#yw5bxT19 ziSMyL*&po2r=t-LFD~NnLx)K7Mot5|_N(03zA8v2Jf4zuu(2U(f1P-A+1s9QzB)5K zDw;8r*jLL*b3ED&y3WO&Dx|oc5b`;JQ+~Oah_@(g@(I1COU05CD5b-{`?D73_Zalx z{Ze~S`hRZo2cb1vTGOS2Ywn_Y9Osa9rG@XL<7-BB?#G_h288>%N<@^8(-^c@52 zeJhSTjRjiDeG2E%m4$oxiI(##bpp4VPp$dZ4PBdGC0^IpXL_B1>CeqwMS&1?7kR;> zweq!J)vMIHnor}9iMCOd)ozc%_%k!J78nvG6tY@bUG;QGVII=;GH)~+6siIjCQ}}i z8I%ziz*&~z8_f-A5;6kJw#ViW3t4uC0(8(LbW&8yT z|H$)+G>>|eW0W;#=CFY-^<5AX1&|QsdmA4c%b{qdWI-4j zU{p?Bh^YE)t|HwoW?gOpD=KuKPLX)1ZgG!7JBo50ApQ!YMVEr~#kG&Gc^3u{%uc7d zkWmElzMgklx!($4p>p?=KNl+Bkj25HeNaYNQIfhqH%{6r;W@&;jVt8p=YTL-9$X|i zd9>P1Y?_)ucX^q@biSaLOBei(RGJd>wjw`c^p{}D8u%`R?j9xQPd7S3 z#Z9>)p*YNInK=aKKO|S4OGaV29T`_EoYx=R>|a+dpynL5S3piVkSOd?TpH4ssKB%T z3Qm|#7Hp5yMcwEqUF)R$L1_dQagZcE)v@LAScSZ7;a{#YTja^1J6W0k65^-S;HNsf zkw=!jGYTJc2OrBo9VZ z?p-sdmMhk6HZ8kz6wsffqtve{+AmuA+@zL;=ThsnX%{Sa^!CNGh-HLqm|`Vcw)|kR zk(Ks#b)W>@`imS}h|ml_kJup6V?mZil34nv3c^l_`Y>i;h$u*84AU(e7Sf@y+{JNN zi>UI+qLGu5iIX2-r`;i>RH7p~siQM+v1LLAL15d~t<)%HG)jY;wF}So!-$D*CexYi zOMnOwHyLrsXTb`{QHaQh`&{q>bCX3$abXDz?J;4ZwrFPWLJ{756BYYJhfa?}*XZIW zP!4U6XSa*Qx2x-PiTcV`Ed*N7pr&LE^F4akwi4FymmvJZ=E=ZM|LYgx=U(t*wl{T> z*xC~Q98p7i9whA>V3jO=EtptN__`3A9Ip#~gkC~%Es8jRQoi(}3g5vTT`d7Ee&ePy zUzmGV(vf{~y-PTIDnRXM4^Aio4_Q{gKAmu1-&;@&sV(N07>)TMWlngAatO@|_`>6J zwHYyiK+1{b0$p5qhqe!Ypirbk2)Wu5yEYn0&=|}mi5*gp+neTGG3+_y+;XiU^3Axc zS*aoq6~o%b zd68wqP}&!a!lCK7ArrWWmm)Yaf~G$Il38@E7oM~e;<$=qX&H)hO+cV#(jp^GOUnR} zYYW(k;}~1EZzD3rP+lk$3R9y{Oqx|YHKa4=SAE1tIRqRm6k&}4YBoheFlW4$;zGjH z8s);nQ#tQOa5cDs)!Q3NQ4Pr2S|3S!2BM!?1;WrxN~}A&31256i{hS~Edo&%st)!m zxUkKK8w67@I8;UGy1Rv2t;X@nL3u@QTs)$FVS%J8uDtm@<7MrZp~bZ;ZxNn)Xg|HG z)Ec+0qJOe?WERtNpZjBY+7CY2+JP>HDGRkECY-4pCM*A}{=NF%B-%dV8{(AWf}zb> z8#om;-xAd1uA4saWgZjHxBjSJPZ^Ul`Mm2rWf2@x4KL|xNWmRbtsN4A{mk+2k|_0f ztGzx1pB)Rw!DYiv5%R5huX;}^&yXsOBPSN{uJ?Wf$Bv(5Lcjw36`qd()lw8i&n0iP z^8HeH!;JD97Po8-$iQ=D+aJ64?8MLi?9brY&we(}-hURR6RQH_;M(ivC@=d()RR2n z{DM`&z@Zb1_+Ov?H2&cWpTpO0`UX}`ETI{-(3A<3PONd|ka5gBO!7_g$B*x4C{eQp< zp8s67et}V*5}F{VUK$yp7mW3qrYTjnyTUm~8t96=BwN;=ebY0-3Zh2Q%tlJBBP$c? z6qNCZVsAO}0?~?NftH$OG#b)1P`{5+N*1ZPp0%|#TNs(;^d0uNj6AQ5Xwt#fq-7^| z1xE<*{p4aY`z94Vr5T-v2-NZ%{oa7!&!b$^Tu=l5rDYC}ps1?#bQiY!T9#TaBLXl4 zEGv<>_}nroFRIeInJ7s@Z^#}PmHt|e)ki4I;IJ`GeYk-Ya7A2~#hT*WTb-H%7ZA#^ zZ#kW2w$-FC*p{pc|0=XsoOb<9;MrEMuSmA>x0KlnYVW6dft5!&esxt3SC1c6L}bPy zySj3I@6~8U?X4}}(g<-g4#+Tzi!=M^nLqqYoUwho6|64bLv%@YRY_)(rsD@gY7cSG zeGlNlLx+*48Al~7_f>nUg7uYKRvS^VnHUX7pniD%=%hmYXydmqGCuKzl&`Ro_*@Zl3w|I~;C zaky!1S;~t{fZ)7Ulh$j}h%h@ng=RCs)XWrl<-Kl~%aC(XdCxjGRM3?eeZ+nb%u*O??qirL965Xx z-}u(e=&r3&Ka6eLw_)eb?G!$rnVt6gzi1k(x#cd(zOUcTqMQX_HfPHOVC#Flr)U%c zE6OcNjG29VaKWD4c;-`{j9>k?FT%a|KZGxQ{)_ngzyEtIEp`#f#O`knUwX4cL~NJ{ zV#w$;THJa;q1W%T3=yP2j7-_GWh*sw0!Lm{seo8`g`t^syrA?KrB&cUmqb#jEm3Ub zqB~lR``Zk1kzA{C+952529ZH+g+_fMso22fB~Q($vJ5wV{YKn<&%LB$^@?Z>GlLhMcw%IbAv^~&-W!FkrMFF+9EmpVwYwOX!n zq^u8&5YH4|@@p@_|NP+luxtA^-`cs*m4yW`{zxYWB8tV~EPY2-5sjNl{-RkN*7 zvxPz{*N4M|EM37Qd&(lDbS;`Af+hVco}mp-xp~ghk%da59NJV~&nYY{pY$K~_o9o0 zWpxHO8g!`_PwNS-8mbVu>y))2tnuR}c}*x?xvp9E;t>+MV8UHJnt9sjXCLYHKC(|w z+7#h=R0WzoNtp;s>CV>q^85mZ{ejIJ2%A6`ay&$yrXwsY%@f(8Gk$etKbF2r76zI` zL4MqbquN^tjStt^yKb$>;9wU`C?DfcRUsPA;mnxKk{B`)viyf?1S0{@-nkWj_PYOw zo4$E7KJ>AF#Mi%Z3x&G{<$JAsbXL7mz{0{J<%OHACZ<|#%Y{Xi-vO%2gCCTSer!bO zYweQz1teT#IilPFQY=-z`RM>7-qg@Gbs#wsA|I*UDXOpzlsrCfSrGb%d7D(_C6vbf z_ur3qzW@EiII63S=^m3IJY4vn;}E)d)%?}JY;z{bkkZH(-i%dZJ8mCpbd!1X*Lsl7 z$9Bk?*Z&>=oQDq7L*2`VdIR|su3Lw^bjK6vsgzh=T*8TC$1xoAZT*6bCWgq&+RU~mFCR%7E$>^5n2*hUFl+ZX$}4U$hHd>ktLl~7G0=<(0O%)&~oK!qI6L-Ts@XG zPyjujfbA~LBUhrYpD3*##^uC>Vj&k}CsIIS%ZzN3`^q_tQi>gF+AFQ|Kqen2g5bn@$GI_Sjg1H0 zYJ&s8oAvAk9CsG*djvtedHj5nLlAavPYw-kba)TdS)8IAoT~;r$jepMi*N*;9u-JA zQ`eWcfS_z^Z#%ehX5)@PUKe;4iFn>;f%AlUadUwUS zl`5)u6~?_k#nxWfQ@>7utQSUJ@Awe35)QErr&}2JcYjyZoiOKX6$IokFPE^*ImDC` zMY^Wgy%D?JwPk$m`s=ZO&tB}@zLkPSQU+v%Rk{-%)Huxb*k=nIV&mYRjSZ_jy4@an zy)JtF9(scwDGbl{OY+}U^4n*WIigvu3H>Qx+ikmNn8db+`BbZexy_r208z@gR1vOS zuMbcgPnyf8Jn3>g{*sIF=D&CgzH-AYNRqbic{AxsKdG2RGpa5EUh=nk1-?ofm4?|E6{2dJ$k#b>Ml_c_2C{1a$C7M=YoO z`+*K;#E$UD9ZR{bOC9CbQwX}IpTq>e$cKQXsKYT?%TLtNp_C8u`8K8iLmUgTH_cot z3jJ8{gAuk#oY?v+sR=D4WQ3{dSxilJ*wP2rXV`nM^_qn!KRBR5&;RIGs8LQQPoBWS z;sS=l0i8RFZFkc+i7ABSJT2*!DbqZm;HZk@#43?o+8*c(neTS{=&$v0{KN@@-tO3b z2FbnZaEe4LqCrF=bNue_{2IEie=}~s^L|e*>-ec$>tFMoy>1`PmZSz z?$X&@G#RE@OGS_jFpzNVb0-P1$_TxPpb)gSxs@oc&CW^jB;CEL60Li2GwyW0(mTAi z8fERE+_@Iz`w^0Qud8Zu)06szcDVAN9lMD@`KgKmElp3Li1k>LEc;K3vay(c3D*aO zlaH$4vm-Cc^+^%j`rt1}&Np zIv#&q)N(qq;S*gvp=^}W^-yQSIIh``;~T-PS94!0tE*U8n8)hcvdysw!5t`FBQQw{ zK>7;&fLe*?RFn(_&Ga!=i5gGUHjv_kdk*vmBiwuNKI+jiH@69mMzc7#vh84@Y@L|U z3JnOn>fiq!RqKD_+jrUIw6=Z6T|+X>-p|TfA6w=&As>x!`0!y2``w`3gTU&_8qV0g z8}UrS>>J$ORdHp!ug~F}jphBP^c3;g#oUA58FyTDPL=;uKjv~Jn)}V1=1`)rzSbQ= zkS3IIjApBa0~Z~j-_uIp1g8nfCDsC;`s`=%v>$yAZo2JuhV}{T1_6> zY2VtvX^&8OtVYOKCIj$o2=tTe;%di!8=`i!9DkKkj&+ImjlFE|lc@eX{CCxSj=>1F z9%zH#x6XICbJlj)`6w8i80%?S|vegJuDI_XG ztTIGO)D{(v%K*WlQ_II>i*qIAOKtm!^-RI?F1Nk?N67M&yw;cqA~&|Tj5oNrbOLwY zeFyHk`*zGP9H($hFa-LtZUc^fxJ_u)Ui{!h?P^D)Yee+ffX{zHh0(aKx&zP?112@%27 z^eI>8KB{Y>cy~^2f6o79$k83HBF%b8vpyUHlk5GV)0~-q*fW*feEu5mNm8?>5S4kOsr`wBTFF8)cSpU7EmL*rX zqE-Qnl*S;}=qZimQHm35JsetC!r_$_>a3SWF{0KqAdFIptko(CE7&s}*JP@k_vMra zMJEVIGhWbH9h5Xi!Q4!_2kt!BB)+YU#3Af1Ie741+3<(C_4Z zD9UHXiwpDEf7V$@)yA~D^B9EE>t_9_TtEDG9OH~Lc4BdU9!CxzF03>zYEvBeHW%D( zH1UmZ-Gcx6_P@sO{`#+=6-Slo)PD{s^-1@xYnY}sa-JXwTseYx!jz4Ly7+aPj;vcf zAz}7N5gFxcvdW)QsZHS6>N1Xo}<-n zp*=l~T|2g7|L$Gby>$za$`U{!9A%*-PaTe2huNAGhG#q0rO=_28>1&T_i^~}VJxjI zGa09@NQ&>C%&1dJ;mho!?phc9ULRAlGrqjRT~}s|J>OBqr>5l65qX$ zfgVr1`XOtQ$CFCQisR^vOax~(D)l*sFHTw&m0iv&4ePWN8AH@ zT-qDkW4YhO9rxddZ{2z;Zv5KUao;`n;yd5@4&l_zM%c1#D-K-py?EBse*{l`$`9ef z{kw_#CkhoAEyn!+kPfK{OQ7&VHk5-|_uQ`07K>}+btvV@?;NVyB9MfSLqEFjJ&nr) zFvt`hc<@0SonOS=2Oq$-U;Gm8`1T!Gn4h-?D1|6#Ve`(Nxcsv3#eoaX#WQ~BDL8Qc z`N)PNs;pMjl7c;~+RlB#Z)ESqagD^OhSE9GbV&1PR!MG*kJFrza%L;wJQ07*naROU>&=G<_A41voE*zVRVFH?jMB`Skx)@! zJ*3`!bM-L_igd^XanwMr(&EO26CP?zQ4qXNO55{%LNgI_u?@NjS!z9MF0s>|#+6Te zGQM%sO&AUaP%`I`SmAOhZ<)J*GNWFXN z;d|U;Tzl$u*OaG7Dk+zKCCcQm0@O+g2h+6fZt>#B&T&Rn@uGxxkKHD;p(9o*dC@Py zCNzQJF!Qne@V11MUN8*?1G{*Fy-xc%m@;dr$r1*G60ABS`h?@lYxw+^zl^{CU!TMc zU%nCZ$4)?JHiQ-%fjG)A-#dnD4}A*Xxb7Bw?7~mur(Se5e)1=tiMdux;R^xBbWq1A zyul1toub5e_d`)DtZdmBU>TLo2FtRv2*hZs0Ej+@!-ILQYQDFy%;3EYrf`?BC*Ksl zYkO>03rnY>hN4g?7y~4@1g;9dsx(dw`(@9AEcqPs^D8)VbP)>+OZe z@ElT-b4!*a)w3K<0str*$uD|X6p7lnT3KEi%J=j*eE1=B2Yp9yRlu5ge`p~*`!f*- z^1sdLV;PZgW`5?B(vsXuMp78q2s+- za=*+@%}}!_b519C%(4Bo75wD$e-yXed>7WbLqfJujflwIs;B2_jFfh(jX`&f?k|pG zY~H#V+js7;l3Q12i=#cf=5nnGOp|DhrkR^u`;c21Oyzc|_Iu8)`*=7S21Hw@9+Qfp z>V^=7a1a4g@9f+rT>gDm;QH&nRB2G^?$nl-xqbw1`13d8=5K!sfB4E*;=H|k5p#h~ zvD}njbS{qR;G%`kpnPGF@8?ec=WU?0x2n88*F|NjlkIk6mgCu8@I!DNK-haiaH~r{ zgOx%wmvD_ruv0h5in$bgQ4__6m90!&=Sc|W9IJ!G`YV~>t)<8by)wVDg3n$5RlM%i ze@Na&rX^(DvV1?`{Tt&z0mJSZZvN)Axc#=9aLuPaip@E$@E&}suSpP;Q99O4-chm53FoJ)yM z*_t&SrMUlrgIHXew_)=DJr}NRr^Knzla^@5O^76ciUbxGmvGlTcjFs3--7!eJcN_; z%jk92NMSS@2{vt-!}hJ)uw&;A?B2Bt7k|&i*l9cn8QHw8(j<%t9SI9>g{u@NgHM-& zX|Bd(VQ~>NQ#0sHb=btQN-axA*tT^JKl8%p;vYZtIUG6GwP9prOoeZ~XXDLm-ntdL zE_xj0iH!F#81!vDjSij)!^Z1|O|r8gr8I+Og7sSeDUVZ+^DHMV8^vy6k1L4q!5b@G zO{504Lo$5K#{LS%eaV|9PGY+8ox6A8@ZpE)uW>-f=PAzF!Z$bgnNMAV*{Lb~`5*rY zW||F#!%2Tl_7_Fm_{MrBvP7S-5uRSUb~5;zOG!z#27U3Pzp%pV@x@`@!nfV{il4$i zq{@sUgmqSj63o^aNoEOlNiyj@6j9TrR=BvYvR+cRyH@^t&1%pAx`He~DX`Y>zM&X2?Pp~c`2hSDAhkbnhGoPW&^xn6>5uF&+zu8WWd<>Cl z;{Xs@w9Tp9BVVVEaPzlr#!G(px3M-F(0fVJqG@$Sa%K#NssD^~v`3>MMuQY8)4m~= z5qj2R)cB*z=;#{NHbt% zX$|jr_q(yYyg~xeC_M)~4Zqj%Kxu}68*gLr_&h%P;lIT*e&9+x<;qJ*@fvxN2Kz~I zF{L@5@$igQ8%dkEcF$sJYKq&kj)#AU>L|@w@KK*_j7B*%X*sE@0-h;H-K44DGWSi|A{k?Tev#&FYPw}}WSo;o z2SYsR@{5Ud_FeD$J9FVwxGi`w(!EJZhR)2UIgAEUwje z*FA21%haNEC~sAlJa?9t&pp*&id#$}9lzOsX`W+Yc?l0Zd<3ulPyZPY-G7J#Y_4o5 zO6sDUnUe)%w}=Q5DZ{Qc`7zgi@$7 zghA>ApuH!ez4zb`D^f!;J)5(WUXhzk)=t`1k?0Kb%WH_64X9CSrTzN-b5R3iI{o{q zSu3N_2+ON$SY7L3VSX8Z^;hpCLDg!tEKNr(=mNw?C!%VWQDfoQVf^hUK87owbSZXj z+XbOcVsUMW{_apeIyb*-5@_{M5KPRQ62W3DNw^kuG~A7G!hM^qyu7lEhYvr@^$uY* zela(K!)GcTq1i~#Za2~EXZZhr;vev#zx^0)zV&vF0yGHx0Ua@#kV8to0*cL4W=}Xd zzle9f`#t!`|NI!Py6T7U<1hFzY@V4$J{({;8Zs@3t=u0=sw?ZInD1yd8(3OiLThRV z(ZEn;Ikkb#kfnWGbirAe+tk9zwI0GYA7CZf9}I|iu@OtCET?s6)@i<8=wY29iU_L@ zfyo*r;=us~`HDAQH=v0TR&z(dQ(R2wLb|GRX>%y%K}>C$!E;~mdeC*?&z^%93j#i^Zf+d&X zqTFjF?t6DQ^2R&#jO5e&plyd25$W45m$Z|FIi|8KAMewhFCcNZkg|Ml9Qi zWE(=ZAVeDyZA9j$u-0hAFC%2MNT78B=omsIw)dor=^nd-fi1R*8bxi&deFnvd8OsO9Ug16Q(k;g^tu(R!)ZQ4h92!`?lLC zOdH8@PqYX-+Utp<2%Bc-aO~JTe*5=cj+ediHTd>zccT$cAr>vfaR(x50W!A5kAkgu zdRaL_Kg@T<5gLs)t?8BS0Dt$1f5acZ;mufD?xE3|vfhEp0_DL3vf6rRuGYeOyMo## z-*wkr+|rqqa;~Y;Tz46YxgJB+VA0B!&_h zM)?RMHG*t3@ua8xAiBMQ?H-tq5K(_#UhKB~5z06gS&rUP55NE0e}H$r|HD`@ils^v zs$D)IrfbMmO^&6y&|HF?l%wKGhm5NpY=Y2(FE+?B>*q6gex@m9v@UL z*cP&`@nAHh0wf_h-y@W#VWz8;W|Q??kukIR2F;|jt1T!Rp`@9@NXsk=M_6}PW8!N~szrVFILZB=r2@LWAyO4lp{h1pQ$)yxy&uJvc<$#)mR>m| zGD+xGqzsdFFwInD9HN$xqA^yRA;SX0pdQ2-R^h8!n^GXXD(kk?_E?iZHHvIF%rG2f z_|kP>LDKA4he)xPZM;dvM5kO$FCt0b>+VzYUN-k@-d|hj;+oHYf%;%fcOYXKEwbnp3S>vuP7y%hAP6HcixZR{&%m#=f3<+%x>912rJ_eI4{Kfw913S#afZaW%hAI zyjclOo?M_jhftbimy8AV7P;~X7h%(sgj7QhF0hZ>uJwMmOH?Yx>qyN@s#5MQ%W?uE zG?R$7NF>7+yNn8qBt{+tkTF;%_b<&|2_=8dtdvHQKn{k(i7~FpBe$DjokXDP%I5@v z(O_x6i@Wc*9iRHdC-B*S`XnBF;2@TI%Q*L<1Nf<*`+3M#he9E)io)EaYfR&47Lq8Y zwvDg-Pp`#q{@zP*%fWl7FR`m9b|`~LCWMpLIz$|*4Z{jIHcqWb{A8ReoUzei1aY2G z%(nKc#uDQb?v*!!M~WfGa5N&%?S>m}#KVVziQ9^U{s2j{iCb>D6%QRcM2Tn}CQ!%#U^JmnB<#sy z*SW7;cOVH%gA-vmRQSv_*P=Tb(I2;M+lEHmAWJzT=2&e7Wv)D)FC@Lhgd7~4e2|e3 zU7c!8EG*1perbse23?UsI)E1&Lv;JrZJ3(di9dSdTkz`F{uiuvQ?%MsHlHU%`Pq_( zEfE>znn#FKgjmO<$fT0|Go}}X%e4*ZlA==rYs(9G>s$W{fBxV8217KdJ0d}KwI$i4 zw%5LGbxvVu^BPeZMNA4LA8AWHqEpHXMOtCoT!LTz*{d-fM`I{M^Q=b05l)_*C$E#v zZxq)$g^}fFQY7{fHn?+1^lCtb|KVdaD$hwMulI#2sMH*UKPfTum@`RL^eBV%Rc7N%xRr*MIAQfidq z?>_eTc*9@38Oz-s`e|m75Wy`<#7Ja=tOR2zIdZhB<%S?95Lz*)vGgcWSb3F?vCvL; z>GU%HE|KbqF`A4j>0bF&xWbYjrlNSWs=XJtP<4slzdWc}40$Zd_j@pF!3di>?zn@V z=jPBLYh8K8V1=Y4FHp%CU;6q@SXu2tvlnIgpw>s%j9+i@ zdr`G*JE=%uOW6=KY68Yir zr&1*4bsz{Hdh54urR1oCBo|fq)~m=xj^Re+JOs*b zB@HaBtfJQ+k+5qd4b0BWQ1~Uytk5z-(N&?7h08!1-yfalzB-(Tm1W9XyJJ8I-qU7= zA3Su#cHLCMTiN0Lo1-G7FxBZm$|ipQ-@O{|e9wm=qBew4dXycuNq$H2X!e5f9y20_ zw0RISPg!oO%8e{@&}8RFE5>k|wNJ!~I-@30pZVOic<%@QCt5R>2dmX?lW}3ZL!vIB z-*ei-#`?E4UuNF|i;Jr^my{0d8+it#eVn&%z$S}}(;qkE3Ypf(AthBCL<&*EZn2)-=#i{3J{P+##L-@f=I;~Jl+Gebt zC_W8}$+~}Pri-(!bBTVRp_C_1oS;~0J-pj&@)W=FOaBJ%eA}Dwr?343{D+tR|L{Er z9#>$U=@nTNoe(&7;slP&&ofi42nw4vmU|QYzuKiX(X*?%3ViiiQ{qu>aP~GjWF%{Q z-b{1=10^DvAIXNoL=nH{`w+s-*?0&Hfl*b2HUvG#`T6%b-4cP-@x?jCfZkx zW|NAm>8YtQLd*WN^gl~0tF#7WWMpSXLBy6c-p~l&cj<-1Ek?#6%6(?c7!8J4TU(<@ zLS1;XG}nIaU1-zBk;7I6A|Op1GrXBTC3e$L7MlDXm(7QEz561!sQoASfj9wSuBVd#Jq3AjLI!1Y z?huOtaWF;q(Kwc9CJiVZLFW=tL$LK)fCr3s^PC_35&YIm{w)?yJdDNTD|r0l&crZ% zCT_d+c1uUC-6qpqjHL-eYI{kGA`w`PAGqst0!UatGDnOre))Pl@Zdvu?sJ}l?Q>fw za~nzPxw%47ix#m7eOtV!Eo#hcxJdCP2j&+Si{!Q7!m=c?IDQV5#taD!H>yysO+jqE zw}`zNEvO4$cdhG=wB9fExVCU!g*>3RDfpD6xj)UlD53pooeJ(nQYDJ0=2 zbIX-&LHhyygy1?9&E$hQ{cJXy(1R3f%d0^|NLSXS##5hu6)wK$0FFF#2-#p2d-rU| zWlwkl9zJ~3<`xty1S6DVfz2}=OeakwQ9^gpX?N&6L_s0~#FP*o{%7`$xh|tzMJs=SS2`ZKcI-G-7uIm(@KGFm-~l}R z@WZ4K8;us_aU%~^7K}f%K=L%x_{+Dw3&US{5w3dj-&cdj5&}oq9 zN(uuT9P-m5_kybgKG0PoxGcTQT8l+Rq80F2&EmIg5!l>>;N-pVpOIdn*w4msMESba z)ivC8>m7&*g_Bz4!)FrIGrdDEG+M+5)7Z2H&;IEb;r(y<3*;+750Q$5^u5TnRV1JP z^yl&BxvhBhYhH=XjTU$Aa#y1;-JZho+7d=XO}zP443}?UgTmT%XbA*iiMnEcrB(Sn zrI3pbzhvyQD9x-pO#8!a@-sL^ptJ?&jokwm{{MDZ1%g7!@JWRzo~j7GTW~dNt2qrp zY^}K$_#`leZKU(Gr(K0!Zv~4>C(#@BY=W-Qv}>T;zRf=6QEELwp=-iKH44N*frV{U zbp2Ovz|}AMdE^mr-HkWl?Qi;Xs0;+#KpV!P6;<1`~YzS)3rBO$1GMud?z@k^l^1>Al2-B?)eV$1XtqBsF` z0%owF7`f6}trCevOeBXZ-2r~%cm5qt%rD^MANl}VNds||2ekW+m~qllvL1+0W_cpW zSfXUEay6VmCA`qsx_JxMmRHd4caby&7M7MsaK=QeS9oZRB%y1+=-l(Md3wgu5XBKX zoi=l&5&buxUy+;$m;4U)#5lWr7%L=Vl$pj z7fE9VH+JV@Jt9Rq<{d;lt_RWZAvmlps!^9v{o8PVe7Yg5I(N8=O{OGb!*(Lp?u3Y|3#rNSt8>G;r#K%AW zah$Q|4E*YgUyPl`ItrWn5ZmPx;p5XRrDDeNP#$)veJDna+e>xOOg-@*m9as%v@Vld z6hlme=1m%t>Oc^FgDMoKvEI%+a~DKh1CU_Xe=y2$e0~+neT~6LV)5uPeCfIy%M7Ie z*WxH_*)oTn+qRA=tVu&TMXn=U^QG(Yi~sgl(eDkRVu=rZgpGgw;&l! z?S!KFcbr?)A4S7ugk#4S@K2xp6ux}pH*v?^2l1`jZik8^B#Jxa8BZsQ@WdxR3GHdy z1jrKmSz#DUN#0JHbArB_CzFBBj$QXleAHVZkFU1$G{SsuH;N`D)75?iR{}c^lB};`ABQ79C zWg}y(uvcr_a(d^E9k}z(gJV7BNZ?70n{U4Z?de$vq0sO4aPs&(?!NCJzy*AonFGxK zJIyA3t;rQ`m-unVu&jivm z&w-N3eY9H*JaFg;{`ie=!ouy^u3o|dOkk#zIWjI8^4AB{`Pm{mT%uq;XR|gsw~dt^P@qI_rCwb z_`_HHKCah{@Kj0`0q(;=R4!CAQO#@nG} zgN^9%$ca%%t7{fy(`q(6LkDW)H$454>kGqlyn$DPAh=FBBW>4@<P6o4roX`K>>PgeXJ3S-nF*D#n+dWy zH*MO4`Gt9kS(EjY<-UR&sOc50!v!8pY!H8}Q^2R6R|-FCf2tsXp5-Z8Nvn87D3bb^ zfaqy78nig8Wj=*lTV7O>SOOHTc;cnl^r2bwx+_$59fFKojV9jxuK$5&JnLB)47#}I zzPoYj&9@@!Yj5qcR=43KrSYW89*;fSx4B$BPc8F*)!e^=qU7wTukf;$|3{pdUqN%a zW35vK{NBr7fddy`ipL)~&lYrR6tw7oPv(F1#;@URZ+{y;@ySnMwcn>r*PPx25zi3v zNTfDNn5i6_cW%Q+-}5%?+SDmsi@s-FQDUUVtdC8b$L3NI#9VnU&+dmnfRfA*HQ z;*b9HjkxH*h4}Ap{cpJ7!t<#}&#Y0}Z|RU~AEc0Iwr26V*S`tZe(8E*qE?Uu5c$_v zzYx#*f$t}tItw~bocI(r9OFU|0l-7ErxM8+2M!#-wO_gc!{G>xMzIK}s1^}!`r0>f zfE3c%`miP9IfX&%6&8;xZj8I_? zH|zab{;_q}LViu!gC`?VF$ zW-6cl15d;y2M*xnFMl<@cJs|p1OtcPgHF^*(Zn~uc?a%4aunz6*+Dc;OUpeRK65S>l~ao$6d`h&!X(h(lN{|wAD1y+nVt74zY1PbJwF+j64MYr3v z^Ap)HotntFUNEE6Pp;)LQzD%wUTj;BxHK|J+KyGHK`o(em8N-E8uesn=F1WhBNh=U zlOOz%XJYg29d_|K3i3!|`_414`^?y7p4SLpxt$~ zp%I7FB1kmQz&qdm*SO=M2hlf2$T;|%&8f`J&QP++O(-SdT*5EUgO?>V^c~r>@*GK& zo2lt7;Lh{dyzXafh&OPr;?AW>0({+RsZx-1D2M^)hfBimu@O>Z0 z4PUt#{eI3|K!GI$Yecj=E&Spy{2ZD|ohKww2PDwQDg5&8_kRHQ+;fnuWW_K_G+Ud{?regPZHTBz zR<^N731JgOXwOU`N)p5h{uSw>1(VsT3B4GgA?>EL;ao+DQxEktpO6t6oeu82_aL5k z_0{;-zwitA%eTHAw|)ED=&r63L|SDz#jIcUA6|)fzUO@eb2eu;-`k8D_?vgW3p$R7 zvnh&_fM04|d!9>T1TMMc5<*KQjbv=<$*Lb^ICStq-2Ba3amStaLYwIT* z6m~coVmKOLFd9%loWZD%{;*FdwNY;w(=CnHzUB|`eOFvetFH`zu|}tvmMo9($$$D3 zrZ>;fy4k<~Y@&84(5+ygkKO0y8T3Xrzt9#rn)#5LdzshnO%0s0ZwFw6w?<7NGk?3* zq>Kmsfo}s5{;j%Z{BI84u4OIzuJ72%k06$@)Bc4AqMf9uaeJtAO62ILBaeLW$xx0d znN1Z5fo2?&5S-mQi=X+$U&iW*le7>{o}5R&-^af1xft!uo3SKgWHGzU#u^Y3Gdl%_ zqJgBi8ZU`wRe3Zvx#$s;}0IeZ@lEU@b8hd+go<@uzr-Z-?*|Ctq=QCexH zsCGd{_WOtFD2RmzoLyr}X*`DI5@o8A+lsVAODLRo=1%dh=T#BF$citCvl8anM14~x%&A(il;y23PcEJr>7?*BtQ~Ciu59gig;BN5%0BKLH(+TT?89~ST2ga zTtKfz!BejC32-#@@P zAAApBB6Hh=N^Ag=Xr^Bw=&Q^8;3C8nt5@%h6JB&Y{(Rl_@Pu!(?Ht1|QYI{{O-w=5 zV`$@0DZ}v@EoGs;3m479+u!s?y1(ud%)5+@Pw)UP8NHPHjx@y{$1F(Hfz7+)8r3dq zWT%WE$TEN9SL%%>M#e^MBR=07^ruXztWxp??T@d0>j!vx^AMCQTM|RdwMA=H3KtgAnqzX}U8ui))CPznkAejPYiaEHvMj4hyZz;e>-gg$x zI{RZ7n~dz#f#OJATd7#j$8~?Z4JWHUUTBHc;u-+p+>?~iCuFu4|Dw*jXIUF9Hcr&>b2j1Vy{$m zupheh-?mTUiwv5kXWm@h>6UZ-Ft%Tqqj3vIw6w{}M&g1tnzZb?9=hi(Lht;=SY;c$ z%`iX`_=tr>LL&2;DI)v;e$|68n@VYv%)<7{gzR8M`jMGT8^&axnKL2@#5?iqmYw-g#m24KjO!4YhycAde z;aWVk=}9Orw9Y%RWfY#+mD#2DxZeRmj!p+`pi6;|_r3iLR6SNW%O>C3@0~q}I$H`{ z{io~k^>2S4Q7p*V3)hdb&!s67k4qAF+;uPhe%tL-I}Sr0_zJu(-U`IA#9G=iV9F6e z<&qc7{WRnV-Sqf5x&6il)^8sybe)R zr+e%NWy-MZs$vQe3;F!Q7r%}@maf2SUj0f0y2*HaFE7_oLa)^fFajAdPvF(Bei`n# z>voI}H<{BcZQ6#sA~HNC>C_NjVnJRx`Dr3Ft%>)%cK+e29yXM4OBw+#z#wJG*LhxGU#Y3fJPF5S;KP&n?X&5rp9_-rGkxHx8c4u z>yQXPFFgwi5xTEmCcbtJPs|~IkxuZxe{bfqet2HLDeaj~7*tQuH!^2RCB&Xte zd_4yG`%vxbqEfy@fO*~B__q^|!*8y-f#zI{nHphhTYlMxd)ZEJou%6 zskh@O8;UDe%DX-jlc@-fEX+dO@W^BM#wFh+2UTw-+~kXyCLm7O0QU~`V`6#=wPu5& zdh>{qWX6LArqU$)KE>}8TUzg8=_^SXx)@s>fz-S{itHmM2S8J(QA7`OlN*gj%fn=8 z^?HLGXQSE7oRgq=ji}`ow;#_m<d}*1L2n-ypWs=Nyg4Xb)HQX z1K4lheem9QpM{?8Zp!YpvS+E?eUur^iBL=xsePa3AA(8=Z+gS)@Y+*eNswe>_^?i7 zqS3(4k>M=t)E!GHyWa8qX0q(l@evH?eS->WX>s+Nc z^KEZI4*@SMpI=rCw|7NrOpNTneRto5b!*mQ``{=h8o;y$>I!JYf+%8kY}VVefSY<8yqB<^}xUYl@}@SWIi0L8G_8v zPbC^UC=rOm*9pl4h_r@fT@tGn^%En6RAQHxnV5ZzO|$5=PS;let#GUv;2kHI(f;1x z9*E2uuriAC?<3C6;3_nC3xSv-+^jZiKcqZJxXVg%nEWa z1dR?P8Ilz%_Q1!^ejm**$#w9)!Z)5=+otS6fagZ3UJjb8~4bp_xAe>d^_|Q~yr1X=b=dVRHd5yzny^9GO7a z)kCi}doPq8U3cnNSj@k&q=pXH69y8mKK12z^IQKDsfrMWen#TwIOV1FCFZ?ou0RMf z%%{KpwfNL0K8!iNT^Wipwb$sO#~-KOh%Oe!1>1#)%FXGFGOx(yL)zAXa7|vT!Qz5a z+kme(ST!|Gaw-tFkBnm|mqIF_-n;f;C>dn@WcCKp^`AF?0ggZF2&l+Xt$2#(0x5zu z`@N3Ap>4SH_S-PDeUSEinn=VtAlBwsd5BXF^+tk;+BhaBM=?4!h@r8q7#rP;1-&6& z@Vq0ay1|a!qtcioA@zla@v#X!y=fB#GhrB_ySs}@k_J5o)*BGDmgOBlIl#&1M=*Tp(zl(gWO{Ao#fn z^r?xSy>ZG5LOBeu>;UC#*(S4ZHh1!AL8PgD@mS0_iN+`=Kgp#mB9*fYk%?9o!K0}h zQLR=fA}?0!h_tOR5Sf*3q6DA)%tiR>*T0JS1N}(jm@wv^P+3hspqtr^p^;%CH!Fwb z>{|%5rr||V2Bq?JF8kiI^gm`i3eZ`1V|VAE4VI>}I`-^MvFnDbE_`O@guoQj{Ii7a z$$!4#G;H0r1HZcRYBY2mg1CTe1n&E^sZB%`s|2g|TZMmn$%#1a$W zy`AFO(5D+`-E3m(j$!=d@+uHTse?4Sc0PZ z0k0#StFdY1y}E3b3V}WL*n{fZIRr<{$_iloG7s=Q`$GxGV9d?a6p5t?>~N0?gIuZB z_`rugf*=3jzfld#@X{vzEC~5~nQYa)Cbb!z5ROhq;7xBl4fE&p;>xS8#=3P6GS8A% z;Ip%Xr6#-CZ`Iy-?Z3YYr@#I*RD`yDPmcf`iO_iL>8*Hr`&R13&>pqiNv%s**?`WL zbRD~Gdxpf$6EKLjR-S80@QzajrLGJpS}1>c5E$hYH*K1YY{0 z7ojH%5NL%|usxwQLp)v)wrt&uJ8r+t+9b)$#5C8!pJFmM;`~Zd)SER_Br;-HU&Ki2 zK2A9PDBO0}T8xY*R36K!Q5qx}n@EW#pWaC04M`)$z`z_@_h_*dj%*Q_Y--e+ME3|^ zbHL&y5j<}c%jb8YoFzL}skYv%)lB{r zJ2@PNC7x96P=q6*GKU;WldO^Dn$70-W^wqmSQ!54(?U)dW=8~N0y^R5l~M_@r4sR* zkSGzWZ%9~YZTlWh03~f*Nvtn&~6T(8@Uv8k|yj85y1aRLIaPDb;Qn{T0)k zg2>3#ig!WUzDP5PcxXzu-X`swpD2C@J40l-ZumV?bXQ6^|Kn%l{qKJl);{KBtz+=w90QTNzZya&t5jf(AL(tdTg_8Xo*B{DCSeB=Vw6#e>Iik|x!;e0R zFMRo%`0FjVBQ}wkUuL;vXXt4FOcZ7SJT?LdxQ?GRNb!(y>OO63$=yziDB=E&adN`5 zbQ2yB%LK6yMCozHYhOd>oy{DHVt}A?Vo)J5okWxwo0T0p`fS}=&eBB}Rald*1WAt1 zicASFAt4}P2c&Z&X{#Uh9B#V#Z#eSU<8j5$FUP_AucWsvA+nhZC}UO>sdI!b93)d| zs@tD&#=ql?GhT=FYahb8b?flxqmNCow58Ebql5&;H_>{++9GCchbFVs ztj{t&K297{4nmAVfEai@Ah&aP0>i^&)EiT&&Qx1sr4Th6c+F`iQ_#~;5rnU)VNL^~ zNrWe!dJ=#A>tAX9rW97#uAyj9^1WME8k3C}eyK!!-98eCxPd-Tp%hfGdPyH{pBT#5 z5TW8&pTorOk39Z3t!FRrXy%INX8eL*%x%Y1BO(BX7Z`h(5)gtQ($P3pEa*i=C#bb= zezd-ivOujQDfDJLwM7eHAR!BSLdEkcGyt{j#rckhN3}GIXRoU{iOzCRvPpNRlr%ST zz7$z%Mb5*M2m{X=)P(B)UXN$8aB9;?ozlVy%!N-a4~t@GCn<0x8p>BFS1UyJSDTt5 zBhqk!gR?zYwG6l$TCXid54SX)n3N!mguV_((MX>es0w5=D%U@Anb z19Xv_TPMaCn-BISFt4|Qla4t8CmwSI{XR+Tgs=obTU!9R_QU)(-XlS&OF~{TNh3n( zJoeN^TyW7BaqI2((A2Q>D@-9TT1z=pUiAcR!Z?kI=`>9}Oik7(gVbp=BYl_JXAUR9 z^yD-Lq!aj_MxvXTni@qPpeAoN1sRJw4!qV?K4Lr>AQaf3m~rNdgnP9h8xKa|E7u%;9!! zN7MDOstMWJA#qCZcZdKywRtn{x$gm%hV?u~Tp_2O@iH18roYi?3h8qp+N1lS+a{gc z)~(6LqL_KJtPE^Q^Cp_uERtq~#2Bmsf0(f)Ci0X)2^+UOO6&m2t^s!>QH)YoHx4=E zxh&UUo}o0saVw<~Hf`F78?L_*W0RBU8<=Z@DA%l)b{(j!XJj(SC^b$hWUeZeqLef- zw-KR{jAKc+WNHvsvZV}-l{|wWpa4}+IhGj^k|d#im~EgD3c5pyF~RWl_9Od=VtRZ+soL_?Y+PgRbzXM2a$BM@ri(l}GQnZig`2gOJEMdp zX8&FG6!v6MvzZ5GG{X?n&s?29?CVFf(LmMr$ys1V>Vo+T0yl@#njYf zW(b(l!+y72mW~{gYP<9J-;!`G7-_i0sWXV4JBnGO>=13ZER!h|kkdk>X-3pc3yv5! zC1oO%`H*6tr2Sv^nJiRVW0MMG6e%d6lZr=bV{V#Eyi^1jC8=$H+Jq=kv+EVHg5 zu4As(|MRZ9@t>!^6<_%LXRyzr`H-T?b1I&&S&Pb=va!i{A^Q7J};cM?IM(h9EPZ-UV7?rwPH3PtyiKe359+#GGbjbu!AwRp*5 zRI6nQUg-R^LOqTtz%bsL$Lv(((tP#Y~Qhy8FOvKuS{l8stbbwboUw^fGbU&0dxGpE4^(UP zcZKM!dJr{T7@?T`U=BCWDzmm%m5}v2p538a9mouV!HyHwuw(2UB5{qpIja=i^n_@g z1-5M5i0zxVU~+1LvkoZ{^!4M=7rq#M`vmZjQjL+4U2-5P5`w$rC}Wq&L~#U}hFG|G z5&8!Pux{#pH0uqb7k1%W3+<9ckC;&YrfYA&e*3J(d*1mDguZ9XDuU)ioNwsu>7&MQ zy-{cAp6BOLVJGnv(LO+ApW5a;T6-cY>kG`WxlzbsAoxIG_4J(75{h2!L7Whwrc#osPL4N;y&!eowI1r^IzW0^Sp}(t)Q%`;omM*yz?|k=r zF+4VoM%>7t&9r${f|4g_(Cdqz{UlZ{--pmdfP)|x^Jfmh-4-zStzN4^7DXF9&Xmbm z`vN`l2C(deWAKXSKMy4d-1fj@_{b+O#69=kkE$qJ9d3KAQ*KO#euya*p;Y#8jtLI|z{^1kqOk~pv?CMM9V z*T^O`8%?q)iSz9SA-eZY{p=&hb7-RJbJhkx&BIVpR zxRYkhL@J>p5)nZvk^-}XM3GdGB6N?LrU`7KQuK=i;q(-$)d9>ch4i!7vqYY5q|VNw zj89G=N)&oaC5(=YVsdgET~#kDffA_6^X&f9Xfz{~N&(TCXd4We_17hVB?DDFJQ-2u zo}b<9*#&Fii<}kP+5-9>(&KBUbb%kp1%t{;G;rStwjE(Inid@3?lf{|ltO0di4i5V zHJ-!6!&v*Te?cVdJqZyYJ;a_*v?BYM3`8@$j`DA-UY5o)8**(2IlrfQJeAX+N zSwG}!miI-9rK)xZTWpjmm6xnq9EfUlG94T6n%Q(+_AuN1nR)QZ=M%vl0)%Xkj!R^* z2?F96C4$0wTms;W)bgpS048wPtC7?!gqiQ7M|2Yr>Z!nYe{dsgGV2}4=%pw0(|IW7a*xM7%8Sx_(I{R!wQDZd`ZLDRfVG2=`Uio$)Lsn}_@#dh>#DRIdDd*P42z8rtM z?M{65vtPu9$Dbm^pUV)bzONI=K;ZpveJjp-^XV8c5x+|C+4IlAvLy@ft#5u8JBKFe znzTf`_WGC{B``+D5AoodNAQ++ycZw;=($+6&vK-`f*+I+O*d!qW7%@0F~ko&@-X`Q z`p{ji5V|fTB1}nZs#dF0Z*gL>hV9!2Y2J%ko$iekPrAdFz1;mLV_95jq$QzgK#pV? zNK3r)(3`e2_HhzZL>@I_;+}fysV6ZpF;3Sv^gQSwpp41fgD7fZ^XAP6$|3ga>c!5X zQ42B8D*#-!@#s67wH!qa3=R$A(Wjok==3P2rfL|UoaRP(q99Es6snAxiI=%X&cP!x zf+tof1iU20;xI%{!=&L6ddzFdSu%Tuo7he*7--(2MPyS<5FCa9!N)@)_Vi}h-_%w! z%=Zl>=X)~cb0~`hiw3$OHxAFZp1XJG4u85mEZcrJ`{Czx>t}v49L2UBaN}ls5g(jy zc+Zz8l|zK(3PwiwWPFj@#ww^R5;g1Cxnn2Q27RTiEGn`nGB%Nhga27$X>Nkqw$)OI zfdz}H#<^nOGQ3I!kE~e_)of&F4b5E@DrzV@?P|Voy*7>ayyxAx;FF)iOaJX8lzd5E z-Lv2UcaA2z*|~Ekv0DZ~$XSE?mcohd=lE?hq^kACv>5p8SN06sGY?&G9EIOILj7Dl z&|Yo9#;Z7myo>ggeFpbv(t_%k`<*VT_Pg}P+SRFAllu#j;d`vKCUU@+eXAr%GN)>c zFkMS8s|yj0$ORXC4wwGow`5$+I#E_i)RnEMf9_c@fRr>>;#Yip{`~WB)@iRnNin-> z$@l2HpYqbJudm~4=_Ge zLrP-f8Zv3%OP~ET-uCaW=D;P5AccbGr}*HRr{j&UJ{cE&>a+OGRezyLWRaDK6wAwk zo7%)C?*TX6avT13&w3nn{PS?`2hPO865m^7tIuVd))6U9v>e-a)HA(&I#PzuJLtEv z)b%Fol9r}K7bFgJp)}VX!jR$C3V{h2R00nnLR%KoZF`O>eI|FbbgOe|K~QR>%pFBc!Ov1{Ra?%OG_RX=a6Z28<$VJos3z(J ziX4JM+iXNyjSpjJD5}#O;b>cLEmnwFw_Fg;UzB^G(rL(&z>;NqV`_XFPd@w*ZDcmH zDYOW3e6^iyHSgP~P2<8(e;Nbx=HZ0nj%ERStK+O~-pLDeE@eIC^&bu7^-^(Kh+LU?w=7W*DLgGd!;wGA&j&b!LZ-7>1NMC2=hW1=j+qt0F zd{+s)|J`Td%-8-0Dk*bRnT*7&3(xj>jy`Z-{OX6_$CWq!8Al&;G?sPGu^d(^yDu)M zF^|OEV>=6B)>M=x7@wT9GdKwYC<`C1ck!#xFU7vTMt9T%jr0&II)&RD+GWvJ^)O9h%;}#)pCv5) zj7m6Ibvj4ce$w1$@q7glf_rne^rTH7qGR;;_!eR<3gr=?V_islOmJ!$LfCm&1Rc!+ zI)?(Q&{(h!LT9rgY+o!Y(e~1w)lqB#>`u;-4_vXfa_d86@8f$uPXY^>e@4d$hmUHd z8(~H6lhFMreGaNmAy*T#hBO#Dvjbe9Dt|%mD9-4!Ng?lW(#TR`B=e+H02vVLP zPHc%^Nq+FURs_R+-AOOU@@hXtz<%bIv<*`7&k6)hy%D4E*u)sN@7#_ZJ9iL+X%s~n z>@h7oiuatSCNyPPlhae!F|?C}s>Y|M(M)2_WK)IHUilJy^!&5&ikCefSO4nAILh>f7wg!rm*EXnf3_--XY7{9SnAfh*8e3R~Xu znU80o3OUPBT=%XN?HT80awg+I6w{iBS#%&BBG7y-9@u4hTy0^!aO;{#$Rs^Pz_UJt zkW5o7Ua&B`t`7Qa){1dFx?ab<|GbZ?2!0Ttx33q|6Js=?O&ab(W{Qg1k}2jxybAfS zw-vF{3FfOQtnd@`G^XMEDG+MJ&@`;9rQed4v!iCliiHd36Hvk28?(+(DE+ow6?ka_ zNMP2BHcpW4{VWUMtlQ==1%vk9HUswA*pScFw`MG90cBcYrBKUfJ z3}5=fXW;vv#WjDq-cHi8p{ItPCn3U+vW`8MEx|8-_&prF*K&d}`kHYuO5~HXuq1Gy zE$4aedefggD3@5IoiP;Dq-`=yjQP8EgRHhdk=@f-zIPtAw+0Pntt7Ps)3s?@rPXQ) z-90_j3%u~%XTp28ATm7Rd|E+_k^5Q7HYhcfrn&-wOTP9+Ji7h~+xVCr%Js3WC)Jj1Sry6}TDO&8T}G~RbY=D) zT4ZHc3T)VG(u{?#69lSBJ;o?$q8xBPGD$TJ)fEpr!9x()>3R*mSEk+>M5}x+Wg^IB zeWe}NGqz^cs+C}MM9wAvyB3rt_JTkE`7e0&$tMvj@8SjXXpX5qU1x-+Rkmb~Iuq@4 zwSfpXNsA-Ri#oc2J!Bov4Vzfhm_)B}XrYHhdPqe#whTRqNHJ_zn0-sLM-I}huzL0C zOsJ|>ZxU3VY=>;k8jQoOiU@PM0{G<=D&WA%oSS*Bv8%@9#c=~7?6Sd;o|%2v{ZSb0 z$jh2myekX?J1(Gzyipfs3bQ|L7Q$tGmFd0s&m`7A+hI1Ku#FzC%?G;Dl*uT}#|2>t zhaGhkHNISpoKmTB70Z?`%cBw@+M1doUqlcY4Sbv`?6G_~mhQU>JEtd+Hb=-Qc(Q^X zZ$7&EdNEd;wket@*l$}(u@E1szMBn_q`2@?pT=|c+ZQWWtUzGP8g5gW(=vvjt6IfW zy*A@o$U+mnm=)+Y_DiAiO{v&4f*5>tl4Qe1XO9WkSteGaAlGcil_$dJhPh$B}v zYd4_(x9gP?W<-BlB_GP)# zq}Bc+Ghsm&u-s9~PMgb&?2xm&$;LD>lQtR+Do@d{DkMj+9T14m_Gpn^HlH_L$9SVg z&FTexb2u0={>-IM9x{E8)T$eDZ_q||#FtgVo8I_(Jh1*V#yz#nH^9cj=f3kzpt5_? zBqG0ZP$sEao=k5Q_-el&|C>iXWzLM(zEB7>2Rt5KYhCdAX!{;H@*+(&5f>&lGf&;@ zQ->poV*+_dKf=QK^XXm&lF?2?VeT?L1>AMl-I$o1CYgY)t}6Cgu^bPsU(d$X_P0hJrDk)Q0!T57LZ!R+vS3=%8PC(% zKIwR03eU@k6fHXhTEcVj40)=1n$3;Lkfk#}25m+s2m&hq#7RuXKP;C>pP*DJBdnCs z-P_yN#LpYEj=fbl4k6pPOwIfDbkiKkvi+U|-P;QlvS_}EX#08w=rx{~55^TA8Eda5 zH6fsxLe~;}{393O!3Q5DoVb!47c=89_Z+&b-RLS;GVvVuNL8jWpAq$?Hd9DdUV2K+ z`)J0zXRp`FK1bW1=jE|fxduIJktdGS(ULh9ZtYA+b*DokV^E&u?q>H>w+SE4h6XD( z`c;7dzmvfdLeT(ZY66L79iRK$Mc8NMUWF*0S9&OYeDO0E;U&i&P3nR$QKjbm%%pOi}w;+0B`9#17T=pW|PGohr{? z?(RFS1534TZqeBI6b46zF;Q}uqFmHYz;&ht& zMQ#J@0wv_iz4oTlrC~MWID-J)l`1JGaReiPP~VpI-2HC#?2&Cm9vb)bDqcl*+YJ)nHM&ljFUsr$_9|uh(|g_@7zA@K`1Oj^k8~=$`1Aws}L5^sADWF z$C%q=Qz_~5XAw=0BoyjoGhw?p+je#=9hM!$!PbkFP`CF;d!1O*XKJn>yN){R1G)2+ zMI}2So_QdJ1#|>cNE9?K+KojfNr^mzaJ6l-IjGhxB*IdeA%VgYwOid@ZIunR6t>A@ zm!?$}Gl5*?!ZI%V zup3R~*@SDRn~2P@0;6qfKXXj^OwSOzA4zn0XI-r4cX$gf=_O>mJ!tfGcVn-;_L}kO zVd&%Jm%fyG8XlE@y`1!|aC(HIVzxJ^7C0Dhnz6|-0zgbnPh|w2RJL*bBdrS>2(5p2 zC?8IICG{HE;TJXP3r5+lS)DbC*$J5Ej*d-X+s+|u+qn~y%_&UBlW4>>CJ3}-dQ1?? z*(Fbr#}tKb+4>YYG~bs6c(cxsOK}9K7^GOI6;JDTmntY(0jeS-S_s*8kDuv&w-1-) z*M=kYe4P@Poz|J`E+NZAf>1}OAf=4b*@WU6$(@Hav~i3^6w^>61yJ^yLh$r9PbRV> zF}A=lUK>ZS*NPQDoKlHQX7FcwPqn~re}5Ga21G;Rc^V4_`ivc>nI3^0!k_AdKtjf` zhfwH71B=s1?5oGIvbh~|FpX+gnYh-ZXAFB8k>7*g-|;Xu*ED2c`%rM?q=GVogUdG<3PF+^mi)|W?kCvPRI~}NO;&eUL*J~3T3|H%H)&K zSmqVC5^@=D_f0m_qnjnig({p`ndO&ogBdb>!F{K?xXYJ9F6<3PFoqGwJr@X zrds$Oar7+OXE_G;UW&LHqTvf_Vw#O93Qi2jBcME8E2^NS2oPa%cn2=H@UwVw(>8)l za>b9M*~F5&mC9wJ!ce-kbX8O|R94Bbcug|Z!)H()%$n=j^L4JD?4CbMW81-iXUS<> z&&4(HU0JL{c`UzX|My3c-PuHoChcaq-2~gVZq3kn%2Kq9jgH_;U;8q~OeE^GO4`Jz z@>pkPA+8xUF*GuSv9U24zEs(@PxIP-CLb!98P_uWD#6-=aRQ7_)KQ;qqS;80L<+ta z!VgO{$f*Qc9`4!|?*o-!VwOW~)TS{pI)*LVc3|_i9oW8e7{g-|n4GF(vNnyW+7vb8 zr|LD56i9?bEsF4mtF9qw=%m1`oUdttbq}p0lui+jP%>G5zy^D^@U)$=b$1S3V3D3J zaD>ehq&B-Jr7=P&6;T@bHV~Zc5H_QTXi7ZJ)IVo_@BCoM^)!;CiPd{Ar=COFVZHi7 z;J$nA#ZNE)1saJ)ceRfK!GZqXjKr0KM2f^y$Rt6RN-;Mc$KJ^h4p4(w)!2l2jUj|x zRj5F4Hh>V?4{-m6L0onB-I$cjxWO5aWwBv{pKas=SuH>v5h2a<$S+-(h%o1dl%U7&nh~N@REDsiEiLCqMW;=`H;Hl5e6r^ciXiZ&tbmBJ)!z zCZ;AaGB!fjCor4U@|X!1=?b5;zNrT$JVNG}h<9*s5O?2m4{p5aW^CB70pk->q(ebe z=|ZAXu3*ukh1mZ&`{RHE_Q&Ce9ZC$o(niftc@EA&*MX6tshfH=`7OE#idR zizpH{P8AK1SdnpUQU@G#KV zgQ@8WX3Paz?tj)-bN_Wnu(|eKtPPaF?>-*Uw0KE<(VKo zV-j6hjg*kDk&dAob=w2c1bMY5&@#Kkk`j`UaFb&==E#F_!(VT~;NVb}QDoOkX|i!& z|LP@}9G$@FuQ?T^t}ZNGFb_TDGKL3tBC6M*(+H)cf&L`MG7s1@h|#Yn(Bmhllmx-1 z)3A)h^AUJe2vNf0Lk*nw^Xu?*>LUq5cm%Od+`t2|1MI)=zIe^auY_l{&o@4`32`$b zK`OpAsikX@2vrkvykRU{Dj`etoa%vq5DBSj?yM<1x^)bRuC`o9`#0o&a~Wu{9I0D( zj%Z~Wmj&L$Oy*4FC1z=}$Rv%PeG}a^PaJY3v4n~33&IWm1z|`IXl!JJH|F>_8TSPX z7hvUqhcR5Tg=o-n2uVlQCt9$>(zkpraY~+ASc7Yv;KJogF}!^%bl};At=h7T*^ksl zYr;;!HyhKq@WS)4V)+l~=8Fz2Ce>0IjY=7f={nU_cdLk0K(*vQ(;CBu|I{7P;{Sg+l;tp^J{VCTcqAV6(1Q;#Erkg%Y-&vK-8i8-{M100zZP@SxF1h3qT=SA#vZ&5AVO;U6-@*5LtX_A4iMH_Gx88LJ ze)+rK;mxmqJ)EGXF{+hnl^V3MN_R<~_rG=ItQ3NxXiq1^38f{MMhGNe_g1H~v27XZ zlcp*r$`YN#tYx0{$YvI5m1B5L8tCim!AU2*1i$#@Z)hqU#n}{_$@)s-x4*m+U8NFE zdeJe2maFIpUCj~_KSD4zibc^dIgmLeiApLE(uBW+4m}du@?-!nEEx-c?UNoZ`sFoP zH#7}ho{K>HL>ih1lg9>7o{yT2Ny2#PKp%pXkz#{`J1F4sq-XQtl!8~|TtgXSUS$gX zbNgsu(GHj}^fu)(>bBttY#*&bdi@r~mR%9-dS}l>i{^JgXLcJMo9M{P#7^e~StL=s zllHHj<{E5dY-zb3Tsq&pQuC9(g!9O_O1``l@+dZ&x=hb|Oggy^P*L>%#SN-oa6m6iXe2@0-mV z>Q?X$G5-n4XAk8*C>2%-Yip7Q8eb@b1zxG_5a>^_5 zvX{ODt5&b(VK*8=j9Z$Gx%4qAW-FC0u^#Z}7`sU4<|-&Lglra5aNN zihdC%`23f?f|FkKVl0_ImziWU0E@}4q8!!ihDbAWp|bDV4&zmB;BZDZtqL=WPEAtT z`3r^vNu3g#HrLh#Jty>ht}f_K#*J(I!YV?hPi{^{)$%+ zAcQHFQfzqqF&-Xu9W%=yU6KkxGK{^Kd+qvY8%&~j^~0iN76idtIb_1Xr#=1G0G zS8Vnn+GH;PD|x|szZyD|TWIChMqy5b;owTRHDYOvbcTZa} z(~KdHrH`KZixKwCr;FeWhwYmD%FLb6&mY_QyV1iC4OYXYKp(F`J6nlRrOlC7NImsnU!P%&E6Y#don#3u>piFG5*Isu6-u&jb z;hH~Pk6JB3;CI23RX~JPLSTx0KYJ)8nc5ycWdi~x8!FMCwR(bI|N8fM?|VOp@BjA? zsU%zN?qSqehvufkmYVZ4SyMti1!MeDDzP?d2|oAPFQel3L!wHkVd;(Jbt> zW!rZA<>tS`FL^wtWzNnIi2eg;9avPl5MH`-$)cUsTbrZI3gxT})umh9QA=H1XrlX| z{iHqW`;r5ZvLhF%!@GGQ3xJvZ5H%CJCQjwkVf?ZcCvshX3aV0#X1$KSvcLVAF^i-MDGAZPakxt3LTW;m%HIt2B5+@ znDPg3|F#Cc@Qa&p-p~GwTX)8oDi2WajMlZx_fp~k6ZI*abmEI};6D2x&@4;w^ixk^ z>y|CFzuk<6q!MKSnT#WhH)6%|3PiwYE0bC(!4IGW>CeT(j}4(Iy4pDDX0Efm39d;b zP6&m3^@v=JR<&R-TN~-cWg%L7sWVGh&yel9=a)<6OhA6dbg2|*YBr3-yR&Hvd^Z5( z%3eW0JrV73qnfA;ii?BDb0cm-n)FU{Z$!$V#o?KhH;dzv%LQ}dpFCy9; z?(Q8Tr@c+N0N?xmk8tzf|3TtQeprTXYtHZP z_Y#&fO<0YIW+TF{e*IhAb@yF(>s!yji7$R3(zvK&FCdl744wl?aCV`I4RzDgTX6rs z)>za~XtUXTHmCh?asz>oxa}Xe;?1xB4<-}U%orO4fpz5kIoanqGEUJ>-l;qG?q>Wf z^EZVaN&!6>(2ySwpP8^wn0+J^xnns?q>{u8t}vU>{GHeU8V!`YLMmIQ78NHt2-ulx zDNT?CK_&8o4}1;|J@g>lf8W1&-p6J=?fXRviNV1U{Osp{z%~6n*ninF9I&VtM=hvg zAv|;=A!jJZV@z(Hz~<=$YqpQ##`PPpajcHWuR;YLmC`A@krqsDA=R;`u;;23_`sQO zL&=Jo)uR+Q-Tc=~5**nOWkSfcO1cxrAF=>_b3zLzx6INeC^Og2FJW+7fcw^OgH~07 zgy(66F7!KgQ4tWj)h_DYDf1Vdk%IO#R08AoxM?^?*|u#vKJdY_an1)n zi02)3G<@mPLYF2rG*?mx%%8u2V55yjea5wOXp8LUc1m3oH?DJBLbvD`ld&Fh(aR^ghh@3@ar}Pc(4; z`JcdFZoG*!)Txmk`X~fb&+f1#K&OrUjsaZiMZ`ofME-Sd1ZxrM%uhSD^2T%Vq10?V#4Bxzv?n^_i1rcPG_ zbdAilE_YS&j<=tIA8CpE|8+lCE2M1s+${1IJ{6wEM{D@!$iukz{uGyqm_Rkva)rna zjUzLAojMiLV_p5y9FE{g-a=)tKxMKlFF>SYoN&VPaPfbA8V4;~3K=OxLgCNXU5BTh zc#;AJ7Vu)HZk$Uw8NvZeBOHF{LZDpBcc-wWE-er>dhydMAHd^-)^-`^A`8;Q9S&h< zo;v+nS@p%;-v_Q7EHjyiXHGjh93GXA#LRgFiwQ4FpCFlj7~r784neuQD}&}rPm*z5 zymSwgDiv}`O0u$++b@pkBM`F1H&O&5L>tba@Q{LV-tcCs6gup4SAQS&JMu7~9FSp^ zx{$hARF;EhruJjZ0^GiN3qJV%v+?KauBT~uf_`d`{!Wv#D#e^Rb1YF}D=b&H^);MU zh*mnw7`a)l@;Q*KZk|JIi+v%8-D#qCzPIhQ`De<8b|0GEiuNd?x7?;58z09XuDlA; zm(a8D?`Y#hq{10zoQ}RqsdWt)W0xut{hFSx5yaIPF{Zc3pXn0{%g05Z|2(d}?s^DP z4P@xy|-xsxQA%jGgPV$Htssl3J4!Z^K=(NWBq zJ0HEhbFgjOHf-Or5nuk&c{u2~dl$ZR+Tncde$oOH;}iJT{SWW}qUARVeV%gO6%4z* zg5coC=#+Wv@@OlU*l>><=qaRV;U#SS7PAB*#u-dbP8Ot$Xa5f7IZ49s-)5su-~H6r zoQAN{O-#TY`?>VsK|rNTh*3sUlu*|dOzR4U8a{S5LX7DW>V5@Hzluaws9a_4hZYK{ z$0(~*YG|2K*{bPD%wM_$7hm{E9B7<}=7GaC>(}ES|F|{#^j7dtiY~Pihaa>6=mLq1 zQOm{?^f0eio{!-%g+Kl6e$>o4Tc`d1a%eo55-cdG+w^VMDOy&v@C=~DJIqM1A8M-@p~W`7LT`N^+lsc-OAHlwi(4KN&9<#gSrP%A|z(6uV>Db%TXE`+IT9 zUEAR`dJkLIO-#BT?|L@FL9O%tgUCqcQr#Je(aEXggW7qsq-6rTQ=L3(wx$eYwEZ21 zuzGm((G8S+=$REp`+BB&m^Uzh|9IugN%_z*mT23&8=Dv-)Rim8rxqS*qI+$#>Y?FL zy#4KG;i{{yC7KCm+z*;e2d_~l`upl2vH70+7rH=tT)EGtE(SgP$4D-?UyharFinG4LI}drz0%$ zXGkSma7gWaEAyRy`@7#G)*iDND9f^HdEfFzXnx0T**$IDhBrRq~O1MBvYMIKeJG%&K`nqI@8C4Y%^*(X~|PymWIJluTe25cOT5lb#{n(rhE6X$t_$<&}<&OgTh?%=$NlQi*LOdNvj9?(ViiMfp`oX#sc$OFblE zD?EOkw9~QL=S-*EI{@^|#S2b-JszBY2kyG%29(nnr6kFkV8uq0aDLvhSTv^U`0lqa z!K05qjt`!5Hqk$m3(}Gzt}rZN-aIa6O-)S~0ADKG7%nPQZFOe%Guoi3%E}3(GcB#7 zF&R&sFwf5}q6W|Na-N)m_!N~K#mo|xD74VBql1NSLz&yZx>se$ac+&O6j2g0S5pwe zlO_T!8}o}8MnQ76-`Yd5v5`5fx?2=9IG zhw#utk3z~4qxiH9l{A+cY|ViI7->Sac3t){XJ8I`y1S_y7Ns%9C&w{0K0&yB#3?a9 zJ7NvedXS9wcWZ`9DiDaZkH6k>8!lP+LwxW(??hL%j8YI{+N(p=YLp#N=|+|aDjJ6# zYp7@+k3F)H>iEYWcLe@&?cgULQIwKam_S)+(bioB!(p$DoQHio&K@=%A zbktW87Lv~AnA&n&YS)@5XhsnGv}H`ldM8fe*U1kEX`N1SN?%!k0|;Pk3~nhX>NB?4 z7VFUo3ZDfoR!&+e>T3wuN98ZiYb~E@w5ByCP@(F87f==Am8bk0Rv)l0e*e3x@X(s| z#L;COzqt-R!Hto#?YdI0F05}FR%7gEzT6d+L22kuOPU6cG%#n;LY#Hx+whh*z7~u6 zyAXn%+TU-#9hY5pIjyI1xkMR^XUST99ibSsYB*n(oo-bWXHX0sC^Gh~QD#1U7@*^_bVjVQ(4_x_71X{emk>OzFo>hJ;| zFMsJt{5b+GgmKd*=#i4Ovx}ev!LqYqr3*j5;!@%@GX^@g#xxHL-1#|U&(dQjzUT!w z_uR8B99JL=`FSIgHJWOFXx)1J;_@qS$3O0)jML}h>x`i__dUE=Q=oO_AFsh-ha8BL zPkb>s$4XeDnGv%`f-qvqV3VP3l;&V;?5lDS~4BjKDX~1$HTQ~vbDJ?D#hhvn_Br|}Qcip1@3%n2O$ZfR5%pZZpa@Hq7JU{9 z510P%C)E6W#mO(@$R=fYu1B?BLDB5ap&i7Xl}c|$KAAD}BBv2?ykHKCZ-Ty9u^zIc zLTSd)EQ)aX!Z4dQGYG;uFgr^k?LsczyWOR=X~hNc3Qbc@GOJ^g)2KHh(%>c(jw}C| z#TnJ{$6x*)o40L;LYV2U@bg6B{S}Tl{4lKEYdMt5MkS#cnJcMTCvl!E`=IaO({(AA z%2@l*Bl!N0e`b3gf{=6?trEPHtP5c7qJ?ye*neNlsg_%wW*z6_xvjapR4x4-i}?7im-9CFYB z5DiH~gC?*rMuk-ld2WRQ0~${cF+4Jk=j^{P_FAzNo1WO*k`cRmMx0G{?RYZ4SHAi+ z+Xe`oV);HRaLTJr!Mon_W-OW8Pcptr3)G_sm;T~^aK~+T5W|28c1`eL7POxN zzDQ_*v0Dw{*u%Qevp|s^J7hTrO;|oLhycy09$fmnThSD~(8{CCh-iK!BUH6$uO_H$ zbisA!G=YtDO9sL=hxZ4eV~fn^x|4cK>*1S4+zA@@jEArs_A_P6=`a^f6y_|7H-pem zzRv`Mw%-7!!i55Hnmt+~0AlQ9_R@rAAw-74VlaZGs?fHkDZGp(+`UhCUk~V@lEjY0tK;C)XB%&>Wq_H^2EEGRm(x^%Nv&!`4#S z_?V5eaN#0qjz-B$$1GGUO$>^?2+P*X4RpotTWF^Le|^{(kuFM-#8~&hdc5t-_h7tM zN2Dw*Ne~c~jY;3@wFyL14G6y*(oI~s!bVn`Vfg18-tZ=Lg(Y72lH&^FKx*|GD>J4^ zo1sf7I~<#w!23UZE`~;j5qeb!y7WQ77Ax$OSCgfRx@*1 zmsNAaKh0=XkOegY&1QtL@iAiZHtVV7-Rvb{7F!PbjDxAwrm*R$4JZc^3l=RxwOYZ^ zhwqPLk9;m_%^25QcRhaf>)&By=MWinVrW*FK{>DBW=(j__4oJTIR_kqx4z|cocO{M zFn>-z_j#uUv@d}y5mlCSG%$jEDD)&sDZi35T3h_hkc*B2nd^`e?*Mq)RTz- zu;7%g_u!9zdKfp{w-rhZ7*lU$pn*iTs%G+sC}!7{JbxoSdoR;;X0BXl#P2p^LE+>4 zIF9WX)HH;Y*S?B)@6PX$Wh&ZnUh>Is6W!-wKG*oO6OUZBRRrlxsc&Jk4XW|;=PK85 zv))n{5Zs>Y{+n#W_hI&IPHyd@vou#5vicw9A$>{Yi^}(~YSlhiy<WKK2+yGoisk-2#_x%N$%E!z}h@grEHIC#X$KMx_lv`h4Y)wkL(VW?7?=9sUnE$1+O}l&RNVhaR zQ~c{+{~|}}mAa^AY%-}vGa^VNbe)^hLB$3LXek#cjE zW!MSKxS;Nu*rs=2nQ)JK?g9Ajx4(ij-*q;IMkXlpO?8xIQ`*UYntLZpxcIyeI$POhilQOr-gIU**b(ZOlHlP*HJbS zgfs`~%CP2~Q>ViN4-4ln#*v2}Mn?I8wQKCpvzB&246T*>%E8T*`*%ikZwq<08SdGs zW?|=_0?I^nK{%v!y&DhhOV5^EoK3Ne);3_O)$5p=oI-a`H+pL_yOJn|SeJo*@(-m(#!wr(M$pvfAR zELn_2bLU{$vL)Da*)r_A?|wM=xd&pwf_dmJm&xJ!B=-OePmJKsfBqA<-gY~-Z`;lp zo@6^h{8&1n@@YkQ=uJnl-|`ek9KQ&nJEo?&b~aV88Y*kq`ndIMKzOPGVa?`_<1FD>C}2oTh{V=$4HjEF(y2;ou5B)seHN zsx`&gglZlYW*?{-Ht5Fnvm2My*sLgf%7_Q8c;Lw?J;+&njD6#D|Q9HQ)% zaN;q?69~13c&d)ee|jmlZ{3Eo&pC(7 zP}=hUD21>TVvi+zVEf>9QeDflu9mgW8$jo!DqSdBF-n4&P?eo4nK=h)+q*)btGmZW z!Lr3T3XNGJSv}*s?&>H;EKwX&X;4a!W@NOIJp4r@dy<-tS>LAvP@{NC2k*BpRxX~; zT?_gJXERaS#OT&`eG z7hiBb7WMUThNJRX86{f{b8)j#&Djp>$t0p7v%?PBAK&`Q7x95}&&SS@VJ;hK26rgw zh&9bM@G-c31Q&nx8@S|~U&iA89(cM+TwoJ{<;2RUcQ~t#l)&idI9Bep97`51##2vi z=5j?rFiaKz1}yt(_GgfN-{RqEj!Mm1I{&;2NFUs{nmK8~gX#=DsB*{QO05=OuWisk z&3MfB#fcE9W{xTLFdH{|;{2=D{^96xE zCMG6Gm}~yLd8h^+li&(wNg!vd7!=TmBW!$nD{i^v@3`xpyD5+2O<-q5mQUM!~=lc7_u`4Rc85=E`e~_=gmw`dDrwS`+VaaYYm^M?1&r*ll z<)PhL?|g_nDl(6#-oWV45Y|0#KOSE50G@vG2^#RB)L*qWt=tL6aWj*xEUr28^kx)g znT5H4$~6b=26HD?q<}Ct-n?H}E)!u}tyaVSt5@?ZitnM`s6!EjxIiN1Zqs}4d$Xp) z^?$h@ci(jv^Lv?yl!U1)uaW>3E?U&`MU|~VD_x*zu-YM(X2`}lM(jz^mKmyv!qJdXXW0RVGvfO&lskuscFhwmDBVv0fE^Z#IGgs^DC~v zs4a7}(})m)%@a82@I&#4j|CK&A6DpI!7ddnb>Hp@t39C^fnIOp8= zkpR?eH2bjYvJTj8S-B^gi33lz`^US$SPft@Onw55) zs#NZ_chm*`o50n6x_cJq$KCjt(#b=cVKnTb{us? zAA-KLAQ#B`BUC++F*%cmADP6re|$3@ALR0$bWrs|FU(O$IR|1KN2L_-^F;ypEp`Ez zDJBTJbDu64hFFHx9BP1l;XxJA@x0-MRpF_D=duH4R#?bCnQOyfwGzR$UKE2v<`DLu*SJF z4h+wT&gw*_Xg;LcWx08T?VKylgz|;%qD2KSYfZTO$_;U2;8yIloFZlAKq;JhVQ|M_ zHa}y|Aq@O%0(i1EjX(bB8p?po`w#?Fr`a?y2@oc;eAG=9t59*PZ{g(Ni0`%*yso)u-{VHEVIo zNhd<;7`@#+7_N*#kEsl{%^A5Sr)-Q~tv-nrdo98(fA=6{KmY>#|A_k&C|j=bJ{14$ zy=ypAkGK2ISGq`75}LpSuw)?|8DRn#aGb>Czc5amg8^M zxG&|!`w-YC0!5;fQ`vf*Z7lLS^3B7C#F%F(KePVW?{FZ`bXZK=0ON=7+6>NVq;Atd z;q;l4$edHq8Ej!mPovdphBvJsb6&;Ic%4{Ey;S{TttYnQ8I(vFdfhCHlwcmr7Ou$^os7))-+4RzE;tTn7lxxD zDYymaM)h8z$0?|ca{Q?$aQf_7%*^bm^)0FrtcJynH{Xm`z3gt}IhP}3t;c0VRR$`o z#@fa@9)IF-3iX>RMcSSL6j((1cp=; zBN2orCHSaOvV>(4e^lKLxG1&JKpdl-+LseFHDj5Ft?u2<@51X)T86TXQ*&6PVk$i; zB;-{6ZFert_>xqEHQsp)k>$g(`A@uJ(jZAo;XQt(~24&C*k+d*Y z;9h^>!bQ@8ae`t*vcruCMcmf@D4YD2llRC#&p) zf(0*~92sCqZsGdIDSY2eF_y1tLhVmr(t`RMWbS3*QB>~1jsd%j55NCmeCBH_$mJrG zhHJNg;T?QfpV0}snc3MnmYyD6;jr?=kQE#G;BUwz9f$VZr%`)Xr)z(^X zmC?<-_CucMqan!n6|MrH;>g$o2QLEYRlACYdW2R-MOw(5<_pk-N*ai z`(7;VS-_nyeF=&GE2^EcVdG)LWvs2NhJK%qn)&QY-Soi9YbzLKL8LEwEMu75C6?2b>)dg!Bo(TtzxJ(fJ%L7Jju~^yW{)guxh~82g@69jSZu{W?iE5qGg1h0 zD1$u1>9c2uS|rPP|0b=54@ztaLw6_H9}LhN4oUMOjRj_h1~0hzCK_-ZWq$Z<$9l4n zf8yzd5Wn&(Zz03fY^3P4TWGagn4O&^ zq)nfXb?QP`k3NIOOiRG~dx21z|*XG7%V(=En!mJy*3!ULbCEJ=%UMsMd)wJhr6ppl)7PCYy1 zu4NJ{d}=Jp32`bF?C4W4mANf)JofN|=&fGzCv819jY~gUWh7vOXo`QJ-R6*m8Eg0m z8w`S^-#bSj4O6k(5J?H?o6A+?Rh6u{tPwkdVVPDTg2IeZIJ)-$&dhW;Gi}`wDFP< zVK#}exOWdy_kj7-u3nH+^{B~jK`pt5wza)QURKt$XDDh?5|N1Uo$ovWC0Z=XBPJPa zEskAt74E+C4${6?wG4^&g%G4-`1F}mq=-ikM!YjQ=S+|R8iAB!kmcA|-NxLZCJyc0 zhZn!_d+_=DzEUlv&dTJsod>l-+?auEmS7s%O+wJE)OUrovt+!%n z-!g8w>BZz|NWe|!tS=wA?+ZdPQqUy?2J4roky;p3^guPI#0(9nO?}rBjf$Y%7_%ct zu{CZsMofxv__-;nv0oomyZ=w25lK5y0;!^35_?k0SK?TSP(>;0nn z0Du5VL_t(qGw)R#Cqyw+=!{A*lo7<^@&MFFv!z)XyO-sC@c{SQtJ)5A)20;3~FQtXxSCqu&0mjyDq~1eFDjzlT0UfforiMfRZZ(W#rTi5E?&Um{> z5uN}~YrT8K?%pEep~k#F#K%7JM-);0#2eqpRL?dqGa8k|*t5KZ)2GgqgM2}OFnm$^ zO4HJ|{9VvanM?-4aoJ>|MprR*+Rs)Vf68Y}<+k$9AZWi+)a#MB_5He|G~yi}>s}A} zL2%A}O6GU&+yUOO!gqeWjw3By`fyC=s$| zpEZK%HnMDp_Dlyi-+U83f8UpPeK1JRMHEu?(TP+8yn;} zT8)h5pc2BJZ$p95 zE^^9Mb{EiHJF}SjQ4YK*7aTH(M5)Br^7!v{V9THi2h+&7>!eOX32QgGG{&PDiJgf=FhS+%b82xr1=u)|B+rl2G1NV=Y8pgV7MPNj2MkvBa)PtZvplxVKJQ$z;_$O(zz5eyD zCup+vS|u)1NgFN9EzDzeb=`Yq$&GX$VYrMVHyq3LMi~~|HAb>50DC}$zavb!N#vAC zV|%yzIK_cnv3SUo9tgaXr8S5m55Q2R9tRozLQ}7!1kBVZ`mv*Y15U4V&FDDK)GP$5 zVM=2VdwB5L`Wij-hB^G{2$Rq*I^4f+FA~8Yrv1Kffn8o`T)1?R%Y3yaY&dp4R8><} zg8MK4msH(5Uw7TLv=_;MPpLao;Guzw7cQ3XpQJHMKl?^@hl4vzzG|_<_DHvT)#7>t{rdw6-fd}?{>CteBluPNNdXZds|5V;vXg1G5=7DD{S9j@OLl3unuC5Hqft)u_|9plNKxc3p}ZDO z!TZT&9iu+x(gdx@VqgoB3eF0HLh^mLzAq<@L6nW(3|njV)2xn1M1q(xOs%Z1%6m@^ zL-NdQC`k()D$2CnA_40WB|@!R3MEKmQ#1vq0KHajUecG$TlSnVNdp` z?AUz^ft+((-&F>JK)ZgqEA*Zu2-i^QL}+5OjM^QvX6HyuYVv*S{8Kz#E_c3fsO74v z5W|%0fkI(Sd4oyzN(%VhBcK_Aq4Ht{;i>u{VXM=o*Bm@}kQ}IsiYFU%sGSczev*+A zc39v~KKe%#J-_h{Zz!*0M8Yz_{GLT@b$iq-mL8f+6fdOHAj*fh{A z*k0~r+!kM7U-xOS@N_LTop;nwfc~q{IWh0*DNu9jyLfH>5iu%AnmgKY{Ze42)hYJ~ zfgUA@bnoPuQ#7As#|+9YNVCfvF5uBchC_!B(C`)^wB$~zZ7I0K*4^%M0IE2Xan9b> zBx8x~mxPuI8g!(dU&4Wd3;J9T;;uY)0hf!`U4${zZbQ+qoDh9#!WZ?Ld7$k^cnrpo zR;aE2j(ak;43}RMQU6BNlFxVXU^Otl@N-{0JyGI={ zz;o0PdzHn?U>oZuMo%^|kSTHrjHE@bED=;{5o7Wyo}CgW2MkIGL{_5>gBjb$oLIx5 z*>xPdO5xC<2AT^^peaansUkHr3@cO|a9dJkO#r=wKl!5*c*|Qqj=tH0!bY$n@&V0v zad>VwE34p!8Mt@9XD0B-m8*hrS%z|2)$b)=mZp9(VV|W}U$PQqj$clePb{Gg{4|_0 zjXid?!NV{N7ez7l9k~kUPd*8Sh*(hP_a8*mX;Xu!FovUXzuBx$^OY9{;d*{hD(4f3 z1j}B!Ocga50%E5P=PUx+s-~Fc3yEc;s8)(|>yS3v^hBFtim?LFY_;eiY|nIXT zYjGFa4{B*iUqt0sW7w^@k!)!gNNC26HGz1r(`9#dCljuG*61uzTOXg%m~kwAcm+{? zdn~>!rhY^>$k7b%DeUU?C>aK5(SuwA%EIL`>_=yD$}7gi~8Y0D+|8p7TQwEH$_Zkw9C zr`g&6sDo_kh%Er+42iKBE9DWBBI~Bd($X}#C_4xB6?NB~qY*L3-NK%OSXr@1k;9}K zqlAa;EJ14{B+8;8G)>p4J)YwQ$Vh>?z{Uw6k#}nc@qWYTlKw0kGXQP}IsRc@Q)9U5Y zr^c3425)%Y3$U=XpBzKlX`wkcL!@qQX$voXD|}W%1MDvPDo_zT`phCwFH2$3ANH6` ztags1WNk%--XYklEXyRq_eR2k2E4Qh^eAZqXtjYXEB&JM7gh*{#l~q0n`M}rpQG&P z@y8#>aG2K~G`?Y~bWXyfE=XM)dQ{+d-}!D_IDZMR`;pf$}Aa z9wp)o$+`1qX`oM*j=dtoaELzQ!%1JiYWvR+| zE%nkW)g4H!e=!O34KJr)&`+V~c0|E}5tU_0yrk{4B0!i?5W@{Tr`5&mau0_#h8T7G zlz|$P`+le)#lo3Xp%odlV}+IyXbX#ZYjOT5i_w`bI(7)ih_bbU=84#e^dX7qd6Wxi zA(12E8F`APBcjTo5~C07ieiKoA}llo=2MNuw7^nBya|;t;)NA8`?-cme|MF*e@QFSKvIE4bbO2$ai77J#w)PwgzyBy8 zl3WUO8C;siG(J(MESf&dD+o=?J=xm1!XwNkJXqz5_+OXRL{uZz&wdg;loXvab`hL2bZMi3gNF{`?3wf2>AnWTp~b zR^X36^kK}*wD8wn@p2TlC~J`JMlLMOlPJ*>Pn{$_CyJIz;T%)0`?Ei)vP}KqztZN} zGaY$RbEIPOQ~6k5Te~d$LilNEB7TcB3sGG`Y9M{K<&14qLj#0tGb;)dN+2N;w$LfP3>)D%aL0oKUl2QONo}qG`B2JPLfWOxnRbGi zPJ(79rmSFOG?9F2>MH_;N!e+*{h|BxJ!#i?5vbWt8~c}LaP{6cW}{7XqAno%uquP- zC}Phg08o^A=V6(}J7oMafCvmsks!Bo=yn_UvwKhBUH|JVc^OeY!fZ}@l({mp2bD1tZiU@eFKH{GzxzDBR3i< zW@omeqHL-%-L!{Z*qsifI@RwwC8>#E6=l?m35>n_{^F&JoGusI_owWbCc1Ka#PBed zb0nlX>Fx>5{jiZN%XLOmPr?1%Wru5PYczBPTfr8spX&eiiN}z+b)#@y_@0HPdl|(I z-+L2PrnalW^xEN61`^m_zks6)9Vk^0dh5)YGaSVlt``YZy&>xb(VR^=cwj$k>U%Xm z&Oi&gUWq`SX-X;z>w^m>*%Y;2f-W5c0aS{Nc-LGuC{=?XyOiA+Kd7{Yp-VHULHM1*-I(vl6E+geDAO@2p;ndVefhQh$2x~Wt zaKlWWXpyyy5ql-dEim;yu zXpkT)=JD857XS74@5d*<^fb101Qkm{shUzl$4*U!t=IoyN__aftEZO-mz5Hg5VME! zY}Q0x8HCV%kX}sL(yX{C=y@cNSgi*pF)nv3$`U#M)IC2XE@GPTxLnjR8V>0Vq!3fe z20U=p?e@@^o1+DiP>8JZNDsl{^Pz6)B}i>m+V%C{ozuL5`=*W*Frl1KpUW7-oBKMa zXOb+Z1U&7AsY3ApG2q2jm5iu!UM{L@H5zmyJ2Nv>$L8s1Ms{s&ZBvb(1*FW_UJ$jC zqV{BQ*;W_-{oTKd#~wRQ6dNRxWHOTFberPXv12&#_!GFaw#JgU&OjK8em0_PXnG)T zFB7r*2Aj2*WaQoPE_S@P@@$XY&fwhHb5+@GQp3RCGsX0@z6_vAh{zdJmVK(dzRbxj z_U>E8=~E}Y^z8U`xP5x>eP6^^A9xTidhRhah!8KQsEz4JG~@dHJV425I7#V3_fKL- zHiX3H`Ia;_tEC;m9PFxToD~cfILGtBb!+~9(9-6$rB z9`iIWLW8MN`G|jKzuA-xcuOTxcV6TONVmnK=6qj?*msqn86hfNmjYzMi&E`!Oa_s5 zH#Y8x7y7DD6vxpHIBGB+X3AiIFr0D^215_?t0qzd-;eNn$3-IBD_z9x7TU87g6+!C z*pZy#m2sc7rp(Oc=#!>H2K-#Fj&D+e7#ErJPOrc^fzpUoP25^-%~g!`>) z`Am4okZ6!jM3Mu%6%&mNf| ztLjzXXm(FK8!a#tF;1U(62r9%xbyb!K_kg2g6#D*zVMe{qV?7uhHZ@xjHKsn5F>P& z5e_Zyqw_K(i5pQLZJ6cMo8Zq=BvjTYdT3)rec4viBF+V3q-fX%BBjxZ43y^nf+U@Z z2o0-=Z>|{eT#J#WD%%QKPHM-_SxFXA;=s(K;%dgi)Enl;j_sMTsqV485X#S@%mjrD z>W!*=PKB=mkT{exjBfkHlClq%9jH?1iKBCu;b`qE0)D?s|9-j9WJE=hU}Xe!-7Ohe ztoJi)cGqxip5ySfEkrZUzGN_wrJ=zja?W27VhUbUNK#suSJW${TjVPk%4-3MFjZer zB}KcFH_rLTkkKr3v={e&<0Ah1N4|tFf9nj^x(X_t4M_=)SNh=F*5Uv43=ixcJw;R?s zmUp{5lXqi${jTJ&S8)AP4{@2szOcA(?i>b1j#N~>RE}5tm}rF@h^9tj71fS!;Nbj= zBE!)mhnS-%Ry(f4dc??Azxs82?SThz+w*RL(ixRb78Vw8^2`Mc@@*pKlfsr@3uhz* z%iGbt8X1A}n?0O5^ECD}H12xA4VaCFL|v{_3+K;Wz|&8k@yxAUnu4gWCktBLdh@O5 zG}~l+tmigcU0)~1o@vW!mP(?T_i;4o;czRXOe1a@3LHe#AksY%Now57+TxjR7paM1 zMg>ji>oGN*aTFA@Q!^};7-@wZyXRBs-0jW`x6EjbFP8*kMbt%J-420LWXfzz!Trmv zOk<^IakSI+nWU`|)-zSP@-!f2CPXBTicC;{XfV*g1|-hp0^O0oiNOeawnsQP=;02% z4>#;8mJ7R05^wKPd0B>tF!puz5 zKdjQrqq2wBUYEvt2T9s9w*2!QrBXI-&M4nlHZSXhdI*Rq3-r1JeDFgb!Sipp2__p+ zf51hLon{l~F0K&6bEDbeCcdu^lg2&inP{B7w1N{)pT=_zE#kIoma#7#p%rtVE{$7w z;DJZ5zOfB7W~tZ~O$Z(ZZU686-tR5znRHD`VQpiZdR^8BK2cmC$u@ALv4v|IBUqKg zIH!}g=u4!tFf;(h&rgvVn(ps!7g*WkvTEVhkRZx($9+I((95XCN<4lqOLI~>k8wp! zGuHg~I{l_{FjSWI&DJp2Fl|>U6}jLY>!Td^Ja`d}=O4t<{4Dhygy&Xc$Z06URb^*e zSQNP-N!+!;2&Z(4i!#BbD8ZU?*`k2T3M8ldc&d9IcU`j|#};GkJJ`bfY=pUvLXt*E znz0v3VyR=!5j+f@mQcY&DGJKyoCBYse{IynnUy|1__6!&sV|(ssZD@#Mwba-#z*|v z3F^6m!`Zep_5-9AL<=u`DV_*kw5GBtQy&#XqG`QNk)99BloT-2kINUbryl`bXZ;>8 z-^tYR=QU24u@AICcX(%0t$tE*5>w_8#|r14K8=mFHS+LDq|lscVQ%jtuF<#f$#Z_> zpMR!njH0+quT&sz3x>pa$^uUxEkZq9SQMt1SdvQYfsAIM$(dQA;^#rDIEp>Dkbp`W zW6N1iNo!3tS`~=OJcqxF1X9_c+WzU2r-)+08ANyeahg)RQUoc{^4w(E^C7pXK~tkP z*Z9Eiy^l&NFZuqLB8n1%3~5tfVPOWD^kpK-qa$w0iwOjeu_d!Fmv1~VWEoXfblsS! zc~%Bw3eCwgI23L482y8t5^5$g#nyTc|L1>xJLVQ=kwyybn04E;Jf}=T8-t_!58}su zg{aRz5DZQ}f;b#7J@e3muBdh^>Aq|ErwR3Ul{a;z+j{>ESzyAxFVH%avS5T z-3-ScIfuQ8#QvEC`&z(rmfBcan8ETw2Q!@%jZTEtT!Wrpn&$!Y;`N6Gx?17X$~vCD zcpj%WMtJH{7pE_DacN_SL8g(ZCX9$n&Qy4!xJNRtQ;Vu@Q`^g=8?hiSdTA`>D^NJ`zp&#jkr zH3W*+kVS#vkP%N*FGfv~4fP%XYH&(XUVQmmA9V?zO{iEAQ|aXvqOQm60@FkQ{m}>~ zPd$y%um`2NF5eyXp@tD=XXk0NJEw42B7DQ8h^^Iiyyw6EE*^Q{A^hW?`bo~>tfpSc z{M;tfMz^_8Df92ycG#&*4pP`se6w zc6}{ZmcyjXSp|#+1>W(_-^Iz3Pval_gEvwmol1pL;K0&6W@lLCkl>^K+?29ZDq3y} z9O?iw?VKRoLbQoi60DMreB z$cOZUj9t^lT6aXxq0*jJLNL%ln20GsY6G0s<}|yYREoT?p1xjCFS69=p&2p(6Spz6 z0vARa4-Ru;%oQgGfL$eE0wa>(D;j>!x}a1WhHt3Y6I2Q+CG*sv3xEMnxQbmgZJh2iO=>q-!0Np{xq>^;GP}fupd;~-n#M&9cqGTQT>Nu+}H+K1a+95`|a z-CiGCy&jdIoWZJ#{P0F%CSkYtVkz*AFMSQa*M1NF;ZMDZ7>#9~qY+7%R6)wf_dry6 zuSzVP^qH1t!VSZgp$BCAfM=Z|rn=A5kRZF4vx>UJUoYlHI6G*!V zDKf;3CRVmbc>LTd3fX|k0c}UfM?D-$a_ki&GEqt-=yrj%KDANV$a_X$TVF@U3746V z@QiH2h?2%c+n{;k%fApj02(q65`hyXsKwOER?{jDH5cR`GC@PPCU#Z~JTqe@6a|!q zbWDf?sy-ANXn(+ik&$F#Nvlu-E8N)whN_LfG@BpfcMO<>ZU%r7F-MSl|;jYwUX^i z6b7v>*DI(WTY7Pm3cpyt+UAX?( zF)Ei5WH(9i?T5aD&HfNY?g9&!y62eDIrfVII;Ky8X;w78h{n&|xeuEngN1Ri@p}D2_6G z;hw+1`~Js!Fw<#cw$sF1yG4TiyC%S5;(9OhdrXurc00`Fyd8M0N%V>+_?4Hnm7Iyx zzNQiBO*=vN-)HZ+2W#6~Tvm__3m!l3Iuw!5zdsn@=9_NB8{Y7G%7oxqqo%cQ-FvRC ztm8M|@lO2qyWc|~y5{UGn(Zdq?Is${1W6;NflV2?uO{MT4r%KEGPQsYed1oc>TkRe z|NZ?RK_=7M9x5+`hGvaWu*AR+;UE2-pP=A`%8F5p%m_Ss{0YkDbLzoqsBDBix`$;9 z(82&pvTpYIwJrvutP~d8xoY(czCQLa6&bkP}9nP_IQo|&yHt$R_qiS{BVTEfBjxv_OtvtWqEgk0^e=OwF>b>t#sc*Kbi<6^vR%eDMShc3XD0({$nv4ylqoAHe*nIvF zZg6T%=nJBECKQ68XC(8J;>I%JqLNX0l1z6ILB%OD?XlPaeOTTCUT8v!d5CcM?10Xx zFSvVyX0z$l>KNq5a-dDrfk~Pv_8-_!k=^>*1~tCIFyplKmPWv28uxwf zFYu}7-+~{0-Csu<#TaG<#Eyuks#}L1NTbnUX4g8c*#v%n{4+2AVC?!cigpxH^fy{f zWQC+sos}`#oet(^=SVck<{6GZ@-XA>!?{PkL!(V;O(H3=a{dCo{=h?c`E4&E*;o~# zV^0S|)HS*&p9C#6p;WZa@b`Y=jrhhlAH>(c_8>JIlURBtSW!mAB(q8l&j^3~r=P+D z4}S+Qd+BZXp&$Hy>|0(Ydh|3(Df{Z>1+t;RrHkkBryqL{KJ|tBaQumr6o1*MLG=+K zCLr)sF5e|QBvAe1@^iuBDW<1F8J6e1zh3dpoYPUO}bZ}#q7va({YOZSqX zyr}kKIS*EM6g4=n_Ra5*fzZ5$9~2}h0Woe%xa4cbBy@Ki(|#qU{7P1(gYo$La&Pp^ zhZqw(+S&IZm~sudSpI8j+1Rw;+A{5lE*skXU$!#Q>=hQVYrkfsWxoAp#NA9C8ou%RDx8DACJo3N; z_^F@zS#;*-F*n!VaUKX8(^lm_f7hy7=4Ed5IGp>LcSN{%ZY#Fp6tDfkyYQNCe*?Y# zkZZ0|pxtPZ<5iKw>9ebN>0LjB?qERs!lFWI30XZr?l&%C|KNxJ0Izt-Z8VWARF#30 z1C%u*7r4lxD5v}TOaJ;`;V0kpbGUHkTq*sD(g8_kEwH>-3j;j%_$fSg{7Jm~ciw|m zvq2z^DB^5rMEcxZG9?wZI3nlBa{zX1fVJ#vd8ewS!drgv=kZT};tgoJpSw&05>K5u zhmU^Z&#~RN#FIpWXn7B_=;2sZ{r(Ui|Kul_IUxuzO$ArjvTj~|eoaumzRy^Z zf}?PXM-@45Yc$eU3W(t|?yLWI4qL19eK&;{;d@Kkmj*rcN#L9;g`y@8SuT(?hBCs1 zc8B1vL!+_nG715&qFJqR)zxG&R_Uo=BHu5n03}t<7kK)b&4w2q^Y)qo>FLjNKtSg1 z+?3+L{W2AY$&4i`br` zx=rpLSW8)nB@QSynto`>@Ps9Kbe|Q4(I;n!+*c!Q(tx7r4VYS9Ry9B8(KCrk(zZs3 z=o}AXK@+Z80-2P|@XHfIbg1%zs)*9P7a_{rO{vewczM$_jd9aWH{tZDr>GCGxMzu+ zWV6vEW8<77L5FPt5l2vIgt^5z#ElrIPMv`s4$EOo{yz zy1t6}g?Thv5l!=Q|6zn~cay2hWrVr88DzO<^%8!<*PWZkS9nTE+B={7>^)fPZ4-*E z*=S+ZAJEKACW=y&Dx9y&M4dBs4`xS2IC^LaKl`))1i$c$Z(;5;%gtiAj8y0{auxoe zCD~Hv6jX%F5M>&W`54VIUwUBD(51*;5JC#QKFNxc#=yoBBS`$!JMY9ldE*;+^524* z>5*#U^Y?uNy@3a%2)<`CW`JAgCH9&=7F!W?k;BLoPoC}K!L#cya)H?#{4>dVQ7rpR zlmALl`Rzwu_e#t*(y~Hr{0Nfu&Nm`TIE_F2=tpt#%xNUe7Oy8o9jaEctm~zZYp3&b zDxs0_wYJ-gE~mLNepUTCW^DTG1E zT2-MmGlgdF*6n*G`X z?@yRu(EWqXQHk-|$C_C!tsl+bW#Gn$Dr=>eX;#^Ck(Kc}=+-yFv>0#vRe@7ge>Hw| zAC!T3Pw!HGa~a;4sfQ(=s-?Zlm|vKMMnN~fBG>d$saMAfLq5*1yPLgl-##J%+}_^a zeb&P5TX((E;EP|pA3y$MZ$#Q^?s!d=&eM}i@6XOsRHa2fkY%UTNq5USDxClL@9Vu0 zcOUy}E4Xm+LiKkAYkN2Y5GOI_7Z2i=TW`cY_uTLG()nH+LUTw?kNxDi3lw@=Tllkk zK8x4A<~2~cMiMp9&-1e3S0V>JddU5x*X^?WYntL!ue=+X&hQ)m@i(!#x=BV))taM| zdgb*B^>|^=*8gW`78eErsXU;`@0hrW*S+R-_&5LJXV8hj4uq5~6P!GA9*>=P+7GSz zrmZwsl3iSvY-4WNhtUJVh@Tu;yzjn;uxcYNSM9Pzq?bpwN?f(Bd9o;!y#r%yv?1w@pvgQ?w9I|YDI{>;lR5KPywRz{#2aT&hKHn6fO?;% zta3;ZX4}Te9)(x;w4o|sHChtb)9j#~rf9SpSljO6>Gkcff>W(gB~JS zG*pC+vKST?`NX1dU}KTQF%IlmMAK*_Mq@bA*vt#`EC-rIV2*VyER*{^aRZA(+xIoG1aS-#?;CY6{%m)JRta|^nCh6 zUEl?os`>EpeRRRS5F1T6J{$aHy{BU;HbcRmXFY?dDG>`c;CHK1(le|L%cct0IAYAY z<{erurTXF$PX&i5QIDl=x49HWrL0{jokMfWZSY7)TsU{0HhNPe3 zdJr-iapb5BtJ+$imBuu0lTWT*OzGU8PwTbAIOL?05 zJKI}5`aX)7jLrr2Sf{vHSfbr(pSoGmFsDmL8 zHM$BrfldlvZcjU-NDXbHcOW|88`#z}d|Hp-AZA`$p==0WtrXS7g@9M7%c$YkAN2V> z{J9m{P_3FHeN|~71cT;N6cr^*0WUxhxr{Oy3tJeP>a{f#SUK~dRGC3kU+kc?Daet9 zDwH#(M1U5@9(m$PtEjc~CTGED{$r7(eV*+pac1 z3zecQrC)f7SO22iU881(T3rA9*NjFzy!6f& zW6z#B>VX8JLmtkoHog+1?*|#BSUbOlSHJ3q@u_=0hvTPC(tzsH!U7f-=ZG*cC>esM zdT2gqFdAZIbp`$27K9z)l|OhFe)U&>8MAY<)M%z*G^ld_0pm0sSAtBdAMRC!ur#D5 znzQ)-|MXAemw)z6m;n!(5_hW<*d7e=;XnN(9y#$OM|}ZjDGJQ#b==$;pp#uh(vA={ z=W!vQ$9wO21nZ_vrR939I|K|m=-9*(Nwa|?N3SOJMVD1?b-Sc_LPj{Zur4(!T+|pt zLHWJLftY))wEurIA@5Ks*12q-Iq1}!gaZG z0!c&;q|t03X{G4Q&R}+a4vl6Lv$M0(Xs+b0mNC3 z79^T+JO=y>Yp3&^1i)rHZDhj%u>;Sv+vv0#SZqjKw|4=@4(;RYwfsE(a_=|SY~k8{ zd(h6daHJ`4xT$cslVB-LkO(h;B{*x4!ppAvQ3U1nK*D-P!9j(xZ%wi|FKSHJVNmc2 z@(RvCe2L?VG7($b#QwwV6sUL{MPwN00xj3dj3+A#s)i)oQW(ajOHPrvE-9G9rN*-o z`ZTo3M^ z>h0xA{X-cN@P+w#(z3pA@gkLBNbAB6Du&A`-?PRNvZKr>nJzzVH!-`gfUV&WPj2_9 ztmC4Y^W80~qt1kWPd(e1g3QuW&>7P*5Uq<{A%K?A4ai0V%q+I?iXZ#|eCUH8flx^~ z{6*{I_J>rSUd5MGBZX6^&f+KE^pp7OfAfd&wqO6h&`5w$(Wil_!5|wu7otOlMYq?5 zK6?tw`5w&AY5f0w^p*IbSN{M$`1>EhfBT*HU}gPMDcV%Z;7-5gJFlm@t6UO<#KQ6c z{Ky-A6o3E6{x+`Pw-+r`$#-rgF0QQOL*zW3CP=XKY`xML#CaDts8t-ZtB|t5usuW0 z?~jIf`48R=BJt1~8|xQg`rC+Yj0`)=oW^H` z0yWgT*m%sFoaRBj2_5uytATw>^GLEjuAWV?R{>izN0(3l(#v}2fx$K+;Q&|pp7EHS zdWJyA)))z%T`kHWB4h^nDo=G#+ho@4G!*c)#sE7Dd}C}(t7ub2iWw_iv_}$>8Y4S| z7!?#D+D43}{x;^84^rK`C`PbCle(9Sh=$n+GLobrLN{0#YQ###bj4Ybao^ioN{b05 z>kKndG&gHi?P@@ZoRZ3sx+ugM@0|NFzU)fIa@6ahU2fjmaw;yZjWzTv@36v`5uDT9 z*yxrpM)zfFO+fB{@QsdQ#>BgbIuS9|YpvB-UR)r`{C>Yz4xz!;8r-3jD^LYgn?4G zUJ59523=%C8=)_+b|$6fJ6bXFMBefT>+@ptjdKmXHMnr$K11=`fJ0_lbb zCGqH!C-Hk9_SZv_=GhH;Zb*R0|7|!g+&-|y4hm=< zPi6e$P_iN`iKN&-r}rpc^4tdg$_+C>+(#wV7(6JWmQ>V0LBqDh!BPeZwRU}V|4>7a zc%$U9cWvu(4eZRyXDSlZFECzk%Q;(@nHJg5S#XN*jpi%Q1_fGaf==rI(m29)w@WZt z8^?%g$Wrphhe67)wS|`l@WpZI8__gx6H8!dcJz^nvR?qJ+db^P`UsXIiGxjrs}^Q4 z*J@ytHgNjVDi>63jRD!6r^(3X#oAzBr$de;Ne$-G7>hDOvbhPE5poluX9{v2X}jY~ zizqV<;to?giCjauEKmxO^s&5h7s3)3etpE12@%$pX2&G@w8kveNMUjQ=_j#z@ho~9 zTl}y$BG|bFylD9_8jTs&sSw7)(**j%0jE}ZK@UcVx~=b_1XwQ_^)hQQRGN}9_OLoH zGUa#w~AH{5U?!Df}$E$}bk&l9#BFk?xgOI$6{3E*xcNtb*`TyJjjD2jy2Ps6oU+B&YnXOH?U{VGL`KP9$3aN|C?XL z3vc^g{Q0Nv!B@ZbbzEFuWqoNC(`V2<5yX&vw z_7}YXvk59(bz1nKL;_e^7tv6gp4tKjhBuW@v zc-k7PYn`wwt$Bz3qz@M0pHEXD)j67LPvFHzBiwjRo3oB`Hp4?#r6%JI#8@39`1a{l z(o@;(bH;Z{m4VRp>7trz@js<5s|y}g!KJk@CgX>Jhr=O8&N0}XLGZ@L1}yQlWdYWE)#bzvDLBrf(Z-~7+;b3gkF7>qPkQmvQmgr}3?TCTy3&%X)pfA??W z(4J*-CT`ufy4%>?*rJFsfDx>ZjJ0zbh4D3gCJ4>4K8~Mwk|cr;A398|0JEJ4KmIpg zg&+Uhug2=u5Ko;xg;S?a;rQ_rbl;p2q2?<a%H}%m``Xv=@T1?s#`+dz;WO=dii{cVZit*Q(L-D>*74H0A?CLSKsQD<+{8`u z%lMx52q&@(8*$1ob04G`k9D?+vw}Rb$m|Gr+;JOz>+Sy;9m>Li)zww3t*_G1t|$$t z83#y&s>5!mN)HF8f{j~O7o2H>jvb-he+na!jek!;o`B}@j10NY_&6C2?7|K4B*J_}FF_-eWn>gs<>!0~^ z9K7j!AX{_v0OrtCdmQu!)bx@522{~L*FmX49Y+M|mzI%5N(ze9qBNxmXRGx%nW9&c zs?K|?Gs1_N(rEBb@6pE|!)&L8BUc^4{M$Xs^e9LK5Cr-2%pB)p+>3=eo0wPF;Jb(MqM`_d6%Uo`42ZJH@?rY%Yn{UE{-+Gv`A2lTnojd}X%w-k7{FYzB zM?di~D*H)4s27Z_P!T!j{-~g)b;G6LMFnBry=+=V`cr>f`}rgV3K3XpgS;5xxzD{8 zH{E zMH^xU0QGSM^412@W$&PZn+NTRWBQh(?i^*YCnqz z*RKh_N=C`QXl<>B6Hh;ZuYdCaJbK~;RFu-%S`m{IvA*v_`*+mGRicX*w+85B>xkV( z9vCc`HN5`j1Nf`A+>ST@-bZl%V1%wr%Av;qRS$i)k<@4;DcMt8Iv}x z4g+=u_5AtsG`ln$49S7?`vXRSPB}RRg9Ed(m>aGmYDONq%>2he zzJR}YD8<6!9KQI_acmZxu~|$q9+NyH4LLu0`XZIx+ewTw7gzE0g;i|j0!A_+r1YQS z?)$LFvA(&5W^0j#VcjZaMUIU0|8ozDuoQq;za^>w(Cm;Ovy7Zmtje1I{dr z9sA61icuUBdTDzwqJXE=)_{OYnC`l;hUnx`<`f}nW=A4wM2xJID949QY6p92gVl{q zG}>)KOi3ZJwz-ZIk3UY=OHM$FvHp=e*4Ngtxv_}@2M(Y>0aF+<-Y%lv+}z}*dO%Cn z8tvyVUclj_NAbMpJr9q5>k$^wQqdLNDEErdV2DqB=F@oT9WSBx%Zmy?$*ilAN@T;V zpshf9%45a3%j|)PjN^(BOQgvns28H50bc#;SK!NEeHaSqSSI5hra`}tfBcXC0iJvG zIViG>%WPWH09-c8d?auEHE=o5c)ffsUzr*=6aP-b;{3&n*z9g%VPO$Rj~vDP%nT9; zB*eEROUE9mph71-LeMb$+B2k*tCc)J`PAvNIPt`j6hvHFT|uuuK!a-O%uJk<#zz>Q zFOX=BtIZI%w}&{~KZ^+60B}H$zaWSQ#1?Ha#9?_7|KZ2(#IOA6SMk{k+t^f1uP(^u zm7K9um|dE~TmSv9;>9=L#5Nm-N_^)pT)<$+<(nGs@;GX5;+}<~v^5nwg&`SVj;v#w zVZp9qPxd&z@3t9;s7K7!Cg?7iajL;QZ{xG~oyG6`$+xhrnJ-M(E1+$y8U2Egf}U=v z7NCXsA`|9`EIS%^eFVqt_bHGl z8dz@3lK#3c=t9RST2TUF2bF^})^k}^)9>z7GA(fbx6eS!3q+9U-dkwyan*a7w)|IO zb13ooZ#@l5&+v%KRrEnr2{**{Y{PSFfKgsh7L_Im?OC$Lnf5v@s9dyVjT&OSFymxS zQyNl?oV$6kO|W$2Fu@QDzZ6C@%oUkN>T%!3D{)D$>Fv%cZ8z!j6SqK?``I*!8K+2_ z4d$g0)<5(iohFWLg18c`IMx5zduNy^%b8B zkn_gV)_D59z}3U$J`#TMHC;(4A;Us2g=UCiZFrH+CDSC_-)XiR$1*vCaiJ<)hA@JHf=uI%7vDxx=0rmx8P4tgb>}tQ-omS1_Hrsm zh}_V_r{SWgf(&QC%ELQ%YH({mL>e}{FKueWLv7ZVwt9uZsCNMym$tEIaSr?U?xV;x zCFCGeFp88Z#Z8@D)9`xt@)6cHHgWRgDSZ1o$8r9`B`Tq~49M-KByJGA*7!6*YQZeO zx9+eP{yDS-bO?iVf?c(T+#M}PiEAj4+-G|@)%44|H*bA#- z|L}9f|3j~R9bS3Y9kfPP6tFVEV7QHQ=g*gL`Kq-mt7iHR>kufuO~p_W7%GfJVhijM zTi9ow#4B!F!2Du{AV05M+OI;d#xQM$PVn{P8Q%Pxe}?me2s&+$B(r-K&lGO?0v_TX?F2EvS5Sg5mfiq#>dDNxL!kztbnOm#XB*vlKCv zKv_-G*4Bjsz8DtEO@%`?B4yP;s6P-xpvc$(2YPg`F3FjrW|nCyA)^Llqk&O2^5Sq@ z|CA)i=KHoDkP<~afan}VW`u{-NAxpqc&Vo;!k{lW;CAb=zR^RYp*c_`Ck&WEvNpX` z#M6dS?u)P01+=LcBhD+!S*5KP>2%b*lNQYB@3QWZD6nwo zAU1nF7&E~9@&ZMZ-EJ4HMiX#1aA^;cb^}GEh{N8PA%tN>l2&1+#CimTWXV$jB&`P3 zGHC)=G95`IB+Ydwn!SjENNK%))*rfQ!;YcRuL_I*@!P+HU-;MmipU(Ti~eh6JsWf-ge10ClsXYSqiQDlVRVU8 z(0NxpGeL7dtYDZXX{lMj8rOcdKv+V2kn^)=y8!j3YXtAa)v_{ zK;eyw@yH!h)%gQ0j{E$qD3Fh~sMm4s+(o)avokHUX&5a*8Yk8H8chLJHW*-SV;$#K zSFpX^qZ&Ik^1Y*?QXyhmPvtC_vLuI_x}OZTFhAVJ^I8H&qaF_16-0ImQLJd*g@{I_ zs!BjgLq=Oz5DonOm*0W=&urt<7Y1A!CZld33XA#H0@IKvDh!jeICJhSHoIF46Blyq z`LuSY+9Ya7RZ~-=6uzdfj6g?kVWBvV?|a@1<`*nv#LgwKEb!jQ$cY&((H+g=ZSVaA z&h{JRj5+hr&nO#FWwZP`XXvF@zZ3O-rILj71o1yfG+P{$I!8_rc%>-QtuD|L0ZNvk zj-^2Co?qiKE6)B5mA%baQ+cfFVJ-G_QY=D~Ga-j9EQVkkQ2v4MbE$x0lgv;cRg5%I z#I;5tu^v>Bzc606qtgB-iWq3Fc345dlQMNGf0Qb&@L8GmDzFQ!CQYUj(o-pV-iFZV z<{95NFdv-uc1(o~{Brk3OoE4iNc#bjlEBaG><{dC7tc#&tu1Te9Br{R9@+vg`|%$~ z)Myh&({5(ud!b+>x%s_w4wP4}L2 ze(UVL&%O6mRZGI__tq*wf?t^SuPv2_$h)9u+H8rh!{T`sQuo4moD{>&8|GO{YWiNR#p7Nw8VAShz zj{^(_h@*ru5|r|*3W-TrXQSW<(-fAQp)w&J43m=oHZru?@Aq-$%o!Zf!v7|NqfP{8 znBK#(zP&X2jbWj)G)aWyz)|MyfBa64%?PO*p=;K$TdiY9bAWAX1$$F(SO$Pyhi=Bq zzonUGT`r9Fjv5XLIAzojKfY%_etPf_OgzEStPiw`nBdHR^Wl%+lJj@rxzD@`6aJbN zaKOJ;_Ms~6nl7LJ*_^$rWo;1S6j_U=S;ocLVLa`LvzX`%V0B(1)Yxb+E3zf+mJI!3 zJO2A$eiPriV;LrC6JWzT)C7fGN3v;8gc?Sm;Rp^*2mZXeoho~UF`XBG&L-MzE@|Z> zG|~jB(?&ni7zC<%#fql<{s^gxFq1Vf)o4JaDMr?yH!u`{GD9xP@1eez)|hBCuxr~i zW=CwaEehIuscK8`6FL$+slP3YaOp&bxo(WkL`rjBk@eem8GJb<*<9L$v?l5^W0d)qXotEXP!AR-R1SpHE~I8{UDf>ZOMUT` zPsQF}_(e2Zny3q0ZL`FNGhY7*`N&~?WTgbdo7@}ID}L!Ryy)e>4rh3@)SoY=L*Va7 zWEP@{4v9Hg6`?`10}Z972>h=MhOKDbS z$d!?%xqq%Pa%{(kKYa(T`}#fyXi_hg0lyTXc`+UE2(K+~oXFLXbI}S;jQx;Mbmdg9B zY!53z`V1(IVcn1@k%;Zcv3`nonw)_}m`EC zFQnEuqNX3$E8Xl2R`yw>X^hAiu1$Kc)kvxQVT5$iNu$+RM-AG*f*8mf!+tk~L8F;r z_nuvZzFKNE3GUe&4ycDSyL}r`hqQ(hWDuH3N+M7liH!V44tgStXensnyc6-xqS_tBUckK>&XzD-U}lCkNn_k=voomKeBXLPZOaL4z4fUkY^Yk1Cc{taM9 zrLibAC1e`hZ;Rewgl3{CWp-PLb3N?F`B`?2kFNIJdtX_yake%jCRwhJSATTd9eCxd zeiI+~i)%2|$cWWAA&^8$feF#42qlP)BkHd$E-q45(Qb9gc!0`Y3_6j&Y8ptIW41SB%S5Bo&+*o`{Wsk8v!CFt zuYVQG9ZT({nj(L2ZYw7rN>F3PgsG{`(Ke^>D;FtDO{@vTJC^U(HE4_F-c8iSXTP`) z|KtCD7wbirl`NG=V-tCyE=xihc8uAYj1kd+I3`N1MZW&u8jXzgN&+pAP?sx$)wA{p zuOA1)-fsA_PbJ`fA2wpNoPW$z@=9WK+YPi6MYCQ0jBIPR$PNug#n}Gl3OYX%(uv6< zvN`@N`r}FHPj6#FKf&?TeE-ZMw(9}rF@Vi`FvWl}4$?GQHE2rDM~_&tK}TWQo|P|^ zh^bNk&hjvGJJwKfT=MCK(i}`+*wqwT1Zi)&_$Wdv>0ovF6b(C5kfg$}p&L8T);*ZY z;O5LEwn9gZG5T|^fQP1*+qiMP*UvJJ#3CnkLqRu4nLx)0I}O8Ds)FZ5)XWUELYf-& zo?9z1B96G4m?@LLorjk~dhJR$WHeIZ0O+@v4Ola4t;!liRqUNsK}}&HaZ@psql`Eu z8ycmExSrkbhi%If`Z6S_BZs-US&9f^ongn09mF}b@4ove()AGynn1hRq2VPhjIw@{ z`wbh@voSn#p<`zM^oOz2CCobMoCNhfnTH@b*C--~mC(EnQ{4OLIEyrP&TPY4t4FgU zBu?cIiLtf*pDUL=a+)^hwSW0hY@e9HuUz?LjPju{5OYN5{ALw{*7wgW)@ULivuw#1 zMO#dbvvR79I;Cf`P>SoiLsQ_)@;XOs)HKX1Y%0mrZ~-k+8sGf-ckuQ1|v3y&!B3uKxBl*b@}LKRR2kos)LG7EF&947O1G)K$W(H&sNR1fD)7no3M)D6j3 z2NuRU-Era3{wr-_I% za}R)xM-(>^xs(7IuYK)n&`KDk7D}?ETb2^#SP62&l9At`@`TqyI8dyVwCV%Bv(T$u?r{{m>i4{@m0MEayFJ#MnmAH z0wP4H($h34`Q!L9IBNE`i1=i6EQ}S*Jr<`KMkYsZ$a~74J0B=$6zHS{rWzV8>L(OP zHIPLKvdJmzKe<>rTXHxN(a|TFL)uhO!b;TJadv2>f@eg?CAh4EQ>z8Z z;j(U*#z-2eiLY4unfRs@We$G9ypu3; zlhhOy24E+Hh1|obYklYqJf-{%KD7PDnDJ<s6RX=$dC$B@=PVtsEz7dmYgBrV9Swi;sozRF9eD3pKz@J_79#Z%l z4lJ(ulXu~1PrnLJd+L?QvY6-`3Sn@L%7dzzoU%+;7sF=k2ue%jD-*p&!=O^*JN1cyn?48B-#PXCj z8G#XJ?Rf%~&fsuA#k>CRm+{FTJ%CYTnmCt~6}s)fh2?jP4=%`=jPSd!{S90&)xhfV zQdxSV$?czmnxe`m3|jdaJZjGfS6tdatk=hck=Q_3GH^LL@-}iaj}Kh?k2txO!9;>I z6h%FDT6#=on(bb-tEitQ)q_CmuEa@LWyVS>B{X-8^i&$rl(`GVNTfN?H7iiDV+ZCe z359jD46PVw33@4uHN1;&by_&Fx{lt`fT`#e?{nh{D!YaW?@;nlPK_P4GT?SVe`K&a z7;!ZNMcKn;lJYoV*>tU}4a`fy)ikg#4hp=`#mOlgyl7wlL=V>0wwSjYqdKa)-3 z?D)0S9g7Ogy(syoRGHI=Q8Jb`nm11)RUk>cF&IJReIOrjuOl{GXD;&c^8)pI#XzIV zDObp3rjpe)Ml~^Y?GTb$@d$W}7WUCPqPwkQ#4)$DiW=eDveU(k7aXNty*Y zerySUbM2?`#V>pb&-m3Vam5vn!nu2Pa#L2aX2-E9QDPGbUt}32X1KpsC@wif&LJHR zFgG}Y`Q|cqPAbf`6}Gh_6?p-+k{8_ARGS*)H69WP}^< z`w5=CcN%Yh&K1}huL9AK8Ec`jhDlt1#~S|Pi#OuO%L5e6sdD}(61Y9Vs#OFRvm9!c zYW)72UXS1YjaMQofD@-z&|6z4<0pqwp5qp4vFR@@GB?6RaT=Fz@8j_oM~JZ`oLRMC zN^y{yD<9?0;^6TNZ+_>s_|ClrMncn5<-S#KGk~kK)ag;uh45Q-$+qA`%7H5YhJ>qZ zWkW)hLrSi3nh@|K)-0k#*-e~4Cn<{4XBbRixn!fAVjYfFhsszT=|7n`Mq*EQ&b$i9RAiFE{7_Xr`dqj{2H7F;-0DUeobb<1pedde0joq6;>K zss2qUO$ss6932*DT8k-v!~In%bC7nnVi+evO}y^OrJ)Xoas_laU`|N>3eAG{?N+&V zT5*b=Br$E^m8F6ZW6l~vgHf%XXw6Lp(UhZDW$&8MX-34Cs9BbY&WHRCp)?XrNDXh~ zXi$zCgNBB;h>$S?c5a&^r*>*_ftfEzXlq;`hpE#Yiopz4QM%2&P+XI576;SYa|N=JkV8&gIMk1Zm{ zx$md@@MrINAKvwjKZYtIWLn{tTW`ZhKJsz2S`$o|7qslPLXu|a^+)*J=e~%4`0_tu z@7{B8^}o6b7o2}SrY0tdmWCk0K2!CF&MM?1o^&_XqR`wwYvcuH%qpI|XMpo(4Wgtk zLlEBibbw@%hQd}x7T>?~2(J0dui(ea5&Fp#Y4Fpqn2xcC34He6Gnl{O9=!I67h*>i zK@Sy%afWMez8mlU``fXQw1IYq9lmoi+*nZgl?Er%1SVkx(f{-tufUsL^BXi(}?1N&#a9kqc>wc7hUS%)Rnz?Hla@E9w}XBBOtw;k^Ea2c+*6GM3dI0z+`iR z%5FY0il7lCDeZhJ=ZOJ!-gZP>oNkfqE3*o4|13j8Wmq2NG*qZXM<+~|EA7zfbm$R8 zQho}-bI>h9SzcpO({*R@lS9Yw-XuKhPWm zmT)OW2KH)pGR#M4g`Jton^8)RgW#@F3rCMGW4+&p^EsDQWwJwAi*dx-u!%r+?CjS5 zATwBzt|=03>~%c&t7^t6wIR=u8kO=!G4BzXpFfDB(U6fLj=AXyAOd4jIP)8IH0o1x&G?72-bcQ&ir$%J9Qo;f#Ai+sg}Ah(U}G*D zY(m$R8~N;Z(PtB@tIL?3nWaD5ax+^hR~@?lUK~Aqm};)3U z_~)>^w1S0&1g_XDDa{QNBS5*8Pixzt5XbBEYR zpMy0#s&yLY&a5Dsaa1OAd5Xax!T!VRxa-~raNB`1`2PMy+;d_btHTCVv(03PHWwg5 zgt#4{$S3gOA0NZ%Ji@Cl*^RbM@r9r4#|Lh|AIBRVKsTUNBDR9hn28i=co4a!W;g$Y z$3F&de*J6Eu!c&I>-`?~Kd_(qP0M+L@l9vzm4@0tnqTkCuHxMGBA)ulCc2Fk*8EQ5 zS{A|U!WDxQrxu%d&nIrg=WjfUBAuqPqOysw21k^&ncCOa>3UGY<9U5U_Mru4U3Ywg z;Q)g{U-ne^TTS2sbs(Iikr)rcw9L zCC;GJCLXx^K7!5@;Ko&%l)swkiAl0;WbkPo!%9nnHElv%6$wbAr!;OlwuB4rKaIg#!l;5NDPXi+yPS z{n&HfUR-$DWwn7hHbB&Xk1aKDM{!Oy-G##oSU7SV!~QywRugmcJCUVxc;N71dWMm% zQ?2uw#ZDnI5zN{M@B8o9;CU~60iOTT7ZPVqs<~-LNHBWt+=T22=TI||)?v-iyDc>S z>VHzwq%Ai-5zzTeXn;IEgFk%BYw?V$o`|=-?HzdF(4x!>@Kn0e^%8J|EN`!GkY6^y5yt+y{{j-ux}+}CpqQV^>lecuh|tQ<_yC981Av?2nlaE3yE zA93{E9}H0xZj5G#*4GD}T47~vh@&S?pw;c7nYPK^_>B7av6C3A4>{VGeuXI#2yx^i<{R*_|f(I5G$!JL<-$Zm!-CF9tXp5T^kCLWA0b@tkddx3q+O7!3ao1 zDxXEAfRqaXKDZCNv_3#TYH;YO)EN4z^pvW0a@~f8j@mrcrM=?}6ypz;zKp+A)%cgx ztz>7Fi^+_GIeCh7tQ&D$;?YB&V@y*+g<5D-{*h|EK`ck)SphdTL}Xp9w;~PvG6fvN z>8q+^;@BJdNI%eXjNjSDJFT^;l*01qGx*Mp-zJvaZQEvv>u71|6z;v}9*Sh5NS7Hy zprv13UBU9w61trZQpj|(uxFk*eVT|Ed%a$%(iYHY2lnsB1(#mRGMuO;h+WX$P^^P; z^zb3vd+&WHdVNBHt`B>}y|rua1vuxNb8zy+A`iW7UdFNv2V?M=Pks`e=}A26na|`> zowg|5o#6Mq120TL#;!rEuSTA(+fpEQBjMs$dD5?6VX;ngGcl4(;mRjJ8t?eyKg6ql z^GyJl0>3ar&W2RMS)ZU@d@OO|*aETpW?4!oG&i<|RV}bl8-BFVBdTip9TCy0&^DGq zDt}*@D=1=2Tu!pH&;?*K{QZpw@wr=8FiKj;bw&;`kQSb;Y!sNRt^B`9GW024&uQ3o zEcz4r9_M0MAEBxkE@=Wyg%`f?+4vuC|2?z_bi;ECM~)uFk8Z!6_~ld?s650g4A|-+ zb~lgU>dPlF*&Yc6d+6N?Q#W&mz_}DQp2w&DVL!fo+hL5Nn5U(I5{j*`xgM^@vX9yx z5v9X8ZiwP1#ssDc!x8dUQW4=IqbS4D(khN0If;phN&3u{AvGA-eSr=LZLH4W-}O0K{ypJGk9 z@WJ;?)dQhPbVmbJwn9rhg{#DLV;%X^aUzuS^M zv--P=a8Hdrtg$N`YqrUC`OQzzE6x&1YvZbi!c-@2tnnDg&=k#zFcISMBZqPF*ipK_ zQNPbwlC@)oT2~{$ICc6oQDtX^*iW*L^!>83Xgf*FB1j*I#H#Ffs)30p~F%U>mpLOy58gzE*10QH0*GPfVYYV(JIp)LV9G$Nso++|d|=)m{NZ&S4U* zrXk7OH@)$7_#Yqm7zTqv_&)P;!nfLHnuZIBvLck-eDr54?xcqk4uyT#>2!#`geJ}% zk0~ZTn|44m&(YNd8i_cak;Y(@!eo=M#b`_p_v~K+nfIuI0lLyC5(g~x^biF}#_5Q0 zHKOb}Ze)1D3!jZY{e#~@D~zWoU}bd$H{Ws#t&1UJ_D~K)mCq>2hnO@+@Z>96=r;SC zOOvXa&#^kjdOn5gzkL*c{h9A!sF+=Whlwzb;CYBwkkBn8i)c!AQq7}@MKN_`p4#A? zrFUtWsp0UUV~FELE|2@qHja%dI0$7lXT-c~L~xnl_WKWWKxzyHBe1qsajEQrU*cxg zkYRujU@&Cf8dqb=u6K4InKV=D@P{Q-ater0lq5zd>GsQk+FHe>s-=e@1>6^u?+Q9G zD)1Q!kfv|}j3mRSK_97np3JGn(gzQs84_N#xgr5OD8a(3GSYmg-zTa;TQyxmc|MAy zUN4b&pc@MSO)DHOgq4y5WytKwzX2Ntcz%wq{=G&$dJuDj^z!P%=;>&Kt?qRg&OcT3RChq~+yhqRv=dUKZ+x`Ua`ikeqECOEX=F z9fi-w|BUh+`TDvj@bRIbAtphMb=qw_<;p8D(e3i4vK5h@Ei+Vy$p4N72Fq)B_n*EK zcieFYP3!kZ!%~>3Jg^R=DZkbmz!FR8xPYeHQXj;YPS)0bKy0fPYwH6nFRv2#Kx>2F zdflt=rnkHXal*W2d_dyzhf{1#!eOLP87CBl5)dE^UY&IxZ~h0rwlmYy#2w^sFc&33 zzc#7Wm_UK~#t4lzc=CE=eP|44ayXlpS@T&VxKL`9JrTtb^%h9ZH;#xPk+TLySkYRu z+BX4E94i;$r7wE{{`#8#2YaU`nWs!S8f3lc``^R;4;&=JM8kODTDG{?Ak;)QY1G4H zaSD&!<#67-rYuT|@MF|CMB3pJ3^0LzzI_Gn_@7_FkrjtL)aMLyb8Tfilr+szin^~fLPxwcq z04(X{Fl#~}uH{({RnAL6LZ)C#w2J75WG)mxgK}AGfv8rTwOCuv)X)B6A0fI)VdyZ_ z2%S3%Q|I^&qz5$02TZ#VagAL?JU3CMTagy#+wh||{g@~sW?cR=FE0OH^}wrAoZ9e% zhw-CIP^gnRcmLV8;7w-CR;-}m^W6q;KqwVjs*Pujgbr&IC*{?+Fx^?ZZtYXwW|N+Y zvE0ugZW^MSwq+Eeq^F=l_M%vsFgPNLRB1XjE+@uI+L?fx4`gS~(R91dczm{X@x>RD zyzbd|g$QqHo__t%Uwsg#7EdF~8uTmzJ-U$}ADtK0VUXuk0sE%N$Po~JxoO|xMNE%5w3>o4? z)beAA^T%N$)?wSWc`8w)v4VCv50)u|ROgsU*0HU%inzVX+0bYRj0VWf5V_W}gjGaW zpRjf{V;ruE%A3qr!48nlKup;Nm0A38S1CCXm&Ay&7*BZ8FXQcR_)WBZc3L>1w#l`^ zP2c+-Zn^C?Qt*U8cTGv#UM)fhfWy9y-3Svng$p`I@W=~eI1H*bqO0bve8g)DgZ{9E zn|_qzeILCM3q_Y^t+=dJz`0=wEgd(al`#sF(|#Z2dA{lFmGdeOH*GBCyej4binPh0 z$WO`XEB_tz<7Llk8{}LW3}dxHoewxT6PBB{#MW!K5{)XKYTYjBu|5x#;^n$@?T7kg zPFD)4&WU2G{P-)CUS$1;=3NBftlfkE5a>o@3`$jJTxKz;y0v>4fx8TP$5bM4ddtYs zZo#@rkVnT+HBo|4JwD|_YYO75a;`w(3aEtD;haqRav3tBvP1p+R!3Ymf?%KPOrPbZ zlxge`30Ps-4m2U54(n4&ze#h_D~^d{hfYY}ptrKZF^NCymdO;^V5cOiUnD6aU9g&$ zQ)Nxo6b4D7g}B|}AujI>v?gknBF|xkKPS{hX(BaEpeMSBCnmT*pEeMy7+HG)_S7=X zuEdECYz@(V`^}UqM|?M}iH`JC;w3RF_GlhqfTMFI?X+;-DV~P@kS$yjHV|f3k@5I6&WiDEiS7kyK zNl&e2C$`AO2u)^8IYYokh#Th!nX0XZX>B0;3l$E6`c`u$))vGzkt7*0c&kJqvJn~S z!dWS=GK({zrd(7*_3?_4aVB(!sBv0dR%yIVQK4fJ84Qu5&<+Wm08@n{Q9(VBG>uWX zNGRmFTA_>Z40PG=4FRnUTJvOAjc_T;?K*CN*LlhK#$y3U)P~T@hPV|O3X~-^BUK&> z60I$(K{|L40S-|xA1Af5Q8|i0*;8sJDru@KODJJ#v~11Lf0ma5hWU`9YgKTz;6$0t zMhd-QPU?2(c7aX|Br()P2fEoLWI-ZS*^$p)q+T2P2X@3TTYsIswN;eZweS;kuxV^K z2l9e$gyv5O{>yU?@Sj2R7aDe$Be$cnZr5L1BRHyGv=Y1R6o&r5jE2ag7`ZCg;bcuF zz8egHMhk;h18!;(jVxnT#G*jl@52m-a795efg|mZF50aYW@qOxw`~sHc85q2JFRvZ ziM8(8hoAiL7W5AvLOY6iwgx~WjWIDjg*0oi6E6$qx*~d%1Nn%{F2(&n{TYVqJsQNz zbAze5St58Wa#M=xBxy?e?59tkt`hTFCC^n$ONZ^~p(A+PTi%Lyz5l(Ko9NPE)q&o| zN2QKcA@h8M0s3^!lnDd@uZLNFtZVW6`{@k_xPSlsIPbjOn3~M++E>2!Sf)ZqO=4-*)oqZY6&J4t;Gqa88~(M6BR9CZh_@3ZSRPMsMLh=Q^qr)q;- z!n5Tw1R`b}p%p7U@d=N^fBygfCp`7Zzd}eHpGm&xxzEN6Ui@<0x$glOtD#IJSe^(Y z>(Feb_{3j-08hN)Vuln*ZQLKNn{N35zW2SG84gWiT4!&NBJ{dk2IWTR6pMJ|_BA~6 z(kZ0z3L8z5)iNTilk=Am3vnAizR%#V{_YN(ST;yn4Pu536yjBuhcTebme(Lzh_08T zh$uB4D{paz$?s!^--r=fs3R>EA@BVAT8#wL%?9R@jOL=&`Z<;cBOaQ~d9RMFkpbcW zV-;s9k#t@R>H7Wmq|!!LIfE=zl%r*GOeb2T5EM<8Q!p*WgAF@EYp{wmN~nTR)J6ru zz_qmuTCqw3V=7@OmiR96Qp=~j$4XtOeAcEcZ%f;u95%G$^eYJclTw;U^jacppx2X% zR;?7R>^Lh&_0HOraOpmVsB%HzUKM|mD<{D>)+zm;^Sq#_u#S51QK^6C7e4V<@Vjq) zGbZL{k+oYaV3IJFS*fZKQ$adabyK#Y$j}=rYR)0h&qvnL^ft4m4mjdBhMV>@7{(5R zq)mkZ*O6|pK{P#sW;4OS#hH6K`8mAmFW!miG^SFBZe}!y7K!|8qt&9pvqsin z6+yo#rP-WY{tj+@DT^S?iYd~J>3eo3s+ zY_*XaD+YP%-ju(tsi_%UeA%V=*-!4opl{Hfn#N;)`SFbM2~;iA=mbHp_wL<`BZm)j zX6U#nKGv@g8h5|j2kSk2@PGer{QjHX$SJc3Uun4+|15n#Fd8wvOO`N9T8%Z@9y|jX z@0IZWSnm&T-+_bJHJ@YGwq1DR@4f~*ckjTTzWcp6eP)F+IHh93{=e|B@HrNsqgN7y}aYpp7Hlkol<$ANk;W@us)`H~iDLzYF7h;1a>6DK5JBLj2h~ z{t!>PoSg^hnrMywFvkry-GrNecpLIjL3M80+Y&!*WxiE_r>hat{4^dh)5DdQPoa?x z%i@dknOK1Fi*VM+b#dfW1AqCMAK=&-OViz>(U8t<^bpQs<4X%odk7d^&}Y%r3<$%< z$L&rS7S%o!(Hwx$FtQJZlxC*eh&i6p zEGE9YSVy#vm1tyu1{H^}qaH4a`)ChYUP7yIt<0_U{L6RZ>dUq>(V^T6%EBuX$MWM< zG}{*X!9}RLNTyYp(=c#}(smJJv>sn%SC*rs10n;7QJRPXY((vWp>E)-KYRc`xa$E4 z!p2-iIpA5A1UCkN5}&9Iwd$CrDy;xWj)l&(!SunUm5}?3a!-sL`90pX6Vu)Zt_ZijhNs%$A zU$+bQ=9u4wKChV}O{Z|t)mP#Cd+x*czWk4vj1_FGNNJ3eE+6EOGE--P+3oXq>|_TA z4jjb!d(Wdmz20zGP4WglA8SZfH;y74IPd^DK|-XBmq#he@)w*cji3DR$N0uKzJX^x z>tACu98hdVhgSdtK9WzkF{Rk%QS=H-M*Kd;5?B{Ug*zvNl8%k9uN$W4<^NMI*hF297Ru@n;|YCcgQjWeSMA%j$~F^Of4&XiF7K-{WoRSt5FXW3I>f#Uif0_y0&z*u|%TYM5Ko}QUq zyD$(mY>BR7CMS7!AW5vJ!L?b3a8p|v&swEbkPf2NR)lsqGZvzUCTWTMj|Q{y#fCGPp-k|zWPm^ci~=K^^_-J zJ_Z_!;#PumY5EDA9k!)5@5Umi4{s% z<>w>llQZnr8~O8RyLjehm*TQ^3(ca3%ofzYr14Ab$N?wbc`J+5pH*#0+oNQcmR_p2 z&#?d^2!R(xrBSv_x61oD0oo*U5K#l%N;gBh0xShmgWeJtBJ@fGj~beAWIiMQ0g~*Mw?n>>D3Zko`E|wCv4nT?mk&BtXDv6P13a8I3QuYOxmY-852ZGxB zd7#)JI(I6)Fc#a20oT%Z*eGX;(10;aPR3B<4d13T&cE=Yl7d|eJwce=fMCGu@Lwl2 zejc*YW3^6%LlJsDs=l36c}?M+szyo{P`G~6#Z+cuM^YaYf~(L^wmfWF4oQ9ZH7YZv zdabf7w{5uIBH*nt{@!s4ZEoQEba-}t7jY`NVx1a#0~A;v4KdYh*3wjI(w6I?nfV{{gD1@AAj}U_v7e`Ux?>F z|9NFvPHYcOmBW*u7Saf@fEKD!L};d?hM1}LQyECO@!Z_6o3X&!+6XHv2eAMCLzteN z!OoqtH2u5(fxC&~qA)q~{(xZ3fUC!6>}1IZQN}k*<9Yozkcqo<2nEOnb7$Fv87;^41jTl_FvY>s5Ck3 zk8uCNgV=ZPy#z*>-?0;~dDV+B(`5ohf9(TP;I6yx#t(jQ3#m}L!j*V``fedwM6EIG zq`_&V9HESYwgYs`19*&_77c08hE_BK*p96P-a1ks49y z#%Ionrt%6pjRtYjMKK%YpgSJWl1g-oXw8MAi!c}3$Xu0@d9_%wA#QAIl?#F^h|g)^ z^O7n0lH#cdUxx4?f(#U-j5c$TO~od=r?&sXIUDYgxvC6M(E37riaczY@i~R zhN%;}2PYZF_WA8}_)%SpKFnXQ%J>Xo#2huidGINc zDRwJzx#KT2b`4_`B9cmn4PDrR5o5*>jYzA}pzrCeuM5}_qV-ojd~&2wg0Fu0E0n!F z{b^SU1-v2+4KlIiIy)430UfIcEx$eZOQGdNEdT(307*naRN$`k#jylS5gFbS3#V}O z*hw-7M9Ut>R5FSq7TM97q10TPR!n;zOZ^=N)6+8;4)3ELO4ZAi{a7ynprU(6-C5L( z-}r2TD}R^z6-3+@b6!sM|Ael`-^0?GRebx#n{f2x2{r~%g3-|h42xT_d*@Ef&dp)H z*Tda+-;IO&4`6k9jrAt*a74T`WlRxiYj95{!zwO6$KjC|P9WAjq0bNV9GWc@n`52ZIYsO^#<)(zV)H{AzS@>a3;FmT$ggOR;o5wSpI^Pop|1(+0f`V zQZ!wG)E1OM#BoB-BaIy5G$s{4YGn{}qArngAu6S{Nw|s6>u(w5iP|<0umv$xhrmZ^ zDGe)OhG6D~0BNrVA|p9bm2=f&&UeG9tp!ocC{+E~1Q1D$X@FU~4M(-wmd?s1hIH~l zGe=E!AcG+VI=P5ip%&b!GQr$ffZpO#YwH)I#IZ|+Y^#T~RSzUIws=@a-mjG)6F9Bv zcBj}8$m4UO0aAU8uJus3(x7$C*l$rZcO^<=s~ghd)GA7>GWwNfJ+U*8c$Je4Ac*f; zbI1uDiVgD^fj+>6zLKqh=7O@MY%S|@D50(isi7IBogb_~rwTeZTml;z znv>xv>)NI^)ea*iG!^bNum?-O1{(~Vk7@^fESx-vGbb0Y?wy8;(QGx*ZMR7VwseX$ z$Ll{U)iKP-;FH(>Em4PD`NYR_SAR!$U2D&&y(%SOCr*|%Sfkv}Iy&8(PZN3>SCZ)x2m^RwII>T-K zzeAJQXF)+V(z={Vg#G@AvX9lZbsXG(7zYm>#;N6H6o^m^7>LmutYBqj9S>vAYng@kXuD7tnijrv z(^0(V6F1<{atak!>pA=ze~4Wf2#RDExd;1Sd2L`h6+32Va< za@|BTiqME1Qf;ZR9w(ZHVQEjq5d}0#oAG|6psA|eFiNHqOf-nm)}E8=(6*{?36eo18rA$AbNsaIZ1HU9q4FLog0G6W^0 zf;V&)jdqI=_Cl4dcY zDn1e;g7N?;R0sCpfrD5$bf`>~4Y4MVXzjY)E=|d+F+{)Ip&Z_u}NqWoFuq zxctOIVh$$vra@tVA@3iUl8dPnP^vqN@}f5$SbkUGBV9Yhwqyan`sfL4n^*V8*y3BoShbCQ(3B>o!5+_Eh%wq_4j2tjLS_F zuh+gPG`d67NHr3fX^AM)b2zZLisf^5pc6NtRS$_Z6*rqxC}_4IW+YmTr9Q+LYLQ3 zFZqr2AwyuQnEBVnA&}+zxr*bG&B2j9kfOX!$zC`YAx>I6FiEJ+4os1vKOBgKtwL||!N1LE%)=(|d5V}NkJ(83}Q;Ok`7$%is?x|j{ zhhcw6FhKgr3u4*yDZMEyQ?i=^F0Tf3tYCXdS|H{dYcv|N`Bbe~V=3}8W+b9SprODt z%_xO!c3NmPGPD~l`f0Sgh}&&eE_785-V6sYqah)GNVB{cQ4~nEb?xZCt*w=Xw*pQ->RU=Xg|&qr72?@3C!7^fRJ&;PpnoLOW^=@i7*Bih zlQ`;)BDA_4tn}8Xhp}+#GiHy_|T~X{~Cqo=bm*YSkTk z5>L1^!Ja8g!(>{RbqR4rwwc$*xF&8t09^Co8*taLHRy~114`rqoO1j!LjfdNy=TEIk{*9wC#XyCO9@A>~xnejwn1VR-s2_aDQ`(kgy=eg=;{XA<+x zjO<3gH^S2T3Qn&NXa;2H_k(;!lJd1i6jUZ8cN{BL2vxB|mPQ2UZ6qnW%?3J&p#UU_ z6cST#Lmk!ic!PdsV6e2RF|e9g$VbW%I5%8l5)T0I~zk7-`(Z{u4d)28M(%l<@BPuVkHhjH9gr z)yWtN#+B8DF?}I#+_jF$n>!)ivSn{T47shhskv;mymT7Mjz~<6CeI7rjBYW28tBHMx=`X={L}^&^4t&9hKyYWpmRB;Ub&4v+rOiX?(gW>UQEH3wF6wOSDX7 zl26Xz)&J{5m_Gk}!eJ|ct9|!#pT|uf`XI2rN;PD11M&4nfNH6$O)op z%}6St%x{A@g>JVfGEqq;bFN!i$LKFP~w;k%y{u)Nk z;OL=4XpluRL;@H`F(#%b@e7yzLW%67jpox|7nf8Z3=#;k zMUES&s^m!`SF&%)jUA43>GM=6Ny|idj2K&AS;fD3<}=V~H}QopeGLa5I6;tKGcr~4 zoFZJ-jd2KspK?!MJfoT;w!Geq?bG-@5S! zICA6^jPVg|M(8MiULsf)LN@GH3a-gOwAQFL#uk&>;HZ8xq-I0GAq8^C>|&sd8$|%8QX!BIxAM6PZ;fXedes!28m#@M)`Y^TIlAJFx=&;~cFd z5s|qX*A^*D#)P0_2WH2fGwn)OvBQOmijo|a*9+L5055sOJes`$5~^S@K*hAC@Yg>% zg43gfQJ5t$tdOyy>KBi}lx=ukA_^5~Zpsppt-X&+cwFJIWjT{Ylu=8`>?~%tZ9{KVFei@Rv`NIGPlF-pmZX6bOT2I(OVeXLy{O!R zAz~lpR_Lu6x#OR0CiwS=6y;DdvW_G|Bg*Wu%o7kk9Mv()ZZ+Qm8#r=+q0a(MPEA`S z(s83=$TTxs)S;6!L3d(;=-xT4;E2@U#}!vRhI$L$ptU_01ksSy`%GV)^Y|WV0f8Uf082+3Y@0@alzSX79 zxRN;?a6y~2O)!QOt4Q0bV7m%bwUmJi@C5s=ZWB{ebAVYuBh6T1Kmo(7je+iBlqR&+ z6(v-q!BSGF)E$U8elpr}m3E7M)7bkY;||mG)>bogfrl*|v)l>&yL?_0yqc8E#;BrX zH-+oK7@J(!taVr^uU%ZG@naY6#!5S5O>{8~gTbJja@Nl9CRSYgj-<-MXeJ{~GFNMa z%LOVyCvc}Mv;fOmuPf`Swk&6D^@>D{Ue?z3ky*=J~5hFxm zA{>;1R0_#(j%bdZmN%+sSR}f)!wZ#*8s0}LjB<#gD!HMUR=QRo zz|?h9f{>XSb1$3EA-`<-wAoy77CVt2#&e!=KHAM5zb(iV1hfd4uD*z7aQ!Wd`1Ch^ ziX!b0E%{c)zux>VuTuukHoer@X>97CgkVRkDBG2yVInLQP*GZXY&L)~YUgER`G@)y zWtCA^_JiNG#T8ZM;JRU~E|@cj!j7ocFxhOQ+sxpaDfd%_XN*uzs6FHQ=LeJys$t3M zd4z|tkwV}sfqco!7lfidb*)fa(EmK{1yd5&8$JooiLnEGVVM$}J_x!2frE0)jIm4hX99i&kok zo}E!@nkR5&n34pqat+s4<$O^6VH4envq+i^q|J<>sM6b^3`9zb+)tK`X+>((6qxFC zsR17m#+^%&kvyl}yLJ=HZf|Xk>c4YybA(Fbauw@ud!v~(GVI>H8z&YPiH1Iqv#A=k znmE+f2e|fwAIAF2UV^w<&f*H^H6_b%+%u>;+97p*LVYNXU`b*S+Gsp@lS`aY7aZe8ew zk}1xM9IL$^7S5c;{r4ZhzWeXT%IZ2%u~)SzQT1J54q*tiGJAESyXl@4 z5ARahLJ!b!%Xrq~x|rWq0Cpe|NiL`q&FHhj0m%&h@#ZD`*T1|T3&R$X0J=I24aU(@ zb@LHmwvpDGkX2#$&(+v=i55hHC<-VDN}4lJL~8JHcs7DArYr;+iR?Q!7DUL^xbca~ zaUrcBPq7iDh7BcIwZ;F}ONEZ<^j(43$q7uiQVdohy)=f@8kN9~oNT?ivsERbsH%m> z)%9*h)+u1~lxQ&IN|gYi5W=WH%`btEDy?T+L!G*k(U*Cy%HkYn^A0Qp?Jbl7#v0~M zLPEC%#kO;`N%UH+^`Tz%`(rQ6nS>3VmJZAKI*@+28ec5CrPozLA*RM|v^quO3*)wU z;Psf+1tTI4#z!)(W}70tEKX>G*~!DOGTq5s{)z`?%Fl7t^sv;OwW?->oJ34cSTt>z zDy72^Q+87X*jaAauEY^k9!YP*mB^_;j@M{52^~=khviI#vWm#>#vWOCmWn|J-Z&O{ z0Tc1yP(W5hLL1?;i}`KaDC-&xSo0$^LY?p+@l-ZDn6Vn`3THf4 z3^MHaZVdFt{~x`aOtbrI%=8Gmvt^vKtwBOp)Q~2sjG{aUg#nUm8y0(0_~56$j)h?g zt5ZT^RmSBZp6OOYgSCH)La$KP$p=q<|ND$CNqHd8JCJ^_60jOWG)j4fgw~=l2#1(= zTXQ0{Qk$v(DhT^&>&e9G;cbfcH%HnOh?yMSW|M|*k;GJ*XC{G|O7#U1w{DSb?>cBUpqeKm#EOp4?sO5w zj2iKV+InN18)}@ooDndEE(m(;9ZQmBn3|eKdup1IFxH{Byo%M+%Vb;)mCzWY&6BCb zx`?T5v)njzwdjh%Gv0E3(>7Wy=zK&P=UKCjUGv+hc2A`N?;LIJ18a)p3UTC-cHH#$ zhdX9+ImY{&k}94ceww8`=jLXxuz0#$D_0J5YMy=tFkD%~KYiny__zP|ImptOsXpYr zOxM6mGAy_>RLvT>+620o2Z22^0i7!A<=gzZ!61jxa7gIF_4OY5{Q*h+GHp0#9i?WE z)^Z=NI%l%4!i0B#W!DZ_Ns+R#**pF&K98}pr0>yOIgZCZj(NAr)9jS!O>oeq;{>OB zP5k+XzJ|L_jsV?a`tdEu3aj(2#!F>1wK9g_fTz#V#Pm^lyl<*scj%l8dzOh zJqANrL@kr%QA!*1yu!{3sf-z5hpovUc1p~#LvNUsX1t*D22)y z)ub(yoX;4WHs!_}+4@x;POW4xMWS<2)}jboZydRo2dQgcTYgOi$<>k6i!HV|Hd3;Q z>3lp6#G20rGlNLkXA0l5{_lX7fC>)cb5x(C#nmgFJV+T~0BFP#A8SGDVu>atF z=;uNopd*TEnvE1Tib$|(dUlpdYyQyN(5up_uw+`#3YjfNF%2_M-sn@!Sm@Avy$zO*%+4>`czu>Rnt@8gO`UxAA+xsXJ= zLgwRY7vl{~Q52Pa%Z5D_96*3>+7hZ8#F*;AOr61mg`q3yuNmIQweVww16wp=A_RfQ@Lxm-|Vd=AJ-(=SHj zf#}$$N@)kK>;-_v2nwkPXtFg~LjxHQWj+YY2rxmvO#xGoDdyeAm3($5x<*VJ>@lkV zt6HCCtMEXsLsc1a zRW0QV!%NoLajvTHVQ#Auf?R+%P!WrkT>ZiqVVl(3A*m=0=al=@k?~7Y5k8TDK0}dQ zbr3fVPsx)~QI+2M?>KpK5yRd9`FbDuppQXsotn(n1d$$I#73H8V$WVei?zBPtmZ@H z5d#A|6O-8c$X_A?K_XgKjzy`8oEqe5oM8Lx9A;;x(Ctnz33gtfzrK#si;L*3u5kw9 z6WdG?Csd<5h3&h~rRKFuVk8aLFz^O8-?a_L9yo?W`}V^eJA#RBn{+j_caE(V(niMH zM%VSgD{YisZ$QS3NRGu&c%w#+B9y}zWFPCQQOPI`T}DRjc3Wn3 zww^`sPHugrhfja%Q~0gd{}yH^C#9S>wiP*xvn)hYYFLzwOm=Lx#s=zK1>xm1yT*D9 zS3)5}xgzxNT&bNehFm_Y4wEcfcb2nUa&AKXZ)&mv;j@f!l4aPj?`I(`TMDy{v1XR1 z!&SBHa6PCwGFO2!#!i*1Mg1F&9Xl3H(Pq;vz-Y*CIke z8C*FTA2E0$aSpq&f>x1W~;o zlrvSZAn#%LyJEuLFz^?$C{m4L2b^UI-H(nG)3?MiT3G{)v_XL`o!z*J>u=kS`wpED zg+>QEwr*yu$FcF`mb*B>2c;hqM^Ur^yDhiowx;}al+Vc78Cwbjn&017uT!nP+>C%_^*hpsH(xn)9oO^sYrAGVl>Qo zxfC#wHd)jK3eS4x)p+)=UPVJh(4kbKN~7m*ttI2`g%zD|g;GI{Fxb);9nR1#>UEq2tkCT!$Wd14RXtbneqFz65Z6zL_BwUEL0QH9W_P$uUORm8OM1O%{F zGL&&>uakM2BL|M)8(;qhUiR{r0wYuYTiN86^&J_;_m{IjVV%8^763=-7%*2?=&P@;}YZWA4Noj-{+X-yE1E2oVUATGQ z5{9l(267wY@dtaA4#F}2&?Xv_`dK!t)Hsr$g9dJ`CYEVa6zr^t4cc))L64q|s|skv zno(84Zq z=wWKA!}~F?&7z7FEpbA{D=xBcLUc)^QagiK2#c?>+kV1qNut7Js7G%ICF2???RiRE0?hpUL+suIdZh^dnx zW9)M*54u`YgRGaOHC^75sSV80aA>WZ=A0{gVxjj?Hpo?ddOV1gqN0nZU}HRUbM+}? zJKjs+W_I|S>iKDB%WQ^FZr=H$Ko=|6JrhB<1VC=3j}~SCoPtqZ+-&wHRvc7;ZsicsHLhwyC{<0jD|*B21ntOm2id;6_E6fNb7^DV+AR0 zl57EhG|7Op!2yfqva{6=Q8d8r$P!X9^ydh`kodDdAZip+9W+Mx0!3mr$ZQ9V4^lFS zm~IA~9kNtKBKAbeSt`tYB&hhCQ1&J;428LfszO?~p>2w__0{sb!We?8l@Q%ok_O(G z4t3hA;7H;)V*Q3ZM13l7RjCqQSkD6fU*g_8*tYDf5BsgP_TFc@)0Q75mIqg@<;4Ao)Qz=U_%^%Od^mF zh=4$dmO$#38c99(8}GaKo?-93)=I7K`_{0}Irlw5k;-XZz4z`tcb~o2^i99-_lpFB zJ3Bkcql`@{URvXW8F$JM4~@66@hvBDIc=?oRyeH?6bzGm$LaGk$#}kzJ`Aufu?tFw z))YzYk{7F^nwO^NgE&#J>Ey+%VdUKD`!a3D$R%R1j8q~z=f+o^gu5ZAvW#Jmg8tTf z(IR0POWTSNeFO`57v>sMud#pjA`TZTOh<-c&dV0JU%!E~4_#w9SsA`y7*qd=)5Jlg zMT^59-9>xh9MwS*%adoXgxQffb@t^ie;K#lxRG+y*I$1wpe#glBUjA1o77{#XN)7U9#0KclNBDf&qETAnoJ`t`Xuk=Bh8xUNqrNJ4s5 zI;Eh9)mhins}&}S@P>tluB$Px5BmISxM^Z)s=tj;f13rXA#)y2uxMq3h5CUuOMD^agx6<0vw` zj-N}X#E|y6|D)V&MxKj=Tw-xjAtAD?#7Xp%aG(n$w%6P#>q5lRwdBbiADysbo6$@e z6YB=z!T6$!6w;eR{W+s%YlB`egv;d;w{K=$bnkOvdcIr|WH&@3v9q4iVHD+ThP&Hd zmQsZVzxt71!MA+y0~k-61W)#ok_=AB!R{{3z4a~ZfTfg^BZp%>s!${ZgtW4aj?t=5 z0%u0~l;J2n8P?rci}bRIL<$P^h#M1$GNx&GhQ<};v#W~9akSAz#N|?22fD+g zVF;yPouKWOBv`akHKx0*%y*sg)WQ1*kfZ-eIP+!y(z}U%lm9`?TEIDa9>jxh4>N!63z>~PQy}k ziI=IUL6(kdWh6Zv0)yW1xgthFmpb*QMj)=}sLyml)pk$AZ#1~1pNLmb*9~%aV75ub z@?*G&8-kHa7e$TOGXYi9f>o}&0E0s>IXDU%hoSRDs?HCwW4_27D?w?X2)%PJiA+I6 zxvYII-Dzng6p~npETdtD+9UJSc*%!5MQbQrrdQHz!-7m$)<(naYsy@MU+jcLY)hhpcWI z0y)IA!1_T?2V`B-fZVuo1J6JAJf8X5r={tR4Ty4vihaSeqk2m0`w;^h8^7Y>j$yUd zKE00@znBZr+1HUn%R?wTvWKOG`B_ECqf+8nFm;CZhFUz0v#6}0$ikDTSSO)=M`m9; z(!4}%#|-TZbLM((79nO$2Yv|?xAN#G}#L`!ZUMG5|$`4h~?pFgTUw)0+ ze%qgjwD>+2(7B+Vipky;ikV1%#$3En<(`hHKZB#i3SW5f1|E83kBta9k06L@$|
1`Mz;3~RZo!kF{Hq2+*5`T7N!p(;Kv-a6@c2%U5|lw9yEb*4r35}rgD z?W*s&qx5 zF7)k!JYzCyMhb(5`1ASTkE7{Xqh9+m`p~I>8s$#Q(eBk9s$r0IM2EA5RFjUDCX9= zDwnvZXrt2^S4D4%A`h}~QSU%0mNTX2G?FM_$Sxg5R%mWSolcuEENbeACY3uq%IPv+ z;ds7>4}anX99b48mM!X6YH%pYL9Qc9w#~UHVV&@jx!@s)B74RC3Fnj3tZ3+j<|xTR(-JWJ#h74G zD2%t^rNcb9LJgD}5hJ?woDTI`qwDt2sVV036&2MrxjrtXi;0UYoH;&cGX{sVICm~a zhKdc7i+Ui%`>@5?zt`&7_!eol%;#7f-X+hKDa;-BBDl=`!%AKm(UA}{DV>zNAGZcW zlKvrK{u1gazuyGcUOCjJp}f2VD#DP<#qCxRccMIsrUlDsH%Z?ZmGsA>4z%%-KX(@6 z5uAZF1=UUwY|9y8pesGbnWqOQ%KoOpP7WY2N2zx)Y`VrLwA< zD~irWB1lI-NR?ob;eWOjEIi2#tfS~JGYaQAc933LP$r9}Z8RZ0m1@_LU* za`$ueCims!y;alHq$!;`oqhBc3iL_gSvVmcoXw_b!J(i@sT^bF|Cj&ruR%98HOZ4>F^1!zU#zm zJInRrR~u5J=*1x za=5PRxcfYKpM5XT-D2SBhB`VdEd89Hq3gML6;hGit9=4J~%3qQ3>9@8FccbD#S>e)uo^5Jz#I4-^v)kfssO zlOc-Kw<5WJ`h8uXs*3zzTn!OT#f5C7b?DcD2g;#76X%P0En*y|KTA(>Q$X5RDn?jd#rIYk*p%Hhn1glji4|Hu0$^+<57_}YF$jF zIB8_#S8V))^MgJRzCnY!&MX5I1y-7v5qO?CeHv?6UdCHXohn8eN+hEo4iE2br1>;f zF_UmJ!_R;A6@2WumvDA>ioIGj)0wAl~)1; zjvM~$Jd=KgvpA&z)0v%9QoRWIctmLljivADcfDiXbgnOJ>iDmyo4>~0MT?VV*oSrM zr=jr;k#1)(YIJ5yPq5&4=5sX8qJEi@DxBX{mHI#meD{4{e$WRJCpPBMYG?7K-~I$! z%-DD)-MvJ>)>|&{rBsq9z8UW`i!B9N)(S6GOkYUorDc#Js=@mN@r@x^jCL$I?^sC( za_~a-FI~Z7k6$C|l2$aVYi-cpxs6}{*?)}6IyBDG!5l(E6IJz!=OjvdqP6IGQ5PvC zt|4%)muaclcapA6PU*!E9^Ay^$;h!TNHDfe8EDI1(4ab`h6@Vf(TB)qI+;>_yX{si z@J~^d{O+0)d2nz5@An0&>&TeI9tHlxWvyz>^J;sSI?c#-lIHuEHZ1pVFfuAy2U2QOu6Smjpx?=z@WZGXMLs?ikjp}2k;X)kQc?4ZfFbG6(D%mK*`Xd82V>-WWg0a)XvQQsg|pFUd7yRjZ*~SUL8(vD z#n!THEig_sBUc;Gg@o)YbjBi}0XX}a4*r1>6;paa2 zD&FX7xDaL-jo#InfD{JS;|j0ICg8Y*lO#}zkrTqQjg zDZ~fk9rJK1e?}+Ys_Kez%aN`ly}z8YlAgK9SL*2DX_N`FM9o1M`k4FlJ~;FCV{%0r5zx<#ge zDmUv#O2KuOS_7n6)`$_ZzR+DvSrLJlh70zS-SnWA8o?o0?jHn6KYO%DT$TY)YYZM!`7Njfe`iMz31k4tSZ;D* zx$J4*vydjaKeLfXm13b=XV=l?kuSNZ1jaAAe)Aj3sz~sgln1V36X6`Upo= z#eKvroyQ_>YMXq^HQNkE(64b?Id@?@q84|N4P>?yP zwb>AL$U%p;#3pJ*&5KtaxCFOa@OuoW+A4CKd4H7l$(MKN*CP+L@z}l?@y(ekzT=Qn zr*e)ll~J*Z6@)V@I#S}4rOdg{5y;>U+&%pZk3zacWc3FIB;xUt@rb$7QqoqSpSdH^ zq^2l~GRK$I`y!P(W1`CeQ*wA01|(TaH3<324d4@`i4XbH9ySiXM=n6yY<{UeB!5iS7Ce{Q#hbt=gb>Uu5y|U zUpqNt!EZi4p*l*_j5sxf%>@z0do|BuLsXN{n-NPIKktcBo(_%5WMWJqzjMCSO;;PU zFif9qi3HXj(hU}|8bF5Qm=pojAMds084F6Jd%j|Ag{LS+)H!07yT{NckN3KEapeou z1ue#qBVLN!lth2UgDN$Q@+eJRXd-a$krEbJm6|(Q@lK!i_=7PB#v|6GT+sJ%}Q<2+!s-N5f| zx*hRI7ow5DJo(B}O8V;6i*W55EDv2V^jk|kf$D6gg!r4mHOjG1fT09kA2u;$hc-qa zDqThu?87?es|shZ7}EIrK1VVoos?g5%rdfRMeo??)V)qM5xKi)v7OuI(G=l= z&WIirkvMKh8DT7|4#x9crwC0ngA+PPq3<0*Xz9QN=i-$vrZzlp%}Q)q#B`kP;g&AJ zmksqguw^ExO;e#FP4o);qXvD&B#ZOgccG3KRP%7*V9-2%E_t^cx-$%~r!LkMzPq@2 zdi4E@`V>~ypL^00h>BJ-<- zh-XV(E}g0O@Roozi0M7FZuB6pmsz3Hyyv^;D~ZvEDiL^o?G72i6~CXZrpiPT&$&r- z+&ZT#B*MuEL)kxxo+N&fTwnHumG>D}lK~+*9_8pWFi7a$v+qyfv!2OK`yQ*+9Ln;s zY|&AwbTXMx?%m1Z(`ZG-{Sd`37IOll7%H3_Vx-k+j=|uAIlqP=*2nGj8&HCIeiq1(}Zlkzh6WIhj-Cuz&s>7B^mmsyMwK z*RjzJ?TBx)8*UWJ^?8Qe(dO{I(7F56 zksaqz3#+Ub0zWez!5R%?E!vKUL-(HkO`l}9xP)@!@lI5vuplF7p-M4*Fh<)uyY@Vi zoitpxp??^K3LL}n28=9#z45hK|cS=&-#X6>y_buIO=UR>s z3oosF1;hXH%P+w#T5df6aQ)~6H($Swr{4ND9GpFacGU`cjT%I7eB(N$M>=oO8{qc! z*RVJ`!sRPhNC)G7uY}_9Ol49op;AEsMf)5rS(mj3V$u8m)0Z!St?OQD=}34pGRaQMS~e*?efkh&=Z$DV9ake4g^{%AUrJq@(Mcqm22toKRU-9X<>yXgkm|F{ z3QG|^3Mu+|VCgJojYCE31&;fQX-W5F zpbR3YNMa~8__R34=`;2Egs<%lzhyki@xq#Ol&IblJ-zVmO2muElO}sCn_3yKq#JO` zX4Aq2BT-L97Tm{ngnK3S26IB^O)(+NFO?|ZuG5Fv(fs0*$k7kihYhbm`B z__E3Xosf-Bf=OyO_&@Z!V9;wS}d@XsIsOeQJ(X~-6J-#!N3Ji)`E1{@4WFE zcK3FfBHgJ%QL$Jmj=lA;PC{fDytLr0QQ=+I(yCpgCP|Zab0Mi2`w2>|51*l!+d7~b zIVKUJz>?SBU%qm)8-%1Vrw*3OW#Rx+0yQov?bE?i6h>n@G4G?|GmTF7#G-3{pt`Dx zDT}G&5aqRnhDynx8&PUb^6-pcv_wfx$BB-Tx03tzYtLM~s18QZQ%)y^wP*W2_kyUL zmL{fz`5b31TwpamNo}>Ojv&pnV;( ziJ6zG`vMjS>z;tMZ0Go#)LZ8uoh!;0Af7I z=n3l)U#BXZCD#koykkd98zl&;>{P__+7}Gcu`^r=qLtTH0jMb;&^|>RZ7OibS0lh2 zr5#geX@iARIHHVGEm|rdx>1{t%6HD>6uAQ+r3f63w~um|ZxF*7c+ZObAn}@n=5p0X%)}F(PVfMq_I3 ziaA~ljq$j_Xfme8I->0G9*cHGu-4;~W7=%2J+4sC#;EG>{v8-waqAX7{&T;CPyX^pV5T!pG4>~^=tyY;>UfrM(p)5Cjw(wO8_|%2MLHUdwx<8s z6ErC`N!Rt3Va82G{UPhfn$c*SA~9dcScU_kQou4I)V87k6~mjg!elhTXxv~no8ZFP zbGUNp0i3^Z9zXlf{ux%%&B>momZCn771gJ#9G)tt!XlGs*yLIc<@lL&;22$WJLe6! z&b$!&GLQ~$_R6>p5Dt+&hM!lJ=q`9#7is15&KDSMzRspw$@dZ*15ct_*$5?VbZM}q zbgbpxqSbtk8?V2?{Wh*3U@Y?AcD2BFefziKE!Q50YZt)s7DO3sx4*O>3MCSB+Vi{+ z^MG==wqx+Fhc8nP)mRg(l=q2DGa|KSTHles{o7T1mW(`YF$4p04SJ-J9 zJaRC`biRYJi;!7MHQRPv;m6+pINW%jQJV?`o1TnAq}8Ko46k2lC0KP!jvJgCaM8J> z$K-`^!lg4aV(4soFT?D^eKymY4YlUA4^k(c(%vO8xZPd2@d5tMFMI+Y`^{%1)ptr; zzW4Vx=}M(SND_>^*DAT!G$*KFguvA`_AWhu`s^-NU@6Oi_PK0073r|$hPCKISopMP z%y_)^Y-PuKeoP(txonH^#fnful^I1JydVcmjLlWRg#N<6&(?x+7-JoV-` zVYau2v*-3PnXrQ52*A9v_{IP87tpTeGCD@|^tCXoC_MDwRs7%&`~c3MJ;)7Up1Zfh zbB=_RaA#*mKVtpu5jBDZBS4L9TBq*cy57v%+H{!b=Z>{6C1OXBD=br>%i3^j^z%+& zn8o$85SPcQXU8TS9?>H;5?PQDwA8xCwJ-p;Zr;X^{P2(9#_c<(3>&UQPAN`y-!Ji9 z-~K*)-v{4|qnE#g?&eFVjt}8iD|B^(H$SkCpZI$pLQ71_z7SQ~e1&nMRIL*wKFz2w ztbwdNuu+6{a&0G~Q49+kuEEJnU;KU@emKun<0@@v@d6;8|r{JsC<sJBJ(!+GhWoKKByIoe!<=5wj?(!t1$QR^A$ zEnciEv0wZbVxT3S{t7Ov=xT&z=b*x7bw&9xPLl91Wv|s!(;Q;jOeWd65>hFPu|N&x zr;-Sz9&=|lJQlVmZv7rLoHMB#TS+>EQ^rh!#+Rxv4+n{6A(mA^A|L;x9*auU{z7Q) zw1kREutcs9WU4_?LfmtPfVuEv8`AHh4m<{d=D=&c(p zsHU!Q_WXI=c>Pr-A`C{uJM8Zruz{qMN5@TYMcOlL$o>6Y{NTUw9hi=)s2`THtJ(p_ zckZBXdqE2!7ZVlJ)M&5p)SI8gfAW|AJ&aVzl3e=qo3Hg%d%qOPy&NQ0=EKQRF(XcmrRO0#i^2=+qa2W57QJWs7 zy@~qxRaEmMpk1L`7(8=v7muGigU{WVC+IbAsr8&A!^cTSU3sB0AJP8qT7`NnX+R1T z4Q9K038Ef*Uf44RrEDNHAP^(Hx4(x6AH2#DmU9UlP{P|ejp(+{nkK5&p4P>gk;dQn z!9Ry@eQY12`7K!AiH`p;+K5IT(O3+aHSz&DIQ5DYW_VRXZt43~>Ks=((s@**RO)?4 zY!W)vXqob7a8^dNRZK%kb4yIk6J2{sr)u6LLPkrcQ84bx$ob&_Cu|$iV-x2F!om#O zaqZdc62m}mB^@bqrHiK~=S+r5m-7-;HbrBdd9b`t(G*-*F}F3KEm_Mi=QX!Jxk7DW zxOg?&nPp?-(_5seNytglwB4g+vTZ$CayBxBzTGlfMW$8sT970E=!VHId@k zC5n0anRnp!t%pgvciFb6M-7F@eRmIJlTvv9``=IDe-gY&JzGr2v79fGEb$=KA>$v$ zmj$lCNS)$6!`j$pq*cDQ<@@jT{^9p+Y92+L!>vno%QyWH>RaZzmDF2O3PZfdnEqLH z3z`2VabuBZZwsia%C31m8lfJKVMYzAriSlZD7&HqFB|~pcMkB)Z+iq^`sk;yR3nN0 z6BcOeO9}H-9oACiP&YG5J4q3e3i5SRGeM5jPnA}fO=lQ|NT=_q77|~x*$&Q~J1gwm zr>vVT(z<07tz++tws`wvSMlyQU&ZL;4QU;8f+A8|Jm(Cj9{$W1#0104Mlx<%3qO`L zBPyoaNt>D8XOeVniR4oQ0F2Ll8R>I;`g0RPyT$vLf_?lm!fBN%!ox&pT@=LSY+No| zk-PR=+4EwCCmR1^H1b%LOb@XSMCp7vH!v3Rz;0+jsEFRSc~emYeiJ9ePW?j`lb%ZSCMBz0zUgH_6*E37rYaS_t0m;?7h%aZX8 zzR=QMAD_W>d^Rpll?7Q{gyVVP#+-a@D(NmY@K#H{*Hb>4KgUV8c%EEcSBZ+FX=+O6 zRHnx9@tnvS$&g4YfYUz$6?Psw`+L}taZKK0oB|h-YnhJv;LI5uoHPr>xw@7P44rImo{^?kbQJtKqs*-(>Affq1hgulRN)HTmB%&msj4y~XgF%{3=$cz8 zIPUWrKKC$PrOLlYhKs^ERTTq9OvV1Y7cG-q)GZAB)(emTnHGfIeR z99H12Dl_SJ*3sgQ98t<6$=52Bd^KrY(_k2#7kJF~59%K=nn;t)=nE}IBzQUD{z$1- zXs{v7vb>jt+12|2vX4+hzK1c*sRce)L?C8Mm|XyRpi3mt#?;95d?y9G9yNWl=jg~g z(N!NNYvOoN3Sa|ln!SZyD7!tf9B3&Zy9el_>YASlahwLspXSYaWl>qdO(JmTK+epwjik( zMoMMJ!F7_3k)A1Eu+^3VLoOMRK3zB6!3K>*c4$bO#abL4AL01uE*dGOt5YNwXjFyH zgbw%7m~-Rups{JJay_0rKg9#*c5&y8F6mJyuQ!mbVUG|PL^Qlj5rM}~jXYk_QNY@v zoa&<3j4JQ-TJN-9zpgI^?ByG`aOb$guBm~3asP{-@=~YWUP(uk)gUDr;G< zcS)njuBo+rYWBqWP#Gx;yF7wBK1A(T+$3l%vlmlMqL+nS)7aSvwYCM%(b`K%L9Gwh zkvy!5p|{+F3-fo{!uGja6%sM@1zBQ(?VAK0ean;AsK%kGw`fy{HK#o| z(g+5`S54|}j$>Z=9_PjlzUl2x;tT)Rr_rfKbZxebmZ(n>vQLZthKgx8j?5#{2a%sj z_^P%j<;Pnn057Ng+zs3zC1j5@&*b>U7B`kx{lcp^@!?N>5#RrgM{$52y=n;&mDk0v zV&=E05vuasb@`K(Pc=&-gd`Cs%XJ6sgd9wz+CVtftg56JuFBBhWSzKhp65%ijf~0- zv?&tOB2K4-WKt4!f>fx2=u&vtu1ockUJjJ?Xho;R zBRpA-MPI^U6Fjzdv7`?2zHQO3<~TXLjgyzIqdC5dWq5Djqc$3C*TLFgR7|2`@ll)# z3XQ7hfNsD!XX?v|k($yGuR@SVg8O+tXY_>ZC?c(lrls3@A^!K|k)f)xsxQ$i?-sFt^LqGO@$it9%sb&%D zN?>M$@L${EJHO^h{Hu>Yi!XJ+sX3lm5=lsK8{V1OO$VUc9kFW@|cM=*( zM|xyf=9phU6}WGsA-)V$Fuqi9S*ln_xdCh9tb zLs3`oHEWf0VdGeVH9DV~h*H1AHdj*8R$r~3UOq?Ju;*Ql7~|Xr!+CssoOIG@4QU1} zn7Y9u4`0EzeaoML-psKTFGnoJb;0bKZ#|}l^hSv+-ouTXZ{YCoP!K&sJ(kU)`lil0 z(be>Bt^R?O)lD>(RmFWtk}_n&YMEk#KsYATPlWe)M|2U4vg6%KDL`pz;C1f{$zc9) zr!A`thB0)&bTUCxfg)v9)=<`7adm|&2jdeAZ&!>|8a~(I%@_9ZwU1uHi_hJ{blePj zQMO+@JEZ-xCH2X$zeY`i>2w#f-5ud3RM3@{CQL2SdX|c&2n(C(sd7fc`-4HVzLy#c zQr*cH6R#xl3G;LPc!9tDiD&UY|0W&H(7}L^@EAu(y|Z#e#Qq}D7>PaJC732f*pdIs zrFv{~t0Nx9F>kRw*1A^0vEN={!{YCB*dGq{2s z-3yIX^m$Z4Y^tQ;P%G-YksB<$q-326Xsx`cp{=ULyNgf2s1hTdzJWmgUcTz`DSag(NZeaoN1~IQ>^Tq*-Jn7Lb*~Y;Nsac zj4nZsTBxB-DyYT8eC8?2c=|Pca4!e|SszhICux%xf-bRPEe7b+d{@_!d*D6>JMRm2 zRjGj#lnW|8i(>}grWq4|7K#W>i!Or~CRK*(@(tD`D%J}u*A4&*d6;|7(@SB0MF^WX zPZEHWv}%X&c(*R`+gY_Ib3O3k;TtbS7oTQVyfDy_*kMfnZwoTz6 zWk_>f>r}^pG;5I-q4NcT!_#yJv%t?Exq#%y0x8t7#^;d$7;*XPIGzdxOLd0BTOrn@ z^-Ya(oZcIcM`*PoelugXrz;7*-uLupUDdFohI$mp(ekAOXJO{I@XVu^@QZ)&3R*MW zdOcsH;HdbmH4CgQSFkHi(*>ipoUbt5nW7$z)0~p{mZ&XK?~81eO65I5k?)!@M_l{L zkhm6U^lMJtK~-swszhhwI6YGv>KN=iM4_=R;OlDOyg2Do8>G-wJeQCpAyN})0f6Z| zD^HHjUqv%LKxao#mJjj1GFbHvt=d7~Oo}AluyZ^;uWVzTDBfL)K!)&RB%GWo$|xr0 z+>3NqigZN*5s@1WaZP(@?cqCHMDtPu5n2nPB+@ETL(zE`iJ%9_zg*zgv_8siQH5a& z%de-f)(-e0{L&j7J1W0H9*&(^(H<%3DkRKK_V}wQI<22f$mpH~cT~edo~sr5T2X&U zI83Xeh(g8p(TFXS)tr(kFE0s9T@-nJK~m^^&JzzFJpJ8c2%eK1XFuScMmDu(zG4Mvnl1JNt<0~x{R>NkM11O7o;a=XLp9_bcT8~oOhQt zte|ZOO6qVjKiT3Bix=EleC^wxhKerwpYY}Vy*2so&yG;$d`gUD@jah~J_g>L@wTZun%A!teoyw0F^1B%jJhoD);g*aotw?5CYA)YNL|n4@c5Wc z?b=k-I2xi0T|}3sOxWOGY>WqYHs~25V2AQq&LA#wzr~qI#^Ds!Wx2%qkJ8RH752}b zrSueoftZ<#2o)*BGS~)=Ez1e1-ds?G6dhhklPO1y;$)ZSuPQ@wm1-X4_lAEL?zF-n zA(}VOnBX)YTgL5vuZony2>zuL`X+0Ut2Bt~uM$pJ!t%H)U5To+Pq0%}&JMj@8e(K~ zlWqK}9zf-MLnZmwgg^D-&$&V%-_k_;odZD{^Y0?r~fM$#CtVq;G5hhkI=iX5%Cat%*ultlS1D4DI-YEr#nrOK3om>L?P4yAig}{OqY-JP?;q?7Gi+Xp1aT$} z^c8uQCnqP87K$Febi%40S1z8xcYf;!$Wz)#kSMGzauoUu+tyb$oYAw4FzD2U?>E<| z+LrfPzdj4y(np*adJGz>#tv@>jx*TF&aD5G0vSi zz$h5FL56`xLHmMXSY@j6=m^?EJ0->IN~JnqG|_rvHRLt1Z8Z$}7LBY6q56@-x+XMQ zg-0H`hQ&+2EyUSce1BmP_Cmm=a?>3b*iBwvaqe}ljU}xjuQu+dOu(pe&BmXn08XdD zjF}k|KX1+1M{-ee=0f{ZMrVB+se)iuV9V8<@|vtq;d=I@P4cjZ9%=c?51~ZWiYLhk z9eOxZdaQYoev>?nkPGp-UnGHy) zPrxS!Ak}-y+MGv0)iR89lXG!uAnTJpc1ouyoxhj3i8L8#C#gGMc8>e3o|i!$5~pl+ zu2u2r$j_))x86p;g1ptr{!y|n!(5i?A%iQ&HwsOLM*}w=3z)7ZiyXWwC8wRp?r zv-r&QTUZTJaY%oOlq_EBMB!664S_z2Q7oGGG%JR_ zgG{E&eKBl2BJ^~NZpKD2*;6#pcO7|EYRy4#Zs>W%&ko;TEEZh<5~}|N3S5}Q~F)#o_!NZd}vbzFk{3WIIZk4`6X zGIY+_qQ@qVIpo?l{cdn5C>`Xu1Dc{%FR{<1rRR*1>5DM3@BDtxEwixQ>a0COB84n!tIAhQX&>V=q&P%kW2;D^orqo4N?F#$5yM*iRe4pI+VBA8GzPGnSsjIt( zb0VkYCcEsRc%||9Bah;NOP9%L>Qh-?bF!fe5!6;guq~XHI*w8sY_@AF^Hk&F)|?~S zT0W~u65^)qsG!(Vl-N@QQm#pI9OU<$c+x8NMIb_WwHP8Tk)a+KuLPBrNQVwK$7!>8 zFHOyTKd!eNQ6`Fs?PEnf3snQvjL=qNEL)aFXllL>YGDZRiMM$A%0*lrY1~}`9l@RZ zf@H85N5_pGEL*^hpXF;j8B;&ea?YuW;B(?T9s52bwq&qB5$s+)(_%`22EZ%W)~c@28Z`g zYmaWRp#9t!V&tMMhgP0C;uSjdPEngccYbgqHv*?Vkz4_}S&(A&NS`?Cqlyqby@Hsyg=D`KEMfBqbHCX=+tf;WHr z@DO`fFTZ%g_p&Ox}Z$!sc{U1ju7TyiQlYNA-o4rdO|U}v@?PDF8C zqM@jut)@k9M!seREtrK&t9?fBHmiiqf_6JZ3*hVy&F9P zQXuuSV75&&1?myHqq}ewr#FqHC4KL4X>9P+#XWrS#Y3#zX*DPaa3X5$51Xt}gcBXcw((kR=(VkO)L zks7IW!9dM>b<{LOKi`Z-P{DAIXSj9J=eXFVPh30Ole$v=n?$+|L{M?g1A_aNC9WqTx zf)z_~AZBZ9>=g*M-@2rX$hB4}P%D811}~b5GL8z_A*@{bo_%EJ6Y=9)o_Z6$=imH3 z`d1h4M|xFa7~3dMn?fhn0#E6nO6ou(p|5w2^{NRCW0;iJD$gE)a_FhoL=g!~V>N!~ zUw#Px=x_cM_EZQ9InoN74qZoG!Lbup$>6Ktf%#a;;rEAo98Gp`s9Vd*r9wv#k6xzDZ62}-1O|cLOJ8tp)-~VTe z!@*&rU>?&?1c~Ls{Ca8#gcQBW0W@?w2tt~h?qV2r47PYG$K zYk6G5wb}!w@)4di+Ut6XzxYGn&ig=1U6`b+4Y4o~%-qOLit*vESooiN8(d1UT)y%E z&Yn9*b&W8>yE{8{7<$`px`*PHtX3`I`bl5K8%|@Tx}n1Zm(SzcV~^bGFP2NJyeAK`Z9CYpM^)9C^X-%zGIgn2N$wSEoYFU7hsQceX*o{@gxsGHr6JRn zY}U0n1;GcPO8cob6;`%_?G15mCtVq>(V)efFYVyb-4U+O7wARuHbKhE`4aS>Dp-@1 z%AkhmxtAutiXw8tgGOD_0^}%r8U`+-+U#>#?@3WHb)!7aOe_31|IrWPaeV`r-=q$7 z^abiI@#DwLzsrTWq7H^NjUi421`Ajf#HH~-~97iyt@x5CX)~fx{v5OtLZoSuJ=&&i*vlCj^aBh`)lYAT#T06ZMvY4_yZ8Le1 zSwK1yeN||pN0A##NAIwaYn?-XkS|m^S*UUnT4$*vwo`esGE6cyR^{}{uyCKR&DSdq zjHp}~!|CF4%Vda^h3`($(vG0OL8;nvgpx1n;L1W>IM1npGO8=`p?(q!i!AUMDfCJT z@y;gCu!kQ`fMaj1>w7el3F_Gd_O2e(o~(7!@huB4_L}&}1Ry%sN^vMV8l}SKVtInu zc!w5B8VAoxkESx%-Q7u#ADoA3E72dnp=~(@$f8d&Co+D)c)t7V-${{}9yl)=TO4PG ziAL@E`xX-KvBCbB1wT53tViFiaJ-n~=8YQ|RT>XmID`G287sK$?8Fo^)dqSOi^P^v zTb4iwmQZwUAI7+&PUUbwXg2`O2>ncv;!36_Dbattut98VbGix_-lkrmLy=?XfpWU#KP(Gt*^f;c&fcp zNiSKs^~R}TWTCfo5<6eeVHZ{){ME+xkK(W`>or=LY>2cHaWqwk{P?v$B^5UatNr%5 zNzp^`9im9KLSzU^@euK0>^;#eJ{2!zghsu$9T8Hhx`r9m91*Q)ITKzyb@uzzU`Wf@ z_KEGbGBxK$E0)kyx~2lGHy%|&@kluAqtnteCv5C=OT6n2buT72f&w zw_~KaA=qnHfy?kri73HL0oL{DGl?6h@G{pUk`VrfL$o|TJJ*xJiz!Nc6)CK6?M^t@ zf&u;V3oqb_Ymei~1DA;l$Y_tw8T{t&eik45=*RHfbI(ytZTDaw4?pxU9)9>Lo_O*} zJo?Ck*q!c?5LUZtvA5UYc(EYpYTKW= zk=M>ER8@nrJXNN&Mx#+t;8xI0P4cye9WYZ07YSg}rk_)s)35N%L+3F2%9Pc>$)6D84+ZYx{K#Nh;r|%lz(%%xrhX)dP$E} z!atZ(LW4T~X)m=7Gxqtk^`&|thBuR%sFAek!}^HLrl}_CSx(vLoVf|-b+u0T8EH&{ z?WjX^X2|o^#thw>xEP#@p|%_REI!0KjC1(Vj=Z83>3)~OOMkjAS74}BaY!H*-nMmi z4_=$@&Y$=;3%t527%_v@Cn7%OCDp))rmzu$w;l}4ifBbzqKcq)rK$o-nwaz%j9T*o z3H57Lg0OkDKyzuqu6Z^zmwr14BE8i5Ulrx*vnI*#)e?G)N$tP+-S0*L-r9Om=X$K# zC2hz`G2KO^1K&EVK8n}q(?y8x@$yB(TH7)^^h+jsH0#Ia8cJk`f5d_p6V#he>Ejeeuv_wSm!lbJ;LqQVIB;L zw6yUX6^ygEc5xSPzH|*3m#dk>7XfXgDKa@oF?&+7Ik-&#hA}4hQr&mHw(r@v zudST!Vc*Q>N6@M#^b^q-2?b9I?PEDQ!CLS8@oP5&sw=@TcU@`_bR8qcqA`q-M*Jml z00|u>2eI@1UU9~s@KrvLE54^PJ8zty;H-9A&J=p7MCXos#QPl7t%qtMF6OFp682xe z!OeriixFWwlz+#5dPBBC@!s!@uy|$Qy#WkfT<;IjAipvah-kx&KN}zfK z1&$F8Um!Ps2`z+@3Q6$to$cXzCEd23(N2oLOW*3^5KrS?OYkgC zm3iI3V}y1g(WP=yPxNVj&PFv6cPhJ`K8Vgvkwdl8NXS={8VD2deYGaK%;F+4^fP}@ zuyV%Z!gL0A^9JK^sIUy`U|w3bf~29~3u{gE>%DZIdiHp%#cI_JJ~2NLQ(s5IxZ|TE z)~9HfbU^jJ#ofb0-n_2Eaj7XQjMlK`i*$Kwn zN+V!ZaibZ*4ug^L7#PEH`79T!WXfX%6B%TJ_ww)l*pCt7#9^2wP%3ZU@LDYvgwqcj z$_jOQWY5W`hD!P8*o>w(N+^FEcal@GU;fHhaOr`|6g`A{KUyyF6aU43h1XuWNk*em zlT1dJ^2|fws+g)UroqZu{LxFV;~)L(&lA7e_k725oJg(jz?t3cCbA=8ZmVf!xLZSRA8PbM|+4o&2*?3tW<@r9$~2!USBy}nba`8 zPrFH}4m)*^XRhqw+WrWy-gXR4y-!&wdDD{K4r`k@6rEvY#&}rW;PsOhKk+~ObNmP2@h&`baR;;6jCyAvkq{GpwwNziY>M`w zzz;92Cw;G-z51Y&zVBVaon+?}HSUi&P*! zHWU`jxoH!z6-}GidDbM3x+Av3r+@V$`227E79*gNVx4$m8wZqZ70+S6O-*q_-RoSW z&n&5h+s2ggnipP-g1qI!my4A=0ImYw`8WR-CReY)uehK}V>3U(Fa69v#P9x#f04Rp zX@f-Wws^|vTr||mWLDwm)i^KO0>X>svpa5d6n)_u4RLF=OIr8_!dj|47F|ts5knD% zI3tofk5RhvMat1}GiNYR*I#`d%YznYE?lBM8D(mW%}8)j4yNyLc=Hx+pB!VgT+(56 z_Ut*FJ$II{@59_KI~McByhsOeC=fjC;2d1J`~bfG9dFyn2$qVI1l;X=%ukM~4`@`6 z=&g3JJ~m(e9?}9|jS=x8aeRFD7(26FJn_Vngc7`Ucn3fEU;lNy`to%cQ!_wEt*^6t zy)u4iF>mqDf9_x6)~m1K$A0AB#^lTd4?b`SH*O!{@OYW-BR#7tqGLgMMFXN-?uKWm ztWSH9QVd(hf&|!LLs-T%NW`$VJVDz{PR*T?Ceimv@`P*m5$0?yN%OvPV)2vz z^#4Iv-MU~_V3;)P10|GLAwb7OpJheCXCP2yRgN_B-qV^j-3kxS6#n(6AHs#&Vq9y{ zX;C+h=q_x}xA(^92ggCS<2g5bxwz6@QwFjj5K#L4{m% z(qagMvGKv7(-zKXoU9C9zkW=}*f@_-LL!V?Q=1Ji#O4Hu*H~#r{wPf__=+AZ6_$~@ zzPBx$;u}1neUC<0uss*(NW3Oo6dn3oHzo9mhn}bvmN!_0UnxUXP>kpztyOaM#L#f? zu&RXYNjfWHD;-m_hai46sxT1|skZGX0$Bxv(^;IWcVLzdQ%XNM`ZbqY{KXHJhCUAC z`^=pm>kQ6=v=Aeao>$b67z{!bU5fXlL}^g}QkyK)C*`*IvaFZ+$)LN>pkE29vsY?WGmgq`g5pYaI!q2C^H<`Q~)=MuKm@&@`ag7 zez-2T8Wa+ZTH*CqUcuMD^BXX3rqp5nSN_U>iZ8zK6;#cXXl@d#vWg5@$m{a74Mk(F z38--QaEX8Q;g90%-WmMmAOBG@{10BefLourL*3zB7wbGZElcoR@M% zaY~WFwx+yl|fSE1Xw}l0A;2)@4dKC4xkd zi9eG%rBx%!Mcm5QRU`DaMk`eKWfq0jceA!Gd`txopG_CTSUM5|$JrTNHdE~HUx7Nl z4Y%y%ehoS`#iw3;1&2<-3OAY86&t7lox5Jkr1a~>%k`dUhxJ3h>m54F99yk6I5894T(x*$JVvAEfLcmTKtuZ#p1ib!OJjw* z3xigg%JLdT8jal4d++%QhKzd3WS@1*Il8{Z!R{Uoc6ZmtH`er2D*p}-PFH-V$lud} zX=P`w3A?GadyY>Ol|t8w^&8b+l}9&@gE1rQpE-+Q78-4m>{wWXH3@2sVOt`c7avn3 zaZyN0Z>%bxYA{N<4TG`OfcQ*+IUpZJafBjj`AdIYa$JYIR7py5G9HtI3!EMyCSENQ zC3P!u#)!dJb898N>$1ezZn=zH3i~InROcQMQ1D$4lZ0e*eOFhZBsZ<2? zS`d#~r7x$M8P}`wi)$86afsfB-!a^%WU%(5B>_CX84LY=n$DW1K5f`KXa4DNn~<0Dw0WKlBz2bwW2~DyB#UY zq32Zzl*-C#;_3O=G#LK0LbFXCtd(?uPmNeHm9d!5F^e*lGQMH*``%Gwqo)5j<)$3> za=h@x7nnd$LhrWgaQn_3%Bv0vy|U?_xN-&G^}%mJzoq}MeBD00g*$g{qwiaW;5th@ zWS!`;tFnGr@)rr?r(08Bvyq0}`>kJbt%gJND_{8v9)0Xln5w~V|MsWwKmF~0NK_xz z8?JLXNqKmnx4em+6h|XvebiECdTv)Hhs7z-n718n9nbOCe)6y5cYgm<*x#Mt-S2oC zc1DIGMTYijk!&9BH~u$M3elHsDHt`1=h#BP5gr>~ITDw~Ray@QN1eygYuL(QIhx{y zm4cm2nTrh?##dak@W4Ye;G5s{0P21vP8&N=RBuq3r5G6Yg+Q%q`0&4~N(*&~c>P*Y zxBvB5>D_f)ci7mo>QlI%V|A0=Uf5DEx&wu9!f7;JnQnAr|GN+W2AjW#EKfjGn&#+6m9f4vp+_y zrD^)qUF?}tZ_-d2%hweaM1r72lqE`~x=i$zz9I}U(WNVlCKK{?sUYi>VAgq1f^{N_ zY|WGAb)?+jj%j)@R6>_t3FTj8>n*+_o>_>+IB%yDft@0fHx{$kx*|_Hrkb?;XTpEM zqUn@m&c!A5OXQHp}JhwF+AgRUFae}@*iUY!pZ zrJ<{uy7a?A+>9EoHKc>H;wG_VgZ*pW$7!?Zx+e5i*l;&*-okR(QqeseWOoklpp`tfFZ7qf`z!76m;S=Pg^PPT zlos8#-r>1l=MiGyYRQ{kPDzo|*uAyVm?la>Iu1q6y)=?_k{mtM-`WjUAzIVxG>g74^B0YL&*( z%HkJ3^o#VqO9#7n`0@pcZhK1kU=srGhrJXLvP@t);d64T%C*AM*aCy5Q)(CiSu#s!y?1is#N9$F4nsy}cO; zB)#(TYxuoSK8sghyPgD;%4a1&qf?lTckzi&d=kg=Im}4o%9YFbz0W_#x+0#Vy|TZ9 z<~6PtC$$mF01#goTFOsv3-eTj(w0df8Mjzm539M}#dEi=Yw^sn(sBL& z689fKmR;w4ApSe&+oTevX0L%acNCXlLAixNbpcqVv_Ft4JOQs|`P_qAW+3PI_ zZPm(@%WGBbmbc2As&*Z=)^^#l$&x9-vL!2+padw807!r!@(eJ!r*rpru_ROI7p9zJkQE9PjTywvdk=3xc)$ytrb z;v8gHP;AaYVLN9yG_;BaL|OBP101Sv7z>>VEx%%jubW`lRTH9n^);#p*;NuOH0Bsfr7_`7kZW2}@;&6!3E!`$9hDkIWj1Ck66%CDHEL0RFPZImhJT42U!3d-M9$b=Q zB-H{%fC8aKc9uTNf0y3?Cd)`1r`7Gy&-&M|i)tF-?q~re?F{M640Nl7{%}Z!i_m3; zMX6{GBrl=-gHRRCgJh|eXiU@QEXFy;L#CuB?oTUDS>|8-xs+wWnkY&mw-#Bqi`(uv zhrRth5((2jDy>*J>eOwwQCV$odyD2m{QI&j#q!b;!GHaCR#k!R&22`pMP)%27D|!? zV>tZGpZy%Br_!2fdSbuwCar9V_4Rdz5)%r^QPbGCn#RN3l=fXdsaeC8Mr2v$GS#n- z1b8@^1M-V!nKp;P6Hh!rlq0DiP#8YUqFyRfR9ehh3I3bE`Cs7&KJWo_6N8qr$W$qs z(<#)R zdw%^=NltOL>7PfIwJ4MJ>sAproHz$zX4PUSncYDMUDafK>+fHhrm*7?Qdgo=jYWqh z;qtN2{yQ|@6M6pb#deCaGyu8vQWHlsmbdkln>u9MxzIhjL!s_n=U+V)hF|WSTb4Yo(Qc zaaL_{@Y7cS0Du5VL_t*37LnXm1(;Y>Xd*)S^N!ojN>D5t+37|nP#l!DFs%e;W|lu( zrx*KR?gVG#6wzL39~Dp2`!2|MORW4eiARs4S&E1#Oa& z7hvu;MJ$!E@gIywvM}qiOi>ifoH|iXbD~roJgTKB?%`m7-hL0gUXLg!_O^G4+I=({ zVz9T1e9)(c$&Llg>>?Ryf76*VzQk}a0_NtidhdN$SzW@^>^xdoTS{!8$b4pI8n>J| zh55NT%r4B+P-0$H^nLkwOi{i!j#I5RX69#jF22%IwSyRAjg_T=QC;k_gMEzi0;L+kRb$HHG)nxLANnxvebQOa`Q9+c zXv|=ML4Q~yt0b`Y0VK!{l>$X}b)#7o$P!I+Qa(Uz`yi1BQ5}xW6_~drrc;NuE~zx+ zR7oYkx6Ue*{T^CVK&RWHfSJxa$vEp0gc^~WO7EEGeU=?@M91N-?W27imSq<^PC4m|n5Ju0Urk051pMXo2`k?>)rKQE1{##N;C;Uo2s=7AV z$MF-VsBBcyD`knr0lX#DWe=Ls zkW|};OFk~KKC&_ia;kw_bkrP`gi6V}9(>P(NQiOQ5T6s!5XyIme&2s-Ux*qhVwzhZ z9S>z4O@~tnNFbS&DkY2zX`gygH`juu6Ud+&ZLe(G~S z4qc8=j{7jGgi~V_d7lO#$3D;?az%wRC(q#j{}+D(U;3qAW`vspn)$w8a+fcyQD3gi z3(QSV!^n_U?4!g%+M1iqHDq(yAvM(>?L3uvs3fKj@H?r2B~ zg>kIPjxf_2;C**2;iW6r(Q^#&Al8M3Zh!N8?F37=s-fIkMRZY`CIs5Z49@^{K#IRM zxv1kGJoh%jBUhxafmqi+ihhETD7qe^IRc^@ z5G(Qj<>y6JXHVg$RO5Ax3E&WQN>&q*Z!$aLm+LMB?_P?dR7E{ZDK*V1aO6vhtEj$d zqhk$}(u9_<91R*H8C^M)_euoe7eS@aH3=3@-3D~#pzMg$#OO*$GL)y1{mOI)Gr=Yg zkpckyt@qr_KxE>$Cs`I$%{D;H!evD;Q-6Pos*DS|h9rv7(l$B+)oiLp8u)|pYQ0XV ze?<9B~#&x5~szy^7tEn^{?UN$}-0LH{gmPV8__q-a(ZQ3619W07GLH z%FT=T_#^MduYUPXeEa!VBSm*O(yWCW>-%(n5}jafYFaS*JSe6Rl==9(iN#q>LSPO- zG=*F1s3TCU*MHaX&v5@J^wAo5Hj68J`&gKoK~?U^s1%o7Ou2*izhw!({zobL4$^yY zP`0K$tdE0PR717Bk=ePZHDAId%q^v2P*z8~!MKNVRd5>CHlXWDML<(xNBHv}e+Yl} zp>vqcHvpHznw+7%P5YO<%mdYaY`n)u(_CZT_W9=*S_IBucRxom8jopF8e44*+R0{0%CKj2$25*cjXL0J( zDLnH24=~Eb)dnsg?flQY!QI;0sz1Nc>pBW{d-EryCW3;=t8V1g6Yr&1MPSe)c398TzOq-ifd6x4<0F@ZzZ=(s$oQjEq0cJ}w-R9Xklq?~B6y1al^Ck6Ue z`ie&t5T!Ss2-S_^8?nC8M>6UUV-r23S@b}_q<{xMil(DhRI@~0Nv5F8OXHaB>{n}>~6DNDfx@-Z1_-Of;r9BDToz<4c} zo792T=t5ms0vUg%Egx~qwdt6`S3*Or8?+%R(!-;1zrCSdUtbsf3B=(wCX=j)f9Dte z&fmex+#)%{QclUZ$Z*2_to{I_;RrUg-=(f!DO%C<=iQ@$0r1C}s1ve2$IE!v)itcz(F$H~;?`%pISWDS0&5YQ(X#9oy z?do8peg8b~ck@D^1`EE)>UCb}g}e}B(SyGu@8p~7fX;rtuh(EVPYDG(fp zJnX2cbJuuI1(6y zs*+!2QDKx97#3sF<0hw&CKI2`evJD>9S&!|SCDp?7DdX4wu)2i zalrMc_g9u*4rq6}Y=rB2U38LVRpI5AUnIMck4Gqqkq}tArsf%4%ks)HW@cvSJGZtr zh5U5Vup490o$7>l@$-=nagEK;)Vm4Kqn7Ix7H0YUgWqwpqaNzjjRLz&5^GXS#{ez7 zcw?L8mcyM8Qc!~&?>IAuJ5S6J`lzz{rlE9fqJPf;G)&A|#DGLzWn?4ZL(7mYsfH?D z11V%skaWzcQ<>qy^&OhKu_C%gYrJZ_Zj$BQl%A%!MwOJmbeKisxwB9lS zeG}vU|JtENgimX1Z|{(uX|9KG>UZ67I}Jn9rq*G0rb!PEvN0s%og~s%<9VF*t={tq z5#>o^$44?QLdoiIyNQw^(l6(bD2wB(%ha?=U}8Gz4`UrRE?=hR$ zml6kDh2A27CtO$uis|}Ew96Hkn(m;wS|LqLY}Bg;=>E5U_i=pSq4!X^)tC$@q@1CH zK(EHa{4$OozZFH5A|Dp`qd)$JKvF7{oN5BoSsvdC1CK4gD!p z_(n@}-O-3ZRMgYlGf}3Iq39s1k0h?rlhP;T=Qw>oJUV!Z>pq@Hcth+)~=Hr6yfvFUse70YI1?hN?9%@J)nHiuI z2bCPypqkQlCSl`fH6ILl!|DXx&MdO&HgOyI^xU8G{_YNX`&$ItrD^hR2TKdfgfz+X zA@#G8pa>V715zAjf8@+nhfPp zhr(pk&&E+ku~fwl#Hz+jl#74bMxUb)m}XU0IJUBamtVaU8_KTHm&pr@-+lZK@P(iG zNvyOw$XZ>2N*ffg^eh(^7O}E&0#3D|%``4wUBe&$$u|jjLb-~mQG1mwvAD7vqln>n zBxpbv??-d}sz5O4>b%i;$6*WRLeu&X)Y%>Qzd&cABuFcj-$kKeFLW1cV`MA?l+vmL6rQ zl;wfT3Z)O$gn7aT!er)ECBZ?8#Cij-xge_82wH9N`>(!%YgaDeZL?jRI=+n6*)CH5 z9mbKf^q(E%7phVb+eBd1=7w>}Ty)Kg)0)65{$5PabTBtRjpL^DIh# z9c9HV#bjry#`9zcYZFal3tUDq$j5$NS4jH39!A37;B& z*D0L}7A%k6MrsOv1F!sR&|P!s$OjBJ zqeQH`Os;koPNuhQ712lGXAlz_a>0B&Cd%p5S)4q58o<+$V@yS{A4T&b{eB<6@mv21 zKmUc#!k96Jdw!i~G*R7|n#Qq}RXC!s�I3`0qb~{r(79dx{-x7bZs^Y#0_u1=ggp}`GO)Va>^@;u-@O@9}6IrL+)k3AXJOb`DEwm!! zMvGcxj(cvI!|`d2S4QJVLw=M^prB44Gw zk^M%_GNBrK;7DU0y?VU|;LB({CO){Ab}U}r*n@t37p;uPUc=Nst%0&J^^MTtV17K? zz@fXQOfNYw>7~+M&yb?RO1X{y^)G!2x0{0QrPX8PszjMgkYp z4AJ^Pe0F9Et0#{^*&Wz?Oj`br#Hw~7WObb!}ifh!snzJj^#J@re<+6o z?C3RXjd`QH{avpJGPAoTZ4zAG?sVZQ9uTM_!fd?MIzy8<1Mv+$t zm{p^Enx-lK%~!sP2jBg6Jov!dV5XOWaf#XKY3lbWlTwp5FM%W12{7?Qbbf@Q$ zSN!csT4$`Sq=WaqbCNy zKQa*?LQq}}sKJ;WOwOB(wZR)>gIBi)xO=681l!D0M>7Cpoa~P9zPpxj;h9YoNRD8N zm1pKe@>U`Vtqp6x48~N-^FPiD&IIP>X0W)hh%&Fx>km*Bj!J8qsL%cNWsRq=V!Nv4 zqw;D@BLU3Y=d&wC2qS_})B7C;a196mdKfNdBdP(vwM?V$f-_;+igpUL z<}%DK&cXI}khp^9Xo%Y{#f?FVS9V#Vnn+!pR3LmrFEDo3n061gsYw$WqR`;Tl4=6L zNWWRCs-B=vxVas;nJ9;}!f?!E86 zy5teb#2`$w%x1|-ScZEFKDyS1S!zQ=N-G)4AtB;Ors(2JA9_*E&Kop(C1h~@3#$Ho z$e%Va9MTXrQ zP)flStz{yfwDNKy`lB35T{2-GO35@DMGANL{{z0hW)#)J)G6=Oz%U=7-ASQMg){@< z+$*EGN9-KLOf@os=xm{no zE~GjtPd*Y&*#*-0f*#vCh{iJ6~6_bJ?)04uMYyh4Myih@>@1yXI?VW1McyfMU3 zw@9c+C6Ln@yM2H$`6}s8 zLoY`fK!&)y!mHAUUv&Q9^Cxl#@Ze@3uC0utDvJ#);{F~vLr$O+hGM^-Y-#byPkxN1 z?)_FMZ&Gy%S(0vNe6M_#qB2}~{W8j``mSF#xO({tK}CJ~)9G|%c*jnBNH{9t`e$gQ z#l3gmfyayzF`-b04YhHGWCkj75}axvI{2dNzC~>~4lbmI$P@y#0*gkwhMHMbaMLul zQdNVZSZLr2Tf$^%Oex73&_;K}tP5$F^QY8}(V&qL^#~>;TXobfhfL819dF{L(<<^s zX;FIg4v2sRvDjbFFOb%_4XLV;hO0gBP{AsXsG-($TauJ(;FZPV$^uI7>{SJwXtX+Q z?C)?r)!Ljyr{WpNpQ`cM5J{?)(z z3hT#fJ(0*#FHRerzi=6U>u>!c9)8ce@samGh}C1Ov^I9OH}S_$K7q#`dyFGql}H~( z5uJq-s_-mJ@Drc=Eb*H8>mQBB*xT((re$l{&^U>X2^f=`C>1Hu6ju}b&`oqelu%-D zHgs)xRLxq;?B|dNE z>QZ4in~Jg|Lofa}9rgJA&8bR%KXS^BXm&=c2-I;6+8i6ikrYF$O@v0M7|o->S`Y$; zdKOd8D-tmZZ(5^R+JqzU$N(269zFW3jP7YGYf;ugoLgGefD_!bTC+iI zZp7v>_KUz;YVy739q+)@)HHg%ab0JOHI775Ln&1qHQpE|m1C5MkDA0N z(%D*8kCUErjA(phrd5Kug?ZBF@V~mdw}*bO7Z;9cq|&reNUwyQv<+wvxI@pvN0%-% z*+dJ)B!mq?+$SIG`2Z-@I%W!f{S-7k57y&ph*pJ6$KSU?w{Qcq zcx`J3x6gOr@_i&eYBg+lZ=F;4i+}2Uc=4bA9xnGK7>dtJO)LNo5*Q|trU_cD4o2fa zy?2!*?mWK^(^i6Z)?!4U+y{TXPEB}$NMEBO5>7Ry$Qul4@hy#Y83Q?4uWk~J%nn7Q zUA30)F%u=EP+BL+SU08tQYz-T5~*K5G5}?WQiLf$R8+DQ>h?zP8_H3Kl6cpIU)R^+ zn(vdtxWMLNW8YUbrCY<`AC3JWYmhTf2+3EihE=e~!ky`4}fLxiuw z!!(#%T*kuUBHHaXP80=RJAWSQH*Qd~D?F$e(S;$miA?N>G8Qd*E~ROqL-5YdN5SXr zI7e?W#OOPwd%U!=OtU*l!eu^hl-IAX)r)2F!s(BXJ@OHXv`m9T>ZoHl9&+F8h*Fpn zI+=qL<;`n+(<79zoNMTbG}e>Thwe`}%EF;H=u_|O)1Uenp8w9v46Qv-7E+Lgz7r-1 zf&vfFN)4s|M8{7M?>gdzJRk3b|I$x=0YCQ1kD)^mfk8eTw zM2aec4x))Xe7lagF9Ot%Y&)IRbp!Q7Po8qpC{QqsdZ?^GTTozyH!Ci9u?fOYo&2lll`DUIq5yI|sAYW?wwdp;DQ3l)1TeFsKbHPL*BK;eCMpM+^&We?^E zjqh-g%Q|$S>_6S@1jC@Uk$=_IC$C)!{D81}8>NK%_TeamUQ`sy39FW7k)oiZt5m7}O~Aty{}6Vj{#7{lx$#I@ecQP4%J#$c5w zw-H1(g(Y=AN$(RDX|h=cSL7J?2WV9WC~|TZl`NuWsqCOC9G803L<8S777!We$`XCc z%E~gAiOQ0wKk}l$Xt+li2+?j=TuQS}(O}@*!XjFv2B$I1OER*z+;%%P@%vkwaek$q z%C}S|nVp+uX;9$tRHdmluGdM~L)lu|owmi5><-2?U}nQ~rMsb3*r`tcVSQN`&577bRVj^YOBv!)gax z^U+nXD73*^(Z<@);7mIqZ3`d}9uCk_TX^){XYu&?wOamHf{#OoJPV$0(wLr_rhY-% zZlRqf)DtpFjBr_z-68sO$u#}?^jVj&tPP?Yr5-EPCYUI9v5=C_c-mQl=~NMIyf;_7 zdqa{oD1_Qwxw?)K=ct^+%JdW#XFC|#3Twk0qr#sb8+t<-CJzDofVC_}1l#tnfYAz1 zB{S(nsXY+72D_nsMp~5&y$|ctxrtI#$h0SE`m!iv*Wpz``>m4s=*$C6ENx%LX?MI;XjxW=9Eg>Tu(Q8MR3>>@;OyBkfruPuSC%lfyg<|Q zK6UgC#Ha0p-af9q_8R{1Kl~=_UXK_FQu4qimZOw3Pjxf%2o3#a`U*XfMw%?e+wQ)b z`b0N2H)vpPc7C2TL9So9hP|B~A_}Aupkr+m?>uJb7KmPYw|9d!_HZ;JXEQxLhp8^> zCQvWNKX_-6rd&hrv^%sJ*#I4(!lxCXqzw4IX_nDu^1nA4jySa@J5&*qCgIf$*IA8H z*toHY;eL;C-UOntkps-A1Rwn12XN2bXCv!w(`0y~Tve8y-KZF2FdRzs>n6!J8vqNu zDXZ0iqe&sI4^O5aLNgt5AeP|Ke%B1fBW&&L;>6N1{M;A+JPAWR_3iIQ=fNLXmmtFV zDQg;(=|*VwWjGGNIM4C3Km7&#?Js_wKn5*Ae{I~jipL*+9Q(aKD%Fka$_i!bL@81= z+98uS;7#+h8q(gl2bMDs2Xmy~A?M*(uGXJRUw85PRuAWHn@8a`c(9PttOCmI;=y|t zk^QQ}kRYj*$k4ud<``h6En=Y#8EBpLCqx!%kDRh62kZaMXz4Y&iEH+DBFi#lX$#$~ zjZUhuGTWx?ukyjG-le8}<)dbCJas(6>uM%V@%CfKaB_YMyJe0SclYu7);Rmt2NCu=BF*@=BBW?xQNpyPvQ8f+wkQ19RKp){s-u6jtO9OMQB9IaH|scz4a}` zE^J5pL_^gD@&6I=)(%;Xk>wCCzefM_&R<|W1)8E}EfhF%XyY042nTp;= z#E-i;P&eq0=s^}`jyrF^jfaFpAA}&e-T_gGCB>P0?#Dm)$Nv>MbF0LfWi?R)nPXMLx#H#toFVgbo9E5~+C;F+J15 zpZole(_g#;(}6|W)irP*FqF2!`i=F7%nO1zbt)_i&jKYm7mn_2g$ZG$n;&lC|9QGL zpt_nR4tI#jz}lMP!awWFm#(0jb}&0Pk1zh*7w}8}-G7hk*EWc*gGy|oGS@Uzs!@7~ zjp&lsN2{&z+0T3if90n>hs0G#Duf95&sbPR`|Zu8aPUd;%tA( zUYW(DY08tHR8OkDMDM zpe$X@V4pNiX&<%>FxNInkt1;iiOq3lWf~46tnHVKTbSr1GPdrPrCFSw)tIt9+5@*t zW!Tx%*eV?9r-bql0WK=X`%(l8wF>ORoINF(66C4OO_V^(R+uRVxO2IKAN}A5@Rkz` znCWUHX+@<{Eo!1psR%eC+apS=0j_IOBO+lANlP*epZyXt%wVbm>d%-|Q=q6j$MLIw z>&py}wyx29;o9r!${bD}pMo2$1H(N5c4&62n)|<@SL3n-=(NZI(Zs%)#`;=6KJ$cI z-N5$72CAZf&03h5>e2<~wwgd9Ns>iZwzIo~;b=_Fusd(R1@p60*dL7J;2b?D4Vv`6 zd}$3YUAY0Zuz>x-A|D$v0%d7w7R0CC)a&zqRu%os2GOC)#nMV3&qXLN1*Y48Usguo zvkB&ppMvs^FfZu_QL<|^3F#Q0PHRH~`GK_5X&w^ucY{TwBrTNd_uqK#jI)CQMXYE_wP)rLYz9l3}E!KVKQJqTP7N z7O4Y<5;Y?#IaXmRuIz-fK%+TdJCI7876%^ZUw<99-g+y}oLa#@{N-Q7S0DS2c>L>s zh|5>k>0DXZQAdq$4A9;^Hcc54}KJ%|M4Hg-Dl5WDl^b!PEM+*a{R#`K7nVR z{VpoB!Nc}dyc(kd?^WFtC(1x8&s{j6ybVb!VQrR-z82_lwGMcr#Nj`{8~&omS?Ave zUB==dKPi_X6%w1k>XM%Mm41e2ujF{oYD#=qG{7f}t}VTfKmERQc;(A~f*c(w=^XMO z@heyvucFaHo>SlkI)mwJT$t8138M=4#IkI%Mh&R(7A?zshTTD$v?rZTm|Lrib`PTGwk(q79azFh-x$uS+alpX9srOz{6#0>99MN*87s@-G>9m1lVVg^T|vI*{-`;}LTh zP0h^G-xs{GzqT%omqvp!logc0?%qD8PEHdt<@U3;PrdM2%J;tA)b1o_Y!o zeCiVz_50YbMof)uxn}1|ONeFqds(uOlnV8T#>+a^&5}-p=FHYYMA?ocj#JpWo;YpI@X2L zJ!<~y5S5k#eE8l~{G%@qq0kZDubXF%^+2zSfCDET-y;)FhX^Ja2lH8qwA`nOc^^s7 z130$01l3J&e(fr(>afOoqyHBIsU~EW$u0x+J%fjQwltpscg-o>I^);>76-UR$tAgj z1IZ|v;p)9>g_>T49v@{rS~5xq(1-hg%2jZwYxHV6L^IOwkC3#ciLjN_DNP1dqH2fAso&uB)pG_uv0kJoD_gxO_rQ;FP_+k93gW>hmw* zqo4gOcJ}*dLz4yuYmb1{14C5ZxT#IU>cZf6RU11KA)F24CU0uBS`2Tk3{43WuaSIi zBBYTt{{hxI_lqD+Ge)yGiH@|9I*q)lb3-t$dzO*S_YITva}5fkFeJcG+&8xt1Q_mhLl*!eJVPX zs%{QDM-$_t;V3$fXn6cNwG!$Ry!ZZhpvtSs0mG09DlSK?-B_b3_NH!1(}B34y%F^m z!gRWeQ|it0JcrxAe8pQIQ-zB#nQ~2yh9>72UotL1J zSya{dO<`i;aat#n>ZlbMss8UjJ7uvcNSQS+a->?QjTA%Gy~Aj?4cQEkMCKfeHqt%+i(6w=Fb_)Hi0S7>ahj~-&&`AuEsz5i<=uWc)ZK@NfR*9Xg zO__{~0kffsg^xsO9kn__cF71jzqbpGsl#!OGzkf zqr4>ZTvEPPF*^%TUBxUDKJ%~zeFp7cqIXnGPvi{BF|f8dB*|H-LKr8N4Ml=>E3*k6 zddHneF$BsH?6^mtKs5np>d?^JqUkm;)8^R_sw5b^c77X|uk1V)o^cZRg!~u0{PN3q z`#ax>>8UnmW@p)8mUaR$(_HwwyL+@))5PF`cixX*`O252ZfWGAI6_LS!*{;*9C7%! z(+q{>(iI8LOKCUMdsSmVFATA`hAgEM`Pl|$tIq6?jNT`I7_Q*p+P_)guNugoxcU6U z^v0-Q|=3fMb&XvODjH0oZ#MBk&4c{m#botoVi6E`trlto_P!ugBXUs}S+ zW5=+(IFA!2=5Y4hBcv~_M26IVM!VDIxeDiy>I%js48heE)%4#ue;!}^{jcNd)pcp3 zTUaqThmV_Gv6Qo@9I4{4=43W`^N(pR6Tcp*r(@j2zq&>bgvt9DDa4tA#h%i5bq6@J zy1-2;RYjv|)CfQG>4)%pfBY(5+RX_$ZyS_1M=jMiZEt9)-ho!8pes$r*(3#= z$>7Egy42J!ti+kc3{EOAR6-N`#w9f)&yD+!-rX;D+GME9y&FVyh&F>MoY5j}8|qg~ zj~<+fOp?f8w%lYn2NMQ|Ef1yAtPrUpddHF=Nv3h(wKd8*Djzs{TV+yO)BF8(eDvcF zD!o;`s3u+;Yol+;{)Icc3S)}b5~Bm(3e zKt3KLAC721%4Z^}F*tSdgq&~CE*CrsEr5zRx{C1##b_krN)GLIi;-}s1vlIQM@D5F ziQa>Fl$QIUb?tmyLr}|qet*zsmBRyu6~r!FqOA=+`H@EmaNq(_B5HaHB^R-=y+y$T z)!7Rh$uC1QIOLGl)zl;2=$5vkthR~)$<1)|AtmQYd=6aQq*0wvwG6c7jZi0wLAi+l zfMLK4;<$GG2G(wDU~YCAt1F8*ec}YlZkG{08KV*NaTUv2wi;78U|i%_-`T($ufL9G zp7|E`_j_cdDpv~niF;do9)T#&O+3T+|IO!W4`*VwA@O?1%F1(TlI;=lOyt86pGOsk z=fWFBiisVb1;(a@mwN*|P#CPHtOd@^R8qjktqUW(`|ed-e0m=vn{(!L;OS{Gf)pUo zd{THeM#Q;rVBSGRemjQk*jSn9i6}OA$p1 zsFbxg9H5tvXcmh;TK3|4KgaH{r20PjQ2&6^4AyjUd2I(ux0r^A`d~yDf+S^RX{5sm zfD#?FPHL={e^z;`iw)JmrETC;W{I6#w^HhdA`yKN6*iS1yJY@|%vOkU6I(;|LKLTk z^w1en7KC?bX&aT9!D|;@6GE}&y0lHccG1XC z+Tg+)Z(x3YjyB*Ie){ux<&{@49F#C({+Ofs zvTShn_|%=)IvI(jiSRCADMo?K1$0JL)eT(IaTxT`zkUU+{tg*;hVXJ`q=Hnau%CwP zOSy(XQe(D&wwYBljQ8XdPvY*k-G$R9Pm;6nU)SsRXqMu}+I1=oDIrrdPGN6r3opO? z67IP3Em%3WB7JKRE zAuaSo@i1sRqmr9*4P6v(4yp>h{T_e95#=IpUViL{e;7aT!G};+NG4-Te}BfK5w^BB zC+pT069zFFKaqkhE&o|b@5@=9ec(;uCXHhJ3+wa~TBj;G*dUsgk^mSCs;H;05nW7Z zn@J`D7fXK=jz`oFi=xDht!-@XY~z(zU&ZwF49U>W&d!p|ff57Z9iV^4JG(o$zIKC3 zxZY_L#Q2&}6d$JFlo&$Js_Wu@(+RkV#xsO&Tg9HE3e(0BrB%rvEM(2kKB6;7COq?W^CjYURm44#r0j3Nn2`(qBEW(gx>u2rET1K><)A=LefgH zt6O;C>JEPYg*Q-T(~`VenfIv3fg~x@&1+eUJ~~fZEo7}W6ngBCi66`0o7Z-6ZG8vF zy9%f04Hl*oOm$nBYBO^{7+>IZz(b7epcq)9r9?&a>uE-IL1iw4wgg=9ufe!NzW@eR zik-pp_}VkCu^fftd*4np20PpM==&eU+s>VU-8v7|--qh$$@2{?AzU#iTmjok(OsBk zb;ioCrwmU&a~0oy>H1@X(RdWraB|Ry{Ki)wd+NRi-u*+pK_9cTGi2D343nr7Y7+6{ z9Tz1%++*z}+;!)jxcjcV@zgWVOX;VkQchitjTfK8Z+_<+FuFzM9*X94W5z`E0*G0* zEChj4)fyjE(=%Mg5(fsVlL!8pgb)SBR!CKaPL|OoE<-wKGUSTOPOM}Xz$`V1T`0S$ z3CdX)(NNES=Q~ucT3lG565jgy4eV@flg>#+Jr7>oMV=$cQrx(9otmuYhG#K1KTpPY zbA25f8=LHKN=xWB=vs2N%Ey$Y&CbjqYcP}^G_Pe*VqUOjo$RwuUpepP}>f zQAe}MN?B5oANhIySYBMjU;X)?r-83%+Z*2(T*>b44i6-nx|!Nk*J~beu+TR87me4O z9Za@oIL#IZOCm&$WqzC4)jvl<&mnp)Qj_ z&|DU6JbQ73ciwpd?R1MW2qi;iSv$e~cP`_Bv-9}vmwR-5i8o?O6Hxx}sRcTJX1%sS zf>?f$=9i?u5f1L-S!wdKv#EPz`tTjp2Dt^Ed3^&(nvs&C(H7f-0#7{q3M$h^YNn7^ zVHh-(+hp;=+72Fn^*U}lHV>zBeCLe|c;VV6#_1eg6ND@g;pML~VTKIO#2Im#2Ras? zf(IW!mGU!wl}6=Ik7#YR+xO_43MLL3$rImSS-Sd@zp_m zmFnV8o_-3WT%T;-{zDGr*Z%ESe(A6O!Y_QO-JU|4wkQ(YAB>8EL|fMm6rTT)b|im_=0x+*gy9gR=#tk71B%OwDIQSxN}*OsfUAe|V!SfeibI z>g88nBTA5bG{VOE7RJLqoe%Xe#A&v&lpT~-*xB5|Gut~@SeT>p9}Ndo;-aXyWcmev z-ANGD0<&K3ZtvjO>S{F3lUoxs0bj_>2`Yx|ddKzpg$pRl0;f)%qI;W|lotAEI3lOR zQF(1myaP7M;5`q#3%8zHrbeSZ#2qBPo61t8?(fIAA>?%w3M)3zgU-HFWRxqRK@J8v zKE3!Ef~|9ooM2fMNGis;PBIf9@&Q#6Nh7Ed#tnHy$>;zO!AiL#a8`OpQdW|&ism`@ zYZMtlf{04#!n&GzW`eQ7)m(B=li&v@5cTl?wlM(7B^FENL?1jAVNOWvsEgBF`*}Gg zaUtff6SnNKproXzoD9KnbbpAGT{qUYEUMgP*wzbp_4*hqx3qzzh-<4VMwn0c@cB=@ z1K+yzBsTh*q-6t&kNYV#HfqY-0w%-kGuZSeT(Yxw$wYs42v%(~eWvH2RS zPy%MqrgS7;s@Madu`mMf&O0H2B2Ae=^s%M7KqXp!$tQJPdZ^Bnaih$W%2 z5nuvw=1SoZvQ7nZdvDVyZwyE})b~LZ1W&FM>mOOmplOq6*eb_#rt#>9-i>l^lLW); zU>BLI4h1B1CaHlwwgR+LSXI!qRO1Za{PvaoH!fX!ck}lTIS_wrZ*ODu#1sXXiwld` z*tv+d|IkCNGXDP?>o)-VR^o?x`#ax(nfY0acKYNnYUGZERuw$}is+ow>=nT(Xnj#x z3QXEnxdHj7pmwIBXuX}_TRM2NYvhlP^aWw?tyxwK{SF;@jZ~aWKk5h(fyq*uUcP$u zDrFAdfFJb^LTmU8AWO*@*{UFiGe0-a^0R(1_y`xxg%s{8m;HPecp{;kir8=uo=q6-GA-+8djE9Fg-U*QMCWAUcWESqY2gvr3c`{ANmmWO~b#3vYr-oPXAip z+CqOcym>I~4*%6k|06cgtER}$_kj|aINu=6D>d6APoW8wZQL_w5A;szpYt14R2>a< zc!yAGV+mp__nPKjlot49p!Tl6*Sdpy<;}8>kO5aDt$b8(=w>LjLXtx$j|L1_nQM_| z3L&JRYXf-nS=5ESP_j;v1SJ}@_D(ps#fg*lqDru93zT_D8@r>YpmZ67NScOiu-Q75 zV$6clbeA$?q)(&>ed)6h<5C*6RFYYeYHOk+2^THF)X-imbdcuY84>R=OH+|C4=lJa zC5&x}M6I1n^L>VKgbk7qLjt3e?z9eapPMPqyIP z(iBOS(NuS9dP?f5Ou=qdgU?^{TzUP{k!zp0_eCBvj*c{j-khYaLDJNTni@~KAH~Vk z{6mr8*38T-jvqgP)z#yKtT0AX(@qH#I1n5fpE{>qblcOInVQDj%q(SPQ{65)oi4#c zTkSSQk3NFYT5#%;zWPWlk&LUVsBfMOkiVx4SA}3gw6qoljM%9$iVugUskf@r@*YT-$-$GF4 z@Tp22b|BH=JJ&dGC5nG022F?haZSgfEHa@IF+%-8FTdw}=ItYUpJCJf@#on_(wc)w zW{C?7E@ABqg4k-ZA2w)f0C+%$znBD7lES4en5>OlYnpv8Pe4d zsc3KaQl?C{()@A;+GSXrpTQUZ(w~E>`cS4M>271kle*Nj0dkaX3^h9|nIW@^*fPU& z&t5}sT>j3%XMAJ^^35mz@c$?V!!IS>DJ(3`AN|Y^;hTT-G|s6C;jyp4oiL5 z=sF`YnhY=~l@C&<%&u9{kQ74|nfFS{8CDa8PQ`6Iw zL3k&iMPqvH+I1@3K{hJY>rIMQdx{!sL;;;Bx-Vrpjnpu`iFcePAEQu)%1ULCN5L+w z9W~8Poj%Q_y;3IBRRq=0(aKsh+_zdpLDsh3fiY!dbMwqKfVM#s&!iks)k21}k*j01gqsake1YN5u^~(!nlE zGQ7>E`=lleXP+&TFhb7OeX8c~Tag{88clR7K|(;GxyCS|4NZLKCJ;%@J`EZoSU_E3 zAmKBs7^Eo~hLx%7g9n#f`2X$1Yzm@L3Xd|(2J5a-i70f2a}!9W?3B_4M zs(?&CpKz6djD8Z?wpAE%R6(c6!lv9D4@;P18(|i=%qvH#iKzjqgrA#V6NzDRPZw27 zFv(~zVh#W6E`;Z zo;bLKj&vZey!`UD3+G?Ooo~Ab#WNM7Y=GG2&_IsF}>SAW5 zjSsy4VZ8j}3p|MC>M84H!^UqIAH}b%tPr=2Pg9jKR6-c`23Wgt_0YK)iAb|7MYr1_ zl0|QL2g97id-Cy^mHBM#cX+`~(lZlstC@9#o3Cq8mnb47$dNS5B#Pu%YI%8ye&@=i zH|lFMh6V}Kc1E=7seh#kowP;BDF6F={e6@fm+I0aBSv2WD)hfjPMtc1R@RP& z@!&xbk=FEkQ=KlMu?B;ES~RQ2mT}ix-a^y({a&BQ8%LwjWQHZ1$FH6DKl~sbe)l`& zo=LtH%6qbzimIZfKV;GwM>NcDcGyOwTM_!lMd+hCB4wZ8pc05`h=E|B{P&}-aiZVzU*L$MNZ#mImZe22ds7PhgGWa67LADvRre6$ zcZ6#@qQ>4lm`cT>NZWYf>N@T_IfYJIP7Y4#60`Fa?mxGL&F3~Sav9e-Bhlob`8VV#B;YU982+F+;wCxC4 zu?1TWcuq-$!A_=h^5=uOIoNKSxvy*i?WS@0QV;*>iD$ob)O|S8f%xO6KKnWKd*Aqm zYt3djvAT+vyRTrcH{_aGn9mUGwvWonyu|a*zlaCl{Vwuu-huq9U-?%={oX7|L?t_k z^8CW`#;w(Aal_Xy6s>XQ%q=K}Lq;kIgRWI!sK^q;a&>iu%1VAglobe(u-EJ1>g7uq z^qI?wW-AaD9M>j`k}>fjTWA;T#nr{YW@*+CU{wTJvz< z5$B?USW^|tSJqBWs9PCY4Sx@1%-A1sI3&Rv%X4UIkJKkdMN5qlf)GaM%7SLj?+ z{hbtTf=h5IIftxa@DKNloXc6?rDB0CDhs+`8?2PKRZT(EXw9et!dnod?SD5n8t1o5 zY>j7dJnh4{qBf4Uf;I!Z|Nd1x`TSMnuEoQ4ajDkaT_I!i&#KdHM=B!jfmjhTE29Sd zp#fic^u%MAaw)rcBuEHq-O?jGe0&B|wu7x>^LT!32QTdQv8Ow7=Y&7cpHEehEl6#F zTRIBwICCqZ`d+xSjOlPQyTA3D|NAF@NK8t?s;k;iS$3;ut4|) zh09m3=F#Gxh^t3USl_Gn77-|rLRixNN0O|!FS z&r!458^F*U56<9$2OhxeY?m6^tjd;BP18TDW2?(};_F|J%7(FN97+n-MTu3D7Lh$F@@Aa^;xk){NP!>X?eB}<5OTrP2YNJu;WXLRJ z1xJ2oBLhUZ55ZVFZpha|f~sCq!RQO+NfoEjRZWpC{Rmf0I*?}h3c#U8`@waK*h6rW znTycXym{07;2{pjIwI{u&;|+Lc{A;JXED{yNVkNUFAJtA3FSQ;tsr^RvxvURVVEuA zx1PR=Klh<7493xL8q(vo_{c-2@GHOlJYLzdD19`2P!x*mb97zafn;S$P+)G`H8P?o zpoT_auRaXwa((mTy@8*}H10XGiq?35W4ecqD{!o3aQA&@@b6!^h(FmXq;IO@JtVZ8 zt#EEi;jg^!F4*fAkUNJDK5!fU^W|Oq=gaHV=WrBNvKpyL67k^tA1>fuyu6NG#m<_(SLQe%%*Od(6My+Hd=bfb6RmO^)y^dN#{-rI4yd!~DmG$Uy=qktq(L=r+GSXjm& zFFe^840w$UYu5IzcOBNo1{oV0J~qaN=e2?7=XH1(gAF!0N+KhKvNRe^jy;{zO(#{o zPyMRuoO45u1|jzS@s2)6-E;fids0>X()aiM>P2(^p~D3E<(%P_uY4t?r>L2#&-d$r z7`I}|;ngZN`nRxD0NTs(LLZwpPhsQKBuT5Nc9oZ>2{-{_h0C2vvRs0+?JAKSNV#c*u^6NAIp zv1KcEo_h{943B^%DR`+WlM;})nhfGVdPnDdY3#%;-r$2 za*!;C0p$H+c{uKxoSecPOENfhOA@q#qI8+@a*^*&6iR!8FT3Ry3w7Q03-rx_)! z0^#h!yp0-72e~9YcTnxetm%wuW8J&7sHS$D=BWW?VXw-n#u+PUu912UgerhCHhpEF zu4arS6e*Ymfr^OsoH7DLxEc@71*h_!C0opO#E!}W0}`o--a=ZgSW2-e<6Y{rN~z(| z{B-Ye>9~BDSb-~QY?KC>rDP|uW$}Av_te6ErBy2|Hc#Y|L+}G(pSkF30rXL7)rwV; zNXe`BkwTOg+8ia2b?i(QyMB*p>NQ1MPk3mH5GSJm^NYSMM$5t&L1O#H057`sJcO!A ztlP|{rC7-rxX41!zc@*A${Ws<)v;lH&}GiD8#)nlL?|Q%qE*~{!8QzKZTPwTtDXOxA&b{=)9x+Uk}ShtTgE^fVIo<^k6pA4V?06B zG?LTY!iE!;AyO$a-w0H3n+Y)2$Kf!lX2`L8Ph<>+1kafq!i&!v$7a#Mc-Y2xrG={| zfY)BS6<3T6lInUs=E(?TxN4$`|MeTPL|O;WaEr)O$yg26v|9m{;RC7 z=dCQQeP+Ayb!3%VxT^Dxou}+5MDi`CRe*)~7%#ZhZV}6}>@tnS`YHs5Y8AZbg)bnm z0j1ZvPK#0xmV7alSsoftm$NK+^bTcILNF;yTasq7XSnsNA)F1^R6-TT2Su)#wBbvQ zjAh1Ohu+H1lYk1@x~MVpd)D)w?y=E5W;$vx<@8j!-=j}Ae7}3(Sgch!0y*Pmqr9Bq z+BT+@(`9F+k8AI$^`@lo(c&RaENQ2~H~k${JF|67M0nLpufWs@BYm0mH{%iOC?tyF zjwjkLQtKWI!ol%n&SGy36hojSD(yP9hy;O55w#-fr{YlyTLOvm2eot5rXC4B?o;9& zOfc+w*f}b1*!o?G#5SDX`=;x(P=ST|)&KDBnEO5za?)SZN;So#s zo{gi}l&&F&n+SXb5i0mW8+*16;EKs%gjw2IW`e{E&Y8kCE!lZum-#mi( zdFbt$>dM`hNf_-Ee7ec1jYN{A#E=>%c25%|OcDGqfkZovbI2!ic~oig@!EK#O0HX& z%^{U;q04Mx*Fse@tMSk$h%TtQU46mQmQdlit02g2x8*G_ycrkoy3jBW3*Ivt^e5_# z1|laz>uLzHzX9lPd|A(Xl>?+G*`2)YIld~f0hDvh?u)upe9E1l>Vp2}4TRR%^)w|G zg?{(9?ghX5I?Bfc%1xA=?^Wfd!v3+Yy;aJl9!+&oSq`RxFm9cX!PKA%p{hOwrL*2t zN2MiWK2A3{Qt1^;vfOCHGo;ut#i=_z5Nch;Z%0Y)=55bC>{kFTMch95_pCFr(a{TI zXtWc=nSzfD8wZEbT3P|iG6)r;h6tB#-2|rEmJwK59=enaMyQQ@+Q!5{h{4dK$giqm zY#pq^OC=dD-_~3SUY~V=8XWLUDMP^7=h{7sA{+~SRH8aTl1ca}V31V?7D#Lzu7GE8 zeqC_D-8co>2~?6=QpkiDcU!39@KO`g^|oo=ow4Bj^=xyfo>F+NuF4>8d zVB0yHkY*8u%P!ZPB-(S9W|s!*|M=cVX+RAP)bQ$8y{a4HZ!Wdk)iOo-=Gew|`!EyM1N+Bi^X%rj%X-kkL zy0WpR-!VZuX%Q`gWRi@DUR+b_?Zxeg7VSp8ZuFgDI?Po`(9;nJ`kX$G{2RZ^tQ>!><4PsUzTg>yu>#TPs+LjdcfDp=w>Cd1@tuyM# z@(9C35$@i&zoja(EVM=RV(wX1iau27_wIXJC}SF@z#92-|A^|Uzc_i;u5nu4i&Tg? zFF)|XxwH(ifJ6Fsf6g_(62eIp$#*@bsI~`rMl@M@TQ4$7xQ_$NF;X@_N`*Nro*;TD zcsh@puDSq$T(bgH#ll~Fx3rQF24&c4wT*+tOg|X4?=cH^b=PH6M*>!xMc-%r+~Wi? zN$F?QYGbz9z#}trSYaU%fo`l+;8T&G!4G{jk~WPOMo`$4cD=}^SsQT%2){EHlTZc=JRdQO4M z2HyO}+mWqILo{Zg)@Bg$tbkPVf&h|;O$Tcb8#W*pK7o#!hVi8@KQeo2VYP4V!);G% zK=$u{>d-?EJ^1c4jgcg6Y}vdK!y~nh@Ux#s@%WSbh(S|(Mwec45pKBoM(WaBPfEGn zZewn4u0yB1(QM$@v12%O@&ry#PZJIK%JK?{H?6F!An@HoQx@ICsWE5YzJq8-Eh1|y z)aB5@FO(O$N(7g=aB03^cV5@Z$|_oMMBG-*I5NNMg}>D%;>4K~C$V&D8uj@_wCZ&< zn+;P`*NdrM@EqF6KD<`FLD#JJg@pwyEG}YkVSz{%<2de0b**0rrA3)Uge0X1^nG7i zUdmy&W!R>>svB;&5!YUMxh=kxl+joYJfru`FD~TXyv<JrRyS4c4KOp<_8lBD+87StqpYRaLI?7wpc zgFg4NDLKBe{{+4>(?VRWA{eeA5(??SAoeb<;qIpnP}<6qlDMD3EF}$*NJ-rL_yIHr zCLssMzz2p1#`^H-C4A%HF(kfC)01A0NnJHtEi)&XBEBo5252-E2diP90i+0pDFwr~ z4xGZn%Mx{O04x{+R1Hym5Z`}l1)qB2C>oCH#O_jfcwjNZXOFDnNHBq=Ud z{MlnC@n{kfMGE!fQ~k-O^)HE6@a8wY3YYBK2sM8QY~dihbj8rtxjE)0w>DN)58l*f zV6fuymXNZBd+*+lzx~LU|MKa^$V=8yaH~| z%ysQ*k2A?Kq>|yGN1s5oHi(Mn;r5r`W{^M1Snr-R4Ch3l5~q4lR?uw6ICkO$re|l! ztCw*?xkMcW>3O!ixQvRZf@Ru+@k|kqg{Ze;96EZ8zEejqPHL4YXMt0M72hH(Olrf` z9cUW!)W}YDO(uQb+Hgh9HZ?lNI`WVSP44E}3KmYEM!Qir5Zg3C(yXJovW(g38Hxl_ zr4UiLEfFhVt=YnAy-94mhEcagk&9mR=e03eSj-D*odJ|A3b7XDd5HOX<8(kqUmBSJm)Tp^sA{%j%N z<*Qu{MZabkf{SxD9pcPh6Ehvmwy^Hrm)XB7S6`!oIdY16x<@(F0ZM7G?w*IvOpmjY`3g zvJm$lTfqOie?Ok|$M85G#%JbY{NsZMu#j*F=3D)GBV(K~g@h~0KEU&GDkRA>N;L*P z@8jDsFzHbP=HaA*Kl;q~@$h^N%WG9MS2*rJR>ylj`&~S>CLz6`K)Kll?PbRCz10Tp zT~Rocj$vOmgug%D#3S_-ZSLD~V-jDa4k2S~o*c%1`uU$mwY7k-wScrfM{rP#Be&~OClba@~ zwSDvCMjSnH5-ZIXvV<9yOE*BRH^qgIgGY|x-0kOL)7S=_yJH9Lyz>|Fp$~qjw^(Zj zwD!8Er>8MIGJ?~mXH4-nHB{~{EKZG6iYQC4)^4Mr7pKJLSGmAyy^XA7o}?8ulH7)! zS)32#vWG}1t<2D16N{@`0ZOA{ZFLQmFfeojmV|L}b`CSs(?mV*^FkgLSxSayd3gy* z8e=3GL0GNOnd#{1)buog2YM|yqr$fX8Tht1*O?|MM^Od&92xM`cy~?!Da(uB=K8rO zuzAZCTzlmecE##SE83r}sD;xrrwz=nt5%R^MJ71a_pA?5Ab|p)ECXw|FIsdw=R|&` zW0&H^(uoy>G=`9m<+TMR3!M<@(v~@$iRsW+C3+Ee@5<2KqC4G%UQN5H)?ZA+t91Y!HS_szS3utsnib zx4E{q2por(GF+)@2&Ic`EQ_l)B(jC8F4=;MwvXUD`x<1(k+T8hXccqoEJ9E_ih7Lt z!q1P>bvImPEvF^#j%iFoZ$>owD~#cz_dSf^z$fV7QlpKQ5Ad?f6PS|;N5X*Uq#js`kcO39;7uP($04HPp(>jA z(|7$kHV-q%Q?tk#^Trugh6aV27F{}CU&&Av-o{Zq=uDrGVqn<-zWBw5@c2`6Z$6tb z^0X0%+kWlN|8!e@b(P>{mC(no3w9Feo4zQwu#~qXQv~1q>b+k>${4&b#0y_|6Rx=Y zx#sqi2D5{e>TPar4ku2Wz|ztpEugNh(LEovr@7pyR#sQBxVT9B^3oEfPfgR$e(4Q;Zeb2a4-2Yb%i($Elp z(K_G?_sl!H)lH4eQKP6nMRW%^kX9=}mZ1F-%QN zP_B|VmZ%p%Q4w8cru|Y%hA5Tz?gI~!+F_+Oh*!P(RVIbhQ#WBYyp`0XX9&iG<;CGS z%82fP;8bYWc6Gz>5QeH%RJ?#V^YwyMtyJ>I$87hvazzhfFZF2ACqAac=#wKe6x!Nr zqT#2ZXQbub2vSH5)1RQh+56!-0WgVaATXM<3`w->PL;n8fIpvv9`8q zY6?y@j2X>?vTixI|L&JZFA!NadXlCPAK5~@)7{qLx9=N@d6K*J1kO2kCvJVwOTk%h zQ~+x>^>OFt7ZInKsoyAr9!sS8w{&pitf+JJ7O|zxUg*d&qo3|FoVs7X&6KpvL~_r8 zTh}wqic}+VmfwsD>)KEgrx+o|WRHYymYMU-a?vuGM+@5UyH0e|>2A`4?mbyn)H@2e zZ$_VUsawZmR^EEa3j09CRX(bXp0~;ndU}b9z(kfC0;9>$TSV8(QyjMmnyQTl%UVR= zo!2K}Q%Yx{z0!|XMsHE3u!s=f+c%Aj4G`BF96}5V!eda$GG6e!E!a?11OQN!R^hn; zFOOY4o|04&#ZIdonSL@#Jt}0~l}A-Vu}PKtcFgvOKn37gg?3N-;@qcxG-?D?M%h%l z|C}SCV`F9+unCYN2*z$&Ok5P}wEGQuE2VWQp;Iu#jVxz8=G>z$`F>kQq?>}k0D)e| zyb40*A@C~jgIa!mCarH38(kgA=6&`0CxlNDb!}%o)V#u)fW?(}DP=lic8qgM-sS1*081MVQ$MBW!KK6;H9V~YHfegs! zKJ}@Oedgaj`8k4vWf^vE-;RO6H-lF{dP1crS~!$t>eYj9@BTwLJv&E;s*84Ci1)qs z&oH@ZlN}E{HxkM*o)V#;D376zJtf^}(Lf$tYL*iOilB7=yO*)l7L)tA4LRh|oth9q#d2ZmNK8kW2xSsBndfFj0u- z>0Qxy6@A8na!j|_i2W>AYI1@=7WtSjfR&J-k>#i^eVl&zYfYr!=I|Ntb!!uq6tmpDA|l z9>9m+|5ohW5+Q4yK(cfgs?o4?B@A2}44#lP!nj1r6{?dP!KzjJ2=yvDgwKBIDg4P_ zefrHS_13*-7#deSO&pX>H~#n)x8T|m6k_W2yH1|AZjtFw88*P~M@o#$mRT!`>A7zFMZoxX* z#?CFw6G$MDMmm;?|8a^W(cZcC5>x81jk6f3G%)5kN=Z9BPLMSPTjn`bibV9%^HDizV00UUG|*BLDwQZLhrBQ2e)0}`0ddXewS^6eWS6w8bF zuA7~9((B;lu{&i>p9@r(HYN$c5>z-4=w~Wa59__LR~)`uL=jW}gD0~b3NM8rR3#|{ z0tHwGVJ;1Y1v5l?%*hhu#)IW)U@sUXq)$_9#F6O)o*2r>NokSPm+VP|l?Xp^%XtXB zmf>3>Pm*`+X7P55E=MDYQP;+w2?CcSWcYcBUcX3#_KS}6U8Cket}vh?j6|!nSJSqM zvbk9xi6os_@PjgHk|e>vKs6?0jNYRiSIJb#xl@+ez>Ek@LEgoW=|1;D@BRCaBPTUO@GhmC@{9sunNnB<`397{{EwPf9mvn{j5?~ zZab?YIz6Z~B|PvqzW()hUw8N2J8!x5wm0j0I5IGRvC%=yEi4hlFZX!5Y9e!UbGYxm z`*6+kt{^Y(>Z`BDrO&w(4}Sk)lU@<7Xl?ydrARwH3$bvf2!}9=Le`%s;E|KhBz+k zqRWu7RHQXiP@gyTkGK?!vNayXauP}|JX)}>yY^aa9JSGhEY*KXx-C&0;n2~emiNji z${C7xLtTA*@D*EGboz(I_=Ml4gq)Ueco@(9vacgkEbh1Id(yJ<%dXZ@ zZ?w^h_4D$eGN%!)9ToHJxTP+kENbo<7gZJ3{*H3e#- zTNcJ^pf2o!;^E=P_d@squ72JXL_6?nzw%bR?|uI#9)0vt6NSh!8bjr2krr>ER;$6U zgoGlhR;zg-iP=tXJt$Iw$VxIaDAi2dFUlk^EiAMof@JaOq)na~O#xKYC);Dj_bv>|j3{ZAn@x%W z0zb@?F8Y&4j~~Ya4?cuO)UpRQo|BG3wACeJo28C}Iq!!k-D0yuO5Op!q39ts{R}#) zt_2>dzQE{E)%5Q4j5b&NV8n2&j}c`lmg`L%I6RG3({vLPw-_OgGSb2KpbBmxDG|7N zCsQLu+kvDi3<#l>+~Ur z?rJ14o-LrFU3$-h@;P|m*)Av6la_>KQy_Z6C<0^?JX!;*FJLHM!zSq=5_KX%6qe78 z8WeT($0e%F7I*=DEOAgNjaSsMG2z7M=BWFi9|r?GYK#E%8LdeRhkn71xJjn zLmbbW9K>xGjAC0=!RHxXxN8a{gC0Koy&0s!iaY5DLpx6C3eE{6UUTCWxMAlwDl9`I z6L{iu9iM#Q2);kNhE$I!rbw37rSmAl(Bs)Nbo(yLq3>0k#04iED+~v@B6=RBZ472> zc>Pru;3u!%j&|C{m-jE?UmrM*6RJi`7UiBNgRWG$Y_t0|C+{7I24457tMMCec{wV{ z0{nCy$*I#2t8Ks!twk@!7)5He3c~Yt0Rtlx-D+bP%OR|;jo=-B^e2Bjw^V;`&s2Px z?W_&R$&;rJmH+)u|M++Bc;lPi{0IRspgzFZ<-_yuetft zSY2Dg;NTD)FO4JWZP~y7AeI&v4Sk5kmut@`OO3&xdPkb(AxlmXZ$SXa5kb5t^6JIO z_bDeYWk&ia)k;A3BP4D6^|SH%^Q0ZG6dA7h#U(usA=?prCTWooDkcLGz%`#))R{DO zD{~uS@{Z}8WPXyl0U+{1d%u|K42e2O6T232^EkPU&Wb*bG&A3osie3k+%TX9dNF4i zMc3RqGE7GR3aA7U!%AUffP?!C%=ajLtDKNpPaWJWGNiDYw#k4XX~IwI*jkeq_f14D z^u4kThQebA0}p}Vs3Ilp4|A{e%9z-+4m0R4&!m}Hprg^4`nmY{5Rfe(iE5|}k7~Et zq(VHiYU*R0zjGLu?AnZd(e`zNI`lmS83D{a3EN*4&=>=tTYc4LL|6g zYy)LOTZLo2YrPbcAtwYCI&IEJWcfzKd9k3!xfy^ zHjYPUmXNxr1;sj$*>FnFZPVLQ)Vy+3REds+URR9L^EDOY+8txK?KzvUXEdVm?Csu! zgGXnuFrOlY-?8B5QLRH?=IH`SAc@_(#_;NwUyI3M2GMFj)>olgO$xo49~7UV2+|;x@CJ&6j-NPzgGcsJb7m4J^u~m=-4jzpbcKJb zKX1-DGbs+es)Qm(1_eQoqsh4C+jfv*0y)@|F$fh#2DMTR33yNm19*Pa7mbxK7MxKr zztw8u#K9+Usa)yoP)Eq`s%_@pHNG(_3p#=9wfMA@)0cs)fVo3p;8aCG$1QCAmO+Su% z9{3>UTB-w}(Yu-271hgv3N`G-X$r1VREg)QT;zsz?27B(I>2$mC~m)O2e!lu@anUW?Ixvohy5yU+PoEC2{RmwENEOw zOE2Q+;CV+uy!*OX*Yh!sN`f1A`gqsRUWrM64MEyOn5KBnXoff6cma-lZa9!gN;7kgVADom z%Qlh#M%JciFH=MK%H4;A*-RBXHE%ID&@e)HGxu6O=1xK||v zjSjV`_^WJD6@m))%gakRXLyJ@b6ZK9XyEnwn#k0U0!n3RAiBa|x?1fy#As$QGCgsH zLY75_;+&eXOo*73)Z8R9#Vcy;3Oq}7&e3eNsmQNG<2Z|Gah0}^mg%TKhvV%w^@oIE zV2IE%V>qa%BsFO_6YUt3hbi(GUOjT7Q0TgoIktZ*b11<@DUPX@nG{sUIJQh}#%(Wq zi8d8jU0p&Jn_7X-JUZV{gtTWW&1ZnquQ!M5%)PT*oWbRLZ4ut4{+KP`k_l#vCN=fN z1Po!t_J4SSV9_pe>ANY8dOb#Cc^S#l9LCYa#6%S}ZbkG7Qev%uyRx)ij;z&2tjqob#RXt{Vmk9rlSdw*QIrY zOucQc5hq78Sa__^1}QXYRwgKgm}RbKrgO>ixJS{vmqk!n6HFu~|E&!v7zGA-^4#?q zEnd6Ai>#gsGx`tbye-$QzRS3Cez%PhM3~$@W5GH&dqM>qi1FB!T=2*BM zr3?%)p8uQ)y!$uqz@F_@u=WDbT17Uu3@_Eus-I_GOoYK{VycO8V8>1~-ZCTb=OsR( zWDvjq&X2s~nHmrLpfKKk+t2^p>shl|C)$G1kwKiZZ8O1DY4NamSD=%ISud3BB*S+e zco4y0l|1?zuDuqo{khkm9h?07dSXMXt7|xZ?6~QBK$g=mm=7t+wacs!ccDp0h!f;~ zgOQ+AoehJmBrw$B6kELtuE=9|^$l7$Y$ILZ2b98S)|(V5(XNf%>I#}s+p30fs&DAQ zO$BBvflIw8mcHFE{KmNw9?(q|*;y~hWN*V;`LrF1<(g}+K{c$9u^PBDv(Lp4=G*Rh!T@HN@~Kx1Ye% z(Vm{v!jM!7;lUT2`~^=a>JRdmXMb|@lQXjx3PlaEjGEtM$Ik6;{*BVkiaLgMOkI>> z)@7Ej)i$In7I0oN7;sk1!byo8mC<6EF(dS&SeiRA`Q7Pg+>`{nHi5f~o;XDfv@?VD zDxI7;9$7Ho>^ubX0=tLSIOsiu3PSoxd3E3Z?p2JwI6+=LaD?{!mC@<;Bs{-~+kfIJ z3YEy#;uKxYv8o7Wu}d$$2+KBWU1-3{EYNG zPL%=t3|9wH5dk4g2^Gc%uqtbKWT}l~^#+|^N3ReJJy2$aqBx7NZBu~X`>%K4!fgRq zx(KwEpjPL=33=+5rX{oYIz#Mv5F4lHnM!V7X996LjK6sQC-B6+<9~lvlX;!(hl)Vl z_O7@6?#=Ib&wK7klNcB5*iHuf@bOb74Avo&%@Z?Q0CHO7k002NdfdQ^ufGWu-^0)T z^sBMFxP;Gr`tycSv=8=OFKx@qD`@X;Lxci*cJD@D5m~OIm?8jM3n?#9+@g~xy<&x{ z@+L!Bq_3F6%$GFW(@2oz-$Lg_)G1L+*V1aVFn4+e3$wGfF2IaFdy-(^fkU`t z&u$D3)Qndzj0jSr9Z`B_W^NA6W{YYmuyEqR!9k3Uj`r0UD5tQ}B(dp~-p$o+J>lWS zFM2U4KGs)ODJoK`l+w%7GuHFT50oFU_q!eIs&%`aY_KYI6)Twjg)&~=kk45dX_^tk zvT2$u=e6x&En=wMZc%zoM+sG*Q!1xI`Y#~;aEd;xh>l4?Wn@qUvpIiSGcC^QkOuT_c{RfM@1Da}yFuw~n~sT#_j^e-IW@Zt5+$;iINgYJWHX z_U(5Hx0gm4HO-pumdtRTaiJIY7H)mX9z4GHYW(#--j8P6K!#2FM|9Nm2u&z$lc@8; zISMIj*T6}%M4jXDR9oRMzHlFY>gtPe&5j8Su^9El!&mm6#>c;R1d(2}sbFl39e9d} zh4|#7$B{+}e&OOR7!pmav|^l$6dqVOh5gHG;Nbv8>PCZEs zorP;ymI#FapSufg+ohv4x^$i@<~rB$evCIW3dGihsTCPk3Q1TyeZ-L@SV9<%F&K3DBD4BdD)RQk*~F-yN8vks$1Mv%w`RWBt^C*MxdTqk6Hz7lCq2RE~+ zsoanVZs|pw!7eceLKS@&ls4kVK$9b5Ak#^#&)F;3b=`6FqULd&T3j$X*3MAqukkDr z4;G4R`v@pyC`3mJi%!FH=Z&H<$QmJTX_zHXjsAp-oVblDY*a;!c}zfN5gBbytl}+i z`bm89vk&6P=`1JnV|EOy(g1c026_KSX)F^&-BRe}r3HJxGp~xntrp(*r3dkXJzH?$ zmNDGF?+6~5siWl$Sd~u;lx8NGDH+2G5AdnI$AET(E4NMI$owiEIJJPojRY%RNRFNk z1%@yCPRSx>~?UylpO zIK>l>JVp!I`4{b>a8EBZQLEMWc*KK4IwMA)#&wtH>V9VuH&UiXp%=92>1m2s^Z?RP z$^86`VbQdWj7GNB&>ckW7$;9o)8{r$jMGD$nP0%d;tC1tJ7h^AQELz=D=RCQnw+FM zi|j^X`~cgw?c336HK=z*4=nwCRE(F3YCTFvYnHiHd$z9JunR3p;i}qu_&lHj=6#kge^|c z-8VfF#JwcR>#x*2Tz%PY9GiLEQuMFW_v2`Nl%c0Fkr)V|5@)8X-Z+w_A;-vngtQIG zUp_p8yARGGO?5;yVrWEc%)|0Ryg>x&?^^RJ_}8P0`249=g3)IB8oVl@25Bq`PANr> zB6SHMkCLPf^Hr%vR(3)Z^WZFpJ579ld@H8@T|rr?i_1Dm!H%dbLWIplye8N^?I z=o1g0nyG*KOzXzkZqJ$ldFsh0?|tJPuV#PuiBBluha@DmbH{e zjL~qPtp*iTsuhIQDyjoDY#19ywNkZzR(R^EgVZ$XP-f=5UDRN1&1H2vPmWU?y;i6u>5C~3&nDId}0z_=9 z4IRrYYR8feWx%`75n zG>r0UU{QSJ$Vf|doFWZ4yva!*@S!}0lw}A|1NX=A$xl3txBbCKuIM|7pTYL584$Pq z;qUzR&A;;pe{he!mTgm0#JG9n#7Qs}k8104{4q2 zDvRC}UX&$0v=NcxxN>-Sy(k)w-+X3ZXpk1mR-8hqf-x6eF`8SzxXYs}bU`6Y|LT{j zv#2*c64zE6v>U1znb)5RNU%#SvXVh9w%lNMJUia?6dB^EWztrzHlQOt)h4u@_OK;$ z$1q$%+%BTjc-mvGi<_dK>%pTZap>>~`^-$!8|*c>4+%>RFH~j`SFB)O&14SZ$m=aA zJ_}nA=e7oK>8{UYPS%xvl82Qau3F-~}v0@+C^i zMv~he0h!Atl%BGIxXS?lcF$gTECpAYm6t7QE&0JMQcl=DEqyGOUofmQmvuZ>P~@MH z0ZFB_N}&R+$f*8t-;8?$=n%QXMv<%+qD?FW9yp=O7_e?E<>r{VzuG~HGlhuj#nf@; z^?p{_D2XB$lijxS+0?y+a?jAXbTX%&HK|SbjEe7$T1D7qRO+rb3{(a|Dh$Zo6E5Nu zd5V-c@kA-rTVfaIWm@6<8N0!)Y&(N?)o>I<-=PH@Ok04MT|vy-E?I7{NEcaS(s{XCL{-I=BBBZO^&^+4scW zd;hQZzklqFzw}E_A{^MX^W1IJ#P;M<2jTgl75x`^@l07_eWQoY@Uc&Q2B+trha0ZC z6qj6dF5dG;Z^yrX?jHR8KYR?0)fFn3Ys;OfBIQKW&=&WSBq@wXXFba{~ zbGy*n&v8|Ba=6px=*Ue2MaOu@CXMIXk;!D8`S>!egekxyPms8cv+ffpj>URjN7wx{ z#)ilT?SHV#Owk2dd?Q7mBBUfmYX8(gDGis`KvaG!7+3*nv2-;keI^H%*acEK?i*1+ z(MZqRLd^_1*EC_Wavq-BF(swT4t8iLbbt!1Rt++jGF52D8CpCgDjd_GEP9@!{*qK0 z!>OGII=WDZr7|f&W$QB~?}$>iIuL-w$Y714m{wambw1S%JHFpFMeDEiJ!*!g_qgZ; zCKJLPV8=BA=WPVtmC9a^kBlG7$k{q)a|Lhxh5v}3y7e+_@fRVQ^T5h6$muz#dII59 ztqLMa&&m=N4-CMc+6ru#Ko(YPT2-%UHGJ`N58;3O-rwHyz!N8L!LztM>jp$`_uhTa z(yg!f$xYiXxRCThCdVhRys3`axp|^KE+D$rg;z5q-{c;(Aq zii!F4GLEQ8()HmMcn6!11vS z*fcds+(0=DR66gta94Sb0AjsYY^hVsdGn4NRFvRZ*K7R@i0t!vAM|%w7>X6&YZkul z`d1{@4R@jHxD`wl6ItfRV7T|(;k`I; zVhJhZy|J)0lzKjU+9AyJ<)kHgGmb13Hl^J8>=`;`sSAgXm4=7D0TF^w;;!F#D{g<$ zF7V_e{N`z(H4iy`8fZ!g7UVUzV!fn_4mDv06O+K`I1mmR;4Age)$s5`C-Ao4|0^6j zo%}-Yfc)XMABh3c+h4x-eK-Htx4-?K;fYP;(C^rB4lSMwi!0RdQs!=w`MO~i(6Ba) zd%yJ%HjZq>wU=E6;ni`;g zbtm=MIvU%(uSr)7?6U-)tvX)ruqswg2pK&GxbdSbBE}j0l94F)T%fG=Qo5lfBD?O< zqn_qm-S>5*y3PQ`fL-K!UZTw+0Z~xiIM+wQrBu34kmqA0b9cbikyzQyM{WvO_dO}U z52aL0x2H_CGiL~#^VLc9;_3)Azb(?srF+?WteHR#n|C*x(vhBkN4+NbS)p6MQX70B z6KtIr#W*sIg^VJy(TNE>wb;O#D%J>>%BEU3bVj@b1cVtIw(w|@6SpZeY-3$IxJ z=0Ef8M`l1y96x^V(Zh$aXLyV_tdM3n@7$db;v|lroCXUF_lxPcwDOn8tVTJb=BEGW zlV8B>N`xP~=~|2l;CXw_#~ZW+?c%T+~s(OI0cT=TKEPJ;4y>s{Zi z%IDV;!sRKDZd=+nB2}HAG;*z7=}e{a*jAM!m%%zScO5g7d+>Qki=>L$rl!aQ(bSKI z0-j6}SStOVdzvv~ciyvoBh<_^Mtl%8{&1y=5v-Fx65#@Y@YcIb5fABkZ;<*=0U?4pSEFD2MdlcEilA(6zArOT5ohJ;TFHEn8 z0K!e1fzeIiwE-fwqz?MDN)+vX^WlHM7r*-WkHmNY=&8y*tJ{eqNADRN8GB=;S_KaR zDr(jS2N2e(SY53b9)ilaNBD2+_0*#Xlwp!V-cM-0du(Kwv`_k_ zF0cOSU~mzcu&_&$s^qd8XIOd5uzM-5HM3Q#Q?;J3A{AtX~skpL}!w zW1ZpneT*vWd!zcLQ~TYEbr02b;Br$MLwAy?<2%ph`&hUC+ts+4r(Vu*NAN#A`?2n+>2QJ^1RYonQ@rYWyyI7sZ+GKtMKhDy{zE%Z_4 zK+Ox#mK>+*4O5`z+%!xwg@Mq+xnmV<4t;o0oBFwY>Wz}{MTk}+kx&C+mIEQUcGhGY zcl`JZFi`c7CPO&67~snf%;B&9`P;bf=o%!eb{KGGoigTtE0&H&zAGl&ifJ$@}a^?xh*`rV^%ka6;N+Q)j+au(Vo}>^OdI&a6fR9fB)e(A+VgC&g zx2yP@|NArelMj6CKd#hU53T=Hf4J>zhw~4$-FC-o?!N9NFTU9y7$RPO&kLzQc3}Sj zg1<6un$naq#WMmB<$Bddr+lCq;P#(*8LrrK9(Wd`(N1t`b{U`k^k?zWfBF})>Ym_G z(!e8mX>oFeJy$euWf6I{1Eb^EedRR>CN@DS4~=6-@YvVBjGAY#^dtpoVV8mfyLO(7 zY7iKdhO#32&Tv>wj?_g8q#shnz`#JBW@3_AHNBL&$cc)*)L-EVK@=%WbOwfWtTL>Z zF*@E_eT_Qg-RAoOgJDRM#8E%E;yF>GM-f)*^_&IPQK7iHO+SUieqOKJ%6jIm;jo{^ zd0tp8$>}*zMQq-LN^=P!YG5h|F;%O9RcrXnu>~Anse=U}y2)=mNL+vEh1i^~AZ)Cl z76u5l$q+skMHL@?_#oP-Li#>3&opO_RRpd&X98QeglMc7kL&D;U}-(gNdO4k3De!drz!lvE?BZ0TMz$d!6+x(_>E@ZD4}9sr0iM8RYnh zTb_$w{rQ*T+zE*=n+IE-2CL6NuFSzB;;*V<1K@;&F+6@&xQtS0LsfViCxDSLAQ%Ez zXsAlyBV7&gyMOq1_}4EybjN`MCqC9YYM#CAN0!1WZ=e0=kK8i8d6T;2c~`>|A$4bZ zTw&L)^RWNm0qT!nOq4uE%VlN5TTd{oHJbRFfA|RA@S6XC>n`7eszo-v{td6g)-Bud zfe-v8T8$>Em5^veN{N)>H@o0hEbFCOUdE$e{yf?$g^Cq%)l`xgOoxnRw2&>niPFMu zY7~VMqBb7wcAFS^_2i|r9nl|nIZG~u=Dt-h{DAjr<)I}n_9zdIUNM4 z)`a$|8AV&>ABHiOg%wKvtTpS_m^#BM*YzmRIs##RUh95eKgXT1T>HK6f%`^9F7os)aJfzQFkS7#j6D^^4eAn?;UVFH;XLnGhw0eppZL-G}K@ zr!YJ`iqX+A8bIfqa}I94`Gpi+96fRbQ4&+GpY|auapGGF+_~9K zV&_Epo`;EvaU&JlRS0(Jr1|-IEG#b4^{lL|qSUFeQEwi`V zGzf#R(sAz`#kNygWHyBr#ZgI#(2&d3!b%T~3%9JH%u=uiVPN#cRU!7(L&t#vxq@}} zZiBYoqSI|a3W{`NeBJdGe?uuLU!0NSM#=|DzI7Gqf7B_g{t3W!zoX-`XKiPSg_jG% z-6kw0On%SS`7(-PyNyVn6qefzs?n)e_C^goXJ+X)|t(hO785TmsUr03(&g;gxJ6R1Aic%XC_u)XG^CKP1|LZ4xH zWC*en;#-GKqa}@ov!nAGt_0XVK7uMy;&3{K@+(NVkNHN719Qt*s5ilcPnTejd(t)( zdPF1{TLhq>h28XWP&NSBr% zveYnhi-4Tzj4N;*c?i?*k=flJ8v>u01O`VSg-@<_#;V||ilkM+$N%YWyz_k@z30&J zrI%%C)_%HC_QP)MrW>DsF&Jxq?d#v_;YE4D4cA|3Z^ecqM~}76J?ETDIOktp|LfES zrLe*NtSG4SzWJ^1+~xjy&2=}t{rL3szrW;_uexvdWtVxG9_Wly>Sl3e1&61nktPgY z8bGSpqCPTPd^7M{qFdKqzRa+5`&PX0rt5L>g*yn&7e_IcmRIrZ`|ij8`iGC;#PMm; z#18#{;KT;-rs%!q85bSrZh93VEAPOp>nLdJmd)5OSR*kc{eje`Z89lwdU~3SM`|mm zuBmX(MSHMi^A?jTA`(W!792$pmX?-q>cmMh9_GS6w8e|;#AKUGw&RzgtCdY&&6ifjZsmq z`IH-lOj!MVOH0HMW6RbNyz*t&;umhe3R?!6s3eO(YY|voM7*>LkFpL{kWE?0m|&0< zLoGvVpr?@4LUsmnWD=Py zxRRtlO??@*3C(GObli~A3r^dj76^=uj}u$2JFj+>;ONn##CY4;9zGX*5`fw3`kkzD_3SL zr9;ZSF#mrrAV28V%lOc1K+5l9r6$tu8VwnQ)SaI(AWqW2oqIvJS)>(ZA6_%_{l-Bn zD{c}EBNrHC5*Kfu!UmsX)bl8HwcLpCg?$GQGvA0y+4UfY$!4IsUywC%>vfl6$Hozg z_8&ib3}5`-lc)!^LhMVqOaPF?De6vSLJd^FLr`5UVY_A+W zcKlfhiEg>|<-dIQ7e4*|JKuKKqv!0p;L>&!Q^&R*ob`HygNKe2_Y#HgoEILA^Kzt1 zMWwWOGM-@9h3DbAtFFebo#zqpLlP%wCT*NJy?`%#>1+7**T0EV$Bt115Rfi%YAn5u zOL(b}*l~Bt6-SvLHz3l$fHczezg;w82nXFnG!sluwz07>iYQ3IkGPWBSYBGDh$YLB zRHa2uF;uHHY#7^M7*eePagw{F6`!UQi4|1z?z4(`sVZTZOYr9WSPEpkbo7$BPI2kF z@=d2fZ@#^EaZ_g*f%L1VxYuO4(AsAh=C?wrj^m=sfb<3>{SC<3jP!n2bhZ=ZOf3!_ z@B80qK+5-m_ee20=Yn?>);sz-tTu;oB4xJbSB@f!g1gGS=zM&XBaocutv{P0Max-6 zi@J{XMV28@0!f;oPSH8HY`O)E-P}5(^TM(S13`vOV?zX6KfSb!Hme{ZCIbVp=J0x8 z=0vL_gqgc!Tm%^rgJF=JNJ|s*89E-j_@`Mya9<^31T4n3ts8LV^UlZZKYl%KymUK; z)iTgH4Y@iE*{VY}oA4rKP@cjPZkBsgh&a}iK+>uJAFM%4PJs;#1EKX z4?gl3>Z@%^1v%B6s7!a1EcV$nHObLAW{F=xs-o} z=ro251$wV1axKj*7&!+e+u&Inn>P;O^{;ywUiP9(v1@A;etQmlZ4p>nK(;!Mthq|* zE+M%=Sz1bX(uzT$3@K%5*qEyx_?AgyI7Y_ldT5IjkO{!wh~+up&bPcBAOG^+|IgO( z|7}I|o&z)fRIAmj?|)+NyBl$Q^Y)!P&e7hwq+W`YitAaDQoo29$GlvHr3OSnB#Jut z_6VErfn)mQX*~A$V;C5$VbjC}_0456CjP9Q=kLJPFSs6?woc*1^eHT_uH^z#=(lLG zBL7@OBit5qIWlEwrWMaoWR9N-IXy!u-(OC}IRuu~eke~VCWIaAEGG~|$#AUG=4IYv zQ*t>ScJ4W;QfE8zMWM40-fz)*<}2fw3`l25_vsDDdgt^5)}%{7inIIl*`(%vu=9{5 zdCv8?&7bZY%b)FY@A@Lg&h#9MQPM$E(gQ;2bCw;)DyK79$8~ff_E?|qwzM}2RG8*D zFhW=fSn>a|_vJB`U1fg1bI!eQ>8e*<)wOkXb=%c@8=LkR?;Z#S5@4LnkeCc0ML~)t z41b`RY$O>aQVg1mG)R*WGFUMaB_xwbBaAXIBt{Iv0tegO#t>uKxNY2fw|noZ?%LnG z_ns5o?|f&u@4YIUwcAk+GS&6!z5DL^t-tU0^NNC+=eG?{-O~&{MedRXt|WnDEOWmg z*tM&G3a>97w8n+7-=`2whl_v)4c};WWu(cFx0f-P7Pxum7(V*Jcj42Y{JVJ1UE46x zT!LCU19AB^6h{w3T$+PeS+U2XBuW3W03|s(EdvQB^QOrJJIQMkmUlMw#La*XK?U)-^UN1e-Wn+{}`)_bIjOkZ&nfY(Pba12ZKlp zoZm;f;tZwKQFA^{`z!EV9UeOO9fg}rs zPU8*iOZWEs9AowGaf#;jzR6{QYoD0l`1=^+er2-C+xVLH-Ho*hW#aYs+s|?Y!yCSr zUo+_9UwZx3ch02GyhxnI^qU#-SarXDx`Mn5+jYT9_xZ01dKpC7`a1jH*EvQPL`1t+ z8fbT$Yia0|DwbbR#Hwt?;H*HJ<(n&=MJ)lEiN@x!0o;G@e*DV*8QgK(9!zf@MUt-o z%a?%JW6*QwP%JM)v^z)(z@!SIrf7(<;Ab&$&v{}8Bc*1OkC@zH#SGubnt@s5v_EFMsVL zAO6>O+;^`fs4xk1r$E|lLZmf3{nXRcKX8PI-Ggj)8d-#??;dpzxKYriv4fB z3B$uJEUmPWwSjh?L#i4@0%uR1#n-<27kK)62Qho$BJ}bKY8&cMO8W&-({>iT`Ad*k2VND1ZtJCL;;NWTLYZ_I)CnslGCjp;2h_3G#w&jHnY(xrNeG#0k9Hd*VfH?nN6Rsj_<*K*S6mPVZZnF86xd2YY8JGgWivEAyE4pfeMJ` zXI{d|47<{9PUnj3hDiMPm;cf0SsSgiz_#&Wy#4OC;;-F*2lnjQhSAAkC{Y@aMIc`Q zE}q8nnPZSkbC6k1`&(}`SXx-1P^?O+Y^h2*J$=56y39suHK@%~!1Okt*$TK_Qd%vL zCWCnGWE;Qv(cgdaz=4vq36mLga!u2iWmI_V*a@6DbA~iO(loWy%#6;kf;%FD2nuRAXC(U5!T8t^ z_T97x`}Xa_maW@te!z{wL?%SjJbPsh7cX4IOF#M%PM$u6Lx)}=)xp)(6?(W5EuoC8 z&MH(gjDnC((h<(FK7cV{Rw35b_799Py>l2+Rw1=(tm*^S>yY|6kiPY7)2E`@&oT6V z)w(n&m9kZD#iM6`gb)=*A^3CF>wtSd&&JbJ;&km38;|}rNmTX$-Vj#`j64bNznFe# z4%uT$5Mlc9Plc&`pK}<7aPruM1vd2Ay$WbRtl8U<(R@$O8=?kucnv+M?S2vg(N}v1 zLRuMI>g~=-+%poFv|NyfHG-_4#ewup&>PotVe%U(DB%|j>4nqNHw92wMa!Dd*tKml zc5mN`y}P#Jj$8I%_qJ^q8c5No<>WAm)ypVW7onG~LNCt)?RnJl6{v!_!K9r#F-lsN z4H+DWP{K1om{dYHQxuIFYEw4=gCi&!Efg|EQXYJ!s7KpwCwT0CJ&n&l^40Hr`^lHD zNwa*db(T}g>NYZIzNXzgp8 zUbM+KH#G}Bp|2*m6k%wN#C+C_)TEy%_pr(eT+?m%jg>S)FuV4U8v0}wSR=bo4lsbA zwT@_LaJ}vKWFV1~>egxRdFOgGD4y}aohR-0QS{9MBc@(=9ZjFVS9)Fi4_)-`oy%Hf zG=*OQ(dnLG!k?3VbRO5ZF>+B(vj z_g5u5?-!J;b6@tJH~ak1fL?hVh1K(=Vp7oo@*Iu2L^A>2x^D;Ge)rq3f8SngpB%!} zNCQfAptDsH!8Pq=6zwG}T|5c3d;v94*uI9)b}r27KDdLVoMv_>Q^Dm;TGy6DJOh-MIa4-}5(q{bRS?dG~vp8wSWoYZf3qK7yNL03kNhiy7W(YA>}3Dx7MHeDmpBffgB`ERH9BLma7JY?XoU5MePy#Fo&4F*wgWusqI zT%B-tZpPAk2g-lr2`wFb)qP++8^u4%vlJH3?vdr zCCoqyHM|jWavEq368$`hL{TtOhtwN@VgsId>Sa9kmj|E!ci9M?l?%JIqjlR7e2Gmc|mmU9 z)wZ3yDUo3E18rFUUPlNhTNw&ZKRH!jQY?J5pXGrpva01_g8^g0-^md~*;p;D+0O}! z(4*r?lrLc+H~6ZM3$kgJd)JTft1^BTX_M)CHL~f%vM11vyM>#zHt{aD4}_OgG{hvNv2Rt zNRQ-;&%5Nnf~Lw@ZP18rmf}#7vrAR(&#{Yyk3MoI0_D zFFgDxKKsS5Y*|@ppT%{&t~&=(uD|~4A9!?j{^~alJn^0X>|UGNfA?+K;@ofi*6;m` zPmN7&wgOOv)dDFa;#4Il?~B(?ox-cH9!A#DK6MSoubZo1@~~9F%)ez?S|H1oD9uf3 z1~=cl7rS@w#`x$sh6V>Qkf!z_h_)?*t*~_QInQ<&L6jn7JP7YyFb#x4L6AUcA%8IHf3 zvUpR_G24A-_@jd;nxYHC9}HXNI(5yhxYa8L<(yfyL$|zdK#bpgD-sM%=xQxi;Ih&!F9f_O9uj?02#e~ za{30jFn@LY^zox#pV+o@^Of0)mo`pre&~v{iI+ij;SgM7|u`!Ge4Pawy5W^dX zF*LjpW1}Nz)Dp&*+xNNe0;>-O!)PNcQZ=j$TY;KoR`f>rxe(#fV8c`s&M--a4zs~C zTyiT{o&`f$8p#5M|J`RDu=@o>=qA>sH(hmTo!eu{i`*I(p)1>SZ`Fl0AX(n=WhTXC z6vKwpp6@V>C5=H(;~K84)!XM#eLMPr>IIP_GCDMijXS`od}y5TBvHW>3=c1?VMh>j zvWD9*D*`(mC0PGkFwjPX{j=>jI^KF2IXl{&N^iGdBhFgkd1f6=VR_QvgtLrA654BJ zP#8Aylz{|;E%{m~y5<_S^8V#*h_3Fq^!*MEWpX=ZVKUpekNQu>xB{d`p|emTWiW`~ z9+*4NiqT&VQmb6asd5d%_lqc#rBYWk^CY4YU%3*g*pa!w&)o<1zLy#m$Z+Wtq{%HL zKLWRuZEH^ zo8NqC>&%V^N+%NkZE#}C=68SiBWLgXEAN0*DLQ%1G0Gj>At)mLA|(WA$RO}F6w z2}R37pbFSf9DQ+9)je>g$|00MqrI|(BuNR~RT}fb0g18kO&A>=!|3oR1_lR6-ZoLT zf8vcAPus>Nh4!)~GNf`78{U;pn|>BPk`Ga%HD>lkHr}~OddSLURTFI{q^KFEslE!I z(b(fzT3Y7U3aiZ?OAJS)DGL^eZ@`jk3o5T5Y7jWs)EE&%7rbyG24TT$z60CC$FBoM|t%Fmb z=gJOJ+NK@x{Aa|*Nm}d1{;hoY5|yG>Pi?P6*ofRowAm9taVy zOX2PNLbl0z;c`E_o9X?O<5-$kc+C>ArUOZQRHmb$F_)eMGCsH4 zy=)dHfCfahwB@s>icuH7v{7l4yXaU1>U$ZH1tPS08EwhKcM{QjvvoS;#6%eC^yM(u z)ONzteDk;8C5BT_}_ORJ0xGtPk+ z56BQY4_PH=?IMyBh@FM|Z27FluXz+8Yj7lzGYs!s3j17^=ZqyaOnFXZ!Nj626Dfnu zgr6hnu?(76gZUnk%up<%h7M7>8{`7Wl`UZ<8&c>^EvTVE$c-(aHAr7v*p!RhvMpLk z$b1+VFBJIikA8RI3;*e{e=|F``qbswg>_iYuitfDI}o?-yyMP)RBvoJ@#K?F{P|k% z*|KZjLpR@b*Zuo{^}hS3rlzb`SrKGot>_cbUhUxQ`SUn??i{YnT_q!Ho5u;HNW&l^ z&v6)Uq&2r#t5LJ?hJ;i1sQg4f6+dLuv}I9z#5HvlJrMj|q2d+5CZ>O1z`lU+5E}R3 zM-Q$m8#v?akw5D#Yuu#c>WZ44j`^2ICT-onjCw`2U*q|%JWD_*aTHN$!#fFAf-qLY z#4a6u7Cjk3KUk-#9GecMD456zbR(gF#28=hb|@wxOc!jqG|)Qgqj`BNuE<37^XNJv zivDddP;dsL{alHC-Be(@u2x?f@|tUuyKu^;mc|d)Q+Z1zkj<1- z0rR02(v2;MO+yfM6`YMxFh+5}mq6!(SeO@h=HSct$~Rv4)4zQD+3%dcF#l&OEA3yn zX2&mby(t_>x!(W&2h5LNda-%xU+HepHxLVPcH zPzr(_gZr+KnMe=#aT!R|mGzDzD6#0Sc>&uHuCDIckZJ}cBPSe{YrNqO&D@Z!biZaJ zY{mQN`O#!>AU?Vlx>D|{%xFZfrT)A|`^{x9i|m2VOyI9hb=JgfyEN=IM*Jyq*g9lB z@wS~a<9k4{15Dr?5BL~MeBFjw6}e3^W@+J+-}!T4EdNR1V~>kgmhz39|MEeB)cM z;6HxxFYxk_^Ei6^@|(u)zTSFM9mtLyJ07}p>GB`U&tL6Z&T?yD)5NAi zec#PDfBc4-8_4M8Mxs;LkCuUT=|u8QLD9$Z$}-NMzlg=fMJ&uMkX|{szD;E*b|}_? zSSW7jNsEYTgN=-GP6NLADqRi1qfq#9^j9Ie93qZFBL@-@H2O*a>}x*y6xGJDQ-ia= zazCV7Q4omg%Ta{Quf#H`!o5=?nyNRIMk`zijQSE0&4{QvkT|1=BX<97riz&K`_SV+ zLWbh5iS)>>;4Eb1Km=mL1uio1`;71bfZGq{<}G9dEp|p+mcHFxE*3Tb4eE6T7JbmiWdhe|@2a=@L_ziQ{hQ!y?*2%Cnx7(bc} zbBb;Kh8OAlL@cewuQ@8(3euGTN8LjL1Ep zNbvtsE?+q#PNgpH&v+DCSMN#sb1eM6#t1!&Y-Fda3xChW{gW8R?rL#1-VvxLsm~JK z=cH%5b~7xTfd~`Ie!?IXd_#K2We8zuv2X@OswjiVq(R!OLzNC=AOV^w?4z!DpiRLEf67LL#I*aXwV_75M>$8(U8^w_oPT-*tqv-FEuPpRqr897`$t8 z>a&3R0>mf253ERC1PRC`8t}*Yi5*Uj?-}Xl8p~^z zCj2@o8Hg+K!j);Ysa!k1eNPi)xCg@9h_Y6lw=Kdfkx+@GEF;;_uy3d*z)-7vXn zJklhgK{2Hge!z^C^JLu*6jq)#H$`YLy5T>=wf$J%+tWR`7s3#$OXH2CEK!K*NRtTbmmTqjqO zjSNJXF(`7#!f7`MA0aEI=17UpOtb16xM9uM72P4430uc!wp21a$~p*wu5x{wKCdBk z#&Z)zDAh1L)}B^TDHkEiRCK1hdQui9DG zjiLckn7ApT?eLrkWlVu844TLRxi296aob~X&ayP5TDo#mz^FMAQfe$4$xv~ZCA*TV zPbFbw;T@m!5P1d<8*7MV#K!DUZ+v5h2LVwd$o)--s)dT)1`+GGm&d|-AV#{$NBei0 z1m`AD=_JbkQVCN}ZBw0{lt>f)F72WP=tRIIOp7XN(5Ru14P>T)tkc4GpMC|;A3Bc% zKRkZnxtC6SwcXZVzB2dbL{t5=Yh4HOJD>RY4k5(0fB*SMp19}kJMTI8%(Dl7dAnxU zC;#E^+*M?G@!5y}Lq+R%=Z;+uZJwI`+EY(G@!U_d-N8*0Hw}zUJdhdlhcZo*ox6A8 zH$LzIv^EY=uOqG1=z+;QnVliw>2g&XY}XquG&zupwe{JA?t-6^q;u6U$7q|V4(zBR zj4yC;gA&!rbkdz-CSu8uTT~j6yzOqxEJeVk(qrjXdaF7R1jG@SibR)KmMR#RCD?#V zE+s@&Ii(|K$*(J;y#rC+_~+IL!XV{|UXSfJmeNB(vqQF@la!$t-_Vz()6yL_;I7Q1 zsh8yWs*EM}3%h9NAkc)H&&MwQsMM;E&n`4=Q~z73|W!Y^`Meotvm*f)EWsyDwsVa zRx6YNnMm^@hVDF()JampBu&gM8eGi>@ye-X{L#Z-!l`rfSXk0HcW(BY2jSOpUDpTy zp%4D8JzJ-zZqS7`2cLQN*un4rz^8;`n?@V0*3duKMn8Gt!4;q+8-d_Fs`1ICKPHo+Cdqd%m_uh8<$9C@7H9fFlz-qKRZYVa=c_+7} zC>h<@+#7HP*)>t=mgE|;=Oc0tJ39x_Mfc$0k7d-UZN1kmfuIEuPt~g)wp-B^-8wx2 zMyJGdA`uUx@c#;tsTg0n(nwH0XeJk;Q6(9shV3M%P%bQMvv zg!4$pO}18AbPf=K1)}~`v{WMFtHsfM+#9fcC&j2C@a<$T?hI(4V_FyY!iM`}*dn`gyOZEz=|A&n?r_BSOgA kg%CRg%v=6%00030|99iDKo*q}{Qv*}07*qoM6N<$f@HV{^#A|> literal 0 HcmV?d00001 diff --git a/mcpgateway/templates/admin.html b/mcpgateway/templates/admin.html index 335fe01fb..1e622b86f 100644 --- a/mcpgateway/templates/admin.html +++ b/mcpgateway/templates/admin.html @@ -8,21 +8,9 @@ - MCP Gateway Admin + Context Forge - Gateway Administration - - - - - - - + + + + + + + + + + + + + {% endfor %} @@ -3530,64 +4177,422 @@

- - {% if a2a_enabled %} -

- Private teams require invitations to add new members. Use the team invitation system instead. + You are a member of this private team. Only team owners can directly add new members. Use the team invitation system to request access for others.

""" @@ -2851,7 +2922,7 @@ async def admin_delete_team( @admin_router.post("/teams/{team_id}/add-member") -@require_permission("admin.user_management") +@require_permission("teams.write") # Team write permission instead of admin user management async def admin_add_team_member( team_id: str, request: Request, @@ -2885,9 +2956,12 @@ async def admin_add_team_member( if not team: return HTMLResponse(content='
Team not found
', status_code=404) - # Private teams cannot have members added directly - they need invitations + # For private teams, only team owners can add members directly + user_email_from_jwt = get_user_email(user) if team.visibility == "private": - return HTMLResponse(content='
Cannot add members to private teams. Use the invitation system instead.
', status_code=403) + user_role = await team_service.get_user_role_in_team(user_email_from_jwt, team_id) + if user_role != "owner": + return HTMLResponse(content='
Only team owners can add members to private teams. Use the invitation system instead.
', status_code=403) form = await request.form() user_email = form.get("user_email") @@ -2902,7 +2976,6 @@ async def admin_add_team_member( return HTMLResponse(content=f'
User {user_email} not found
', status_code=400) # Add member to team - user_email_from_jwt = get_user_email(user) await team_service.add_member_to_team(team_id=team_id, user_email=user_email, role=role, invited_by=user_email_from_jwt) # Return success message with script to refresh modal @@ -2928,7 +3001,7 @@ async def admin_add_team_member( @admin_router.post("/teams/{team_id}/update-member-role") -@require_permission("admin.user_management") +@require_permission("teams.write") async def admin_update_team_member_role( team_id: str, request: Request, @@ -2955,6 +3028,17 @@ async def admin_update_team_member_role( team_service = TeamManagementService(db) + # Check if team exists and validate user permissions + team = await team_service.get_team_by_id(team_id) + if not team: + return HTMLResponse(content='
Team not found
', status_code=404) + + # Only team owners can modify member roles + user_email_from_jwt = get_user_email(user) + user_role = await team_service.get_user_role_in_team(user_email_from_jwt, team_id) + if user_role != "owner": + return HTMLResponse(content='
Only team owners can modify member roles
', status_code=403) + form = await request.form() user_email = form.get("user_email") new_role = form.get("role", "member") @@ -2966,7 +3050,6 @@ async def admin_update_team_member_role( return HTMLResponse(content='
Role is required
', status_code=400) # Update member role - user_email_from_jwt = get_user_email(user) await team_service.update_member_role(team_id=team_id, user_email=user_email, new_role=new_role, updated_by=user_email_from_jwt) # Return success message with auto-close and refresh @@ -2997,7 +3080,7 @@ async def admin_update_team_member_role( @admin_router.post("/teams/{team_id}/remove-member") -@require_permission("admin.user_management") +@require_permission("teams.write") # Team write permission instead of admin user management async def admin_remove_team_member( team_id: str, request: Request, @@ -3019,12 +3102,22 @@ async def admin_remove_team_member( return HTMLResponse(content='
Email authentication is disabled
', status_code=403) try: - # First-Party # First-Party from mcpgateway.services.team_management_service import TeamManagementService # pylint: disable=import-outside-toplevel team_service = TeamManagementService(db) + # Check if team exists and validate user permissions + team = await team_service.get_team_by_id(team_id) + if not team: + return HTMLResponse(content='
Team not found
', status_code=404) + + # Only team owners can remove members + user_email_from_jwt = get_user_email(user) + user_role = await team_service.get_user_role_in_team(user_email_from_jwt, team_id) + if user_role != "owner": + return HTMLResponse(content='
Only team owners can remove members
', status_code=403) + form = await request.form() user_email = form.get("user_email") @@ -3032,7 +3125,6 @@ async def admin_remove_team_member( return HTMLResponse(content='
User email is required
', status_code=400) # Remove member from team - user_email_from_jwt = get_user_email(user) try: success = await team_service.remove_member_from_team(team_id=team_id, user_email=user_email, removed_by=user_email_from_jwt) From 1e101deffcbd95253fbd58391697dc3eca561544 Mon Sep 17 00:00:00 2001 From: Mihai Criveti Date: Wed, 3 Sep 2025 07:34:09 +0100 Subject: [PATCH 20/49] Fix number of team members Signed-off-by: Mihai Criveti --- mcpgateway/admin.py | 29 ++++++++++++++++++++++++----- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/mcpgateway/admin.py b/mcpgateway/admin.py index af745177d..edca162d8 100644 --- a/mcpgateway/admin.py +++ b/mcpgateway/admin.py @@ -1829,7 +1829,7 @@ async def admin_ui( "name": str(team.name) if team.name else "", "type": str(getattr(team, "type", "organization")), "is_personal": bool(getattr(team, "is_personal", False)), - "member_count": len(getattr(team, "members", []) or []), + "member_count": team.get_member_count() if hasattr(team, "get_member_count") else 0, } user_teams.append(team_dict) except Exception as team_error: @@ -2124,7 +2124,7 @@ async def _generate_unified_teams_view(team_service, current_user, root_path): for team in user_teams: user_role = await team_service.get_user_role_in_team(current_user.email, team.id) relationship = "owner" if user_role == "owner" else "member" - all_teams.append({"team": team, "relationship": relationship, "member_count": len(team.members) if team.members else 0}) + all_teams.append({"team": team, "relationship": relationship, "member_count": team.get_member_count()}) # Add public teams user can join - check for pending requests for team in public_teams: @@ -2132,7 +2132,7 @@ async def _generate_unified_teams_view(team_service, current_user, root_path): user_requests = await team_service.get_user_join_requests(current_user.email, team.id) pending_request = next((req for req in user_requests if req.status == "pending"), None) - relationship_data = {"team": team, "relationship": "join", "member_count": len(team.members) if team.members else 0, "pending_request": pending_request} + relationship_data = {"team": team, "relationship": "join", "member_count": team.get_member_count(), "pending_request": pending_request} all_teams.append(relationship_data) # Generate HTML for unified team view @@ -2320,7 +2320,7 @@ async def admin_list_teams( # Generate HTML for teams (traditional view) teams_html = "" for team in teams: - member_count = len(team.members) if team.members else 0 + member_count = team.get_member_count() teams_html += f"""
@@ -2989,6 +2989,12 @@ async def admin_add_team_member( target: '#team-edit-modal-content', swap: 'innerHTML' }}); + + // Also refresh the teams list to update member counts + const teamsList = document.getElementById('teams-list'); + if (teamsList) {{ + htmx.trigger(teamsList, 'load'); + }} }}, 1000);
@@ -3058,17 +3064,24 @@ async def admin_update_team_member_role(

Role updated successfully for {user_email}

""" @@ -3145,6 +3158,12 @@ async def admin_remove_team_member( target: '#team-edit-modal-content', swap: 'innerHTML' }}); + + // Also refresh the teams list to update member counts + const teamsList = document.getElementById('teams-list'); + if (teamsList) {{ + htmx.trigger(teamsList, 'load'); + }} }}, 1000); From 95a815b4818644b66a295ad2061b2ac5b9112978 Mon Sep 17 00:00:00 2001 From: Mihai Criveti Date: Wed, 3 Sep 2025 08:38:12 +0100 Subject: [PATCH 21/49] Fix team approval workflow Signed-off-by: Mihai Criveti --- .../langchain_agent/agent_langchain.py | 19 +++- agent_runtimes/langchain_agent/app.py | 42 +++---- agent_runtimes/langchain_agent/config.py | 8 +- agent_runtimes/langchain_agent/demo.py | 4 +- agent_runtimes/langchain_agent/mcp_client.py | 5 +- agent_runtimes/langchain_agent/models.py | 8 +- agent_runtimes/langchain_agent/start_agent.py | 13 ++- .../langchain_agent/tests/conftest.py | 7 +- .../langchain_agent/tests/test_app.py | 8 +- .../langchain_agent/tests/test_config.py | 8 +- async_testing/async_validator.py | 6 +- async_testing/benchmarks.py | 6 +- async_testing/monitor_runner.py | 6 +- async_testing/profile_compare.py | 8 +- async_testing/profiler.py | 12 +- mcpgateway/admin.py | 107 ++++++++++++++++-- mcpgateway/bootstrap_db.py | 12 +- mcpgateway/db.py | 12 +- .../services/team_management_service.py | 79 ++++++++++--- plugin_templates/external/tests/test_all.py | 10 +- plugins/deny_filter/deny.py | 9 +- plugins/pii_filter/pii_filter.py | 14 ++- plugins/regex_filter/search_replace.py | 2 +- plugins/resource_filter/resource_filter.py | 3 + run_mutmut.py | 10 +- tests/async/test_async_safety.py | 7 +- tests/conftest.py | 10 +- tests/e2e/test_admin_apis.py | 13 ++- tests/e2e/test_main_apis.py | 43 ++++--- tests/fuzz/conftest.py | 4 +- tests/fuzz/fuzzers/fuzz_config_parser.py | 12 +- tests/fuzz/fuzzers/fuzz_jsonpath.py | 12 +- tests/fuzz/fuzzers/fuzz_jsonrpc.py | 10 +- tests/fuzz/scripts/generate_fuzz_report.py | 7 +- tests/fuzz/scripts/run_restler_docker.py | 12 +- tests/fuzz/test_api_schema_fuzz.py | 6 +- tests/fuzz/test_jsonpath_fuzz.py | 8 +- tests/fuzz/test_jsonrpc_fuzz.py | 10 +- tests/fuzz/test_schema_validation_fuzz.py | 15 ++- tests/fuzz/test_security_fuzz.py | 8 +- tests/integration/helpers/trace_generator.py | 9 +- tests/integration/test_integration.py | 19 +++- .../integration/test_metadata_integration.py | 32 ++++-- .../test_resource_plugin_integration.py | 17 ++- tests/integration/test_tag_endpoints.py | 9 +- tests/integration/test_translate_echo.py | 3 +- tests/migration/add_version.py | 7 +- tests/migration/conftest.py | 13 ++- .../test_compose_postgres_migrations.py | 12 +- .../test_docker_sqlite_migrations.py | 10 +- tests/migration/test_migration_performance.py | 11 +- tests/migration/utils/container_manager.py | 7 +- tests/migration/utils/data_seeder.py | 6 +- tests/migration/utils/migration_runner.py | 8 +- tests/migration/utils/reporting.py | 10 +- tests/migration/utils/schema_validator.py | 7 +- tests/migration/version_config.py | 3 +- tests/migration/version_status.py | 3 +- tests/security/test_configurable_headers.py | 10 +- tests/security/test_security_cookies.py | 15 ++- tests/security/test_security_headers.py | 8 +- .../test_security_middleware_comprehensive.py | 13 ++- ...test_security_performance_compatibility.py | 16 ++- tests/security/test_standalone_middleware.py | 10 +- .../mcpgateway/cache/test_session_registry.py | 2 + .../cache/test_session_registry_extended.py | 16 ++- .../middleware/test_token_scoping.py | 4 +- .../plugins/fixtures/plugins/passthrough.py | 2 + .../external/mcp/test_client_stdio.py | 10 +- .../mcp/test_client_streamable_http.py | 6 +- .../framework/loader/test_plugin_loader.py | 7 +- .../plugins/framework/test_errors.py | 3 + .../plugins/framework/test_manager.py | 8 +- .../framework/test_manager_extended.py | 45 +++++--- .../plugins/framework/test_registry.py | 8 +- .../plugins/framework/test_resource_hooks.py | 6 + .../plugins/framework/test_utils.py | 11 +- .../resource_filter/test_resource_filter.py | 2 + .../unit/mcpgateway/plugins/tools/test_cli.py | 4 +- .../mcpgateway/routers/test_oauth_router.py | 23 +++- .../mcpgateway/routers/test_reverse_proxy.py | 14 ++- .../services/test_email_auth_basic.py | 10 +- .../services/test_export_service.py | 27 +++-- .../test_gateway_resources_prompts.py | 9 +- .../services/test_gateway_service.py | 6 +- .../services/test_gateway_service_extended.py | 5 + .../test_gateway_service_health_oauth.py | 1 + .../services/test_import_service.py | 27 +++-- .../services/test_log_storage_service.py | 3 + .../test_logging_service_comprehensive.py | 17 ++- .../services/test_permission_fallback.py | 4 +- .../services/test_prompt_service_extended.py | 14 +++ .../services/test_resource_service.py | 1 + .../services/test_resource_service_plugins.py | 4 + .../services/test_server_service.py | 4 + .../services/test_sso_admin_assignment.py | 2 +- .../services/test_sso_approval_workflow.py | 2 +- .../services/test_team_invitation_service.py | 2 +- .../services/test_team_management_service.py | 65 ++++++++++- tests/unit/mcpgateway/test_admin.py | 82 +++++++++++--- .../test_cli_export_import_coverage.py | 99 +++++++++++----- tests/unit/mcpgateway/test_coverage_push.py | 37 +++--- .../test_display_name_uuid_features.py | 37 +++++- .../mcpgateway/test_final_coverage_push.py | 35 ++++-- tests/unit/mcpgateway/test_main.py | 19 +++- tests/unit/mcpgateway/test_main_extended.py | 12 +- tests/unit/mcpgateway/test_oauth_manager.py | 15 ++- tests/unit/mcpgateway/test_observability.py | 7 ++ tests/unit/mcpgateway/test_reverse_proxy.py | 28 ++--- .../test_rpc_backward_compatibility.py | 5 +- .../mcpgateway/test_rpc_tool_invocation.py | 7 +- .../mcpgateway/test_simple_coverage_boost.py | 23 +++- .../test_streamable_closedresource_filter.py | 3 + tests/unit/mcpgateway/test_translate.py | 5 + tests/unit/mcpgateway/test_ui_version.py | 5 + tests/unit/mcpgateway/test_version.py | 11 +- tests/unit/mcpgateway/test_well_known.py | 15 ++- tests/unit/mcpgateway/test_wrapper.py | 8 +- .../test_streamablehttp_transport.py | 56 +++++---- .../utils/test_passthrough_headers_fixed.py | 4 +- .../unit/mcpgateway/utils/test_proxy_auth.py | 27 ++++- .../utils/test_verify_credentials.py | 2 +- tests/utils/rbac_mocks.py | 9 +- 123 files changed, 1305 insertions(+), 473 deletions(-) diff --git a/agent_runtimes/langchain_agent/agent_langchain.py b/agent_runtimes/langchain_agent/agent_langchain.py index 826556355..c6d01b23c 100644 --- a/agent_runtimes/langchain_agent/agent_langchain.py +++ b/agent_runtimes/langchain_agent/agent_langchain.py @@ -1,22 +1,26 @@ # -*- coding: utf-8 -*- +# Standard import asyncio import json import logging -from typing import List, Dict, Any, Optional, AsyncGenerator +from typing import Any, AsyncGenerator, Dict, List, Optional +# Third-Party from langchain.agents import AgentExecutor, create_openai_functions_agent from langchain.tools import Tool -from langchain_core.messages import HumanMessage, AIMessage, SystemMessage +from langchain_core.language_models.chat_models import BaseChatModel +from langchain_core.messages import AIMessage, HumanMessage, SystemMessage from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder from langchain_core.tools import BaseTool -from langchain_core.language_models.chat_models import BaseChatModel -from pydantic import BaseModel, Field # LLM Provider imports -from langchain_openai import ChatOpenAI, AzureChatOpenAI +from langchain_openai import AzureChatOpenAI, ChatOpenAI +from pydantic import BaseModel, Field + try: - from langchain_community.chat_models import BedrockChat, ChatOllama + # Third-Party from langchain_anthropic import ChatAnthropic + from langchain_community.chat_models import BedrockChat, ChatOllama except ImportError: # Optional dependencies - will be checked at runtime BedrockChat = None @@ -24,9 +28,11 @@ ChatAnthropic = None try: + # Local from .mcp_client import MCPClient, ToolDef from .models import AgentConfig except ImportError: + # Third-Party from mcp_client import MCPClient, ToolDef from models import AgentConfig @@ -391,4 +397,5 @@ async def stream_async( """Stream agent response asynchronously""" if not self._initialized: raise RuntimeError("Agent not initialized. Call initialize() first.") + # Standard import asyncio diff --git a/agent_runtimes/langchain_agent/app.py b/agent_runtimes/langchain_agent/app.py index e30b4382f..3eb444fb7 100644 --- a/agent_runtimes/langchain_agent/app.py +++ b/agent_runtimes/langchain_agent/app.py @@ -1,41 +1,28 @@ # -*- coding: utf-8 -*- -from fastapi import FastAPI, HTTPException, BackgroundTasks -from fastapi.responses import StreamingResponse -from fastapi.middleware.cors import CORSMiddleware +# Standard +import asyncio +from datetime import datetime import json +import logging import time +from typing import Any, AsyncGenerator, Dict, List, Optional import uuid -from typing import List, Dict, Any, Optional, AsyncGenerator -from datetime import datetime -import asyncio -import logging + +# Third-Party +from fastapi import BackgroundTasks, FastAPI, HTTPException +from fastapi.middleware.cors import CORSMiddleware +from fastapi.responses import StreamingResponse try: - from .models import ( - ChatCompletionRequest, - ChatCompletionResponse, - ChatCompletionChoice, - ChatMessage, - Usage, - HealthResponse, - ReadyResponse, - ToolListResponse - ) + # Local from .agent_langchain import LangchainMCPAgent from .config import get_settings + from .models import ChatCompletionChoice, ChatCompletionRequest, ChatCompletionResponse, ChatMessage, HealthResponse, ReadyResponse, ToolListResponse, Usage except ImportError: - from models import ( - ChatCompletionRequest, - ChatCompletionResponse, - ChatCompletionChoice, - ChatMessage, - Usage, - HealthResponse, - ReadyResponse, - ToolListResponse - ) + # Third-Party from agent_langchain import LangchainMCPAgent from config import get_settings + from models import ChatCompletionChoice, ChatCompletionRequest, ChatCompletionResponse, ChatMessage, HealthResponse, ReadyResponse, ToolListResponse, Usage # Configure logging logging.basicConfig(level=logging.INFO) @@ -307,5 +294,6 @@ async def agent_to_agent(request: Dict[str, Any]): } if __name__ == "__main__": + # Third-Party import uvicorn uvicorn.run(app, host="0.0.0.0", port=8000) diff --git a/agent_runtimes/langchain_agent/config.py b/agent_runtimes/langchain_agent/config.py index a9366e5e3..709d86d28 100644 --- a/agent_runtimes/langchain_agent/config.py +++ b/agent_runtimes/langchain_agent/config.py @@ -1,10 +1,12 @@ # -*- coding: utf-8 -*- -import os +# Standard from functools import lru_cache -from typing import Optional, List +import os +from typing import List, Optional # Load .env file if it exists try: + # Third-Party from dotenv import load_dotenv load_dotenv() except ImportError: @@ -12,8 +14,10 @@ pass try: + # Local from .models import AgentConfig except ImportError: + # Third-Party from models import AgentConfig def _parse_tools_list(tools_str: str) -> Optional[List[str]]: diff --git a/agent_runtimes/langchain_agent/demo.py b/agent_runtimes/langchain_agent/demo.py index 08c3cc813..ee98863bb 100755 --- a/agent_runtimes/langchain_agent/demo.py +++ b/agent_runtimes/langchain_agent/demo.py @@ -6,12 +6,14 @@ both programmatically and via HTTP API calls. """ +# Standard import asyncio import json import os import sys -from typing import Dict, Any +from typing import Any, Dict +# Third-Party import httpx diff --git a/agent_runtimes/langchain_agent/mcp_client.py b/agent_runtimes/langchain_agent/mcp_client.py index ace7df7d2..5151c3d65 100644 --- a/agent_runtimes/langchain_agent/mcp_client.py +++ b/agent_runtimes/langchain_agent/mcp_client.py @@ -1,10 +1,13 @@ # -*- coding: utf-8 -*- +# Future from __future__ import annotations +# Standard from dataclasses import dataclass -from typing import Any, Dict, List, Optional import os +from typing import Any, Dict, List, Optional +# Third-Party import httpx diff --git a/agent_runtimes/langchain_agent/models.py b/agent_runtimes/langchain_agent/models.py index 5aaed7880..9c0c8d9f1 100644 --- a/agent_runtimes/langchain_agent/models.py +++ b/agent_runtimes/langchain_agent/models.py @@ -1,7 +1,11 @@ # -*- coding: utf-8 -*- -from pydantic import BaseModel, Field -from typing import List, Dict, Any, Optional, Union +# Standard from datetime import datetime +from typing import Any, Dict, List, Optional, Union + +# Third-Party +from pydantic import BaseModel, Field + # OpenAI Chat API Models class ChatMessage(BaseModel): diff --git a/agent_runtimes/langchain_agent/start_agent.py b/agent_runtimes/langchain_agent/start_agent.py index d46c71114..6e0a57940 100755 --- a/agent_runtimes/langchain_agent/start_agent.py +++ b/agent_runtimes/langchain_agent/start_agent.py @@ -4,18 +4,22 @@ Startup script for the MCP Langchain Agent """ +# Standard import asyncio import logging -import sys from pathlib import Path +import sys -import uvicorn +# Third-Party from dotenv import load_dotenv +import uvicorn try: - from .config import get_settings, validate_environment, get_example_env + # Local + from .config import get_example_env, get_settings, validate_environment except ImportError: - from config import get_settings, validate_environment, get_example_env + # Third-Party + from config import get_example_env, get_settings, validate_environment # Configure logging logging.basicConfig( @@ -56,6 +60,7 @@ def setup_environment(): async def test_agent_initialization(): """Test that the agent can be initialized""" try: + # Local from .agent_langchain import LangchainMCPAgent settings = get_settings() diff --git a/agent_runtimes/langchain_agent/tests/conftest.py b/agent_runtimes/langchain_agent/tests/conftest.py index 9101d9881..7a39e4069 100644 --- a/agent_runtimes/langchain_agent/tests/conftest.py +++ b/agent_runtimes/langchain_agent/tests/conftest.py @@ -1,10 +1,13 @@ # -*- coding: utf-8 -*- """Pytest configuration and fixtures for MCP LangChain Agent tests.""" +# Standard import os -import pytest -from unittest.mock import Mock, AsyncMock +from unittest.mock import AsyncMock, Mock + +# Third-Party from fastapi.testclient import TestClient +import pytest # Set test environment variables before any imports os.environ["OPENAI_API_KEY"] = "test-key" diff --git a/agent_runtimes/langchain_agent/tests/test_app.py b/agent_runtimes/langchain_agent/tests/test_app.py index 708dde817..f934e4b30 100644 --- a/agent_runtimes/langchain_agent/tests/test_app.py +++ b/agent_runtimes/langchain_agent/tests/test_app.py @@ -1,10 +1,14 @@ # -*- coding: utf-8 -*- """Tests for the FastAPI application.""" -import pytest -from fastapi.testclient import TestClient +# Standard from unittest.mock import Mock, patch +# Third-Party +from fastapi.testclient import TestClient +import pytest + +# First-Party from agent_runtimes.langchain_agent import app diff --git a/agent_runtimes/langchain_agent/tests/test_config.py b/agent_runtimes/langchain_agent/tests/test_config.py index 37c752718..f97dc2b45 100644 --- a/agent_runtimes/langchain_agent/tests/test_config.py +++ b/agent_runtimes/langchain_agent/tests/test_config.py @@ -1,11 +1,15 @@ # -*- coding: utf-8 -*- """Tests for configuration management.""" +# Standard import os -import pytest from unittest.mock import patch -from agent_runtimes.langchain_agent.config import get_settings, validate_environment, _parse_tools_list +# Third-Party +import pytest + +# First-Party +from agent_runtimes.langchain_agent.config import _parse_tools_list, get_settings, validate_environment class TestParseToolsList: diff --git a/async_testing/async_validator.py b/async_testing/async_validator.py index 0e8c3d826..f0cc53db4 100644 --- a/async_testing/async_validator.py +++ b/async_testing/async_validator.py @@ -3,11 +3,13 @@ Validate async code patterns and detect common pitfalls. """ -import ast +# Standard import argparse +import ast import json from pathlib import Path -from typing import List, Dict, Any +from typing import Any, Dict, List + class AsyncCodeValidator: """Validate async code for common patterns and pitfalls.""" diff --git a/async_testing/benchmarks.py b/async_testing/benchmarks.py index 83f4be8b9..640eadd51 100644 --- a/async_testing/benchmarks.py +++ b/async_testing/benchmarks.py @@ -2,13 +2,15 @@ """ Run async performance benchmarks and output results. """ +# Standard +import argparse import asyncio -import time import json -import argparse from pathlib import Path +import time from typing import Any, Dict + class AsyncBenchmark: """Run async performance benchmarks.""" diff --git a/async_testing/monitor_runner.py b/async_testing/monitor_runner.py index f9871c3e7..3fef28abc 100644 --- a/async_testing/monitor_runner.py +++ b/async_testing/monitor_runner.py @@ -2,10 +2,14 @@ """ Runtime async monitoring with aiomonitor integration. """ +# Standard +import argparse import asyncio from typing import Any, Dict + +# Third-Party import aiomonitor -import argparse + class AsyncMonitor: """Monitor live async operations in mcpgateway.""" diff --git a/async_testing/profile_compare.py b/async_testing/profile_compare.py index c900e2bb2..700dd9623 100644 --- a/async_testing/profile_compare.py +++ b/async_testing/profile_compare.py @@ -3,11 +3,13 @@ Compare async performance profiles between builds. """ -import pstats -import json +# Standard import argparse +import json from pathlib import Path -from typing import Dict, Any +import pstats +from typing import Any, Dict + class ProfileComparator: """Compare performance profiles and detect regressions.""" diff --git a/async_testing/profiler.py b/async_testing/profiler.py index 47b14c325..e92b45163 100644 --- a/async_testing/profiler.py +++ b/async_testing/profiler.py @@ -2,16 +2,20 @@ """ Comprehensive async performance profiler for mcpgateway. """ +# Standard +import argparse import asyncio import cProfile +import json +from pathlib import Path import pstats import time +from typing import Any, Dict, List, Union + +# Third-Party import aiohttp import websockets -import argparse -import json -from pathlib import Path -from typing import Dict, List, Any, Union + class AsyncProfiler: """Profile async operations in mcpgateway.""" diff --git a/mcpgateway/admin.py b/mcpgateway/admin.py index edca162d8..934f28c01 100644 --- a/mcpgateway/admin.py +++ b/mcpgateway/admin.py @@ -2991,10 +2991,10 @@ async def admin_add_team_member( }}); // Also refresh the teams list to update member counts - const teamsList = document.getElementById('teams-list'); - if (teamsList) {{ - htmx.trigger(teamsList, 'load'); - }} + htmx.ajax('GET', window.ROOT_PATH + '/admin/teams?unified=true', {{ + target: '#unified-teams-list', + swap: 'innerHTML' + }}); }}, 1000); @@ -3077,10 +3077,10 @@ async def admin_update_team_member_role( }} // Refresh teams list if visible - const teamsList = document.getElementById('teams-list'); - if (teamsList) {{ - htmx.trigger(teamsList, 'load'); - }} + htmx.ajax('GET', window.ROOT_PATH + '/admin/teams?unified=true', {{ + target: '#unified-teams-list', + swap: 'innerHTML' + }}); }}, 1000); @@ -3160,10 +3160,10 @@ async def admin_remove_team_member( }}); // Also refresh the teams list to update member counts - const teamsList = document.getElementById('teams-list'); - if (teamsList) {{ - htmx.trigger(teamsList, 'load'); - }} + htmx.ajax('GET', window.ROOT_PATH + '/admin/teams?unified=true', {{ + target: '#unified-teams-list', + swap: 'innerHTML' + }}); }}, 1000); @@ -3175,6 +3175,89 @@ async def admin_remove_team_member( return HTMLResponse(content=f'
Error removing member: {str(e)}
', status_code=400) +@admin_router.post("/teams/{team_id}/leave") +@require_permission("teams.join") # Users who can join can also leave +async def admin_leave_team( + team_id: str, + request: Request, # pylint: disable=unused-argument + db: Session = Depends(get_db), + user=Depends(get_current_user_with_permissions), +) -> HTMLResponse: + """Leave a team via admin UI. + + Args: + team_id: ID of the team to leave + request: FastAPI request object + db: Database session + user: Current authenticated user context + + Returns: + HTMLResponse: Success message or error response + """ + if not settings.email_auth_enabled: + return HTMLResponse(content='
Email authentication is disabled
', status_code=403) + + try: + # First-Party + from mcpgateway.services.team_management_service import TeamManagementService # pylint: disable=import-outside-toplevel + + team_service = TeamManagementService(db) + + # Check if team exists + team = await team_service.get_team_by_id(team_id) + if not team: + return HTMLResponse(content='
Team not found
', status_code=404) + + # Get current user email + user_email = get_user_email(user) + + # Check if user is a member of the team + user_role = await team_service.get_user_role_in_team(user_email, team_id) + if not user_role: + return HTMLResponse(content='
You are not a member of this team
', status_code=400) + + # Prevent leaving personal teams + if team.is_personal: + return HTMLResponse(content='
Cannot leave your personal team
', status_code=400) + + # Check if user is the last owner + if user_role == "owner": + members = await team_service.get_team_members(team_id) + owner_count = sum(1 for _, membership in members if membership.role == "owner") + if owner_count <= 1: + return HTMLResponse(content='
Cannot leave team as the last owner. Transfer ownership or delete the team instead.
', status_code=400) + + # Remove user from team + success = await team_service.remove_member_from_team(team_id=team_id, user_email=user_email, removed_by=user_email) + if not success: + return HTMLResponse(content='
Failed to leave team
', status_code=400) + + # Return success message with redirect + success_html = """ +
+

Successfully left the team

+ +
+ """ + return HTMLResponse(content=success_html) + + except Exception as e: + LOGGER.error(f"Error leaving team {team_id}: {e}") + return HTMLResponse(content=f'
Error leaving team: {str(e)}
', status_code=400) + + # ============================================================================ # # TEAM JOIN REQUEST ADMIN ROUTES # # ============================================================================ # diff --git a/mcpgateway/bootstrap_db.py b/mcpgateway/bootstrap_db.py index a60f5ee06..3b084273d 100644 --- a/mcpgateway/bootstrap_db.py +++ b/mcpgateway/bootstrap_db.py @@ -142,17 +142,23 @@ async def bootstrap_default_roles() -> None: "name": "team_admin", "description": "Team administrator with team management permissions", "scope": "team", - "permissions": ["teams.read", "teams.update", "teams.manage_members", "tools.read", "tools.execute", "resources.read", "prompts.read"], + "permissions": ["teams.read", "teams.update", "teams.join", "teams.manage_members", "tools.read", "tools.execute", "resources.read", "prompts.read"], "is_system_role": True, }, { "name": "developer", "description": "Developer with tool and resource access", "scope": "team", - "permissions": ["tools.read", "tools.execute", "resources.read", "prompts.read"], + "permissions": ["teams.join", "tools.read", "tools.execute", "resources.read", "prompts.read"], + "is_system_role": True, + }, + { + "name": "viewer", + "description": "Read-only access to resources", + "scope": "team", + "permissions": ["teams.join", "tools.read", "resources.read", "prompts.read"], "is_system_role": True, }, - {"name": "viewer", "description": "Read-only access to resources", "scope": "team", "permissions": ["tools.read", "resources.read", "prompts.read"], "is_system_role": True}, ] # Create default roles diff --git a/mcpgateway/db.py b/mcpgateway/db.py index 02eb2a74c..a91efe934 100644 --- a/mcpgateway/db.py +++ b/mcpgateway/db.py @@ -309,6 +309,7 @@ class Permissions: TEAMS_READ = "teams.read" TEAMS_UPDATE = "teams.update" TEAMS_DELETE = "teams.delete" + TEAMS_JOIN = "teams.join" TEAMS_MANAGE_MEMBERS = "teams.manage_members" # Tool permissions @@ -1096,7 +1097,16 @@ def is_expired(self) -> bool: Returns: bool: True if the request has expired, False otherwise. """ - return utc_now() > self.expires_at + now = utc_now() + expires_at = self.expires_at + + # Handle timezone awareness mismatch + if now.tzinfo is not None and expires_at.tzinfo is None: + expires_at = expires_at.replace(tzinfo=timezone.utc) + elif now.tzinfo is None and expires_at.tzinfo is not None: + now = now.replace(tzinfo=timezone.utc) + + return now > expires_at def is_pending(self) -> bool: """Check if the join request is still pending. diff --git a/mcpgateway/services/team_management_service.py b/mcpgateway/services/team_management_service.py index 2157aee7c..eafba89bc 100644 --- a/mcpgateway/services/team_management_service.py +++ b/mcpgateway/services/team_management_service.py @@ -149,16 +149,50 @@ async def create_team(self, name: str, description: Optional[str], created_by: s if max_members is None: max_members = getattr(settings, "max_members_per_team", 100) - # Create the team (slug will be auto-generated by event listener) - team = EmailTeam(name=name, description=description, created_by=created_by, is_personal=False, visibility=visibility, max_members=max_members, is_active=True) + # Check for existing inactive team with same name + # First-Party + from mcpgateway.utils.create_slug import slugify # pylint: disable=import-outside-toplevel + + potential_slug = slugify(name) + existing_inactive_team = self.db.query(EmailTeam).filter(EmailTeam.slug == potential_slug, EmailTeam.is_active.is_(False)).first() + + if existing_inactive_team: + # Reactivate the existing team with new details + existing_inactive_team.name = name + existing_inactive_team.description = description + existing_inactive_team.created_by = created_by + existing_inactive_team.visibility = visibility + existing_inactive_team.max_members = max_members + existing_inactive_team.is_active = True + existing_inactive_team.updated_at = utc_now() + team = existing_inactive_team + + # Check if the creator already has an inactive membership + existing_membership = self.db.query(EmailTeamMember).filter(EmailTeamMember.team_id == team.id, EmailTeamMember.user_email == created_by).first() + + if existing_membership: + # Reactivate existing membership as owner + existing_membership.role = "owner" + existing_membership.joined_at = utc_now() + existing_membership.is_active = True + membership = existing_membership + else: + # Create new membership + membership = EmailTeamMember(team_id=team.id, user_email=created_by, role="owner", joined_at=utc_now(), is_active=True) + self.db.add(membership) + + logger.info(f"Reactivated existing team with slug {potential_slug}") + else: + # Create the team (slug will be auto-generated by event listener) + team = EmailTeam(name=name, description=description, created_by=created_by, is_personal=False, visibility=visibility, max_members=max_members, is_active=True) + self.db.add(team) - self.db.add(team) - self.db.flush() # Get the team ID + self.db.flush() # Get the team ID - # Add the creator as owner - membership = EmailTeamMember(team_id=team.id, user_email=created_by, role="owner", joined_at=utc_now(), is_active=True) + # Add the creator as owner + membership = EmailTeamMember(team_id=team.id, user_email=created_by, role="owner", joined_at=utc_now(), is_active=True) + self.db.add(membership) - self.db.add(membership) self.db.commit() logger.info(f"Created team '{team.name}' by {created_by}") @@ -684,18 +718,27 @@ async def create_join_request(self, team_id: str, user_email: str, message: Opti if existing_member: raise ValueError("User is already a member of this team") - # Check for existing pending request - existing_request = ( - self.db.query(EmailTeamJoinRequest).filter(EmailTeamJoinRequest.team_id == team_id, EmailTeamJoinRequest.user_email == user_email, EmailTeamJoinRequest.status == "pending").first() - ) - - if existing_request and not existing_request.is_expired(): - raise ValueError("User already has a pending join request for this team") - - # Create join request - join_request = EmailTeamJoinRequest(team_id=team_id, user_email=user_email, message=message, expires_at=utc_now() + timedelta(days=7)) # 7 day expiry + # Check for existing requests (any status) + existing_request = self.db.query(EmailTeamJoinRequest).filter(EmailTeamJoinRequest.team_id == team_id, EmailTeamJoinRequest.user_email == user_email).first() + + if existing_request: + if existing_request.status == "pending" and not existing_request.is_expired(): + raise ValueError("User already has a pending join request for this team") + + # Update existing request (cancelled, rejected, expired) to pending + existing_request.message = message or "" + existing_request.status = "pending" + existing_request.requested_at = utc_now() + existing_request.expires_at = utc_now() + timedelta(days=7) + existing_request.reviewed_at = None + existing_request.reviewed_by = None + existing_request.notes = None + join_request = existing_request + else: + # Create new join request + join_request = EmailTeamJoinRequest(team_id=team_id, user_email=user_email, message=message, expires_at=utc_now() + timedelta(days=7)) + self.db.add(join_request) - self.db.add(join_request) self.db.commit() self.db.refresh(join_request) diff --git a/plugin_templates/external/tests/test_all.py b/plugin_templates/external/tests/test_all.py index 8accde750..39987cbe7 100644 --- a/plugin_templates/external/tests/test_all.py +++ b/plugin_templates/external/tests/test_all.py @@ -1,20 +1,22 @@ # -*- coding: utf-8 -*- """Tests for registered plugins.""" -# Third-Party +# Standard import asyncio + +# Third-Party import pytest # First-Party from mcpgateway.models import Message, PromptResult, Role, TextContent from mcpgateway.plugins.framework import ( - PluginManager, GlobalContext, - PromptPrehookPayload, + PluginManager, PromptPosthookPayload, + PromptPrehookPayload, PromptResult, - ToolPreInvokePayload, ToolPostInvokePayload, + ToolPreInvokePayload, ) diff --git a/plugins/deny_filter/deny.py b/plugins/deny_filter/deny.py index c89aa0d69..81a6d442b 100644 --- a/plugins/deny_filter/deny.py +++ b/plugins/deny_filter/deny.py @@ -11,14 +11,7 @@ from pydantic import BaseModel # First-Party -from mcpgateway.plugins.framework import ( - Plugin, - PluginConfig, - PluginContext, - PluginViolation, - PromptPrehookPayload, - PromptPrehookResult -) +from mcpgateway.plugins.framework import Plugin, PluginConfig, PluginContext, PluginViolation, PromptPrehookPayload, PromptPrehookResult from mcpgateway.services.logging_service import LoggingService # Initialize logging service first diff --git a/plugins/pii_filter/pii_filter.py b/plugins/pii_filter/pii_filter.py index a7d7c1fc8..d9d10b59b 100644 --- a/plugins/pii_filter/pii_filter.py +++ b/plugins/pii_filter/pii_filter.py @@ -10,9 +10,9 @@ """ # Standard -import re from enum import Enum -from typing import Any, Pattern, Dict, List, Tuple +import re +from typing import Any, Dict, List, Pattern, Tuple # Third-Party from pydantic import BaseModel, Field @@ -27,10 +27,10 @@ PromptPosthookResult, PromptPrehookPayload, PromptPrehookResult, - ToolPreInvokePayload, - ToolPreInvokeResult, ToolPostInvokePayload, ToolPostInvokeResult, + ToolPreInvokePayload, + ToolPreInvokeResult, ) from mcpgateway.services.logging_service import LoggingService @@ -455,11 +455,14 @@ def _apply_mask(self, value: str, pii_type: PIIType, strategy: MaskingStrategy) return self.config.redaction_text elif strategy == MaskingStrategy.HASH: + # Standard import hashlib return f"[HASH:{hashlib.sha256(value.encode()).hexdigest()[:8]}]" elif strategy == MaskingStrategy.TOKENIZE: + # Standard import uuid + # In production, you'd store the mapping return f"[TOKEN:{uuid.uuid4().hex[:8]}]" @@ -862,6 +865,7 @@ def _process_nested_data_for_pii(self, data: Any, path: str, all_detections: dic # Try to parse as JSON and process nested content try: + # Standard import json parsed_json = json.loads(data) json_modified, json_detections = self._process_nested_data_for_pii(parsed_json, f"{path}(json)", all_detections) @@ -890,6 +894,7 @@ def _process_nested_data_for_pii(self, data: Any, path: str, all_detections: dic json_path = f"{current_path}(json)" if any(path.startswith(json_path) for path in all_detections.keys()): try: + # Standard import json parsed_json = json.loads(value) # Apply masking to the parsed JSON @@ -921,6 +926,7 @@ def _process_nested_data_for_pii(self, data: Any, path: str, all_detections: dic json_path = f"{current_path}(json)" if any(path.startswith(json_path) for path in all_detections.keys()): try: + # Standard import json parsed_json = json.loads(item) # Apply masking to the parsed JSON diff --git a/plugins/regex_filter/search_replace.py b/plugins/regex_filter/search_replace.py index 12c34849f..b4ce33c6d 100644 --- a/plugins/regex_filter/search_replace.py +++ b/plugins/regex_filter/search_replace.py @@ -25,7 +25,7 @@ ToolPostInvokePayload, ToolPostInvokeResult, ToolPreInvokePayload, - ToolPreInvokeResult + ToolPreInvokeResult, ) diff --git a/plugins/resource_filter/resource_filter.py b/plugins/resource_filter/resource_filter.py index 7d118e78e..8d42e2724 100644 --- a/plugins/resource_filter/resource_filter.py +++ b/plugins/resource_filter/resource_filter.py @@ -13,9 +13,11 @@ - Add metadata to resources """ +# Standard import re from urllib.parse import urlparse +# First-Party from mcpgateway.plugins.framework import ( Plugin, PluginConfig, @@ -242,6 +244,7 @@ async def resource_post_fetch( # Update content if it was modified if filtered_text != original_text: # Create new content object with filtered text + # First-Party from mcpgateway.models import ResourceContent modified_content = ResourceContent( type=payload.content.type, diff --git a/run_mutmut.py b/run_mutmut.py index 81d55fb4e..938cb9273 100755 --- a/run_mutmut.py +++ b/run_mutmut.py @@ -5,11 +5,13 @@ Generates mutants and then runs them despite stats failure. """ -import subprocess -import sys -import os +# Standard import json +import os from pathlib import Path +import subprocess +import sys + def run_command(cmd): """Run a shell command and return output.""" @@ -38,6 +40,7 @@ def main(): # Show some output to indicate progress if "done in" in stdout: + # Standard import re match = re.search(r'done in (\d+)ms', stdout) if match: @@ -60,6 +63,7 @@ def main(): return 1 # Sample mutants for quicker testing + # Standard import random print(f"🔍 Found {len(all_mutants)} total mutants") diff --git a/tests/async/test_async_safety.py b/tests/async/test_async_safety.py index e7945b7d8..108d8abde 100644 --- a/tests/async/test_async_safety.py +++ b/tests/async/test_async_safety.py @@ -7,10 +7,13 @@ Comprehensive async safety tests for mcpgateway. """ -from typing import Any, List -import pytest +# Standard import asyncio import time +from typing import Any, List + +# Third-Party +import pytest class TestAsyncSafety: diff --git a/tests/conftest.py b/tests/conftest.py index f7ff44d8f..49fd7c496 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -18,15 +18,17 @@ from sqlalchemy.orm import sessionmaker from sqlalchemy.pool import StaticPool +# First-Party +from mcpgateway.config import Settings +from mcpgateway.db import Base + +# Local # Test utilities - import before mcpgateway modules from tests.utils.rbac_mocks import patch_rbac_decorators, restore_rbac_decorators # Skip session-level RBAC patching for now - let individual tests handle it # _session_rbac_originals = patch_rbac_decorators() -# First-Party -from mcpgateway.config import Settings -from mcpgateway.db import Base @pytest.fixture(scope="session") @@ -88,6 +90,7 @@ def app(): url = f"sqlite:///{path}" # 2) patch settings + # First-Party from mcpgateway.config import settings mp.setattr(settings, "database_url", url, raising=False) @@ -100,6 +103,7 @@ def app(): mp.setattr(db_mod, "SessionLocal", TestSessionLocal, raising=False) # 4) patch the already‑imported main module **without reloading** + # First-Party import mcpgateway.main as main_mod mp.setattr(main_mod, "SessionLocal", TestSessionLocal, raising=False) # (patch engine too if your code references it) diff --git a/tests/e2e/test_admin_apis.py b/tests/e2e/test_admin_apis.py index 8cd0a3550..d9de4a1b4 100644 --- a/tests/e2e/test_admin_apis.py +++ b/tests/e2e/test_admin_apis.py @@ -64,9 +64,12 @@ def setup_logging(): # ------------------------- def create_test_jwt_token(): """Create a proper JWT token for testing with required audience and issuer.""" - import jwt + # Standard import datetime + # Third-Party + import jwt + expire = datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(minutes=60) payload = { 'sub': 'admin@example.com', @@ -83,8 +86,10 @@ def create_test_jwt_token(): TEST_JWT_TOKEN = create_test_jwt_token() TEST_AUTH_HEADER = {"Authorization": f"Bearer {TEST_JWT_TOKEN}"} +# Local # Test user for the updated authentication system from tests.utils.rbac_mocks import create_mock_email_user + TEST_USER = create_mock_email_user( email="admin@example.com", full_name="Test Admin", @@ -100,10 +105,12 @@ def create_test_jwt_token(): async def client(app_with_temp_db): # First-Party from mcpgateway.auth import get_current_user - from mcpgateway.middleware.rbac import get_current_user_with_permissions from mcpgateway.db import get_db - from mcpgateway.utils.verify_credentials import require_admin_auth + from mcpgateway.middleware.rbac import get_current_user_with_permissions from mcpgateway.utils.create_jwt_token import get_jwt_token + from mcpgateway.utils.verify_credentials import require_admin_auth + + # Local from tests.utils.rbac_mocks import create_mock_user_context # Get the actual test database session from the app diff --git a/tests/e2e/test_main_apis.py b/tests/e2e/test_main_apis.py index e958d5838..56abddb61 100644 --- a/tests/e2e/test_main_apis.py +++ b/tests/e2e/test_main_apis.py @@ -50,30 +50,27 @@ from unittest.mock import MagicMock, patch # Third-Party -from httpx import AsyncClient from fastapi import Request from fastapi.security import HTTPAuthorizationCredentials -from sqlalchemy.orm import Session +from httpx import AsyncClient # --- Test Auth Header: Use a real JWT for authenticated requests --- import jwt import pytest import pytest_asyncio from sqlalchemy import create_engine -from sqlalchemy.orm import sessionmaker +from sqlalchemy.orm import Session, sessionmaker from sqlalchemy.pool import StaticPool -# Test utilities - must import BEFORE mcpgateway modules -from tests.utils.rbac_mocks import ( - setup_rbac_mocks_for_app, - teardown_rbac_mocks_for_app, - patch_rbac_decorators, - restore_rbac_decorators -) - +# First-Party # Completely replace RBAC decorators with no-op versions import mcpgateway.middleware.rbac as rbac_module +# Local +# Test utilities - must import BEFORE mcpgateway modules +from tests.utils.rbac_mocks import patch_rbac_decorators, restore_rbac_decorators, setup_rbac_mocks_for_app, teardown_rbac_mocks_for_app + + def noop_decorator(*args, **kwargs): """No-op decorator that just returns the function unchanged.""" def decorator(func): @@ -90,8 +87,10 @@ def decorator(func): rbac_module.require_admin_permission = noop_decorator rbac_module.require_any_permission = noop_decorator +# Standard # Patch bootstrap_db to prevent it from running during tests from unittest.mock import patch as mock_patch + with mock_patch('mcpgateway.bootstrap_db.main'): # First-Party from mcpgateway.config import settings @@ -161,8 +160,9 @@ async def temp_db(): # Import all model classes to ensure they're registered with Base.metadata # This is necessary for create_all() to create all tables - import mcpgateway.models # Import all model definitions + # First-Party import mcpgateway.db # Import email auth models and other db models + import mcpgateway.models # Import all model definitions # Create all tables - use create_all for test environment to avoid migration conflicts Base.metadata.create_all(bind=engine) @@ -182,11 +182,13 @@ def override_get_db(): # Override authentication for all tests # First-Party - from mcpgateway.utils.verify_credentials import require_auth, require_admin_auth - from mcpgateway.utils.create_jwt_token import get_jwt_token from mcpgateway.auth import get_current_user from mcpgateway.middleware.rbac import get_current_user_with_permissions - from tests.utils.rbac_mocks import create_mock_user_context, create_mock_email_user + from mcpgateway.utils.create_jwt_token import get_jwt_token + from mcpgateway.utils.verify_credentials import require_admin_auth, require_auth + + # Local + from tests.utils.rbac_mocks import create_mock_email_user, create_mock_user_context def override_auth(): return TEST_USER @@ -223,7 +225,10 @@ async def simple_mock_user_with_permissions(): return test_user_context # Create a mock PermissionService that always grants permission + # First-Party from mcpgateway.middleware.rbac import get_permission_service + + # Local from tests.utils.rbac_mocks import MockPermissionService def mock_get_permission_service(*args, **kwargs): @@ -473,6 +478,7 @@ async def test_completion(self, client: AsyncClient): # TODO: Fix RBAC mocking to make this test properly pass if response.status_code == 422: # Skip this test for now due to RBAC decorator issues + # Third-Party import pytest pytest.skip("RBAC decorator issue - endpoint expects args/kwargs parameters") @@ -492,6 +498,7 @@ async def test_sampling_create_message(self, client: AsyncClient): # TODO: Fix RBAC mocking to make this test properly pass if response.status_code == 422: # Skip this test for now due to RBAC decorator issues + # Third-Party import pytest pytest.skip("RBAC decorator issue - endpoint expects args/kwargs parameters") @@ -510,6 +517,7 @@ async def test_get_servers_no_auth(self, client: AsyncClient): # TODO: Fix RBAC mocking to make this test properly pass if response.status_code == 422: # Skip this test for now due to RBAC decorator issues + # Third-Party import pytest pytest.skip("RBAC decorator issue - endpoint expects args/kwargs parameters") @@ -546,6 +554,7 @@ async def test_create_virtual_server(self, client: AsyncClient, mock_auth): # TODO: Fix RBAC mocking to make this test properly pass if response.status_code == 422: # Skip this test for now due to RBAC decorator issues + # Third-Party import pytest pytest.skip("RBAC decorator issue - endpoint expects args/kwargs parameters") @@ -1785,9 +1794,9 @@ async def test_protected_endpoints_require_auth(self, client: AsyncClient): """Test that protected endpoints require authentication when auth is enabled.""" # First, let's remove ALL auth overrides to test real auth behavior # First-Party - from mcpgateway.utils.verify_credentials import require_auth - from mcpgateway.middleware.rbac import get_current_user_with_permissions from mcpgateway.auth import get_current_user + from mcpgateway.middleware.rbac import get_current_user_with_permissions + from mcpgateway.utils.verify_credentials import require_auth # Remove all auth-related overrides temporarily original_overrides = {} diff --git a/tests/fuzz/conftest.py b/tests/fuzz/conftest.py index 5d9e61096..6ab1f12db 100644 --- a/tests/fuzz/conftest.py +++ b/tests/fuzz/conftest.py @@ -6,8 +6,9 @@ Fuzzing test configuration. """ +# Third-Party +from hypothesis import HealthCheck, settings, Verbosity import pytest -from hypothesis import settings, Verbosity, HealthCheck # Mark all tests in this directory as fuzz tests pytestmark = pytest.mark.fuzz @@ -37,6 +38,7 @@ @pytest.fixture(scope="session") def fuzz_settings(): """Configure fuzzing settings based on environment.""" + # Standard import os profile = os.getenv("HYPOTHESIS_PROFILE", "dev") settings.load_profile(profile) diff --git a/tests/fuzz/fuzzers/fuzz_config_parser.py b/tests/fuzz/fuzzers/fuzz_config_parser.py index 2c8918b19..09073081a 100755 --- a/tests/fuzz/fuzzers/fuzz_config_parser.py +++ b/tests/fuzz/fuzzers/fuzz_config_parser.py @@ -7,17 +7,23 @@ Coverage-guided fuzzing for configuration parsing using Atheris. """ -import atheris -import sys +# Standard import os +import sys import tempfile +# Third-Party +import atheris + # Ensure the project is in the path sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../../..')) try: - from mcpgateway.config import Settings, get_settings + # Third-Party from pydantic import ValidationError + + # First-Party + from mcpgateway.config import get_settings, Settings except ImportError as e: print(f"Import error: {e}") sys.exit(1) diff --git a/tests/fuzz/fuzzers/fuzz_jsonpath.py b/tests/fuzz/fuzzers/fuzz_jsonpath.py index 62354d474..901f705c9 100755 --- a/tests/fuzz/fuzzers/fuzz_jsonpath.py +++ b/tests/fuzz/fuzzers/fuzz_jsonpath.py @@ -7,18 +7,24 @@ Coverage-guided fuzzing for JSONPath processing using Atheris. """ -import atheris -import sys +# Standard import json import os +import sys from typing import Any +# Third-Party +import atheris + # Ensure the project is in the path sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../../..')) try: - from mcpgateway.config import jsonpath_modifier + # Third-Party from fastapi import HTTPException + + # First-Party + from mcpgateway.config import jsonpath_modifier except ImportError as e: print(f"Import error: {e}") sys.exit(1) diff --git a/tests/fuzz/fuzzers/fuzz_jsonrpc.py b/tests/fuzz/fuzzers/fuzz_jsonrpc.py index c6761dcce..98bc0e359 100755 --- a/tests/fuzz/fuzzers/fuzz_jsonrpc.py +++ b/tests/fuzz/fuzzers/fuzz_jsonrpc.py @@ -7,16 +7,20 @@ Coverage-guided fuzzing for JSON-RPC validation using Atheris. """ -import atheris -import sys +# Standard import json import os +import sys + +# Third-Party +import atheris # Ensure the project is in the path sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../../..')) try: - from mcpgateway.validation.jsonrpc import validate_request, validate_response, JSONRPCError + # First-Party + from mcpgateway.validation.jsonrpc import JSONRPCError, validate_request, validate_response except ImportError as e: print(f"Import error: {e}") sys.exit(1) diff --git a/tests/fuzz/scripts/generate_fuzz_report.py b/tests/fuzz/scripts/generate_fuzz_report.py index 607088292..9db2b93a7 100755 --- a/tests/fuzz/scripts/generate_fuzz_report.py +++ b/tests/fuzz/scripts/generate_fuzz_report.py @@ -7,12 +7,13 @@ Generate comprehensive fuzzing report for MCP Gateway. """ +# Standard +from datetime import datetime import json import os -import sys from pathlib import Path -from datetime import datetime -from typing import Dict, List, Any, Optional +import sys +from typing import Any, Dict, List, Optional def collect_hypothesis_stats() -> Dict[str, Any]: diff --git a/tests/fuzz/scripts/run_restler_docker.py b/tests/fuzz/scripts/run_restler_docker.py index 13c2d8d84..81a57bcb5 100755 --- a/tests/fuzz/scripts/run_restler_docker.py +++ b/tests/fuzz/scripts/run_restler_docker.py @@ -19,18 +19,20 @@ CLI options mirror these and take precedence over env values. """ +# Future from __future__ import annotations +# Standard import argparse -import os -import sys -import time import json +import os +from pathlib import Path import shutil import subprocess -from pathlib import Path +import sys +import time +from urllib.error import HTTPError, URLError from urllib.request import Request, urlopen -from urllib.error import URLError, HTTPError def project_root() -> Path: diff --git a/tests/fuzz/test_api_schema_fuzz.py b/tests/fuzz/test_api_schema_fuzz.py index 19acd6b9b..02860150f 100644 --- a/tests/fuzz/test_api_schema_fuzz.py +++ b/tests/fuzz/test_api_schema_fuzz.py @@ -6,8 +6,11 @@ Schemathesis-based API endpoint fuzzing. """ -import pytest +# Third-Party from fastapi.testclient import TestClient +import pytest + +# First-Party from mcpgateway.main import app @@ -123,6 +126,7 @@ def test_unicode_fuzzing(self): def test_concurrent_request_fuzzing(self): """Test concurrent requests to check for race conditions.""" + # Standard import threading import time diff --git a/tests/fuzz/test_jsonpath_fuzz.py b/tests/fuzz/test_jsonpath_fuzz.py index 3a5ad081c..742da8691 100644 --- a/tests/fuzz/test_jsonpath_fuzz.py +++ b/tests/fuzz/test_jsonpath_fuzz.py @@ -6,9 +6,13 @@ Property-based fuzz testing for JSONPath processing. """ -from hypothesis import given, strategies as st, assume -import pytest +# Third-Party from fastapi import HTTPException +from hypothesis import assume, given +from hypothesis import strategies as st +import pytest + +# First-Party from mcpgateway.config import jsonpath_modifier diff --git a/tests/fuzz/test_jsonrpc_fuzz.py b/tests/fuzz/test_jsonrpc_fuzz.py index 9276a0c67..9ac9d6d7a 100644 --- a/tests/fuzz/test_jsonrpc_fuzz.py +++ b/tests/fuzz/test_jsonrpc_fuzz.py @@ -6,10 +6,16 @@ Property-based fuzz testing for JSON-RPC validation. """ +# Standard import json -from hypothesis import given, strategies as st, settings, example + +# Third-Party +from hypothesis import example, given, settings +from hypothesis import strategies as st import pytest -from mcpgateway.validation.jsonrpc import validate_request, validate_response, JSONRPCError + +# First-Party +from mcpgateway.validation.jsonrpc import JSONRPCError, validate_request, validate_response class TestJSONRPCRequestFuzzing: diff --git a/tests/fuzz/test_schema_validation_fuzz.py b/tests/fuzz/test_schema_validation_fuzz.py index fd85a87d3..ff83e943e 100644 --- a/tests/fuzz/test_schema_validation_fuzz.py +++ b/tests/fuzz/test_schema_validation_fuzz.py @@ -6,14 +6,17 @@ Property-based fuzz testing for Pydantic schema validation. """ +# Standard import json -from hypothesis import given, strategies as st -import pytest + +# Third-Party +from hypothesis import given +from hypothesis import strategies as st from pydantic import ValidationError -from mcpgateway.schemas import ( - ToolCreate, ResourceCreate, PromptCreate, GatewayCreate, - AuthenticationValues, AdminToolCreate, ServerCreate -) +import pytest + +# First-Party +from mcpgateway.schemas import AdminToolCreate, AuthenticationValues, GatewayCreate, PromptCreate, ResourceCreate, ServerCreate, ToolCreate class TestToolCreateSchemaFuzzing: diff --git a/tests/fuzz/test_security_fuzz.py b/tests/fuzz/test_security_fuzz.py index d1ed51247..be3b5c69f 100644 --- a/tests/fuzz/test_security_fuzz.py +++ b/tests/fuzz/test_security_fuzz.py @@ -6,9 +6,13 @@ Security-focused fuzz testing for MCP Gateway. """ -from hypothesis import given, strategies as st -import pytest +# Third-Party from fastapi.testclient import TestClient +from hypothesis import given +from hypothesis import strategies as st +import pytest + +# First-Party from mcpgateway.main import app diff --git a/tests/integration/helpers/trace_generator.py b/tests/integration/helpers/trace_generator.py index a1762ada7..666f2d916 100755 --- a/tests/integration/helpers/trace_generator.py +++ b/tests/integration/helpers/trace_generator.py @@ -14,6 +14,7 @@ python tests/integration/helpers/trace_generator.py """ +# Standard import asyncio import os import sys @@ -21,9 +22,13 @@ # Add the project root to path so we can import mcpgateway sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))) -from mcpgateway.observability import init_telemetry, create_span -import time +# Standard import random +import time + +# First-Party +from mcpgateway.observability import create_span, init_telemetry + async def test_phoenix_integration(): """Send some test traces to Phoenix.""" diff --git a/tests/integration/test_integration.py b/tests/integration/test_integration.py index d723b2b6e..3af6ecfd7 100644 --- a/tests/integration/test_integration.py +++ b/tests/integration/test_integration.py @@ -34,6 +34,8 @@ from mcpgateway.main import app, require_auth from mcpgateway.models import InitializeResult, ResourceContent, ServerCapabilities from mcpgateway.schemas import ResourceRead, ServerRead, ToolMetrics, ToolRead + +# Local from tests.utils.rbac_mocks import MockPermissionService @@ -43,8 +45,11 @@ @pytest.fixture def test_client() -> TestClient: """FastAPI TestClient with proper database setup and auth dependency overridden.""" - import tempfile + # Standard import os + import tempfile + + # Third-Party from _pytest.monkeypatch import MonkeyPatch from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker @@ -57,9 +62,11 @@ def test_client() -> TestClient: url = f"sqlite:///{path}" # Patch settings + # First-Party from mcpgateway.config import settings mp.setattr(settings, "database_url", url, raising=False) + # First-Party import mcpgateway.db as db_mod import mcpgateway.main as main_mod @@ -77,11 +84,15 @@ def test_client() -> TestClient: app.dependency_overrides[require_auth] = lambda: "integration-test-user" # Also need to override RBAC and basic authentication - from mcpgateway.middleware.rbac import get_current_user_with_permissions, get_permission_service, get_db as rbac_get_db - from mcpgateway.auth import get_current_user - + # Standard # Create mock user for basic auth from unittest.mock import MagicMock + + # First-Party + from mcpgateway.auth import get_current_user + from mcpgateway.middleware.rbac import get_current_user_with_permissions + from mcpgateway.middleware.rbac import get_db as rbac_get_db + from mcpgateway.middleware.rbac import get_permission_service mock_email_user = MagicMock() mock_email_user.email = "integration-test-user@example.com" mock_email_user.full_name = "Integration Test User" diff --git a/tests/integration/test_metadata_integration.py b/tests/integration/test_metadata_integration.py index abeb9e1f7..c5a74daaf 100644 --- a/tests/integration/test_metadata_integration.py +++ b/tests/integration/test_metadata_integration.py @@ -14,22 +14,25 @@ import asyncio from datetime import datetime import json -import uuid from typing import Dict +import uuid # Third-Party -import pytest from fastapi import FastAPI from fastapi.testclient import TestClient +import pytest from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker # First-Party -from mcpgateway.db import Base, get_db, Tool as DbTool +from mcpgateway.db import Base, get_db +from mcpgateway.db import Tool as DbTool from mcpgateway.main import app from mcpgateway.schemas import ToolCreate from mcpgateway.services.tool_service import ToolService from mcpgateway.utils.verify_credentials import require_auth + +# Local from tests.utils.rbac_mocks import MockPermissionService @@ -37,11 +40,14 @@ def test_app(): """Create test app with proper database setup.""" # Use file-based SQLite database for better compatibility - import tempfile + # Standard import os + import tempfile + from unittest.mock import MagicMock, patch + + # Third-Party from _pytest.monkeypatch import MonkeyPatch from sqlalchemy.pool import StaticPool - from unittest.mock import MagicMock, patch mp = MonkeyPatch() @@ -50,9 +56,11 @@ def test_app(): url = f"sqlite:///{path}" # Patch settings + # First-Party from mcpgateway.config import settings mp.setattr(settings, "database_url", url, raising=False) + # First-Party import mcpgateway.db as db_mod import mcpgateway.main as main_mod @@ -67,8 +75,11 @@ def test_app(): Base.metadata.create_all(bind=engine) # Set up comprehensive authentication overrides - from mcpgateway.middleware.rbac import get_current_user_with_permissions, get_permission_service, get_db as rbac_get_db + # First-Party from mcpgateway.auth import get_current_user + from mcpgateway.middleware.rbac import get_current_user_with_permissions + from mcpgateway.middleware.rbac import get_db as rbac_get_db + from mcpgateway.middleware.rbac import get_permission_service # Create mock user for basic auth mock_email_user = MagicMock() @@ -250,11 +261,13 @@ def test_metadata_backwards_compatibility(self, client, auth_headers): def test_auth_disabled_metadata(self, client, test_app, auth_headers): """Test metadata capture when authentication is disabled.""" # Import the RBAC dependency that tools endpoint actually uses + # First-Party from mcpgateway.middleware.rbac import get_current_user_with_permissions # Override RBAC auth to return anonymous user context async def mock_anonymous_user(): # Need to import here to get the same SessionLocal the test is using + # First-Party import mcpgateway.db as db_mod db_session = db_mod.SessionLocal() return { @@ -351,11 +364,16 @@ def test_tool_list_includes_metadata(self, client, auth_headers): @pytest.mark.asyncio async def test_service_layer_metadata_handling(self, test_app): """Test metadata handling at the service layer.""" - from mcpgateway.utils.metadata_capture import MetadataCapture + # Standard from types import SimpleNamespace + + # Third-Party from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker + # First-Party + from mcpgateway.utils.metadata_capture import MetadataCapture + # Create test database session engine = create_engine("sqlite:///:memory:", connect_args={"check_same_thread": False}) TestingSessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine) diff --git a/tests/integration/test_resource_plugin_integration.py b/tests/integration/test_resource_plugin_integration.py index c92431eef..12ac033d6 100644 --- a/tests/integration/test_resource_plugin_integration.py +++ b/tests/integration/test_resource_plugin_integration.py @@ -7,13 +7,18 @@ Integration tests for resource plugin functionality. """ +# Standard import os from unittest.mock import MagicMock, patch + +# Third-Party import pytest from sqlalchemy import create_engine from sqlalchemy.orm import Session, sessionmaker -from mcpgateway.db import Base, Resource as DbResource +# First-Party +from mcpgateway.db import Base +from mcpgateway.db import Resource as DbResource from mcpgateway.models import ResourceContent from mcpgateway.schemas import ResourceCreate from mcpgateway.services.resource_service import ResourceService @@ -37,6 +42,7 @@ def resource_service_with_mock_plugins(self): """Create ResourceService with mocked plugin manager.""" with patch.dict(os.environ, {"PLUGINS_ENABLED": "true", "PLUGIN_CONFIG_FILE": "test.yaml"}): with patch("mcpgateway.services.resource_service.PluginManager") as MockPluginManager: + # Standard from unittest.mock import AsyncMock mock_manager = MagicMock() mock_manager._initialized = True @@ -52,6 +58,7 @@ async def test_full_resource_lifecycle_with_plugins(self, test_db, resource_serv service, mock_manager = resource_service_with_mock_plugins # Configure mock plugin manager for all operations + # Standard from unittest.mock import AsyncMock pre_result = MagicMock() pre_result.continue_processing = True @@ -100,6 +107,7 @@ async def test_full_resource_lifecycle_with_plugins(self, test_db, resource_serv assert resources[0].uri == "test://integration" # 4. Update the resource + # First-Party from mcpgateway.schemas import ResourceUpdate update_data = ResourceUpdate( @@ -126,6 +134,7 @@ async def test_resource_filtering_integration(self, test_db): ): # Use real plugin manager but mock its initialization with patch("mcpgateway.services.resource_service.PluginManager") as MockPluginManager: + # First-Party from mcpgateway.plugins.framework.manager import PluginManager from mcpgateway.plugins.framework.models import ( ResourcePostFetchPayload, @@ -156,6 +165,7 @@ async def resource_pre_fetch(self, payload, global_context): {"validated": True}, ) else: + # First-Party from mcpgateway.plugins.framework.models import PluginViolation return ( @@ -219,6 +229,7 @@ async def resource_post_fetch(self, payload, global_context, contexts): assert "port: 8080" in content.text # Try to read a blocked protocol + # First-Party from mcpgateway.services.resource_service import ResourceError blocked_resource = ResourceCreate( @@ -289,7 +300,9 @@ async def test_template_resource_with_plugins(self, test_db, resource_service_wi service, mock_manager = resource_service_with_mock_plugins # Configure plugin manager + # Standard from unittest.mock import AsyncMock + # Create proper mock results pre_result = MagicMock() pre_result.continue_processing = True @@ -328,6 +341,7 @@ async def test_inactive_resource_handling(self, test_db, resource_service_with_m service, mock_manager = resource_service_with_mock_plugins # Configure mock plugin manager + # Standard from unittest.mock import AsyncMock pre_result = MagicMock() pre_result.continue_processing = True @@ -351,6 +365,7 @@ async def test_inactive_resource_handling(self, test_db, resource_service_with_m await service.toggle_resource_status(test_db, created.id, activate=False) # Try to read inactive resource + # First-Party from mcpgateway.services.resource_service import ResourceNotFoundError with pytest.raises(ResourceNotFoundError) as exc_info: diff --git a/tests/integration/test_tag_endpoints.py b/tests/integration/test_tag_endpoints.py index 007eb30e5..60e1467c7 100644 --- a/tests/integration/test_tag_endpoints.py +++ b/tests/integration/test_tag_endpoints.py @@ -8,7 +8,7 @@ """ # Standard -from unittest.mock import AsyncMock, patch, MagicMock +from unittest.mock import AsyncMock, MagicMock, patch # Third-Party from fastapi.testclient import TestClient @@ -17,6 +17,8 @@ # First-Party from mcpgateway.main import app, require_auth from mcpgateway.schemas import TaggedEntity, TagInfo, TagStats + +# Local from tests.utils.rbac_mocks import MockPermissionService @@ -26,7 +28,10 @@ def test_client() -> TestClient: app.dependency_overrides[require_auth] = lambda: "integration-test-user" # Also need to override RBAC authentication - from mcpgateway.middleware.rbac import get_current_user_with_permissions, get_permission_service, get_db as rbac_get_db + # First-Party + from mcpgateway.middleware.rbac import get_current_user_with_permissions + from mcpgateway.middleware.rbac import get_db as rbac_get_db + from mcpgateway.middleware.rbac import get_permission_service async def mock_user_with_permissions(): """Mock user context for RBAC.""" diff --git a/tests/integration/test_translate_echo.py b/tests/integration/test_translate_echo.py index eabd19159..53fd239db 100644 --- a/tests/integration/test_translate_echo.py +++ b/tests/integration/test_translate_echo.py @@ -28,7 +28,6 @@ # First-Party from mcpgateway.translate import _build_fastapi, _PubSub, _run_stdio_to_sse, StdIOEndpoint - # Test configuration TEST_PORT = 19999 # Use high port to avoid conflicts TEST_HOST = "127.0.0.1" @@ -90,6 +89,7 @@ async def echo_server(): """ # Write script to temp file + # Standard import tempfile with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f: f.write(echo_script) @@ -98,6 +98,7 @@ async def echo_server(): yield f"{sys.executable} {script_path}" # Cleanup + # Standard import os os.unlink(script_path) diff --git a/tests/migration/add_version.py b/tests/migration/add_version.py index 843850f52..4a100c35c 100755 --- a/tests/migration/add_version.py +++ b/tests/migration/add_version.py @@ -9,11 +9,12 @@ python3 tests/migration/add_version.py 0.7.0 """ -import sys +# Standard +from datetime import datetime import json from pathlib import Path -from datetime import datetime -from typing import Dict, Any +import sys +from typing import Any, Dict def show_instructions(new_version: str): diff --git a/tests/migration/conftest.py b/tests/migration/conftest.py index 9c114d6f5..68747a456 100644 --- a/tests/migration/conftest.py +++ b/tests/migration/conftest.py @@ -5,12 +5,16 @@ including container management, test data generation, and cleanup utilities. """ +# Standard import logging -import pytest -import tempfile from pathlib import Path +import tempfile from typing import Dict, Generator +# Third-Party +import pytest + +# Local from .utils.container_manager import ContainerManager from .utils.migration_runner import MigrationTestRunner from .version_config import VersionConfig @@ -48,6 +52,7 @@ def migration_test_dir(): @pytest.fixture(scope="session") def container_runtime(): """Detect and return the available container runtime.""" + # Standard import subprocess # Try Docker first @@ -407,6 +412,7 @@ def collect_result(result): # Save results at end of test if results: results_file = Path("tests/migration/reports/test_results.json") + # Standard import json with open(results_file, 'w') as f: json.dump(results, f, indent=2) @@ -440,7 +446,8 @@ def pytest_generate_tests(metafunc): @pytest.fixture def mock_container_manager(): """Mock container manager for testing without actual containers.""" - from unittest.mock import Mock, MagicMock + # Standard + from unittest.mock import MagicMock, Mock mock_cm = Mock(spec=ContainerManager) mock_cm.runtime = "mock" diff --git a/tests/migration/test_compose_postgres_migrations.py b/tests/migration/test_compose_postgres_migrations.py index 88605f238..edc7c5612 100644 --- a/tests/migration/test_compose_postgres_migrations.py +++ b/tests/migration/test_compose_postgres_migrations.py @@ -5,12 +5,16 @@ stacks across different MCP Gateway versions with comprehensive validation. """ +# Standard import logging -import pytest -import time from pathlib import Path +import time + +# Third-Party +import pytest -from .utils.data_seeder import DataSeeder, DataGenerationConfig +# Local +from .utils.data_seeder import DataGenerationConfig, DataSeeder from .utils.schema_validator import SchemaValidator logger = logging.getLogger(__name__) @@ -413,6 +417,7 @@ def _seed_compose_test_data(self, container_manager, gateway_container, test_dat base_url = f"http://localhost:{port}" # Seed data using REST API + # Third-Party import requests session = requests.Session() session.timeout = 15 @@ -462,6 +467,7 @@ def _count_postgres_records(self, container_manager, gateway_container): f"print(resp.read().decode())" ], capture_output=True) + # Standard import json data = json.loads(result.stdout.strip()) diff --git a/tests/migration/test_docker_sqlite_migrations.py b/tests/migration/test_docker_sqlite_migrations.py index 8cda2164d..17b1b2cd9 100644 --- a/tests/migration/test_docker_sqlite_migrations.py +++ b/tests/migration/test_docker_sqlite_migrations.py @@ -5,12 +5,16 @@ different MCP Gateway versions with comprehensive validation. """ +# Standard import logging -import pytest -import time from pathlib import Path +import time + +# Third-Party +import pytest -from .utils.data_seeder import DataSeeder, DataGenerationConfig +# Local +from .utils.data_seeder import DataGenerationConfig, DataSeeder from .utils.schema_validator import SchemaValidator logger = logging.getLogger(__name__) diff --git a/tests/migration/test_migration_performance.py b/tests/migration/test_migration_performance.py index 1487be13f..f1b0d1f20 100644 --- a/tests/migration/test_migration_performance.py +++ b/tests/migration/test_migration_performance.py @@ -5,12 +5,16 @@ including benchmarking, stress testing, and resource monitoring. """ +# Standard import logging -import pytest -import time from pathlib import Path +import time + +# Third-Party +import pytest -from .utils.data_seeder import DataSeeder, DataGenerationConfig +# Local +from .utils.data_seeder import DataGenerationConfig, DataSeeder from .utils.schema_validator import SchemaValidator logger = logging.getLogger(__name__) @@ -455,6 +459,7 @@ def test_migration_benchmark_suite(self, migration_runner, sample_test_data, lar logger.info("") # Save benchmark results for comparison + # Standard import json benchmark_file = Path("tests/migration/reports/benchmark_results.json") benchmark_file.parent.mkdir(parents=True, exist_ok=True) diff --git a/tests/migration/utils/container_manager.py b/tests/migration/utils/container_manager.py index be1e30187..1ce76b6a8 100644 --- a/tests/migration/utils/container_manager.py +++ b/tests/migration/utils/container_manager.py @@ -5,14 +5,15 @@ for testing database migrations across different MCP Gateway versions. """ +# Standard +from dataclasses import dataclass import json import logging import os +from pathlib import Path import subprocess import tempfile import time -from dataclasses import dataclass -from pathlib import Path from typing import Dict, List, Optional, Tuple logger = logging.getLogger(__name__) @@ -194,8 +195,10 @@ def start_sqlite_container(self, version: str, logger.info(f"📁 Created new data directory: {temp_dir}") # Set ownership and permissions so the app user (uid=1001) can write to it try: + # Standard import os import stat + # Change ownership to match the container app user (uid=1001, gid=1001) os.chown(temp_dir, 1001, 1001) # Also set write permissions for good measure diff --git a/tests/migration/utils/data_seeder.py b/tests/migration/utils/data_seeder.py index fe52457f1..a70515298 100644 --- a/tests/migration/utils/data_seeder.py +++ b/tests/migration/utils/data_seeder.py @@ -5,13 +5,14 @@ capabilities for validating data integrity across migrations. """ +# Standard +from dataclasses import dataclass import json import logging +from pathlib import Path import random import string import time -from dataclasses import dataclass -from pathlib import Path from typing import Any, Dict, List, Optional, Union from uuid import uuid4 @@ -517,6 +518,7 @@ def create_version_specific_datasets(self, base_dataset: Dict[str, List[Dict]], for version in versions: # Create a copy of the base dataset + # Standard import copy dataset = copy.deepcopy(base_dataset) diff --git a/tests/migration/utils/migration_runner.py b/tests/migration/utils/migration_runner.py index 09801b836..0598bd86a 100644 --- a/tests/migration/utils/migration_runner.py +++ b/tests/migration/utils/migration_runner.py @@ -5,13 +5,15 @@ MCP Gateway versions with detailed logging and validation. """ +# Standard +from dataclasses import dataclass, field import json import logging -import time -from dataclasses import dataclass, field from pathlib import Path +import time from typing import Dict, List, Optional, Tuple +# Local from .container_manager import ContainerManager logger = logging.getLogger(__name__) @@ -309,6 +311,7 @@ def _seed_test_data(self, container_id: str, test_data: Dict) -> None: base_url = f"http://localhost:{port}" # Seed data using REST API + # Third-Party import requests session = requests.Session() session.timeout = 10 @@ -358,6 +361,7 @@ def _count_records(self, container_id: str) -> Dict[str, int]: base_url = f"http://localhost:{port}" # Count records using REST API + # Third-Party import requests session = requests.Session() session.timeout = 10 diff --git a/tests/migration/utils/reporting.py b/tests/migration/utils/reporting.py index 3d18c49d4..3a352dbdd 100644 --- a/tests/migration/utils/reporting.py +++ b/tests/migration/utils/reporting.py @@ -5,13 +5,14 @@ including HTML dashboards, JSON reports, and performance visualizations. """ -import json -import logging -import time +# Standard from dataclasses import asdict from datetime import datetime +import json +import logging from pathlib import Path -from typing import Dict, List, Optional, Any +import time +from typing import Any, Dict, List, Optional logger = logging.getLogger(__name__) @@ -864,6 +865,7 @@ def save_test_results(self, test_results: List[Dict], filename: str = None) -> P def main(): """Command-line interface for report generation.""" + # Standard import argparse import sys diff --git a/tests/migration/utils/schema_validator.py b/tests/migration/utils/schema_validator.py index 3fe04f0b8..0a9b4bc03 100644 --- a/tests/migration/utils/schema_validator.py +++ b/tests/migration/utils/schema_validator.py @@ -5,12 +5,13 @@ capabilities for ensuring migration integrity across MCP Gateway versions. """ +# Standard +from dataclasses import dataclass import difflib import logging +from pathlib import Path import re import tempfile -from dataclasses import dataclass -from pathlib import Path from typing import Dict, List, Optional, Set, Tuple logger = logging.getLogger(__name__) @@ -533,6 +534,7 @@ def save_schema_snapshot(self, schema: Dict[str, TableSchema], "foreign_keys": table_schema.foreign_keys } + # Standard import json with open(output_path, 'w') as f: json.dump({ @@ -555,6 +557,7 @@ def load_schema_snapshot(self, snapshot_file: Path) -> Dict[str, TableSchema]: """ logger.info(f"📂 Loading schema snapshot: {snapshot_file}") + # Standard import json with open(snapshot_file, 'r') as f: data = json.load(f) diff --git a/tests/migration/version_config.py b/tests/migration/version_config.py index 99486a666..5eb6ee234 100644 --- a/tests/migration/version_config.py +++ b/tests/migration/version_config.py @@ -6,8 +6,9 @@ and the two previous versions. """ -from typing import List, Tuple, Dict, Any +# Standard from datetime import datetime +from typing import Any, Dict, List, Tuple class VersionConfig: diff --git a/tests/migration/version_status.py b/tests/migration/version_status.py index c25dc6bc9..4f29dbd2b 100755 --- a/tests/migration/version_status.py +++ b/tests/migration/version_status.py @@ -2,7 +2,8 @@ # -*- coding: utf-8 -*- """Show current migration testing version configuration.""" -from version_config import VersionConfig, get_supported_versions, get_migration_pairs +# Third-Party +from version_config import get_migration_pairs, get_supported_versions, VersionConfig def main(): diff --git a/tests/security/test_configurable_headers.py b/tests/security/test_configurable_headers.py index 4c2198c37..35f9299e3 100644 --- a/tests/security/test_configurable_headers.py +++ b/tests/security/test_configurable_headers.py @@ -9,13 +9,17 @@ This module tests the configurable security headers implementation for issue #533. """ -import pytest +# Standard +from unittest.mock import patch + +# Third-Party from fastapi import FastAPI from fastapi.testclient import TestClient -from unittest.mock import patch +import pytest -from mcpgateway.middleware.security_headers import SecurityHeadersMiddleware +# First-Party from mcpgateway.config import settings +from mcpgateway.middleware.security_headers import SecurityHeadersMiddleware def test_security_headers_can_be_disabled(): diff --git a/tests/security/test_security_cookies.py b/tests/security/test_security_cookies.py index 5eecc5bfd..ed8b22634 100644 --- a/tests/security/test_security_cookies.py +++ b/tests/security/test_security_cookies.py @@ -9,18 +9,17 @@ This module contains tests for secure cookie configuration and handling. """ -import pytest +# Standard +from unittest.mock import patch + +# Third-Party from fastapi import Response from fastapi.testclient import TestClient -from unittest.mock import patch +import pytest -from mcpgateway.utils.security_cookies import ( - set_auth_cookie, - clear_auth_cookie, - set_session_cookie, - clear_session_cookie -) +# First-Party from mcpgateway.config import settings +from mcpgateway.utils.security_cookies import clear_auth_cookie, clear_session_cookie, set_auth_cookie, set_session_cookie class TestSecureCookies: diff --git a/tests/security/test_security_headers.py b/tests/security/test_security_headers.py index 0e9094ed0..08561cc7a 100644 --- a/tests/security/test_security_headers.py +++ b/tests/security/test_security_headers.py @@ -9,10 +9,14 @@ This module contains comprehensive tests for security headers middleware and CORS configuration. """ -import pytest -from fastapi.testclient import TestClient +# Standard from unittest.mock import patch +# Third-Party +from fastapi.testclient import TestClient +import pytest + +# First-Party from mcpgateway.config import settings diff --git a/tests/security/test_security_middleware_comprehensive.py b/tests/security/test_security_middleware_comprehensive.py index ba5b52ee6..4cd89b014 100644 --- a/tests/security/test_security_middleware_comprehensive.py +++ b/tests/security/test_security_middleware_comprehensive.py @@ -10,13 +10,17 @@ including all configuration combinations, edge cases, and integration scenarios. """ -import pytest -from fastapi import FastAPI, Response, Request +# Standard +from unittest.mock import Mock, patch + +# Third-Party +from fastapi import FastAPI, Request, Response from fastapi.testclient import TestClient -from unittest.mock import patch, Mock +import pytest -from mcpgateway.middleware.security_headers import SecurityHeadersMiddleware +# First-Party from mcpgateway.config import settings +from mcpgateway.middleware.security_headers import SecurityHeadersMiddleware class TestSecurityHeadersConfiguration: @@ -375,6 +379,7 @@ def success_endpoint(): @app.get("/not-found") def not_found_endpoint(): + # Third-Party from fastapi import HTTPException raise HTTPException(status_code=404, detail="Not found") diff --git a/tests/security/test_security_performance_compatibility.py b/tests/security/test_security_performance_compatibility.py index e26829a66..020908afc 100644 --- a/tests/security/test_security_performance_compatibility.py +++ b/tests/security/test_security_performance_compatibility.py @@ -10,15 +10,19 @@ of the security implementation. """ -import pytest +# Standard +import re import time +from unittest.mock import patch + +# Third-Party from fastapi import FastAPI from fastapi.testclient import TestClient -from unittest.mock import patch -import re +import pytest -from mcpgateway.middleware.security_headers import SecurityHeadersMiddleware +# First-Party from mcpgateway.config import settings +from mcpgateway.middleware.security_headers import SecurityHeadersMiddleware class TestPerformanceImpact: @@ -212,6 +216,7 @@ class TestStaticAnalysisToolCompatibility: def test_csp_meta_tag_format(self): """Test CSP meta tag format for static analysis tools.""" # This tests the meta tag in admin.html indirectly + # First-Party from mcpgateway.middleware.security_headers import SecurityHeadersMiddleware app = FastAPI() @@ -286,6 +291,7 @@ class TestCORSPerformanceAndCompatibility: def test_cors_origin_matching_performance(self): """Test CORS origin matching doesn't impact performance.""" + # Third-Party from fastapi.middleware.cors import CORSMiddleware # Create app with many allowed origins @@ -451,6 +457,7 @@ def test_security_headers_with_content_types(self, content_type: str, content: s @app.get("/test") def test_endpoint(): + # Third-Party from fastapi import Response return Response(content=content, media_type=content_type) @@ -477,6 +484,7 @@ def test_security_headers_with_binary_content(self): def binary_endpoint(): # Simulate binary content (like images, PDFs, etc.) binary_data = b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00\x01' + # Third-Party from fastapi import Response return Response(content=binary_data, media_type="image/png") diff --git a/tests/security/test_standalone_middleware.py b/tests/security/test_standalone_middleware.py index d8c3586f2..9aefdbbe6 100644 --- a/tests/security/test_standalone_middleware.py +++ b/tests/security/test_standalone_middleware.py @@ -9,13 +9,17 @@ This module tests the security middleware in isolation without the full app. """ -import pytest +# Standard +from unittest.mock import patch + +# Third-Party from fastapi import FastAPI, Response from fastapi.testclient import TestClient -from unittest.mock import patch +import pytest -from mcpgateway.middleware.security_headers import SecurityHeadersMiddleware +# First-Party from mcpgateway.config import settings +from mcpgateway.middleware.security_headers import SecurityHeadersMiddleware def test_security_headers_middleware_basic(): diff --git a/tests/unit/mcpgateway/cache/test_session_registry.py b/tests/unit/mcpgateway/cache/test_session_registry.py index 58ea826b3..66bf90366 100644 --- a/tests/unit/mcpgateway/cache/test_session_registry.py +++ b/tests/unit/mcpgateway/cache/test_session_registry.py @@ -1588,6 +1588,7 @@ async def test_generate_response_jsonrpc_error(registry: SessionRegistry): message = {"method": "test_method", "id": 1, "params": {}} # Mock ResilientHttpClient to raise JSONRPCError + # First-Party from mcpgateway.validation.jsonrpc import JSONRPCError class MockAsyncClient: @@ -1657,6 +1658,7 @@ async def __aexit__(self, exc_type, exc_val, exc_tb): @pytest.mark.asyncio async def test_session_backend_docstring_examples(): """Test the docstring examples in SessionBackend.""" + # First-Party from mcpgateway.cache.session_registry import SessionBackend # Test memory backend example diff --git a/tests/unit/mcpgateway/cache/test_session_registry_extended.py b/tests/unit/mcpgateway/cache/test_session_registry_extended.py index 89a8036f3..29655a845 100644 --- a/tests/unit/mcpgateway/cache/test_session_registry_extended.py +++ b/tests/unit/mcpgateway/cache/test_session_registry_extended.py @@ -13,13 +13,15 @@ from __future__ import annotations # Standard -import sys +import asyncio import json -import time import logging -from unittest.mock import patch, AsyncMock, Mock +import sys +import time +from unittest.mock import AsyncMock, Mock, patch + +# Third-Party import pytest -import asyncio # First-Party from mcpgateway.cache.session_registry import SessionRegistry @@ -31,7 +33,10 @@ class TestImportErrors: def test_redis_import_error_flag(self): """Test REDIS_AVAILABLE flag when redis import fails.""" with patch.dict(sys.modules, {'redis.asyncio': None}): + # Standard import importlib + + # First-Party import mcpgateway.cache.session_registry importlib.reload(mcpgateway.cache.session_registry) @@ -41,7 +46,10 @@ def test_redis_import_error_flag(self): def test_sqlalchemy_import_error_flag(self): """Test SQLALCHEMY_AVAILABLE flag when sqlalchemy import fails.""" with patch.dict(sys.modules, {'sqlalchemy': None}): + # Standard import importlib + + # First-Party import mcpgateway.cache.session_registry importlib.reload(mcpgateway.cache.session_registry) diff --git a/tests/unit/mcpgateway/middleware/test_token_scoping.py b/tests/unit/mcpgateway/middleware/test_token_scoping.py index 202dd9094..f7d1da632 100644 --- a/tests/unit/mcpgateway/middleware/test_token_scoping.py +++ b/tests/unit/mcpgateway/middleware/test_token_scoping.py @@ -7,16 +7,16 @@ """ # Standard -import pytest from unittest.mock import AsyncMock, MagicMock, patch # Third-Party from fastapi import HTTPException, Request, status import jwt +import pytest # First-Party -from mcpgateway.middleware.token_scoping import TokenScopingMiddleware from mcpgateway.db import Permissions +from mcpgateway.middleware.token_scoping import TokenScopingMiddleware class TestTokenScopingMiddleware: diff --git a/tests/unit/mcpgateway/plugins/fixtures/plugins/passthrough.py b/tests/unit/mcpgateway/plugins/fixtures/plugins/passthrough.py index 803a7642e..ed03ee1c6 100644 --- a/tests/unit/mcpgateway/plugins/fixtures/plugins/passthrough.py +++ b/tests/unit/mcpgateway/plugins/fixtures/plugins/passthrough.py @@ -8,6 +8,7 @@ """ +# First-Party from mcpgateway.plugins.framework import ( Plugin, PluginContext, @@ -25,6 +26,7 @@ ToolPreInvokeResult, ) + class PassThroughPlugin(Plugin): """A simple pass through plugin.""" diff --git a/tests/unit/mcpgateway/plugins/framework/external/mcp/test_client_stdio.py b/tests/unit/mcpgateway/plugins/framework/external/mcp/test_client_stdio.py index d35655142..1f731b5c5 100644 --- a/tests/unit/mcpgateway/plugins/framework/external/mcp/test_client_stdio.py +++ b/tests/unit/mcpgateway/plugins/framework/external/mcp/test_client_stdio.py @@ -6,25 +6,29 @@ Tests for external client on stdio. """ +# Standard from contextlib import AsyncExitStack import json import os import sys from typing import Optional -import pytest + +# Third-Party from mcp import ClientSession, StdioServerParameters from mcp.client.stdio import stdio_client +import pytest +# First-Party from mcpgateway.models import Message, PromptResult, ResourceContent, Role, TextContent from mcpgateway.plugins.framework import ( ConfigLoader, GlobalContext, PluginConfig, + PluginContext, PluginLoader, PluginManager, - PluginContext, - PromptPrehookPayload, PromptPosthookPayload, + PromptPrehookPayload, ResourcePostFetchPayload, ResourcePreFetchPayload, ToolPostInvokePayload, diff --git a/tests/unit/mcpgateway/plugins/framework/external/mcp/test_client_streamable_http.py b/tests/unit/mcpgateway/plugins/framework/external/mcp/test_client_streamable_http.py index 46ecb7c31..5c492d3d4 100644 --- a/tests/unit/mcpgateway/plugins/framework/external/mcp/test_client_streamable_http.py +++ b/tests/unit/mcpgateway/plugins/framework/external/mcp/test_client_streamable_http.py @@ -6,15 +6,19 @@ Tests for external client on streamable http. """ +# Standard import os import subprocess import sys import time +# Third-Party import pytest +# First-Party from mcpgateway.models import Message, PromptResult, Role, TextContent -from mcpgateway.plugins.framework import ConfigLoader, PluginLoader, PluginContext, PromptPrehookPayload, PromptPosthookPayload +from mcpgateway.plugins.framework import ConfigLoader, PluginContext, PluginLoader, PromptPosthookPayload, PromptPrehookPayload + @pytest.fixture(autouse=True) def server_proc(): diff --git a/tests/unit/mcpgateway/plugins/framework/loader/test_plugin_loader.py b/tests/unit/mcpgateway/plugins/framework/loader/test_plugin_loader.py index 2fa8ef453..5e3566495 100644 --- a/tests/unit/mcpgateway/plugins/framework/loader/test_plugin_loader.py +++ b/tests/unit/mcpgateway/plugins/framework/loader/test_plugin_loader.py @@ -7,6 +7,9 @@ Unit tests for config and plugin loaders. """ +# Standard +from unittest.mock import MagicMock, patch + # Third-Party import pytest @@ -16,7 +19,6 @@ from mcpgateway.plugins.framework.loader.plugin import PluginLoader from mcpgateway.plugins.framework.models import PluginContext, PluginMode, PromptPosthookPayload, PromptPrehookPayload from plugins.regex_filter.search_replace import SearchReplaceConfig, SearchReplacePlugin -from unittest.mock import patch, MagicMock def test_config_loader_load(): @@ -104,6 +106,7 @@ async def test_plugin_loader_duplicate_registration(): @pytest.mark.asyncio async def test_plugin_loader_get_plugin_type_error(): """Test error handling in __get_plugin_type method.""" + # First-Party from mcpgateway.plugins.framework.models import PluginConfig loader = PluginLoader() @@ -130,6 +133,7 @@ async def test_plugin_loader_get_plugin_type_error(): @pytest.mark.asyncio async def test_plugin_loader_none_plugin_type(): """Test handling when plugin type resolves to None.""" + # First-Party from mcpgateway.plugins.framework.models import PluginConfig loader = PluginLoader() @@ -191,6 +195,7 @@ async def test_plugin_loader_shutdown_with_existing_types(): @pytest.mark.asyncio async def test_plugin_loader_registration_branch_coverage(): """Test plugin registration path coverage.""" + # First-Party from mcpgateway.plugins.framework.models import PluginConfig loader = PluginLoader() diff --git a/tests/unit/mcpgateway/plugins/framework/test_errors.py b/tests/unit/mcpgateway/plugins/framework/test_errors.py index c99dcf9f0..4afaad458 100644 --- a/tests/unit/mcpgateway/plugins/framework/test_errors.py +++ b/tests/unit/mcpgateway/plugins/framework/test_errors.py @@ -7,7 +7,10 @@ Tests for errors module. """ +# Third-Party import pytest + +# First-Party from mcpgateway.plugins.framework.errors import convert_exception_to_error, PluginError diff --git a/tests/unit/mcpgateway/plugins/framework/test_manager.py b/tests/unit/mcpgateway/plugins/framework/test_manager.py index 18bfd8673..81296cc93 100644 --- a/tests/unit/mcpgateway/plugins/framework/test_manager.py +++ b/tests/unit/mcpgateway/plugins/framework/test_manager.py @@ -12,13 +12,7 @@ # First-Party from mcpgateway.models import Message, PromptResult, Role, TextContent from mcpgateway.plugins.framework.manager import PluginManager -from mcpgateway.plugins.framework.models import ( - GlobalContext, - PromptPosthookPayload, - PromptPrehookPayload, - ToolPostInvokePayload, - ToolPreInvokePayload -) +from mcpgateway.plugins.framework.models import GlobalContext, PromptPosthookPayload, PromptPrehookPayload, ToolPostInvokePayload, ToolPreInvokePayload from plugins.regex_filter.search_replace import SearchReplaceConfig diff --git a/tests/unit/mcpgateway/plugins/framework/test_manager_extended.py b/tests/unit/mcpgateway/plugins/framework/test_manager_extended.py index 0f6430de6..361fd94e2 100644 --- a/tests/unit/mcpgateway/plugins/framework/test_manager_extended.py +++ b/tests/unit/mcpgateway/plugins/framework/test_manager_extended.py @@ -6,11 +6,14 @@ Extended tests for plugin manager to achieve 100% coverage. """ +# Standard import asyncio from unittest.mock import AsyncMock, MagicMock, patch +# Third-Party import pytest +# First-Party from mcpgateway.models import Message, PromptResult, Role, TextContent from mcpgateway.plugins.framework.base import Plugin from mcpgateway.plugins.framework.manager import PluginManager @@ -22,8 +25,8 @@ PluginConfig, PluginContext, PluginMode, - PluginViolation, PluginResult, + PluginViolation, PromptPosthookPayload, PromptPrehookPayload, ToolPostInvokePayload, @@ -452,8 +455,9 @@ async def test_manager_shutdown_behavior(): @pytest.mark.asyncio async def test_manager_payload_size_validation(): """Test payload size validation functionality.""" - from mcpgateway.plugins.framework.manager import PayloadSizeError, MAX_PAYLOAD_SIZE, PluginExecutor - from mcpgateway.plugins.framework.models import PromptPrehookPayload, PromptPosthookPayload + # First-Party + from mcpgateway.plugins.framework.manager import MAX_PAYLOAD_SIZE, PayloadSizeError, PluginExecutor + from mcpgateway.plugins.framework.models import PromptPosthookPayload, PromptPrehookPayload # Test payload size validation directly on executor (covers lines 252, 258) executor = PluginExecutor[PromptPrehookPayload]() @@ -467,7 +471,8 @@ async def test_manager_payload_size_validation(): executor._validate_payload_size(large_prompt) # Test large result payload (covers line 258) - from mcpgateway.models import PromptResult, Message, TextContent, Role + # First-Party + from mcpgateway.models import Message, PromptResult, Role, TextContent large_text = "y" * (MAX_PAYLOAD_SIZE + 1) message = Message(role=Role.USER, content=TextContent(type="text", text=large_text)) large_result = PromptResult(messages=[message]) @@ -495,8 +500,9 @@ async def test_manager_initialization_edge_cases(): await manager.shutdown() # Test plugin instantiation failure (covers lines 495-501) - from mcpgateway.plugins.framework.models import PluginConfig, PluginMode, PluginSettings + # First-Party from mcpgateway.plugins.framework.loader.plugin import PluginLoader + from mcpgateway.plugins.framework.models import PluginConfig, PluginMode, PluginSettings manager2 = PluginManager() manager2._config = Config( @@ -550,9 +556,12 @@ async def test_manager_initialization_edge_cases(): @pytest.mark.asyncio async def test_manager_context_cleanup(): """Test context cleanup functionality.""" - from mcpgateway.plugins.framework.manager import CONTEXT_MAX_AGE + # Standard import time + # First-Party + from mcpgateway.plugins.framework.manager import CONTEXT_MAX_AGE + manager = PluginManager("./tests/unit/mcpgateway/plugins/fixtures/configs/valid_no_plugin.yaml") await manager.initialize() @@ -598,13 +607,20 @@ def test_manager_constructor_context_init(): @pytest.mark.asyncio async def test_base_plugin_coverage(): """Test base plugin functionality for complete coverage.""" + # First-Party + from mcpgateway.models import Message, PromptResult, Role, TextContent from mcpgateway.plugins.framework.base import Plugin, PluginRef - from mcpgateway.plugins.framework.models import PluginConfig, HookType, PluginMode from mcpgateway.plugins.framework.models import ( - PluginContext, GlobalContext, PromptPrehookPayload, PromptPosthookPayload, - ToolPreInvokePayload, ToolPostInvokePayload + GlobalContext, + HookType, + PluginConfig, + PluginContext, + PluginMode, + PromptPosthookPayload, + PromptPrehookPayload, + ToolPostInvokePayload, + ToolPreInvokePayload, ) - from mcpgateway.models import PromptResult, Message, TextContent, Role # Test plugin with tags property (covers line 130) config = PluginConfig( @@ -659,10 +675,9 @@ async def test_base_plugin_coverage(): @pytest.mark.asyncio async def test_plugin_types_coverage(): """Test plugin types functionality for complete coverage.""" - from mcpgateway.plugins.framework.models import ( - PluginContext, PluginViolation - ) + # First-Party from mcpgateway.plugins.framework.errors import PluginViolationError + from mcpgateway.plugins.framework.models import PluginContext, PluginViolation # Test PluginContext state methods (covers lines 266, 275) plugin_ctx = PluginContext(request_id="test", user="testuser") @@ -701,8 +716,9 @@ async def test_plugin_types_coverage(): @pytest.mark.asyncio async def test_plugin_loader_return_none(): """Test plugin loader return None case.""" + # First-Party from mcpgateway.plugins.framework.loader.plugin import PluginLoader - from mcpgateway.plugins.framework.models import PluginConfig, HookType + from mcpgateway.plugins.framework.models import HookType, PluginConfig loader = PluginLoader() @@ -727,6 +743,7 @@ async def test_plugin_loader_return_none(): def test_plugin_violation_setter_validation(): """Test PluginViolation plugin_name setter validation.""" + # First-Party from mcpgateway.plugins.framework.models import PluginViolation violation = PluginViolation( diff --git a/tests/unit/mcpgateway/plugins/framework/test_registry.py b/tests/unit/mcpgateway/plugins/framework/test_registry.py index 709b5e201..cb76fa5ec 100644 --- a/tests/unit/mcpgateway/plugins/framework/test_registry.py +++ b/tests/unit/mcpgateway/plugins/framework/test_registry.py @@ -6,16 +6,18 @@ Unit tests for plugin registry. """ +# Standard +from unittest.mock import AsyncMock, patch + # Third-Party import pytest # First-Party +from mcpgateway.plugins.framework.base import Plugin from mcpgateway.plugins.framework.loader.config import ConfigLoader from mcpgateway.plugins.framework.loader.plugin import PluginLoader -from mcpgateway.plugins.framework.registry import PluginInstanceRegistry from mcpgateway.plugins.framework.models import HookType, PluginConfig -from mcpgateway.plugins.framework.base import Plugin -from unittest.mock import AsyncMock, patch +from mcpgateway.plugins.framework.registry import PluginInstanceRegistry @pytest.mark.asyncio diff --git a/tests/unit/mcpgateway/plugins/framework/test_resource_hooks.py b/tests/unit/mcpgateway/plugins/framework/test_resource_hooks.py index d5bf3bb58..98e16f8f9 100644 --- a/tests/unit/mcpgateway/plugins/framework/test_resource_hooks.py +++ b/tests/unit/mcpgateway/plugins/framework/test_resource_hooks.py @@ -7,13 +7,18 @@ Tests for resource hook functionality in the plugin framework. """ +# Standard import asyncio from unittest.mock import AsyncMock, MagicMock, patch + +# Third-Party import pytest +# First-Party from mcpgateway.models import ResourceContent from mcpgateway.plugins.framework.base import Plugin, PluginRef from mcpgateway.plugins.framework.manager import PluginManager + # Registry is imported for mocking from mcpgateway.plugins.framework.models import ( GlobalContext, @@ -218,6 +223,7 @@ class TestResourceHookIntegration: def clear_plugin_manager_state(self): """Clear the PluginManager shared state before and after each test.""" # Clear before test + # First-Party from mcpgateway.plugins.framework.manager import PluginManager PluginManager._PluginManager__shared_state.clear() yield diff --git a/tests/unit/mcpgateway/plugins/framework/test_utils.py b/tests/unit/mcpgateway/plugins/framework/test_utils.py index 2a41fa36b..af957abfa 100644 --- a/tests/unit/mcpgateway/plugins/framework/test_utils.py +++ b/tests/unit/mcpgateway/plugins/framework/test_utils.py @@ -6,11 +6,12 @@ Unit tests for utilities. """ +# Standard import sys +# First-Party +from mcpgateway.plugins.framework.models import GlobalContext, PluginCondition, PromptPosthookPayload, PromptPrehookPayload, ToolPostInvokePayload, ToolPreInvokePayload from mcpgateway.plugins.framework.utils import import_module, matches, parse_class_name, post_prompt_matches, post_tool_matches, pre_prompt_matches, pre_tool_matches -from mcpgateway.plugins.framework.models import GlobalContext, PluginCondition, PromptPrehookPayload, PromptPosthookPayload, ToolPostInvokePayload, ToolPreInvokePayload - def test_server_ids(): @@ -108,7 +109,8 @@ def test_parse_class_name(): def test_post_prompt_matches(): """Test the post_prompt_matches function.""" # Import required models - from mcpgateway.models import PromptResult, Message, TextContent + # First-Party + from mcpgateway.models import Message, PromptResult, TextContent # Test basic matching msg = Message(role="assistant", content=TextContent(type="text", text="Hello")) @@ -136,7 +138,8 @@ def test_post_prompt_matches(): def test_post_prompt_matches_multiple_conditions(): """Test post_prompt_matches with multiple conditions (OR logic).""" - from mcpgateway.models import PromptResult, Message, TextContent + # First-Party + from mcpgateway.models import Message, PromptResult, TextContent # Create the payload msg = Message(role="assistant", content=TextContent(type="text", text="Hello")) diff --git a/tests/unit/mcpgateway/plugins/plugins/resource_filter/test_resource_filter.py b/tests/unit/mcpgateway/plugins/plugins/resource_filter/test_resource_filter.py index 8aee5cd68..0ca1b5db9 100644 --- a/tests/unit/mcpgateway/plugins/plugins/resource_filter/test_resource_filter.py +++ b/tests/unit/mcpgateway/plugins/plugins/resource_filter/test_resource_filter.py @@ -7,8 +7,10 @@ Tests for the ResourceFilterPlugin. """ +# Third-Party import pytest +# First-Party from mcpgateway.models import ResourceContent from mcpgateway.plugins.framework.models import ( HookType, diff --git a/tests/unit/mcpgateway/plugins/tools/test_cli.py b/tests/unit/mcpgateway/plugins/tools/test_cli.py index f7d60f664..08ecd5ee4 100644 --- a/tests/unit/mcpgateway/plugins/tools/test_cli.py +++ b/tests/unit/mcpgateway/plugins/tools/test_cli.py @@ -10,12 +10,10 @@ # Future from __future__ import annotations -# Standard -import yaml - # Third-Party import pytest from typer.testing import CliRunner +import yaml # First-Party import mcpgateway.plugins.tools.cli as cli diff --git a/tests/unit/mcpgateway/routers/test_oauth_router.py b/tests/unit/mcpgateway/routers/test_oauth_router.py index 67977d4be..f21b5e724 100644 --- a/tests/unit/mcpgateway/routers/test_oauth_router.py +++ b/tests/unit/mcpgateway/routers/test_oauth_router.py @@ -9,18 +9,18 @@ """ # Standard -import pytest from unittest.mock import AsyncMock, Mock, patch # Third-Party from fastapi import HTTPException, Request from fastapi.responses import HTMLResponse, RedirectResponse from fastapi.testclient import TestClient +import pytest from sqlalchemy.orm import Session # First-Party -from mcpgateway.routers.oauth_router import oauth_router from mcpgateway.db import Gateway +from mcpgateway.routers.oauth_router import oauth_router from mcpgateway.services.oauth_manager import OAuthError, OAuthManager from mcpgateway.services.token_storage_service import TokenStorageService @@ -81,6 +81,7 @@ async def test_initiate_oauth_flow_success(self, mock_db, mock_request, mock_gat mock_token_storage_class.return_value = mock_token_storage # Import the function to test + # First-Party from mcpgateway.routers.oauth_router import initiate_oauth_flow # Execute @@ -102,6 +103,7 @@ async def test_initiate_oauth_flow_gateway_not_found(self, mock_db, mock_request # Setup mock_db.execute.return_value.scalar_one_or_none.return_value = None + # First-Party from mcpgateway.routers.oauth_router import initiate_oauth_flow # Execute & Assert @@ -120,6 +122,7 @@ async def test_initiate_oauth_flow_no_oauth_config(self, mock_db, mock_request): mock_gateway.oauth_config = None mock_db.execute.return_value.scalar_one_or_none.return_value = mock_gateway + # First-Party from mcpgateway.routers.oauth_router import initiate_oauth_flow # Execute & Assert @@ -138,6 +141,7 @@ async def test_initiate_oauth_flow_wrong_grant_type(self, mock_db, mock_request) mock_gateway.oauth_config = {"grant_type": "client_credentials"} mock_db.execute.return_value.scalar_one_or_none.return_value = mock_gateway + # First-Party from mcpgateway.routers.oauth_router import initiate_oauth_flow # Execute & Assert @@ -161,6 +165,7 @@ async def test_initiate_oauth_flow_oauth_manager_error(self, mock_db, mock_reque mock_oauth_manager_class.return_value = mock_oauth_manager with patch('mcpgateway.routers.oauth_router.TokenStorageService'): + # First-Party from mcpgateway.routers.oauth_router import initiate_oauth_flow # Execute & Assert @@ -191,6 +196,7 @@ async def test_oauth_callback_success(self, mock_db, mock_gateway): mock_token_storage = Mock() mock_token_storage_class.return_value = mock_token_storage + # First-Party from mcpgateway.routers.oauth_router import oauth_callback # Execute @@ -225,6 +231,7 @@ async def test_oauth_callback_gateway_not_found(self, mock_db): # Setup mock_db.execute.return_value.scalar_one_or_none.return_value = None + # First-Party from mcpgateway.routers.oauth_router import oauth_callback # Execute @@ -249,6 +256,7 @@ async def test_oauth_callback_no_oauth_config(self, mock_db): mock_gateway.oauth_config = None mock_db.execute.return_value.scalar_one_or_none.return_value = mock_gateway + # First-Party from mcpgateway.routers.oauth_router import oauth_callback # Execute @@ -278,6 +286,7 @@ async def test_oauth_callback_oauth_error(self, mock_db, mock_gateway): mock_oauth_manager_class.return_value = mock_oauth_manager with patch('mcpgateway.routers.oauth_router.TokenStorageService'): + # First-Party from mcpgateway.routers.oauth_router import oauth_callback # Execute @@ -308,6 +317,7 @@ async def test_oauth_callback_unexpected_error(self, mock_db, mock_gateway): mock_oauth_manager_class.return_value = mock_oauth_manager with patch('mcpgateway.routers.oauth_router.TokenStorageService'): + # First-Party from mcpgateway.routers.oauth_router import oauth_callback # Execute @@ -330,6 +340,7 @@ async def test_get_oauth_status_success_authorization_code(self, mock_db, mock_g # Setup mock_db.execute.return_value.scalar_one_or_none.return_value = mock_gateway + # First-Party from mcpgateway.routers.oauth_router import get_oauth_status # Execute @@ -359,6 +370,7 @@ async def test_get_oauth_status_success_client_credentials(self, mock_db): } mock_db.execute.return_value.scalar_one_or_none.return_value = mock_gateway + # First-Party from mcpgateway.routers.oauth_router import get_oauth_status # Execute @@ -380,6 +392,7 @@ async def test_get_oauth_status_gateway_not_found(self, mock_db): # Setup mock_db.execute.return_value.scalar_one_or_none.return_value = None + # First-Party from mcpgateway.routers.oauth_router import get_oauth_status # Execute & Assert @@ -397,6 +410,7 @@ async def test_get_oauth_status_no_oauth_config(self, mock_db): mock_gateway.oauth_config = None mock_db.execute.return_value.scalar_one_or_none.return_value = mock_gateway + # First-Party from mcpgateway.routers.oauth_router import get_oauth_status # Execute @@ -415,6 +429,7 @@ async def test_get_oauth_status_database_error(self, mock_db): # Setup mock_db.execute.side_effect = Exception("Database connection failed") + # First-Party from mcpgateway.routers.oauth_router import get_oauth_status # Execute & Assert @@ -441,6 +456,7 @@ async def test_fetch_tools_after_oauth_success(self, mock_db): mock_gateway_service.fetch_tools_after_oauth = AsyncMock(return_value=mock_tools_result) mock_gateway_service_class.return_value = mock_gateway_service + # First-Party from mcpgateway.routers.oauth_router import fetch_tools_after_oauth # Execute @@ -466,6 +482,7 @@ async def test_fetch_tools_after_oauth_no_tools(self, mock_db): mock_gateway_service.fetch_tools_after_oauth = AsyncMock(return_value=mock_tools_result) mock_gateway_service_class.return_value = mock_gateway_service + # First-Party from mcpgateway.routers.oauth_router import fetch_tools_after_oauth # Execute @@ -489,6 +506,7 @@ async def test_fetch_tools_after_oauth_service_error(self, mock_db): ) mock_gateway_service_class.return_value = mock_gateway_service + # First-Party from mcpgateway.routers.oauth_router import fetch_tools_after_oauth # Execute & Assert @@ -510,6 +528,7 @@ async def test_fetch_tools_after_oauth_malformed_result(self, mock_db): mock_gateway_service.fetch_tools_after_oauth = AsyncMock(return_value=mock_tools_result) mock_gateway_service_class.return_value = mock_gateway_service + # First-Party from mcpgateway.routers.oauth_router import fetch_tools_after_oauth # Execute diff --git a/tests/unit/mcpgateway/routers/test_reverse_proxy.py b/tests/unit/mcpgateway/routers/test_reverse_proxy.py index db374203e..303889b20 100644 --- a/tests/unit/mcpgateway/routers/test_reverse_proxy.py +++ b/tests/unit/mcpgateway/routers/test_reverse_proxy.py @@ -23,14 +23,13 @@ # First-Party from mcpgateway.routers.reverse_proxy import ( + manager, ReverseProxyManager, ReverseProxySession, - manager, router, ) from mcpgateway.utils.verify_credentials import require_auth - # --------------------------------------------------------------------------- # # Test Fixtures # # --------------------------------------------------------------------------- # @@ -266,6 +265,7 @@ async def test_websocket_accept(self, mock_websocket): mock_websocket.headers = {"X-Session-ID": "test-session"} mock_websocket.receive_text.side_effect = asyncio.CancelledError() + # First-Party from mcpgateway.routers.reverse_proxy import websocket_endpoint with patch("mcpgateway.routers.reverse_proxy.get_db") as mock_get_db: @@ -284,6 +284,7 @@ async def test_websocket_generates_session_id(self, mock_websocket): mock_websocket.headers = {} # No X-Session-ID header mock_websocket.receive_text.side_effect = asyncio.CancelledError() + # First-Party from mcpgateway.routers.reverse_proxy import websocket_endpoint with patch("mcpgateway.routers.reverse_proxy.get_db") as mock_get_db, \ @@ -308,6 +309,7 @@ async def test_websocket_register_message(self, mock_websocket): asyncio.CancelledError() ] + # First-Party from mcpgateway.routers.reverse_proxy import websocket_endpoint with patch("mcpgateway.routers.reverse_proxy.get_db") as mock_get_db: @@ -331,6 +333,7 @@ async def test_websocket_unregister_message(self, mock_websocket): unregister_msg = {"type": "unregister"} mock_websocket.receive_text.return_value = json.dumps(unregister_msg) + # First-Party from mcpgateway.routers.reverse_proxy import websocket_endpoint with patch("mcpgateway.routers.reverse_proxy.get_db") as mock_get_db: @@ -348,6 +351,7 @@ async def test_websocket_heartbeat_message(self, mock_websocket): asyncio.CancelledError() ] + # First-Party from mcpgateway.routers.reverse_proxy import websocket_endpoint with patch("mcpgateway.routers.reverse_proxy.get_db") as mock_get_db: @@ -374,6 +378,7 @@ async def test_websocket_response_message(self, mock_websocket): asyncio.CancelledError() ] + # First-Party from mcpgateway.routers.reverse_proxy import websocket_endpoint with patch("mcpgateway.routers.reverse_proxy.get_db") as mock_get_db: @@ -394,6 +399,7 @@ async def test_websocket_notification_message(self, mock_websocket): asyncio.CancelledError() ] + # First-Party from mcpgateway.routers.reverse_proxy import websocket_endpoint with patch("mcpgateway.routers.reverse_proxy.get_db") as mock_get_db: @@ -414,6 +420,7 @@ async def test_websocket_unknown_message_type(self, mock_websocket): asyncio.CancelledError() ] + # First-Party from mcpgateway.routers.reverse_proxy import websocket_endpoint with patch("mcpgateway.routers.reverse_proxy.get_db") as mock_get_db: @@ -433,6 +440,7 @@ async def test_websocket_invalid_json(self, mock_websocket): asyncio.CancelledError() ] + # First-Party from mcpgateway.routers.reverse_proxy import websocket_endpoint with patch("mcpgateway.routers.reverse_proxy.get_db") as mock_get_db: @@ -460,6 +468,7 @@ async def test_websocket_general_exception(self, mock_websocket): asyncio.CancelledError() ] + # First-Party from mcpgateway.routers.reverse_proxy import websocket_endpoint with patch("mcpgateway.routers.reverse_proxy.get_db") as mock_get_db: @@ -484,6 +493,7 @@ class TestHTTPEndpoints: @pytest.fixture def client(self): """Create test client.""" + # Third-Party from fastapi import FastAPI app = FastAPI() diff --git a/tests/unit/mcpgateway/services/test_email_auth_basic.py b/tests/unit/mcpgateway/services/test_email_auth_basic.py index 5705577ee..b6e2cb192 100644 --- a/tests/unit/mcpgateway/services/test_email_auth_basic.py +++ b/tests/unit/mcpgateway/services/test_email_auth_basic.py @@ -9,20 +9,14 @@ # Standard from unittest.mock import MagicMock, patch -import pytest # Third-Party +import pytest from sqlalchemy.orm import Session # First-Party -from mcpgateway.services.email_auth_service import ( - EmailAuthService, - EmailValidationError, - PasswordValidationError, - UserExistsError, - AuthenticationError -) from mcpgateway.services.argon2_service import Argon2PasswordService +from mcpgateway.services.email_auth_service import AuthenticationError, EmailAuthService, EmailValidationError, PasswordValidationError, UserExistsError class TestEmailAuthBasic: diff --git a/tests/unit/mcpgateway/services/test_export_service.py b/tests/unit/mcpgateway/services/test_export_service.py index 061b2a8e4..bdcf32d56 100644 --- a/tests/unit/mcpgateway/services/test_export_service.py +++ b/tests/unit/mcpgateway/services/test_export_service.py @@ -8,20 +8,17 @@ """ # Standard -import json from datetime import datetime, timezone +import json from unittest.mock import AsyncMock, MagicMock, patch # Third-Party import pytest # First-Party -from mcpgateway.services.export_service import ExportService, ExportError, ExportValidationError -from mcpgateway.schemas import ( - ToolRead, GatewayRead, ServerRead, PromptRead, ResourceRead, - ToolMetrics, ServerMetrics, PromptMetrics, ResourceMetrics -) from mcpgateway.models import Root +from mcpgateway.schemas import GatewayRead, PromptMetrics, PromptRead, ResourceMetrics, ResourceRead, ServerMetrics, ServerRead, ToolMetrics, ToolRead +from mcpgateway.services.export_service import ExportError, ExportService, ExportValidationError from mcpgateway.utils.services_auth import encode_auth @@ -89,6 +86,7 @@ def mock_db(): @pytest.fixture def sample_tool(): """Create a sample tool for testing.""" + # First-Party from mcpgateway.schemas import ToolMetrics return ToolRead( id="tool1", @@ -268,6 +266,7 @@ async def test_export_selective(export_service, mock_db, sample_tool): async def test_export_tools_filters_mcp(export_service, mock_db): """Test that export filters out MCP tools from gateways.""" # Create a mix of tools + # First-Party from mcpgateway.schemas import ToolMetrics local_tool = ToolRead( @@ -401,8 +400,9 @@ async def test_extract_dependencies(export_service, mock_db): @pytest.mark.asyncio async def test_export_with_masked_auth_data(export_service, mock_db): """Test export handling of masked authentication data.""" - from mcpgateway.schemas import ToolRead, ToolMetrics, AuthenticationValues + # First-Party from mcpgateway.config import settings + from mcpgateway.schemas import AuthenticationValues, ToolMetrics, ToolRead # Create tool with masked auth data tool_with_masked_auth = ToolRead( @@ -529,6 +529,7 @@ async def test_export_with_exclude_types(export_service, mock_db): @pytest.mark.asyncio async def test_export_roots_functionality(export_service): """Test root export functionality.""" + # First-Party from mcpgateway.models import Root # Mock root service @@ -582,8 +583,9 @@ async def test_export_with_include_inactive(export_service, mock_db): @pytest.mark.asyncio async def test_export_tools_with_non_masked_auth(export_service, mock_db): """Test export tools with non-masked authentication data.""" - from mcpgateway.schemas import ToolRead, ToolMetrics, AuthenticationValues + # First-Party from mcpgateway.config import settings + from mcpgateway.schemas import AuthenticationValues, ToolMetrics, ToolRead # Create tool with non-masked auth data tool_with_auth = ToolRead( @@ -675,6 +677,7 @@ async def test_export_gateways_with_tag_filtering(export_service, mock_db): @pytest.mark.asyncio async def test_export_gateways_with_masked_auth(export_service, mock_db): """Test gateway export with masked authentication data.""" + # First-Party from mcpgateway.config import settings # Create gateway with masked auth @@ -877,7 +880,8 @@ async def test_validate_export_data_invalid_metadata(export_service): @pytest.mark.asyncio async def test_export_selective_all_entity_types(export_service, mock_db): """Test selective export with all entity types.""" - from mcpgateway.schemas import ToolRead, GatewayRead, ServerRead, PromptRead, ResourceRead, ToolMetrics + # First-Party + from mcpgateway.schemas import GatewayRead, PromptRead, ResourceRead, ServerRead, ToolMetrics, ToolRead # Mock entities for each type sample_tool = ToolRead( @@ -935,6 +939,7 @@ async def test_export_selective_all_entity_types(export_service, mock_db): export_service.prompt_service.list_prompts.return_value = [sample_prompt] export_service.resource_service.list_resources.return_value = [sample_resource] + # First-Party from mcpgateway.models import Root mock_roots = [Root(uri="file:///workspace", name="Workspace")] export_service.root_service.list_roots.return_value = mock_roots @@ -1021,6 +1026,7 @@ async def test_export_selected_gateways_error_handling(export_service, mock_db): @pytest.mark.asyncio async def test_export_selected_servers(export_service, mock_db): """Test selective server export.""" + # First-Party from mcpgateway.schemas import ServerRead sample_server = ServerRead( @@ -1053,6 +1059,7 @@ async def test_export_selected_servers_error_handling(export_service, mock_db): @pytest.mark.asyncio async def test_export_selected_prompts(export_service, mock_db): """Test selective prompt export.""" + # First-Party from mcpgateway.schemas import PromptRead sample_prompt = PromptRead( @@ -1085,6 +1092,7 @@ async def test_export_selected_prompts_error_handling(export_service, mock_db): @pytest.mark.asyncio async def test_export_selected_resources(export_service, mock_db): """Test selective resource export.""" + # First-Party from mcpgateway.schemas import ResourceRead sample_resource = ResourceRead( @@ -1116,6 +1124,7 @@ async def test_export_selected_resources_error_handling(export_service, mock_db) @pytest.mark.asyncio async def test_export_selected_roots(export_service): """Test selective root export.""" + # First-Party from mcpgateway.models import Root mock_roots = [ diff --git a/tests/unit/mcpgateway/services/test_gateway_resources_prompts.py b/tests/unit/mcpgateway/services/test_gateway_resources_prompts.py index 14ccf0db2..7b0b1cc94 100644 --- a/tests/unit/mcpgateway/services/test_gateway_resources_prompts.py +++ b/tests/unit/mcpgateway/services/test_gateway_resources_prompts.py @@ -7,10 +7,15 @@ Tests for gateway service resource and prompt fetching functionality. """ -import pytest +# Standard from unittest.mock import AsyncMock, MagicMock, Mock, patch + +# Third-Party +import pytest + +# First-Party +from mcpgateway.schemas import GatewayCreate, PromptCreate, ResourceCreate, ToolCreate from mcpgateway.services.gateway_service import GatewayService -from mcpgateway.schemas import GatewayCreate, ResourceCreate, PromptCreate, ToolCreate class TestGatewayResourcesPrompts: diff --git a/tests/unit/mcpgateway/services/test_gateway_service.py b/tests/unit/mcpgateway/services/test_gateway_service.py index 73e963680..86de527af 100644 --- a/tests/unit/mcpgateway/services/test_gateway_service.py +++ b/tests/unit/mcpgateway/services/test_gateway_service.py @@ -17,8 +17,8 @@ # Standard import asyncio from datetime import datetime, timezone -from unittest.mock import AsyncMock, MagicMock, Mock, patch, mock_open import socket +from unittest.mock import AsyncMock, MagicMock, Mock, mock_open, patch # Third-Party import httpx @@ -1521,6 +1521,7 @@ async def test_init_with_redis_unavailable(self, monkeypatch): with patch('mcpgateway.services.gateway_service.logging') as mock_logging: # Import should trigger the ImportError path + # First-Party from mcpgateway.services.gateway_service import GatewayService service = GatewayService() assert service._redis_client is None @@ -1538,6 +1539,7 @@ async def test_init_with_redis_enabled(self, monkeypatch): mock_settings.cache_type = 'redis' mock_settings.redis_url = 'redis://localhost:6379' + # First-Party from mcpgateway.services.gateway_service import GatewayService service = GatewayService() @@ -1562,6 +1564,7 @@ async def test_init_file_cache_path_adjustment(self, monkeypatch): mock_splitdrive.return_value = ('C:', '/home/user/.mcpgateway/health_checks.lock') mock_relpath.return_value = 'home/user/.mcpgateway/health_checks.lock' + # First-Party from mcpgateway.services.gateway_service import GatewayService service = GatewayService() @@ -1578,6 +1581,7 @@ async def test_init_with_cache_disabled(self, monkeypatch): with patch('mcpgateway.services.gateway_service.settings') as mock_settings: mock_settings.cache_type = 'none' + # First-Party from mcpgateway.services.gateway_service import GatewayService service = GatewayService() diff --git a/tests/unit/mcpgateway/services/test_gateway_service_extended.py b/tests/unit/mcpgateway/services/test_gateway_service_extended.py index 6a59ddb4f..b46076a58 100644 --- a/tests/unit/mcpgateway/services/test_gateway_service_extended.py +++ b/tests/unit/mcpgateway/services/test_gateway_service_extended.py @@ -428,7 +428,9 @@ async def test_validate_gateway_url_exists(self): async def test_redis_import_error_handling(self): """Test Redis import error handling path (lines 64-66).""" # This test verifies the REDIS_AVAILABLE flag functionality + # First-Party from mcpgateway.services.gateway_service import REDIS_AVAILABLE + # Just verify the flag exists and is boolean assert isinstance(REDIS_AVAILABLE, bool) @@ -498,6 +500,7 @@ async def test_validate_gateway_redirect_auth_failure(self): service = GatewayService() # Test method exists with proper signature + # Standard import inspect sig = inspect.signature(service._validate_gateway_url) assert len(sig.parameters) >= 3 # url and other params @@ -508,6 +511,7 @@ async def test_validate_gateway_sse_content_type(self): service = GatewayService() # Test method is async + # Standard import asyncio assert asyncio.iscoroutinefunction(service._validate_gateway_url) @@ -531,6 +535,7 @@ async def test_initialize_with_redis_logging(self): assert callable(getattr(service, 'initialize')) # Test it's an async method + # Standard import asyncio assert asyncio.iscoroutinefunction(service.initialize) diff --git a/tests/unit/mcpgateway/services/test_gateway_service_health_oauth.py b/tests/unit/mcpgateway/services/test_gateway_service_health_oauth.py index 3dedf1445..addb5e1ef 100644 --- a/tests/unit/mcpgateway/services/test_gateway_service_health_oauth.py +++ b/tests/unit/mcpgateway/services/test_gateway_service_health_oauth.py @@ -44,6 +44,7 @@ def _make_execute_result(*, scalar=None, scalars_list=None): @pytest.fixture(autouse=True) def _bypass_validation(monkeypatch): """Bypass Pydantic validation for mock objects.""" + # First-Party from mcpgateway.schemas import GatewayRead monkeypatch.setattr(GatewayRead, "model_validate", staticmethod(lambda x: x)) diff --git a/tests/unit/mcpgateway/services/test_import_service.py b/tests/unit/mcpgateway/services/test_import_service.py index 439cc87df..b0b7f892b 100644 --- a/tests/unit/mcpgateway/services/test_import_service.py +++ b/tests/unit/mcpgateway/services/test_import_service.py @@ -8,24 +8,21 @@ """ # Standard +from datetime import datetime, timedelta, timezone import json -from datetime import datetime, timezone, timedelta from unittest.mock import AsyncMock, MagicMock, patch # Third-Party import pytest # First-Party -from mcpgateway.services.import_service import ( - ImportService, ImportError, ImportValidationError, ImportConflictError, - ConflictStrategy, ImportStatus -) -from mcpgateway.services.tool_service import ToolNameConflictError +from mcpgateway.schemas import GatewayCreate, ToolCreate from mcpgateway.services.gateway_service import GatewayNameConflictError -from mcpgateway.services.server_service import ServerNameConflictError +from mcpgateway.services.import_service import ConflictStrategy, ImportConflictError, ImportError, ImportService, ImportStatus, ImportValidationError from mcpgateway.services.prompt_service import PromptNameConflictError from mcpgateway.services.resource_service import ResourceURIConflictError -from mcpgateway.schemas import ToolCreate, GatewayCreate +from mcpgateway.services.server_service import ServerNameConflictError +from mcpgateway.services.tool_service import ToolNameConflictError @pytest.fixture @@ -344,8 +341,9 @@ async def test_validate_import_data_invalid_entity_structure(import_service): @pytest.mark.asyncio async def test_rekey_auth_data_success(import_service): """Test successful authentication data re-keying.""" - from mcpgateway.utils.services_auth import encode_auth + # First-Party from mcpgateway.config import settings + from mcpgateway.utils.services_auth import encode_auth # Store original secret original_secret = settings.auth_encryption_secret @@ -595,6 +593,7 @@ async def test_import_service_initialization(import_service): @pytest.mark.asyncio async def test_import_with_rekey_secret(import_service, mock_db): """Test import with authentication re-keying.""" + # First-Party from mcpgateway.utils.services_auth import encode_auth # Create tool with auth data @@ -1910,7 +1909,10 @@ async def test_root_conflict_update_or_rename_strategy(import_service): @pytest.mark.asyncio async def test_gateway_auth_conversion_basic(import_service): """Test gateway conversion with basic auth.""" + # Standard import base64 + + # First-Party from mcpgateway.utils.services_auth import encode_auth # Create basic auth data @@ -1934,6 +1936,7 @@ async def test_gateway_auth_conversion_basic(import_service): @pytest.mark.asyncio async def test_gateway_auth_conversion_bearer(import_service): """Test gateway conversion with bearer auth.""" + # First-Party from mcpgateway.utils.services_auth import encode_auth # Create bearer auth data @@ -1956,6 +1959,7 @@ async def test_gateway_auth_conversion_bearer(import_service): @pytest.mark.asyncio async def test_gateway_auth_conversion_authheaders_single(import_service): """Test gateway conversion with single custom auth header.""" + # First-Party from mcpgateway.utils.services_auth import encode_auth # Create auth headers data (single header) @@ -1979,6 +1983,7 @@ async def test_gateway_auth_conversion_authheaders_single(import_service): @pytest.mark.asyncio async def test_gateway_auth_conversion_authheaders_multiple(import_service): """Test gateway conversion with multiple custom auth headers.""" + # First-Party from mcpgateway.utils.services_auth import encode_auth # Create auth headers data (multiple headers) @@ -2018,6 +2023,7 @@ async def test_gateway_auth_conversion_decode_error(import_service): @pytest.mark.asyncio async def test_gateway_update_auth_conversion(import_service): """Test gateway update conversion with auth data.""" + # First-Party from mcpgateway.utils.services_auth import encode_auth # Test with bearer auth @@ -2142,7 +2148,10 @@ async def test_resource_update_conversion(import_service): @pytest.mark.asyncio async def test_gateway_update_auth_conversion_basic_and_headers(import_service): """Test gateway update conversion with basic auth and custom headers.""" + # Standard import base64 + + # First-Party from mcpgateway.utils.services_auth import encode_auth # Test basic auth in gateway update diff --git a/tests/unit/mcpgateway/services/test_log_storage_service.py b/tests/unit/mcpgateway/services/test_log_storage_service.py index f08ef72f2..7f8df491a 100644 --- a/tests/unit/mcpgateway/services/test_log_storage_service.py +++ b/tests/unit/mcpgateway/services/test_log_storage_service.py @@ -13,6 +13,8 @@ import json import sys from unittest.mock import patch + +# Third-Party import pytest # First-Party @@ -722,6 +724,7 @@ async def test_notify_subscribers_dead_queue(): service = LogStorageService() # Create a mock queue that raises an exception + # Standard from unittest.mock import MagicMock mock_queue = MagicMock() mock_queue.put_nowait.side_effect = Exception("Queue is broken") diff --git a/tests/unit/mcpgateway/services/test_logging_service_comprehensive.py b/tests/unit/mcpgateway/services/test_logging_service_comprehensive.py index 1caf21ac6..3dc4f1527 100644 --- a/tests/unit/mcpgateway/services/test_logging_service_comprehensive.py +++ b/tests/unit/mcpgateway/services/test_logging_service_comprehensive.py @@ -486,9 +486,12 @@ async def test_file_handler_no_folder(): @pytest.mark.asyncio async def test_storage_handler_emit(): """Test StorageHandler emit function.""" - from mcpgateway.services.logging_service import StorageHandler + # Standard from unittest.mock import AsyncMock, MagicMock + # First-Party + from mcpgateway.services.logging_service import StorageHandler + # Create mock storage mock_storage = AsyncMock() handler = StorageHandler(mock_storage) @@ -525,6 +528,7 @@ async def test_storage_handler_emit(): @pytest.mark.asyncio async def test_storage_handler_emit_no_storage(): """Test StorageHandler emit with no storage.""" + # First-Party from mcpgateway.services.logging_service import StorageHandler handler = StorageHandler(None) @@ -547,9 +551,12 @@ async def test_storage_handler_emit_no_storage(): @pytest.mark.asyncio async def test_storage_handler_emit_no_loop(): """Test StorageHandler emit without a running event loop.""" - from mcpgateway.services.logging_service import StorageHandler + # Standard from unittest.mock import AsyncMock + # First-Party + from mcpgateway.services.logging_service import StorageHandler + mock_storage = AsyncMock() handler = StorageHandler(mock_storage) @@ -573,9 +580,12 @@ async def test_storage_handler_emit_no_loop(): @pytest.mark.asyncio async def test_storage_handler_emit_format_error(): """Test StorageHandler emit with format error.""" - from mcpgateway.services.logging_service import StorageHandler + # Standard from unittest.mock import AsyncMock, MagicMock + # First-Party + from mcpgateway.services.logging_service import StorageHandler + mock_storage = AsyncMock() handler = StorageHandler(mock_storage) @@ -655,6 +665,7 @@ async def test_get_storage(): @pytest.mark.asyncio async def test_notify_with_storage(): """Test notify method with storage enabled.""" + # Standard from unittest.mock import AsyncMock service = LoggingService() diff --git a/tests/unit/mcpgateway/services/test_permission_fallback.py b/tests/unit/mcpgateway/services/test_permission_fallback.py index de9b9c56c..fbd90ef34 100644 --- a/tests/unit/mcpgateway/services/test_permission_fallback.py +++ b/tests/unit/mcpgateway/services/test_permission_fallback.py @@ -2,10 +2,10 @@ """Test permission fallback functionality for regular users.""" # Standard -import pytest from unittest.mock import AsyncMock, MagicMock, patch # Third-Party +import pytest from sqlalchemy.orm import Session # First-Party @@ -103,6 +103,7 @@ async def test_explicit_rbac_permissions_override_fallback(self, permission_serv @pytest.mark.asyncio async def test_platform_admin_virtual_user_recognition(self, permission_service): """Test that platform admin virtual user is recognized by RBAC checks.""" + # First-Party from mcpgateway.config import settings platform_admin_email = getattr(settings, "platform_admin_email", "admin@example.com") @@ -120,6 +121,7 @@ async def test_platform_admin_virtual_user_recognition(self, permission_service) @pytest.mark.asyncio async def test_platform_admin_check_admin_permission(self, permission_service): """Test that platform admin passes check_admin_permission even when virtual.""" + # First-Party from mcpgateway.config import settings platform_admin_email = getattr(settings, "platform_admin_email", "admin@example.com") diff --git a/tests/unit/mcpgateway/services/test_prompt_service_extended.py b/tests/unit/mcpgateway/services/test_prompt_service_extended.py index ccbc29136..deb025244 100644 --- a/tests/unit/mcpgateway/services/test_prompt_service_extended.py +++ b/tests/unit/mcpgateway/services/test_prompt_service_extended.py @@ -91,10 +91,12 @@ async def test_register_prompt_name_conflict(self): # Test method exists and is async assert hasattr(service, 'register_prompt') assert callable(getattr(service, 'register_prompt')) + # Standard import asyncio assert asyncio.iscoroutinefunction(service.register_prompt) # Test method parameters + # Standard import inspect sig = inspect.signature(service.register_prompt) assert 'db' in sig.parameters @@ -126,6 +128,7 @@ async def test_get_prompt_not_found(self): # Test method exists and is async assert hasattr(service, 'get_prompt') assert callable(getattr(service, 'get_prompt')) + # Standard import asyncio assert asyncio.iscoroutinefunction(service.get_prompt) @@ -135,6 +138,7 @@ async def test_get_prompt_inactive_without_include_inactive(self): service = PromptService() # Test method signature + # Standard import inspect sig = inspect.signature(service.get_prompt) assert 'name' in sig.parameters @@ -148,6 +152,7 @@ async def test_update_prompt_not_found(self): # Test method exists and is async assert hasattr(service, 'update_prompt') assert callable(getattr(service, 'update_prompt')) + # Standard import asyncio assert asyncio.iscoroutinefunction(service.update_prompt) @@ -157,6 +162,7 @@ async def test_update_prompt_name_conflict(self): service = PromptService() # Test method parameters + # Standard import inspect sig = inspect.signature(service.update_prompt) assert 'name' in sig.parameters @@ -187,6 +193,7 @@ async def test_toggle_prompt_status_no_change_needed(self): service = PromptService() # Test method is async + # Standard import asyncio assert asyncio.iscoroutinefunction(service.toggle_prompt_status) @@ -198,6 +205,7 @@ async def test_delete_prompt_not_found(self): # Test method exists and is async assert hasattr(service, 'delete_prompt') assert callable(getattr(service, 'delete_prompt')) + # Standard import asyncio assert asyncio.iscoroutinefunction(service.delete_prompt) @@ -207,6 +215,7 @@ async def test_delete_prompt_rollback_on_error(self): service = PromptService() # Test method parameters + # Standard import inspect sig = inspect.signature(service.delete_prompt) assert 'name' in sig.parameters @@ -220,6 +229,7 @@ async def test_render_prompt_template_rendering_error(self): # Test method exists and is async (get_prompt does the rendering) assert hasattr(service, 'get_prompt') assert callable(getattr(service, 'get_prompt')) + # Standard import asyncio assert asyncio.iscoroutinefunction(service.get_prompt) @@ -232,6 +242,7 @@ async def test_render_prompt_plugin_violation(self): assert hasattr(service, '_plugin_manager') # Test method parameters + # Standard import inspect sig = inspect.signature(service.get_prompt) assert 'name' in sig.parameters @@ -245,6 +256,7 @@ async def test_record_prompt_metric_error_handling(self): # Test method exists and is async assert hasattr(service, 'aggregate_metrics') assert callable(getattr(service, 'aggregate_metrics')) + # Standard import asyncio assert asyncio.iscoroutinefunction(service.aggregate_metrics) @@ -256,6 +268,7 @@ async def test_get_prompt_metrics_not_found(self): # Test method exists and is async assert hasattr(service, 'reset_metrics') assert callable(getattr(service, 'reset_metrics')) + # Standard import asyncio assert asyncio.iscoroutinefunction(service.reset_metrics) @@ -265,6 +278,7 @@ async def test_get_prompt_metrics_inactive_without_include_inactive(self): service = PromptService() # Test method signature + # Standard import inspect sig = inspect.signature(service.get_prompt_details) assert 'name' in sig.parameters diff --git a/tests/unit/mcpgateway/services/test_resource_service.py b/tests/unit/mcpgateway/services/test_resource_service.py index d0cdb6fc8..eb4786c29 100644 --- a/tests/unit/mcpgateway/services/test_resource_service.py +++ b/tests/unit/mcpgateway/services/test_resource_service.py @@ -1304,6 +1304,7 @@ class TestResourceServiceMetricsExtended: @pytest.mark.asyncio async def test_list_resources_with_tags(self, resource_service, mock_db, mock_resource): """Test listing resources with tag filtering.""" + # Third-Party from sqlalchemy import func # Mock query chain diff --git a/tests/unit/mcpgateway/services/test_resource_service_plugins.py b/tests/unit/mcpgateway/services/test_resource_service_plugins.py index ab95a267e..b5c43bf9e 100644 --- a/tests/unit/mcpgateway/services/test_resource_service_plugins.py +++ b/tests/unit/mcpgateway/services/test_resource_service_plugins.py @@ -7,11 +7,15 @@ Tests for ResourceService plugin integration. """ +# Standard import os from unittest.mock import AsyncMock, MagicMock, patch + +# Third-Party import pytest from sqlalchemy.orm import Session +# First-Party from mcpgateway.models import ResourceContent from mcpgateway.plugins.framework.models import ( PluginViolation, diff --git a/tests/unit/mcpgateway/services/test_server_service.py b/tests/unit/mcpgateway/services/test_server_service.py index e853fd9e9..77e9b2d26 100644 --- a/tests/unit/mcpgateway/services/test_server_service.py +++ b/tests/unit/mcpgateway/services/test_server_service.py @@ -548,6 +548,7 @@ async def test_reset_metrics(self, server_service, test_db): @pytest.mark.asyncio async def test_register_server_uuid_normalization_standard_format(self, server_service, test_db): """Test server registration with standard UUID format (with dashes) normalizes to hex format.""" + # Standard import uuid as uuid_module # Standard UUID format (with dashes) @@ -621,6 +622,7 @@ def capture_add(server): @pytest.mark.asyncio async def test_register_server_uuid_normalization_hex_format(self, server_service, test_db): """Test server registration with hex UUID format works correctly.""" + # Standard import uuid as uuid_module # Standard UUID that will be normalized @@ -778,6 +780,7 @@ async def test_register_server_uuid_normalization_error_handling(self, server_se @pytest.mark.asyncio async def test_update_server_uuid_normalization(self, server_service, test_db): """Test server update with UUID normalization.""" + # Standard import uuid as uuid_module # Mock existing server @@ -849,6 +852,7 @@ async def test_update_server_uuid_normalization(self, server_service, test_db): def test_uuid_normalization_edge_cases(self, server_service): """Test edge cases in UUID normalization logic.""" + # Standard import uuid as uuid_module # Test various UUID formats that should all normalize correctly diff --git a/tests/unit/mcpgateway/services/test_sso_admin_assignment.py b/tests/unit/mcpgateway/services/test_sso_admin_assignment.py index c11acf008..8979d7d88 100644 --- a/tests/unit/mcpgateway/services/test_sso_admin_assignment.py +++ b/tests/unit/mcpgateway/services/test_sso_admin_assignment.py @@ -2,10 +2,10 @@ """Test SSO admin privilege assignment functionality.""" # Standard -import pytest from unittest.mock import AsyncMock, MagicMock, patch # Third-Party +import pytest from sqlalchemy.orm import Session # First-Party diff --git a/tests/unit/mcpgateway/services/test_sso_approval_workflow.py b/tests/unit/mcpgateway/services/test_sso_approval_workflow.py index 530dc3a4f..dcf716703 100644 --- a/tests/unit/mcpgateway/services/test_sso_approval_workflow.py +++ b/tests/unit/mcpgateway/services/test_sso_approval_workflow.py @@ -2,11 +2,11 @@ """Test SSO user approval workflow functionality.""" # Standard -import pytest from datetime import datetime, timedelta from unittest.mock import AsyncMock, MagicMock, patch # Third-Party +import pytest from sqlalchemy.orm import Session # First-Party diff --git a/tests/unit/mcpgateway/services/test_team_invitation_service.py b/tests/unit/mcpgateway/services/test_team_invitation_service.py index 2da87f263..4d9cf4694 100644 --- a/tests/unit/mcpgateway/services/test_team_invitation_service.py +++ b/tests/unit/mcpgateway/services/test_team_invitation_service.py @@ -10,9 +10,9 @@ # Standard from datetime import datetime, timedelta from unittest.mock import MagicMock, patch -import pytest # Third-Party +import pytest from sqlalchemy.orm import Session # First-Party diff --git a/tests/unit/mcpgateway/services/test_team_management_service.py b/tests/unit/mcpgateway/services/test_team_management_service.py index cc095528d..67828ddba 100644 --- a/tests/unit/mcpgateway/services/test_team_management_service.py +++ b/tests/unit/mcpgateway/services/test_team_management_service.py @@ -9,9 +9,9 @@ # Standard from unittest.mock import MagicMock, patch -import pytest # Third-Party +import pytest from sqlalchemy.orm import Session # First-Party @@ -112,10 +112,15 @@ async def test_create_team_success(self, service, mock_db): mock_db.flush.return_value = None mock_db.commit.return_value = None + # Mock the query for existing inactive teams to return None (no existing team) + mock_db.query.return_value.filter.return_value.first.return_value = None + with patch('mcpgateway.services.team_management_service.EmailTeam') as MockTeam, \ - patch('mcpgateway.services.team_management_service.EmailTeamMember') as MockMember: + patch('mcpgateway.services.team_management_service.EmailTeamMember') as MockMember, \ + patch('mcpgateway.utils.create_slug.slugify') as mock_slugify: MockTeam.return_value = mock_team + mock_slugify.return_value = "test-team" result = await service.create_team( name="Test Team", @@ -143,9 +148,13 @@ async def test_create_team_invalid_visibility(self, service): @pytest.mark.asyncio async def test_create_team_database_error(self, service, mock_db): """Test team creation with database error.""" + # Mock the query for existing inactive teams to return None first + mock_db.query.return_value.filter.return_value.first.return_value = None mock_db.add.side_effect = Exception("Database error") - with patch('mcpgateway.services.team_management_service.EmailTeam'): + with patch('mcpgateway.services.team_management_service.EmailTeam'), \ + patch('mcpgateway.utils.create_slug.slugify') as mock_slugify: + mock_slugify.return_value = "test-team" with pytest.raises(Exception): await service.create_team( name="Test Team", @@ -160,12 +169,17 @@ async def test_create_team_with_settings_defaults(self, service, mock_db): """Test team creation uses settings defaults.""" mock_team = MagicMock(spec=EmailTeam) + # Mock the query for existing inactive teams to return None + mock_db.query.return_value.filter.return_value.first.return_value = None + with patch('mcpgateway.services.team_management_service.settings') as mock_settings, \ patch('mcpgateway.services.team_management_service.EmailTeam') as MockTeam, \ - patch('mcpgateway.services.team_management_service.EmailTeamMember'): + patch('mcpgateway.services.team_management_service.EmailTeamMember'), \ + patch('mcpgateway.utils.create_slug.slugify') as mock_slugify: mock_settings.max_members_per_team = 50 MockTeam.return_value = mock_team + mock_slugify.return_value = "test-team" await service.create_team( name="Test Team", @@ -177,6 +191,49 @@ async def test_create_team_with_settings_defaults(self, service, mock_db): call_kwargs = MockTeam.call_args[1] assert call_kwargs['max_members'] == 50 + @pytest.mark.asyncio + async def test_create_team_reactivates_existing_inactive_team(self, service, mock_db): + """Test that creating a team with same name as inactive team reactivates it.""" + # Mock existing inactive team + mock_existing_team = MagicMock(spec=EmailTeam) + mock_existing_team.id = "existing_team_id" + mock_existing_team.name = "Old Team Name" + mock_existing_team.is_active = False + + # Mock existing inactive membership + mock_existing_membership = MagicMock(spec=EmailTeamMember) + mock_existing_membership.team_id = "existing_team_id" + mock_existing_membership.user_email = "admin@example.com" + mock_existing_membership.is_active = False + + # Setup mock queries to return existing inactive team and membership + mock_queries = [mock_existing_team, mock_existing_membership] + mock_db.query.return_value.filter.return_value.first.side_effect = mock_queries + + with patch('mcpgateway.utils.create_slug.slugify') as mock_slugify, \ + patch('mcpgateway.services.team_management_service.utc_now') as mock_utc_now: + + mock_slugify.return_value = "test-team" + mock_utc_now.return_value = "2023-01-01T00:00:00Z" + + result = await service.create_team( + name="Test Team", + description="A reactivated team", + created_by="admin@example.com", + visibility="public" + ) + + # Verify the existing team was reactivated with new details + assert result == mock_existing_team + assert mock_existing_team.name == "Test Team" + assert mock_existing_team.description == "A reactivated team" + assert mock_existing_team.visibility == "public" + assert mock_existing_team.is_active is True + + # Verify existing membership was reactivated + assert mock_existing_membership.role == "owner" + assert mock_existing_membership.is_active is True + # ========================================================================= # Team Retrieval Tests # ========================================================================= diff --git a/tests/unit/mcpgateway/test_admin.py b/tests/unit/mcpgateway/test_admin.py index fcc9cb44b..02521a5cf 100644 --- a/tests/unit/mcpgateway/test_admin.py +++ b/tests/unit/mcpgateway/test_admin.py @@ -26,8 +26,7 @@ from sqlalchemy.orm import Session # First-Party -from mcpgateway.db import GlobalConfig -from mcpgateway.admin import ( +from mcpgateway.admin import ( # admin_get_metrics, admin_add_a2a_agent, admin_add_gateway, admin_add_prompt, @@ -48,26 +47,23 @@ admin_export_selective, admin_get_gateway, admin_get_import_status, - admin_get_logs, admin_get_log_file, - admin_stream_logs, - admin_import_configuration, - admin_list_import_statuses, - # admin_get_metrics, - get_aggregated_metrics, - get_global_passthrough_headers, + admin_get_logs, admin_get_prompt, admin_get_resource, admin_get_server, admin_get_tool, + admin_import_configuration, admin_import_tools, admin_list_a2a_agents, admin_list_gateways, + admin_list_import_statuses, admin_list_prompts, admin_list_resources, admin_list_servers, admin_list_tools, admin_reset_metrics, + admin_stream_logs, admin_test_a2a_agent, admin_test_gateway, admin_toggle_a2a_agent, @@ -77,8 +73,11 @@ admin_toggle_server, admin_toggle_tool, admin_ui, + get_aggregated_metrics, + get_global_passthrough_headers, update_global_passthrough_headers, ) +from mcpgateway.db import GlobalConfig from mcpgateway.schemas import ( GatewayTestRequest, GlobalConfigRead, @@ -88,7 +87,13 @@ ServerMetrics, ToolMetrics, ) +from mcpgateway.services.a2a_service import A2AAgentError, A2AAgentNameConflictError, A2AAgentService +from mcpgateway.services.export_service import ExportError, ExportService from mcpgateway.services.gateway_service import GatewayConnectionError, GatewayService +from mcpgateway.services.import_service import ConflictStrategy +from mcpgateway.services.import_service import ImportError as ImportServiceError +from mcpgateway.services.import_service import ImportService +from mcpgateway.services.logging_service import LoggingService from mcpgateway.services.prompt_service import PromptService from mcpgateway.services.resource_service import ResourceService from mcpgateway.services.root_service import RootService @@ -98,13 +103,9 @@ ToolNotFoundError, ToolService, ) -from mcpgateway.services.a2a_service import A2AAgentError, A2AAgentNameConflictError, A2AAgentService -from mcpgateway.services.export_service import ExportError, ExportService -from mcpgateway.services.import_service import ImportError as ImportServiceError, ConflictStrategy, ImportService -from mcpgateway.services.logging_service import LoggingService -from mcpgateway.utils.passthrough_headers import PassthroughHeadersError from mcpgateway.utils.error_formatter import ErrorFormatter from mcpgateway.utils.metadata_capture import MetadataCapture +from mcpgateway.utils.passthrough_headers import PassthroughHeadersError class FakeForm(dict): @@ -552,6 +553,7 @@ class TestAdminBulkImportRoutes: def setup_method(self): """Clear rate limit storage before each test.""" + # First-Party from mcpgateway.admin import rate_limit_storage rate_limit_storage.clear() @@ -593,9 +595,12 @@ async def test_bulk_import_success(self, mock_register_tool, mock_request, mock_ @patch.object(ToolService, "register_tool") async def test_bulk_import_partial_failure(self, mock_register_tool, mock_request, mock_db): """Test bulk import with some tools failing validation.""" - from mcpgateway.services.tool_service import ToolError + # Third-Party from sqlalchemy.exc import IntegrityError + # First-Party + from mcpgateway.services.tool_service import ToolError + # First tool succeeds, second fails with IntegrityError, third fails with ToolError mock_register_tool.side_effect = [ None, # First tool succeeds @@ -769,6 +774,7 @@ async def test_bulk_import_unexpected_exception(self, mock_register_tool, mock_r async def test_bulk_import_rate_limiting(self, mock_request, mock_db): """Test that bulk import endpoint has rate limiting.""" + # First-Party from mcpgateway.admin import admin_import_tools # Check that the function has rate_limit decorator @@ -1549,11 +1555,13 @@ class TestRateLimiting: def setup_method(self): """Clear rate limit storage before each test.""" + # First-Party from mcpgateway.admin import rate_limit_storage rate_limit_storage.clear() async def test_rate_limit_exceeded(self, mock_request, mock_db): """Test rate limiting when limit is exceeded.""" + # First-Party from mcpgateway.admin import rate_limit # Create a test function with rate limiting @@ -1578,6 +1586,7 @@ async def test_endpoint(*args, request=None, **kwargs): async def test_rate_limit_with_no_client(self, mock_db): """Test rate limiting when request has no client.""" + # First-Party from mcpgateway.admin import rate_limit @rate_limit(requests_per_minute=1) @@ -1594,9 +1603,12 @@ async def test_endpoint(*args, request=None, **kwargs): async def test_rate_limit_cleanup(self, mock_request, mock_db): """Test that old rate limit entries are cleaned up.""" - from mcpgateway.admin import rate_limit, rate_limit_storage + # Standard import time + # First-Party + from mcpgateway.admin import rate_limit, rate_limit_storage + @rate_limit(requests_per_minute=10) async def test_endpoint(*args, request=None, **kwargs): return "success" @@ -1632,6 +1644,7 @@ async def _test_get_global_passthrough_headers_existing_config(self, mock_db): mock_config.passthrough_headers = ["X-Custom-Header", "X-Auth-Token"] mock_db.query.return_value.first.return_value = mock_config + # First-Party from mcpgateway.admin import get_global_passthrough_headers result = await get_global_passthrough_headers(db=mock_db, _user="test-user") @@ -1644,6 +1657,7 @@ async def _test_get_global_passthrough_headers_no_config(self, mock_db): # Mock no existing config mock_db.query.return_value.first.return_value = None + # First-Party from mcpgateway.admin import get_global_passthrough_headers result = await get_global_passthrough_headers(db=mock_db, _user="test-user") @@ -1658,6 +1672,7 @@ async def _test_update_global_passthrough_headers_new_config(self, mock_request, config_update = GlobalConfigUpdate(passthrough_headers=["X-New-Header"]) + # First-Party from mcpgateway.admin import update_global_passthrough_headers result = await update_global_passthrough_headers(request=mock_request, config_update=config_update, db=mock_db, _user="test-user") @@ -1677,6 +1692,7 @@ async def _test_update_global_passthrough_headers_existing_config(self, mock_req config_update = GlobalConfigUpdate(passthrough_headers=["X-Updated-Header"]) + # First-Party from mcpgateway.admin import update_global_passthrough_headers result = await update_global_passthrough_headers(request=mock_request, config_update=config_update, db=mock_db, _user="test-user") @@ -1694,6 +1710,7 @@ async def _test_update_global_passthrough_headers_integrity_error(self, mock_req config_update = GlobalConfigUpdate(passthrough_headers=["X-Header"]) + # First-Party from mcpgateway.admin import update_global_passthrough_headers with pytest.raises(HTTPException) as excinfo: await update_global_passthrough_headers(request=mock_request, config_update=config_update, db=mock_db, _user="test-user") @@ -1710,6 +1727,7 @@ async def _test_update_global_passthrough_headers_validation_error(self, mock_re config_update = GlobalConfigUpdate(passthrough_headers=["X-Header"]) + # First-Party from mcpgateway.admin import update_global_passthrough_headers with pytest.raises(HTTPException) as excinfo: await update_global_passthrough_headers(request=mock_request, config_update=config_update, db=mock_db, _user="test-user") @@ -1726,6 +1744,7 @@ async def _test_update_global_passthrough_headers_passthrough_error(self, mock_r config_update = GlobalConfigUpdate(passthrough_headers=["X-Header"]) + # First-Party from mcpgateway.admin import update_global_passthrough_headers with pytest.raises(HTTPException) as excinfo: await update_global_passthrough_headers(request=mock_request, config_update=config_update, db=mock_db, _user="test-user") @@ -1741,6 +1760,7 @@ class TestA2AAgentManagement: @patch.object(A2AAgentService, "list_agents") async def _test_admin_list_a2a_agents_enabled(self, mock_list_agents, mock_db): """Test listing A2A agents when A2A is enabled.""" + # First-Party from mcpgateway.admin import admin_list_a2a_agents # Mock agent data @@ -1763,6 +1783,7 @@ async def _test_admin_list_a2a_agents_enabled(self, mock_list_agents, mock_db): @patch("mcpgateway.admin.a2a_service", None) async def test_admin_list_a2a_agents_disabled(self, mock_db): """Test listing A2A agents when A2A is disabled.""" + # First-Party from mcpgateway.admin import admin_list_a2a_agents result = await admin_list_a2a_agents(include_inactive=False, tags=None, db=mock_db, user="test-user") @@ -1774,6 +1795,7 @@ async def test_admin_list_a2a_agents_disabled(self, mock_db): @patch("mcpgateway.admin.a2a_service") async def _test_admin_add_a2a_agent_success(self, mock_a2a_service, mock_request, mock_db): """Test successfully adding A2A agent.""" + # First-Party from mcpgateway.admin import admin_add_a2a_agent # Mock form data @@ -1797,6 +1819,7 @@ async def _test_admin_add_a2a_agent_success(self, mock_a2a_service, mock_request @patch.object(A2AAgentService, "register_agent") async def test_admin_add_a2a_agent_validation_error(self, mock_register_agent, mock_request, mock_db): """Test adding A2A agent with validation error.""" + # First-Party from mcpgateway.admin import admin_add_a2a_agent mock_register_agent.side_effect = ValidationError.from_exception_data("test", []) @@ -1814,6 +1837,7 @@ async def test_admin_add_a2a_agent_validation_error(self, mock_register_agent, m @patch.object(A2AAgentService, "register_agent") async def test_admin_add_a2a_agent_name_conflict_error(self, mock_register_agent, mock_request, mock_db): """Test adding A2A agent with name conflict.""" + # First-Party from mcpgateway.admin import admin_add_a2a_agent mock_register_agent.side_effect = A2AAgentNameConflictError("Agent name already exists") @@ -1831,6 +1855,7 @@ async def test_admin_add_a2a_agent_name_conflict_error(self, mock_register_agent @patch.object(A2AAgentService, "toggle_agent_status") async def test_admin_toggle_a2a_agent_success(self, mock_toggle_status, mock_request, mock_db): """Test toggling A2A agent status.""" + # First-Party from mcpgateway.admin import admin_toggle_a2a_agent form_data = FakeForm({"activate": "true"}) @@ -1847,6 +1872,7 @@ async def test_admin_toggle_a2a_agent_success(self, mock_toggle_status, mock_req @patch.object(A2AAgentService, "delete_agent") async def test_admin_delete_a2a_agent_success(self, mock_delete_agent, mock_request, mock_db): """Test deleting A2A agent.""" + # First-Party from mcpgateway.admin import admin_delete_a2a_agent form_data = FakeForm({}) @@ -1864,6 +1890,7 @@ async def test_admin_delete_a2a_agent_success(self, mock_delete_agent, mock_requ @patch.object(A2AAgentService, "invoke_agent") async def test_admin_test_a2a_agent_success(self, mock_invoke_agent, mock_get_agent, mock_request, mock_db): """Test testing A2A agent.""" + # First-Party from mcpgateway.admin import admin_test_a2a_agent # Mock agent and invocation @@ -1892,6 +1919,7 @@ class TestExportImportEndpoints: @patch.object(LoggingService, "get_storage") async def _test_admin_export_logs_json(self, mock_get_storage, mock_db): """Test exporting logs in JSON format.""" + # First-Party from mcpgateway.admin import admin_export_logs # Mock log storage @@ -1921,6 +1949,7 @@ async def _test_admin_export_logs_json(self, mock_get_storage, mock_db): @patch.object(LoggingService, "get_storage") async def _test_admin_export_logs_csv(self, mock_get_storage, mock_db): """Test exporting logs in CSV format.""" + # First-Party from mcpgateway.admin import admin_export_logs # Mock log storage @@ -1949,6 +1978,7 @@ async def _test_admin_export_logs_csv(self, mock_get_storage, mock_db): async def test_admin_export_logs_invalid_format(self, mock_db): """Test exporting logs with invalid format.""" + # First-Party from mcpgateway.admin import admin_export_logs with pytest.raises(HTTPException) as excinfo: @@ -1967,6 +1997,7 @@ async def test_admin_export_logs_invalid_format(self, mock_db): @patch.object(ExportService, "export_configuration") async def _test_admin_export_configuration_success(self, mock_export_config, mock_db): """Test successful configuration export.""" + # First-Party from mcpgateway.admin import admin_export_configuration mock_export_config.return_value = { @@ -1996,6 +2027,7 @@ async def _test_admin_export_configuration_success(self, mock_export_config, moc @patch.object(ExportService, "export_configuration") async def _test_admin_export_configuration_export_error(self, mock_export_config, mock_db): """Test configuration export with ExportError.""" + # First-Party from mcpgateway.admin import admin_export_configuration mock_export_config.side_effect = ExportError("Export failed") @@ -2017,6 +2049,7 @@ async def _test_admin_export_configuration_export_error(self, mock_export_config @patch.object(ExportService, "export_selective") async def _test_admin_export_selective_success(self, mock_export_selective, mock_request, mock_db): """Test successful selective export.""" + # First-Party from mcpgateway.admin import admin_export_selective mock_export_selective.return_value = { @@ -2047,6 +2080,7 @@ class TestLoggingEndpoints: @patch.object(LoggingService, "get_storage") async def _test_admin_get_logs_success(self, mock_get_storage, mock_db): """Test getting logs successfully.""" + # First-Party from mcpgateway.admin import admin_get_logs # Mock log storage @@ -2079,6 +2113,7 @@ async def _test_admin_get_logs_success(self, mock_get_storage, mock_db): @patch.object(LoggingService, "get_storage") async def _test_admin_get_logs_stream(self, mock_get_storage, mock_db): """Test getting log stream.""" + # First-Party from mcpgateway.admin import admin_stream_logs # Mock log storage @@ -2105,6 +2140,7 @@ async def _test_admin_get_logs_stream(self, mock_get_storage, mock_db): @patch('mcpgateway.config.settings') async def _test_admin_get_logs_file_enabled(self, mock_settings, mock_db): """Test getting log file when file logging is enabled.""" + # First-Party from mcpgateway.admin import admin_get_log_file # Mock settings to enable file logging @@ -2122,6 +2158,7 @@ async def _test_admin_get_logs_file_enabled(self, mock_settings, mock_db): @patch('mcpgateway.config.settings') async def test_admin_get_logs_file_disabled(self, mock_settings, mock_db): """Test getting log file when file logging is disabled.""" + # First-Party from mcpgateway.admin import admin_get_log_file # Mock settings to disable file logging @@ -2431,6 +2468,7 @@ async def test_admin_add_gateway_generic_exception(self, mock_register_gateway, async def test_admin_add_gateway_validation_error_with_context(self, mock_register_gateway, mock_request, mock_db): """Test adding gateway with ValidationError containing context.""" # Create a ValidationError with context + # Third-Party from pydantic_core import InitErrorDetails error_details = [InitErrorDetails( type="value_error", @@ -2463,6 +2501,7 @@ class TestImportConfigurationEndpoints: @patch.object(ImportService, "import_configuration") async def test_admin_import_configuration_success(self, mock_import_config, mock_request, mock_db): """Test successful configuration import.""" + # First-Party from mcpgateway.admin import admin_import_configuration # Mock import status @@ -2498,6 +2537,7 @@ async def test_admin_import_configuration_success(self, mock_import_config, mock async def test_admin_import_configuration_missing_import_data(self, mock_request, mock_db): """Test import configuration with missing import_data.""" + # First-Party from mcpgateway.admin import admin_import_configuration # Mock request body without import_data @@ -2515,6 +2555,7 @@ async def test_admin_import_configuration_missing_import_data(self, mock_request async def test_admin_import_configuration_invalid_conflict_strategy(self, mock_request, mock_db): """Test import configuration with invalid conflict strategy.""" + # First-Party from mcpgateway.admin import admin_import_configuration request_body = { @@ -2532,6 +2573,7 @@ async def test_admin_import_configuration_invalid_conflict_strategy(self, mock_r @patch.object(ImportService, "import_configuration") async def test_admin_import_configuration_import_service_error(self, mock_import_config, mock_request, mock_db): """Test import configuration with ImportServiceError.""" + # First-Party from mcpgateway.admin import admin_import_configuration mock_import_config.side_effect = ImportServiceError("Import validation failed") @@ -2551,6 +2593,7 @@ async def test_admin_import_configuration_import_service_error(self, mock_import @patch.object(ImportService, "import_configuration") async def test_admin_import_configuration_with_user_dict(self, mock_import_config, mock_request, mock_db): """Test import configuration with user as dict.""" + # First-Party from mcpgateway.admin import admin_import_configuration mock_status = MagicMock() @@ -2577,6 +2620,7 @@ async def test_admin_import_configuration_with_user_dict(self, mock_import_confi @patch.object(ImportService, "get_import_status") async def test_admin_get_import_status_success(self, mock_get_status, mock_db): """Test getting import status successfully.""" + # First-Party from mcpgateway.admin import admin_get_import_status mock_status = MagicMock() @@ -2598,6 +2642,7 @@ async def test_admin_get_import_status_success(self, mock_get_status, mock_db): @patch.object(ImportService, "get_import_status") async def test_admin_get_import_status_not_found(self, mock_get_status, mock_db): """Test getting import status when not found.""" + # First-Party from mcpgateway.admin import admin_get_import_status mock_get_status.return_value = None @@ -2611,6 +2656,7 @@ async def test_admin_get_import_status_not_found(self, mock_get_status, mock_db) @patch.object(ImportService, "list_import_statuses") async def test_admin_list_import_statuses(self, mock_list_statuses, mock_db): """Test listing all import statuses.""" + # First-Party from mcpgateway.admin import admin_list_import_statuses mock_status1 = MagicMock() @@ -2659,7 +2705,8 @@ class TestSetLoggingService: def test_set_logging_service(self): """Test setting the logging service.""" - from mcpgateway.admin import set_logging_service, logging_service, LOGGER + # First-Party + from mcpgateway.admin import LOGGER, logging_service, set_logging_service # Create mock logging service mock_service = MagicMock(spec=LoggingService) @@ -2670,6 +2717,7 @@ def test_set_logging_service(self): set_logging_service(mock_service) # Verify global variables were updated + # First-Party from mcpgateway import admin assert admin.logging_service == mock_service assert admin.LOGGER == mock_logger diff --git a/tests/unit/mcpgateway/test_cli_export_import_coverage.py b/tests/unit/mcpgateway/test_cli_export_import_coverage.py index 323453923..dfa759cb5 100644 --- a/tests/unit/mcpgateway/test_cli_export_import_coverage.py +++ b/tests/unit/mcpgateway/test_cli_export_import_coverage.py @@ -9,19 +9,17 @@ # Standard import argparse +import json import os -import tempfile from pathlib import Path -from unittest.mock import AsyncMock, patch, MagicMock -import json +import tempfile +from unittest.mock import AsyncMock, MagicMock, patch # Third-Party import pytest # First-Party -from mcpgateway.cli_export_import import ( - create_parser, get_auth_token, AuthenticationError, CLIError -) +from mcpgateway.cli_export_import import AuthenticationError, CLIError, create_parser, get_auth_token @pytest.mark.asyncio @@ -190,9 +188,12 @@ def test_parser_subcommands_exist(): def test_main_with_subcommands_export(): """Test main_with_subcommands with export.""" - from mcpgateway.cli_export_import import main_with_subcommands + # Standard import sys + # First-Party + from mcpgateway.cli_export_import import main_with_subcommands + with patch.object(sys, 'argv', ['mcpgateway', 'export', '--help']): with patch('mcpgateway.cli_export_import.asyncio.run') as mock_run: mock_run.side_effect = SystemExit(0) # Simulate help exit @@ -202,9 +203,12 @@ def test_main_with_subcommands_export(): def test_main_with_subcommands_import(): """Test main_with_subcommands with import.""" - from mcpgateway.cli_export_import import main_with_subcommands + # Standard import sys + # First-Party + from mcpgateway.cli_export_import import main_with_subcommands + with patch.object(sys, 'argv', ['mcpgateway', 'import', '--help']): with patch('mcpgateway.cli_export_import.asyncio.run') as mock_run: mock_run.side_effect = SystemExit(0) # Simulate help exit @@ -214,9 +218,12 @@ def test_main_with_subcommands_import(): def test_main_with_subcommands_fallback(): """Test main_with_subcommands fallback to original CLI.""" - from mcpgateway.cli_export_import import main_with_subcommands + # Standard import sys + # First-Party + from mcpgateway.cli_export_import import main_with_subcommands + with patch.object(sys, 'argv', ['mcpgateway', '--version']): with patch('mcpgateway.cli.main') as mock_main: main_with_subcommands() @@ -226,6 +233,7 @@ def test_main_with_subcommands_fallback(): @pytest.mark.asyncio async def test_make_authenticated_request_no_auth(): """Test make_authenticated_request when no auth is configured.""" + # First-Party from mcpgateway.cli_export_import import make_authenticated_request with patch('mcpgateway.cli_export_import.get_auth_token', return_value=None): @@ -236,6 +244,7 @@ async def test_make_authenticated_request_no_auth(): # Test the authentication flow by testing the token logic without the full HTTP call def test_make_authenticated_request_auth_logic(): """Test the authentication logic in make_authenticated_request.""" + # First-Party from mcpgateway.cli_export_import import make_authenticated_request # Test that the function creates the right headers for basic auth @@ -265,10 +274,12 @@ async def mock_make_request(method, url, json_data=None, params=None): return {"success": True, "headers": headers} # Replace the function temporarily + # First-Party import mcpgateway.cli_export_import mcpgateway.cli_export_import.make_authenticated_request = mock_make_request try: + # Standard import asyncio result = asyncio.run(mock_make_request("GET", "/test")) assert result["success"] is True @@ -280,6 +291,7 @@ async def mock_make_request(method, url, json_data=None, params=None): def test_make_authenticated_request_bearer_auth_logic(): """Test the bearer authentication logic in make_authenticated_request.""" + # First-Party from mcpgateway.cli_export_import import make_authenticated_request # Test that the function creates the right headers for bearer auth @@ -309,10 +321,12 @@ async def mock_make_request(method, url, json_data=None, params=None): return {"success": True, "headers": headers} # Replace the function temporarily + # First-Party import mcpgateway.cli_export_import mcpgateway.cli_export_import.make_authenticated_request = mock_make_request try: + # Standard import asyncio result = asyncio.run(mock_make_request("POST", "/api")) assert result["success"] is True @@ -325,9 +339,12 @@ async def mock_make_request(method, url, json_data=None, params=None): @pytest.mark.asyncio async def test_export_command_success(): """Test successful export command execution.""" - from mcpgateway.cli_export_import import export_command - import tempfile + # Standard import os + import tempfile + + # First-Party + from mcpgateway.cli_export_import import export_command # Mock export data export_data = { @@ -378,9 +395,12 @@ async def test_export_command_success(): @pytest.mark.asyncio async def test_export_command_with_output_file(): """Test export command with specified output file.""" - from mcpgateway.cli_export_import import export_command - import tempfile + # Standard import json + import tempfile + + # First-Party + from mcpgateway.cli_export_import import export_command export_data = { "metadata": {"entity_counts": {"tools": 1}}, @@ -414,9 +434,12 @@ async def test_export_command_with_output_file(): @pytest.mark.asyncio async def test_export_command_error_handling(): """Test export command error handling.""" - from mcpgateway.cli_export_import import export_command + # Standard import sys + # First-Party + from mcpgateway.cli_export_import import export_command + args = MagicMock() args.types = None args.exclude_types = None @@ -438,9 +461,12 @@ async def test_export_command_error_handling(): @pytest.mark.asyncio async def test_import_command_file_not_found(): """Test import command when input file doesn't exist.""" - from mcpgateway.cli_export_import import import_command + # Standard import sys + # First-Party + from mcpgateway.cli_export_import import import_command + args = MagicMock() args.input_file = "/nonexistent/file.json" @@ -455,9 +481,12 @@ async def test_import_command_file_not_found(): @pytest.mark.asyncio async def test_import_command_success_dry_run(): """Test successful import command in dry-run mode.""" - from mcpgateway.cli_export_import import import_command - import tempfile + # Standard import json + import tempfile + + # First-Party + from mcpgateway.cli_export_import import import_command # Create test import data import_data = { @@ -518,9 +547,12 @@ async def test_import_command_success_dry_run(): @pytest.mark.asyncio async def test_import_command_with_include_parameter(): """Test import command with selective import parameter.""" - from mcpgateway.cli_export_import import import_command - import tempfile + # Standard import json + import tempfile + + # First-Party + from mcpgateway.cli_export_import import import_command import_data = {"tools": [{"name": "test_tool"}]} api_response = { @@ -562,10 +594,13 @@ async def test_import_command_with_include_parameter(): @pytest.mark.asyncio async def test_import_command_with_errors_and_failures(): """Test import command with errors and failures.""" - from mcpgateway.cli_export_import import import_command - import tempfile + # Standard import json import sys + import tempfile + + # First-Party + from mcpgateway.cli_export_import import import_command import_data = {"tools": [{"name": "test_tool"}]} api_response = { @@ -607,9 +642,12 @@ async def test_import_command_with_errors_and_failures(): @pytest.mark.asyncio async def test_import_command_json_parse_error(): """Test import command with invalid JSON file.""" - from mcpgateway.cli_export_import import import_command - import tempfile + # Standard import sys + import tempfile + + # First-Party + from mcpgateway.cli_export_import import import_command # Create file with invalid JSON with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as f: @@ -641,9 +679,12 @@ async def test_import_command_json_parse_error(): def test_main_with_subcommands_no_func_attribute(): """Test main_with_subcommands when args don't have func attribute.""" - from mcpgateway.cli_export_import import main_with_subcommands + # Standard import sys + # First-Party + from mcpgateway.cli_export_import import main_with_subcommands + # Mock parser that returns args without func attribute mock_parser = MagicMock() mock_args = MagicMock() @@ -662,9 +703,12 @@ def test_main_with_subcommands_no_func_attribute(): def test_main_with_subcommands_keyboard_interrupt(): """Test main_with_subcommands handling KeyboardInterrupt.""" - from mcpgateway.cli_export_import import main_with_subcommands + # Standard import sys + # First-Party + from mcpgateway.cli_export_import import main_with_subcommands + mock_parser = MagicMock() mock_args = MagicMock() mock_args.func = MagicMock() @@ -684,9 +728,12 @@ def test_main_with_subcommands_keyboard_interrupt(): def test_main_with_subcommands_include_dependencies_handling(): """Test main_with_subcommands handling of include_dependencies flag.""" - from mcpgateway.cli_export_import import main_with_subcommands + # Standard import sys + # First-Party + from mcpgateway.cli_export_import import main_with_subcommands + mock_parser = MagicMock() mock_args = MagicMock() mock_args.func = MagicMock() diff --git a/tests/unit/mcpgateway/test_coverage_push.py b/tests/unit/mcpgateway/test_coverage_push.py index 0183691f0..f7ca24224 100644 --- a/tests/unit/mcpgateway/test_coverage_push.py +++ b/tests/unit/mcpgateway/test_coverage_push.py @@ -8,12 +8,12 @@ """ # Standard -from unittest.mock import patch, MagicMock +from unittest.mock import MagicMock, patch # Third-Party -import pytest -from fastapi.testclient import TestClient from fastapi import HTTPException +from fastapi.testclient import TestClient +import pytest # First-Party from mcpgateway.main import app, require_api_key @@ -58,11 +58,8 @@ def test_app_basic_properties(): def test_error_handlers(): """Test error handler functions exist.""" - from mcpgateway.main import ( - validation_exception_handler, - request_validation_exception_handler, - database_exception_handler - ) + # First-Party + from mcpgateway.main import database_exception_handler, request_validation_exception_handler, validation_exception_handler # Test handlers exist and are callable assert callable(validation_exception_handler) @@ -72,6 +69,7 @@ def test_error_handlers(): def test_middleware_classes(): """Test middleware classes can be instantiated.""" + # First-Party from mcpgateway.main import DocsAuthMiddleware, MCPPathRewriteMiddleware # Test DocsAuthMiddleware @@ -85,6 +83,7 @@ def test_middleware_classes(): def test_mcp_path_rewrite_middleware(): """Test MCPPathRewriteMiddleware initialization.""" + # First-Party from mcpgateway.main import MCPPathRewriteMiddleware app_mock = MagicMock() @@ -95,11 +94,8 @@ def test_mcp_path_rewrite_middleware(): def test_service_instances(): """Test that service instances exist.""" - from mcpgateway.main import ( - tool_service, resource_service, prompt_service, - gateway_service, root_service, completion_service, - export_service, import_service - ) + # First-Party + from mcpgateway.main import completion_service, export_service, gateway_service, import_service, prompt_service, resource_service, root_service, tool_service # Test all services exist assert tool_service is not None @@ -114,11 +110,8 @@ def test_service_instances(): def test_router_instances(): """Test that router instances exist.""" - from mcpgateway.main import ( - protocol_router, tool_router, resource_router, - prompt_router, gateway_router, root_router, - export_import_router - ) + # First-Party + from mcpgateway.main import export_import_router, gateway_router, prompt_router, protocol_router, resource_router, root_router, tool_router # Test all routers exist assert protocol_router is not None @@ -132,6 +125,7 @@ def test_router_instances(): def test_database_dependency(): """Test database dependency function.""" + # First-Party from mcpgateway.main import get_db # Test function exists and is generator @@ -141,6 +135,7 @@ def test_database_dependency(): def test_cors_settings(): """Test CORS configuration.""" + # First-Party from mcpgateway.main import cors_origins assert isinstance(cors_origins, list) @@ -148,6 +143,7 @@ def test_cors_settings(): def test_template_and_static_setup(): """Test template and static file setup.""" + # First-Party from mcpgateway.main import templates assert templates is not None @@ -156,7 +152,8 @@ def test_template_and_static_setup(): def test_feature_flags(): """Test feature flag variables.""" - from mcpgateway.main import UI_ENABLED, ADMIN_API_ENABLED + # First-Party + from mcpgateway.main import ADMIN_API_ENABLED, UI_ENABLED assert isinstance(UI_ENABLED, bool) assert isinstance(ADMIN_API_ENABLED, bool) @@ -164,6 +161,7 @@ def test_feature_flags(): def test_lifespan_function_exists(): """Test lifespan function exists.""" + # First-Party from mcpgateway.main import lifespan assert callable(lifespan) @@ -171,6 +169,7 @@ def test_lifespan_function_exists(): def test_cache_instances(): """Test cache instances exist.""" + # First-Party from mcpgateway.main import resource_cache, session_registry assert resource_cache is not None diff --git a/tests/unit/mcpgateway/test_display_name_uuid_features.py b/tests/unit/mcpgateway/test_display_name_uuid_features.py index 47802130b..17f087cfb 100644 --- a/tests/unit/mcpgateway/test_display_name_uuid_features.py +++ b/tests/unit/mcpgateway/test_display_name_uuid_features.py @@ -1,15 +1,21 @@ # -*- coding: utf-8 -*- """Tests for displayName and UUID editing features.""" +# Standard +from unittest.mock import AsyncMock, Mock + +# Third-Party import pytest from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker -from unittest.mock import Mock, AsyncMock -from mcpgateway.db import Base, Tool as DbTool, Server as DbServer -from mcpgateway.schemas import ToolCreate, ToolUpdate, ToolRead, ServerCreate, ServerUpdate, ServerRead -from mcpgateway.services.tool_service import ToolService +# First-Party +from mcpgateway.db import Base +from mcpgateway.db import Server as DbServer +from mcpgateway.db import Tool as DbTool +from mcpgateway.schemas import ServerCreate, ServerRead, ServerUpdate, ToolCreate, ToolRead, ToolUpdate from mcpgateway.services.server_service import ServerService +from mcpgateway.services.tool_service import ToolService @pytest.fixture @@ -295,6 +301,7 @@ def test_server_update_schema_with_uuid(self): def test_server_uuid_validation(self): """Test UUID validation in schemas.""" + # First-Party from mcpgateway.schemas import ServerCreate, ServerUpdate # Test valid UUID @@ -326,9 +333,12 @@ class TestServerUUIDNormalization: @pytest.mark.asyncio async def test_server_create_uuid_normalization_standard_format(self, db_session, server_service): """Test server creation with standard UUID format (with dashes) gets normalized to hex format.""" + # Standard import uuid as uuid_module - from mcpgateway.schemas import ServerCreate + + # First-Party from mcpgateway.db import Server as DbServer + from mcpgateway.schemas import ServerCreate # Standard UUID format (with dashes) standard_uuid = "550e8400-e29b-41d4-a716-446655440000" @@ -395,7 +405,10 @@ def capture_add(server): @pytest.mark.asyncio async def test_server_create_uuid_normalization_hex_format(self, db_session, server_service): """Test server creation with UUID in hex format (without dashes) works unchanged.""" + # Standard import uuid as uuid_module + + # First-Party from mcpgateway.schemas import ServerCreate # Hex UUID format (without dashes) - but we need to provide a valid UUID @@ -464,6 +477,7 @@ def capture_add(server): @pytest.mark.asyncio async def test_server_create_auto_generated_uuid(self, db_session, server_service): """Test server creation without custom UUID generates UUID automatically.""" + # First-Party from mcpgateway.schemas import ServerCreate # Mock database operations @@ -524,9 +538,12 @@ def capture_add(server): @pytest.mark.asyncio async def test_server_create_invalid_uuid_format(self, db_session, server_service): """Test server creation with invalid UUID format raises validation error.""" - from mcpgateway.schemas import ServerCreate + # Third-Party from pydantic import ValidationError + # First-Party + from mcpgateway.schemas import ServerCreate + # Test various invalid UUID formats that should raise validation errors invalid_uuids = [ "invalid-uuid-format", @@ -566,6 +583,7 @@ async def test_server_create_invalid_uuid_format(self, db_session, server_servic def test_uuid_normalization_logic(self): """Test the UUID normalization logic directly.""" + # Standard import uuid as uuid_module # Test cases for UUID normalization @@ -596,6 +614,7 @@ def test_uuid_normalization_logic(self): def test_database_storage_format_verification(self, db_session): """Test that UUIDs are stored in the database in the expected hex format.""" + # Standard import uuid as uuid_module # Create a server with standard UUID format @@ -623,7 +642,10 @@ def test_database_storage_format_verification(self, db_session): @pytest.mark.asyncio async def test_comprehensive_uuid_scenarios_with_service(self, db_session, server_service): """Test comprehensive UUID scenarios that would be encountered in practice.""" + # Standard import uuid as uuid_module + + # First-Party from mcpgateway.schemas import ServerCreate test_scenarios = [ @@ -767,6 +789,7 @@ class TestSmartDisplayNameGeneration: def test_generate_display_name_function(self): """Test the display name generation utility function.""" + # First-Party from mcpgateway.utils.display_name import generate_display_name test_cases = [ @@ -787,6 +810,7 @@ def test_generate_display_name_function(self): def test_manual_tool_displayname_preserved(self): """Test that manually specified displayName is preserved.""" + # First-Party from mcpgateway.schemas import ToolCreate # Manual tool with explicit displayName should keep it @@ -803,6 +827,7 @@ def test_manual_tool_displayname_preserved(self): def test_manual_tool_without_displayname(self): """Test that manual tools without displayName get service defaults.""" + # First-Party from mcpgateway.schemas import ToolCreate # Manual tool without displayName (service layer will set default) diff --git a/tests/unit/mcpgateway/test_final_coverage_push.py b/tests/unit/mcpgateway/test_final_coverage_push.py index fb5dcd59f..4be62ee81 100644 --- a/tests/unit/mcpgateway/test_final_coverage_push.py +++ b/tests/unit/mcpgateway/test_final_coverage_push.py @@ -8,15 +8,15 @@ """ # Standard -import tempfile import json -from unittest.mock import patch, MagicMock, AsyncMock +import tempfile +from unittest.mock import AsyncMock, MagicMock, patch # Third-Party import pytest # First-Party -from mcpgateway.models import Role, LogLevel, TextContent, ImageContent, ResourceContent +from mcpgateway.models import ImageContent, LogLevel, ResourceContent, Role, TextContent from mcpgateway.schemas import BaseModelWithConfigDict @@ -99,9 +99,12 @@ class TestModel(BaseModelWithConfigDict): @pytest.mark.asyncio async def test_cli_export_import_main_flows(): """Test CLI export/import main execution flows.""" - from mcpgateway.cli_export_import import main_with_subcommands + # Standard import sys + # First-Party + from mcpgateway.cli_export_import import main_with_subcommands + # Test with no subcommands (should fall back to main CLI) with patch.object(sys, 'argv', ['mcpgateway', '--version']): with patch('mcpgateway.cli.main') as mock_main: @@ -117,9 +120,12 @@ async def test_cli_export_import_main_flows(): @pytest.mark.asyncio async def test_export_command_parameter_building(): """Test export command parameter building logic.""" - from mcpgateway.cli_export_import import export_command + # Standard import argparse + # First-Party + from mcpgateway.cli_export_import import export_command + # Test with all parameters set args = argparse.Namespace( types="tools,gateways", @@ -159,9 +165,12 @@ async def test_export_command_parameter_building(): @pytest.mark.asyncio async def test_import_command_parameter_parsing(): """Test import command parameter parsing logic.""" - from mcpgateway.cli_export_import import import_command + # Standard import argparse + # First-Party + from mcpgateway.cli_export_import import import_command + # Create temp file with valid JSON with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as f: test_data = { @@ -206,6 +215,7 @@ async def test_import_command_parameter_parsing(): def test_utils_coverage(): """Test various utility functions for coverage.""" + # First-Party from mcpgateway.utils.create_slug import slugify # Test slugify variations @@ -224,6 +234,7 @@ def test_utils_coverage(): def test_config_properties(): """Test config module properties.""" + # First-Party from mcpgateway.config import settings # Test basic properties exist @@ -245,6 +256,7 @@ def test_config_properties(): def test_schemas_basic(): """Test basic schema imports.""" + # First-Party from mcpgateway.schemas import ToolCreate # Test class exists @@ -253,9 +265,12 @@ def test_schemas_basic(): def test_db_utility_functions(): """Test database utility functions.""" - from mcpgateway.db import utc_now + # Standard from datetime import datetime, timezone + # First-Party + from mcpgateway.db import utc_now + # Test utc_now function now = utc_now() assert isinstance(now, datetime) @@ -264,7 +279,8 @@ def test_db_utility_functions(): def test_validation_imports(): """Test validation module imports.""" - from mcpgateway.validation import tags, jsonrpc + # First-Party + from mcpgateway.validation import jsonrpc, tags # Test modules can be imported assert tags is not None @@ -273,6 +289,7 @@ def test_validation_imports(): def test_services_init(): """Test services module initialization.""" + # First-Party from mcpgateway.services import __init__ # Just test the module exists @@ -281,8 +298,10 @@ def test_services_init(): def test_cli_module_main_execution(): """Test CLI module main execution path.""" + # Standard import sys + # First-Party # Test __main__ execution path exists from mcpgateway import cli_export_import assert hasattr(cli_export_import, 'main_with_subcommands') diff --git a/tests/unit/mcpgateway/test_main.py b/tests/unit/mcpgateway/test_main.py index 277a00c61..3bac54bff 100644 --- a/tests/unit/mcpgateway/test_main.py +++ b/tests/unit/mcpgateway/test_main.py @@ -175,11 +175,10 @@ def test_client(app): Also overrides RBAC dependencies to bypass permission checks for tests. """ # First-Party - from mcpgateway.main import require_auth - from mcpgateway.middleware.rbac import get_current_user_with_permissions - # Mock user object for RBAC system from mcpgateway.db import EmailUser + from mcpgateway.main import require_auth + from mcpgateway.middleware.rbac import get_current_user_with_permissions mock_user = EmailUser( email="test_user@example.com", full_name="Test User", @@ -192,13 +191,21 @@ def test_client(app): app.dependency_overrides[require_auth] = lambda: "test_user" # Patch the auth function used by DocsAuthMiddleware - from unittest.mock import patch, AsyncMock - from mcpgateway.utils.verify_credentials import require_auth_override + # Standard + from unittest.mock import AsyncMock, patch + + # Third-Party from fastapi import HTTPException, status + # First-Party + from mcpgateway.utils.verify_credentials import require_auth_override + # Create a mock that validates JWT tokens properly async def mock_require_auth_override(auth_header=None, jwt_token=None): + # Third-Party import jwt as jwt_lib + + # First-Party from mcpgateway.config import settings # Try to get token from auth_header or jwt_token @@ -226,6 +233,7 @@ async def mock_require_auth_override(auth_header=None, jwt_token=None): patcher.start() # Override the core auth function used by RBAC system + # First-Party from mcpgateway.auth import get_current_user app.dependency_overrides[get_current_user] = lambda credentials=None, db=None: mock_user @@ -242,6 +250,7 @@ def mock_get_current_user_with_permissions(request=None, credentials=None, jwt_t app.dependency_overrides[get_current_user_with_permissions] = mock_get_current_user_with_permissions # Mock the permission service to always return True for tests + # First-Party from mcpgateway.services.permission_service import PermissionService # Store original method diff --git a/tests/unit/mcpgateway/test_main_extended.py b/tests/unit/mcpgateway/test_main_extended.py index 9d242bc7c..7e3a93a56 100644 --- a/tests/unit/mcpgateway/test_main_extended.py +++ b/tests/unit/mcpgateway/test_main_extended.py @@ -73,6 +73,7 @@ def test_resource_endpoints_error_conditions(self, test_client, auth_headers): """Test resource endpoints with various error conditions.""" # Test resource not found scenario with patch("mcpgateway.main.resource_service.read_resource") as mock_read: + # First-Party from mcpgateway.services.resource_service import ResourceNotFoundError mock_read.side_effect = ResourceNotFoundError("Resource not found") @@ -149,6 +150,7 @@ async def test_startup_without_plugin_manager(self, mock_logging_service): service.shutdown = AsyncMock() # Test lifespan without plugin manager + # First-Party from mcpgateway.main import lifespan async with lifespan(app): pass @@ -237,6 +239,7 @@ def test_websocket_error_scenarios(self, mock_settings): mock_settings.port = 4444 with patch("mcpgateway.main.ResilientHttpClient") as mock_client: + # Standard from types import SimpleNamespace mock_instance = mock_client.return_value @@ -282,6 +285,7 @@ def test_server_toggle_edge_cases(self, test_client, auth_headers): """Test server toggle endpoint edge cases.""" with patch("mcpgateway.main.server_service.toggle_server_status") as mock_toggle: # Create a proper ServerRead model response + # First-Party from mcpgateway.schemas import ServerRead mock_server_data = { @@ -324,11 +328,14 @@ def test_server_toggle_edge_cases(self, test_client, auth_headers): @pytest.fixture def test_client(app): """Test client with auth override for testing protected endpoints.""" + # Standard from unittest.mock import patch + + # First-Party + from mcpgateway.auth import get_current_user + from mcpgateway.db import EmailUser from mcpgateway.main import require_auth from mcpgateway.middleware.rbac import get_current_user_with_permissions - from mcpgateway.db import EmailUser - from mcpgateway.auth import get_current_user # Mock user object for RBAC system mock_user = EmailUser( @@ -363,6 +370,7 @@ def mock_get_current_user_with_permissions(request=None, credentials=None, jwt_t app.dependency_overrides[get_current_user_with_permissions] = mock_get_current_user_with_permissions # Mock the permission service to always return True for tests + # First-Party from mcpgateway.services.permission_service import PermissionService if not hasattr(PermissionService, '_original_check_permission'): PermissionService._original_check_permission = PermissionService.check_permission diff --git a/tests/unit/mcpgateway/test_oauth_manager.py b/tests/unit/mcpgateway/test_oauth_manager.py index 2445e05d7..49eb85007 100644 --- a/tests/unit/mcpgateway/test_oauth_manager.py +++ b/tests/unit/mcpgateway/test_oauth_manager.py @@ -7,14 +7,19 @@ Unit tests for OAuth Manager and Token Storage Service. """ -import pytest +# Standard from datetime import datetime, timedelta -from unittest.mock import AsyncMock, patch, MagicMock, Mock +from unittest.mock import AsyncMock, MagicMock, Mock, patch + +# Third-Party import aiohttp -from mcpgateway.services.oauth_manager import OAuthManager, OAuthError +import pytest + +# First-Party +from mcpgateway.db import OAuthToken +from mcpgateway.services.oauth_manager import OAuthError, OAuthManager from mcpgateway.services.token_storage_service import TokenStorageService from mcpgateway.utils.oauth_encryption import OAuthEncryption -from mcpgateway.db import OAuthToken class TestOAuthManager: @@ -2552,6 +2557,7 @@ def test_is_encrypted_valid_base64_but_not_encrypted(self): encryption = OAuthEncryption("test_key") # Create base64 data that's long enough but not encrypted + # Standard import base64 fake_data = b"a" * 40 # 40 bytes of 'a' base64_fake = base64.urlsafe_b64encode(fake_data).decode() @@ -2579,6 +2585,7 @@ def test_is_encrypted_exception_handling(self): def test_get_oauth_encryption_function(self): """Test the get_oauth_encryption utility function.""" + # First-Party from mcpgateway.utils.oauth_encryption import get_oauth_encryption encryption = get_oauth_encryption("test_secret") diff --git a/tests/unit/mcpgateway/test_observability.py b/tests/unit/mcpgateway/test_observability.py index 98be46e1e..0a6ed06a4 100644 --- a/tests/unit/mcpgateway/test_observability.py +++ b/tests/unit/mcpgateway/test_observability.py @@ -37,7 +37,9 @@ def setup_method(self): def teardown_method(self): """Clean up after each test.""" # Reset global tracer + # First-Party import mcpgateway.observability + # pylint: disable=protected-access mcpgateway.observability._TRACER = None @@ -135,7 +137,9 @@ def test_init_telemetry_otlp_headers_parsing(self): def test_create_span_no_tracer(self): """Test create_span when tracer is not initialized.""" + # First-Party import mcpgateway.observability + # pylint: disable=protected-access mcpgateway.observability._TRACER = None @@ -173,7 +177,9 @@ def test_create_span_with_exception(self): @pytest.mark.asyncio async def test_trace_operation_decorator_no_tracer(self): """Test trace_operation decorator when tracer is not initialized.""" + # First-Party import mcpgateway.observability + # pylint: disable=protected-access mcpgateway.observability._TRACER = None @@ -280,6 +286,7 @@ def test_init_telemetry_exception_handling(self): def test_create_span_none_attributes_filtered(self): """Test that None values in attributes are filtered out.""" + # First-Party import mcpgateway.observability # Setup mock tracer diff --git a/tests/unit/mcpgateway/test_reverse_proxy.py b/tests/unit/mcpgateway/test_reverse_proxy.py index a1e915258..86b9f45a7 100644 --- a/tests/unit/mcpgateway/test_reverse_proxy.py +++ b/tests/unit/mcpgateway/test_reverse_proxy.py @@ -13,7 +13,7 @@ import os import signal import sys -from unittest.mock import AsyncMock, MagicMock, Mock, patch, call +from unittest.mock import AsyncMock, call, MagicMock, Mock, patch # Third-Party import pytest @@ -21,21 +21,21 @@ # First-Party from mcpgateway.reverse_proxy import ( ConnectionState, + DEFAULT_KEEPALIVE_INTERVAL, + DEFAULT_MAX_RETRIES, + DEFAULT_RECONNECT_DELAY, + DEFAULT_REQUEST_TIMEOUT, + ENV_GATEWAY, + ENV_LOG_LEVEL, + ENV_MAX_RETRIES, + ENV_RECONNECT_DELAY, + ENV_TOKEN, + main, MessageType, - ReverseProxyClient, - StdioProcess, parse_args, - main, + ReverseProxyClient, run, - ENV_GATEWAY, - ENV_TOKEN, - ENV_RECONNECT_DELAY, - ENV_MAX_RETRIES, - ENV_LOG_LEVEL, - DEFAULT_RECONNECT_DELAY, - DEFAULT_MAX_RETRIES, - DEFAULT_KEEPALIVE_INTERVAL, - DEFAULT_REQUEST_TIMEOUT, + StdioProcess, ) @@ -515,6 +515,7 @@ async def test_receive_websocket_connection_closed(self): # Import the actual exception class try: + # Third-Party from websockets.exceptions import ConnectionClosed mock_connection.__aiter__.side_effect = ConnectionClosed(None, None) except ImportError: @@ -985,5 +986,6 @@ def test_default_values(self): # Helper function for mocking file operations def mock_open(read_data=""): """Create a mock for open() that returns read_data.""" + # Standard from unittest.mock import mock_open as _mock_open return _mock_open(read_data=read_data) diff --git a/tests/unit/mcpgateway/test_rpc_backward_compatibility.py b/tests/unit/mcpgateway/test_rpc_backward_compatibility.py index e8a0606e3..d2045320c 100644 --- a/tests/unit/mcpgateway/test_rpc_backward_compatibility.py +++ b/tests/unit/mcpgateway/test_rpc_backward_compatibility.py @@ -7,12 +7,15 @@ Test backward compatibility for tool invocation after PR #746. """ +# Standard from unittest.mock import AsyncMock, MagicMock, patch -import pytest +# Third-Party from fastapi.testclient import TestClient +import pytest from sqlalchemy.orm import Session +# First-Party from mcpgateway.main import app diff --git a/tests/unit/mcpgateway/test_rpc_tool_invocation.py b/tests/unit/mcpgateway/test_rpc_tool_invocation.py index 710c37f6f..0a707330e 100644 --- a/tests/unit/mcpgateway/test_rpc_tool_invocation.py +++ b/tests/unit/mcpgateway/test_rpc_tool_invocation.py @@ -7,17 +7,20 @@ Test RPC tool invocation after PR #746 changes. """ +# Standard import json from unittest.mock import AsyncMock, MagicMock, patch -import pytest +# Third-Party from fastapi.testclient import TestClient +import pytest from sqlalchemy.orm import Session +# First-Party +from mcpgateway.config import settings from mcpgateway.main import app from mcpgateway.models import Tool from mcpgateway.services.tool_service import ToolService -from mcpgateway.config import settings @pytest.fixture diff --git a/tests/unit/mcpgateway/test_simple_coverage_boost.py b/tests/unit/mcpgateway/test_simple_coverage_boost.py index 0330842ce..807972322 100644 --- a/tests/unit/mcpgateway/test_simple_coverage_boost.py +++ b/tests/unit/mcpgateway/test_simple_coverage_boost.py @@ -9,7 +9,7 @@ # Standard import sys -from unittest.mock import patch, MagicMock +from unittest.mock import MagicMock, patch # Third-Party import pytest @@ -34,9 +34,12 @@ def test_exception_classes(): @pytest.mark.asyncio async def test_export_command_basic_structure(): """Test export command basic structure without execution.""" - from mcpgateway.cli_export_import import export_command + # Standard import argparse + # First-Party + from mcpgateway.cli_export_import import export_command + # Create minimal args structure args = argparse.Namespace( types=None, @@ -59,10 +62,13 @@ async def test_export_command_basic_structure(): @pytest.mark.asyncio async def test_import_command_basic_structure(): """Test import command basic structure without execution.""" - from mcpgateway.cli_export_import import import_command + # Standard import argparse - import tempfile import json + import tempfile + + # First-Party + from mcpgateway.cli_export_import import import_command # Create test file with tempfile.NamedTemporaryFile(mode='w', suffix='.json', delete=False) as f: @@ -89,6 +95,7 @@ async def test_import_command_basic_structure(): def test_cli_export_import_constants(): """Test CLI module constants and basic imports.""" + # First-Party from mcpgateway.cli_export_import import logger # Test logger exists @@ -100,6 +107,7 @@ def test_cli_export_import_constants(): @pytest.mark.asyncio async def test_make_authenticated_request_structure(): """Test make_authenticated_request basic structure.""" + # First-Party from mcpgateway.cli_export_import import make_authenticated_request # Mock auth token to return None (no auth configured) @@ -110,9 +118,12 @@ async def test_make_authenticated_request_structure(): def test_import_command_file_not_found(): """Test import command with non-existent file.""" - from mcpgateway.cli_export_import import import_command + # Standard import argparse + # First-Party + from mcpgateway.cli_export_import import import_command + # Args with non-existent file args = argparse.Namespace( input_file="/nonexistent/file.json", @@ -124,6 +135,7 @@ def test_import_command_file_not_found(): ) # Should exit with error + # Standard import asyncio with pytest.raises(SystemExit) as exc_info: asyncio.run(import_command(args)) @@ -133,6 +145,7 @@ def test_import_command_file_not_found(): def test_cli_module_imports(): """Test CLI module can be imported and has expected attributes.""" + # First-Party import mcpgateway.cli_export_import as cli_module # Test required functions exist diff --git a/tests/unit/mcpgateway/test_streamable_closedresource_filter.py b/tests/unit/mcpgateway/test_streamable_closedresource_filter.py index ad95ebc87..26faead71 100644 --- a/tests/unit/mcpgateway/test_streamable_closedresource_filter.py +++ b/tests/unit/mcpgateway/test_streamable_closedresource_filter.py @@ -5,10 +5,13 @@ do not spam ERROR logs via the upstream MCP logger. """ +# Standard import logging +# Third-Party import anyio +# First-Party from mcpgateway.services.logging_service import LoggingService diff --git a/tests/unit/mcpgateway/test_translate.py b/tests/unit/mcpgateway/test_translate.py index 896b25724..67742719f 100644 --- a/tests/unit/mcpgateway/test_translate.py +++ b/tests/unit/mcpgateway/test_translate.py @@ -78,7 +78,10 @@ def test_translate_importerror(monkeypatch, translate): monkeypatch.setattr(translate, "httpx", None) # Test that _run_sse_to_stdio raises ImportError when httpx is None + # Standard import asyncio + + # Third-Party import pytest async def test_sse_without_httpx(): @@ -1392,6 +1395,7 @@ def __init__(self, **kwargs): pass # Mock the import path for CORS middleware + # Standard import types cors_module = types.ModuleType('cors') cors_module.CORSMiddleware = MockCORSMiddleware @@ -1400,6 +1404,7 @@ def __init__(self, **kwargs): starlette_module = types.ModuleType('starlette') starlette_module.middleware = middleware_module + # Standard import sys sys.modules['starlette'] = starlette_module sys.modules['starlette.middleware'] = middleware_module diff --git a/tests/unit/mcpgateway/test_ui_version.py b/tests/unit/mcpgateway/test_ui_version.py index 779486d43..283f0facd 100644 --- a/tests/unit/mcpgateway/test_ui_version.py +++ b/tests/unit/mcpgateway/test_ui_version.py @@ -34,7 +34,10 @@ @pytest.fixture(scope="session") def test_client() -> TestClient: """Spin up the FastAPI test client once for the whole session with proper database setup.""" + # Standard import tempfile + + # Third-Party from _pytest.monkeypatch import MonkeyPatch from sqlalchemy import create_engine from sqlalchemy.orm import sessionmaker @@ -47,9 +50,11 @@ def test_client() -> TestClient: url = f"sqlite:///{path}" # Patch settings + # First-Party from mcpgateway.config import settings mp.setattr(settings, "database_url", url, raising=False) + # First-Party import mcpgateway.db as db_mod import mcpgateway.main as main_mod diff --git a/tests/unit/mcpgateway/test_version.py b/tests/unit/mcpgateway/test_version.py index 2bf71d33e..421a67183 100644 --- a/tests/unit/mcpgateway/test_version.py +++ b/tests/unit/mcpgateway/test_version.py @@ -258,6 +258,7 @@ def test_psutil_import_error(monkeypatch: pytest.MonkeyPatch) -> None: """Test the ImportError branch for psutil.""" # Simply test by setting psutil to None after import - this simulates # the ImportError case without needing complex import mocking + # First-Party from mcpgateway import version as ver_mod # Set psutil to None to simulate ImportError @@ -270,6 +271,7 @@ def test_psutil_import_error(monkeypatch: pytest.MonkeyPatch) -> None: def test_redis_import_error(monkeypatch: pytest.MonkeyPatch) -> None: """Test the ImportError branch for redis.""" + # First-Party from mcpgateway import version as ver_mod # Set aioredis to None and REDIS_AVAILABLE to False to simulate ImportError @@ -283,6 +285,7 @@ def test_redis_import_error(monkeypatch: pytest.MonkeyPatch) -> None: def test_sanitize_url_none_and_empty() -> None: """Test _sanitize_url with None and empty string.""" + # First-Party from mcpgateway import version as ver_mod # Test None input @@ -293,6 +296,7 @@ def test_sanitize_url_none_and_empty() -> None: def test_sanitize_url_no_username() -> None: """Test _sanitize_url when password exists but no username.""" + # First-Party from mcpgateway import version as ver_mod # URL with password but no username @@ -303,6 +307,7 @@ def test_sanitize_url_no_username() -> None: def test_system_metrics_with_exceptions(monkeypatch: pytest.MonkeyPatch) -> None: """Test _system_metrics with various exception paths.""" + # First-Party from mcpgateway import version as ver_mod class _FailingPsutil: @@ -368,6 +373,7 @@ def mock_getloadavg(): def test_system_metrics_no_psutil(monkeypatch: pytest.MonkeyPatch) -> None: """Test _system_metrics when psutil is None.""" + # First-Party from mcpgateway import version as ver_mod monkeypatch.setattr(ver_mod, "psutil", None) @@ -377,6 +383,7 @@ def test_system_metrics_no_psutil(monkeypatch: pytest.MonkeyPatch) -> None: def test_login_html_rendering() -> None: """Test _login_html function.""" + # First-Party from mcpgateway import version as ver_mod next_url = "/version?format=html" @@ -400,7 +407,6 @@ def test_version_endpoint_redis_conditions() -> None: # Test the Redis health check conditions directly # This tests the logic branches without async complexity - # Test 1: Redis not available assert not (False and "redis" == "redis" and "redis://localhost") @@ -416,6 +422,7 @@ def test_version_endpoint_redis_conditions() -> None: def test_is_secret_comprehensive() -> None: """Test _is_secret with comprehensive coverage of all branches.""" + # First-Party from mcpgateway import version as ver_mod # Test secret keywords (case insensitive) @@ -439,11 +446,11 @@ def test_is_secret_comprehensive() -> None: def test_import_error_branches() -> None: """Test import error coverage by checking the current state.""" + # First-Party from mcpgateway import version as ver_mod # These tests check the current runtime state to ensure # the import branches were properly executed at module load time - # psutil should be available in test environment, but if it wasn't # the code would set it to None in the except block (lines 80-81) psutil_available = ver_mod.psutil is not None diff --git a/tests/unit/mcpgateway/test_well_known.py b/tests/unit/mcpgateway/test_well_known.py index 9523ed676..471939cee 100644 --- a/tests/unit/mcpgateway/test_well_known.py +++ b/tests/unit/mcpgateway/test_well_known.py @@ -7,11 +7,15 @@ Test cases for well-known URI endpoints. """ +# Standard import json -import pytest -from fastapi.testclient import TestClient from unittest.mock import patch +# Third-Party +from fastapi.testclient import TestClient +import pytest + +# First-Party # Import the main FastAPI app from mcpgateway.main import app @@ -65,6 +69,7 @@ class TestSecurityTxtValidation: def test_validate_security_txt_empty(self): """Test validation with empty content.""" + # First-Party from mcpgateway.routers.well_known import validate_security_txt result = validate_security_txt("") @@ -75,6 +80,7 @@ def test_validate_security_txt_empty(self): def test_validate_security_txt_adds_expires(self): """Test that validation adds Expires field.""" + # First-Party from mcpgateway.routers.well_known import validate_security_txt content = "Contact: security@example.com" @@ -87,6 +93,7 @@ def test_validate_security_txt_adds_expires(self): def test_validate_security_txt_preserves_expires(self): """Test that validation preserves existing Expires field.""" + # First-Party from mcpgateway.routers.well_known import validate_security_txt content = "Contact: security@example.com\nExpires: 2025-12-31T23:59:59Z" @@ -99,6 +106,7 @@ def test_validate_security_txt_preserves_expires(self): def test_validate_security_txt_preserves_comments(self): """Test that validation preserves existing comments.""" + # First-Party from mcpgateway.routers.well_known import validate_security_txt content = "# Custom security information\nContact: security@example.com" @@ -240,6 +248,7 @@ class TestWellKnownAdminEndpoint: @pytest.fixture def auth_client(self): """Create a test client with auth dependency override.""" + # First-Party from mcpgateway.utils.verify_credentials import require_auth app.dependency_overrides[require_auth] = lambda: "test_user" client = TestClient(app) @@ -331,6 +340,7 @@ class TestWellKnownRegistry: def test_registry_contains_standard_files(self): """Test that registry contains expected standard files.""" + # First-Party from mcpgateway.routers.well_known import WELL_KNOWN_REGISTRY expected_files = ["robots.txt", "security.txt", "ai.txt", "dnt-policy.txt", "change-password"] @@ -343,6 +353,7 @@ def test_registry_contains_standard_files(self): def test_registry_content_types(self): """Test that registry has correct content types.""" + # First-Party from mcpgateway.routers.well_known import WELL_KNOWN_REGISTRY # Most should be text/plain diff --git a/tests/unit/mcpgateway/test_wrapper.py b/tests/unit/mcpgateway/test_wrapper.py index 2842c596f..6ace0cb6b 100644 --- a/tests/unit/mcpgateway/test_wrapper.py +++ b/tests/unit/mcpgateway/test_wrapper.py @@ -10,14 +10,18 @@ *mcpgateway.wrapper*. """ +# Standard import asyncio +import contextlib +import errno import json import sys import types -import errno + +# Third-Party import pytest -import contextlib +# First-Party import mcpgateway.wrapper as wrapper diff --git a/tests/unit/mcpgateway/transports/test_streamablehttp_transport.py b/tests/unit/mcpgateway/transports/test_streamablehttp_transport.py index 86e1112fb..d31b6600c 100644 --- a/tests/unit/mcpgateway/transports/test_streamablehttp_transport.py +++ b/tests/unit/mcpgateway/transports/test_streamablehttp_transport.py @@ -357,11 +357,12 @@ async def fake_get_db(): @pytest.mark.asyncio async def test_list_prompts_with_server_id(monkeypatch): """Test list_prompts returns prompts for a server_id.""" - # First-Party - from mcpgateway.transports.streamablehttp_transport import list_prompts, server_id_var, prompt_service # Third-Party from mcp.types import PromptArgument + # First-Party + from mcpgateway.transports.streamablehttp_transport import list_prompts, prompt_service, server_id_var + mock_db = MagicMock() mock_prompt = MagicMock() mock_prompt.name = "prompt1" @@ -391,7 +392,7 @@ async def fake_get_db(): async def test_list_prompts_no_server_id(monkeypatch): """Test list_prompts returns prompts when no server_id is set.""" # First-Party - from mcpgateway.transports.streamablehttp_transport import list_prompts, server_id_var, prompt_service + from mcpgateway.transports.streamablehttp_transport import list_prompts, prompt_service, server_id_var mock_db = MagicMock() mock_prompt = MagicMock() @@ -420,7 +421,7 @@ async def fake_get_db(): async def test_list_prompts_exception_with_server_id(monkeypatch, caplog): """Test list_prompts returns [] and logs exception when server_id is set.""" # First-Party - from mcpgateway.transports.streamablehttp_transport import list_prompts, server_id_var, prompt_service + from mcpgateway.transports.streamablehttp_transport import list_prompts, prompt_service, server_id_var mock_db = MagicMock() @@ -443,7 +444,7 @@ async def fake_get_db(): async def test_list_prompts_exception_no_server_id(monkeypatch, caplog): """Test list_prompts returns [] and logs exception when no server_id.""" # First-Party - from mcpgateway.transports.streamablehttp_transport import list_prompts, server_id_var, prompt_service + from mcpgateway.transports.streamablehttp_transport import list_prompts, prompt_service, server_id_var mock_db = MagicMock() @@ -470,11 +471,12 @@ async def fake_get_db(): @pytest.mark.asyncio async def test_get_prompt_success(monkeypatch): """Test get_prompt returns prompt result on success.""" - # First-Party - from mcpgateway.transports.streamablehttp_transport import get_prompt, prompt_service, types # Third-Party from mcp.types import PromptMessage, TextContent + # First-Party + from mcpgateway.transports.streamablehttp_transport import get_prompt, prompt_service, types + mock_db = MagicMock() # Create proper PromptMessage structure mock_message = PromptMessage(role="user", content=TextContent(type="text", text="test message")) @@ -566,6 +568,7 @@ async def test_get_prompt_outer_exception(monkeypatch, caplog): """Test get_prompt returns [] and logs exception from outer try-catch.""" # Standard from contextlib import asynccontextmanager + # First-Party from mcpgateway.transports.streamablehttp_transport import get_prompt @@ -592,7 +595,7 @@ async def failing_get_db(): async def test_list_resources_with_server_id(monkeypatch): """Test list_resources returns resources for a server_id.""" # First-Party - from mcpgateway.transports.streamablehttp_transport import list_resources, server_id_var, resource_service + from mcpgateway.transports.streamablehttp_transport import list_resources, resource_service, server_id_var mock_db = MagicMock() mock_resource = MagicMock() @@ -623,7 +626,7 @@ async def fake_get_db(): async def test_list_resources_no_server_id(monkeypatch): """Test list_resources returns resources when no server_id is set.""" # First-Party - from mcpgateway.transports.streamablehttp_transport import list_resources, server_id_var, resource_service + from mcpgateway.transports.streamablehttp_transport import list_resources, resource_service, server_id_var mock_db = MagicMock() mock_resource = MagicMock() @@ -653,7 +656,7 @@ async def fake_get_db(): async def test_list_resources_exception_with_server_id(monkeypatch, caplog): """Test list_resources returns [] and logs exception when server_id is set.""" # First-Party - from mcpgateway.transports.streamablehttp_transport import list_resources, server_id_var, resource_service + from mcpgateway.transports.streamablehttp_transport import list_resources, resource_service, server_id_var mock_db = MagicMock() @@ -676,7 +679,7 @@ async def fake_get_db(): async def test_list_resources_exception_no_server_id(monkeypatch, caplog): """Test list_resources returns [] and logs exception when no server_id.""" # First-Party - from mcpgateway.transports.streamablehttp_transport import list_resources, server_id_var, resource_service + from mcpgateway.transports.streamablehttp_transport import list_resources, resource_service, server_id_var mock_db = MagicMock() @@ -703,11 +706,12 @@ async def fake_get_db(): @pytest.mark.asyncio async def test_read_resource_success(monkeypatch): """Test read_resource returns resource content on success.""" - # First-Party - from mcpgateway.transports.streamablehttp_transport import read_resource, resource_service # Third-Party from pydantic import AnyUrl + # First-Party + from mcpgateway.transports.streamablehttp_transport import read_resource, resource_service + mock_db = MagicMock() mock_result = MagicMock() mock_result.text = "resource content here" @@ -728,11 +732,12 @@ async def fake_get_db(): @pytest.mark.asyncio async def test_read_resource_no_content(monkeypatch, caplog): """Test read_resource returns [] and logs warning if no content.""" - # First-Party - from mcpgateway.transports.streamablehttp_transport import read_resource, resource_service # Third-Party from pydantic import AnyUrl + # First-Party + from mcpgateway.transports.streamablehttp_transport import read_resource, resource_service + mock_db = MagicMock() mock_result = MagicMock() mock_result.text = "" @@ -754,11 +759,12 @@ async def fake_get_db(): @pytest.mark.asyncio async def test_read_resource_no_result(monkeypatch, caplog): """Test read_resource returns [] and logs warning if no result.""" - # First-Party - from mcpgateway.transports.streamablehttp_transport import read_resource, resource_service # Third-Party from pydantic import AnyUrl + # First-Party + from mcpgateway.transports.streamablehttp_transport import read_resource, resource_service + mock_db = MagicMock() @asynccontextmanager @@ -778,11 +784,12 @@ async def fake_get_db(): @pytest.mark.asyncio async def test_read_resource_service_exception(monkeypatch, caplog): """Test read_resource returns [] and logs exception from service.""" - # First-Party - from mcpgateway.transports.streamablehttp_transport import read_resource, resource_service # Third-Party from pydantic import AnyUrl + # First-Party + from mcpgateway.transports.streamablehttp_transport import read_resource, resource_service + mock_db = MagicMock() @asynccontextmanager @@ -804,11 +811,13 @@ async def test_read_resource_outer_exception(monkeypatch, caplog): """Test read_resource returns [] and logs exception from outer try-catch.""" # Standard from contextlib import asynccontextmanager - # First-Party - from mcpgateway.transports.streamablehttp_transport import read_resource + # Third-Party from pydantic import AnyUrl + # First-Party + from mcpgateway.transports.streamablehttp_transport import read_resource + # Cause an exception during get_db context management @asynccontextmanager async def failing_get_db(): @@ -1079,11 +1088,12 @@ async def handle_request(self, scope, receive, send_func): @pytest.mark.asyncio async def test_session_manager_wrapper_handle_streamable_http_no_server_id(monkeypatch): """Test handle_streamable_http without server_id match in path.""" - # First-Party - from mcpgateway.transports.streamablehttp_transport import server_id_var # Standard from contextlib import asynccontextmanager + # First-Party + from mcpgateway.transports.streamablehttp_transport import server_id_var + async def send(msg): sent.append(msg) diff --git a/tests/unit/mcpgateway/utils/test_passthrough_headers_fixed.py b/tests/unit/mcpgateway/utils/test_passthrough_headers_fixed.py index 359b746bd..9687a0bbb 100644 --- a/tests/unit/mcpgateway/utils/test_passthrough_headers_fixed.py +++ b/tests/unit/mcpgateway/utils/test_passthrough_headers_fixed.py @@ -13,12 +13,14 @@ # Standard import logging from unittest.mock import Mock, patch + +# Third-Party import pytest # First-Party from mcpgateway.db import Gateway as DbGateway from mcpgateway.db import GlobalConfig -from mcpgateway.utils.passthrough_headers import get_passthrough_headers, set_global_passthrough_headers, PassthroughHeadersError +from mcpgateway.utils.passthrough_headers import get_passthrough_headers, PassthroughHeadersError, set_global_passthrough_headers class TestPassthroughHeaders: diff --git a/tests/unit/mcpgateway/utils/test_proxy_auth.py b/tests/unit/mcpgateway/utils/test_proxy_auth.py index bc418f4ea..60db24081 100644 --- a/tests/unit/mcpgateway/utils/test_proxy_auth.py +++ b/tests/unit/mcpgateway/utils/test_proxy_auth.py @@ -9,12 +9,16 @@ Tests the new MCP_CLIENT_AUTH_ENABLED and proxy authentication features. """ +# Standard import asyncio -import pytest -from unittest.mock import Mock, patch, AsyncMock +from unittest.mock import AsyncMock, Mock, patch + +# Third-Party from fastapi import HTTPException, Request from fastapi.security import HTTPAuthorizationCredentials +import pytest +# First-Party from mcpgateway.utils import verify_credentials as vc @@ -132,6 +136,7 @@ async def test_backwards_compatibility(self, mock_settings, mock_request): @pytest.mark.asyncio async def test_mixed_auth_scenario(self, mock_settings, mock_request): """Test scenario with both proxy header and JWT token.""" + # Third-Party import jwt mock_settings.mcp_client_auth_enabled = False @@ -154,9 +159,12 @@ class TestWebSocketAuthentication: @pytest.mark.asyncio async def test_websocket_auth_required(self): """Test that WebSocket requires authentication when enabled.""" - from fastapi import WebSocket + # Standard from unittest.mock import AsyncMock + # Third-Party + from fastapi import WebSocket + # Create mock WebSocket websocket = AsyncMock(spec=WebSocket) websocket.query_params = {} @@ -170,6 +178,7 @@ async def test_websocket_auth_required(self): mock_settings.trust_proxy_auth = False # Import and call the websocket_endpoint function + # First-Party from mcpgateway.main import websocket_endpoint # Should close connection due to missing auth @@ -179,8 +188,11 @@ async def test_websocket_auth_required(self): @pytest.mark.asyncio async def test_websocket_with_token_query_param(self): """Test WebSocket authentication with token in query parameters.""" - from fastapi import WebSocket + # Standard from unittest.mock import AsyncMock + + # Third-Party + from fastapi import WebSocket import jwt # Create mock WebSocket @@ -199,6 +211,7 @@ async def test_websocket_with_token_query_param(self): # Mock verify_jwt_token to succeed with patch('mcpgateway.main.verify_jwt_token', new=AsyncMock(return_value={'sub': 'test-user'})): + # First-Party from mcpgateway.main import websocket_endpoint try: @@ -213,9 +226,12 @@ async def test_websocket_with_token_query_param(self): @pytest.mark.asyncio async def test_websocket_with_proxy_auth(self): """Test WebSocket authentication with proxy headers.""" - from fastapi import WebSocket + # Standard from unittest.mock import AsyncMock + # Third-Party + from fastapi import WebSocket + # Create mock WebSocket websocket = AsyncMock(spec=WebSocket) websocket.query_params = {} @@ -231,6 +247,7 @@ async def test_websocket_with_proxy_auth(self): mock_settings.auth_required = False mock_settings.port = 8000 + # First-Party from mcpgateway.main import websocket_endpoint try: diff --git a/tests/unit/mcpgateway/utils/test_verify_credentials.py b/tests/unit/mcpgateway/utils/test_verify_credentials.py index 10a84e7be..94be00ccf 100644 --- a/tests/unit/mcpgateway/utils/test_verify_credentials.py +++ b/tests/unit/mcpgateway/utils/test_verify_credentials.py @@ -27,6 +27,7 @@ # Standard import base64 from datetime import datetime, timedelta, timezone +from unittest.mock import Mock # Third-Party from fastapi import HTTPException, Request, status @@ -34,7 +35,6 @@ from fastapi.testclient import TestClient import jwt import pytest -from unittest.mock import Mock # First-Party from mcpgateway.utils import verify_credentials as vc # module under test diff --git a/tests/utils/rbac_mocks.py b/tests/utils/rbac_mocks.py index 2be69fd25..80aaaaa36 100644 --- a/tests/utils/rbac_mocks.py +++ b/tests/utils/rbac_mocks.py @@ -192,11 +192,12 @@ def create_rbac_dependency_overrides() -> Dict: Dict: Dictionary mapping dependencies to mock implementations """ # Import here to avoid circular imports + # First-Party + from mcpgateway.auth import get_current_user, get_db from mcpgateway.middleware.rbac import ( get_current_user_with_permissions, get_permission_service, ) - from mcpgateway.auth import get_current_user, get_db return { get_current_user_with_permissions: mock_get_current_user_with_permissions, @@ -326,6 +327,7 @@ async def custom_user_mock(*args, **kwargs): print(f"DEBUG: custom_user_mock called with args={args}, kwargs={kwargs}") return custom_user_context + # First-Party from mcpgateway.middleware.rbac import get_current_user_with_permissions overrides[get_current_user_with_permissions] = custom_user_mock @@ -340,6 +342,7 @@ def patch_rbac_decorators(): Returns: Dict: Original functions for restoration later """ + # First-Party import mcpgateway.middleware.rbac as rbac_module # Store original functions @@ -363,6 +366,7 @@ def restore_rbac_decorators(originals: Dict): Args: originals: Dictionary of original functions returned by patch_rbac_decorators """ + # First-Party import mcpgateway.middleware.rbac as rbac_module rbac_module.require_permission = originals['require_permission'] @@ -379,11 +383,12 @@ def teardown_rbac_mocks_for_app(app): Args: app: FastAPI application instance """ + # First-Party + from mcpgateway.auth import get_current_user, get_db from mcpgateway.middleware.rbac import ( get_current_user_with_permissions, get_permission_service, ) - from mcpgateway.auth import get_current_user, get_db # Remove the specific RBAC-related overrides rbac_dependencies = [ From c524a4734516ed4da8d9b7ac83dbef0b43786c81 Mon Sep 17 00:00:00 2001 From: Mihai Criveti Date: Wed, 3 Sep 2025 09:34:48 +0100 Subject: [PATCH 22/49] Fix logs Signed-off-by: Mihai Criveti --- mcpgateway/admin.py | 35 ++++++++++++++++++++++------------- 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/mcpgateway/admin.py b/mcpgateway/admin.py index 934f28c01..184dd9b64 100644 --- a/mcpgateway/admin.py +++ b/mcpgateway/admin.py @@ -33,7 +33,7 @@ # Third-Party from fastapi import APIRouter, Depends, HTTPException, Request, Response -from fastapi.responses import FileResponse, HTMLResponse, JSONResponse, RedirectResponse, StreamingResponse +from fastapi.responses import HTMLResponse, JSONResponse, RedirectResponse, StreamingResponse import httpx import jwt from pydantic import ValidationError @@ -7437,12 +7437,21 @@ async def admin_get_log_file( if not (file_path.suffix in [".log", ".jsonl", ".json"] or file_path.stem.startswith(Path(settings.log_file).stem)): raise HTTPException(403, "Not a log file") - # Return file for download - return FileResponse( - path=file_path, - filename=file_path.name, - media_type="application/octet-stream", - ) + # Return file for download using Response with file content + try: + with open(file_path, "rb") as f: + file_content = f.read() + + return Response( + content=file_content, + media_type="application/octet-stream", + headers={ + "Content-Disposition": f'attachment; filename="{file_path.name}"', + }, + ) + except Exception as e: + LOGGER.error(f"Error reading file for download: {e}") + raise HTTPException(500, f"Error reading file for download: {e}") # List available log files log_files = [] @@ -7465,7 +7474,7 @@ async def admin_get_log_file( if settings.log_rotation_enabled: pattern = f"{Path(settings.log_file).stem}.*" for file in log_dir.glob(pattern): - if file.is_file(): + if file.is_file() and file.name != main_log.name: # Exclude main log file stat = file.stat() log_files.append( { @@ -7505,7 +7514,7 @@ async def admin_get_log_file( @admin_router.get("/logs/export") async def admin_export_logs( - export_format: str = "json", + format: str = "json", entity_type: Optional[str] = None, entity_id: Optional[str] = None, level: Optional[str] = None, @@ -7536,8 +7545,8 @@ async def admin_export_logs( """ # Standard # Validate format - if export_format not in ["json", "csv"]: - raise HTTPException(400, f"Invalid format: {export_format}. Use 'json' or 'csv'") + if format not in ["json", "csv"]: + raise HTTPException(400, f"Invalid format: {format}. Use 'json' or 'csv'") # Get log storage from logging service storage = logging_service.get_storage() @@ -7583,9 +7592,9 @@ async def admin_export_logs( # Generate filename timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") - filename = f"logs_export_{timestamp}.{export_format}" + filename = f"logs_export_{timestamp}.{format}" - if export_format == "json": + if format == "json": # Export as JSON content = json.dumps(logs, indent=2, default=str) return Response( From 06748116084011e0f11cfbc35b5b7bd61e5db1b9 Mon Sep 17 00:00:00 2001 From: Mihai Criveti Date: Wed, 3 Sep 2025 10:16:13 +0100 Subject: [PATCH 23/49] Fix logs Signed-off-by: Mihai Criveti --- mcpgateway/admin.py | 12 ++++++------ tests/unit/mcpgateway/test_admin.py | 22 ++++++++++++++-------- 2 files changed, 20 insertions(+), 14 deletions(-) diff --git a/mcpgateway/admin.py b/mcpgateway/admin.py index 184dd9b64..c39bf9191 100644 --- a/mcpgateway/admin.py +++ b/mcpgateway/admin.py @@ -32,7 +32,7 @@ import uuid # Third-Party -from fastapi import APIRouter, Depends, HTTPException, Request, Response +from fastapi import APIRouter, Depends, HTTPException, Query, Request, Response from fastapi.responses import HTMLResponse, JSONResponse, RedirectResponse, StreamingResponse import httpx import jwt @@ -7514,7 +7514,7 @@ async def admin_get_log_file( @admin_router.get("/logs/export") async def admin_export_logs( - format: str = "json", + export_format: str = Query("json", alias="format"), entity_type: Optional[str] = None, entity_id: Optional[str] = None, level: Optional[str] = None, @@ -7545,8 +7545,8 @@ async def admin_export_logs( """ # Standard # Validate format - if format not in ["json", "csv"]: - raise HTTPException(400, f"Invalid format: {format}. Use 'json' or 'csv'") + if export_format not in ["json", "csv"]: + raise HTTPException(400, f"Invalid format: {export_format}. Use 'json' or 'csv'") # Get log storage from logging service storage = logging_service.get_storage() @@ -7592,9 +7592,9 @@ async def admin_export_logs( # Generate filename timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") - filename = f"logs_export_{timestamp}.{format}" + filename = f"logs_export_{timestamp}.{export_format}" - if format == "json": + if export_format == "json": # Export as JSON content = json.dumps(logs, indent=2, default=str) return Response( diff --git a/tests/unit/mcpgateway/test_admin.py b/tests/unit/mcpgateway/test_admin.py index 02521a5cf..0f425d684 100644 --- a/tests/unit/mcpgateway/test_admin.py +++ b/tests/unit/mcpgateway/test_admin.py @@ -13,11 +13,11 @@ # Standard from datetime import datetime, timezone import json -from unittest.mock import AsyncMock, MagicMock, patch +from unittest.mock import AsyncMock, MagicMock, mock_open, patch # Third-Party from fastapi import HTTPException, Request -from fastapi.responses import HTMLResponse, JSONResponse, RedirectResponse, StreamingResponse +from fastapi.responses import HTMLResponse, JSONResponse, RedirectResponse, Response, StreamingResponse from pydantic import ValidationError from pydantic_core import InitErrorDetails from pydantic_core import ValidationError as CoreValidationError @@ -2137,7 +2137,7 @@ async def _test_admin_get_logs_stream(self, mock_get_storage, mock_db): assert len(result) == 1 assert result[0]["message"] == "Test log message" - @patch('mcpgateway.config.settings') + @patch('mcpgateway.admin.settings') async def _test_admin_get_logs_file_enabled(self, mock_settings, mock_db): """Test getting log file when file logging is enabled.""" # First-Party @@ -2148,14 +2148,19 @@ async def _test_admin_get_logs_file_enabled(self, mock_settings, mock_db): mock_settings.log_file = "test.log" mock_settings.log_folder = "logs" - # Mock file exists - with patch('pathlib.Path.exists', return_value=True): + # Mock file exists and reading + with patch('pathlib.Path.exists', return_value=True), \ + patch('pathlib.Path.stat') as mock_stat, \ + patch('builtins.open', mock_open(read_data=b"test log content")): + + mock_stat.return_value.st_size = 16 result = await admin_get_log_file(filename=None, user="test-user") - assert isinstance(result, FileResponse) - assert "test.log" in str(result.path) + assert isinstance(result, Response) + assert result.media_type == "application/octet-stream" + assert "test.log" in result.headers["content-disposition"] - @patch('mcpgateway.config.settings') + @patch('mcpgateway.admin.settings') async def test_admin_get_logs_file_disabled(self, mock_settings, mock_db): """Test getting log file when file logging is disabled.""" # First-Party @@ -2163,6 +2168,7 @@ async def test_admin_get_logs_file_disabled(self, mock_settings, mock_db): # Mock settings to disable file logging mock_settings.log_to_file = False + mock_settings.log_file = None with pytest.raises(HTTPException) as excinfo: await admin_get_log_file(filename=None, user="test-user") From 619beb5cd10020f624b92a09cf3d1c24d19b3d40 Mon Sep 17 00:00:00 2001 From: Mihai Criveti Date: Wed, 3 Sep 2025 18:16:22 +0100 Subject: [PATCH 24/49] Add multitenancy scripts to check migration Signed-off-by: Mihai Criveti --- CHANGELOG.md | 13 + MIGRATION-0.7.0.md | 599 ++++++++++++++++++ ...a0fb2_consolidated_multiuser_team_rbac_.py | 458 +++++++++++++ scripts/fix_multitenancy_0_7_0_resources.py | 146 +++++ .../verify_multitenancy_0_7_0_migration.py | 253 ++++++++ 5 files changed, 1469 insertions(+) create mode 100644 MIGRATION-0.7.0.md create mode 100755 scripts/fix_multitenancy_0_7_0_resources.py create mode 100755 scripts/verify_multitenancy_0_7_0_migration.py diff --git a/CHANGELOG.md b/CHANGELOG.md index 9e6537cf8..6872cd865 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,19 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/) **Impact:** Complete architectural transformation enabling secure team collaboration, enterprise SSO integration, and scalable multi-tenant deployments. +### 🚀 **Migration Guide** + +**⚠️ IMPORTANT**: This is a **major architectural change** requiring database migration. + +**📖 Complete migration instructions**: See **[MIGRATION-0.7.0.md](./MIGRATION-0.7.0.md)** for detailed upgrade guidance from v0.6.0 to v0.7.0. + +**📋 Migration includes**: +- Automated database schema upgrade +- Team assignment for existing servers/resources +- Platform admin user creation +- Configuration export/import tools +- Comprehensive verification and troubleshooting + ### Added #### **🔐 Authentication & Authorization System** diff --git a/MIGRATION-0.7.0.md b/MIGRATION-0.7.0.md new file mode 100644 index 000000000..91e5f5645 --- /dev/null +++ b/MIGRATION-0.7.0.md @@ -0,0 +1,599 @@ +# Migration Guide: Upgrading to Multi-Tenancy (v0.6.0 to v0.7.0) + +This guide walks you through upgrading from MCP Gateway v0.6.0 to v0.7.0 that implements comprehensive multi-tenancy, team management, and RBAC. + +## Overview + +Version 0.7.0 introduces major architectural changes: +- **Multi-tenant architecture** with team-based resource isolation +- **Email-based authentication** alongside existing basic auth +- **Personal teams** automatically created for each user +- **Role-Based Access Control (RBAC)** with granular permissions +- **Team visibility controls** (private/public teams, private/team/public resources) +- **SSO integration** with GitHub, Google, and generic OIDC providers + +## 🛠️ Migration Tools + +This migration includes **2 essential scripts** to help you: + +### `scripts/verify_multitenancy_0_7_0_migration.py` +- **Purpose**: Verify v0.6.0 → v0.7.0 migration completed successfully +- **Checks**: Admin user, personal team, resource assignments, visibility settings +- **When**: Run after migration to confirm everything worked + +### `scripts/fix_multitenancy_0_7_0_resources.py` +- **Purpose**: Fix resources missing team assignments after v0.6.0 → v0.7.0 upgrade +- **Fixes**: Assigns orphaned servers/tools/resources to admin's personal team +- **When**: Use if verification shows unassigned resources + +## Pre-Migration Checklist + +### 1. Backup Your Database & Configuration +**⚠️ CRITICAL: Always backup your database AND configuration before upgrading** + +#### Database Backup +```bash +# For SQLite (default) +cp mcp.db mcp.db.backup.$(date +%Y%m%d_%H%M%S) + +# For PostgreSQL +pg_dump -h localhost -U postgres -d mcp > mcp_backup_$(date +%Y%m%d_%H%M%S).sql + +# For MySQL +mysqldump -u mysql -p mcp > mcp_backup_$(date +%Y%m%d_%H%M%S).sql +``` + +#### Configuration Export (Recommended) +**💡 Export your current configuration via the Admin UI before migration:** + +```bash +# 1. Start your current MCP Gateway +make dev # or however you normally run it + +# 2. Access the admin UI +open http://localhost:4444/admin + +# 3. Navigate to Export/Import section +# 4. Click "Export Configuration" +# 5. Save the JSON file (contains servers, tools, resources, etc.) + +# Or use direct API call (if you have a bearer token): +curl -H "Authorization: Bearer YOUR_TOKEN" \ + "http://localhost:4444/admin/export/configuration" \ + -o mcp_config_backup_$(date +%Y%m%d_%H%M%S).json + +# Or with basic auth: +curl -u admin:changeme \ + "http://localhost:4444/admin/export/configuration" \ + -o mcp_config_backup_$(date +%Y%m%d_%H%M%S).json +``` + +**✅ Benefits**: +- Preserves all your servers, tools, resources, and settings +- Can be imported after migration if needed +- Human-readable JSON format + +### 2. Setup Environment Configuration + +**⚠️ CRITICAL: You must setup your `.env` file before running the migration** + +The migration uses your `.env` configuration to create the platform admin user. + +#### If you don't have a `.env` file: +```bash +# Copy the example file +cp .env.example .env + +# Edit .env to set your admin credentials +nano .env # or your preferred editor +``` + +#### If you already have a `.env` file: +```bash +# Backup your current .env +cp .env .env.backup.$(date +%Y%m%d_%H%M%S) + +# Check if you have the required settings +grep -E "PLATFORM_ADMIN_EMAIL|PLATFORM_ADMIN_PASSWORD|EMAIL_AUTH_ENABLED" .env + +# If missing, add them or merge from .env.example +``` + +### 3. Configure Required Settings + +**⚠️ REQUIRED: Configure these settings in your `.env` file before migration** + +```bash +# Platform Administrator (will be created by migration) +PLATFORM_ADMIN_EMAIL=your-admin@yourcompany.com +PLATFORM_ADMIN_PASSWORD=your-secure-password +PLATFORM_ADMIN_FULL_NAME="Your Name" + +# Enable email authentication (required for multi-tenancy) +EMAIL_AUTH_ENABLED=true + +# Personal team settings (recommended defaults) +AUTO_CREATE_PERSONAL_TEAMS=true +PERSONAL_TEAM_PREFIX=personal +``` + +**💡 Tips**: +- Use a **real email address** for `PLATFORM_ADMIN_EMAIL` (you'll use this to log in) +- Choose a **strong password** (minimum 8 characters) +- Set `EMAIL_AUTH_ENABLED=true` to enable the multitenancy features + +**🔍 Verify your configuration**: +```bash +# Check your settings are loaded correctly +python3 -c " +from mcpgateway.config import settings +print(f'Admin email: {settings.platform_admin_email}') +print(f'Email auth: {settings.email_auth_enabled}') +print(f'Personal teams: {settings.auto_create_personal_teams}') +" +``` + +## Migration Process + +> **🚨 IMPORTANT**: Before starting the migration, you **must** have a properly configured `.env` file with `PLATFORM_ADMIN_EMAIL` and other required settings. The migration will use these settings to create your admin user. See the Pre-Migration Checklist above. + +### Step 1: Update Codebase + +```bash +# Pull the latest changes +git fetch origin main +git checkout main +git pull origin main + +# Update dependencies +make install-dev +``` + +### Step 2: Run Database Migration + +The migration process is automated and handles: +- Creating multi-tenancy database schema +- Creating platform admin user and personal team +- **Migrating existing servers** to the admin's personal team +- Setting up default RBAC roles + +**⚠️ PREREQUISITE**: Ensure `.env` file is configured with `PLATFORM_ADMIN_EMAIL` etc. (see step 3 above) +**✅ Configuration**: Uses your `.env` settings automatically +**✅ Database Compatibility**: Works with **SQLite**, **PostgreSQL**, and **MySQL** + +```bash +# IMPORTANT: Setup .env first (if not already done) +cp .env.example .env # then edit with your admin credentials + +# Run the migration (uses settings from your .env file) +python3 -m mcpgateway.bootstrap_db + +# Or using make +make dev # This runs bootstrap_db automatically + +# Verify migration completed successfully +python3 scripts/verify_multitenancy_0_7_0_migration.py +``` + +### Step 3: Verify Migration Results + +After migration, verify the results using our verification script: + +```bash +# Run comprehensive verification +python3 scripts/verify_multitenancy_0_7_0_migration.py +``` + +This will check: +- ✅ Platform admin user creation +- ✅ Personal team creation and membership +- ✅ Resource team assignments +- ✅ Visibility settings +- ✅ Database integrity + +**Expected Output**: All checks should pass. If any fail, see the troubleshooting section below. + +## Post-Migration Configuration + +### 1. Verify Server Visibility + +Old servers should now be visible in the Virtual Servers list. They will be: +- **Owned by**: Your platform admin user +- **Assigned to**: Admin's personal team +- **Visibility**: Public (visible to all authenticated users) + +### 2. Import Configuration (If Needed) + +If you exported your configuration before migration and need to restore specific settings: + +```bash +# Access the admin UI +open http://localhost:4444/admin + +# Navigate to Export/Import section → Import Configuration +# Upload your backup JSON file from step 1 + +# Or use API: +curl -X POST "http://localhost:4444/admin/import/configuration" \ + -H "Authorization: Bearer YOUR_TOKEN" \ + -H "Content-Type: application/json" \ + -d @mcp_config_backup_YYYYMMDD_HHMMSS.json + +# Or with basic auth: +curl -X POST "http://localhost:4444/admin/import/configuration" \ + -u admin:changeme \ + -H "Content-Type: application/json" \ + -d @mcp_config_backup_YYYYMMDD_HHMMSS.json +``` + +**📋 Import Options**: +- **Merge**: Adds missing resources without overwriting existing ones +- **Replace**: Overwrites existing resources with backup versions +- **Selective**: Choose specific servers/tools/resources to import + +### 2. Configure SSO (Optional) + +If you want to enable SSO authentication: + +```bash +# In .env file - Example for GitHub +SSO_ENABLED=true +SSO_PROVIDERS=["github"] + +# GitHub configuration +GITHUB_CLIENT_ID=your-github-app-id +GITHUB_CLIENT_SECRET=your-github-app-secret + +# Admin assignment (optional) +SSO_AUTO_ADMIN_DOMAINS=["yourcompany.com"] +SSO_GITHUB_ADMIN_ORGS=["your-org"] +``` + +### 3. Create Additional Teams + +After migration, you can create organizational teams: + +```bash +# Via API (with admin token) +curl -X POST http://localhost:4444/admin/teams \ + -H "Authorization: Bearer YOUR_JWT_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "Engineering Team", + "description": "Development and engineering resources", + "visibility": "private" + }' + +# Or use the Admin UI at http://localhost:4444/admin +``` + +## Understanding the Migration + +### What Happened to My Old Data? + +The consolidated migration automatically handles your existing resources in a single, seamless process: + +1. **Schema Creation**: Creates all multitenancy tables (users, teams, roles, etc.) +2. **Column Addition**: Adds `team_id`, `owner_email`, and `visibility` columns to existing resource tables +3. **Admin User Creation**: Creates platform admin user (from `PLATFORM_ADMIN_EMAIL`) +4. **Personal Team Creation**: Creates personal team for the admin user +5. **Data Population**: **Automatically assigns old resources** to admin's personal team with "public" visibility + +This all happens in the consolidated migration `cfc3d6aa0fb2`, so no additional steps are needed. + +### Team Assignment Logic + +``` +Old Server (pre-migration): +├── team_id: NULL +├── owner_email: NULL +└── visibility: NULL + +Migrated Server (post-migration): +├── team_id: "admin-personal-team-id" +├── owner_email: "your-admin@yourcompany.com" +└── visibility: "public" +``` + +### Why "Public" Visibility? + +Old servers are set to "public" visibility to ensure they remain accessible to all users immediately after migration. You can adjust visibility per resource: + +- **Private**: Only the owner can access +- **Team**: All team members can access +- **Public**: All authenticated users can access + +## Customizing Resource Ownership + +### Reassign Resources to Specific Teams + +After migration, you may want to move resources to appropriate teams: + +```bash +# Example: Move a server to a specific team +curl -X PUT http://localhost:4444/admin/servers/SERVER_ID \ + -H "Authorization: Bearer YOUR_JWT_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "team_id": "target-team-id", + "visibility": "team" + }' +``` + +### Change Resource Visibility + +```bash +# Make a resource private (owner only) +curl -X PUT http://localhost:4444/admin/servers/SERVER_ID \ + -H "Authorization: Bearer YOUR_JWT_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{"visibility": "private"}' + +# Make it visible to team members +curl -X PUT http://localhost:4444/admin/servers/SERVER_ID \ + -H "Authorization: Bearer YOUR_JWT_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{"visibility": "team"}' +``` + +## Troubleshooting + +### Issue: Servers Not Visible After Migration + +**Problem**: Old servers don't appear in the Virtual Servers list. + +**Solution**: This should not happen with the current migration. If it does, check: + +```bash +# Check if servers have team assignments +python3 -c " +from mcpgateway.db import SessionLocal, Server +with SessionLocal() as db: + total_servers = db.query(Server).count() + servers_without_team = db.query(Server).filter(Server.team_id == None).count() + print(f'Total servers: {total_servers}') + print(f'Servers without team: {servers_without_team}') + if servers_without_team > 0: + print('ISSUE: Some servers lack team assignment') + print('Re-run the migration: python3 -m mcpgateway.bootstrap_db') + else: + print('✓ All servers have team assignments') +" +``` + +**Root Cause**: The consolidated migration should handle this automatically. If you still see issues: + +1. **First, try the fix script** (recommended): + ```bash + python3 scripts/fix_multitenancy_0_7_0_resources.py + ``` + +2. **If that doesn't work**, ensure `PLATFORM_ADMIN_EMAIL` is set and re-run migration: + ```bash + export PLATFORM_ADMIN_EMAIL="your-admin@company.com" + python3 -m mcpgateway.bootstrap_db + ``` + +### Issue: Migration Uses Wrong Admin Email + +**Problem**: Migration created admin user with default email (`admin@example.com`) instead of your configured email. + +**Root Cause**: `.env` file not properly configured before migration. + +**Solution**: +1. **Check your `.env` configuration**: + ```bash + # Verify your settings are loaded + python3 -c " + from mcpgateway.config import settings + print(f'Admin email: {settings.platform_admin_email}') + print(f'Email auth enabled: {settings.email_auth_enabled}') + " + ``` + +2. **If settings are wrong, update `.env` and re-run**: + ```bash + # Edit your .env file + nano .env # Set PLATFORM_ADMIN_EMAIL=your-email@company.com + + # Re-run migration + python3 -m mcpgateway.bootstrap_db + ``` + +### Issue: Admin User Not Created + +**Problem**: Platform admin user was not created during migration. + +**Solution**: Check configuration and re-run: + +```bash +# First, verify .env configuration +python3 -c " +from mcpgateway.config import settings +print(f'Admin email: {settings.platform_admin_email}') +print(f'Email auth: {settings.email_auth_enabled}') +" + +# If EMAIL_AUTH_ENABLED=false, the admin won't be created +# Set EMAIL_AUTH_ENABLED=true in .env and re-run: +python3 -m mcpgateway.bootstrap_db + +# Or manually create using bootstrap function: +python3 -c " +import asyncio +from mcpgateway.bootstrap_db import bootstrap_admin_user +asyncio.run(bootstrap_admin_user()) +" +``` + +### Issue: Personal Team Not Created + +**Problem**: Admin user exists but has no personal team. + +**Solution**: Create personal team manually: + +```bash +python3 -c " +import asyncio +from mcpgateway.db import SessionLocal, EmailUser +from mcpgateway.services.personal_team_service import PersonalTeamService + +async def create_admin_team(): + with SessionLocal() as db: + # Replace with your admin email + admin_email = 'admin@example.com' + admin = db.query(EmailUser).filter(EmailUser.email == admin_email).first() + if admin: + service = PersonalTeamService(db) + team = await service.create_personal_team(admin) + print(f'Created personal team: {team.name} (id: {team.id})') + +asyncio.run(create_admin_team()) +" +``` + +### Issue: Migration Fails During Execution + +**Problem**: Migration encounters errors during execution. + +**Solution**: Check the logs and fix common issues: + +```bash +# Check database connectivity +python3 -c " +from mcpgateway.db import engine +try: + with engine.connect() as conn: + result = conn.execute('SELECT 1') + print('Database connection: OK') +except Exception as e: + print(f'Database error: {e}') +" + +# Check required environment variables +python3 -c " +from mcpgateway.config import settings +print(f'Database URL: {settings.database_url}') +print(f'Admin email: {settings.platform_admin_email}') +print(f'Email auth enabled: {settings.email_auth_enabled}') +" + +# Run migration with verbose output +export LOG_LEVEL=DEBUG +python3 -m mcpgateway.bootstrap_db +``` + +## Rollback Procedure + +If you need to rollback the migration: + +### 1. Restore Database Backup + +```bash +# For SQLite +cp mcp.db.backup.YYYYMMDD_HHMMSS mcp.db + +# For PostgreSQL +dropdb mcp +createdb mcp +psql -d mcp < mcp_backup_YYYYMMDD_HHMMSS.sql + +# For MySQL +mysql -u mysql -p -e "DROP DATABASE mcp; CREATE DATABASE mcp;" +mysql -u mysql -p mcp < mcp_backup_YYYYMMDD_HHMMSS.sql +``` + +### 2. Revert Environment Configuration + +```bash +# Restore previous environment +cp .env.backup.YYYYMMDD_HHMMSS .env + +# Disable email auth if you want to go back to basic auth only +EMAIL_AUTH_ENABLED=false +``` + +### 3. Use Previous Codebase Version + +```bash +# Check out the previous version +git checkout v0.6.0 # or your previous version tag + +# Reinstall dependencies +make install-dev +``` + +## Verification Checklist + +After completing the migration, verify using the automated verification script: + +```bash +# Run comprehensive verification +python3 scripts/verify_multitenancy_0_7_0_migration.py +``` + +Manual checks (if needed): +- [ ] Database migration completed without errors +- [ ] Platform admin user created successfully +- [ ] Personal team created for admin user +- [ ] Old servers are visible in Virtual Servers list +- [ ] Admin UI accessible at `/admin` endpoint +- [ ] Authentication works (email + password) +- [ ] Basic auth still works (if `AUTH_REQUIRED=true`) +- [ ] API endpoints respond correctly +- [ ] Resource creation works and assigns to teams + +**If verification fails**: Use the fix script: +```bash +python3 scripts/fix_multitenancy_0_7_0_resources.py +``` + +## Getting Help + +If you encounter issues during migration: + +1. **Check the logs**: Set `LOG_LEVEL=DEBUG` for verbose output +2. **Review troubleshooting section** above for common issues +3. **File an issue**: https://github.com/anthropics/claude-code/issues +4. **Include information**: Database type, error messages, relevant logs + +## Next Steps + +After successful migration: + +1. **Review team structure**: Plan how to organize your teams +2. **Configure SSO**: Set up integration with your identity provider +3. **Set up RBAC**: Configure roles and permissions as needed +4. **Train users**: Introduce team-based workflows +5. **Monitor usage**: Use the new audit logs and metrics + +The multi-tenant architecture provides much more flexibility and security for managing resources across teams and users. Take time to explore the new admin UI and team management features. + +## Quick Reference + +### Essential Commands +```bash +# 1. BACKUP (before migration) +cp mcp.db mcp.db.backup.$(date +%Y%m%d_%H%M%S) +curl -u admin:changeme "http://localhost:4444/admin/export/configuration" -o config_backup.json + +# 2. SETUP .ENV (required) +cp .env.example .env # then edit with your admin credentials + +# 3. VERIFY CONFIG +python3 -c "from mcpgateway.config import settings; print(f'Admin: {settings.platform_admin_email}')" + +# 4. MIGRATE +python3 -m mcpgateway.bootstrap_db + +# 5. VERIFY SUCCESS +python3 scripts/verify_multitenancy_0_7_0_migration.py + +# 6. FIX IF NEEDED +python3 scripts/fix_multitenancy_0_7_0_resources.py +``` + +### Important URLs +- **Admin UI**: http://localhost:4444/admin +- **Export Config**: http://localhost:4444/admin/export/configuration +- **Import Config**: http://localhost:4444/admin/import/configuration \ No newline at end of file diff --git a/mcpgateway/alembic/versions/cfc3d6aa0fb2_consolidated_multiuser_team_rbac_.py b/mcpgateway/alembic/versions/cfc3d6aa0fb2_consolidated_multiuser_team_rbac_.py index 7c80aafdb..924d19530 100644 --- a/mcpgateway/alembic/versions/cfc3d6aa0fb2_consolidated_multiuser_team_rbac_.py +++ b/mcpgateway/alembic/versions/cfc3d6aa0fb2_consolidated_multiuser_team_rbac_.py @@ -16,6 +16,7 @@ # Third-Party from alembic import op import sqlalchemy as sa +from sqlalchemy import text # revision identifiers, used by Alembic. revision: str = "cfc3d6aa0fb2" @@ -435,6 +436,463 @@ def add_team_columns_if_not_exists(table_name: str): # Ensure index on email for quick lookup (safe on both SQLite/PostgreSQL) safe_create_index(op.f("ix_pending_user_approvals_email"), "pending_user_approvals", ["email"]) + # =============================== + # STEP 9: Populate Team Data for Existing Resources + # =============================== + + # This step ensures old resources (created before multitenancy) get assigned + # to the platform admin's personal team, making them visible in the UI + + # =============================== + # VALIDATION & CONFIGURATION + # =============================== + + print("🔧 Starting team data population for existing resources...") + + # Get platform admin configuration from settings (consistent with bootstrap_db.py) + try: + # First-Party + from mcpgateway.config import settings + + platform_admin_email = settings.platform_admin_email + platform_admin_password = settings.platform_admin_password + platform_admin_full_name = settings.platform_admin_full_name + print(f"📧 Using platform admin email from settings: {platform_admin_email}") + except Exception as e: + print(f"⚠️ Warning: Could not load settings: {e}") + print("🔄 Falling back to environment variables...") + + # Fallback to direct environment reading + # Standard + import os + + platform_admin_email = os.getenv("PLATFORM_ADMIN_EMAIL", "admin@example.com") + platform_admin_password = os.getenv("PLATFORM_ADMIN_PASSWORD", "changeme") + platform_admin_full_name = os.getenv("PLATFORM_ADMIN_FULL_NAME", "Platform Administrator") + print(f"📧 Using platform admin email from environment: {platform_admin_email}") + + # Validate admin email format + # Standard + import re + + email_pattern = r"^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$" + if not re.match(email_pattern, platform_admin_email): + print(f"❌ ERROR: Invalid admin email format: {platform_admin_email}") + print("⚠️ Skipping team data population - please fix PLATFORM_ADMIN_EMAIL") + return + + # Validate password strength + if len(platform_admin_password) < 8: + print(f"⚠️ Warning: Admin password is short ({len(platform_admin_password)} chars). Consider using a stronger password.") + + # Get current timestamp for database operations + # Standard + from datetime import datetime, timezone + + current_timestamp = datetime.now(timezone.utc) + print(f"⏰ Migration timestamp: {current_timestamp.isoformat()}") + + # Database connection validation + try: + # Test database connection + db_type = str(bind.engine.url).split(":")[0].lower() + print(f"🗄️ Database type detected: {db_type}") + + # Test basic query + test_result = bind.execute(text("SELECT 1")).scalar() + if test_result != 1: + raise Exception("Database test query failed") + print("✅ Database connection verified") + except Exception as e: + print(f"❌ ERROR: Database connection test failed: {e}") + print("⚠️ Aborting team data population") + return + + # =============================== + # ADMIN USER CREATION + # =============================== + + print("👤 Checking platform admin user...") + + if "email_users" not in existing_tables: + print("⚠️ Warning: email_users table not found - multitenancy tables may not be created yet") + print("🔄 This is normal for fresh installations") + else: + try: + # Check if admin user exists + result = bind.execute( + text("SELECT email, is_admin, is_active FROM email_users WHERE email = :email"), + {"email": platform_admin_email}, + ).fetchone() + + if result: + email, is_admin, is_active = result + print(f"✅ Admin user found: {email}") + print(f" - Is admin: {is_admin}") + print(f" - Is active: {is_active}") + + if not is_admin: + print("⚠️ Warning: User exists but is not admin - updating admin status") + bind.execute(text("UPDATE email_users SET is_admin = :is_admin WHERE email = :email"), {"is_admin": True, "email": platform_admin_email}) + print("✅ Admin status updated") + else: + print(f"👤 Creating platform admin user: {platform_admin_email}") + + # Hash password using the same method as the application + password_hash_type = "argon2id" + try: + # First-Party + from mcpgateway.services.argon2_service import Argon2PasswordService + + password_service = Argon2PasswordService() + password_hash = password_service.hash_password(platform_admin_password) + print("🔐 Using Argon2 password hashing") + except ImportError as e: + # Fallback to a basic hash if the service is not available + # Standard + import hashlib + + password_hash = hashlib.sha256(platform_admin_password.encode()).hexdigest() + password_hash_type = "sha256" + print(f"⚠️ Warning: Argon2 not available ({e}), using SHA256 fallback") + + # Validate password hash was created + if not password_hash or len(password_hash) < 20: + print("❌ ERROR: Password hashing failed - aborting admin user creation") + print("⚠️ Please check password service configuration") + return + + bind.execute( + text( + """ + INSERT INTO email_users ( + email, password_hash, full_name, is_admin, is_active, + auth_provider, password_hash_type, failed_login_attempts, + created_at, updated_at, email_verified_at + ) VALUES ( + :email, :password_hash, :full_name, :is_admin, :is_active, + :auth_provider, :password_hash_type, :failed_login_attempts, + :created_at, :updated_at, :email_verified_at + ) + """ + ), + { + "email": platform_admin_email, + "password_hash": password_hash, + "full_name": platform_admin_full_name, + "is_admin": True, + "is_active": True, + "auth_provider": "local", + "password_hash_type": password_hash_type, + "failed_login_attempts": 0, + "created_at": current_timestamp, + "updated_at": current_timestamp, + "email_verified_at": current_timestamp, + }, + ) + + # Verify user was created + verify_result = bind.execute(text("SELECT email FROM email_users WHERE email = :email"), {"email": platform_admin_email}).fetchone() + + if verify_result: + print("✅ Admin user created successfully") + else: + print("❌ ERROR: Admin user creation failed - user not found after INSERT") + return + + except Exception as e: + print(f"❌ ERROR: Admin user creation failed: {e}") + print("⚠️ Continuing with migration, but admin user may not be available") + # Standard + import traceback + + traceback.print_exc() + + # =============================== + # ADMIN PERSONAL TEAM CREATION + # =============================== + + print("🏢 Checking admin personal team...") + admin_team_id = None + + if "email_teams" not in existing_tables: + print("⚠️ Warning: email_teams table not found - multitenancy tables may not be created yet") + else: + try: + # Check if admin has a personal team + result = bind.execute( + text( + """ + SELECT id, name, slug, visibility, is_active FROM email_teams + WHERE created_by = :email AND is_personal = true AND is_active = true + """ + ), + {"email": platform_admin_email}, + ).fetchone() + + if result: + admin_team_id, team_name, team_slug, visibility, is_active = result + print("✅ Found existing admin personal team:") + print(f" - ID: {admin_team_id}") + print(f" - Name: {team_name}") + print(f" - Slug: {team_slug}") + print(f" - Visibility: {visibility}") + print(f" - Active: {is_active}") + else: + print("👥 Creating personal team for admin user...") + + # Generate a unique team ID and slug + # Standard + import uuid + + admin_team_id = str(uuid.uuid4()) + + # Create safe slug from email + safe_email = platform_admin_email.replace("@", "-").replace(".", "-").lower() + # Remove any potentially problematic characters + safe_email = re.sub(r"[^a-z0-9-]", "-", safe_email) + team_slug = f"personal-{safe_email}" + + # Ensure slug is not too long (database constraint) + if len(team_slug) > 255: + team_slug = team_slug[:255] + print(f"⚠️ Team slug truncated to fit database constraint: {len(team_slug)} chars") + + team_name = f"{platform_admin_full_name}'s Team" + if len(team_name) > 255: + team_name = team_name[:252] + "..." + print("⚠️ Team name truncated to fit database constraint") + + print(f" - Team ID: {admin_team_id}") + print(f" - Team name: {team_name}") + print(f" - Team slug: {team_slug}") + + # Check for slug conflicts (though unlikely) + conflict_check = bind.execute(text("SELECT id FROM email_teams WHERE slug = :slug"), {"slug": team_slug}).fetchone() + + if conflict_check: + # Add timestamp suffix to make unique + # Standard + import time + + team_slug = f"{team_slug}-{int(time.time())}" + print(f"⚠️ Slug conflict detected, using: {team_slug}") + + bind.execute( + text( + """ + INSERT INTO email_teams ( + id, name, slug, description, created_by, is_personal, + visibility, is_active, created_at, updated_at + ) VALUES ( + :id, :name, :slug, :description, :created_by, :is_personal, + :visibility, :is_active, :created_at, :updated_at + ) + """ + ), + { + "id": admin_team_id, + "name": team_name, + "slug": team_slug, + "description": "Personal team for platform administrator", + "created_by": platform_admin_email, + "is_personal": True, + "visibility": "private", + "is_active": True, + "created_at": current_timestamp, + "updated_at": current_timestamp, + }, + ) + + # Verify team was created + verify_team = bind.execute(text("SELECT id, name FROM email_teams WHERE id = :team_id"), {"team_id": admin_team_id}).fetchone() + + if not verify_team: + print("❌ ERROR: Team creation failed - team not found after INSERT") + return + + print("✅ Admin personal team created successfully") + + # Add admin as owner of the personal team + if "email_team_members" in existing_tables: + print("👥 Adding admin as team owner...") + member_id = str(uuid.uuid4()) + + bind.execute( + text( + """ + INSERT INTO email_team_members ( + id, team_id, user_email, role, joined_at, is_active + ) VALUES ( + :id, :team_id, :user_email, :role, :joined_at, :is_active + ) + """ + ), + {"id": member_id, "team_id": admin_team_id, "user_email": platform_admin_email, "role": "owner", "joined_at": current_timestamp, "is_active": True}, + ) + + # Verify membership was created + verify_member = bind.execute( + text("SELECT role FROM email_team_members WHERE team_id = :team_id AND user_email = :email"), {"team_id": admin_team_id, "email": platform_admin_email} + ).fetchone() + + if verify_member: + print(f"✅ Admin added as team {verify_member[0]}") + else: + print("❌ ERROR: Team membership creation failed") + # Continue anyway, team exists + else: + print("⚠️ email_team_members table not found - membership not created") + + except Exception as e: + print(f"❌ ERROR: Personal team creation failed: {e}") + print("⚠️ Continuing with migration, but team assignments may not work") + # Standard + import traceback + + traceback.print_exc() + + # =============================== + # RESOURCE TEAM ASSIGNMENT + # =============================== + + if not admin_team_id: + print("❌ ERROR: No admin team available - cannot assign resources") + print("⚠️ Old resources will remain unassigned and may not be visible") + print("💡 Run the fix script after migration to resolve this") + return + + print("📦 Starting resource team assignment...") + print(f"🎯 Target team: {admin_team_id}") + + # Track migration statistics + migration_stats = {"tables_processed": 0, "resources_found": 0, "resources_migrated": 0, "errors": 0} + + # Validate resource tables exist and have required columns + valid_tables = [] + for table_name in resource_tables: + if table_name in existing_tables: + # Validate table name to prevent SQL injection (whitelist approach) + if table_name not in ["prompts", "resources", "servers", "tools", "gateways", "a2a_agents"]: + print(f"⚠️ Skipping unknown table: {table_name}") + continue + + # Check if table has the multitenancy columns + try: + columns = [col["name"] for col in inspector.get_columns(table_name)] + if "team_id" in columns and "owner_email" in columns and "visibility" in columns: + valid_tables.append(table_name) + print(f"✅ {table_name}: multitenancy columns present") + else: + missing_cols = [] + for col in ["team_id", "owner_email", "visibility"]: + if col not in columns: + missing_cols.append(col) + print(f"⚠️ {table_name}: missing columns {missing_cols} - skipping") + except Exception as e: + print(f"❌ {table_name}: column inspection failed - {e}") + migration_stats["errors"] += 1 + else: + print(f"⚠️ {table_name}: table not found - skipping") + + if not valid_tables: + print("⚠️ No valid resource tables found for migration") + return + + print(f"📋 Processing {len(valid_tables)} resource tables: {', '.join(valid_tables)}") + + # Process each resource table + for table_name in valid_tables: + try: + print(f"\\n🔄 Processing {table_name}...") + migration_stats["tables_processed"] += 1 + + # Count total resources in table + total_count = bind.execute(text(f"SELECT COUNT(*) FROM {table_name}")).scalar() + print(f" 📊 Total {table_name}: {total_count}") + + # Find resources needing migration + select_sql = f"SELECT id, name FROM {table_name} WHERE team_id IS NULL OR owner_email IS NULL OR visibility IS NULL" + old_resources = bind.execute(text(select_sql)).fetchall() + + if not old_resources: + print(f" ✅ {table_name}: all resources already have team assignments") + continue + + migration_stats["resources_found"] += len(old_resources) + print(f" 🔧 Found {len(old_resources)} {table_name} needing migration") + + # Show sample of resources being migrated (first 3) + for i, (resource_id, resource_name) in enumerate(old_resources[:3]): + name_display = resource_name[:50] + "..." if len(resource_name) > 50 else resource_name + print(f" • {name_display} (ID: {resource_id})") + + if len(old_resources) > 3: + print(f" • ... and {len(old_resources) - 3} more") + + # Perform the migration + update_sql = f""" + UPDATE {table_name} + SET team_id = :team_id, + owner_email = :owner_email, + visibility = :visibility + WHERE team_id IS NULL OR owner_email IS NULL OR visibility IS NULL + """ + + result = bind.execute(text(update_sql), {"team_id": admin_team_id, "owner_email": platform_admin_email, "visibility": "public"}) # Make visible to all users initially + + rows_updated = result.rowcount + migration_stats["resources_migrated"] += rows_updated + + if rows_updated == len(old_resources): + print(f" ✅ Successfully migrated {rows_updated} {table_name}") + else: + print(f" ⚠️ Expected {len(old_resources)}, updated {rows_updated} {table_name}") + + # Verify migration + remaining = bind.execute(text(select_sql)).fetchall() + if remaining: + print(f" ⚠️ {len(remaining)} {table_name} still need migration") + else: + print(f" ✅ All {table_name} successfully migrated") + + except Exception as e: + print(f" ❌ ERROR migrating {table_name}: {e}") + migration_stats["errors"] += 1 + # Standard + import traceback + + traceback.print_exc() + continue + + # =============================== + # MIGRATION SUMMARY + # =============================== + + print("\\n" + "=" * 60) + print("📊 TEAM DATA POPULATION SUMMARY") + print("=" * 60) + print(f"✅ Tables processed: {migration_stats['tables_processed']}") + print(f"🔍 Resources found: {migration_stats['resources_found']}") + print(f"📦 Resources migrated: {migration_stats['resources_migrated']}") + print(f"❌ Errors encountered: {migration_stats['errors']}") + + if migration_stats["errors"] == 0: + print("🎉 Team data population completed successfully!") + else: + print("⚠️ Team data population completed with errors") + + print(f"👤 All migrated resources assigned to: {platform_admin_email}") + print(f"🏢 Target team: {admin_team_id}") + print("👁️ Default visibility: public") + print("=" * 60) + + if migration_stats["resources_migrated"] > 0: + print("💡 Next steps:") + print(" 1. Run verification: python3 scripts/verify_multitenancy_0_7_0_migration.py") + print(" 2. Check admin UI: /admin to see your resources") + print(" 3. Adjust visibility settings as needed") + # Note: Foreign key constraints are intentionally omitted for SQLite compatibility # The ORM models handle the relationships properly diff --git a/scripts/fix_multitenancy_0_7_0_resources.py b/scripts/fix_multitenancy_0_7_0_resources.py new file mode 100755 index 000000000..f865d311c --- /dev/null +++ b/scripts/fix_multitenancy_0_7_0_resources.py @@ -0,0 +1,146 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +"""MCP Gateway v0.7.0 Multitenancy Resource Fix + +This script finds and fixes resources that lack proper team assignments +after the v0.6.0 → v0.7.0 multitenancy migration. This can happen if: +- Resources were created after the initial migration +- Migration was incomplete for some resources +- Database had edge cases not handled by the main migration + +Fixes: servers, tools, resources, prompts, gateways, a2a_agents + +Usage: + python3 scripts/fix_multitenancy_0_7_0_resources.py +""" + +import sys +import os +from pathlib import Path + +# Add project root to Python path +project_root = Path(__file__).parent.parent +sys.path.insert(0, str(project_root)) + +try: + from mcpgateway.db import SessionLocal, EmailUser, EmailTeam, Server, Tool, Resource + from mcpgateway.config import settings + from sqlalchemy import text +except ImportError as e: + print(f"❌ Import error: {e}") + print("Make sure you're running this from the project root directory") + sys.exit(1) + + +def fix_unassigned_resources(): + """Fix resources that lack proper team assignments.""" + + print("🔧 MCP Gateway - Fix Unassigned Resources") + print("=" * 50) + + try: + with SessionLocal() as db: + + # 1. Find admin user and personal team + print("🔍 Finding admin user and personal team...") + admin_email = settings.platform_admin_email + admin_user = db.query(EmailUser).filter( + EmailUser.email == admin_email, + EmailUser.is_admin == True + ).first() + + if not admin_user: + print(f"❌ Admin user not found: {admin_email}") + print("Make sure the migration has run and admin user exists") + return False + + personal_team = db.query(EmailTeam).filter( + EmailTeam.created_by == admin_user.email, + EmailTeam.is_personal == True, + EmailTeam.is_active == True + ).first() + + if not personal_team: + print(f"❌ Personal team not found for admin: {admin_user.email}") + return False + + print(f"✅ Found admin: {admin_user.email}") + print(f"✅ Found personal team: {personal_team.name} ({personal_team.id})") + + # 2. Fix each resource type + resource_types = [ + ("servers", Server), + ("tools", Tool), + ("resources", Resource) + ] + + total_fixed = 0 + + for table_name, resource_model in resource_types: + print(f"\n📋 Processing {table_name}...") + + # Find unassigned resources + unassigned = db.query(resource_model).filter( + (resource_model.team_id == None) | + (resource_model.owner_email == None) | + (resource_model.visibility == None) + ).all() + + if not unassigned: + print(f" ✅ No unassigned {table_name} found") + continue + + print(f" 🔧 Fixing {len(unassigned)} unassigned {table_name}...") + + for resource in unassigned: + resource_name = getattr(resource, 'name', 'Unknown') + print(f" - Assigning: {resource_name}") + + # Assign to admin's personal team + resource.team_id = personal_team.id + resource.owner_email = admin_user.email + + # Set visibility to public if not already set + if not hasattr(resource, 'visibility') or resource.visibility is None: + resource.visibility = "public" + + total_fixed += 1 + + # Commit changes for this resource type + db.commit() + print(f" ✅ Fixed {len(unassigned)} {table_name}") + + print(f"\n🎉 Successfully fixed {total_fixed} resources!") + print(f" All resources now assigned to: {personal_team.name}") + print(f" Owner email: {admin_user.email}") + print(f" Default visibility: public") + + return True + + except Exception as e: + print(f"\n❌ Fix operation failed: {e}") + import traceback + traceback.print_exc() + return False + + +def main(): + """Main function with user confirmation.""" + + print("This script will assign unassigned resources to the platform admin's personal team.") + print("This is safe and will make resources visible in the team-based UI.\n") + + response = input("Continue? (y/N): ").lower().strip() + if response not in ('y', 'yes'): + print("Operation cancelled.") + return + + if fix_unassigned_resources(): + print("\n✅ Fix completed successfully!") + print("🔍 Run verification script to confirm: python3 scripts/verify_multitenancy_0_7_0_migration.py") + else: + print("\n❌ Fix operation failed. Check the errors above.") + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/scripts/verify_multitenancy_0_7_0_migration.py b/scripts/verify_multitenancy_0_7_0_migration.py new file mode 100755 index 000000000..fee9ca8b8 --- /dev/null +++ b/scripts/verify_multitenancy_0_7_0_migration.py @@ -0,0 +1,253 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +"""MCP Gateway v0.7.0 Multitenancy Migration Verification + +This script verifies that the v0.6.0 → v0.7.0 multitenancy migration +completed successfully and that old servers/resources are visible in +the new team-based system. + +Checks: +- Platform admin user creation +- Personal team setup +- Resource team assignments (servers, tools, resources) +- Visibility settings +- Team membership + +Usage: + python3 scripts/verify_multitenancy_0_7_0_migration.py +""" + +import sys +import os +from pathlib import Path + +# Add project root to Python path +project_root = Path(__file__).parent.parent +sys.path.insert(0, str(project_root)) + +try: + from mcpgateway.db import ( + SessionLocal, EmailUser, EmailTeam, EmailTeamMember, + Server, Tool, Resource, Role, UserRole + ) + from mcpgateway.config import settings + from sqlalchemy import text +except ImportError as e: + print(f"❌ Import error: {e}") + print("Make sure you're running this from the project root directory") + sys.exit(1) + + +def verify_migration(): + """Verify the multitenancy migration was successful.""" + + print("🔍 MCP Gateway v0.7.0 Multitenancy Migration Verification") + print("📅 Migration: v0.6.0 → v0.7.0") + print("=" * 65) + + success = True + + try: + with SessionLocal() as db: + + # 1. Check admin user exists + print("\n📋 1. ADMIN USER CHECK") + admin_email = settings.platform_admin_email + admin_user = db.query(EmailUser).filter( + EmailUser.email == admin_email, + EmailUser.is_admin == True + ).first() + + if admin_user: + print(f" ✅ Admin user found: {admin_user.email}") + print(f" Full name: {admin_user.full_name}") + print(f" Is admin: {admin_user.is_admin}") + print(f" Is active: {admin_user.is_active}") + else: + print(f" ❌ Admin user not found: {admin_email}") + success = False + + # 2. Check personal team exists + print("\n🏢 2. PERSONAL TEAM CHECK") + if admin_user: + personal_team = db.query(EmailTeam).filter( + EmailTeam.created_by == admin_user.email, + EmailTeam.is_personal == True, + EmailTeam.is_active == True + ).first() + + if personal_team: + print(f" ✅ Personal team found: {personal_team.name}") + print(f" Team ID: {personal_team.id}") + print(f" Slug: {personal_team.slug}") + print(f" Visibility: {personal_team.visibility}") + else: + print(f" ❌ Personal team not found for admin: {admin_user.email}") + success = False + else: + personal_team = None + print(" ⚠️ Cannot check personal team (admin user missing)") + + # 3. Check resource assignments + print("\n📦 3. RESOURCE ASSIGNMENT CHECK") + resource_types = [ + ("Servers", Server), + ("Tools", Tool), + ("Resources", Resource) + ] + + for resource_name, resource_model in resource_types: + total_count = db.query(resource_model).count() + assigned_count = db.query(resource_model).filter( + resource_model.team_id != None, + resource_model.owner_email != None, + resource_model.visibility != None + ).count() + unassigned_count = total_count - assigned_count + + print(f" {resource_name}:") + print(f" Total: {total_count}") + print(f" Assigned to teams: {assigned_count}") + print(f" Unassigned: {unassigned_count}") + + if unassigned_count > 0: + print(f" ❌ {unassigned_count} {resource_name.lower()} lack team assignment!") + success = False + + # Show details of unassigned resources + unassigned = db.query(resource_model).filter( + (resource_model.team_id == None) | + (resource_model.owner_email == None) | + (resource_model.visibility == None) + ).limit(3).all() + + for resource in unassigned: + name = getattr(resource, 'name', 'Unknown') + print(f" - {name} (ID: {resource.id})") + print(f" team_id: {getattr(resource, 'team_id', 'N/A')}") + print(f" owner_email: {getattr(resource, 'owner_email', 'N/A')}") + print(f" visibility: {getattr(resource, 'visibility', 'N/A')}") + else: + print(f" ✅ All {resource_name.lower()} properly assigned") + + # 4. Check visibility distribution + if personal_team: + print("\n👁️ 4. VISIBILITY DISTRIBUTION") + + for resource_name, resource_model in resource_types: + if hasattr(resource_model, 'visibility'): + visibility_counts = {} + resources = db.query(resource_model).all() + + for resource in resources: + vis = getattr(resource, 'visibility', 'unknown') + visibility_counts[vis] = visibility_counts.get(vis, 0) + 1 + + print(f" {resource_name}:") + for visibility, count in visibility_counts.items(): + print(f" {visibility}: {count}") + + # 5. Database schema validation + print("\n🗄️ 5. DATABASE SCHEMA VALIDATION") + + # Check if we can access multitenancy models (proves schema exists) + schema_checks = [] + try: + user_count = db.query(EmailUser).count() + team_count = db.query(EmailTeam).count() + member_count = db.query(EmailTeamMember).count() + print(f" ✅ EmailUser model: {user_count} records") + print(f" ✅ EmailTeam model: {team_count} records") + print(f" ✅ EmailTeamMember model: {member_count} records") + schema_checks.append("core_auth") + except Exception as e: + print(f" ❌ Core auth models inaccessible: {e}") + success = False + + try: + role_count = db.query(Role).count() + user_role_count = db.query(UserRole).count() + print(f" ✅ Role model: {role_count} records") + print(f" ✅ UserRole model: {user_role_count} records") + schema_checks.append("rbac") + except Exception as e: + print(f" ❌ RBAC models inaccessible: {e}") + success = False + + # Verify resource models have team attributes + resource_models = [ + ("Server", Server), + ("Tool", Tool), + ("Resource", Resource) + ] + + for model_name, model_class in resource_models: + try: + # Check if model has team attributes + sample = db.query(model_class).first() + if sample: + has_team_id = hasattr(sample, 'team_id') + has_owner_email = hasattr(sample, 'owner_email') + has_visibility = hasattr(sample, 'visibility') + + if has_team_id and has_owner_email and has_visibility: + print(f" ✅ {model_name}: has multitenancy attributes") + else: + missing_attrs = [] + if not has_team_id: missing_attrs.append('team_id') + if not has_owner_email: missing_attrs.append('owner_email') + if not has_visibility: missing_attrs.append('visibility') + print(f" ❌ {model_name}: missing {missing_attrs}") + success = False + else: + print(f" ⚠️ {model_name}: no records to check") + except Exception as e: + print(f" ❌ {model_name}: model access failed - {e}") + success = False + + if "core_auth" in schema_checks and "rbac" in schema_checks: + print(" ✅ Multitenancy schema fully operational") + + # 6. Team membership check + print("\n👥 6. TEAM MEMBERSHIP CHECK") + if admin_user and personal_team: + membership = db.query(EmailTeamMember).filter( + EmailTeamMember.team_id == personal_team.id, + EmailTeamMember.user_email == admin_user.email, + EmailTeamMember.is_active == True + ).first() + + if membership: + print(f" ✅ Admin is member of personal team") + print(f" Role: {membership.role}") + print(f" Joined: {membership.joined_at}") + else: + print(f" ❌ Admin is not a member of personal team") + success = False + + except Exception as e: + print(f"\n❌ Verification failed with error: {e}") + import traceback + traceback.print_exc() + return False + + print("\n" + "=" * 65) + if success: + print("🎉 MIGRATION VERIFICATION: SUCCESS!") + print("\n✅ All checks passed. Your migration completed successfully.") + print("✅ Old servers should now be visible in the Virtual Servers list.") + print("✅ Resources are properly assigned to teams with appropriate visibility.") + print(f"\n🚀 You can now access the admin UI at: /admin") + print(f"📧 Login with admin email: {settings.platform_admin_email}") + return True + else: + print("❌ MIGRATION VERIFICATION: FAILED!") + print("\n⚠️ Some issues were detected. Please check the details above.") + print("💡 You may need to re-run the migration or check your configuration.") + print(f"\n📋 To re-run migration: python3 -m mcpgateway.bootstrap_db") + print(f"🔧 Make sure PLATFORM_ADMIN_EMAIL is set in your .env file") + return False + + +if __name__ == "__main__": + verify_migration() \ No newline at end of file From 84c0820b6986a6c829b56b873faece2fd45dc366 Mon Sep 17 00:00:00 2001 From: Mihai Criveti Date: Wed, 3 Sep 2025 18:18:29 +0100 Subject: [PATCH 25/49] Add multitenancy scripts to check migration Signed-off-by: Mihai Criveti --- MANIFEST.in | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/MANIFEST.in b/MANIFEST.in index 2662aa441..f327fad44 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -64,6 +64,10 @@ recursive-include alembic *.py include mcpgateway/cli_export_import.py include mcpgateway/services/export_service.py include mcpgateway/services/import_service.py + +# 📦 Migration scripts (v0.7.0 multitenancy migration tools) +recursive-include scripts *.py + # recursive-include deployment * # recursive-include mcp-servers * recursive-include plugins *.py From 7d48bff98526b4e44eb0ddc1fdd192147b3460de Mon Sep 17 00:00:00 2001 From: Mihai Criveti Date: Thu, 4 Sep 2025 08:47:53 +0100 Subject: [PATCH 26/49] Add manual testing Signed-off-by: Mihai Criveti --- MANIFEST.in | 3 + MIGRATION-0.7.0.md | 42 +- scripts/fix_multitenancy_0_7_0_resources.py | 52 +- .../verify_multitenancy_0_7_0_migration.py | 78 +-- tests/manual/.gitignore | 1 + tests/manual/README.md | 172 +++++++ tests/manual/admin_ui_tests.py | 450 ++++++++++++++++++ tests/manual/api_authentication_tests.py | 420 ++++++++++++++++ tests/manual/api_servers_tests.py | 276 +++++++++++ tests/manual/api_teams_tests.py | 308 ++++++++++++ tests/manual/database_tests.py | 284 +++++++++++ tests/manual/generate_test_plan.sh | 71 +++ tests/manual/generate_test_plan_xlsx.py | 344 +++++++++++++ tests/manual/migration_tests.py | 405 ++++++++++++++++ tests/manual/run_all_tests.py | 331 +++++++++++++ tests/manual/security_tests.py | 380 +++++++++++++++ tests/manual/setup_instructions.py | 342 +++++++++++++ 17 files changed, 3873 insertions(+), 86 deletions(-) create mode 100644 tests/manual/.gitignore create mode 100644 tests/manual/README.md create mode 100644 tests/manual/admin_ui_tests.py create mode 100644 tests/manual/api_authentication_tests.py create mode 100644 tests/manual/api_servers_tests.py create mode 100644 tests/manual/api_teams_tests.py create mode 100644 tests/manual/database_tests.py create mode 100755 tests/manual/generate_test_plan.sh create mode 100644 tests/manual/generate_test_plan_xlsx.py create mode 100644 tests/manual/migration_tests.py create mode 100644 tests/manual/run_all_tests.py create mode 100644 tests/manual/security_tests.py create mode 100644 tests/manual/setup_instructions.py diff --git a/MANIFEST.in b/MANIFEST.in index f327fad44..0f6b5218a 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -68,6 +68,9 @@ include mcpgateway/services/import_service.py # 📦 Migration scripts (v0.7.0 multitenancy migration tools) recursive-include scripts *.py +# 🧪 Testing documentation and plans +recursive-include tests/manual *.py *.md + # recursive-include deployment * # recursive-include mcp-servers * recursive-include plugins *.py diff --git a/MIGRATION-0.7.0.md b/MIGRATION-0.7.0.md index 91e5f5645..18a5b638f 100644 --- a/MIGRATION-0.7.0.md +++ b/MIGRATION-0.7.0.md @@ -21,7 +21,7 @@ This migration includes **2 essential scripts** to help you: - **Checks**: Admin user, personal team, resource assignments, visibility settings - **When**: Run after migration to confirm everything worked -### `scripts/fix_multitenancy_0_7_0_resources.py` +### `scripts/fix_multitenancy_0_7_0_resources.py` - **Purpose**: Fix resources missing team assignments after v0.6.0 → v0.7.0 upgrade - **Fixes**: Assigns orphaned servers/tools/resources to admin's personal team - **When**: Use if verification shows unassigned resources @@ -54,7 +54,7 @@ make dev # or however you normally run it open http://localhost:4444/admin # 3. Navigate to Export/Import section -# 4. Click "Export Configuration" +# 4. Click "Export Configuration" # 5. Save the JSON file (contains servers, tools, resources, etc.) # Or use direct API call (if you have a bearer token): @@ -68,7 +68,7 @@ curl -u admin:changeme \ -o mcp_config_backup_$(date +%Y%m%d_%H%M%S).json ``` -**✅ Benefits**: +**✅ Benefits**: - Preserves all your servers, tools, resources, and settings - Can be imported after migration if needed - Human-readable JSON format @@ -106,7 +106,7 @@ grep -E "PLATFORM_ADMIN_EMAIL|PLATFORM_ADMIN_PASSWORD|EMAIL_AUTH_ENABLED" .env ```bash # Platform Administrator (will be created by migration) PLATFORM_ADMIN_EMAIL=your-admin@yourcompany.com -PLATFORM_ADMIN_PASSWORD=your-secure-password +PLATFORM_ADMIN_PASSWORD=your-secure-password PLATFORM_ADMIN_FULL_NAME="Your Name" # Enable email authentication (required for multi-tenancy) @@ -119,7 +119,7 @@ PERSONAL_TEAM_PREFIX=personal **💡 Tips**: - Use a **real email address** for `PLATFORM_ADMIN_EMAIL` (you'll use this to log in) -- Choose a **strong password** (minimum 8 characters) +- Choose a **strong password** (minimum 8 characters) - Set `EMAIL_AUTH_ENABLED=true` to enable the multitenancy features **🔍 Verify your configuration**: @@ -154,11 +154,11 @@ make install-dev The migration process is automated and handles: - Creating multi-tenancy database schema - Creating platform admin user and personal team -- **Migrating existing servers** to the admin's personal team +- **Migrating existing servers** to the admin's personal team - Setting up default RBAC roles **⚠️ PREREQUISITE**: Ensure `.env` file is configured with `PLATFORM_ADMIN_EMAIL` etc. (see step 3 above) -**✅ Configuration**: Uses your `.env` settings automatically +**✅ Configuration**: Uses your `.env` settings automatically **✅ Database Compatibility**: Works with **SQLite**, **PostgreSQL**, and **MySQL** ```bash @@ -186,7 +186,7 @@ python3 scripts/verify_multitenancy_0_7_0_migration.py This will check: - ✅ Platform admin user creation -- ✅ Personal team creation and membership +- ✅ Personal team creation and membership - ✅ Resource team assignments - ✅ Visibility settings - ✅ Database integrity @@ -199,7 +199,7 @@ This will check: Old servers should now be visible in the Virtual Servers list. They will be: - **Owned by**: Your platform admin user -- **Assigned to**: Admin's personal team +- **Assigned to**: Admin's personal team - **Visibility**: Public (visible to all authenticated users) ### 2. Import Configuration (If Needed) @@ -286,7 +286,7 @@ This all happens in the consolidated migration `cfc3d6aa0fb2`, so no additional ``` Old Server (pre-migration): ├── team_id: NULL -├── owner_email: NULL +├── owner_email: NULL └── visibility: NULL Migrated Server (post-migration): @@ -300,7 +300,7 @@ Migrated Server (post-migration): Old servers are set to "public" visibility to ensure they remain accessible to all users immediately after migration. You can adjust visibility per resource: - **Private**: Only the owner can access -- **Team**: All team members can access +- **Team**: All team members can access - **Public**: All authenticated users can access ## Customizing Resource Ownership @@ -380,7 +380,7 @@ with SessionLocal() as db: **Root Cause**: `.env` file not properly configured before migration. -**Solution**: +**Solution**: 1. **Check your `.env` configuration**: ```bash # Verify your settings are loaded @@ -395,7 +395,7 @@ with SessionLocal() as db: ```bash # Edit your .env file nano .env # Set PLATFORM_ADMIN_EMAIL=your-email@company.com - + # Re-run migration python3 -m mcpgateway.bootstrap_db ``` @@ -474,7 +474,7 @@ except Exception as e: python3 -c " from mcpgateway.config import settings print(f'Database URL: {settings.database_url}') -print(f'Admin email: {settings.platform_admin_email}') +print(f'Admin email: {settings.platform_admin_email}') print(f'Email auth enabled: {settings.email_auth_enabled}') " @@ -495,7 +495,7 @@ cp mcp.db.backup.YYYYMMDD_HHMMSS mcp.db # For PostgreSQL dropdb mcp -createdb mcp +createdb mcp psql -d mcp < mcp_backup_YYYYMMDD_HHMMSS.sql # For MySQL @@ -528,13 +528,13 @@ make install-dev After completing the migration, verify using the automated verification script: ```bash -# Run comprehensive verification +# Run comprehensive verification python3 scripts/verify_multitenancy_0_7_0_migration.py ``` Manual checks (if needed): - [ ] Database migration completed without errors -- [ ] Platform admin user created successfully +- [ ] Platform admin user created successfully - [ ] Personal team created for admin user - [ ] Old servers are visible in Virtual Servers list - [ ] Admin UI accessible at `/admin` endpoint @@ -553,7 +553,7 @@ python3 scripts/fix_multitenancy_0_7_0_resources.py If you encounter issues during migration: 1. **Check the logs**: Set `LOG_LEVEL=DEBUG` for verbose output -2. **Review troubleshooting section** above for common issues +2. **Review troubleshooting section** above for common issues 3. **File an issue**: https://github.com/anthropics/claude-code/issues 4. **Include information**: Database type, error messages, relevant logs @@ -583,7 +583,7 @@ cp .env.example .env # then edit with your admin credentials # 3. VERIFY CONFIG python3 -c "from mcpgateway.config import settings; print(f'Admin: {settings.platform_admin_email}')" -# 4. MIGRATE +# 4. MIGRATE python3 -m mcpgateway.bootstrap_db # 5. VERIFY SUCCESS @@ -595,5 +595,5 @@ python3 scripts/fix_multitenancy_0_7_0_resources.py ### Important URLs - **Admin UI**: http://localhost:4444/admin -- **Export Config**: http://localhost:4444/admin/export/configuration -- **Import Config**: http://localhost:4444/admin/import/configuration \ No newline at end of file +- **Export Config**: http://localhost:4444/admin/export/configuration +- **Import Config**: http://localhost:4444/admin/import/configuration diff --git a/scripts/fix_multitenancy_0_7_0_resources.py b/scripts/fix_multitenancy_0_7_0_resources.py index f865d311c..53436568d 100755 --- a/scripts/fix_multitenancy_0_7_0_resources.py +++ b/scripts/fix_multitenancy_0_7_0_resources.py @@ -34,13 +34,13 @@ def fix_unassigned_resources(): """Fix resources that lack proper team assignments.""" - + print("🔧 MCP Gateway - Fix Unassigned Resources") print("=" * 50) - + try: with SessionLocal() as db: - + # 1. Find admin user and personal team print("🔍 Finding admin user and personal team...") admin_email = settings.platform_admin_email @@ -48,75 +48,75 @@ def fix_unassigned_resources(): EmailUser.email == admin_email, EmailUser.is_admin == True ).first() - + if not admin_user: print(f"❌ Admin user not found: {admin_email}") print("Make sure the migration has run and admin user exists") return False - + personal_team = db.query(EmailTeam).filter( EmailTeam.created_by == admin_user.email, EmailTeam.is_personal == True, EmailTeam.is_active == True ).first() - + if not personal_team: print(f"❌ Personal team not found for admin: {admin_user.email}") return False - + print(f"✅ Found admin: {admin_user.email}") print(f"✅ Found personal team: {personal_team.name} ({personal_team.id})") - + # 2. Fix each resource type resource_types = [ ("servers", Server), - ("tools", Tool), + ("tools", Tool), ("resources", Resource) ] - + total_fixed = 0 - + for table_name, resource_model in resource_types: print(f"\n📋 Processing {table_name}...") - + # Find unassigned resources unassigned = db.query(resource_model).filter( (resource_model.team_id == None) | (resource_model.owner_email == None) | (resource_model.visibility == None) ).all() - + if not unassigned: print(f" ✅ No unassigned {table_name} found") continue - + print(f" 🔧 Fixing {len(unassigned)} unassigned {table_name}...") - + for resource in unassigned: resource_name = getattr(resource, 'name', 'Unknown') print(f" - Assigning: {resource_name}") - + # Assign to admin's personal team resource.team_id = personal_team.id resource.owner_email = admin_user.email - + # Set visibility to public if not already set if not hasattr(resource, 'visibility') or resource.visibility is None: resource.visibility = "public" - + total_fixed += 1 - + # Commit changes for this resource type db.commit() print(f" ✅ Fixed {len(unassigned)} {table_name}") - + print(f"\n🎉 Successfully fixed {total_fixed} resources!") print(f" All resources now assigned to: {personal_team.name}") print(f" Owner email: {admin_user.email}") print(f" Default visibility: public") - + return True - + except Exception as e: print(f"\n❌ Fix operation failed: {e}") import traceback @@ -126,15 +126,15 @@ def fix_unassigned_resources(): def main(): """Main function with user confirmation.""" - + print("This script will assign unassigned resources to the platform admin's personal team.") print("This is safe and will make resources visible in the team-based UI.\n") - + response = input("Continue? (y/N): ").lower().strip() if response not in ('y', 'yes'): print("Operation cancelled.") return - + if fix_unassigned_resources(): print("\n✅ Fix completed successfully!") print("🔍 Run verification script to confirm: python3 scripts/verify_multitenancy_0_7_0_migration.py") @@ -143,4 +143,4 @@ def main(): if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/scripts/verify_multitenancy_0_7_0_migration.py b/scripts/verify_multitenancy_0_7_0_migration.py index fee9ca8b8..3a1fef0c6 100755 --- a/scripts/verify_multitenancy_0_7_0_migration.py +++ b/scripts/verify_multitenancy_0_7_0_migration.py @@ -2,8 +2,8 @@ # -*- coding: utf-8 -*- """MCP Gateway v0.7.0 Multitenancy Migration Verification -This script verifies that the v0.6.0 → v0.7.0 multitenancy migration -completed successfully and that old servers/resources are visible in +This script verifies that the v0.6.0 → v0.7.0 multitenancy migration +completed successfully and that old servers/resources are visible in the new team-based system. Checks: @@ -27,7 +27,7 @@ try: from mcpgateway.db import ( - SessionLocal, EmailUser, EmailTeam, EmailTeamMember, + SessionLocal, EmailUser, EmailTeam, EmailTeamMember, Server, Tool, Resource, Role, UserRole ) from mcpgateway.config import settings @@ -40,16 +40,16 @@ def verify_migration(): """Verify the multitenancy migration was successful.""" - + print("🔍 MCP Gateway v0.7.0 Multitenancy Migration Verification") - print("📅 Migration: v0.6.0 → v0.7.0") + print("📅 Migration: v0.6.0 → v0.7.0") print("=" * 65) - + success = True - + try: with SessionLocal() as db: - + # 1. Check admin user exists print("\n📋 1. ADMIN USER CHECK") admin_email = settings.platform_admin_email @@ -57,7 +57,7 @@ def verify_migration(): EmailUser.email == admin_email, EmailUser.is_admin == True ).first() - + if admin_user: print(f" ✅ Admin user found: {admin_user.email}") print(f" Full name: {admin_user.full_name}") @@ -66,7 +66,7 @@ def verify_migration(): else: print(f" ❌ Admin user not found: {admin_email}") success = False - + # 2. Check personal team exists print("\n🏢 2. PERSONAL TEAM CHECK") if admin_user: @@ -75,7 +75,7 @@ def verify_migration(): EmailTeam.is_personal == True, EmailTeam.is_active == True ).first() - + if personal_team: print(f" ✅ Personal team found: {personal_team.name}") print(f" Team ID: {personal_team.id}") @@ -87,7 +87,7 @@ def verify_migration(): else: personal_team = None print(" ⚠️ Cannot check personal team (admin user missing)") - + # 3. Check resource assignments print("\n📦 3. RESOURCE ASSIGNMENT CHECK") resource_types = [ @@ -95,7 +95,7 @@ def verify_migration(): ("Tools", Tool), ("Resources", Resource) ] - + for resource_name, resource_model in resource_types: total_count = db.query(resource_model).count() assigned_count = db.query(resource_model).filter( @@ -104,23 +104,23 @@ def verify_migration(): resource_model.visibility != None ).count() unassigned_count = total_count - assigned_count - + print(f" {resource_name}:") print(f" Total: {total_count}") print(f" Assigned to teams: {assigned_count}") print(f" Unassigned: {unassigned_count}") - + if unassigned_count > 0: print(f" ❌ {unassigned_count} {resource_name.lower()} lack team assignment!") success = False - + # Show details of unassigned resources unassigned = db.query(resource_model).filter( (resource_model.team_id == None) | (resource_model.owner_email == None) | (resource_model.visibility == None) ).limit(3).all() - + for resource in unassigned: name = getattr(resource, 'name', 'Unknown') print(f" - {name} (ID: {resource.id})") @@ -129,41 +129,41 @@ def verify_migration(): print(f" visibility: {getattr(resource, 'visibility', 'N/A')}") else: print(f" ✅ All {resource_name.lower()} properly assigned") - + # 4. Check visibility distribution if personal_team: print("\n👁️ 4. VISIBILITY DISTRIBUTION") - + for resource_name, resource_model in resource_types: if hasattr(resource_model, 'visibility'): visibility_counts = {} resources = db.query(resource_model).all() - + for resource in resources: vis = getattr(resource, 'visibility', 'unknown') visibility_counts[vis] = visibility_counts.get(vis, 0) + 1 - + print(f" {resource_name}:") for visibility, count in visibility_counts.items(): print(f" {visibility}: {count}") - - # 5. Database schema validation + + # 5. Database schema validation print("\n🗄️ 5. DATABASE SCHEMA VALIDATION") - + # Check if we can access multitenancy models (proves schema exists) schema_checks = [] try: user_count = db.query(EmailUser).count() - team_count = db.query(EmailTeam).count() + team_count = db.query(EmailTeam).count() member_count = db.query(EmailTeamMember).count() print(f" ✅ EmailUser model: {user_count} records") - print(f" ✅ EmailTeam model: {team_count} records") + print(f" ✅ EmailTeam model: {team_count} records") print(f" ✅ EmailTeamMember model: {member_count} records") schema_checks.append("core_auth") except Exception as e: print(f" ❌ Core auth models inaccessible: {e}") success = False - + try: role_count = db.query(Role).count() user_role_count = db.query(UserRole).count() @@ -173,23 +173,23 @@ def verify_migration(): except Exception as e: print(f" ❌ RBAC models inaccessible: {e}") success = False - + # Verify resource models have team attributes resource_models = [ ("Server", Server), - ("Tool", Tool), + ("Tool", Tool), ("Resource", Resource) ] - + for model_name, model_class in resource_models: try: # Check if model has team attributes sample = db.query(model_class).first() if sample: has_team_id = hasattr(sample, 'team_id') - has_owner_email = hasattr(sample, 'owner_email') + has_owner_email = hasattr(sample, 'owner_email') has_visibility = hasattr(sample, 'visibility') - + if has_team_id and has_owner_email and has_visibility: print(f" ✅ {model_name}: has multitenancy attributes") else: @@ -204,11 +204,11 @@ def verify_migration(): except Exception as e: print(f" ❌ {model_name}: model access failed - {e}") success = False - + if "core_auth" in schema_checks and "rbac" in schema_checks: print(" ✅ Multitenancy schema fully operational") - - # 6. Team membership check + + # 6. Team membership check print("\n👥 6. TEAM MEMBERSHIP CHECK") if admin_user and personal_team: membership = db.query(EmailTeamMember).filter( @@ -216,7 +216,7 @@ def verify_migration(): EmailTeamMember.user_email == admin_user.email, EmailTeamMember.is_active == True ).first() - + if membership: print(f" ✅ Admin is member of personal team") print(f" Role: {membership.role}") @@ -224,13 +224,13 @@ def verify_migration(): else: print(f" ❌ Admin is not a member of personal team") success = False - + except Exception as e: print(f"\n❌ Verification failed with error: {e}") import traceback traceback.print_exc() return False - + print("\n" + "=" * 65) if success: print("🎉 MIGRATION VERIFICATION: SUCCESS!") @@ -250,4 +250,4 @@ def verify_migration(): if __name__ == "__main__": - verify_migration() \ No newline at end of file + verify_migration() diff --git a/tests/manual/.gitignore b/tests/manual/.gitignore new file mode 100644 index 000000000..7c1222033 --- /dev/null +++ b/tests/manual/.gitignore @@ -0,0 +1 @@ +*.xlsx diff --git a/tests/manual/README.md b/tests/manual/README.md new file mode 100644 index 000000000..a1bd12912 --- /dev/null +++ b/tests/manual/README.md @@ -0,0 +1,172 @@ +# 🧪 MCP Gateway v0.7.0 - Manual Testing Suite + +**Complete manual testing for post-migration validation** + +## 📁 Directory Contents + +### 🧪 **Test Files** (Run Individually) +| File | Purpose | Priority | Time | +|------|---------|----------|------| +| `setup_instructions.py` | Environment setup | CRITICAL | 30-60 min | +| `migration_tests.py` | **Migration validation (MAIN TEST)** | CRITICAL | 60-90 min | +| `admin_ui_tests.py` | Admin UI testing | CRITICAL | 60-120 min | +| `api_authentication_tests.py` | Authentication API | HIGH | 30-60 min | +| `api_teams_tests.py` | Teams API | HIGH | 30-60 min | +| `api_servers_tests.py` | Servers API | HIGH | 45-90 min | +| `database_tests.py` | Database compatibility | HIGH | 60-120 min | +| `security_tests.py` | Security testing | MEDIUM | 90-180 min | + +### 🎯 **Coordination Files** +| File | Purpose | +|------|---------| +| `run_all_tests.py` | Master test coordinator | +| `generate_test_plan.sh` | **Excel generator entrypoint** | +| `generate_test_plan_xlsx.py` | Excel generator (Python) | + +### 📊 **Output Files** +| File | Purpose | +|------|---------| +| `test-plan.xlsx` | **Complete Excel test plan (8 worksheets, 54 tests)** | +| `README.md` | This documentation | + +## 🚀 **Quick Start** + +### **Generate Excel Test Plan** +```bash +# Generate clean Excel file from Python test files +./generate_test_plan.sh + +# Result: test-plan.xlsx (ready for 10 testers) +``` + +### **For Testers - Option 1: Excel File** +```bash +# Open the generated Excel file +open test-plan.xlsx # or double-click in file manager + +# Follow worksheets in order: +# 1. Setup Instructions +# 2. Migration Tests (MAIN TEST - server visibility) +# 3. Admin UI Tests +# 4. API Authentication +# 5. API Teams +# 6. API Servers +# 7. Database Tests +# 8. Security Tests +``` + +### **For Testers - Option 2: Python Files** +```bash +# Run individual test areas +python3 setup_instructions.py # Environment setup +python3 migration_tests.py # Critical migration tests +python3 admin_ui_tests.py # UI validation (server visibility) + +# Get help for any test file +python3 .py --help +``` + +### **For Testers - Option 3: Coordinated** +```bash +# Interactive test coordination +python3 run_all_tests.py + +# Quick critical tests only +python3 run_all_tests.py --critical-only +``` + +## 🎯 **Main Migration Test** + +**THE KEY TEST**: Verify old servers are visible after migration + +**Primary Test Files**: +- `migration_tests.py` → **MIG-003**: "OLD SERVERS VISIBLE" +- `admin_ui_tests.py` → **UI-003**: "Server List View" +- `test-plan.xlsx` → **Migration Tests** worksheet + +**What to validate**: +1. ✅ Admin UI shows all servers (including pre-migration) +2. ✅ Server details are accessible +3. ✅ No empty server list + +## 📋 **Test Execution Guide** + +### **For New Testers** +1. **Setup**: `python3 setup_instructions.py` (interactive guide) +2. **Migration**: `python3 migration_tests.py` (critical validation) +3. **UI**: `python3 admin_ui_tests.py` (main server visibility test) +4. **APIs**: Run remaining test files as time permits + +### **For Experienced Testers** +1. **Excel**: Open `test-plan.xlsx` and work through worksheets +2. **Filter**: Use Excel table filtering for specific test areas +3. **Critical**: Focus on CRITICAL priority tests first + +### **For Test Coordinators** +1. **Generate**: `./generate_test_plan.sh` (create fresh Excel) +2. **Assign**: Distribute test files to 10 testers +3. **Track**: Collect JSON result files from testers +4. **Summary**: Use `run_all_tests.py` for overall results + +## 🔧 **Technical Details** + +### **File Dependencies** +- All test Python files are **independent** (no dependencies between them) +- `generate_test_plan_xlsx.py` reads test data from Python files +- `run_all_tests.py` coordinates execution of individual files +- Each test file generates its own JSON results file + +### **Excel Generation Process** +```bash +./generate_test_plan.sh + ↓ + Calls: python3 generate_test_plan_xlsx.py + ↓ + Reads: All *_tests.py files + ↓ + Generates: test-plan.xlsx (8 worksheets, Excel tables) + ↓ + Result: Clean file, no corruption, ready for testers +``` + +### **Test Result Tracking** +Each test file can generate JSON results: +- `migration_test_results.json` +- `auth_test_results.json` +- `admin_ui_test_results.json` +- etc. + +## ⚠️ **Critical Success Criteria** + +### **MUST PASS for Production** +1. ✅ **Migration Tests**: All critical tests pass +2. ✅ **Server Visibility**: Old servers visible in admin UI +3. ✅ **Authentication**: Email and basic auth work +4. ✅ **Team Assignments**: All resources have proper teams + +### **SHOULD PASS for Quality** +1. ✅ API endpoints respond correctly +2. ✅ Admin UI fully functional +3. ✅ Security defenses active +4. ✅ Performance acceptable + +## 💡 **Pro Tips** + +- **Start with setup_instructions.py** - it guides environment preparation +- **Focus on migration_tests.py** - contains the main server visibility test +- **Use --help** with any test file for detailed usage +- **Take screenshots** of UI issues for debugging +- **Record exact error messages** for troubleshooting +- **Test both SQLite and PostgreSQL** if possible + +## 🎯 **Expected Outcomes** + +After successful testing: +- ✅ Old servers are visible in admin UI (main migration fix) +- ✅ All multitenancy features work correctly +- ✅ APIs respond with proper team-based filtering +- ✅ Admin interface is fully functional +- ✅ Database migration completed without issues +- ✅ Security measures are active and effective + +This testing suite ensures your MCP Gateway v0.7.0 migration was successful! \ No newline at end of file diff --git a/tests/manual/admin_ui_tests.py b/tests/manual/admin_ui_tests.py new file mode 100644 index 000000000..c148396c7 --- /dev/null +++ b/tests/manual/admin_ui_tests.py @@ -0,0 +1,450 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +MCP Gateway v0.7.0 - Admin UI Manual Tests + +Comprehensive testing of the admin web interface including: +- Login and authentication +- Dashboard and navigation +- Server management UI (CRITICAL for migration validation) +- Team management interface +- User administration +- Export/import interface + +Usage: + python3 tests/manual/admin_ui_tests.py +""" + +import sys +import json +from pathlib import Path +from datetime import datetime + +# Admin UI test cases +ADMIN_UI_TESTS = [ + { + "id": "UI-001", + "section": "Authentication", + "component": "Login Page", + "action": "Test admin login interface", + "steps": [ + "1. Open web browser (Chrome or Firefox recommended)", + "2. Navigate to: http://localhost:4444/admin", + "3. Observe login page layout and components", + "4. Check for email and password input fields", + "5. Look for 'Login' or 'Sign In' button", + "6. Test form validation (empty fields)", + "7. Enter admin email from .env file", + "8. Enter admin password from .env file", + "9. Click Login button", + "10. Verify successful redirect to admin dashboard" + ], + "expected": "Login page loads, form validation works, authentication successful", + "browser": "Chrome/Firefox", + "screenshot": "Optional", + "critical": True + }, + { + "id": "UI-002", + "section": "Dashboard", + "component": "Main Dashboard", + "action": "Navigate and test admin dashboard", + "steps": [ + "1. After successful login, observe dashboard layout", + "2. Count the number of statistics cards displayed", + "3. Check navigation menu on left side or top", + "4. Click on each statistic card to test interactions", + "5. Test responsive design (resize browser window)", + "6. Check for any error messages or warnings", + "7. Verify user menu/profile in top right", + "8. Test logout functionality" + ], + "expected": "Dashboard functional with stats, navigation menu works, responsive design", + "browser": "Chrome/Firefox", + "screenshot": "Optional", + "critical": False + }, + { + "id": "UI-003", + "section": "Virtual Servers", + "component": "Server List View", + "action": "View and verify server list - CRITICAL MIGRATION TEST", + "steps": [ + "1. Click 'Virtual Servers' in navigation menu", + "2. Observe server list/grid layout", + "3. COUNT the total number of servers displayed", + "4. IDENTIFY servers created before migration (older creation dates)", + "5. Click on each server card/row to view details", + "6. Verify server information is accessible and complete", + "7. Check server actions (start/stop/restart if available)", + "8. Test server filtering and search if available", + "9. TAKE SCREENSHOT of server list showing all servers", + "10. Record server names and their visibility status" + ], + "expected": "ALL servers visible including pre-migration servers, details accessible", + "browser": "Chrome/Firefox", + "screenshot": "REQUIRED", + "critical": True, + "main_migration_test": True + }, + { + "id": "UI-004", + "section": "Teams", + "component": "Team Management Interface", + "action": "Test team management functionality", + "steps": [ + "1. Navigate to 'Teams' section in admin interface", + "2. View team list/grid display", + "3. Find your personal team (usually ''s Team')", + "4. Click on personal team to view details", + "5. Check team information display", + "6. Click 'View Members' or 'Members' tab", + "7. Verify you're listed as 'Owner'", + "8. Test 'Create Team' functionality", + "9. Fill out team creation form", + "10. Verify new team appears in list" + ], + "expected": "Team interface functional, personal team visible, team creation works", + "browser": "Chrome/Firefox", + "screenshot": "Optional", + "critical": False + }, + { + "id": "UI-005", + "section": "Tools", + "component": "Tool Registry Interface", + "action": "Test tool management and invocation", + "steps": [ + "1. Navigate to 'Tools' section", + "2. View available tools list", + "3. Check team-based filtering is working", + "4. Click on any tool to view details", + "5. Look for 'Invoke' or 'Execute' button", + "6. Test tool invocation interface", + "7. Fill in tool parameters if prompted", + "8. Submit tool execution", + "9. Verify results are displayed properly", + "10. Test tool creation form if available" + ], + "expected": "Tools accessible by team permissions, invocation interface works", + "browser": "Chrome/Firefox", + "screenshot": "Optional", + "critical": False + }, + { + "id": "UI-006", + "section": "Resources", + "component": "Resource Management Interface", + "action": "Test resource browser and management", + "steps": [ + "1. Navigate to 'Resources' section", + "2. Browse available resources", + "3. Check team-based resource filtering", + "4. Click on any resource to view details", + "5. Test resource download functionality", + "6. Try 'Upload Resource' button if available", + "7. Test file upload interface", + "8. Fill in resource metadata", + "9. Verify upload completes successfully", + "10. Check new resource appears in list" + ], + "expected": "Resource browser functional, upload/download works, team filtering applied", + "browser": "Chrome/Firefox", + "screenshot": "Optional", + "critical": False + }, + { + "id": "UI-007", + "section": "User Management", + "component": "User Administration Interface", + "action": "Test user management (admin only)", + "steps": [ + "1. Navigate to 'Users' section (admin only)", + "2. View user list display", + "3. Click on any user to view details", + "4. Check user profile information", + "5. Test 'Create User' functionality if available", + "6. Fill user creation form", + "7. Test role assignment interface", + "8. Verify user permissions management", + "9. Check user activity/audit information", + "10. Test user status changes (active/inactive)" + ], + "expected": "User management interface functional, role assignment works", + "browser": "Chrome/Firefox", + "screenshot": "Optional", + "critical": False, + "requires": "Platform admin privileges" + }, + { + "id": "UI-008", + "section": "Export/Import", + "component": "Configuration Management Interface", + "action": "Test configuration backup and restore", + "steps": [ + "1. Navigate to 'Export/Import' section", + "2. Locate 'Export Configuration' button/link", + "3. Click export and select export options", + "4. Download the configuration JSON file", + "5. Open JSON file and verify contents", + "6. Locate 'Import Configuration' button/link", + "7. Select the downloaded JSON file", + "8. Choose import options (merge/replace)", + "9. Execute the import process", + "10. Verify import completion and success" + ], + "expected": "Export downloads complete JSON, import processes successfully", + "browser": "Chrome/Firefox", + "screenshot": "Recommended", + "critical": False + }, + { + "id": "UI-009", + "section": "Mobile Compatibility", + "component": "Responsive Design", + "action": "Test mobile device compatibility", + "steps": [ + "1. Resize browser window to mobile width (<768px)", + "2. OR open admin UI on actual mobile device", + "3. Test navigation menu (hamburger menu?)", + "4. Check form input usability on mobile", + "5. Test touch interactions and gestures", + "6. Verify text readability and sizing", + "7. Check all features remain accessible", + "8. Test portrait and landscape orientations", + "9. Verify no horizontal scrolling required", + "10. Check mobile-specific UI adaptations" + ], + "expected": "Interface adapts to mobile screens while maintaining full functionality", + "browser": "Mobile Chrome/Safari", + "screenshot": "Optional", + "critical": False + }, + { + "id": "UI-010", + "section": "Error Handling", + "component": "UI Error Scenarios", + "action": "Test error handling and user experience", + "steps": [ + "1. Trigger network error (disconnect internet briefly)", + "2. Submit forms with invalid data", + "3. Try accessing resources without permission", + "4. Test session timeout scenarios", + "5. Check error message display", + "6. Verify error messages are user-friendly", + "7. Test error recovery mechanisms", + "8. Check browser console for JavaScript errors", + "9. Verify graceful degradation", + "10. Test error logging and reporting" + ], + "expected": "Graceful error handling, helpful error messages, no JavaScript crashes", + "browser": "Chrome/Firefox", + "screenshot": "For errors", + "critical": False + } +] + + +def run_admin_ui_tests(): + """Run comprehensive admin UI tests.""" + + print("🖥️ ADMIN UI COMPREHENSIVE TESTING") + print("=" * 60) + print("🎯 Testing every admin interface component") + print("🚨 Includes critical migration validation (server visibility)") + + results = [] + + print("\\n🔧 Pre-test Requirements:") + print("1. MCP Gateway running (make dev)") + print("2. Admin login credentials available") + print("3. Browser with developer tools (F12)") + + input("\\nPress Enter when ready to begin UI testing...") + + for test in ADMIN_UI_TESTS: + print(f"\\n{'='*60}") + print(f"🧪 TEST {test['id']}: {test['component']}") + print(f"Section: {test['section']}") + print(f"Action: {test['action']}") + + if test.get('critical'): + print("🚨 CRITICAL TEST") + + if test.get('main_migration_test'): + print("🎯 MAIN MIGRATION VALIDATION TEST!") + + if test.get('requires'): + print(f"⚠️ Requires: {test['requires']}") + + print(f"\\n📋 Detailed Steps:") + for step in test['steps']: + print(f" {step}") + + print(f"\\n✅ Expected Result:") + print(f" {test['expected']}") + + print(f"\\n🌐 Browser: {test['browser']}") + print(f"📸 Screenshot: {test['screenshot']}") + + # Manual execution + response = input(f"\\nExecute UI test {test['id']}? (y/n/skip): ").lower() + + if response == 'skip' or response == 's': + print(f"⚠️ {test['id']}: SKIPPED") + results.append({"id": test['id'], "status": "SKIP", "timestamp": datetime.now().isoformat()}) + continue + elif response != 'y': + print(f"❌ {test['id']}: ABORTED") + break + + # Get test results + print(f"\\n📝 Record Results for {test['id']}:") + ui_result = input("Did the UI behave as expected? (y/n): ").lower() + + if ui_result == 'y': + status = "PASS" + print(f"✅ {test['id']}: PASSED") + else: + status = "FAIL" + print(f"❌ {test['id']}: FAILED") + failure_details = input("Describe what went wrong: ") + + if test.get('critical') or test.get('main_migration_test'): + print("🚨 CRITICAL UI TEST FAILED!") + print("This may indicate migration issues") + + # Record detailed results + result_data = { + "id": test['id'], + "section": test['section'], + "component": test['component'], + "status": status, + "browser": test['browser'], + "timestamp": datetime.now().isoformat() + } + + if status == "FAIL": + result_data['failure_details'] = failure_details + + if test.get('screenshot') == "REQUIRED" or test.get('screenshot') == "Recommended": + screenshot_taken = input("Screenshot taken? (y/n): ").lower() == 'y' + result_data['screenshot_taken'] = screenshot_taken + + results.append(result_data) + + # Generate UI test summary + generate_ui_summary(results) + + return results + + +def generate_ui_summary(results): + """Generate UI testing summary.""" + + print(f"\\n{'='*60}") + print("📊 ADMIN UI TEST SUMMARY") + print("=" * 60) + + passed = len([r for r in results if r['status'] == 'PASS']) + failed = len([r for r in results if r['status'] == 'FAIL']) + skipped = len([r for r in results if r['status'] == 'SKIP']) + total = len(results) + + print(f"📈 UI Test Results:") + print(f" ✅ Passed: {passed}/{total}") + print(f" ❌ Failed: {failed}/{total}") + print(f" ⚠️ Skipped: {skipped}/{total}") + + # Check critical UI tests + critical_results = [r for r in results if 'UI-001' in r['id'] or 'UI-003' in r['id']] # Login and server visibility + critical_passed = len([r for r in critical_results if r['status'] == 'PASS']) + + print(f"\\n🚨 Critical UI Tests:") + print(f" ✅ Critical Passed: {critical_passed}/{len(critical_results)}") + + # Look for main migration test result + server_visibility_test = next((r for r in results if 'UI-003' in r['id']), None) + if server_visibility_test: + if server_visibility_test['status'] == 'PASS': + print("\\n🎯 MAIN MIGRATION TEST: ✅ PASSED") + print(" Old servers are visible in admin UI!") + else: + print("\\n🎯 MAIN MIGRATION TEST: ❌ FAILED") + print(" Old servers may not be visible - check migration") + + # Overall assessment + if failed == 0 and critical_passed == len(critical_results): + print(f"\\n🎉 ADMIN UI: FULLY FUNCTIONAL!") + print("✅ All critical UI tests passed") + print("✅ Admin interface ready for production use") + else: + print(f"\\n⚠️ ADMIN UI: ISSUES DETECTED") + print("🔧 Review failed tests and resolve issues") + + # Save results + results_file = Path("tests/manual/admin_ui_test_results.json") + with open(results_file, 'w') as f: + json.dump({ + "summary": {"passed": passed, "failed": failed, "skipped": skipped}, + "results": results, + "timestamp": datetime.now().isoformat() + }, f, indent=2) + + print(f"\\n📄 Results saved: {results_file}") + + +def test_specific_ui_component(component_id): + """Test specific UI component.""" + + test = next((t for t in ADMIN_UI_TESTS if t['id'] == component_id), None) + + if not test: + print(f"❌ Component {component_id} not found") + available = [t['id'] for t in ADMIN_UI_TESTS] + print(f"Available: {available}") + return False + + print(f"🧪 TESTING UI COMPONENT: {component_id}") + print("=" * 50) + print(f"Section: {test['section']}") + print(f"Component: {test['component']}") + print(f"Action: {test['action']}") + + if test.get('main_migration_test'): + print("🎯 THIS IS THE MAIN MIGRATION TEST!") + + print(f"\\n📋 Steps:") + for step in test['steps']: + print(f" {step}") + + print(f"\\n✅ Expected: {test['expected']}") + + return True + + +if __name__ == "__main__": + if len(sys.argv) > 1: + if sys.argv[1] == "--help": + print("🖥️ Admin UI Tests") + print("Usage:") + print(" python3 tests/manual/admin_ui_tests.py # Run all UI tests") + print(" python3 tests/manual/admin_ui_tests.py --component UI-003 # Test specific component") + print(" python3 tests/manual/admin_ui_tests.py --help # This help") + elif sys.argv[1] == "--component" and len(sys.argv) > 2: + test_specific_ui_component(sys.argv[2]) + else: + print("❌ Unknown option. Use --help for usage.") + else: + try: + print("🖥️ Starting admin UI testing...") + print("💡 Focus on UI-003 (server visibility) - this is the main migration test!") + results = run_admin_ui_tests() + print("\\n🎉 Admin UI testing complete!") + print("Next: python3 tests/manual/database_tests.py") + except KeyboardInterrupt: + print("\\n❌ Testing cancelled by user") + sys.exit(1) + except Exception as e: + print(f"❌ Testing error: {e}") + sys.exit(1) \ No newline at end of file diff --git a/tests/manual/api_authentication_tests.py b/tests/manual/api_authentication_tests.py new file mode 100644 index 000000000..47ae7a9ff --- /dev/null +++ b/tests/manual/api_authentication_tests.py @@ -0,0 +1,420 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +MCP Gateway v0.7.0 - Authentication API Tests + +Comprehensive testing of all authentication endpoints including: +- Email registration and login +- JWT token management +- SSO integration (GitHub, Google) +- Password management +- Profile operations + +Usage: + python3 tests/manual/api_authentication_tests.py + python3 tests/manual/api_authentication_tests.py --endpoint /auth/login +""" + +import sys +import subprocess +import json +import requests +from pathlib import Path +from datetime import datetime + +# Add project root to path +project_root = Path(__file__).parent.parent.parent +sys.path.insert(0, str(project_root)) + +# Authentication test cases +AUTH_TESTS = [ + { + "id": "AUTH-001", + "endpoint": "/auth/register", + "method": "POST", + "description": "User registration endpoint", + "curl_command": 'curl -X POST http://localhost:4444/auth/register -H "Content-Type: application/json"', + "request_body": '{"email":"testuser@example.com","password":"TestPass123","full_name":"Test User"}', + "expected_status": 201, + "expected_response": "User created successfully with personal team", + "test_steps": [ + "1. Execute the cURL command with test user data", + "2. Verify HTTP status code is 201", + "3. Check response contains user ID and email", + "4. Verify personal team was created for user", + "5. Record exact response content" + ], + "validation": "Response should include user_id, email, and personal_team_id" + }, + { + "id": "AUTH-002", + "endpoint": "/auth/login", + "method": "POST", + "description": "Email authentication login", + "curl_command": 'curl -X POST http://localhost:4444/auth/login -H "Content-Type: application/json"', + "request_body": '{"email":"admin@example.com","password":"changeme"}', + "expected_status": 200, + "expected_response": "JWT token returned in response", + "test_steps": [ + "1. Use admin credentials from .env file", + "2. Execute login request", + "3. Verify HTTP 200 status code", + "4. Check response contains 'token' field", + "5. Verify token is valid JWT format", + "6. Save token for subsequent API tests" + ], + "validation": "Response must contain valid JWT token", + "critical": True + }, + { + "id": "AUTH-003", + "endpoint": "/auth/logout", + "method": "POST", + "description": "User logout endpoint", + "curl_command": 'curl -X POST http://localhost:4444/auth/logout -H "Authorization: Bearer "', + "request_body": "", + "expected_status": 200, + "expected_response": "Logout successful, token invalidated", + "test_steps": [ + "1. Use JWT token from login test", + "2. Execute logout request with Authorization header", + "3. Verify HTTP 200 status", + "4. Try using the token again (should fail)", + "5. Verify token is now invalid" + ], + "validation": "Token becomes invalid after logout" + }, + { + "id": "AUTH-004", + "endpoint": "/auth/refresh", + "method": "POST", + "description": "JWT token refresh", + "curl_command": 'curl -X POST http://localhost:4444/auth/refresh -H "Authorization: Bearer "', + "request_body": "", + "expected_status": 200, + "expected_response": "New JWT token issued", + "test_steps": [ + "1. Use valid JWT token", + "2. Request token refresh", + "3. Verify new token returned", + "4. Test both old and new tokens", + "5. Verify new token works, old may be invalidated" + ], + "validation": "New token returned and functional" + }, + { + "id": "AUTH-005", + "endpoint": "/auth/profile", + "method": "GET", + "description": "Get user profile information", + "curl_command": 'curl http://localhost:4444/auth/profile -H "Authorization: Bearer "', + "request_body": "", + "expected_status": 200, + "expected_response": "User profile data including email, teams, roles", + "test_steps": [ + "1. Use valid JWT token", + "2. Request user profile", + "3. Verify profile contains user email", + "4. Check team membership information", + "5. Verify role assignments if applicable" + ], + "validation": "Profile includes email, teams, and role data" + }, + { + "id": "AUTH-006", + "endpoint": "/auth/change-password", + "method": "POST", + "description": "Change user password", + "curl_command": 'curl -X POST http://localhost:4444/auth/change-password -H "Authorization: Bearer " -H "Content-Type: application/json"', + "request_body": '{"old_password":"changeme","new_password":"NewPassword123"}', + "expected_status": 200, + "expected_response": "Password updated successfully", + "test_steps": [ + "1. Use current password as old_password", + "2. Provide strong new password", + "3. Execute password change request", + "4. Verify success response", + "5. Test login with new password", + "6. IMPORTANT: Change password back for other tests" + ], + "validation": "Password change works, can login with new password" + }, + { + "id": "AUTH-007", + "endpoint": "/auth/sso/github", + "method": "GET", + "description": "GitHub SSO authentication initiation", + "curl_command": 'curl -I http://localhost:4444/auth/sso/github', + "request_body": "", + "expected_status": 302, + "expected_response": "Redirect to GitHub OAuth authorization", + "test_steps": [ + "1. Execute request to GitHub SSO endpoint", + "2. Verify HTTP 302 redirect status", + "3. Check Location header contains github.com", + "4. Verify OAuth parameters in redirect URL", + "5. Note: Full OAuth flow requires GitHub app setup" + ], + "validation": "Redirects to GitHub OAuth (if SSO enabled)", + "requires_config": "SSO_GITHUB_ENABLED=true, GitHub OAuth app" + }, + { + "id": "AUTH-008", + "endpoint": "/auth/sso/google", + "method": "GET", + "description": "Google SSO authentication initiation", + "curl_command": 'curl -I http://localhost:4444/auth/sso/google', + "request_body": "", + "expected_status": 302, + "expected_response": "Redirect to Google OAuth authorization", + "test_steps": [ + "1. Execute request to Google SSO endpoint", + "2. Verify HTTP 302 redirect status", + "3. Check Location header contains accounts.google.com", + "4. Verify OAuth parameters in redirect URL", + "5. Note: Full OAuth flow requires Google OAuth app" + ], + "validation": "Redirects to Google OAuth (if SSO enabled)", + "requires_config": "SSO_GOOGLE_ENABLED=true, Google OAuth app" + }, + { + "id": "AUTH-009", + "endpoint": "/auth/verify-email", + "method": "POST", + "description": "Email address verification", + "curl_command": 'curl -X POST http://localhost:4444/auth/verify-email -H "Content-Type: application/json"', + "request_body": '{"token":""}', + "expected_status": 200, + "expected_response": "Email verified successfully", + "test_steps": [ + "1. Register new user first (to get verification token)", + "2. Check email for verification token (if email configured)", + "3. Use token in verification request", + "4. Verify email verification status updated", + "5. Check user can now perform email-verified actions" + ], + "validation": "Email verification updates user status", + "requires_config": "Email delivery configured" + }, + { + "id": "AUTH-010", + "endpoint": "/auth/forgot-password", + "method": "POST", + "description": "Password reset request", + "curl_command": 'curl -X POST http://localhost:4444/auth/forgot-password -H "Content-Type: application/json"', + "request_body": '{"email":"admin@example.com"}', + "expected_status": 200, + "expected_response": "Password reset email sent", + "test_steps": [ + "1. Request password reset for known user", + "2. Verify HTTP 200 response", + "3. Check email for reset link (if email configured)", + "4. Test reset token functionality", + "5. Verify password can be reset via token" + ], + "validation": "Password reset process initiated", + "requires_config": "Email delivery configured" + } +] + + +def run_auth_tests(): + """Run all authentication tests.""" + + print("🔐 AUTHENTICATION API TESTING") + print("=" * 60) + print("🎯 Testing all authentication endpoints") + + # Get base URL and setup + base_url = "http://localhost:4444" + results = [] + + print("\\n🔧 Pre-test Setup:") + print("1. Ensure MCP Gateway is running (make dev)") + print("2. Ensure migration completed successfully") + print("3. Have admin credentials from .env file ready") + + input("\\nPress Enter when ready to begin testing...") + + for test in AUTH_TESTS: + print(f"\\n{'='*60}") + print(f"🧪 TEST {test['id']}: {test['endpoint']}") + print(f"Method: {test['method']}") + print(f"Description: {test['description']}") + + if test.get('critical'): + print("🚨 CRITICAL TEST") + + if test.get('requires_config'): + print(f"⚠️ Requires: {test['requires_config']}") + + print(f"\\n📋 Test Steps:") + for step in test['test_steps']: + print(f" {step}") + + print(f"\\n💻 cURL Command:") + print(f" {test['curl_command']}") + if test['request_body']: + print(f" Data: {test['request_body']}") + + print(f"\\n✅ Expected:") + print(f" Status: {test['expected_status']}") + print(f" Response: {test['expected_response']}") + + # Manual execution + response = input(f"\\nExecute test {test['id']}? (y/n/skip): ").lower() + + if response == 'skip' or response == 's': + print(f"⚠️ {test['id']}: SKIPPED") + results.append({"id": test['id'], "status": "SKIP", "timestamp": datetime.now().isoformat()}) + continue + elif response != 'y': + print(f"❌ {test['id']}: ABORTED") + break + + # Get actual results from user + print(f"\\n📝 Record Results:") + actual_status = input("Actual HTTP status code: ") + actual_response = input("Actual response (summary): ") + + # Determine pass/fail + expected_str = str(test['expected_status']) + passed = actual_status == expected_str + status = "PASS" if passed else "FAIL" + + print(f"\\n{'✅' if passed else '❌'} {test['id']}: {status}") + + if not passed and test.get('critical'): + print("🚨 CRITICAL TEST FAILED!") + continue_testing = input("Continue with remaining tests? (y/N): ").lower() + if continue_testing != 'y': + break + + # Record result + results.append({ + "id": test['id'], + "endpoint": test['endpoint'], + "status": status, + "expected_status": test['expected_status'], + "actual_status": actual_status, + "actual_response": actual_response, + "timestamp": datetime.now().isoformat() + }) + + # Generate summary + generate_auth_summary(results) + return results + + +def generate_auth_summary(results): + """Generate authentication test summary.""" + + print(f"\\n{'='*60}") + print("📊 AUTHENTICATION API TEST SUMMARY") + print("=" * 60) + + passed = len([r for r in results if r['status'] == 'PASS']) + failed = len([r for r in results if r['status'] == 'FAIL']) + skipped = len([r for r in results if r['status'] == 'SKIP']) + total = len(results) + + print(f"📈 Results:") + print(f" ✅ Passed: {passed}/{total}") + print(f" ❌ Failed: {failed}/{total}") + print(f" ⚠️ Skipped: {skipped}/{total}") + + if failed == 0: + print(f"\\n🎉 ALL AUTHENTICATION TESTS PASSED!") + print("✅ Authentication system fully functional") + else: + print(f"\\n⚠️ SOME AUTHENTICATION TESTS FAILED") + print("🔧 Review failed tests before production deployment") + + # Save results + results_file = Path("tests/manual/auth_test_results.json") + with open(results_file, 'w') as f: + json.dump({ + "summary": {"passed": passed, "failed": failed, "skipped": skipped, "total": total}, + "results": results, + "timestamp": datetime.now().isoformat() + }, f, indent=2) + + print(f"\\n📄 Results saved: {results_file}") + + +def test_specific_endpoint(endpoint): + """Test a specific authentication endpoint.""" + + test = next((t for t in AUTH_TESTS if t['endpoint'] == endpoint), None) + + if not test: + print(f"❌ Endpoint {endpoint} not found in test suite") + available = [t['endpoint'] for t in AUTH_TESTS] + print(f"Available endpoints: {available}") + return False + + print(f"🧪 TESTING SPECIFIC ENDPOINT: {endpoint}") + print("=" * 50) + print(f"Test ID: {test['id']}") + print(f"Method: {test['method']}") + print(f"Description: {test['description']}") + + print(f"\\n💻 cURL Command:") + print(f"{test['curl_command']}") + if test['request_body']: + print(f"Data: {test['request_body']}") + + print(f"\\n📋 Test Steps:") + for step in test['test_steps']: + print(f" {step}") + + print(f"\\n✅ Expected:") + print(f" Status: {test['expected_status']}") + print(f" Response: {test['expected_response']}") + + return True + + +def list_all_endpoints(): + """List all authentication endpoints.""" + + print("📋 ALL AUTHENTICATION ENDPOINTS") + print("=" * 50) + + for test in AUTH_TESTS: + critical_marker = " 🚨 CRITICAL" if test.get('critical') else "" + config_marker = f" ⚠️ Requires: {test.get('requires_config')}" if test.get('requires_config') else "" + + print(f"\\n{test['id']}: {test['endpoint']} ({test['method']}){critical_marker}{config_marker}") + print(f" Description: {test['description']}") + print(f" Expected: {test['expected_status']} - {test['expected_response']}") + + +if __name__ == "__main__": + if len(sys.argv) > 1: + if sys.argv[1] == "--help": + print("🔐 Authentication API Tests") + print("Usage:") + print(" python3 tests/manual/api_authentication_tests.py # Run all tests") + print(" python3 tests/manual/api_authentication_tests.py --endpoint # Test specific endpoint") + print(" python3 tests/manual/api_authentication_tests.py --list # List all endpoints") + print(" python3 tests/manual/api_authentication_tests.py --help # This help") + elif sys.argv[1] == "--list": + list_all_endpoints() + elif sys.argv[1] == "--endpoint" and len(sys.argv) > 2: + test_specific_endpoint(sys.argv[2]) + else: + print("❌ Unknown option. Use --help for usage.") + else: + # Run all authentication tests + try: + print("🔐 Starting authentication API testing...") + results = run_auth_tests() + print("\\n🎉 Authentication testing complete!") + print("Next: python3 tests/manual/api_teams_tests.py") + except KeyboardInterrupt: + print("\\n❌ Testing cancelled by user") + sys.exit(1) + except Exception as e: + print(f"❌ Testing error: {e}") + sys.exit(1) \ No newline at end of file diff --git a/tests/manual/api_servers_tests.py b/tests/manual/api_servers_tests.py new file mode 100644 index 000000000..1918bd58d --- /dev/null +++ b/tests/manual/api_servers_tests.py @@ -0,0 +1,276 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +MCP Gateway v0.7.0 - Virtual Servers API Tests + +Comprehensive testing of virtual server management including: +- Server listing and creation +- Server configuration and updates +- Transport endpoints (SSE, WebSocket) +- Server status and health monitoring + +Usage: + python3 tests/manual/api_servers_tests.py +""" + +import sys +import json +from pathlib import Path +from datetime import datetime + +# Virtual Servers API test cases +SERVERS_TESTS = [ + { + "id": "SRV-001", + "endpoint": "/servers", + "method": "GET", + "description": "List virtual servers with team filtering", + "curl_command": 'curl http://localhost:4444/servers -H "Authorization: Bearer "', + "request_body": "", + "expected_status": 200, + "expected_response": "Array of virtual servers user can access", + "test_steps": [ + "1. Use valid JWT token", + "2. Execute servers list request", + "3. Verify HTTP 200 status", + "4. Check response contains server array", + "5. Verify team-based filtering applied", + "6. Check server metadata (name, transport, team, etc.)" + ], + "validation": "Servers listed with proper team-based access control", + "critical": True + }, + { + "id": "SRV-002", + "endpoint": "/servers", + "method": "POST", + "description": "Create new virtual server", + "curl_command": 'curl -X POST http://localhost:4444/servers -H "Authorization: Bearer " -H "Content-Type: application/json"', + "request_body": '{"name":"Manual Test Server","description":"Server created during manual testing","transport":"sse","config":{"timeout":30}}', + "expected_status": 201, + "expected_response": "Virtual server created with ID and team assignment", + "test_steps": [ + "1. Prepare server configuration data", + "2. Execute server creation request", + "3. Verify HTTP 201 status", + "4. Check response contains server ID", + "5. Verify server appears in servers list", + "6. Check automatic team assignment", + "7. Save server ID for subsequent tests" + ], + "validation": "Server created with automatic team assignment" + }, + { + "id": "SRV-003", + "endpoint": "/servers/{id}", + "method": "GET", + "description": "Get server details and configuration", + "curl_command": 'curl http://localhost:4444/servers/{SERVER_ID} -H "Authorization: Bearer "', + "request_body": "", + "expected_status": 200, + "expected_response": "Server details with full configuration", + "test_steps": [ + "1. Use server ID from creation test or existing server", + "2. Request server details", + "3. Verify HTTP 200 status", + "4. Check detailed server information", + "5. Verify configuration data is included", + "6. Check team and ownership information" + ], + "validation": "Server details accessible with complete metadata" + }, + { + "id": "SRV-004", + "endpoint": "/servers/{id}", + "method": "PUT", + "description": "Update server configuration", + "curl_command": 'curl -X PUT http://localhost:4444/servers/{SERVER_ID} -H "Authorization: Bearer " -H "Content-Type: application/json"', + "request_body": '{"name":"Updated Server Name","description":"Updated during manual testing","config":{"timeout":60}}', + "expected_status": 200, + "expected_response": "Server updated successfully", + "test_steps": [ + "1. Use server ID from previous tests", + "2. Prepare update configuration", + "3. Execute server update request", + "4. Verify HTTP 200 status", + "5. Check server details show updates", + "6. Verify permissions enforced (owner/team access)" + ], + "validation": "Server updates work with proper authorization" + }, + { + "id": "SRV-005", + "endpoint": "/servers/{id}/sse", + "method": "GET", + "description": "Server-Sent Events connection test", + "curl_command": 'curl -N http://localhost:4444/servers/{SERVER_ID}/sse -H "Authorization: Bearer "', + "request_body": "", + "expected_status": 200, + "expected_response": "SSE stream established, events received", + "test_steps": [ + "1. Use server ID with SSE transport", + "2. Execute SSE connection request", + "3. Verify HTTP 200 status", + "4. Check for SSE headers (text/event-stream)", + "5. Monitor stream for events", + "6. Test connection stability" + ], + "validation": "SSE connection works, events stream properly" + }, + { + "id": "SRV-006", + "endpoint": "/servers/{id}/ws", + "method": "WebSocket", + "description": "WebSocket connection test", + "curl_command": "Use WebSocket client or browser developer tools", + "request_body": "WebSocket upgrade request with Authorization header", + "expected_status": 101, + "expected_response": "WebSocket connection established", + "test_steps": [ + "1. Use WebSocket client tool or browser dev tools", + "2. Connect to ws://localhost:4444/servers/{SERVER_ID}/ws", + "3. Include Authorization header with JWT token", + "4. Verify WebSocket upgrade (status 101)", + "5. Test bidirectional communication", + "6. Check connection stability and message handling" + ], + "validation": "WebSocket connection works, bidirectional communication" + }, + { + "id": "SRV-007", + "endpoint": "/servers/{id}/tools", + "method": "GET", + "description": "List tools available on server", + "curl_command": 'curl http://localhost:4444/servers/{SERVER_ID}/tools -H "Authorization: Bearer "', + "request_body": "", + "expected_status": 200, + "expected_response": "Array of tools available on the server", + "test_steps": [ + "1. Use server ID with available tools", + "2. Request server tools", + "3. Verify HTTP 200 status", + "4. Check tools array in response", + "5. Verify tool details and schemas", + "6. Check team-based tool access" + ], + "validation": "Server tools listed with proper access control" + }, + { + "id": "SRV-008", + "endpoint": "/servers/{id}/resources", + "method": "GET", + "description": "List resources available on server", + "curl_command": 'curl http://localhost:4444/servers/{SERVER_ID}/resources -H "Authorization: Bearer "', + "request_body": "", + "expected_status": 200, + "expected_response": "Array of resources available on the server", + "test_steps": [ + "1. Use server ID with available resources", + "2. Request server resources", + "3. Verify HTTP 200 status", + "4. Check resources array", + "5. Verify resource URIs and metadata", + "6. Test resource access permissions" + ], + "validation": "Server resources listed with access control" + }, + { + "id": "SRV-009", + "endpoint": "/servers/{id}/status", + "method": "GET", + "description": "Get server status and health", + "curl_command": 'curl http://localhost:4444/servers/{SERVER_ID}/status -H "Authorization: Bearer "', + "request_body": "", + "expected_status": 200, + "expected_response": "Server status, health, and connection info", + "test_steps": [ + "1. Use any valid server ID", + "2. Request server status", + "3. Verify HTTP 200 status", + "4. Check status information", + "5. Verify health indicators", + "6. Check connection and performance metrics" + ], + "validation": "Server status and health data provided" + }, + { + "id": "SRV-010", + "endpoint": "/servers/{id}", + "method": "DELETE", + "description": "Delete virtual server", + "curl_command": 'curl -X DELETE http://localhost:4444/servers/{SERVER_ID} -H "Authorization: Bearer "', + "request_body": "", + "expected_status": 204, + "expected_response": "Server deleted successfully", + "test_steps": [ + "1. Use test server ID (not production server)", + "2. Execute server deletion request", + "3. Verify HTTP 204 status", + "4. Check server no longer in list", + "5. Verify permissions enforced", + "6. Check cleanup of associated resources" + ], + "validation": "Server deletion works with proper authorization" + } +] + + +def run_servers_tests(): + """Run all servers API tests.""" + + print("🖥️ VIRTUAL SERVERS API TESTING") + print("=" * 60) + + results = [] + + # Get JWT token + token = input("Enter JWT token: ").strip() + if not token: + print("❌ Token required") + return [] + + for test in SERVERS_TESTS: + print(f"\\n{'='*60}") + print(f"🧪 {test['id']}: {test['endpoint']} ({test['method']})") + + if test.get('critical'): + print("🚨 CRITICAL TEST") + + # Show steps and execute + print(f"\\nSteps:") + for step in test['test_steps']: + print(f" {step}") + + curl_cmd = test['curl_command'].replace('', token) + print(f"\\nCommand: {curl_cmd}") + + response = input(f"\\nExecute {test['id']}? (y/n/skip): ") + + if response.lower() == 'skip': + results.append({"id": test['id'], "status": "SKIP"}) + elif response.lower() == 'y': + status_code = input("HTTP status: ") + response_summary = input("Response summary: ") + + passed = status_code == str(test['expected_status']) + results.append({ + "id": test['id'], + "status": "PASS" if passed else "FAIL", + "actual_status": status_code, + "response": response_summary + }) + + # Save results + with open("tests/manual/servers_test_results.json", 'w') as f: + json.dump(results, f, indent=2) + + return results + + +if __name__ == "__main__": + try: + results = run_servers_tests() + print("\\n🎉 Servers API testing complete!") + except Exception as e: + print(f"❌ Error: {e}") + sys.exit(1) \ No newline at end of file diff --git a/tests/manual/api_teams_tests.py b/tests/manual/api_teams_tests.py new file mode 100644 index 000000000..87f6c8cf6 --- /dev/null +++ b/tests/manual/api_teams_tests.py @@ -0,0 +1,308 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +MCP Gateway v0.7.0 - Teams API Tests + +Comprehensive testing of team management endpoints including: +- Team creation and management +- Team membership operations +- Team invitations +- Team visibility and permissions + +Usage: + python3 tests/manual/api_teams_tests.py +""" + +import sys +import json +from pathlib import Path +from datetime import datetime + +# Teams API test cases +TEAMS_TESTS = [ + { + "id": "TEAM-001", + "endpoint": "/teams", + "method": "GET", + "description": "List user's teams", + "curl_command": 'curl http://localhost:4444/teams -H "Authorization: Bearer "', + "request_body": "", + "expected_status": 200, + "expected_response": "Array of teams user belongs to", + "test_steps": [ + "1. Get JWT token from login first", + "2. Execute teams list request", + "3. Verify HTTP 200 status", + "4. Check response is JSON array", + "5. Verify personal team is included", + "6. Check team data includes name, id, visibility" + ], + "validation": "Returns user's teams including personal team" + }, + { + "id": "TEAM-002", + "endpoint": "/teams", + "method": "POST", + "description": "Create new team", + "curl_command": 'curl -X POST http://localhost:4444/teams -H "Authorization: Bearer " -H "Content-Type: application/json"', + "request_body": '{"name":"Manual Test Team","description":"Team created during manual testing","visibility":"private","max_members":20}', + "expected_status": 201, + "expected_response": "Team created successfully with generated ID", + "test_steps": [ + "1. Prepare team creation data", + "2. Execute team creation request", + "3. Verify HTTP 201 status", + "4. Check response contains team ID", + "5. Verify team appears in teams list", + "6. Save team ID for subsequent tests" + ], + "validation": "Team created and accessible" + }, + { + "id": "TEAM-003", + "endpoint": "/teams/{id}", + "method": "GET", + "description": "Get team details", + "curl_command": 'curl http://localhost:4444/teams/{TEAM_ID} -H "Authorization: Bearer "', + "request_body": "", + "expected_status": 200, + "expected_response": "Team details with member information", + "test_steps": [ + "1. Use team ID from creation test or personal team", + "2. Request team details", + "3. Verify HTTP 200 status", + "4. Check response includes team metadata", + "5. Verify member list is included", + "6. Check permissions are enforced" + ], + "validation": "Team details accessible to members" + }, + { + "id": "TEAM-004", + "endpoint": "/teams/{id}", + "method": "PUT", + "description": "Update team information", + "curl_command": 'curl -X PUT http://localhost:4444/teams/{TEAM_ID} -H "Authorization: Bearer " -H "Content-Type: application/json"', + "request_body": '{"name":"Updated Team Name","description":"Updated during manual testing"}', + "expected_status": 200, + "expected_response": "Team updated successfully", + "test_steps": [ + "1. Use team ID from creation test", + "2. Prepare update data", + "3. Execute team update request", + "4. Verify HTTP 200 status", + "5. Check team details show updated information", + "6. Verify only team owners can update" + ], + "validation": "Team update works for owners" + }, + { + "id": "TEAM-005", + "endpoint": "/teams/{id}/members", + "method": "GET", + "description": "List team members", + "curl_command": 'curl http://localhost:4444/teams/{TEAM_ID}/members -H "Authorization: Bearer "', + "request_body": "", + "expected_status": 200, + "expected_response": "Array of team members with roles", + "test_steps": [ + "1. Use valid team ID", + "2. Request member list", + "3. Verify HTTP 200 status", + "4. Check members array in response", + "5. Verify member roles (owner/member)", + "6. Check join dates and status" + ], + "validation": "Member list shows users with correct roles" + }, + { + "id": "TEAM-006", + "endpoint": "/teams/{id}/members", + "method": "POST", + "description": "Add team member", + "curl_command": 'curl -X POST http://localhost:4444/teams/{TEAM_ID}/members -H "Authorization: Bearer " -H "Content-Type: application/json"', + "request_body": '{"user_email":"newmember@example.com","role":"member"}', + "expected_status": 201, + "expected_response": "Member added to team successfully", + "test_steps": [ + "1. Create test user first (if needed)", + "2. Prepare member addition data", + "3. Execute add member request", + "4. Verify HTTP 201 status", + "5. Check member appears in member list", + "6. Verify only team owners can add members" + ], + "validation": "Member addition works for team owners" + }, + { + "id": "TEAM-007", + "endpoint": "/teams/{id}/invitations", + "method": "GET", + "description": "List team invitations", + "curl_command": 'curl http://localhost:4444/teams/{TEAM_ID}/invitations -H "Authorization: Bearer "', + "request_body": "", + "expected_status": 200, + "expected_response": "Array of pending invitations", + "test_steps": [ + "1. Use valid team ID", + "2. Request invitations list", + "3. Verify HTTP 200 status", + "4. Check invitations array", + "5. Verify invitation details (email, role, status)", + "6. Test permissions (team owners only)" + ], + "validation": "Invitation list accessible to team owners" + }, + { + "id": "TEAM-008", + "endpoint": "/teams/{id}/invitations", + "method": "POST", + "description": "Create team invitation", + "curl_command": 'curl -X POST http://localhost:4444/teams/{TEAM_ID}/invitations -H "Authorization: Bearer " -H "Content-Type: application/json"', + "request_body": '{"email":"invitee@example.com","role":"member","message":"Join our testing team!"}', + "expected_status": 201, + "expected_response": "Invitation created and sent", + "test_steps": [ + "1. Prepare invitation data", + "2. Execute invitation creation", + "3. Verify HTTP 201 status", + "4. Check invitation created in database", + "5. Verify email sent (if email configured)", + "6. Test invitation token functionality" + ], + "validation": "Invitation created with valid token" + }, + { + "id": "TEAM-009", + "endpoint": "/teams/{id}/leave", + "method": "POST", + "description": "Leave team", + "curl_command": 'curl -X POST http://localhost:4444/teams/{TEAM_ID}/leave -H "Authorization: Bearer "', + "request_body": "", + "expected_status": 200, + "expected_response": "Successfully left team (or 403 if personal team)", + "test_steps": [ + "1. Use non-personal team ID", + "2. Execute leave team request", + "3. Verify appropriate response", + "4. Check user no longer in member list", + "5. Test that personal teams cannot be left", + "6. Verify access to team resources is removed" + ], + "validation": "Team leave functionality works, personal teams protected" + }, + { + "id": "TEAM-010", + "endpoint": "/teams/{id}", + "method": "DELETE", + "description": "Delete team", + "curl_command": 'curl -X DELETE http://localhost:4444/teams/{TEAM_ID} -H "Authorization: Bearer "', + "request_body": "", + "expected_status": 204, + "expected_response": "Team deleted successfully (or 403 if personal team)", + "test_steps": [ + "1. Use test team ID (not personal team)", + "2. Execute team deletion request", + "3. Verify appropriate HTTP status", + "4. Check team no longer exists", + "5. Test that personal teams cannot be deleted", + "6. Verify team resources are handled properly" + ], + "validation": "Team deletion works, personal teams protected" + } +] + + +def run_teams_tests(): + """Run all teams API tests.""" + + print("👥 TEAMS API TESTING") + print("=" * 60) + print("🎯 Testing team management endpoints") + + results = [] + + print("\\n🔧 Pre-test Requirements:") + print("1. MCP Gateway running (make dev)") + print("2. Valid JWT token (from login)") + print("3. Admin access for team operations") + + # Get JWT token + token = input("\\nEnter JWT token (from auth login test): ").strip() + if not token: + print("❌ JWT token required for team API testing") + return [] + + print("\\n🧪 Executing Teams API Tests...") + + for test in TEAMS_TESTS: + print(f"\\n{'='*60}") + print(f"🧪 TEST {test['id']}: {test['endpoint']} ({test['method']})") + print(f"Description: {test['description']}") + + print(f"\\n📋 Test Steps:") + for step in test['test_steps']: + print(f" {step}") + + # Show curl command with token + curl_cmd = test['curl_command'].replace('', token) + print(f"\\n💻 cURL Command:") + print(f" {curl_cmd}") + if test['request_body']: + print(f" Data: {test['request_body']}") + + print(f"\\n✅ Expected:") + print(f" Status: {test['expected_status']}") + print(f" Response: {test['expected_response']}") + + # Manual execution + response = input(f"\\nExecute test {test['id']}? (y/n/skip): ").lower() + + if response == 'skip' or response == 's': + results.append({"id": test['id'], "status": "SKIP"}) + continue + elif response != 'y': + break + + # Get results + actual_status = input("Actual HTTP status: ") + actual_response = input("Response summary: ") + + passed = actual_status == str(test['expected_status']) + status = "PASS" if passed else "FAIL" + + print(f"\\n{'✅' if passed else '❌'} {test['id']}: {status}") + + results.append({ + "id": test['id'], + "endpoint": test['endpoint'], + "status": status, + "actual_status": actual_status, + "actual_response": actual_response, + "timestamp": datetime.now().isoformat() + }) + + # Save results + results_file = Path("tests/manual/teams_test_results.json") + with open(results_file, 'w') as f: + json.dump(results, f, indent=2) + + print(f"\\n📄 Results saved: {results_file}") + return results + + +if __name__ == "__main__": + if len(sys.argv) > 1 and sys.argv[1] == "--help": + print("👥 Teams API Tests") + print("Usage:") + print(" python3 tests/manual/api_teams_tests.py # Run all tests") + print(" python3 tests/manual/api_teams_tests.py --help # This help") + else: + try: + results = run_teams_tests() + print("\\n🎉 Teams API testing complete!") + except KeyboardInterrupt: + print("\\n❌ Testing cancelled") + except Exception as e: + print(f"❌ Error: {e}") + sys.exit(1) \ No newline at end of file diff --git a/tests/manual/database_tests.py b/tests/manual/database_tests.py new file mode 100644 index 000000000..af885450f --- /dev/null +++ b/tests/manual/database_tests.py @@ -0,0 +1,284 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +MCP Gateway v0.7.0 - Database Compatibility Tests + +Testing both SQLite and PostgreSQL compatibility including: +- Migration execution and rollback +- Data integrity and constraints +- Performance characteristics +- Advanced database features + +Usage: + python3 tests/manual/database_tests.py --sqlite + python3 tests/manual/database_tests.py --postgresql + python3 tests/manual/database_tests.py --both +""" + +import sys +import subprocess +import json +from pathlib import Path +from datetime import datetime + +# Database test cases +DATABASE_TESTS = { + "sqlite": [ + { + "id": "SQLite-001", + "feature": "Migration Execution", + "description": "Test migration on SQLite database", + "commands": [ + "# Set SQLite database URL", + "export DATABASE_URL=sqlite:///./test_migration.db", + "# Run migration", + "python3 -m mcpgateway.bootstrap_db", + "# Check tables created", + "sqlite3 test_migration.db '.tables'" + ], + "expected": "All multitenancy tables created: email_users, email_teams, etc.", + "performance": "Fast", + "validation": "sqlite3 test_migration.db 'SELECT COUNT(*) FROM email_users;'" + }, + { + "id": "SQLite-002", + "feature": "Team Data Population", + "description": "Verify old resources get team assignments", + "commands": [ + "# Check servers have team assignments", + "sqlite3 mcp.db 'SELECT COUNT(*) FROM servers WHERE team_id IS NOT NULL;'", + "# Check tools have team assignments", + "sqlite3 mcp.db 'SELECT COUNT(*) FROM tools WHERE team_id IS NOT NULL;'", + "# Check for any NULL team assignments", + "sqlite3 mcp.db 'SELECT COUNT(*) FROM servers WHERE team_id IS NULL;'" + ], + "expected": "All resources have team_id populated, no NULL values", + "performance": "Fast", + "validation": "Zero NULL team_id values in resource tables" + }, + { + "id": "SQLite-003", + "feature": "Connection Pool Management", + "description": "Test SQLite connection handling", + "commands": [ + "# Set connection pool size", + "export DB_POOL_SIZE=50", + "# Start gateway and test concurrent connections", + "make dev &", + "# Run multiple concurrent API calls", + "for i in {1..20}; do curl http://localhost:4444/health & done; wait" + ], + "expected": "Connections managed within SQLite limits (~50 max)", + "performance": "Good with limitations", + "validation": "No connection errors, stable performance" + }, + { + "id": "SQLite-004", + "feature": "JSON Field Operations", + "description": "Test JSON data storage and querying", + "commands": [ + "# Check JSON fields in tools table", + "sqlite3 mcp.db 'SELECT name, schema FROM tools LIMIT 5;'", + "# Test JSON field updates", + "sqlite3 mcp.db 'UPDATE tools SET schema = json_set(schema, \"$.test\", \"value\") WHERE id = (SELECT id FROM tools LIMIT 1);'" + ], + "expected": "JSON data stored and queried correctly", + "performance": "Good", + "validation": "JSON fields readable and updateable" + }, + { + "id": "SQLite-005", + "feature": "Backup and Restore", + "description": "Test file-based backup/restore", + "commands": [ + "# Create backup", + "cp mcp.db backup_test.db", + "# Make some changes", + "sqlite3 mcp.db 'INSERT INTO email_teams (id, name, slug, created_by, is_personal, visibility, is_active, created_at, updated_at) VALUES (\"test-backup\", \"Backup Test\", \"backup-test\", \"admin@example.com\", 0, \"private\", 1, datetime(\"now\"), datetime(\"now\"));'", + "# Restore from backup", + "cp backup_test.db mcp.db", + "# Verify restore worked", + "sqlite3 mcp.db 'SELECT COUNT(*) FROM email_teams WHERE name = \"Backup Test\";'" + ], + "expected": "File-based backup and restore works perfectly", + "performance": "Excellent", + "validation": "Data restored exactly, test data should be gone" + } + ], + "postgresql": [ + { + "id": "PG-001", + "feature": "Migration Execution", + "description": "Test migration on PostgreSQL database", + "commands": [ + "# Set PostgreSQL database URL", + "export DATABASE_URL=postgresql://postgres:password@localhost:5432/mcp_test", + "# Create test database", + "createdb mcp_test", + "# Run migration", + "python3 -m mcpgateway.bootstrap_db", + "# Check tables", + "psql mcp_test -c '\\\\dt' | grep email" + ], + "expected": "All tables created with PostgreSQL-specific data types", + "performance": "Fast", + "validation": "psql mcp_test -c 'SELECT COUNT(*) FROM email_users;'" + }, + { + "id": "PG-002", + "feature": "Advanced Data Types", + "description": "Test UUID, JSONB, and advanced PostgreSQL features", + "commands": [ + "# Check UUID columns", + "psql mcp_test -c 'SELECT id FROM email_teams LIMIT 1;'", + "# Test JSONB operations", + "psql mcp_test -c 'SELECT config FROM servers WHERE config IS NOT NULL LIMIT 1;'", + "# Test advanced queries", + "psql mcp_test -c 'SELECT * FROM tools WHERE schema @> \\'{\"type\":\"object\"}\\';'" + ], + "expected": "Advanced PostgreSQL data types work correctly", + "performance": "Excellent", + "validation": "UUIDs valid, JSONB queries work" + }, + { + "id": "PG-003", + "feature": "High Concurrency", + "description": "Test PostgreSQL connection pool and concurrency", + "commands": [ + "# Set high connection pool", + "export DB_POOL_SIZE=200", + "# Start gateway", + "make dev &", + "# Run high concurrency test", + "for i in {1..100}; do curl http://localhost:4444/health & done; wait" + ], + "expected": "High concurrency supported (200+ connections)", + "performance": "Excellent", + "validation": "All requests succeed, no connection errors" + }, + { + "id": "PG-004", + "feature": "JSONB Advanced Operations", + "description": "Test JSONB indexing and complex queries", + "commands": [ + "# Test JSONB containment", + "psql mcp_test -c 'SELECT name FROM tools WHERE schema @> \\'{\"type\":\"object\"}\\';'", + "# Test JSONB path queries", + "psql mcp_test -c 'SELECT name FROM tools WHERE schema #> \\'{properties}\\' IS NOT NULL;'", + "# Create JSONB index", + "psql mcp_test -c 'CREATE INDEX IF NOT EXISTS idx_tools_schema_gin ON tools USING gin(schema);'" + ], + "expected": "JSONB indexing and querying work efficiently", + "performance": "Excellent", + "validation": "Complex JSONB queries execute quickly" + }, + { + "id": "PG-005", + "feature": "Full-Text Search", + "description": "Test PostgreSQL full-text search capabilities", + "commands": [ + "# Test full-text search", + "psql mcp_test -c 'SELECT name FROM tools WHERE to_tsvector(name) @@ plainto_tsquery(\"time\");'", + "# Test search ranking", + "psql mcp_test -c 'SELECT name, ts_rank(to_tsvector(name), plainto_tsquery(\"time\")) as rank FROM tools WHERE to_tsvector(name) @@ plainto_tsquery(\"time\") ORDER BY rank DESC;'" + ], + "expected": "Advanced full-text search with ranking works", + "performance": "Excellent", + "validation": "FTS returns relevant results with ranking" + } + ] +} + + +def run_database_tests(db_type="both"): + """Run database compatibility tests.""" + + print(f"🗄️ DATABASE COMPATIBILITY TESTING") + print("=" * 60) + print(f"🎯 Testing: {db_type.upper()}") + + if db_type == "both": + print("\\n🔧 Testing both SQLite and PostgreSQL") + sqlite_results = run_db_test_suite("sqlite") + postgresql_results = run_db_test_suite("postgresql") + return {"sqlite": sqlite_results, "postgresql": postgresql_results} + else: + return run_db_test_suite(db_type) + + +def run_db_test_suite(db_type): + """Run tests for specific database type.""" + + tests = DATABASE_TESTS.get(db_type, []) + if not tests: + print(f"❌ No tests defined for {db_type}") + return [] + + print(f"\\n🗄️ {db_type.upper()} TESTING") + print("=" * 40) + + results = [] + + for test in tests: + print(f"\\n{'='*50}") + print(f"🧪 {test['id']}: {test['feature']}") + print(f"Description: {test['description']}") + + print(f"\\n💻 Commands to execute:") + for cmd in test['commands']: + if cmd.startswith('#'): + print(f" {cmd}") # Comment + else: + print(f" $ {cmd}") # Command + + print(f"\\n✅ Expected: {test['expected']}") + print(f"⚡ Performance: {test['performance']}") + + # Manual execution + response = input(f"\\nExecute {test['id']}? (y/n/skip): ").lower() + + if response == 'skip': + results.append({"id": test['id'], "status": "SKIP"}) + continue + elif response == 'y': + success = input("Did test complete successfully? (y/n): ").lower() + performance = input(f"Performance rating (Fast/Good/Slow): ") or test['performance'] + + status = "PASS" if success == 'y' else "FAIL" + + results.append({ + "id": test['id'], + "feature": test['feature'], + "status": status, + "performance": performance, + "timestamp": datetime.now().isoformat() + }) + + return results + + +if __name__ == "__main__": + if len(sys.argv) > 1: + if sys.argv[1] == "--help": + print("🗄️ Database Tests") + print("Usage:") + print(" python3 tests/manual/database_tests.py --sqlite # SQLite tests only") + print(" python3 tests/manual/database_tests.py --postgresql # PostgreSQL tests only") + print(" python3 tests/manual/database_tests.py --both # Both databases") + print(" python3 tests/manual/database_tests.py --help # This help") + elif sys.argv[1] == "--sqlite": + run_database_tests("sqlite") + elif sys.argv[1] == "--postgresql": + run_database_tests("postgresql") + elif sys.argv[1] == "--both": + run_database_tests("both") + else: + print("❌ Unknown option. Use --help") + else: + # Default to both + try: + results = run_database_tests("both") + print("\\n🎉 Database testing complete!") + except Exception as e: + print(f"❌ Error: {e}") + sys.exit(1) \ No newline at end of file diff --git a/tests/manual/generate_test_plan.sh b/tests/manual/generate_test_plan.sh new file mode 100755 index 000000000..f50e63bc4 --- /dev/null +++ b/tests/manual/generate_test_plan.sh @@ -0,0 +1,71 @@ +#!/bin/bash +# -*- coding: utf-8 -*- +# MCP Gateway v0.7.0 - Test Plan Generator +# +# Generates comprehensive Excel test plan from Python test files. +# Creates clean, non-corrupted Excel file ready for 10 testers. + +set -e # Exit on any error + +echo "🎯 MCP GATEWAY TEST PLAN GENERATOR" +echo "==================================" +echo "📊 Generating Excel from Python test files" +echo "👥 Ready for 10 manual testers" +echo "" + +# Check prerequisites +echo "🔧 Checking prerequisites..." + +if ! command -v python3 &> /dev/null; then + echo "❌ python3 not found. Please install Python 3.11+" + exit 1 +fi + +# Check openpyxl +if ! python3 -c "import openpyxl" 2>/dev/null; then + echo "📦 Installing openpyxl..." + pip install openpyxl +fi + +echo "✅ Prerequisites OK" + +# Generate Excel file +echo "" +echo "📊 Generating Excel test plan..." +python3 generate_test_plan_xlsx.py + +if [ $? -eq 0 ]; then + echo "" + echo "🎉 SUCCESS!" + echo "📄 Excel file created: test-plan.xlsx" + + if [ -f "test-plan.xlsx" ]; then + file_size=$(stat -c%s "test-plan.xlsx" 2>/dev/null || stat -f%z "test-plan.xlsx" 2>/dev/null || echo "unknown") + echo "📏 File size: $file_size bytes" + + # Test file opens + if python3 -c "import openpyxl; wb=openpyxl.load_workbook('test-plan.xlsx'); print(f'✅ Verified: {len(wb.worksheets)} worksheets'); wb.close()" 2>/dev/null; then + echo "✅ File verification: Opens cleanly" + else + echo "⚠️ File verification: Could not verify" + fi + fi + + echo "" + echo "🎯 Next Steps:" + echo " 1. Open test-plan.xlsx in Excel or LibreOffice" + echo " 2. Review all worksheets (8 total)" + echo " 3. Focus on 'Migration Tests' worksheet (main server visibility test)" + echo " 4. Distribute to 10 testers for execution" + echo "" + echo "👥 Tester Options:" + echo " • Excel file: Open test-plan.xlsx and follow worksheets" + echo " • Python files: Run individual test files directly" + echo " • Coordinated: python3 run_all_tests.py" + echo "" + echo "🚀 READY FOR COMPREHENSIVE TESTING!" + +else + echo "❌ Excel generation failed" + exit 1 +fi \ No newline at end of file diff --git a/tests/manual/generate_test_plan_xlsx.py b/tests/manual/generate_test_plan_xlsx.py new file mode 100644 index 000000000..9d4845a09 --- /dev/null +++ b/tests/manual/generate_test_plan_xlsx.py @@ -0,0 +1,344 @@ +#!/usr/bin/env python3 +""" +Working Excel Test Plan Generator + +Creates clean Excel file that opens without corruption. +All worksheets fully populated with real test data. +""" + +import openpyxl +from openpyxl.styles import PatternFill, Font +from openpyxl.utils import get_column_letter +from pathlib import Path + + +def create_working_excel(): + """Create working Excel file.""" + + print("🔧 Creating Working Excel Test Plan") + print("=" * 50) + + # Create workbook + wb = openpyxl.Workbook() + wb.remove(wb.active) + + # Styles + header_fill = PatternFill(start_color="4F81BD", end_color="4F81BD", fill_type="solid") + header_font = Font(color="FFFFFF", bold=True) + critical_fill = PatternFill(start_color="C5504B", end_color="C5504B", fill_type="solid") + critical_font = Font(color="FFFFFF", bold=True) + + # 1. Setup Instructions + print(" 1. Setup Instructions") + sheet1 = wb.create_sheet("Setup Instructions") + create_setup_data(sheet1, header_fill, header_font) + + # 2. Migration Tests (CRITICAL) + print(" 2. Migration Tests") + sheet2 = wb.create_sheet("Migration Tests") + create_migration_data(sheet2, header_fill, header_font, critical_fill, critical_font) + + # 3. API Authentication + print(" 3. API Authentication") + sheet3 = wb.create_sheet("API Authentication") + create_auth_data(sheet3, header_fill, header_font) + + # 4. API Teams + print(" 4. API Teams") + sheet4 = wb.create_sheet("API Teams") + create_teams_data(sheet4, header_fill, header_font) + + # 5. API Servers + print(" 5. API Servers") + sheet5 = wb.create_sheet("API Servers") + create_servers_data(sheet5, header_fill, header_font) + + # 6. Admin UI + print(" 6. Admin UI") + sheet6 = wb.create_sheet("Admin UI") + create_ui_data(sheet6, header_fill, header_font) + + # 7. Database Tests + print(" 7. Database Tests") + sheet7 = wb.create_sheet("Database Tests") + create_db_data(sheet7, header_fill, header_font) + + # 8. Security Tests + print(" 8. Security Tests") + sheet8 = wb.create_sheet("Security Tests") + create_security_data(sheet8, header_fill, header_font, critical_fill, critical_font) + + # Save file properly + filepath = Path("test-plan.xlsx") + print("\\n💾 Saving file...") + + wb.save(filepath) + print("✅ File saved") + + # CRITICAL: Close workbook properly + wb.close() + print("✅ File closed") + + # Verify + print("\\n🔍 Verifying...") + try: + test_wb = openpyxl.load_workbook(filepath) + print(f"✅ Opens successfully: {len(test_wb.worksheets)} worksheets") + + # Check key worksheets + for sheet in test_wb.worksheets: + test_count = max(0, sheet.max_row - 1) + print(f" 📄 {sheet.title}: {test_count} tests") + + test_wb.close() + print("✅ Test file closed") + + print("\\n🎊 SUCCESS! Working Excel file created!") + return True + + except Exception as e: + print(f"❌ Verification failed: {e}") + return False + + +def create_setup_data(sheet, header_fill, header_font): + """Create setup instructions data.""" + + headers = ["Step", "Action", "Command", "Expected", "Status", "Notes"] + for i, header in enumerate(headers, 1): + cell = sheet.cell(row=1, column=i, value=header) + cell.fill = header_fill + cell.font = header_font + + data = [ + ["1", "Check Prerequisites", "python3 --version && git --version", "Python 3.11+ and Git installed", "☐", "Must have both"], + ["2", "Clone Repository", "git clone ", "Repository downloaded", "☐", "Get URL from admin"], + ["3", "Enter Directory", "cd mcp-context-forge", "Directory changed", "☐", ""], + ["4", "Copy Environment", "cp .env.example .env", "Environment file created", "☐", ""], + ["5", "Edit Configuration", "vi .env", "Configuration edited", "☐", "Set admin email/password"], + ["6", "Install Dependencies", "make install-dev", "Dependencies installed", "☐", "May take 5-10 minutes"], + ["7", "Run Migration", "python3 -m mcpgateway.bootstrap_db", "Migration completed", "☐", "CRITICAL STEP"], + ["8", "Verify Migration", "python3 scripts/verify_multitenancy_0_7_0_migration.py", "All checks pass", "☐", "Must pass"], + ["9", "Start Gateway", "make dev", "Server running on port 4444", "☐", "Keep terminal open"], + ["10", "Test Health", "curl http://localhost:4444/health", '{"status":"ok"}', "☐", "Basic connectivity"], + ["11", "Access Admin UI", "Open http://localhost:4444/admin", "Login page loads", "☐", ""], + ["12", "Test Login", "Login with admin credentials", "Dashboard appears", "☐", "Main validation"] + ] + + for i, row in enumerate(data, 2): + for j, value in enumerate(row, 1): + sheet.cell(row=i, column=j, value=value) + + auto_size_columns(sheet) + + +def create_migration_data(sheet, header_fill, header_font, critical_fill, critical_font): + """Create migration test data.""" + + headers = ["Test ID", "Priority", "Component", "Description", "Steps", "Expected", "Actual", "Status", "Tester", "Comments", "SQLite", "PostgreSQL"] + + for i, header in enumerate(headers, 1): + cell = sheet.cell(row=1, column=i, value=header) + cell.fill = header_fill + cell.font = header_font + + data = [ + ["MIG-001", "CRITICAL", "Admin User", "Platform admin created", "Check admin user exists in database", "Admin user found with is_admin=true", "", "☐", "", "", "✓", "✓"], + ["MIG-002", "CRITICAL", "Personal Team", "Admin personal team exists", "Run verification script", "Personal team found", "", "☐", "", "", "✓", "✓"], + ["MIG-003", "CRITICAL", "Server Visibility", "OLD SERVERS VISIBLE - MAIN TEST", "Open admin UI, navigate to Virtual Servers", "ALL servers visible including old ones", "", "☐", "", "MAIN MIGRATION TEST", "✓", "✓"], + ["MIG-004", "CRITICAL", "Resource Teams", "Resources assigned to teams", "Check team assignments in UI and DB", "All resources have team_id populated", "", "☐", "", "", "✓", "✓"], + ["MIG-005", "CRITICAL", "Email Auth", "Email authentication works", "Test login with email/password", "Email login successful", "", "☐", "", "", "✓", "✓"], + ["MIG-006", "HIGH", "Basic Auth", "Basic auth compatibility", "Test basic authentication", "Basic auth still works", "", "☐", "", "", "✓", "✓"], + ["MIG-007", "HIGH", "API Functionality", "APIs respond correctly", "Test core API endpoints", "All APIs return expected responses", "", "☐", "", "", "✓", "✓"], + ["MIG-008", "MEDIUM", "Team Membership", "Admin team ownership", "Check admin is team owner", "Admin listed as owner of personal team", "", "☐", "", "", "✓", "✓"] + ] + + for i, row in enumerate(data, 2): + for j, value in enumerate(row, 1): + cell = sheet.cell(row=i, column=j, value=value) + if j == 2 and value == "CRITICAL": # Priority column + cell.fill = critical_fill + cell.font = critical_font + + auto_size_columns(sheet) + + +def create_auth_data(sheet, header_fill, header_font): + """Create authentication API data.""" + + headers = ["Test ID", "Endpoint", "Method", "Description", "cURL Command", "Expected Status", "Expected Response", "Actual Status", "Actual Response", "Status", "Tester", "Comments"] + + for i, header in enumerate(headers, 1): + cell = sheet.cell(row=1, column=i, value=header) + cell.fill = header_fill + cell.font = header_font + + data = [ + ["AUTH-001", "/auth/register", "POST", "User registration", 'curl -X POST http://localhost:4444/auth/register -d \'{"email":"test@example.com","password":"Test123"}\'', "201", "User created", "", "", "☐", "", ""], + ["AUTH-002", "/auth/login", "POST", "Email login", 'curl -X POST http://localhost:4444/auth/login -d \'{"email":"admin@example.com","password":"changeme"}\'', "200", "JWT token", "", "", "☐", "", ""], + ["AUTH-003", "/auth/logout", "POST", "User logout", 'curl -X POST http://localhost:4444/auth/logout -H "Authorization: Bearer "', "200", "Logout success", "", "", "☐", "", ""], + ["AUTH-004", "/auth/refresh", "POST", "Token refresh", 'curl -X POST http://localhost:4444/auth/refresh -H "Authorization: Bearer "', "200", "New token", "", "", "☐", "", ""], + ["AUTH-005", "/auth/profile", "GET", "User profile", 'curl http://localhost:4444/auth/profile -H "Authorization: Bearer "', "200", "Profile data", "", "", "☐", "", ""], + ["AUTH-006", "/auth/sso/github", "GET", "GitHub SSO", 'curl -I http://localhost:4444/auth/sso/github', "302", "GitHub redirect", "", "", "☐", "", ""], + ["AUTH-007", "/auth/sso/google", "GET", "Google SSO", 'curl -I http://localhost:4444/auth/sso/google', "302", "Google redirect", "", "", "☐", "", ""], + ["AUTH-008", "/auth/change-password", "POST", "Password change", 'curl -X POST http://localhost:4444/auth/change-password -H "Authorization: Bearer " -d password_data', "200", "Password updated", "", "", "☐", "", ""] + ] + + for i, row in enumerate(data, 2): + for j, value in enumerate(row, 1): + sheet.cell(row=i, column=j, value=value) + + auto_size_columns(sheet) + + +def create_teams_data(sheet, header_fill, header_font): + """Create teams API data.""" + + headers = ["Test ID", "Endpoint", "Method", "Description", "cURL Command", "Expected Status", "Expected Response", "Status", "Tester", "Comments"] + + for i, header in enumerate(headers, 1): + cell = sheet.cell(row=1, column=i, value=header) + cell.fill = header_fill + cell.font = header_font + + data = [ + ["TEAM-001", "/teams", "GET", "List teams", 'curl http://localhost:4444/teams -H "Authorization: Bearer "', "200", "Team array", "☐", "", ""], + ["TEAM-002", "/teams", "POST", "Create team", 'curl -X POST http://localhost:4444/teams -d team_data -H "Authorization: Bearer "', "201", "Team created", "☐", "", ""], + ["TEAM-003", "/teams/{id}", "GET", "Team details", 'curl http://localhost:4444/teams/{ID} -H "Authorization: Bearer "', "200", "Team details", "☐", "", ""], + ["TEAM-004", "/teams/{id}/members", "GET", "Team members", 'curl http://localhost:4444/teams/{ID}/members -H "Authorization: Bearer "', "200", "Member list", "☐", "", ""], + ["TEAM-005", "/teams/{id}/invitations", "POST", "Create invitation", 'curl -X POST http://localhost:4444/teams/{ID}/invitations -d invite_data', "201", "Invitation sent", "☐", "", ""] + ] + + for i, row in enumerate(data, 2): + for j, value in enumerate(row, 1): + sheet.cell(row=i, column=j, value=value) + + auto_size_columns(sheet) + + +def create_servers_data(sheet, header_fill, header_font): + """Create servers API data.""" + + headers = ["Test ID", "Endpoint", "Method", "Description", "cURL Command", "Expected Status", "Status", "Tester"] + + for i, header in enumerate(headers, 1): + cell = sheet.cell(row=1, column=i, value=header) + cell.fill = header_fill + cell.font = header_font + + data = [ + ["SRV-001", "/servers", "GET", "List servers", 'curl http://localhost:4444/servers -H "Authorization: Bearer "', "200", "☐", ""], + ["SRV-002", "/servers", "POST", "Create server", 'curl -X POST http://localhost:4444/servers -d server_data', "201", "☐", ""], + ["SRV-003", "/servers/{id}", "GET", "Server details", 'curl http://localhost:4444/servers/{ID} -H "Authorization: Bearer "', "200", "☐", ""], + ["SRV-004", "/servers/{id}/sse", "GET", "SSE connection", 'curl -N http://localhost:4444/servers/{ID}/sse -H "Authorization: Bearer "', "200", "☐", ""], + ["SRV-005", "/servers/{id}/tools", "GET", "Server tools", 'curl http://localhost:4444/servers/{ID}/tools -H "Authorization: Bearer "', "200", "☐", ""] + ] + + for i, row in enumerate(data, 2): + for j, value in enumerate(row, 1): + sheet.cell(row=i, column=j, value=value) + + auto_size_columns(sheet) + + +def create_ui_data(sheet, header_fill, header_font): + """Create UI test data.""" + + headers = ["Test ID", "Component", "Action", "Steps", "Expected", "Status", "Tester", "Browser", "Screenshot"] + + for i, header in enumerate(headers, 1): + cell = sheet.cell(row=1, column=i, value=header) + cell.fill = header_fill + cell.font = header_font + + data = [ + ["UI-001", "Login", "Test login", "Login with admin creds", "Dashboard loads", "☐", "", "Chrome", "Optional"], + ["UI-002", "Dashboard", "View dashboard", "Check stats and navigation", "Dashboard functional", "☐", "", "Chrome", "Optional"], + ["UI-003", "Servers", "View servers - MAIN TEST", "Navigate to Virtual Servers", "ALL SERVERS VISIBLE", "☐", "", "Chrome", "REQUIRED"], + ["UI-004", "Teams", "Team management", "Navigate to Teams", "Teams functional", "☐", "", "Chrome", "Optional"], + ["UI-005", "Tools", "Tool interface", "View and invoke tools", "Tools accessible", "☐", "", "Chrome", "Optional"], + ["UI-006", "Export", "Config export", "Export configuration", "Export works", "☐", "", "Chrome", "Recommended"] + ] + + for i, row in enumerate(data, 2): + for j, value in enumerate(row, 1): + sheet.cell(row=i, column=j, value=value) + + auto_size_columns(sheet) + + +def create_db_data(sheet, header_fill, header_font): + """Create database test data.""" + + headers = ["Test ID", "Database", "Feature", "Command", "Expected", "Status", "Performance", "Notes"] + + for i, header in enumerate(headers, 1): + cell = sheet.cell(row=1, column=i, value=header) + cell.fill = header_fill + cell.font = header_font + + data = [ + ["DB-001", "SQLite", "Migration", "python3 -m mcpgateway.bootstrap_db", "Success", "☐", "Fast", ""], + ["DB-002", "SQLite", "Data Check", "sqlite3 mcp.db 'SELECT COUNT(*) FROM servers;'", "Count > 0", "☐", "Fast", ""], + ["DB-003", "PostgreSQL", "Migration", "Set PG URL, run migration", "Success", "☐", "Fast", ""], + ["DB-004", "PostgreSQL", "Advanced Types", "Test UUID, JSONB", "Advanced features work", "☐", "Excellent", ""], + ["DB-005", "Both", "Performance", "Large dataset test", "Good performance", "☐", "Variable", ""] + ] + + for i, row in enumerate(data, 2): + for j, value in enumerate(row, 1): + sheet.cell(row=i, column=j, value=value) + + auto_size_columns(sheet) + + +def create_security_data(sheet, header_fill, header_font, critical_fill, critical_font): + """Create security test data.""" + + headers = ["Test ID", "Attack Type", "Target", "Description", "Expected Defense", "Risk Level", "Status", "Tester", "Notes"] + + for i, header in enumerate(headers, 1): + cell = sheet.cell(row=1, column=i, value=header) + cell.fill = header_fill + cell.font = header_font + + data = [ + ["SEC-001", "SQL Injection", "API", "SQL injection attempt", "Input sanitized", "Critical", "☐", "", ""], + ["SEC-002", "JWT Tampering", "Auth", "Token manipulation", "Token rejected", "Critical", "☐", "", ""], + ["SEC-003", "Team Bypass", "Authorization", "Cross-team access", "Access denied", "Critical", "☐", "", ""], + ["SEC-004", "XSS Attack", "UI", "Script injection", "Scripts escaped", "High", "☐", "", ""], + ["SEC-005", "Brute Force", "Login", "Password attack", "Account locked", "Medium", "☐", "", ""] + ] + + for i, row in enumerate(data, 2): + for j, value in enumerate(row, 1): + cell = sheet.cell(row=i, column=j, value=value) + if j == 6 and value == "Critical": # Risk Level + cell.fill = critical_fill + cell.font = critical_font + + auto_size_columns(sheet) + + +def auto_size_columns(sheet): + """Auto-size columns.""" + + for col in range(1, sheet.max_column + 1): + max_length = 0 + for row in range(1, min(sheet.max_row + 1, 20)): + cell_value = sheet.cell(row=row, column=col).value + if cell_value: + max_length = max(max_length, len(str(cell_value))) + + width = min(max(max_length + 2, 10), 50) + sheet.column_dimensions[get_column_letter(col)].width = width + + +if __name__ == "__main__": + try: + success = create_working_excel() + if not success: + sys.exit(1) + except Exception as e: + print(f"❌ Error: {e}") + sys.exit(1) \ No newline at end of file diff --git a/tests/manual/migration_tests.py b/tests/manual/migration_tests.py new file mode 100644 index 000000000..07c86587d --- /dev/null +++ b/tests/manual/migration_tests.py @@ -0,0 +1,405 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +MCP Gateway v0.7.0 - Migration Validation Tests + +Critical post-migration validation tests to ensure the v0.6.0 → v0.7.0 +upgrade completed successfully and old servers are now visible. + +Usage: + python3 tests/manual/migration_tests.py + python3 tests/manual/migration_tests.py --run-all +""" + +import sys +import subprocess +import json +from pathlib import Path +from datetime import datetime + +# Add project root to path +project_root = Path(__file__).parent.parent.parent +sys.path.insert(0, str(project_root)) + +# Migration test cases +MIGRATION_TESTS = [ + { + "id": "MIG-001", + "priority": "CRITICAL", + "component": "Admin User Creation", + "description": "Verify platform admin user was created during migration", + "steps": [ + "1. Check expected admin email from configuration:", + ' python3 -c "from mcpgateway.config import settings; print(f\'Expected admin: {settings.platform_admin_email}\')"', + "2. Check actual admin user in database:", + ' python3 -c "from mcpgateway.db import SessionLocal, EmailUser; db=SessionLocal(); admin=db.query(EmailUser).filter(EmailUser.is_admin==True).first(); print(f\'Found admin: {admin.email if admin else None}, is_admin: {admin.is_admin if admin else False}\'); db.close()"', + "3. Compare expected vs actual results", + "4. Record both outputs exactly" + ], + "expected": "Expected admin email matches found admin email, is_admin=True", + "validation_command": 'python3 -c "from mcpgateway.config import settings; from mcpgateway.db import SessionLocal, EmailUser; db=SessionLocal(); admin=db.query(EmailUser).filter(EmailUser.email==settings.platform_admin_email, EmailUser.is_admin==True).first(); result = \'PASS\' if admin else \'FAIL\'; print(f\'Result: {result} - Admin {settings.platform_admin_email} exists: {admin is not None}\'); db.close()"' + }, + { + "id": "MIG-002", + "priority": "CRITICAL", + "component": "Personal Team Creation", + "description": "Verify admin user has personal team created automatically", + "steps": [ + "1. Run the verification script:", + " python3 scripts/verify_multitenancy_0_7_0_migration.py", + "2. Look for 'PERSONAL TEAM CHECK' section in the output", + "3. Record the team ID, name, and slug shown", + "4. Verify there are no error messages", + "5. Note team visibility (should be 'private')" + ], + "expected": "✅ Personal team found: (Team ID: , Slug: , Visibility: private)", + "validation_command": 'python3 -c "from mcpgateway.db import SessionLocal, EmailTeam, EmailUser; from mcpgateway.config import settings; db=SessionLocal(); admin=db.query(EmailUser).filter(EmailUser.email==settings.platform_admin_email).first(); team=db.query(EmailTeam).filter(EmailTeam.created_by==settings.platform_admin_email, EmailTeam.is_personal==True).first() if admin else None; result = \'PASS\' if team else \'FAIL\'; print(f\'Result: {result} - Personal team exists: {team is not None}\'); db.close()"' + }, + { + "id": "MIG-003", + "priority": "CRITICAL", + "component": "Server Visibility Fix", + "description": "OLD SERVERS NOW VISIBLE - This is the main issue being fixed", + "steps": [ + "1. Open web browser (Chrome or Firefox recommended)", + "2. Navigate to: http://localhost:4444/admin", + "3. Login using admin email and password from your .env file", + "4. Click 'Virtual Servers' in the navigation menu", + "5. Count the total number of servers displayed", + "6. Look for servers with older creation dates (pre-migration)", + "7. Click on each server to verify details are accessible", + "8. Take screenshot of the server list showing all servers", + "9. Record server names, creation dates, and visibility" + ], + "expected": "ALL pre-migration servers visible in admin UI server list, details accessible", + "validation_command": 'python3 -c "from mcpgateway.db import SessionLocal, Server; db=SessionLocal(); total=db.query(Server).count(); with_teams=db.query(Server).filter(Server.team_id!=None).count(); print(f\'Server visibility: {with_teams}/{total} servers have team assignments\'); result = \'PASS\' if with_teams == total else \'FAIL\'; print(f\'Result: {result}\'); db.close()"', + "main_test": True, + "screenshot_required": True + }, + { + "id": "MIG-004", + "priority": "CRITICAL", + "component": "Resource Team Assignment", + "description": "All resources assigned to teams (no NULL team_id values)", + "steps": [ + "1. In admin UI, navigate to Tools section", + "2. Click on any tool to view its details", + "3. Verify 'Team' field shows a team name (not empty or NULL)", + "4. Verify 'Owner' field shows the admin email address", + "5. Verify 'Visibility' field has a value (private/team/public)", + "6. Repeat this check for Resources and Prompts sections", + "7. Run database verification command to check for NULL team assignments", + "8. Record the count of unassigned resources" + ], + "expected": "All resources show Team/Owner/Visibility, database query shows 0 unassigned", + "validation_command": 'python3 -c "from mcpgateway.db import SessionLocal, Tool, Resource, Prompt; db=SessionLocal(); tool_null=db.query(Tool).filter(Tool.team_id==None).count(); res_null=db.query(Resource).filter(Resource.team_id==None).count(); prompt_null=db.query(Prompt).filter(Prompt.team_id==None).count(); total_null=tool_null+res_null+prompt_null; print(f\'Unassigned resources: Tools={tool_null}, Resources={res_null}, Prompts={prompt_null}, Total={total_null}\'); result = \'PASS\' if total_null == 0 else \'FAIL\'; print(f\'Result: {result}\'); db.close()"' + }, + { + "id": "MIG-005", + "priority": "CRITICAL", + "component": "Email Authentication", + "description": "Email-based authentication functional after migration", + "steps": [ + "1. Open new private/incognito browser window", + "2. Navigate to http://localhost:4444/admin", + "3. Look for email login form or 'Email Login' option", + "4. Enter the admin email address from your .env file", + "5. Enter the admin password from your .env file", + "6. Click the Login/Submit button", + "7. Verify successful redirect to admin dashboard", + "8. Check that user menu/profile shows the correct email address" + ], + "expected": "Email authentication successful, dashboard loads, correct email displayed in UI", + "validation_command": 'curl -s -X POST http://localhost:4444/auth/login -H "Content-Type: application/json" -d \'{"email":"admin@example.com","password":"changeme"}\' | python3 -c "import json, sys; data=json.load(sys.stdin); print(f\'Email auth result: {\"PASS\" if \"token\" in data else \"FAIL\"} - Token present: {\"token\" in data}\')"' + }, + { + "id": "MIG-006", + "priority": "HIGH", + "component": "Basic Auth Compatibility", + "description": "Basic authentication still works alongside email auth", + "steps": [ + "1. Open a new browser window (not incognito)", + "2. Navigate to http://localhost:4444/admin", + "3. When browser prompts for authentication, use basic auth:", + " Username: admin", + " Password: changeme", + "4. Verify access is granted to admin interface", + "5. Navigate to different admin sections to test functionality", + "6. Confirm no conflicts with email authentication" + ], + "expected": "Basic auth continues to work, no conflicts with email auth system", + "validation_command": 'curl -s -u admin:changeme http://localhost:4444/admin/teams | python3 -c "import json, sys; try: data=json.load(sys.stdin); print(\'Basic auth result: PASS - API accessible\'); except: print(\'Basic auth result: FAIL - API not accessible\')"' + }, + { + "id": "MIG-007", + "priority": "HIGH", + "component": "Database Schema Validation", + "description": "All multitenancy tables created with proper structure", + "steps": [ + "1. Check multitenancy tables exist:", + " SQLite: sqlite3 mcp.db '.tables' | grep email", + " PostgreSQL: psql -d mcp -c '\\\\dt' | grep email", + "2. Verify required tables: email_users, email_teams, email_team_members, roles, user_roles", + "3. Check table row counts:", + ' python3 -c "from mcpgateway.db import SessionLocal, EmailUser, EmailTeam; db=SessionLocal(); users=db.query(EmailUser).count(); teams=db.query(EmailTeam).count(); print(f\'Users: {users}, Teams: {teams}\'); db.close()"', + "4. Test foreign key relationships work properly" + ], + "expected": "All multitenancy tables exist with proper data and working relationships", + "validation_command": 'python3 -c "from mcpgateway.db import SessionLocal, EmailUser, EmailTeam, EmailTeamMember; db=SessionLocal(); users=db.query(EmailUser).count(); teams=db.query(EmailTeam).count(); members=db.query(EmailTeamMember).count(); result = \'PASS\' if users > 0 and teams > 0 and members > 0 else \'FAIL\'; print(f\'Schema validation: {result} - Users: {users}, Teams: {teams}, Members: {members}\'); db.close()"' + }, + { + "id": "MIG-008", + "priority": "HIGH", + "component": "Team Membership Validation", + "description": "Admin user properly added to personal team as owner", + "steps": [ + "1. In admin UI, navigate to the Teams section", + "2. Find the personal team (usually named ''s Team')", + "3. Click on the personal team to view its details", + "4. Click 'View Members' or 'Members' tab", + "5. Verify admin user is listed with role 'Owner'", + "6. Check the join date is recent (around migration execution time)", + "7. Test basic team management functions" + ], + "expected": "Admin user listed as Owner in personal team with recent join date", + "validation_command": 'python3 -c "from mcpgateway.db import SessionLocal, EmailTeamMember, EmailUser; from mcpgateway.config import settings; db=SessionLocal(); admin=db.query(EmailUser).filter(EmailUser.email==settings.platform_admin_email).first(); membership=db.query(EmailTeamMember).filter(EmailTeamMember.user_email==settings.platform_admin_email, EmailTeamMember.role==\'owner\').first() if admin else None; result = \'PASS\' if membership else \'FAIL\'; print(f\'Team membership: {result} - Admin is owner: {membership is not None}\'); db.close()"' + }, + { + "id": "MIG-009", + "priority": "MEDIUM", + "component": "API Functionality Validation", + "description": "Core APIs respond correctly after migration", + "steps": [ + "1. Test health endpoint:", + " curl http://localhost:4444/health", + "2. Get authentication token:", + ' curl -X POST http://localhost:4444/auth/login -H "Content-Type: application/json" -d \'{"email":"","password":""}\'', + "3. Test teams API with the token:", + ' curl -H "Authorization: Bearer " http://localhost:4444/teams', + "4. Test servers API:", + ' curl -H "Authorization: Bearer " http://localhost:4444/servers', + "5. Record all HTTP status codes and response content" + ], + "expected": "Health=200, Login=200 with JWT token, Teams=200 with team data, Servers=200 with server data", + "validation_command": 'curl -s http://localhost:4444/health | python3 -c "import json, sys; data=json.load(sys.stdin); print(f\'Health check: {\"PASS\" if data.get(\"status\") == \"ok\" else \"FAIL\"} - Status: {data.get(\"status\")}\') if isinstance(data, dict) else print(\'Health check: FAIL - Invalid response\')"' + }, + { + "id": "MIG-010", + "priority": "MEDIUM", + "component": "Post-Migration Resource Creation", + "description": "New resources created after migration get proper team assignments", + "steps": [ + "1. In admin UI, navigate to Tools section", + "2. Click 'Create Tool' or 'Add Tool' button", + "3. Fill in tool details:", + " Name: 'Post-Migration Test Tool'", + " Description: 'Tool created after v0.7.0 migration'", + " Visibility: 'Team'", + "4. Save the new tool", + "5. Verify tool appears in the tools list", + "6. Check tool details show automatic team assignment", + "7. Delete the test tool when validation is complete" + ], + "expected": "New tool created successfully with automatic team assignment to creator's team", + "validation_command": "# Manual test - check via UI that new resources get team assignments" + } +] + + +def run_migration_validation(): + """Run interactive migration validation.""" + + print("🔄 MCP GATEWAY MIGRATION VALIDATION") + print("=" * 60) + print("🎯 Purpose: Validate v0.6.0 → v0.7.0 migration success") + print("🚨 Critical: These tests must pass for production use") + + results = [] + + print("\\n📋 MIGRATION TEST EXECUTION") + + for test in MIGRATION_TESTS: + print(f"\\n{'='*60}") + print(f"🧪 TEST {test['id']}: {test['component']}") + print(f"Priority: {test['priority']}") + print(f"Description: {test['description']}") + + if test.get('main_test'): + print("🎯 THIS IS THE MAIN MIGRATION TEST!") + + print(f"\\n📋 Test Steps:") + for step in test['steps']: + print(f" {step}") + + print(f"\\n✅ Expected Result:") + print(f" {test['expected']}") + + # Run validation command if available + if 'validation_command' in test and not test['validation_command'].startswith('#'): + print(f"\\n🔍 Running automated validation...") + try: + result = subprocess.run(test['validation_command'], shell=True, + capture_output=True, text=True, timeout=30) + print(f" Validation output: {result.stdout.strip()}") + if result.stderr: + print(f" Validation errors: {result.stderr.strip()}") + except subprocess.TimeoutExpired: + print(" ⚠️ Validation timeout") + except Exception as e: + print(f" ❌ Validation error: {e}") + + # Get user confirmation + print(f"\\n📝 Manual Verification Required:") + response = input(f"Did test {test['id']} PASS? (y/n/skip): ").lower() + + if response == 'y': + status = "PASS" + print(f"✅ {test['id']}: PASSED") + elif response == 'n': + status = "FAIL" + print(f"❌ {test['id']}: FAILED") + if test['priority'] == 'CRITICAL': + print(f"🚨 CRITICAL TEST FAILED!") + print(f"🛑 Migration may not be successful") + break_early = input("Continue with remaining tests? (y/N): ").lower() + if break_early != 'y': + break + else: + status = "SKIP" + print(f"⚠️ {test['id']}: SKIPPED") + + # Record result + result_data = { + "test_id": test['id'], + "component": test['component'], + "status": status, + "timestamp": datetime.now().isoformat(), + "priority": test['priority'] + } + + if response == 'n': # Failed test + details = input("Please describe what failed: ") + result_data['failure_details'] = details + + results.append(result_data) + + # Generate summary + generate_test_summary(results) + + return results + + +def generate_test_summary(results): + """Generate test execution summary.""" + + print(f"\\n{'='*60}") + print("📊 MIGRATION VALIDATION SUMMARY") + print("=" * 60) + + # Count results + passed = len([r for r in results if r['status'] == 'PASS']) + failed = len([r for r in results if r['status'] == 'FAIL']) + skipped = len([r for r in results if r['status'] == 'SKIP']) + total = len(results) + + print(f"📈 Test Results:") + print(f" ✅ Passed: {passed}/{total}") + print(f" ❌ Failed: {failed}/{total}") + print(f" ⚠️ Skipped: {skipped}/{total}") + + # Check critical tests + critical_results = [r for r in results if r['priority'] == 'CRITICAL'] + critical_passed = len([r for r in critical_results if r['status'] == 'PASS']) + critical_total = len(critical_results) + + print(f"\\n🚨 Critical Test Results:") + print(f" ✅ Critical Passed: {critical_passed}/{critical_total}") + + # Overall assessment + if failed == 0 and critical_passed == critical_total: + print(f"\\n🎉 MIGRATION VALIDATION: SUCCESS!") + print("✅ All critical tests passed") + print("✅ Migration completed successfully") + print("✅ Ready for production use") + elif critical_passed == critical_total: + print(f"\\n⚠️ MIGRATION VALIDATION: PARTIAL SUCCESS") + print("✅ All critical tests passed") + print("⚠️ Some non-critical tests failed") + print("💡 Review failed tests but migration core is successful") + else: + print(f"\\n❌ MIGRATION VALIDATION: FAILED") + print("❌ Critical tests failed") + print("🛑 Migration may not be successful") + print("🔧 Please investigate failures before production use") + + # Save results + save_results(results) + + +def save_results(results): + """Save test results to file.""" + + results_file = Path("tests/manual/migration_test_results.json") + + summary = { + "test_execution": { + "timestamp": datetime.now().isoformat(), + "total_tests": len(results), + "passed": len([r for r in results if r['status'] == 'PASS']), + "failed": len([r for r in results if r['status'] == 'FAIL']), + "skipped": len([r for r in results if r['status'] == 'SKIP']) + }, + "test_results": results + } + + with open(results_file, 'w') as f: + json.dump(summary, f, indent=2) + + print(f"\\n📄 Results saved: {results_file}") + + +def list_all_tests(): + """List all migration tests.""" + + print("📋 ALL MIGRATION VALIDATION TESTS") + print("=" * 50) + + for test in MIGRATION_TESTS: + priority_indicator = "🚨" if test['priority'] == 'CRITICAL' else "🔧" if test['priority'] == 'HIGH' else "📝" + main_indicator = " 🎯 MAIN TEST" if test.get('main_test') else "" + + print(f"\\n{test['id']}: {test['component']} {priority_indicator}{main_indicator}") + print(f" Priority: {test['priority']}") + print(f" Description: {test['description']}") + print(f" Expected: {test['expected']}") + + +if __name__ == "__main__": + if len(sys.argv) > 1: + if sys.argv[1] == "--help": + print("📋 Migration Validation Tests") + print("Usage:") + print(" python3 tests/manual/migration_tests.py # Interactive testing") + print(" python3 tests/manual/migration_tests.py --list # List all tests") + print(" python3 tests/manual/migration_tests.py --help # This help") + elif sys.argv[1] == "--list": + list_all_tests() + elif sys.argv[1] == "--run-all": + print("🚀 Running all migration tests...") + run_migration_validation() + else: + print("❌ Unknown option. Use --help for usage.") + else: + # Interactive mode + print("🔄 Starting interactive migration validation...") + print("💡 Tip: Use --list to see all tests first") + + try: + results = run_migration_validation() + print("\\n🎉 Migration validation complete!") + except KeyboardInterrupt: + print("\\n❌ Testing cancelled by user") + sys.exit(1) + except Exception as e: + print(f"❌ Testing error: {e}") + sys.exit(1) \ No newline at end of file diff --git a/tests/manual/run_all_tests.py b/tests/manual/run_all_tests.py new file mode 100644 index 000000000..a34a97537 --- /dev/null +++ b/tests/manual/run_all_tests.py @@ -0,0 +1,331 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +MCP Gateway v0.7.0 - Master Test Runner + +Coordinates execution of all manual test suites. +Designed for comprehensive validation after v0.6.0 → v0.7.0 migration. + +Usage: + python3 tests/manual/run_all_tests.py + python3 tests/manual/run_all_tests.py --quick + python3 tests/manual/run_all_tests.py --critical-only +""" + +import sys +import subprocess +import json +from pathlib import Path +from datetime import datetime + +# Test suite configuration +TEST_SUITES = [ + { + "name": "Setup Instructions", + "file": "setup_instructions.py", + "description": "Environment setup and validation", + "priority": "CRITICAL", + "estimated_time": "30-60 minutes", + "prerequisite": True + }, + { + "name": "Migration Validation", + "file": "migration_tests.py", + "description": "Post-migration validation tests", + "priority": "CRITICAL", + "estimated_time": "45-90 minutes", + "main_test": True + }, + { + "name": "Admin UI Testing", + "file": "admin_ui_tests.py", + "description": "Complete admin interface testing", + "priority": "CRITICAL", + "estimated_time": "60-120 minutes", + "includes_main_test": True + }, + { + "name": "API Authentication", + "file": "api_authentication_tests.py", + "description": "Authentication endpoint testing", + "priority": "HIGH", + "estimated_time": "30-60 minutes" + }, + { + "name": "API Teams", + "file": "api_teams_tests.py", + "description": "Team management API testing", + "priority": "HIGH", + "estimated_time": "30-60 minutes" + }, + { + "name": "API Servers", + "file": "api_servers_tests.py", + "description": "Virtual servers API testing", + "priority": "HIGH", + "estimated_time": "45-90 minutes" + }, + { + "name": "Database Testing", + "file": "database_tests.py", + "description": "SQLite and PostgreSQL compatibility", + "priority": "HIGH", + "estimated_time": "60-120 minutes" + }, + { + "name": "Security Testing", + "file": "security_tests.py", + "description": "Security and penetration testing", + "priority": "MEDIUM", + "estimated_time": "90-180 minutes", + "warning": "Performs actual attack scenarios" + } +] + +# Tester assignment suggestions +TESTER_ASSIGNMENTS = [ + { + "tester": "Tester 1", + "focus": "Critical Path", + "assignments": ["Setup Instructions", "Migration Validation", "Admin UI Testing"], + "database": "SQLite", + "estimated_time": "3-5 hours" + }, + { + "tester": "Tester 2", + "focus": "API Testing", + "assignments": ["API Authentication", "API Teams", "API Servers"], + "database": "SQLite", + "estimated_time": "2-4 hours" + }, + { + "tester": "Tester 3", + "focus": "Database Compatibility", + "assignments": ["Database Testing", "Migration Validation"], + "database": "PostgreSQL", + "estimated_time": "2-3 hours" + }, + { + "tester": "Tester 4", + "focus": "Security Validation", + "assignments": ["Security Testing", "API Authentication"], + "database": "Both", + "estimated_time": "3-5 hours" + } +] + + +def main(): + """Main test coordination.""" + + print("🎯 MCP GATEWAY COMPREHENSIVE MANUAL TESTING") + print("=" * 70) + print("🔄 Post-Migration Validation Suite") + print("👥 Designed for multiple testers") + + print("\\n📋 Available Test Suites:") + for i, suite in enumerate(TEST_SUITES, 1): + priority_icon = "🚨" if suite['priority'] == 'CRITICAL' else "🔧" if suite['priority'] == 'HIGH' else "📝" + main_icon = " 🎯" if suite.get('main_test') or suite.get('includes_main_test') else "" + + print(f" {i:2}. {suite['name']} {priority_icon}{main_icon}") + print(f" {suite['description']}") + print(f" Time: {suite['estimated_time']}") + + if suite.get('warning'): + print(f" ⚠️ {suite['warning']}") + + print("\\n👥 Suggested Tester Assignments:") + for assignment in TESTER_ASSIGNMENTS: + print(f" {assignment['tester']} ({assignment['focus']}):") + print(f" Tests: {', '.join(assignment['assignments'])}") + print(f" Database: {assignment['database']}") + print(f" Time: {assignment['estimated_time']}") + print() + + +def run_quick_validation(): + """Run quick critical tests only.""" + + print("⚡ QUICK VALIDATION - Critical Tests Only") + print("=" * 50) + + critical_suites = [s for s in TEST_SUITES if s['priority'] == 'CRITICAL'] + + for suite in critical_suites: + print(f"\\n🚨 {suite['name']}") + print(f" {suite['description']}") + + response = input(f"\\nRun {suite['name']}? (y/n): ").lower() + if response == 'y': + run_test_suite(suite) + + +def run_test_suite(suite): + """Run a specific test suite.""" + + print(f"\\n🧪 RUNNING: {suite['name']}") + print("=" * 50) + + test_file = Path("tests/manual") / suite['file'] + + if not test_file.exists(): + print(f"❌ Test file not found: {test_file}") + return False + + print(f"📄 Executing: {test_file}") + print(f"⏱️ Estimated time: {suite['estimated_time']}") + + if suite.get('warning'): + print(f"⚠️ Warning: {suite['warning']}") + proceed = input("Proceed? (y/N): ").lower() + if proceed != 'y': + print("⚠️ Test suite skipped") + return False + + try: + # Execute test file + result = subprocess.run([sys.executable, str(test_file)], + capture_output=True, text=True, timeout=1800) # 30 min timeout + + if result.returncode == 0: + print(f"✅ {suite['name']}: Completed successfully") + if result.stdout: + print("Output summary:") + print(result.stdout[-500:]) # Last 500 chars + else: + print(f"❌ {suite['name']}: Failed or incomplete") + if result.stderr: + print("Errors:") + print(result.stderr[-500:]) + + return result.returncode == 0 + + except subprocess.TimeoutExpired: + print(f"⏰ {suite['name']}: Timeout (exceeded 30 minutes)") + return False + except Exception as e: + print(f"❌ {suite['name']}: Execution error - {e}") + return False + + +def interactive_testing(): + """Interactive test suite selection and execution.""" + + print("🎯 INTERACTIVE TESTING MODE") + print("=" * 50) + + print("\\nSelect test suites to run:") + for i, suite in enumerate(TEST_SUITES, 1): + print(f" {i}. {suite['name']} ({suite['priority']})") + + print("\\nOptions:") + print(" a - Run all test suites") + print(" c - Run critical tests only") + print(" 1,2,3 - Run specific test suites") + print(" q - Quit") + + selection = input("\\nYour choice: ").lower().strip() + + if selection == 'q': + print("❌ Testing cancelled") + return + elif selection == 'a': + print("🚀 Running ALL test suites...") + for suite in TEST_SUITES: + run_test_suite(suite) + elif selection == 'c': + run_quick_validation() + else: + # Parse specific selections + try: + indices = [int(x.strip()) for x in selection.split(',')] + for idx in indices: + if 1 <= idx <= len(TEST_SUITES): + suite = TEST_SUITES[idx - 1] + run_test_suite(suite) + else: + print(f"❌ Invalid selection: {idx}") + except ValueError: + print("❌ Invalid input format") + + +def generate_overall_summary(): + """Generate comprehensive test summary.""" + + print("\\n📊 GENERATING OVERALL TEST SUMMARY") + print("=" * 60) + + # Collect results from all test files + results_files = list(Path("tests/manual").glob("*_test_results.json")) + + overall_summary = { + "test_execution": { + "timestamp": datetime.now().isoformat(), + "total_suites": len(TEST_SUITES), + "results_files": len(results_files) + }, + "suite_results": {} + } + + for results_file in results_files: + try: + with open(results_file, 'r') as f: + data = json.load(f) + suite_name = results_file.stem.replace('_test_results', '') + overall_summary['suite_results'][suite_name] = data + except Exception as e: + print(f"⚠️ Could not read {results_file}: {e}") + + # Save overall summary + summary_file = Path("tests/manual/overall_test_summary.json") + with open(summary_file, 'w') as f: + json.dump(overall_summary, f, indent=2) + + print(f"📄 Overall summary saved: {summary_file}") + + # Print summary + if overall_summary['suite_results']: + print("\\n📈 Test Suite Results:") + for suite_name, data in overall_summary['suite_results'].items(): + if 'summary' in data: + summary = data['summary'] + passed = summary.get('passed', 0) + total = summary.get('total', 0) + print(f" {suite_name}: {passed}/{total} passed") + else: + print(f" {suite_name}: Results available") + + +if __name__ == "__main__": + if len(sys.argv) > 1: + if sys.argv[1] == "--help": + print("🎯 MCP Gateway Manual Test Runner") + print("Usage:") + print(" python3 tests/manual/run_all_tests.py # Interactive mode") + print(" python3 tests/manual/run_all_tests.py --quick # Critical tests only") + print(" python3 tests/manual/run_all_tests.py --critical-only # Same as --quick") + print(" python3 tests/manual/run_all_tests.py --list # List all test suites") + print(" python3 tests/manual/run_all_tests.py --help # This help") + print("\\n🎯 Individual test suites can be run directly:") + for suite in TEST_SUITES: + print(f" python3 tests/manual/{suite['file']}") + elif sys.argv[1] == "--list": + main() # Show test suites + elif sys.argv[1] == "--quick" or sys.argv[1] == "--critical-only": + run_quick_validation() + generate_overall_summary() + else: + print("❌ Unknown option. Use --help for usage.") + else: + try: + main() + print("\\n🚀 Starting interactive testing...") + interactive_testing() + generate_overall_summary() + print("\\n🎉 Manual testing session complete!") + except KeyboardInterrupt: + print("\\n❌ Testing cancelled by user") + except Exception as e: + print(f"❌ Testing error: {e}") + sys.exit(1) \ No newline at end of file diff --git a/tests/manual/security_tests.py b/tests/manual/security_tests.py new file mode 100644 index 000000000..9c3197f35 --- /dev/null +++ b/tests/manual/security_tests.py @@ -0,0 +1,380 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +MCP Gateway v0.7.0 - Security & Penetration Tests + +Security testing including attack scenarios: +- SQL injection attempts +- Authentication bypass +- Authorization violations +- XSS and CSRF attacks +- Input validation testing + +Usage: + python3 tests/manual/security_tests.py + python3 tests/manual/security_tests.py --attack-type sql-injection +""" + +import sys +import json +import subprocess +from pathlib import Path +from datetime import datetime + +# Security test scenarios +SECURITY_TESTS = [ + { + "id": "SEC-001", + "attack_type": "SQL Injection", + "target": "Teams API", + "risk_level": "Critical", + "description": "Attempt SQL injection via team creation", + "attack_steps": [ + "1. Get valid JWT token from admin login", + "2. Prepare malicious team name with SQL injection:", + ' {"name":"\'; DROP TABLE users; --","description":"injection test"}', + "3. Execute attack:", + ' curl -X POST http://localhost:4444/teams -H "Authorization: Bearer " -H "Content-Type: application/json" -d \'{"name":"\\"; DROP TABLE users; --","description":"test"}\'', + "4. Check database integrity:", + " sqlite3 mcp.db '.tables' (verify users table still exists)", + "5. Check error response handling" + ], + "expected_defense": "Input sanitized, parameterized queries prevent injection, error handled gracefully", + "validation": "Database remains intact, no SQL executed, proper error returned" + }, + { + "id": "SEC-002", + "attack_type": "JWT Token Manipulation", + "target": "Authentication System", + "risk_level": "Critical", + "description": "Attempt to manipulate JWT token claims", + "attack_steps": [ + "1. Obtain valid JWT token through normal login", + "2. Decode JWT payload (use jwt.io or similar tool)", + "3. Modify claims (e.g., change user email, add admin role)", + "4. Re-encode JWT with different signature", + "5. Attempt to use modified token:", + ' curl -H "Authorization: Bearer " http://localhost:4444/admin/users', + "6. Verify access is denied" + ], + "expected_defense": "Token signature validation prevents tampering, access denied", + "validation": "Modified tokens rejected, signature verification works" + }, + { + "id": "SEC-003", + "attack_type": "Team Isolation Bypass", + "target": "Multi-tenancy Authorization", + "risk_level": "Critical", + "description": "Attempt to access resources from other teams", + "attack_steps": [ + "1. Create two test users in different teams", + "2. User A creates a private resource in Team 1", + "3. Get User B's JWT token", + "4. User B attempts to access User A's resource:", + ' curl -H "Authorization: Bearer " http://localhost:4444/resources/{USER_A_RESOURCE_ID}', + "5. Verify access is denied", + "6. Test with direct resource ID guessing" + ], + "expected_defense": "Team boundaries strictly enforced, cross-team access blocked", + "validation": "Access denied, team isolation maintained" + }, + { + "id": "SEC-004", + "attack_type": "Privilege Escalation", + "target": "RBAC System", + "risk_level": "Critical", + "description": "Attempt to elevate privileges or access admin functions", + "attack_steps": [ + "1. Login as regular user (non-admin)", + "2. Attempt to access admin-only endpoints:", + ' curl -H "Authorization: Bearer " http://localhost:4444/admin/users', + "3. Try to modify own user role in database", + "4. Attempt direct admin API calls", + "5. Test admin UI access with regular user" + ], + "expected_defense": "Admin privileges protected, privilege escalation prevented", + "validation": "Admin functions inaccessible to regular users" + }, + { + "id": "SEC-005", + "attack_type": "Cross-Site Scripting (XSS)", + "target": "Admin UI", + "risk_level": "High", + "description": "Attempt script injection in web interface", + "attack_steps": [ + "1. Access admin UI with valid credentials", + "2. Create tool with malicious name:", + ' Name: ', + "3. Save tool and navigate to tools list", + "4. Check if JavaScript executes in browser", + "5. Test other input fields for XSS vulnerabilities", + "6. Check browser console for script execution" + ], + "expected_defense": "Script tags escaped or sanitized, no JavaScript execution", + "validation": "No alert boxes, scripts properly escaped in HTML" + }, + { + "id": "SEC-006", + "attack_type": "Cross-Site Request Forgery (CSRF)", + "target": "State-Changing Operations", + "risk_level": "High", + "description": "Attempt CSRF attack on admin operations", + "attack_steps": [ + "1. Create malicious HTML page with form posting to gateway", + "2. Form targets state-changing endpoint (e.g., team creation)", + "3. Get authenticated user to visit malicious page", + "4. Check if operation executes without user consent", + "5. Verify CSRF token requirements", + "6. Test cross-origin request blocking" + ], + "expected_defense": "CSRF tokens required, cross-origin requests properly blocked", + "validation": "Operations require explicit user consent and CSRF protection" + }, + { + "id": "SEC-007", + "attack_type": "Brute Force Attack", + "target": "Login Endpoint", + "risk_level": "Medium", + "description": "Attempt password brute force attack", + "attack_steps": [ + "1. Script multiple rapid login attempts with wrong passwords:", + ' for i in {1..10}; do curl -X POST http://localhost:4444/auth/login -d \'{"email":"admin@example.com","password":"wrong$i"}\'; done', + "2. Monitor response times and status codes", + "3. Check for rate limiting implementation", + "4. Test account lockout after failed attempts", + "5. Verify lockout duration enforcement" + ], + "expected_defense": "Account locked after multiple failures, rate limiting enforced", + "validation": "Brute force attacks mitigated by lockout and rate limiting" + }, + { + "id": "SEC-008", + "attack_type": "File Upload Attack", + "target": "Resource Management", + "risk_level": "High", + "description": "Attempt to upload malicious files", + "attack_steps": [ + "1. Try uploading executable file (.exe, .sh)", + "2. Attempt script file upload (.py, .js, .php)", + "3. Test oversized file upload", + "4. Try files with malicious names", + "5. Attempt path traversal in filenames (../../../etc/passwd)", + "6. Check file type and size validation" + ], + "expected_defense": "File type validation, size limits enforced, path sanitization", + "validation": "Malicious uploads blocked, validation errors returned" + }, + { + "id": "SEC-009", + "attack_type": "API Rate Limiting", + "target": "DoS Prevention", + "risk_level": "Medium", + "description": "Test API rate limiting and DoS protection", + "attack_steps": [ + "1. Script rapid API requests to test rate limiting:", + ' for i in {1..100}; do curl -s http://localhost:4444/health; done', + "2. Monitor response times and status codes", + "3. Check for rate limit headers in responses", + "4. Verify throttling and backoff mechanisms", + "5. Test rate limiting on authenticated endpoints" + ], + "expected_defense": "Rate limits enforced, DoS protection active, proper HTTP status codes", + "validation": "Rate limiting prevents abuse, service remains stable" + }, + { + "id": "SEC-010", + "attack_type": "Information Disclosure", + "target": "Error Handling", + "risk_level": "Medium", + "description": "Check for sensitive information in error responses", + "attack_steps": [ + "1. Trigger various error conditions:", + " - Invalid JSON syntax", + " - Missing required fields", + " - Invalid authentication", + " - Access denied scenarios", + "2. Analyze error messages for sensitive information", + "3. Check for stack traces in responses", + "4. Look for database connection strings", + "5. Verify no internal paths or system info disclosed" + ], + "expected_defense": "No sensitive information disclosed in error responses", + "validation": "Error messages are user-friendly without exposing system internals" + } +] + + +def run_security_tests(): + """Run comprehensive security testing.""" + + print("🛡️ SECURITY & PENETRATION TESTING") + print("=" * 60) + print("⚠️ WARNING: This performs actual attack scenarios") + print("🎯 Purpose: Validate security defenses") + + print("\\n🔧 Security Testing Prerequisites:") + print("1. Test environment (not production)") + print("2. Database backup available") + print("3. MCP Gateway running") + print("4. Valid admin credentials") + + proceed = input("\\nProceed with security testing? (yes/no): ").lower() + if proceed != 'yes': + print("❌ Security testing cancelled") + return [] + + results = [] + + for test in SECURITY_TESTS: + print(f"\\n{'='*60}") + print(f"🛡️ SECURITY TEST {test['id']}") + print(f"Attack Type: {test['attack_type']}") + print(f"Target: {test['target']}") + print(f"Risk Level: {test['risk_level']}") + print(f"Description: {test['description']}") + + if test['risk_level'] == 'Critical': + print("🚨 CRITICAL SECURITY TEST") + + print(f"\\n⚔️ Attack Steps:") + for step in test['attack_steps']: + print(f" {step}") + + print(f"\\n🛡️ Expected Defense:") + print(f" {test['expected_defense']}") + + print(f"\\n✅ Validation Criteria:") + print(f" {test['validation']}") + + # Manual execution + response = input(f"\\nExecute security test {test['id']}? (y/n/skip): ").lower() + + if response == 'skip': + results.append({"id": test['id'], "status": "SKIP"}) + continue + elif response == 'y': + print("\\n🔍 Execute the attack steps above and observe results...") + + # Get results + defense_worked = input("Did the expected defense work? (y/n): ").lower() + vulnerability_found = input("Any vulnerability discovered? (y/n): ").lower() + + if defense_worked == 'y' and vulnerability_found == 'n': + status = "PASS" + print(f"✅ {test['id']}: Security defense PASSED") + else: + status = "FAIL" + print(f"❌ {test['id']}: Security vulnerability DETECTED") + vuln_details = input("Describe the vulnerability: ") + + if test['risk_level'] == 'Critical': + print("🚨 CRITICAL VULNERABILITY FOUND!") + print("🛑 Do not deploy to production until fixed") + + # Record results + result_data = { + "id": test['id'], + "attack_type": test['attack_type'], + "risk_level": test['risk_level'], + "status": status, + "timestamp": datetime.now().isoformat() + } + + if status == "FAIL": + result_data['vulnerability_details'] = vuln_details + + results.append(result_data) + + # Generate security summary + generate_security_summary(results) + + return results + + +def generate_security_summary(results): + """Generate security test summary.""" + + print(f"\\n{'='*60}") + print("🛡️ SECURITY TEST SUMMARY") + print("=" * 60) + + passed = len([r for r in results if r['status'] == 'PASS']) + failed = len([r for r in results if r['status'] == 'FAIL']) + skipped = len([r for r in results if r['status'] == 'SKIP']) + + # Check by risk level + critical_tests = [r for r in results if r.get('risk_level') == 'Critical'] + critical_passed = len([r for r in critical_tests if r['status'] == 'PASS']) + + print(f"📈 Security Test Results:") + print(f" ✅ Defenses Passed: {passed}/{len(results)}") + print(f" ❌ Vulnerabilities Found: {failed}/{len(results)}") + print(f" ⚠️ Tests Skipped: {skipped}/{len(results)}") + + print(f"\\n🚨 Critical Security Tests:") + print(f" ✅ Critical Defenses: {critical_passed}/{len(critical_tests)}") + + # Security assessment + if failed == 0 and critical_passed == len(critical_tests): + print(f"\\n🎉 SECURITY ASSESSMENT: EXCELLENT!") + print("✅ All security defenses working") + print("✅ No vulnerabilities detected") + print("✅ Ready for production deployment") + elif critical_passed == len(critical_tests): + print(f"\\n⚠️ SECURITY ASSESSMENT: GOOD") + print("✅ Critical defenses working") + print("⚠️ Some non-critical issues found") + print("💡 Review non-critical findings") + else: + print(f"\\n❌ SECURITY ASSESSMENT: VULNERABLE") + print("❌ Critical vulnerabilities detected") + print("🛑 DO NOT DEPLOY TO PRODUCTION") + print("🔧 Fix vulnerabilities before deployment") + + # Save results + results_file = Path("tests/manual/security_test_results.json") + with open(results_file, 'w') as f: + json.dump({ + "summary": { + "passed": passed, + "failed": failed, + "skipped": skipped, + "critical_passed": critical_passed, + "critical_total": len(critical_tests) + }, + "results": results, + "timestamp": datetime.now().isoformat() + }, f, indent=2) + + print(f"\\n📄 Security results saved: {results_file}") + + +if __name__ == "__main__": + if len(sys.argv) > 1: + if sys.argv[1] == "--help": + print("🛡️ Security & Penetration Tests") + print("Usage:") + print(" python3 tests/manual/security_tests.py # Run all security tests") + print(" python3 tests/manual/security_tests.py --list # List all tests") + print(" python3 tests/manual/security_tests.py --help # This help") + print("\\n⚠️ WARNING: These tests perform actual attack scenarios") + print("🎯 Only run in test environments, never production") + elif sys.argv[1] == "--list": + print("🛡️ All Security Tests:") + for test in SECURITY_TESTS: + print(f" {test['id']}: {test['attack_type']} ({test['risk_level']})") + print(f" Target: {test['target']}") + print(f" Description: {test['description']}") + else: + print("❌ Unknown option. Use --help") + else: + try: + print("🛡️ Starting security testing...") + print("⚠️ This will perform actual attack scenarios") + results = run_security_tests() + print("\\n🎉 Security testing complete!") + except KeyboardInterrupt: + print("\\n❌ Security testing cancelled") + except Exception as e: + print(f"❌ Error: {e}") + sys.exit(1) \ No newline at end of file diff --git a/tests/manual/setup_instructions.py b/tests/manual/setup_instructions.py new file mode 100644 index 000000000..74d51dbbd --- /dev/null +++ b/tests/manual/setup_instructions.py @@ -0,0 +1,342 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +MCP Gateway v0.7.0 - Setup Instructions for Manual Testing + +Complete environment setup guide for testers. +This file contains step-by-step instructions for setting up +the MCP Gateway for comprehensive manual testing. + +Usage: + python3 tests/manual/setup_instructions.py +""" + +import sys +import subprocess +from pathlib import Path +from datetime import datetime + +# Test case data structure +SETUP_TESTS = [ + { + "id": "SETUP-001", + "step": "Check Prerequisites", + "action": "Verify Python 3.11+, Git, and curl installed", + "command": "python3 --version && git --version && curl --version", + "expected": "All tools show version numbers", + "troubleshooting": "Install missing tools via package manager", + "required": True + }, + { + "id": "SETUP-002", + "step": "Clone Repository", + "action": "Download MCP Gateway source code", + "command": "git clone https://github.com/anthropics/mcp-context-forge.git", + "expected": "Repository cloned successfully", + "troubleshooting": "Check git credentials and network access", + "required": True + }, + { + "id": "SETUP-003", + "step": "Enter Directory", + "action": "Navigate to project directory", + "command": "cd mcp-context-forge", + "expected": "Directory changed, can see project files", + "troubleshooting": "Use 'ls' to verify files like README.md, .env.example", + "required": True + }, + { + "id": "SETUP-004", + "step": "Copy Environment", + "action": "Create environment configuration file", + "command": "cp .env.example .env", + "expected": ".env file created", + "troubleshooting": "Check file exists: ls -la .env", + "required": True + }, + { + "id": "SETUP-005", + "step": "Edit Configuration", + "action": "Configure platform admin credentials", + "command": "vi .env", + "expected": "File opens in vi editor", + "troubleshooting": "Use :wq to save and quit vi", + "required": True, + "details": [ + "Set PLATFORM_ADMIN_EMAIL=", + "Set PLATFORM_ADMIN_PASSWORD=", + "Set EMAIL_AUTH_ENABLED=true", + "Save file with :wq" + ] + }, + { + "id": "SETUP-006", + "step": "Verify Configuration", + "action": "Check settings are loaded correctly", + "command": 'python3 -c "from mcpgateway.config import settings; print(f\'Admin: {settings.platform_admin_email}\')"', + "expected": "Shows your configured admin email", + "troubleshooting": "If error, check .env file syntax", + "required": True + }, + { + "id": "SETUP-007", + "step": "Install Dependencies", + "action": "Install Python packages", + "command": "make install-dev", + "expected": "All dependencies installed successfully", + "troubleshooting": "May take 5-15 minutes, check internet connection", + "required": True + }, + { + "id": "SETUP-008", + "step": "Run Migration", + "action": "Execute database migration (CRITICAL STEP)", + "command": "python3 -m mcpgateway.bootstrap_db", + "expected": "'Database ready' message at end", + "troubleshooting": "MUST complete successfully - get help if fails", + "required": True, + "critical": True + }, + { + "id": "SETUP-009", + "step": "Verify Migration", + "action": "Validate migration completed correctly", + "command": "python3 scripts/verify_multitenancy_0_7_0_migration.py", + "expected": "'🎉 MIGRATION VERIFICATION: SUCCESS!' at end", + "troubleshooting": "All checks must pass - use fix script if needed", + "required": True, + "critical": True + }, + { + "id": "SETUP-010", + "step": "Start Gateway", + "action": "Start MCP Gateway server", + "command": "make dev", + "expected": "'Uvicorn running on http://0.0.0.0:4444' message", + "troubleshooting": "Keep this terminal window open during testing", + "required": True + }, + { + "id": "SETUP-011", + "step": "Test Health Check", + "action": "Verify server is responding", + "command": "curl http://localhost:4444/health", + "expected": '{"status":"ok"}', + "troubleshooting": "If fails, check server started correctly", + "required": True + }, + { + "id": "SETUP-012", + "step": "Access Admin UI", + "action": "Open admin interface in browser", + "command": "Open http://localhost:4444/admin in browser", + "expected": "Login page appears", + "troubleshooting": "Try both http:// and https://", + "required": True + }, + { + "id": "SETUP-013", + "step": "Test Admin Login", + "action": "Authenticate with admin credentials", + "command": "Login with admin email/password from .env", + "expected": "Dashboard loads successfully", + "troubleshooting": "Main authentication validation test", + "required": True, + "critical": True + }, + { + "id": "SETUP-014", + "step": "Verify Servers Visible", + "action": "Check old servers appear in UI (MAIN MIGRATION TEST)", + "command": "Navigate to Virtual Servers section", + "expected": "Servers listed, including pre-migration servers", + "troubleshooting": "If empty list, migration failed - get help immediately", + "required": True, + "critical": True, + "main_test": True + } +] + +# Tester information template +TESTER_INFO = { + "name": "", + "email": "", + "start_date": "", + "database_type": "SQLite/PostgreSQL", + "os": "", + "browser": "Chrome/Firefox", + "experience": "Beginner/Intermediate/Expert", + "time_available": "", + "organization": "", + "contact": "" +} + +# Prerequisites checklist +PREREQUISITES = [ + "Python 3.11+ installed (python3 --version)", + "Git installed (git --version)", + "curl installed (curl --version)", + "Modern web browser (Chrome/Firefox recommended)", + "Text editor (vi/vim/VSCode)", + "Terminal/command line access", + "4+ hours dedicated testing time", + "Reliable internet connection", + "Admin/sudo access for package installation", + "Basic understanding of web applications and APIs" +] + + +def run_setup_validation(): + """Interactive setup validation.""" + + print("🚀 MCP GATEWAY SETUP VALIDATION") + print("=" * 60) + + print("\\n👤 TESTER INFORMATION") + print("Please provide your information:") + + tester_info = {} + for key, default in TESTER_INFO.items(): + prompt = f"{key.replace('_', ' ').title()}" + if default: + prompt += f" ({default})" + prompt += ": " + + value = input(prompt).strip() + tester_info[key] = value + + print("\\n⚠️ PREREQUISITES CHECK") + print("Verify you have all prerequisites:") + + for i, prereq in enumerate(PREREQUISITES, 1): + print(f" {i:2}. {prereq}") + + response = input("\\nDo you have all prerequisites? (y/N): ").lower() + if response != 'y': + print("❌ Please install missing prerequisites before continuing") + return False + + print("\\n🔧 SETUP EXECUTION") + print("Follow these steps exactly:") + + for i, test in enumerate(SETUP_TESTS, 1): + print(f"\\n--- STEP {i}: {test['step']} ---") + print(f"Action: {test['action']}") + print(f"Command: {test['command']}") + print(f"Expected: {test['expected']}") + + if test.get('details'): + print("Details:") + for detail in test['details']: + print(f" - {detail}") + + if test.get('critical'): + print("🚨 CRITICAL: This step must succeed!") + + if test.get('main_test'): + print("🎯 MAIN TEST: This validates the migration fix!") + + # Wait for user confirmation + response = input(f"\\nCompleted step {i}? (y/n/q): ").lower() + + if response == 'q': + print("❌ Setup cancelled by user") + return False + elif response == 'n': + print(f"⚠️ Step {i} not completed") + if test.get('critical'): + print("🚨 Critical step failed - please resolve before continuing") + troubleshoot = input("Need troubleshooting help? (y/N): ").lower() + if troubleshoot == 'y': + print(f"💡 Troubleshooting: {test['troubleshooting']}") + return False + else: + print(f"✅ Step {i} completed") + + print("\\n🎊 SETUP COMPLETE!") + print("✅ All setup steps completed successfully") + print("🧪 Ready to begin manual testing") + + # Save tester info for reference + save_tester_info(tester_info) + + return True + + +def save_tester_info(info): + """Save tester information for tracking.""" + + info_file = Path("tests/manual/tester_info.txt") + + with open(info_file, 'w') as f: + f.write(f"Tester Information\\n") + f.write(f"Generated: {datetime.now().isoformat()}\\n") + f.write("=" * 40 + "\\n") + + for key, value in info.items(): + f.write(f"{key.replace('_', ' ').title()}: {value}\\n") + + print(f"\\n📄 Tester info saved: {info_file}") + + +def print_usage(): + """Print usage instructions.""" + + print("📋 SETUP INSTRUCTIONS USAGE") + print("=" * 40) + print() + print("This script guides you through complete environment setup.") + print() + print("Options:") + print(" python3 tests/manual/setup_instructions.py # Interactive setup") + print(" python3 tests/manual/setup_instructions.py --list # Show all steps") + print(" python3 tests/manual/setup_instructions.py --help # This help") + print() + print("Next steps after setup:") + print(" python3 tests/manual/migration_tests.py # Critical migration tests") + print(" python3 tests/manual/api_authentication_tests.py # API authentication") + print(" python3 tests/manual/admin_ui_tests.py # Admin UI testing") + print() + + +def list_all_steps(): + """List all setup steps.""" + + print("📋 ALL SETUP STEPS") + print("=" * 40) + + for i, test in enumerate(SETUP_TESTS, 1): + status = "🚨 CRITICAL" if test.get('critical') else "📋 Required" if test.get('required') else "📝 Optional" + print(f"\\n{i:2}. {test['step']} ({status})") + print(f" Action: {test['action']}") + print(f" Command: {test['command']}") + print(f" Expected: {test['expected']}") + + if test.get('main_test'): + print(" 🎯 THIS IS THE MAIN MIGRATION TEST!") + + +if __name__ == "__main__": + if len(sys.argv) > 1: + if sys.argv[1] == "--help": + print_usage() + elif sys.argv[1] == "--list": + list_all_steps() + else: + print("❌ Unknown option. Use --help for usage.") + else: + # Run interactive setup + try: + success = run_setup_validation() + if success: + print("\\n🎉 Setup complete! Ready for testing.") + print("Next: python3 tests/manual/migration_tests.py") + else: + print("❌ Setup incomplete. Please resolve issues.") + sys.exit(1) + except KeyboardInterrupt: + print("\\n❌ Setup cancelled by user") + sys.exit(1) + except Exception as e: + print(f"❌ Setup error: {e}") + sys.exit(1) \ No newline at end of file From d3877ef70f31594d80668df98260b507fec02940 Mon Sep 17 00:00:00 2001 From: Mihai Criveti Date: Thu, 4 Sep 2025 09:23:40 +0100 Subject: [PATCH 27/49] Add manual testing Signed-off-by: Mihai Criveti --- tests/manual/README.md | 270 ++++++----- tests/manual/admin_ui_tests.py | 450 ------------------ tests/manual/api_authentication_tests.py | 420 ---------------- tests/manual/api_servers_tests.py | 276 ----------- tests/manual/api_teams_tests.py | 308 ------------ tests/manual/database_tests.py | 284 ----------- tests/manual/generate_test_plan.py | 191 ++++++++ tests/manual/generate_test_plan.sh | 71 --- tests/manual/generate_test_plan_xlsx.py | 344 ------------- tests/manual/migration_tests.py | 405 ---------------- tests/manual/run_all_tests.py | 331 ------------- tests/manual/security_tests.py | 380 --------------- tests/manual/setup_instructions.py | 342 ------------- tests/manual/testcases/admin_ui_tests.yaml | 218 +++++++++ tests/manual/testcases/api_a2a.yaml | 149 ++++++ .../manual/testcases/api_authentication.yaml | 179 +++++++ tests/manual/testcases/api_export_import.yaml | 145 ++++++ tests/manual/testcases/api_federation.yaml | 115 +++++ tests/manual/testcases/api_prompts.yaml | 115 +++++ tests/manual/testcases/api_resources.yaml | 132 +++++ tests/manual/testcases/api_servers.yaml | 115 +++++ tests/manual/testcases/api_teams.yaml | 184 +++++++ tests/manual/testcases/api_tools.yaml | 140 ++++++ tests/manual/testcases/database_tests.yaml | 176 +++++++ tests/manual/testcases/edge_cases.yaml | 209 ++++++++ tests/manual/testcases/migration_tests.yaml | 157 ++++++ tests/manual/testcases/performance_tests.yaml | 107 +++++ tests/manual/testcases/security_tests.yaml | 173 +++++++ .../manual/testcases/setup_instructions.yaml | 173 +++++++ 29 files changed, 2821 insertions(+), 3738 deletions(-) delete mode 100644 tests/manual/admin_ui_tests.py delete mode 100644 tests/manual/api_authentication_tests.py delete mode 100644 tests/manual/api_servers_tests.py delete mode 100644 tests/manual/api_teams_tests.py delete mode 100644 tests/manual/database_tests.py create mode 100755 tests/manual/generate_test_plan.py delete mode 100755 tests/manual/generate_test_plan.sh delete mode 100644 tests/manual/generate_test_plan_xlsx.py delete mode 100644 tests/manual/migration_tests.py delete mode 100644 tests/manual/run_all_tests.py delete mode 100644 tests/manual/security_tests.py delete mode 100644 tests/manual/setup_instructions.py create mode 100644 tests/manual/testcases/admin_ui_tests.yaml create mode 100644 tests/manual/testcases/api_a2a.yaml create mode 100644 tests/manual/testcases/api_authentication.yaml create mode 100644 tests/manual/testcases/api_export_import.yaml create mode 100644 tests/manual/testcases/api_federation.yaml create mode 100644 tests/manual/testcases/api_prompts.yaml create mode 100644 tests/manual/testcases/api_resources.yaml create mode 100644 tests/manual/testcases/api_servers.yaml create mode 100644 tests/manual/testcases/api_teams.yaml create mode 100644 tests/manual/testcases/api_tools.yaml create mode 100644 tests/manual/testcases/database_tests.yaml create mode 100644 tests/manual/testcases/edge_cases.yaml create mode 100644 tests/manual/testcases/migration_tests.yaml create mode 100644 tests/manual/testcases/performance_tests.yaml create mode 100644 tests/manual/testcases/security_tests.yaml create mode 100644 tests/manual/testcases/setup_instructions.yaml diff --git a/tests/manual/README.md b/tests/manual/README.md index a1bd12912..1f100edba 100644 --- a/tests/manual/README.md +++ b/tests/manual/README.md @@ -1,172 +1,188 @@ -# 🧪 MCP Gateway v0.7.0 - Manual Testing Suite +# 🧪 MCP Gateway v0.7.0 - YAML-Based Manual Testing Suite -**Complete manual testing for post-migration validation** +**Maintainable, scalable manual testing with YAML test definitions** -## 📁 Directory Contents +## 📁 Clean Directory Structure -### 🧪 **Test Files** (Run Individually) -| File | Purpose | Priority | Time | -|------|---------|----------|------| -| `setup_instructions.py` | Environment setup | CRITICAL | 30-60 min | -| `migration_tests.py` | **Migration validation (MAIN TEST)** | CRITICAL | 60-90 min | -| `admin_ui_tests.py` | Admin UI testing | CRITICAL | 60-120 min | -| `api_authentication_tests.py` | Authentication API | HIGH | 30-60 min | -| `api_teams_tests.py` | Teams API | HIGH | 30-60 min | -| `api_servers_tests.py` | Servers API | HIGH | 45-90 min | -| `database_tests.py` | Database compatibility | HIGH | 60-120 min | -| `security_tests.py` | Security testing | MEDIUM | 90-180 min | +### 🧪 **YAML Test Definitions** (`testcases/` directory) +| File | Purpose | Tests | Priority | +|------|---------|-------|----------| +| `testcases/setup_instructions.yaml` | Environment setup | 17 | CRITICAL | +| `testcases/migration_tests.yaml` | **Migration validation (MAIN TEST)** | 8 | CRITICAL | +| `testcases/admin_ui_tests.yaml` | Admin UI testing | 10 | CRITICAL | +| `testcases/api_authentication.yaml` | Authentication API | 10 | HIGH | +| `testcases/api_teams.yaml` | Teams API | 10 | HIGH | +| `testcases/api_servers.yaml` | Servers API | 10 | HIGH | +| `testcases/security_tests.yaml` | Security testing | 10 | HIGH | -### 🎯 **Coordination Files** +### 🎯 **Generation & Output** | File | Purpose | |------|---------| -| `run_all_tests.py` | Master test coordinator | -| `generate_test_plan.sh` | **Excel generator entrypoint** | -| `generate_test_plan_xlsx.py` | Excel generator (Python) | - -### 📊 **Output Files** -| File | Purpose | -|------|---------| -| `test-plan.xlsx` | **Complete Excel test plan (8 worksheets, 54 tests)** | +| `generate_test_plan.py` | **Single generator script** | +| `test-plan.xlsx` | Generated Excel file | | `README.md` | This documentation | ## 🚀 **Quick Start** ### **Generate Excel Test Plan** ```bash -# Generate clean Excel file from Python test files -./generate_test_plan.sh +# Generate Excel file from YAML definitions +python3 generate_test_plan.py -# Result: test-plan.xlsx (ready for 10 testers) +# Result: test-plan.xlsx (clean, formatted, no corruption) ``` -### **For Testers - Option 1: Excel File** +### **Use Excel File** ```bash -# Open the generated Excel file -open test-plan.xlsx # or double-click in file manager - -# Follow worksheets in order: -# 1. Setup Instructions -# 2. Migration Tests (MAIN TEST - server visibility) -# 3. Admin UI Tests -# 4. API Authentication -# 5. API Teams -# 6. API Servers -# 7. Database Tests -# 8. Security Tests +# Open generated Excel file +open test-plan.xlsx + +# Features: +# - 7+ worksheets with complete test data +# - Excel table formatting for filtering/sorting +# - Priority color coding (Critical/High/Medium) +# - Tester tracking columns +# - Complete step-by-step instructions ``` -### **For Testers - Option 2: Python Files** +### **Update Tests** ```bash -# Run individual test areas -python3 setup_instructions.py # Environment setup -python3 migration_tests.py # Critical migration tests -python3 admin_ui_tests.py # UI validation (server visibility) +# Edit YAML files to modify tests +vi testcases/migration_tests.yaml # Edit migration tests +vi testcases/api_authentication.yaml # Edit auth API tests -# Get help for any test file -python3 .py --help +# Regenerate Excel +python3 generate_test_plan.py # Fresh Excel with updates ``` -### **For Testers - Option 3: Coordinated** -```bash -# Interactive test coordination -python3 run_all_tests.py - -# Quick critical tests only -python3 run_all_tests.py --critical-only +## 🎯 **Key Advantages** + +### ✅ **Maintainable** +- **YAML files**: Easy to read and edit +- **One file per worksheet**: Clean separation of concerns +- **Version controllable**: Track changes in individual files +- **No Excel editing**: Update YAML, regenerate Excel + +### ✅ **Scalable** +- **Add new worksheets**: Create new YAML file +- **Modify tests**: Edit YAML and regenerate +- **Bulk updates**: Script-friendly YAML format +- **Template driven**: Consistent test structure + +### ✅ **Tester Friendly** +- **Clean Excel output**: No corruption issues +- **Table filtering**: Excel tables for easy sorting +- **Complete instructions**: Step-by-step guidance +- **Progress tracking**: Status, tester, date columns + +## 📋 **YAML File Structure** + +Each YAML file follows this structure: + +```yaml +worksheet_name: "Test Area Name" +description: "What this worksheet tests" +priority: "CRITICAL|HIGH|MEDIUM|LOW" +estimated_time: "Time estimate" + +headers: + - "Test ID" + - "Description" + - "Steps" + - "Expected" + - "Status" + - "Tester" + # ... more columns + +tests: + - test_id: "TEST-001" + description: "Test description" + steps: | + 1. Step one + 2. Step two + expected: "Expected result" + priority: "CRITICAL" + # ... more fields ``` ## 🎯 **Main Migration Test** -**THE KEY TEST**: Verify old servers are visible after migration - -**Primary Test Files**: -- `migration_tests.py` → **MIG-003**: "OLD SERVERS VISIBLE" -- `admin_ui_tests.py` → **UI-003**: "Server List View" -- `test-plan.xlsx` → **Migration Tests** worksheet +**Focus**: Verify old servers are visible after migration -**What to validate**: -1. ✅ Admin UI shows all servers (including pre-migration) -2. ✅ Server details are accessible -3. ✅ No empty server list +**Key Files**: +- `migration_tests.yaml` → **MIG-003**: "OLD SERVERS VISIBLE" +- `admin_ui_tests.yaml` → **UI-003**: "Server List View" -## 📋 **Test Execution Guide** +**Critical Test**: Ensure all pre-migration servers appear in admin UI -### **For New Testers** -1. **Setup**: `python3 setup_instructions.py` (interactive guide) -2. **Migration**: `python3 migration_tests.py` (critical validation) -3. **UI**: `python3 admin_ui_tests.py` (main server visibility test) -4. **APIs**: Run remaining test files as time permits +## 👥 **For 10 Testers** -### **For Experienced Testers** -1. **Excel**: Open `test-plan.xlsx` and work through worksheets -2. **Filter**: Use Excel table filtering for specific test areas -3. **Critical**: Focus on CRITICAL priority tests first +### **Test Coordinators** +```bash +# Generate fresh Excel for distribution +python3 generate_test_plan.py -### **For Test Coordinators** -1. **Generate**: `./generate_test_plan.sh` (create fresh Excel) -2. **Assign**: Distribute test files to 10 testers -3. **Track**: Collect JSON result files from testers -4. **Summary**: Use `run_all_tests.py` for overall results +# Distribute test-plan.xlsx to testers +# Assign different worksheets to different testers +``` -## 🔧 **Technical Details** +### **Individual Testers** +```bash +# Open Excel file +open test-plan.xlsx -### **File Dependencies** -- All test Python files are **independent** (no dependencies between them) -- `generate_test_plan_xlsx.py` reads test data from Python files -- `run_all_tests.py` coordinates execution of individual files -- Each test file generates its own JSON results file +# Work through assigned worksheets +# Record results in Status/Actual/Comments columns +# Focus on CRITICAL tests first +``` -### **Excel Generation Process** +### **Test Maintainers** ```bash -./generate_test_plan.sh - ↓ - Calls: python3 generate_test_plan_xlsx.py - ↓ - Reads: All *_tests.py files - ↓ - Generates: test-plan.xlsx (8 worksheets, Excel tables) - ↓ - Result: Clean file, no corruption, ready for testers +# Update test definitions +vi .yaml + +# Add new test areas +cp template.yaml new_test_area.yaml + +# Regenerate Excel +python3 generate_test_plan.py ``` -### **Test Result Tracking** -Each test file can generate JSON results: -- `migration_test_results.json` -- `auth_test_results.json` -- `admin_ui_test_results.json` -- etc. +## 🔧 **Technical Benefits** -## ⚠️ **Critical Success Criteria** +### **Easy Maintenance** +- Edit YAML files instead of complex Python code +- Clear, readable test definitions +- No Excel corruption from manual editing +- Version control friendly -### **MUST PASS for Production** -1. ✅ **Migration Tests**: All critical tests pass -2. ✅ **Server Visibility**: Old servers visible in admin UI -3. ✅ **Authentication**: Email and basic auth work -4. ✅ **Team Assignments**: All resources have proper teams +### **Quality Control** +- YAML validation catches syntax errors +- Consistent test structure across all areas +- Easy to review changes in pull requests +- Template-driven test creation -### **SHOULD PASS for Quality** -1. ✅ API endpoints respond correctly -2. ✅ Admin UI fully functional -3. ✅ Security defenses active -4. ✅ Performance acceptable +### **Flexibility** +- Add new test areas by creating YAML files +- Modify test structure by updating YAML schema +- Generate different output formats (Excel, CSV, HTML) +- Script-friendly for automation -## 💡 **Pro Tips** +## 📊 **Generated Excel Features** -- **Start with setup_instructions.py** - it guides environment preparation -- **Focus on migration_tests.py** - contains the main server visibility test -- **Use --help** with any test file for detailed usage -- **Take screenshots** of UI issues for debugging -- **Record exact error messages** for troubleshooting -- **Test both SQLite and PostgreSQL** if possible +- **Clean formatting**: Professional appearance +- **Excel tables**: Built-in filtering and sorting +- **Priority coding**: Visual priority indicators +- **Progress tracking**: Tester name, date, status columns +- **No corruption**: Proper file handling prevents Excel repair warnings +- **Complete coverage**: All test areas included -## 🎯 **Expected Outcomes** +## 💡 **Pro Tips** -After successful testing: -- ✅ Old servers are visible in admin UI (main migration fix) -- ✅ All multitenancy features work correctly -- ✅ APIs respond with proper team-based filtering -- ✅ Admin interface is fully functional -- ✅ Database migration completed without issues -- ✅ Security measures are active and effective +- **Edit YAML files** to modify tests (much easier than Excel) +- **Regenerate often** to get fresh, clean Excel files +- **Use vi/vim** for YAML editing with syntax highlighting +- **Validate YAML** before generating (python3 -c "import yaml; yaml.safe_load(open('file.yaml'))") +- **Version control** YAML files to track test evolution -This testing suite ensures your MCP Gateway v0.7.0 migration was successful! \ No newline at end of file +This YAML-based approach makes the test suite much more maintainable and scalable for ongoing MCP Gateway validation! diff --git a/tests/manual/admin_ui_tests.py b/tests/manual/admin_ui_tests.py deleted file mode 100644 index c148396c7..000000000 --- a/tests/manual/admin_ui_tests.py +++ /dev/null @@ -1,450 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -MCP Gateway v0.7.0 - Admin UI Manual Tests - -Comprehensive testing of the admin web interface including: -- Login and authentication -- Dashboard and navigation -- Server management UI (CRITICAL for migration validation) -- Team management interface -- User administration -- Export/import interface - -Usage: - python3 tests/manual/admin_ui_tests.py -""" - -import sys -import json -from pathlib import Path -from datetime import datetime - -# Admin UI test cases -ADMIN_UI_TESTS = [ - { - "id": "UI-001", - "section": "Authentication", - "component": "Login Page", - "action": "Test admin login interface", - "steps": [ - "1. Open web browser (Chrome or Firefox recommended)", - "2. Navigate to: http://localhost:4444/admin", - "3. Observe login page layout and components", - "4. Check for email and password input fields", - "5. Look for 'Login' or 'Sign In' button", - "6. Test form validation (empty fields)", - "7. Enter admin email from .env file", - "8. Enter admin password from .env file", - "9. Click Login button", - "10. Verify successful redirect to admin dashboard" - ], - "expected": "Login page loads, form validation works, authentication successful", - "browser": "Chrome/Firefox", - "screenshot": "Optional", - "critical": True - }, - { - "id": "UI-002", - "section": "Dashboard", - "component": "Main Dashboard", - "action": "Navigate and test admin dashboard", - "steps": [ - "1. After successful login, observe dashboard layout", - "2. Count the number of statistics cards displayed", - "3. Check navigation menu on left side or top", - "4. Click on each statistic card to test interactions", - "5. Test responsive design (resize browser window)", - "6. Check for any error messages or warnings", - "7. Verify user menu/profile in top right", - "8. Test logout functionality" - ], - "expected": "Dashboard functional with stats, navigation menu works, responsive design", - "browser": "Chrome/Firefox", - "screenshot": "Optional", - "critical": False - }, - { - "id": "UI-003", - "section": "Virtual Servers", - "component": "Server List View", - "action": "View and verify server list - CRITICAL MIGRATION TEST", - "steps": [ - "1. Click 'Virtual Servers' in navigation menu", - "2. Observe server list/grid layout", - "3. COUNT the total number of servers displayed", - "4. IDENTIFY servers created before migration (older creation dates)", - "5. Click on each server card/row to view details", - "6. Verify server information is accessible and complete", - "7. Check server actions (start/stop/restart if available)", - "8. Test server filtering and search if available", - "9. TAKE SCREENSHOT of server list showing all servers", - "10. Record server names and their visibility status" - ], - "expected": "ALL servers visible including pre-migration servers, details accessible", - "browser": "Chrome/Firefox", - "screenshot": "REQUIRED", - "critical": True, - "main_migration_test": True - }, - { - "id": "UI-004", - "section": "Teams", - "component": "Team Management Interface", - "action": "Test team management functionality", - "steps": [ - "1. Navigate to 'Teams' section in admin interface", - "2. View team list/grid display", - "3. Find your personal team (usually ''s Team')", - "4. Click on personal team to view details", - "5. Check team information display", - "6. Click 'View Members' or 'Members' tab", - "7. Verify you're listed as 'Owner'", - "8. Test 'Create Team' functionality", - "9. Fill out team creation form", - "10. Verify new team appears in list" - ], - "expected": "Team interface functional, personal team visible, team creation works", - "browser": "Chrome/Firefox", - "screenshot": "Optional", - "critical": False - }, - { - "id": "UI-005", - "section": "Tools", - "component": "Tool Registry Interface", - "action": "Test tool management and invocation", - "steps": [ - "1. Navigate to 'Tools' section", - "2. View available tools list", - "3. Check team-based filtering is working", - "4. Click on any tool to view details", - "5. Look for 'Invoke' or 'Execute' button", - "6. Test tool invocation interface", - "7. Fill in tool parameters if prompted", - "8. Submit tool execution", - "9. Verify results are displayed properly", - "10. Test tool creation form if available" - ], - "expected": "Tools accessible by team permissions, invocation interface works", - "browser": "Chrome/Firefox", - "screenshot": "Optional", - "critical": False - }, - { - "id": "UI-006", - "section": "Resources", - "component": "Resource Management Interface", - "action": "Test resource browser and management", - "steps": [ - "1. Navigate to 'Resources' section", - "2. Browse available resources", - "3. Check team-based resource filtering", - "4. Click on any resource to view details", - "5. Test resource download functionality", - "6. Try 'Upload Resource' button if available", - "7. Test file upload interface", - "8. Fill in resource metadata", - "9. Verify upload completes successfully", - "10. Check new resource appears in list" - ], - "expected": "Resource browser functional, upload/download works, team filtering applied", - "browser": "Chrome/Firefox", - "screenshot": "Optional", - "critical": False - }, - { - "id": "UI-007", - "section": "User Management", - "component": "User Administration Interface", - "action": "Test user management (admin only)", - "steps": [ - "1. Navigate to 'Users' section (admin only)", - "2. View user list display", - "3. Click on any user to view details", - "4. Check user profile information", - "5. Test 'Create User' functionality if available", - "6. Fill user creation form", - "7. Test role assignment interface", - "8. Verify user permissions management", - "9. Check user activity/audit information", - "10. Test user status changes (active/inactive)" - ], - "expected": "User management interface functional, role assignment works", - "browser": "Chrome/Firefox", - "screenshot": "Optional", - "critical": False, - "requires": "Platform admin privileges" - }, - { - "id": "UI-008", - "section": "Export/Import", - "component": "Configuration Management Interface", - "action": "Test configuration backup and restore", - "steps": [ - "1. Navigate to 'Export/Import' section", - "2. Locate 'Export Configuration' button/link", - "3. Click export and select export options", - "4. Download the configuration JSON file", - "5. Open JSON file and verify contents", - "6. Locate 'Import Configuration' button/link", - "7. Select the downloaded JSON file", - "8. Choose import options (merge/replace)", - "9. Execute the import process", - "10. Verify import completion and success" - ], - "expected": "Export downloads complete JSON, import processes successfully", - "browser": "Chrome/Firefox", - "screenshot": "Recommended", - "critical": False - }, - { - "id": "UI-009", - "section": "Mobile Compatibility", - "component": "Responsive Design", - "action": "Test mobile device compatibility", - "steps": [ - "1. Resize browser window to mobile width (<768px)", - "2. OR open admin UI on actual mobile device", - "3. Test navigation menu (hamburger menu?)", - "4. Check form input usability on mobile", - "5. Test touch interactions and gestures", - "6. Verify text readability and sizing", - "7. Check all features remain accessible", - "8. Test portrait and landscape orientations", - "9. Verify no horizontal scrolling required", - "10. Check mobile-specific UI adaptations" - ], - "expected": "Interface adapts to mobile screens while maintaining full functionality", - "browser": "Mobile Chrome/Safari", - "screenshot": "Optional", - "critical": False - }, - { - "id": "UI-010", - "section": "Error Handling", - "component": "UI Error Scenarios", - "action": "Test error handling and user experience", - "steps": [ - "1. Trigger network error (disconnect internet briefly)", - "2. Submit forms with invalid data", - "3. Try accessing resources without permission", - "4. Test session timeout scenarios", - "5. Check error message display", - "6. Verify error messages are user-friendly", - "7. Test error recovery mechanisms", - "8. Check browser console for JavaScript errors", - "9. Verify graceful degradation", - "10. Test error logging and reporting" - ], - "expected": "Graceful error handling, helpful error messages, no JavaScript crashes", - "browser": "Chrome/Firefox", - "screenshot": "For errors", - "critical": False - } -] - - -def run_admin_ui_tests(): - """Run comprehensive admin UI tests.""" - - print("🖥️ ADMIN UI COMPREHENSIVE TESTING") - print("=" * 60) - print("🎯 Testing every admin interface component") - print("🚨 Includes critical migration validation (server visibility)") - - results = [] - - print("\\n🔧 Pre-test Requirements:") - print("1. MCP Gateway running (make dev)") - print("2. Admin login credentials available") - print("3. Browser with developer tools (F12)") - - input("\\nPress Enter when ready to begin UI testing...") - - for test in ADMIN_UI_TESTS: - print(f"\\n{'='*60}") - print(f"🧪 TEST {test['id']}: {test['component']}") - print(f"Section: {test['section']}") - print(f"Action: {test['action']}") - - if test.get('critical'): - print("🚨 CRITICAL TEST") - - if test.get('main_migration_test'): - print("🎯 MAIN MIGRATION VALIDATION TEST!") - - if test.get('requires'): - print(f"⚠️ Requires: {test['requires']}") - - print(f"\\n📋 Detailed Steps:") - for step in test['steps']: - print(f" {step}") - - print(f"\\n✅ Expected Result:") - print(f" {test['expected']}") - - print(f"\\n🌐 Browser: {test['browser']}") - print(f"📸 Screenshot: {test['screenshot']}") - - # Manual execution - response = input(f"\\nExecute UI test {test['id']}? (y/n/skip): ").lower() - - if response == 'skip' or response == 's': - print(f"⚠️ {test['id']}: SKIPPED") - results.append({"id": test['id'], "status": "SKIP", "timestamp": datetime.now().isoformat()}) - continue - elif response != 'y': - print(f"❌ {test['id']}: ABORTED") - break - - # Get test results - print(f"\\n📝 Record Results for {test['id']}:") - ui_result = input("Did the UI behave as expected? (y/n): ").lower() - - if ui_result == 'y': - status = "PASS" - print(f"✅ {test['id']}: PASSED") - else: - status = "FAIL" - print(f"❌ {test['id']}: FAILED") - failure_details = input("Describe what went wrong: ") - - if test.get('critical') or test.get('main_migration_test'): - print("🚨 CRITICAL UI TEST FAILED!") - print("This may indicate migration issues") - - # Record detailed results - result_data = { - "id": test['id'], - "section": test['section'], - "component": test['component'], - "status": status, - "browser": test['browser'], - "timestamp": datetime.now().isoformat() - } - - if status == "FAIL": - result_data['failure_details'] = failure_details - - if test.get('screenshot') == "REQUIRED" or test.get('screenshot') == "Recommended": - screenshot_taken = input("Screenshot taken? (y/n): ").lower() == 'y' - result_data['screenshot_taken'] = screenshot_taken - - results.append(result_data) - - # Generate UI test summary - generate_ui_summary(results) - - return results - - -def generate_ui_summary(results): - """Generate UI testing summary.""" - - print(f"\\n{'='*60}") - print("📊 ADMIN UI TEST SUMMARY") - print("=" * 60) - - passed = len([r for r in results if r['status'] == 'PASS']) - failed = len([r for r in results if r['status'] == 'FAIL']) - skipped = len([r for r in results if r['status'] == 'SKIP']) - total = len(results) - - print(f"📈 UI Test Results:") - print(f" ✅ Passed: {passed}/{total}") - print(f" ❌ Failed: {failed}/{total}") - print(f" ⚠️ Skipped: {skipped}/{total}") - - # Check critical UI tests - critical_results = [r for r in results if 'UI-001' in r['id'] or 'UI-003' in r['id']] # Login and server visibility - critical_passed = len([r for r in critical_results if r['status'] == 'PASS']) - - print(f"\\n🚨 Critical UI Tests:") - print(f" ✅ Critical Passed: {critical_passed}/{len(critical_results)}") - - # Look for main migration test result - server_visibility_test = next((r for r in results if 'UI-003' in r['id']), None) - if server_visibility_test: - if server_visibility_test['status'] == 'PASS': - print("\\n🎯 MAIN MIGRATION TEST: ✅ PASSED") - print(" Old servers are visible in admin UI!") - else: - print("\\n🎯 MAIN MIGRATION TEST: ❌ FAILED") - print(" Old servers may not be visible - check migration") - - # Overall assessment - if failed == 0 and critical_passed == len(critical_results): - print(f"\\n🎉 ADMIN UI: FULLY FUNCTIONAL!") - print("✅ All critical UI tests passed") - print("✅ Admin interface ready for production use") - else: - print(f"\\n⚠️ ADMIN UI: ISSUES DETECTED") - print("🔧 Review failed tests and resolve issues") - - # Save results - results_file = Path("tests/manual/admin_ui_test_results.json") - with open(results_file, 'w') as f: - json.dump({ - "summary": {"passed": passed, "failed": failed, "skipped": skipped}, - "results": results, - "timestamp": datetime.now().isoformat() - }, f, indent=2) - - print(f"\\n📄 Results saved: {results_file}") - - -def test_specific_ui_component(component_id): - """Test specific UI component.""" - - test = next((t for t in ADMIN_UI_TESTS if t['id'] == component_id), None) - - if not test: - print(f"❌ Component {component_id} not found") - available = [t['id'] for t in ADMIN_UI_TESTS] - print(f"Available: {available}") - return False - - print(f"🧪 TESTING UI COMPONENT: {component_id}") - print("=" * 50) - print(f"Section: {test['section']}") - print(f"Component: {test['component']}") - print(f"Action: {test['action']}") - - if test.get('main_migration_test'): - print("🎯 THIS IS THE MAIN MIGRATION TEST!") - - print(f"\\n📋 Steps:") - for step in test['steps']: - print(f" {step}") - - print(f"\\n✅ Expected: {test['expected']}") - - return True - - -if __name__ == "__main__": - if len(sys.argv) > 1: - if sys.argv[1] == "--help": - print("🖥️ Admin UI Tests") - print("Usage:") - print(" python3 tests/manual/admin_ui_tests.py # Run all UI tests") - print(" python3 tests/manual/admin_ui_tests.py --component UI-003 # Test specific component") - print(" python3 tests/manual/admin_ui_tests.py --help # This help") - elif sys.argv[1] == "--component" and len(sys.argv) > 2: - test_specific_ui_component(sys.argv[2]) - else: - print("❌ Unknown option. Use --help for usage.") - else: - try: - print("🖥️ Starting admin UI testing...") - print("💡 Focus on UI-003 (server visibility) - this is the main migration test!") - results = run_admin_ui_tests() - print("\\n🎉 Admin UI testing complete!") - print("Next: python3 tests/manual/database_tests.py") - except KeyboardInterrupt: - print("\\n❌ Testing cancelled by user") - sys.exit(1) - except Exception as e: - print(f"❌ Testing error: {e}") - sys.exit(1) \ No newline at end of file diff --git a/tests/manual/api_authentication_tests.py b/tests/manual/api_authentication_tests.py deleted file mode 100644 index 47ae7a9ff..000000000 --- a/tests/manual/api_authentication_tests.py +++ /dev/null @@ -1,420 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -MCP Gateway v0.7.0 - Authentication API Tests - -Comprehensive testing of all authentication endpoints including: -- Email registration and login -- JWT token management -- SSO integration (GitHub, Google) -- Password management -- Profile operations - -Usage: - python3 tests/manual/api_authentication_tests.py - python3 tests/manual/api_authentication_tests.py --endpoint /auth/login -""" - -import sys -import subprocess -import json -import requests -from pathlib import Path -from datetime import datetime - -# Add project root to path -project_root = Path(__file__).parent.parent.parent -sys.path.insert(0, str(project_root)) - -# Authentication test cases -AUTH_TESTS = [ - { - "id": "AUTH-001", - "endpoint": "/auth/register", - "method": "POST", - "description": "User registration endpoint", - "curl_command": 'curl -X POST http://localhost:4444/auth/register -H "Content-Type: application/json"', - "request_body": '{"email":"testuser@example.com","password":"TestPass123","full_name":"Test User"}', - "expected_status": 201, - "expected_response": "User created successfully with personal team", - "test_steps": [ - "1. Execute the cURL command with test user data", - "2. Verify HTTP status code is 201", - "3. Check response contains user ID and email", - "4. Verify personal team was created for user", - "5. Record exact response content" - ], - "validation": "Response should include user_id, email, and personal_team_id" - }, - { - "id": "AUTH-002", - "endpoint": "/auth/login", - "method": "POST", - "description": "Email authentication login", - "curl_command": 'curl -X POST http://localhost:4444/auth/login -H "Content-Type: application/json"', - "request_body": '{"email":"admin@example.com","password":"changeme"}', - "expected_status": 200, - "expected_response": "JWT token returned in response", - "test_steps": [ - "1. Use admin credentials from .env file", - "2. Execute login request", - "3. Verify HTTP 200 status code", - "4. Check response contains 'token' field", - "5. Verify token is valid JWT format", - "6. Save token for subsequent API tests" - ], - "validation": "Response must contain valid JWT token", - "critical": True - }, - { - "id": "AUTH-003", - "endpoint": "/auth/logout", - "method": "POST", - "description": "User logout endpoint", - "curl_command": 'curl -X POST http://localhost:4444/auth/logout -H "Authorization: Bearer "', - "request_body": "", - "expected_status": 200, - "expected_response": "Logout successful, token invalidated", - "test_steps": [ - "1. Use JWT token from login test", - "2. Execute logout request with Authorization header", - "3. Verify HTTP 200 status", - "4. Try using the token again (should fail)", - "5. Verify token is now invalid" - ], - "validation": "Token becomes invalid after logout" - }, - { - "id": "AUTH-004", - "endpoint": "/auth/refresh", - "method": "POST", - "description": "JWT token refresh", - "curl_command": 'curl -X POST http://localhost:4444/auth/refresh -H "Authorization: Bearer "', - "request_body": "", - "expected_status": 200, - "expected_response": "New JWT token issued", - "test_steps": [ - "1. Use valid JWT token", - "2. Request token refresh", - "3. Verify new token returned", - "4. Test both old and new tokens", - "5. Verify new token works, old may be invalidated" - ], - "validation": "New token returned and functional" - }, - { - "id": "AUTH-005", - "endpoint": "/auth/profile", - "method": "GET", - "description": "Get user profile information", - "curl_command": 'curl http://localhost:4444/auth/profile -H "Authorization: Bearer "', - "request_body": "", - "expected_status": 200, - "expected_response": "User profile data including email, teams, roles", - "test_steps": [ - "1. Use valid JWT token", - "2. Request user profile", - "3. Verify profile contains user email", - "4. Check team membership information", - "5. Verify role assignments if applicable" - ], - "validation": "Profile includes email, teams, and role data" - }, - { - "id": "AUTH-006", - "endpoint": "/auth/change-password", - "method": "POST", - "description": "Change user password", - "curl_command": 'curl -X POST http://localhost:4444/auth/change-password -H "Authorization: Bearer " -H "Content-Type: application/json"', - "request_body": '{"old_password":"changeme","new_password":"NewPassword123"}', - "expected_status": 200, - "expected_response": "Password updated successfully", - "test_steps": [ - "1. Use current password as old_password", - "2. Provide strong new password", - "3. Execute password change request", - "4. Verify success response", - "5. Test login with new password", - "6. IMPORTANT: Change password back for other tests" - ], - "validation": "Password change works, can login with new password" - }, - { - "id": "AUTH-007", - "endpoint": "/auth/sso/github", - "method": "GET", - "description": "GitHub SSO authentication initiation", - "curl_command": 'curl -I http://localhost:4444/auth/sso/github', - "request_body": "", - "expected_status": 302, - "expected_response": "Redirect to GitHub OAuth authorization", - "test_steps": [ - "1. Execute request to GitHub SSO endpoint", - "2. Verify HTTP 302 redirect status", - "3. Check Location header contains github.com", - "4. Verify OAuth parameters in redirect URL", - "5. Note: Full OAuth flow requires GitHub app setup" - ], - "validation": "Redirects to GitHub OAuth (if SSO enabled)", - "requires_config": "SSO_GITHUB_ENABLED=true, GitHub OAuth app" - }, - { - "id": "AUTH-008", - "endpoint": "/auth/sso/google", - "method": "GET", - "description": "Google SSO authentication initiation", - "curl_command": 'curl -I http://localhost:4444/auth/sso/google', - "request_body": "", - "expected_status": 302, - "expected_response": "Redirect to Google OAuth authorization", - "test_steps": [ - "1. Execute request to Google SSO endpoint", - "2. Verify HTTP 302 redirect status", - "3. Check Location header contains accounts.google.com", - "4. Verify OAuth parameters in redirect URL", - "5. Note: Full OAuth flow requires Google OAuth app" - ], - "validation": "Redirects to Google OAuth (if SSO enabled)", - "requires_config": "SSO_GOOGLE_ENABLED=true, Google OAuth app" - }, - { - "id": "AUTH-009", - "endpoint": "/auth/verify-email", - "method": "POST", - "description": "Email address verification", - "curl_command": 'curl -X POST http://localhost:4444/auth/verify-email -H "Content-Type: application/json"', - "request_body": '{"token":""}', - "expected_status": 200, - "expected_response": "Email verified successfully", - "test_steps": [ - "1. Register new user first (to get verification token)", - "2. Check email for verification token (if email configured)", - "3. Use token in verification request", - "4. Verify email verification status updated", - "5. Check user can now perform email-verified actions" - ], - "validation": "Email verification updates user status", - "requires_config": "Email delivery configured" - }, - { - "id": "AUTH-010", - "endpoint": "/auth/forgot-password", - "method": "POST", - "description": "Password reset request", - "curl_command": 'curl -X POST http://localhost:4444/auth/forgot-password -H "Content-Type: application/json"', - "request_body": '{"email":"admin@example.com"}', - "expected_status": 200, - "expected_response": "Password reset email sent", - "test_steps": [ - "1. Request password reset for known user", - "2. Verify HTTP 200 response", - "3. Check email for reset link (if email configured)", - "4. Test reset token functionality", - "5. Verify password can be reset via token" - ], - "validation": "Password reset process initiated", - "requires_config": "Email delivery configured" - } -] - - -def run_auth_tests(): - """Run all authentication tests.""" - - print("🔐 AUTHENTICATION API TESTING") - print("=" * 60) - print("🎯 Testing all authentication endpoints") - - # Get base URL and setup - base_url = "http://localhost:4444" - results = [] - - print("\\n🔧 Pre-test Setup:") - print("1. Ensure MCP Gateway is running (make dev)") - print("2. Ensure migration completed successfully") - print("3. Have admin credentials from .env file ready") - - input("\\nPress Enter when ready to begin testing...") - - for test in AUTH_TESTS: - print(f"\\n{'='*60}") - print(f"🧪 TEST {test['id']}: {test['endpoint']}") - print(f"Method: {test['method']}") - print(f"Description: {test['description']}") - - if test.get('critical'): - print("🚨 CRITICAL TEST") - - if test.get('requires_config'): - print(f"⚠️ Requires: {test['requires_config']}") - - print(f"\\n📋 Test Steps:") - for step in test['test_steps']: - print(f" {step}") - - print(f"\\n💻 cURL Command:") - print(f" {test['curl_command']}") - if test['request_body']: - print(f" Data: {test['request_body']}") - - print(f"\\n✅ Expected:") - print(f" Status: {test['expected_status']}") - print(f" Response: {test['expected_response']}") - - # Manual execution - response = input(f"\\nExecute test {test['id']}? (y/n/skip): ").lower() - - if response == 'skip' or response == 's': - print(f"⚠️ {test['id']}: SKIPPED") - results.append({"id": test['id'], "status": "SKIP", "timestamp": datetime.now().isoformat()}) - continue - elif response != 'y': - print(f"❌ {test['id']}: ABORTED") - break - - # Get actual results from user - print(f"\\n📝 Record Results:") - actual_status = input("Actual HTTP status code: ") - actual_response = input("Actual response (summary): ") - - # Determine pass/fail - expected_str = str(test['expected_status']) - passed = actual_status == expected_str - status = "PASS" if passed else "FAIL" - - print(f"\\n{'✅' if passed else '❌'} {test['id']}: {status}") - - if not passed and test.get('critical'): - print("🚨 CRITICAL TEST FAILED!") - continue_testing = input("Continue with remaining tests? (y/N): ").lower() - if continue_testing != 'y': - break - - # Record result - results.append({ - "id": test['id'], - "endpoint": test['endpoint'], - "status": status, - "expected_status": test['expected_status'], - "actual_status": actual_status, - "actual_response": actual_response, - "timestamp": datetime.now().isoformat() - }) - - # Generate summary - generate_auth_summary(results) - return results - - -def generate_auth_summary(results): - """Generate authentication test summary.""" - - print(f"\\n{'='*60}") - print("📊 AUTHENTICATION API TEST SUMMARY") - print("=" * 60) - - passed = len([r for r in results if r['status'] == 'PASS']) - failed = len([r for r in results if r['status'] == 'FAIL']) - skipped = len([r for r in results if r['status'] == 'SKIP']) - total = len(results) - - print(f"📈 Results:") - print(f" ✅ Passed: {passed}/{total}") - print(f" ❌ Failed: {failed}/{total}") - print(f" ⚠️ Skipped: {skipped}/{total}") - - if failed == 0: - print(f"\\n🎉 ALL AUTHENTICATION TESTS PASSED!") - print("✅ Authentication system fully functional") - else: - print(f"\\n⚠️ SOME AUTHENTICATION TESTS FAILED") - print("🔧 Review failed tests before production deployment") - - # Save results - results_file = Path("tests/manual/auth_test_results.json") - with open(results_file, 'w') as f: - json.dump({ - "summary": {"passed": passed, "failed": failed, "skipped": skipped, "total": total}, - "results": results, - "timestamp": datetime.now().isoformat() - }, f, indent=2) - - print(f"\\n📄 Results saved: {results_file}") - - -def test_specific_endpoint(endpoint): - """Test a specific authentication endpoint.""" - - test = next((t for t in AUTH_TESTS if t['endpoint'] == endpoint), None) - - if not test: - print(f"❌ Endpoint {endpoint} not found in test suite") - available = [t['endpoint'] for t in AUTH_TESTS] - print(f"Available endpoints: {available}") - return False - - print(f"🧪 TESTING SPECIFIC ENDPOINT: {endpoint}") - print("=" * 50) - print(f"Test ID: {test['id']}") - print(f"Method: {test['method']}") - print(f"Description: {test['description']}") - - print(f"\\n💻 cURL Command:") - print(f"{test['curl_command']}") - if test['request_body']: - print(f"Data: {test['request_body']}") - - print(f"\\n📋 Test Steps:") - for step in test['test_steps']: - print(f" {step}") - - print(f"\\n✅ Expected:") - print(f" Status: {test['expected_status']}") - print(f" Response: {test['expected_response']}") - - return True - - -def list_all_endpoints(): - """List all authentication endpoints.""" - - print("📋 ALL AUTHENTICATION ENDPOINTS") - print("=" * 50) - - for test in AUTH_TESTS: - critical_marker = " 🚨 CRITICAL" if test.get('critical') else "" - config_marker = f" ⚠️ Requires: {test.get('requires_config')}" if test.get('requires_config') else "" - - print(f"\\n{test['id']}: {test['endpoint']} ({test['method']}){critical_marker}{config_marker}") - print(f" Description: {test['description']}") - print(f" Expected: {test['expected_status']} - {test['expected_response']}") - - -if __name__ == "__main__": - if len(sys.argv) > 1: - if sys.argv[1] == "--help": - print("🔐 Authentication API Tests") - print("Usage:") - print(" python3 tests/manual/api_authentication_tests.py # Run all tests") - print(" python3 tests/manual/api_authentication_tests.py --endpoint # Test specific endpoint") - print(" python3 tests/manual/api_authentication_tests.py --list # List all endpoints") - print(" python3 tests/manual/api_authentication_tests.py --help # This help") - elif sys.argv[1] == "--list": - list_all_endpoints() - elif sys.argv[1] == "--endpoint" and len(sys.argv) > 2: - test_specific_endpoint(sys.argv[2]) - else: - print("❌ Unknown option. Use --help for usage.") - else: - # Run all authentication tests - try: - print("🔐 Starting authentication API testing...") - results = run_auth_tests() - print("\\n🎉 Authentication testing complete!") - print("Next: python3 tests/manual/api_teams_tests.py") - except KeyboardInterrupt: - print("\\n❌ Testing cancelled by user") - sys.exit(1) - except Exception as e: - print(f"❌ Testing error: {e}") - sys.exit(1) \ No newline at end of file diff --git a/tests/manual/api_servers_tests.py b/tests/manual/api_servers_tests.py deleted file mode 100644 index 1918bd58d..000000000 --- a/tests/manual/api_servers_tests.py +++ /dev/null @@ -1,276 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -MCP Gateway v0.7.0 - Virtual Servers API Tests - -Comprehensive testing of virtual server management including: -- Server listing and creation -- Server configuration and updates -- Transport endpoints (SSE, WebSocket) -- Server status and health monitoring - -Usage: - python3 tests/manual/api_servers_tests.py -""" - -import sys -import json -from pathlib import Path -from datetime import datetime - -# Virtual Servers API test cases -SERVERS_TESTS = [ - { - "id": "SRV-001", - "endpoint": "/servers", - "method": "GET", - "description": "List virtual servers with team filtering", - "curl_command": 'curl http://localhost:4444/servers -H "Authorization: Bearer "', - "request_body": "", - "expected_status": 200, - "expected_response": "Array of virtual servers user can access", - "test_steps": [ - "1. Use valid JWT token", - "2. Execute servers list request", - "3. Verify HTTP 200 status", - "4. Check response contains server array", - "5. Verify team-based filtering applied", - "6. Check server metadata (name, transport, team, etc.)" - ], - "validation": "Servers listed with proper team-based access control", - "critical": True - }, - { - "id": "SRV-002", - "endpoint": "/servers", - "method": "POST", - "description": "Create new virtual server", - "curl_command": 'curl -X POST http://localhost:4444/servers -H "Authorization: Bearer " -H "Content-Type: application/json"', - "request_body": '{"name":"Manual Test Server","description":"Server created during manual testing","transport":"sse","config":{"timeout":30}}', - "expected_status": 201, - "expected_response": "Virtual server created with ID and team assignment", - "test_steps": [ - "1. Prepare server configuration data", - "2. Execute server creation request", - "3. Verify HTTP 201 status", - "4. Check response contains server ID", - "5. Verify server appears in servers list", - "6. Check automatic team assignment", - "7. Save server ID for subsequent tests" - ], - "validation": "Server created with automatic team assignment" - }, - { - "id": "SRV-003", - "endpoint": "/servers/{id}", - "method": "GET", - "description": "Get server details and configuration", - "curl_command": 'curl http://localhost:4444/servers/{SERVER_ID} -H "Authorization: Bearer "', - "request_body": "", - "expected_status": 200, - "expected_response": "Server details with full configuration", - "test_steps": [ - "1. Use server ID from creation test or existing server", - "2. Request server details", - "3. Verify HTTP 200 status", - "4. Check detailed server information", - "5. Verify configuration data is included", - "6. Check team and ownership information" - ], - "validation": "Server details accessible with complete metadata" - }, - { - "id": "SRV-004", - "endpoint": "/servers/{id}", - "method": "PUT", - "description": "Update server configuration", - "curl_command": 'curl -X PUT http://localhost:4444/servers/{SERVER_ID} -H "Authorization: Bearer " -H "Content-Type: application/json"', - "request_body": '{"name":"Updated Server Name","description":"Updated during manual testing","config":{"timeout":60}}', - "expected_status": 200, - "expected_response": "Server updated successfully", - "test_steps": [ - "1. Use server ID from previous tests", - "2. Prepare update configuration", - "3. Execute server update request", - "4. Verify HTTP 200 status", - "5. Check server details show updates", - "6. Verify permissions enforced (owner/team access)" - ], - "validation": "Server updates work with proper authorization" - }, - { - "id": "SRV-005", - "endpoint": "/servers/{id}/sse", - "method": "GET", - "description": "Server-Sent Events connection test", - "curl_command": 'curl -N http://localhost:4444/servers/{SERVER_ID}/sse -H "Authorization: Bearer "', - "request_body": "", - "expected_status": 200, - "expected_response": "SSE stream established, events received", - "test_steps": [ - "1. Use server ID with SSE transport", - "2. Execute SSE connection request", - "3. Verify HTTP 200 status", - "4. Check for SSE headers (text/event-stream)", - "5. Monitor stream for events", - "6. Test connection stability" - ], - "validation": "SSE connection works, events stream properly" - }, - { - "id": "SRV-006", - "endpoint": "/servers/{id}/ws", - "method": "WebSocket", - "description": "WebSocket connection test", - "curl_command": "Use WebSocket client or browser developer tools", - "request_body": "WebSocket upgrade request with Authorization header", - "expected_status": 101, - "expected_response": "WebSocket connection established", - "test_steps": [ - "1. Use WebSocket client tool or browser dev tools", - "2. Connect to ws://localhost:4444/servers/{SERVER_ID}/ws", - "3. Include Authorization header with JWT token", - "4. Verify WebSocket upgrade (status 101)", - "5. Test bidirectional communication", - "6. Check connection stability and message handling" - ], - "validation": "WebSocket connection works, bidirectional communication" - }, - { - "id": "SRV-007", - "endpoint": "/servers/{id}/tools", - "method": "GET", - "description": "List tools available on server", - "curl_command": 'curl http://localhost:4444/servers/{SERVER_ID}/tools -H "Authorization: Bearer "', - "request_body": "", - "expected_status": 200, - "expected_response": "Array of tools available on the server", - "test_steps": [ - "1. Use server ID with available tools", - "2. Request server tools", - "3. Verify HTTP 200 status", - "4. Check tools array in response", - "5. Verify tool details and schemas", - "6. Check team-based tool access" - ], - "validation": "Server tools listed with proper access control" - }, - { - "id": "SRV-008", - "endpoint": "/servers/{id}/resources", - "method": "GET", - "description": "List resources available on server", - "curl_command": 'curl http://localhost:4444/servers/{SERVER_ID}/resources -H "Authorization: Bearer "', - "request_body": "", - "expected_status": 200, - "expected_response": "Array of resources available on the server", - "test_steps": [ - "1. Use server ID with available resources", - "2. Request server resources", - "3. Verify HTTP 200 status", - "4. Check resources array", - "5. Verify resource URIs and metadata", - "6. Test resource access permissions" - ], - "validation": "Server resources listed with access control" - }, - { - "id": "SRV-009", - "endpoint": "/servers/{id}/status", - "method": "GET", - "description": "Get server status and health", - "curl_command": 'curl http://localhost:4444/servers/{SERVER_ID}/status -H "Authorization: Bearer "', - "request_body": "", - "expected_status": 200, - "expected_response": "Server status, health, and connection info", - "test_steps": [ - "1. Use any valid server ID", - "2. Request server status", - "3. Verify HTTP 200 status", - "4. Check status information", - "5. Verify health indicators", - "6. Check connection and performance metrics" - ], - "validation": "Server status and health data provided" - }, - { - "id": "SRV-010", - "endpoint": "/servers/{id}", - "method": "DELETE", - "description": "Delete virtual server", - "curl_command": 'curl -X DELETE http://localhost:4444/servers/{SERVER_ID} -H "Authorization: Bearer "', - "request_body": "", - "expected_status": 204, - "expected_response": "Server deleted successfully", - "test_steps": [ - "1. Use test server ID (not production server)", - "2. Execute server deletion request", - "3. Verify HTTP 204 status", - "4. Check server no longer in list", - "5. Verify permissions enforced", - "6. Check cleanup of associated resources" - ], - "validation": "Server deletion works with proper authorization" - } -] - - -def run_servers_tests(): - """Run all servers API tests.""" - - print("🖥️ VIRTUAL SERVERS API TESTING") - print("=" * 60) - - results = [] - - # Get JWT token - token = input("Enter JWT token: ").strip() - if not token: - print("❌ Token required") - return [] - - for test in SERVERS_TESTS: - print(f"\\n{'='*60}") - print(f"🧪 {test['id']}: {test['endpoint']} ({test['method']})") - - if test.get('critical'): - print("🚨 CRITICAL TEST") - - # Show steps and execute - print(f"\\nSteps:") - for step in test['test_steps']: - print(f" {step}") - - curl_cmd = test['curl_command'].replace('', token) - print(f"\\nCommand: {curl_cmd}") - - response = input(f"\\nExecute {test['id']}? (y/n/skip): ") - - if response.lower() == 'skip': - results.append({"id": test['id'], "status": "SKIP"}) - elif response.lower() == 'y': - status_code = input("HTTP status: ") - response_summary = input("Response summary: ") - - passed = status_code == str(test['expected_status']) - results.append({ - "id": test['id'], - "status": "PASS" if passed else "FAIL", - "actual_status": status_code, - "response": response_summary - }) - - # Save results - with open("tests/manual/servers_test_results.json", 'w') as f: - json.dump(results, f, indent=2) - - return results - - -if __name__ == "__main__": - try: - results = run_servers_tests() - print("\\n🎉 Servers API testing complete!") - except Exception as e: - print(f"❌ Error: {e}") - sys.exit(1) \ No newline at end of file diff --git a/tests/manual/api_teams_tests.py b/tests/manual/api_teams_tests.py deleted file mode 100644 index 87f6c8cf6..000000000 --- a/tests/manual/api_teams_tests.py +++ /dev/null @@ -1,308 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -MCP Gateway v0.7.0 - Teams API Tests - -Comprehensive testing of team management endpoints including: -- Team creation and management -- Team membership operations -- Team invitations -- Team visibility and permissions - -Usage: - python3 tests/manual/api_teams_tests.py -""" - -import sys -import json -from pathlib import Path -from datetime import datetime - -# Teams API test cases -TEAMS_TESTS = [ - { - "id": "TEAM-001", - "endpoint": "/teams", - "method": "GET", - "description": "List user's teams", - "curl_command": 'curl http://localhost:4444/teams -H "Authorization: Bearer "', - "request_body": "", - "expected_status": 200, - "expected_response": "Array of teams user belongs to", - "test_steps": [ - "1. Get JWT token from login first", - "2. Execute teams list request", - "3. Verify HTTP 200 status", - "4. Check response is JSON array", - "5. Verify personal team is included", - "6. Check team data includes name, id, visibility" - ], - "validation": "Returns user's teams including personal team" - }, - { - "id": "TEAM-002", - "endpoint": "/teams", - "method": "POST", - "description": "Create new team", - "curl_command": 'curl -X POST http://localhost:4444/teams -H "Authorization: Bearer " -H "Content-Type: application/json"', - "request_body": '{"name":"Manual Test Team","description":"Team created during manual testing","visibility":"private","max_members":20}', - "expected_status": 201, - "expected_response": "Team created successfully with generated ID", - "test_steps": [ - "1. Prepare team creation data", - "2. Execute team creation request", - "3. Verify HTTP 201 status", - "4. Check response contains team ID", - "5. Verify team appears in teams list", - "6. Save team ID for subsequent tests" - ], - "validation": "Team created and accessible" - }, - { - "id": "TEAM-003", - "endpoint": "/teams/{id}", - "method": "GET", - "description": "Get team details", - "curl_command": 'curl http://localhost:4444/teams/{TEAM_ID} -H "Authorization: Bearer "', - "request_body": "", - "expected_status": 200, - "expected_response": "Team details with member information", - "test_steps": [ - "1. Use team ID from creation test or personal team", - "2. Request team details", - "3. Verify HTTP 200 status", - "4. Check response includes team metadata", - "5. Verify member list is included", - "6. Check permissions are enforced" - ], - "validation": "Team details accessible to members" - }, - { - "id": "TEAM-004", - "endpoint": "/teams/{id}", - "method": "PUT", - "description": "Update team information", - "curl_command": 'curl -X PUT http://localhost:4444/teams/{TEAM_ID} -H "Authorization: Bearer " -H "Content-Type: application/json"', - "request_body": '{"name":"Updated Team Name","description":"Updated during manual testing"}', - "expected_status": 200, - "expected_response": "Team updated successfully", - "test_steps": [ - "1. Use team ID from creation test", - "2. Prepare update data", - "3. Execute team update request", - "4. Verify HTTP 200 status", - "5. Check team details show updated information", - "6. Verify only team owners can update" - ], - "validation": "Team update works for owners" - }, - { - "id": "TEAM-005", - "endpoint": "/teams/{id}/members", - "method": "GET", - "description": "List team members", - "curl_command": 'curl http://localhost:4444/teams/{TEAM_ID}/members -H "Authorization: Bearer "', - "request_body": "", - "expected_status": 200, - "expected_response": "Array of team members with roles", - "test_steps": [ - "1. Use valid team ID", - "2. Request member list", - "3. Verify HTTP 200 status", - "4. Check members array in response", - "5. Verify member roles (owner/member)", - "6. Check join dates and status" - ], - "validation": "Member list shows users with correct roles" - }, - { - "id": "TEAM-006", - "endpoint": "/teams/{id}/members", - "method": "POST", - "description": "Add team member", - "curl_command": 'curl -X POST http://localhost:4444/teams/{TEAM_ID}/members -H "Authorization: Bearer " -H "Content-Type: application/json"', - "request_body": '{"user_email":"newmember@example.com","role":"member"}', - "expected_status": 201, - "expected_response": "Member added to team successfully", - "test_steps": [ - "1. Create test user first (if needed)", - "2. Prepare member addition data", - "3. Execute add member request", - "4. Verify HTTP 201 status", - "5. Check member appears in member list", - "6. Verify only team owners can add members" - ], - "validation": "Member addition works for team owners" - }, - { - "id": "TEAM-007", - "endpoint": "/teams/{id}/invitations", - "method": "GET", - "description": "List team invitations", - "curl_command": 'curl http://localhost:4444/teams/{TEAM_ID}/invitations -H "Authorization: Bearer "', - "request_body": "", - "expected_status": 200, - "expected_response": "Array of pending invitations", - "test_steps": [ - "1. Use valid team ID", - "2. Request invitations list", - "3. Verify HTTP 200 status", - "4. Check invitations array", - "5. Verify invitation details (email, role, status)", - "6. Test permissions (team owners only)" - ], - "validation": "Invitation list accessible to team owners" - }, - { - "id": "TEAM-008", - "endpoint": "/teams/{id}/invitations", - "method": "POST", - "description": "Create team invitation", - "curl_command": 'curl -X POST http://localhost:4444/teams/{TEAM_ID}/invitations -H "Authorization: Bearer " -H "Content-Type: application/json"', - "request_body": '{"email":"invitee@example.com","role":"member","message":"Join our testing team!"}', - "expected_status": 201, - "expected_response": "Invitation created and sent", - "test_steps": [ - "1. Prepare invitation data", - "2. Execute invitation creation", - "3. Verify HTTP 201 status", - "4. Check invitation created in database", - "5. Verify email sent (if email configured)", - "6. Test invitation token functionality" - ], - "validation": "Invitation created with valid token" - }, - { - "id": "TEAM-009", - "endpoint": "/teams/{id}/leave", - "method": "POST", - "description": "Leave team", - "curl_command": 'curl -X POST http://localhost:4444/teams/{TEAM_ID}/leave -H "Authorization: Bearer "', - "request_body": "", - "expected_status": 200, - "expected_response": "Successfully left team (or 403 if personal team)", - "test_steps": [ - "1. Use non-personal team ID", - "2. Execute leave team request", - "3. Verify appropriate response", - "4. Check user no longer in member list", - "5. Test that personal teams cannot be left", - "6. Verify access to team resources is removed" - ], - "validation": "Team leave functionality works, personal teams protected" - }, - { - "id": "TEAM-010", - "endpoint": "/teams/{id}", - "method": "DELETE", - "description": "Delete team", - "curl_command": 'curl -X DELETE http://localhost:4444/teams/{TEAM_ID} -H "Authorization: Bearer "', - "request_body": "", - "expected_status": 204, - "expected_response": "Team deleted successfully (or 403 if personal team)", - "test_steps": [ - "1. Use test team ID (not personal team)", - "2. Execute team deletion request", - "3. Verify appropriate HTTP status", - "4. Check team no longer exists", - "5. Test that personal teams cannot be deleted", - "6. Verify team resources are handled properly" - ], - "validation": "Team deletion works, personal teams protected" - } -] - - -def run_teams_tests(): - """Run all teams API tests.""" - - print("👥 TEAMS API TESTING") - print("=" * 60) - print("🎯 Testing team management endpoints") - - results = [] - - print("\\n🔧 Pre-test Requirements:") - print("1. MCP Gateway running (make dev)") - print("2. Valid JWT token (from login)") - print("3. Admin access for team operations") - - # Get JWT token - token = input("\\nEnter JWT token (from auth login test): ").strip() - if not token: - print("❌ JWT token required for team API testing") - return [] - - print("\\n🧪 Executing Teams API Tests...") - - for test in TEAMS_TESTS: - print(f"\\n{'='*60}") - print(f"🧪 TEST {test['id']}: {test['endpoint']} ({test['method']})") - print(f"Description: {test['description']}") - - print(f"\\n📋 Test Steps:") - for step in test['test_steps']: - print(f" {step}") - - # Show curl command with token - curl_cmd = test['curl_command'].replace('', token) - print(f"\\n💻 cURL Command:") - print(f" {curl_cmd}") - if test['request_body']: - print(f" Data: {test['request_body']}") - - print(f"\\n✅ Expected:") - print(f" Status: {test['expected_status']}") - print(f" Response: {test['expected_response']}") - - # Manual execution - response = input(f"\\nExecute test {test['id']}? (y/n/skip): ").lower() - - if response == 'skip' or response == 's': - results.append({"id": test['id'], "status": "SKIP"}) - continue - elif response != 'y': - break - - # Get results - actual_status = input("Actual HTTP status: ") - actual_response = input("Response summary: ") - - passed = actual_status == str(test['expected_status']) - status = "PASS" if passed else "FAIL" - - print(f"\\n{'✅' if passed else '❌'} {test['id']}: {status}") - - results.append({ - "id": test['id'], - "endpoint": test['endpoint'], - "status": status, - "actual_status": actual_status, - "actual_response": actual_response, - "timestamp": datetime.now().isoformat() - }) - - # Save results - results_file = Path("tests/manual/teams_test_results.json") - with open(results_file, 'w') as f: - json.dump(results, f, indent=2) - - print(f"\\n📄 Results saved: {results_file}") - return results - - -if __name__ == "__main__": - if len(sys.argv) > 1 and sys.argv[1] == "--help": - print("👥 Teams API Tests") - print("Usage:") - print(" python3 tests/manual/api_teams_tests.py # Run all tests") - print(" python3 tests/manual/api_teams_tests.py --help # This help") - else: - try: - results = run_teams_tests() - print("\\n🎉 Teams API testing complete!") - except KeyboardInterrupt: - print("\\n❌ Testing cancelled") - except Exception as e: - print(f"❌ Error: {e}") - sys.exit(1) \ No newline at end of file diff --git a/tests/manual/database_tests.py b/tests/manual/database_tests.py deleted file mode 100644 index af885450f..000000000 --- a/tests/manual/database_tests.py +++ /dev/null @@ -1,284 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -MCP Gateway v0.7.0 - Database Compatibility Tests - -Testing both SQLite and PostgreSQL compatibility including: -- Migration execution and rollback -- Data integrity and constraints -- Performance characteristics -- Advanced database features - -Usage: - python3 tests/manual/database_tests.py --sqlite - python3 tests/manual/database_tests.py --postgresql - python3 tests/manual/database_tests.py --both -""" - -import sys -import subprocess -import json -from pathlib import Path -from datetime import datetime - -# Database test cases -DATABASE_TESTS = { - "sqlite": [ - { - "id": "SQLite-001", - "feature": "Migration Execution", - "description": "Test migration on SQLite database", - "commands": [ - "# Set SQLite database URL", - "export DATABASE_URL=sqlite:///./test_migration.db", - "# Run migration", - "python3 -m mcpgateway.bootstrap_db", - "# Check tables created", - "sqlite3 test_migration.db '.tables'" - ], - "expected": "All multitenancy tables created: email_users, email_teams, etc.", - "performance": "Fast", - "validation": "sqlite3 test_migration.db 'SELECT COUNT(*) FROM email_users;'" - }, - { - "id": "SQLite-002", - "feature": "Team Data Population", - "description": "Verify old resources get team assignments", - "commands": [ - "# Check servers have team assignments", - "sqlite3 mcp.db 'SELECT COUNT(*) FROM servers WHERE team_id IS NOT NULL;'", - "# Check tools have team assignments", - "sqlite3 mcp.db 'SELECT COUNT(*) FROM tools WHERE team_id IS NOT NULL;'", - "# Check for any NULL team assignments", - "sqlite3 mcp.db 'SELECT COUNT(*) FROM servers WHERE team_id IS NULL;'" - ], - "expected": "All resources have team_id populated, no NULL values", - "performance": "Fast", - "validation": "Zero NULL team_id values in resource tables" - }, - { - "id": "SQLite-003", - "feature": "Connection Pool Management", - "description": "Test SQLite connection handling", - "commands": [ - "# Set connection pool size", - "export DB_POOL_SIZE=50", - "# Start gateway and test concurrent connections", - "make dev &", - "# Run multiple concurrent API calls", - "for i in {1..20}; do curl http://localhost:4444/health & done; wait" - ], - "expected": "Connections managed within SQLite limits (~50 max)", - "performance": "Good with limitations", - "validation": "No connection errors, stable performance" - }, - { - "id": "SQLite-004", - "feature": "JSON Field Operations", - "description": "Test JSON data storage and querying", - "commands": [ - "# Check JSON fields in tools table", - "sqlite3 mcp.db 'SELECT name, schema FROM tools LIMIT 5;'", - "# Test JSON field updates", - "sqlite3 mcp.db 'UPDATE tools SET schema = json_set(schema, \"$.test\", \"value\") WHERE id = (SELECT id FROM tools LIMIT 1);'" - ], - "expected": "JSON data stored and queried correctly", - "performance": "Good", - "validation": "JSON fields readable and updateable" - }, - { - "id": "SQLite-005", - "feature": "Backup and Restore", - "description": "Test file-based backup/restore", - "commands": [ - "# Create backup", - "cp mcp.db backup_test.db", - "# Make some changes", - "sqlite3 mcp.db 'INSERT INTO email_teams (id, name, slug, created_by, is_personal, visibility, is_active, created_at, updated_at) VALUES (\"test-backup\", \"Backup Test\", \"backup-test\", \"admin@example.com\", 0, \"private\", 1, datetime(\"now\"), datetime(\"now\"));'", - "# Restore from backup", - "cp backup_test.db mcp.db", - "# Verify restore worked", - "sqlite3 mcp.db 'SELECT COUNT(*) FROM email_teams WHERE name = \"Backup Test\";'" - ], - "expected": "File-based backup and restore works perfectly", - "performance": "Excellent", - "validation": "Data restored exactly, test data should be gone" - } - ], - "postgresql": [ - { - "id": "PG-001", - "feature": "Migration Execution", - "description": "Test migration on PostgreSQL database", - "commands": [ - "# Set PostgreSQL database URL", - "export DATABASE_URL=postgresql://postgres:password@localhost:5432/mcp_test", - "# Create test database", - "createdb mcp_test", - "# Run migration", - "python3 -m mcpgateway.bootstrap_db", - "# Check tables", - "psql mcp_test -c '\\\\dt' | grep email" - ], - "expected": "All tables created with PostgreSQL-specific data types", - "performance": "Fast", - "validation": "psql mcp_test -c 'SELECT COUNT(*) FROM email_users;'" - }, - { - "id": "PG-002", - "feature": "Advanced Data Types", - "description": "Test UUID, JSONB, and advanced PostgreSQL features", - "commands": [ - "# Check UUID columns", - "psql mcp_test -c 'SELECT id FROM email_teams LIMIT 1;'", - "# Test JSONB operations", - "psql mcp_test -c 'SELECT config FROM servers WHERE config IS NOT NULL LIMIT 1;'", - "# Test advanced queries", - "psql mcp_test -c 'SELECT * FROM tools WHERE schema @> \\'{\"type\":\"object\"}\\';'" - ], - "expected": "Advanced PostgreSQL data types work correctly", - "performance": "Excellent", - "validation": "UUIDs valid, JSONB queries work" - }, - { - "id": "PG-003", - "feature": "High Concurrency", - "description": "Test PostgreSQL connection pool and concurrency", - "commands": [ - "# Set high connection pool", - "export DB_POOL_SIZE=200", - "# Start gateway", - "make dev &", - "# Run high concurrency test", - "for i in {1..100}; do curl http://localhost:4444/health & done; wait" - ], - "expected": "High concurrency supported (200+ connections)", - "performance": "Excellent", - "validation": "All requests succeed, no connection errors" - }, - { - "id": "PG-004", - "feature": "JSONB Advanced Operations", - "description": "Test JSONB indexing and complex queries", - "commands": [ - "# Test JSONB containment", - "psql mcp_test -c 'SELECT name FROM tools WHERE schema @> \\'{\"type\":\"object\"}\\';'", - "# Test JSONB path queries", - "psql mcp_test -c 'SELECT name FROM tools WHERE schema #> \\'{properties}\\' IS NOT NULL;'", - "# Create JSONB index", - "psql mcp_test -c 'CREATE INDEX IF NOT EXISTS idx_tools_schema_gin ON tools USING gin(schema);'" - ], - "expected": "JSONB indexing and querying work efficiently", - "performance": "Excellent", - "validation": "Complex JSONB queries execute quickly" - }, - { - "id": "PG-005", - "feature": "Full-Text Search", - "description": "Test PostgreSQL full-text search capabilities", - "commands": [ - "# Test full-text search", - "psql mcp_test -c 'SELECT name FROM tools WHERE to_tsvector(name) @@ plainto_tsquery(\"time\");'", - "# Test search ranking", - "psql mcp_test -c 'SELECT name, ts_rank(to_tsvector(name), plainto_tsquery(\"time\")) as rank FROM tools WHERE to_tsvector(name) @@ plainto_tsquery(\"time\") ORDER BY rank DESC;'" - ], - "expected": "Advanced full-text search with ranking works", - "performance": "Excellent", - "validation": "FTS returns relevant results with ranking" - } - ] -} - - -def run_database_tests(db_type="both"): - """Run database compatibility tests.""" - - print(f"🗄️ DATABASE COMPATIBILITY TESTING") - print("=" * 60) - print(f"🎯 Testing: {db_type.upper()}") - - if db_type == "both": - print("\\n🔧 Testing both SQLite and PostgreSQL") - sqlite_results = run_db_test_suite("sqlite") - postgresql_results = run_db_test_suite("postgresql") - return {"sqlite": sqlite_results, "postgresql": postgresql_results} - else: - return run_db_test_suite(db_type) - - -def run_db_test_suite(db_type): - """Run tests for specific database type.""" - - tests = DATABASE_TESTS.get(db_type, []) - if not tests: - print(f"❌ No tests defined for {db_type}") - return [] - - print(f"\\n🗄️ {db_type.upper()} TESTING") - print("=" * 40) - - results = [] - - for test in tests: - print(f"\\n{'='*50}") - print(f"🧪 {test['id']}: {test['feature']}") - print(f"Description: {test['description']}") - - print(f"\\n💻 Commands to execute:") - for cmd in test['commands']: - if cmd.startswith('#'): - print(f" {cmd}") # Comment - else: - print(f" $ {cmd}") # Command - - print(f"\\n✅ Expected: {test['expected']}") - print(f"⚡ Performance: {test['performance']}") - - # Manual execution - response = input(f"\\nExecute {test['id']}? (y/n/skip): ").lower() - - if response == 'skip': - results.append({"id": test['id'], "status": "SKIP"}) - continue - elif response == 'y': - success = input("Did test complete successfully? (y/n): ").lower() - performance = input(f"Performance rating (Fast/Good/Slow): ") or test['performance'] - - status = "PASS" if success == 'y' else "FAIL" - - results.append({ - "id": test['id'], - "feature": test['feature'], - "status": status, - "performance": performance, - "timestamp": datetime.now().isoformat() - }) - - return results - - -if __name__ == "__main__": - if len(sys.argv) > 1: - if sys.argv[1] == "--help": - print("🗄️ Database Tests") - print("Usage:") - print(" python3 tests/manual/database_tests.py --sqlite # SQLite tests only") - print(" python3 tests/manual/database_tests.py --postgresql # PostgreSQL tests only") - print(" python3 tests/manual/database_tests.py --both # Both databases") - print(" python3 tests/manual/database_tests.py --help # This help") - elif sys.argv[1] == "--sqlite": - run_database_tests("sqlite") - elif sys.argv[1] == "--postgresql": - run_database_tests("postgresql") - elif sys.argv[1] == "--both": - run_database_tests("both") - else: - print("❌ Unknown option. Use --help") - else: - # Default to both - try: - results = run_database_tests("both") - print("\\n🎉 Database testing complete!") - except Exception as e: - print(f"❌ Error: {e}") - sys.exit(1) \ No newline at end of file diff --git a/tests/manual/generate_test_plan.py b/tests/manual/generate_test_plan.py new file mode 100755 index 000000000..e7268f1df --- /dev/null +++ b/tests/manual/generate_test_plan.py @@ -0,0 +1,191 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +MCP Gateway v0.7.0 - Test Plan Generator from YAML + +Generates Excel test plan from YAML test definition files. +Much cleaner and more maintainable approach. + +Usage: + python3 generate_test_plan.py +""" + +import sys +import yaml +from pathlib import Path +from datetime import datetime + +try: + import openpyxl + from openpyxl.styles import PatternFill, Font + from openpyxl.utils import get_column_letter + from openpyxl.worksheet.table import Table, TableStyleInfo +except ImportError: + print("❌ Install: pip install openpyxl pyyaml") + sys.exit(1) + + +def generate_excel_from_yaml(): + """Generate Excel file from YAML test definitions.""" + + print("📊 GENERATING EXCEL FROM YAML TEST FILES") + print("=" * 60) + print("📁 Reading from testcases/ directory") + + # Find YAML files in testcases directory + testcases_dir = Path("testcases") + if not testcases_dir.exists(): + print("❌ testcases/ directory not found") + return False + + yaml_files = list(testcases_dir.glob("*.yaml")) + yaml_files = sorted(yaml_files) + + if not yaml_files: + print("❌ No YAML test files found") + return False + + print(f"📄 Found {len(yaml_files)} YAML files:") + for yf in yaml_files: + print(f" 📄 {yf.name}") + + # Create Excel workbook + wb = openpyxl.Workbook() + wb.remove(wb.active) + + # Styles + styles = { + 'title': Font(size=16, bold=True, color="1F4E79"), + 'header_fill': PatternFill(start_color="4F81BD", end_color="4F81BD", fill_type="solid"), + 'header_font': Font(color="FFFFFF", bold=True), + 'critical_fill': PatternFill(start_color="C5504B", end_color="C5504B", fill_type="solid"), + 'critical_font': Font(color="FFFFFF", bold=True) + } + + # Process each YAML file + for yaml_file in yaml_files: + try: + with open(yaml_file, 'r') as f: + yaml_data = yaml.safe_load(f) + + worksheet_name = yaml_data.get('worksheet_name', yaml_file.stem) + headers = yaml_data.get('headers', []) + tests = yaml_data.get('tests', []) + + print(f"\n 📄 {yaml_file.name} → {worksheet_name}") + print(f" 📊 {len(tests)} tests") + + # Create worksheet + sheet = wb.create_sheet(worksheet_name) + + # Add headers + for i, header in enumerate(headers, 1): + cell = sheet.cell(row=1, column=i, value=header) + cell.fill = styles['header_fill'] + cell.font = styles['header_font'] + + # Add test data + for row_idx, test in enumerate(tests, 2): + for col_idx, header in enumerate(headers, 1): + value = get_yaml_value(test, header) + cell = sheet.cell(row=row_idx, column=col_idx, value=value) + + # Apply formatting + if header.lower() == "priority" and value == "CRITICAL": + cell.fill = styles['critical_fill'] + cell.font = styles['critical_font'] + elif header.lower() == "status": + cell.value = "☐" + + # Auto-size columns + for col in range(1, len(headers) + 1): + max_len = 0 + for row in range(1, min(len(tests) + 2, 20)): + val = sheet.cell(row=row, column=col).value + if val: + max_len = max(max_len, len(str(val))) + width = min(max(max_len + 2, 10), 60) + sheet.column_dimensions[get_column_letter(col)].width = width + + print(f" ✅ Created") + + except Exception as e: + print(f" ❌ Failed: {e}") + + # Save file + output_path = Path("test-plan.xlsx") + + try: + print(f"\n💾 Saving Excel file...") + wb.save(output_path) + wb.close() # CRITICAL: Close properly + + print(f"✅ File saved: {output_path}") + + # Verify + test_wb = openpyxl.load_workbook(output_path) + print(f"✅ Verified: {len(test_wb.worksheets)} worksheets") + test_wb.close() + + print("\n🎊 SUCCESS! Excel generated from YAML files!") + return True + + except Exception as e: + print(f"❌ Save failed: {e}") + return False + + +def get_yaml_value(test, header): + """Get value from YAML test data for Excel header.""" + + mappings = { + "Test ID": "test_id", + "Priority": "priority", + "Component": "component", + "Description": "description", + "Detailed Steps": "steps", + "Steps": "steps", + "Expected Result": "expected", + "Expected": "expected", + "Endpoint": "endpoint", + "Method": "method", + "cURL Command": "curl_command", + "Request Body": "request_body", + "Expected Status": "expected_status", + "Expected Response": "expected_response", + "Attack Type": "attack_type", + "Target": "target", + "Risk Level": "risk_level", + "Attack Steps": "attack_steps", + "Expected Defense": "expected_defense" + } + + yaml_key = mappings.get(header, header.lower().replace(' ', '_')) + value = test.get(yaml_key, "") + + # Handle special cases + if header in ["SQLite", "PostgreSQL"]: + return "✓" if test.get(f'{header.lower()}_support', True) else "❌" + elif header in ["Actual Output", "Actual Status", "Actual Response", "Tester", "Date", "Comments"]: + return "" # Empty for tester to fill + elif header == "Status": + return "☐" + + return str(value) if value else "" + + +if __name__ == "__main__": + if len(sys.argv) > 1 and sys.argv[1] == "--help": + print("📊 Test Plan Generator from YAML") + print("Usage:") + print(" python3 generate_test_plan.py # Generate Excel from YAML") + print(" python3 generate_test_plan.py --help # This help") + print("\nEdit YAML files to update tests, then regenerate Excel.") + else: + try: + success = generate_excel_from_yaml() + if not success: + sys.exit(1) + except Exception as e: + print(f"❌ Error: {e}") + sys.exit(1) \ No newline at end of file diff --git a/tests/manual/generate_test_plan.sh b/tests/manual/generate_test_plan.sh deleted file mode 100755 index f50e63bc4..000000000 --- a/tests/manual/generate_test_plan.sh +++ /dev/null @@ -1,71 +0,0 @@ -#!/bin/bash -# -*- coding: utf-8 -*- -# MCP Gateway v0.7.0 - Test Plan Generator -# -# Generates comprehensive Excel test plan from Python test files. -# Creates clean, non-corrupted Excel file ready for 10 testers. - -set -e # Exit on any error - -echo "🎯 MCP GATEWAY TEST PLAN GENERATOR" -echo "==================================" -echo "📊 Generating Excel from Python test files" -echo "👥 Ready for 10 manual testers" -echo "" - -# Check prerequisites -echo "🔧 Checking prerequisites..." - -if ! command -v python3 &> /dev/null; then - echo "❌ python3 not found. Please install Python 3.11+" - exit 1 -fi - -# Check openpyxl -if ! python3 -c "import openpyxl" 2>/dev/null; then - echo "📦 Installing openpyxl..." - pip install openpyxl -fi - -echo "✅ Prerequisites OK" - -# Generate Excel file -echo "" -echo "📊 Generating Excel test plan..." -python3 generate_test_plan_xlsx.py - -if [ $? -eq 0 ]; then - echo "" - echo "🎉 SUCCESS!" - echo "📄 Excel file created: test-plan.xlsx" - - if [ -f "test-plan.xlsx" ]; then - file_size=$(stat -c%s "test-plan.xlsx" 2>/dev/null || stat -f%z "test-plan.xlsx" 2>/dev/null || echo "unknown") - echo "📏 File size: $file_size bytes" - - # Test file opens - if python3 -c "import openpyxl; wb=openpyxl.load_workbook('test-plan.xlsx'); print(f'✅ Verified: {len(wb.worksheets)} worksheets'); wb.close()" 2>/dev/null; then - echo "✅ File verification: Opens cleanly" - else - echo "⚠️ File verification: Could not verify" - fi - fi - - echo "" - echo "🎯 Next Steps:" - echo " 1. Open test-plan.xlsx in Excel or LibreOffice" - echo " 2. Review all worksheets (8 total)" - echo " 3. Focus on 'Migration Tests' worksheet (main server visibility test)" - echo " 4. Distribute to 10 testers for execution" - echo "" - echo "👥 Tester Options:" - echo " • Excel file: Open test-plan.xlsx and follow worksheets" - echo " • Python files: Run individual test files directly" - echo " • Coordinated: python3 run_all_tests.py" - echo "" - echo "🚀 READY FOR COMPREHENSIVE TESTING!" - -else - echo "❌ Excel generation failed" - exit 1 -fi \ No newline at end of file diff --git a/tests/manual/generate_test_plan_xlsx.py b/tests/manual/generate_test_plan_xlsx.py deleted file mode 100644 index 9d4845a09..000000000 --- a/tests/manual/generate_test_plan_xlsx.py +++ /dev/null @@ -1,344 +0,0 @@ -#!/usr/bin/env python3 -""" -Working Excel Test Plan Generator - -Creates clean Excel file that opens without corruption. -All worksheets fully populated with real test data. -""" - -import openpyxl -from openpyxl.styles import PatternFill, Font -from openpyxl.utils import get_column_letter -from pathlib import Path - - -def create_working_excel(): - """Create working Excel file.""" - - print("🔧 Creating Working Excel Test Plan") - print("=" * 50) - - # Create workbook - wb = openpyxl.Workbook() - wb.remove(wb.active) - - # Styles - header_fill = PatternFill(start_color="4F81BD", end_color="4F81BD", fill_type="solid") - header_font = Font(color="FFFFFF", bold=True) - critical_fill = PatternFill(start_color="C5504B", end_color="C5504B", fill_type="solid") - critical_font = Font(color="FFFFFF", bold=True) - - # 1. Setup Instructions - print(" 1. Setup Instructions") - sheet1 = wb.create_sheet("Setup Instructions") - create_setup_data(sheet1, header_fill, header_font) - - # 2. Migration Tests (CRITICAL) - print(" 2. Migration Tests") - sheet2 = wb.create_sheet("Migration Tests") - create_migration_data(sheet2, header_fill, header_font, critical_fill, critical_font) - - # 3. API Authentication - print(" 3. API Authentication") - sheet3 = wb.create_sheet("API Authentication") - create_auth_data(sheet3, header_fill, header_font) - - # 4. API Teams - print(" 4. API Teams") - sheet4 = wb.create_sheet("API Teams") - create_teams_data(sheet4, header_fill, header_font) - - # 5. API Servers - print(" 5. API Servers") - sheet5 = wb.create_sheet("API Servers") - create_servers_data(sheet5, header_fill, header_font) - - # 6. Admin UI - print(" 6. Admin UI") - sheet6 = wb.create_sheet("Admin UI") - create_ui_data(sheet6, header_fill, header_font) - - # 7. Database Tests - print(" 7. Database Tests") - sheet7 = wb.create_sheet("Database Tests") - create_db_data(sheet7, header_fill, header_font) - - # 8. Security Tests - print(" 8. Security Tests") - sheet8 = wb.create_sheet("Security Tests") - create_security_data(sheet8, header_fill, header_font, critical_fill, critical_font) - - # Save file properly - filepath = Path("test-plan.xlsx") - print("\\n💾 Saving file...") - - wb.save(filepath) - print("✅ File saved") - - # CRITICAL: Close workbook properly - wb.close() - print("✅ File closed") - - # Verify - print("\\n🔍 Verifying...") - try: - test_wb = openpyxl.load_workbook(filepath) - print(f"✅ Opens successfully: {len(test_wb.worksheets)} worksheets") - - # Check key worksheets - for sheet in test_wb.worksheets: - test_count = max(0, sheet.max_row - 1) - print(f" 📄 {sheet.title}: {test_count} tests") - - test_wb.close() - print("✅ Test file closed") - - print("\\n🎊 SUCCESS! Working Excel file created!") - return True - - except Exception as e: - print(f"❌ Verification failed: {e}") - return False - - -def create_setup_data(sheet, header_fill, header_font): - """Create setup instructions data.""" - - headers = ["Step", "Action", "Command", "Expected", "Status", "Notes"] - for i, header in enumerate(headers, 1): - cell = sheet.cell(row=1, column=i, value=header) - cell.fill = header_fill - cell.font = header_font - - data = [ - ["1", "Check Prerequisites", "python3 --version && git --version", "Python 3.11+ and Git installed", "☐", "Must have both"], - ["2", "Clone Repository", "git clone ", "Repository downloaded", "☐", "Get URL from admin"], - ["3", "Enter Directory", "cd mcp-context-forge", "Directory changed", "☐", ""], - ["4", "Copy Environment", "cp .env.example .env", "Environment file created", "☐", ""], - ["5", "Edit Configuration", "vi .env", "Configuration edited", "☐", "Set admin email/password"], - ["6", "Install Dependencies", "make install-dev", "Dependencies installed", "☐", "May take 5-10 minutes"], - ["7", "Run Migration", "python3 -m mcpgateway.bootstrap_db", "Migration completed", "☐", "CRITICAL STEP"], - ["8", "Verify Migration", "python3 scripts/verify_multitenancy_0_7_0_migration.py", "All checks pass", "☐", "Must pass"], - ["9", "Start Gateway", "make dev", "Server running on port 4444", "☐", "Keep terminal open"], - ["10", "Test Health", "curl http://localhost:4444/health", '{"status":"ok"}', "☐", "Basic connectivity"], - ["11", "Access Admin UI", "Open http://localhost:4444/admin", "Login page loads", "☐", ""], - ["12", "Test Login", "Login with admin credentials", "Dashboard appears", "☐", "Main validation"] - ] - - for i, row in enumerate(data, 2): - for j, value in enumerate(row, 1): - sheet.cell(row=i, column=j, value=value) - - auto_size_columns(sheet) - - -def create_migration_data(sheet, header_fill, header_font, critical_fill, critical_font): - """Create migration test data.""" - - headers = ["Test ID", "Priority", "Component", "Description", "Steps", "Expected", "Actual", "Status", "Tester", "Comments", "SQLite", "PostgreSQL"] - - for i, header in enumerate(headers, 1): - cell = sheet.cell(row=1, column=i, value=header) - cell.fill = header_fill - cell.font = header_font - - data = [ - ["MIG-001", "CRITICAL", "Admin User", "Platform admin created", "Check admin user exists in database", "Admin user found with is_admin=true", "", "☐", "", "", "✓", "✓"], - ["MIG-002", "CRITICAL", "Personal Team", "Admin personal team exists", "Run verification script", "Personal team found", "", "☐", "", "", "✓", "✓"], - ["MIG-003", "CRITICAL", "Server Visibility", "OLD SERVERS VISIBLE - MAIN TEST", "Open admin UI, navigate to Virtual Servers", "ALL servers visible including old ones", "", "☐", "", "MAIN MIGRATION TEST", "✓", "✓"], - ["MIG-004", "CRITICAL", "Resource Teams", "Resources assigned to teams", "Check team assignments in UI and DB", "All resources have team_id populated", "", "☐", "", "", "✓", "✓"], - ["MIG-005", "CRITICAL", "Email Auth", "Email authentication works", "Test login with email/password", "Email login successful", "", "☐", "", "", "✓", "✓"], - ["MIG-006", "HIGH", "Basic Auth", "Basic auth compatibility", "Test basic authentication", "Basic auth still works", "", "☐", "", "", "✓", "✓"], - ["MIG-007", "HIGH", "API Functionality", "APIs respond correctly", "Test core API endpoints", "All APIs return expected responses", "", "☐", "", "", "✓", "✓"], - ["MIG-008", "MEDIUM", "Team Membership", "Admin team ownership", "Check admin is team owner", "Admin listed as owner of personal team", "", "☐", "", "", "✓", "✓"] - ] - - for i, row in enumerate(data, 2): - for j, value in enumerate(row, 1): - cell = sheet.cell(row=i, column=j, value=value) - if j == 2 and value == "CRITICAL": # Priority column - cell.fill = critical_fill - cell.font = critical_font - - auto_size_columns(sheet) - - -def create_auth_data(sheet, header_fill, header_font): - """Create authentication API data.""" - - headers = ["Test ID", "Endpoint", "Method", "Description", "cURL Command", "Expected Status", "Expected Response", "Actual Status", "Actual Response", "Status", "Tester", "Comments"] - - for i, header in enumerate(headers, 1): - cell = sheet.cell(row=1, column=i, value=header) - cell.fill = header_fill - cell.font = header_font - - data = [ - ["AUTH-001", "/auth/register", "POST", "User registration", 'curl -X POST http://localhost:4444/auth/register -d \'{"email":"test@example.com","password":"Test123"}\'', "201", "User created", "", "", "☐", "", ""], - ["AUTH-002", "/auth/login", "POST", "Email login", 'curl -X POST http://localhost:4444/auth/login -d \'{"email":"admin@example.com","password":"changeme"}\'', "200", "JWT token", "", "", "☐", "", ""], - ["AUTH-003", "/auth/logout", "POST", "User logout", 'curl -X POST http://localhost:4444/auth/logout -H "Authorization: Bearer "', "200", "Logout success", "", "", "☐", "", ""], - ["AUTH-004", "/auth/refresh", "POST", "Token refresh", 'curl -X POST http://localhost:4444/auth/refresh -H "Authorization: Bearer "', "200", "New token", "", "", "☐", "", ""], - ["AUTH-005", "/auth/profile", "GET", "User profile", 'curl http://localhost:4444/auth/profile -H "Authorization: Bearer "', "200", "Profile data", "", "", "☐", "", ""], - ["AUTH-006", "/auth/sso/github", "GET", "GitHub SSO", 'curl -I http://localhost:4444/auth/sso/github', "302", "GitHub redirect", "", "", "☐", "", ""], - ["AUTH-007", "/auth/sso/google", "GET", "Google SSO", 'curl -I http://localhost:4444/auth/sso/google', "302", "Google redirect", "", "", "☐", "", ""], - ["AUTH-008", "/auth/change-password", "POST", "Password change", 'curl -X POST http://localhost:4444/auth/change-password -H "Authorization: Bearer " -d password_data', "200", "Password updated", "", "", "☐", "", ""] - ] - - for i, row in enumerate(data, 2): - for j, value in enumerate(row, 1): - sheet.cell(row=i, column=j, value=value) - - auto_size_columns(sheet) - - -def create_teams_data(sheet, header_fill, header_font): - """Create teams API data.""" - - headers = ["Test ID", "Endpoint", "Method", "Description", "cURL Command", "Expected Status", "Expected Response", "Status", "Tester", "Comments"] - - for i, header in enumerate(headers, 1): - cell = sheet.cell(row=1, column=i, value=header) - cell.fill = header_fill - cell.font = header_font - - data = [ - ["TEAM-001", "/teams", "GET", "List teams", 'curl http://localhost:4444/teams -H "Authorization: Bearer "', "200", "Team array", "☐", "", ""], - ["TEAM-002", "/teams", "POST", "Create team", 'curl -X POST http://localhost:4444/teams -d team_data -H "Authorization: Bearer "', "201", "Team created", "☐", "", ""], - ["TEAM-003", "/teams/{id}", "GET", "Team details", 'curl http://localhost:4444/teams/{ID} -H "Authorization: Bearer "', "200", "Team details", "☐", "", ""], - ["TEAM-004", "/teams/{id}/members", "GET", "Team members", 'curl http://localhost:4444/teams/{ID}/members -H "Authorization: Bearer "', "200", "Member list", "☐", "", ""], - ["TEAM-005", "/teams/{id}/invitations", "POST", "Create invitation", 'curl -X POST http://localhost:4444/teams/{ID}/invitations -d invite_data', "201", "Invitation sent", "☐", "", ""] - ] - - for i, row in enumerate(data, 2): - for j, value in enumerate(row, 1): - sheet.cell(row=i, column=j, value=value) - - auto_size_columns(sheet) - - -def create_servers_data(sheet, header_fill, header_font): - """Create servers API data.""" - - headers = ["Test ID", "Endpoint", "Method", "Description", "cURL Command", "Expected Status", "Status", "Tester"] - - for i, header in enumerate(headers, 1): - cell = sheet.cell(row=1, column=i, value=header) - cell.fill = header_fill - cell.font = header_font - - data = [ - ["SRV-001", "/servers", "GET", "List servers", 'curl http://localhost:4444/servers -H "Authorization: Bearer "', "200", "☐", ""], - ["SRV-002", "/servers", "POST", "Create server", 'curl -X POST http://localhost:4444/servers -d server_data', "201", "☐", ""], - ["SRV-003", "/servers/{id}", "GET", "Server details", 'curl http://localhost:4444/servers/{ID} -H "Authorization: Bearer "', "200", "☐", ""], - ["SRV-004", "/servers/{id}/sse", "GET", "SSE connection", 'curl -N http://localhost:4444/servers/{ID}/sse -H "Authorization: Bearer "', "200", "☐", ""], - ["SRV-005", "/servers/{id}/tools", "GET", "Server tools", 'curl http://localhost:4444/servers/{ID}/tools -H "Authorization: Bearer "', "200", "☐", ""] - ] - - for i, row in enumerate(data, 2): - for j, value in enumerate(row, 1): - sheet.cell(row=i, column=j, value=value) - - auto_size_columns(sheet) - - -def create_ui_data(sheet, header_fill, header_font): - """Create UI test data.""" - - headers = ["Test ID", "Component", "Action", "Steps", "Expected", "Status", "Tester", "Browser", "Screenshot"] - - for i, header in enumerate(headers, 1): - cell = sheet.cell(row=1, column=i, value=header) - cell.fill = header_fill - cell.font = header_font - - data = [ - ["UI-001", "Login", "Test login", "Login with admin creds", "Dashboard loads", "☐", "", "Chrome", "Optional"], - ["UI-002", "Dashboard", "View dashboard", "Check stats and navigation", "Dashboard functional", "☐", "", "Chrome", "Optional"], - ["UI-003", "Servers", "View servers - MAIN TEST", "Navigate to Virtual Servers", "ALL SERVERS VISIBLE", "☐", "", "Chrome", "REQUIRED"], - ["UI-004", "Teams", "Team management", "Navigate to Teams", "Teams functional", "☐", "", "Chrome", "Optional"], - ["UI-005", "Tools", "Tool interface", "View and invoke tools", "Tools accessible", "☐", "", "Chrome", "Optional"], - ["UI-006", "Export", "Config export", "Export configuration", "Export works", "☐", "", "Chrome", "Recommended"] - ] - - for i, row in enumerate(data, 2): - for j, value in enumerate(row, 1): - sheet.cell(row=i, column=j, value=value) - - auto_size_columns(sheet) - - -def create_db_data(sheet, header_fill, header_font): - """Create database test data.""" - - headers = ["Test ID", "Database", "Feature", "Command", "Expected", "Status", "Performance", "Notes"] - - for i, header in enumerate(headers, 1): - cell = sheet.cell(row=1, column=i, value=header) - cell.fill = header_fill - cell.font = header_font - - data = [ - ["DB-001", "SQLite", "Migration", "python3 -m mcpgateway.bootstrap_db", "Success", "☐", "Fast", ""], - ["DB-002", "SQLite", "Data Check", "sqlite3 mcp.db 'SELECT COUNT(*) FROM servers;'", "Count > 0", "☐", "Fast", ""], - ["DB-003", "PostgreSQL", "Migration", "Set PG URL, run migration", "Success", "☐", "Fast", ""], - ["DB-004", "PostgreSQL", "Advanced Types", "Test UUID, JSONB", "Advanced features work", "☐", "Excellent", ""], - ["DB-005", "Both", "Performance", "Large dataset test", "Good performance", "☐", "Variable", ""] - ] - - for i, row in enumerate(data, 2): - for j, value in enumerate(row, 1): - sheet.cell(row=i, column=j, value=value) - - auto_size_columns(sheet) - - -def create_security_data(sheet, header_fill, header_font, critical_fill, critical_font): - """Create security test data.""" - - headers = ["Test ID", "Attack Type", "Target", "Description", "Expected Defense", "Risk Level", "Status", "Tester", "Notes"] - - for i, header in enumerate(headers, 1): - cell = sheet.cell(row=1, column=i, value=header) - cell.fill = header_fill - cell.font = header_font - - data = [ - ["SEC-001", "SQL Injection", "API", "SQL injection attempt", "Input sanitized", "Critical", "☐", "", ""], - ["SEC-002", "JWT Tampering", "Auth", "Token manipulation", "Token rejected", "Critical", "☐", "", ""], - ["SEC-003", "Team Bypass", "Authorization", "Cross-team access", "Access denied", "Critical", "☐", "", ""], - ["SEC-004", "XSS Attack", "UI", "Script injection", "Scripts escaped", "High", "☐", "", ""], - ["SEC-005", "Brute Force", "Login", "Password attack", "Account locked", "Medium", "☐", "", ""] - ] - - for i, row in enumerate(data, 2): - for j, value in enumerate(row, 1): - cell = sheet.cell(row=i, column=j, value=value) - if j == 6 and value == "Critical": # Risk Level - cell.fill = critical_fill - cell.font = critical_font - - auto_size_columns(sheet) - - -def auto_size_columns(sheet): - """Auto-size columns.""" - - for col in range(1, sheet.max_column + 1): - max_length = 0 - for row in range(1, min(sheet.max_row + 1, 20)): - cell_value = sheet.cell(row=row, column=col).value - if cell_value: - max_length = max(max_length, len(str(cell_value))) - - width = min(max(max_length + 2, 10), 50) - sheet.column_dimensions[get_column_letter(col)].width = width - - -if __name__ == "__main__": - try: - success = create_working_excel() - if not success: - sys.exit(1) - except Exception as e: - print(f"❌ Error: {e}") - sys.exit(1) \ No newline at end of file diff --git a/tests/manual/migration_tests.py b/tests/manual/migration_tests.py deleted file mode 100644 index 07c86587d..000000000 --- a/tests/manual/migration_tests.py +++ /dev/null @@ -1,405 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -MCP Gateway v0.7.0 - Migration Validation Tests - -Critical post-migration validation tests to ensure the v0.6.0 → v0.7.0 -upgrade completed successfully and old servers are now visible. - -Usage: - python3 tests/manual/migration_tests.py - python3 tests/manual/migration_tests.py --run-all -""" - -import sys -import subprocess -import json -from pathlib import Path -from datetime import datetime - -# Add project root to path -project_root = Path(__file__).parent.parent.parent -sys.path.insert(0, str(project_root)) - -# Migration test cases -MIGRATION_TESTS = [ - { - "id": "MIG-001", - "priority": "CRITICAL", - "component": "Admin User Creation", - "description": "Verify platform admin user was created during migration", - "steps": [ - "1. Check expected admin email from configuration:", - ' python3 -c "from mcpgateway.config import settings; print(f\'Expected admin: {settings.platform_admin_email}\')"', - "2. Check actual admin user in database:", - ' python3 -c "from mcpgateway.db import SessionLocal, EmailUser; db=SessionLocal(); admin=db.query(EmailUser).filter(EmailUser.is_admin==True).first(); print(f\'Found admin: {admin.email if admin else None}, is_admin: {admin.is_admin if admin else False}\'); db.close()"', - "3. Compare expected vs actual results", - "4. Record both outputs exactly" - ], - "expected": "Expected admin email matches found admin email, is_admin=True", - "validation_command": 'python3 -c "from mcpgateway.config import settings; from mcpgateway.db import SessionLocal, EmailUser; db=SessionLocal(); admin=db.query(EmailUser).filter(EmailUser.email==settings.platform_admin_email, EmailUser.is_admin==True).first(); result = \'PASS\' if admin else \'FAIL\'; print(f\'Result: {result} - Admin {settings.platform_admin_email} exists: {admin is not None}\'); db.close()"' - }, - { - "id": "MIG-002", - "priority": "CRITICAL", - "component": "Personal Team Creation", - "description": "Verify admin user has personal team created automatically", - "steps": [ - "1. Run the verification script:", - " python3 scripts/verify_multitenancy_0_7_0_migration.py", - "2. Look for 'PERSONAL TEAM CHECK' section in the output", - "3. Record the team ID, name, and slug shown", - "4. Verify there are no error messages", - "5. Note team visibility (should be 'private')" - ], - "expected": "✅ Personal team found: (Team ID: , Slug: , Visibility: private)", - "validation_command": 'python3 -c "from mcpgateway.db import SessionLocal, EmailTeam, EmailUser; from mcpgateway.config import settings; db=SessionLocal(); admin=db.query(EmailUser).filter(EmailUser.email==settings.platform_admin_email).first(); team=db.query(EmailTeam).filter(EmailTeam.created_by==settings.platform_admin_email, EmailTeam.is_personal==True).first() if admin else None; result = \'PASS\' if team else \'FAIL\'; print(f\'Result: {result} - Personal team exists: {team is not None}\'); db.close()"' - }, - { - "id": "MIG-003", - "priority": "CRITICAL", - "component": "Server Visibility Fix", - "description": "OLD SERVERS NOW VISIBLE - This is the main issue being fixed", - "steps": [ - "1. Open web browser (Chrome or Firefox recommended)", - "2. Navigate to: http://localhost:4444/admin", - "3. Login using admin email and password from your .env file", - "4. Click 'Virtual Servers' in the navigation menu", - "5. Count the total number of servers displayed", - "6. Look for servers with older creation dates (pre-migration)", - "7. Click on each server to verify details are accessible", - "8. Take screenshot of the server list showing all servers", - "9. Record server names, creation dates, and visibility" - ], - "expected": "ALL pre-migration servers visible in admin UI server list, details accessible", - "validation_command": 'python3 -c "from mcpgateway.db import SessionLocal, Server; db=SessionLocal(); total=db.query(Server).count(); with_teams=db.query(Server).filter(Server.team_id!=None).count(); print(f\'Server visibility: {with_teams}/{total} servers have team assignments\'); result = \'PASS\' if with_teams == total else \'FAIL\'; print(f\'Result: {result}\'); db.close()"', - "main_test": True, - "screenshot_required": True - }, - { - "id": "MIG-004", - "priority": "CRITICAL", - "component": "Resource Team Assignment", - "description": "All resources assigned to teams (no NULL team_id values)", - "steps": [ - "1. In admin UI, navigate to Tools section", - "2. Click on any tool to view its details", - "3. Verify 'Team' field shows a team name (not empty or NULL)", - "4. Verify 'Owner' field shows the admin email address", - "5. Verify 'Visibility' field has a value (private/team/public)", - "6. Repeat this check for Resources and Prompts sections", - "7. Run database verification command to check for NULL team assignments", - "8. Record the count of unassigned resources" - ], - "expected": "All resources show Team/Owner/Visibility, database query shows 0 unassigned", - "validation_command": 'python3 -c "from mcpgateway.db import SessionLocal, Tool, Resource, Prompt; db=SessionLocal(); tool_null=db.query(Tool).filter(Tool.team_id==None).count(); res_null=db.query(Resource).filter(Resource.team_id==None).count(); prompt_null=db.query(Prompt).filter(Prompt.team_id==None).count(); total_null=tool_null+res_null+prompt_null; print(f\'Unassigned resources: Tools={tool_null}, Resources={res_null}, Prompts={prompt_null}, Total={total_null}\'); result = \'PASS\' if total_null == 0 else \'FAIL\'; print(f\'Result: {result}\'); db.close()"' - }, - { - "id": "MIG-005", - "priority": "CRITICAL", - "component": "Email Authentication", - "description": "Email-based authentication functional after migration", - "steps": [ - "1. Open new private/incognito browser window", - "2. Navigate to http://localhost:4444/admin", - "3. Look for email login form or 'Email Login' option", - "4. Enter the admin email address from your .env file", - "5. Enter the admin password from your .env file", - "6. Click the Login/Submit button", - "7. Verify successful redirect to admin dashboard", - "8. Check that user menu/profile shows the correct email address" - ], - "expected": "Email authentication successful, dashboard loads, correct email displayed in UI", - "validation_command": 'curl -s -X POST http://localhost:4444/auth/login -H "Content-Type: application/json" -d \'{"email":"admin@example.com","password":"changeme"}\' | python3 -c "import json, sys; data=json.load(sys.stdin); print(f\'Email auth result: {\"PASS\" if \"token\" in data else \"FAIL\"} - Token present: {\"token\" in data}\')"' - }, - { - "id": "MIG-006", - "priority": "HIGH", - "component": "Basic Auth Compatibility", - "description": "Basic authentication still works alongside email auth", - "steps": [ - "1. Open a new browser window (not incognito)", - "2. Navigate to http://localhost:4444/admin", - "3. When browser prompts for authentication, use basic auth:", - " Username: admin", - " Password: changeme", - "4. Verify access is granted to admin interface", - "5. Navigate to different admin sections to test functionality", - "6. Confirm no conflicts with email authentication" - ], - "expected": "Basic auth continues to work, no conflicts with email auth system", - "validation_command": 'curl -s -u admin:changeme http://localhost:4444/admin/teams | python3 -c "import json, sys; try: data=json.load(sys.stdin); print(\'Basic auth result: PASS - API accessible\'); except: print(\'Basic auth result: FAIL - API not accessible\')"' - }, - { - "id": "MIG-007", - "priority": "HIGH", - "component": "Database Schema Validation", - "description": "All multitenancy tables created with proper structure", - "steps": [ - "1. Check multitenancy tables exist:", - " SQLite: sqlite3 mcp.db '.tables' | grep email", - " PostgreSQL: psql -d mcp -c '\\\\dt' | grep email", - "2. Verify required tables: email_users, email_teams, email_team_members, roles, user_roles", - "3. Check table row counts:", - ' python3 -c "from mcpgateway.db import SessionLocal, EmailUser, EmailTeam; db=SessionLocal(); users=db.query(EmailUser).count(); teams=db.query(EmailTeam).count(); print(f\'Users: {users}, Teams: {teams}\'); db.close()"', - "4. Test foreign key relationships work properly" - ], - "expected": "All multitenancy tables exist with proper data and working relationships", - "validation_command": 'python3 -c "from mcpgateway.db import SessionLocal, EmailUser, EmailTeam, EmailTeamMember; db=SessionLocal(); users=db.query(EmailUser).count(); teams=db.query(EmailTeam).count(); members=db.query(EmailTeamMember).count(); result = \'PASS\' if users > 0 and teams > 0 and members > 0 else \'FAIL\'; print(f\'Schema validation: {result} - Users: {users}, Teams: {teams}, Members: {members}\'); db.close()"' - }, - { - "id": "MIG-008", - "priority": "HIGH", - "component": "Team Membership Validation", - "description": "Admin user properly added to personal team as owner", - "steps": [ - "1. In admin UI, navigate to the Teams section", - "2. Find the personal team (usually named ''s Team')", - "3. Click on the personal team to view its details", - "4. Click 'View Members' or 'Members' tab", - "5. Verify admin user is listed with role 'Owner'", - "6. Check the join date is recent (around migration execution time)", - "7. Test basic team management functions" - ], - "expected": "Admin user listed as Owner in personal team with recent join date", - "validation_command": 'python3 -c "from mcpgateway.db import SessionLocal, EmailTeamMember, EmailUser; from mcpgateway.config import settings; db=SessionLocal(); admin=db.query(EmailUser).filter(EmailUser.email==settings.platform_admin_email).first(); membership=db.query(EmailTeamMember).filter(EmailTeamMember.user_email==settings.platform_admin_email, EmailTeamMember.role==\'owner\').first() if admin else None; result = \'PASS\' if membership else \'FAIL\'; print(f\'Team membership: {result} - Admin is owner: {membership is not None}\'); db.close()"' - }, - { - "id": "MIG-009", - "priority": "MEDIUM", - "component": "API Functionality Validation", - "description": "Core APIs respond correctly after migration", - "steps": [ - "1. Test health endpoint:", - " curl http://localhost:4444/health", - "2. Get authentication token:", - ' curl -X POST http://localhost:4444/auth/login -H "Content-Type: application/json" -d \'{"email":"","password":""}\'', - "3. Test teams API with the token:", - ' curl -H "Authorization: Bearer " http://localhost:4444/teams', - "4. Test servers API:", - ' curl -H "Authorization: Bearer " http://localhost:4444/servers', - "5. Record all HTTP status codes and response content" - ], - "expected": "Health=200, Login=200 with JWT token, Teams=200 with team data, Servers=200 with server data", - "validation_command": 'curl -s http://localhost:4444/health | python3 -c "import json, sys; data=json.load(sys.stdin); print(f\'Health check: {\"PASS\" if data.get(\"status\") == \"ok\" else \"FAIL\"} - Status: {data.get(\"status\")}\') if isinstance(data, dict) else print(\'Health check: FAIL - Invalid response\')"' - }, - { - "id": "MIG-010", - "priority": "MEDIUM", - "component": "Post-Migration Resource Creation", - "description": "New resources created after migration get proper team assignments", - "steps": [ - "1. In admin UI, navigate to Tools section", - "2. Click 'Create Tool' or 'Add Tool' button", - "3. Fill in tool details:", - " Name: 'Post-Migration Test Tool'", - " Description: 'Tool created after v0.7.0 migration'", - " Visibility: 'Team'", - "4. Save the new tool", - "5. Verify tool appears in the tools list", - "6. Check tool details show automatic team assignment", - "7. Delete the test tool when validation is complete" - ], - "expected": "New tool created successfully with automatic team assignment to creator's team", - "validation_command": "# Manual test - check via UI that new resources get team assignments" - } -] - - -def run_migration_validation(): - """Run interactive migration validation.""" - - print("🔄 MCP GATEWAY MIGRATION VALIDATION") - print("=" * 60) - print("🎯 Purpose: Validate v0.6.0 → v0.7.0 migration success") - print("🚨 Critical: These tests must pass for production use") - - results = [] - - print("\\n📋 MIGRATION TEST EXECUTION") - - for test in MIGRATION_TESTS: - print(f"\\n{'='*60}") - print(f"🧪 TEST {test['id']}: {test['component']}") - print(f"Priority: {test['priority']}") - print(f"Description: {test['description']}") - - if test.get('main_test'): - print("🎯 THIS IS THE MAIN MIGRATION TEST!") - - print(f"\\n📋 Test Steps:") - for step in test['steps']: - print(f" {step}") - - print(f"\\n✅ Expected Result:") - print(f" {test['expected']}") - - # Run validation command if available - if 'validation_command' in test and not test['validation_command'].startswith('#'): - print(f"\\n🔍 Running automated validation...") - try: - result = subprocess.run(test['validation_command'], shell=True, - capture_output=True, text=True, timeout=30) - print(f" Validation output: {result.stdout.strip()}") - if result.stderr: - print(f" Validation errors: {result.stderr.strip()}") - except subprocess.TimeoutExpired: - print(" ⚠️ Validation timeout") - except Exception as e: - print(f" ❌ Validation error: {e}") - - # Get user confirmation - print(f"\\n📝 Manual Verification Required:") - response = input(f"Did test {test['id']} PASS? (y/n/skip): ").lower() - - if response == 'y': - status = "PASS" - print(f"✅ {test['id']}: PASSED") - elif response == 'n': - status = "FAIL" - print(f"❌ {test['id']}: FAILED") - if test['priority'] == 'CRITICAL': - print(f"🚨 CRITICAL TEST FAILED!") - print(f"🛑 Migration may not be successful") - break_early = input("Continue with remaining tests? (y/N): ").lower() - if break_early != 'y': - break - else: - status = "SKIP" - print(f"⚠️ {test['id']}: SKIPPED") - - # Record result - result_data = { - "test_id": test['id'], - "component": test['component'], - "status": status, - "timestamp": datetime.now().isoformat(), - "priority": test['priority'] - } - - if response == 'n': # Failed test - details = input("Please describe what failed: ") - result_data['failure_details'] = details - - results.append(result_data) - - # Generate summary - generate_test_summary(results) - - return results - - -def generate_test_summary(results): - """Generate test execution summary.""" - - print(f"\\n{'='*60}") - print("📊 MIGRATION VALIDATION SUMMARY") - print("=" * 60) - - # Count results - passed = len([r for r in results if r['status'] == 'PASS']) - failed = len([r for r in results if r['status'] == 'FAIL']) - skipped = len([r for r in results if r['status'] == 'SKIP']) - total = len(results) - - print(f"📈 Test Results:") - print(f" ✅ Passed: {passed}/{total}") - print(f" ❌ Failed: {failed}/{total}") - print(f" ⚠️ Skipped: {skipped}/{total}") - - # Check critical tests - critical_results = [r for r in results if r['priority'] == 'CRITICAL'] - critical_passed = len([r for r in critical_results if r['status'] == 'PASS']) - critical_total = len(critical_results) - - print(f"\\n🚨 Critical Test Results:") - print(f" ✅ Critical Passed: {critical_passed}/{critical_total}") - - # Overall assessment - if failed == 0 and critical_passed == critical_total: - print(f"\\n🎉 MIGRATION VALIDATION: SUCCESS!") - print("✅ All critical tests passed") - print("✅ Migration completed successfully") - print("✅ Ready for production use") - elif critical_passed == critical_total: - print(f"\\n⚠️ MIGRATION VALIDATION: PARTIAL SUCCESS") - print("✅ All critical tests passed") - print("⚠️ Some non-critical tests failed") - print("💡 Review failed tests but migration core is successful") - else: - print(f"\\n❌ MIGRATION VALIDATION: FAILED") - print("❌ Critical tests failed") - print("🛑 Migration may not be successful") - print("🔧 Please investigate failures before production use") - - # Save results - save_results(results) - - -def save_results(results): - """Save test results to file.""" - - results_file = Path("tests/manual/migration_test_results.json") - - summary = { - "test_execution": { - "timestamp": datetime.now().isoformat(), - "total_tests": len(results), - "passed": len([r for r in results if r['status'] == 'PASS']), - "failed": len([r for r in results if r['status'] == 'FAIL']), - "skipped": len([r for r in results if r['status'] == 'SKIP']) - }, - "test_results": results - } - - with open(results_file, 'w') as f: - json.dump(summary, f, indent=2) - - print(f"\\n📄 Results saved: {results_file}") - - -def list_all_tests(): - """List all migration tests.""" - - print("📋 ALL MIGRATION VALIDATION TESTS") - print("=" * 50) - - for test in MIGRATION_TESTS: - priority_indicator = "🚨" if test['priority'] == 'CRITICAL' else "🔧" if test['priority'] == 'HIGH' else "📝" - main_indicator = " 🎯 MAIN TEST" if test.get('main_test') else "" - - print(f"\\n{test['id']}: {test['component']} {priority_indicator}{main_indicator}") - print(f" Priority: {test['priority']}") - print(f" Description: {test['description']}") - print(f" Expected: {test['expected']}") - - -if __name__ == "__main__": - if len(sys.argv) > 1: - if sys.argv[1] == "--help": - print("📋 Migration Validation Tests") - print("Usage:") - print(" python3 tests/manual/migration_tests.py # Interactive testing") - print(" python3 tests/manual/migration_tests.py --list # List all tests") - print(" python3 tests/manual/migration_tests.py --help # This help") - elif sys.argv[1] == "--list": - list_all_tests() - elif sys.argv[1] == "--run-all": - print("🚀 Running all migration tests...") - run_migration_validation() - else: - print("❌ Unknown option. Use --help for usage.") - else: - # Interactive mode - print("🔄 Starting interactive migration validation...") - print("💡 Tip: Use --list to see all tests first") - - try: - results = run_migration_validation() - print("\\n🎉 Migration validation complete!") - except KeyboardInterrupt: - print("\\n❌ Testing cancelled by user") - sys.exit(1) - except Exception as e: - print(f"❌ Testing error: {e}") - sys.exit(1) \ No newline at end of file diff --git a/tests/manual/run_all_tests.py b/tests/manual/run_all_tests.py deleted file mode 100644 index a34a97537..000000000 --- a/tests/manual/run_all_tests.py +++ /dev/null @@ -1,331 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -MCP Gateway v0.7.0 - Master Test Runner - -Coordinates execution of all manual test suites. -Designed for comprehensive validation after v0.6.0 → v0.7.0 migration. - -Usage: - python3 tests/manual/run_all_tests.py - python3 tests/manual/run_all_tests.py --quick - python3 tests/manual/run_all_tests.py --critical-only -""" - -import sys -import subprocess -import json -from pathlib import Path -from datetime import datetime - -# Test suite configuration -TEST_SUITES = [ - { - "name": "Setup Instructions", - "file": "setup_instructions.py", - "description": "Environment setup and validation", - "priority": "CRITICAL", - "estimated_time": "30-60 minutes", - "prerequisite": True - }, - { - "name": "Migration Validation", - "file": "migration_tests.py", - "description": "Post-migration validation tests", - "priority": "CRITICAL", - "estimated_time": "45-90 minutes", - "main_test": True - }, - { - "name": "Admin UI Testing", - "file": "admin_ui_tests.py", - "description": "Complete admin interface testing", - "priority": "CRITICAL", - "estimated_time": "60-120 minutes", - "includes_main_test": True - }, - { - "name": "API Authentication", - "file": "api_authentication_tests.py", - "description": "Authentication endpoint testing", - "priority": "HIGH", - "estimated_time": "30-60 minutes" - }, - { - "name": "API Teams", - "file": "api_teams_tests.py", - "description": "Team management API testing", - "priority": "HIGH", - "estimated_time": "30-60 minutes" - }, - { - "name": "API Servers", - "file": "api_servers_tests.py", - "description": "Virtual servers API testing", - "priority": "HIGH", - "estimated_time": "45-90 minutes" - }, - { - "name": "Database Testing", - "file": "database_tests.py", - "description": "SQLite and PostgreSQL compatibility", - "priority": "HIGH", - "estimated_time": "60-120 minutes" - }, - { - "name": "Security Testing", - "file": "security_tests.py", - "description": "Security and penetration testing", - "priority": "MEDIUM", - "estimated_time": "90-180 minutes", - "warning": "Performs actual attack scenarios" - } -] - -# Tester assignment suggestions -TESTER_ASSIGNMENTS = [ - { - "tester": "Tester 1", - "focus": "Critical Path", - "assignments": ["Setup Instructions", "Migration Validation", "Admin UI Testing"], - "database": "SQLite", - "estimated_time": "3-5 hours" - }, - { - "tester": "Tester 2", - "focus": "API Testing", - "assignments": ["API Authentication", "API Teams", "API Servers"], - "database": "SQLite", - "estimated_time": "2-4 hours" - }, - { - "tester": "Tester 3", - "focus": "Database Compatibility", - "assignments": ["Database Testing", "Migration Validation"], - "database": "PostgreSQL", - "estimated_time": "2-3 hours" - }, - { - "tester": "Tester 4", - "focus": "Security Validation", - "assignments": ["Security Testing", "API Authentication"], - "database": "Both", - "estimated_time": "3-5 hours" - } -] - - -def main(): - """Main test coordination.""" - - print("🎯 MCP GATEWAY COMPREHENSIVE MANUAL TESTING") - print("=" * 70) - print("🔄 Post-Migration Validation Suite") - print("👥 Designed for multiple testers") - - print("\\n📋 Available Test Suites:") - for i, suite in enumerate(TEST_SUITES, 1): - priority_icon = "🚨" if suite['priority'] == 'CRITICAL' else "🔧" if suite['priority'] == 'HIGH' else "📝" - main_icon = " 🎯" if suite.get('main_test') or suite.get('includes_main_test') else "" - - print(f" {i:2}. {suite['name']} {priority_icon}{main_icon}") - print(f" {suite['description']}") - print(f" Time: {suite['estimated_time']}") - - if suite.get('warning'): - print(f" ⚠️ {suite['warning']}") - - print("\\n👥 Suggested Tester Assignments:") - for assignment in TESTER_ASSIGNMENTS: - print(f" {assignment['tester']} ({assignment['focus']}):") - print(f" Tests: {', '.join(assignment['assignments'])}") - print(f" Database: {assignment['database']}") - print(f" Time: {assignment['estimated_time']}") - print() - - -def run_quick_validation(): - """Run quick critical tests only.""" - - print("⚡ QUICK VALIDATION - Critical Tests Only") - print("=" * 50) - - critical_suites = [s for s in TEST_SUITES if s['priority'] == 'CRITICAL'] - - for suite in critical_suites: - print(f"\\n🚨 {suite['name']}") - print(f" {suite['description']}") - - response = input(f"\\nRun {suite['name']}? (y/n): ").lower() - if response == 'y': - run_test_suite(suite) - - -def run_test_suite(suite): - """Run a specific test suite.""" - - print(f"\\n🧪 RUNNING: {suite['name']}") - print("=" * 50) - - test_file = Path("tests/manual") / suite['file'] - - if not test_file.exists(): - print(f"❌ Test file not found: {test_file}") - return False - - print(f"📄 Executing: {test_file}") - print(f"⏱️ Estimated time: {suite['estimated_time']}") - - if suite.get('warning'): - print(f"⚠️ Warning: {suite['warning']}") - proceed = input("Proceed? (y/N): ").lower() - if proceed != 'y': - print("⚠️ Test suite skipped") - return False - - try: - # Execute test file - result = subprocess.run([sys.executable, str(test_file)], - capture_output=True, text=True, timeout=1800) # 30 min timeout - - if result.returncode == 0: - print(f"✅ {suite['name']}: Completed successfully") - if result.stdout: - print("Output summary:") - print(result.stdout[-500:]) # Last 500 chars - else: - print(f"❌ {suite['name']}: Failed or incomplete") - if result.stderr: - print("Errors:") - print(result.stderr[-500:]) - - return result.returncode == 0 - - except subprocess.TimeoutExpired: - print(f"⏰ {suite['name']}: Timeout (exceeded 30 minutes)") - return False - except Exception as e: - print(f"❌ {suite['name']}: Execution error - {e}") - return False - - -def interactive_testing(): - """Interactive test suite selection and execution.""" - - print("🎯 INTERACTIVE TESTING MODE") - print("=" * 50) - - print("\\nSelect test suites to run:") - for i, suite in enumerate(TEST_SUITES, 1): - print(f" {i}. {suite['name']} ({suite['priority']})") - - print("\\nOptions:") - print(" a - Run all test suites") - print(" c - Run critical tests only") - print(" 1,2,3 - Run specific test suites") - print(" q - Quit") - - selection = input("\\nYour choice: ").lower().strip() - - if selection == 'q': - print("❌ Testing cancelled") - return - elif selection == 'a': - print("🚀 Running ALL test suites...") - for suite in TEST_SUITES: - run_test_suite(suite) - elif selection == 'c': - run_quick_validation() - else: - # Parse specific selections - try: - indices = [int(x.strip()) for x in selection.split(',')] - for idx in indices: - if 1 <= idx <= len(TEST_SUITES): - suite = TEST_SUITES[idx - 1] - run_test_suite(suite) - else: - print(f"❌ Invalid selection: {idx}") - except ValueError: - print("❌ Invalid input format") - - -def generate_overall_summary(): - """Generate comprehensive test summary.""" - - print("\\n📊 GENERATING OVERALL TEST SUMMARY") - print("=" * 60) - - # Collect results from all test files - results_files = list(Path("tests/manual").glob("*_test_results.json")) - - overall_summary = { - "test_execution": { - "timestamp": datetime.now().isoformat(), - "total_suites": len(TEST_SUITES), - "results_files": len(results_files) - }, - "suite_results": {} - } - - for results_file in results_files: - try: - with open(results_file, 'r') as f: - data = json.load(f) - suite_name = results_file.stem.replace('_test_results', '') - overall_summary['suite_results'][suite_name] = data - except Exception as e: - print(f"⚠️ Could not read {results_file}: {e}") - - # Save overall summary - summary_file = Path("tests/manual/overall_test_summary.json") - with open(summary_file, 'w') as f: - json.dump(overall_summary, f, indent=2) - - print(f"📄 Overall summary saved: {summary_file}") - - # Print summary - if overall_summary['suite_results']: - print("\\n📈 Test Suite Results:") - for suite_name, data in overall_summary['suite_results'].items(): - if 'summary' in data: - summary = data['summary'] - passed = summary.get('passed', 0) - total = summary.get('total', 0) - print(f" {suite_name}: {passed}/{total} passed") - else: - print(f" {suite_name}: Results available") - - -if __name__ == "__main__": - if len(sys.argv) > 1: - if sys.argv[1] == "--help": - print("🎯 MCP Gateway Manual Test Runner") - print("Usage:") - print(" python3 tests/manual/run_all_tests.py # Interactive mode") - print(" python3 tests/manual/run_all_tests.py --quick # Critical tests only") - print(" python3 tests/manual/run_all_tests.py --critical-only # Same as --quick") - print(" python3 tests/manual/run_all_tests.py --list # List all test suites") - print(" python3 tests/manual/run_all_tests.py --help # This help") - print("\\n🎯 Individual test suites can be run directly:") - for suite in TEST_SUITES: - print(f" python3 tests/manual/{suite['file']}") - elif sys.argv[1] == "--list": - main() # Show test suites - elif sys.argv[1] == "--quick" or sys.argv[1] == "--critical-only": - run_quick_validation() - generate_overall_summary() - else: - print("❌ Unknown option. Use --help for usage.") - else: - try: - main() - print("\\n🚀 Starting interactive testing...") - interactive_testing() - generate_overall_summary() - print("\\n🎉 Manual testing session complete!") - except KeyboardInterrupt: - print("\\n❌ Testing cancelled by user") - except Exception as e: - print(f"❌ Testing error: {e}") - sys.exit(1) \ No newline at end of file diff --git a/tests/manual/security_tests.py b/tests/manual/security_tests.py deleted file mode 100644 index 9c3197f35..000000000 --- a/tests/manual/security_tests.py +++ /dev/null @@ -1,380 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -MCP Gateway v0.7.0 - Security & Penetration Tests - -Security testing including attack scenarios: -- SQL injection attempts -- Authentication bypass -- Authorization violations -- XSS and CSRF attacks -- Input validation testing - -Usage: - python3 tests/manual/security_tests.py - python3 tests/manual/security_tests.py --attack-type sql-injection -""" - -import sys -import json -import subprocess -from pathlib import Path -from datetime import datetime - -# Security test scenarios -SECURITY_TESTS = [ - { - "id": "SEC-001", - "attack_type": "SQL Injection", - "target": "Teams API", - "risk_level": "Critical", - "description": "Attempt SQL injection via team creation", - "attack_steps": [ - "1. Get valid JWT token from admin login", - "2. Prepare malicious team name with SQL injection:", - ' {"name":"\'; DROP TABLE users; --","description":"injection test"}', - "3. Execute attack:", - ' curl -X POST http://localhost:4444/teams -H "Authorization: Bearer " -H "Content-Type: application/json" -d \'{"name":"\\"; DROP TABLE users; --","description":"test"}\'', - "4. Check database integrity:", - " sqlite3 mcp.db '.tables' (verify users table still exists)", - "5. Check error response handling" - ], - "expected_defense": "Input sanitized, parameterized queries prevent injection, error handled gracefully", - "validation": "Database remains intact, no SQL executed, proper error returned" - }, - { - "id": "SEC-002", - "attack_type": "JWT Token Manipulation", - "target": "Authentication System", - "risk_level": "Critical", - "description": "Attempt to manipulate JWT token claims", - "attack_steps": [ - "1. Obtain valid JWT token through normal login", - "2. Decode JWT payload (use jwt.io or similar tool)", - "3. Modify claims (e.g., change user email, add admin role)", - "4. Re-encode JWT with different signature", - "5. Attempt to use modified token:", - ' curl -H "Authorization: Bearer " http://localhost:4444/admin/users', - "6. Verify access is denied" - ], - "expected_defense": "Token signature validation prevents tampering, access denied", - "validation": "Modified tokens rejected, signature verification works" - }, - { - "id": "SEC-003", - "attack_type": "Team Isolation Bypass", - "target": "Multi-tenancy Authorization", - "risk_level": "Critical", - "description": "Attempt to access resources from other teams", - "attack_steps": [ - "1. Create two test users in different teams", - "2. User A creates a private resource in Team 1", - "3. Get User B's JWT token", - "4. User B attempts to access User A's resource:", - ' curl -H "Authorization: Bearer " http://localhost:4444/resources/{USER_A_RESOURCE_ID}', - "5. Verify access is denied", - "6. Test with direct resource ID guessing" - ], - "expected_defense": "Team boundaries strictly enforced, cross-team access blocked", - "validation": "Access denied, team isolation maintained" - }, - { - "id": "SEC-004", - "attack_type": "Privilege Escalation", - "target": "RBAC System", - "risk_level": "Critical", - "description": "Attempt to elevate privileges or access admin functions", - "attack_steps": [ - "1. Login as regular user (non-admin)", - "2. Attempt to access admin-only endpoints:", - ' curl -H "Authorization: Bearer " http://localhost:4444/admin/users', - "3. Try to modify own user role in database", - "4. Attempt direct admin API calls", - "5. Test admin UI access with regular user" - ], - "expected_defense": "Admin privileges protected, privilege escalation prevented", - "validation": "Admin functions inaccessible to regular users" - }, - { - "id": "SEC-005", - "attack_type": "Cross-Site Scripting (XSS)", - "target": "Admin UI", - "risk_level": "High", - "description": "Attempt script injection in web interface", - "attack_steps": [ - "1. Access admin UI with valid credentials", - "2. Create tool with malicious name:", - ' Name: ', - "3. Save tool and navigate to tools list", - "4. Check if JavaScript executes in browser", - "5. Test other input fields for XSS vulnerabilities", - "6. Check browser console for script execution" - ], - "expected_defense": "Script tags escaped or sanitized, no JavaScript execution", - "validation": "No alert boxes, scripts properly escaped in HTML" - }, - { - "id": "SEC-006", - "attack_type": "Cross-Site Request Forgery (CSRF)", - "target": "State-Changing Operations", - "risk_level": "High", - "description": "Attempt CSRF attack on admin operations", - "attack_steps": [ - "1. Create malicious HTML page with form posting to gateway", - "2. Form targets state-changing endpoint (e.g., team creation)", - "3. Get authenticated user to visit malicious page", - "4. Check if operation executes without user consent", - "5. Verify CSRF token requirements", - "6. Test cross-origin request blocking" - ], - "expected_defense": "CSRF tokens required, cross-origin requests properly blocked", - "validation": "Operations require explicit user consent and CSRF protection" - }, - { - "id": "SEC-007", - "attack_type": "Brute Force Attack", - "target": "Login Endpoint", - "risk_level": "Medium", - "description": "Attempt password brute force attack", - "attack_steps": [ - "1. Script multiple rapid login attempts with wrong passwords:", - ' for i in {1..10}; do curl -X POST http://localhost:4444/auth/login -d \'{"email":"admin@example.com","password":"wrong$i"}\'; done', - "2. Monitor response times and status codes", - "3. Check for rate limiting implementation", - "4. Test account lockout after failed attempts", - "5. Verify lockout duration enforcement" - ], - "expected_defense": "Account locked after multiple failures, rate limiting enforced", - "validation": "Brute force attacks mitigated by lockout and rate limiting" - }, - { - "id": "SEC-008", - "attack_type": "File Upload Attack", - "target": "Resource Management", - "risk_level": "High", - "description": "Attempt to upload malicious files", - "attack_steps": [ - "1. Try uploading executable file (.exe, .sh)", - "2. Attempt script file upload (.py, .js, .php)", - "3. Test oversized file upload", - "4. Try files with malicious names", - "5. Attempt path traversal in filenames (../../../etc/passwd)", - "6. Check file type and size validation" - ], - "expected_defense": "File type validation, size limits enforced, path sanitization", - "validation": "Malicious uploads blocked, validation errors returned" - }, - { - "id": "SEC-009", - "attack_type": "API Rate Limiting", - "target": "DoS Prevention", - "risk_level": "Medium", - "description": "Test API rate limiting and DoS protection", - "attack_steps": [ - "1. Script rapid API requests to test rate limiting:", - ' for i in {1..100}; do curl -s http://localhost:4444/health; done', - "2. Monitor response times and status codes", - "3. Check for rate limit headers in responses", - "4. Verify throttling and backoff mechanisms", - "5. Test rate limiting on authenticated endpoints" - ], - "expected_defense": "Rate limits enforced, DoS protection active, proper HTTP status codes", - "validation": "Rate limiting prevents abuse, service remains stable" - }, - { - "id": "SEC-010", - "attack_type": "Information Disclosure", - "target": "Error Handling", - "risk_level": "Medium", - "description": "Check for sensitive information in error responses", - "attack_steps": [ - "1. Trigger various error conditions:", - " - Invalid JSON syntax", - " - Missing required fields", - " - Invalid authentication", - " - Access denied scenarios", - "2. Analyze error messages for sensitive information", - "3. Check for stack traces in responses", - "4. Look for database connection strings", - "5. Verify no internal paths or system info disclosed" - ], - "expected_defense": "No sensitive information disclosed in error responses", - "validation": "Error messages are user-friendly without exposing system internals" - } -] - - -def run_security_tests(): - """Run comprehensive security testing.""" - - print("🛡️ SECURITY & PENETRATION TESTING") - print("=" * 60) - print("⚠️ WARNING: This performs actual attack scenarios") - print("🎯 Purpose: Validate security defenses") - - print("\\n🔧 Security Testing Prerequisites:") - print("1. Test environment (not production)") - print("2. Database backup available") - print("3. MCP Gateway running") - print("4. Valid admin credentials") - - proceed = input("\\nProceed with security testing? (yes/no): ").lower() - if proceed != 'yes': - print("❌ Security testing cancelled") - return [] - - results = [] - - for test in SECURITY_TESTS: - print(f"\\n{'='*60}") - print(f"🛡️ SECURITY TEST {test['id']}") - print(f"Attack Type: {test['attack_type']}") - print(f"Target: {test['target']}") - print(f"Risk Level: {test['risk_level']}") - print(f"Description: {test['description']}") - - if test['risk_level'] == 'Critical': - print("🚨 CRITICAL SECURITY TEST") - - print(f"\\n⚔️ Attack Steps:") - for step in test['attack_steps']: - print(f" {step}") - - print(f"\\n🛡️ Expected Defense:") - print(f" {test['expected_defense']}") - - print(f"\\n✅ Validation Criteria:") - print(f" {test['validation']}") - - # Manual execution - response = input(f"\\nExecute security test {test['id']}? (y/n/skip): ").lower() - - if response == 'skip': - results.append({"id": test['id'], "status": "SKIP"}) - continue - elif response == 'y': - print("\\n🔍 Execute the attack steps above and observe results...") - - # Get results - defense_worked = input("Did the expected defense work? (y/n): ").lower() - vulnerability_found = input("Any vulnerability discovered? (y/n): ").lower() - - if defense_worked == 'y' and vulnerability_found == 'n': - status = "PASS" - print(f"✅ {test['id']}: Security defense PASSED") - else: - status = "FAIL" - print(f"❌ {test['id']}: Security vulnerability DETECTED") - vuln_details = input("Describe the vulnerability: ") - - if test['risk_level'] == 'Critical': - print("🚨 CRITICAL VULNERABILITY FOUND!") - print("🛑 Do not deploy to production until fixed") - - # Record results - result_data = { - "id": test['id'], - "attack_type": test['attack_type'], - "risk_level": test['risk_level'], - "status": status, - "timestamp": datetime.now().isoformat() - } - - if status == "FAIL": - result_data['vulnerability_details'] = vuln_details - - results.append(result_data) - - # Generate security summary - generate_security_summary(results) - - return results - - -def generate_security_summary(results): - """Generate security test summary.""" - - print(f"\\n{'='*60}") - print("🛡️ SECURITY TEST SUMMARY") - print("=" * 60) - - passed = len([r for r in results if r['status'] == 'PASS']) - failed = len([r for r in results if r['status'] == 'FAIL']) - skipped = len([r for r in results if r['status'] == 'SKIP']) - - # Check by risk level - critical_tests = [r for r in results if r.get('risk_level') == 'Critical'] - critical_passed = len([r for r in critical_tests if r['status'] == 'PASS']) - - print(f"📈 Security Test Results:") - print(f" ✅ Defenses Passed: {passed}/{len(results)}") - print(f" ❌ Vulnerabilities Found: {failed}/{len(results)}") - print(f" ⚠️ Tests Skipped: {skipped}/{len(results)}") - - print(f"\\n🚨 Critical Security Tests:") - print(f" ✅ Critical Defenses: {critical_passed}/{len(critical_tests)}") - - # Security assessment - if failed == 0 and critical_passed == len(critical_tests): - print(f"\\n🎉 SECURITY ASSESSMENT: EXCELLENT!") - print("✅ All security defenses working") - print("✅ No vulnerabilities detected") - print("✅ Ready for production deployment") - elif critical_passed == len(critical_tests): - print(f"\\n⚠️ SECURITY ASSESSMENT: GOOD") - print("✅ Critical defenses working") - print("⚠️ Some non-critical issues found") - print("💡 Review non-critical findings") - else: - print(f"\\n❌ SECURITY ASSESSMENT: VULNERABLE") - print("❌ Critical vulnerabilities detected") - print("🛑 DO NOT DEPLOY TO PRODUCTION") - print("🔧 Fix vulnerabilities before deployment") - - # Save results - results_file = Path("tests/manual/security_test_results.json") - with open(results_file, 'w') as f: - json.dump({ - "summary": { - "passed": passed, - "failed": failed, - "skipped": skipped, - "critical_passed": critical_passed, - "critical_total": len(critical_tests) - }, - "results": results, - "timestamp": datetime.now().isoformat() - }, f, indent=2) - - print(f"\\n📄 Security results saved: {results_file}") - - -if __name__ == "__main__": - if len(sys.argv) > 1: - if sys.argv[1] == "--help": - print("🛡️ Security & Penetration Tests") - print("Usage:") - print(" python3 tests/manual/security_tests.py # Run all security tests") - print(" python3 tests/manual/security_tests.py --list # List all tests") - print(" python3 tests/manual/security_tests.py --help # This help") - print("\\n⚠️ WARNING: These tests perform actual attack scenarios") - print("🎯 Only run in test environments, never production") - elif sys.argv[1] == "--list": - print("🛡️ All Security Tests:") - for test in SECURITY_TESTS: - print(f" {test['id']}: {test['attack_type']} ({test['risk_level']})") - print(f" Target: {test['target']}") - print(f" Description: {test['description']}") - else: - print("❌ Unknown option. Use --help") - else: - try: - print("🛡️ Starting security testing...") - print("⚠️ This will perform actual attack scenarios") - results = run_security_tests() - print("\\n🎉 Security testing complete!") - except KeyboardInterrupt: - print("\\n❌ Security testing cancelled") - except Exception as e: - print(f"❌ Error: {e}") - sys.exit(1) \ No newline at end of file diff --git a/tests/manual/setup_instructions.py b/tests/manual/setup_instructions.py deleted file mode 100644 index 74d51dbbd..000000000 --- a/tests/manual/setup_instructions.py +++ /dev/null @@ -1,342 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -""" -MCP Gateway v0.7.0 - Setup Instructions for Manual Testing - -Complete environment setup guide for testers. -This file contains step-by-step instructions for setting up -the MCP Gateway for comprehensive manual testing. - -Usage: - python3 tests/manual/setup_instructions.py -""" - -import sys -import subprocess -from pathlib import Path -from datetime import datetime - -# Test case data structure -SETUP_TESTS = [ - { - "id": "SETUP-001", - "step": "Check Prerequisites", - "action": "Verify Python 3.11+, Git, and curl installed", - "command": "python3 --version && git --version && curl --version", - "expected": "All tools show version numbers", - "troubleshooting": "Install missing tools via package manager", - "required": True - }, - { - "id": "SETUP-002", - "step": "Clone Repository", - "action": "Download MCP Gateway source code", - "command": "git clone https://github.com/anthropics/mcp-context-forge.git", - "expected": "Repository cloned successfully", - "troubleshooting": "Check git credentials and network access", - "required": True - }, - { - "id": "SETUP-003", - "step": "Enter Directory", - "action": "Navigate to project directory", - "command": "cd mcp-context-forge", - "expected": "Directory changed, can see project files", - "troubleshooting": "Use 'ls' to verify files like README.md, .env.example", - "required": True - }, - { - "id": "SETUP-004", - "step": "Copy Environment", - "action": "Create environment configuration file", - "command": "cp .env.example .env", - "expected": ".env file created", - "troubleshooting": "Check file exists: ls -la .env", - "required": True - }, - { - "id": "SETUP-005", - "step": "Edit Configuration", - "action": "Configure platform admin credentials", - "command": "vi .env", - "expected": "File opens in vi editor", - "troubleshooting": "Use :wq to save and quit vi", - "required": True, - "details": [ - "Set PLATFORM_ADMIN_EMAIL=", - "Set PLATFORM_ADMIN_PASSWORD=", - "Set EMAIL_AUTH_ENABLED=true", - "Save file with :wq" - ] - }, - { - "id": "SETUP-006", - "step": "Verify Configuration", - "action": "Check settings are loaded correctly", - "command": 'python3 -c "from mcpgateway.config import settings; print(f\'Admin: {settings.platform_admin_email}\')"', - "expected": "Shows your configured admin email", - "troubleshooting": "If error, check .env file syntax", - "required": True - }, - { - "id": "SETUP-007", - "step": "Install Dependencies", - "action": "Install Python packages", - "command": "make install-dev", - "expected": "All dependencies installed successfully", - "troubleshooting": "May take 5-15 minutes, check internet connection", - "required": True - }, - { - "id": "SETUP-008", - "step": "Run Migration", - "action": "Execute database migration (CRITICAL STEP)", - "command": "python3 -m mcpgateway.bootstrap_db", - "expected": "'Database ready' message at end", - "troubleshooting": "MUST complete successfully - get help if fails", - "required": True, - "critical": True - }, - { - "id": "SETUP-009", - "step": "Verify Migration", - "action": "Validate migration completed correctly", - "command": "python3 scripts/verify_multitenancy_0_7_0_migration.py", - "expected": "'🎉 MIGRATION VERIFICATION: SUCCESS!' at end", - "troubleshooting": "All checks must pass - use fix script if needed", - "required": True, - "critical": True - }, - { - "id": "SETUP-010", - "step": "Start Gateway", - "action": "Start MCP Gateway server", - "command": "make dev", - "expected": "'Uvicorn running on http://0.0.0.0:4444' message", - "troubleshooting": "Keep this terminal window open during testing", - "required": True - }, - { - "id": "SETUP-011", - "step": "Test Health Check", - "action": "Verify server is responding", - "command": "curl http://localhost:4444/health", - "expected": '{"status":"ok"}', - "troubleshooting": "If fails, check server started correctly", - "required": True - }, - { - "id": "SETUP-012", - "step": "Access Admin UI", - "action": "Open admin interface in browser", - "command": "Open http://localhost:4444/admin in browser", - "expected": "Login page appears", - "troubleshooting": "Try both http:// and https://", - "required": True - }, - { - "id": "SETUP-013", - "step": "Test Admin Login", - "action": "Authenticate with admin credentials", - "command": "Login with admin email/password from .env", - "expected": "Dashboard loads successfully", - "troubleshooting": "Main authentication validation test", - "required": True, - "critical": True - }, - { - "id": "SETUP-014", - "step": "Verify Servers Visible", - "action": "Check old servers appear in UI (MAIN MIGRATION TEST)", - "command": "Navigate to Virtual Servers section", - "expected": "Servers listed, including pre-migration servers", - "troubleshooting": "If empty list, migration failed - get help immediately", - "required": True, - "critical": True, - "main_test": True - } -] - -# Tester information template -TESTER_INFO = { - "name": "", - "email": "", - "start_date": "", - "database_type": "SQLite/PostgreSQL", - "os": "", - "browser": "Chrome/Firefox", - "experience": "Beginner/Intermediate/Expert", - "time_available": "", - "organization": "", - "contact": "" -} - -# Prerequisites checklist -PREREQUISITES = [ - "Python 3.11+ installed (python3 --version)", - "Git installed (git --version)", - "curl installed (curl --version)", - "Modern web browser (Chrome/Firefox recommended)", - "Text editor (vi/vim/VSCode)", - "Terminal/command line access", - "4+ hours dedicated testing time", - "Reliable internet connection", - "Admin/sudo access for package installation", - "Basic understanding of web applications and APIs" -] - - -def run_setup_validation(): - """Interactive setup validation.""" - - print("🚀 MCP GATEWAY SETUP VALIDATION") - print("=" * 60) - - print("\\n👤 TESTER INFORMATION") - print("Please provide your information:") - - tester_info = {} - for key, default in TESTER_INFO.items(): - prompt = f"{key.replace('_', ' ').title()}" - if default: - prompt += f" ({default})" - prompt += ": " - - value = input(prompt).strip() - tester_info[key] = value - - print("\\n⚠️ PREREQUISITES CHECK") - print("Verify you have all prerequisites:") - - for i, prereq in enumerate(PREREQUISITES, 1): - print(f" {i:2}. {prereq}") - - response = input("\\nDo you have all prerequisites? (y/N): ").lower() - if response != 'y': - print("❌ Please install missing prerequisites before continuing") - return False - - print("\\n🔧 SETUP EXECUTION") - print("Follow these steps exactly:") - - for i, test in enumerate(SETUP_TESTS, 1): - print(f"\\n--- STEP {i}: {test['step']} ---") - print(f"Action: {test['action']}") - print(f"Command: {test['command']}") - print(f"Expected: {test['expected']}") - - if test.get('details'): - print("Details:") - for detail in test['details']: - print(f" - {detail}") - - if test.get('critical'): - print("🚨 CRITICAL: This step must succeed!") - - if test.get('main_test'): - print("🎯 MAIN TEST: This validates the migration fix!") - - # Wait for user confirmation - response = input(f"\\nCompleted step {i}? (y/n/q): ").lower() - - if response == 'q': - print("❌ Setup cancelled by user") - return False - elif response == 'n': - print(f"⚠️ Step {i} not completed") - if test.get('critical'): - print("🚨 Critical step failed - please resolve before continuing") - troubleshoot = input("Need troubleshooting help? (y/N): ").lower() - if troubleshoot == 'y': - print(f"💡 Troubleshooting: {test['troubleshooting']}") - return False - else: - print(f"✅ Step {i} completed") - - print("\\n🎊 SETUP COMPLETE!") - print("✅ All setup steps completed successfully") - print("🧪 Ready to begin manual testing") - - # Save tester info for reference - save_tester_info(tester_info) - - return True - - -def save_tester_info(info): - """Save tester information for tracking.""" - - info_file = Path("tests/manual/tester_info.txt") - - with open(info_file, 'w') as f: - f.write(f"Tester Information\\n") - f.write(f"Generated: {datetime.now().isoformat()}\\n") - f.write("=" * 40 + "\\n") - - for key, value in info.items(): - f.write(f"{key.replace('_', ' ').title()}: {value}\\n") - - print(f"\\n📄 Tester info saved: {info_file}") - - -def print_usage(): - """Print usage instructions.""" - - print("📋 SETUP INSTRUCTIONS USAGE") - print("=" * 40) - print() - print("This script guides you through complete environment setup.") - print() - print("Options:") - print(" python3 tests/manual/setup_instructions.py # Interactive setup") - print(" python3 tests/manual/setup_instructions.py --list # Show all steps") - print(" python3 tests/manual/setup_instructions.py --help # This help") - print() - print("Next steps after setup:") - print(" python3 tests/manual/migration_tests.py # Critical migration tests") - print(" python3 tests/manual/api_authentication_tests.py # API authentication") - print(" python3 tests/manual/admin_ui_tests.py # Admin UI testing") - print() - - -def list_all_steps(): - """List all setup steps.""" - - print("📋 ALL SETUP STEPS") - print("=" * 40) - - for i, test in enumerate(SETUP_TESTS, 1): - status = "🚨 CRITICAL" if test.get('critical') else "📋 Required" if test.get('required') else "📝 Optional" - print(f"\\n{i:2}. {test['step']} ({status})") - print(f" Action: {test['action']}") - print(f" Command: {test['command']}") - print(f" Expected: {test['expected']}") - - if test.get('main_test'): - print(" 🎯 THIS IS THE MAIN MIGRATION TEST!") - - -if __name__ == "__main__": - if len(sys.argv) > 1: - if sys.argv[1] == "--help": - print_usage() - elif sys.argv[1] == "--list": - list_all_steps() - else: - print("❌ Unknown option. Use --help for usage.") - else: - # Run interactive setup - try: - success = run_setup_validation() - if success: - print("\\n🎉 Setup complete! Ready for testing.") - print("Next: python3 tests/manual/migration_tests.py") - else: - print("❌ Setup incomplete. Please resolve issues.") - sys.exit(1) - except KeyboardInterrupt: - print("\\n❌ Setup cancelled by user") - sys.exit(1) - except Exception as e: - print(f"❌ Setup error: {e}") - sys.exit(1) \ No newline at end of file diff --git a/tests/manual/testcases/admin_ui_tests.yaml b/tests/manual/testcases/admin_ui_tests.yaml new file mode 100644 index 000000000..4dc9d2afe --- /dev/null +++ b/tests/manual/testcases/admin_ui_tests.yaml @@ -0,0 +1,218 @@ +# MCP Gateway v0.7.0 - Admin UI Tests +# Comprehensive admin interface testing +# Focus: UI validation including critical server visibility test + +worksheet_name: "Admin UI Tests" +description: "Complete admin interface testing including server visibility validation" +priority: "CRITICAL" +estimated_time: "60-120 minutes" + +headers: + - "Test ID" + - "UI Section" + - "Component" + - "Action" + - "Click-by-Click Steps" + - "Expected Behavior" + - "Actual Result" + - "Status" + - "Tester" + - "Browser" + - "Screenshot" + - "Date" + - "Comments" + +tests: + - test_id: "UI-001" + ui_section: "Authentication" + component: "Login Form" + action: "Test admin login interface" + steps: | + 1. Open web browser (Chrome or Firefox recommended) + 2. Navigate to: http://localhost:4444/admin + 3. Observe login page layout and form components + 4. Check for email and password input fields + 5. Look for 'Login' or 'Sign In' button + 6. Test form validation with empty fields + 7. Enter admin email from .env file + 8. Enter admin password from .env file + 9. Click Login button + 10. Verify successful redirect to admin dashboard + expected: "Login page functional, form validation works, authentication successful" + browser: "Chrome/Firefox" + screenshot: "Optional" + critical: true + + - test_id: "UI-002" + ui_section: "Dashboard" + component: "Main Dashboard View" + action: "Navigate and test admin dashboard" + steps: | + 1. After successful login, observe dashboard layout + 2. Count the number of statistics cards displayed + 3. Check navigation menu on left side or top + 4. Click on each statistic card to test interactions + 5. Test responsive design (resize browser window) + 6. Check for any error messages or warnings + 7. Verify user menu/profile in top right corner + 8. Test logout functionality + expected: "Dashboard displays system stats, navigation menu works, responsive design functional" + browser: "Chrome/Firefox" + screenshot: "Optional" + + - test_id: "UI-003" + ui_section: "Virtual Servers" + component: "Server List View" + action: "View and verify server list - CRITICAL MIGRATION TEST" + steps: | + 1. Click 'Virtual Servers' in navigation menu + 2. Observe server list/grid layout + 3. COUNT the total number of servers displayed + 4. IDENTIFY servers created before migration (older creation dates) + 5. Click on each server card/row to view details + 6. Verify server information is accessible and complete + 7. Check server actions (start/stop/restart if available) + 8. Test server filtering and search if available + 9. TAKE SCREENSHOT of server list showing all servers + 10. Record server names and their visibility status + expected: "ALL servers visible including pre-migration servers, details accessible" + browser: "Chrome/Firefox" + screenshot: "REQUIRED" + critical: true + main_migration_test: true + notes: "This is the main migration validation test" + + - test_id: "UI-004" + ui_section: "Teams" + component: "Team Management Interface" + action: "Test team management functionality" + steps: | + 1. Navigate to 'Teams' section in admin interface + 2. View team list/grid display + 3. Find your personal team (usually ''s Team') + 4. Click on personal team to view details + 5. Check team information display + 6. Click 'View Members' or 'Members' tab + 7. Verify you're listed as 'Owner' + 8. Test 'Create Team' functionality + 9. Fill out team creation form + 10. Verify new team appears in list + expected: "Team interface functional, personal team visible, team creation works" + browser: "Chrome/Firefox" + screenshot: "Optional" + + - test_id: "UI-005" + ui_section: "Tools" + component: "Tool Registry Interface" + action: "Test tool management and invocation" + steps: | + 1. Navigate to 'Tools' section + 2. View available tools list + 3. Check team-based filtering is working + 4. Click on any tool to view details + 5. Look for 'Invoke' or 'Execute' button + 6. Test tool invocation interface + 7. Fill in tool parameters if prompted + 8. Submit tool execution + 9. Verify results are displayed properly + 10. Test tool creation form if available + expected: "Tools accessible by team permissions, invocation interface works" + browser: "Chrome/Firefox" + screenshot: "Optional" + + - test_id: "UI-006" + ui_section: "Resources" + component: "Resource Management Interface" + action: "Test resource browser and management" + steps: | + 1. Navigate to 'Resources' section + 2. Browse available resources + 3. Check team-based resource filtering + 4. Click on any resource to view details + 5. Test resource download functionality + 6. Try 'Upload Resource' button if available + 7. Test file upload interface + 8. Fill in resource metadata + 9. Verify upload completes successfully + 10. Check new resource appears in list + expected: "Resource browser functional, upload/download works, team filtering applied" + browser: "Chrome/Firefox" + screenshot: "Optional" + + - test_id: "UI-007" + ui_section: "Export/Import" + component: "Configuration Management Interface" + action: "Test configuration backup and restore" + steps: | + 1. Navigate to 'Export/Import' section + 2. Locate 'Export Configuration' button/link + 3. Click export and select export options + 4. Download the configuration JSON file + 5. Open JSON file and verify contents include servers/tools + 6. Locate 'Import Configuration' button/link + 7. Select the downloaded JSON file + 8. Choose import options (merge/replace) + 9. Execute the import process + 10. Verify import completion and success + expected: "Export downloads complete JSON, import processes successfully" + browser: "Chrome/Firefox" + screenshot: "Recommended" + notes: "Important for backup/restore workflows" + + - test_id: "UI-008" + ui_section: "User Management" + component: "User Administration Interface" + action: "Test user management (admin only)" + steps: | + 1. Navigate to 'Users' section (admin only) + 2. View user list display + 3. Click on any user to view details + 4. Check user profile information + 5. Test 'Create User' functionality if available + 6. Fill user creation form + 7. Test role assignment interface + 8. Verify user permissions management + 9. Check user activity/audit information + 10. Test user status changes (active/inactive) + expected: "User management interface functional, role assignment works" + browser: "Chrome/Firefox" + screenshot: "Optional" + requires: "Platform admin privileges" + + - test_id: "UI-009" + ui_section: "Mobile Compatibility" + component: "Responsive Design" + action: "Test mobile device compatibility" + steps: | + 1. Resize browser window to mobile width (<768px) + 2. OR open admin UI on actual mobile device + 3. Test navigation menu (hamburger menu?) + 4. Check form input usability on mobile + 5. Test touch interactions and gestures + 6. Verify text readability and sizing + 7. Check all features remain accessible + 8. Test portrait and landscape orientations + 9. Verify no horizontal scrolling required + 10. Check mobile-specific UI adaptations + expected: "Interface adapts to mobile screens while maintaining full functionality" + browser: "Mobile Chrome/Safari" + screenshot: "Optional" + + - test_id: "UI-010" + ui_section: "Error Handling" + component: "UI Error Scenarios" + action: "Test error handling and user experience" + steps: | + 1. Trigger network error (disconnect internet briefly) + 2. Submit forms with invalid data + 3. Try accessing resources without permission + 4. Test session timeout scenarios + 5. Check error message display + 6. Verify error messages are user-friendly + 7. Test error recovery mechanisms + 8. Check browser console for JavaScript errors + 9. Verify graceful degradation + 10. Test error logging and reporting + expected: "Graceful error handling, helpful error messages, no JavaScript crashes" + browser: "Chrome/Firefox" + screenshot: "For errors" \ No newline at end of file diff --git a/tests/manual/testcases/api_a2a.yaml b/tests/manual/testcases/api_a2a.yaml new file mode 100644 index 000000000..c90241cdf --- /dev/null +++ b/tests/manual/testcases/api_a2a.yaml @@ -0,0 +1,149 @@ +# MCP Gateway v0.7.0 - A2A (Agent-to-Agent) API Tests +# A2A agent integration testing +# Focus: AI agent management and tool integration + +worksheet_name: "API A2A Agents" +description: "Complete A2A agent integration testing including OpenAI, Anthropic, and custom agents" +priority: "MEDIUM" +estimated_time: "45-90 minutes" + +headers: + - "Test ID" + - "Endpoint" + - "Method" + - "Agent Type" + - "Description" + - "cURL Command" + - "Request Body" + - "Expected Status" + - "Expected Response" + - "Status" + - "Tester" + - "Config Required" + - "Comments" + +tests: + - test_id: "A2A-001" + endpoint: "/a2a" + method: "GET" + description: "List A2A agents" + agent_type: "All" + curl_command: 'curl http://localhost:4444/a2a -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Array of registered A2A agents" + config_required: "MCPGATEWAY_A2A_ENABLED=true" + + - test_id: "A2A-002" + endpoint: "/a2a" + method: "POST" + description: "Register OpenAI agent" + agent_type: "OpenAI" + curl_command: 'curl -X POST http://localhost:4444/a2a -H "Authorization: Bearer " -H "Content-Type: application/json"' + request_body: '{"name":"test-openai-agent","description":"OpenAI agent for testing","endpoint_url":"https://api.openai.com/v1","config":{"model":"gpt-4","api_key":"sk-test-key"}}' + expected_status: 201 + expected_response: "OpenAI agent registered successfully" + config_required: "Valid OpenAI API key" + + - test_id: "A2A-003" + endpoint: "/a2a" + method: "POST" + description: "Register Anthropic agent" + agent_type: "Anthropic" + curl_command: 'curl -X POST http://localhost:4444/a2a -H "Authorization: Bearer " -H "Content-Type: application/json"' + request_body: '{"name":"test-claude-agent","description":"Claude agent for testing","endpoint_url":"https://api.anthropic.com","config":{"model":"claude-3-haiku","api_key":"sk-ant-test"}}' + expected_status: 201 + expected_response: "Anthropic agent registered successfully" + config_required: "Valid Anthropic API key" + + - test_id: "A2A-004" + endpoint: "/a2a" + method: "POST" + description: "Register custom HTTP agent" + agent_type: "Custom" + curl_command: 'curl -X POST http://localhost:4444/a2a -H "Authorization: Bearer " -H "Content-Type: application/json"' + request_body: '{"name":"test-custom-agent","description":"Custom HTTP agent","endpoint_url":"http://custom-agent.example.com/api","config":{"timeout":30,"retries":3}}' + expected_status: 201 + expected_response: "Custom agent registered successfully" + config_required: "Accessible agent endpoint" + + - test_id: "A2A-005" + endpoint: "/a2a/{id}" + method: "GET" + description: "Get agent details and configuration" + agent_type: "Any" + curl_command: 'curl http://localhost:4444/a2a/{AGENT_ID} -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Agent details with configuration (sensitive data masked)" + + - test_id: "A2A-006" + endpoint: "/a2a/{id}" + method: "PUT" + description: "Update agent configuration" + agent_type: "Any" + curl_command: 'curl -X PUT http://localhost:4444/a2a/{AGENT_ID} -H "Authorization: Bearer " -H "Content-Type: application/json"' + request_body: '{"name":"updated-agent-name","description":"Updated via API testing"}' + expected_status: 200 + expected_response: "Agent updated successfully" + + - test_id: "A2A-007" + endpoint: "/a2a/{id}/tools" + method: "GET" + description: "List tools provided by agent" + agent_type: "Any" + curl_command: 'curl http://localhost:4444/a2a/{AGENT_ID}/tools -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Array of tools automatically created by agent" + + - test_id: "A2A-008" + endpoint: "/a2a/{id}/invoke" + method: "POST" + description: "Invoke agent directly" + agent_type: "Any" + curl_command: 'curl -X POST http://localhost:4444/a2a/{AGENT_ID}/invoke -H "Authorization: Bearer " -H "Content-Type: application/json"' + request_body: '{"prompt":"Hello, how are you today?","context":{"user":"test","session":"manual-testing"}}' + expected_status: 200 + expected_response: "Agent response with generated content" + critical: true + + - test_id: "A2A-009" + endpoint: "/a2a/{id}/health" + method: "GET" + description: "Check agent health and availability" + agent_type: "Any" + curl_command: 'curl http://localhost:4444/a2a/{AGENT_ID}/health -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Agent health status and response time" + + - test_id: "A2A-010" + endpoint: "/a2a/{id}/metrics" + method: "GET" + description: "Get agent usage metrics" + agent_type: "Any" + curl_command: 'curl http://localhost:4444/a2a/{AGENT_ID}/metrics -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Agent usage statistics and performance metrics" + + - test_id: "A2A-011" + endpoint: "/a2a/{id}" + method: "DELETE" + description: "Unregister agent" + agent_type: "Any" + curl_command: 'curl -X DELETE http://localhost:4444/a2a/{AGENT_ID} -H "Authorization: Bearer "' + request_body: "" + expected_status: 204 + expected_response: "Agent unregistered successfully" + + - test_id: "A2A-012" + endpoint: "/a2a/providers" + method: "GET" + description: "List available agent providers" + agent_type: "All" + curl_command: 'curl http://localhost:4444/a2a/providers -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Array of supported agent providers (OpenAI, Anthropic, Custom)" \ No newline at end of file diff --git a/tests/manual/testcases/api_authentication.yaml b/tests/manual/testcases/api_authentication.yaml new file mode 100644 index 000000000..b48dad3ac --- /dev/null +++ b/tests/manual/testcases/api_authentication.yaml @@ -0,0 +1,179 @@ +# MCP Gateway v0.7.0 - Authentication API Tests +# Comprehensive testing of authentication endpoints +# Focus: All authentication methods and security + +worksheet_name: "API Authentication" +description: "Complete authentication endpoint testing including email, SSO, and JWT" +priority: "HIGH" +estimated_time: "30-60 minutes" + +headers: + - "Test ID" + - "Endpoint" + - "Method" + - "Description" + - "cURL Command" + - "Request Body" + - "Expected Status" + - "Expected Response" + - "Actual Status" + - "Actual Response" + - "Status" + - "Tester" + - "Comments" + +tests: + - test_id: "AUTH-001" + endpoint: "/auth/register" + method: "POST" + description: "User registration endpoint" + curl_command: 'curl -X POST http://localhost:4444/auth/register -H "Content-Type: application/json"' + request_body: '{"email":"testuser@example.com","password":"TestPass123","full_name":"Test User"}' + expected_status: 201 + expected_response: "User created successfully with personal team" + test_steps: + - "Execute cURL command with test user data" + - "Verify HTTP status code is 201" + - "Check response contains user ID and email" + - "Verify personal team was created for user" + - "Record exact response content" + validation: "Response should include user_id, email, and personal_team_id" + + - test_id: "AUTH-002" + endpoint: "/auth/login" + method: "POST" + description: "Email authentication login" + curl_command: 'curl -X POST http://localhost:4444/auth/login -H "Content-Type: application/json"' + request_body: '{"email":"admin@example.com","password":"changeme"}' + expected_status: 200 + expected_response: "JWT token returned in response" + critical: true + test_steps: + - "Use admin credentials from .env file" + - "Execute login request" + - "Verify HTTP 200 status code" + - "Check response contains 'token' field" + - "Verify token is valid JWT format" + - "Save token for subsequent API tests" + validation: "Response must contain valid JWT token" + + - test_id: "AUTH-003" + endpoint: "/auth/logout" + method: "POST" + description: "User logout endpoint" + curl_command: 'curl -X POST http://localhost:4444/auth/logout -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Logout successful, token invalidated" + test_steps: + - "Use JWT token from login test" + - "Execute logout request with Authorization header" + - "Verify HTTP 200 status" + - "Try using the token again (should fail)" + - "Verify token is now invalid" + + - test_id: "AUTH-004" + endpoint: "/auth/refresh" + method: "POST" + description: "JWT token refresh" + curl_command: 'curl -X POST http://localhost:4444/auth/refresh -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "New JWT token issued" + test_steps: + - "Use valid JWT token" + - "Request token refresh" + - "Verify new token returned" + - "Test both old and new tokens" + - "Verify new token works" + + - test_id: "AUTH-005" + endpoint: "/auth/profile" + method: "GET" + description: "Get user profile information" + curl_command: 'curl http://localhost:4444/auth/profile -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "User profile data including email, teams, roles" + test_steps: + - "Use valid JWT token" + - "Request user profile" + - "Verify profile contains user email" + - "Check team membership information" + - "Verify role assignments if applicable" + + - test_id: "AUTH-006" + endpoint: "/auth/change-password" + method: "POST" + description: "Change user password" + curl_command: 'curl -X POST http://localhost:4444/auth/change-password -H "Authorization: Bearer " -H "Content-Type: application/json"' + request_body: '{"old_password":"changeme","new_password":"NewPassword123"}' + expected_status: 200 + expected_response: "Password updated successfully" + test_steps: + - "Use current password as old_password" + - "Provide strong new password" + - "Execute password change request" + - "Verify success response" + - "Test login with new password" + - "IMPORTANT: Change password back for other tests" + + - test_id: "AUTH-007" + endpoint: "/auth/sso/github" + method: "GET" + description: "GitHub SSO authentication initiation" + curl_command: 'curl -I http://localhost:4444/auth/sso/github' + request_body: "" + expected_status: 302 + expected_response: "Redirect to GitHub OAuth authorization" + requires_config: "SSO_GITHUB_ENABLED=true, GitHub OAuth app" + test_steps: + - "Execute request to GitHub SSO endpoint" + - "Verify HTTP 302 redirect status" + - "Check Location header contains github.com" + - "Verify OAuth parameters in redirect URL" + + - test_id: "AUTH-008" + endpoint: "/auth/sso/google" + method: "GET" + description: "Google SSO authentication initiation" + curl_command: 'curl -I http://localhost:4444/auth/sso/google' + request_body: "" + expected_status: 302 + expected_response: "Redirect to Google OAuth authorization" + requires_config: "SSO_GOOGLE_ENABLED=true, Google OAuth app" + test_steps: + - "Execute request to Google SSO endpoint" + - "Verify HTTP 302 redirect status" + - "Check Location header contains accounts.google.com" + - "Verify OAuth parameters in redirect URL" + + - test_id: "AUTH-009" + endpoint: "/auth/verify-email" + method: "POST" + description: "Email address verification" + curl_command: 'curl -X POST http://localhost:4444/auth/verify-email -H "Content-Type: application/json"' + request_body: '{"token":""}' + expected_status: 200 + expected_response: "Email verified successfully" + requires_config: "Email delivery configured" + test_steps: + - "Register new user first (to get verification token)" + - "Check email for verification token (if email configured)" + - "Use token in verification request" + - "Verify email verification status updated" + + - test_id: "AUTH-010" + endpoint: "/auth/forgot-password" + method: "POST" + description: "Password reset request" + curl_command: 'curl -X POST http://localhost:4444/auth/forgot-password -H "Content-Type: application/json"' + request_body: '{"email":"admin@example.com"}' + expected_status: 200 + expected_response: "Password reset email sent" + requires_config: "Email delivery configured" + test_steps: + - "Request password reset for known user" + - "Verify HTTP 200 response" + - "Check email for reset link (if email configured)" + - "Test reset token functionality" \ No newline at end of file diff --git a/tests/manual/testcases/api_export_import.yaml b/tests/manual/testcases/api_export_import.yaml new file mode 100644 index 000000000..234b79dff --- /dev/null +++ b/tests/manual/testcases/api_export_import.yaml @@ -0,0 +1,145 @@ +# MCP Gateway v0.7.0 - Export/Import API Tests +# Configuration backup and restore testing +# Focus: Data export/import, backup workflows, and recovery + +worksheet_name: "API Export Import" +description: "Complete configuration export/import API testing for backup and restore workflows" +priority: "MEDIUM" +estimated_time: "30-60 minutes" + +headers: + - "Test ID" + - "Endpoint" + - "Method" + - "Operation" + - "Description" + - "cURL Command" + - "Request Body" + - "Expected Status" + - "Expected Response" + - "Status" + - "Tester" + - "File Required" + - "Comments" + +tests: + - test_id: "EXP-001" + endpoint: "/admin/export/configuration" + method: "GET" + operation: "Export" + description: "Export complete configuration" + curl_command: 'curl http://localhost:4444/admin/export/configuration -u admin:changeme -o full_config_export.json' + request_body: "" + expected_status: 200 + expected_response: "JSON file downloaded with complete configuration" + file_required: "None" + + - test_id: "EXP-002" + endpoint: "/admin/export/configuration" + method: "GET" + operation: "Export" + description: "Export servers only" + curl_command: 'curl "http://localhost:4444/admin/export/configuration?types=servers" -u admin:changeme -o servers_only_export.json' + request_body: "" + expected_status: 200 + expected_response: "JSON file with servers only" + file_required: "None" + + - test_id: "EXP-003" + endpoint: "/admin/export/configuration" + method: "GET" + operation: "Export" + description: "Export with team filtering" + curl_command: 'curl "http://localhost:4444/admin/export/configuration?team_id={TEAM_ID}" -u admin:changeme -o team_export.json' + request_body: "" + expected_status: 200 + expected_response: "JSON file with team-specific resources only" + file_required: "None" + + - test_id: "EXP-004" + endpoint: "/admin/export/selective" + method: "POST" + operation: "Export" + description: "Selective entity export" + curl_command: 'curl -X POST http://localhost:4444/admin/export/selective -u admin:changeme -H "Content-Type: application/json"' + request_body: '{"entity_selections":{"servers":["server-id-1","server-id-2"],"tools":["tool-id-1"]},"include_dependencies":true}' + expected_status: 200 + expected_response: "JSON with selected entities and their dependencies" + file_required: "None" + + - test_id: "IMP-001" + endpoint: "/admin/import/configuration" + method: "POST" + operation: "Import" + description: "Import complete configuration" + curl_command: 'curl -X POST http://localhost:4444/admin/import/configuration -u admin:changeme -H "Content-Type: application/json" -d @full_config_export.json' + request_body: "JSON configuration file" + expected_status: 200 + expected_response: "Configuration imported successfully" + file_required: "full_config_export.json" + + - test_id: "IMP-002" + endpoint: "/admin/import/configuration" + method: "POST" + operation: "Import" + description: "Import with merge mode" + curl_command: 'curl -X POST http://localhost:4444/admin/import/configuration -u admin:changeme -H "Content-Type: application/json"' + request_body: '{"mode":"merge","data":"","team_assignment":"auto"}' + expected_status: 200 + expected_response: "Configuration merged without overwriting existing" + file_required: "Config JSON data" + + - test_id: "IMP-003" + endpoint: "/admin/import/configuration" + method: "POST" + operation: "Import" + description: "Import with replace mode" + curl_command: 'curl -X POST http://localhost:4444/admin/import/configuration -u admin:changeme -H "Content-Type: application/json"' + request_body: '{"mode":"replace","data":"","backup_existing":true}' + expected_status: 200 + expected_response: "Configuration replaced, existing data backed up" + file_required: "Config JSON data" + + - test_id: "IMP-004" + endpoint: "/admin/import/validate" + method: "POST" + operation: "Import" + description: "Validate import data before import" + curl_command: 'curl -X POST http://localhost:4444/admin/import/validate -u admin:changeme -H "Content-Type: application/json" -d @config_to_validate.json' + request_body: "JSON configuration to validate" + expected_status: 200 + expected_response: "Validation results with any errors or warnings" + file_required: "config_to_validate.json" + + - test_id: "IMP-005" + endpoint: "/admin/import/status" + method: "GET" + operation: "Import" + description: "Check import operation status" + curl_command: 'curl http://localhost:4444/admin/import/status -u admin:changeme' + request_body: "" + expected_status: 200 + expected_response: "Import operation status and progress" + file_required: "None" + + - test_id: "EXP-005" + endpoint: "/admin/export/logs" + method: "GET" + operation: "Export" + description: "Export system logs" + curl_command: 'curl http://localhost:4444/admin/export/logs -u admin:changeme -o system_logs.json' + request_body: "" + expected_status: 200 + expected_response: "System logs exported as JSON" + file_required: "None" + + - test_id: "BULK-001" + endpoint: "/admin/bulk-import" + method: "POST" + operation: "Import" + description: "Bulk import multiple entity types" + curl_command: 'curl -X POST http://localhost:4444/admin/bulk-import -u admin:changeme -H "Content-Type: application/json"' + request_body: '{"tools":[{"name":"bulk-tool-1","schema":{"type":"object"}}],"resources":[{"name":"bulk-resource-1","uri":"file://bulk.txt"}]}' + expected_status: 201 + expected_response: "Bulk import completed with summary" + file_required: "None" \ No newline at end of file diff --git a/tests/manual/testcases/api_federation.yaml b/tests/manual/testcases/api_federation.yaml new file mode 100644 index 000000000..e83c926fa --- /dev/null +++ b/tests/manual/testcases/api_federation.yaml @@ -0,0 +1,115 @@ +# MCP Gateway v0.7.0 - Federation API Tests +# Gateway-to-gateway federation testing +# Focus: Peer registration, discovery, and cross-gateway operations + +worksheet_name: "API Federation" +description: "Gateway federation testing including peer management and cross-gateway communication" +priority: "MEDIUM" +estimated_time: "45-90 minutes" + +headers: + - "Test ID" + - "Endpoint" + - "Method" + - "Description" + - "cURL Command" + - "Request Body" + - "Expected Status" + - "Expected Response" + - "Status" + - "Tester" + - "Setup Required" + - "Comments" + +tests: + - test_id: "FED-001" + endpoint: "/gateways" + method: "GET" + description: "List registered peer gateways" + curl_command: 'curl http://localhost:4444/gateways -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Array of registered peer gateways" + setup_required: "FEDERATION_ENABLED=true" + + - test_id: "FED-002" + endpoint: "/gateways" + method: "POST" + description: "Register new peer gateway" + curl_command: 'curl -X POST http://localhost:4444/gateways -H "Authorization: Bearer " -H "Content-Type: application/json"' + request_body: '{"name":"test-peer-gateway","description":"Peer gateway for testing","endpoint":"http://peer.example.com:4444","auth_type":"basic","auth_config":{"username":"admin","password":"changeme"}}' + expected_status: 201 + expected_response: "Peer gateway registered successfully" + + - test_id: "FED-003" + endpoint: "/gateways/{id}" + method: "GET" + description: "Get peer gateway details" + curl_command: 'curl http://localhost:4444/gateways/{GATEWAY_ID} -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Gateway details with connection info" + + - test_id: "FED-004" + endpoint: "/gateways/{id}/health" + method: "GET" + description: "Check peer gateway health" + curl_command: 'curl http://localhost:4444/gateways/{GATEWAY_ID}/health -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Peer gateway health status and connectivity" + + - test_id: "FED-005" + endpoint: "/gateways/{id}/tools" + method: "GET" + description: "List tools available from peer" + curl_command: 'curl http://localhost:4444/gateways/{GATEWAY_ID}/tools -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Array of tools available from peer gateway" + + - test_id: "FED-006" + endpoint: "/gateways/{id}/sync" + method: "POST" + description: "Synchronize with peer gateway" + curl_command: 'curl -X POST http://localhost:4444/gateways/{GATEWAY_ID}/sync -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Synchronization completed successfully" + + - test_id: "FED-007" + endpoint: "/gateways/{id}" + method: "PUT" + description: "Update peer gateway configuration" + curl_command: 'curl -X PUT http://localhost:4444/gateways/{GATEWAY_ID} -H "Authorization: Bearer " -H "Content-Type: application/json"' + request_body: '{"name":"updated-peer","description":"Updated peer gateway"}' + expected_status: 200 + expected_response: "Peer gateway updated successfully" + + - test_id: "FED-008" + endpoint: "/gateways/{id}" + method: "DELETE" + description: "Unregister peer gateway" + curl_command: 'curl -X DELETE http://localhost:4444/gateways/{GATEWAY_ID} -H "Authorization: Bearer "' + request_body: "" + expected_status: 204 + expected_response: "Peer gateway unregistered successfully" + + - test_id: "FED-009" + endpoint: "/federation/discover" + method: "GET" + description: "Auto-discover peer gateways" + curl_command: 'curl http://localhost:4444/federation/discover -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Discovered peer gateways via mDNS/Zeroconf" + setup_required: "FEDERATION_DISCOVERY=true" + + - test_id: "FED-010" + endpoint: "/federation/status" + method: "GET" + description: "Get federation status and metrics" + curl_command: 'curl http://localhost:4444/federation/status -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Federation status with peer connectivity metrics" \ No newline at end of file diff --git a/tests/manual/testcases/api_prompts.yaml b/tests/manual/testcases/api_prompts.yaml new file mode 100644 index 000000000..3bd94051f --- /dev/null +++ b/tests/manual/testcases/api_prompts.yaml @@ -0,0 +1,115 @@ +# MCP Gateway v0.7.0 - Prompts API Tests +# Prompt management and rendering testing +# Focus: Prompt CRUD, template rendering, and team access + +worksheet_name: "API Prompts" +description: "Complete prompt management API testing including templates and rendering" +priority: "MEDIUM" +estimated_time: "30-60 minutes" + +headers: + - "Test ID" + - "Endpoint" + - "Method" + - "Description" + - "cURL Command" + - "Request Body" + - "Expected Status" + - "Expected Response" + - "Actual Status" + - "Actual Response" + - "Status" + - "Tester" + - "Comments" + +tests: + - test_id: "PROM-001" + endpoint: "/prompts" + method: "GET" + description: "List available prompts with team filtering" + curl_command: 'curl http://localhost:4444/prompts -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Array of prompts accessible based on team membership" + + - test_id: "PROM-002" + endpoint: "/prompts" + method: "POST" + description: "Create new prompt template" + curl_command: 'curl -X POST http://localhost:4444/prompts -H "Authorization: Bearer " -H "Content-Type: application/json"' + request_body: '{"name":"test-api-prompt","description":"Prompt created via API","content":"Hello {{name}}! Welcome to {{location}}.","arguments":{"name":{"type":"string","description":"User name"},"location":{"type":"string","description":"Location name"}}}' + expected_status: 201 + expected_response: "Prompt created successfully with team assignment" + + - test_id: "PROM-003" + endpoint: "/prompts/{id}" + method: "GET" + description: "Get prompt details and template" + curl_command: 'curl http://localhost:4444/prompts/{PROMPT_ID} -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Prompt details with template content and argument definitions" + + - test_id: "PROM-004" + endpoint: "/prompts/{id}" + method: "PUT" + description: "Update prompt template" + curl_command: 'curl -X PUT http://localhost:4444/prompts/{PROMPT_ID} -H "Authorization: Bearer " -H "Content-Type: application/json"' + request_body: '{"name":"updated-prompt","content":"Updated template: Hello {{name}}!"}' + expected_status: 200 + expected_response: "Prompt updated successfully" + + - test_id: "PROM-005" + endpoint: "/prompts/{id}" + method: "DELETE" + description: "Delete prompt" + curl_command: 'curl -X DELETE http://localhost:4444/prompts/{PROMPT_ID} -H "Authorization: Bearer "' + request_body: "" + expected_status: 204 + expected_response: "Prompt deleted successfully" + + - test_id: "PROM-006" + endpoint: "/prompts/{id}/render" + method: "POST" + description: "Render prompt with arguments" + curl_command: 'curl -X POST http://localhost:4444/prompts/{PROMPT_ID}/render -H "Authorization: Bearer " -H "Content-Type: application/json"' + request_body: '{"arguments":{"name":"John Doe","location":"New York City"}}' + expected_status: 200 + expected_response: "Rendered prompt content: Hello John Doe! Welcome to New York City." + critical: true + + - test_id: "PROM-007" + endpoint: "/prompts/search" + method: "GET" + description: "Search prompts by content or metadata" + curl_command: 'curl "http://localhost:4444/prompts/search?q=hello&limit=10" -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Search results with team-based access control" + + - test_id: "PROM-008" + endpoint: "/prompts/export" + method: "GET" + description: "Export prompts as JSON" + curl_command: 'curl http://localhost:4444/prompts/export -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Prompts exported with template content and metadata" + + - test_id: "PROM-009" + endpoint: "/prompts/import" + method: "POST" + description: "Bulk import prompts" + curl_command: 'curl -X POST http://localhost:4444/prompts/import -H "Authorization: Bearer " -H "Content-Type: application/json"' + request_body: '{"prompts":[{"name":"import-test","content":"Imported template: {{message}}","arguments":{"message":{"type":"string"}}}]}' + expected_status: 201 + expected_response: "Prompts imported successfully with team assignments" + + - test_id: "PROM-010" + endpoint: "/prompts/{id}/validate" + method: "POST" + description: "Validate prompt syntax and arguments" + curl_command: 'curl -X POST http://localhost:4444/prompts/{PROMPT_ID}/validate -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Prompt validation results and syntax check" \ No newline at end of file diff --git a/tests/manual/testcases/api_resources.yaml b/tests/manual/testcases/api_resources.yaml new file mode 100644 index 000000000..5acac0933 --- /dev/null +++ b/tests/manual/testcases/api_resources.yaml @@ -0,0 +1,132 @@ +# MCP Gateway v0.7.0 - Resources API Tests +# Resource management and content testing +# Focus: Resource CRUD, content handling, and team access control + +worksheet_name: "API Resources" +description: "Complete resource management API testing including upload, download, and team permissions" +priority: "HIGH" +estimated_time: "30-60 minutes" + +headers: + - "Test ID" + - "Endpoint" + - "Method" + - "Description" + - "cURL Command" + - "Request Body" + - "Expected Status" + - "Expected Response" + - "Actual Status" + - "Actual Response" + - "Status" + - "Tester" + - "Comments" + +tests: + - test_id: "RES-001" + endpoint: "/resources" + method: "GET" + description: "List available resources with team filtering" + curl_command: 'curl http://localhost:4444/resources -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Array of resources accessible to user based on team membership" + + - test_id: "RES-002" + endpoint: "/resources" + method: "POST" + description: "Create new resource" + curl_command: 'curl -X POST http://localhost:4444/resources -H "Authorization: Bearer " -H "Content-Type: application/json"' + request_body: '{"name":"test-api-resource","description":"Resource created via API","uri":"file://test-data.txt","mime_type":"text/plain","content":"Sample test content"}' + expected_status: 201 + expected_response: "Resource created successfully with automatic team assignment" + + - test_id: "RES-003" + endpoint: "/resources/{id}" + method: "GET" + description: "Get resource details and metadata" + curl_command: 'curl http://localhost:4444/resources/{RESOURCE_ID} -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Resource details with metadata, team, and access info" + + - test_id: "RES-004" + endpoint: "/resources/{id}" + method: "PUT" + description: "Update resource metadata" + curl_command: 'curl -X PUT http://localhost:4444/resources/{RESOURCE_ID} -H "Authorization: Bearer " -H "Content-Type: application/json"' + request_body: '{"name":"updated-resource-name","description":"Updated via API testing"}' + expected_status: 200 + expected_response: "Resource metadata updated successfully" + + - test_id: "RES-005" + endpoint: "/resources/{id}" + method: "DELETE" + description: "Delete resource" + curl_command: 'curl -X DELETE http://localhost:4444/resources/{RESOURCE_ID} -H "Authorization: Bearer "' + request_body: "" + expected_status: 204 + expected_response: "Resource deleted successfully" + + - test_id: "RES-006" + endpoint: "/resources/{id}/content" + method: "GET" + description: "Get resource content data" + curl_command: 'curl http://localhost:4444/resources/{RESOURCE_ID}/content -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Resource content data in appropriate format" + + - test_id: "RES-007" + endpoint: "/resources/{id}/content" + method: "PUT" + description: "Update resource content" + curl_command: 'curl -X PUT http://localhost:4444/resources/{RESOURCE_ID}/content -H "Authorization: Bearer " -H "Content-Type: application/json"' + request_body: '{"content":"Updated resource content data"}' + expected_status: 200 + expected_response: "Resource content updated successfully" + + - test_id: "RES-008" + endpoint: "/resources/templates" + method: "GET" + description: "List available resource templates" + curl_command: 'curl http://localhost:4444/resources/templates -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Array of available resource templates" + + - test_id: "RES-009" + endpoint: "/resources/search" + method: "GET" + description: "Search resources by name or content" + curl_command: 'curl "http://localhost:4444/resources/search?q=test&type=text" -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Search results with team-based filtering" + + - test_id: "RES-010" + endpoint: "/resources/{id}/subscribe" + method: "POST" + description: "Subscribe to resource updates" + curl_command: 'curl -X POST http://localhost:4444/resources/{RESOURCE_ID}/subscribe -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Subscription created for resource updates" + + - test_id: "RES-011" + endpoint: "/resources/import" + method: "POST" + description: "Bulk import resources" + curl_command: 'curl -X POST http://localhost:4444/resources/import -H "Authorization: Bearer " -H "Content-Type: application/json"' + request_body: '{"resources":[{"name":"bulk-resource","uri":"file://bulk-data.txt","mime_type":"text/plain"}]}' + expected_status: 201 + expected_response: "Resources imported successfully" + + - test_id: "RES-012" + endpoint: "/resources/export" + method: "GET" + description: "Export resources as JSON" + curl_command: 'curl http://localhost:4444/resources/export -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Resources exported with team context" \ No newline at end of file diff --git a/tests/manual/testcases/api_servers.yaml b/tests/manual/testcases/api_servers.yaml new file mode 100644 index 000000000..bce1de0dd --- /dev/null +++ b/tests/manual/testcases/api_servers.yaml @@ -0,0 +1,115 @@ +# MCP Gateway v0.7.0 - Virtual Servers API Tests +# Server management endpoint testing +# Focus: Virtual server CRUD operations and transport testing + +worksheet_name: "API Servers" +description: "Virtual server management API testing including CRUD and transport endpoints" +priority: "HIGH" +estimated_time: "45-90 minutes" + +headers: + - "Test ID" + - "Endpoint" + - "Method" + - "Description" + - "cURL Command" + - "Request Body" + - "Expected Status" + - "Expected Response" + - "Actual Status" + - "Actual Response" + - "Status" + - "Tester" + - "Comments" + +tests: + - test_id: "SRV-001" + endpoint: "/servers" + method: "GET" + description: "List virtual servers with team filtering" + curl_command: 'curl http://localhost:4444/servers -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Array of virtual servers user can access" + critical: true + + - test_id: "SRV-002" + endpoint: "/servers" + method: "POST" + description: "Create new virtual server" + curl_command: 'curl -X POST http://localhost:4444/servers -H "Authorization: Bearer " -H "Content-Type: application/json"' + request_body: '{"name":"Manual Test Server","description":"Server created during testing","transport":"sse","config":{"timeout":30}}' + expected_status: 201 + expected_response: "Virtual server created with ID and team assignment" + + - test_id: "SRV-003" + endpoint: "/servers/{id}" + method: "GET" + description: "Get server details and configuration" + curl_command: 'curl http://localhost:4444/servers/{SERVER_ID} -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Server details with full configuration" + + - test_id: "SRV-004" + endpoint: "/servers/{id}" + method: "PUT" + description: "Update server configuration" + curl_command: 'curl -X PUT http://localhost:4444/servers/{SERVER_ID} -H "Authorization: Bearer " -H "Content-Type: application/json"' + request_body: '{"name":"Updated Server Name","description":"Updated during testing"}' + expected_status: 200 + expected_response: "Server updated successfully" + + - test_id: "SRV-005" + endpoint: "/servers/{id}/sse" + method: "GET" + description: "Server-Sent Events connection test" + curl_command: 'curl -N http://localhost:4444/servers/{SERVER_ID}/sse -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "SSE stream established, events received" + + - test_id: "SRV-006" + endpoint: "/servers/{id}/tools" + method: "GET" + description: "List tools available on server" + curl_command: 'curl http://localhost:4444/servers/{SERVER_ID}/tools -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Array of tools available on the server" + + - test_id: "SRV-007" + endpoint: "/servers/{id}/resources" + method: "GET" + description: "List resources available on server" + curl_command: 'curl http://localhost:4444/servers/{SERVER_ID}/resources -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Array of resources available on the server" + + - test_id: "SRV-008" + endpoint: "/servers/{id}/status" + method: "GET" + description: "Get server status and health" + curl_command: 'curl http://localhost:4444/servers/{SERVER_ID}/status -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Server status, health, and connection info" + + - test_id: "SRV-009" + endpoint: "/servers/{id}" + method: "DELETE" + description: "Delete virtual server" + curl_command: 'curl -X DELETE http://localhost:4444/servers/{SERVER_ID} -H "Authorization: Bearer "' + request_body: "" + expected_status: 204 + expected_response: "Server deleted successfully" + + - test_id: "SRV-010" + endpoint: "/servers/{id}/restart" + method: "POST" + description: "Restart virtual server" + curl_command: 'curl -X POST http://localhost:4444/servers/{SERVER_ID}/restart -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Server restarted successfully" \ No newline at end of file diff --git a/tests/manual/testcases/api_teams.yaml b/tests/manual/testcases/api_teams.yaml new file mode 100644 index 000000000..337006cd8 --- /dev/null +++ b/tests/manual/testcases/api_teams.yaml @@ -0,0 +1,184 @@ +# MCP Gateway v0.7.0 - Teams API Tests +# Team management endpoint testing +# Focus: Multi-tenancy team operations + +worksheet_name: "API Teams" +description: "Complete team management API testing including CRUD operations and membership" +priority: "HIGH" +estimated_time: "30-60 minutes" + +headers: + - "Test ID" + - "Endpoint" + - "Method" + - "Description" + - "cURL Command" + - "Request Body" + - "Expected Status" + - "Expected Response" + - "Actual Status" + - "Actual Response" + - "Status" + - "Tester" + - "Comments" + +tests: + - test_id: "TEAM-001" + endpoint: "/teams" + method: "GET" + description: "List user's teams" + curl_command: 'curl http://localhost:4444/teams -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Array of teams user belongs to" + test_steps: + - "Get JWT token from login first" + - "Execute teams list request" + - "Verify HTTP 200 status" + - "Check response is JSON array" + - "Verify personal team is included" + - "Check team data includes name, id, visibility" + + - test_id: "TEAM-002" + endpoint: "/teams" + method: "POST" + description: "Create new team" + curl_command: 'curl -X POST http://localhost:4444/teams -H "Authorization: Bearer " -H "Content-Type: application/json"' + request_body: '{"name":"Manual Test Team","description":"Team created during manual testing","visibility":"private","max_members":20}' + expected_status: 201 + expected_response: "Team created successfully with generated ID" + test_steps: + - "Prepare team creation data" + - "Execute team creation request" + - "Verify HTTP 201 status" + - "Check response contains team ID" + - "Verify team appears in teams list" + - "Save team ID for subsequent tests" + + - test_id: "TEAM-003" + endpoint: "/teams/{id}" + method: "GET" + description: "Get team details" + curl_command: 'curl http://localhost:4444/teams/{TEAM_ID} -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Team details with member information" + test_steps: + - "Use team ID from creation test or personal team" + - "Request team details" + - "Verify HTTP 200 status" + - "Check response includes team metadata" + - "Verify member list is included" + - "Check permissions are enforced" + + - test_id: "TEAM-004" + endpoint: "/teams/{id}" + method: "PUT" + description: "Update team information" + curl_command: 'curl -X PUT http://localhost:4444/teams/{TEAM_ID} -H "Authorization: Bearer " -H "Content-Type: application/json"' + request_body: '{"name":"Updated Team Name","description":"Updated during manual testing"}' + expected_status: 200 + expected_response: "Team updated successfully" + test_steps: + - "Use team ID from creation test" + - "Prepare update data" + - "Execute team update request" + - "Verify HTTP 200 status" + - "Check team details show updated information" + - "Verify only team owners can update" + + - test_id: "TEAM-005" + endpoint: "/teams/{id}" + method: "DELETE" + description: "Delete team" + curl_command: 'curl -X DELETE http://localhost:4444/teams/{TEAM_ID} -H "Authorization: Bearer "' + request_body: "" + expected_status: 204 + expected_response: "Team deleted successfully (or 403 if personal team)" + test_steps: + - "Use test team ID (not personal team)" + - "Execute team deletion request" + - "Verify appropriate HTTP status" + - "Check team no longer exists" + - "Test that personal teams cannot be deleted" + - "Verify team resources are handled properly" + + - test_id: "TEAM-006" + endpoint: "/teams/{id}/members" + method: "GET" + description: "List team members" + curl_command: 'curl http://localhost:4444/teams/{TEAM_ID}/members -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Array of team members with roles" + test_steps: + - "Use valid team ID" + - "Request member list" + - "Verify HTTP 200 status" + - "Check members array in response" + - "Verify member roles (owner/member)" + - "Check join dates and status" + + - test_id: "TEAM-007" + endpoint: "/teams/{id}/members" + method: "POST" + description: "Add team member" + curl_command: 'curl -X POST http://localhost:4444/teams/{TEAM_ID}/members -H "Authorization: Bearer " -H "Content-Type: application/json"' + request_body: '{"user_email":"newmember@example.com","role":"member"}' + expected_status: 201 + expected_response: "Member added to team successfully" + test_steps: + - "Create test user first (if needed)" + - "Prepare member addition data" + - "Execute add member request" + - "Verify HTTP 201 status" + - "Check member appears in member list" + - "Verify only team owners can add members" + + - test_id: "TEAM-008" + endpoint: "/teams/{id}/invitations" + method: "GET" + description: "List team invitations" + curl_command: 'curl http://localhost:4444/teams/{TEAM_ID}/invitations -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Array of pending invitations" + test_steps: + - "Use valid team ID" + - "Request invitations list" + - "Verify HTTP 200 status" + - "Check invitations array" + - "Verify invitation details (email, role, status)" + - "Test permissions (team owners only)" + + - test_id: "TEAM-009" + endpoint: "/teams/{id}/invitations" + method: "POST" + description: "Create team invitation" + curl_command: 'curl -X POST http://localhost:4444/teams/{TEAM_ID}/invitations -H "Authorization: Bearer " -H "Content-Type: application/json"' + request_body: '{"email":"invitee@example.com","role":"member","message":"Join our testing team!"}' + expected_status: 201 + expected_response: "Invitation created and sent" + test_steps: + - "Prepare invitation data" + - "Execute invitation creation" + - "Verify HTTP 201 status" + - "Check invitation created in database" + - "Verify email sent (if email configured)" + - "Test invitation token functionality" + + - test_id: "TEAM-010" + endpoint: "/teams/{id}/leave" + method: "POST" + description: "Leave team" + curl_command: 'curl -X POST http://localhost:4444/teams/{TEAM_ID}/leave -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Successfully left team (or 403 if personal team)" + test_steps: + - "Use non-personal team ID" + - "Execute leave team request" + - "Verify appropriate response" + - "Check user no longer in member list" + - "Test that personal teams cannot be left" + - "Verify access to team resources is removed" \ No newline at end of file diff --git a/tests/manual/testcases/api_tools.yaml b/tests/manual/testcases/api_tools.yaml new file mode 100644 index 000000000..d25c28e5e --- /dev/null +++ b/tests/manual/testcases/api_tools.yaml @@ -0,0 +1,140 @@ +# MCP Gateway v0.7.0 - Tools API Tests +# Tool management and invocation testing +# Focus: Tool CRUD operations, invocation, and team-based access + +worksheet_name: "API Tools" +description: "Complete tool management API testing including creation, invocation, and team scoping" +priority: "HIGH" +estimated_time: "45-90 minutes" + +headers: + - "Test ID" + - "Endpoint" + - "Method" + - "Description" + - "cURL Command" + - "Request Body" + - "Expected Status" + - "Expected Response" + - "Actual Status" + - "Actual Response" + - "Status" + - "Tester" + - "Comments" + +tests: + - test_id: "TOOL-001" + endpoint: "/tools" + method: "GET" + description: "List available tools with team filtering" + curl_command: 'curl http://localhost:4444/tools -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Array of tools filtered by team permissions" + test_steps: + - "Get valid JWT token" + - "Execute tools list request" + - "Verify HTTP 200 status" + - "Check response contains tools array" + - "Verify team-based filtering applied" + - "Check tool metadata includes team, owner, visibility" + + - test_id: "TOOL-002" + endpoint: "/tools" + method: "POST" + description: "Create new tool" + curl_command: 'curl -X POST http://localhost:4444/tools -H "Authorization: Bearer " -H "Content-Type: application/json"' + request_body: '{"name":"test-api-tool","description":"Tool created via API","schema":{"type":"object","properties":{"input":{"type":"string","description":"Input parameter"}},"required":["input"]}}' + expected_status: 201 + expected_response: "Tool created successfully with team assignment" + + - test_id: "TOOL-003" + endpoint: "/tools/{id}" + method: "GET" + description: "Get tool details and schema" + curl_command: 'curl http://localhost:4444/tools/{TOOL_ID} -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Tool details with complete schema definition" + + - test_id: "TOOL-004" + endpoint: "/tools/{id}" + method: "PUT" + description: "Update tool configuration" + curl_command: 'curl -X PUT http://localhost:4444/tools/{TOOL_ID} -H "Authorization: Bearer " -H "Content-Type: application/json"' + request_body: '{"name":"updated-tool-name","description":"Updated via API testing"}' + expected_status: 200 + expected_response: "Tool updated successfully" + + - test_id: "TOOL-005" + endpoint: "/tools/{id}" + method: "DELETE" + description: "Delete tool" + curl_command: 'curl -X DELETE http://localhost:4444/tools/{TOOL_ID} -H "Authorization: Bearer "' + request_body: "" + expected_status: 204 + expected_response: "Tool deleted successfully" + + - test_id: "TOOL-006" + endpoint: "/tools/{id}/invoke" + method: "POST" + description: "Invoke tool execution" + curl_command: 'curl -X POST http://localhost:4444/tools/{TOOL_ID}/invoke -H "Authorization: Bearer " -H "Content-Type: application/json"' + request_body: '{"arguments":{"input":"test data for tool execution"}}' + expected_status: 200 + expected_response: "Tool execution result with output" + critical: true + + - test_id: "TOOL-007" + endpoint: "/tools/{id}/schema" + method: "GET" + description: "Get tool schema definition" + curl_command: 'curl http://localhost:4444/tools/{TOOL_ID}/schema -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Tool schema in JSON Schema format" + + - test_id: "TOOL-008" + endpoint: "/tools/search" + method: "GET" + description: "Search tools by name or description" + curl_command: 'curl "http://localhost:4444/tools/search?q=time&limit=10" -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Search results matching query with team filtering" + + - test_id: "TOOL-009" + endpoint: "/tools/import" + method: "POST" + description: "Bulk import tools" + curl_command: 'curl -X POST http://localhost:4444/tools/import -H "Authorization: Bearer " -H "Content-Type: application/json"' + request_body: '{"tools":[{"name":"bulk-import-test","description":"Bulk imported tool","schema":{"type":"object","properties":{"test":{"type":"string"}}}}]}' + expected_status: 201 + expected_response: "Tools imported successfully with team assignments" + + - test_id: "TOOL-010" + endpoint: "/tools/export" + method: "GET" + description: "Export tools as JSON" + curl_command: 'curl http://localhost:4444/tools/export -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Tools exported as JSON with team context" + + - test_id: "TOOL-011" + endpoint: "/tools/{id}/history" + method: "GET" + description: "Get tool execution history" + curl_command: 'curl http://localhost:4444/tools/{TOOL_ID}/history -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Tool execution history and metrics" + + - test_id: "TOOL-012" + endpoint: "/tools/{id}/validate" + method: "POST" + description: "Validate tool schema and configuration" + curl_command: 'curl -X POST http://localhost:4444/tools/{TOOL_ID}/validate -H "Authorization: Bearer "' + request_body: "" + expected_status: 200 + expected_response: "Tool validation results and any warnings" \ No newline at end of file diff --git a/tests/manual/testcases/database_tests.yaml b/tests/manual/testcases/database_tests.yaml new file mode 100644 index 000000000..162f8cf25 --- /dev/null +++ b/tests/manual/testcases/database_tests.yaml @@ -0,0 +1,176 @@ +# MCP Gateway v0.7.0 - Database Tests +# Database compatibility and performance testing +# Focus: SQLite vs PostgreSQL comparison and migration validation + +worksheet_name: "Database Tests" +description: "Complete database compatibility testing for SQLite and PostgreSQL" +priority: "HIGH" +estimated_time: "60-120 minutes" + +headers: + - "Test ID" + - "Database Type" + - "Feature" + - "Test Commands" + - "Expected Result" + - "Actual Result" + - "Performance" + - "Status" + - "Tester" + - "Date" + - "Comments" + +tests: + - test_id: "DB-001" + database_type: "SQLite" + feature: "Migration Execution" + test_commands: | + 1. Set DATABASE_URL=sqlite:///./test_migration.db in .env + 2. python3 -m mcpgateway.bootstrap_db + 3. sqlite3 test_migration.db '.tables' + 4. sqlite3 test_migration.db 'SELECT COUNT(*) FROM email_users;' + expected: "All multitenancy tables created, admin user exists" + performance: "Fast" + + - test_id: "DB-002" + database_type: "SQLite" + feature: "Team Data Population" + test_commands: | + 1. sqlite3 mcp.db 'SELECT COUNT(*) FROM servers WHERE team_id IS NOT NULL;' + 2. sqlite3 mcp.db 'SELECT COUNT(*) FROM tools WHERE team_id IS NOT NULL;' + 3. sqlite3 mcp.db 'SELECT COUNT(*) FROM servers WHERE team_id IS NULL;' + expected: "All resources have team_id populated, zero NULL values" + performance: "Fast" + + - test_id: "DB-003" + database_type: "SQLite" + feature: "Connection Pool" + test_commands: | + 1. Set DB_POOL_SIZE=50 in .env + 2. Start gateway: make dev + 3. Run concurrent requests: for i in {1..20}; do curl http://localhost:4444/health & done; wait + expected: "Connections managed within SQLite limits (~50)" + performance: "Good" + + - test_id: "DB-004" + database_type: "SQLite" + feature: "JSON Fields" + test_commands: | + 1. sqlite3 mcp.db 'SELECT name, schema FROM tools WHERE schema IS NOT NULL LIMIT 3;' + 2. sqlite3 mcp.db 'UPDATE tools SET schema = json_set(schema, "$.test", "value") WHERE id = (SELECT id FROM tools LIMIT 1);' + expected: "JSON data stored and queried correctly" + performance: "Good" + + - test_id: "DB-005" + database_type: "SQLite" + feature: "Backup and Restore" + test_commands: | + 1. cp mcp.db backup_test.db + 2. sqlite3 mcp.db 'DELETE FROM email_teams WHERE name = "test";' + 3. cp backup_test.db mcp.db + 4. sqlite3 mcp.db 'SELECT COUNT(*) FROM email_teams;' + expected: "File-based backup and restore works perfectly" + performance: "Excellent" + + - test_id: "DB-006" + database_type: "PostgreSQL" + feature: "Migration Execution" + test_commands: | + 1. export DATABASE_URL=postgresql://user:pass@localhost:5432/mcp_test + 2. createdb mcp_test + 3. python3 -m mcpgateway.bootstrap_db + 4. psql mcp_test -c '\dt' | grep email + expected: "All tables created with PostgreSQL-specific types" + performance: "Fast" + + - test_id: "DB-007" + database_type: "PostgreSQL" + feature: "UUID and JSONB" + test_commands: | + 1. psql mcp_test -c 'SELECT id FROM email_teams LIMIT 1;' + 2. psql mcp_test -c 'SELECT config FROM servers WHERE config IS NOT NULL LIMIT 1;' + 3. psql mcp_test -c 'SELECT * FROM tools WHERE schema @> '{"type":"object"}';' + expected: "UUID columns work, JSONB queries efficient" + performance: "Excellent" + + - test_id: "DB-008" + database_type: "PostgreSQL" + feature: "High Concurrency" + test_commands: | + 1. Set DB_POOL_SIZE=200 in .env + 2. Start gateway: make dev + 3. Run high concurrency: for i in {1..100}; do curl http://localhost:4444/health & done; wait + expected: "High concurrency supported (200+ connections)" + performance: "Excellent" + + - test_id: "DB-009" + database_type: "PostgreSQL" + feature: "Full-Text Search" + test_commands: | + 1. psql mcp_test -c 'SELECT name FROM tools WHERE to_tsvector(name) @@ plainto_tsquery("time");' + 2. psql mcp_test -c 'SELECT name, ts_rank(to_tsvector(name), plainto_tsquery("time")) FROM tools WHERE to_tsvector(name) @@ plainto_tsquery("time") ORDER BY ts_rank DESC;' + expected: "Advanced full-text search with ranking" + performance: "Excellent" + + - test_id: "DB-010" + database_type: "PostgreSQL" + feature: "Backup and Restore" + test_commands: | + 1. pg_dump mcp_test > backup_test.sql + 2. psql mcp_test -c 'DELETE FROM email_teams WHERE name LIKE "test%";' + 3. dropdb mcp_test && createdb mcp_test + 4. psql mcp_test < backup_test.sql + expected: "SQL-based backup and restore works perfectly" + performance: "Good" + + - test_id: "DB-011" + database_type: "Both" + feature: "Transaction Integrity" + test_commands: | + 1. Begin transaction + 2. Create team, add members, create resources + 3. Rollback transaction + 4. Verify no changes persisted + expected: "ACID transactions work correctly on both databases" + performance: "Good" + + - test_id: "DB-012" + database_type: "Both" + feature: "Constraint Enforcement" + test_commands: | + 1. Try deleting team with members + 2. Try inserting duplicate team slug + 3. Try invalid foreign key reference + expected: "Constraints enforced, referential integrity maintained" + performance: "Good" + + - test_id: "DB-013" + database_type: "Both" + feature: "Performance Under Load" + test_commands: | + 1. Create 1000+ resources (SQLite) / 10,000+ (PostgreSQL) + 2. Test team-filtered queries + 3. Monitor memory usage and response times + expected: "Reasonable performance within database limits" + performance: "Variable" + + - test_id: "DB-014" + database_type: "Both" + feature: "Migration Rollback" + test_commands: | + 1. Note current migration version: alembic current + 2. Run downgrade: alembic downgrade -1 + 3. Check schema reverted + 4. Run upgrade again: alembic upgrade head + expected: "Clean rollback and re-upgrade possible" + performance: "Good" + + - test_id: "DB-015" + database_type: "Both" + feature: "Cross-Database Compatibility" + test_commands: | + 1. Export configuration from SQLite setup + 2. Import same configuration into PostgreSQL setup + 3. Verify data integrity and functionality + expected: "Data portable between database types" + performance: "Good" \ No newline at end of file diff --git a/tests/manual/testcases/edge_cases.yaml b/tests/manual/testcases/edge_cases.yaml new file mode 100644 index 000000000..22176d7d1 --- /dev/null +++ b/tests/manual/testcases/edge_cases.yaml @@ -0,0 +1,209 @@ +# MCP Gateway v0.7.0 - Edge Cases and Error Conditions +# Edge case testing and error handling validation +# Focus: Boundary conditions, error scenarios, and recovery + +worksheet_name: "Edge Cases" +description: "Edge case testing including error conditions, boundary values, and recovery scenarios" +priority: "MEDIUM" +estimated_time: "60-90 minutes" + +headers: + - "Test ID" + - "Edge Case Category" + - "Scenario" + - "Test Steps" + - "Expected Behavior" + - "Actual Behavior" + - "Recovery Method" + - "Status" + - "Tester" + - "Date" + - "Severity" + - "Comments" + +tests: + - test_id: "EDGE-001" + category: "Empty Database" + scenario: "Fresh installation on empty database" + steps: | + 1. Delete existing database file + 2. Run migration: python3 -m mcpgateway.bootstrap_db + 3. Check system initialization + 4. Verify admin user and team creation + expected: "System initializes correctly from completely empty state" + recovery: "Bootstrap migration process" + severity: "Low" + + - test_id: "EDGE-002" + category: "Network Interruption" + scenario: "Network connection lost during operation" + steps: | + 1. Start long-running operation (large export) + 2. Disconnect network interface + 3. Wait 30 seconds + 4. Reconnect network + 5. Check operation recovery + expected: "Graceful error handling, operation retry or proper failure" + recovery: "Retry mechanism or user notification" + severity: "Medium" + + - test_id: "EDGE-003" + category: "Orphaned Resources" + scenario: "Resources without team assignments" + steps: | + 1. Manually set team_id to NULL: UPDATE tools SET team_id = NULL WHERE id = 'test-id'; + 2. Navigate to admin UI tools section + 3. Check tool visibility + 4. Run fix script: python3 scripts/fix_multitenancy_0_7_0_resources.py + 5. Verify resource assignment + expected: "Fix script successfully assigns orphaned resources to admin team" + recovery: "Fix script execution" + severity: "High" + + - test_id: "EDGE-004" + category: "Large Payloads" + scenario: "Oversized request payloads" + steps: | + 1. Create very large JSON payload (>10MB) + 2. Send to tool creation endpoint + 3. Check request size limits enforced + 4. Verify proper error handling + expected: "Request size limits enforced gracefully" + recovery: "Error message with size limit info" + severity: "Medium" + + - test_id: "EDGE-005" + category: "Malformed Data" + scenario: "Invalid JSON and parameter formats" + steps: | + 1. Send malformed JSON to API endpoints + 2. Send invalid parameter types + 3. Test with missing required fields + 4. Check validation error responses + expected: "Input validation rejects malformed data with helpful errors" + recovery: "Validation error messages" + severity: "Medium" + + - test_id: "EDGE-006" + category: "Resource Conflicts" + scenario: "Name conflicts and duplicate identifiers" + steps: | + 1. Create team with existing name + 2. Try creating tool with existing name in same team + 3. Test unique constraint enforcement + 4. Check conflict resolution + expected: "Unique constraints enforced, conflicts handled gracefully" + recovery: "Conflict error messages" + severity: "Medium" + + - test_id: "EDGE-007" + category: "Session Management" + scenario: "Session expiry and timeout handling" + steps: | + 1. Login and get JWT token + 2. Wait for token expiry (or manually expire) + 3. Try using expired token + 4. Test session refresh workflow + expected: "Expired tokens rejected, refresh workflow available" + recovery: "Token refresh or re-authentication" + severity: "Medium" + + - test_id: "EDGE-008" + category: "Database Connection" + scenario: "Database becomes unavailable" + steps: | + 1. Start gateway normally + 2. Stop database service + 3. Try API operations + 4. Restart database + 5. Check connection recovery + expected: "Graceful error handling, automatic reconnection" + recovery: "Connection pool recovery" + severity: "High" + + - test_id: "EDGE-009" + category: "Disk Space" + scenario: "Insufficient disk space" + steps: | + 1. Fill disk space (test environment only) + 2. Try creating resources + 3. Try database operations + 4. Check error handling + expected: "Disk space errors handled gracefully" + recovery: "Clear error messages" + severity: "Medium" + + - test_id: "EDGE-010" + category: "Unicode and Special Characters" + scenario: "International characters and special symbols" + steps: | + 1. Create team with Unicode name: 测试团队 + 2. Create tool with emoji: 🔧 Test Tool + 3. Test special characters in descriptions + 4. Verify proper encoding/decoding + expected: "Unicode and special characters handled correctly" + recovery: "UTF-8 encoding support" + severity: "Low" + + - test_id: "EDGE-011" + category: "Rapid State Changes" + scenario: "Quick successive operations on same resource" + steps: | + 1. Create tool + 2. Rapidly update tool multiple times + 3. Delete and recreate quickly + 4. Check state consistency + expected: "State consistency maintained, no race conditions" + recovery: "Locking mechanisms" + severity: "Medium" + + - test_id: "EDGE-012" + category: "Migration Interruption" + scenario: "Migration process fails or is interrupted" + steps: | + 1. Start migration + 2. Interrupt process (Ctrl+C) + 3. Check database state + 4. Try re-running migration + 5. Verify recovery + expected: "Migration can be safely restarted or rolled back" + recovery: "Migration rollback or resume" + severity: "Critical" + + - test_id: "EDGE-013" + category: "Team Limits" + scenario: "Exceeding team member or resource limits" + steps: | + 1. Set low team limits in configuration + 2. Try exceeding member limits + 3. Try exceeding resource limits + 4. Check limit enforcement + expected: "Limits enforced with clear error messages" + recovery: "Quota management interface" + severity: "Medium" + + - test_id: "EDGE-014" + category: "Cross-Database Migration" + scenario: "Migrating data between SQLite and PostgreSQL" + steps: | + 1. Setup data in SQLite + 2. Export configuration + 3. Switch to PostgreSQL + 4. Run migration + 5. Import configuration + 6. Verify data integrity + expected: "Data migrates correctly between database types" + recovery: "Export/import workflow" + severity: "High" + + - test_id: "EDGE-015" + category: "Clock Skew" + scenario: "Time synchronization issues" + steps: | + 1. Change system clock + 2. Test token expiration + 3. Test audit logging timestamps + 4. Check time-based operations + expected: "Time-based operations handle clock differences gracefully" + recovery: "UTC normalization" + severity: "Low" \ No newline at end of file diff --git a/tests/manual/testcases/migration_tests.yaml b/tests/manual/testcases/migration_tests.yaml new file mode 100644 index 000000000..58924b711 --- /dev/null +++ b/tests/manual/testcases/migration_tests.yaml @@ -0,0 +1,157 @@ +# MCP Gateway v0.7.0 - Migration Tests +# Critical post-migration validation tests +# Focus: Verify old servers are visible and migration successful + +worksheet_name: "Migration Tests" +description: "Critical post-migration validation tests to ensure v0.6.0 → v0.7.0 upgrade successful" +priority: "CRITICAL" +estimated_time: "60-90 minutes" + +headers: + - "Test ID" + - "Priority" + - "Component" + - "Description" + - "Detailed Steps" + - "Expected Result" + - "Actual Output" + - "Status" + - "Tester" + - "Date" + - "Comments" + - "SQLite" + - "PostgreSQL" + +tests: + - test_id: "MIG-001" + priority: "CRITICAL" + component: "Admin User Creation" + description: "Verify platform admin user was created during migration" + steps: | + 1. Check expected admin email from configuration: + python3 -c "from mcpgateway.config import settings; print(f'Expected admin: {settings.platform_admin_email}')" + 2. Check actual admin user in database: + python3 -c "from mcpgateway.db import SessionLocal, EmailUser; db=SessionLocal(); admin=db.query(EmailUser).filter(EmailUser.is_admin==True).first(); print(f'Found admin: {admin.email if admin else None}, is_admin: {admin.is_admin if admin else False}'); db.close()" + 3. Compare expected vs actual results + 4. Record both outputs exactly + expected: "Expected admin email matches found admin email, is_admin=True" + sqlite_support: true + postgresql_support: true + validation_command: 'python3 -c "from mcpgateway.config import settings; from mcpgateway.db import SessionLocal, EmailUser; db=SessionLocal(); admin=db.query(EmailUser).filter(EmailUser.email==settings.platform_admin_email, EmailUser.is_admin==True).first(); result = \"PASS\" if admin else \"FAIL\"; print(f\"Result: {result}\"); db.close()"' + + - test_id: "MIG-002" + priority: "CRITICAL" + component: "Personal Team Creation" + description: "Verify admin user has personal team created automatically" + steps: | + 1. Run full verification script: + python3 scripts/verify_multitenancy_0_7_0_migration.py + 2. Look for 'PERSONAL TEAM CHECK' section in output + 3. Record team ID, name, and slug shown + 4. Verify there are no error messages + 5. Note team visibility (should be 'private') + expected: "✅ Personal team found: (Team ID: , Slug: , Visibility: private)" + sqlite_support: true + postgresql_support: true + + - test_id: "MIG-003" + priority: "CRITICAL" + component: "Server Visibility Fix" + description: "OLD SERVERS NOW VISIBLE - This is the main issue being fixed" + steps: | + 1. Open web browser to http://localhost:4444/admin + 2. Login with admin email and password from .env file + 3. Click 'Virtual Servers' in navigation menu + 4. Count total servers displayed in the list + 5. Identify servers created before migration (older creation dates) + 6. Click on each server to verify details are accessible + 7. Take screenshot of server list showing all servers + 8. Record server names, creation dates, and visibility settings + expected: "ALL pre-migration servers visible in admin UI server list, details accessible" + sqlite_support: true + postgresql_support: true + main_test: true + screenshot_required: true + critical_for_production: true + + - test_id: "MIG-004" + priority: "CRITICAL" + component: "Resource Team Assignment" + description: "All resources assigned to teams (no NULL team_id values)" + steps: | + 1. In admin UI, navigate to Tools section + 2. Click on any tool to view its details + 3. Verify 'Team' field shows team name (not empty or NULL) + 4. Verify 'Owner' field shows admin email address + 5. Verify 'Visibility' field has value (private/team/public) + 6. Repeat this check for Resources and Prompts sections + 7. Run database verification: + python3 -c "from mcpgateway.db import SessionLocal, Tool, Resource; db=SessionLocal(); tool_unassigned=db.query(Tool).filter(Tool.team_id==None).count(); resource_unassigned=db.query(Resource).filter(Resource.team_id==None).count(); print(f'Unassigned tools: {tool_unassigned}, resources: {resource_unassigned}'); db.close()" + expected: "All resources show Team/Owner/Visibility fields, database query shows 0 unassigned" + sqlite_support: true + postgresql_support: true + + - test_id: "MIG-005" + priority: "CRITICAL" + component: "Email Authentication" + description: "Email-based authentication functional after migration" + steps: | + 1. Open new private/incognito browser window + 2. Navigate to http://localhost:4444/admin + 3. Look for email login form or 'Email Login' option + 4. Enter admin email from .env file + 5. Enter admin password from .env file + 6. Click Login/Submit button + 7. Verify successful redirect to admin dashboard + 8. Check user menu/profile shows correct email address + expected: "Email authentication successful, dashboard loads, correct email displayed" + sqlite_support: true + postgresql_support: true + + - test_id: "MIG-006" + priority: "HIGH" + component: "Basic Auth Compatibility" + description: "Basic authentication still works alongside email auth" + steps: | + 1. Open new browser window + 2. Navigate to http://localhost:4444/admin + 3. Use browser basic auth popup (username: admin, password: changeme) + 4. Verify access is granted + 5. Navigate to different admin sections + 6. Test admin functionality works + expected: "Basic auth continues to work, no conflicts with email auth system" + sqlite_support: true + postgresql_support: true + + - test_id: "MIG-007" + priority: "HIGH" + component: "Database Schema Validation" + description: "All multitenancy tables created with proper structure" + steps: | + 1. Check multitenancy tables exist: + SQLite: sqlite3 mcp.db '.tables' | grep email + PostgreSQL: psql -d mcp -c '\dt' | grep email + 2. Verify required tables: email_users, email_teams, email_team_members, roles, user_roles + 3. Check table row counts: + python3 -c "from mcpgateway.db import SessionLocal, EmailUser, EmailTeam; db=SessionLocal(); users=db.query(EmailUser).count(); teams=db.query(EmailTeam).count(); print(f'Users: {users}, Teams: {teams}'); db.close()" + 4. Test foreign key relationships work properly + expected: "All multitenancy tables exist with proper data and working relationships" + sqlite_support: true + postgresql_support: true + + - test_id: "MIG-008" + priority: "MEDIUM" + component: "API Functionality Validation" + description: "Core APIs respond correctly after migration" + steps: | + 1. Test health endpoint: curl http://localhost:4444/health + 2. Get authentication token: + curl -X POST http://localhost:4444/auth/login -H 'Content-Type: application/json' -d '{"email":"","password":""}' + 3. Test teams API with token: + curl -H 'Authorization: Bearer ' http://localhost:4444/teams + 4. Test servers API: + curl -H 'Authorization: Bearer ' http://localhost:4444/servers + 5. Record all HTTP status codes and response content + expected: "Health=200, Login=200 with JWT token, Teams=200 with team data, Servers=200 with server data" + sqlite_support: true + postgresql_support: true \ No newline at end of file diff --git a/tests/manual/testcases/performance_tests.yaml b/tests/manual/testcases/performance_tests.yaml new file mode 100644 index 000000000..780898b36 --- /dev/null +++ b/tests/manual/testcases/performance_tests.yaml @@ -0,0 +1,107 @@ +# MCP Gateway v0.7.0 - Performance Tests +# Load testing and performance validation +# Focus: Stress testing, concurrent users, and performance benchmarks + +worksheet_name: "Performance Tests" +description: "Complete performance and load testing including concurrent users and stress scenarios" +priority: "MEDIUM" +estimated_time: "60-120 minutes" + +headers: + - "Test ID" + - "Performance Area" + - "Load Parameters" + - "Test Method" + - "Success Criteria" + - "Actual Results" + - "Performance Rating" + - "Status" + - "Tester" + - "Tools Used" + - "Date" + - "Comments" + +tests: + - test_id: "PERF-001" + performance_area: "API Throughput" + load_parameters: "1000 requests/minute" + test_method: "Apache Bench: ab -n 1000 -c 10 http://localhost:4444/health" + success_criteria: "Response time <1s, no errors, stable performance" + tools_used: "Apache Bench (ab)" + + - test_id: "PERF-002" + performance_area: "Concurrent Users" + load_parameters: "50 simultaneous users" + test_method: "Multiple concurrent API sessions with authentication" + success_criteria: "All requests succeed, response time <2s" + tools_used: "Load testing tool or custom script" + + - test_id: "PERF-003" + performance_area: "Database Performance" + load_parameters: "10,000+ resources (PostgreSQL), 1,000+ (SQLite)" + test_method: "Create large dataset, test team-filtered queries, measure response times" + success_criteria: "Query time <500ms, memory usage stable" + tools_used: "Database monitoring, query timing" + + - test_id: "PERF-004" + performance_area: "Memory Usage" + load_parameters: "Extended operation (4+ hours)" + test_method: "Run gateway under normal load, monitor memory consumption over time" + success_criteria: "Memory usage stable <1GB, no memory leaks" + tools_used: "Memory profiler, system monitoring" + + - test_id: "PERF-005" + performance_area: "WebSocket Connections" + load_parameters: "100 concurrent WebSocket connections" + test_method: "Open multiple WebSocket connections to different servers" + success_criteria: "All connections stable, low latency, no drops" + tools_used: "WebSocket testing tool" + + - test_id: "PERF-006" + performance_area: "SSE Connections" + load_parameters: "100 concurrent SSE streams" + test_method: "Open multiple Server-Sent Event connections" + success_criteria: "All streams stable, events delivered reliably" + tools_used: "SSE testing client" + + - test_id: "PERF-007" + performance_area: "Tool Execution" + load_parameters: "Multiple concurrent tool invocations" + test_method: "Execute multiple tools simultaneously, test queue management" + success_criteria: "Queue managed efficiently, all executions complete" + tools_used: "API testing tool" + + - test_id: "PERF-008" + performance_area: "Authentication Load" + load_parameters: "Rapid login/logout cycles" + test_method: "Script rapid authentication operations" + success_criteria: "Auth system remains stable, tokens managed properly" + tools_used: "Authentication testing script" + + - test_id: "PERF-009" + performance_area: "Team Operations" + load_parameters: "Large team operations (1000+ members)" + test_method: "Create teams with many members, test permission checking" + success_criteria: "Team operations scale well, permission checks fast" + tools_used: "Team management testing" + + - test_id: "PERF-010" + performance_area: "Export/Import Performance" + load_parameters: "Large configuration export/import" + test_method: "Export 10,000+ entities, measure time, test import" + success_criteria: "Export <5min, import <10min, no data loss" + tools_used: "Time measurement, data validation" + + - test_id: "PERF-011" + performance_area: "Federation Performance" + load_parameters: "Cross-gateway operations" + test_method: "Test communication with multiple peer gateways under load" + success_criteria: "Network overhead minimal, federation stable" + tools_used: "Network monitoring" + + - test_id: "PERF-012" + performance_area: "A2A Agent Performance" + load_parameters: "Multiple agent invocations" + test_method: "Test concurrent A2A agent calls, measure response times" + success_criteria: "Agent calls handled efficiently, timeouts respected" + tools_used: "Agent testing framework" \ No newline at end of file diff --git a/tests/manual/testcases/security_tests.yaml b/tests/manual/testcases/security_tests.yaml new file mode 100644 index 000000000..0bc177963 --- /dev/null +++ b/tests/manual/testcases/security_tests.yaml @@ -0,0 +1,173 @@ +# MCP Gateway v0.7.0 - Security Tests +# Security and penetration testing +# Focus: Attack scenarios and defense validation + +worksheet_name: "Security Tests" +description: "Security validation and penetration testing with actual attack scenarios" +priority: "HIGH" +estimated_time: "90-180 minutes" +warning: "Performs actual attack scenarios - test environment only" + +headers: + - "Test ID" + - "Attack Type" + - "Target" + - "Risk Level" + - "Attack Steps" + - "Expected Defense" + - "Actual Defense" + - "Vulnerability Found" + - "Status" + - "Tester" + - "Date" + - "Remediation" + - "Notes" + +tests: + - test_id: "SEC-001" + attack_type: "SQL Injection" + target: "Teams API" + risk_level: "Critical" + attack_steps: | + 1. Get valid JWT token from admin login + 2. Prepare malicious team name with SQL injection: + {"name":"'; DROP TABLE users; --","description":"injection test"} + 3. Execute attack: + curl -X POST http://localhost:4444/teams -H "Authorization: Bearer " -d '{"name":"\\"; DROP TABLE users; --"}' + 4. Check database integrity: + sqlite3 mcp.db '.tables' (verify users table still exists) + 5. Check error response handling + expected_defense: "Input sanitized, parameterized queries prevent injection, graceful error handling" + validation: "Database remains intact, no SQL executed, proper error returned" + + - test_id: "SEC-002" + attack_type: "JWT Token Manipulation" + target: "Authentication System" + risk_level: "Critical" + attack_steps: | + 1. Obtain valid JWT token through normal login + 2. Decode JWT payload (use jwt.io or similar tool) + 3. Modify claims (e.g., change user email, add admin role) + 4. Re-encode JWT with different signature + 5. Attempt to use modified token: + curl -H "Authorization: Bearer " http://localhost:4444/admin/users + 6. Verify access is denied + expected_defense: "Token signature validation prevents tampering, access denied" + validation: "Modified tokens rejected, signature verification works" + + - test_id: "SEC-003" + attack_type: "Team Isolation Bypass" + target: "Multi-tenancy Authorization" + risk_level: "Critical" + attack_steps: | + 1. Create two test users in different teams + 2. User A creates a private resource in Team 1 + 3. Get User B's JWT token + 4. User B attempts to access User A's resource: + curl -H "Authorization: Bearer " http://localhost:4444/resources/{USER_A_RESOURCE_ID} + 5. Verify access is denied + 6. Test with direct resource ID guessing + expected_defense: "Team boundaries strictly enforced, cross-team access blocked" + validation: "Access denied, team isolation maintained" + + - test_id: "SEC-004" + attack_type: "Privilege Escalation" + target: "RBAC System" + risk_level: "Critical" + attack_steps: | + 1. Login as regular user (non-admin) + 2. Attempt to access admin-only endpoints: + curl -H "Authorization: Bearer " http://localhost:4444/admin/users + 3. Try to modify own user role in database + 4. Attempt direct admin API calls + 5. Test admin UI access with regular user credentials + expected_defense: "Admin privileges protected, privilege escalation prevented" + validation: "Admin functions inaccessible to regular users" + + - test_id: "SEC-005" + attack_type: "Cross-Site Scripting (XSS)" + target: "Admin UI" + risk_level: "High" + attack_steps: | + 1. Access admin UI with valid credentials + 2. Create tool with malicious name: + Name: + 3. Save tool and navigate to tools list + 4. Check if JavaScript executes in browser + 5. Test other input fields for XSS vulnerabilities + 6. Check browser console for script execution + expected_defense: "Script tags escaped or sanitized, no JavaScript execution" + validation: "No alert boxes, scripts properly escaped in HTML" + + - test_id: "SEC-006" + attack_type: "Cross-Site Request Forgery (CSRF)" + target: "State-Changing Operations" + risk_level: "High" + attack_steps: | + 1. Create malicious HTML page with form posting to gateway + 2. Form targets state-changing endpoint (e.g., team creation) + 3. Get authenticated user to visit malicious page + 4. Check if operation executes without user consent + 5. Verify CSRF token requirements + 6. Test cross-origin request blocking + expected_defense: "CSRF tokens required, cross-origin requests properly blocked" + validation: "Operations require explicit user consent and CSRF protection" + + - test_id: "SEC-007" + attack_type: "Password Brute Force" + target: "Login Endpoint" + risk_level: "Medium" + attack_steps: | + 1. Script multiple rapid login attempts with wrong passwords: + for i in {1..10}; do curl -X POST http://localhost:4444/auth/login -d '{"email":"admin@example.com","password":"wrong$i"}'; done + 2. Monitor response times and status codes + 3. Check for rate limiting implementation + 4. Test account lockout after failed attempts + 5. Verify lockout duration enforcement + expected_defense: "Account locked after multiple failures, rate limiting enforced" + validation: "Brute force attacks mitigated by lockout and rate limiting" + + - test_id: "SEC-008" + attack_type: "File Upload Attack" + target: "Resource Management" + risk_level: "High" + attack_steps: | + 1. Try uploading executable file (.exe, .sh) + 2. Attempt script file upload (.py, .js, .php) + 3. Test oversized file upload + 4. Try files with malicious names + 5. Attempt path traversal in filenames (../../../etc/passwd) + 6. Check file type and size validation + expected_defense: "File type validation, size limits enforced, path sanitization" + validation: "Malicious uploads blocked, validation errors returned" + + - test_id: "SEC-009" + attack_type: "API Rate Limiting" + target: "DoS Prevention" + risk_level: "Medium" + attack_steps: | + 1. Script rapid API requests to test rate limiting: + for i in {1..100}; do curl -s http://localhost:4444/health; done + 2. Monitor response times and status codes + 3. Check for rate limit headers in responses + 4. Verify throttling and backoff mechanisms + 5. Test rate limiting on authenticated endpoints + expected_defense: "Rate limits enforced, DoS protection active, proper HTTP status codes" + validation: "Rate limiting prevents abuse, service remains stable" + + - test_id: "SEC-010" + attack_type: "Information Disclosure" + target: "Error Handling" + risk_level: "Medium" + attack_steps: | + 1. Trigger various error conditions: + - Invalid JSON syntax + - Missing required fields + - Invalid authentication + - Access denied scenarios + 2. Analyze error messages for sensitive information + 3. Check for stack traces in responses + 4. Look for database connection strings or paths + 5. Verify no internal system information disclosed + expected_defense: "No sensitive information disclosed in error responses" + validation: "Error messages are user-friendly without exposing system internals" \ No newline at end of file diff --git a/tests/manual/testcases/setup_instructions.yaml b/tests/manual/testcases/setup_instructions.yaml new file mode 100644 index 000000000..8db1d51dc --- /dev/null +++ b/tests/manual/testcases/setup_instructions.yaml @@ -0,0 +1,173 @@ +# MCP Gateway v0.7.0 - Setup Instructions +# Complete environment setup guide for manual testers +# Must be completed before any other testing + +worksheet_name: "Setup Instructions" +description: "Complete environment setup and validation for MCP Gateway testing" +priority: "CRITICAL" +estimated_time: "30-60 minutes" +prerequisite: true + +headers: + - "Step" + - "Action" + - "Command" + - "Expected Result" + - "Troubleshooting" + - "Status" + - "Notes" + - "Required" + +prerequisites: + - "Python 3.11+ installed (python3 --version)" + - "Git installed (git --version)" + - "curl installed (curl --version)" + - "Modern web browser (Chrome/Firefox recommended)" + - "Text editor (vi/vim/VSCode)" + - "Terminal/command line access" + - "4+ hours dedicated testing time" + - "Reliable internet connection" + - "Admin/sudo access for package installation" + - "Basic understanding of web applications and APIs" + +tests: + - step: "1" + action: "Check Prerequisites" + command: "python3 --version && git --version && curl --version" + expected: "Python 3.11+, Git, and curl version numbers displayed" + troubleshooting: "Install missing tools via package manager" + required: true + notes: "Must have all three tools" + + - step: "2" + action: "Clone Repository" + command: "git clone " + expected: "Repository cloned successfully" + troubleshooting: "Check git credentials and network access" + required: true + notes: "Get repository URL from admin" + + - step: "3" + action: "Enter Project Directory" + command: "cd mcp-context-forge" + expected: "Directory changed to project root" + troubleshooting: "Use 'ls' to verify files like README.md, .env.example exist" + required: true + notes: "" + + - step: "4" + action: "Copy Environment File" + command: "cp .env.example .env" + expected: "Environment configuration file created" + troubleshooting: "Check file exists: ls -la .env" + required: true + notes: "" + + - step: "5" + action: "Edit Configuration" + command: "vi .env" + expected: "Configuration file opened in vi editor" + troubleshooting: "Use :wq to save and quit vi editor" + required: true + notes: "Set PLATFORM_ADMIN_EMAIL=" + + - step: "6" + action: "Configure Admin Email" + command: "Set PLATFORM_ADMIN_EMAIL=" + expected: "Admin email configured in .env" + troubleshooting: "Use a real email address you control" + required: true + notes: "This will be your login email" + + - step: "7" + action: "Configure Admin Password" + command: "Set PLATFORM_ADMIN_PASSWORD=" + expected: "Admin password configured in .env" + troubleshooting: "Use 12+ character password for security" + required: true + notes: "Don't use 'changeme' in production" + + - step: "8" + action: "Enable Email Authentication" + command: "Set EMAIL_AUTH_ENABLED=true" + expected: "Email authentication enabled" + troubleshooting: "Required for multitenancy features" + required: true + notes: "Critical for migration" + + - step: "9" + action: "Verify Configuration" + command: 'python3 -c "from mcpgateway.config import settings; print(f\"Admin: {settings.platform_admin_email}\")"' + expected: "Shows your configured admin email address" + troubleshooting: "If error, check .env file syntax and save" + required: true + notes: "Configuration validation" + + - step: "10" + action: "Install Dependencies" + command: "make install-dev" + expected: "All Python packages installed successfully" + troubleshooting: "May take 5-15 minutes, check internet connection" + required: true + notes: "Download and install packages" + + - step: "11" + action: "Run Database Migration" + command: "python3 -m mcpgateway.bootstrap_db" + expected: "'Database ready' message displayed at end" + troubleshooting: "MUST complete successfully - get help if fails" + required: true + notes: "CRITICAL STEP - migration execution" + critical: true + + - step: "12" + action: "Verify Migration Success" + command: "python3 scripts/verify_multitenancy_0_7_0_migration.py" + expected: "'🎉 MIGRATION VERIFICATION: SUCCESS!' message at end" + troubleshooting: "All checks must pass - use fix script if needed" + required: true + notes: "Migration validation" + critical: true + + - step: "13" + action: "Start MCP Gateway" + command: "make dev" + expected: "'Uvicorn running on http://0.0.0.0:4444' message" + troubleshooting: "Keep this terminal window open during testing" + required: true + notes: "Server startup" + + - step: "14" + action: "Test Basic Connectivity" + command: "curl http://localhost:4444/health" + expected: '{"status":"ok"}' + troubleshooting: "If fails, check server started correctly" + required: true + notes: "Basic connectivity test" + + - step: "15" + action: "Access Admin UI" + command: "Open http://localhost:4444/admin in browser" + expected: "Admin login page loads successfully" + troubleshooting: "Try both http:// and https:// if needed" + required: true + notes: "Web interface access" + + - step: "16" + action: "Test Admin Authentication" + command: "Login with admin email/password from .env file" + expected: "Successful login, admin dashboard appears" + troubleshooting: "Main authentication validation test" + required: true + notes: "Primary authentication test" + critical: true + + - step: "17" + action: "Verify Servers Visible (MAIN TEST)" + command: "Navigate to Virtual Servers section in admin UI" + expected: "Server list displays including pre-migration servers" + troubleshooting: "If empty list, migration failed - get help immediately" + required: true + notes: "THIS IS THE MAIN MIGRATION VALIDATION TEST" + critical: true + main_test: true \ No newline at end of file From 91594ac8028c6d2da6cbe280cc424def07aa77f3 Mon Sep 17 00:00:00 2001 From: Mihai Criveti Date: Thu, 4 Sep 2025 09:28:26 +0100 Subject: [PATCH 28/49] Add manual testing Signed-off-by: Mihai Criveti --- tests/manual/generate_test_plan.py | 58 +++++++++---------- tests/manual/testcases/admin_ui_tests.yaml | 10 ++-- tests/manual/testcases/api_a2a.yaml | 4 +- .../manual/testcases/api_authentication.yaml | 18 +++--- tests/manual/testcases/api_export_import.yaml | 10 ++-- tests/manual/testcases/api_federation.yaml | 2 +- tests/manual/testcases/api_prompts.yaml | 2 +- tests/manual/testcases/api_resources.yaml | 2 +- tests/manual/testcases/api_servers.yaml | 6 +- tests/manual/testcases/api_teams.yaml | 2 +- tests/manual/testcases/api_tools.yaml | 2 +- tests/manual/testcases/database_tests.yaml | 4 +- tests/manual/testcases/edge_cases.yaml | 2 +- tests/manual/testcases/migration_tests.yaml | 16 ++--- tests/manual/testcases/performance_tests.yaml | 2 +- tests/manual/testcases/security_tests.yaml | 6 +- .../manual/testcases/setup_instructions.yaml | 6 +- 17 files changed, 76 insertions(+), 76 deletions(-) diff --git a/tests/manual/generate_test_plan.py b/tests/manual/generate_test_plan.py index e7268f1df..134e2678f 100755 --- a/tests/manual/generate_test_plan.py +++ b/tests/manual/generate_test_plan.py @@ -27,32 +27,32 @@ def generate_excel_from_yaml(): """Generate Excel file from YAML test definitions.""" - + print("📊 GENERATING EXCEL FROM YAML TEST FILES") print("=" * 60) print("📁 Reading from testcases/ directory") - + # Find YAML files in testcases directory testcases_dir = Path("testcases") if not testcases_dir.exists(): print("❌ testcases/ directory not found") return False - + yaml_files = list(testcases_dir.glob("*.yaml")) yaml_files = sorted(yaml_files) - + if not yaml_files: print("❌ No YAML test files found") return False - + print(f"📄 Found {len(yaml_files)} YAML files:") for yf in yaml_files: print(f" 📄 {yf.name}") - + # Create Excel workbook wb = openpyxl.Workbook() wb.remove(wb.active) - + # Styles styles = { 'title': Font(size=16, bold=True, color="1F4E79"), @@ -61,42 +61,42 @@ def generate_excel_from_yaml(): 'critical_fill': PatternFill(start_color="C5504B", end_color="C5504B", fill_type="solid"), 'critical_font': Font(color="FFFFFF", bold=True) } - + # Process each YAML file for yaml_file in yaml_files: try: with open(yaml_file, 'r') as f: yaml_data = yaml.safe_load(f) - + worksheet_name = yaml_data.get('worksheet_name', yaml_file.stem) headers = yaml_data.get('headers', []) tests = yaml_data.get('tests', []) - + print(f"\n 📄 {yaml_file.name} → {worksheet_name}") print(f" 📊 {len(tests)} tests") - + # Create worksheet sheet = wb.create_sheet(worksheet_name) - + # Add headers for i, header in enumerate(headers, 1): cell = sheet.cell(row=1, column=i, value=header) cell.fill = styles['header_fill'] cell.font = styles['header_font'] - + # Add test data for row_idx, test in enumerate(tests, 2): for col_idx, header in enumerate(headers, 1): value = get_yaml_value(test, header) cell = sheet.cell(row=row_idx, column=col_idx, value=value) - + # Apply formatting if header.lower() == "priority" and value == "CRITICAL": cell.fill = styles['critical_fill'] cell.font = styles['critical_font'] elif header.lower() == "status": cell.value = "☐" - + # Auto-size columns for col in range(1, len(headers) + 1): max_len = 0 @@ -106,30 +106,30 @@ def generate_excel_from_yaml(): max_len = max(max_len, len(str(val))) width = min(max(max_len + 2, 10), 60) sheet.column_dimensions[get_column_letter(col)].width = width - + print(f" ✅ Created") - + except Exception as e: print(f" ❌ Failed: {e}") - + # Save file output_path = Path("test-plan.xlsx") - + try: print(f"\n💾 Saving Excel file...") wb.save(output_path) wb.close() # CRITICAL: Close properly - + print(f"✅ File saved: {output_path}") - + # Verify test_wb = openpyxl.load_workbook(output_path) print(f"✅ Verified: {len(test_wb.worksheets)} worksheets") test_wb.close() - + print("\n🎊 SUCCESS! Excel generated from YAML files!") return True - + except Exception as e: print(f"❌ Save failed: {e}") return False @@ -137,11 +137,11 @@ def generate_excel_from_yaml(): def get_yaml_value(test, header): """Get value from YAML test data for Excel header.""" - + mappings = { "Test ID": "test_id", "Priority": "priority", - "Component": "component", + "Component": "component", "Description": "description", "Detailed Steps": "steps", "Steps": "steps", @@ -159,10 +159,10 @@ def get_yaml_value(test, header): "Attack Steps": "attack_steps", "Expected Defense": "expected_defense" } - + yaml_key = mappings.get(header, header.lower().replace(' ', '_')) value = test.get(yaml_key, "") - + # Handle special cases if header in ["SQLite", "PostgreSQL"]: return "✓" if test.get(f'{header.lower()}_support', True) else "❌" @@ -170,7 +170,7 @@ def get_yaml_value(test, header): return "" # Empty for tester to fill elif header == "Status": return "☐" - + return str(value) if value else "" @@ -188,4 +188,4 @@ def get_yaml_value(test, header): sys.exit(1) except Exception as e: print(f"❌ Error: {e}") - sys.exit(1) \ No newline at end of file + sys.exit(1) diff --git a/tests/manual/testcases/admin_ui_tests.yaml b/tests/manual/testcases/admin_ui_tests.yaml index 4dc9d2afe..598fc3481 100644 --- a/tests/manual/testcases/admin_ui_tests.yaml +++ b/tests/manual/testcases/admin_ui_tests.yaml @@ -25,7 +25,7 @@ headers: tests: - test_id: "UI-001" ui_section: "Authentication" - component: "Login Form" + component: "Login Form" action: "Test admin login interface" steps: | 1. Open web browser (Chrome or Firefox recommended) @@ -46,7 +46,7 @@ tests: - test_id: "UI-002" ui_section: "Dashboard" component: "Main Dashboard View" - action: "Navigate and test admin dashboard" + action: "Navigate and test admin dashboard" steps: | 1. After successful login, observe dashboard layout 2. Count the number of statistics cards displayed @@ -103,7 +103,7 @@ tests: - test_id: "UI-005" ui_section: "Tools" - component: "Tool Registry Interface" + component: "Tool Registry Interface" action: "Test tool management and invocation" steps: | 1. Navigate to 'Tools' section @@ -155,7 +155,7 @@ tests: 9. Execute the import process 10. Verify import completion and success expected: "Export downloads complete JSON, import processes successfully" - browser: "Chrome/Firefox" + browser: "Chrome/Firefox" screenshot: "Recommended" notes: "Important for backup/restore workflows" @@ -215,4 +215,4 @@ tests: 10. Test error logging and reporting expected: "Graceful error handling, helpful error messages, no JavaScript crashes" browser: "Chrome/Firefox" - screenshot: "For errors" \ No newline at end of file + screenshot: "For errors" diff --git a/tests/manual/testcases/api_a2a.yaml b/tests/manual/testcases/api_a2a.yaml index c90241cdf..14d8100ad 100644 --- a/tests/manual/testcases/api_a2a.yaml +++ b/tests/manual/testcases/api_a2a.yaml @@ -13,7 +13,7 @@ headers: - "Method" - "Agent Type" - "Description" - - "cURL Command" + - "cURL Command" - "Request Body" - "Expected Status" - "Expected Response" @@ -146,4 +146,4 @@ tests: curl_command: 'curl http://localhost:4444/a2a/providers -H "Authorization: Bearer "' request_body: "" expected_status: 200 - expected_response: "Array of supported agent providers (OpenAI, Anthropic, Custom)" \ No newline at end of file + expected_response: "Array of supported agent providers (OpenAI, Anthropic, Custom)" diff --git a/tests/manual/testcases/api_authentication.yaml b/tests/manual/testcases/api_authentication.yaml index b48dad3ac..820aaca19 100644 --- a/tests/manual/testcases/api_authentication.yaml +++ b/tests/manual/testcases/api_authentication.yaml @@ -10,13 +10,13 @@ estimated_time: "30-60 minutes" headers: - "Test ID" - "Endpoint" - - "Method" + - "Method" - "Description" - "cURL Command" - "Request Body" - "Expected Status" - "Expected Response" - - "Actual Status" + - "Actual Status" - "Actual Response" - "Status" - "Tester" @@ -40,7 +40,7 @@ tests: validation: "Response should include user_id, email, and personal_team_id" - test_id: "AUTH-002" - endpoint: "/auth/login" + endpoint: "/auth/login" method: "POST" description: "Email authentication login" curl_command: 'curl -X POST http://localhost:4444/auth/login -H "Content-Type: application/json"' @@ -72,7 +72,7 @@ tests: - "Try using the token again (should fail)" - "Verify token is now invalid" - - test_id: "AUTH-004" + - test_id: "AUTH-004" endpoint: "/auth/refresh" method: "POST" description: "JWT token refresh" @@ -104,7 +104,7 @@ tests: - test_id: "AUTH-006" endpoint: "/auth/change-password" - method: "POST" + method: "POST" description: "Change user password" curl_command: 'curl -X POST http://localhost:4444/auth/change-password -H "Authorization: Bearer " -H "Content-Type: application/json"' request_body: '{"old_password":"changeme","new_password":"NewPassword123"}' @@ -122,7 +122,7 @@ tests: endpoint: "/auth/sso/github" method: "GET" description: "GitHub SSO authentication initiation" - curl_command: 'curl -I http://localhost:4444/auth/sso/github' + curl_command: "curl -I http://localhost:4444/auth/sso/github" request_body: "" expected_status: 302 expected_response: "Redirect to GitHub OAuth authorization" @@ -137,14 +137,14 @@ tests: endpoint: "/auth/sso/google" method: "GET" description: "Google SSO authentication initiation" - curl_command: 'curl -I http://localhost:4444/auth/sso/google' + curl_command: "curl -I http://localhost:4444/auth/sso/google" request_body: "" expected_status: 302 expected_response: "Redirect to Google OAuth authorization" requires_config: "SSO_GOOGLE_ENABLED=true, Google OAuth app" test_steps: - "Execute request to Google SSO endpoint" - - "Verify HTTP 302 redirect status" + - "Verify HTTP 302 redirect status" - "Check Location header contains accounts.google.com" - "Verify OAuth parameters in redirect URL" @@ -176,4 +176,4 @@ tests: - "Request password reset for known user" - "Verify HTTP 200 response" - "Check email for reset link (if email configured)" - - "Test reset token functionality" \ No newline at end of file + - "Test reset token functionality" diff --git a/tests/manual/testcases/api_export_import.yaml b/tests/manual/testcases/api_export_import.yaml index 234b79dff..d9e94c9a3 100644 --- a/tests/manual/testcases/api_export_import.yaml +++ b/tests/manual/testcases/api_export_import.yaml @@ -1,4 +1,4 @@ -# MCP Gateway v0.7.0 - Export/Import API Tests +# MCP Gateway v0.7.0 - Export/Import API Tests # Configuration backup and restore testing # Focus: Data export/import, backup workflows, and recovery @@ -28,7 +28,7 @@ tests: method: "GET" operation: "Export" description: "Export complete configuration" - curl_command: 'curl http://localhost:4444/admin/export/configuration -u admin:changeme -o full_config_export.json' + curl_command: "curl http://localhost:4444/admin/export/configuration -u admin:changeme -o full_config_export.json" request_body: "" expected_status: 200 expected_response: "JSON file downloaded with complete configuration" @@ -116,7 +116,7 @@ tests: method: "GET" operation: "Import" description: "Check import operation status" - curl_command: 'curl http://localhost:4444/admin/import/status -u admin:changeme' + curl_command: "curl http://localhost:4444/admin/import/status -u admin:changeme" request_body: "" expected_status: 200 expected_response: "Import operation status and progress" @@ -127,7 +127,7 @@ tests: method: "GET" operation: "Export" description: "Export system logs" - curl_command: 'curl http://localhost:4444/admin/export/logs -u admin:changeme -o system_logs.json' + curl_command: "curl http://localhost:4444/admin/export/logs -u admin:changeme -o system_logs.json" request_body: "" expected_status: 200 expected_response: "System logs exported as JSON" @@ -142,4 +142,4 @@ tests: request_body: '{"tools":[{"name":"bulk-tool-1","schema":{"type":"object"}}],"resources":[{"name":"bulk-resource-1","uri":"file://bulk.txt"}]}' expected_status: 201 expected_response: "Bulk import completed with summary" - file_required: "None" \ No newline at end of file + file_required: "None" diff --git a/tests/manual/testcases/api_federation.yaml b/tests/manual/testcases/api_federation.yaml index e83c926fa..99048ec95 100644 --- a/tests/manual/testcases/api_federation.yaml +++ b/tests/manual/testcases/api_federation.yaml @@ -112,4 +112,4 @@ tests: curl_command: 'curl http://localhost:4444/federation/status -H "Authorization: Bearer "' request_body: "" expected_status: 200 - expected_response: "Federation status with peer connectivity metrics" \ No newline at end of file + expected_response: "Federation status with peer connectivity metrics" diff --git a/tests/manual/testcases/api_prompts.yaml b/tests/manual/testcases/api_prompts.yaml index 3bd94051f..73ed06929 100644 --- a/tests/manual/testcases/api_prompts.yaml +++ b/tests/manual/testcases/api_prompts.yaml @@ -112,4 +112,4 @@ tests: curl_command: 'curl -X POST http://localhost:4444/prompts/{PROMPT_ID}/validate -H "Authorization: Bearer "' request_body: "" expected_status: 200 - expected_response: "Prompt validation results and syntax check" \ No newline at end of file + expected_response: "Prompt validation results and syntax check" diff --git a/tests/manual/testcases/api_resources.yaml b/tests/manual/testcases/api_resources.yaml index 5acac0933..02f31104b 100644 --- a/tests/manual/testcases/api_resources.yaml +++ b/tests/manual/testcases/api_resources.yaml @@ -129,4 +129,4 @@ tests: curl_command: 'curl http://localhost:4444/resources/export -H "Authorization: Bearer "' request_body: "" expected_status: 200 - expected_response: "Resources exported with team context" \ No newline at end of file + expected_response: "Resources exported with team context" diff --git a/tests/manual/testcases/api_servers.yaml b/tests/manual/testcases/api_servers.yaml index bce1de0dd..fccfdd408 100644 --- a/tests/manual/testcases/api_servers.yaml +++ b/tests/manual/testcases/api_servers.yaml @@ -4,7 +4,7 @@ worksheet_name: "API Servers" description: "Virtual server management API testing including CRUD and transport endpoints" -priority: "HIGH" +priority: "HIGH" estimated_time: "45-90 minutes" headers: @@ -44,7 +44,7 @@ tests: - test_id: "SRV-003" endpoint: "/servers/{id}" - method: "GET" + method: "GET" description: "Get server details and configuration" curl_command: 'curl http://localhost:4444/servers/{SERVER_ID} -H "Authorization: Bearer "' request_body: "" @@ -112,4 +112,4 @@ tests: curl_command: 'curl -X POST http://localhost:4444/servers/{SERVER_ID}/restart -H "Authorization: Bearer "' request_body: "" expected_status: 200 - expected_response: "Server restarted successfully" \ No newline at end of file + expected_response: "Server restarted successfully" diff --git a/tests/manual/testcases/api_teams.yaml b/tests/manual/testcases/api_teams.yaml index 337006cd8..37de2dfd0 100644 --- a/tests/manual/testcases/api_teams.yaml +++ b/tests/manual/testcases/api_teams.yaml @@ -181,4 +181,4 @@ tests: - "Verify appropriate response" - "Check user no longer in member list" - "Test that personal teams cannot be left" - - "Verify access to team resources is removed" \ No newline at end of file + - "Verify access to team resources is removed" diff --git a/tests/manual/testcases/api_tools.yaml b/tests/manual/testcases/api_tools.yaml index d25c28e5e..4e60bc3d1 100644 --- a/tests/manual/testcases/api_tools.yaml +++ b/tests/manual/testcases/api_tools.yaml @@ -137,4 +137,4 @@ tests: curl_command: 'curl -X POST http://localhost:4444/tools/{TOOL_ID}/validate -H "Authorization: Bearer "' request_body: "" expected_status: 200 - expected_response: "Tool validation results and any warnings" \ No newline at end of file + expected_response: "Tool validation results and any warnings" diff --git a/tests/manual/testcases/database_tests.yaml b/tests/manual/testcases/database_tests.yaml index 162f8cf25..ce9dbad95 100644 --- a/tests/manual/testcases/database_tests.yaml +++ b/tests/manual/testcases/database_tests.yaml @@ -4,7 +4,7 @@ worksheet_name: "Database Tests" description: "Complete database compatibility testing for SQLite and PostgreSQL" -priority: "HIGH" +priority: "HIGH" estimated_time: "60-120 minutes" headers: @@ -173,4 +173,4 @@ tests: 2. Import same configuration into PostgreSQL setup 3. Verify data integrity and functionality expected: "Data portable between database types" - performance: "Good" \ No newline at end of file + performance: "Good" diff --git a/tests/manual/testcases/edge_cases.yaml b/tests/manual/testcases/edge_cases.yaml index 22176d7d1..c0b892e34 100644 --- a/tests/manual/testcases/edge_cases.yaml +++ b/tests/manual/testcases/edge_cases.yaml @@ -206,4 +206,4 @@ tests: 4. Check time-based operations expected: "Time-based operations handle clock differences gracefully" recovery: "UTC normalization" - severity: "Low" \ No newline at end of file + severity: "Low" diff --git a/tests/manual/testcases/migration_tests.yaml b/tests/manual/testcases/migration_tests.yaml index 58924b711..b369903f7 100644 --- a/tests/manual/testcases/migration_tests.yaml +++ b/tests/manual/testcases/migration_tests.yaml @@ -9,7 +9,7 @@ estimated_time: "60-90 minutes" headers: - "Test ID" - - "Priority" + - "Priority" - "Component" - "Description" - "Detailed Steps" @@ -40,7 +40,7 @@ tests: validation_command: 'python3 -c "from mcpgateway.config import settings; from mcpgateway.db import SessionLocal, EmailUser; db=SessionLocal(); admin=db.query(EmailUser).filter(EmailUser.email==settings.platform_admin_email, EmailUser.is_admin==True).first(); result = \"PASS\" if admin else \"FAIL\"; print(f\"Result: {result}\"); db.close()"' - test_id: "MIG-002" - priority: "CRITICAL" + priority: "CRITICAL" component: "Personal Team Creation" description: "Verify admin user has personal team created automatically" steps: | @@ -56,7 +56,7 @@ tests: - test_id: "MIG-003" priority: "CRITICAL" - component: "Server Visibility Fix" + component: "Server Visibility Fix" description: "OLD SERVERS NOW VISIBLE - This is the main issue being fixed" steps: | 1. Open web browser to http://localhost:4444/admin @@ -91,7 +91,7 @@ tests: sqlite_support: true postgresql_support: true - - test_id: "MIG-005" + - test_id: "MIG-005" priority: "CRITICAL" component: "Email Authentication" description: "Email-based authentication functional after migration" @@ -110,7 +110,7 @@ tests: - test_id: "MIG-006" priority: "HIGH" - component: "Basic Auth Compatibility" + component: "Basic Auth Compatibility" description: "Basic authentication still works alongside email auth" steps: | 1. Open new browser window @@ -126,7 +126,7 @@ tests: - test_id: "MIG-007" priority: "HIGH" component: "Database Schema Validation" - description: "All multitenancy tables created with proper structure" + description: "All multitenancy tables created with proper structure" steps: | 1. Check multitenancy tables exist: SQLite: sqlite3 mcp.db '.tables' | grep email @@ -148,10 +148,10 @@ tests: 2. Get authentication token: curl -X POST http://localhost:4444/auth/login -H 'Content-Type: application/json' -d '{"email":"","password":""}' 3. Test teams API with token: - curl -H 'Authorization: Bearer ' http://localhost:4444/teams + curl -H 'Authorization: Bearer ' http://localhost:4444/teams 4. Test servers API: curl -H 'Authorization: Bearer ' http://localhost:4444/servers 5. Record all HTTP status codes and response content expected: "Health=200, Login=200 with JWT token, Teams=200 with team data, Servers=200 with server data" sqlite_support: true - postgresql_support: true \ No newline at end of file + postgresql_support: true diff --git a/tests/manual/testcases/performance_tests.yaml b/tests/manual/testcases/performance_tests.yaml index 780898b36..1d9da112f 100644 --- a/tests/manual/testcases/performance_tests.yaml +++ b/tests/manual/testcases/performance_tests.yaml @@ -104,4 +104,4 @@ tests: load_parameters: "Multiple agent invocations" test_method: "Test concurrent A2A agent calls, measure response times" success_criteria: "Agent calls handled efficiently, timeouts respected" - tools_used: "Agent testing framework" \ No newline at end of file + tools_used: "Agent testing framework" diff --git a/tests/manual/testcases/security_tests.yaml b/tests/manual/testcases/security_tests.yaml index 0bc177963..40e636655 100644 --- a/tests/manual/testcases/security_tests.yaml +++ b/tests/manual/testcases/security_tests.yaml @@ -42,7 +42,7 @@ tests: - test_id: "SEC-002" attack_type: "JWT Token Manipulation" - target: "Authentication System" + target: "Authentication System" risk_level: "Critical" attack_steps: | 1. Obtain valid JWT token through normal login @@ -73,7 +73,7 @@ tests: - test_id: "SEC-004" attack_type: "Privilege Escalation" target: "RBAC System" - risk_level: "Critical" + risk_level: "Critical" attack_steps: | 1. Login as regular user (non-admin) 2. Attempt to access admin-only endpoints: @@ -170,4 +170,4 @@ tests: 4. Look for database connection strings or paths 5. Verify no internal system information disclosed expected_defense: "No sensitive information disclosed in error responses" - validation: "Error messages are user-friendly without exposing system internals" \ No newline at end of file + validation: "Error messages are user-friendly without exposing system internals" diff --git a/tests/manual/testcases/setup_instructions.yaml b/tests/manual/testcases/setup_instructions.yaml index 8db1d51dc..54c646d11 100644 --- a/tests/manual/testcases/setup_instructions.yaml +++ b/tests/manual/testcases/setup_instructions.yaml @@ -79,7 +79,7 @@ tests: required: true notes: "This will be your login email" - - step: "7" + - step: "7" action: "Configure Admin Password" command: "Set PLATFORM_ADMIN_PASSWORD=" expected: "Admin password configured in .env" @@ -138,7 +138,7 @@ tests: notes: "Server startup" - step: "14" - action: "Test Basic Connectivity" + action: "Test Basic Connectivity" command: "curl http://localhost:4444/health" expected: '{"status":"ok"}' troubleshooting: "If fails, check server started correctly" @@ -170,4 +170,4 @@ tests: required: true notes: "THIS IS THE MAIN MIGRATION VALIDATION TEST" critical: true - main_test: true \ No newline at end of file + main_test: true From f6ef2f762ed17aaab0701fdc76849fb14215dce5 Mon Sep 17 00:00:00 2001 From: Mihai Criveti Date: Thu, 4 Sep 2025 09:47:38 +0100 Subject: [PATCH 29/49] Fix gateways a2a and prompts migration Signed-off-by: Mihai Criveti --- scripts/fix_multitenancy_0_7_0_resources.py | 7 +++++-- scripts/verify_multitenancy_0_7_0_migration.py | 14 ++++++++++---- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/scripts/fix_multitenancy_0_7_0_resources.py b/scripts/fix_multitenancy_0_7_0_resources.py index 53436568d..cdeab0e82 100755 --- a/scripts/fix_multitenancy_0_7_0_resources.py +++ b/scripts/fix_multitenancy_0_7_0_resources.py @@ -23,7 +23,7 @@ sys.path.insert(0, str(project_root)) try: - from mcpgateway.db import SessionLocal, EmailUser, EmailTeam, Server, Tool, Resource + from mcpgateway.db import SessionLocal, EmailUser, EmailTeam, Server, Tool, Resource, Prompt, Gateway, A2AAgent from mcpgateway.config import settings from sqlalchemy import text except ImportError as e: @@ -71,7 +71,10 @@ def fix_unassigned_resources(): resource_types = [ ("servers", Server), ("tools", Tool), - ("resources", Resource) + ("resources", Resource), + ("prompts", Prompt), + ("gateways", Gateway), + ("a2a_agents", A2AAgent) ] total_fixed = 0 diff --git a/scripts/verify_multitenancy_0_7_0_migration.py b/scripts/verify_multitenancy_0_7_0_migration.py index 3a1fef0c6..a7e5b423e 100755 --- a/scripts/verify_multitenancy_0_7_0_migration.py +++ b/scripts/verify_multitenancy_0_7_0_migration.py @@ -9,7 +9,7 @@ Checks: - Platform admin user creation - Personal team setup -- Resource team assignments (servers, tools, resources) +- Resource team assignments (servers, tools, resources, prompts, gateways, a2a_agents) - Visibility settings - Team membership @@ -28,7 +28,7 @@ try: from mcpgateway.db import ( SessionLocal, EmailUser, EmailTeam, EmailTeamMember, - Server, Tool, Resource, Role, UserRole + Server, Tool, Resource, Prompt, Gateway, A2AAgent, Role, UserRole ) from mcpgateway.config import settings from sqlalchemy import text @@ -93,7 +93,10 @@ def verify_migration(): resource_types = [ ("Servers", Server), ("Tools", Tool), - ("Resources", Resource) + ("Resources", Resource), + ("Prompts", Prompt), + ("Gateways", Gateway), + ("A2A Agents", A2AAgent) ] for resource_name, resource_model in resource_types: @@ -178,7 +181,10 @@ def verify_migration(): resource_models = [ ("Server", Server), ("Tool", Tool), - ("Resource", Resource) + ("Resource", Resource), + ("Prompt", Prompt), + ("Gateway", Gateway), + ("A2AAgent", A2AAgent) ] for model_name, model_class in resource_models: From 156c0efd08e232515331da110a02d67d7122e7b7 Mon Sep 17 00:00:00 2001 From: Mihai Criveti Date: Thu, 4 Sep 2025 13:25:51 +0100 Subject: [PATCH 30/49] Fix APP_ROOT_PATH Signed-off-by: Mihai Criveti --- mcpgateway/admin.py | 42 +++++++++++++++++++++--------- mcpgateway/routers/oauth_router.py | 15 ++++++----- mcpgateway/static/admin.js | 26 +++++++++++------- 3 files changed, 55 insertions(+), 28 deletions(-) diff --git a/mcpgateway/admin.py b/mcpgateway/admin.py index c39bf9191..7af99d2af 100644 --- a/mcpgateway/admin.py +++ b/mcpgateway/admin.py @@ -2388,6 +2388,9 @@ async def admin_create_team( return HTMLResponse(content='
Email authentication is disabled
', status_code=403) try: + # Get root path for URL construction + root_path = request.scope.get("root_path", "") if request else "" + form = await request.form() name = form.get("name") slug = form.get("slug") or None @@ -2425,14 +2428,14 @@ async def admin_create_team(
- {'' if not team.is_personal else ""} + {'' if not team.is_personal else ""}
@@ -2728,6 +2731,9 @@ async def admin_get_team_edit( return HTMLResponse(content='
Email authentication is disabled
', status_code=403) try: + # Get root path for URL construction + root_path = _request.scope.get("root_path", "") if _request else "" + # First-Party from mcpgateway.services.team_management_service import TeamManagementService # pylint: disable=import-outside-toplevel # pylint: disable=import-outside-toplevel @@ -2740,7 +2746,7 @@ async def admin_get_team_edit( edit_form = f"""

Edit Team

-
+
- +
@@ -3812,6 +3821,9 @@ async def admin_get_user_edit( return HTMLResponse(content='
Email authentication is disabled
', status_code=403) try: + # Get root path for URL construction + root_path = _request.scope.get("root_path", "") if _request else "" + # First-Party from mcpgateway.services.email_auth_service import EmailAuthService # pylint: disable=import-outside-toplevel # pylint: disable=import-outside-toplevel @@ -3829,7 +3841,7 @@ async def admin_get_user_edit( edit_form = f"""

Edit User

- +
- - + +
@@ -4025,6 +4040,9 @@ async def admin_deactivate_user( return HTMLResponse(content='
Email authentication is disabled
', status_code=403) try: + # Get root path for URL construction + root_path = _request.scope.get("root_path", "") if _request else "" + # First-Party from mcpgateway.services.email_auth_service import EmailAuthService # pylint: disable=import-outside-toplevel # pylint: disable=import-outside-toplevel @@ -4060,11 +4078,11 @@ async def admin_deactivate_user(
- - + +
diff --git a/mcpgateway/routers/oauth_router.py b/mcpgateway/routers/oauth_router.py index 640fb904f..85b76acf1 100644 --- a/mcpgateway/routers/oauth_router.py +++ b/mcpgateway/routers/oauth_router.py @@ -116,6 +116,9 @@ async def oauth_callback( """ try: + # Get root path for URL construction + root_path = request.scope.get("root_path", "") if request else "" + # Extract gateway_id from state parameter if "_" not in state: return HTMLResponse(content="

❌ Invalid state parameter

", status_code=400) @@ -134,7 +137,7 @@ async def oauth_callback(

❌ OAuth Authorization Failed

Error: Gateway not found

- Return to Admin Panel + Return to Admin Panel """, @@ -150,7 +153,7 @@ async def oauth_callback(

❌ OAuth Authorization Failed

Error: Gateway has no OAuth configuration

- Return to Admin Panel + Return to Admin Panel """, @@ -206,7 +209,7 @@ async def oauth_callback(
- Return to Admin Panel + Return to Admin Panel From a0a5893a3f6e6bf517f41b099bd208c24062c37c Mon Sep 17 00:00:00 2001 From: Mihai Criveti Date: Thu, 4 Sep 2025 21:46:33 +0100 Subject: [PATCH 35/49] Update docs for password change Signed-off-by: Mihai Criveti --- .pre-commit-config.yaml | 2 +- CHANGELOG.md | 3 ++ docs/docs/architecture/multitenancy.md | 75 ++++++++++++++++++++++++++ 3 files changed, 79 insertions(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 4bcf8ce37..ec8c6c504 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -368,7 +368,7 @@ repos: description: Verifies test files in tests/ directories start with `test_`. language: python files: (^|/)tests/.+\.py$ - exclude: ^tests/(.*/)?(pages|helpers|fuzzers|scripts|fixtures|migration|utils)/.*\.py$|^tests/migration/.*\.py$ # Exclude page object, helper, fuzzer, script, fixture, util, and migration files + exclude: ^tests/(.*/)?(pages|helpers|fuzzers|scripts|fixtures|migration|utils|manual)/.*\.py$|^tests/migration/.*\.py$ # Exclude page object, helper, fuzzer, script, fixture, util, manual, and migration files args: [--pytest-test-first] # `test_.*\.py` # - repo: https://github.com/pycqa/flake8 diff --git a/CHANGELOG.md b/CHANGELOG.md index 6872cd865..2f60f3f0c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -27,6 +27,8 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/) - Configuration export/import tools - Comprehensive verification and troubleshooting +**🔑 Password Management**: After migration, platform admin password must be changed using the API endpoint `/auth/email/change-password`. The `PLATFORM_ADMIN_PASSWORD` environment variable is only used during initial setup. + ### Added #### **🔐 Authentication & Authorization System** @@ -34,6 +36,7 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/) * **Complete RBAC System** (#283) - Platform Admin, Team Owner, Team Member roles with full multi-tenancy support * **Enhanced JWT Tokens** (#87) - JWT tokens with team context, scoped permissions, and per-user expiry * **Password Policy Engine** (#426) - Configurable security requirements with password complexity rules +* **Password Change API** - Secure `/auth/email/change-password` endpoint for changing user passwords with old password verification * **Multi-Provider SSO Framework** (#220, #278, #859) - GitHub, Google, and IBM Security Verify integration * **Per-Virtual-Server API Keys** (#282) - Scoped access tokens for individual virtual servers diff --git a/docs/docs/architecture/multitenancy.md b/docs/docs/architecture/multitenancy.md index 9d540c692..774b103f9 100644 --- a/docs/docs/architecture/multitenancy.md +++ b/docs/docs/architecture/multitenancy.md @@ -589,6 +589,81 @@ sequenceDiagram end ``` +## Password Management + +### Changing Platform Admin Password + +The platform admin password can be changed using several methods: + +#### Method 1: Admin UI (Easiest) +Use the web interface to change passwords: + +1. Navigate to [http://localhost:4444/admin/#users](http://localhost:4444/admin/#users) +2. Click "Edit" on the user account +3. Enter a new password in the "New Password" field (leave empty to keep current password) +4. Click "Update User" + +#### Method 2: API Endpoint +Use the `/auth/email/change-password` endpoint after authentication: + +```bash +# First, get a JWT token by logging in +curl -X POST "http://localhost:4444/auth/email/login" \ + -H "Content-Type: application/json" \ + -d '{ + "email": "admin@example.com", + "password": "current_password" + }' + +# Use the returned JWT token to change password +curl -X POST "http://localhost:4444/auth/email/change-password" \ + -H "Authorization: Bearer " \ + -H "Content-Type: application/json" \ + -d '{ + "old_password": "current_password", + "new_password": "new_secure_password" + }' +``` + +#### Method 3: Environment Variable + Migration +1. Update `PLATFORM_ADMIN_PASSWORD` in your `.env` file +2. Run database migration to apply the change: + ```bash + alembic upgrade head + ``` + +**Note**: This method only works during initial setup. After the admin user exists, the environment variable is ignored. + +#### Method 4: Direct Database Update +For emergency password resets, you can update the database directly: + +```bash +# Using the application's password service +python3 -c " +from mcpgateway.services.argon2_service import Argon2PasswordService +from mcpgateway.db import SessionLocal +from mcpgateway.models import EmailUser + +service = Argon2PasswordService() +hashed = service.hash_password('new_password') + +with SessionLocal() as db: + user = db.query(EmailUser).filter(EmailUser.email == 'admin@example.com').first() + if user: + user.password_hash = hashed + db.commit() + print('Password updated successfully') + else: + print('Admin user not found') +" +``` + +### Password Security Requirements +- Minimum 8 characters (enforced by application) +- Uses Argon2id hashing algorithm for secure storage +- Password change events are logged in the audit trail +- Failed login attempts are tracked and can trigger account lockout + ### Role-Based UI Experience The user interface adapts based on the user's assigned roles: From 7081641d482b6bfd5ed1ab253a8bc20c255f3b54 Mon Sep 17 00:00:00 2001 From: Mihai Criveti Date: Thu, 4 Sep 2025 21:53:17 +0100 Subject: [PATCH 36/49] Add 2nd pass. check field Signed-off-by: Mihai Criveti --- docs/docs/architecture/multitenancy.md | 5 +++- mcpgateway/admin.py | 17 ++++++++++-- mcpgateway/static/admin.js | 37 ++++++++++++++++++++++++++ 3 files changed, 56 insertions(+), 3 deletions(-) diff --git a/docs/docs/architecture/multitenancy.md b/docs/docs/architecture/multitenancy.md index 774b103f9..6c84a0eb0 100644 --- a/docs/docs/architecture/multitenancy.md +++ b/docs/docs/architecture/multitenancy.md @@ -601,7 +601,10 @@ Use the web interface to change passwords: 1. Navigate to [http://localhost:4444/admin/#users](http://localhost:4444/admin/#users) 2. Click "Edit" on the user account 3. Enter a new password in the "New Password" field (leave empty to keep current password) -4. Click "Update User" +4. Confirm the password in the "Confirm New Password" field +5. Click "Update User" + +**Note**: Both password fields must match for the update to succeed. The form will prevent submission if passwords don't match. #### Method 2: API Endpoint Use the `/auth/email/change-password` endpoint after authentication: diff --git a/mcpgateway/admin.py b/mcpgateway/admin.py index c892a8159..9c4e4034e 100644 --- a/mcpgateway/admin.py +++ b/mcpgateway/admin.py @@ -3863,8 +3863,16 @@ async def admin_get_user_edit(
- + +
+
+ + +
+ + +
+ +
+ 0 items selected +
+ + + + ${Object.keys(preview.bundles || {}).length > 0 ? ` +
+
+ 🌐 Gateway Bundles (Gateway + Auto-discovered Items) +
+
+ ${Object.entries(preview.bundles).map(([gatewayName, bundle]) => ` +
+ +
+ `).join("")} +
+
+ ` : ""} + + + ${Object.entries(preview.items || {}).map(([entityType, items]) => { + const customItems = items.filter(item => item.is_custom); + return customItems.length > 0 ? ` +
+
+ 🛠️ Custom ${entityType} +
+
+ ${customItems.map(item => ` +
+ +
+ `).join("")} +
+
+ ` : ""; + }).join("")} + + + ${Object.keys(preview.conflicts || {}).length > 0 ? ` +
+
+
+
+ + + +
+
+

+ Naming conflicts detected +

+
+ Some items have the same names as existing items. Use conflict strategy to resolve. +
+
+
+
+
+ ` : ""} + + +
+ + +
+ + +
+
+ `; + + // Store preview data and show preview section + window.currentImportPreview = preview; + updateSelectionCount(); +} + +/** + * Handle selective import based on user selections + */ +async function handleSelectiveImport(dryRun = false) { + console.log(`🎯 Starting selective import (dry_run=${dryRun})`); + + if (!window.currentImportData) { + showNotification("❌ Please select an import file first", "error"); + return; + } + + try { + showImportProgress(true); + + // Collect user selections + const selected_entities = collectUserSelections(); + + if (Object.keys(selected_entities).length === 0) { + showNotification("❌ Please select at least one item to import", "warning"); + showImportProgress(false); + return; + } + + const conflictStrategy = + document.getElementById("import-conflict-strategy")?.value || "update"; + const rekeySecret = + document.getElementById("import-rekey-secret")?.value || null; + + const requestData = { + import_data: window.currentImportData, + conflict_strategy: conflictStrategy, + dry_run: dryRun, + rekey_secret: rekeySecret, + selected_entities: selected_entities, + }; + + console.log("🎯 Selected entities for import:", selected_entities); + + const response = await fetch( + (window.ROOT_PATH || "") + "/admin/import/configuration", + { + method: "POST", + headers: { + "Content-Type": "application/json", + Authorization: `Bearer ${await getAuthToken()}`, + }, + body: JSON.stringify(requestData), + } + ); + + if (!response.ok) { + const errorData = await response.json(); + throw new Error( + errorData.detail || `Import failed: ${response.statusText}` + ); + } + + const result = await response.json(); + displayImportResults(result, dryRun); + + if (!dryRun) { + refreshCurrentTabData(); + showNotification("✅ Selective import completed successfully", "success"); + } else { + showNotification("✅ Import preview completed", "success"); + } + + } catch (error) { + console.error("Selective import error:", error); + showNotification(`❌ Import failed: ${error.message}`, "error"); + } finally { + showImportProgress(false); + } +} + +/** + * Collect user selections for selective import + */ +function collectUserSelections() { + const selections = {}; + + // Collect gateway selections + document.querySelectorAll('.gateway-checkbox:checked').forEach(checkbox => { + const gatewayName = checkbox.dataset.gateway; + if (!selections.gateways) selections.gateways = []; + selections.gateways.push(gatewayName); + }); + + // Collect individual item selections + document.querySelectorAll('.item-checkbox:checked').forEach(checkbox => { + const entityType = checkbox.dataset.type; + const itemId = checkbox.dataset.id; + if (!selections[entityType]) selections[entityType] = []; + selections[entityType].push(itemId); + }); + + return selections; +} + +/** + * Update selection count display + */ +function updateSelectionCount() { + const gatewayCount = document.querySelectorAll('.gateway-checkbox:checked').length; + const itemCount = document.querySelectorAll('.item-checkbox:checked').length; + const totalCount = gatewayCount + itemCount; + + const countElement = document.getElementById('selection-count'); + if (countElement) { + countElement.textContent = `${totalCount} items selected (${gatewayCount} gateways, ${itemCount} individual items)`; + } +} + +/** + * Select all items + */ +function selectAllItems() { + document.querySelectorAll('.gateway-checkbox, .item-checkbox').forEach(checkbox => { + checkbox.checked = true; + }); + updateSelectionCount(); +} + +/** + * Select no items + */ +function selectNoneItems() { + document.querySelectorAll('.gateway-checkbox, .item-checkbox').forEach(checkbox => { + checkbox.checked = false; + }); + updateSelectionCount(); +} + +/** + * Select only custom items (not gateway items) + */ +function selectOnlyCustom() { + document.querySelectorAll('.gateway-checkbox').forEach(checkbox => { + checkbox.checked = false; + }); + document.querySelectorAll('.item-checkbox').forEach(checkbox => { + checkbox.checked = true; + }); + updateSelectionCount(); +} + +/** + * Reset import selection + */ +function resetImportSelection() { + const previewContainer = document.getElementById("import-preview-container"); + if (previewContainer) { + previewContainer.remove(); + } + window.currentImportPreview = null; +} + +// Expose selective import functions to global scope +window.previewImport = previewImport; +window.handleSelectiveImport = handleSelectiveImport; +window.displayImportPreview = displayImportPreview; +window.collectUserSelections = collectUserSelections; +window.updateSelectionCount = updateSelectionCount; +window.selectAllItems = selectAllItems; +window.selectNoneItems = selectNoneItems; +window.selectOnlyCustom = selectOnlyCustom; +window.resetImportSelection = resetImportSelection; diff --git a/mcpgateway/templates/admin.html b/mcpgateway/templates/admin.html index 3d1e8feeb..0587d751e 100644 --- a/mcpgateway/templates/admin.html +++ b/mcpgateway/templates/admin.html @@ -934,6 +934,14 @@

+

- ${Object.entries(preview.summary.by_type).map(([type, count]) => - `${type}: ${count}` - ).join(", ")} + ${Object.entries(preview.summary.by_type) + .map(([type, count]) => `${type}: ${count}`) + .join(", ")}
@@ -11473,13 +11474,17 @@ function displayImportPreview(preview) { - ${Object.keys(preview.bundles || {}).length > 0 ? ` + ${ + Object.keys(preview.bundles || {}).length > 0 + ? `
🌐 Gateway Bundles (Gateway + Auto-discovered Items)
- ${Object.entries(preview.bundles).map(([gatewayName, bundle]) => ` + ${Object.entries(preview.bundles) + .map( + ([gatewayName, bundle]) => `
- `).join("")} + `, + ) + .join("")}
- ` : ""} + ` + : "" + } - ${Object.entries(preview.items || {}).map(([entityType, items]) => { - const customItems = items.filter(item => item.is_custom); - return customItems.length > 0 ? ` + ${Object.entries(preview.items || {}) + .map(([entityType, items]) => { + const customItems = items.filter((item) => item.is_custom); + return customItems.length > 0 + ? `
🛠️ Custom ${entityType}
- ${customItems.map(item => ` -
+ ${customItems + .map( + (item) => ` +
- `).join("")} + `, + ) + .join("")}
- ` : ""; - }).join("")} + ` + : ""; + }) + .join("")} - ${Object.keys(preview.conflicts || {}).length > 0 ? ` + ${ + Object.keys(preview.conflicts || {}).length > 0 + ? `
@@ -11565,7 +11592,9 @@ function displayImportPreview(preview) {
- ` : ""} + ` + : "" + }
@@ -11610,13 +11639,17 @@ async function handleSelectiveImport(dryRun = false) { const selected_entities = collectUserSelections(); if (Object.keys(selected_entities).length === 0) { - showNotification("❌ Please select at least one item to import", "warning"); + showNotification( + "❌ Please select at least one item to import", + "warning", + ); showImportProgress(false); return; } const conflictStrategy = - document.getElementById("import-conflict-strategy")?.value || "update"; + document.getElementById("import-conflict-strategy")?.value || + "update"; const rekeySecret = document.getElementById("import-rekey-secret")?.value || null; @@ -11639,13 +11672,13 @@ async function handleSelectiveImport(dryRun = false) { Authorization: `Bearer ${await getAuthToken()}`, }, body: JSON.stringify(requestData), - } + }, ); if (!response.ok) { const errorData = await response.json(); throw new Error( - errorData.detail || `Import failed: ${response.statusText}` + errorData.detail || `Import failed: ${response.statusText}`, ); } @@ -11654,11 +11687,13 @@ async function handleSelectiveImport(dryRun = false) { if (!dryRun) { refreshCurrentTabData(); - showNotification("✅ Selective import completed successfully", "success"); + showNotification( + "✅ Selective import completed successfully", + "success", + ); } else { showNotification("✅ Import preview completed", "success"); } - } catch (error) { console.error("Selective import error:", error); showNotification(`❌ Import failed: ${error.message}`, "error"); @@ -11674,14 +11709,16 @@ function collectUserSelections() { const selections = {}; // Collect gateway selections - document.querySelectorAll('.gateway-checkbox:checked').forEach(checkbox => { - const gatewayName = checkbox.dataset.gateway; - if (!selections.gateways) selections.gateways = []; - selections.gateways.push(gatewayName); - }); + document + .querySelectorAll(".gateway-checkbox:checked") + .forEach((checkbox) => { + const gatewayName = checkbox.dataset.gateway; + if (!selections.gateways) selections.gateways = []; + selections.gateways.push(gatewayName); + }); // Collect individual item selections - document.querySelectorAll('.item-checkbox:checked').forEach(checkbox => { + document.querySelectorAll(".item-checkbox:checked").forEach((checkbox) => { const entityType = checkbox.dataset.type; const itemId = checkbox.dataset.id; if (!selections[entityType]) selections[entityType] = []; @@ -11695,11 +11732,15 @@ function collectUserSelections() { * Update selection count display */ function updateSelectionCount() { - const gatewayCount = document.querySelectorAll('.gateway-checkbox:checked').length; - const itemCount = document.querySelectorAll('.item-checkbox:checked').length; + const gatewayCount = document.querySelectorAll( + ".gateway-checkbox:checked", + ).length; + const itemCount = document.querySelectorAll( + ".item-checkbox:checked", + ).length; const totalCount = gatewayCount + itemCount; - const countElement = document.getElementById('selection-count'); + const countElement = document.getElementById("selection-count"); if (countElement) { countElement.textContent = `${totalCount} items selected (${gatewayCount} gateways, ${itemCount} individual items)`; } @@ -11709,9 +11750,11 @@ function updateSelectionCount() { * Select all items */ function selectAllItems() { - document.querySelectorAll('.gateway-checkbox, .item-checkbox').forEach(checkbox => { - checkbox.checked = true; - }); + document + .querySelectorAll(".gateway-checkbox, .item-checkbox") + .forEach((checkbox) => { + checkbox.checked = true; + }); updateSelectionCount(); } @@ -11719,9 +11762,11 @@ function selectAllItems() { * Select no items */ function selectNoneItems() { - document.querySelectorAll('.gateway-checkbox, .item-checkbox').forEach(checkbox => { - checkbox.checked = false; - }); + document + .querySelectorAll(".gateway-checkbox, .item-checkbox") + .forEach((checkbox) => { + checkbox.checked = false; + }); updateSelectionCount(); } @@ -11729,10 +11774,10 @@ function selectNoneItems() { * Select only custom items (not gateway items) */ function selectOnlyCustom() { - document.querySelectorAll('.gateway-checkbox').forEach(checkbox => { + document.querySelectorAll(".gateway-checkbox").forEach((checkbox) => { checkbox.checked = false; }); - document.querySelectorAll('.item-checkbox').forEach(checkbox => { + document.querySelectorAll(".item-checkbox").forEach((checkbox) => { checkbox.checked = true; }); updateSelectionCount(); @@ -11742,7 +11787,9 @@ function selectOnlyCustom() { * Reset import selection */ function resetImportSelection() { - const previewContainer = document.getElementById("import-preview-container"); + const previewContainer = document.getElementById( + "import-preview-container", + ); if (previewContainer) { previewContainer.remove(); } From 7f5c195b3eb9a67d577048672af487bf221d68a2 Mon Sep 17 00:00:00 2001 From: Mihai Criveti Date: Fri, 5 Sep 2025 12:29:28 +0100 Subject: [PATCH 48/49] Fix import Signed-off-by: Mihai Criveti --- mcpgateway/static/admin.js | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/mcpgateway/static/admin.js b/mcpgateway/static/admin.js index 91c3e109d..11df88157 100644 --- a/mcpgateway/static/admin.js +++ b/mcpgateway/static/admin.js @@ -11636,9 +11636,9 @@ async function handleSelectiveImport(dryRun = false) { showImportProgress(true); // Collect user selections - const selected_entities = collectUserSelections(); + const selectedEntities = collectUserSelections(); - if (Object.keys(selected_entities).length === 0) { + if (Object.keys(selectedEntities).length === 0) { showNotification( "❌ Please select at least one item to import", "warning", @@ -11658,10 +11658,10 @@ async function handleSelectiveImport(dryRun = false) { conflict_strategy: conflictStrategy, dry_run: dryRun, rekey_secret: rekeySecret, - selected_entities: selected_entities, + selectedEntities, }; - console.log("🎯 Selected entities for import:", selected_entities); + console.log("🎯 Selected entities for import:", selectedEntities); const response = await fetch( (window.ROOT_PATH || "") + "/admin/import/configuration", @@ -11713,7 +11713,9 @@ function collectUserSelections() { .querySelectorAll(".gateway-checkbox:checked") .forEach((checkbox) => { const gatewayName = checkbox.dataset.gateway; - if (!selections.gateways) selections.gateways = []; + if (!selections.gateways) { + selections.gateways = []; + } selections.gateways.push(gatewayName); }); @@ -11721,7 +11723,9 @@ function collectUserSelections() { document.querySelectorAll(".item-checkbox:checked").forEach((checkbox) => { const entityType = checkbox.dataset.type; const itemId = checkbox.dataset.id; - if (!selections[entityType]) selections[entityType] = []; + if (!selections[entityType]) { + selections[entityType] = []; + } selections[entityType].push(itemId); }); From 4b8331e86efac07676dc227a98df63d0f0ab105f Mon Sep 17 00:00:00 2001 From: Mihai Criveti Date: Fri, 5 Sep 2025 14:54:34 +0100 Subject: [PATCH 49/49] Fix tests Signed-off-by: Mihai Criveti --- .../services/test_import_service.py | 49 ++++++++++++------- 1 file changed, 31 insertions(+), 18 deletions(-) diff --git a/tests/unit/mcpgateway/services/test_import_service.py b/tests/unit/mcpgateway/services/test_import_service.py index b0b7f892b..d503bf244 100644 --- a/tests/unit/mcpgateway/services/test_import_service.py +++ b/tests/unit/mcpgateway/services/test_import_service.py @@ -515,7 +515,7 @@ async def test_process_resource_entities(import_service, mock_db): @pytest.mark.asyncio -async def test_process_root_entities(import_service): +async def test_process_root_entities(import_service, mock_db): """Test processing root entities.""" root_data = { "uri": "file:///workspace", @@ -533,10 +533,11 @@ async def test_process_root_entities(import_service): # Setup mocks import_service.root_service.add_root.return_value = MagicMock() + mock_db.flush.return_value = None # Mock flush method # Execute import status = await import_service.import_configuration( - db=None, # Root processing doesn't need db + db=mock_db, # Use mock_db instead of None import_data=import_data, imported_by="test_user" ) @@ -917,7 +918,7 @@ async def test_import_configuration_with_selected_entities(import_service, mock_ @pytest.mark.asyncio -async def test_conversion_methods_comprehensive(import_service): +async def test_conversion_methods_comprehensive(import_service, mock_db): """Test all schema conversion methods.""" # Test gateway conversion without auth (simpler test) gateway_data = { @@ -932,7 +933,7 @@ async def test_conversion_methods_comprehensive(import_service): assert gateway_create.name == "test_gateway" assert str(gateway_create.url) == "https://gateway.example.com" - # Test server conversion + # Test server conversion with mock db server_data = { "name": "test_server", "description": "Test server", @@ -940,9 +941,12 @@ async def test_conversion_methods_comprehensive(import_service): "tags": ["server"] } - server_create = import_service._convert_to_server_create(server_data) + # Mock the list_tools method to return empty list (no tools to resolve) + import_service.tool_service.list_tools.return_value = [] + + server_create = await import_service._convert_to_server_create(mock_db, server_data) assert server_create.name == "test_server" - assert server_create.associated_tools == ["tool1", "tool2"] + assert server_create.associated_tools == [] # Empty because no tools found to resolve # Test prompt conversion with schema prompt_data = { @@ -1773,7 +1777,7 @@ async def test_resource_conflict_fail_strategy(import_service, mock_db): @pytest.mark.asyncio -async def test_root_dry_run_processing(import_service): +async def test_root_dry_run_processing(import_service, mock_db): """Test root dry-run processing.""" root_data = { "uri": "file:///test", @@ -1787,9 +1791,12 @@ async def test_root_dry_run_processing(import_service): "metadata": {"entity_counts": {"roots": 1}} } + # Mock flush for dry run (even though it won't be called) + mock_db.flush.return_value = None + # Execute dry-run import status = await import_service.import_configuration( - db=None, # Root processing doesn't need db + db=mock_db, # Use mock_db instead of None import_data=import_data, dry_run=True, imported_by="test_user" @@ -1801,7 +1808,7 @@ async def test_root_dry_run_processing(import_service): @pytest.mark.asyncio -async def test_root_conflict_skip_strategy(import_service): +async def test_root_conflict_skip_strategy(import_service, mock_db): """Test root SKIP conflict strategy.""" root_data = { "uri": "file:///existing", @@ -1817,9 +1824,10 @@ async def test_root_conflict_skip_strategy(import_service): # Setup conflict import_service.root_service.add_root.side_effect = Exception("Root already exists") + mock_db.flush.return_value = None # Mock flush method status = await import_service.import_configuration( - db=None, # Root processing doesn't need db + db=mock_db, # Use mock_db instead of None import_data=import_data, conflict_strategy=ConflictStrategy.SKIP, imported_by="test_user" @@ -1831,7 +1839,7 @@ async def test_root_conflict_skip_strategy(import_service): @pytest.mark.asyncio -async def test_root_conflict_fail_strategy(import_service): +async def test_root_conflict_fail_strategy(import_service, mock_db): """Test root FAIL conflict strategy.""" root_data = { "uri": "file:///fail", @@ -1847,9 +1855,10 @@ async def test_root_conflict_fail_strategy(import_service): # Setup conflict import_service.root_service.add_root.side_effect = Exception("Root already exists") + mock_db.flush.return_value = None # Mock flush method status = await import_service.import_configuration( - db=None, # Root processing doesn't need db + db=mock_db, # Use mock_db instead of None import_data=import_data, conflict_strategy=ConflictStrategy.FAIL, imported_by="test_user" @@ -1861,7 +1870,7 @@ async def test_root_conflict_fail_strategy(import_service): @pytest.mark.asyncio -async def test_root_conflict_update_or_rename_strategy(import_service): +async def test_root_conflict_update_or_rename_strategy(import_service, mock_db): """Test root UPDATE/RENAME conflict strategy (both should raise ImportError).""" root_data = { "uri": "file:///conflict", @@ -1877,10 +1886,11 @@ async def test_root_conflict_update_or_rename_strategy(import_service): # Setup conflict import_service.root_service.add_root.side_effect = Exception("Root already exists") + mock_db.flush.return_value = None # Mock flush method # Test UPDATE strategy status_update = await import_service.import_configuration( - db=None, # Root processing doesn't need db + db=mock_db, # Use mock_db instead of None import_data=import_data, conflict_strategy=ConflictStrategy.UPDATE, imported_by="test_user" @@ -1895,7 +1905,7 @@ async def test_root_conflict_update_or_rename_strategy(import_service): # Test RENAME strategy status_rename = await import_service.import_configuration( - db=None, # Root processing doesn't need db + db=mock_db, # Use mock_db instead of None import_data=import_data, conflict_strategy=ConflictStrategy.RENAME, imported_by="test_user" @@ -2061,7 +2071,7 @@ async def test_gateway_update_auth_decode_error(import_service): @pytest.mark.asyncio -async def test_server_update_conversion(import_service): +async def test_server_update_conversion(import_service, mock_db): """Test server update schema conversion.""" server_data = { "name": "update_server", @@ -2070,10 +2080,13 @@ async def test_server_update_conversion(import_service): "tags": ["server", "update"] } - server_update = import_service._convert_to_server_update(server_data) + # Mock the list_tools method to return empty list (no tools to resolve) + import_service.tool_service.list_tools.return_value = [] + + server_update = await import_service._convert_to_server_update(mock_db, server_data) assert server_update.name == "update_server" assert server_update.description == "Updated server description" - assert server_update.associated_tools == ["tool1", "tool2", "tool3"] + assert server_update.associated_tools is None # None because no tools found to resolve assert server_update.tags == ["server", "update"]

+ + - {% if gateway.authType == 'oauth' %} - - 🔐 Authorize - + {% if gateway.authType == 'oauth' %} + + + 🔐 Authorize + + - - - {% endif %} {% if gateway.enabled %} -
- + -
- {% else %} -
- -
- {% endif %} - - -
+ {% else %} + -
+ + {% endif %} + + + {% if gateway.enabled %} +
+ + +
+ {% else %} +
+ + +
+ {% endif %} +
+ +
+