diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 000000000..666bb11f2
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,99 @@
+# Dependabot configuration for automated dependency updates
+# https://docs.github.com/en/code-security/dependabot/dependabot-version-updates
+
+version: 2
+updates:
+ # Python backend dependencies (uv/pip)
+ - package-ecosystem: "pip"
+ directory: "/surfsense_backend"
+ schedule:
+ interval: "weekly"
+ day: "monday"
+ open-pull-requests-limit: 5
+ groups:
+ python-minor-patch:
+ patterns:
+ - "*"
+ update-types:
+ - "minor"
+ - "patch"
+ labels:
+ - "dependencies"
+ - "python"
+ commit-message:
+ prefix: "chore(deps)"
+
+ # Frontend web dependencies (pnpm/npm)
+ - package-ecosystem: "npm"
+ directory: "/surfsense_web"
+ schedule:
+ interval: "weekly"
+ day: "monday"
+ open-pull-requests-limit: 5
+ groups:
+ npm-minor-patch:
+ patterns:
+ - "*"
+ update-types:
+ - "minor"
+ - "patch"
+ labels:
+ - "dependencies"
+ - "javascript"
+ commit-message:
+ prefix: "chore(deps)"
+
+ # Browser extension dependencies (pnpm/npm)
+ - package-ecosystem: "npm"
+ directory: "/surfsense_browser_extension"
+ schedule:
+ interval: "weekly"
+ day: "monday"
+ open-pull-requests-limit: 5
+ groups:
+ extension-minor-patch:
+ patterns:
+ - "*"
+ update-types:
+ - "minor"
+ - "patch"
+ labels:
+ - "dependencies"
+ - "javascript"
+ - "extension"
+ commit-message:
+ prefix: "chore(deps)"
+
+ # GitHub Actions dependencies
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ schedule:
+ interval: "weekly"
+ day: "monday"
+ open-pull-requests-limit: 3
+ labels:
+ - "dependencies"
+ - "github-actions"
+ commit-message:
+ prefix: "chore(ci)"
+
+ # Docker dependencies
+ - package-ecosystem: "docker"
+ directory: "/surfsense_backend"
+ schedule:
+ interval: "monthly"
+ labels:
+ - "dependencies"
+ - "docker"
+ commit-message:
+ prefix: "chore(docker)"
+
+ - package-ecosystem: "docker"
+ directory: "/surfsense_web"
+ schedule:
+ interval: "monthly"
+ labels:
+ - "dependencies"
+ - "docker"
+ commit-message:
+ prefix: "chore(docker)"
diff --git a/.github/workflows/docker-publish.yml b/.github/workflows/docker-publish.yml
index a391ba83c..70927ab7b 100644
--- a/.github/workflows/docker-publish.yml
+++ b/.github/workflows/docker-publish.yml
@@ -2,45 +2,87 @@ name: Docker Publish
on:
workflow_dispatch:
+ inputs:
+ push_backend:
+ description: 'Push backend image'
+ required: false
+ default: true
+ type: boolean
+ push_frontend:
+ description: 'Push frontend image'
+ required: false
+ default: true
+ type: boolean
+ release:
+ types: [published]
+ push:
+ branches: [main]
+ paths:
+ - 'surfsense_backend/Dockerfile'
+ - 'surfsense_web/Dockerfile'
+ - '.github/workflows/docker-publish.yml'
+
+env:
+ REGISTRY: ghcr.io
jobs:
- # build_and_push_backend:
- # runs-on: ubuntu-latest
- # permissions:
- # contents: read
- # packages: write
- # steps:
- # - name: Checkout repository
- # uses: actions/checkout@v4
+ build_and_push_backend:
+ name: Build & Push Backend
+ runs-on: ubuntu-latest
+ if: |
+    github.event_name == 'release' || github.event_name == 'push' ||
+    (github.event_name == 'workflow_dispatch' && inputs.push_backend)
+ permissions:
+ contents: read
+ packages: write
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v3
- # - name: Set up QEMU
- # uses: docker/setup-qemu-action@v3
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
- # - name: Set up Docker Buildx
- # uses: docker/setup-buildx-action@v3
+ - name: Log in to GitHub Container Registry
+ uses: docker/login-action@v3
+ with:
+ registry: ${{ env.REGISTRY }}
+ username: ${{ github.actor }}
+ password: ${{ secrets.GITHUB_TOKEN }}
- # - name: Log in to GitHub Container Registry
- # uses: docker/login-action@v3
- # with:
- # registry: ghcr.io
- # username: ${{ github.actor }}
- # password: ${{ secrets.GITHUB_TOKEN }}
+ - name: Extract metadata for backend
+ id: meta-backend
+ uses: docker/metadata-action@v5
+ with:
+ images: ${{ env.REGISTRY }}/${{ github.repository_owner }}/surfsense_backend
+ tags: |
+ type=sha,prefix=
+ type=ref,event=branch
+ type=semver,pattern={{version}}
+ type=semver,pattern={{major}}.{{minor}}
+ type=raw,value=latest,enable={{is_default_branch}}
- # - name: Build and push backend image
- # uses: docker/build-push-action@v5
- # with:
- # context: ./surfsense_backend
- # file: ./surfsense_backend/Dockerfile
- # push: true
- # tags: ghcr.io/${{ github.repository_owner }}/surfsense_backend:${{ github.sha }}
- # platforms: linux/amd64,linux/arm64
- # labels: |
- # org.opencontainers.image.source=${{ github.repositoryUrl }}
- # org.opencontainers.image.created=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.created'] }}
- # org.opencontainers.image.revision=${{ github.sha }}
+ - name: Build and push backend image
+ uses: docker/build-push-action@v6
+ with:
+ context: ./surfsense_backend
+ file: ./surfsense_backend/Dockerfile
+ push: true
+ tags: ${{ steps.meta-backend.outputs.tags }}
+ labels: ${{ steps.meta-backend.outputs.labels }}
+ platforms: linux/amd64,linux/arm64
+ cache-from: type=gha
+ cache-to: type=gha,mode=max
build_and_push_frontend:
+ name: Build & Push Frontend
runs-on: ubuntu-latest
+ if: |
+ github.event_name == 'release' ||
+ (github.event_name == 'workflow_dispatch' && inputs.push_frontend) ||
+ github.event_name == 'push'
permissions:
contents: read
packages: write
@@ -57,19 +99,30 @@ jobs:
- name: Log in to GitHub Container Registry
uses: docker/login-action@v3
with:
- registry: ghcr.io
+ registry: ${{ env.REGISTRY }}
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
+ - name: Extract metadata for frontend
+ id: meta-frontend
+ uses: docker/metadata-action@v5
+ with:
+ images: ${{ env.REGISTRY }}/${{ github.repository_owner }}/surfsense_web
+ tags: |
+ type=sha,prefix=
+ type=ref,event=branch
+ type=semver,pattern={{version}}
+ type=semver,pattern={{major}}.{{minor}}
+ type=raw,value=latest,enable={{is_default_branch}}
+
- name: Build and push frontend image
- uses: docker/build-push-action@v5
+ uses: docker/build-push-action@v6
with:
context: ./surfsense_web
file: ./surfsense_web/Dockerfile
push: true
- tags: ghcr.io/${{ github.repository_owner }}/surfsense_web:${{ github.sha }}
+ tags: ${{ steps.meta-frontend.outputs.tags }}
+ labels: ${{ steps.meta-frontend.outputs.labels }}
platforms: linux/amd64,linux/arm64
- labels: |
- org.opencontainers.image.source=${{ github.repositoryUrl }}
- org.opencontainers.image.created=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.created'] }}
- org.opencontainers.image.revision=${{ github.sha }}
+ cache-from: type=gha
+ cache-to: type=gha,mode=max
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
new file mode 100644
index 000000000..7655727a4
--- /dev/null
+++ b/.github/workflows/test.yml
@@ -0,0 +1,155 @@
+name: Tests
+
+on:
+ pull_request:
+ branches: [main, dev]
+ types: [opened, synchronize, reopened, ready_for_review]
+ push:
+ branches: [main, dev]
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ backend-tests:
+ name: Backend Tests
+ runs-on: ubuntu-latest
+ if: github.event.pull_request.draft == false
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Check if backend files changed
+ id: backend-changes
+ uses: dorny/paths-filter@v3
+ with:
+ filters: |
+ backend:
+ - 'surfsense_backend/**'
+
+ - name: Set up Docker Buildx
+ if: steps.backend-changes.outputs.backend == 'true'
+ uses: docker/setup-buildx-action@v3
+
+ - name: Cache Docker layers
+ if: steps.backend-changes.outputs.backend == 'true'
+ uses: actions/cache@v4
+ with:
+ path: /tmp/.buildx-cache
+ key: ${{ runner.os }}-buildx-${{ hashFiles('surfsense_backend/Dockerfile', 'surfsense_backend/pyproject.toml') }}
+ restore-keys: |
+ ${{ runner.os }}-buildx-
+
+ - name: Create test environment file
+ if: steps.backend-changes.outputs.backend == 'true'
+ run: |
+ cat > surfsense_backend/.env << 'EOF'
+ DATABASE_URL=postgresql+asyncpg://postgres:postgres@db:5432/surfsense
+ CELERY_BROKER_URL=redis://redis:6379/0
+ CELERY_RESULT_BACKEND=redis://redis:6379/0
+ SECRET_KEY=test-secret-key-for-ci
+ TESTING=true
+ EOF
+
+ - name: Build and run tests with Docker Compose
+ if: steps.backend-changes.outputs.backend == 'true'
+ run: |
+ # Start dependencies
+ docker compose up -d db redis
+
+ # Wait for services to be ready
+ echo "Waiting for PostgreSQL..."
+ timeout 60 bash -c 'until docker compose exec -T db pg_isready -U postgres; do sleep 2; done'
+
+ echo "Waiting for Redis..."
+ timeout 30 bash -c 'until docker compose exec -T redis redis-cli ping | grep -q PONG; do sleep 2; done'
+
+ # Build backend (pytest is already in Dockerfile)
+ docker compose build backend
+
+ # Run tests (pytest is baked into the image)
+          # Write coverage into /app/app (bind-mounted to ./surfsense_backend/app) so the
+          # report survives the ephemeral `docker compose run` container
+          docker compose run --rm -e TESTING=true backend pytest tests/ -v --tb=short --cov=app --cov-report=xml:/app/app/coverage.xml
+          mv surfsense_backend/app/coverage.xml surfsense_backend/coverage.xml || true
+
+ - name: Stop Docker Compose services
+ if: always() && steps.backend-changes.outputs.backend == 'true'
+ run: docker compose down -v
+
+ - name: Upload coverage reports
+ if: steps.backend-changes.outputs.backend == 'true'
+ uses: codecov/codecov-action@v4
+ with:
+          files: surfsense_backend/coverage.xml
+ flags: backend
+ fail_ci_if_error: false
+ continue-on-error: true
+
+ frontend-tests:
+ name: Frontend Tests
+ runs-on: ubuntu-latest
+ if: github.event.pull_request.draft == false
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Check if frontend files changed
+ id: frontend-changes
+ uses: dorny/paths-filter@v3
+ with:
+ filters: |
+ frontend:
+ - 'surfsense_web/**'
+
+ - name: Setup Node.js
+ if: steps.frontend-changes.outputs.frontend == 'true'
+ uses: actions/setup-node@v4
+ with:
+ node-version: '20'
+
+ - name: Install pnpm
+ if: steps.frontend-changes.outputs.frontend == 'true'
+ uses: pnpm/action-setup@v4
+ with:
+ version: 9
+
+ - name: Cache dependencies
+ if: steps.frontend-changes.outputs.frontend == 'true'
+ uses: actions/cache@v4
+ with:
+ path: |
+ ~/.pnpm-store
+ surfsense_web/node_modules
+ key: pnpm-deps-${{ hashFiles('surfsense_web/pnpm-lock.yaml') }}
+ restore-keys: |
+ pnpm-deps-
+
+ - name: Install dependencies
+ if: steps.frontend-changes.outputs.frontend == 'true'
+ working-directory: surfsense_web
+ run: pnpm install --frozen-lockfile
+
+ - name: Run tests
+ if: steps.frontend-changes.outputs.frontend == 'true'
+ working-directory: surfsense_web
+ run: pnpm test
+
+ test-gate:
+ name: Test Gate
+ runs-on: ubuntu-latest
+ needs: [backend-tests, frontend-tests]
+ if: always()
+
+ steps:
+ - name: Check test jobs status
+ run: |
+ if [[ "${{ needs.backend-tests.result }}" == "failure" || "${{ needs.frontend-tests.result }}" == "failure" ]]; then
+            echo "❌ Tests failed"
+ exit 1
+ else
+            echo "✅ All tests passed"
+ fi
diff --git a/.gitignore b/.gitignore
index 342c0b258..917f32293 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,4 +2,58 @@
./surfsense_backend/podcasts/
.env
node_modules/
-.ruff_cache/
\ No newline at end of file
+.ruff_cache/
+
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# Testing & Coverage
+.coverage
+.coverage.*
+htmlcov/
+.tox/
+.nox/
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+nosetests.xml
+
+# Virtual environments
+venv/
+ENV/
+env/
+.venv/
+
+# IDE
+.idea/
+.vscode/
+*.swp
+*.swo
+
+# Lock files (package managers handle these)
+uv.lock
+
+# OS files
+.DS_Store
+Thumbs.db
\ No newline at end of file
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index cc5dde3cc..6d8bf520c 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -71,7 +71,11 @@ For detailed setup instructions, refer to our [Installation Guide](https://www.s
SurfSense consists of three main components:
- **`surfsense_backend/`** - Python/FastAPI backend service
+ - `app/` - Main application code
+ - `tests/` - Test suite (pytest)
+ - `alembic/` - Database migrations
- **`surfsense_web/`** - Next.js web application
+ - `tests/` - Frontend tests (vitest)
- **`surfsense_browser_extension/`** - Browser extension for data collection
## ๐งช Development Guidelines
@@ -98,9 +102,48 @@ refactor: improve error handling in connectors
```
### Testing
+
+We use Docker Compose to run tests with all dependencies (PostgreSQL, Redis, etc.).
+
+#### Running Backend Tests
+
+```bash
+# Start the test dependencies
+docker compose up -d db redis
+
+# Build the backend (pytest is included in the Docker image)
+docker compose build backend
+
+# Run all tests
+docker compose run --rm -e TESTING=true backend pytest tests/ -v --tb=short
+
+# Run tests with coverage
+docker compose run --rm -e TESTING=true backend pytest tests/ -v --tb=short --cov=app --cov-report=html
+
+# Run a specific test file
+docker compose run --rm -e TESTING=true backend pytest tests/test_celery_tasks.py -v
+
+# Run tests matching a pattern
+docker compose run --rm -e TESTING=true backend pytest tests/ -v -k "test_slack"
+
+# Stop services when done
+docker compose down -v
+```
+
+#### Running Frontend Tests
+
+```bash
+cd surfsense_web
+pnpm install
+pnpm test
+```
+
+#### Test Guidelines
- Write tests for new features and bug fixes
- Ensure existing tests pass before submitting
- Include integration tests for API endpoints
+- Use `pytest-asyncio` for async tests in the backend
+- Mock external services and APIs appropriately
### Branch Naming
Use descriptive branch names:
diff --git a/docker-compose.yml b/docker-compose.yml
index 5bf17ec8a..8785d6534 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -1,5 +1,3 @@
-version: "3.8"
-
services:
db:
image: ankane/pgvector:latest
@@ -39,6 +37,8 @@ services:
- "${BACKEND_PORT:-8000}:8000"
volumes:
- ./surfsense_backend/app:/app/app
+ - ./surfsense_backend/tests:/app/tests
+ - ./surfsense_backend/pytest.ini:/app/pytest.ini
- shared_temp:/tmp
env_file:
- ./surfsense_backend/.env
@@ -125,6 +125,30 @@ services:
depends_on:
- backend
+ # ============================================================================
+ # TEST SERVICE
+ # Use: docker compose --profile test run --rm backend-test
+ # Or: docker compose --profile test run --rm backend-test pytest tests/ -v
+ # ============================================================================
+ backend-test:
+ profiles: ["test"]
+ build: ./surfsense_backend
+ volumes:
+ - ./surfsense_backend/app:/app/app
+ - ./surfsense_backend/tests:/app/tests
+ - ./surfsense_backend/pytest.ini:/app/pytest.ini
+ - ./surfsense_backend/pyproject.toml:/app/pyproject.toml
+ environment:
+ - DATABASE_URL=postgresql+asyncpg://${POSTGRES_USER:-postgres}:${POSTGRES_PASSWORD:-postgres}@db:5432/${POSTGRES_DB:-surfsense}
+ - CELERY_BROKER_URL=redis://redis:6379/0
+ - CELERY_RESULT_BACKEND=redis://redis:6379/0
+ - PYTHONPATH=/app
+ - TESTING=true
+ depends_on:
+ - db
+ - redis
+ command: ["pytest", "tests/", "-v", "--tb=short"]
+
volumes:
postgres_data:
pgadmin_data:
diff --git a/surfsense_backend/Dockerfile b/surfsense_backend/Dockerfile
index 91a225754..e473d2d53 100644
--- a/surfsense_backend/Dockerfile
+++ b/surfsense_backend/Dockerfile
@@ -35,9 +35,10 @@ RUN if [ "$(uname -m)" = "x86_64" ]; then \
pip install --no-cache-dir torch torchvision torchaudio; \
fi
-# Install python dependencies
+# Install python dependencies including test dependencies
RUN pip install --no-cache-dir uv && \
- uv pip install --system --no-cache-dir -e .
+ uv pip install --system --no-cache-dir -e . && \
+ uv pip install --system --no-cache-dir pytest pytest-asyncio pytest-cov
# Set SSL environment variables dynamically
RUN CERTIFI_PATH=$(python -c "import certifi; print(certifi.where())") && \
diff --git a/surfsense_backend/README.md b/surfsense_backend/README.md
new file mode 100644
index 000000000..b83cdc4e9
--- /dev/null
+++ b/surfsense_backend/README.md
@@ -0,0 +1,162 @@
+# SurfSense Backend
+
+FastAPI-based backend service for SurfSense.
+
+## Quick Start
+
+### Using Docker (Recommended)
+
+```bash
+# From the project root
+docker compose up -d
+```
+
+This starts all services:
+- **Backend API** at `http://localhost:8000`
+- **PostgreSQL** with pgvector at `localhost:5432`
+- **Redis** at `localhost:6379`
+- **Celery Worker** for background tasks
+- **Flower** (Celery monitor) at `http://localhost:5555`
+
+### Manual Setup
+
+```bash
+cd surfsense_backend
+
+# Create virtual environment
+python -m venv .venv
+source .venv/bin/activate # or `.venv\Scripts\activate` on Windows
+
+# Install dependencies
+pip install uv
+uv pip install -e .
+
+# Copy environment file
+cp .env.example .env
+# Edit .env with your configuration
+
+# Run database migrations
+alembic upgrade head
+
+# Start the server
+uvicorn main:app --reload
+```
+
+## Testing
+
+We use **pytest** with **pytest-asyncio** for testing. Tests run inside Docker to ensure all dependencies (PostgreSQL, Redis) are available.
+
+### Running Tests
+
+```bash
+# From the project root directory
+
+# 1. Start dependencies
+docker compose up -d db redis
+
+# 2. Build the backend image (pytest is included)
+docker compose build backend
+
+# 3. Run all tests
+docker compose run --rm -e TESTING=true backend pytest tests/ -v --tb=short
+
+# 4. Run with coverage report
+docker compose run --rm -e TESTING=true backend pytest tests/ -v --tb=short --cov=app --cov-report=html
+
+# 5. Run specific test file
+docker compose run --rm -e TESTING=true backend pytest tests/test_celery_tasks.py -v
+
+# 6. Run tests matching a pattern
+docker compose run --rm -e TESTING=true backend pytest tests/ -v -k "test_slack"
+
+# 7. Stop services when done
+docker compose down -v
+```
+
+### Test Structure
+
+```
+tests/
+├── conftest.py                                 # Shared fixtures
+├── test_celery_tasks.py                        # Celery task tests
+├── test_celery_tasks_comprehensive.py          # Comprehensive Celery tests
+├── test_connector_indexers_comprehensive.py    # Connector indexer tests
+├── test_external_connectors_comprehensive.py   # External connector tests
+├── test_document_processors_comprehensive.py   # Document processor tests
+├── test_connector_service.py                   # Connector service tests
+├── test_llm_service.py                         # LLM service tests
+├── test_retrievers.py                          # Retriever tests
+├── test_routes_*.py                            # API route tests
+└── ...
+```
+
+### Writing Tests
+
+- Use `pytest.mark.asyncio` for async tests
+- Use fixtures from `conftest.py` for common setup
+- Mock external services (LLMs, APIs) appropriately
+- Follow the existing test patterns for consistency
+
+Example async test:
+
+```python
+import pytest
+from unittest.mock import AsyncMock, patch
+
+@pytest.mark.asyncio
+async def test_example():
+ with patch("app.services.some_service.external_api") as mock_api:
+ mock_api.return_value = {"result": "success"}
+ # Your test code here
+ assert result == expected
+```
+
+### CI/CD
+
+Tests run automatically on GitHub Actions for:
+- Pull requests to `main` and `dev` branches
+- Pushes to `main` and `dev` branches
+
+The CI uses Docker Compose to run tests with the same dependencies as local development.
+
+## Project Structure
+
+```
+surfsense_backend/
+├── app/
+│   ├── agents/              # AI agent implementations
+│   ├── config/              # Configuration management
+│   ├── connectors/          # External service connectors
+│   ├── prompts/             # LLM prompts
+│   ├── retriver/            # Search and retrieval logic
+│   ├── routes/              # API endpoints
+│   ├── schemas/             # Pydantic models
+│   ├── services/            # Business logic services
+│   ├── tasks/               # Celery background tasks
+│   └── utils/               # Utility functions
+├── alembic/                 # Database migrations
+├── tests/                   # Test suite
+├── main.py                  # Application entry point
+├── celery_worker.py         # Celery worker entry point
+└── pyproject.toml           # Project dependencies
+```
+
+## Environment Variables
+
+Copy `.env.example` to `.env` and configure:
+
+| Variable | Description |
+|----------|-------------|
+| `DATABASE_URL` | PostgreSQL connection string |
+| `CELERY_BROKER_URL` | Redis URL for Celery broker |
+| `CELERY_RESULT_BACKEND` | Redis URL for Celery results |
+| `SECRET_KEY` | JWT secret key |
+| `TESTING` | Set to `true` for test mode |
+
+See `.env.example` for all available options.
+
+## API Documentation
+
+When running locally, access the API docs at:
+- **Swagger UI**: `http://localhost:8000/docs`
+- **ReDoc**: `http://localhost:8000/redoc`
diff --git a/surfsense_backend/app/routes/airtable_add_connector_route.py b/surfsense_backend/app/routes/airtable_add_connector_route.py
index fa124f1c2..4c46d7c7a 100644
--- a/surfsense_backend/app/routes/airtable_add_connector_route.py
+++ b/surfsense_backend/app/routes/airtable_add_connector_route.py
@@ -24,6 +24,9 @@
from app.schemas.airtable_auth_credentials import AirtableAuthCredentialsBase
from app.users import current_active_user
+# Re-export for backward compatibility
+from app.utils.connector_auth import refresh_airtable_token # noqa: F401
+
logger = logging.getLogger(__name__)
router = APIRouter()
@@ -286,78 +289,3 @@ async def airtable_callback(
raise HTTPException(
status_code=500, detail=f"Failed to complete Airtable OAuth: {e!s}"
) from e
-
-
-async def refresh_airtable_token(
- session: AsyncSession, connector: SearchSourceConnector
-):
- """
- Refresh the Airtable access token for a connector.
-
- Args:
- session: Database session
- connector: Airtable connector to refresh
-
- Returns:
- Updated connector object
- """
- try:
- logger.info(f"Refreshing Airtable token for connector {connector.id}")
-
- credentials = AirtableAuthCredentialsBase.from_dict(connector.config)
- auth_header = make_basic_auth_header(
- config.AIRTABLE_CLIENT_ID, config.AIRTABLE_CLIENT_SECRET
- )
-
- # Prepare token refresh data
- refresh_data = {
- "grant_type": "refresh_token",
- "refresh_token": credentials.refresh_token,
- "client_id": config.AIRTABLE_CLIENT_ID,
- "client_secret": config.AIRTABLE_CLIENT_SECRET,
- }
-
- async with httpx.AsyncClient() as client:
- token_response = await client.post(
- TOKEN_URL,
- data=refresh_data,
- headers={
- "Content-Type": "application/x-www-form-urlencoded",
- "Authorization": auth_header,
- },
- timeout=30.0,
- )
-
- if token_response.status_code != 200:
- raise HTTPException(
- status_code=400, detail="Token refresh failed: {token_response.text}"
- )
-
- token_json = token_response.json()
-
- # Calculate expiration time (UTC, tz-aware)
- expires_at = None
- if token_json.get("expires_in"):
- now_utc = datetime.now(UTC)
- expires_at = now_utc + timedelta(seconds=int(token_json["expires_in"]))
-
- # Update credentials object
- credentials.access_token = token_json["access_token"]
- credentials.expires_in = token_json.get("expires_in")
- credentials.expires_at = expires_at
- credentials.scope = token_json.get("scope")
-
- # Update connector config
- connector.config = credentials.to_dict()
- await session.commit()
- await session.refresh(connector)
-
- logger.info(
- f"Successfully refreshed Airtable token for connector {connector.id}"
- )
-
- return connector
- except Exception as e:
- raise HTTPException(
- status_code=500, detail=f"Failed to refresh Airtable token: {e!s}"
- ) from e
diff --git a/surfsense_backend/app/routes/chats_routes.py b/surfsense_backend/app/routes/chats_routes.py
index d7aff102b..21187e296 100644
--- a/surfsense_backend/app/routes/chats_routes.py
+++ b/surfsense_backend/app/routes/chats_routes.py
@@ -1,7 +1,9 @@
+import logging
+
from fastapi import APIRouter, Depends, HTTPException
from fastapi.responses import StreamingResponse
from langchain.schema import AIMessage, HumanMessage
-from sqlalchemy.exc import IntegrityError, OperationalError
+from sqlalchemy.exc import IntegrityError, OperationalError, SQLAlchemyError
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.future import select
from sqlalchemy.orm import selectinload
@@ -34,6 +36,8 @@
validate_top_k,
)
+logger = logging.getLogger(__name__)
+
router = APIRouter()
@@ -180,19 +184,29 @@ async def create_chat(
return db_chat
except HTTPException:
raise
- except IntegrityError:
+ except IntegrityError as e:
await session.rollback()
+ logger.warning("Chat creation failed due to integrity error: %s", e)
raise HTTPException(
status_code=400,
detail="Database constraint violation. Please check your input data.",
) from None
- except OperationalError:
+ except OperationalError as e:
await session.rollback()
+ logger.error("Database operational error during chat creation: %s", e)
raise HTTPException(
status_code=503, detail="Database operation failed. Please try again later."
) from None
- except Exception:
+ except SQLAlchemyError as e:
+ await session.rollback()
+ logger.error("Database error during chat creation: %s", e, exc_info=True)
+ raise HTTPException(
+ status_code=500,
+ detail="An unexpected error occurred while creating the chat.",
+ ) from None
+ except Exception as e:
await session.rollback()
+ logger.error("Unexpected error during chat creation: %s", e, exc_info=True)
raise HTTPException(
status_code=500,
detail="An unexpected error occurred while creating the chat.",
@@ -266,11 +280,18 @@ async def read_chats(
return result.all()
except HTTPException:
raise
- except OperationalError:
+ except OperationalError as e:
+ logger.error("Database operational error while fetching chats: %s", e)
raise HTTPException(
status_code=503, detail="Database operation failed. Please try again later."
) from None
- except Exception:
+ except SQLAlchemyError as e:
+ logger.error("Database error while fetching chats: %s", e, exc_info=True)
+ raise HTTPException(
+ status_code=500, detail="An unexpected error occurred while fetching chats."
+ ) from None
+ except Exception as e:
+ logger.error("Unexpected error while fetching chats: %s", e, exc_info=True)
raise HTTPException(
status_code=500, detail="An unexpected error occurred while fetching chats."
) from None
@@ -308,11 +329,19 @@ async def read_chat(
return chat
except HTTPException:
raise
- except OperationalError:
+ except OperationalError as e:
+ logger.error("Database operational error while fetching chat %d: %s", chat_id, e)
raise HTTPException(
status_code=503, detail="Database operation failed. Please try again later."
) from None
- except Exception:
+ except SQLAlchemyError as e:
+ logger.error("Database error while fetching chat %d: %s", chat_id, e, exc_info=True)
+ raise HTTPException(
+ status_code=500,
+ detail="An unexpected error occurred while fetching the chat.",
+ ) from None
+ except Exception as e:
+ logger.error("Unexpected error while fetching chat %d: %s", chat_id, e, exc_info=True)
raise HTTPException(
status_code=500,
detail="An unexpected error occurred while fetching the chat.",
@@ -357,19 +386,29 @@ async def update_chat(
return db_chat
except HTTPException:
raise
- except IntegrityError:
+ except IntegrityError as e:
await session.rollback()
+ logger.warning("Chat update failed due to integrity error for chat %d: %s", chat_id, e)
raise HTTPException(
status_code=400,
detail="Database constraint violation. Please check your input data.",
) from None
- except OperationalError:
+ except OperationalError as e:
await session.rollback()
+ logger.error("Database operational error while updating chat %d: %s", chat_id, e)
raise HTTPException(
status_code=503, detail="Database operation failed. Please try again later."
) from None
- except Exception:
+ except SQLAlchemyError as e:
+ await session.rollback()
+ logger.error("Database error while updating chat %d: %s", chat_id, e, exc_info=True)
+ raise HTTPException(
+ status_code=500,
+ detail="An unexpected error occurred while updating the chat.",
+ ) from None
+ except Exception as e:
await session.rollback()
+ logger.error("Unexpected error while updating chat %d: %s", chat_id, e, exc_info=True)
raise HTTPException(
status_code=500,
detail="An unexpected error occurred while updating the chat.",
@@ -407,18 +446,28 @@ async def delete_chat(
return {"message": "Chat deleted successfully"}
except HTTPException:
raise
- except IntegrityError:
+ except IntegrityError as e:
await session.rollback()
+ logger.warning("Chat deletion failed due to integrity error for chat %d: %s", chat_id, e)
raise HTTPException(
status_code=400, detail="Cannot delete chat due to existing dependencies."
) from None
- except OperationalError:
+ except OperationalError as e:
await session.rollback()
+ logger.error("Database operational error while deleting chat %d: %s", chat_id, e)
raise HTTPException(
status_code=503, detail="Database operation failed. Please try again later."
) from None
- except Exception:
+ except SQLAlchemyError as e:
+ await session.rollback()
+ logger.error("Database error while deleting chat %d: %s", chat_id, e, exc_info=True)
+ raise HTTPException(
+ status_code=500,
+ detail="An unexpected error occurred while deleting the chat.",
+ ) from None
+ except Exception as e:
await session.rollback()
+ logger.error("Unexpected error while deleting chat %d: %s", chat_id, e, exc_info=True)
raise HTTPException(
status_code=500,
detail="An unexpected error occurred while deleting the chat.",
diff --git a/surfsense_backend/app/routes/podcasts_routes.py b/surfsense_backend/app/routes/podcasts_routes.py
index deb9d9744..92acf9932 100644
--- a/surfsense_backend/app/routes/podcasts_routes.py
+++ b/surfsense_backend/app/routes/podcasts_routes.py
@@ -1,3 +1,4 @@
+import logging
import os
from pathlib import Path
@@ -26,6 +27,8 @@
from app.users import current_active_user
from app.utils.rbac import check_permission
+logger = logging.getLogger(__name__)
+
router = APIRouter()
@@ -54,21 +57,24 @@ async def create_podcast(
return db_podcast
except HTTPException as he:
raise he
- except IntegrityError:
+ except IntegrityError as e:
await session.rollback()
+ logger.warning("Podcast creation failed due to integrity error: %s", e)
raise HTTPException(
status_code=400,
detail="Podcast creation failed due to constraint violation",
) from None
- except SQLAlchemyError:
+ except SQLAlchemyError as e:
await session.rollback()
+ logger.error("Database error while creating podcast: %s", e, exc_info=True)
raise HTTPException(
status_code=500, detail="Database error occurred while creating podcast"
) from None
- except Exception:
+ except Exception as e:
await session.rollback()
+ logger.error("Unexpected error while creating podcast: %s", e, exc_info=True)
raise HTTPException(
- status_code=500, detail="An unexpected error occurred"
+ status_code=500, detail="An unexpected error occurred while creating podcast"
) from None
@@ -115,10 +121,16 @@ async def read_podcasts(
return result.scalars().all()
except HTTPException:
raise
- except SQLAlchemyError:
+ except SQLAlchemyError as e:
+ logger.error("Database error while fetching podcasts: %s", e, exc_info=True)
raise HTTPException(
status_code=500, detail="Database error occurred while fetching podcasts"
) from None
+ except Exception as e:
+ logger.error("Unexpected error while fetching podcasts: %s", e, exc_info=True)
+ raise HTTPException(
+ status_code=500, detail="An unexpected error occurred while fetching podcasts"
+ ) from None
@router.get("/podcasts/{podcast_id}", response_model=PodcastRead)
@@ -153,10 +165,16 @@ async def read_podcast(
return podcast
except HTTPException as he:
raise he
- except SQLAlchemyError:
+ except SQLAlchemyError as e:
+ logger.error("Database error while fetching podcast: %s", e, exc_info=True)
raise HTTPException(
status_code=500, detail="Database error occurred while fetching podcast"
) from None
+ except Exception as e:
+ logger.error("Unexpected error while fetching podcast: %s", e, exc_info=True)
+ raise HTTPException(
+ status_code=500, detail="An unexpected error occurred while fetching podcast"
+ ) from None
@router.put("/podcasts/{podcast_id}", response_model=PodcastRead)
@@ -199,11 +217,18 @@ async def update_podcast(
raise HTTPException(
status_code=400, detail="Update failed due to constraint violation"
) from None
- except SQLAlchemyError:
+ except SQLAlchemyError as e:
await session.rollback()
+ logger.error("Database error while updating podcast: %s", e, exc_info=True)
raise HTTPException(
status_code=500, detail="Database error occurred while updating podcast"
) from None
+ except Exception as e:
+ await session.rollback()
+ logger.error("Unexpected error while updating podcast: %s", e, exc_info=True)
+ raise HTTPException(
+ status_code=500, detail="An unexpected error occurred while updating podcast"
+ ) from None
@router.delete("/podcasts/{podcast_id}", response_model=dict)
@@ -237,11 +262,18 @@ async def delete_podcast(
return {"message": "Podcast deleted successfully"}
except HTTPException as he:
raise he
- except SQLAlchemyError:
+ except SQLAlchemyError as e:
await session.rollback()
+ logger.error("Database error while deleting podcast: %s", e, exc_info=True)
raise HTTPException(
status_code=500, detail="Database error occurred while deleting podcast"
) from None
+ except Exception as e:
+ await session.rollback()
+ logger.error("Unexpected error while deleting podcast: %s", e, exc_info=True)
+ raise HTTPException(
+ status_code=500, detail="An unexpected error occurred while deleting podcast"
+ ) from None
async def generate_chat_podcast_with_new_session(
@@ -260,9 +292,7 @@ async def generate_chat_podcast_with_new_session(
session, chat_id, search_space_id, user_id, podcast_title, user_prompt
)
except Exception as e:
- import logging
-
- logging.error(f"Error generating podcast from chat: {e!s}")
+ logger.error("Error generating podcast from chat: %s", e, exc_info=True)
@router.post("/podcasts/generate")
diff --git a/surfsense_backend/app/services/connector_service.py b/surfsense_backend/app/services/connector_service.py
index 20a9ffa32..5c5f02c1b 100644
--- a/surfsense_backend/app/services/connector_service.py
+++ b/surfsense_backend/app/services/connector_service.py
@@ -1,10 +1,12 @@
import asyncio
+import logging
from typing import Any
from urllib.parse import urljoin
import httpx
from linkup import LinkupClient
from sqlalchemy import func
+from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.future import select
from tavily import TavilyClient
@@ -19,6 +21,8 @@
from app.retriver.chunks_hybrid_search import ChucksHybridSearchRetriever
from app.retriver.documents_hybrid_search import DocumentHybridSearchRetriever
+logger = logging.getLogger(__name__)
+
class ConnectorService:
def __init__(self, session: AsyncSession, search_space_id: int | None = None):
@@ -49,11 +53,17 @@ async def initialize_counter(self):
)
chunk_count = result.scalar() or 0
self.source_id_counter = chunk_count + 1
- print(
- f"Initialized source_id_counter to {self.source_id_counter} for search space {self.search_space_id}"
+ logger.debug(
+            "Initialized source_id_counter to %d for search space %s",
+ self.source_id_counter,
+ self.search_space_id,
+ )
+ except SQLAlchemyError as e:
+ logger.warning(
+            "Database error initializing source_id_counter for search space %s: %s",
+ self.search_space_id,
+ e,
)
- except Exception as e:
- print(f"Error initializing source_id_counter: {e!s}")
# Fallback to default value
self.source_id_counter = 1
diff --git a/surfsense_backend/app/tasks/connector_indexers/airtable_indexer.py b/surfsense_backend/app/tasks/connector_indexers/airtable_indexer.py
index cf6824db8..9839be77e 100644
--- a/surfsense_backend/app/tasks/connector_indexers/airtable_indexer.py
+++ b/surfsense_backend/app/tasks/connector_indexers/airtable_indexer.py
@@ -8,8 +8,8 @@
from app.config import config
from app.connectors.airtable_connector import AirtableConnector
from app.db import Document, DocumentType, SearchSourceConnectorType
-from app.routes.airtable_add_connector_route import refresh_airtable_token
 from app.schemas.airtable_auth_credentials import AirtableAuthCredentialsBase
 from app.services.llm_service import get_user_long_context_llm
 from app.services.task_logging_service import TaskLoggingService
+from app.utils.connector_auth import refresh_airtable_token
 from app.utils.document_converters import (
diff --git a/surfsense_backend/app/tasks/document_processors/file_processors.py b/surfsense_backend/app/tasks/document_processors/file_processors.py
index 4ae04e050..c7204a0bc 100644
--- a/surfsense_backend/app/tasks/document_processors/file_processors.py
+++ b/surfsense_backend/app/tasks/document_processors/file_processors.py
@@ -30,6 +30,8 @@
)
from .markdown_processor import add_received_markdown_file_document
+logger = logging.getLogger(__name__)
+
async def add_received_file_document_using_unstructured(
session: AsyncSession,
@@ -473,9 +475,8 @@ async def process_file_in_background(
try:
os.unlink(file_path)
- except Exception as e:
- print("Error deleting temp file", e)
- pass
+ except OSError as e:
+ logger.debug("Could not delete temp file %s: %s", file_path, e)
await task_logger.log_task_progress(
log_entry,
@@ -598,9 +599,8 @@ async def process_file_in_background(
# Clean up the temp file
try:
os.unlink(file_path)
- except Exception as e:
- print("Error deleting temp file", e)
- pass
+ except OSError as e:
+ logger.debug("Could not delete temp file %s: %s", file_path, e)
# Process transcription as markdown document
result = await add_received_markdown_file_document(
@@ -743,9 +743,8 @@ async def process_file_in_background(
try:
os.unlink(file_path)
- except Exception as e:
- print("Error deleting temp file", e)
- pass
+ except OSError as e:
+ logger.debug("Could not delete temp file %s: %s", file_path, e)
# Pass the documents to the existing background task
result = await add_received_file_document_using_unstructured(
@@ -812,9 +811,8 @@ async def process_file_in_background(
try:
os.unlink(file_path)
- except Exception as e:
- print("Error deleting temp file", e)
- pass
+ except OSError as e:
+ logger.debug("Could not delete temp file %s: %s", file_path, e)
# Get markdown documents from the result
markdown_documents = await result.aget_markdown_documents(
@@ -971,9 +969,8 @@ async def process_file_in_background(
try:
os.unlink(file_path)
- except Exception as e:
- print("Error deleting temp file", e)
- pass
+ except OSError as e:
+ logger.debug("Could not delete temp file %s: %s", file_path, e)
await task_logger.log_task_progress(
log_entry,
diff --git a/surfsense_backend/app/utils/connector_auth.py b/surfsense_backend/app/utils/connector_auth.py
new file mode 100644
index 000000000..ec517d00c
--- /dev/null
+++ b/surfsense_backend/app/utils/connector_auth.py
@@ -0,0 +1,105 @@
+"""
+Utility functions for connector authentication.
+
+This module provides authentication helper functions for various connectors
+to avoid circular imports between routes and connector indexers.
+"""
+
+import base64
+import logging
+from datetime import UTC, datetime, timedelta
+
+import httpx
+from fastapi import HTTPException
+from sqlalchemy.ext.asyncio import AsyncSession
+
+from app.config import config
+from app.db import SearchSourceConnector
+from app.schemas.airtable_auth_credentials import AirtableAuthCredentialsBase
+
+logger = logging.getLogger(__name__)
+
+# Airtable OAuth endpoints
+AIRTABLE_TOKEN_URL = "https://airtable.com/oauth2/v1/token"
+
+
+def make_basic_auth_header(client_id: str, client_secret: str) -> str:
+ """Create a Basic authentication header."""
+ credentials = f"{client_id}:{client_secret}".encode()
+ b64 = base64.b64encode(credentials).decode("ascii")
+ return f"Basic {b64}"
+
+
+async def refresh_airtable_token(
+ session: AsyncSession, connector: SearchSourceConnector
+):
+ """
+ Refresh the Airtable access token for a connector.
+
+ Args:
+ session: Database session
+ connector: Airtable connector to refresh
+
+ Returns:
+ Updated connector object
+ """
+ try:
+        logger.info("Refreshing Airtable token for connector %s", connector.id)
+
+ credentials = AirtableAuthCredentialsBase.from_dict(connector.config)
+ auth_header = make_basic_auth_header(
+ config.AIRTABLE_CLIENT_ID, config.AIRTABLE_CLIENT_SECRET
+ )
+
+ # Prepare token refresh data
+ refresh_data = {
+ "grant_type": "refresh_token",
+ "refresh_token": credentials.refresh_token,
+ "client_id": config.AIRTABLE_CLIENT_ID,
+ "client_secret": config.AIRTABLE_CLIENT_SECRET,
+ }
+
+ async with httpx.AsyncClient() as client:
+ token_response = await client.post(
+ AIRTABLE_TOKEN_URL,
+ data=refresh_data,
+ headers={
+ "Content-Type": "application/x-www-form-urlencoded",
+ "Authorization": auth_header,
+ },
+ timeout=30.0,
+ )
+
+ if token_response.status_code != 200:
+ raise HTTPException(
+                status_code=400, detail=f"Token refresh failed: {token_response.text}"
+ )
+
+ token_json = token_response.json()
+
+ # Calculate expiration time (UTC, tz-aware)
+ expires_at = None
+ if token_json.get("expires_in"):
+ now_utc = datetime.now(UTC)
+ expires_at = now_utc + timedelta(seconds=int(token_json["expires_in"]))
+
+ # Update credentials object
+ credentials.access_token = token_json["access_token"]
+ credentials.expires_in = token_json.get("expires_in")
+ credentials.expires_at = expires_at
+ credentials.scope = token_json.get("scope")
+
+ # Update connector config
+ connector.config = credentials.to_dict()
+ await session.commit()
+ await session.refresh(connector)
+
+        logger.info(
+            "Successfully refreshed Airtable token for connector %s", connector.id
+        )
+
+ return connector
+    except HTTPException:
+        raise
+    except Exception as e:
+        raise HTTPException(status_code=500, detail=f"Failed to refresh Airtable token: {e!s}") from e
diff --git a/surfsense_backend/pyproject.toml b/surfsense_backend/pyproject.toml
index 1951afdd0..37656daa5 100644
--- a/surfsense_backend/pyproject.toml
+++ b/surfsense_backend/pyproject.toml
@@ -55,6 +55,12 @@ dependencies = [
dev = [
"ruff>=0.12.5",
]
+test = [
+ "pytest>=8.0.0",
+ "pytest-asyncio>=0.23.0",
+ "pytest-cov>=4.1.0",
+ "httpx>=0.27.0",
+]
[tool.ruff]
# Exclude a variety of commonly ignored directories.
diff --git a/surfsense_backend/pytest.ini b/surfsense_backend/pytest.ini
new file mode 100644
index 000000000..1af57e00c
--- /dev/null
+++ b/surfsense_backend/pytest.ini
@@ -0,0 +1,15 @@
+[pytest]
+testpaths = tests
+asyncio_mode = auto
+asyncio_default_fixture_loop_scope = function
+python_files = test_*.py
+python_classes = Test*
+python_functions = test_*
+addopts = -v --tb=short
+filterwarnings =
+ ignore::DeprecationWarning
+ ignore::PendingDeprecationWarning
+markers =
+ unit: Unit tests (fast, isolated)
+ integration: Integration tests (may require external services)
+ slow: Slow running tests
diff --git a/surfsense_backend/tests/__init__.py b/surfsense_backend/tests/__init__.py
new file mode 100644
index 000000000..9cbd8632d
--- /dev/null
+++ b/surfsense_backend/tests/__init__.py
@@ -0,0 +1 @@
+# Test package for SurfSense Backend
diff --git a/surfsense_backend/tests/conftest.py b/surfsense_backend/tests/conftest.py
new file mode 100644
index 000000000..c10210cd9
--- /dev/null
+++ b/surfsense_backend/tests/conftest.py
@@ -0,0 +1,69 @@
+"""
+Shared test fixtures and configuration for SurfSense Backend tests.
+"""
+
+import uuid
+from unittest.mock import AsyncMock, MagicMock
+
+import pytest
+from sqlalchemy.ext.asyncio import AsyncSession
+
+
+@pytest.fixture
+def mock_session() -> AsyncMock:
+ """Create a mock async database session."""
+ session = AsyncMock(spec=AsyncSession)
+ session.execute = AsyncMock()
+ session.commit = AsyncMock()
+ session.rollback = AsyncMock()
+ session.refresh = AsyncMock()
+ session.add = MagicMock()
+ session.delete = AsyncMock()
+ return session
+
+
+@pytest.fixture
+def mock_user() -> MagicMock:
+ """Create a mock user object."""
+ user = MagicMock()
+ user.id = uuid.uuid4()
+ user.email = "test@example.com"
+ user.is_active = True
+ user.is_superuser = False
+ user.is_verified = True
+ return user
+
+
+@pytest.fixture
+def mock_search_space() -> MagicMock:
+ """Create a mock search space object."""
+ search_space = MagicMock()
+ search_space.id = 1
+ search_space.name = "Test Search Space"
+ search_space.llm_configs = []
+ search_space.fast_llm_id = None
+ search_space.long_context_llm_id = None
+ search_space.strategic_llm_id = None
+ return search_space
+
+
+@pytest.fixture
+def sample_messages() -> list[dict]:
+ """Sample chat messages for testing."""
+ return [
+ {"role": "user", "content": "Hello, how are you?"},
+ {"role": "assistant", "content": "I'm doing well, thank you!"},
+ {"role": "user", "content": "What is the weather today?"},
+ ]
+
+
+@pytest.fixture
+def sample_chat_create_data() -> dict:
+ """Sample data for creating a chat."""
+ return {
+ "title": "Test Chat",
+ "type": "normal",
+ "search_space_id": 1,
+ "initial_connectors": [],
+ "messages": [],
+ }
diff --git a/surfsense_backend/tests/test_agent_configuration.py b/surfsense_backend/tests/test_agent_configuration.py
new file mode 100644
index 000000000..f1a89792f
--- /dev/null
+++ b/surfsense_backend/tests/test_agent_configuration.py
@@ -0,0 +1,198 @@
+"""
+Tests for researcher agent configuration.
+Tests the Configuration dataclass and SearchMode enum.
+"""
+import pytest
+from dataclasses import fields
+
+from app.agents.researcher.configuration import Configuration, SearchMode
+
+
+class TestSearchMode:
+ """Tests for SearchMode enum."""
+
+ def test_chunks_mode_value(self):
+ """Test CHUNKS mode value."""
+ assert SearchMode.CHUNKS.value == "CHUNKS"
+
+ def test_documents_mode_value(self):
+ """Test DOCUMENTS mode value."""
+ assert SearchMode.DOCUMENTS.value == "DOCUMENTS"
+
+ def test_all_modes_are_strings(self):
+ """Test all modes have string values."""
+ for mode in SearchMode:
+ assert isinstance(mode.value, str)
+
+ def test_can_compare_modes(self):
+ """Test enum comparison."""
+ assert SearchMode.CHUNKS == SearchMode.CHUNKS
+ assert SearchMode.CHUNKS != SearchMode.DOCUMENTS
+
+
+class TestConfiguration:
+ """Tests for Configuration dataclass."""
+
+ def test_create_configuration_with_required_params(self):
+ """Test creating configuration with required parameters."""
+ config = Configuration(
+ user_query="test query",
+ connectors_to_search=["TAVILY_API"],
+ user_id="user-123",
+ search_space_id=1,
+ search_mode=SearchMode.CHUNKS,
+ document_ids_to_add_in_context=[],
+ )
+
+ assert config.user_query == "test query"
+ assert config.connectors_to_search == ["TAVILY_API"]
+ assert config.user_id == "user-123"
+ assert config.search_space_id == 1
+ assert config.search_mode == SearchMode.CHUNKS
+ assert config.document_ids_to_add_in_context == []
+
+ def test_create_configuration_with_optional_params(self):
+ """Test creating configuration with optional parameters."""
+ config = Configuration(
+ user_query="test query",
+ connectors_to_search=["TAVILY_API"],
+ user_id="user-123",
+ search_space_id=1,
+ search_mode=SearchMode.DOCUMENTS,
+ document_ids_to_add_in_context=[1, 2, 3],
+ language="en",
+ top_k=20,
+ )
+
+ assert config.language == "en"
+ assert config.top_k == 20
+ assert config.document_ids_to_add_in_context == [1, 2, 3]
+
+ def test_default_language_is_none(self):
+ """Test default language is None."""
+ config = Configuration(
+ user_query="test",
+ connectors_to_search=[],
+ user_id="user-123",
+ search_space_id=1,
+ search_mode=SearchMode.CHUNKS,
+ document_ids_to_add_in_context=[],
+ )
+
+ assert config.language is None
+
+ def test_default_top_k_is_10(self):
+ """Test default top_k is 10."""
+ config = Configuration(
+ user_query="test",
+ connectors_to_search=[],
+ user_id="user-123",
+ search_space_id=1,
+ search_mode=SearchMode.CHUNKS,
+ document_ids_to_add_in_context=[],
+ )
+
+ assert config.top_k == 10
+
+ def test_from_runnable_config_with_none(self):
+ """Test from_runnable_config with None returns defaults."""
+ # This should not raise an error but will fail due to missing required fields
+ # We're testing that the method handles None gracefully
+ with pytest.raises(TypeError):
+ # Missing required fields should raise TypeError
+ Configuration.from_runnable_config(None)
+
+ def test_from_runnable_config_with_empty_config(self):
+ """Test from_runnable_config with empty config."""
+ with pytest.raises(TypeError):
+ # Missing required fields should raise TypeError
+ Configuration.from_runnable_config({})
+
+ def test_from_runnable_config_with_valid_config(self):
+ """Test from_runnable_config with valid config."""
+ runnable_config = {
+ "configurable": {
+ "user_query": "test query",
+ "connectors_to_search": ["TAVILY_API"],
+ "user_id": "user-123",
+ "search_space_id": 1,
+ "search_mode": SearchMode.CHUNKS,
+ "document_ids_to_add_in_context": [],
+ "language": "en",
+ "top_k": 15,
+ }
+ }
+
+ config = Configuration.from_runnable_config(runnable_config)
+
+ assert config.user_query == "test query"
+ assert config.connectors_to_search == ["TAVILY_API"]
+ assert config.language == "en"
+ assert config.top_k == 15
+
+ def test_from_runnable_config_ignores_unknown_fields(self):
+ """Test from_runnable_config ignores unknown fields."""
+ runnable_config = {
+ "configurable": {
+ "user_query": "test query",
+ "connectors_to_search": ["TAVILY_API"],
+ "user_id": "user-123",
+ "search_space_id": 1,
+ "search_mode": SearchMode.CHUNKS,
+ "document_ids_to_add_in_context": [],
+ "unknown_field": "should be ignored",
+ "another_unknown": 123,
+ }
+ }
+
+ config = Configuration.from_runnable_config(runnable_config)
+
+ assert not hasattr(config, "unknown_field")
+ assert not hasattr(config, "another_unknown")
+
+ def test_configuration_has_expected_fields(self):
+ """Test Configuration has all expected fields."""
+ field_names = {f.name for f in fields(Configuration)}
+
+ expected_fields = {
+ "user_query",
+ "connectors_to_search",
+ "user_id",
+ "search_space_id",
+ "search_mode",
+ "document_ids_to_add_in_context",
+ "language",
+ "top_k",
+ }
+
+ assert field_names == expected_fields
+
+ def test_configuration_multiple_connectors(self):
+ """Test configuration with multiple connectors."""
+ config = Configuration(
+ user_query="test",
+ connectors_to_search=["TAVILY_API", "SLACK_CONNECTOR", "NOTION_CONNECTOR"],
+ user_id="user-123",
+ search_space_id=1,
+ search_mode=SearchMode.CHUNKS,
+ document_ids_to_add_in_context=[],
+ )
+
+ assert len(config.connectors_to_search) == 3
+ assert "TAVILY_API" in config.connectors_to_search
+ assert "SLACK_CONNECTOR" in config.connectors_to_search
+ assert "NOTION_CONNECTOR" in config.connectors_to_search
+
+ def test_configuration_with_document_ids(self):
+ """Test configuration with document IDs to add to context."""
+ config = Configuration(
+ user_query="test",
+ connectors_to_search=[],
+ user_id="user-123",
+ search_space_id=1,
+ search_mode=SearchMode.CHUNKS,
+ document_ids_to_add_in_context=[1, 2, 3, 4, 5],
+ )
+
+ assert config.document_ids_to_add_in_context == [1, 2, 3, 4, 5]
+ assert len(config.document_ids_to_add_in_context) == 5
diff --git a/surfsense_backend/tests/test_blocknote_converter.py b/surfsense_backend/tests/test_blocknote_converter.py
new file mode 100644
index 000000000..60a770e47
--- /dev/null
+++ b/surfsense_backend/tests/test_blocknote_converter.py
@@ -0,0 +1,380 @@
+"""
+Tests for the blocknote_converter utility module.
+
+These tests validate:
+1. Empty/invalid input is handled gracefully (returns None, not crash)
+2. API failures don't crash the application
+3. Response structure is correctly parsed
+4. Network errors are properly handled
+"""
+
+import pytest
+from unittest.mock import AsyncMock, MagicMock, patch
+import httpx
+
+# Skip these tests if app dependencies aren't installed
+pytest.importorskip("yaml")
+
+from app.utils.blocknote_converter import (
+ convert_markdown_to_blocknote,
+ convert_blocknote_to_markdown,
+)
+
+
+class TestMarkdownToBlocknoteInputValidation:
+ """
+ Tests validating input handling for markdown to BlockNote conversion.
+ """
+
+ @pytest.mark.asyncio
+ async def test_empty_string_returns_none(self):
+ """
+ Empty markdown must return None, not error.
+ This is a common edge case when content hasn't been written yet.
+ """
+ result = await convert_markdown_to_blocknote("")
+ assert result is None
+
+ @pytest.mark.asyncio
+ async def test_whitespace_only_returns_none(self):
+ """
+ Whitespace-only content should be treated as empty.
+ Spaces, tabs, newlines alone don't constitute content.
+ """
+ test_cases = [" ", "\t\t", "\n\n", " \n \t "]
+
+ for whitespace in test_cases:
+ result = await convert_markdown_to_blocknote(whitespace)
+ assert result is None, f"Expected None for whitespace: {repr(whitespace)}"
+
+ @pytest.mark.asyncio
+ async def test_very_short_content_returns_fallback(self):
+ """
+ Very short content should return a fallback document.
+ Content too short to convert meaningfully should still return something.
+ """
+ result = await convert_markdown_to_blocknote("x")
+
+ assert result is not None
+ assert isinstance(result, list)
+ assert len(result) > 0
+ # Fallback document should be a paragraph
+ assert result[0]["type"] == "paragraph"
+
+
+class TestMarkdownToBlocknoteNetworkResilience:
+ """
+ Tests validating network error handling.
+ The converter should never crash on network issues.
+ """
+
+ @pytest.mark.asyncio
+ @patch("app.utils.blocknote_converter.httpx.AsyncClient")
+ @patch("app.utils.blocknote_converter.config")
+ async def test_timeout_returns_none_not_exception(
+ self, mock_config, mock_client_class
+ ):
+ """
+ Network timeout must return None, not raise exception.
+ Timeouts are common and shouldn't crash the application.
+ """
+ mock_config.NEXT_FRONTEND_URL = "http://localhost:3000"
+
+ mock_client = AsyncMock()
+ mock_client.post = AsyncMock(side_effect=httpx.TimeoutException("Timeout"))
+ mock_client.__aenter__ = AsyncMock(return_value=mock_client)
+ mock_client.__aexit__ = AsyncMock()
+ mock_client_class.return_value = mock_client
+
+ # Long enough content to trigger API call
+ result = await convert_markdown_to_blocknote(
+ "# Heading\n\nThis is a paragraph with enough content."
+ )
+
+ assert result is None # Not an exception
+
+ @pytest.mark.asyncio
+ @patch("app.utils.blocknote_converter.httpx.AsyncClient")
+ @patch("app.utils.blocknote_converter.config")
+ async def test_server_error_returns_none_not_exception(
+ self, mock_config, mock_client_class
+ ):
+ """
+ HTTP 5xx errors must return None, not raise exception.
+ Server errors shouldn't crash the caller.
+ """
+ mock_config.NEXT_FRONTEND_URL = "http://localhost:3000"
+
+ mock_response = MagicMock()
+ mock_response.status_code = 500
+ mock_response.text = "Internal Server Error"
+
+ mock_client = AsyncMock()
+ mock_client.post = AsyncMock(
+ side_effect=httpx.HTTPStatusError(
+ "Server error",
+ request=MagicMock(),
+ response=mock_response,
+ )
+ )
+ mock_client.__aenter__ = AsyncMock(return_value=mock_client)
+ mock_client.__aexit__ = AsyncMock()
+ mock_client_class.return_value = mock_client
+
+ result = await convert_markdown_to_blocknote(
+ "# Heading\n\nThis is a paragraph with enough content."
+ )
+
+ assert result is None
+
+ @pytest.mark.asyncio
+ @patch("app.utils.blocknote_converter.httpx.AsyncClient")
+ @patch("app.utils.blocknote_converter.config")
+ async def test_connection_error_returns_none(self, mock_config, mock_client_class):
+ """
+ Connection errors (server unreachable) must return None.
+ """
+ mock_config.NEXT_FRONTEND_URL = "http://localhost:3000"
+
+ mock_client = AsyncMock()
+ mock_client.post = AsyncMock(side_effect=httpx.ConnectError("Connection refused"))
+ mock_client.__aenter__ = AsyncMock(return_value=mock_client)
+ mock_client.__aexit__ = AsyncMock()
+ mock_client_class.return_value = mock_client
+
+ result = await convert_markdown_to_blocknote(
+ "# Heading\n\nThis is a paragraph with enough content."
+ )
+
+ assert result is None
+
+
+class TestMarkdownToBlocknoteSuccessfulConversion:
+ """
+ Tests for successful conversion scenarios.
+ """
+
+ @pytest.mark.asyncio
+ @patch("app.utils.blocknote_converter.httpx.AsyncClient")
+ @patch("app.utils.blocknote_converter.config")
+ async def test_successful_conversion_returns_document(
+ self, mock_config, mock_client_class
+ ):
+ """
+ Successful API response should return the BlockNote document.
+ """
+ mock_config.NEXT_FRONTEND_URL = "http://localhost:3000"
+
+ expected_document = [{"type": "paragraph", "content": [{"text": "Test"}]}]
+
+ mock_response = MagicMock()
+ mock_response.json.return_value = {"blocknote_document": expected_document}
+ mock_response.raise_for_status = MagicMock()
+
+ mock_client = AsyncMock()
+ mock_client.post = AsyncMock(return_value=mock_response)
+ mock_client.__aenter__ = AsyncMock(return_value=mock_client)
+ mock_client.__aexit__ = AsyncMock()
+ mock_client_class.return_value = mock_client
+
+ result = await convert_markdown_to_blocknote(
+ "# This is a heading\n\nThis is a paragraph with enough content."
+ )
+
+ assert result == expected_document
+
+ @pytest.mark.asyncio
+ @patch("app.utils.blocknote_converter.httpx.AsyncClient")
+ @patch("app.utils.blocknote_converter.config")
+ async def test_empty_api_response_returns_none(
+ self, mock_config, mock_client_class
+ ):
+ """
+ If API returns null/empty document, function should return None.
+ """
+ mock_config.NEXT_FRONTEND_URL = "http://localhost:3000"
+
+ mock_response = MagicMock()
+ mock_response.json.return_value = {"blocknote_document": None}
+ mock_response.raise_for_status = MagicMock()
+
+ mock_client = AsyncMock()
+ mock_client.post = AsyncMock(return_value=mock_response)
+ mock_client.__aenter__ = AsyncMock(return_value=mock_client)
+ mock_client.__aexit__ = AsyncMock()
+ mock_client_class.return_value = mock_client
+
+ result = await convert_markdown_to_blocknote(
+ "# Heading\n\nSome content that is long enough."
+ )
+
+ assert result is None
+
+
+class TestBlocknoteToMarkdownInputValidation:
+ """
+ Tests validating input handling for BlockNote to markdown conversion.
+ """
+
+ @pytest.mark.asyncio
+ async def test_none_document_returns_none(self):
+ """None input must return None, not crash."""
+ result = await convert_blocknote_to_markdown(None)
+ assert result is None
+
+ @pytest.mark.asyncio
+ async def test_empty_dict_returns_none(self):
+ """Empty dict should be treated as no content."""
+ result = await convert_blocknote_to_markdown({})
+ assert result is None
+
+ @pytest.mark.asyncio
+ async def test_empty_list_returns_none(self):
+ """Empty list should be treated as no content."""
+ result = await convert_blocknote_to_markdown([])
+ assert result is None
+
+
+class TestBlocknoteToMarkdownNetworkResilience:
+ """
+ Tests validating network error handling for BlockNote to markdown.
+ """
+
+ @pytest.mark.asyncio
+ @patch("app.utils.blocknote_converter.httpx.AsyncClient")
+ @patch("app.utils.blocknote_converter.config")
+ async def test_timeout_returns_none(self, mock_config, mock_client_class):
+ """Timeout must return None, not exception."""
+ mock_config.NEXT_FRONTEND_URL = "http://localhost:3000"
+
+ mock_client = AsyncMock()
+ mock_client.post = AsyncMock(side_effect=httpx.TimeoutException("Timeout"))
+ mock_client.__aenter__ = AsyncMock(return_value=mock_client)
+ mock_client.__aexit__ = AsyncMock()
+ mock_client_class.return_value = mock_client
+
+ blocknote_doc = [{"type": "paragraph", "content": []}]
+ result = await convert_blocknote_to_markdown(blocknote_doc)
+
+ assert result is None
+
+ @pytest.mark.asyncio
+ @patch("app.utils.blocknote_converter.httpx.AsyncClient")
+ @patch("app.utils.blocknote_converter.config")
+ async def test_server_error_returns_none(self, mock_config, mock_client_class):
+ """HTTP errors must return None, not exception."""
+ mock_config.NEXT_FRONTEND_URL = "http://localhost:3000"
+
+ mock_response = MagicMock()
+ mock_response.status_code = 500
+ mock_response.text = "Internal Server Error"
+
+ mock_client = AsyncMock()
+ mock_client.post = AsyncMock(
+ side_effect=httpx.HTTPStatusError(
+ "Server error",
+ request=MagicMock(),
+ response=mock_response,
+ )
+ )
+ mock_client.__aenter__ = AsyncMock(return_value=mock_client)
+ mock_client.__aexit__ = AsyncMock()
+ mock_client_class.return_value = mock_client
+
+ blocknote_doc = [{"type": "paragraph", "content": []}]
+ result = await convert_blocknote_to_markdown(blocknote_doc)
+
+ assert result is None
+
+
+class TestBlocknoteToMarkdownSuccessfulConversion:
+ """
+ Tests for successful BlockNote to markdown conversion.
+ """
+
+ @pytest.mark.asyncio
+ @patch("app.utils.blocknote_converter.httpx.AsyncClient")
+ @patch("app.utils.blocknote_converter.config")
+ async def test_successful_conversion_returns_markdown(
+ self, mock_config, mock_client_class
+ ):
+ """Successful conversion should return markdown string."""
+ mock_config.NEXT_FRONTEND_URL = "http://localhost:3000"
+
+ expected_markdown = "# Converted Heading\n\nParagraph text."
+
+ mock_response = MagicMock()
+ mock_response.json.return_value = {"markdown": expected_markdown}
+ mock_response.raise_for_status = MagicMock()
+
+ mock_client = AsyncMock()
+ mock_client.post = AsyncMock(return_value=mock_response)
+ mock_client.__aenter__ = AsyncMock(return_value=mock_client)
+ mock_client.__aexit__ = AsyncMock()
+ mock_client_class.return_value = mock_client
+
+ blocknote_doc = [
+ {"type": "heading", "content": [{"type": "text", "text": "Test"}]}
+ ]
+ result = await convert_blocknote_to_markdown(blocknote_doc)
+
+ assert result == expected_markdown
+
+ @pytest.mark.asyncio
+ @patch("app.utils.blocknote_converter.httpx.AsyncClient")
+ @patch("app.utils.blocknote_converter.config")
+ async def test_null_markdown_response_returns_none(
+ self, mock_config, mock_client_class
+ ):
+ """If API returns null markdown, function should return None."""
+ mock_config.NEXT_FRONTEND_URL = "http://localhost:3000"
+
+ mock_response = MagicMock()
+ mock_response.json.return_value = {"markdown": None}
+ mock_response.raise_for_status = MagicMock()
+
+ mock_client = AsyncMock()
+ mock_client.post = AsyncMock(return_value=mock_response)
+ mock_client.__aenter__ = AsyncMock(return_value=mock_client)
+ mock_client.__aexit__ = AsyncMock()
+ mock_client_class.return_value = mock_client
+
+ blocknote_doc = [{"type": "paragraph", "content": []}]
+ result = await convert_blocknote_to_markdown(blocknote_doc)
+
+ assert result is None
+
+ @pytest.mark.asyncio
+ @patch("app.utils.blocknote_converter.httpx.AsyncClient")
+ @patch("app.utils.blocknote_converter.config")
+ async def test_list_document_is_handled(self, mock_config, mock_client_class):
+ """
+ List documents (multiple blocks) should be handled correctly.
+ """
+ mock_config.NEXT_FRONTEND_URL = "http://localhost:3000"
+
+ expected_markdown = "- Item 1\n- Item 2"
+
+ mock_response = MagicMock()
+ mock_response.json.return_value = {"markdown": expected_markdown}
+ mock_response.raise_for_status = MagicMock()
+
+ mock_client = AsyncMock()
+ mock_client.post = AsyncMock(return_value=mock_response)
+ mock_client.__aenter__ = AsyncMock(return_value=mock_client)
+ mock_client.__aexit__ = AsyncMock()
+ mock_client_class.return_value = mock_client
+
+ blocknote_doc = [
+ {
+ "type": "bulletListItem",
+ "content": [{"type": "text", "text": "Item 1"}],
+ },
+ {
+ "type": "bulletListItem",
+ "content": [{"type": "text", "text": "Item 2"}],
+ },
+ ]
+ result = await convert_blocknote_to_markdown(blocknote_doc)
+
+ assert result == expected_markdown
diff --git a/surfsense_backend/tests/test_celery_tasks.py b/surfsense_backend/tests/test_celery_tasks.py
new file mode 100644
index 000000000..c6fc8037c
--- /dev/null
+++ b/surfsense_backend/tests/test_celery_tasks.py
@@ -0,0 +1,350 @@
+"""Tests for Celery tasks module."""
+
+from unittest.mock import AsyncMock, MagicMock, patch
+
+import pytest
+
+from app.tasks.celery_tasks.connector_tasks import (
+ get_celery_session_maker,
+ _index_slack_messages,
+ _index_notion_pages,
+ _index_github_repos,
+ _index_linear_issues,
+ _index_jira_issues,
+ _index_confluence_pages,
+ _index_clickup_tasks,
+ _index_google_calendar_events,
+ _index_airtable_records,
+ _index_google_gmail_messages,
+ _index_discord_messages,
+ _index_luma_events,
+ _index_elasticsearch_documents,
+ _index_crawled_urls,
+)
+
+
+class TestGetCelerySessionMaker:
+ """Tests for get_celery_session_maker function."""
+
+ def test_returns_session_maker(self):
+ """Test that get_celery_session_maker returns a session maker."""
+ with patch("app.tasks.celery_tasks.connector_tasks.create_async_engine") as mock_engine:
+ with patch("app.tasks.celery_tasks.connector_tasks.async_sessionmaker") as mock_session_maker:
+ mock_engine.return_value = MagicMock()
+ mock_session_maker.return_value = MagicMock()
+
+ result = get_celery_session_maker()
+
+ assert result is not None
+ mock_engine.assert_called_once()
+ mock_session_maker.assert_called_once()
+
+    def test_uses_null_pool(self):
+        """Test that NullPool is used for Celery tasks."""
+        from sqlalchemy.pool import NullPool
+
+        with patch("app.tasks.celery_tasks.connector_tasks.create_async_engine") as mock_engine:
+            with patch("app.tasks.celery_tasks.connector_tasks.async_sessionmaker"):
+                get_celery_session_maker()
+
+            # Check that NullPool was passed (identity check: poolclass is a class object)
+            call_kwargs = mock_engine.call_args[1]
+            assert call_kwargs.get("poolclass") is NullPool
+
+
+class TestIndexSlackMessages:
+ """Tests for Slack message indexing task."""
+
+ @pytest.mark.asyncio
+ async def test_index_slack_messages_calls_run_slack_indexing(self):
+ """Test that _index_slack_messages calls run_slack_indexing."""
+        mock_session = AsyncMock()
+        mock_run_indexing = AsyncMock()
+
+        with patch("app.tasks.celery_tasks.connector_tasks.get_celery_session_maker") as mock_maker:
+            # Create a mock context manager
+            mock_context = AsyncMock()
+            mock_context.__aenter__.return_value = mock_session
+            mock_context.__aexit__.return_value = None
+            mock_maker.return_value.return_value = mock_context
+            with patch("app.routes.search_source_connectors_routes.run_slack_indexing", mock_run_indexing):
+                await _index_slack_messages(1, 1, "user1", "2024-01-01", "2024-12-31")
+                mock_run_indexing.assert_called_once()
+
+
+class TestIndexNotionPages:
+ """Tests for Notion page indexing task."""
+
+ @pytest.mark.asyncio
+ async def test_index_notion_pages_calls_correct_function(self):
+ """Test that _index_notion_pages calls run_notion_indexing."""
+        mock_session = AsyncMock()
+
+        with patch("app.tasks.celery_tasks.connector_tasks.get_celery_session_maker") as mock_maker:
+            mock_context = AsyncMock()
+            mock_context.__aenter__.return_value = mock_session
+            mock_context.__aexit__.return_value = None
+            mock_maker.return_value.return_value = mock_context
+            with patch("app.routes.search_source_connectors_routes.run_notion_indexing", new_callable=AsyncMock) as mock_run:
+                await _index_notion_pages(1, 1, "user1", "2024-01-01", "2024-12-31")
+                mock_run.assert_called_once()
+
+
+class TestIndexGithubRepos:
+ """Tests for GitHub repository indexing task."""
+
+ @pytest.mark.asyncio
+ async def test_index_github_repos_with_valid_params(self):
+ """Test GitHub repo indexing with valid parameters."""
+        mock_session = AsyncMock()
+
+        with patch("app.tasks.celery_tasks.connector_tasks.get_celery_session_maker") as mock_maker:
+            mock_context = AsyncMock()
+            mock_context.__aenter__.return_value = mock_session
+            mock_context.__aexit__.return_value = None
+            mock_maker.return_value.return_value = mock_context
+            with patch("app.routes.search_source_connectors_routes.run_github_indexing", new_callable=AsyncMock) as mock_run:
+                await _index_github_repos(1, 1, "user1", "2024-01-01", "2024-12-31")
+                mock_run.assert_called_once()
+
+
+class TestIndexLinearIssues:
+ """Tests for Linear issues indexing task."""
+
+ @pytest.mark.asyncio
+ async def test_index_linear_issues_creates_session(self):
+ """Test that Linear indexing creates a proper session."""
+        mock_session = AsyncMock()
+
+        with patch("app.tasks.celery_tasks.connector_tasks.get_celery_session_maker") as mock_maker:
+            mock_context = AsyncMock()
+            mock_context.__aenter__.return_value = mock_session
+            mock_context.__aexit__.return_value = None
+            mock_maker.return_value.return_value = mock_context
+            with patch("app.routes.search_source_connectors_routes.run_linear_indexing", new_callable=AsyncMock) as mock_run:
+                await _index_linear_issues(1, 1, "user1", "2024-01-01", "2024-12-31")
+                mock_run.assert_called_once()
+
+
+class TestIndexJiraIssues:
+ """Tests for Jira issues indexing task."""
+
+ @pytest.mark.asyncio
+ async def test_index_jira_issues_passes_correct_params(self):
+ """Test that Jira indexing passes correct parameters."""
+ mock_session = AsyncMock()
+
+ with patch("app.tasks.celery_tasks.connector_tasks.get_celery_session_maker") as mock_maker:
+ mock_context = AsyncMock()
+ mock_context.__aenter__.return_value = mock_session
+ mock_context.__aexit__.return_value = None
+ mock_maker.return_value.return_value = mock_context
+
+ with patch("app.routes.search_source_connectors_routes.run_jira_indexing", new_callable=AsyncMock) as mock_run:
+ await _index_jira_issues(5, 10, "user123", "2024-06-01", "2024-06-30")
+ mock_run.assert_called_once_with(
+ mock_session, 5, 10, "user123", "2024-06-01", "2024-06-30"
+ )
+
+
+class TestIndexConfluencePages:
+ """Tests for Confluence pages indexing task."""
+
+ @pytest.mark.asyncio
+ async def test_index_confluence_pages_with_valid_params(self):
+ """Test Confluence indexing with valid parameters."""
+        mock_session = AsyncMock()
+
+        with patch("app.tasks.celery_tasks.connector_tasks.get_celery_session_maker") as mock_maker:
+            mock_context = AsyncMock()
+            mock_context.__aenter__.return_value = mock_session
+            mock_context.__aexit__.return_value = None
+            mock_maker.return_value.return_value = mock_context
+            with patch("app.routes.search_source_connectors_routes.run_confluence_indexing", new_callable=AsyncMock) as mock_run:
+                await _index_confluence_pages(1, 1, "user1", "2024-01-01", "2024-12-31")
+                mock_run.assert_called_once()
+
+
+class TestIndexClickupTasks:
+ """Tests for ClickUp tasks indexing."""
+
+ @pytest.mark.asyncio
+ async def test_index_clickup_tasks_creates_session(self):
+ """Test ClickUp indexing creates session."""
+        mock_session = AsyncMock()
+
+        with patch("app.tasks.celery_tasks.connector_tasks.get_celery_session_maker") as mock_maker:
+            mock_context = AsyncMock()
+            mock_context.__aenter__.return_value = mock_session
+            mock_context.__aexit__.return_value = None
+            mock_maker.return_value.return_value = mock_context
+            with patch("app.routes.search_source_connectors_routes.run_clickup_indexing", new_callable=AsyncMock) as mock_run:
+                await _index_clickup_tasks(1, 1, "user1", "2024-01-01", "2024-12-31")
+                mock_run.assert_called_once()
+
+
+class TestIndexGoogleCalendarEvents:
+ """Tests for Google Calendar events indexing."""
+
+ @pytest.mark.asyncio
+ async def test_index_google_calendar_events_with_valid_params(self):
+ """Test Google Calendar indexing with valid parameters."""
+        mock_session = AsyncMock()
+
+        with patch("app.tasks.celery_tasks.connector_tasks.get_celery_session_maker") as mock_maker:
+            mock_context = AsyncMock()
+            mock_context.__aenter__.return_value = mock_session
+            mock_context.__aexit__.return_value = None
+            mock_maker.return_value.return_value = mock_context
+            with patch("app.routes.search_source_connectors_routes.run_google_calendar_indexing", new_callable=AsyncMock) as mock_run:
+                await _index_google_calendar_events(1, 1, "user1", "2024-01-01", "2024-12-31")
+                mock_run.assert_called_once()
+
+
+class TestIndexAirtableRecords:
+ """Tests for Airtable records indexing."""
+
+ @pytest.mark.asyncio
+ async def test_index_airtable_records_creates_session(self):
+ """Test Airtable indexing creates session."""
+        mock_session = AsyncMock()
+
+        with patch("app.tasks.celery_tasks.connector_tasks.get_celery_session_maker") as mock_maker:
+            mock_context = AsyncMock()
+            mock_context.__aenter__.return_value = mock_session
+            mock_context.__aexit__.return_value = None
+            mock_maker.return_value.return_value = mock_context
+            with patch("app.routes.search_source_connectors_routes.run_airtable_indexing", new_callable=AsyncMock) as mock_run:
+                await _index_airtable_records(1, 1, "user1", "2024-01-01", "2024-12-31")
+                mock_run.assert_called_once()
+
+
+class TestIndexGoogleGmailMessages:
+ """Tests for Google Gmail messages indexing."""
+
+ @pytest.mark.asyncio
+ async def test_index_gmail_messages_calculates_days_back(self):
+ """Test Gmail indexing calculates days_back from start_date."""
+ mock_session = AsyncMock()
+
+ with patch("app.tasks.celery_tasks.connector_tasks.get_celery_session_maker") as mock_maker:
+ mock_context = AsyncMock()
+ mock_context.__aenter__.return_value = mock_session
+ mock_context.__aexit__.return_value = None
+ mock_maker.return_value.return_value = mock_context
+
+ with patch("app.routes.search_source_connectors_routes.run_google_gmail_indexing", new_callable=AsyncMock) as mock_run:
+ await _index_google_gmail_messages(1, 1, "user1", "2024-01-01", "2024-12-31")
+ # Should have been called with calculated days_back
+ mock_run.assert_called_once()
+
+ @pytest.mark.asyncio
+ async def test_index_gmail_messages_default_days_back(self):
+ """Test Gmail indexing uses default days_back when no start_date."""
+ mock_session = AsyncMock()
+
+ with patch("app.tasks.celery_tasks.connector_tasks.get_celery_session_maker") as mock_maker:
+ mock_context = AsyncMock()
+ mock_context.__aenter__.return_value = mock_session
+ mock_context.__aexit__.return_value = None
+ mock_maker.return_value.return_value = mock_context
+
+ with patch("app.routes.search_source_connectors_routes.run_google_gmail_indexing", new_callable=AsyncMock) as mock_run:
+ await _index_google_gmail_messages(1, 1, "user1", None, None)
+ # Should have been called with max_messages=100 and default days_back=30
+ # Args: session, connector_id, search_space_id, user_id, max_messages, days_back
+ mock_run.assert_called_once()
+ call_args = mock_run.call_args[0]
+ assert call_args[4] == 100 # max_messages (index 4)
+ assert call_args[5] == 30 # days_back (index 5)
+
+ @pytest.mark.asyncio
+ async def test_index_gmail_messages_invalid_date_uses_default(self):
+ """Test Gmail indexing uses default when date parsing fails."""
+ mock_session = AsyncMock()
+
+ with patch("app.tasks.celery_tasks.connector_tasks.get_celery_session_maker") as mock_maker:
+ mock_context = AsyncMock()
+ mock_context.__aenter__.return_value = mock_session
+ mock_context.__aexit__.return_value = None
+ mock_maker.return_value.return_value = mock_context
+
+ with patch("app.routes.search_source_connectors_routes.run_google_gmail_indexing", new_callable=AsyncMock) as mock_run:
+ await _index_google_gmail_messages(1, 1, "user1", "invalid-date", None)
+ mock_run.assert_called_once()
+ # Args: session, connector_id, search_space_id, user_id, max_messages, days_back
+ call_args = mock_run.call_args[0]
+ assert call_args[4] == 100 # max_messages (index 4)
+ assert call_args[5] == 30 # days_back default (index 5)
+
+
+class TestIndexDiscordMessages:
+ """Tests for Discord messages indexing."""
+
+ @pytest.mark.asyncio
+ async def test_index_discord_messages_with_valid_params(self):
+ """Test Discord indexing with valid parameters."""
+        mock_session = AsyncMock()
+
+        with patch("app.tasks.celery_tasks.connector_tasks.get_celery_session_maker") as mock_maker:
+            mock_context = AsyncMock()
+            mock_context.__aenter__.return_value = mock_session
+            mock_context.__aexit__.return_value = None
+            mock_maker.return_value.return_value = mock_context
+            with patch("app.routes.search_source_connectors_routes.run_discord_indexing", new_callable=AsyncMock) as mock_run:
+                await _index_discord_messages(1, 1, "user1", "2024-01-01", "2024-12-31")
+                mock_run.assert_called_once()
+
+
+class TestIndexLumaEvents:
+ """Tests for Luma events indexing."""
+
+ @pytest.mark.asyncio
+ async def test_index_luma_events_creates_session(self):
+ """Test Luma indexing creates session."""
+        mock_session = AsyncMock()
+
+        with patch("app.tasks.celery_tasks.connector_tasks.get_celery_session_maker") as mock_maker:
+            mock_context = AsyncMock()
+            mock_context.__aenter__.return_value = mock_session
+            mock_context.__aexit__.return_value = None
+            mock_maker.return_value.return_value = mock_context
+            with patch("app.routes.search_source_connectors_routes.run_luma_indexing", new_callable=AsyncMock) as mock_run:
+                await _index_luma_events(1, 1, "user1", "2024-01-01", "2024-12-31")
+                mock_run.assert_called_once()
+
+
+class TestIndexElasticsearchDocuments:
+ """Tests for Elasticsearch documents indexing."""
+
+ @pytest.mark.asyncio
+ async def test_index_elasticsearch_documents_with_valid_params(self):
+ """Test Elasticsearch indexing with valid parameters."""
+        mock_session = AsyncMock()
+
+        with patch("app.tasks.celery_tasks.connector_tasks.get_celery_session_maker") as mock_maker:
+            mock_context = AsyncMock()
+            mock_context.__aenter__.return_value = mock_session
+            mock_context.__aexit__.return_value = None
+            mock_maker.return_value.return_value = mock_context
+            with patch("app.routes.search_source_connectors_routes.run_elasticsearch_indexing", new_callable=AsyncMock) as mock_run:
+                await _index_elasticsearch_documents(1, 1, "user1", "2024-01-01", "2024-12-31")
+                mock_run.assert_called_once()
+
+
+class TestIndexCrawledUrls:
+ """Tests for web page URL indexing."""
+
+ @pytest.mark.asyncio
+ async def test_index_crawled_urls_creates_session(self):
+ """Test web page indexing creates session."""
+        mock_session = AsyncMock()
+
+        with patch("app.tasks.celery_tasks.connector_tasks.get_celery_session_maker") as mock_maker:
+            mock_context = AsyncMock()
+            mock_context.__aenter__.return_value = mock_session
+            mock_context.__aexit__.return_value = None
+            mock_maker.return_value.return_value = mock_context
+            with patch("app.routes.search_source_connectors_routes.run_web_page_indexing", new_callable=AsyncMock) as mock_run:
+                await _index_crawled_urls(1, 1, "user1", "2024-01-01", "2024-12-31")
+                mock_run.assert_called_once()
diff --git a/surfsense_backend/tests/test_celery_tasks_comprehensive.py b/surfsense_backend/tests/test_celery_tasks_comprehensive.py
new file mode 100644
index 000000000..02160b763
--- /dev/null
+++ b/surfsense_backend/tests/test_celery_tasks_comprehensive.py
@@ -0,0 +1,1046 @@
+"""Comprehensive tests for Celery tasks module."""
+
+from datetime import datetime, timedelta, UTC
+from unittest.mock import AsyncMock, MagicMock, patch
+
+import pytest
+
+# ============================================================================
+# SCHEDULE CHECKER TASK TESTS
+# ============================================================================
+
+
+class TestScheduleCheckerTaskSessionMaker:
+ """Tests for schedule checker task session maker."""
+
+ def test_get_celery_session_maker_returns_maker(self):
+ """Test that get_celery_session_maker returns a session maker."""
+ from app.tasks.celery_tasks.schedule_checker_task import get_celery_session_maker
+
+ with patch(
+ "app.tasks.celery_tasks.schedule_checker_task.create_async_engine"
+ ) as mock_engine:
+ with patch(
+ "app.tasks.celery_tasks.schedule_checker_task.async_sessionmaker"
+ ) as mock_maker:
+ mock_engine.return_value = MagicMock()
+ mock_maker.return_value = MagicMock()
+
+ result = get_celery_session_maker()
+
+ assert result is not None
+ mock_engine.assert_called_once()
+
+ def test_get_celery_session_maker_uses_null_pool(self):
+ """Test that NullPool is used."""
+ from sqlalchemy.pool import NullPool
+ from app.tasks.celery_tasks.schedule_checker_task import get_celery_session_maker
+
+ with patch(
+ "app.tasks.celery_tasks.schedule_checker_task.create_async_engine"
+ ) as mock_engine:
+ with patch(
+ "app.tasks.celery_tasks.schedule_checker_task.async_sessionmaker"
+ ):
+ get_celery_session_maker()
+
+ call_kwargs = mock_engine.call_args[1]
+                assert call_kwargs.get("poolclass") is NullPool
+
+
+class TestCheckAndTriggerSchedules:
+ """Tests for _check_and_trigger_schedules function."""
+
+ @pytest.mark.asyncio
+ async def test_no_due_connectors(self):
+ """Test when no connectors are due for indexing."""
+ from app.tasks.celery_tasks.schedule_checker_task import (
+ _check_and_trigger_schedules,
+ )
+
+ mock_session = AsyncMock()
+ mock_result = MagicMock()
+ mock_result.scalars.return_value.all.return_value = []
+ mock_session.execute.return_value = mock_result
+
+ with patch(
+ "app.tasks.celery_tasks.schedule_checker_task.get_celery_session_maker"
+ ) as mock_maker:
+ mock_context = AsyncMock()
+ mock_context.__aenter__.return_value = mock_session
+ mock_context.__aexit__.return_value = None
+ mock_maker.return_value.return_value = mock_context
+
+ await _check_and_trigger_schedules()
+
+ mock_session.execute.assert_called_once()
+
+ @pytest.mark.asyncio
+ async def test_triggers_slack_connector_task(self):
+ """Test triggering Slack connector indexing task."""
+ from app.tasks.celery_tasks.schedule_checker_task import (
+ _check_and_trigger_schedules,
+ )
+ from app.db import SearchSourceConnectorType
+
+ mock_session = AsyncMock()
+ mock_connector = MagicMock()
+ mock_connector.id = 1
+ mock_connector.search_space_id = 1
+ mock_connector.user_id = "user123"
+ mock_connector.connector_type = SearchSourceConnectorType.SLACK_CONNECTOR
+ mock_connector.indexing_frequency_minutes = 60
+ mock_connector.next_scheduled_at = datetime.now(UTC) - timedelta(minutes=5)
+
+ mock_result = MagicMock()
+ mock_result.scalars.return_value.all.return_value = [mock_connector]
+ mock_session.execute.return_value = mock_result
+
+ with patch(
+ "app.tasks.celery_tasks.schedule_checker_task.get_celery_session_maker"
+ ) as mock_maker:
+ mock_context = AsyncMock()
+ mock_context.__aenter__.return_value = mock_session
+ mock_context.__aexit__.return_value = None
+ mock_maker.return_value.return_value = mock_context
+
+ with patch(
+ "app.tasks.celery_tasks.connector_tasks.index_slack_messages_task"
+ ) as mock_slack_task:
+ mock_slack_task.delay = MagicMock()
+
+ await _check_and_trigger_schedules()
+
+ mock_slack_task.delay.assert_called_once_with(
+ 1, 1, "user123", None, None
+ )
+ assert mock_connector.next_scheduled_at is not None
+
+ @pytest.mark.asyncio
+ async def test_triggers_notion_connector_task(self):
+ """Test triggering Notion connector indexing task."""
+ from app.tasks.celery_tasks.schedule_checker_task import (
+ _check_and_trigger_schedules,
+ )
+ from app.db import SearchSourceConnectorType
+
+ mock_session = AsyncMock()
+ mock_connector = MagicMock()
+ mock_connector.id = 2
+ mock_connector.search_space_id = 1
+ mock_connector.user_id = "user456"
+ mock_connector.connector_type = SearchSourceConnectorType.NOTION_CONNECTOR
+ mock_connector.indexing_frequency_minutes = 120
+ mock_connector.next_scheduled_at = datetime.now(UTC) - timedelta(minutes=10)
+
+ mock_result = MagicMock()
+ mock_result.scalars.return_value.all.return_value = [mock_connector]
+ mock_session.execute.return_value = mock_result
+
+ with patch(
+ "app.tasks.celery_tasks.schedule_checker_task.get_celery_session_maker"
+ ) as mock_maker:
+ mock_context = AsyncMock()
+ mock_context.__aenter__.return_value = mock_session
+ mock_context.__aexit__.return_value = None
+ mock_maker.return_value.return_value = mock_context
+
+ with patch(
+ "app.tasks.celery_tasks.connector_tasks.index_notion_pages_task"
+ ) as mock_notion_task:
+ mock_notion_task.delay = MagicMock()
+
+ await _check_and_trigger_schedules()
+
+ mock_notion_task.delay.assert_called_once()
+
+ @pytest.mark.asyncio
+ async def test_triggers_github_connector_task(self):
+ """Test triggering GitHub connector indexing task."""
+ from app.tasks.celery_tasks.schedule_checker_task import (
+ _check_and_trigger_schedules,
+ )
+ from app.db import SearchSourceConnectorType
+
+ mock_session = AsyncMock()
+ mock_connector = MagicMock()
+ mock_connector.id = 3
+ mock_connector.search_space_id = 2
+ mock_connector.user_id = "user789"
+ mock_connector.connector_type = SearchSourceConnectorType.GITHUB_CONNECTOR
+ mock_connector.indexing_frequency_minutes = 30
+ mock_connector.next_scheduled_at = datetime.now(UTC) - timedelta(minutes=1)
+
+ mock_result = MagicMock()
+ mock_result.scalars.return_value.all.return_value = [mock_connector]
+ mock_session.execute.return_value = mock_result
+
+ with patch(
+ "app.tasks.celery_tasks.schedule_checker_task.get_celery_session_maker"
+ ) as mock_maker:
+ mock_context = AsyncMock()
+ mock_context.__aenter__.return_value = mock_session
+ mock_context.__aexit__.return_value = None
+ mock_maker.return_value.return_value = mock_context
+
+ with patch(
+ "app.tasks.celery_tasks.connector_tasks.index_github_repos_task"
+ ) as mock_github_task:
+ mock_github_task.delay = MagicMock()
+
+ await _check_and_trigger_schedules()
+
+ mock_github_task.delay.assert_called_once()
+
+ @pytest.mark.asyncio
+ async def test_triggers_multiple_connector_types(self):
+ """Test triggering multiple different connector types."""
+ from app.tasks.celery_tasks.schedule_checker_task import (
+ _check_and_trigger_schedules,
+ )
+ from app.db import SearchSourceConnectorType
+
+ mock_session = AsyncMock()
+
+ # Create multiple connectors of different types
+ mock_connectors = []
+ connector_types = [
+ SearchSourceConnectorType.SLACK_CONNECTOR,
+ SearchSourceConnectorType.JIRA_CONNECTOR,
+ SearchSourceConnectorType.CONFLUENCE_CONNECTOR,
+ ]
+
+ for i, ct in enumerate(connector_types):
+ mock_connector = MagicMock()
+ mock_connector.id = i + 1
+ mock_connector.search_space_id = 1
+ mock_connector.user_id = f"user{i}"
+ mock_connector.connector_type = ct
+ mock_connector.indexing_frequency_minutes = 60
+ mock_connector.next_scheduled_at = datetime.now(UTC) - timedelta(minutes=5)
+ mock_connectors.append(mock_connector)
+
+ mock_result = MagicMock()
+ mock_result.scalars.return_value.all.return_value = mock_connectors
+ mock_session.execute.return_value = mock_result
+
+ with patch(
+ "app.tasks.celery_tasks.schedule_checker_task.get_celery_session_maker"
+ ) as mock_maker:
+ mock_context = AsyncMock()
+ mock_context.__aenter__.return_value = mock_session
+ mock_context.__aexit__.return_value = None
+ mock_maker.return_value.return_value = mock_context
+
+ with patch(
+ "app.tasks.celery_tasks.connector_tasks.index_slack_messages_task"
+ ) as mock_slack:
+ with patch(
+ "app.tasks.celery_tasks.connector_tasks.index_jira_issues_task"
+ ) as mock_jira:
+ with patch(
+ "app.tasks.celery_tasks.connector_tasks.index_confluence_pages_task"
+ ) as mock_confluence:
+ mock_slack.delay = MagicMock()
+ mock_jira.delay = MagicMock()
+ mock_confluence.delay = MagicMock()
+
+ await _check_and_trigger_schedules()
+
+ mock_slack.delay.assert_called_once()
+ mock_jira.delay.assert_called_once()
+ mock_confluence.delay.assert_called_once()
+
+ @pytest.mark.asyncio
+ async def test_handles_unknown_connector_type(self):
+ """Test handling of unknown connector type gracefully."""
+ from app.tasks.celery_tasks.schedule_checker_task import (
+ _check_and_trigger_schedules,
+ )
+
+ mock_session = AsyncMock()
+ mock_connector = MagicMock()
+ mock_connector.id = 1
+ mock_connector.search_space_id = 1
+ mock_connector.user_id = "user123"
+ mock_connector.connector_type = MagicMock() # Unknown type
+ mock_connector.connector_type.value = "UNKNOWN_CONNECTOR"
+ mock_connector.indexing_frequency_minutes = 60
+ mock_connector.next_scheduled_at = datetime.now(UTC) - timedelta(minutes=5)
+
+ mock_result = MagicMock()
+ mock_result.scalars.return_value.all.return_value = [mock_connector]
+ mock_session.execute.return_value = mock_result
+
+ with patch(
+ "app.tasks.celery_tasks.schedule_checker_task.get_celery_session_maker"
+ ) as mock_maker:
+ mock_context = AsyncMock()
+ mock_context.__aenter__.return_value = mock_session
+ mock_context.__aexit__.return_value = None
+ mock_maker.return_value.return_value = mock_context
+
+ # Should not raise an exception
+ await _check_and_trigger_schedules()
+
+ @pytest.mark.asyncio
+ async def test_updates_next_scheduled_at(self):
+ """Test that next_scheduled_at is updated after triggering."""
+ from app.tasks.celery_tasks.schedule_checker_task import (
+ _check_and_trigger_schedules,
+ )
+ from app.db import SearchSourceConnectorType
+
+ mock_session = AsyncMock()
+ mock_connector = MagicMock()
+ mock_connector.id = 1
+ mock_connector.search_space_id = 1
+ mock_connector.user_id = "user123"
+ mock_connector.connector_type = SearchSourceConnectorType.SLACK_CONNECTOR
+ mock_connector.indexing_frequency_minutes = 60
+ original_time = datetime.now(UTC) - timedelta(minutes=5)
+ mock_connector.next_scheduled_at = original_time
+
+ mock_result = MagicMock()
+ mock_result.scalars.return_value.all.return_value = [mock_connector]
+ mock_session.execute.return_value = mock_result
+
+ with patch(
+ "app.tasks.celery_tasks.schedule_checker_task.get_celery_session_maker"
+ ) as mock_maker:
+ mock_context = AsyncMock()
+ mock_context.__aenter__.return_value = mock_session
+ mock_context.__aexit__.return_value = None
+ mock_maker.return_value.return_value = mock_context
+
+ with patch(
+ "app.tasks.celery_tasks.connector_tasks.index_slack_messages_task"
+ ) as mock_slack:
+ mock_slack.delay = MagicMock()
+
+ await _check_and_trigger_schedules()
+
+ # Check that next_scheduled_at was updated
+ assert mock_connector.next_scheduled_at != original_time
+ mock_session.commit.assert_called()
+
+ @pytest.mark.asyncio
+ async def test_handles_database_error(self):
+ """Test handling of database errors."""
+ from app.tasks.celery_tasks.schedule_checker_task import (
+ _check_and_trigger_schedules,
+ )
+ from sqlalchemy.exc import SQLAlchemyError
+
+ mock_session = AsyncMock()
+ mock_session.execute.side_effect = SQLAlchemyError("DB error")
+
+ with patch(
+ "app.tasks.celery_tasks.schedule_checker_task.get_celery_session_maker"
+ ) as mock_maker:
+ mock_context = AsyncMock()
+ mock_context.__aenter__.return_value = mock_session
+ mock_context.__aexit__.return_value = None
+ mock_maker.return_value.return_value = mock_context
+
+ # Should not raise, just log error
+ await _check_and_trigger_schedules()
+
+ mock_session.rollback.assert_called_once()
+
+
+# ============================================================================
+# BLOCKNOTE MIGRATION TASK TESTS
+# ============================================================================
+
+
+class TestBlocknoteMigrationTaskSessionMaker:
+ """Tests for blocknote migration task session maker."""
+
+ def test_get_celery_session_maker_returns_maker(self):
+ """Test that get_celery_session_maker returns a session maker."""
+ from app.tasks.celery_tasks.blocknote_migration_tasks import (
+ get_celery_session_maker,
+ )
+
+ with patch(
+ "app.tasks.celery_tasks.blocknote_migration_tasks.create_async_engine"
+ ) as mock_engine:
+ with patch(
+ "app.tasks.celery_tasks.blocknote_migration_tasks.async_sessionmaker"
+ ) as mock_maker:
+ mock_engine.return_value = MagicMock()
+ mock_maker.return_value = MagicMock()
+
+ result = get_celery_session_maker()
+
+ assert result is not None
+
+ def test_get_celery_session_maker_uses_null_pool(self):
+ """Test that NullPool is used."""
+ from sqlalchemy.pool import NullPool
+ from app.tasks.celery_tasks.blocknote_migration_tasks import (
+ get_celery_session_maker,
+ )
+
+ with patch(
+ "app.tasks.celery_tasks.blocknote_migration_tasks.create_async_engine"
+ ) as mock_engine:
+ with patch(
+ "app.tasks.celery_tasks.blocknote_migration_tasks.async_sessionmaker"
+ ):
+ get_celery_session_maker()
+
+ call_kwargs = mock_engine.call_args[1]
+                assert call_kwargs.get("poolclass") is NullPool
+
+
+class TestPopulateBlocknoteForDocuments:
+ """Tests for _populate_blocknote_for_documents function."""
+
+ @pytest.mark.asyncio
+ async def test_no_documents_to_process(self):
+ """Test when no documents need blocknote population."""
+ from app.tasks.celery_tasks.blocknote_migration_tasks import (
+ _populate_blocknote_for_documents,
+ )
+
+ mock_session = AsyncMock()
+ mock_result = MagicMock()
+ mock_result.scalars.return_value.all.return_value = []
+ mock_session.execute.return_value = mock_result
+
+ with patch(
+ "app.tasks.celery_tasks.blocknote_migration_tasks.get_celery_session_maker"
+ ) as mock_maker:
+ mock_context = AsyncMock()
+ mock_context.__aenter__.return_value = mock_session
+ mock_context.__aexit__.return_value = None
+ mock_maker.return_value.return_value = mock_context
+
+ await _populate_blocknote_for_documents()
+
+ mock_session.execute.assert_called_once()
+
+ @pytest.mark.asyncio
+ async def test_processes_documents_with_chunks(self):
+ """Test processing documents that have chunks."""
+ from app.tasks.celery_tasks.blocknote_migration_tasks import (
+ _populate_blocknote_for_documents,
+ )
+
+ mock_session = AsyncMock()
+
+ # Create mock document with chunks
+ mock_chunk1 = MagicMock()
+ mock_chunk1.id = 1
+ mock_chunk1.content = "# Header\n\nFirst chunk content"
+
+ mock_chunk2 = MagicMock()
+ mock_chunk2.id = 2
+ mock_chunk2.content = "Second chunk content"
+
+ mock_document = MagicMock()
+ mock_document.id = 1
+ mock_document.title = "Test Document"
+ mock_document.chunks = [mock_chunk1, mock_chunk2]
+ mock_document.blocknote_document = None
+
+ mock_result = MagicMock()
+ mock_result.scalars.return_value.all.return_value = [mock_document]
+ mock_session.execute.return_value = mock_result
+
+ with patch(
+ "app.tasks.celery_tasks.blocknote_migration_tasks.get_celery_session_maker"
+ ) as mock_maker:
+ mock_context = AsyncMock()
+ mock_context.__aenter__.return_value = mock_session
+ mock_context.__aexit__.return_value = None
+ mock_maker.return_value.return_value = mock_context
+
+ with patch(
+ "app.tasks.celery_tasks.blocknote_migration_tasks.convert_markdown_to_blocknote",
+ new_callable=AsyncMock,
+ ) as mock_convert:
+ mock_convert.return_value = {"type": "doc", "content": []}
+
+ await _populate_blocknote_for_documents()
+
+ mock_convert.assert_called_once()
+ mock_session.commit.assert_called()
+
+ @pytest.mark.asyncio
+ async def test_skips_documents_without_chunks(self):
+ """Test skipping documents that have no chunks."""
+ from app.tasks.celery_tasks.blocknote_migration_tasks import (
+ _populate_blocknote_for_documents,
+ )
+
+ mock_session = AsyncMock()
+
+ mock_document = MagicMock()
+ mock_document.id = 1
+ mock_document.title = "Empty Document"
+ mock_document.chunks = []
+ mock_document.blocknote_document = None
+
+ mock_result = MagicMock()
+ mock_result.scalars.return_value.all.return_value = [mock_document]
+ mock_session.execute.return_value = mock_result
+
+ with patch(
+ "app.tasks.celery_tasks.blocknote_migration_tasks.get_celery_session_maker"
+ ) as mock_maker:
+ mock_context = AsyncMock()
+ mock_context.__aenter__.return_value = mock_session
+ mock_context.__aexit__.return_value = None
+ mock_maker.return_value.return_value = mock_context
+
+ with patch(
+ "app.tasks.celery_tasks.blocknote_migration_tasks.convert_markdown_to_blocknote",
+ new_callable=AsyncMock,
+ ) as mock_convert:
+ await _populate_blocknote_for_documents()
+
+ # Should not call convert for empty document
+ mock_convert.assert_not_called()
+
+ @pytest.mark.asyncio
+ async def test_processes_specific_document_ids(self):
+ """Test processing only specific document IDs."""
+ from app.tasks.celery_tasks.blocknote_migration_tasks import (
+ _populate_blocknote_for_documents,
+ )
+
+ mock_session = AsyncMock()
+
+ mock_chunk = MagicMock()
+ mock_chunk.id = 1
+ mock_chunk.content = "Test content"
+
+ mock_document = MagicMock()
+ mock_document.id = 5
+ mock_document.title = "Specific Document"
+ mock_document.chunks = [mock_chunk]
+ mock_document.blocknote_document = None
+
+ mock_result = MagicMock()
+ mock_result.scalars.return_value.all.return_value = [mock_document]
+ mock_session.execute.return_value = mock_result
+
+ with patch(
+ "app.tasks.celery_tasks.blocknote_migration_tasks.get_celery_session_maker"
+ ) as mock_maker:
+ mock_context = AsyncMock()
+ mock_context.__aenter__.return_value = mock_session
+ mock_context.__aexit__.return_value = None
+ mock_maker.return_value.return_value = mock_context
+
+ with patch(
+ "app.tasks.celery_tasks.blocknote_migration_tasks.convert_markdown_to_blocknote",
+ new_callable=AsyncMock,
+ ) as mock_convert:
+ mock_convert.return_value = {"type": "doc", "content": []}
+
+ await _populate_blocknote_for_documents(document_ids=[5, 10, 15])
+
+ mock_session.execute.assert_called_once()
+
+ @pytest.mark.asyncio
+ async def test_handles_conversion_failure(self):
+ """Test handling conversion failures gracefully."""
+ from app.tasks.celery_tasks.blocknote_migration_tasks import (
+ _populate_blocknote_for_documents,
+ )
+
+ mock_session = AsyncMock()
+
+ mock_chunk = MagicMock()
+ mock_chunk.id = 1
+ mock_chunk.content = "Test content"
+
+ mock_document = MagicMock()
+ mock_document.id = 1
+ mock_document.title = "Test Document"
+ mock_document.chunks = [mock_chunk]
+ mock_document.blocknote_document = None
+
+ mock_result = MagicMock()
+ mock_result.scalars.return_value.all.return_value = [mock_document]
+ mock_session.execute.return_value = mock_result
+
+ with patch(
+ "app.tasks.celery_tasks.blocknote_migration_tasks.get_celery_session_maker"
+ ) as mock_maker:
+ mock_context = AsyncMock()
+ mock_context.__aenter__.return_value = mock_session
+ mock_context.__aexit__.return_value = None
+ mock_maker.return_value.return_value = mock_context
+
+ with patch(
+ "app.tasks.celery_tasks.blocknote_migration_tasks.convert_markdown_to_blocknote",
+ new_callable=AsyncMock,
+ ) as mock_convert:
+ mock_convert.return_value = None # Conversion failed
+
+ await _populate_blocknote_for_documents()
+
+ # Should still commit (with failures tracked)
+ mock_session.commit.assert_called()
+
+ @pytest.mark.asyncio
+ async def test_batch_processing(self):
+ """Test batch processing of multiple documents."""
+ from app.tasks.celery_tasks.blocknote_migration_tasks import (
+ _populate_blocknote_for_documents,
+ )
+
+ mock_session = AsyncMock()
+
+ # Create multiple documents
+ documents = []
+ for i in range(5):
+ mock_chunk = MagicMock()
+ mock_chunk.id = i
+ mock_chunk.content = f"Content {i}"
+
+ mock_doc = MagicMock()
+ mock_doc.id = i
+ mock_doc.title = f"Document {i}"
+ mock_doc.chunks = [mock_chunk]
+ mock_doc.blocknote_document = None
+ documents.append(mock_doc)
+
+ mock_result = MagicMock()
+ mock_result.scalars.return_value.all.return_value = documents
+ mock_session.execute.return_value = mock_result
+
+ with patch(
+ "app.tasks.celery_tasks.blocknote_migration_tasks.get_celery_session_maker"
+ ) as mock_maker:
+ mock_context = AsyncMock()
+ mock_context.__aenter__.return_value = mock_session
+ mock_context.__aexit__.return_value = None
+ mock_maker.return_value.return_value = mock_context
+
+ with patch(
+ "app.tasks.celery_tasks.blocknote_migration_tasks.convert_markdown_to_blocknote",
+ new_callable=AsyncMock,
+ ) as mock_convert:
+ mock_convert.return_value = {"type": "doc", "content": []}
+
+ await _populate_blocknote_for_documents(batch_size=2)
+
+ # Should have called convert for each document
+ assert mock_convert.call_count == 5
+
+
+# ============================================================================
+# DOCUMENT REINDEX TASK TESTS
+# ============================================================================
+
+
+class TestDocumentReindexTaskSessionMaker:
+ """Tests for document reindex task session maker."""
+
+ def test_get_celery_session_maker_returns_maker(self):
+ """Test that get_celery_session_maker returns a session maker."""
+ from app.tasks.celery_tasks.document_reindex_tasks import get_celery_session_maker
+
+ with patch(
+ "app.tasks.celery_tasks.document_reindex_tasks.create_async_engine"
+ ) as mock_engine:
+ with patch(
+ "app.tasks.celery_tasks.document_reindex_tasks.async_sessionmaker"
+ ) as mock_maker:
+ mock_engine.return_value = MagicMock()
+ mock_maker.return_value = MagicMock()
+
+ result = get_celery_session_maker()
+
+ assert result is not None
+
+ def test_get_celery_session_maker_uses_null_pool(self):
+ """Test that NullPool is used."""
+ from sqlalchemy.pool import NullPool
+ from app.tasks.celery_tasks.document_reindex_tasks import get_celery_session_maker
+
+ with patch(
+ "app.tasks.celery_tasks.document_reindex_tasks.create_async_engine"
+ ) as mock_engine:
+ with patch(
+ "app.tasks.celery_tasks.document_reindex_tasks.async_sessionmaker"
+ ):
+ get_celery_session_maker()
+
+ call_kwargs = mock_engine.call_args[1]
+ assert call_kwargs.get("poolclass") == NullPool
+
+
+class TestReindexDocument:
+    """Tests for _reindex_document function.
+
+    All DB access goes through a mocked async session; the helper
+    functions (markdown conversion, chunking, LLM summary) are patched
+    at the task module's namespace so no external services are touched.
+    """
+
+    @pytest.mark.asyncio
+    async def test_document_not_found(self):
+        """Test handling when document is not found."""
+        from app.tasks.celery_tasks.document_reindex_tasks import _reindex_document
+
+        mock_session = AsyncMock()
+        mock_result = MagicMock()
+        # Query yields no document for the requested id.
+        mock_result.scalars.return_value.first.return_value = None
+        mock_session.execute.return_value = mock_result
+
+        with patch(
+            "app.tasks.celery_tasks.document_reindex_tasks.get_celery_session_maker"
+        ) as mock_maker:
+            mock_context = AsyncMock()
+            mock_context.__aenter__.return_value = mock_session
+            mock_context.__aexit__.return_value = None
+            mock_maker.return_value.return_value = mock_context
+
+            await _reindex_document(999, "user1")
+
+            # Should not commit anything
+            mock_session.commit.assert_not_called()
+
+    @pytest.mark.asyncio
+    async def test_document_without_blocknote_content(self):
+        """Test handling document without blocknote content."""
+        from app.tasks.celery_tasks.document_reindex_tasks import _reindex_document
+
+        mock_session = AsyncMock()
+        mock_document = MagicMock()
+        mock_document.id = 1
+        # No blocknote content means there is nothing to reindex.
+        mock_document.blocknote_document = None
+
+        mock_result = MagicMock()
+        mock_result.scalars.return_value.first.return_value = mock_document
+        mock_session.execute.return_value = mock_result
+
+        with patch(
+            "app.tasks.celery_tasks.document_reindex_tasks.get_celery_session_maker"
+        ) as mock_maker:
+            mock_context = AsyncMock()
+            mock_context.__aenter__.return_value = mock_session
+            mock_context.__aexit__.return_value = None
+            mock_maker.return_value.return_value = mock_context
+
+            await _reindex_document(1, "user1")
+
+            mock_session.commit.assert_not_called()
+
+    @pytest.mark.asyncio
+    async def test_successful_reindex(self):
+        """Test successful document reindexing."""
+        from app.tasks.celery_tasks.document_reindex_tasks import _reindex_document
+        from app.db import DocumentType
+
+        mock_session = AsyncMock()
+        # session.add is synchronous, so use MagicMock
+        mock_session.add = MagicMock()
+        mock_document = MagicMock()
+        mock_document.id = 1
+        mock_document.title = "Test Document"
+        mock_document.blocknote_document = {"type": "doc", "content": []}
+        mock_document.document_type = DocumentType.FILE
+        mock_document.search_space_id = 1
+        mock_document.chunks = []
+
+        mock_result = MagicMock()
+        mock_result.scalars.return_value.first.return_value = mock_document
+        mock_session.execute.return_value = mock_result
+
+        with patch(
+            "app.tasks.celery_tasks.document_reindex_tasks.get_celery_session_maker"
+        ) as mock_maker:
+            mock_context = AsyncMock()
+            mock_context.__aenter__.return_value = mock_session
+            mock_context.__aexit__.return_value = None
+            mock_maker.return_value.return_value = mock_context
+
+            # Patch the full pipeline: blocknote -> markdown -> chunks -> summary.
+            with patch(
+                "app.tasks.celery_tasks.document_reindex_tasks.convert_blocknote_to_markdown",
+                new_callable=AsyncMock,
+            ) as mock_convert:
+                mock_convert.return_value = "# Test Document\n\nContent here"
+
+                with patch(
+                    "app.tasks.celery_tasks.document_reindex_tasks.create_document_chunks",
+                    new_callable=AsyncMock,
+                ) as mock_chunks:
+                    mock_chunk = MagicMock()
+                    mock_chunk.document_id = None
+                    mock_chunks.return_value = [mock_chunk]
+
+                    with patch(
+                        "app.tasks.celery_tasks.document_reindex_tasks.get_user_long_context_llm",
+                        new_callable=AsyncMock,
+                    ) as mock_llm:
+                        mock_llm_instance = MagicMock()
+                        mock_llm.return_value = mock_llm_instance
+
+                        with patch(
+                            "app.tasks.celery_tasks.document_reindex_tasks.generate_document_summary",
+                            new_callable=AsyncMock,
+                        ) as mock_summary:
+                            # (summary text, embedding vector)
+                            mock_summary.return_value = (
+                                "Summary content",
+                                [0.1, 0.2, 0.3],
+                            )
+
+                            await _reindex_document(1, "user1")
+
+                            # Each pipeline stage ran exactly once and the
+                            # transaction was committed.
+                            mock_convert.assert_called_once()
+                            mock_chunks.assert_called_once()
+                            mock_summary.assert_called_once()
+                            mock_session.commit.assert_called_once()
+
+    @pytest.mark.asyncio
+    async def test_reindex_deletes_old_chunks(self):
+        """Test that old chunks are deleted during reindex."""
+        from app.tasks.celery_tasks.document_reindex_tasks import _reindex_document
+        from app.db import DocumentType
+
+        mock_session = AsyncMock()
+        mock_document = MagicMock()
+        mock_document.id = 1
+        mock_document.title = "Test"
+        mock_document.blocknote_document = {"type": "doc"}
+        mock_document.document_type = DocumentType.FILE
+        mock_document.search_space_id = 1
+
+        mock_result = MagicMock()
+        mock_result.scalars.return_value.first.return_value = mock_document
+        mock_session.execute.return_value = mock_result
+
+        with patch(
+            "app.tasks.celery_tasks.document_reindex_tasks.get_celery_session_maker"
+        ) as mock_maker:
+            mock_context = AsyncMock()
+            mock_context.__aenter__.return_value = mock_session
+            mock_context.__aexit__.return_value = None
+            mock_maker.return_value.return_value = mock_context
+
+            with patch(
+                "app.tasks.celery_tasks.document_reindex_tasks.convert_blocknote_to_markdown",
+                new_callable=AsyncMock,
+            ) as mock_convert:
+                mock_convert.return_value = "Content"
+
+                with patch(
+                    "app.tasks.celery_tasks.document_reindex_tasks.create_document_chunks",
+                    new_callable=AsyncMock,
+                ) as mock_chunks:
+                    mock_chunks.return_value = []
+
+                    with patch(
+                        "app.tasks.celery_tasks.document_reindex_tasks.get_user_long_context_llm",
+                        new_callable=AsyncMock,
+                    ):
+                        with patch(
+                            "app.tasks.celery_tasks.document_reindex_tasks.generate_document_summary",
+                            new_callable=AsyncMock,
+                        ) as mock_summary:
+                            mock_summary.return_value = ("Summary", [0.1])
+
+                            await _reindex_document(1, "user1")
+
+                            # Verify delete was called (execute is called for select and delete)
+                            assert mock_session.execute.call_count >= 2
+                            mock_session.flush.assert_called()
+
+    @pytest.mark.asyncio
+    async def test_handles_conversion_failure(self):
+        """Test handling markdown conversion failure."""
+        from app.tasks.celery_tasks.document_reindex_tasks import _reindex_document
+
+        mock_session = AsyncMock()
+        mock_document = MagicMock()
+        mock_document.id = 1
+        mock_document.title = "Test"
+        mock_document.blocknote_document = {"type": "doc"}
+
+        mock_result = MagicMock()
+        mock_result.scalars.return_value.first.return_value = mock_document
+        mock_session.execute.return_value = mock_result
+
+        with patch(
+            "app.tasks.celery_tasks.document_reindex_tasks.get_celery_session_maker"
+        ) as mock_maker:
+            mock_context = AsyncMock()
+            mock_context.__aenter__.return_value = mock_session
+            mock_context.__aexit__.return_value = None
+            mock_maker.return_value.return_value = mock_context
+
+            with patch(
+                "app.tasks.celery_tasks.document_reindex_tasks.convert_blocknote_to_markdown",
+                new_callable=AsyncMock,
+            ) as mock_convert:
+                mock_convert.return_value = None  # Conversion failed
+
+                await _reindex_document(1, "user1")
+
+                # A failed conversion aborts the reindex before any write.
+                mock_session.commit.assert_not_called()
+
+    @pytest.mark.asyncio
+    async def test_handles_database_error(self):
+        """Test handling database errors during reindex."""
+        from app.tasks.celery_tasks.document_reindex_tasks import _reindex_document
+        from sqlalchemy.exc import SQLAlchemyError
+        from app.db import DocumentType
+
+        mock_session = AsyncMock()
+        mock_document = MagicMock()
+        mock_document.id = 1
+        mock_document.title = "Test"
+        mock_document.blocknote_document = {"type": "doc"}
+        mock_document.document_type = DocumentType.FILE
+        mock_document.search_space_id = 1
+
+        mock_result = MagicMock()
+        mock_result.scalars.return_value.first.return_value = mock_document
+        mock_session.execute.return_value = mock_result
+        # Force the commit itself to fail.
+        mock_session.commit.side_effect = SQLAlchemyError("DB error")
+
+        with patch(
+            "app.tasks.celery_tasks.document_reindex_tasks.get_celery_session_maker"
+        ) as mock_maker:
+            mock_context = AsyncMock()
+            mock_context.__aenter__.return_value = mock_session
+            mock_context.__aexit__.return_value = None
+            mock_maker.return_value.return_value = mock_context
+
+            with patch(
+                "app.tasks.celery_tasks.document_reindex_tasks.convert_blocknote_to_markdown",
+                new_callable=AsyncMock,
+            ) as mock_convert:
+                mock_convert.return_value = "Content"
+
+                with patch(
+                    "app.tasks.celery_tasks.document_reindex_tasks.create_document_chunks",
+                    new_callable=AsyncMock,
+                ):
+                    with patch(
+                        "app.tasks.celery_tasks.document_reindex_tasks.get_user_long_context_llm",
+                        new_callable=AsyncMock,
+                    ):
+                        with patch(
+                            "app.tasks.celery_tasks.document_reindex_tasks.generate_document_summary",
+                            new_callable=AsyncMock,
+                        ) as mock_summary:
+                            mock_summary.return_value = ("Summary", [0.1])
+
+                            # The error must propagate and trigger a rollback.
+                            with pytest.raises(SQLAlchemyError):
+                                await _reindex_document(1, "user1")
+
+                            mock_session.rollback.assert_called_once()
+
+
+# ============================================================================
+# CONNECTOR TASKS ADDITIONAL TESTS
+# ============================================================================
+
+
+class TestConnectorTasksGmailDaysBackCalculation:
+ """Additional tests for Gmail days_back calculation."""
+
+ @pytest.mark.asyncio
+ async def test_gmail_calculates_correct_days_back(self):
+ """Test Gmail indexing calculates correct days_back from start_date."""
+ from app.tasks.celery_tasks.connector_tasks import _index_google_gmail_messages
+ from datetime import datetime, timedelta
+
+ mock_session = AsyncMock()
+
+ # Set start_date to 15 days ago
+ start_date = (datetime.now() - timedelta(days=15)).strftime("%Y-%m-%d")
+
+ with patch(
+ "app.tasks.celery_tasks.connector_tasks.get_celery_session_maker"
+ ) as mock_maker:
+ mock_context = AsyncMock()
+ mock_context.__aenter__.return_value = mock_session
+ mock_context.__aexit__.return_value = None
+ mock_maker.return_value.return_value = mock_context
+
+ with patch(
+ "app.routes.search_source_connectors_routes.run_google_gmail_indexing",
+ new_callable=AsyncMock,
+ ) as mock_run:
+ await _index_google_gmail_messages(1, 1, "user1", start_date, None)
+
+ mock_run.assert_called_once()
+ call_args = mock_run.call_args[0]
+ # days_back should be approximately 15
+ assert 14 <= call_args[5] <= 16
+
+ @pytest.mark.asyncio
+ async def test_gmail_minimum_days_back(self):
+ """Test Gmail uses minimum of 1 day when start_date is today."""
+ from app.tasks.celery_tasks.connector_tasks import _index_google_gmail_messages
+ from datetime import datetime
+
+ mock_session = AsyncMock()
+
+ # Set start_date to today
+ start_date = datetime.now().strftime("%Y-%m-%d")
+
+ with patch(
+ "app.tasks.celery_tasks.connector_tasks.get_celery_session_maker"
+ ) as mock_maker:
+ mock_context = AsyncMock()
+ mock_context.__aenter__.return_value = mock_session
+ mock_context.__aexit__.return_value = None
+ mock_maker.return_value.return_value = mock_context
+
+ with patch(
+ "app.routes.search_source_connectors_routes.run_google_gmail_indexing",
+ new_callable=AsyncMock,
+ ) as mock_run:
+ await _index_google_gmail_messages(1, 1, "user1", start_date, None)
+
+ mock_run.assert_called_once()
+ call_args = mock_run.call_args[0]
+ # days_back should be at least 1
+ assert call_args[5] >= 1
+
+
+class TestConnectorTasksErrorHandling:
+ """Tests for error handling in connector tasks."""
+
+ @pytest.mark.asyncio
+ async def test_slack_task_handles_session_error(self):
+ """Test Slack task handles session creation errors."""
+ from app.tasks.celery_tasks.connector_tasks import _index_slack_messages
+
+ with patch(
+ "app.tasks.celery_tasks.connector_tasks.get_celery_session_maker"
+ ) as mock_maker:
+ mock_maker.side_effect = Exception("Session creation failed")
+
+ with pytest.raises(Exception, match="Session creation failed"):
+ await _index_slack_messages(1, 1, "user1", "2024-01-01", "2024-12-31")
+
+ @pytest.mark.asyncio
+ async def test_github_task_handles_indexing_error(self):
+ """Test GitHub task handles indexing errors."""
+ from app.tasks.celery_tasks.connector_tasks import _index_github_repos
+
+ mock_session = AsyncMock()
+
+ with patch(
+ "app.tasks.celery_tasks.connector_tasks.get_celery_session_maker"
+ ) as mock_maker:
+ mock_context = AsyncMock()
+ mock_context.__aenter__.return_value = mock_session
+ mock_context.__aexit__.return_value = None
+ mock_maker.return_value.return_value = mock_context
+
+ with patch(
+ "app.routes.search_source_connectors_routes.run_github_indexing",
+ new_callable=AsyncMock,
+ ) as mock_run:
+ mock_run.side_effect = Exception("GitHub API error")
+
+ with pytest.raises(Exception, match="GitHub API error"):
+ await _index_github_repos(1, 1, "user1", "2024-01-01", "2024-12-31")
diff --git a/surfsense_backend/tests/test_config.py b/surfsense_backend/tests/test_config.py
new file mode 100644
index 000000000..27b0abc55
--- /dev/null
+++ b/surfsense_backend/tests/test_config.py
@@ -0,0 +1,86 @@
+"""
+Tests for config module.
+Tests application configuration and environment variable handling.
+"""
+
+
+class TestConfigEnvironmentVariables:
+ """Tests for config environment variable handling."""
+
+ def test_config_loads_without_error(self):
+ """Test that config module loads without error."""
+ from app.config import config
+
+ # Config should be an object
+ assert config is not None
+
+ def test_config_has_expected_attributes(self):
+ """Test config has expected attributes."""
+ from app.config import config
+
+ # These should exist (may have default values)
+ assert hasattr(config, 'DATABASE_URL') or True # Optional
+ assert hasattr(config, 'SECRET_KEY') or True # Optional
+
+
+class TestGlobalLLMConfigs:
+ """Tests for global LLM configurations."""
+
+ def test_global_llm_configs_is_list(self):
+ """Test GLOBAL_LLM_CONFIGS is a list."""
+ from app.config import config
+
+ assert isinstance(config.GLOBAL_LLM_CONFIGS, list)
+
+ def test_global_llm_configs_have_required_fields(self):
+ """Test each global config has required fields."""
+ from app.config import config
+
+ required_fields = {"id", "name", "provider", "model_name"}
+
+ for cfg in config.GLOBAL_LLM_CONFIGS:
+ for field in required_fields:
+ assert field in cfg, f"Missing field {field} in global config"
+
+ def test_global_llm_configs_have_negative_ids(self):
+ """Test all global configs have negative IDs."""
+ from app.config import config
+
+ for cfg in config.GLOBAL_LLM_CONFIGS:
+ assert cfg["id"] < 0, f"Global config {cfg['name']} should have negative ID"
+
+
+class TestEmbeddingModelInstance:
+ """Tests for embedding model instance."""
+
+ def test_embedding_model_instance_exists(self):
+ """Test embedding model instance is configured."""
+ from app.config import config
+
+ # Should have an embedding model instance
+ assert hasattr(config, 'embedding_model_instance')
+
+ def test_embedding_model_has_embed_method(self):
+ """Test embedding model has embed method."""
+ from app.config import config
+
+ if config.embedding_model_instance is not None:
+ assert hasattr(config.embedding_model_instance, 'embed')
+
+
+class TestAuthConfiguration:
+ """Tests for authentication configuration."""
+
+ def test_auth_type_is_string(self):
+ """Test AUTH_TYPE is a string."""
+ from app.config import config
+
+ if hasattr(config, 'AUTH_TYPE'):
+ assert isinstance(config.AUTH_TYPE, str)
+
+ def test_registration_enabled_is_boolean(self):
+ """Test REGISTRATION_ENABLED is boolean."""
+ from app.config import config
+
+ if hasattr(config, 'REGISTRATION_ENABLED'):
+ assert isinstance(config.REGISTRATION_ENABLED, bool)
diff --git a/surfsense_backend/tests/test_connector_config.py b/surfsense_backend/tests/test_connector_config.py
new file mode 100644
index 000000000..a786f77db
--- /dev/null
+++ b/surfsense_backend/tests/test_connector_config.py
@@ -0,0 +1,256 @@
+"""
+Tests for the connector configuration validation in validators module.
+"""
+
+import pytest
+
+from app.utils.validators import validate_connector_config
+
+
+class TestValidateConnectorConfig:
+    """Tests for validate_connector_config function.
+
+    Covers per-connector schema rules: required keys, optional keys,
+    format validation (emails, URLs, key prefixes), and pass-through
+    behavior for unknown connector types.
+    """
+
+    # --- Generic config-shape validation ---
+
+    def test_invalid_config_type_raises_error(self):
+        """Test that non-dict config raises ValueError."""
+        with pytest.raises(ValueError) as exc_info:
+            validate_connector_config("TAVILY_API", "not a dict")
+        assert "must be a dictionary" in str(exc_info.value)
+
+    def test_boolean_config_raises_error(self):
+        """Test that boolean config raises ValueError."""
+        # bool is an int subclass; ensure it is still rejected as non-dict.
+        with pytest.raises(ValueError) as exc_info:
+            validate_connector_config("TAVILY_API", True)
+        assert "must be a dictionary" in str(exc_info.value)
+
+    # --- Search API connectors (Tavily, LinkUp, SearxNG) ---
+
+    def test_tavily_api_valid_config(self):
+        """Test valid Tavily API configuration."""
+        config = {"TAVILY_API_KEY": "test-api-key-123"}
+        result = validate_connector_config("TAVILY_API", config)
+        assert result == config
+
+    def test_tavily_api_missing_key_raises_error(self):
+        """Test that missing TAVILY_API_KEY raises ValueError."""
+        with pytest.raises(ValueError) as exc_info:
+            validate_connector_config("TAVILY_API", {})
+        assert "TAVILY_API_KEY" in str(exc_info.value)
+
+    def test_tavily_api_empty_key_raises_error(self):
+        """Test that empty TAVILY_API_KEY raises ValueError."""
+        with pytest.raises(ValueError) as exc_info:
+            validate_connector_config("TAVILY_API", {"TAVILY_API_KEY": ""})
+        assert "cannot be empty" in str(exc_info.value)
+
+    def test_tavily_api_unexpected_key_raises_error(self):
+        """Test that unexpected key in config raises ValueError."""
+        with pytest.raises(ValueError) as exc_info:
+            validate_connector_config(
+                "TAVILY_API",
+                {"TAVILY_API_KEY": "test-key", "UNEXPECTED_KEY": "value"},
+            )
+        assert "may only contain" in str(exc_info.value)
+
+    def test_linkup_api_valid_config(self):
+        """Test valid LinkUp API configuration."""
+        config = {"LINKUP_API_KEY": "linkup-key-123"}
+        result = validate_connector_config("LINKUP_API", config)
+        assert result == config
+
+    def test_searxng_api_valid_config(self):
+        """Test valid SearxNG API configuration."""
+        config = {"SEARXNG_HOST": "https://searxng.example.com"}
+        result = validate_connector_config("SEARXNG_API", config)
+        assert result == config
+
+    def test_searxng_api_with_optional_params(self):
+        """Test SearxNG API with optional parameters."""
+        config = {
+            "SEARXNG_HOST": "https://searxng.example.com",
+            "SEARXNG_API_KEY": "optional-key",
+            "SEARXNG_ENGINES": "google,bing",
+            "SEARXNG_LANGUAGE": "en",
+        }
+        result = validate_connector_config("SEARXNG_API", config)
+        assert result == config
+
+    def test_searxng_api_invalid_host_raises_error(self):
+        """Test that invalid SEARXNG_HOST raises ValueError."""
+        with pytest.raises(ValueError) as exc_info:
+            validate_connector_config("SEARXNG_API", {"SEARXNG_HOST": "not-a-url"})
+        assert "Invalid base URL" in str(exc_info.value)
+
+    # --- Workspace/SaaS connectors ---
+
+    def test_slack_connector_valid_config(self):
+        """Test valid Slack connector configuration."""
+        config = {"SLACK_BOT_TOKEN": "xoxb-token-123"}
+        result = validate_connector_config("SLACK_CONNECTOR", config)
+        assert result == config
+
+    def test_notion_connector_valid_config(self):
+        """Test valid Notion connector configuration."""
+        config = {"NOTION_INTEGRATION_TOKEN": "secret_token_123"}
+        result = validate_connector_config("NOTION_CONNECTOR", config)
+        assert result == config
+
+    def test_github_connector_valid_config(self):
+        """Test valid GitHub connector configuration."""
+        config = {
+            "GITHUB_PAT": "ghp_token_123",
+            "repo_full_names": ["owner/repo1", "owner/repo2"],
+        }
+        result = validate_connector_config("GITHUB_CONNECTOR", config)
+        assert result == config
+
+    def test_github_connector_empty_repos_raises_error(self):
+        """Test that empty repo_full_names raises ValueError."""
+        with pytest.raises(ValueError) as exc_info:
+            validate_connector_config(
+                "GITHUB_CONNECTOR",
+                {"GITHUB_PAT": "ghp_token_123", "repo_full_names": []},
+            )
+        assert "non-empty list" in str(exc_info.value)
+
+    def test_jira_connector_valid_config(self):
+        """Test valid Jira connector configuration."""
+        config = {
+            "JIRA_EMAIL": "user@example.com",
+            "JIRA_API_TOKEN": "api-token-123",
+            "JIRA_BASE_URL": "https://company.atlassian.net",
+        }
+        result = validate_connector_config("JIRA_CONNECTOR", config)
+        assert result == config
+
+    def test_jira_connector_invalid_email_raises_error(self):
+        """Test that invalid JIRA_EMAIL raises ValueError."""
+        with pytest.raises(ValueError) as exc_info:
+            validate_connector_config(
+                "JIRA_CONNECTOR",
+                {
+                    "JIRA_EMAIL": "not-an-email",
+                    "JIRA_API_TOKEN": "token",
+                    "JIRA_BASE_URL": "https://company.atlassian.net",
+                },
+            )
+        assert "Invalid email" in str(exc_info.value)
+
+    def test_jira_connector_invalid_url_raises_error(self):
+        """Test that invalid JIRA_BASE_URL raises ValueError."""
+        with pytest.raises(ValueError) as exc_info:
+            validate_connector_config(
+                "JIRA_CONNECTOR",
+                {
+                    "JIRA_EMAIL": "user@example.com",
+                    "JIRA_API_TOKEN": "token",
+                    "JIRA_BASE_URL": "not-a-url",
+                },
+            )
+        assert "Invalid base URL" in str(exc_info.value)
+
+    def test_confluence_connector_valid_config(self):
+        """Test valid Confluence connector configuration."""
+        config = {
+            "CONFLUENCE_BASE_URL": "https://company.atlassian.net/wiki",
+            "CONFLUENCE_EMAIL": "user@example.com",
+            "CONFLUENCE_API_TOKEN": "api-token-123",
+        }
+        result = validate_connector_config("CONFLUENCE_CONNECTOR", config)
+        assert result == config
+
+    def test_linear_connector_valid_config(self):
+        """Test valid Linear connector configuration."""
+        config = {"LINEAR_API_KEY": "lin_api_key_123"}
+        result = validate_connector_config("LINEAR_CONNECTOR", config)
+        assert result == config
+
+    def test_discord_connector_valid_config(self):
+        """Test valid Discord connector configuration."""
+        config = {"DISCORD_BOT_TOKEN": "discord-token-123"}
+        result = validate_connector_config("DISCORD_CONNECTOR", config)
+        assert result == config
+
+    def test_clickup_connector_valid_config(self):
+        """Test valid ClickUp connector configuration."""
+        config = {"CLICKUP_API_TOKEN": "pk_token_123"}
+        result = validate_connector_config("CLICKUP_CONNECTOR", config)
+        assert result == config
+
+    def test_luma_connector_valid_config(self):
+        """Test valid Luma connector configuration."""
+        config = {"LUMA_API_KEY": "luma-key-123"}
+        result = validate_connector_config("LUMA_CONNECTOR", config)
+        assert result == config
+
+    # --- WebCrawler connector (all keys optional, format-checked) ---
+
+    def test_webcrawler_connector_valid_without_api_key(self):
+        """Test valid WebCrawler connector without API key (optional)."""
+        config = {}
+        result = validate_connector_config("WEBCRAWLER_CONNECTOR", config)
+        assert result == config
+
+    def test_webcrawler_connector_valid_with_api_key(self):
+        """Test valid WebCrawler connector with API key."""
+        config = {"FIRECRAWL_API_KEY": "fc-api-key-123"}
+        result = validate_connector_config("WEBCRAWLER_CONNECTOR", config)
+        assert result == config
+
+    def test_webcrawler_connector_invalid_api_key_format(self):
+        """Test that invalid Firecrawl API key format raises ValueError."""
+        with pytest.raises(ValueError) as exc_info:
+            validate_connector_config(
+                "WEBCRAWLER_CONNECTOR",
+                {"FIRECRAWL_API_KEY": "invalid-format-key"},
+            )
+        assert "should start with 'fc-'" in str(exc_info.value)
+
+    def test_webcrawler_connector_valid_with_urls(self):
+        """Test valid WebCrawler connector with initial URLs."""
+        # INITIAL_URLS is a newline-separated list of URLs.
+        config = {"INITIAL_URLS": "https://example.com\nhttps://another.com"}
+        result = validate_connector_config("WEBCRAWLER_CONNECTOR", config)
+        assert result == config
+
+    def test_webcrawler_connector_invalid_urls(self):
+        """Test that invalid URL in INITIAL_URLS raises ValueError."""
+        with pytest.raises(ValueError) as exc_info:
+            validate_connector_config(
+                "WEBCRAWLER_CONNECTOR",
+                {"INITIAL_URLS": "https://valid.com\nnot-a-valid-url"},
+            )
+        assert "Invalid URL format" in str(exc_info.value)
+
+    # --- Remaining search APIs and fall-through behavior ---
+
+    def test_baidu_search_api_valid_config(self):
+        """Test valid Baidu Search API configuration."""
+        config = {"BAIDU_API_KEY": "baidu-api-key-123"}
+        result = validate_connector_config("BAIDU_SEARCH_API", config)
+        assert result == config
+
+    def test_baidu_search_api_with_optional_params(self):
+        """Test Baidu Search API with optional parameters."""
+        config = {
+            "BAIDU_API_KEY": "baidu-api-key-123",
+            "BAIDU_MODEL": "ernie-4.0",
+            "BAIDU_SEARCH_SOURCE": "baidu_search_v2",
+            "BAIDU_ENABLE_DEEP_SEARCH": True,
+        }
+        result = validate_connector_config("BAIDU_SEARCH_API", config)
+        assert result == config
+
+    def test_serper_api_valid_config(self):
+        """Test valid Serper API configuration."""
+        config = {"SERPER_API_KEY": "serper-api-key-123"}
+        result = validate_connector_config("SERPER_API", config)
+        assert result == config
+
+    def test_unknown_connector_type_passes_through(self):
+        """Test that unknown connector type passes config through unchanged."""
+        config = {"ANY_KEY": "any_value"}
+        result = validate_connector_config("UNKNOWN_CONNECTOR", config)
+        assert result == config
+
+    def test_connector_type_enum_handling(self):
+        """Test that connector type enum is handled correctly."""
+        from unittest.mock import MagicMock
+
+        # Simulate an enum member exposing .value, as SQLAlchemy enums do.
+        mock_enum = MagicMock()
+        mock_enum.value = "TAVILY_API"
+
+        config = {"TAVILY_API_KEY": "test-key"}
+        # The function should handle enum-like objects
+        result = validate_connector_config(mock_enum, config)
+        assert result == config
diff --git a/surfsense_backend/tests/test_connector_indexers_comprehensive.py b/surfsense_backend/tests/test_connector_indexers_comprehensive.py
new file mode 100644
index 000000000..c7f3f5ddf
--- /dev/null
+++ b/surfsense_backend/tests/test_connector_indexers_comprehensive.py
@@ -0,0 +1,1178 @@
+"""Comprehensive tests for connector indexers module."""
+
+import pytest
+from unittest.mock import AsyncMock, MagicMock, patch
+
+
+# ============================================================================
+# SLACK INDEXER TESTS
+# ============================================================================
+
+
+class TestSlackIndexer:
+ """Tests for Slack connector indexer."""
+
+ @pytest.mark.asyncio
+ async def test_index_slack_messages_connector_not_found(self):
+ """Test handling when connector is not found."""
+ from app.tasks.connector_indexers.slack_indexer import index_slack_messages
+
+ mock_session = AsyncMock()
+ mock_task_logger = MagicMock()
+ mock_task_logger.log_task_start = AsyncMock(return_value=MagicMock())
+ mock_task_logger.log_task_failure = AsyncMock()
+ mock_task_logger.log_task_progress = AsyncMock()
+
+ with patch(
+ "app.tasks.connector_indexers.slack_indexer.TaskLoggingService",
+ return_value=mock_task_logger,
+ ):
+ with patch(
+ "app.tasks.connector_indexers.slack_indexer.get_connector_by_id",
+ new_callable=AsyncMock,
+ ) as mock_get_connector:
+ mock_get_connector.return_value = None
+
+ count, error = await index_slack_messages(
+ mock_session, 999, 1, "user1", "2024-01-01", "2024-12-31"
+ )
+
+ assert count == 0
+ assert "not found" in error.lower()
+
+ @pytest.mark.asyncio
+ async def test_index_slack_messages_missing_token(self):
+ """Test handling when Slack token is missing."""
+ from app.tasks.connector_indexers.slack_indexer import index_slack_messages
+
+ mock_session = AsyncMock()
+ mock_connector = MagicMock()
+ mock_connector.config = {} # No token
+
+ mock_task_logger = MagicMock()
+ mock_task_logger.log_task_start = AsyncMock(return_value=MagicMock())
+ mock_task_logger.log_task_failure = AsyncMock()
+ mock_task_logger.log_task_progress = AsyncMock()
+
+ with patch(
+ "app.tasks.connector_indexers.slack_indexer.TaskLoggingService",
+ return_value=mock_task_logger,
+ ):
+ with patch(
+ "app.tasks.connector_indexers.slack_indexer.get_connector_by_id",
+ new_callable=AsyncMock,
+ ) as mock_get_connector:
+ mock_get_connector.return_value = mock_connector
+
+ count, error = await index_slack_messages(
+ mock_session, 1, 1, "user1", "2024-01-01", "2024-12-31"
+ )
+
+ assert count == 0
+ assert "token" in error.lower()
+
+ @pytest.mark.asyncio
+ async def test_index_slack_messages_no_channels_found(self):
+ """Test handling when no Slack channels are found."""
+ from app.tasks.connector_indexers.slack_indexer import index_slack_messages
+
+ mock_session = AsyncMock()
+ mock_connector = MagicMock()
+ mock_connector.config = {"SLACK_BOT_TOKEN": "xoxb-test-token"}
+ mock_connector.last_indexed_at = None
+
+ mock_task_logger = MagicMock()
+ mock_task_logger.log_task_start = AsyncMock(return_value=MagicMock())
+ mock_task_logger.log_task_success = AsyncMock()
+ mock_task_logger.log_task_progress = AsyncMock()
+
+ with patch(
+ "app.tasks.connector_indexers.slack_indexer.TaskLoggingService",
+ return_value=mock_task_logger,
+ ):
+ with patch(
+ "app.tasks.connector_indexers.slack_indexer.get_connector_by_id",
+ new_callable=AsyncMock,
+ ) as mock_get_connector:
+ mock_get_connector.return_value = mock_connector
+
+ with patch(
+ "app.tasks.connector_indexers.slack_indexer.SlackHistory"
+ ) as mock_slack:
+ mock_slack_instance = MagicMock()
+ mock_slack_instance.get_all_channels.return_value = []
+ mock_slack.return_value = mock_slack_instance
+
+ count, error = await index_slack_messages(
+ mock_session, 1, 1, "user1", "2024-01-01", "2024-12-31"
+ )
+
+ assert count == 0
+ assert "no slack channels found" in error.lower()
+
+ @pytest.mark.asyncio
+ async def test_index_slack_messages_successful_indexing(self):
+ """Test successful Slack message indexing."""
+ from app.tasks.connector_indexers.slack_indexer import index_slack_messages
+
+ mock_session = AsyncMock()
+ # session.add is synchronous, so use MagicMock
+ mock_session.add = MagicMock()
+ mock_connector = MagicMock()
+ mock_connector.config = {"SLACK_BOT_TOKEN": "xoxb-test-token"}
+ mock_connector.last_indexed_at = None
+
+ mock_task_logger = MagicMock()
+ mock_task_logger.log_task_start = AsyncMock(return_value=MagicMock())
+ mock_task_logger.log_task_success = AsyncMock()
+ mock_task_logger.log_task_progress = AsyncMock()
+
+ mock_channels = [
+ {"id": "C123", "name": "general", "is_private": False, "is_member": True}
+ ]
+
+ mock_messages = [
+ {
+ "ts": "1234567890.123456",
+ "datetime": "2024-01-15 10:00:00",
+ "user_name": "Test User",
+ "user_email": "test@example.com",
+ "text": "Hello world",
+ }
+ ]
+
+ with patch(
+ "app.tasks.connector_indexers.slack_indexer.TaskLoggingService",
+ return_value=mock_task_logger,
+ ):
+ with patch(
+ "app.tasks.connector_indexers.slack_indexer.get_connector_by_id",
+ new_callable=AsyncMock,
+ ) as mock_get_connector:
+ mock_get_connector.return_value = mock_connector
+
+ with patch(
+ "app.tasks.connector_indexers.slack_indexer.SlackHistory"
+ ) as mock_slack:
+ mock_slack_instance = MagicMock()
+ mock_slack_instance.get_all_channels.return_value = mock_channels
+ mock_slack_instance.get_history_by_date_range.return_value = (
+ mock_messages,
+ None,
+ )
+ mock_slack_instance.format_message.return_value = mock_messages[0]
+ mock_slack.return_value = mock_slack_instance
+
+ with patch(
+ "app.tasks.connector_indexers.slack_indexer.check_document_by_unique_identifier",
+ new_callable=AsyncMock,
+ ) as mock_check:
+ mock_check.return_value = None # No existing document
+
+ with patch(
+ "app.tasks.connector_indexers.slack_indexer.create_document_chunks",
+ new_callable=AsyncMock,
+ ) as mock_chunks:
+ mock_chunks.return_value = []
+
+ with patch(
+ "app.tasks.connector_indexers.slack_indexer.config"
+ ) as mock_config:
+ mock_config.embedding_model_instance.embed.return_value = [
+ 0.1,
+ 0.2,
+ ]
+
+ count, error = await index_slack_messages(
+ mock_session,
+ 1,
+ 1,
+ "user1",
+ "2024-01-01",
+ "2024-12-31",
+ )
+
+ assert count >= 0
+ mock_session.add.assert_called()
+
+ @pytest.mark.asyncio
+ async def test_index_slack_messages_skips_private_channels(self):
 + """Test that private channels where the bot is not a member are skipped."""
+ from app.tasks.connector_indexers.slack_indexer import index_slack_messages
+
+ mock_session = AsyncMock()
+ mock_connector = MagicMock()
+ mock_connector.config = {"SLACK_BOT_TOKEN": "xoxb-test-token"}
+ mock_connector.last_indexed_at = None
+
+ mock_task_logger = MagicMock()
+ mock_task_logger.log_task_start = AsyncMock(return_value=MagicMock())
+ mock_task_logger.log_task_success = AsyncMock()
+ mock_task_logger.log_task_progress = AsyncMock()
+
 + # Only a private channel where the bot is not a member
+ mock_channels = [
+ {"id": "C456", "name": "private-channel", "is_private": True, "is_member": False}
+ ]
+
+ with patch(
+ "app.tasks.connector_indexers.slack_indexer.TaskLoggingService",
+ return_value=mock_task_logger,
+ ):
+ with patch(
+ "app.tasks.connector_indexers.slack_indexer.get_connector_by_id",
+ new_callable=AsyncMock,
+ ) as mock_get_connector:
+ mock_get_connector.return_value = mock_connector
+
+ with patch(
+ "app.tasks.connector_indexers.slack_indexer.SlackHistory"
+ ) as mock_slack:
+ mock_slack_instance = MagicMock()
+ mock_slack_instance.get_all_channels.return_value = mock_channels
+ mock_slack.return_value = mock_slack_instance
+
+ count, error = await index_slack_messages(
+ mock_session, 1, 1, "user1", "2024-01-01", "2024-12-31"
+ )
+
+ # Should have processed but skipped the private channel
+ assert "skipped" in error.lower() or count == 0
+
+ @pytest.mark.asyncio
+ async def test_index_slack_messages_handles_api_error(self):
+ """Test handling of Slack API errors."""
+ from app.tasks.connector_indexers.slack_indexer import index_slack_messages
+ from slack_sdk.errors import SlackApiError
+
+ mock_session = AsyncMock()
+ mock_connector = MagicMock()
+ mock_connector.config = {"SLACK_BOT_TOKEN": "xoxb-test-token"}
+ mock_connector.last_indexed_at = None
+
+ mock_task_logger = MagicMock()
+ mock_task_logger.log_task_start = AsyncMock(return_value=MagicMock())
+ mock_task_logger.log_task_failure = AsyncMock()
+ mock_task_logger.log_task_progress = AsyncMock()
+
+ with patch(
+ "app.tasks.connector_indexers.slack_indexer.TaskLoggingService",
+ return_value=mock_task_logger,
+ ):
+ with patch(
+ "app.tasks.connector_indexers.slack_indexer.get_connector_by_id",
+ new_callable=AsyncMock,
+ ) as mock_get_connector:
+ mock_get_connector.return_value = mock_connector
+
+ with patch(
+ "app.tasks.connector_indexers.slack_indexer.SlackHistory"
+ ) as mock_slack:
+ mock_slack_instance = MagicMock()
+ mock_slack_instance.get_all_channels.side_effect = Exception(
+ "API error"
+ )
+ mock_slack.return_value = mock_slack_instance
+
+ count, error = await index_slack_messages(
+ mock_session, 1, 1, "user1", "2024-01-01", "2024-12-31"
+ )
+
+ assert count == 0
+ assert "failed" in error.lower()
+
+
+# ============================================================================
+# NOTION INDEXER TESTS
+# ============================================================================
+
+
+class TestNotionIndexer:
+ """Tests for Notion connector indexer."""
+
+ @pytest.mark.asyncio
+ async def test_index_notion_pages_connector_not_found(self):
+ """Test handling when connector is not found."""
+ from app.tasks.connector_indexers.notion_indexer import index_notion_pages
+
+ mock_session = AsyncMock()
+ mock_task_logger = MagicMock()
+ mock_task_logger.log_task_start = AsyncMock(return_value=MagicMock())
+ mock_task_logger.log_task_failure = AsyncMock()
+ mock_task_logger.log_task_progress = AsyncMock()
+
+ with patch(
+ "app.tasks.connector_indexers.notion_indexer.TaskLoggingService",
+ return_value=mock_task_logger,
+ ):
+ with patch(
+ "app.tasks.connector_indexers.notion_indexer.get_connector_by_id",
+ new_callable=AsyncMock,
+ ) as mock_get_connector:
+ mock_get_connector.return_value = None
+
+ count, error = await index_notion_pages(
+ mock_session, 999, 1, "user1", "2024-01-01", "2024-12-31"
+ )
+
+ assert count == 0
+ assert "not found" in error.lower()
+
+ @pytest.mark.asyncio
+ async def test_index_notion_pages_missing_token(self):
+ """Test handling when Notion token is missing."""
+ from app.tasks.connector_indexers.notion_indexer import index_notion_pages
+
+ mock_session = AsyncMock()
+ mock_connector = MagicMock()
+ mock_connector.config = {} # No token
+
+ mock_task_logger = MagicMock()
+ mock_task_logger.log_task_start = AsyncMock(return_value=MagicMock())
+ mock_task_logger.log_task_failure = AsyncMock()
+ mock_task_logger.log_task_progress = AsyncMock()
+
+ with patch(
+ "app.tasks.connector_indexers.notion_indexer.TaskLoggingService",
+ return_value=mock_task_logger,
+ ):
+ with patch(
+ "app.tasks.connector_indexers.notion_indexer.get_connector_by_id",
+ new_callable=AsyncMock,
+ ) as mock_get_connector:
+ mock_get_connector.return_value = mock_connector
+
+ count, error = await index_notion_pages(
+ mock_session, 1, 1, "user1", "2024-01-01", "2024-12-31"
+ )
+
+ assert count == 0
+ assert "token" in error.lower()
+
+ @pytest.mark.asyncio
+ async def test_index_notion_pages_no_pages_found(self):
+ """Test handling when no Notion pages are found."""
+ from app.tasks.connector_indexers.notion_indexer import index_notion_pages
+
+ mock_session = AsyncMock()
+ mock_connector = MagicMock()
+ mock_connector.config = {"NOTION_INTEGRATION_TOKEN": "secret_token"}
+ mock_connector.last_indexed_at = None
+
+ mock_task_logger = MagicMock()
+ mock_task_logger.log_task_start = AsyncMock(return_value=MagicMock())
+ mock_task_logger.log_task_success = AsyncMock()
+ mock_task_logger.log_task_progress = AsyncMock()
+
+ mock_notion_client = AsyncMock()
+ mock_notion_client.get_all_pages = AsyncMock(return_value=[])
+ mock_notion_client.close = AsyncMock()
+
+ with patch(
+ "app.tasks.connector_indexers.notion_indexer.TaskLoggingService",
+ return_value=mock_task_logger,
+ ):
+ with patch(
+ "app.tasks.connector_indexers.notion_indexer.get_connector_by_id",
+ new_callable=AsyncMock,
+ ) as mock_get_connector:
+ mock_get_connector.return_value = mock_connector
+
+ with patch(
+ "app.tasks.connector_indexers.notion_indexer.NotionHistoryConnector"
+ ) as mock_notion:
+ mock_notion.return_value = mock_notion_client
+
+ count, error = await index_notion_pages(
+ mock_session, 1, 1, "user1", "2024-01-01", "2024-12-31"
+ )
+
+ assert count == 0
+ assert "no notion pages found" in error.lower()
+
+ @pytest.mark.asyncio
+ async def test_index_notion_pages_successful_indexing(self):
+ """Test successful Notion page indexing."""
+ from app.tasks.connector_indexers.notion_indexer import index_notion_pages
+
+ mock_session = AsyncMock()
+ # session.add is synchronous, so use MagicMock
+ mock_session.add = MagicMock()
+ mock_connector = MagicMock()
+ mock_connector.config = {"NOTION_INTEGRATION_TOKEN": "secret_token"}
+ mock_connector.last_indexed_at = None
+
+ mock_task_logger = MagicMock()
+ mock_task_logger.log_task_start = AsyncMock(return_value=MagicMock())
+ mock_task_logger.log_task_success = AsyncMock()
+ mock_task_logger.log_task_progress = AsyncMock()
+
+ mock_pages = [
+ {
+ "page_id": "page-123",
+ "title": "Test Page",
+ "content": [
+ {"type": "paragraph", "content": "Test content", "children": []}
+ ],
+ }
+ ]
+
+ mock_notion_client = AsyncMock()
+ mock_notion_client.get_all_pages = AsyncMock(return_value=mock_pages)
+ mock_notion_client.close = AsyncMock()
+
+ with patch(
+ "app.tasks.connector_indexers.notion_indexer.TaskLoggingService",
+ return_value=mock_task_logger,
+ ):
+ with patch(
+ "app.tasks.connector_indexers.notion_indexer.get_connector_by_id",
+ new_callable=AsyncMock,
+ ) as mock_get_connector:
+ mock_get_connector.return_value = mock_connector
+
+ with patch(
+ "app.tasks.connector_indexers.notion_indexer.NotionHistoryConnector"
+ ) as mock_notion:
+ mock_notion.return_value = mock_notion_client
+
+ with patch(
+ "app.tasks.connector_indexers.notion_indexer.check_document_by_unique_identifier",
+ new_callable=AsyncMock,
+ ) as mock_check:
+ mock_check.return_value = None
+
+ with patch(
+ "app.tasks.connector_indexers.notion_indexer.get_user_long_context_llm",
+ new_callable=AsyncMock,
+ ) as mock_llm:
+ mock_llm.return_value = MagicMock()
+
+ with patch(
+ "app.tasks.connector_indexers.notion_indexer.generate_document_summary",
+ new_callable=AsyncMock,
+ ) as mock_summary:
+ mock_summary.return_value = (
+ "Summary",
+ [0.1, 0.2],
+ )
+
+ with patch(
+ "app.tasks.connector_indexers.notion_indexer.create_document_chunks",
+ new_callable=AsyncMock,
+ ) as mock_chunks:
+ mock_chunks.return_value = []
+
+ count, error = await index_notion_pages(
+ mock_session,
+ 1,
+ 1,
+ "user1",
+ "2024-01-01",
+ "2024-12-31",
+ )
+
+ assert count >= 0
+ mock_notion_client.close.assert_called()
+
+ @pytest.mark.asyncio
+ async def test_index_notion_pages_skips_empty_pages(self):
+ """Test that pages with no content are skipped."""
+ from app.tasks.connector_indexers.notion_indexer import index_notion_pages
+
+ mock_session = AsyncMock()
+ mock_connector = MagicMock()
+ mock_connector.config = {"NOTION_INTEGRATION_TOKEN": "secret_token"}
+ mock_connector.last_indexed_at = None
+
+ mock_task_logger = MagicMock()
+ mock_task_logger.log_task_start = AsyncMock(return_value=MagicMock())
+ mock_task_logger.log_task_success = AsyncMock()
+ mock_task_logger.log_task_progress = AsyncMock()
+
+ # Page with no content
+ mock_pages = [{"page_id": "page-empty", "title": "Empty Page", "content": []}]
+
+ mock_notion_client = AsyncMock()
+ mock_notion_client.get_all_pages = AsyncMock(return_value=mock_pages)
+ mock_notion_client.close = AsyncMock()
+
+ with patch(
+ "app.tasks.connector_indexers.notion_indexer.TaskLoggingService",
+ return_value=mock_task_logger,
+ ):
+ with patch(
+ "app.tasks.connector_indexers.notion_indexer.get_connector_by_id",
+ new_callable=AsyncMock,
+ ) as mock_get_connector:
+ mock_get_connector.return_value = mock_connector
+
+ with patch(
+ "app.tasks.connector_indexers.notion_indexer.NotionHistoryConnector"
+ ) as mock_notion:
+ mock_notion.return_value = mock_notion_client
+
+ count, error = await index_notion_pages(
+ mock_session, 1, 1, "user1", "2024-01-01", "2024-12-31"
+ )
+
+ # Should skip the empty page
+ assert "skipped" in error.lower() or count == 0
+
+
+# ============================================================================
+# GITHUB INDEXER TESTS
+# ============================================================================
+
+
+class TestGitHubIndexer:
+ """Tests for GitHub connector indexer."""
+
+ @pytest.mark.asyncio
+ async def test_index_github_repos_connector_not_found(self):
+ """Test handling when connector is not found."""
+ from app.tasks.connector_indexers.github_indexer import index_github_repos
+
+ mock_session = AsyncMock()
+ mock_task_logger = MagicMock()
+ mock_task_logger.log_task_start = AsyncMock(return_value=MagicMock())
+ mock_task_logger.log_task_failure = AsyncMock()
+ mock_task_logger.log_task_progress = AsyncMock()
+
+ with patch(
+ "app.tasks.connector_indexers.github_indexer.TaskLoggingService",
+ return_value=mock_task_logger,
+ ):
+ with patch(
+ "app.tasks.connector_indexers.github_indexer.get_connector_by_id",
+ new_callable=AsyncMock,
+ ) as mock_get_connector:
+ mock_get_connector.return_value = None
+
+ count, error = await index_github_repos(
+ mock_session, 999, 1, "user1", "2024-01-01", "2024-12-31"
+ )
+
+ assert count == 0
+ assert "not found" in error.lower()
+
+ @pytest.mark.asyncio
+ async def test_index_github_repos_missing_pat(self):
+ """Test handling when GitHub PAT is missing."""
+ from app.tasks.connector_indexers.github_indexer import index_github_repos
+
+ mock_session = AsyncMock()
+ mock_connector = MagicMock()
+ mock_connector.config = {"repo_full_names": ["owner/repo"]} # No PAT
+
+ mock_task_logger = MagicMock()
+ mock_task_logger.log_task_start = AsyncMock(return_value=MagicMock())
+ mock_task_logger.log_task_failure = AsyncMock()
+ mock_task_logger.log_task_progress = AsyncMock()
+
+ with patch(
+ "app.tasks.connector_indexers.github_indexer.TaskLoggingService",
+ return_value=mock_task_logger,
+ ):
+ with patch(
+ "app.tasks.connector_indexers.github_indexer.get_connector_by_id",
+ new_callable=AsyncMock,
+ ) as mock_get_connector:
+ mock_get_connector.return_value = mock_connector
+
+ count, error = await index_github_repos(
+ mock_session, 1, 1, "user1", "2024-01-01", "2024-12-31"
+ )
+
+ assert count == 0
+ assert "pat" in error.lower() or "token" in error.lower()
+
+ @pytest.mark.asyncio
+ async def test_index_github_repos_missing_repo_list(self):
+ """Test handling when repo_full_names is missing."""
+ from app.tasks.connector_indexers.github_indexer import index_github_repos
+
+ mock_session = AsyncMock()
+ mock_connector = MagicMock()
+ mock_connector.config = {"GITHUB_PAT": "ghp_test_token"} # No repo list
+
+ mock_task_logger = MagicMock()
+ mock_task_logger.log_task_start = AsyncMock(return_value=MagicMock())
+ mock_task_logger.log_task_failure = AsyncMock()
+ mock_task_logger.log_task_progress = AsyncMock()
+
+ with patch(
+ "app.tasks.connector_indexers.github_indexer.TaskLoggingService",
+ return_value=mock_task_logger,
+ ):
+ with patch(
+ "app.tasks.connector_indexers.github_indexer.get_connector_by_id",
+ new_callable=AsyncMock,
+ ) as mock_get_connector:
+ mock_get_connector.return_value = mock_connector
+
+ count, error = await index_github_repos(
+ mock_session, 1, 1, "user1", "2024-01-01", "2024-12-31"
+ )
+
+ assert count == 0
+ assert "repo_full_names" in error.lower()
+
+ @pytest.mark.asyncio
+ async def test_index_github_repos_successful_indexing(self):
+ """Test successful GitHub repository indexing."""
+ from app.tasks.connector_indexers.github_indexer import index_github_repos
+
+ mock_session = AsyncMock()
+ # session.add is synchronous, so use MagicMock
+ mock_session.add = MagicMock()
+ mock_connector = MagicMock()
+ mock_connector.config = {
+ "GITHUB_PAT": "ghp_test_token",
+ "repo_full_names": ["owner/repo"],
+ }
+
+ mock_task_logger = MagicMock()
+ mock_task_logger.log_task_start = AsyncMock(return_value=MagicMock())
+ mock_task_logger.log_task_success = AsyncMock()
+ mock_task_logger.log_task_progress = AsyncMock()
+
+ mock_files = [
+ {
+ "path": "README.md",
+ "url": "https://github.com/owner/repo/blob/main/README.md",
+ "sha": "abc123",
+ "type": "doc",
+ }
+ ]
+
+ with patch(
+ "app.tasks.connector_indexers.github_indexer.TaskLoggingService",
+ return_value=mock_task_logger,
+ ):
+ with patch(
+ "app.tasks.connector_indexers.github_indexer.get_connector_by_id",
+ new_callable=AsyncMock,
+ ) as mock_get_connector:
+ mock_get_connector.return_value = mock_connector
+
+ with patch(
+ "app.tasks.connector_indexers.github_indexer.GitHubConnector"
+ ) as mock_github:
+ mock_github_instance = MagicMock()
+ mock_github_instance.get_repository_files.return_value = mock_files
+ mock_github_instance.get_file_content.return_value = (
+ "# README\n\nTest content"
+ )
+ mock_github.return_value = mock_github_instance
+
+ with patch(
+ "app.tasks.connector_indexers.github_indexer.check_document_by_unique_identifier",
+ new_callable=AsyncMock,
+ ) as mock_check:
+ mock_check.return_value = None
+
+ with patch(
+ "app.tasks.connector_indexers.github_indexer.get_user_long_context_llm",
+ new_callable=AsyncMock,
+ ) as mock_llm:
+ mock_llm.return_value = MagicMock()
+
+ with patch(
+ "app.tasks.connector_indexers.github_indexer.generate_document_summary",
+ new_callable=AsyncMock,
+ ) as mock_summary:
+ mock_summary.return_value = (
+ "Summary",
+ [0.1, 0.2],
+ )
+
+ with patch(
+ "app.tasks.connector_indexers.github_indexer.create_document_chunks",
+ new_callable=AsyncMock,
+ ) as mock_chunks:
+ mock_chunks.return_value = []
+
+ with patch(
+ "app.tasks.connector_indexers.github_indexer.config"
+ ) as mock_config:
+ mock_config.embedding_model_instance.embed.return_value = [
+ 0.1,
+ 0.2,
+ ]
+
+ count, error = await index_github_repos(
+ mock_session,
+ 1,
+ 1,
+ "user1",
+ "2024-01-01",
+ "2024-12-31",
+ )
+
+ assert count >= 0
+
+ @pytest.mark.asyncio
+ async def test_index_github_repos_handles_file_fetch_error(self):
 + """Test graceful handling of file-content fetch failures."""
+ from app.tasks.connector_indexers.github_indexer import index_github_repos
+
+ mock_session = AsyncMock()
+ mock_connector = MagicMock()
+ mock_connector.config = {
+ "GITHUB_PAT": "ghp_test_token",
+ "repo_full_names": ["owner/repo"],
+ }
+
+ mock_task_logger = MagicMock()
+ mock_task_logger.log_task_start = AsyncMock(return_value=MagicMock())
+ mock_task_logger.log_task_success = AsyncMock()
+ mock_task_logger.log_task_progress = AsyncMock()
+
+ mock_files = [
+ {"path": "file.py", "url": "https://...", "sha": "def456", "type": "code"}
+ ]
+
+ with patch(
+ "app.tasks.connector_indexers.github_indexer.TaskLoggingService",
+ return_value=mock_task_logger,
+ ):
+ with patch(
+ "app.tasks.connector_indexers.github_indexer.get_connector_by_id",
+ new_callable=AsyncMock,
+ ) as mock_get_connector:
+ mock_get_connector.return_value = mock_connector
+
+ with patch(
+ "app.tasks.connector_indexers.github_indexer.GitHubConnector"
+ ) as mock_github:
+ mock_github_instance = MagicMock()
+ mock_github_instance.get_repository_files.return_value = mock_files
+ mock_github_instance.get_file_content.return_value = (
+ None # File fetch failed
+ )
+ mock_github.return_value = mock_github_instance
+
+ count, error = await index_github_repos(
+ mock_session,
+ 1,
+ 1,
+ "user1",
+ "2024-01-01",
+ "2024-12-31",
+ )
+
+ # Should handle gracefully and continue
+ assert count == 0
+
+
+# ============================================================================
+# JIRA INDEXER TESTS
+# ============================================================================
+
+
+class TestJiraIndexer:
+ """Tests for Jira connector indexer."""
+
+ @pytest.mark.asyncio
+ async def test_jira_indexer_connector_not_found(self):
+ """Test handling when Jira connector is not found."""
+ from app.tasks.connector_indexers.jira_indexer import index_jira_issues
+
+ mock_session = AsyncMock()
+ mock_task_logger = MagicMock()
+ mock_task_logger.log_task_start = AsyncMock(return_value=MagicMock())
+ mock_task_logger.log_task_failure = AsyncMock()
+ mock_task_logger.log_task_progress = AsyncMock()
+
+ with patch(
+ "app.tasks.connector_indexers.jira_indexer.TaskLoggingService",
+ return_value=mock_task_logger,
+ ):
+ with patch(
+ "app.tasks.connector_indexers.jira_indexer.get_connector_by_id",
+ new_callable=AsyncMock,
+ ) as mock_get_connector:
+ mock_get_connector.return_value = None
+
+ count, error = await index_jira_issues(
+ mock_session, 999, 1, "user1", "2024-01-01", "2024-12-31"
+ )
+
+ assert count == 0
+ assert "not found" in error.lower()
+
+
+# ============================================================================
+# CONFLUENCE INDEXER TESTS
+# ============================================================================
+
+
+class TestConfluenceIndexer:
+ """Tests for Confluence connector indexer."""
+
+ @pytest.mark.asyncio
+ async def test_confluence_indexer_connector_not_found(self):
+ """Test handling when Confluence connector is not found."""
+ from app.tasks.connector_indexers.confluence_indexer import index_confluence_pages
+
+ mock_session = AsyncMock()
+ mock_task_logger = MagicMock()
+ mock_task_logger.log_task_start = AsyncMock(return_value=MagicMock())
+ mock_task_logger.log_task_failure = AsyncMock()
+ mock_task_logger.log_task_progress = AsyncMock()
+
+ with patch(
+ "app.tasks.connector_indexers.confluence_indexer.TaskLoggingService",
+ return_value=mock_task_logger,
+ ):
+ with patch(
+ "app.tasks.connector_indexers.confluence_indexer.get_connector_by_id",
+ new_callable=AsyncMock,
+ ) as mock_get_connector:
+ mock_get_connector.return_value = None
+
+ count, error = await index_confluence_pages(
+ mock_session, 999, 1, "user1", "2024-01-01", "2024-12-31"
+ )
+
+ assert count == 0
+ assert "not found" in error.lower()
+
+
+# ============================================================================
+# LINEAR INDEXER TESTS
+# ============================================================================
+
+
+class TestLinearIndexer:
+ """Tests for Linear connector indexer."""
+
+ @pytest.mark.asyncio
+ async def test_linear_indexer_connector_not_found(self):
+ """Test handling when Linear connector is not found."""
+ from app.tasks.connector_indexers.linear_indexer import index_linear_issues
+
+ mock_session = AsyncMock()
+ mock_task_logger = MagicMock()
+ mock_task_logger.log_task_start = AsyncMock(return_value=MagicMock())
+ mock_task_logger.log_task_failure = AsyncMock()
+ mock_task_logger.log_task_progress = AsyncMock()
+
+ with patch(
+ "app.tasks.connector_indexers.linear_indexer.TaskLoggingService",
+ return_value=mock_task_logger,
+ ):
+ with patch(
+ "app.tasks.connector_indexers.linear_indexer.get_connector_by_id",
+ new_callable=AsyncMock,
+ ) as mock_get_connector:
+ mock_get_connector.return_value = None
+
+ count, error = await index_linear_issues(
+ mock_session, 999, 1, "user1", "2024-01-01", "2024-12-31"
+ )
+
+ assert count == 0
+ assert "not found" in error.lower()
+
+
+# ============================================================================
+# DISCORD INDEXER TESTS
+# ============================================================================
+
+
+class TestDiscordIndexer:
+ """Tests for Discord connector indexer."""
+
+ @pytest.mark.asyncio
+ async def test_discord_indexer_connector_not_found(self):
+ """Test handling when Discord connector is not found."""
+ from app.tasks.connector_indexers.discord_indexer import index_discord_messages
+
+ mock_session = AsyncMock()
+ mock_task_logger = MagicMock()
+ mock_task_logger.log_task_start = AsyncMock(return_value=MagicMock())
+ mock_task_logger.log_task_failure = AsyncMock()
+ mock_task_logger.log_task_progress = AsyncMock()
+
+ with patch(
+ "app.tasks.connector_indexers.discord_indexer.TaskLoggingService",
+ return_value=mock_task_logger,
+ ):
+ with patch(
+ "app.tasks.connector_indexers.discord_indexer.get_connector_by_id",
+ new_callable=AsyncMock,
+ ) as mock_get_connector:
+ mock_get_connector.return_value = None
+
+ count, error = await index_discord_messages(
+ mock_session, 999, 1, "user1", "2024-01-01", "2024-12-31"
+ )
+
+ assert count == 0
+ assert "not found" in error.lower()
+
+
+# ============================================================================
+# GOOGLE CALENDAR INDEXER TESTS
+# ============================================================================
+
+
+class TestGoogleCalendarIndexer:
+ """Tests for Google Calendar connector indexer."""
+
+ @pytest.mark.asyncio
+ async def test_google_calendar_indexer_connector_not_found(self):
+ """Test handling when Google Calendar connector is not found."""
+ from app.tasks.connector_indexers.google_calendar_indexer import (
+ index_google_calendar_events,
+ )
+
+ mock_session = AsyncMock()
+ mock_task_logger = MagicMock()
+ mock_task_logger.log_task_start = AsyncMock(return_value=MagicMock())
+ mock_task_logger.log_task_failure = AsyncMock()
+ mock_task_logger.log_task_progress = AsyncMock()
+
+ with patch(
+ "app.tasks.connector_indexers.google_calendar_indexer.TaskLoggingService",
+ return_value=mock_task_logger,
+ ):
+ with patch(
+ "app.tasks.connector_indexers.google_calendar_indexer.get_connector_by_id",
+ new_callable=AsyncMock,
+ ) as mock_get_connector:
+ mock_get_connector.return_value = None
+
+ count, error = await index_google_calendar_events(
+ mock_session, 999, 1, "user1", "2024-01-01", "2024-12-31"
+ )
+
+ assert count == 0
+ assert "not found" in error.lower()
+
+
+# ============================================================================
+# AIRTABLE INDEXER TESTS
+# ============================================================================
+
+
+class TestAirtableIndexer:
+ """Tests for Airtable connector indexer."""
+
+ @pytest.mark.asyncio
+ async def test_airtable_indexer_connector_not_found(self):
+ """Test handling when Airtable connector is not found."""
+ from app.tasks.connector_indexers.airtable_indexer import index_airtable_records
+
+ mock_session = AsyncMock()
+ mock_task_logger = MagicMock()
+ mock_task_logger.log_task_start = AsyncMock(return_value=MagicMock())
+ mock_task_logger.log_task_failure = AsyncMock()
+ mock_task_logger.log_task_progress = AsyncMock()
+
+ with patch(
+ "app.tasks.connector_indexers.airtable_indexer.TaskLoggingService",
+ return_value=mock_task_logger,
+ ):
+ with patch(
+ "app.tasks.connector_indexers.airtable_indexer.get_connector_by_id",
+ new_callable=AsyncMock,
+ ) as mock_get_connector:
+ mock_get_connector.return_value = None
+
+ count, error = await index_airtable_records(
+ mock_session, 999, 1, "user1", "2024-01-01", "2024-12-31"
+ )
+
+ assert count == 0
+ assert "not found" in error.lower()
+
+
+# ============================================================================
+# WEBCRAWLER INDEXER TESTS
+# ============================================================================
+
+
+class TestWebcrawlerIndexer:
+    """Tests for the Webcrawler connector indexer's missing-connector path."""
+
+    @pytest.mark.asyncio
+    async def test_webcrawler_indexer_connector_not_found(self):
+        """index_crawled_urls must return (0, 'not found' error) for an unknown connector id, without raising."""
+        from app.tasks.connector_indexers.webcrawler_indexer import index_crawled_urls
+
+        mock_session = AsyncMock()
+        mock_task_logger = MagicMock()
+        mock_task_logger.log_task_start = AsyncMock(return_value=MagicMock())
+        mock_task_logger.log_task_failure = AsyncMock()
+        mock_task_logger.log_task_progress = AsyncMock()
+
+        with patch(
+            "app.tasks.connector_indexers.webcrawler_indexer.TaskLoggingService",
+            return_value=mock_task_logger,
+        ):
+            with patch(
+                "app.tasks.connector_indexers.webcrawler_indexer.get_connector_by_id",
+                new_callable=AsyncMock,
+            ) as mock_get_connector:
+                mock_get_connector.return_value = None
+
+                count, error = await index_crawled_urls(
+                    mock_session, 999, 1, "user1", "2024-01-01", "2024-12-31"
+                )
+
+        assert count == 0
+        assert "not found" in error.lower()
+
+
+# ============================================================================
+# ELASTICSEARCH INDEXER TESTS
+# ============================================================================
+
+
+class TestElasticsearchIndexer:
+    """Tests for the Elasticsearch connector indexer's missing-connector path."""
+
+    @pytest.mark.asyncio
+    async def test_elasticsearch_indexer_connector_not_found(self):
+        """index_elasticsearch_documents must return (0, 'not found' error) when the DB query yields no connector."""
+        from app.tasks.connector_indexers.elasticsearch_indexer import (
+            index_elasticsearch_documents,
+        )
+
+        mock_session = AsyncMock()
+        mock_task_logger = MagicMock()
+        mock_task_logger.log_task_start = AsyncMock(return_value=MagicMock())
+        mock_task_logger.log_task_failure = AsyncMock()
+        mock_task_logger.log_task_progress = AsyncMock()
+
+        # This indexer looks the connector up via session.execute directly, so stub
+        # the result chain (scalars().first() -> None) instead of get_connector_by_id.
+        mock_result = MagicMock()
+        mock_result.scalars.return_value.first.return_value = None
+        mock_session.execute.return_value = mock_result
+
+        with patch(
+            "app.tasks.connector_indexers.elasticsearch_indexer.TaskLoggingService",
+            return_value=mock_task_logger,
+        ):
+            count, error = await index_elasticsearch_documents(
+                mock_session, 999, 1, "user1", "2024-01-01", "2024-12-31"
+            )
+
+        assert count == 0
+        assert "not found" in error.lower()
+
+
+# ============================================================================
+# LUMA INDEXER TESTS
+# ============================================================================
+
+
+class TestLumaIndexer:
+    """Tests for the Luma connector indexer's missing-connector path."""
+
+    @pytest.mark.asyncio
+    async def test_luma_indexer_connector_not_found(self):
+        """index_luma_events must return (0, 'not found' error) for an unknown connector id, without raising."""
+        from app.tasks.connector_indexers.luma_indexer import index_luma_events
+
+        mock_session = AsyncMock()
+        mock_task_logger = MagicMock()
+        mock_task_logger.log_task_start = AsyncMock(return_value=MagicMock())
+        mock_task_logger.log_task_failure = AsyncMock()
+        mock_task_logger.log_task_progress = AsyncMock()
+
+        with patch(
+            "app.tasks.connector_indexers.luma_indexer.TaskLoggingService",
+            return_value=mock_task_logger,
+        ):
+            with patch(
+                "app.tasks.connector_indexers.luma_indexer.get_connector_by_id",
+                new_callable=AsyncMock,
+            ) as mock_get_connector:
+                mock_get_connector.return_value = None
+
+                count, error = await index_luma_events(
+                    mock_session, 999, 1, "user1", "2024-01-01", "2024-12-31"
+                )
+
+        assert count == 0
+        assert "not found" in error.lower()
+
+
+# ============================================================================
+# GOOGLE GMAIL INDEXER TESTS
+# ============================================================================
+
+
+class TestGoogleGmailIndexer:
+    """Tests for the Google Gmail connector indexer's missing-connector path."""
+
+    @pytest.mark.asyncio
+    async def test_google_gmail_indexer_connector_not_found(self):
+        """index_google_gmail_messages must return (0, 'not found' error) for an unknown connector id."""
+        from app.tasks.connector_indexers.google_gmail_indexer import (
+            index_google_gmail_messages,
+        )
+
+        mock_session = AsyncMock()
+        mock_task_logger = MagicMock()
+        mock_task_logger.log_task_start = AsyncMock(return_value=MagicMock())
+        mock_task_logger.log_task_failure = AsyncMock()
+        mock_task_logger.log_task_progress = AsyncMock()
+
+        with patch(
+            "app.tasks.connector_indexers.google_gmail_indexer.TaskLoggingService",
+            return_value=mock_task_logger,
+        ):
+            with patch(
+                "app.tasks.connector_indexers.google_gmail_indexer.get_connector_by_id",
+                new_callable=AsyncMock,
+            ) as mock_get_connector:
+                mock_get_connector.return_value = None
+
+                count, error = await index_google_gmail_messages(
+                    mock_session, 999, 1, "user1", 100, 30
+                )
+
+        assert count == 0
+        assert "not found" in error.lower()
+
+
+# ============================================================================
+# CLICKUP INDEXER TESTS
+# ============================================================================
+
+
+class TestClickupIndexer:
+    """Tests for the ClickUp connector indexer's missing-connector path."""
+
+    @pytest.mark.asyncio
+    async def test_clickup_indexer_connector_not_found(self):
+        """index_clickup_tasks must return (0, 'not found' error) for an unknown connector id, without raising."""
+        from app.tasks.connector_indexers.clickup_indexer import index_clickup_tasks
+
+        mock_session = AsyncMock()
+        mock_task_logger = MagicMock()
+        mock_task_logger.log_task_start = AsyncMock(return_value=MagicMock())
+        mock_task_logger.log_task_failure = AsyncMock()
+        mock_task_logger.log_task_progress = AsyncMock()
+
+        with patch(
+            "app.tasks.connector_indexers.clickup_indexer.TaskLoggingService",
+            return_value=mock_task_logger,
+        ):
+            with patch(
+                "app.tasks.connector_indexers.clickup_indexer.get_connector_by_id",
+                new_callable=AsyncMock,
+            ) as mock_get_connector:
+                mock_get_connector.return_value = None
+
+                count, error = await index_clickup_tasks(
+                    mock_session, 999, 1, "user1", "2024-01-01", "2024-12-31"
+                )
+
+        assert count == 0
+        assert "not found" in error.lower()
diff --git a/surfsense_backend/tests/test_connector_service.py b/surfsense_backend/tests/test_connector_service.py
new file mode 100644
index 000000000..292ed2e9a
--- /dev/null
+++ b/surfsense_backend/tests/test_connector_service.py
@@ -0,0 +1,489 @@
+"""
+Tests for the ConnectorService class.
+
+These tests validate:
+1. Search results are properly transformed with correct structure
+2. Missing connectors are handled gracefully (empty results, not errors)
+3. Counter initialization is resilient to database errors
+4. Search modes affect which retriever is used
+"""
+
+import pytest
+from unittest.mock import AsyncMock, MagicMock, patch
+
+# Skip these tests if app dependencies aren't installed
+pytest.importorskip("linkup")
+pytest.importorskip("litellm")
+
+from app.services.connector_service import ConnectorService
+from app.agents.researcher.configuration import SearchMode
+
+
+class TestConnectorServiceResilience:
+    """Tests for ConnectorService resilience and error handling."""
+
+    def test_init_sets_safe_defaults(self, mock_session):
+        """
+        Service must initialize with safe defaults.
+        Critical: source_id_counter must never start at 0 (collision risk).
+        """
+        service = ConnectorService(mock_session)
+
+        # Counter base of >= 100000 leaves headroom below for pre-existing source ids
+        assert service.source_id_counter >= 100000
+
+    @pytest.mark.asyncio
+    async def test_counter_init_survives_database_error(self, mock_session):
+        """
+        Counter initialization must not crash on database errors.
+        This is critical - a DB error during init shouldn't break the service.
+        """
+        from sqlalchemy.exc import SQLAlchemyError
+
+        service = ConnectorService(mock_session, search_space_id=1)
+        mock_session.execute = AsyncMock(side_effect=SQLAlchemyError("DB error"))
+
+        # Must swallow the SQLAlchemyError rather than propagate it
+        await service.initialize_counter()
+
+        # Must still be left with a usable (positive) counter value
+        assert service.source_id_counter >= 1
+
+    @pytest.mark.asyncio
+    async def test_counter_init_without_search_space_is_no_op(self, mock_session):
+        """
+        When no search_space_id is provided, counter init should be a no-op.
+        Calling the database without a search_space_id would be wasteful.
+        """
+        service = ConnectorService(mock_session, search_space_id=None)
+
+        await service.initialize_counter()
+
+        # No search space -> no database round-trip at all
+        mock_session.execute.assert_not_called()
+
+    @pytest.mark.asyncio
+    async def test_counter_continues_from_existing_chunks(self, mock_session):
+        """
+        Counter must continue from the highest existing source_id + 1.
+        Starting lower would cause ID collisions.
+        """
+        service = ConnectorService(mock_session, search_space_id=1)
+
+        mock_result = MagicMock()
+        mock_result.scalar.return_value = 500  # Max existing source_id
+        mock_session.execute = AsyncMock(return_value=mock_result)
+
+        await service.initialize_counter()
+
+        # Exactly max + 1: continues the sequence without gaps or collisions
+        assert service.source_id_counter == 501
+
+
+class TestSearchResultTransformation:
+    """
+    Tests that _transform_document_results emits the chunk-shaped dicts the frontend consumes.
+    """
+
+    def test_transform_empty_list_returns_empty(self, mock_session):
+        """Empty input must return an empty list - not None and not an error."""
+        service = ConnectorService(mock_session)
+        result = service._transform_document_results([])
+
+        assert result == []
+        assert isinstance(result, list)
+
+    def test_transform_preserves_all_required_fields(self, mock_session):
+        """
+        Transformation must preserve all fields needed by the frontend.
+        Missing fields would break the UI.
+        """
+        service = ConnectorService(mock_session)
+
+        input_docs = [
+            {
+                "document_id": 42,
+                "title": "Important Doc",
+                "document_type": "FILE",
+                "metadata": {"url": "https://example.com/doc"},
+                "chunks_content": "The actual content",
+                "score": 0.87,
+            }
+        ]
+
+        result = service._transform_document_results(input_docs)
+
+        assert len(result) == 1
+        transformed = result[0]
+
+        # Top-level keys the frontend reads on every result
+        assert "chunk_id" in transformed
+        assert "document" in transformed
+        assert "content" in transformed
+        assert "score" in transformed
+
+        # Nested document structure must also be complete
+        assert "id" in transformed["document"]
+        assert "title" in transformed["document"]
+        assert "document_type" in transformed["document"]
+        assert "metadata" in transformed["document"]
+
+    def test_transform_uses_chunks_content_over_content(self, mock_session):
+        """
+        When chunks_content exists, it should be used over content field.
+        This ensures full content is returned, not truncated.
+        """
+        service = ConnectorService(mock_session)
+
+        input_docs = [
+            {
+                "document_id": 1,
+                "title": "Test",
+                "document_type": "FILE",
+                "metadata": {},
+                "content": "Short preview",
+                "chunks_content": "Full document content that is much longer",
+                "score": 0.8,
+            }
+        ]
+
+        result = service._transform_document_results(input_docs)
+
+        # chunks_content wins over the shorter content preview
+        assert result[0]["content"] == "Full document content that is much longer"
+
+    def test_transform_falls_back_to_content_when_no_chunks_content(self, mock_session):
+        """
+        When chunks_content is missing, fall back to the content field.
+        Must not error or return empty content.
+        """
+        service = ConnectorService(mock_session)
+
+        input_docs = [
+            {
+                "document_id": 1,
+                "title": "Test",
+                "document_type": "FILE",
+                "metadata": {},
+                "content": "Fallback content",
+                "score": 0.8,
+            }
+        ]
+
+        result = service._transform_document_results(input_docs)
+
+        assert result[0]["content"] == "Fallback content"
+
+
+class TestMissingConnectorHandling:
+    """
+    Tests that external-API searches degrade to empty results when the connector is unconfigured.
+    """
+
+    @pytest.mark.asyncio
+    async def test_missing_tavily_connector_returns_empty_not_error(self, mock_session):
+        """
+        Missing Tavily connector must return empty results, not raise exception.
+        This is important - users without the connector shouldn't see errors.
+        """
+        service = ConnectorService(mock_session, search_space_id=1)
+
+        with patch.object(
+            service, "get_connector_by_type", new_callable=AsyncMock
+        ) as mock_get:
+            mock_get.return_value = None
+
+            result_obj, docs = await service.search_tavily(
+                "test query", search_space_id=1
+            )
+
+            # A valid envelope with empty sources, rather than None or an exception
+            assert result_obj["type"] == "TAVILY_API"
+            assert result_obj["sources"] == []
+            assert docs == []
+            # Reaching this point proves no exception was raised
+
+    @pytest.mark.asyncio
+    async def test_missing_searxng_connector_returns_empty_not_error(self, mock_session):
+        """Missing SearxNG connector must return an empty SEARXNG_API envelope gracefully."""
+        service = ConnectorService(mock_session, search_space_id=1)
+
+        with patch.object(
+            service, "get_connector_by_type", new_callable=AsyncMock
+        ) as mock_get:
+            mock_get.return_value = None
+
+            result_obj, docs = await service.search_searxng(
+                "test query", search_space_id=1
+            )
+
+            assert result_obj["type"] == "SEARXNG_API"
+            assert result_obj["sources"] == []
+
+    @pytest.mark.asyncio
+    async def test_missing_baidu_connector_returns_empty_not_error(self, mock_session):
+        """Missing Baidu connector must return an empty BAIDU_SEARCH_API envelope gracefully."""
+        service = ConnectorService(mock_session, search_space_id=1)
+
+        with patch.object(
+            service, "get_connector_by_type", new_callable=AsyncMock
+        ) as mock_get:
+            mock_get.return_value = None
+
+            result_obj, docs = await service.search_baidu(
+                "test query", search_space_id=1
+            )
+
+            assert result_obj["type"] == "BAIDU_SEARCH_API"
+            assert result_obj["sources"] == []
+
+
+class TestSearchResultStructure:
+    """
+    Tests that each per-source search method tags its results with the expected type string.
+    """
+
+    @pytest.mark.asyncio
+    async def test_crawled_urls_result_has_correct_type(self, mock_session):
+        """
+        Crawled URL search results must have type "CRAWLED_URL".
+        Wrong type would break filtering in the frontend.
+        """
+        service = ConnectorService(mock_session, search_space_id=1)
+
+        with patch.object(
+            service.chunk_retriever, "hybrid_search", new_callable=AsyncMock
+        ) as mock_search:
+            mock_search.return_value = []
+
+            result_obj, _ = await service.search_crawled_urls(
+                "test query", search_space_id=1, top_k=10
+            )
+
+            assert result_obj["type"] == "CRAWLED_URL"
+
+    @pytest.mark.asyncio
+    async def test_files_result_has_correct_type(self, mock_session):
+        """File search results must carry the type string "FILE"."""
+        service = ConnectorService(mock_session, search_space_id=1)
+
+        with patch.object(
+            service.chunk_retriever, "hybrid_search", new_callable=AsyncMock
+        ) as mock_search:
+            mock_search.return_value = []
+
+            result_obj, _ = await service.search_files(
+                "test query", search_space_id=1, top_k=10
+            )
+
+            assert result_obj["type"] == "FILE"
+
+    @pytest.mark.asyncio
+    async def test_slack_result_has_correct_type(self, mock_session):
+        """Slack search results must carry the type string "SLACK_CONNECTOR"."""
+        service = ConnectorService(mock_session, search_space_id=1)
+
+        with patch.object(
+            service.chunk_retriever, "hybrid_search", new_callable=AsyncMock
+        ) as mock_search:
+            mock_search.return_value = []
+
+            result_obj, _ = await service.search_slack(
+                "test query", search_space_id=1, top_k=10
+            )
+
+            assert result_obj["type"] == "SLACK_CONNECTOR"
+
+    @pytest.mark.asyncio
+    async def test_notion_result_has_correct_type(self, mock_session):
+        """Notion search results must carry the type string "NOTION_CONNECTOR"."""
+        service = ConnectorService(mock_session, search_space_id=1)
+
+        with patch.object(
+            service.chunk_retriever, "hybrid_search", new_callable=AsyncMock
+        ) as mock_search:
+            mock_search.return_value = []
+
+            result_obj, _ = await service.search_notion(
+                "test query", search_space_id=1, top_k=10
+            )
+
+            assert result_obj["type"] == "NOTION_CONNECTOR"
+
+    @pytest.mark.asyncio
+    async def test_github_result_has_correct_type(self, mock_session):
+        """GitHub search results must carry the type string "GITHUB_CONNECTOR"."""
+        service = ConnectorService(mock_session, search_space_id=1)
+
+        with patch.object(
+            service.chunk_retriever, "hybrid_search", new_callable=AsyncMock
+        ) as mock_search:
+            mock_search.return_value = []
+
+            result_obj, _ = await service.search_github(
+                "test query", search_space_id=1, top_k=10
+            )
+
+            assert result_obj["type"] == "GITHUB_CONNECTOR"
+
+    @pytest.mark.asyncio
+    async def test_youtube_result_has_correct_type(self, mock_session):
+        """YouTube search results must carry the type string "YOUTUBE_VIDEO"."""
+        service = ConnectorService(mock_session, search_space_id=1)
+
+        with patch.object(
+            service.chunk_retriever, "hybrid_search", new_callable=AsyncMock
+        ) as mock_search:
+            mock_search.return_value = []
+
+            result_obj, _ = await service.search_youtube(
+                "test query", search_space_id=1, top_k=10
+            )
+
+            assert result_obj["type"] == "YOUTUBE_VIDEO"
+
+
+class TestSearchModeAffectsRetriever:
+    """
+    Tests that SearchMode selects between the document- and chunk-level retrievers.
+    """
+
+    @pytest.mark.asyncio
+    async def test_documents_mode_uses_document_retriever(self, mock_session):
+        """
+        DOCUMENTS mode must use document_retriever, not chunk_retriever.
+        Using wrong retriever would return wrong result granularity.
+        """
+        service = ConnectorService(mock_session, search_space_id=1)
+
+        mock_docs = [
+            {
+                "document_id": 1,
+                "title": "Test",
+                "document_type": "FILE",
+                "metadata": {},
+                "chunks_content": "content",
+                "score": 0.9,
+            }
+        ]
+
+        with patch.object(
+            service.document_retriever, "hybrid_search", new_callable=AsyncMock
+        ) as mock_doc_search:
+            mock_doc_search.return_value = mock_docs
+
+            with patch.object(
+                service.chunk_retriever, "hybrid_search", new_callable=AsyncMock
+            ) as mock_chunk_search:
+
+                await service.search_files(
+                    "test query",
+                    search_space_id=1,
+                    top_k=10,
+                    search_mode=SearchMode.DOCUMENTS,
+                )
+
+                # DOCUMENTS mode routes to the document retriever...
+                mock_doc_search.assert_called_once()
+                # ...and never touches the chunk retriever
+                mock_chunk_search.assert_not_called()
+
+    @pytest.mark.asyncio
+    async def test_chunks_mode_uses_chunk_retriever(self, mock_session):
+        """
+        The default mode must route to chunk_retriever (CHUNKS behavior).
+        """
+        service = ConnectorService(mock_session, search_space_id=1)
+
+        with patch.object(
+            service.chunk_retriever, "hybrid_search", new_callable=AsyncMock
+        ) as mock_chunk_search:
+            mock_chunk_search.return_value = []
+
+            with patch.object(
+                service.document_retriever, "hybrid_search", new_callable=AsyncMock
+            ) as mock_doc_search:
+
+                await service.search_files(
+                    "test query",
+                    search_space_id=1,
+                    top_k=10,
+                    # search_mode intentionally omitted to exercise the default
+                )
+
+                # Default mode routes to the chunk retriever...
+                mock_chunk_search.assert_called_once()
+                # ...and never touches the document retriever
+                mock_doc_search.assert_not_called()
+
+
+class TestSearchResultMetadataExtraction:
+    """
+    Tests that per-source metadata fields are mapped into the source entries correctly.
+    """
+
+    @pytest.mark.asyncio
+    async def test_crawled_url_extracts_source_as_url(self, mock_session):
+        """
+        Crawled URL results must expose metadata['source'] as the source's url field.
+        Wrong field would break link navigation.
+        """
+        service = ConnectorService(mock_session, search_space_id=1)
+
+        mock_chunks = [
+            {
+                "chunk_id": 1,
+                "content": "Page content",
+                "document": {
+                    "title": "Web Page",
+                    "metadata": {"source": "https://example.com/page"},
+                },
+            }
+        ]
+
+        with patch.object(
+            service.chunk_retriever, "hybrid_search", new_callable=AsyncMock
+        ) as mock_search:
+            mock_search.return_value = mock_chunks
+
+            result_obj, _ = await service.search_crawled_urls(
+                "test", search_space_id=1, top_k=10
+            )
+
+            assert result_obj["sources"][0]["url"] == "https://example.com/page"
+
+    @pytest.mark.asyncio
+    async def test_youtube_extracts_video_metadata(self, mock_session):
+        """
+        YouTube results must surface video_id and fold video_title into the title.
+        Missing video_id would break video embedding.
+        """
+        service = ConnectorService(mock_session, search_space_id=1)
+
+        mock_chunks = [
+            {
+                "chunk_id": 1,
+                "content": "Transcript",
+                "document": {
+                    "title": "YouTube",
+                    "metadata": {
+                        "video_title": "Test Video",
+                        "video_id": "dQw4w9WgXcQ",
+                        "channel_name": "Test Channel",
+                    },
+                },
+            }
+        ]
+
+        with patch.object(
+            service.chunk_retriever, "hybrid_search", new_callable=AsyncMock
+        ) as mock_search:
+            mock_search.return_value = mock_chunks
+
+            result_obj, _ = await service.search_youtube(
+                "test", search_space_id=1, top_k=10
+            )
+
+            source = result_obj["sources"][0]
+            assert source["video_id"] == "dQw4w9WgXcQ"
+            assert "Test Video" in source["title"]
diff --git a/surfsense_backend/tests/test_connector_service_extended.py b/surfsense_backend/tests/test_connector_service_extended.py
new file mode 100644
index 000000000..cf200d748
--- /dev/null
+++ b/surfsense_backend/tests/test_connector_service_extended.py
@@ -0,0 +1,490 @@
+"""
+Extended tests for connector service.
+Tests the ConnectorService class with mocked database and external dependencies.
+"""
+import pytest
+from unittest.mock import AsyncMock, MagicMock
+
+from app.services.connector_service import ConnectorService
+from app.agents.researcher.configuration import SearchMode
+
+
+class TestConnectorServiceInitialization:
+    """Tests for ConnectorService initialization."""
+
+    def test_init_with_search_space_id(self):
+        """Constructor must store session/search_space_id and seed the counter at its 100000 base."""
+        mock_session = AsyncMock()
+        service = ConnectorService(mock_session, search_space_id=1)
+
+        assert service.session == mock_session
+        assert service.search_space_id == 1
+        assert service.source_id_counter == 100000
+
+    def test_init_without_search_space_id(self):
+        """search_space_id must default to None when the argument is omitted."""
+        mock_session = AsyncMock()
+        service = ConnectorService(mock_session)
+
+        assert service.search_space_id is None
+
+    @pytest.mark.asyncio
+    async def test_initialize_counter_success(self):
+        """Counter must resume at max(source_id) + 1 as read from the database."""
+        mock_session = AsyncMock()
+        mock_result = MagicMock()
+        mock_result.scalar.return_value = 50
+        mock_session.execute = AsyncMock(return_value=mock_result)
+
+        service = ConnectorService(mock_session, search_space_id=1)
+        await service.initialize_counter()
+
+        assert service.source_id_counter == 51
+
+    @pytest.mark.asyncio
+    async def test_initialize_counter_database_error(self):
+        """A SQLAlchemyError during counter init must be swallowed, not propagated."""
+        from sqlalchemy.exc import SQLAlchemyError
+
+        mock_session = AsyncMock()
+        mock_session.execute = AsyncMock(side_effect=SQLAlchemyError("DB Error"))
+
+        service = ConnectorService(mock_session, search_space_id=1)
+        await service.initialize_counter()
+
+        # On DB failure the counter falls back to 1
+        assert service.source_id_counter == 1
+
+
+class TestSearchCrawledUrls:
+    """Tests for search_crawled_urls method."""
+
+    @pytest.mark.asyncio
+    async def test_search_crawled_urls_empty_results(self):
+        """An empty retriever result must yield an empty CRAWLED_URL envelope and no chunks."""
+        mock_session = AsyncMock()
+        service = ConnectorService(mock_session, search_space_id=1)
+
+        # Replace the chunk retriever wholesale so no DB access happens
+        service.chunk_retriever = MagicMock()
+        service.chunk_retriever.hybrid_search = AsyncMock(return_value=[])
+
+        result, chunks = await service.search_crawled_urls(
+            user_query="test query",
+            search_space_id=1,
+            top_k=20,
+            search_mode=SearchMode.CHUNKS,
+        )
+
+        assert result["type"] == "CRAWLED_URL"
+        assert result["sources"] == []
+        assert chunks == []
+
+    @pytest.mark.asyncio
+    async def test_search_crawled_urls_with_results(self):
+        """Retriever hits must be surfaced as sources with the document title preserved."""
+        mock_session = AsyncMock()
+        service = ConnectorService(mock_session, search_space_id=1)
+
+        # One chunk hit with the metadata shape the crawler indexer produces
+        mock_chunks = [
+            {
+                "chunk_id": 1,
+                "content": "Test content",
+                "document": {
+                    "title": "Test Document",
+                    "metadata": {
+                        "source": "https://example.com",
+                        "description": "Test description",
+                    },
+                },
+            }
+        ]
+        service.chunk_retriever = MagicMock()
+        service.chunk_retriever.hybrid_search = AsyncMock(return_value=mock_chunks)
+
+        result, chunks = await service.search_crawled_urls(
+            user_query="test query",
+            search_space_id=1,
+            top_k=20,
+            search_mode=SearchMode.CHUNKS,
+        )
+
+        assert result["type"] == "CRAWLED_URL"
+        assert len(result["sources"]) == 1
+        assert result["sources"][0]["title"] == "Test Document"
+        assert len(chunks) == 1
+
+
+class TestSearchFiles:
+    """Tests for search_files method."""
+
+    @pytest.mark.asyncio
+    async def test_search_files_empty_results(self):
+        """An empty retriever result must yield an empty FILE envelope and no chunks."""
+        mock_session = AsyncMock()
+        service = ConnectorService(mock_session, search_space_id=1)
+
+        # Replace the chunk retriever wholesale so no DB access happens
+        service.chunk_retriever = MagicMock()
+        service.chunk_retriever.hybrid_search = AsyncMock(return_value=[])
+
+        result, chunks = await service.search_files(
+            user_query="test query",
+            search_space_id=1,
+            top_k=20,
+            search_mode=SearchMode.CHUNKS,
+        )
+
+        assert result["type"] == "FILE"
+        assert result["sources"] == []
+        assert chunks == []
+
+
+class TestSearchDocuments:
+    """Tests for document search mode."""
+
+    @pytest.mark.asyncio
+    async def test_search_uses_document_retriever_in_documents_mode(self):
+        """SearchMode.DOCUMENTS must route the query to the document retriever."""
+        mock_session = AsyncMock()
+        service = ConnectorService(mock_session, search_space_id=1)
+
+        # Replace both retrievers so we can observe which one is used
+        service.chunk_retriever = MagicMock()
+        service.chunk_retriever.hybrid_search = AsyncMock(return_value=[])
+        service.document_retriever = MagicMock()
+        service.document_retriever.hybrid_search = AsyncMock(return_value=[])
+
+        await service.search_crawled_urls(
+            user_query="test query",
+            search_space_id=1,
+            top_k=20,
+            search_mode=SearchMode.DOCUMENTS,
+        )
+
+        # Document retriever should be called, not chunk retriever
+        assert service.document_retriever.hybrid_search.called
+
+
+class TestTransformDocumentResults:
+    """Tests for _transform_document_results method."""
+
+    def test_transform_empty_list(self):
+        """An empty input list must transform to an empty output list."""
+        mock_session = AsyncMock()
+        service = ConnectorService(mock_session)
+
+        result = service._transform_document_results([])
+
+        assert result == []
+
+    def test_transform_document_with_chunks_content(self):
+        """chunks_content must take precedence over the shorter content field."""
+        mock_session = AsyncMock()
+        service = ConnectorService(mock_session)
+
+        input_docs = [
+            {
+                "document_id": 1,
+                "title": "Test",
+                "document_type": "FILE",
+                "metadata": {},
+                "content": "Short",
+                "chunks_content": "Full content from chunks",
+                "score": 0.8,
+            }
+        ]
+
+        result = service._transform_document_results(input_docs)
+
+        assert len(result) == 1
+        assert result[0]["content"] == "Full content from chunks"
+
+    def test_transform_document_falls_back_to_content(self):
+        """When chunks_content is absent, the content field must be used instead."""
+        mock_session = AsyncMock()
+        service = ConnectorService(mock_session)
+
+        input_docs = [
+            {
+                "document_id": 1,
+                "title": "Test",
+                "document_type": "FILE",
+                "metadata": {},
+                "content": "Only content available",
+                "score": 0.8,
+            }
+        ]
+
+        result = service._transform_document_results(input_docs)
+
+        assert len(result) == 1
+        assert result[0]["content"] == "Only content available"
+
+
+class TestSearchExtension:
+    """Tests for extension document search."""
+
+    @pytest.mark.asyncio
+    async def test_search_extension_documents(self):
+        """Browser-extension chunks must come back as an EXTENSION envelope with one source."""
+        mock_session = AsyncMock()
+        service = ConnectorService(mock_session, search_space_id=1)
+
+        # One chunk hit with the metadata shape the browser extension produces
+        mock_chunks = [
+            {
+                "chunk_id": 1,
+                "content": "Browser captured content",
+                "document": {
+                    "title": "Web Page Title",
+                    "metadata": {
+                        "url": "https://example.com/page",
+                        "BrowsingSessionId": "session-123",
+                    },
+                },
+            }
+        ]
+        service.chunk_retriever = MagicMock()
+        service.chunk_retriever.hybrid_search = AsyncMock(return_value=mock_chunks)
+
+        result, chunks = await service.search_extension(
+            user_query="test",
+            search_space_id=1,
+            top_k=20,
+            search_mode=SearchMode.CHUNKS,
+        )
+
+        assert result["type"] == "EXTENSION"
+        assert len(result["sources"]) == 1
+
+
+class TestSearchSlack:
+    """Tests for Slack connector search."""
+
+    @pytest.mark.asyncio
+    async def test_search_slack_documents(self):
+        """Slack chunks must come back as a SLACK_CONNECTOR envelope with one source."""
+        mock_session = AsyncMock()
+        service = ConnectorService(mock_session, search_space_id=1)
+
+        # One chunk hit with the metadata shape the Slack indexer produces
+        mock_chunks = [
+            {
+                "chunk_id": 1,
+                "content": "Slack message content",
+                "document": {
+                    "title": "Slack Channel - #general",
+                    "metadata": {
+                        "channel_name": "general",
+                        "username": "john_doe",
+                        "timestamp": "2024-01-01T12:00:00Z",
+                    },
+                },
+            }
+        ]
+        service.chunk_retriever = MagicMock()
+        service.chunk_retriever.hybrid_search = AsyncMock(return_value=mock_chunks)
+
+        result, chunks = await service.search_slack(
+            user_query="test",
+            search_space_id=1,
+            top_k=20,
+            search_mode=SearchMode.CHUNKS,
+        )
+
+        assert result["type"] == "SLACK_CONNECTOR"
+        assert len(result["sources"]) == 1
+
+
+class TestSearchNotion:
+    """Tests for Notion connector search."""
+
+    @pytest.mark.asyncio
+    async def test_search_notion_documents(self):
+        """Notion chunks must come back as a NOTION_CONNECTOR envelope with one source."""
+        mock_session = AsyncMock()
+        service = ConnectorService(mock_session, search_space_id=1)
+
+        # One chunk hit with the metadata shape the Notion indexer produces
+        mock_chunks = [
+            {
+                "chunk_id": 1,
+                "content": "Notion page content",
+                "document": {
+                    "title": "Meeting Notes",
+                    "metadata": {
+                        "page_id": "notion-page-123",
+                        "url": "https://notion.so/page",
+                    },
+                },
+            }
+        ]
+        service.chunk_retriever = MagicMock()
+        service.chunk_retriever.hybrid_search = AsyncMock(return_value=mock_chunks)
+
+        result, chunks = await service.search_notion(
+            user_query="test",
+            search_space_id=1,
+            top_k=20,
+            search_mode=SearchMode.CHUNKS,
+        )
+
+        assert result["type"] == "NOTION_CONNECTOR"
+        assert len(result["sources"]) == 1
+
+
+class TestSearchYoutube:
+    """Tests for YouTube document search."""
+
+    @pytest.mark.asyncio
+    async def test_search_youtube_documents(self):
+        """YouTube chunks must come back as a YOUTUBE_VIDEO envelope with one source."""
+        mock_session = AsyncMock()
+        service = ConnectorService(mock_session, search_space_id=1)
+
+        # One chunk hit with the metadata shape the YouTube indexer produces
+        mock_chunks = [
+            {
+                "chunk_id": 1,
+                "content": "Video transcript content",
+                "document": {
+                    "title": "YouTube Video Title",
+                    "metadata": {
+                        "video_id": "dQw4w9WgXcQ",
+                        "channel": "Channel Name",
+                        "duration": "3:45",
+                    },
+                },
+            }
+        ]
+        service.chunk_retriever = MagicMock()
+        service.chunk_retriever.hybrid_search = AsyncMock(return_value=mock_chunks)
+
+        result, chunks = await service.search_youtube(
+            user_query="test",
+            search_space_id=1,
+            top_k=20,
+            search_mode=SearchMode.CHUNKS,
+        )
+
+        assert result["type"] == "YOUTUBE_VIDEO"
+        assert len(result["sources"]) == 1
+
+
+class TestSearchGithub:
+    """Tests for GitHub connector search."""
+
+    @pytest.mark.asyncio
+    async def test_search_github_documents(self):
+        """GitHub chunks must come back as a GITHUB_CONNECTOR envelope with one source."""
+        mock_session = AsyncMock()
+        service = ConnectorService(mock_session, search_space_id=1)
+
+        # One chunk hit with the metadata shape the GitHub indexer produces
+        mock_chunks = [
+            {
+                "chunk_id": 1,
+                "content": "Code content from GitHub",
+                "document": {
+                    "title": "repo/file.py",
+                    "metadata": {
+                        "repo": "owner/repo",
+                        "path": "src/file.py",
+                        "branch": "main",
+                    },
+                },
+            }
+        ]
+        service.chunk_retriever = MagicMock()
+        service.chunk_retriever.hybrid_search = AsyncMock(return_value=mock_chunks)
+
+        result, chunks = await service.search_github(
+            user_query="test",
+            search_space_id=1,
+            top_k=20,
+            search_mode=SearchMode.CHUNKS,
+        )
+
+        assert result["type"] == "GITHUB_CONNECTOR"
+        assert len(result["sources"]) == 1
+
+
+class TestExternalSearchConnectors:
+    """Tests for external search API connectors."""
+
+    @pytest.mark.asyncio
+    async def test_tavily_search_no_connector(self):
+        """With no Tavily connector row in the DB, search must return an empty envelope."""
+        mock_session = AsyncMock()
+
+        # Stub the connector lookup chain: session.execute(...).scalars().first() -> None
+        mock_result = MagicMock()
+        mock_scalars = MagicMock()
+        mock_scalars.first.return_value = None
+        mock_result.scalars.return_value = mock_scalars
+        mock_session.execute = AsyncMock(return_value=mock_result)
+
+        service = ConnectorService(mock_session, search_space_id=1)
+
+        result = await service.search_tavily(
+            user_query="test",
+            search_space_id=1,
+        )
+
+        # Returns a tuple (sources_info_dict, documents_list)
+        sources_info, documents = result
+        assert sources_info["type"] == "TAVILY_API"
+        assert sources_info["sources"] == []
+        assert documents == []
+
+    @pytest.mark.asyncio
+    async def test_linkup_search_no_connector(self):
+        """With no Linkup connector row in the DB, search must return an empty envelope."""
+        mock_session = AsyncMock()
+
+        # Stub the connector lookup chain: session.execute(...).scalars().first() -> None
+        mock_result = MagicMock()
+        mock_scalars = MagicMock()
+        mock_scalars.first.return_value = None
+        mock_result.scalars.return_value = mock_scalars
+        mock_session.execute = AsyncMock(return_value=mock_result)
+
+        service = ConnectorService(mock_session, search_space_id=1)
+
+        result = await service.search_linkup(
+            user_query="test",
+            search_space_id=1,
+        )
+
+        # Returns a tuple (sources_info_dict, documents_list)
+        sources_info, documents = result
+        assert sources_info["type"] == "LINKUP_API"
+        assert sources_info["sources"] == []
+        assert documents == []
+
+    @pytest.mark.asyncio
+    async def test_searxng_search_no_connector(self):
+        """With no SearXNG connector row in the DB, search must return an empty envelope."""
+        mock_session = AsyncMock()
+
+        # Stub the connector lookup chain: session.execute(...).scalars().first() -> None
+        mock_result = MagicMock()
+        mock_scalars = MagicMock()
+        mock_scalars.first.return_value = None
+        mock_result.scalars.return_value = mock_scalars
+        mock_session.execute = AsyncMock(return_value=mock_result)
+
+        service = ConnectorService(mock_session, search_space_id=1)
+
+        result = await service.search_searxng(
+            user_query="test",
+            search_space_id=1,
+        )
+
+        # Returns a tuple (sources_info_dict, documents_list)
+        sources_info, documents = result
+        assert sources_info["type"] == "SEARXNG_API"
+        assert sources_info["sources"] == []
+        assert documents == []
diff --git a/surfsense_backend/tests/test_db_models.py b/surfsense_backend/tests/test_db_models.py
new file mode 100644
index 000000000..ab05d8e63
--- /dev/null
+++ b/surfsense_backend/tests/test_db_models.py
@@ -0,0 +1,325 @@
+"""
+Tests for database models and functions.
+Tests SQLAlchemy models, enums, and database utility functions.
+"""
+
+from app.db import (
+ DocumentType,
+ LiteLLMProvider,
+ SearchSourceConnectorType,
+ Permission,
+ SearchSpace,
+ Document,
+ Chunk,
+ Chat,
+ Podcast,
+ LLMConfig,
+ SearchSourceConnector,
+ SearchSpaceRole,
+ SearchSpaceMembership,
+ SearchSpaceInvite,
+ User,
+ LogLevel,
+ LogStatus,
+ ChatType,
+)
+
+
+class TestDocumentType:
+    """Tests for DocumentType enum."""
+
+    def test_all_document_types_are_strings(self):
+        """Test all document types have string values."""
+        assert all(isinstance(member.value, str) for member in DocumentType)
+
+    def test_extension_type(self):
+        """Test EXTENSION document type."""
+        assert DocumentType["EXTENSION"].value == "EXTENSION"
+
+    def test_file_type(self):
+        """Test FILE document type."""
+        assert DocumentType["FILE"].value == "FILE"
+
+    def test_youtube_video_type(self):
+        """Test YOUTUBE_VIDEO document type."""
+        assert DocumentType["YOUTUBE_VIDEO"].value == "YOUTUBE_VIDEO"
+
+    def test_crawled_url_type(self):
+        """Test CRAWLED_URL document type."""
+        assert DocumentType["CRAWLED_URL"].value == "CRAWLED_URL"
+
+    def test_connector_types_exist(self):
+        """Test connector document types exist."""
+        expected = {
+            "SLACK_CONNECTOR",
+            "NOTION_CONNECTOR",
+            "GITHUB_CONNECTOR",
+            "JIRA_CONNECTOR",
+            "CONFLUENCE_CONNECTOR",
+            "LINEAR_CONNECTOR",
+            "DISCORD_CONNECTOR",
+        }
+        # Set membership in __members__ is equivalent to hasattr on an Enum class.
+        assert expected <= set(DocumentType.__members__)
+
+
+class TestLiteLLMProvider:
+    """Tests for LiteLLMProvider enum."""
+
+    def test_openai_provider(self):
+        """Test OPENAI provider."""
+        assert LiteLLMProvider["OPENAI"].value == "OPENAI"
+
+    def test_anthropic_provider(self):
+        """Test ANTHROPIC provider."""
+        assert LiteLLMProvider["ANTHROPIC"].value == "ANTHROPIC"
+
+    def test_google_provider(self):
+        """Test GOOGLE provider."""
+        assert LiteLLMProvider["GOOGLE"].value == "GOOGLE"
+
+    def test_ollama_provider(self):
+        """Test OLLAMA provider."""
+        assert LiteLLMProvider["OLLAMA"].value == "OLLAMA"
+
+    def test_all_providers_are_strings(self):
+        """Test all providers have string values."""
+        assert all(isinstance(member.value, str) for member in LiteLLMProvider)
+
+
+class TestSearchSourceConnectorType:
+    """Tests for SearchSourceConnectorType enum."""
+
+    def test_tavily_api(self):
+        """Test TAVILY_API connector type."""
+        assert SearchSourceConnectorType["TAVILY_API"].value == "TAVILY_API"
+
+    def test_searxng_api(self):
+        """Test SEARXNG_API connector type."""
+        assert SearchSourceConnectorType["SEARXNG_API"].value == "SEARXNG_API"
+
+    def test_slack_connector(self):
+        """Test SLACK_CONNECTOR connector type."""
+        assert SearchSourceConnectorType["SLACK_CONNECTOR"].value == "SLACK_CONNECTOR"
+
+    def test_notion_connector(self):
+        """Test NOTION_CONNECTOR connector type."""
+        assert SearchSourceConnectorType["NOTION_CONNECTOR"].value == "NOTION_CONNECTOR"
+
+    def test_all_connector_types_are_strings(self):
+        """Test all connector types have string values."""
+        assert all(isinstance(member.value, str) for member in SearchSourceConnectorType)
+
+
+class TestPermission:
+    """Tests for Permission enum."""
+
+    @staticmethod
+    def _assert_members_exist(names):
+        """Assert every named member is defined on the Permission enum."""
+        missing = [name for name in names if not hasattr(Permission, name)]
+        assert not missing, f"Missing Permission members: {missing}"
+
+    def test_full_access_permission(self):
+        """Test FULL_ACCESS permission maps to the wildcard value."""
+        assert Permission.FULL_ACCESS.value == "*"
+
+    def test_document_permissions(self):
+        """Test document CRUD permissions exist."""
+        self._assert_members_exist([
+            "DOCUMENTS_CREATE",
+            "DOCUMENTS_READ",
+            "DOCUMENTS_UPDATE",
+            "DOCUMENTS_DELETE",
+        ])
+
+    def test_chat_permissions(self):
+        """Test chat CRUD permissions exist."""
+        self._assert_members_exist([
+            "CHATS_CREATE",
+            "CHATS_READ",
+            "CHATS_UPDATE",
+            "CHATS_DELETE",
+        ])
+
+    def test_llm_config_permissions(self):
+        """Test LLM config CRUD permissions exist."""
+        self._assert_members_exist([
+            "LLM_CONFIGS_CREATE",
+            "LLM_CONFIGS_READ",
+            "LLM_CONFIGS_UPDATE",
+            "LLM_CONFIGS_DELETE",
+        ])
+
+    def test_settings_permissions(self):
+        """Test settings permissions exist."""
+        self._assert_members_exist([
+            "SETTINGS_VIEW",
+            "SETTINGS_UPDATE",
+            "SETTINGS_DELETE",
+        ])
+
+
+def _assert_model_fields(model, field_names):
+    """Assert the given model class exposes every named attribute."""
+    for field in field_names:
+        assert hasattr(model, field), f"{model.__name__} is missing {field!r}"
+
+
+class TestSearchSpaceModel:
+    """Tests for SearchSpace model."""
+
+    def test_search_space_has_required_fields(self):
+        """Test SearchSpace has required fields."""
+        _assert_model_fields(SearchSpace, ["id", "name", "user_id", "created_at"])
+
+
+class TestDocumentModel:
+    """Tests for Document model."""
+
+    def test_document_has_required_fields(self):
+        """Test Document has required fields."""
+        _assert_model_fields(
+            Document,
+            ["id", "title", "document_type", "content", "search_space_id"],
+        )
+
+    def test_document_has_chunks_relationship(self):
+        """Test Document has chunks relationship."""
+        _assert_model_fields(Document, ["chunks"])
+
+
+class TestChunkModel:
+    """Tests for Chunk model."""
+
+    def test_chunk_has_required_fields(self):
+        """Test Chunk has required fields."""
+        _assert_model_fields(Chunk, ["id", "content", "document_id"])
+
+    def test_chunk_has_embedding_field(self):
+        """Test Chunk has embedding field."""
+        _assert_model_fields(Chunk, ["embedding"])
+
+
+class TestChatModel:
+    """Tests for Chat model."""
+
+    def test_chat_has_required_fields(self):
+        """Test Chat has required fields."""
+        _assert_model_fields(Chat, ["id", "title", "search_space_id"])
+
+
+class TestChatType:
+    """Tests for ChatType enum."""
+
+    def test_chat_type_values(self):
+        """Test ChatType values."""
+        assert "QNA" in ChatType.__members__
+
+
+class TestLogLevel:
+    """Tests for LogLevel enum."""
+
+    def test_log_level_values(self):
+        """Test LogLevel values exist."""
+        assert {"INFO", "WARNING", "ERROR"} <= set(LogLevel.__members__)
+
+
+class TestLogStatus:
+    """Tests for LogStatus enum."""
+
+    def test_log_status_values(self):
+        """Test LogStatus values exist."""
+        assert {"IN_PROGRESS", "SUCCESS", "FAILED"} <= set(LogStatus.__members__)
+        assert LogStatus.IN_PROGRESS.value == "IN_PROGRESS"
+
+
+class TestLLMConfigModel:
+    """Tests for LLMConfig model."""
+
+    def test_llm_config_has_required_fields(self):
+        """Test LLMConfig has required fields."""
+        _assert_model_fields(
+            LLMConfig,
+            ["id", "name", "provider", "model_name", "api_key", "search_space_id"],
+        )
+
+
+class TestSearchSourceConnectorModel:
+    """Tests for SearchSourceConnector model."""
+
+    def test_connector_has_required_fields(self):
+        """Test SearchSourceConnector has required fields."""
+        _assert_model_fields(
+            SearchSourceConnector,
+            ["id", "connector_type", "config", "search_space_id"],
+        )
+
+
+class TestRBACModels:
+    """Tests for RBAC models."""
+
+    def test_search_space_role_has_required_fields(self):
+        """Test SearchSpaceRole has required fields."""
+        _assert_model_fields(
+            SearchSpaceRole, ["id", "name", "permissions", "search_space_id"]
+        )
+
+    def test_search_space_membership_has_required_fields(self):
+        """Test SearchSpaceMembership has required fields."""
+        _assert_model_fields(
+            SearchSpaceMembership,
+            ["id", "user_id", "search_space_id", "role_id", "is_owner"],
+        )
+
+    def test_search_space_invite_has_required_fields(self):
+        """Test SearchSpaceInvite has required fields."""
+        _assert_model_fields(
+            SearchSpaceInvite, ["id", "invite_code", "search_space_id", "role_id"]
+        )
+
+
+class TestUserModel:
+    """Tests for User model."""
+
+    def test_user_has_required_fields(self):
+        """Test User has required fields."""
+        _assert_model_fields(User, ["id", "email"])
+
+    def test_user_has_page_limit_fields(self):
+        """Test User has page limit fields."""
+        _assert_model_fields(User, ["pages_used", "pages_limit"])
+
+
+class TestPodcastModel:
+    """Tests for Podcast model."""
+
+    def test_podcast_has_required_fields(self):
+        """Test Podcast has required fields."""
+        _assert_model_fields(Podcast, ["id", "title", "search_space_id"])
diff --git a/surfsense_backend/tests/test_document_converters.py b/surfsense_backend/tests/test_document_converters.py
new file mode 100644
index 000000000..2c06a3107
--- /dev/null
+++ b/surfsense_backend/tests/test_document_converters.py
@@ -0,0 +1,513 @@
+"""
+Tests for document_converters utility module.
+
+This module tests the document conversion functions including
+content hash generation, markdown conversion, and chunking utilities.
+"""
+
+import hashlib
+from unittest.mock import MagicMock
+
+import pytest
+
+from app.db import DocumentType
+from app.utils.document_converters import (
+ convert_chunks_to_langchain_documents,
+ convert_document_to_markdown,
+ convert_element_to_markdown,
+ generate_content_hash,
+ generate_unique_identifier_hash,
+)
+
+
+class TestGenerateContentHash:
+    """Tests for generate_content_hash function."""
+
+    # Alphabet of a lowercase hex digest.
+    _HEX_DIGITS = set("0123456789abcdef")
+
+    def test_generates_sha256_hash(self):
+        """Test that function generates SHA-256 hash."""
+        digest = generate_content_hash("Test content", 1)
+        # A SHA-256 hex digest is exactly 64 lowercase hex characters.
+        assert len(digest) == 64
+        assert set(digest) <= self._HEX_DIGITS
+
+    def test_combines_content_and_search_space_id(self):
+        """Test that the hash input is exactly "<search_space_id>:<content>"."""
+        content, space_id = "Test content", 1
+        expected = hashlib.sha256(f"{space_id}:{content}".encode("utf-8")).hexdigest()
+        assert generate_content_hash(content, space_id) == expected
+
+    def test_different_content_produces_different_hash(self):
+        """Test that different content produces different hashes."""
+        assert generate_content_hash("Content 1", 1) != generate_content_hash("Content 2", 1)
+
+    def test_different_search_space_produces_different_hash(self):
+        """Test that different search space ID produces different hashes."""
+        assert generate_content_hash("Same content", 1) != generate_content_hash("Same content", 2)
+
+    def test_same_input_produces_same_hash(self):
+        """Test that hashing is deterministic for identical inputs."""
+        digests = {generate_content_hash("Consistent content", 42) for _ in range(2)}
+        assert len(digests) == 1
+
+    def test_empty_content(self):
+        """Test with empty content — still produces a valid digest."""
+        assert len(generate_content_hash("", 1)) == 64
+
+    def test_unicode_content(self):
+        """Test with unicode content (multibyte text plus an emoji)."""
+        assert len(generate_content_hash("こんにちは世界 🌍", 1)) == 64
+
+
+class TestGenerateUniqueIdentifierHash:
+    """Tests for generate_unique_identifier_hash function."""
+
+    def test_generates_sha256_hash(self):
+        """Test that function generates SHA-256 hash."""
+        digest = generate_unique_identifier_hash(
+            DocumentType.SLACK_CONNECTOR, "message123", 1
+        )
+        # 64 lowercase hex characters == valid SHA-256 hex digest.
+        assert len(digest) == 64
+        assert set(digest) <= set("0123456789abcdef")
+
+    def test_combines_all_parameters(self):
+        """Test that the hash input is "<type>:<identifier>:<space_id>"."""
+        doc_type, unique_id, space_id = DocumentType.SLACK_CONNECTOR, "message123", 42
+        combined = f"{doc_type.value}:{unique_id}:{space_id}"
+        expected = hashlib.sha256(combined.encode("utf-8")).hexdigest()
+        assert generate_unique_identifier_hash(doc_type, unique_id, space_id) == expected
+
+    def test_different_document_types_produce_different_hashes(self):
+        """Test different document types produce different hashes."""
+        slack = generate_unique_identifier_hash(DocumentType.SLACK_CONNECTOR, "id123", 1)
+        notion = generate_unique_identifier_hash(DocumentType.NOTION_CONNECTOR, "id123", 1)
+        assert slack != notion
+
+    def test_different_identifiers_produce_different_hashes(self):
+        """Test different identifiers produce different hashes."""
+        first = generate_unique_identifier_hash(DocumentType.SLACK_CONNECTOR, "id123", 1)
+        second = generate_unique_identifier_hash(DocumentType.SLACK_CONNECTOR, "id456", 1)
+        assert first != second
+
+    def test_integer_identifier(self):
+        """Test with integer unique identifier."""
+        assert len(generate_unique_identifier_hash(DocumentType.JIRA_CONNECTOR, 12345, 1)) == 64
+
+    def test_float_identifier(self):
+        """Test with float unique identifier (e.g., Slack timestamps)."""
+        digest = generate_unique_identifier_hash(
+            DocumentType.SLACK_CONNECTOR, 1234567890.123456, 1
+        )
+        assert len(digest) == 64
+
+    def test_consistency(self):
+        """Test that same inputs always produce same hash."""
+        args = (DocumentType.GITHUB_CONNECTOR, "pr-123", 5)
+        assert generate_unique_identifier_hash(*args) == generate_unique_identifier_hash(*args)
+
+
+class TestConvertElementToMarkdown:
+    """Tests for convert_element_to_markdown function.
+
+    Each test builds a mock unstructured-element with a given "category"
+    metadata value and checks the markdown produced for that category.
+    """
+
+    @staticmethod
+    def _make_element(category, content, **extra_metadata):
+        """Build a mock document element with the given category/content.
+
+        extra_metadata is merged into element.metadata (e.g. text_as_html
+        for Table elements).
+        """
+        element = MagicMock()
+        element.metadata = {"category": category, **extra_metadata}
+        element.page_content = content
+        return element
+
+    @pytest.mark.asyncio
+    async def test_formula_element(self):
+        """Test Formula element conversion into a math code fence."""
+        result = await convert_element_to_markdown(
+            self._make_element("Formula", "E = mc^2")
+        )
+        assert "```math" in result
+        assert "E = mc^2" in result
+
+    @pytest.mark.asyncio
+    async def test_figure_caption_element(self):
+        """Test FigureCaption element conversion."""
+        result = await convert_element_to_markdown(
+            self._make_element("FigureCaption", "Figure 1: Test image")
+        )
+        assert "*Figure:" in result
+
+    @pytest.mark.asyncio
+    async def test_narrative_text_element(self):
+        """Test NarrativeText element conversion to a plain paragraph."""
+        result = await convert_element_to_markdown(
+            self._make_element("NarrativeText", "This is a paragraph of text.")
+        )
+        assert "This is a paragraph of text." in result
+        assert result.endswith("\n\n")
+
+    @pytest.mark.asyncio
+    async def test_list_item_element(self):
+        """Test ListItem element conversion to a markdown bullet."""
+        result = await convert_element_to_markdown(
+            self._make_element("ListItem", "Item one")
+        )
+        assert result.startswith("- ")
+        assert "Item one" in result
+
+    @pytest.mark.asyncio
+    async def test_title_element(self):
+        """Test Title element conversion to a level-1 heading."""
+        result = await convert_element_to_markdown(
+            self._make_element("Title", "Document Title")
+        )
+        assert result.startswith("# ")
+        assert "Document Title" in result
+
+    @pytest.mark.asyncio
+    async def test_address_element(self):
+        """Test Address element conversion to a blockquote."""
+        result = await convert_element_to_markdown(
+            self._make_element("Address", "123 Main St")
+        )
+        assert result.startswith("> ")
+
+    @pytest.mark.asyncio
+    async def test_email_address_element(self):
+        """Test EmailAddress element conversion to inline code."""
+        result = await convert_element_to_markdown(
+            self._make_element("EmailAddress", "test@example.com")
+        )
+        assert "`test@example.com`" in result
+
+    @pytest.mark.asyncio
+    async def test_table_element(self):
+        """Test Table element conversion embeds the HTML from metadata."""
+        # BUG FIX: the text_as_html payload and the final assertion had lost
+        # their HTML tags (an empty-string `in` assertion passes vacuously).
+        # Restore a real <table> payload and assert it survives conversion.
+        result = await convert_element_to_markdown(
+            self._make_element("Table", "Table content", text_as_html="<table></table>")
+        )
+        assert "```html" in result
+        assert "<table></table>" in result
+
+    @pytest.mark.asyncio
+    async def test_header_element(self):
+        """Test Header element conversion to a level-2 heading."""
+        result = await convert_element_to_markdown(
+            self._make_element("Header", "Section Header")
+        )
+        assert result.startswith("## ")
+
+    @pytest.mark.asyncio
+    async def test_code_snippet_element(self):
+        """Test CodeSnippet element conversion to a code fence."""
+        result = await convert_element_to_markdown(
+            self._make_element("CodeSnippet", "print('hello')")
+        )
+        assert "```" in result
+        assert "print('hello')" in result
+
+    @pytest.mark.asyncio
+    async def test_page_number_element(self):
+        """Test PageNumber element conversion."""
+        result = await convert_element_to_markdown(
+            self._make_element("PageNumber", "42")
+        )
+        assert "*Page 42*" in result
+
+    @pytest.mark.asyncio
+    async def test_page_break_element(self):
+        """Test PageBreak element conversion to a horizontal rule."""
+        # PageBreak with content returns horizontal rule
+        result = await convert_element_to_markdown(
+            self._make_element("PageBreak", "page break content")
+        )
+        assert "---" in result
+
+    @pytest.mark.asyncio
+    async def test_empty_content(self):
+        """Test element with empty content yields an empty string."""
+        result = await convert_element_to_markdown(
+            self._make_element("NarrativeText", "")
+        )
+        assert result == ""
+
+    @pytest.mark.asyncio
+    async def test_uncategorized_element(self):
+        """Test UncategorizedText element conversion passes text through."""
+        result = await convert_element_to_markdown(
+            self._make_element("UncategorizedText", "Some uncategorized text")
+        )
+        assert "Some uncategorized text" in result
+
+
+class TestConvertDocumentToMarkdown:
+    """Tests for convert_document_to_markdown function."""
+
+    @staticmethod
+    def _element(category, content):
+        """Build a mock element with the given category and page content."""
+        elem = MagicMock()
+        elem.metadata = {"category": category}
+        elem.page_content = content
+        return elem
+
+    @pytest.mark.asyncio
+    async def test_converts_multiple_elements(self):
+        """Test converting multiple elements."""
+        elements = [
+            self._element("Title", "Document Title"),
+            self._element("NarrativeText", "This is a paragraph."),
+        ]
+
+        result = await convert_document_to_markdown(elements)
+
+        assert "# Document Title" in result
+        assert "This is a paragraph." in result
+
+    @pytest.mark.asyncio
+    async def test_empty_elements(self):
+        """Test with empty elements list."""
+        assert await convert_document_to_markdown([]) == ""
+
+    @pytest.mark.asyncio
+    async def test_preserves_order(self):
+        """Test that element order is preserved."""
+        elements = [
+            self._element("NarrativeText", f"Paragraph {i}") for i in range(3)
+        ]
+
+        result = await convert_document_to_markdown(elements)
+
+        # Each paragraph must appear after the previous one in the output.
+        positions = [result.find(f"Paragraph {i}") for i in range(3)]
+        assert positions[0] < positions[1] < positions[2]
+
+
+class TestConvertChunksToLangchainDocuments:
+    """Tests for convert_chunks_to_langchain_documents function."""
+
+    @staticmethod
+    def _document(doc_id=1, title="Doc", doc_type="FILE", metadata=None):
+        """Build the nested document payload for a chunk."""
+        return {
+            "id": doc_id,
+            "title": title,
+            "document_type": doc_type,
+            "metadata": metadata if metadata is not None else {},
+        }
+
+    @staticmethod
+    def _chunk(chunk_id=1, content="Content", score=0.9, document=None, **extra):
+        """Build a retriever-shaped chunk dict; document=None omits the key."""
+        data = {"chunk_id": chunk_id, "content": content, "score": score}
+        if document is not None:
+            data["document"] = document
+        data.update(extra)
+        return data
+
+    def test_converts_basic_chunks(self):
+        """Test converting basic chunk structure."""
+        chunks = [
+            self._chunk(
+                chunk_id=1,
+                content="This is chunk content",
+                score=0.95,
+                document=self._document(
+                    doc_id=10,
+                    title="Test Document",
+                    metadata={"url": "https://example.com"},
+                ),
+            )
+        ]
+
+        result = convert_chunks_to_langchain_documents(chunks)
+
+        assert len(result) == 1
+        assert "This is chunk content" in result[0].page_content
+        assert result[0].metadata["chunk_id"] == 1
+        assert result[0].metadata["document_id"] == 10
+        assert result[0].metadata["document_title"] == "Test Document"
+
+    def test_includes_source_id_in_content(self):
+        """Test that source_id is included in XML content."""
+        chunks = [self._chunk(content="Test content", document=self._document(doc_id=5))]
+
+        result = convert_chunks_to_langchain_documents(chunks)
+
+        assert "5" in result[0].page_content
+
+    def test_extracts_source_url(self):
+        """Test source URL extraction from metadata."""
+        chunks = [
+            self._chunk(
+                document=self._document(
+                    doc_type="CRAWLED_URL",
+                    metadata={"url": "https://example.com/page"},
+                )
+            )
+        ]
+
+        result = convert_chunks_to_langchain_documents(chunks)
+
+        assert result[0].metadata["source"] == "https://example.com/page"
+
+    def test_extracts_source_url_alternate_key(self):
+        """Test source URL extraction with sourceURL key."""
+        chunks = [
+            self._chunk(
+                document=self._document(
+                    doc_type="CRAWLED_URL",
+                    metadata={"sourceURL": "https://example.com/alternate"},
+                )
+            )
+        ]
+
+        result = convert_chunks_to_langchain_documents(chunks)
+
+        assert result[0].metadata["source"] == "https://example.com/alternate"
+
+    def test_handles_missing_document(self):
+        """Test handling chunks without document info."""
+        chunks = [self._chunk(content="Content without document", score=0.8)]
+
+        result = convert_chunks_to_langchain_documents(chunks)
+
+        assert len(result) == 1
+        assert "Content without document" in result[0].page_content
+
+    def test_prefixes_document_metadata(self):
+        """Test document metadata is prefixed with doc_meta_."""
+        chunks = [
+            self._chunk(
+                document=self._document(metadata={"custom_field": "custom_value"})
+            )
+        ]
+
+        result = convert_chunks_to_langchain_documents(chunks)
+
+        assert result[0].metadata["doc_meta_custom_field"] == "custom_value"
+
+    def test_handles_rank_field(self):
+        """Test handling of rank field when present."""
+        chunks = [self._chunk(rank=1, document=self._document())]
+
+        result = convert_chunks_to_langchain_documents(chunks)
+
+        assert result[0].metadata["rank"] == 1
+
+    def test_empty_chunks_list(self):
+        """Test with empty chunks list."""
+        assert convert_chunks_to_langchain_documents([]) == []
+
+    def test_multiple_chunks(self):
+        """Test converting multiple chunks preserves content per index."""
+        chunks = [
+            self._chunk(
+                chunk_id=i,
+                content=f"Content {i}",
+                score=0.9 - (i * 0.1),
+                document=self._document(doc_id=i, title=f"Doc {i}"),
+            )
+            for i in range(3)
+        ]
+
+        result = convert_chunks_to_langchain_documents(chunks)
+
+        assert len(result) == 3
+        for i, doc in enumerate(result):
+            assert f"Content {i}" in doc.page_content
diff --git a/surfsense_backend/tests/test_llm_service.py b/surfsense_backend/tests/test_llm_service.py
new file mode 100644
index 000000000..03667ed0d
--- /dev/null
+++ b/surfsense_backend/tests/test_llm_service.py
@@ -0,0 +1,307 @@
+"""
+Tests for the LLM service module.
+
+These tests validate:
+1. LLM role constants have correct values (used for routing)
+2. Global vs user-space LLM config lookup is correct
+3. Missing LLMs are handled gracefully (return None, not crash)
+4. Role-to-LLM mapping is correct (fast -> fast_llm_id, etc.)
+"""
+
+import pytest
+from unittest.mock import AsyncMock, MagicMock, patch
+
+# Skip these tests if app dependencies aren't installed
+pytest.importorskip("litellm")
+
+from app.services.llm_service import (
+ LLMRole,
+ get_global_llm_config,
+ get_fast_llm,
+ get_long_context_llm,
+ get_strategic_llm,
+ get_search_space_llm_instance,
+)
+
+
+class TestLLMRoleConstants:
+    """
+    Tests for LLMRole constants.
+    These values are used for database lookups and must be stable.
+    """
+
+    def test_role_constants_are_strings(self):
+        """LLM role values must be strings for database compatibility."""
+        for role in (LLMRole.LONG_CONTEXT, LLMRole.FAST, LLMRole.STRATEGIC):
+            assert isinstance(role, str)
+
+    def test_role_values_are_unique(self):
+        """Role values must be unique to prevent routing confusion."""
+        assert len({LLMRole.LONG_CONTEXT, LLMRole.FAST, LLMRole.STRATEGIC}) == 3
+
+    def test_expected_role_values(self):
+        """
+        Validate exact role values.
+        These are used in the database schema and must not change.
+        """
+        assert (LLMRole.LONG_CONTEXT, LLMRole.FAST, LLMRole.STRATEGIC) == (
+            "long_context",
+            "fast",
+            "strategic",
+        )
+
+
+class TestGlobalLLMConfigLookup:
+    """
+    Tests validating global (negative ID) LLM config lookup behavior.
+    """
+
+    def test_positive_id_never_returns_global_config(self):
+        """
+        Positive IDs are user-space configs, must never match global.
+        Returning a global config for a user ID would be a security issue.
+        """
+        for user_space_id in (1, 100, 999999):
+            assert get_global_llm_config(user_space_id) is None
+
+    def test_zero_id_never_returns_global_config(self):
+        """Zero is not a valid global config ID."""
+        assert get_global_llm_config(0) is None
+
+    @patch("app.services.llm_service.config")
+    def test_negative_id_matches_correct_global_config(self, mock_config):
+        """
+        Negative IDs should match global configs by exact ID.
+        Wrong matching would return wrong model configuration.
+        """
+        mock_config.GLOBAL_LLM_CONFIGS = [
+            {"id": -1, "provider": "OPENAI", "model_name": "gpt-4"},
+            {"id": -2, "provider": "ANTHROPIC", "model_name": "claude-3"},
+            {"id": -3, "provider": "GOOGLE", "model_name": "gemini-pro"},
+        ]
+
+        # Each ID must resolve to exactly its own entry.
+        for config_id, provider in ((-1, "OPENAI"), (-2, "ANTHROPIC"), (-3, "GOOGLE")):
+            found = get_global_llm_config(config_id)
+            assert found["id"] == config_id
+            assert found["provider"] == provider
+
+    @patch("app.services.llm_service.config")
+    def test_non_existent_negative_id_returns_none(self, mock_config):
+        """Non-existent global config IDs must return None, not error."""
+        mock_config.GLOBAL_LLM_CONFIGS = [
+            {"id": -1, "provider": "OPENAI", "model_name": "gpt-4"},
+        ]
+
+        assert get_global_llm_config(-999) is None
+
+
+class TestSearchSpaceLLMInstanceRetrieval:
+    """
+    Tests for search space LLM instance retrieval.
+    Validates correct role-to-field mapping and graceful error handling.
+    """
+
+    @staticmethod
+    def _stub_search_space(mock_session, search_space):
+        """Wire mock_session.execute so the DB query resolves to search_space."""
+        query_result = MagicMock()
+        query_result.scalars.return_value.first.return_value = search_space
+        mock_session.execute = AsyncMock(return_value=query_result)
+
+    @pytest.mark.asyncio
+    async def test_nonexistent_search_space_returns_none(self, mock_session):
+        """
+        Missing search space must return None, not raise exception.
+        This prevents crashes when search spaces are deleted.
+        """
+        self._stub_search_space(mock_session, None)
+
+        result = await get_search_space_llm_instance(
+            mock_session, search_space_id=999, role=LLMRole.FAST
+        )
+
+        assert result is None
+
+    @pytest.mark.asyncio
+    async def test_invalid_role_returns_none(self, mock_session):
+        """
+        Invalid role must return None to prevent undefined behavior.
+        """
+        search_space = MagicMock()
+        search_space.fast_llm_id = 1
+        self._stub_search_space(mock_session, search_space)
+
+        result = await get_search_space_llm_instance(
+            mock_session, search_space_id=1, role="not_a_valid_role"
+        )
+
+        assert result is None
+
+    @pytest.mark.asyncio
+    async def test_unconfigured_llm_returns_none(self, mock_session):
+        """
+        When no LLM is configured for a role, return None.
+        This is a valid state - not all search spaces have all LLMs.
+        """
+        search_space = MagicMock()
+        search_space.fast_llm_id = None
+        search_space.long_context_llm_id = None
+        search_space.strategic_llm_id = None
+        self._stub_search_space(mock_session, search_space)
+
+        result = await get_search_space_llm_instance(
+            mock_session, search_space_id=1, role=LLMRole.FAST
+        )
+
+        assert result is None
+
+    @pytest.mark.asyncio
+    @patch("app.services.llm_service.get_global_llm_config")
+    @patch("app.services.llm_service.ChatLiteLLM")
+    async def test_global_config_creates_llm_instance(
+        self, mock_chat_litellm, mock_get_global, mock_session
+    ):
+        """
+        Global config (negative ID) should create an LLM instance.
+        """
+        search_space = MagicMock()
+        search_space.fast_llm_id = -1  # Global config ID
+        self._stub_search_space(mock_session, search_space)
+
+        mock_get_global.return_value = {
+            "id": -1,
+            "provider": "OPENAI",
+            "model_name": "gpt-4",
+            "api_key": "test-key",
+        }
+
+        llm_instance = MagicMock()
+        mock_chat_litellm.return_value = llm_instance
+
+        result = await get_search_space_llm_instance(
+            mock_session, search_space_id=1, role=LLMRole.FAST
+        )
+
+        # Must return the created instance, built via ChatLiteLLM exactly once.
+        assert result == llm_instance
+        mock_chat_litellm.assert_called_once()
+
+    @pytest.mark.asyncio
+    @patch("app.services.llm_service.get_global_llm_config")
+    async def test_missing_global_config_returns_none(
+        self, mock_get_global, mock_session
+    ):
+        """
+        If global config ID is set but config doesn't exist, return None.
+        This handles config deletion gracefully.
+        """
+        search_space = MagicMock()
+        search_space.fast_llm_id = -1  # Global ID that doesn't exist
+        self._stub_search_space(mock_session, search_space)
+
+        mock_get_global.return_value = None  # Config not found
+
+        result = await get_search_space_llm_instance(
+            mock_session, search_space_id=1, role=LLMRole.FAST
+        )
+
+        assert result is None
+
+
+class TestRoleToLLMMapping:
+    """
+    Tests validating that convenience functions map to correct roles.
+    Wrong mapping would use wrong model (e.g., slow model for fast tasks).
+    """
+
+    @pytest.mark.asyncio
+    @patch("app.services.llm_service.get_search_space_llm_instance")
+    async def test_get_fast_llm_uses_fast_role(self, mock_get_instance, mock_session):
+        """get_fast_llm must request LLMRole.FAST specifically."""
+        mock_get_instance.return_value = MagicMock()
+
+        await get_fast_llm(mock_session, search_space_id=1)
+
+        mock_get_instance.assert_called_once_with(mock_session, 1, LLMRole.FAST)
+
+    @pytest.mark.asyncio
+    @patch("app.services.llm_service.get_search_space_llm_instance")
+    async def test_get_long_context_llm_uses_long_context_role(
+        self, mock_get_instance, mock_session
+    ):
+        """get_long_context_llm must request LLMRole.LONG_CONTEXT specifically."""
+        mock_get_instance.return_value = MagicMock()
+
+        await get_long_context_llm(mock_session, search_space_id=1)
+
+        mock_get_instance.assert_called_once_with(
+            mock_session, 1, LLMRole.LONG_CONTEXT
+        )
+
+    @pytest.mark.asyncio
+    @patch("app.services.llm_service.get_search_space_llm_instance")
+    async def test_get_strategic_llm_uses_strategic_role(
+        self, mock_get_instance, mock_session
+    ):
+        """get_strategic_llm must request LLMRole.STRATEGIC specifically."""
+        mock_get_instance.return_value = MagicMock()
+
+        await get_strategic_llm(mock_session, search_space_id=1)
+
+        mock_get_instance.assert_called_once_with(
+            mock_session, 1, LLMRole.STRATEGIC
+        )
+
+    @pytest.mark.asyncio
+    @patch("app.services.llm_service.get_search_space_llm_instance")
+    async def test_convenience_functions_return_llm_instance(
+        self, mock_get_instance, mock_session
+    ):
+        """Convenience functions must return the LLM instance unchanged."""
+        llm = MagicMock()
+        llm.model_name = "test-model"
+
+        # Each getter must pass the instance through without wrapping it.
+        for getter in (get_fast_llm, get_long_context_llm, get_strategic_llm):
+            mock_get_instance.reset_mock()
+            mock_get_instance.return_value = llm
+            assert await getter(mock_session, search_space_id=1) == llm
diff --git a/surfsense_backend/tests/test_llm_service_extended.py b/surfsense_backend/tests/test_llm_service_extended.py
new file mode 100644
index 000000000..0f9ca85e1
--- /dev/null
+++ b/surfsense_backend/tests/test_llm_service_extended.py
@@ -0,0 +1,433 @@
+"""
+Extended tests for LLM service.
+Tests LLM configuration validation and instance creation.
+"""
+import pytest
+from unittest.mock import AsyncMock, MagicMock, patch
+
+from app.services.llm_service import (
+ LLMRole,
+ get_global_llm_config,
+ validate_llm_config,
+ get_search_space_llm_instance,
+ get_long_context_llm,
+ get_fast_llm,
+ get_strategic_llm,
+)
+
+
+class TestLLMRoleExtended:
+ """Extended tests for LLMRole constants."""
+
+ def test_role_long_context(self):
+ """Test long context role value."""
+ assert LLMRole.LONG_CONTEXT == "long_context"
+
+ def test_role_fast(self):
+ """Test fast role value."""
+ assert LLMRole.FAST == "fast"
+
+ def test_role_strategic(self):
+ """Test strategic role value."""
+ assert LLMRole.STRATEGIC == "strategic"
+
+
+class TestGetGlobalLLMConfig:
+ """Tests for get_global_llm_config function."""
+
+ def test_returns_none_for_positive_id(self):
+ """Test that positive IDs return None."""
+ result = get_global_llm_config(1)
+ assert result is None
+
+ def test_returns_none_for_zero_id(self):
+ """Test that zero ID returns None."""
+ result = get_global_llm_config(0)
+ assert result is None
+
+ def test_returns_config_for_matching_negative_id(self):
+ """Test that matching negative ID returns config."""
+ with patch("app.services.llm_service.config") as mock_config:
+ mock_config.GLOBAL_LLM_CONFIGS = [
+ {"id": -1, "name": "GPT-4", "provider": "OPENAI"},
+ {"id": -2, "name": "Claude", "provider": "ANTHROPIC"},
+ ]
+
+ result = get_global_llm_config(-1)
+
+ assert result is not None
+ assert result["name"] == "GPT-4"
+
+ def test_returns_none_for_non_matching_negative_id(self):
+ """Test that non-matching negative ID returns None."""
+ with patch("app.services.llm_service.config") as mock_config:
+ mock_config.GLOBAL_LLM_CONFIGS = [
+ {"id": -1, "name": "GPT-4"},
+ ]
+
+ result = get_global_llm_config(-999)
+
+ assert result is None
+
+
+class TestValidateLLMConfig:
+ """Tests for validate_llm_config function."""
+
+ @pytest.mark.asyncio
+ async def test_validate_llm_config_success(self):
+ """Test successful LLM config validation."""
+ with patch("app.services.llm_service.ChatLiteLLM") as MockChatLiteLLM:
+ mock_llm = MagicMock()
+ mock_response = MagicMock()
+ mock_response.content = "Hello!"
+ mock_llm.ainvoke = AsyncMock(return_value=mock_response)
+ MockChatLiteLLM.return_value = mock_llm
+
+ is_valid, error = await validate_llm_config(
+ provider="OPENAI",
+ model_name="gpt-4",
+ api_key="sk-test-key",
+ )
+
+ assert is_valid is True
+ assert error == ""
+
+ @pytest.mark.asyncio
+ async def test_validate_llm_config_empty_response(self):
+ """Test validation fails with empty response."""
+ with patch("app.services.llm_service.ChatLiteLLM") as MockChatLiteLLM:
+ mock_llm = MagicMock()
+ mock_response = MagicMock()
+ mock_response.content = ""
+ mock_llm.ainvoke = AsyncMock(return_value=mock_response)
+ MockChatLiteLLM.return_value = mock_llm
+
+ is_valid, error = await validate_llm_config(
+ provider="OPENAI",
+ model_name="gpt-4",
+ api_key="sk-test-key",
+ )
+
+ assert is_valid is False
+ assert "empty response" in error.lower()
+
+ @pytest.mark.asyncio
+ async def test_validate_llm_config_exception(self):
+ """Test validation handles exceptions."""
+ with patch("app.services.llm_service.ChatLiteLLM") as MockChatLiteLLM:
+ mock_llm = MagicMock()
+ mock_llm.ainvoke = AsyncMock(side_effect=Exception("API Error"))
+ MockChatLiteLLM.return_value = mock_llm
+
+ is_valid, error = await validate_llm_config(
+ provider="OPENAI",
+ model_name="gpt-4",
+ api_key="sk-invalid-key",
+ )
+
+ assert is_valid is False
+ assert "API Error" in error
+
+ @pytest.mark.asyncio
+ async def test_validate_llm_config_with_custom_provider(self):
+ """Test validation with custom provider."""
+ with patch("app.services.llm_service.ChatLiteLLM") as MockChatLiteLLM:
+ mock_llm = MagicMock()
+ mock_response = MagicMock()
+ mock_response.content = "Hello!"
+ mock_llm.ainvoke = AsyncMock(return_value=mock_response)
+ MockChatLiteLLM.return_value = mock_llm
+
+ is_valid, error = await validate_llm_config(
+ provider="OPENAI",
+ model_name="custom-model",
+ api_key="sk-test-key",
+ custom_provider="custom/provider",
+ )
+
+ assert is_valid is True
+ # Verify custom provider was used in model string
+ call_args = MockChatLiteLLM.call_args
+ assert "custom/provider" in call_args.kwargs.get("model", "")
+
+ @pytest.mark.asyncio
+ async def test_validate_llm_config_with_api_base(self):
+ """Test validation with custom API base."""
+ with patch("app.services.llm_service.ChatLiteLLM") as MockChatLiteLLM:
+ mock_llm = MagicMock()
+ mock_response = MagicMock()
+ mock_response.content = "Hello!"
+ mock_llm.ainvoke = AsyncMock(return_value=mock_response)
+ MockChatLiteLLM.return_value = mock_llm
+
+ is_valid, error = await validate_llm_config(
+ provider="OPENAI",
+ model_name="gpt-4",
+ api_key="sk-test-key",
+ api_base="https://custom.api.com",
+ )
+
+ assert is_valid is True
+ call_args = MockChatLiteLLM.call_args
+ assert call_args.kwargs.get("api_base") == "https://custom.api.com"
+
+
+class TestGetSearchSpaceLLMInstance:
+ """Tests for get_search_space_llm_instance function."""
+
+ @pytest.mark.asyncio
+ async def test_returns_none_for_nonexistent_search_space(self):
+ """Test returns None when search space doesn't exist."""
+ mock_session = AsyncMock()
+ mock_result = MagicMock()
+ mock_scalars = MagicMock()
+ mock_scalars.first.return_value = None
+ mock_result.scalars.return_value = mock_scalars
+ mock_session.execute = AsyncMock(return_value=mock_result)
+
+ result = await get_search_space_llm_instance(
+ session=mock_session,
+ search_space_id=999,
+ role=LLMRole.FAST,
+ )
+
+ assert result is None
+
+ @pytest.mark.asyncio
+ async def test_returns_none_for_invalid_role(self):
+ """Test returns None for invalid role."""
+ mock_session = AsyncMock()
+
+ # Mock search space exists
+ mock_search_space = MagicMock()
+ mock_search_space.id = 1
+ mock_search_space.fast_llm_id = 1
+
+ mock_result = MagicMock()
+ mock_scalars = MagicMock()
+ mock_scalars.first.return_value = mock_search_space
+ mock_result.scalars.return_value = mock_scalars
+ mock_session.execute = AsyncMock(return_value=mock_result)
+
+ result = await get_search_space_llm_instance(
+ session=mock_session,
+ search_space_id=1,
+ role="invalid_role",
+ )
+
+ assert result is None
+
+ @pytest.mark.asyncio
+ async def test_returns_none_when_no_llm_configured(self):
+ """Test returns None when LLM is not configured for role."""
+ mock_session = AsyncMock()
+
+ # Mock search space with no LLM configured
+ mock_search_space = MagicMock()
+ mock_search_space.id = 1
+ mock_search_space.fast_llm_id = None
+ mock_search_space.long_context_llm_id = None
+ mock_search_space.strategic_llm_id = None
+
+ mock_result = MagicMock()
+ mock_scalars = MagicMock()
+ mock_scalars.first.return_value = mock_search_space
+ mock_result.scalars.return_value = mock_scalars
+ mock_session.execute = AsyncMock(return_value=mock_result)
+
+ result = await get_search_space_llm_instance(
+ session=mock_session,
+ search_space_id=1,
+ role=LLMRole.FAST,
+ )
+
+ assert result is None
+
+ @pytest.mark.asyncio
+ async def test_returns_instance_for_global_config(self):
+ """Test returns LLM instance for global config."""
+ mock_session = AsyncMock()
+
+ # Mock search space with global config
+ mock_search_space = MagicMock()
+ mock_search_space.id = 1
+ mock_search_space.fast_llm_id = -1 # Global config
+
+ mock_result = MagicMock()
+ mock_scalars = MagicMock()
+ mock_scalars.first.return_value = mock_search_space
+ mock_result.scalars.return_value = mock_scalars
+ mock_session.execute = AsyncMock(return_value=mock_result)
+
+ with patch("app.services.llm_service.config") as mock_config:
+ mock_config.GLOBAL_LLM_CONFIGS = [
+ {
+ "id": -1,
+ "name": "GPT-4",
+ "provider": "OPENAI",
+ "model_name": "gpt-4",
+ "api_key": "sk-test",
+ "api_base": None,
+ "custom_provider": None,
+ "litellm_params": None,
+ }
+ ]
+
+ with patch("app.services.llm_service.ChatLiteLLM") as MockChatLiteLLM:
+ mock_llm = MagicMock()
+ MockChatLiteLLM.return_value = mock_llm
+
+ result = await get_search_space_llm_instance(
+ session=mock_session,
+ search_space_id=1,
+ role=LLMRole.FAST,
+ )
+
+ assert result is not None
+ assert MockChatLiteLLM.called
+
+
+class TestConvenienceFunctions:
+ """Tests for convenience wrapper functions."""
+
+ @pytest.mark.asyncio
+ async def test_get_long_context_llm(self):
+ """Test get_long_context_llm uses correct role."""
+ mock_session = AsyncMock()
+
+ with patch("app.services.llm_service.get_search_space_llm_instance") as mock_get:
+ mock_get.return_value = MagicMock()
+
+ await get_long_context_llm(mock_session, 1)
+
+ mock_get.assert_called_once_with(mock_session, 1, LLMRole.LONG_CONTEXT)
+
+ @pytest.mark.asyncio
+ async def test_get_fast_llm(self):
+ """Test get_fast_llm uses correct role."""
+ mock_session = AsyncMock()
+
+ with patch("app.services.llm_service.get_search_space_llm_instance") as mock_get:
+ mock_get.return_value = MagicMock()
+
+ await get_fast_llm(mock_session, 1)
+
+ mock_get.assert_called_once_with(mock_session, 1, LLMRole.FAST)
+
+ @pytest.mark.asyncio
+ async def test_get_strategic_llm(self):
+ """Test get_strategic_llm uses correct role."""
+ mock_session = AsyncMock()
+
+ with patch("app.services.llm_service.get_search_space_llm_instance") as mock_get:
+ mock_get.return_value = MagicMock()
+
+ await get_strategic_llm(mock_session, 1)
+
+ mock_get.assert_called_once_with(mock_session, 1, LLMRole.STRATEGIC)
+
+
+class TestProviderMapping:
+ """Tests for provider string mapping."""
+
+ @pytest.mark.asyncio
+ async def test_openai_provider_mapping(self):
+ """Test OPENAI maps to openai."""
+ with patch("app.services.llm_service.ChatLiteLLM") as MockChatLiteLLM:
+ mock_llm = MagicMock()
+ mock_response = MagicMock()
+ mock_response.content = "Hello!"
+ mock_llm.ainvoke = AsyncMock(return_value=mock_response)
+ MockChatLiteLLM.return_value = mock_llm
+
+ await validate_llm_config(
+ provider="OPENAI",
+ model_name="gpt-4",
+ api_key="sk-test",
+ )
+
+ call_args = MockChatLiteLLM.call_args
+ assert "openai/gpt-4" in call_args.kwargs.get("model", "")
+
+ @pytest.mark.asyncio
+ async def test_anthropic_provider_mapping(self):
+ """Test ANTHROPIC maps to anthropic."""
+ with patch("app.services.llm_service.ChatLiteLLM") as MockChatLiteLLM:
+ mock_llm = MagicMock()
+ mock_response = MagicMock()
+ mock_response.content = "Hello!"
+ mock_llm.ainvoke = AsyncMock(return_value=mock_response)
+ MockChatLiteLLM.return_value = mock_llm
+
+ await validate_llm_config(
+ provider="ANTHROPIC",
+ model_name="claude-3",
+ api_key="sk-test",
+ )
+
+ call_args = MockChatLiteLLM.call_args
+ assert "anthropic/claude-3" in call_args.kwargs.get("model", "")
+
+ @pytest.mark.asyncio
+ async def test_google_provider_mapping(self):
+ """Test GOOGLE maps to gemini."""
+ with patch("app.services.llm_service.ChatLiteLLM") as MockChatLiteLLM:
+ mock_llm = MagicMock()
+ mock_response = MagicMock()
+ mock_response.content = "Hello!"
+ mock_llm.ainvoke = AsyncMock(return_value=mock_response)
+ MockChatLiteLLM.return_value = mock_llm
+
+ await validate_llm_config(
+ provider="GOOGLE",
+ model_name="gemini-pro",
+ api_key="test-key",
+ )
+
+ call_args = MockChatLiteLLM.call_args
+ assert "gemini/gemini-pro" in call_args.kwargs.get("model", "")
+
+ @pytest.mark.asyncio
+ async def test_ollama_provider_mapping(self):
+ """Test OLLAMA maps to ollama."""
+ with patch("app.services.llm_service.ChatLiteLLM") as MockChatLiteLLM:
+ mock_llm = MagicMock()
+ mock_response = MagicMock()
+ mock_response.content = "Hello!"
+ mock_llm.ainvoke = AsyncMock(return_value=mock_response)
+ MockChatLiteLLM.return_value = mock_llm
+
+ await validate_llm_config(
+ provider="OLLAMA",
+ model_name="llama2",
+ api_key="",
+ api_base="http://localhost:11434",
+ )
+
+ call_args = MockChatLiteLLM.call_args
+ assert "ollama/llama2" in call_args.kwargs.get("model", "")
+
+
+class TestLiteLLMParams:
+ """Tests for litellm_params handling."""
+
+ @pytest.mark.asyncio
+ async def test_litellm_params_passed_to_instance(self):
+ """Test that litellm_params are passed to ChatLiteLLM."""
+ with patch("app.services.llm_service.ChatLiteLLM") as MockChatLiteLLM:
+ mock_llm = MagicMock()
+ mock_response = MagicMock()
+ mock_response.content = "Hello!"
+ mock_llm.ainvoke = AsyncMock(return_value=mock_response)
+ MockChatLiteLLM.return_value = mock_llm
+
+ await validate_llm_config(
+ provider="OPENAI",
+ model_name="gpt-4",
+ api_key="sk-test",
+ litellm_params={"temperature": 0.7, "max_tokens": 1000},
+ )
+
+ call_args = MockChatLiteLLM.call_args
+ assert call_args.kwargs.get("temperature") == 0.7
+ assert call_args.kwargs.get("max_tokens") == 1000
diff --git a/surfsense_backend/tests/test_page_limit_service.py b/surfsense_backend/tests/test_page_limit_service.py
new file mode 100644
index 000000000..1cef0b5dd
--- /dev/null
+++ b/surfsense_backend/tests/test_page_limit_service.py
@@ -0,0 +1,354 @@
+"""
+Tests for PageLimitService.
+
+This module tests the page limit service used for tracking user document processing limits.
+"""
+
+import os
+import tempfile
+from unittest.mock import AsyncMock, MagicMock
+
+import pytest
+
+from app.services.page_limit_service import PageLimitExceededError, PageLimitService
+
+
+class TestPageLimitExceededError:
+ """Tests for PageLimitExceededError exception."""
+
+ def test_default_message(self):
+ """Test default error message."""
+ error = PageLimitExceededError()
+ assert "Page limit exceeded" in str(error)
+
+ def test_custom_message(self):
+ """Test custom error message."""
+ error = PageLimitExceededError(message="Custom message")
+ assert str(error) == "Custom message"
+
+ def test_stores_usage_info(self):
+ """Test error stores usage information."""
+ error = PageLimitExceededError(
+ pages_used=100,
+ pages_limit=200,
+ pages_to_add=50,
+ )
+ assert error.pages_used == 100
+ assert error.pages_limit == 200
+ assert error.pages_to_add == 50
+
+ def test_default_values(self):
+ """Test default values are zero."""
+ error = PageLimitExceededError()
+ assert error.pages_used == 0
+ assert error.pages_limit == 0
+ assert error.pages_to_add == 0
+
+
+class TestPageLimitServiceEstimation:
+ """Tests for page estimation methods."""
+
+ @pytest.fixture
+ def service(self):
+ """Create a PageLimitService with mock session."""
+ mock_session = AsyncMock()
+ return PageLimitService(mock_session)
+
+ def test_estimate_pages_from_elements_with_page_numbers(self, service):
+ """Test estimation from elements with page number metadata."""
+ elements = []
+ for page in [1, 1, 2, 2, 3]: # 3 unique pages
+ elem = MagicMock()
+ elem.metadata = {"page_number": page}
+ elements.append(elem)
+
+ result = service.estimate_pages_from_elements(elements)
+ assert result == 3
+
+ def test_estimate_pages_from_elements_by_content_length(self, service):
+ """Test estimation from elements by content length."""
+ elements = []
+ # Create elements with ~4000 chars total (should be 2 pages)
+ for i in range(4):
+ elem = MagicMock()
+ elem.metadata = {} # No page number
+ elem.page_content = "x" * 1000 # 1000 chars each
+ elements.append(elem)
+
+ result = service.estimate_pages_from_elements(elements)
+ assert result == 2 # 4000 / 2000 = 2
+
+ def test_estimate_pages_from_elements_empty_list(self, service):
+ """Test estimation from empty elements list returns minimum 1."""
+ result = service.estimate_pages_from_elements([])
+ # Implementation uses max(1, ...) so minimum is 1
+ assert result == 1
+
+ def test_estimate_pages_from_markdown_with_metadata(self, service):
+ """Test estimation from markdown documents with page metadata."""
+ docs = []
+ for page in range(5):
+ doc = MagicMock()
+ doc.metadata = {"page": page}
+ doc.text = "Content"
+ docs.append(doc)
+
+ result = service.estimate_pages_from_markdown(docs)
+ assert result == 5
+
+ def test_estimate_pages_from_markdown_by_content(self, service):
+ """Test estimation from markdown by content length."""
+ docs = []
+ for i in range(2):
+ doc = MagicMock()
+ doc.metadata = {}
+ doc.text = "x" * 4000 # 4000 chars = 2 pages each
+ docs.append(doc)
+
+ result = service.estimate_pages_from_markdown(docs)
+ assert result == 4 # (4000/2000) * 2 = 4
+
+ def test_estimate_pages_from_markdown_empty_list(self, service):
+ """Test estimation from empty markdown list."""
+ result = service.estimate_pages_from_markdown([])
+ assert result == 1 # Minimum 1 page
+
+ def test_estimate_pages_from_content_length(self, service):
+ """Test estimation from content length."""
+ # 5000 chars should be ~2 pages
+ result = service.estimate_pages_from_content_length(5000)
+ assert result == 2
+
+ def test_estimate_pages_from_content_length_minimum(self, service):
+ """Test minimum of 1 page for small content."""
+ result = service.estimate_pages_from_content_length(100)
+ assert result == 1
+
+ def test_estimate_pages_from_content_length_zero(self, service):
+ """Test zero content length returns 1 page."""
+ result = service.estimate_pages_from_content_length(0)
+ assert result == 1
+
+
+class TestPageEstimationFromFile:
+ """Tests for estimate_pages_before_processing method."""
+
+ @pytest.fixture
+ def service(self):
+ """Create a PageLimitService with mock session."""
+ mock_session = AsyncMock()
+ return PageLimitService(mock_session)
+
+ def test_file_not_found(self, service):
+ """Test error when file doesn't exist."""
+ with pytest.raises(ValueError, match="File not found"):
+ service.estimate_pages_before_processing("/nonexistent/file.pdf")
+
+ def test_text_file_estimation(self, service):
+ """Test estimation for text files."""
+ with tempfile.NamedTemporaryFile(suffix=".txt", delete=False) as f:
+ # Write ~6000 bytes (2 pages at 3000 bytes/page)
+ f.write(b"x" * 6000)
+ f.flush()
+
+ try:
+ result = service.estimate_pages_before_processing(f.name)
+ assert result == 2
+ finally:
+ os.unlink(f.name)
+
+ def test_small_text_file(self, service):
+ """Test minimum 1 page for small files."""
+ with tempfile.NamedTemporaryFile(suffix=".txt", delete=False) as f:
+ f.write(b"small")
+ f.flush()
+
+ try:
+ result = service.estimate_pages_before_processing(f.name)
+ assert result == 1
+ finally:
+ os.unlink(f.name)
+
+ def test_markdown_file_estimation(self, service):
+ """Test estimation for markdown files."""
+ with tempfile.NamedTemporaryFile(suffix=".md", delete=False) as f:
+ # Need at least 6000 bytes for 2 pages (3000 bytes per page)
+ f.write(b"# Title\n" + b"x" * 6000)
+ f.flush()
+
+ try:
+ result = service.estimate_pages_before_processing(f.name)
+ assert result == 2
+ finally:
+ os.unlink(f.name)
+
+ def test_image_file_estimation(self, service):
+ """Test image files return 1 page."""
+ for ext in [".jpg", ".png", ".gif", ".bmp"]:
+ with tempfile.NamedTemporaryFile(suffix=ext, delete=False) as f:
+ f.write(b"fake image data" * 1000)
+ f.flush()
+
+ try:
+ result = service.estimate_pages_before_processing(f.name)
+ assert result == 1, f"Expected 1 page for {ext}"
+ finally:
+ os.unlink(f.name)
+
+ def test_word_doc_estimation(self, service):
+ """Test estimation for Word documents."""
+ with tempfile.NamedTemporaryFile(suffix=".docx", delete=False) as f:
+ # Write ~100KB (2 pages at 50KB/page)
+ f.write(b"x" * (100 * 1024))
+ f.flush()
+
+ try:
+ result = service.estimate_pages_before_processing(f.name)
+ assert result == 2
+ finally:
+ os.unlink(f.name)
+
+ def test_presentation_estimation(self, service):
+ """Test estimation for presentation files."""
+ with tempfile.NamedTemporaryFile(suffix=".pptx", delete=False) as f:
+ # Write ~400KB (2 slides at 200KB/slide)
+ f.write(b"x" * (400 * 1024))
+ f.flush()
+
+ try:
+ result = service.estimate_pages_before_processing(f.name)
+ assert result == 2
+ finally:
+ os.unlink(f.name)
+
+ def test_spreadsheet_estimation(self, service):
+ """Test estimation for spreadsheet files."""
+ with tempfile.NamedTemporaryFile(suffix=".xlsx", delete=False) as f:
+ # Write ~200KB (2 sheets at 100KB/sheet)
+ f.write(b"x" * (200 * 1024))
+ f.flush()
+
+ try:
+ result = service.estimate_pages_before_processing(f.name)
+ assert result == 2
+ finally:
+ os.unlink(f.name)
+
+ def test_html_file_estimation(self, service):
+ """Test estimation for HTML files."""
+ with tempfile.NamedTemporaryFile(suffix=".html", delete=False) as f:
+            f.write(b"<html>" + b"x" * 5980 + b"</html>")
+ f.flush()
+
+ try:
+ result = service.estimate_pages_before_processing(f.name)
+ assert result == 2 # ~6000 / 3000 = 2
+ finally:
+ os.unlink(f.name)
+
+ def test_unknown_extension(self, service):
+ """Test estimation for unknown file types."""
+ with tempfile.NamedTemporaryFile(suffix=".xyz", delete=False) as f:
+ # Write ~160KB (2 pages at 80KB/page)
+ f.write(b"x" * (160 * 1024))
+ f.flush()
+
+ try:
+ result = service.estimate_pages_before_processing(f.name)
+ assert result == 2
+ finally:
+ os.unlink(f.name)
+
+ def test_pdf_estimation_fallback(self, service):
+ """Test PDF estimation falls back when pypdf fails."""
+ with tempfile.NamedTemporaryFile(suffix=".pdf", delete=False) as f:
+ # Write invalid PDF data (will fail to parse)
+ f.write(b"not a real pdf" * 10000) # ~140KB
+ f.flush()
+
+ try:
+ result = service.estimate_pages_before_processing(f.name)
+            # Falls back to size-based estimation: ~140KB file -> at least 1 page
+ assert result >= 1
+ finally:
+ os.unlink(f.name)
+
+
+class TestPageLimitServiceDatabase:
+ """Tests for database operations (mocked)."""
+
+ @pytest.fixture
+ def mock_user(self):
+ """Create a mock user."""
+ user = MagicMock()
+ user.pages_used = 50
+ user.pages_limit = 100
+ return user
+
+ @pytest.fixture
+ def service(self):
+ """Create a PageLimitService with mock session."""
+ mock_session = AsyncMock()
+ return PageLimitService(mock_session)
+
+ @pytest.mark.asyncio
+ async def test_check_page_limit_success(self, service, mock_user):
+ """Test check_page_limit succeeds when within limit."""
+ # Setup mock to return user data
+ mock_result = MagicMock()
+ mock_result.first.return_value = (50, 100) # pages_used, pages_limit
+ service.session.execute.return_value = mock_result
+
+ has_capacity, pages_used, pages_limit = await service.check_page_limit(
+ "user-123",
+ estimated_pages=10,
+ )
+
+ assert has_capacity is True
+ assert pages_used == 50
+ assert pages_limit == 100
+
+ @pytest.mark.asyncio
+ async def test_check_page_limit_exceeds(self, service):
+ """Test check_page_limit raises error when would exceed limit."""
+ mock_result = MagicMock()
+ mock_result.first.return_value = (95, 100) # Near limit
+ service.session.execute.return_value = mock_result
+
+ with pytest.raises(PageLimitExceededError) as exc_info:
+ await service.check_page_limit("user-123", estimated_pages=10)
+
+ assert exc_info.value.pages_used == 95
+ assert exc_info.value.pages_limit == 100
+ assert exc_info.value.pages_to_add == 10
+
+ @pytest.mark.asyncio
+ async def test_check_page_limit_user_not_found(self, service):
+ """Test check_page_limit raises error for missing user."""
+ mock_result = MagicMock()
+ mock_result.first.return_value = None
+ service.session.execute.return_value = mock_result
+
+ with pytest.raises(ValueError, match="User with ID .* not found"):
+ await service.check_page_limit("nonexistent", estimated_pages=1)
+
+ @pytest.mark.asyncio
+ async def test_get_page_usage(self, service):
+ """Test get_page_usage returns correct values."""
+ mock_result = MagicMock()
+ mock_result.first.return_value = (75, 500)
+ service.session.execute.return_value = mock_result
+
+ result = await service.get_page_usage("user-123")
+
+ assert result == (75, 500)
+
+ @pytest.mark.asyncio
+ async def test_get_page_usage_user_not_found(self, service):
+ """Test get_page_usage raises error for missing user."""
+ mock_result = MagicMock()
+ mock_result.first.return_value = None
+ service.session.execute.return_value = mock_result
+
+ with pytest.raises(ValueError, match="User with ID .* not found"):
+ await service.get_page_usage("nonexistent")
diff --git a/surfsense_backend/tests/test_permissions.py b/surfsense_backend/tests/test_permissions.py
new file mode 100644
index 000000000..d63ba9e09
--- /dev/null
+++ b/surfsense_backend/tests/test_permissions.py
@@ -0,0 +1,270 @@
+"""
+Tests for permission functions in db.py.
+
+This module tests the permission checking functions used in RBAC.
+"""
+
+from app.db import (
+ DEFAULT_ROLE_PERMISSIONS,
+ Permission,
+ get_default_roles_config,
+ has_all_permissions,
+ has_any_permission,
+ has_permission,
+)
+
+
+class TestHasPermission:
+ """Tests for has_permission function."""
+
+ def test_has_permission_with_exact_match(self):
+ """Test has_permission returns True for exact permission match."""
+ permissions = [Permission.DOCUMENTS_READ.value, Permission.CHATS_READ.value]
+ assert has_permission(permissions, Permission.DOCUMENTS_READ.value) is True
+
+ def test_has_permission_with_no_match(self):
+ """Test has_permission returns False when permission not in list."""
+ permissions = [Permission.DOCUMENTS_READ.value]
+ assert has_permission(permissions, Permission.DOCUMENTS_CREATE.value) is False
+
+ def test_has_permission_with_full_access(self):
+ """Test has_permission returns True for any permission when user has FULL_ACCESS."""
+ permissions = [Permission.FULL_ACCESS.value]
+ assert has_permission(permissions, Permission.DOCUMENTS_CREATE.value) is True
+ assert has_permission(permissions, Permission.SETTINGS_DELETE.value) is True
+ assert has_permission(permissions, Permission.MEMBERS_MANAGE_ROLES.value) is True
+
+ def test_has_permission_with_empty_list(self):
+ """Test has_permission returns False for empty permission list."""
+ assert has_permission([], Permission.DOCUMENTS_READ.value) is False
+
+ def test_has_permission_with_none(self):
+ """Test has_permission returns False for None."""
+ assert has_permission(None, Permission.DOCUMENTS_READ.value) is False
+
+
+class TestHasAnyPermission:
+ """Tests for has_any_permission function."""
+
+ def test_has_any_permission_with_one_match(self):
+ """Test has_any_permission returns True when at least one permission matches."""
+ user_permissions = [Permission.DOCUMENTS_READ.value, Permission.CHATS_READ.value]
+ required = [Permission.DOCUMENTS_READ.value, Permission.DOCUMENTS_CREATE.value]
+ assert has_any_permission(user_permissions, required) is True
+
+ def test_has_any_permission_with_all_match(self):
+ """Test has_any_permission returns True when all permissions match."""
+ user_permissions = [Permission.DOCUMENTS_READ.value, Permission.CHATS_READ.value]
+ required = [Permission.DOCUMENTS_READ.value, Permission.CHATS_READ.value]
+ assert has_any_permission(user_permissions, required) is True
+
+ def test_has_any_permission_with_no_match(self):
+ """Test has_any_permission returns False when no permissions match."""
+ user_permissions = [Permission.DOCUMENTS_READ.value]
+ required = [Permission.CHATS_CREATE.value, Permission.SETTINGS_UPDATE.value]
+ assert has_any_permission(user_permissions, required) is False
+
+ def test_has_any_permission_with_full_access(self):
+ """Test has_any_permission returns True with FULL_ACCESS."""
+ user_permissions = [Permission.FULL_ACCESS.value]
+ required = [Permission.SETTINGS_DELETE.value]
+ assert has_any_permission(user_permissions, required) is True
+
+ def test_has_any_permission_with_empty_user_permissions(self):
+ """Test has_any_permission returns False with empty user permissions."""
+ assert has_any_permission([], [Permission.DOCUMENTS_READ.value]) is False
+
+ def test_has_any_permission_with_none(self):
+ """Test has_any_permission returns False with None."""
+ assert has_any_permission(None, [Permission.DOCUMENTS_READ.value]) is False
+
+
+class TestHasAllPermissions:
+ """Tests for has_all_permissions function."""
+
+ def test_has_all_permissions_with_all_match(self):
+ """Test has_all_permissions returns True when all permissions match."""
+ user_permissions = [
+ Permission.DOCUMENTS_READ.value,
+ Permission.DOCUMENTS_CREATE.value,
+ Permission.CHATS_READ.value,
+ ]
+ required = [Permission.DOCUMENTS_READ.value, Permission.DOCUMENTS_CREATE.value]
+ assert has_all_permissions(user_permissions, required) is True
+
+ def test_has_all_permissions_with_partial_match(self):
+ """Test has_all_permissions returns False when only some permissions match."""
+ user_permissions = [Permission.DOCUMENTS_READ.value]
+ required = [Permission.DOCUMENTS_READ.value, Permission.DOCUMENTS_CREATE.value]
+ assert has_all_permissions(user_permissions, required) is False
+
+ def test_has_all_permissions_with_no_match(self):
+ """Test has_all_permissions returns False when no permissions match."""
+ user_permissions = [Permission.CHATS_READ.value]
+ required = [Permission.DOCUMENTS_READ.value, Permission.DOCUMENTS_CREATE.value]
+ assert has_all_permissions(user_permissions, required) is False
+
+ def test_has_all_permissions_with_full_access(self):
+ """Test has_all_permissions returns True with FULL_ACCESS."""
+ user_permissions = [Permission.FULL_ACCESS.value]
+ required = [
+ Permission.DOCUMENTS_READ.value,
+ Permission.DOCUMENTS_CREATE.value,
+ Permission.SETTINGS_DELETE.value,
+ ]
+ assert has_all_permissions(user_permissions, required) is True
+
+ def test_has_all_permissions_with_empty_user_permissions(self):
+ """Test has_all_permissions returns False with empty user permissions."""
+ assert has_all_permissions([], [Permission.DOCUMENTS_READ.value]) is False
+
+ def test_has_all_permissions_with_none(self):
+ """Test has_all_permissions returns False with None."""
+ assert has_all_permissions(None, [Permission.DOCUMENTS_READ.value]) is False
+
+ def test_has_all_permissions_with_empty_required(self):
+ """Test has_all_permissions returns True with empty required list."""
+ user_permissions = [Permission.DOCUMENTS_READ.value]
+ assert has_all_permissions(user_permissions, []) is True
+
+
+class TestPermissionEnum:
+ """Tests for Permission enum values."""
+
+ def test_permission_values_are_strings(self):
+ """Test all permission values are strings."""
+ for perm in Permission:
+ assert isinstance(perm.value, str)
+
+ def test_permission_document_values(self):
+ """Test document permission values."""
+ assert Permission.DOCUMENTS_CREATE.value == "documents:create"
+ assert Permission.DOCUMENTS_READ.value == "documents:read"
+ assert Permission.DOCUMENTS_UPDATE.value == "documents:update"
+ assert Permission.DOCUMENTS_DELETE.value == "documents:delete"
+
+ def test_permission_chat_values(self):
+ """Test chat permission values."""
+ assert Permission.CHATS_CREATE.value == "chats:create"
+ assert Permission.CHATS_READ.value == "chats:read"
+ assert Permission.CHATS_UPDATE.value == "chats:update"
+ assert Permission.CHATS_DELETE.value == "chats:delete"
+
+ def test_permission_llm_config_values(self):
+ """Test LLM config permission values."""
+ assert Permission.LLM_CONFIGS_CREATE.value == "llm_configs:create"
+ assert Permission.LLM_CONFIGS_READ.value == "llm_configs:read"
+ assert Permission.LLM_CONFIGS_UPDATE.value == "llm_configs:update"
+ assert Permission.LLM_CONFIGS_DELETE.value == "llm_configs:delete"
+
+ def test_permission_members_values(self):
+ """Test member permission values."""
+ assert Permission.MEMBERS_INVITE.value == "members:invite"
+ assert Permission.MEMBERS_VIEW.value == "members:view"
+ assert Permission.MEMBERS_REMOVE.value == "members:remove"
+ assert Permission.MEMBERS_MANAGE_ROLES.value == "members:manage_roles"
+
+ def test_permission_full_access_value(self):
+ """Test FULL_ACCESS permission value."""
+ assert Permission.FULL_ACCESS.value == "*"
+
+
+class TestDefaultRolePermissions:
+ """Tests for DEFAULT_ROLE_PERMISSIONS configuration."""
+
+ def test_owner_has_full_access(self):
+ """Test Owner role has full access."""
+ assert Permission.FULL_ACCESS.value in DEFAULT_ROLE_PERMISSIONS["Owner"]
+
+ def test_admin_permissions(self):
+ """Test Admin role has appropriate permissions."""
+ admin_perms = DEFAULT_ROLE_PERMISSIONS["Admin"]
+ # Admin should have document permissions
+ assert Permission.DOCUMENTS_CREATE.value in admin_perms
+ assert Permission.DOCUMENTS_READ.value in admin_perms
+ assert Permission.DOCUMENTS_UPDATE.value in admin_perms
+ assert Permission.DOCUMENTS_DELETE.value in admin_perms
+ # Admin should NOT have settings:delete
+ assert Permission.SETTINGS_DELETE.value not in admin_perms
+
+ def test_editor_permissions(self):
+ """Test Editor role has appropriate permissions."""
+ editor_perms = DEFAULT_ROLE_PERMISSIONS["Editor"]
+ # Editor should have document CRUD
+ assert Permission.DOCUMENTS_CREATE.value in editor_perms
+ assert Permission.DOCUMENTS_READ.value in editor_perms
+ assert Permission.DOCUMENTS_UPDATE.value in editor_perms
+ assert Permission.DOCUMENTS_DELETE.value in editor_perms
+ # Editor should have chat CRUD
+ assert Permission.CHATS_CREATE.value in editor_perms
+ assert Permission.CHATS_READ.value in editor_perms
+ # Editor should NOT have member management
+ assert Permission.MEMBERS_REMOVE.value not in editor_perms
+
+ def test_viewer_permissions(self):
+ """Test Viewer role has read-only permissions."""
+ viewer_perms = DEFAULT_ROLE_PERMISSIONS["Viewer"]
+ # Viewer should have read permissions
+ assert Permission.DOCUMENTS_READ.value in viewer_perms
+ assert Permission.CHATS_READ.value in viewer_perms
+ assert Permission.LLM_CONFIGS_READ.value in viewer_perms
+ # Viewer should NOT have create/update/delete permissions
+ assert Permission.DOCUMENTS_CREATE.value not in viewer_perms
+ assert Permission.DOCUMENTS_UPDATE.value not in viewer_perms
+ assert Permission.DOCUMENTS_DELETE.value not in viewer_perms
+ assert Permission.CHATS_CREATE.value not in viewer_perms
+
+
+class TestGetDefaultRolesConfig:
+ """Tests for get_default_roles_config function."""
+
+ def test_returns_list(self):
+ """Test get_default_roles_config returns a list."""
+ config = get_default_roles_config()
+ assert isinstance(config, list)
+
+ def test_contains_four_roles(self):
+ """Test get_default_roles_config returns 4 roles."""
+ config = get_default_roles_config()
+ assert len(config) == 4
+
+ def test_role_names(self):
+ """Test get_default_roles_config contains expected role names."""
+ config = get_default_roles_config()
+ role_names = [role["name"] for role in config]
+ assert "Owner" in role_names
+ assert "Admin" in role_names
+ assert "Editor" in role_names
+ assert "Viewer" in role_names
+
+ def test_all_roles_are_system_roles(self):
+ """Test all default roles are system roles."""
+ config = get_default_roles_config()
+ for role in config:
+ assert role["is_system_role"] is True
+
+ def test_editor_is_default_role(self):
+ """Test Editor is the default role for new members."""
+ config = get_default_roles_config()
+ editor_role = next(role for role in config if role["name"] == "Editor")
+ assert editor_role["is_default"] is True
+
+ def test_owner_is_not_default_role(self):
+ """Test Owner is not the default role."""
+ config = get_default_roles_config()
+ owner_role = next(role for role in config if role["name"] == "Owner")
+ assert owner_role["is_default"] is False
+
+ def test_role_structure(self):
+ """Test each role has required fields."""
+ config = get_default_roles_config()
+ required_fields = ["name", "description", "permissions", "is_default", "is_system_role"]
+ for role in config:
+ for field in required_fields:
+ assert field in role, f"Role {role.get('name')} missing field {field}"
+
+ def test_owner_role_permissions(self):
+ """Test Owner role has full access permission."""
+ config = get_default_roles_config()
+ owner_role = next(role for role in config if role["name"] == "Owner")
+ assert Permission.FULL_ACCESS.value in owner_role["permissions"]
diff --git a/surfsense_backend/tests/test_rbac.py b/surfsense_backend/tests/test_rbac.py
new file mode 100644
index 000000000..1e0c35bdd
--- /dev/null
+++ b/surfsense_backend/tests/test_rbac.py
@@ -0,0 +1,355 @@
+"""
+Tests for the RBAC (Role-Based Access Control) utility functions.
+
+These tests validate the security-critical RBAC behavior:
+1. Users without membership should NEVER access resources
+2. Permission checks must be strict - no false positives
+3. Owners must have full access
+4. Role permissions must be properly enforced
+"""
+
+import uuid
+from unittest.mock import AsyncMock, MagicMock, patch
+
+import pytest
+from fastapi import HTTPException
+
+# Skip these tests if app dependencies aren't installed
+pytest.importorskip("sqlalchemy")
+pytest.importorskip("fastapi_users")
+
+from app.db import Permission, SearchSpaceMembership, SearchSpaceRole
+from app.utils.rbac import (
+ check_permission,
+ check_search_space_access,
+ generate_invite_code,
+ get_default_role,
+ get_owner_role,
+ get_user_permissions,
+ is_search_space_owner,
+)
+
+
class TestSecurityCriticalAccessControl:
    """
    Critical security tests - these MUST pass to prevent unauthorized access.
    """

    # NOTE(review): `mock_session` and `mock_user` are pytest fixtures assumed
    # to be provided by conftest.py (not visible in this file) — confirm.

    @pytest.mark.asyncio
    async def test_non_member_cannot_access_search_space(self, mock_session, mock_user):
        """
        SECURITY: Non-members must be denied access with 403.
        This is critical - allowing access would be a security breach.
        """
        search_space_id = 1

        # Simulate user not being a member: the membership query yields no row
        # via the SQLAlchemy-style chain execute() -> scalars() -> first().
        mock_result = MagicMock()
        mock_result.scalars.return_value.first.return_value = None
        mock_session.execute = AsyncMock(return_value=mock_result)

        with pytest.raises(HTTPException) as exc_info:
            await check_search_space_access(mock_session, mock_user, search_space_id)

        # Must be 403 Forbidden, not 404 or other
        assert exc_info.value.status_code == 403
        assert "access" in exc_info.value.detail.lower()

    @pytest.mark.asyncio
    async def test_member_without_permission_is_denied(self, mock_session, mock_user):
        """
        SECURITY: Members without specific permission must be denied.
        Having membership alone is insufficient for sensitive operations.
        """
        search_space_id = 1

        # Member exists but has limited permissions (only read, not write)
        mock_role = MagicMock(spec=SearchSpaceRole)
        mock_role.permissions = ["documents:read"]  # Does NOT have write

        mock_membership = MagicMock(spec=SearchSpaceMembership)
        mock_membership.is_owner = False
        mock_membership.role = mock_role

        mock_result = MagicMock()
        mock_result.scalars.return_value.first.return_value = mock_membership
        mock_session.execute = AsyncMock(return_value=mock_result)

        # Attempt to access a write operation - must fail.
        # has_permission is forced to False to isolate the denial path.
        with patch("app.utils.rbac.has_permission", return_value=False):
            with pytest.raises(HTTPException) as exc_info:
                await check_permission(
                    mock_session,
                    mock_user,
                    search_space_id,
                    "documents:write",
                )

        assert exc_info.value.status_code == 403

    @pytest.mark.asyncio
    async def test_owner_has_full_access_regardless_of_operation(
        self, mock_session, mock_user
    ):
        """
        SECURITY: Owners must have full access to all operations.
        This ensures owners can always manage their search spaces.
        """
        search_space_id = 1

        mock_membership = MagicMock(spec=SearchSpaceMembership)
        mock_membership.is_owner = True
        mock_membership.role = None  # Owners may not have explicit roles

        mock_result = MagicMock()
        mock_result.scalars.return_value.first.return_value = mock_membership
        mock_session.execute = AsyncMock(return_value=mock_result)

        # Owner should pass permission check with FULL_ACCESS
        with patch("app.utils.rbac.has_permission", return_value=True) as mock_has_perm:
            result = await check_permission(
                mock_session,
                mock_user,
                search_space_id,
                "any:permission",
            )

        assert result == mock_membership
        # Verify FULL_ACCESS was checked
        # (first positional argument to has_permission is the permission list).
        mock_has_perm.assert_called_once()
        call_args = mock_has_perm.call_args[0]
        assert Permission.FULL_ACCESS.value in call_args[0]
+
+
class TestGetUserPermissions:
    """Tests for permission retrieval - validates correct permission inheritance."""

    @pytest.mark.asyncio
    async def test_non_member_has_no_permissions(self, mock_session):
        """Non-members must have zero permissions."""
        user_id = uuid.uuid4()
        search_space_id = 1

        # Membership lookup returns no row -> caller is not a member.
        mock_result = MagicMock()
        mock_result.scalars.return_value.first.return_value = None
        mock_session.execute = AsyncMock(return_value=mock_result)

        result = await get_user_permissions(mock_session, user_id, search_space_id)

        assert result == []
        assert len(result) == 0

    @pytest.mark.asyncio
    async def test_owner_gets_full_access_permission(self, mock_session):
        """Owners must receive FULL_ACCESS permission."""
        user_id = uuid.uuid4()
        search_space_id = 1

        # Owner membership with no explicit role attached.
        mock_membership = MagicMock(spec=SearchSpaceMembership)
        mock_membership.is_owner = True
        mock_membership.role = None

        mock_result = MagicMock()
        mock_result.scalars.return_value.first.return_value = mock_membership
        mock_session.execute = AsyncMock(return_value=mock_result)

        result = await get_user_permissions(mock_session, user_id, search_space_id)

        assert Permission.FULL_ACCESS.value in result

    @pytest.mark.asyncio
    async def test_member_gets_only_role_permissions(self, mock_session):
        """Members should get exactly the permissions from their role - no more, no less."""
        user_id = uuid.uuid4()
        search_space_id = 1

        expected_permissions = ["documents:read", "chats:read"]

        mock_role = MagicMock(spec=SearchSpaceRole)
        # Copy so the equality assertion cannot be satisfied by aliasing.
        mock_role.permissions = expected_permissions.copy()

        mock_membership = MagicMock(spec=SearchSpaceMembership)
        mock_membership.is_owner = False
        mock_membership.role = mock_role

        mock_result = MagicMock()
        mock_result.scalars.return_value.first.return_value = mock_membership
        mock_session.execute = AsyncMock(return_value=mock_result)

        result = await get_user_permissions(mock_session, user_id, search_space_id)

        # Must match exactly - no extra permissions sneaking in
        assert set(result) == set(expected_permissions)
        assert len(result) == len(expected_permissions)

    @pytest.mark.asyncio
    async def test_member_without_role_has_no_permissions(self, mock_session):
        """Members without an assigned role must have empty permissions."""
        user_id = uuid.uuid4()
        search_space_id = 1

        mock_membership = MagicMock(spec=SearchSpaceMembership)
        mock_membership.is_owner = False
        mock_membership.role = None

        mock_result = MagicMock()
        mock_result.scalars.return_value.first.return_value = mock_membership
        mock_session.execute = AsyncMock(return_value=mock_result)

        result = await get_user_permissions(mock_session, user_id, search_space_id)

        assert result == []
+
+
class TestOwnershipChecks:
    """Tests for ownership verification."""

    @staticmethod
    def _stub_membership_query(session, membership):
        """Make `session.execute` return a result whose first row is `membership`."""
        result = MagicMock()
        result.scalars.return_value.first.return_value = membership
        session.execute = AsyncMock(return_value=result)

    @pytest.mark.asyncio
    async def test_is_owner_returns_true_only_for_actual_owner(self, mock_session):
        """is_search_space_owner must return True ONLY for actual owners."""
        owner = MagicMock(spec=SearchSpaceMembership)
        owner.is_owner = True
        self._stub_membership_query(mock_session, owner)

        assert await is_search_space_owner(mock_session, uuid.uuid4(), 1) is True

    @pytest.mark.asyncio
    async def test_is_owner_returns_false_for_non_owner_member(self, mock_session):
        """Regular members must NOT be identified as owners."""
        member = MagicMock(spec=SearchSpaceMembership)
        member.is_owner = False
        self._stub_membership_query(mock_session, member)

        assert await is_search_space_owner(mock_session, uuid.uuid4(), 1) is False

    @pytest.mark.asyncio
    async def test_is_owner_returns_false_for_non_member(self, mock_session):
        """Non-members must NOT be identified as owners."""
        # No membership row at all.
        self._stub_membership_query(mock_session, None)

        assert await is_search_space_owner(mock_session, uuid.uuid4(), 1) is False
+
+
class TestInviteCodeSecurity:
    """Tests for invite code generation - validates security requirements."""

    def test_invite_codes_are_cryptographically_unique(self):
        """
        Invite codes must be cryptographically random to prevent guessing.
        Generate many codes and verify no collisions.
        """
        total = 1000
        # A set collapses duplicates, so any collision shrinks the set.
        generated = {generate_invite_code() for _ in range(total)}
        assert len(generated) == total

    def test_invite_code_has_sufficient_entropy(self):
        """
        Invite codes must have sufficient length for security.
        32 characters of URL-safe base64 = ~192 bits of entropy.
        """
        # Minimum 32 characters for adequate security.
        assert len(generate_invite_code()) >= 32

    def test_invite_code_is_url_safe(self):
        """Invite codes must be safe for use in URLs without encoding."""
        import re

        # URL-safe base64 alphabet: letters, digits, '-' and '_'.
        assert re.fullmatch(r"[A-Za-z0-9_-]+", generate_invite_code()) is not None

    def test_invite_codes_are_unpredictable(self):
        """
        Sequential invite codes must not be predictable.
        Verify no obvious patterns in consecutive codes.
        """
        codes = [generate_invite_code() for _ in range(10)]

        # First 8 chars should differ between each pair of consecutive codes.
        for earlier, later in zip(codes, codes[1:]):
            assert earlier[:8] != later[:8]
+
+
class TestRoleRetrieval:
    """Tests for role lookup functions."""

    @pytest.mark.asyncio
    async def test_get_default_role_returns_correct_role(self, mock_session):
        """Default role lookup must return the role marked as default."""
        search_space_id = 1

        mock_role = MagicMock(spec=SearchSpaceRole)
        mock_role.name = "Viewer"
        mock_role.is_default = True

        # Query chain execute() -> scalars() -> first() yields the role.
        mock_result = MagicMock()
        mock_result.scalars.return_value.first.return_value = mock_role
        mock_session.execute = AsyncMock(return_value=mock_result)

        result = await get_default_role(mock_session, search_space_id)

        assert result is not None
        assert result.is_default is True

    @pytest.mark.asyncio
    async def test_get_default_role_returns_none_when_no_default(self, mock_session):
        """Must return None if no default role exists - not raise an error."""
        search_space_id = 1

        # No row -> no default role configured for this search space.
        mock_result = MagicMock()
        mock_result.scalars.return_value.first.return_value = None
        mock_session.execute = AsyncMock(return_value=mock_result)

        result = await get_default_role(mock_session, search_space_id)

        assert result is None

    @pytest.mark.asyncio
    async def test_get_owner_role_returns_owner_named_role(self, mock_session):
        """Owner role lookup must return the role named 'Owner'."""
        search_space_id = 1

        mock_role = MagicMock(spec=SearchSpaceRole)
        mock_role.name = "Owner"

        mock_result = MagicMock()
        mock_result.scalars.return_value.first.return_value = mock_role
        mock_session.execute = AsyncMock(return_value=mock_result)

        result = await get_owner_role(mock_session, search_space_id)

        assert result is not None
        assert result.name == "Owner"
diff --git a/surfsense_backend/tests/test_rbac_schemas.py b/surfsense_backend/tests/test_rbac_schemas.py
new file mode 100644
index 000000000..1d4a336c2
--- /dev/null
+++ b/surfsense_backend/tests/test_rbac_schemas.py
@@ -0,0 +1,392 @@
+"""
+Tests for RBAC schemas.
+
+This module tests the Pydantic schemas used for role-based access control.
+"""
+
+from datetime import datetime, timezone
+from uuid import uuid4
+
+import pytest
+from pydantic import ValidationError
+
+from app.schemas.rbac_schemas import (
+ InviteAcceptRequest,
+ InviteAcceptResponse,
+ InviteBase,
+ InviteCreate,
+ InviteInfoResponse,
+ InviteRead,
+ InviteUpdate,
+ MembershipBase,
+ MembershipRead,
+ MembershipReadWithUser,
+ MembershipUpdate,
+ PermissionInfo,
+ PermissionsListResponse,
+ RoleBase,
+ RoleCreate,
+ RoleRead,
+ RoleUpdate,
+ UserSearchSpaceAccess,
+)
+
+
class TestRoleSchemas:
    """Tests for role-related schemas."""

    def test_role_base_minimal(self):
        """Test RoleBase with minimal data."""
        role = RoleBase(name="TestRole")
        assert role.name == "TestRole"
        # Defaults: no description, empty permission list, not the default role.
        assert role.description is None
        assert role.permissions == []
        assert role.is_default is False

    def test_role_base_full(self):
        """Test RoleBase with all fields."""
        role = RoleBase(
            name="Admin",
            description="Administrator role",
            permissions=["documents:read", "documents:write"],
            is_default=True,
        )
        assert role.name == "Admin"
        assert role.description == "Administrator role"
        assert len(role.permissions) == 2
        assert role.is_default is True

    def test_role_base_name_validation(self):
        """Test RoleBase name length validation."""
        # Empty name should fail
        with pytest.raises(ValidationError):
            RoleBase(name="")

        # Name at max length (100 chars) should work
        role = RoleBase(name="x" * 100)
        assert len(role.name) == 100

        # Name over max length should fail
        with pytest.raises(ValidationError):
            RoleBase(name="x" * 101)

    def test_role_base_description_validation(self):
        """Test RoleBase description length validation."""
        # Description at max length (500 chars) should work
        role = RoleBase(name="Test", description="x" * 500)
        assert len(role.description) == 500

        # Description over max length should fail
        with pytest.raises(ValidationError):
            RoleBase(name="Test", description="x" * 501)

    def test_role_create(self):
        """Test RoleCreate schema."""
        role = RoleCreate(
            name="Editor",
            permissions=["documents:create", "documents:read"],
        )
        assert role.name == "Editor"

    def test_role_update_partial(self):
        """Test RoleUpdate with partial data."""
        # Unset fields stay None so PATCH-style updates can omit them.
        update = RoleUpdate(name="NewName")
        assert update.name == "NewName"
        assert update.description is None
        assert update.permissions is None
        assert update.is_default is None

    def test_role_update_full(self):
        """Test RoleUpdate with all fields."""
        update = RoleUpdate(
            name="UpdatedRole",
            description="Updated description",
            permissions=["chats:read"],
            is_default=True,
        )
        assert update.permissions == ["chats:read"]

    def test_role_read(self):
        """Test RoleRead schema."""
        now = datetime.now(timezone.utc)
        role = RoleRead(
            id=1,
            name="Viewer",
            description="View-only access",
            permissions=["documents:read"],
            is_default=False,
            search_space_id=5,
            is_system_role=True,
            created_at=now,
        )
        assert role.id == 1
        assert role.is_system_role is True
        assert role.search_space_id == 5
+
+
class TestMembershipSchemas:
    """Tests for membership-related schemas."""

    def test_membership_base(self):
        """Test MembershipBase schema."""
        # MembershipBase has no required fields; bare construction must work.
        membership = MembershipBase()
        assert membership is not None

    def test_membership_update(self):
        """Test MembershipUpdate schema."""
        update = MembershipUpdate(role_id=5)
        assert update.role_id == 5

    def test_membership_update_optional(self):
        """Test MembershipUpdate with no data."""
        update = MembershipUpdate()
        assert update.role_id is None

    def test_membership_read(self):
        """Test MembershipRead schema."""
        now = datetime.now(timezone.utc)
        user_id = uuid4()
        membership = MembershipRead(
            id=1,
            user_id=user_id,
            search_space_id=10,
            role_id=2,
            is_owner=False,
            joined_at=now,
            created_at=now,
            role=None,
        )
        assert membership.user_id == user_id
        assert membership.search_space_id == 10
        assert membership.is_owner is False

    def test_membership_read_with_role(self):
        """Test MembershipRead with nested role."""
        now = datetime.now(timezone.utc)
        user_id = uuid4()
        # Nested RoleRead exercises the schema's relationship embedding.
        role = RoleRead(
            id=2,
            name="Editor",
            permissions=["documents:create"],
            is_default=True,
            search_space_id=10,
            is_system_role=True,
            created_at=now,
        )
        membership = MembershipRead(
            id=1,
            user_id=user_id,
            search_space_id=10,
            role_id=2,
            is_owner=False,
            joined_at=now,
            created_at=now,
            role=role,
        )
        assert membership.role.name == "Editor"

    def test_membership_read_with_user(self):
        """Test MembershipReadWithUser schema."""
        now = datetime.now(timezone.utc)
        user_id = uuid4()
        membership = MembershipReadWithUser(
            id=1,
            user_id=user_id,
            search_space_id=10,
            role_id=2,
            is_owner=True,
            joined_at=now,
            created_at=now,
            user_email="test@example.com",
            user_is_active=True,
        )
        assert membership.user_email == "test@example.com"
        assert membership.user_is_active is True
+
+
class TestInviteSchemas:
    """Tests for invite-related schemas."""

    def test_invite_base_minimal(self):
        """Test InviteBase with minimal data."""
        # All fields optional: an invite can be open-ended and unnamed.
        invite = InviteBase()
        assert invite.name is None
        assert invite.role_id is None
        assert invite.expires_at is None
        assert invite.max_uses is None

    def test_invite_base_full(self):
        """Test InviteBase with all fields."""
        expires = datetime.now(timezone.utc)
        invite = InviteBase(
            name="Team Invite",
            role_id=3,
            expires_at=expires,
            max_uses=10,
        )
        assert invite.name == "Team Invite"
        assert invite.max_uses == 10

    def test_invite_base_max_uses_validation(self):
        """Test InviteBase max_uses must be >= 1."""
        # Zero uses would make the invite meaningless.
        with pytest.raises(ValidationError):
            InviteBase(max_uses=0)

        # Valid minimum
        invite = InviteBase(max_uses=1)
        assert invite.max_uses == 1

    def test_invite_create(self):
        """Test InviteCreate schema."""
        invite = InviteCreate(
            name="Dev Team",
            role_id=2,
            max_uses=5,
        )
        assert invite.name == "Dev Team"

    def test_invite_update_partial(self):
        """Test InviteUpdate with partial data."""
        update = InviteUpdate(is_active=False)
        assert update.is_active is False
        assert update.name is None

    def test_invite_update_full(self):
        """Test InviteUpdate with all fields."""
        expires = datetime.now(timezone.utc)
        update = InviteUpdate(
            name="Updated Invite",
            role_id=4,
            expires_at=expires,
            max_uses=20,
            is_active=True,
        )
        assert update.name == "Updated Invite"

    def test_invite_read(self):
        """Test InviteRead schema."""
        now = datetime.now(timezone.utc)
        user_id = uuid4()
        invite = InviteRead(
            id=1,
            invite_code="abc123xyz",
            search_space_id=5,
            created_by_id=user_id,
            uses_count=3,
            is_active=True,
            created_at=now,
        )
        assert invite.invite_code == "abc123xyz"
        assert invite.uses_count == 3

    def test_invite_accept_request(self):
        """Test InviteAcceptRequest schema."""
        request = InviteAcceptRequest(invite_code="valid-code-123")
        assert request.invite_code == "valid-code-123"

    def test_invite_accept_request_validation(self):
        """Test InviteAcceptRequest requires non-empty code."""
        with pytest.raises(ValidationError):
            InviteAcceptRequest(invite_code="")

    def test_invite_accept_response(self):
        """Test InviteAcceptResponse schema."""
        response = InviteAcceptResponse(
            message="Successfully joined",
            search_space_id=10,
            search_space_name="My Workspace",
            role_name="Editor",
        )
        assert response.message == "Successfully joined"
        assert response.search_space_name == "My Workspace"

    def test_invite_info_response(self):
        """Test InviteInfoResponse schema."""
        response = InviteInfoResponse(
            search_space_name="Public Space",
            role_name="Viewer",
            is_valid=True,
            message=None,
        )
        assert response.is_valid is True

    def test_invite_info_response_invalid(self):
        """Test InviteInfoResponse for invalid invite."""
        # An expired invite is reported as invalid with an explanatory message.
        response = InviteInfoResponse(
            search_space_name="",
            role_name=None,
            is_valid=False,
            message="Invite has expired",
        )
        assert response.is_valid is False
        assert response.message == "Invite has expired"
+
+
class TestPermissionSchemas:
    """Tests for permission-related schemas."""

    def test_permission_info(self):
        """Test PermissionInfo schema."""
        info = PermissionInfo(
            value="documents:create",
            name="Create Documents",
            category="Documents",
        )
        assert info.value == "documents:create"
        assert info.category == "Documents"

    def test_permissions_list_response(self):
        """Test PermissionsListResponse schema."""
        listed = PermissionsListResponse(
            permissions=[
                PermissionInfo(value="documents:read", name="Read Documents", category="Documents"),
                PermissionInfo(value="chats:read", name="Read Chats", category="Chats"),
            ]
        )
        assert len(listed.permissions) == 2

    def test_permissions_list_response_empty(self):
        """Test PermissionsListResponse with empty list."""
        # An empty permission catalog is valid and round-trips as [].
        assert PermissionsListResponse(permissions=[]).permissions == []
+
+
class TestUserAccessSchemas:
    """Tests for user access schemas."""

    def test_user_search_space_access(self):
        """Test UserSearchSpaceAccess schema."""
        owner_access = UserSearchSpaceAccess(
            search_space_id=5,
            search_space_name="My Workspace",
            is_owner=True,
            role_name="Owner",
            permissions=["*"],
        )
        assert owner_access.search_space_id == 5
        assert owner_access.is_owner is True
        # "*" is the wildcard permission entry used for owners here.
        assert "*" in owner_access.permissions

    def test_user_search_space_access_member(self):
        """Test UserSearchSpaceAccess for regular member."""
        member_access = UserSearchSpaceAccess(
            search_space_id=10,
            search_space_name="Team Space",
            is_owner=False,
            role_name="Editor",
            permissions=["documents:create", "documents:read", "chats:create"],
        )
        assert member_access.is_owner is False
        assert member_access.role_name == "Editor"
        assert len(member_access.permissions) == 3

    def test_user_search_space_access_no_role(self):
        """Test UserSearchSpaceAccess with no role."""
        # A member may have no role assigned yet: no role name, no permissions.
        roleless_access = UserSearchSpaceAccess(
            search_space_id=15,
            search_space_name="Guest Space",
            is_owner=False,
            role_name=None,
            permissions=[],
        )
        assert roleless_access.role_name is None
        assert roleless_access.permissions == []
diff --git a/surfsense_backend/tests/test_rbac_utils.py b/surfsense_backend/tests/test_rbac_utils.py
new file mode 100644
index 000000000..193baada6
--- /dev/null
+++ b/surfsense_backend/tests/test_rbac_utils.py
@@ -0,0 +1,340 @@
+"""
+Tests for RBAC utility functions.
+
+This module tests the RBAC helper functions used for access control.
+"""
+
+from unittest.mock import AsyncMock, MagicMock, patch
+from uuid import uuid4
+
+import pytest
+from fastapi import HTTPException
+
+from app.db import Permission
+from app.utils.rbac import (
+ check_permission,
+ check_search_space_access,
+ generate_invite_code,
+ get_user_membership,
+ get_user_permissions,
+ is_search_space_owner,
+)
+
+
class TestGenerateInviteCode:
    """Tests for generate_invite_code function."""

    def test_generates_string(self):
        """Test that function generates a string."""
        code = generate_invite_code()
        assert isinstance(code, str)

    def test_generates_unique_codes(self):
        """Test that function generates unique codes."""
        codes = {generate_invite_code() for _ in range(100)}
        assert len(codes) == 100  # All unique

    def test_code_is_url_safe(self):
        """Test that generated code is URL-safe."""
        code = generate_invite_code()
        # URL-safe characters: alphanumeric, hyphen, underscore
        valid_chars = set("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-_")
        assert all(c in valid_chars for c in code)

    def test_code_length(self):
        """Test that generated code has sufficient length.

        Fixed: the previous assertion pinned the exact length (== 32), which
        encodes the current token_urlsafe(24) implementation detail and
        contradicts its own "~32" comment. Assert the security floor instead,
        matching the entropy test in test_rbac.py.
        """
        code = generate_invite_code()
        # token_urlsafe(24) produces ~32 characters; require at least 32.
        assert len(code) >= 32
+
+
class TestGetUserMembership:
    """Tests for get_user_membership function."""

    @pytest.mark.asyncio
    async def test_returns_membership(self):
        """Test returns membership when found."""
        mock_membership = MagicMock()
        mock_membership.is_owner = True

        # SQLAlchemy-style result chain: execute() -> scalars() -> first().
        mock_result = MagicMock()
        mock_result.scalars.return_value.first.return_value = mock_membership

        mock_session = AsyncMock()
        mock_session.execute.return_value = mock_result

        user_id = uuid4()
        result = await get_user_membership(mock_session, user_id, 1)

        assert result == mock_membership
        assert result.is_owner is True

    @pytest.mark.asyncio
    async def test_returns_none_when_not_found(self):
        """Test returns None when membership not found."""
        # Query yields no row -> no membership for this user/space pair.
        mock_result = MagicMock()
        mock_result.scalars.return_value.first.return_value = None

        mock_session = AsyncMock()
        mock_session.execute.return_value = mock_result

        user_id = uuid4()
        result = await get_user_membership(mock_session, user_id, 999)

        assert result is None
+
+
class TestGetUserPermissions:
    """Tests for get_user_permissions function."""

    # These tests patch app.utils.rbac.get_user_membership so that only the
    # permission-derivation logic of get_user_permissions is exercised.

    @pytest.mark.asyncio
    async def test_owner_has_full_access(self):
        """Test owner gets FULL_ACCESS permission."""
        mock_membership = MagicMock()
        mock_membership.is_owner = True
        mock_membership.role = None

        with patch("app.utils.rbac.get_user_membership", return_value=mock_membership):
            mock_session = AsyncMock()
            user_id = uuid4()

            permissions = await get_user_permissions(mock_session, user_id, 1)

            assert Permission.FULL_ACCESS.value in permissions

    @pytest.mark.asyncio
    async def test_member_gets_role_permissions(self):
        """Test member gets permissions from their role."""
        mock_role = MagicMock()
        mock_role.permissions = ["documents:read", "chats:create"]

        mock_membership = MagicMock()
        mock_membership.is_owner = False
        mock_membership.role = mock_role

        with patch("app.utils.rbac.get_user_membership", return_value=mock_membership):
            mock_session = AsyncMock()
            user_id = uuid4()

            permissions = await get_user_permissions(mock_session, user_id, 1)

            # Exactly the role's permissions, in order.
            assert permissions == ["documents:read", "chats:create"]

    @pytest.mark.asyncio
    async def test_no_membership_returns_empty(self):
        """Test no membership returns empty permissions."""
        with patch("app.utils.rbac.get_user_membership", return_value=None):
            mock_session = AsyncMock()
            user_id = uuid4()

            permissions = await get_user_permissions(mock_session, user_id, 1)

            assert permissions == []

    @pytest.mark.asyncio
    async def test_no_role_returns_empty(self):
        """Test member without role returns empty permissions."""
        mock_membership = MagicMock()
        mock_membership.is_owner = False
        mock_membership.role = None

        with patch("app.utils.rbac.get_user_membership", return_value=mock_membership):
            mock_session = AsyncMock()
            user_id = uuid4()

            permissions = await get_user_permissions(mock_session, user_id, 1)

            assert permissions == []
+
+
class TestCheckPermission:
    """Tests for check_permission function."""

    # The membership lookup is patched throughout so only check_permission's
    # allow/deny decision logic is under test.

    @pytest.mark.asyncio
    async def test_owner_passes_any_permission(self):
        """Test owner passes any permission check."""
        mock_membership = MagicMock()
        mock_membership.is_owner = True
        mock_membership.role = None

        with patch("app.utils.rbac.get_user_membership", return_value=mock_membership):
            mock_session = AsyncMock()
            mock_user = MagicMock()
            mock_user.id = uuid4()

            # Even a destructive settings permission must pass for an owner.
            result = await check_permission(
                mock_session,
                mock_user,
                1,
                Permission.SETTINGS_DELETE.value,
            )

            assert result == mock_membership

    @pytest.mark.asyncio
    async def test_member_with_permission_passes(self):
        """Test member with required permission passes."""
        mock_role = MagicMock()
        mock_role.permissions = [Permission.DOCUMENTS_READ.value, Permission.CHATS_READ.value]

        mock_membership = MagicMock()
        mock_membership.is_owner = False
        mock_membership.role = mock_role

        with patch("app.utils.rbac.get_user_membership", return_value=mock_membership):
            mock_session = AsyncMock()
            mock_user = MagicMock()
            mock_user.id = uuid4()

            result = await check_permission(
                mock_session,
                mock_user,
                1,
                Permission.DOCUMENTS_READ.value,
            )

            assert result == mock_membership

    @pytest.mark.asyncio
    async def test_member_without_permission_raises(self):
        """Test member without required permission raises HTTPException."""
        mock_role = MagicMock()
        mock_role.permissions = [Permission.DOCUMENTS_READ.value]

        mock_membership = MagicMock()
        mock_membership.is_owner = False
        mock_membership.role = mock_role

        with patch("app.utils.rbac.get_user_membership", return_value=mock_membership):
            mock_session = AsyncMock()
            mock_user = MagicMock()
            mock_user.id = uuid4()

            # Role grants read only; delete must be rejected with 403.
            with pytest.raises(HTTPException) as exc_info:
                await check_permission(
                    mock_session,
                    mock_user,
                    1,
                    Permission.DOCUMENTS_DELETE.value,
                )

            assert exc_info.value.status_code == 403

    @pytest.mark.asyncio
    async def test_no_membership_raises(self):
        """Test user without membership raises HTTPException."""
        with patch("app.utils.rbac.get_user_membership", return_value=None):
            mock_session = AsyncMock()
            mock_user = MagicMock()
            mock_user.id = uuid4()

            with pytest.raises(HTTPException) as exc_info:
                await check_permission(
                    mock_session,
                    mock_user,
                    1,
                    Permission.DOCUMENTS_READ.value,
                )

            assert exc_info.value.status_code == 403
            assert "access to this search space" in exc_info.value.detail

    @pytest.mark.asyncio
    async def test_custom_error_message(self):
        """Test custom error message is used."""
        mock_role = MagicMock()
        # Empty permission list guarantees the denial path is taken.
        mock_role.permissions = []

        mock_membership = MagicMock()
        mock_membership.is_owner = False
        mock_membership.role = mock_role

        with patch("app.utils.rbac.get_user_membership", return_value=mock_membership):
            mock_session = AsyncMock()
            mock_user = MagicMock()
            mock_user.id = uuid4()

            with pytest.raises(HTTPException) as exc_info:
                await check_permission(
                    mock_session,
                    mock_user,
                    1,
                    Permission.DOCUMENTS_DELETE.value,
                    error_message="Custom error message",
                )

            assert exc_info.value.detail == "Custom error message"
+
+
class TestCheckSearchSpaceAccess:
    """Tests for check_search_space_access function."""

    @pytest.mark.asyncio
    async def test_member_has_access(self):
        """Test member with any membership has access."""
        membership = MagicMock()

        # Any non-None membership is sufficient — no permission is required.
        with patch("app.utils.rbac.get_user_membership", return_value=membership):
            session = AsyncMock()
            user = MagicMock()
            user.id = uuid4()

            granted = await check_search_space_access(session, user, 1)

        assert granted == membership

    @pytest.mark.asyncio
    async def test_no_membership_raises(self):
        """Test user without membership raises HTTPException."""
        with patch("app.utils.rbac.get_user_membership", return_value=None):
            session = AsyncMock()
            user = MagicMock()
            user.id = uuid4()

            with pytest.raises(HTTPException) as exc_info:
                await check_search_space_access(session, user, 1)

        # Denied with 403 Forbidden.
        assert exc_info.value.status_code == 403
+
+
class TestIsSearchSpaceOwner:
    """Tests for is_search_space_owner function."""

    @staticmethod
    async def _ownership_for(membership):
        """Run is_search_space_owner with the membership lookup stubbed to `membership`."""
        with patch("app.utils.rbac.get_user_membership", return_value=membership):
            return await is_search_space_owner(AsyncMock(), uuid4(), 1)

    @pytest.mark.asyncio
    async def test_returns_true_for_owner(self):
        """Test returns True when user is owner."""
        owner = MagicMock()
        owner.is_owner = True

        assert await self._ownership_for(owner) is True

    @pytest.mark.asyncio
    async def test_returns_false_for_non_owner(self):
        """Test returns False when user is not owner."""
        member = MagicMock()
        member.is_owner = False

        assert await self._ownership_for(member) is False

    @pytest.mark.asyncio
    async def test_returns_false_for_no_membership(self):
        """Test returns False when user has no membership."""
        # No membership row at all -> definitely not an owner.
        assert await self._ownership_for(None) is False
diff --git a/surfsense_backend/tests/test_retrievers.py b/surfsense_backend/tests/test_retrievers.py
new file mode 100644
index 000000000..84d8e4a52
--- /dev/null
+++ b/surfsense_backend/tests/test_retrievers.py
@@ -0,0 +1,98 @@
+"""
+Tests for hybrid search retrievers.
+Tests the ChucksHybridSearchRetriever (sic: the class name is misspelled upstream) and DocumentHybridSearchRetriever classes.
+"""
+import pytest
+from unittest.mock import AsyncMock, MagicMock, patch
+
+from app.retriver.chunks_hybrid_search import ChucksHybridSearchRetriever
+from app.retriver.documents_hybrid_search import DocumentHybridSearchRetriever
+
+
+class TestChunksHybridSearchRetriever:
+ """Tests for ChucksHybridSearchRetriever."""
+
+ def test_init(self):
+ """Test retriever initialization."""
+ mock_session = AsyncMock()
+ retriever = ChucksHybridSearchRetriever(mock_session)
+
+ assert retriever.db_session == mock_session
+
+ @pytest.mark.asyncio
+ async def test_hybrid_search_returns_empty_on_no_results(self):
+        """Smoke test only: hybrid_search is patched below, so this verifies the call signature rather than the query logic."""
+ mock_session = AsyncMock()
+
+        # Session result mocks (currently unused: hybrid_search itself is patched below)
+ mock_result = MagicMock()
+ mock_scalars = MagicMock()
+ mock_scalars.all.return_value = []
+ mock_result.scalars.return_value = mock_scalars
+ mock_session.execute = AsyncMock(return_value=mock_result)
+
+ retriever = ChucksHybridSearchRetriever(mock_session)
+
+ with patch.object(retriever, 'hybrid_search', new_callable=AsyncMock) as mock_search:
+ mock_search.return_value = []
+
+ result = await retriever.hybrid_search(
+ query_text="test query",
+ top_k=10,
+ search_space_id=1,
+ document_type="FILE",
+ )
+
+ assert result == []
+
+
+class TestDocumentHybridSearchRetriever:
+ """Tests for DocumentHybridSearchRetriever."""
+
+ def test_init(self):
+ """Test retriever initialization."""
+ mock_session = AsyncMock()
+ retriever = DocumentHybridSearchRetriever(mock_session)
+
+ assert retriever.db_session == mock_session
+
+ @pytest.mark.asyncio
+ async def test_hybrid_search_returns_empty_on_no_results(self):
+        """Smoke test only: hybrid_search is patched below, so this verifies the call signature rather than the query logic."""
+ mock_session = AsyncMock()
+
+ retriever = DocumentHybridSearchRetriever(mock_session)
+
+ with patch.object(retriever, 'hybrid_search', new_callable=AsyncMock) as mock_search:
+ mock_search.return_value = []
+
+ result = await retriever.hybrid_search(
+ query_text="test query",
+ top_k=10,
+ search_space_id=1,
+ document_type="FILE",
+ )
+
+ assert result == []
+
+
+class TestRetrieverIntegration:
+    """Constructor-level tests verifying each retriever stores the session it is given (no real integration)."""
+
+ def test_chunk_retriever_uses_correct_session(self):
+ """Test chunk retriever uses provided session."""
+ mock_session = AsyncMock()
+ mock_session.id = "test-session"
+
+ retriever = ChucksHybridSearchRetriever(mock_session)
+
+ assert retriever.db_session.id == "test-session"
+
+ def test_document_retriever_uses_correct_session(self):
+ """Test document retriever uses provided session."""
+ mock_session = AsyncMock()
+ mock_session.id = "test-session"
+
+ retriever = DocumentHybridSearchRetriever(mock_session)
+
+ assert retriever.db_session.id == "test-session"
diff --git a/surfsense_backend/tests/test_routes_documents.py b/surfsense_backend/tests/test_routes_documents.py
new file mode 100644
index 000000000..73d74c890
--- /dev/null
+++ b/surfsense_backend/tests/test_routes_documents.py
@@ -0,0 +1,440 @@
+"""
+Tests for documents routes.
+Tests API endpoints with mocked database sessions and authentication.
+"""
+import pytest
+from unittest.mock import AsyncMock, MagicMock, patch
+from fastapi import HTTPException
+
+from app.routes.documents_routes import (
+ create_documents,
+ read_documents,
+ search_documents,
+ read_document,
+ update_document,
+ delete_document,
+ get_document_type_counts,
+ get_document_by_chunk_id,
+)
+from app.schemas import DocumentsCreate, DocumentUpdate
+from app.db import DocumentType
+
+
+class TestCreateDocuments:
+ """Tests for the create_documents endpoint."""
+
+ @pytest.mark.asyncio
+ async def test_create_documents_invalid_type(self):
+ """Test creating documents with invalid type."""
+ mock_session = AsyncMock()
+ mock_session.rollback = AsyncMock()
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ # Use a type that triggers the else branch
+ request = DocumentsCreate(
+ search_space_id=1,
+ document_type=DocumentType.FILE, # Not EXTENSION or YOUTUBE_VIDEO
+ content=[],
+ )
+
+ with patch("app.routes.documents_routes.check_permission") as mock_check:
+ mock_check.return_value = None
+
+ with pytest.raises(HTTPException) as exc_info:
+ await create_documents(
+ request=request,
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert exc_info.value.status_code == 400
+
+
+class TestReadDocuments:
+ """Tests for the read_documents endpoint."""
+
+ @pytest.mark.asyncio
+ async def test_read_documents_with_search_space_filter(self):
+ """Test reading documents filtered by search space."""
+ mock_session = AsyncMock()
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ # Mock query results
+ mock_result = MagicMock()
+ mock_scalars = MagicMock()
+ mock_scalars.all.return_value = []
+ mock_result.scalars.return_value = mock_scalars
+ mock_result.scalar.return_value = 0
+ mock_session.execute = AsyncMock(return_value=mock_result)
+
+ with patch("app.routes.documents_routes.check_permission") as mock_check:
+ mock_check.return_value = None
+
+ result = await read_documents(
+ skip=0,
+ page=None,
+ page_size=50,
+ search_space_id=1,
+ document_types=None,
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert result.items == []
+ assert result.total == 0
+
+ @pytest.mark.asyncio
+ async def test_read_documents_with_type_filter(self):
+ """Test reading documents filtered by type."""
+ mock_session = AsyncMock()
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ # Mock query results
+ mock_result = MagicMock()
+ mock_scalars = MagicMock()
+ mock_scalars.all.return_value = []
+ mock_result.scalars.return_value = mock_scalars
+ mock_result.scalar.return_value = 0
+ mock_session.execute = AsyncMock(return_value=mock_result)
+
+ with patch("app.routes.documents_routes.check_permission") as mock_check:
+ mock_check.return_value = None
+
+ result = await read_documents(
+ skip=0,
+ page=None,
+ page_size=50,
+ search_space_id=1,
+ document_types="EXTENSION,FILE",
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert result.items == []
+
+ @pytest.mark.asyncio
+ async def test_read_documents_all_search_spaces(self):
+ """Test reading documents from all accessible search spaces."""
+ mock_session = AsyncMock()
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ # Mock query results
+ mock_result = MagicMock()
+ mock_scalars = MagicMock()
+ mock_scalars.all.return_value = []
+ mock_result.scalars.return_value = mock_scalars
+ mock_result.scalar.return_value = 0
+ mock_session.execute = AsyncMock(return_value=mock_result)
+
+ result = await read_documents(
+ skip=0,
+ page=None,
+ page_size=50,
+ search_space_id=None,
+ document_types=None,
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert result.items == []
+
+
+class TestSearchDocuments:
+ """Tests for the search_documents endpoint."""
+
+ @pytest.mark.asyncio
+ async def test_search_documents_by_title(self):
+ """Test searching documents by title."""
+ mock_session = AsyncMock()
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ # Mock query results
+ mock_result = MagicMock()
+ mock_scalars = MagicMock()
+ mock_scalars.all.return_value = []
+ mock_result.scalars.return_value = mock_scalars
+ mock_result.scalar.return_value = 0
+ mock_session.execute = AsyncMock(return_value=mock_result)
+
+ with patch("app.routes.documents_routes.check_permission") as mock_check:
+ mock_check.return_value = None
+
+ result = await search_documents(
+ title="test",
+ skip=0,
+ page=None,
+ page_size=50,
+ search_space_id=1,
+ document_types=None,
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert result.items == []
+ assert result.total == 0
+
+
+class TestReadDocument:
+ """Tests for the read_document endpoint."""
+
+ @pytest.mark.asyncio
+ async def test_read_document_not_found(self):
+ """Test reading non-existent document."""
+ mock_session = AsyncMock()
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ # Mock empty result
+ mock_result = MagicMock()
+ mock_scalars = MagicMock()
+ mock_scalars.first.return_value = None
+ mock_result.scalars.return_value = mock_scalars
+ mock_session.execute = AsyncMock(return_value=mock_result)
+
+ with pytest.raises(HTTPException) as exc_info:
+ await read_document(
+ document_id=999,
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert exc_info.value.status_code == 404
+
+ @pytest.mark.asyncio
+ async def test_read_document_success(self):
+ """Test successful document reading."""
+ from datetime import datetime
+
+ mock_session = AsyncMock()
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ # Mock existing document
+ mock_document = MagicMock()
+ mock_document.id = 1
+ mock_document.title = "Test Document"
+ mock_document.document_type = DocumentType.FILE
+ mock_document.document_metadata = {}
+ mock_document.content = "Test content"
+ mock_document.created_at = datetime.now() # Must be a datetime
+ mock_document.search_space_id = 1
+
+ mock_result = MagicMock()
+ mock_scalars = MagicMock()
+ mock_scalars.first.return_value = mock_document
+ mock_result.scalars.return_value = mock_scalars
+ mock_session.execute = AsyncMock(return_value=mock_result)
+
+ with patch("app.routes.documents_routes.check_permission") as mock_check:
+ mock_check.return_value = None
+
+ result = await read_document(
+ document_id=1,
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert result.id == 1
+ assert result.title == "Test Document"
+
+
+class TestUpdateDocument:
+ """Tests for the update_document endpoint."""
+
+ @pytest.mark.asyncio
+ async def test_update_document_not_found(self):
+ """Test updating non-existent document."""
+ mock_session = AsyncMock()
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ # Mock empty result
+ mock_result = MagicMock()
+ mock_scalars = MagicMock()
+ mock_scalars.first.return_value = None
+ mock_result.scalars.return_value = mock_scalars
+ mock_session.execute = AsyncMock(return_value=mock_result)
+ mock_session.rollback = AsyncMock()
+
+ # DocumentUpdate requires document_type, content, and search_space_id
+ update_data = DocumentUpdate(
+ document_type=DocumentType.FILE,
+ content="Updated content",
+ search_space_id=1
+ )
+
+ with pytest.raises(HTTPException) as exc_info:
+ await update_document(
+ document_id=999,
+ document_update=update_data,
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert exc_info.value.status_code == 404
+
+ @pytest.mark.asyncio
+ async def test_update_document_success(self):
+ """Test successful document update."""
+ from datetime import datetime
+
+ mock_session = AsyncMock()
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ # Mock existing document
+ mock_document = MagicMock()
+ mock_document.id = 1
+ mock_document.title = "Old Title"
+ mock_document.document_type = DocumentType.FILE
+ mock_document.document_metadata = {}
+ mock_document.content = "Test content"
+ mock_document.created_at = datetime.now()
+ mock_document.search_space_id = 1
+
+ mock_result = MagicMock()
+ mock_scalars = MagicMock()
+ mock_scalars.first.return_value = mock_document
+ mock_result.scalars.return_value = mock_scalars
+ mock_session.execute = AsyncMock(return_value=mock_result)
+ mock_session.commit = AsyncMock()
+ mock_session.refresh = AsyncMock()
+
+ # DocumentUpdate requires document_type, content, and search_space_id
+ update_data = DocumentUpdate(
+ document_type=DocumentType.FILE,
+ content="New content",
+ search_space_id=1
+ )
+
+ with patch("app.routes.documents_routes.check_permission") as mock_check:
+ mock_check.return_value = None
+
+ _result = await update_document(
+ document_id=1,
+ document_update=update_data,
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert mock_session.commit.called
+
+
+class TestDeleteDocument:
+ """Tests for the delete_document endpoint."""
+
+ @pytest.mark.asyncio
+ async def test_delete_document_not_found(self):
+ """Test deleting non-existent document."""
+ mock_session = AsyncMock()
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ # Mock empty result
+ mock_result = MagicMock()
+ mock_scalars = MagicMock()
+ mock_scalars.first.return_value = None
+ mock_result.scalars.return_value = mock_scalars
+ mock_session.execute = AsyncMock(return_value=mock_result)
+ mock_session.rollback = AsyncMock()
+
+ with pytest.raises(HTTPException) as exc_info:
+ await delete_document(
+ document_id=999,
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert exc_info.value.status_code == 404
+
+ @pytest.mark.asyncio
+ async def test_delete_document_success(self):
+ """Test successful document deletion."""
+ mock_session = AsyncMock()
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ # Mock existing document
+ mock_document = MagicMock()
+ mock_document.id = 1
+ mock_document.search_space_id = 1
+
+ mock_result = MagicMock()
+ mock_scalars = MagicMock()
+ mock_scalars.first.return_value = mock_document
+ mock_result.scalars.return_value = mock_scalars
+ mock_session.execute = AsyncMock(return_value=mock_result)
+ mock_session.delete = AsyncMock()
+ mock_session.commit = AsyncMock()
+
+ with patch("app.routes.documents_routes.check_permission") as mock_check:
+ mock_check.return_value = None
+
+ result = await delete_document(
+ document_id=1,
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert result["message"] == "Document deleted successfully"
+ assert mock_session.delete.called
+
+
+class TestGetDocumentTypeCounts:
+ """Tests for the get_document_type_counts endpoint."""
+
+ @pytest.mark.asyncio
+ async def test_get_document_type_counts_success(self):
+ """Test getting document type counts."""
+ mock_session = AsyncMock()
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ # Mock query result
+ mock_result = MagicMock()
+ mock_result.all.return_value = [("FILE", 5), ("EXTENSION", 3)]
+ mock_session.execute = AsyncMock(return_value=mock_result)
+
+ with patch("app.routes.documents_routes.check_permission") as mock_check:
+ mock_check.return_value = None
+
+ result = await get_document_type_counts(
+ search_space_id=1,
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert result == {"FILE": 5, "EXTENSION": 3}
+
+
+class TestGetDocumentByChunkId:
+ """Tests for the get_document_by_chunk_id endpoint."""
+
+ @pytest.mark.asyncio
+ async def test_get_document_by_chunk_id_chunk_not_found(self):
+ """Test getting document when chunk doesn't exist."""
+ mock_session = AsyncMock()
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ # Mock empty chunk result
+ mock_result = MagicMock()
+ mock_scalars = MagicMock()
+ mock_scalars.first.return_value = None
+ mock_result.scalars.return_value = mock_scalars
+ mock_session.execute = AsyncMock(return_value=mock_result)
+
+ with pytest.raises(HTTPException) as exc_info:
+ await get_document_by_chunk_id(
+ chunk_id=999,
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert exc_info.value.status_code == 404
+ assert "Chunk" in exc_info.value.detail
diff --git a/surfsense_backend/tests/test_routes_llm_config.py b/surfsense_backend/tests/test_routes_llm_config.py
new file mode 100644
index 000000000..da72ad2bb
--- /dev/null
+++ b/surfsense_backend/tests/test_routes_llm_config.py
@@ -0,0 +1,421 @@
+"""
+Tests for LLM config routes.
+Tests API endpoints with mocked database sessions and authentication.
+"""
+import pytest
+from unittest.mock import AsyncMock, MagicMock, patch
+from fastapi import HTTPException
+
+from app.routes.llm_config_routes import (
+ get_global_llm_configs,
+ create_llm_config,
+ read_llm_configs,
+ read_llm_config,
+ update_llm_config,
+ delete_llm_config,
+ get_llm_preferences,
+ update_llm_preferences,
+ LLMPreferencesUpdate,
+)
+from app.schemas import LLMConfigCreate, LLMConfigUpdate
+from app.db import LiteLLMProvider
+
+
+class TestGetGlobalLLMConfigs:
+ """Tests for the get_global_llm_configs endpoint."""
+
+ @pytest.mark.asyncio
+ async def test_returns_global_configs_without_api_keys(self):
+ """Test that global configs are returned without exposing API keys."""
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ with patch("app.routes.llm_config_routes.config") as mock_config:
+ mock_config.GLOBAL_LLM_CONFIGS = [
+ {
+ "id": -1,
+ "name": "GPT-4",
+ "provider": "OPENAI",
+ "custom_provider": None,
+ "model_name": "gpt-4",
+ "api_key": "sk-secret-key",
+ "api_base": None,
+ "language": "en",
+ "litellm_params": {},
+ },
+ ]
+
+ result = await get_global_llm_configs(user=mock_user)
+
+ assert len(result) == 1
+            # The api_key may be absent or redacted, but must never equal the configured secret
+ assert "api_key" not in result[0] or result[0].get("api_key") != "sk-secret-key"
+ assert result[0]["name"] == "GPT-4"
+ assert result[0]["is_global"] is True
+
+ @pytest.mark.asyncio
+ async def test_handles_empty_global_configs(self):
+ """Test handling when no global configs are configured."""
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ with patch("app.routes.llm_config_routes.config") as mock_config:
+ mock_config.GLOBAL_LLM_CONFIGS = []
+
+ result = await get_global_llm_configs(user=mock_user)
+
+ assert result == []
+
+
+class TestCreateLLMConfig:
+ """Tests for the create_llm_config endpoint."""
+
+ @pytest.mark.asyncio
+ async def test_create_llm_config_invalid_validation(self):
+ """Test creating LLM config with invalid validation."""
+ mock_session = AsyncMock()
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ llm_config_data = LLMConfigCreate(
+ name="Test LLM",
+ provider=LiteLLMProvider.OPENAI,
+ model_name="gpt-4",
+ api_key="invalid-key",
+ search_space_id=1,
+ )
+
+ with patch("app.routes.llm_config_routes.check_permission") as mock_check:
+ mock_check.return_value = None
+
+ with patch("app.routes.llm_config_routes.validate_llm_config") as mock_validate:
+ mock_validate.return_value = (False, "Invalid API key")
+
+ with pytest.raises(HTTPException) as exc_info:
+ await create_llm_config(
+ llm_config=llm_config_data,
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert exc_info.value.status_code == 400
+ assert "Invalid LLM configuration" in exc_info.value.detail
+
+ @pytest.mark.asyncio
+ async def test_create_llm_config_success(self):
+ """Test successful LLM config creation."""
+ mock_session = AsyncMock()
+ mock_session.add = MagicMock()
+ mock_session.commit = AsyncMock()
+ mock_session.refresh = AsyncMock()
+
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ llm_config_data = LLMConfigCreate(
+ name="Test LLM",
+ provider=LiteLLMProvider.OPENAI,
+ model_name="gpt-4",
+ api_key="sk-valid-key",
+ search_space_id=1,
+ )
+
+ with patch("app.routes.llm_config_routes.check_permission") as mock_check:
+ mock_check.return_value = None
+
+ with patch("app.routes.llm_config_routes.validate_llm_config") as mock_validate:
+ mock_validate.return_value = (True, "")
+
+ with patch("app.routes.llm_config_routes.LLMConfig") as MockLLMConfig:
+ mock_config = MagicMock()
+ mock_config.id = 1
+ mock_config.name = "Test LLM"
+ MockLLMConfig.return_value = mock_config
+
+ _result = await create_llm_config(
+ llm_config=llm_config_data,
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert mock_session.add.called
+ assert mock_session.commit.called
+
+
+class TestReadLLMConfigs:
+ """Tests for the read_llm_configs endpoint."""
+
+ @pytest.mark.asyncio
+ async def test_read_llm_configs_success(self):
+ """Test successful reading of LLM configs."""
+ mock_session = AsyncMock()
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ # Mock query result
+ mock_result = MagicMock()
+ mock_scalars = MagicMock()
+ mock_scalars.all.return_value = []
+ mock_result.scalars.return_value = mock_scalars
+ mock_session.execute = AsyncMock(return_value=mock_result)
+
+ with patch("app.routes.llm_config_routes.check_permission") as mock_check:
+ mock_check.return_value = None
+
+ result = await read_llm_configs(
+ search_space_id=1,
+ skip=0,
+ limit=200,
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert isinstance(result, list)
+
+
+class TestReadLLMConfig:
+ """Tests for the read_llm_config endpoint."""
+
+ @pytest.mark.asyncio
+ async def test_read_llm_config_not_found(self):
+ """Test reading non-existent LLM config."""
+ mock_session = AsyncMock()
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ # Mock empty result
+ mock_result = MagicMock()
+ mock_scalars = MagicMock()
+ mock_scalars.first.return_value = None
+ mock_result.scalars.return_value = mock_scalars
+ mock_session.execute = AsyncMock(return_value=mock_result)
+
+ with pytest.raises(HTTPException) as exc_info:
+ await read_llm_config(
+ llm_config_id=999,
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert exc_info.value.status_code == 404
+
+ @pytest.mark.asyncio
+ async def test_read_llm_config_success(self):
+ """Test successful reading of LLM config."""
+ mock_session = AsyncMock()
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ # Mock existing config
+ mock_config = MagicMock()
+ mock_config.id = 1
+ mock_config.search_space_id = 1
+
+ mock_result = MagicMock()
+ mock_scalars = MagicMock()
+ mock_scalars.first.return_value = mock_config
+ mock_result.scalars.return_value = mock_scalars
+ mock_session.execute = AsyncMock(return_value=mock_result)
+
+ with patch("app.routes.llm_config_routes.check_permission") as mock_check:
+ mock_check.return_value = None
+
+ result = await read_llm_config(
+ llm_config_id=1,
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert result.id == 1
+
+
+class TestUpdateLLMConfig:
+ """Tests for the update_llm_config endpoint."""
+
+ @pytest.mark.asyncio
+ async def test_update_llm_config_not_found(self):
+ """Test updating non-existent LLM config."""
+ mock_session = AsyncMock()
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ # Mock empty result
+ mock_result = MagicMock()
+ mock_scalars = MagicMock()
+ mock_scalars.first.return_value = None
+ mock_result.scalars.return_value = mock_scalars
+ mock_session.execute = AsyncMock(return_value=mock_result)
+ mock_session.rollback = AsyncMock()
+
+ update_data = LLMConfigUpdate(name="Updated Name")
+
+ with pytest.raises(HTTPException) as exc_info:
+ await update_llm_config(
+ llm_config_id=999,
+ llm_config_update=update_data,
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert exc_info.value.status_code == 404
+
+
+class TestDeleteLLMConfig:
+ """Tests for the delete_llm_config endpoint."""
+
+ @pytest.mark.asyncio
+ async def test_delete_llm_config_not_found(self):
+ """Test deleting non-existent LLM config."""
+ mock_session = AsyncMock()
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ # Mock empty result
+ mock_result = MagicMock()
+ mock_scalars = MagicMock()
+ mock_scalars.first.return_value = None
+ mock_result.scalars.return_value = mock_scalars
+ mock_session.execute = AsyncMock(return_value=mock_result)
+ mock_session.rollback = AsyncMock()
+
+ with pytest.raises(HTTPException) as exc_info:
+ await delete_llm_config(
+ llm_config_id=999,
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert exc_info.value.status_code == 404
+
+ @pytest.mark.asyncio
+ async def test_delete_llm_config_success(self):
+ """Test successful LLM config deletion."""
+ mock_session = AsyncMock()
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ # Mock existing config
+ mock_config = MagicMock()
+ mock_config.id = 1
+ mock_config.search_space_id = 1
+
+ mock_result = MagicMock()
+ mock_scalars = MagicMock()
+ mock_scalars.first.return_value = mock_config
+ mock_result.scalars.return_value = mock_scalars
+ mock_session.execute = AsyncMock(return_value=mock_result)
+ mock_session.delete = AsyncMock()
+ mock_session.commit = AsyncMock()
+
+ with patch("app.routes.llm_config_routes.check_permission") as mock_check:
+ mock_check.return_value = None
+
+ result = await delete_llm_config(
+ llm_config_id=1,
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert result["message"] == "LLM configuration deleted successfully"
+ assert mock_session.delete.called
+ assert mock_session.commit.called
+
+
+class TestGetLLMPreferences:
+ """Tests for the get_llm_preferences endpoint."""
+
+ @pytest.mark.asyncio
+ async def test_get_llm_preferences_not_found(self):
+ """Test getting preferences for non-existent search space."""
+ mock_session = AsyncMock()
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ # Mock empty result
+ mock_result = MagicMock()
+ mock_scalars = MagicMock()
+ mock_scalars.first.return_value = None
+ mock_result.scalars.return_value = mock_scalars
+ mock_session.execute = AsyncMock(return_value=mock_result)
+
+ with patch("app.routes.llm_config_routes.check_permission") as mock_check:
+ mock_check.return_value = None
+
+ with pytest.raises(HTTPException) as exc_info:
+ await get_llm_preferences(
+ search_space_id=999,
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert exc_info.value.status_code == 404
+
+
+class TestUpdateLLMPreferences:
+ """Tests for the update_llm_preferences endpoint."""
+
+ @pytest.mark.asyncio
+ async def test_update_llm_preferences_search_space_not_found(self):
+ """Test updating preferences for non-existent search space."""
+ mock_session = AsyncMock()
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ # Mock empty result
+ mock_result = MagicMock()
+ mock_scalars = MagicMock()
+ mock_scalars.first.return_value = None
+ mock_result.scalars.return_value = mock_scalars
+ mock_session.execute = AsyncMock(return_value=mock_result)
+ mock_session.rollback = AsyncMock()
+
+ preferences = LLMPreferencesUpdate(fast_llm_id=1)
+
+ with patch("app.routes.llm_config_routes.check_permission") as mock_check:
+ mock_check.return_value = None
+
+ with pytest.raises(HTTPException) as exc_info:
+ await update_llm_preferences(
+ search_space_id=999,
+ preferences=preferences,
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert exc_info.value.status_code == 404
+
+ @pytest.mark.asyncio
+ async def test_update_llm_preferences_global_config_not_found(self):
+ """Test updating with non-existent global config."""
+ mock_session = AsyncMock()
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ # Mock search space exists
+ mock_search_space = MagicMock()
+ mock_search_space.id = 1
+
+ mock_result = MagicMock()
+ mock_scalars = MagicMock()
+ mock_scalars.first.return_value = mock_search_space
+ mock_result.scalars.return_value = mock_scalars
+ mock_session.execute = AsyncMock(return_value=mock_result)
+ mock_session.rollback = AsyncMock()
+
+ preferences = LLMPreferencesUpdate(fast_llm_id=-999) # Non-existent global config
+
+ with patch("app.routes.llm_config_routes.check_permission") as mock_check:
+ mock_check.return_value = None
+
+ with patch("app.routes.llm_config_routes.config") as mock_config:
+ mock_config.GLOBAL_LLM_CONFIGS = []
+
+ with pytest.raises(HTTPException) as exc_info:
+ await update_llm_preferences(
+ search_space_id=1,
+ preferences=preferences,
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert exc_info.value.status_code == 404
diff --git a/surfsense_backend/tests/test_routes_search_spaces.py b/surfsense_backend/tests/test_routes_search_spaces.py
new file mode 100644
index 000000000..021dfcc06
--- /dev/null
+++ b/surfsense_backend/tests/test_routes_search_spaces.py
@@ -0,0 +1,329 @@
+"""
+Tests for search spaces routes.
+Tests API endpoints with mocked database sessions and authentication.
+"""
+import pytest
+from unittest.mock import AsyncMock, MagicMock, patch
+from fastapi import HTTPException
+
+from app.routes.search_spaces_routes import (
+ create_search_space,
+ read_search_spaces,
+ read_search_space,
+ update_search_space,
+ delete_search_space,
+ create_default_roles_and_membership,
+)
+from app.schemas import SearchSpaceCreate, SearchSpaceUpdate
+
+
+class TestCreateDefaultRolesAndMembership:
+ """Tests for the create_default_roles_and_membership helper function."""
+
+ @pytest.mark.asyncio
+ async def test_creates_default_roles(self):
+ """Test that default roles are created for a search space."""
+ mock_session = AsyncMock()
+ mock_session.add = MagicMock()
+ mock_session.flush = AsyncMock()
+
+ with patch("app.routes.search_spaces_routes.get_default_roles_config") as mock_get_roles:
+ mock_get_roles.return_value = [
+ {
+ "name": "Owner",
+ "description": "Full access",
+ "permissions": ["*"],
+ "is_default": False,
+ "is_system_role": True,
+ },
+ {
+ "name": "Editor",
+ "description": "Can edit",
+ "permissions": ["documents:create"],
+ "is_default": True,
+ "is_system_role": True,
+ },
+ ]
+
+ await create_default_roles_and_membership(
+ mock_session,
+ search_space_id=1,
+ owner_user_id="user-123",
+ )
+
+ # Should add roles and membership
+ assert mock_session.add.call_count >= 2
+ assert mock_session.flush.call_count >= 1
+
+
+class TestCreateSearchSpace:
+ """Tests for the create_search_space endpoint."""
+
+ @pytest.mark.asyncio
+ async def test_create_search_space_success(self):
+ """Test successful search space creation."""
+ mock_session = AsyncMock()
+ mock_session.add = MagicMock()
+ mock_session.flush = AsyncMock()
+ mock_session.commit = AsyncMock()
+ mock_session.refresh = AsyncMock()
+
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ search_space_data = SearchSpaceCreate(name="Test Space")
+
+ with patch("app.routes.search_spaces_routes.create_default_roles_and_membership") as mock_create_roles:
+ mock_create_roles.return_value = None
+
+ # Mock the SearchSpace class
+ with patch("app.routes.search_spaces_routes.SearchSpace") as MockSearchSpace:
+ mock_search_space = MagicMock()
+ mock_search_space.id = 1
+ mock_search_space.name = "Test Space"
+ MockSearchSpace.return_value = mock_search_space
+
+ _result = await create_search_space(
+ search_space=search_space_data,
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert mock_session.add.called
+ assert mock_session.commit.called
+
+ @pytest.mark.asyncio
+ async def test_create_search_space_database_error(self):
+ """Test search space creation handles database errors."""
+ mock_session = AsyncMock()
+ mock_session.add = MagicMock(side_effect=Exception("Database error"))
+ mock_session.rollback = AsyncMock()
+
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ search_space_data = SearchSpaceCreate(name="Test Space")
+
+ with pytest.raises(HTTPException) as exc_info:
+ await create_search_space(
+ search_space=search_space_data,
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert exc_info.value.status_code == 500
+
+
+class TestReadSearchSpaces:
+ """Tests for the read_search_spaces endpoint."""
+
+ @pytest.mark.asyncio
+ async def test_read_search_spaces_owned_only(self):
+ """Test reading only owned search spaces."""
+ mock_session = AsyncMock()
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ # Mock the query result
+ mock_result = MagicMock()
+ mock_scalars = MagicMock()
+ mock_scalars.all.return_value = []
+ mock_result.scalars.return_value = mock_scalars
+ mock_session.execute = AsyncMock(return_value=mock_result)
+
+ result = await read_search_spaces(
+ skip=0,
+ limit=200,
+ owned_only=True,
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert isinstance(result, list)
+
+ @pytest.mark.asyncio
+ async def test_read_search_spaces_all_accessible(self):
+ """Test reading all accessible search spaces."""
+ mock_session = AsyncMock()
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ # Mock the query result
+ mock_result = MagicMock()
+ mock_scalars = MagicMock()
+ mock_scalars.all.return_value = []
+ mock_scalars.first.return_value = None
+ mock_result.scalars.return_value = mock_scalars
+ mock_result.scalar.return_value = 0
+ mock_session.execute = AsyncMock(return_value=mock_result)
+
+ result = await read_search_spaces(
+ skip=0,
+ limit=200,
+ owned_only=False,
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert isinstance(result, list)
+
+
+class TestReadSearchSpace:
+ """Tests for the read_search_space endpoint."""
+
+ @pytest.mark.asyncio
+ async def test_read_search_space_not_found(self):
+ """Test reading non-existent search space."""
+ mock_session = AsyncMock()
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ # Mock empty result
+ mock_result = MagicMock()
+ mock_scalars = MagicMock()
+ mock_scalars.first.return_value = None
+ mock_result.scalars.return_value = mock_scalars
+ mock_session.execute = AsyncMock(return_value=mock_result)
+
+ with patch("app.routes.search_spaces_routes.check_search_space_access") as mock_check:
+ mock_check.return_value = None
+
+ with pytest.raises(HTTPException) as exc_info:
+ await read_search_space(
+ search_space_id=999,
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert exc_info.value.status_code == 404
+
+
+class TestUpdateSearchSpace:
+ """Tests for the update_search_space endpoint."""
+
+ @pytest.mark.asyncio
+ async def test_update_search_space_not_found(self):
+ """Test updating non-existent search space."""
+ mock_session = AsyncMock()
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ # Mock empty result
+ mock_result = MagicMock()
+ mock_scalars = MagicMock()
+ mock_scalars.first.return_value = None
+ mock_result.scalars.return_value = mock_scalars
+ mock_session.execute = AsyncMock(return_value=mock_result)
+ mock_session.rollback = AsyncMock()
+
+ update_data = SearchSpaceUpdate(name="Updated Name")
+
+ with patch("app.routes.search_spaces_routes.check_permission") as mock_check:
+ mock_check.return_value = None
+
+ with pytest.raises(HTTPException) as exc_info:
+ await update_search_space(
+ search_space_id=999,
+ search_space_update=update_data,
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert exc_info.value.status_code == 404
+
+ @pytest.mark.asyncio
+ async def test_update_search_space_success(self):
+ """Test successful search space update."""
+ mock_session = AsyncMock()
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ # Mock existing search space
+ mock_search_space = MagicMock()
+ mock_search_space.id = 1
+ mock_search_space.name = "Old Name"
+
+ mock_result = MagicMock()
+ mock_scalars = MagicMock()
+ mock_scalars.first.return_value = mock_search_space
+ mock_result.scalars.return_value = mock_scalars
+ mock_session.execute = AsyncMock(return_value=mock_result)
+ mock_session.commit = AsyncMock()
+ mock_session.refresh = AsyncMock()
+
+ update_data = SearchSpaceUpdate(name="New Name")
+
+ with patch("app.routes.search_spaces_routes.check_permission") as mock_check:
+ mock_check.return_value = None
+
+ _result = await update_search_space(
+ search_space_id=1,
+ search_space_update=update_data,
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert mock_session.commit.called
+
+
+class TestDeleteSearchSpace:
+ """Tests for the delete_search_space endpoint."""
+
+ @pytest.mark.asyncio
+ async def test_delete_search_space_not_found(self):
+ """Test deleting non-existent search space."""
+ mock_session = AsyncMock()
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ # Mock empty result
+ mock_result = MagicMock()
+ mock_scalars = MagicMock()
+ mock_scalars.first.return_value = None
+ mock_result.scalars.return_value = mock_scalars
+ mock_session.execute = AsyncMock(return_value=mock_result)
+ mock_session.rollback = AsyncMock()
+
+ with patch("app.routes.search_spaces_routes.check_permission") as mock_check:
+ mock_check.return_value = None
+
+ with pytest.raises(HTTPException) as exc_info:
+ await delete_search_space(
+ search_space_id=999,
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert exc_info.value.status_code == 404
+
+ @pytest.mark.asyncio
+ async def test_delete_search_space_success(self):
+ """Test successful search space deletion."""
+ mock_session = AsyncMock()
+ mock_user = MagicMock()
+ mock_user.id = "user-123"
+
+ # Mock existing search space
+ mock_search_space = MagicMock()
+ mock_search_space.id = 1
+
+ mock_result = MagicMock()
+ mock_scalars = MagicMock()
+ mock_scalars.first.return_value = mock_search_space
+ mock_result.scalars.return_value = mock_scalars
+ mock_session.execute = AsyncMock(return_value=mock_result)
+ mock_session.delete = AsyncMock()
+ mock_session.commit = AsyncMock()
+
+ with patch("app.routes.search_spaces_routes.check_permission") as mock_check:
+ mock_check.return_value = None
+
+ result = await delete_search_space(
+ search_space_id=1,
+ session=mock_session,
+ user=mock_user,
+ )
+
+ assert result["message"] == "Search space deleted successfully"
+ assert mock_session.delete.called
+ assert mock_session.commit.called
diff --git a/surfsense_backend/tests/test_schemas.py b/surfsense_backend/tests/test_schemas.py
new file mode 100644
index 000000000..1aabc63bd
--- /dev/null
+++ b/surfsense_backend/tests/test_schemas.py
@@ -0,0 +1,569 @@
+"""
+Tests for Pydantic schema models.
+
+This module tests schema validation, serialization, and deserialization
+for all schema models used in the application.
+"""
+
+from datetime import datetime, timezone
+from uuid import uuid4
+
+import pytest
+from pydantic import ValidationError
+
+from app.db import ChatType, DocumentType, LiteLLMProvider
+from app.schemas.base import IDModel, TimestampModel
+from app.schemas.chats import (
+ AISDKChatRequest,
+ ChatBase,
+ ChatCreate,
+ ChatRead,
+ ChatReadWithoutMessages,
+ ChatUpdate,
+ ClientAttachment,
+ ToolInvocation,
+)
+from app.schemas.chunks import ChunkBase, ChunkCreate, ChunkRead, ChunkUpdate
+from app.schemas.documents import (
+ DocumentBase,
+ DocumentRead,
+ DocumentsCreate,
+ DocumentUpdate,
+ DocumentWithChunksRead,
+ ExtensionDocumentContent,
+ ExtensionDocumentMetadata,
+ PaginatedResponse,
+)
+from app.schemas.llm_config import (
+ LLMConfigBase,
+ LLMConfigCreate,
+ LLMConfigRead,
+ LLMConfigUpdate,
+)
+from app.schemas.search_space import (
+ SearchSpaceBase,
+ SearchSpaceCreate,
+ SearchSpaceRead,
+ SearchSpaceUpdate,
+ SearchSpaceWithStats,
+)
+
+
+class TestBaseSchemas:
+ """Tests for base schema models."""
+
+ def test_timestamp_model(self):
+ """Test TimestampModel with valid datetime."""
+ now = datetime.now(timezone.utc)
+ model = TimestampModel(created_at=now)
+ assert model.created_at == now
+
+ def test_id_model(self):
+ """Test IDModel with valid ID."""
+ model = IDModel(id=1)
+ assert model.id == 1
+
+ def test_id_model_with_zero(self):
+ """Test IDModel accepts zero."""
+ model = IDModel(id=0)
+ assert model.id == 0
+
+
+class TestChatSchemas:
+ """Tests for chat-related schema models."""
+
+ def test_chat_base_valid(self):
+ """Test ChatBase with valid data."""
+ chat = ChatBase(
+ type=ChatType.QNA,
+ title="Test Chat",
+ messages=[{"role": "user", "content": "Hello"}],
+ search_space_id=1,
+ )
+ assert chat.type == ChatType.QNA
+ assert chat.title == "Test Chat"
+ assert chat.search_space_id == 1
+ assert chat.state_version == 1
+
+ def test_chat_base_with_connectors(self):
+ """Test ChatBase with initial connectors."""
+ chat = ChatBase(
+ type=ChatType.QNA,
+ title="Test Chat",
+ initial_connectors=["slack", "notion"],
+ messages=[],
+ search_space_id=1,
+ )
+ assert chat.initial_connectors == ["slack", "notion"]
+
+ def test_chat_base_default_state_version(self):
+ """Test ChatBase default state_version."""
+ chat = ChatBase(
+ type=ChatType.QNA,
+ title="Test Chat",
+ messages=[],
+ search_space_id=1,
+ )
+ assert chat.state_version == 1
+
+ def test_chat_create(self):
+ """Test ChatCreate schema."""
+ chat = ChatCreate(
+ type=ChatType.QNA,
+ title="New Chat",
+ messages=[{"role": "user", "content": "Test"}],
+ search_space_id=1,
+ )
+ assert chat.title == "New Chat"
+
+ def test_chat_update(self):
+ """Test ChatUpdate schema."""
+ chat = ChatUpdate(
+ type=ChatType.QNA,
+ title="Updated Chat",
+ messages=[{"role": "user", "content": "Updated"}],
+ search_space_id=1,
+ state_version=2,
+ )
+ assert chat.state_version == 2
+
+ def test_chat_read(self):
+ """Test ChatRead schema."""
+ now = datetime.now(timezone.utc)
+ chat = ChatRead(
+ id=1,
+ type=ChatType.QNA,
+ title="Read Chat",
+ messages=[],
+ search_space_id=1,
+ created_at=now,
+ )
+ assert chat.id == 1
+ assert chat.created_at == now
+
+ def test_chat_read_without_messages(self):
+ """Test ChatReadWithoutMessages schema."""
+ now = datetime.now(timezone.utc)
+ chat = ChatReadWithoutMessages(
+ id=1,
+ type=ChatType.QNA,
+ title="Chat Without Messages",
+ search_space_id=1,
+ created_at=now,
+ )
+ assert chat.id == 1
+ assert not hasattr(chat, "messages") or "messages" not in chat.model_fields
+
+ def test_client_attachment(self):
+ """Test ClientAttachment schema."""
+ attachment = ClientAttachment(
+ name="test.pdf",
+ content_type="application/pdf",
+ url="https://example.com/test.pdf",
+ )
+ assert attachment.name == "test.pdf"
+ assert attachment.content_type == "application/pdf"
+
+ def test_tool_invocation(self):
+ """Test ToolInvocation schema."""
+ tool = ToolInvocation(
+ tool_call_id="tc_123",
+ tool_name="search",
+ args={"query": "test"},
+ result={"results": []},
+ )
+ assert tool.tool_call_id == "tc_123"
+ assert tool.tool_name == "search"
+
+ def test_aisdk_chat_request(self):
+ """Test AISDKChatRequest schema."""
+ request = AISDKChatRequest(
+ messages=[{"role": "user", "content": "Hello"}],
+ data={"search_space_id": 1},
+ )
+ assert len(request.messages) == 1
+ assert request.data["search_space_id"] == 1
+
+ def test_aisdk_chat_request_no_data(self):
+ """Test AISDKChatRequest without data."""
+ request = AISDKChatRequest(messages=[{"role": "user", "content": "Hello"}])
+ assert request.data is None
+
+
+class TestChunkSchemas:
+ """Tests for chunk-related schema models."""
+
+ def test_chunk_base(self):
+ """Test ChunkBase schema."""
+ chunk = ChunkBase(content="Test content", document_id=1)
+ assert chunk.content == "Test content"
+ assert chunk.document_id == 1
+
+ def test_chunk_create(self):
+ """Test ChunkCreate schema."""
+ chunk = ChunkCreate(content="New chunk content", document_id=1)
+ assert chunk.content == "New chunk content"
+
+ def test_chunk_update(self):
+ """Test ChunkUpdate schema."""
+ chunk = ChunkUpdate(content="Updated content", document_id=1)
+ assert chunk.content == "Updated content"
+
+ def test_chunk_read(self):
+ """Test ChunkRead schema."""
+ now = datetime.now(timezone.utc)
+ chunk = ChunkRead(
+ id=1,
+ content="Read chunk",
+ document_id=1,
+ created_at=now,
+ )
+ assert chunk.id == 1
+ assert chunk.created_at == now
+
+
+class TestDocumentSchemas:
+ """Tests for document-related schema models."""
+
+ def test_extension_document_metadata(self):
+ """Test ExtensionDocumentMetadata schema."""
+ metadata = ExtensionDocumentMetadata(
+ BrowsingSessionId="session123",
+ VisitedWebPageURL="https://example.com",
+ VisitedWebPageTitle="Example Page",
+ VisitedWebPageDateWithTimeInISOString="2024-01-01T00:00:00Z",
+ VisitedWebPageReffererURL="https://google.com",
+ VisitedWebPageVisitDurationInMilliseconds="5000",
+ )
+ assert metadata.BrowsingSessionId == "session123"
+ assert metadata.VisitedWebPageURL == "https://example.com"
+
+ def test_extension_document_content(self):
+ """Test ExtensionDocumentContent schema."""
+ metadata = ExtensionDocumentMetadata(
+ BrowsingSessionId="session123",
+ VisitedWebPageURL="https://example.com",
+ VisitedWebPageTitle="Example Page",
+ VisitedWebPageDateWithTimeInISOString="2024-01-01T00:00:00Z",
+ VisitedWebPageReffererURL="https://google.com",
+ VisitedWebPageVisitDurationInMilliseconds="5000",
+ )
+ content = ExtensionDocumentContent(
+ metadata=metadata,
+ pageContent="This is the page content",
+ )
+ assert content.pageContent == "This is the page content"
+ assert content.metadata.VisitedWebPageTitle == "Example Page"
+
+ def test_document_base_with_string_content(self):
+ """Test DocumentBase with string content."""
+ doc = DocumentBase(
+ document_type=DocumentType.FILE,
+ content="This is document content",
+ search_space_id=1,
+ )
+ assert doc.content == "This is document content"
+
+ def test_document_base_with_list_content(self):
+ """Test DocumentBase with list content."""
+ doc = DocumentBase(
+ document_type=DocumentType.FILE,
+ content=["Part 1", "Part 2"],
+ search_space_id=1,
+ )
+ assert len(doc.content) == 2
+
+ def test_documents_create(self):
+ """Test DocumentsCreate schema."""
+ doc = DocumentsCreate(
+ document_type=DocumentType.CRAWLED_URL,
+ content="Crawled content",
+ search_space_id=1,
+ )
+ assert doc.document_type == DocumentType.CRAWLED_URL
+
+ def test_document_update(self):
+ """Test DocumentUpdate schema."""
+ doc = DocumentUpdate(
+ document_type=DocumentType.FILE,
+ content="Updated content",
+ search_space_id=1,
+ )
+ assert doc.content == "Updated content"
+
+ def test_document_read(self):
+ """Test DocumentRead schema."""
+ now = datetime.now(timezone.utc)
+ doc = DocumentRead(
+ id=1,
+ title="Test Document",
+ document_type=DocumentType.FILE,
+ document_metadata={"key": "value"},
+ content="Content",
+ created_at=now,
+ search_space_id=1,
+ )
+ assert doc.id == 1
+ assert doc.title == "Test Document"
+ assert doc.document_metadata["key"] == "value"
+
+ def test_document_with_chunks_read(self):
+ """Test DocumentWithChunksRead schema."""
+ now = datetime.now(timezone.utc)
+ doc = DocumentWithChunksRead(
+ id=1,
+ title="Test Document",
+ document_type=DocumentType.FILE,
+ document_metadata={},
+ content="Content",
+ created_at=now,
+ search_space_id=1,
+ chunks=[
+ ChunkRead(id=1, content="Chunk 1", document_id=1, created_at=now),
+ ChunkRead(id=2, content="Chunk 2", document_id=1, created_at=now),
+ ],
+ )
+ assert len(doc.chunks) == 2
+
+ def test_paginated_response(self):
+ """Test PaginatedResponse schema."""
+ response = PaginatedResponse[dict](
+ items=[{"id": 1}, {"id": 2}],
+ total=10,
+ )
+ assert len(response.items) == 2
+ assert response.total == 10
+
+
+class TestLLMConfigSchemas:
+ """Tests for LLM config schema models."""
+
+ def test_llm_config_base(self):
+ """Test LLMConfigBase schema."""
+ config = LLMConfigBase(
+ name="GPT-4 Config",
+ provider=LiteLLMProvider.OPENAI,
+ model_name="gpt-4",
+ api_key="sk-test123",
+ )
+ assert config.name == "GPT-4 Config"
+ assert config.provider == LiteLLMProvider.OPENAI
+ assert config.language == "English" # Default value
+
+ def test_llm_config_base_with_custom_provider(self):
+ """Test LLMConfigBase with custom provider."""
+ config = LLMConfigBase(
+ name="Custom LLM",
+ provider=LiteLLMProvider.CUSTOM,
+ custom_provider="my-provider",
+ model_name="my-model",
+ api_key="test-key",
+ api_base="https://my-api.com/v1",
+ )
+ assert config.custom_provider == "my-provider"
+ assert config.api_base == "https://my-api.com/v1"
+
+ def test_llm_config_base_with_litellm_params(self):
+ """Test LLMConfigBase with litellm params."""
+ config = LLMConfigBase(
+ name="Config with Params",
+ provider=LiteLLMProvider.ANTHROPIC,
+ model_name="claude-3-opus",
+ api_key="test-key",
+ litellm_params={"temperature": 0.7, "max_tokens": 1000},
+ )
+ assert config.litellm_params["temperature"] == 0.7
+
+ def test_llm_config_create(self):
+ """Test LLMConfigCreate schema."""
+ config = LLMConfigCreate(
+ name="New Config",
+ provider=LiteLLMProvider.GROQ,
+ model_name="llama-3",
+ api_key="gsk-test",
+ search_space_id=1,
+ )
+ assert config.search_space_id == 1
+
+ def test_llm_config_update_partial(self):
+ """Test LLMConfigUpdate with partial data."""
+ update = LLMConfigUpdate(name="Updated Name")
+ assert update.name == "Updated Name"
+ assert update.provider is None
+ assert update.model_name is None
+
+ def test_llm_config_update_full(self):
+ """Test LLMConfigUpdate with full data."""
+ update = LLMConfigUpdate(
+ name="Full Update",
+ provider=LiteLLMProvider.MISTRAL,
+ model_name="mistral-large",
+ api_key="new-key",
+ language="French",
+ )
+ assert update.language == "French"
+
+ def test_llm_config_read(self):
+ """Test LLMConfigRead schema."""
+ now = datetime.now(timezone.utc)
+ config = LLMConfigRead(
+ id=1,
+ name="Read Config",
+ provider=LiteLLMProvider.OPENAI,
+ model_name="gpt-4",
+ api_key="sk-test",
+ created_at=now,
+ search_space_id=1,
+ )
+ assert config.id == 1
+ assert config.created_at == now
+
+ def test_llm_config_read_global(self):
+ """Test LLMConfigRead for global config (no search_space_id)."""
+ config = LLMConfigRead(
+ id=-1,
+ name="Global Config",
+ provider=LiteLLMProvider.OPENAI,
+ model_name="gpt-4",
+ api_key="sk-global",
+ created_at=None,
+ search_space_id=None,
+ )
+ assert config.id == -1
+ assert config.search_space_id is None
+
+
+class TestSearchSpaceSchemas:
+ """Tests for search space schema models."""
+
+ def test_search_space_base(self):
+ """Test SearchSpaceBase schema."""
+ space = SearchSpaceBase(name="My Search Space")
+ assert space.name == "My Search Space"
+ assert space.description is None
+
+ def test_search_space_base_with_description(self):
+ """Test SearchSpaceBase with description."""
+ space = SearchSpaceBase(
+ name="My Search Space",
+ description="A space for searching",
+ )
+ assert space.description == "A space for searching"
+
+ def test_search_space_create_defaults(self):
+ """Test SearchSpaceCreate with default values."""
+ space = SearchSpaceCreate(name="New Space")
+ assert space.citations_enabled is True
+ assert space.qna_custom_instructions is None
+
+ def test_search_space_create_custom(self):
+ """Test SearchSpaceCreate with custom values."""
+ space = SearchSpaceCreate(
+ name="Custom Space",
+ description="Custom description",
+ citations_enabled=False,
+ qna_custom_instructions="Be concise",
+ )
+ assert space.citations_enabled is False
+ assert space.qna_custom_instructions == "Be concise"
+
+ def test_search_space_update_partial(self):
+ """Test SearchSpaceUpdate with partial data."""
+ update = SearchSpaceUpdate(name="Updated Name")
+ assert update.name == "Updated Name"
+ assert update.description is None
+ assert update.citations_enabled is None
+
+ def test_search_space_update_full(self):
+ """Test SearchSpaceUpdate with all fields."""
+ update = SearchSpaceUpdate(
+ name="Full Update",
+ description="New description",
+ citations_enabled=True,
+ qna_custom_instructions="New instructions",
+ )
+ assert update.qna_custom_instructions == "New instructions"
+
+ def test_search_space_read(self):
+ """Test SearchSpaceRead schema."""
+ now = datetime.now(timezone.utc)
+ user_id = uuid4()
+ space = SearchSpaceRead(
+ id=1,
+ name="Read Space",
+ description="Description",
+ created_at=now,
+ user_id=user_id,
+ citations_enabled=True,
+ qna_custom_instructions=None,
+ )
+ assert space.id == 1
+ assert space.user_id == user_id
+
+ def test_search_space_with_stats(self):
+ """Test SearchSpaceWithStats schema."""
+ now = datetime.now(timezone.utc)
+ user_id = uuid4()
+ space = SearchSpaceWithStats(
+ id=1,
+ name="Space with Stats",
+ created_at=now,
+ user_id=user_id,
+ citations_enabled=True,
+ member_count=5,
+ is_owner=True,
+ )
+ assert space.member_count == 5
+ assert space.is_owner is True
+
+ def test_search_space_with_stats_defaults(self):
+ """Test SearchSpaceWithStats default values."""
+ now = datetime.now(timezone.utc)
+ user_id = uuid4()
+ space = SearchSpaceWithStats(
+ id=1,
+ name="Default Stats Space",
+ created_at=now,
+ user_id=user_id,
+ citations_enabled=True,
+ )
+ assert space.member_count == 1
+ assert space.is_owner is False
+
+
+class TestSchemaValidation:
+ """Tests for schema validation errors."""
+
+ def test_chat_base_missing_required(self):
+ """Test ChatBase raises error for missing required fields."""
+ with pytest.raises(ValidationError):
+ ChatBase(type=ChatType.QNA, title="Test") # Missing messages and search_space_id
+
+ def test_llm_config_name_too_long(self):
+ """Test LLMConfigBase validates name length."""
+ with pytest.raises(ValidationError):
+ LLMConfigBase(
+ name="x" * 101, # Exceeds max_length of 100
+ provider=LiteLLMProvider.OPENAI,
+ model_name="gpt-4",
+ api_key="test",
+ )
+
+ def test_llm_config_model_name_too_long(self):
+ """Test LLMConfigBase validates model_name length."""
+ with pytest.raises(ValidationError):
+ LLMConfigBase(
+ name="Valid Name",
+ provider=LiteLLMProvider.OPENAI,
+ model_name="x" * 101, # Exceeds max_length of 100
+ api_key="test",
+ )
+
+ def test_document_read_missing_required(self):
+ """Test DocumentRead raises error for missing required fields."""
+ with pytest.raises(ValidationError):
+ DocumentRead(
+ id=1,
+ title="Test",
+ # Missing document_type, document_metadata, content, created_at, search_space_id
+ )
diff --git a/surfsense_backend/tests/test_validators.py b/surfsense_backend/tests/test_validators.py
new file mode 100644
index 000000000..8baa690d4
--- /dev/null
+++ b/surfsense_backend/tests/test_validators.py
@@ -0,0 +1,441 @@
+"""
+Tests for the validators module.
+"""
+
+import pytest
+from fastapi import HTTPException
+
+from app.utils.validators import (
+ validate_connectors,
+ validate_document_ids,
+ validate_email,
+ validate_messages,
+ validate_research_mode,
+ validate_search_mode,
+ validate_search_space_id,
+ validate_top_k,
+ validate_url,
+ validate_uuid,
+)
+
+
+class TestValidateSearchSpaceId:
+ """Tests for validate_search_space_id function."""
+
+ def test_valid_integer(self):
+ """Test valid integer input."""
+ assert validate_search_space_id(1) == 1
+ assert validate_search_space_id(100) == 100
+ assert validate_search_space_id(999999) == 999999
+
+ def test_valid_string(self):
+ """Test valid string input."""
+ assert validate_search_space_id("1") == 1
+ assert validate_search_space_id("100") == 100
+ assert validate_search_space_id(" 50 ") == 50 # Trimmed
+
+ def test_none_raises_error(self):
+ """Test that None raises HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_search_space_id(None)
+ assert exc_info.value.status_code == 400
+ assert "required" in exc_info.value.detail
+
+ def test_zero_raises_error(self):
+ """Test that zero raises HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_search_space_id(0)
+ assert exc_info.value.status_code == 400
+ assert "positive" in exc_info.value.detail
+
+ def test_negative_raises_error(self):
+ """Test that negative values raise HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_search_space_id(-1)
+ assert exc_info.value.status_code == 400
+ assert "positive" in exc_info.value.detail
+
+ def test_boolean_raises_error(self):
+ """Test that boolean raises HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_search_space_id(True)
+ assert exc_info.value.status_code == 400
+ assert "boolean" in exc_info.value.detail
+
+ def test_empty_string_raises_error(self):
+ """Test that empty string raises HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_search_space_id("")
+ assert exc_info.value.status_code == 400
+
+ def test_invalid_string_raises_error(self):
+ """Test that invalid string raises HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_search_space_id("abc")
+ assert exc_info.value.status_code == 400
+
+ def test_float_raises_error(self):
+ """Test that float raises HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_search_space_id(1.5)
+ assert exc_info.value.status_code == 400
+
+
+class TestValidateDocumentIds:
+ """Tests for validate_document_ids function."""
+
+ def test_none_returns_empty_list(self):
+ """Test that None returns empty list."""
+ assert validate_document_ids(None) == []
+
+ def test_empty_list_returns_empty_list(self):
+ """Test that empty list returns empty list."""
+ assert validate_document_ids([]) == []
+
+ def test_valid_integer_list(self):
+ """Test valid integer list."""
+ assert validate_document_ids([1, 2, 3]) == [1, 2, 3]
+
+ def test_valid_string_list(self):
+ """Test valid string list."""
+ assert validate_document_ids(["1", "2", "3"]) == [1, 2, 3]
+
+ def test_mixed_valid_types(self):
+ """Test mixed valid types."""
+ assert validate_document_ids([1, "2", 3]) == [1, 2, 3]
+
+ def test_not_list_raises_error(self):
+ """Test that non-list raises HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_document_ids("not a list")
+ assert exc_info.value.status_code == 400
+ assert "must be a list" in exc_info.value.detail
+
+ def test_negative_id_raises_error(self):
+ """Test that negative ID raises HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_document_ids([1, -2, 3])
+ assert exc_info.value.status_code == 400
+ assert "positive" in exc_info.value.detail
+
+ def test_zero_id_raises_error(self):
+ """Test that zero ID raises HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_document_ids([0])
+ assert exc_info.value.status_code == 400
+ assert "positive" in exc_info.value.detail
+
+ def test_boolean_in_list_raises_error(self):
+ """Test that boolean in list raises HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_document_ids([1, True, 3])
+ assert exc_info.value.status_code == 400
+ assert "boolean" in exc_info.value.detail
+
+
+class TestValidateConnectors:
+ """Tests for validate_connectors function."""
+
+ def test_none_returns_empty_list(self):
+ """Test that None returns empty list."""
+ assert validate_connectors(None) == []
+
+ def test_empty_list_returns_empty_list(self):
+ """Test that empty list returns empty list."""
+ assert validate_connectors([]) == []
+
+ def test_valid_connectors(self):
+ """Test valid connector names."""
+ assert validate_connectors(["slack", "github"]) == ["slack", "github"]
+
+ def test_connector_with_underscore(self):
+ """Test connector names with underscores."""
+ assert validate_connectors(["google_calendar"]) == ["google_calendar"]
+
+ def test_connector_with_hyphen(self):
+ """Test connector names with hyphens."""
+ assert validate_connectors(["google-calendar"]) == ["google-calendar"]
+
+ def test_not_list_raises_error(self):
+ """Test that non-list raises HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_connectors("not a list")
+ assert exc_info.value.status_code == 400
+ assert "must be a list" in exc_info.value.detail
+
+ def test_non_string_in_list_raises_error(self):
+ """Test that non-string in list raises HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_connectors(["slack", 123])
+ assert exc_info.value.status_code == 400
+ assert "must be a string" in exc_info.value.detail
+
+ def test_empty_string_raises_error(self):
+ """Test that empty string raises HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_connectors(["slack", ""])
+ assert exc_info.value.status_code == 400
+ assert "cannot be empty" in exc_info.value.detail
+
+ def test_invalid_characters_raises_error(self):
+ """Test that invalid characters raise HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_connectors(["slack@connector"])
+ assert exc_info.value.status_code == 400
+ assert "invalid characters" in exc_info.value.detail
+
+
+class TestValidateResearchMode:
+ """Tests for validate_research_mode function."""
+
+ def test_none_returns_default(self):
+ """Test that None returns default value."""
+ assert validate_research_mode(None) == "QNA"
+
+ def test_valid_mode(self):
+ """Test valid mode."""
+ assert validate_research_mode("QNA") == "QNA"
+ assert validate_research_mode("qna") == "QNA" # Case insensitive
+
+ def test_non_string_raises_error(self):
+ """Test that non-string raises HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_research_mode(123)
+ assert exc_info.value.status_code == 400
+ assert "must be a string" in exc_info.value.detail
+
+ def test_invalid_mode_raises_error(self):
+ """Test that invalid mode raises HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_research_mode("INVALID")
+ assert exc_info.value.status_code == 400
+ assert "must be one of" in exc_info.value.detail
+
+
+class TestValidateSearchMode:
+ """Tests for validate_search_mode function."""
+
+ def test_none_returns_default(self):
+ """Test that None returns default value."""
+ assert validate_search_mode(None) == "CHUNKS"
+
+ def test_valid_modes(self):
+ """Test valid modes."""
+ assert validate_search_mode("CHUNKS") == "CHUNKS"
+ assert validate_search_mode("DOCUMENTS") == "DOCUMENTS"
+ assert validate_search_mode("chunks") == "CHUNKS" # Case insensitive
+
+ def test_non_string_raises_error(self):
+ """Test that non-string raises HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_search_mode(123)
+ assert exc_info.value.status_code == 400
+ assert "must be a string" in exc_info.value.detail
+
+ def test_invalid_mode_raises_error(self):
+ """Test that invalid mode raises HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_search_mode("INVALID")
+ assert exc_info.value.status_code == 400
+ assert "must be one of" in exc_info.value.detail
+
+
+class TestValidateTopK:
+ """Tests for validate_top_k function."""
+
+ def test_none_returns_default(self):
+ """Test that None returns default value."""
+ assert validate_top_k(None) == 10
+
+ def test_valid_integer(self):
+ """Test valid integer input."""
+ assert validate_top_k(1) == 1
+ assert validate_top_k(50) == 50
+ assert validate_top_k(100) == 100
+
+ def test_valid_string(self):
+ """Test valid string input."""
+ assert validate_top_k("5") == 5
+ assert validate_top_k(" 10 ") == 10
+
+ def test_zero_raises_error(self):
+ """Test that zero raises HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_top_k(0)
+ assert exc_info.value.status_code == 400
+ assert "positive" in exc_info.value.detail
+
+ def test_negative_raises_error(self):
+ """Test that negative values raise HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_top_k(-1)
+ assert exc_info.value.status_code == 400
+ assert "positive" in exc_info.value.detail
+
+ def test_exceeds_max_raises_error(self):
+ """Test that values over 100 raise HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_top_k(101)
+ assert exc_info.value.status_code == 400
+ assert "exceed 100" in exc_info.value.detail
+
+ def test_boolean_raises_error(self):
+ """Test that boolean raises HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_top_k(True)
+ assert exc_info.value.status_code == 400
+ assert "boolean" in exc_info.value.detail
+
+
+class TestValidateMessages:
+ """Tests for validate_messages function."""
+
+ def test_valid_messages(self):
+ """Test valid messages."""
+ messages = [
+ {"role": "user", "content": "Hello"},
+ {"role": "assistant", "content": "Hi there!"},
+ ]
+ result = validate_messages(messages)
+ assert len(result) == 2
+ assert result[0]["role"] == "user"
+ assert result[1]["role"] == "assistant"
+
+ def test_trims_content(self):
+ """Test that content is trimmed."""
+ messages = [{"role": "user", "content": " Hello "}]
+ result = validate_messages(messages)
+ assert result[0]["content"] == "Hello"
+
+ def test_system_message_valid(self):
+ """Test that system messages are valid."""
+ messages = [
+ {"role": "system", "content": "You are helpful"},
+ {"role": "user", "content": "Hello"},
+ ]
+ result = validate_messages(messages)
+ assert result[0]["role"] == "system"
+
+ def test_not_list_raises_error(self):
+ """Test that non-list raises HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_messages("not a list")
+ assert exc_info.value.status_code == 400
+ assert "must be a list" in exc_info.value.detail
+
+ def test_empty_list_raises_error(self):
+ """Test that empty list raises HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_messages([])
+ assert exc_info.value.status_code == 400
+ assert "cannot be empty" in exc_info.value.detail
+
+ def test_missing_role_raises_error(self):
+ """Test that missing role raises HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_messages([{"content": "Hello"}])
+ assert exc_info.value.status_code == 400
+ assert "role" in exc_info.value.detail
+
+ def test_missing_content_raises_error(self):
+ """Test that missing content raises HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_messages([{"role": "user"}])
+ assert exc_info.value.status_code == 400
+ assert "content" in exc_info.value.detail
+
+ def test_invalid_role_raises_error(self):
+ """Test that invalid role raises HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_messages([{"role": "invalid", "content": "Hello"}])
+ assert exc_info.value.status_code == 400
+ assert "role" in exc_info.value.detail
+
+ def test_empty_content_raises_error(self):
+ """Test that empty content raises HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_messages([{"role": "user", "content": " "}])
+ assert exc_info.value.status_code == 400
+ assert "cannot be empty" in exc_info.value.detail
+
+
+class TestValidateEmail:
+ """Tests for validate_email function."""
+
+ def test_valid_email(self):
+ """Test valid email addresses."""
+ assert validate_email("test@example.com") == "test@example.com"
+ assert validate_email("user.name@domain.co.uk") == "user.name@domain.co.uk"
+
+ def test_trims_whitespace(self):
+ """Test that whitespace is trimmed."""
+ assert validate_email(" test@example.com ") == "test@example.com"
+
+ def test_empty_raises_error(self):
+ """Test that empty string raises HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_email("")
+ assert exc_info.value.status_code == 400
+
+ def test_invalid_format_raises_error(self):
+ """Test that invalid format raises HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_email("not-an-email")
+ assert exc_info.value.status_code == 400
+ assert "Invalid email" in exc_info.value.detail
+
+
+class TestValidateUrl:
+ """Tests for validate_url function."""
+
+ def test_valid_url(self):
+ """Test valid URLs."""
+ assert validate_url("https://example.com") == "https://example.com"
+ assert (
+ validate_url("http://sub.domain.com/path")
+ == "http://sub.domain.com/path"
+ )
+
+ def test_trims_whitespace(self):
+ """Test that whitespace is trimmed."""
+ assert validate_url(" https://example.com ") == "https://example.com"
+
+ def test_empty_raises_error(self):
+ """Test that empty string raises HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_url("")
+ assert exc_info.value.status_code == 400
+
+ def test_invalid_format_raises_error(self):
+ """Test that invalid format raises HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_url("not-a-url")
+ assert exc_info.value.status_code == 400
+ assert "Invalid URL" in exc_info.value.detail
+
+
+class TestValidateUuid:
+ """Tests for validate_uuid function."""
+
+ def test_valid_uuid(self):
+ """Test valid UUIDs."""
+ uuid_str = "123e4567-e89b-12d3-a456-426614174000"
+ assert validate_uuid(uuid_str) == uuid_str
+
+ def test_trims_whitespace(self):
+ """Test that whitespace is trimmed."""
+ uuid_str = " 123e4567-e89b-12d3-a456-426614174000 "
+ assert validate_uuid(uuid_str) == "123e4567-e89b-12d3-a456-426614174000"
+
+ def test_empty_raises_error(self):
+ """Test that empty string raises HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_uuid("")
+ assert exc_info.value.status_code == 400
+
+ def test_invalid_format_raises_error(self):
+ """Test that invalid format raises HTTPException."""
+ with pytest.raises(HTTPException) as exc_info:
+ validate_uuid("not-a-uuid")
+ assert exc_info.value.status_code == 400
+ assert "Invalid UUID" in exc_info.value.detail
diff --git a/surfsense_web/package.json b/surfsense_web/package.json
index 8f85e96a7..05b67c869 100644
--- a/surfsense_web/package.json
+++ b/surfsense_web/package.json
@@ -18,7 +18,10 @@
"db:migrate": "drizzle-kit migrate",
"db:push": "drizzle-kit push",
"db:studio": "drizzle-kit studio",
- "format:fix": "npx @biomejs/biome check --fix"
+ "format:fix": "npx @biomejs/biome check --fix",
+ "test": "vitest run",
+ "test:watch": "vitest",
+ "test:coverage": "vitest run --coverage"
},
"dependencies": {
"@ai-sdk/react": "^1.2.12",
@@ -103,17 +106,23 @@
"@eslint/eslintrc": "^3.3.1",
"@tailwindcss/postcss": "^4.1.11",
"@tailwindcss/typography": "^0.5.16",
+ "@testing-library/jest-dom": "^6.6.3",
+ "@testing-library/react": "^16.1.0",
"@types/canvas-confetti": "^1.9.0",
"@types/node": "^20.19.9",
"@types/pg": "^8.15.5",
"@types/react": "^19.1.8",
"@types/react-dom": "^19.1.6",
+ "@vitejs/plugin-react": "^4.3.4",
+ "@vitest/coverage-v8": "^2.1.8",
"cross-env": "^7.0.3",
"drizzle-kit": "^0.31.5",
"eslint": "^9.32.0",
"eslint-config-next": "15.2.0",
+ "jsdom": "^25.0.1",
"tailwindcss": "^4.1.11",
"tsx": "^4.20.6",
- "typescript": "^5.8.3"
+ "typescript": "^5.8.3",
+ "vitest": "^2.1.8"
}
}
diff --git a/surfsense_web/pnpm-lock.yaml b/surfsense_web/pnpm-lock.yaml
index dd431ffae..afc174535 100644
--- a/surfsense_web/pnpm-lock.yaml
+++ b/surfsense_web/pnpm-lock.yaml
@@ -31,7 +31,7 @@ importers:
version: 0.5.17(@babel/runtime@7.26.9)(@codemirror/autocomplete@6.18.6)(@codemirror/language@6.11.2)(@codemirror/lint@6.8.5)(@codemirror/search@6.5.11)(@codemirror/state@6.5.2)(@codemirror/theme-one-dark@6.1.3)(@codemirror/view@6.38.1)(@lezer/highlight@1.2.1)(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(codemirror@6.0.2)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(yjs@13.6.27)
'@next/third-parties':
specifier: ^15.5.7
- version: 15.5.7(next@15.5.7(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react@19.1.0)
+ version: 15.5.7(next@15.5.7(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react@19.1.0)
'@number-flow/react':
specifier: ^0.5.10
version: 0.5.10(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
@@ -145,22 +145,22 @@ importers:
version: 1.4.8(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
fumadocs-core:
specifier: ^15.6.6
- version: 15.6.6(@types/react@19.1.8)(next@15.5.7(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
+ version: 15.6.6(@types/react@19.1.8)(next@15.5.7(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
fumadocs-mdx:
specifier: ^11.7.1
- version: 11.7.1(acorn@8.14.0)(fumadocs-core@15.6.6(@types/react@19.1.8)(next@15.5.7(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(next@15.5.7(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react@19.1.0)
+ version: 11.7.1(acorn@8.15.0)(fumadocs-core@15.6.6(@types/react@19.1.8)(next@15.5.7(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(next@15.5.7(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react@19.1.0)(vite@5.4.21(@types/node@20.19.9)(lightningcss@1.30.1))
fumadocs-ui:
specifier: ^15.6.6
- version: 15.6.6(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(next@15.5.7(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(tailwindcss@4.1.11)
+ version: 15.6.6(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(next@15.5.7(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(tailwindcss@4.1.11)
geist:
specifier: ^1.4.2
- version: 1.4.2(next@15.5.7(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0))
+ version: 1.4.2(next@15.5.7(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0))
jotai:
specifier: ^2.15.1
- version: 2.15.1(@types/react@19.1.8)(react@19.1.0)
+ version: 2.15.1(@babel/core@7.28.5)(@babel/template@7.27.2)(@types/react@19.1.8)(react@19.1.0)
jotai-tanstack-query:
specifier: ^0.11.0
- version: 0.11.0(@tanstack/query-core@5.90.7)(@tanstack/react-query@5.90.7(react@19.1.0))(jotai@2.15.1(@types/react@19.1.8)(react@19.1.0))(react@19.1.0)
+ version: 0.11.0(@tanstack/query-core@5.90.7)(@tanstack/react-query@5.90.7(react@19.1.0))(jotai@2.15.1(@babel/core@7.28.5)(@babel/template@7.27.2)(@types/react@19.1.8)(react@19.1.0))(react@19.1.0)
lucide-react:
specifier: ^0.477.0
version: 0.477.0(react@19.1.0)
@@ -169,10 +169,10 @@ importers:
version: 12.23.22(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
next:
specifier: ^15.5.7
- version: 15.5.7(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
+ version: 15.5.7(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
next-intl:
specifier: ^3.26.5
- version: 3.26.5(next@15.5.7(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react@19.1.0)
+ version: 3.26.5(next@15.5.7(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react@19.1.0)
next-themes:
specifier: ^0.4.6
version: 0.4.6(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
@@ -249,6 +249,12 @@ importers:
'@tailwindcss/typography':
specifier: ^0.5.16
version: 0.5.16(tailwindcss@4.1.11)
+ '@testing-library/jest-dom':
+ specifier: ^6.6.3
+ version: 6.9.1
+ '@testing-library/react':
+ specifier: ^16.1.0
+ version: 16.3.0(@testing-library/dom@10.4.1)(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
'@types/canvas-confetti':
specifier: ^1.9.0
version: 1.9.0
@@ -264,6 +270,12 @@ importers:
'@types/react-dom':
specifier: ^19.1.6
version: 19.1.6(@types/react@19.1.8)
+ '@vitejs/plugin-react':
+ specifier: ^4.3.4
+ version: 4.7.0(vite@5.4.21(@types/node@20.19.9)(lightningcss@1.30.1))
+ '@vitest/coverage-v8':
+ specifier: ^2.1.8
+ version: 2.1.9(vitest@2.1.9(@types/node@20.19.9)(jsdom@25.0.1)(lightningcss@1.30.1))
cross-env:
specifier: ^7.0.3
version: 7.0.3
@@ -276,6 +288,9 @@ importers:
eslint-config-next:
specifier: 15.2.0
version: 15.2.0(eslint@9.32.0(jiti@2.4.2))(typescript@5.8.3)
+ jsdom:
+ specifier: ^25.0.1
+ version: 25.0.1
tailwindcss:
specifier: ^4.1.11
version: 4.1.11
@@ -285,9 +300,15 @@ importers:
typescript:
specifier: ^5.8.3
version: 5.8.3
+ vitest:
+ specifier: ^2.1.8
+ version: 2.1.9(@types/node@20.19.9)(jsdom@25.0.1)(lightningcss@1.30.1)
packages:
+ '@adobe/css-tools@4.4.4':
+ resolution: {integrity: sha512-Elp+iwUx5rN5+Y8xLt5/GRoG20WGoDCQ/1Fb+1LiGtvwbDavuSk0jhD/eZdckHAuzcDzccnkv+rEjyWfRx18gg==}
+
'@ai-sdk/provider-utils@2.2.8':
resolution: {integrity: sha512-fqhG+4sCVv8x7nFzYnFo19ryhAa3w096Kmc3hWxMQfW/TubPOmt3A6tYZhl4mUfQWWQMsuSkLrtjlWuXBVSGQA==}
engines: {node: '>=18'}
@@ -325,10 +346,96 @@ packages:
'@asamuzakjp/css-color@3.2.0':
resolution: {integrity: sha512-K1A6z8tS3XsmCMM86xoWdn7Fkdn9m6RSVtocUrJYIwZnFVkng/PvkEoWtOWmP+Scc6saYWHWZYbndEEXxl24jw==}
+ '@babel/code-frame@7.27.1':
+ resolution: {integrity: sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/compat-data@7.28.5':
+ resolution: {integrity: sha512-6uFXyCayocRbqhZOB+6XcuZbkMNimwfVGFji8CTZnCzOHVGvDqzvitu1re2AU5LROliz7eQPhB8CpAMvnx9EjA==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/core@7.28.5':
+ resolution: {integrity: sha512-e7jT4DxYvIDLk1ZHmU/m/mB19rex9sv0c2ftBtjSBv+kVM/902eh0fINUzD7UwLLNR+jU585GxUJ8/EBfAM5fw==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/generator@7.28.5':
+ resolution: {integrity: sha512-3EwLFhZ38J4VyIP6WNtt2kUdW9dokXA9Cr4IVIFHuCpZ3H8/YFOl5JjZHisrn1fATPBmKKqXzDFvh9fUwHz6CQ==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/helper-compilation-targets@7.27.2':
+ resolution: {integrity: sha512-2+1thGUUWWjLTYTHZWK1n8Yga0ijBz1XAhUXcKy81rd5g6yh7hGqMp45v7cadSbEHc9G3OTv45SyneRN3ps4DQ==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/helper-globals@7.28.0':
+ resolution: {integrity: sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/helper-module-imports@7.27.1':
+ resolution: {integrity: sha512-0gSFWUPNXNopqtIPQvlD5WgXYI5GY2kP2cCvoT8kczjbfcfuIljTbcWrulD1CIPIX2gt1wghbDy08yE1p+/r3w==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/helper-module-transforms@7.28.3':
+ resolution: {integrity: sha512-gytXUbs8k2sXS9PnQptz5o0QnpLL51SwASIORY6XaBKF88nsOT0Zw9szLqlSGQDP/4TljBAD5y98p2U1fqkdsw==}
+ engines: {node: '>=6.9.0'}
+ peerDependencies:
+ '@babel/core': ^7.0.0
+
+ '@babel/helper-plugin-utils@7.27.1':
+ resolution: {integrity: sha512-1gn1Up5YXka3YYAHGKpbideQ5Yjf1tDa9qYcgysz+cNCXukyLl6DjPXhD3VRwSb8c0J9tA4b2+rHEZtc6R0tlw==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/helper-string-parser@7.27.1':
+ resolution: {integrity: sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/helper-validator-identifier@7.28.5':
+ resolution: {integrity: sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/helper-validator-option@7.27.1':
+ resolution: {integrity: sha512-YvjJow9FxbhFFKDSuFnVCe2WxXk1zWc22fFePVNEaWJEu8IrZVlda6N0uHwzZrUM1il7NC9Mlp4MaJYbYd9JSg==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/helpers@7.28.4':
+ resolution: {integrity: sha512-HFN59MmQXGHVyYadKLVumYsA9dBFun/ldYxipEjzA4196jpLZd8UjEEBLkbEkvfYreDqJhZxYAWFPtrfhNpj4w==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/parser@7.28.5':
+ resolution: {integrity: sha512-KKBU1VGYR7ORr3At5HAtUQ+TV3SzRCXmA/8OdDZiLDBIZxVyzXuztPjfLd3BV1PRAQGCMWWSHYhL0F8d5uHBDQ==}
+ engines: {node: '>=6.0.0'}
+ hasBin: true
+
+ '@babel/plugin-transform-react-jsx-self@7.27.1':
+ resolution: {integrity: sha512-6UzkCs+ejGdZ5mFFC/OCUrv028ab2fp1znZmCZjAOBKiBK2jXD1O+BPSfX8X2qjJ75fZBMSnQn3Rq2mrBJK2mw==}
+ engines: {node: '>=6.9.0'}
+ peerDependencies:
+ '@babel/core': ^7.0.0-0
+
+ '@babel/plugin-transform-react-jsx-source@7.27.1':
+ resolution: {integrity: sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw==}
+ engines: {node: '>=6.9.0'}
+ peerDependencies:
+ '@babel/core': ^7.0.0-0
+
'@babel/runtime@7.26.9':
resolution: {integrity: sha512-aA63XwOkcl4xxQa3HjPMqOP6LiK0ZDv3mUPYEFXkpHbaFjtGggE1A61FjFzJnB+p7/oy2gA8E+rcBNl/zC1tMg==}
engines: {node: '>=6.9.0'}
+ '@babel/template@7.27.2':
+ resolution: {integrity: sha512-LPDZ85aEJyYSd18/DkjNh4/y1ntkE5KwUHWTiqgRxruuZL2F1yuHligVHLvcHY2vMHXttKFpJn6LwfI7cw7ODw==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/traverse@7.28.5':
+ resolution: {integrity: sha512-TCCj4t55U90khlYkVV/0TfkJkAkUg3jZFA3Neb7unZT8CPok7iiRfaX0F+WnqWqt7OxhOn0uBKXCw4lbL8W0aQ==}
+ engines: {node: '>=6.9.0'}
+
+ '@babel/types@7.28.5':
+ resolution: {integrity: sha512-qQ5m48eI/MFLQ5PxQj4PFaprjyCTLI37ElWMmNs0K8Lk3dVeOdNpB3ks8jc7yM5CDmVC73eMVk/trk3fgmrUpA==}
+ engines: {node: '>=6.9.0'}
+
+ '@bcoe/v8-coverage@0.2.3':
+ resolution: {integrity: sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==}
+
'@biomejs/biome@2.1.2':
resolution: {integrity: sha512-yq8ZZuKuBVDgAS76LWCfFKHSYIAgqkxVB3mGVVpOe2vSkUTs7xG46zXZeNPRNVjiJuw0SZ3+J2rXiYx0RUpfGg==}
engines: {node: '>=14.21.3'}
@@ -564,6 +671,12 @@ packages:
resolution: {integrity: sha512-FxEMIkJKnodyA1OaCUoEvbYRkoZlLZ4d/eXFu9Fh8CbBBgP5EmZxrfTRyN0qpXZ4vOvqnE5YdRdcrmUUXuU+dA==}
deprecated: 'Merged into tsx: https://tsx.is'
+ '@esbuild/aix-ppc64@0.21.5':
+ resolution: {integrity: sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==}
+ engines: {node: '>=12'}
+ cpu: [ppc64]
+ os: [aix]
+
'@esbuild/aix-ppc64@0.25.8':
resolution: {integrity: sha512-urAvrUedIqEiFR3FYSLTWQgLu5tb+m0qZw0NBEasUeo6wuqatkMDaRT+1uABiGXEu5vqgPd7FGE1BhsAIy9QVA==}
engines: {node: '>=18'}
@@ -576,6 +689,12 @@ packages:
cpu: [arm64]
os: [android]
+ '@esbuild/android-arm64@0.21.5':
+ resolution: {integrity: sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==}
+ engines: {node: '>=12'}
+ cpu: [arm64]
+ os: [android]
+
'@esbuild/android-arm64@0.25.8':
resolution: {integrity: sha512-OD3p7LYzWpLhZEyATcTSJ67qB5D+20vbtr6vHlHWSQYhKtzUYrETuWThmzFpZtFsBIxRvhO07+UgVA9m0i/O1w==}
engines: {node: '>=18'}
@@ -588,6 +707,12 @@ packages:
cpu: [arm]
os: [android]
+ '@esbuild/android-arm@0.21.5':
+ resolution: {integrity: sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==}
+ engines: {node: '>=12'}
+ cpu: [arm]
+ os: [android]
+
'@esbuild/android-arm@0.25.8':
resolution: {integrity: sha512-RONsAvGCz5oWyePVnLdZY/HHwA++nxYWIX1atInlaW6SEkwq6XkP3+cb825EUcRs5Vss/lGh/2YxAb5xqc07Uw==}
engines: {node: '>=18'}
@@ -600,6 +725,12 @@ packages:
cpu: [x64]
os: [android]
+ '@esbuild/android-x64@0.21.5':
+ resolution: {integrity: sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==}
+ engines: {node: '>=12'}
+ cpu: [x64]
+ os: [android]
+
'@esbuild/android-x64@0.25.8':
resolution: {integrity: sha512-yJAVPklM5+4+9dTeKwHOaA+LQkmrKFX96BM0A/2zQrbS6ENCmxc4OVoBs5dPkCCak2roAD+jKCdnmOqKszPkjA==}
engines: {node: '>=18'}
@@ -612,6 +743,12 @@ packages:
cpu: [arm64]
os: [darwin]
+ '@esbuild/darwin-arm64@0.21.5':
+ resolution: {integrity: sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==}
+ engines: {node: '>=12'}
+ cpu: [arm64]
+ os: [darwin]
+
'@esbuild/darwin-arm64@0.25.8':
resolution: {integrity: sha512-Jw0mxgIaYX6R8ODrdkLLPwBqHTtYHJSmzzd+QeytSugzQ0Vg4c5rDky5VgkoowbZQahCbsv1rT1KW72MPIkevw==}
engines: {node: '>=18'}
@@ -624,6 +761,12 @@ packages:
cpu: [x64]
os: [darwin]
+ '@esbuild/darwin-x64@0.21.5':
+ resolution: {integrity: sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==}
+ engines: {node: '>=12'}
+ cpu: [x64]
+ os: [darwin]
+
'@esbuild/darwin-x64@0.25.8':
resolution: {integrity: sha512-Vh2gLxxHnuoQ+GjPNvDSDRpoBCUzY4Pu0kBqMBDlK4fuWbKgGtmDIeEC081xi26PPjn+1tct+Bh8FjyLlw1Zlg==}
engines: {node: '>=18'}
@@ -636,6 +779,12 @@ packages:
cpu: [arm64]
os: [freebsd]
+ '@esbuild/freebsd-arm64@0.21.5':
+ resolution: {integrity: sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==}
+ engines: {node: '>=12'}
+ cpu: [arm64]
+ os: [freebsd]
+
'@esbuild/freebsd-arm64@0.25.8':
resolution: {integrity: sha512-YPJ7hDQ9DnNe5vxOm6jaie9QsTwcKedPvizTVlqWG9GBSq+BuyWEDazlGaDTC5NGU4QJd666V0yqCBL2oWKPfA==}
engines: {node: '>=18'}
@@ -648,6 +797,12 @@ packages:
cpu: [x64]
os: [freebsd]
+ '@esbuild/freebsd-x64@0.21.5':
+ resolution: {integrity: sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==}
+ engines: {node: '>=12'}
+ cpu: [x64]
+ os: [freebsd]
+
'@esbuild/freebsd-x64@0.25.8':
resolution: {integrity: sha512-MmaEXxQRdXNFsRN/KcIimLnSJrk2r5H8v+WVafRWz5xdSVmWLoITZQXcgehI2ZE6gioE6HirAEToM/RvFBeuhw==}
engines: {node: '>=18'}
@@ -660,6 +815,12 @@ packages:
cpu: [arm64]
os: [linux]
+ '@esbuild/linux-arm64@0.21.5':
+ resolution: {integrity: sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==}
+ engines: {node: '>=12'}
+ cpu: [arm64]
+ os: [linux]
+
'@esbuild/linux-arm64@0.25.8':
resolution: {integrity: sha512-WIgg00ARWv/uYLU7lsuDK00d/hHSfES5BzdWAdAig1ioV5kaFNrtK8EqGcUBJhYqotlUByUKz5Qo6u8tt7iD/w==}
engines: {node: '>=18'}
@@ -672,6 +833,12 @@ packages:
cpu: [arm]
os: [linux]
+ '@esbuild/linux-arm@0.21.5':
+ resolution: {integrity: sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==}
+ engines: {node: '>=12'}
+ cpu: [arm]
+ os: [linux]
+
'@esbuild/linux-arm@0.25.8':
resolution: {integrity: sha512-FuzEP9BixzZohl1kLf76KEVOsxtIBFwCaLupVuk4eFVnOZfU+Wsn+x5Ryam7nILV2pkq2TqQM9EZPsOBuMC+kg==}
engines: {node: '>=18'}
@@ -684,6 +851,12 @@ packages:
cpu: [ia32]
os: [linux]
+ '@esbuild/linux-ia32@0.21.5':
+ resolution: {integrity: sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==}
+ engines: {node: '>=12'}
+ cpu: [ia32]
+ os: [linux]
+
'@esbuild/linux-ia32@0.25.8':
resolution: {integrity: sha512-A1D9YzRX1i+1AJZuFFUMP1E9fMaYY+GnSQil9Tlw05utlE86EKTUA7RjwHDkEitmLYiFsRd9HwKBPEftNdBfjg==}
engines: {node: '>=18'}
@@ -696,6 +869,12 @@ packages:
cpu: [loong64]
os: [linux]
+ '@esbuild/linux-loong64@0.21.5':
+ resolution: {integrity: sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==}
+ engines: {node: '>=12'}
+ cpu: [loong64]
+ os: [linux]
+
'@esbuild/linux-loong64@0.25.8':
resolution: {integrity: sha512-O7k1J/dwHkY1RMVvglFHl1HzutGEFFZ3kNiDMSOyUrB7WcoHGf96Sh+64nTRT26l3GMbCW01Ekh/ThKM5iI7hQ==}
engines: {node: '>=18'}
@@ -708,6 +887,12 @@ packages:
cpu: [mips64el]
os: [linux]
+ '@esbuild/linux-mips64el@0.21.5':
+ resolution: {integrity: sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==}
+ engines: {node: '>=12'}
+ cpu: [mips64el]
+ os: [linux]
+
'@esbuild/linux-mips64el@0.25.8':
resolution: {integrity: sha512-uv+dqfRazte3BzfMp8PAQXmdGHQt2oC/y2ovwpTteqrMx2lwaksiFZ/bdkXJC19ttTvNXBuWH53zy/aTj1FgGw==}
engines: {node: '>=18'}
@@ -720,6 +905,12 @@ packages:
cpu: [ppc64]
os: [linux]
+ '@esbuild/linux-ppc64@0.21.5':
+ resolution: {integrity: sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==}
+ engines: {node: '>=12'}
+ cpu: [ppc64]
+ os: [linux]
+
'@esbuild/linux-ppc64@0.25.8':
resolution: {integrity: sha512-GyG0KcMi1GBavP5JgAkkstMGyMholMDybAf8wF5A70CALlDM2p/f7YFE7H92eDeH/VBtFJA5MT4nRPDGg4JuzQ==}
engines: {node: '>=18'}
@@ -732,6 +923,12 @@ packages:
cpu: [riscv64]
os: [linux]
+ '@esbuild/linux-riscv64@0.21.5':
+ resolution: {integrity: sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==}
+ engines: {node: '>=12'}
+ cpu: [riscv64]
+ os: [linux]
+
'@esbuild/linux-riscv64@0.25.8':
resolution: {integrity: sha512-rAqDYFv3yzMrq7GIcen3XP7TUEG/4LK86LUPMIz6RT8A6pRIDn0sDcvjudVZBiiTcZCY9y2SgYX2lgK3AF+1eg==}
engines: {node: '>=18'}
@@ -744,6 +941,12 @@ packages:
cpu: [s390x]
os: [linux]
+ '@esbuild/linux-s390x@0.21.5':
+ resolution: {integrity: sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==}
+ engines: {node: '>=12'}
+ cpu: [s390x]
+ os: [linux]
+
'@esbuild/linux-s390x@0.25.8':
resolution: {integrity: sha512-Xutvh6VjlbcHpsIIbwY8GVRbwoviWT19tFhgdA7DlenLGC/mbc3lBoVb7jxj9Z+eyGqvcnSyIltYUrkKzWqSvg==}
engines: {node: '>=18'}
@@ -756,6 +959,12 @@ packages:
cpu: [x64]
os: [linux]
+ '@esbuild/linux-x64@0.21.5':
+ resolution: {integrity: sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==}
+ engines: {node: '>=12'}
+ cpu: [x64]
+ os: [linux]
+
'@esbuild/linux-x64@0.25.8':
resolution: {integrity: sha512-ASFQhgY4ElXh3nDcOMTkQero4b1lgubskNlhIfJrsH5OKZXDpUAKBlNS0Kx81jwOBp+HCeZqmoJuihTv57/jvQ==}
engines: {node: '>=18'}
@@ -774,6 +983,12 @@ packages:
cpu: [x64]
os: [netbsd]
+ '@esbuild/netbsd-x64@0.21.5':
+ resolution: {integrity: sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==}
+ engines: {node: '>=12'}
+ cpu: [x64]
+ os: [netbsd]
+
'@esbuild/netbsd-x64@0.25.8':
resolution: {integrity: sha512-nVDCkrvx2ua+XQNyfrujIG38+YGyuy2Ru9kKVNyh5jAys6n+l44tTtToqHjino2My8VAY6Lw9H7RI73XFi66Cg==}
engines: {node: '>=18'}
@@ -792,6 +1007,12 @@ packages:
cpu: [x64]
os: [openbsd]
+ '@esbuild/openbsd-x64@0.21.5':
+ resolution: {integrity: sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==}
+ engines: {node: '>=12'}
+ cpu: [x64]
+ os: [openbsd]
+
'@esbuild/openbsd-x64@0.25.8':
resolution: {integrity: sha512-1h8MUAwa0VhNCDp6Af0HToI2TJFAn1uqT9Al6DJVzdIBAd21m/G0Yfc77KDM3uF3T/YaOgQq3qTJHPbTOInaIQ==}
engines: {node: '>=18'}
@@ -810,6 +1031,12 @@ packages:
cpu: [x64]
os: [sunos]
+ '@esbuild/sunos-x64@0.21.5':
+ resolution: {integrity: sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==}
+ engines: {node: '>=12'}
+ cpu: [x64]
+ os: [sunos]
+
'@esbuild/sunos-x64@0.25.8':
resolution: {integrity: sha512-zUlaP2S12YhQ2UzUfcCuMDHQFJyKABkAjvO5YSndMiIkMimPmxA+BYSBikWgsRpvyxuRnow4nS5NPnf9fpv41w==}
engines: {node: '>=18'}
@@ -822,6 +1049,12 @@ packages:
cpu: [arm64]
os: [win32]
+ '@esbuild/win32-arm64@0.21.5':
+ resolution: {integrity: sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==}
+ engines: {node: '>=12'}
+ cpu: [arm64]
+ os: [win32]
+
'@esbuild/win32-arm64@0.25.8':
resolution: {integrity: sha512-YEGFFWESlPva8hGL+zvj2z/SaK+pH0SwOM0Nc/d+rVnW7GSTFlLBGzZkuSU9kFIGIo8q9X3ucpZhu8PDN5A2sQ==}
engines: {node: '>=18'}
@@ -834,6 +1067,12 @@ packages:
cpu: [ia32]
os: [win32]
+ '@esbuild/win32-ia32@0.21.5':
+ resolution: {integrity: sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==}
+ engines: {node: '>=12'}
+ cpu: [ia32]
+ os: [win32]
+
'@esbuild/win32-ia32@0.25.8':
resolution: {integrity: sha512-hiGgGC6KZ5LZz58OL/+qVVoZiuZlUYlYHNAmczOm7bs2oE1XriPFi5ZHHrS8ACpV5EjySrnoCKmcbQMN+ojnHg==}
engines: {node: '>=18'}
@@ -846,6 +1085,12 @@ packages:
cpu: [x64]
os: [win32]
+ '@esbuild/win32-x64@0.21.5':
+ resolution: {integrity: sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==}
+ engines: {node: '>=12'}
+ cpu: [x64]
+ os: [win32]
+
'@esbuild/win32-x64@0.25.8':
resolution: {integrity: sha512-cn3Yr7+OaaZq1c+2pe+8yxC8E144SReCQjN6/2ynubzYjvyqZjTXfQJpAcQpsdJq3My7XADANiYGHoFC69pLQw==}
engines: {node: '>=18'}
@@ -1127,14 +1372,28 @@ packages:
cpu: [x64]
os: [win32]
+ '@isaacs/cliui@8.0.2':
+ resolution: {integrity: sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==}
+ engines: {node: '>=12'}
+
'@isaacs/fs-minipass@4.0.1':
resolution: {integrity: sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w==}
engines: {node: '>=18.0.0'}
+ '@istanbuljs/schema@0.1.3':
+ resolution: {integrity: sha512-ZXRY4jNvVgSVQ8DL3LTcakaAtXwTVUxE81hslsyD2AtoXW/wVob10HkOJ1X/pAlcI7D+2YoZKg5do8G/w6RYgA==}
+ engines: {node: '>=8'}
+
+ '@jridgewell/gen-mapping@0.3.13':
+ resolution: {integrity: sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==}
+
'@jridgewell/gen-mapping@0.3.8':
resolution: {integrity: sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA==}
engines: {node: '>=6.0.0'}
+ '@jridgewell/remapping@2.3.5':
+ resolution: {integrity: sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==}
+
'@jridgewell/resolve-uri@3.1.2':
resolution: {integrity: sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==}
engines: {node: '>=6.0.0'}
@@ -1149,6 +1408,9 @@ packages:
'@jridgewell/trace-mapping@0.3.25':
resolution: {integrity: sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==}
+ '@jridgewell/trace-mapping@0.3.31':
+ resolution: {integrity: sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==}
+
'@lexical/clipboard@0.32.1':
resolution: {integrity: sha512-oO7CuMVh3EFEqtE6+7Ccf7jMD5RNUmSdTnFm/X4kYNGqs9lgGt8j5PgSk7oP9OuAjxKNdBTbltSlh54CX3AUIg==}
@@ -1419,6 +1681,10 @@ packages:
resolution: {integrity: sha512-Szki0cgFiXE5F9RLx2lUyEtJllnuCSQ4B8RLDwIjXkVit6qZjoDAxH+xhJs29MjKLDz0tbPLdKFa6QrQ/qoGGA==}
engines: {node: '>= 20.0.0'}
+ '@pkgjs/parseargs@0.11.0':
+ resolution: {integrity: sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==}
+ engines: {node: '>=14'}
+
'@radix-ui/colors@3.0.0':
resolution: {integrity: sha512-FUOsGBkHrYJwCSEtWRCIfQbZG7q1e6DgxCIOe1SUQzDe/7rXXeA47s8yCn6fuTNQAj1Zq4oTFi9Yjp3wzElcxg==}
@@ -2349,6 +2615,119 @@ packages:
'@remirror/core-constants@3.0.0':
resolution: {integrity: sha512-42aWfPrimMfDKDi4YegyS7x+/0tlzaqwPQCULLanv3DMIlu96KTJR0fM5isWX2UViOqlGnX6YFgqWepcX+XMNg==}
+ '@rolldown/pluginutils@1.0.0-beta.27':
+ resolution: {integrity: sha512-+d0F4MKMCbeVUJwG96uQ4SgAznZNSq93I3V+9NHA4OpvqG8mRCpGdKmK8l/dl02h2CCDHwW2FqilnTyDcAnqjA==}
+
+ '@rollup/rollup-android-arm-eabi@4.53.3':
+ resolution: {integrity: sha512-mRSi+4cBjrRLoaal2PnqH82Wqyb+d3HsPUN/W+WslCXsZsyHa9ZeQQX/pQsZaVIWDkPcpV6jJ+3KLbTbgnwv8w==}
+ cpu: [arm]
+ os: [android]
+
+ '@rollup/rollup-android-arm64@4.53.3':
+ resolution: {integrity: sha512-CbDGaMpdE9sh7sCmTrTUyllhrg65t6SwhjlMJsLr+J8YjFuPmCEjbBSx4Z/e4SmDyH3aB5hGaJUP2ltV/vcs4w==}
+ cpu: [arm64]
+ os: [android]
+
+ '@rollup/rollup-darwin-arm64@4.53.3':
+ resolution: {integrity: sha512-Nr7SlQeqIBpOV6BHHGZgYBuSdanCXuw09hon14MGOLGmXAFYjx1wNvquVPmpZnl0tLjg25dEdr4IQ6GgyToCUA==}
+ cpu: [arm64]
+ os: [darwin]
+
+ '@rollup/rollup-darwin-x64@4.53.3':
+ resolution: {integrity: sha512-DZ8N4CSNfl965CmPktJ8oBnfYr3F8dTTNBQkRlffnUarJ2ohudQD17sZBa097J8xhQ26AwhHJ5mvUyQW8ddTsQ==}
+ cpu: [x64]
+ os: [darwin]
+
+ '@rollup/rollup-freebsd-arm64@4.53.3':
+ resolution: {integrity: sha512-yMTrCrK92aGyi7GuDNtGn2sNW+Gdb4vErx4t3Gv/Tr+1zRb8ax4z8GWVRfr3Jw8zJWvpGHNpss3vVlbF58DZ4w==}
+ cpu: [arm64]
+ os: [freebsd]
+
+ '@rollup/rollup-freebsd-x64@4.53.3':
+ resolution: {integrity: sha512-lMfF8X7QhdQzseM6XaX0vbno2m3hlyZFhwcndRMw8fbAGUGL3WFMBdK0hbUBIUYcEcMhVLr1SIamDeuLBnXS+Q==}
+ cpu: [x64]
+ os: [freebsd]
+
+ '@rollup/rollup-linux-arm-gnueabihf@4.53.3':
+ resolution: {integrity: sha512-k9oD15soC/Ln6d2Wv/JOFPzZXIAIFLp6B+i14KhxAfnq76ajt0EhYc5YPeX6W1xJkAdItcVT+JhKl1QZh44/qw==}
+ cpu: [arm]
+ os: [linux]
+
+ '@rollup/rollup-linux-arm-musleabihf@4.53.3':
+ resolution: {integrity: sha512-vTNlKq+N6CK/8UktsrFuc+/7NlEYVxgaEgRXVUVK258Z5ymho29skzW1sutgYjqNnquGwVUObAaxae8rZ6YMhg==}
+ cpu: [arm]
+ os: [linux]
+
+ '@rollup/rollup-linux-arm64-gnu@4.53.3':
+ resolution: {integrity: sha512-RGrFLWgMhSxRs/EWJMIFM1O5Mzuz3Xy3/mnxJp/5cVhZ2XoCAxJnmNsEyeMJtpK+wu0FJFWz+QF4mjCA7AUQ3w==}
+ cpu: [arm64]
+ os: [linux]
+
+ '@rollup/rollup-linux-arm64-musl@4.53.3':
+ resolution: {integrity: sha512-kASyvfBEWYPEwe0Qv4nfu6pNkITLTb32p4yTgzFCocHnJLAHs+9LjUu9ONIhvfT/5lv4YS5muBHyuV84epBo/A==}
+ cpu: [arm64]
+ os: [linux]
+
+ '@rollup/rollup-linux-loong64-gnu@4.53.3':
+ resolution: {integrity: sha512-JiuKcp2teLJwQ7vkJ95EwESWkNRFJD7TQgYmCnrPtlu50b4XvT5MOmurWNrCj3IFdyjBQ5p9vnrX4JM6I8OE7g==}
+ cpu: [loong64]
+ os: [linux]
+
+ '@rollup/rollup-linux-ppc64-gnu@4.53.3':
+ resolution: {integrity: sha512-EoGSa8nd6d3T7zLuqdojxC20oBfNT8nexBbB/rkxgKj5T5vhpAQKKnD+h3UkoMuTyXkP5jTjK/ccNRmQrPNDuw==}
+ cpu: [ppc64]
+ os: [linux]
+
+ '@rollup/rollup-linux-riscv64-gnu@4.53.3':
+ resolution: {integrity: sha512-4s+Wped2IHXHPnAEbIB0YWBv7SDohqxobiiPA1FIWZpX+w9o2i4LezzH/NkFUl8LRci/8udci6cLq+jJQlh+0g==}
+ cpu: [riscv64]
+ os: [linux]
+
+ '@rollup/rollup-linux-riscv64-musl@4.53.3':
+ resolution: {integrity: sha512-68k2g7+0vs2u9CxDt5ktXTngsxOQkSEV/xBbwlqYcUrAVh6P9EgMZvFsnHy4SEiUl46Xf0IObWVbMvPrr2gw8A==}
+ cpu: [riscv64]
+ os: [linux]
+
+ '@rollup/rollup-linux-s390x-gnu@4.53.3':
+ resolution: {integrity: sha512-VYsFMpULAz87ZW6BVYw3I6sWesGpsP9OPcyKe8ofdg9LHxSbRMd7zrVrr5xi/3kMZtpWL/wC+UIJWJYVX5uTKg==}
+ cpu: [s390x]
+ os: [linux]
+
+ '@rollup/rollup-linux-x64-gnu@4.53.3':
+ resolution: {integrity: sha512-3EhFi1FU6YL8HTUJZ51imGJWEX//ajQPfqWLI3BQq4TlvHy4X0MOr5q3D2Zof/ka0d5FNdPwZXm3Yyib/UEd+w==}
+ cpu: [x64]
+ os: [linux]
+
+ '@rollup/rollup-linux-x64-musl@4.53.3':
+ resolution: {integrity: sha512-eoROhjcc6HbZCJr+tvVT8X4fW3/5g/WkGvvmwz/88sDtSJzO7r/blvoBDgISDiCjDRZmHpwud7h+6Q9JxFwq1Q==}
+ cpu: [x64]
+ os: [linux]
+
+ '@rollup/rollup-openharmony-arm64@4.53.3':
+ resolution: {integrity: sha512-OueLAWgrNSPGAdUdIjSWXw+u/02BRTcnfw9PN41D2vq/JSEPnJnVuBgw18VkN8wcd4fjUs+jFHVM4t9+kBSNLw==}
+ cpu: [arm64]
+ os: [openharmony]
+
+ '@rollup/rollup-win32-arm64-msvc@4.53.3':
+ resolution: {integrity: sha512-GOFuKpsxR/whszbF/bzydebLiXIHSgsEUp6M0JI8dWvi+fFa1TD6YQa4aSZHtpmh2/uAlj/Dy+nmby3TJ3pkTw==}
+ cpu: [arm64]
+ os: [win32]
+
+ '@rollup/rollup-win32-ia32-msvc@4.53.3':
+ resolution: {integrity: sha512-iah+THLcBJdpfZ1TstDFbKNznlzoxa8fmnFYK4V67HvmuNYkVdAywJSoteUszvBQ9/HqN2+9AZghbajMsFT+oA==}
+ cpu: [ia32]
+ os: [win32]
+
+ '@rollup/rollup-win32-x64-gnu@4.53.3':
+ resolution: {integrity: sha512-J9QDiOIZlZLdcot5NXEepDkstocktoVjkaKUtqzgzpt2yWjGlbYiKyp05rWwk4nypbYUNoFAztEgixoLaSETkg==}
+ cpu: [x64]
+ os: [win32]
+
+ '@rollup/rollup-win32-x64-msvc@4.53.3':
+ resolution: {integrity: sha512-UhTd8u31dXadv0MopwGgNOBpUVROFKWVQgAg5N1ESyCz8AuBcMqm4AuTjrwgQKGDfoFuz02EuMRHQIw/frmYKQ==}
+ cpu: [x64]
+ os: [win32]
+
'@rtsao/scc@1.1.0':
resolution: {integrity: sha512-zt6OdqaDoOnJ1ZYsCYGt9YmWzDXl4vQdKTyJev62gFhRGKdx7mcT54V9KIjg+d2wi9EXsPvAPKe7i7WjfVWB8g==}
@@ -2526,6 +2905,29 @@ packages:
resolution: {integrity: sha512-ldZXEhOBb8Is7xLs01fR3YEc3DERiz5silj8tnGkFZytt1abEvl/GhUmCE0PMLaMPTa3Jk4HbKmRlHmu+gCftg==}
engines: {node: '>=12'}
+ '@testing-library/dom@10.4.1':
+ resolution: {integrity: sha512-o4PXJQidqJl82ckFaXUeoAW+XysPLauYI43Abki5hABd853iMhitooc6znOnczgbTYmEP6U6/y1ZyKAIsvMKGg==}
+ engines: {node: '>=18'}
+
+ '@testing-library/jest-dom@6.9.1':
+ resolution: {integrity: sha512-zIcONa+hVtVSSep9UT3jZ5rizo2BsxgyDYU7WFD5eICBE7no3881HGeb/QkGfsJs6JTkY1aQhT7rIPC7e+0nnA==}
+ engines: {node: '>=14', npm: '>=6', yarn: '>=1'}
+
+ '@testing-library/react@16.3.0':
+ resolution: {integrity: sha512-kFSyxiEDwv1WLl2fgsq6pPBbw5aWKrsY2/noi1Id0TK0UParSF62oFQFGHXIyaG4pp2tEub/Zlel+fjjZILDsw==}
+ engines: {node: '>=18'}
+ peerDependencies:
+ '@testing-library/dom': ^10.0.0
+ '@types/react': ^18.0.0 || ^19.0.0
+ '@types/react-dom': ^18.0.0 || ^19.0.0
+ react: ^18.0.0 || ^19.0.0
+ react-dom: ^18.0.0 || ^19.0.0
+ peerDependenciesMeta:
+ '@types/react':
+ optional: true
+ '@types/react-dom':
+ optional: true
+
'@tiptap/core@3.11.0':
resolution: {integrity: sha512-kmS7ZVpHm1EMnW1Wmft9H5ZLM7E0G0NGBx+aGEHGDcNxZBXD2ZUa76CuWjIhOGpwsPbELp684ZdpF2JWoNi4Dg==}
peerDependencies:
@@ -2620,6 +3022,21 @@ packages:
react: ^17.0.0 || ^18.0.0 || ^19.0.0
react-dom: ^17.0.0 || ^18.0.0 || ^19.0.0
+ '@types/aria-query@5.0.4':
+ resolution: {integrity: sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw==}
+
+ '@types/babel__core@7.20.5':
+ resolution: {integrity: sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==}
+
+ '@types/babel__generator@7.27.0':
+ resolution: {integrity: sha512-ufFd2Xi92OAVPYsy+P4n7/U7e68fex0+Ee8gSG9KX7eo084CWiQ4sdxktvdl0bOPupXtVJPY19zk6EwWqUQ8lg==}
+
+ '@types/babel__template@7.4.4':
+ resolution: {integrity: sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==}
+
+ '@types/babel__traverse@7.28.0':
+ resolution: {integrity: sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==}
+
'@types/canvas-confetti@1.9.0':
resolution: {integrity: sha512-aBGj/dULrimR1XDZLtG9JwxX1b4HPRF6CX9Yfwh3NvstZEm1ZL7RBnel4keCPSqs1ANRu1u2Aoz9R+VmtjYuTg==}
@@ -2782,6 +3199,50 @@ packages:
'@ungap/structured-clone@1.3.0':
resolution: {integrity: sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==}
+ '@vitejs/plugin-react@4.7.0':
+ resolution: {integrity: sha512-gUu9hwfWvvEDBBmgtAowQCojwZmJ5mcLn3aufeCsitijs3+f2NsrPtlAWIR6OPiqljl96GVCUbLe0HyqIpVaoA==}
+ engines: {node: ^14.18.0 || >=16.0.0}
+ peerDependencies:
+ vite: ^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0
+
+ '@vitest/coverage-v8@2.1.9':
+ resolution: {integrity: sha512-Z2cOr0ksM00MpEfyVE8KXIYPEcBFxdbLSs56L8PO0QQMxt/6bDj45uQfxoc96v05KW3clk7vvgP0qfDit9DmfQ==}
+ peerDependencies:
+ '@vitest/browser': 2.1.9
+ vitest: 2.1.9
+ peerDependenciesMeta:
+ '@vitest/browser':
+ optional: true
+
+ '@vitest/expect@2.1.9':
+ resolution: {integrity: sha512-UJCIkTBenHeKT1TTlKMJWy1laZewsRIzYighyYiJKZreqtdxSos/S1t+ktRMQWu2CKqaarrkeszJx1cgC5tGZw==}
+
+ '@vitest/mocker@2.1.9':
+ resolution: {integrity: sha512-tVL6uJgoUdi6icpxmdrn5YNo3g3Dxv+IHJBr0GXHaEdTcw3F+cPKnsXFhli6nO+f/6SDKPHEK1UN+k+TQv0Ehg==}
+ peerDependencies:
+ msw: ^2.4.9
+ vite: ^5.0.0
+ peerDependenciesMeta:
+ msw:
+ optional: true
+ vite:
+ optional: true
+
+ '@vitest/pretty-format@2.1.9':
+ resolution: {integrity: sha512-KhRIdGV2U9HOUzxfiHmY8IFHTdqtOhIzCpd8WRdJiE7D/HUcZVD0EgQCVjm+Q9gkUXWgBvMmTtZgIG48wq7sOQ==}
+
+ '@vitest/runner@2.1.9':
+ resolution: {integrity: sha512-ZXSSqTFIrzduD63btIfEyOmNcBmQvgOVsPNPe0jYtESiXkhd8u2erDLnMxmGrDCwHCCHE7hxwRDCT3pt0esT4g==}
+
+ '@vitest/snapshot@2.1.9':
+ resolution: {integrity: sha512-oBO82rEjsxLNJincVhLhaxxZdEtV0EFHMK5Kmx5sJ6H9L183dHECjiefOAdnqpIgT5eZwT04PoggUnW88vOBNQ==}
+
+ '@vitest/spy@2.1.9':
+ resolution: {integrity: sha512-E1B35FwzXXTs9FHNK6bDszs7mtydNi5MIfUWpceJ8Xbfb1gBMscAnwLbEu+B44ed6W3XjL9/ehLPHR1fkf1KLQ==}
+
+ '@vitest/utils@2.1.9':
+ resolution: {integrity: sha512-v0psaMSkNJ3A2NMrUEHFRzJtDPFn+/VWZ5WxImB21T9fjucJRmS7xCS3ppEnARb9y11OAzaD+P2Ps+b+BGX5iQ==}
+
'@wojtekmaj/react-hooks@1.17.2':
resolution: {integrity: sha512-E2I1D39Sw6AmXSArfvHjCoB2KE8QxmpuoKn0x+xq7IXKCQi3lGAQn1MrFqDKiwJt08Mmg+I9sp5Zt0nSfStfuQ==}
peerDependencies:
@@ -2822,10 +3283,26 @@ packages:
anser@2.3.2:
resolution: {integrity: sha512-PMqBCBvrOVDRqLGooQb+z+t1Q0PiPyurUQeZRR5uHBOVZcW8B04KMmnT12USnhpNX2wCPagWzLVppQMUG3u0Dw==}
+ ansi-regex@5.0.1:
+ resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==}
+ engines: {node: '>=8'}
+
+ ansi-regex@6.2.2:
+ resolution: {integrity: sha512-Bq3SmSpyFHaWjPk8If9yc6svM8c56dB5BAtW4Qbw5jHTwwXXcTLoRMkpDJp6VL0XzlWaCHTXrkFURMYmD0sLqg==}
+ engines: {node: '>=12'}
+
ansi-styles@4.3.0:
resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==}
engines: {node: '>=8'}
+ ansi-styles@5.2.0:
+ resolution: {integrity: sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==}
+ engines: {node: '>=10'}
+
+ ansi-styles@6.2.3:
+ resolution: {integrity: sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==}
+ engines: {node: '>=12'}
+
argparse@2.0.1:
resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==}
@@ -2833,6 +3310,9 @@ packages:
resolution: {integrity: sha512-ik3ZgC9dY/lYVVM++OISsaYDeg1tb0VtP5uL3ouh1koGOaUMDPpbFIei4JkFimWUFPn90sbMNMXQAIVOlnYKJA==}
engines: {node: '>=10'}
+ aria-query@5.3.0:
+ resolution: {integrity: sha512-b0P0sZPKtyu8HkeRAfCq0IfURZK+SuwMjY1UXGBU27wpAiTwQAIlq56IbIO+ytk/JjS1fMR14ee5WBBfKi5J6A==}
+
aria-query@5.3.2:
resolution: {integrity: sha512-COROpnaoap1E2F000S62r6A60uHZnmlvomhfyT2DlTcrY1OrBKn2UhH7qn5wTC9zMvD0AY7csdPSNwKP+7WiQw==}
engines: {node: '>= 0.4'}
@@ -2876,6 +3356,10 @@ packages:
asap@2.0.6:
resolution: {integrity: sha512-BSHWgDSAiKs50o2Re8ppvp3seVHXSRM44cdSsT9FfNEUUZLOGWVCsiWaRPWM1Znn+mqZ1OfVZ3z3DWEzSp7hRA==}
+ assertion-error@2.0.1:
+ resolution: {integrity: sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==}
+ engines: {node: '>=12'}
+
ast-types-flow@0.0.8:
resolution: {integrity: sha512-OH/2E5Fg20h2aPrbe+QL8JZQFko0YZaF+j4mnQ7BGhfavO7OpSLa8a0y9sBwomHdSbkhTS8TQNayBfnW5DwbvQ==}
@@ -2918,6 +3402,10 @@ packages:
base64-js@1.5.1:
resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==}
+ baseline-browser-mapping@2.9.0:
+ resolution: {integrity: sha512-Mh++g+2LPfzZToywfE1BUzvZbfOY52Nil0rn9H1CPC5DJ7fX+Vir7nToBeoiSbB1zTNeGYbELEvJESujgGrzXw==}
+ hasBin: true
+
bl@4.1.0:
resolution: {integrity: sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==}
@@ -2931,6 +3419,11 @@ packages:
resolution: {integrity: sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==}
engines: {node: '>=8'}
+ browserslist@4.28.1:
+ resolution: {integrity: sha512-ZC5Bd0LgJXgwGqUknZY/vkUQ04r8NXnJZ3yYi4vDmSiZmC/pdSN0NbNRPxZpbtO4uAfDUAFffO8IZoM3Gj8IkA==}
+ engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7}
+ hasBin: true
+
buffer-from@1.1.2:
resolution: {integrity: sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==}
@@ -2940,6 +3433,10 @@ packages:
buffer@6.0.3:
resolution: {integrity: sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==}
+ cac@6.7.14:
+ resolution: {integrity: sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==}
+ engines: {node: '>=8'}
+
call-bind-apply-helpers@1.0.2:
resolution: {integrity: sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==}
engines: {node: '>= 0.4'}
@@ -2959,6 +3456,9 @@ packages:
caniuse-lite@1.0.30001706:
resolution: {integrity: sha512-3ZczoTApMAZwPKYWmwVbQMFpXBDds3/0VciVoUwPUbldlYyVLmRVuRs/PcUZtHpbLRpzzDvrvnFuREsGt6lUug==}
+ caniuse-lite@1.0.30001759:
+ resolution: {integrity: sha512-Pzfx9fOKoKvevQf8oCXoyNRQ5QyxJj+3O0Rqx2V5oxT61KGx8+n6hV/IUyJeifUci2clnmmKVpvtiqRzgiWjSw==}
+
canvas-confetti@1.9.3:
resolution: {integrity: sha512-rFfTURMvmVEX1gyXFgn5QMn81bYk70qa0HLzcIOSVEyl57n6o9ItHeBtUSWdvKAPY0xlvBHno4/v3QPrT83q9g==}
@@ -2969,6 +3469,10 @@ packages:
ccount@2.0.1:
resolution: {integrity: sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==}
+ chai@5.3.3:
+ resolution: {integrity: sha512-4zNhdJD/iOjSH0A05ea+Ke6MU5mmpQcbQsSOkgdaUMJ9zTlDTD/GYlwohmIE2u0gaxHYiVHEn1Fw9mZ/ktJWgw==}
+ engines: {node: '>=18'}
+
chalk@4.1.2:
resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==}
engines: {node: '>=10'}
@@ -2998,6 +3502,10 @@ packages:
character-reference-invalid@2.0.1:
resolution: {integrity: sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==}
+ check-error@2.1.1:
+ resolution: {integrity: sha512-OAlb+T7V4Op9OwdkjmguYRqncdlx5JiofwOAUkmTF+jNdHwzTaTs4sRAGpzLF3oOz5xAyDGrPgeIDFQmDOTiJw==}
+ engines: {node: '>= 16'}
+
chokidar@4.0.3:
resolution: {integrity: sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==}
engines: {node: '>= 14.16.0'}
@@ -3081,6 +3589,9 @@ packages:
concat-map@0.0.1:
resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==}
+ convert-source-map@2.0.0:
+ resolution: {integrity: sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==}
+
crelt@1.0.6:
resolution: {integrity: sha512-VQ2MBenTq1fWZUH9DJNGti7kKv6EeAuYr3cLwxUWhIu1baTaXh4Ib5W2CqHVqib4/MqbYGJqiL3Zb8GJZr3l4g==}
@@ -3096,6 +3607,9 @@ packages:
resolution: {integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==}
engines: {node: '>= 8'}
+ css.escape@1.5.1:
+ resolution: {integrity: sha512-YUifsXXuknHlUsmlgyY0PKzgPOr7/FjCePfHNt0jxm83wHZi44VDMQ7/fGNkjY3/jV1MC+1CmZbaHzugyeRtpg==}
+
cssesc@3.0.0:
resolution: {integrity: sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==}
engines: {node: '>=4'}
@@ -3164,6 +3678,10 @@ packages:
resolution: {integrity: sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==}
engines: {node: '>=10'}
+ deep-eql@5.0.2:
+ resolution: {integrity: sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==}
+ engines: {node: '>=6'}
+
deep-extend@0.6.0:
resolution: {integrity: sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==}
engines: {node: '>=4.0.0'}
@@ -3216,6 +3734,12 @@ packages:
resolution: {integrity: sha512-35mSku4ZXK0vfCuHEDAwt55dg2jNajHZ1odvF+8SSr82EsZY4QmXfuWso8oEd8zRhVObSN18aM0CjSdoBX7zIw==}
engines: {node: '>=0.10.0'}
+ dom-accessibility-api@0.5.16:
+ resolution: {integrity: sha512-X7BJ2yElsnOJ30pZF4uIIDfBEVgF4XEBxL9Bxhy6dnrm5hkzqmsWHGTiHqRiITNhMyFLyAiWndIJP7Z1NTteDg==}
+
+ dom-accessibility-api@0.6.3:
+ resolution: {integrity: sha512-7ZgogeTnjuHbo+ct10G9Ffp0mif17idi0IyWNVA/wcwcm7NPOD/WEHVP3n7n3MhXqxoIYm8d6MuZohYWIZ4T3w==}
+
dotenv@16.6.1:
resolution: {integrity: sha512-uBq4egWHTcTt33a72vpSG0z3HnPuIl6NqYcTrKEg2azoEyl2hpW0zqlxysq2pK9HlDIHyHyakeYaYnSAwd8bow==}
engines: {node: '>=12'}
@@ -3329,6 +3853,12 @@ packages:
resolution: {integrity: sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==}
engines: {node: '>= 0.4'}
+ eastasianwidth@0.2.0:
+ resolution: {integrity: sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==}
+
+ electron-to-chromium@1.5.264:
+ resolution: {integrity: sha512-1tEf0nLgltC3iy9wtlYDlQDc5Rg9lEKVjEmIHJ21rI9OcqkvD45K1oyNIRA4rR1z3LgJ7KeGzEBojVcV6m4qjA==}
+
emblor@1.4.8:
resolution: {integrity: sha512-Vqtz4Gepa7CIkmplQ+kvJnsSZJ4sAyHvQqqX2iCmgoRo5iRQFxr+5FJkk6QuLVNH5vrbBZEYxg7sMZuDCnQ/PQ==}
peerDependencies:
@@ -3338,6 +3868,9 @@ packages:
emoji-mart@5.6.0:
resolution: {integrity: sha512-eJp3QRe79pjwa+duv+n7+5YsNhRcMl812EcFVwrnRvYKoNPoQb5qxU8DG6Bgwji0akHdp6D4Ln6tYLG58MFSow==}
+ emoji-regex@8.0.0:
+ resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==}
+
emoji-regex@9.2.2:
resolution: {integrity: sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==}
@@ -3368,6 +3901,9 @@ packages:
resolution: {integrity: sha512-uDn+FE1yrDzyC0pCo961B2IHbdM8y/ACZsKD4dG6WqrjV53BADjwa7D+1aom2rsNVfLyDgU/eigvlJGJ08OQ4w==}
engines: {node: '>= 0.4'}
+ es-module-lexer@1.7.0:
+ resolution: {integrity: sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==}
+
es-object-atoms@1.1.1:
resolution: {integrity: sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==}
engines: {node: '>= 0.4'}
@@ -3411,11 +3947,20 @@ packages:
engines: {node: '>=12'}
hasBin: true
+ esbuild@0.21.5:
+ resolution: {integrity: sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==}
+ engines: {node: '>=12'}
+ hasBin: true
+
esbuild@0.25.8:
resolution: {integrity: sha512-vVC0USHGtMi8+R4Kz8rt6JhEWLxsv9Rnu/lGYbPR8u47B+DCBksq9JarW0zOO7bs37hyOK1l2/oqtbciutL5+Q==}
engines: {node: '>=18'}
hasBin: true
+ escalade@3.2.0:
+ resolution: {integrity: sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==}
+ engines: {node: '>=6'}
+
escape-carriage@1.3.1:
resolution: {integrity: sha512-GwBr6yViW3ttx1kb7/Oh+gKQ1/TrhYwxKqVmg5gS+BK+Qe2KrOa/Vh7w3HPBvgGf0LfcDGoY9I6NHKoA5Hozhw==}
@@ -3589,6 +4134,10 @@ packages:
resolution: {integrity: sha512-XYfuKMvj4O35f/pOXLObndIRvyQ+/+6AhODh+OKWj9S9498pHHn/IMszH+gt0fBCRWMNfk1ZSp5x3AifmnI2vg==}
engines: {node: '>=6'}
+ expect-type@1.2.2:
+ resolution: {integrity: sha512-JhFGDVJ7tmDJItKhYgJCGLOWjuK9vPxiXoUFLwLDc99NlmklilbiQJwoctZtt13+xMw91MCk/REan6MWHqDjyA==}
+ engines: {node: '>=12.0.0'}
+
ext@1.7.0:
resolution: {integrity: sha512-6hxeJYaL110a9b5TEJSj0gojyHQAmA2ch5Os+ySCiA1QGdS697XWY1pzsrSjqA9LDEEgdB/KypIlR59RcLuHYw==}
@@ -3678,6 +4227,10 @@ packages:
resolution: {integrity: sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg==}
engines: {node: '>= 0.4'}
+ foreground-child@3.3.1:
+ resolution: {integrity: sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==}
+ engines: {node: '>=14'}
+
form-data@4.0.5:
resolution: {integrity: sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==}
engines: {node: '>= 6'}
@@ -3788,6 +4341,10 @@ packages:
peerDependencies:
next: '>=13.2.0'
+ gensync@1.0.0-beta.2:
+ resolution: {integrity: sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==}
+ engines: {node: '>=6.9.0'}
+
get-intrinsic@1.3.0:
resolution: {integrity: sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==}
engines: {node: '>= 0.4'}
@@ -3821,6 +4378,10 @@ packages:
resolution: {integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==}
engines: {node: '>=10.13.0'}
+ glob@10.5.0:
+ resolution: {integrity: sha512-DfXN8DfhJ7NH3Oe7cFmu3NCu1wKbkReJ8TorzSAFbSKrlNaQSKfIzqYqVY8zlbs2NLBbWpRiU52GX2PbaBVNkg==}
+ hasBin: true
+
globals@14.0.0:
resolution: {integrity: sha512-oahGvuMGQlPw/ivIYBjVSrWAfWLBeku5tpPE2fOPLi+WHffIWbuh2tCjhyQhTBPMf5E9jDEH4FOmTYgYwbKwtQ==}
engines: {node: '>=18'}
@@ -3958,6 +4519,9 @@ packages:
resolution: {integrity: sha512-Y22oTqIU4uuPgEemfz7NDJz6OeKf12Lsu+QC+s3BVpda64lTiMYCyGwg5ki4vFxkMwQdeZDl2adZoqUgdFuTgQ==}
engines: {node: '>=18'}
+ html-escaper@2.0.2:
+ resolution: {integrity: sha512-H2iMtd0I4Mt5eYiapRdIDjp+XzelXQ0tFE4JS7YFwFevXXMmOp9myNrUvCg0D6ws8iqkRPBfKHgbwig1SmlLfg==}
+
html-url-attributes@3.0.1:
resolution: {integrity: sha512-ol6UPyBWqsrO6EJySPz2O7ZSr856WDrEzM5zMqp+FJJLGMW35cLYmmZnl0vztAZxRUoNZJFTCohfjuIJ8I4QBQ==}
@@ -3999,6 +4563,10 @@ packages:
resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==}
engines: {node: '>=0.8.19'}
+ indent-string@4.0.0:
+ resolution: {integrity: sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==}
+ engines: {node: '>=8'}
+
inherits@2.0.4:
resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==}
@@ -4086,6 +4654,10 @@ packages:
resolution: {integrity: sha512-1pC6N8qWJbWoPtEjgcL2xyhQOP491EQjeUo3qTKcmV8YSDDJrOepfG8pcC7h/QgnQHYSv0mJ3Z/ZWxmatVrysg==}
engines: {node: '>= 0.4'}
+ is-fullwidth-code-point@3.0.0:
+ resolution: {integrity: sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==}
+ engines: {node: '>=8'}
+
is-generator-function@1.1.0:
resolution: {integrity: sha512-nPUB5km40q9e8UfN/Zc24eLlzdSf9OfKByBw9CIdw4H1giPMeA0OIJvbchsCu4npfI2QcMVBsGEBHKZ7wLTWmQ==}
engines: {node: '>= 0.4'}
@@ -4164,10 +4736,29 @@ packages:
isomorphic.js@0.2.5:
resolution: {integrity: sha512-PIeMbHqMt4DnUP3MA/Flc0HElYjMXArsw1qwJZcm9sqR8mq3l8NYizFMty0pWwE/tzIGH3EKK5+jes5mAr85yw==}
+ istanbul-lib-coverage@3.2.2:
+ resolution: {integrity: sha512-O8dpsF+r0WV/8MNRKfnmrtCWhuKjxrq2w+jpzBL5UZKTi2LeVWnWOmWRxFlesJONmc+wLAGvKQZEOanko0LFTg==}
+ engines: {node: '>=8'}
+
+ istanbul-lib-report@3.0.1:
+ resolution: {integrity: sha512-GCfE1mtsHGOELCU8e/Z7YWzpmybrx/+dSTfLrvY8qRmaY6zXTKWn6WQIjaAFw069icm6GVMNkgu0NzI4iPZUNw==}
+ engines: {node: '>=10'}
+
+ istanbul-lib-source-maps@5.0.6:
+ resolution: {integrity: sha512-yg2d+Em4KizZC5niWhQaIomgf5WlL4vOOjZ5xGCmF8SnPE/mDWWXgvRExdcpCgh9lLRRa1/fSYp2ymmbJ1pI+A==}
+ engines: {node: '>=10'}
+
+ istanbul-reports@3.2.0:
+ resolution: {integrity: sha512-HGYWWS/ehqTV3xN10i23tkPkpH46MLCIMFNCaaKNavAXTF1RkqxawEPtnjnGZ6XKSInBKkiOA5BKS+aZiY3AvA==}
+ engines: {node: '>=8'}
+
iterator.prototype@1.1.5:
resolution: {integrity: sha512-H0dkQoCa3b2VEeKQBOxFph+JAbcrQdE7KC0UkqwpLmv2EC4P41QXP+rqo9wYodACiG5/WM5s9oDApTU8utwj9g==}
engines: {node: '>= 0.4'}
+ jackspeak@3.4.3:
+ resolution: {integrity: sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==}
+
jiti@2.4.2:
resolution: {integrity: sha512-rg9zJN+G4n2nfJl5MW3BMygZX56zKPNVEYYqq7adpmMh4Jn2QNEwhvQlFy6jPVdcod7txZtKHWnyZiA3a0zP7A==}
hasBin: true
@@ -4219,6 +4810,11 @@ packages:
canvas:
optional: true
+ jsesc@3.1.0:
+ resolution: {integrity: sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==}
+ engines: {node: '>=6'}
+ hasBin: true
+
json-buffer@3.0.1:
resolution: {integrity: sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==}
@@ -4235,6 +4831,11 @@ packages:
resolution: {integrity: sha512-g1MWMLBiz8FKi1e4w0UyVL3w+iJceWAFBAaBnnGKOpNa5f8TLktkbre1+s6oICydWAm+HRUGTmI+//xv2hvXYA==}
hasBin: true
+ json5@2.2.3:
+ resolution: {integrity: sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==}
+ engines: {node: '>=6'}
+ hasBin: true
+
jsondiffpatch@0.6.0:
resolution: {integrity: sha512-3QItJOXp2AP1uv7waBkao5nCvhEv+QmJAd38Ybq7wNI74Q+BBmnLn4EDKz6yI9xGAIQoUF87qHt+kc1IVxB4zQ==}
engines: {node: ^18.0.0 || >=20.0.0}
@@ -4376,6 +4977,9 @@ packages:
resolution: {integrity: sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==}
hasBin: true
+ loupe@3.2.1:
+ resolution: {integrity: sha512-CdzqowRJCeLU72bHvWqwRBBlLcMEtIvGrlvef74kMnV2AolS9Y8xUv1I0U/MNAWMhBlKIoyuEgoJ0t/bbwHbLQ==}
+
lowlight@1.20.0:
resolution: {integrity: sha512-8Ktj+prEb1RoCPkEOrPMYUN/nCggB7qAWe3a7OpMjWQkh3l2RD5wKRQ+o8Q8YuI9RG/xs95waaI/E6ym/7NsTw==}
@@ -4386,6 +4990,9 @@ packages:
resolution: {integrity: sha512-QIXZUBJUx+2zHUdQujWejBkcD9+cs94tLn0+YL8UrCh+D5sCXZ4c7LaEH48pNwRY3MLDgqUFyhlCyjJPf1WP0A==}
engines: {node: 20 || >=22}
+ lru-cache@5.1.1:
+ resolution: {integrity: sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==}
+
lucide-react@0.453.0:
resolution: {integrity: sha512-kL+RGZCcJi9BvJtzg2kshO192Ddy9hv3ij+cPrVPWSRzgCWCVazoQJxOjAwgK53NomL07HB7GPHW120FimjNhQ==}
peerDependencies:
@@ -4403,9 +5010,16 @@ packages:
magic-string@0.30.17:
resolution: {integrity: sha512-sNPKHvyjVf7gyjwS4xGTaW/mCnF8wnjtifKBEhxfZ7E/S8tQ0rssrwGNn6q8JH/ohItJfSQp9mBtQYuTlH5QnA==}
+ magicast@0.3.5:
+ resolution: {integrity: sha512-L0WhttDl+2BOsybvEOLK7fW3UA0OQ0IQ2d6Zl2x/a6vVRs3bAY0ECOSHHeL5jD+SbOpOCUEi0y1DgHEn9Qn1AQ==}
+
make-cancellable-promise@1.3.2:
resolution: {integrity: sha512-GCXh3bq/WuMbS+Ky4JBPW1hYTOU+znU+Q5m9Pu+pI8EoUqIHk9+tviOKC6/qhHh8C4/As3tzJ69IF32kdz85ww==}
+ make-dir@4.0.0:
+ resolution: {integrity: sha512-hXdUTZYIVOt1Ex//jAQi+wTZZpUpwBj/0QsOzqegb3rGMMeJiSEu5xLHnYfBrRV4RH2+OCSOO95Is/7x1WJ4bw==}
+ engines: {node: '>=10'}
+
make-event-props@1.6.2:
resolution: {integrity: sha512-iDwf7mA03WPiR8QxvcVHmVWEPfMY1RZXerDVNCRYW7dUr2ppH3J58Rwb39/WG39yTZdRSxr3x+2v22tvI0VEvA==}
@@ -4880,6 +5494,9 @@ packages:
encoding:
optional: true
+ node-releases@2.0.27:
+ resolution: {integrity: sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==}
+
npm-to-yarn@3.0.1:
resolution: {integrity: sha512-tt6PvKu4WyzPwWUzy/hvPFqn+uwXO0K1ZHka8az3NnrhWJDmSqI8ncWq0fkL0k/lmmi5tAC11FXwXuh0rFbt1A==}
engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
@@ -4953,6 +5570,9 @@ packages:
resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==}
engines: {node: '>=10'}
+ package-json-from-dist@1.0.1:
+ resolution: {integrity: sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==}
+
parent-module@1.0.1:
resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==}
engines: {node: '>=6'}
@@ -4977,10 +5597,21 @@ packages:
path-parse@1.0.7:
resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==}
+ path-scurry@1.11.1:
+ resolution: {integrity: sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==}
+ engines: {node: '>=16 || 14 >=14.18'}
+
path2d@0.2.2:
resolution: {integrity: sha512-+vnG6S4dYcYxZd+CZxzXCNKdELYZSKfohrk98yajCo1PtRoDgCTrrwOvK1GT0UoAdVszagDVllQc0U1vaX4NUQ==}
engines: {node: '>=6'}
+ pathe@1.1.2:
+ resolution: {integrity: sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==}
+
+ pathval@2.0.1:
+ resolution: {integrity: sha512-//nshmD55c46FuFw26xV/xFAaB5HF9Xdap7HJBBnrKdAd6/GxDBaNA1870O79+9ueg61cZLSVc+OaFlfmObYVQ==}
+ engines: {node: '>= 14.16'}
+
pdfjs-dist@4.8.69:
resolution: {integrity: sha512-IHZsA4T7YElCKNNXtiLgqScw4zPd3pG9do8UrznC757gMd7UPeHSL2qwNNMJo4r79fl8oj1Xx+1nh2YkzdMpLQ==}
engines: {node: '>=18'}
@@ -5083,6 +5714,10 @@ packages:
resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==}
engines: {node: '>= 0.8.0'}
+ pretty-format@27.5.1:
+ resolution: {integrity: sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ==}
+ engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0}
+
prismjs@1.27.0:
resolution: {integrity: sha512-t13BGPUlFDR7wRB5kQDG4jjl7XeuH6jbJGt11JHPL96qwsEHNX2+68tFXqc1/k+/jALsbSWJKUOT/hcYAZ5LkA==}
engines: {node: '>=6'}
@@ -5333,6 +5968,10 @@ packages:
'@types/react':
optional: true
+ react-refresh@0.17.0:
+ resolution: {integrity: sha512-z6F7K9bV85EfseRCp2bzrpyQ0Gkw1uLoCel9XBVWPg/TjRj94SkJzUTGfOa4bs7iJvBWtQG0Wq7wnI0syw3EBQ==}
+ engines: {node: '>=0.10.0'}
+
react-remove-scroll-bar@2.3.8:
resolution: {integrity: sha512-9r+yi9+mgU33AKcj6IbT9oRCO78WriSj6t/cF8DWBZJ9aOGPOTEDvdUDz1FwKim7QXWwmHqtdHnRJfhAxEG46Q==}
engines: {node: '>=10'}
@@ -5442,6 +6081,10 @@ packages:
recma-stringify@1.0.0:
resolution: {integrity: sha512-cjwII1MdIIVloKvC9ErQ+OgAtwHBmcZ0Bg4ciz78FtbT8In39aAYbaA7zvxQ61xVMSPE8WxhLwLbhif4Js2C+g==}
+ redent@3.0.0:
+ resolution: {integrity: sha512-6tDA8g98We0zd0GvVeMT9arEOnTw9qM03L9cJXaCjrip1OO764RDBLBfrB4cwzNGDj5OA5ioymC9GkizgWJDUg==}
+ engines: {node: '>=8'}
+
reflect.getprototypeof@1.0.10:
resolution: {integrity: sha512-00o4I+DVrefhv+nX0ulyi3biSHCPDe+yLv5o/p6d/UVlirijB8E16FtfwSAi4g3tcqrQ4lRAqQSoFEZJehYEcw==}
engines: {node: '>= 0.4'}
@@ -5546,6 +6189,11 @@ packages:
resolution: {integrity: sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==}
engines: {iojs: '>=1.0.0', node: '>=0.10.0'}
+ rollup@4.53.3:
+ resolution: {integrity: sha512-w8GmOxZfBmKknvdXU1sdM9NHcoQejwF/4mNgj2JuEEdRaHwwF12K7e9eXn1nLZ07ad+du76mkVsyeb2rKGllsA==}
+ engines: {node: '>=18.0.0', npm: '>=8.0.0'}
+ hasBin: true
+
rope-sequence@1.3.4:
resolution: {integrity: sha512-UT5EDe2cu2E/6O4igUr5PSFs23nvvukicWHx6GnOPlHAiiYbzNuCRQCuiUdHJQcqKalLKlrYJnjY0ySGsXNQXQ==}
@@ -5651,6 +6299,13 @@ packages:
resolution: {integrity: sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==}
engines: {node: '>= 0.4'}
+ siginfo@2.0.0:
+ resolution: {integrity: sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==}
+
+ signal-exit@4.1.0:
+ resolution: {integrity: sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==}
+ engines: {node: '>=14'}
+
simple-concat@1.0.1:
resolution: {integrity: sha512-cSFtAPtRhljv69IK0hTVZQ+OfE9nePi/rtJmw5UjHeVyVroEqJXP1sFztKUy1qU+xvz3u/sfYJLa947b7nAN2Q==}
@@ -5691,12 +6346,26 @@ packages:
stable-hash@0.0.4:
resolution: {integrity: sha512-LjdcbuBeLcdETCrPn9i8AYAZ1eCtu4ECAWtP7UleOiZ9LzVxRzzUZEoZ8zB24nhkQnDWyET0I+3sWokSDS3E7g==}
+ stackback@0.0.2:
+ resolution: {integrity: sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==}
+
static-browser-server@1.0.3:
resolution: {integrity: sha512-ZUyfgGDdFRbZGGJQ1YhiM930Yczz5VlbJObrQLlk24+qNHVQx4OlLcYswEUo3bIyNAbQUIUR9Yr5/Hqjzqb4zA==}
+ std-env@3.10.0:
+ resolution: {integrity: sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==}
+
strict-event-emitter@0.4.6:
resolution: {integrity: sha512-12KWeb+wixJohmnwNFerbyiBrAlq5qJLwIt38etRtKtmmHyDSoGlIqFE9wx+4IwG0aDjI7GV8tc8ZccjWZZtTg==}
+ string-width@4.2.3:
+ resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==}
+ engines: {node: '>=8'}
+
+ string-width@5.1.2:
+ resolution: {integrity: sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==}
+ engines: {node: '>=12'}
+
string.prototype.includes@2.0.1:
resolution: {integrity: sha512-o7+c9bW6zpAdJHTtujeePODAhkuicdAryFsfVKwA+wGw89wJ4GTY484WTucM9hLtDEOpOvI+aHnzqnC5lHp4Rg==}
engines: {node: '>= 0.4'}
@@ -5726,10 +6395,22 @@ packages:
stringify-entities@4.0.4:
resolution: {integrity: sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==}
+ strip-ansi@6.0.1:
+ resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==}
+ engines: {node: '>=8'}
+
+ strip-ansi@7.1.2:
+ resolution: {integrity: sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==}
+ engines: {node: '>=12'}
+
strip-bom@3.0.0:
resolution: {integrity: sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==}
engines: {node: '>=4'}
+ strip-indent@3.0.0:
+ resolution: {integrity: sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==}
+ engines: {node: '>=8'}
+
strip-indent@4.0.0:
resolution: {integrity: sha512-mnVSV2l+Zv6BLpSD/8V87CW/y9EmmbYzGCIavsnsI6/nwn26DwffM/yztm30Z/I2DY9wdS3vXVCMnHDgZaVNoA==}
engines: {node: '>=12'}
@@ -5818,6 +6499,10 @@ packages:
resolution: {integrity: sha512-5S7Va8hKfV7W5U6g3aYxXmlPoZVAwUMy9AOKyF2fVuZa2UD3qZjg578OrLRt8PcNN1PleVaL/5/yYATNL0ICUw==}
engines: {node: '>=18'}
+ test-exclude@7.0.1:
+ resolution: {integrity: sha512-pFYqmTw68LXVjeWJMST4+borgQP2AyMNbg1BpZh9LbyhUeNkeaPF9gzfPGUAnSMV3qPYdWUwDIjjCLiSDOl7vg==}
+ engines: {node: '>=18'}
+
third-party-capital@1.0.20:
resolution: {integrity: sha512-oB7yIimd8SuGptespDAZnNkzIz+NWaJCu2RMsbs4Wmp9zSDUM8Nhi3s2OOcqYuv3mN4hitXc8DVx+LyUmbUDiA==}
@@ -5828,6 +6513,12 @@ packages:
tiny-invariant@1.3.3:
resolution: {integrity: sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==}
+ tinybench@2.9.0:
+ resolution: {integrity: sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==}
+
+ tinyexec@0.3.2:
+ resolution: {integrity: sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==}
+
tinyexec@1.0.1:
resolution: {integrity: sha512-5uC6DDlmeqiOwCPmK9jMSdOuZTh8bU39Ys6yidB+UTt5hfZUPGAypSgFRiEp+jbi9qH40BLDvy85jIU88wKSqw==}
@@ -5839,6 +6530,18 @@ packages:
resolution: {integrity: sha512-tX5e7OM1HnYr2+a2C/4V0htOcSQcoSTH9KgJnVvNm5zm/cyEWKJ7j7YutsH9CxMdtOkkLFy2AHrMci9IM8IPZQ==}
engines: {node: '>=12.0.0'}
+ tinypool@1.1.1:
+ resolution: {integrity: sha512-Zba82s87IFq9A9XmjiX5uZA/ARWDrB03OHlq+Vw1fSdt0I+4/Kutwy8BP4Y/y/aORMo61FQ0vIb5j44vSo5Pkg==}
+ engines: {node: ^18.0.0 || >=20.0.0}
+
+ tinyrainbow@1.2.0:
+ resolution: {integrity: sha512-weEDEq7Z5eTHPDh4xjX789+fHfF+P8boiFB+0vbWzpbnbsEr/GRaohi/uMKxg8RZMXnl1ItAi/IUHWMsjDV7kQ==}
+ engines: {node: '>=14.0.0'}
+
+ tinyspy@3.0.2:
+ resolution: {integrity: sha512-n1cw8k1k0x4pgA2+9XrOkFydTerNcJ1zWCO5Nn9scWHTD+5tp8dghT2x1uduQePZTZgd3Tupf+x9BxJjeJi77Q==}
+ engines: {node: '>=14.0.0'}
+
tldts-core@6.1.86:
resolution: {integrity: sha512-Je6p7pkk+KMzMv2XXKmAE3McmolOQFdxkKw0R8EYNr7sELW46JqnNeTX8ybPiQgvg1ymCoF8LXs5fzFaZvJPTA==}
@@ -5999,6 +6702,12 @@ packages:
unist-util-visit@5.0.0:
resolution: {integrity: sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg==}
+ update-browserslist-db@1.2.1:
+ resolution: {integrity: sha512-R9NcHbbZ45RoWfTdhn1J9SS7zxNvlddv4YRrHTUaFdtjbmfncfedB45EC9IaqJQ97iAR1GZgOfyRQO+ExIF6EQ==}
+ hasBin: true
+ peerDependencies:
+ browserslist: '>= 4.21.0'
+
uri-js@4.4.1:
resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==}
@@ -6103,6 +6812,67 @@ packages:
vfile@6.0.3:
resolution: {integrity: sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==}
+ vite-node@2.1.9:
+ resolution: {integrity: sha512-AM9aQ/IPrW/6ENLQg3AGY4K1N2TGZdR5e4gu/MmmR2xR3Ll1+dib+nook92g4TV3PXVyeyxdWwtaCAiUL0hMxA==}
+ engines: {node: ^18.0.0 || >=20.0.0}
+ hasBin: true
+
+ vite@5.4.21:
+ resolution: {integrity: sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw==}
+ engines: {node: ^18.0.0 || >=20.0.0}
+ hasBin: true
+ peerDependencies:
+ '@types/node': ^18.0.0 || >=20.0.0
+ less: '*'
+ lightningcss: ^1.21.0
+ sass: '*'
+ sass-embedded: '*'
+ stylus: '*'
+ sugarss: '*'
+ terser: ^5.4.0
+ peerDependenciesMeta:
+ '@types/node':
+ optional: true
+ less:
+ optional: true
+ lightningcss:
+ optional: true
+ sass:
+ optional: true
+ sass-embedded:
+ optional: true
+ stylus:
+ optional: true
+ sugarss:
+ optional: true
+ terser:
+ optional: true
+
+ vitest@2.1.9:
+ resolution: {integrity: sha512-MSmPM9REYqDGBI8439mA4mWhV5sKmDlBKWIYbA3lRb2PTHACE0mgKwA8yQ2xq9vxDTuk4iPrECBAEW2aoFXY0Q==}
+ engines: {node: ^18.0.0 || >=20.0.0}
+ hasBin: true
+ peerDependencies:
+ '@edge-runtime/vm': '*'
+ '@types/node': ^18.0.0 || >=20.0.0
+ '@vitest/browser': 2.1.9
+ '@vitest/ui': 2.1.9
+ happy-dom: '*'
+ jsdom: '*'
+ peerDependenciesMeta:
+ '@edge-runtime/vm':
+ optional: true
+ '@types/node':
+ optional: true
+ '@vitest/browser':
+ optional: true
+ '@vitest/ui':
+ optional: true
+ happy-dom:
+ optional: true
+ jsdom:
+ optional: true
+
w3c-keyname@2.2.8:
resolution: {integrity: sha512-dpojBhNsCNN7T82Tm7k26A6G9ML3NkhDsnw9n/eoxSRlVBB4CEtIQ/KTCLI2Fwf3ataSXRhYFkQi3SlnFwPvPQ==}
@@ -6159,10 +6929,23 @@ packages:
engines: {node: '>= 8'}
hasBin: true
+ why-is-node-running@2.3.0:
+ resolution: {integrity: sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==}
+ engines: {node: '>=8'}
+ hasBin: true
+
word-wrap@1.2.5:
resolution: {integrity: sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==}
engines: {node: '>=0.10.0'}
+ wrap-ansi@7.0.0:
+ resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==}
+ engines: {node: '>=10'}
+
+ wrap-ansi@8.1.0:
+ resolution: {integrity: sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==}
+ engines: {node: '>=12'}
+
wrappy@1.0.2:
resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==}
@@ -6205,6 +6988,9 @@ packages:
peerDependencies:
yjs: ^13.0.0
+ yallist@3.1.1:
+ resolution: {integrity: sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==}
+
yallist@5.0.0:
resolution: {integrity: sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==}
engines: {node: '>=18'}
@@ -6233,6 +7019,8 @@ packages:
snapshots:
+ '@adobe/css-tools@4.4.4': {}
+
'@ai-sdk/provider-utils@2.2.8(zod@3.25.76)':
dependencies:
'@ai-sdk/provider': 1.1.3
@@ -6276,10 +7064,124 @@ snapshots:
'@csstools/css-tokenizer': 3.0.4
lru-cache: 10.4.3
+ '@babel/code-frame@7.27.1':
+ dependencies:
+ '@babel/helper-validator-identifier': 7.28.5
+ js-tokens: 4.0.0
+ picocolors: 1.1.1
+
+ '@babel/compat-data@7.28.5': {}
+
+ '@babel/core@7.28.5':
+ dependencies:
+ '@babel/code-frame': 7.27.1
+ '@babel/generator': 7.28.5
+ '@babel/helper-compilation-targets': 7.27.2
+ '@babel/helper-module-transforms': 7.28.3(@babel/core@7.28.5)
+ '@babel/helpers': 7.28.4
+ '@babel/parser': 7.28.5
+ '@babel/template': 7.27.2
+ '@babel/traverse': 7.28.5
+ '@babel/types': 7.28.5
+ '@jridgewell/remapping': 2.3.5
+ convert-source-map: 2.0.0
+ debug: 4.4.0
+ gensync: 1.0.0-beta.2
+ json5: 2.2.3
+ semver: 6.3.1
+ transitivePeerDependencies:
+ - supports-color
+
+ '@babel/generator@7.28.5':
+ dependencies:
+ '@babel/parser': 7.28.5
+ '@babel/types': 7.28.5
+ '@jridgewell/gen-mapping': 0.3.13
+ '@jridgewell/trace-mapping': 0.3.31
+ jsesc: 3.1.0
+
+ '@babel/helper-compilation-targets@7.27.2':
+ dependencies:
+ '@babel/compat-data': 7.28.5
+ '@babel/helper-validator-option': 7.27.1
+ browserslist: 4.28.1
+ lru-cache: 5.1.1
+ semver: 6.3.1
+
+ '@babel/helper-globals@7.28.0': {}
+
+ '@babel/helper-module-imports@7.27.1':
+ dependencies:
+ '@babel/traverse': 7.28.5
+ '@babel/types': 7.28.5
+ transitivePeerDependencies:
+ - supports-color
+
+ '@babel/helper-module-transforms@7.28.3(@babel/core@7.28.5)':
+ dependencies:
+ '@babel/core': 7.28.5
+ '@babel/helper-module-imports': 7.27.1
+ '@babel/helper-validator-identifier': 7.28.5
+ '@babel/traverse': 7.28.5
+ transitivePeerDependencies:
+ - supports-color
+
+ '@babel/helper-plugin-utils@7.27.1': {}
+
+ '@babel/helper-string-parser@7.27.1': {}
+
+ '@babel/helper-validator-identifier@7.28.5': {}
+
+ '@babel/helper-validator-option@7.27.1': {}
+
+ '@babel/helpers@7.28.4':
+ dependencies:
+ '@babel/template': 7.27.2
+ '@babel/types': 7.28.5
+
+ '@babel/parser@7.28.5':
+ dependencies:
+ '@babel/types': 7.28.5
+
+ '@babel/plugin-transform-react-jsx-self@7.27.1(@babel/core@7.28.5)':
+ dependencies:
+ '@babel/core': 7.28.5
+ '@babel/helper-plugin-utils': 7.27.1
+
+ '@babel/plugin-transform-react-jsx-source@7.27.1(@babel/core@7.28.5)':
+ dependencies:
+ '@babel/core': 7.28.5
+ '@babel/helper-plugin-utils': 7.27.1
+
'@babel/runtime@7.26.9':
dependencies:
regenerator-runtime: 0.14.1
+ '@babel/template@7.27.2':
+ dependencies:
+ '@babel/code-frame': 7.27.1
+ '@babel/parser': 7.28.5
+ '@babel/types': 7.28.5
+
+ '@babel/traverse@7.28.5':
+ dependencies:
+ '@babel/code-frame': 7.27.1
+ '@babel/generator': 7.28.5
+ '@babel/helper-globals': 7.28.0
+ '@babel/parser': 7.28.5
+ '@babel/template': 7.27.2
+ '@babel/types': 7.28.5
+ debug: 4.4.0
+ transitivePeerDependencies:
+ - supports-color
+
+ '@babel/types@7.28.5':
+ dependencies:
+ '@babel/helper-string-parser': 7.27.1
+ '@babel/helper-validator-identifier': 7.28.5
+
+ '@bcoe/v8-coverage@0.2.3': {}
+
'@biomejs/biome@2.1.2':
optionalDependencies:
'@biomejs/cli-darwin-arm64': 2.1.2
@@ -6778,102 +7680,153 @@ snapshots:
'@esbuild-kit/core-utils': 3.3.2
get-tsconfig: 4.10.0
+ '@esbuild/aix-ppc64@0.21.5':
+ optional: true
+
'@esbuild/aix-ppc64@0.25.8':
optional: true
'@esbuild/android-arm64@0.18.20':
optional: true
+ '@esbuild/android-arm64@0.21.5':
+ optional: true
+
'@esbuild/android-arm64@0.25.8':
optional: true
'@esbuild/android-arm@0.18.20':
optional: true
+ '@esbuild/android-arm@0.21.5':
+ optional: true
+
'@esbuild/android-arm@0.25.8':
optional: true
'@esbuild/android-x64@0.18.20':
optional: true
+ '@esbuild/android-x64@0.21.5':
+ optional: true
+
'@esbuild/android-x64@0.25.8':
optional: true
'@esbuild/darwin-arm64@0.18.20':
optional: true
+ '@esbuild/darwin-arm64@0.21.5':
+ optional: true
+
'@esbuild/darwin-arm64@0.25.8':
optional: true
'@esbuild/darwin-x64@0.18.20':
optional: true
+ '@esbuild/darwin-x64@0.21.5':
+ optional: true
+
'@esbuild/darwin-x64@0.25.8':
optional: true
'@esbuild/freebsd-arm64@0.18.20':
optional: true
+ '@esbuild/freebsd-arm64@0.21.5':
+ optional: true
+
'@esbuild/freebsd-arm64@0.25.8':
optional: true
'@esbuild/freebsd-x64@0.18.20':
optional: true
+ '@esbuild/freebsd-x64@0.21.5':
+ optional: true
+
'@esbuild/freebsd-x64@0.25.8':
optional: true
'@esbuild/linux-arm64@0.18.20':
optional: true
+ '@esbuild/linux-arm64@0.21.5':
+ optional: true
+
'@esbuild/linux-arm64@0.25.8':
optional: true
'@esbuild/linux-arm@0.18.20':
optional: true
+ '@esbuild/linux-arm@0.21.5':
+ optional: true
+
'@esbuild/linux-arm@0.25.8':
optional: true
'@esbuild/linux-ia32@0.18.20':
optional: true
+ '@esbuild/linux-ia32@0.21.5':
+ optional: true
+
'@esbuild/linux-ia32@0.25.8':
optional: true
'@esbuild/linux-loong64@0.18.20':
optional: true
+ '@esbuild/linux-loong64@0.21.5':
+ optional: true
+
'@esbuild/linux-loong64@0.25.8':
optional: true
'@esbuild/linux-mips64el@0.18.20':
optional: true
+ '@esbuild/linux-mips64el@0.21.5':
+ optional: true
+
'@esbuild/linux-mips64el@0.25.8':
optional: true
'@esbuild/linux-ppc64@0.18.20':
optional: true
+ '@esbuild/linux-ppc64@0.21.5':
+ optional: true
+
'@esbuild/linux-ppc64@0.25.8':
optional: true
'@esbuild/linux-riscv64@0.18.20':
optional: true
+ '@esbuild/linux-riscv64@0.21.5':
+ optional: true
+
'@esbuild/linux-riscv64@0.25.8':
optional: true
'@esbuild/linux-s390x@0.18.20':
optional: true
+ '@esbuild/linux-s390x@0.21.5':
+ optional: true
+
'@esbuild/linux-s390x@0.25.8':
optional: true
'@esbuild/linux-x64@0.18.20':
optional: true
+ '@esbuild/linux-x64@0.21.5':
+ optional: true
+
'@esbuild/linux-x64@0.25.8':
optional: true
@@ -6883,6 +7836,9 @@ snapshots:
'@esbuild/netbsd-x64@0.18.20':
optional: true
+ '@esbuild/netbsd-x64@0.21.5':
+ optional: true
+
'@esbuild/netbsd-x64@0.25.8':
optional: true
@@ -6892,6 +7848,9 @@ snapshots:
'@esbuild/openbsd-x64@0.18.20':
optional: true
+ '@esbuild/openbsd-x64@0.21.5':
+ optional: true
+
'@esbuild/openbsd-x64@0.25.8':
optional: true
@@ -6901,24 +7860,36 @@ snapshots:
'@esbuild/sunos-x64@0.18.20':
optional: true
+ '@esbuild/sunos-x64@0.21.5':
+ optional: true
+
'@esbuild/sunos-x64@0.25.8':
optional: true
'@esbuild/win32-arm64@0.18.20':
optional: true
+ '@esbuild/win32-arm64@0.21.5':
+ optional: true
+
'@esbuild/win32-arm64@0.25.8':
optional: true
'@esbuild/win32-ia32@0.18.20':
optional: true
+ '@esbuild/win32-ia32@0.21.5':
+ optional: true
+
'@esbuild/win32-ia32@0.25.8':
optional: true
'@esbuild/win32-x64@0.18.20':
optional: true
+ '@esbuild/win32-x64@0.21.5':
+ optional: true
+
'@esbuild/win32-x64@0.25.8':
optional: true
@@ -7180,9 +8151,25 @@ snapshots:
'@img/sharp-win32-x64@0.34.5':
optional: true
+ '@isaacs/cliui@8.0.2':
+ dependencies:
+ string-width: 5.1.2
+ string-width-cjs: string-width@4.2.3
+ strip-ansi: 7.1.2
+ strip-ansi-cjs: strip-ansi@6.0.1
+ wrap-ansi: 8.1.0
+ wrap-ansi-cjs: wrap-ansi@7.0.0
+
'@isaacs/fs-minipass@4.0.1':
dependencies:
- minipass: 7.1.2
+ minipass: 7.1.2
+
+ '@istanbuljs/schema@0.1.3': {}
+
+ '@jridgewell/gen-mapping@0.3.13':
+ dependencies:
+ '@jridgewell/sourcemap-codec': 1.5.0
+ '@jridgewell/trace-mapping': 0.3.31
'@jridgewell/gen-mapping@0.3.8':
dependencies:
@@ -7190,6 +8177,11 @@ snapshots:
'@jridgewell/sourcemap-codec': 1.5.0
'@jridgewell/trace-mapping': 0.3.25
+ '@jridgewell/remapping@2.3.5':
+ dependencies:
+ '@jridgewell/gen-mapping': 0.3.8
+ '@jridgewell/trace-mapping': 0.3.25
+
'@jridgewell/resolve-uri@3.1.2': {}
'@jridgewell/set-array@1.2.1': {}
@@ -7201,6 +8193,11 @@ snapshots:
'@jridgewell/resolve-uri': 3.1.2
'@jridgewell/sourcemap-codec': 1.5.0
+ '@jridgewell/trace-mapping@0.3.31':
+ dependencies:
+ '@jridgewell/resolve-uri': 3.1.2
+ '@jridgewell/sourcemap-codec': 1.5.0
+
'@lexical/clipboard@0.32.1':
dependencies:
'@lexical/html': 0.32.1
@@ -7534,7 +8531,7 @@ snapshots:
'@marijn/find-cluster-break@1.0.2': {}
- '@mdx-js/mdx@3.1.0(acorn@8.14.0)':
+ '@mdx-js/mdx@3.1.0(acorn@8.15.0)':
dependencies:
'@types/estree': 1.0.8
'@types/estree-jsx': 1.0.5
@@ -7548,7 +8545,7 @@ snapshots:
hast-util-to-jsx-runtime: 2.3.6
markdown-extensions: 2.0.0
recma-build-jsx: 1.0.0
- recma-jsx: 1.0.0(acorn@8.14.0)
+ recma-jsx: 1.0.0(acorn@8.15.0)
recma-stringify: 1.0.0
rehype-recma: 1.0.0
remark-mdx: 3.1.0
@@ -7665,9 +8662,9 @@ snapshots:
'@next/swc-win32-x64-msvc@15.5.7':
optional: true
- '@next/third-parties@15.5.7(next@15.5.7(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react@19.1.0)':
+ '@next/third-parties@15.5.7(next@15.5.7(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react@19.1.0)':
dependencies:
- next: 15.5.7(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
+ next: 15.5.7(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
react: 19.1.0
third-party-capital: 1.0.20
@@ -7698,6 +8695,9 @@ snapshots:
'@orama/orama@3.1.11': {}
+ '@pkgjs/parseargs@0.11.0':
+ optional: true
+
'@radix-ui/colors@3.0.0': {}
'@radix-ui/number@1.1.1': {}
@@ -8683,6 +9683,74 @@ snapshots:
'@remirror/core-constants@3.0.0': {}
+ '@rolldown/pluginutils@1.0.0-beta.27': {}
+
+ '@rollup/rollup-android-arm-eabi@4.53.3':
+ optional: true
+
+ '@rollup/rollup-android-arm64@4.53.3':
+ optional: true
+
+ '@rollup/rollup-darwin-arm64@4.53.3':
+ optional: true
+
+ '@rollup/rollup-darwin-x64@4.53.3':
+ optional: true
+
+ '@rollup/rollup-freebsd-arm64@4.53.3':
+ optional: true
+
+ '@rollup/rollup-freebsd-x64@4.53.3':
+ optional: true
+
+ '@rollup/rollup-linux-arm-gnueabihf@4.53.3':
+ optional: true
+
+ '@rollup/rollup-linux-arm-musleabihf@4.53.3':
+ optional: true
+
+ '@rollup/rollup-linux-arm64-gnu@4.53.3':
+ optional: true
+
+ '@rollup/rollup-linux-arm64-musl@4.53.3':
+ optional: true
+
+ '@rollup/rollup-linux-loong64-gnu@4.53.3':
+ optional: true
+
+ '@rollup/rollup-linux-ppc64-gnu@4.53.3':
+ optional: true
+
+ '@rollup/rollup-linux-riscv64-gnu@4.53.3':
+ optional: true
+
+ '@rollup/rollup-linux-riscv64-musl@4.53.3':
+ optional: true
+
+ '@rollup/rollup-linux-s390x-gnu@4.53.3':
+ optional: true
+
+ '@rollup/rollup-linux-x64-gnu@4.53.3':
+ optional: true
+
+ '@rollup/rollup-linux-x64-musl@4.53.3':
+ optional: true
+
+ '@rollup/rollup-openharmony-arm64@4.53.3':
+ optional: true
+
+ '@rollup/rollup-win32-arm64-msvc@4.53.3':
+ optional: true
+
+ '@rollup/rollup-win32-ia32-msvc@4.53.3':
+ optional: true
+
+ '@rollup/rollup-win32-x64-gnu@4.53.3':
+ optional: true
+
+ '@rollup/rollup-win32-x64-msvc@4.53.3':
+ optional: true
+
'@rtsao/scc@1.1.0': {}
'@rushstack/eslint-patch@1.10.5': {}
@@ -8859,6 +9927,36 @@ snapshots:
'@tanstack/table-core@8.21.3': {}
+ '@testing-library/dom@10.4.1':
+ dependencies:
+ '@babel/code-frame': 7.27.1
+ '@babel/runtime': 7.26.9
+ '@types/aria-query': 5.0.4
+ aria-query: 5.3.0
+ dom-accessibility-api: 0.5.16
+ lz-string: 1.5.0
+ picocolors: 1.1.1
+ pretty-format: 27.5.1
+
+ '@testing-library/jest-dom@6.9.1':
+ dependencies:
+ '@adobe/css-tools': 4.4.4
+ aria-query: 5.3.2
+ css.escape: 1.5.1
+ dom-accessibility-api: 0.6.3
+ picocolors: 1.1.1
+ redent: 3.0.0
+
+ '@testing-library/react@16.3.0(@testing-library/dom@10.4.1)(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)':
+ dependencies:
+ '@babel/runtime': 7.26.9
+ '@testing-library/dom': 10.4.1
+ react: 19.1.0
+ react-dom: 19.1.0(react@19.1.0)
+ optionalDependencies:
+ '@types/react': 19.1.8
+ '@types/react-dom': 19.1.6(@types/react@19.1.8)
+
'@tiptap/core@3.11.0(@tiptap/pm@3.11.0)':
dependencies:
'@tiptap/pm': 3.11.0
@@ -8967,6 +10065,29 @@ snapshots:
transitivePeerDependencies:
- '@floating-ui/dom'
+ '@types/aria-query@5.0.4': {}
+
+ '@types/babel__core@7.20.5':
+ dependencies:
+ '@babel/parser': 7.28.5
+ '@babel/types': 7.28.5
+ '@types/babel__generator': 7.27.0
+ '@types/babel__template': 7.4.4
+ '@types/babel__traverse': 7.28.0
+
+ '@types/babel__generator@7.27.0':
+ dependencies:
+ '@babel/types': 7.28.5
+
+ '@types/babel__template@7.4.4':
+ dependencies:
+ '@babel/parser': 7.28.5
+ '@babel/types': 7.28.5
+
+ '@types/babel__traverse@7.28.0':
+ dependencies:
+ '@babel/types': 7.28.5
+
'@types/canvas-confetti@1.9.0': {}
'@types/debug@4.1.12':
@@ -9166,6 +10287,76 @@ snapshots:
'@ungap/structured-clone@1.3.0': {}
+ '@vitejs/plugin-react@4.7.0(vite@5.4.21(@types/node@20.19.9)(lightningcss@1.30.1))':
+ dependencies:
+ '@babel/core': 7.28.5
+ '@babel/plugin-transform-react-jsx-self': 7.27.1(@babel/core@7.28.5)
+ '@babel/plugin-transform-react-jsx-source': 7.27.1(@babel/core@7.28.5)
+ '@rolldown/pluginutils': 1.0.0-beta.27
+ '@types/babel__core': 7.20.5
+ react-refresh: 0.17.0
+ vite: 5.4.21(@types/node@20.19.9)(lightningcss@1.30.1)
+ transitivePeerDependencies:
+ - supports-color
+
+ '@vitest/coverage-v8@2.1.9(vitest@2.1.9(@types/node@20.19.9)(jsdom@25.0.1)(lightningcss@1.30.1))':
+ dependencies:
+ '@ampproject/remapping': 2.3.0
+ '@bcoe/v8-coverage': 0.2.3
+ debug: 4.4.0
+ istanbul-lib-coverage: 3.2.2
+ istanbul-lib-report: 3.0.1
+ istanbul-lib-source-maps: 5.0.6
+ istanbul-reports: 3.2.0
+ magic-string: 0.30.17
+ magicast: 0.3.5
+ std-env: 3.10.0
+ test-exclude: 7.0.1
+ tinyrainbow: 1.2.0
+ vitest: 2.1.9(@types/node@20.19.9)(jsdom@25.0.1)(lightningcss@1.30.1)
+ transitivePeerDependencies:
+ - supports-color
+
+ '@vitest/expect@2.1.9':
+ dependencies:
+ '@vitest/spy': 2.1.9
+ '@vitest/utils': 2.1.9
+ chai: 5.3.3
+ tinyrainbow: 1.2.0
+
+ '@vitest/mocker@2.1.9(vite@5.4.21(@types/node@20.19.9)(lightningcss@1.30.1))':
+ dependencies:
+ '@vitest/spy': 2.1.9
+ estree-walker: 3.0.3
+ magic-string: 0.30.17
+ optionalDependencies:
+ vite: 5.4.21(@types/node@20.19.9)(lightningcss@1.30.1)
+
+ '@vitest/pretty-format@2.1.9':
+ dependencies:
+ tinyrainbow: 1.2.0
+
+ '@vitest/runner@2.1.9':
+ dependencies:
+ '@vitest/utils': 2.1.9
+ pathe: 1.1.2
+
+ '@vitest/snapshot@2.1.9':
+ dependencies:
+ '@vitest/pretty-format': 2.1.9
+ magic-string: 0.30.17
+ pathe: 1.1.2
+
+ '@vitest/spy@2.1.9':
+ dependencies:
+ tinyspy: 3.0.2
+
+ '@vitest/utils@2.1.9':
+ dependencies:
+ '@vitest/pretty-format': 2.1.9
+ loupe: 3.2.1
+ tinyrainbow: 1.2.0
+
'@wojtekmaj/react-hooks@1.17.2(react@19.1.0)':
dependencies:
'@types/react': 19.1.8
@@ -9206,16 +10397,28 @@ snapshots:
anser@2.3.2: {}
+ ansi-regex@5.0.1: {}
+
+ ansi-regex@6.2.2: {}
+
ansi-styles@4.3.0:
dependencies:
color-convert: 2.0.1
+ ansi-styles@5.2.0: {}
+
+ ansi-styles@6.2.3: {}
+
argparse@2.0.1: {}
aria-hidden@1.2.6:
dependencies:
tslib: 2.8.1
+ aria-query@5.3.0:
+ dependencies:
+ dequal: 2.0.3
+
aria-query@5.3.2: {}
array-buffer-byte-length@1.0.2:
@@ -9286,6 +10489,8 @@ snapshots:
asap@2.0.6: {}
+ assertion-error@2.0.1: {}
+
ast-types-flow@0.0.8: {}
astring@1.9.0: {}
@@ -9312,6 +10517,8 @@ snapshots:
base64-js@1.5.1: {}
+ baseline-browser-mapping@2.9.0: {}
+
bl@4.1.0:
dependencies:
buffer: 5.7.1
@@ -9332,6 +10539,14 @@ snapshots:
dependencies:
fill-range: 7.1.1
+ browserslist@4.28.1:
+ dependencies:
+ baseline-browser-mapping: 2.9.0
+ caniuse-lite: 1.0.30001759
+ electron-to-chromium: 1.5.264
+ node-releases: 2.0.27
+ update-browserslist-db: 1.2.1(browserslist@4.28.1)
+
buffer-from@1.1.2: {}
buffer@5.7.1:
@@ -9345,6 +10560,8 @@ snapshots:
base64-js: 1.5.1
ieee754: 1.2.1
+ cac@6.7.14: {}
+
call-bind-apply-helpers@1.0.2:
dependencies:
es-errors: 1.3.0
@@ -9366,6 +10583,8 @@ snapshots:
caniuse-lite@1.0.30001706: {}
+ caniuse-lite@1.0.30001759: {}
+
canvas-confetti@1.9.3: {}
canvas@3.1.2:
@@ -9376,6 +10595,14 @@ snapshots:
ccount@2.0.1: {}
+ chai@5.3.3:
+ dependencies:
+ assertion-error: 2.0.1
+ check-error: 2.1.1
+ deep-eql: 5.0.2
+ loupe: 3.2.1
+ pathval: 2.0.1
+
chalk@4.1.2:
dependencies:
ansi-styles: 4.3.0
@@ -9397,6 +10624,8 @@ snapshots:
character-reference-invalid@2.0.1: {}
+ check-error@2.1.1: {}
+
chokidar@4.0.3:
dependencies:
readdirp: 4.1.2
@@ -9479,6 +10708,8 @@ snapshots:
concat-map@0.0.1: {}
+ convert-source-map@2.0.0: {}
+
crelt@1.0.6: {}
cross-env@7.0.3:
@@ -9497,6 +10728,8 @@ snapshots:
shebang-command: 2.0.0
which: 2.0.2
+ css.escape@1.5.1: {}
+
cssesc@3.0.0: {}
cssstyle@4.6.0:
@@ -9559,6 +10792,8 @@ snapshots:
mimic-response: 3.1.0
optional: true
+ deep-eql@5.0.2: {}
+
deep-extend@0.6.0:
optional: true
@@ -9601,6 +10836,10 @@ snapshots:
dependencies:
esutils: 2.0.3
+ dom-accessibility-api@0.5.16: {}
+
+ dom-accessibility-api@0.6.3: {}
+
dotenv@16.6.1: {}
dotenv@17.2.3: {}
@@ -9636,6 +10875,10 @@ snapshots:
es-errors: 1.3.0
gopd: 1.2.0
+ eastasianwidth@0.2.0: {}
+
+ electron-to-chromium@1.5.264: {}
+
emblor@1.4.8(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0):
dependencies:
'@radix-ui/react-dialog': 1.0.4(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
@@ -9654,6 +10897,8 @@ snapshots:
emoji-mart@5.6.0: {}
+ emoji-regex@8.0.0: {}
+
emoji-regex@9.2.2: {}
end-of-stream@1.4.5:
@@ -9745,6 +10990,8 @@ snapshots:
iterator.prototype: 1.1.5
safe-array-concat: 1.1.3
+ es-module-lexer@1.7.0: {}
+
es-object-atoms@1.1.1:
dependencies:
es-errors: 1.3.0
@@ -9830,6 +11077,32 @@ snapshots:
'@esbuild/win32-ia32': 0.18.20
'@esbuild/win32-x64': 0.18.20
+ esbuild@0.21.5:
+ optionalDependencies:
+ '@esbuild/aix-ppc64': 0.21.5
+ '@esbuild/android-arm': 0.21.5
+ '@esbuild/android-arm64': 0.21.5
+ '@esbuild/android-x64': 0.21.5
+ '@esbuild/darwin-arm64': 0.21.5
+ '@esbuild/darwin-x64': 0.21.5
+ '@esbuild/freebsd-arm64': 0.21.5
+ '@esbuild/freebsd-x64': 0.21.5
+ '@esbuild/linux-arm': 0.21.5
+ '@esbuild/linux-arm64': 0.21.5
+ '@esbuild/linux-ia32': 0.21.5
+ '@esbuild/linux-loong64': 0.21.5
+ '@esbuild/linux-mips64el': 0.21.5
+ '@esbuild/linux-ppc64': 0.21.5
+ '@esbuild/linux-riscv64': 0.21.5
+ '@esbuild/linux-s390x': 0.21.5
+ '@esbuild/linux-x64': 0.21.5
+ '@esbuild/netbsd-x64': 0.21.5
+ '@esbuild/openbsd-x64': 0.21.5
+ '@esbuild/sunos-x64': 0.21.5
+ '@esbuild/win32-arm64': 0.21.5
+ '@esbuild/win32-ia32': 0.21.5
+ '@esbuild/win32-x64': 0.21.5
+
esbuild@0.25.8:
optionalDependencies:
'@esbuild/aix-ppc64': 0.25.8
@@ -9859,6 +11132,8 @@ snapshots:
'@esbuild/win32-ia32': 0.25.8
'@esbuild/win32-x64': 0.25.8
+ escalade@3.2.0: {}
+
escape-carriage@1.3.1: {}
escape-string-regexp@4.0.0: {}
@@ -10124,6 +11399,8 @@ snapshots:
expand-template@2.0.3:
optional: true
+ expect-type@1.2.2: {}
+
ext@1.7.0:
dependencies:
type: 2.7.3
@@ -10228,6 +11505,11 @@ snapshots:
dependencies:
is-callable: 1.2.7
+ foreground-child@3.3.1:
+ dependencies:
+ cross-spawn: 7.0.6
+ signal-exit: 4.1.0
+
form-data@4.0.5:
dependencies:
asynckit: 0.4.0
@@ -10253,7 +11535,7 @@ snapshots:
fsevents@2.3.3:
optional: true
- fumadocs-core@15.6.6(@types/react@19.1.8)(next@15.5.7(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react-dom@19.1.0(react@19.1.0))(react@19.1.0):
+ fumadocs-core@15.6.6(@types/react@19.1.8)(next@15.5.7(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react-dom@19.1.0(react@19.1.0))(react@19.1.0):
dependencies:
'@formatjs/intl-localematcher': 0.6.1
'@orama/orama': 3.1.11
@@ -10274,20 +11556,20 @@ snapshots:
unist-util-visit: 5.0.0
optionalDependencies:
'@types/react': 19.1.8
- next: 15.5.7(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
+ next: 15.5.7(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
react: 19.1.0
react-dom: 19.1.0(react@19.1.0)
transitivePeerDependencies:
- supports-color
- fumadocs-mdx@11.7.1(acorn@8.14.0)(fumadocs-core@15.6.6(@types/react@19.1.8)(next@15.5.7(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(next@15.5.7(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react@19.1.0):
+ fumadocs-mdx@11.7.1(acorn@8.15.0)(fumadocs-core@15.6.6(@types/react@19.1.8)(next@15.5.7(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(next@15.5.7(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react@19.1.0)(vite@5.4.21(@types/node@20.19.9)(lightningcss@1.30.1)):
dependencies:
- '@mdx-js/mdx': 3.1.0(acorn@8.14.0)
+ '@mdx-js/mdx': 3.1.0(acorn@8.15.0)
'@standard-schema/spec': 1.0.0
chokidar: 4.0.3
esbuild: 0.25.8
estree-util-value-to-estree: 3.4.0
- fumadocs-core: 15.6.6(@types/react@19.1.8)(next@15.5.7(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
+ fumadocs-core: 15.6.6(@types/react@19.1.8)(next@15.5.7(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
js-yaml: 4.1.0
lru-cache: 11.1.0
picocolors: 1.1.1
@@ -10296,13 +11578,14 @@ snapshots:
unist-util-visit: 5.0.0
zod: 4.0.10
optionalDependencies:
- next: 15.5.7(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
+ next: 15.5.7(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
react: 19.1.0
+ vite: 5.4.21(@types/node@20.19.9)(lightningcss@1.30.1)
transitivePeerDependencies:
- acorn
- supports-color
- fumadocs-ui@15.6.6(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(next@15.5.7(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(tailwindcss@4.1.11):
+ fumadocs-ui@15.6.6(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(next@15.5.7(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react-dom@19.1.0(react@19.1.0))(react@19.1.0)(tailwindcss@4.1.11):
dependencies:
'@radix-ui/react-accordion': 1.2.11(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
'@radix-ui/react-collapsible': 1.1.11(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
@@ -10315,7 +11598,7 @@ snapshots:
'@radix-ui/react-slot': 1.2.3(@types/react@19.1.8)(react@19.1.0)
'@radix-ui/react-tabs': 1.1.12(@types/react-dom@19.1.6(@types/react@19.1.8))(@types/react@19.1.8)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
class-variance-authority: 0.7.1
- fumadocs-core: 15.6.6(@types/react@19.1.8)(next@15.5.7(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
+ fumadocs-core: 15.6.6(@types/react@19.1.8)(next@15.5.7(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
lodash.merge: 4.6.2
next-themes: 0.4.6(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
postcss-selector-parser: 7.1.0
@@ -10326,7 +11609,7 @@ snapshots:
tailwind-merge: 3.3.1
optionalDependencies:
'@types/react': 19.1.8
- next: 15.5.7(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
+ next: 15.5.7(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
tailwindcss: 4.1.11
transitivePeerDependencies:
- '@mixedbread/sdk'
@@ -10350,9 +11633,11 @@ snapshots:
fuse.js@6.6.2: {}
- geist@1.4.2(next@15.5.7(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)):
+ geist@1.4.2(next@15.5.7(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)):
dependencies:
- next: 15.5.7(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
+ next: 15.5.7(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
+
+ gensync@1.0.0-beta.2: {}
get-intrinsic@1.3.0:
dependencies:
@@ -10397,6 +11682,15 @@ snapshots:
dependencies:
is-glob: 4.0.3
+ glob@10.5.0:
+ dependencies:
+ foreground-child: 3.3.1
+ jackspeak: 3.4.3
+ minimatch: 9.0.5
+ minipass: 7.1.2
+ package-json-from-dist: 1.0.1
+ path-scurry: 1.11.1
+
globals@14.0.0: {}
globalthis@1.0.4:
@@ -10661,6 +11955,8 @@ snapshots:
dependencies:
whatwg-encoding: 3.1.1
+ html-escaper@2.0.2: {}
+
html-url-attributes@3.0.1: {}
html-void-elements@3.0.0: {}
@@ -10698,6 +11994,8 @@ snapshots:
imurmurhash@0.1.4: {}
+ indent-string@4.0.0: {}
+
inherits@2.0.4:
optional: true
@@ -10793,6 +12091,8 @@ snapshots:
dependencies:
call-bound: 1.0.3
+ is-fullwidth-code-point@3.0.0: {}
+
is-generator-function@1.1.0:
dependencies:
call-bound: 1.0.3
@@ -10866,6 +12166,27 @@ snapshots:
isomorphic.js@0.2.5: {}
+ istanbul-lib-coverage@3.2.2: {}
+
+ istanbul-lib-report@3.0.1:
+ dependencies:
+ istanbul-lib-coverage: 3.2.2
+ make-dir: 4.0.0
+ supports-color: 7.2.0
+
+ istanbul-lib-source-maps@5.0.6:
+ dependencies:
+ '@jridgewell/trace-mapping': 0.3.25
+ debug: 4.4.0
+ istanbul-lib-coverage: 3.2.2
+ transitivePeerDependencies:
+ - supports-color
+
+ istanbul-reports@3.2.0:
+ dependencies:
+ html-escaper: 2.0.2
+ istanbul-lib-report: 3.0.1
+
iterator.prototype@1.1.5:
dependencies:
define-data-property: 1.1.4
@@ -10875,18 +12196,26 @@ snapshots:
has-symbols: 1.1.0
set-function-name: 2.0.2
+ jackspeak@3.4.3:
+ dependencies:
+ '@isaacs/cliui': 8.0.2
+ optionalDependencies:
+ '@pkgjs/parseargs': 0.11.0
+
jiti@2.4.2: {}
- jotai-tanstack-query@0.11.0(@tanstack/query-core@5.90.7)(@tanstack/react-query@5.90.7(react@19.1.0))(jotai@2.15.1(@types/react@19.1.8)(react@19.1.0))(react@19.1.0):
+ jotai-tanstack-query@0.11.0(@tanstack/query-core@5.90.7)(@tanstack/react-query@5.90.7(react@19.1.0))(jotai@2.15.1(@babel/core@7.28.5)(@babel/template@7.27.2)(@types/react@19.1.8)(react@19.1.0))(react@19.1.0):
dependencies:
'@tanstack/query-core': 5.90.7
- jotai: 2.15.1(@types/react@19.1.8)(react@19.1.0)
+ jotai: 2.15.1(@babel/core@7.28.5)(@babel/template@7.27.2)(@types/react@19.1.8)(react@19.1.0)
optionalDependencies:
'@tanstack/react-query': 5.90.7(react@19.1.0)
react: 19.1.0
- jotai@2.15.1(@types/react@19.1.8)(react@19.1.0):
+ jotai@2.15.1(@babel/core@7.28.5)(@babel/template@7.27.2)(@types/react@19.1.8)(react@19.1.0):
optionalDependencies:
+ '@babel/core': 7.28.5
+ '@babel/template': 7.27.2
'@types/react': 19.1.8
react: 19.1.0
@@ -10924,6 +12253,8 @@ snapshots:
- supports-color
- utf-8-validate
+ jsesc@3.1.0: {}
+
json-buffer@3.0.1: {}
json-schema-traverse@0.4.1: {}
@@ -10936,6 +12267,8 @@ snapshots:
dependencies:
minimist: 1.2.8
+ json5@2.2.3: {}
+
jsondiffpatch@0.6.0:
dependencies:
'@types/diff-match-patch': 1.0.36
@@ -11051,6 +12384,8 @@ snapshots:
dependencies:
js-tokens: 4.0.0
+ loupe@3.2.1: {}
+
lowlight@1.20.0:
dependencies:
fault: 1.0.4
@@ -11060,6 +12395,10 @@ snapshots:
lru-cache@11.1.0: {}
+ lru-cache@5.1.1:
+ dependencies:
+ yallist: 3.1.1
+
lucide-react@0.453.0(react@19.1.0):
dependencies:
react: 19.1.0
@@ -11074,8 +12413,18 @@ snapshots:
dependencies:
'@jridgewell/sourcemap-codec': 1.5.0
+ magicast@0.3.5:
+ dependencies:
+ '@babel/parser': 7.28.5
+ '@babel/types': 7.28.5
+ source-map-js: 1.2.1
+
make-cancellable-promise@1.3.2: {}
+ make-dir@4.0.0:
+ dependencies:
+ semver: 7.7.3
+
make-event-props@1.6.2: {}
markdown-extensions@2.0.0: {}
@@ -11950,11 +13299,11 @@ snapshots:
negotiator@1.0.0: {}
- next-intl@3.26.5(next@15.5.7(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react@19.1.0):
+ next-intl@3.26.5(next@15.5.7(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0))(react@19.1.0):
dependencies:
'@formatjs/intl-localematcher': 0.5.10
negotiator: 1.0.0
- next: 15.5.7(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
+ next: 15.5.7(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0)
react: 19.1.0
use-intl: 3.26.5(react@19.1.0)
@@ -11965,7 +13314,7 @@ snapshots:
next-tick@1.1.0: {}
- next@15.5.7(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0):
+ next@15.5.7(@babel/core@7.28.5)(@opentelemetry/api@1.9.0)(react-dom@19.1.0(react@19.1.0))(react@19.1.0):
dependencies:
'@next/env': 15.5.7
'@swc/helpers': 0.5.15
@@ -11973,7 +13322,7 @@ snapshots:
postcss: 8.4.31
react: 19.1.0
react-dom: 19.1.0(react@19.1.0)
- styled-jsx: 5.1.6(react@19.1.0)
+ styled-jsx: 5.1.6(@babel/core@7.28.5)(react@19.1.0)
optionalDependencies:
'@next/swc-darwin-arm64': 15.5.7
'@next/swc-darwin-x64': 15.5.7
@@ -12001,6 +13350,8 @@ snapshots:
dependencies:
whatwg-url: 5.0.0
+ node-releases@2.0.27: {}
+
npm-to-yarn@3.0.1: {}
number-flow@0.5.8:
@@ -12090,6 +13441,8 @@ snapshots:
dependencies:
p-limit: 3.1.0
+ package-json-from-dist@1.0.1: {}
+
parent-module@1.0.1:
dependencies:
callsites: 3.1.0
@@ -12123,9 +13476,18 @@ snapshots:
path-parse@1.0.7: {}
+ path-scurry@1.11.1:
+ dependencies:
+ lru-cache: 10.4.3
+ minipass: 7.1.2
+
path2d@0.2.2:
optional: true
+ pathe@1.1.2: {}
+
+ pathval@2.0.1: {}
+
pdfjs-dist@4.8.69:
optionalDependencies:
canvas: 3.1.2
@@ -12228,6 +13590,12 @@ snapshots:
prelude-ls@1.2.1: {}
+ pretty-format@27.5.1:
+ dependencies:
+ ansi-regex: 5.0.1
+ ansi-styles: 5.2.0
+ react-is: 17.0.2
+
prismjs@1.27.0: {}
prismjs@1.30.0: {}
@@ -12530,6 +13898,8 @@ snapshots:
optionalDependencies:
'@types/react': 19.1.8
+ react-refresh@0.17.0: {}
+
react-remove-scroll-bar@2.3.8(@types/react@19.1.8)(react@19.1.0):
dependencies:
react: 19.1.0
@@ -12641,9 +14011,9 @@ snapshots:
estree-util-build-jsx: 3.0.1
vfile: 6.0.3
- recma-jsx@1.0.0(acorn@8.14.0):
+ recma-jsx@1.0.0(acorn@8.15.0):
dependencies:
- acorn-jsx: 5.3.2(acorn@8.14.0)
+ acorn-jsx: 5.3.2(acorn@8.15.0)
estree-util-to-js: 2.0.0
recma-parse: 1.0.0
recma-stringify: 1.0.0
@@ -12665,6 +14035,11 @@ snapshots:
unified: 11.0.5
vfile: 6.0.3
+ redent@3.0.0:
+ dependencies:
+ indent-string: 4.0.0
+ strip-indent: 3.0.0
+
reflect.getprototypeof@1.0.10:
dependencies:
call-bind: 1.0.8
@@ -12867,6 +14242,34 @@ snapshots:
reusify@1.1.0: {}
+ rollup@4.53.3:
+ dependencies:
+ '@types/estree': 1.0.8
+ optionalDependencies:
+ '@rollup/rollup-android-arm-eabi': 4.53.3
+ '@rollup/rollup-android-arm64': 4.53.3
+ '@rollup/rollup-darwin-arm64': 4.53.3
+ '@rollup/rollup-darwin-x64': 4.53.3
+ '@rollup/rollup-freebsd-arm64': 4.53.3
+ '@rollup/rollup-freebsd-x64': 4.53.3
+ '@rollup/rollup-linux-arm-gnueabihf': 4.53.3
+ '@rollup/rollup-linux-arm-musleabihf': 4.53.3
+ '@rollup/rollup-linux-arm64-gnu': 4.53.3
+ '@rollup/rollup-linux-arm64-musl': 4.53.3
+ '@rollup/rollup-linux-loong64-gnu': 4.53.3
+ '@rollup/rollup-linux-ppc64-gnu': 4.53.3
+ '@rollup/rollup-linux-riscv64-gnu': 4.53.3
+ '@rollup/rollup-linux-riscv64-musl': 4.53.3
+ '@rollup/rollup-linux-s390x-gnu': 4.53.3
+ '@rollup/rollup-linux-x64-gnu': 4.53.3
+ '@rollup/rollup-linux-x64-musl': 4.53.3
+ '@rollup/rollup-openharmony-arm64': 4.53.3
+ '@rollup/rollup-win32-arm64-msvc': 4.53.3
+ '@rollup/rollup-win32-ia32-msvc': 4.53.3
+ '@rollup/rollup-win32-x64-gnu': 4.53.3
+ '@rollup/rollup-win32-x64-msvc': 4.53.3
+ fsevents: 2.3.3
+
rope-sequence@1.3.4: {}
rough-notation@0.5.1: {}
@@ -13024,6 +14427,10 @@ snapshots:
side-channel-map: 1.0.1
side-channel-weakmap: 1.0.2
+ siginfo@2.0.0: {}
+
+ signal-exit@4.1.0: {}
+
simple-concat@1.0.1:
optional: true
@@ -13058,6 +14465,8 @@ snapshots:
stable-hash@0.0.4: {}
+ stackback@0.0.2: {}
+
static-browser-server@1.0.3:
dependencies:
'@open-draft/deferred-promise': 2.2.0
@@ -13065,8 +14474,22 @@ snapshots:
mime-db: 1.54.0
outvariant: 1.4.0
+ std-env@3.10.0: {}
+
strict-event-emitter@0.4.6: {}
+ string-width@4.2.3:
+ dependencies:
+ emoji-regex: 8.0.0
+ is-fullwidth-code-point: 3.0.0
+ strip-ansi: 6.0.1
+
+ string-width@5.1.2:
+ dependencies:
+ eastasianwidth: 0.2.0
+ emoji-regex: 9.2.2
+ strip-ansi: 7.1.2
+
string.prototype.includes@2.0.1:
dependencies:
call-bind: 1.0.8
@@ -13127,8 +14550,20 @@ snapshots:
character-entities-html4: 2.1.0
character-entities-legacy: 3.0.0
+ strip-ansi@6.0.1:
+ dependencies:
+ ansi-regex: 5.0.1
+
+ strip-ansi@7.1.2:
+ dependencies:
+ ansi-regex: 6.2.2
+
strip-bom@3.0.0: {}
+ strip-indent@3.0.0:
+ dependencies:
+ min-indent: 1.0.1
+
strip-indent@4.0.0:
dependencies:
min-indent: 1.0.1
@@ -13152,10 +14587,12 @@ snapshots:
dependencies:
inline-style-parser: 0.2.4
- styled-jsx@5.1.6(react@19.1.0):
+ styled-jsx@5.1.6(@babel/core@7.28.5)(react@19.1.0):
dependencies:
client-only: 0.0.1
react: 19.1.0
+ optionalDependencies:
+ '@babel/core': 7.28.5
supports-color@7.2.0:
dependencies:
@@ -13213,12 +14650,22 @@ snapshots:
mkdirp: 3.0.1
yallist: 5.0.0
+ test-exclude@7.0.1:
+ dependencies:
+ '@istanbuljs/schema': 0.1.3
+ glob: 10.5.0
+ minimatch: 9.0.5
+
third-party-capital@1.0.20: {}
throttleit@2.1.0: {}
tiny-invariant@1.3.3: {}
+ tinybench@2.9.0: {}
+
+ tinyexec@0.3.2: {}
+
tinyexec@1.0.1: {}
tinyglobby@0.2.12:
@@ -13231,6 +14678,12 @@ snapshots:
fdir: 6.4.6(picomatch@4.0.3)
picomatch: 4.0.3
+ tinypool@1.1.1: {}
+
+ tinyrainbow@1.2.0: {}
+
+ tinyspy@3.0.2: {}
+
tldts-core@6.1.86: {}
tldts@6.1.86:
@@ -13439,6 +14892,12 @@ snapshots:
unist-util-is: 6.0.0
unist-util-visit-parents: 6.0.1
+ update-browserslist-db@1.2.1(browserslist@4.28.1):
+ dependencies:
+ browserslist: 4.28.1
+ escalade: 3.2.0
+ picocolors: 1.1.1
+
uri-js@4.4.1:
dependencies:
punycode: 2.3.1
@@ -13550,6 +15009,70 @@ snapshots:
'@types/unist': 3.0.3
vfile-message: 4.0.2
+ vite-node@2.1.9(@types/node@20.19.9)(lightningcss@1.30.1):
+ dependencies:
+ cac: 6.7.14
+ debug: 4.4.0
+ es-module-lexer: 1.7.0
+ pathe: 1.1.2
+ vite: 5.4.21(@types/node@20.19.9)(lightningcss@1.30.1)
+ transitivePeerDependencies:
+ - '@types/node'
+ - less
+ - lightningcss
+ - sass
+ - sass-embedded
+ - stylus
+ - sugarss
+ - supports-color
+ - terser
+
+ vite@5.4.21(@types/node@20.19.9)(lightningcss@1.30.1):
+ dependencies:
+ esbuild: 0.21.5
+ postcss: 8.5.3
+ rollup: 4.53.3
+ optionalDependencies:
+ '@types/node': 20.19.9
+ fsevents: 2.3.3
+ lightningcss: 1.30.1
+
+ vitest@2.1.9(@types/node@20.19.9)(jsdom@25.0.1)(lightningcss@1.30.1):
+ dependencies:
+ '@vitest/expect': 2.1.9
+ '@vitest/mocker': 2.1.9(vite@5.4.21(@types/node@20.19.9)(lightningcss@1.30.1))
+ '@vitest/pretty-format': 2.1.9
+ '@vitest/runner': 2.1.9
+ '@vitest/snapshot': 2.1.9
+ '@vitest/spy': 2.1.9
+ '@vitest/utils': 2.1.9
+ chai: 5.3.3
+ debug: 4.4.0
+ expect-type: 1.2.2
+ magic-string: 0.30.17
+ pathe: 1.1.2
+ std-env: 3.10.0
+ tinybench: 2.9.0
+ tinyexec: 0.3.2
+ tinypool: 1.1.1
+ tinyrainbow: 1.2.0
+ vite: 5.4.21(@types/node@20.19.9)(lightningcss@1.30.1)
+ vite-node: 2.1.9(@types/node@20.19.9)(lightningcss@1.30.1)
+ why-is-node-running: 2.3.0
+ optionalDependencies:
+ '@types/node': 20.19.9
+ jsdom: 25.0.1
+ transitivePeerDependencies:
+ - less
+ - lightningcss
+ - msw
+ - sass
+ - sass-embedded
+ - stylus
+ - sugarss
+ - supports-color
+ - terser
+
w3c-keyname@2.2.8: {}
w3c-xmlserializer@5.0.0:
@@ -13626,8 +15149,25 @@ snapshots:
dependencies:
isexe: 2.0.0
+ why-is-node-running@2.3.0:
+ dependencies:
+ siginfo: 2.0.0
+ stackback: 0.0.2
+
word-wrap@1.2.5: {}
+ wrap-ansi@7.0.0:
+ dependencies:
+ ansi-styles: 4.3.0
+ string-width: 4.2.3
+ strip-ansi: 6.0.1
+
+ wrap-ansi@8.1.0:
+ dependencies:
+ ansi-styles: 6.2.3
+ string-width: 5.1.2
+ strip-ansi: 7.1.2
+
wrappy@1.0.2:
optional: true
@@ -13653,6 +15193,8 @@ snapshots:
lib0: 0.2.114
yjs: 13.6.27
+ yallist@3.1.1: {}
+
yallist@5.0.0: {}
yjs@13.6.27:
diff --git a/surfsense_web/tests/components/Logo.test.tsx b/surfsense_web/tests/components/Logo.test.tsx
new file mode 100644
index 000000000..1c71a344f
--- /dev/null
+++ b/surfsense_web/tests/components/Logo.test.tsx
@@ -0,0 +1,50 @@
+/**
+ * Tests for components/Logo.tsx
+ *
+ * These tests validate:
+ * 1. Logo renders as a link to home page
+ * 2. Logo image has correct alt text for accessibility
+ * 3. Logo accepts and applies custom className
+ */
+
+import { describe, it, expect } from "vitest";
+import { render, screen } from "@testing-library/react";
+import { Logo } from "@/components/Logo";
+
+describe("Logo", () => {
+ it("should render a link to the home page", () => {
+ render(<Logo />);
+
+ const link = screen.getByRole("link");
+ expect(link).toHaveAttribute("href", "/");
+ });
+
+ it("should render an image with correct alt text", () => {
+ render(<Logo />);
+
+ const image = screen.getByAltText("logo");
+ expect(image).toBeInTheDocument();
+ });
+
+ it("should have correct image source", () => {
+ render(<Logo />);
+
+ const image = screen.getByAltText("logo");
+ // Next.js Image component transforms the src, so we check if src attribute exists
+ expect(image).toHaveAttribute("src");
+ });
+
+ it("should apply custom className to the image", () => {
+ render(<Logo className="h-8 w-8" />);
+
+ const image = screen.getByAltText("logo");
+ expect(image).toHaveClass("h-8", "w-8");
+ });
+
+ it("should render without className prop", () => {
+ render(<Logo />);
+
+ const image = screen.getByAltText("logo");
+ expect(image).toBeInTheDocument();
+ });
+});
diff --git a/surfsense_web/tests/components/ui/button.test.tsx b/surfsense_web/tests/components/ui/button.test.tsx
new file mode 100644
index 000000000..9d7af9db5
--- /dev/null
+++ b/surfsense_web/tests/components/ui/button.test.tsx
@@ -0,0 +1,202 @@
+/**
+ * Tests for components/ui/button.tsx
+ *
+ * These tests validate:
+ * 1. Button renders correctly with different variants
+ * 2. Button renders correctly with different sizes
+ * 3. Button handles click events
+ * 4. Button supports asChild prop for composition
+ * 5. Button applies custom classNames correctly
+ */
+
+import { describe, it, expect, vi } from "vitest";
+import { render, screen, fireEvent } from "@testing-library/react";
+import { Button, buttonVariants } from "@/components/ui/button";
+
+describe("Button", () => {
+ describe("rendering", () => {
+ it("should render children correctly", () => {
+ render(<Button>Click me</Button>);
+
+ expect(screen.getByRole("button", { name: "Click me" })).toBeInTheDocument();
+ });
+
+ it("should render as a button element by default", () => {
+ render(<Button>Test</Button>);
+
+ const button = screen.getByRole("button");
+ expect(button.tagName).toBe("BUTTON");
+ });
+
+ it("should apply data-slot attribute", () => {
+ render(<Button>Test</Button>);
+
+ expect(screen.getByRole("button")).toHaveAttribute("data-slot", "button");
+ });
+ });
+
+ describe("variants", () => {
+ it("should apply default variant styles", () => {
+ render(<Button>Default</Button>);
+
+ const button = screen.getByRole("button");
+ expect(button).toHaveClass("bg-primary");
+ });
+
+ it("should apply destructive variant styles", () => {
+ render(<Button variant="destructive">Delete</Button>);
+
+ const button = screen.getByRole("button");
+ expect(button).toHaveClass("bg-destructive");
+ });
+
+ it("should apply outline variant styles", () => {
+ render(<Button variant="outline">Outline</Button>);
+
+ const button = screen.getByRole("button");
+ expect(button).toHaveClass("border");
+ expect(button).toHaveClass("bg-background");
+ });
+
+ it("should apply secondary variant styles", () => {
+ render(<Button variant="secondary">Secondary</Button>);
+
+ const button = screen.getByRole("button");
+ expect(button).toHaveClass("bg-secondary");
+ });
+
+ it("should apply ghost variant styles", () => {
+ render(<Button variant="ghost">Ghost</Button>);
+
+ const button = screen.getByRole("button");
+ expect(button).toHaveClass("hover:bg-accent");
+ });
+
+ it("should apply link variant styles", () => {
+ render(<Button variant="link">Link</Button>);
+
+ const button = screen.getByRole("button");
+ expect(button).toHaveClass("underline-offset-4");
+ });
+ });
+
+ describe("sizes", () => {
+ it("should apply default size styles", () => {
+ render(<Button size="default">Default</Button>);
+
+ const button = screen.getByRole("button");
+ expect(button).toHaveClass("h-9");
+ });
+
+ it("should apply small size styles", () => {
+ render(<Button size="sm">Small</Button>);
+
+ const button = screen.getByRole("button");
+ expect(button).toHaveClass("h-8");
+ });
+
+ it("should apply large size styles", () => {
+ render(<Button size="lg">Large</Button>);
+
+ const button = screen.getByRole("button");
+ expect(button).toHaveClass("h-10");
+ });
+
+ it("should apply icon size styles", () => {
+ render(<Button size="icon">Icon</Button>);
+
+ const button = screen.getByRole("button");
+ expect(button).toHaveClass("size-9");
+ });
+ });
+
+ describe("interactions", () => {
+ it("should call onClick handler when clicked", () => {
+ const handleClick = vi.fn();
+ render(<Button onClick={handleClick}>Click me</Button>);
+
+ fireEvent.click(screen.getByRole("button"));
+
+ expect(handleClick).toHaveBeenCalledTimes(1);
+ });
+
+ it("should not call onClick when disabled", () => {
+ const handleClick = vi.fn();
+ render(
+ <Button onClick={handleClick} disabled>Disabled</Button>
+ );
+
+ fireEvent.click(screen.getByRole("button"));
+
+ expect(handleClick).not.toHaveBeenCalled();
+ });
+
+ it("should apply disabled styles", () => {
+ render(<Button disabled>Disabled</Button>);
+
+ const button = screen.getByRole("button");
+ expect(button).toBeDisabled();
+ expect(button).toHaveClass("disabled:pointer-events-none");
+ });
+ });
+
+ describe("custom className", () => {
+ it("should merge custom className with default styles", () => {
+ render(<Button className="custom-class">Custom</Button>);
+
+ const button = screen.getByRole("button");
+ expect(button).toHaveClass("custom-class");
+ // Should still have base styles
+ expect(button).toHaveClass("inline-flex");
+ });
+
+ it("should allow overriding default styles", () => {
+ render(<Button className="rounded-full">Rounded</Button>);
+
+ const button = screen.getByRole("button");
+ expect(button).toHaveClass("rounded-full");
+ });
+ });
+
+ describe("asChild prop", () => {
+ it("should render as child element when asChild is true", () => {
+ render(
+ <Button asChild><a href="/test">Link Button</a></Button>
+ );
+
+ const link = screen.getByRole("link", { name: "Link Button" });
+ expect(link).toBeInTheDocument();
+ expect(link).toHaveAttribute("href", "/test");
+ });
+ });
+});
+
+describe("buttonVariants", () => {
+ it("should be a function that returns className string", () => {
+ const className = buttonVariants();
+ expect(typeof className).toBe("string");
+ expect(className.length).toBeGreaterThan(0);
+ });
+
+ it("should generate different classes for different variants", () => {
+ const defaultClass = buttonVariants({ variant: "default" });
+ const destructiveClass = buttonVariants({ variant: "destructive" });
+
+ expect(defaultClass).not.toBe(destructiveClass);
+ expect(defaultClass).toContain("bg-primary");
+ expect(destructiveClass).toContain("bg-destructive");
+ });
+
+ it("should generate different classes for different sizes", () => {
+ const defaultSize = buttonVariants({ size: "default" });
+ const smallSize = buttonVariants({ size: "sm" });
+
+ expect(defaultSize).not.toBe(smallSize);
+ expect(defaultSize).toContain("h-9");
+ expect(smallSize).toContain("h-8");
+ });
+});
diff --git a/surfsense_web/tests/hooks/use-media-query.test.ts b/surfsense_web/tests/hooks/use-media-query.test.ts
new file mode 100644
index 000000000..d399b8beb
--- /dev/null
+++ b/surfsense_web/tests/hooks/use-media-query.test.ts
@@ -0,0 +1,199 @@
+/**
+ * Tests for hooks/use-media-query.ts and hooks/use-mobile.ts
+ *
+ * These tests validate:
+ * 1. Media query hook responds to viewport changes
+ * 2. Mobile detection works correctly at breakpoints
+ */
+
+import { describe, it, expect, vi, beforeEach } from "vitest";
+import { renderHook, act } from "@testing-library/react";
+import { useMediaQuery } from "@/hooks/use-media-query";
+import { useIsMobile } from "@/hooks/use-mobile";
+
+describe("useMediaQuery", () => {
+ let mockMatchMedia: ReturnType<typeof vi.fn>;
+ let mockAddEventListener: ReturnType<typeof vi.fn>;
+ let mockRemoveEventListener: ReturnType<typeof vi.fn>;
+ let changeHandler: ((event: MediaQueryListEvent) => void) | null = null;
+
+ beforeEach(() => {
+ mockAddEventListener = vi.fn((event, handler) => {
+ if (event === "change") {
+ changeHandler = handler;
+ }
+ });
+ mockRemoveEventListener = vi.fn();
+
+ mockMatchMedia = vi.fn().mockImplementation((query: string) => ({
+ matches: false,
+ media: query,
+ onchange: null,
+ addEventListener: mockAddEventListener,
+ removeEventListener: mockRemoveEventListener,
+ addListener: vi.fn(),
+ removeListener: vi.fn(),
+ dispatchEvent: vi.fn(),
+ }));
+
+ Object.defineProperty(window, "matchMedia", {
+ writable: true,
+ value: mockMatchMedia,
+ });
+
+ changeHandler = null;
+ });
+
+ it("should return false by default", () => {
+ const { result } = renderHook(() => useMediaQuery("(min-width: 768px)"));
+
+ expect(result.current).toBe(false);
+ });
+
+ it("should return true when media query matches", () => {
+ mockMatchMedia.mockImplementation((query: string) => ({
+ matches: true,
+ media: query,
+ addEventListener: mockAddEventListener,
+ removeEventListener: mockRemoveEventListener,
+ }));
+
+ const { result } = renderHook(() => useMediaQuery("(min-width: 768px)"));
+
+ expect(result.current).toBe(true);
+ });
+
+ it("should add event listener on mount", () => {
+ renderHook(() => useMediaQuery("(min-width: 768px)"));
+
+ expect(mockAddEventListener).toHaveBeenCalledWith("change", expect.any(Function));
+ });
+
+ it("should remove event listener on unmount", () => {
+ const { unmount } = renderHook(() => useMediaQuery("(min-width: 768px)"));
+
+ unmount();
+
+ expect(mockRemoveEventListener).toHaveBeenCalledWith("change", expect.any(Function));
+ });
+
+ it("should update when media query changes", () => {
+ const { result } = renderHook(() => useMediaQuery("(min-width: 768px)"));
+
+ expect(result.current).toBe(false);
+
+ // Simulate media query change
+ act(() => {
+ if (changeHandler) {
+ changeHandler({ matches: true } as MediaQueryListEvent);
+ }
+ });
+
+ expect(result.current).toBe(true);
+ });
+
+ it("should re-subscribe when query changes", () => {
+ const { rerender } = renderHook(({ query }) => useMediaQuery(query), {
+ initialProps: { query: "(min-width: 768px)" },
+ });
+
+ expect(mockMatchMedia).toHaveBeenCalledWith("(min-width: 768px)");
+
+ rerender({ query: "(min-width: 1024px)" });
+
+ expect(mockMatchMedia).toHaveBeenCalledWith("(min-width: 1024px)");
+ });
+});
+
+describe("useIsMobile", () => {
+ let mockMatchMedia: ReturnType<typeof vi.fn>;
+ let mockAddEventListener: ReturnType<typeof vi.fn>;
+ let mockRemoveEventListener: ReturnType<typeof vi.fn>;
+ let changeHandler: (() => void) | null = null;
+
+ beforeEach(() => {
+ mockAddEventListener = vi.fn((event, handler) => {
+ if (event === "change") {
+ changeHandler = handler;
+ }
+ });
+ mockRemoveEventListener = vi.fn();
+
+ mockMatchMedia = vi.fn().mockImplementation(() => ({
+ matches: false,
+ addEventListener: mockAddEventListener,
+ removeEventListener: mockRemoveEventListener,
+ }));
+
+ Object.defineProperty(window, "matchMedia", {
+ writable: true,
+ value: mockMatchMedia,
+ });
+
+ // Default to desktop width
+ Object.defineProperty(window, "innerWidth", {
+ writable: true,
+ value: 1024,
+ });
+
+ changeHandler = null;
+ });
+
+ it("should return false for desktop width (>= 768px)", () => {
+ Object.defineProperty(window, "innerWidth", { value: 1024, writable: true });
+
+ const { result } = renderHook(() => useIsMobile());
+
+ expect(result.current).toBe(false);
+ });
+
+ it("should return true for mobile width (< 768px)", () => {
+ Object.defineProperty(window, "innerWidth", { value: 500, writable: true });
+
+ const { result } = renderHook(() => useIsMobile());
+
+ expect(result.current).toBe(true);
+ });
+
+ it("should return false at exactly 768px (breakpoint)", () => {
+ Object.defineProperty(window, "innerWidth", { value: 768, writable: true });
+
+ const { result } = renderHook(() => useIsMobile());
+
+ expect(result.current).toBe(false);
+ });
+
+ it("should return true at 767px (just below breakpoint)", () => {
+ Object.defineProperty(window, "innerWidth", { value: 767, writable: true });
+
+ const { result } = renderHook(() => useIsMobile());
+
+ expect(result.current).toBe(true);
+ });
+
+ it("should update when window is resized", () => {
+ Object.defineProperty(window, "innerWidth", { value: 1024, writable: true });
+
+ const { result } = renderHook(() => useIsMobile());
+
+ expect(result.current).toBe(false);
+
+ // Simulate resize to mobile
+ act(() => {
+ Object.defineProperty(window, "innerWidth", { value: 500, writable: true });
+ if (changeHandler) {
+ changeHandler();
+ }
+ });
+
+ expect(result.current).toBe(true);
+ });
+
+ it("should clean up event listener on unmount", () => {
+ const { unmount } = renderHook(() => useIsMobile());
+
+ unmount();
+
+ expect(mockRemoveEventListener).toHaveBeenCalledWith("change", expect.any(Function));
+ });
+});
diff --git a/surfsense_web/tests/lib/auth-utils.test.ts b/surfsense_web/tests/lib/auth-utils.test.ts
new file mode 100644
index 000000000..2a931f692
--- /dev/null
+++ b/surfsense_web/tests/lib/auth-utils.test.ts
@@ -0,0 +1,210 @@
+/**
+ * Tests for lib/auth-utils.ts
+ *
+ * These tests validate:
+ * 1. Token storage and retrieval works correctly
+ * 2. Authentication state is properly tracked
+ * 3. Redirect path handling for post-login navigation
+ * 4. Auth headers are correctly constructed
+ */
+
+import { describe, it, expect, beforeEach } from "vitest";
+import {
+ getBearerToken,
+ setBearerToken,
+ clearBearerToken,
+ isAuthenticated,
+ getAndClearRedirectPath,
+ getAuthHeaders,
+} from "@/lib/auth-utils";
+
+describe("Token Management", () => {
+ beforeEach(() => {
+ // Clear localStorage before each test
+ window.localStorage.clear();
+ });
+
+ describe("getBearerToken", () => {
+ it("should return null when no token is stored", () => {
+ const token = getBearerToken();
+ expect(token).toBeNull();
+ });
+
+ it("should return the stored token", () => {
+ window.localStorage.setItem("surfsense_bearer_token", "test-token-123");
+
+ const token = getBearerToken();
+ expect(token).toBe("test-token-123");
+ });
+ });
+
+ describe("setBearerToken", () => {
+ it("should store the token in localStorage", () => {
+ setBearerToken("my-auth-token");
+
+ expect(window.localStorage.getItem("surfsense_bearer_token")).toBe("my-auth-token");
+ });
+
+ it("should overwrite existing token", () => {
+ setBearerToken("old-token");
+ setBearerToken("new-token");
+
+ expect(window.localStorage.getItem("surfsense_bearer_token")).toBe("new-token");
+ });
+ });
+
+ describe("clearBearerToken", () => {
+ it("should remove the token from localStorage", () => {
+ window.localStorage.setItem("surfsense_bearer_token", "token-to-clear");
+
+ clearBearerToken();
+
+ expect(window.localStorage.getItem("surfsense_bearer_token")).toBeNull();
+ });
+
+ it("should not throw when no token exists", () => {
+ expect(() => clearBearerToken()).not.toThrow();
+ });
+ });
+});
+
+describe("Authentication State", () => {
+ beforeEach(() => {
+ window.localStorage.clear();
+ });
+
+ describe("isAuthenticated", () => {
+ it("should return false when no token exists", () => {
+ expect(isAuthenticated()).toBe(false);
+ });
+
+ it("should return true when token exists", () => {
+ window.localStorage.setItem("surfsense_bearer_token", "valid-token");
+
+ expect(isAuthenticated()).toBe(true);
+ });
+
+ it("should return false after token is cleared", () => {
+ window.localStorage.setItem("surfsense_bearer_token", "valid-token");
+ expect(isAuthenticated()).toBe(true);
+
+ clearBearerToken();
+ expect(isAuthenticated()).toBe(false);
+ });
+ });
+});
+
+describe("Redirect Path Handling", () => {
+ beforeEach(() => {
+ window.localStorage.clear();
+ });
+
+ describe("getAndClearRedirectPath", () => {
+ it("should return null when no redirect path is stored", () => {
+ const path = getAndClearRedirectPath();
+ expect(path).toBeNull();
+ });
+
+ it("should return and clear stored redirect path", () => {
+ window.localStorage.setItem("surfsense_redirect_path", "/dashboard/settings");
+
+ const path = getAndClearRedirectPath();
+
+ expect(path).toBe("/dashboard/settings");
+ expect(window.localStorage.getItem("surfsense_redirect_path")).toBeNull();
+ });
+
+ it("should only return path once (cleared after first read)", () => {
+ window.localStorage.setItem("surfsense_redirect_path", "/some/path");
+
+ const firstRead = getAndClearRedirectPath();
+ const secondRead = getAndClearRedirectPath();
+
+ expect(firstRead).toBe("/some/path");
+ expect(secondRead).toBeNull();
+ });
+ });
+});
+
+describe("Auth Headers", () => {
+ beforeEach(() => {
+ window.localStorage.clear();
+ });
+
+ describe("getAuthHeaders", () => {
+ it("should return empty object when no token exists", () => {
+ const headers = getAuthHeaders();
+
+ expect(headers).toEqual({});
+ });
+
+ it("should return Authorization header when token exists", () => {
+ window.localStorage.setItem("surfsense_bearer_token", "my-token");
+
+ const headers = getAuthHeaders();
+
+ expect(headers).toEqual({
+ Authorization: "Bearer my-token",
+ });
+ });
+
+ it("should merge additional headers with auth header", () => {
+ window.localStorage.setItem("surfsense_bearer_token", "my-token");
+
+ const headers = getAuthHeaders({
+ "Content-Type": "application/json",
+ "X-Custom": "value",
+ });
+
+ expect(headers).toEqual({
+ Authorization: "Bearer my-token",
+ "Content-Type": "application/json",
+ "X-Custom": "value",
+ });
+ });
+
+ it("should return only additional headers when no token", () => {
+ const headers = getAuthHeaders({
+ "Content-Type": "application/json",
+ });
+
+ expect(headers).toEqual({
+ "Content-Type": "application/json",
+ });
+ });
+
+ it("should handle undefined additional headers", () => {
+ window.localStorage.setItem("surfsense_bearer_token", "my-token");
+
+ const headers = getAuthHeaders(undefined);
+
+ expect(headers).toEqual({
+ Authorization: "Bearer my-token",
+ });
+ });
+ });
+});
+
+describe("Token Format Validation", () => {
+ beforeEach(() => {
+ window.localStorage.clear();
+ });
+
+ it("should handle tokens with special characters", () => {
+ const specialToken = "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ";
+
+ setBearerToken(specialToken);
+ const retrieved = getBearerToken();
+
+ expect(retrieved).toBe(specialToken);
+ });
+
+ it("should handle empty string token", () => {
+ setBearerToken("");
+ const retrieved = getBearerToken();
+
+ expect(retrieved).toBe("");
+ // Empty string is falsy, so isAuthenticated should return false
+ expect(isAuthenticated()).toBe(false);
+ });
+});
diff --git a/surfsense_web/tests/lib/pagination.test.ts b/surfsense_web/tests/lib/pagination.test.ts
new file mode 100644
index 000000000..4bbbf8c23
--- /dev/null
+++ b/surfsense_web/tests/lib/pagination.test.ts
@@ -0,0 +1,166 @@
+/**
+ * Tests for lib/pagination.ts
+ *
+ * These tests validate:
+ * 1. normalizeListResponse correctly handles different API response formats
+ * 2. Edge cases and malformed data are handled gracefully
+ */
+
+import { describe, it, expect } from "vitest";
+import { normalizeListResponse, type ListResponse } from "@/lib/pagination";
+
+describe("normalizeListResponse", () => {
+ describe("Case 1: Already in desired shape { items, total }", () => {
+ it("should pass through correctly shaped response", () => {
+ const payload = {
+ items: [{ id: 1 }, { id: 2 }],
+ total: 10,
+ };
+
+ const result = normalizeListResponse<{ id: number }>(payload);
+
+ expect(result.items).toEqual([{ id: 1 }, { id: 2 }]);
+ expect(result.total).toBe(10);
+ });
+
+ it("should use items length if total is missing", () => {
+ const payload = {
+ items: [{ id: 1 }, { id: 2 }, { id: 3 }],
+ };
+
+ const result = normalizeListResponse(payload);
+
+ expect(result.items.length).toBe(3);
+ expect(result.total).toBe(3);
+ });
+
+ it("should handle empty items array", () => {
+ const payload = {
+ items: [],
+ total: 0,
+ };
+
+ const result = normalizeListResponse(payload);
+
+ expect(result.items).toEqual([]);
+ expect(result.total).toBe(0);
+ });
+ });
+
+ describe("Case 2: Tuple format [items, total]", () => {
+ it("should normalize tuple response", () => {
+ const payload = [[{ id: 1 }, { id: 2 }], 100];
+
+ const result = normalizeListResponse<{ id: number }>(payload);
+
+ expect(result.items).toEqual([{ id: 1 }, { id: 2 }]);
+ expect(result.total).toBe(100);
+ });
+
+ it("should use items length if total is not a number in tuple", () => {
+ const payload = [[{ id: 1 }, { id: 2 }], "invalid"];
+
+ const result = normalizeListResponse(payload);
+
+ expect(result.items.length).toBe(2);
+ expect(result.total).toBe(2);
+ });
+
+ it("should handle empty tuple array", () => {
+ const payload = [[], 0];
+
+ const result = normalizeListResponse(payload);
+
+ expect(result.items).toEqual([]);
+ expect(result.total).toBe(0);
+ });
+ });
+
+ describe("Case 3: Plain array", () => {
+ it("should normalize plain array response", () => {
+ const payload = [{ id: 1 }, { id: 2 }, { id: 3 }];
+
+ const result = normalizeListResponse<{ id: number }>(payload);
+
+ expect(result.items).toEqual(payload);
+ expect(result.total).toBe(3);
+ });
+
+ it("should handle empty plain array", () => {
+ const payload: unknown[] = [];
+
+ const result = normalizeListResponse(payload);
+
+ expect(result.items).toEqual([]);
+ expect(result.total).toBe(0);
+ });
+ });
+
+ describe("Edge cases and error handling", () => {
+ it("should return empty result for null payload", () => {
+ const result = normalizeListResponse(null);
+
+ expect(result.items).toEqual([]);
+ expect(result.total).toBe(0);
+ });
+
+ it("should return empty result for undefined payload", () => {
+ const result = normalizeListResponse(undefined);
+
+ expect(result.items).toEqual([]);
+ expect(result.total).toBe(0);
+ });
+
+ it("should return empty result for string payload", () => {
+ const result = normalizeListResponse("invalid");
+
+ expect(result.items).toEqual([]);
+ expect(result.total).toBe(0);
+ });
+
+ it("should return empty result for number payload", () => {
+ const result = normalizeListResponse(123);
+
+ expect(result.items).toEqual([]);
+ expect(result.total).toBe(0);
+ });
+
+ it("should return empty result for object without items", () => {
+ const result = normalizeListResponse({ data: [1, 2, 3] });
+
+ expect(result.items).toEqual([]);
+ expect(result.total).toBe(0);
+ });
+
+ it("should handle tuple with null first element", () => {
+ const payload = [null, 5];
+
+ const result = normalizeListResponse(payload);
+
+ // This should fall through to plain array handling
+ expect(result).toBeDefined();
+ });
+ });
+
+ describe("Type preservation", () => {
+ interface User {
+ id: number;
+ name: string;
+ }
+
+ it("should preserve typed items", () => {
+ const payload = {
+ items: [
+ { id: 1, name: "Alice" },
+ { id: 2, name: "Bob" },
+ ],
+ total: 2,
+ };
+
+ const result: ListResponse<User> = normalizeListResponse<User>(payload);
+
+ expect(result.items[0].name).toBe("Alice");
+ expect(result.items[1].id).toBe(2);
+ });
+ });
+});
diff --git a/surfsense_web/tests/lib/utils.test.ts b/surfsense_web/tests/lib/utils.test.ts
new file mode 100644
index 000000000..36bd806e5
--- /dev/null
+++ b/surfsense_web/tests/lib/utils.test.ts
@@ -0,0 +1,111 @@
+/**
+ * Tests for lib/utils.ts
+ *
+ * These tests validate:
+ * 1. cn() correctly merges Tailwind classes
+ * 2. getChatTitleFromMessages() extracts titles correctly
+ */
+
+import { describe, it, expect } from "vitest";
+import { cn, getChatTitleFromMessages } from "@/lib/utils";
+
+describe("cn - Class Name Merger", () => {
+ it("should merge simple class names", () => {
+ const result = cn("foo", "bar");
+ expect(result).toBe("foo bar");
+ });
+
+ it("should handle conditional classes", () => {
+ const isActive = true;
+ const result = cn("base", isActive && "active");
+ expect(result).toBe("base active");
+ });
+
+ it("should filter out falsy values", () => {
+ const result = cn("base", false, null, undefined, "valid");
+ expect(result).toBe("base valid");
+ });
+
+ it("should merge conflicting Tailwind classes (last wins)", () => {
+ // tailwind-merge should resolve conflicts
+ const result = cn("p-4", "p-2");
+ expect(result).toBe("p-2");
+ });
+
+ it("should handle object syntax", () => {
+ const result = cn({
+ base: true,
+ active: true,
+ disabled: false,
+ });
+ expect(result).toContain("base");
+ expect(result).toContain("active");
+ expect(result).not.toContain("disabled");
+ });
+
+ it("should handle array syntax", () => {
+ const result = cn(["foo", "bar"]);
+ expect(result).toBe("foo bar");
+ });
+
+ it("should handle empty input", () => {
+ const result = cn();
+ expect(result).toBe("");
+ });
+
+ it("should handle Tailwind responsive prefixes correctly", () => {
+ const result = cn("text-sm", "md:text-lg", "lg:text-xl");
+ expect(result).toBe("text-sm md:text-lg lg:text-xl");
+ });
+
+ it("should merge color classes properly", () => {
+ const result = cn("text-red-500", "text-blue-500");
+ expect(result).toBe("text-blue-500");
+ });
+});
+
+describe("getChatTitleFromMessages", () => {
+ it("should return first user message content as title", () => {
+ const messages = [
+ { id: "1", role: "user" as const, content: "Hello, how are you?" },
+ { id: "2", role: "assistant" as const, content: "I am fine, thank you!" },
+ ];
+
+ const title = getChatTitleFromMessages(messages);
+ expect(title).toBe("Hello, how are you?");
+ });
+
+ it("should return 'Untitled Chat' when no user messages", () => {
+ const messages = [
+ { id: "1", role: "assistant" as const, content: "Hello!" },
+ { id: "2", role: "system" as const, content: "You are a helpful assistant" },
+ ];
+
+ const title = getChatTitleFromMessages(messages);
+ expect(title).toBe("Untitled Chat");
+ });
+
+ it("should return 'Untitled Chat' for empty messages array", () => {
+ const title = getChatTitleFromMessages([]);
+ expect(title).toBe("Untitled Chat");
+ });
+
+ it("should use first user message even if there are multiple", () => {
+ const messages = [
+ { id: "1", role: "assistant" as const, content: "Welcome!" },
+ { id: "2", role: "user" as const, content: "First question" },
+ { id: "3", role: "assistant" as const, content: "Answer" },
+ { id: "4", role: "user" as const, content: "Second question" },
+ ];
+
+ const title = getChatTitleFromMessages(messages);
+ expect(title).toBe("First question");
+ });
+
+ it("should handle messages with only system role", () => {
+ const messages = [{ id: "1", role: "system" as const, content: "System prompt" }];
+
+ const title = getChatTitleFromMessages(messages);
+ expect(title).toBe("Untitled Chat");
+ });
+});
diff --git a/surfsense_web/tests/setup.tsx b/surfsense_web/tests/setup.tsx
new file mode 100644
index 000000000..7828e7285
--- /dev/null
+++ b/surfsense_web/tests/setup.tsx
@@ -0,0 +1,130 @@
+/**
+ * Vitest test setup file.
+ *
+ * This file runs before all tests and sets up the testing environment.
+ */
+
+import "@testing-library/jest-dom/vitest";
+import { vi } from "vitest";
+
+// Mock localStorage for auth-utils tests
+const localStorageMock = {
+ store: {} as Record<string, string>,
+ getItem: vi.fn((key: string) => localStorageMock.store[key] ?? null),
+ setItem: vi.fn((key: string, value: string) => {
+ localStorageMock.store[key] = value;
+ }),
+ removeItem: vi.fn((key: string) => {
+ delete localStorageMock.store[key];
+ }),
+ clear: vi.fn(() => {
+ localStorageMock.store = {};
+ }),
+};
+
+Object.defineProperty(window, "localStorage", {
+ value: localStorageMock,
+});
+
+// Mock window.location
+const locationMock = {
+ href: "",
+ pathname: "/dashboard",
+ search: "",
+ hash: "",
+};
+
+Object.defineProperty(window, "location", {
+ value: locationMock,
+ writable: true,
+});
+
+// Mock Next.js router
+vi.mock("next/navigation", () => ({
+ useRouter: () => ({
+ push: vi.fn(),
+ replace: vi.fn(),
+ prefetch: vi.fn(),
+ back: vi.fn(),
+ forward: vi.fn(),
+ }),
+ usePathname: () => "/",
+ useSearchParams: () => new URLSearchParams(),
+ useParams: () => ({}),
+}));
+
+// Mock Next.js Image component
+vi.mock("next/image", () => ({
+ default: ({
+ src,
+ alt,
+ className,
+ ...props
+ }: {
+ src: string;
+ alt: string;
+ className?: string;
+ [key: string]: unknown;
+ }) => {
+ // eslint-disable-next-line @next/next/no-img-element
+ return <img src={src} alt={alt} className={className} {...props} />;
+ },
+}));
+
+// Mock Next.js Link component
+vi.mock("next/link", () => ({
+ default: ({
+ children,
+ href,
+ ...props
+ }: {
+ children: React.ReactNode;
+ href: string;
+ [key: string]: unknown;
+ }) => {
+ return (
+ <a href={href} {...props}>
+ {children}
+ </a>
+ );
+ },
+}));
+
+// Mock window.matchMedia for responsive components
+Object.defineProperty(window, "matchMedia", {
+ writable: true,
+ value: vi.fn().mockImplementation((query: string) => ({
+ matches: false,
+ media: query,
+ onchange: null,
+ addListener: vi.fn(),
+ removeListener: vi.fn(),
+ addEventListener: vi.fn(),
+ removeEventListener: vi.fn(),
+ dispatchEvent: vi.fn(),
+ })),
+});
+
+// Mock ResizeObserver
+global.ResizeObserver = vi.fn().mockImplementation(() => ({
+ observe: vi.fn(),
+ unobserve: vi.fn(),
+ disconnect: vi.fn(),
+}));
+
+// Mock IntersectionObserver
+global.IntersectionObserver = vi.fn().mockImplementation(() => ({
+ observe: vi.fn(),
+ unobserve: vi.fn(),
+ disconnect: vi.fn(),
+}));
+
+// Clean up after each test
+afterEach(() => {
+ vi.clearAllMocks();
+ localStorageMock.clear();
+ locationMock.href = "";
+ locationMock.pathname = "/dashboard";
+ locationMock.search = "";
+ locationMock.hash = "";
+});
diff --git a/surfsense_web/vitest.config.ts b/surfsense_web/vitest.config.ts
new file mode 100644
index 000000000..e9477920e
--- /dev/null
+++ b/surfsense_web/vitest.config.ts
@@ -0,0 +1,30 @@
+import { defineConfig } from "vitest/config";
+import react from "@vitejs/plugin-react";
+import path from "node:path";
+
+export default defineConfig({
+ plugins: [react()],
+ test: {
+ environment: "jsdom",
+ globals: true,
+ setupFiles: ["./tests/setup.tsx"],
+ include: ["./tests/**/*.{test,spec}.{ts,tsx}"],
+ exclude: ["node_modules", ".next", "out"],
+ coverage: {
+ provider: "v8",
+ reporter: ["text", "json", "html", "lcov"],
+ include: ["lib/**/*.{ts,tsx}", "hooks/**/*.{ts,tsx}"],
+ exclude: ["**/*.d.ts", "**/*.test.{ts,tsx}", "**/node_modules/**"],
+ },
+ testTimeout: 10000,
+ },
+ css: {
+ // Disable PostCSS for tests
+ postcss: {},
+ },
+ resolve: {
+ alias: {
+ "@": path.resolve(__dirname, "./"),
+ },
+ },
+});