🎨 Fix dropdown visibility and enhance UI contrast #331
Workflow file for this run:
```yaml
name: verifyExpected

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main
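# Jobs: unit-tests always runs; e2e-tests is temporarily disabled;
# integration-tests and performance-regression are gated (see their `if:` blocks).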
jobs:
  unit-tests:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ['3.11']
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - name: Cache pip dependencies
        uses: actions/cache@v4
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }}
          restore-keys: |
            ${{ runner.os }}-pip-
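      # restore-keys falls back to the newest partially matching cache when
      # requirements.txt changes, so most wheels are still reused.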
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
      - name: Create test directories
        run: |
          mkdir -p tests/data
          mkdir -p test_chroma_db
      - name: Run unit tests only
        run: |
          python -m pytest -n auto tests/ -m "unit or fast" --ignore=tests/integration \
            -v --tb=short \
            --cov=app --cov=reasoning_engine --cov=document_processor --cov=utils \
            --cov=task_manager --cov=task_ui --cov=tasks \
            --cov-report=term-missing --cov-report=html:htmlcov
        env:
          ENABLE_BACKGROUND_TASKS: "true"
          REDIS_ENABLED: "false"
          CELERY_BROKER_URL: "redis://localhost:6379/0"
          MOCK_EXTERNAL_SERVICES: "true"
          TESTING: "true"
          CHROMA_PERSIST_DIR: "./test_chroma_db"
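      # With REDIS_ENABLED=false the Celery broker URL above is presumably
      # unused; MOCK_EXTERNAL_SERVICES keeps unit tests offline.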
      - name: Upload coverage reports
        uses: actions/upload-artifact@v4
        with:
          name: coverage-reports
          path: htmlcov/
          retention-days: 30
      - name: Generate Final Test Report
        run: |
          python scripts/generate_final_report.py || true
      - name: Upload Final Test Report
        uses: actions/upload-artifact@v4
        with:
          name: final-test-report-unit-tests-${{ github.run_id }}
          path: final_test_report.md
          retention-days: 30

  e2e-tests:
    runs-on: ubuntu-latest
    needs: unit-tests
    if: false # Temporarily disabled: the E2E tests require a full server setup
    steps:
      - uses: actions/checkout@v4
      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '18'
          cache: 'npm'
      - name: Install Playwright
        run: |
          npm ci
          npx playwright install --with-deps
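      # --with-deps also installs the OS-level libraries the browsers need.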
      - name: Setup Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'
      - name: Cache pip dependencies
        uses: actions/cache@v4
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }}
          restore-keys: |
            ${{ runner.os }}-pip-
      - name: Install Python dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
      - name: Create test directories
        run: |
          mkdir -p tests/data
          mkdir -p test_chroma_db
          mkdir -p tests/e2e/fixtures
      - name: Generate test fixtures
        run: |
          python scripts/generate_test_assets.py || echo "Test assets generation failed, continuing..."
      - name: Run E2E tests
        run: |
          npx playwright test --reporter=html,json,junit
        env:
          MOCK_EXTERNAL_SERVICES: "true"
          CHROMA_PERSIST_DIR: "./test_chroma_db"
          ENABLE_BACKGROUND_TASKS: "true"
          REDIS_ENABLED: "false"
          CELERY_BROKER_URL: "redis://localhost:6379/0"
          TESTING: "true"
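      # if: always() below keeps reports available even when the test step fails.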
      - name: Upload Playwright report
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: playwright-report
          path: playwright-report/
          retention-days: 30
      - name: Upload test results
        uses: actions/upload-artifact@v4
        if: always()
        with:
          name: test-results
          path: |
            test-results.json
            test-results.xml
          retention-days: 30

  integration-tests:
    runs-on: ubuntu-latest
    if: |
      github.ref == 'refs/heads/main' ||
      contains(github.event.head_commit.message, '[run-integration]') ||
      contains(github.event.pull_request.title, '[run-integration]')
    needs: [unit-tests]
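    # Gated: runs on main, or when '[run-integration]' appears in the commit
    # message (pushes) or the PR title (pull requests).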
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python 3.11
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'
      - name: Cache pip dependencies
        uses: actions/cache@v4
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }}
          restore-keys: |
            ${{ runner.os }}-pip-
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
      - name: Setup test environment
        run: |
          mkdir -p tests/data
          mkdir -p test_chroma_db
          python scripts/generate_assets.py || echo "Test assets generation failed, continuing..."
      - name: Run integration tests
        run: |
          python -m pytest -n auto tests/ -m "integration" -v --tb=short --timeout=300
        env:
          MOCK_EXTERNAL_SERVICES: "true"
          CHROMA_PERSIST_DIR: "./test_chroma_db"
          ENABLE_BACKGROUND_TASKS: "true"
          REDIS_ENABLED: "false"
          CELERY_BROKER_URL: "redis://localhost:6379/0"
          TESTING: "true"
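      # Cleanup runs even on failure (if: always()) so reruns start clean.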
      - name: Cleanup test environment
        if: always()
        run: |
          rm -rf ./test_chroma_db
          rm -rf temp_*.mp3
          rm -rf tests/data/test_*
      - name: Generate Final Test Report
        run: |
          python scripts/generate_final_report.py || true
      - name: Upload Final Test Report
        uses: actions/upload-artifact@v4
        with:
          name: final-test-report-integration-tests-${{ github.run_id }}
          path: final_test_report.md
          retention-days: 30

  performance-regression:
    runs-on: ubuntu-latest
    if: |
      (github.event_name == 'push' && github.ref == 'refs/heads/main') ||
      (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name == github.repository)
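    # Restricted to pushes to main and same-repo PRs: forked PRs do not
    # receive secrets such as OPENAI_API_KEY.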
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python 3.11
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'
      - name: Cache pip dependencies
        uses: actions/cache@v4
        with:
          path: ~/.cache/pip
          key: ${{ runner.os }}-pip-${{ hashFiles('requirements.txt') }}
          restore-keys: |
            ${{ runner.os }}-pip-
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install -r requirements.txt
      - name: Run Performance Regression Test
        env:
          PERF_TIME_THRESHOLD: "30.0"
          # Default memory threshold set to 600 MB for LLM evaluation workloads
          PERF_MEM_THRESHOLD: "600.0"
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          OPENAI_MODEL: ${{ vars.OPENAI_MODEL || 'gpt-3.5-turbo' }}
        run: |
          # Parallelize for speed
          python -m pytest -n auto tests/ -m "performance" -v --tb=short || python scripts/test_performance_regression.py
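      # The `||` falls back to the standalone regression script when pytest
      # exits non-zero (including exit code 5, "no tests collected").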
      - name: Upload Performance Metrics
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: performance-metrics
          path: performance_metrics.json
          retention-days: 30
      - name: Generate Final Test Report
        run: |
          python scripts/generate_final_report.py || true
      - name: Check Final Test Report Exists
        run: |
          if [ ! -f final_test_report.md ]; then
            echo "❌ final_test_report.md not found! Report generation failed."
            exit 1
          fi
      - name: Print Final Test Report (for debugging)
        run: cat final_test_report.md
      - name: Upload Final Test Report
        uses: actions/upload-artifact@v4
        with:
          name: final-test-report-performance-regression-${{ github.run_id }}
          path: final_test_report.md
          retention-days: 30
```
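If the E2E suite is re-enabled later, a manual gate reads better than the hard-coded `if: false`. Below is a minimal sketch, not part of the current workflow: it assumes a `workflow_dispatch` trigger with a hypothetical boolean input named `run_e2e`, and that the job's steps stay exactly as above.

```yaml
# Hypothetical sketch only: the run_e2e input is an assumption,
# not part of this repository's workflow.
on:
  push:
    branches: [main]
  pull_request:
    branches: [main]
  workflow_dispatch:
    inputs:
      run_e2e:
        description: "Run the E2E suite"
        type: boolean
        default: false

jobs:
  e2e-tests:
    runs-on: ubuntu-latest
    needs: unit-tests
    # Run only when dispatched manually with run_e2e checked.
    if: ${{ github.event_name == 'workflow_dispatch' && inputs.run_e2e }}
    steps:
      - uses: actions/checkout@v4
      # ...remaining E2E steps unchanged from the workflow above
```

This keeps the `needs: unit-tests` dependency while letting maintainers trigger the suite on demand from the Actions tab.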