# chore(deps): bump litellm from 1.82.6 to 1.83.0 (#263)
---
# Runs the API compliance test suite against the LLM proxy, either when a
# maintainer applies the `api-compliance-test` label to a PR or on manual
# dispatch. Results are uploaded as an artifact and posted to the PR.
name: API Compliance Tests

on:
  pull_request:
    types: [labeled]
  workflow_dispatch:
    inputs:
      reason:
        description: Reason for running compliance tests
        required: true
      patterns:
        description: Comma-separated patterns to test (empty = all)
        required: false
      models:
        description: Comma-separated model IDs (empty = all defaults)
        required: false

env:
  # Default models to test (matches DEFAULT_MODELS in run_compliance.py)
  DEFAULT_MODELS: claude-sonnet-4-5,gpt-5.2,gemini-3.1-pro

jobs:
  run-compliance-tests:
    # Only run on api-compliance-test label or workflow_dispatch
    if: |
      github.event_name == 'workflow_dispatch' ||
      (github.event_name == 'pull_request' && github.event.label.name == 'api-compliance-test')
    runs-on: ubuntu-latest
    permissions:
      contents: read
      pull-requests: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v6
        with:
          # NOTE(review): this checks out the PR head — possibly a fork —
          # in a job that can read secrets. The label gate above is the only
          # guard; only apply the label after reviewing the PR diff.
          repository: ${{ github.event.pull_request.head.repo.full_name || github.repository }}
          ref: ${{ github.event.pull_request.head.sha || github.ref }}
          persist-credentials: false

      - name: Install uv
        uses: astral-sh/setup-uv@v7
        with:
          version: latest
          python-version: '3.13'

      - name: Install dependencies
        run: uv sync --dev

      - name: Determine test parameters
        id: params
        # User-controlled inputs go through env vars rather than being
        # ${{ }}-interpolated into the script body (script-injection hardening:
        # interpolation happens before the shell runs, so a crafted input could
        # otherwise inject arbitrary commands).
        env:
          INPUT_PATTERNS: ${{ github.event.inputs.patterns }}
          INPUT_MODELS: ${{ github.event.inputs.models }}
        run: |
          # Use input values or defaults
          if [ "${{ github.event_name }}" = "workflow_dispatch" ]; then
            PATTERNS="$INPUT_PATTERNS"
            MODELS="$INPUT_MODELS"
          else
            PATTERNS=""
            MODELS=""
          fi
          # Build command args
          ARGS=""
          if [ -n "$PATTERNS" ]; then
            ARGS="$ARGS --patterns $PATTERNS"
          fi
          if [ -n "$MODELS" ]; then
            ARGS="$ARGS --models $MODELS"
          else
            ARGS="$ARGS --models $DEFAULT_MODELS"
          fi
          echo "args=$ARGS" >> "$GITHUB_OUTPUT"

      - name: Run API compliance tests
        id: compliance
        env:
          LLM_API_KEY: ${{ secrets.LLM_API_KEY_EVAL }}
          LLM_BASE_URL: https://llm-proxy.eval.all-hands.dev
          GITHUB_RUN_ID: ${{ github.run_id }}
          # Derived from user input — passed via env, not interpolated into
          # the script (same injection hardening as above).
          COMPLIANCE_ARGS: ${{ steps.params.outputs.args }}
        run: |
          # $COMPLIANCE_ARGS is intentionally unquoted so it word-splits
          # into separate CLI arguments.
          uv run python tests/integration/api_compliance/run_compliance.py \
            $COMPLIANCE_ARGS \
            --output-dir compliance-results/
        continue-on-error: true  # Tests may "fail" but that's expected

      - name: Upload results
        uses: actions/upload-artifact@v7
        with:
          name: compliance-results
          path: compliance-results/
          retention-days: 30

      - name: Post results to PR
        if: github.event_name == 'pull_request'
        uses: actions/github-script@v8
        with:
          script: |
            const fs = require('fs');
            const path = require('path');
            // Find the report directory
            const resultsDir = 'compliance-results';
            // Sort for a deterministic pick; the last entry is the latest
            // run assuming timestamped, lexicographically sortable dir names
            // (TODO confirm against run_compliance.py's output naming).
            const dirs = fs.readdirSync(resultsDir).sort();
            if (dirs.length === 0) {
              console.log('No results found');
              return;
            }
            const latestDir = path.join(resultsDir, dirs[dirs.length - 1]);
            const reportPath = path.join(latestDir, 'compliance_report.md');
            if (!fs.existsSync(reportPath)) {
              console.log('Report not found at', reportPath);
              return;
            }
            let report = fs.readFileSync(reportPath, 'utf8');
            // Truncate if too long (GitHub comment bodies cap at 65536 chars)
            if (report.length > 60000) {
              report = report.substring(0, 60000) + '\n\n... (truncated)';
            }
            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.payload.pull_request.number,
              body: report
            });