action.yml
name: 'VerifyWise LLM Evaluation'
description: >
Run LLM evaluations against a VerifyWise instance and gate CI on quality
thresholds. Supports chatbot, RAG, and agent use cases.
branding:
icon: 'check-circle'
color: 'green'
# ─── Available metrics ──────────────────────────────────────────────────
#
# Universal: answer_relevancy, correctness, completeness,
# hallucination*, toxicity*, bias*, instruction_following
#
# RAG: faithfulness, contextual_relevancy,
# context_precision, context_recall
#
# Agent: tool_correctness, argument_correctness,
# task_completion, step_efficiency,
# plan_quality, plan_adherence
#
# (* = inverted — lower is better)
# ────────────────────────────────────────────────────────────────────────
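# ─── Example workflow usage (illustrative sketch) ───────────────────────
#
# A minimal sketch of calling this action from a workflow. The action
# reference, URL, IDs, and secret names below are placeholders -- adjust
# them to your repository setup.
#
#   jobs:
#     llm-eval:
#       runs-on: ubuntu-latest
#       permissions:
#         pull-requests: write   # required for post_pr_comment
#       steps:
#         - id: eval
#           uses: your-org/verifywise-eval-action@v1   # placeholder ref
#           with:
#             api_url: https://verifywise.example.com
#             project_id: '42'
#             dataset_id: '7'
#             metrics: answer_relevancy,faithfulness,toxicity
#             model_name: gpt-4o-mini
#             model_provider: openai
#             vw_api_token: ${{ secrets.VW_API_TOKEN }}
#             llm_api_key: ${{ secrets.OPENAI_API_KEY }}
# ────────────────────────────────────────────────────────────────────────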
inputs:
api_url:
description: 'VerifyWise instance URL'
required: true
project_id:
description: 'Project ID'
required: true
dataset_id:
description: 'Dataset ID to evaluate against'
required: true
metrics:
description: 'Comma-separated metric names (e.g. answer_relevancy,bias,toxicity)'
required: true
model_name:
description: 'Model to evaluate (e.g. gpt-4o-mini, claude-3.5-sonnet)'
required: true
model_provider:
description: 'Provider: openai | anthropic | google | mistral | xai | self-hosted'
required: true
judge_model:
description: 'Judge LLM for metric scoring'
required: false
default: 'gpt-4o'
judge_provider:
description: 'Judge LLM provider'
required: false
default: 'openai'
threshold:
description: 'Pass/fail threshold (0.0–1.0)'
required: false
default: '0.7'
timeout_minutes:
description: 'Max wait time in minutes'
required: false
default: '30'
poll_interval_seconds:
description: 'Seconds between status polls'
required: false
default: '15'
experiment_name:
description: 'Custom experiment name (auto-generated if empty)'
required: false
default: ''
fail_on_threshold:
description: 'Fail the step when thresholds are not met'
required: false
default: 'true'
post_pr_comment:
description: 'Post results as a comment on the pull request'
required: false
default: 'true'
vw_api_token:
description: 'VerifyWise API token'
required: true
llm_api_key:
description: 'API key for the model being evaluated'
required: true
judge_api_key:
description: 'API key for the judge LLM (defaults to llm_api_key if not set)'
required: false
default: ''
outputs:
passed:
description: 'Whether all metrics passed (true/false)'
value: ${{ steps.report.outputs.passed }}
results_path:
description: 'Path to results JSON'
value: ${{ steps.eval.outputs.results_path }}
summary_path:
description: 'Path to Markdown summary'
value: ${{ steps.eval.outputs.summary_path }}
experiment_id:
description: 'ID of the created experiment'
value: ${{ steps.eval.outputs.experiment_id }}
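# Downstream steps in the calling workflow can read these outputs, e.g.
# (illustrative; assumes the action step above was given id: eval):
#
#   - name: Gate deploy on eval
#     if: steps.eval.outputs.passed == 'true'
#     run: echo "Experiment ${{ steps.eval.outputs.experiment_id }} passed"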
runs:
using: 'composite'
steps:
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: '3.11'
- name: Install dependencies
shell: bash
run: pip install requests
- name: Run evaluation
id: eval
shell: bash
env:
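# ci_eval_runner.py reads its configuration from these variables; the
# command line below passes only the two output-path flags.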
VW_API_URL: ${{ inputs.api_url }}
VW_API_TOKEN: ${{ inputs.vw_api_token }}
VW_PROJECT_ID: ${{ inputs.project_id }}
VW_DATASET_ID: ${{ inputs.dataset_id }}
VW_METRICS: ${{ inputs.metrics }}
VW_MODEL_NAME: ${{ inputs.model_name }}
VW_MODEL_PROVIDER: ${{ inputs.model_provider }}
VW_JUDGE_MODEL: ${{ inputs.judge_model }}
VW_JUDGE_PROVIDER: ${{ inputs.judge_provider }}
VW_THRESHOLD: ${{ inputs.threshold }}
VW_TIMEOUT_MINUTES: ${{ inputs.timeout_minutes }}
VW_POLL_INTERVAL: ${{ inputs.poll_interval_seconds }}
VW_EXPERIMENT_NAME: ${{ inputs.experiment_name }}
LLM_API_KEY: ${{ inputs.llm_api_key }}
JUDGE_API_KEY: ${{ inputs.judge_api_key }}
run: |
RESULTS="${{ runner.temp }}/vw-results.json"
SUMMARY="${{ runner.temp }}/vw-summary.md"
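# Never fail this step directly: the runner's exit code is swallowed so
# the "Report results" step below decides pass/fail from the results file.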
python "${{ github.action_path }}/ci_eval_runner.py" \
--output "$RESULTS" \
--markdown-output "$SUMMARY" \
|| true
echo "results_path=$RESULTS" >> "$GITHUB_OUTPUT"
echo "summary_path=$SUMMARY" >> "$GITHUB_OUTPUT"
if [ -f "$RESULTS" ]; then
EXP_ID=$(python -c "import json; print(json.load(open('$RESULTS')).get('experiment_id',''))" 2>/dev/null || echo "")
echo "experiment_id=$EXP_ID" >> "$GITHUB_OUTPUT"
fi
- name: Report results
id: report
if: always()
shell: bash
env:
RESULTS_PATH: ${{ steps.eval.outputs.results_path }}
SUMMARY_PATH: ${{ steps.eval.outputs.summary_path }}
FAIL_ON_THRESHOLD: ${{ inputs.fail_on_threshold }}
run: |
if [ ! -f "$RESULTS_PATH" ]; then
echo "::error title=Evaluation Failed::No results produced. The evaluation may have failed to connect to the VerifyWise instance."
echo "passed=false" >> "$GITHUB_OUTPUT"
{
echo "## VerifyWise Evaluation"
echo ""
echo "**FAILED** -- No results were produced. The evaluation may have failed to start."
echo "Check the logs in the \"Run evaluation\" step for details."
} >> "$GITHUB_STEP_SUMMARY"
[ "$FAIL_ON_THRESHOLD" = "true" ] && exit 1
exit 0
fi
if [ -f "$SUMMARY_PATH" ]; then
cat "$SUMMARY_PATH" >> "$GITHUB_STEP_SUMMARY"
fi
python3 << 'PYEOF'
import json, os
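# Expected results JSON shape, inferred from the fields read below:
#   {"passed": bool, "name": str, "model": str, "experiment_id": str,
#    "metrics": [{"name": str, "score": float, "threshold": float,
#                 "passed": bool, "inverted": bool}]}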
results_path = os.environ["RESULTS_PATH"]
fail_on = os.environ.get("FAIL_ON_THRESHOLD", "true") == "true"
with open(results_path) as f:
data = json.load(f)
passed = data.get("passed", False)
metrics = data.get("metrics", [])
name = data.get("name", "Evaluation")
model = data.get("model", "unknown")
failing = [m for m in metrics if not m.get("passed")]
with open(os.environ["GITHUB_OUTPUT"], "a") as out:
out.write(f"passed={'true' if passed else 'false'}\n")
if passed:
print(f"::notice title=All metrics passed::{len(metrics)} metrics passed for {model}")
else:
for m in failing:
inv = " (inverted -- lower is better)" if m.get("inverted") else ""
print(f"::error title={m['name']} failed threshold::"
f"{m['name']}: scored {m['score']*100:.1f}% "
f"against {m['threshold']*100:.0f}% threshold{inv}")
summary = ", ".join(f"{m['name']}={m['score']*100:.0f}%" for m in failing)
print(f"::error title=Evaluation Failed::"
f"{len(failing)}/{len(metrics)} metrics below threshold "
f"on {model}: {summary}")
if fail_on:
raise SystemExit(1)
PYEOF
- name: Post PR comment
if: always() && inputs.post_pr_comment == 'true' && github.event_name == 'pull_request'
uses: actions/github-script@v7
with:
script: |
const fs = require('fs');
const summaryPath = '${{ steps.eval.outputs.summary_path }}';
if (!summaryPath || !fs.existsSync(summaryPath)) return;
const body = fs.readFileSync(summaryPath, 'utf8');
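// The hidden HTML marker tags this action's comment so reruns update the
// existing comment in place instead of posting a new one each time.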
const marker = '<!-- verifywise-eval -->';
const { data: comments } = await github.rest.issues.listComments({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
});
const existing = comments.find(c => c.body.includes(marker));
const fullBody = `${marker}\n${body}`;
if (existing) {
await github.rest.issues.updateComment({
owner: context.repo.owner,
repo: context.repo.repo,
comment_id: existing.id,
body: fullBody,
});
} else {
await github.rest.issues.createComment({
owner: context.repo.owner,
repo: context.repo.repo,
issue_number: context.issue.number,
body: fullBody,
});
}
- name: Upload results
if: always()
uses: actions/upload-artifact@v4
with:
name: verifywise-eval-results
path: |
${{ steps.eval.outputs.results_path }}
${{ steps.eval.outputs.summary_path }}
retention-days: 30
if-no-files-found: ignore