-
Notifications
You must be signed in to change notification settings - Fork 156
55 lines (52 loc) · 1.57 KB
/
integration-test.yml
File metadata and controls
55 lines (52 loc) · 1.57 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
---
# Integration tests for the agentstack server and CLI.
name: integration-test
# Least-privilege token: the job only needs to read repository contents.
permissions:
  contents: read
# Cancel superseded runs on the same ref so only the newest commit is tested.
concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
on:
  # Allow manual runs from the Actions tab.
  workflow_dispatch: {}
  # Run on PRs and pushes to main, but only when server or CLI code changed.
  pull_request:
    paths:
      - 'apps/agentstack-server/**'
      - 'apps/agentstack-cli/**'
  push:
    branches:
      - main
    paths:
      - 'apps/agentstack-server/**'
      - 'apps/agentstack-cli/**'
jobs:
  integration-test:
    timeout-minutes: 25
    runs-on: ubuntu-latest
    steps:
      # Initial checkout so the local composite actions under
      # ./.github/actions are available to the steps below.
      - uses: actions/checkout@v4
      # Frees runner disk space (local composite action; reserves root/temp
      # space and adds swap).
      - name: Maximize build space
        uses: ./.github/actions/maximize-build-space
        with:
          root-reserve-mb: 15360
          temp-reserve-mb: 2048
          swap-size-mb: 1024
          remove-dotnet: 'true'
      - name: "Set up Lima"
        uses: lima-vm/lima-actions/setup@v1
        id: lima-actions-setup
      # Keyed on the Lima version so the cache invalidates on upgrades.
      - name: "Cache ~/.cache/lima"
        uses: actions/cache@v4
        with:
          path: ~/.cache/lima
          key: lima-${{ steps.lima-actions-setup.outputs.version }}
      # NOTE(review): second checkout looks deliberate — maximize-build-space
      # actions typically re-mount the workspace volume, wiping the first
      # checkout. Confirm against the local action before removing this
      # apparent duplicate.
      - uses: actions/checkout@v4
      - uses: ./.github/actions/setup
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - run: mise run agentstack-server:test:integration
        env:
          LLM_API_BASE: "${{ secrets.OPENAI_API_BASE }}"
          LLM_MODEL: "${{ vars.OPENAI_MODEL }}"
          LLM_API_KEY: "${{ secrets.OPENAI_API_KEY }}"
          # Alternative Groq-backed configuration, kept for reference:
          # LLM_API_BASE: "https://api.groq.com/openai/v1"
          # LLM_MODEL: "groq:meta-llama/llama-4-maverick-17b-128e-instruct"
          # LLM_API_KEY: "${{ secrets.GROQ_API_KEY }}"
      # Drop pre-built wheels etc. from the uv cache so only artifacts used
      # in this run are persisted by the cache step.
      - run: uv cache prune --ci