# Workflow captured from run of PR "release: v0.3.0-rc6" (#500).

---
# End-to-end test workflow: provisions a Lima VM and runs the
# beeai-server e2e suite via mise.
name: e2e-test

# Least-privilege token: the job only reads repository contents.
permissions:
  contents: read

on:
  # Allow manual runs from the Actions tab.
  workflow_dispatch: { }
  # Run only when files affecting the platform under test change.
  pull_request:
    paths:
      - 'apps/beeai-server/**'
      - 'apps/beeai-cli/**'
      - 'helm/beeai-platform/**'
  push:
    branches:
      - main
    paths:
      - 'apps/beeai-server/**'
      - 'apps/beeai-cli/**'
      - 'helm/beeai-platform/**'

jobs:
  e2e-test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: "Set up Lima"
        uses: lima-vm/lima-actions/setup@v1
        # id is referenced below to build a version-scoped cache key.
        id: lima-actions-setup
      - name: "Cache ~/.cache/lima"
        uses: actions/cache@v4
        with:
          path: ~/.cache/lima
          # Keyed on the installed Lima version so the cache is
          # invalidated whenever the setup action upgrades Lima.
          key: lima-${{ steps.lima-actions-setup.outputs.version }}
      # Repo-local composite action; presumably installs mise and
      # project tooling — confirm against .github/actions/setup.
      - uses: ./.github/actions/setup
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - run: mise run beeai-server:test:e2e
        env:
          # TODO: use github models
          # LLM_API_BASE: "https://models.github.ai/inference"
          # LLM_MODEL: "meta/Meta-Llama-3.1-8B-Instruct"
          # LLM_API_KEY: "${{ secrets.GITHUB_TOKEN }}"
          LLM_API_BASE: "https://api.groq.com/openai/v1"
          LLM_MODEL: "llama-3.1-8b-instant"
          LLM_API_KEY: "${{ secrets.GROQ_API_KEY }}"