name: Spanish tests

on: [workflow_dispatch, push, pull_request]

jobs:

  canary-multi-turn:
    runs-on: ubuntu-22.04
    timeout-minutes: 3
    strategy:
      max-parallel: 3
      fail-fast: false
      matrix:
        model:
          - meta-llama/llama-3.2-3b-instruct            # $0.0100/$0.0200 [ 128K]
          - mistralai/mistral-nemo                      # $0.0100/$0.0270 [ 128K]
          - google/gemma-3n-e4b-it                      # $0.0200/$0.0400 [  32K]
          - google/gemma-3-4b-it                        # $0.0200/$0.0400 [ 128K]
          - mistralai/ministral-3b                      # $0.0400/$0.0400 [ 128K]
          - mistralai/mistral-7b-instruct-v0.3          # $0.0280/$0.0540 [  32K]
          - mistralai/mistral-small-3.2-24b-instruct    # $0.0500/$0.1000 [  32K]
          - google/gemma-3-12b-it                       # $0.0500/$0.1000 [ 128K]
          - microsoft/phi-3-mini-128k-instruct          # $0.1000/$0.1000 [ 128K]
          - qwen/qwen3-8b                               # $0.0350/$0.1380 [ 128K]
          - amazon/nova-micro-v1                        # $0.0350/$0.1400 [ 128K]
          - microsoft/phi-4                             # $0.0700/$0.1400 [  16K]
          - google/gemini-flash-1.5-8b                  # $0.0380/$0.1500 [1000K]
          - amazon/nova-lite-v1                         # $0.0600/$0.2400 [ 300K]
          - qwen/qwen3-14b                              # $0.0800/$0.2400 [  40K]
          - mistralai/mixtral-8x7b-instruct             # $0.2400/$0.2400 [  32K]
          - meta-llama/llama-3.3-70b-instruct           # $0.0700/$0.2500 [ 128K]
          - qwen/qwen3-30b-a3b                          # $0.0800/$0.2900 [  40K]
          - google/gemini-2.0-flash-lite-001            # $0.0750/$0.3000 [1000K]
          - meta-llama/llama-4-scout                    # $0.0800/$0.3000 [ 320K]
          - google/gemini-2.0-flash-001                 # $0.1000/$0.4000 [1000K]
          - openai/gpt-4.1-nano                         # $0.1000/$0.4000 [1000K]
          - google/gemini-2.5-flash-lite-preview-06-17  # $0.1000/$0.4000 [1000K]

    steps:
      - uses: actions/checkout@v4

      - run: ./chat-llm.js tests/es/canary-single-turn.txt
        env:
          LLM_API_BASE_URL: https://openrouter.ai/api/v1
          LLM_API_KEY: ${{ secrets.LLM_API_KEY }}
          LLM_CHAT_MODEL: ${{ matrix.model }}

      - run: ./chat-llm.js tests/es/canary-multi-turn.txt
        env:
          LLM_API_BASE_URL: https://openrouter.ai/api/v1
          LLM_API_KEY: ${{ secrets.LLM_API_KEY }}
          LLM_CHAT_MODEL: ${{ matrix.model }}


  high-school-stem:
    runs-on: ubuntu-22.04
    timeout-minutes: 5
    strategy:
      max-parallel: 3
      fail-fast: false
      matrix:
        model:
          - meta-llama/llama-3.2-3b-instruct            # $0.0100/$0.0200 [ 128K]
          - mistralai/mistral-nemo                      # $0.0100/$0.0270 [ 128K]
          - google/gemma-3n-e4b-it                      # $0.0200/$0.0400 [  32K]
          - google/gemma-3-4b-it                        # $0.0200/$0.0400 [ 128K]
          - mistralai/ministral-3b                      # $0.0400/$0.0400 [ 128K]
          - mistralai/mistral-7b-instruct-v0.3          # $0.0280/$0.0540 [  32K]
          - mistralai/mistral-small-3.2-24b-instruct    # $0.0500/$0.1000 [  32K]
          - google/gemma-3-12b-it                       # $0.0500/$0.1000 [ 128K]
          - microsoft/phi-3-mini-128k-instruct          # $0.1000/$0.1000 [ 128K]
          - qwen/qwen3-8b                               # $0.0350/$0.1380 [ 128K]
          - amazon/nova-micro-v1                        # $0.0350/$0.1400 [ 128K]
          - microsoft/phi-4                             # $0.0700/$0.1400 [  16K]
          - google/gemini-flash-1.5-8b                  # $0.0380/$0.1500 [1000K]
          - amazon/nova-lite-v1                         # $0.0600/$0.2400 [ 300K]
          - qwen/qwen3-14b                              # $0.0800/$0.2400 [  40K]
          - mistralai/mixtral-8x7b-instruct             # $0.2400/$0.2400 [  32K]
          - meta-llama/llama-3.3-70b-instruct           # $0.0700/$0.2500 [ 128K]
          - qwen/qwen3-30b-a3b                          # $0.0800/$0.2900 [  40K]
          - google/gemini-2.0-flash-lite-001            # $0.0750/$0.3000 [1000K]
          - meta-llama/llama-4-scout                    # $0.0800/$0.3000 [ 320K]
          - google/gemini-2.0-flash-001                 # $0.1000/$0.4000 [1000K]
          - openai/gpt-4.1-nano                         # $0.1000/$0.4000 [1000K]
          - google/gemini-2.5-flash-lite-preview-06-17  # $0.1000/$0.4000 [1000K]

    steps:
      - uses: actions/checkout@v4

      - run: ./chat-llm.js tests/es/high-school-stem.txt
        env:
          LLM_API_BASE_URL: https://openrouter.ai/api/v1
          LLM_API_KEY: ${{ secrets.LLM_API_KEY }}
          LLM_CHAT_MODEL: ${{ matrix.model }}
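
Each step points `chat-llm.js` at a test file and passes the API endpoint, key, and matrix-selected model through the three `LLM_*` environment variables. The source of `chat-llm.js` is not shown here; the TypeScript sketch below is only an illustration, assuming the script behaves like a typical OpenAI-compatible chat client: it reads those variables and posts prompts to the `/chat/completions` endpoint that OpenRouter exposes at the configured base URL. The hard-coded Spanish question is a hypothetical stand-in for the prompts in the `tests/es/*.txt` files.

```ts
// Minimal sketch (not the actual chat-llm.js): one chat-completion request
// driven by the same environment variables the workflow sets.
// Assumes Node 18+ so the global fetch is available.

const baseUrl = process.env.LLM_API_BASE_URL ?? "https://openrouter.ai/api/v1";
const apiKey = process.env.LLM_API_KEY;
const model = process.env.LLM_CHAT_MODEL ?? "meta-llama/llama-3.2-3b-instruct";

async function ask(question: string): Promise<string> {
  const response = await fetch(`${baseUrl}/chat/completions`, {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
      Authorization: `Bearer ${apiKey}`,
    },
    body: JSON.stringify({
      model,
      messages: [{ role: "user", content: question }],
    }),
  });
  if (!response.ok) {
    throw new Error(`HTTP ${response.status}: ${await response.text()}`);
  }
  const data = await response.json();
  return data.choices[0].message.content;
}

// Example prompt standing in for a line from tests/es/canary-single-turn.txt.
ask("¿Cuál es la capital de España?")
  .then(console.log)
  .catch((error) => {
    console.error(error);
    process.exit(1);
  });
```

Run locally against any of the matrix models by exporting the same variables, e.g. `LLM_API_KEY=... LLM_CHAT_MODEL=mistralai/mistral-nemo`, before invoking the script.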