support ShareGPT dataset as data file #137

Workflow file for this run

# This workflow builds the vLLM-sim binary artifact with Docker, caches it based on the
# Dockerfile content, and then runs e2e tests using that artifact.
name: E2E Tests

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main
  workflow_dispatch:

jobs:
  build-and-test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        # Python version consumed by the 'Set up Python' step below
        # (the version itself is an assumption; adjust to the project's supported version).
        python: ["3.12"]
    steps:
      - name: Check out repository
        uses: actions/checkout@v4

      # Cache the binary artifact.
      # The key is based on the runner's OS and the hash of the Dockerfile.
      # If the Dockerfile changes, the hash changes, and a new cache is created.
      - name: Cache vLLM-sim binary
        id: cache-vllm-sim
        uses: actions/cache@v4
        with:
          # The path to the file you want to cache
          path: bin/llm-d-inference-sim
          # The unique key for the cache
          key: vllm-sim-binary-${{ runner.os }}-${{ hashFiles('tests/e2e/vllm-sim.Dockerfile') }}
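      # For illustration (values assumed): on ubuntu-latest this key resolves to something like
      # 'vllm-sim-binary-Linux-<sha256 of vllm-sim.Dockerfile>', so any change to the Dockerfile
      # produces a new key and forces the build step below to run again.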

      # Set up Docker Buildx (required for the 'docker build -o' command)
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      # Conditionally build the artifact.
      # This step only runs if the cache step above did NOT find a match.
      # 'steps.cache-vllm-sim.outputs.cache-hit' will be 'true' if the cache was restored.
      - name: Build vLLM-sim artifact (if not cached)
        if: steps.cache-vllm-sim.outputs.cache-hit != 'true'
        run: |
          echo "Cache miss. Building artifact..."
          docker build . -f tests/e2e/vllm-sim.Dockerfile -o type=local,dest=./
        shell: bash
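      # Note on the export above: '-o type=local,dest=./' uses BuildKit's local exporter, which
      # writes the final build stage's filesystem into ./ instead of producing an image. This
      # assumes the Dockerfile's last stage places the binary at bin/llm-d-inference-sim, the
      # same path that is cached above and verified below.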

      - name: Verify artifact
        run: |
          if [ -f "bin/llm-d-inference-sim" ]; then
            echo "Artifact found."
          else
            echo "ERROR: Artifact bin/llm-d-inference-sim not found!"
            exit 1
          fi
        shell: bash

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python }}

      - name: Install dependencies
        run: |
          curl -sSL https://pdm-project.org/install-pdm.py | python3 -
          pip install tox tox-pdm

      - name: Run E2E tests
        run: tox -e test-e2e
        shell: bash
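
For local debugging, the same sequence can be reproduced roughly as follows. This is a sketch that assumes Docker with BuildKit, Python 3, and pip are available locally; the commands themselves are taken from the workflow steps above.

# Build the simulator binary into ./bin/ (what the workflow does on a cache miss).
docker build . -f tests/e2e/vllm-sim.Dockerfile -o type=local,dest=./

# Confirm the artifact landed where the workflow expects it.
test -f bin/llm-d-inference-sim && echo "Artifact found."

# Install PDM and tox, then run the e2e test environment.
curl -sSL https://pdm-project.org/install-pdm.py | python3 -
pip install tox tox-pdm
tox -e test-e2e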