40 changes: 40 additions & 0 deletions .github/workflows/python-package.yml
@@ -0,0 +1,40 @@
# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python

name: Python package

on:
  push:
    branches: [ "main" ]
  pull_request:
    branches: [ "main" ]

jobs:
  build:

    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        python-version: ["3.9", "3.10", "3.11"]

    steps:
    - uses: actions/checkout@v4
    - name: Set up Python ${{ matrix.python-version }}
      uses: actions/setup-python@v3
      with:
        python-version: ${{ matrix.python-version }}
    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
        python -m pip install flake8 pytest
        if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
    - name: Lint with flake8
      run: |
        # stop the build if there are Python syntax errors or undefined names
        flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics --exclude fossil-main,ex/benchmarks-deterministic/FOSSIL-versions
        # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
        flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics --exclude fossil-main,ex/benchmarks-deterministic/FOSSIL-versions
    - name: Test with pytest
      run: |
        pytest --ignore=fossil-main --ignore=ex/benchmarks-deterministic/FOSSIL-versions
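
For local debugging, the same lint and test steps can be reproduced outside of Actions. The helper below is a minimal sketch, not part of this PR; the run_checks name and the python -m invocations are assumptions, but the flake8/pytest flags mirror the workflow above.

# Hypothetical local helper (not part of this PR): runs the same lint and
# test commands as the workflow, stopping at the first failing step.
import subprocess
import sys

EXCLUDES = "fossil-main,ex/benchmarks-deterministic/FOSSIL-versions"

def run_checks() -> int:
    """Mirror the CI flake8 and pytest invocations locally."""
    steps = [
        [sys.executable, "-m", "flake8", ".", "--count",
         "--select=E9,F63,F7,F82", "--show-source", "--statistics",
         "--exclude", EXCLUDES],
        [sys.executable, "-m", "pytest",
         "--ignore=fossil-main",
         "--ignore=ex/benchmarks-deterministic/FOSSIL-versions"],
    ]
    for cmd in steps:
        result = subprocess.run(cmd)
        if result.returncode != 0:
            return result.returncode
    return 0

if __name__ == "__main__":
    sys.exit(run_checks())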
25 changes: 25 additions & 0 deletions tests/test_main.py
@@ -0,0 +1,25 @@
import subprocess
import sys
import os

def test_benchmark_runs():
    """Check that the benchmark script runs without errors."""

    benchmark_path = os.path.join(
        os.path.dirname(__file__),
        "..", "ex", "benchmarks-deterministic", "PRoTECT-versions", "ex1_dt_DS.py"
    )

    assert os.path.exists(benchmark_path), f"Benchmark script not found: {benchmark_path}"

    env = os.environ.copy()
    env["PYTHONPATH"] = os.path.join(os.path.dirname(__file__), "..")

    result = subprocess.run(
        [sys.executable, benchmark_path],
        capture_output=True,
        text=True,
        env=env
    )

    assert result.returncode == 0, f"Benchmark failed with error:\n{result.stderr}"
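
If more of the PRoTECT-versions benchmarks should be exercised in CI, the test could be parametrized rather than hard-coding one script. The sketch below is a possible follow-up, not part of this PR; BENCHMARK_DIR and the parameter list are placeholders to extend as needed.

# Possible extension (assumption, not in this PR): one test case per benchmark
# script, so each script gets its own pass/fail report in CI.
import os
import subprocess
import sys

import pytest

BENCHMARK_DIR = os.path.join(
    os.path.dirname(__file__), "..", "ex", "benchmarks-deterministic", "PRoTECT-versions"
)

@pytest.mark.parametrize("script_name", ["ex1_dt_DS.py"])  # add further scripts here
def test_benchmark_script(script_name):
    """Each benchmark script should exit cleanly when run as a subprocess."""
    script_path = os.path.join(BENCHMARK_DIR, script_name)
    assert os.path.exists(script_path), f"Benchmark script not found: {script_path}"

    env = os.environ.copy()
    env["PYTHONPATH"] = os.path.join(os.path.dirname(__file__), "..")

    result = subprocess.run(
        [sys.executable, script_path], capture_output=True, text=True, env=env
    )
    assert result.returncode == 0, f"{script_name} failed:\n{result.stderr}"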