diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml
new file mode 100644
index 0000000..9e34c50
--- /dev/null
+++ b/.github/workflows/python-package.yml
@@ -0,0 +1,40 @@
+# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
+# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-python
+
+name: Python package
+
+on:
+  push:
+    branches: [ "main" ]
+  pull_request:
+    branches: [ "main" ]
+
+jobs:
+  build:
+
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version: ["3.9", "3.10", "3.11"]
+
+    steps:
+    - uses: actions/checkout@v4
+    - name: Set up Python ${{ matrix.python-version }}
+      uses: actions/setup-python@v5
+      with:
+        python-version: ${{ matrix.python-version }}
+    - name: Install dependencies
+      run: |
+        python -m pip install --upgrade pip
+        python -m pip install flake8 pytest
+        if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
+    - name: Lint with flake8
+      run: |
+        # stop the build if there are Python syntax errors or undefined names
+        flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics --exclude fossil-main,ex/benchmarks-deterministic/FOSSIL-versions
+        # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
+        flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics --exclude fossil-main,ex/benchmarks-deterministic/FOSSIL-versions
+    - name: Test with pytest
+      run: |
+        pytest --ignore=fossil-main --ignore=ex/benchmarks-deterministic/FOSSIL-versions
diff --git a/tests/test_main.py b/tests/test_main.py
new file mode 100644
index 0000000..22bcb8f
--- /dev/null
+++ b/tests/test_main.py
@@ -0,0 +1,25 @@
+import subprocess
+import sys
+import os
+
+def test_benchmark_runs():
+    """Check that the benchmark script runs without errors."""
+
+    benchmark_path = os.path.join(
+        os.path.dirname(__file__),
+        "..", "ex", "benchmarks-deterministic", "PRoTECT-versions", "ex1_dt_DS.py"
+    )
+
+    assert os.path.exists(benchmark_path), f"Benchmark script not found: {benchmark_path}"
+
+    env = os.environ.copy()
+    env["PYTHONPATH"] = os.path.join(os.path.dirname(__file__), "..")
+
+    result = subprocess.run(
+        [sys.executable, benchmark_path],
+        capture_output=True,
+        text=True,
+        env=env
+    )
+
+    assert result.returncode == 0, f"Benchmark failed with error:\n{result.stderr}"
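
The new test hard-codes a single benchmark, `ex1_dt_DS.py`. If the other scripts under `ex/benchmarks-deterministic/PRoTECT-versions` should also be exercised in CI, the test could be parametrized over the directory instead. A minimal sketch, assuming the other top-level `*.py` files there are runnable scripts like `ex1_dt_DS.py`; that assumption, and the 600-second timeout, are illustrative and not part of this PR:

```python
import glob
import os
import subprocess
import sys

import pytest

BENCH_DIR = os.path.join(
    os.path.dirname(__file__),
    "..", "ex", "benchmarks-deterministic", "PRoTECT-versions"
)

# Assumption: every top-level .py file here is a runnable benchmark script.
SCRIPTS = sorted(glob.glob(os.path.join(BENCH_DIR, "*.py")))


@pytest.mark.parametrize("script", SCRIPTS, ids=os.path.basename)
def test_benchmark_runs(script):
    """Each benchmark script should exit cleanly when run as a subprocess."""
    # Run in a fresh interpreter with the repo root on PYTHONPATH, mirroring
    # the setup in the original test above.
    env = os.environ.copy()
    env["PYTHONPATH"] = os.path.join(os.path.dirname(__file__), "..")
    result = subprocess.run(
        [sys.executable, script],
        capture_output=True,
        text=True,
        env=env,
        timeout=600,  # assumed cap so one hung benchmark fails fast in CI
    )
    assert result.returncode == 0, f"{script} failed:\n{result.stderr}"
```

Running each benchmark in a subprocess with a timeout keeps a single hanging script from stalling the whole job. Locally, the same suite runs with `pytest --ignore=fossil-main --ignore=ex/benchmarks-deterministic/FOSSIL-versions`, mirroring the workflow's test step.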