Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions .github/workflows/ci.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ jobs:
uv run pre-commit run --all-files
- name: Test
run: |
uv run pytest --cov --cov-report=term-missing --cov-report=xml
uv run pytest --cov --cov-report=term-missing --cov-report=xml --benchmark-disable
- name: Upload coverage to Codecov
uses: codecov/codecov-action@v3
with:
Expand All @@ -44,4 +44,4 @@ jobs:
run: uv sync --all-groups
- name: Test
run: |
uv run pytest --cov --cov-report=term-missing
uv run pytest --cov --cov-report=term-missing --benchmark-disable
8 changes: 8 additions & 0 deletions CONTRIBUTING.md
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,14 @@ To run the test suite:
uv run pytest tests/
```

### Benchmarks

To monitor performance, run the benchmark suite:

```console
uv run pytest benches/
```

### Code Quality

Python linting and code formatting is provided by `ruff`.
Expand Down
1 change: 1 addition & 0 deletions benches/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
"""Benchmarks test module."""
124 changes: 124 additions & 0 deletions benches/conftest.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,124 @@
"""Pytest configuration for benchmarks."""

import base64
import os
import sys
from dataclasses import dataclass
from pathlib import Path
from typing import Generator
from unittest import mock

import pytest
from typing_extensions import Buffer

import pyautoenv
from benches.tools import environment_variable, make_venv
from tests.tools import clear_lru_caches

# Minimal pyproject.toml template for a poetry-managed project.  The
# {project_name} placeholder (doubled braces are literal TOML braces)
# is filled in via str.format when the project file is written.
POETRY_PYPROJECT = """[project]
name = "{project_name}"
version = "0.1.0"
description = ""
authors = [
{{name = "A Name",email = "someemail@abc.com"}}
]
readme = "README.md"
requires-python = ">=3.12"
dependencies = [
]

[tool.poetry]
packages = [{{include = "{project_name}", from = "src"}}]

[build-system]
requires = ["poetry-core>=2.0.0,<3.0.0"]
build-backend = "poetry.core.masonry.api"
"""


@pytest.fixture(autouse=True)
def reset_caches() -> None:
    """Reset the LRU caches in pyautoenv.

    Runs automatically before every benchmark so cached results from a
    previous test cannot skew timings.
    """
    clear_lru_caches(pyautoenv)


@pytest.fixture(autouse=True, scope="module")
def capture_logging() -> Generator[None, None, None]:
    """Capture all logging as benchmarks are extremely noisy."""
    if not __debug__:
        # Optimized mode: leave the logger state untouched.
        yield
        return
    previous_state = pyautoenv.logger.disabled
    try:
        pyautoenv.logger.disabled = True
        yield
    finally:
        pyautoenv.logger.disabled = previous_state


@pytest.fixture(autouse=True, scope="module")
def deactivate_venvs() -> Generator[None, None, None]:
    """Fixture to 'deactivate' any currently active virtualenvs."""
    # Remove VIRTUAL_ENV (if set) for the whole module, restoring the
    # caller's value afterwards.
    saved_venv = os.environ.pop("VIRTUAL_ENV", None)
    try:
        yield
    finally:
        if saved_venv is not None:
            os.environ["VIRTUAL_ENV"] = saved_venv


@pytest.fixture
def venv(tmp_path: Path) -> Path:
    """Fixture returning a venv in a temporary directory.

    The environment is built with ``make_venv`` (which shells out to
    ``virtualenv``) inside pytest's per-test temporary directory, so it
    is isolated between tests.
    """
    return make_venv(tmp_path / "venv_fixture")


@dataclass
class PoetryVenvFixture:
    """Poetry virtual environment fixture data."""

    # Directory containing the project's pyproject.toml and poetry.lock.
    project_dir: Path
    # Location of the project's virtualenv inside poetry's cache dir.
    venv_dir: Path


@pytest.fixture
def poetry_venv(tmp_path: Path) -> Generator[PoetryVenvFixture, None, None]:
    """Create a poetry virtual environment and associated project.

    Builds a fake poetry cache directory containing a real virtualenv,
    plus a project directory holding a pyproject.toml and lockfile, then
    points POETRY_CACHE_DIR at the cache for the duration of the test.
    """
    # Make poetry's cache directory.
    cache_dir = tmp_path / "pypoetry"
    cache_dir.mkdir()
    virtualenvs_dir = cache_dir / "virtualenvs"
    virtualenvs_dir.mkdir()

    # Create a virtual environment within the cache directory.
    project_name = "benchmark"
    py_version = ".".join(
        str(v) for v in [sys.version_info.major, sys.version_info.minor]
    )
    # 32-character stand-in for the hash embedded in the venv name; only
    # the first 8 characters appear in the directory name.
    fake_hash = "SOMEHASH" + "A" * (32 - 8)
    venv_name = f"{project_name}-{fake_hash[:8]}-py{py_version}"
    venv_dir = make_venv(virtualenvs_dir, venv_name)

    # Create a poetry project directory with a lockfile and pyproject.
    project_dir = tmp_path / project_name
    project_dir.mkdir()
    pyproject = project_dir / "pyproject.toml"
    with pyproject.open("w") as f:
        f.write(POETRY_PYPROJECT.format(project_name=project_name))
    (project_dir / "poetry.lock").touch()

    # Mock base64 encode to return a fixed hash so the poetry env is
    # discoverable. Actually run the encoder so the benchmark is more
    # representative, but return a fixed value.
    real_b64_encode = base64.urlsafe_b64encode

    def b64encode(s: Buffer) -> bytes:
        real_b64_encode(s)
        return fake_hash.encode()

    with (
        mock.patch("base64.urlsafe_b64encode", new=b64encode),
        environment_variable("POETRY_CACHE_DIR", str(cache_dir)),
    ):
        yield PoetryVenvFixture(project_dir, venv_dir)
115 changes: 115 additions & 0 deletions benches/test_benchmarks.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,115 @@
"""Benchmarks for pyautoenv's main function."""

from io import StringIO
from pathlib import Path
from typing import Union

import pytest

import pyautoenv
from benches.conftest import PoetryVenvFixture
from benches.tools import make_venv, venv_active, working_directory
from tests.tools import clear_lru_caches


class ResettingStream(StringIO):
    """
    A writable stream that resets its position to 0 after each write.

    We can use this in benchmarks to check what's written to the stream
    in the final iteration.
    """

    def write(self, s: str) -> int:
        """Write *s*, rewind to position 0, and return the count written."""
        count = super().write(s)
        self.seek(0)
        return count


def run_main_benchmark(benchmark, *, shell: Union[str, None] = None):
    """Benchmark pyautoenv.main and return the captured stdout text.

    The stream rewinds after every write (see ResettingStream), so the
    returned value is what the final benchmark iteration wrote.
    """
    out_stream = ResettingStream()
    args = [f"--{shell}"] if shell else []
    benchmark(pyautoenv.main, args, stdout=out_stream)
    # Drop memoized state so later benchmarks start cold.
    clear_lru_caches(pyautoenv)
    return out_stream.getvalue()


def test_no_activation(benchmark, tmp_path: Path):
    """No project or venv in the directory: nothing should be emitted."""
    with working_directory(tmp_path):
        output = run_main_benchmark(benchmark)
    assert not output


def test_deactivate(benchmark, venv: Path, tmp_path: Path):
    """Leaving an active venv's directory should emit 'deactivate'."""
    with venv_active(venv), working_directory(tmp_path):
        output = run_main_benchmark(benchmark)
    assert output == "deactivate"


@pytest.mark.parametrize("shell", [None, "fish", "pwsh"])
def test_venv_activate(shell, benchmark, venv: Path):
    """Entering a venv directory should emit an activation command."""
    with working_directory(venv):
        output = run_main_benchmark(benchmark, shell=shell)

    lowered = output.lower()
    assert "activate" in lowered, output
    assert str(venv).lower() in lowered, output


def test_venv_already_active(benchmark, venv: Path):
    """Nothing should be emitted if the venv is already active."""
    with venv_active(venv), working_directory(venv):
        output = run_main_benchmark(benchmark)
    assert not output


@pytest.mark.parametrize("shell", [None, "fish", "pwsh"])
def test_venv_switch_venv(shell, benchmark, venv: Path, tmp_path: Path):
    """Moving between two venvs should deactivate one, activate the other."""
    make_venv(tmp_path)

    with venv_active(venv), working_directory(tmp_path):
        output = run_main_benchmark(benchmark, shell=shell)

    for expected in ("deactivate", "&&", str(tmp_path), "activate"):
        assert expected in output, output


@pytest.mark.parametrize("shell", [None, "fish", "pwsh"])
def test_poetry_activate(shell, benchmark, poetry_venv: PoetryVenvFixture):
    """Entering a poetry project should activate its cached venv."""
    with working_directory(poetry_venv.project_dir):
        output = run_main_benchmark(benchmark, shell=shell)

    lowered = output.lower()
    assert "activate" in lowered
    assert str(poetry_venv.venv_dir).lower() in lowered


def test_poetry_already_active(benchmark, poetry_venv: PoetryVenvFixture):
    """Nothing should be emitted if the poetry venv is already active."""
    with (
        venv_active(poetry_venv.venv_dir),
        working_directory(poetry_venv.project_dir),
    ):
        output = run_main_benchmark(benchmark)
    assert not output


@pytest.mark.parametrize("shell", [None, "fish", "pwsh"])
def test_venv_switch_to_poetry(
    shell, benchmark, poetry_venv: PoetryVenvFixture, venv: Path
):
    """Moving from a plain venv into a poetry project swaps environments."""
    with venv_active(venv), working_directory(poetry_venv.project_dir):
        output = run_main_benchmark(benchmark, shell=shell)

    expected_parts = ("deactivate", "&&", str(poetry_venv.venv_dir), "activate")
    for part in expected_parts:
        assert part in output, output


@pytest.mark.parametrize("shell", [None, "fish", "pwsh"])
def test_poetry_switch_to_venv(
    shell, benchmark, poetry_venv: PoetryVenvFixture, venv: Path
):
    """Moving from a poetry project into a plain venv swaps environments."""
    with venv_active(poetry_venv.venv_dir), working_directory(venv):
        output = run_main_benchmark(benchmark, shell=shell)

    for part in ("deactivate", "&&", str(venv), "activate"):
        assert part in output, output
51 changes: 51 additions & 0 deletions benches/tools.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
"""Utilities for benchmarks."""

import os
from contextlib import contextmanager
from pathlib import Path
from typing import Generator

import virtualenv


def make_venv(path: Path, venv_name: str = ".venv") -> Path:
    """Create a virtual environment under *path* and return its location."""
    target = path / venv_name
    virtualenv.cli_run([str(target)])
    return target


@contextmanager
def environment_variable(
    variable: str, value: str
) -> Generator[None, None, None]:
    """Set an environment variable within a context.

    On exit, restore the variable's original value, or remove it if it
    was not previously set.

    :param variable: name of the environment variable to set.
    :param value: value to set it to for the duration of the context.
    """
    original_value = os.environ.get(variable)
    try:
        os.environ[variable] = value
        yield
    finally:
        # Check 'is not None' (not truthiness) so an originally-empty
        # string value is restored rather than being dropped.
        if original_value is not None:
            os.environ[variable] = original_value
        else:
            # Default avoids KeyError if the variable was removed
            # inside the context.
            os.environ.pop(variable, None)


@contextmanager
def working_directory(path: Path) -> Generator[None, None, None]:
    """Temporarily change the process working directory within a context."""
    previous_dir = Path.cwd()
    try:
        os.chdir(path)
        yield
    finally:
        # Always return to where we started, even on error.
        os.chdir(previous_dir)


@contextmanager
def venv_active(venv_dir: Path) -> Generator[None, None, None]:
    """Activate a virtual environment within a context."""
    if venv_dir.is_dir():
        # Activation, as far as pyautoenv is concerned, is just the
        # VIRTUAL_ENV environment variable being set.
        with environment_variable("VIRTUAL_ENV", str(venv_dir)):
            yield
    else:
        raise ValueError(f"Directory '{venv_dir}' does not exist.")
Loading