Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -127,3 +127,5 @@ venv.bak/
/docs/source/_generated/
/tmp/
/docs/source/_static/_generated/
# pixi environments
.pixi
34 changes: 34 additions & 0 deletions CONTRIBUTING.md
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,40 @@ Please open an [issue](https://github.com/scikit-hep/mplhep/issues).
## Installing the development environment

### Using pixi (recommended)

[Pixi](https://prefix.dev/docs/pixi/overview) provides a fast, reproducible development environment:

```bash
# Install the default environment (includes all development tools)
pixi install

# Activate a shell with all tools
pixi shell

# Or run commands directly
pixi run test-basic
```

Available pixi tasks (run with `pixi run <task>`):
- `pixi run test` - Run full test suite with visual comparison
- `pixi run test-parallel` - Run tests in parallel (auto-detects optimal worker count)
- `pixi run test-basic` - Run basic tests without visual comparison
- `pixi run generate-baseline` - Generate new baseline images
- `pixi run lint` - Check code with ruff
- `pixi run format` - Format code with ruff
- `pixi run format-check` - Check formatting without changes

Benchmarking tasks (requires benchmark environment):
- `pixi run -e benchmark benchmark` - Run benchmark tests
- `pixi run -e benchmark benchmark-run` - Run and save benchmark results
- `pixi run -e benchmark benchmark-compare` - Compare benchmark results

**Note:** For development work, use the default environment (`pixi install`) which includes all tools. The specialized environments (`dev`, `test`, `docs`, `benchmark`) are available for specific use cases.

### Using pip

```bash
python -m pip install --upgrade --editable ".[all]"
```
Also conveniently accessible as `bash install.sh`.
Expand Down
62 changes: 61 additions & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -58,14 +58,16 @@ test = [
"pytest-mock",
"pytest-mpl",
"pytest-xdist",
"pytest-benchmark",
"pytest>=6.0",
"scikit-hep-testdata",
"scipy>=1.1.0",
"uproot",
"uproot4",
"seaborn",
]
benchmark = [
"pytest-benchmark",
]
all = [
"mplhep[dev,test,docs]",
]
Expand Down Expand Up @@ -199,3 +201,61 @@ ignore = [
"src/mplhep/_dev.py" = ["T20"]
"test_*.py" = ["T20"]
"src/mplhep/styles/*.py" = ["FLY002"]

[tool.pixi.workspace]
channels = ["conda-forge"]
platforms = ["linux-64"]

[tool.pixi.environments]
default = { features = ["dev", "test", "docs"], solve-group = "default" }
dev = { features = ["dev", "test"], solve-group = "default" }
docs = { features = ["docs"], solve-group = "default" }
test = { features = ["test"], solve-group = "default" }
benchmark = { features = ["test", "benchmark"], solve-group = "default" }

[tool.pixi.tasks]

[tool.pixi.feature.test.tasks]
test = "pytest -r sa --mpl --mpl-results-path=pytest_results"
test-parallel = "pytest -r sa --mpl --mpl-results-path=pytest_results -n auto"
test-basic = "pytest tests/"
test-single = "pytest"
generate-baseline = "pytest -r sa --mpl --mpl-generate-path=tests/baseline"

[tool.pixi.feature.benchmark.tasks]
benchmark = "pytest --benchmark-only --benchmark-sort=mean"
benchmark-run = "pytest --benchmark-only --benchmark-sort=mean --benchmark-save=latest"
benchmark-compare = "pytest --benchmark-only --benchmark-compare=latest"

[tool.pixi.feature.dev.tasks]
lint = "ruff check src/"
format = "ruff format src/"
format-check = "ruff format --check src/"

[tool.pixi.dependencies]
python = ">=3.9,<3.14"
pytest = ">=6.0,<10"  # keep in sync with the pytest>=6.0 floor in [project.optional-dependencies].test
matplotlib = ">=3.4"
numpy = ">=1.16.0"
packaging = "*"

[tool.pixi.pypi-dependencies]
mplhep = { path = ".", editable = true }

[tool.pixi.feature.test.dependencies]
pytest-mock = "*"
pytest-mpl = "*"
pytest-xdist = "*"
scipy = ">=1.1.0"
seaborn = "*"

[tool.pixi.feature.benchmark.dependencies]
pytest-benchmark = "*"

[tool.pixi.feature.dev.dependencies]
jupyter = "*"
twine = "*"
ruff = "*"
pre-commit = "*"
questionary = "*"
mkdocs = ">=1.6"
53 changes: 43 additions & 10 deletions src/mplhep/_dev.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,11 +78,14 @@ def _get_terminal_width(self) -> int:
return 80

def _run_command_with_confirmation(
self, cmd: list[str], prompt: str = "Confirm command (editable):"
self,
cmd: list[str],
prompt: str = "Confirm command (editable):",
env: Optional[dict] = None,
) -> bool:
"""Run a command with user confirmation and editing capability."""
if not HAS_QUESTIONARY or questionary is None:
return self._run_command(cmd)
return self._run_command(cmd, env=env)

modified_cmd_str = questionary.text(
prompt, default=" ".join(cmd), style=self.style
Expand All @@ -98,16 +101,31 @@ def _run_command_with_confirmation(
self._print_error(f"Invalid command syntax: {e}")
return False

return self._run_command(cmd)
return self._run_command(cmd, env=env)

def _run_command(self, cmd: list[str], cwd: Path | None = None) -> bool:
def _run_command(
self, cmd: list[str], cwd: Optional[Path] = None, env: Optional[dict] = None
) -> bool:
"""Run a command and return True if successful."""
self._print_header(f"Running: {' '.join(cmd)}")
separator = 3 * ("=" * self._get_terminal_width() + "\n")
print(separator)

try:
result = subprocess.run(cmd, cwd=cwd or self.project_root, check=True)
self._print_header(f"Running: {' '.join(cmd)}")
separator = 3 * ("=" * self._get_terminal_width() + "\n")
print(separator)
# Merge environment variables if provided
run_env = os.environ.copy()
if env:
run_env.update(env)
result = subprocess.run(
cmd, cwd=cwd or self.project_root, env=run_env, check=True
)
print(separator)
return result.returncode == 0
except subprocess.CalledProcessError as e:
self._print_error(f"Command failed with exit code {e.returncode}")
return False
Expand Down Expand Up @@ -200,15 +218,30 @@ def cmd_test(
sys.executable,
"-m",
"pytest",
"-r",
"sa",
"--mpl",
"--mpl-results-path=pytest_results",
]

# Only add -n flag if using parallelism (jobs > 1)
# Handle parallel execution - now works normally without conflicts
if jobs > 1:
cmd.extend(["-n", str(jobs)])
cmd.extend(
[
"-r",
"sa",
"--mpl",
"--mpl-results-path=pytest_results",
"-n",
str(jobs),
]
)
else:
# Non-parallel execution - use normal config
cmd.extend(
[
"-r",
"sa",
"--mpl",
"--mpl-results-path=pytest_results",
]
)

if filter_pattern:
cmd.extend(["-k", filter_pattern])
Expand Down Expand Up @@ -1062,7 +1095,7 @@ def make_menu_item(title, command_text):
(
make_menu_item(
"🧪 Run pytest",
"python -m pytest -r sa --mpl --mpl-results-path=pytest_results",
f"python -m pytest -r sa --mpl --mpl-results-path=pytest_results{' -n ' + str(self.default_jobs) if self.default_jobs > 1 else ''}",
),
"test",
),
Expand Down
Binary file added tests/baseline/test_issue_594.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
19 changes: 19 additions & 0 deletions tests/from_issues/test_issue594.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,19 @@
import hist
import numpy as np
import pytest

import mplhep as hep


@pytest.mark.mpl_image_compare(remove_text=False)
def test_issue_594():
    """Regression test for issue #594: data/model comparison plot with flow bins shown.

    Returns the figure so pytest-mpl can compare it against the stored baseline image.
    """
    # Fixed seed so both histograms — and therefore the baseline image — are reproducible.
    np.random.seed(42)

    def _filled_hist():
        # 10 regular bins on [0, 10] with under/overflow enabled, filled with
        # 1000 draws from N(5, 2). Draw order matches the seeded RNG stream.
        h = hist.Hist(hist.axis.Regular(10, 0, 10, underflow=True, overflow=True))
        h.fill(np.random.normal(5, 2, 1000))
        return h

    data_hist = _filled_hist()
    model_hist = _filled_hist()

    fig, ax, rax = hep.comparison_plotters.data_model(
        data_hist, unstacked_components=[model_hist], flow="show"
    )
    return fig
Loading