Skip to content

Commit 5dad4ca

Browse files
committed
fix: wip
1 parent c29a5b6 commit 5dad4ca

File tree

6 files changed

+159
-11
lines changed

6 files changed

+159
-11
lines changed

.gitignore

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -127,3 +127,5 @@ venv.bak/
127127
/docs/source/_generated/
128128
/tmp/
129129
/docs/source/_static/_generated/
130+
# pixi environments
131+
.pixi

CONTRIBUTING.md

Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,40 @@ Please open an [issue](https://github.com/scikit-hep/mplhep/issues).
99
## Installing the development environment
1010

1111
```bash
12+
### Using pixi (recommended)
13+
14+
[Pixi](https://prefix.dev/docs/pixi/overview) provides a fast, reproducible development environment:
15+
16+
```bash
19+
# Install the default environment (includes all development tools)
20+
pixi install
21+
22+
# Activate a shell with all tools
23+
pixi shell
24+
25+
# Or run commands directly
26+
pixi run test-basic
27+
```
28+
29+
Available pixi tasks (run any of them with `pixi run <task>`):
30+
- `pixi run test` - Run full test suite with visual comparison
31+
- `pixi run test-parallel` - Run tests in parallel (auto-detects optimal worker count)
32+
- `pixi run test-basic` - Run basic tests without visual comparison
33+
- `pixi run generate-baseline` - Generate new baseline images
34+
- `pixi run lint` - Check code with ruff
35+
- `pixi run format` - Format code with ruff
36+
- `pixi run format-check` - Check formatting without changes
37+
38+
Benchmarking tasks (require the benchmark environment):
39+
- `pixi run -e benchmark benchmark` - Run benchmark tests
40+
- `pixi run -e benchmark benchmark-run` - Run and save benchmark results
41+
- `pixi run -e benchmark benchmark-compare` - Compare benchmark results
42+
43+
**Note:** For development work, use the default environment (`pixi install`) which includes all tools. The specialized environments (`dev`, `test`, `docs`, `benchmark`) are available for specific use cases.
44+
45+
```
1246
python -m pip install --upgrade --editable ".[all]"
1347
```
1448
Also conveniently accessible as `bash install.sh`.

pyproject.toml

Lines changed: 61 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -58,14 +58,16 @@ test = [
5858
"pytest-mock",
5959
"pytest-mpl",
6060
"pytest-xdist",
61-
"pytest-benchmark",
6261
"pytest>=6.0",
6362
"scikit-hep-testdata",
6463
"scipy>=1.1.0",
6564
"uproot",
6665
"uproot4",
6766
"seaborn",
6867
]
68+
benchmark = [
69+
"pytest-benchmark",
70+
]
6971
all = [
7072
"mplhep[dev,test,docs]",
7173
]
@@ -199,3 +201,61 @@ ignore = [
199201
"src/mplhep/_dev.py" = ["T20"]
200202
"test_*.py" = ["T20"]
201203
"src/mplhep/styles/*.py" = ["FLY002"]
204+
205+
[tool.pixi.workspace]
206+
channels = ["conda-forge"]
207+
platforms = ["linux-64"]
208+
209+
[tool.pixi.environments]
210+
default = { features = ["dev", "test", "docs"], solve-group = "default" }
211+
dev = { features = ["dev", "test"], solve-group = "default" }
212+
docs = { features = ["docs"], solve-group = "default" }
213+
test = { features = ["test"], solve-group = "default" }
214+
benchmark = { features = ["test", "benchmark"], solve-group = "default" }
215+
216+
[tool.pixi.tasks]
217+
218+
[tool.pixi.feature.test.tasks]
219+
test = "pytest -r sa --mpl --mpl-results-path=pytest_results"
220+
test-parallel = "pytest -r sa --mpl --mpl-results-path=pytest_results -n auto"
221+
test-basic = "pytest tests/"
222+
test-single = "pytest"
223+
generate-baseline = "pytest -r sa --mpl --mpl-generate-path=tests/baseline"
224+
225+
[tool.pixi.feature.benchmark.tasks]
226+
benchmark = "pytest --benchmark-only --benchmark-sort=mean"
227+
benchmark-run = "pytest --benchmark-only --benchmark-sort=mean --benchmark-save=latest"
228+
benchmark-compare = "pytest --benchmark-only --benchmark-compare=latest"
229+
230+
[tool.pixi.feature.dev.tasks]
231+
lint = "ruff check src/"
232+
format = "ruff format src/"
233+
format-check = "ruff format --check src/"
234+
235+
[tool.pixi.dependencies]
236+
python = ">=3.9,<3.14"
237+
pytest = ">=9.0.1,<10"
238+
matplotlib = ">=3.4"
239+
numpy = ">=1.16.0"
240+
packaging = "*"
241+
242+
[tool.pixi.pypi-dependencies]
243+
mplhep = { path = ".", editable = true }
244+
245+
[tool.pixi.feature.test.dependencies]
246+
pytest-mock = "*"
247+
pytest-mpl = "*"
248+
pytest-xdist = "*"
249+
scipy = ">=1.1.0"
250+
seaborn = "*"
251+
252+
[tool.pixi.feature.benchmark.dependencies]
253+
pytest-benchmark = "*"
254+
255+
[tool.pixi.feature.dev.dependencies]
256+
jupyter = "*"
257+
twine = "*"
258+
ruff = "*"
259+
pre-commit = "*"
260+
questionary = "*"
261+
mkdocs = ">=1.6"

src/mplhep/_dev.py

Lines changed: 43 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -78,11 +78,14 @@ def _get_terminal_width(self) -> int:
7878
return 80
7979

8080
def _run_command_with_confirmation(
81-
self, cmd: list[str], prompt: str = "Confirm command (editable):"
81+
self,
82+
cmd: list[str],
83+
prompt: str = "Confirm command (editable):",
84+
env: Optional[dict] = None,
8285
) -> bool:
8386
"""Run a command with user confirmation and editing capability."""
8487
if not HAS_QUESTIONARY or questionary is None:
85-
return self._run_command(cmd)
88+
return self._run_command(cmd, env=env)
8689

8790
modified_cmd_str = questionary.text(
8891
prompt, default=" ".join(cmd), style=self.style
@@ -98,16 +101,31 @@ def _run_command_with_confirmation(
98101
self._print_error(f"Invalid command syntax: {e}")
99102
return False
100103

101-
return self._run_command(cmd)
104+
return self._run_command(cmd, env=env)
102105

103106
def _run_command(self, cmd: list[str], cwd: Path | None = None) -> bool:
107+
def _run_command(
108+
self, cmd: list[str], cwd: Optional[Path] = None, env: Optional[dict] = None
109+
) -> bool:
104110
"""Run a command and return True if successful."""
105111
self._print_header(f"Running: {' '.join(cmd)}")
106112
separator = 3 * ("=" * self._get_terminal_width() + "\n")
107113
print(separator)
108114

109115
try:
110116
result = subprocess.run(cmd, cwd=cwd or self.project_root, check=True)
117+
self._print_header(f"Running: {' '.join(cmd)}")
118+
separator = 3 * ("=" * self._get_terminal_width() + "\n")
119+
print(separator)
120+
# Merge environment variables if provided
121+
run_env = os.environ.copy()
122+
if env:
123+
run_env.update(env)
124+
result = subprocess.run(
125+
cmd, cwd=cwd or self.project_root, env=run_env, check=True
126+
)
127+
print(separator)
128+
return result.returncode == 0
111129
except subprocess.CalledProcessError as e:
112130
self._print_error(f"Command failed with exit code {e.returncode}")
113131
return False
@@ -200,15 +218,30 @@ def cmd_test(
200218
sys.executable,
201219
"-m",
202220
"pytest",
203-
"-r",
204-
"sa",
205-
"--mpl",
206-
"--mpl-results-path=pytest_results",
207221
]
208222

209-
# Only add -n flag if using parallelism (jobs > 1)
223+
# Handle parallel execution - now works normally without conflicts
210224
if jobs > 1:
211-
cmd.extend(["-n", str(jobs)])
225+
cmd.extend(
226+
[
227+
"-r",
228+
"sa",
229+
"--mpl",
230+
"--mpl-results-path=pytest_results",
231+
"-n",
232+
str(jobs),
233+
]
234+
)
235+
else:
236+
# Non-parallel execution - use normal config
237+
cmd.extend(
238+
[
239+
"-r",
240+
"sa",
241+
"--mpl",
242+
"--mpl-results-path=pytest_results",
243+
]
244+
)
212245

213246
if filter_pattern:
214247
cmd.extend(["-k", filter_pattern])
@@ -1062,7 +1095,7 @@ def make_menu_item(title, command_text):
10621095
(
10631096
make_menu_item(
10641097
"🧪 Run pytest",
1065-
"python -m pytest -r sa --mpl --mpl-results-path=pytest_results",
1098+
f"python -m pytest -r sa --mpl --mpl-results-path=pytest_results{' -n ' + str(self.default_jobs) if self.default_jobs > 1 else ''}",
10661099
),
10671100
"test",
10681101
),

tests/baseline/test_issue_594.png

34.8 KB
Loading

tests/from_issues/test_issue594.py

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
1+
import hist
2+
import numpy as np
3+
import pytest
4+
5+
import mplhep as hep
6+
7+
8+
@pytest.mark.mpl_image_compare(remove_text=False)
9+
def test_issue_594():
10+
np.random.seed(42) # Set seed for reproducible results
11+
h1 = hist.Hist(hist.axis.Regular(10, 0, 10, underflow=True, overflow=True))
12+
h1.fill(np.random.normal(5, 2, 1000))
13+
h2 = hist.Hist(hist.axis.Regular(10, 0, 10, underflow=True, overflow=True))
14+
h2.fill(np.random.normal(5, 2, 1000))
15+
16+
fig, ax, rax = hep.comparison_plotters.data_model(
17+
h1, unstacked_components=[h2], flow="show"
18+
)
19+
return fig

0 commit comments

Comments (0)