Skip to content

Commit fa3dd27

Browse files
committed
fix: wip
1 parent 82c9c4e commit fa3dd27

File tree

6 files changed

+153
-13
lines changed

6 files changed

+153
-13
lines changed

.gitignore

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -127,3 +127,5 @@ venv.bak/
127127
/docs/source/_generated/
128128
/tmp/
129129
/docs/source/_static/_generated/
130+
# pixi environments
131+
.pixi

CONTRIBUTING.md

Lines changed: 33 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,39 @@ Please open an issue.
99

1010
## Installing the development environment
1111

12+
### Using pixi (recommended)
13+
14+
[Pixi](https://prefix.dev/docs/pixi/overview) provides a fast, reproducible development environment:
15+
16+
```bash
19+
# Install the default environment (includes all development tools)
20+
pixi install
21+
22+
# Activate a shell with all tools
23+
pixi shell
24+
25+
# Or run commands directly
26+
pixi run test-basic
27+
```
28+
29+
Available pixi tasks (run each with `pixi run <task>`):
30+
- `pixi run test` - Run full test suite with visual comparison
31+
- `pixi run test-parallel` - Run tests in parallel (auto-detects optimal worker count)
32+
- `pixi run test-basic` - Run basic tests without visual comparison
33+
- `pixi run generate-baseline` - Generate new baseline images
34+
- `pixi run lint` - Check code with ruff
35+
- `pixi run format` - Format code with ruff
36+
- `pixi run format-check` - Check formatting without changes
37+
38+
Benchmarking tasks (requires benchmark environment):
39+
- `pixi run -e benchmark benchmark` - Run benchmark tests
40+
- `pixi run -e benchmark benchmark-run` - Run and save benchmark results
41+
- `pixi run -e benchmark benchmark-compare` - Compare benchmark results
42+
43+
**Note:** For development work, use the default environment (`pixi install`) which includes all tools. The specialized environments (`dev`, `test`, `docs`, `benchmark`) are available for specific use cases.
44+
1245
```
1346
python -m pip install --upgrade --editable ".[all]"
1447
```

pyproject.toml

Lines changed: 61 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -59,14 +59,16 @@ test = [
5959
"pytest-mock",
6060
"pytest-mpl",
6161
"pytest-xdist",
62-
"pytest-benchmark",
6362
"pytest>=6.0",
6463
"scikit-hep-testdata",
6564
"scipy>=1.1.0",
6665
"uproot",
6766
"uproot4",
6867
"seaborn",
6968
]
69+
benchmark = [
70+
"pytest-benchmark",
71+
]
7072
all = [
7173
"mplhep[dev,test,docs]",
7274
]
@@ -186,3 +188,61 @@ ignore = [
186188
"docs/conf.py" = ["T20"]
187189
"src/mplhep/_dev.py" = ["T20"]
188190
"test_*.py" = ["T20"]
191+
192+
[tool.pixi.workspace]
193+
channels = ["conda-forge"]
194+
platforms = ["linux-64"]
195+
196+
[tool.pixi.environments]
197+
default = { features = ["dev", "test", "docs"], solve-group = "default" }
198+
dev = { features = ["dev", "test"], solve-group = "default" }
199+
docs = { features = ["docs"], solve-group = "default" }
200+
test = { features = ["test"], solve-group = "default" }
201+
benchmark = { features = ["test", "benchmark"], solve-group = "default" }
202+
203+
[tool.pixi.tasks]
204+
205+
[tool.pixi.feature.test.tasks]
206+
test = "pytest -r sa --mpl --mpl-results-path=pytest_results"
207+
test-parallel = "pytest -r sa --mpl --mpl-results-path=pytest_results -n auto"
208+
test-basic = "pytest tests/"
209+
test-single = "pytest"
210+
generate-baseline = "pytest -r sa --mpl --mpl-generate-path=tests/baseline"
211+
212+
[tool.pixi.feature.benchmark.tasks]
213+
benchmark = "pytest --benchmark-only --benchmark-sort=mean"
214+
benchmark-run = "pytest --benchmark-only --benchmark-sort=mean --benchmark-save=latest"
215+
benchmark-compare = "pytest --benchmark-only --benchmark-compare=latest"
216+
217+
[tool.pixi.feature.dev.tasks]
218+
lint = "ruff check src/"
219+
format = "ruff format src/"
220+
format-check = "ruff format --check src/"
221+
222+
[tool.pixi.dependencies]
223+
python = ">=3.9,<3.14"
224+
pytest = ">=9.0.1,<10"
225+
matplotlib = ">=3.4"
226+
numpy = ">=1.16.0"
227+
packaging = "*"
228+
229+
[tool.pixi.pypi-dependencies]
230+
mplhep = { path = ".", editable = true }
231+
232+
[tool.pixi.feature.test.dependencies]
233+
pytest-mock = "*"
234+
pytest-mpl = "*"
235+
pytest-xdist = "*"
236+
scipy = ">=1.1.0"
237+
seaborn = "*"
238+
239+
[tool.pixi.feature.benchmark.dependencies]
240+
pytest-benchmark = "*"
241+
242+
[tool.pixi.feature.dev.dependencies]
243+
jupyter = "*"
244+
twine = "*"
245+
ruff = "*"
246+
pre-commit = "*"
247+
questionary = "*"
248+
mkdocs = ">=1.6"

src/mplhep/_dev.py

Lines changed: 38 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -77,11 +77,14 @@ def _get_terminal_width(self) -> int:
7777
return 80
7878

7979
def _run_command_with_confirmation(
80-
self, cmd: list[str], prompt: str = "Confirm command (editable):"
80+
self,
81+
cmd: list[str],
82+
prompt: str = "Confirm command (editable):",
83+
env: Optional[dict] = None,
8184
) -> bool:
8285
"""Run a command with user confirmation and editing capability."""
8386
if not HAS_QUESTIONARY or questionary is None:
84-
return self._run_command(cmd)
87+
return self._run_command(cmd, env=env)
8588

8689
modified_cmd_str = questionary.text(
8790
prompt, default=" ".join(cmd), style=self.style
@@ -97,15 +100,23 @@ def _run_command_with_confirmation(
97100
self._print_error(f"Invalid command syntax: {e}")
98101
return False
99102

100-
return self._run_command(cmd)
103+
return self._run_command(cmd, env=env)
101104

102-
def _run_command(self, cmd: list[str], cwd: Optional[Path] = None) -> bool:
105+
def _run_command(
106+
self, cmd: list[str], cwd: Optional[Path] = None, env: Optional[dict] = None
107+
) -> bool:
103108
"""Run a command and return True if successful."""
104109
try:
105110
self._print_header(f"Running: {' '.join(cmd)}")
106111
separator = 3 * ("=" * self._get_terminal_width() + "\n")
107112
print(separator)
108-
result = subprocess.run(cmd, cwd=cwd or self.project_root, check=True)
113+
# Merge environment variables if provided
114+
run_env = os.environ.copy()
115+
if env:
116+
run_env.update(env)
117+
result = subprocess.run(
118+
cmd, cwd=cwd or self.project_root, env=run_env, check=True
119+
)
109120
print(separator)
110121
return result.returncode == 0
111122
except subprocess.CalledProcessError as e:
@@ -198,15 +209,30 @@ def cmd_test(
198209
sys.executable,
199210
"-m",
200211
"pytest",
201-
"-r",
202-
"sa",
203-
"--mpl",
204-
"--mpl-results-path=pytest_results",
205212
]
206213

207-
# Only add -n flag if using parallelism (jobs > 1)
214+
# Handle parallel execution - now works normally without conflicts
208215
if jobs > 1:
209-
cmd.extend(["-n", str(jobs)])
216+
cmd.extend(
217+
[
218+
"-r",
219+
"sa",
220+
"--mpl",
221+
"--mpl-results-path=pytest_results",
222+
"-n",
223+
str(jobs),
224+
]
225+
)
226+
else:
227+
# Non-parallel execution - use normal config
228+
cmd.extend(
229+
[
230+
"-r",
231+
"sa",
232+
"--mpl",
233+
"--mpl-results-path=pytest_results",
234+
]
235+
)
210236

211237
if filter_pattern:
212238
cmd.extend(["-k", filter_pattern])
@@ -1056,7 +1082,7 @@ def make_menu_item(title, command_text):
10561082
(
10571083
make_menu_item(
10581084
"🧪 Run pytest",
1059-
"python -m pytest -r sa --mpl --mpl-results-path=pytest_results",
1085+
f"python -m pytest -r sa --mpl --mpl-results-path=pytest_results{' -n ' + str(self.default_jobs) if self.default_jobs > 1 else ''}",
10601086
),
10611087
"test",
10621088
),

tests/baseline/test_issue_594.png

34.8 KB
Loading

tests/from_issues/test_issue594.py

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
1+
import hist
2+
import numpy as np
3+
import pytest
4+
5+
import mplhep as hep
6+
7+
8+
@pytest.mark.mpl_image_compare(remove_text=False)
9+
def test_issue_594():
10+
np.random.seed(42) # Set seed for reproducible results
11+
h1 = hist.Hist(hist.axis.Regular(10, 0, 10, underflow=True, overflow=True))
12+
h1.fill(np.random.normal(5, 2, 1000))
13+
h2 = hist.Hist(hist.axis.Regular(10, 0, 10, underflow=True, overflow=True))
14+
h2.fill(np.random.normal(5, 2, 1000))
15+
16+
fig, ax, rax = hep.comparison_plotters.data_model(
17+
h1, unstacked_components=[h2], flow="show"
18+
)
19+
return fig

0 commit comments

Comments
 (0)