Commit 4a276bd

[GR-52848] Make numpy 1.26.4 the default for pip and benchmarks
PullRequest: graalpython/3272
2 parents aee7793 + 331827e commit 4a276bd

3 files changed, +59 -59 lines changed

ci.jsonnet

Lines changed: 1 addition & 1 deletion
@@ -1 +1 @@
-{ "overlay": "330d573abdbcfb0a301dade2b57c1626868a2433" }
+{ "overlay": "65b6442a7de19c1ec883b3aec8ddf4554dce0e99" }

graalpython/lib-graalpython/patches/numpy/metadata.toml

Lines changed: 0 additions & 2 deletions
@@ -8,8 +8,6 @@ ignore-rule-on-llvm = true
 version = '== 1.23.5'
 patch = 'numpy-1.23.5.patch'
 dist-type = 'sdist'
-# Keep this version as default for now
-install-priority = 2

 [[rules]]
 # 1.23.2 is currently used by oldest-supported-numpy

mx.graalpython/mx_graalpython_python_benchmarks.py

Lines changed: 58 additions & 56 deletions
@@ -72,11 +72,30 @@
     # "bench_ufunc",
 ]

+SKIPPED_NUMPY_BENCHMARKS = [
+    "bench_core.CountNonzero.time_count_nonzero(3, 1000000, <class 'str'>)", # Times out
+    "bench_core.CountNonzero.time_count_nonzero_axis(3, 1000000, <class 'str'>)", # Times out
+    "bench_core.CountNonzero.time_count_nonzero_multi_axis(3, 1000000, <class 'str'>)", # Times out
+    "bench_linalg.LinalgSmallArrays.time_det_small_array", # TODO fails with numpy.linalg.LinAlgError
+]
+
 DEFAULT_PANDAS_BENCHMARKS = [
     "reshape",
     "replace"
 ]

+SKIPPED_PANDAS_BENCHMARKS = [
+    "replace.ReplaceDict.time_replace_series", # Times out
+    "replace.ReplaceList.time_replace_list", # OOM, WIP msimacek
+    "replace.ReplaceList.time_replace_list_one_match", # OOM, WIP msimacek
+    "reshape.Crosstab.time_crosstab_normalize_margins", # Times out
+    "reshape.Cut.peakmem_cut_interval", # Times out
+    "reshape.Cut.time_cut_interval", # Times out
+    "reshape.GetDummies.time_get_dummies_1d_sparse", # Times out
+    "reshape.PivotTable.time_pivot_table_margins", # Times out
+    "reshape.WideToLong.time_wide_to_long_big", # Times out
+]
+
 DEFAULT_PYPERFORMANCE_BENCHMARKS = [
     # "2to3",
     # "chameleon",
@@ -173,6 +192,14 @@
 ]


+def create_asv_benchmark_selection(benchmarks, skipped=()):
+    regex = '|'.join(benchmarks)
+    if not skipped:
+        return regex
+    negative_lookaheads = [re.escape(skip) + (r'\b' if not skip.endswith(')') else '') for skip in skipped]
+    return '^(?!' + '|'.join(negative_lookaheads) + ')(' + regex + ')'
+
+
 class PyPerfJsonRule(mx_benchmark.Rule):
     """Parses a JSON file produced by PyPerf and creates a measurement result."""

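Editorial note on the new helper: create_asv_benchmark_selection turns the selected benchmark prefixes into a single regular expression for asv's -b option (asv matches that value as a regex against benchmark names), guarded by a negative lookahead over the skipped names. A minimal self-contained sketch of that behaviour, using illustrative benchmark names rather than the real suites:

import re

# Same construction as create_asv_benchmark_selection above, applied to
# hypothetical inputs for illustration.
benchmarks = ["bench_core", "bench_linalg"]
skipped = ["bench_linalg.LinalgSmallArrays.time_det_small_array"]

lookaheads = [re.escape(s) + (r'\b' if not s.endswith(')') else '') for s in skipped]
selection = '^(?!' + '|'.join(lookaheads) + ')(' + '|'.join(benchmarks) + ')'

# Benchmarks under the selected prefixes still match ...
assert re.match(selection, "bench_core.CountNonzero.time_count_nonzero_int")
# ... while the exact skipped benchmark is rejected by the lookahead.
assert not re.match(selection, "bench_linalg.LinalgSmallArrays.time_det_small_array")

The trailing \b keeps the exclusion exact, so a skipped name does not also exclude longer names that merely start with it; it is omitted for parameterized entries ending in ')', since \b after a closing parenthesis would not match at the end of the name.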
@@ -656,22 +683,18 @@ def _vmRun(self, vm, workdir, command, benchmarks, bmSuiteArgs):


 class NumPySuite(PySuite):
-    VERSION = "v1.23.5"
-
-    PREREQUISITES = """
-    setuptools==63.1.0
-    wheel==0.37.1
-    """
-
-    BENCHMARK_REQ = f"""
-    asv==0.5.1
-    distlib==0.3.6
-    filelock==3.8.0
-    platformdirs==2.5.2
-    six==1.16.0
-    virtualenv==20.16.3
-    numpy=={VERSION}
-    """
+    VERSION = "v1.26.4"
+
+    BENCHMARK_REQ = [
+        "asv==0.5.1",
+        "distlib==0.3.6",
+        "filelock==3.8.0",
+        "platformdirs==2.5.2",
+        "six==1.16.0",
+        "virtualenv==20.16.3",
+        "packaging==24.0",
+        f"numpy=={VERSION}",
+    ]

     def name(self):
         return "numpy-suite"
@@ -730,21 +753,13 @@ def _vmRun(self, vm, workdir, command, benchmarks, bmSuiteArgs):

         vm.run(workdir, ["-m", "venv", join(workdir, vm_venv)])
         pip = join(workdir, vm_venv, "bin", "pip")
-        requirements_txt = join(workdir, "requirements.txt")
-        with open(requirements_txt, "w") as f:
-            f.write(self.PREREQUISITES)
-        mx.run([pip, "install", "-r", requirements_txt], cwd=workdir)
-        with open(requirements_txt, "w") as f:
-            f.write(self.BENCHMARK_REQ)
-        mx.run([pip, "install", "-r", requirements_txt], cwd=workdir)
+        mx.run([pip, "install", *self.BENCHMARK_REQ], cwd=workdir)
         mx.run(
             [join(workdir, vm_venv, "bin", "asv"), "machine", "--yes"], cwd=benchdir
         )

-        if benchmarks:
-            bms = ["-b", "|".join(benchmarks)]
-        else:
-            bms = ["-b", "|".join(DEFAULT_NUMPY_BENCHMARKS)]
+        if not benchmarks:
+            benchmarks = DEFAULT_NUMPY_BENCHMARKS
         retcode = mx.run(
             [
                 join(workdir, vm_venv, "bin", "asv"),
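For illustration, the install step now hands the pinned requirements straight to pip instead of writing them to a requirements.txt in two rounds (the setuptools/wheel PREREQUISITES pins are dropped entirely). A minimal standalone equivalent; the paths below are hypothetical and the pins are copied from BENCHMARK_REQ above:

import subprocess
from os.path import join

workdir = "/tmp/numpy-bench-workdir"       # hypothetical working directory
pip = join(workdir, "venv", "bin", "pip")  # hypothetical venv location

benchmark_req = [
    "asv==0.5.1",
    "distlib==0.3.6",
    "filelock==3.8.0",
    "platformdirs==2.5.2",
    "six==1.16.0",
    "virtualenv==20.16.3",
    "packaging==24.0",
    "numpy==v1.26.4",  # value of f"numpy=={VERSION}" with VERSION = "v1.26.4"
]

# One pip invocation, no intermediate requirements.txt; roughly what
# mx.run([pip, "install", *self.BENCHMARK_REQ], cwd=workdir) runs.
subprocess.run([pip, "install", *benchmark_req], cwd=workdir, check=True)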
@@ -755,7 +770,7 @@ def _vmRun(self, vm, workdir, command, benchmarks, bmSuiteArgs):
                 "--python=same",
                 "--set-commit-hash",
                 self.VERSION,
-                *bms,
+                "-b", create_asv_benchmark_selection(benchmarks, skipped=SKIPPED_NUMPY_BENCHMARKS),
             ],
             cwd=benchdir,
             nonZeroIsFatal=False,
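One behavioural note on the new call site: the skip list is now applied even when the caller supplies an explicit benchmark selection, whereas the old *bms path simply joined the given names with '|'. A short sketch of the arguments assembled above under the default configuration (hypothetical call-site sketch, not part of the diff; the remaining asv flags are as shown):

selection = create_asv_benchmark_selection(DEFAULT_NUMPY_BENCHMARKS,
                                           skipped=SKIPPED_NUMPY_BENCHMARKS)
asv_args = ["--python=same", "--set-commit-hash", "v1.26.4", "-b", selection]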
@@ -775,22 +790,17 @@ class PandasSuite(PySuite):
     VERSION = "1.5.2"
     VERSION_TAG = "v" + VERSION

-    PREREQUISITES = """
-    setuptools==63.1.0
-    wheel==0.37.1
-    """
-
-    BENCHMARK_REQ = f"""
-    asv==0.5.1
-    distlib==0.3.6
-    filelock==3.8.0
-    platformdirs==2.5.2
-    six==1.16.0
-    virtualenv==20.16.3
-    jinja2
-    numpy==1.23.5
-    pandas=={VERSION}
-    """
+    BENCHMARK_REQ = [
+        "asv==0.5.1",
+        "distlib==0.3.6",
+        "filelock==3.8.0",
+        "platformdirs==2.5.2",
+        "six==1.16.0",
+        "virtualenv==20.16.3",
+        "jinja2",
+        f"numpy=={NumPySuite.VERSION}",
+        f"pandas=={VERSION}",
+    ]

     def name(self):
         return "pandas-suite"
@@ -870,21 +880,13 @@ def _vmRun(self, vm, workdir, command, benchmarks, bmSuiteArgs):

         vm.run(workdir, ["-m", "venv", join(workdir, vm_venv)])
         pip = join(workdir, vm_venv, "bin", "pip")
-        requirements_txt = join(workdir, "requirements.txt")
-        with open(requirements_txt, "w") as f:
-            f.write(self.PREREQUISITES)
-        mx.run([pip, "install", "-r", requirements_txt], cwd=workdir)
-        with open(requirements_txt, "w") as f:
-            f.write(self.BENCHMARK_REQ)
-        mx.run([pip, "install", "-r", requirements_txt], cwd=workdir)
+        mx.run([pip, "install", *self.BENCHMARK_REQ], cwd=workdir)
         mx.run(
             [join(workdir, vm_venv, "bin", "asv"), "machine", "--yes"], cwd=benchdir
         )

-        if benchmarks:
-            bms = ["-b", "|".join(benchmarks)]
-        else:
-            bms = ["-b", "|".join(DEFAULT_PANDAS_BENCHMARKS)]
+        if not benchmarks:
+            benchmarks = DEFAULT_PANDAS_BENCHMARKS
         retcode = mx.run(
             [
                 join(workdir, vm_venv, "bin", "asv"),
@@ -895,7 +897,7 @@ def _vmRun(self, vm, workdir, command, benchmarks, bmSuiteArgs):
                 "--python=same",
                 "--set-commit-hash",
                 self.VERSION_TAG,
-                *bms,
+                "-b", create_asv_benchmark_selection(benchmarks, skipped=SKIPPED_PANDAS_BENCHMARKS),
             ],
             cwd=benchdir,
             nonZeroIsFatal=False,
