From 1d796e20ec6731354149481c592d8f61402069da Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sat, 19 Apr 2025 10:49:11 +0200 Subject: [PATCH 01/67] MNT: Document ignored ruff rules --- ruff.toml | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/ruff.toml b/ruff.toml index 56ac820ce23d..d01ff1b5dd21 100644 --- a/ruff.toml +++ b/ruff.toml @@ -28,19 +28,19 @@ extend-select = [ ignore = [ "F", # TODO: enable Pyflakes rules "PIE790", # Unnecessary `pass` statement - "E241", - "E251", - "E265", - "E266", - "E302", - "E402", + "E241", # Multiple spaces after comma + "E251", # Unexpected spaces around keyword / parameter equals + "E265", # Block comment should start with `# ` + "E266", # Too many leading `#` before block comment + "E302", # TODO: Expected 2 blank lines, found 1 + "E402", # Module level import not at top of file "E501", # TODO: Line too long - "E712", - "E721", - "E731", - "E741", - "UP015", # Unnecessary mode argument - "UP031", # TODO: Use format specifiers instead of percent format + "E712", # Avoid equality comparisons to `True` or `False` + "E721", # TODO: Use `is` and `is not` for type comparisons, or `isinstance()` for isinstance check + "E731", # Do not assign a `lambda` expression, use a `def` + "E741", # Ambiguous variable name + "UP015", # Unnecessary mode argument + "UP031", # TODO: Use format specifiers instead of percent format ] [lint.per-file-ignores] From 908e7e4b2854d96f3478cd85ebcc426ab833378e Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sat, 19 Apr 2025 11:20:05 +0200 Subject: [PATCH 02/67] STY: Partially apply ruff/pycodestyle rule E241 Multiple spaces after comma Do not apply this rule to tabular data, tables must remain aligned. 
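As an illustration of the rule (a hypothetical snippet, not taken from the diff below), E241 flags more than one space after a comma; the fix collapses the run of spaces, except in aligned tabular literals, which this patch deliberately leaves untouched:

    pair = (1,  2)   # E241: multiple spaces after comma
    pair = (1, 2)    # fixed

    # left as is: the extra spaces after the commas keep the columns aligned
    table = [
        (1,    'one'),
        (1000, 'thousand'),
    ]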
--- benchmarks/benchmarks/bench_ufunc.py | 2 +- benchmarks/benchmarks/common.py | 4 +-- numpy/_core/overrides.py | 2 +- numpy/_core/tests/test_arrayprint.py | 2 +- numpy/_core/tests/test_indexing.py | 4 +-- numpy/_core/tests/test_regression.py | 4 +-- numpy/_core/tests/test_scalarmath.py | 4 +-- numpy/lib/tests/test_function_base.py | 36 ++++++++++---------- numpy/linalg/lapack_lite/clapack_scrub.py | 16 ++++----- numpy/linalg/tests/test_regression.py | 10 +++--- numpy/ma/tests/test_extras.py | 6 ++-- numpy/matrixlib/tests/test_defmatrix.py | 4 +-- numpy/random/tests/test_generator_mt19937.py | 2 +- numpy/testing/_private/utils.py | 2 +- numpy/tests/test_scripts.py | 2 +- tools/ci/push_docs_to_repo.py | 2 +- 16 files changed, 51 insertions(+), 51 deletions(-) diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py index 926e04571402..ad1c2d3fad9f 100644 --- a/benchmarks/benchmarks/bench_ufunc.py +++ b/benchmarks/benchmarks/bench_ufunc.py @@ -432,7 +432,7 @@ def time_divide_scalar2_inplace(self, dtype): class CustomComparison(Benchmark): - params = (np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, + params = (np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64, np.float32, np.float64, np.bool) param_names = ['dtype'] diff --git a/benchmarks/benchmarks/common.py b/benchmarks/benchmarks/common.py index bee012d4ab26..064255e185eb 100644 --- a/benchmarks/benchmarks/common.py +++ b/benchmarks/benchmarks/common.py @@ -20,14 +20,14 @@ TYPES1 = [ 'int16', 'float16', 'int32', 'float32', - 'int64', 'float64', 'complex64', + 'int64', 'float64', 'complex64', 'complex128', ] DLPACK_TYPES = [ 'int16', 'float16', 'int32', 'float32', - 'int64', 'float64', 'complex64', + 'int64', 'float64', 'complex64', 'complex128', 'bool', ] diff --git a/numpy/_core/overrides.py b/numpy/_core/overrides.py index cb466408cd39..c73d8eb4c1c5 100644 --- a/numpy/_core/overrides.py +++ b/numpy/_core/overrides.py @@ -5,7 +5,7 @@ from .._utils import set_module from .._utils._inspect import getargspec from numpy._core._multiarray_umath import ( - add_docstring, _get_implementing_args, _ArrayFunctionDispatcher) + add_docstring, _get_implementing_args, _ArrayFunctionDispatcher) ARRAY_FUNCTIONS = set() diff --git a/numpy/_core/tests/test_arrayprint.py b/numpy/_core/tests/test_arrayprint.py index 4b9da0ebb7c6..d31f29271e93 100644 --- a/numpy/_core/tests/test_arrayprint.py +++ b/numpy/_core/tests/test_arrayprint.py @@ -320,7 +320,7 @@ def test_structure_format_float(self): assert_equal(np.array2string(array_scalar), "(1., 2.12345679, 3.)") def test_unstructured_void_repr(self): - a = np.array([27, 91, 50, 75, 7, 65, 10, 8, + a = np.array([27, 91, 50, 75, 7, 65, 10, 8, 27, 91, 51, 49, 109, 82, 101, 100], dtype='u1').view('V8') assert_equal(repr(a[0]), r"np.void(b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08')") diff --git a/numpy/_core/tests/test_indexing.py b/numpy/_core/tests/test_indexing.py index c9508bb03bdc..410a68b7efb4 100644 --- a/numpy/_core/tests/test_indexing.py +++ b/numpy/_core/tests/test_indexing.py @@ -219,7 +219,7 @@ def test_boolean_shape_mismatch(self): def test_boolean_indexing_onedim(self): # Indexing a 2-dimensional array with # boolean array of length one - a = np.array([[0., 0., 0.]]) + a = np.array([[0., 0., 0.]]) b = np.array([True], dtype=bool) assert_equal(a[b], a) # boolean assignment @@ -643,7 +643,7 @@ def test_prepend_not_one(self): a = np.zeros(5) # Too large and not only ones. 
- assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1))) + assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1))) assert_raises(ValueError, assign, a, s_[[1, 2, 3],], np.ones((2, 1))) assert_raises(ValueError, assign, a, s_[[[1], [2]],], np.ones((2, 2, 1))) diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index 30d66bb3cbe2..b8b539946909 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -171,7 +171,7 @@ def test_endian_where(self): net[2] = 0.605202 max_net = net.max() test = np.where(net <= 0., max_net, net) - correct = np.array([0.60520202, 0.00458849, 0.60520202]) + correct = np.array([0.60520202, 0.00458849, 0.60520202]) assert_array_almost_equal(test, correct) def test_endian_recarray(self): @@ -2046,7 +2046,7 @@ def test_unique_stable(self): # get consistent results v = np.array(([0] * 5 + [1] * 6 + [2] * 6) * 4) res = np.unique(v, return_index=True) - tgt = (np.array([0, 1, 2]), np.array([0, 5, 11])) + tgt = (np.array([0, 1, 2]), np.array([0, 5, 11])) assert_equal(res, tgt) def test_unicode_alloc_dealloc_match(self): diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py index e9cac03c7a9b..1842f5edd08b 100644 --- a/numpy/_core/tests/test_scalarmath.py +++ b/numpy/_core/tests/test_scalarmath.py @@ -171,11 +171,11 @@ def test_blocked(self): inp2[...] += np.arange(inp2.size, dtype=dt) + 1 assert_almost_equal(np.square(inp2), - np.multiply(inp2, inp2), err_msg=msg) + np.multiply(inp2, inp2), err_msg=msg) # skip true divide for ints if dt != np.int32: assert_almost_equal(np.reciprocal(inp2), - np.divide(1, inp2), err_msg=msg) + np.divide(1, inp2), err_msg=msg) inp1[...] = np.ones_like(inp1) np.add(inp1, 2, out=out) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 653a0f068372..782068eac206 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -2413,7 +2413,7 @@ class TestCorrCoef: def test_non_array(self): assert_almost_equal(np.corrcoef([0, 1, 0], [1, 0, 1]), - [[1., -1.], [-1., 1.]]) + [[1., -1.], [-1., 1.]]) def test_simple(self): tgt1 = corrcoef(self.A) @@ -3401,10 +3401,10 @@ def test_scalar_q(self): x = np.arange(12).reshape(3, 4) assert_equal(np.percentile(x, 50), 5.5) assert_(np.isscalar(np.percentile(x, 50))) - r0 = np.array([4., 5., 6., 7.]) + r0 = np.array([4., 5., 6., 7.]) assert_equal(np.percentile(x, 50, axis=0), r0) assert_equal(np.percentile(x, 50, axis=0).shape, r0.shape) - r1 = np.array([1.5, 5.5, 9.5]) + r1 = np.array([1.5, 5.5, 9.5]) assert_almost_equal(np.percentile(x, 50, axis=1), r1) assert_equal(np.percentile(x, 50, axis=1).shape, r1.shape) @@ -3422,11 +3422,11 @@ def test_scalar_q(self): x = np.arange(12).reshape(3, 4) assert_equal(np.percentile(x, 50, method='lower'), 5.) 
assert_(np.isscalar(np.percentile(x, 50))) - r0 = np.array([4., 5., 6., 7.]) + r0 = np.array([4., 5., 6., 7.]) c0 = np.percentile(x, 50, method='lower', axis=0) assert_equal(c0, r0) assert_equal(c0.shape, r0.shape) - r1 = np.array([1., 5., 9.]) + r1 = np.array([1., 5., 9.]) c1 = np.percentile(x, 50, method='lower', axis=1) assert_almost_equal(c1, r1) assert_equal(c1.shape, r1.shape) @@ -3496,18 +3496,18 @@ def test_percentile_out(self, percentile, with_weights): percentile(x, (25, 50), axis=0, out=out, weights=weights), r0 ) assert_equal(out, r0) - r1 = np.array([[0.75, 4.75, 8.75], [1.5, 5.5, 9.5]]) + r1 = np.array([[0.75, 4.75, 8.75], [1.5, 5.5, 9.5]]) out = np.empty((2, 3)) assert_equal(np.percentile(x, (25, 50), axis=1, out=out), r1) assert_equal(out, r1) # q.dim > 1, int - r0 = np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) + r0 = np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) out = np.empty((2, 4), dtype=x.dtype) c = np.percentile(x, (25, 50), method='lower', axis=0, out=out) assert_equal(c, r0) assert_equal(out, r0) - r1 = np.array([[0, 4, 8], [1, 5, 9]]) + r1 = np.array([[0, 4, 8], [1, 5, 9]]) out = np.empty((2, 3), dtype=x.dtype) c = np.percentile(x, (25, 50), method='lower', axis=1, out=out) assert_equal(c, r1) @@ -3583,7 +3583,7 @@ def test_extended_axis(self): d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11)) np.random.shuffle(d.ravel()) - assert_equal(np.percentile(d, 25, axis=(0, 1, 2))[0], + assert_equal(np.percentile(d, 25, axis=(0, 1, 2))[0], np.percentile(d[:, :, :, 0].flatten(), 25)) assert_equal(np.percentile(d, [10, 90], axis=(0, 1, 3))[:, 1], np.percentile(d[:, :, 1, :].flatten(), [10, 90])) @@ -3888,7 +3888,7 @@ def test_q_zero_one(self, method): # gh-24710 arr = [10, 11, 12] quantile = np.quantile(arr, q = [0, 1], method=method) - assert_equal(quantile, np.array([10, 12])) + assert_equal(quantile, np.array([10, 12])) @pytest.mark.parametrize("method", quantile_methods) def test_quantile_monotonic(self, method): @@ -4217,7 +4217,7 @@ def test_basic(self): assert_equal(np.median(a0), 1) assert_allclose(np.median(a1), 0.5) assert_allclose(np.median(a2), 2.5) - assert_allclose(np.median(a2, axis=0), [1.5, 2.5, 3.5]) + assert_allclose(np.median(a2, axis=0), [1.5, 2.5, 3.5]) assert_equal(np.median(a2, axis=1), [1, 4]) assert_allclose(np.median(a2, axis=None), 2.5) @@ -4244,8 +4244,8 @@ def test_axis_keyword(self): np.median(a, axis=ax) assert_array_equal(a, orig) - assert_allclose(np.median(a3, axis=0), [3, 4]) - assert_allclose(np.median(a3.T, axis=1), [3, 4]) + assert_allclose(np.median(a3, axis=0), [3, 4]) + assert_allclose(np.median(a3.T, axis=1), [3, 4]) assert_allclose(np.median(a3), 3.5) assert_allclose(np.median(a3, axis=None), 3.5) assert_allclose(np.median(a3.T), 3.5) @@ -4261,16 +4261,16 @@ def test_overwrite_keyword(self): assert_allclose(np.median(a0.copy(), overwrite_input=True), 1) assert_allclose(np.median(a1.copy(), overwrite_input=True), 0.5) assert_allclose(np.median(a2.copy(), overwrite_input=True), 2.5) - assert_allclose(np.median(a2.copy(), overwrite_input=True, axis=0), - [1.5, 2.5, 3.5]) + assert_allclose( + np.median(a2.copy(), overwrite_input=True, axis=0), [1.5, 2.5, 3.5]) assert_allclose( np.median(a2.copy(), overwrite_input=True, axis=1), [1, 4]) assert_allclose( np.median(a2.copy(), overwrite_input=True, axis=None), 2.5) assert_allclose( - np.median(a3.copy(), overwrite_input=True, axis=0), [3, 4]) - assert_allclose(np.median(a3.T.copy(), overwrite_input=True, axis=1), - [3, 4]) + np.median(a3.copy(), overwrite_input=True, axis=0), [3, 4]) + 
assert_allclose( + np.median(a3.T.copy(), overwrite_input=True, axis=1), [3, 4]) a4 = np.arange(3 * 4 * 5, dtype=np.float32).reshape((3, 4, 5)) np.random.shuffle(a4.ravel()) diff --git a/numpy/linalg/lapack_lite/clapack_scrub.py b/numpy/linalg/lapack_lite/clapack_scrub.py index 65231aed7998..1d903bd6409d 100644 --- a/numpy/linalg/lapack_lite/clapack_scrub.py +++ b/numpy/linalg/lapack_lite/clapack_scrub.py @@ -79,19 +79,19 @@ def endArgs(self, text): keep_ftnlen = (Str('ilaenv_') | Str('iparmq_') | Str('s_rnge')) + Str('(') lexicon = Lexicon([ - (iofunctions, TEXT), - (keep_ftnlen, beginArgs), + (iofunctions, TEXT), + (keep_ftnlen, beginArgs), State('args', [ (Str(')'), endArgs), (Str('('), beginArgs), (AnyChar, TEXT), ]), - (cS + Re(r'[1-9][0-9]*L'), IGNORE), - (cS + Str('ftnlen') + Opt(S + len_), IGNORE), - (cS + sep_seq(['(', 'ftnlen', ')'], S) + S + digits, IGNORE), - (Bol + Str('ftnlen ') + len_ + Str(';\n'), IGNORE), - (cS + len_, TEXT), - (AnyChar, TEXT), + (cS + Re(r'[1-9][0-9]*L'), IGNORE), + (cS + Str('ftnlen') + Opt(S + len_), IGNORE), + (cS + sep_seq(['(', 'ftnlen', ')'], S) + S + digits, IGNORE), + (Bol + Str('ftnlen ') + len_ + Str(';\n'), IGNORE), + (cS + len_, TEXT), + (AnyChar, TEXT), ]) def scrubFtnlen(source): diff --git a/numpy/linalg/tests/test_regression.py b/numpy/linalg/tests/test_regression.py index 3949b0b9e66f..e8159fd570bf 100644 --- a/numpy/linalg/tests/test_regression.py +++ b/numpy/linalg/tests/test_regression.py @@ -40,9 +40,9 @@ def test_eigh_build(self): # Ticket 662. rvals = [68.60568999, 89.57756725, 106.67185574] - cov = array([[77.70273908, 3.51489954, 15.64602427], - [3.51489954, 88.97013878, -1.07431931], - [15.64602427, -1.07431931, 98.18223512]]) + cov = array([[77.70273908, 3.51489954, 15.64602427], + [ 3.51489954, 88.97013878, -1.07431931], + [15.64602427, -1.07431931, 98.18223512]]) vals, vecs = linalg.eigh(cov) assert_array_almost_equal(vals, rvals) @@ -64,8 +64,8 @@ def test_norm_vector_badarg(self): def test_lapack_endian(self): # For bug #1482 - a = array([[5.7998084, -2.1825367], - [-2.1825367, 9.85910595]], dtype='>f8') + a = array([[ 5.7998084, -2.1825367], + [-2.1825367, 9.85910595]], dtype='>f8') b = array(a, dtype=' Date: Sat, 19 Apr 2025 11:35:02 +0200 Subject: [PATCH 03/67] STY: Partially apply ruff/pycodestyle rule E251 Unexpected spaces around keyword / parameter equals For now, do not modify large and consistent code blocks that use spaces on purpose, mostly when the default values are arrays. 
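As an illustration (a minimal, self-contained sketch; only the call to np.arange mirrors a change in the hunks below, the rest is hypothetical), E251 flags spaces around the `=` of a keyword argument or parameter default, while large, consistently aligned blocks of defaults are left untouched for now:

    import numpy as np

    expval = np.arange(11, dtype = np.float32)   # E251: spaces around keyword equals
    expval = np.arange(11, dtype=np.float32)     # fixed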
--- benchmarks/benchmarks/bench_ufunc.py | 2 +- numpy/_core/code_generators/generate_umath.py | 2 +- numpy/_core/tests/test_ufunc.py | 2 +- numpy/_core/tests/test_umath_accuracy.py | 2 +- numpy/f2py/auxfuncs.py | 2 +- numpy/f2py/tests/test_array_from_pyobj.py | 2 +- numpy/f2py/tests/test_regression.py | 2 +- numpy/lib/tests/test_function_base.py | 12 ++++++------ numpy/lib/tests/test_histograms.py | 2 +- numpy/ma/extras.pyi | 8 ++++---- numpy/ma/tests/test_core.py | 4 ++-- tools/download-wheels.py | 2 +- tools/swig/test/setup.py | 16 ++++++++-------- 13 files changed, 29 insertions(+), 29 deletions(-) diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py index ad1c2d3fad9f..4d9f3c9c8f61 100644 --- a/benchmarks/benchmarks/bench_ufunc.py +++ b/benchmarks/benchmarks/bench_ufunc.py @@ -342,7 +342,7 @@ def time_ufunc_small_array(self, ufuncname): self.f(self.array_5) def time_ufunc_small_array_inplace(self, ufuncname): - self.f(self.array_5, out = self.array_5) + self.f(self.array_5, out=self.array_5) def time_ufunc_small_int_array(self, ufuncname): self.f(self.array_int_3) diff --git a/numpy/_core/code_generators/generate_umath.py b/numpy/_core/code_generators/generate_umath.py index e9b7989afd28..b80a0a52ba36 100644 --- a/numpy/_core/code_generators/generate_umath.py +++ b/numpy/_core/code_generators/generate_umath.py @@ -1570,7 +1570,7 @@ def make_ufuncs(funcdict): typenum=f"NPY_{english_upper(chartoname[c])}", count=uf.nin + uf.nout, name=name, - funcname = f"{english_upper(chartoname[c])}_{name}_indexed", + funcname=f"{english_upper(chartoname[c])}_{name}_indexed", )) mlist.append(r"""PyDict_SetItemString(dictionary, "%s", f);""" % name) diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 0dbef44300b1..26844fabd437 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -2104,7 +2104,7 @@ def __rmul__(self, other): def test_array_wrap_array_priority(self): class ArrayPriorityBase(np.ndarray): @classmethod - def __array_wrap__(cls, array, context=None, return_scalar = False): + def __array_wrap__(cls, array, context=None, return_scalar=False): return cls class ArrayPriorityMinus0(ArrayPriorityBase): diff --git a/numpy/_core/tests/test_umath_accuracy.py b/numpy/_core/tests/test_umath_accuracy.py index 0d3c99bf7d54..a0e0cbccc596 100644 --- a/numpy/_core/tests/test_umath_accuracy.py +++ b/numpy/_core/tests/test_umath_accuracy.py @@ -75,7 +75,7 @@ def test_validate_transcendentals(self): assert_array_max_ulp(npfunc(inval), outval, maxulperr) @pytest.mark.skipif(IS_AVX512FP16, - reason = "SVML FP16 have slightly higher ULP errors") + reason="SVML FP16 have slightly higher ULP errors") @pytest.mark.parametrize("ufunc", UNARY_OBJECT_UFUNCS) def test_validate_fp16_transcendentals(self, ufunc): with np.errstate(all='ignore'): diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py index 1adc2d6228c4..3c1b4500793b 100644 --- a/numpy/f2py/auxfuncs.py +++ b/numpy/f2py/auxfuncs.py @@ -923,7 +923,7 @@ def getuseblocks(pymod): all_uses.extend([x for x in modblock.get("use").keys() if "__" not in x]) return all_uses -def process_f2cmap_dict(f2cmap_all, new_map, c2py_map, verbose = False): +def process_f2cmap_dict(f2cmap_all, new_map, c2py_map, verbose=False): """ Update the Fortran-to-C type mapping dictionary with new mappings and return a list of successfully mapped C types. 
diff --git a/numpy/f2py/tests/test_array_from_pyobj.py b/numpy/f2py/tests/test_array_from_pyobj.py index e0e366fb94a2..9bdd91f47638 100644 --- a/numpy/f2py/tests/test_array_from_pyobj.py +++ b/numpy/f2py/tests/test_array_from_pyobj.py @@ -33,7 +33,7 @@ def setup_module(): src = [ get_testdir() / "wrapmodule.c", ] - wrap = util.build_meson(src, module_name = "test_array_from_pyobj_ext") + wrap = util.build_meson(src, module_name="test_array_from_pyobj_ext") def flags_info(arr): diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py index 7917d2fb6b7b..bf994ffa07a5 100644 --- a/numpy/f2py/tests/test_regression.py +++ b/numpy/f2py/tests/test_regression.py @@ -53,7 +53,7 @@ def ubound(xl, xh): return xh - xl + 1 rval = self.module.foo(is_=xlow, ie_=xhigh, arr=xvec[:ubound(xlow, xhigh)]) - expval = np.arange(11, dtype = np.float32) + expval = np.arange(11, dtype=np.float32) assert np.allclose(rval, expval) diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 782068eac206..97fec7071ad5 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -1962,7 +1962,7 @@ def f(x): def test_bad_input(self): with assert_raises(TypeError): - A = np.vectorize(pyfunc = 3) + A = np.vectorize(pyfunc=3) def test_no_keywords(self): with assert_raises(TypeError): @@ -3887,7 +3887,7 @@ def test_quantile_preserve_int_type(self, dtype): def test_q_zero_one(self, method): # gh-24710 arr = [10, 11, 12] - quantile = np.quantile(arr, q = [0, 1], method=method) + quantile = np.quantile(arr, q=[0, 1], method=method) assert_equal(quantile, np.array([10, 12])) @pytest.mark.parametrize("method", quantile_methods) @@ -4163,10 +4163,10 @@ class TestLerp: min_value=0, max_value=1), t1=st.floats(allow_nan=False, allow_infinity=False, min_value=0, max_value=1), - a = st.floats(allow_nan=False, allow_infinity=False, - min_value=-1e300, max_value=1e300), - b = st.floats(allow_nan=False, allow_infinity=False, - min_value=-1e300, max_value=1e300)) + a=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300), + b=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300)) def test_linear_interpolation_formula_monotonic(self, t0, t1, a, b): l0 = nfb._lerp(a, b, t0) l1 = nfb._lerp(a, b, t1) diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py index 49ec7c34456f..bfb0248ebdcf 100644 --- a/numpy/lib/tests/test_histograms.py +++ b/numpy/lib/tests/test_histograms.py @@ -588,7 +588,7 @@ def test_simple_range(self): x3 = np.linspace(-100, -50, testlen) x = np.hstack((x1, x2, x3)) for estimator, numbins in expectedResults.items(): - a, b = np.histogram(x, estimator, range = (-20, 20)) + a, b = np.histogram(x, estimator, range=(-20, 20)) msg = f"For the {estimator} estimator" msg += f" with datasize of {testlen}" assert_equal(len(a), numbins, err_msg=msg) diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index ba76f3517526..c3f9fcde4a0a 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -56,7 +56,7 @@ __all__ = [ ] def count_masked(arr, axis=...): ... -def masked_all(shape, dtype = ...): ... +def masked_all(shape, dtype=...): ... def masked_all_like(arr): ... class _fromnxfunction: @@ -96,8 +96,8 @@ def compress_nd(x, axis=...): ... def compress_rowcols(x, axis=...): ... def compress_rows(a): ... def compress_cols(a): ... -def mask_rows(a, axis = ...): ... -def mask_cols(a, axis = ...): ... +def mask_rows(a, axis=...): ... 
+def mask_cols(a, axis=...): ... def ediff1d(arr, to_end=..., to_begin=...): ... def unique(ar1, return_index=..., return_inverse=...): ... def intersect1d(ar1, ar2, assume_unique=...): ... @@ -107,7 +107,7 @@ def isin(element, test_elements, assume_unique=..., invert=...): ... def union1d(ar1, ar2): ... def setdiff1d(ar1, ar2, assume_unique=...): ... def cov(x, y=..., rowvar=..., bias=..., allow_masked=..., ddof=...): ... -def corrcoef(x, y=..., rowvar=..., bias = ..., allow_masked=..., ddof = ...): ... +def corrcoef(x, y=..., rowvar=..., bias=..., allow_masked=..., ddof=...): ... class MAxisConcatenator(AxisConcatenator): @staticmethod diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 280d94bc0fe8..f660123f394d 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -997,8 +997,8 @@ def test_mvoid_multidim_print(self): assert_(str(t_2d[0]) == "([[1, --], [--, 4]], 1.0)") assert_(repr(t_2d[0]) == "([[1, --], [--, 4]], 1.0)") - t_ne = masked_array(data=[(1, (1, 1))], - mask=[(True, (True, False))], + t_ne = masked_array(data = [(1, (1, 1))], + mask = [(True, (True, False))], dtype = [('a', '/release/installers]") parser.add_argument( "-t", "--test", - action = 'store_true', + action='store_true', help="only list available wheels, do not download") args = parser.parse_args() diff --git a/tools/swig/test/setup.py b/tools/swig/test/setup.py index bc310043d82e..c925f358ec7b 100755 --- a/tools/swig/test/setup.py +++ b/tools/swig/test/setup.py @@ -46,16 +46,16 @@ ) _Fortran = Extension("_Fortran", - ["Fortran_wrap.cxx", - "Fortran.cxx"], - include_dirs = [numpy_include], - ) + ["Fortran_wrap.cxx", + "Fortran.cxx"], + include_dirs = [numpy_include], + ) _Flat = Extension("_Flat", - ["Flat_wrap.cxx", - "Flat.cxx"], - include_dirs = [numpy_include], - ) + ["Flat_wrap.cxx", + "Flat.cxx"], + include_dirs = [numpy_include], + ) # NumyTypemapTests setup setup(name = "NumpyTypemapTests", From ba5bf8def8796f664c83faa1f5abebcc5f5ce78a Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sat, 19 Apr 2025 12:06:18 +0200 Subject: [PATCH 04/67] STY: Partially apply ruff/pycodestyle rule E265 Block comment should start with `# ` For now, keep commented out code as is. 
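As an illustration (hypothetical lines written in the spirit of the hunks below), E265 requires a block comment to start with `# `, a single hash followed by a space; commented-out code is deliberately kept as is for now:

    #check that shape is preserved    <- E265: missing space after the hash
    # check that shape is preserved   <- fixed

    #x = compute(x)                   <- commented-out code, left alone for now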
--- numpy/_core/code_generators/generate_umath.py | 22 ++++++------ numpy/_core/records.py | 2 +- numpy/_core/tests/test_dtype.py | 2 +- numpy/_core/tests/test_einsum.py | 6 ++-- numpy/_core/tests/test_indexing.py | 2 +- numpy/_core/tests/test_multiarray.py | 4 +-- numpy/_core/tests/test_nditer.py | 2 +- numpy/_core/tests/test_numeric.py | 2 +- numpy/_core/tests/test_records.py | 12 +++---- numpy/_core/tests/test_regression.py | 12 +++---- numpy/_core/tests/test_scalarmath.py | 6 ++-- numpy/_core/tests/test_scalarprint.py | 2 +- numpy/_core/tests/test_shape_base.py | 4 +-- numpy/_core/tests/test_umath.py | 6 ++-- numpy/_core/tests/test_umath_complex.py | 4 +-- numpy/conftest.py | 2 +- numpy/lib/_function_base_impl.py | 4 +-- numpy/lib/tests/test_format.py | 6 ++-- numpy/lib/tests/test_function_base.py | 4 +-- numpy/lib/tests/test_index_tricks.py | 2 +- numpy/lib/tests/test_io.py | 2 +- numpy/linalg/tests/test_linalg.py | 2 +- numpy/ma/core.py | 2 +- numpy/ma/tests/test_extras.py | 2 +- numpy/ma/tests/test_mrecords.py | 4 +-- numpy/ma/tests/test_old_ma.py | 2 +- numpy/polynomial/tests/test_chebyshev.py | 34 +++++++++---------- numpy/polynomial/tests/test_hermite.py | 26 +++++++------- numpy/polynomial/tests/test_hermite_e.py | 26 +++++++------- numpy/polynomial/tests/test_laguerre.py | 26 +++++++------- numpy/polynomial/tests/test_legendre.py | 26 +++++++------- numpy/polynomial/tests/test_polynomial.py | 30 ++++++++-------- numpy/testing/_private/utils.py | 2 +- tools/swig/test/testSuperTensor.py | 4 +-- 34 files changed, 147 insertions(+), 147 deletions(-) diff --git a/numpy/_core/code_generators/generate_umath.py b/numpy/_core/code_generators/generate_umath.py index b80a0a52ba36..35b5ad92ac82 100644 --- a/numpy/_core/code_generators/generate_umath.py +++ b/numpy/_core/code_generators/generate_umath.py @@ -259,16 +259,16 @@ def english_upper(s): return uppered -#each entry in defdict is a Ufunc object. +# each entry in defdict is a Ufunc object. -#name: [string of chars for which it is defined, -# string of characters using func interface, -# tuple of strings giving funcs for data, -# (in, out), or (instr, outstr) giving the signature as character codes, -# identity, -# docstring, -# output specification (optional) -# ] +# name: [string of chars for which it is defined, +# string of characters using func interface, +# tuple of strings giving funcs for data, +# (in, out), or (instr, outstr) giving the signature as character codes, +# identity, +# docstring, +# output specification (optional) +# ] chartoname = { '?': 'bool', @@ -396,7 +396,7 @@ def english_upper(s): TD(O, f='PyNumber_Multiply'), indexed=intfltcmplx ), -#'true_divide' : aliased to divide in umathmodule.c:initumath +# 'true_divide' : aliased to divide in umathmodule.c:initumath 'floor_divide': Ufunc(2, 1, None, # One is only a unit to the right, not the left docstrings.get('numpy._core.umath.floor_divide'), @@ -1382,7 +1382,7 @@ def indent(st, spaces): } } -#for each name +# for each name # 1) create functions, data, and signature # 2) fill in functions and data in InitOperators # 3) add function. 
diff --git a/numpy/_core/records.py b/numpy/_core/records.py index 09ac92f9c1f1..3e2d48d5f267 100644 --- a/numpy/_core/records.py +++ b/numpy/_core/records.py @@ -228,7 +228,7 @@ def __getattribute__(self, attr): try: dt = obj.dtype except AttributeError: - #happens if field is Object type + # happens if field is Object type return obj if dt.names is not None: return obj.view((self.__class__, obj.dtype)) diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 5d458729d278..68698fc229fb 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -742,7 +742,7 @@ def test_shape_invalid(self): assert_raises(ValueError, np.dtype, [('a', 'f4', (-1, -1))]) def test_alignment(self): - #Check that subarrays are aligned + # Check that subarrays are aligned t1 = np.dtype('(1,)i4', align=True) t2 = np.dtype('2i4', align=True) assert_equal(t1.alignment, t2.alignment) diff --git a/numpy/_core/tests/test_einsum.py b/numpy/_core/tests/test_einsum.py index 5aa75b5a6b97..649e05c4476d 100644 --- a/numpy/_core/tests/test_einsum.py +++ b/numpy/_core/tests/test_einsum.py @@ -1240,7 +1240,7 @@ def test_path_type_input(self): assert_almost_equal(noopt, opt) def test_path_type_input_internal_trace(self): - #gh-20962 + # gh-20962 path_test = self.build_operands('cab,cdd->ab') exp_path = ['einsum_path', (1,), (0, 1)] @@ -1266,7 +1266,7 @@ def test_path_type_input_invalid(self): RuntimeError, np.einsum_path, *path_test, optimize=exp_path) def test_spaces(self): - #gh-10794 + # gh-10794 arr = np.array([[1]]) for sp in itertools.product(['', ' '], repeat=4): # no error for any spacing @@ -1279,7 +1279,7 @@ def test_overlap(): # sanity check c = np.einsum('ij,jk->ik', a, b) assert_equal(c, d) - #gh-10080, out overlaps one of the operands + # gh-10080, out overlaps one of the operands c = np.einsum('ij,jk->ik', a, b, out=b) assert_equal(c, d) diff --git a/numpy/_core/tests/test_indexing.py b/numpy/_core/tests/test_indexing.py index 410a68b7efb4..b65533bbc5ef 100644 --- a/numpy/_core/tests/test_indexing.py +++ b/numpy/_core/tests/test_indexing.py @@ -492,7 +492,7 @@ def test_unaligned(self): x = x.view(np.dtype("S8")) x[...] = np.array("b" * 8, dtype="S") b = np.arange(d.size) - #trivial + # trivial assert_equal(d[b], d) d[b] = x # nontrivial diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 6d97124d66c0..e8abf1aef01a 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -2295,11 +2295,11 @@ def test_void_sort(self): arr[::-1].sort() def test_sort_raises(self): - #gh-9404 + # gh-9404 arr = np.array([0, datetime.now(), 1], dtype=object) for kind in self.sort_kinds: assert_raises(TypeError, arr.sort, kind=kind) - #gh-3879 + # gh-3879 class Raiser: def raises_anything(*args, **kwargs): diff --git a/numpy/_core/tests/test_nditer.py b/numpy/_core/tests/test_nditer.py index 5bf0a6b7b2f4..9f60b67ba5b1 100644 --- a/numpy/_core/tests/test_nditer.py +++ b/numpy/_core/tests/test_nditer.py @@ -1113,7 +1113,7 @@ def test_iter_object_arrays_conversions(): x[...] 
+= 1 assert_equal(a, np.arange(6) + 1) - #Non-contiguous value array + # Non-contiguous value array a = np.zeros((6,), dtype=[('p', 'i1'), ('a', 'i4')]) a = a['a'] a[:] = np.arange(6) + 98172488 diff --git a/numpy/_core/tests/test_numeric.py b/numpy/_core/tests/test_numeric.py index d821c14c3bad..21dae72168de 100644 --- a/numpy/_core/tests/test_numeric.py +++ b/numpy/_core/tests/test_numeric.py @@ -4142,7 +4142,7 @@ def test_number_of_arguments(self): assert_equal(mit.numiter, j) def test_broadcast_error_kwargs(self): - #gh-13455 + # gh-13455 arrs = [np.empty((5, 6, 7))] mit = np.broadcast(*arrs) mit2 = np.broadcast(*arrs, **{}) # noqa: PIE804 diff --git a/numpy/_core/tests/test_records.py b/numpy/_core/tests/test_records.py index e8a757dc3d42..9e30887942ae 100644 --- a/numpy/_core/tests/test_records.py +++ b/numpy/_core/tests/test_records.py @@ -185,31 +185,31 @@ def test_recarray_views(self): dtype=[('foo', int), ('bar', 'S4')]) b = np.array([1, 2, 3, 4, 5], dtype=np.int64) - #check that np.rec.array gives right dtypes + # check that np.rec.array gives right dtypes assert_equal(np.rec.array(a).dtype.type, np.record) assert_equal(type(np.rec.array(a)), np.recarray) assert_equal(np.rec.array(b).dtype.type, np.int64) assert_equal(type(np.rec.array(b)), np.recarray) - #check that viewing as recarray does the same + # check that viewing as recarray does the same assert_equal(a.view(np.recarray).dtype.type, np.record) assert_equal(type(a.view(np.recarray)), np.recarray) assert_equal(b.view(np.recarray).dtype.type, np.int64) assert_equal(type(b.view(np.recarray)), np.recarray) - #check that view to non-structured dtype preserves type=np.recarray + # check that view to non-structured dtype preserves type=np.recarray r = np.rec.array(np.ones(4, dtype="f4,i4")) rv = r.view('f8').view('f4,i4') assert_equal(type(rv), np.recarray) assert_equal(rv.dtype.type, np.record) - #check that getitem also preserves np.recarray and np.record + # check that getitem also preserves np.recarray and np.record r = np.rec.array(np.ones(4, dtype=[('a', 'i4'), ('b', 'i4'), ('c', 'i4,i4')])) assert_equal(r['c'].dtype.type, np.record) assert_equal(type(r['c']), np.recarray) - #and that it preserves subclasses (gh-6949) + # and that it preserves subclasses (gh-6949) class C(np.recarray): pass @@ -233,7 +233,7 @@ class C(np.recarray): assert_equal(r.view('V8').dtype.type, np.void) assert_equal(r.view(('i8', 'i4,i4')).dtype.type, np.int64) - #check that we can undo the view + # check that we can undo the view arrs = [np.ones(4, dtype='f4,i4'), np.ones(4, dtype='f8')] for arr in arrs: rec = np.rec.array(arr) diff --git a/numpy/_core/tests/test_regression.py b/numpy/_core/tests/test_regression.py index b8b539946909..8aca446b3920 100644 --- a/numpy/_core/tests/test_regression.py +++ b/numpy/_core/tests/test_regression.py @@ -1243,18 +1243,18 @@ def test_void_scalar_with_titles(self): assert_(arr[0][1] == 4) def test_void_scalar_constructor(self): - #Issue #1550 + # Issue #1550 - #Create test string data, construct void scalar from data and assert - #that void scalar contains original data. + # Create test string data, construct void scalar from data and assert + # that void scalar contains original data. test_string = np.array("test") test_string_void_scalar = np._core.multiarray.scalar( np.dtype(("V", test_string.dtype.itemsize)), test_string.tobytes()) assert_(test_string_void_scalar.view(test_string.dtype) == test_string) - #Create record scalar, construct from data and assert that - #reconstructed scalar is correct. 
+ # Create record scalar, construct from data and assert that + # reconstructed scalar is correct. test_record = np.ones((), "i,i") test_record_void_scalar = np._core.multiarray.scalar( test_record.dtype, test_record.tobytes()) @@ -2456,7 +2456,7 @@ def __len__(self): @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python') def test_to_ctypes(self): - #gh-14214 + # gh-14214 arr = np.zeros((2 ** 31 + 1,), 'b') assert arr.size * arr.itemsize > 2 ** 31 c_arr = np.ctypeslib.as_ctypes(arr) diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py index 1842f5edd08b..0b086df21c60 100644 --- a/numpy/_core/tests/test_scalarmath.py +++ b/numpy/_core/tests/test_scalarmath.py @@ -562,13 +562,13 @@ def test_numpy_scalar_relational_operators(self): assert_(not np.array(1, dtype=dt1)[()] < np.array(0, dtype=dt2)[()], f"type {dt1} and {dt2} failed") - #Unsigned integers + # Unsigned integers for dt1 in 'BHILQP': assert_(-1 < np.array(1, dtype=dt1)[()], f"type {dt1} failed") assert_(not -1 > np.array(1, dtype=dt1)[()], f"type {dt1} failed") assert_(-1 != np.array(1, dtype=dt1)[()], f"type {dt1} failed") - #unsigned vs signed + # unsigned vs signed for dt2 in 'bhilqp': assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()], f"type {dt1} and {dt2} failed") @@ -577,7 +577,7 @@ def test_numpy_scalar_relational_operators(self): assert_(np.array(1, dtype=dt1)[()] != np.array(-1, dtype=dt2)[()], f"type {dt1} and {dt2} failed") - #Signed integers and floats + # Signed integers and floats for dt1 in 'bhlqp' + np.typecodes['Float']: assert_(1 > np.array(-1, dtype=dt1)[()], f"type {dt1} failed") assert_(not 1 < np.array(-1, dtype=dt1)[()], f"type {dt1} failed") diff --git a/numpy/_core/tests/test_scalarprint.py b/numpy/_core/tests/test_scalarprint.py index 16a9267e235c..298eb232eafb 100644 --- a/numpy/_core/tests/test_scalarprint.py +++ b/numpy/_core/tests/test_scalarprint.py @@ -303,7 +303,7 @@ def test_dragon4_positional_interface_overflow(self, tp, pad_val): fpos = np.format_float_positional - #gh-28068 + # gh-28068 with pytest.raises(RuntimeError, match="Float formatting result too large"): fpos(tp('1.047'), unique=False, precision=pad_val) diff --git a/numpy/_core/tests/test_shape_base.py b/numpy/_core/tests/test_shape_base.py index 58a36e8c022f..9e4ef3a8e6e9 100644 --- a/numpy/_core/tests/test_shape_base.py +++ b/numpy/_core/tests/test_shape_base.py @@ -477,13 +477,13 @@ def test_stack(): with pytest.raises(TypeError, match="arrays to stack must be"): stack(x for x in range(3)) - #casting and dtype test + # casting and dtype test a = np.array([1, 2, 3]) b = np.array([2.5, 3.5, 4.5]) res = np.stack((a, b), axis=1, casting="unsafe", dtype=np.int64) expected_res = np.array([[1, 2], [2, 3], [3, 4]]) assert_array_equal(res, expected_res) - #casting and dtype with TypeError + # casting and dtype with TypeError with assert_raises(TypeError): stack((a, b), dtype=np.int64, axis=1, casting="safe") diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index 0efa51bfd772..22ad1b8ac302 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -1167,14 +1167,14 @@ def assert_complex_equal(x, y): assert_array_equal(x.real, y.real) assert_array_equal(x.imag, y.imag) - #Complex powers with positive real part will not generate a warning + # Complex powers with positive real part will not generate a warning assert_complex_equal(np.power(zero, 1 + 4j), zero) assert_complex_equal(np.power(zero, 2 - 3j), zero) - 
#Testing zero values when real part is greater than zero + # Testing zero values when real part is greater than zero assert_complex_equal(np.power(zero, 1 + 1j), zero) assert_complex_equal(np.power(zero, 1 + 0j), zero) assert_complex_equal(np.power(zero, 1 - 1j), zero) - #Complex powers will negative real part or 0 (provided imaginary + # Complex powers will negative real part or 0 (provided imaginary # part is not zero) will generate a NAN and hence a RUNTIME warning with pytest.warns(expected_warning=RuntimeWarning) as r: assert_complex_equal(np.power(zero, -1 + 1j), cnan) diff --git a/numpy/_core/tests/test_umath_complex.py b/numpy/_core/tests/test_umath_complex.py index 81c1447c4cbb..eb221f15f327 100644 --- a/numpy/_core/tests/test_umath_complex.py +++ b/numpy/_core/tests/test_umath_complex.py @@ -16,7 +16,7 @@ # At least on Windows the results of many complex functions are not conforming # to the C99 standard. See ticket 1574. # Ditto for Solaris (ticket 1642) and OS X on PowerPC. -#FIXME: this will probably change when we require full C99 compatibility +# FIXME: this will probably change when we require full C99 compatibility with np.errstate(all='ignore'): functions_seem_flaky = ((np.exp(complex(np.inf, 0)).imag != 0) or (np.log(complex(ncu.NZERO, 0)).imag != np.pi)) @@ -333,7 +333,7 @@ def test_special_values(self): def _check_ninf_nan(dummy): msgform = "csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)" z = np.sqrt(np.array(complex(-np.inf, np.nan))) - #Fixme: ugly workaround for isinf bug. + # FIXME: ugly workaround for isinf bug. with np.errstate(invalid='ignore'): if not (np.isnan(z.real) and np.isinf(z.imag)): raise AssertionError(msgform % (z.real, z.imag)) diff --git a/numpy/conftest.py b/numpy/conftest.py index 9ae2b290ee71..84d856e55684 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -102,7 +102,7 @@ def pytest_terminal_summary(terminalreporter, exitstatus, config): tr.line("code that re-enables the GIL should do so in a subprocess.") pytest.exit("GIL re-enabled during tests", returncode=1) -#FIXME when yield tests are gone. +# FIXME when yield tests are gone. @pytest.hookimpl() def pytest_itemcollected(item): """ diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 33bc64511009..e44b27a68adb 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -2434,8 +2434,8 @@ def __init__(self, pyfunc=np._NoValue, otypes=None, doc=None, excluded=None, cache=False, signature=None): if (pyfunc != np._NoValue) and (not callable(pyfunc)): - #Splitting the error message to keep - #the length below 79 characters. + # Splitting the error message to keep + # the length below 79 characters. part1 = "When used as a decorator, " part2 = "only accepts keyword arguments." 
raise TypeError(part1 + part2) diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index dd6f4fc9c765..cf076ac01eb5 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -396,7 +396,7 @@ ] -#BytesIO that reads a random number of bytes at a time +# BytesIO that reads a random number of bytes at a time class BytesIOSRandomSize(BytesIO): def read(self, size=None): import random @@ -423,7 +423,7 @@ def roundtrip_randsize(arr): def roundtrip_truncated(arr): f = BytesIO() format.write_array(f, arr) - #BytesIO is one byte short + # BytesIO is one byte short f2 = BytesIO(f.getvalue()[0:-1]) arr2 = format.read_array(f2) return arr2 @@ -456,7 +456,7 @@ def test_file_truncated(tmp_path): if arr.dtype != object: with open(path, 'wb') as f: format.write_array(f, arr) - #truncate the file by one byte + # truncate the file by one byte with open(path, 'rb+') as f: f.seek(-1, os.SEEK_END) f.truncate() diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 97fec7071ad5..7329287721c4 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -1915,7 +1915,7 @@ class subclass(np.ndarray): assert_equal(r, m * v) def test_name(self): - #See gh-23021 + # gh-23021 @np.vectorize def f2(a, b): return a + b @@ -3857,7 +3857,7 @@ def test_fraction(self): assert_equal(np.quantile(x, Fraction(1, 2)), Fraction(7, 2)) def test_complex(self): - #See gh-22652 + # gh-22652 arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='G') assert_raises(TypeError, np.quantile, arr_c, 0.5) arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='D') diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py index bf249100d17b..d17bd9e6259b 100644 --- a/numpy/lib/tests/test_index_tricks.py +++ b/numpy/lib/tests/test_index_tricks.py @@ -151,7 +151,7 @@ def test_clipmodes(self): ValueError, np.ravel_multi_index, [5, 1, -1, 2], (4, 3, 7, 12)) def test_writeability(self): - # See gh-7269 + # gh-7269 x, y = np.unravel_index([1, 2, 3], (4, 5)) assert_(x.flags.writeable) assert_(y.flags.writeable) diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 3276584779a7..6939e5ceffac 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -2359,7 +2359,7 @@ def test_recfromcsv(self): assert_(isinstance(test, np.recarray)) assert_equal(test, control) - #gh-10394 + # gh-10394 data = TextIO('color\n"red"\n"blue"') test = recfromcsv(data, converters={0: lambda x: x.strip('\"')}) control = np.array([('red',), ('blue',)], dtype=[('color', (str, 4))]) diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index 1a79629814e9..b47bb180a486 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -1034,7 +1034,7 @@ class TestMatrixPower: rshft_all = [rshft_0, rshft_1, rshft_2, rshft_3] noninv = array([[1, 0], [0, 0]]) stacked = np.block([[[rshft_0]]] * 2) - #FIXME the 'e' dtype might work in future + # FIXME the 'e' dtype might work in future dtnoinv = [object, np.dtype('e'), np.dtype('g'), np.dtype('G')] def test_large_power(self, dt): diff --git a/numpy/ma/core.py b/numpy/ma/core.py index 3e291efa6032..add0ad9770d7 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -5183,7 +5183,7 @@ def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None): """ (this docstring should be overwritten) """ - #!!!: implement out + test! + # !!!: implement out + test! 
m = self._mask if m is nomask: result = super().trace(offset=offset, axis1=axis1, axis2=axis2, diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py index c2aa0c29a556..d4eeec59723e 100644 --- a/numpy/ma/tests/test_extras.py +++ b/numpy/ma/tests/test_extras.py @@ -1717,7 +1717,7 @@ def test_isin(self): c = isin(a, b) assert_(isinstance(c, MaskedArray)) assert_array_equal(c, ec) - #compare results of np.isin to ma.isin + # compare results of np.isin to ma.isin d = np.isin(a, b[~b.mask]) & ~a.mask assert_array_equal(c, d) diff --git a/numpy/ma/tests/test_mrecords.py b/numpy/ma/tests/test_mrecords.py index ff463fc4d912..b73d32796772 100644 --- a/numpy/ma/tests/test_mrecords.py +++ b/numpy/ma/tests/test_mrecords.py @@ -411,14 +411,14 @@ def test_fromarrays(self): def test_fromrecords(self): # Test construction from records. (mrec, nrec, ddtype) = self.data - #...... + # ...... palist = [(1, 'abc', 3.7000002861022949, 0), (2, 'xy', 6.6999998092651367, 1), (0, ' ', 0.40000000596046448, 0)] pa = recfromrecords(palist, names='c1, c2, c3, c4') mpa = fromrecords(palist, names='c1, c2, c3, c4') assert_equal_records(pa, mpa) - #..... + # ..... _mrec = fromrecords(nrec) assert_equal(_mrec.dtype, mrec.dtype) for field in _mrec.dtype.names: diff --git a/numpy/ma/tests/test_old_ma.py b/numpy/ma/tests/test_old_ma.py index 958601047109..e21dd39768e1 100644 --- a/numpy/ma/tests/test_old_ma.py +++ b/numpy/ma/tests/test_old_ma.py @@ -651,7 +651,7 @@ def test_testToPython(self): def test_testScalarArithmetic(self): xm = array(0, mask=1) - #TODO FIXME: Find out what the following raises a warning in r8247 + # TODO FIXME: Find out what the following raises a warning in r8247 with np.errstate(divide='ignore'): assert_((1 / array(0)).mask) assert_((1 + xm).mask) diff --git a/numpy/polynomial/tests/test_chebyshev.py b/numpy/polynomial/tests/test_chebyshev.py index 9d892a1f61d5..7733ded90412 100644 --- a/numpy/polynomial/tests/test_chebyshev.py +++ b/numpy/polynomial/tests/test_chebyshev.py @@ -133,10 +133,10 @@ class TestEvaluation: y = polyval(x, [1., 2., 3.]) def test_chebval(self): - #check empty input + # check empty input assert_equal(cheb.chebval([], [1]).size, 0) - #check normal input) + # check normal input) x = np.linspace(-1, 1) y = [polyval(x, c) for c in Tlist] for i in range(10): @@ -145,7 +145,7 @@ def test_chebval(self): res = cheb.chebval(x, [0] * i + [1]) assert_almost_equal(res, tgt, err_msg=msg) - #check that shape is preserved + # check that shape is preserved for i in range(3): dims = [2] * i x = np.zeros(dims) @@ -157,15 +157,15 @@ def test_chebval2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, cheb.chebval2d, x1, x2[:2], self.c2d) - #test values + # test values tgt = y1 * y2 res = cheb.chebval2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = cheb.chebval2d(z, z, self.c2d) assert_(res.shape == (2, 3)) @@ -174,15 +174,15 @@ def test_chebval3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, cheb.chebval3d, x1, x2, x3[:2], self.c3d) - #test values + # test values tgt = y1 * y2 * y3 res = cheb.chebval3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = cheb.chebval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) @@ -191,12 +191,12 @@ def test_chebgrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = 
np.einsum('i,j->ij', y1, y2) res = cheb.chebgrid2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = cheb.chebgrid2d(z, z, self.c2d) assert_(res.shape == (2, 3) * 2) @@ -205,12 +205,12 @@ def test_chebgrid3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j,k->ijk', y1, y2, y3) res = cheb.chebgrid3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = cheb.chebgrid3d(z, z, z, self.c3d) assert_(res.shape == (2, 3) * 3) @@ -590,11 +590,11 @@ def test_weight(self): assert_almost_equal(res, tgt) def test_chebpts1(self): - #test exceptions + # test exceptions assert_raises(ValueError, cheb.chebpts1, 1.5) assert_raises(ValueError, cheb.chebpts1, 0) - #test points + # test points tgt = [0] assert_almost_equal(cheb.chebpts1(1), tgt) tgt = [-0.70710678118654746, 0.70710678118654746] @@ -605,11 +605,11 @@ def test_chebpts1(self): assert_almost_equal(cheb.chebpts1(4), tgt) def test_chebpts2(self): - #test exceptions + # test exceptions assert_raises(ValueError, cheb.chebpts2, 1.5) assert_raises(ValueError, cheb.chebpts2, 1) - #test points + # test points tgt = [-1, 1] assert_almost_equal(cheb.chebpts2(2), tgt) tgt = [-1, 0, 1] diff --git a/numpy/polynomial/tests/test_hermite.py b/numpy/polynomial/tests/test_hermite.py index 3e2b7c0032c6..2f17091137b9 100644 --- a/numpy/polynomial/tests/test_hermite.py +++ b/numpy/polynomial/tests/test_hermite.py @@ -120,10 +120,10 @@ class TestEvaluation: y = polyval(x, [1., 2., 3.]) def test_hermval(self): - #check empty input + # check empty input assert_equal(herm.hermval([], [1]).size, 0) - #check normal input) + # check normal input) x = np.linspace(-1, 1) y = [polyval(x, c) for c in Hlist] for i in range(10): @@ -132,7 +132,7 @@ def test_hermval(self): res = herm.hermval(x, [0] * i + [1]) assert_almost_equal(res, tgt, err_msg=msg) - #check that shape is preserved + # check that shape is preserved for i in range(3): dims = [2] * i x = np.zeros(dims) @@ -144,15 +144,15 @@ def test_hermval2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, herm.hermval2d, x1, x2[:2], self.c2d) - #test values + # test values tgt = y1 * y2 res = herm.hermval2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = herm.hermval2d(z, z, self.c2d) assert_(res.shape == (2, 3)) @@ -161,15 +161,15 @@ def test_hermval3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, herm.hermval3d, x1, x2, x3[:2], self.c3d) - #test values + # test values tgt = y1 * y2 * y3 res = herm.hermval3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = herm.hermval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) @@ -178,12 +178,12 @@ def test_hermgrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j->ij', y1, y2) res = herm.hermgrid2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = herm.hermgrid2d(z, z, self.c2d) assert_(res.shape == (2, 3) * 2) @@ -192,12 +192,12 @@ def test_hermgrid3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j,k->ijk', y1, y2, y3) res = herm.hermgrid3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) 
res = herm.hermgrid3d(z, z, z, self.c3d) assert_(res.shape == (2, 3) * 3) diff --git a/numpy/polynomial/tests/test_hermite_e.py b/numpy/polynomial/tests/test_hermite_e.py index bd567d513027..ce55e2098b97 100644 --- a/numpy/polynomial/tests/test_hermite_e.py +++ b/numpy/polynomial/tests/test_hermite_e.py @@ -120,10 +120,10 @@ class TestEvaluation: y = polyval(x, [1., 2., 3.]) def test_hermeval(self): - #check empty input + # check empty input assert_equal(herme.hermeval([], [1]).size, 0) - #check normal input) + # check normal input) x = np.linspace(-1, 1) y = [polyval(x, c) for c in Helist] for i in range(10): @@ -132,7 +132,7 @@ def test_hermeval(self): res = herme.hermeval(x, [0] * i + [1]) assert_almost_equal(res, tgt, err_msg=msg) - #check that shape is preserved + # check that shape is preserved for i in range(3): dims = [2] * i x = np.zeros(dims) @@ -144,15 +144,15 @@ def test_hermeval2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, herme.hermeval2d, x1, x2[:2], self.c2d) - #test values + # test values tgt = y1 * y2 res = herme.hermeval2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = herme.hermeval2d(z, z, self.c2d) assert_(res.shape == (2, 3)) @@ -161,15 +161,15 @@ def test_hermeval3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, herme.hermeval3d, x1, x2, x3[:2], self.c3d) - #test values + # test values tgt = y1 * y2 * y3 res = herme.hermeval3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = herme.hermeval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) @@ -178,12 +178,12 @@ def test_hermegrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j->ij', y1, y2) res = herme.hermegrid2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = herme.hermegrid2d(z, z, self.c2d) assert_(res.shape == (2, 3) * 2) @@ -192,12 +192,12 @@ def test_hermegrid3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j,k->ijk', y1, y2, y3) res = herme.hermegrid3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = herme.hermegrid3d(z, z, z, self.c3d) assert_(res.shape == (2, 3) * 3) diff --git a/numpy/polynomial/tests/test_laguerre.py b/numpy/polynomial/tests/test_laguerre.py index f19c4d2fc2aa..1dd1977de684 100644 --- a/numpy/polynomial/tests/test_laguerre.py +++ b/numpy/polynomial/tests/test_laguerre.py @@ -117,10 +117,10 @@ class TestEvaluation: y = polyval(x, [1., 2., 3.]) def test_lagval(self): - #check empty input + # check empty input assert_equal(lag.lagval([], [1]).size, 0) - #check normal input) + # check normal input) x = np.linspace(-1, 1) y = [polyval(x, c) for c in Llist] for i in range(7): @@ -129,7 +129,7 @@ def test_lagval(self): res = lag.lagval(x, [0] * i + [1]) assert_almost_equal(res, tgt, err_msg=msg) - #check that shape is preserved + # check that shape is preserved for i in range(3): dims = [2] * i x = np.zeros(dims) @@ -141,15 +141,15 @@ def test_lagval2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, lag.lagval2d, x1, x2[:2], self.c2d) - #test values + # test values tgt = y1 * y2 res = lag.lagval2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = 
np.ones((2, 3)) res = lag.lagval2d(z, z, self.c2d) assert_(res.shape == (2, 3)) @@ -158,15 +158,15 @@ def test_lagval3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, lag.lagval3d, x1, x2, x3[:2], self.c3d) - #test values + # test values tgt = y1 * y2 * y3 res = lag.lagval3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = lag.lagval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) @@ -175,12 +175,12 @@ def test_laggrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j->ij', y1, y2) res = lag.laggrid2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = lag.laggrid2d(z, z, self.c2d) assert_(res.shape == (2, 3) * 2) @@ -189,12 +189,12 @@ def test_laggrid3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j,k->ijk', y1, y2, y3) res = lag.laggrid3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = lag.laggrid3d(z, z, z, self.c3d) assert_(res.shape == (2, 3) * 3) diff --git a/numpy/polynomial/tests/test_legendre.py b/numpy/polynomial/tests/test_legendre.py index 80b428e31dcc..ee23b4a2527f 100644 --- a/numpy/polynomial/tests/test_legendre.py +++ b/numpy/polynomial/tests/test_legendre.py @@ -121,10 +121,10 @@ class TestEvaluation: y = polyval(x, [1., 2., 3.]) def test_legval(self): - #check empty input + # check empty input assert_equal(leg.legval([], [1]).size, 0) - #check normal input) + # check normal input) x = np.linspace(-1, 1) y = [polyval(x, c) for c in Llist] for i in range(10): @@ -133,7 +133,7 @@ def test_legval(self): res = leg.legval(x, [0] * i + [1]) assert_almost_equal(res, tgt, err_msg=msg) - #check that shape is preserved + # check that shape is preserved for i in range(3): dims = [2] * i x = np.zeros(dims) @@ -145,15 +145,15 @@ def test_legval2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, leg.legval2d, x1, x2[:2], self.c2d) - #test values + # test values tgt = y1 * y2 res = leg.legval2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = leg.legval2d(z, z, self.c2d) assert_(res.shape == (2, 3)) @@ -162,15 +162,15 @@ def test_legval3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, leg.legval3d, x1, x2, x3[:2], self.c3d) - #test values + # test values tgt = y1 * y2 * y3 res = leg.legval3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = leg.legval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) @@ -179,12 +179,12 @@ def test_leggrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j->ij', y1, y2) res = leg.leggrid2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = leg.leggrid2d(z, z, self.c2d) assert_(res.shape == (2, 3) * 2) @@ -193,12 +193,12 @@ def test_leggrid3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j,k->ijk', y1, y2, y3) res = leg.leggrid3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = leg.leggrid3d(z, z, z, self.c3d) assert_(res.shape == (2, 3) * 3) diff --git 
a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py index 177bd0893ec9..e7c4fdfe8996 100644 --- a/numpy/polynomial/tests/test_polynomial.py +++ b/numpy/polynomial/tests/test_polynomial.py @@ -155,10 +155,10 @@ class TestEvaluation: y = poly.polyval(x, [1., 2., 3.]) def test_polyval(self): - #check empty input + # check empty input assert_equal(poly.polyval([], [1]).size, 0) - #check normal input) + # check normal input) x = np.linspace(-1, 1) y = [x**i for i in range(5)] for i in range(5): @@ -169,7 +169,7 @@ def test_polyval(self): res = poly.polyval(x, [0, -1, 0, 1]) assert_almost_equal(res, tgt) - #check that shape is preserved + # check that shape is preserved for i in range(3): dims = [2] * i x = np.zeros(dims) @@ -177,13 +177,13 @@ def test_polyval(self): assert_equal(poly.polyval(x, [1, 0]).shape, dims) assert_equal(poly.polyval(x, [1, 0, 0]).shape, dims) - #check masked arrays are processed correctly + # check masked arrays are processed correctly mask = [False, True, False] mx = np.ma.array([1, 2, 3], mask=mask) res = np.polyval([7, 5, 3], mx) assert_array_equal(res.mask, mask) - #check subtypes of ndarray are preserved + # check subtypes of ndarray are preserved class C(np.ndarray): pass @@ -258,16 +258,16 @@ def test_polyval2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises_regex(ValueError, 'incompatible', poly.polyval2d, x1, x2[:2], self.c2d) - #test values + # test values tgt = y1 * y2 res = poly.polyval2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = poly.polyval2d(z, z, self.c2d) assert_(res.shape == (2, 3)) @@ -276,16 +276,16 @@ def test_polyval3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises_regex(ValueError, 'incompatible', poly.polyval3d, x1, x2, x3[:2], self.c3d) - #test values + # test values tgt = y1 * y2 * y3 res = poly.polyval3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = poly.polyval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) @@ -294,12 +294,12 @@ def test_polygrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j->ij', y1, y2) res = poly.polygrid2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = poly.polygrid2d(z, z, self.c2d) assert_(res.shape == (2, 3) * 2) @@ -308,12 +308,12 @@ def test_polygrid3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j,k->ijk', y1, y2, y3) res = poly.polygrid3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = poly.polygrid3d(z, z, z, self.c3d) assert_(res.shape == (2, 3) * 3) diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 4bc10e2f58f6..5cbb5130dc1f 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -136,7 +136,7 @@ def GetPerformanceAttributes(object, counter, instance=None, # you should copy this function, but keep the counter open, and call # CollectQueryData() each time you need to know. 
# See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp - #(dead link) + # (dead link) # My older explanation for this was that the "AddCounter" process # forced the CPU to 100%, but the above makes more sense :) import win32pdh diff --git a/tools/swig/test/testSuperTensor.py b/tools/swig/test/testSuperTensor.py index 11cbc76f2642..5f185884641e 100644 --- a/tools/swig/test/testSuperTensor.py +++ b/tools/swig/test/testSuperTensor.py @@ -29,8 +29,8 @@ def testNorm(self): norm = SuperTensor.__dict__[self.typeStr + "Norm"] supertensor = np.arange(2 * 2 * 2 * 2, dtype=self.typeCode).reshape((2, 2, 2, 2)) - #Note: cludge to get an answer of the same type as supertensor. - #Answer is simply sqrt(sum(supertensor*supertensor)/16) + # Note: cludge to get an answer of the same type as supertensor. + # Answer is simply sqrt(sum(supertensor*supertensor)/16) answer = np.array([np.sqrt(np.sum(supertensor.astype('d') * supertensor) / 16.)], dtype=self.typeCode)[0] # noqa: E501 self.assertAlmostEqual(norm(supertensor), answer, 6) From fe417f5bd5743f325641c761c504145146163de2 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Sat, 19 Apr 2025 12:09:32 +0200 Subject: [PATCH 05/67] STY: Partially apply ruff:pycodestyle rule E266 Too many leading `#` before block comment For now, do not modify titles. --- numpy/_core/tests/test_api.py | 4 ++-- numpy/ctypeslib/_ctypeslib.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/numpy/_core/tests/test_api.py b/numpy/_core/tests/test_api.py index f0200d59cafe..8d7c617898e6 100644 --- a/numpy/_core/tests/test_api.py +++ b/numpy/_core/tests/test_api.py @@ -82,8 +82,8 @@ def test_array_array(): dtype=[('f0', int), ('f1', float), ('f2', str)]) o = type("o", (object,), {"__array_struct__": a.__array_struct__}) - ## wasn't what I expected... is np.array(o) supposed to equal a ? - ## instead we get a array([...], dtype=">V18") + # wasn't what I expected... is np.array(o) supposed to equal a ? 
+ # instead we get a array([...], dtype=">V18") assert_equal(bytes(np.array(o).data), bytes(a.data)) # test array diff --git a/numpy/ctypeslib/_ctypeslib.py b/numpy/ctypeslib/_ctypeslib.py index bd5632702d8c..40b9e58b5912 100644 --- a/numpy/ctypeslib/_ctypeslib.py +++ b/numpy/ctypeslib/_ctypeslib.py @@ -158,9 +158,9 @@ def load_library(libname, loader_path): try: return ctypes.cdll[libpath] except OSError: - ## defective lib file + # defective lib file raise - ## if no successful return in the libname_ext loop: + # if no successful return in the libname_ext loop: raise OSError("no file with expected extension") From ff0ad59f2f152e4848b5e7214b18ede02a597194 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Tue, 22 Apr 2025 10:17:43 +0200 Subject: [PATCH 06/67] STY: Take into account reviewer's comments Co-authored-by: Joren Hammudoglu --- doc/source/f2py/code/setup_example.py | 18 +++++----- numpy/_core/tests/test_arrayprint.py | 2 +- numpy/_core/tests/test_cpu_features.py | 2 +- numpy/_core/tests/test_datetime.py | 2 +- numpy/_core/tests/test_einsum.py | 2 +- numpy/_core/tests/test_multiarray.py | 3 +- numpy/lib/tests/test_format.py | 2 +- numpy/ma/tests/test_core.py | 46 +++++++++++++------------- 8 files changed, 38 insertions(+), 39 deletions(-) diff --git a/doc/source/f2py/code/setup_example.py b/doc/source/f2py/code/setup_example.py index 654c448a4b75..ef79ad1ecfb6 100644 --- a/doc/source/f2py/code/setup_example.py +++ b/doc/source/f2py/code/setup_example.py @@ -1,16 +1,16 @@ from numpy.distutils.core import Extension -ext1 = Extension(name = 'scalar', - sources = ['scalar.f']) -ext2 = Extension(name = 'fib2', - sources = ['fib2.pyf', 'fib1.f']) +ext1 = Extension(name='scalar', + sources=['scalar.f']) +ext2 = Extension(name='fib2', + sources=['fib2.pyf', 'fib1.f']) if __name__ == "__main__": from numpy.distutils.core import setup - setup(name = 'f2py_example', - description = "F2PY Users Guide examples", - author = "Pearu Peterson", - author_email = "pearu@cens.ioc.ee", - ext_modules = [ext1, ext2] + setup(name='f2py_example', + description="F2PY Users Guide examples", + author="Pearu Peterson", + author_email="pearu@cens.ioc.ee", + ext_modules=[ext1, ext2] ) # End of setup_example.py diff --git a/numpy/_core/tests/test_arrayprint.py b/numpy/_core/tests/test_arrayprint.py index d31f29271e93..c15034f56ee2 100644 --- a/numpy/_core/tests/test_arrayprint.py +++ b/numpy/_core/tests/test_arrayprint.py @@ -320,7 +320,7 @@ def test_structure_format_float(self): assert_equal(np.array2string(array_scalar), "(1., 2.12345679, 3.)") def test_unstructured_void_repr(self): - a = np.array([27, 91, 50, 75, 7, 65, 10, 8, + a = np.array([27, 91, 50, 75, 7, 65, 10, 8, 27, 91, 51, 49, 109, 82, 101, 100], dtype='u1').view('V8') assert_equal(repr(a[0]), r"np.void(b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08')") diff --git a/numpy/_core/tests/test_cpu_features.py b/numpy/_core/tests/test_cpu_features.py index 62b0eac5dda5..f4bd02ab55e5 100644 --- a/numpy/_core/tests/test_cpu_features.py +++ b/numpy/_core/tests/test_cpu_features.py @@ -117,7 +117,7 @@ def load_flags_auxv(self): @pytest.mark.skipif( sys.platform == 'emscripten', - reason= ( + reason=( "The subprocess module is not available on WASM platforms and" " therefore this test class cannot be properly executed." 
), diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index d2e4e5ec6cad..8d48e8a6630a 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -2492,7 +2492,7 @@ def test_isfinite_isinf_isnan_units(self, unit, dstr): '''check isfinite, isinf, isnan for all units of M, m dtypes ''' arr_val = [123, -321, "NaT"] - arr = np.array(arr_val, dtype= dstr % unit) + arr = np.array(arr_val, dtype=(dstr % unit)) pos = np.array([True, True, False]) neg = np.array([False, False, True]) false = np.array([False, False, False]) diff --git a/numpy/_core/tests/test_einsum.py b/numpy/_core/tests/test_einsum.py index 649e05c4476d..f3fd137b7c5c 100644 --- a/numpy/_core/tests/test_einsum.py +++ b/numpy/_core/tests/test_einsum.py @@ -764,7 +764,7 @@ def __mul__(self, other): return 42 objMult = np.array([Mult()]) - objNULL = np.ndarray(buffer = b'\0' * np.intp(0).itemsize, shape=1, dtype=object) + objNULL = np.ndarray(buffer=b'\0' * np.intp(0).itemsize, shape=1, dtype=object) with pytest.raises(TypeError): np.einsum("i,j", [1], objNULL) diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index e8abf1aef01a..0a62cb6945f0 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -9427,8 +9427,7 @@ def _make_readonly(x): np.array([1, 2, 3]), np.array([['one', 'two'], ['three', 'four']]), np.array((1, 2), dtype='i4,i4'), - np.zeros((2,), dtype= - np.dtype({ + np.zeros((2,), dtype=np.dtype({ "formats": [' Date: Thu, 24 Apr 2025 11:31:17 +0200 Subject: [PATCH 07/67] STY: More taking into account reviewer's comments Unwrap the array arbitrarily split over two lines, as it fits the new default line-length limit of 88 characters. Co-authored-by: Joren Hammudoglu --- numpy/_core/tests/test_arrayprint.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/numpy/_core/tests/test_arrayprint.py b/numpy/_core/tests/test_arrayprint.py index c15034f56ee2..09ed71f342a2 100644 --- a/numpy/_core/tests/test_arrayprint.py +++ b/numpy/_core/tests/test_arrayprint.py @@ -320,8 +320,8 @@ def test_structure_format_float(self): assert_equal(np.array2string(array_scalar), "(1., 2.12345679, 3.)") def test_unstructured_void_repr(self): - a = np.array([27, 91, 50, 75, 7, 65, 10, 8, - 27, 91, 51, 49, 109, 82, 101, 100], dtype='u1').view('V8') + a = np.array([27, 91, 50, 75, 7, 65, 10, 8, 27, 91, 51, 49, 109, 82, 101, 100], + dtype='u1').view('V8') assert_equal(repr(a[0]), r"np.void(b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08')") assert_equal(str(a[0]), r"b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'") From e079be34ed89afc0b1f30ae052c7edb2459d4e3c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 28 Apr 2025 18:37:48 +0000 Subject: [PATCH 08/67] MAINT: Bump pypa/cibuildwheel from 2.23.2 to 2.23.3 Bumps [pypa/cibuildwheel](https://github.com/pypa/cibuildwheel) from 2.23.2 to 2.23.3. - [Release notes](https://github.com/pypa/cibuildwheel/releases) - [Changelog](https://github.com/pypa/cibuildwheel/blob/main/docs/changelog.md) - [Commits](https://github.com/pypa/cibuildwheel/compare/d04cacbc9866d432033b1d09142936e6a0e2121a...faf86a6ed7efa889faf6996aa23820831055001a) --- updated-dependencies: - dependency-name: pypa/cibuildwheel dependency-version: 2.23.3 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/emscripten.yml | 2 +- .github/workflows/wheels.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index a1b6e923b131..fea77068e128 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -49,7 +49,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@d04cacbc9866d432033b1d09142936e6a0e2121a # 2.23.2 + - uses: pypa/cibuildwheel@faf86a6ed7efa889faf6996aa23820831055001a # 2.23.3 env: CIBW_PLATFORM: pyodide diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 159ffc4f4131..1107b3caf6f7 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -171,7 +171,7 @@ jobs: echo "CIBW_BUILD_FRONTEND=pip; args: --no-build-isolation" >> "$GITHUB_ENV" - name: Build wheels - uses: pypa/cibuildwheel@d04cacbc9866d432033b1d09142936e6a0e2121a # v2.23.2 + uses: pypa/cibuildwheel@faf86a6ed7efa889faf6996aa23820831055001a # v2.23.3 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} From 46e2ea1b755110fbd30ccdcce48a1d2a6e31e478 Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Tue, 29 Apr 2025 08:13:45 +0200 Subject: [PATCH 09/67] BLD: update vendored Meson: v1.6.1 and iOS support [wheel build] These are PRs 18 and 19 from https://github.com/numpy/meson --- vendored-meson/meson | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vendored-meson/meson b/vendored-meson/meson index 7300f5fd4c1c..23ec306e6510 160000 --- a/vendored-meson/meson +++ b/vendored-meson/meson @@ -1 +1 @@ -Subproject commit 7300f5fd4c1c8b0406faeec4cc631f11f1ea324c +Subproject commit 23ec306e65107f5ad39a03709799dc90ea678a54 From 241006e863d688651387c961aa281d13b96e0c3f Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 29 Apr 2025 08:33:12 -0600 Subject: [PATCH 10/67] BUG: set the array_owned flag on the StringDType singleton --- numpy/_core/src/multiarray/stringdtype/dtype.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index 41dfa7c21ca1..167ab86f5dc4 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -850,14 +850,17 @@ init_string_dtype(void) return -1; } - PyArray_Descr *singleton = - NPY_DT_CALL_default_descr(&PyArray_StringDType); + PyArray_StringDTypeObject *singleton = + (PyArray_StringDTypeObject *)NPY_DT_CALL_default_descr(&PyArray_StringDType); if (singleton == NULL) { return -1; } - PyArray_StringDType.singleton = singleton; + // never associate the singleton with an array + singleton->array_owned = 1; + + PyArray_StringDType.singleton = (PyArray_Descr *)singleton; PyArray_StringDType.type_num = NPY_VSTRING; for (int i = 0; PyArray_StringDType_casts[i] != NULL; i++) { From 52170a9b776f9cb4f3aef91cedf3bd05670f0c27 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Tue, 29 Apr 2025 08:33:35 -0600 Subject: [PATCH 11/67] ENH: acquire the allocator lock when setting the array_owned flag --- numpy/_core/src/multiarray/stringdtype/dtype.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index 167ab86f5dc4..a06e7a1ed1b6 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -633,11 +633,16 @@ PyArray_Descr * 
stringdtype_finalize_descr(PyArray_Descr *dtype) { PyArray_StringDTypeObject *sdtype = (PyArray_StringDTypeObject *)dtype; + // acquire the allocator lock in case the descriptor we want to finalize + // is shared between threads, see gh-28813 + npy_string_allocator *allocator = NpyString_acquire_allocator(sdtype); if (sdtype->array_owned == 0) { sdtype->array_owned = 1; + NpyString_release_allocator(allocator); Py_INCREF(dtype); return dtype; } + NpyString_release_allocator(allocator); PyArray_StringDTypeObject *ret = (PyArray_StringDTypeObject *)new_stringdtype_instance( sdtype->na_object, sdtype->coerce); ret->array_owned = 1; From 5aafa1f8e74a1018b66498534cfbb1169a1a69a2 Mon Sep 17 00:00:00 2001 From: Krishna Bindumadhavan <31140965+f2013519@users.noreply.github.com> Date: Tue, 29 Apr 2025 21:59:05 +0530 Subject: [PATCH 12/67] ENH: Improve Floating Point Cast Performance on ARM (#28769) * WIP,Prototype: Use Neon SIMD to improve half->float cast performance [ci skip] [skip ci] * Support Neon SIMD float32->float16 cast and update scalar path to use hardware cast * Add missing header * Relax VECTOR_ARITHMETIC check and add comment on need for SIMD routines * Enable hardware cast on x86 when F16C is available * Relax fp exceptions in Clang to enable vectorization for cast * Ignore fp exceptions only for float casts * Fix build * Attempt to fix test failure on ARM64 native * Work around gcc bug for double->half casts * Add release note --- .../upcoming_changes/28769.performance.rst | 8 +++ .../multiarray/lowlevel_strided_loops.c.src | 71 +++++++++++++++---- 2 files changed, 66 insertions(+), 13 deletions(-) create mode 100644 doc/release/upcoming_changes/28769.performance.rst diff --git a/doc/release/upcoming_changes/28769.performance.rst b/doc/release/upcoming_changes/28769.performance.rst new file mode 100644 index 000000000000..7fb8f02282f6 --- /dev/null +++ b/doc/release/upcoming_changes/28769.performance.rst @@ -0,0 +1,8 @@ +Performance improvements for ``np.float16`` casts +-------------------------------------------------- +Earlier, floating point casts to and from ``np.float16`` types +were emulated in software on all platforms. + +Now, on ARM devices that support Neon float16 intrinsics (such as +recent Apple Silicon), the native float16 path is used to achieve +the best performance. 
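
A minimal way to exercise the float16 cast paths the release note above refers to — plain NumPy, no new API; whether the native FP16 route is actually taken depends on the target CPU and on how NumPy was built:

    import numpy as np

    # float32 -> float16 and back; on ARM with Neon FP16 (e.g. recent Apple
    # Silicon) these casts should now go through the hardware path.
    rng = np.random.default_rng(0)
    x = rng.random(1_000_000, dtype=np.float32)
    h = x.astype(np.float16)    # narrowing cast
    y = h.astype(np.float32)    # widening cast
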
diff --git a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src index 1299e55b4258..01ffd225274f 100644 --- a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src +++ b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src @@ -708,6 +708,16 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * /************* STRIDED CASTING SPECIALIZED FUNCTIONS *************/ +#if defined(NPY_HAVE_NEON_FP16) + #define EMULATED_FP16 0 + #define NATIVE_FP16 1 + typedef _Float16 _npy_half; +#else + #define EMULATED_FP16 1 + #define NATIVE_FP16 0 + typedef npy_half _npy_half; +#endif + /**begin repeat * * #NAME1 = BOOL, @@ -723,15 +733,16 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * * #type1 = npy_bool, * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong, * npy_byte, npy_short, npy_int, npy_long, npy_longlong, - * npy_half, npy_float, npy_double, npy_longdouble, + * _npy_half, npy_float, npy_double, npy_longdouble, * npy_cfloat, npy_cdouble, npy_clongdouble# * #rtype1 = npy_bool, * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong, * npy_byte, npy_short, npy_int, npy_long, npy_longlong, - * npy_half, npy_float, npy_double, npy_longdouble, + * _npy_half, npy_float, npy_double, npy_longdouble, * npy_float, npy_double, npy_longdouble# * #is_bool1 = 1, 0*17# - * #is_half1 = 0*11, 1, 0*6# + * #is_emu_half1 = 0*11, EMULATED_FP16, 0*6# + * #is_native_half1 = 0*11, NATIVE_FP16, 0*6# * #is_float1 = 0*12, 1, 0, 0, 1, 0, 0# * #is_double1 = 0*13, 1, 0, 0, 1, 0# * #is_complex1 = 0*15, 1*3# @@ -752,15 +763,16 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * * #type2 = npy_bool, * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong, * npy_byte, npy_short, npy_int, npy_long, npy_longlong, - * npy_half, npy_float, npy_double, npy_longdouble, + * _npy_half, npy_float, npy_double, npy_longdouble, * npy_cfloat, npy_cdouble, npy_clongdouble# * #rtype2 = npy_bool, * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong, * npy_byte, npy_short, npy_int, npy_long, npy_longlong, - * npy_half, npy_float, npy_double, npy_longdouble, + * _npy_half, npy_float, npy_double, npy_longdouble, * npy_float, npy_double, npy_longdouble# * #is_bool2 = 1, 0*17# - * #is_half2 = 0*11, 1, 0*6# + * #is_emu_half2 = 0*11, EMULATED_FP16, 0*6# + * #is_native_half2 = 0*11, NATIVE_FP16, 0*6# * #is_float2 = 0*12, 1, 0, 0, 1, 0, 0# * #is_double2 = 0*13, 1, 0, 0, 1, 0# * #is_complex2 = 0*15, 1*3# @@ -774,8 +786,8 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * #if !(NPY_USE_UNALIGNED_ACCESS && !@aligned@) -/* For half types, don't use actual double/float types in conversion */ -#if @is_half1@ || @is_half2@ +/* For emulated half types, don't use actual double/float types in conversion */ +#if @is_emu_half1@ || @is_emu_half2@ # if @is_float1@ # define _TYPE1 npy_uint32 @@ -801,13 +813,13 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * #endif /* Determine an appropriate casting conversion function */ -#if @is_half1@ +#if @is_emu_half1@ # if @is_float2@ # define _CONVERT_FN(x) npy_halfbits_to_floatbits(x) # elif @is_double2@ # define _CONVERT_FN(x) npy_halfbits_to_doublebits(x) -# elif @is_half2@ +# elif @is_emu_half2@ # define _CONVERT_FN(x) (x) # elif @is_bool2@ # define _CONVERT_FN(x) ((npy_bool)!npy_half_iszero(x)) @@ -815,13 +827,13 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * # define _CONVERT_FN(x) ((_TYPE2)npy_half_to_float(x)) # endif -#elif @is_half2@ +#elif @is_emu_half2@ # if @is_float1@ # define _CONVERT_FN(x) npy_floatbits_to_halfbits(x) # elif @is_double1@ # define 
_CONVERT_FN(x) npy_doublebits_to_halfbits(x) -# elif @is_half1@ +# elif @is_emu_half1@ # define _CONVERT_FN(x) (x) # elif @is_bool1@ # define _CONVERT_FN(x) npy_float_to_half((float)(x!=0)) @@ -839,7 +851,29 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * #endif -static NPY_GCC_OPT_3 int +// Enable auto-vectorization for floating point casts with clang +#if @is_native_half1@ || @is_float1@ || @is_double1@ + #if @is_native_half2@ || @is_float2@ || @is_double2@ + #if defined(__clang__) && !defined(__EMSCRIPTEN__) + #if __clang_major__ >= 12 + _Pragma("clang fp exceptions(ignore)") + #endif + #endif + #endif +#endif + +// Work around GCC bug for double->half casts. For SVE and +// OPT_LEVEL > 1, it implements this as double->single->half +// which is incorrect as it introduces double rounding with +// narrowing casts. +#if (@is_double1@ && @is_native_half2@) && \ + defined(NPY_HAVE_SVE) && defined(__GNUC__) + #define GCC_CAST_OPT_LEVEL __attribute__((optimize("O1"))) +#else + #define GCC_CAST_OPT_LEVEL NPY_GCC_OPT_3 +#endif + +static GCC_CAST_OPT_LEVEL int @prefix@_cast_@name1@_to_@name2@( PyArrayMethod_Context *context, char *const *args, const npy_intp *dimensions, const npy_intp *strides, @@ -933,6 +967,17 @@ static NPY_GCC_OPT_3 int return 0; } +#if @is_native_half1@ || @is_float1@ || @is_double1@ + #if @is_native_half2@ || @is_float2@ || @is_double2@ + #if defined(__clang__) && !defined(__EMSCRIPTEN__) + #if __clang_major__ >= 12 + _Pragma("clang fp exceptions(strict)") + #endif + #endif + #endif +#endif + +#undef GCC_CAST_OPT_LEVEL #undef _CONVERT_FN #undef _TYPE2 #undef _TYPE1 From 11b9e5f1b2c2daacf4c4af11c81e44017b729b7e Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli Date: Tue, 29 Apr 2025 21:52:57 +0100 Subject: [PATCH 13/67] TYP: Use _Array1D alias in ``numpy.ma.core.pyi`` (#28847) --- numpy/ma/core.pyi | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 71652e047905..1a94e04ff3d4 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -226,9 +226,10 @@ _DTypeT = TypeVar("_DTypeT", bound=dtype) _DTypeT_co = TypeVar("_DTypeT_co", bound=dtype, covariant=True) _ArrayT = TypeVar("_ArrayT", bound=ndarray[Any, Any]) _ScalarT = TypeVar("_ScalarT", bound=generic) -_ScalarT_co = TypeVar("_ScalarT_co", bound=generic) +_ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) # A subset of `MaskedArray` that can be parametrized w.r.t. `np.generic` _MaskedArray: TypeAlias = MaskedArray[_Shape, dtype[_ScalarT]] +_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] MaskType = bool_ nomask: bool_[Literal[False]] @@ -1166,9 +1167,9 @@ def sort( stable: Literal[False] | None = False, ) -> NDArray[Any]: ... @overload -def compressed(x: _ArrayLike[_ScalarT_co]) -> ndarray[tuple[int], dtype[_ScalarT_co]]: ... +def compressed(x: _ArrayLike[_ScalarT_co]) -> _Array1D[_ScalarT_co]: ... @overload -def compressed(x: ArrayLike) -> ndarray[tuple[int], dtype]: ... +def compressed(x: ArrayLike) -> _Array1D[Any]: ... def concatenate(arrays, axis=...): ... def diag(v, k=...): ... def left_shift(a, n): ... 
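
For context on the ``_Array1D`` alias introduced in the patch above, a small runtime sketch of the behaviour it encodes — ``np.ma.compressed`` always returns the unmasked data as a plain one-dimensional array, whatever the input shape:

    import numpy as np

    marr = np.ma.masked_array([[1.0, 2.0], [3.0, 4.0]],
                              mask=[[False, True], [False, False]])
    flat = np.ma.compressed(marr)   # array([1., 3., 4.])
    print(flat.ndim)                # 1 -- always one-dimensional
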
From 14ea82d79dc9489def603ff614bbda82094e1ba6 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli Date: Tue, 29 Apr 2025 21:57:59 +0100 Subject: [PATCH 14/67] TYP: Type ``MaskedArray.ravel`` (#28848) --- numpy/ma/core.pyi | 2 +- numpy/typing/tests/data/reveal/ma.pyi | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 1a94e04ff3d4..35fd6f223de1 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -484,7 +484,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def count(self, axis: _ShapeLike | None, keepdims: Literal[True]) -> NDArray[int_]: ... - def ravel(self, order=...): ... + def ravel(self, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... def reshape(self, *s, **kwargs): ... def resize(self, newshape, refcheck=..., order=...): ... def put(self, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 5c1b04a03718..f7c99357cb41 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -283,6 +283,9 @@ assert_type(np.ma.allclose(AR_f4, MAR_f4), bool) assert_type(np.ma.allclose(AR_f4, MAR_f4, masked_equal=False), bool) assert_type(np.ma.allclose(AR_f4, MAR_f4, rtol=.4, atol=.3), bool) +assert_type(MAR_2d_f4.ravel(), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) +assert_type(MAR_1d.ravel(order='A'), np.ma.MaskedArray[tuple[int], np.dtype[Any]]) + assert_type(np.ma.getmask(MAR_f4), NDArray[np.bool] | np.bool) # PyRight detects this one correctly, but mypy doesn't: # `Revealed type is "Union[numpy.ndarray[Any, Any], numpy.bool[Any]]"` From 0b3efb14d88bfbaa9b288d23aea2c1798f27dad0 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli Date: Tue, 29 Apr 2025 22:27:45 +0100 Subject: [PATCH 15/67] TYP: Type ``MaskedArray.repeat``, improve overloads for ``NDArray.repeat``, ``generic.repeat``, and ``np.repeat`` (#28849) --- numpy/__init__.pyi | 11 +++++++++-- numpy/_core/fromnumeric.pyi | 16 ++++++++++++++-- numpy/ma/core.pyi | 15 ++++++++++++++- numpy/typing/tests/data/reveal/fromnumeric.pyi | 12 +++++++----- numpy/typing/tests/data/reveal/ma.pyi | 5 +++++ numpy/typing/tests/data/reveal/ndarray_misc.pyi | 9 ++++++--- 6 files changed, 55 insertions(+), 13 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 5c16b91b7772..d39bb02279fb 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2421,10 +2421,17 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): mode: _ModeKind = ..., ) -> _ArrayT: ... + @overload def repeat( self, repeats: _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., + axis: None = None, + ) -> ndarray[tuple[int], _DTypeT_co]: ... + @overload + def repeat( + self, + repeats: _ArrayLikeInt_co, + axis: SupportsIndex, ) -> ndarray[_Shape, _DTypeT_co]: ... def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ... @@ -3685,7 +3692,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): mode: _ModeKind = ..., ) -> _ArrayT: ... - def repeat(self, repeats: _ArrayLikeInt_co, axis: SupportsIndex | None = ...) -> NDArray[Self]: ... + def repeat(self, repeats: _ArrayLikeInt_co, axis: SupportsIndex | None = None) -> ndarray[tuple[int], dtype[Self]]: ... def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], dtype[Self]]: ... def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], dtype[Self]]: ... 
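
The split ``repeat`` overloads above mirror a runtime distinction worth keeping in mind — with no axis the result is always flattened to 1-D, while an explicit axis preserves the input's dimensionality:

    import numpy as np

    a = np.arange(6, dtype=np.float32).reshape(2, 3)
    flat = a.repeat(2)           # shape (12,): matches the tuple[int] overload
    kept = a.repeat(2, axis=0)   # shape (4, 3): matches the general overload
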
diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 9e30a84165b4..f974dc33a027 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -277,13 +277,25 @@ def choose( def repeat( a: _ArrayLike[_ScalarT], repeats: _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., + axis: None = None, +) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ... +@overload +def repeat( + a: _ArrayLike[_ScalarT], + repeats: _ArrayLikeInt_co, + axis: SupportsIndex, ) -> NDArray[_ScalarT]: ... @overload def repeat( a: ArrayLike, repeats: _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., + axis: None = None, +) -> np.ndarray[tuple[int], np.dtype[Any]]: ... +@overload +def repeat( + a: ArrayLike, + repeats: _ArrayLikeInt_co, + axis: SupportsIndex, ) -> NDArray[Any]: ... def put( diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 35fd6f223de1..9e755e5901da 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -778,7 +778,20 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): copy: Any diagonal: Any flatten: Any - repeat: Any + + @overload + def repeat( + self, + repeats: _ArrayLikeInt_co, + axis: None = None, + ) -> MaskedArray[tuple[int], _DTypeT_co]: ... + @overload + def repeat( + self, + repeats: _ArrayLikeInt_co, + axis: SupportsIndex, + ) -> MaskedArray[_Shape, _DTypeT_co]: ... + squeeze: Any swapaxes: Any T: Any diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index ee761a2762b7..0827b27a056b 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -46,11 +46,13 @@ assert_type(np.choose([1], [True, True]), npt.NDArray[Any]) assert_type(np.choose([1], AR_b), npt.NDArray[np.bool]) assert_type(np.choose([1], AR_b, out=AR_f4), npt.NDArray[np.float32]) -assert_type(np.repeat(b, 1), npt.NDArray[np.bool]) -assert_type(np.repeat(f4, 1), npt.NDArray[np.float32]) -assert_type(np.repeat(f, 1), npt.NDArray[Any]) -assert_type(np.repeat(AR_b, 1), npt.NDArray[np.bool]) -assert_type(np.repeat(AR_f4, 1), npt.NDArray[np.float32]) +assert_type(np.repeat(b, 1), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.repeat(b, 1, axis=0), npt.NDArray[np.bool]) +assert_type(np.repeat(f4, 1), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.repeat(f, 1), np.ndarray[tuple[int], np.dtype[Any]]) +assert_type(np.repeat(AR_b, 1), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.repeat(AR_f4, 1), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.repeat(AR_f4, 1, axis=0), npt.NDArray[np.float32]) # TODO: array_bdd tests for np.put() diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index f7c99357cb41..eb5b4508d954 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -276,6 +276,11 @@ assert_type(np.ma.filled([[1,2,3]]), NDArray[Any]) # https://github.com/numpy/numpy/pull/28742#discussion_r2048968375 assert_type(np.ma.filled(MAR_1d), np.ndarray[tuple[int], np.dtype]) # type: ignore[assert-type] +assert_type(MAR_b.repeat(3), np.ma.MaskedArray[tuple[int], np.dtype[np.bool]]) +assert_type(MAR_2d_f4.repeat(MAR_i8), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.repeat(MAR_i8, axis=None), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.repeat(MAR_i8, axis=0), MaskedNDArray[np.float32]) + assert_type(np.ma.allequal(AR_f4, MAR_f4), bool) assert_type(np.ma.allequal(AR_f4, MAR_f4, 
fill_value=False), bool) diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index 2016fb5c7971..682f9db50220 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -126,9 +126,12 @@ assert_type(f8.round(), np.float64) assert_type(AR_f8.round(), npt.NDArray[np.float64]) assert_type(AR_f8.round(out=B), SubClass) -assert_type(f8.repeat(1), npt.NDArray[np.float64]) -assert_type(AR_f8.repeat(1), npt.NDArray[np.float64]) -assert_type(B.repeat(1), npt.NDArray[np.object_]) +assert_type(f8.repeat(1), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(f8.repeat(1, axis=0), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_f8.repeat(1), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_f8.repeat(1, axis=0), npt.NDArray[np.float64]) +assert_type(B.repeat(1), np.ndarray[tuple[int], np.dtype[np.object_]]) +assert_type(B.repeat(1, axis=0), npt.NDArray[np.object_]) assert_type(f8.std(), Any) assert_type(AR_f8.std(), Any) From 445aa6be06fd382ccbf57d82c8d33dea69aa3884 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli Date: Wed, 30 Apr 2025 01:09:08 +0100 Subject: [PATCH 16/67] TYP: Improve consistency of (masked) array typing aliases (#28863) --- numpy/_typing/_add_docstring.py | 2 +- numpy/_typing/_array_like.py | 2 +- numpy/typing/tests/data/reveal/ma.pyi | 206 +++++++++++++------------- 3 files changed, 105 insertions(+), 105 deletions(-) diff --git a/numpy/_typing/_add_docstring.py b/numpy/_typing/_add_docstring.py index 493e775ebae2..da415f1b94c6 100644 --- a/numpy/_typing/_add_docstring.py +++ b/numpy/_typing/_add_docstring.py @@ -137,7 +137,7 @@ def _parse_docstrings() -> str: >>> import numpy.typing as npt >>> print(npt.NDArray) - numpy.ndarray[tuple[int, ...], numpy.dtype[+_ScalarT_co]] + numpy.ndarray[tuple[int, ...], numpy.dtype[~_ScalarT]] >>> print(npt.NDArray[np.float64]) numpy.ndarray[tuple[int, ...], numpy.dtype[numpy.float64]] diff --git a/numpy/_typing/_array_like.py b/numpy/_typing/_array_like.py index 56388db1155e..b4c291639d6a 100644 --- a/numpy/_typing/_array_like.py +++ b/numpy/_typing/_array_like.py @@ -23,7 +23,7 @@ _DTypeT = TypeVar("_DTypeT", bound=dtype[Any]) _DTypeT_co = TypeVar("_DTypeT_co", covariant=True, bound=dtype[Any]) -NDArray: TypeAlias = np.ndarray[_Shape, dtype[_ScalarT_co]] +NDArray: TypeAlias = np.ndarray[_Shape, dtype[_ScalarT]] # The `_SupportsArray` protocol only cares about the default dtype # (i.e. `dtype=None` or no `dtype` parameter at all) of the to-be returned diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index eb5b4508d954..5274deed90a1 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -5,10 +5,10 @@ import numpy as np from numpy import dtype, generic from numpy._typing import NDArray, _Shape -_ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) -MaskedNDArray: TypeAlias = np.ma.MaskedArray[_Shape, dtype[_ScalarT_co]] +_ScalarT = TypeVar("_ScalarT", bound=generic) +MaskedArray: TypeAlias = np.ma.MaskedArray[_Shape, dtype[_ScalarT]] -class MaskedNDArraySubclass(MaskedNDArray[np.complex128]): ... +class MaskedArraySubclass(MaskedArray[np.complex128]): ... 
AR_b: NDArray[np.bool] AR_f4: NDArray[np.float32] @@ -16,18 +16,18 @@ AR_dt64: NDArray[np.datetime64] AR_td64: NDArray[np.timedelta64] AR_o: NDArray[np.timedelta64] -MAR_b: MaskedNDArray[np.bool] -MAR_f4: MaskedNDArray[np.float32] -MAR_f8: MaskedNDArray[np.float64] -MAR_i8: MaskedNDArray[np.int64] -MAR_dt64: MaskedNDArray[np.datetime64] -MAR_td64: MaskedNDArray[np.timedelta64] -MAR_o: MaskedNDArray[np.object_] -MAR_s: MaskedNDArray[np.str_] -MAR_byte: MaskedNDArray[np.bytes_] -MAR_V: MaskedNDArray[np.void] +MAR_b: MaskedArray[np.bool] +MAR_f4: MaskedArray[np.float32] +MAR_f8: MaskedArray[np.float64] +MAR_i8: MaskedArray[np.int64] +MAR_dt64: MaskedArray[np.datetime64] +MAR_td64: MaskedArray[np.timedelta64] +MAR_o: MaskedArray[np.object_] +MAR_s: MaskedArray[np.str_] +MAR_byte: MaskedArray[np.bytes_] +MAR_V: MaskedArray[np.void] -MAR_subclass: MaskedNDArraySubclass +MAR_subclass: MaskedArraySubclass MAR_1d: np.ma.MaskedArray[tuple[int], np.dtype] MAR_2d_f4: np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]] @@ -49,9 +49,9 @@ assert_type(np.ma.min(MAR_b, axis=0), Any) assert_type(np.ma.min(MAR_f4, axis=0), Any) assert_type(np.ma.min(MAR_b, keepdims=True), Any) assert_type(np.ma.min(MAR_f4, keepdims=True), Any) -assert_type(np.ma.min(MAR_f4, out=MAR_subclass), MaskedNDArraySubclass) -assert_type(np.ma.min(MAR_f4, 0, MAR_subclass), MaskedNDArraySubclass) -assert_type(np.ma.min(MAR_f4, None, MAR_subclass), MaskedNDArraySubclass) +assert_type(np.ma.min(MAR_f4, out=MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.min(MAR_f4, 0, MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.min(MAR_f4, None, MAR_subclass), MaskedArraySubclass) assert_type(MAR_b.min(), np.bool) assert_type(MAR_f4.min(), np.float32) @@ -59,9 +59,9 @@ assert_type(MAR_b.min(axis=0), Any) assert_type(MAR_f4.min(axis=0), Any) assert_type(MAR_b.min(keepdims=True), Any) assert_type(MAR_f4.min(keepdims=True), Any) -assert_type(MAR_f4.min(out=MAR_subclass), MaskedNDArraySubclass) -assert_type(MAR_f4.min(0, MAR_subclass), MaskedNDArraySubclass) -assert_type(MAR_f4.min(None, MAR_subclass), MaskedNDArraySubclass) +assert_type(MAR_f4.min(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.min(0, MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.min(None, MAR_subclass), MaskedArraySubclass) assert_type(np.ma.max(MAR_b), np.bool) assert_type(np.ma.max(MAR_f4), np.float32) @@ -69,9 +69,9 @@ assert_type(np.ma.max(MAR_b, axis=0), Any) assert_type(np.ma.max(MAR_f4, axis=0), Any) assert_type(np.ma.max(MAR_b, keepdims=True), Any) assert_type(np.ma.max(MAR_f4, keepdims=True), Any) -assert_type(np.ma.max(MAR_f4, out=MAR_subclass), MaskedNDArraySubclass) -assert_type(np.ma.max(MAR_f4, 0, MAR_subclass), MaskedNDArraySubclass) -assert_type(np.ma.max(MAR_f4, None, MAR_subclass), MaskedNDArraySubclass) +assert_type(np.ma.max(MAR_f4, out=MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.max(MAR_f4, 0, MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.max(MAR_f4, None, MAR_subclass), MaskedArraySubclass) assert_type(MAR_b.max(), np.bool) assert_type(MAR_f4.max(), np.float32) @@ -79,9 +79,9 @@ assert_type(MAR_b.max(axis=0), Any) assert_type(MAR_f4.max(axis=0), Any) assert_type(MAR_b.max(keepdims=True), Any) assert_type(MAR_f4.max(keepdims=True), Any) -assert_type(MAR_f4.max(out=MAR_subclass), MaskedNDArraySubclass) -assert_type(MAR_f4.max(0, MAR_subclass), MaskedNDArraySubclass) -assert_type(MAR_f4.max(None, MAR_subclass), MaskedNDArraySubclass) +assert_type(MAR_f4.max(out=MAR_subclass), MaskedArraySubclass) 
+assert_type(MAR_f4.max(0, MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.max(None, MAR_subclass), MaskedArraySubclass) assert_type(np.ma.ptp(MAR_b), np.bool) assert_type(np.ma.ptp(MAR_f4), np.float32) @@ -89,9 +89,9 @@ assert_type(np.ma.ptp(MAR_b, axis=0), Any) assert_type(np.ma.ptp(MAR_f4, axis=0), Any) assert_type(np.ma.ptp(MAR_b, keepdims=True), Any) assert_type(np.ma.ptp(MAR_f4, keepdims=True), Any) -assert_type(np.ma.ptp(MAR_f4, out=MAR_subclass), MaskedNDArraySubclass) -assert_type(np.ma.ptp(MAR_f4, 0, MAR_subclass), MaskedNDArraySubclass) -assert_type(np.ma.ptp(MAR_f4, None, MAR_subclass), MaskedNDArraySubclass) +assert_type(np.ma.ptp(MAR_f4, out=MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.ptp(MAR_f4, 0, MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.ptp(MAR_f4, None, MAR_subclass), MaskedArraySubclass) assert_type(MAR_b.ptp(), np.bool) assert_type(MAR_f4.ptp(), np.float32) @@ -99,9 +99,9 @@ assert_type(MAR_b.ptp(axis=0), Any) assert_type(MAR_f4.ptp(axis=0), Any) assert_type(MAR_b.ptp(keepdims=True), Any) assert_type(MAR_f4.ptp(keepdims=True), Any) -assert_type(MAR_f4.ptp(out=MAR_subclass), MaskedNDArraySubclass) -assert_type(MAR_f4.ptp(0, MAR_subclass), MaskedNDArraySubclass) -assert_type(MAR_f4.ptp(None, MAR_subclass), MaskedNDArraySubclass) +assert_type(MAR_f4.ptp(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.ptp(0, MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.ptp(None, MAR_subclass), MaskedArraySubclass) assert_type(MAR_b.argmin(), np.intp) assert_type(MAR_f4.argmin(), np.intp) @@ -109,8 +109,8 @@ assert_type(MAR_f4.argmax(fill_value=6.28318, keepdims=False), np.intp) assert_type(MAR_b.argmin(axis=0), Any) assert_type(MAR_f4.argmin(axis=0), Any) assert_type(MAR_b.argmin(keepdims=True), Any) -assert_type(MAR_f4.argmin(out=MAR_subclass), MaskedNDArraySubclass) -assert_type(MAR_f4.argmin(None, None, out=MAR_subclass), MaskedNDArraySubclass) +assert_type(MAR_f4.argmin(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.argmin(None, None, out=MAR_subclass), MaskedArraySubclass) assert_type(np.ma.argmin(MAR_b), np.intp) assert_type(np.ma.argmin(MAR_f4), np.intp) @@ -118,8 +118,8 @@ assert_type(np.ma.argmin(MAR_f4, fill_value=6.28318, keepdims=False), np.intp) assert_type(np.ma.argmin(MAR_b, axis=0), Any) assert_type(np.ma.argmin(MAR_f4, axis=0), Any) assert_type(np.ma.argmin(MAR_b, keepdims=True), Any) -assert_type(np.ma.argmin(MAR_f4, out=MAR_subclass), MaskedNDArraySubclass) -assert_type(np.ma.argmin(MAR_f4, None, None, out=MAR_subclass), MaskedNDArraySubclass) +assert_type(np.ma.argmin(MAR_f4, out=MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.argmin(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubclass) assert_type(MAR_b.argmax(), np.intp) assert_type(MAR_f4.argmax(), np.intp) @@ -127,8 +127,8 @@ assert_type(MAR_f4.argmax(fill_value=6.28318, keepdims=False), np.intp) assert_type(MAR_b.argmax(axis=0), Any) assert_type(MAR_f4.argmax(axis=0), Any) assert_type(MAR_b.argmax(keepdims=True), Any) -assert_type(MAR_f4.argmax(out=MAR_subclass), MaskedNDArraySubclass) -assert_type(MAR_f4.argmax(None, None, out=MAR_subclass), MaskedNDArraySubclass) +assert_type(MAR_f4.argmax(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.argmax(None, None, out=MAR_subclass), MaskedArraySubclass) assert_type(np.ma.argmax(MAR_b), np.intp) assert_type(np.ma.argmax(MAR_f4), np.intp) @@ -136,40 +136,40 @@ assert_type(np.ma.argmax(MAR_f4, fill_value=6.28318, keepdims=False), np.intp) assert_type(np.ma.argmax(MAR_b, axis=0), Any) 
assert_type(np.ma.argmax(MAR_f4, axis=0), Any) assert_type(np.ma.argmax(MAR_b, keepdims=True), Any) -assert_type(np.ma.argmax(MAR_f4, out=MAR_subclass), MaskedNDArraySubclass) -assert_type(np.ma.argmax(MAR_f4, None, None, out=MAR_subclass), MaskedNDArraySubclass) +assert_type(np.ma.argmax(MAR_f4, out=MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.argmax(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubclass) assert_type(MAR_f4.sort(), None) assert_type(MAR_f4.sort(axis=0, kind='quicksort', order='K', endwith=False, fill_value=42., stable=False), None) -assert_type(np.ma.sort(MAR_f4), MaskedNDArray[np.float32]) -assert_type(np.ma.sort(MAR_subclass), MaskedNDArraySubclass) +assert_type(np.ma.sort(MAR_f4), MaskedArray[np.float32]) +assert_type(np.ma.sort(MAR_subclass), MaskedArraySubclass) assert_type(np.ma.sort([[0, 1], [2, 3]]), NDArray[Any]) assert_type(np.ma.sort(AR_f4), NDArray[np.float32]) assert_type(MAR_f8.take(0), np.float64) assert_type(MAR_1d.take(0), Any) -assert_type(MAR_f8.take([0]), MaskedNDArray[np.float64]) -assert_type(MAR_f8.take(0, out=MAR_subclass), MaskedNDArraySubclass) -assert_type(MAR_f8.take([0], out=MAR_subclass), MaskedNDArraySubclass) +assert_type(MAR_f8.take([0]), MaskedArray[np.float64]) +assert_type(MAR_f8.take(0, out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.take([0], out=MAR_subclass), MaskedArraySubclass) assert_type(np.ma.take(f, 0), Any) assert_type(np.ma.take(f4, 0), np.float32) assert_type(np.ma.take(MAR_f8, 0), np.float64) assert_type(np.ma.take(AR_f4, 0), np.float32) assert_type(np.ma.take(MAR_1d, 0), Any) -assert_type(np.ma.take(MAR_f8, [0]), MaskedNDArray[np.float64]) -assert_type(np.ma.take(AR_f4, [0]), MaskedNDArray[np.float32]) -assert_type(np.ma.take(MAR_f8, 0, out=MAR_subclass), MaskedNDArraySubclass) -assert_type(np.ma.take(MAR_f8, [0], out=MAR_subclass), MaskedNDArraySubclass) -assert_type(np.ma.take([1], [0]), MaskedNDArray[Any]) -assert_type(np.ma.take(np.eye(2), 1, axis=0), MaskedNDArray[np.float64]) +assert_type(np.ma.take(MAR_f8, [0]), MaskedArray[np.float64]) +assert_type(np.ma.take(AR_f4, [0]), MaskedArray[np.float32]) +assert_type(np.ma.take(MAR_f8, 0, out=MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.take(MAR_f8, [0], out=MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.take([1], [0]), MaskedArray[Any]) +assert_type(np.ma.take(np.eye(2), 1, axis=0), MaskedArray[np.float64]) assert_type(MAR_f4.partition(1), None) assert_type(MAR_V.partition(1, axis=0, kind='introselect', order='K'), None) -assert_type(MAR_f4.argpartition(1), MaskedNDArray[np.intp]) -assert_type(MAR_1d.argpartition(1, axis=0, kind='introselect', order='K'), MaskedNDArray[np.intp]) +assert_type(MAR_f4.argpartition(1), MaskedArray[np.intp]) +assert_type(MAR_1d.argpartition(1, axis=0, kind='introselect', order='K'), MaskedArray[np.intp]) assert_type(np.ma.ndim(f4), int) assert_type(np.ma.ndim(MAR_b), int) @@ -185,55 +185,55 @@ assert_type(MAR_f4.ids(), tuple[int, int]) assert_type(MAR_f4.iscontiguous(), bool) -assert_type(MAR_f4 >= 3, MaskedNDArray[np.bool]) -assert_type(MAR_i8 >= AR_td64, MaskedNDArray[np.bool]) -assert_type(MAR_b >= AR_td64, MaskedNDArray[np.bool]) -assert_type(MAR_td64 >= AR_td64, MaskedNDArray[np.bool]) -assert_type(MAR_dt64 >= AR_dt64, MaskedNDArray[np.bool]) -assert_type(MAR_o >= AR_o, MaskedNDArray[np.bool]) -assert_type(MAR_1d >= 0, MaskedNDArray[np.bool]) -assert_type(MAR_s >= MAR_s, MaskedNDArray[np.bool]) -assert_type(MAR_byte >= MAR_byte, MaskedNDArray[np.bool]) - -assert_type(MAR_f4 > 3, 
MaskedNDArray[np.bool]) -assert_type(MAR_i8 > AR_td64, MaskedNDArray[np.bool]) -assert_type(MAR_b > AR_td64, MaskedNDArray[np.bool]) -assert_type(MAR_td64 > AR_td64, MaskedNDArray[np.bool]) -assert_type(MAR_dt64 > AR_dt64, MaskedNDArray[np.bool]) -assert_type(MAR_o > AR_o, MaskedNDArray[np.bool]) -assert_type(MAR_1d > 0, MaskedNDArray[np.bool]) -assert_type(MAR_s > MAR_s, MaskedNDArray[np.bool]) -assert_type(MAR_byte > MAR_byte, MaskedNDArray[np.bool]) - -assert_type(MAR_f4 <= 3, MaskedNDArray[np.bool]) -assert_type(MAR_i8 <= AR_td64, MaskedNDArray[np.bool]) -assert_type(MAR_b <= AR_td64, MaskedNDArray[np.bool]) -assert_type(MAR_td64 <= AR_td64, MaskedNDArray[np.bool]) -assert_type(MAR_dt64 <= AR_dt64, MaskedNDArray[np.bool]) -assert_type(MAR_o <= AR_o, MaskedNDArray[np.bool]) -assert_type(MAR_1d <= 0, MaskedNDArray[np.bool]) -assert_type(MAR_s <= MAR_s, MaskedNDArray[np.bool]) -assert_type(MAR_byte <= MAR_byte, MaskedNDArray[np.bool]) - -assert_type(MAR_f4 < 3, MaskedNDArray[np.bool]) -assert_type(MAR_i8 < AR_td64, MaskedNDArray[np.bool]) -assert_type(MAR_b < AR_td64, MaskedNDArray[np.bool]) -assert_type(MAR_td64 < AR_td64, MaskedNDArray[np.bool]) -assert_type(MAR_dt64 < AR_dt64, MaskedNDArray[np.bool]) -assert_type(MAR_o < AR_o, MaskedNDArray[np.bool]) -assert_type(MAR_1d < 0, MaskedNDArray[np.bool]) -assert_type(MAR_s < MAR_s, MaskedNDArray[np.bool]) -assert_type(MAR_byte < MAR_byte, MaskedNDArray[np.bool]) - -assert_type(MAR_f4 <= 3, MaskedNDArray[np.bool]) -assert_type(MAR_i8 <= AR_td64, MaskedNDArray[np.bool]) -assert_type(MAR_b <= AR_td64, MaskedNDArray[np.bool]) -assert_type(MAR_td64 <= AR_td64, MaskedNDArray[np.bool]) -assert_type(MAR_dt64 <= AR_dt64, MaskedNDArray[np.bool]) -assert_type(MAR_o <= AR_o, MaskedNDArray[np.bool]) -assert_type(MAR_1d <= 0, MaskedNDArray[np.bool]) -assert_type(MAR_s <= MAR_s, MaskedNDArray[np.bool]) -assert_type(MAR_byte <= MAR_byte, MaskedNDArray[np.bool]) +assert_type(MAR_f4 >= 3, MaskedArray[np.bool]) +assert_type(MAR_i8 >= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_b >= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_td64 >= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_dt64 >= AR_dt64, MaskedArray[np.bool]) +assert_type(MAR_o >= AR_o, MaskedArray[np.bool]) +assert_type(MAR_1d >= 0, MaskedArray[np.bool]) +assert_type(MAR_s >= MAR_s, MaskedArray[np.bool]) +assert_type(MAR_byte >= MAR_byte, MaskedArray[np.bool]) + +assert_type(MAR_f4 > 3, MaskedArray[np.bool]) +assert_type(MAR_i8 > AR_td64, MaskedArray[np.bool]) +assert_type(MAR_b > AR_td64, MaskedArray[np.bool]) +assert_type(MAR_td64 > AR_td64, MaskedArray[np.bool]) +assert_type(MAR_dt64 > AR_dt64, MaskedArray[np.bool]) +assert_type(MAR_o > AR_o, MaskedArray[np.bool]) +assert_type(MAR_1d > 0, MaskedArray[np.bool]) +assert_type(MAR_s > MAR_s, MaskedArray[np.bool]) +assert_type(MAR_byte > MAR_byte, MaskedArray[np.bool]) + +assert_type(MAR_f4 <= 3, MaskedArray[np.bool]) +assert_type(MAR_i8 <= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_b <= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_td64 <= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_dt64 <= AR_dt64, MaskedArray[np.bool]) +assert_type(MAR_o <= AR_o, MaskedArray[np.bool]) +assert_type(MAR_1d <= 0, MaskedArray[np.bool]) +assert_type(MAR_s <= MAR_s, MaskedArray[np.bool]) +assert_type(MAR_byte <= MAR_byte, MaskedArray[np.bool]) + +assert_type(MAR_f4 < 3, MaskedArray[np.bool]) +assert_type(MAR_i8 < AR_td64, MaskedArray[np.bool]) +assert_type(MAR_b < AR_td64, MaskedArray[np.bool]) +assert_type(MAR_td64 < AR_td64, MaskedArray[np.bool]) 
+assert_type(MAR_dt64 < AR_dt64, MaskedArray[np.bool]) +assert_type(MAR_o < AR_o, MaskedArray[np.bool]) +assert_type(MAR_1d < 0, MaskedArray[np.bool]) +assert_type(MAR_s < MAR_s, MaskedArray[np.bool]) +assert_type(MAR_byte < MAR_byte, MaskedArray[np.bool]) + +assert_type(MAR_f4 <= 3, MaskedArray[np.bool]) +assert_type(MAR_i8 <= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_b <= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_td64 <= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_dt64 <= AR_dt64, MaskedArray[np.bool]) +assert_type(MAR_o <= AR_o, MaskedArray[np.bool]) +assert_type(MAR_1d <= 0, MaskedArray[np.bool]) +assert_type(MAR_s <= MAR_s, MaskedArray[np.bool]) +assert_type(MAR_byte <= MAR_byte, MaskedArray[np.bool]) assert_type(MAR_byte.count(), int) assert_type(MAR_f4.count(axis=None), int) @@ -279,7 +279,7 @@ assert_type(np.ma.filled(MAR_1d), np.ndarray[tuple[int], np.dtype]) # type: ign assert_type(MAR_b.repeat(3), np.ma.MaskedArray[tuple[int], np.dtype[np.bool]]) assert_type(MAR_2d_f4.repeat(MAR_i8), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) assert_type(MAR_2d_f4.repeat(MAR_i8, axis=None), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) -assert_type(MAR_2d_f4.repeat(MAR_i8, axis=0), MaskedNDArray[np.float32]) +assert_type(MAR_2d_f4.repeat(MAR_i8, axis=0), MaskedArray[np.float32]) assert_type(np.ma.allequal(AR_f4, MAR_f4), bool) assert_type(np.ma.allequal(AR_f4, MAR_f4, fill_value=False), bool) From e6a97fdf8a9273e5174a3de817da9beb6c1cedad Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli Date: Wed, 30 Apr 2025 17:15:21 +0100 Subject: [PATCH 17/67] TYP: Type ``MaskedArray.swapaxes`` (#28850) --- numpy/ma/core.pyi | 9 ++++++++- numpy/typing/tests/data/fail/ma.pyi | 2 ++ numpy/typing/tests/data/reveal/ma.pyi | 3 +++ 3 files changed, 13 insertions(+), 1 deletion(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 9e755e5901da..73b2a803304c 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -793,7 +793,14 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): ) -> MaskedArray[_Shape, _DTypeT_co]: ... squeeze: Any - swapaxes: Any + + def swapaxes( + self, + axis1: SupportsIndex, + axis2: SupportsIndex, + / + ) -> MaskedArray[_Shape, _DTypeT_co]: ... 
+ T: Any transpose: Any diff --git a/numpy/typing/tests/data/fail/ma.pyi b/numpy/typing/tests/data/fail/ma.pyi index a4f2517fd4a6..d79008b60d25 100644 --- a/numpy/typing/tests/data/fail/ma.pyi +++ b/numpy/typing/tests/data/fail/ma.pyi @@ -122,3 +122,5 @@ np.ma.allequal(m, [1,2,3], fill_value=1.5) # E: No overload variant np.ma.allclose(m, [1,2,3], masked_equal=4.5) # E: No overload variant np.ma.allclose(m, [1,2,3], rtol='.4') # E: No overload variant np.ma.allclose(m, [1,2,3], atol='.5') # E: No overload variant + +m.swapaxes(axis1=1, axis2=0) # E: No overload variant diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 5274deed90a1..d59a0a709707 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -308,6 +308,9 @@ def func(x: object) -> None: else: assert_type(x, object) +assert_type(MAR_b.swapaxes(0, 1), MaskedArray[np.bool]) +assert_type(MAR_2d_f4.swapaxes(1, 0), MaskedArray[np.float32]) + assert_type(np.ma.nomask, np.bool[Literal[False]]) # https://github.com/python/mypy/issues/18974 assert_type(np.ma.MaskType, type[np.bool]) # type: ignore[assert-type] From e2f5a94bd1f1c5746617076c5b5988f6eea271f6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 30 Apr 2025 17:50:15 +0000 Subject: [PATCH 18/67] MAINT: Bump astral-sh/setup-uv from 6.0.0 to 6.0.1 Bumps [astral-sh/setup-uv](https://github.com/astral-sh/setup-uv) from 6.0.0 to 6.0.1. - [Release notes](https://github.com/astral-sh/setup-uv/releases) - [Commits](https://github.com/astral-sh/setup-uv/compare/c7f87aa956e4c323abf06d5dec078e358f6b4d04...6b9c6063abd6010835644d4c2e1bef4cf5cd0fca) --- updated-dependencies: - dependency-name: astral-sh/setup-uv dependency-version: 6.0.1 dependency-type: direct:production update-type: version-update:semver-patch ... 
Signed-off-by: dependabot[bot] --- .github/workflows/macos.yml | 2 +- .github/workflows/windows.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 7156e8f486f2..82de69009aac 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -124,7 +124,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@c7f87aa956e4c323abf06d5dec078e358f6b4d04 + - uses: astral-sh/setup-uv@6b9c6063abd6010835644d4c2e1bef4cf5cd0fca with: activate-environment: true python-version: ${{ matrix.version }} diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 985d7d9c6d6a..098c29f52893 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -35,7 +35,7 @@ jobs: persist-credentials: false - name: Setup Python - uses: astral-sh/setup-uv@c7f87aa956e4c323abf06d5dec078e358f6b4d04 + uses: astral-sh/setup-uv@6b9c6063abd6010835644d4c2e1bef4cf5cd0fca with: activate-environment: true python-version: ${{ matrix.compiler-pyversion[1] }} From bc368b30b1799357fd94a4811d14fe030404c84b Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Thu, 1 May 2025 12:30:00 +0200 Subject: [PATCH 19/67] DOC: update content of cross compilation build docs [docs only] --- doc/source/building/cross_compilation.rst | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/doc/source/building/cross_compilation.rst b/doc/source/building/cross_compilation.rst index 0a2e3a5af42a..f03b620ff031 100644 --- a/doc/source/building/cross_compilation.rst +++ b/doc/source/building/cross_compilation.rst @@ -2,10 +2,10 @@ Cross compilation ================= Cross compilation is a complex topic, we only add some hopefully helpful hints -here (for now). As of May 2023, cross-compilation based on ``crossenv`` is -known to work, as used (for example) in conda-forge. Cross-compilation without -``crossenv`` requires some manual overrides. You instruct these overrides by -passing options to ``meson setup`` via `meson-python`_. +here (for now). As of May 2025, cross-compilation with a Meson cross file as +well as cross-compilation based on ``crossenv`` are known to work. Conda-forge +uses the latter method. Cross-compilation without ``crossenv`` requires passing +build options to ``meson setup`` via `meson-python`_. .. _meson-python: https://meson-python.readthedocs.io/en/latest/how-to-guides/meson-args.html @@ -33,9 +33,18 @@ your *cross file*: [properties] longdouble_format = 'IEEE_DOUBLE_LE' +For an example of a cross file needed to cross-compile NumPy, see +`numpy#288861 `__. +Putting that together, invoking a cross build with such a cross file, looks like: + +.. code:: bash + + $ python -m build --wheel -Csetup-args="--cross-file=aarch64-myos-cross-file.txt" + For more details and the current status around cross compilation, see: - The state of cross compilation in Python: `pypackaging-native key issue page `__ +- The `set of NumPy issues with the "Cross compilation" label `__ - Tracking issue for SciPy cross-compilation needs and issues: `scipy#14812 `__ From 39bac4a06bead7cdf96d8e38a84372e1b8a05ac8 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 1 May 2025 10:17:53 +0200 Subject: [PATCH 20/67] MNT: retire old script for SVN repositories Back in 2009, this script was used to determine active SVN committers of NumPy, SciPy, and SciKits. 
--- tools/commitstats.py | 38 -------------------------------------- 1 file changed, 38 deletions(-) delete mode 100644 tools/commitstats.py diff --git a/tools/commitstats.py b/tools/commitstats.py deleted file mode 100644 index 5aee433025b4..000000000000 --- a/tools/commitstats.py +++ /dev/null @@ -1,38 +0,0 @@ -# Run svn log -l - -import re -import numpy as np -import os - -names = re.compile(r'r\d+\s\|\s(.*)\s\|\s200') - -def get_count(filename, repo): - mystr = open(filename).read() - result = names.findall(mystr) - u = np.unique(result) - count = [(x, result.count(x), repo) for x in u] - return count - - -command = 'svn log -l 2300 > output.txt' -os.chdir('..') -os.system(command) - -count = get_count('output.txt', 'NumPy') - - -os.chdir('../scipy') -os.system(command) - -count.extend(get_count('output.txt', 'SciPy')) - -os.chdir('../scikits') -os.system(command) -count.extend(get_count('output.txt', 'SciKits')) -count.sort() - - -print("** SciPy and NumPy **") -print("=====================") -for val in count: - print(val) From 39e84480793d0bf88beb925c86df880500fb7ff7 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 1 May 2025 11:47:34 +0200 Subject: [PATCH 21/67] MNT: discard unused function using os.system() --- numpy/f2py/diagnose.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/numpy/f2py/diagnose.py b/numpy/f2py/diagnose.py index cbcaa9eb2931..7eb1697cc787 100644 --- a/numpy/f2py/diagnose.py +++ b/numpy/f2py/diagnose.py @@ -4,12 +4,6 @@ import tempfile -def run_command(cmd): - print(f'Running {cmd!r}:') - os.system(cmd) - print('------') - - def run(): _path = os.getcwd() os.chdir(tempfile.gettempdir()) From fcee0f7c50b12a60cee4d3c6b63340f397db125f Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 1 May 2025 10:51:11 +0200 Subject: [PATCH 22/67] DOC: consistent and updated LICENSE files for wheels * Add an empty line to LICENSE_osx.txt for consistency. * Change URLs from http:// to https://, now verbatim copy of latest GPLv3: https://www.gnu.org/licenses/gpl-3.0.txt --- tools/wheels/LICENSE_linux.txt | 10 +++++----- tools/wheels/LICENSE_osx.txt | 11 ++++++----- tools/wheels/LICENSE_win32.txt | 10 +++++----- 3 files changed, 16 insertions(+), 15 deletions(-) diff --git a/tools/wheels/LICENSE_linux.txt b/tools/wheels/LICENSE_linux.txt index 021b4b0289e7..9e2d9053b8a7 100644 --- a/tools/wheels/LICENSE_linux.txt +++ b/tools/wheels/LICENSE_linux.txt @@ -133,7 +133,7 @@ GCC RUNTIME LIBRARY EXCEPTION Version 3.1, 31 March 2009 -Copyright (C) 2009 Free Software Foundation, Inc. +Copyright (C) 2009 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. @@ -207,7 +207,7 @@ requirements of the license of GCC. GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 - Copyright (C) 2007 Free Software Foundation, Inc. + Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. @@ -851,7 +851,7 @@ the "copyright" line and a pointer to where the full notice is found. GNU General Public License for more details. You should have received a copy of the GNU General Public License - along with this program. If not, see . + along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. 
@@ -870,14 +870,14 @@ might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see -. +. The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read -. +. Name: libquadmath Files: numpy.libs/libquadmath*.so diff --git a/tools/wheels/LICENSE_osx.txt b/tools/wheels/LICENSE_osx.txt index 81889131cfa7..7ef2e381874e 100644 --- a/tools/wheels/LICENSE_osx.txt +++ b/tools/wheels/LICENSE_osx.txt @@ -3,6 +3,7 @@ This binary distribution of NumPy also bundles the following software: + Name: OpenBLAS Files: numpy/.dylibs/libscipy_openblas*.so Description: bundled as a dynamically linked library @@ -132,7 +133,7 @@ GCC RUNTIME LIBRARY EXCEPTION Version 3.1, 31 March 2009 -Copyright (C) 2009 Free Software Foundation, Inc. +Copyright (C) 2009 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. @@ -206,7 +207,7 @@ requirements of the license of GCC. GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 - Copyright (C) 2007 Free Software Foundation, Inc. + Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. @@ -850,7 +851,7 @@ the "copyright" line and a pointer to where the full notice is found. GNU General Public License for more details. You should have received a copy of the GNU General Public License - along with this program. If not, see . + along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. @@ -869,14 +870,14 @@ might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see -. +. The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read -. +. Name: libquadmath Files: numpy/.dylibs/libquadmath*.so diff --git a/tools/wheels/LICENSE_win32.txt b/tools/wheels/LICENSE_win32.txt index a2ccce66fbe5..c8277e7710a2 100644 --- a/tools/wheels/LICENSE_win32.txt +++ b/tools/wheels/LICENSE_win32.txt @@ -133,7 +133,7 @@ GCC RUNTIME LIBRARY EXCEPTION Version 3.1, 31 March 2009 -Copyright (C) 2009 Free Software Foundation, Inc. +Copyright (C) 2009 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. @@ -207,7 +207,7 @@ requirements of the license of GCC. GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 - Copyright (C) 2007 Free Software Foundation, Inc. + Copyright (C) 2007 Free Software Foundation, Inc. 
Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. @@ -851,7 +851,7 @@ the "copyright" line and a pointer to where the full notice is found. GNU General Public License for more details. You should have received a copy of the GNU General Public License - along with this program. If not, see . + along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. @@ -870,12 +870,12 @@ might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see -. +. The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read -. +. From 39ac0e1302c46dc8ccc365c56cfce0796e40d877 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 1 May 2025 16:07:55 +0200 Subject: [PATCH 23/67] DOC: numpy.i will not be included as part of SWIG (#28874) https://github.com/swig/swig/issues/361#issuecomment-100635225 --- tools/swig/README | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tools/swig/README b/tools/swig/README index c539c597f8c6..876d6a698034 100644 --- a/tools/swig/README +++ b/tools/swig/README @@ -3,9 +3,7 @@ Notes for the numpy/tools/swig directory This set of files is for developing and testing file numpy.i, which is intended to be a set of typemaps for helping SWIG interface between C -and C++ code that uses C arrays and the python module NumPy. It is -ultimately hoped that numpy.i will be included as part of the SWIG -distribution. +and C++ code that uses C arrays and the python module NumPy. Documentation ------------- From e4d774a6f693a879b7df66472b26af44aa3c2fa4 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 1 May 2025 10:34:10 +0200 Subject: [PATCH 24/67] MNT: retire script superseded by ruff rule W605 The script would detect deprecated invalid escape sequences, just like ruff rule W605 which is now enabled: https://docs.astral.sh/ruff/rules/invalid-escape-sequence/ --- tools/find_deprecated_escaped_characters.py | 62 --------------------- 1 file changed, 62 deletions(-) delete mode 100644 tools/find_deprecated_escaped_characters.py diff --git a/tools/find_deprecated_escaped_characters.py b/tools/find_deprecated_escaped_characters.py deleted file mode 100644 index d7225b8e85f6..000000000000 --- a/tools/find_deprecated_escaped_characters.py +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env python3 -r""" -Look for escape sequences deprecated in Python 3.6. - -Python 3.6 deprecates a number of non-escape sequences starting with '\' that -were accepted before. For instance, '\(' was previously accepted but must now -be written as '\\(' or r'\('. - -""" - - -def main(root): - """Find deprecated escape sequences. - - Checks for deprecated escape sequences in ``*.py files``. If `root` is a - file, that file is checked, if `root` is a directory all ``*.py`` files - found in a recursive descent are checked. 
- - If a deprecated escape sequence is found, the file and line where found is - printed. Note that for multiline strings the line where the string ends is - printed and the error(s) are somewhere in the body of the string. - - Parameters - ---------- - root : str - File or directory to check. - Returns - ------- - None - - """ - import ast - import tokenize - import warnings - from pathlib import Path - - count = 0 - base = Path(root) - paths = base.rglob("*.py") if base.is_dir() else [base] - for path in paths: - # use tokenize to auto-detect encoding on systems where no - # default encoding is defined (e.g. LANG='C') - with tokenize.open(str(path)) as f: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - tree = ast.parse(f.read()) - if w: - print("file: ", str(path)) - for e in w: - print('line: ', e.lineno, ': ', e.message) - print() - count += len(w) - print("Errors Found", count) - - -if __name__ == "__main__": - from argparse import ArgumentParser - - parser = ArgumentParser(description="Find deprecated escaped characters") - parser.add_argument('root', help='directory or file to be checked') - args = parser.parse_args() - main(args.root) From c421fa27ca6016f0ce6051b5f36252ec2f97e5b3 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 1 May 2025 12:38:49 +0200 Subject: [PATCH 25/67] STY: Enforce more ruff rules --- ruff.toml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ruff.toml b/ruff.toml index d01ff1b5dd21..c01b5cf30991 100644 --- a/ruff.toml +++ b/ruff.toml @@ -17,7 +17,11 @@ extend-exclude = [ [lint] preview = true extend-select = [ + "C4", + "LOG", + "G", "PIE", + "TID", "FLY", "E", "W", @@ -27,6 +31,7 @@ extend-select = [ ] ignore = [ "F", # TODO: enable Pyflakes rules + "C408", # Unnecessary `dict()` call (rewrite as a literal) "PIE790", # Unnecessary `pass` statement "E241", # Multiple spaces after comma "E251", # Unexpected spaces around keyword / parameter equals From baa0fe5bf5f9f3c7ef7593c7fb6490c1b7857ba4 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 1 May 2025 12:40:41 +0200 Subject: [PATCH 26/67] STY: Apply ruff/flake8-tidy-imports rule TID252 Prefer absolute imports over relative imports from parent modules --- numpy/_core/_exceptions.py | 2 +- numpy/_core/_internal.py | 2 +- numpy/_core/_machar.py | 2 +- numpy/_core/_ufunc_config.py | 2 +- numpy/_core/defchararray.py | 2 +- numpy/_core/fromnumeric.py | 2 +- numpy/_core/getlimits.py | 2 +- numpy/_core/memmap.py | 2 +- numpy/_core/numeric.py | 2 +- numpy/_core/numerictypes.py | 2 +- numpy/_core/overrides.py | 4 ++-- numpy/_core/records.py | 2 +- numpy/_typing/_nbit_base.py | 2 +- numpy/_typing/_ufunc.py | 2 +- numpy/compat/__init__.py | 4 ++-- numpy/lib/_datasource.py | 2 +- numpy/lib/_index_tricks_impl.py | 2 +- numpy/lib/_polynomial_impl.py | 2 +- numpy/lib/_type_check_impl.py | 2 +- numpy/matrixlib/defmatrix.py | 2 +- 20 files changed, 22 insertions(+), 22 deletions(-) diff --git a/numpy/_core/_exceptions.py b/numpy/_core/_exceptions.py index 180e71946e6c..aaa41648a1d2 100644 --- a/numpy/_core/_exceptions.py +++ b/numpy/_core/_exceptions.py @@ -5,7 +5,7 @@ By putting the formatting in `__str__`, we also avoid paying the cost for users who silence the exceptions. 
""" -from .._utils import set_module +from numpy._utils import set_module def _unpack_tuple(tup): if len(tup) == 1: diff --git a/numpy/_core/_internal.py b/numpy/_core/_internal.py index ed31afde6fa8..915510b220d0 100644 --- a/numpy/_core/_internal.py +++ b/numpy/_core/_internal.py @@ -10,7 +10,7 @@ import sys import warnings -from ..exceptions import DTypePromotionError +from numpy.exceptions import DTypePromotionError from .multiarray import dtype, array, ndarray, promote_types, StringDType from numpy import _NoValue try: diff --git a/numpy/_core/_machar.py b/numpy/_core/_machar.py index e9d621b764c3..84d1f82a89ab 100644 --- a/numpy/_core/_machar.py +++ b/numpy/_core/_machar.py @@ -9,7 +9,7 @@ from .fromnumeric import any from ._ufunc_config import errstate -from .._utils import set_module +from numpy._utils import set_module # Need to speed this up...especially for longdouble diff --git a/numpy/_core/_ufunc_config.py b/numpy/_core/_ufunc_config.py index ec9fd77ecbd8..edb533ea3c2b 100644 --- a/numpy/_core/_ufunc_config.py +++ b/numpy/_core/_ufunc_config.py @@ -8,7 +8,7 @@ import contextvars import functools -from .._utils import set_module +from numpy._utils import set_module from .umath import _make_extobj, _get_extobj_dict, _extobj_contextvar __all__ = [ diff --git a/numpy/_core/defchararray.py b/numpy/_core/defchararray.py index cde1c3a7f291..d782e6131337 100644 --- a/numpy/_core/defchararray.py +++ b/numpy/_core/defchararray.py @@ -18,7 +18,7 @@ import functools import numpy as np -from .._utils import set_module +from numpy._utils import set_module from .numerictypes import bytes_, str_, character from .numeric import ndarray, array as narray, asarray as asnarray from numpy._core.multiarray import compare_chararrays diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 1b8b5198277a..bc00877612d6 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -6,7 +6,7 @@ import warnings import numpy as np -from .._utils import set_module +from numpy._utils import set_module from . import multiarray as mu from . import overrides from . import umath as um diff --git a/numpy/_core/getlimits.py b/numpy/_core/getlimits.py index f33a1254467d..2dc6d1e7fad2 100644 --- a/numpy/_core/getlimits.py +++ b/numpy/_core/getlimits.py @@ -6,7 +6,7 @@ import types import warnings -from .._utils import set_module +from numpy._utils import set_module from ._machar import MachAr from . import numeric from . import numerictypes as ntypes diff --git a/numpy/_core/memmap.py b/numpy/_core/memmap.py index cf95687962af..561ac38a4d58 100644 --- a/numpy/_core/memmap.py +++ b/numpy/_core/memmap.py @@ -1,7 +1,7 @@ from contextlib import nullcontext import operator import numpy as np -from .._utils import set_module +from numpy._utils import set_module from .numeric import uint8, ndarray, dtype __all__ = ['memmap'] diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index da82391b4b8a..7adeaeddda54 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -26,7 +26,7 @@ from .overrides import finalize_array_function_like, set_module from .umath import (multiply, invert, sin, PINF, NAN) from . 
import numerictypes -from ..exceptions import AxisError +from numpy.exceptions import AxisError from ._ufunc_config import errstate bitwise_not = invert diff --git a/numpy/_core/numerictypes.py b/numpy/_core/numerictypes.py index 029390ab0a5a..cb8d3c11a23f 100644 --- a/numpy/_core/numerictypes.py +++ b/numpy/_core/numerictypes.py @@ -84,7 +84,7 @@ ndarray, dtype, datetime_data, datetime_as_string, busday_offset, busday_count, is_busday, busdaycalendar ) -from .._utils import set_module +from numpy._utils import set_module # we add more at the bottom __all__ = [ diff --git a/numpy/_core/overrides.py b/numpy/_core/overrides.py index c73d8eb4c1c5..aed83d17b836 100644 --- a/numpy/_core/overrides.py +++ b/numpy/_core/overrides.py @@ -2,8 +2,8 @@ import collections import functools -from .._utils import set_module -from .._utils._inspect import getargspec +from numpy._utils import set_module +from numpy._utils._inspect import getargspec from numpy._core._multiarray_umath import ( add_docstring, _get_implementing_args, _ArrayFunctionDispatcher) diff --git a/numpy/_core/records.py b/numpy/_core/records.py index 3e2d48d5f267..6d0331984bc7 100644 --- a/numpy/_core/records.py +++ b/numpy/_core/records.py @@ -6,7 +6,7 @@ from collections import Counter from contextlib import nullcontext -from .._utils import set_module +from numpy._utils import set_module from . import numeric as sb from . import numerictypes as nt from .arrayprint import _get_legacy_print_mode diff --git a/numpy/_typing/_nbit_base.py b/numpy/_typing/_nbit_base.py index 4f764757c4ea..bf16c436c6da 100644 --- a/numpy/_typing/_nbit_base.py +++ b/numpy/_typing/_nbit_base.py @@ -1,5 +1,5 @@ """A module with the precisions of generic `~numpy.number` types.""" -from .._utils import set_module +from numpy._utils import set_module from typing import final diff --git a/numpy/_typing/_ufunc.py b/numpy/_typing/_ufunc.py index d0573c8f5463..db52a1fdb318 100644 --- a/numpy/_typing/_ufunc.py +++ b/numpy/_typing/_ufunc.py @@ -1,4 +1,4 @@ -from .. import ufunc +from numpy import ufunc _UFunc_Nin1_Nout1 = ufunc _UFunc_Nin2_Nout1 = ufunc diff --git a/numpy/compat/__init__.py b/numpy/compat/__init__.py index 729265aa9c27..8f926c4bd568 100644 --- a/numpy/compat/__init__.py +++ b/numpy/compat/__init__.py @@ -13,8 +13,8 @@ import warnings -from .._utils import _inspect -from .._utils._inspect import getargspec, formatargspec +from numpy._utils import _inspect +from numpy._utils._inspect import getargspec, formatargspec from . 
import py3k from .py3k import * diff --git a/numpy/lib/_datasource.py b/numpy/lib/_datasource.py index 1babc722b036..5dafb0ee3843 100644 --- a/numpy/lib/_datasource.py +++ b/numpy/lib/_datasource.py @@ -36,7 +36,7 @@ """ import os -from .._utils import set_module +from numpy._utils import set_module _open = open diff --git a/numpy/lib/_index_tricks_impl.py b/numpy/lib/_index_tricks_impl.py index 77aa1394fa0e..7fe0539fa86d 100644 --- a/numpy/lib/_index_tricks_impl.py +++ b/numpy/lib/_index_tricks_impl.py @@ -4,7 +4,7 @@ import warnings import numpy as np -from .._utils import set_module +from numpy._utils import set_module import numpy._core.numeric as _nx from numpy._core.numeric import ScalarType, array from numpy._core.numerictypes import issubdtype diff --git a/numpy/lib/_polynomial_impl.py b/numpy/lib/_polynomial_impl.py index 91cf7405a1cc..a1d21c624c57 100644 --- a/numpy/lib/_polynomial_impl.py +++ b/numpy/lib/_polynomial_impl.py @@ -10,7 +10,7 @@ import re import warnings -from .._utils import set_module +from numpy._utils import set_module import numpy._core.numeric as NX from numpy._core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array, diff --git a/numpy/lib/_type_check_impl.py b/numpy/lib/_type_check_impl.py index 7b0c245335a4..671f27adc0d7 100644 --- a/numpy/lib/_type_check_impl.py +++ b/numpy/lib/_type_check_impl.py @@ -8,7 +8,7 @@ 'typename', 'mintypecode', 'common_type'] -from .._utils import set_module +from numpy._utils import set_module import numpy._core.numeric as _nx from numpy._core.numeric import asarray, asanyarray, isnan, zeros from numpy._core import overrides, getlimits diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py index 5c9f9616b814..09f10fa3be6d 100644 --- a/numpy/matrixlib/defmatrix.py +++ b/numpy/matrixlib/defmatrix.py @@ -4,7 +4,7 @@ import warnings import ast -from .._utils import set_module +from numpy._utils import set_module import numpy._core.numeric as N from numpy._core.numeric import concatenate, isscalar # While not in __all__, matrix_power used to be defined here, so we import From ea317e61e1fadd6a593e880f6da4f14abadfac18 Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli Date: Thu, 1 May 2025 16:04:59 +0100 Subject: [PATCH 27/67] TYP: Type ``MaskedArray.nonzero`` (#28845) --- numpy/ma/core.pyi | 2 +- numpy/typing/tests/data/reveal/ma.pyi | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 73b2a803304c..4bf2924ef566 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -492,7 +492,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def iscontiguous(self) -> bool: ... def all(self, axis=..., out=..., keepdims=...): ... def any(self, axis=..., out=..., keepdims=...): ... - def nonzero(self): ... + def nonzero(self) -> tuple[_Array1D[intp], *tuple[_Array1D[intp], ...]]: ... def trace(self, offset=..., axis1=..., axis2=..., dtype=..., out=...): ... def dot(self, b, out=..., strict=...): ... def sum(self, axis=..., dtype=..., out=..., keepdims=...): ... 
diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index d59a0a709707..86c097b96a4c 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -7,6 +7,7 @@ from numpy._typing import NDArray, _Shape _ScalarT = TypeVar("_ScalarT", bound=generic) MaskedArray: TypeAlias = np.ma.MaskedArray[_Shape, dtype[_ScalarT]] +_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] class MaskedArraySubclass(MaskedArray[np.complex128]): ... @@ -314,3 +315,6 @@ assert_type(MAR_2d_f4.swapaxes(1, 0), MaskedArray[np.float32]) assert_type(np.ma.nomask, np.bool[Literal[False]]) # https://github.com/python/mypy/issues/18974 assert_type(np.ma.MaskType, type[np.bool]) # type: ignore[assert-type] + +assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], *tuple[_Array1D[np.intp], ...]]) +assert_type(MAR_2d_f4.nonzero()[0], _Array1D[np.intp]) From ce513cc0b237f8976a392d20d4abeb9f54eb664f Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli Date: Thu, 1 May 2025 16:10:04 +0100 Subject: [PATCH 28/67] TYP: Type ``MaskedArray.{imag, real, baseclass, mT}`` (#28868) --- numpy/ma/core.pyi | 22 +++++++++++----------- numpy/typing/tests/data/reveal/ma.pyi | 8 ++++++++ 2 files changed, 19 insertions(+), 11 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 4bf2924ef566..006386a03296 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -2,13 +2,14 @@ # ruff: noqa: ANN001, ANN002, ANN003, ANN201, ANN202 ANN204, ANN401 from collections.abc import Sequence -from typing import Any, Literal, SupportsIndex, TypeAlias, TypeVar, overload +from typing import Any, Literal, Self, SupportsIndex, TypeAlias, TypeVar, overload from _typeshed import Incomplete from typing_extensions import TypeIs, deprecated import numpy as np from numpy import ( + _HasDTypeWithRealAndImag, _ModeKind, _OrderKACF, _PartitionKind, @@ -401,10 +402,6 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __getitem__(self, indx): ... def __setitem__(self, indx, value): ... @property - def dtype(self) -> _DTypeT_co: ... - @dtype.setter - def dtype(self: MaskedArray[Any, _DTypeT], dtype: _DTypeT, /) -> None: ... - @property def shape(self) -> _ShapeT_co: ... @shape.setter def shape(self: MaskedArray[_ShapeT, Any], shape: _ShapeT, /) -> None: ... @@ -426,7 +423,7 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def sharedmask(self): ... def shrink_mask(self): ... @property - def baseclass(self): ... + def baseclass(self) -> type[NDArray[Any]]: ... data: Any @property def flat(self): ... @@ -468,10 +465,10 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __itruediv__(self, other): ... def __ipow__(self, other): ... @property # type: ignore[misc] - def imag(self): ... + def imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... get_imag: Any @property # type: ignore[misc] - def real(self): ... + def real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... get_real: Any # keep in sync with `np.ma.count` @@ -804,9 +801,6 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): T: Any transpose: Any - @property # type: ignore[misc] - def mT(self): ... - # def toflex(self) -> Incomplete: ... def torecords(self) -> Incomplete: ... @@ -820,6 +814,12 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __reduce__(self): ... def __deepcopy__(self, memo=...): ... 
+ # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` + @property + def dtype(self) -> _DTypeT_co: ... + @dtype.setter + def dtype(self: MaskedArray[Any, _DTypeT], dtype: _DTypeT, /) -> None: ... + class mvoid(MaskedArray[_ShapeT_co, _DTypeT_co]): def __new__( self, # pyright: ignore[reportSelfClsParameterName] diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 86c097b96a4c..79bfe7f86744 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -17,6 +17,7 @@ AR_dt64: NDArray[np.datetime64] AR_td64: NDArray[np.timedelta64] AR_o: NDArray[np.timedelta64] +MAR_c16: MaskedArray[np.complex128] MAR_b: MaskedArray[np.bool] MAR_f4: MaskedArray[np.float32] MAR_f8: MaskedArray[np.float64] @@ -309,6 +310,13 @@ def func(x: object) -> None: else: assert_type(x, object) +assert_type(MAR_2d_f4.mT, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) + +assert_type(MAR_c16.real, MaskedArray[np.float64]) +assert_type(MAR_c16.imag, MaskedArray[np.float64]) + +assert_type(MAR_2d_f4.baseclass, type[NDArray[Any]]) + assert_type(MAR_b.swapaxes(0, 1), MaskedArray[np.bool]) assert_type(MAR_2d_f4.swapaxes(1, 0), MaskedArray[np.float32]) From 1b76d571689114f0c434e4b1e64f606410d1e301 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 1 May 2025 17:28:52 +0200 Subject: [PATCH 29/67] MNT: address warnign in SWIG tests MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Array2.cxx:163:25: warning: ‘*’ in boolean context, suggest ‘&&’ instead [-Wint-in-bool-context] 163 | if (_ownData && _nrows*_ncols && _buffer) | ~~~~~~^~~~~~~ --- tools/swig/test/Array2.cxx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/swig/test/Array2.cxx b/tools/swig/test/Array2.cxx index 2da61f728569..11b523523617 100644 --- a/tools/swig/test/Array2.cxx +++ b/tools/swig/test/Array2.cxx @@ -160,7 +160,7 @@ void Array2::allocateRows() void Array2::deallocateMemory() { - if (_ownData && _nrows*_ncols && _buffer) + if (_ownData && _nrows && _ncols && _buffer) { delete [] _rows; delete [] _buffer; From add17203a771bb146fa46ae9b6dd4eca7cd16fdb Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 28 Apr 2025 23:32:51 +0200 Subject: [PATCH 30/67] TYP: fix the ``set_module`` signature --- numpy/_utils/__init__.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/_utils/__init__.pyi b/numpy/_utils/__init__.pyi index 4e8eca9e9a11..f3472df9a554 100644 --- a/numpy/_utils/__init__.pyi +++ b/numpy/_utils/__init__.pyi @@ -20,7 +20,7 @@ class _HasModule(Protocol): @overload def set_module(module: None) -> IdentityFunction: ... @overload -def set_module(module: _HasModuleT) -> _HasModuleT: ... +def set_module(module: str) -> Callable[[_HasModuleT], _HasModuleT]: ... 
# def _rename_parameter( From acf6cd7b08168f839bb7098be77c381cacec05d3 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 28 Apr 2025 22:50:33 +0200 Subject: [PATCH 31/67] TYP: implicit ``linalg`` private submodule re-exports ported from numpy/numtype#380 --- numpy/linalg/__init__.pyi | 91 ++++++++++++++++++++------------------- 1 file changed, 47 insertions(+), 44 deletions(-) diff --git a/numpy/linalg/__init__.pyi b/numpy/linalg/__init__.pyi index 119ca0d0683d..16c8048c1a11 100644 --- a/numpy/linalg/__init__.pyi +++ b/numpy/linalg/__init__.pyi @@ -1,70 +1,73 @@ -from numpy._core.fromnumeric import matrix_transpose -from numpy._core.numeric import tensordot, vecdot - +from . import _linalg as _linalg +from . import _umath_linalg as _umath_linalg +from . import linalg as linalg from ._linalg import ( - matrix_power, - solve, - tensorsolve, - tensorinv, - inv, cholesky, - outer, - eigvals, - eigvalsh, - pinv, - slogdet, + cond, + cross, det, - svd, - svdvals, + diagonal, eig, eigh, + eigvals, + eigvalsh, + inv, lstsq, - norm, + matmul, matrix_norm, - vector_norm, - qr, - cond, + matrix_power, matrix_rank, + matrix_transpose, multi_dot, - matmul, + norm, + outer, + pinv, + qr, + slogdet, + solve, + svd, + svdvals, + tensordot, + tensorinv, + tensorsolve, trace, - diagonal, - cross, + vecdot, + vector_norm, ) __all__ = [ - "matrix_power", - "solve", - "tensorsolve", - "tensorinv", - "inv", + "LinAlgError", "cholesky", - "eigvals", - "eigvalsh", - "pinv", - "slogdet", + "cond", + "cross", "det", - "svd", - "svdvals", + "diagonal", "eig", "eigh", + "eigvals", + "eigvalsh", + "inv", "lstsq", - "norm", - "qr", - "cond", + "matmul", + "matrix_norm", + "matrix_power", "matrix_rank", - "LinAlgError", + "matrix_transpose", "multi_dot", - "trace", - "diagonal", - "cross", + "norm", "outer", + "pinv", + "qr", + "slogdet", + "solve", + "svd", + "svdvals", "tensordot", - "matmul", - "matrix_transpose", - "matrix_norm", - "vector_norm", + "tensorinv", + "tensorsolve", + "trace", "vecdot", + "vector_norm", ] class LinAlgError(ValueError): ... From a774590854852018c5a329374abed2b6bb09edab Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 28 Apr 2025 22:17:55 +0200 Subject: [PATCH 32/67] TYP: generic ``StringDType`` --- numpy/dtypes.pyi | 45 +++++++++++++++++++++++++++++---------------- 1 file changed, 29 insertions(+), 16 deletions(-) diff --git a/numpy/dtypes.pyi b/numpy/dtypes.pyi index fad612380359..98829745e067 100644 --- a/numpy/dtypes.pyi +++ b/numpy/dtypes.pyi @@ -1,6 +1,5 @@ # ruff: noqa: ANN401 -from types import MemberDescriptorType -from typing import Any, ClassVar, Generic, LiteralString, NoReturn, Self, TypeAlias, final, type_check_only +from typing import Any, Generic, LiteralString, Never, NoReturn, Self, TypeAlias, final, overload, type_check_only from typing import Literal as L from typing_extensions import TypeVar @@ -568,40 +567,54 @@ class TimeDelta64DType( # type: ignore[misc] "m8[as]", ]: ... 
+_NaObjectT_co = TypeVar("_NaObjectT_co", default=Never, covariant=True) + @final class StringDType( # type: ignore[misc] _TypeCodes[L["T"], L["T"], L[2056]], _NativeOrder, _NBit[L[8], L[16]], - # TODO: Replace the (invalid) `str` with the scalar type, once implemented - np.dtype[str], # type: ignore[type-var] # pyright: ignore[reportGeneralTypeIssues,reportInvalidTypeArguments] + # TODO(jorenham): change once we have a string scalar type: + # https://github.com/numpy/numpy/pull/28196 + np.dtype[str], # type: ignore[type-var] # pyright: ignore[reportGeneralTypeIssues, reportInvalidTypeArguments] + Generic[_NaObjectT_co], ): + @property + def na_object(self) -> _NaObjectT_co: ... @property def coerce(self) -> L[True]: ... - na_object: ClassVar[MemberDescriptorType] # does not get instantiated # - def __new__(cls, /) -> StringDType: ... - def __getitem__(self, key: Any, /) -> NoReturn: ... - @property - def base(self) -> StringDType: ... + @overload + def __new__(cls, /, *, coerce: bool = True) -> Self: ... + @overload + def __new__(cls, /, *, na_object: _NaObjectT_co, coerce: bool = True) -> Self: ... + + # + def __getitem__(self, key: Never, /) -> NoReturn: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] @property def fields(self) -> None: ... @property - def hasobject(self) -> L[True]: ... + def base(self) -> Self: ... @property - def isalignedstruct(self) -> L[False]: ... + def ndim(self) -> L[0]: ... @property - def isnative(self) -> L[True]: ... + def shape(self) -> tuple[()]: ... + + # @property def name(self) -> L["StringDType64", "StringDType128"]: ... @property - def ndim(self) -> L[0]: ... + def subdtype(self) -> None: ... @property - def shape(self) -> tuple[()]: ... + def type(self) -> type[str]: ... @property def str(self) -> L["|T8", "|T16"]: ... + + # @property - def subdtype(self) -> None: ... + def hasobject(self) -> L[True]: ... @property - def type(self) -> type[str]: ... # type: ignore[valid-type] + def isalignedstruct(self) -> L[False]: ... + @property + def isnative(self) -> L[True]: ... From 9af817f16585819358310c0afeec3f2d1515cc04 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 28 Apr 2025 22:36:57 +0200 Subject: [PATCH 33/67] DOC: add release note for #28856 --- doc/release/upcoming_changes/28856.improvement.rst | 5 +++++ 1 file changed, 5 insertions(+) create mode 100644 doc/release/upcoming_changes/28856.improvement.rst diff --git a/doc/release/upcoming_changes/28856.improvement.rst b/doc/release/upcoming_changes/28856.improvement.rst new file mode 100644 index 000000000000..83911035f097 --- /dev/null +++ b/doc/release/upcoming_changes/28856.improvement.rst @@ -0,0 +1,5 @@ +* ``np.dtypes.StringDType`` is now a + `generic type `_ which + accepts a type argument for ``na_object`` that defaults to ``typing.Never``. + For example, ``StringDType(na_object=None)`` returns a ``StringDType[None]``, + and ``StringDType()`` returns a ``StringDType[typing.Never]``. 
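As a rough usage sketch (not part of the patch; it assumes a NumPy build that includes this change), the runtime behaviour that the new generic stub describes looks like the following, with the ``na_object`` argument driving the inferred type parameter:

    import numpy as np
    from numpy.dtypes import StringDType

    # Default construction: type checkers now see StringDType[typing.Never],
    # i.e. no missing-value sentinel is configured.
    dt = StringDType()
    print(dt.coerce)        # True

    # Passing na_object=None is inferred as StringDType[None]; at runtime the
    # sentinel is simply stored on the dtype instance.
    dt_na = StringDType(na_object=None)
    print(dt_na.na_object)  # None

    arr = np.array(["spam", "eggs"], dtype=dt_na)
    print(arr.dtype == dt_na)  # True
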
From f07ef490877dec5353f3d99bf8e273df2ab4546f Mon Sep 17 00:00:00 2001 From: Joren Hammudoglu Date: Tue, 29 Apr 2025 19:07:30 +0200 Subject: [PATCH 34/67] TYP: Apply suggestions from code review Co-authored-by: Nathan Goldbaum --- numpy/dtypes.pyi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/numpy/dtypes.pyi b/numpy/dtypes.pyi index 98829745e067..07f889406353 100644 --- a/numpy/dtypes.pyi +++ b/numpy/dtypes.pyi @@ -575,7 +575,7 @@ class StringDType( # type: ignore[misc] _NativeOrder, _NBit[L[8], L[16]], # TODO(jorenham): change once we have a string scalar type: - # https://github.com/numpy/numpy/pull/28196 + # https://github.com/numpy/numpy/issues/28165 np.dtype[str], # type: ignore[type-var] # pyright: ignore[reportGeneralTypeIssues, reportInvalidTypeArguments] Generic[_NaObjectT_co], ): From 231326d10a622a5474d9725cb56e34ebdca6db24 Mon Sep 17 00:00:00 2001 From: jorenham Date: Mon, 28 Apr 2025 21:47:20 +0200 Subject: [PATCH 35/67] TYP: add missing ``mod`` params to ``__[r]pow__`` --- numpy/__init__.pyi | 113 ++++++++++++++++++++++++++------------------- numpy/ma/core.pyi | 4 +- 2 files changed, 67 insertions(+), 50 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index d39bb02279fb..5f176e519a24 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -3155,66 +3155,70 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rfloordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __pow__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __pow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __pow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __pow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __pow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + def __pow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ... @overload - def __pow__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + def __pow__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], mod: None = None, /) -> NDArray[float64]: ... @overload - def __pow__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + def __pow__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, mod: None = None, /) -> NDArray[complex128]: ... @overload - def __pow__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + def __pow__( + self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, / + ) -> NDArray[complex128]: ... 
@overload - def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... + def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> NDArray[complexfloating]: ... @overload - def __pow__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + def __pow__(self: NDArray[number], other: _ArrayLikeNumber_co, mod: None = None, /) -> NDArray[number]: ... @overload - def __pow__(self: NDArray[object_], other: Any, /) -> Any: ... + def __pow__(self: NDArray[object_], other: Any, mod: None = None, /) -> Any: ... @overload - def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... @overload - def __rpow__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __rpow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __rpow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rpow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rpow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + def __rpow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ... @overload - def __rpow__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + def __rpow__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], mod: None = None, /) -> NDArray[float64]: ... @overload - def __rpow__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + def __rpow__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, mod: None = None, /) -> NDArray[complex128]: ... @overload - def __rpow__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... 
+ def __rpow__( + self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, / + ) -> NDArray[complex128]: ... @overload - def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... + def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> NDArray[complexfloating]: ... @overload - def __rpow__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + def __rpow__(self: NDArray[number], other: _ArrayLikeNumber_co, mod: None = None, /) -> NDArray[number]: ... @overload - def __rpow__(self: NDArray[object_], other: Any, /) -> Any: ... + def __rpow__(self: NDArray[object_], other: Any, mod: None = None, /) -> Any: ... @overload - def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... @overload def __lshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] @@ -4254,21 +4258,25 @@ class float64(floating[_64Bit], float): # type: ignore[misc] def __rfloordiv__(self, other: complex, /) -> float64 | complex128: ... @overload - def __pow__(self, other: _Float64_co, /) -> float64: ... + def __pow__(self, other: _Float64_co, mod: None = None, /) -> float64: ... @overload - def __pow__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + def __pow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... @overload - def __pow__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __pow__( + self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / + ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload - def __pow__(self, other: complex, /) -> float64 | complex128: ... + def __pow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ... @overload - def __rpow__(self, other: _Float64_co, /) -> float64: ... + def __rpow__(self, other: _Float64_co, mod: None = None, /) -> float64: ... @overload - def __rpow__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + def __rpow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... @overload - def __rpow__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rpow__( + self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / + ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload - def __rpow__(self, other: complex, /) -> float64 | complex128: ... 
+ def __rpow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ... def __mod__(self, other: _Float64_co, /) -> float64: ... # type: ignore[override] def __rmod__(self, other: _Float64_co, /) -> float64: ... # type: ignore[override] @@ -4358,17 +4366,23 @@ class complexfloating(inexact[_NBit1, complex], Generic[_NBit1, _NBit2]): def __rtruediv__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... @overload - def __pow__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + def __pow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBit1, _NBit2]: ... @overload - def __pow__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __pow__( + self, other: complex | float64 | complex128, mod: None = None, / + ) -> complexfloating[_NBit1, _NBit2] | complex128: ... @overload - def __pow__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __pow__( + self, other: number[_NBit], mod: None = None, / + ) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... @overload - def __rpow__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + def __rpow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBit1, _NBit2]: ... @overload - def __rpow__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __rpow__(self, other: complex, mod: None = None, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... @overload - def __rpow__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __rpow__( + self, other: number[_NBit], mod: None = None, / + ) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... complex64: TypeAlias = complexfloating[_32Bit, _32Bit] @@ -4424,10 +4438,12 @@ class complex128(complexfloating[_64Bit, _64Bit], complex): # type: ignore[misc def __rtruediv__(self, other: _Complex128_co, /) -> complex128: ... @overload - def __pow__(self, other: _Complex128_co, /) -> complex128: ... + def __pow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... @overload - def __pow__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... - def __rpow__(self, other: _Complex128_co, /) -> complex128: ... + def __pow__( + self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / + ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rpow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... csingle: TypeAlias = complexfloating[_NBitSingle, _NBitSingle] cdouble: TypeAlias = complexfloating[_NBitDouble, _NBitDouble] @@ -5287,7 +5303,8 @@ class matrix(ndarray[_2DShapeT_co, _DTypeT_co]): def __mul__(self, other: ArrayLike, /) -> matrix[_2D, Any]: ... def __rmul__(self, other: ArrayLike, /) -> matrix[_2D, Any]: ... def __imul__(self, other: ArrayLike, /) -> matrix[_2DShapeT_co, _DTypeT_co]: ... - def __pow__(self, other: ArrayLike, /) -> matrix[_2D, Any]: ... + def __pow__(self, other: ArrayLike, mod: None = None, /) -> matrix[_2D, Any]: ... + def __rpow__(self, other: ArrayLike, mod: None = None, /) -> matrix[_2D, Any]: ... def __ipow__(self, other: ArrayLike, /) -> matrix[_2DShapeT_co, _DTypeT_co]: ... 
@overload diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 006386a03296..71a05d05a940 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -455,8 +455,8 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __rtruediv__(self, other): ... def __floordiv__(self, other): ... def __rfloordiv__(self, other): ... - def __pow__(self, other): ... - def __rpow__(self, other): ... + def __pow__(self, other, mod: None = None, /): ... + def __rpow__(self, other, mod: None = None, /): ... def __iadd__(self, other): ... def __isub__(self, other): ... def __imul__(self, other): ... From 9a221c933cf5d9cbd6feb304ee50cff7eb209d9b Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Tue, 29 Apr 2025 09:09:32 +0200 Subject: [PATCH 36/67] =?UTF-8?q?DOC:=20http://=20=E2=86=92=20https://?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Update a few links in the process. --- doc/source/building/index.rst | 2 +- doc/source/building/introspecting_a_build.rst | 2 +- doc/source/f2py/f2py.getting-started.rst | 2 +- doc/source/f2py/windows/index.rst | 2 +- doc/source/user/basics.dispatch.rst | 4 ++-- doc/source/user/c-info.python-as-glue.rst | 2 +- doc/source/user/numpy-for-matlab-users.rst | 2 +- numpy/distutils/system_info.py | 14 +++++++------- 8 files changed, 15 insertions(+), 15 deletions(-) diff --git a/doc/source/building/index.rst b/doc/source/building/index.rst index 3a9709f1ebc1..d7baeaee9324 100644 --- a/doc/source/building/index.rst +++ b/doc/source/building/index.rst @@ -52,7 +52,7 @@ your system. * BLAS and LAPACK libraries. `OpenBLAS `__ is the NumPy default; other variants include Apple Accelerate, `MKL `__, - `ATLAS `__ and + `ATLAS `__ and `Netlib `__ (or "Reference") BLAS and LAPACK. diff --git a/doc/source/building/introspecting_a_build.rst b/doc/source/building/introspecting_a_build.rst index f23628bf3ffd..268365f595bf 100644 --- a/doc/source/building/introspecting_a_build.rst +++ b/doc/source/building/introspecting_a_build.rst @@ -19,4 +19,4 @@ These things are all available after the configure stage of the build (i.e., information, rather than running the build and reading the full build log. For more details on this topic, see the -`SciPy doc page on build introspection `__. +`SciPy doc page on build introspection `__. diff --git a/doc/source/f2py/f2py.getting-started.rst b/doc/source/f2py/f2py.getting-started.rst index dd1349979a39..e5746c49e94d 100644 --- a/doc/source/f2py/f2py.getting-started.rst +++ b/doc/source/f2py/f2py.getting-started.rst @@ -308,4 +308,4 @@ the previous case:: >>> print(fib3.fib(8)) [ 0. 1. 1. 2. 3. 5. 8. 13.] -.. _`system dependencies panel`: http://scipy.github.io/devdocs/building/index.html#system-level-dependencies +.. _`system dependencies panel`: https://scipy.github.io/devdocs/building/index.html#system-level-dependencies diff --git a/doc/source/f2py/windows/index.rst b/doc/source/f2py/windows/index.rst index 797dfc2b4179..ea0af7505ce7 100644 --- a/doc/source/f2py/windows/index.rst +++ b/doc/source/f2py/windows/index.rst @@ -217,4 +217,4 @@ path using a hash. This needs to be added to the ``PATH`` variable. .. _are outdated: https://github.com/conda-forge/conda-forge.github.io/issues/1044 .. _now deprecated: https://github.com/numpy/numpy/pull/20875 .. _LLVM Flang: https://releases.llvm.org/11.0.0/tools/flang/docs/ReleaseNotes.html -.. _SciPy's documentation: http://scipy.github.io/devdocs/building/index.html#system-level-dependencies +.. 
_SciPy's documentation: https://scipy.github.io/devdocs/building/index.html#system-level-dependencies diff --git a/doc/source/user/basics.dispatch.rst b/doc/source/user/basics.dispatch.rst index 1505c9285ea8..ae53995a3917 100644 --- a/doc/source/user/basics.dispatch.rst +++ b/doc/source/user/basics.dispatch.rst @@ -7,8 +7,8 @@ Writing custom array containers Numpy's dispatch mechanism, introduced in numpy version v1.16 is the recommended approach for writing custom N-dimensional array containers that are compatible with the numpy API and provide custom implementations of numpy -functionality. Applications include `dask `_ arrays, an -N-dimensional array distributed across multiple nodes, and `cupy +functionality. Applications include `dask `_ +arrays, an N-dimensional array distributed across multiple nodes, and `cupy `_ arrays, an N-dimensional array on a GPU. diff --git a/doc/source/user/c-info.python-as-glue.rst b/doc/source/user/c-info.python-as-glue.rst index d791341ac560..c699760fdebd 100644 --- a/doc/source/user/c-info.python-as-glue.rst +++ b/doc/source/user/c-info.python-as-glue.rst @@ -144,7 +144,7 @@ written C-code. Cython ====== -`Cython `_ is a compiler for a Python dialect that adds +`Cython `_ is a compiler for a Python dialect that adds (optional) static typing for speed, and allows mixing C or C++ code into your modules. It produces C or C++ extensions that can be compiled and imported in Python code. diff --git a/doc/source/user/numpy-for-matlab-users.rst b/doc/source/user/numpy-for-matlab-users.rst index a3ced19e23ad..370a1f7ab435 100644 --- a/doc/source/user/numpy-for-matlab-users.rst +++ b/doc/source/user/numpy-for-matlab-users.rst @@ -810,7 +810,7 @@ Links ===== Another somewhat outdated MATLAB/NumPy cross-reference can be found at -http://mathesaurus.sf.net/ +https://mathesaurus.sourceforge.net/ An extensive list of tools for scientific work with Python can be found in the `topical software page `__. diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py index e428b47f08d4..4702742e4589 100644 --- a/numpy/distutils/system_info.py +++ b/numpy/distutils/system_info.py @@ -604,7 +604,7 @@ class AliasedOptionError(DistutilsError): class AtlasNotFoundError(NotFoundError): """ - Atlas (http://github.com/math-atlas/math-atlas) libraries not found. + Atlas (https://github.com/math-atlas/math-atlas) libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [atlas]) or by setting the ATLAS environment variable.""" @@ -612,14 +612,14 @@ class AtlasNotFoundError(NotFoundError): class FlameNotFoundError(NotFoundError): """ - FLAME (http://www.cs.utexas.edu/~flame/web/) libraries not found. + FLAME (https://shpc.oden.utexas.edu/libFLAME.html) libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [flame]).""" class LapackNotFoundError(NotFoundError): """ - Lapack (http://www.netlib.org/lapack/) libraries not found. + Lapack (https://www.netlib.org/lapack/) libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [lapack]) or by setting the LAPACK environment variable.""" @@ -627,7 +627,7 @@ class LapackNotFoundError(NotFoundError): class LapackSrcNotFoundError(LapackNotFoundError): """ - Lapack (http://www.netlib.org/lapack/) sources not found. + Lapack (https://www.netlib.org/lapack/) sources not found. 
Directories to search for the sources can be specified in the numpy/distutils/site.cfg file (section [lapack_src]) or by setting the LAPACK_SRC environment variable.""" @@ -649,7 +649,7 @@ class BlasOptNotFoundError(NotFoundError): class BlasNotFoundError(NotFoundError): """ - Blas (http://www.netlib.org/blas/) libraries not found. + Blas (https://www.netlib.org/blas/) libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [blas]) or by setting the BLAS environment variable.""" @@ -663,7 +663,7 @@ class BlasILP64NotFoundError(NotFoundError): class BlasSrcNotFoundError(BlasNotFoundError): """ - Blas (http://www.netlib.org/blas/) sources not found. + Blas (https://www.netlib.org/blas/) sources not found. Directories to search for the sources can be specified in the numpy/distutils/site.cfg file (section [blas_src]) or by setting the BLAS_SRC environment variable.""" @@ -671,7 +671,7 @@ class BlasSrcNotFoundError(BlasNotFoundError): class FFTWNotFoundError(NotFoundError): """ - FFTW (http://www.fftw.org/) libraries not found. + FFTW (https://www.fftw.org/) libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [fftw]) or by setting the FFTW environment variable.""" From 55c4be5645a53490875c8d9643fca1485bfd83c9 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos Orfanos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Tue, 29 Apr 2025 21:04:37 +0200 Subject: [PATCH 37/67] DOC: keep URL short by using sf.net Co-authored-by: Joren Hammudoglu --- doc/source/user/numpy-for-matlab-users.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/user/numpy-for-matlab-users.rst b/doc/source/user/numpy-for-matlab-users.rst index 370a1f7ab435..0dfe49e386b9 100644 --- a/doc/source/user/numpy-for-matlab-users.rst +++ b/doc/source/user/numpy-for-matlab-users.rst @@ -810,7 +810,7 @@ Links ===== Another somewhat outdated MATLAB/NumPy cross-reference can be found at -https://mathesaurus.sourceforge.net/ +https://mathesaurus.sf.net/ An extensive list of tools for scientific work with Python can be found in the `topical software page `__. From 52abf10a55fd2ab10d85b5caf49bf156e69ad1e8 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Tue, 29 Apr 2025 22:12:48 +0200 Subject: [PATCH 38/67] DOC: Revert changes to `numpy/distutils` --- doc/source/user/numpy-for-matlab-users.rst | 2 +- numpy/distutils/system_info.py | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/doc/source/user/numpy-for-matlab-users.rst b/doc/source/user/numpy-for-matlab-users.rst index 0dfe49e386b9..8c7914ea8dec 100644 --- a/doc/source/user/numpy-for-matlab-users.rst +++ b/doc/source/user/numpy-for-matlab-users.rst @@ -810,7 +810,7 @@ Links ===== Another somewhat outdated MATLAB/NumPy cross-reference can be found at -https://mathesaurus.sf.net/ +https://mathesaurus.sf.net/ An extensive list of tools for scientific work with Python can be found in the `topical software page `__. diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py index 4702742e4589..e428b47f08d4 100644 --- a/numpy/distutils/system_info.py +++ b/numpy/distutils/system_info.py @@ -604,7 +604,7 @@ class AliasedOptionError(DistutilsError): class AtlasNotFoundError(NotFoundError): """ - Atlas (https://github.com/math-atlas/math-atlas) libraries not found. 
+ Atlas (http://github.com/math-atlas/math-atlas) libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [atlas]) or by setting the ATLAS environment variable.""" @@ -612,14 +612,14 @@ class AtlasNotFoundError(NotFoundError): class FlameNotFoundError(NotFoundError): """ - FLAME (https://shpc.oden.utexas.edu/libFLAME.html) libraries not found. + FLAME (http://www.cs.utexas.edu/~flame/web/) libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [flame]).""" class LapackNotFoundError(NotFoundError): """ - Lapack (https://www.netlib.org/lapack/) libraries not found. + Lapack (http://www.netlib.org/lapack/) libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [lapack]) or by setting the LAPACK environment variable.""" @@ -627,7 +627,7 @@ class LapackNotFoundError(NotFoundError): class LapackSrcNotFoundError(LapackNotFoundError): """ - Lapack (https://www.netlib.org/lapack/) sources not found. + Lapack (http://www.netlib.org/lapack/) sources not found. Directories to search for the sources can be specified in the numpy/distutils/site.cfg file (section [lapack_src]) or by setting the LAPACK_SRC environment variable.""" @@ -649,7 +649,7 @@ class BlasOptNotFoundError(NotFoundError): class BlasNotFoundError(NotFoundError): """ - Blas (https://www.netlib.org/blas/) libraries not found. + Blas (http://www.netlib.org/blas/) libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [blas]) or by setting the BLAS environment variable.""" @@ -663,7 +663,7 @@ class BlasILP64NotFoundError(NotFoundError): class BlasSrcNotFoundError(BlasNotFoundError): """ - Blas (https://www.netlib.org/blas/) sources not found. + Blas (http://www.netlib.org/blas/) sources not found. Directories to search for the sources can be specified in the numpy/distutils/site.cfg file (section [blas_src]) or by setting the BLAS_SRC environment variable.""" @@ -671,7 +671,7 @@ class BlasSrcNotFoundError(BlasNotFoundError): class FFTWNotFoundError(NotFoundError): """ - FFTW (https://www.fftw.org/) libraries not found. + FFTW (http://www.fftw.org/) libraries not found. Directories to search for the libraries can be specified in the numpy/distutils/site.cfg file (section [fftw]) or by setting the FFTW environment variable.""" From 3229cf46d977afc7b14e84b38833698ee8ac8ac9 Mon Sep 17 00:00:00 2001 From: Nathan Goldbaum Date: Fri, 2 May 2025 08:42:22 -0600 Subject: [PATCH 39/67] MAINT: from_dlpack thread safety fixes (#28883) Fixes #28881 Moves the global variables defined in from_dlpack to the npy_static_pydata struct and initializes them during module init. 
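For context, a minimal hypothetical reproducer (my own sketch, in Python rather than at the C level where the actual fix lives) of the kind of concurrent use this change is meant to protect: before it, the first simultaneous calls to ``np.from_dlpack`` could race on the lazily created keyword-name and device tuples.

    import threading
    import numpy as np

    src = np.arange(16.0)

    def worker():
        # np.from_dlpack round-trips a NumPy array through the DLPack protocol.
        for _ in range(1_000):
            out = np.from_dlpack(src)
            assert out.shape == src.shape

    threads = [threading.Thread(target=worker) for _ in range(8)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
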
--- numpy/_core/src/multiarray/dlpack.c | 31 +++----------------- numpy/_core/src/multiarray/npy_static_data.c | 16 ++++++++++ numpy/_core/src/multiarray/npy_static_data.h | 7 +++++ 3 files changed, 27 insertions(+), 27 deletions(-) diff --git a/numpy/_core/src/multiarray/dlpack.c b/numpy/_core/src/multiarray/dlpack.c index 4bea7f9fc1ab..ac37a04c30c6 100644 --- a/numpy/_core/src/multiarray/dlpack.c +++ b/numpy/_core/src/multiarray/dlpack.c @@ -504,36 +504,12 @@ from_dlpack(PyObject *NPY_UNUSED(self), return NULL; } - /* Prepare the arguments to call objects __dlpack__() method */ - static PyObject *call_kwnames = NULL; - static PyObject *dl_cpu_device_tuple = NULL; - static PyObject *max_version = NULL; - - if (call_kwnames == NULL) { - call_kwnames = Py_BuildValue("(sss)", "dl_device", "copy", "max_version"); - if (call_kwnames == NULL) { - return NULL; - } - } - if (dl_cpu_device_tuple == NULL) { - dl_cpu_device_tuple = Py_BuildValue("(i,i)", 1, 0); - if (dl_cpu_device_tuple == NULL) { - return NULL; - } - } - if (max_version == NULL) { - max_version = Py_BuildValue("(i,i)", 1, 0); - if (max_version == NULL) { - return NULL; - } - } - /* * Prepare arguments for the full call. We always forward copy and pass * our max_version. `device` is always passed as `None`, but if the user * provided a device, we will replace it with the "cpu": (1, 0). */ - PyObject *call_args[] = {obj, Py_None, copy, max_version}; + PyObject *call_args[] = {obj, Py_None, copy, npy_static_pydata.dl_max_version}; Py_ssize_t nargsf = 1 | PY_VECTORCALL_ARGUMENTS_OFFSET; /* If device is passed it must be "cpu" and replace it with (1, 0) */ @@ -544,12 +520,13 @@ from_dlpack(PyObject *NPY_UNUSED(self), return NULL; } assert(device_request == NPY_DEVICE_CPU); - call_args[1] = dl_cpu_device_tuple; + call_args[1] = npy_static_pydata.dl_cpu_device_tuple; } PyObject *capsule = PyObject_VectorcallMethod( - npy_interned_str.__dlpack__, call_args, nargsf, call_kwnames); + npy_interned_str.__dlpack__, call_args, nargsf, + npy_static_pydata.dl_call_kwnames); if (capsule == NULL) { /* * TODO: This path should be deprecated in NumPy 2.1. 
Once deprecated diff --git a/numpy/_core/src/multiarray/npy_static_data.c b/numpy/_core/src/multiarray/npy_static_data.c index 2cc6ea72c26e..62e1fd3c1b15 100644 --- a/numpy/_core/src/multiarray/npy_static_data.c +++ b/numpy/_core/src/multiarray/npy_static_data.c @@ -184,6 +184,22 @@ initialize_static_globals(void) return -1; } + npy_static_pydata.dl_call_kwnames = + Py_BuildValue("(sss)", "dl_device", "copy", "max_version"); + if (npy_static_pydata.dl_call_kwnames == NULL) { + return -1; + } + + npy_static_pydata.dl_cpu_device_tuple = Py_BuildValue("(i,i)", 1, 0); + if (npy_static_pydata.dl_cpu_device_tuple == NULL) { + return -1; + } + + npy_static_pydata.dl_max_version = Py_BuildValue("(i,i)", 1, 0); + if (npy_static_pydata.dl_max_version == NULL) { + return -1; + } + /* * Initialize contents of npy_static_cdata struct * diff --git a/numpy/_core/src/multiarray/npy_static_data.h b/numpy/_core/src/multiarray/npy_static_data.h index d6ee4a8dc54d..287dc80e4c1f 100644 --- a/numpy/_core/src/multiarray/npy_static_data.h +++ b/numpy/_core/src/multiarray/npy_static_data.h @@ -138,6 +138,13 @@ typedef struct npy_static_pydata_struct { PyObject *GenericToVoidMethod; PyObject *ObjectToGenericMethod; PyObject *GenericToObjectMethod; + + /* + * Used in from_dlpack + */ + PyObject *dl_call_kwnames; + PyObject *dl_cpu_device_tuple; + PyObject *dl_max_version; } npy_static_pydata_struct; From 7c942041cf171520d96e0cdcfd22c0db4306ad7e Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 1 May 2025 12:49:39 +0200 Subject: [PATCH 40/67] STY: Apply ruff/refurb rule FURB110 Replace ternary `if` expression with `or` operator --- benchmarks/benchmarks/__init__.py | 2 +- numpy/_core/code_generators/generate_umath.py | 2 +- numpy/_core/strings.py | 4 ++-- numpy/_pytesttester.py | 2 +- numpy/f2py/tests/util.py | 2 +- numpy/testing/_private/extbuild.py | 6 +++--- 6 files changed, 9 insertions(+), 9 deletions(-) diff --git a/benchmarks/benchmarks/__init__.py b/benchmarks/benchmarks/__init__.py index 6aa85c22f614..8372be467005 100644 --- a/benchmarks/benchmarks/__init__.py +++ b/benchmarks/benchmarks/__init__.py @@ -5,7 +5,7 @@ def show_cpu_features(): from numpy.lib._utils_impl import _opt_info info = _opt_info() - info = "NumPy CPU features: " + (info if info else 'nothing enabled') + info = "NumPy CPU features: " + (info or 'nothing enabled') # ASV wrapping stdout & stderr, so we assume having a tty here if 'SHELL' in os.environ and sys.platform != 'win32': # to avoid the red color that imposed by ASV diff --git a/numpy/_core/code_generators/generate_umath.py b/numpy/_core/code_generators/generate_umath.py index 35b5ad92ac82..fbda9393b5a5 100644 --- a/numpy/_core/code_generators/generate_umath.py +++ b/numpy/_core/code_generators/generate_umath.py @@ -1403,7 +1403,7 @@ def make_arrays(funcdict): sub = 0 for k, t in enumerate(uf.type_descriptions): - cfunc_alias = t.cfunc_alias if t.cfunc_alias else name + cfunc_alias = t.cfunc_alias or name cfunc_fname = None if t.func_data is FullTypeDescr: tname = english_upper(chartoname[t.type]) diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index befd76d8ceed..cd6d1ec439f1 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -1332,8 +1332,8 @@ def replace(a, old, new, count=-1): return _replace(arr, old, new, count) a_dt = arr.dtype - old = old.astype(old_dtype if old_dtype else a_dt, copy=False) - new = new.astype(new_dtype if new_dtype else a_dt, copy=False) + old = 
old.astype(old_dtype or a_dt, copy=False) + new = new.astype(new_dtype or a_dt, copy=False) max_int64 = np.iinfo(np.int64).max counts = _count_ufunc(arr, old, 0, max_int64) counts = np.where(count < 0, counts, np.minimum(counts, count)) diff --git a/numpy/_pytesttester.py b/numpy/_pytesttester.py index 482c6eddbca0..f41d54f36bec 100644 --- a/numpy/_pytesttester.py +++ b/numpy/_pytesttester.py @@ -39,7 +39,7 @@ def _show_numpy_info(): print(f"NumPy version {np.__version__}") info = np.lib._utils_impl._opt_info() - print("NumPy CPU features: ", (info if info else 'nothing enabled')) + print("NumPy CPU features: ", (info or 'nothing enabled')) class PytestTester: diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py index dbb7416b7765..ab2a1b6f8710 100644 --- a/numpy/f2py/tests/util.py +++ b/numpy/f2py/tests/util.py @@ -384,7 +384,7 @@ def setup_method(self): if self.module is not None: return - codes = self.sources if self.sources else [] + codes = self.sources or [] if self.code: codes.append(self.suffix) diff --git a/numpy/testing/_private/extbuild.py b/numpy/testing/_private/extbuild.py index af403bf7f8f4..f81184e9af1e 100644 --- a/numpy/testing/_private/extbuild.py +++ b/numpy/testing/_private/extbuild.py @@ -104,9 +104,9 @@ def compile_extension_module( dirname = builddir / name dirname.mkdir(exist_ok=True) cfile = _convert_str_to_file(source_string, dirname) - include_dirs = include_dirs if include_dirs else [] - libraries = libraries if libraries else [] - library_dirs = library_dirs if library_dirs else [] + include_dirs = include_dirs or [] + libraries = libraries or [] + library_dirs = library_dirs or [] return _c_compile( cfile, outputfilename=dirname / modname, From 57d794b0c65e731f01ae6099007863237d52dc9b Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 1 May 2025 13:00:07 +0200 Subject: [PATCH 41/67] STY: Apply ruff/refurb rule FURB113 Use `extend()` instead of repeatedly calling `append()` --- numpy/_core/code_generators/generate_umath.py | 18 +++++++++--------- numpy/lib/tests/test_arraysetops.py | 8 ++------ numpy/random/tests/test_direct.py | 3 +-- 3 files changed, 12 insertions(+), 17 deletions(-) diff --git a/numpy/_core/code_generators/generate_umath.py b/numpy/_core/code_generators/generate_umath.py index fbda9393b5a5..eb5751df590f 100644 --- a/numpy/_core/code_generators/generate_umath.py +++ b/numpy/_core/code_generators/generate_umath.py @@ -1466,13 +1466,11 @@ def make_arrays(funcdict): funcnames = ', '.join(funclist) signames = ', '.join(siglist) datanames = ', '.join(datalist) - code1list.append( - "static PyUFuncGenericFunction %s_functions[] = {%s};" - % (name, funcnames)) - code1list.append("static void * %s_data[] = {%s};" - % (name, datanames)) - code1list.append("static const char %s_signatures[] = {%s};" - % (name, signames)) + code1list.extend(( + f"static PyUFuncGenericFunction {name}_functions[] = {{{funcnames}}};", + f"static void * {name}_data[] = {{{datanames}}};", + f"static const char {name}_signatures[] = {{{signames}}};", + )) uf.empty = False else: uf.empty = True @@ -1573,8 +1571,10 @@ def make_ufuncs(funcdict): funcname=f"{english_upper(chartoname[c])}_{name}_indexed", )) - mlist.append(r"""PyDict_SetItemString(dictionary, "%s", f);""" % name) - mlist.append(r"""Py_DECREF(f);""") + mlist.extend(( + f'PyDict_SetItemString(dictionary, "{name}", f);', + "Py_DECREF(f);", + )) code3list.append('\n'.join(mlist)) return '\n'.join(code3list) diff --git 
a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index 788a4cecdb44..b4c4b39aac83 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -688,8 +688,7 @@ def get_types(self): types = [] types.extend(np.typecodes['AllInteger']) types.extend(np.typecodes['AllFloat']) - types.append('datetime64[D]') - types.append('timedelta64[D]') + types.extend(('datetime64[D]', 'timedelta64[D]')) return types def test_unique_1d(self): @@ -871,10 +870,7 @@ def test_unique_axis(self): types = [] types.extend(np.typecodes['AllInteger']) types.extend(np.typecodes['AllFloat']) - types.append('datetime64[D]') - types.append('timedelta64[D]') - types.append([('a', int), ('b', int)]) - types.append([('a', int), ('b', float)]) + types.extend(('datetime64[D]', 'timedelta64[D]', [('a', int), ('b', int)], [('a', int), ('b', float)])) for dtype in types: self._run_axis_tests(dtype) diff --git a/numpy/random/tests/test_direct.py b/numpy/random/tests/test_direct.py index c8a83f3bc40b..23da9714ddb8 100644 --- a/numpy/random/tests/test_direct.py +++ b/numpy/random/tests/test_direct.py @@ -123,8 +123,7 @@ def gauss_from_uint(x, n, bits): loc += 2 f = np.sqrt(-2.0 * np.log(r2) / r2) - gauss.append(f * x2) - gauss.append(f * x1) + gauss.extend((f * x2, f * x1)) return gauss[:n] From c7f8824b9fd72c982ff290817bc7c42637ac0fdc Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 1 May 2025 12:47:56 +0200 Subject: [PATCH 42/67] STY: Apply ruff/refurb rule FURB116 Replace `hex` call with f-string --- numpy/_build_utils/tempita/_tempita.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/numpy/_build_utils/tempita/_tempita.py b/numpy/_build_utils/tempita/_tempita.py index d8ce41742f3a..446658fc15f8 100644 --- a/numpy/_build_utils/tempita/_tempita.py +++ b/numpy/_build_utils/tempita/_tempita.py @@ -175,11 +175,7 @@ def from_filename( from_filename = classmethod(from_filename) def __repr__(self): - return "<%s %s name=%r>" % ( - self.__class__.__name__, - hex(id(self))[2:], - self.name, - ) + return f"<{self.__class__.__name__} {id(self):x} name={self.name!r}>" def substitute(self, *args, **kw): if args: From eeb6e0e6c5267a3dd548547664e0f3d09e36eb7a Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 1 May 2025 13:02:00 +0200 Subject: [PATCH 43/67] STY: Apply ruff/refurb rule FURB140 Use `itertools.starmap` instead of the generator --- benchmarks/benchmarks/bench_ufunc.py | 4 ++-- doc/source/reference/simd/gen_features.py | 3 ++- numpy/_core/code_generators/genapi.py | 3 ++- numpy/_core/tests/test_cython.py | 4 ++-- numpy/_core/tests/test_mem_overlap.py | 9 ++++----- numpy/_core/tests/test_scalarmath.py | 4 ++-- numpy/_core/tests/test_simd.py | 8 ++++---- numpy/_core/tests/test_strings.py | 7 ++++--- numpy/_core/tests/test_umath.py | 4 ++-- numpy/lib/_arrayterator_impl.py | 6 +++--- numpy/lib/_function_base_impl.py | 3 ++- numpy/lib/_iotools.py | 2 +- numpy/lib/tests/test_arrayterator.py | 3 ++- numpy/ma/extras.py | 5 ++--- 14 files changed, 34 insertions(+), 31 deletions(-) diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py index 4d9f3c9c8f61..810555fec73b 100644 --- a/benchmarks/benchmarks/bench_ufunc.py +++ b/benchmarks/benchmarks/bench_ufunc.py @@ -61,7 +61,7 @@ def setup(self, ufuncname): self.args.append(arg) def time_afdn_types(self, ufuncname): - [self.afdn(*arg) for arg in 
self.args] + list(itertools.starmap(self.afdn, self.args)) class Broadcast(Benchmark): @@ -108,7 +108,7 @@ def setup(self, ufuncname): self.args.append(arg) def time_ufunc_types(self, ufuncname): - [self.ufn(*arg) for arg in self.args] + list(itertools.starmap(self.ufn, self.args)) class MethodsV0(Benchmark): diff --git a/doc/source/reference/simd/gen_features.py b/doc/source/reference/simd/gen_features.py index 47b35dbfc397..2d8a2e49cd2f 100644 --- a/doc/source/reference/simd/gen_features.py +++ b/doc/source/reference/simd/gen_features.py @@ -2,6 +2,7 @@ Generate CPU features tables from CCompilerOpt """ from os import path +from itertools import starmap from numpy.distutils.ccompiler_opt import CCompilerOpt class FakeCCompilerOpt(CCompilerOpt): @@ -122,7 +123,7 @@ def gen_rst_table(self, field_names, rows, tab_size=4): cformat = ' '.join('{:<%d}' % i for i in cls_len) border = cformat.format(*['=' * i for i in cls_len]) - rows = [cformat.format(*row) for row in rows] + rows = list(starmap(cformat.format, rows)) # header rows = [border, cformat.format(*field_names), border] + rows # footer diff --git a/numpy/_core/code_generators/genapi.py b/numpy/_core/code_generators/genapi.py index 9b51904a2d7d..3045149b3405 100644 --- a/numpy/_core/code_generators/genapi.py +++ b/numpy/_core/code_generators/genapi.py @@ -15,6 +15,7 @@ import textwrap from os.path import join +from itertools import starmap def get_processor(): @@ -152,7 +153,7 @@ def _format_arg(self, typename, name): return typename + ' ' + name def __str__(self): - argstr = ', '.join([self._format_arg(*a) for a in self.args]) + argstr = ', '.join(list(starmap(self._format_arg, self.args))) if self.doc: doccomment = f'/* {self.doc} */\n' else: diff --git a/numpy/_core/tests/test_cython.py b/numpy/_core/tests/test_cython.py index 81ddc63258c2..3fd45582c5eb 100644 --- a/numpy/_core/tests/test_cython.py +++ b/numpy/_core/tests/test_cython.py @@ -3,6 +3,7 @@ import subprocess import sys import pytest +from itertools import starmap import numpy as np from numpy.testing import assert_array_equal, IS_WASM, IS_EDITABLE @@ -278,8 +279,7 @@ def test_npyiter_api(install_temp): x is y for x, y in zip(checks.get_npyiter_operands(it), it.operands) ) assert all( - np.allclose(x, y) - for x, y in zip(checks.get_npyiter_itviews(it), it.itviews) + starmap(np.allclose, zip(checks.get_npyiter_itviews(it), it.itviews)) ) diff --git a/numpy/_core/tests/test_mem_overlap.py b/numpy/_core/tests/test_mem_overlap.py index 0d80951a854a..9317d87358f3 100644 --- a/numpy/_core/tests/test_mem_overlap.py +++ b/numpy/_core/tests/test_mem_overlap.py @@ -281,7 +281,7 @@ def random_slice_fixed_size(n, step, size): steps = tuple(rng.randint(1, 11, dtype=np.intp) if rng.randint(0, 5, dtype=np.intp) == 0 else 1 for j in range(x.ndim)) - s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps)) + s1 = tuple(itertools.starmap(random_slice, zip(x.shape, steps))) t1 = np.arange(x.ndim) rng.shuffle(t1) @@ -301,8 +301,7 @@ def random_slice_fixed_size(n, step, size): steps2 = tuple(rng.randint(1, max(2, p // (1 + pa))) if rng.randint(0, 5) == 0 else 1 for p, s, pa in zip(x.shape, s1, a.shape)) - s2 = tuple(random_slice_fixed_size(p, s, pa) - for p, s, pa in zip(x.shape, steps2, a.shape)) + s2 = tuple(itertools.starmap(random_slice_fixed_size, zip(x.shape, steps2, a.shape))) elif same_steps: steps2 = steps else: @@ -311,7 +310,7 @@ def random_slice_fixed_size(n, step, size): for j in range(x.ndim)) if not equal_size: - s2 = tuple(random_slice(p, s) for p, s in zip(x.shape, 
steps2)) + s2 = tuple(itertools.starmap(random_slice, zip(x.shape, steps2))) a = a.transpose(t1) b = x[s2].transpose(t2) @@ -442,7 +441,7 @@ def random_slice(n, step): for j in range(x.ndim)) t1 = np.arange(x.ndim) rng.shuffle(t1) - s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps)) + s1 = tuple(itertools.starmap(random_slice, zip(x.shape, steps))) a = x[s1].transpose(t1) assert_(not internal_overlap(a)) diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py index 0b086df21c60..ad671c94c78b 100644 --- a/numpy/_core/tests/test_scalarmath.py +++ b/numpy/_core/tests/test_scalarmath.py @@ -314,7 +314,7 @@ def test_float_modulus_exact(self): dividend = nlst + [0] + plst divisor = nlst + plst arg = list(itertools.product(dividend, divisor)) - tgt = [divmod(*t) for t in arg] + tgt = list(itertools.starmap(divmod, arg)) a, b = np.array(arg, dtype=int).T # convert exact integer results from Python to float so that @@ -329,7 +329,7 @@ def test_float_modulus_exact(self): fa = a.astype(dt) fb = b.astype(dt) # use list comprehension so a_ and b_ are scalars - div, rem = zip(*[op(a_, b_) for a_, b_ in zip(fa, fb)]) + div, rem = zip(*list(itertools.starmap(op, zip(fa, fb)))) assert_equal(div, tgtdiv, err_msg=msg) assert_equal(rem, tgtrem, err_msg=msg) diff --git a/numpy/_core/tests/test_simd.py b/numpy/_core/tests/test_simd.py index fdcab688963e..385b2cc6c0bf 100644 --- a/numpy/_core/tests/test_simd.py +++ b/numpy/_core/tests/test_simd.py @@ -302,11 +302,11 @@ def test_math_max_min(self): data_b = self._data(self.nlanes) vdata_a, vdata_b = self.load(data_a), self.load(data_b) - data_max = [max(a, b) for a, b in zip(data_a, data_b)] + data_max = list(itertools.starmap(max, zip(data_a, data_b))) simd_max = self.max(vdata_a, vdata_b) assert simd_max == data_max - data_min = [min(a, b) for a, b in zip(data_a, data_b)] + data_min = list(itertools.starmap(min, zip(data_a, data_b))) simd_min = self.min(vdata_a, vdata_b) assert simd_min == data_min @@ -613,7 +613,7 @@ def to_bool(vector): vdata_a = self.setall(case_operand1) vdata_b = self.setall(case_operand2) vcmp = to_bool(intrin(vdata_a, vdata_b)) - data_cmp = [py_comp(a, b) for a, b in zip(data_a, data_b)] + data_cmp = list(itertools.starmap(py_comp, zip(data_a, data_b))) assert vcmp == data_cmp @pytest.mark.parametrize("intrin", ["any", "all"]) @@ -1036,7 +1036,7 @@ def test_operators_comparison(self, func, intrin): def to_bool(vector): return [lane == mask_true for lane in vector] - data_cmp = [func(a, b) for a, b in zip(data_a, data_b)] + data_cmp = list(itertools.starmap(func, zip(data_a, data_b))) cmp = to_bool(intrin(vdata_a, vdata_b)) assert cmp == data_cmp diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index 7960142162c5..7ea94eafa7da 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -1,9 +1,10 @@ import sys +import operator +from itertools import starmap + import pytest -import operator import numpy as np - from numpy.testing import assert_array_equal, assert_raises, IS_PYPY from numpy.testing._private.utils import requires_memory @@ -65,7 +66,7 @@ def test_string_comparisons(op, ufunc, sym, dtypes, aligned): np.random.shuffle(arr2) arr[0] = arr2[0] # make sure one matches - expected = [op(d1, d2) for d1, d2 in zip(arr.tolist(), arr2.tolist())] + expected = list(starmap(op, zip(arr.tolist(), arr2.tolist()))) assert_array_equal(op(arr, arr2), expected) assert_array_equal(ufunc(arr, arr2), expected) assert_array_equal( diff --git 
a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index 22ad1b8ac302..0078c2a9d54b 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -508,7 +508,7 @@ def test_division_int_boundary(self, dtype, ex_val): ac = a.copy() ac //= b div_ab = a // b - div_lst = [c_div(x, y) for x, y in zip(a_lst, b_lst)] + div_lst = list(itertools.starmap(c_div, zip(a_lst, b_lst))) msg = "Integer arrays floor division check (//)" assert all(div_ab == div_lst), msg @@ -740,7 +740,7 @@ def test_float_remainder_exact(self): dividend = nlst + [0] + plst divisor = nlst + plst arg = list(itertools.product(dividend, divisor)) - tgt = [divmod(*t) for t in arg] + tgt = list(itertools.starmap(divmod, arg)) a, b = np.array(arg, dtype=int).T # convert exact integer results from Python to float so that diff --git a/numpy/lib/_arrayterator_impl.py b/numpy/lib/_arrayterator_impl.py index 5bb1630a9300..9aa7bcc0690c 100644 --- a/numpy/lib/_arrayterator_impl.py +++ b/numpy/lib/_arrayterator_impl.py @@ -9,6 +9,7 @@ """ from operator import mul from functools import reduce +from itertools import starmap __all__ = ['Arrayterator'] @@ -133,8 +134,7 @@ def __array__(self, dtype=None, copy=None): Return corresponding data. """ - slice_ = tuple(slice(*t) for t in zip( - self.start, self.stop, self.step)) + slice_ = tuple(starmap(slice, zip(self.start, self.stop, self.step))) return self.var[slice_] @property @@ -210,7 +210,7 @@ def __iter__(self): count = count // self.shape[i] # yield a block - slice_ = tuple(slice(*t) for t in zip(start, stop, step)) + slice_ = tuple(starmap(slice, zip(start, stop, step))) yield self.var[slice_] # Update start position, taking care of overflow to diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index e44b27a68adb..b5bf32398339 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -4,6 +4,7 @@ import re import sys import warnings +from itertools import starmap import numpy as np import numpy._core.numeric as _nx @@ -1971,7 +1972,7 @@ def trim_zeros(filt, trim='fb', axis=None): sl = slice(start[0], stop[0]) elif axis is None: # trim all axes - sl = tuple(slice(*x) for x in zip(start, stop)) + sl = tuple(starmap(slice, zip(start, stop))) else: # only trim single axis axis = normalize_axis_index(axis, filt_.ndim) diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py index 56ee65d38575..5a2372ef75f7 100644 --- a/numpy/lib/_iotools.py +++ b/numpy/lib/_iotools.py @@ -181,7 +181,7 @@ def __init__(self, delimiter=None, comments='#', autostrip=True, elif hasattr(delimiter, '__iter__'): _handyman = self._variablewidth_splitter idx = np.cumsum([0] + list(delimiter)) - delimiter = [slice(i, j) for (i, j) in itertools.pairwise(idx)] + delimiter = list(itertools.starmap(slice, itertools.pairwise(idx))) # Delimiter is a single integer elif int(delimiter): (_handyman, delimiter) = ( diff --git a/numpy/lib/tests/test_arrayterator.py b/numpy/lib/tests/test_arrayterator.py index e64d1d1e3ece..675cc513ab54 100644 --- a/numpy/lib/tests/test_arrayterator.py +++ b/numpy/lib/tests/test_arrayterator.py @@ -1,5 +1,6 @@ from operator import mul from functools import reduce +from itertools import starmap import numpy as np from numpy.random import randint @@ -31,7 +32,7 @@ def test(): start = [randint(dim) for dim in shape] stop = [randint(dim) + 1 for dim in shape] step = [randint(dim) + 1 for dim in shape] - slice_ = tuple(slice(*t) for t in zip(start, stop, step)) + slice_ = tuple(starmap(slice, zip(start, 
stop, step))) c = b[slice_] d = a[slice_] diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index 22151b95e27d..ef1b6904ae39 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -2183,13 +2183,12 @@ def _ezclump(mask): return [slice(0, mask.size)] r = [slice(0, idx[0])] - r.extend((slice(left, right) - for left, right in zip(idx[1:-1:2], idx[2::2]))) + r.extend(itertools.starmap(slice, zip(idx[1:-1:2], idx[2::2]))) else: if len(idx) == 0: return [] - r = [slice(left, right) for left, right in zip(idx[:-1:2], idx[1::2])] + r = list(itertools.starmap(slice, zip(idx[:-1:2], idx[1::2]))) if mask[-1]: r.append(slice(idx[-1], mask.size)) From 9166090bc11be77c1f598c27dac7ed2a8912920d Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 1 May 2025 16:53:40 +0200 Subject: [PATCH 44/67] Revert "STY: Apply ruff/refurb rule FURB113" This reverts commit a53b45f66255ee378a29d3a255a1de52fb1af522. --- numpy/_core/code_generators/generate_umath.py | 18 +++++++++--------- numpy/lib/tests/test_arraysetops.py | 8 ++++++-- numpy/random/tests/test_direct.py | 3 ++- 3 files changed, 17 insertions(+), 12 deletions(-) diff --git a/numpy/_core/code_generators/generate_umath.py b/numpy/_core/code_generators/generate_umath.py index eb5751df590f..fbda9393b5a5 100644 --- a/numpy/_core/code_generators/generate_umath.py +++ b/numpy/_core/code_generators/generate_umath.py @@ -1466,11 +1466,13 @@ def make_arrays(funcdict): funcnames = ', '.join(funclist) signames = ', '.join(siglist) datanames = ', '.join(datalist) - code1list.extend(( - f"static PyUFuncGenericFunction {name}_functions[] = {{{funcnames}}};", - f"static void * {name}_data[] = {{{datanames}}};", - f"static const char {name}_signatures[] = {{{signames}}};", - )) + code1list.append( + "static PyUFuncGenericFunction %s_functions[] = {%s};" + % (name, funcnames)) + code1list.append("static void * %s_data[] = {%s};" + % (name, datanames)) + code1list.append("static const char %s_signatures[] = {%s};" + % (name, signames)) uf.empty = False else: uf.empty = True @@ -1571,10 +1573,8 @@ def make_ufuncs(funcdict): funcname=f"{english_upper(chartoname[c])}_{name}_indexed", )) - mlist.extend(( - f'PyDict_SetItemString(dictionary, "{name}", f);', - "Py_DECREF(f);", - )) + mlist.append(r"""PyDict_SetItemString(dictionary, "%s", f);""" % name) + mlist.append(r"""Py_DECREF(f);""") code3list.append('\n'.join(mlist)) return '\n'.join(code3list) diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py index b4c4b39aac83..788a4cecdb44 100644 --- a/numpy/lib/tests/test_arraysetops.py +++ b/numpy/lib/tests/test_arraysetops.py @@ -688,7 +688,8 @@ def get_types(self): types = [] types.extend(np.typecodes['AllInteger']) types.extend(np.typecodes['AllFloat']) - types.extend(('datetime64[D]', 'timedelta64[D]')) + types.append('datetime64[D]') + types.append('timedelta64[D]') return types def test_unique_1d(self): @@ -870,7 +871,10 @@ def test_unique_axis(self): types = [] types.extend(np.typecodes['AllInteger']) types.extend(np.typecodes['AllFloat']) - types.extend(('datetime64[D]', 'timedelta64[D]', [('a', int), ('b', int)], [('a', int), ('b', float)])) + types.append('datetime64[D]') + types.append('timedelta64[D]') + types.append([('a', int), ('b', int)]) + types.append([('a', int), ('b', float)]) for dtype in types: self._run_axis_tests(dtype) diff --git a/numpy/random/tests/test_direct.py b/numpy/random/tests/test_direct.py index 23da9714ddb8..c8a83f3bc40b 100644 --- 
a/numpy/random/tests/test_direct.py +++ b/numpy/random/tests/test_direct.py @@ -123,7 +123,8 @@ def gauss_from_uint(x, n, bits): loc += 2 f = np.sqrt(-2.0 * np.log(r2) / r2) - gauss.extend((f * x2, f * x1)) + gauss.append(f * x2) + gauss.append(f * x1) return gauss[:n] From c561fc58615126640c0f94dc301f8e4d4bb1fb8e Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 1 May 2025 16:55:32 +0200 Subject: [PATCH 45/67] STY: add missing blank line --- doc/source/reference/simd/gen_features.py | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/source/reference/simd/gen_features.py b/doc/source/reference/simd/gen_features.py index 2d8a2e49cd2f..95af997fd2d0 100644 --- a/doc/source/reference/simd/gen_features.py +++ b/doc/source/reference/simd/gen_features.py @@ -3,6 +3,7 @@ """ from os import path from itertools import starmap + from numpy.distutils.ccompiler_opt import CCompilerOpt class FakeCCompilerOpt(CCompilerOpt): From e329915a305ee12c2ebf1de379f6127187d85673 Mon Sep 17 00:00:00 2001 From: Dimitri Papadopoulos <3234522+DimitriPapadopoulos@users.noreply.github.com> Date: Thu, 1 May 2025 17:44:38 +0200 Subject: [PATCH 46/67] Revert "STY: Apply ruff/refurb rule FURB140" This reverts commit f1b77fae7d74e609d7c25adc814c41df3cea5679. --- benchmarks/benchmarks/bench_ufunc.py | 4 ++-- doc/source/reference/simd/gen_features.py | 3 +-- numpy/_core/code_generators/genapi.py | 3 +-- numpy/_core/tests/test_cython.py | 4 ++-- numpy/_core/tests/test_mem_overlap.py | 9 +++++---- numpy/_core/tests/test_scalarmath.py | 4 ++-- numpy/_core/tests/test_simd.py | 8 ++++---- numpy/_core/tests/test_strings.py | 7 +++---- numpy/_core/tests/test_umath.py | 4 ++-- numpy/lib/_arrayterator_impl.py | 6 +++--- numpy/lib/_function_base_impl.py | 3 +-- numpy/lib/_iotools.py | 2 +- numpy/lib/tests/test_arrayterator.py | 3 +-- numpy/ma/extras.py | 5 +++-- 14 files changed, 31 insertions(+), 34 deletions(-) diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py index 810555fec73b..4d9f3c9c8f61 100644 --- a/benchmarks/benchmarks/bench_ufunc.py +++ b/benchmarks/benchmarks/bench_ufunc.py @@ -61,7 +61,7 @@ def setup(self, ufuncname): self.args.append(arg) def time_afdn_types(self, ufuncname): - list(itertools.starmap(self.afdn, self.args)) + [self.afdn(*arg) for arg in self.args] class Broadcast(Benchmark): @@ -108,7 +108,7 @@ def setup(self, ufuncname): self.args.append(arg) def time_ufunc_types(self, ufuncname): - list(itertools.starmap(self.ufn, self.args)) + [self.ufn(*arg) for arg in self.args] class MethodsV0(Benchmark): diff --git a/doc/source/reference/simd/gen_features.py b/doc/source/reference/simd/gen_features.py index 95af997fd2d0..eb516e3ff2ac 100644 --- a/doc/source/reference/simd/gen_features.py +++ b/doc/source/reference/simd/gen_features.py @@ -2,7 +2,6 @@ Generate CPU features tables from CCompilerOpt """ from os import path -from itertools import starmap from numpy.distutils.ccompiler_opt import CCompilerOpt @@ -124,7 +123,7 @@ def gen_rst_table(self, field_names, rows, tab_size=4): cformat = ' '.join('{:<%d}' % i for i in cls_len) border = cformat.format(*['=' * i for i in cls_len]) - rows = list(starmap(cformat.format, rows)) + rows = [cformat.format(*row) for row in rows] # header rows = [border, cformat.format(*field_names), border] + rows # footer diff --git a/numpy/_core/code_generators/genapi.py b/numpy/_core/code_generators/genapi.py index 3045149b3405..9b51904a2d7d 100644 --- 
a/numpy/_core/code_generators/genapi.py +++ b/numpy/_core/code_generators/genapi.py @@ -15,7 +15,6 @@ import textwrap from os.path import join -from itertools import starmap def get_processor(): @@ -153,7 +152,7 @@ def _format_arg(self, typename, name): return typename + ' ' + name def __str__(self): - argstr = ', '.join(list(starmap(self._format_arg, self.args))) + argstr = ', '.join([self._format_arg(*a) for a in self.args]) if self.doc: doccomment = f'/* {self.doc} */\n' else: diff --git a/numpy/_core/tests/test_cython.py b/numpy/_core/tests/test_cython.py index 3fd45582c5eb..81ddc63258c2 100644 --- a/numpy/_core/tests/test_cython.py +++ b/numpy/_core/tests/test_cython.py @@ -3,7 +3,6 @@ import subprocess import sys import pytest -from itertools import starmap import numpy as np from numpy.testing import assert_array_equal, IS_WASM, IS_EDITABLE @@ -279,7 +278,8 @@ def test_npyiter_api(install_temp): x is y for x, y in zip(checks.get_npyiter_operands(it), it.operands) ) assert all( - starmap(np.allclose, zip(checks.get_npyiter_itviews(it), it.itviews)) + np.allclose(x, y) + for x, y in zip(checks.get_npyiter_itviews(it), it.itviews) ) diff --git a/numpy/_core/tests/test_mem_overlap.py b/numpy/_core/tests/test_mem_overlap.py index 9317d87358f3..0d80951a854a 100644 --- a/numpy/_core/tests/test_mem_overlap.py +++ b/numpy/_core/tests/test_mem_overlap.py @@ -281,7 +281,7 @@ def random_slice_fixed_size(n, step, size): steps = tuple(rng.randint(1, 11, dtype=np.intp) if rng.randint(0, 5, dtype=np.intp) == 0 else 1 for j in range(x.ndim)) - s1 = tuple(itertools.starmap(random_slice, zip(x.shape, steps))) + s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps)) t1 = np.arange(x.ndim) rng.shuffle(t1) @@ -301,7 +301,8 @@ def random_slice_fixed_size(n, step, size): steps2 = tuple(rng.randint(1, max(2, p // (1 + pa))) if rng.randint(0, 5) == 0 else 1 for p, s, pa in zip(x.shape, s1, a.shape)) - s2 = tuple(itertools.starmap(random_slice_fixed_size, zip(x.shape, steps2, a.shape))) + s2 = tuple(random_slice_fixed_size(p, s, pa) + for p, s, pa in zip(x.shape, steps2, a.shape)) elif same_steps: steps2 = steps else: @@ -310,7 +311,7 @@ def random_slice_fixed_size(n, step, size): for j in range(x.ndim)) if not equal_size: - s2 = tuple(itertools.starmap(random_slice, zip(x.shape, steps2))) + s2 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps2)) a = a.transpose(t1) b = x[s2].transpose(t2) @@ -441,7 +442,7 @@ def random_slice(n, step): for j in range(x.ndim)) t1 = np.arange(x.ndim) rng.shuffle(t1) - s1 = tuple(itertools.starmap(random_slice, zip(x.shape, steps))) + s1 = tuple(random_slice(p, s) for p, s in zip(x.shape, steps)) a = x[s1].transpose(t1) assert_(not internal_overlap(a)) diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py index ad671c94c78b..0b086df21c60 100644 --- a/numpy/_core/tests/test_scalarmath.py +++ b/numpy/_core/tests/test_scalarmath.py @@ -314,7 +314,7 @@ def test_float_modulus_exact(self): dividend = nlst + [0] + plst divisor = nlst + plst arg = list(itertools.product(dividend, divisor)) - tgt = list(itertools.starmap(divmod, arg)) + tgt = [divmod(*t) for t in arg] a, b = np.array(arg, dtype=int).T # convert exact integer results from Python to float so that @@ -329,7 +329,7 @@ def test_float_modulus_exact(self): fa = a.astype(dt) fb = b.astype(dt) # use list comprehension so a_ and b_ are scalars - div, rem = zip(*list(itertools.starmap(op, zip(fa, fb)))) + div, rem = zip(*[op(a_, b_) for a_, b_ in zip(fa, fb)]) assert_equal(div, 
tgtdiv, err_msg=msg) assert_equal(rem, tgtrem, err_msg=msg) diff --git a/numpy/_core/tests/test_simd.py b/numpy/_core/tests/test_simd.py index 385b2cc6c0bf..fdcab688963e 100644 --- a/numpy/_core/tests/test_simd.py +++ b/numpy/_core/tests/test_simd.py @@ -302,11 +302,11 @@ def test_math_max_min(self): data_b = self._data(self.nlanes) vdata_a, vdata_b = self.load(data_a), self.load(data_b) - data_max = list(itertools.starmap(max, zip(data_a, data_b))) + data_max = [max(a, b) for a, b in zip(data_a, data_b)] simd_max = self.max(vdata_a, vdata_b) assert simd_max == data_max - data_min = list(itertools.starmap(min, zip(data_a, data_b))) + data_min = [min(a, b) for a, b in zip(data_a, data_b)] simd_min = self.min(vdata_a, vdata_b) assert simd_min == data_min @@ -613,7 +613,7 @@ def to_bool(vector): vdata_a = self.setall(case_operand1) vdata_b = self.setall(case_operand2) vcmp = to_bool(intrin(vdata_a, vdata_b)) - data_cmp = list(itertools.starmap(py_comp, zip(data_a, data_b))) + data_cmp = [py_comp(a, b) for a, b in zip(data_a, data_b)] assert vcmp == data_cmp @pytest.mark.parametrize("intrin", ["any", "all"]) @@ -1036,7 +1036,7 @@ def test_operators_comparison(self, func, intrin): def to_bool(vector): return [lane == mask_true for lane in vector] - data_cmp = list(itertools.starmap(func, zip(data_a, data_b))) + data_cmp = [func(a, b) for a, b in zip(data_a, data_b)] cmp = to_bool(intrin(vdata_a, vdata_b)) assert cmp == data_cmp diff --git a/numpy/_core/tests/test_strings.py b/numpy/_core/tests/test_strings.py index 7ea94eafa7da..7960142162c5 100644 --- a/numpy/_core/tests/test_strings.py +++ b/numpy/_core/tests/test_strings.py @@ -1,10 +1,9 @@ import sys -import operator -from itertools import starmap - import pytest +import operator import numpy as np + from numpy.testing import assert_array_equal, assert_raises, IS_PYPY from numpy.testing._private.utils import requires_memory @@ -66,7 +65,7 @@ def test_string_comparisons(op, ufunc, sym, dtypes, aligned): np.random.shuffle(arr2) arr[0] = arr2[0] # make sure one matches - expected = list(starmap(op, zip(arr.tolist(), arr2.tolist()))) + expected = [op(d1, d2) for d1, d2 in zip(arr.tolist(), arr2.tolist())] assert_array_equal(op(arr, arr2), expected) assert_array_equal(ufunc(arr, arr2), expected) assert_array_equal( diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index 0078c2a9d54b..22ad1b8ac302 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -508,7 +508,7 @@ def test_division_int_boundary(self, dtype, ex_val): ac = a.copy() ac //= b div_ab = a // b - div_lst = list(itertools.starmap(c_div, zip(a_lst, b_lst))) + div_lst = [c_div(x, y) for x, y in zip(a_lst, b_lst)] msg = "Integer arrays floor division check (//)" assert all(div_ab == div_lst), msg @@ -740,7 +740,7 @@ def test_float_remainder_exact(self): dividend = nlst + [0] + plst divisor = nlst + plst arg = list(itertools.product(dividend, divisor)) - tgt = list(itertools.starmap(divmod, arg)) + tgt = [divmod(*t) for t in arg] a, b = np.array(arg, dtype=int).T # convert exact integer results from Python to float so that diff --git a/numpy/lib/_arrayterator_impl.py b/numpy/lib/_arrayterator_impl.py index 9aa7bcc0690c..5bb1630a9300 100644 --- a/numpy/lib/_arrayterator_impl.py +++ b/numpy/lib/_arrayterator_impl.py @@ -9,7 +9,6 @@ """ from operator import mul from functools import reduce -from itertools import starmap __all__ = ['Arrayterator'] @@ -134,7 +133,8 @@ def __array__(self, dtype=None, copy=None): Return corresponding 
data. """ - slice_ = tuple(starmap(slice, zip(self.start, self.stop, self.step))) + slice_ = tuple(slice(*t) for t in zip( + self.start, self.stop, self.step)) return self.var[slice_] @property @@ -210,7 +210,7 @@ def __iter__(self): count = count // self.shape[i] # yield a block - slice_ = tuple(starmap(slice, zip(start, stop, step))) + slice_ = tuple(slice(*t) for t in zip(start, stop, step)) yield self.var[slice_] # Update start position, taking care of overflow to diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index b5bf32398339..e44b27a68adb 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -4,7 +4,6 @@ import re import sys import warnings -from itertools import starmap import numpy as np import numpy._core.numeric as _nx @@ -1972,7 +1971,7 @@ def trim_zeros(filt, trim='fb', axis=None): sl = slice(start[0], stop[0]) elif axis is None: # trim all axes - sl = tuple(starmap(slice, zip(start, stop))) + sl = tuple(slice(*x) for x in zip(start, stop)) else: # only trim single axis axis = normalize_axis_index(axis, filt_.ndim) diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py index 5a2372ef75f7..56ee65d38575 100644 --- a/numpy/lib/_iotools.py +++ b/numpy/lib/_iotools.py @@ -181,7 +181,7 @@ def __init__(self, delimiter=None, comments='#', autostrip=True, elif hasattr(delimiter, '__iter__'): _handyman = self._variablewidth_splitter idx = np.cumsum([0] + list(delimiter)) - delimiter = list(itertools.starmap(slice, itertools.pairwise(idx))) + delimiter = [slice(i, j) for (i, j) in itertools.pairwise(idx)] # Delimiter is a single integer elif int(delimiter): (_handyman, delimiter) = ( diff --git a/numpy/lib/tests/test_arrayterator.py b/numpy/lib/tests/test_arrayterator.py index 675cc513ab54..e64d1d1e3ece 100644 --- a/numpy/lib/tests/test_arrayterator.py +++ b/numpy/lib/tests/test_arrayterator.py @@ -1,6 +1,5 @@ from operator import mul from functools import reduce -from itertools import starmap import numpy as np from numpy.random import randint @@ -32,7 +31,7 @@ def test(): start = [randint(dim) for dim in shape] stop = [randint(dim) + 1 for dim in shape] step = [randint(dim) + 1 for dim in shape] - slice_ = tuple(starmap(slice, zip(start, stop, step))) + slice_ = tuple(slice(*t) for t in zip(start, stop, step)) c = b[slice_] d = a[slice_] diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py index ef1b6904ae39..22151b95e27d 100644 --- a/numpy/ma/extras.py +++ b/numpy/ma/extras.py @@ -2183,12 +2183,13 @@ def _ezclump(mask): return [slice(0, mask.size)] r = [slice(0, idx[0])] - r.extend(itertools.starmap(slice, zip(idx[1:-1:2], idx[2::2]))) + r.extend((slice(left, right) + for left, right in zip(idx[1:-1:2], idx[2::2]))) else: if len(idx) == 0: return [] - r = list(itertools.starmap(slice, zip(idx[:-1:2], idx[1::2]))) + r = [slice(left, right) for left, right in zip(idx[:-1:2], idx[1::2])] if mask[-1]: r.append(slice(idx[-1], mask.size)) From 5b5e0ff6b302b44e48b5f043c643c81ea158770e Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 2 May 2025 18:02:23 +0000 Subject: [PATCH 47/67] MAINT: Bump github/codeql-action from 3.28.16 to 3.28.17 Bumps [github/codeql-action](https://github.com/github/codeql-action) from 3.28.16 to 3.28.17. 
- [Release notes](https://github.com/github/codeql-action/releases) - [Changelog](https://github.com/github/codeql-action/blob/main/CHANGELOG.md) - [Commits](https://github.com/github/codeql-action/compare/28deaeda66b76a05916b6923827895f2b14ab387...60168efe1c415ce0f5521ea06d5c2062adbeed1b) --- updated-dependencies: - dependency-name: github/codeql-action dependency-version: 3.28.17 dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/scorecards.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 68e75754346a..68ef1e811e88 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@28deaeda66b76a05916b6923827895f2b14ab387 # v3.28.16 + uses: github/codeql-action/init@60168efe1c415ce0f5521ea06d5c2062adbeed1b # v3.28.17 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@28deaeda66b76a05916b6923827895f2b14ab387 # v3.28.16 + uses: github/codeql-action/autobuild@60168efe1c415ce0f5521ea06d5c2062adbeed1b # v3.28.17 # ℹ️ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@28deaeda66b76a05916b6923827895f2b14ab387 # v3.28.16 + uses: github/codeql-action/analyze@60168efe1c415ce0f5521ea06d5c2062adbeed1b # v3.28.17 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index c2b15262ea41..360261b6a186 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@28deaeda66b76a05916b6923827895f2b14ab387 # v2.1.27 + uses: github/codeql-action/upload-sarif@60168efe1c415ce0f5521ea06d5c2062adbeed1b # v2.1.27 with: sarif_file: results.sarif From d1ca438822ad7ea98da8a817630a8e26f85644da Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli Date: Sat, 3 May 2025 10:10:50 +0100 Subject: [PATCH 48/67] TYP: Type ``MaskedArray.all`` and ``MaskedArray.any`` (#28880) --- numpy/ma/core.pyi | 93 ++++++++++++++++++++++++++- numpy/typing/tests/data/fail/ma.pyi | 8 +++ numpy/typing/tests/data/reveal/ma.pyi | 22 +++++++ 3 files changed, 121 insertions(+), 2 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index 71a05d05a940..f3fc825124e1 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -487,8 +487,97 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def put(self, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... def ids(self) -> tuple[int, int]: ... def iscontiguous(self) -> bool: ... - def all(self, axis=..., out=..., keepdims=...): ... - def any(self, axis=..., out=..., keepdims=...): ... 
+ + @overload + def all( + self, + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + ) -> bool_: ... + @overload + def all( + self, + axis: _ShapeLike | None = None, + out: None = None, + *, + keepdims: Literal[True], + ) -> _MaskedArray[bool_]: ... + @overload + def all( + self, + axis: _ShapeLike | None, + out: None, + keepdims: Literal[True], + ) -> _MaskedArray[bool_]: ... + @overload + def all( + self, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> bool_ | _MaskedArray[bool_]: ... + @overload + def all( + self, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def all( + self, + axis: _ShapeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + + @overload + def any( + self, + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + ) -> bool_: ... + @overload + def any( + self, + axis: _ShapeLike | None = None, + out: None = None, + *, + keepdims: Literal[True], + ) -> _MaskedArray[bool_]: ... + @overload + def any( + self, + axis: _ShapeLike | None, + out: None, + keepdims: Literal[True], + ) -> _MaskedArray[bool_]: ... + @overload + def any( + self, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> bool_ | _MaskedArray[bool_]: ... + @overload + def any( + self, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def any( + self, + axis: _ShapeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + def nonzero(self) -> tuple[_Array1D[intp], *tuple[_Array1D[intp], ...]]: ... def trace(self, offset=..., axis1=..., axis2=..., dtype=..., out=...): ... def dot(self, b, out=..., strict=...): ... 
diff --git a/numpy/typing/tests/data/fail/ma.pyi b/numpy/typing/tests/data/fail/ma.pyi index d79008b60d25..78f98e918c43 100644 --- a/numpy/typing/tests/data/fail/ma.pyi +++ b/numpy/typing/tests/data/fail/ma.pyi @@ -63,6 +63,14 @@ np.ma.argmax(m, keepdims=1.0) # E: No overload variant np.ma.argmax(m, out=1.0) # E: No overload variant np.ma.argmax(m, fill_value=lambda x: 27) # E: No overload variant +m.all(axis=1.0) # E: No overload variant +m.all(keepdims=1.0) # E: No overload variant +m.all(out=1.0) # E: No overload variant + +m.any(axis=1.0) # E: No overload variant +m.any(keepdims=1.0) # E: No overload variant +m.any(out=1.0) # E: No overload variant + m.sort(axis=(0,1)) # E: No overload variant m.sort(axis=None) # E: No overload variant m.sort(kind='cabbage') # E: No overload variant diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 79bfe7f86744..39245fddf780 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -141,6 +141,28 @@ assert_type(np.ma.argmax(MAR_b, keepdims=True), Any) assert_type(np.ma.argmax(MAR_f4, out=MAR_subclass), MaskedArraySubclass) assert_type(np.ma.argmax(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_b.all(), np.bool) +assert_type(MAR_f4.all(), np.bool) +assert_type(MAR_f4.all(keepdims=False), np.bool) +assert_type(MAR_b.all(axis=0), np.bool | MaskedArray[np.bool]) +assert_type(MAR_b.all(axis=0, keepdims=True), MaskedArray[np.bool]) +assert_type(MAR_b.all(0, None, True), MaskedArray[np.bool]) +assert_type(MAR_f4.all(axis=0), np.bool | MaskedArray[np.bool]) +assert_type(MAR_b.all(keepdims=True), MaskedArray[np.bool]) +assert_type(MAR_f4.all(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.all(None, out=MAR_subclass), MaskedArraySubclass) + +assert_type(MAR_b.any(), np.bool) +assert_type(MAR_f4.any(), np.bool) +assert_type(MAR_f4.any(keepdims=False), np.bool) +assert_type(MAR_b.any(axis=0), np.bool | MaskedArray[np.bool]) +assert_type(MAR_b.any(axis=0, keepdims=True), MaskedArray[np.bool]) +assert_type(MAR_b.any(0, None, True), MaskedArray[np.bool]) +assert_type(MAR_f4.any(axis=0), np.bool | MaskedArray[np.bool]) +assert_type(MAR_b.any(keepdims=True), MaskedArray[np.bool]) +assert_type(MAR_f4.any(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.any(None, out=MAR_subclass), MaskedArraySubclass) + assert_type(MAR_f4.sort(), None) assert_type(MAR_f4.sort(axis=0, kind='quicksort', order='K', endwith=False, fill_value=42., stable=False), None) From 63ff9587808f0adbf757ab8936e55eed8429a9fc Mon Sep 17 00:00:00 2001 From: Marco Edward Gorelli Date: Sat, 3 May 2025 12:45:08 +0100 Subject: [PATCH 49/67] TYP: Type ``MaskedArray.{__setmask__,mask,harden_mask,soften_mask,hardmask,unshare_mask,sharedmask,shrink_mask}`` (#28867) --- numpy/ma/core.py | 3 ++- numpy/ma/core.pyi | 18 +++++++++--------- numpy/typing/tests/data/fail/ma.pyi | 2 ++ numpy/typing/tests/data/pass/ma.py | 6 +++++- numpy/typing/tests/data/reveal/ma.pyi | 14 ++++++++++++++ 5 files changed, 32 insertions(+), 11 deletions(-) diff --git a/numpy/ma/core.py b/numpy/ma/core.py index add0ad9770d7..bbd64bd76007 100644 --- a/numpy/ma/core.py +++ b/numpy/ma/core.py @@ -3737,7 +3737,8 @@ def shrink_mask(self): Returns ------- - None + result : MaskedArray + A :class:`~ma.MaskedArray` object. 
Examples -------- diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index f3fc825124e1..dc2a472a18d3 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -405,23 +405,23 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def shape(self) -> _ShapeT_co: ... @shape.setter def shape(self: MaskedArray[_ShapeT, Any], shape: _ShapeT, /) -> None: ... - def __setmask__(self, mask, copy=...): ... + def __setmask__(self, mask: _ArrayLikeBool_co, copy: bool = False) -> None: ... @property - def mask(self): ... + def mask(self) -> NDArray[MaskType] | MaskType: ... @mask.setter - def mask(self, value): ... + def mask(self, value: _ArrayLikeBool_co, /) -> None: ... @property def recordmask(self): ... @recordmask.setter def recordmask(self, mask): ... - def harden_mask(self): ... - def soften_mask(self): ... + def harden_mask(self) -> Self: ... + def soften_mask(self) -> Self: ... @property - def hardmask(self): ... - def unshare_mask(self): ... + def hardmask(self) -> bool: ... + def unshare_mask(self) -> Self: ... @property - def sharedmask(self): ... - def shrink_mask(self): ... + def sharedmask(self) -> bool: ... + def shrink_mask(self) -> Self: ... @property def baseclass(self) -> type[NDArray[Any]]: ... data: Any diff --git a/numpy/typing/tests/data/fail/ma.pyi b/numpy/typing/tests/data/fail/ma.pyi index 78f98e918c43..e93be464de10 100644 --- a/numpy/typing/tests/data/fail/ma.pyi +++ b/numpy/typing/tests/data/fail/ma.pyi @@ -131,4 +131,6 @@ np.ma.allclose(m, [1,2,3], masked_equal=4.5) # E: No overload variant np.ma.allclose(m, [1,2,3], rtol='.4') # E: No overload variant np.ma.allclose(m, [1,2,3], atol='.5') # E: No overload variant +m.__setmask__('mask') # E: No overload variant + m.swapaxes(axis1=1, axis2=0) # E: No overload variant diff --git a/numpy/typing/tests/data/pass/ma.py b/numpy/typing/tests/data/pass/ma.py index 7a4e9909e334..abd1a0103005 100644 --- a/numpy/typing/tests/data/pass/ma.py +++ b/numpy/typing/tests/data/pass/ma.py @@ -2,7 +2,11 @@ import numpy as np import numpy.ma +import numpy.typing as npt - +ar_b: npt.NDArray[np.bool] = np.array([True, False, True]) m: np.ma.MaskedArray[Any, np.dtype[np.float64]] = np.ma.masked_array([1.5, 2, 3], mask=[True, False, True]) +m.mask = ar_b +m.mask = np.False_ + diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 39245fddf780..8c22a4ea5e44 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -289,6 +289,7 @@ assert_type(np.ma.put(MAR_f4, 4, 999), None) assert_type(np.ma.put(MAR_f4, 4, 999, mode='clip'), None) assert_type(np.ma.putmask(MAR_f4, [True, False], [0, 1]), None) +assert_type(np.ma.putmask(MAR_f4, np.False_, [0, 1]), None) assert_type(MAR_f4.filled(float('nan')), NDArray[np.float32]) assert_type(MAR_i8.filled(), NDArray[np.int64]) @@ -346,5 +347,18 @@ assert_type(np.ma.nomask, np.bool[Literal[False]]) # https://github.com/python/mypy/issues/18974 assert_type(np.ma.MaskType, type[np.bool]) # type: ignore[assert-type] +assert_type(MAR_1d.__setmask__([True, False]), None) +assert_type(MAR_1d.__setmask__(np.False_), None) + +assert_type(MAR_2d_f4.harden_mask(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_i8.harden_mask(), MaskedArray[np.int64]) +assert_type(MAR_2d_f4.soften_mask(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_i8.soften_mask(), MaskedArray[np.int64]) +assert_type(MAR_f4.unshare_mask(), MaskedArray[np.float32]) +assert_type(MAR_b.shrink_mask(), 
MaskedArray[np.bool_]) + +assert_type(MAR_i8.hardmask, bool) +assert_type(MAR_i8.sharedmask, bool) + assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], *tuple[_Array1D[np.intp], ...]]) assert_type(MAR_2d_f4.nonzero()[0], _Array1D[np.intp]) From 8093fee41aea818073bbc875a6adf97e05f911c7 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 1 May 2025 20:29:05 +0200 Subject: [PATCH 50/67] DEP: deprecate ``numpy.typing.NBitBase`` --- numpy/_typing/__init__.py | 2 +- numpy/_typing/_nbit_base.py | 7 ++++++- numpy/_typing/_nbit_base.pyi | 40 ++++++++++++++++++++++++++++++++++++ numpy/typing/__init__.py | 37 +++++++++++++++++++++++++++------ 4 files changed, 78 insertions(+), 8 deletions(-) create mode 100644 numpy/_typing/_nbit_base.pyi diff --git a/numpy/_typing/__init__.py b/numpy/_typing/__init__.py index 92cdcec84900..097e59390691 100644 --- a/numpy/_typing/__init__.py +++ b/numpy/_typing/__init__.py @@ -6,7 +6,7 @@ _NestedSequence as _NestedSequence, ) from ._nbit_base import ( - NBitBase as NBitBase, + NBitBase as NBitBase, # pyright: ignore[reportDeprecated] _8Bit as _8Bit, _16Bit as _16Bit, _32Bit as _32Bit, diff --git a/numpy/_typing/_nbit_base.py b/numpy/_typing/_nbit_base.py index bf16c436c6da..df2fb64e4040 100644 --- a/numpy/_typing/_nbit_base.py +++ b/numpy/_typing/_nbit_base.py @@ -9,13 +9,17 @@ class NBitBase: """ A type representing `numpy.number` precision during static type checking. - Used exclusively for the purpose static type checking, `NBitBase` + Used exclusively for the purpose of static type checking, `NBitBase` represents the base of a hierarchical set of subclasses. Each subsequent subclass is herein used for representing a lower level of precision, *e.g.* ``64Bit > 32Bit > 16Bit``. .. versionadded:: 1.20 + .. deprecated:: 2.3 + Use ``@typing.overload`` or a ``TypeVar`` with a scalar-type as upper + bound, instead. + Examples -------- Below is a typical usage example: `NBitBase` is herein used for annotating @@ -48,6 +52,7 @@ class NBitBase: ... # note: out: numpy.floating[numpy.typing._64Bit*] """ + # Deprecated in NumPy 2.3, 2025-05-01 def __init_subclass__(cls) -> None: allowed_names = { diff --git a/numpy/_typing/_nbit_base.pyi b/numpy/_typing/_nbit_base.pyi new file mode 100644 index 000000000000..ccf8f5ceac45 --- /dev/null +++ b/numpy/_typing/_nbit_base.pyi @@ -0,0 +1,40 @@ +# pyright: reportDeprecated=false +# pyright: reportGeneralTypeIssues=false +# mypy: disable-error-code=misc + +from typing import final + +from typing_extensions import deprecated + +# Deprecated in NumPy 2.3, 2025-05-01 +@deprecated( + "`NBitBase` is deprecated and will be removed from numpy.typing in the " + "future. Use `@typing.overload` or a `TypeVar` with a scalar-type as upper " + "bound, instead. (deprecated in NumPy 2.3)", +) +@final +class NBitBase: ... + +@final +class _256Bit(NBitBase): ... + +@final +class _128Bit(_256Bit): ... + +@final +class _96Bit(_128Bit): ... + +@final +class _80Bit(_96Bit): ... + +@final +class _64Bit(_80Bit): ... + +@final +class _32Bit(_64Bit): ... + +@final +class _16Bit(_32Bit): ... + +@final +class _8Bit(_16Bit): ... 
diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index 8a4a974a9928..2c75c348667e 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -155,15 +155,40 @@ # NOTE: The API section will be appended with additional entries # further down in this file -from numpy._typing import ( - ArrayLike, - DTypeLike, - NBitBase, - NDArray, -) +# pyright: reportDeprecated=false + +from numpy._typing import ArrayLike, DTypeLike, NBitBase, NDArray __all__ = ["ArrayLike", "DTypeLike", "NBitBase", "NDArray"] + +__DIR = __all__ + [k for k in globals() if k.startswith("__") and k.endswith("__")] +__DIR_SET = frozenset(__DIR) + + +def __dir__() -> list[str]: + return __DIR + +def __getattr__(name: str): + if name == "NBitBase": + import warnings + + # Deprecated in NumPy 2.3, 2025-05-01 + warnings.warn( + "`NBitBase` is deprecated and will be removed from numpy.typing in the " + "future. Use `@typing.overload` or a `TypeVar` with a scalar-type as upper " + "bound, instead. (deprecated in NumPy 2.3)", + DeprecationWarning, + stacklevel=2, + ) + return NBitBase + + if name in __DIR_SET: + return globals()[name] + + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") + + if __doc__ is not None: from numpy._typing._add_docstring import _docstrings __doc__ += _docstrings From 40b61f4f7475c665c325ccc64c9dc635ed071648 Mon Sep 17 00:00:00 2001 From: jorenham Date: Thu, 1 May 2025 20:53:14 +0200 Subject: [PATCH 51/67] DOC: add release note for the ``numpy.typing.NBitBase`` deprecation --- .../upcoming_changes/28884.deprecation.rst | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 doc/release/upcoming_changes/28884.deprecation.rst diff --git a/doc/release/upcoming_changes/28884.deprecation.rst b/doc/release/upcoming_changes/28884.deprecation.rst new file mode 100644 index 000000000000..c1be55fb0dd3 --- /dev/null +++ b/doc/release/upcoming_changes/28884.deprecation.rst @@ -0,0 +1,28 @@ +``numpy.typing.NBitBase`` deprecation +------------------------------------- +The ``numpy.typing.NBitBase`` type has been deprecated and will be removed in a future version. + +This type was previously intended to be used as a generic upper bound for type-parameters, for example: + +.. code-block:: python + + import numpy as np + import numpy.typing as npt + + def f[NT: npt.NBitBase](x: np.complexfloating[NT]) -> np.floating[NT]: ... + +But in NumPy 2.2.0, ``float64`` and ``complex128`` were changed to concrete subtypes, causing static type-checkers to reject ``x: np.float64 = f(np.complex128(42j))``. + +So instead, the better approach is to use ``typing.overload``: + +.. code-block:: python + + import numpy as np + from typing import overload + + @overload + def f(x: np.complex64) -> np.float32: ... + @overload + def f(x: np.complex128) -> np.float64: ... + @overload + def f(x: np.clongdouble) -> np.longdouble: ... 
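
Note: besides ``typing.overload``, the deprecation message above also
suggests a ``TypeVar`` bounded by a concrete scalar type. A minimal sketch
of that alternative (illustrative only; ``FloatT`` and ``passthrough`` are
made-up names, not part of the patch):

    from typing import TypeVar

    import numpy as np

    # Bounding the type variable by a concrete scalar type replaces the old
    # ``NBitBase`` upper bound: whatever floating scalar goes in comes back out.
    FloatT = TypeVar("FloatT", bound=np.floating)

    def passthrough(x: FloatT) -> FloatT:
        return x

    x: np.float32 = passthrough(np.float32(1.5))   # accepted by type checkers
    y: np.float64 = passthrough(np.float64(2.5))   # accepted by type checkers
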
From c454b562fb2219288bb4af4b8b50b722a9faafbd Mon Sep 17 00:00:00 2001 From: jorenham Date: Sat, 3 May 2025 13:39:18 +0200 Subject: [PATCH 52/67] TYP: remove non-existent extended-precision scalar types ported from numpy/numtype#209 --- numpy/__init__.pyi | 16 ++---------- numpy/_core/numerictypes.pyi | 23 +---------------- numpy/_typing/__init__.py | 2 -- numpy/_typing/_extended_precision.py | 15 +---------- numpy/_typing/_nbit_base.py | 25 ++++++------------- numpy/typing/mypy_plugin.py | 11 +------- .../tests/data/misc/extended_precision.pyi | 13 +--------- numpy/typing/tests/test_typing.py | 16 +++--------- 8 files changed, 17 insertions(+), 104 deletions(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 5f176e519a24..b309c355d164 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -57,10 +57,8 @@ from numpy._typing import ( NBitBase, # NOTE: Do not remove the extended precision bit-types even if seemingly unused; # they're used by the mypy plugin - _256Bit, _128Bit, _96Bit, - _80Bit, _64Bit, _32Bit, _16Bit, @@ -160,21 +158,12 @@ from numpy._typing._callable import ( _ComparisonOpGE, ) -# NOTE: Numpy's mypy plugin is used for removing the types unavailable -# to the specific platform +# NOTE: Numpy's mypy plugin is used for removing the types unavailable to the specific platform from numpy._typing._extended_precision import ( - uint128, - uint256, - int128, - int256, - float80, float96, float128, - float256, - complex160, complex192, complex256, - complex512, ) from numpy._array_api_info import __array_namespace_info__ @@ -698,8 +687,7 @@ __all__ = [ # noqa: RUF022 "uint8", "ubyte", "int16", "short", "uint16", "ushort", "int32", "intc", "uint32", "uintc", "int64", "long", "uint64", "ulong", "longlong", "ulonglong", "intp", "uintp", "double", "cdouble", "single", "csingle", "half", "bool_", "int_", "uint", - "uint128", "uint256", "int128", "int256", "float80", "float96", "float128", - "float256", "complex160", "complex192", "complex256", "complex512", + "float96", "float128", "complex192", "complex256", "array2string", "array_str", "array_repr", "set_printoptions", "get_printoptions", "printoptions", "format_float_positional", "format_float_scientific", "require", "seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall", diff --git a/numpy/_core/numerictypes.pyi b/numpy/_core/numerictypes.pyi index f04d11303ad5..3b6b0c63713a 100644 --- a/numpy/_core/numerictypes.pyi +++ b/numpy/_core/numerictypes.pyi @@ -68,20 +68,7 @@ from .multiarray import ( ) from numpy._typing import DTypeLike -from numpy._typing._extended_precision import ( - uint128, - uint256, - int128, - int256, - float80, - float96, - float128, - float256, - complex160, - complex192, - complex256, - complex512, -) +from numpy._typing._extended_precision import float96, float128, complex192, complex256 __all__ = [ "ScalarType", @@ -146,18 +133,10 @@ __all__ = [ "bool_", "int_", "uint", - "uint128", - "uint256", - "int128", - "int256", - "float80", "float96", "float128", - "float256", - "complex160", "complex192", "complex256", - "complex512", ] @type_check_only diff --git a/numpy/_typing/__init__.py b/numpy/_typing/__init__.py index 097e59390691..a0ed7cd53622 100644 --- a/numpy/_typing/__init__.py +++ b/numpy/_typing/__init__.py @@ -11,10 +11,8 @@ _16Bit as _16Bit, _32Bit as _32Bit, _64Bit as _64Bit, - _80Bit as _80Bit, _96Bit as _96Bit, _128Bit as _128Bit, - _256Bit as _256Bit, ) from ._nbit import ( _NBitByte as _NBitByte, diff --git a/numpy/_typing/_extended_precision.py 
b/numpy/_typing/_extended_precision.py index 7246b47d0ee1..73a1847ccbeb 100644 --- a/numpy/_typing/_extended_precision.py +++ b/numpy/_typing/_extended_precision.py @@ -6,22 +6,9 @@ """ import numpy as np -from . import ( - _80Bit, - _96Bit, - _128Bit, - _256Bit, -) +from . import _96Bit, _128Bit -uint128 = np.unsignedinteger[_128Bit] -uint256 = np.unsignedinteger[_256Bit] -int128 = np.signedinteger[_128Bit] -int256 = np.signedinteger[_256Bit] -float80 = np.floating[_80Bit] float96 = np.floating[_96Bit] float128 = np.floating[_128Bit] -float256 = np.floating[_256Bit] -complex160 = np.complexfloating[_80Bit, _80Bit] complex192 = np.complexfloating[_96Bit, _96Bit] complex256 = np.complexfloating[_128Bit, _128Bit] -complex512 = np.complexfloating[_256Bit, _256Bit] diff --git a/numpy/_typing/_nbit_base.py b/numpy/_typing/_nbit_base.py index df2fb64e4040..aa8b85cd1592 100644 --- a/numpy/_typing/_nbit_base.py +++ b/numpy/_typing/_nbit_base.py @@ -56,8 +56,7 @@ class NBitBase: def __init_subclass__(cls) -> None: allowed_names = { - "NBitBase", "_256Bit", "_128Bit", "_96Bit", "_80Bit", - "_64Bit", "_32Bit", "_16Bit", "_8Bit", + "NBitBase", "_128Bit", "_96Bit", "_64Bit", "_32Bit", "_16Bit", "_8Bit" } if cls.__name__ not in allowed_names: raise TypeError('cannot inherit from final class "NBitBase"') @@ -66,40 +65,30 @@ def __init_subclass__(cls) -> None: @final @set_module("numpy._typing") # Silence errors about subclassing a `@final`-decorated class -class _256Bit(NBitBase): # type: ignore[misc] +class _128Bit(NBitBase): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] pass @final @set_module("numpy._typing") -class _128Bit(_256Bit): # type: ignore[misc] +class _96Bit(_128Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] pass @final @set_module("numpy._typing") -class _96Bit(_128Bit): # type: ignore[misc] +class _64Bit(_96Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] pass @final @set_module("numpy._typing") -class _80Bit(_96Bit): # type: ignore[misc] +class _32Bit(_64Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] pass @final @set_module("numpy._typing") -class _64Bit(_80Bit): # type: ignore[misc] +class _16Bit(_32Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] pass @final @set_module("numpy._typing") -class _32Bit(_64Bit): # type: ignore[misc] - pass - -@final -@set_module("numpy._typing") -class _16Bit(_32Bit): # type: ignore[misc] - pass - -@final -@set_module("numpy._typing") -class _8Bit(_16Bit): # type: ignore[misc] +class _8Bit(_16Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] pass diff --git a/numpy/typing/mypy_plugin.py b/numpy/typing/mypy_plugin.py index 81ccf0b64fc1..5c01f261bb79 100644 --- a/numpy/typing/mypy_plugin.py +++ b/numpy/typing/mypy_plugin.py @@ -65,18 +65,10 @@ def _get_precision_dict() -> dict[str, str]: def _get_extended_precision_list() -> list[str]: extended_names = [ - "uint128", - "uint256", - "int128", - "int256", - "float80", "float96", "float128", - "float256", - "complex160", "complex192", "complex256", - "complex512", ] return [i for i in extended_names if hasattr(np, i)] @@ -169,8 +161,7 @@ def get_additional_deps( """Handle all import-based overrides. * Import platform-specific extended-precision `numpy.number` - subclasses (*e.g.* `numpy.float96`, `numpy.float128` and - `numpy.complex256`). + subclasses (*e.g.* `numpy.float96` and `numpy.float128`). * Import the appropriate `ctypes` equivalent to `numpy.intp`. 
""" diff --git a/numpy/typing/tests/data/misc/extended_precision.pyi b/numpy/typing/tests/data/misc/extended_precision.pyi index 02dfcec6a8e2..84b5f516bdde 100644 --- a/numpy/typing/tests/data/misc/extended_precision.pyi +++ b/numpy/typing/tests/data/misc/extended_precision.pyi @@ -1,20 +1,9 @@ import numpy as np -from numpy._typing import _80Bit, _96Bit, _128Bit, _256Bit +from numpy._typing import _96Bit, _128Bit from typing import assert_type -assert_type(np.uint128(), np.unsignedinteger[_128Bit]) -assert_type(np.uint256(), np.unsignedinteger[_256Bit]) - -assert_type(np.int128(), np.signedinteger[_128Bit]) -assert_type(np.int256(), np.signedinteger[_256Bit]) - -assert_type(np.float80(), np.floating[_80Bit]) assert_type(np.float96(), np.floating[_96Bit]) assert_type(np.float128(), np.floating[_128Bit]) -assert_type(np.float256(), np.floating[_256Bit]) - -assert_type(np.complex160(), np.complexfloating[_80Bit, _80Bit]) assert_type(np.complex192(), np.complexfloating[_96Bit, _96Bit]) assert_type(np.complex256(), np.complexfloating[_128Bit, _128Bit]) -assert_type(np.complex512(), np.complexfloating[_256Bit, _256Bit]) diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py index c1e708c876e5..068b27a7709f 100644 --- a/numpy/typing/tests/test_typing.py +++ b/numpy/typing/tests/test_typing.py @@ -249,18 +249,10 @@ def test_code_runs(path: str) -> None: LINENO_MAPPING = { - 6: "uint128", - 7: "uint256", - 9: "int128", - 10: "int256", - 12: "float80", - 13: "float96", - 14: "float128", - 15: "float256", - 17: "complex160", - 18: "complex192", - 19: "complex256", - 20: "complex512", + 6: "float96", + 7: "float128", + 8: "complex192", + 9: "complex256", } From ad5a33a5fbdcb62c88eeb22d24151387b572a9fe Mon Sep 17 00:00:00 2001 From: Ralf Gommers Date: Mon, 5 May 2025 12:39:30 +0200 Subject: [PATCH 53/67] BLD: update vendored Meson to include iOS fix This is PR 21 from https://github.com/numpy/meson --- vendored-meson/meson | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vendored-meson/meson b/vendored-meson/meson index 23ec306e6510..f754c4258805 160000 --- a/vendored-meson/meson +++ b/vendored-meson/meson @@ -1 +1 @@ -Subproject commit 23ec306e65107f5ad39a03709799dc90ea678a54 +Subproject commit f754c4258805056ed7be09830d96af45215d341b From add2d0bb75ea82292eab2e0659c47ea810787922 Mon Sep 17 00:00:00 2001 From: Marco Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Mon, 5 May 2025 16:42:20 +0100 Subject: [PATCH 54/67] TYP: Test ``MaskedArray.transpose`` and ``MaskedArray.T``, remove unnecessary annotations --- numpy/ma/core.pyi | 3 --- numpy/typing/tests/data/reveal/ma.pyi | 7 +++++++ 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/numpy/ma/core.pyi b/numpy/ma/core.pyi index dc2a472a18d3..055f25fe0557 100644 --- a/numpy/ma/core.pyi +++ b/numpy/ma/core.pyi @@ -887,9 +887,6 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): / ) -> MaskedArray[_Shape, _DTypeT_co]: ... - T: Any - transpose: Any - # def toflex(self) -> Incomplete: ... def torecords(self) -> Incomplete: ... 
diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 8c22a4ea5e44..e80426efc03e 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -360,5 +360,12 @@ assert_type(MAR_b.shrink_mask(), MaskedArray[np.bool_]) assert_type(MAR_i8.hardmask, bool) assert_type(MAR_i8.sharedmask, bool) +assert_type(MAR_b.transpose(), MaskedArray[np.bool]) +assert_type(MAR_2d_f4.transpose(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.transpose(1, 0), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.transpose((1, 0)), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_b.T, MaskedArray[np.bool]) +assert_type(MAR_2d_f4.T, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) + assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], *tuple[_Array1D[np.intp], ...]]) assert_type(MAR_2d_f4.nonzero()[0], _Array1D[np.intp]) From 3282fb284ca22dbc8dd945f702b7871bf2664081 Mon Sep 17 00:00:00 2001 From: Marco Gorelli <33491632+MarcoGorelli@users.noreply.github.com> Date: Tue, 6 May 2025 09:07:10 +0100 Subject: [PATCH 55/67] TYP: Fix non-existent ``CanIndex`` annotation in ``ndarray.setfield`` --- numpy/__init__.pyi | 2 +- numpy/typing/tests/data/fail/array_like.pyi | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index b309c355d164..540cf188a967 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -2542,7 +2542,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload # (dtype: ?, type: type[T]) def view(self, /, dtype: DTypeLike, type: type[_ArrayT]) -> _ArrayT: ... - def setfield(self, /, val: ArrayLike, dtype: DTypeLike, offset: CanIndex = 0) -> None: ... + def setfield(self, /, val: ArrayLike, dtype: DTypeLike, offset: SupportsIndex = 0) -> None: ... @overload def getfield(self, dtype: _DTypeLike[_ScalarT], offset: SupportsIndex = 0) -> NDArray[_ScalarT]: ... 
@overload diff --git a/numpy/typing/tests/data/fail/array_like.pyi b/numpy/typing/tests/data/fail/array_like.pyi index 53f0c1ec85e9..6b6c16dd6e70 100644 --- a/numpy/typing/tests/data/fail/array_like.pyi +++ b/numpy/typing/tests/data/fail/array_like.pyi @@ -11,3 +11,5 @@ scalar = np.int64(1) scalar.__array__(dtype=np.float64) # E: No overload variant array = np.array([1]) array.__array__(dtype=np.float64) # E: No overload variant + +array.setfield(np.eye(1), np.int32, (0, 1)) # E: No overload variant From 9c0d2aea137835bdadf844b331bc800b7d381734 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 6 May 2025 15:53:51 +0200 Subject: [PATCH 56/67] TYP: add a ``float64`` overload to ``np.*space`` --- numpy/_core/function_base.pyi | 48 +++++++++++++++++++ .../tests/data/reveal/array_constructors.pyi | 8 ++-- 2 files changed, 52 insertions(+), 4 deletions(-) diff --git a/numpy/_core/function_base.pyi b/numpy/_core/function_base.pyi index 3d0a80c23e6a..d019803abc6a 100644 --- a/numpy/_core/function_base.pyi +++ b/numpy/_core/function_base.pyi @@ -1,11 +1,13 @@ from typing import ( Literal as L, + TypeAlias, overload, Any, SupportsIndex, TypeVar, ) +import numpy as np from numpy import floating, complexfloating, generic from numpy._typing import ( NDArray, @@ -14,11 +16,26 @@ from numpy._typing import ( _ArrayLikeFloat_co, _ArrayLikeComplex_co, ) +from numpy._typing._array_like import _DualArrayLike __all__ = ["logspace", "linspace", "geomspace"] _ScalarT = TypeVar("_ScalarT", bound=generic) +_ToArrayFloat64: TypeAlias = _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], float] + +@overload +def linspace( + start: _ToArrayFloat64, + stop: _ToArrayFloat64, + num: SupportsIndex = ..., + endpoint: bool = ..., + retstep: L[False] = ..., + dtype: None = ..., + axis: SupportsIndex = ..., + *, + device: L["cpu"] | None = ..., +) -> NDArray[np.float64]: ... @overload def linspace( start: _ArrayLikeFloat_co, @@ -80,6 +97,18 @@ def linspace( device: L["cpu"] | None = ..., ) -> NDArray[Any]: ... @overload +def linspace( + start: _ToArrayFloat64, + stop: _ToArrayFloat64, + num: SupportsIndex = ..., + endpoint: bool = ..., + *, + retstep: L[True], + dtype: None = ..., + axis: SupportsIndex = ..., + device: L["cpu"] | None = ..., +) -> tuple[NDArray[np.float64], np.float64]: ... +@overload def linspace( start: _ArrayLikeFloat_co, stop: _ArrayLikeFloat_co, @@ -128,6 +157,16 @@ def linspace( device: L["cpu"] | None = ..., ) -> tuple[NDArray[Any], Any]: ... +@overload +def logspace( + start: _ToArrayFloat64, + stop: _ToArrayFloat64, + num: SupportsIndex = ..., + endpoint: bool = ..., + base: _ToArrayFloat64 = ..., + dtype: None = ..., + axis: SupportsIndex = ..., +) -> NDArray[np.float64]: ... @overload def logspace( start: _ArrayLikeFloat_co, @@ -180,6 +219,15 @@ def logspace( axis: SupportsIndex = ..., ) -> NDArray[Any]: ... +@overload +def geomspace( + start: _ToArrayFloat64, + stop: _ToArrayFloat64, + num: SupportsIndex = ..., + endpoint: bool = ..., + dtype: None = ..., + axis: SupportsIndex = ..., +) -> NDArray[np.float64]: ... 
@overload def geomspace( start: _ArrayLikeFloat_co, diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index 07610d982751..2f32579c0816 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -132,21 +132,21 @@ assert_type(np.require(B, requirements="W"), SubClass[np.float64]) assert_type(np.require(B, requirements="A"), SubClass[np.float64]) assert_type(np.require(C), npt.NDArray[Any]) -assert_type(np.linspace(0, 10), npt.NDArray[np.floating]) +assert_type(np.linspace(0, 10), npt.NDArray[np.float64]) assert_type(np.linspace(0, 10j), npt.NDArray[np.complexfloating]) assert_type(np.linspace(0, 10, dtype=np.int64), npt.NDArray[np.int64]) assert_type(np.linspace(0, 10, dtype=int), npt.NDArray[Any]) -assert_type(np.linspace(0, 10, retstep=True), tuple[npt.NDArray[np.floating], np.floating]) +assert_type(np.linspace(0, 10, retstep=True), tuple[npt.NDArray[np.float64], np.float64]) assert_type(np.linspace(0j, 10, retstep=True), tuple[npt.NDArray[np.complexfloating], np.complexfloating]) assert_type(np.linspace(0, 10, retstep=True, dtype=np.int64), tuple[npt.NDArray[np.int64], np.int64]) assert_type(np.linspace(0j, 10, retstep=True, dtype=int), tuple[npt.NDArray[Any], Any]) -assert_type(np.logspace(0, 10), npt.NDArray[np.floating]) +assert_type(np.logspace(0, 10), npt.NDArray[np.float64]) assert_type(np.logspace(0, 10j), npt.NDArray[np.complexfloating]) assert_type(np.logspace(0, 10, dtype=np.int64), npt.NDArray[np.int64]) assert_type(np.logspace(0, 10, dtype=int), npt.NDArray[Any]) -assert_type(np.geomspace(0, 10), npt.NDArray[np.floating]) +assert_type(np.geomspace(0, 10), npt.NDArray[np.float64]) assert_type(np.geomspace(0, 10j), npt.NDArray[np.complexfloating]) assert_type(np.geomspace(0, 10, dtype=np.int64), npt.NDArray[np.int64]) assert_type(np.geomspace(0, 10, dtype=int), npt.NDArray[Any]) From 84d4fdc05cc7a7360f7443df1e6030c0de077894 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 6 May 2025 15:57:56 +0200 Subject: [PATCH 57/67] TYP: fill in the function param defaults in ``_core.function_base`` --- numpy/_core/function_base.pyi | 202 +++++++++++++++++----------------- 1 file changed, 101 insertions(+), 101 deletions(-) diff --git a/numpy/_core/function_base.pyi b/numpy/_core/function_base.pyi index d019803abc6a..34af79177f97 100644 --- a/numpy/_core/function_base.pyi +++ b/numpy/_core/function_base.pyi @@ -28,37 +28,37 @@ _ToArrayFloat64: TypeAlias = _DualArrayLike[np.dtype[np.float64 | np.integer | n def linspace( start: _ToArrayFloat64, stop: _ToArrayFloat64, - num: SupportsIndex = ..., - endpoint: bool = ..., - retstep: L[False] = ..., - dtype: None = ..., - axis: SupportsIndex = ..., + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: None = None, + axis: SupportsIndex = 0, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[np.float64]: ... @overload def linspace( start: _ArrayLikeFloat_co, stop: _ArrayLikeFloat_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - retstep: L[False] = ..., - dtype: None = ..., - axis: SupportsIndex = ..., + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: None = None, + axis: SupportsIndex = 0, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[floating]: ... 
@overload def linspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - retstep: L[False] = ..., - dtype: None = ..., - axis: SupportsIndex = ..., + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: None = None, + axis: SupportsIndex = 0, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[complexfloating]: ... @overload def linspace( @@ -68,124 +68,124 @@ def linspace( endpoint: bool, retstep: L[False], dtype: _DTypeLike[_ScalarT], - axis: SupportsIndex = ..., + axis: SupportsIndex = 0, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[_ScalarT]: ... @overload def linspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - retstep: L[False] = ..., + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, *, dtype: _DTypeLike[_ScalarT], - axis: SupportsIndex = ..., - device: L["cpu"] | None = ..., + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, ) -> NDArray[_ScalarT]: ... @overload def linspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - retstep: L[False] = ..., - dtype: DTypeLike = ..., - axis: SupportsIndex = ..., + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: DTypeLike | None = None, + axis: SupportsIndex = 0, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[Any]: ... @overload def linspace( start: _ToArrayFloat64, stop: _ToArrayFloat64, - num: SupportsIndex = ..., - endpoint: bool = ..., + num: SupportsIndex = 50, + endpoint: bool = True, *, retstep: L[True], - dtype: None = ..., - axis: SupportsIndex = ..., - device: L["cpu"] | None = ..., + dtype: None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, ) -> tuple[NDArray[np.float64], np.float64]: ... @overload def linspace( start: _ArrayLikeFloat_co, stop: _ArrayLikeFloat_co, - num: SupportsIndex = ..., - endpoint: bool = ..., + num: SupportsIndex = 50, + endpoint: bool = True, *, retstep: L[True], - dtype: None = ..., - axis: SupportsIndex = ..., - device: L["cpu"] | None = ..., + dtype: None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, ) -> tuple[NDArray[floating], floating]: ... @overload def linspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., + num: SupportsIndex = 50, + endpoint: bool = True, *, retstep: L[True], - dtype: None = ..., - axis: SupportsIndex = ..., - device: L["cpu"] | None = ..., + dtype: None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, ) -> tuple[NDArray[complexfloating], complexfloating]: ... @overload def linspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., + num: SupportsIndex = 50, + endpoint: bool = True, *, retstep: L[True], dtype: _DTypeLike[_ScalarT], - axis: SupportsIndex = ..., - device: L["cpu"] | None = ..., + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, ) -> tuple[NDArray[_ScalarT], _ScalarT]: ... 
@overload def linspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., + num: SupportsIndex = 50, + endpoint: bool = True, *, retstep: L[True], - dtype: DTypeLike = ..., - axis: SupportsIndex = ..., - device: L["cpu"] | None = ..., + dtype: DTypeLike | None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, ) -> tuple[NDArray[Any], Any]: ... @overload def logspace( start: _ToArrayFloat64, stop: _ToArrayFloat64, - num: SupportsIndex = ..., - endpoint: bool = ..., - base: _ToArrayFloat64 = ..., - dtype: None = ..., - axis: SupportsIndex = ..., + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ToArrayFloat64 = 10.0, + dtype: None = None, + axis: SupportsIndex = 0, ) -> NDArray[np.float64]: ... @overload def logspace( start: _ArrayLikeFloat_co, stop: _ArrayLikeFloat_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - base: _ArrayLikeFloat_co = ..., - dtype: None = ..., - axis: SupportsIndex = ..., + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeFloat_co = 10.0, + dtype: None = None, + axis: SupportsIndex = 0, ) -> NDArray[floating]: ... @overload def logspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - base: _ArrayLikeComplex_co = ..., - dtype: None = ..., - axis: SupportsIndex = ..., + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeComplex_co = 10.0, + dtype: None = None, + axis: SupportsIndex = 0, ) -> NDArray[complexfloating]: ... @overload def logspace( @@ -195,56 +195,56 @@ def logspace( endpoint: bool, base: _ArrayLikeComplex_co, dtype: _DTypeLike[_ScalarT], - axis: SupportsIndex = ..., + axis: SupportsIndex = 0, ) -> NDArray[_ScalarT]: ... @overload def logspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - base: _ArrayLikeComplex_co = ..., + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeComplex_co = 10.0, *, dtype: _DTypeLike[_ScalarT], - axis: SupportsIndex = ..., + axis: SupportsIndex = 0, ) -> NDArray[_ScalarT]: ... @overload def logspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - base: _ArrayLikeComplex_co = ..., - dtype: DTypeLike = ..., - axis: SupportsIndex = ..., + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeComplex_co = 10.0, + dtype: DTypeLike | None = None, + axis: SupportsIndex = 0, ) -> NDArray[Any]: ... @overload def geomspace( start: _ToArrayFloat64, stop: _ToArrayFloat64, - num: SupportsIndex = ..., - endpoint: bool = ..., - dtype: None = ..., - axis: SupportsIndex = ..., + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: None = None, + axis: SupportsIndex = 0, ) -> NDArray[np.float64]: ... @overload def geomspace( start: _ArrayLikeFloat_co, stop: _ArrayLikeFloat_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - dtype: None = ..., - axis: SupportsIndex = ..., + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: None = None, + axis: SupportsIndex = 0, ) -> NDArray[floating]: ... @overload def geomspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - dtype: None = ..., - axis: SupportsIndex = ..., + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: None = None, + axis: SupportsIndex = 0, ) -> NDArray[complexfloating]: ... 
@overload def geomspace( @@ -253,31 +253,31 @@ def geomspace( num: SupportsIndex, endpoint: bool, dtype: _DTypeLike[_ScalarT], - axis: SupportsIndex = ..., + axis: SupportsIndex = 0, ) -> NDArray[_ScalarT]: ... @overload def geomspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., + num: SupportsIndex = 50, + endpoint: bool = True, *, dtype: _DTypeLike[_ScalarT], - axis: SupportsIndex = ..., + axis: SupportsIndex = 0, ) -> NDArray[_ScalarT]: ... @overload def geomspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - dtype: DTypeLike = ..., - axis: SupportsIndex = ..., + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: DTypeLike | None = None, + axis: SupportsIndex = 0, ) -> NDArray[Any]: ... def add_newdoc( place: str, obj: str, doc: str | tuple[str, str] | list[tuple[str, str]], - warn_on_python: bool = ..., + warn_on_python: bool = True, ) -> None: ... From ed31ca2d8dcaa626ba2e79dccc5b4012de250b47 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 6 May 2025 16:01:19 +0200 Subject: [PATCH 58/67] TYP: use ``Incomplete`` when appropriate in ``_core.function_base`` --- numpy/_core/function_base.pyi | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/numpy/_core/function_base.pyi b/numpy/_core/function_base.pyi index 34af79177f97..3ba22cb167e0 100644 --- a/numpy/_core/function_base.pyi +++ b/numpy/_core/function_base.pyi @@ -1,8 +1,8 @@ +from _typeshed import Incomplete from typing import ( Literal as L, TypeAlias, overload, - Any, SupportsIndex, TypeVar, ) @@ -95,7 +95,7 @@ def linspace( axis: SupportsIndex = 0, *, device: L["cpu"] | None = None, -) -> NDArray[Any]: ... +) -> NDArray[Incomplete]: ... @overload def linspace( start: _ToArrayFloat64, @@ -155,7 +155,7 @@ def linspace( dtype: DTypeLike | None = None, axis: SupportsIndex = 0, device: L["cpu"] | None = None, -) -> tuple[NDArray[Any], Any]: ... +) -> tuple[NDArray[Incomplete], Incomplete]: ... @overload def logspace( @@ -217,7 +217,7 @@ def logspace( base: _ArrayLikeComplex_co = 10.0, dtype: DTypeLike | None = None, axis: SupportsIndex = 0, -) -> NDArray[Any]: ... +) -> NDArray[Incomplete]: ... @overload def geomspace( @@ -273,7 +273,7 @@ def geomspace( endpoint: bool = True, dtype: DTypeLike | None = None, axis: SupportsIndex = 0, -) -> NDArray[Any]: ... +) -> NDArray[Incomplete]: ... 
def add_newdoc( place: str, From 18fece30701679ec159b54da5d08077fcaef8f98 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 6 May 2025 16:04:20 +0200 Subject: [PATCH 59/67] TYP: prefer ``np._`` over ``from numpy import _`` in ``_core.function_base`` --- numpy/_core/function_base.pyi | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/numpy/_core/function_base.pyi b/numpy/_core/function_base.pyi index 3ba22cb167e0..0b416aca3a99 100644 --- a/numpy/_core/function_base.pyi +++ b/numpy/_core/function_base.pyi @@ -8,7 +8,6 @@ from typing import ( ) import numpy as np -from numpy import floating, complexfloating, generic from numpy._typing import ( NDArray, DTypeLike, @@ -20,7 +19,7 @@ from numpy._typing._array_like import _DualArrayLike __all__ = ["logspace", "linspace", "geomspace"] -_ScalarT = TypeVar("_ScalarT", bound=generic) +_ScalarT = TypeVar("_ScalarT", bound=np.generic) _ToArrayFloat64: TypeAlias = _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], float] @@ -47,7 +46,7 @@ def linspace( axis: SupportsIndex = 0, *, device: L["cpu"] | None = None, -) -> NDArray[floating]: ... +) -> NDArray[np.floating]: ... @overload def linspace( start: _ArrayLikeComplex_co, @@ -59,7 +58,7 @@ def linspace( axis: SupportsIndex = 0, *, device: L["cpu"] | None = None, -) -> NDArray[complexfloating]: ... +) -> NDArray[np.complexfloating]: ... @overload def linspace( start: _ArrayLikeComplex_co, @@ -119,7 +118,7 @@ def linspace( dtype: None = None, axis: SupportsIndex = 0, device: L["cpu"] | None = None, -) -> tuple[NDArray[floating], floating]: ... +) -> tuple[NDArray[np.floating], np.floating]: ... @overload def linspace( start: _ArrayLikeComplex_co, @@ -131,7 +130,7 @@ def linspace( dtype: None = None, axis: SupportsIndex = 0, device: L["cpu"] | None = None, -) -> tuple[NDArray[complexfloating], complexfloating]: ... +) -> tuple[NDArray[np.complexfloating], np.complexfloating]: ... @overload def linspace( start: _ArrayLikeComplex_co, @@ -176,7 +175,7 @@ def logspace( base: _ArrayLikeFloat_co = 10.0, dtype: None = None, axis: SupportsIndex = 0, -) -> NDArray[floating]: ... +) -> NDArray[np.floating]: ... @overload def logspace( start: _ArrayLikeComplex_co, @@ -186,7 +185,7 @@ def logspace( base: _ArrayLikeComplex_co = 10.0, dtype: None = None, axis: SupportsIndex = 0, -) -> NDArray[complexfloating]: ... +) -> NDArray[np.complexfloating]: ... @overload def logspace( start: _ArrayLikeComplex_co, @@ -236,7 +235,7 @@ def geomspace( endpoint: bool = True, dtype: None = None, axis: SupportsIndex = 0, -) -> NDArray[floating]: ... +) -> NDArray[np.floating]: ... @overload def geomspace( start: _ArrayLikeComplex_co, @@ -245,7 +244,7 @@ def geomspace( endpoint: bool = True, dtype: None = None, axis: SupportsIndex = 0, -) -> NDArray[complexfloating]: ... +) -> NDArray[np.complexfloating]: ... 
@overload def geomspace( start: _ArrayLikeComplex_co, From aed5b6a342972a54675eec4c92e55d1a94113940 Mon Sep 17 00:00:00 2001 From: jorenham Date: Tue, 6 May 2025 16:06:15 +0200 Subject: [PATCH 60/67] STY: appease ruff in the ``_core.function_base`` stubs --- numpy/_core/function_base.pyi | 20 +++++--------------- 1 file changed, 5 insertions(+), 15 deletions(-) diff --git a/numpy/_core/function_base.pyi b/numpy/_core/function_base.pyi index 0b416aca3a99..5348ebfb40c3 100644 --- a/numpy/_core/function_base.pyi +++ b/numpy/_core/function_base.pyi @@ -1,23 +1,13 @@ +from typing import Literal as L +from typing import SupportsIndex, TypeAlias, TypeVar, overload + from _typeshed import Incomplete -from typing import ( - Literal as L, - TypeAlias, - overload, - SupportsIndex, - TypeVar, -) import numpy as np -from numpy._typing import ( - NDArray, - DTypeLike, - _DTypeLike, - _ArrayLikeFloat_co, - _ArrayLikeComplex_co, -) +from numpy._typing import DTypeLike, NDArray, _ArrayLikeComplex_co, _ArrayLikeFloat_co, _DTypeLike from numpy._typing._array_like import _DualArrayLike -__all__ = ["logspace", "linspace", "geomspace"] +__all__ = ["geomspace", "linspace", "logspace"] _ScalarT = TypeVar("_ScalarT", bound=np.generic) From ca6d144465d4aa48898868d2d3075d5af12969fa Mon Sep 17 00:00:00 2001 From: Ilhan Polat Date: Tue, 6 May 2025 22:14:08 +0200 Subject: [PATCH 61/67] MAINT: Avoid dereferencing/strict aliasing warnings during complex casts in `npy_math.h` for C++ runs (#28892) * MAINT: Avoid type-punning complex casts in npy_math.h * MAINT: Add missing typecast in npy_cimagl --- numpy/_core/include/numpy/npy_math.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/numpy/_core/include/numpy/npy_math.h b/numpy/_core/include/numpy/npy_math.h index d11df12b7ceb..abc784bc686c 100644 --- a/numpy/_core/include/numpy/npy_math.h +++ b/numpy/_core/include/numpy/npy_math.h @@ -363,7 +363,7 @@ NPY_INPLACE npy_longdouble npy_heavisidel(npy_longdouble x, npy_longdouble h0); static inline double npy_creal(const npy_cdouble z) { #if defined(__cplusplus) - return ((double *) &z)[0]; + return z._Val[0]; #else return creal(z); #endif @@ -377,7 +377,7 @@ static inline void npy_csetreal(npy_cdouble *z, const double r) static inline double npy_cimag(const npy_cdouble z) { #if defined(__cplusplus) - return ((double *) &z)[1]; + return z._Val[1]; #else return cimag(z); #endif @@ -391,7 +391,7 @@ static inline void npy_csetimag(npy_cdouble *z, const double i) static inline float npy_crealf(const npy_cfloat z) { #if defined(__cplusplus) - return ((float *) &z)[0]; + return z._Val[0]; #else return crealf(z); #endif @@ -405,7 +405,7 @@ static inline void npy_csetrealf(npy_cfloat *z, const float r) static inline float npy_cimagf(const npy_cfloat z) { #if defined(__cplusplus) - return ((float *) &z)[1]; + return z._Val[1]; #else return cimagf(z); #endif @@ -419,7 +419,7 @@ static inline void npy_csetimagf(npy_cfloat *z, const float i) static inline npy_longdouble npy_creall(const npy_clongdouble z) { #if defined(__cplusplus) - return ((longdouble_t *) &z)[0]; + return (npy_longdouble)z._Val[0]; #else return creall(z); #endif @@ -433,7 +433,7 @@ static inline void npy_csetreall(npy_clongdouble *z, const longdouble_t r) static inline npy_longdouble npy_cimagl(const npy_clongdouble z) { #if defined(__cplusplus) - return ((longdouble_t *) &z)[1]; + return (npy_longdouble)z._Val[1]; #else return cimagl(z); #endif From f78a034c629fa5705b181d4c919ee5849b656d02 Mon Sep 17 00:00:00 2001 From: Peter Hawkins 
Date: Tue, 6 May 2025 16:15:10 -0400 Subject: [PATCH 62/67] BUG: Fix missing check for PyErr_Occurred() in _pyarray_correlate. (#28898) When running the scipy 1.15 test suite test signal/tests/test_signaltools.py::test_lfilter_bad_object, with Python built in debug mode, we see the following error: ``` Fatal Python error: _Py_CheckSlotResult: Slot * of type float succeeded with an exception set ``` `None` ends up as the first argument to `dot`, and this triggers an error from PyFloat_Multiply. Once an error has occurred, we must avoid calling multiply again, since it asserts that PyErr_Occurred() is false if the output is a non-error, which will fail if an error was set at entry. --- numpy/_core/src/multiarray/multiarraymodule.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index cddfad16a972..a53dd0960ed0 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -1205,6 +1205,7 @@ _pyarray_correlate(PyArrayObject *ap1, PyArrayObject *ap2, int typenum, goto clean_ret; } + int needs_pyapi = PyDataType_FLAGCHK(PyArray_DESCR(ret), NPY_NEEDS_PYAPI); NPY_BEGIN_THREADS_DESCR(PyArray_DESCR(ret)); is1 = PyArray_STRIDES(ap1)[0]; is2 = PyArray_STRIDES(ap2)[0]; @@ -1215,6 +1216,9 @@ _pyarray_correlate(PyArrayObject *ap1, PyArrayObject *ap2, int typenum, n = n - n_left; for (i = 0; i < n_left; i++) { dot(ip1, is1, ip2, is2, op, n, ret); + if (needs_pyapi && PyErr_Occurred()) { + goto done; + } n++; ip2 -= is2; op += os; @@ -1226,19 +1230,21 @@ _pyarray_correlate(PyArrayObject *ap1, PyArrayObject *ap2, int typenum, op += os * (n1 - n2 + 1); } else { - for (i = 0; i < (n1 - n2 + 1); i++) { + for (i = 0; i < (n1 - n2 + 1) && (!needs_pyapi || !PyErr_Occurred()); + i++) { dot(ip1, is1, ip2, is2, op, n, ret); ip1 += is1; op += os; } } - for (i = 0; i < n_right; i++) { + for (i = 0; i < n_right && (!needs_pyapi || !PyErr_Occurred()); i++) { n--; dot(ip1, is1, ip2, is2, op, n, ret); ip1 += is1; op += os; } +done: NPY_END_THREADS_DESCR(PyArray_DESCR(ret)); if (PyErr_Occurred()) { goto clean_ret; From eb23a09424b6e811b2f2adc134babebae8b2b1cb Mon Sep 17 00:00:00 2001 From: Hin-Tak Leung Date: Sat, 26 Apr 2025 16:22:44 +0100 Subject: [PATCH 63/67] ENH: Provide Windows 11 ARM64 wheels (#22530) This is mostly adapting and duplicating how the 32-bit no-OpenBLAS wheels are built, to make ARM64 wheels. The mamba-org/setup-micromamba github action reports "win_arm64" as unsupported for installation of anaconda-client at the moment. Beyond that, a number of tests need to be skipped. They are in three categories: - Meson outside of the msdevshell github action does not seems to be able to find the MSVC linker. (Possibly missing some PATH env) - No "checks" modules in win-arm64 (yet) - Mingw GNU objdump does not understand arm64 dll format (yet) to to generate import libraries. 
closes #22530 --- .github/workflows/wheels.yml | 11 +++++++++ .github/workflows/windows.yml | 22 ++++++++++++------ numpy/_core/tests/test_array_interface.py | 4 ++++ numpy/_core/tests/test_cython.py | 4 ++++ numpy/_core/tests/test_limited_api.py | 2 ++ numpy/_core/tests/test_mem_policy.py | 3 +++ numpy/distutils/mingw32ccompiler.py | 23 +++++++++++++++++++ .../distutils/tests/test_mingw32ccompiler.py | 2 ++ numpy/random/tests/test_extending.py | 1 + pyproject.toml | 5 ++++ tools/wheels/cibw_before_build.sh | 3 +++ 11 files changed, 73 insertions(+), 7 deletions(-) diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 1107b3caf6f7..3f7295ff787b 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -91,11 +91,15 @@ jobs: - [macos-14, macosx_arm64, accelerate] # always use accelerate - [windows-2019, win_amd64, ""] - [windows-2019, win32, ""] + - [windows-11-arm, win_arm64, ""] python: ["cp311", "cp312", "cp313", "cp313t", "pp311"] exclude: # Don't build PyPy 32-bit windows - buildplat: [windows-2019, win32, ""] python: "pp311" + # Don't build PyPy arm64 windows + - buildplat: [windows-11-arm, win_arm64, ""] + python: "pp311" # No PyPy on musllinux images - buildplat: [ ubuntu-22.04, musllinux_x86_64, "" ] python: "pp311" @@ -121,6 +125,12 @@ jobs: with: architecture: 'x86' + - name: Setup MSVC arm64 + if: ${{ matrix.buildplat[1] == 'win_arm64' }} + uses: bus1/cabuild/action/msdevshell@e22aba57d6e74891d059d66501b6b5aed8123c4d # v1 + with: + architecture: 'arm64' + - name: pkg-config-for-win run: | choco install -y --no-progress --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite @@ -181,6 +191,7 @@ jobs: path: ./wheelhouse/*.whl - uses: mamba-org/setup-micromamba@0dea6379afdaffa5d528b3d1dabc45da37f443fc + if: ${{ matrix.buildplat[1] != 'win_arm64' }} # unsupported platform at the moment with: # for installation of anaconda-client, required for upload to # anaconda.org diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 098c29f52893..80b4a961e3e1 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -96,9 +96,17 @@ jobs: run: | spin test -- --timeout=600 --durations=10 - msvc_32bit_python_no_openblas: - name: MSVC, 32-bit Python, no BLAS - runs-on: windows-2019 + msvc_python_no_openblas: + name: MSVC, ${{ matrix.architecture }} Python , no BLAS + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + include: + - os: windows-2019 + architecture: x86 + - os: windows-11-arm + architecture: arm64 # To enable this job on a fork, comment out: if: github.repository == 'numpy/numpy' steps: @@ -109,16 +117,16 @@ jobs: fetch-tags: true persist-credentials: false - - name: Setup Python (32-bit) + - name: Setup Python uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: python-version: '3.11' - architecture: 'x86' + architecture: ${{ matrix.architecture }} - - name: Setup MSVC (32-bit) + - name: Setup MSVC uses: bus1/cabuild/action/msdevshell@e22aba57d6e74891d059d66501b6b5aed8123c4d # v1 with: - architecture: 'x86' + architecture: ${{ matrix.architecture }} - name: Build and install run: | diff --git a/numpy/_core/tests/test_array_interface.py b/numpy/_core/tests/test_array_interface.py index 1917c8fecafe..a044c883bb26 100644 --- a/numpy/_core/tests/test_array_interface.py +++ b/numpy/_core/tests/test_array_interface.py @@ -2,6 +2,8 @@ import pytest import numpy as np from numpy.testing import extbuild, IS_WASM, IS_EDITABLE 
+import sysconfig + @pytest.fixture @@ -123,6 +125,8 @@ def get_module(tmp_path): pass # if it does not exist, build and load it + if sysconfig.get_platform() == "win-arm64": + pytest.skip("Meson unable to find MSVC linker on win-arm64") return extbuild.build_and_import_extension('array_interface_testing', functions, prologue=prologue, diff --git a/numpy/_core/tests/test_cython.py b/numpy/_core/tests/test_cython.py index 81ddc63258c2..fda70b9ac79c 100644 --- a/numpy/_core/tests/test_cython.py +++ b/numpy/_core/tests/test_cython.py @@ -3,6 +3,7 @@ import subprocess import sys import pytest +import sysconfig import numpy as np from numpy.testing import assert_array_equal, IS_WASM, IS_EDITABLE @@ -53,6 +54,8 @@ def install_temp(tmpdir_factory): subprocess.check_call(["meson", "--version"]) except FileNotFoundError: pytest.skip("No usable 'meson' found") + if sysconfig.get_platform() == "win-arm64": + pytest.skip("Meson unable to find MSVC linker on win-arm64") if sys.platform == "win32": subprocess.check_call(["meson", "setup", "--buildtype=release", @@ -341,6 +344,7 @@ def test_npystring_allocators_other_dtype(install_temp): assert checks.npystring_allocators_other_types(arr1, arr2) == 0 +@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', reason='no checks module on win-arm64') def test_npy_uintp_type_enum(): import checks assert checks.check_npy_uintp_type_enum() diff --git a/numpy/_core/tests/test_limited_api.py b/numpy/_core/tests/test_limited_api.py index 51bed1d77561..58f2b5ce050d 100644 --- a/numpy/_core/tests/test_limited_api.py +++ b/numpy/_core/tests/test_limited_api.py @@ -52,6 +52,8 @@ def install_temp(tmpdir_factory): subprocess.check_call(["meson", "--version"]) except FileNotFoundError: pytest.skip("No usable 'meson' found") + if sysconfig.get_platform() == "win-arm64": + pytest.skip("Meson unable to find MSVC linker on win-arm64") if sys.platform == "win32": subprocess.check_call(["meson", "setup", "--werror", diff --git a/numpy/_core/tests/test_mem_policy.py b/numpy/_core/tests/test_mem_policy.py index 9846f89c404c..8d09a9ded659 100644 --- a/numpy/_core/tests/test_mem_policy.py +++ b/numpy/_core/tests/test_mem_policy.py @@ -3,6 +3,7 @@ import os import sys import threading +import sysconfig import pytest @@ -220,6 +221,8 @@ def get_module(tmp_path): except ImportError: pass # if it does not exist, build and load it + if sysconfig.get_platform() == "win-arm64": + pytest.skip("Meson unable to find MSVC linker on win-arm64") return extbuild.build_and_import_extension('mem_policy', functions, prologue=prologue, diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py index 2599a9e9a807..944ba2d03b33 100644 --- a/numpy/distutils/mingw32ccompiler.py +++ b/numpy/distutils/mingw32ccompiler.py @@ -262,6 +262,7 @@ def generate_def(dll, dfile): def find_dll(dll_name): arch = {'AMD64' : 'amd64', + 'ARM64' : 'arm64', 'Intel' : 'x86'}[get_build_architecture()] def _find_dll_in_winsxs(dll_name): @@ -351,6 +352,8 @@ def build_import_library(): arch = get_build_architecture() if arch == 'AMD64': return _build_import_library_amd64() + if arch == 'ARM64': + return _build_import_library_arm64() elif arch == 'Intel': return _build_import_library_x86() else: @@ -412,6 +415,26 @@ def _build_import_library_amd64(): cmd = ['dlltool', '-d', def_file, '-l', out_file] subprocess.check_call(cmd) +def _build_import_library_arm64(): + out_exists, out_file = _check_for_import_lib() + if out_exists: + log.debug('Skip building import library: "%s" exists', out_file) + 
return + + # get the runtime dll for which we are building import library + dll_file = find_python_dll() + log.info('Building import library (arch=ARM64): "%s" (from %s)' % + (out_file, dll_file)) + + # generate symbol list from this library + def_name = "python%d%d.def" % tuple(sys.version_info[:2]) + def_file = os.path.join(sys.prefix, 'libs', def_name) + generate_def(dll_file, def_file) + + # generate import library from this symbol list + cmd = ['dlltool', '-d', def_file, '-l', out_file] + subprocess.check_call(cmd) + def _build_import_library_x86(): """ Build the import libraries for Mingw32-gcc on Windows """ diff --git a/numpy/distutils/tests/test_mingw32ccompiler.py b/numpy/distutils/tests/test_mingw32ccompiler.py index 19b19450fc8c..c4eac7b72de1 100644 --- a/numpy/distutils/tests/test_mingw32ccompiler.py +++ b/numpy/distutils/tests/test_mingw32ccompiler.py @@ -3,6 +3,7 @@ import sys import pytest import os +import sysconfig from numpy.distutils import mingw32ccompiler @@ -10,6 +11,7 @@ @pytest.mark.skipif(sys.platform != 'win32', reason='win32 only test') @pytest.mark.skipif(not os.path.exists(os.path.join(sys.prefix, 'libs')), reason="test requires mingw library layout") +@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', reason='mingw GNU objdump does not understand arm64 binary format yet') def test_build_import(): '''Test the mingw32ccompiler.build_import_library, which builds a `python.a` from the MSVC `python.lib` diff --git a/numpy/random/tests/test_extending.py b/numpy/random/tests/test_extending.py index 608f58756289..86cbefcbf2ed 100644 --- a/numpy/random/tests/test_extending.py +++ b/numpy/random/tests/test_extending.py @@ -54,6 +54,7 @@ ) @pytest.mark.skipif(IS_WASM, reason="Can't start subprocess") @pytest.mark.skipif(cython is None, reason="requires cython") +@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', reason='meson not working on win-arm64') @pytest.mark.slow def test_cython(tmp_path): import glob diff --git a/pyproject.toml b/pyproject.toml index eb7015acc347..b62d71cbba73 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -188,6 +188,11 @@ select = "*-win32" config-settings = "setup-args=--vsenv setup-args=-Dallow-noblas=true build-dir=build" repair-wheel-command = "" +[[tool.cibuildwheel.overrides]] +select = "*-win_arm64" +config-settings = "setup-args=--vsenv setup-args=-Dallow-noblas=true build-dir=build" +repair-wheel-command = "" + [[tool.cibuildwheel.overrides]] select = "*pyodide*" before-test = "pip install -r {project}/requirements/emscripten_test_requirements.txt" diff --git a/tools/wheels/cibw_before_build.sh b/tools/wheels/cibw_before_build.sh index e2f464d32a2a..c8d119b1b39f 100644 --- a/tools/wheels/cibw_before_build.sh +++ b/tools/wheels/cibw_before_build.sh @@ -22,6 +22,9 @@ fi if [[ $(python -c"import sys; print(sys.maxsize)") < $(python -c"import sys; print(2**33)") ]]; then echo "No BLAS used for 32-bit wheels" export INSTALL_OPENBLAS=false +elif [[ $(python -c"import sysconfig; print(sysconfig.get_platform())") == "win-arm64" ]]; then + echo "No BLAS used for ARM64 wheels" + export INSTALL_OPENBLAS=false elif [ -z $INSTALL_OPENBLAS ]; then # the macos_arm64 build might not set this variable export INSTALL_OPENBLAS=true From f9902e3ced7f0491eeff057d7ae9e40506b45dfc Mon Sep 17 00:00:00 2001 From: Hin-Tak Leung Date: Sat, 26 Apr 2025 17:03:02 +0100 Subject: [PATCH 64/67] MAINT: Removing extra blank line mistakenly added earlier. 
---
 numpy/_core/tests/test_array_interface.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/numpy/_core/tests/test_array_interface.py b/numpy/_core/tests/test_array_interface.py
index a044c883bb26..ed56f7e79daf 100644
--- a/numpy/_core/tests/test_array_interface.py
+++ b/numpy/_core/tests/test_array_interface.py
@@ -5,7 +5,6 @@
 import sysconfig
 
 
-
 @pytest.fixture
 def get_module(tmp_path):
     """ Some codes to generate data and manage temporary buffers use when

From 520bcf4806d695178a4d8068660c796cd6012224 Mon Sep 17 00:00:00 2001
From: Hin-Tak Leung
Date: Sat, 26 Apr 2025 17:11:39 +0100
Subject: [PATCH 65/67] MAINT: Better / more uniform Meson related skip message

---
 numpy/random/tests/test_extending.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/numpy/random/tests/test_extending.py b/numpy/random/tests/test_extending.py
index 86cbefcbf2ed..dad9b10449d6 100644
--- a/numpy/random/tests/test_extending.py
+++ b/numpy/random/tests/test_extending.py
@@ -54,7 +54,7 @@
 )
 @pytest.mark.skipif(IS_WASM, reason="Can't start subprocess")
 @pytest.mark.skipif(cython is None, reason="requires cython")
-@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', reason='meson not working on win-arm64')
+@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', reason='Meson unable to find MSVC linker on win-arm64')
 @pytest.mark.slow
 def test_cython(tmp_path):
     import glob

From a35c6965077c8be33192fcd23e17d03cd215ad20 Mon Sep 17 00:00:00 2001
From: Chevali2004 <93073405+Chevali2004@users.noreply.github.com>
Date: Wed, 7 May 2025 20:08:51 +0200
Subject: [PATCH 66/67] BUG: Use unrotated companion matrix in polynomial.polyroots. (#28821)

* BUG: Fix #27881 inconsistent behavior between numpy.roots and numpy.polynomial.polynomial.polyroots

Both functions use numpy.linalg.eigvals, but while roots passes the polynomial's companion matrix unchanged, polyroots rotates it first. Though in theory this rotation shouldn't change anything, in terms of numerical calculations eigvals gives significantly different results. This commit removes the rotation as an easy fix to the inconsistency.

This strange behavior of eigvals is, however, a bug. I did some research on it, which you can find on the issue.

* BUG: Fix #27881 Adding tests for numpy.roots and numpy.polynomial.polynomial.polyroots

The following tests provide a minimum precision expected by the functions. They allow my change but not the previous version. You'll find that the difference in precision between the two versions varies according to the root values, but in all cases this change either increases precision or maintains the previous result.

* BUG: Fix #27881 Adding tests for numpy.roots and numpy.polynomial.polynomial.polyroots

Fixing tests

* BUG: Fix #27881 Adding tests for numpy.roots and numpy.polynomial.polynomial.polyroots

Fixing tests. It seems that the way numpy.roots creates its companion matrix amplifies the numerical error compared to the polycompanion function, which is why I had to reduce the expected precision.
* Reverting change from commit 6703b91a22f7f0d1e98f565efcb659d8369b4952 in matlib.pyi --- numpy/lib/tests/test_polynomial.py | 12 ++++++++++++ numpy/polynomial/polynomial.py | 3 +-- numpy/polynomial/tests/test_polynomial.py | 11 +++++++++++ 3 files changed, 24 insertions(+), 2 deletions(-) diff --git a/numpy/lib/tests/test_polynomial.py b/numpy/lib/tests/test_polynomial.py index 0a91b941526e..bf432348cb36 100644 --- a/numpy/lib/tests/test_polynomial.py +++ b/numpy/lib/tests/test_polynomial.py @@ -1,4 +1,5 @@ import numpy as np +import numpy.polynomial.polynomial as poly from numpy.testing import ( assert_, assert_equal, assert_array_equal, assert_almost_equal, assert_array_almost_equal, assert_raises, assert_allclose @@ -121,6 +122,17 @@ def test_poly(self): def test_roots(self): assert_array_equal(np.roots([1, 0, 0]), [0, 0]) + # Testing for larger root values + for i in np.logspace(10, 25, num = 1000, base = 10): + tgt = np.array([-1, 1, i]) + res = np.sort(np.roots(poly.polyfromroots(tgt)[::-1])) + assert_almost_equal(res, tgt, 14 - int(np.log10(i))) # Adapting the expected precision according to the root value, to take into account numerical calculation error + + for i in np.logspace(10, 25, num = 1000, base = 10): + tgt = np.array([-1, 1.01, i]) + res = np.sort(np.roots(poly.polyfromroots(tgt)[::-1])) + assert_almost_equal(res, tgt, 14 - int(np.log10(i))) # Adapting the expected precision according to the root value, to take into account numerical calculation error + def test_str_leading_zeros(self): p = np.poly1d([4, 3, 2, 1]) p[3] = 0 diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py index dada4544dc63..32b53b757a1c 100644 --- a/numpy/polynomial/polynomial.py +++ b/numpy/polynomial/polynomial.py @@ -1535,8 +1535,7 @@ def polyroots(c): if len(c) == 2: return np.array([-c[0] / c[1]]) - # rotated companion matrix reduces error - m = polycompanion(c)[::-1, ::-1] + m = polycompanion(c) r = la.eigvals(m) r.sort() return r diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py index e7c4fdfe8996..84ccc45fc4e4 100644 --- a/numpy/polynomial/tests/test_polynomial.py +++ b/numpy/polynomial/tests/test_polynomial.py @@ -543,6 +543,17 @@ def test_polyroots(self): res = poly.polyroots(poly.polyfromroots(tgt)) assert_almost_equal(trim(res), trim(tgt)) + # Testing for larger root values + for i in np.logspace(10, 25, num = 1000, base = 10): + tgt = np.array([-1, 1, i]) + res = poly.polyroots(poly.polyfromroots(tgt)) + assert_almost_equal(res, tgt, 15 - int(np.log10(i))) # Adapting the expected precision according to the root value, to take into account numerical calculation error + + for i in np.logspace(10, 25, num = 1000, base = 10): + tgt = np.array([-1, 1.01, i]) + res = poly.polyroots(poly.polyfromroots(tgt)) + assert_almost_equal(res, tgt, 14 - int(np.log10(i))) # Adapting the expected precision according to the root value, to take into account numerical calculation error + def test_polyfit(self): def f(x): return x * (x - 1) * (x - 2) From 835f0dd7915adaf0192176ccaea5ae15fbcd8a5b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 14 May 2025 10:46:40 +0000 Subject: [PATCH 67/67] MAINT: Bump actions/dependency-review-action from 4.6.0 to 4.7.1 Bumps [actions/dependency-review-action](https://github.com/actions/dependency-review-action) from 4.6.0 to 4.7.1. 
- [Release notes](https://github.com/actions/dependency-review-action/releases) - [Commits](https://github.com/actions/dependency-review-action/compare/ce3cf9537a52e8119d91fd484ab5b8a807627bf8...da24556b548a50705dd671f47852072ea4c105d9) --- updated-dependencies: - dependency-name: actions/dependency-review-action dependency-version: 4.7.1 dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- .github/workflows/dependency-review.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index 11ab6e4c67cd..5036a94ce399 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -19,6 +19,6 @@ jobs: with: persist-credentials: false - name: 'Dependency Review' - uses: actions/dependency-review-action@ce3cf9537a52e8119d91fd484ab5b8a807627bf8 # v4.6.0 + uses: actions/dependency-review-action@da24556b548a50705dd671f47852072ea4c105d9 # v4.7.1 with: allow-ghsas: GHSA-cx63-2mw6-8hw5
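Returning to the polyroots change in PATCH 66 above: the fix feeds the companion matrix from polycompanion to numpy.linalg.eigvals as-is, rather than its `[::-1, ::-1]`-rotated copy. Below is a minimal sketch of the comparison behind the new tests; the specific roots and the printed error measure are illustrative choices, not the actual test code:

```python
import numpy as np
import numpy.polynomial.polynomial as poly

# One large root, where the rotated and unrotated companion matrices
# were observed to give numerically different eigenvalues.
tgt = np.array([-1.0, 1.0, 1e15])
c = poly.polyfromroots(tgt)

m = poly.polycompanion(c)
roots_unrotated = np.sort(np.linalg.eigvals(m))            # new behavior
roots_rotated = np.sort(np.linalg.eigvals(m[::-1, ::-1]))  # old behavior

# Per the commit message, the unrotated form is at least as accurate.
print(np.max(np.abs(roots_unrotated - tgt)))
print(np.max(np.abs(roots_rotated - tgt)))
```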