diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 68e75754346a..68ef1e811e88 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -47,7 +47,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@28deaeda66b76a05916b6923827895f2b14ab387 # v3.28.16 + uses: github/codeql-action/init@60168efe1c415ce0f5521ea06d5c2062adbeed1b # v3.28.17 with: languages: ${{ matrix.language }} # If you wish to specify custom queries, you can do so here or in a config file. @@ -57,7 +57,7 @@ jobs: # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). # If this step fails, then you should remove it and run the build manually (see below) - name: Autobuild - uses: github/codeql-action/autobuild@28deaeda66b76a05916b6923827895f2b14ab387 # v3.28.16 + uses: github/codeql-action/autobuild@60168efe1c415ce0f5521ea06d5c2062adbeed1b # v3.28.17 # â„šī¸ Command-line programs to run using the OS shell. # 📚 See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#jobsjob_idstepsrun @@ -70,6 +70,6 @@ jobs: # ./location_of_script_within_repo/buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@28deaeda66b76a05916b6923827895f2b14ab387 # v3.28.16 + uses: github/codeql-action/analyze@60168efe1c415ce0f5521ea06d5c2062adbeed1b # v3.28.17 with: category: "/language:${{matrix.language}}" diff --git a/.github/workflows/dependency-review.yml b/.github/workflows/dependency-review.yml index 11ab6e4c67cd..5036a94ce399 100644 --- a/.github/workflows/dependency-review.yml +++ b/.github/workflows/dependency-review.yml @@ -19,6 +19,6 @@ jobs: with: persist-credentials: false - name: 'Dependency Review' - uses: actions/dependency-review-action@ce3cf9537a52e8119d91fd484ab5b8a807627bf8 # v4.6.0 + uses: actions/dependency-review-action@da24556b548a50705dd671f47852072ea4c105d9 # v4.7.1 with: allow-ghsas: GHSA-cx63-2mw6-8hw5 diff --git a/.github/workflows/emscripten.yml b/.github/workflows/emscripten.yml index a1b6e923b131..fea77068e128 100644 --- a/.github/workflows/emscripten.yml +++ b/.github/workflows/emscripten.yml @@ -49,7 +49,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: pypa/cibuildwheel@d04cacbc9866d432033b1d09142936e6a0e2121a # 2.23.2 + - uses: pypa/cibuildwheel@faf86a6ed7efa889faf6996aa23820831055001a # 2.23.3 env: CIBW_PLATFORM: pyodide diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 7156e8f486f2..82de69009aac 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -124,7 +124,7 @@ jobs: fetch-tags: true persist-credentials: false - - uses: astral-sh/setup-uv@c7f87aa956e4c323abf06d5dec078e358f6b4d04 + - uses: astral-sh/setup-uv@6b9c6063abd6010835644d4c2e1bef4cf5cd0fca with: activate-environment: true python-version: ${{ matrix.version }} diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index c2b15262ea41..360261b6a186 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -50,6 +50,6 @@ jobs: # Upload the results to GitHub's code scanning dashboard. 
- name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@28deaeda66b76a05916b6923827895f2b14ab387 # v2.1.27 + uses: github/codeql-action/upload-sarif@60168efe1c415ce0f5521ea06d5c2062adbeed1b # v2.1.27 with: sarif_file: results.sarif diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 159ffc4f4131..3f7295ff787b 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -91,11 +91,15 @@ jobs: - [macos-14, macosx_arm64, accelerate] # always use accelerate - [windows-2019, win_amd64, ""] - [windows-2019, win32, ""] + - [windows-11-arm, win_arm64, ""] python: ["cp311", "cp312", "cp313", "cp313t", "pp311"] exclude: # Don't build PyPy 32-bit windows - buildplat: [windows-2019, win32, ""] python: "pp311" + # Don't build PyPy arm64 windows + - buildplat: [windows-11-arm, win_arm64, ""] + python: "pp311" # No PyPy on musllinux images - buildplat: [ ubuntu-22.04, musllinux_x86_64, "" ] python: "pp311" @@ -121,6 +125,12 @@ jobs: with: architecture: 'x86' + - name: Setup MSVC arm64 + if: ${{ matrix.buildplat[1] == 'win_arm64' }} + uses: bus1/cabuild/action/msdevshell@e22aba57d6e74891d059d66501b6b5aed8123c4d # v1 + with: + architecture: 'arm64' + - name: pkg-config-for-win run: | choco install -y --no-progress --stoponfirstfailure --checksum 6004DF17818F5A6DBF19CB335CC92702 pkgconfiglite @@ -171,7 +181,7 @@ jobs: echo "CIBW_BUILD_FRONTEND=pip; args: --no-build-isolation" >> "$GITHUB_ENV" - name: Build wheels - uses: pypa/cibuildwheel@d04cacbc9866d432033b1d09142936e6a0e2121a # v2.23.2 + uses: pypa/cibuildwheel@faf86a6ed7efa889faf6996aa23820831055001a # v2.23.3 env: CIBW_BUILD: ${{ matrix.python }}-${{ matrix.buildplat[1] }} @@ -181,6 +191,7 @@ jobs: path: ./wheelhouse/*.whl - uses: mamba-org/setup-micromamba@0dea6379afdaffa5d528b3d1dabc45da37f443fc + if: ${{ matrix.buildplat[1] != 'win_arm64' }} # unsupported platform at the moment with: # for installation of anaconda-client, required for upload to # anaconda.org diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 985d7d9c6d6a..80b4a961e3e1 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -35,7 +35,7 @@ jobs: persist-credentials: false - name: Setup Python - uses: astral-sh/setup-uv@c7f87aa956e4c323abf06d5dec078e358f6b4d04 + uses: astral-sh/setup-uv@6b9c6063abd6010835644d4c2e1bef4cf5cd0fca with: activate-environment: true python-version: ${{ matrix.compiler-pyversion[1] }} @@ -96,9 +96,17 @@ jobs: run: | spin test -- --timeout=600 --durations=10 - msvc_32bit_python_no_openblas: - name: MSVC, 32-bit Python, no BLAS - runs-on: windows-2019 + msvc_python_no_openblas: + name: MSVC, ${{ matrix.architecture }} Python , no BLAS + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + include: + - os: windows-2019 + architecture: x86 + - os: windows-11-arm + architecture: arm64 # To enable this job on a fork, comment out: if: github.repository == 'numpy/numpy' steps: @@ -109,16 +117,16 @@ jobs: fetch-tags: true persist-credentials: false - - name: Setup Python (32-bit) + - name: Setup Python uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 # v5.6.0 with: python-version: '3.11' - architecture: 'x86' + architecture: ${{ matrix.architecture }} - - name: Setup MSVC (32-bit) + - name: Setup MSVC uses: bus1/cabuild/action/msdevshell@e22aba57d6e74891d059d66501b6b5aed8123c4d # v1 with: - architecture: 'x86' + architecture: ${{ matrix.architecture }} - name: Build and install run: | diff --git 
a/benchmarks/benchmarks/__init__.py b/benchmarks/benchmarks/__init__.py index 6aa85c22f614..8372be467005 100644 --- a/benchmarks/benchmarks/__init__.py +++ b/benchmarks/benchmarks/__init__.py @@ -5,7 +5,7 @@ def show_cpu_features(): from numpy.lib._utils_impl import _opt_info info = _opt_info() - info = "NumPy CPU features: " + (info if info else 'nothing enabled') + info = "NumPy CPU features: " + (info or 'nothing enabled') # ASV wrapping stdout & stderr, so we assume having a tty here if 'SHELL' in os.environ and sys.platform != 'win32': # to avoid the red color that imposed by ASV diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py index 926e04571402..4d9f3c9c8f61 100644 --- a/benchmarks/benchmarks/bench_ufunc.py +++ b/benchmarks/benchmarks/bench_ufunc.py @@ -342,7 +342,7 @@ def time_ufunc_small_array(self, ufuncname): self.f(self.array_5) def time_ufunc_small_array_inplace(self, ufuncname): - self.f(self.array_5, out = self.array_5) + self.f(self.array_5, out=self.array_5) def time_ufunc_small_int_array(self, ufuncname): self.f(self.array_int_3) @@ -432,7 +432,7 @@ def time_divide_scalar2_inplace(self, dtype): class CustomComparison(Benchmark): - params = (np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, + params = (np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64, np.float32, np.float64, np.bool) param_names = ['dtype'] diff --git a/benchmarks/benchmarks/common.py b/benchmarks/benchmarks/common.py index bee012d4ab26..064255e185eb 100644 --- a/benchmarks/benchmarks/common.py +++ b/benchmarks/benchmarks/common.py @@ -20,14 +20,14 @@ TYPES1 = [ 'int16', 'float16', 'int32', 'float32', - 'int64', 'float64', 'complex64', + 'int64', 'float64', 'complex64', 'complex128', ] DLPACK_TYPES = [ 'int16', 'float16', 'int32', 'float32', - 'int64', 'float64', 'complex64', + 'int64', 'float64', 'complex64', 'complex128', 'bool', ] diff --git a/doc/release/upcoming_changes/28769.performance.rst b/doc/release/upcoming_changes/28769.performance.rst new file mode 100644 index 000000000000..7fb8f02282f6 --- /dev/null +++ b/doc/release/upcoming_changes/28769.performance.rst @@ -0,0 +1,8 @@ +Performance improvements for ``np.float16`` casts +-------------------------------------------------- +Earlier, floating point casts to and from ``np.float16`` types +were emulated in software on all platforms. + +Now, on ARM devices that support Neon float16 intrinsics (such as +recent Apple Silicon), the native float16 path is used to achieve +the best performance. diff --git a/doc/release/upcoming_changes/28856.improvement.rst b/doc/release/upcoming_changes/28856.improvement.rst new file mode 100644 index 000000000000..83911035f097 --- /dev/null +++ b/doc/release/upcoming_changes/28856.improvement.rst @@ -0,0 +1,5 @@ +* ``np.dtypes.StringDType`` is now a + `generic type `_ which + accepts a type argument for ``na_object`` that defaults to ``typing.Never``. + For example, ``StringDType(na_object=None)`` returns a ``StringDType[None]``, + and ``StringDType()`` returns a ``StringDType[typing.Never]``. diff --git a/doc/release/upcoming_changes/28884.deprecation.rst b/doc/release/upcoming_changes/28884.deprecation.rst new file mode 100644 index 000000000000..c1be55fb0dd3 --- /dev/null +++ b/doc/release/upcoming_changes/28884.deprecation.rst @@ -0,0 +1,28 @@ +``numpy.typing.NBitBase`` deprecation +------------------------------------- +The ``numpy.typing.NBitBase`` type has been deprecated and will be removed in a future version. 
+ +This type was previously intended to be used as a generic upper bound for type-parameters, for example: + +.. code-block:: python + + import numpy as np + import numpy.typing as npt + + def f[NT: npt.NBitBase](x: np.complexfloating[NT]) -> np.floating[NT]: ... + +But in NumPy 2.2.0, ``float64`` and ``complex128`` were changed to concrete subtypes, causing static type-checkers to reject ``x: np.float64 = f(np.complex128(42j))``. + +So instead, the better approach is to use ``typing.overload``: + +.. code-block:: python + + import numpy as np + from typing import overload + + @overload + def f(x: np.complex64) -> np.float32: ... + @overload + def f(x: np.complex128) -> np.float64: ... + @overload + def f(x: np.clongdouble) -> np.longdouble: ... diff --git a/doc/source/building/cross_compilation.rst b/doc/source/building/cross_compilation.rst index 0a2e3a5af42a..f03b620ff031 100644 --- a/doc/source/building/cross_compilation.rst +++ b/doc/source/building/cross_compilation.rst @@ -2,10 +2,10 @@ Cross compilation ================= Cross compilation is a complex topic, we only add some hopefully helpful hints -here (for now). As of May 2023, cross-compilation based on ``crossenv`` is -known to work, as used (for example) in conda-forge. Cross-compilation without -``crossenv`` requires some manual overrides. You instruct these overrides by -passing options to ``meson setup`` via `meson-python`_. +here (for now). As of May 2025, cross-compilation with a Meson cross file as +well as cross-compilation based on ``crossenv`` are known to work. Conda-forge +uses the latter method. Cross-compilation without ``crossenv`` requires passing +build options to ``meson setup`` via `meson-python`_. .. _meson-python: https://meson-python.readthedocs.io/en/latest/how-to-guides/meson-args.html @@ -33,9 +33,18 @@ your *cross file*: [properties] longdouble_format = 'IEEE_DOUBLE_LE' +For an example of a cross file needed to cross-compile NumPy, see +`numpy#288861 `__. +Putting that together, invoking a cross build with such a cross file, looks like: + +.. code:: bash + + $ python -m build --wheel -Csetup-args="--cross-file=aarch64-myos-cross-file.txt" + For more details and the current status around cross compilation, see: - The state of cross compilation in Python: `pypackaging-native key issue page `__ +- The `set of NumPy issues with the "Cross compilation" label `__ - Tracking issue for SciPy cross-compilation needs and issues: `scipy#14812 `__ diff --git a/doc/source/building/index.rst b/doc/source/building/index.rst index 3a9709f1ebc1..d7baeaee9324 100644 --- a/doc/source/building/index.rst +++ b/doc/source/building/index.rst @@ -52,7 +52,7 @@ your system. * BLAS and LAPACK libraries. `OpenBLAS `__ is the NumPy default; other variants include Apple Accelerate, `MKL `__, - `ATLAS `__ and + `ATLAS `__ and `Netlib `__ (or "Reference") BLAS and LAPACK. diff --git a/doc/source/building/introspecting_a_build.rst b/doc/source/building/introspecting_a_build.rst index f23628bf3ffd..268365f595bf 100644 --- a/doc/source/building/introspecting_a_build.rst +++ b/doc/source/building/introspecting_a_build.rst @@ -19,4 +19,4 @@ These things are all available after the configure stage of the build (i.e., information, rather than running the build and reading the full build log. For more details on this topic, see the -`SciPy doc page on build introspection `__. +`SciPy doc page on build introspection `__. 
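For readers following the ``NBitBase`` deprecation note above, a minimal runnable sketch of the recommended ``typing.overload`` pattern is shown below; ``as_real`` is a hypothetical user function, not part of the NumPy API, and its implementation body is only illustrative.

.. code-block:: python

    from typing import overload

    import numpy as np

    @overload
    def as_real(x: np.complex64) -> np.float32: ...
    @overload
    def as_real(x: np.complex128) -> np.float64: ...
    def as_real(x):
        # Illustrative implementation: np.abs() of a complex scalar
        # returns the matching real floating-point scalar.
        return np.abs(x)

    r = as_real(np.complex128(42j))
    print(type(r))  # <class 'numpy.float64'>; type checkers infer np.float64

With the overload pair, a checker infers ``np.float64`` for ``as_real(np.complex128(...))`` without needing ``NBitBase`` as an upper bound.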
diff --git a/doc/source/f2py/code/setup_example.py b/doc/source/f2py/code/setup_example.py index 654c448a4b75..ef79ad1ecfb6 100644 --- a/doc/source/f2py/code/setup_example.py +++ b/doc/source/f2py/code/setup_example.py @@ -1,16 +1,16 @@ from numpy.distutils.core import Extension -ext1 = Extension(name = 'scalar', - sources = ['scalar.f']) -ext2 = Extension(name = 'fib2', - sources = ['fib2.pyf', 'fib1.f']) +ext1 = Extension(name='scalar', + sources=['scalar.f']) +ext2 = Extension(name='fib2', + sources=['fib2.pyf', 'fib1.f']) if __name__ == "__main__": from numpy.distutils.core import setup - setup(name = 'f2py_example', - description = "F2PY Users Guide examples", - author = "Pearu Peterson", - author_email = "pearu@cens.ioc.ee", - ext_modules = [ext1, ext2] + setup(name='f2py_example', + description="F2PY Users Guide examples", + author="Pearu Peterson", + author_email="pearu@cens.ioc.ee", + ext_modules=[ext1, ext2] ) # End of setup_example.py diff --git a/doc/source/f2py/f2py.getting-started.rst b/doc/source/f2py/f2py.getting-started.rst index dd1349979a39..e5746c49e94d 100644 --- a/doc/source/f2py/f2py.getting-started.rst +++ b/doc/source/f2py/f2py.getting-started.rst @@ -308,4 +308,4 @@ the previous case:: >>> print(fib3.fib(8)) [ 0. 1. 1. 2. 3. 5. 8. 13.] -.. _`system dependencies panel`: http://scipy.github.io/devdocs/building/index.html#system-level-dependencies +.. _`system dependencies panel`: https://scipy.github.io/devdocs/building/index.html#system-level-dependencies diff --git a/doc/source/f2py/windows/index.rst b/doc/source/f2py/windows/index.rst index 797dfc2b4179..ea0af7505ce7 100644 --- a/doc/source/f2py/windows/index.rst +++ b/doc/source/f2py/windows/index.rst @@ -217,4 +217,4 @@ path using a hash. This needs to be added to the ``PATH`` variable. .. _are outdated: https://github.com/conda-forge/conda-forge.github.io/issues/1044 .. _now deprecated: https://github.com/numpy/numpy/pull/20875 .. _LLVM Flang: https://releases.llvm.org/11.0.0/tools/flang/docs/ReleaseNotes.html -.. _SciPy's documentation: http://scipy.github.io/devdocs/building/index.html#system-level-dependencies +.. _SciPy's documentation: https://scipy.github.io/devdocs/building/index.html#system-level-dependencies diff --git a/doc/source/reference/simd/gen_features.py b/doc/source/reference/simd/gen_features.py index 47b35dbfc397..eb516e3ff2ac 100644 --- a/doc/source/reference/simd/gen_features.py +++ b/doc/source/reference/simd/gen_features.py @@ -2,6 +2,7 @@ Generate CPU features tables from CCompilerOpt """ from os import path + from numpy.distutils.ccompiler_opt import CCompilerOpt class FakeCCompilerOpt(CCompilerOpt): diff --git a/doc/source/user/basics.dispatch.rst b/doc/source/user/basics.dispatch.rst index 1505c9285ea8..ae53995a3917 100644 --- a/doc/source/user/basics.dispatch.rst +++ b/doc/source/user/basics.dispatch.rst @@ -7,8 +7,8 @@ Writing custom array containers Numpy's dispatch mechanism, introduced in numpy version v1.16 is the recommended approach for writing custom N-dimensional array containers that are compatible with the numpy API and provide custom implementations of numpy -functionality. Applications include `dask `_ arrays, an -N-dimensional array distributed across multiple nodes, and `cupy +functionality. Applications include `dask `_ +arrays, an N-dimensional array distributed across multiple nodes, and `cupy `_ arrays, an N-dimensional array on a GPU. 
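The ``basics.dispatch.rst`` hunk above only retouches links, but since it is the page describing custom array containers, a minimal sketch of the dispatch protocol it documents may be helpful; ``DiagonalArray`` here is an illustrative toy class (the page's own example is more complete).

.. code-block:: python

    import numpy as np

    class DiagonalArray:
        """Toy container storing only the diagonal value of an n-by-n array."""
        def __init__(self, n, value):
            self._n = n
            self._value = value

        def __array__(self, dtype=None, copy=None):
            # Materialize a real ndarray on demand.
            return self._value * np.eye(self._n, dtype=dtype)

        def __array_function__(self, func, types, args, kwargs):
            # Intercept NumPy API calls; handle np.sum, defer everything else.
            if func is np.sum:
                return self._n * self._value
            return NotImplemented

    arr = DiagonalArray(5, 2.0)
    print(np.sum(arr))      # 10.0, computed by __array_function__
    print(np.asarray(arr))  # dense 5x5 array, materialized via __array__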
diff --git a/doc/source/user/c-info.python-as-glue.rst b/doc/source/user/c-info.python-as-glue.rst index d791341ac560..c699760fdebd 100644 --- a/doc/source/user/c-info.python-as-glue.rst +++ b/doc/source/user/c-info.python-as-glue.rst @@ -144,7 +144,7 @@ written C-code. Cython ====== -`Cython `_ is a compiler for a Python dialect that adds +`Cython `_ is a compiler for a Python dialect that adds (optional) static typing for speed, and allows mixing C or C++ code into your modules. It produces C or C++ extensions that can be compiled and imported in Python code. diff --git a/doc/source/user/numpy-for-matlab-users.rst b/doc/source/user/numpy-for-matlab-users.rst index a3ced19e23ad..8c7914ea8dec 100644 --- a/doc/source/user/numpy-for-matlab-users.rst +++ b/doc/source/user/numpy-for-matlab-users.rst @@ -810,7 +810,7 @@ Links ===== Another somewhat outdated MATLAB/NumPy cross-reference can be found at -http://mathesaurus.sf.net/ +https://mathesaurus.sf.net/ An extensive list of tools for scientific work with Python can be found in the `topical software page `__. diff --git a/numpy/__init__.pyi b/numpy/__init__.pyi index 5c16b91b7772..540cf188a967 100644 --- a/numpy/__init__.pyi +++ b/numpy/__init__.pyi @@ -57,10 +57,8 @@ from numpy._typing import ( NBitBase, # NOTE: Do not remove the extended precision bit-types even if seemingly unused; # they're used by the mypy plugin - _256Bit, _128Bit, _96Bit, - _80Bit, _64Bit, _32Bit, _16Bit, @@ -160,21 +158,12 @@ from numpy._typing._callable import ( _ComparisonOpGE, ) -# NOTE: Numpy's mypy plugin is used for removing the types unavailable -# to the specific platform +# NOTE: Numpy's mypy plugin is used for removing the types unavailable to the specific platform from numpy._typing._extended_precision import ( - uint128, - uint256, - int128, - int256, - float80, float96, float128, - float256, - complex160, complex192, complex256, - complex512, ) from numpy._array_api_info import __array_namespace_info__ @@ -698,8 +687,7 @@ __all__ = [ # noqa: RUF022 "uint8", "ubyte", "int16", "short", "uint16", "ushort", "int32", "intc", "uint32", "uintc", "int64", "long", "uint64", "ulong", "longlong", "ulonglong", "intp", "uintp", "double", "cdouble", "single", "csingle", "half", "bool_", "int_", "uint", - "uint128", "uint256", "int128", "int256", "float80", "float96", "float128", - "float256", "complex160", "complex192", "complex256", "complex512", + "float96", "float128", "complex192", "complex256", "array2string", "array_str", "array_repr", "set_printoptions", "get_printoptions", "printoptions", "format_float_positional", "format_float_scientific", "require", "seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall", @@ -2421,10 +2409,17 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): mode: _ModeKind = ..., ) -> _ArrayT: ... + @overload def repeat( self, repeats: _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., + axis: None = None, + ) -> ndarray[tuple[int], _DTypeT_co]: ... + @overload + def repeat( + self, + repeats: _ArrayLikeInt_co, + axis: SupportsIndex, ) -> ndarray[_Shape, _DTypeT_co]: ... def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], _DTypeT_co]: ... @@ -2547,7 +2542,7 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): @overload # (dtype: ?, type: type[T]) def view(self, /, dtype: DTypeLike, type: type[_ArrayT]) -> _ArrayT: ... - def setfield(self, /, val: ArrayLike, dtype: DTypeLike, offset: CanIndex = 0) -> None: ... 
+ def setfield(self, /, val: ArrayLike, dtype: DTypeLike, offset: SupportsIndex = 0) -> None: ... @overload def getfield(self, dtype: _DTypeLike[_ScalarT], offset: SupportsIndex = 0) -> NDArray[_ScalarT]: ... @overload @@ -3148,66 +3143,70 @@ class ndarray(_ArrayOrScalarCommon, Generic[_ShapeT_co, _DTypeT_co]): def __rfloordiv__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... @overload - def __pow__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __pow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __pow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __pow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + def __pow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __pow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + def __pow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ... @overload - def __pow__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + def __pow__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], mod: None = None, /) -> NDArray[float64]: ... @overload - def __pow__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + def __pow__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, mod: None = None, /) -> NDArray[complex128]: ... @overload - def __pow__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + def __pow__( + self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, / + ) -> NDArray[complex128]: ... @overload - def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __pow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __pow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __pow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] @overload - def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... + def __pow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> NDArray[complexfloating]: ... 
@overload - def __pow__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + def __pow__(self: NDArray[number], other: _ArrayLikeNumber_co, mod: None = None, /) -> NDArray[number]: ... @overload - def __pow__(self: NDArray[object_], other: Any, /) -> Any: ... + def __pow__(self: NDArray[object_], other: Any, mod: None = None, /) -> Any: ... @overload - def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + def __pow__(self: NDArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... @overload - def __rpow__(self: NDArray[_NumberT], other: int | np.bool, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... + def __rpow__(self: NDArray[_NumberT], other: int | np.bool, mod: None = None, /) -> ndarray[_ShapeT_co, dtype[_NumberT]]: ... @overload - def __rpow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rpow__(self: NDArray[_NumberT], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] + def __rpow__(self: NDArray[np.bool], other: _ArrayLikeBool_co, mod: None = None, /) -> NDArray[int8]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] + def __rpow__(self: NDArray[np.bool], other: _ArrayLike[_NumberT], mod: None = None, /) -> NDArray[_NumberT]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, /) -> NDArray[float64]: ... + def __rpow__(self: NDArray[float64], other: _ArrayLikeFloat64_co, mod: None = None, /) -> NDArray[float64]: ... @overload - def __rpow__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], /) -> NDArray[float64]: ... + def __rpow__(self: _ArrayFloat64_co, other: _ArrayLike[floating[_64Bit]], mod: None = None, /) -> NDArray[float64]: ... @overload - def __rpow__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, /) -> NDArray[complex128]: ... + def __rpow__(self: NDArray[complex128], other: _ArrayLikeComplex128_co, mod: None = None, /) -> NDArray[complex128]: ... @overload - def __rpow__(self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], /) -> NDArray[complex128]: ... + def __rpow__( + self: _ArrayComplex128_co, other: _ArrayLike[complexfloating[_64Bit]], mod: None = None, / + ) -> NDArray[complex128]: ... @overload - def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] + def __rpow__(self: _ArrayUInt_co, other: _ArrayLikeUInt_co, mod: None = None, /) -> NDArray[unsignedinteger]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] + def __rpow__(self: _ArrayInt_co, other: _ArrayLikeInt_co, mod: None = None, /) -> NDArray[signedinteger]: ... # type: ignore[overload-overlap] @overload - def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, /) -> NDArray[floating]: ... # type: ignore[overload-overlap] + def __rpow__(self: _ArrayFloat_co, other: _ArrayLikeFloat_co, mod: None = None, /) -> NDArray[floating]: ... 
# type: ignore[overload-overlap] @overload - def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, /) -> NDArray[complexfloating]: ... + def __rpow__(self: _ArrayComplex_co, other: _ArrayLikeComplex_co, mod: None = None, /) -> NDArray[complexfloating]: ... @overload - def __rpow__(self: NDArray[number], other: _ArrayLikeNumber_co, /) -> NDArray[number]: ... + def __rpow__(self: NDArray[number], other: _ArrayLikeNumber_co, mod: None = None, /) -> NDArray[number]: ... @overload - def __rpow__(self: NDArray[object_], other: Any, /) -> Any: ... + def __rpow__(self: NDArray[object_], other: Any, mod: None = None, /) -> Any: ... @overload - def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co, /) -> Any: ... + def __rpow__(self: NDArray[Any], other: _ArrayLikeObject_co, mod: None = None, /) -> Any: ... @overload def __lshift__(self: NDArray[np.bool], other: _ArrayLikeBool_co, /) -> NDArray[int8]: ... # type: ignore[misc] @@ -3685,7 +3684,7 @@ class generic(_ArrayOrScalarCommon, Generic[_ItemT_co]): mode: _ModeKind = ..., ) -> _ArrayT: ... - def repeat(self, repeats: _ArrayLikeInt_co, axis: SupportsIndex | None = ...) -> NDArray[Self]: ... + def repeat(self, repeats: _ArrayLikeInt_co, axis: SupportsIndex | None = None) -> ndarray[tuple[int], dtype[Self]]: ... def flatten(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], dtype[Self]]: ... def ravel(self, /, order: _OrderKACF = "C") -> ndarray[tuple[int], dtype[Self]]: ... @@ -4247,21 +4246,25 @@ class float64(floating[_64Bit], float): # type: ignore[misc] def __rfloordiv__(self, other: complex, /) -> float64 | complex128: ... @overload - def __pow__(self, other: _Float64_co, /) -> float64: ... + def __pow__(self, other: _Float64_co, mod: None = None, /) -> float64: ... @overload - def __pow__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + def __pow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... @overload - def __pow__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __pow__( + self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / + ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload - def __pow__(self, other: complex, /) -> float64 | complex128: ... + def __pow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ... @overload - def __rpow__(self, other: _Float64_co, /) -> float64: ... + def __rpow__(self, other: _Float64_co, mod: None = None, /) -> float64: ... @overload - def __rpow__(self, other: complexfloating[_64Bit, _64Bit], /) -> complex128: ... + def __rpow__(self, other: complexfloating[_64Bit, _64Bit], mod: None = None, /) -> complex128: ... @overload - def __rpow__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rpow__( + self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / + ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... @overload - def __rpow__(self, other: complex, /) -> float64 | complex128: ... + def __rpow__(self, other: complex, mod: None = None, /) -> float64 | complex128: ... def __mod__(self, other: _Float64_co, /) -> float64: ... # type: ignore[override] def __rmod__(self, other: _Float64_co, /) -> float64: ... 
# type: ignore[override] @@ -4351,17 +4354,23 @@ class complexfloating(inexact[_NBit1, complex], Generic[_NBit1, _NBit2]): def __rtruediv__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... @overload - def __pow__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + def __pow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBit1, _NBit2]: ... @overload - def __pow__(self, other: complex | float64 | complex128, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __pow__( + self, other: complex | float64 | complex128, mod: None = None, / + ) -> complexfloating[_NBit1, _NBit2] | complex128: ... @overload - def __pow__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __pow__( + self, other: number[_NBit], mod: None = None, / + ) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... @overload - def __rpow__(self, other: _Complex64_co, /) -> complexfloating[_NBit1, _NBit2]: ... + def __rpow__(self, other: _Complex64_co, mod: None = None, /) -> complexfloating[_NBit1, _NBit2]: ... @overload - def __rpow__(self, other: complex, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... + def __rpow__(self, other: complex, mod: None = None, /) -> complexfloating[_NBit1, _NBit2] | complex128: ... @overload - def __rpow__(self, other: number[_NBit], /) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... + def __rpow__( + self, other: number[_NBit], mod: None = None, / + ) -> complexfloating[_NBit1, _NBit2] | complexfloating[_NBit, _NBit]: ... complex64: TypeAlias = complexfloating[_32Bit, _32Bit] @@ -4417,10 +4426,12 @@ class complex128(complexfloating[_64Bit, _64Bit], complex): # type: ignore[misc def __rtruediv__(self, other: _Complex128_co, /) -> complex128: ... @overload - def __pow__(self, other: _Complex128_co, /) -> complex128: ... + def __pow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... @overload - def __pow__(self, other: complexfloating[_NBit1, _NBit2], /) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... - def __rpow__(self, other: _Complex128_co, /) -> complex128: ... + def __pow__( + self, other: complexfloating[_NBit1, _NBit2], mod: None = None, / + ) -> complexfloating[_NBit1 | _64Bit, _NBit2 | _64Bit]: ... + def __rpow__(self, other: _Complex128_co, mod: None = None, /) -> complex128: ... csingle: TypeAlias = complexfloating[_NBitSingle, _NBitSingle] cdouble: TypeAlias = complexfloating[_NBitDouble, _NBitDouble] @@ -5280,7 +5291,8 @@ class matrix(ndarray[_2DShapeT_co, _DTypeT_co]): def __mul__(self, other: ArrayLike, /) -> matrix[_2D, Any]: ... def __rmul__(self, other: ArrayLike, /) -> matrix[_2D, Any]: ... def __imul__(self, other: ArrayLike, /) -> matrix[_2DShapeT_co, _DTypeT_co]: ... - def __pow__(self, other: ArrayLike, /) -> matrix[_2D, Any]: ... + def __pow__(self, other: ArrayLike, mod: None = None, /) -> matrix[_2D, Any]: ... + def __rpow__(self, other: ArrayLike, mod: None = None, /) -> matrix[_2D, Any]: ... def __ipow__(self, other: ArrayLike, /) -> matrix[_2DShapeT_co, _DTypeT_co]: ... 
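A note on the recurring ``mod: None = None`` parameter in the ``__pow__``/``__rpow__`` overloads above: it aligns the stubs with Python's ternary power protocol, so type checkers accept the built-in two-argument ``pow()`` on NumPy scalars and arrays, while the ``None`` annotation documents that a modulus is not supported. A small illustration of the runtime behaviour being described (nothing here is new functionality):

.. code-block:: python

    import numpy as np

    x = np.float64(2.0)
    print(pow(x, 3))   # 8.0 -- the binary pow() form these stubs describe
    print(x ** 0.5)    # 1.4142135623730951

    a = np.arange(4.0)
    print(pow(a, 2))   # [0. 1. 4. 9.]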
@overload diff --git a/numpy/_build_utils/tempita/_tempita.py b/numpy/_build_utils/tempita/_tempita.py index d8ce41742f3a..446658fc15f8 100644 --- a/numpy/_build_utils/tempita/_tempita.py +++ b/numpy/_build_utils/tempita/_tempita.py @@ -175,11 +175,7 @@ def from_filename( from_filename = classmethod(from_filename) def __repr__(self): - return "<%s %s name=%r>" % ( - self.__class__.__name__, - hex(id(self))[2:], - self.name, - ) + return f"<{self.__class__.__name__} {id(self):x} name={self.name!r}>" def substitute(self, *args, **kw): if args: diff --git a/numpy/_core/_exceptions.py b/numpy/_core/_exceptions.py index 180e71946e6c..aaa41648a1d2 100644 --- a/numpy/_core/_exceptions.py +++ b/numpy/_core/_exceptions.py @@ -5,7 +5,7 @@ By putting the formatting in `__str__`, we also avoid paying the cost for users who silence the exceptions. """ -from .._utils import set_module +from numpy._utils import set_module def _unpack_tuple(tup): if len(tup) == 1: diff --git a/numpy/_core/_internal.py b/numpy/_core/_internal.py index ed31afde6fa8..915510b220d0 100644 --- a/numpy/_core/_internal.py +++ b/numpy/_core/_internal.py @@ -10,7 +10,7 @@ import sys import warnings -from ..exceptions import DTypePromotionError +from numpy.exceptions import DTypePromotionError from .multiarray import dtype, array, ndarray, promote_types, StringDType from numpy import _NoValue try: diff --git a/numpy/_core/_machar.py b/numpy/_core/_machar.py index e9d621b764c3..84d1f82a89ab 100644 --- a/numpy/_core/_machar.py +++ b/numpy/_core/_machar.py @@ -9,7 +9,7 @@ from .fromnumeric import any from ._ufunc_config import errstate -from .._utils import set_module +from numpy._utils import set_module # Need to speed this up...especially for longdouble diff --git a/numpy/_core/_ufunc_config.py b/numpy/_core/_ufunc_config.py index ec9fd77ecbd8..edb533ea3c2b 100644 --- a/numpy/_core/_ufunc_config.py +++ b/numpy/_core/_ufunc_config.py @@ -8,7 +8,7 @@ import contextvars import functools -from .._utils import set_module +from numpy._utils import set_module from .umath import _make_extobj, _get_extobj_dict, _extobj_contextvar __all__ = [ diff --git a/numpy/_core/code_generators/generate_umath.py b/numpy/_core/code_generators/generate_umath.py index e9b7989afd28..fbda9393b5a5 100644 --- a/numpy/_core/code_generators/generate_umath.py +++ b/numpy/_core/code_generators/generate_umath.py @@ -259,16 +259,16 @@ def english_upper(s): return uppered -#each entry in defdict is a Ufunc object. +# each entry in defdict is a Ufunc object. 
-#name: [string of chars for which it is defined, -# string of characters using func interface, -# tuple of strings giving funcs for data, -# (in, out), or (instr, outstr) giving the signature as character codes, -# identity, -# docstring, -# output specification (optional) -# ] +# name: [string of chars for which it is defined, +# string of characters using func interface, +# tuple of strings giving funcs for data, +# (in, out), or (instr, outstr) giving the signature as character codes, +# identity, +# docstring, +# output specification (optional) +# ] chartoname = { '?': 'bool', @@ -396,7 +396,7 @@ def english_upper(s): TD(O, f='PyNumber_Multiply'), indexed=intfltcmplx ), -#'true_divide' : aliased to divide in umathmodule.c:initumath +# 'true_divide' : aliased to divide in umathmodule.c:initumath 'floor_divide': Ufunc(2, 1, None, # One is only a unit to the right, not the left docstrings.get('numpy._core.umath.floor_divide'), @@ -1382,7 +1382,7 @@ def indent(st, spaces): } } -#for each name +# for each name # 1) create functions, data, and signature # 2) fill in functions and data in InitOperators # 3) add function. @@ -1403,7 +1403,7 @@ def make_arrays(funcdict): sub = 0 for k, t in enumerate(uf.type_descriptions): - cfunc_alias = t.cfunc_alias if t.cfunc_alias else name + cfunc_alias = t.cfunc_alias or name cfunc_fname = None if t.func_data is FullTypeDescr: tname = english_upper(chartoname[t.type]) @@ -1570,7 +1570,7 @@ def make_ufuncs(funcdict): typenum=f"NPY_{english_upper(chartoname[c])}", count=uf.nin + uf.nout, name=name, - funcname = f"{english_upper(chartoname[c])}_{name}_indexed", + funcname=f"{english_upper(chartoname[c])}_{name}_indexed", )) mlist.append(r"""PyDict_SetItemString(dictionary, "%s", f);""" % name) diff --git a/numpy/_core/defchararray.py b/numpy/_core/defchararray.py index cde1c3a7f291..d782e6131337 100644 --- a/numpy/_core/defchararray.py +++ b/numpy/_core/defchararray.py @@ -18,7 +18,7 @@ import functools import numpy as np -from .._utils import set_module +from numpy._utils import set_module from .numerictypes import bytes_, str_, character from .numeric import ndarray, array as narray, asarray as asnarray from numpy._core.multiarray import compare_chararrays diff --git a/numpy/_core/fromnumeric.py b/numpy/_core/fromnumeric.py index 1b8b5198277a..bc00877612d6 100644 --- a/numpy/_core/fromnumeric.py +++ b/numpy/_core/fromnumeric.py @@ -6,7 +6,7 @@ import warnings import numpy as np -from .._utils import set_module +from numpy._utils import set_module from . import multiarray as mu from . import overrides from . import umath as um diff --git a/numpy/_core/fromnumeric.pyi b/numpy/_core/fromnumeric.pyi index 9e30a84165b4..f974dc33a027 100644 --- a/numpy/_core/fromnumeric.pyi +++ b/numpy/_core/fromnumeric.pyi @@ -277,13 +277,25 @@ def choose( def repeat( a: _ArrayLike[_ScalarT], repeats: _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., + axis: None = None, +) -> np.ndarray[tuple[int], np.dtype[_ScalarT]]: ... +@overload +def repeat( + a: _ArrayLike[_ScalarT], + repeats: _ArrayLikeInt_co, + axis: SupportsIndex, ) -> NDArray[_ScalarT]: ... @overload def repeat( a: ArrayLike, repeats: _ArrayLikeInt_co, - axis: SupportsIndex | None = ..., + axis: None = None, +) -> np.ndarray[tuple[int], np.dtype[Any]]: ... +@overload +def repeat( + a: ArrayLike, + repeats: _ArrayLikeInt_co, + axis: SupportsIndex, ) -> NDArray[Any]: ... 
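The split ``repeat`` overloads above encode the documented runtime behaviour: with ``axis=None`` the input is flattened and a 1-D result comes back (hence the ``tuple[int]`` shape), while an explicit ``axis`` preserves the number of dimensions. A quick illustration using plain NumPy:

.. code-block:: python

    import numpy as np

    a = np.array([[1, 2], [3, 4]])
    print(np.repeat(a, 2))          # axis=None: flattened -> [1 1 2 2 3 3 4 4]
    print(np.repeat(a, 2, axis=0))  # axis given: shape (4, 2), dimensionality kept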
def put( diff --git a/numpy/_core/function_base.pyi b/numpy/_core/function_base.pyi index 3d0a80c23e6a..5348ebfb40c3 100644 --- a/numpy/_core/function_base.pyi +++ b/numpy/_core/function_base.pyi @@ -1,48 +1,54 @@ -from typing import ( - Literal as L, - overload, - Any, - SupportsIndex, - TypeVar, -) +from typing import Literal as L +from typing import SupportsIndex, TypeAlias, TypeVar, overload -from numpy import floating, complexfloating, generic -from numpy._typing import ( - NDArray, - DTypeLike, - _DTypeLike, - _ArrayLikeFloat_co, - _ArrayLikeComplex_co, -) +from _typeshed import Incomplete -__all__ = ["logspace", "linspace", "geomspace"] +import numpy as np +from numpy._typing import DTypeLike, NDArray, _ArrayLikeComplex_co, _ArrayLikeFloat_co, _DTypeLike +from numpy._typing._array_like import _DualArrayLike -_ScalarT = TypeVar("_ScalarT", bound=generic) +__all__ = ["geomspace", "linspace", "logspace"] + +_ScalarT = TypeVar("_ScalarT", bound=np.generic) + +_ToArrayFloat64: TypeAlias = _DualArrayLike[np.dtype[np.float64 | np.integer | np.bool], float] +@overload +def linspace( + start: _ToArrayFloat64, + stop: _ToArrayFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: None = None, + axis: SupportsIndex = 0, + *, + device: L["cpu"] | None = None, +) -> NDArray[np.float64]: ... @overload def linspace( start: _ArrayLikeFloat_co, stop: _ArrayLikeFloat_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - retstep: L[False] = ..., - dtype: None = ..., - axis: SupportsIndex = ..., + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: None = None, + axis: SupportsIndex = 0, *, - device: L["cpu"] | None = ..., -) -> NDArray[floating]: ... + device: L["cpu"] | None = None, +) -> NDArray[np.floating]: ... @overload def linspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - retstep: L[False] = ..., - dtype: None = ..., - axis: SupportsIndex = ..., + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: None = None, + axis: SupportsIndex = 0, *, - device: L["cpu"] | None = ..., -) -> NDArray[complexfloating]: ... + device: L["cpu"] | None = None, +) -> NDArray[np.complexfloating]: ... @overload def linspace( start: _ArrayLikeComplex_co, @@ -51,103 +57,125 @@ def linspace( endpoint: bool, retstep: L[False], dtype: _DTypeLike[_ScalarT], - axis: SupportsIndex = ..., + axis: SupportsIndex = 0, *, - device: L["cpu"] | None = ..., + device: L["cpu"] | None = None, ) -> NDArray[_ScalarT]: ... @overload def linspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - retstep: L[False] = ..., + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, *, dtype: _DTypeLike[_ScalarT], - axis: SupportsIndex = ..., - device: L["cpu"] | None = ..., + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, ) -> NDArray[_ScalarT]: ... @overload def linspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - retstep: L[False] = ..., - dtype: DTypeLike = ..., - axis: SupportsIndex = ..., + num: SupportsIndex = 50, + endpoint: bool = True, + retstep: L[False] = False, + dtype: DTypeLike | None = None, + axis: SupportsIndex = 0, *, - device: L["cpu"] | None = ..., -) -> NDArray[Any]: ... + device: L["cpu"] | None = None, +) -> NDArray[Incomplete]: ... 
+@overload +def linspace( + start: _ToArrayFloat64, + stop: _ToArrayFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + *, + retstep: L[True], + dtype: None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[NDArray[np.float64], np.float64]: ... @overload def linspace( start: _ArrayLikeFloat_co, stop: _ArrayLikeFloat_co, - num: SupportsIndex = ..., - endpoint: bool = ..., + num: SupportsIndex = 50, + endpoint: bool = True, *, retstep: L[True], - dtype: None = ..., - axis: SupportsIndex = ..., - device: L["cpu"] | None = ..., -) -> tuple[NDArray[floating], floating]: ... + dtype: None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[NDArray[np.floating], np.floating]: ... @overload def linspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., + num: SupportsIndex = 50, + endpoint: bool = True, *, retstep: L[True], - dtype: None = ..., - axis: SupportsIndex = ..., - device: L["cpu"] | None = ..., -) -> tuple[NDArray[complexfloating], complexfloating]: ... + dtype: None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[NDArray[np.complexfloating], np.complexfloating]: ... @overload def linspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., + num: SupportsIndex = 50, + endpoint: bool = True, *, retstep: L[True], dtype: _DTypeLike[_ScalarT], - axis: SupportsIndex = ..., - device: L["cpu"] | None = ..., + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, ) -> tuple[NDArray[_ScalarT], _ScalarT]: ... @overload def linspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., + num: SupportsIndex = 50, + endpoint: bool = True, *, retstep: L[True], - dtype: DTypeLike = ..., - axis: SupportsIndex = ..., - device: L["cpu"] | None = ..., -) -> tuple[NDArray[Any], Any]: ... + dtype: DTypeLike | None = None, + axis: SupportsIndex = 0, + device: L["cpu"] | None = None, +) -> tuple[NDArray[Incomplete], Incomplete]: ... +@overload +def logspace( + start: _ToArrayFloat64, + stop: _ToArrayFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ToArrayFloat64 = 10.0, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.float64]: ... @overload def logspace( start: _ArrayLikeFloat_co, stop: _ArrayLikeFloat_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - base: _ArrayLikeFloat_co = ..., - dtype: None = ..., - axis: SupportsIndex = ..., -) -> NDArray[floating]: ... + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeFloat_co = 10.0, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.floating]: ... @overload def logspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - base: _ArrayLikeComplex_co = ..., - dtype: None = ..., - axis: SupportsIndex = ..., -) -> NDArray[complexfloating]: ... + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeComplex_co = 10.0, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.complexfloating]: ... @overload def logspace( start: _ArrayLikeComplex_co, @@ -156,48 +184,57 @@ def logspace( endpoint: bool, base: _ArrayLikeComplex_co, dtype: _DTypeLike[_ScalarT], - axis: SupportsIndex = ..., + axis: SupportsIndex = 0, ) -> NDArray[_ScalarT]: ... 
@overload def logspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - base: _ArrayLikeComplex_co = ..., + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeComplex_co = 10.0, *, dtype: _DTypeLike[_ScalarT], - axis: SupportsIndex = ..., + axis: SupportsIndex = 0, ) -> NDArray[_ScalarT]: ... @overload def logspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - base: _ArrayLikeComplex_co = ..., - dtype: DTypeLike = ..., - axis: SupportsIndex = ..., -) -> NDArray[Any]: ... + num: SupportsIndex = 50, + endpoint: bool = True, + base: _ArrayLikeComplex_co = 10.0, + dtype: DTypeLike | None = None, + axis: SupportsIndex = 0, +) -> NDArray[Incomplete]: ... +@overload +def geomspace( + start: _ToArrayFloat64, + stop: _ToArrayFloat64, + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.float64]: ... @overload def geomspace( start: _ArrayLikeFloat_co, stop: _ArrayLikeFloat_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - dtype: None = ..., - axis: SupportsIndex = ..., -) -> NDArray[floating]: ... + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.floating]: ... @overload def geomspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - dtype: None = ..., - axis: SupportsIndex = ..., -) -> NDArray[complexfloating]: ... + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: None = None, + axis: SupportsIndex = 0, +) -> NDArray[np.complexfloating]: ... @overload def geomspace( start: _ArrayLikeComplex_co, @@ -205,31 +242,31 @@ def geomspace( num: SupportsIndex, endpoint: bool, dtype: _DTypeLike[_ScalarT], - axis: SupportsIndex = ..., + axis: SupportsIndex = 0, ) -> NDArray[_ScalarT]: ... @overload def geomspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., + num: SupportsIndex = 50, + endpoint: bool = True, *, dtype: _DTypeLike[_ScalarT], - axis: SupportsIndex = ..., + axis: SupportsIndex = 0, ) -> NDArray[_ScalarT]: ... @overload def geomspace( start: _ArrayLikeComplex_co, stop: _ArrayLikeComplex_co, - num: SupportsIndex = ..., - endpoint: bool = ..., - dtype: DTypeLike = ..., - axis: SupportsIndex = ..., -) -> NDArray[Any]: ... + num: SupportsIndex = 50, + endpoint: bool = True, + dtype: DTypeLike | None = None, + axis: SupportsIndex = 0, +) -> NDArray[Incomplete]: ... def add_newdoc( place: str, obj: str, doc: str | tuple[str, str] | list[tuple[str, str]], - warn_on_python: bool = ..., + warn_on_python: bool = True, ) -> None: ... diff --git a/numpy/_core/getlimits.py b/numpy/_core/getlimits.py index f33a1254467d..2dc6d1e7fad2 100644 --- a/numpy/_core/getlimits.py +++ b/numpy/_core/getlimits.py @@ -6,7 +6,7 @@ import types import warnings -from .._utils import set_module +from numpy._utils import set_module from ._machar import MachAr from . import numeric from . 
import numerictypes as ntypes diff --git a/numpy/_core/include/numpy/npy_math.h b/numpy/_core/include/numpy/npy_math.h index d11df12b7ceb..abc784bc686c 100644 --- a/numpy/_core/include/numpy/npy_math.h +++ b/numpy/_core/include/numpy/npy_math.h @@ -363,7 +363,7 @@ NPY_INPLACE npy_longdouble npy_heavisidel(npy_longdouble x, npy_longdouble h0); static inline double npy_creal(const npy_cdouble z) { #if defined(__cplusplus) - return ((double *) &z)[0]; + return z._Val[0]; #else return creal(z); #endif @@ -377,7 +377,7 @@ static inline void npy_csetreal(npy_cdouble *z, const double r) static inline double npy_cimag(const npy_cdouble z) { #if defined(__cplusplus) - return ((double *) &z)[1]; + return z._Val[1]; #else return cimag(z); #endif @@ -391,7 +391,7 @@ static inline void npy_csetimag(npy_cdouble *z, const double i) static inline float npy_crealf(const npy_cfloat z) { #if defined(__cplusplus) - return ((float *) &z)[0]; + return z._Val[0]; #else return crealf(z); #endif @@ -405,7 +405,7 @@ static inline void npy_csetrealf(npy_cfloat *z, const float r) static inline float npy_cimagf(const npy_cfloat z) { #if defined(__cplusplus) - return ((float *) &z)[1]; + return z._Val[1]; #else return cimagf(z); #endif @@ -419,7 +419,7 @@ static inline void npy_csetimagf(npy_cfloat *z, const float i) static inline npy_longdouble npy_creall(const npy_clongdouble z) { #if defined(__cplusplus) - return ((longdouble_t *) &z)[0]; + return (npy_longdouble)z._Val[0]; #else return creall(z); #endif @@ -433,7 +433,7 @@ static inline void npy_csetreall(npy_clongdouble *z, const longdouble_t r) static inline npy_longdouble npy_cimagl(const npy_clongdouble z) { #if defined(__cplusplus) - return ((longdouble_t *) &z)[1]; + return (npy_longdouble)z._Val[1]; #else return cimagl(z); #endif diff --git a/numpy/_core/memmap.py b/numpy/_core/memmap.py index cf95687962af..561ac38a4d58 100644 --- a/numpy/_core/memmap.py +++ b/numpy/_core/memmap.py @@ -1,7 +1,7 @@ from contextlib import nullcontext import operator import numpy as np -from .._utils import set_module +from numpy._utils import set_module from .numeric import uint8, ndarray, dtype __all__ = ['memmap'] diff --git a/numpy/_core/numeric.py b/numpy/_core/numeric.py index da82391b4b8a..7adeaeddda54 100644 --- a/numpy/_core/numeric.py +++ b/numpy/_core/numeric.py @@ -26,7 +26,7 @@ from .overrides import finalize_array_function_like, set_module from .umath import (multiply, invert, sin, PINF, NAN) from . 
import numerictypes -from ..exceptions import AxisError +from numpy.exceptions import AxisError from ._ufunc_config import errstate bitwise_not = invert diff --git a/numpy/_core/numerictypes.py b/numpy/_core/numerictypes.py index 029390ab0a5a..cb8d3c11a23f 100644 --- a/numpy/_core/numerictypes.py +++ b/numpy/_core/numerictypes.py @@ -84,7 +84,7 @@ ndarray, dtype, datetime_data, datetime_as_string, busday_offset, busday_count, is_busday, busdaycalendar ) -from .._utils import set_module +from numpy._utils import set_module # we add more at the bottom __all__ = [ diff --git a/numpy/_core/numerictypes.pyi b/numpy/_core/numerictypes.pyi index f04d11303ad5..3b6b0c63713a 100644 --- a/numpy/_core/numerictypes.pyi +++ b/numpy/_core/numerictypes.pyi @@ -68,20 +68,7 @@ from .multiarray import ( ) from numpy._typing import DTypeLike -from numpy._typing._extended_precision import ( - uint128, - uint256, - int128, - int256, - float80, - float96, - float128, - float256, - complex160, - complex192, - complex256, - complex512, -) +from numpy._typing._extended_precision import float96, float128, complex192, complex256 __all__ = [ "ScalarType", @@ -146,18 +133,10 @@ __all__ = [ "bool_", "int_", "uint", - "uint128", - "uint256", - "int128", - "int256", - "float80", "float96", "float128", - "float256", - "complex160", "complex192", "complex256", - "complex512", ] @type_check_only diff --git a/numpy/_core/overrides.py b/numpy/_core/overrides.py index cb466408cd39..aed83d17b836 100644 --- a/numpy/_core/overrides.py +++ b/numpy/_core/overrides.py @@ -2,10 +2,10 @@ import collections import functools -from .._utils import set_module -from .._utils._inspect import getargspec +from numpy._utils import set_module +from numpy._utils._inspect import getargspec from numpy._core._multiarray_umath import ( - add_docstring, _get_implementing_args, _ArrayFunctionDispatcher) + add_docstring, _get_implementing_args, _ArrayFunctionDispatcher) ARRAY_FUNCTIONS = set() diff --git a/numpy/_core/records.py b/numpy/_core/records.py index 09ac92f9c1f1..6d0331984bc7 100644 --- a/numpy/_core/records.py +++ b/numpy/_core/records.py @@ -6,7 +6,7 @@ from collections import Counter from contextlib import nullcontext -from .._utils import set_module +from numpy._utils import set_module from . import numeric as sb from . 
import numerictypes as nt from .arrayprint import _get_legacy_print_mode @@ -228,7 +228,7 @@ def __getattribute__(self, attr): try: dt = obj.dtype except AttributeError: - #happens if field is Object type + # happens if field is Object type return obj if dt.names is not None: return obj.view((self.__class__, obj.dtype)) diff --git a/numpy/_core/src/multiarray/dlpack.c b/numpy/_core/src/multiarray/dlpack.c index 4bea7f9fc1ab..ac37a04c30c6 100644 --- a/numpy/_core/src/multiarray/dlpack.c +++ b/numpy/_core/src/multiarray/dlpack.c @@ -504,36 +504,12 @@ from_dlpack(PyObject *NPY_UNUSED(self), return NULL; } - /* Prepare the arguments to call objects __dlpack__() method */ - static PyObject *call_kwnames = NULL; - static PyObject *dl_cpu_device_tuple = NULL; - static PyObject *max_version = NULL; - - if (call_kwnames == NULL) { - call_kwnames = Py_BuildValue("(sss)", "dl_device", "copy", "max_version"); - if (call_kwnames == NULL) { - return NULL; - } - } - if (dl_cpu_device_tuple == NULL) { - dl_cpu_device_tuple = Py_BuildValue("(i,i)", 1, 0); - if (dl_cpu_device_tuple == NULL) { - return NULL; - } - } - if (max_version == NULL) { - max_version = Py_BuildValue("(i,i)", 1, 0); - if (max_version == NULL) { - return NULL; - } - } - /* * Prepare arguments for the full call. We always forward copy and pass * our max_version. `device` is always passed as `None`, but if the user * provided a device, we will replace it with the "cpu": (1, 0). */ - PyObject *call_args[] = {obj, Py_None, copy, max_version}; + PyObject *call_args[] = {obj, Py_None, copy, npy_static_pydata.dl_max_version}; Py_ssize_t nargsf = 1 | PY_VECTORCALL_ARGUMENTS_OFFSET; /* If device is passed it must be "cpu" and replace it with (1, 0) */ @@ -544,12 +520,13 @@ from_dlpack(PyObject *NPY_UNUSED(self), return NULL; } assert(device_request == NPY_DEVICE_CPU); - call_args[1] = dl_cpu_device_tuple; + call_args[1] = npy_static_pydata.dl_cpu_device_tuple; } PyObject *capsule = PyObject_VectorcallMethod( - npy_interned_str.__dlpack__, call_args, nargsf, call_kwnames); + npy_interned_str.__dlpack__, call_args, nargsf, + npy_static_pydata.dl_call_kwnames); if (capsule == NULL) { /* * TODO: This path should be deprecated in NumPy 2.1. 
Once deprecated diff --git a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src index 1299e55b4258..01ffd225274f 100644 --- a/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src +++ b/numpy/_core/src/multiarray/lowlevel_strided_loops.c.src @@ -708,6 +708,16 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * /************* STRIDED CASTING SPECIALIZED FUNCTIONS *************/ +#if defined(NPY_HAVE_NEON_FP16) + #define EMULATED_FP16 0 + #define NATIVE_FP16 1 + typedef _Float16 _npy_half; +#else + #define EMULATED_FP16 1 + #define NATIVE_FP16 0 + typedef npy_half _npy_half; +#endif + /**begin repeat * * #NAME1 = BOOL, @@ -723,15 +733,16 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * * #type1 = npy_bool, * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong, * npy_byte, npy_short, npy_int, npy_long, npy_longlong, - * npy_half, npy_float, npy_double, npy_longdouble, + * _npy_half, npy_float, npy_double, npy_longdouble, * npy_cfloat, npy_cdouble, npy_clongdouble# * #rtype1 = npy_bool, * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong, * npy_byte, npy_short, npy_int, npy_long, npy_longlong, - * npy_half, npy_float, npy_double, npy_longdouble, + * _npy_half, npy_float, npy_double, npy_longdouble, * npy_float, npy_double, npy_longdouble# * #is_bool1 = 1, 0*17# - * #is_half1 = 0*11, 1, 0*6# + * #is_emu_half1 = 0*11, EMULATED_FP16, 0*6# + * #is_native_half1 = 0*11, NATIVE_FP16, 0*6# * #is_float1 = 0*12, 1, 0, 0, 1, 0, 0# * #is_double1 = 0*13, 1, 0, 0, 1, 0# * #is_complex1 = 0*15, 1*3# @@ -752,15 +763,16 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * * #type2 = npy_bool, * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong, * npy_byte, npy_short, npy_int, npy_long, npy_longlong, - * npy_half, npy_float, npy_double, npy_longdouble, + * _npy_half, npy_float, npy_double, npy_longdouble, * npy_cfloat, npy_cdouble, npy_clongdouble# * #rtype2 = npy_bool, * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong, * npy_byte, npy_short, npy_int, npy_long, npy_longlong, - * npy_half, npy_float, npy_double, npy_longdouble, + * _npy_half, npy_float, npy_double, npy_longdouble, * npy_float, npy_double, npy_longdouble# * #is_bool2 = 1, 0*17# - * #is_half2 = 0*11, 1, 0*6# + * #is_emu_half2 = 0*11, EMULATED_FP16, 0*6# + * #is_native_half2 = 0*11, NATIVE_FP16, 0*6# * #is_float2 = 0*12, 1, 0, 0, 1, 0, 0# * #is_double2 = 0*13, 1, 0, 0, 1, 0# * #is_complex2 = 0*15, 1*3# @@ -774,8 +786,8 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * #if !(NPY_USE_UNALIGNED_ACCESS && !@aligned@) -/* For half types, don't use actual double/float types in conversion */ -#if @is_half1@ || @is_half2@ +/* For emulated half types, don't use actual double/float types in conversion */ +#if @is_emu_half1@ || @is_emu_half2@ # if @is_float1@ # define _TYPE1 npy_uint32 @@ -801,13 +813,13 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * #endif /* Determine an appropriate casting conversion function */ -#if @is_half1@ +#if @is_emu_half1@ # if @is_float2@ # define _CONVERT_FN(x) npy_halfbits_to_floatbits(x) # elif @is_double2@ # define _CONVERT_FN(x) npy_halfbits_to_doublebits(x) -# elif @is_half2@ +# elif @is_emu_half2@ # define _CONVERT_FN(x) (x) # elif @is_bool2@ # define _CONVERT_FN(x) ((npy_bool)!npy_half_iszero(x)) @@ -815,13 +827,13 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * # define _CONVERT_FN(x) ((_TYPE2)npy_half_to_float(x)) # endif -#elif @is_half2@ +#elif @is_emu_half2@ # if @is_float1@ # define _CONVERT_FN(x) npy_floatbits_to_halfbits(x) # elif @is_double1@ # 
define _CONVERT_FN(x) npy_doublebits_to_halfbits(x) -# elif @is_half1@ +# elif @is_emu_half1@ # define _CONVERT_FN(x) (x) # elif @is_bool1@ # define _CONVERT_FN(x) npy_float_to_half((float)(x!=0)) @@ -839,7 +851,29 @@ NPY_NO_EXPORT PyArrayMethod_StridedLoop * #endif -static NPY_GCC_OPT_3 int +// Enable auto-vectorization for floating point casts with clang +#if @is_native_half1@ || @is_float1@ || @is_double1@ + #if @is_native_half2@ || @is_float2@ || @is_double2@ + #if defined(__clang__) && !defined(__EMSCRIPTEN__) + #if __clang_major__ >= 12 + _Pragma("clang fp exceptions(ignore)") + #endif + #endif + #endif +#endif + +// Work around GCC bug for double->half casts. For SVE and +// OPT_LEVEL > 1, it implements this as double->single->half +// which is incorrect as it introduces double rounding with +// narrowing casts. +#if (@is_double1@ && @is_native_half2@) && \ + defined(NPY_HAVE_SVE) && defined(__GNUC__) + #define GCC_CAST_OPT_LEVEL __attribute__((optimize("O1"))) +#else + #define GCC_CAST_OPT_LEVEL NPY_GCC_OPT_3 +#endif + +static GCC_CAST_OPT_LEVEL int @prefix@_cast_@name1@_to_@name2@( PyArrayMethod_Context *context, char *const *args, const npy_intp *dimensions, const npy_intp *strides, @@ -933,6 +967,17 @@ static NPY_GCC_OPT_3 int return 0; } +#if @is_native_half1@ || @is_float1@ || @is_double1@ + #if @is_native_half2@ || @is_float2@ || @is_double2@ + #if defined(__clang__) && !defined(__EMSCRIPTEN__) + #if __clang_major__ >= 12 + _Pragma("clang fp exceptions(strict)") + #endif + #endif + #endif +#endif + +#undef GCC_CAST_OPT_LEVEL #undef _CONVERT_FN #undef _TYPE2 #undef _TYPE1 diff --git a/numpy/_core/src/multiarray/multiarraymodule.c b/numpy/_core/src/multiarray/multiarraymodule.c index cddfad16a972..a53dd0960ed0 100644 --- a/numpy/_core/src/multiarray/multiarraymodule.c +++ b/numpy/_core/src/multiarray/multiarraymodule.c @@ -1205,6 +1205,7 @@ _pyarray_correlate(PyArrayObject *ap1, PyArrayObject *ap2, int typenum, goto clean_ret; } + int needs_pyapi = PyDataType_FLAGCHK(PyArray_DESCR(ret), NPY_NEEDS_PYAPI); NPY_BEGIN_THREADS_DESCR(PyArray_DESCR(ret)); is1 = PyArray_STRIDES(ap1)[0]; is2 = PyArray_STRIDES(ap2)[0]; @@ -1215,6 +1216,9 @@ _pyarray_correlate(PyArrayObject *ap1, PyArrayObject *ap2, int typenum, n = n - n_left; for (i = 0; i < n_left; i++) { dot(ip1, is1, ip2, is2, op, n, ret); + if (needs_pyapi && PyErr_Occurred()) { + goto done; + } n++; ip2 -= is2; op += os; @@ -1226,19 +1230,21 @@ _pyarray_correlate(PyArrayObject *ap1, PyArrayObject *ap2, int typenum, op += os * (n1 - n2 + 1); } else { - for (i = 0; i < (n1 - n2 + 1); i++) { + for (i = 0; i < (n1 - n2 + 1) && (!needs_pyapi || !PyErr_Occurred()); + i++) { dot(ip1, is1, ip2, is2, op, n, ret); ip1 += is1; op += os; } } - for (i = 0; i < n_right; i++) { + for (i = 0; i < n_right && (!needs_pyapi || !PyErr_Occurred()); i++) { n--; dot(ip1, is1, ip2, is2, op, n, ret); ip1 += is1; op += os; } +done: NPY_END_THREADS_DESCR(PyArray_DESCR(ret)); if (PyErr_Occurred()) { goto clean_ret; diff --git a/numpy/_core/src/multiarray/npy_static_data.c b/numpy/_core/src/multiarray/npy_static_data.c index 2cc6ea72c26e..62e1fd3c1b15 100644 --- a/numpy/_core/src/multiarray/npy_static_data.c +++ b/numpy/_core/src/multiarray/npy_static_data.c @@ -184,6 +184,22 @@ initialize_static_globals(void) return -1; } + npy_static_pydata.dl_call_kwnames = + Py_BuildValue("(sss)", "dl_device", "copy", "max_version"); + if (npy_static_pydata.dl_call_kwnames == NULL) { + return -1; + } + + npy_static_pydata.dl_cpu_device_tuple = Py_BuildValue("(i,i)", 1, 0); 
+ if (npy_static_pydata.dl_cpu_device_tuple == NULL) { + return -1; + } + + npy_static_pydata.dl_max_version = Py_BuildValue("(i,i)", 1, 0); + if (npy_static_pydata.dl_max_version == NULL) { + return -1; + } + /* * Initialize contents of npy_static_cdata struct * diff --git a/numpy/_core/src/multiarray/npy_static_data.h b/numpy/_core/src/multiarray/npy_static_data.h index d6ee4a8dc54d..287dc80e4c1f 100644 --- a/numpy/_core/src/multiarray/npy_static_data.h +++ b/numpy/_core/src/multiarray/npy_static_data.h @@ -138,6 +138,13 @@ typedef struct npy_static_pydata_struct { PyObject *GenericToVoidMethod; PyObject *ObjectToGenericMethod; PyObject *GenericToObjectMethod; + + /* + * Used in from_dlpack + */ + PyObject *dl_call_kwnames; + PyObject *dl_cpu_device_tuple; + PyObject *dl_max_version; } npy_static_pydata_struct; diff --git a/numpy/_core/src/multiarray/stringdtype/dtype.c b/numpy/_core/src/multiarray/stringdtype/dtype.c index 41dfa7c21ca1..a06e7a1ed1b6 100644 --- a/numpy/_core/src/multiarray/stringdtype/dtype.c +++ b/numpy/_core/src/multiarray/stringdtype/dtype.c @@ -633,11 +633,16 @@ PyArray_Descr * stringdtype_finalize_descr(PyArray_Descr *dtype) { PyArray_StringDTypeObject *sdtype = (PyArray_StringDTypeObject *)dtype; + // acquire the allocator lock in case the descriptor we want to finalize + // is shared between threads, see gh-28813 + npy_string_allocator *allocator = NpyString_acquire_allocator(sdtype); if (sdtype->array_owned == 0) { sdtype->array_owned = 1; + NpyString_release_allocator(allocator); Py_INCREF(dtype); return dtype; } + NpyString_release_allocator(allocator); PyArray_StringDTypeObject *ret = (PyArray_StringDTypeObject *)new_stringdtype_instance( sdtype->na_object, sdtype->coerce); ret->array_owned = 1; @@ -850,14 +855,17 @@ init_string_dtype(void) return -1; } - PyArray_Descr *singleton = - NPY_DT_CALL_default_descr(&PyArray_StringDType); + PyArray_StringDTypeObject *singleton = + (PyArray_StringDTypeObject *)NPY_DT_CALL_default_descr(&PyArray_StringDType); if (singleton == NULL) { return -1; } - PyArray_StringDType.singleton = singleton; + // never associate the singleton with an array + singleton->array_owned = 1; + + PyArray_StringDType.singleton = (PyArray_Descr *)singleton; PyArray_StringDType.type_num = NPY_VSTRING; for (int i = 0; PyArray_StringDType_casts[i] != NULL; i++) { diff --git a/numpy/_core/strings.py b/numpy/_core/strings.py index befd76d8ceed..cd6d1ec439f1 100644 --- a/numpy/_core/strings.py +++ b/numpy/_core/strings.py @@ -1332,8 +1332,8 @@ def replace(a, old, new, count=-1): return _replace(arr, old, new, count) a_dt = arr.dtype - old = old.astype(old_dtype if old_dtype else a_dt, copy=False) - new = new.astype(new_dtype if new_dtype else a_dt, copy=False) + old = old.astype(old_dtype or a_dt, copy=False) + new = new.astype(new_dtype or a_dt, copy=False) max_int64 = np.iinfo(np.int64).max counts = _count_ufunc(arr, old, 0, max_int64) counts = np.where(count < 0, counts, np.minimum(counts, count)) diff --git a/numpy/_core/tests/test_api.py b/numpy/_core/tests/test_api.py index f0200d59cafe..8d7c617898e6 100644 --- a/numpy/_core/tests/test_api.py +++ b/numpy/_core/tests/test_api.py @@ -82,8 +82,8 @@ def test_array_array(): dtype=[('f0', int), ('f1', float), ('f2', str)]) o = type("o", (object,), {"__array_struct__": a.__array_struct__}) - ## wasn't what I expected... is np.array(o) supposed to equal a ? - ## instead we get a array([...], dtype=">V18") + # wasn't what I expected... is np.array(o) supposed to equal a ? 
+ # instead we get a array([...], dtype=">V18") assert_equal(bytes(np.array(o).data), bytes(a.data)) # test array diff --git a/numpy/_core/tests/test_array_interface.py b/numpy/_core/tests/test_array_interface.py index 1917c8fecafe..ed56f7e79daf 100644 --- a/numpy/_core/tests/test_array_interface.py +++ b/numpy/_core/tests/test_array_interface.py @@ -2,6 +2,7 @@ import pytest import numpy as np from numpy.testing import extbuild, IS_WASM, IS_EDITABLE +import sysconfig @pytest.fixture @@ -123,6 +124,8 @@ def get_module(tmp_path): pass # if it does not exist, build and load it + if sysconfig.get_platform() == "win-arm64": + pytest.skip("Meson unable to find MSVC linker on win-arm64") return extbuild.build_and_import_extension('array_interface_testing', functions, prologue=prologue, diff --git a/numpy/_core/tests/test_arrayprint.py b/numpy/_core/tests/test_arrayprint.py index 4b9da0ebb7c6..09ed71f342a2 100644 --- a/numpy/_core/tests/test_arrayprint.py +++ b/numpy/_core/tests/test_arrayprint.py @@ -320,8 +320,8 @@ def test_structure_format_float(self): assert_equal(np.array2string(array_scalar), "(1., 2.12345679, 3.)") def test_unstructured_void_repr(self): - a = np.array([27, 91, 50, 75, 7, 65, 10, 8, - 27, 91, 51, 49, 109, 82, 101, 100], dtype='u1').view('V8') + a = np.array([27, 91, 50, 75, 7, 65, 10, 8, 27, 91, 51, 49, 109, 82, 101, 100], + dtype='u1').view('V8') assert_equal(repr(a[0]), r"np.void(b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08')") assert_equal(str(a[0]), r"b'\x1B\x5B\x32\x4B\x07\x41\x0A\x08'") diff --git a/numpy/_core/tests/test_cpu_features.py b/numpy/_core/tests/test_cpu_features.py index 62b0eac5dda5..f4bd02ab55e5 100644 --- a/numpy/_core/tests/test_cpu_features.py +++ b/numpy/_core/tests/test_cpu_features.py @@ -117,7 +117,7 @@ def load_flags_auxv(self): @pytest.mark.skipif( sys.platform == 'emscripten', - reason= ( + reason=( "The subprocess module is not available on WASM platforms and" " therefore this test class cannot be properly executed." 
), diff --git a/numpy/_core/tests/test_cython.py b/numpy/_core/tests/test_cython.py index 81ddc63258c2..fda70b9ac79c 100644 --- a/numpy/_core/tests/test_cython.py +++ b/numpy/_core/tests/test_cython.py @@ -3,6 +3,7 @@ import subprocess import sys import pytest +import sysconfig import numpy as np from numpy.testing import assert_array_equal, IS_WASM, IS_EDITABLE @@ -53,6 +54,8 @@ def install_temp(tmpdir_factory): subprocess.check_call(["meson", "--version"]) except FileNotFoundError: pytest.skip("No usable 'meson' found") + if sysconfig.get_platform() == "win-arm64": + pytest.skip("Meson unable to find MSVC linker on win-arm64") if sys.platform == "win32": subprocess.check_call(["meson", "setup", "--buildtype=release", @@ -341,6 +344,7 @@ def test_npystring_allocators_other_dtype(install_temp): assert checks.npystring_allocators_other_types(arr1, arr2) == 0 +@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', reason='no checks module on win-arm64') def test_npy_uintp_type_enum(): import checks assert checks.check_npy_uintp_type_enum() diff --git a/numpy/_core/tests/test_datetime.py b/numpy/_core/tests/test_datetime.py index d2e4e5ec6cad..8d48e8a6630a 100644 --- a/numpy/_core/tests/test_datetime.py +++ b/numpy/_core/tests/test_datetime.py @@ -2492,7 +2492,7 @@ def test_isfinite_isinf_isnan_units(self, unit, dstr): '''check isfinite, isinf, isnan for all units of M, m dtypes ''' arr_val = [123, -321, "NaT"] - arr = np.array(arr_val, dtype= dstr % unit) + arr = np.array(arr_val, dtype=(dstr % unit)) pos = np.array([True, True, False]) neg = np.array([False, False, True]) false = np.array([False, False, False]) diff --git a/numpy/_core/tests/test_dtype.py b/numpy/_core/tests/test_dtype.py index 5d458729d278..68698fc229fb 100644 --- a/numpy/_core/tests/test_dtype.py +++ b/numpy/_core/tests/test_dtype.py @@ -742,7 +742,7 @@ def test_shape_invalid(self): assert_raises(ValueError, np.dtype, [('a', 'f4', (-1, -1))]) def test_alignment(self): - #Check that subarrays are aligned + # Check that subarrays are aligned t1 = np.dtype('(1,)i4', align=True) t2 = np.dtype('2i4', align=True) assert_equal(t1.alignment, t2.alignment) diff --git a/numpy/_core/tests/test_einsum.py b/numpy/_core/tests/test_einsum.py index 5aa75b5a6b97..f3fd137b7c5c 100644 --- a/numpy/_core/tests/test_einsum.py +++ b/numpy/_core/tests/test_einsum.py @@ -764,7 +764,7 @@ def __mul__(self, other): return 42 objMult = np.array([Mult()]) - objNULL = np.ndarray(buffer = b'\0' * np.intp(0).itemsize, shape=1, dtype=object) + objNULL = np.ndarray(buffer=b'\0' * np.intp(0).itemsize, shape=1, dtype=object) with pytest.raises(TypeError): np.einsum("i,j", [1], objNULL) @@ -1240,7 +1240,7 @@ def test_path_type_input(self): assert_almost_equal(noopt, opt) def test_path_type_input_internal_trace(self): - #gh-20962 + # gh-20962 path_test = self.build_operands('cab,cdd->ab') exp_path = ['einsum_path', (1,), (0, 1)] @@ -1266,7 +1266,7 @@ def test_path_type_input_invalid(self): RuntimeError, np.einsum_path, *path_test, optimize=exp_path) def test_spaces(self): - #gh-10794 + # gh-10794 arr = np.array([[1]]) for sp in itertools.product(['', ' '], repeat=4): # no error for any spacing @@ -1279,7 +1279,7 @@ def test_overlap(): # sanity check c = np.einsum('ij,jk->ik', a, b) assert_equal(c, d) - #gh-10080, out overlaps one of the operands + # gh-10080, out overlaps one of the operands c = np.einsum('ij,jk->ik', a, b, out=b) assert_equal(c, d) diff --git a/numpy/_core/tests/test_indexing.py b/numpy/_core/tests/test_indexing.py index 
c9508bb03bdc..b65533bbc5ef 100644 --- a/numpy/_core/tests/test_indexing.py +++ b/numpy/_core/tests/test_indexing.py @@ -219,7 +219,7 @@ def test_boolean_shape_mismatch(self): def test_boolean_indexing_onedim(self): # Indexing a 2-dimensional array with # boolean array of length one - a = np.array([[0., 0., 0.]]) + a = np.array([[0., 0., 0.]]) b = np.array([True], dtype=bool) assert_equal(a[b], a) # boolean assignment @@ -492,7 +492,7 @@ def test_unaligned(self): x = x.view(np.dtype("S8")) x[...] = np.array("b" * 8, dtype="S") b = np.arange(d.size) - #trivial + # trivial assert_equal(d[b], d) d[b] = x # nontrivial @@ -643,7 +643,7 @@ def test_prepend_not_one(self): a = np.zeros(5) # Too large and not only ones. - assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1))) + assert_raises(ValueError, assign, a, s_[...], np.ones((2, 1))) assert_raises(ValueError, assign, a, s_[[1, 2, 3],], np.ones((2, 1))) assert_raises(ValueError, assign, a, s_[[[1], [2]],], np.ones((2, 2, 1))) diff --git a/numpy/_core/tests/test_limited_api.py b/numpy/_core/tests/test_limited_api.py index 51bed1d77561..58f2b5ce050d 100644 --- a/numpy/_core/tests/test_limited_api.py +++ b/numpy/_core/tests/test_limited_api.py @@ -52,6 +52,8 @@ def install_temp(tmpdir_factory): subprocess.check_call(["meson", "--version"]) except FileNotFoundError: pytest.skip("No usable 'meson' found") + if sysconfig.get_platform() == "win-arm64": + pytest.skip("Meson unable to find MSVC linker on win-arm64") if sys.platform == "win32": subprocess.check_call(["meson", "setup", "--werror", diff --git a/numpy/_core/tests/test_mem_policy.py b/numpy/_core/tests/test_mem_policy.py index 9846f89c404c..8d09a9ded659 100644 --- a/numpy/_core/tests/test_mem_policy.py +++ b/numpy/_core/tests/test_mem_policy.py @@ -3,6 +3,7 @@ import os import sys import threading +import sysconfig import pytest @@ -220,6 +221,8 @@ def get_module(tmp_path): except ImportError: pass # if it does not exist, build and load it + if sysconfig.get_platform() == "win-arm64": + pytest.skip("Meson unable to find MSVC linker on win-arm64") return extbuild.build_and_import_extension('mem_policy', functions, prologue=prologue, diff --git a/numpy/_core/tests/test_multiarray.py b/numpy/_core/tests/test_multiarray.py index 6d97124d66c0..0a62cb6945f0 100644 --- a/numpy/_core/tests/test_multiarray.py +++ b/numpy/_core/tests/test_multiarray.py @@ -2295,11 +2295,11 @@ def test_void_sort(self): arr[::-1].sort() def test_sort_raises(self): - #gh-9404 + # gh-9404 arr = np.array([0, datetime.now(), 1], dtype=object) for kind in self.sort_kinds: assert_raises(TypeError, arr.sort, kind=kind) - #gh-3879 + # gh-3879 class Raiser: def raises_anything(*args, **kwargs): @@ -9427,8 +9427,7 @@ def _make_readonly(x): np.array([1, 2, 3]), np.array([['one', 'two'], ['three', 'four']]), np.array((1, 2), dtype='i4,i4'), - np.zeros((2,), dtype= - np.dtype({ + np.zeros((2,), dtype=np.dtype({ "formats": [' 2 ** 31 c_arr = np.ctypeslib.as_ctypes(arr) diff --git a/numpy/_core/tests/test_scalarmath.py b/numpy/_core/tests/test_scalarmath.py index e9cac03c7a9b..0b086df21c60 100644 --- a/numpy/_core/tests/test_scalarmath.py +++ b/numpy/_core/tests/test_scalarmath.py @@ -171,11 +171,11 @@ def test_blocked(self): inp2[...] 
+= np.arange(inp2.size, dtype=dt) + 1 assert_almost_equal(np.square(inp2), - np.multiply(inp2, inp2), err_msg=msg) + np.multiply(inp2, inp2), err_msg=msg) # skip true divide for ints if dt != np.int32: assert_almost_equal(np.reciprocal(inp2), - np.divide(1, inp2), err_msg=msg) + np.divide(1, inp2), err_msg=msg) inp1[...] = np.ones_like(inp1) np.add(inp1, 2, out=out) @@ -562,13 +562,13 @@ def test_numpy_scalar_relational_operators(self): assert_(not np.array(1, dtype=dt1)[()] < np.array(0, dtype=dt2)[()], f"type {dt1} and {dt2} failed") - #Unsigned integers + # Unsigned integers for dt1 in 'BHILQP': assert_(-1 < np.array(1, dtype=dt1)[()], f"type {dt1} failed") assert_(not -1 > np.array(1, dtype=dt1)[()], f"type {dt1} failed") assert_(-1 != np.array(1, dtype=dt1)[()], f"type {dt1} failed") - #unsigned vs signed + # unsigned vs signed for dt2 in 'bhilqp': assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()], f"type {dt1} and {dt2} failed") @@ -577,7 +577,7 @@ def test_numpy_scalar_relational_operators(self): assert_(np.array(1, dtype=dt1)[()] != np.array(-1, dtype=dt2)[()], f"type {dt1} and {dt2} failed") - #Signed integers and floats + # Signed integers and floats for dt1 in 'bhlqp' + np.typecodes['Float']: assert_(1 > np.array(-1, dtype=dt1)[()], f"type {dt1} failed") assert_(not 1 < np.array(-1, dtype=dt1)[()], f"type {dt1} failed") diff --git a/numpy/_core/tests/test_scalarprint.py b/numpy/_core/tests/test_scalarprint.py index 16a9267e235c..298eb232eafb 100644 --- a/numpy/_core/tests/test_scalarprint.py +++ b/numpy/_core/tests/test_scalarprint.py @@ -303,7 +303,7 @@ def test_dragon4_positional_interface_overflow(self, tp, pad_val): fpos = np.format_float_positional - #gh-28068 + # gh-28068 with pytest.raises(RuntimeError, match="Float formatting result too large"): fpos(tp('1.047'), unique=False, precision=pad_val) diff --git a/numpy/_core/tests/test_shape_base.py b/numpy/_core/tests/test_shape_base.py index 58a36e8c022f..9e4ef3a8e6e9 100644 --- a/numpy/_core/tests/test_shape_base.py +++ b/numpy/_core/tests/test_shape_base.py @@ -477,13 +477,13 @@ def test_stack(): with pytest.raises(TypeError, match="arrays to stack must be"): stack(x for x in range(3)) - #casting and dtype test + # casting and dtype test a = np.array([1, 2, 3]) b = np.array([2.5, 3.5, 4.5]) res = np.stack((a, b), axis=1, casting="unsafe", dtype=np.int64) expected_res = np.array([[1, 2], [2, 3], [3, 4]]) assert_array_equal(res, expected_res) - #casting and dtype with TypeError + # casting and dtype with TypeError with assert_raises(TypeError): stack((a, b), dtype=np.int64, axis=1, casting="safe") diff --git a/numpy/_core/tests/test_ufunc.py b/numpy/_core/tests/test_ufunc.py index 0dbef44300b1..26844fabd437 100644 --- a/numpy/_core/tests/test_ufunc.py +++ b/numpy/_core/tests/test_ufunc.py @@ -2104,7 +2104,7 @@ def __rmul__(self, other): def test_array_wrap_array_priority(self): class ArrayPriorityBase(np.ndarray): @classmethod - def __array_wrap__(cls, array, context=None, return_scalar = False): + def __array_wrap__(cls, array, context=None, return_scalar=False): return cls class ArrayPriorityMinus0(ArrayPriorityBase): diff --git a/numpy/_core/tests/test_umath.py b/numpy/_core/tests/test_umath.py index 0efa51bfd772..22ad1b8ac302 100644 --- a/numpy/_core/tests/test_umath.py +++ b/numpy/_core/tests/test_umath.py @@ -1167,14 +1167,14 @@ def assert_complex_equal(x, y): assert_array_equal(x.real, y.real) assert_array_equal(x.imag, y.imag) - #Complex powers with positive real part will not generate a warning + # 
Complex powers with positive real part will not generate a warning assert_complex_equal(np.power(zero, 1 + 4j), zero) assert_complex_equal(np.power(zero, 2 - 3j), zero) - #Testing zero values when real part is greater than zero + # Testing zero values when real part is greater than zero assert_complex_equal(np.power(zero, 1 + 1j), zero) assert_complex_equal(np.power(zero, 1 + 0j), zero) assert_complex_equal(np.power(zero, 1 - 1j), zero) - #Complex powers will negative real part or 0 (provided imaginary + # Complex powers will negative real part or 0 (provided imaginary # part is not zero) will generate a NAN and hence a RUNTIME warning with pytest.warns(expected_warning=RuntimeWarning) as r: assert_complex_equal(np.power(zero, -1 + 1j), cnan) diff --git a/numpy/_core/tests/test_umath_accuracy.py b/numpy/_core/tests/test_umath_accuracy.py index 0d3c99bf7d54..a0e0cbccc596 100644 --- a/numpy/_core/tests/test_umath_accuracy.py +++ b/numpy/_core/tests/test_umath_accuracy.py @@ -75,7 +75,7 @@ def test_validate_transcendentals(self): assert_array_max_ulp(npfunc(inval), outval, maxulperr) @pytest.mark.skipif(IS_AVX512FP16, - reason = "SVML FP16 have slightly higher ULP errors") + reason="SVML FP16 have slightly higher ULP errors") @pytest.mark.parametrize("ufunc", UNARY_OBJECT_UFUNCS) def test_validate_fp16_transcendentals(self, ufunc): with np.errstate(all='ignore'): diff --git a/numpy/_core/tests/test_umath_complex.py b/numpy/_core/tests/test_umath_complex.py index 81c1447c4cbb..eb221f15f327 100644 --- a/numpy/_core/tests/test_umath_complex.py +++ b/numpy/_core/tests/test_umath_complex.py @@ -16,7 +16,7 @@ # At least on Windows the results of many complex functions are not conforming # to the C99 standard. See ticket 1574. # Ditto for Solaris (ticket 1642) and OS X on PowerPC. -#FIXME: this will probably change when we require full C99 compatibility +# FIXME: this will probably change when we require full C99 compatibility with np.errstate(all='ignore'): functions_seem_flaky = ((np.exp(complex(np.inf, 0)).imag != 0) or (np.log(complex(ncu.NZERO, 0)).imag != np.pi)) @@ -333,7 +333,7 @@ def test_special_values(self): def _check_ninf_nan(dummy): msgform = "csqrt(-inf, nan) is (%f, %f), expected (nan, +-inf)" z = np.sqrt(np.array(complex(-np.inf, np.nan))) - #Fixme: ugly workaround for isinf bug. + # FIXME: ugly workaround for isinf bug. 
with np.errstate(invalid='ignore'): if not (np.isnan(z.real) and np.isinf(z.imag)): raise AssertionError(msgform % (z.real, z.imag)) diff --git a/numpy/_pytesttester.py b/numpy/_pytesttester.py index 482c6eddbca0..f41d54f36bec 100644 --- a/numpy/_pytesttester.py +++ b/numpy/_pytesttester.py @@ -39,7 +39,7 @@ def _show_numpy_info(): print(f"NumPy version {np.__version__}") info = np.lib._utils_impl._opt_info() - print("NumPy CPU features: ", (info if info else 'nothing enabled')) + print("NumPy CPU features: ", (info or 'nothing enabled')) class PytestTester: diff --git a/numpy/_typing/__init__.py b/numpy/_typing/__init__.py index 92cdcec84900..a0ed7cd53622 100644 --- a/numpy/_typing/__init__.py +++ b/numpy/_typing/__init__.py @@ -6,15 +6,13 @@ _NestedSequence as _NestedSequence, ) from ._nbit_base import ( - NBitBase as NBitBase, + NBitBase as NBitBase, # pyright: ignore[reportDeprecated] _8Bit as _8Bit, _16Bit as _16Bit, _32Bit as _32Bit, _64Bit as _64Bit, - _80Bit as _80Bit, _96Bit as _96Bit, _128Bit as _128Bit, - _256Bit as _256Bit, ) from ._nbit import ( _NBitByte as _NBitByte, diff --git a/numpy/_typing/_add_docstring.py b/numpy/_typing/_add_docstring.py index 493e775ebae2..da415f1b94c6 100644 --- a/numpy/_typing/_add_docstring.py +++ b/numpy/_typing/_add_docstring.py @@ -137,7 +137,7 @@ def _parse_docstrings() -> str: >>> import numpy.typing as npt >>> print(npt.NDArray) - numpy.ndarray[tuple[int, ...], numpy.dtype[+_ScalarT_co]] + numpy.ndarray[tuple[int, ...], numpy.dtype[~_ScalarT]] >>> print(npt.NDArray[np.float64]) numpy.ndarray[tuple[int, ...], numpy.dtype[numpy.float64]] diff --git a/numpy/_typing/_array_like.py b/numpy/_typing/_array_like.py index 56388db1155e..b4c291639d6a 100644 --- a/numpy/_typing/_array_like.py +++ b/numpy/_typing/_array_like.py @@ -23,7 +23,7 @@ _DTypeT = TypeVar("_DTypeT", bound=dtype[Any]) _DTypeT_co = TypeVar("_DTypeT_co", covariant=True, bound=dtype[Any]) -NDArray: TypeAlias = np.ndarray[_Shape, dtype[_ScalarT_co]] +NDArray: TypeAlias = np.ndarray[_Shape, dtype[_ScalarT]] # The `_SupportsArray` protocol only cares about the default dtype # (i.e. `dtype=None` or no `dtype` parameter at all) of the to-be returned diff --git a/numpy/_typing/_extended_precision.py b/numpy/_typing/_extended_precision.py index 7246b47d0ee1..73a1847ccbeb 100644 --- a/numpy/_typing/_extended_precision.py +++ b/numpy/_typing/_extended_precision.py @@ -6,22 +6,9 @@ """ import numpy as np -from . import ( - _80Bit, - _96Bit, - _128Bit, - _256Bit, -) +from . import _96Bit, _128Bit -uint128 = np.unsignedinteger[_128Bit] -uint256 = np.unsignedinteger[_256Bit] -int128 = np.signedinteger[_128Bit] -int256 = np.signedinteger[_256Bit] -float80 = np.floating[_80Bit] float96 = np.floating[_96Bit] float128 = np.floating[_128Bit] -float256 = np.floating[_256Bit] -complex160 = np.complexfloating[_80Bit, _80Bit] complex192 = np.complexfloating[_96Bit, _96Bit] complex256 = np.complexfloating[_128Bit, _128Bit] -complex512 = np.complexfloating[_256Bit, _256Bit] diff --git a/numpy/_typing/_nbit_base.py b/numpy/_typing/_nbit_base.py index 4f764757c4ea..aa8b85cd1592 100644 --- a/numpy/_typing/_nbit_base.py +++ b/numpy/_typing/_nbit_base.py @@ -1,5 +1,5 @@ """A module with the precisions of generic `~numpy.number` types.""" -from .._utils import set_module +from numpy._utils import set_module from typing import final @@ -9,13 +9,17 @@ class NBitBase: """ A type representing `numpy.number` precision during static type checking. 
- Used exclusively for the purpose static type checking, `NBitBase` + Used exclusively for the purpose of static type checking, `NBitBase` represents the base of a hierarchical set of subclasses. Each subsequent subclass is herein used for representing a lower level of precision, *e.g.* ``64Bit > 32Bit > 16Bit``. .. versionadded:: 1.20 + .. deprecated:: 2.3 + Use ``@typing.overload`` or a ``TypeVar`` with a scalar-type as upper + bound, instead. + Examples -------- Below is a typical usage example: `NBitBase` is herein used for annotating @@ -48,11 +52,11 @@ class NBitBase: ... # note: out: numpy.floating[numpy.typing._64Bit*] """ + # Deprecated in NumPy 2.3, 2025-05-01 def __init_subclass__(cls) -> None: allowed_names = { - "NBitBase", "_256Bit", "_128Bit", "_96Bit", "_80Bit", - "_64Bit", "_32Bit", "_16Bit", "_8Bit", + "NBitBase", "_128Bit", "_96Bit", "_64Bit", "_32Bit", "_16Bit", "_8Bit" } if cls.__name__ not in allowed_names: raise TypeError('cannot inherit from final class "NBitBase"') @@ -61,40 +65,30 @@ def __init_subclass__(cls) -> None: @final @set_module("numpy._typing") # Silence errors about subclassing a `@final`-decorated class -class _256Bit(NBitBase): # type: ignore[misc] - pass - -@final -@set_module("numpy._typing") -class _128Bit(_256Bit): # type: ignore[misc] - pass - -@final -@set_module("numpy._typing") -class _96Bit(_128Bit): # type: ignore[misc] +class _128Bit(NBitBase): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] pass @final @set_module("numpy._typing") -class _80Bit(_96Bit): # type: ignore[misc] +class _96Bit(_128Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] pass @final @set_module("numpy._typing") -class _64Bit(_80Bit): # type: ignore[misc] +class _64Bit(_96Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] pass @final @set_module("numpy._typing") -class _32Bit(_64Bit): # type: ignore[misc] +class _32Bit(_64Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] pass @final @set_module("numpy._typing") -class _16Bit(_32Bit): # type: ignore[misc] +class _16Bit(_32Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] pass @final @set_module("numpy._typing") -class _8Bit(_16Bit): # type: ignore[misc] +class _8Bit(_16Bit): # type: ignore[misc] # pyright: ignore[reportGeneralTypeIssues] pass diff --git a/numpy/_typing/_nbit_base.pyi b/numpy/_typing/_nbit_base.pyi new file mode 100644 index 000000000000..ccf8f5ceac45 --- /dev/null +++ b/numpy/_typing/_nbit_base.pyi @@ -0,0 +1,40 @@ +# pyright: reportDeprecated=false +# pyright: reportGeneralTypeIssues=false +# mypy: disable-error-code=misc + +from typing import final + +from typing_extensions import deprecated + +# Deprecated in NumPy 2.3, 2025-05-01 +@deprecated( + "`NBitBase` is deprecated and will be removed from numpy.typing in the " + "future. Use `@typing.overload` or a `TypeVar` with a scalar-type as upper " + "bound, instead. (deprecated in NumPy 2.3)", +) +@final +class NBitBase: ... + +@final +class _256Bit(NBitBase): ... + +@final +class _128Bit(_256Bit): ... + +@final +class _96Bit(_128Bit): ... + +@final +class _80Bit(_96Bit): ... + +@final +class _64Bit(_80Bit): ... + +@final +class _32Bit(_64Bit): ... + +@final +class _16Bit(_32Bit): ... + +@final +class _8Bit(_16Bit): ... diff --git a/numpy/_typing/_ufunc.py b/numpy/_typing/_ufunc.py index d0573c8f5463..db52a1fdb318 100644 --- a/numpy/_typing/_ufunc.py +++ b/numpy/_typing/_ufunc.py @@ -1,4 +1,4 @@ -from .. 
import ufunc +from numpy import ufunc _UFunc_Nin1_Nout1 = ufunc _UFunc_Nin2_Nout1 = ufunc diff --git a/numpy/_utils/__init__.pyi b/numpy/_utils/__init__.pyi index 4e8eca9e9a11..f3472df9a554 100644 --- a/numpy/_utils/__init__.pyi +++ b/numpy/_utils/__init__.pyi @@ -20,7 +20,7 @@ class _HasModule(Protocol): @overload def set_module(module: None) -> IdentityFunction: ... @overload -def set_module(module: _HasModuleT) -> _HasModuleT: ... +def set_module(module: str) -> Callable[[_HasModuleT], _HasModuleT]: ... # def _rename_parameter( diff --git a/numpy/compat/__init__.py b/numpy/compat/__init__.py index 729265aa9c27..8f926c4bd568 100644 --- a/numpy/compat/__init__.py +++ b/numpy/compat/__init__.py @@ -13,8 +13,8 @@ import warnings -from .._utils import _inspect -from .._utils._inspect import getargspec, formatargspec +from numpy._utils import _inspect +from numpy._utils._inspect import getargspec, formatargspec from . import py3k from .py3k import * diff --git a/numpy/conftest.py b/numpy/conftest.py index 9ae2b290ee71..84d856e55684 100644 --- a/numpy/conftest.py +++ b/numpy/conftest.py @@ -102,7 +102,7 @@ def pytest_terminal_summary(terminalreporter, exitstatus, config): tr.line("code that re-enables the GIL should do so in a subprocess.") pytest.exit("GIL re-enabled during tests", returncode=1) -#FIXME when yield tests are gone. +# FIXME when yield tests are gone. @pytest.hookimpl() def pytest_itemcollected(item): """ diff --git a/numpy/ctypeslib/_ctypeslib.py b/numpy/ctypeslib/_ctypeslib.py index bd5632702d8c..40b9e58b5912 100644 --- a/numpy/ctypeslib/_ctypeslib.py +++ b/numpy/ctypeslib/_ctypeslib.py @@ -158,9 +158,9 @@ def load_library(libname, loader_path): try: return ctypes.cdll[libpath] except OSError: - ## defective lib file + # defective lib file raise - ## if no successful return in the libname_ext loop: + # if no successful return in the libname_ext loop: raise OSError("no file with expected extension") diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py index 2599a9e9a807..944ba2d03b33 100644 --- a/numpy/distutils/mingw32ccompiler.py +++ b/numpy/distutils/mingw32ccompiler.py @@ -262,6 +262,7 @@ def generate_def(dll, dfile): def find_dll(dll_name): arch = {'AMD64' : 'amd64', + 'ARM64' : 'arm64', 'Intel' : 'x86'}[get_build_architecture()] def _find_dll_in_winsxs(dll_name): @@ -351,6 +352,8 @@ def build_import_library(): arch = get_build_architecture() if arch == 'AMD64': return _build_import_library_amd64() + if arch == 'ARM64': + return _build_import_library_arm64() elif arch == 'Intel': return _build_import_library_x86() else: @@ -412,6 +415,26 @@ def _build_import_library_amd64(): cmd = ['dlltool', '-d', def_file, '-l', out_file] subprocess.check_call(cmd) +def _build_import_library_arm64(): + out_exists, out_file = _check_for_import_lib() + if out_exists: + log.debug('Skip building import library: "%s" exists', out_file) + return + + # get the runtime dll for which we are building import library + dll_file = find_python_dll() + log.info('Building import library (arch=ARM64): "%s" (from %s)' % + (out_file, dll_file)) + + # generate symbol list from this library + def_name = "python%d%d.def" % tuple(sys.version_info[:2]) + def_file = os.path.join(sys.prefix, 'libs', def_name) + generate_def(dll_file, def_file) + + # generate import library from this symbol list + cmd = ['dlltool', '-d', def_file, '-l', out_file] + subprocess.check_call(cmd) + def _build_import_library_x86(): """ Build the import libraries for Mingw32-gcc on Windows """ diff --git 
a/numpy/distutils/tests/test_mingw32ccompiler.py b/numpy/distutils/tests/test_mingw32ccompiler.py index 19b19450fc8c..c4eac7b72de1 100644 --- a/numpy/distutils/tests/test_mingw32ccompiler.py +++ b/numpy/distutils/tests/test_mingw32ccompiler.py @@ -3,6 +3,7 @@ import sys import pytest import os +import sysconfig from numpy.distutils import mingw32ccompiler @@ -10,6 +11,7 @@ @pytest.mark.skipif(sys.platform != 'win32', reason='win32 only test') @pytest.mark.skipif(not os.path.exists(os.path.join(sys.prefix, 'libs')), reason="test requires mingw library layout") +@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', reason='mingw GNU objdump does not understand arm64 binary format yet') def test_build_import(): '''Test the mingw32ccompiler.build_import_library, which builds a `python.a` from the MSVC `python.lib` diff --git a/numpy/dtypes.pyi b/numpy/dtypes.pyi index fad612380359..07f889406353 100644 --- a/numpy/dtypes.pyi +++ b/numpy/dtypes.pyi @@ -1,6 +1,5 @@ # ruff: noqa: ANN401 -from types import MemberDescriptorType -from typing import Any, ClassVar, Generic, LiteralString, NoReturn, Self, TypeAlias, final, type_check_only +from typing import Any, Generic, LiteralString, Never, NoReturn, Self, TypeAlias, final, overload, type_check_only from typing import Literal as L from typing_extensions import TypeVar @@ -568,40 +567,54 @@ class TimeDelta64DType( # type: ignore[misc] "m8[as]", ]: ... +_NaObjectT_co = TypeVar("_NaObjectT_co", default=Never, covariant=True) + @final class StringDType( # type: ignore[misc] _TypeCodes[L["T"], L["T"], L[2056]], _NativeOrder, _NBit[L[8], L[16]], - # TODO: Replace the (invalid) `str` with the scalar type, once implemented - np.dtype[str], # type: ignore[type-var] # pyright: ignore[reportGeneralTypeIssues,reportInvalidTypeArguments] + # TODO(jorenham): change once we have a string scalar type: + # https://github.com/numpy/numpy/issues/28165 + np.dtype[str], # type: ignore[type-var] # pyright: ignore[reportGeneralTypeIssues, reportInvalidTypeArguments] + Generic[_NaObjectT_co], ): + @property + def na_object(self) -> _NaObjectT_co: ... @property def coerce(self) -> L[True]: ... - na_object: ClassVar[MemberDescriptorType] # does not get instantiated # - def __new__(cls, /) -> StringDType: ... - def __getitem__(self, key: Any, /) -> NoReturn: ... - @property - def base(self) -> StringDType: ... + @overload + def __new__(cls, /, *, coerce: bool = True) -> Self: ... + @overload + def __new__(cls, /, *, na_object: _NaObjectT_co, coerce: bool = True) -> Self: ... + + # + def __getitem__(self, key: Never, /) -> NoReturn: ... # type: ignore[override] # pyright: ignore[reportIncompatibleMethodOverride] @property def fields(self) -> None: ... @property - def hasobject(self) -> L[True]: ... + def base(self) -> Self: ... @property - def isalignedstruct(self) -> L[False]: ... + def ndim(self) -> L[0]: ... @property - def isnative(self) -> L[True]: ... + def shape(self) -> tuple[()]: ... + + # @property def name(self) -> L["StringDType64", "StringDType128"]: ... @property - def ndim(self) -> L[0]: ... + def subdtype(self) -> None: ... @property - def shape(self) -> tuple[()]: ... + def type(self) -> type[str]: ... @property def str(self) -> L["|T8", "|T16"]: ... + + # @property - def subdtype(self) -> None: ... + def hasobject(self) -> L[True]: ... @property - def type(self) -> type[str]: ... # type: ignore[valid-type] + def isalignedstruct(self) -> L[False]: ... + @property + def isnative(self) -> L[True]: ... 
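The `StringDType` stub rewrite above replaces the class-level `na_object` member descriptor with typed constructor overloads (`coerce`, optional `na_object`) and a `_NaObjectT_co` type parameter. A minimal usage sketch of the runtime API those overloads describe — assuming NumPy 2.x with the variable-width string dtype available — not part of the patch itself:

```python
import numpy as np
from numpy.dtypes import StringDType

# First overload in the stub: only `coerce`, no NA sentinel on the instance.
dt_plain = StringDType(coerce=True)

# Second overload: an NA sentinel is supplied; the stub's _NaObjectT_co type
# variable is meant to capture its type (float for np.nan here).
dt_na = StringDType(na_object=np.nan)

arr = np.array(["spam", "eggs"], dtype=dt_na)
print(dt_na.na_object, dt_na.coerce)  # nan True
print(arr.dtype.name)                 # "StringDType64" or "StringDType128", per the stub
```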
diff --git a/numpy/f2py/auxfuncs.py b/numpy/f2py/auxfuncs.py index 1adc2d6228c4..3c1b4500793b 100644 --- a/numpy/f2py/auxfuncs.py +++ b/numpy/f2py/auxfuncs.py @@ -923,7 +923,7 @@ def getuseblocks(pymod): all_uses.extend([x for x in modblock.get("use").keys() if "__" not in x]) return all_uses -def process_f2cmap_dict(f2cmap_all, new_map, c2py_map, verbose = False): +def process_f2cmap_dict(f2cmap_all, new_map, c2py_map, verbose=False): """ Update the Fortran-to-C type mapping dictionary with new mappings and return a list of successfully mapped C types. diff --git a/numpy/f2py/diagnose.py b/numpy/f2py/diagnose.py index cbcaa9eb2931..7eb1697cc787 100644 --- a/numpy/f2py/diagnose.py +++ b/numpy/f2py/diagnose.py @@ -4,12 +4,6 @@ import tempfile -def run_command(cmd): - print(f'Running {cmd!r}:') - os.system(cmd) - print('------') - - def run(): _path = os.getcwd() os.chdir(tempfile.gettempdir()) diff --git a/numpy/f2py/tests/test_array_from_pyobj.py b/numpy/f2py/tests/test_array_from_pyobj.py index e0e366fb94a2..9bdd91f47638 100644 --- a/numpy/f2py/tests/test_array_from_pyobj.py +++ b/numpy/f2py/tests/test_array_from_pyobj.py @@ -33,7 +33,7 @@ def setup_module(): src = [ get_testdir() / "wrapmodule.c", ] - wrap = util.build_meson(src, module_name = "test_array_from_pyobj_ext") + wrap = util.build_meson(src, module_name="test_array_from_pyobj_ext") def flags_info(arr): diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py index 7917d2fb6b7b..bf994ffa07a5 100644 --- a/numpy/f2py/tests/test_regression.py +++ b/numpy/f2py/tests/test_regression.py @@ -53,7 +53,7 @@ def ubound(xl, xh): return xh - xl + 1 rval = self.module.foo(is_=xlow, ie_=xhigh, arr=xvec[:ubound(xlow, xhigh)]) - expval = np.arange(11, dtype = np.float32) + expval = np.arange(11, dtype=np.float32) assert np.allclose(rval, expval) diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py index dbb7416b7765..ab2a1b6f8710 100644 --- a/numpy/f2py/tests/util.py +++ b/numpy/f2py/tests/util.py @@ -384,7 +384,7 @@ def setup_method(self): if self.module is not None: return - codes = self.sources if self.sources else [] + codes = self.sources or [] if self.code: codes.append(self.suffix) diff --git a/numpy/lib/_datasource.py b/numpy/lib/_datasource.py index 1babc722b036..5dafb0ee3843 100644 --- a/numpy/lib/_datasource.py +++ b/numpy/lib/_datasource.py @@ -36,7 +36,7 @@ """ import os -from .._utils import set_module +from numpy._utils import set_module _open = open diff --git a/numpy/lib/_function_base_impl.py b/numpy/lib/_function_base_impl.py index 33bc64511009..e44b27a68adb 100644 --- a/numpy/lib/_function_base_impl.py +++ b/numpy/lib/_function_base_impl.py @@ -2434,8 +2434,8 @@ def __init__(self, pyfunc=np._NoValue, otypes=None, doc=None, excluded=None, cache=False, signature=None): if (pyfunc != np._NoValue) and (not callable(pyfunc)): - #Splitting the error message to keep - #the length below 79 characters. + # Splitting the error message to keep + # the length below 79 characters. part1 = "When used as a decorator, " part2 = "only accepts keyword arguments." 
raise TypeError(part1 + part2) diff --git a/numpy/lib/_index_tricks_impl.py b/numpy/lib/_index_tricks_impl.py index 77aa1394fa0e..7fe0539fa86d 100644 --- a/numpy/lib/_index_tricks_impl.py +++ b/numpy/lib/_index_tricks_impl.py @@ -4,7 +4,7 @@ import warnings import numpy as np -from .._utils import set_module +from numpy._utils import set_module import numpy._core.numeric as _nx from numpy._core.numeric import ScalarType, array from numpy._core.numerictypes import issubdtype diff --git a/numpy/lib/_polynomial_impl.py b/numpy/lib/_polynomial_impl.py index 91cf7405a1cc..a1d21c624c57 100644 --- a/numpy/lib/_polynomial_impl.py +++ b/numpy/lib/_polynomial_impl.py @@ -10,7 +10,7 @@ import re import warnings -from .._utils import set_module +from numpy._utils import set_module import numpy._core.numeric as NX from numpy._core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array, diff --git a/numpy/lib/_type_check_impl.py b/numpy/lib/_type_check_impl.py index 7b0c245335a4..671f27adc0d7 100644 --- a/numpy/lib/_type_check_impl.py +++ b/numpy/lib/_type_check_impl.py @@ -8,7 +8,7 @@ 'typename', 'mintypecode', 'common_type'] -from .._utils import set_module +from numpy._utils import set_module import numpy._core.numeric as _nx from numpy._core.numeric import asarray, asanyarray, isnan, zeros from numpy._core import overrides, getlimits diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py index dd6f4fc9c765..a0f9bdd9ebfe 100644 --- a/numpy/lib/tests/test_format.py +++ b/numpy/lib/tests/test_format.py @@ -396,7 +396,7 @@ ] -#BytesIO that reads a random number of bytes at a time +# BytesIO that reads a random number of bytes at a time class BytesIOSRandomSize(BytesIO): def read(self, size=None): import random @@ -423,7 +423,7 @@ def roundtrip_randsize(arr): def roundtrip_truncated(arr): f = BytesIO() format.write_array(f, arr) - #BytesIO is one byte short + # BytesIO is one byte short f2 = BytesIO(f.getvalue()[0:-1]) arr2 = format.read_array(f2) return arr2 @@ -456,14 +456,14 @@ def test_file_truncated(tmp_path): if arr.dtype != object: with open(path, 'wb') as f: format.write_array(f, arr) - #truncate the file by one byte + # truncate the file by one byte with open(path, 'rb+') as f: f.seek(-1, os.SEEK_END) f.truncate() with open(path, 'rb') as f: with pytest.raises( ValueError, - match = ( + match=( r"EOF: reading array header, " r"expected (\d+) bytes got (\d+)" ) if arr.size == 0 else ( diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py index 653a0f068372..7329287721c4 100644 --- a/numpy/lib/tests/test_function_base.py +++ b/numpy/lib/tests/test_function_base.py @@ -1915,7 +1915,7 @@ class subclass(np.ndarray): assert_equal(r, m * v) def test_name(self): - #See gh-23021 + # gh-23021 @np.vectorize def f2(a, b): return a + b @@ -1962,7 +1962,7 @@ def f(x): def test_bad_input(self): with assert_raises(TypeError): - A = np.vectorize(pyfunc = 3) + A = np.vectorize(pyfunc=3) def test_no_keywords(self): with assert_raises(TypeError): @@ -2413,7 +2413,7 @@ class TestCorrCoef: def test_non_array(self): assert_almost_equal(np.corrcoef([0, 1, 0], [1, 0, 1]), - [[1., -1.], [-1., 1.]]) + [[1., -1.], [-1., 1.]]) def test_simple(self): tgt1 = corrcoef(self.A) @@ -3401,10 +3401,10 @@ def test_scalar_q(self): x = np.arange(12).reshape(3, 4) assert_equal(np.percentile(x, 50), 5.5) assert_(np.isscalar(np.percentile(x, 50))) - r0 = np.array([4., 5., 6., 7.]) + r0 = np.array([4., 5., 6., 7.]) assert_equal(np.percentile(x, 50, axis=0), r0) 
assert_equal(np.percentile(x, 50, axis=0).shape, r0.shape) - r1 = np.array([1.5, 5.5, 9.5]) + r1 = np.array([1.5, 5.5, 9.5]) assert_almost_equal(np.percentile(x, 50, axis=1), r1) assert_equal(np.percentile(x, 50, axis=1).shape, r1.shape) @@ -3422,11 +3422,11 @@ def test_scalar_q(self): x = np.arange(12).reshape(3, 4) assert_equal(np.percentile(x, 50, method='lower'), 5.) assert_(np.isscalar(np.percentile(x, 50))) - r0 = np.array([4., 5., 6., 7.]) + r0 = np.array([4., 5., 6., 7.]) c0 = np.percentile(x, 50, method='lower', axis=0) assert_equal(c0, r0) assert_equal(c0.shape, r0.shape) - r1 = np.array([1., 5., 9.]) + r1 = np.array([1., 5., 9.]) c1 = np.percentile(x, 50, method='lower', axis=1) assert_almost_equal(c1, r1) assert_equal(c1.shape, r1.shape) @@ -3496,18 +3496,18 @@ def test_percentile_out(self, percentile, with_weights): percentile(x, (25, 50), axis=0, out=out, weights=weights), r0 ) assert_equal(out, r0) - r1 = np.array([[0.75, 4.75, 8.75], [1.5, 5.5, 9.5]]) + r1 = np.array([[0.75, 4.75, 8.75], [1.5, 5.5, 9.5]]) out = np.empty((2, 3)) assert_equal(np.percentile(x, (25, 50), axis=1, out=out), r1) assert_equal(out, r1) # q.dim > 1, int - r0 = np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) + r0 = np.array([[0, 1, 2, 3], [4, 5, 6, 7]]) out = np.empty((2, 4), dtype=x.dtype) c = np.percentile(x, (25, 50), method='lower', axis=0, out=out) assert_equal(c, r0) assert_equal(out, r0) - r1 = np.array([[0, 4, 8], [1, 5, 9]]) + r1 = np.array([[0, 4, 8], [1, 5, 9]]) out = np.empty((2, 3), dtype=x.dtype) c = np.percentile(x, (25, 50), method='lower', axis=1, out=out) assert_equal(c, r1) @@ -3583,7 +3583,7 @@ def test_extended_axis(self): d = np.arange(3 * 5 * 7 * 11).reshape((3, 5, 7, 11)) np.random.shuffle(d.ravel()) - assert_equal(np.percentile(d, 25, axis=(0, 1, 2))[0], + assert_equal(np.percentile(d, 25, axis=(0, 1, 2))[0], np.percentile(d[:, :, :, 0].flatten(), 25)) assert_equal(np.percentile(d, [10, 90], axis=(0, 1, 3))[:, 1], np.percentile(d[:, :, 1, :].flatten(), [10, 90])) @@ -3857,7 +3857,7 @@ def test_fraction(self): assert_equal(np.quantile(x, Fraction(1, 2)), Fraction(7, 2)) def test_complex(self): - #See gh-22652 + # gh-22652 arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='G') assert_raises(TypeError, np.quantile, arr_c, 0.5) arr_c = np.array([0.5 + 3.0j, 2.1 + 0.5j, 1.6 + 2.3j], dtype='D') @@ -3887,8 +3887,8 @@ def test_quantile_preserve_int_type(self, dtype): def test_q_zero_one(self, method): # gh-24710 arr = [10, 11, 12] - quantile = np.quantile(arr, q = [0, 1], method=method) - assert_equal(quantile, np.array([10, 12])) + quantile = np.quantile(arr, q=[0, 1], method=method) + assert_equal(quantile, np.array([10, 12])) @pytest.mark.parametrize("method", quantile_methods) def test_quantile_monotonic(self, method): @@ -4163,10 +4163,10 @@ class TestLerp: min_value=0, max_value=1), t1=st.floats(allow_nan=False, allow_infinity=False, min_value=0, max_value=1), - a = st.floats(allow_nan=False, allow_infinity=False, - min_value=-1e300, max_value=1e300), - b = st.floats(allow_nan=False, allow_infinity=False, - min_value=-1e300, max_value=1e300)) + a=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300), + b=st.floats(allow_nan=False, allow_infinity=False, + min_value=-1e300, max_value=1e300)) def test_linear_interpolation_formula_monotonic(self, t0, t1, a, b): l0 = nfb._lerp(a, b, t0) l1 = nfb._lerp(a, b, t1) @@ -4217,7 +4217,7 @@ def test_basic(self): assert_equal(np.median(a0), 1) assert_allclose(np.median(a1), 0.5) assert_allclose(np.median(a2), 
2.5) - assert_allclose(np.median(a2, axis=0), [1.5, 2.5, 3.5]) + assert_allclose(np.median(a2, axis=0), [1.5, 2.5, 3.5]) assert_equal(np.median(a2, axis=1), [1, 4]) assert_allclose(np.median(a2, axis=None), 2.5) @@ -4244,8 +4244,8 @@ def test_axis_keyword(self): np.median(a, axis=ax) assert_array_equal(a, orig) - assert_allclose(np.median(a3, axis=0), [3, 4]) - assert_allclose(np.median(a3.T, axis=1), [3, 4]) + assert_allclose(np.median(a3, axis=0), [3, 4]) + assert_allclose(np.median(a3.T, axis=1), [3, 4]) assert_allclose(np.median(a3), 3.5) assert_allclose(np.median(a3, axis=None), 3.5) assert_allclose(np.median(a3.T), 3.5) @@ -4261,16 +4261,16 @@ def test_overwrite_keyword(self): assert_allclose(np.median(a0.copy(), overwrite_input=True), 1) assert_allclose(np.median(a1.copy(), overwrite_input=True), 0.5) assert_allclose(np.median(a2.copy(), overwrite_input=True), 2.5) - assert_allclose(np.median(a2.copy(), overwrite_input=True, axis=0), - [1.5, 2.5, 3.5]) + assert_allclose( + np.median(a2.copy(), overwrite_input=True, axis=0), [1.5, 2.5, 3.5]) assert_allclose( np.median(a2.copy(), overwrite_input=True, axis=1), [1, 4]) assert_allclose( np.median(a2.copy(), overwrite_input=True, axis=None), 2.5) assert_allclose( - np.median(a3.copy(), overwrite_input=True, axis=0), [3, 4]) - assert_allclose(np.median(a3.T.copy(), overwrite_input=True, axis=1), - [3, 4]) + np.median(a3.copy(), overwrite_input=True, axis=0), [3, 4]) + assert_allclose( + np.median(a3.T.copy(), overwrite_input=True, axis=1), [3, 4]) a4 = np.arange(3 * 4 * 5, dtype=np.float32).reshape((3, 4, 5)) np.random.shuffle(a4.ravel()) diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py index 49ec7c34456f..bfb0248ebdcf 100644 --- a/numpy/lib/tests/test_histograms.py +++ b/numpy/lib/tests/test_histograms.py @@ -588,7 +588,7 @@ def test_simple_range(self): x3 = np.linspace(-100, -50, testlen) x = np.hstack((x1, x2, x3)) for estimator, numbins in expectedResults.items(): - a, b = np.histogram(x, estimator, range = (-20, 20)) + a, b = np.histogram(x, estimator, range=(-20, 20)) msg = f"For the {estimator} estimator" msg += f" with datasize of {testlen}" assert_equal(len(a), numbins, err_msg=msg) diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py index bf249100d17b..d17bd9e6259b 100644 --- a/numpy/lib/tests/test_index_tricks.py +++ b/numpy/lib/tests/test_index_tricks.py @@ -151,7 +151,7 @@ def test_clipmodes(self): ValueError, np.ravel_multi_index, [5, 1, -1, 2], (4, 3, 7, 12)) def test_writeability(self): - # See gh-7269 + # gh-7269 x, y = np.unravel_index([1, 2, 3], (4, 5)) assert_(x.flags.writeable) assert_(y.flags.writeable) diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py index 3276584779a7..6939e5ceffac 100644 --- a/numpy/lib/tests/test_io.py +++ b/numpy/lib/tests/test_io.py @@ -2359,7 +2359,7 @@ def test_recfromcsv(self): assert_(isinstance(test, np.recarray)) assert_equal(test, control) - #gh-10394 + # gh-10394 data = TextIO('color\n"red"\n"blue"') test = recfromcsv(data, converters={0: lambda x: x.strip('\"')}) control = np.array([('red',), ('blue',)], dtype=[('color', (str, 4))]) diff --git a/numpy/lib/tests/test_polynomial.py b/numpy/lib/tests/test_polynomial.py index 0a91b941526e..bf432348cb36 100644 --- a/numpy/lib/tests/test_polynomial.py +++ b/numpy/lib/tests/test_polynomial.py @@ -1,4 +1,5 @@ import numpy as np +import numpy.polynomial.polynomial as poly from numpy.testing import ( assert_, assert_equal, assert_array_equal, 
assert_almost_equal, assert_array_almost_equal, assert_raises, assert_allclose @@ -121,6 +122,17 @@ def test_poly(self): def test_roots(self): assert_array_equal(np.roots([1, 0, 0]), [0, 0]) + # Testing for larger root values + for i in np.logspace(10, 25, num = 1000, base = 10): + tgt = np.array([-1, 1, i]) + res = np.sort(np.roots(poly.polyfromroots(tgt)[::-1])) + assert_almost_equal(res, tgt, 14 - int(np.log10(i))) # Adapting the expected precision according to the root value, to take into account numerical calculation error + + for i in np.logspace(10, 25, num = 1000, base = 10): + tgt = np.array([-1, 1.01, i]) + res = np.sort(np.roots(poly.polyfromroots(tgt)[::-1])) + assert_almost_equal(res, tgt, 14 - int(np.log10(i))) # Adapting the expected precision according to the root value, to take into account numerical calculation error + def test_str_leading_zeros(self): p = np.poly1d([4, 3, 2, 1]) p[3] = 0 diff --git a/numpy/linalg/__init__.pyi b/numpy/linalg/__init__.pyi index 119ca0d0683d..16c8048c1a11 100644 --- a/numpy/linalg/__init__.pyi +++ b/numpy/linalg/__init__.pyi @@ -1,70 +1,73 @@ -from numpy._core.fromnumeric import matrix_transpose -from numpy._core.numeric import tensordot, vecdot - +from . import _linalg as _linalg +from . import _umath_linalg as _umath_linalg +from . import linalg as linalg from ._linalg import ( - matrix_power, - solve, - tensorsolve, - tensorinv, - inv, cholesky, - outer, - eigvals, - eigvalsh, - pinv, - slogdet, + cond, + cross, det, - svd, - svdvals, + diagonal, eig, eigh, + eigvals, + eigvalsh, + inv, lstsq, - norm, + matmul, matrix_norm, - vector_norm, - qr, - cond, + matrix_power, matrix_rank, + matrix_transpose, multi_dot, - matmul, + norm, + outer, + pinv, + qr, + slogdet, + solve, + svd, + svdvals, + tensordot, + tensorinv, + tensorsolve, trace, - diagonal, - cross, + vecdot, + vector_norm, ) __all__ = [ - "matrix_power", - "solve", - "tensorsolve", - "tensorinv", - "inv", + "LinAlgError", "cholesky", - "eigvals", - "eigvalsh", - "pinv", - "slogdet", + "cond", + "cross", "det", - "svd", - "svdvals", + "diagonal", "eig", "eigh", + "eigvals", + "eigvalsh", + "inv", "lstsq", - "norm", - "qr", - "cond", + "matmul", + "matrix_norm", + "matrix_power", "matrix_rank", - "LinAlgError", + "matrix_transpose", "multi_dot", - "trace", - "diagonal", - "cross", + "norm", "outer", + "pinv", + "qr", + "slogdet", + "solve", + "svd", + "svdvals", "tensordot", - "matmul", - "matrix_transpose", - "matrix_norm", - "vector_norm", + "tensorinv", + "tensorsolve", + "trace", "vecdot", + "vector_norm", ] class LinAlgError(ValueError): ... 
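The `numpy/linalg/__init__.pyi` hunk above only re-sorts the re-exports and routes names such as `matmul`, `tensordot`, `vecdot` and `matrix_transpose` through `._linalg` instead of `numpy._core`; the runtime surface is unchanged. A small sketch of the Array API-style helpers listed in that `__all__`, assuming NumPy >= 2.0:

```python
import numpy as np

a = np.arange(6.0).reshape(2, 3)
b = np.ones((2, 3))

# These names all appear in the re-sorted __all__ above and resolve to the
# same callables as before the stub reshuffle.
print(np.linalg.matrix_transpose(a).shape)  # (3, 2)
print(np.linalg.vecdot(a, b))               # dot product over the last axis -> shape (2,)
print(np.linalg.matrix_norm(a))             # Frobenius norm by default
```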
diff --git a/numpy/linalg/lapack_lite/clapack_scrub.py b/numpy/linalg/lapack_lite/clapack_scrub.py index 65231aed7998..1d903bd6409d 100644 --- a/numpy/linalg/lapack_lite/clapack_scrub.py +++ b/numpy/linalg/lapack_lite/clapack_scrub.py @@ -79,19 +79,19 @@ def endArgs(self, text): keep_ftnlen = (Str('ilaenv_') | Str('iparmq_') | Str('s_rnge')) + Str('(') lexicon = Lexicon([ - (iofunctions, TEXT), - (keep_ftnlen, beginArgs), + (iofunctions, TEXT), + (keep_ftnlen, beginArgs), State('args', [ (Str(')'), endArgs), (Str('('), beginArgs), (AnyChar, TEXT), ]), - (cS + Re(r'[1-9][0-9]*L'), IGNORE), - (cS + Str('ftnlen') + Opt(S + len_), IGNORE), - (cS + sep_seq(['(', 'ftnlen', ')'], S) + S + digits, IGNORE), - (Bol + Str('ftnlen ') + len_ + Str(';\n'), IGNORE), - (cS + len_, TEXT), - (AnyChar, TEXT), + (cS + Re(r'[1-9][0-9]*L'), IGNORE), + (cS + Str('ftnlen') + Opt(S + len_), IGNORE), + (cS + sep_seq(['(', 'ftnlen', ')'], S) + S + digits, IGNORE), + (Bol + Str('ftnlen ') + len_ + Str(';\n'), IGNORE), + (cS + len_, TEXT), + (AnyChar, TEXT), ]) def scrubFtnlen(source): diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py index 1a79629814e9..b47bb180a486 100644 --- a/numpy/linalg/tests/test_linalg.py +++ b/numpy/linalg/tests/test_linalg.py @@ -1034,7 +1034,7 @@ class TestMatrixPower: rshft_all = [rshft_0, rshft_1, rshft_2, rshft_3] noninv = array([[1, 0], [0, 0]]) stacked = np.block([[[rshft_0]]] * 2) - #FIXME the 'e' dtype might work in future + # FIXME the 'e' dtype might work in future dtnoinv = [object, np.dtype('e'), np.dtype('g'), np.dtype('G')] def test_large_power(self, dt): diff --git a/numpy/linalg/tests/test_regression.py b/numpy/linalg/tests/test_regression.py index 3949b0b9e66f..e8159fd570bf 100644 --- a/numpy/linalg/tests/test_regression.py +++ b/numpy/linalg/tests/test_regression.py @@ -40,9 +40,9 @@ def test_eigh_build(self): # Ticket 662. rvals = [68.60568999, 89.57756725, 106.67185574] - cov = array([[77.70273908, 3.51489954, 15.64602427], - [3.51489954, 88.97013878, -1.07431931], - [15.64602427, -1.07431931, 98.18223512]]) + cov = array([[77.70273908, 3.51489954, 15.64602427], + [ 3.51489954, 88.97013878, -1.07431931], + [15.64602427, -1.07431931, 98.18223512]]) vals, vecs = linalg.eigh(cov) assert_array_almost_equal(vals, rvals) @@ -64,8 +64,8 @@ def test_norm_vector_badarg(self): def test_lapack_endian(self): # For bug #1482 - a = array([[5.7998084, -2.1825367], - [-2.1825367, 9.85910595]], dtype='>f8') + a = array([[ 5.7998084, -2.1825367], + [-2.1825367, 9.85910595]], dtype='>f8') b = array(a, dtype=' _DTypeT_co: ... - @dtype.setter - def dtype(self: MaskedArray[Any, _DTypeT], dtype: _DTypeT, /) -> None: ... - @property def shape(self) -> _ShapeT_co: ... @shape.setter def shape(self: MaskedArray[_ShapeT, Any], shape: _ShapeT, /) -> None: ... - def __setmask__(self, mask, copy=...): ... + def __setmask__(self, mask: _ArrayLikeBool_co, copy: bool = False) -> None: ... @property - def mask(self): ... + def mask(self) -> NDArray[MaskType] | MaskType: ... @mask.setter - def mask(self, value): ... + def mask(self, value: _ArrayLikeBool_co, /) -> None: ... @property def recordmask(self): ... @recordmask.setter def recordmask(self, mask): ... - def harden_mask(self): ... - def soften_mask(self): ... + def harden_mask(self) -> Self: ... + def soften_mask(self) -> Self: ... @property - def hardmask(self): ... - def unshare_mask(self): ... + def hardmask(self) -> bool: ... + def unshare_mask(self) -> Self: ... @property - def sharedmask(self): ... 
- def shrink_mask(self): ... + def sharedmask(self) -> bool: ... + def shrink_mask(self) -> Self: ... @property - def baseclass(self): ... + def baseclass(self) -> type[NDArray[Any]]: ... data: Any @property def flat(self): ... @@ -457,8 +455,8 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __rtruediv__(self, other): ... def __floordiv__(self, other): ... def __rfloordiv__(self, other): ... - def __pow__(self, other): ... - def __rpow__(self, other): ... + def __pow__(self, other, mod: None = None, /): ... + def __rpow__(self, other, mod: None = None, /): ... def __iadd__(self, other): ... def __isub__(self, other): ... def __imul__(self, other): ... @@ -467,10 +465,10 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __itruediv__(self, other): ... def __ipow__(self, other): ... @property # type: ignore[misc] - def imag(self): ... + def imag(self: _HasDTypeWithRealAndImag[object, _ScalarT], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... get_imag: Any @property # type: ignore[misc] - def real(self): ... + def real(self: _HasDTypeWithRealAndImag[_ScalarT, object], /) -> MaskedArray[_ShapeT_co, dtype[_ScalarT]]: ... get_real: Any # keep in sync with `np.ma.count` @@ -483,15 +481,104 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): @overload def count(self, axis: _ShapeLike | None, keepdims: Literal[True]) -> NDArray[int_]: ... - def ravel(self, order=...): ... + def ravel(self, order: _OrderKACF = "C") -> MaskedArray[tuple[int], _DTypeT_co]: ... def reshape(self, *s, **kwargs): ... def resize(self, newshape, refcheck=..., order=...): ... def put(self, indices: _ArrayLikeInt_co, values: ArrayLike, mode: _ModeKind = "raise") -> None: ... def ids(self) -> tuple[int, int]: ... def iscontiguous(self) -> bool: ... - def all(self, axis=..., out=..., keepdims=...): ... - def any(self, axis=..., out=..., keepdims=...): ... - def nonzero(self): ... + + @overload + def all( + self, + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + ) -> bool_: ... + @overload + def all( + self, + axis: _ShapeLike | None = None, + out: None = None, + *, + keepdims: Literal[True], + ) -> _MaskedArray[bool_]: ... + @overload + def all( + self, + axis: _ShapeLike | None, + out: None, + keepdims: Literal[True], + ) -> _MaskedArray[bool_]: ... + @overload + def all( + self, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> bool_ | _MaskedArray[bool_]: ... + @overload + def all( + self, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + @overload + def all( + self, + axis: _ShapeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + + @overload + def any( + self, + axis: None = None, + out: None = None, + keepdims: Literal[False] | _NoValueType = ..., + ) -> bool_: ... + @overload + def any( + self, + axis: _ShapeLike | None = None, + out: None = None, + *, + keepdims: Literal[True], + ) -> _MaskedArray[bool_]: ... + @overload + def any( + self, + axis: _ShapeLike | None, + out: None, + keepdims: Literal[True], + ) -> _MaskedArray[bool_]: ... + @overload + def any( + self, + axis: _ShapeLike | None = None, + out: None = None, + keepdims: bool | _NoValueType = ..., + ) -> bool_ | _MaskedArray[bool_]: ... + @overload + def any( + self, + axis: _ShapeLike | None = None, + *, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... 
+ @overload + def any( + self, + axis: _ShapeLike | None, + out: _ArrayT, + keepdims: bool | _NoValueType = ..., + ) -> _ArrayT: ... + + def nonzero(self) -> tuple[_Array1D[intp], *tuple[_Array1D[intp], ...]]: ... def trace(self, offset=..., axis1=..., axis2=..., dtype=..., out=...): ... def dot(self, b, out=..., strict=...): ... def sum(self, axis=..., dtype=..., out=..., keepdims=...): ... @@ -777,14 +864,28 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): copy: Any diagonal: Any flatten: Any - repeat: Any + + @overload + def repeat( + self, + repeats: _ArrayLikeInt_co, + axis: None = None, + ) -> MaskedArray[tuple[int], _DTypeT_co]: ... + @overload + def repeat( + self, + repeats: _ArrayLikeInt_co, + axis: SupportsIndex, + ) -> MaskedArray[_Shape, _DTypeT_co]: ... + squeeze: Any - swapaxes: Any - T: Any - transpose: Any - @property # type: ignore[misc] - def mT(self): ... + def swapaxes( + self, + axis1: SupportsIndex, + axis2: SupportsIndex, + / + ) -> MaskedArray[_Shape, _DTypeT_co]: ... # def toflex(self) -> Incomplete: ... @@ -799,6 +900,12 @@ class MaskedArray(ndarray[_ShapeT_co, _DTypeT_co]): def __reduce__(self): ... def __deepcopy__(self, memo=...): ... + # Keep `dtype` at the bottom to avoid name conflicts with `np.dtype` + @property + def dtype(self) -> _DTypeT_co: ... + @dtype.setter + def dtype(self: MaskedArray[Any, _DTypeT], dtype: _DTypeT, /) -> None: ... + class mvoid(MaskedArray[_ShapeT_co, _DTypeT_co]): def __new__( self, # pyright: ignore[reportSelfClsParameterName] @@ -1166,9 +1273,9 @@ def sort( stable: Literal[False] | None = False, ) -> NDArray[Any]: ... @overload -def compressed(x: _ArrayLike[_ScalarT_co]) -> ndarray[tuple[int], dtype[_ScalarT_co]]: ... +def compressed(x: _ArrayLike[_ScalarT_co]) -> _Array1D[_ScalarT_co]: ... @overload -def compressed(x: ArrayLike) -> ndarray[tuple[int], dtype]: ... +def compressed(x: ArrayLike) -> _Array1D[Any]: ... def concatenate(arrays, axis=...): ... def diag(v, k=...): ... def left_shift(a, n): ... diff --git a/numpy/ma/extras.pyi b/numpy/ma/extras.pyi index ba76f3517526..c3f9fcde4a0a 100644 --- a/numpy/ma/extras.pyi +++ b/numpy/ma/extras.pyi @@ -56,7 +56,7 @@ __all__ = [ ] def count_masked(arr, axis=...): ... -def masked_all(shape, dtype = ...): ... +def masked_all(shape, dtype=...): ... def masked_all_like(arr): ... class _fromnxfunction: @@ -96,8 +96,8 @@ def compress_nd(x, axis=...): ... def compress_rowcols(x, axis=...): ... def compress_rows(a): ... def compress_cols(a): ... -def mask_rows(a, axis = ...): ... -def mask_cols(a, axis = ...): ... +def mask_rows(a, axis=...): ... +def mask_cols(a, axis=...): ... def ediff1d(arr, to_end=..., to_begin=...): ... def unique(ar1, return_index=..., return_inverse=...): ... def intersect1d(ar1, ar2, assume_unique=...): ... @@ -107,7 +107,7 @@ def isin(element, test_elements, assume_unique=..., invert=...): ... def union1d(ar1, ar2): ... def setdiff1d(ar1, ar2, assume_unique=...): ... def cov(x, y=..., rowvar=..., bias=..., allow_masked=..., ddof=...): ... -def corrcoef(x, y=..., rowvar=..., bias = ..., allow_masked=..., ddof = ...): ... +def corrcoef(x, y=..., rowvar=..., bias=..., allow_masked=..., ddof=...): ... 
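Editor's note on the `MaskedArray.all`/`.any` overloads added to `numpy/ma/core.pyi` above: they describe behaviour the runtime class already has, namely that a plain reduction collapses to a single truth value, an `axis=`/`keepdims=True` reduction stays a masked array, and passing `out=` returns that same array. A standalone check of that behaviour follows; it is not part of the diff, and the array values are arbitrary examples:

```python
# Illustration of the runtime behaviour the new .all()/.any() overloads describe.
import numpy as np

m = np.ma.masked_array([[1.0, 2.0], [3.0, 0.0]],
                       mask=[[False, True], [False, False]])

print(bool(m.all()))                                 # False: an unmasked element is 0.0
print(isinstance(m.any(axis=0), np.ma.MaskedArray))  # True: axis reductions stay masked arrays
print(m.any(keepdims=True).shape)                    # (1, 1): keepdims=True keeps array form

out = np.ma.masked_array([False, False])
print(m.all(axis=0, out=out) is out)                 # True: the provided `out` array is returned
```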
class MAxisConcatenator(AxisConcatenator): @staticmethod diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py index 280d94bc0fe8..64404d4dad8c 100644 --- a/numpy/ma/tests/test_core.py +++ b/numpy/ma/tests/test_core.py @@ -856,13 +856,13 @@ def test_fancy_printoptions(self): assert_equal(str(test), control) # Test 0-d array with multi-dimensional dtype - t_2d0 = masked_array(data = (0, [[0.0, 0.0, 0.0], - [0.0, 0.0, 0.0]], - 0.0), - mask = (False, [[True, False, True], - [False, False, True]], - False), - dtype = "int, (2,3)float, float") + t_2d0 = masked_array(data=(0, [[0.0, 0.0, 0.0], + [0.0, 0.0, 0.0]], + 0.0), + mask=(False, [[True, False, True], + [False, False, True]], + False), + dtype="int, (2,3)float, float") control = "(0, [[--, 0.0, --], [0.0, 0.0, --]], 0.0)" assert_equal(str(t_2d0), control) @@ -970,36 +970,36 @@ def test_mvoid_print(self): def test_mvoid_multidim_print(self): # regression test for gh-6019 - t_ma = masked_array(data = [([1, 2, 3],)], - mask = [([False, True, False],)], - fill_value = ([999999, 999999, 999999],), - dtype = [('a', 'ij', y1, y2) res = cheb.chebgrid2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = cheb.chebgrid2d(z, z, self.c2d) assert_(res.shape == (2, 3) * 2) @@ -205,12 +205,12 @@ def test_chebgrid3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j,k->ijk', y1, y2, y3) res = cheb.chebgrid3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = cheb.chebgrid3d(z, z, z, self.c3d) assert_(res.shape == (2, 3) * 3) @@ -590,11 +590,11 @@ def test_weight(self): assert_almost_equal(res, tgt) def test_chebpts1(self): - #test exceptions + # test exceptions assert_raises(ValueError, cheb.chebpts1, 1.5) assert_raises(ValueError, cheb.chebpts1, 0) - #test points + # test points tgt = [0] assert_almost_equal(cheb.chebpts1(1), tgt) tgt = [-0.70710678118654746, 0.70710678118654746] @@ -605,11 +605,11 @@ def test_chebpts1(self): assert_almost_equal(cheb.chebpts1(4), tgt) def test_chebpts2(self): - #test exceptions + # test exceptions assert_raises(ValueError, cheb.chebpts2, 1.5) assert_raises(ValueError, cheb.chebpts2, 1) - #test points + # test points tgt = [-1, 1] assert_almost_equal(cheb.chebpts2(2), tgt) tgt = [-1, 0, 1] diff --git a/numpy/polynomial/tests/test_hermite.py b/numpy/polynomial/tests/test_hermite.py index 3e2b7c0032c6..2f17091137b9 100644 --- a/numpy/polynomial/tests/test_hermite.py +++ b/numpy/polynomial/tests/test_hermite.py @@ -120,10 +120,10 @@ class TestEvaluation: y = polyval(x, [1., 2., 3.]) def test_hermval(self): - #check empty input + # check empty input assert_equal(herm.hermval([], [1]).size, 0) - #check normal input) + # check normal input) x = np.linspace(-1, 1) y = [polyval(x, c) for c in Hlist] for i in range(10): @@ -132,7 +132,7 @@ def test_hermval(self): res = herm.hermval(x, [0] * i + [1]) assert_almost_equal(res, tgt, err_msg=msg) - #check that shape is preserved + # check that shape is preserved for i in range(3): dims = [2] * i x = np.zeros(dims) @@ -144,15 +144,15 @@ def test_hermval2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, herm.hermval2d, x1, x2[:2], self.c2d) - #test values + # test values tgt = y1 * y2 res = herm.hermval2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = herm.hermval2d(z, z, self.c2d) assert_(res.shape 
== (2, 3)) @@ -161,15 +161,15 @@ def test_hermval3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, herm.hermval3d, x1, x2, x3[:2], self.c3d) - #test values + # test values tgt = y1 * y2 * y3 res = herm.hermval3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = herm.hermval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) @@ -178,12 +178,12 @@ def test_hermgrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j->ij', y1, y2) res = herm.hermgrid2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = herm.hermgrid2d(z, z, self.c2d) assert_(res.shape == (2, 3) * 2) @@ -192,12 +192,12 @@ def test_hermgrid3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j,k->ijk', y1, y2, y3) res = herm.hermgrid3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = herm.hermgrid3d(z, z, z, self.c3d) assert_(res.shape == (2, 3) * 3) diff --git a/numpy/polynomial/tests/test_hermite_e.py b/numpy/polynomial/tests/test_hermite_e.py index bd567d513027..ce55e2098b97 100644 --- a/numpy/polynomial/tests/test_hermite_e.py +++ b/numpy/polynomial/tests/test_hermite_e.py @@ -120,10 +120,10 @@ class TestEvaluation: y = polyval(x, [1., 2., 3.]) def test_hermeval(self): - #check empty input + # check empty input assert_equal(herme.hermeval([], [1]).size, 0) - #check normal input) + # check normal input) x = np.linspace(-1, 1) y = [polyval(x, c) for c in Helist] for i in range(10): @@ -132,7 +132,7 @@ def test_hermeval(self): res = herme.hermeval(x, [0] * i + [1]) assert_almost_equal(res, tgt, err_msg=msg) - #check that shape is preserved + # check that shape is preserved for i in range(3): dims = [2] * i x = np.zeros(dims) @@ -144,15 +144,15 @@ def test_hermeval2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, herme.hermeval2d, x1, x2[:2], self.c2d) - #test values + # test values tgt = y1 * y2 res = herme.hermeval2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = herme.hermeval2d(z, z, self.c2d) assert_(res.shape == (2, 3)) @@ -161,15 +161,15 @@ def test_hermeval3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, herme.hermeval3d, x1, x2, x3[:2], self.c3d) - #test values + # test values tgt = y1 * y2 * y3 res = herme.hermeval3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = herme.hermeval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) @@ -178,12 +178,12 @@ def test_hermegrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j->ij', y1, y2) res = herme.hermegrid2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = herme.hermegrid2d(z, z, self.c2d) assert_(res.shape == (2, 3) * 2) @@ -192,12 +192,12 @@ def test_hermegrid3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j,k->ijk', y1, y2, y3) res = herme.hermegrid3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = herme.hermegrid3d(z, z, z, self.c3d) assert_(res.shape == (2, 3) * 3) diff --git 
a/numpy/polynomial/tests/test_laguerre.py b/numpy/polynomial/tests/test_laguerre.py index f19c4d2fc2aa..1dd1977de684 100644 --- a/numpy/polynomial/tests/test_laguerre.py +++ b/numpy/polynomial/tests/test_laguerre.py @@ -117,10 +117,10 @@ class TestEvaluation: y = polyval(x, [1., 2., 3.]) def test_lagval(self): - #check empty input + # check empty input assert_equal(lag.lagval([], [1]).size, 0) - #check normal input) + # check normal input) x = np.linspace(-1, 1) y = [polyval(x, c) for c in Llist] for i in range(7): @@ -129,7 +129,7 @@ def test_lagval(self): res = lag.lagval(x, [0] * i + [1]) assert_almost_equal(res, tgt, err_msg=msg) - #check that shape is preserved + # check that shape is preserved for i in range(3): dims = [2] * i x = np.zeros(dims) @@ -141,15 +141,15 @@ def test_lagval2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, lag.lagval2d, x1, x2[:2], self.c2d) - #test values + # test values tgt = y1 * y2 res = lag.lagval2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = lag.lagval2d(z, z, self.c2d) assert_(res.shape == (2, 3)) @@ -158,15 +158,15 @@ def test_lagval3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, lag.lagval3d, x1, x2, x3[:2], self.c3d) - #test values + # test values tgt = y1 * y2 * y3 res = lag.lagval3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = lag.lagval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) @@ -175,12 +175,12 @@ def test_laggrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j->ij', y1, y2) res = lag.laggrid2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = lag.laggrid2d(z, z, self.c2d) assert_(res.shape == (2, 3) * 2) @@ -189,12 +189,12 @@ def test_laggrid3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j,k->ijk', y1, y2, y3) res = lag.laggrid3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = lag.laggrid3d(z, z, z, self.c3d) assert_(res.shape == (2, 3) * 3) diff --git a/numpy/polynomial/tests/test_legendre.py b/numpy/polynomial/tests/test_legendre.py index 80b428e31dcc..ee23b4a2527f 100644 --- a/numpy/polynomial/tests/test_legendre.py +++ b/numpy/polynomial/tests/test_legendre.py @@ -121,10 +121,10 @@ class TestEvaluation: y = polyval(x, [1., 2., 3.]) def test_legval(self): - #check empty input + # check empty input assert_equal(leg.legval([], [1]).size, 0) - #check normal input) + # check normal input) x = np.linspace(-1, 1) y = [polyval(x, c) for c in Llist] for i in range(10): @@ -133,7 +133,7 @@ def test_legval(self): res = leg.legval(x, [0] * i + [1]) assert_almost_equal(res, tgt, err_msg=msg) - #check that shape is preserved + # check that shape is preserved for i in range(3): dims = [2] * i x = np.zeros(dims) @@ -145,15 +145,15 @@ def test_legval2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, leg.legval2d, x1, x2[:2], self.c2d) - #test values + # test values tgt = y1 * y2 res = leg.legval2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = leg.legval2d(z, z, self.c2d) assert_(res.shape == (2, 3)) @@ -162,15 +162,15 @@ def test_legval3d(self): x1, x2, x3 = self.x 
y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises(ValueError, leg.legval3d, x1, x2, x3[:2], self.c3d) - #test values + # test values tgt = y1 * y2 * y3 res = leg.legval3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = leg.legval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) @@ -179,12 +179,12 @@ def test_leggrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j->ij', y1, y2) res = leg.leggrid2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = leg.leggrid2d(z, z, self.c2d) assert_(res.shape == (2, 3) * 2) @@ -193,12 +193,12 @@ def test_leggrid3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j,k->ijk', y1, y2, y3) res = leg.leggrid3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = leg.leggrid3d(z, z, z, self.c3d) assert_(res.shape == (2, 3) * 3) diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py index 177bd0893ec9..84ccc45fc4e4 100644 --- a/numpy/polynomial/tests/test_polynomial.py +++ b/numpy/polynomial/tests/test_polynomial.py @@ -155,10 +155,10 @@ class TestEvaluation: y = poly.polyval(x, [1., 2., 3.]) def test_polyval(self): - #check empty input + # check empty input assert_equal(poly.polyval([], [1]).size, 0) - #check normal input) + # check normal input) x = np.linspace(-1, 1) y = [x**i for i in range(5)] for i in range(5): @@ -169,7 +169,7 @@ def test_polyval(self): res = poly.polyval(x, [0, -1, 0, 1]) assert_almost_equal(res, tgt) - #check that shape is preserved + # check that shape is preserved for i in range(3): dims = [2] * i x = np.zeros(dims) @@ -177,13 +177,13 @@ def test_polyval(self): assert_equal(poly.polyval(x, [1, 0]).shape, dims) assert_equal(poly.polyval(x, [1, 0, 0]).shape, dims) - #check masked arrays are processed correctly + # check masked arrays are processed correctly mask = [False, True, False] mx = np.ma.array([1, 2, 3], mask=mask) res = np.polyval([7, 5, 3], mx) assert_array_equal(res.mask, mask) - #check subtypes of ndarray are preserved + # check subtypes of ndarray are preserved class C(np.ndarray): pass @@ -258,16 +258,16 @@ def test_polyval2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises_regex(ValueError, 'incompatible', poly.polyval2d, x1, x2[:2], self.c2d) - #test values + # test values tgt = y1 * y2 res = poly.polyval2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = poly.polyval2d(z, z, self.c2d) assert_(res.shape == (2, 3)) @@ -276,16 +276,16 @@ def test_polyval3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test exceptions + # test exceptions assert_raises_regex(ValueError, 'incompatible', poly.polyval3d, x1, x2, x3[:2], self.c3d) - #test values + # test values tgt = y1 * y2 * y3 res = poly.polyval3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = poly.polyval3d(z, z, z, self.c3d) assert_(res.shape == (2, 3)) @@ -294,12 +294,12 @@ def test_polygrid2d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j->ij', y1, y2) res = poly.polygrid2d(x1, x2, self.c2d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = poly.polygrid2d(z, z, self.c2d) assert_(res.shape == 
(2, 3) * 2) @@ -308,12 +308,12 @@ def test_polygrid3d(self): x1, x2, x3 = self.x y1, y2, y3 = self.y - #test values + # test values tgt = np.einsum('i,j,k->ijk', y1, y2, y3) res = poly.polygrid3d(x1, x2, x3, self.c3d) assert_almost_equal(res, tgt) - #test shape + # test shape z = np.ones((2, 3)) res = poly.polygrid3d(z, z, z, self.c3d) assert_(res.shape == (2, 3) * 3) @@ -543,6 +543,17 @@ def test_polyroots(self): res = poly.polyroots(poly.polyfromroots(tgt)) assert_almost_equal(trim(res), trim(tgt)) + # Testing for larger root values + for i in np.logspace(10, 25, num = 1000, base = 10): + tgt = np.array([-1, 1, i]) + res = poly.polyroots(poly.polyfromroots(tgt)) + assert_almost_equal(res, tgt, 15 - int(np.log10(i))) # Adapting the expected precision according to the root value, to take into account numerical calculation error + + for i in np.logspace(10, 25, num = 1000, base = 10): + tgt = np.array([-1, 1.01, i]) + res = poly.polyroots(poly.polyfromroots(tgt)) + assert_almost_equal(res, tgt, 14 - int(np.log10(i))) # Adapting the expected precision according to the root value, to take into account numerical calculation error + def test_polyfit(self): def f(x): return x * (x - 1) * (x - 2) diff --git a/numpy/random/tests/test_extending.py b/numpy/random/tests/test_extending.py index 608f58756289..dad9b10449d6 100644 --- a/numpy/random/tests/test_extending.py +++ b/numpy/random/tests/test_extending.py @@ -54,6 +54,7 @@ ) @pytest.mark.skipif(IS_WASM, reason="Can't start subprocess") @pytest.mark.skipif(cython is None, reason="requires cython") +@pytest.mark.skipif(sysconfig.get_platform() == 'win-arm64', reason='Meson unable to find MSVC linker on win-arm64') @pytest.mark.slow def test_cython(tmp_path): import glob diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py index cdfddfa23abf..f3a71a83a5e7 100644 --- a/numpy/random/tests/test_generator_mt19937.py +++ b/numpy/random/tests/test_generator_mt19937.py @@ -1899,7 +1899,7 @@ def test_normal(self): scale = [1] bad_scale = [-1] random = Generator(MT19937(self.seed)) - desired = np.array([-0.38736406738527, 0.79594375042255, 0.0197076236097]) + desired = np.array([-0.38736406738527, 0.79594375042255, 0.0197076236097]) random = Generator(MT19937(self.seed)) actual = random.normal(loc * 3, scale) diff --git a/numpy/testing/_private/extbuild.py b/numpy/testing/_private/extbuild.py index af403bf7f8f4..f81184e9af1e 100644 --- a/numpy/testing/_private/extbuild.py +++ b/numpy/testing/_private/extbuild.py @@ -104,9 +104,9 @@ def compile_extension_module( dirname = builddir / name dirname.mkdir(exist_ok=True) cfile = _convert_str_to_file(source_string, dirname) - include_dirs = include_dirs if include_dirs else [] - libraries = libraries if libraries else [] - library_dirs = library_dirs if library_dirs else [] + include_dirs = include_dirs or [] + libraries = libraries or [] + library_dirs = library_dirs or [] return _c_compile( cfile, outputfilename=dirname / modname, diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py index 0074d77bce0c..5cbb5130dc1f 100644 --- a/numpy/testing/_private/utils.py +++ b/numpy/testing/_private/utils.py @@ -136,7 +136,7 @@ def GetPerformanceAttributes(object, counter, instance=None, # you should copy this function, but keep the counter open, and call # CollectQueryData() each time you need to know. 
# See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp - #(dead link) + # (dead link) # My older explanation for this was that the "AddCounter" process # forced the CPU to 100%, but the above makes more sense :) import win32pdh @@ -2081,7 +2081,7 @@ def _gen_alignment_data(dtype=float32, type='binary', max_size=24): inp1 = lambda: arange(s, dtype=dtype)[o:] inp2 = lambda: arange(s, dtype=dtype)[o:] out = empty((s,), dtype=dtype)[o:] - yield out, inp1(), inp2(), bfmt % \ + yield out, inp1(), inp2(), bfmt % \ (o, o, o, s, dtype, 'out of place') d = inp1() yield d, d, inp2(), bfmt % \ diff --git a/numpy/tests/test_scripts.py b/numpy/tests/test_scripts.py index f3652e387f3c..fa15eb642a81 100644 --- a/numpy/tests/test_scripts.py +++ b/numpy/tests/test_scripts.py @@ -11,7 +11,7 @@ import numpy as np from numpy.testing import assert_equal, IS_WASM -is_inplace = isfile(pathjoin(dirname(np.__file__), '..', 'setup.py')) +is_inplace = isfile(pathjoin(dirname(np.__file__), '..', 'setup.py')) def find_f2py_commands(): diff --git a/numpy/typing/__init__.py b/numpy/typing/__init__.py index 8a4a974a9928..2c75c348667e 100644 --- a/numpy/typing/__init__.py +++ b/numpy/typing/__init__.py @@ -155,15 +155,40 @@ # NOTE: The API section will be appended with additional entries # further down in this file -from numpy._typing import ( - ArrayLike, - DTypeLike, - NBitBase, - NDArray, -) +# pyright: reportDeprecated=false + +from numpy._typing import ArrayLike, DTypeLike, NBitBase, NDArray __all__ = ["ArrayLike", "DTypeLike", "NBitBase", "NDArray"] + +__DIR = __all__ + [k for k in globals() if k.startswith("__") and k.endswith("__")] +__DIR_SET = frozenset(__DIR) + + +def __dir__() -> list[str]: + return __DIR + +def __getattr__(name: str): + if name == "NBitBase": + import warnings + + # Deprecated in NumPy 2.3, 2025-05-01 + warnings.warn( + "`NBitBase` is deprecated and will be removed from numpy.typing in the " + "future. Use `@typing.overload` or a `TypeVar` with a scalar-type as upper " + "bound, instead. (deprecated in NumPy 2.3)", + DeprecationWarning, + stacklevel=2, + ) + return NBitBase + + if name in __DIR_SET: + return globals()[name] + + raise AttributeError(f"module {__name__!r} has no attribute {name!r}") + + if __doc__ is not None: from numpy._typing._add_docstring import _docstrings __doc__ += _docstrings diff --git a/numpy/typing/mypy_plugin.py b/numpy/typing/mypy_plugin.py index 81ccf0b64fc1..5c01f261bb79 100644 --- a/numpy/typing/mypy_plugin.py +++ b/numpy/typing/mypy_plugin.py @@ -65,18 +65,10 @@ def _get_precision_dict() -> dict[str, str]: def _get_extended_precision_list() -> list[str]: extended_names = [ - "uint128", - "uint256", - "int128", - "int256", - "float80", "float96", "float128", - "float256", - "complex160", "complex192", "complex256", - "complex512", ] return [i for i in extended_names if hasattr(np, i)] @@ -169,8 +161,7 @@ def get_additional_deps( """Handle all import-based overrides. * Import platform-specific extended-precision `numpy.number` - subclasses (*e.g.* `numpy.float96`, `numpy.float128` and - `numpy.complex256`). + subclasses (*e.g.* `numpy.float96` and `numpy.float128`). * Import the appropriate `ctypes` equivalent to `numpy.intp`. 
""" diff --git a/numpy/typing/tests/data/fail/array_like.pyi b/numpy/typing/tests/data/fail/array_like.pyi index 53f0c1ec85e9..6b6c16dd6e70 100644 --- a/numpy/typing/tests/data/fail/array_like.pyi +++ b/numpy/typing/tests/data/fail/array_like.pyi @@ -11,3 +11,5 @@ scalar = np.int64(1) scalar.__array__(dtype=np.float64) # E: No overload variant array = np.array([1]) array.__array__(dtype=np.float64) # E: No overload variant + +array.setfield(np.eye(1), np.int32, (0, 1)) # E: No overload variant diff --git a/numpy/typing/tests/data/fail/ma.pyi b/numpy/typing/tests/data/fail/ma.pyi index a4f2517fd4a6..e93be464de10 100644 --- a/numpy/typing/tests/data/fail/ma.pyi +++ b/numpy/typing/tests/data/fail/ma.pyi @@ -63,6 +63,14 @@ np.ma.argmax(m, keepdims=1.0) # E: No overload variant np.ma.argmax(m, out=1.0) # E: No overload variant np.ma.argmax(m, fill_value=lambda x: 27) # E: No overload variant +m.all(axis=1.0) # E: No overload variant +m.all(keepdims=1.0) # E: No overload variant +m.all(out=1.0) # E: No overload variant + +m.any(axis=1.0) # E: No overload variant +m.any(keepdims=1.0) # E: No overload variant +m.any(out=1.0) # E: No overload variant + m.sort(axis=(0,1)) # E: No overload variant m.sort(axis=None) # E: No overload variant m.sort(kind='cabbage') # E: No overload variant @@ -122,3 +130,7 @@ np.ma.allequal(m, [1,2,3], fill_value=1.5) # E: No overload variant np.ma.allclose(m, [1,2,3], masked_equal=4.5) # E: No overload variant np.ma.allclose(m, [1,2,3], rtol='.4') # E: No overload variant np.ma.allclose(m, [1,2,3], atol='.5') # E: No overload variant + +m.__setmask__('mask') # E: No overload variant + +m.swapaxes(axis1=1, axis2=0) # E: No overload variant diff --git a/numpy/typing/tests/data/misc/extended_precision.pyi b/numpy/typing/tests/data/misc/extended_precision.pyi index 02dfcec6a8e2..84b5f516bdde 100644 --- a/numpy/typing/tests/data/misc/extended_precision.pyi +++ b/numpy/typing/tests/data/misc/extended_precision.pyi @@ -1,20 +1,9 @@ import numpy as np -from numpy._typing import _80Bit, _96Bit, _128Bit, _256Bit +from numpy._typing import _96Bit, _128Bit from typing import assert_type -assert_type(np.uint128(), np.unsignedinteger[_128Bit]) -assert_type(np.uint256(), np.unsignedinteger[_256Bit]) - -assert_type(np.int128(), np.signedinteger[_128Bit]) -assert_type(np.int256(), np.signedinteger[_256Bit]) - -assert_type(np.float80(), np.floating[_80Bit]) assert_type(np.float96(), np.floating[_96Bit]) assert_type(np.float128(), np.floating[_128Bit]) -assert_type(np.float256(), np.floating[_256Bit]) - -assert_type(np.complex160(), np.complexfloating[_80Bit, _80Bit]) assert_type(np.complex192(), np.complexfloating[_96Bit, _96Bit]) assert_type(np.complex256(), np.complexfloating[_128Bit, _128Bit]) -assert_type(np.complex512(), np.complexfloating[_256Bit, _256Bit]) diff --git a/numpy/typing/tests/data/pass/ma.py b/numpy/typing/tests/data/pass/ma.py index 7a4e9909e334..abd1a0103005 100644 --- a/numpy/typing/tests/data/pass/ma.py +++ b/numpy/typing/tests/data/pass/ma.py @@ -2,7 +2,11 @@ import numpy as np import numpy.ma +import numpy.typing as npt - +ar_b: npt.NDArray[np.bool] = np.array([True, False, True]) m: np.ma.MaskedArray[Any, np.dtype[np.float64]] = np.ma.masked_array([1.5, 2, 3], mask=[True, False, True]) +m.mask = ar_b +m.mask = np.False_ + diff --git a/numpy/typing/tests/data/reveal/array_constructors.pyi b/numpy/typing/tests/data/reveal/array_constructors.pyi index 07610d982751..2f32579c0816 100644 --- a/numpy/typing/tests/data/reveal/array_constructors.pyi +++ 
b/numpy/typing/tests/data/reveal/array_constructors.pyi @@ -132,21 +132,21 @@ assert_type(np.require(B, requirements="W"), SubClass[np.float64]) assert_type(np.require(B, requirements="A"), SubClass[np.float64]) assert_type(np.require(C), npt.NDArray[Any]) -assert_type(np.linspace(0, 10), npt.NDArray[np.floating]) +assert_type(np.linspace(0, 10), npt.NDArray[np.float64]) assert_type(np.linspace(0, 10j), npt.NDArray[np.complexfloating]) assert_type(np.linspace(0, 10, dtype=np.int64), npt.NDArray[np.int64]) assert_type(np.linspace(0, 10, dtype=int), npt.NDArray[Any]) -assert_type(np.linspace(0, 10, retstep=True), tuple[npt.NDArray[np.floating], np.floating]) +assert_type(np.linspace(0, 10, retstep=True), tuple[npt.NDArray[np.float64], np.float64]) assert_type(np.linspace(0j, 10, retstep=True), tuple[npt.NDArray[np.complexfloating], np.complexfloating]) assert_type(np.linspace(0, 10, retstep=True, dtype=np.int64), tuple[npt.NDArray[np.int64], np.int64]) assert_type(np.linspace(0j, 10, retstep=True, dtype=int), tuple[npt.NDArray[Any], Any]) -assert_type(np.logspace(0, 10), npt.NDArray[np.floating]) +assert_type(np.logspace(0, 10), npt.NDArray[np.float64]) assert_type(np.logspace(0, 10j), npt.NDArray[np.complexfloating]) assert_type(np.logspace(0, 10, dtype=np.int64), npt.NDArray[np.int64]) assert_type(np.logspace(0, 10, dtype=int), npt.NDArray[Any]) -assert_type(np.geomspace(0, 10), npt.NDArray[np.floating]) +assert_type(np.geomspace(0, 10), npt.NDArray[np.float64]) assert_type(np.geomspace(0, 10j), npt.NDArray[np.complexfloating]) assert_type(np.geomspace(0, 10, dtype=np.int64), npt.NDArray[np.int64]) assert_type(np.geomspace(0, 10, dtype=int), npt.NDArray[Any]) diff --git a/numpy/typing/tests/data/reveal/fromnumeric.pyi b/numpy/typing/tests/data/reveal/fromnumeric.pyi index ee761a2762b7..0827b27a056b 100644 --- a/numpy/typing/tests/data/reveal/fromnumeric.pyi +++ b/numpy/typing/tests/data/reveal/fromnumeric.pyi @@ -46,11 +46,13 @@ assert_type(np.choose([1], [True, True]), npt.NDArray[Any]) assert_type(np.choose([1], AR_b), npt.NDArray[np.bool]) assert_type(np.choose([1], AR_b, out=AR_f4), npt.NDArray[np.float32]) -assert_type(np.repeat(b, 1), npt.NDArray[np.bool]) -assert_type(np.repeat(f4, 1), npt.NDArray[np.float32]) -assert_type(np.repeat(f, 1), npt.NDArray[Any]) -assert_type(np.repeat(AR_b, 1), npt.NDArray[np.bool]) -assert_type(np.repeat(AR_f4, 1), npt.NDArray[np.float32]) +assert_type(np.repeat(b, 1), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.repeat(b, 1, axis=0), npt.NDArray[np.bool]) +assert_type(np.repeat(f4, 1), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.repeat(f, 1), np.ndarray[tuple[int], np.dtype[Any]]) +assert_type(np.repeat(AR_b, 1), np.ndarray[tuple[int], np.dtype[np.bool]]) +assert_type(np.repeat(AR_f4, 1), np.ndarray[tuple[int], np.dtype[np.float32]]) +assert_type(np.repeat(AR_f4, 1, axis=0), npt.NDArray[np.float32]) # TODO: array_bdd tests for np.put() diff --git a/numpy/typing/tests/data/reveal/ma.pyi b/numpy/typing/tests/data/reveal/ma.pyi index 5c1b04a03718..e80426efc03e 100644 --- a/numpy/typing/tests/data/reveal/ma.pyi +++ b/numpy/typing/tests/data/reveal/ma.pyi @@ -5,10 +5,11 @@ import numpy as np from numpy import dtype, generic from numpy._typing import NDArray, _Shape -_ScalarT_co = TypeVar("_ScalarT_co", bound=generic, covariant=True) -MaskedNDArray: TypeAlias = np.ma.MaskedArray[_Shape, dtype[_ScalarT_co]] +_ScalarT = TypeVar("_ScalarT", bound=generic) +MaskedArray: TypeAlias = np.ma.MaskedArray[_Shape, dtype[_ScalarT]] 
+_Array1D: TypeAlias = np.ndarray[tuple[int], np.dtype[_ScalarT]] -class MaskedNDArraySubclass(MaskedNDArray[np.complex128]): ... +class MaskedArraySubclass(MaskedArray[np.complex128]): ... AR_b: NDArray[np.bool] AR_f4: NDArray[np.float32] @@ -16,18 +17,19 @@ AR_dt64: NDArray[np.datetime64] AR_td64: NDArray[np.timedelta64] AR_o: NDArray[np.timedelta64] -MAR_b: MaskedNDArray[np.bool] -MAR_f4: MaskedNDArray[np.float32] -MAR_f8: MaskedNDArray[np.float64] -MAR_i8: MaskedNDArray[np.int64] -MAR_dt64: MaskedNDArray[np.datetime64] -MAR_td64: MaskedNDArray[np.timedelta64] -MAR_o: MaskedNDArray[np.object_] -MAR_s: MaskedNDArray[np.str_] -MAR_byte: MaskedNDArray[np.bytes_] -MAR_V: MaskedNDArray[np.void] +MAR_c16: MaskedArray[np.complex128] +MAR_b: MaskedArray[np.bool] +MAR_f4: MaskedArray[np.float32] +MAR_f8: MaskedArray[np.float64] +MAR_i8: MaskedArray[np.int64] +MAR_dt64: MaskedArray[np.datetime64] +MAR_td64: MaskedArray[np.timedelta64] +MAR_o: MaskedArray[np.object_] +MAR_s: MaskedArray[np.str_] +MAR_byte: MaskedArray[np.bytes_] +MAR_V: MaskedArray[np.void] -MAR_subclass: MaskedNDArraySubclass +MAR_subclass: MaskedArraySubclass MAR_1d: np.ma.MaskedArray[tuple[int], np.dtype] MAR_2d_f4: np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]] @@ -49,9 +51,9 @@ assert_type(np.ma.min(MAR_b, axis=0), Any) assert_type(np.ma.min(MAR_f4, axis=0), Any) assert_type(np.ma.min(MAR_b, keepdims=True), Any) assert_type(np.ma.min(MAR_f4, keepdims=True), Any) -assert_type(np.ma.min(MAR_f4, out=MAR_subclass), MaskedNDArraySubclass) -assert_type(np.ma.min(MAR_f4, 0, MAR_subclass), MaskedNDArraySubclass) -assert_type(np.ma.min(MAR_f4, None, MAR_subclass), MaskedNDArraySubclass) +assert_type(np.ma.min(MAR_f4, out=MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.min(MAR_f4, 0, MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.min(MAR_f4, None, MAR_subclass), MaskedArraySubclass) assert_type(MAR_b.min(), np.bool) assert_type(MAR_f4.min(), np.float32) @@ -59,9 +61,9 @@ assert_type(MAR_b.min(axis=0), Any) assert_type(MAR_f4.min(axis=0), Any) assert_type(MAR_b.min(keepdims=True), Any) assert_type(MAR_f4.min(keepdims=True), Any) -assert_type(MAR_f4.min(out=MAR_subclass), MaskedNDArraySubclass) -assert_type(MAR_f4.min(0, MAR_subclass), MaskedNDArraySubclass) -assert_type(MAR_f4.min(None, MAR_subclass), MaskedNDArraySubclass) +assert_type(MAR_f4.min(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.min(0, MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.min(None, MAR_subclass), MaskedArraySubclass) assert_type(np.ma.max(MAR_b), np.bool) assert_type(np.ma.max(MAR_f4), np.float32) @@ -69,9 +71,9 @@ assert_type(np.ma.max(MAR_b, axis=0), Any) assert_type(np.ma.max(MAR_f4, axis=0), Any) assert_type(np.ma.max(MAR_b, keepdims=True), Any) assert_type(np.ma.max(MAR_f4, keepdims=True), Any) -assert_type(np.ma.max(MAR_f4, out=MAR_subclass), MaskedNDArraySubclass) -assert_type(np.ma.max(MAR_f4, 0, MAR_subclass), MaskedNDArraySubclass) -assert_type(np.ma.max(MAR_f4, None, MAR_subclass), MaskedNDArraySubclass) +assert_type(np.ma.max(MAR_f4, out=MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.max(MAR_f4, 0, MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.max(MAR_f4, None, MAR_subclass), MaskedArraySubclass) assert_type(MAR_b.max(), np.bool) assert_type(MAR_f4.max(), np.float32) @@ -79,9 +81,9 @@ assert_type(MAR_b.max(axis=0), Any) assert_type(MAR_f4.max(axis=0), Any) assert_type(MAR_b.max(keepdims=True), Any) assert_type(MAR_f4.max(keepdims=True), Any) -assert_type(MAR_f4.max(out=MAR_subclass), 
MaskedNDArraySubclass) -assert_type(MAR_f4.max(0, MAR_subclass), MaskedNDArraySubclass) -assert_type(MAR_f4.max(None, MAR_subclass), MaskedNDArraySubclass) +assert_type(MAR_f4.max(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.max(0, MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.max(None, MAR_subclass), MaskedArraySubclass) assert_type(np.ma.ptp(MAR_b), np.bool) assert_type(np.ma.ptp(MAR_f4), np.float32) @@ -89,9 +91,9 @@ assert_type(np.ma.ptp(MAR_b, axis=0), Any) assert_type(np.ma.ptp(MAR_f4, axis=0), Any) assert_type(np.ma.ptp(MAR_b, keepdims=True), Any) assert_type(np.ma.ptp(MAR_f4, keepdims=True), Any) -assert_type(np.ma.ptp(MAR_f4, out=MAR_subclass), MaskedNDArraySubclass) -assert_type(np.ma.ptp(MAR_f4, 0, MAR_subclass), MaskedNDArraySubclass) -assert_type(np.ma.ptp(MAR_f4, None, MAR_subclass), MaskedNDArraySubclass) +assert_type(np.ma.ptp(MAR_f4, out=MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.ptp(MAR_f4, 0, MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.ptp(MAR_f4, None, MAR_subclass), MaskedArraySubclass) assert_type(MAR_b.ptp(), np.bool) assert_type(MAR_f4.ptp(), np.float32) @@ -99,9 +101,9 @@ assert_type(MAR_b.ptp(axis=0), Any) assert_type(MAR_f4.ptp(axis=0), Any) assert_type(MAR_b.ptp(keepdims=True), Any) assert_type(MAR_f4.ptp(keepdims=True), Any) -assert_type(MAR_f4.ptp(out=MAR_subclass), MaskedNDArraySubclass) -assert_type(MAR_f4.ptp(0, MAR_subclass), MaskedNDArraySubclass) -assert_type(MAR_f4.ptp(None, MAR_subclass), MaskedNDArraySubclass) +assert_type(MAR_f4.ptp(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.ptp(0, MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.ptp(None, MAR_subclass), MaskedArraySubclass) assert_type(MAR_b.argmin(), np.intp) assert_type(MAR_f4.argmin(), np.intp) @@ -109,8 +111,8 @@ assert_type(MAR_f4.argmax(fill_value=6.28318, keepdims=False), np.intp) assert_type(MAR_b.argmin(axis=0), Any) assert_type(MAR_f4.argmin(axis=0), Any) assert_type(MAR_b.argmin(keepdims=True), Any) -assert_type(MAR_f4.argmin(out=MAR_subclass), MaskedNDArraySubclass) -assert_type(MAR_f4.argmin(None, None, out=MAR_subclass), MaskedNDArraySubclass) +assert_type(MAR_f4.argmin(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.argmin(None, None, out=MAR_subclass), MaskedArraySubclass) assert_type(np.ma.argmin(MAR_b), np.intp) assert_type(np.ma.argmin(MAR_f4), np.intp) @@ -118,8 +120,8 @@ assert_type(np.ma.argmin(MAR_f4, fill_value=6.28318, keepdims=False), np.intp) assert_type(np.ma.argmin(MAR_b, axis=0), Any) assert_type(np.ma.argmin(MAR_f4, axis=0), Any) assert_type(np.ma.argmin(MAR_b, keepdims=True), Any) -assert_type(np.ma.argmin(MAR_f4, out=MAR_subclass), MaskedNDArraySubclass) -assert_type(np.ma.argmin(MAR_f4, None, None, out=MAR_subclass), MaskedNDArraySubclass) +assert_type(np.ma.argmin(MAR_f4, out=MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.argmin(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubclass) assert_type(MAR_b.argmax(), np.intp) assert_type(MAR_f4.argmax(), np.intp) @@ -127,8 +129,8 @@ assert_type(MAR_f4.argmax(fill_value=6.28318, keepdims=False), np.intp) assert_type(MAR_b.argmax(axis=0), Any) assert_type(MAR_f4.argmax(axis=0), Any) assert_type(MAR_b.argmax(keepdims=True), Any) -assert_type(MAR_f4.argmax(out=MAR_subclass), MaskedNDArraySubclass) -assert_type(MAR_f4.argmax(None, None, out=MAR_subclass), MaskedNDArraySubclass) +assert_type(MAR_f4.argmax(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.argmax(None, None, out=MAR_subclass), MaskedArraySubclass) 
assert_type(np.ma.argmax(MAR_b), np.intp) assert_type(np.ma.argmax(MAR_f4), np.intp) @@ -136,40 +138,62 @@ assert_type(np.ma.argmax(MAR_f4, fill_value=6.28318, keepdims=False), np.intp) assert_type(np.ma.argmax(MAR_b, axis=0), Any) assert_type(np.ma.argmax(MAR_f4, axis=0), Any) assert_type(np.ma.argmax(MAR_b, keepdims=True), Any) -assert_type(np.ma.argmax(MAR_f4, out=MAR_subclass), MaskedNDArraySubclass) -assert_type(np.ma.argmax(MAR_f4, None, None, out=MAR_subclass), MaskedNDArraySubclass) +assert_type(np.ma.argmax(MAR_f4, out=MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.argmax(MAR_f4, None, None, out=MAR_subclass), MaskedArraySubclass) + +assert_type(MAR_b.all(), np.bool) +assert_type(MAR_f4.all(), np.bool) +assert_type(MAR_f4.all(keepdims=False), np.bool) +assert_type(MAR_b.all(axis=0), np.bool | MaskedArray[np.bool]) +assert_type(MAR_b.all(axis=0, keepdims=True), MaskedArray[np.bool]) +assert_type(MAR_b.all(0, None, True), MaskedArray[np.bool]) +assert_type(MAR_f4.all(axis=0), np.bool | MaskedArray[np.bool]) +assert_type(MAR_b.all(keepdims=True), MaskedArray[np.bool]) +assert_type(MAR_f4.all(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.all(None, out=MAR_subclass), MaskedArraySubclass) + +assert_type(MAR_b.any(), np.bool) +assert_type(MAR_f4.any(), np.bool) +assert_type(MAR_f4.any(keepdims=False), np.bool) +assert_type(MAR_b.any(axis=0), np.bool | MaskedArray[np.bool]) +assert_type(MAR_b.any(axis=0, keepdims=True), MaskedArray[np.bool]) +assert_type(MAR_b.any(0, None, True), MaskedArray[np.bool]) +assert_type(MAR_f4.any(axis=0), np.bool | MaskedArray[np.bool]) +assert_type(MAR_b.any(keepdims=True), MaskedArray[np.bool]) +assert_type(MAR_f4.any(out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f4.any(None, out=MAR_subclass), MaskedArraySubclass) assert_type(MAR_f4.sort(), None) assert_type(MAR_f4.sort(axis=0, kind='quicksort', order='K', endwith=False, fill_value=42., stable=False), None) -assert_type(np.ma.sort(MAR_f4), MaskedNDArray[np.float32]) -assert_type(np.ma.sort(MAR_subclass), MaskedNDArraySubclass) +assert_type(np.ma.sort(MAR_f4), MaskedArray[np.float32]) +assert_type(np.ma.sort(MAR_subclass), MaskedArraySubclass) assert_type(np.ma.sort([[0, 1], [2, 3]]), NDArray[Any]) assert_type(np.ma.sort(AR_f4), NDArray[np.float32]) assert_type(MAR_f8.take(0), np.float64) assert_type(MAR_1d.take(0), Any) -assert_type(MAR_f8.take([0]), MaskedNDArray[np.float64]) -assert_type(MAR_f8.take(0, out=MAR_subclass), MaskedNDArraySubclass) -assert_type(MAR_f8.take([0], out=MAR_subclass), MaskedNDArraySubclass) +assert_type(MAR_f8.take([0]), MaskedArray[np.float64]) +assert_type(MAR_f8.take(0, out=MAR_subclass), MaskedArraySubclass) +assert_type(MAR_f8.take([0], out=MAR_subclass), MaskedArraySubclass) assert_type(np.ma.take(f, 0), Any) assert_type(np.ma.take(f4, 0), np.float32) assert_type(np.ma.take(MAR_f8, 0), np.float64) assert_type(np.ma.take(AR_f4, 0), np.float32) assert_type(np.ma.take(MAR_1d, 0), Any) -assert_type(np.ma.take(MAR_f8, [0]), MaskedNDArray[np.float64]) -assert_type(np.ma.take(AR_f4, [0]), MaskedNDArray[np.float32]) -assert_type(np.ma.take(MAR_f8, 0, out=MAR_subclass), MaskedNDArraySubclass) -assert_type(np.ma.take(MAR_f8, [0], out=MAR_subclass), MaskedNDArraySubclass) -assert_type(np.ma.take([1], [0]), MaskedNDArray[Any]) -assert_type(np.ma.take(np.eye(2), 1, axis=0), MaskedNDArray[np.float64]) +assert_type(np.ma.take(MAR_f8, [0]), MaskedArray[np.float64]) +assert_type(np.ma.take(AR_f4, [0]), MaskedArray[np.float32]) +assert_type(np.ma.take(MAR_f8, 
0, out=MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.take(MAR_f8, [0], out=MAR_subclass), MaskedArraySubclass) +assert_type(np.ma.take([1], [0]), MaskedArray[Any]) +assert_type(np.ma.take(np.eye(2), 1, axis=0), MaskedArray[np.float64]) assert_type(MAR_f4.partition(1), None) assert_type(MAR_V.partition(1, axis=0, kind='introselect', order='K'), None) -assert_type(MAR_f4.argpartition(1), MaskedNDArray[np.intp]) -assert_type(MAR_1d.argpartition(1, axis=0, kind='introselect', order='K'), MaskedNDArray[np.intp]) +assert_type(MAR_f4.argpartition(1), MaskedArray[np.intp]) +assert_type(MAR_1d.argpartition(1, axis=0, kind='introselect', order='K'), MaskedArray[np.intp]) assert_type(np.ma.ndim(f4), int) assert_type(np.ma.ndim(MAR_b), int) @@ -185,55 +209,55 @@ assert_type(MAR_f4.ids(), tuple[int, int]) assert_type(MAR_f4.iscontiguous(), bool) -assert_type(MAR_f4 >= 3, MaskedNDArray[np.bool]) -assert_type(MAR_i8 >= AR_td64, MaskedNDArray[np.bool]) -assert_type(MAR_b >= AR_td64, MaskedNDArray[np.bool]) -assert_type(MAR_td64 >= AR_td64, MaskedNDArray[np.bool]) -assert_type(MAR_dt64 >= AR_dt64, MaskedNDArray[np.bool]) -assert_type(MAR_o >= AR_o, MaskedNDArray[np.bool]) -assert_type(MAR_1d >= 0, MaskedNDArray[np.bool]) -assert_type(MAR_s >= MAR_s, MaskedNDArray[np.bool]) -assert_type(MAR_byte >= MAR_byte, MaskedNDArray[np.bool]) - -assert_type(MAR_f4 > 3, MaskedNDArray[np.bool]) -assert_type(MAR_i8 > AR_td64, MaskedNDArray[np.bool]) -assert_type(MAR_b > AR_td64, MaskedNDArray[np.bool]) -assert_type(MAR_td64 > AR_td64, MaskedNDArray[np.bool]) -assert_type(MAR_dt64 > AR_dt64, MaskedNDArray[np.bool]) -assert_type(MAR_o > AR_o, MaskedNDArray[np.bool]) -assert_type(MAR_1d > 0, MaskedNDArray[np.bool]) -assert_type(MAR_s > MAR_s, MaskedNDArray[np.bool]) -assert_type(MAR_byte > MAR_byte, MaskedNDArray[np.bool]) - -assert_type(MAR_f4 <= 3, MaskedNDArray[np.bool]) -assert_type(MAR_i8 <= AR_td64, MaskedNDArray[np.bool]) -assert_type(MAR_b <= AR_td64, MaskedNDArray[np.bool]) -assert_type(MAR_td64 <= AR_td64, MaskedNDArray[np.bool]) -assert_type(MAR_dt64 <= AR_dt64, MaskedNDArray[np.bool]) -assert_type(MAR_o <= AR_o, MaskedNDArray[np.bool]) -assert_type(MAR_1d <= 0, MaskedNDArray[np.bool]) -assert_type(MAR_s <= MAR_s, MaskedNDArray[np.bool]) -assert_type(MAR_byte <= MAR_byte, MaskedNDArray[np.bool]) - -assert_type(MAR_f4 < 3, MaskedNDArray[np.bool]) -assert_type(MAR_i8 < AR_td64, MaskedNDArray[np.bool]) -assert_type(MAR_b < AR_td64, MaskedNDArray[np.bool]) -assert_type(MAR_td64 < AR_td64, MaskedNDArray[np.bool]) -assert_type(MAR_dt64 < AR_dt64, MaskedNDArray[np.bool]) -assert_type(MAR_o < AR_o, MaskedNDArray[np.bool]) -assert_type(MAR_1d < 0, MaskedNDArray[np.bool]) -assert_type(MAR_s < MAR_s, MaskedNDArray[np.bool]) -assert_type(MAR_byte < MAR_byte, MaskedNDArray[np.bool]) - -assert_type(MAR_f4 <= 3, MaskedNDArray[np.bool]) -assert_type(MAR_i8 <= AR_td64, MaskedNDArray[np.bool]) -assert_type(MAR_b <= AR_td64, MaskedNDArray[np.bool]) -assert_type(MAR_td64 <= AR_td64, MaskedNDArray[np.bool]) -assert_type(MAR_dt64 <= AR_dt64, MaskedNDArray[np.bool]) -assert_type(MAR_o <= AR_o, MaskedNDArray[np.bool]) -assert_type(MAR_1d <= 0, MaskedNDArray[np.bool]) -assert_type(MAR_s <= MAR_s, MaskedNDArray[np.bool]) -assert_type(MAR_byte <= MAR_byte, MaskedNDArray[np.bool]) +assert_type(MAR_f4 >= 3, MaskedArray[np.bool]) +assert_type(MAR_i8 >= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_b >= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_td64 >= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_dt64 >= AR_dt64, 
MaskedArray[np.bool]) +assert_type(MAR_o >= AR_o, MaskedArray[np.bool]) +assert_type(MAR_1d >= 0, MaskedArray[np.bool]) +assert_type(MAR_s >= MAR_s, MaskedArray[np.bool]) +assert_type(MAR_byte >= MAR_byte, MaskedArray[np.bool]) + +assert_type(MAR_f4 > 3, MaskedArray[np.bool]) +assert_type(MAR_i8 > AR_td64, MaskedArray[np.bool]) +assert_type(MAR_b > AR_td64, MaskedArray[np.bool]) +assert_type(MAR_td64 > AR_td64, MaskedArray[np.bool]) +assert_type(MAR_dt64 > AR_dt64, MaskedArray[np.bool]) +assert_type(MAR_o > AR_o, MaskedArray[np.bool]) +assert_type(MAR_1d > 0, MaskedArray[np.bool]) +assert_type(MAR_s > MAR_s, MaskedArray[np.bool]) +assert_type(MAR_byte > MAR_byte, MaskedArray[np.bool]) + +assert_type(MAR_f4 <= 3, MaskedArray[np.bool]) +assert_type(MAR_i8 <= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_b <= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_td64 <= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_dt64 <= AR_dt64, MaskedArray[np.bool]) +assert_type(MAR_o <= AR_o, MaskedArray[np.bool]) +assert_type(MAR_1d <= 0, MaskedArray[np.bool]) +assert_type(MAR_s <= MAR_s, MaskedArray[np.bool]) +assert_type(MAR_byte <= MAR_byte, MaskedArray[np.bool]) + +assert_type(MAR_f4 < 3, MaskedArray[np.bool]) +assert_type(MAR_i8 < AR_td64, MaskedArray[np.bool]) +assert_type(MAR_b < AR_td64, MaskedArray[np.bool]) +assert_type(MAR_td64 < AR_td64, MaskedArray[np.bool]) +assert_type(MAR_dt64 < AR_dt64, MaskedArray[np.bool]) +assert_type(MAR_o < AR_o, MaskedArray[np.bool]) +assert_type(MAR_1d < 0, MaskedArray[np.bool]) +assert_type(MAR_s < MAR_s, MaskedArray[np.bool]) +assert_type(MAR_byte < MAR_byte, MaskedArray[np.bool]) + +assert_type(MAR_f4 <= 3, MaskedArray[np.bool]) +assert_type(MAR_i8 <= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_b <= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_td64 <= AR_td64, MaskedArray[np.bool]) +assert_type(MAR_dt64 <= AR_dt64, MaskedArray[np.bool]) +assert_type(MAR_o <= AR_o, MaskedArray[np.bool]) +assert_type(MAR_1d <= 0, MaskedArray[np.bool]) +assert_type(MAR_s <= MAR_s, MaskedArray[np.bool]) +assert_type(MAR_byte <= MAR_byte, MaskedArray[np.bool]) assert_type(MAR_byte.count(), int) assert_type(MAR_f4.count(axis=None), int) @@ -265,6 +289,7 @@ assert_type(np.ma.put(MAR_f4, 4, 999), None) assert_type(np.ma.put(MAR_f4, 4, 999, mode='clip'), None) assert_type(np.ma.putmask(MAR_f4, [True, False], [0, 1]), None) +assert_type(np.ma.putmask(MAR_f4, np.False_, [0, 1]), None) assert_type(MAR_f4.filled(float('nan')), NDArray[np.float32]) assert_type(MAR_i8.filled(), NDArray[np.int64]) @@ -276,6 +301,11 @@ assert_type(np.ma.filled([[1,2,3]]), NDArray[Any]) # https://github.com/numpy/numpy/pull/28742#discussion_r2048968375 assert_type(np.ma.filled(MAR_1d), np.ndarray[tuple[int], np.dtype]) # type: ignore[assert-type] +assert_type(MAR_b.repeat(3), np.ma.MaskedArray[tuple[int], np.dtype[np.bool]]) +assert_type(MAR_2d_f4.repeat(MAR_i8), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.repeat(MAR_i8, axis=None), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.repeat(MAR_i8, axis=0), MaskedArray[np.float32]) + assert_type(np.ma.allequal(AR_f4, MAR_f4), bool) assert_type(np.ma.allequal(AR_f4, MAR_f4, fill_value=False), bool) @@ -283,6 +313,9 @@ assert_type(np.ma.allclose(AR_f4, MAR_f4), bool) assert_type(np.ma.allclose(AR_f4, MAR_f4, masked_equal=False), bool) assert_type(np.ma.allclose(AR_f4, MAR_f4, rtol=.4, atol=.3), bool) +assert_type(MAR_2d_f4.ravel(), np.ma.MaskedArray[tuple[int], np.dtype[np.float32]]) 
+assert_type(MAR_1d.ravel(order='A'), np.ma.MaskedArray[tuple[int], np.dtype[Any]]) + assert_type(np.ma.getmask(MAR_f4), NDArray[np.bool] | np.bool) # PyRight detects this one correctly, but mypy doesn't: # `Revealed type is "Union[numpy.ndarray[Any, Any], numpy.bool[Any]]"` @@ -300,6 +333,39 @@ def func(x: object) -> None: else: assert_type(x, object) +assert_type(MAR_2d_f4.mT, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) + +assert_type(MAR_c16.real, MaskedArray[np.float64]) +assert_type(MAR_c16.imag, MaskedArray[np.float64]) + +assert_type(MAR_2d_f4.baseclass, type[NDArray[Any]]) + +assert_type(MAR_b.swapaxes(0, 1), MaskedArray[np.bool]) +assert_type(MAR_2d_f4.swapaxes(1, 0), MaskedArray[np.float32]) + assert_type(np.ma.nomask, np.bool[Literal[False]]) # https://github.com/python/mypy/issues/18974 assert_type(np.ma.MaskType, type[np.bool]) # type: ignore[assert-type] + +assert_type(MAR_1d.__setmask__([True, False]), None) +assert_type(MAR_1d.__setmask__(np.False_), None) + +assert_type(MAR_2d_f4.harden_mask(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_i8.harden_mask(), MaskedArray[np.int64]) +assert_type(MAR_2d_f4.soften_mask(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_i8.soften_mask(), MaskedArray[np.int64]) +assert_type(MAR_f4.unshare_mask(), MaskedArray[np.float32]) +assert_type(MAR_b.shrink_mask(), MaskedArray[np.bool_]) + +assert_type(MAR_i8.hardmask, bool) +assert_type(MAR_i8.sharedmask, bool) + +assert_type(MAR_b.transpose(), MaskedArray[np.bool]) +assert_type(MAR_2d_f4.transpose(), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.transpose(1, 0), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_2d_f4.transpose((1, 0)), np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) +assert_type(MAR_b.T, MaskedArray[np.bool]) +assert_type(MAR_2d_f4.T, np.ma.MaskedArray[tuple[int, int], np.dtype[np.float32]]) + +assert_type(MAR_2d_f4.nonzero(), tuple[_Array1D[np.intp], *tuple[_Array1D[np.intp], ...]]) +assert_type(MAR_2d_f4.nonzero()[0], _Array1D[np.intp]) diff --git a/numpy/typing/tests/data/reveal/ndarray_misc.pyi b/numpy/typing/tests/data/reveal/ndarray_misc.pyi index 2016fb5c7971..682f9db50220 100644 --- a/numpy/typing/tests/data/reveal/ndarray_misc.pyi +++ b/numpy/typing/tests/data/reveal/ndarray_misc.pyi @@ -126,9 +126,12 @@ assert_type(f8.round(), np.float64) assert_type(AR_f8.round(), npt.NDArray[np.float64]) assert_type(AR_f8.round(out=B), SubClass) -assert_type(f8.repeat(1), npt.NDArray[np.float64]) -assert_type(AR_f8.repeat(1), npt.NDArray[np.float64]) -assert_type(B.repeat(1), npt.NDArray[np.object_]) +assert_type(f8.repeat(1), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(f8.repeat(1, axis=0), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_f8.repeat(1), np.ndarray[tuple[int], np.dtype[np.float64]]) +assert_type(AR_f8.repeat(1, axis=0), npt.NDArray[np.float64]) +assert_type(B.repeat(1), np.ndarray[tuple[int], np.dtype[np.object_]]) +assert_type(B.repeat(1, axis=0), npt.NDArray[np.object_]) assert_type(f8.std(), Any) assert_type(AR_f8.std(), Any) diff --git a/numpy/typing/tests/test_typing.py b/numpy/typing/tests/test_typing.py index c1e708c876e5..068b27a7709f 100644 --- a/numpy/typing/tests/test_typing.py +++ b/numpy/typing/tests/test_typing.py @@ -249,18 +249,10 @@ def test_code_runs(path: str) -> None: LINENO_MAPPING = { - 6: "uint128", - 7: "uint256", - 9: "int128", - 10: "int256", - 12: "float80", - 13: 
"float96", - 14: "float128", - 15: "float256", - 17: "complex160", - 18: "complex192", - 19: "complex256", - 20: "complex512", + 6: "float96", + 7: "float128", + 8: "complex192", + 9: "complex256", } diff --git a/pyproject.toml b/pyproject.toml index eb7015acc347..b62d71cbba73 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -188,6 +188,11 @@ select = "*-win32" config-settings = "setup-args=--vsenv setup-args=-Dallow-noblas=true build-dir=build" repair-wheel-command = "" +[[tool.cibuildwheel.overrides]] +select = "*-win_arm64" +config-settings = "setup-args=--vsenv setup-args=-Dallow-noblas=true build-dir=build" +repair-wheel-command = "" + [[tool.cibuildwheel.overrides]] select = "*pyodide*" before-test = "pip install -r {project}/requirements/emscripten_test_requirements.txt" diff --git a/ruff.toml b/ruff.toml index 56ac820ce23d..c01b5cf30991 100644 --- a/ruff.toml +++ b/ruff.toml @@ -17,7 +17,11 @@ extend-exclude = [ [lint] preview = true extend-select = [ + "C4", + "LOG", + "G", "PIE", + "TID", "FLY", "E", "W", @@ -27,20 +31,21 @@ extend-select = [ ] ignore = [ "F", # TODO: enable Pyflakes rules + "C408", # Unnecessary `dict()` call (rewrite as a literal) "PIE790", # Unnecessary `pass` statement - "E241", - "E251", - "E265", - "E266", - "E302", - "E402", + "E241", # Multiple spaces after comma + "E251", # Unexpected spaces around keyword / parameter equals + "E265", # Block comment should start with `# ` + "E266", # Too many leading `#` before block comment + "E302", # TODO: Expected 2 blank lines, found 1 + "E402", # Module level import not at top of file "E501", # TODO: Line too long - "E712", - "E721", - "E731", - "E741", - "UP015", # Unnecessary mode argument - "UP031", # TODO: Use format specifiers instead of percent format + "E712", # Avoid equality comparisons to `True` or `False` + "E721", # TODO: Use `is` and `is not` for type comparisons, or `isinstance()` for isinstance check + "E731", # Do not assign a `lambda` expression, use a `def` + "E741", # Ambiguous variable name + "UP015", # Unnecessary mode argument + "UP031", # TODO: Use format specifiers instead of percent format ] [lint.per-file-ignores] diff --git a/tools/ci/push_docs_to_repo.py b/tools/ci/push_docs_to_repo.py index 4b31f8df492e..9ea33da6ddc3 100755 --- a/tools/ci/push_docs_to_repo.py +++ b/tools/ci/push_docs_to_repo.py @@ -56,7 +56,7 @@ def run(cmd, stdout=True): # ensure the working branch is called "main" # (`--initial-branch=main` appeared to have failed on older git versions): run(['git', 'checkout', '-b', 'main']) -run(['git', 'remote', 'add', 'origin', args.remote]) +run(['git', 'remote', 'add', 'origin', args.remote]) run(['git', 'config', '--local', 'user.name', args.committer]) run(['git', 'config', '--local', 'user.email', args.email]) diff --git a/tools/commitstats.py b/tools/commitstats.py deleted file mode 100644 index 5aee433025b4..000000000000 --- a/tools/commitstats.py +++ /dev/null @@ -1,38 +0,0 @@ -# Run svn log -l - -import re -import numpy as np -import os - -names = re.compile(r'r\d+\s\|\s(.*)\s\|\s200') - -def get_count(filename, repo): - mystr = open(filename).read() - result = names.findall(mystr) - u = np.unique(result) - count = [(x, result.count(x), repo) for x in u] - return count - - -command = 'svn log -l 2300 > output.txt' -os.chdir('..') -os.system(command) - -count = get_count('output.txt', 'NumPy') - - -os.chdir('../scipy') -os.system(command) - -count.extend(get_count('output.txt', 'SciPy')) - -os.chdir('../scikits') -os.system(command) 
-count.extend(get_count('output.txt', 'SciKits')) -count.sort() - - -print("** SciPy and NumPy **") -print("=====================") -for val in count: - print(val) diff --git a/tools/download-wheels.py b/tools/download-wheels.py index 54dbdf1200a8..598075f0b03c 100644 --- a/tools/download-wheels.py +++ b/tools/download-wheels.py @@ -115,7 +115,7 @@ def download_wheels(version, wheelhouse, test=False): "[defaults to /release/installers]") parser.add_argument( "-t", "--test", - action = 'store_true', + action='store_true', help="only list available wheels, do not download") args = parser.parse_args() diff --git a/tools/find_deprecated_escaped_characters.py b/tools/find_deprecated_escaped_characters.py deleted file mode 100644 index d7225b8e85f6..000000000000 --- a/tools/find_deprecated_escaped_characters.py +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env python3 -r""" -Look for escape sequences deprecated in Python 3.6. - -Python 3.6 deprecates a number of non-escape sequences starting with '\' that -were accepted before. For instance, '\(' was previously accepted but must now -be written as '\\(' or r'\('. - -""" - - -def main(root): - """Find deprecated escape sequences. - - Checks for deprecated escape sequences in ``*.py files``. If `root` is a - file, that file is checked, if `root` is a directory all ``*.py`` files - found in a recursive descent are checked. - - If a deprecated escape sequence is found, the file and line where found is - printed. Note that for multiline strings the line where the string ends is - printed and the error(s) are somewhere in the body of the string. - - Parameters - ---------- - root : str - File or directory to check. - Returns - ------- - None - - """ - import ast - import tokenize - import warnings - from pathlib import Path - - count = 0 - base = Path(root) - paths = base.rglob("*.py") if base.is_dir() else [base] - for path in paths: - # use tokenize to auto-detect encoding on systems where no - # default encoding is defined (e.g. LANG='C') - with tokenize.open(str(path)) as f: - with warnings.catch_warnings(record=True) as w: - warnings.simplefilter('always') - tree = ast.parse(f.read()) - if w: - print("file: ", str(path)) - for e in w: - print('line: ', e.lineno, ': ', e.message) - print() - count += len(w) - print("Errors Found", count) - - -if __name__ == "__main__": - from argparse import ArgumentParser - - parser = ArgumentParser(description="Find deprecated escaped characters") - parser.add_argument('root', help='directory or file to be checked') - args = parser.parse_args() - main(args.root) diff --git a/tools/swig/README b/tools/swig/README index c539c597f8c6..876d6a698034 100644 --- a/tools/swig/README +++ b/tools/swig/README @@ -3,9 +3,7 @@ Notes for the numpy/tools/swig directory This set of files is for developing and testing file numpy.i, which is intended to be a set of typemaps for helping SWIG interface between C -and C++ code that uses C arrays and the python module NumPy. It is -ultimately hoped that numpy.i will be included as part of the SWIG -distribution. +and C++ code that uses C arrays and the python module NumPy. 
Documentation ------------- diff --git a/tools/swig/test/Array2.cxx b/tools/swig/test/Array2.cxx index 2da61f728569..11b523523617 100644 --- a/tools/swig/test/Array2.cxx +++ b/tools/swig/test/Array2.cxx @@ -160,7 +160,7 @@ void Array2::allocateRows() void Array2::deallocateMemory() { - if (_ownData && _nrows*_ncols && _buffer) + if (_ownData && _nrows && _ncols && _buffer) { delete [] _rows; delete [] _buffer; diff --git a/tools/swig/test/setup.py b/tools/swig/test/setup.py index bc310043d82e..c925f358ec7b 100755 --- a/tools/swig/test/setup.py +++ b/tools/swig/test/setup.py @@ -46,16 +46,16 @@ ) _Fortran = Extension("_Fortran", - ["Fortran_wrap.cxx", - "Fortran.cxx"], - include_dirs = [numpy_include], - ) + ["Fortran_wrap.cxx", + "Fortran.cxx"], + include_dirs = [numpy_include], + ) _Flat = Extension("_Flat", - ["Flat_wrap.cxx", - "Flat.cxx"], - include_dirs = [numpy_include], - ) + ["Flat_wrap.cxx", + "Flat.cxx"], + include_dirs = [numpy_include], + ) # NumyTypemapTests setup setup(name = "NumpyTypemapTests", diff --git a/tools/swig/test/testSuperTensor.py b/tools/swig/test/testSuperTensor.py index 11cbc76f2642..5f185884641e 100644 --- a/tools/swig/test/testSuperTensor.py +++ b/tools/swig/test/testSuperTensor.py @@ -29,8 +29,8 @@ def testNorm(self): norm = SuperTensor.__dict__[self.typeStr + "Norm"] supertensor = np.arange(2 * 2 * 2 * 2, dtype=self.typeCode).reshape((2, 2, 2, 2)) - #Note: cludge to get an answer of the same type as supertensor. - #Answer is simply sqrt(sum(supertensor*supertensor)/16) + # Note: cludge to get an answer of the same type as supertensor. + # Answer is simply sqrt(sum(supertensor*supertensor)/16) answer = np.array([np.sqrt(np.sum(supertensor.astype('d') * supertensor) / 16.)], dtype=self.typeCode)[0] # noqa: E501 self.assertAlmostEqual(norm(supertensor), answer, 6) diff --git a/tools/wheels/LICENSE_linux.txt b/tools/wheels/LICENSE_linux.txt index 021b4b0289e7..9e2d9053b8a7 100644 --- a/tools/wheels/LICENSE_linux.txt +++ b/tools/wheels/LICENSE_linux.txt @@ -133,7 +133,7 @@ GCC RUNTIME LIBRARY EXCEPTION Version 3.1, 31 March 2009 -Copyright (C) 2009 Free Software Foundation, Inc. +Copyright (C) 2009 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. @@ -207,7 +207,7 @@ requirements of the license of GCC. GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 - Copyright (C) 2007 Free Software Foundation, Inc. + Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. @@ -851,7 +851,7 @@ the "copyright" line and a pointer to where the full notice is found. GNU General Public License for more details. You should have received a copy of the GNU General Public License - along with this program. If not, see . + along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. @@ -870,14 +870,14 @@ might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see -. +. The GNU General Public License does not permit incorporating your program into proprietary programs. 
If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read -. +. Name: libquadmath Files: numpy.libs/libquadmath*.so diff --git a/tools/wheels/LICENSE_osx.txt b/tools/wheels/LICENSE_osx.txt index 81889131cfa7..7ef2e381874e 100644 --- a/tools/wheels/LICENSE_osx.txt +++ b/tools/wheels/LICENSE_osx.txt @@ -3,6 +3,7 @@ This binary distribution of NumPy also bundles the following software: + Name: OpenBLAS Files: numpy/.dylibs/libscipy_openblas*.so Description: bundled as a dynamically linked library @@ -132,7 +133,7 @@ GCC RUNTIME LIBRARY EXCEPTION Version 3.1, 31 March 2009 -Copyright (C) 2009 Free Software Foundation, Inc. +Copyright (C) 2009 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. @@ -206,7 +207,7 @@ requirements of the license of GCC. GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 - Copyright (C) 2007 Free Software Foundation, Inc. + Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. @@ -850,7 +851,7 @@ the "copyright" line and a pointer to where the full notice is found. GNU General Public License for more details. You should have received a copy of the GNU General Public License - along with this program. If not, see . + along with this program. If not, see . Also add information on how to contact you by electronic and paper mail. @@ -869,14 +870,14 @@ might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see -. +. The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read -. +. Name: libquadmath Files: numpy/.dylibs/libquadmath*.so diff --git a/tools/wheels/LICENSE_win32.txt b/tools/wheels/LICENSE_win32.txt index a2ccce66fbe5..c8277e7710a2 100644 --- a/tools/wheels/LICENSE_win32.txt +++ b/tools/wheels/LICENSE_win32.txt @@ -133,7 +133,7 @@ GCC RUNTIME LIBRARY EXCEPTION Version 3.1, 31 March 2009 -Copyright (C) 2009 Free Software Foundation, Inc. +Copyright (C) 2009 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. @@ -207,7 +207,7 @@ requirements of the license of GCC. GNU GENERAL PUBLIC LICENSE Version 3, 29 June 2007 - Copyright (C) 2007 Free Software Foundation, Inc. + Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. @@ -851,7 +851,7 @@ the "copyright" line and a pointer to where the full notice is found. GNU General Public License for more details. You should have received a copy of the GNU General Public License - along with this program. If not, see . + along with this program. If not, see . 
Also add information on how to contact you by electronic and paper mail. @@ -870,12 +870,12 @@ might be different; for a GUI interface, you would use an "about box". You should also get your employer (if you work as a programmer) or school, if any, to sign a "copyright disclaimer" for the program, if necessary. For more information on this, and how to apply and follow the GNU GPL, see -. +. The GNU General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. But first, please read -. +. diff --git a/tools/wheels/cibw_before_build.sh b/tools/wheels/cibw_before_build.sh index e2f464d32a2a..c8d119b1b39f 100644 --- a/tools/wheels/cibw_before_build.sh +++ b/tools/wheels/cibw_before_build.sh @@ -22,6 +22,9 @@ fi if [[ $(python -c"import sys; print(sys.maxsize)") < $(python -c"import sys; print(2**33)") ]]; then echo "No BLAS used for 32-bit wheels" export INSTALL_OPENBLAS=false +elif [[ $(python -c"import sysconfig; print(sysconfig.get_platform())") == "win-arm64" ]]; then + echo "No BLAS used for ARM64 wheels" + export INSTALL_OPENBLAS=false elif [ -z $INSTALL_OPENBLAS ]; then # the macos_arm64 build might not set this variable export INSTALL_OPENBLAS=true diff --git a/vendored-meson/meson b/vendored-meson/meson index 7300f5fd4c1c..f754c4258805 160000 --- a/vendored-meson/meson +++ b/vendored-meson/meson @@ -1 +1 @@ -Subproject commit 7300f5fd4c1c8b0406faeec4cc631f11f1ea324c +Subproject commit f754c4258805056ed7be09830d96af45215d341b
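
As a quick illustration of the `ndarray_misc.pyi` change earlier in this patch, here is a minimal, self-contained sketch (not part of the patch itself; the variable names and the Python 3.11+ `typing.assert_type` import are my own assumptions) of how the updated stubs type `repeat`: with no `axis` the result is flattened, so the shape parameter narrows to `tuple[int]`, while an explicit `axis` keeps the generic `NDArray` shape.

    # Hedged sketch, not part of the patch: shape typing of `repeat` per the
    # updated ndarray_misc.pyi expectations. Runs as a no-op at runtime; the
    # assertions are checked statically by mypy/pyright.
    from typing import assert_type  # Python 3.11+

    import numpy as np
    import numpy.typing as npt

    f8: np.float64 = np.float64(1.0)
    AR_f8: npt.NDArray[np.float64] = np.zeros((2, 3))

    # No axis: result is flattened, shape type narrows to tuple[int]
    assert_type(f8.repeat(1), np.ndarray[tuple[int], np.dtype[np.float64]])
    assert_type(AR_f8.repeat(1), np.ndarray[tuple[int], np.dtype[np.float64]])
    # Explicit axis: shape stays generic
    assert_type(AR_f8.repeat(1, axis=0), npt.NDArray[np.float64])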
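
The `tools/wheels/cibw_before_build.sh` hunk above extends the existing 32-bit check with a Windows ARM64 check, matching the new `*-win_arm64` cibuildwheel override that builds with `-Dallow-noblas=true`. Below is a simplified Python rendering of that decision logic; the function name is illustrative (not from the repo), and the fallback handling of an already-set `INSTALL_OPENBLAS` variable is omitted.

    # Illustrative sketch of the platform checks the shell script performs via
    # inline `python -c` calls: skip OpenBLAS for 32-bit interpreters and for
    # Windows ARM64 wheels, install it otherwise.
    import sys
    import sysconfig


    def should_install_openblas() -> bool:
        if sys.maxsize < 2**33:
            # "No BLAS used for 32-bit wheels"
            return False
        if sysconfig.get_platform() == "win-arm64":
            # New in this patch: "No BLAS used for ARM64 wheels"
            return False
        return True


    if __name__ == "__main__":
        print("INSTALL_OPENBLAS =", str(should_install_openblas()).lower())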