diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index bb4e85d03f..0e6c9ee0f2 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -203,15 +203,14 @@ jobs:
         run: |
           if [[ $OS == "macos-15" ]]; then
-            micromamba install --yes -q "python~=${PYTHON_VERSION}" "numpy${NUMPY_VERSION}" scipy pip graphviz cython pytest coverage pytest-cov pytest-benchmark pytest-mock libblas=*=*accelerate;
+            micromamba install --yes -q "python~=${PYTHON_VERSION}" "numpy${NUMPY_VERSION}" scipy pip graphviz cython pytest coverage pytest-cov pytest-benchmark pytest-mock pytest-sphinx libblas=*=*accelerate;
           else
-            micromamba install --yes -q "python~=${PYTHON_VERSION}" mkl "numpy${NUMPY_VERSION}" scipy pip mkl-service graphviz cython pytest coverage pytest-cov pytest-benchmark pytest-mock;
+            micromamba install --yes -q "python~=${PYTHON_VERSION}" mkl "numpy${NUMPY_VERSION}" scipy pip mkl-service graphviz cython pytest coverage pytest-cov pytest-benchmark pytest-mock pytest-sphinx;
           fi
           if [[ $INSTALL_NUMBA == "1" ]]; then micromamba install --yes -q -c conda-forge "python~=${PYTHON_VERSION}" "numba>=0.57"; fi
           if [[ $INSTALL_JAX == "1" ]]; then micromamba install --yes -q -c conda-forge "python~=${PYTHON_VERSION}" jax jaxlib numpyro && pip install tfp-nightly; fi
           if [[ $INSTALL_TORCH == "1" ]]; then micromamba install --yes -q -c conda-forge "python~=${PYTHON_VERSION}" pytorch pytorch-cuda=12.1 "mkl<=2024.0" -c pytorch -c nvidia; fi
           if [[ $INSTALL_XARRAY == "1" ]]; then micromamba install --yes -q -c conda-forge "python~=${PYTHON_VERSION}" xarray xarray-einstats; fi
-          pip install pytest-sphinx
           pip install -e ./
           micromamba list && pip freeze
diff --git a/environment-osx-arm64.yml b/environment-osx-arm64.yml
index 9db3fd8fe7..c4685d1c7c 100644
--- a/environment-osx-arm64.yml
+++ b/environment-osx-arm64.yml
@@ -31,8 +31,7 @@ dependencies:
   - pytest-xdist
   - pytest-benchmark
   - pytest-mock
-  - pip:
-      - pytest-sphinx
+  - pytest-sphinx
   # For building docs
   - sphinx>=5.1.0,<6
   - sphinx_rtd_theme
diff --git a/environment.yml b/environment.yml
index 9bdddfb6f6..9909b000d1 100644
--- a/environment.yml
+++ b/environment.yml
@@ -33,8 +33,7 @@ dependencies:
   - pytest-xdist
   - pytest-benchmark
   - pytest-mock
-  - pip:
-      - pytest-sphinx
+  - pytest-sphinx
   # For building docs
   - sphinx>=5.1.0,<6
   - sphinx_rtd_theme
diff --git a/pytensor/link/jax/linker.py b/pytensor/link/jax/linker.py
index 300f2f7323..dd634e630c 100644
--- a/pytensor/link/jax/linker.py
+++ b/pytensor/link/jax/linker.py
@@ -9,8 +9,10 @@
 class JAXLinker(JITLinker):
     """A `Linker` that JIT-compiles NumPy-based operations using JAX."""

+    scalar_shape_inputs: tuple[int, ...]
+
     def __init__(self, *args, **kwargs):
-        self.scalar_shape_inputs: tuple[int] = ()  # type: ignore[annotation-unchecked]
+        self.scalar_shape_inputs = ()
         super().__init__(*args, **kwargs)

     def fgraph_convert(self, fgraph, input_storage, storage_map, **kwargs):
diff --git a/pytensor/link/numba/dispatch/vectorize_codegen.py b/pytensor/link/numba/dispatch/vectorize_codegen.py
index e6bd7fa4ca..060418cb6c 100644
--- a/pytensor/link/numba/dispatch/vectorize_codegen.py
+++ b/pytensor/link/numba/dispatch/vectorize_codegen.py
@@ -517,9 +517,9 @@ def make_loop_call(
     output_slices = []
     for output, output_type, bc in zip(outputs, output_types, output_bc, strict=True):
         core_ndim = output_type.ndim - len(bc)
-        size_type = output.shape.type.element  # type: ignore
-        output_shape = cgutils.unpack_tuple(builder, output.shape)  # type: ignore
-        output_strides = cgutils.unpack_tuple(builder, output.strides)  # type: ignore
+        size_type = output.shape.type.element  # pyright: ignore[reportAttributeAccessIssue]
+        output_shape = cgutils.unpack_tuple(builder, output.shape)  # pyright: ignore[reportAttributeAccessIssue]
+        output_strides = cgutils.unpack_tuple(builder, output.strides)  # pyright: ignore[reportAttributeAccessIssue]

         idxs_bc = [zero if bc else idx for idx, bc in zip(idxs, bc, strict=True)] + [
             zero
@@ -527,7 +527,7 @@ def make_loop_call(
         ptr = cgutils.get_item_pointer2(
             context,
             builder,
-            output.data,  # type:ignore
+            output.data,
             output_shape,
             output_strides,
             output_type.layout,
diff --git a/pytensor/npy_2_compat.py b/pytensor/npy_2_compat.py
index 667a5c074e..207316c08f 100644
--- a/pytensor/npy_2_compat.py
+++ b/pytensor/npy_2_compat.py
@@ -41,7 +41,7 @@

 if using_numpy_2:
-    ndarray_c_version = np._core._multiarray_umath._get_ndarray_c_version()
+    ndarray_c_version = np._core._multiarray_umath._get_ndarray_c_version()  # type: ignore[attr-defined]
 else:
     ndarray_c_version = np.core._multiarray_umath._get_ndarray_c_version()  # type: ignore[attr-defined]
diff --git a/pytensor/scan/utils.py b/pytensor/scan/utils.py
index 6a0cdde461..3b924225ac 100644
--- a/pytensor/scan/utils.py
+++ b/pytensor/scan/utils.py
@@ -109,7 +109,7 @@ def safe_new(
     except TestValueError:
         pass

-    return nw_x
+    return type_cast(Variable, nw_x)


 class until:
diff --git a/pytensor/tensor/einsum.py b/pytensor/tensor/einsum.py
index e119b6de11..3b1e68463f 100644
--- a/pytensor/tensor/einsum.py
+++ b/pytensor/tensor/einsum.py
@@ -597,10 +597,14 @@ def einsum(subscripts: str, *operands: "TensorLike", optimize=None) -> TensorVar
             # Numpy einsum_path requires arrays even though only the shapes matter
             # It's not trivial to duck-type our way around because of internal call to `asanyarray`
             *[np.empty(shape) for shape in shapes],
-            einsum_call=True,  # Not part of public API
+            # einsum_call is not part of public API
+            einsum_call=True,  # type: ignore[arg-type]
             optimize="optimal",
-        )  # type: ignore
-        np_path = tuple(contraction[0] for contraction in contraction_list)
+        )
+        np_path: PATH | tuple[tuple[int, ...]] = tuple(
+            contraction[0]  # type: ignore[misc]
+            for contraction in contraction_list
+        )

         if len(np_path) == 1 and len(np_path[0]) > 2:
             # When there's nothing to optimize, einsum_path reduces all entries simultaneously instead of doing
@@ -610,7 +614,7 @@ def einsum(subscripts: str, *operands: "TensorLike", optimize=None) -> TensorVar
                 subscripts, tensor_operands, path
             )
         else:
-            path = np_path
+            path = cast(PATH, np_path)

         optimized = True
diff --git a/pytensor/tensor/random/rewriting/numba.py b/pytensor/tensor/random/rewriting/numba.py
index b6dcf3b5e8..75d213eb26 100644
--- a/pytensor/tensor/random/rewriting/numba.py
+++ b/pytensor/tensor/random/rewriting/numba.py
@@ -53,10 +53,10 @@ def introduce_explicit_core_shape_rv(fgraph, node):
     #  ← dirichlet_rv{"(a)->(a)"}.1 [id F]
     #     └─ ···
     """
-    op: RandomVariable = node.op  # type: ignore[annotation-unchecked]
+    op: RandomVariable = node.op
     next_rng, rv = node.outputs

-    shape_feature: ShapeFeature | None = getattr(fgraph, "shape_feature", None)  # type: ignore[annotation-unchecked]
+    shape_feature: ShapeFeature | None = getattr(fgraph, "shape_feature", None)
     if shape_feature:
         core_shape = [
             shape_feature.get_shape(rv, -i - 1) for i in reversed(range(op.ndim_supp))
diff --git a/pytensor/tensor/rewriting/blockwise.py b/pytensor/tensor/rewriting/blockwise.py
index 7b70bf8860..023c8aae51 100644
--- a/pytensor/tensor/rewriting/blockwise.py
+++ b/pytensor/tensor/rewriting/blockwise.py
@@ -102,7 +102,7 @@ def local_blockwise_alloc(fgraph, node):

     This is critical to remove many unnecessary Blockwise, or to reduce the work done by it
     """
-    op: Blockwise = node.op  # type: ignore
+    op: Blockwise = node.op
     batch_ndim = op.batch_ndim(node)

     if not batch_ndim:
diff --git a/pytensor/tensor/rewriting/numba.py b/pytensor/tensor/rewriting/numba.py
index 91ab131424..60c4e41c2d 100644
--- a/pytensor/tensor/rewriting/numba.py
+++ b/pytensor/tensor/rewriting/numba.py
@@ -65,10 +65,10 @@ def introduce_explicit_core_shape_blockwise(fgraph, node):
     # [Blockwise{SVD{full_matrices=True, compute_uv=True}, (m,n)->(m,m),(k),(n,n)}].2 [id A] 6
     #    └─ ···
     """
-    op: Blockwise = node.op  # type: ignore[annotation-unchecked]
+    op: Blockwise = node.op
     batch_ndim = op.batch_ndim(node)

-    shape_feature: ShapeFeature | None = getattr(fgraph, "shape_feature", None)  # type: ignore[annotation-unchecked]
+    shape_feature: ShapeFeature | None = getattr(fgraph, "shape_feature", None)
     if shape_feature:
         core_shapes = [
             [shape_feature.get_shape(out, i) for i in range(batch_ndim, out.type.ndim)]
diff --git a/scripts/mypy-failing.txt b/scripts/mypy-failing.txt
index 99dd26a26e..5b39180db9 100644
--- a/scripts/mypy-failing.txt
+++ b/scripts/mypy-failing.txt
@@ -3,7 +3,6 @@ pytensor/compile/debugmode.py
 pytensor/compile/function/pfunc.py
 pytensor/compile/function/types.py
 pytensor/compile/mode.py
-pytensor/compile/sharedvalue.py
 pytensor/graph/rewriting/basic.py
 pytensor/ifelse.py
 pytensor/link/numba/dispatch/elemwise.py
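
Note on the recurring `annotation-unchecked` hunks above: mypy does not check
the bodies of untyped functions by default, so an inline variable annotation
inside an unannotated function only produces an `annotation-unchecked` note,
which the removed `# type: ignore[annotation-unchecked]` comments referred to.
Declaring the attribute at class scope, as the `JAXLinker` hunk does, gives
mypy a declaration it always checks, so the inline annotation and its ignore
become unnecessary. A minimal sketch of the pattern (the names `Before`,
`After`, and `items` are illustrative, not from the patch):

    class Before:
        def __init__(self, *args, **kwargs):
            # `__init__` has no type annotations, so mypy skips its body and
            # only emits an [annotation-unchecked] note for this annotation.
            self.items: tuple[int, ...] = ()

    class After:
        # A class-scope declaration is always type-checked; assignments to
        # `self.items` in methods then need no inline annotation or ignore.
        items: tuple[int, ...]

        def __init__(self, *args, **kwargs):
            self.items = ()

The `type_cast(Variable, nw_x)` and `cast(PATH, np_path)` hunks attack the
same problem from the other direction: `typing.cast` (which `type_cast`
appears to alias, judging by the name) is a no-op at runtime that tells the
checker the type of a value it cannot infer, letting the surrounding
functions type-check without blanket ignores.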