From 6a297ff1d52f4d5f779e5a893055ddb0433c0ca2 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Sat, 9 Aug 2025 03:35:53 +0000 Subject: [PATCH 1/7] Initial plan From 98d7c87d93ef9990ec06e13c63721dcf4cff2e97 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Sat, 9 Aug 2025 03:47:29 +0000 Subject: [PATCH 2/7] Implement symbolic raveling for CumOp: handle axis=None symbolically using flatten Co-authored-by: ricardoV94 <28983449+ricardoV94@users.noreply.github.com> --- pytensor/link/numba/dispatch/extra_ops.py | 23 ++++---- pytensor/link/pytorch/dispatch/extra_ops.py | 9 +-- pytensor/tensor/extra_ops.py | 61 +++++++-------------- tests/tensor/test_extra_ops.py | 11 ++-- 4 files changed, 39 insertions(+), 65 deletions(-) diff --git a/pytensor/link/numba/dispatch/extra_ops.py b/pytensor/link/numba/dispatch/extra_ops.py index f7700acf47..416ff80b03 100644 --- a/pytensor/link/numba/dispatch/extra_ops.py +++ b/pytensor/link/numba/dispatch/extra_ops.py @@ -37,21 +37,20 @@ def numba_funcify_CumOp(op: CumOp, node: Apply, **kwargs): mode = op.mode ndim = cast(TensorVariable, node.outputs[0]).ndim - if axis is not None: - if axis < 0: - axis = ndim + axis - if axis < 0 or axis >= ndim: - raise ValueError(f"Invalid axis {axis} for array with ndim {ndim}") + if axis < 0: + axis = ndim + axis + if axis < 0 or axis >= ndim: + raise ValueError(f"Invalid axis {axis} for array with ndim {ndim}") - reaxis_first = (axis, *(i for i in range(ndim) if i != axis)) - reaxis_first_inv = tuple(np.argsort(reaxis_first)) + reaxis_first = (axis, *(i for i in range(ndim) if i != axis)) + reaxis_first_inv = tuple(np.argsort(reaxis_first)) if mode == "add": - if axis is None or ndim == 1: + if ndim == 1: @numba_basic.numba_njit def cumop(x): - return np.cumsum(x) + return np.cumsum(x, axis=axis) else: @@ -71,11 +70,11 @@ def cumop(x): return res.transpose(reaxis_first_inv) else: - if axis is None or ndim == 1: + if ndim == 1: @numba_basic.numba_njit def cumop(x): - return np.cumprod(x) + return np.cumprod(x, axis=axis) else: @@ -92,7 +91,7 @@ def cumop(x): for m in range(1, x.shape[axis]): res[m] = res[m - 1] * x_axis_first[m] - return res.transpose(reaxis_first) + return res.transpose(reaxis_first_inv) return cumop diff --git a/pytensor/link/pytorch/dispatch/extra_ops.py b/pytensor/link/pytorch/dispatch/extra_ops.py index 74284d651d..912083f9e3 100644 --- a/pytensor/link/pytorch/dispatch/extra_ops.py +++ b/pytensor/link/pytorch/dispatch/extra_ops.py @@ -10,15 +10,10 @@ def pytorch_funcify_Cumop(op, **kwargs): mode = op.mode def cumop(x): - if axis is None: - x = x.reshape(-1) - dim = 0 - else: - dim = axis if mode == "add": - return torch.cumsum(x, dim=dim) + return torch.cumsum(x, dim=axis) else: - return torch.cumprod(x, dim=dim) + return torch.cumprod(x, dim=axis) return cumop diff --git a/pytensor/tensor/extra_ops.py b/pytensor/tensor/extra_ops.py index a6eafcf485..939ce0cb5f 100644 --- a/pytensor/tensor/extra_ops.py +++ b/pytensor/tensor/extra_ops.py @@ -28,7 +28,7 @@ from pytensor.scalar import upcast from pytensor.tensor import TensorLike, as_tensor_variable from pytensor.tensor import basic as ptb -from pytensor.tensor.basic import alloc, join, second +from pytensor.tensor.basic import alloc, join, second, flatten from pytensor.tensor.exceptions import NotScalarConstantError from pytensor.tensor.math import abs as pt_abs from pytensor.tensor.math import all as pt_all @@ -297,27 +297,23 @@ class 
CumOp(COp): c_axis=int_t, mode=EnumList(("MODE_ADD", "add"), ("MODE_MUL", "mul")) ) - def __init__(self, axis: int | None = None, mode="add"): + def __init__(self, axis: int, mode="add"): if mode not in ("add", "mul"): raise ValueError(f'{type(self).__name__}: Unknown mode "{mode}"') - if not (isinstance(axis, int) or axis is None): - raise TypeError("axis must be an integer or None.") + if not isinstance(axis, int): + raise TypeError("axis must be an integer.") self.axis = axis self.mode = mode @property def c_axis(self) -> int: - if self.axis is None: - return numpy_axis_is_none_flag return self.axis def make_node(self, x): x = ptb.as_tensor_variable(x) out_type = x.type() - if self.axis is None: - out_type = vector(dtype=x.dtype) # Flatten - elif self.axis >= x.ndim or self.axis < -x.ndim: + if self.axis >= x.ndim or self.axis < -x.ndim: raise ValueError(f"axis(={self.axis}) out of bounds") return Apply(self, [x], [out_type]) @@ -334,17 +330,6 @@ def grad(self, inputs, output_gradients): (x,) = inputs (gi,) = output_gradients - if self.axis is None: - if self.mode == "add": - return [cumsum(gi[::-1])[::-1].reshape(x.shape)] - elif self.mode == "mul": - fx = cumprod(x, axis=self.axis) - return [cumsum((fx * gi)[::-1])[::-1].reshape(x.shape) / x] - else: - raise NotImplementedError( - f'{type(self).__name__}: unknown gradient for mode "{self.mode}"' - ) - reverse_slicing = [slice(None, None, None)] * gi.ndim reverse_slicing[self.axis] = slice(None, None, -1) reverse_slicing = tuple(reverse_slicing) @@ -361,9 +346,6 @@ def grad(self, inputs, output_gradients): ) def infer_shape(self, fgraph, node, shapes): - if self.axis is None and len(shapes[0]) > 1: - return [(prod(shapes[0]),)] # Flatten - return shapes def c_support_code_apply(self, node: Apply, name: str) -> str: @@ -376,10 +358,7 @@ def c_code(self, node, name, inames, onames, sub): fail = sub["fail"] params = sub["params"] - if self.axis is None: - axis_code = "int axis = NPY_RAVEL_AXIS;\n" - else: - axis_code = f"int axis = {params}->c_axis;\n" + axis_code = f"int axis = {params}->c_axis;\n" code = ( axis_code @@ -451,7 +430,12 @@ def cumsum(x, axis=None): .. versionadded:: 0.7 """ - return CumOp(axis=axis, mode="add")(x) + if axis is None: + # Handle raveling symbolically by flattening first, then applying cumsum with axis=0 + x_flattened = flatten(x, ndim=1) # This creates a 1D tensor + return CumOp(axis=0, mode="add")(x_flattened) + else: + return CumOp(axis=axis, mode="add")(x) def cumprod(x, axis=None): @@ -471,7 +455,12 @@ def cumprod(x, axis=None): .. 
versionadded:: 0.7 """ - return CumOp(axis=axis, mode="mul")(x) + if axis is None: + # Handle raveling symbolically by flattening first, then applying cumprod with axis=0 + x_flattened = flatten(x, ndim=1) # This creates a 1D tensor + return CumOp(axis=0, mode="mul")(x_flattened) + else: + return CumOp(axis=axis, mode="mul")(x) @_vectorize_node.register(CumOp) @@ -479,18 +468,8 @@ def vectorize_cum_op(op: CumOp, node: Apply, batch_x): """Vectorize the CumOp to work on a batch of inputs.""" [original_x] = node.inputs batch_ndim = batch_x.ndim - original_x.ndim - axis = op.axis - if axis is None and original_x.ndim == 1: - axis = 0 - elif axis is not None: - axis = normalize_axis_index(op.axis, original_x.ndim) - - if axis is None: - # Ravel all unbatched dimensions and perform CumOp on the last axis - batch_x_raveled = [batch_x.flatten(ndim=batch_ndim + 1) for x in batch_x] - return type(op)(axis=-1, mode=op.mode).make_node(batch_x_raveled) - else: - return type(op)(axis=axis + batch_ndim, mode=op.mode).make_node(batch_x) + axis = normalize_axis_index(op.axis, original_x.ndim) + return type(op)(axis=axis + batch_ndim, mode=op.mode).make_node(batch_x) def diff(x, n=1, axis=-1): diff --git a/tests/tensor/test_extra_ops.py b/tests/tensor/test_extra_ops.py index 8274ddbcea..9ef331d956 100644 --- a/tests/tensor/test_extra_ops.py +++ b/tests/tensor/test_extra_ops.py @@ -194,7 +194,7 @@ class TestCumOp(utt.InferShapeTester): def setup_method(self): super().setup_method() self.op_class = CumOp - self.op = CumOp() + self.op = CumOp(axis=0) # Use a specific axis since None is no longer supported def test_cum_op(self): x = tensor3("x") @@ -225,8 +225,8 @@ def test_infer_shape(self): x = tensor3("x") a = np.random.random((3, 5, 2)).astype(config.floatX) - # Test axis=None - self._compile_and_check([x], [self.op(x)], [a], self.op_class) + # Test axis=None using cumsum function (which now handles it symbolically) + self._compile_and_check([x], [cumsum(x)], [a], type(cumsum(x).owner.op)) for axis in range(-len(a.shape), len(a.shape)): self._compile_and_check([x], [cumsum(x, axis=axis)], [a], self.op_class) @@ -234,8 +234,9 @@ def test_infer_shape(self): def test_grad(self): a = np.random.random((3, 5, 2)).astype(config.floatX) - utt.verify_grad(self.op_class(mode="add"), [a]) # Test axis=None - utt.verify_grad(self.op_class(mode="mul"), [a]) # Test axis=None + # Test axis=None using cumsum/cumprod functions (which now handle it symbolically) + utt.verify_grad(lambda x: cumsum(x), [a]) # Test axis=None for cumsum + utt.verify_grad(lambda x: cumprod(x), [a]) # Test axis=None for cumprod for axis in range(-len(a.shape), len(a.shape)): utt.verify_grad(self.op_class(axis=axis, mode="add"), [a], eps=4e-4) From d3bfb153ba7458d604bdc4fd5d4bceeaac9dca2f Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Sat, 9 Aug 2025 04:10:03 +0000 Subject: [PATCH 3/7] Address review comments: use .ravel(), normalize axis, simplify negative axis handling Co-authored-by: ricardoV94 <28983449+ricardoV94@users.noreply.github.com> --- pytensor/tensor/extra_ops.py | 20 +++++++++++--------- tests/tensor/test_extra_ops.py | 2 +- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/pytensor/tensor/extra_ops.py b/pytensor/tensor/extra_ops.py index 939ce0cb5f..4b41cc39e1 100644 --- a/pytensor/tensor/extra_ops.py +++ b/pytensor/tensor/extra_ops.py @@ -302,6 +302,8 @@ def __init__(self, axis: int, mode="add"): raise ValueError(f'{type(self).__name__}: Unknown mode "{mode}"') 
if not isinstance(axis, int): raise TypeError("axis must be an integer.") + if axis < 0: + raise ValueError("axis must be non-negative.") self.axis = axis self.mode = mode @@ -313,7 +315,7 @@ def make_node(self, x): x = ptb.as_tensor_variable(x) out_type = x.type() - if self.axis >= x.ndim or self.axis < -x.ndim: + if self.axis >= x.ndim: raise ValueError(f"axis(={self.axis}) out of bounds") return Apply(self, [x], [out_type]) @@ -431,10 +433,10 @@ def cumsum(x, axis=None): """ if axis is None: - # Handle raveling symbolically by flattening first, then applying cumsum with axis=0 - x_flattened = flatten(x, ndim=1) # This creates a 1D tensor - return CumOp(axis=0, mode="add")(x_flattened) + return CumOp(axis=0, mode="add")(ptb.as_tensor_variable(x).ravel()) else: + x = ptb.as_tensor_variable(x) + axis = normalize_axis_index(axis, x.ndim) return CumOp(axis=axis, mode="add")(x) @@ -456,10 +458,10 @@ def cumprod(x, axis=None): """ if axis is None: - # Handle raveling symbolically by flattening first, then applying cumprod with axis=0 - x_flattened = flatten(x, ndim=1) # This creates a 1D tensor - return CumOp(axis=0, mode="mul")(x_flattened) + return CumOp(axis=0, mode="mul")(ptb.as_tensor_variable(x).ravel()) else: + x = ptb.as_tensor_variable(x) + axis = normalize_axis_index(axis, x.ndim) return CumOp(axis=axis, mode="mul")(x) @@ -468,8 +470,8 @@ def vectorize_cum_op(op: CumOp, node: Apply, batch_x): """Vectorize the CumOp to work on a batch of inputs.""" [original_x] = node.inputs batch_ndim = batch_x.ndim - original_x.ndim - axis = normalize_axis_index(op.axis, original_x.ndim) - return type(op)(axis=axis + batch_ndim, mode=op.mode).make_node(batch_x) + # op.axis is already normalized and non-negative + return type(op)(axis=op.axis + batch_ndim, mode=op.mode).make_node(batch_x) def diff(x, n=1, axis=-1): diff --git a/tests/tensor/test_extra_ops.py b/tests/tensor/test_extra_ops.py index 9ef331d956..529f6902f0 100644 --- a/tests/tensor/test_extra_ops.py +++ b/tests/tensor/test_extra_ops.py @@ -226,7 +226,7 @@ def test_infer_shape(self): a = np.random.random((3, 5, 2)).astype(config.floatX) # Test axis=None using cumsum function (which now handles it symbolically) - self._compile_and_check([x], [cumsum(x)], [a], type(cumsum(x).owner.op)) + self._compile_and_check([x], [cumsum(x)], [a], self.op_class) for axis in range(-len(a.shape), len(a.shape)): self._compile_and_check([x], [cumsum(x, axis=axis)], [a], self.op_class) From 007ec2d0a951ed2ca4e3098c26495a99e9d25828 Mon Sep 17 00:00:00 2001 From: "copilot-swe-agent[bot]" <198982749+Copilot@users.noreply.github.com> Date: Sat, 9 Aug 2025 04:31:48 +0000 Subject: [PATCH 4/7] Address review comments: remove redundant checks and c_axis property, eliminate code duplication Co-authored-by: ricardoV94 <28983449+ricardoV94@users.noreply.github.com> --- pytensor/link/numba/dispatch/extra_ops.py | 5 +---- pytensor/tensor/extra_ops.py | 21 ++++++++++----------- tests/tensor/test_extra_ops.py | 4 ++-- 3 files changed, 13 insertions(+), 17 deletions(-) diff --git a/pytensor/link/numba/dispatch/extra_ops.py b/pytensor/link/numba/dispatch/extra_ops.py index 416ff80b03..22e1dcf85c 100644 --- a/pytensor/link/numba/dispatch/extra_ops.py +++ b/pytensor/link/numba/dispatch/extra_ops.py @@ -37,10 +37,7 @@ def numba_funcify_CumOp(op: CumOp, node: Apply, **kwargs): mode = op.mode ndim = cast(TensorVariable, node.outputs[0]).ndim - if axis < 0: - axis = ndim + axis - if axis < 0 or axis >= ndim: - raise ValueError(f"Invalid axis {axis} for array with ndim {ndim}") + 
reaxis_first = (axis, *(i for i in range(ndim) if i != axis)) reaxis_first_inv = tuple(np.argsort(reaxis_first)) diff --git a/pytensor/tensor/extra_ops.py b/pytensor/tensor/extra_ops.py index 4b41cc39e1..e884630764 100644 --- a/pytensor/tensor/extra_ops.py +++ b/pytensor/tensor/extra_ops.py @@ -294,7 +294,7 @@ class CumOp(COp): __props__ = ("axis", "mode") check_input = False params_type = ParamsType( - c_axis=int_t, mode=EnumList(("MODE_ADD", "add"), ("MODE_MUL", "mul")) + axis=int_t, mode=EnumList(("MODE_ADD", "add"), ("MODE_MUL", "mul")) ) def __init__(self, axis: int, mode="add"): @@ -307,9 +307,6 @@ def __init__(self, axis: int, mode="add"): self.axis = axis self.mode = mode - @property - def c_axis(self) -> int: - return self.axis def make_node(self, x): x = ptb.as_tensor_variable(x) @@ -360,7 +357,7 @@ def c_code(self, node, name, inames, onames, sub): fail = sub["fail"] params = sub["params"] - axis_code = f"int axis = {params}->c_axis;\n" + axis_code = f"int axis = {params}->axis;\n" code = ( axis_code @@ -432,12 +429,13 @@ def cumsum(x, axis=None): .. versionadded:: 0.7 """ + x = ptb.as_tensor_variable(x) if axis is None: - return CumOp(axis=0, mode="add")(ptb.as_tensor_variable(x).ravel()) + x = x.ravel() + axis = 0 else: - x = ptb.as_tensor_variable(x) axis = normalize_axis_index(axis, x.ndim) - return CumOp(axis=axis, mode="add")(x) + return CumOp(axis=axis, mode="add")(x) def cumprod(x, axis=None): @@ -457,12 +455,13 @@ def cumprod(x, axis=None): .. versionadded:: 0.7 """ + x = ptb.as_tensor_variable(x) if axis is None: - return CumOp(axis=0, mode="mul")(ptb.as_tensor_variable(x).ravel()) + x = x.ravel() + axis = 0 else: - x = ptb.as_tensor_variable(x) axis = normalize_axis_index(axis, x.ndim) - return CumOp(axis=axis, mode="mul")(x) + return CumOp(axis=axis, mode="mul")(x) @_vectorize_node.register(CumOp) diff --git a/tests/tensor/test_extra_ops.py b/tests/tensor/test_extra_ops.py index 529f6902f0..833c1b3224 100644 --- a/tests/tensor/test_extra_ops.py +++ b/tests/tensor/test_extra_ops.py @@ -225,7 +225,7 @@ def test_infer_shape(self): x = tensor3("x") a = np.random.random((3, 5, 2)).astype(config.floatX) - # Test axis=None using cumsum function (which now handles it symbolically) + # Test default axis=None self._compile_and_check([x], [cumsum(x)], [a], self.op_class) for axis in range(-len(a.shape), len(a.shape)): @@ -234,7 +234,7 @@ def test_infer_shape(self): def test_grad(self): a = np.random.random((3, 5, 2)).astype(config.floatX) - # Test axis=None using cumsum/cumprod functions (which now handle it symbolically) + # Test default axis=None using cumsum/cumprod functions utt.verify_grad(lambda x: cumsum(x), [a]) # Test axis=None for cumsum utt.verify_grad(lambda x: cumprod(x), [a]) # Test axis=None for cumprod From 90e11bbe35995da7cfb29f00eab276a05b6583b6 Mon Sep 17 00:00:00 2001 From: Ricardo Vieira <28983449+ricardoV94@users.noreply.github.com> Date: Sat, 9 Aug 2025 15:38:57 +0200 Subject: [PATCH 5/7] Apply suggestions from code review --- tests/tensor/test_extra_ops.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/tensor/test_extra_ops.py b/tests/tensor/test_extra_ops.py index 833c1b3224..bd8eb1ce73 100644 --- a/tests/tensor/test_extra_ops.py +++ b/tests/tensor/test_extra_ops.py @@ -194,7 +194,7 @@ class TestCumOp(utt.InferShapeTester): def setup_method(self): super().setup_method() self.op_class = CumOp - self.op = CumOp(axis=0) # Use a specific axis since None is no longer supported + self.op = CumOp(axis=0) def test_cum_op(self): x = 
tensor3("x") From 5bb857e8160d2826793cef41b0cc0c26064cc80e Mon Sep 17 00:00:00 2001 From: ricardoV94 Date: Sat, 9 Aug 2025 16:10:52 +0200 Subject: [PATCH 6/7] Cleanup C-code --- pytensor/tensor/extra_ops.py | 78 ++++++++++++++---------------------- 1 file changed, 31 insertions(+), 47 deletions(-) diff --git a/pytensor/tensor/extra_ops.py b/pytensor/tensor/extra_ops.py index e884630764..4bed73e25a 100644 --- a/pytensor/tensor/extra_ops.py +++ b/pytensor/tensor/extra_ops.py @@ -1,5 +1,6 @@ import warnings from collections.abc import Collection, Iterable +from textwrap import dedent import numpy as np @@ -20,7 +21,6 @@ from pytensor.npy_2_compat import ( normalize_axis_index, npy_2_compat_header, - numpy_axis_is_none_flag, old_np_unique, ) from pytensor.raise_op import Assert @@ -28,7 +28,7 @@ from pytensor.scalar import upcast from pytensor.tensor import TensorLike, as_tensor_variable from pytensor.tensor import basic as ptb -from pytensor.tensor.basic import alloc, join, second, flatten +from pytensor.tensor.basic import alloc, join, second from pytensor.tensor.exceptions import NotScalarConstantError from pytensor.tensor.math import abs as pt_abs from pytensor.tensor.math import all as pt_all @@ -48,7 +48,7 @@ from pytensor.tensor.math import sum as pt_sum from pytensor.tensor.shape import Shape_i from pytensor.tensor.subtensor import advanced_inc_subtensor1, set_subtensor -from pytensor.tensor.type import TensorType, dvector, int_dtypes, integer_dtypes, vector +from pytensor.tensor.type import TensorType, dvector, int_dtypes, integer_dtypes from pytensor.tensor.utils import normalize_reduce_axis from pytensor.tensor.variable import TensorVariable from pytensor.utils import LOCAL_BITWIDTH, PYTHON_INT_BITWIDTH @@ -307,7 +307,6 @@ def __init__(self, axis: int, mode="add"): self.axis = axis self.mode = mode - def make_node(self, x): x = ptb.as_tensor_variable(x) out_type = x.type() @@ -325,7 +324,7 @@ def perform(self, node, inputs, output_storage): else: z[0] = np.cumprod(x, axis=self.axis) - def grad(self, inputs, output_gradients): + def L_op(self, inputs, outputs, output_gradients): (x,) = inputs (gi,) = output_gradients @@ -357,58 +356,43 @@ def c_code(self, node, name, inames, onames, sub): fail = sub["fail"] params = sub["params"] - axis_code = f"int axis = {params}->axis;\n" - - code = ( - axis_code - + f""" - #undef NPY_UF_DBG_TRACING - #define NPY_UF_DBG_TRACING 1 - - if (axis == 0 && PyArray_NDIM({x}) == 1) - axis = NPY_RAVEL_AXIS; - npy_intp shape[1] = {{ PyArray_SIZE({x}) }}; - if(axis == NPY_RAVEL_AXIS && !({z} && PyArray_DIMS({z})[0] == shape[0])) - {{ - Py_XDECREF({z}); - {z} = (PyArrayObject*) PyArray_SimpleNew(1, shape, PyArray_TYPE({x})); - }} + return dedent( + f""" + int axis = {params}->axis; - else if(axis != NPY_RAVEL_AXIS && !({z} && PyArray_CompareLists(PyArray_DIMS({z}), PyArray_DIMS({x}), PyArray_NDIM({x})))) - {{ - Py_XDECREF({z}); - {z} = (PyArrayObject*) PyArray_SimpleNew(PyArray_NDIM({x}), PyArray_DIMS({x}), PyArray_TYPE({x})); - }} + if (!({z} && PyArray_CompareLists(PyArray_DIMS({z}), PyArray_DIMS({x}), PyArray_NDIM({x})))) + {{ + Py_XDECREF({z}); + {z} = (PyArrayObject*) PyArray_SimpleNew(PyArray_NDIM({x}), PyArray_DIMS({x}), PyArray_TYPE({x})); + if (!{z}){{ {fail} }}; + }} + + {{ - if (!{z}) + PyObject * t = NULL; + if({params}->mode == MODE_ADD) + t = PyArray_CumSum({x}, axis, PyArray_TYPE({x}), {z}); + else if({params}->mode == MODE_MUL) + t = PyArray_CumProd({x}, axis, PyArray_TYPE({x}), {z}); + + if (!t){{ {fail}; - {{ - - PyObject * t = NULL; - 
if({params}->mode == MODE_ADD) - t = PyArray_CumSum( - {x}, axis, - PyArray_TYPE({x}), {z}); - else if({params}->mode == MODE_MUL) - t = PyArray_CumProd( - {x}, axis, - PyArray_TYPE({x}), {z}); - - if (!t){{ - {fail}; - }} - // Because PyArray_CumSum/CumProd returns a newly created reference on t. - Py_XDECREF(t); }} + + // Because PyArray_CumSum/CumProd returns a newly created reference on t. + Py_XDECREF(t); + }} """ ) - return code - def c_code_cache_version(self): - return (9,) + return (10,) def __str__(self): + if self.mode == "add": + return f"Cumsum{{axis={self.axis}}}" + elif self.mode == "mul": + return f"Cumprod{{axis={self.axis}}}" return f"{self.__class__.__name__}{{{self.axis}, {self.mode}}}" From 5a2af98bfb0c201776ac0876dc301c4c4822661d Mon Sep 17 00:00:00 2001 From: ricardoV94 Date: Sat, 9 Aug 2025 16:13:11 +0200 Subject: [PATCH 7/7] pre-commit --- pytensor/link/numba/dispatch/extra_ops.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/pytensor/link/numba/dispatch/extra_ops.py b/pytensor/link/numba/dispatch/extra_ops.py index 22e1dcf85c..318683ddab 100644 --- a/pytensor/link/numba/dispatch/extra_ops.py +++ b/pytensor/link/numba/dispatch/extra_ops.py @@ -37,8 +37,6 @@ def numba_funcify_CumOp(op: CumOp, node: Apply, **kwargs): mode = op.mode ndim = cast(TensorVariable, node.outputs[0]).ndim - - reaxis_first = (axis, *(i for i in range(ndim) if i != axis)) reaxis_first_inv = tuple(np.argsort(reaxis_first))
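
A quick end-to-end sanity check of the behavior this series settles on: cumsum/cumprod with axis=None now ravel the input symbolically and dispatch to CumOp(axis=0, ...), and negative axes are normalized in the helper functions rather than inside the Op. A minimal sketch (assumes a PyTensor build with these patches applied; variable names and shapes are illustrative, not part of the patches):

    import numpy as np
    import pytensor
    import pytensor.tensor as pt

    x = pt.matrix("x")

    # axis=None: the helper ravels x symbolically, then applies CumOp(axis=0),
    # so the result matches NumPy's flatten-first semantics.
    f = pytensor.function([x], pt.cumsum(x))
    a = np.arange(6, dtype=pytensor.config.floatX).reshape(3, 2)
    np.testing.assert_allclose(f(a), np.cumsum(a))

    # Negative axes are normalized before CumOp is constructed
    # (the Op itself now only accepts a concrete, non-negative axis).
    g = pytensor.function([x], pt.cumprod(x, axis=-1))
    np.testing.assert_allclose(g(a), np.cumprod(a, axis=-1))

Because the raveling is an ordinary reshape node in the graph, gradients for the axis=None case flow through the reshape automatically, which is why the dedicated axis-is-None branches in grad/L_op, infer_shape, the C code, and the numba/pytorch dispatchers could all be deleted.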