From 8dc72feb9d3d516366d9f714a52fa6e9d23edd2f Mon Sep 17 00:00:00 2001
From: emekaokoli19
Date: Mon, 24 Nov 2025 17:16:04 +0100
Subject: [PATCH] Fix Unique Op for NumPy 2 np.unique changes

NumPy 2.0 changed np.unique: with return_inverse=True, the inverse
indices now keep the shape of the input when axis is None, and have the
length of the chosen axis otherwise. Call np.unique directly in
Unique.perform, encode the new inverse shape in make_node and
infer_shape, and delete the old_np_unique shim (plus the now-unused
prod import) that replicated the pre-2.0 behavior. The tests now
compare against np.unique itself, and the axis bounds check in
make_node is also removed.
---
 pytensor/npy_2_compat.py       | 22 ----------------------
 pytensor/tensor/extra_ops.py   | 34 ++++++++++++++++++++--------------
 tests/tensor/test_extra_ops.py | 17 ++++++++---------
 3 files changed, 28 insertions(+), 45 deletions(-)
 delete mode 100644 pytensor/npy_2_compat.py

diff --git a/pytensor/npy_2_compat.py b/pytensor/npy_2_compat.py
deleted file mode 100644
index 29aa805422..0000000000
--- a/pytensor/npy_2_compat.py
+++ /dev/null
@@ -1,22 +0,0 @@
-import numpy as np
-
-
-# function that replicates np.unique from numpy < 2.0
-def old_np_unique(
-    arr, return_index=False, return_inverse=False, return_counts=False, axis=None
-):
-    """Replicate np.unique from numpy versions < 2.0"""
-    if not return_inverse:
-        return np.unique(arr, return_index, return_inverse, return_counts, axis)
-
-    outs = list(np.unique(arr, return_index, return_inverse, return_counts, axis))
-
-    inv_idx = 2 if return_index else 1
-
-    if axis is None:
-        outs[inv_idx] = np.ravel(outs[inv_idx])
-    else:
-        inv_shape = (arr.shape[axis],)
-        outs[inv_idx] = outs[inv_idx].reshape(inv_shape)
-
-    return tuple(outs)
diff --git a/pytensor/tensor/extra_ops.py b/pytensor/tensor/extra_ops.py
index 33a5a6b8dc..031b4c5c92 100644
--- a/pytensor/tensor/extra_ops.py
+++ b/pytensor/tensor/extra_ops.py
@@ -19,7 +19,6 @@
 from pytensor.link.c.op import COp
 from pytensor.link.c.params_type import ParamsType
 from pytensor.link.c.type import EnumList, Generic
-from pytensor.npy_2_compat import old_np_unique
 from pytensor.raise_op import Assert
 from pytensor.scalar import int64 as int_t
 from pytensor.scalar import upcast
@@ -37,7 +36,6 @@
     lt,
     maximum,
     minimum,
-    prod,
     sign,
     switch,
 )
@@ -1204,31 +1202,37 @@ def make_node(self, x):
         if axis is None:
             out_shape = (None,)
         else:
-            if axis >= x.type.ndim:
-                raise ValueError(
-                    f"Axis {axis} out of range for input {x} with ndim={x.type.ndim}."
-                )
             out_shape = tuple(
                 None if dim == axis else s for dim, s in enumerate(x.type.shape)
             )
 
         outputs = [TensorType(dtype=x.dtype, shape=out_shape)()]
-        typ = TensorType(dtype="int64", shape=(None,))
+
+        index_type = TensorType("int64", shape=(None,))
+        count_type = TensorType("int64", shape=(None,))
+
+        # The inverse output follows NumPy 2 shape semantics.
+        if axis is None:
+            inverse_shape = x.type.shape  # same shape as the input
+        else:
+            inverse_shape = (x.type.shape[axis],)
+
+        inverse_type = TensorType("int64", shape=inverse_shape)
 
         if self.return_index:
-            outputs.append(typ())
+            outputs.append(index_type())
 
         if self.return_inverse:
-            outputs.append(typ())
+            outputs.append(inverse_type())
 
         if self.return_counts:
-            outputs.append(typ())
+            outputs.append(count_type())
 
         return Apply(self, [x], outputs)
 
     def perform(self, node, inputs, output_storage):
         [x] = inputs
-        outs = old_np_unique(
+        outs = np.unique(
             x,
             return_index=self.return_index,
             return_inverse=self.return_inverse,
@@ -1255,10 +1259,12 @@ def infer_shape(self, fgraph, node, i0_shapes):
         if self.return_inverse:
             return_index_out_idx = 2 if self.return_index else 1
 
-            if self.axis is not None:
-                shape = (x_shape[axis],)
+            if self.axis is None:
+                # NumPy 2: the inverse indices have the same shape as the input
+                shape = x_shape
             else:
-                shape = (prod(x_shape),)
+                # NumPy 2: the inverse indices have the length of the chosen axis
+                shape = (x_shape[self.axis],)
 
             out_shapes[return_index_out_idx] = shape
 
diff --git a/tests/tensor/test_extra_ops.py b/tests/tensor/test_extra_ops.py
index 01de6cb517..ad440ecf34 100644
--- a/tests/tensor/test_extra_ops.py
+++ b/tests/tensor/test_extra_ops.py
@@ -11,7 +11,6 @@
 from pytensor.graph import rewrite_graph
 from pytensor.graph.basic import Constant, equal_computations
 from pytensor.graph.traversal import applys_between
-from pytensor.npy_2_compat import old_np_unique
 from pytensor.raise_op import Assert
 from pytensor.tensor import alloc
 from pytensor.tensor.elemwise import DimShuffle
@@ -902,14 +901,14 @@ def setup_method(self):
     )
     def test_basic_vector(self, x, inp, axis):
         list_outs_expected = [
-            old_np_unique(inp, axis=axis),
-            old_np_unique(inp, True, axis=axis),
-            old_np_unique(inp, False, True, axis=axis),
-            old_np_unique(inp, True, True, axis=axis),
-            old_np_unique(inp, False, False, True, axis=axis),
-            old_np_unique(inp, True, False, True, axis=axis),
-            old_np_unique(inp, False, True, True, axis=axis),
-            old_np_unique(inp, True, True, True, axis=axis),
+            np.unique(inp, axis=axis),
+            np.unique(inp, True, axis=axis),
+            np.unique(inp, False, True, axis=axis),
+            np.unique(inp, True, True, axis=axis),
+            np.unique(inp, False, False, True, axis=axis),
+            np.unique(inp, True, False, True, axis=axis),
+            np.unique(inp, False, True, True, axis=axis),
+            np.unique(inp, True, True, True, axis=axis),
         ]
         for params, outs_expected in zip(
             self.op_params, list_outs_expected, strict=True
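
A quick way to sanity-check the inverse-shape semantics this patch encodes
is to call np.unique directly (a minimal sketch, assuming a recent NumPy
2.x release; the array `a` is only illustrative):

    import numpy as np

    a = np.array([[1, 2], [2, 3]])

    # axis=None: NumPy 2 returns inverse indices shaped like the input,
    # where NumPy < 2.0 returned a flat vector
    values, inverse = np.unique(a, return_inverse=True)
    assert inverse.shape == a.shape        # (2, 2); previously (4,)

    # explicit axis: the inverse has the length of that axis
    values, inverse = np.unique(a, return_inverse=True, axis=0)
    assert inverse.shape == (a.shape[0],)  # (2,)

The deleted old_np_unique shim reproduced exactly the pre-2.0 shapes
(raveled inverse for axis=None), which is why it is no longer needed once
make_node and infer_shape follow the NumPy 2 convention above.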