|
24 | 24 | from pytensor.graph.op import Op |
25 | 25 | from pytensor.link.c.op import COp |
26 | 26 | from pytensor.link.c.type import generic |
27 | | -from pytensor.misc.safe_asarray import _asarray |
28 | 27 | from pytensor.sparse.type import SparseTensorType, _is_sparse |
29 | 28 | from pytensor.sparse.utils import hash_from_sparse |
30 | 29 | from pytensor.tensor import basic as ptb |
@@ -595,11 +594,11 @@ def perform(self, node, inputs, out): |
595 | 594 | (csm,) = inputs |
596 | 595 | out[0][0] = csm.data |
597 | 596 | if str(csm.data.dtype) == "int32": |
598 | | - out[0][0] = _asarray(out[0][0], dtype="int32") |
| 597 | + out[0][0] = np.asarray(out[0][0], dtype="int32") |
599 | 598 | # backport |
600 | | - out[1][0] = _asarray(csm.indices, dtype="int32") |
601 | | - out[2][0] = _asarray(csm.indptr, dtype="int32") |
602 | | - out[3][0] = _asarray(csm.shape, dtype="int32") |
| 599 | + out[1][0] = np.asarray(csm.indices, dtype="int32") |
| 600 | + out[2][0] = np.asarray(csm.indptr, dtype="int32") |
| 601 | + out[3][0] = np.asarray(csm.shape, dtype="int32") |
603 | 602 |
|
604 | 603 | def grad(self, inputs, g): |
605 | 604 | # g[1:] is all integers, so their Jacobian in this op |
@@ -698,17 +697,17 @@ def make_node(self, data, indices, indptr, shape): |
698 | 697 |
|
699 | 698 | if not isinstance(indices, Variable): |
700 | 699 | indices_ = np.asarray(indices) |
701 | | - indices_32 = _asarray(indices, dtype="int32") |
| 700 | + indices_32 = np.asarray(indices, dtype="int32") |
702 | 701 | assert (indices_ == indices_32).all() |
703 | 702 | indices = indices_32 |
704 | 703 | if not isinstance(indptr, Variable): |
705 | 704 | indptr_ = np.asarray(indptr) |
706 | | - indptr_32 = _asarray(indptr, dtype="int32") |
| 705 | + indptr_32 = np.asarray(indptr, dtype="int32") |
707 | 706 | assert (indptr_ == indptr_32).all() |
708 | 707 | indptr = indptr_32 |
709 | 708 | if not isinstance(shape, Variable): |
710 | 709 | shape_ = np.asarray(shape) |
711 | | - shape_32 = _asarray(shape, dtype="int32") |
| 710 | + shape_32 = np.asarray(shape, dtype="int32") |
712 | 711 | assert (shape_ == shape_32).all() |
713 | 712 | shape = shape_32 |
714 | 713 |
|
@@ -1461,7 +1460,7 @@ def perform(self, node, inputs, outputs): |
1461 | 1460 | (x, ind1, ind2) = inputs |
1462 | 1461 | (out,) = outputs |
1463 | 1462 | assert _is_sparse(x) |
1464 | | - out[0] = _asarray(x[ind1, ind2], x.dtype) |
| 1463 | + out[0] = np.asarray(x[ind1, ind2], x.dtype) |
1465 | 1464 |
|
1466 | 1465 |
|
1467 | 1466 | get_item_scalar = GetItemScalar() |
@@ -2142,7 +2141,7 @@ def perform(self, node, inputs, outputs): |
2142 | 2141 |
|
2143 | 2142 | # The asarray is needed as in some cases this returns a |
2144 | 2143 | # numpy.matrixlib.defmatrix.matrix object and not an ndarray. |
2145 | | - out[0] = _asarray(x + y, dtype=node.outputs[0].type.dtype) |
| 2144 | + out[0] = np.asarray(x + y, dtype=node.outputs[0].type.dtype) |
2146 | 2145 |
|
2147 | 2146 | def grad(self, inputs, gout): |
2148 | 2147 | (x, y) = inputs |
@@ -3497,7 +3496,7 @@ def perform(self, node, inputs, outputs): |
3497 | 3496 |
|
3498 | 3497 | # The cast is needed as otherwise we hit the casting bug mentioned in |
3499 | 3498 | # the documentation of the (now-removed) _asarray helper. |
3500 | | - out[0] = _asarray(variable, str(variable.dtype)) |
| 3499 | + out[0] = np.asarray(variable, str(variable.dtype)) |
3501 | 3500 |
|
3502 | 3501 | def grad(self, inputs, gout): |
3503 | 3502 | # a is sparse, b is dense, g_out is dense |
@@ -4012,7 +4011,7 @@ def perform(self, node, inputs, out): |
4012 | 4011 | if x_is_sparse and y_is_sparse: |
4013 | 4012 | rval = rval.toarray() |
4014 | 4013 |
|
4015 | | - out[0] = _asarray(rval, dtype=node.outputs[0].dtype) |
| 4014 | + out[0] = np.asarray(rval, dtype=node.outputs[0].dtype) |
4016 | 4015 |
|
4017 | 4016 | def grad(self, inputs, gout): |
4018 | 4017 | (x, y) = inputs |
|
0 commit comments