Skip to content

Commit 4c02133

Browse files
Remove broadcastable shape arguments to TensorType
1 parent bec03eb commit 4c02133

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

75 files changed

+934
-870
lines changed

aesara/compile/debugmode.py

Lines changed: 14 additions & 14 deletions
Original file line number | Diff line number | Diff line change
@@ -848,17 +848,17 @@ def _get_preallocated_maps(
848848
or "ALL" in prealloc_modes
849849
):
850850
max_ndim = 0
851-
rev_out_broadcastable = []
851+
rev_out_shape = []
852852
for r in considered_outputs:
853853
if isinstance(r.type, TensorType):
854854
if max_ndim < r.ndim:
855-
rev_out_broadcastable += [True] * (r.ndim - max_ndim)
855+
rev_out_shape += [1] * (r.ndim - max_ndim)
856856
max_ndim = r.ndim
857-
assert len(rev_out_broadcastable) == max_ndim
857+
assert len(rev_out_shape) == max_ndim
858858

859-
for i, b in enumerate(r.broadcastable[::-1]):
860-
rev_out_broadcastable[i] = rev_out_broadcastable[i] and b
861-
out_broadcastable = rev_out_broadcastable[::-1]
859+
for i, s in enumerate(r.type.shape[::-1]):
860+
rev_out_shape[i] = 1 if rev_out_shape[i] == 1 and s == 1 else None
861+
out_shape = rev_out_shape[::-1]
862862

863863
if "strided" in prealloc_modes or "ALL" in prealloc_modes:
864864
check_ndim = config.DebugMode__check_preallocated_output_ndim
@@ -887,14 +887,14 @@ def _get_preallocated_maps(
887887
# Moreover, to avoid memory problems, we do not test with strides
888888
# 2 and -2 on those dimensions.
889889
step_signs_list = []
890-
for b in out_broadcastable[-check_ndim:]:
891-
if b:
890+
for s in out_shape[-check_ndim:]:
891+
if s == 1:
892892
step_signs_list.append((1,))
893893
else:
894894
step_signs_list.append((-1, 1))
895895

896896
# Use the same step on all dimensions before the last check_ndim.
897-
if all(out_broadcastable[:-check_ndim]):
897+
if all(s == 1 for s in out_shape[:-check_ndim]):
898898
step_signs_list = [(1,)] + step_signs_list
899899
else:
900900
step_signs_list = [(-1, 1)] + step_signs_list
@@ -905,7 +905,7 @@ def _get_preallocated_maps(
905905

906906
# First, the dimensions above check_ndim, then the other ones
907907
# Do not test with 2 or -2 for dimensions above check_ndim
908-
steps = [step_signs[0]] * len(out_broadcastable[:-check_ndim])
908+
steps = [step_signs[0]] * len(out_shape[:-check_ndim])
909909
steps += [s * step_size for s in step_signs[1:]]
910910

911911
name = f"strided{tuple(steps)}"
@@ -932,8 +932,8 @@ def _get_preallocated_maps(
932932

933933
if "wrong_size" in prealloc_modes or "ALL" in prealloc_modes:
934934
# For each dimension, try size-1, size, size+1
935-
for dim, b in enumerate(out_broadcastable):
936-
if b:
935+
for dim, s in enumerate(out_shape):
936+
if s == 1:
937937
# The shape has to be 1
938938
continue
939939

@@ -947,11 +947,11 @@ def _get_preallocated_maps(
947947
for r in considered_outputs:
948948
if isinstance(r.type, TensorType):
949949
r_shape_diff = shape_diff[: r.ndim]
950-
out_shape = [
950+
new_buf_shape = [
951951
max((s + sd), 0)
952952
for s, sd in zip(r_vals[r].shape, r_shape_diff)
953953
]
954-
new_buf = np.empty(out_shape, dtype=r.type.dtype)
954+
new_buf = np.empty(new_buf_shape, dtype=r.type.dtype)
955955
new_buf[...] = np.asarray(def_val).astype(r.type.dtype)
956956
wrong_size[r] = new_buf
957957

aesara/gradient.py

Lines changed: 5 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -1802,13 +1802,11 @@ def verify_grad(
18021802
mode=mode,
18031803
)
18041804

1805-
tensor_pt = [
1806-
aesara.tensor.type.TensorType(
1807-
aesara.tensor.as_tensor_variable(p).dtype,
1808-
aesara.tensor.as_tensor_variable(p).broadcastable,
1809-
)(name=f"input {i}")
1810-
for i, p in enumerate(pt)
1811-
]
1805+
tensor_pt = []
1806+
for i, p in enumerate(pt):
1807+
p_t = aesara.tensor.as_tensor_variable(p).type()
1808+
p_t.name = f"input {i}"
1809+
tensor_pt.append(p_t)
18121810

18131811
# fun can be either a function or an actual Op instance
18141812
o_output = fun(*tensor_pt)

aesara/link/c/params_type.py

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -29,7 +29,7 @@
2929
3030
.. code-block:: python
3131
32-
params_type = ParamsType(attr1=TensorType('int32', (False, False)), attr2=ScalarType('float64'))
32+
params_type = ParamsType(attr1=TensorType('int32', shape=(None, None)), attr2=ScalarType('float64'))
3333
3434
If your op contains attributes ``attr1`` **and** ``attr2``, the default ``op.get_params()``
3535
implementation will automatically try to look for it and generate an appropriate Params object.
@@ -324,7 +324,7 @@ class ParamsType(CType):
324324
`ParamsType` constructor takes key-value args. Key will be the name of the
325325
attribute in the struct. Value is the Aesara type of this attribute,
326326
ie. an instance of (a subclass of) :class:`CType`
327-
(eg. ``TensorType('int64', (False,))``).
327+
(eg. ``TensorType('int64', (None,))``).
328328
329329
In a Python code any attribute named ``key`` will be available via::
330330

aesara/sandbox/multinomial.py

Lines changed: 3 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -44,7 +44,9 @@ def make_node(self, pvals, unis, n=1):
4444
odtype = pvals.dtype
4545
else:
4646
odtype = self.odtype
47-
out = at.tensor(dtype=odtype, shape=pvals.type.broadcastable)
47+
out = at.tensor(
48+
dtype=odtype, shape=tuple(1 if s == 1 else None for s in pvals.type.shape)
49+
)
4850
return Apply(self, [pvals, unis, as_scalar(n)], [out])
4951

5052
def grad(self, ins, outgrads):

aesara/sandbox/rng_mrg.py

Lines changed: 8 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -379,20 +379,23 @@ def make_node(self, rstate, size):
379379
# this op should not be called directly.
380380
#
381381
# call through MRG_RandomStream instead.
382-
broad = []
382+
out_shape = ()
383383
for i in range(self.output_type.ndim):
384-
broad.append(at.extract_constant(size[i]) == 1)
385-
output_type = self.output_type.clone(shape=broad)()
384+
if at.extract_constant(size[i]) == 1:
385+
out_shape += (1,)
386+
else:
387+
out_shape += (None,)
388+
output_var = self.output_type.clone(shape=out_shape)()
386389
rstate = as_tensor_variable(rstate)
387390
size = as_tensor_variable(size)
388-
return Apply(self, [rstate, size], [rstate.type(), output_type])
391+
return Apply(self, [rstate, size], [rstate.type(), output_var])
389392

390393
@classmethod
391394
def new(cls, rstate, ndim, dtype, size):
392395
v_size = as_tensor_variable(size)
393396
if ndim is None:
394397
ndim = get_vector_length(v_size)
395-
op = cls(TensorType(dtype, (False,) * ndim))
398+
op = cls(TensorType(dtype, shape=(None,) * ndim))
396399
return op(rstate, v_size)
397400

398401
def perform(self, node, inp, out, params):

aesara/sparse/basic.py

Lines changed: 33 additions & 27 deletions
Original file line number | Diff line number | Diff line change
@@ -592,7 +592,7 @@ def make_node(self, csm):
592592

593593
csm = as_sparse_variable(csm)
594594
assert csm.format in ("csr", "csc")
595-
data = TensorType(dtype=csm.type.dtype, shape=(False,))()
595+
data = TensorType(dtype=csm.type.dtype, shape=(None,))()
596596
return Apply(self, [csm], [data, ivector(), ivector(), ivector()])
597597

598598
def perform(self, node, inputs, out):
@@ -994,7 +994,7 @@ def make_node(self, x):
994994
return Apply(
995995
self,
996996
[x],
997-
[TensorType(dtype=x.type.dtype, shape=(False, False))()],
997+
[TensorType(dtype=x.type.dtype, shape=(None, None))()],
998998
)
999999

10001000
def perform(self, node, inputs, outputs):
@@ -1753,11 +1753,13 @@ def __init__(self, axis=None, sparse_grad=True):
17531753
def make_node(self, x):
17541754
x = as_sparse_variable(x)
17551755
assert x.format in ("csr", "csc")
1756-
b = ()
1756+
17571757
if self.axis is not None:
1758-
b = (False,)
1758+
out_shape = (None,)
1759+
else:
1760+
out_shape = ()
17591761

1760-
z = TensorType(shape=b, dtype=x.dtype)()
1762+
z = TensorType(dtype=x.dtype, shape=out_shape)()
17611763
return Apply(self, [x], [z])
17621764

17631765
def perform(self, node, inputs, outputs):
@@ -1872,7 +1874,7 @@ def make_node(self, x):
18721874
"""
18731875
x = as_sparse_variable(x)
18741876
assert x.format in ("csr", "csc")
1875-
return Apply(self, [x], [tensor(shape=(False,), dtype=x.dtype)])
1877+
return Apply(self, [x], [tensor(dtype=x.dtype, shape=(None,))])
18761878

18771879
def perform(self, node, inputs, outputs):
18781880
(x,) = inputs
@@ -2138,7 +2140,7 @@ def make_node(self, x, y):
21382140
return Apply(
21392141
self,
21402142
[x, y],
2141-
[TensorType(dtype=out_dtype, shape=y.type.broadcastable)()],
2143+
[TensorType(dtype=out_dtype, shape=y.type.shape)()],
21422144
)
21432145

21442146
def perform(self, node, inputs, outputs):
@@ -2621,7 +2623,7 @@ def make_node(self, x, y):
26212623
x, y = as_sparse_variable(x), at.as_tensor_variable(y)
26222624

26232625
assert y.type.ndim == 2
2624-
out = TensorType(dtype="uint8", shape=(False, False))()
2626+
out = TensorType(dtype="uint8", shape=(None, None))()
26252627
return Apply(self, [x, y], [out])
26262628

26272629
def perform(self, node, inputs, outputs):
@@ -3462,7 +3464,7 @@ def make_node(self, a, b):
34623464
return Apply(
34633465
self,
34643466
[a, b],
3465-
[tensor(dtype_out, (False, b.type.broadcastable[1]))],
3467+
[tensor(dtype_out, shape=(None, 1 if b.type.shape[1] == 1 else None))],
34663468
)
34673469

34683470
def perform(self, node, inputs, outputs):
@@ -3593,7 +3595,7 @@ class StructuredDotGradCSC(COp):
35933595

35943596
def make_node(self, a_indices, a_indptr, b, g_ab):
35953597
return Apply(
3596-
self, [a_indices, a_indptr, b, g_ab], [tensor(g_ab.dtype, (False,))]
3598+
self, [a_indices, a_indptr, b, g_ab], [tensor(g_ab.dtype, shape=(None,))]
35973599
)
35983600

35993601
def perform(self, node, inputs, outputs):
@@ -3726,7 +3728,9 @@ class StructuredDotGradCSR(COp):
37263728
__props__ = ()
37273729

37283730
def make_node(self, a_indices, a_indptr, b, g_ab):
3729-
return Apply(self, [a_indices, a_indptr, b, g_ab], [tensor(b.dtype, (False,))])
3731+
return Apply(
3732+
self, [a_indices, a_indptr, b, g_ab], [tensor(b.dtype, shape=(None,))]
3733+
)
37303734

37313735
def perform(self, node, inputs, outputs):
37323736
(a_indices, a_indptr, b, g_ab) = inputs
@@ -3967,6 +3971,7 @@ def make_node(self, x, y):
39673971
x = as_sparse_variable(x)
39683972
if isinstance(y, scipy.sparse.spmatrix):
39693973
y = as_sparse_variable(y)
3974+
39703975
x_is_sparse_var = _is_sparse_variable(x)
39713976
y_is_sparse_var = _is_sparse_variable(y)
39723977

@@ -3978,34 +3983,35 @@ def make_node(self, x, y):
39783983
)
39793984

39803985
if x_is_sparse_var:
3981-
broadcast_x = (False,) * x.ndim
3986+
shape_x = (None,) * x.type.ndim
39823987
else:
39833988
x = at.as_tensor_variable(x)
3984-
broadcast_x = x.type.broadcastable
3989+
shape_x = x.type.shape
39853990
assert y.format in ("csr", "csc")
39863991
if x.ndim not in (1, 2):
39873992
raise TypeError(
39883993
"Input 0 (0-indexed) must have ndim of "
3989-
f"1 or 2, {int(x.ndim)} given."
3994+
f"1 or 2, {int(x.type.ndim)} given."
39903995
)
39913996

39923997
if y_is_sparse_var:
3993-
broadcast_y = (False,) * y.ndim
3998+
shape_y = (None,) * y.type.ndim
39943999
else:
39954000
y = at.as_tensor_variable(y)
3996-
broadcast_y = y.type.broadcastable
4001+
shape_y = y.type.shape
39974002
assert x.format in ("csr", "csc")
39984003
if y.ndim not in (1, 2):
39994004
raise TypeError(
40004005
"Input 1 (1-indexed) must have ndim of "
4001-
f"1 or 2, {int(y.ndim)} given."
4006+
f"1 or 2, {int(y.type.ndim)} given."
40024007
)
40034008

4004-
if len(broadcast_y) == 2:
4005-
broadcast_out = broadcast_x[:-1] + broadcast_y[1:]
4006-
elif len(broadcast_y) == 1:
4007-
broadcast_out = broadcast_x[:-1]
4008-
return Apply(self, [x, y], [tensor(dtype=dtype_out, shape=broadcast_out)])
4009+
if len(shape_y) == 2:
4010+
shape_out = shape_x[:-1] + shape_y[1:]
4011+
elif len(shape_y) == 1:
4012+
shape_out = shape_x[:-1]
4013+
4014+
return Apply(self, [x, y], [tensor(dtype=dtype_out, shape=shape_out)])
40094015

40104016
def perform(self, node, inputs, out):
40114017
x, y = inputs
@@ -4126,21 +4132,21 @@ def make_node(self, alpha, x, y, z):
41264132
alpha = at.as_tensor_variable(alpha)
41274133
z = at.as_tensor_variable(z)
41284134

4129-
assert z.ndim == 2
4130-
assert alpha.type.broadcastable == (True,) * alpha.ndim
4135+
assert z.type.ndim == 2
4136+
assert alpha.type.shape == (1,) * alpha.type.ndim
41314137
if not _is_sparse_variable(x):
41324138
x = at.as_tensor_variable(x)
41334139
assert y.format in ("csr", "csc")
4134-
assert x.ndim == 2
4140+
assert x.type.ndim == 2
41354141
if not _is_sparse_variable(y):
41364142
y = at.as_tensor_variable(y)
41374143
assert x.format in ("csr", "csc")
4138-
assert y.ndim == 2
4144+
assert y.type.ndim == 2
41394145

41404146
return Apply(
41414147
self,
41424148
[alpha, x, y, z],
4143-
[tensor(dtype=dtype_out, shape=(False, False))],
4149+
[tensor(dtype=dtype_out, shape=(None, None))],
41444150
)
41454151

41464152
def perform(self, node, inputs, outputs):

0 commit comments

Comments (0)