
Commit fbfdacb

Armavica authored and michaelosthege committed
Fix deprecation: broadcastable -> shape
1 parent abe7cdd commit fbfdacb

File tree

7 files changed: +19 -39 lines changed

pymc/aesaraf.py

Lines changed: 1 addition & 1 deletion
@@ -528,7 +528,7 @@ def make_shared_replacements(point, vars, model):
     """
     othervars = set(model.value_vars) - set(vars)
     return {
-        var: aesara.shared(point[var.name], var.name + "_shared", broadcastable=var.broadcastable)
+        var: aesara.shared(point[var.name], var.name + "_shared", shape=var.type.shape)
         for var in othervars
     }
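For context, the deprecated keyword and its replacement encode the same type information; a minimal sketch of the two spellings (the variable names here are illustrative, not from the commit):

import aesara
import numpy as np

value = np.zeros((1, 3))

# Deprecated spelling: one boolean per dimension.
# shared_old = aesara.shared(value, "x_shared", broadcastable=(True, False))

# Replacement: a static shape, with 1 for broadcastable dimensions
# and None for dimensions of unknown length.
shared_new = aesara.shared(value, "x_shared", shape=(1, None))
print(shared_new.type.shape)  # expected: (1, None)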

pymc/distributions/continuous.py

Lines changed: 3 additions & 3 deletions
@@ -3994,9 +3994,9 @@ def make_node(self, x, h, z):
         x = at.as_tensor_variable(floatX(x))
         h = at.as_tensor_variable(floatX(h))
         z = at.as_tensor_variable(floatX(z))
-        shape = broadcast_shape(x, h, z)
-        broadcastable = [] if not shape else [False] * len(shape)
-        return Apply(self, [x, h, z], [at.TensorType(aesara.config.floatX, broadcastable)()])
+        bshape = broadcast_shape(x, h, z)
+        shape = [None] * len(bshape)
+        return Apply(self, [x, h, z], [at.TensorType(aesara.config.floatX, shape)()])

     def perform(self, node, ins, outs):
         x, h, z = ins[0], ins[1], ins[2]
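The hunk above swaps a list of False flags for a list of None entries of the same length; a small sketch of the equivalence, assuming a 2-D broadcast result:

import aesara
import aesara.tensor as at

# Old: broadcastable=[False, False] declared a 2-D output with no
# broadcastable dimensions. New: shape=[None, None] declares a 2-D
# output whose lengths are unknown at compile time.
out_type = at.TensorType(aesara.config.floatX, shape=[None, None])
out = out_type()
print(out.broadcastable)  # expected: (False, False)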

pymc/distributions/multivariate.py

Lines changed: 1 addition & 1 deletion
@@ -851,7 +851,7 @@ class PosDefMatrix(Op):
     def make_node(self, x):
         x = at.as_tensor_variable(x)
         assert x.ndim == 2
-        o = TensorType(dtype="int8", broadcastable=[])()
+        o = TensorType(dtype="int8", shape=[])()
         return Apply(self, [x], [o])

     # Python implementation:
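A 0-d output type can be written either way; a minimal sketch of the new spelling:

from aesara.tensor.type import TensorType

# An empty shape list declares a scalar (0-d) tensor, exactly as an
# empty broadcastable list did before the deprecation.
scalar_type = TensorType(dtype="int8", shape=[])
o = scalar_type()
print(o.ndim)  # expected: 0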

pymc/model.py

Lines changed: 1 addition & 1 deletion
@@ -364,7 +364,7 @@ def __init__(
         self._extra_vars_shared = {}
         for var, value in extra_vars_and_values.items():
             shared = aesara.shared(
-                value, var.name + "_shared__", broadcastable=[s == 1 for s in value.shape]
+                value, var.name + "_shared__", shape=[1 if s == 1 else None for s in value.shape]
             )
             self._extra_vars_shared[var.name] = shared
             givens.append((var, shared))
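The comprehension change maps each concrete length onto the new static-shape convention; a quick illustration with a hypothetical value:

import numpy as np

value = np.zeros((1, 4, 1))

# Deprecated: a boolean flag per dimension, True where the length is 1.
broadcastable = [s == 1 for s in value.shape]          # [True, False, True]

# Replacement: pin length-1 dimensions to 1, leave the rest unknown.
shape = [1 if s == 1 else None for s in value.shape]   # [1, None, 1]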

pymc/smc/smc.py

Lines changed: 1 addition & 1 deletion
@@ -593,7 +593,7 @@ def _logp_forw(point, out_vars, in_vars, shared):
     new_in_vars = []
     for in_var in in_vars:
         if in_var.dtype in discrete_types:
-            float_var = at.TensorType("floatX", in_var.broadcastable)(in_var.name)
+            float_var = at.TensorType("floatX", in_var.type.shape)(in_var.name)
             new_in_vars.append(float_var)
             replace_int_input[in_var] = at.round(float_var).astype(in_var.dtype)
         else:
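Here in_var.type.shape carries the same per-dimension information the old broadcastable pattern did; a sketch with a hypothetical discrete input:

import aesara.tensor as at

int_var = at.TensorType("int64", shape=(None, 1))("counts")

# Build a float-typed stand-in with the same static shape, as the
# updated line does for discrete inputs.
float_var = at.TensorType("floatX", int_var.type.shape)(int_var.name)
print(float_var.broadcastable)  # expected: (False, True)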

pymc/tests/test_sampling.py

Lines changed: 2 additions & 2 deletions
@@ -550,7 +550,7 @@ def test_choose_chains(n_points, tune, expected_length, expected_n_traces):
 @pytest.mark.xfail(condition=(aesara.config.floatX == "float32"), reason="Fails on float32")
 class TestNamedSampling(SeededTest):
     def test_shared_named(self):
-        G_var = shared(value=np.atleast_2d(1.0), broadcastable=(True, False), name="G")
+        G_var = shared(value=np.atleast_2d(1.0), shape=(1, None), name="G")

         with pm.Model():
             theta0 = pm.Normal(
@@ -567,7 +567,7 @@ def test_shared_named(self):
         assert np.isclose(res, 0.0)

     def test_shared_unnamed(self):
-        G_var = shared(value=np.atleast_2d(1.0), broadcastable=(True, False))
+        G_var = shared(value=np.atleast_2d(1.0), shape=(1, None))
         with pm.Model():
             theta0 = pm.Normal(
                 "theta0",

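One plausible reason the tests declare shape=(1, None) rather than (1, 1): only the pinned leading dimension is fixed, so the shared value can later be replaced by a wider array. A sketch, assuming that intent:

import numpy as np
from aesara import shared

G_var = shared(value=np.atleast_2d(1.0), shape=(1, None), name="G")

# The leading dimension stays broadcastable; the trailing one is
# unconstrained, so resizing the stored value remains legal.
G_var.set_value(np.ones((1, 5)))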
pymc/variational/updates.py

Lines changed: 10 additions & 30 deletions
@@ -275,9 +275,7 @@ def apply_momentum(updates, params=None, momentum=0.9):

     for param in params:
         value = param.get_value(borrow=True)
-        velocity = aesara.shared(
-            np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable
-        )
+        velocity = aesara.shared(np.zeros(value.shape, dtype=value.dtype), shape=param.type.shape)
         x = momentum * velocity + updates[param]
         updates[velocity] = x - param
         updates[param] = x
@@ -390,9 +388,7 @@ def apply_nesterov_momentum(updates, params=None, momentum=0.9):

     for param in params:
         value = param.get_value(borrow=True)
-        velocity = aesara.shared(
-            np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable
-        )
+        velocity = aesara.shared(np.zeros(value.shape, dtype=value.dtype), shape=param.type.shape)
         x = momentum * velocity + updates[param] - param
         updates[velocity] = x
         updates[param] = momentum * x + updates[param]
@@ -534,9 +530,7 @@ def adagrad(loss_or_grads=None, params=None, learning_rate=1.0, epsilon=1e-6):

     for param, grad in zip(params, grads):
         value = param.get_value(borrow=True)
-        accu = aesara.shared(
-            np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable
-        )
+        accu = aesara.shared(np.zeros(value.shape, dtype=value.dtype), shape=param.type.shape)
         accu_new = accu + grad**2
         updates[accu] = accu_new
         updates[param] = param - (learning_rate * grad / at.sqrt(accu_new + epsilon))
@@ -662,9 +656,7 @@ def rmsprop(loss_or_grads=None, params=None, learning_rate=1.0, rho=0.9, epsilon

     for param, grad in zip(params, grads):
         value = param.get_value(borrow=True)
-        accu = aesara.shared(
-            np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable
-        )
+        accu = aesara.shared(np.zeros(value.shape, dtype=value.dtype), shape=param.type.shape)
         accu_new = rho * accu + (one - rho) * grad**2
         updates[accu] = accu_new
         updates[param] = param - (learning_rate * grad / at.sqrt(accu_new + epsilon))
@@ -755,13 +747,9 @@ def adadelta(loss_or_grads=None, params=None, learning_rate=1.0, rho=0.95, epsil
     for param, grad in zip(params, grads):
         value = param.get_value(borrow=True)
         # accu: accumulate gradient magnitudes
-        accu = aesara.shared(
-            np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable
-        )
+        accu = aesara.shared(np.zeros(value.shape, dtype=value.dtype), shape=param.type.shape)
         # delta_accu: accumulate update magnitudes (recursively!)
-        delta_accu = aesara.shared(
-            np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable
-        )
+        delta_accu = aesara.shared(np.zeros(value.shape, dtype=value.dtype), shape=param.type.shape)

         # update accu (as in rmsprop)
         accu_new = rho * accu + (one - rho) * grad**2
@@ -850,12 +838,8 @@ def adam(

     for param, g_t in zip(params, all_grads):
         value = param.get_value(borrow=True)
-        m_prev = aesara.shared(
-            np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable
-        )
-        v_prev = aesara.shared(
-            np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable
-        )
+        m_prev = aesara.shared(np.zeros(value.shape, dtype=value.dtype), shape=param.type.shape)
+        v_prev = aesara.shared(np.zeros(value.shape, dtype=value.dtype), shape=param.type.shape)

         m_t = beta1 * m_prev + (one - beta1) * g_t
         v_t = beta2 * v_prev + (one - beta2) * g_t**2
@@ -938,12 +922,8 @@ def adamax(

     for param, g_t in zip(params, all_grads):
         value = param.get_value(borrow=True)
-        m_prev = aesara.shared(
-            np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable
-        )
-        u_prev = aesara.shared(
-            np.zeros(value.shape, dtype=value.dtype), broadcastable=param.broadcastable
-        )
+        m_prev = aesara.shared(np.zeros(value.shape, dtype=value.dtype), shape=param.type.shape)
+        u_prev = aesara.shared(np.zeros(value.shape, dtype=value.dtype), shape=param.type.shape)

         m_t = beta1 * m_prev + (one - beta1) * g_t
         u_t = at.maximum(beta2 * u_prev, abs(g_t))
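Every optimizer in this file repeats the same pattern: allocate a zero-filled state variable whose static shape mirrors the parameter's. A condensed sketch with a hypothetical parameter:

import aesara
import numpy as np

# Hypothetical parameter, as the optimizers above receive it.
param = aesara.shared(np.zeros((3, 2)), name="w")

value = param.get_value(borrow=True)
# The state matches the runtime shape via np.zeros and the static type
# shape via shape=..., replacing broadcastable=param.broadcastable.
velocity = aesara.shared(np.zeros(value.shape, dtype=value.dtype), shape=param.type.shape)
assert velocity.type.shape == param.type.shape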

0 commit comments