diff --git a/docs/source/api/pytensorf.rst b/docs/source/api/pytensorf.rst index 71e718d69..0c4609503 100644 --- a/docs/source/api/pytensorf.rst +++ b/docs/source/api/pytensorf.rst @@ -6,7 +6,7 @@ PyTensor utils .. autosummary:: :toctree: generated/ - compile_pymc + compile gradient hessian hessian_diag @@ -19,6 +19,4 @@ PyTensor utils CallableTensor join_nonshared_inputs make_shared_replacements - generator - convert_generator_data convert_data diff --git a/pymc/backends/base.py b/pymc/backends/base.py index fe05b8e5c..5a2a043a3 100644 --- a/pymc/backends/base.py +++ b/pymc/backends/base.py @@ -34,7 +34,7 @@ from pymc.backends.report import SamplerReport from pymc.model import modelcontext -from pymc.pytensorf import compile_pymc +from pymc.pytensorf import compile from pymc.util import get_var_name logger = logging.getLogger(__name__) @@ -171,7 +171,7 @@ def __init__( if fn is None: # borrow=True avoids deepcopy when inputs=output which is the case for untransformed value variables - fn = compile_pymc( + fn = compile( inputs=[pytensor.In(v, borrow=True) for v in model.value_vars], outputs=[pytensor.Out(v, borrow=True) for v in vars], on_unused_input="ignore", diff --git a/pymc/func_utils.py b/pymc/func_utils.py index 21492a34e..72dc3b1a9 100644 --- a/pymc/func_utils.py +++ b/pymc/func_utils.py @@ -169,18 +169,18 @@ def find_constrained_prior( ) target = (pt.exp(logcdf_lower) - mass_below_lower) ** 2 - target_fn = pm.pytensorf.compile_pymc([dist_params], target, allow_input_downcast=True) + target_fn = pm.pytensorf.compile([dist_params], target, allow_input_downcast=True) constraint = pt.exp(logcdf_upper) - pt.exp(logcdf_lower) - constraint_fn = pm.pytensorf.compile_pymc([dist_params], constraint, allow_input_downcast=True) + constraint_fn = pm.pytensorf.compile([dist_params], constraint, allow_input_downcast=True) jac: str | Callable constraint_jac: str | Callable try: pytensor_jac = pm.gradient(target, [dist_params]) - jac = pm.pytensorf.compile_pymc([dist_params], pytensor_jac, allow_input_downcast=True) + jac = pm.pytensorf.compile([dist_params], pytensor_jac, allow_input_downcast=True) pytensor_constraint_jac = pm.gradient(constraint, [dist_params]) - constraint_jac = pm.pytensorf.compile_pymc( + constraint_jac = pm.pytensorf.compile( [dist_params], pytensor_constraint_jac, allow_input_downcast=True ) # when PyMC cannot compute the gradient diff --git a/pymc/gp/util.py b/pymc/gp/util.py index b2d7447b1..3aaf85ab5 100644 --- a/pymc/gp/util.py +++ b/pymc/gp/util.py @@ -23,7 +23,7 @@ from scipy.cluster.vq import kmeans from pymc.model.core import modelcontext -from pymc.pytensorf import compile_pymc +from pymc.pytensorf import compile JITTER_DEFAULT = 1e-6 @@ -55,7 +55,7 @@ def replace_with_values(vars_needed, replacements=None, model=None): if len(inputs) == 0: return tuple(v.eval() for v in vars_needed) - fn = compile_pymc( + fn = compile( inputs, vars_needed, allow_input_downcast=True, diff --git a/pymc/initial_point.py b/pymc/initial_point.py index 0cb1a26dd..241409f68 100644 --- a/pymc/initial_point.py +++ b/pymc/initial_point.py @@ -26,7 +26,7 @@ from pymc.logprob.transforms import Transform from pymc.pytensorf import ( - compile_pymc, + compile, find_rng_nodes, replace_rng_nodes, reseed_rngs, @@ -157,7 +157,7 @@ def make_initial_point_fn( # Replace original rng shared variables so that we don't mess with them # when calling the final seeded function initial_values = replace_rng_nodes(initial_values) - func = compile_pymc(inputs=[], outputs=initial_values, 
mode=pytensor.compile.mode.FAST_COMPILE) + func = compile(inputs=[], outputs=initial_values, mode=pytensor.compile.mode.FAST_COMPILE) varnames = [] for var in model.free_RVs: diff --git a/pymc/model/core.py b/pymc/model/core.py index 782948b7e..99711e566 100644 --- a/pymc/model/core.py +++ b/pymc/model/core.py @@ -56,7 +56,7 @@ from pymc.pytensorf import ( PointFunc, SeedSequenceSeed, - compile_pymc, + compile, convert_observed_data, gradient, hessian, @@ -253,7 +253,7 @@ def __init__( ) inputs = grad_vars - self._pytensor_function = compile_pymc(inputs, outputs, givens=givens, **kwargs) + self._pytensor_function = compile(inputs, outputs, givens=givens, **kwargs) self._raveled_inputs = ravel_inputs def set_weights(self, values): @@ -1637,7 +1637,7 @@ def compile_fn( inputs = inputvars(outs) with self: - fn = compile_pymc( + fn = compile( inputs, outs, allow_input_downcast=True, diff --git a/pymc/pytensorf.py b/pymc/pytensorf.py index 09a603402..6fd44b038 100644 --- a/pymc/pytensorf.py +++ b/pymc/pytensorf.py @@ -60,6 +60,7 @@ __all__ = [ "CallableTensor", + "compile", "compile_pymc", "cont_inputs", "convert_data", @@ -981,7 +982,7 @@ def find_default_update(clients, rng: Variable) -> None | Variable: return rng_updates -def compile_pymc( +def compile( inputs, outputs, random_seed: SeedSequenceSeed = None, @@ -990,7 +991,7 @@ def compile_pymc( ) -> Function: """Use ``pytensor.function`` with specialized pymc rewrites always enabled. - This function also ensures shared RandomState/Generator used by RandomVariables + This function also ensures shared Generator used by RandomVariables in the graph are updated across calls, to ensure independent draws. Parameters @@ -1061,6 +1062,14 @@ def compile_pymc( return pytensor_function +def compile_pymc(*args, **kwargs): + warnings.warn( + "compile_pymc was renamed to compile. 
Old name will be removed in a future release of PyMC", + FutureWarning, + ) + return compile(*args, **kwargs) + + def constant_fold( xs: Sequence[TensorVariable], raise_not_constant: bool = True ) -> tuple[np.ndarray | Variable, ...]: diff --git a/pymc/sampling/forward.py b/pymc/sampling/forward.py index b3015e269..c07683555 100644 --- a/pymc/sampling/forward.py +++ b/pymc/sampling/forward.py @@ -51,7 +51,7 @@ from pymc.backends.base import MultiTrace from pymc.blocking import PointType from pymc.model import Model, modelcontext -from pymc.pytensorf import compile_pymc +from pymc.pytensorf import compile from pymc.util import ( CustomProgress, RandomState, @@ -273,7 +273,7 @@ def expand(node): ] return ( - compile_pymc(inputs, fg.outputs, givens=givens, on_unused_input="ignore", **kwargs), + compile(inputs, fg.outputs, givens=givens, on_unused_input="ignore", **kwargs), set(basic_rvs) & (volatile_nodes - set(givens_dict)), # Basic RVs that will be resampled ) @@ -329,7 +329,7 @@ def draw( if random_seed is not None: (random_seed,) = _get_seeds_per_chain(random_seed, 1) - draw_fn = compile_pymc(inputs=[], outputs=vars, random_seed=random_seed, **kwargs) + draw_fn = compile(inputs=[], outputs=vars, random_seed=random_seed, **kwargs) if draws == 1: return draw_fn() diff --git a/pymc/smc/kernels.py b/pymc/smc/kernels.py index 608454ef3..db1b0cf5b 100644 --- a/pymc/smc/kernels.py +++ b/pymc/smc/kernels.py @@ -30,7 +30,7 @@ from pymc.initial_point import make_initial_point_expression from pymc.model import Point, modelcontext from pymc.pytensorf import ( - compile_pymc, + compile, floatX, join_nonshared_inputs, make_shared_replacements, @@ -636,6 +636,6 @@ def _logp_forw(point, out_vars, in_vars, shared): out_list, inarray0 = join_nonshared_inputs( point=point, outputs=out_vars, inputs=in_vars, shared_inputs=shared ) - f = compile_pymc([inarray0], out_list[0]) + f = compile([inarray0], out_list[0]) f.trust_input = True return f diff --git a/pymc/step_methods/metropolis.py b/pymc/step_methods/metropolis.py index a94458412..64455c893 100644 --- a/pymc/step_methods/metropolis.py +++ b/pymc/step_methods/metropolis.py @@ -31,7 +31,7 @@ from pymc.initial_point import PointType from pymc.pytensorf import ( CallableTensor, - compile_pymc, + compile, floatX, join_nonshared_inputs, replace_rng_nodes, @@ -1241,6 +1241,6 @@ def delta_logp( if compile_kwargs is None: compile_kwargs = {} - f = compile_pymc([inarray1, inarray0], logp1 - logp0, **compile_kwargs) + f = compile([inarray1, inarray0], logp1 - logp0, **compile_kwargs) f.trust_input = True return f diff --git a/pymc/step_methods/slicer.py b/pymc/step_methods/slicer.py index b84674390..73574c025 100644 --- a/pymc/step_methods/slicer.py +++ b/pymc/step_methods/slicer.py @@ -20,7 +20,7 @@ from pymc.blocking import RaveledVars, StatsType from pymc.initial_point import PointType from pymc.model import modelcontext -from pymc.pytensorf import compile_pymc, join_nonshared_inputs, make_shared_replacements +from pymc.pytensorf import compile, join_nonshared_inputs, make_shared_replacements from pymc.step_methods.arraystep import ArrayStepShared from pymc.step_methods.compound import Competence, StepMethodState from pymc.step_methods.state import dataclass_state @@ -109,7 +109,7 @@ def __init__( ) if compile_kwargs is None: compile_kwargs = {} - self.logp = compile_pymc([raveled_inp], logp, **compile_kwargs) + self.logp = compile([raveled_inp], logp, **compile_kwargs) self.logp.trust_input = True super().__init__(vars, shared, blocked=blocked, rng=rng) diff 
--git a/pymc/testing.py b/pymc/testing.py index 3970e9125..cc7433980 100644 --- a/pymc/testing.py +++ b/pymc/testing.py @@ -43,7 +43,7 @@ local_check_parameter_to_ninf_switch, rvs_in_graph, ) -from pymc.pytensorf import compile_pymc, floatX, inputvars +from pymc.pytensorf import compile, floatX, inputvars # This mode can be used for tests where model compilations takes the bulk of the runtime # AND where we don't care about posterior numerical or sampling stability (e.g., when @@ -645,7 +645,7 @@ def check_selfconsistency_discrete_logcdf( dist_logp_fn = pytensor.function(list(inputvars(dist_logp)), dist_logp) dist_logcdf = logcdf(dist, value) - dist_logcdf_fn = compile_pymc(list(inputvars(dist_logcdf)), dist_logcdf) + dist_logcdf_fn = compile(list(inputvars(dist_logcdf)), dist_logcdf) domains = paramdomains.copy() domains["value"] = domain @@ -721,7 +721,7 @@ def continuous_random_tester( model, param_vars = build_model(dist, valuedomain, paramdomains, extra_args) model_dist = change_dist_size(model.named_vars["value"], size, expand=True) - pymc_rand = compile_pymc([], model_dist) + pymc_rand = compile([], model_dist) domains = paramdomains.copy() for point in product(domains, n_samples=100): @@ -760,7 +760,7 @@ def discrete_random_tester( model, param_vars = build_model(dist, valuedomain, paramdomains) model_dist = change_dist_size(model.named_vars["value"], size, expand=True) - pymc_rand = compile_pymc([], model_dist) + pymc_rand = compile([], model_dist) domains = paramdomains.copy() for point in product(domains, n_samples=100): diff --git a/pymc/variational/opvi.py b/pymc/variational/opvi.py index a6a924233..9829ea2c3 100644 --- a/pymc/variational/opvi.py +++ b/pymc/variational/opvi.py @@ -72,7 +72,7 @@ from pymc.model import modelcontext from pymc.pytensorf import ( SeedSequenceSeed, - compile_pymc, + compile, find_rng_nodes, identity, reseed_rngs, @@ -388,9 +388,9 @@ def step_function( ) seed = self.approx.rng.randint(2**30, dtype=np.int64) if score: - step_fn = compile_pymc([], updates.loss, updates=updates, random_seed=seed, **fn_kwargs) + step_fn = compile([], updates.loss, updates=updates, random_seed=seed, **fn_kwargs) else: - step_fn = compile_pymc([], [], updates=updates, random_seed=seed, **fn_kwargs) + step_fn = compile([], [], updates=updates, random_seed=seed, **fn_kwargs) return step_fn @pytensor.config.change_flags(compute_test_value="off") @@ -420,7 +420,7 @@ def score_function( more_replacements = {} loss = self(sc_n_mc, more_replacements=more_replacements) seed = self.approx.rng.randint(2**30, dtype=np.int64) - return compile_pymc([], loss, random_seed=seed, **fn_kwargs) + return compile([], loss, random_seed=seed, **fn_kwargs) @pytensor.config.change_flags(compute_test_value="off") def __call__(self, nmc, **kwargs): @@ -1517,7 +1517,7 @@ def sample_dict_fn(self): names = [self.model.rvs_to_values[v].name for v in self.model.free_RVs] sampled = [self.rslice(name) for name in names] sampled = self.set_size_and_deterministic(sampled, s, 0) - sample_fn = compile_pymc([s], sampled) + sample_fn = compile([s], sampled) rng_nodes = find_rng_nodes(sampled) def inner(draws=100, *, random_seed: SeedSequenceSeed = None): diff --git a/tests/distributions/test_distribution.py b/tests/distributions/test_distribution.py index 74716081b..cd45b54d4 100644 --- a/tests/distributions/test_distribution.py +++ b/tests/distributions/test_distribution.py @@ -43,7 +43,7 @@ ) from pymc.distributions.shape_utils import change_dist_size from pymc.logprob.basic import conditional_logp, logp -from 
pymc.pytensorf import compile_pymc +from pymc.pytensorf import compile from pymc.testing import ( BaseTestDistributionRandom, I, @@ -169,7 +169,7 @@ def update(self, node): outputs=[dummy_next_rng, dummy_x], ndim_supp=0, )(rng) - fn = compile_pymc(inputs=[], outputs=x, random_seed=431) + fn = compile(inputs=[], outputs=x, random_seed=431) assert fn() != fn() # Check that custom updates are respected, by using one that's broken @@ -182,7 +182,7 @@ def update(self, node): ValueError, match="No update found for at least one RNG used in SymbolicRandomVariable Op SymbolicRVCustomUpdates", ): - compile_pymc(inputs=[], outputs=x, random_seed=431) + compile(inputs=[], outputs=x, random_seed=431) def test_recreate_with_different_rng_inputs(self): """Test that we can recreate a SymbolicRandomVariable with new RNG inputs. diff --git a/tests/distributions/test_multivariate.py b/tests/distributions/test_multivariate.py index 6503050c9..cfd50fdd7 100644 --- a/tests/distributions/test_multivariate.py +++ b/tests/distributions/test_multivariate.py @@ -45,7 +45,7 @@ from pymc.logprob.basic import logp from pymc.logprob.utils import ParameterValueError from pymc.math import kronecker -from pymc.pytensorf import compile_pymc, floatX +from pymc.pytensorf import compile, floatX from pymc.sampling.forward import draw from pymc.testing import ( BaseTestDistributionRandom, @@ -168,7 +168,7 @@ def stickbreakingweights_logpdf(): _alpha = pt.scalar() _k = pt.iscalar() _logp = logp(pm.StickBreakingWeights.dist(_alpha, _k), _value) - core_fn = compile_pymc([_value, _alpha, _k], _logp) + core_fn = compile([_value, _alpha, _k], _logp) return np.vectorize(core_fn, signature="(n),(),()->()") diff --git a/tests/distributions/test_shape_utils.py b/tests/distributions/test_shape_utils.py index 58f75e1cb..f381d6db4 100644 --- a/tests/distributions/test_shape_utils.py +++ b/tests/distributions/test_shape_utils.py @@ -326,7 +326,7 @@ def test_size_from_dims_rng_update(self): with pm.Model(coords={"x_dim": range(2)}): x = pm.Normal("x", dims=("x_dim",)) - fn = pm.pytensorf.compile_pymc([], x) + fn = pm.pytensorf.compile([], x) # Check that both function outputs (rng and draws) come from the same Apply node assert fn.maker.fgraph.outputs[0].owner is fn.maker.fgraph.outputs[1].owner @@ -341,7 +341,7 @@ def test_size_from_observed_rng_update(self): with pm.Model(): x = pm.Normal("x", observed=[0, 1]) - fn = pm.pytensorf.compile_pymc([], x) + fn = pm.pytensorf.compile([], x) # Check that both function outputs (rng and draws) come from the same Apply node assert fn.maker.fgraph.outputs[0].owner is fn.maker.fgraph.outputs[1].owner diff --git a/tests/distributions/test_simulator.py b/tests/distributions/test_simulator.py index bddf440a1..873261386 100644 --- a/tests/distributions/test_simulator.py +++ b/tests/distributions/test_simulator.py @@ -28,7 +28,7 @@ from pymc import floatX from pymc.initial_point import make_initial_point_fn -from pymc.pytensorf import compile_pymc +from pymc.pytensorf import compile from pymc.smc.kernels import IMH @@ -357,12 +357,12 @@ def test_dist(self, seeded_test): x = cloudpickle.loads(cloudpickle.dumps(x)) x_logp = pm.logp(x, [0, 1, 2]) - x_logp_fn = compile_pymc([], x_logp, random_seed=1) + x_logp_fn = compile([], x_logp, random_seed=1) res1, res2 = x_logp_fn(), x_logp_fn() assert res1.shape == (3,) assert np.all(res1 != res2) - x_logp_fn = compile_pymc([], x_logp, random_seed=1) + x_logp_fn = compile([], x_logp, random_seed=1) res3, res4 = x_logp_fn(), x_logp_fn() assert np.all(res1 == res3) assert 
np.all(res2 == res4) diff --git a/tests/model/test_core.py b/tests/model/test_core.py index 42094d919..2d3786637 100644 --- a/tests/model/test_core.py +++ b/tests/model/test_core.py @@ -800,10 +800,10 @@ def test_mode(self, mode): "a_interval__": model.rvs_to_transforms[a].forward(0.3, *a.owner.inputs).eval(), "b_interval__": model.rvs_to_transforms[b].forward(2.1, *b.owner.inputs).eval(), } - with patch("pymc.model.core.compile_pymc") as patched_compile_pymc: + with patch("pymc.model.core.compile") as patched_compile: model.check_start_vals(start, mode=mode) - patched_compile_pymc.assert_called_once() - assert patched_compile_pymc.call_args.kwargs["mode"] == mode + patched_compile.assert_called_once() + assert patched_compile.call_args.kwargs["mode"] == mode def test_set_initval(): diff --git a/tests/sampling/test_forward.py b/tests/sampling/test_forward.py index 2b5ce1265..404f74a96 100644 --- a/tests/sampling/test_forward.py +++ b/tests/sampling/test_forward.py @@ -33,7 +33,7 @@ import pymc as pm from pymc.backends.base import MultiTrace -from pymc.pytensorf import compile_pymc +from pymc.pytensorf import compile from pymc.sampling.forward import ( compile_forward_sampling_function, get_constant_coords, @@ -1195,7 +1195,7 @@ def test_layers(self): a = pm.Uniform("a", lower=0, upper=1, size=10) b = pm.Binomial("b", n=1, p=a, size=10) - b_sampler = compile_pymc([], b, mode="FAST_RUN", random_seed=232093) + b_sampler = compile([], b, mode="FAST_RUN", random_seed=232093) avg = np.stack([b_sampler() for i in range(10000)]).mean(0) npt.assert_array_almost_equal(avg, 0.5 * np.ones((10,)), decimal=2) diff --git a/tests/stats/test_log_density.py b/tests/stats/test_log_density.py index 5128913e8..00ee5d499 100644 --- a/tests/stats/test_log_density.py +++ b/tests/stats/test_log_density.py @@ -187,12 +187,12 @@ def test_compilation_kwargs(self): with ( # apply_function_over_dataset fails with patched `compile_pymc` patch("pymc.stats.log_density.apply_function_over_dataset"), - patch("pymc.model.core.compile_pymc") as patched_compile_pymc, + patch("pymc.model.core.compile") as patched_compile, ): compute_log_prior(idata, compile_kwargs={"mode": "JAX"}, extend_inferencedata=False) compute_log_likelihood( idata, compile_kwargs={"mode": "NUMBA"}, extend_inferencedata=False ) - assert len(patched_compile_pymc.call_args_list) == 2 - assert patched_compile_pymc.call_args_list[0].kwargs["mode"] == "JAX" - assert patched_compile_pymc.call_args_list[1].kwargs["mode"] == "NUMBA" + assert len(patched_compile.call_args_list) == 2 + assert patched_compile.call_args_list[0].kwargs["mode"] == "JAX" + assert patched_compile.call_args_list[1].kwargs["mode"] == "NUMBA" diff --git a/tests/test_pytensorf.py b/tests/test_pytensorf.py index 562bb49b5..f8353ce9c 100644 --- a/tests/test_pytensorf.py +++ b/tests/test_pytensorf.py @@ -39,7 +39,7 @@ from pymc.pytensorf import ( GeneratorOp, collect_default_updates, - compile_pymc, + compile, constant_fold, convert_data, convert_generator_data, @@ -348,23 +348,23 @@ def test_check_bounds_flag(self): m.check_bounds = False with m: - assert np.all(compile_pymc([], bound)() == 1) + assert np.all(compile([], bound)() == 1) m.check_bounds = True with m: - assert np.all(compile_pymc([], bound)() == -np.inf) + assert np.all(compile([], bound)() == -np.inf) def test_check_parameters_can_be_replaced_by_ninf(self): expr = pt.vector("expr", shape=(3,)) cond = pt.ge(expr, 0) final_expr = check_parameters(expr, cond, can_be_replaced_by_ninf=True) - fn = compile_pymc([expr], final_expr) + 
fn = compile([expr], final_expr) np.testing.assert_array_equal(fn(expr=[1, 2, 3]), [1, 2, 3]) np.testing.assert_array_equal(fn(expr=[-1, 2, 3]), [-np.inf, -np.inf, -np.inf]) final_expr = check_parameters(expr, cond, msg="test", can_be_replaced_by_ninf=False) - fn = compile_pymc([expr], final_expr) + fn = compile([expr], final_expr) np.testing.assert_array_equal(fn(expr=[1, 2, 3]), [1, 2, 3]) with pytest.raises(ParameterValueError, match="test"): fn([-1, 2, 3]) @@ -373,7 +373,7 @@ def test_compile_pymc_sets_rng_updates(self): rng = pytensor.shared(np.random.default_rng(0)) x = pm.Normal.dist(rng=rng) assert x.owner.inputs[0] is rng - f = compile_pymc([], x) + f = compile([], x) assert not np.isclose(f(), f()) # Check that update was not done inplace @@ -383,7 +383,7 @@ def test_compile_pymc_sets_rng_updates(self): def test_compile_pymc_with_updates(self): x = pytensor.shared(0) - f = compile_pymc([], x, updates={x: x + 1}) + f = compile([], x, updates={x: x + 1}) assert f() == 0 assert f() == 1 @@ -392,21 +392,21 @@ def test_compile_pymc_missing_default_explicit_updates(self): x = pm.Normal.dist(rng=rng) # By default, compile_pymc should update the rng of x - f = compile_pymc([], x) + f = compile([], x) assert f() != f() # An explicit update should override the default_update, like pytensor.function does # For testing purposes, we use an update that leaves the rng unchanged - f = compile_pymc([], x, updates={rng: rng}) + f = compile([], x, updates={rng: rng}) assert f() == f() # If we specify a custom default_update directly it should use that instead. rng.default_update = rng - f = compile_pymc([], x) + f = compile([], x) assert f() == f() # And again, it should be overridden by an explicit update - f = compile_pymc([], x, updates={rng: x.owner.outputs[0]}) + f = compile([], x, updates={rng: x.owner.outputs[0]}) assert f() != f() def test_compile_pymc_updates_inputs(self): @@ -425,7 +425,7 @@ def test_compile_pymc_updates_inputs(self): ([x, y], 1), ([x, y, z], 0), ): - fn = compile_pymc(inputs, z, on_unused_input="ignore") + fn = compile(inputs, z, on_unused_input="ignore") fn_fgraph = fn.maker.fgraph # Each RV adds a shared input for its rng assert len(fn_fgraph.inputs) == len(inputs) + rvs_in_graph @@ -452,7 +452,7 @@ def update(self, node): [dummy_rng1], pt.random.normal(rng=dummy_rng1).owner.outputs, )(rng1) - fn = compile_pymc(inputs=[], outputs=dummy_x1, random_seed=433) + fn = compile(inputs=[], outputs=dummy_x1, random_seed=433) assert fn() != fn() # Now there's a problem as there is no update rule for rng2 @@ -468,7 +468,7 @@ def update(self, node): with pytest.raises( ValueError, match="No update found for at least one RNG used in SymbolicRandomVariable" ): - compile_pymc(inputs=[], outputs=[dummy_x1, dummy_x2]) + compile(inputs=[], outputs=[dummy_x1, dummy_x2]) def test_random_seed(self): seedx = pytensor.shared(np.random.default_rng(1)) @@ -482,17 +482,17 @@ def test_random_seed(self): assert x0_eval == y0_eval # The variables will be reseeded with new seeds by default - f1 = compile_pymc([], [x, y]) + f1 = compile([], [x, y]) x1_eval, y1_eval = f1() assert x1_eval != y1_eval # Check that seeding works - f2 = compile_pymc([], [x, y], random_seed=1) + f2 = compile([], [x, y], random_seed=1) x2_eval, y2_eval = f2() assert x2_eval != x1_eval assert y2_eval != y1_eval - f3 = compile_pymc([], [x, y], random_seed=1) + f3 = compile([], [x, y], random_seed=1) x3_eval, y3_eval = f3() assert x3_eval == x2_eval assert y3_eval == y2_eval @@ -504,23 +504,23 @@ def 
test_multiple_updates_same_variable(self): y = pt.random.normal(1, rng=rng) # No warnings if only one variable is used - assert compile_pymc([], [x]) - assert compile_pymc([], [y]) + assert compile([], [x]) + assert compile([], [y]) user_warn_msg = "RNG Variable rng has multiple distinct clients" with pytest.warns(UserWarning, match=user_warn_msg): - f = compile_pymc([], [x, y], random_seed=456) + f = compile([], [x, y], random_seed=456) assert f() == f() # The user can provide an explicit update, but we will still issue a warning with pytest.warns(UserWarning, match=user_warn_msg): - f = compile_pymc([], [x, y], updates={rng: y.owner.outputs[0]}, random_seed=456) + f = compile([], [x, y], updates={rng: y.owner.outputs[0]}, random_seed=456) assert f() != f() # Same with default update rng.default_update = x.owner.outputs[0] with pytest.warns(UserWarning, match=user_warn_msg): - f = compile_pymc([], [x, y], updates={rng: y.owner.outputs[0]}, random_seed=456) + f = compile([], [x, y], updates={rng: y.owner.outputs[0]}, random_seed=456) assert f() != f() @pytest.mark.filterwarnings("error") # This is part of the test @@ -530,7 +530,7 @@ def test_duplicated_client_nodes(self): x = pt.random.normal(rng=rng) y = x.owner.clone().default_output() - fn = compile_pymc([], [x, y], random_seed=1) + fn = compile([], [x, y], random_seed=1) res_x1, res_y1 = fn() assert res_x1 == res_y1 res_x2, res_y2 = fn() @@ -545,7 +545,7 @@ def test_nested_updates(self): collect_default_updates(inputs=[], outputs=[x, y, z]) == {rng: next_rng3} - fn = compile_pymc([], [x, y, z], random_seed=514) + fn = compile([], [x, y, z], random_seed=514) assert not set(np.array(fn())) & set(np.array(fn())) # A local myopic rule (as PyMC used before, would not work properly) @@ -600,7 +600,7 @@ def step_wo_update(x, rng): assert collect_default_updates([ys]) == {rng: next(iter(next_rng.values()))} - fn = compile_pymc([], ys, random_seed=1) + fn = compile([], ys, random_seed=1) assert not (set(fn()) & set(fn())) def test_op_from_graph_updates(self): @@ -616,7 +616,7 @@ def test_op_from_graph_updates(self): next_rng, x = OpFromGraph([], [next_rng_, x_])() assert collect_default_updates([x]) == {rng: next_rng} - fn = compile_pymc([], x, random_seed=1) + fn = compile([], x, random_seed=1) assert not (set(fn()) & set(fn()))
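
Usage note (not part of the patch): the sketch below exercises the renamed API the same way the tests in this diff do. It assumes a PyMC version that includes this change; per the patch, `compile` keeps the automatic RNG updates that `compile_pymc` provided, and the old name is now a shim that emits a `FutureWarning`.

```python
import warnings

import pymc as pm
from pymc.pytensorf import compile, compile_pymc

# `compile` wraps `pytensor.function` with the specialized PyMC rewrites
# enabled and registers updates for the shared RNG variables in the graph,
# so repeated calls return independent draws (see test_compile_pymc_sets_rng_updates).
x = pm.Normal.dist()
draw_fn = compile(inputs=[], outputs=x, random_seed=431)
assert draw_fn() != draw_fn()  # RNG state advances between calls

# The old name still works but forwards to `compile` after warning.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    compile_pymc(inputs=[], outputs=x)
assert any(issubclass(w.category, FutureWarning) for w in caught)
```

As the `docs/source/api/pytensorf.rst` hunk shows, only `compile` is documented going forward; downstream code should migrate to the new name before the deprecated alias is removed.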