Skip to content
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 8 additions & 3 deletions tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,9 +42,14 @@ def strict_float32():
yield


@pytest.fixture(scope="function", autouse=False)
def seeded_test():
np.random.seed(20160911)
@pytest.fixture(scope="function")
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'd rather move away from magic fixtures — just let each test create its own rng; it's not that much extra code

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Okay, I will make required changes

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Please check the changes

def rng():
    """Seeded NumPy random-number-generator fixture for reproducible tests.

    Built on the modern ``numpy.random.Generator`` API rather than the
    legacy global ``np.random.seed()`` mechanism, so each test receives
    its own independent, deterministically seeded RNG instance.
    """
    fixed_seed = 20160911
    return np.random.default_rng(fixed_seed)


@pytest.fixture
Expand Down
24 changes: 12 additions & 12 deletions tests/distributions/test_mixture.py
Original file line number Diff line number Diff line change
Expand Up @@ -783,7 +783,7 @@ def test_preventing_mixing_cont_and_discrete(self):


class TestNormalMixture:
def test_normal_mixture_sampling(self, seeded_test):
def test_normal_mixture_sampling(self, rng):
norm_w = np.array([0.75, 0.25])
norm_mu = np.array([0.0, 5.0])
norm_sigma = np.ones_like(norm_mu)
Expand Down Expand Up @@ -813,12 +813,12 @@ def test_normal_mixture_sampling(self, seeded_test):
@pytest.mark.parametrize(
"nd, ncomp", [((), 5), (1, 5), (3, 5), ((3, 3), 5), (3, 3), ((3, 3), 3)], ids=str
)
def test_normal_mixture_nd(self, seeded_test, nd, ncomp):
def test_normal_mixture_nd(self, rng, nd, ncomp):
nd = to_tuple(nd)
ncomp = int(ncomp)
comp_shape = (*nd, ncomp)
test_mus = np.random.randn(*comp_shape)
test_taus = np.random.gamma(1, 1, size=comp_shape)
test_mus = rng.standard_normal(comp_shape)
test_taus = rng.gamma(1, 1, size=comp_shape)
observed = generate_normal_mixture_data(
w=np.ones(ncomp) / ncomp, mu=test_mus, sigma=1 / np.sqrt(test_taus), size=10
)
Expand Down Expand Up @@ -860,10 +860,10 @@ def test_normal_mixture_nd(self, seeded_test, nd, ncomp):
assert_allclose(logp0, logp1)
assert_allclose(logp0, logp2)

def test_random(self, seeded_test):
def test_random(self, rng):
def ref_rand(size, w, mu, sigma):
component = np.random.choice(w.size, size=size, p=w)
return np.random.normal(mu[component], sigma[component], size=size)
component = rng.choice(w.size, size=size, p=w)
return rng.normal(mu[component], sigma[component], size=size)

continuous_random_tester(
NormalMixture,
Expand Down Expand Up @@ -1028,8 +1028,8 @@ def setup_class(cls):
cls.mixture_comps = 10

@pytest.mark.parametrize("batch_shape", [(3, 4), (20,)], ids=str)
def test_with_multinomial(self, seeded_test, batch_shape):
p = np.random.uniform(size=(*batch_shape, self.mixture_comps, 3))
def test_with_multinomial(self, rng, batch_shape):
p = rng.uniform(size=(*batch_shape, self.mixture_comps, 3))
p /= p.sum(axis=-1, keepdims=True)
n = 100 * np.ones((*batch_shape, 1))
w = np.ones(self.mixture_comps) / self.mixture_comps
Expand Down Expand Up @@ -1063,10 +1063,10 @@ def test_with_multinomial(self, seeded_test, batch_shape):
rtol,
)

def test_with_mvnormal(self, seeded_test):
def test_with_mvnormal(self, rng):
# 10 batch, 3-variate Gaussian
mu = np.random.randn(self.mixture_comps, 3)
mat = np.random.randn(3, 3)
mu = rng.standard_normal((self.mixture_comps, 3))
mat = rng.standard_normal((3, 3))
cov = mat @ mat.T
chol = np.linalg.cholesky(cov)
w = np.ones(self.mixture_comps) / self.mixture_comps
Expand Down
29 changes: 14 additions & 15 deletions tests/distributions/test_simulator.py
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,7 @@ def setup_class(self):
c = pm.Potential("c", pm.math.switch(a > 0, 0, -np.inf))
s = pm.Simulator("s", self.normal_sim, a, b, observed=self.data)

def test_one_gaussian(self, seeded_test):
def test_one_gaussian(self, rng):
assert self.count_rvs(self.SMABC_test.logp()) == 1

with self.SMABC_test:
Expand Down Expand Up @@ -102,7 +102,7 @@ def test_one_gaussian(self, seeded_test):
"float64",
],
)
def test_custom_dist_sum_stat(self, seeded_test, floatX):
def test_custom_dist_sum_stat(self, rng, floatX):
with pytensor.config.change_flags(floatX=floatX):
with pm.Model() as m:
a = pm.Normal("a", mu=0, sigma=1)
Expand All @@ -125,7 +125,7 @@ def test_custom_dist_sum_stat(self, seeded_test, floatX):
pm.sample_smc(draws=100)

@pytest.mark.parametrize("floatX", ["float32", "float64"])
def test_custom_dist_sum_stat_scalar(self, seeded_test, floatX):
def test_custom_dist_sum_stat_scalar(self, rng, floatX):
"""
Test that automatically wrapped functions cope well with scalar inputs
"""
Expand Down Expand Up @@ -156,22 +156,22 @@ def test_custom_dist_sum_stat_scalar(self, seeded_test, floatX):
)
assert self.count_rvs(m.logp()) == 1

def test_model_with_potential(self, seeded_test):
def test_model_with_potential(self, rng):
assert self.count_rvs(self.SMABC_potential.logp()) == 1

with self.SMABC_potential:
trace = pm.sample_smc(draws=100, chains=1, return_inferencedata=False)
assert np.all(trace["a"] >= 0)

def test_simulator_metropolis_mcmc(self, seeded_test):
def test_simulator_metropolis_mcmc(self, rng):
with self.SMABC_test as m:
step = pm.Metropolis([m.rvs_to_values[m["a"]], m.rvs_to_values[m["b"]]])
trace = pm.sample(step=step, return_inferencedata=False)

assert abs(self.data.mean() - trace["a"].mean()) < 0.05
assert abs(self.data.std() - trace["b"].mean()) < 0.05

def test_multiple_simulators(self, seeded_test):
def test_multiple_simulators(self, rng):
true_a = 2
true_b = -2

Expand Down Expand Up @@ -221,9 +221,8 @@ def test_multiple_simulators(self, seeded_test):
assert abs(true_a - trace["a"].mean()) < 0.05
assert abs(true_b - trace["b"].mean()) < 0.05

def test_nested_simulators(self, seeded_test):
def test_nested_simulators(self, rng):
true_a = 2
rng = np.random.RandomState(20160911)
data = rng.normal(true_a, 0.1, size=1000)

with pm.Model() as m:
Expand Down Expand Up @@ -251,7 +250,7 @@ def test_nested_simulators(self, seeded_test):

assert np.abs(true_a - trace["sim1"].mean()) < 0.1

def test_upstream_rngs_not_in_compiled_logp(self, seeded_test):
def test_upstream_rngs_not_in_compiled_logp(self, rng):
smc = IMH(model=self.SMABC_test)
smc.initialize_population()
smc._initialize_kernel()
Expand All @@ -270,7 +269,7 @@ def test_upstream_rngs_not_in_compiled_logp(self, seeded_test):
]
assert len(shared_rng_vars) == 1

def test_simulator_error_msg(self, seeded_test):
def test_simulator_error_msg(self, rng):
msg = "The distance metric not_real is not implemented"
with pytest.raises(ValueError, match=msg):
with pm.Model() as m:
Expand All @@ -287,7 +286,7 @@ def test_simulator_error_msg(self, seeded_test):
sim = pm.Simulator("sim", self.normal_sim, 0, params=(1))

@pytest.mark.xfail(reason="KL not refactored")
def test_automatic_use_of_sort(self, seeded_test):
def test_automatic_use_of_sort(self, rng):
with pm.Model() as model:
s_k = pm.Simulator(
"s_k",
Expand All @@ -299,7 +298,7 @@ def test_automatic_use_of_sort(self, seeded_test):
)
assert s_k.distribution.sum_stat is pm.distributions.simulator.identity

def test_name_is_string_type(self, seeded_test):
def test_name_is_string_type(self, rng):
with self.SMABC_potential:
assert not self.SMABC_potential.name
with warnings.catch_warnings():
Expand All @@ -310,7 +309,7 @@ def test_name_is_string_type(self, seeded_test):
trace = pm.sample_smc(draws=10, chains=1, return_inferencedata=False)
assert isinstance(trace._straces[0].name, str)

def test_named_model(self, seeded_test):
def test_named_model(self, rng):
# Named models used to fail with Simulator because the arguments to the
# random fn used to be passed by name. This is no longer true.
# https://github.com/pymc-devs/pymc/pull/4365#issuecomment-761221146
Expand All @@ -330,7 +329,7 @@ def test_named_model(self, seeded_test):
@pytest.mark.parametrize("mu", [0, np.arange(3)], ids=str)
@pytest.mark.parametrize("sigma", [1, np.array([1, 2, 5])], ids=str)
@pytest.mark.parametrize("size", [None, 3, (5, 3)], ids=str)
def test_simulator_support_point(self, seeded_test, mu, sigma, size):
def test_simulator_support_point(self, rng, mu, sigma, size):
def normal_sim(rng, mu, sigma, size):
return rng.normal(mu, sigma, size=size)

Expand Down Expand Up @@ -364,7 +363,7 @@ def normal_sim(rng, mu, sigma, size):

assert np.all(np.abs((result - expected_sample_mean) / expected_sample_mean_std) < cutoff)

def test_dist(self, seeded_test):
def test_dist(self, rng):
x = pm.Simulator.dist(self.normal_sim, 0, 1, sum_stat="sort", shape=(3,))
x = cloudpickle.loads(cloudpickle.dumps(x))

Expand Down
26 changes: 13 additions & 13 deletions tests/sampling/test_forward.py
Original file line number Diff line number Diff line change
Expand Up @@ -631,8 +631,8 @@ def test_sum_normal(self):
_, pval = stats.kstest(ppc["b"].flatten(), stats.norm(scale=scale).cdf)
assert pval > 0.001

def test_model_not_drawable_prior(self, seeded_test):
data = np.random.poisson(lam=10, size=200)
def test_model_not_drawable_prior(self, rng):
data = rng.poisson(lam=10, size=200)
model = pm.Model()
with model:
mu = pm.HalfFlat("sigma")
Expand Down Expand Up @@ -1155,8 +1155,8 @@ def point_list_arg_bug_fixture() -> tuple[pm.Model, pm.backends.base.MultiTrace]


class TestSamplePriorPredictive:
def test_ignores_observed(self, seeded_test):
observed = np.random.normal(10, 1, size=200)
def test_ignores_observed(self, rng):
observed = rng.normal(10, 1, size=200)
with pm.Model():
# Use a prior that's way off to show we're ignoring the observed variables
observed_data = pm.Data("observed_data", observed)
Expand Down Expand Up @@ -1196,9 +1196,9 @@ def test_multivariate(self):

assert trace.prior["m"].shape == (1, 10, 4)

def test_multivariate2(self, seeded_test):
def test_multivariate2(self, rng):
# Added test for issue #3271
mn_data = np.random.multinomial(n=100, pvals=[1 / 6.0] * 6, size=10)
mn_data = rng.multinomial(n=100, pvals=[1 / 6.0] * 6, size=10)
with pm.Model() as dm_model:
probs = pm.Dirichlet("probs", a=np.ones(6))
obs = pm.Multinomial("obs", n=100, p=probs, observed=mn_data)
Expand Down Expand Up @@ -1229,10 +1229,10 @@ def test_layers(self):
avg = np.stack([b_sampler() for i in range(10000)]).mean(0)
npt.assert_array_almost_equal(avg, 0.5 * np.ones((10,)), decimal=2)

def test_transformed(self, seeded_test):
def test_transformed(self, rng):
n = 18
at_bats = 45 * np.ones(n, dtype=int)
hits = np.random.randint(1, 40, size=n, dtype=int)
hits = rng.integers(1, 40, size=n, dtype=int)
draws = 50

with pm.Model() as model:
Expand All @@ -1250,9 +1250,9 @@ def test_transformed(self, seeded_test):
assert gen.prior_predictive["y"].shape == (1, draws, n)
assert "thetas" in gen.prior.data_vars

def test_shared(self, seeded_test):
def test_shared(self, rng):
n1 = 10
obs = shared(np.random.rand(n1) < 0.5)
obs = shared(rng.random(n1) < 0.5)
draws = 50

with pm.Model() as m:
Expand All @@ -1265,15 +1265,15 @@ def test_shared(self, seeded_test):
assert gen1.prior["o"].shape == (1, draws, n1)

n2 = 20
obs.set_value(np.random.rand(n2) < 0.5)
obs.set_value(rng.random(n2) < 0.5)
with m:
gen2 = pm.sample_prior_predictive(draws)

assert gen2.prior_predictive["y"].shape == (1, draws, n2)
assert gen2.prior["o"].shape == (1, draws, n2)

def test_density_dist(self, seeded_test):
obs = np.random.normal(-1, 0.1, size=10)
def test_density_dist(self, rng):
obs = rng.normal(-1, 0.1, size=10)
with pm.Model():
mu = pm.Normal("mu", 0, 1)
sigma = pm.HalfNormal("sigma", 1e-6)
Expand Down
6 changes: 3 additions & 3 deletions tests/sampling/test_mcmc.py
Original file line number Diff line number Diff line change
Expand Up @@ -874,9 +874,9 @@ def test_float32(self):


class TestShared:
def test_sample(self, seeded_test):
x = np.random.normal(size=100)
y = x + np.random.normal(scale=1e-2, size=100)
def test_sample(self, rng):
x = rng.normal(size=100)
y = x + rng.normal(scale=1e-2, size=100)

x_pred = np.linspace(-3, 3, 200)

Expand Down
18 changes: 9 additions & 9 deletions tests/test_data.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,9 +38,9 @@ def test_deterministic(self):
pm.Normal("y", 0, 1, observed=X)
model.compile_logp()(model.initial_point())

def test_sample(self, seeded_test):
x = np.random.normal(size=100)
y = x + np.random.normal(scale=1e-2, size=100)
def test_sample(self, rng):
x = rng.normal(size=100)
y = x + rng.normal(scale=1e-2, size=100)

x_pred = np.linspace(-3, 3, 200, dtype="float32")

Expand Down Expand Up @@ -311,10 +311,10 @@ def test_model_to_graphviz_for_model_with_data_container(self, tmp_path):
pm.model_to_graphviz(model, save=tmp_path / "a_model", dpi=100)
assert path.exists(tmp_path / "a_model.png")

def test_explicit_coords(self, seeded_test):
def test_explicit_coords(self, rng):
N_rows = 5
N_cols = 7
data = np.random.uniform(size=(N_rows, N_cols))
data = rng.uniform(size=(N_rows, N_cols))
coords = {
"rows": [f"R{r + 1}" for r in range(N_rows)],
"columns": [f"C{c + 1}" for c in range(N_cols)],
Expand Down Expand Up @@ -369,10 +369,10 @@ def test_symbolic_coords(self):
assert pmodel.dim_lengths["row"].eval() == 4
assert pmodel.dim_lengths["column"].eval() == 5

def test_implicit_coords_series(self, seeded_test):
def test_implicit_coords_series(self, rng):
pd = pytest.importorskip("pandas")
ser_sales = pd.Series(
data=np.random.randint(low=0, high=30, size=22),
data=rng.integers(low=0, high=30, size=22),
index=pd.date_range(start="2020-05-01", periods=22, freq="24h", name="date"),
name="sales",
)
Expand All @@ -383,13 +383,13 @@ def test_implicit_coords_series(self, seeded_test):
assert len(pmodel.coords["date"]) == 22
assert pmodel.named_vars_to_dims == {"sales": ("date",)}

def test_implicit_coords_dataframe(self, seeded_test):
def test_implicit_coords_dataframe(self, rng):
pd = pytest.importorskip("pandas")
N_rows = 5
N_cols = 7
df_data = pd.DataFrame()
for c in range(N_cols):
df_data[f"Column {c + 1}"] = np.random.normal(size=(N_rows,))
df_data[f"Column {c + 1}"] = rng.normal(size=(N_rows,))
df_data.index.name = "rows"
df_data.columns.name = "columns"

Expand Down
2 changes: 1 addition & 1 deletion tests/variational/test_inference.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@
from pymc.variational.opvi import NotImplementedInference
from tests import models

pytestmark = pytest.mark.usefixtures("strict_float32", "seeded_test", "fail_on_warning")
pytestmark = pytest.mark.usefixtures("strict_float32", "fail_on_warning")


@pytest.mark.parametrize("score", [True, False])
Expand Down