diff --git a/bayesflow/simulators/__init__.py b/bayesflow/simulators/__init__.py
index 7f808188b..26b3ac7dc 100644
--- a/bayesflow/simulators/__init__.py
+++ b/bayesflow/simulators/__init__.py
@@ -12,8 +12,16 @@
 
 from .simulator import Simulator
 from .benchmark_simulators import (
+    BernoulliGLM,
+    BernoulliGLMRaw,
+    GaussianLinear,
+    GaussianLinearUniform,
+    GaussianMixture,
+    InverseKinematics,
     LotkaVolterra,
     SIR,
+    SLCP,
+    SLCPDistractors,
     TwoMoons,
 )
 
diff --git a/bayesflow/simulators/benchmark_simulators/__init__.py b/bayesflow/simulators/benchmark_simulators/__init__.py
index f11c3f1e6..6cd957388 100644
--- a/bayesflow/simulators/benchmark_simulators/__init__.py
+++ b/bayesflow/simulators/benchmark_simulators/__init__.py
@@ -1,3 +1,11 @@
+from .bernoulli_glm import BernoulliGLM
+from .bernoulli_glm_raw import BernoulliGLMRaw
+from .gaussian_linear import GaussianLinear
+from .gaussian_linear_uniform import GaussianLinearUniform
+from .gaussian_mixture import GaussianMixture
+from .inverse_kinematics import InverseKinematics
 from .lotka_volterra import LotkaVolterra
 from .sir import SIR
+from .slcp import SLCP
+from .slcp_distractors import SLCPDistractors
 from .two_moons import TwoMoons
diff --git a/bayesflow/simulators/benchmark_simulators/gaussian_linear.py b/bayesflow/simulators/benchmark_simulators/gaussian_linear.py
index 822bf5a3c..e6a740df1 100644
--- a/bayesflow/simulators/benchmark_simulators/gaussian_linear.py
+++ b/bayesflow/simulators/benchmark_simulators/gaussian_linear.py
@@ -75,5 +75,10 @@ def observation_model(self, params: np.ndarray):
         # Generate prior predictive samples, possibly a single if n_obs is None
         if self.n_obs is None:
             return self.rng.normal(loc=params, scale=self.obs_scale)
-        x = self.rng.normal(loc=params, scale=self.obs_scale, size=(self.n_obs, params.shape[0], params.shape[1]))
-        return np.transpose(x, (1, 0, 2))
+        if params.ndim == 2:
+            # batched sampling with n_obs
+            x = self.rng.normal(loc=params, scale=self.obs_scale, size=(self.n_obs, params.shape[0], params.shape[1]))
+            return np.transpose(x, (1, 0, 2))
+        elif params.ndim == 1:
+            # non-batched sampling with n_obs
+            return self.rng.normal(loc=params, scale=self.obs_scale, size=(self.n_obs, params.shape[0]))
diff --git a/bayesflow/simulators/benchmark_simulators/gaussian_linear_uniform.py b/bayesflow/simulators/benchmark_simulators/gaussian_linear_uniform.py
index d7a67c407..9d3533a64 100644
--- a/bayesflow/simulators/benchmark_simulators/gaussian_linear_uniform.py
+++ b/bayesflow/simulators/benchmark_simulators/gaussian_linear_uniform.py
@@ -79,5 +79,10 @@ def observation_model(self, params: np.ndarray):
         # Generate prior predictive samples, possibly a single if n_obs is None
         if self.n_obs is None:
             return self.rng.normal(loc=params, scale=self.obs_scale)
-        x = self.rng.normal(loc=params, scale=self.obs_scale, size=(self.n_obs, params.shape[0], params.shape[1]))
-        return np.transpose(x, (1, 0, 2))
+        if params.ndim == 2:
+            # batched sampling with n_obs
+            x = self.rng.normal(loc=params, scale=self.obs_scale, size=(self.n_obs, params.shape[0], params.shape[1]))
+            return np.transpose(x, (1, 0, 2))
+        elif params.ndim == 1:
+            # non-batched sampling with n_obs
+            return self.rng.normal(loc=params, scale=self.obs_scale, size=(self.n_obs, params.shape[0]))
diff --git a/tests/test_simulators/conftest.py b/tests/test_simulators/conftest.py
index 15b26100d..29d7eaf15 100644
--- a/tests/test_simulators/conftest.py
+++ b/tests/test_simulators/conftest.py
@@ -22,6 +22,97 @@ def use_squeezed(request):
     return request.param
 
 
+@pytest.fixture()
+def bernoulli_glm():
+    from bayesflow.simulators import BernoulliGLM
+
+    return BernoulliGLM()
+
+
+@pytest.fixture()
+def bernoulli_glm_raw():
+    from bayesflow.simulators import BernoulliGLMRaw
+
+    return BernoulliGLMRaw()
+
+
+@pytest.fixture()
+def gaussian_linear():
+    from bayesflow.simulators import GaussianLinear
+
+    return GaussianLinear()
+
+
+@pytest.fixture()
+def gaussian_linear_n_obs():
+    from bayesflow.simulators import GaussianLinear
+
+    return GaussianLinear(n_obs=5)
+
+
+@pytest.fixture()
+def gaussian_linear_uniform():
+    from bayesflow.simulators import GaussianLinearUniform
+
+    return GaussianLinearUniform()
+
+
+@pytest.fixture()
+def gaussian_linear_uniform_n_obs():
+    from bayesflow.simulators import GaussianLinearUniform
+
+    return GaussianLinearUniform(n_obs=5)
+
+
+@pytest.fixture(
+    params=["gaussian_linear", "gaussian_linear_n_obs", "gaussian_linear_uniform", "gaussian_linear_uniform_n_obs"]
+)
+def gaussian_linear_simulator(request):
+    return request.getfixturevalue(request.param)
+
+
+@pytest.fixture()
+def gaussian_mixture():
+    from bayesflow.simulators import GaussianMixture
+
+    return GaussianMixture()
+
+
+@pytest.fixture()
+def inverse_kinematics():
+    from bayesflow.simulators import InverseKinematics
+
+    return InverseKinematics()
+
+
+@pytest.fixture()
+def lotka_volterra():
+    from bayesflow.simulators import LotkaVolterra
+
+    return LotkaVolterra()
+
+
+@pytest.fixture()
+def sir():
+    from bayesflow.simulators import SIR
+
+    return SIR()
+
+
+@pytest.fixture()
+def slcp():
+    from bayesflow.simulators import SLCP
+
+    return SLCP()
+
+
+@pytest.fixture()
+def slcp_distractors():
+    from bayesflow.simulators import SLCPDistractors
+
+    return SLCPDistractors()
+
+
 @pytest.fixture()
 def composite_two_moons():
     from bayesflow.simulators import make_simulator
@@ -40,13 +131,40 @@ def observables(parameters):
     return make_simulator([parameters, observables])
 
 
-@pytest.fixture(params=["composite_two_moons", "two_moons"])
-def simulator(request):
-    return request.getfixturevalue(request.param)
-
-
 @pytest.fixture()
 def two_moons():
     from bayesflow.simulators import TwoMoons
 
     return TwoMoons()
+
+
+@pytest.fixture(
+    params=[
+        "composite_two_moons",
+        "two_moons",
+    ]
+)
+def two_moons_simulator(request):
+    return request.getfixturevalue(request.param)
+
+
+@pytest.fixture(
+    params=[
+        "bernoulli_glm",
+        "bernoulli_glm_raw",
+        "gaussian_linear",
+        "gaussian_linear_n_obs",
+        "gaussian_linear_uniform",
+        "gaussian_linear_uniform_n_obs",
+        "gaussian_mixture",
+        "inverse_kinematics",
+        "lotka_volterra",
+        "sir",
+        "slcp",
+        "slcp_distractors",
+        "composite_two_moons",
+        "two_moons",
+    ]
+)
+def simulator(request):
+    return request.getfixturevalue(request.param)
diff --git a/tests/test_simulators/test_simulators.py b/tests/test_simulators/test_simulators.py
index 87b642618..f95d3fef8 100644
--- a/tests/test_simulators/test_simulators.py
+++ b/tests/test_simulators/test_simulators.py
@@ -2,8 +2,8 @@
 import numpy as np
 
 
-def test_two_moons(simulator, batch_size):
-    samples = simulator.sample((batch_size,))
+def test_two_moons(two_moons_simulator, batch_size):
+    samples = two_moons_simulator.sample((batch_size,))
 
     assert isinstance(samples, dict)
     assert list(samples.keys()) == ["parameters", "observables"]
@@ -13,6 +13,14 @@
     assert samples["observables"].shape == (batch_size, 2)
 
 
+def test_gaussian_linear(gaussian_linear_simulator, batch_size):
+    samples = gaussian_linear_simulator.sample((batch_size,))
+
+    # test n_obs respected if applicable
+    if hasattr(gaussian_linear_simulator, "n_obs") and isinstance(gaussian_linear_simulator.n_obs, int):
+        assert samples["observables"].shape[1] == gaussian_linear_simulator.n_obs
+
+
 def test_sample(simulator, batch_size):
     samples = simulator.sample((batch_size,))