diff --git a/bayesflow/experimental/stable_consistency_model/stable_consistency_model.py b/bayesflow/experimental/stable_consistency_model/stable_consistency_model.py
index 0f787d44f..b061e4fe4 100644
--- a/bayesflow/experimental/stable_consistency_model/stable_consistency_model.py
+++ b/bayesflow/experimental/stable_consistency_model/stable_consistency_model.py
@@ -222,9 +222,9 @@ def _inverse(self, z: Tensor, conditions: Tensor = None, **kwargs) -> Tensor:
         z : Tensor
             Samples from a standard normal distribution
         conditions : Tensor, optional, default: None
-            Conditions for a approximate conditional distribution
+            Conditions for an approximate conditional distribution
         **kwargs : dict, optional, default: {}
-            Additional keyword arguments. Include `steps` (default: 30) to
+            Additional keyword arguments. Include `steps` (default: 15) to
            adjust the number of sampling steps.
 
         Returns
diff --git a/bayesflow/simulators/benchmark_simulators/lotka_volterra.py b/bayesflow/simulators/benchmark_simulators/lotka_volterra.py
index e6b22d0f9..fd6aefbc4 100644
--- a/bayesflow/simulators/benchmark_simulators/lotka_volterra.py
+++ b/bayesflow/simulators/benchmark_simulators/lotka_volterra.py
@@ -10,10 +10,10 @@ def __init__(
         X0: int = 30,
         Y0: int = 1,
         T: int | None = 20,
-        subsample: int = 10,
+        subsample: int | str = "original",
         flatten: bool = True,
         obs_noise: float = 0.1,
-        dt: float = None,
+        dt: float = 0.1,
         rng: np.random.Generator = None,
     ):
         """Lotka Volterra simulated benchmark.
@@ -27,14 +27,17 @@ def __init__(
             Initial number of predator species.
         T: int, optional, default: 20
             The duration (time horizon) of the simulation.
-        subsample: int or None, optional, default: 10
+        subsample: int, str or None, optional, default: 'original'
             The number of evenly spaced time points to return.
             If None, no subsampling will be performed and all T timepoints will be returned.
+            If 'original', the original benchmark task subsampling of 20 points is used.
         flatten: bool, optional, default: True
             A flag to indicate whether a 1D (`flatten=True`) or 2D (`flatten=False`)
             representation of the simulated data is returned.
         obs_noise: float, optional, default: 0.1
             The standard deviation of the log-normal likelihood.
+        dt: float, optional, default: 0.1
+            The time step size for the ODE solver.
         rng: np.random.Generator or None, optional, default: None
             An optional random number generator to use.
""" @@ -95,21 +98,23 @@ def observation_model(self, params: np.ndarray) -> np.ndarray: # Unpack parameter vector into scalars alpha, beta, gamma, delta = params - # Prepate time vector between 0 and T of length T - t_vec = np.linspace(0, self.T, int(1 / self.dt)) + # Prepare time vector between 0 and T of length T + t_vec = np.arange(0, self.T + self.dt, self.dt) # Integrate using scipy and retain only infected (2-nd dimension) pp = odeint(self._deriv, x0, t_vec, args=(alpha, beta, gamma, delta)) # Subsample evenly the specified number of points, if specified - if self.subsample is not None: + if self.subsample == "original": + pp = pp[::21] + elif self.subsample is not None: pp = pp[:: (self.T // self.subsample)] - # Ensure minimum count is 0, which will later pass by log(0 + 1) - pp[pp < 0] = 0.0 + # Ensure minimum count is 0 + pp = np.clip(pp, a_min=1e-10, a_max=10000.0) # Add noise, decide whether to flatten and return - x = self.rng.lognormal(np.log1p(pp), sigma=self.obs_noise) + x = self.rng.lognormal(pp, sigma=self.obs_noise) if self.flatten: return x.flatten() return x diff --git a/bayesflow/simulators/benchmark_simulators/sir.py b/bayesflow/simulators/benchmark_simulators/sir.py index e7b8a1118..4b889b6a5 100644 --- a/bayesflow/simulators/benchmark_simulators/sir.py +++ b/bayesflow/simulators/benchmark_simulators/sir.py @@ -11,7 +11,7 @@ def __init__( T: int = 160, I0: float = 1.0, R0: float = 0.0, - subsample: int = None, + subsample: int | str = "original", total_count: int = 1000, scale_by_total: bool = True, rng: np.random.Generator = None, @@ -27,15 +27,17 @@ def __init__( The size of the simulated population. T: int, optional, default: 160 The duration (time horizon) of the simulation. + The last time-point is not included. I0: float, optional, default: 1.0 The number of initially infected individuals. R0: float, optional, default: 0.0 The number of initially recovered individuals. - subsample: int or None, optional, default: 10 + subsample: int, str or None, optional, default: 'original' The number of evenly spaced time points to return. If `None`, no subsampling will be performed, all `T` timepoints will be returned and a trailing dimension will be added. If an integer is provided, subsampling is performed and no trailing dimension will be added. + 'original' reproduces the original benchmark task subsampling of 10 points. total_count: int, optional, default: 1000 The `N` parameter of the binomial noise distribution. Used just for scaling the data and magnifying the effect of noise, such that @@ -100,14 +102,16 @@ def observation_model(self, params: np.ndarray): # Unpack parameter vector into scalars beta, gamma = params - # Prepate time vector between 0 and T of length T - t_vec = np.linspace(0, self.T, self.T) + # Prepare time vector between 0 and T of length T + t_vec = np.arange(0, self.T) # Integrate using scipy and retain only infected (2-nd dimension) irt = odeint(self._deriv, x0, t_vec, args=(self.N, beta, gamma))[:, 1] # Subsample evenly the specified number of points, if specified - if self.subsample is not None: + if self.subsample == "original": + irt = irt[::17] + elif self.subsample is not None: irt = irt[:: (self.T // self.subsample)] else: irt = irt[:, None]