|
34 | 34 | BetaNegativeBinomial,
|
35 | 35 | GeneralizedPoisson,
|
36 | 36 | Skellam,
|
| 37 | + GrassiaIIGeometric, |
37 | 38 | )
|
38 | 39 |
|
39 | 40 |
|
@@ -208,3 +209,119 @@ def test_logp(self):
|
208 | 209 | {"mu1": Rplus_small, "mu2": Rplus_small},
|
209 | 210 | lambda value, mu1, mu2: scipy.stats.skellam.logpmf(value, mu1, mu2),
|
210 | 211 | )
|
| 212 | + |
| 213 | + |
class TestGrassiaIIGeometric:
    """Tests for the Grassia(II)-Geometric distribution.

    The distribution is a geometric whose success probability is mixed over a
    gamma-distributed rate: lam ~ Gamma(r, rate=alpha), p = 1 - exp(-lam).
    """

    class TestRandomVariable(BaseTestDistributionRandom):
        pymc_dist = GrassiaIIGeometric
        pymc_dist_params = {"r": 1.0, "alpha": 2.0}
        expected_rv_op_params = {"r": 1.0, "alpha": 2.0}
        tests_to_run = [
            "check_pymc_params_match_rv_op",
            "check_rv_size",
        ]

        def test_random_basic_properties(self):
            # Reference sampler: draw lam ~ Gamma(shape=r, scale=1/alpha)
            # (i.e. rate=alpha), then Geometric(p = 1 - exp(-lam)).
            discrete_random_tester(
                dist=self.pymc_dist,
                paramdomains={"r": Rplus, "alpha": Rplus},
                ref_rand=lambda r, alpha, size: np.random.geometric(
                    1 - np.exp(-np.random.gamma(r, 1 / alpha, size=size)), size=size
                ),
            )

        @pytest.mark.parametrize(
            "r,alpha",
            [
                (0.5, 1.0),
                (1.0, 2.0),
                (2.0, 0.5),
                (5.0, 1.0),
            ],
        )
        def test_random_moments(self, r, alpha):
            dist = self.pymc_dist.dist(r=r, alpha=alpha, size=10_000)
            draws = dist.eval()

            # The support is the positive integers.
            assert np.all(draws > 0)
            assert np.all(draws.astype(int) == draws)

            # Exact moments are complex for this distribution, so only check
            # that the draws are non-degenerate.
            assert np.mean(draws) > 0
            assert np.var(draws) > 0

    def test_logp_basic(self):
        r = pt.scalar("r")
        alpha = pt.scalar("alpha")
        value = pt.vector("value", dtype="int64")

        logp = pm.logp(GrassiaIIGeometric.dist(r, alpha), value)
        logp_fn = pytensor.function([value, r, alpha], logp)

        # logp should be finite on the support for valid parameters.
        test_value = np.array([1, 2, 3, 4, 5])
        test_r = 1.0
        test_alpha = 1.0

        logp_vals = logp_fn(test_value, test_r, test_alpha)
        assert not np.any(np.isnan(logp_vals))
        assert np.all(np.isfinite(logp_vals))

        # Out-of-support values (value must be > 0) get -inf, not +inf:
        # PyMC logp implementations switch to -inf outside the support.
        assert logp_fn(np.array([0]), test_r, test_alpha) == -np.inf

        # Non-integer values cannot be fed to the int64-typed input at all.
        with pytest.raises(TypeError):
            logp_fn(np.array([1.5]), test_r, test_alpha)

        # Parameter restrictions: both r and alpha must be strictly positive.
        with pytest.raises(ParameterValueError):
            logp_fn(np.array([1]), -1.0, test_alpha)

        with pytest.raises(ParameterValueError):
            logp_fn(np.array([1]), test_r, -1.0)

    def test_sampling_consistency(self):
        """Test that sampling from the distribution produces reasonable results"""
        r = 2.0
        alpha = 1.0
        with pm.Model():
            x = GrassiaIIGeometric("x", r=r, alpha=alpha)
            trace = pm.sample(chains=1, draws=1000, random_seed=42).posterior

        samples = trace["x"].values.flatten()

        # Every posterior draw must lie in the support (positive integers).
        assert np.all(samples > 0)
        assert np.all(samples.astype(int) == samples)

        # Mean and variance should be positive and finite
        # (exact values depend on the parameterization).
        assert 0 < np.mean(samples) < np.inf
        assert 0 < np.var(samples) < np.inf

    @pytest.mark.parametrize(
        "r, alpha, size, expected_shape",
        [
            (1.0, 1.0, None, ()),  # Scalar output
            ([1.0, 2.0], 1.0, None, (2,)),  # Vector output from r
            (1.0, [1.0, 2.0], None, (2,)),  # Vector output from alpha
            (1.0, 1.0, (3, 2), (3, 2)),  # Explicit size
        ],
    )
    def test_support_point(self, r, alpha, size, expected_shape):
        """Test that support_point returns reasonable values with correct shapes"""
        with pm.Model() as model:
            GrassiaIIGeometric("x", r=r, alpha=alpha, size=size)

        init_point = model.initial_point()["x"]

        # Broadcasting of r/alpha/size determines the initial-point shape.
        assert init_point.shape == expected_shape

        # Initial values must lie in the support (positive integers).
        assert np.all(init_point > 0)
        assert np.all(init_point.astype(int) == init_point)

        # Check values are finite and reasonable.
        assert np.all(np.isfinite(init_point))
        assert np.all(init_point < 1e6)  # Should not be extremely large
0 commit comments