diff --git a/batchglm/train/numpy/glm_norm/utils.py b/batchglm/train/numpy/glm_norm/utils.py
index 356e0e6e..47cb75f9 100644
--- a/batchglm/train/numpy/glm_norm/utils.py
+++ b/batchglm/train/numpy/glm_norm/utils.py
@@ -1,6 +1,7 @@
 import math
 import numpy as np
 
+
 def ll(scale, loc, x):
     resid = loc - x
     ll = -.5 * np.log(2 * math.pi) - np.log(scale) - .5 * np.power(resid / scale, 2)
diff --git a/tests/numpy/test_shape.py b/tests/numpy/test_shape.py
new file mode 100644
index 00000000..a0421498
--- /dev/null
+++ b/tests/numpy/test_shape.py
@@ -0,0 +1,74 @@
+import logging
+import unittest
+
+from utils import get_estimator, get_generated_model
+
+from batchglm.train.numpy.base_glm import BaseModelContainer
+
+logger = logging.getLogger("batchglm")
+
+n_obs = 2000
+n_vars = 100
+num_batches = 4
+num_conditions = 2
+
+
+def _test_shape_of_model(model_container: BaseModelContainer) -> None:
+    """Check the shapes of the fitted/parametrized values against what is expected."""
+    assert model_container.theta_scale.shape == (model_container.model.num_scale_params, n_vars)
+    assert model_container.theta_location.shape == (model_container.model.num_loc_params, n_vars)
+
+    assert model_container.fim_weight_location_location.shape == (n_obs, n_vars)
+
+    assert model_container.hessian_weight_location_location.shape == (n_obs, n_vars)
+    assert model_container.hessian_weight_location_scale.shape == (n_obs, n_vars)
+    assert model_container.hessian_weight_scale_scale.shape == (n_obs, n_vars)
+
+    assert model_container.jac_scale.shape == (n_vars, model_container.model.num_scale_params)
+    assert model_container.jac_location.shape == (n_vars, model_container.model.num_loc_params)
+
+
+class TestShape(unittest.TestCase):
+
+    def _test_shape(self) -> bool:
+        dense_model = get_generated_model(
+            noise_model=self._model_name, num_conditions=num_conditions, num_batches=num_batches, sparse=False,
+            n_obs=n_obs, n_vars=n_vars
+        )
+        sparse_model = get_generated_model(
+            noise_model=self._model_name, num_conditions=num_conditions, num_batches=num_batches, sparse=True,
+            n_obs=n_obs, n_vars=n_vars
+        )
+        dense_estimator = get_estimator(
+            noise_model=self._model_name, model=dense_model, init_location="standard", init_scale="standard"
+        )
+        sparse_estimator = get_estimator(
+            noise_model=self._model_name, model=sparse_model, init_location="standard", init_scale="standard"
+        )
+        model_container_dense = dense_estimator.model_container
+        model_container_sparse = sparse_estimator.model_container
+        _test_shape_of_model(model_container_dense)
+        _test_shape_of_model(model_container_sparse)
+        return True
+
+
+class TestShapeNB(TestShape):
+
+    def __init__(self, *args, **kwargs):
+        self._model_name = "nb"
+        super(TestShapeNB, self).__init__(*args, **kwargs)
+
+    def test_shape(self) -> bool:
+        return self._test_shape()
+
+class TestShapeNorm(TestShape):
+
+    def __init__(self, *args, **kwargs):
+        self._model_name = "norm"
+        super(TestShapeNorm, self).__init__(*args, **kwargs)
+
+    def test_shape(self) -> bool:
+        return self._test_shape()
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/tests/numpy/utils.py b/tests/numpy/utils.py
index f89bcdb5..af41a7bc 100644
--- a/tests/numpy/utils.py
+++ b/tests/numpy/utils.py
@@ -39,7 +39,8 @@ def get_model(noise_model: str) -> _ModelGLM:
 
 
 def get_generated_model(
-    noise_model: str, num_conditions: int, num_batches: int, sparse: bool, mode: Optional[str] = None
+    noise_model: str, num_conditions: int, num_batches: int, sparse: bool, mode: Optional[str] = None,
+    n_obs: int = 2000, n_vars: int = 100,
 ) -> _ModelGLM:
     model = get_model(noise_model=noise_model)
 
@@ -85,8 +86,8 @@ def const(offset: float):
         raise ValueError(f"Mode {mode} not recognized.")
 
     model.generate_artificial_data(
-        n_obs=2000,
-        n_vars=100,
+        n_obs=n_obs,
+        n_vars=n_vars,
         num_conditions=num_conditions,
         num_batches=num_batches,
         intercept_scale=True,
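
For reference, a minimal usage sketch of the `n_obs`/`n_vars` keywords added to `get_generated_model`, mirroring the calls in `TestShape._test_shape` above. It assumes the `tests/numpy` directory is the working directory so that `utils` is importable and that the estimator exposes `model_container` as in the new test; the concrete sizes (500 observations, 10 variables) are illustrative only.

```python
# Sketch only: single dense "norm" model, following the pattern of TestShapeNorm.
from utils import get_estimator, get_generated_model

model = get_generated_model(
    noise_model="norm",
    num_conditions=2,
    num_batches=4,
    sparse=False,
    n_obs=500,   # new keyword introduced by this diff (default 2000)
    n_vars=10,   # new keyword introduced by this diff (default 100)
)
estimator = get_estimator(
    noise_model="norm", model=model, init_location="standard", init_scale="standard"
)
container = estimator.model_container

# Shapes then follow the convention checked in _test_shape_of_model:
assert container.theta_location.shape == (container.model.num_loc_params, 10)
assert container.jac_location.shape == (10, container.model.num_loc_params)
```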