
 from .helpers import SeededTest, select_by_precision
 from ..vartypes import continuous_types
-from ..model import Model, Point, Potential, Deterministic
-from ..blocking import DictToVarBijection, DictToArrayBijection, ArrayOrdering
+from ..model import Model, Point, Deterministic
+from ..blocking import DictToVarBijection
 from ..distributions import (
     DensityDist, Categorical, Multinomial, VonMises, Dirichlet,
     MvStudentT, MvNormal, MatrixNormal, ZeroInflatedPoisson,
@@ -471,37 +471,9 @@ def check_int_to_1(self, model, value, domain, paramdomains):
         area = integrate_nd(pdfx, domain, value.dshape, value.dtype)
         assert_almost_equal(area, 1, err_msg=str(pt))

-    def check_dlogp(self, model, value, domain, paramdomains):
-        try:
-            from numdifftools import Gradient
-        except ImportError:
-            return
-        if not model.cont_vars:
-            return
-
-        domains = paramdomains.copy()
-        domains['value'] = domain
-        bij = DictToArrayBijection(
-            ArrayOrdering(model.cont_vars), model.test_point)
-        dlogp = bij.mapf(model.fastdlogp(model.cont_vars))
-        logp = bij.mapf(model.fastlogp)
-
-        def wrapped_logp(x):
-            try:
-                return logp(x)
-            except:
-                return np.nan
-
-        ndlogp = Gradient(wrapped_logp)
-        for pt in product(domains, n_samples=100):
-            pt = Point(pt, model=model)
-            pt = bij.map(pt)
-            decimals = select_by_precision(float64=6, float32=4)
-            assert_almost_equal(dlogp(pt), ndlogp(pt), decimal=decimals, err_msg=str(pt))
-
     def checkd(self, distfam, valuedomain, vardomains, checks=None, extra_args=None):
         if checks is None:
-            checks = (self.check_int_to_1, self.check_dlogp)
+            checks = (self.check_int_to_1, )

         if extra_args is None:
             extra_args = {}
@@ -940,7 +912,8 @@ def test_wishart(self, n):
         # This check compares the autodiff gradient to the numdiff gradient.
         # However, due to the strict constraints of the wishart,
         # it is impossible to numerically determine the gradient as a small
-        # pertubation breaks the symmetry. Thus disabling.
+        # pertubation breaks the symmetry. Thus disabling. Also, numdifftools was
+        # removed in June 2019, so an alternative would be needed.
         #
         # self.checkd(Wishart, PdMatrix(n), {'n': Domain([2, 3, 4, 2000]), 'V': PdMatrix(n)},
         #             checks=[self.check_dlogp])
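
For reference, the deleted check_dlogp compared the autodiff gradient against a numerical gradient from numdifftools.Gradient. Were an alternative ever needed, one dependency-free option could be a plain central-difference gradient; the following is only a minimal sketch of that idea, and numeric_grad plus the toy log-density are illustrative names, not part of the test suite.

import numpy as np

def numeric_grad(logp, x, eps=1e-6):
    # Central-difference approximation of d(logp)/dx, one coordinate at a time.
    x = np.asarray(x, dtype=float)
    grad = np.empty_like(x)
    for i in range(x.size):
        step = np.zeros_like(x)
        step.flat[i] = eps
        grad.flat[i] = (logp(x + step) - logp(x - step)) / (2 * eps)
    return grad

# Compare an analytic gradient with the numerical one, as check_dlogp did via numdifftools.
logp = lambda x: -0.5 * np.sum(x ** 2)   # standard-normal log density (up to a constant)
dlogp = lambda x: -x                     # its exact gradient
pt = np.array([0.3, -1.2])
np.testing.assert_almost_equal(dlogp(pt), numeric_grad(logp, pt), decimal=6)
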
@@ -1120,12 +1093,6 @@ def logp(x):
             return -log(2 * .5) - abs(x - .5) / .5
         self.checkd(DensityDist, R, {}, extra_args={'logp': logp})

-    def test_addpotential(self):
-        with Model() as model:
-            value = Normal('value', 1, 1)
-            Potential('value_squared', -value ** 2)
-            self.check_dlogp(model, value, R, {})
-
     def test_get_tau_sigma(self):
         sigma = np.array([2])
         assert_almost_equal(continuous.get_tau_sigma(sigma=sigma), [1. / sigma**2, sigma])