Skip to content

Commit 9d44c39

Browse files
ColCarroll authored and twiecki committed
Add dangerous-default-variable check to pylint
1 parent 2f2d961 commit 9d44c39

File tree

6 files changed

+44
-23
lines changed

6 files changed

+44
-23
lines changed

.pylintrc

Lines changed: 1 addition & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -47,6 +47,7 @@ enable=import-error,
4747
used-before-assignment,
4848
cell-var-from-loop,
4949
global-variable-undefined,
50+
dangerous-default-value,
5051
# redefined-builtin,
5152
redefine-in-handler,
5253
unused-import,

pymc3/gp/gp.py

Lines changed: 25 additions & 18 deletions
Original file line number · Diff line number · Diff line change
@@ -5,7 +5,7 @@
55
from pymc3.gp.cov import Covariance, Constant
66
from pymc3.gp.mean import Zero
77
from pymc3.gp.util import (conditioned_vars,
8-
infer_shape, stabilize, cholesky, solve_lower, solve_upper)
8+
infer_shape, stabilize, cholesky, solve_lower, solve_upper)
99
from pymc3.distributions import draw_values
1010

1111
__all__ = ['Latent', 'Marginal', 'TP', 'MarginalSparse']
@@ -113,7 +113,7 @@ def _build_prior(self, name, X, reparameterize=True, **kwargs):
113113

114114
def prior(self, name, X, reparameterize=True, **kwargs):
115115
R"""
116-
Returns the GP prior distribution evaluated over the input
116+
Returns the GP prior distribution evaluated over the input
117117
locations `X`.
118118
119119
This is the prior probability over the space
@@ -142,6 +142,8 @@ def prior(self, name, X, reparameterize=True, **kwargs):
142142
return f
143143

144144
def _get_given_vals(self, given):
145+
if given is None:
146+
given = {}
145147
if 'gp' in given:
146148
cov_total = given['gp'].cov_func
147149
mean_total = given['gp'].mean_func
@@ -165,9 +167,9 @@ def _build_conditional(self, Xnew, X, f, cov_total, mean_total):
165167
cov = Kss - tt.dot(tt.transpose(A), A)
166168
return mu, cov
167169

168-
def conditional(self, name, Xnew, given={}, **kwargs):
170+
def conditional(self, name, Xnew, given=None, **kwargs):
169171
R"""
170-
Returns the conditional distribution evaluated over new input
172+
Returns the conditional distribution evaluated over new input
171173
locations `Xnew`.
172174
173175
Given a set of function values `f` that
@@ -194,7 +196,6 @@ def conditional(self, name, Xnew, given={}, **kwargs):
194196
Extra keyword arguments that are passed to `MvNormal` distribution
195197
constructor.
196198
"""
197-
198199
givens = self._get_given_vals(given)
199200
mu, cov = self._build_conditional(Xnew, *givens)
200201
chol = cholesky(stabilize(cov))
@@ -255,7 +256,7 @@ def _build_prior(self, name, X, reparameterize=True, **kwargs):
255256

256257
def prior(self, name, X, reparameterize=True, **kwargs):
257258
R"""
258-
Returns the TP prior distribution evaluated over the input
259+
Returns the TP prior distribution evaluated over the input
259260
locations `X`.
260261
261262
This is the prior probability over the space
@@ -295,7 +296,7 @@ def _build_conditional(self, Xnew, X, f):
295296

296297
def conditional(self, name, Xnew, **kwargs):
297298
R"""
298-
Returns the conditional distribution evaluated over new input
299+
Returns the conditional distribution evaluated over new input
299300
locations `Xnew`.
300301
301302
Given a set of function values `f` that
@@ -379,7 +380,7 @@ def _build_marginal_likelihood(self, X, noise):
379380

380381
def marginal_likelihood(self, name, X, y, noise, is_observed=True, **kwargs):
381382
R"""
382-
Returns the marginal likelihood distribution, given the input
383+
Returns the marginal likelihood distribution, given the input
383384
locations `X` and the data `y`.
384385
385386
This is integral over the product of the GP prior and a normal likelihood.
@@ -423,6 +424,9 @@ def marginal_likelihood(self, name, X, y, noise, is_observed=True, **kwargs):
423424
return pm.MvNormal(name, mu=mu, chol=chol, shape=shape, **kwargs)
424425

425426
def _get_given_vals(self, given):
427+
if given is None:
428+
given = {}
429+
426430
if 'gp' in given:
427431
cov_total = given['gp'].cov_func
428432
mean_total = given['gp'].mean_func
@@ -460,9 +464,9 @@ def _build_conditional(self, Xnew, pred_noise, diag, X, y, noise,
460464
cov += noise(Xnew)
461465
return mu, stabilize(cov)
462466

463-
def conditional(self, name, Xnew, pred_noise=False, given={}, **kwargs):
467+
def conditional(self, name, Xnew, pred_noise=False, given=None, **kwargs):
464468
R"""
465-
Returns the conditional distribution evaluated over new input
469+
Returns the conditional distribution evaluated over new input
466470
locations `Xnew`.
467471
468472
Given a set of function values `f` that the GP prior was over, the
@@ -499,7 +503,7 @@ def conditional(self, name, Xnew, pred_noise=False, given={}, **kwargs):
499503
shape = infer_shape(Xnew, kwargs.pop("shape", None))
500504
return pm.MvNormal(name, mu=mu, chol=chol, shape=shape, **kwargs)
501505

502-
def predict(self, Xnew, point=None, diag=False, pred_noise=False, given={}):
506+
def predict(self, Xnew, point=None, diag=False, pred_noise=False, given=None):
503507
R"""
504508
Return the mean vector and covariance matrix of the conditional
505509
distribution as numpy arrays, given a `point`, such as the MAP
@@ -521,11 +525,13 @@ def predict(self, Xnew, point=None, diag=False, pred_noise=False, given={}):
521525
given : dict
522526
Same as `conditional` method.
523527
"""
528+
if given is None:
529+
given = {}
524530

525531
mu, cov = self.predictt(Xnew, diag, pred_noise, given)
526532
return draw_values([mu, cov], point=point)
527533

528-
def predictt(self, Xnew, diag=False, pred_noise=False, given={}):
534+
def predictt(self, Xnew, diag=False, pred_noise=False, given=None):
529535
R"""
530536
Return the mean vector and covariance matrix of the conditional
531537
distribution as symbolic variables.
@@ -544,7 +550,6 @@ def predictt(self, Xnew, diag=False, pred_noise=False, given={}):
544550
given : dict
545551
Same as `conditional` method.
546552
"""
547-
548553
givens = self._get_given_vals(given)
549554
mu, cov = self._build_conditional(Xnew, pred_noise, diag, *givens)
550555
return mu, cov
@@ -646,7 +651,7 @@ def _build_marginal_likelihood_logp(self, X, Xu, y, sigma):
646651
trace = ((1.0 / (2.0 * sigma2)) *
647652
(tt.sum(self.cov_func(X, diag=True)) -
648653
tt.sum(tt.sum(A * A, 0))))
649-
else: # DTC
654+
else: # DTC
650655
Lamd = tt.ones_like(Qffd) * sigma2
651656
trace = 0.0
652657
A_l = A / Lamd
@@ -661,7 +666,7 @@ def _build_marginal_likelihood_logp(self, X, Xu, y, sigma):
661666

662667
def marginal_likelihood(self, name, X, Xu, y, sigma, is_observed=True, **kwargs):
663668
R"""
664-
Returns the approximate marginal likelihood distribution, given the input
669+
Returns the approximate marginal likelihood distribution, given the input
665670
locations `X`, inducing point locations `Xu`, data `y`, and white noise
666671
standard deviations `sigma`.
667672
@@ -708,7 +713,7 @@ def _build_conditional(self, Xnew, pred_noise, diag, X, Xu, y, sigma, cov_total,
708713
if self.approx == "FITC":
709714
Kffd = cov_total(X, diag=True)
710715
Lamd = tt.clip(Kffd - Qffd, 0.0, np.inf) + sigma2
711-
else: # VFE or DTC
716+
else: # VFE or DTC
712717
Lamd = tt.ones_like(Qffd) * sigma2
713718
A_l = A / Lamd
714719
L_B = cholesky(tt.eye(Xu.shape[0]) + tt.dot(A_l, tt.transpose(A)))
@@ -733,6 +738,8 @@ def _build_conditional(self, Xnew, pred_noise, diag, X, Xu, y, sigma, cov_total,
733738
return mu, stabilize(cov)
734739

735740
def _get_given_vals(self, given):
741+
if given is None:
742+
given = {}
736743
if 'gp' in given:
737744
cov_total = given['gp'].cov_func
738745
mean_total = given['gp'].mean_func
@@ -745,9 +752,9 @@ def _get_given_vals(self, given):
745752
X, Xu, y, sigma = self.X, self.Xu, self.y, self.sigma
746753
return X, Xu, y, sigma, cov_total, mean_total
747754

748-
def conditional(self, name, Xnew, pred_noise=False, given={}, **kwargs):
755+
def conditional(self, name, Xnew, pred_noise=False, given=None, **kwargs):
749756
R"""
750-
Returns the approximate conditional distribution of the GP evaluated over
757+
Returns the approximate conditional distribution of the GP evaluated over
751758
new input locations `Xnew`.
752759
753760
Parameters

pymc3/plots/energyplot.py

Lines changed: 4 additions & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -4,7 +4,7 @@
44

55

66
def energyplot(trace, kind='kde', figsize=None, ax=None, legend=True,
7-
shade=0.35, frame=True, kwargs_shade={}, **kwargs):
7+
shade=0.35, frame=True, kwargs_shade=None, **kwargs):
88
"""Plot energy transition distribution and marginal energy distribution in
99
order to diagnose poor exploration by HMC algorithms.
1010
@@ -47,6 +47,9 @@ def energyplot(trace, kind='kde', figsize=None, ax=None, legend=True,
4747
if ax is None:
4848
_, ax = plt.subplots(figsize=figsize)
4949

50+
if kwargs_shade is None:
51+
kwargs_shade = {}
52+
5053
if kind == 'kde':
5154
for label, value in series:
5255
kdeplot(value, label=label, shade=shade, ax=ax,

pymc3/plots/kdeplot.py

Lines changed: 5 additions & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -3,7 +3,7 @@
33
from scipy.signal import gaussian, convolve
44

55

6-
def kdeplot(values, label=None, shade=0, ax=None, kwargs_shade={}, **kwargs):
6+
def kdeplot(values, label=None, shade=0, ax=None, kwargs_shade=None, **kwargs):
77
"""
88
1D KDE plot taking into account boundary conditions
99
@@ -27,6 +27,10 @@ def kdeplot(values, label=None, shade=0, ax=None, kwargs_shade={}, **kwargs):
2727
"""
2828
if ax is None:
2929
_, ax = plt.subplots()
30+
31+
if kwargs_shade is None:
32+
kwargs_shade = {}
33+
3034
density, l, u = fast_kde(values)
3135
x = np.linspace(l, u, len(density))
3236
ax.plot(x, density, label=label, **kwargs)

pymc3/tests/test_distributions.py

Lines changed: 8 additions & 2 deletions
Original file line number · Diff line number · Diff line change
@@ -358,7 +358,10 @@ def PdMatrixCholUpper(n):
358358

359359

360360
class TestMatchesScipy(SeededTest):
361-
def pymc3_matches_scipy(self, pymc3_dist, domain, paramdomains, scipy_dist, decimal=None, extra_args={}):
361+
def pymc3_matches_scipy(self, pymc3_dist, domain, paramdomains, scipy_dist,
362+
decimal=None, extra_args=None):
363+
if extra_args is None:
364+
extra_args = {}
362365
model = build_model(pymc3_dist, domain, paramdomains, extra_args)
363366
value = model.named_vars['value']
364367

@@ -413,9 +416,12 @@ def wrapped_logp(x):
413416
decimals = select_by_precision(float64=6, float32=4)
414417
assert_almost_equal(dlogp(pt), ndlogp(pt), decimal=decimals, err_msg=str(pt))
415418

416-
def checkd(self, distfam, valuedomain, vardomains, checks=None, extra_args={}):
419+
def checkd(self, distfam, valuedomain, vardomains, checks=None, extra_args=None):
417420
if checks is None:
418421
checks = (self.check_int_to_1, self.check_dlogp)
422+
423+
if extra_args is None:
424+
extra_args = {}
419425
m = build_model(distfam, valuedomain, vardomains, extra_args=extra_args)
420426
for check in checks:
421427
check(m, m.named_vars['value'], valuedomain, vardomains)

pymc3/tests/test_memo.py

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -3,7 +3,7 @@
33

44
def getmemo():
55
@memoize
6-
def f(a, b=['a']):
6+
def f(a, b=('a',)):  # NOTE(review): the commit as written uses ('a'), which is just the string 'a' — a one-element tuple requires the trailing comma
77
return str(a) + str(b)
88
return f
99

0 commit comments

Comments (0)