
Commit ec40eb7

run "pre-commit run --all-files" and then manually fix instead of --unsafe-fixes
1 parent fc474f7 commit ec40eb7


62 files changed (+1376 −588 lines)

conftest.py

Lines changed: 3 additions & 1 deletion
@@ -2,7 +2,9 @@


 def pytest_addoption(parser):
-    parser.addoption("--runslow", action="store_true", default=False, help="run slow tests")
+    parser.addoption(
+        "--runslow", action="store_true", default=False, help="run slow tests"
+    )


 def pytest_configure(config):
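
The hunk above only reformats the pytest_addoption hook. For context, here is a minimal sketch of how a --runslow option is usually paired with a "slow" marker, following the standard recipe from the pytest documentation; the marker name and messages are illustrative assumptions, not taken from this commit:

# Hypothetical companion hooks for the --runslow option (pytest docs recipe);
# not part of this diff.
import pytest


def pytest_addoption(parser):
    parser.addoption(
        "--runslow", action="store_true", default=False, help="run slow tests"
    )


def pytest_configure(config):
    config.addinivalue_line("markers", "slow: mark test as slow to run")


def pytest_collection_modifyitems(config, items):
    if config.getoption("--runslow"):
        # --runslow given on the command line: do not skip slow tests
        return
    skip_slow = pytest.mark.skip(reason="need --runslow option to run")
    for item in items:
        if "slow" in item.keywords:
            item.add_marker(skip_slow)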

docs/conf.py

Lines changed: 3 additions & 1 deletion
@@ -171,7 +171,9 @@

 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
-man_pages = [(master_doc, "pymc_experimental", "pymc_experimental Documentation", [author], 1)]
+man_pages = [
+    (master_doc, "pymc_experimental", "pymc_experimental Documentation", [author], 1)
+]


 # -- Options for Texinfo output ----------------------------------------------

pymc_experimental/__init__.py

Lines changed: 15 additions & 4 deletions
@@ -13,6 +13,10 @@
 # limitations under the License.
 import logging

+from pymc_experimental import distributions, gp, statespace, utils
+from pymc_experimental.inference.fit import fit
+from pymc_experimental.model.marginal_model import MarginalModel
+from pymc_experimental.model.model_api import as_model
 from pymc_experimental.version import __version__

 _log = logging.getLogger("pmx")
@@ -23,7 +27,14 @@
         handler = logging.StreamHandler()
         _log.addHandler(handler)

-from pymc_experimental import distributions, gp, statespace, utils
-from pymc_experimental.inference.fit import fit
-from pymc_experimental.model.marginal_model import MarginalModel
-from pymc_experimental.model.model_api import as_model
+
+__all__ = [
+    "__version__",
+    "distributions",
+    "gp",
+    "statespace",
+    "utils",
+    "fit",
+    "MarginalModel",
+    "as_model",
+]
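
The net effect of this hunk is that the public names are imported at the top of the module and then declared explicitly via __all__ (which also keeps the linter from flagging them as unused imports). A small usage sketch, assuming the package is installed; it only exercises names that the hunk re-exports:

# Illustrative only; not part of the diff.
import pymc_experimental as pmx

print(pmx.__version__)          # re-exported version string
model_decorator = pmx.as_model  # top-level re-export of model.model_api.as_model
fit_fn = pmx.fit                # top-level re-export of inference.fit.fit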

pymc_experimental/distributions/continuous.py

Lines changed: 9 additions & 3 deletions
@@ -41,7 +41,9 @@ class GenExtremeRV(RandomVariable):
     dtype: str = "floatX"
     _print_name: Tuple[str, str] = ("Generalized Extreme Value", "\\operatorname{GEV}")

-    def __call__(self, mu=0.0, sigma=1.0, xi=0.0, size=None, **kwargs) -> TensorVariable:
+    def __call__(
+        self, mu=0.0, sigma=1.0, xi=0.0, size=None, **kwargs
+    ) -> TensorVariable:
         return super().__call__(mu, sigma, xi, size=size, **kwargs)

     @classmethod
@@ -54,7 +56,9 @@ def rng_fn(
         size: Tuple[int, ...],
     ) -> np.ndarray:
         # Notice negative here, since remainder of GenExtreme is based on Coles parametrization
-        return stats.genextreme.rvs(c=-xi, loc=mu, scale=sigma, random_state=rng, size=size)
+        return stats.genextreme.rvs(
+            c=-xi, loc=mu, scale=sigma, random_state=rng, size=size
+        )


 gev = GenExtremeRV()
@@ -214,7 +218,9 @@ def support_point(rv, size, mu, sigma, xi):
         r"""
         Using the mode, as the mean can be infinite when :math:`\xi > 1`
         """
-        mode = pt.switch(pt.isclose(xi, 0), mu, mu + sigma * (pt.pow(1 + xi, -xi) - 1) / xi)
+        mode = pt.switch(
+            pt.isclose(xi, 0), mu, mu + sigma * (pt.pow(1 + xi, -xi) - 1) / xi
+        )
         if not rv_size_is_none(size):
             mode = pt.full(size, mode)
         return mode
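
As a sanity check on the mode expression reformatted in the last hunk, here is a hedged sketch (not part of the commit) comparing the closed-form GEV mode against a brute-force argmax of scipy's genextreme pdf, using the same c = -xi sign convention noted in rng_fn; the parameter values are arbitrary:

# Illustrative check of the GEV mode formula used in support_point.
import numpy as np
from scipy import stats

mu, sigma, xi = 0.0, 1.0, 0.5
# closed-form mode: mu + sigma * ((1 + xi)**(-xi) - 1) / xi
mode = mu + sigma * ((1 + xi) ** (-xi) - 1) / xi

grid = np.linspace(-1.99, 5.0, 20001)  # support starts at mu - sigma / xi = -2
pdf = stats.genextreme.pdf(grid, c=-xi, loc=mu, scale=sigma)
print(round(mode, 3), round(float(grid[pdf.argmax()]), 3))  # both approximately -0.367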

pymc_experimental/distributions/discrete.py

Lines changed: 9 additions & 7 deletions
@@ -51,14 +51,14 @@ def rng_fn(cls, rng, theta, lam, size):
         x = np.empty(dist_size)
         idxs_mask = np.broadcast_to(lam < 0, dist_size)
         if np.any(idxs_mask):
-            x[idxs_mask] = cls._inverse_rng_fn(rng, theta, lam, dist_size, idxs_mask=idxs_mask)[
-                idxs_mask
-            ]
+            x[idxs_mask] = cls._inverse_rng_fn(
+                rng, theta, lam, dist_size, idxs_mask=idxs_mask
+            )[idxs_mask]
         idxs_mask = ~idxs_mask
         if np.any(idxs_mask):
-            x[idxs_mask] = cls._branching_rng_fn(rng, theta, lam, dist_size, idxs_mask=idxs_mask)[
-                idxs_mask
-            ]
+            x[idxs_mask] = cls._branching_rng_fn(
+                rng, theta, lam, dist_size, idxs_mask=idxs_mask
+            )[idxs_mask]
         return x

     @classmethod
@@ -159,7 +159,9 @@ def support_point(rv, size, mu, lam):

     def logp(value, mu, lam):
         mu_lam_value = mu + lam * value
-        logprob = np.log(mu) + logpow(mu_lam_value, value - 1) - mu_lam_value - factln(value)
+        logprob = (
+            np.log(mu) + logpow(mu_lam_value, value - 1) - mu_lam_value - factln(value)
+        )

         # Probability is 0 when value > m, where m is the largest positive integer for
         # which mu + m * lam > 0 (when lam < 0).

pymc_experimental/distributions/multivariate/__init__.py

Lines changed: 2 additions & 0 deletions

@@ -1 +1,3 @@
 from pymc_experimental.distributions.multivariate.r2d2m2cp import R2D2M2CP
+
+__all__ = ["R2D2M2CP"]

pymc_experimental/distributions/multivariate/r2d2m2cp.py

Lines changed: 20 additions & 7 deletions
@@ -92,7 +92,9 @@ def _R2D2M2CP_beta(
             raw = pt.zeros_like(mu_param)
         else:
             raw = pm.Normal("raw", dims=dims)
-        beta = pm.Deterministic(name, (raw * std_param + mu_param) / input_sigma, dims=dims)
+        beta = pm.Deterministic(
+            name, (raw * std_param + mu_param) / input_sigma, dims=dims
+        )
     else:
         if psi_mask is not None and psi_mask.any():
             # limit case where some probs are not 1 or 0
@@ -113,7 +115,9 @@ def _R2D2M2CP_beta(
             # all variables are deterministic
             beta = pm.Deterministic(name, (mu_param / input_sigma), dims=dims)
         else:
-            beta = pm.Normal(name, mu_param / input_sigma, std_param / input_sigma, dims=dims)
+            beta = pm.Normal(
+                name, mu_param / input_sigma, std_param / input_sigma, dims=dims
+            )
     return beta


@@ -137,7 +141,8 @@ def _psi_masked(
    dims: Sequence[str],
 ) -> Tuple[Union[pt.TensorLike, None], pt.TensorVariable]:
     if not (
-        isinstance(positive_probs, pt.Constant) and isinstance(positive_probs_std, pt.Constant)
+        isinstance(positive_probs, pt.Constant)
+        and isinstance(positive_probs_std, pt.Constant)
     ):
         raise TypeError(
             "Only constant values for positive_probs and positive_probs_std are accepted"
@@ -147,7 +152,9 @@ def _psi_masked(
         )
     mask = ~np.bitwise_or(positive_probs == 1, positive_probs == 0)
     if np.bitwise_and(~mask, positive_probs_std != 0).any():
-        raise ValueError("Can't have both positive_probs == '1 or 0' and positive_probs_std != 0")
+        raise ValueError(
+            "Can't have both positive_probs == '1 or 0' and positive_probs_std != 0"
+        )
     if (~mask).any() and mask.any():
         # limit case where some probs are not 1 or 0
         # setsubtensor is required
@@ -206,7 +213,9 @@ def _phi(
         if variance_explained is not None:
             raise TypeError("Can't use variable importance with variance explained")
         if len(model.coords[dim]) <= 1:
-            raise TypeError("Can't use variable importance with less than two variables")
+            raise TypeError(
+                "Can't use variable importance with less than two variables"
+            )
         variables_importance = pt.as_tensor(variables_importance)
         if importance_concentration is not None:
             variables_importance *= importance_concentration
@@ -218,7 +227,9 @@ def _phi(
     else:
         phi = _broadcast_as_dims(1.0, dims=dims)
     if importance_concentration is not None:
-        return pm.Dirichlet("phi", importance_concentration * phi, dims=broadcast_dims + [dim])
+        return pm.Dirichlet(
+            "phi", importance_concentration * phi, dims=broadcast_dims + [dim]
+        )
     else:
         return phi

@@ -428,7 +439,9 @@ def R2D2M2CP(
         dims=dims,
     )
     mask, psi = _psi(
-        positive_probs=positive_probs, positive_probs_std=positive_probs_std, dims=dims
+        positive_probs=positive_probs,
+        positive_probs_std=positive_probs_std,
+        dims=dims,
    )

     beta = _R2D2M2CP_beta(

pymc_experimental/distributions/timeseries.py

Lines changed: 16 additions & 5 deletions
@@ -26,7 +26,9 @@
 from pytensor.tensor.random.op import RandomVariable


-def _make_outputs_info(n_lags: int, init_dist: Distribution) -> List[Union[Distribution, dict]]:
+def _make_outputs_info(
+    n_lags: int, init_dist: Distribution
+) -> List[Union[Distribution, dict]]:
     """
     Two cases are needed for outputs_info in the scans used by DiscreteMarkovRv. If n_lags = 1, we need to throw away
     the first dimension of init_dist_ or else markov_chain will have shape (steps, 1, *batch_size) instead of
@@ -124,7 +126,9 @@ def __new__(cls, *args, steps=None, n_lags=1, **kwargs):
     @classmethod
     def dist(cls, P=None, logit_P=None, steps=None, init_dist=None, n_lags=1, **kwargs):
         steps = get_support_shape_1d(
-            support_shape=steps, shape=kwargs.get("shape", None), support_shape_offset=n_lags
+            support_shape=steps,
+            shape=kwargs.get("shape", None),
+            support_shape_offset=n_lags,
         )

         if steps is None:
@@ -199,7 +203,9 @@ def transition(*args):

         (state_next_rng,) = tuple(state_updates.values())

-        discrete_mc_ = pt.moveaxis(pt.concatenate([init_dist_, markov_chain], axis=0), 0, -1)
+        discrete_mc_ = pt.moveaxis(
+            pt.concatenate([init_dist_, markov_chain], axis=0), 0, -1
+        )

         discrete_mc_op = DiscreteMarkovChainRV(
             inputs=[P_, steps_, init_dist_, state_rng],
@@ -218,7 +224,9 @@ def change_mc_size(op, dist, new_size, expand=False):
         old_size = dist.shape[:-1]
         new_size = tuple(new_size) + tuple(old_size)

-    return DiscreteMarkovChain.rv_op(*dist.owner.inputs[:-1], size=new_size, n_lags=op.n_lags)
+    return DiscreteMarkovChain.rv_op(
+        *dist.owner.inputs[:-1], size=new_size, n_lags=op.n_lags
+    )


 @_support_point.register(DiscreteMarkovChainRV)
@@ -247,7 +255,10 @@ def discrete_mc_logp(op, values, P, steps, init_dist, state_rng, **kwargs):
     value = values[0]
     n_lags = op.n_lags

-    indexes = [value[..., i : -(n_lags - i) if n_lags != i else None] for i in range(n_lags + 1)]
+    indexes = [
+        value[..., i : -(n_lags - i) if n_lags != i else None]
+        for i in range(n_lags + 1)
+    ]

     mc_logprob = logp(init_dist, value[..., :n_lags]).sum(axis=-1)
     mc_logprob += pt.log(P[tuple(indexes)]).sum(axis=-1)
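
The indexes comprehension in the last hunk is dense; a small NumPy illustration (not part of the diff, assuming n_lags = 1 and a toy chain) of how those slices line up for the transition-matrix lookup:

# Hypothetical illustration of the `indexes` slicing in discrete_mc_logp,
# written with plain NumPy instead of PyTensor.
import numpy as np

value = np.array([0, 1, 1, 0, 1])  # an observed chain of states
n_lags = 1

indexes = [
    value[..., i : -(n_lags - i) if n_lags != i else None]
    for i in range(n_lags + 1)
]
# indexes[0] -> value[..., :-1] == [0, 1, 1, 0]  (current states)
# indexes[1] -> value[..., 1:]  == [1, 1, 0, 1]  (next states)

P = np.array([[0.9, 0.1], [0.3, 0.7]])  # toy transition matrix
transition_probs = P[tuple(indexes)]    # P[x_t, x_{t+1}] for each step
print(transition_probs)                 # [0.1 0.7 0.3 0.1]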

pymc_experimental/gp/__init__.py

Lines changed: 2 additions & 0 deletions
@@ -14,3 +14,5 @@


 from pymc_experimental.gp.latent_approx import KarhunenLoeveExpansion, ProjectedProcess
+
+__all__ = ["KarhunenLoeveExpansion", "ProjectedProcess"]

pymc_experimental/gp/latent_approx.py

Lines changed: 10 additions & 4 deletions
@@ -47,7 +47,9 @@ def _build_prior(self, name, X, X_inducing, jitter=JITTER_DEFAULT, **kwargs):
         L = cholesky(stabilize(Kuu, jitter))

         n_inducing_points = np.shape(X_inducing)[0]
-        v = pm.Normal(name + "_u_rotated_", mu=0.0, sigma=1.0, size=n_inducing_points, **kwargs)
+        v = pm.Normal(
+            name + "_u_rotated_", mu=0.0, sigma=1.0, size=n_inducing_points, **kwargs
+        )
         u = pm.Deterministic(name + "_u", L @ v)

         Kfu = self.cov_func(X, X_inducing)
@@ -111,7 +113,9 @@ def _build_conditional(self, name, Xnew, X_inducing, L, Kuuiu, jitter, **kwargs)
         Ksu = self.cov_func(Xnew, X_inducing)
         mu = self.mean_func(Xnew) + Ksu @ Kuuiu
         tmp = solve_lower(L, pt.transpose(Ksu))
-        Qss = pt.transpose(tmp) @ tmp  # Qss = tt.dot(tt.dot(Ksu, tt.nlinalg.pinv(Kuu)), Ksu.T)
+        Qss = (
+            pt.transpose(tmp) @ tmp
+        )  # Qss = tt.dot(tt.dot(Ksu, tt.nlinalg.pinv(Kuu)), Ksu.T)
         Kss = self.cov_func(Xnew)
         Lss = cholesky(stabilize(Kss - Qss, jitter))
         return mu, Lss
@@ -137,7 +141,7 @@ def __init__(
         super().__init__(mean_func=mean_func, cov_func=cov_func)

     def _build_prior(self, name, X, jitter=1e-6, **kwargs):
-        mu = self.mean_func(X)
+        # mu = self.mean_func(X)
         Kxx = pm.gp.util.stabilize(self.cov_func(X), jitter)
         vals, vecs = pt.linalg.eigh(Kxx)
         ## NOTE: REMOVED PRECISION CUTOFF
@@ -147,7 +151,9 @@ def _build_prior(self, name, X, jitter=1e-6, **kwargs):
         if self.variance_limit == 1:
             n_eigs = len(vals)
         else:
-            n_eigs = ((vals[::-1].cumsum() / vals.sum()) > self.variance_limit).nonzero()[0][0]
+            n_eigs = (
+                (vals[::-1].cumsum() / vals.sum()) > self.variance_limit
+            ).nonzero()[0][0]
         U = vecs[:, -n_eigs:]
         s = vals[-n_eigs:]
         basis = U * pt.sqrt(s)
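
In the KarhunenLoeveExpansion branch above, the n_eigs expression keeps the trailing eigenpairs of the (ascending-order) eigh output, with the count chosen from where the cumulative variance share of the largest eigenvalues first exceeds variance_limit. A rough NumPy sketch of the same selection, with illustrative values not taken from this commit:

# Hypothetical NumPy illustration of the n_eigs selection in _build_prior;
# `vals` stands in for the ascending eigenvalues returned by eigh.
import numpy as np

vals = np.array([0.05, 0.15, 0.8, 4.0])  # ascending, as np.linalg.eigh returns them
variance_limit = 0.9

cum_frac = vals[::-1].cumsum() / vals.sum()  # cumulative share of the largest eigenvalues
# cum_frac == [0.8, 0.96, 0.99, 1.0]
n_eigs = (cum_frac > variance_limit).nonzero()[0][0]
print(n_eigs)  # 1; the code then keeps vecs[:, -n_eigs:] and vals[-n_eigs:]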
