2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -48,7 +48,7 @@ repos:
# - --exclude=binder/
# - --exclude=versioneer.py
- repo: https://github.com/astral-sh/ruff-pre-commit
-  rev: v0.8.4
+  rev: v0.9.1
hooks:
- id: ruff
args: [--fix, --show-fixes]
2 changes: 1 addition & 1 deletion docs/source/learn/core_notebooks/pymc_pytensor.ipynb
@@ -1849,7 +1849,7 @@
"print(\n",
" f\"\"\"\n",
"mu_value -> {scipy.stats.norm.logpdf(x=0, loc=0, scale=2)}\n",
"sigma_log_value -> {- 10 + scipy.stats.halfnorm.logpdf(x=np.exp(-10), loc=0, scale=3)}\n",
"sigma_log_value -> {-10 + scipy.stats.halfnorm.logpdf(x=np.exp(-10), loc=0, scale=3)}\n",
"x_value -> {scipy.stats.norm.logpdf(x=0, loc=0, scale=np.exp(-10))}\n",
"\"\"\"\n",
")"
2 changes: 1 addition & 1 deletion pymc/data.py
@@ -257,7 +257,7 @@ def determine_coords(
if isinstance(value, np.ndarray) and dims is not None:
if len(dims) != value.ndim:
raise pm.exceptions.ShapeError(
"Invalid data shape. The rank of the dataset must match the " "length of `dims`.",
"Invalid data shape. The rank of the dataset must match the length of `dims`.",
actual=value.shape,
expected=value.ndim,
)
8 changes: 3 additions & 5 deletions pymc/distributions/continuous.py
@@ -992,8 +992,7 @@ def get_mu_lam_phi(mu, lam, phi):
return mu, lam, lam / mu

raise ValueError(
"Wald distribution must specify either mu only, "
"mu and lam, mu and phi, or lam and phi."
"Wald distribution must specify either mu only, mu and lam, mu and phi, or lam and phi."
)

def logp(value, mu, lam, alpha):
@@ -1603,8 +1602,7 @@ def dist(cls, kappa=None, mu=None, b=None, q=None, *args, **kwargs):
def get_kappa(cls, kappa=None, q=None):
if kappa is not None and q is not None:
raise ValueError(
"Incompatible parameterization. Either use "
"kappa or q to specify the distribution."
"Incompatible parameterization. Either use kappa or q to specify the distribution."
)
elif q is not None:
if isinstance(q, Variable):
@@ -3483,7 +3481,7 @@ def get_nu_b(cls, nu, b, sigma):
elif nu is not None and b is None:
b = nu / sigma
return nu, b, sigma
raise ValueError("Rice distribution must specify either nu" " or b.")
raise ValueError("Rice distribution must specify either nu or b.")

def support_point(rv, size, nu, sigma):
nu_sigma_ratio = -(nu**2) / (2 * sigma**2)
10 changes: 6 additions & 4 deletions pymc/distributions/multivariate.py
@@ -247,7 +247,9 @@ class MvNormal(Continuous):
data = np.random.multivariate_normal(mu, true_cov, 10)

sd_dist = pm.Exponential.dist(1.0, shape=3)
-        chol, corr, stds = pm.LKJCholeskyCov("chol_cov", n=3, eta=2, sd_dist=sd_dist, compute_corr=True)
+        chol, corr, stds = pm.LKJCholeskyCov(
+            "chol_cov", n=3, eta=2, sd_dist=sd_dist, compute_corr=True
+        )
vals = pm.MvNormal("vals", mu=mu, chol=chol, observed=data)

For unobserved values it can be better to use a non-centered
@@ -2793,9 +2795,9 @@ def dist(cls, sigma=1.0, n_zerosum_axes=None, support_shape=None, **kwargs):

support_shape = pt.as_tensor(support_shape, dtype="int64", ndim=1)

-        assert n_zerosum_axes == pt.get_vector_length(
-            support_shape
-        ), "support_shape has to be as long as n_zerosum_axes"
+        assert n_zerosum_axes == pt.get_vector_length(support_shape), (
+            "support_shape has to be as long as n_zerosum_axes"
+        )

return super().dist([sigma, support_shape], **kwargs)

3 changes: 1 addition & 2 deletions pymc/gp/cov.py
@@ -328,8 +328,7 @@ def power_spectral_density(self, omega: TensorLike) -> TensorVariable:
check = Counter([isinstance(factor, Covariance) for factor in self._factor_list])
if check.get(True, 0) >= 2:
raise NotImplementedError(
"The power spectral density of products of covariance "
"functions is not implemented."
"The power spectral density of products of covariance functions is not implemented."
)
return reduce(mul, self._merge_factors_psd(omega))

3 changes: 1 addition & 2 deletions pymc/gp/util.py
@@ -211,8 +211,7 @@ def plot_gp_dist(
samples_kwargs = {}
if np.any(np.isnan(samples)):
warnings.warn(
"There are `nan` entries in the [samples] arguments. "
"The plot will not contain a band!",
"There are `nan` entries in the [samples] arguments. The plot will not contain a band!",
UserWarning,
)

5 changes: 2 additions & 3 deletions pymc/sampling/jax.py
@@ -108,8 +108,7 @@ def _replace_shared_variables(graph: list[TensorVariable]) -> list[TensorVariable]:

if any(var.default_update is not None for var in shared_variables):
raise ValueError(
"Graph contains shared variables with default_update which cannot "
"be safely replaced."
"Graph contains shared variables with default_update which cannot be safely replaced."
)

replacements = {var: pt.constant(var.get_value(borrow=True)) for var in shared_variables}
@@ -360,7 +359,7 @@ def _sample_blackjax_nuts(
map_fn = jax.vmap
else:
raise ValueError(
"Only supporting the following methods to draw chains:" ' "parallel" or "vectorized"'
'Only supporting the following methods to draw chains: "parallel" or "vectorized"'
)

if chains == 1:
6 changes: 3 additions & 3 deletions pymc/sampling/mcmc.py
@@ -1000,7 +1000,7 @@ def _sample_return(
total_draws = draws_per_chain.sum()

_log.info(
-        f'Sampling {n_chains} chain{"s" if n_chains > 1 else ""} for {desired_tune:_d} desired tune and {desired_draw:_d} desired draw iterations '
+        f"Sampling {n_chains} chain{'s' if n_chains > 1 else ''} for {desired_tune:_d} desired tune and {desired_draw:_d} desired draw iterations "
f"(Actually sampled {total_n_tune:_d} tune and {total_draws:_d} draws total) "
f"took {t_sampling:.0f} seconds."
)
@@ -1062,8 +1062,8 @@ def _sample_return(

n_chains = len(mtrace.chains)
_log.info(
-        f'Sampling {n_chains} chain{"s" if n_chains > 1 else ""} for {n_tune:_d} tune and {n_draws:_d} draw iterations '
-        f"({n_tune*n_chains:_d} + {n_draws*n_chains:_d} draws total) "
+        f"Sampling {n_chains} chain{'s' if n_chains > 1 else ''} for {n_tune:_d} tune and {n_draws:_d} draw iterations "
+        f"({n_tune * n_chains:_d} + {n_draws * n_chains:_d} draws total) "
f"took {t_sampling:.0f} seconds."
)

6 changes: 3 additions & 3 deletions pymc/sampling/population.py
@@ -386,9 +386,9 @@ def _prepare_iter_population(

# 2. Set up the steppers
steppers: list[Step] = []
-    assert (
-        len(rngs) == nchains
-    ), f"There must be one random Generator per chain. Got {len(rngs)} instead of {nchains}"
+    assert len(rngs) == nchains, (
+        f"There must be one random Generator per chain. Got {len(rngs)} instead of {nchains}"
+    )
for c, rng in enumerate(rngs):
# need independent samplers for each chain
# it is important to copy the actual steppers (but not the delta_logp)
6 changes: 3 additions & 3 deletions pymc/step_methods/compound.py
@@ -282,9 +282,9 @@ def sampling_state(self) -> DataClassState:

@sampling_state.setter
def sampling_state(self, state: DataClassState):
-        assert isinstance(
-            state, self._state_class
-        ), f"Invalid sampling state class {type(state)}. Expected {self._state_class}"
+        assert isinstance(state, self._state_class), (
+            f"Invalid sampling state class {type(state)}. Expected {self._state_class}"
+        )
for method, state_method in zip(self.methods, state.methods):
method.sampling_state = state_method

6 changes: 3 additions & 3 deletions pymc/step_methods/state.py
@@ -90,9 +90,9 @@ def sampling_state(self) -> DataClassState:
@sampling_state.setter
def sampling_state(self, state: DataClassState):
state_class = self._state_class
-        assert isinstance(
-            state, state_class
-        ), f"Encountered invalid state class '{state.__class__}'. State must be '{state_class}'"
+        assert isinstance(state, state_class), (
+            f"Encountered invalid state class '{state.__class__}'. State must be '{state_class}'"
+        )
for field in fields(state_class):
is_tensor_name = field.metadata.get("tensor_name", False)
state_val = deepcopy(getattr(state, field.name))
6 changes: 3 additions & 3 deletions pymc/testing.py
@@ -964,9 +964,9 @@ def check_rv_size(self):
assert actual == expected_symbolic == expected

def validate_tests_list(self):
-        assert len(self.checks_to_run) == len(
-            set(self.checks_to_run)
-        ), "There are duplicates in the list of checks_to_run"
+        assert len(self.checks_to_run) == len(set(self.checks_to_run)), (
+            "There are duplicates in the list of checks_to_run"
+        )


def seeded_scipy_distribution_builder(dist_name: str) -> Callable:
8 changes: 4 additions & 4 deletions pymc/variational/opvi.py
@@ -710,9 +710,9 @@ class Group(WithMemoization):

@classmethod
def register(cls, sbcls):
-        assert (
-            frozenset(sbcls.__param_spec__) not in cls.__param_registry
-        ), "Duplicate __param_spec__"
+        assert frozenset(sbcls.__param_spec__) not in cls.__param_registry, (
+            "Duplicate __param_spec__"
+        )
cls.__param_registry[frozenset(sbcls.__param_spec__)] = sbcls
assert sbcls.short_name not in cls.__name_registry, "Duplicate short_name"
cls.__name_registry[sbcls.short_name] = sbcls
@@ -1234,7 +1234,7 @@ def __init__(self, groups, model=None):
for g in groups:
if g.group is None:
if rest is not None:
raise GroupError("More than one group is specified for " "the rest variables")
raise GroupError("More than one group is specified for the rest variables")
else:
rest = g
else:
2 changes: 1 addition & 1 deletion pymc/variational/updates.py
@@ -1006,7 +1006,7 @@ def norm_constraint(tensor_var, max_norm, norm_axes=None, epsilon=1e-7):
elif ndim in [3, 4, 5]: # Conv{1,2,3}DLayer
sum_over = tuple(range(1, ndim))
else:
raise ValueError(f"Unsupported tensor dimensionality {ndim}." "Must specify `norm_axes`")
raise ValueError(f"Unsupported tensor dimensionality {ndim}.Must specify `norm_axes`")

dtype = np.dtype(pytensor.config.floatX).type
norms = pt.sqrt(pt.sum(pt.sqr(tensor_var), axis=sum_over, keepdims=True))
12 changes: 6 additions & 6 deletions tests/distributions/test_multivariate.py
@@ -1531,14 +1531,14 @@ class TestZeroSumNormal:
def assert_zerosum_axes(self, random_samples, axes_to_check, check_zerosum_axes=True):
if check_zerosum_axes:
for ax in axes_to_check:
-                assert np.isclose(
-                    random_samples.mean(axis=ax), 0
-                ).all(), f"{ax} is a zerosum_axis but is not summing to 0 across all samples."
+                assert np.isclose(random_samples.mean(axis=ax), 0).all(), (
+                    f"{ax} is a zerosum_axis but is not summing to 0 across all samples."
+                )
else:
for ax in axes_to_check:
-                assert not np.isclose(
-                    random_samples.mean(axis=ax), 0
-                ).all(), f"{ax} is not a zerosum_axis, but is nonetheless summing to 0 across all samples."
+                assert not np.isclose(random_samples.mean(axis=ax), 0).all(), (
+                    f"{ax} is not a zerosum_axis, but is nonetheless summing to 0 across all samples."
+                )

@pytest.mark.parametrize(
"dims, n_zerosum_axes",
12 changes: 6 additions & 6 deletions tests/gp/test_hsgp_approx.py
@@ -135,9 +135,9 @@ def test_mean_invariance(self):
with model:
pm.set_data({"X": x_new})

-        assert np.allclose(
-            gp._X_center, original_center
-        ), "gp._X_center should not change after updating data for out-of-sample predictions."
+        assert np.allclose(gp._X_center, original_center), (
+            "gp._X_center should not change after updating data for out-of-sample predictions."
+        )

def test_parametrization(self):
err_msg = (
@@ -188,9 +188,9 @@ def test_parametrization_drop_first(self, model, cov_func, X1, drop_first):

n_coeffs = model.f1_hsgp_coeffs.type.shape[0]
if drop_first:
-            assert (
-                n_coeffs == n_basis - 1
-            ), f"one basis vector should have been dropped, {n_coeffs}"
+            assert n_coeffs == n_basis - 1, (
+                f"one basis vector should have been dropped, {n_coeffs}"
+            )
else:
assert n_coeffs == n_basis, "one was dropped when it shouldn't have been"

6 changes: 3 additions & 3 deletions tests/test_data.py
@@ -318,8 +318,8 @@ def test_explicit_coords(self, seeded_test):
N_cols = 7
data = np.random.uniform(size=(N_rows, N_cols))
coords = {
"rows": [f"R{r+1}" for r in range(N_rows)],
"columns": [f"C{c+1}" for c in range(N_cols)],
"rows": [f"R{r + 1}" for r in range(N_rows)],
"columns": [f"C{c + 1}" for c in range(N_cols)],
}
# pass coordinates explicitly, use numpy array in Data container
with pm.Model(coords=coords) as pmodel:
@@ -391,7 +391,7 @@ def test_implicit_coords_dataframe(self, seeded_test):
N_cols = 7
df_data = pd.DataFrame()
for c in range(N_cols):
df_data[f"Column {c+1}"] = np.random.normal(size=(N_rows,))
df_data[f"Column {c + 1}"] = np.random.normal(size=(N_rows,))
df_data.index.name = "rows"
df_data.columns.name = "columns"
