Update pre-commit config, replacing black and pylint with ruff #366

Closed. Wants to merge 2 commits.
.pre-commit-config.yaml: 15 changes (5 additions, 10 deletions)
@@ -20,17 +20,12 @@ repos:
hooks:
- id: pyupgrade
args: [--py37-plus]
- repo: https://github.com/psf/black
rev: 24.1.1
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.5.4
hooks:
- id: black
- id: black-jupyter
- repo: https://github.com/PyCQA/pylint
rev: v3.0.3
hooks:
- id: pylint
args: [--rcfile=.pylintrc]
files: ^pymc_experimental/
- id: ruff
args: ["--fix", "--output-format=full"]
- id: ruff-format
- repo: https://github.com/MarcoGorelli/madforhooks
rev: 0.4.1
hooks:
conftest.py: 4 changes (3 additions, 1 deletion)
@@ -2,7 +2,9 @@


def pytest_addoption(parser):
parser.addoption("--runslow", action="store_true", default=False, help="run slow tests")
parser.addoption(
"--runslow", action="store_true", default=False, help="run slow tests"
)


def pytest_configure(config):
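Note: the reformatted `--runslow` option is typically paired with the standard pytest "slow marker" recipe, in which slow tests are skipped unless the flag is passed. A sketch of that conventional pattern, assuming the usual recipe from the pytest documentation (the project's actual conftest.py may differ):

```python
import pytest


def pytest_addoption(parser):
    parser.addoption(
        "--runslow", action="store_true", default=False, help="run slow tests"
    )


def pytest_configure(config):
    # Register the marker so strict marker checking accepts it.
    config.addinivalue_line("markers", "slow: mark test as slow to run")


def pytest_collection_modifyitems(config, items):
    if config.getoption("--runslow"):
        return  # --runslow given: run slow tests as well
    skip_slow = pytest.mark.skip(reason="need --runslow option to run")
    for item in items:
        if "slow" in item.keywords:
            item.add_marker(skip_slow)
```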
docs/conf.py: 4 changes (3 additions, 1 deletion)
@@ -171,7 +171,9 @@

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "pymc_experimental", "pymc_experimental Documentation", [author], 1)]
man_pages = [
(master_doc, "pymc_experimental", "pymc_experimental Documentation", [author], 1)
]


# -- Options for Texinfo output ----------------------------------------------
pymc_experimental/__init__.py: 19 changes (15 additions, 4 deletions)
@@ -13,6 +13,10 @@
# limitations under the License.
import logging

from pymc_experimental import distributions, gp, statespace, utils
from pymc_experimental.inference.fit import fit
from pymc_experimental.model.marginal_model import MarginalModel
from pymc_experimental.model.model_api import as_model
from pymc_experimental.version import __version__

_log = logging.getLogger("pmx")
@@ -23,7 +27,14 @@
handler = logging.StreamHandler()
_log.addHandler(handler)

from pymc_experimental import distributions, gp, statespace, utils
from pymc_experimental.inference.fit import fit
from pymc_experimental.model.marginal_model import MarginalModel
from pymc_experimental.model.model_api import as_model

__all__ = [
"__version__",
"distributions",
"gp",
"statespace",
"utils",
"fit",
"MarginalModel",
"as_model",
]
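Note: listing the re-exported names in `__all__` marks them as intentionally public, so ruff's F401 ("imported but unused") does not flag them, and it also defines what `from pymc_experimental import *` exposes. A tiny standalone illustration (module contents hypothetical, not part of this PR):

```python
# reexport_demo.py: pyflakes/ruff treat names listed in __all__ as used,
# and star-imports expose exactly these names.
from math import pi, sqrt

__all__ = ["pi", "sqrt"]
```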
pymc_experimental/distributions/continuous.py: 12 changes (9 additions, 3 deletions)
@@ -41,7 +41,9 @@ class GenExtremeRV(RandomVariable):
dtype: str = "floatX"
_print_name: Tuple[str, str] = ("Generalized Extreme Value", "\\operatorname{GEV}")

def __call__(self, mu=0.0, sigma=1.0, xi=0.0, size=None, **kwargs) -> TensorVariable:
def __call__(
self, mu=0.0, sigma=1.0, xi=0.0, size=None, **kwargs
) -> TensorVariable:
return super().__call__(mu, sigma, xi, size=size, **kwargs)

@classmethod
@@ -54,7 +56,9 @@ def rng_fn(
size: Tuple[int, ...],
) -> np.ndarray:
# Notice negative here, since remainder of GenExtreme is based on Coles parametrization
return stats.genextreme.rvs(c=-xi, loc=mu, scale=sigma, random_state=rng, size=size)
return stats.genextreme.rvs(
c=-xi, loc=mu, scale=sigma, random_state=rng, size=size
)


gev = GenExtremeRV()
@@ -214,7 +218,9 @@ def support_point(rv, size, mu, sigma, xi):
r"""
Using the mode, as the mean can be infinite when :math:`\xi > 1`
"""
mode = pt.switch(pt.isclose(xi, 0), mu, mu + sigma * (pt.pow(1 + xi, -xi) - 1) / xi)
mode = pt.switch(
pt.isclose(xi, 0), mu, mu + sigma * (pt.pow(1 + xi, -xi) - 1) / xi
)
if not rv_size_is_none(size):
mode = pt.full(size, mode)
return mode
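Note: `rng_fn` passes `c=-xi` because SciPy's `genextreme` uses the opposite sign convention for the shape parameter from the Coles parametrization used by `GenExtreme`. A small self-contained check of that sign flip (parameter values arbitrary):

```python
import numpy as np
from scipy import stats

mu, sigma, xi = 0.5, 2.0, 0.3
x = np.linspace(-1.0, 10.0, 7)

# GEV CDF in the Coles parametrization: F(x) = exp(-(1 + xi*(x - mu)/sigma)**(-1/xi))
coles_cdf = np.exp(-(1 + xi * (x - mu) / sigma) ** (-1 / xi))

# SciPy's shape parameter is the negative of the Coles shape, hence c=-xi above.
scipy_cdf = stats.genextreme.cdf(x, c=-xi, loc=mu, scale=sigma)

assert np.allclose(coles_cdf, scipy_cdf)
```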
pymc_experimental/distributions/discrete.py: 16 changes (9 additions, 7 deletions)
@@ -51,14 +51,14 @@ def rng_fn(cls, rng, theta, lam, size):
x = np.empty(dist_size)
idxs_mask = np.broadcast_to(lam < 0, dist_size)
if np.any(idxs_mask):
x[idxs_mask] = cls._inverse_rng_fn(rng, theta, lam, dist_size, idxs_mask=idxs_mask)[
idxs_mask
]
x[idxs_mask] = cls._inverse_rng_fn(
rng, theta, lam, dist_size, idxs_mask=idxs_mask
)[idxs_mask]
idxs_mask = ~idxs_mask
if np.any(idxs_mask):
x[idxs_mask] = cls._branching_rng_fn(rng, theta, lam, dist_size, idxs_mask=idxs_mask)[
idxs_mask
]
x[idxs_mask] = cls._branching_rng_fn(
rng, theta, lam, dist_size, idxs_mask=idxs_mask
)[idxs_mask]
return x

@classmethod
@@ -159,7 +163,9 @@ def support_point(rv, size, mu, lam):

def logp(value, mu, lam):
mu_lam_value = mu + lam * value
logprob = np.log(mu) + logpow(mu_lam_value, value - 1) - mu_lam_value - factln(value)
logprob = (
np.log(mu) + logpow(mu_lam_value, value - 1) - mu_lam_value - factln(value)
)

# Probability is 0 when value > m, where m is the largest positive integer for
# which mu + m * lam > 0 (when lam < 0).
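Note: the reformatted `logprob` expression is the generalized Poisson log-pmf, log(mu) + (y - 1) * log(mu + lam * y) - (mu + lam * y) - log(y!). A minimal NumPy sketch (helper name illustrative) showing that it reduces to Poisson(mu) at lam = 0:

```python
import numpy as np
from scipy import stats
from scipy.special import gammaln


def gen_poisson_logpmf(y, mu, lam):
    # log P(Y = y) = log(mu) + (y - 1) * log(mu + lam * y) - (mu + lam * y) - log(y!)
    mly = mu + lam * y
    return np.log(mu) + (y - 1) * np.log(mly) - mly - gammaln(y + 1)


y = np.arange(6)
assert np.allclose(gen_poisson_logpmf(y, mu=2.5, lam=0.0), stats.poisson.logpmf(y, mu=2.5))
```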
pymc_experimental/distributions/multivariate/__init__.py: 2 changes (2 additions, 0 deletions)
@@ -1 +1,3 @@
from pymc_experimental.distributions.multivariate.r2d2m2cp import R2D2M2CP

__all__ = ["R2D2M2CP"]
pymc_experimental/distributions/multivariate/r2d2m2cp.py: 27 changes (20 additions, 7 deletions)
@@ -92,7 +92,9 @@ def _R2D2M2CP_beta(
raw = pt.zeros_like(mu_param)
else:
raw = pm.Normal("raw", dims=dims)
beta = pm.Deterministic(name, (raw * std_param + mu_param) / input_sigma, dims=dims)
beta = pm.Deterministic(
name, (raw * std_param + mu_param) / input_sigma, dims=dims
)
else:
if psi_mask is not None and psi_mask.any():
# limit case where some probs are not 1 or 0
@@ -113,7 +115,9 @@
# all variables are deterministic
beta = pm.Deterministic(name, (mu_param / input_sigma), dims=dims)
else:
beta = pm.Normal(name, mu_param / input_sigma, std_param / input_sigma, dims=dims)
beta = pm.Normal(
name, mu_param / input_sigma, std_param / input_sigma, dims=dims
)
return beta


@@ -137,7 +141,8 @@ def _psi_masked(
dims: Sequence[str],
) -> Tuple[Union[pt.TensorLike, None], pt.TensorVariable]:
if not (
isinstance(positive_probs, pt.Constant) and isinstance(positive_probs_std, pt.Constant)
isinstance(positive_probs, pt.Constant)
and isinstance(positive_probs_std, pt.Constant)
):
raise TypeError(
"Only constant values for positive_probs and positive_probs_std are accepted"
@@ -147,7 +152,9 @@
)
mask = ~np.bitwise_or(positive_probs == 1, positive_probs == 0)
if np.bitwise_and(~mask, positive_probs_std != 0).any():
raise ValueError("Can't have both positive_probs == '1 or 0' and positive_probs_std != 0")
raise ValueError(
"Can't have both positive_probs == '1 or 0' and positive_probs_std != 0"
)
if (~mask).any() and mask.any():
# limit case where some probs are not 1 or 0
# setsubtensor is required
@@ -206,7 +213,9 @@ def _phi(
if variance_explained is not None:
raise TypeError("Can't use variable importance with variance explained")
if len(model.coords[dim]) <= 1:
raise TypeError("Can't use variable importance with less than two variables")
raise TypeError(
"Can't use variable importance with less than two variables"
)
variables_importance = pt.as_tensor(variables_importance)
if importance_concentration is not None:
variables_importance *= importance_concentration
@@ -218,7 +227,9 @@
else:
phi = _broadcast_as_dims(1.0, dims=dims)
if importance_concentration is not None:
return pm.Dirichlet("phi", importance_concentration * phi, dims=broadcast_dims + [dim])
return pm.Dirichlet(
"phi", importance_concentration * phi, dims=broadcast_dims + [dim]
)
else:
return phi

@@ -428,7 +439,9 @@ def R2D2M2CP(
dims=dims,
)
mask, psi = _psi(
positive_probs=positive_probs, positive_probs_std=positive_probs_std, dims=dims
positive_probs=positive_probs,
positive_probs_std=positive_probs_std,
dims=dims,
)

beta = _R2D2M2CP_beta(
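Note: the `raw * std_param + mu_param` construction above is the usual non-centered parametrization: sample a standard normal and shift/scale it deterministically instead of sampling the scaled variable directly. A minimal sketch outside of R2D2M2CP (values arbitrary):

```python
import pymc as pm

with pm.Model():
    raw = pm.Normal("raw", 0.0, 1.0)
    # Distributed as Normal(2.0, 0.5), but parameterized through a standard normal.
    beta = pm.Deterministic("beta", 2.0 + 0.5 * raw)
```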
pymc_experimental/distributions/timeseries.py: 21 changes (16 additions, 5 deletions)
@@ -26,7 +26,9 @@
from pytensor.tensor.random.op import RandomVariable


def _make_outputs_info(n_lags: int, init_dist: Distribution) -> List[Union[Distribution, dict]]:
def _make_outputs_info(
n_lags: int, init_dist: Distribution
) -> List[Union[Distribution, dict]]:
"""
Two cases are needed for outputs_info in the scans used by DiscreteMarkovRv. If n_lags = 1, we need to throw away
the first dimension of init_dist_ or else markov_chain will have shape (steps, 1, *batch_size) instead of
@@ -124,7 +126,9 @@ def __new__(cls, *args, steps=None, n_lags=1, **kwargs):
@classmethod
def dist(cls, P=None, logit_P=None, steps=None, init_dist=None, n_lags=1, **kwargs):
steps = get_support_shape_1d(
support_shape=steps, shape=kwargs.get("shape", None), support_shape_offset=n_lags
support_shape=steps,
shape=kwargs.get("shape", None),
support_shape_offset=n_lags,
)

if steps is None:
@@ -199,7 +203,9 @@ def transition(*args):

(state_next_rng,) = tuple(state_updates.values())

discrete_mc_ = pt.moveaxis(pt.concatenate([init_dist_, markov_chain], axis=0), 0, -1)
discrete_mc_ = pt.moveaxis(
pt.concatenate([init_dist_, markov_chain], axis=0), 0, -1
)

discrete_mc_op = DiscreteMarkovChainRV(
inputs=[P_, steps_, init_dist_, state_rng],
@@ -218,7 +224,9 @@ def change_mc_size(op, dist, new_size, expand=False):
old_size = dist.shape[:-1]
new_size = tuple(new_size) + tuple(old_size)

return DiscreteMarkovChain.rv_op(*dist.owner.inputs[:-1], size=new_size, n_lags=op.n_lags)
return DiscreteMarkovChain.rv_op(
*dist.owner.inputs[:-1], size=new_size, n_lags=op.n_lags
)


@_support_point.register(DiscreteMarkovChainRV)
@@ -247,7 +255,10 @@ def discrete_mc_logp(op, values, P, steps, init_dist, state_rng, **kwargs):
value = values[0]
n_lags = op.n_lags

indexes = [value[..., i : -(n_lags - i) if n_lags != i else None] for i in range(n_lags + 1)]
indexes = [
value[..., i : -(n_lags - i) if n_lags != i else None]
for i in range(n_lags + 1)
]

mc_logprob = logp(init_dist, value[..., :n_lags]).sum(axis=-1)
mc_logprob += pt.log(P[tuple(indexes)]).sum(axis=-1)
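Note: the reformatted `indexes` comprehension builds the lagged slices used to index the transition tensor `P`; for `n_lags = 1` it yields the (previous state, next state) pairs. A small NumPy illustration (values arbitrary):

```python
import numpy as np

value = np.array([0, 1, 1, 2, 0])
n_lags = 1
indexes = [
    value[..., i : -(n_lags - i) if n_lags != i else None] for i in range(n_lags + 1)
]
# indexes == [array([0, 1, 1, 2]), array([1, 1, 2, 0])]

P = np.array([[0.9, 0.1, 0.0],
              [0.2, 0.6, 0.2],
              [0.3, 0.0, 0.7]])
step_probs = P[tuple(indexes)]  # P[0, 1], P[1, 1], P[1, 2], P[2, 0]
assert np.allclose(step_probs, [0.1, 0.6, 0.2, 0.3])
```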
pymc_experimental/gp/__init__.py: 2 changes (2 additions, 0 deletions)
@@ -14,3 +14,5 @@


from pymc_experimental.gp.latent_approx import KarhunenLoeveExpansion, ProjectedProcess

__all__ = ["KarhunenLoeveExpansion", "ProjectedProcess"]
pymc_experimental/gp/latent_approx.py: 14 changes (10 additions, 4 deletions)
@@ -47,7 +47,9 @@ def _build_prior(self, name, X, X_inducing, jitter=JITTER_DEFAULT, **kwargs):
L = cholesky(stabilize(Kuu, jitter))

n_inducing_points = np.shape(X_inducing)[0]
v = pm.Normal(name + "_u_rotated_", mu=0.0, sigma=1.0, size=n_inducing_points, **kwargs)
v = pm.Normal(
name + "_u_rotated_", mu=0.0, sigma=1.0, size=n_inducing_points, **kwargs
)
u = pm.Deterministic(name + "_u", L @ v)

Kfu = self.cov_func(X, X_inducing)
@@ -111,7 +113,9 @@ def _build_conditional(self, name, Xnew, X_inducing, L, Kuuiu, jitter, **kwargs)
Ksu = self.cov_func(Xnew, X_inducing)
mu = self.mean_func(Xnew) + Ksu @ Kuuiu
tmp = solve_lower(L, pt.transpose(Ksu))
Qss = pt.transpose(tmp) @ tmp # Qss = tt.dot(tt.dot(Ksu, tt.nlinalg.pinv(Kuu)), Ksu.T)
Qss = (
pt.transpose(tmp) @ tmp
) # Qss = tt.dot(tt.dot(Ksu, tt.nlinalg.pinv(Kuu)), Ksu.T)
Kss = self.cov_func(Xnew)
Lss = cholesky(stabilize(Kss - Qss, jitter))
return mu, Lss
@@ -137,7 +141,7 @@ def __init__(
super().__init__(mean_func=mean_func, cov_func=cov_func)

def _build_prior(self, name, X, jitter=1e-6, **kwargs):
mu = self.mean_func(X)
# mu = self.mean_func(X)
Kxx = pm.gp.util.stabilize(self.cov_func(X), jitter)
vals, vecs = pt.linalg.eigh(Kxx)
## NOTE: REMOVED PRECISION CUTOFF
@@ -147,7 +151,9 @@ def _build_prior(self, name, X, jitter=1e-6, **kwargs):
if self.variance_limit == 1:
n_eigs = len(vals)
else:
n_eigs = ((vals[::-1].cumsum() / vals.sum()) > self.variance_limit).nonzero()[0][0]
n_eigs = (
(vals[::-1].cumsum() / vals.sum()) > self.variance_limit
).nonzero()[0][0]
U = vecs[:, -n_eigs:]
s = vals[-n_eigs:]
basis = U * pt.sqrt(s)
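Note: in the reformatted `n_eigs` expression, `eigh` returns eigenvalues in ascending order, so the reversed cumulative sum is the share of total variance explained by the largest eigenvalues; `n_eigs` is the first index at which that share exceeds `variance_limit`, and the prior then keeps the top `n_eigs` eigenpairs. A small NumPy illustration (values arbitrary):

```python
import numpy as np

vals = np.array([0.05, 0.15, 0.8, 2.0, 7.0])  # ascending, as returned by eigh
explained = vals[::-1].cumsum() / vals.sum()  # array([0.7, 0.9, 0.98, 0.995, 1.0])

variance_limit = 0.95
n_eigs = (explained > variance_limit).nonzero()[0][0]  # 2
# The prior then uses U = vecs[:, -n_eigs:] and s = vals[-n_eigs:].
```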
pymc_experimental/inference/__init__.py: 2 changes (2 additions, 0 deletions)
@@ -14,3 +14,5 @@


from pymc_experimental.inference.fit import fit

__all__ = ["fit"]
pymc_experimental/inference/fit.py: 3 changes (1 addition, 2 deletions)
@@ -31,7 +31,7 @@ def fit(method, **kwargs):
"""
if method == "pathfinder":
try:
import blackjax
import blackjax # noqa: F401
except ImportError as exc:
raise RuntimeError("Need BlackJAX to use `pathfinder`") from exc

@@ -40,7 +40,6 @@
return fit_pathfinder(**kwargs)

if method == "laplace":

from pymc_experimental.inference.laplace import laplace

return laplace(**kwargs)
pymc_experimental/inference/laplace.py: 8 changes (6 additions, 2 deletions)
@@ -146,11 +146,15 @@ def addFitToInferenceData(vars, idata, mean, covariance):
# Convert to xarray DataArray
mean_dataarray = xr.DataArray(mean, dims=["rows"], coords={"rows": coord_names})
cov_dataarray = xr.DataArray(
covariance, dims=["rows", "columns"], coords={"rows": coord_names, "columns": coord_names}
covariance,
dims=["rows", "columns"],
coords={"rows": coord_names, "columns": coord_names},
)

# Create xarray dataset
dataset = xr.Dataset({"mean_vector": mean_dataarray, "covariance_matrix": cov_dataarray})
dataset = xr.Dataset(
{"mean_vector": mean_dataarray, "covariance_matrix": cov_dataarray}
)

idata.add_groups(fit=dataset)

pymc_experimental/inference/pathfinder.py: 4 changes (3 additions, 1 deletion)
@@ -48,7 +48,9 @@ def convert_flat_trace_to_idata(
trace = {k: np.asarray(v)[None, ...] for k, v in trace.items()}

var_names = model.unobserved_value_vars
vars_to_sample = list(get_default_varnames(var_names, include_transformed=include_transformed))
vars_to_sample = list(
get_default_varnames(var_names, include_transformed=include_transformed)
)
print("Transforming variables...", file=sys.stdout)
jax_fn = get_jaxified_graph(inputs=model.value_vars, outputs=vars_to_sample)
result = jax.vmap(jax.vmap(jax_fn))(
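Note: `jax.vmap(jax.vmap(jax_fn))` maps the jaxified graph over the two leading (chain, draw) axes that the `[None, ...]` above adds to the flat trace. A toy standalone sketch (function and shapes illustrative):

```python
import jax
import jax.numpy as jnp


def per_draw(params):  # acts on one draw's parameter vector
    return jnp.sum(params ** 2)


x = jnp.arange(24.0).reshape(2, 4, 3)   # (chain, draw, n_params)
out = jax.vmap(jax.vmap(per_draw))(x)   # one value per (chain, draw)
assert out.shape == (2, 4)
```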