Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

update pre-commit hooks #369

Merged
merged 2 commits into from
Dec 9, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ repos:
- id: rst-inline-touching-normal

- repo: https://github.com/rbubley/mirrors-prettier
rev: v3.4.1
rev: v3.4.2
hooks:
- id: prettier
types_or: [yaml, html, css, scss, javascript, json]
Expand All @@ -68,7 +68,7 @@ repos:
- id: codespell

- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.8.1
rev: v0.8.2
hooks:
- id: ruff
args: [--fix, --show-fixes]
Expand Down
80 changes: 40 additions & 40 deletions lmo/_lm.py
Original file line number Diff line number Diff line change
Expand Up @@ -232,16 +232,16 @@ def l_weights(
w = _w
else:
# when caching, use at least 4 orders, to avoid cache misses
_r_max = 4 if cache and r_max < 4 else r_max
r_max_ = 4 if cache and r_max < 4 else r_max

_cache_default = False
cache_default = False
if r_max + s + t <= 24 and isinstance(s, int) and isinstance(t, int):
w = _l_weights_pwm(_r_max, n, trim=(s, t), dtype=sctype)
_cache_default = True
w = _l_weights_pwm(r_max_, n, trim=(s, t), dtype=sctype)
cache_default = True
else:
w = _l_weights_ostat(_r_max, n, trim=(s, t), dtype=sctype)
w = _l_weights_ostat(r_max_, n, trim=(s, t), dtype=sctype)

if cache or cache is None and _cache_default:
if cache or cache is None and cache_default:
w.setflags(write=False)
# be wary of a potential race condition
if key not in _CACHE or w.shape[0] >= _CACHE[key].shape[0]:
Expand Down Expand Up @@ -463,8 +463,8 @@ def l_moment(
x_k = ensure_axis_at(x_k, axis, -1)
n = x_k.shape[-1]

_r = clean_order(r)
r_min, r_max = np.min(_r), np.max(_r)
r_ = clean_order(r)
r_min, r_max = np.min(r_), np.max(r_)

# TODO @jorenham: nan handling, see:
# https://github.com/jorenham/Lmo/issues/70
Expand All @@ -481,9 +481,9 @@ def l_moment(
l_r = np.inner(l_weights(r_max, n, st, dtype=dtype, cache=cache), x_k)

if r_min > 0:
return l_r.take(_r - 1, 0)
return l_r.take(r_ - 1, 0)

return np.r_[np.ones((1, *l_r.shape[1:]), l_r.dtype), l_r].take(_r, 0)
return np.r_[np.ones((1, *l_r.shape[1:]), l_r.dtype), l_r].take(r_, 0)


@overload
Expand Down Expand Up @@ -1282,21 +1282,21 @@ def l_moment_cov(
Todo:
- Use the direct (Jacobi) method from Hosking (2015).
"""
_r_max = clean_order(r_max, "r_max")
_trim = cast("tuple[int, int]", clean_trim(trim))
r_max_ = clean_order(r_max, "r_max")
trim_ = cast("tuple[int, int]", clean_trim(trim))

if any(int(t) != t for t in _trim):
if any(int(t) != t for t in trim_):
msg = "l_moment_cov does not support fractional trimming (yet)"
raise TypeError(msg)

ks = _r_max + sum(_trim)
if ks < _r_max:
ks = r_max_ + sum(trim_)
if ks < r_max_:
msg = "trimmings must be positive"
raise ValueError(msg)

# projection matrix: PWMs -> generalized trimmed L-moments
p_l: npt.NDArray[np.floating[Any]]
p_l = trim_matrix(_r_max, trim=_trim, dtype=dtype) @ sh_legendre(ks)
p_l = trim_matrix(r_max_, trim=trim_, dtype=dtype) @ sh_legendre(ks)
# clean some numerical noise
# p_l = np.round(p_l, 12) + 0.

Expand Down Expand Up @@ -1420,32 +1420,32 @@ def l_ratio_se(
L-moments](https://doi.org/10.1016/S0378-3758(03)00213-1)

"""
_r, _s = np.broadcast_arrays(np.asarray(r), np.asarray(s))
_rs = np.stack((_r, _s))
r_max = np.amax(np.r_[_r, _s].ravel())
r_, s_ = np.broadcast_arrays(np.asarray(r), np.asarray(s))
rs = np.stack((r_, s_))
r_max = np.amax(np.r_[r_, s_].ravel())

# L-moments
l_rs = l_moment(a, _rs, trim, axis=axis, dtype=dtype, **kwds)
l_rs = l_moment(a, rs, trim, axis=axis, dtype=dtype, **kwds)
l_r, l_s = l_rs[0], l_rs[1]

# L-moment auto-covariance matrix
k_l = l_moment_cov(a, r_max, trim, axis=axis, dtype=dtype, **kwds)
# prepend the "zeroth" moment, which has 0 (co)variance
k_l = np.pad(k_l, (1, 0), constant_values=0)

s_rr = k_l[_r, _r] # Var[l_r]
s_ss = k_l[_s, _s] # Var[l_s]
s_rs = k_l[_r, _s] # Cov[l_r, l_s]
s_rr = k_l[r_, r_] # Var[l_r]
s_ss = k_l[s_, s_] # Var[l_s]
s_rs = k_l[r_, s_] # Cov[l_r, l_s]

# the classic approximation to propagation of uncertainty for an RV ratio
with np.errstate(divide="ignore", invalid="ignore"):
_s_tt = (l_r / l_s) ** 2 * (
s_tt_ = (l_r / l_s) ** 2 * (
s_rr / l_r**2 + s_ss / l_s**2 - 2 * s_rs / (l_r * l_s)
)
# Var[l_r / l_0] == Var[l_r / 1] == Var[l_r]
_s_tt = np.where(_s == 0, s_rr, _s_tt)
s_tt_ = np.where(s_ == 0, s_rr, s_tt_)
# Var[l_r / l_r] = Var[1] = 0
s_tt = np.where(_r == _s, 0, _s_tt)
s_tt = np.where(r_ == s_, 0, s_tt_)

return np.sqrt(s_tt)

Expand Down Expand Up @@ -1573,7 +1573,7 @@ def l_moment_influence(
TypeError: If `a` is not a floating-point type.

"""
_r = clean_order(r)
r_ = clean_order(r)
s, t = clean_trim(trim)

x_k = np.array(a, copy=bool(sort), dtype=np.float64)
Expand All @@ -1584,31 +1584,31 @@ def l_moment_influence(

n = len(x_k)

w_k: onp.Array1D[np.float64] = l_weights(_r, n, (s, t))[-1]
w_k: onp.Array1D[np.float64] = l_weights(r_, n, (s, t))[-1]
l_r = cast("np.float64", w_k @ x_k)

def influence_function(x: _T_x, /) -> _T_x:
_x = np.asanyarray(x)
x_ = np.asanyarray(x)

# ECDF
# k = np.maximum(np.searchsorted(x_k, _x, side='right') - 1, 0)
w = np.interp(
_x,
x_,
x_k,
w_k,
left=0 if s else w_k[0],
right=0 if t else w_k[-1],
)
alpha = n * w * np.where(w, _x, 0)
alpha = n * w * np.where(w, x_, 0)
out = round0(alpha - l_r, tol=tol)

if _x.ndim == 0 and np.isscalar(x):
if x_.ndim == 0 and np.isscalar(x):
return out.item()
return cast("_T_x", out)

influence_function.__doc__ = (
f"Empirical L-moment influence function given "
f"`r = {_r}`, `trim = {(s, t)}` and `{n = }`."
f"`r = {r_}`, `trim = {(s, t)}` and `{n = }`."
)
# piggyback the L-moment, to avoid recomputing it in l_ratio_influence
influence_function.l = l_r # pyright: ignore[reportFunctionMemberAccess]
Expand Down Expand Up @@ -1653,14 +1653,14 @@ def l_ratio_influence(
The (vectorized) empirical influence function.

"""
_r, _s = clean_order(r), clean_order(s, name="s")
r_, s_ = clean_order(r), clean_order(s, name="s")

_x = np.array(a, copy=bool(sort))
_x = sort_maybe(_x, sort=sort, inplace=True)
n = len(_x)
x = np.array(a, copy=bool(sort))
x = sort_maybe(x, sort=sort, inplace=True)
n = len(x)

eif_r = l_moment_influence(_x, _r, trim, sort=False, tol=0)
eif_k = l_moment_influence(_x, _s, trim, sort=False, tol=0)
eif_r = l_moment_influence(x, r_, trim, sort=False, tol=0)
eif_k = l_moment_influence(x, s_, trim, sort=False, tol=0)

l_r, l_k = cast(
"tuple[float, float]",
Expand All @@ -1681,6 +1681,6 @@ def influence_function(x: _T_x, /) -> _T_x:

influence_function.__doc__ = (
f"Theoretical influence function for L-moment ratio with "
f"`r = {_r}`, `k = {_s}`, `{trim = }`, and `{n = }`."
f"`r = {r_}`, `k = {s_}`, `{trim = }`, and `{n = }`."
)
return influence_function
14 changes: 7 additions & 7 deletions lmo/_lm_co.py
Original file line number Diff line number Diff line change
Expand Up @@ -214,15 +214,15 @@ def l_comoment(
x = x.T
m, n = x.shape

_r = np.asarray(clean_order(r), np.intp)
r_ = np.asarray(clean_order(r), np.intp)

if not m:
return np.empty((*np.shape(_r), 0, 0), dtype=dtype)
return np.empty((*np.shape(r_), 0, 0), dtype=dtype)

r_min = np.min(_r)
r_max = np.max(_r)
r_min = np.min(r_)
r_max = np.max(r_)

if r_min == r_max == 0 and _r.ndim == 0:
if r_min == r_max == 0 and r_.ndim == 0:
return np.identity(m, dtype=dtype)

# projection/hat matrix of shape (r_max - r_min, n)
Expand All @@ -242,9 +242,9 @@ def l_comoment(
# the zeroth L-comoment is the delta function, so the L-comoment
# matrix is the identity matrix
l_0ij = np.identity(m, dtype=dtype)[None, :]
return np.concat((l_0ij, l_kij)).take(_r, 0)
return np.concat((l_0ij, l_kij)).take(r_, 0)

return l_kij.take(_r - r_min, 0)
return l_kij.take(r_ - r_min, 0)


@overload
Expand Down
50 changes: 25 additions & 25 deletions lmo/_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,8 +54,8 @@ def ensure_axis_at(

if source is None:
# numpy typing issue workaround
_copy = None if copy is None else np.bool_(copy)
return a.reshape(-1, order=order, copy=_copy)
copy_ = None if copy is None else np.bool_(copy)
return a.reshape(-1, order=order, copy=copy_)

if (src := int(source)) == (dst := int(destination)):
return a.copy() if copy else a
Expand Down Expand Up @@ -105,9 +105,9 @@ def round0(
Todo:
- Add an `inplace: bool = False` kwarg
"""
_a = np.asanyarray(a)
_tol = np.finfo(_a.dtype).resolution * 2 if tol is None else abs(tol)
out = np.where(np.abs(_a) <= _tol, 0, a)
a_ = np.asanyarray(a)
tol_ = np.finfo(a_.dtype).resolution * 2 if tol is None else abs(tol)
out = np.where(np.abs(a_) <= tol_, 0, a)
return out[()] if np.isscalar(a) else out


Expand All @@ -123,17 +123,17 @@ def _apply_aweights(
# ensure that the samples are on the last axis, for easy iterating
axis = int(axis)
if swap_axes := axis % x.ndim != x.ndim - 1:
_x = np.swapaxes(x, axis, -1)
_vv = np.moveaxis(vv, axis, -1)
x_ = np.swapaxes(x, axis, -1)
vv_ = np.moveaxis(vv, axis, -1)
else:
_x, _vv = x, vv
x_, vv_ = x, vv

# cannot use np.apply_along_axis here, since both x_k and w_k need to be
# applied simultaneously
out: _AT_f = np.empty_like(x)

for j in np.ndindex(out.shape[:-1]):
x_jk, w_jk = _x[j], _vv[j]
x_jk, w_jk = x_[j], vv_[j]
if w_jk[-1] <= 0:
msg = "weight sum must be positive"
raise ValueError(msg)
Expand Down Expand Up @@ -233,36 +233,36 @@ def ordered( # noqa: C901
Calculate `n = len(x)` order stats of `x`, optionally weighted.
If `y` is provided, the order of `y` is used instead.
"""
_x = _z = np.asanyarray(x, dtype=dtype)
x_ = z = np.asanyarray(x, dtype=dtype)

# ravel/flatten, without copying
if axis is None:
_x = _x.reshape(-1)
x_ = x_.reshape(-1)

# figure out the ordering
if y is not None:
_y = np.asanyarray(y)
y_ = np.asanyarray(y)
if axis is None:
_y = _y.reshape(-1)
y_ = y_.reshape(-1)

# sort first by y, then by x (faster than lexsort)
if _y.ndim == _x.ndim:
_z = _y + 1j * _x
if y_.ndim == x_.ndim:
z = y_ + 1j * x_
else:
assert axis is not None
_z = np.apply_along_axis(np.add, axis, 1j * _x, _y)
z = np.apply_along_axis(np.add, axis, 1j * x_, y_)

# apply the ordering
if sort or sort is None: # pyright: ignore[reportUnnecessaryComparison]
kind = sort if isinstance(sort, str) else None
i_kk = np.argsort(_z, axis=axis, kind=kind)
x_kk = _sort_like(_x, i_kk, axis=axis)
i_kk = np.argsort(z, axis=axis, kind=kind)
x_kk = _sort_like(x_, i_kk, axis=axis)
else:
if axis is None:
i_kk = np.arange(len(_z))
i_kk = np.arange(len(z))
else:
i_kk = np.mgrid[tuple(slice(0, j) for j in _z.shape)][axis]
x_kk = _x
i_kk = np.mgrid[tuple(slice(0, j) for j in z.shape)][axis]
x_kk = x_

# prepare observation weights
w_kk = None
Expand Down Expand Up @@ -340,14 +340,14 @@ def clean_orders(
rmin: onp.ToInt = 0,
) -> onp.ArrayND[np.intp]:
"""Validates and cleans an array-like of (L-)moment orders."""
_r = np.asarray_chkfinite(r, dtype=np.intp)
r_ = np.asarray_chkfinite(r, dtype=np.intp)

if np.any(invalid := _r < rmin):
if np.any(invalid := r_ < rmin):
i = np.argmax(invalid)
msg = f"expected all {name} >= {rmin}, got {name}[{i}] = {_r[i]} "
msg = f"expected all {name} >= {rmin}, got {name}[{i}] = {r_[i]} "
raise TypeError(msg)

return _r
return r_


_COMMON_TRIM1: Final[frozenset[int]] = frozenset({0, 1, 2})
Expand Down
Loading
Loading