
Commit bcc79f8

⬆️ UPGRADE: Autoupdate pre-commit config
1 parent d018cba commit bcc79f8

36 files changed: +148 −151 lines
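
Nearly all of the churn below comes from one formatting rule: Black 22.1.0 (the first stable release of the formatter) hugs the power operator, dropping the spaces around ** whenever both operands are "simple" — roughly a name, a numeric literal, or an attribute access, optionally with a unary sign. A minimal sketch of the rule, using lines taken from this diff:

# Black 21.12b0 kept spaces around the power operator:
tau = sigma_ ** -2.0
quaddist = (delta_trans ** 2).sum(axis=-1)

# Black 22.1.0 hugs ** when both operands are simple:
tau = sigma_**-2.0
quaddist = (delta_trans**2).sum(axis=-1)

# Spacing is kept when an operand is not simple (a call, subscript, or
# parenthesized expression), which is why lines like these are untouched:
std = tau ** (-1)
variance = at.inc_subtensor(variance[0], value[0] ** 2)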

.pre-commit-config.yaml

Lines changed: 1 addition & 1 deletion
@@ -24,7 +24,7 @@ repos:
   - id: pyupgrade
     args: [--py37-plus]
 - repo: https://github.com/psf/black
-  rev: 21.12b0
+  rev: 22.1.0
   hooks:
   - id: black
 - repo: https://github.com/PyCQA/pylint
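
A revision bump like this is normally generated rather than hand-edited. A sketch of the usual workflow, assuming the standard pre-commit CLI (whether this particular commit was produced by hand or by a bot is not recorded on this page):

pre-commit autoupdate
pre-commit run --all-files

autoupdate rewrites each rev: in .pre-commit-config.yaml to the hook's latest tagged release (here black 21.12b0 → 22.1.0), and the run pass applies the updated hooks to the whole tree, producing the mechanical reformatting in the rest of this commit.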

benchmarks/benchmarks/benchmarks.py

Lines changed: 6 additions & 6 deletions
@@ -32,9 +32,9 @@ def glm_hierarchical_model(random_seed=123):
 
     n_counties = len(data.county.unique())
     with pm.Model() as model:
-        mu_a = pm.Normal("mu_a", mu=0.0, sd=100 ** 2)
+        mu_a = pm.Normal("mu_a", mu=0.0, sd=100**2)
         sigma_a = pm.HalfCauchy("sigma_a", 5)
-        mu_b = pm.Normal("mu_b", mu=0.0, sd=100 ** 2)
+        mu_b = pm.Normal("mu_b", mu=0.0, sd=100**2)
         sigma_b = pm.HalfCauchy("sigma_b", 5)
         a = pm.Normal("a", mu=0, sd=1, shape=n_counties)
         b = pm.Normal("b", mu=0, sd=1, shape=n_counties)

@@ -70,7 +70,7 @@ def mixture_model(random_seed=1234):
     # Initialization can be poorly specified, this is a hack to make it work
     start = {
         "mu": mu_true.copy(),
-        "tau_log__": np.log(1.0 / sigma ** 2),
+        "tau_log__": np.log(1.0 / sigma**2),
         "w_stickbreaking__": np.array([-0.03, 0.44]),
     }
     return model, start

@@ -137,8 +137,8 @@ def time_drug_evaluation(self):
             group2_mean = pm.Normal("group2_mean", y_mean, sd=y_std)
             group1_std = pm.Uniform("group1_std", lower=sigma_low, upper=sigma_high)
             group2_std = pm.Uniform("group2_std", lower=sigma_low, upper=sigma_high)
-            lambda_1 = group1_std ** -2
-            lambda_2 = group2_std ** -2
+            lambda_1 = group1_std**-2
+            lambda_2 = group2_std**-2
 
             nu = pm.Exponential("ν_minus_one", 1 / 29.0) + 1
 
@@ -147,7 +147,7 @@ def time_drug_evaluation(self):
             diff_of_means = pm.Deterministic("difference of means", group1_mean - group2_mean)
             pm.Deterministic("difference of stds", group1_std - group2_std)
             pm.Deterministic(
-                "effect size", diff_of_means / np.sqrt((group1_std ** 2 + group2_std ** 2) / 2)
+                "effect size", diff_of_means / np.sqrt((group1_std**2 + group2_std**2) / 2)
             )
             pm.sample(
                 draws=20000, cores=4, chains=4, progressbar=False, compute_convergence_checks=False

pymc/bart/pgbart.py

Lines changed: 3 additions & 3 deletions
@@ -76,10 +76,10 @@ def __init__(self, vars=None, num_particles=40, max_stages=100, batch="auto", mo
         # if data is binary
         Y_unique = np.unique(self.Y)
         if Y_unique.size == 2 and np.all(Y_unique == [0, 1]):
-            self.mu_std = 6 / (self.k * self.m ** 0.5)
+            self.mu_std = 6 / (self.k * self.m**0.5)
         # maybe we need to check for count data
         else:
-            self.mu_std = (2 * self.Y.std()) / (self.k * self.m ** 0.5)
+            self.mu_std = (2 * self.Y.std()) / (self.k * self.m**0.5)
 
         self.num_observations = self.X.shape[0]
         self.num_variates = self.X.shape[1]

@@ -354,7 +354,7 @@ def compute_prior_probability(alpha):
     prior_leaf_prob = [0]
     depth = 1
     while prior_leaf_prob[-1] < 1:
-        prior_leaf_prob.append(1 - alpha ** depth)
+        prior_leaf_prob.append(1 - alpha**depth)
         depth += 1
     return prior_leaf_prob

pymc/distributions/continuous.py

Lines changed: 21 additions & 21 deletions
@@ -235,7 +235,7 @@ def get_tau_sigma(tau=None, sigma=None):
             else:
                 assert np.all(np.asarray(sigma) > 0)
                 sigma_ = sigma
-            tau = sigma_ ** -2.0
+            tau = sigma_**-2.0
 
     else:
         if sigma is not None:

@@ -247,7 +247,7 @@ def get_tau_sigma(tau=None, sigma=None):
                 assert np.all(np.asarray(tau) > 0)
                 tau_ = tau
 
-            sigma = tau_ ** -0.5
+            sigma = tau_**-0.5
 
     return floatX(tau), floatX(sigma)
 
@@ -1240,7 +1240,7 @@ def get_alpha_beta(self, alpha=None, beta=None, mu=None, sigma=None):
         if (alpha is not None) and (beta is not None):
             pass
         elif (mu is not None) and (sigma is not None):
-            kappa = mu * (1 - mu) / sigma ** 2 - 1
+            kappa = mu * (1 - mu) / sigma**2 - 1
             alpha = mu * kappa
             beta = (1 - mu) * kappa
         else:

@@ -1375,7 +1375,7 @@ def logp(value, a, b):
         -------
         TensorVariable
         """
-        res = at.log(a) + at.log(b) + (a - 1) * at.log(value) + (b - 1) * at.log(1 - value ** a)
+        res = at.log(a) + at.log(b) + (a - 1) * at.log(value) + (b - 1) * at.log(1 - value**a)
         res = at.switch(
             at.or_(at.lt(value, 0), at.gt(value, 1)),
             -np.inf,

@@ -1409,7 +1409,7 @@ def logcdf(value, a, b):
             -np.inf,
             at.switch(
                 at.lt(value, 1),
-                at.log1mexp(b * at.log1p(-(value ** a))),
+                at.log1mexp(b * at.log1p(-(value**a))),
                 0,
             ),
         )

@@ -1608,9 +1608,9 @@ class AsymmetricLaplaceRV(RandomVariable):
     @classmethod
     def rng_fn(cls, rng, b, kappa, mu, size=None) -> np.ndarray:
         u = rng.uniform(size=size)
-        switch = kappa ** 2 / (1 + kappa ** 2)
+        switch = kappa**2 / (1 + kappa**2)
         non_positive_x = mu + kappa * np.log(u * (1 / switch)) / b
-        positive_x = mu - np.log((1 - u) * (1 + kappa ** 2)) / (kappa * b)
+        positive_x = mu - np.log((1 - u) * (1 + kappa**2)) / (kappa * b)
         draws = non_positive_x * (u <= switch) + positive_x * (u > switch)
         return np.asarray(draws)
 
@@ -1691,7 +1691,7 @@ def logp(value, b, kappa, mu):
         TensorVariable
         """
         value = value - mu
-        res = at.log(b / (kappa + (kappa ** -1))) + (
+        res = at.log(b / (kappa + (kappa**-1))) + (
             -value * b * at.sgn(value) * (kappa ** at.sgn(value))
         )
 
@@ -1781,7 +1781,7 @@ def dist(cls, mu=0, sigma=None, tau=None, sd=None, *args, **kwargs):
         return super().dist([mu, sigma], *args, **kwargs)
 
     def get_moment(rv, size, mu, sigma):
-        mean = at.exp(mu + 0.5 * sigma ** 2)
+        mean = at.exp(mu + 0.5 * sigma**2)
         if not rv_size_is_none(size):
             mean = at.full(size, mean)
         return mean

@@ -1955,7 +1955,7 @@ def logcdf(value, nu, mu, sigma):
         _, sigma = get_tau_sigma(sigma=sigma)
 
         t = (value - mu) / sigma
-        sqrt_t2_nu = at.sqrt(t ** 2 + nu)
+        sqrt_t2_nu = at.sqrt(t**2 + nu)
         x = (t + sqrt_t2_nu) / (2.0 * sqrt_t2_nu)
 
         res = at.log(at.betainc(nu / 2.0, nu / 2.0, x))

@@ -2307,8 +2307,8 @@ def get_alpha_beta(cls, alpha=None, beta=None, mu=None, sigma=None):
                 sigma = check_parameters(sigma, sigma > 0, msg="sigma > 0")
             else:
                 assert np.all(np.asarray(sigma) > 0)
-            alpha = mu ** 2 / sigma ** 2
-            beta = mu / sigma ** 2
+            alpha = mu**2 / sigma**2
+            beta = mu / sigma**2
         else:
             raise ValueError(
                 "Incompatible parameterization. Either use "

@@ -2435,8 +2435,8 @@ def _get_alpha_beta(cls, alpha, beta, mu, sigma):
                 sigma = check_parameters(sigma, sigma > 0, msg="sigma > 0")
             else:
                 assert np.all(np.asarray(sigma) > 0)
-            alpha = (2 * sigma ** 2 + mu ** 2) / sigma ** 2
-            beta = mu * (mu ** 2 + sigma ** 2) / sigma ** 2
+            alpha = (2 * sigma**2 + mu**2) / sigma**2
+            beta = mu * (mu**2 + sigma**2) / sigma**2
         else:
             raise ValueError(
                 "Incompatible parameterization. Either use "

@@ -2759,8 +2759,8 @@ def logp(value, nu, sigma):
             at.log(2)
             + gammaln((nu + 1.0) / 2.0)
             - gammaln(nu / 2.0)
-            - 0.5 * at.log(nu * np.pi * sigma ** 2)
-            - (nu + 1.0) / 2.0 * at.log1p(value ** 2 / (nu * sigma ** 2))
+            - 0.5 * at.log(nu * np.pi * sigma**2)
+            - (nu + 1.0) / 2.0 * at.log1p(value**2 / (nu * sigma**2))
         )
 
         res = at.switch(

@@ -2898,7 +2898,7 @@ def logp(value, mu, sigma, nu):
                 -at.log(nu)
                 + (mu - value) / nu
                 + 0.5 * (sigma / nu) ** 2
-                + normal_lcdf(mu + (sigma ** 2) / nu, sigma, value)
+                + normal_lcdf(mu + (sigma**2) / nu, sigma, value)
             ),
             log_normal(value, mean=mu, sigma=sigma),
         )

@@ -2939,7 +2939,7 @@ def logcdf(value, mu, sigma, nu):
             (
                 (mu - value) / nu
                 + 0.5 * (sigma / nu) ** 2
-                + normal_lcdf(mu + (sigma ** 2) / nu, sigma, value)
+                + normal_lcdf(mu + (sigma**2) / nu, sigma, value)
             ),
         ),
         normal_lcdf(mu, sigma, value),

@@ -3103,7 +3103,7 @@ def dist(cls, alpha=1, mu=0.0, sigma=None, tau=None, sd=None, *args, **kwargs):
         return super().dist([mu, sigma, alpha], *args, **kwargs)
 
     def get_moment(rv, size, mu, sigma, alpha):
-        mean = mu + sigma * (2 / np.pi) ** 0.5 * alpha / (1 + alpha ** 2) ** 0.5
+        mean = mu + sigma * (2 / np.pi) ** 0.5 * alpha / (1 + alpha**2) ** 0.5
         if not rv_size_is_none(size):
             mean = at.full(size, mean)
         return mean

@@ -3437,7 +3437,7 @@ def get_nu_b(cls, nu, b, sigma):
             raise ValueError("Rice distribution must specify either nu" " or b.")
 
     def get_moment(rv, size, nu, sigma):
-        nu_sigma_ratio = -(nu ** 2) / (2 * sigma ** 2)
+        nu_sigma_ratio = -(nu**2) / (2 * sigma**2)
         mean = (
             sigma
             * np.sqrt(np.pi / 2)

@@ -3939,7 +3939,7 @@ def logcdf(value, mu, sigma):
         TensorVariable
         """
         scaled = (value - mu) / sigma
-        res = at.log(at.erfc(at.exp(-scaled / 2) * (2 ** -0.5)))
+        res = at.log(at.erfc(at.exp(-scaled / 2) * (2**-0.5)))
         return check_parameters(
             res,
             0 < sigma,

pymc/distributions/dist_math.py

Lines changed: 10 additions & 10 deletions
@@ -213,7 +213,7 @@ def log_normal(x, mean, **kwargs):
     else:
         std = tau ** (-1)
         std += f(eps)
-    return f(c) - at.log(at.abs_(std)) - (x - mean) ** 2 / (2.0 * std ** 2)
+    return f(c) - at.log(at.abs_(std)) - (x - mean) ** 2 / (2.0 * std**2)
 
 
 def MvNormalLogp():

@@ -455,20 +455,20 @@ def log_i0(x):
     return at.switch(
         at.lt(x, 5),
         at.log1p(
-            x ** 2.0 / 4.0
-            + x ** 4.0 / 64.0
-            + x ** 6.0 / 2304.0
-            + x ** 8.0 / 147456.0
-            + x ** 10.0 / 14745600.0
-            + x ** 12.0 / 2123366400.0
+            x**2.0 / 4.0
+            + x**4.0 / 64.0
+            + x**6.0 / 2304.0
+            + x**8.0 / 147456.0
+            + x**10.0 / 14745600.0
+            + x**12.0 / 2123366400.0
         ),
         x
         - 0.5 * at.log(2.0 * np.pi * x)
         + at.log1p(
             1.0 / (8.0 * x)
-            + 9.0 / (128.0 * x ** 2.0)
-            + 225.0 / (3072.0 * x ** 3.0)
-            + 11025.0 / (98304.0 * x ** 4.0)
+            + 9.0 / (128.0 * x**2.0)
+            + 225.0 / (3072.0 * x**3.0)
+            + 11025.0 / (98304.0 * x**4.0)
         ),
     )

pymc/distributions/multivariate.py

Lines changed: 8 additions & 8 deletions
@@ -151,7 +151,7 @@ def quaddist_chol(delta, chol_mat):
     chol_cov = at.switch(ok, chol_mat, 1)
 
     delta_trans = solve_lower(chol_cov, delta.T).T
-    quaddist = (delta_trans ** 2).sum(axis=-1)
+    quaddist = (delta_trans**2).sum(axis=-1)
     logdet = at.sum(at.log(diag))
     return quaddist, logdet, ok
 
@@ -165,7 +165,7 @@ def quaddist_tau(delta, chol_mat):
     chol_tau = at.switch(ok, chol_mat, 1)
 
     delta_trans = at.dot(delta, chol_tau)
-    quaddist = (delta_trans ** 2).sum(axis=-1)
+    quaddist = (delta_trans**2).sum(axis=-1)
     logdet = -at.sum(at.log(diag))
     return quaddist, logdet, ok
 
@@ -1095,14 +1095,14 @@ def _lkj_normalizing_constant(eta, n):
         result = gammaln(2.0 * at.arange(1, int((n - 1) / 2) + 1)).sum()
         if n % 2 == 1:
             result += (
-                0.25 * (n ** 2 - 1) * at.log(np.pi)
+                0.25 * (n**2 - 1) * at.log(np.pi)
                 - 0.25 * (n - 1) ** 2 * at.log(2.0)
                 - (n - 1) * gammaln(int((n + 1) / 2))
             )
         else:
             result += (
                 0.25 * n * (n - 2) * at.log(np.pi)
-                + 0.25 * (3 * n ** 2 - 4 * n) * at.log(2.0)
+                + 0.25 * (3 * n**2 - 4 * n) * at.log(2.0)
                 + n * gammaln(n / 2)
                 - (n - 1) * gammaln(n)
             )

@@ -1227,7 +1227,7 @@ def logp(value, n, eta, sd_dist):
             raise ValueError("LKJCholeskyCov logp is only implemented for vector values (ndim=1)")
 
         diag_idxs = at.cumsum(at.arange(1, n + 1)) - 1
-        cumsum = at.cumsum(value ** 2)
+        cumsum = at.cumsum(value**2)
         variance = at.zeros(at.atleast_1d(n))
         variance = at.inc_subtensor(variance[0], value[0] ** 2)
         variance = at.inc_subtensor(variance[1:], cumsum[diag_idxs[1:]] - cumsum[diag_idxs[:-1]])

@@ -1480,7 +1480,7 @@ def _random_corr_matrix(cls, rng, n, eta, flat_size):
         r12 = 2.0 * stats.beta.rvs(a=beta, b=beta, size=flat_size, random_state=rng) - 1.0
         P = np.full((flat_size, n, n), np.eye(n))
         P[..., 0, 1] = r12
-        P[..., 1, 1] = np.sqrt(1.0 - r12 ** 2)
+        P[..., 1, 1] = np.sqrt(1.0 - r12**2)
         for mp1 in range(2, n):
             beta -= 0.5
             y = stats.beta.rvs(a=mp1 / 2.0, b=beta, size=flat_size, random_state=rng)

@@ -1852,7 +1852,7 @@ def rng_fn(self, rng, mu, sigma, *covs, size=None):
         cov = reduce(linalg.kron, covs)
 
         if sigma:
-            cov = cov + sigma ** 2 * np.eye(cov.shape[0])
+            cov = cov + sigma**2 * np.eye(cov.shape[0])
 
         x = multivariate_normal.rng_fn(rng=rng, mean=mu, cov=cov, size=size)
         return x

@@ -2015,7 +2015,7 @@ def logp(value, mu, sigma, *covs):
 
         eigs_sep = list(map(at.as_tensor_variable, eigs_sep))
         eigs = kron_diag(*eigs_sep)  # Combine separate eigs
-        eigs += sigma ** 2
+        eigs += sigma**2
         N = eigs.shape[0]
 
         sqrt_quad = kron_dot(QTs, delta.T)

pymc/distributions/timeseries.py

Lines changed: 2 additions & 2 deletions
@@ -49,7 +49,7 @@ def __init__(self, k, tau_e, *args, **kwargs):
         super().__init__(*args, **kwargs)
         self.k = k = at.as_tensor_variable(k)
         self.tau_e = tau_e = at.as_tensor_variable(tau_e)
-        self.tau = tau_e * (1 - k ** 2)
+        self.tau = tau_e * (1 - k**2)
         self.mode = at.as_tensor_variable(0.0)
 
     def logp(self, x):

@@ -67,7 +67,7 @@ def logp(self, x):
         """
         k = self.k
         tau_e = self.tau_e  # innovation precision
-        tau = tau_e * (1 - k ** 2)  # ar1 precision
+        tau = tau_e * (1 - k**2)  # ar1 precision
 
         x_im1 = x[:-1]
         x_i = x[1:]

pymc/gp/gp.py

Lines changed: 1 addition & 1 deletion
@@ -1175,7 +1175,7 @@ def _build_conditional(self, Xnew, diag, pred_noise):
         QTs = list(map(at.transpose, Qs))
         eigs = kron_diag(*eigs_sep)  # Combine separate eigs
         if sigma is not None:
-            eigs += sigma ** 2
+            eigs += sigma**2
 
         Km = self.cov_func(Xnew, diag=diag)
         Knm = self.cov_func(X, Xnew)

pymc/model.py

Lines changed: 2 additions & 2 deletions
@@ -1016,7 +1016,7 @@ def compute_initial_point(self, seed=None) -> Dict[str, np.ndarray]:
         Maps names of transformed variables to numeric initial values in the transformed space.
         """
         if seed is None:
-            seed = self.rng_seeder.randint(2 ** 30, dtype=np.int64)
+            seed = self.rng_seeder.randint(2**30, dtype=np.int64)
         fn = make_initial_point_fn(model=self, return_transformed=True)
         return Point(fn(seed), model=self)
 
@@ -1043,7 +1043,7 @@ def next_rng(self) -> RandomStateSharedVariable:
         The new ``RandomStateSharedVariable`` is also added to
         ``Model.rng_seq``.
         """
-        new_seed = self.rng_seeder.randint(2 ** 30, dtype=np.int64)
+        new_seed = self.rng_seeder.randint(2**30, dtype=np.int64)
         next_rng = aesara.shared(np.random.RandomState(new_seed), borrow=True)
         next_rng.tag.is_rng = True
