
Commit 4300be1

Armavica authored and twiecki committed
Enable ruff to format code in docstrings
1 parent 79fafb0 commit 4300be1

File tree

20 files changed: +178 −148 lines changed

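Ruff's formatter can reformat code examples embedded in docstrings; presumably this commit turns that on via `docstring-code-format = true` under `[tool.ruff.format]` in `pyproject.toml` (an assumption: the configuration change itself is not among the hunks excerpted below). The visible effect across the diffs is mechanical: single quotes become double quotes, binary operators gain surrounding spaces, long calls are re-wrapped, and function definitions gain the conventional two blank lines of separation.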

pymc/backends/__init__.py

Lines changed: 4 additions & 4 deletions
@@ -23,7 +23,7 @@
     Values can be accessed in a few ways. The easiest way is to index the
     backend object with a variable or variable name.
 
-    >>> trace['x'] # or trace.x or trace[x]
+    >>> trace["x"] # or trace.x or trace[x]
 
     The call will return the sampling values of `x`, with the values for
     all chains concatenated. (For a single call to `sample`, the number of
@@ -32,18 +32,18 @@
     To discard the first N values of each chain, slicing syntax can be
     used.
 
-    >>> trace['x', 1000:]
+    >>> trace["x", 1000:]
 
     The `get_values` method offers more control over which values are
     returned. The call below will discard the first 1000 iterations
     from each chain and keep the values for each chain as separate arrays.
 
-    >>> trace.get_values('x', burn=1000, combine=False)
+    >>> trace.get_values("x", burn=1000, combine=False)
 
     The `chains` parameter of `get_values` can be used to limit the chains
     that are retrieved.
 
-    >>> trace.get_values('x', burn=1000, chains=[0, 2])
+    >>> trace.get_values("x", burn=1000, chains=[0, 2])
 
     MultiTrace objects also support slicing. For example, the following
     call would return a new trace object without the first 1000 sampling
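For context, a minimal runnable sketch of the MultiTrace API this docstring documents (the model, draw count, and burn-in below are illustrative assumptions, not part of the commit):

    import pymc as pm

    with pm.Model():
        pm.Normal("x", mu=0, sigma=1)
        # return_inferencedata=False yields the legacy MultiTrace object
        trace = pm.sample(2000, chains=4, return_inferencedata=False)

    trace["x"]                                       # all chains concatenated
    trace["x", 1000:]                                # drop the first 1000 draws per chain
    trace.get_values("x", burn=1000, combine=False)  # one array per chain
    trace.get_values("x", burn=1000, chains=[0, 2])  # only chains 0 and 2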

pymc/data.py

Lines changed: 4 additions & 4 deletions
@@ -390,16 +390,16 @@ def Data(
     >>> observed_data = [mu + np.random.randn(20) for mu in true_mu]
 
     >>> with pm.Model() as model:
-    ...     data = pm.Data('data', observed_data[0])
-    ...     mu = pm.Normal('mu', 0, 10)
-    ...     pm.Normal('y', mu=mu, sigma=1, observed=data)
+    ...     data = pm.Data("data", observed_data[0])
+    ...     mu = pm.Normal("mu", 0, 10)
+    ...     pm.Normal("y", mu=mu, sigma=1, observed=data)
 
     >>> # Generate one trace for each dataset
     >>> idatas = []
     >>> for data_vals in observed_data:
     ...     with model:
     ...         # Switch out the observed dataset
-    ...         model.set_data('data', data_vals)
+    ...         model.set_data("data", data_vals)
     ...         idatas.append(pm.sample())
     """
     if coords is None:

pymc/distributions/continuous.py

Lines changed: 15 additions & 15 deletions
@@ -488,10 +488,10 @@ class Normal(Continuous):
     .. code-block:: python
 
         with pm.Model():
-            x = pm.Normal('x', mu=0, sigma=10)
+            x = pm.Normal("x", mu=0, sigma=10)
 
         with pm.Model():
-            x = pm.Normal('x', mu=0, tau=1/23)
+            x = pm.Normal("x", mu=0, tau=1 / 23)
     """
 
     rv_op = normal
@@ -636,13 +636,13 @@ class TruncatedNormal(BoundedContinuous):
     .. code-block:: python
 
         with pm.Model():
-            x = pm.TruncatedNormal('x', mu=0, sigma=10, lower=0)
+            x = pm.TruncatedNormal("x", mu=0, sigma=10, lower=0)
 
         with pm.Model():
-            x = pm.TruncatedNormal('x', mu=0, sigma=10, upper=1)
+            x = pm.TruncatedNormal("x", mu=0, sigma=10, upper=1)
 
         with pm.Model():
-            x = pm.TruncatedNormal('x', mu=0, sigma=10, lower=0, upper=1)
+            x = pm.TruncatedNormal("x", mu=0, sigma=10, lower=0, upper=1)
 
     """
 
@@ -817,10 +817,10 @@ class HalfNormal(PositiveContinuous):
     .. code-block:: python
 
         with pm.Model():
-            x = pm.HalfNormal('x', sigma=10)
+            x = pm.HalfNormal("x", sigma=10)
 
         with pm.Model():
-            x = pm.HalfNormal('x', tau=1/15)
+            x = pm.HalfNormal("x", tau=1 / 15)
     """
 
     rv_op = halfnormal
@@ -1711,10 +1711,10 @@ class LogNormal(PositiveContinuous):
 
         # Example to show that we pass in only ``sigma`` or ``tau`` but not both.
         with pm.Model():
-            x = pm.LogNormal('x', mu=2, sigma=30)
+            x = pm.LogNormal("x", mu=2, sigma=30)
 
         with pm.Model():
-            x = pm.LogNormal('x', mu=2, tau=1/100)
+            x = pm.LogNormal("x", mu=2, tau=1 / 100)
     """
 
     rv_op = lognormal
@@ -1828,10 +1828,10 @@ class StudentT(Continuous):
     .. code-block:: python
 
         with pm.Model():
-            x = pm.StudentT('x', nu=15, mu=0, sigma=10)
+            x = pm.StudentT("x", nu=15, mu=0, sigma=10)
 
         with pm.Model():
-            x = pm.StudentT('x', nu=15, mu=0, lam=1/23)
+            x = pm.StudentT("x", nu=15, mu=0, lam=1 / 23)
     """
 
     rv_op = t
@@ -2802,10 +2802,10 @@ class HalfStudentT(PositiveContinuous):
 
         # Only pass in one of lam or sigma, but not both.
         with pm.Model():
-            x = pm.HalfStudentT('x', sigma=10, nu=10)
+            x = pm.HalfStudentT("x", sigma=10, nu=10)
 
         with pm.Model():
-            x = pm.HalfStudentT('x', lam=4, nu=10)
+            x = pm.HalfStudentT("x", lam=4, nu=10)
     """
 
     rv_type = HalfStudentTRV
@@ -4104,9 +4104,9 @@ class PolyaGamma(PositiveContinuous):
 
         rng = np.random.default_rng()
         with pm.Model():
-            x = pm.PolyaGamma('x', h=1, z=5.5)
+            x = pm.PolyaGamma("x", h=1, z=5.5)
         with pm.Model():
-            x = pm.PolyaGamma('x', h=25, z=-2.3, rng=rng, size=(100, 5))
+            x = pm.PolyaGamma("x", h=25, z=-2.3, rng=rng, size=(100, 5))
 
     References
     ----------
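A note on the sigma/tau pairs above: PyMC treats them as alternative parametrizations related by tau = 1/sigma**2, so only one may be passed. A minimal sketch (variable names are illustrative):

    import pymc as pm

    sigma = 10.0
    with pm.Model():
        a = pm.Normal("a", mu=0, sigma=sigma)
        b = pm.Normal("b", mu=0, tau=1 / sigma**2)  # same distribution as a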

pymc/distributions/custom.py

Lines changed: 15 additions & 8 deletions
@@ -571,13 +571,15 @@ class CustomDist:
         import pymc as pm
         from pytensor.tensor import TensorVariable
 
+
         def logp(value: TensorVariable, mu: TensorVariable) -> TensorVariable:
-            return -(value - mu)**2
+            return -((value - mu) ** 2)
+
 
         with pm.Model():
-            mu = pm.Normal('mu',0,1)
+            mu = pm.Normal("mu", 0, 1)
             pm.CustomDist(
-                'custom_dist',
+                "custom_dist",
                 mu,
                 logp=logp,
                 observed=np.random.randn(100),
@@ -596,20 +598,23 @@ def logp(value: TensorVariable, mu: TensorVariable) -> TensorVariable:
         import pymc as pm
         from pytensor.tensor import TensorVariable
 
+
         def logp(value: TensorVariable, mu: TensorVariable) -> TensorVariable:
-            return -(value - mu)**2
+            return -((value - mu) ** 2)
+
 
         def random(
             mu: np.ndarray | float,
             rng: Optional[np.random.Generator] = None,
-            size : Optional[Tuple[int]]=None,
-        ) -> np.ndarray | float :
+            size: Optional[Tuple[int]] = None,
+        ) -> np.ndarray | float:
             return rng.normal(loc=mu, scale=1, size=size)
 
+
         with pm.Model():
-            mu = pm.Normal('mu', 0 , 1)
+            mu = pm.Normal("mu", 0, 1)
             pm.CustomDist(
-                'custom_dist',
+                "custom_dist",
                 mu,
                 logp=logp,
                 random=random,
@@ -629,13 +634,15 @@ def random(
         import pymc as pm
         from pytensor.tensor import TensorVariable
 
+
         def dist(
             lam: TensorVariable,
             shift: TensorVariable,
             size: TensorVariable,
         ) -> TensorVariable:
             return pm.Exponential.dist(lam, size=size) + shift
 
+
         with pm.Model() as m:
             lam = pm.HalfNormal("lam")
             shift = -1
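The hunk above ends mid-example at `shift = -1`. For context, a dist-based CustomDist is typically completed roughly as follows (a sketch; the observed values are an illustrative assumption, not taken from the diff):

    with pm.Model() as m:
        lam = pm.HalfNormal("lam")
        shift = -1
        # Reuses the dist() helper defined in the docstring above
        pm.CustomDist("custom_dist", lam, shift, dist=dist, observed=[-1.0, 0.0, 1.0])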

pymc/distributions/mixture.py

Lines changed: 11 additions & 11 deletions
@@ -194,10 +194,10 @@ class Mixture(Distribution):
 
         # Mixture of 2 Poisson variables
         with pm.Model() as model:
-            w = pm.Dirichlet('w', a=np.array([1, 1]))  # 2 mixture weights
+            w = pm.Dirichlet("w", a=np.array([1, 1]))  # 2 mixture weights
 
-            lam1 = pm.Exponential('lam1', lam=1)
-            lam2 = pm.Exponential('lam2', lam=1)
+            lam1 = pm.Exponential("lam1", lam=1)
+            lam2 = pm.Exponential("lam2", lam=1)
 
             # As we just need the logp, rather than add a RV to the model, we need to call `.dist()`
             # These two forms are equivalent, but the second benefits from vectorization
@@ -208,14 +208,14 @@ class Mixture(Distribution):
             # `shape=(2,)` indicates 2 mixture components
             components = pm.Poisson.dist(mu=pm.math.stack([lam1, lam2]), shape=(2,))
 
-            like = pm.Mixture('like', w=w, comp_dists=components, observed=data)
+            like = pm.Mixture("like", w=w, comp_dists=components, observed=data)
 
 
     .. code-block:: python
 
         # Mixture of Normal and StudentT variables
         with pm.Model() as model:
-            w = pm.Dirichlet('w', a=np.array([1, 1]))  # 2 mixture weights
+            w = pm.Dirichlet("w", a=np.array([1, 1]))  # 2 mixture weights
 
             mu = pm.Normal("mu", 0, 1)
 
@@ -224,7 +224,7 @@ class Mixture(Distribution):
                 pm.StudentT.dist(nu=4, mu=mu, sigma=1),
             ]
 
-            like = pm.Mixture('like', w=w, comp_dists=components, observed=data)
+            like = pm.Mixture("like", w=w, comp_dists=components, observed=data)
 
 
     .. code-block:: python
@@ -233,10 +233,10 @@ class Mixture(Distribution):
         with pm.Model() as model:
             # w is a stack of 5 independent size 3 weight vectors
             # If shape was `(3,)`, the weights would be shared across the 5 replication dimensions
-            w = pm.Dirichlet('w', a=np.ones(3), shape=(5, 3))
+            w = pm.Dirichlet("w", a=np.ones(3), shape=(5, 3))
 
             # Each of the 3 mixture components has an independent mean
-            mu = pm.Normal('mu', mu=np.arange(3), sigma=1, shape=3)
+            mu = pm.Normal("mu", mu=np.arange(3), sigma=1, shape=3)
 
             # These two forms are equivalent, but the second benefits from vectorization
             components = [
@@ -249,14 +249,14 @@ class Mixture(Distribution):
             # The mixture is an array of 5 elements
             # Each element can be thought of as an independent scalar mixture of 3
             # components with different means
-            like = pm.Mixture('like', w=w, comp_dists=components, observed=data)
+            like = pm.Mixture("like", w=w, comp_dists=components, observed=data)
 
 
     .. code-block:: python
 
         # Mixture of 2 Dirichlet variables
         with pm.Model() as model:
-            w = pm.Dirichlet('w', a=np.ones(2))  # 2 mixture weights
+            w = pm.Dirichlet("w", a=np.ones(2))  # 2 mixture weights
 
             # These two forms are equivalent, but the second benefits from vectorization
             components = [
@@ -267,7 +267,7 @@ class Mixture(Distribution):
 
             # The mixture is an array of 3 elements
             # Each element comes from only one of the two core Dirichlet components
-            like = pm.Mixture('like', w=w, comp_dists=components, observed=data)
+            like = pm.Mixture("like", w=w, comp_dists=components, observed=data)
     """
 
     rv_type = MarginalMixtureRV

pymc/distributions/multivariate.py

Lines changed: 28 additions & 25 deletions
@@ -230,34 +230,36 @@ class MvNormal(Continuous):
     Define a multivariate normal variable for a given covariance
     matrix::
 
-        cov = np.array([[1., 0.5], [0.5, 2]])
+        cov = np.array([[1.0, 0.5], [0.5, 2]])
         mu = np.zeros(2)
-        vals = pm.MvNormal('vals', mu=mu, cov=cov, shape=(5, 2))
+        vals = pm.MvNormal("vals", mu=mu, cov=cov, shape=(5, 2))
 
     Most of the time it is preferable to specify the cholesky
     factor of the covariance instead. For example, we could
     fit a multivariate outcome like this (see the docstring
     of `LKJCholeskyCov` for more information about this)::
 
         mu = np.zeros(3)
-        true_cov = np.array([[1.0, 0.5, 0.1],
-                             [0.5, 2.0, 0.2],
-                             [0.1, 0.2, 1.0]])
+        true_cov = np.array(
+            [
+                [1.0, 0.5, 0.1],
+                [0.5, 2.0, 0.2],
+                [0.1, 0.2, 1.0],
+            ],
+        )
         data = np.random.multivariate_normal(mu, true_cov, 10)
 
         sd_dist = pm.Exponential.dist(1.0, shape=3)
-        chol, corr, stds = pm.LKJCholeskyCov('chol_cov', n=3, eta=2,
-                                             sd_dist=sd_dist, compute_corr=True)
-        vals = pm.MvNormal('vals', mu=mu, chol=chol, observed=data)
+        chol, corr, stds = pm.LKJCholeskyCov("chol_cov", n=3, eta=2, sd_dist=sd_dist, compute_corr=True)
+        vals = pm.MvNormal("vals", mu=mu, chol=chol, observed=data)
 
     For unobserved values it can be better to use a non-centered
     parametrization::
 
         sd_dist = pm.Exponential.dist(1.0, shape=3)
-        chol, _, _ = pm.LKJCholeskyCov('chol_cov', n=3, eta=2,
-                                       sd_dist=sd_dist, compute_corr=True)
-        vals_raw = pm.Normal('vals_raw', mu=0, sigma=1, shape=(5, 3))
-        vals = pm.Deterministic('vals', pt.dot(chol, vals_raw.T).T)
+        chol, _, _ = pm.LKJCholeskyCov("chol_cov", n=3, eta=2, sd_dist=sd_dist, compute_corr=True)
+        vals_raw = pm.Normal("vals_raw", mu=0, sigma=1, shape=(5, 3))
+        vals = pm.Deterministic("vals", pt.dot(chol, vals_raw.T).T)
     """
 
     rv_op = multivariate_normal
@@ -1806,13 +1808,12 @@ class MatrixNormal(Continuous):
     Define a matrixvariate normal variable for given row and column covariance
     matrices::
 
-        colcov = np.array([[1., 0.5], [0.5, 2]])
+        colcov = np.array([[1.0, 0.5], [0.5, 2]])
         rowcov = np.array([[1, 0, 0], [0, 4, 0], [0, 0, 16]])
         m = rowcov.shape[0]
         n = colcov.shape[0]
         mu = np.zeros((m, n))
-        vals = pm.MatrixNormal('vals', mu=mu, colcov=colcov,
-                               rowcov=rowcov)
+        vals = pm.MatrixNormal("vals", mu=mu, colcov=colcov, rowcov=rowcov)
 
     Above, the ith row in vals has a variance that is scaled by 4^i.
     Alternatively, row or column cholesky matrices could be substituted for
@@ -2418,23 +2419,25 @@ class ICAR(Continuous):
         # 4x4 adjacency matrix
         # arranged in a square lattice
 
-        W = np.array([
-            [0,1,0,1],
-            [1,0,1,0],
-            [0,1,0,1],
-            [1,0,1,0]
-        ])
+        W = np.array(
+            [
+                [0, 1, 0, 1],
+                [1, 0, 1, 0],
+                [0, 1, 0, 1],
+                [1, 0, 1, 0],
+            ],
+        )
 
         # centered parameterization
         with pm.Model():
-            sigma = pm.Exponential('sigma', 1)
-            phi = pm.ICAR('phi', W=W, sigma=sigma)
+            sigma = pm.Exponential("sigma", 1)
+            phi = pm.ICAR("phi", W=W, sigma=sigma)
             mu = phi
 
         # non-centered parameterization
         with pm.Model():
-            sigma = pm.Exponential('sigma', 1)
-            phi = pm.ICAR('phi', W=W)
+            sigma = pm.Exponential("sigma", 1)
+            phi = pm.ICAR("phi", W=W)
             mu = sigma * phi
 
     References

pymc/distributions/simulator.py

Lines changed: 1 addition & 0 deletions
@@ -122,6 +122,7 @@ class Simulator(Distribution):
         def simulator_fn(rng, loc, scale, size):
             return rng.normal(loc, scale, size=size)
 
+
         with pm.Model() as m:
             loc = pm.Normal("loc", 0, 1)
             scale = pm.HalfNormal("scale", 1)
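This hunk likewise cuts off before the Simulator call itself. A complete usage sketch might look like the following (the epsilon value and observed data are illustrative assumptions, not taken from the diff):

    import numpy as np
    import pymc as pm


    def simulator_fn(rng, loc, scale, size):
        return rng.normal(loc, scale, size=size)


    data = np.random.normal(0, 1, size=100)  # illustrative observed data

    with pm.Model() as m:
        loc = pm.Normal("loc", 0, 1)
        scale = pm.HalfNormal("scale", 1)
        pm.Simulator("y", simulator_fn, loc, scale, epsilon=1.0, observed=data)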
