Skip to content

Commit 9f1e24c

Browse files
denadai2 and twiecki
authored and committed
Updated documentation of stats.summary
1 parent 09557a6 commit 9f1e24c

File tree

1 file changed

+77
-31
lines changed

1 file changed

+77
-31
lines changed

pymc3/stats.py

Lines changed: 77 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -58,21 +58,35 @@ def wrapped_f(pymc3_obj, *args, **kwargs):
5858
@statfunc
5959
def autocorr(x, lag=1):
6060
"""Sample autocorrelation at specified lag.
61-
The autocorrelation is the correlation of x_i with x_{i+lag}.
62-
"""
6361
62+
Parameters
63+
----------
64+
x : Numpy array
65+
An array containing MCMC samples
66+
lag : int
67+
The desired lag to take into consideration
68+
"""
6469
S = autocov(x, lag)
6570
return S[0, 1] / np.sqrt(np.prod(np.diag(S)))
6671

6772

6873
@statfunc
6974
def autocov(x, lag=1):
70-
"""
71-
Sample autocovariance at specified lag.
72-
The autocovariance is a 2x2 matrix with the variances of
73-
x[:-lag] and x[lag:] in the diagonal and the autocovariance
74-
on the off-diagonal.
75-
"""
75+
"""Sample autocovariance at specified lag.
76+
77+
Parameters
78+
----------
79+
x : Numpy array
80+
An array containing MCMC samples
81+
lag : int
82+
The desired lag to take into consideration
83+
84+
Returns
85+
-------
86+
2x2 matrix with the variances of
87+
x[:-lag] and x[lag:] in the diagonal and the autocovariance
88+
on the off-diagonal.
89+
"""
7690
x = np.asarray(x)
7791

7892
if not lag:
@@ -85,7 +99,18 @@ def autocov(x, lag=1):
8599
def dic(trace, model=None):
86100
"""
87101
Calculate the deviance information criterion of the samples in trace from model
88-
Read more theory here - in a paper by some of the leading authorities on Model Selection - dx.doi.org/10.1111/1467-9868.00353
102+
Read more theory here - in a paper by some of the leading authorities on Model Selection -
103+
dx.doi.org/10.1111/1467-9868.00353
104+
105+
Parameters
106+
----------
107+
trace : result of MCMC run
108+
model : PyMC Model
109+
Optional model. Default None, taken from context.
110+
111+
Returns
112+
-------
113+
`float` representing the deviance information criterion of the model and trace
89114
"""
90115
model = modelcontext(model)
91116

@@ -99,9 +124,15 @@ def dic(trace, model=None):
99124

100125

101126
def log_post_trace(trace, model):
102-
'''
127+
"""
103128
Calculate the elementwise log-posterior for the sampled trace.
104-
'''
129+
130+
Parameters
131+
----------
132+
trace : result of MCMC run
133+
model : PyMC Model
134+
Optional model. Default None, taken from context.
135+
"""
105136
return np.vstack([obs.logp_elemwise(pt) for obs in model.observed_RVs] for pt in trace)
106137

107138

@@ -249,7 +280,14 @@ def loo(trace, model=None, pointwise=False):
249280
def bpic(trace, model=None):
250281
"""
251282
Calculates the Bayesian predictive information criterion of the samples in trace from model
252-
Read more theory here - in a paper by some of the leading authorities on Model Selection - dx.doi.org/10.1111/1467-9868.00353
283+
Read more theory here - in a paper by some of the leading authorities on Model Selection -
284+
dx.doi.org/10.1111/1467-9868.00353
285+
286+
Parameters
287+
----------
288+
trace : result of MCMC run
289+
model : PyMC Model
290+
Optional model. Default None, taken from context.
253291
"""
254292
model = modelcontext(model)
255293

@@ -319,6 +357,7 @@ def compare(traces, models, ic='WAIC'):
319357
warns = np.zeros(len(models))
320358

321359
c = 0
360+
322361
def add_warns(*args):
323362
warns[c] = 1
324363

@@ -450,18 +489,21 @@ def hpd(x, alpha=0.05, transform=lambda x: x):
450489

451490
@statfunc
452491
def mc_error(x, batches=5):
453-
"""
454-
Calculates the simulation standard error, accounting for non-independent
455-
samples. The trace is divided into batches, and the standard deviation of
456-
the batch means is calculated.
492+
R"""Calculates the simulation standard error, accounting for non-independent
493+
samples. The trace is divided into batches, and the standard deviation of
494+
the batch means is calculated.
457495
458-
:Arguments:
459-
x : Numpy array
460-
An array containing MCMC samples
461-
batches : integer
462-
Number of batches
463-
"""
496+
Parameters
497+
----------
498+
x : Numpy array
499+
An array containing MCMC samples
500+
batches : integer
501+
Number of batches
464502
503+
Returns
504+
-------
505+
`float` representing the error
506+
"""
465507
if x.ndim > 1:
466508

467509
dims = np.shape(x)
@@ -489,17 +531,21 @@ def mc_error(x, batches=5):
489531

490532
@statfunc
491533
def quantiles(x, qlist=(2.5, 25, 50, 75, 97.5), transform=lambda x: x):
492-
"""Returns a dictionary of requested quantiles from array
534+
R"""Returns a dictionary of requested quantiles from array
493535
494-
:Arguments:
495-
x : Numpy array
496-
An array containing MCMC samples
497-
qlist : tuple or list
498-
A list of desired quantiles (defaults to (2.5, 25, 50, 75, 97.5))
499-
transform : callable
500-
Function to transform data (defaults to identity)
501-
"""
536+
Parameters
537+
----------
538+
x : Numpy array
539+
An array containing MCMC samples
540+
qlist : tuple or list
541+
A list of desired quantiles (defaults to (2.5, 25, 50, 75, 97.5))
542+
transform : callable
543+
Function to transform data (defaults to identity)
502544
545+
Returns
546+
-------
547+
`dictionary` with the quantiles {quantile: value}
548+
"""
503549
# Make a copy of trace
504550
x = transform(x.copy())
505551

0 commit comments

Comments
 (0)