@@ -58,21 +58,35 @@ def wrapped_f(pymc3_obj, *args, **kwargs):
58
58
@statfunc
def autocorr(x, lag=1):
    """Sample autocorrelation at specified lag.

    Parameters
    ----------
    x : Numpy array
        An array containing MCMC samples
    lag : int
        The desired lag to take into consideration

    Returns
    -------
    `float` representing the correlation of x_i with x_{i+lag}
    """
    # autocov gives the 2x2 covariance matrix of (x[:-lag], x[lag:]);
    # normalise the off-diagonal covariance by the geometric mean of the
    # two variances to obtain the correlation coefficient.
    S = autocov(x, lag)
    return S[0, 1] / np.sqrt(np.prod(np.diag(S)))
66
71
67
72
68
73
@statfunc
69
74
def autocov (x , lag = 1 ):
70
- """
71
- Sample autocovariance at specified lag.
72
- The autocovariance is a 2x2 matrix with the variances of
73
- x[:-lag] and x[lag:] in the diagonal and the autocovariance
74
- on the off-diagonal.
75
- """
75
+ """Sample autocovariance at specified lag.
76
+
77
+ Parameters
78
+ ----------
79
+ x : Numpy array
80
+ An array containing MCMC samples
81
+ lag : int
82
+ The desired lag to take into consideration
83
+
84
+ Returns
85
+ -------
86
+ 2x2 matrix with the variances of
87
+ x[:-lag] and x[lag:] in the diagonal and the autocovariance
88
+ on the off-diagonal.
89
+ """
76
90
x = np .asarray (x )
77
91
78
92
if not lag :
@@ -85,7 +99,18 @@ def autocov(x, lag=1):
85
99
def dic (trace , model = None ):
86
100
"""
87
101
Calculate the deviance information criterion of the samples in trace from model
88
- Read more theory here - in a paper by some of the leading authorities on Model Selection - dx.doi.org/10.1111/1467-9868.00353
102
+ Read more theory here - in a paper by some of the leading authorities on Model Selection -
103
+ dx.doi.org/10.1111/1467-9868.00353
104
+
105
+ Parameters
106
+ ----------
107
+ trace : result of MCMC run
108
+ model : PyMC Model
109
+ Optional model. Default None, taken from context.
110
+
111
+ Returns
112
+ -------
113
+ `float` representing the deviance information criterion of the model and trace
89
114
"""
90
115
model = modelcontext (model )
91
116
@@ -99,9 +124,15 @@ def dic(trace, model=None):
99
124
100
125
101
126
def log_post_trace (trace , model ):
102
- '''
127
+ """
103
128
Calculate the elementwise log-posterior for the sampled trace.
104
- '''
129
+
130
+ Parameters
131
+ ----------
132
+ trace : result of MCMC run
133
+ model : PyMC Model
134
+ Optional model. Default None, taken from context.
135
+ """
105
136
return np .vstack ([obs .logp_elemwise (pt ) for obs in model .observed_RVs ] for pt in trace )
106
137
107
138
@@ -249,7 +280,14 @@ def loo(trace, model=None, pointwise=False):
249
280
def bpic (trace , model = None ):
250
281
"""
251
282
Calculates Bayesian predictive information criterion n of the samples in trace from model
252
- Read more theory here - in a paper by some of the leading authorities on Model Selection - dx.doi.org/10.1111/1467-9868.00353
283
+ Read more theory here - in a paper by some of the leading authorities on Model Selection -
284
+ dx.doi.org/10.1111/1467-9868.00353
285
+
286
+ Parameters
287
+ ----------
288
+ trace : result of MCMC run
289
+ model : PyMC Model
290
+ Optional model. Default None, taken from context.
253
291
"""
254
292
model = modelcontext (model )
255
293
@@ -319,6 +357,7 @@ def compare(traces, models, ic='WAIC'):
319
357
warns = np .zeros (len (models ))
320
358
321
359
c = 0
360
+
322
361
def add_warns (* args ):
323
362
warns [c ] = 1
324
363
@@ -450,18 +489,21 @@ def hpd(x, alpha=0.05, transform=lambda x: x):
450
489
451
490
@statfunc
452
491
def mc_error (x , batches = 5 ):
453
- """
454
- Calculates the simulation standard error, accounting for non-independent
455
- samples. The trace is divided into batches, and the standard deviation of
456
- the batch means is calculated.
492
+ R"""Calculates the simulation standard error, accounting for non-independent
493
+ samples. The trace is divided into batches, and the standard deviation of
494
+ the batch means is calculated.
457
495
458
- :Arguments:
459
- x : Numpy array
460
- An array containing MCMC samples
461
- batches : integer
462
- Number of batches
463
- """
496
+ Parameters
497
+ ----------
498
+ x : Numpy array
499
+ An array containing MCMC samples
500
+ batches : integer
501
+ Number of batches
464
502
503
+ Returns
504
+ -------
505
+ `float` representing the error
506
+ """
465
507
if x .ndim > 1 :
466
508
467
509
dims = np .shape (x )
@@ -489,17 +531,21 @@ def mc_error(x, batches=5):
489
531
490
532
@statfunc
491
533
def quantiles (x , qlist = (2.5 , 25 , 50 , 75 , 97.5 ), transform = lambda x : x ):
492
- """Returns a dictionary of requested quantiles from array
534
+ R """Returns a dictionary of requested quantiles from array
493
535
494
- :Arguments:
495
- x : Numpy array
496
- An array containing MCMC samples
497
- qlist : tuple or list
498
- A list of desired quantiles (defaults to (2.5, 25, 50, 75, 97.5))
499
- transform : callable
500
- Function to transform data (defaults to identity)
501
- """
536
+ Parameters
537
+ ----------
538
+ x : Numpy array
539
+ An array containing MCMC samples
540
+ qlist : tuple or list
541
+ A list of desired quantiles (defaults to (2.5, 25, 50, 75, 97.5))
542
+ transform : callable
543
+ Function to transform data (defaults to identity)
502
544
545
+ Returns
546
+ -------
547
+ `dictionary` with the quantiles {quantile: value}
548
+ """
503
549
# Make a copy of trace
504
550
x = transform (x .copy ())
505
551
0 commit comments