Skip to content

Commit 1dc2da0

Browse files
denadai2 and twiecki
authored and committed
Removed unnecessary spaces from comments
1 parent 9f1e24c commit 1dc2da0

File tree

1 file changed

+18
-34
lines changed

1 file changed

+18
-34
lines changed

pymc3/stats.py

Lines changed: 18 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -74,19 +74,19 @@ def autocorr(x, lag=1):
7474
def autocov(x, lag=1):
7575
"""Sample autocovariance at specified lag.
7676
77-
Parameters
78-
----------
79-
x : Numpy array
80-
An array containing MCMC samples
81-
lag : int
82-
The desidered lag to take in consideration
83-
84-
Returns
85-
-------
86-
2x2 matrix with the variances of
87-
x[:-lag] and x[lag:] in the diagonal and the autocovariance
88-
on the off-diagonal.
89-
"""
77+
Parameters
78+
----------
79+
x : Numpy array
80+
An array containing MCMC samples
81+
lag : int
82+
The desidered lag to take in consideration
83+
84+
Returns
85+
-------
86+
2x2 matrix with the variances of
87+
x[:-lag] and x[lag:] in the diagonal and the autocovariance
88+
on the off-diagonal.
89+
"""
9090
x = np.asarray(x)
9191

9292
if not lag:
@@ -97,8 +97,7 @@ def autocov(x, lag=1):
9797

9898

9999
def dic(trace, model=None):
100-
"""
101-
Calculate the deviance information criterion of the samples in trace from model
100+
"""Calculate the deviance information criterion of the samples in trace from model
102101
Read more theory here - in a paper by some of the leading authorities on Model Selection -
103102
dx.doi.org/10.1111/1467-9868.00353
104103
@@ -124,8 +123,7 @@ def dic(trace, model=None):
124123

125124

126125
def log_post_trace(trace, model):
127-
"""
128-
Calculate the elementwise log-posterior for the sampled trace.
126+
"""Calculate the elementwise log-posterior for the sampled trace.
129127
130128
Parameters
131129
----------
@@ -137,13 +135,11 @@ def log_post_trace(trace, model):
137135

138136

139137
def waic(trace, model=None, pointwise=False):
140-
"""
141-
Calculate the widely available information criterion, its standard error
138+
"""Calculate the widely available information criterion, its standard error
142139
and the effective number of parameters of the samples in trace from model.
143140
Read more theory here - in a paper by some of the leading authorities on
144141
Model Selection - dx.doi.org/10.1111/1467-9868.00353
145142
146-
147143
Parameters
148144
----------
149145
trace : result of MCMC run
@@ -153,7 +149,6 @@ def waic(trace, model=None, pointwise=False):
153149
if True the pointwise predictive accuracy will be returned.
154150
Default False
155151
156-
157152
Returns
158153
-------
159154
namedtuple with the following elements:
@@ -191,12 +186,10 @@ def waic(trace, model=None, pointwise=False):
191186

192187

193188
def loo(trace, model=None, pointwise=False):
194-
"""
195-
Calculates leave-one-out (LOO) cross-validation for out of sample predictive
189+
"""Calculates leave-one-out (LOO) cross-validation for out of sample predictive
196190
model fit, following Vehtari et al. (2015). Cross-validation is computed using
197191
Pareto-smoothed importance sampling (PSIS).
198192
199-
200193
Parameters
201194
----------
202195
trace : result of MCMC run
@@ -206,7 +199,6 @@ def loo(trace, model=None, pointwise=False):
206199
if True the pointwise predictive accuracy will be returned.
207200
Default False
208201
209-
210202
Returns
211203
-------
212204
namedtuple with the following elements:
@@ -215,7 +207,6 @@ def loo(trace, model=None, pointwise=False):
215207
p_loo: effective number of parameters
216208
loo_i: and array of the pointwise predictive accuracy, only if pointwise True
217209
"""
218-
219210
model = modelcontext(model)
220211

221212
log_py = log_post_trace(trace, model)
@@ -301,8 +292,7 @@ def bpic(trace, model=None):
301292

302293

303294
def compare(traces, models, ic='WAIC'):
304-
"""
305-
Compare models based on the widely available information criterion (WAIC)
295+
"""Compare models based on the widely available information criterion (WAIC)
306296
or leave-one-out (LOO) cross-validation.
307297
Read more theory here - in a paper by some of the leading authorities on
308298
Model Selection - dx.doi.org/10.1111/1467-9868.00353
@@ -419,7 +409,6 @@ def calc_min_interval(x, alpha):
419409
420410
Assumes that x is sorted numpy array.
421411
"""
422-
423412
n = len(x)
424413
cred_mass = 1.0 - alpha
425414

@@ -450,7 +439,6 @@ def hpd(x, alpha=0.05, transform=lambda x: x):
450439
Function to transform data (defaults to identity)
451440
452441
"""
453-
454442
# Make a copy of trace
455443
x = transform(x.copy())
456444

@@ -609,17 +597,14 @@ def df_summary(trace, varnames=None, stat_funcs=None, extend=False, include_tran
609597
samples. Defaults to the smaller of 100 or the number of samples.
610598
This is only meaningful when `stat_funcs` is None.
611599
612-
613600
See also
614601
--------
615602
summary : Generate a pretty-printed summary of a trace.
616603
617-
618604
Returns
619605
-------
620606
`pandas.DataFrame` with summary statistics for each variable
621607
622-
623608
Examples
624609
--------
625610
.. code:: ipython
@@ -714,7 +699,6 @@ def summary(trace, varnames=None, transform=lambda x: x, alpha=0.05, start=0,
714699
original variables (defaults to False).
715700
to_file : None or string
716701
File to write results to. If not given, print to stdout.
717-
718702
"""
719703
if varnames is None:
720704
if include_transformed:

0 commit comments

Comments (0)