reveal the true nature of the underlying problem. In some applications it can
yield unreliable decisions.

- Recently at NIPS 2017 the [OPVI](https://arxiv.org/abs/1610.09033) framework
+ Recently at NIPS 2017 the `OPVI <https://arxiv.org/abs/1610.09033/>`_ framework
was presented. It generalizes variational inference so that the problem is
built from blocks. The first and essential block is the Model itself. The second is
the Approximation; in some cases :math:`\log Q(D)` is not really needed. Necessity
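To make the block structure concrete, here is a minimal sketch against the
PyMC3 variational API this diff documents (import paths and the exact
``KL(...)(None)`` call are assumptions inferred from the docstrings below):

.. code-block:: python

    import pymc3 as pm
    from pymc3.variational.approximations import MeanField
    from pymc3.variational.operators import KL

    # Block 1: the Model itself
    with pm.Model() as model:
        mu = pm.Normal('mu', mu=0., sd=1.)
        pm.Normal('obs', mu=mu, sd=1., observed=[0.1, -0.3, 0.2])

    # Block 2: an Approximation q (here a mean-field Gaussian)
    approx = MeanField(model=model)

    # Block 3: an Operator binding model and approximation; calling it
    # (with a TestFunction, or None when not required) yields the
    # ObjectiveFunction documented in the hunks below
    objective = KL(approx)(None)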
@@ -68,8 +68,10 @@ class ObjectiveFunction(object):
Parameters
----------
- op : Operator
- tf : TestFunction
+ op : :class:`Operator`
+     OPVI Functional operator
+ tf : :class:`TestFunction`
+     OPVI TestFunction
"""
def __init__(self, op, tf):
    self.op = op
@@ -85,7 +87,7 @@ def random(self, size=None):
Parameters
----------
- size : int
+ size : `int`
    number of samples from distribution

Returns
@@ -101,26 +103,26 @@ def updates(self, obj_n_mc=None, tf_n_mc=None, obj_optimizer=adam, test_optimize
Parameters
----------
- obj_n_mc : int
+ obj_n_mc : `int`
    Number of Monte Carlo samples used for approximation of objective gradients
- tf_n_mc : int
+ tf_n_mc : `int`
    Number of Monte Carlo samples used for approximation of test function gradients
obj_optimizer : function (loss, params) -> updates
    Optimizer that is used for objective params
test_optimizer : function (loss, params) -> updates
    Optimizer that is used for test function params
- more_obj_params : list
+ more_obj_params : `list`
    Add custom params for objective optimizer
- more_tf_params : list
+ more_tf_params : `list`
    Add custom params for test function optimizer
- more_updates : dict
+ more_updates : `dict`
    Add custom updates to resulting updates
- more_replacements : dict
+ more_replacements : `dict`
    Apply custom replacements before calculating gradients

Returns
-------
- ObjectiveUpdates
+ :class:`ObjectiveUpdates`
"""
if more_obj_params is None:
    more_obj_params = []
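A sketch of consuming these updates (hypothetical usage; ``adam`` is the
default ``obj_optimizer`` shown in the hunk header):

.. code-block:: python

    import theano

    updates = objective.updates(obj_n_mc=10)         # 10 MC samples per gradient
    step = theano.function([], [], updates=updates)  # one optimization step
    for _ in range(1000):
        step()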
@@ -182,36 +184,37 @@ def step_function(self, obj_n_mc=None, tf_n_mc=None,
"""Step function that should be called on each optimization step.

Generally it solves the following problem:
+
.. math::

    \textbf{\lambda^{*}} = \inf_{\lambda} \sup_{\theta} t(\mathbb{E}_{\lambda}[(O^{p,q}f_{\theta})(z)])

Parameters
----------
- obj_n_mc : int
+ obj_n_mc : `int`
    Number of Monte Carlo samples used for approximation of objective gradients
- tf_n_mc : int
+ tf_n_mc : `int`
    Number of Monte Carlo samples used for approximation of test function gradients
obj_optimizer : function (loss, params) -> updates
    Optimizer that is used for objective params
test_optimizer : function (loss, params) -> updates
    Optimizer that is used for test function params
- more_obj_params : list
+ more_obj_params : `list`
    Add custom params for objective optimizer
- more_tf_params : list
+ more_tf_params : `list`
    Add custom params for test function optimizer
- more_updates : dict
+ more_updates : `dict`
    Add custom updates to resulting updates
- score : bool
+ score : `bool`
    calculate loss on each step? Defaults to False for speed
- fn_kwargs : dict
+ fn_kwargs : `dict`
    Add kwargs to theano.function (e.g. `{'profile': True}`)
- more_replacements : dict
+ more_replacements : `dict`
    Apply custom replacements before calculating gradients

Returns
-------
- theano.function
+ `theano.function`
"""
if fn_kwargs is None:
    fn_kwargs = {}
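For example, the compiled step function might be driven like this (a sketch;
with ``score=True`` each call returns the current loss, per the docstring):

.. code-block:: python

    step = objective.step_function(obj_n_mc=10, score=True)
    losses = [step() for _ in range(1000)]  # one inf-sup step per call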
@@ -237,11 +240,11 @@ def score_function(self, sc_n_mc=None, more_replacements=None, fn_kwargs=None):
Parameters
----------
- sc_n_mc : int
+ sc_n_mc : `int`
    number of scoring MC samples
more_replacements:
    Apply custom replacements before compiling a function
- fn_kwargs:
+ fn_kwargs: `dict`
    arbitrary kwargs passed to theano.function

Returns
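A possible usage sketch:

.. code-block:: python

    score = objective.score_function(sc_n_mc=100)  # average over 100 MC samples
    print(score())                                 # current objective value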
@@ -278,10 +281,11 @@ class Operator(object):
Parameters
----------
- approx : Approximation
+ approx : :class:`Approximation`
+     an approximation instance

- Subclassing
- -----------
+ Notes
+ -----
To implement a custom operator, define the :code:`.apply(f)` method
"""
@@ -326,19 +330,21 @@ def logq_norm(self, z):
def apply(self, f):  # pragma: no cover
"""Operator itself
+
.. math::

    (O^{p,q}f_{\theta})(z)

Parameters
----------
- f : TestFunction or None if not required
+ f : :class:`TestFunction` or None if not required
    function that takes `z = self.input` and returns
    same dimensional output

Returns
-------
- symbolically applied operator
+ tt.TensorVariable
+     symbolically applied operator
"""
raise NotImplementedError
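As a sketch of the subclassing contract from the Notes above, a hypothetical
KL-style operator (assuming a ``logp_norm`` counterpart to the ``logq_norm``
method named in the hunk header):

.. code-block:: python

    from pymc3.variational.opvi import Operator

    class KLLike(Operator):
        """Hypothetical operator computing the KL integrand."""

        def apply(self, f):
            # f is ignored: this operator needs no test function
            z = self.input  # symbolic draw from the approximation
            return self.logq_norm(z) - self.logp_norm(z)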
@@ -426,7 +432,8 @@ def _setup(self, dim):
Parameters
----------
- dim : int dimension of posterior distribution
+ dim : int
+     dimension of posterior distribution
"""
pass
@@ -445,12 +452,11 @@ class Approximation(object):
Parameters
----------
local_rv : dict[var->tuple]
-     mapping {model_variable -> local_variable (:math:`\\mu`, math:`\\rho`)}
+     mapping {model_variable -> local_variable (:math:`\\mu`, :math:`\\rho`)}
    Local Vars are used for Autoencoding Variational Bayes
    See (AEVB; Kingma and Welling, 2014) for details
-
- model : PyMC3 model for inference
-
+ model : :class:`Model`
+     PyMC3 model for inference
cost_part_grad_scale : float or scalar tensor
    Scaling the score part of the gradient can be useful near the optimum for
    achieving better convergence properties. Common schedule is
@@ -463,10 +469,11 @@ class Approximation(object):
leave None to use package global RandomStream or other
valid value to create instance specific one

- Subclassing
- -----------
+ Notes
+ -----
Defining an approximation requires
custom implementations of the following methods:
+
- :code:`.create_shared_params(**kwargs)`
    Returns {dict|list|theano.shared}
@@ -481,19 +488,21 @@ class Approximation(object):
    Returns Scalar

You can also override the following methods:
+
- :code:`._setup(**kwargs)`
    Do some specific stuff having :code:`kwargs` before calling :code:`.create_shared_params`

- :code:`.check_model(model, **kwargs)`
    Do some specific check for model having :code:`kwargs`

- Notes
- -----
+ See Also
+ --------
:code:`kwargs` mentioned above are supplied as additional arguments
for :code:`Approximation.__init__`

There are some default class attributes for approximation classes that can be
optionally overridden.
+
- :code:`initial_dist_name`
    string that represents the name of the initial distribution.
    In most cases it will be `uniform` or `normal`
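A hypothetical subclass illustrating this contract (attribute names such as
``global_size`` and ``shared_params`` are assumptions; the log-density method
is left unimplemented):

.. code-block:: python

    import numpy as np
    import theano
    import theano.tensor as tt
    from pymc3.variational.opvi import Approximation

    class DiagNormal(Approximation):
        initial_dist_name = 'normal'  # optionally overridden class attribute

        def create_shared_params(self):
            # Returns {dict|list|theano.shared}
            n = self.global_size  # assumed: dimensionality of global vars
            return {'mu': theano.shared(np.zeros(n)),
                    'rho': theano.shared(np.zeros(n))}

        def random_global(self, size=None, no_rand=False):
            # reparametrize an initial-dist draw into a posterior draw
            initial = self.initial(size, no_rand, l=self.global_size)
            sd = tt.nnet.softplus(self.shared_params['rho'])
            return self.shared_params['mu'] + sd * initial

        def log_q_W_global(self, z):
            # Returns Scalar: log q(z) for global vars (omitted in sketch)
            raise NotImplementedError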
@@ -553,7 +562,7 @@ def seed(self, seed=None):
Parameters
----------
- seed : int
+ seed : `int`
"""
self._seed = seed
self._rng.seed(seed)
@@ -609,16 +618,16 @@ def construct_replacements(self, include=None, exclude=None,
Parameters
----------
- include : list
+ include : `list`
    latent variables to be replaced
- exclude : list
+ exclude : `list`
    latent variables to be excluded for replacements
- more_replacements : dict
+ more_replacements : `dict`
    add custom replacements to graph, e.g. change input source

Returns
-------
- dict
+ `dict`
    Replacements
"""
if include is not None and exclude is not None:
@@ -647,11 +656,11 @@ def apply_replacements(self, node, deterministic=False,
deterministic : bool
    whether to use zeros as initial distribution
    if True - zero initial point will produce constant latent variables
- include : list
+ include : `list`
    latent variables to be replaced
- exclude : list
+ exclude : `list`
    latent variables to be excluded for replacements
- more_replacements : dict
+ more_replacements : `dict`
    add custom replacements to graph, e.g. change input source

Returns
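Sketch of typical use (expression and variable name are illustrative):

.. code-block:: python

    # replace the latent 'mu' in an arbitrary expression with its posterior
    expr = model['mu'] ** 2
    post_expr = approx.apply_replacements(expr, deterministic=True)
    print(post_expr.eval())  # deterministic=True uses the zero initial point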
@@ -674,7 +683,7 @@ def sample_node(self, node, size=100,
node : Theano Variables (or Theano expressions)
size : scalar
    number of samples
- more_replacements : dict
+ more_replacements : `dict`
    add custom replacements to graph, e.g. change input source

Returns
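For instance (a sketch):

.. code-block:: python

    # 200 Monte Carlo samples of a derived quantity under the approximation
    sampled = approx.sample_node(model['mu'] ** 2, size=200)
    print(sampled.eval().shape)  # leading axis of length 200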
@@ -716,13 +725,16 @@ def initial(self, size, no_rand=False, l=None):
Parameters
----------
- size : int - number of samples
- no_rand : bool - return zeros if True
- l : length of sample, defaults to latent space dim
+ size : `int`
+     number of samples
+ no_rand : `bool`
+     return zeros if True
+ l : `int`
+     length of sample, defaults to latent space dim

Returns
-------
- Tensor
+ `tt.TensorVariable`
    sampled latent space shape == size + latent_dim
"""
@@ -754,8 +766,10 @@ def random_local(self, size=None, no_rand=False):
Parameters
----------
- size : number of samples from distribution
- no_rand : whether use deterministic distribution
+ size : `scalar`
+     number of samples from distribution
+ no_rand : `bool`
+     whether to use a deterministic distribution

Returns
-------
@@ -771,8 +785,10 @@ def random_global(self, size=None, no_rand=False): # pragma: no cover
Parameters
----------
- size : number of samples from distribution
- no_rand : whether use deterministic distribution
+ size : `scalar`
+     number of samples from distribution
+ no_rand : `bool`
+     whether to use a deterministic distribution

Returns
-------
@@ -785,8 +801,10 @@ def random(self, size=None, no_rand=False):
Parameters
----------
- size : number of samples from distribution
- no_rand : whether use deterministic distribution
+ size : `scalar`
+     number of samples from distribution
+ no_rand : `bool`
+     whether to use a deterministic distribution

Returns
-------
@@ -816,8 +834,10 @@ def random_fn(self):
Parameters
----------
- size : number of samples from distribution
- no_rand : whether use deterministic distribution
+ size : `int`
+     number of samples from distribution
+ no_rand : `bool`
+     whether to use a deterministic distribution

Returns
-------
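A sketch contrasting the symbolic and precompiled samplers (the eager call
signature is an assumption based on the docstring above):

.. code-block:: python

    z_symbolic = approx.random(10)   # symbolic: 10 draws from latent space
    z_values = approx.random_fn(10)  # eager: precompiled counterpart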
@@ -844,14 +864,14 @@ def sample(self, draws=1, include_transformed=False):
Parameters
----------
- draws : int
+ draws : `int`
    Number of random samples.
- include_transformed : bool
+ include_transformed : `bool`
    If True, transformed variables are also sampled. Default is False.

Returns
-------
- trace : pymc3.backends.base.MultiTrace
+ trace : :class:`pymc3.backends.base.MultiTrace`
    Samples drawn from variational posterior.
"""
vars_sampled = get_default_varnames(self.model.unobserved_RVs,
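Usage sketch:

.. code-block:: python

    trace = approx.sample(draws=1000, include_transformed=False)
    print(trace['mu'].mean())  # behaves like any MultiTrace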
@@ -910,15 +930,17 @@ def view(self, space, name, reshape=True):
Parameters
----------
- space : space to take view of variable from
- name : str
+ space : matrix or vector
+     space to take view of variable from
+ name : `str`
    name of variable
- reshape : bool
+ reshape : `bool`
    whether to reshape variable from vectorized view

Returns
-------
- variable view
+ (reshaped) slice of matrix
+     variable view
"""
theano_is_here = isinstance(space, tt.TensorVariable)
slc = self._view[name].slc
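A sketch of taking a view (assuming 'mu' is a model variable and draws come
from the precompiled sampler):

.. code-block:: python

    draws = approx.random_fn(10)         # matrix of 10 vectorized samples
    mu_draws = approx.view(draws, 'mu')  # (reshaped) slice for 'mu'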