Skip to content

Commit d95acc0

Browse files
authored
Updated docstrings in pymc.model.core.Model (#7118)
1 parent cb412cb commit d95acc0

File tree

1 file changed

+72
-42
lines changed

1 file changed

+72
-42
lines changed

pymc/model/core.py

Lines changed: 72 additions & 42 deletions
Original file line numberDiff line numberDiff line change
@@ -398,10 +398,10 @@ class Model(WithMemoization, metaclass=ContextMeta):
398398
399399
Parameters
400400
----------
401-
name: str
401+
name : str
402402
name that will be used as prefix for names of all random
403403
variables defined within model
404-
check_bounds: bool
404+
check_bounds : bool
405405
Ensure that input parameters to distributions are in a valid
406406
range. If your model is built in a way where you know your
407407
parameters can only take on valid values you can set this to
@@ -580,10 +580,10 @@ def logp_dlogp_function(self, grad_vars=None, tempered=False, **kwargs):
580580
581581
Parameters
582582
----------
583-
grad_vars: list of random variables, optional
583+
grad_vars : list of random variables, optional
584584
Compute the gradient with respect to those variables. If None,
585585
use all free random variables of this model.
586-
tempered: bool
586+
tempered : bool
587587
Compute the tempered logp `free_logp + alpha * observed_logp`.
588588
`alpha` can be changed using `ValueGradFunction.set_weights([alpha])`.
589589
"""
@@ -619,12 +619,12 @@ def compile_logp(
619619
620620
Parameters
621621
----------
622-
vars: list of random variables or potential terms, optional
622+
vars : list of random variables or potential terms, optional
623623
Compute the gradient with respect to those variables. If None, use all
624624
free and observed random variables, as well as potential terms in model.
625-
jacobian:
625+
jacobian : bool
626626
Whether to include jacobian terms in logprob graph. Defaults to True.
627-
sum:
627+
sum : bool
628628
Whether to sum all logp terms or return elemwise logp for each variable.
629629
Defaults to True.
630630
"""
@@ -639,10 +639,10 @@ def compile_dlogp(
639639
640640
Parameters
641641
----------
642-
vars: list of random variables or potential terms, optional
642+
vars : list of random variables or potential terms, optional
643643
Compute the gradient with respect to those variables. If None, use all
644644
free and observed random variables, as well as potential terms in model.
645-
jacobian:
645+
jacobian : bool
646646
Whether to include jacobian terms in logprob graph. Defaults to True.
647647
"""
648648
return self.compile_fn(self.dlogp(vars=vars, jacobian=jacobian))
@@ -656,10 +656,10 @@ def compile_d2logp(
656656
657657
Parameters
658658
----------
659-
vars: list of random variables or potential terms, optional
659+
vars : list of random variables or potential terms, optional
660660
Compute the gradient with respect to those variables. If None, use all
661661
free and observed random variables, as well as potential terms in model.
662-
jacobian:
662+
jacobian : bool
663663
Whether to include jacobian terms in logprob graph. Defaults to True.
664664
"""
665665
return self.compile_fn(self.d2logp(vars=vars, jacobian=jacobian))
@@ -674,12 +674,12 @@ def logp(
674674
675675
Parameters
676676
----------
677-
vars: list of random variables or potential terms, optional
677+
vars : list of random variables or potential terms, optional
678678
Compute the gradient with respect to those variables. If None, use all
679679
free and observed random variables, as well as potential terms in model.
680-
jacobian:
680+
jacobian : bool
681681
Whether to include jacobian terms in logprob graph. Defaults to True.
682-
sum:
682+
sum : bool
683683
Whether to sum all logp terms or return elemwise logp for each variable.
684684
Defaults to True.
685685
@@ -752,10 +752,10 @@ def dlogp(
752752
753753
Parameters
754754
----------
755-
vars: list of random variables or potential terms, optional
755+
vars : list of random variables or potential terms, optional
756756
Compute the gradient with respect to those variables. If None, use all
757757
free and observed random variables, as well as potential terms in model.
758-
jacobian:
758+
jacobian : bool
759759
Whether to include jacobian terms in logprob graph. Defaults to True.
760760
761761
Returns
@@ -791,10 +791,10 @@ def d2logp(
791791
792792
Parameters
793793
----------
794-
vars: list of random variables or potential terms, optional
794+
vars : list of random variables or potential terms, optional
795795
Compute the gradient with respect to those variables. If None, use all
796796
free and observed random variables, as well as potential terms in model.
797-
jacobian:
797+
jacobian : bool
798798
Whether to include jacobian terms in logprob graph. Defaults to True.
799799
800800
Returns
@@ -961,7 +961,7 @@ def add_coord(
961961
name : str
962962
Name of the dimension.
963963
Forbidden: {"chain", "draw", "__sample__"}
964-
values : optional, array-like
964+
values : optional, array_like
965965
Coordinate values or ``None`` (for auto-numbering).
966966
If ``None`` is passed, a ``length`` must be specified.
967967
mutable : bool
@@ -1021,11 +1021,11 @@ def set_dim(self, name: str, new_length: int, coord_values: Optional[Sequence] =
10211021
10221022
Parameters
10231023
----------
1024-
name
1024+
name : str
10251025
Name of the dimension.
1026-
new_length
1026+
new_length : int
10271027
New length of the dimension.
1028-
coord_values
1028+
coord_values : array_like, optional
10291029
Optional sequence of coordinate values.
10301030
"""
10311031
if not isinstance(self.dim_lengths[name], SharedVariable):
@@ -1085,7 +1085,7 @@ def set_data(
10851085
----------
10861086
name : str
10871087
Name of a shared variable in the model.
1088-
values : array-like
1088+
values : array_like
10891089
New values for the shared variable.
10901090
coords : optional, dict
10911091
New coordinate values for dimensions of the shared variable.
@@ -1213,14 +1213,14 @@ def register_rv(
12131213
12141214
Parameters
12151215
----------
1216-
rv_var: TensorVariable
1217-
name: str
1216+
rv_var : TensorVariable
1217+
name : str
12181218
Intended name for the model variable.
1219-
observed: array_like (optional)
1219+
observed : array_like, optional
12201220
Data values for observed variables.
1221-
total_size: scalar
1221+
total_size : scalar
12221222
upscales logp of variable with ``coef = total_size/var.shape[0]``
1223-
dims: tuple
1223+
dims : tuple
12241224
Dimension names for the variable.
12251225
transform
12261226
A transform for the random variable in log-likelihood space.
@@ -1287,16 +1287,19 @@ def make_obs_var(
12871287
12881288
Parameters
12891289
----------
1290-
rv_var
1290+
rv_var : TensorVariable
12911291
The random variable that is observed.
12921292
Its dimensionality must be compatible with the data already.
1293-
data
1293+
data : array_like
12941294
The observed data.
1295-
dims: tuple
1295+
dims : tuple
12961296
Dimension names for the variable.
1297-
transform
1297+
transform : Transform, optional
12981298
A transform for the random variable in log-likelihood space.
12991299
1300+
Returns
1301+
-------
1302+
TensorVariable
13001303
"""
13011304
name = rv_var.name
13021305
data = convert_observed_data(data).astype(rv_var.dtype)
@@ -1371,6 +1374,17 @@ def create_value_var(
13711374
observed data. That's why value variables are only referenced in
13721375
this branch of the conditional.
13731376
1377+
Parameters
1378+
----------
1379+
rv_var : TensorVariable
1380+
1381+
transform : Any
1382+
1383+
value_var : Variable, optional
1384+
1385+
Returns
1386+
-------
1387+
TensorVariable
13741388
"""
13751389

13761390
# Make the value variable a transformed value variable,
@@ -1414,6 +1428,13 @@ def add_named_variable(self, var, dims: Optional[tuple[Union[str, None], ...]] =
14141428
14151429
This can include several types of variables such as basic_RVs, Data, Deterministics,
14161430
and Potentials.
1431+
1432+
Parameters
1433+
----------
1434+
var
1435+
1436+
dims : tuple, optional
1437+
14171438
"""
14181439
if var.name is None:
14191440
raise ValueError("Variable is unnamed.")
@@ -1486,8 +1507,12 @@ def replace_rvs_by_values(
14861507
14871508
Parameters
14881509
----------
1489-
graphs
1510+
graphs : array_like
14901511
The graphs in which to perform the replacements.
1512+
1513+
Returns
1514+
-------
1515+
array_like
14911516
"""
14921517
return replace_rvs_by_values(
14931518
graphs,
@@ -1508,9 +1533,9 @@ def compile_fn(
15081533
15091534
Parameters
15101535
----------
1511-
outs
1536+
outs : Variable or sequence of Variables
15121537
PyTensor variable or iterable of PyTensor variables.
1513-
inputs
1538+
inputs : sequence of Variables, optional
15141539
PyTensor input variables, defaults to pytensorf.inputvars(outs).
15151540
mode
15161541
PyTensor compilation mode, default=None.
@@ -1545,12 +1570,12 @@ def profile(self, outs, *, n=1000, point=None, profile=True, **kwargs):
15451570
15461571
Parameters
15471572
----------
1548-
outs: PyTensor variable or iterable of PyTensor variables
1549-
n: int, default 1000
1573+
outs : PyTensor variable or iterable of PyTensor variables
1574+
n : int, default 1000
15501575
Number of iterations to run
1551-
point: point
1576+
point : Point
15521577
Point to pass to the function
1553-
profile: True or ProfileStats
1578+
profile : True or ProfileStats
15541579
args, kwargs
15551580
Compilation args
15561581
@@ -1575,6 +1600,11 @@ def update_start_vals(self, a: dict[str, np.ndarray], b: dict[str, np.ndarray]):
15751600
Values specified for transformed variables in `a` will be recomputed
15761601
conditional on the values of `b` and stored in `b`.
15771602
1603+
Parameters
1604+
----------
1605+
a : dict
1606+
1607+
b : dict
15781608
"""
15791609
raise FutureWarning(
15801610
"The `Model.update_start_vals` method was removed."
@@ -1662,10 +1692,10 @@ def point_logps(self, point=None, round_vals=2):
16621692
16631693
Parameters
16641694
----------
1665-
point: Point, optional
1695+
point : Point, optional
16661696
Point to be evaluated. If ``None``, then ``model.initial_point``
16671697
is used.
1668-
round_vals: int, default 2
1698+
round_vals : int, default 2
16691699
Number of decimals to round log-probabilities.
16701700
16711701
Returns
@@ -1704,7 +1734,7 @@ def debug(
17041734
17051735
Parameters
17061736
----------
1707-
point : Point
1737+
point : Point, optional
17081738
Point at which model function should be evaluated
17091739
fn : str, default "logp"
17101740
Function to be used for debugging. Can be one of [logp, dlogp, random].

0 commit comments

Comments
 (0)