diff --git a/pymc/model/core.py b/pymc/model/core.py
index 01f35c45b7..112ed4ba9f 100644
--- a/pymc/model/core.py
+++ b/pymc/model/core.py
@@ -404,10 +404,10 @@ class Model(WithMemoization, metaclass=ContextMeta):

     Parameters
     ----------
-    name: str
+    name : str
         name that will be used as prefix for names of all random
         variables defined within model
-    check_bounds: bool
+    check_bounds : bool
         Ensure that input parameters to distributions are in a valid
         range. If your model is built in a way where you know your
         parameters can only take on valid values you can set this to
@@ -586,10 +586,10 @@ def logp_dlogp_function(self, grad_vars=None, tempered=False, **kwargs):

         Parameters
         ----------
-        grad_vars: list of random variables, optional
+        grad_vars : list of random variables, optional
             Compute the gradient with respect to those variables. If None,
             use all free random variables of this model.
-        tempered: bool
+        tempered : bool
             Compute the tempered logp `free_logp + alpha * observed_logp`.
             `alpha` can be changed using `ValueGradFunction.set_weights([alpha])`.
         """
@@ -625,12 +625,12 @@ def compile_logp(

         Parameters
         ----------
-        vars: list of random variables or potential terms, optional
+        vars : list of random variables or potential terms, optional
             Compute the gradient with respect to those variables. If None, use all
             free and observed random variables, as well as potential terms in model.
-        jacobian:
+        jacobian : bool
             Whether to include jacobian terms in logprob graph. Defaults to True.
-        sum:
+        sum : bool
             Whether to sum all logp terms or return elemwise logp for each variable.
             Defaults to True.
         """
@@ -645,10 +645,10 @@ def compile_dlogp(

         Parameters
         ----------
-        vars: list of random variables or potential terms, optional
+        vars : list of random variables or potential terms, optional
             Compute the gradient with respect to those variables. If None, use all
             free and observed random variables, as well as potential terms in model.
-        jacobian:
+        jacobian : bool
             Whether to include jacobian terms in logprob graph. Defaults to True.
         """
         return self.compile_fn(self.dlogp(vars=vars, jacobian=jacobian))
@@ -662,10 +662,10 @@ def compile_d2logp(

         Parameters
         ----------
-        vars: list of random variables or potential terms, optional
+        vars : list of random variables or potential terms, optional
             Compute the gradient with respect to those variables. If None, use all
             free and observed random variables, as well as potential terms in model.
-        jacobian:
+        jacobian : bool
             Whether to include jacobian terms in logprob graph. Defaults to True.
         """
         return self.compile_fn(self.d2logp(vars=vars, jacobian=jacobian))
@@ -680,12 +680,12 @@ def logp(

         Parameters
         ----------
-        vars: list of random variables or potential terms, optional
+        vars : list of random variables or potential terms, optional
             Compute the gradient with respect to those variables. If None, use all
             free and observed random variables, as well as potential terms in model.
-        jacobian:
+        jacobian : bool
             Whether to include jacobian terms in logprob graph. Defaults to True.
-        sum:
+        sum : bool
             Whether to sum all logp terms or return elemwise logp for each variable.
             Defaults to True.

@@ -758,10 +758,10 @@ def dlogp(

         Parameters
         ----------
-        vars: list of random variables or potential terms, optional
+        vars : list of random variables or potential terms, optional
             Compute the gradient with respect to those variables. If None, use all
             free and observed random variables, as well as potential terms in model.
-        jacobian:
+        jacobian : bool
             Whether to include jacobian terms in logprob graph. Defaults to True.

         Returns
@@ -797,10 +797,10 @@ def d2logp(

         Parameters
         ----------
-        vars: list of random variables or potential terms, optional
+        vars : list of random variables or potential terms, optional
             Compute the gradient with respect to those variables. If None, use all
             free and observed random variables, as well as potential terms in model.
-        jacobian:
+        jacobian : bool
             Whether to include jacobian terms in logprob graph. Defaults to True.

         Returns
@@ -967,7 +967,7 @@ def add_coord(
         name : str
             Name of the dimension.
             Forbidden: {"chain", "draw", "__sample__"}
-        values : optional, array-like
+        values : array_like, optional
             Coordinate values or ``None`` (for auto-numbering).
             If ``None`` is passed, a ``length`` must be specified.
         mutable : bool
@@ -1026,11 +1026,11 @@ def set_dim(self, name: str, new_length: int, coord_values: Optional[Sequence] =

         Parameters
         ----------
-        name
+        name : str
             Name of the dimension.
-        new_length
+        new_length : int
             New length of the dimension.
-        coord_values
+        coord_values : array_like, optional
             Optional sequence of coordinate values.
         """
         if not isinstance(self.dim_lengths[name], ScalarSharedVariable):
@@ -1090,7 +1090,7 @@ def set_data(
         ----------
         name : str
             Name of a shared variable in the model.
-        values : array-like
+        values : array_like
             New values for the shared variable.
         coords : optional, dict
             New coordinate values for dimensions of the shared variable.
@@ -1218,14 +1218,14 @@ def register_rv(

         Parameters
         ----------
-        rv_var: TensorVariable
-        name: str
+        rv_var : TensorVariable
+        name : str
             Intended name for the model variable.
-        observed: array_like (optional)
+        observed : array_like, optional
             Data values for observed variables.
-        total_size: scalar
+        total_size : scalar
             upscales logp of variable with ``coef = total_size/var.shape[0]``
-        dims: tuple
+        dims : tuple
             Dimension names for the variable.
         transform
             A transform for the random variable in log-likelihood space.
@@ -1292,16 +1292,19 @@ def make_obs_var(

         Parameters
         ----------
-        rv_var
+        rv_var : TensorVariable
             The random variable that is observed.
             Its dimensionality must be compatible with the data already.
-        data
+        data : array_like
             The observed data.
-        dims: tuple
+        dims : tuple
             Dimension names for the variable.
-        transform
+        transform : Any, optional
             A transform for the random variable in log-likelihood space.
+
+        Returns
+        -------
+        TensorVariable
         """
         name = rv_var.name
         data = convert_observed_data(data).astype(rv_var.dtype)
@@ -1376,6 +1379,17 @@ def create_value_var(
         observed data. That's why value variables are only referenced in
         this branch of the conditional.

+        Parameters
+        ----------
+        rv_var : TensorVariable
+
+        transform : Any
+
+        value_var : Variable, optional
+
+        Returns
+        -------
+        TensorVariable
         """

         # Make the value variable a transformed value variable,
@@ -1419,6 +1433,13 @@ def add_named_variable(self, var, dims: Optional[Tuple[Union[str, None], ...]] =

         This can include several types of variables such basic_RVs, Data,
         Deterministics, and Potentials.
+
+        Parameters
+        ----------
+        var
+
+        dims : tuple, optional
+
         """
         if var.name is None:
             raise ValueError("Variable is unnamed.")
@@ -1491,8 +1512,12 @@ def replace_rvs_by_values(

         Parameters
         ----------
-        graphs
+        graphs : array_like
             The graphs in which to perform the replacements.
+
+        Returns
+        -------
+        array_like
         """
         return replace_rvs_by_values(
             graphs,
@@ -1513,9 +1538,9 @@ def compile_fn(

         Parameters
         ----------
-        outs
+        outs : Variable or sequence of Variables
             PyTensor variable or iterable of PyTensor variables.
-        inputs
+        inputs : sequence of Variables, optional
             PyTensor input variables, defaults to pytensorf.inputvars(outs).
         mode
             PyTensor compilation mode, default=None.
@@ -1550,12 +1575,12 @@ def profile(self, outs, *, n=1000, point=None, profile=True, **kwargs):

         Parameters
         ----------
-        outs: PyTensor variable or iterable of PyTensor variables
-        n: int, default 1000
+        outs : PyTensor variable or iterable of PyTensor variables
+        n : int, default 1000
             Number of iterations to run
-        point: point
+        point : Point
             Point to pass to the function
-        profile: True or ProfileStats
+        profile : True or ProfileStats
         args, kwargs
             Compilation args

@@ -1580,6 +1605,11 @@ def update_start_vals(self, a: Dict[str, np.ndarray], b: Dict[str, np.ndarray]):
         Values specified for transformed variables in `a` will be
         recomputed conditional on the values of `b` and stored in `b`.

+        Parameters
+        ----------
+        a : dict
+
+        b : dict
         """
         raise FutureWarning(
             "The `Model.update_start_vals` method was removed."
@@ -1667,10 +1697,10 @@ def point_logps(self, point=None, round_vals=2):

         Parameters
         ----------
-        point: Point, optional
+        point : Point, optional
             Point to be evaluated. If ``None``, then ``model.initial_point``
             is used.
-        round_vals: int, default 2
+        round_vals : int, default 2
             Number of decimals to round log-probabilities.

         Returns
@@ -1709,7 +1739,7 @@ def debug(

         Parameters
         ----------
-        point : Point
+        point : Point, optional
             Point at which model function should be evaluated
         fn : str, default "logp"
             Function to be used for debugging. Can be one of [logp, dlogp, random].
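
Editor's note: the diff above standardizes the docstrings on the numpydoc parameter layout, which puts a space on both sides of the colon so Sphinx renders the parameter name and its type as distinct tokens, spells the duck type as "array_like", and orders qualifiers as "type, optional". A minimal sketch of the target layout, using the set_dim signature from the diff (the docstring wording mirrors the hunks above):

    def set_dim(self, name, new_length, coord_values=None):
        """Change the length of a mutable dimension.

        Parameters
        ----------
        name : str
            Name of the dimension.
        new_length : int
            New length of the dimension.
        coord_values : array_like, optional
            Optional sequence of coordinate values.
        """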
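For reviewers unfamiliar with the methods whose docstrings change here, a minimal usage sketch of the logp machinery; the toy model is hypothetical, but Model.compile_logp, Model.initial_point, Model.point_logps, and Model.debug are the PyMC 5.x APIs documented in the hunks above:

    import numpy as np
    import pymc as pm

    with pm.Model(check_bounds=True) as model:
        mu = pm.Normal("mu", 0.0, 1.0)
        pm.Normal("y", mu, 1.0, observed=np.array([0.1, -0.3, 0.2]))

    logp_fn = model.compile_logp(jacobian=True, sum=True)  # compiled joint log-density
    print(logp_fn(model.initial_point()))                  # evaluate at the initial point
    print(model.point_logps(round_vals=2))                 # per-variable log-probabilities
    model.debug(fn="logp")                                 # report variables with invalid logp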
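Likewise, a hedged sketch of the coords/shared-data workflow touched by the add_coord, set_dim, and set_data hunks; it assumes pm.MutableData, the mutable data container of the PyMC 5.x era this diff targets:

    import pymc as pm

    with pm.Model(coords={"city": ["A", "B"]}) as m:
        x = pm.MutableData("x", [1.0, 2.0], dims="city")
        pm.Normal("obs", mu=x, sigma=1.0, dims="city")

    m.set_data("x", [3.0, 4.0])  # same length, so dims and coords are unchanged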