diff --git a/examples/case_studies/BART_introduction.ipynb b/examples/case_studies/BART_introduction.ipynb index 936ba5b8d..906665199 100644 --- a/examples/case_studies/BART_introduction.ipynb +++ b/examples/case_studies/BART_introduction.ipynb @@ -8,7 +8,7 @@ "(BART_introduction)=\n", "# Bayesian Additive Regression Trees: Introduction\n", ":::{post} Dec 21, 2021\n", - ":tags: BART, Bayesian additive regression trees, non-parametric, regression\n", + ":tags: BART, Bayesian additive regression trees, non-parametric, regression\n", ":category: intermediate, explanation\n", ":author: Osvaldo Martin\n", ":::" diff --git a/examples/case_studies/BEST.ipynb b/examples/case_studies/BEST.ipynb index dbf924ac9..24452dd90 100644 --- a/examples/case_studies/BEST.ipynb +++ b/examples/case_studies/BEST.ipynb @@ -8,7 +8,7 @@ "# Bayesian Estimation Supersedes the T-Test\n", "\n", ":::{post} Jan 07, 2022\n", - ":tags: hypothesis testing, model comparison, pymc3.Deterministic, pymc3.Exponential, pymc3.Model, pymc3.Normal, pymc3.StudentT, pymc3.Uniform\n", + ":tags: hypothesis testing, model comparison\n", ":category: beginner\n", ":author: Andrew Straw, Thomas Wiecki, Chris Fonnesbeck, Andrés suárez\n", ":::" diff --git a/examples/case_studies/binning.ipynb b/examples/case_studies/binning.ipynb index a42463b55..11a26428a 100644 --- a/examples/case_studies/binning.ipynb +++ b/examples/case_studies/binning.ipynb @@ -8,7 +8,7 @@ "(awkward_binning)=\n", "# Estimating parameters of a distribution from awkwardly binned data\n", ":::{post} Oct 23, 2021\n", - ":tags: binned data, case study, parameter estimation, pymc3.Bound, pymc3.Deterministic, pymc3.Gamma, pymc3.HalfNormal, pymc3.Model, pymc3.Multinomial, pymc3.Normal\n", + ":tags: binned data, case study, parameter estimation\n", ":category: intermediate\n", ":author: Eric Ma, Benjamin T. 
Vincent\n", ":::" diff --git a/examples/case_studies/blackbox_external_likelihood_numpy.ipynb b/examples/case_studies/blackbox_external_likelihood_numpy.ipynb index db2bb54dc..88936d160 100644 --- a/examples/case_studies/blackbox_external_likelihood_numpy.ipynb +++ b/examples/case_studies/blackbox_external_likelihood_numpy.ipynb @@ -8,7 +8,7 @@ "# Using a \"black box\" likelihood function (numpy)\n", "\n", ":::{post} Dec 16, 2021\n", - ":tags: case study, external likelihood, pymc.Model, pymc.Normal, pymc.Potential, pymc.Uniform, pymc3.Model, pymc3.Normal, pymc3.Potential, pymc3.Uniform\n", + ":tags: case study, external likelihood, \n", ":category: beginner\n", ":author: Matt Pitkin, Jørgen Midtbø, Oriol Abril\n", ":::\n", diff --git a/examples/case_studies/factor_analysis.ipynb b/examples/case_studies/factor_analysis.ipynb index 6041100d8..9348ce2ff 100644 --- a/examples/case_studies/factor_analysis.ipynb +++ b/examples/case_studies/factor_analysis.ipynb @@ -8,7 +8,7 @@ "# Factor analysis\n", "\n", ":::{post} 19 Mar, 2022\n", - ":tags: factor analysis, matrix factorization, pca\n", + ":tags: factor analysis, matrix factorization, pca \n", ":category: advanced, how-to\n", ":author: Chris Hartl, Christopher Krapu, Oriol Abril-Pla\n", ":::" diff --git a/examples/case_studies/hierarchical_partial_pooling.ipynb b/examples/case_studies/hierarchical_partial_pooling.ipynb index 64137c52a..053d9ed02 100644 --- a/examples/case_studies/hierarchical_partial_pooling.ipynb +++ b/examples/case_studies/hierarchical_partial_pooling.ipynb @@ -7,7 +7,7 @@ "# Hierarchical Partial Pooling\n", "\n", ":::{post} Oct 07, 2021\n", - ":tags: hierarchical model, pymc.Beta, pymc.Binomial, pymc.Deterministic, pymc.Exponential, pymc.Model, pymc.Uniform, pymc3.Beta, pymc3.Binomial, pymc3.Deterministic, pymc3.Exponential, pymc3.Model, pymc3.Uniform\n", + ":tags: hierarchical model, \n", ":category: intermediate\n", ":::" ] diff --git a/examples/case_studies/item_response_nba.ipynb 
b/examples/case_studies/item_response_nba.ipynb index 12a867ff3..af223156e 100644 --- a/examples/case_studies/item_response_nba.ipynb +++ b/examples/case_studies/item_response_nba.ipynb @@ -8,7 +8,7 @@ "# NBA Foul Analysis with Item Response Theory\n", "\n", ":::{post} Apr 17, 2022\n", - ":tags: hierarchical model, case study, generalized linear model\n", + ":tags: hierarchical model, case study, generalized linear model \n", ":category: intermediate, tutorial\n", ":author: Austin Rochford, Lorenzo Toniazzi\n", ":::" diff --git a/examples/case_studies/mediation_analysis.ipynb b/examples/case_studies/mediation_analysis.ipynb index f4483dfb5..aa2f5d722 100644 --- a/examples/case_studies/mediation_analysis.ipynb +++ b/examples/case_studies/mediation_analysis.ipynb @@ -9,7 +9,7 @@ "# Bayesian mediation analysis\n", "\n", ":::{post} February, 2022\n", - ":tags: mediation, path analysis, regression\n", + ":tags: mediation, path analysis, regression \n", ":category: beginner\n", ":author: Benjamin T. Vincent\n", ":::\n", diff --git a/examples/case_studies/moderation_analysis.ipynb b/examples/case_studies/moderation_analysis.ipynb index bb792059f..045e7ed28 100644 --- a/examples/case_studies/moderation_analysis.ipynb +++ b/examples/case_studies/moderation_analysis.ipynb @@ -9,7 +9,7 @@ "# Bayesian moderation analysis\n", "\n", ":::{post} March, 2022\n", - ":tags: moderation, path analysis,\n", + ":tags: moderation, path analysis, \n", ":category: beginner\n", ":author: Benjamin T. 
Vincent\n", ":::\n", diff --git a/examples/case_studies/multilevel_modeling.ipynb b/examples/case_studies/multilevel_modeling.ipynb index 39b58de72..f25fc3a88 100644 --- a/examples/case_studies/multilevel_modeling.ipynb +++ b/examples/case_studies/multilevel_modeling.ipynb @@ -8,7 +8,7 @@ "# A Primer on Bayesian Methods for Multilevel Modeling\n", "\n", ":::{post} 27 February, 2022\n", - ":tags: hierarchical model, case study\n", + ":tags: hierarchical model, case study \n", ":category: intermediate\n", ":author: Chris Fonnesbeck, Colin Carroll, Alex Andorra, Oriol Abril, Farhan Reynaldo\n", ":::" diff --git a/examples/case_studies/probabilistic_matrix_factorization.ipynb b/examples/case_studies/probabilistic_matrix_factorization.ipynb index 3c7d35107..d4b4a0e46 100644 --- a/examples/case_studies/probabilistic_matrix_factorization.ipynb +++ b/examples/case_studies/probabilistic_matrix_factorization.ipynb @@ -7,7 +7,7 @@ "# Probabilistic Matrix Factorization for Making Personalized Recommendations\n", "\n", ":::{post} Sept 20, 2021\n", - ":tags: case study, pymc3.Model, pymc3.MvNormal, pymc3.Normal\n", + ":tags: case study, \n", ":category: intermediate\n", ":::" ] diff --git a/examples/case_studies/putting_workflow.ipynb b/examples/case_studies/putting_workflow.ipynb index c6a658ecf..b951c044e 100644 --- a/examples/case_studies/putting_workflow.ipynb +++ b/examples/case_studies/putting_workflow.ipynb @@ -8,7 +8,7 @@ "# Model building and expansion for golf putting\n", "\n", ":::{post} Apr 2, 2022\n", - ":tags: bayesian workflow, model expansion, sports\n", + ":tags: bayesian workflow, model expansion, sports \n", ":category: intermediate, how-to\n", ":author: Colin Carroll, Marco Gorelli, Oriol Abril-Pla\n", ":::\n", diff --git a/examples/case_studies/regression_discontinuity.ipynb b/examples/case_studies/regression_discontinuity.ipynb index 031abd8b3..c69362a5a 100644 --- a/examples/case_studies/regression_discontinuity.ipynb +++ 
b/examples/case_studies/regression_discontinuity.ipynb @@ -9,7 +9,7 @@ "# Regression discontinuity design analysis\n", "\n", ":::{post} April, 2022\n", - ":tags: regression, causal inference, quasi experimental design, counterfactuals\n", + ":tags: regression, causal inference, quasi experimental design, counterfactuals \n", ":category: beginner, explanation\n", ":author: Benjamin T. Vincent\n", ":::\n", diff --git a/examples/case_studies/rugby_analytics.ipynb b/examples/case_studies/rugby_analytics.ipynb index 4a7ede3ef..eb5e4da6e 100644 --- a/examples/case_studies/rugby_analytics.ipynb +++ b/examples/case_studies/rugby_analytics.ipynb @@ -7,7 +7,7 @@ "# A Hierarchical model for Rugby prediction\n", "\n", ":::{post} 19 Mar, 2022\n", - ":tags: hierarchical, sports\n", + ":tags: hierarchical, sports \n", ":category: intermediate, how-to\n", ":author: Peadar Coyle, Meenal Jhajharia, Oriol Abril-Pla\n", ":::" diff --git a/examples/case_studies/spline.ipynb b/examples/case_studies/spline.ipynb index 8a0567826..161fcb4a6 100644 --- a/examples/case_studies/spline.ipynb +++ b/examples/case_studies/spline.ipynb @@ -8,7 +8,7 @@ "# Splines\n", "\n", ":::{post} June 4, 2022 \n", - ":tags: patsy, regression, spline\n", + ":tags: patsy, regression, spline \n", ":category: beginner\n", ":author: Joshua Cook\n", ":::" diff --git a/examples/case_studies/wrapping_jax_function.ipynb b/examples/case_studies/wrapping_jax_function.ipynb index a8eec1212..617bf3f59 100644 --- a/examples/case_studies/wrapping_jax_function.ipynb +++ b/examples/case_studies/wrapping_jax_function.ipynb @@ -8,7 +8,7 @@ "# How to wrap a JAX function for use in PyMC\n", "\n", ":::{post} Mar 24, 2022\n", - ":tags: Aesara, hidden markov model, JAX\n", + ":tags: Aesara, hidden markov model, JAX \n", ":category: advanced, how-to\n", ":author: Ricardo Vieira\n", ":::" diff --git a/examples/diagnostics_and_criticism/Bayes_factor.ipynb b/examples/diagnostics_and_criticism/Bayes_factor.ipynb index f8dc3a735..b771b2d6f 
100644 --- a/examples/diagnostics_and_criticism/Bayes_factor.ipynb +++ b/examples/diagnostics_and_criticism/Bayes_factor.ipynb @@ -7,7 +7,7 @@ "(Bayes_factor)=\n", "# Bayes Factors and Marginal Likelihood\n", ":::{post} Jun 1, 2022\n", - ":tags: Bayes Factors, model comparison\n", + ":tags: Bayes Factors, model comparison \n", ":category: beginner, explanation\n", ":author: Osvaldo Martin\n", ":::" diff --git a/examples/diagnostics_and_criticism/sampler-stats.ipynb b/examples/diagnostics_and_criticism/sampler-stats.ipynb index ae6bd214a..81a308d2c 100644 --- a/examples/diagnostics_and_criticism/sampler-stats.ipynb +++ b/examples/diagnostics_and_criticism/sampler-stats.ipynb @@ -13,7 +13,7 @@ "statistics for each generated sample.\n", "\n", ":::{post} May 31, 2022\n", - ":tags: diagnostics\n", + ":tags: diagnostics \n", ":category: beginner\n", ":author: Meenal Jhajharia, Christian Luhmann\n", ":::" diff --git a/examples/gaussian_processes/GP-MaunaLoa.ipynb b/examples/gaussian_processes/GP-MaunaLoa.ipynb index 7ef200a69..0f77efed7 100644 --- a/examples/gaussian_processes/GP-MaunaLoa.ipynb +++ b/examples/gaussian_processes/GP-MaunaLoa.ipynb @@ -8,7 +8,7 @@ "# Gaussian Process for CO2 at Mauna Loa\n", "\n", ":::{post} April, 2022\n", - ":tags: gaussian process, CO2\n", + ":tags: gaussian process, CO2 \n", ":category: intermediate\n", ":author: Bill Engels, Chris Fonnesbeck\n", ":::" diff --git a/examples/generalized_linear_models/GLM-binomial-regression.ipynb b/examples/generalized_linear_models/GLM-binomial-regression.ipynb index 182b3d56f..1257ccc17 100644 --- a/examples/generalized_linear_models/GLM-binomial-regression.ipynb +++ b/examples/generalized_linear_models/GLM-binomial-regression.ipynb @@ -9,7 +9,7 @@ "# Binomial regression\n", "\n", ":::{post} February, 2022\n", - ":tags: binomial regression, generalized linear model, pymc.Binomial, pymc.ConstantData, pymc.Deterministic, pymc.Model, pymc.Normal, pymc3.Binomial, pymc3.ConstantData, pymc3.Deterministic, 
pymc3.Model, pymc3.Normal\n", + ":tags: binomial regression, generalized linear model, \n", ":category: beginner\n", ":author: Benjamin T. Vincent\n", ":::" diff --git a/examples/generalized_linear_models/GLM-hierarchical-binomial-model.ipynb b/examples/generalized_linear_models/GLM-hierarchical-binomial-model.ipynb index b96122313..d54515c90 100644 --- a/examples/generalized_linear_models/GLM-hierarchical-binomial-model.ipynb +++ b/examples/generalized_linear_models/GLM-hierarchical-binomial-model.ipynb @@ -6,7 +6,7 @@ "source": [ "# Hierarchical Binomial Model: Rat Tumor Example\n", ":::{post} Nov 11, 2021\n", - ":tags: generalized linear model, hierarchical model\n", + ":tags: generalized linear model, hierarchical model \n", ":category: intermediate\n", ":author: Demetri Pananos, Junpeng Lao, Raúl Maldonado, Farhan Reynaldo\n", ":::" diff --git a/examples/generalized_linear_models/GLM-model-selection.ipynb b/examples/generalized_linear_models/GLM-model-selection.ipynb index 7189e9e0e..481647340 100644 --- a/examples/generalized_linear_models/GLM-model-selection.ipynb +++ b/examples/generalized_linear_models/GLM-model-selection.ipynb @@ -8,7 +8,7 @@ "# GLM: Model Selection\n", "\n", ":::{post} Jan 8, 2022\n", - ":tags: cross validation, generalized linear models, loo, model comparison, pymc3.HalfCauchy, pymc3.Model, pymc3.Normal, waic\n", + ":tags: cross validation, generalized linear models, loo, model comparison, waic \n", ":category: intermediate\n", ":author: Jon Sedar, Junpeng Lao, Abhipsha Das, Oriol Abril-Pla\n", ":::" diff --git a/examples/generalized_linear_models/GLM-robust-with-outlier-detection.ipynb b/examples/generalized_linear_models/GLM-robust-with-outlier-detection.ipynb index 889b0923f..e7071bc02 100644 --- a/examples/generalized_linear_models/GLM-robust-with-outlier-detection.ipynb +++ b/examples/generalized_linear_models/GLM-robust-with-outlier-detection.ipynb @@ -8,7 +8,7 @@ "# GLM: Robust Regression using Custom Likelihood for Outlier 
Classification\n", "\n", ":::{post} 17 Nov, 2021\n", - ":tags: pymc3.Bernoulli, pymc3.Data, pymc3.Deterministic, pymc3.DiscreteUniform, pymc3.Exponential, pymc3.GaussianRandomWalk, pymc3.HalfNormal, pymc3.InverseGamma, pymc3.Model, pymc3.Normal, pymc3.Poisson, pymc3.Potential, pymc3.Slice, pymc3.StudentT, pymc3.Uniform, regression, robust analysis\n", + ":tags: regression, robust analysis \n", ":category: intermediate\n", ":author: Jon Sedar, Thomas Wiecki, Raul Maldonado, Oriol Abril\n", ":::\n", diff --git a/examples/generalized_linear_models/GLM-rolling-regression.ipynb b/examples/generalized_linear_models/GLM-rolling-regression.ipynb index 93618c937..e0bfe94fd 100644 --- a/examples/generalized_linear_models/GLM-rolling-regression.ipynb +++ b/examples/generalized_linear_models/GLM-rolling-regression.ipynb @@ -8,7 +8,7 @@ "# Rolling Regression\n", "\n", ":::{post} June, 2022\n", - ":tags: generalized linear model, regression\n", + ":tags: generalized linear model, regression \n", ":category: intermediate\n", ":author: Thomas Wiecki\n", ":::" diff --git a/examples/generalized_linear_models/GLM-simpsons-paradox.ipynb b/examples/generalized_linear_models/GLM-simpsons-paradox.ipynb index 1883c9145..c6536dbbf 100644 --- a/examples/generalized_linear_models/GLM-simpsons-paradox.ipynb +++ b/examples/generalized_linear_models/GLM-simpsons-paradox.ipynb @@ -8,7 +8,7 @@ "# Simpson's paradox and mixed models\n", "\n", ":::{post} March, 2022\n", - ":tags: regression, hierarchical model, linear model, multi level model, posterior predictive, Simpson's paradox\n", + ":tags: regression, hierarchical model, linear model, multi level model, posterior predictive, Simpson's paradox \n", ":category: beginner\n", ":author: Benjamin T. 
Vincent\n", ":::" diff --git a/examples/generalized_linear_models/GLM-truncated-censored-regression.ipynb b/examples/generalized_linear_models/GLM-truncated-censored-regression.ipynb index 33da40ea1..25503e95e 100644 --- a/examples/generalized_linear_models/GLM-truncated-censored-regression.ipynb +++ b/examples/generalized_linear_models/GLM-truncated-censored-regression.ipynb @@ -8,7 +8,7 @@ "# Bayesian regression with truncated or censored data\n", "\n", ":::{post} January, 2022\n", - ":tags: censored, censoring, generalized linear model, pymc3.Censored, pymc3.HalfNormal, pymc3.Model, pymc3.Normal, pymc3.TruncatedNormal, regression, truncated, truncation\n", + ":tags: censored, censoring, generalized linear model, regression, truncated, truncation \n", ":category: beginner\n", ":author: Benjamin T. Vincent\n", ":::\n", diff --git a/examples/howto/data_container.ipynb b/examples/howto/data_container.ipynb index 8e892f12f..4a845ccba 100644 --- a/examples/howto/data_container.ipynb +++ b/examples/howto/data_container.ipynb @@ -8,7 +8,7 @@ "# Using shared variables (`Data` container adaptation)\n", "\n", ":::{post} Dec 16, 2021\n", - ":tags: posterior predictive, predictions, pymc3.Bernoulli, pymc3.Data, pymc3.Deterministic, pymc3.HalfNormal, pymc3.Model, pymc3.Normal, shared data\n", + ":tags: posterior predictive, predictions, shared data \n", ":category: beginner\n", ":author: Juan Martin Loyola, Kavya Jaiswal, Oriol Abril\n", ":::" diff --git a/examples/howto/lasso_block_update.ipynb b/examples/howto/lasso_block_update.ipynb index 8c9e255bc..d5daeeec7 100644 --- a/examples/howto/lasso_block_update.ipynb +++ b/examples/howto/lasso_block_update.ipynb @@ -8,7 +8,7 @@ "# Lasso regression with block updating\n", "\n", ":::{post} Feb 10, 2022\n", - ":tags: pymc3.Exponential, pymc3.Laplace, pymc3.Metropolis, pymc3.Model, pymc3.Normal, pymc3.Slice, pymc3.Uniform, regression\n", + ":tags: regression \n", ":category: beginner\n", ":author: Chris Fonnesbeck, Raul Maldonado, 
Michael Osthege, Thomas Wiecki, Lorenzo Toniazzi\n", ":::" diff --git a/examples/mixture_models/dirichlet_mixture_of_multinomials.ipynb b/examples/mixture_models/dirichlet_mixture_of_multinomials.ipynb index 30965d36e..1b9504d87 100644 --- a/examples/mixture_models/dirichlet_mixture_of_multinomials.ipynb +++ b/examples/mixture_models/dirichlet_mixture_of_multinomials.ipynb @@ -8,7 +8,7 @@ "# Dirichlet mixtures of multinomials\n", "\n", ":::{post} Jan 8, 2022\n", - ":tags: mixture model, pymc3.Dirichlet, pymc3.DirichletMultinomial, pymc3.Lognormal, pymc3.Model, pymc3.Multinomial\n", + ":tags: mixture model\n", ":category: advanced\n", ":author: Byron J. Smith, Abhipsha Das, Oriol Abril-Pla\n", ":::" diff --git a/examples/mixture_models/dp_mix.ipynb b/examples/mixture_models/dp_mix.ipynb index 23c1d48cc..3eeb14e2a 100644 --- a/examples/mixture_models/dp_mix.ipynb +++ b/examples/mixture_models/dp_mix.ipynb @@ -8,7 +8,7 @@ "# Dirichlet process mixtures for density estimation\n", "\n", ":::{post} Sept 16, 2021\n", - ":tags: mixture model, pymc3.Beta, pymc3.Deterministic, pymc3.Gamma, pymc3.Mixture, pymc3.Model, pymc3.Normal, pymc3.NormalMixture\n", + ":tags: mixture model\n", ":category: advanced\n", ":author: Austin Rochford, Abhipsha Das\n", ":::" diff --git a/examples/mixture_models/gaussian_mixture_model.ipynb b/examples/mixture_models/gaussian_mixture_model.ipynb index 5c0747ca6..8c2b1b2a4 100644 --- a/examples/mixture_models/gaussian_mixture_model.ipynb +++ b/examples/mixture_models/gaussian_mixture_model.ipynb @@ -9,7 +9,7 @@ "# Gaussian Mixture Model\n", "\n", ":::{post} April, 2022\n", - ":tags: mixture model, classification\n", + ":tags: mixture model, classification\n", ":category: beginner\n", ":author: Abe Flaxman\n", ":::\n", diff --git a/examples/mixture_models/marginalized_gaussian_mixture_model.ipynb b/examples/mixture_models/marginalized_gaussian_mixture_model.ipynb index 8ba65d49b..6a8fd2db7 100644 --- 
a/examples/mixture_models/marginalized_gaussian_mixture_model.ipynb +++ b/examples/mixture_models/marginalized_gaussian_mixture_model.ipynb @@ -16,7 +16,7 @@ "# Marginalized Gaussian Mixture Model\n", "\n", ":::{post} Sept 18, 2021\n", - ":tags: mixture model, pymc3.Dirichlet, pymc3.Gamma, pymc3.Model, pymc3.Normal, pymc3.NormalMixture\n", + ":tags: mixture model\n", ":category: intermediate\n", ":::" ] diff --git a/examples/samplers/SMC-ABC_Lotka-Volterra_example.ipynb b/examples/samplers/SMC-ABC_Lotka-Volterra_example.ipynb index d824eef87..afe778668 100644 --- a/examples/samplers/SMC-ABC_Lotka-Volterra_example.ipynb +++ b/examples/samplers/SMC-ABC_Lotka-Volterra_example.ipynb @@ -7,7 +7,7 @@ "(ABC_introduction)=\n", "# Approximate Bayesian Computation\n", ":::{post} May 31, 2022\n", - ":tags: SMC, ABC\n", + ":tags: SMC, ABC\n", ":category: beginner, explanation\n", ":::" ] diff --git a/examples/samplers/SMC2_gaussians.ipynb b/examples/samplers/SMC2_gaussians.ipynb index 77a73bef9..373924688 100644 --- a/examples/samplers/SMC2_gaussians.ipynb +++ b/examples/samplers/SMC2_gaussians.ipynb @@ -7,7 +7,7 @@ "# Sequential Monte Carlo\n", "\n", ":::{post} Oct 19, 2021\n", - ":tags: SMC\n", + ":tags: SMC\n", ":category: beginner\n", ":::" ] diff --git a/examples/survival_analysis/censored_data.ipynb b/examples/survival_analysis/censored_data.ipynb index 2c82a3d9c..46ec8c019 100644 --- a/examples/survival_analysis/censored_data.ipynb +++ b/examples/survival_analysis/censored_data.ipynb @@ -8,7 +8,7 @@ "# Censored Data Models\n", "\n", ":::{post} May, 2022\n", - ":tags: censoring, survival analysis\n", + ":tags: censoring, survival analysis\n", ":category: intermediate, how-to\n", ":author: Luis Mario Domenzain\n", ":::" diff --git a/examples/time_series/Air_passengers-Prophet_with_Bayesian_workflow.ipynb b/examples/time_series/Air_passengers-Prophet_with_Bayesian_workflow.ipynb index 9cc5c48c7..128ce436e 100644 --- 
a/examples/time_series/Air_passengers-Prophet_with_Bayesian_workflow.ipynb +++ b/examples/time_series/Air_passengers-Prophet_with_Bayesian_workflow.ipynb @@ -8,7 +8,7 @@ "# Air passengers - Prophet-like model\n", "\n", ":::{post} April, 2022\n", - ":tags: time series, prophet\n", + ":tags: time series, prophet \n", ":category: intermediate\n", ":author: Marco Gorelli, Danh Phan\n", ":::" diff --git a/examples/time_series/MvGaussianRandomWalk_demo.ipynb b/examples/time_series/MvGaussianRandomWalk_demo.ipynb index 9aed57dd8..16accb234 100644 --- a/examples/time_series/MvGaussianRandomWalk_demo.ipynb +++ b/examples/time_series/MvGaussianRandomWalk_demo.ipynb @@ -7,7 +7,7 @@ "source": [ "# Multivariate Gaussian Random Walk\n", ":::{post} Sep 25, 2021\n", - ":tags: linear model, pymc3.HalfNormal, pymc3.LKJCholeskyCov, pymc3.Model, pymc3.MvGaussianRandomWalk, pymc3.Normal, regression, time series\n", + ":tags: linear model, regression, time series \n", ":category: beginner\n", ":::" ] diff --git a/myst_nbs/case_studies/BART_introduction.myst.md b/myst_nbs/case_studies/BART_introduction.myst.md index fa4a8628d..58400a484 100644 --- a/myst_nbs/case_studies/BART_introduction.myst.md +++ b/myst_nbs/case_studies/BART_introduction.myst.md @@ -14,7 +14,7 @@ kernelspec: (BART_introduction)= # Bayesian Additive Regression Trees: Introduction :::{post} Dec 21, 2021 -:tags: BART, Bayesian additive regression trees, non-parametric, regression +:tags: BART, Bayesian additive regression trees, non-parametric, regression :category: intermediate, explanation :author: Osvaldo Martin ::: diff --git a/myst_nbs/case_studies/BEST.myst.md b/myst_nbs/case_studies/BEST.myst.md index e8b41fd4a..fe90f6405 100644 --- a/myst_nbs/case_studies/BEST.myst.md +++ b/myst_nbs/case_studies/BEST.myst.md @@ -15,7 +15,7 @@ kernelspec: # Bayesian Estimation Supersedes the T-Test :::{post} Jan 07, 2022 -:tags: hypothesis testing, model comparison, pymc3.Deterministic, pymc3.Exponential, pymc3.Model, 
pymc3.Normal, pymc3.StudentT, pymc3.Uniform +:tags: hypothesis testing, model comparison, :category: beginner :author: Andrew Straw, Thomas Wiecki, Chris Fonnesbeck, Andrés suárez ::: @@ -124,7 +124,7 @@ with model: az.plot_kde(rng.exponential(scale=30, size=10000), fill_kwargs={"alpha": 0.5}); ``` -Since PyMC parametrizes the Student-T in terms of precision, rather than standard deviation, we must transform the standard deviations before specifying our likelihoods. +Since PyMC parametrizes the Student-T in terms of precision, rather than standard deviation, we must transform the standard deviations before specifying our likelihoods. ```{code-cell} ipython3 with model: diff --git a/myst_nbs/case_studies/binning.myst.md b/myst_nbs/case_studies/binning.myst.md index 4b6595910..23bcb7db9 100644 --- a/myst_nbs/case_studies/binning.myst.md +++ b/myst_nbs/case_studies/binning.myst.md @@ -14,7 +14,7 @@ kernelspec: (awkward_binning)= # Estimating parameters of a distribution from awkwardly binned data :::{post} Oct 23, 2021 -:tags: binned data, case study, parameter estimation, pymc3.Bound, pymc3.Deterministic, pymc3.Gamma, pymc3.HalfNormal, pymc3.Model, pymc3.Multinomial, pymc3.Normal +:tags: binned data, case study, parameter estimation, :category: intermediate :author: Eric Ma, Benjamin T. Vincent ::: @@ -843,7 +843,7 @@ ax[1, 0].set(xlim=(0, 50), xlabel="BMI", ylabel="observed frequency", title="Sam ### Model specification -This is a variation of Example 3 above. The only changes are: +This is a variation of Example 3 above. The only changes are: - update the probability distribution to match our target (the Gumbel distribution) - ensure we specify priors for our target distribution, appropriate given our domain knowledge. 
diff --git a/myst_nbs/case_studies/blackbox_external_likelihood_numpy.myst.md b/myst_nbs/case_studies/blackbox_external_likelihood_numpy.myst.md index e999cc478..2ee5ccb0f 100644 --- a/myst_nbs/case_studies/blackbox_external_likelihood_numpy.myst.md +++ b/myst_nbs/case_studies/blackbox_external_likelihood_numpy.myst.md @@ -15,7 +15,7 @@ kernelspec: # Using a "black box" likelihood function (numpy) :::{post} Dec 16, 2021 -:tags: case study, external likelihood, pymc.Model, pymc.Normal, pymc.Potential, pymc.Uniform, pymc3.Model, pymc3.Normal, pymc3.Potential, pymc3.Uniform +:tags: case study, external likelihood, :category: beginner :author: Matt Pitkin, Jørgen Midtbø, Oriol Abril ::: @@ -447,7 +447,7 @@ with test_model: print(f'Gradient returned by PyMC "Normal" distribution: {grad_vals_pymc}') ``` -We could also do some profiling to compare performance between implementations. The {ref}`profiling` notebook shows how to do it. +We could also do some profiling to compare performance between implementations. The {ref}`profiling` notebook shows how to do it. +++ diff --git a/myst_nbs/case_studies/factor_analysis.myst.md b/myst_nbs/case_studies/factor_analysis.myst.md index 45b17bcf0..c45cb0d9e 100644 --- a/myst_nbs/case_studies/factor_analysis.myst.md +++ b/myst_nbs/case_studies/factor_analysis.myst.md @@ -18,7 +18,7 @@ substitutions: # Factor analysis :::{post} 19 Mar, 2022 -:tags: factor analysis, matrix factorization, pca +:tags: factor analysis, matrix factorization, pca :category: advanced, how-to :author: Chris Hartl, Christopher Krapu, Oriol Abril-Pla ::: @@ -136,7 +136,7 @@ for i in trace.posterior.chain.values: plt.legend(ncol=4, loc="upper center", fontsize=12, frameon=True), plt.xlabel("Sample"); ``` -Each chain appears to have a different sample mean and we can also see that there is a great deal of autocorrelation across chains, manifest as long-range trends over sampling iterations. 
Some of the chains may have divergences as well, lending further evidence to the claim that using MCMC for this model as shown is suboptimal. +Each chain appears to have a different sample mean and we can also see that there is a great deal of autocorrelation across chains, manifest as long-range trends over sampling iterations. Some of the chains may have divergences as well, lending further evidence to the claim that using MCMC for this model as shown is suboptimal. One of the primary drawbacks for this model formulation is its lack of identifiability. With this model representation, only the product $WF$ matters for the likelihood of $X$, so $P(X|W, F) = P(X|W\Omega, \Omega^{-1}F)$ for any invertible matrix $\Omega$. While the priors on $W$ and $F$ constrain $|\Omega|$ to be neither too large or too small, factors and loadings can still be rotated, reflected, and/or permuted *without changing the model likelihood*. Expect it to happen between runs of the sampler, or even for the parametrization to "drift" within run, and to produce the highly autocorrelated $W$ traceplot above. diff --git a/myst_nbs/case_studies/hierarchical_partial_pooling.myst.md b/myst_nbs/case_studies/hierarchical_partial_pooling.myst.md index 46e8438a6..44bb2a1ad 100644 --- a/myst_nbs/case_studies/hierarchical_partial_pooling.myst.md +++ b/myst_nbs/case_studies/hierarchical_partial_pooling.myst.md @@ -14,7 +14,7 @@ kernelspec: # Hierarchical Partial Pooling :::{post} Oct 07, 2021 -:tags: hierarchical model, pymc.Beta, pymc.Binomial, pymc.Deterministic, pymc.Exponential, pymc.Model, pymc.Uniform, pymc3.Beta, pymc3.Binomial, pymc3.Deterministic, pymc3.Exponential, pymc3.Model, pymc3.Uniform +:tags: hierarchical model, :category: intermediate ::: @@ -24,7 +24,7 @@ Suppose you are tasked with estimating baseball batting skills for several playe So, suppose a player came to bat only 4 times, and never hit the ball. Are they a bad player? 
-As a disclaimer, the author of this notebook assumes little to non-existent knowledge about baseball and its rules. The number of times at bat in his entire life is around "4". +As a disclaimer, the author of this notebook assumes little to non-existent knowledge about baseball and its rules. The number of times at bat in his entire life is around "4". ## Data diff --git a/myst_nbs/case_studies/item_response_nba.myst.md b/myst_nbs/case_studies/item_response_nba.myst.md index c07b68c3c..402b8f25e 100644 --- a/myst_nbs/case_studies/item_response_nba.myst.md +++ b/myst_nbs/case_studies/item_response_nba.myst.md @@ -15,7 +15,7 @@ kernelspec: # NBA Foul Analysis with Item Response Theory :::{post} Apr 17, 2022 -:tags: hierarchical model, case study, generalized linear model +:tags: hierarchical model, case study, generalized linear model :category: intermediate, tutorial :author: Austin Rochford, Lorenzo Toniazzi ::: @@ -465,7 +465,7 @@ else: ``` These plots suggest that scoring high in `theta` does not correlate with high or low scores in `b`. Moreover, with a little knowledge of NBA basketball, one can visually note that a higher score in `b` is expected from players playing center or forward rather than guards or point guards. -Given the last observation, we decide to plot a histogram for the occurrence of different positions for top disadvantaged (`theta`) and committing (`b`) players. Interestingly, we see below that the largest share of best disadvantaged players are guards, meanwhile, the largest share of best committing players are centers (and at the same time a very small share of guards). +Given the last observation, we decide to plot a histogram for the occurrence of different positions for top disadvantaged (`theta`) and committing (`b`) players. Interestingly, we see below that the largest share of best disadvantaged players are guards, meanwhile, the largest share of best committing players are centers (and at the same time a very small share of guards). 
```{code-cell} ipython3 :tags: [] diff --git a/myst_nbs/case_studies/mediation_analysis.myst.md b/myst_nbs/case_studies/mediation_analysis.myst.md index c7c8f60d7..e64528a42 100644 --- a/myst_nbs/case_studies/mediation_analysis.myst.md +++ b/myst_nbs/case_studies/mediation_analysis.myst.md @@ -15,7 +15,7 @@ kernelspec: # Bayesian mediation analysis :::{post} February, 2022 -:tags: mediation, path analysis, regression +:tags: mediation, path analysis, regression :category: beginner :author: Benjamin T. Vincent ::: @@ -206,7 +206,7 @@ As we can see, the posterior distributions over the direct effects are near-iden +++ ## Parameter estimation versus hypothesis testing -This notebook has focused on the approach of Bayesian parameter estimation. For many situations this is entirely sufficient, and more information can be found in {cite:t}`yuan2009bayesian`. It will tell us, amongst other things, what our posterior beliefs are in the direct effects, indirect effects, and total effects. And we can use those posterior beliefs to conduct posterior predictive checks to visually check how well the model accounts for the data. +This notebook has focused on the approach of Bayesian parameter estimation. For many situations this is entirely sufficient, and more information can be found in {cite:t}`yuan2009bayesian`. It will tell us, amongst other things, what our posterior beliefs are in the direct effects, indirect effects, and total effects. And we can use those posterior beliefs to conduct posterior predictive checks to visually check how well the model accounts for the data. However, depending upon the use case it may be preferable to test hypotheses about the presence or absence of an indirect effect ($x \rightarrow m \rightarrow y$) for example. In this case, it may be more appropriate to take a more explicit hypothesis testing approach to see examine the relative credibility of the mediation model as compared to a simple direct effect model (i.e. 
$y_i = \mathrm{Normal}(i_{Y*} + c \cdot x_i, \sigma_{Y*})$). Readers are referred to {cite:t}`nuijten2015default` for a hypothesis testing approach to Bayesian mediation models and to {cite:t}`kruschke2011bayesian` for more information on parameter estimation versus hypothesis testing. diff --git a/myst_nbs/case_studies/moderation_analysis.myst.md b/myst_nbs/case_studies/moderation_analysis.myst.md index 4b1ea20a6..a8b78233c 100644 --- a/myst_nbs/case_studies/moderation_analysis.myst.md +++ b/myst_nbs/case_studies/moderation_analysis.myst.md @@ -15,7 +15,7 @@ kernelspec: # Bayesian moderation analysis :::{post} March, 2022 -:tags: moderation, path analysis, +:tags: moderation, path analysis, :category: beginner :author: Benjamin T. Vincent ::: diff --git a/myst_nbs/case_studies/multilevel_modeling.myst.md b/myst_nbs/case_studies/multilevel_modeling.myst.md index 00191cdd6..642042040 100644 --- a/myst_nbs/case_studies/multilevel_modeling.myst.md +++ b/myst_nbs/case_studies/multilevel_modeling.myst.md @@ -15,7 +15,7 @@ kernelspec: # A Primer on Bayesian Methods for Multilevel Modeling :::{post} 27 February, 2022 -:tags: hierarchical model, case study +:tags: hierarchical model, case study :category: intermediate :author: Chris Fonnesbeck, Colin Carroll, Alex Andorra, Oriol Abril, Farhan Reynaldo ::: @@ -1053,7 +1053,7 @@ az.summary(contextual_effect_trace, var_names=["g"], round_to=2) So we might infer from this that counties with higher proportions of houses without basements tend to have higher baseline levels of radon. This seems to be new, as up to this point we saw that `floor` was *negatively* associated with radon levels. But remember this was at the household-level: radon tends to be higher in houses with basements. But at the county-level it seems that the less basements on average in the county, the more radon. So it's not that contradictory. 
What's more, the estimate for $\gamma_2$ is quite uncertain and overlaps with zero, so it's possible that the relationship is not that strong. And finally, let's note that $\gamma_2$ estimates something else than uranium's effect, as this is already taken into account by $\gamma_1$ -- it answers the question "once we know uranium level in the county, is there any value in learning about the proportion of houses without basements?". -All of this is to say that we shouldn't interpret this causally: there is no credible mechanism by which a basement (or absence thereof) *causes* radon emissions. More probably, our causal graph is missing something: a confounding variable, one that influences both basement construction and radon levels, is lurking somewhere in the dark... Perhaps is it the type of soil, which might influence what type of structures are built *and* the level of radon? Maybe adding this to our model would help with causal inference. +All of this is to say that we shouldn't interpret this causally: there is no credible mechanism by which a basement (or absence thereof) *causes* radon emissions. More probably, our causal graph is missing something: a confounding variable, one that influences both basement construction and radon levels, is lurking somewhere in the dark... Perhaps is it the type of soil, which might influence what type of structures are built *and* the level of radon? Maybe adding this to our model would help with causal inference. 
+++ diff --git a/myst_nbs/case_studies/probabilistic_matrix_factorization.myst.md b/myst_nbs/case_studies/probabilistic_matrix_factorization.myst.md index 114a29f4c..9fc9d1b28 100644 --- a/myst_nbs/case_studies/probabilistic_matrix_factorization.myst.md +++ b/myst_nbs/case_studies/probabilistic_matrix_factorization.myst.md @@ -14,7 +14,7 @@ kernelspec: # Probabilistic Matrix Factorization for Making Personalized Recommendations :::{post} Sept 20, 2021 -:tags: case study, pymc3.Model, pymc3.MvNormal, pymc3.Normal +:tags: case study, :category: intermediate ::: @@ -721,7 +721,7 @@ print("Improvement from MAP: %.5f" % (pmf_map_rmse - final_test_rmse) print("Improvement from Mean of Means: %.5f" % (baselines["mom"] - final_test_rmse)) ``` -We have some interesting results here. As expected, our MCMC sampler provides lower error on the training set. However, it seems it does so at the cost of overfitting the data. This results in a decrease in test RMSE as compared to the MAP, even though it is still much better than our best baseline. So why might this be the case? Recall that we used point estimates for our precision parameters $\alpha_U$ and $\alpha_V$ and we chose a fixed precision $\alpha$. It is quite likely that by doing this, we constrained our posterior in a way that biased it towards the training data. In reality, the variance in the user ratings and the movie ratings is unlikely to be equal to the means of sample variances we used. Also, the most reasonable observation precision $\alpha$ is likely different as well. +We have some interesting results here. As expected, our MCMC sampler provides lower error on the training set. However, it seems it does so at the cost of overfitting the data. This results in a decrease in test RMSE as compared to the MAP, even though it is still much better than our best baseline. So why might this be the case? 
Recall that we used point estimates for our precision parameters $\alpha_U$ and $\alpha_V$ and we chose a fixed precision $\alpha$. It is quite likely that by doing this, we constrained our posterior in a way that biased it towards the training data. In reality, the variance in the user ratings and the movie ratings is unlikely to be equal to the means of sample variances we used. Also, the most reasonable observation precision $\alpha$ is likely different as well. +++ diff --git a/myst_nbs/case_studies/putting_workflow.myst.md b/myst_nbs/case_studies/putting_workflow.myst.md index ed7187c14..c78ab9438 100644 --- a/myst_nbs/case_studies/putting_workflow.myst.md +++ b/myst_nbs/case_studies/putting_workflow.myst.md @@ -18,7 +18,7 @@ substitutions: # Model building and expansion for golf putting :::{post} Apr 2, 2022 -:tags: bayesian workflow, model expansion, sports +:tags: bayesian workflow, model expansion, sports :category: intermediate, how-to :author: Colin Carroll, Marco Gorelli, Oriol Abril-Pla ::: diff --git a/myst_nbs/case_studies/regression_discontinuity.myst.md b/myst_nbs/case_studies/regression_discontinuity.myst.md index 940692df0..4a8f585d9 100644 --- a/myst_nbs/case_studies/regression_discontinuity.myst.md +++ b/myst_nbs/case_studies/regression_discontinuity.myst.md @@ -16,7 +16,7 @@ kernelspec: # Regression discontinuity design analysis :::{post} April, 2022 -:tags: regression, causal inference, quasi experimental design, counterfactuals +:tags: regression, causal inference, quasi experimental design, counterfactuals :category: beginner, explanation :author: Benjamin T. Vincent ::: @@ -219,7 +219,7 @@ The blue shaded region shows the 95% credible region of the expected value of th The orange shaded region shows the 95% credible region of the expected value of the post-test measurement for a range of possible pre-test measures in the case of treatment. -Both are actually very interesting as examples of counterfactual inference. 
We did not observe any units that were untreated below the threshold, nor any treated units above the threshold. But assuming our model is a good description of reality, we can ask the counterfactual questions "What if a unit above the threshold was treated?" and "What if a unit below the threshold was treated?" +Both are actually very interesting as examples of counterfactual inference. We did not observe any units that were untreated below the threshold, nor any treated units above the threshold. But assuming our model is a good description of reality, we can ask the counterfactual questions "What if a unit above the threshold was treated?" and "What if a unit below the threshold was treated?" +++ diff --git a/myst_nbs/case_studies/rugby_analytics.myst.md b/myst_nbs/case_studies/rugby_analytics.myst.md index a5e645958..a316ec6bc 100644 --- a/myst_nbs/case_studies/rugby_analytics.myst.md +++ b/myst_nbs/case_studies/rugby_analytics.myst.md @@ -16,7 +16,7 @@ substitutions: # A Hierarchical model for Rugby prediction :::{post} 19 Mar, 2022 -:tags: hierarchical, sports +:tags: hierarchical, sports :category: intermediate, how-to :author: Peadar Coyle, Meenal Jhajharia, Oriol Abril-Pla ::: @@ -468,7 +468,7 @@ We see according to this model that Ireland finishes with the most points about > As an Irish rugby fan - I like this model. However it indicates some problems with shrinkage, and bias. Since recent form suggests England will win. -Nevertheless the point of this model was to illustrate how a Hierarchical model could be applied to a sports analytics problem, and illustrate the power of PyMC. +Nevertheless the point of this model was to illustrate how a Hierarchical model could be applied to a sports analytics problem, and illustrate the power of PyMC. 
+++ diff --git a/myst_nbs/case_studies/spline.myst.md b/myst_nbs/case_studies/spline.myst.md index 237d53c92..f13abc8ad 100644 --- a/myst_nbs/case_studies/spline.myst.md +++ b/myst_nbs/case_studies/spline.myst.md @@ -15,7 +15,7 @@ kernelspec: # Splines :::{post} June 4, 2022 -:tags: patsy, regression, spline +:tags: patsy, regression, spline :category: beginner :author: Joshua Cook ::: diff --git a/myst_nbs/case_studies/wrapping_jax_function.myst.md b/myst_nbs/case_studies/wrapping_jax_function.myst.md index 077320d35..add11f1c0 100644 --- a/myst_nbs/case_studies/wrapping_jax_function.myst.md +++ b/myst_nbs/case_studies/wrapping_jax_function.myst.md @@ -17,7 +17,7 @@ substitutions: # How to wrap a JAX function for use in PyMC :::{post} Mar 24, 2022 -:tags: Aesara, hidden markov model, JAX +:tags: Aesara, hidden markov model, JAX :category: advanced, how-to :author: Ricardo Vieira ::: diff --git a/myst_nbs/diagnostics_and_criticism/Bayes_factor.myst.md b/myst_nbs/diagnostics_and_criticism/Bayes_factor.myst.md index 2df4b4f34..095c49353 100644 --- a/myst_nbs/diagnostics_and_criticism/Bayes_factor.myst.md +++ b/myst_nbs/diagnostics_and_criticism/Bayes_factor.myst.md @@ -14,7 +14,7 @@ kernelspec: (Bayes_factor)= # Bayes Factors and Marginal Likelihood :::{post} Jun 1, 2022 -:tags: Bayes Factors, model comparison +:tags: Bayes Factors, model comparison :category: beginner, explanation :author: Osvaldo Martin ::: diff --git a/myst_nbs/diagnostics_and_criticism/sampler-stats.myst.md b/myst_nbs/diagnostics_and_criticism/sampler-stats.myst.md index ef18c9d38..ad69e24ea 100644 --- a/myst_nbs/diagnostics_and_criticism/sampler-stats.myst.md +++ b/myst_nbs/diagnostics_and_criticism/sampler-stats.myst.md @@ -21,7 +21,7 @@ sampler is doing. For this purpose some samplers export statistics for each generated sample. 
:::{post} May 31, 2022 -:tags: diagnostics +:tags: diagnostics :category: beginner :author: Meenal Jhajharia, Christian Luhmann ::: diff --git a/myst_nbs/gaussian_processes/GP-MaunaLoa.myst.md b/myst_nbs/gaussian_processes/GP-MaunaLoa.myst.md index 10b729bb2..0e8f2a827 100644 --- a/myst_nbs/gaussian_processes/GP-MaunaLoa.myst.md +++ b/myst_nbs/gaussian_processes/GP-MaunaLoa.myst.md @@ -17,7 +17,7 @@ substitutions: # Gaussian Process for CO2 at Mauna Loa :::{post} April, 2022 -:tags: gaussian process, CO2 +:tags: gaussian process, CO2 :category: intermediate :author: Bill Engels, Chris Fonnesbeck ::: @@ -32,7 +32,7 @@ This Gaussian Process (GP) example shows how to: +++ -Since the late 1950's, the Mauna Loa observatory has been taking regular measurements of atmospheric CO$_2$. In the late 1950's Charles Keeling invented a accurate way to measure atmospheric CO$_2$ concentration. +Since the late 1950's, the Mauna Loa observatory has been taking regular measurements of atmospheric CO$_2$. In the late 1950's Charles Keeling invented a accurate way to measure atmospheric CO$_2$ concentration. Since then, CO$_2$ measurements have been recorded nearly continuously at the Mauna Loa observatory. Check out last hours measurement result [here](https://www.co2.earth/daily-co2). 
![](http://sites.gsu.edu/geog1112/files/2014/07/MaunaLoaObservatory_small-2g29jvt.png) diff --git a/myst_nbs/generalized_linear_models/GLM-binomial-regression.myst.md b/myst_nbs/generalized_linear_models/GLM-binomial-regression.myst.md index 32db103b7..a8bdf8bd5 100644 --- a/myst_nbs/generalized_linear_models/GLM-binomial-regression.myst.md +++ b/myst_nbs/generalized_linear_models/GLM-binomial-regression.myst.md @@ -15,7 +15,7 @@ kernelspec: # Binomial regression :::{post} February, 2022 -:tags: binomial regression, generalized linear model, pymc.Binomial, pymc.ConstantData, pymc.Deterministic, pymc.Model, pymc.Normal, pymc3.Binomial, pymc3.ConstantData, pymc3.Deterministic, pymc3.Model, pymc3.Normal +:tags: binomial regression, generalized linear model, :category: beginner :author: Benjamin T. Vincent ::: diff --git a/myst_nbs/generalized_linear_models/GLM-hierarchical-binomial-model.myst.md b/myst_nbs/generalized_linear_models/GLM-hierarchical-binomial-model.myst.md index 375eff3bf..73c9bdba0 100644 --- a/myst_nbs/generalized_linear_models/GLM-hierarchical-binomial-model.myst.md +++ b/myst_nbs/generalized_linear_models/GLM-hierarchical-binomial-model.myst.md @@ -13,7 +13,7 @@ kernelspec: # Hierarchical Binomial Model: Rat Tumor Example :::{post} Nov 11, 2021 -:tags: generalized linear model, hierarchical model +:tags: generalized linear model, hierarchical model :category: intermediate :author: Demetri Pananos, Junpeng Lao, Raúl Maldonado, Farhan Reynaldo ::: diff --git a/myst_nbs/generalized_linear_models/GLM-model-selection.myst.md b/myst_nbs/generalized_linear_models/GLM-model-selection.myst.md index 9a721fbd1..05f136850 100644 --- a/myst_nbs/generalized_linear_models/GLM-model-selection.myst.md +++ b/myst_nbs/generalized_linear_models/GLM-model-selection.myst.md @@ -15,7 +15,7 @@ kernelspec: # GLM: Model Selection :::{post} Jan 8, 2022 -:tags: cross validation, generalized linear models, loo, model comparison, pymc3.HalfCauchy, pymc3.Model, pymc3.Normal, waic 
+:tags: cross validation, generalized linear models, loo, model comparison, waic :category: intermediate :author: Jon Sedar, Junpeng Lao, Abhipsha Das, Oriol Abril-Pla ::: @@ -45,7 +45,7 @@ plt.rcParams["figure.constrained_layout.use"] = False ``` ## Introduction -A fairly minimal reproducible example of Model Selection using WAIC, and LOO as currently implemented in PyMC3. +A fairly minimal reproducible example of Model Selection using WAIC, and LOO as currently implemented in PyMC3. This example creates two toy datasets under linear and quadratic models, and then tests the fit of a range of polynomial linear models upon those datasets by using Widely Applicable Information Criterion (WAIC), and leave-one-out (LOO) cross-validation using Pareto-smoothed importance sampling (PSIS). diff --git a/myst_nbs/generalized_linear_models/GLM-robust-with-outlier-detection.myst.md b/myst_nbs/generalized_linear_models/GLM-robust-with-outlier-detection.myst.md index 8def99cb3..8e422c8d5 100644 --- a/myst_nbs/generalized_linear_models/GLM-robust-with-outlier-detection.myst.md +++ b/myst_nbs/generalized_linear_models/GLM-robust-with-outlier-detection.myst.md @@ -17,7 +17,7 @@ substitutions: # GLM: Robust Regression using Custom Likelihood for Outlier Classification :::{post} 17 Nov, 2021 -:tags: pymc3.Bernoulli, pymc3.Data, pymc3.Deterministic, pymc3.DiscreteUniform, pymc3.Exponential, pymc3.GaussianRandomWalk, pymc3.HalfNormal, pymc3.InverseGamma, pymc3.Model, pymc3.Normal, pymc3.Poisson, pymc3.Potential, pymc3.Slice, pymc3.StudentT, pymc3.Uniform, regression, robust analysis +:tags: regression, robust analysis :category: intermediate :author: Jon Sedar, Thomas Wiecki, Raul Maldonado, Oriol Abril ::: @@ -613,7 +613,7 @@ _ = az.plot_trace(trc_hogg, var_names=rvs, compact=False); + However, at `target_accept = 0.9` (and increasing `tune` from 5000 to 10000), the traces exhibit fewer divergences and appear slightly better behaved. 
+ The traces for the inlier model `beta` parameters, and for outlier model parameter `y_est_out` (the mean) look reasonably converged + The traces for outlier model param `y_sigma_out` (the additional pooled variance) occasionally go a bit wild -+ It's interesting that `frac_outliers` is so dispersed: that's quite a flat distribution: suggests that there are a few datapoints where their inlier/outlier status is subjective ++ It's interesting that `frac_outliers` is so dispersed: that's quite a flat distribution: suggests that there are a few datapoints where their inlier/outlier status is subjective + Indeed as Thomas noted in his v2.0 Notebook, because we're explicitly modeling the latent label (inlier/outlier) as binary choice the sampler could have a problem - rewriting this model into a marginal mixture model would be better. +++ diff --git a/myst_nbs/generalized_linear_models/GLM-rolling-regression.myst.md b/myst_nbs/generalized_linear_models/GLM-rolling-regression.myst.md index 08bbe89c1..3b0667ebb 100644 --- a/myst_nbs/generalized_linear_models/GLM-rolling-regression.myst.md +++ b/myst_nbs/generalized_linear_models/GLM-rolling-regression.myst.md @@ -15,7 +15,7 @@ kernelspec: # Rolling Regression :::{post} June, 2022 -:tags: generalized linear model, regression +:tags: generalized linear model, regression :category: intermediate :author: Thomas Wiecki ::: diff --git a/myst_nbs/generalized_linear_models/GLM-simpsons-paradox.myst.md b/myst_nbs/generalized_linear_models/GLM-simpsons-paradox.myst.md index c793944fc..b186d7849 100644 --- a/myst_nbs/generalized_linear_models/GLM-simpsons-paradox.myst.md +++ b/myst_nbs/generalized_linear_models/GLM-simpsons-paradox.myst.md @@ -15,7 +15,7 @@ kernelspec: # Simpson's paradox and mixed models :::{post} March, 2022 -:tags: regression, hierarchical model, linear model, multi level model, posterior predictive, Simpson's paradox +:tags: regression, hierarchical model, linear model, multi level model, posterior predictive, 
Simpson's paradox :category: beginner :author: Benjamin T. Vincent ::: diff --git a/myst_nbs/generalized_linear_models/GLM-truncated-censored-regression.myst.md b/myst_nbs/generalized_linear_models/GLM-truncated-censored-regression.myst.md index ff5afef7b..1f38861e5 100644 --- a/myst_nbs/generalized_linear_models/GLM-truncated-censored-regression.myst.md +++ b/myst_nbs/generalized_linear_models/GLM-truncated-censored-regression.myst.md @@ -15,7 +15,7 @@ kernelspec: # Bayesian regression with truncated or censored data :::{post} January, 2022 -:tags: censored, censoring, generalized linear model, pymc3.Censored, pymc3.HalfNormal, pymc3.Model, pymc3.Normal, pymc3.TruncatedNormal, regression, truncated, truncation +:tags: censored, censoring, generalized linear model, regression, truncated, truncation :category: beginner :author: Benjamin T. Vincent ::: @@ -356,7 +356,7 @@ This brings an end to our guide on truncated and censored data and truncated and ## Further topics It is also possible to treat the bounds as unknown latent parameters. If these are not known exactly and it is possible to fomulate a prior over these bounds, then it would be possible to infer what the bounds are. This could be argued as overkill however - depending on your data analysis context it may be entirely sufficient to extract 'good enough' point estimates of the bounds in order to get reasonable regression estimates. -The censored regression model presented above takes one particular approach, and there are others. For example, it did not attempt to infer posterior beliefs over the true latent `y` values of the censored data. It is possible to build censored regression models which do impute these censored `y` values, but we did not address that here as the topic of [imputation](https://en.wikipedia.org/wiki/Imputation_(statistics)) deserves its own focused treatment. 
The PyMC {ref}`censored_data` example also covers this topic, with a particular {ref}`example model to impute censored data `. +The censored regression model presented above takes one particular approach, and there are others. For example, it did not attempt to infer posterior beliefs over the true latent `y` values of the censored data. It is possible to build censored regression models which do impute these censored `y` values, but we did not address that here as the topic of [imputation](https://en.wikipedia.org/wiki/Imputation_(statistics)) deserves its own focused treatment. The PyMC {ref}`censored_data` example also covers this topic, with a particular {ref}`example model to impute censored data `. +++ diff --git a/myst_nbs/howto/data_container.myst.md b/myst_nbs/howto/data_container.myst.md index 7b380783e..8c67811e0 100644 --- a/myst_nbs/howto/data_container.myst.md +++ b/myst_nbs/howto/data_container.myst.md @@ -15,7 +15,7 @@ kernelspec: # Using shared variables (`Data` container adaptation) :::{post} Dec 16, 2021 -:tags: posterior predictive, predictions, pymc3.Bernoulli, pymc3.Data, pymc3.Deterministic, pymc3.HalfNormal, pymc3.Model, pymc3.Normal, shared data +:tags: posterior predictive, predictions, shared data :category: beginner :author: Juan Martin Loyola, Kavya Jaiswal, Oriol Abril ::: diff --git a/myst_nbs/howto/lasso_block_update.myst.md b/myst_nbs/howto/lasso_block_update.myst.md index 5aa47ea25..2bbd83671 100644 --- a/myst_nbs/howto/lasso_block_update.myst.md +++ b/myst_nbs/howto/lasso_block_update.myst.md @@ -15,7 +15,7 @@ kernelspec: # Lasso regression with block updating :::{post} Feb 10, 2022 -:tags: pymc3.Exponential, pymc3.Laplace, pymc3.Metropolis, pymc3.Model, pymc3.Normal, pymc3.Slice, pymc3.Uniform, regression +:tags: regression :category: beginner :author: Chris Fonnesbeck, Raul Maldonado, Michael Osthege, Thomas Wiecki, Lorenzo Toniazzi ::: diff --git a/myst_nbs/mixture_models/dirichlet_mixture_of_multinomials.myst.md 
b/myst_nbs/mixture_models/dirichlet_mixture_of_multinomials.myst.md index 47ce4864f..51a15e64a 100644 --- a/myst_nbs/mixture_models/dirichlet_mixture_of_multinomials.myst.md +++ b/myst_nbs/mixture_models/dirichlet_mixture_of_multinomials.myst.md @@ -15,7 +15,7 @@ kernelspec: # Dirichlet mixtures of multinomials :::{post} Jan 8, 2022 -:tags: mixture model, pymc3.Dirichlet, pymc3.DirichletMultinomial, pymc3.Lognormal, pymc3.Model, pymc3.Multinomial +:tags: mixture model, :category: advanced :author: Byron J. Smith, Abhipsha Das, Oriol Abril-Pla ::: diff --git a/myst_nbs/mixture_models/dp_mix.myst.md b/myst_nbs/mixture_models/dp_mix.myst.md index 11c070b94..e97d5af6b 100644 --- a/myst_nbs/mixture_models/dp_mix.myst.md +++ b/myst_nbs/mixture_models/dp_mix.myst.md @@ -15,7 +15,7 @@ kernelspec: # Dirichlet process mixtures for density estimation :::{post} Sept 16, 2021 -:tags: mixture model, pymc3.Beta, pymc3.Deterministic, pymc3.Gamma, pymc3.Mixture, pymc3.Model, pymc3.Normal, pymc3.NormalMixture +:tags: mixture model, :category: advanced :author: Austin Rochford, Abhipsha Das ::: diff --git a/myst_nbs/mixture_models/gaussian_mixture_model.myst.md b/myst_nbs/mixture_models/gaussian_mixture_model.myst.md index 0d04e8f35..1546f2d5b 100644 --- a/myst_nbs/mixture_models/gaussian_mixture_model.myst.md +++ b/myst_nbs/mixture_models/gaussian_mixture_model.myst.md @@ -15,7 +15,7 @@ kernelspec: # Gaussian Mixture Model :::{post} April, 2022 -:tags: mixture model, classification +:tags: mixture model, classification :category: beginner :author: Abe Flaxman ::: diff --git a/myst_nbs/mixture_models/marginalized_gaussian_mixture_model.myst.md b/myst_nbs/mixture_models/marginalized_gaussian_mixture_model.myst.md index 0dc30dc44..0ed1504df 100644 --- a/myst_nbs/mixture_models/marginalized_gaussian_mixture_model.myst.md +++ b/myst_nbs/mixture_models/marginalized_gaussian_mixture_model.myst.md @@ -16,7 +16,7 @@ kernelspec: # Marginalized Gaussian Mixture Model :::{post} Sept 18, 2021 
-:tags: mixture model, pymc3.Dirichlet, pymc3.Gamma, pymc3.Model, pymc3.Normal, pymc3.NormalMixture +:tags: mixture model, :category: intermediate ::: diff --git a/myst_nbs/samplers/SMC-ABC_Lotka-Volterra_example.myst.md b/myst_nbs/samplers/SMC-ABC_Lotka-Volterra_example.myst.md index e81ebca59..236ce5e3b 100644 --- a/myst_nbs/samplers/SMC-ABC_Lotka-Volterra_example.myst.md +++ b/myst_nbs/samplers/SMC-ABC_Lotka-Volterra_example.myst.md @@ -14,7 +14,7 @@ kernelspec: (ABC_introduction)= # Approximate Bayesian Computation :::{post} May 31, 2022 -:tags: SMC, ABC +:tags: SMC, ABC :category: beginner, explanation ::: diff --git a/myst_nbs/samplers/SMC2_gaussians.myst.md b/myst_nbs/samplers/SMC2_gaussians.myst.md index 6e4dc18f3..8d9b02910 100644 --- a/myst_nbs/samplers/SMC2_gaussians.myst.md +++ b/myst_nbs/samplers/SMC2_gaussians.myst.md @@ -14,7 +14,7 @@ kernelspec: # Sequential Monte Carlo :::{post} Oct 19, 2021 -:tags: SMC +:tags: SMC :category: beginner ::: diff --git a/myst_nbs/survival_analysis/censored_data.myst.md b/myst_nbs/survival_analysis/censored_data.myst.md index 029581afc..90b7c9793 100644 --- a/myst_nbs/survival_analysis/censored_data.myst.md +++ b/myst_nbs/survival_analysis/censored_data.myst.md @@ -15,7 +15,7 @@ kernelspec: # Censored Data Models :::{post} May, 2022 -:tags: censoring, survival analysis +:tags: censoring, survival analysis :category: intermediate, how-to :author: Luis Mario Domenzain ::: diff --git a/myst_nbs/time_series/Air_passengers-Prophet_with_Bayesian_workflow.myst.md b/myst_nbs/time_series/Air_passengers-Prophet_with_Bayesian_workflow.myst.md index 2dda3b1ae..1a7ec7fc4 100644 --- a/myst_nbs/time_series/Air_passengers-Prophet_with_Bayesian_workflow.myst.md +++ b/myst_nbs/time_series/Air_passengers-Prophet_with_Bayesian_workflow.myst.md @@ -15,7 +15,7 @@ kernelspec: # Air passengers - Prophet-like model :::{post} April, 2022 -:tags: time series, prophet +:tags: time series, prophet :category: intermediate :author: Marco Gorelli, 
Danh Phan ::: diff --git a/myst_nbs/time_series/MvGaussianRandomWalk_demo.myst.md b/myst_nbs/time_series/MvGaussianRandomWalk_demo.myst.md index 8f2ba4d40..77b375a46 100644 --- a/myst_nbs/time_series/MvGaussianRandomWalk_demo.myst.md +++ b/myst_nbs/time_series/MvGaussianRandomWalk_demo.myst.md @@ -13,7 +13,7 @@ kernelspec: # Multivariate Gaussian Random Walk :::{post} Sep 25, 2021 -:tags: linear model, pymc3.HalfNormal, pymc3.LKJCholeskyCov, pymc3.Model, pymc3.MvGaussianRandomWalk, pymc3.Normal, regression, time series +:tags: linear model, regression, time series :category: beginner :::