diff --git a/Chapter1_Introduction/Ch1_Introduction_TFP.ipynb b/Chapter1_Introduction/Ch1_Introduction_TFP.ipynb
index 07aede9a..9e67b92c 100644
--- a/Chapter1_Introduction/Ch1_Introduction_TFP.ipynb
+++ b/Chapter1_Introduction/Ch1_Introduction_TFP.ipynb
@@ -1208,7 +1208,7 @@
     "# Set the chain's start state.\n",
     "initial_chain_state = [\n",
     "    tf.cast(tf.reduce_mean(count_data), tf.float32) * tf.ones([], dtype=tf.float32, name=\"init_lambda1\"),\n",
-    "    tf.cast(tf.reduce_mean(count_data), tf.float32) * tf.ones([], dtype=tf.float32, name=\"init_lambda2\", tf.float32),\n",
+    "    tf.cast(tf.reduce_mean(count_data), tf.float32) * tf.ones([], dtype=tf.float32, name=\"init_lambda2\"),\n",
     "    0.5 * tf.ones([], dtype=tf.float32, name=\"init_tau\"),\n",
     "]\n",
     "\n",
@@ -1234,7 +1234,7 @@
     "\n",
     "    lambda_ = tf.gather(\n",
     "        [lambda_1, lambda_2],\n",
-    "        indices=tf.to_int32(tau * tf.cast(tf.size(count_data), tf.float32) <= tf.cast(tf.range(tf.size(count_data)), tf.float32)))\n",
+    "        indices=tf.cast(tau * tf.cast(tf.size(count_data), tf.float32) <= tf.cast(tf.range(tf.size(count_data)), tf.float32), tf.int32))\n",
     "    rv_observation = tfd.Poisson(rate=lambda_)\n",
     "    \n",
     "    return (\n",
@@ -1273,11 +1273,11 @@
     "            target_log_prob_fn=unnormalized_log_posterior,\n",
     "            num_leapfrog_steps=2,\n",
     "            step_size=step_size,\n",
-    "            step_size_update_fn=tfp.mcmc.make_simple_step_size_update_policy(),\n",
+    "            step_size_update_fn=tfp.mcmc.make_simple_step_size_update_policy(num_adaptation_steps=None),\n",
     "            state_gradients_are_stopped=True),\n",
     "        bijector=unconstraining_bijectors))\n",
     "\n",
-    "tau_samples = tf.floor(posterior_tau * tf.cast(tf.size(count_data)), tf.float32)\n",
+    "tau_samples = tf.floor(posterior_tau * tf.cast(tf.size(count_data), dtype=tf.float32))\n",
     "\n",
     "# tau_samples, lambda_1_samples, lambda_2_samples contain\n",
     "# N samples from the corresponding posterior distribution\n",
diff --git a/Chapter3_MCMC/Ch3_IntroMCMC_TFP.ipynb b/Chapter3_MCMC/Ch3_IntroMCMC_TFP.ipynb
index c1257def..5ccdf3df 100644
--- a/Chapter3_MCMC/Ch3_IntroMCMC_TFP.ipynb
+++ b/Chapter3_MCMC/Ch3_IntroMCMC_TFP.ipynb
@@ -461,8 +461,12 @@
     "\n",
     "# plotting details.\n",
     "x_ = y_ = np.linspace(.01, 5, 100)\n",
-    "likelihood_x_ = evaluate(tfd.Poisson(rate=x_).prob(data_[:, 0]))\n",
-    "likelihood_y_ = evaluate(tfd.Poisson(rate=y_).prob(data_[:, 1]))\n",
+    "likelihood_x = tfd.Poisson(rate=tf.expand_dims(x_, 1)).prob(data_[:, 0])\n",
+    "likelihood_x = tf.reduce_prod(likelihood_x, axis=1)\n",
+    "likelihood_x_ = evaluate(likelihood_x)\n",
+    "likelihood_y = tfd.Poisson(rate=tf.expand_dims(y_, 1)).prob(data_[:, 1])\n",
+    "likelihood_y = tf.reduce_prod(likelihood_y, axis=1)\n",
+    "likelihood_y_ = evaluate(likelihood_y)\n",
     "L_ = evaluate(tf.matmul(tf.expand_dims(likelihood_x_, 1), \n",
     "                        tf.expand_dims(likelihood_y_, 0)))"
    ]