diff --git a/Chapter1_Introduction/Ch1_Introduction_TFP.ipynb b/Chapter1_Introduction/Ch1_Introduction_TFP.ipynb index daaa24ea..07aede9a 100644 --- a/Chapter1_Introduction/Ch1_Introduction_TFP.ipynb +++ b/Chapter1_Introduction/Ch1_Introduction_TFP.ipynb @@ -395,8 +395,8 @@ "cumulative_headcounts = tf.gather(tf.cumsum(coin_flip_data), num_trials)\n", "\n", "rv_observed_heads = tfp.distributions.Beta(\n", - " concentration1=tf.to_float(1 + cumulative_headcounts),\n", - " concentration0=tf.to_float(1 + num_trials - cumulative_headcounts))\n", + " concentration1=tf.cast(1 + cumulative_headcounts, tf.float32),\n", + " concentration0=tf.cast(1 + num_trials - cumulative_headcounts, tf.float32))\n", "\n", "probs_of_heads = tf.linspace(start=0., stop=1., num=100, name=\"linspace\")\n", "observed_probs_heads = tf.transpose(rv_observed_heads.prob(probs_of_heads[:, tf.newaxis]))" @@ -1207,8 +1207,8 @@ "source": [ "# Set the chain's start state.\n", "initial_chain_state = [\n", - " tf.to_float(tf.reduce_mean(count_data)) * tf.ones([], dtype=tf.float32, name=\"init_lambda1\"),\n", - " tf.to_float(tf.reduce_mean(count_data)) * tf.ones([], dtype=tf.float32, name=\"init_lambda2\"),\n", + " tf.cast(tf.reduce_mean(count_data), tf.float32) * tf.ones([], dtype=tf.float32, name=\"init_lambda1\"),\n", + " tf.cast(tf.reduce_mean(count_data), tf.float32) * tf.ones([], dtype=tf.float32, name=\"init_lambda2\"),\n", " 0.5 * tf.ones([], dtype=tf.float32, name=\"init_tau\"),\n", "]\n", "\n", @@ -1234,7 +1234,7 @@ "\n", " lambda_ = tf.gather(\n", " [lambda_1, lambda_2],\n", - " indices=tf.to_int32(tau * tf.to_float(tf.size(count_data)) <= tf.to_float(tf.range(tf.size(count_data)))))\n", + " indices=tf.to_int32(tau * tf.cast(tf.size(count_data), tf.float32) <= tf.cast(tf.range(tf.size(count_data)), tf.float32)))\n", " rv_observation = tfd.Poisson(rate=lambda_)\n", " \n", " return (\n", @@ -1277,7 +1277,7 @@ " state_gradients_are_stopped=True),\n", " 
bijector=unconstraining_bijectors))\n", "\n", - "tau_samples = tf.floor(posterior_tau * tf.to_float(tf.size(count_data)))\n", + "tau_samples = tf.floor(posterior_tau * tf.cast(tf.size(count_data), tf.float32))\n", "\n", "# tau_samples, lambda_1_samples, lambda_2_samples contain\n", "# N samples from the corresponding posterior distribution\n", @@ -1644,4 +1644,4 @@ ] } ] -} \ No newline at end of file +}