@@ -3,8 +3,10 @@ jupytext:
   text_representation:
     extension: .md
     format_name: myst
+    format_version: 0.13
+    jupytext_version: 1.16.7
 kernelspec:
-  display_name: Python 3
+  display_name: Python 3 (ipykernel)
   language: python
   name: python3
 ---
@@ -29,10 +31,9 @@ kernelspec:
 
 In addition to what's in Anaconda, this lecture will need the following libraries:
 
-```{code-cell} ipython
----
-tags: [hide-output]
----
+```{code-cell} ipython3
+:tags: [hide-output]
+
 !pip install quantecon
 ```
 
@@ -54,9 +55,8 @@ Required knowledge: Familiarity with matrix manipulations, multivariate normal d
 
 We'll need the following imports:
 
-```{code-cell} ipython
+```{code-cell} ipython3
 import matplotlib.pyplot as plt
-plt.rcParams["figure.figsize"] = (11, 5) #set default figure size
 from scipy import linalg
 import numpy as np
 import matplotlib.cm as cm
@@ -122,10 +122,9 @@ $2 \times 2$ covariance matrix. In our simulations, we will suppose that
 
 This density $p(x)$ is shown below as a contour map, with the center of the red ellipse being equal to $\hat x$.
 
-```{code-cell} python3
----
-tags: [output_scroll]
----
+```{code-cell} ipython3
+:tags: [output_scroll]
+
 # Set up the Gaussian prior density p
 Σ = [[0.4, 0.3], [0.3, 0.45]]
 Σ = np.matrix(Σ)
@@ -186,7 +185,7 @@ def bivariate_normal(x, y, σ_x=1.0, σ_y=1.0, μ_x=0.0, μ_y=0.0, σ_xy=0.0):
 
 def gen_gaussian_plot_vals(μ, C):
     "Z values for plotting the bivariate Gaussian N(μ, C)"
-    m_x, m_y = float(μ[0]), float(μ[1])
+    m_x, m_y = float(μ[0, 0]), float(μ[1, 0])
     s_x, s_y = np.sqrt(C[0, 0]), np.sqrt(C[1, 1])
     s_xy = C[0, 1]
     return bivariate_normal(X, Y, s_x, s_y, m_x, m_y, s_xy)
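A note on the `float(μ[0])` → `float(μ[0, 0])` change above: since version 1.25, NumPy deprecates implicit conversion of arrays with `ndim > 0` to scalars, and row-indexing an `np.matrix` returns a 1×1 sub-matrix rather than a scalar. A minimal sketch of the difference, assuming `μ` is a 2×1 `np.matrix` as elsewhere in the lecture (the values are made up):

```python
import numpy as np

μ = np.matrix([[0.2], [-0.2]])  # hypothetical 2x1 column vector

print(μ[0].shape)  # (1, 1): row-indexing an np.matrix yields a sub-matrix
print(μ[0, 0])     # scalar indexing yields a NumPy scalar
# float(μ[0]) still works but triggers a DeprecationWarning on NumPy >= 1.25;
# float(μ[0, 0]) converts a true scalar and is warning-free.
```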
@@ -213,15 +212,15 @@ The good news is that the missile has been located by our sensors, which report
 The next figure shows the original prior $p(x)$ and the new reported
 location $y$
 
-```{code-cell} python3
+```{code-cell} ipython3
 fig, ax = plt.subplots(figsize=(10, 8))
 ax.grid()
 
 Z = gen_gaussian_plot_vals(x_hat, Σ)
 ax.contourf(X, Y, Z, 6, alpha=0.6, cmap=cm.jet)
 cs = ax.contour(X, Y, Z, 6, colors="black")
 ax.clabel(cs, inline=1, fontsize=10)
-ax.text(float(y[0]), float(y[1]), "$y$", fontsize=20, color="black")
+ax.text(float(y[0].item()), float(y[1].item()), "$y$", fontsize=20, color="black")
 
 plt.show()
 ```
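The `.item()` edits in this and the later plotting hunks address the same scalar-conversion deprecation: `y` is a 2×1 matrix, so `y[0]` is a 1×1 array and `float(y[0])` warns on recent NumPy. A quick sketch (the observation values here are hypothetical):

```python
import numpy as np

y = np.matrix([[2.3], [-1.9]])  # hypothetical 2x1 observation vector

print(y[0].item())  # 2.3 -- .item() extracts the lone element as a Python float
```

Since `.item()` already returns a Python scalar, the surrounding `float(...)` is redundant, though harmless; keeping it makes the change minimally invasive.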
@@ -284,7 +283,7 @@ This new density $p(x \,|\, y) = N(\hat x^F, \Sigma^F)$ is shown in the next fig
 
 The original density is left in as contour lines for comparison
 
-```{code-cell} python3
+```{code-cell} ipython3
 fig, ax = plt.subplots(figsize=(10, 8))
 ax.grid()
 
@@ -298,7 +297,7 @@ new_Z = gen_gaussian_plot_vals(x_hat_F, Σ_F)
 cs2 = ax.contour(X, Y, new_Z, 6, colors="black")
 ax.clabel(cs2, inline=1, fontsize=10)
 ax.contourf(X, Y, new_Z, 6, alpha=0.6, cmap=cm.jet)
-ax.text(float(y[0]), float(y[1]), "$y$", fontsize=20, color="black")
+ax.text(float(y[0].item()), float(y[1].item()), "$y$", fontsize=20, color="black")
 
 plt.show()
 ```
@@ -391,7 +390,7 @@
 Q = 0.3 * \Sigma
 $$
 
-```{code-cell} python3
+```{code-cell} ipython3
 fig, ax = plt.subplots(figsize=(10, 8))
 ax.grid()
 
@@ -415,7 +414,7 @@ new_Z = gen_gaussian_plot_vals(new_x_hat, new_Σ)
 cs3 = ax.contour(X, Y, new_Z, 6, colors="black")
 ax.clabel(cs3, inline=1, fontsize=10)
 ax.contourf(X, Y, new_Z, 6, alpha=0.6, cmap=cm.jet)
-ax.text(float(y[0]), float(y[1]), "$y$", fontsize=20, color="black")
+ax.text(float(y[0].item()), float(y[1].item()), "$y$", fontsize=20, color="black")
 
 plt.show()
 ```
@@ -577,7 +576,7 @@ Your figure should -- modulo randomness -- look something like this
 :class: dropdown
 ```
 
-```{code-cell} python3
+```{code-cell} ipython3
 # Parameters
 θ = 10  # Constant value of state x_t
 A, C, G, H = 1, 0, 1, 1
@@ -598,7 +597,7 @@ xgrid = np.linspace(θ - 5, θ + 2, 200)
 
 for i in range(N):
     # Record the current predicted mean and variance
-    m, v = [float(z) for z in (kalman.x_hat, kalman.Sigma)]
+    m, v = [float(z) for z in (kalman.x_hat.item(), kalman.Sigma.item())]
     # Plot, update filter
     ax.plot(xgrid, norm.pdf(xgrid, loc=m, scale=np.sqrt(v)), label=f'$t={i}$')
     kalman.update(y[i])
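Same pattern once more: in this scalar exercise, `kalman.x_hat` and `kalman.Sigma` are stored as 1×1 arrays, so `.item()` unwraps them before plotting. A minimal sketch, assuming quantecon's `Kalman` and `LinearStateSpace` are set up as in the exercise (the prior values are illustrative):

```python
import numpy as np
from quantecon import Kalman, LinearStateSpace

A, C, G, H = 1, 0, 1, 1
ss = LinearStateSpace(A, C, G, H, mu_0=10)  # scalar state pinned at θ = 10
kalman = Kalman(ss, x_hat=8, Sigma=1)       # illustrative prior N(8, 1)

print(np.shape(kalman.x_hat))               # (1, 1) even in the scalar model
m, v = kalman.x_hat.item(), kalman.Sigma.item()  # plain Python scalars
```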
@@ -641,7 +640,7 @@ Your figure should show error erratically declining something like this
 :class: dropdown
 ```
 
-```{code-cell} python3
+```{code-cell} ipython3
 ϵ = 0.1
 θ = 10  # Constant value of state x_t
 A, C, G, H = 1, 0, 1, 1
@@ -657,7 +656,7 @@ y = y.flatten()
 
 for t in range(T):
     # Record the current predicted mean and variance and plot their densities
-    m, v = [float(temp) for temp in (kalman.x_hat, kalman.Sigma)]
+    m, v = [float(temp) for temp in (kalman.x_hat.item(), kalman.Sigma.item())]
 
     f = lambda x: norm.pdf(x, loc=m, scale=np.sqrt(v))
     integral, error = quad(f, θ - ϵ, θ + ϵ)
@@ -745,7 +744,7 @@ Observe how, after an initial learning period, the Kalman filter performs quite
 :class: dropdown
 ```
 
-```{code-cell} python3
+```{code-cell} ipython3
 # Define A, C, G, H
 G = np.identity(2)
 H = np.sqrt(0.5) * np.identity(2)