@@ -3,8 +3,10 @@ jupytext:
   text_representation:
     extension: .md
     format_name: myst
+    format_version: 0.13
+    jupytext_version: 1.14.5
 kernelspec:
-  display_name: Python 3
+  display_name: Python 3 (ipykernel)
   language: python
   name: python3
 ---
@@ -29,10 +31,9 @@ kernelspec:
 
 In addition to what's in Anaconda, this lecture will need the following libraries:
 
-```{code-cell} ipython
----
-tags: [hide-output]
----
+```{code-cell} ipython3
+:tags: [hide-output]
+
 !pip install --upgrade quantecon
 ```
 
@@ -79,7 +80,7 @@ In reading this lecture, please don't think that our decision-maker is paranoid
 
 Let's start with some imports:
 
-```{code-cell} ipython
+```{code-cell} ipython3
 import pandas as pd
 import numpy as np
 from scipy.linalg import eig
@@ -941,7 +942,7 @@ We compute value-entropy correspondences for two policies
 
 The code for producing the graph shown above, with blue being for the robust policy, is as follows
 
-```{code-cell} python3
+```{code-cell} ipython3
 # Model parameters
 
 a_0 = 100
@@ -987,7 +988,7 @@ def evaluate_policy(θ, F):
     as well as the entropy level.
     """
 
-    rlq = qe.robustlq.RBLQ(Q, R, A, B, C, β, θ)
+    rlq = qe.RBLQ(Q, R, A, B, C, β, θ)
     K_F, P_F, d_F, O_F, o_F = rlq.evaluate_F(F)
     x0 = np.array([[1.], [0.], [0.]])
     value = - x0.T @ P_F @ x0 - d_F
@@ -1044,11 +1045,11 @@ def value_and_entropy(emax, F, bw, grid_size=1000):
 
 
 # Compute the optimal rule
-optimal_lq = qe.lqcontrol.LQ(Q, R, A, B, C, beta=β)
+optimal_lq = qe.LQ(Q, R, A, B, C, beta=β)
 Po, Fo, do = optimal_lq.stationary_values()
 
 # Compute a robust rule given θ
-baseline_robust = qe.robustlq.RBLQ(Q, R, A, B, C, β, θ)
+baseline_robust = qe.RBLQ(Q, R, A, B, C, β, θ)
 Fb, Kb, Pb = baseline_robust.robust_rule()
 
 # Check the positive definiteness of worst-case covariance matrix to
@@ -1189,4 +1190,3 @@ latter is just $\hat P$.
 ```{hint}
 Use the fact that $\hat P = \mathcal B( \mathcal D( \hat P))$
 ```
-
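
The call-site changes above rely on `quantecon` exposing `LQ` and `RBLQ` at the package top level, so the fully qualified `qe.lqcontrol.LQ` and `qe.robustlq.RBLQ` paths are no longer needed. A minimal sketch of the shortened calls, with placeholder scalar parameters rather than the lecture's model:

```python
import numpy as np
import quantecon as qe

# Placeholder scalar parameters for illustration only -- not the lecture's model
A = np.array([[1.0]])      # state transition
B = np.array([[1.0]])      # control loading
C = np.array([[0.1]])      # shock loading
Q = np.array([[1.0]])      # control cost
R = np.array([[1.0]])      # state cost
β, θ = 0.95, 50.0          # discount factor, robustness penalty

# Optimal (non-robust) rule via the top-level LQ class
optimal_lq = qe.LQ(Q, R, A, B, C, beta=β)
Po, Fo, do = optimal_lq.stationary_values()

# Robust rule via the top-level RBLQ class
rlq = qe.RBLQ(Q, R, A, B, C, β, θ)
F, K, P = rlq.robust_rule()
```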