diff --git a/lectures/mccall_model.md b/lectures/mccall_model.md index a3c02487e..93f074215 100644 --- a/lectures/mccall_model.md +++ b/lectures/mccall_model.md @@ -661,6 +661,56 @@ Repeat a large number of times and take the average. Plot mean unemployment duration as a function of $c$ in `c_vals`. ``` +```{solution-start} mm_ex1 +:class: dropdown +``` + +Here's one solution + +```{code-cell} python3 +cdf = np.cumsum(q_default) + +@jit(nopython=True) +def compute_stopping_time(w_bar, seed=1234): + + np.random.seed(seed) + t = 1 + while True: + # Generate a wage draw + w = w_default[qe.random.draw(cdf)] + # Stop when the draw is above the reservation wage + if w >= w_bar: + stopping_time = t + break + else: + t += 1 + return stopping_time + +@jit(nopython=True) +def compute_mean_stopping_time(w_bar, num_reps=100000): + obs = np.empty(num_reps) + for i in range(num_reps): + obs[i] = compute_stopping_time(w_bar, seed=i) + return obs.mean() + +c_vals = np.linspace(10, 40, 25) +stop_times = np.empty_like(c_vals) +for i, c in enumerate(c_vals): + mcm = McCallModel(c=c) + w_bar = compute_reservation_wage_two(mcm) + stop_times[i] = compute_mean_stopping_time(w_bar) + +fig, ax = plt.subplots() + +ax.plot(c_vals, stop_times, label="mean unemployment duration") +ax.set(xlabel="unemployment compensation", ylabel="months") +ax.legend() + +plt.show() +``` + +```{solution-end} +``` ```{exercise-start} :label: mm_ex2 @@ -722,60 +772,6 @@ Once your code is working, investigate how the reservation wage changes with $c$ ```{exercise-end} ``` -## Solutions - -```{solution-start} mm_ex1 -:class: dropdown -``` - -Here's one solution - -```{code-cell} python3 -cdf = np.cumsum(q_default) - -@jit(nopython=True) -def compute_stopping_time(w_bar, seed=1234): - - np.random.seed(seed) - t = 1 - while True: - # Generate a wage draw - w = w_default[qe.random.draw(cdf)] - # Stop when the draw is above the reservation wage - if w >= w_bar: - stopping_time = t - break - else: - t += 1 - return stopping_time - -@jit(nopython=True) -def compute_mean_stopping_time(w_bar, num_reps=100000): - obs = np.empty(num_reps) - for i in range(num_reps): - obs[i] = compute_stopping_time(w_bar, seed=i) - return obs.mean() - -c_vals = np.linspace(10, 40, 25) -stop_times = np.empty_like(c_vals) -for i, c in enumerate(c_vals): - mcm = McCallModel(c=c) - w_bar = compute_reservation_wage_two(mcm) - stop_times[i] = compute_mean_stopping_time(w_bar) - -fig, ax = plt.subplots() - -ax.plot(c_vals, stop_times, label="mean unemployment duration") -ax.set(xlabel="unemployment compensation", ylabel="months") -ax.legend() - -plt.show() -``` - -```{solution-end} -``` - - ```{solution-start} mm_ex2 :class: dropdown ``` diff --git a/lectures/newton_method.md b/lectures/newton_method.md index 3a5a19a49..880dbdee4 100644 --- a/lectures/newton_method.md +++ b/lectures/newton_method.md @@ -3,8 +3,10 @@ jupytext: text_representation: extension: .md format_name: myst + format_version: 0.13 + jupytext_version: 1.14.4 kernelspec: - display_name: Python 3 + display_name: Python 3 (ipykernel) language: python name: python3 --- @@ -26,6 +28,11 @@ kernelspec: :depth: 2 ``` +```{seealso} +**GPU:** A version of this lecture which makes use of [jax](https://jax.readthedocs.io) to run the code +on a `GPU` is [available here](https://jax.quantecon.org/newtons_method.html) +``` + ## Overview Many economic problems involve finding [fixed @@ -65,23 +72,28 @@ approximation and Newton's method. 
Then we apply Newton's method to multi-dimensional settings to solve market for equilibria with multiple goods. -At the end of the lecture we leverage the power of JAX and automatic -differentiation to solve a very high-dimensional equilibrium problem. +At the end of the lecture we leverage the power of automatic +differentiation in [`autograd`](https://github.com/HIPS/autograd) to solve a very high-dimensional equilibrium problem + +```{code-cell} ipython3 +:tags: [hide-output] + +!pip install autograd +``` We use the following imports in this lecture -```{code-cell} python3 -import numpy as np +```{code-cell} ipython3 import matplotlib.pyplot as plt from collections import namedtuple from scipy.optimize import root -import jax -import jax.numpy as jnp +from autograd import jacobian +# Thinly-wrapped numpy to enable automatic differentiation +import autograd.numpy as np plt.rcParams["figure.figsize"] = (10, 5.7) ``` - ## Fixed Point Computation Using Newton's Method In this section we solve the fixed point of the law of motion for capital in @@ -126,13 +138,13 @@ $$ k^* = \left(\frac{s A}{δ}\right)^{1/(1 - α)} $$ Let's store our parameters in [`namedtuple`](https://docs.python.org/3/library/collections.html#collections.namedtuple) to help us keep our code clean and concise. -```{code-cell} python3 +```{code-cell} ipython3 SolowParameters = namedtuple("SolowParameters", ('A', 's', 'α', 'δ')) ``` This function creates a suitable `namedtuple` with default parameter values. -```{code-cell} python3 +```{code-cell} ipython3 def create_solow_params(A=2.0, s=0.3, α=0.3, δ=0.4): "Creates a Solow model parameterization with default values." return SolowParameters(A=A, s=s, α=α, δ=δ) @@ -140,7 +152,7 @@ def create_solow_params(A=2.0, s=0.3, α=0.3, δ=0.4): The next two functions implement the law of motion [](motion_law) and store the true fixed point $k^*$. -```{code-cell} python3 +```{code-cell} ipython3 def g(k, params): A, s, α, δ = params return A * s * k**α + (1 - δ) * k @@ -149,9 +161,10 @@ def exact_fixed_point(params): A, s, α, δ = params return ((s * A) / δ)**(1/(1 - α)) ``` + Here is a function to provide a 45 degree plot of the dynamics. -```{code-cell} python3 +```{code-cell} ipython3 def plot_45(params, ax, fontsize=14): k_min, k_max = 0.0, 3.0 @@ -184,14 +197,14 @@ def plot_45(params, ax, fontsize=14): Let's look at the 45 degree diagram for two parameterizations. -```{code-cell} python3 +```{code-cell} ipython3 params = create_solow_params() fig, ax = plt.subplots(figsize=(8, 8)) plot_45(params, ax) plt.show() ``` -```{code-cell} python3 +```{code-cell} ipython3 params = create_solow_params(α=0.05, δ=0.5) fig, ax = plt.subplots(figsize=(8, 8)) plot_45(params, ax) @@ -210,8 +223,7 @@ from some initial state $k_0$ using the law of motion. Here's a time series from a particular choice of $k_0$. - -```{code-cell} python3 +```{code-cell} ipython3 def compute_iterates(k_0, f, params, n=25): "Compute time series of length n generated by arbitrary function f." k = k_0 @@ -222,7 +234,7 @@ def compute_iterates(k_0, f, params, n=25): return k_iterates ``` -```{code-cell} python3 +```{code-cell} ipython3 params = create_solow_params() k_0 = 0.25 k_series = compute_iterates(k_0, g, params) @@ -237,7 +249,7 @@ plt.show() Let's see the output for a long time series. -```{code-cell} python3 +```{code-cell} ipython3 k_series = compute_iterates(k_0, g, params, n=10_000) k_star_approx = k_series[-1] k_star_approx @@ -246,11 +258,11 @@ k_star_approx This is close to the true value. 
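As a quick sanity check, we can measure the gap between this iterative approximation and the analytical fixed point computed by `exact_fixed_point`. (This cell is only an illustrative check; the name `k_star_exact` is introduced just for the comparison.)

```{code-cell} ipython3
# Illustrative check: distance between the iterative approximation
# and the analytical fixed point
k_star_exact = exact_fixed_point(params)
abs(k_star_approx - k_star_exact)
```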
(solved_k)= -```{code-cell} python3 + +```{code-cell} ipython3 k_star ``` - #### Newton's Method In general, when applying Newton's fixed point method to some function $g$, @@ -295,7 +307,7 @@ g'(k) = \alpha s A k^{\alpha-1} + (1-\delta) Let's define this: -```{code-cell} python3 +```{code-cell} ipython3 def Dg(k, params): A, s, α, δ = params return α * A * s * k**(α-1) + (1 - δ) @@ -303,14 +315,14 @@ def Dg(k, params): Here's a function $q$ representing [](newtons_method). -```{code-cell} python3 +```{code-cell} ipython3 def q(k, params): return (g(k, params) - Dg(k, params) * k) / (1 - Dg(k, params)) ``` Now let's plot some trajectories. -```{code-cell} python3 +```{code-cell} ipython3 def plot_trajectories(params, k0_a=0.8, # first initial condition k0_b=3.1, # second initial condition @@ -343,7 +355,7 @@ def plot_trajectories(params, plt.show() ``` -```{code-cell} python3 +```{code-cell} ipython3 params = create_solow_params() plot_trajectories(params) ``` @@ -396,7 +408,8 @@ x_{t+1} = x_t - \frac{ f(x_t) }{ f'(x_t) }, The following code implements the iteration [](oneD-newton) (first_newton_attempt)= -```{code-cell} python3 + +```{code-cell} ipython3 def newton(f, Df, x_0, tol=1e-7, max_iter=100_000): x = x_0 @@ -436,14 +449,14 @@ Any zero of $f$ is clearly a fixed point of $g$. Let's apply this idea to the Solow problem -```{code-cell} python3 +```{code-cell} ipython3 params = create_solow_params() k_star_approx_newton = newton(f=lambda x: g(x, params) - x, Df=lambda x: Dg(x, params) - 1, x_0=0.8) ``` -```{code-cell} python3 +```{code-cell} ipython3 k_star_approx_newton ``` @@ -534,12 +547,11 @@ $$ The function below calculates the excess demand for given parameters -```{code-cell} python3 +```{code-cell} ipython3 def e(p, A, b, c): return np.exp(- A @ p) + c - b * np.sqrt(p) ``` - Our default parameter values will be @@ -560,8 +572,7 @@ A = \begin{pmatrix} \end{pmatrix} $$ - -```{code-cell} python3 +```{code-cell} ipython3 A = np.array([ [0.5, 0.4], [0.8, 0.2] @@ -570,23 +581,20 @@ b = np.ones(2) c = np.ones(2) ``` -At a price level of $p = (1, 0.5)$, the excess demand is +At a price level of $p = (1, 0.5)$, the excess demand is - -```{code-cell} python3 +```{code-cell} ipython3 ex_demand = e((1.0, 0.5), A, b, c) print(f'The excess demand for good 0 is {ex_demand[0]:.3f} \n' f'The excess demand for good 1 is {ex_demand[1]:.3f}') ``` - Next we plot the two functions $e_0$ and $e_1$ on a grid of $(p_0, p_1)$ values, using contour surfaces and lines. We will use the following function to build the contour plots -```{code-cell} python3 - +```{code-cell} ipython3 def plot_excess_demand(ax, good=0, grid_size=100, grid_max=4, surface=True): # Create a 100x100 grid @@ -609,14 +617,16 @@ def plot_excess_demand(ax, good=0, grid_size=100, grid_max=4, surface=True): ``` Here's our plot of $e_0$: -```{code-cell} python3 + +```{code-cell} ipython3 fig, ax = plt.subplots() plot_excess_demand(ax, good=0) plt.show() ``` Here's our plot of $e_1$: -```{code-cell} python3 + +```{code-cell} ipython3 fig, ax = plt.subplots() plot_excess_demand(ax, good=1) plt.show() @@ -628,8 +638,7 @@ For a price vector $p$ such that $e_i(p)=0$ we know that good $i$ is in equilibr If these two contour lines cross at some price vector $p^*$, then $p^*$ is an equilibrium price vector. 
- -```{code-cell} python3 +```{code-cell} ipython3 fig, ax = plt.subplots(figsize=(10, 5.7)) for good in (0, 1): plot_excess_demand(ax, good=good, surface=False) @@ -645,27 +654,27 @@ To solve for $p^*$ more precisely, we use a zero-finding algorithm from `scipy.o We supply $p = (1, 1)$ as our initial guess. -```{code-cell} python3 +```{code-cell} ipython3 init_p = np.ones(2) ``` This uses the [modified Powell method](https://docs.scipy.org/doc/scipy/reference/optimize.root-hybr.html#optimize-root-hybr) to find the zero -```{code-cell} python3 +```{code-cell} ipython3 %%time solution = root(lambda p: e(p, A, b, c), init_p, method='hybr') ``` Here's the resulting value: -```{code-cell} python3 +```{code-cell} ipython3 p = solution.x p ``` This looks close to our guess from observing the figure. We can plug it back into $e$ to test that $e(p) \approx 0$: -```{code-cell} python3 +```{code-cell} ipython3 np.max(np.abs(e(p, A, b, c))) ``` @@ -686,8 +695,8 @@ J(p) = \end{pmatrix} $$ -```{code-cell} python3 -def jacobian(p, A, b, c): +```{code-cell} ipython3 +def jacobian_e(p, A, b, c): p_0, p_1 = p a_00, a_01 = A[0, :] a_10, a_11 = A[1, :] @@ -700,22 +709,21 @@ def jacobian(p, A, b, c): return np.array(J) ``` -```{code-cell} python3 +```{code-cell} ipython3 %%time solution = root(lambda p: e(p, A, b, c), init_p, - jac=lambda p: jacobian(p, A, b, c), + jac=lambda p: jacobian_e(p, A, b, c), method='hybr') ``` Now the solution is even more accurate (although, in this low-dimensional problem, the difference is quite small): -```{code-cell} python3 +```{code-cell} ipython3 p = solution.x np.max(np.abs(e(p, A, b, c))) ``` - #### Using Newton's Method Now let's use Newton's method to compute the equilibrium price using the multivariate version of Newton's method @@ -732,14 +740,14 @@ This is a multivariate version of [](oneD-newton) The iteration starts from some initial guess of the price vector $p_0$. -Here, instead of coding Jacobian by hand, We use the `jax.jacobian()` function to auto-differentiate and calculate the Jacobian. +Here, instead of coding Jacobian by hand, We use the `jacobian()` function in the `autograd` library to auto-differentiate and calculate the Jacobian. With only slight modification, we can generalize [our previous attempt](first_newton_attempt) to multi-dimensional problems -```{code-cell} python3 +```{code-cell} ipython3 def newton(f, x_0, tol=1e-5, max_iter=10): x = x_0 - q = jax.jit(lambda x: x - jnp.linalg.solve(jax.jacobian(f)(x), f(x))) + q = lambda x: x - np.linalg.solve(jacobian(f)(x), f(x)) error = tol + 1 n = 0 while error > tol: @@ -747,28 +755,28 @@ def newton(f, x_0, tol=1e-5, max_iter=10): if(n > max_iter): raise Exception('Max iteration reached without convergence') y = q(x) - if(any(jnp.isnan(y))): + if(any(np.isnan(y))): raise Exception('Solution not found with NaN generated') - error = jnp.linalg.norm(x - y) + error = np.linalg.norm(x - y) x = y print(f'iteration {n}, error = {error:.5f}') print('\n' + f'Result = {x} \n') return x ``` -```{code-cell} python3 +```{code-cell} ipython3 def e(p, A, b, c): - return jnp.exp(- jnp.dot(A, p)) + c - b * jnp.sqrt(p) + return np.exp(- np.dot(A, p)) + c - b * np.sqrt(p) ``` We find the algorithm terminates in 4 steps -```{code-cell} python3 +```{code-cell} ipython3 %%time -p = newton(lambda p: e(p, A, b, c), init_p).block_until_ready() +p = newton(lambda p: e(p, A, b, c), init_p) ``` -```{code-cell} python3 +```{code-cell} ipython3 np.max(np.abs(e(p, A, b, c))) ``` @@ -776,81 +784,62 @@ The result is very accurate. 
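As an optional sanity check — a small sketch assuming the objects defined above (`p`, `A`, `b`, `c` and `jacobian_e`) are still in scope — we can compare the auto-differentiated Jacobian with the hand-coded `jacobian_e` from the previous section, evaluated at the computed price vector. The names `J_auto` and `J_hand` are introduced only for this comparison.

```{code-cell} ipython3
# Illustrative check: autograd's Jacobian versus the hand-coded jacobian_e,
# both evaluated at the price vector p found by Newton's method
J_auto = jacobian(lambda q: e(q, A, b, c))(p)
J_hand = jacobian_e(p, A, b, c)
np.max(np.abs(J_auto - J_hand))
```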
With the larger overhead, the speed is not better than the optimized `scipy` function. -However, things will change when we move to higher dimensional problems. - - ### A High-Dimensional Problem -Our next step is to investigate a large market with 5,000 goods. +Our next step is to investigate a large market with 3,000 goods. -To handle this large problem we will use Google JAX. +A JAX version of this section using GPU accelerated linear algebra and +automatic differentiation is available [here](https://jax.quantecon.org/newtons_method.html#application) -The excess demand function is essentially the same, but now the matrix $A$ is $5000 \times 5000$ and the parameter vectors $b$ and $c$ are $5000 \times 1$. +The excess demand function is essentially the same, but now the matrix $A$ is $3000 \times 3000$ and the parameter vectors $b$ and $c$ are $3000 \times 1$. - -```{code-cell} python3 -dim = 5_000 +```{code-cell} ipython3 +dim = 3000 np.random.seed(123) # Create a random matrix A and normalize the rows to sum to one A = np.random.rand(dim, dim) -A = jnp.asarray(A) -s = jnp.sum(A, axis=0) +A = np.asarray(A) +s = np.sum(A, axis=0) A = A / s # Set up b and c -b = jnp.ones(dim) -c = jnp.ones(dim) -``` - -Here is essentially the same demand function we applied before, but now using `jax.numpy` for the calculations. - -```{code-cell} python3 -def e(p, A, b, c): - return jnp.exp(- jnp.dot(A, p)) + c - b * jnp.sqrt(p) +b = np.ones(dim) +c = np.ones(dim) ``` Here's our initial condition -```{code-cell} python3 -init_p = jnp.ones(dim) +```{code-cell} ipython3 +init_p = np.ones(dim) ``` -By leveraging the power of Newton's method, JAX accelerated linear algebra, -automatic differentiation, and a GPU, we obtain a relatively small error for -this very large problem in just a few seconds: - -```{code-cell} python3 +```{code-cell} ipython3 %%time -p = newton(lambda p: e(p, A, b, c), init_p).block_until_ready() +p = newton(lambda p: e(p, A, b, c), init_p) ``` -```{code-cell} python3 +```{code-cell} ipython3 np.max(np.abs(e(p, A, b, c))) ``` -With the same tolerance, SciPy's `root` function takes much longer to run, -even with the Jacobian supplied. - +With the same tolerance, we compare the runtime and accuracy of Newton's method to SciPy's `root` function -```{code-cell} python3 +```{code-cell} ipython3 %%time solution = root(lambda p: e(p, A, b, c), init_p, - jac=lambda p: jax.jacobian(e)(p, A, b, c), + jac=lambda p: jacobian(e)(p, A, b, c), method='hybr', tol=1e-5) ``` -```{code-cell} python3 +```{code-cell} ipython3 p = solution.x np.max(np.abs(e(p, A, b, c))) ``` -The result is also less accurate. - - ## Exercises @@ -924,38 +913,37 @@ The result should converge to the [analytical solution](solved_k). 
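Before turning to the solution, note that — just as in the one-dimensional case discussed above — a fixed point of the law of motion $k = g(k)$ can be computed by applying the multivariate Newton routine defined earlier to $f(k) := g(k) - k$, iterating

$$
k_{n+1} = k_n - J_f(k_n)^{-1} f(k_n)
$$

where $J_f$ is the Jacobian of $f$. This is exactly what the solution below does when it passes `lambda k: multivariate_solow(k) - k` to `newton`.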
Let's first define the parameters for this problem -```{code-cell} python3 -A = jnp.array([[2.0, 3.0, 3.0], - [2.0, 4.0, 2.0], - [1.0, 5.0, 1.0]]) +```{code-cell} ipython3 +A = np.array([[2.0, 3.0, 3.0], + [2.0, 4.0, 2.0], + [1.0, 5.0, 1.0]]) s = 0.2 α = 0.5 δ = 0.8 -initLs = [jnp.ones(3), - jnp.array([3.0, 5.0, 5.0]), - jnp.repeat(50.0, 3)] +initLs = [np.ones(3), + np.array([3.0, 5.0, 5.0]), + np.repeat(50.0, 3)] ``` Then define the multivariate version of the formula for the [law of motion of captial](motion_law) -```{code-cell} python3 +```{code-cell} ipython3 def multivariate_solow(k, A=A, s=s, α=α, δ=δ): - return (s * jnp.dot(A, k**α) + (1 - δ) * k) + return (s * np.dot(A, k**α) + (1 - δ) * k) ``` Let's run through each starting value and see the output -```{code-cell} python3 +```{code-cell} ipython3 attempt = 1 for init in initLs: print(f'Attempt {attempt}: Starting value is {init} \n') - %time k = newton(lambda k: multivariate_solow(k) - k, \ - init).block_until_ready() + init) print('-'*64) - attempt +=1 + attempt += 1 ``` We find that the results are invariant to the starting values given the well-defined property of this question. @@ -964,7 +952,7 @@ But the number of iterations it takes to converge is dependent on the starting v Let substitute the output back to the formulate to check our last result -```{code-cell} python3 +```{code-cell} ipython3 multivariate_solow(k) - k ``` @@ -972,8 +960,8 @@ Note the error is very small. We can also test our results on the known solution -```{code-cell} python3 -A = jnp.array([[2.0, 0.0, 0.0], +```{code-cell} ipython3 +A = np.array([[2.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 2.0]]) @@ -981,15 +969,16 @@ s = 0.3 α = 0.3 δ = 0.4 -init = jnp.repeat(1.0, 3) +init = np.repeat(1.0, 3) %time k = newton(lambda k: multivariate_solow(k, A=A, s=s, α=α, δ=δ) - k, \ - init).block_until_ready() + init) ``` The result is very close to the ground truth but still slightly different. + We can increase the precision of the floating point numbers and restrict the tolerance to obtain a more accurate approximation (see detailed discussion in the [lecture on JAX](https://jax.quantecon.org/jax_intro.html)) ```{code-cell} python3 @@ -1001,8 +990,8 @@ init = init.astype('float64') %time k = newton(lambda k: multivariate_solow(k, A=A, s=s, α=α, δ=δ) - k, \ init,\ - tol=1e-7).block_until_ready() -``` + tol=1e-7) +``` We can see it steps towards a more accurate solution. @@ -1051,14 +1040,6 @@ $$ Set the tolerance to $0.0$ for more accurate output. - -```{hint} -:class: dropdown - -Similar to [exercise 1](newton_ex1), enabling `float64` for JAX can improve the precision of our results. 
-``` - - ```{exercise-end} ``` @@ -1068,35 +1049,37 @@ Similar to [exercise 1](newton_ex1), enabling `float64` for JAX can improve the Define parameters and initial values -```{code-cell} python3 -A = jnp.array([ +```{code-cell} ipython3 +A = np.array([ [0.2, 0.1, 0.7], [0.3, 0.2, 0.5], [0.1, 0.8, 0.1] ]) -b = jnp.array([1.0, 1.0, 1.0]) -c = jnp.array([1.0, 1.0, 1.0]) +b = np.array([1.0, 1.0, 1.0]) +c = np.array([1.0, 1.0, 1.0]) -initLs = [jnp.repeat(5.0, 3), - jnp.ones(3), - jnp.array([4.5, 0.1, 4.0])] +initLs = [np.repeat(5.0, 3), + np.ones(3), + np.array([4.5, 0.1, 4.0])] ``` Let’s run through each initial guess and check the output -```{code-cell} python3 +```{code-cell} ipython3 +--- +tags: [raises-exception] +--- + attempt = 1 for init in initLs: print(f'Attempt {attempt}: Starting value is {init} \n') - - init = init.astype('float64') - %time p = newton(lambda p: e(p, A, b, c), \ - init, \ - tol=0.0).block_until_ready() + init, \ + tol=1e-15, \ + max_iter=15) print('-'*64) - attempt +=1 + attempt += 1 ``` We can find that Newton's method may fail for some starting values. @@ -1105,7 +1088,7 @@ Sometimes it may take a few initial guesses to achieve convergence. Substitute the result back to the formula to check our result -```{code-cell} python3 +```{code-cell} ipython3 e(p, A, b, c) ```