This repository was archived by the owner on Apr 24, 2020. It is now read-only.

Commit 3e00148

update optgrowth lecture
1 parent 0c62c4a commit 3e00148

1 file changed

rst_files/optgrowth.rst

Lines changed: 110 additions & 119 deletions
@@ -529,6 +529,9 @@ because it comes in handy later when we want to just-in-time compile our code
     import numpy as np
     import matplotlib.pyplot as plt
     from interpolation import interp
+    from numba import njit, prange
+    from quantecon.optimize.scalar_maximization import brent_max
+
 
     def f(x):
         y1 = 2 * np.cos(6 * x) + np.sin(14 * x)
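
The `brent_max` routine imported above returns a `(maximizer, maximum, info)` triple, which is why later code in this commit indexes its result with `[0]` (the optimal choice) and `[1]` (the optimized value). A minimal sketch of that calling convention, not part of the commit (the concave objective `g` is purely illustrative, and this assumes the installed `quantecon` version exposes this interface):

.. code-block:: python3

    from numba import njit
    from quantecon.optimize.scalar_maximization import brent_max

    @njit
    def g(x):
        # Simple concave objective, used only to illustrate the interface
        return -(x - 1.0)**2

    x_star, g_max, info = brent_max(g, -2.0, 2.0)  # maximize g on [-2, 2]
    # x_star ≈ 1.0 is the maximizer, g_max ≈ 0.0 the maximum
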
@@ -553,6 +556,34 @@ because it comes in handy later when we want to just-in-time compile our code
 
 Another advantage of piecewise linear interpolation is that it preserves useful shape properties such as monotonicity and concavity / convexity
 
+Optimal Growth Model
+---------------------
+
+We will hold the primitives of the optimal growth model in a class
+
+The distribution :math:`\phi` of the shock is assumed to be lognormal,
+so a draw from :math:`\phi` takes the form :math:`\exp(\mu + s \zeta)` with :math:`\zeta` standard normal
+
+.. code-block:: python3
+
+    class OptimalGrowthModel:
+
+        def __init__(self,
+                     f,
+                     u,
+                     β=0.96,
+                     μ=0,
+                     s=0.1,
+                     grid_max=4,
+                     grid_size=200,
+                     shock_size=250):
+
+            self.β, self.μ, self.s = β, μ, s
+            self.f, self.u = f, u
+
+            self.y_grid = np.linspace(1e-5, grid_max, grid_size)        # Set up grid
+            self.shocks = np.exp(μ + s * np.random.randn(shock_size))   # Store shocks
+
 
 The Bellman Operator
 -----------------------
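
Before turning to the Bellman operator: as a quick reminder of what the last two lines of `__init__` above store (this check is mine, not part of the commit), each entry of `self.shocks` is a draw of :math:`\exp(\mu + s \zeta)` with :math:`\zeta` standard normal, so a Monte Carlo average over the stored shocks approximates an expectation under :math:`\phi`.

.. code-block:: python3

    import numpy as np

    μ, s = 0, 0.1                                    # the class defaults
    shocks = np.exp(μ + s * np.random.randn(250))    # lognormal draws, as stored by the class
    print(np.mean(shocks), np.exp(μ + s**2 / 2))     # sample mean vs exact lognormal mean
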
@@ -562,32 +593,38 @@ Here's a function that generates a Bellman operator using linear interpolation
 
 .. code-block:: python3
 
-    from numba import njit, prange
-    from quantecon.optimize.scalar_maximization import brent_max
+    def bellman_function_factory(og, parallel_flag=True):
 
-    def generate_T_operator(lg, parallel_flag=True):
-        α, β, μ, s = lg.parameters()
-        shocks = lg.shocks
-        y_grid = lg.y_grid
-        u = np.log  # Utility function
-
-        @njit
-        def f(k):  # Production function
-            return k**α
+        '''og is an OptimalGrowthModel'''
+
+        f, u, β = og.f, og.u, og.β
+        y_grid, shocks = og.y_grid, og.shocks
 
         @njit
-        def objective(c, w, y):  # Bellman equation
-            return u(c) + β * np.mean(interp(y_grid, w, f(y - c) * shocks))
+        def objective(c, w, y):
+            # Right hand side of Bellman equation
+            w_func = lambda x: interp(y_grid, w, x)
+            return u(c) + β * np.mean(w_func(f(y - c) * shocks))
 
         @njit(parallel=parallel_flag)
         def T(w):
             w_new = np.empty_like(w)
             for i in prange(len(y_grid)):
-                c_star = brent_max(objective, 1e-10, y_grid[i], args=(w, y_grid[i]))[0]  # Solve the model
-                w_new[i] = objective(c_star, w, y_grid[i])  # Back out optimal w
+                y = y_grid[i]
+                w_max = brent_max(objective, 1e-10, y, args=(w, y))[1]  # Solve for optimal w at y
+                w_new[i] = w_max
             return w_new
+
+        @njit
+        def get_greedy(v):
+            σ = np.empty_like(v)
+            for i in range(len(y_grid)):
+                y = y_grid[i]
+                c_max = brent_max(objective, 1e-10, y, args=(v, y))[0]  # Solve for optimal c at y
+                σ[i] = c_max
+            return σ
 
-        return T
+        return T, get_greedy
 
 The `generate_T_operator` function takes a class that represents the growth model,
 and returns a function `T` that we will use to solve the model
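
For readers who want to check the jitted operator without Numba in the picture, here is a rough plain-Python analogue of `T` (my sketch, not part of the commit): it assumes SciPy is installed, reuses `np` and `interp` from the imports above, and will be far slower on fine grids, so it is only useful as a cross-check.

.. code-block:: python3

    from scipy.optimize import fminbound

    def T_reference(w, og):
        # Unjitted Bellman operator for cross-checking T on coarse grids
        w_new = np.empty_like(w)
        for i, y in enumerate(og.y_grid):
            def neg_rhs(c):
                # Negative of the Bellman right-hand side, since fminbound minimizes
                w_vals = interp(og.y_grid, w, og.f(y - c) * og.shocks)
                return -(og.u(c) + og.β * np.mean(w_vals))
            c_star = fminbound(neg_rhs, 1e-10, y)
            w_new[i] = -neg_rhs(c_star)
        return w_new
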
@@ -642,70 +679,44 @@ The optimal consumption policy is
 
     \sigma^*(y) = (1 - \alpha \beta ) y
 
-
-Let's wrap this model in a class so we can generate its Bellman operator
-
+We will define functions to compute the closed form solutions to check our answers
 
 .. code-block:: python3
 
-    class LogLinearOG:
-        """
-        Log linear optimal growth model, with log utility, CD production and
-        multiplicative lognormal shock, so that
-
-            y = f(k, z) = z k^α
-
-        with z ~ LN(μ, s).
-
-        The class holds parameters and true value and policy functions.
-        """
-
-        def __init__(self,
-                     α=0.4,
-                     β=0.96,
-                     μ=0,
-                     s=0.1,
-                     grid_max=4,
-                     grid_size=200,
-                     shock_size=250):
-
-            self.α, self.β, self.μ, self.s = α, β, μ, s
-
-            # == Some useful constants == #
-            self.ab = α * β
-            self.c1 = np.log(1 - self.ab) / (1 - β)
-            self.c2 = (μ + α * np.log(self.ab)) / (1 - α)
-            self.c3 = 1 / (1 - β)
-            self.c4 = 1 / (1 - self.ab)
-
-            self.y_grid = np.linspace(1e-5, grid_max, grid_size)        # Set up grid
-            self.shocks = np.exp(μ + s * np.random.randn(shock_size))   # Store shocks
+    def σ_star(y, α, β):
+        # True optimal policy
+        return (1 - α * β) * y
 
-        def c_star(self, y):  # True optimal policy
-            return (1 - self.α * self.β) * y
-
-        def v_star(self, y):  # True value function
-            return self.c1 + self.c2 * (self.c3 - self.c4) + self.c4 * np.log(y)
-
-        def parameters(self):
-            return self.α, self.β, self.μ, self.s
+    def v_star(y, α, β, μ):
+        # True value function
+        c1 = np.log(1 - α * β) / (1 - β)
+        c2 = (μ + α * np.log(α * β)) / (1 - α)
+        c3 = 1 / (1 - β)
+        c4 = 1 / (1 - α * β)
+        return c1 + c2 * (c3 - c4) + c4 * np.log(y)
 
 
 A First Test
 --------------
 
 To test our code, we want to see if we can replicate the analytical solution numerically, using fitted value function iteration
 
-First, having run the code for the log linear model shown above, let's
+First, having run the code for the general model shown above, let's
 generate an instance of the model and generate its Bellman operator
 
+We first need to define a jitted version of the production function
 
 .. code-block:: python3
 
-    lg = LogLinearOG()
-    v_star, y_grid = lg.v_star, lg.y_grid  # Unpack parameters/functions for convenience
-
-    T = generate_T_operator(lg)
+    α = 0.4  # Production function parameter
+
+    @njit
+    def f(k):
+        # Production function
+        return k**α
+
+    og = OptimalGrowthModel(f=f, u=np.log)
+    T, get_greedy = bellman_function_factory(og)
 
 
 Now let's do some tests
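
Before running them, for reference: the `v_star` function added above transcribes the closed-form value function for this log-utility, Cobb-Douglas specification. Writing out the constants `c1`–`c4` from the code (this restatement is mine, not part of the commit):

.. math::

    v^*(y)
    = \frac{\ln(1 - \alpha \beta)}{1 - \beta}
      + \frac{\mu + \alpha \ln(\alpha \beta)}{1 - \alpha}
        \left( \frac{1}{1 - \beta} - \frac{1}{1 - \alpha \beta} \right)
      + \frac{\ln y}{1 - \alpha \beta}
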
@@ -718,8 +729,11 @@ In practice we expect some small numerical error
 
 .. code-block:: python3
 
-    w_init = v_star(lg.y_grid)  # Start at the solution
-    w = T(w_init)               # Apply the Bellman operator once
+    y_grid = og.y_grid
+    β, μ = og.β, og.μ
+
+    w_init = v_star(y_grid, α, β, μ)  # Start at the solution
+    w = T(w_init)                     # Apply the Bellman operator once
 
     fig, ax = plt.subplots(figsize=(9, 5))
     ax.set_ylim(-35, -24)
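
Since :math:`v^*` is the exact fixed point of the Bellman operator, the two curves in the resulting figure should lie nearly on top of each other. A one-line numerical version of the same check (my addition, not part of the commit), using the `w` and `w_init` arrays defined just above:

.. code-block:: python3

    # Maximum pointwise gap between v* and T v*; small but not exactly zero,
    # reflecting interpolation, optimization and Monte Carlo error
    print(np.max(np.abs(w - w_init)))
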
@@ -746,11 +760,12 @@ The initial condition we'll start with is :math:`w(y) = 5 \ln (y)`
 
     ax.plot(y_grid, w, color=plt.cm.jet(0),
             lw=2, alpha=0.6, label='Initial condition')
+
     for i in range(n):
         w = T(w)  # Apply the Bellman operator
         ax.plot(y_grid, w, color=plt.cm.jet(i / n), lw=2, alpha=0.6)
 
-    ax.plot(y_grid, v_star(y_grid), 'k-', lw=2,
+    ax.plot(y_grid, v_star(y_grid, α, β, μ), 'k-', lw=2,
             alpha=0.8, label='True value function')
 
     ax.legend(loc='lower right')
@@ -774,15 +789,15 @@ tolerance level
 
 .. code-block:: python3
 
-    def compute_fixed_point(lg,
+    def compute_fixed_point(og,
                             w_init,
                             use_parallel=True,
                             tol=1e-4,
                             max_iter=1000,
                             verbose=True,
                             print_skip=25):
 
-        T = generate_T_operator(lg, parallel_flag=use_parallel)
+        T, _ = bellman_function_factory(og, parallel_flag=use_parallel)
 
         # Set up loop
         w = w_init
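
The hunk ends partway through the function; the rest of `compute_fixed_point` is not shown in this diff, so it is presumably unchanged and follows the usual successive-approximation pattern of applying `T` until the sup-norm change falls below `tol` or `max_iter` is hit. A minimal standalone sketch of that pattern (a hypothetical helper, not a function from the lecture):

.. code-block:: python3

    import numpy as np

    def iterate_to_convergence(T, w_init, tol=1e-4, max_iter=1000):
        # Illustrative successive-approximation loop
        w = w_init
        error = tol + 1
        i = 0
        while error > tol and i < max_iter:
            w_new = T(w)
            error = np.max(np.abs(w_new - w))
            w = w_new
            i += 1
        return w
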
@@ -810,12 +825,16 @@ We can check our result by plotting it against the true value
 .. code-block:: python3
 
     initial_w = 5 * np.log(y_grid)
-
+    v_solution = compute_fixed_point(og, initial_w)
+
     fig, ax = plt.subplots(figsize=(9, 5))
-    ax.plot(y_grid, compute_fixed_point(lg, initial_w),
-            lw=2, alpha=0.6, label='Approximate value function')
-    ax.plot(y_grid, v_star(y_grid), lw=2,
+
+    ax.plot(y_grid, v_solution, lw=2, alpha=0.6,
+            label='Approximate value function')
+
+    ax.plot(y_grid, v_star(y_grid, α, β, μ), lw=2,
             alpha=0.6, label='True value function')
+
     ax.legend(loc='lower right')
     ax.set_ylim(-35, -24)
     plt.show()
@@ -831,48 +850,22 @@ The Policy Function
 .. index::
     single: Optimal Growth; Policy Function
 
-To compute an approximate optimal policy, we will need to write a new
-Bellman operator that returns the greedy policy function rather than the
-optimal wage
+To compute an approximate optimal policy, we will write a function
+that backs out the optimal policy from the value function
 
 The next figure compares the result to the exact solution, which, as mentioned
 above, is :math:`\sigma(y) = (1 - \alpha \beta) y`
 
 .. code-block:: python3
 
-    def generate_Tσ_operator(lg, parallel_flag=True):
-        α, β, μ, s = lg.parameters()
-        shocks = lg.shocks
-        y_grid = lg.y_grid
-        u = np.log
-
-        @njit
-        def f(k):
-            return k**α
-
-        @njit
-        def objective(c, w, y):
-            return u(c) + β * np.mean(interp(y_grid, w, f(y - c) * shocks))
-
-        @njit(parallel=parallel_flag)
-        def T(w):
-            c_new = np.empty_like(w)
-            for i in prange(len(y_grid)):
-                c_star = brent_max(objective, 1e-10, y_grid[i], args=(w, y_grid[i]))[0]  # Solve the model
-                c_new[i] = c_star
-            return c_new
-
-        return T
-
-    Tσ = generate_Tσ_operator(lg)
-    w_init = v_star(lg.y_grid)  # Start at the solution
-    σ = Tσ(w_init)              # Apply the Bellman operator once
-
-
     fig, ax = plt.subplots(figsize=(9, 5))
-    ax.plot(y_grid, σ, lw=2, alpha=0.6, label='Approximate policy function')
-    cstar = (1 - lg.α * lg.β) * y_grid
-    ax.plot(y_grid, cstar, lw=2, alpha=0.6, label='True policy function')
+
+    ax.plot(y_grid, get_greedy(v_solution), lw=2,
+            alpha=0.6, label='Approximate policy function')
+
+    ax.plot(y_grid, σ_star(y_grid, α, β),
+            lw=2, alpha=0.6, label='True policy function')
+
     ax.legend(loc='lower right')
     plt.show()
 
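
A numerical counterpart to the figure (my addition, not part of the commit): the largest gap across the grid between the approximate policy and the exact one.

.. code-block:: python3

    σ_approx = get_greedy(v_solution)
    # Maximum absolute deviation from the exact policy (1 - αβ) y
    print(np.max(np.abs(σ_approx - σ_star(y_grid, α, β))))
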
@@ -922,15 +915,15 @@ Here's one solution (assuming as usual that you've executed everything above)
 
 .. code-block:: python3
 
-    def simulate_og(σ, lg, y0=0.1, ts_length=100):
+    def simulate_og(σ_func, og, α, y0=0.1, ts_length=100):
         '''
         Compute a time series given consumption policy σ.
         '''
         y = np.empty(ts_length)
         ξ = np.random.randn(ts_length-1)
         y[0] = y0
         for t in range(ts_length-1):
-            y[t+1] = (y[t] - σ(y[t]))**lg.α * np.exp(lg.μ + lg.s * ξ[t])
+            y[t+1] = (y[t] - σ_func(y[t]))**α * np.exp(og.μ + og.s * ξ[t])
         return y
 
 .. code-block:: python3
@@ -939,17 +932,15 @@ Here's one solution (assuming as usual that you've executed everything above)
 
     for β in (0.8, 0.9, 0.98):
 
-        lg = LogLinearOG(β=β, s=0.05)
-        v_star, y_grid = lg.v_star, lg.y_grid  # Unpack parameters/functions for convenience
-        T = generate_T_operator(lg)
+        og = OptimalGrowthModel(f, np.log, β=β, s=0.05)
+        y_grid = og.y_grid
 
         initial_w = 5 * np.log(y_grid)
-        v_star_approx = compute_fixed_point(lg, initial_w, verbose=False)  # Solve the model
-
-        σ = Tσ(v_star_approx)  # Find optimal policy
-
-        σ_func = lambda x: interp(y_grid, σ, x)  # Define an optimal policy function
-        y = simulate_og(σ_func, lg)  # Simulate time series
+        v_solution = compute_fixed_point(og, initial_w, verbose=False)
+
+        σ_star = get_greedy(v_solution)
+        σ_func = lambda x: interp(y_grid, σ_star, x)  # Define an optimal policy function
+        y = simulate_og(σ_func, og, α)
 
         ax.plot(y, lw=2, alpha=0.6, label=rf'$\beta = {β}$')
 
     ax.legend(loc='lower right')
