@@ -3,8 +3,10 @@ jupytext:
   text_representation:
     extension: .md
     format_name: myst
+    format_version: 0.13
+    jupytext_version: 1.16.7
 kernelspec:
-  display_name: Python 3
+  display_name: Python 3 (ipykernel)
   language: python
   name: python3
 ---
@@ -29,10 +31,9 @@ kernelspec:
 
 In addition to what's in Anaconda, this lecture will need the following libraries:
 
-```{code-cell} ipython
----
-tags: [hide-output]
----
+```{code-cell} ipython3
+:tags: [hide-output]
+
 !pip install quantecon
 ```
 
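
Note: this hunk (and the matching cell edits below) does two things: it pins the cell lexer to `ipython3` and it swaps the YAML option block delimited by `---` lines for MyST's single-line `:key: value` option syntax. Both forms carry the same cell metadata, so

    ```{code-cell} ipython3
    :tags: [hide-output]
    ```

is equivalent to

    ```{code-cell} ipython3
    ---
    tags: [hide-output]
    ---
    ```
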
@@ -54,9 +55,8 @@ Required knowledge: Familiarity with matrix manipulations, multivariate normal d
 
 We'll need the following imports:
 
-```{code-cell} ipython
+```{code-cell} ipython3
 import matplotlib.pyplot as plt
-plt.rcParams["figure.figsize"] = (11, 5)  #set default figure size
 from scipy import linalg
 import numpy as np
 import matplotlib.cm as cm
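
Note: the removed `plt.rcParams` line set a global 11×5 default figure size. The plotting cells in this diff all pass an explicit size instead, e.g. `fig, ax = plt.subplots(figsize=(10, 8))`, so nothing below depends on the global default (presumably the lecture build now controls default sizing elsewhere).
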
@@ -122,10 +122,9 @@ $2 \times 2$ covariance matrix.  In our simulations, we will suppose that
 
 This density $p(x)$ is shown below as a contour map, with the center of the red ellipse being equal to $\hat x$.
 
-```{code-cell} python3
----
-tags: [output_scroll]
----
+```{code-cell} ipython3
+:tags: [output_scroll]
+
 # Set up the Gaussian prior density p
 Σ = [[0.4, 0.3], [0.3, 0.45]]
 Σ = np.matrix(Σ)
@@ -186,7 +185,7 @@ def bivariate_normal(x, y, σ_x=1.0, σ_y=1.0, μ_x=0.0, μ_y=0.0, σ_xy=0.0):
 
 def gen_gaussian_plot_vals(μ, C):
     "Z values for plotting the bivariate Gaussian N(μ, C)"
-    m_x, m_y = float(μ[0]), float(μ[1])
+    m_x, m_y = float(μ[0, 0]), float(μ[1, 0])
     s_x, s_y = np.sqrt(C[0, 0]), np.sqrt(C[1, 1])
     s_xy = C[0, 1]
     return bivariate_normal(X, Y, s_x, s_y, m_x, m_y, s_xy)
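
Note: the driver for this change is that `μ` is an `np.matrix` (the prior cell above builds `Σ = np.matrix(Σ)` and the mean the same way), and single-subscript indexing of a matrix returns a 1×1 sub-matrix rather than a scalar; NumPy ≥ 1.25 deprecates `float()` on arrays with more than zero dimensions. A minimal sketch, with an illustrative column matrix standing in for the lecture's `μ`:

```python
import numpy as np

μ = np.matrix([0.2, -0.2]).T  # 2x1 column matrix, like the lecture's x_hat

print(μ[0].shape)   # (1, 1) -- one subscript still returns a matrix
print(μ[0, 0])      # 0.2    -- two subscripts return a true scalar

# float(μ[0]) emits a DeprecationWarning on NumPy >= 1.25 (conversion of
# size-1, non-0-d arrays is slated to become an error); float(μ[0, 0]) is clean.
m_x, m_y = float(μ[0, 0]), float(μ[1, 0])
print(m_x, m_y)     # 0.2 -0.2
```
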
@@ -213,15 +212,15 @@ The good news is that the missile has been located by our sensors, which report
 The next figure shows the original prior $p(x)$ and the new reported
 location $y$
 
-```{code-cell} python3
+```{code-cell} ipython3
 fig, ax = plt.subplots(figsize=(10, 8))
 ax.grid()
 
 Z = gen_gaussian_plot_vals(x_hat, Σ)
 ax.contourf(X, Y, Z, 6, alpha=0.6, cmap=cm.jet)
 cs = ax.contour(X, Y, Z, 6, colors="black")
 ax.clabel(cs, inline=1, fontsize=10)
-ax.text(float(y[0]), float(y[1]), "$y$", fontsize=20, color="black")
+ax.text(float(y[0].item()), float(y[1].item()), "$y$", fontsize=20, color="black")
 
 plt.show()
 ```
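
Note: same deprecation here. `y` is a 2×1 `np.matrix`, so `y[0]` is a 1×1 matrix; `.item()` extracts the Python scalar directly, which makes the surrounding `float()` in the new line redundant but harmless. A short sketch, assuming `y` is constructed with `np.matrix` as elsewhere in the lecture; the identical fix is applied to the two later `ax.text` hunks:

```python
import numpy as np

y = np.matrix([2.3, -1.9]).T  # stand-in for the lecture's reported location

print(y[0].shape)        # (1, 1)
print(y[0].item())       # 2.3 -- .item() returns a plain Python float

# Deprecation-free coordinates for ax.text:
x_coord, y_coord = y[0].item(), y[1].item()
print(x_coord, y_coord)  # 2.3 -1.9
```
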
@@ -284,7 +283,7 @@ This new density $p(x \,|\, y) = N(\hat x^F, \Sigma^F)$ is shown in the next fig
 
 The original density is left in as contour lines for comparison
 
-```{code-cell} python3
+```{code-cell} ipython3
 fig, ax = plt.subplots(figsize=(10, 8))
 ax.grid()
 
@@ -298,7 +297,7 @@ new_Z = gen_gaussian_plot_vals(x_hat_F, Σ_F)
 cs2 = ax.contour(X, Y, new_Z, 6, colors="black")
 ax.clabel(cs2, inline=1, fontsize=10)
 ax.contourf(X, Y, new_Z, 6, alpha=0.6, cmap=cm.jet)
-ax.text(float(y[0]), float(y[1]), "$y$", fontsize=20, color="black")
+ax.text(float(y[0].item()), float(y[1].item()), "$y$", fontsize=20, color="black")
 
 plt.show()
 ```
@@ -391,7 +390,7 @@
 Q = 0.3 * \Sigma
 $$
 
-```{code-cell} python3
+```{code-cell} ipython3
 fig, ax = plt.subplots(figsize=(10, 8))
 ax.grid()
 
@@ -415,7 +414,7 @@ new_Z = gen_gaussian_plot_vals(new_x_hat, new_Σ)
 cs3 = ax.contour(X, Y, new_Z, 6, colors="black")
 ax.clabel(cs3, inline=1, fontsize=10)
 ax.contourf(X, Y, new_Z, 6, alpha=0.6, cmap=cm.jet)
-ax.text(float(y[0]), float(y[1]), "$y$", fontsize=20, color="black")
+ax.text(float(y[0].item()), float(y[1].item()), "$y$", fontsize=20, color="black")
 
 plt.show()
 ```
@@ -577,7 +576,7 @@ Your figure should -- modulo randomness -- look something like this
 :class: dropdown
 ```
 
-```{code-cell} python3
+```{code-cell} ipython3
 # Parameters
 θ = 10  # Constant value of state x_t
 A, C, G, H = 1, 0, 1, 1
@@ -598,7 +597,7 @@ xgrid = np.linspace(θ - 5, θ + 2, 200)
 
 for i in range(N):
     # Record the current predicted mean and variance
-    m, v = [float(z) for z in (kalman.x_hat, kalman.Sigma)]
+    m, v = [float(z) for z in (kalman.x_hat.item(), kalman.Sigma.item())]
     # Plot, update filter
     ax.plot(xgrid, norm.pdf(xgrid, loc=m, scale=np.sqrt(v)), label=f'$t={i}$')
     kalman.update(y[i])
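
Note: `kalman.x_hat` and `kalman.Sigma` are attributes of QuantEcon's `Kalman` class, which stores the state estimate and covariance as arrays, so even in this scalar exercise they are 1×1 and need `.item()` before being handed to `norm.pdf` (the comprehension's `float()` then has nothing left to do). A sketch under the assumption that the exercise builds the filter from `LinearStateSpace` in the usual way, with illustrative prior values:

```python
import numpy as np
from quantecon import Kalman, LinearStateSpace

# Scalar model x_{t+1} = x_t, y_t = x_t + v_t, matching the exercise parameters
A, C, G, H = 1, 0, 1, 1
θ = 10
ss = LinearStateSpace(A, C, G, H, mu_0=θ)
kalman = Kalman(ss, x_hat=8.0, Sigma=1.0)  # illustrative prior mean and variance

# Even for a scalar state the filter stores 1x1 arrays; .item() unwraps them
m, v = kalman.x_hat.item(), kalman.Sigma.item()
print(m, v)  # 8.0 1.0
```
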
@@ -641,7 +640,7 @@ Your figure should show error erratically declining something like this
 :class: dropdown
 ```
 
-```{code-cell} python3
+```{code-cell} ipython3
 ϵ = 0.1
 θ = 10  # Constant value of state x_t
 A, C, G, H = 1, 0, 1, 1
@@ -657,7 +656,7 @@ y = y.flatten()
 
 for t in range(T):
     # Record the current predicted mean and variance and plot their densities
-    m, v = [float(temp) for temp in (kalman.x_hat, kalman.Sigma)]
+    m, v = [float(temp) for temp in (kalman.x_hat.item(), kalman.Sigma.item())]
 
     f = lambda x: norm.pdf(x, loc=m, scale=np.sqrt(v))
     integral, error = quad(f, θ - ϵ, θ + ϵ)
@@ -745,7 +744,7 @@ Observe how, after an initial learning period, the Kalman filter performs quite
 :class: dropdown
 ```
 
-```{code-cell} python3
+```{code-cell} ipython3
 # Define A, C, G, H
 G = np.identity(2)
 H = np.sqrt(0.5) * np.identity(2)