
Commit 7d5c7d1

fix kalman.md
1 parent b40262a

File tree

1 file changed: +34, -23 lines


lectures/kalman.md

Lines changed: 34 additions & 23 deletions
@@ -3,8 +3,10 @@ jupytext:
   text_representation:
     extension: .md
     format_name: myst
+    format_version: 0.13
+    jupytext_version: 1.16.7
 kernelspec:
-  display_name: Python 3
+  display_name: Python 3 (ipykernel)
   language: python
   name: python3
 ---
@@ -29,10 +31,9 @@ kernelspec:
 
 In addition to what's in Anaconda, this lecture will need the following libraries:
 
-```{code-cell} ipython
----
-tags: [hide-output]
----
+```{code-cell} ipython3
+:tags: [hide-output]
+
 !pip install quantecon
 ```
 
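The directive changes above (and in a later hunk) are a syntax migration, not a behavioral fix: each cell switches from the `ipython` to the `ipython3` kernel name, and cell options move from the fenced `---` YAML-block form to MyST's compact `:key: value` form. To the best of my understanding the two option syntaxes are equivalent in MyST; a sketch of both forms (not taken from this file):

```{code-cell} ipython3
---
tags: [hide-output]
---
!pip install quantecon
```

```{code-cell} ipython3
:tags: [hide-output]

!pip install quantecon
```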

@@ -54,9 +55,8 @@ Required knowledge: Familiarity with matrix manipulations, multivariate normal d
 
 We'll need the following imports:
 
-```{code-cell} ipython
+```{code-cell} ipython3
 import matplotlib.pyplot as plt
-plt.rcParams["figure.figsize"] = (11, 5) #set default figure size
 from scipy import linalg
 import numpy as np
 import matplotlib.cm as cm
@@ -122,10 +122,13 @@ $2 \times 2$ covariance matrix. In our simulations, we will suppose that
 
 This density $p(x)$ is shown below as a contour map, with the center of the red ellipse being equal to $\hat x$.
 
-```{code-cell} python3
----
-tags: [output_scroll]
----
+```{code-cell} ipython3
+a
+```
+
+```{code-cell} ipython3
+:tags: [output_scroll]
+
 # Set up the Gaussian prior density p
 Σ = [[0.4, 0.3], [0.3, 0.45]]
 Σ = np.matrix(Σ)
@@ -186,7 +189,7 @@ def bivariate_normal(x, y, σ_x=1.0, σ_y=1.0, μ_x=0.0, μ_y=0.0, σ_xy=0.0):
 
 def gen_gaussian_plot_vals(μ, C):
     "Z values for plotting the bivariate Gaussian N(μ, C)"
-    m_x, m_y = float(μ[0]), float(μ[1])
+    m_x, m_y = float(μ[0,0]), float(μ[1,0])
     s_x, s_y = np.sqrt(C[0, 0]), np.sqrt(C[1, 1])
     s_xy = C[0, 1]
     return bivariate_normal(X, Y, s_x, s_y, m_x, m_y, s_xy)
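Here `μ` is a 2×1 `np.matrix`, so `μ[0]` is itself a 1×1 matrix rather than a scalar; NumPy 1.25+ deprecates calling `float()` on any array with `ndim > 0`, which is why the explicit element index `μ[0,0]` is needed. A minimal sketch of the difference (the numbers are illustrative, not the lecture's data):

```python
import numpy as np

μ = np.matrix([[-0.2], [0.5]])   # a 2x1 column, shaped like the lecture's μ

row = μ[0]       # still a 1x1 matrix (ndim == 2), not a scalar
elem = μ[0, 0]   # the actual element, a true scalar

# float(row) triggers "Conversion of an array with ndim > 0 to a scalar
# is deprecated" on NumPy >= 1.25; indexing the element avoids it.
print(float(elem))   # -0.2
```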
@@ -213,15 +216,19 @@ The good news is that the missile has been located by our sensors, which report
 The next figure shows the original prior $p(x)$ and the new reported
 location $y$
 
-```{code-cell} python3
+```{code-cell} ipython3
+y[0].item()
+```
+
+```{code-cell} ipython3
 fig, ax = plt.subplots(figsize=(10, 8))
 ax.grid()
 
 Z = gen_gaussian_plot_vals(x_hat, Σ)
 ax.contourf(X, Y, Z, 6, alpha=0.6, cmap=cm.jet)
 cs = ax.contour(X, Y, Z, 6, colors="black")
 ax.clabel(cs, inline=1, fontsize=10)
-ax.text(float(y[0]), float(y[1]), "$y$", fontsize=20, color="black")
+ax.text(float(y[0].item()), float(y[1].item()), "$y$", fontsize=20, color="black")
 
 plt.show()
 ```
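The `ax.text` edits here and in the two later figures have the same cause: `y` is a column matrix, so `y[0]` has `ndim > 0` and `float(y[0])` now warns, whereas `.item()` extracts the element as a plain Python scalar (leaving the surrounding `float()` redundant but harmless). A minimal sketch, assuming a 2×1 `np.matrix` like the lecture's `y` (the coordinates are made up):

```python
import numpy as np

y = np.matrix([[1.2], [-0.3]])   # illustrative 2x1 column, not the lecture's data

print(y[0].item())          # 1.2 -- plain Python float, no deprecation warning
print(float(y[0].item()))   # identical; the outer float() kept in the diff is a no-op
```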
@@ -284,7 +291,7 @@ This new density $p(x \,|\, y) = N(\hat x^F, \Sigma^F)$ is shown in the next fig
 
 The original density is left in as contour lines for comparison
 
-```{code-cell} python3
+```{code-cell} ipython3
 fig, ax = plt.subplots(figsize=(10, 8))
 ax.grid()
 

@@ -298,7 +305,7 @@ new_Z = gen_gaussian_plot_vals(x_hat_F, Σ_F)
 cs2 = ax.contour(X, Y, new_Z, 6, colors="black")
 ax.clabel(cs2, inline=1, fontsize=10)
 ax.contourf(X, Y, new_Z, 6, alpha=0.6, cmap=cm.jet)
-ax.text(float(y[0]), float(y[1]), "$y$", fontsize=20, color="black")
+ax.text(float(y[0].item()), float(y[1].item()), "$y$", fontsize=20, color="black")
 
 plt.show()
 ```
@@ -391,7 +398,7 @@ A
 Q = 0.3 * \Sigma
 $$
 
-```{code-cell} python3
+```{code-cell} ipython3
 fig, ax = plt.subplots(figsize=(10, 8))
 ax.grid()
 

@@ -415,7 +422,7 @@ new_Z = gen_gaussian_plot_vals(new_x_hat, new_Σ)
 cs3 = ax.contour(X, Y, new_Z, 6, colors="black")
 ax.clabel(cs3, inline=1, fontsize=10)
 ax.contourf(X, Y, new_Z, 6, alpha=0.6, cmap=cm.jet)
-ax.text(float(y[0]), float(y[1]), "$y$", fontsize=20, color="black")
+ax.text(float(y[0].item()), float(y[1].item()), "$y$", fontsize=20, color="black")
 
 plt.show()
 ```
@@ -577,7 +584,11 @@ Your figure should -- modulo randomness -- look something like this
 :class: dropdown
 ```
 
-```{code-cell} python3
+```{code-cell} ipython3
+kalman.x_hat
+```
+
+```{code-cell} ipython3
 # Parameters
 θ = 10 # Constant value of state x_t
 A, C, G, H = 1, 0, 1, 1
@@ -598,7 +609,7 @@ xgrid = np.linspace(θ - 5, θ + 2, 200)
 
 for i in range(N):
     # Record the current predicted mean and variance
-    m, v = [float(z) for z in (kalman.x_hat, kalman.Sigma)]
+    m, v = [float(z) for z in (kalman.x_hat.item(), kalman.Sigma.item())]
     # Plot, update filter
     ax.plot(xgrid, norm.pdf(xgrid, loc=m, scale=np.sqrt(v)), label=f'$t={i}$')
     kalman.update(y[i])
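The same pattern is applied to the filter's own state: `quantecon`'s `Kalman` keeps `x_hat` and `Sigma` as arrays even for a scalar model, so `.item()` is used before plotting, after which the `float()` in the comprehension receives a value that is already a Python scalar. A hedged sketch reusing the exercise's parameters (the prior `N(8, 1)` is an illustrative choice, not taken from this diff):

```python
import numpy as np
from quantecon import Kalman, LinearStateSpace

θ = 10                    # constant state value, as in the exercise
A, C, G, H = 1, 0, 1, 1
ss = LinearStateSpace(A, C, G, H, mu_0=θ)

kalman = Kalman(ss, 8, 1)   # prior mean 8, prior variance 1 (illustrative)
m, v = kalman.x_hat.item(), kalman.Sigma.item()
print(type(m), type(v))     # plain Python scalars, ready for norm.pdf
```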
@@ -641,7 +652,7 @@ Your figure should show error erratically declining something like this
 :class: dropdown
 ```
 
-```{code-cell} python3
+```{code-cell} ipython3
 ϵ = 0.1
 θ = 10 # Constant value of state x_t
 A, C, G, H = 1, 0, 1, 1
@@ -657,7 +668,7 @@ y = y.flatten()
 
 for t in range(T):
     # Record the current predicted mean and variance and plot their densities
-    m, v = [float(temp) for temp in (kalman.x_hat, kalman.Sigma)]
+    m, v = [float(temp) for temp in (kalman.x_hat.item(), kalman.Sigma.item())]
 
     f = lambda x: norm.pdf(x, loc=m, scale=np.sqrt(v))
     integral, error = quad(f, θ - ϵ, θ + ϵ)
@@ -745,7 +756,7 @@ Observe how, after an initial learning period, the Kalman filter performs quite
 :class: dropdown
 ```
 
-```{code-cell} python3
+```{code-cell} ipython3
 # Define A, C, G, H
 G = np.identity(2)
 H = np.sqrt(0.5) * np.identity(2)
