From 936e8fd084230196fd75809327ac8c0f4af93040 Mon Sep 17 00:00:00 2001 From: Kenko LI Date: Thu, 18 Sep 2025 16:58:17 +0900 Subject: [PATCH 1/6] Update kalman_2.md --- lectures/kalman_2.md | 110 ++++++++++++++++++++++++++++--------------- 1 file changed, 71 insertions(+), 39 deletions(-) diff --git a/lectures/kalman_2.md b/lectures/kalman_2.md index c1049dc36..13eaa415b 100644 --- a/lectures/kalman_2.md +++ b/lectures/kalman_2.md @@ -30,7 +30,7 @@ kernelspec: ``` In this quantecon lecture {doc}`A First Look at the Kalman filter `, we used -a Kalman filter to estimate locations of a rocket. +a Kalman filter to estimate locations of a rocket. In this lecture, we'll use the Kalman filter to infer a worker's human capital and the effort that the worker devotes to accumulating @@ -38,7 +38,7 @@ human capital, neither of which the firm observes directly. The firm learns about those things only by observing a history of the output that the worker generates for the firm, and from understanding how that output depends on the worker's human capital and how human capital evolves as a function of the worker's effort. -We'll posit a rule that expresses how the much firm pays the worker each period as a function of the firm's information each period. +We'll posit a rule that expresses how much the firm pays the worker each period as a function of the firm's information each period. In addition to what's in Anaconda, this lecture will need the following libraries: @@ -53,8 +53,11 @@ To conduct simulations, we bring in these imports, as in {doc}`A First Look at t ```{code-cell} ipython3 import matplotlib.pyplot as plt import numpy as np +import jax +import jax.numpy as jnp from quantecon import Kalman, LinearStateSpace from collections import namedtuple +from typing import NamedTuple from scipy.stats import multivariate_normal import matplotlib as mpl mpl.rcParams['text.usetex'] = True @@ -71,7 +74,7 @@ The workers' output is described by the following dynamic process: :label: worker_model \begin{aligned} -h_{t+1} &= \alpha h_t + \beta u_t + c w_{t+1}, \quad c_{t+1} \sim {\mathcal N}(0,1) \\ +h_{t+1} &= \alpha h_t + \beta u_t + c w_{t+1}, \quad w_{t+1} \sim {\mathcal N}(0,1) \\ u_{t+1} & = u_t \\ y_t & = g h_t + v_t , \quad v_t \sim {\mathcal N} (0, R) \end{aligned} @@ -85,7 +88,7 @@ Here * $h_0 \sim {\mathcal N}(\hat h_0, \sigma_{h,0})$ * $u_0 \sim {\mathcal N}(\hat u_0, \sigma_{u,0})$ -Parameters of the model are $\alpha, \beta, c, R, g, \hat h_0, \hat u_0, \sigma_h, \sigma_u$. +Parameters of the model are $\alpha, \beta, c, R, g, \hat h_0, \hat u_0, \sigma_{h,0}, \sigma_{u,0}$. At time $0$, a firm has hired the worker. 
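Before the patch turns to the state-space machinery, it can help to see the system [](worker_model) iterated directly. Here is a minimal sketch that simulates the three equations under the default parameter values that `create_worker` adopts below; the seed, horizon, and initial draw of $(h_0, u_0)$ are arbitrary illustrative choices, not part of the lecture's code.

```python
import numpy as np

rng = np.random.default_rng(0)
α, β, c, g, R = 0.8, 0.2, 0.2, 1.0, 0.5  # create_worker's default values
h, u = 4.0, 4.0                          # one draw of (h_0, u_0)

for t in range(5):
    y = g * h + np.sqrt(R) * rng.standard_normal()  # y_t = g h_t + v_t
    h = α * h + β * u + c * rng.standard_normal()   # h_{t+1} = α h_t + β u_t + c w_{t+1}
    # u_{t+1} = u_t, so the worker's hard-wired effort never moves
    print(f"t={t}: y_t = {y:.3f}, next h = {h:.3f}")
```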
@@ -129,8 +132,17 @@ Write system [](worker_model) in the state-space form ```{math} \begin{aligned} -\begin{bmatrix} h_{t+1} \cr u_{t+1} \end{bmatrix} &= \begin{bmatrix} \alpha & \beta \cr 0 & 1 \end{bmatrix}\begin{bmatrix} h_{t} \cr u_{t} \end{bmatrix} + \begin{bmatrix} c \cr 0 \end{bmatrix} w_{t+1} \cr -y_t & = \begin{bmatrix} g & 0 \end{bmatrix} \begin{bmatrix} h_{t} \cr u_{t} \end{bmatrix} + v_t + \begin{bmatrix} h_{t+1} \cr u_{t+1} \end{bmatrix} + &= + \begin{bmatrix} \alpha & \beta \cr 0 & 1 \end{bmatrix} + \begin{bmatrix} h_{t} \cr u_{t} \end{bmatrix} + + + \begin{bmatrix} c \cr 0 \end{bmatrix} + w_{t+1} \cr + y_t & = + \begin{bmatrix} g & 0 \end{bmatrix} + \begin{bmatrix} h_{t} \cr u_{t} \end{bmatrix} + + v_t \end{aligned} ``` @@ -151,67 +163,87 @@ where x_t = \begin{bmatrix} h_{t} \cr u_{t} \end{bmatrix} , \quad \hat x_0 = \begin{bmatrix} \hat h_0 \cr \hat u_0 \end{bmatrix} , \quad \Sigma_0 = \begin{bmatrix} \sigma_{h,0} & 0 \cr - 0 & \sigma_{u,0} \end{bmatrix} + 0 & \sigma_{u,0} \end{bmatrix} ``` -To compute the firm's wage setting policy, we first we create a `namedtuple` to store the parameters of the model +To compute the firm's wage setting policy, we first we create a `NamedTuple` to store the parameters of the model ```{code-cell} ipython3 -WorkerModel = namedtuple("WorkerModel", - ('A', 'C', 'G', 'R', 'xhat_0', 'Σ_0')) +class WorkerModel(NamedTuple): + """ + Parameters for the worker model state-space representation. + + A : jax.Array + Transition matrix of state variables + C : jax.Array + Parameter of the state shock + G : jax.Array + Parameter of state variables in observation equation + R : jax.Array + Coefficient of observation shock + xhat_0 : jax.Array + Initial state estimate + Σ_0 : jax.Array + Initial covariance matrix + """ + A: jax.Array + C: jax.Array + G: jax.Array + R: jax.Array + xhat_0: jax.Array + Σ_0: jax.Array def create_worker(α=.8, β=.2, c=.2, R=.5, g=1.0, hhat_0=4, uhat_0=4, σ_h=4, σ_u=4): - A = np.array([[α, β], + A = jnp.array([[α, β], [0, 1]]) - C = np.array([[c], + C = jnp.array([[c], [0]]) - G = np.array([g, 1]) + G = jnp.array([g, 0]) + R = jnp.array(R) # Define initial state and covariance matrix - xhat_0 = np.array([[hhat_0], + xhat_0 = jnp.array([[hhat_0], [uhat_0]]) - Σ_0 = np.array([[σ_h, 0], + Σ_0 = jnp.array([[σ_h, 0], [0, σ_u]]) return WorkerModel(A=A, C=C, G=G, R=R, xhat_0=xhat_0, Σ_0=Σ_0) ``` -Please note how the `WorkerModel` namedtuple creates all of the objects required to compute an associated -state-space representation {eq}`ssrepresent`. +Please note how the `WorkerModel` namedtuple creates all of the objects required to compute an associated state-space representation {eq}`ssrepresent`. -This is handy, because in order to simulate a history $\{y_t, h_t\}$ for a worker, we'll want to form - state space system for him/her by using the [`LinearStateSpace`](https://quanteconpy.readthedocs.io/en/latest/tools/lss.html) class. +This is handy, because in order to simulate a history $\{y_t, h_t\}$ for a worker, we'll want to form state space system for him/her by using the [`LinearStateSpace`](https://quanteconpy.readthedocs.io/en/latest/tools/lss.html) class. 
```{code-cell} ipython3 +# TODO write it into a function # Define A, C, G, R, xhat_0, Σ_0 worker = create_worker() A, C, G, R = worker.A, worker.C, worker.G, worker.R xhat_0, Σ_0 = worker.xhat_0, worker.Σ_0 # Create a LinearStateSpace object -ss = LinearStateSpace(A, C, G, np.sqrt(R), - mu_0=xhat_0, Sigma_0=np.zeros((2,2))) +ss = LinearStateSpace(A, C, G, jnp.sqrt(R), + mu_0=xhat_0, Sigma_0=Σ_0) T = 100 -x, y = ss.simulate(T) +seed = 1234 +x, y = ss.simulate(T, seed) y = y.flatten() h_0, u_0 = x[0, 0], x[1, 0] ``` -Next, to compute the firm's policy for setting the log wage based on the information it has about the worker, -we use the Kalman filter described in this quantecon lecture {doc}`A First Look at the Kalman filter `. +Next, to compute the firm's policy for setting the log wage based on the information it has about the worker, we use the Kalman filter described in this quantecon lecture {doc}`A First Look at the Kalman filter `. In particular, we want to compute all of the objects in an "innovation representation". ## An Innovations Representation -We have all the objects in hand required to form an innovations representation for the output -process $\{y_t\}_{t=0}^T$ for a worker. +We have all the objects in hand required to form an innovations representation for the output process $\{y_t\}_{t=0}^T$ for a worker. Let's code that up now. @@ -227,30 +259,30 @@ where $K_t$ is the Kalman gain matrix at time $t$. We accomplish this in the following code that uses the [`Kalman`](https://quanteconpy.readthedocs.io/en/latest/tools/kalman.html) class. ```{code-cell} ipython3 +# TODO check the names of variables kalman = Kalman(ss, xhat_0, Σ_0) -Σ_t = np.zeros((*Σ_0.shape, T-1)) -y_hat_t = np.zeros(T-1) -x_hat_t = np.zeros((2, T-1)) +Σ_t = jnp.zeros((*Σ_0.shape, T-1)) +y_hat_t = jnp.zeros(T-1) +x_hat_t = jnp.zeros((2, T-1)) for t in range(1, T): kalman.update(y[t]) x_hat, Σ = kalman.x_hat, kalman.Sigma - Σ_t[:, :, t-1] = Σ - x_hat_t[:, t-1] = x_hat.reshape(-1) - [y_hat_t[t-1]] = worker.G @ x_hat + Σ_t = Σ_t.at[:, :, t-1].set(Σ) + x_hat_t = x_hat_t.at[:, t-1].set(x_hat.reshape(-1)) + y_hat_t = y_hat_t.at[t-1].set((worker.G @ x_hat).item()) -x_hat_t = np.concatenate((x[:, 1][:, np.newaxis], - x_hat_t), axis=1) -Σ_t = np.concatenate((worker.Σ_0[:, :, np.newaxis], - Σ_t), axis=2) +# Add the initial +x_hat_t = jnp.concatenate((xhat_0, x_hat_t), axis=1) +Σ_t = jnp.concatenate((worker.Σ_0[:, :, np.newaxis], Σ_t), axis=2) u_hat_t = x_hat_t[1, :] ``` -For a draw of $h_0, u_0$, we plot $E y_t = G \hat x_t $ where $\hat x_t = E [x_t | y^{t-1}]$. +For a draw of $h_0, u_0$, we plot $E[y_t] = G \hat x_t $ where $\hat x_t = E [x_t | y^{t-1}]$. -We also plot $E [u_0 | y^{t-1}]$, which is the firm inference about a worker's hard-wired "work ethic" $u_0$, conditioned on information $y^{t-1}$ that it has about him or her coming into period $t$. +We also plot $\hat u_t = E [u_0 | y^{t-1}]$, which is the firm inference about a worker's hard-wired "work ethic" $u_0$, conditioned on information $y^{t-1}$ that it has about him or her coming into period $t$. -We can watch as the firm's inference $E [u_0 | y^{t-1}]$ of the worker's work ethic converges toward the hidden $u_0$, which is not directly observed by the firm. +We can watch as the firm's inference $E [u_0 | y^{t-1}]$ of the worker's work ethic converges toward the hidden $u_0$, which is not directly observed by the firm. 
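The `Kalman` class hides the algebra behind each `update` call. As a complement, here is a hand-rolled sketch of one step of the innovations recursion, with $\hat x_t = E[x_t \mid y^{t-1}]$ and $\Sigma_t$ the matching conditional covariance. The function name and the NumPy casting are choices made here rather than anything in quantecon; the formulas are the standard predictive Kalman filter recursions, so feeding this function the worker's matrices and the simulated `y` should reproduce, step by step, what the loop above gets from `kalman.update`.

```python
import numpy as np

def innovations_step(x_hat, Σ, y, A, C, G, R):
    # One step: (x_hat_t, Σ_t, y_t) -> (x_hat_{t+1}, Σ_{t+1})
    G = np.atleast_2d(G)                 # treat G as a 1 x n row vector
    a = y - (G @ x_hat).item()           # innovation a_t = y_t - G x_hat_t
    s = (G @ Σ @ G.T).item() + float(R)  # variance of the innovation
    K = (A @ Σ @ G.T) / s                # Kalman gain K_t
    x_hat_next = A @ x_hat + K * a       # x_hat_{t+1} = A x_hat_t + K_t a_t
    Σ_next = A @ Σ @ A.T + C @ C.T - K @ (G @ Σ @ A.T)
    return x_hat_next, Σ_next
```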
```{code-cell} ipython3 fig, ax = plt.subplots(1, 2) From b1808f5133e3fb5037b9be48847a5cf8be79096c Mon Sep 17 00:00:00 2001 From: Kenko LI Date: Fri, 10 Oct 2025 17:23:46 +0900 Subject: [PATCH 2/6] modified: lectures/kalman_2.md --- lectures/kalman_2.md | 286 ++++++++++++++++++++++--------------------- 1 file changed, 149 insertions(+), 137 deletions(-) diff --git a/lectures/kalman_2.md b/lectures/kalman_2.md index 13eaa415b..f306b95ed 100644 --- a/lectures/kalman_2.md +++ b/lectures/kalman_2.md @@ -85,10 +85,10 @@ Here * $h_t$ is the logarithm of human capital at time $t$ * $u_t$ is the logarithm of the worker's effort at accumulating human capital at $t$ * $y_t$ is the logarithm of the worker's output at time $t$ -* $h_0 \sim {\mathcal N}(\hat h_0, \sigma_{h,0})$ -* $u_0 \sim {\mathcal N}(\hat u_0, \sigma_{u,0})$ +* $h_0 \sim {\mathcal N}(\mu_{h, 0}, \sigma_{h,0})$ +* $u_0 \sim {\mathcal N}(\mu_{u, 0}, \sigma_{u,0})$ -Parameters of the model are $\alpha, \beta, c, R, g, \hat h_0, \hat u_0, \sigma_{h,0}, \sigma_{u,0}$. +Parameters of the model are $\alpha, \beta, c, R, g, \mu_{h, 0}, \mu_{u, 0}, \sigma_{h,0}, \sigma_{u,0}$. At time $0$, a firm has hired the worker. @@ -153,7 +153,7 @@ which is equivalent with \begin{aligned} x_{t+1} & = A x_t + C w_{t+1} \cr y_t & = G x_t + v_t \cr -x_0 & \sim {\mathcal N}(\hat x_0, \Sigma_0) +x_0 & \sim {\mathcal N}(\mu_0, \Sigma_0) \end{aligned} ``` @@ -161,7 +161,7 @@ where ```{math} x_t = \begin{bmatrix} h_{t} \cr u_{t} \end{bmatrix} , \quad -\hat x_0 = \begin{bmatrix} \hat h_0 \cr \hat u_0 \end{bmatrix} , \quad +\mu_0 = \begin{bmatrix} \mu_{h ,0} \cr \mu_{u, 0} \end{bmatrix} , \quad \Sigma_0 = \begin{bmatrix} \sigma_{h,0} & 0 \cr 0 & \sigma_{u,0} \end{bmatrix} ``` @@ -170,48 +170,34 @@ To compute the firm's wage setting policy, we first we create a `NamedTuple` to ```{code-cell} ipython3 class WorkerModel(NamedTuple): - """ - Parameters for the worker model state-space representation. - - A : jax.Array - Transition matrix of state variables - C : jax.Array - Parameter of the state shock - G : jax.Array - Parameter of state variables in observation equation - R : jax.Array - Coefficient of observation shock - xhat_0 : jax.Array - Initial state estimate - Σ_0 : jax.Array - Initial covariance matrix - """ + A: jax.Array C: jax.Array G: jax.Array R: jax.Array - xhat_0: jax.Array + μ_0: jax.Array Σ_0: jax.Array + def create_worker(α=.8, β=.2, c=.2, - R=.5, g=1.0, hhat_0=4, uhat_0=4, + R=.5, g=1.0, μ_h=4, μ_u=4, σ_h=4, σ_u=4): A = jnp.array([[α, β], - [0, 1]]) + [0, 1]]) C = jnp.array([[c], - [0]]) + [0]]) G = jnp.array([g, 0]) R = jnp.array(R) # Define initial state and covariance matrix - xhat_0 = jnp.array([[hhat_0], - [uhat_0]]) + μ_0 = jnp.array([[μ_h], + [μ_u]]) Σ_0 = jnp.array([[σ_h, 0], - [0, σ_u]]) + [0, σ_u]]) - return WorkerModel(A=A, C=C, G=G, R=R, xhat_0=xhat_0, Σ_0=Σ_0) + return WorkerModel(A=A, C=C, G=G, R=R, μ_0=μ_0, Σ_0=Σ_0) ``` Please note how the `WorkerModel` namedtuple creates all of the objects required to compute an associated state-space representation {eq}`ssrepresent`. @@ -219,15 +205,14 @@ Please note how the `WorkerModel` namedtuple creates all of the objects required This is handy, because in order to simulate a history $\{y_t, h_t\}$ for a worker, we'll want to form state space system for him/her by using the [`LinearStateSpace`](https://quanteconpy.readthedocs.io/en/latest/tools/lss.html) class. 
```{code-cell} ipython3
-# TODO write it into a function
-# Define A, C, G, R, xhat_0, Σ_0
+# Define A, C, G, R, μ_0, Σ_0
worker = create_worker()
A, C, G, R = worker.A, worker.C, worker.G, worker.R
-xhat_0, Σ_0 = worker.xhat_0, worker.Σ_0
+μ_0, Σ_0 = worker.μ_0, worker.Σ_0

# Create a LinearStateSpace object
ss = LinearStateSpace(A, C, G, jnp.sqrt(R),
-                      mu_0=xhat_0, Sigma_0=Σ_0)
+                      mu_0=μ_0, Sigma_0=Σ_0)

T = 100
seed = 1234
@@ -258,42 +243,49 @@ We accomplish this in the following code that uses the [`Kalman`](https://quanteconpy.readthedocs.io/en/latest/tools/kalman.html) class.

+Suppose the belief of firm coincides with the real distribution of $x_0$.
+
```{code-cell} ipython3
-# TODO check the names of variables
-kalman = Kalman(ss, xhat_0, Σ_0)
-Σ_t = jnp.zeros((*Σ_0.shape, T-1))
-y_hat_t = jnp.zeros(T-1)
-x_hat_t = jnp.zeros((2, T-1))
+x_hat_0, Σ_hat_0 = worker.μ_0, worker.Σ_0
+kalman = Kalman(ss, x_hat_0, Σ_hat_0)
+
+x_hat = jnp.zeros((2, T))
+Σ_hat = jnp.zeros((*Σ_0.shape, T))

+# The data y[T] isn't used because we aren't making a prediction about T+1
for t in range(1, T):
-    kalman.update(y[t])
-    x_hat, Σ = kalman.x_hat, kalman.Sigma
-    Σ_t = Σ_t.at[:, :, t-1].set(Σ)
-    x_hat_t = x_hat_t.at[:, t-1].set(x_hat.reshape(-1))
-    y_hat_t = y_hat_t.at[t-1].set((worker.G @ x_hat).item())
+    kalman.update(y[t-1])
+    x_hat_t, Σ_hat_t = kalman.x_hat, kalman.Sigma
+    # x_hat_t = E(x_t | y^{t-1})
+    x_hat = x_hat.at[:, t].set(x_hat_t.reshape(-1))
+    Σ_hat = Σ_hat.at[:, :, t].set(Σ_hat_t)
+    y_hat = y_hat.at[t].set((worker.G @ x_hat_t).item())

# Add the initial
-x_hat_t = jnp.concatenate((xhat_0, x_hat_t), axis=1)
-Σ_t = jnp.concatenate((worker.Σ_0[:, :, np.newaxis], Σ_t), axis=2)
-u_hat_t = x_hat_t[1, :]
+x_hat = x_hat.at[:, 0].set(x_hat_0.reshape(-1))
+Σ_hat = Σ_hat.at[:, :, 0].set(Σ_hat_0)
+
+# Compute other variables
+y_hat = worker.G @ x_hat
+u_hat = x_hat[1, :]
```

-For a draw of $h_0, u_0$, we plot $E[y_t] = G \hat x_t $ where $\hat x_t = E [x_t | y^{t-1}]$.
+For a draw of $h_0, u_0$, we plot $E[y_t | y^{t-1}] = G \hat x_t $ where $\hat x_t = E [x_t | y^{t-1}]$.

-We also plot $\hat u_t = E [u_0 | y^{t-1}]$, which is the firm inference about a worker's hard-wired "work ethic" $u_0$, conditioned on information $y^{t-1}$ that it has about him or her coming into period $t$.
+We also plot $\hat u_t = E [u_t | y^{t-1}]$, which is the firm inference about a worker's hard-wired "work ethic" $u_0$, conditioned on information $y^{t-1}$ that it has about him or her coming into period $t$.

-We can watch as the firm's inference $E [u_0 | y^{t-1}]$ of the worker's work ethic converges toward the hidden $u_0$, which is not directly observed by the firm.
+We can watch as the firm's inference $E [u_t | y^{t-1}]$ of the worker's work ethic converges toward the hidden $u_0$, which is not directly observed by the firm.
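A payoff of the innovations representation that the lecture does not pursue: since $y_t \mid y^{t-1} \sim {\mathcal N}(G \hat x_t, \; G \Sigma_t G^\top + R)$, the arrays assembled above also deliver the log likelihood of an output history as a sum of univariate Gaussian terms. Here is a sketch of that factorization, assuming the `y`, `x_hat`, and `Σ_hat` computed in the cell above; the function name is a hypothetical choice, not part of the lecture's code.

```python
import numpy as np
from scipy.stats import norm

def innovations_log_likelihood(y, x_hat, Σ_hat, G, R):
    logL = 0.0
    for t in range(x_hat.shape[1]):
        mean = float(G @ x_hat[:, t])                   # E[y_t | y^{t-1}]
        var = float(G @ Σ_hat[:, :, t] @ G) + float(R)  # innovation variance
        logL += norm.logpdf(float(y[t]), loc=mean, scale=np.sqrt(var))
    return logL
```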
```{code-cell} ipython3
fig, ax = plt.subplots(1, 2)

-ax[0].plot(y_hat_t, label=r'$E[y_t| y^{t-1}]$')
+ax[0].plot(y_hat, label=r'$E[y_t| y^{t-1}]$')
ax[0].set_xlabel('Time')
-ax[0].set_ylabel(r'$E[y_t]$')
-ax[0].set_title(r'$E[y_t]$ over time')
+ax[0].set_ylabel(r'$E[y_t | y^{t-1}]$')
+ax[0].set_title(r'$E[y_t | y^{t-1}]$ over time')
ax[0].legend()

-ax[1].plot(u_hat_t, label=r'$E[u_t|y^{t-1}]$')
+ax[1].plot(u_hat, label=r'$E[u_t|y^{t-1}]$')
ax[1].axhline(y=u_0, color='grey',
              linestyle='dashed', label=fr'$u_0={u_0:.2f}$')
ax[1].set_xlabel('Time')
@@ -310,11 +302,11 @@ plt.show()
Let's look at $\Sigma_0$ and $\Sigma_T$ in order to see how much the firm learns about the hidden state during the horizon we have set.

```{code-cell} ipython3
-print(Σ_t[:, :, 0])
+print(Σ_hat[:, :, 0])
```

```{code-cell} ipython3
-print(Σ_t[:, :, -1])
+print(Σ_hat[:, :, -1])
```

Evidently, entries in the conditional covariance matrix become smaller over time.
@@ -323,11 +315,11 @@ It is enlightening to portray how conditional covariance matrices $\Sigma_t$ e

```{code-cell} ipython3
# Create a grid of points for contour plotting
-h_range = np.linspace(x_hat_t[0, :].min()-0.5*Σ_t[0, 0, 1],
-                      x_hat_t[0, :].max()+0.5*Σ_t[0, 0, 1], 100)
-u_range = np.linspace(x_hat_t[1, :].min()-0.5*Σ_t[1, 1, 1],
-                      x_hat_t[1, :].max()+0.5*Σ_t[1, 1, 1], 100)
-h, u = np.meshgrid(h_range, u_range)
+h_range = jnp.linspace(x_hat[0, :].min()-0.5*Σ_hat[0, 0, 1],
+                       x_hat[0, :].max()+0.5*Σ_hat[0, 0, 1], 100)
+u_range = jnp.linspace(x_hat[1, :].min()-0.5*Σ_hat[1, 1, 1],
+                       x_hat[1, :].max()+0.5*Σ_hat[1, 1, 1], 100)
+h, u = jnp.meshgrid(h_range, u_range)

# Create a figure with subplots for each time step
fig, axs = plt.subplots(1, 3, figsize=(12, 7))
@@ -335,8 +327,8 @@ fig, axs = plt.subplots(1, 3, figsize=(12, 7))
# Iterate through each time step
for i, t in enumerate(np.linspace(0, T-1, 3, dtype=int)):
    # Create a multivariate normal distribution with x_hat and Σ at time step t
-    mu = x_hat_t[:, t]
-    cov = Σ_t[:, :, t]
+    mu = x_hat[:, t]
+    cov = Σ_hat[:, :, t]
    mvn = multivariate_normal(mean=mu, cov=cov)

    # Evaluate the multivariate normal PDF on the grid
@@ -370,16 +362,17 @@ Here is one way to do this.

```{code-cell} ipython3
# For example, we might want h_0 = 0 and u_0 = 4
-mu_0 = np.array([0.0, 4.0])
+ss_μ_0 = jnp.array([0.0, 4.0])

# Create a LinearStateSpace object with Sigma_0 as a matrix of zeros
-ss_example = LinearStateSpace(A, C, G, np.sqrt(R), mu_0=mu_0,
+ss_example = LinearStateSpace(A, C, G, np.sqrt(R), mu_0=ss_μ_0,
                              # This line forces exact h_0=0 and u_0=4
                              Sigma_0=np.zeros((2, 2))
                             )

T = 100
-x, y = ss_example.simulate(T)
+seed = 1234
+x, y = ss_example.simulate(T, seed)
y = y.flatten()

# Now h_0=0 and u_0=4
h_0, u_0 = x[0, 0], x[1, 0]
print('h_0 =', h_0)
print('u_0 =', u_0)
```

Another way to accomplish the same goal is to use the following code.

+However, in this way we assume that the underlying distribution of $x_0$ has mean $\mu_0 = (0, 4)^\top$.
+ ```{code-cell} ipython3 # If we want to set the initial -# h_0 = hhat_0 = 0 and u_0 = uhhat_0 = 4.0: -worker = create_worker(hhat_0=0.0, uhat_0=4.0) +# h_0 = μ_h = 0 and u_0 = μ_u = 4.0: +worker = create_worker(μ_h=0.0, μ_u=4.0) ss_example = LinearStateSpace(A, C, G, np.sqrt(R), - # This line takes h_0=hhat_0 and u_0=uhhat_0 - mu_0=worker.xhat_0, - # This line forces exact h_0=hhat_0 and u_0=uhhat_0 + # This line takes h_0=μ_h and u_0=μ_u + mu_0=worker.μ_0, + # This line forces exact h_0=μ_h and u_0=μ_u Sigma_0=np.zeros((2, 2)) ) T = 100 -x, y = ss_example.simulate(T) +seed = 1234 +x, y = ss_example.simulate(T, seed) y = y.flatten() -# Now h_0 and u_0 will be exactly hhat_0 +# Now h_0 and u_0 will be exactly μ_0 h_0, u_0 = x[0, 0], x[1, 0] print('h_0 =', h_0) print('u_0 =', u_0) @@ -415,32 +411,40 @@ print('u_0 =', u_0) For this worker, let's generate a plot like the one above. ```{code-cell} ipython3 -# First we compute the Kalman filter with initial xhat_0 and Σ_0 -kalman = Kalman(ss, xhat_0, Σ_0) -Σ_t = [] -y_hat_t = np.zeros(T-1) -u_hat_t = np.zeros(T-1) +# First we compute the Kalman filter with initial x_hat_0 and Σ_hat_0 +x_hat_0, Σ_hat_0 = worker.μ_0, worker.Σ_0 +kalman = Kalman(ss, x_hat_0, Σ_hat_0) + +x_hat = jnp.zeros((2, T)) +Σ_hat = jnp.zeros((*Σ_0.shape, T)) # Then we iteratively update the Kalman filter class using # observation y based on the linear state model above: for t in range(1, T): - kalman.update(y[t]) - x_hat, Σ = kalman.x_hat, kalman.Sigma - Σ_t.append(Σ) - [y_hat_t[t-1]] = worker.G @ x_hat - [u_hat_t[t-1]] = x_hat[1] + kalman.update(y[t-1]) + x_hat_t, Σ_hat_t = kalman.x_hat, kalman.Sigma + + x_hat = x_hat.at[:, t].set(x_hat_t.reshape(-1)) + Σ_hat = Σ_hat.at[:, :, t].set(Σ_hat_t) + +# Add the initial +x_hat = x_hat.at[:, 0].set(x_hat_0.reshape(-1)) +Σ_hat = Σ_hat.at[:, :, 0].set(Σ_hat_0) +# Compute other variables +y_hat = worker.G @ x_hat +u_hat = x_hat[1, :] -# Generate plots for y_hat_t and u_hat_t +# Generate plots for y_hat and u_hat fig, ax = plt.subplots(1, 2) -ax[0].plot(y_hat_t, label=r'$E[y_t| y^{t-1}]$') +ax[0].plot(y_hat, label=r'$E[y_t| y^{t-1}]$') ax[0].set_xlabel('Time') -ax[0].set_ylabel(r'$E[y_t]$') -ax[0].set_title(r'$E[y_t]$ over time') +ax[0].set_ylabel(r'$E[y_t | y^{t-1}]$') +ax[0].set_title(r'$E[y_t | y^{t-1}]$ over time') ax[0].legend() -ax[1].plot(u_hat_t, label=r'$E[u_t|y^{t-1}]$') +ax[1].plot(u_hat, label=r'$E[u_t|y^{t-1}]$') ax[1].axhline(y=u_0, color='grey', linestyle='dashed', label=fr'$u_0={u_0:.2f}$') ax[1].set_xlabel('Time') @@ -460,7 +464,7 @@ Here is an example. ```{code-cell} ipython3 # We can set these parameters when creating a worker -- just like classes! 
hard_working_worker = create_worker(α=.4, β=.8, - hhat_0=7.0, uhat_0=100, σ_h=2.5, σ_u=3.2) + μ_h=7.0, μ_u=100, σ_h=2.5, σ_u=3.2) print(hard_working_worker) ``` @@ -474,43 +478,49 @@ This shows that the filter is gradually teaching the worker and firm about the w ```{code-cell} ipython3 :tags: [hide-input] -def simulate_workers(worker, T, ax, mu_0=None, Sigma_0=None, - diff=True, name=None, title=None): +def simulate_workers(worker, T, ax, ss_μ=None, ss_Σ=None, + diff=True, name=None, title=None, seed=1234): A, C, G, R = worker.A, worker.C, worker.G, worker.R - xhat_0, Σ_0 = worker.xhat_0, worker.Σ_0 + μ_0, Σ_0 = worker.μ_0, worker.Σ_0 - if mu_0 is None: - mu_0 = xhat_0 - if Sigma_0 is None: - Sigma_0 = worker.Σ_0 + if ss_μ is None: + ss_μ = μ_0 + if ss_Σ is None: + ss_Σ = Σ_0 - ss = LinearStateSpace(A, C, G, np.sqrt(R), - mu_0=mu_0, Sigma_0=Sigma_0) + ss = LinearStateSpace(A, C, G, jnp.sqrt(R), + mu_0=ss_μ, Sigma_0=ss_Σ) - x, y = ss.simulate(T) + x, y = ss.simulate(T, seed) y = y.flatten() u_0 = x[1, 0] # Compute Kalman filter - kalman = Kalman(ss, xhat_0, Σ_0) - Σ_t = [] - - y_hat_t = np.zeros(T) - u_hat_t = np.zeros(T) + x_hat_0, Σ_hat_0 = μ_0, Σ_0 + kalman = Kalman(ss, x_hat_0, Σ_hat_0) + Σ_hat = jnp.zeros((*Σ_0.shape, T)) + x_hat = jnp.zeros((2, T)) + + for t in range(1, T): + kalman.update(y[t-1]) + x_hat_t, Σ_hat_t = kalman.x_hat, kalman.Sigma + + x_hat = x_hat.at[:, t].set(x_hat_t.reshape(-1)) + Σ_hat = Σ_hat.at[:, :, t].set(Σ_hat_t) + + # Add the initial + x_hat = x_hat.at[:, 0].set(x_hat_0.reshape(-1)) + Σ_hat = Σ_hat.at[:, :, 0].set(Σ_hat_0) - for i in range(T): - kalman.update(y[i]) - x_hat, Σ = kalman.x_hat, kalman.Sigma - Σ_t.append(Σ) - y_hat_t[i] = (worker.G @ x_hat).item() - u_hat_t[i] = x_hat[1].item() + # Compute other variables + y_hat = G @ x_hat + u_hat = x_hat[1, :] if diff : - title = ('Difference between inferred and true work ethic over time' - if title is None else title) + title = ('Difference between inferred and true work ethic over time' if title is None else title) - ax.plot(u_hat_t - u_0, alpha=.5) + ax.plot(u_hat - u_0, alpha=.5) ax.axhline(y=0, color='grey', linestyle='dashed') ax.set_xlabel('Time') ax.set_ylabel(r'$E[u_t|y^{t-1}] - u_0$') @@ -522,7 +532,7 @@ def simulate_workers(worker, T, ax, mu_0=None, Sigma_0=None, title = ('Inferred work ethic over time' if title is None else title) - u_hat_plot = ax.plot(u_hat_t, label=label_line) + u_hat_plot = ax.plot(u_hat, label=label_line) ax.axhline(y=u_0, color=u_hat_plot[0].get_color(), linestyle='dashed', alpha=0.5) ax.set_xlabel('Time') @@ -536,8 +546,8 @@ T = 50 fig, ax = plt.subplots(figsize=(7, 7)) for i in range(num_workers): - worker = create_worker(uhat_0=4+2*i) - simulate_workers(worker, T, ax) + worker = create_worker(μ_u=4+2*i) + simulate_workers(worker, T, ax, seed=1234+i) ax.set_ylim(ymin=-2, ymax=2) plt.show() ``` @@ -548,15 +558,16 @@ plt.show() T = 50 fig, ax = plt.subplots(figsize=(7, 7)) -uhat_0s = [2, -2, 1] +μ_us = [2, -2, 1] αs = [0.2, 0.3, 0.5] βs = [0.1, 0.9, 0.3] -for i, (uhat_0, α, β) in enumerate(zip(uhat_0s, αs, βs)): - worker = create_worker(uhat_0=uhat_0, α=α, β=β) +for i, (μ_u, α, β) in enumerate(zip(μ_us, αs, βs)): + worker = create_worker(μ_u=μ_u, α=α, β=β) simulate_workers(worker, T, ax, # By setting diff=False, it will give u_t - diff=False, name=r'$u_{{{}, t}}$'.format(i)) + diff=False, name=r'$u_{{{}, t}}$'.format(i), + seed=1234+i) ax.legend(bbox_to_anchor=(1, 0.5)) plt.show() @@ -568,19 +579,20 @@ plt.show() T = 50 fig, ax = plt.subplots(figsize=(7, 7)) -# These two 
lines set u_0=1 and h_0=2 for all workers
-mu_0 = np.array([[1],
-                 [2]])
-Sigma_0 = np.zeros((2,2))
+# These two lines set the generated u_0=1 and h_0=2 for all workers
+ss_μ = jnp.array([[1],
+                  [2]])
+ss_Σ = jnp.zeros((2,2))

-uhat_0s = [2, -2, 1]
+μ_us = [2, -2, 1]
αs = [0.2, 0.3, 0.5]
βs = [0.1, 0.9, 0.3]

-for i, (uhat_0, α, β) in enumerate(zip(uhat_0s, αs, βs)):
-    worker = create_worker(uhat_0=uhat_0, α=α, β=β)
-    simulate_workers(worker, T, ax, mu_0=mu_0, Sigma_0=Sigma_0,
-                     diff=False, name=r'$u_{{{}, t}}$'.format(i))
+for i, (μ_u, α, β) in enumerate(zip(μ_us, αs, βs)):
+    worker = create_worker(μ_u=μ_u, α=α, β=β)
+    simulate_workers(worker, T, ax, ss_μ=ss_μ, ss_Σ=ss_Σ,
+                     diff=False, name=r'$u_{{{}, t}}$'.format(i),
+                     seed=1234+i)

# This controls the boundary of plots
ax.set_ylim(ymin=-3, ymax=3)
@@ -594,20 +606,20 @@ plt.show()
T = 50
fig, ax = plt.subplots(figsize=(7, 7))

-mu_0_1 = np.array([[1],
-                   [100]])
-mu_0_2 = np.array([[1],
-                   [30]])
-Sigma_0 = np.zeros((2,2))
+ss_μ_1 = np.array([[1],
+                   [100]])
+ss_μ_2 = np.array([[1],
+                   [30]])
+ss_Σ = np.zeros((2,2))

-uhat_0s = 100
+μ_us = 100
αs = 0.5
βs = 0.3

-worker = create_worker(uhat_0=uhat_0, α=α, β=β)
-simulate_workers(worker, T, ax, mu_0=mu_0_1, Sigma_0=Sigma_0,
+worker = create_worker(μ_u=μ_us, α=αs, β=βs)
+simulate_workers(worker, T, ax, ss_μ=ss_μ_1, ss_Σ=ss_Σ,
                 diff=False, name=r'Hard-working worker')
-simulate_workers(worker, T, ax, mu_0=mu_0_2, Sigma_0=Sigma_0,
+simulate_workers(worker, T, ax, ss_μ=ss_μ_2, ss_Σ=ss_Σ,
                 diff=False,
                 title='A hard-working worker and a less hard-working worker',
                 name=r'Normal worker')

From d3912a0d6d3a9466f6265f749613fb8286ef6fd1 Mon Sep 17 00:00:00 2001
From: Kenko LI
Date: Fri, 10 Oct 2025 17:52:50 +0900
Subject: [PATCH 3/6] fix grammar mistakes and typos

---
 lectures/kalman_2.md | 34 +++++++++++++++++-----------------
 1 file changed, 17 insertions(+), 17 deletions(-)

diff --git a/lectures/kalman_2.md b/lectures/kalman_2.md
index f306b95ed..efd8b53cc 100644
--- a/lectures/kalman_2.md
+++ b/lectures/kalman_2.md
@@ -68,7 +68,7 @@ mpl.rcParams['text.latex.preamble'] = r'\usepackage{{amsmath}}'

A representative worker is permanently employed at a firm.

-The workers' output is described by the following dynamic process:
+The worker's output is described by the following dynamic process:

```{math}
:label: worker_model
@@ -166,7 +166,7 @@ x_t = \begin{bmatrix} h_{t} \cr u_{t} \end{bmatrix} , \quad
                  0 & \sigma_{u,0} \end{bmatrix}
```

-To compute the firm's wage setting policy, we first we create a `NamedTuple` to store the parameters of the model
+To compute the firm's wage-setting policy, we first create a `NamedTuple` to store the parameters of the model

```{code-cell} ipython3
class WorkerModel(NamedTuple):
@@ -202,7 +202,7 @@ def create_worker(α=.8, β=.2, c=.2,

Please note how the `WorkerModel` namedtuple creates all of the objects required to compute an associated state-space representation {eq}`ssrepresent`.

-This is handy, because in order to simulate a history $\{y_t, h_t\}$ for a worker, we'll want to form state space system for him/her by using the [`LinearStateSpace`](https://quanteconpy.readthedocs.io/en/latest/tools/lss.html) class.
+This is handy, because in order to simulate a history $\{y_t, h_t\}$ for a worker, we'll want to form a state space system for him/her by using the [`LinearStateSpace`](https://quanteconpy.readthedocs.io/en/latest/tools/lss.html) class.
```{code-cell} ipython3 # Define A, C, G, R, μ_0, Σ_0 @@ -241,9 +241,9 @@ y_{t} & = G \hat x_t + a_t where $K_t$ is the Kalman gain matrix at time $t$. -We accomplish this in the following code that uses the [`Kalman`](https://quanteconpy.readthedocs.io/en/latest/tools/kalman.html) class. +We accomplish this in the following code that uses the [`Kalman`](https://quanteconpy.readthedocs.io/en/latest/tools/kalman.html) class. -Suppose the belief of firm coincides with the real distribution of $x_0$. +Suppose the belief of the firm coincides with the real distribution of $x_0$. ```{code-cell} ipython3 x_hat_0, Σ_hat_0 = worker.μ_0, worker.Σ_0 @@ -259,7 +259,6 @@ for t in range(1, T): # x_hat_t = E(x_t | y^{t-1}) x_hat = x_hat.at[:, t].set(x_hat_t.reshape(-1)) Σ_hat = Σ_hat.at[:, :, t].set(Σ_hat_t) - y_hat = y_hat.at[t].set((worker.G @ x_hat_t).item()) # Add the initial x_hat = x_hat.at[:, 0].set(x_hat_0.reshape(-1)) @@ -270,11 +269,11 @@ y_hat = worker.G @ x_hat u_hat = x_hat[1, :] ``` -For a draw of $h_0, u_0$, we plot $E[y_t | y^{t-1}] = G \hat x_t $ where $\hat x_t = E [x_t | y^{t-1}]$. +For a draw of $h_0, u_0$, we plot $E[y_t | y^{t-1}] = G \hat x_t $ where $\hat x_t = E [x_t | y^{t-1}]$. -We also plot $\hat u_t = E [u_t | y^{t-1}]$, which is the firm inference about a worker's hard-wired "work ethic" $u_0$, conditioned on information $y^{t-1}$ that it has about him or her coming into period $t$. +We also plot $\hat u_t = E [u_t | y^{t-1}]$, which is the firm's inference about a worker's hard-wired "work ethic" $u_0$, conditioned on information $y^{t-1}$ that it has about him or her coming into period $t$. -We can watch as the firm's inference $E [u_t | y^{t-1}]$ of the worker's work ethic converges toward the hidden $u_0$, which is not directly observed by the firm. +We can watch as the firm's inference $E [u_t | y^{t-1}]$ of the worker's work ethic converges toward the hidden $u_0$, which is not directly observed by the firm. ```{code-cell} ipython3 fig, ax = plt.subplots(1, 2) @@ -309,9 +308,9 @@ print(Σ_hat[:, :, 0]) print(Σ_hat[:, :, -1]) ``` -Evidently, entries in the conditional covariance matrix become smaller over time. +Evidently, entries in the conditional covariance matrix become smaller over time. -It is enlightening to portray how conditional covariance matrices $\Sigma_t$ evolve by plotting confidence ellipsoides around $E [x_t |y^{t-1}] $ at various $t$'s. +It is enlightening to portray how conditional covariance matrices $\Sigma_t$ evolve by plotting confidence ellipsoids around $E [x_t |y^{t-1}] $ at various $t$'s. ```{code-cell} ipython3 # Create a grid of points for contour plotting @@ -341,9 +340,9 @@ for i, t in enumerate(np.linspace(0, T-1, 3, dtype=int)): axs[i].set_xlabel(r'$h_{{{}}}$'.format(str(t+1))) axs[i].set_ylabel(r'$u_{{{}}}$'.format(str(t+1))) - cov_latex = r'$\Sigma_{{{}}}= \begin{{bmatrix}} {:.2f} & {:.2f} \\ {:.2f} & {:.2f} \end{{bmatrix}}$'.format( - t+1, cov[0, 0], cov[0, 1], cov[1, 0], cov[1, 1] - ) + cov_latex = (r'$\Sigma_{{{}}}= \begin{{bmatrix}} {:.2f} & {:.2f} \\ ' + r'{:.2f} & {:.2f} \end{{bmatrix}}$'.format( + t+1, cov[0, 0], cov[0, 1], cov[1, 0], cov[1, 1])) axs[i].text(0.33, -0.15, cov_latex, transform=axs[i].transAxes) @@ -463,7 +462,7 @@ Here is an example. ```{code-cell} ipython3 # We can set these parameters when creating a worker -- just like classes! 
-hard_working_worker = create_worker(α=.4, β=.8, 
+hard_working_worker = create_worker(α=.4, β=.8,
                                    μ_h=7.0, μ_u=100, σ_h=2.5, σ_u=3.2)
print(hard_working_worker)
```
@@ -517,8 +516,9 @@ def simulate_workers(worker, T, ax, ss_μ=None, ss_Σ=None,
    y_hat = G @ x_hat
    u_hat = x_hat[1, :]

-    if diff :
-        title = ('Difference between inferred and true work ethic over time' if title is None else title)
+    if diff:
+        title = ('Difference between inferred and true work ethic over time'
+                 if title is None else title)

        ax.plot(u_hat - u_0, alpha=.5)
        ax.axhline(y=0, color='grey', linestyle='dashed')

From 3264d736315cc18396d29086a41156373f4ffc89 Mon Sep 17 00:00:00 2001
From: Kenko LI
Date: Mon, 13 Oct 2025 15:19:27 +0900
Subject: [PATCH 4/6] add metadata cell to figures and improve styling

---
 lectures/kalman_2.md | 70 +++++++++++++++++++++++++++++++++++++-------
 1 file changed, 60 insertions(+), 10 deletions(-)

diff --git a/lectures/kalman_2.md b/lectures/kalman_2.md
index efd8b53cc..1eed311ce 100644
--- a/lectures/kalman_2.md
+++ b/lectures/kalman_2.md
@@ -28,7 +28,15 @@ kernelspec:
```{contents} Contents
:depth: 2
```

+```{admonition} GPU
+:class: warning
+
+This lecture is accelerated via [hardware](status:machine-details) that has access to a GPU and JAX for GPU programming.
+
+Free GPUs are available on Google Colab. To use this option, please click on the play icon top right, select Colab, and set the runtime environment to include a GPU.
+
+Alternatively, if you have your own GPU, you can follow the [instructions](https://github.com/google/jax) for installing JAX with GPU support. If you would like to install JAX running on the `cpu` only, you can use `pip install jax[cpu]`.
+```

In this quantecon lecture {doc}`A First Look at the Kalman filter `, we used
a Kalman filter to estimate locations of a rocket.
@@ -226,7 +234,7 @@ Next, to compute the firm's policy for setting the log wage based on the informa

In particular, we want to compute all of the objects in an "innovation representation".

-## An Innovations Representation
+## An innovations representation

We have all the objects in hand required to form an innovations representation for the output process $\{y_t\}_{t=0}^T$ for a worker.
@@ -276,6 +284,13 @@ We also plot $\hat u_t = E [u_t | y^{t-1}]$, which is the firm's inference about

We can watch as the firm's inference $E [u_t | y^{t-1}]$ of the worker's work ethic converges toward the hidden $u_0$, which is not directly observed by the firm.

```{code-cell} ipython3
+---
+mystnb:
+  figure:
+    caption: |
+      Inferred variables using the Kalman filter
+    name: fig_infer
+---
fig, ax = plt.subplots(1, 2)

ax[0].plot(y_hat, label=r'$E[y_t| y^{t-1}]$')
@@ -296,7 +311,7 @@ fig.tight_layout()
plt.show()
```

-## Some Computational Experiments
+## Some computational experiments

Let's look at $\Sigma_0$ and $\Sigma_T$ in order to see how much the firm learns about the hidden state during the horizon we have set.
@@ -313,6 +328,13 @@ Evidently, entries in the conditional covariance matrix become smaller over time

It is enlightening to portray how conditional covariance matrices $\Sigma_t$ evolve by plotting confidence ellipsoids around $E [x_t |y^{t-1}] $ at various $t$'s.

```{code-cell} ipython3
+---
+mystnb:
+  figure:
+    caption: |
+      Confidence ellipsoids as beliefs are updated
+    name: fig_ellipsoid
+---
# Create a grid of points for contour plotting
h_range = jnp.linspace(x_hat[0, :].min()-0.5*Σ_hat[0, 0, 1],
                       x_hat[0, :].max()+0.5*Σ_hat[0, 0, 1], 100)
@@ -410,6 +432,13 @@ print('u_0 =', u_0)

For this worker, let's generate a plot like the one above.

```{code-cell} ipython3
+---
+mystnb:
+  figure:
+    caption: |
+      Inferred variables with fixed initial values
+    name: fig_infer_fix
+---
# First we compute the Kalman filter with initial x_hat_0 and Σ_hat_0
x_hat_0, Σ_hat_0 = worker.μ_0, worker.Σ_0
kalman = Kalman(ss, x_hat_0, Σ_hat_0)
@@ -517,30 +546,30 @@ def simulate_workers(worker, T, ax, ss_μ=None, ss_Σ=None,
    u_hat = x_hat[1, :]

    if diff:
-        title = ('Difference between inferred and true work ethic over time'
-                 if title is None else title)
-
        ax.plot(u_hat - u_0, alpha=.5)
        ax.axhline(y=0, color='grey', linestyle='dashed')
        ax.set_xlabel('Time')
        ax.set_ylabel(r'$E[u_t|y^{t-1}] - u_0$')
-        ax.set_title(title)

    else:
        label_line = (r'$E[u_t|y^{t-1}]$' if name is None
                      else name)
-        title = ('Inferred work ethic over time'
-                 if title is None else title)

        u_hat_plot = ax.plot(u_hat, label=label_line)
        ax.axhline(y=u_0, color=u_hat_plot[0].get_color(),
                   linestyle='dashed', alpha=0.5)
        ax.set_xlabel('Time')
        ax.set_ylabel(r'$E[u_t|y^{t-1}]$')
-        ax.set_title(title)
```

```{code-cell} ipython3
+---
+mystnb:
+  figure:
+    caption: |
+      Difference between inferred and true work ethic
+    name: fig_diff
+---
num_workers = 3
T = 50
fig, ax = plt.subplots(figsize=(7, 7))
@@ -553,6 +582,13 @@ plt.show()
```

```{code-cell} ipython3
+---
+mystnb:
+  figure:
+    caption: |
+      Inferred work ethic over time
+    name: fig_ethic
+---
# We can also generate plots of u_t:

T = 50
@@ -574,6 +610,13 @@ plt.show()
```

```{code-cell} ipython3
+---
+mystnb:
+  figure:
+    caption: |
+      Inferred work ethic with fixed initial values
+    name: fig_ethic_fix
+---
# We can also use exact u_0=1 and h_0=2 for all workers

T = 50
@@ -601,6 +644,13 @@ plt.show()
```

```{code-cell} ipython3
+---
+mystnb:
+  figure:
+    caption: |
+      Inferred work ethic with two fixed initial values
+    name: fig_ethic_two
+---
# We can generate a plot for only one of the workers:

T = 50
@@ -629,7 +679,7 @@ ax.legend(bbox_to_anchor=(1, 0.5))
plt.show()
```

-## Future Extensions
+## Future extensions

We can do lots of enlightening experiments by creating new types of workers and letting the firm learn about their hidden (to the firm) states by observing just their output histories.

From 76c649fe5f5596c578604e44e10fc35c7aef89af Mon Sep 17 00:00:00 2001
From: Kenko LI
Date: Mon, 13 Oct 2025 15:44:18 +0900
Subject: [PATCH 5/6] fix typos

---
 lectures/kalman_2.md | 8 +++-----
 1 file changed, 3 insertions(+), 5 deletions(-)

diff --git a/lectures/kalman_2.md b/lectures/kalman_2.md
index 1eed311ce..8f185888f 100644
--- a/lectures/kalman_2.md
+++ b/lectures/kalman_2.md
@@ -104,7 +104,7 @@ The worker is permanently attached to the firm and so works for the same firm a

At the beginning of time $0$, the firm observes neither the worker's innate initial human capital $h_0$ nor its hard-wired permanent effort level $u_0$.

-The firm believes that $u_0$ for a particular worker is drawn from a Gaussian probability distribution, and so is described by $u_0 \sim {\mathcal N}(\hat u_0, \sigma_{u,0})$.
+The firm believes that $u_0$ for a particular worker is drawn from a Gaussian probability distribution, and so is described by $u_0 \sim {\mathcal N}(\mu_{u, 0}, \sigma_{u,0})$.
The $h_t$ part of a worker's "type" moves over time, but the effort component of the worker's type is $u_t = u_0$. @@ -127,7 +127,7 @@ $$ and at time $0$ pays the worker a log wage equal to the unconditional mean of $y_0$: $$ -w_0 = g \hat h_0 +w_0 = g \mu_{h, 0} $$ In using this payment rule, the firm is taking into account that the worker's log output today is partly due @@ -251,10 +251,8 @@ where $K_t$ is the Kalman gain matrix at time $t$. We accomplish this in the following code that uses the [`Kalman`](https://quanteconpy.readthedocs.io/en/latest/tools/kalman.html) class. -Suppose the belief of the firm coincides with the real distribution of $x_0$. - ```{code-cell} ipython3 -x_hat_0, Σ_hat_0 = worker.μ_0, worker.Σ_0 +x_hat_0, Σ_hat_0 = worker.μ_0, worker.Σ_0 # First guess of the firm kalman = Kalman(ss, x_hat_0, Σ_hat_0) x_hat = jnp.zeros((2, T)) From 70e74dff1f0ded83009b67ba0348b0f369815277 Mon Sep 17 00:00:00 2001 From: Kenko LI Date: Mon, 13 Oct 2025 16:06:06 +0900 Subject: [PATCH 6/6] adjust import --- lectures/kalman_2.md | 1 - 1 file changed, 1 deletion(-) diff --git a/lectures/kalman_2.md b/lectures/kalman_2.md index 8f185888f..75882ce3f 100644 --- a/lectures/kalman_2.md +++ b/lectures/kalman_2.md @@ -64,7 +64,6 @@ import numpy as np import jax import jax.numpy as jnp from quantecon import Kalman, LinearStateSpace -from collections import namedtuple from typing import NamedTuple from scipy.stats import multivariate_normal import matplotlib as mpl
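The closing section of the lecture invites experiments with new types of workers, and the `NamedTuple` structure these patches put in place makes such variations cheap. For instance, here is a hypothetical variant, not part of the lecture's code, in which effort decays at rate $\delta$ instead of staying fixed, so that $u_{t+1} = \delta u_t$. Everything downstream (the `LinearStateSpace` simulation, the `Kalman` updates, `simulate_workers`) consumes only the fields of `WorkerModel` and so should run unchanged.

```python
import jax.numpy as jnp

def create_decaying_worker(α=.8, β=.2, c=.2, δ=.95,
                           R=.5, g=1.0, μ_h=4, μ_u=4,
                           σ_h=4, σ_u=4):
    """Like create_worker, but effort decays: u_{t+1} = δ u_t."""
    A = jnp.array([[α, β],
                   [0, δ]])   # second diagonal entry is δ rather than 1
    C = jnp.array([[c],
                   [0]])
    G = jnp.array([g, 0])
    R = jnp.array(R)
    μ_0 = jnp.array([[μ_h],
                     [μ_u]])
    Σ_0 = jnp.array([[σ_h, 0],
                     [0, σ_u]])
    return WorkerModel(A=A, C=C, G=G, R=R, μ_0=μ_0, Σ_0=Σ_0)
```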