% Source: TeachingReps/Stochastic-Processes — lecture-17.tex
% !TEX spellcheck = en_US
% !TEX spellcheck = LaTeX
\documentclass[a4paper,10pt,english]{article}
\input{header}
\title{Lecture 17 : Limiting Probabilities and Uniformization }%Time Reversibility of Discrete Time Markov Chains}
\author{}
\begin{document}
\maketitle
\section{Jump chain}
Let $\{S_n: n \in \N_0\}$ denote the jump times of a homogeneous CTMC $\{X(t) \in E, t \geqslant 0\}$, and let the transition probability matrix of the embedded Markov chain $\{X^J_n = X(S_n): n \in \N_0\}$ be denoted by $P = \{p_{ij}: i, j \in E\}$, where $p_{ii} = 0$ for all $i \in E$.
%\begin{defn}
If the embedded Markov chain is irreducible and positive recurrent then the associated stationary distribution $\alpha$
%of the embedded Markov chain
is the unique non-negative solution to
\begin{xalignat}{3}
\label{eq:StationaryDistribution}
&\alpha = \alpha P,&&%\alpha_i \geq 0 \text{ for} i \in E,&&
\sum_{i \in E}\alpha_i = 1.
\end{xalignat}
%\end{defn}
We denote the mean of inter-arrival time by
\begin{align*}
\frac{1}{\nu_i} = \E[T_{n}|X(S_{n-1}) = i].
\end{align*}
%\begin{defn}
The \textbf{first return time} to a state $i$ for a stochastic process $\{X(t), t \geqslant 0\}$ with $X(0) = i$ is
\begin{align*}
T_{ii} = \inf\{t > S_1 : X(t) = X(0) = i\}.
\end{align*}
%\end{defn}
\begin{thm}
For a homogeneous CTMC whose embedded Markov chain is irreducible and positive recurrent with stationary distribution $\alpha$, the following holds for the limiting probabilities:
\begin{align}
\label{eq:LimitingProbability}
\pi_j \triangleq \lim_{t \rightarrow \infty } P_{ij}(t)= \frac{\alpha_j / \nu_j}{\sum_j \alpha_j /\nu_j}.
\end{align}
From~\eqref{eq:LimitingProbability} and~\eqref{eq:StationaryDistribution}, we see that $\{\pi_j: j \in E\}$ is the unique non-negative solution to
\begin{xalignat}{3}
&\nu_j\pi_j=\sum_{i \in E} \nu_i\pi_iP_{ij},&&\sum_{j \in E} \pi_j =1.
\end{xalignat}
\end{thm}
\begin{proof}
For any CTMC on state space $E$, return to a state $j \in E$ is a renewal process.
We can define an alternating renewal process with on and off times characterized by the time CTMC spends in state $j$ and otherwise respectively.
Therefore, by alternating delayed renewal process theory, we have
\begin{align*}
\lim_{t \to \infty}\Pr\{ \text{CTMC is on at time } t\} = \lim_{t \to \infty}P_{ij}(t) = \frac{\E \tau_j }{\E T_{jj}}.
\end{align*}
The time spent in state $j \in E$ during the $k$th visit to this state is defined by
\begin{align*}
\tau_j(k) = \inf\{ t > 0: X(S_{n_k} + t) \neq j\},
\end{align*}
where $S_{n_k}$ denotes the jump time of the $k$th entrance to state $j$.
Number of visits to state $j$ in the first $m$ transitions of the CTMC is defined by
\begin{align*}
N_j(m) = \sum_{l=1}^m1_{\{X(S_l) = j\}}.
\end{align*}
The proportion of time in state $j$ during the first $m$ transitions of the CTMC is
\begin{align*}
\beta(m) &= \frac{\frac{N_j(m)}{m}\sum_{k=1}^{N_j(m)}\frac{\tau_j(k)}{N_j(m)}}{\sum_j \frac{N_j(m)}{m}\sum_{k=1}^{N_j(m)}\frac{\tau_j(k)}{N_j(m)}}.%,\\
%&= \frac{\frac{N_i(m)}{m}\sum_{j=1}^{N_i(m)}Y_i(j)}{\sum_l \frac{N_i(m)}{m} \sum_{j=1}^{N_i(m)}Y_i(j) }.\\
\end{align*}
Since $N_j(m)\rightarrow \infty$ as $m \rightarrow \infty$, it follows from the strong law of large numbers that
\begin{xalignat*}{3}
&\lim_{m \to \infty}\frac{\sum_{k=1}^{N_j(m)}\tau_j(k)}{N_j(m)} = \E \tau_j, && \lim_{m \to \infty}\frac{N_j(m)}{m} = \alpha_j.
\end{xalignat*}
%and $ (E[\text{number of transitions between visits to state }i])^{-1}=\alpha_i$. Letting $m \rightarrow \infty$, result follows.
%\begin{align*}
%\lim_{t \to \infty}\Pr\{ \text{CTMC is on at time } t | X(0) = i\} = \lim_{t \to \infty}P_{ij}(t) = \lim_{t \to \infty}\frac{\Pr\{\text{CTMC is on during time }[0,t] |X(0) = t\}}{t} = \frac{\E \tau_j }{\E T_{jj}}.
%\end{align*}
\end{proof}
\begin{cor}
The limiting probability $\pi_j$ is the long-run proportion of time the process is in state $j$.
\end{cor}
%\begin{defn} We denote \textbf{stationary distribution} of an irreducible and positive recurrent CTMC $P(t)$ by $\pi$ such that for all $t > 0$
%\begin{align}
%\label{eq:StationaryCTMCDistribution}
%\pi = \pi P(t),~~ \sum_{i}\pi_i = 1.
%\end{align}
%\end{defn}
%
%Since a Continuous Time Markov Chain (CTMC) is a semi-Markov chain with $F_{ij}(t)=1-e^{\nu_i t}$. From the theory of semi-Markov process, if the embedded Markov chain with transition probabilities $P_{ij}$ is irreducible and positive recurrent, then the limiting process,
%\begin{align}
%\label{eq:LimitingProbability}
%\pi_j \triangleq \lim_{t \rightarrow \infty } P_{ij}(t)= \frac{\alpha_j / \nu_j}{\sum_j \alpha_j /\nu_j},
%\end{align}
%where $\alpha_i$ is the stationary distribution of the embedded Markov chain. From \ref{eq:LimitingProbability} and \ref{eq:StationaryDistribution}, we see that $\{\pi_j\}$ is the unique non-negative solution to
%\begin{equation}
%\nu_j\pi_j=\sum_i \nu_i\pi_iP_{ij},~ \sum_j \pi_j =1.
%\end{equation}
\begin{lem}
For an irreducible CTMC with transition probability $P(t)$, if the initial state is chosen according to the limiting probability distribution $\pi$, then the resultant process will be stationary. That is,
\begin{equation*}
\pi P(t)=\pi,~ \text{for all}~ t > 0.
\end{equation*}
\end{lem}
\begin{proof}
\begin{flalign*}
\sum_{i}P_{ij}(t)\pi_i &=\sum_{i}P_{ij}(t)\lim_{s \rightarrow \infty}P_{ki}(s) =\lim_{s \rightarrow \infty} \sum_{i}P_{ij}(t)P_{ki}(s) =\lim_{s \rightarrow \infty}P_{kj}(t+s) =\pi_j.
\end{flalign*}
\end{proof}
\begin{rem}
Another way of arriving at the limiting probabilities is by the forward equations
\begin{equation}
P_{ij}'(t)=\sum_{k \neq j}q_{kj}P_{ik}(t)-\nu_jP_{ij}(t).
\end{equation}
Assume that the limiting probabilities exist. Then, since $P_{ij}(t)$ are necessarily bounded, it is easy to observe that $P_{ij}'(t) \rightarrow 0$ as $t \rightarrow \infty$. Letting $t \rightarrow \infty$, assuming that the limit and summation can be exchanged, we get
\begin{align}
\nu_j \pi_j = \sum_{k\not=j} q_{kj} \pi_k = \sum_k \nu_k P_{kj} \pi_k.
\end{align}
Letting $\gamma_j :=\pi_j \nu_j$, we see that $\gamma_j$ satisfies the relations for the steady state equation of the embedded Markov chain, but for a scale factor. Hence $\gamma_j = \frac{\alpha_j}{\sum_k \alpha_k}$, and we obtain the expression for $\pi_j$.
\end{rem}
\begin{rem}
In any interval $(0,t)$, the number of transitions into state $j$ must equal, to within 1, the number of transitions out of state $j$. Hence, in the long run, the rate at which transitions occur into state $j$ equals the rate at which transitions occur out of state $j$. That is,
\begin{equation}
\nu_j \pi_j =\sum_{i \neq j}\pi_i q_{ij}.
\end{equation}
These equations, together with the normalization condition
\begin{equation}
\sum_j \pi_j =1,
\end{equation}
are called the balance equations.
\end{rem}
\section{Uniformization}
Consider a continuous-time Markov chain in which the mean time spent in a state is the same for all states. That is, say $\nu_i=\nu$ for all states $i$. Let $N(t)$ denote the number of state transitions by time $t$. Since the amount of time spent in each state is exponential $\nu$, $\{N(t), ~ t \geq 0 \}$ is a Poisson process with parameter $\nu$. To compute the transition probabilities $P_{ij}(t)$, we can condition on $N(t)$ as follows:
\begin{flalign*}
P_{ij}(t)&=\Pr\{X(t)=j|X(0)=i\}\\
&=\sum_{n \in \mathbb{N}_0} \Pr\{X(t)=j,N(t)=n|X(0)=i\}\\
&=\sum_{n \in \mathbb{N}_0} \Pr\{ N(t)=n|X(0)=i\} \Pr\{X(t)=j|X(0)=i,N(t)=n\}.
\end{flalign*}
Hence,
\begin{equation*}
P_{ij}(t)= \sum_{n \in \mathbb{N}_0} P_{ij}^{(n)}e^{-\nu t}\frac{(\nu t)^n}{n !}.
\end{equation*}
The above equation helps to compute $P_{ij}(t)$ approximately by computing an appropriate partial sum. But its application is limited as the rates are all assumed to be equal. But it so turns out that most Markov chains can be put in that form by allowing hypothetical transitions from a state to itself.
\subsection{Uniformization step}
Consider a CTMC with bounded $\nu_i$s. Choose $\nu$ such that
\begin{equation}
\label{eq: UniformizationBound}
\nu_i \leq \nu,
\end{equation}
for all $i$. Since from each stage, the Markov chain leaves at rate $\nu_i$, we could equivalently assume that the transitions occur at a rate $\nu$ but only $\frac{\nu_i}{\nu}$ are real transitions and the remaining transitions are fictitious. Any Markov chain satisfying \eqref{eq: UniformizationBound} can be thought of as being in a process that spends an exponential amount of time with rate $\nu$ in state $i$ and then makes a transition to state $j$ with probability $P_{ij}^*$, where
\begin{equation}
P_{ij}^* = \left\{
\begin{array}{lr}
1-\frac{\nu_i}{\nu} & : j =i\\
\frac{\nu_i}{\nu}P_{ij} & : j \neq i.
\end{array}
\right.
\end{equation}
The transition probabilities are computed by
\begin{equation*}
P_{ij}(t)=\sum_{n=0}^{\infty}P_{ij}^{*n}e^{-\nu t} \frac{{(\nu t)}^n}{n!}.
\end{equation*}
This technique of uniformizing the rate at which transitions occur from each state, by introducing self transitions, is called uniformization.
\subsection{Semi Markov Processes}
Consider a stochastic process with states $0,1,2 \hdots$ such that whenever it enters state $i$,
\begin{enumerate}
\item {The next state it enters is state $j$ with probability $P_{ij}$.}
\item {Given the next state is going to be $j$, the time until the next transition from state $i$ to state $j$ has distribution $F_{ij}$. If we denote the state at time $t$ to be $Z(t)$, $\{Z(t), t \geq 0\}$ is called a Semi Markov process.}
\end{enumerate}
A discrete time Markov chain is a semi Markov process with
\begin{flalign*}
F_{ij}(t) = \left\{
\begin{array}{lr}
0 & : t \leq 1 \\
1 & : t > 1.
\end{array}
\right.
\end{flalign*}
Let $H_i$ denote the distribution of the time the semi Markov process stays in state $i$ before a transition. We have $H_i(t)= \sum_j P_{ij}F_{ij}(t)$ and $\mu_i = \int_0 ^ \infty x \, dH_i(x)$. Let $X_n$ denote the $n^{\text{th}}$ state visited. Then $\{X_n\}$ is a Markov chain with transition probability $P$, called the embedded Markov chain of the semi Markov process.
\begin{defn}If the embedded Markov chain is irreducible, then the Semi Markov process is said to be irreducible.
\end{defn}
Let $T_{ii}$ denote the time between successive transitions to state $i$. Let $\mu_{ii}=E[T_{ii}]$.
\begin{thm}
If the semi Markov process is irreducible and if $T_{ii}$ has non-lattice distribution with $\mu_{ii}< \infty$ then,
\begin{flalign*}
\pi_i=\lim_{t \rightarrow \infty}P(Z(t)=i|Z(0)=j)
\end{flalign*}
exists and is independent of the initial state. Furthermore, $\pi_i=\frac{\mu_i}{\mu_{ii}}$.
\end{thm}
\begin{cor}
If the semi-Markov process is irreducible and $\mu_{ii}<\infty$, then with probability 1,
\begin{flalign*}
\frac{\mu_i}{\mu_{ii}}=\lim_{t \rightarrow \infty} \frac{\text{amount of time spent in state } i \text{ during } [0,t]}{t}~\text{a.s.}
\end{flalign*}
\end{cor}
\begin{thm}
Suppose the conditions of the previous theorem hold and the embedded Markov chain is positive recurrent. Then $\pi_i= \frac{\alpha_i\mu_i}{\sum_{j}\alpha_j \mu_j}$.
\end{thm}
\begin{proof}
Define the notation as follows:
$Y_i(j)=$ amount of time spent in state $i$ during $j^\text{th}$ visit to that state. $i,j \geq 0$. \\
$N_i(m)=$ number of visits to state $i$ in the first $m$ transitions of the semi-Markov process.\\
The proportion of time spent in state $i$ during the first $m$ transitions is
\begin{flalign*}
\beta_i(m)&= \frac{\sum_{j=1}^{N_i(m)}Y_i(j)}{\sum_l \sum_{j=1}^{N_l(m)}Y_l(j) }\\
&= \frac{\frac{N_i(m)}{m}\cdot\frac{1}{N_i(m)}\sum_{j=1}^{N_i(m)}Y_i(j)}{\sum_l \frac{N_l(m)}{m} \cdot\frac{1}{N_l(m)}\sum_{j=1}^{N_l(m)}Y_l(j) }.
\end{flalign*}
Since $N_i(m)\rightarrow \infty$ as $m \rightarrow \infty$, it follows from the strong law of large numbers that $\frac{\sum_{j=1}^{N_i(m)}Y_i(j)}{N_i(m)}\rightarrow \mu_i$ and $\frac{N_i(m)}{m}\rightarrow (E[\text{number of transitions between visits to state }i])^{-1}=\alpha_i$. Letting $m \rightarrow \infty$, the result follows.
\end{proof}
\begin{thm}
Let $Y(t)$ denote the time from $t$ until the next transition, and let $S(t)$ denote the state entered at the first transition after $t$. If the semi Markov process is irreducible and non lattice, then $\lim_{t \rightarrow \infty}\Pr\{Z(t)=i,Y(t)>x,S(t)=j|Z(0)=k\}=\frac{P_{ij}\int_x^\infty F_{ij}^c(y)\,dy}{\mu_{ii}}$.
\end{thm}
\begin{proof}
The trick lies in defining the ``ON" time.
\begin{flalign*}
E[\text{ON time in a cycle}]=E[(X_{ij}-x)^+].
\end{flalign*}
\end{proof}
\begin{cor}
\begin{flalign*}
\lim_{t \rightarrow \infty} \Pr\{Z(t)=i, Y(t) >x|Z(0)=k\}= \frac{\int_{x}^{\infty}H_i^c(y)\,dy}{\mu_{ii}}.
\end{flalign*}
\end{cor}
A Continuous Time Markov Chain (CTMC) is a semi-Markov process with $F_{ij}(t)=1-e^{-\nu_i t}$. From the theory of semi-Markov processes, if the embedded Markov chain with transition probabilities $P_{ij}$ is irreducible and positive recurrent, then the limiting probabilities are
\begin{align}
\label{eq:LimitingProbabilitySMP}
\pi_j \triangleq \lim_{t \rightarrow \infty } P_{ij}(t)= \frac{\alpha_j / \nu_j}{\sum_j \alpha_j /\nu_j},
\end{align}
where $\alpha_i$ is the stationary distribution of the embedded Markov chain. From \eqref{eq:LimitingProbabilitySMP} and \eqref{eq:StationaryDistribution}, we see that $\{\pi_j\}$ is the unique non-negative solution to
\begin{equation}
\nu_j\pi_j=\sum_i \nu_i\pi_iP_{ij},~ \sum_j \pi_j =1.
\end{equation}
\begin{rem}
From the theory of semi-Markov process, it also follows that $\pi_j$ also equals the long-run proportion of time the process is in state $j$.
\end{rem}
\begin{rem}
If the initial state is chosen according to the limiting probabilities $\{\pi_j\}$ then the resultant process will be stationary. That is,
\begin{equation*}
\sum_i \pi_iP_{ij}(t)=\pi_j,~ \text{for all}~ t.
\end{equation*}
\begin{proof}
\begin{flalign*}
\sum_{i}P_{ij}(t)\pi_i &=\sum_{i}P_{ij}(t)\lim_{s \rightarrow \infty}P_{ki}(s)\\
&=\lim_{s \rightarrow \infty} \sum_{i}P_{ij}(t)P_{ki}(s)\\
&=\lim_{s \rightarrow \infty}P_{kj}(t+s)\\
&=\pi_j.
\end{flalign*}
\end{proof}
\end{rem}
\end{document}