Skip to content

Commit ba6fc0c

Browse files
committed
Updating Latex
1 parent 800588c commit ba6fc0c

File tree

4 files changed

+79
-50
lines changed

4 files changed

+79
-50
lines changed

blueprint/src/content.tex

Lines changed: 74 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -1,16 +1,17 @@
11
% Main content for QKD blueprint
22

3-
This blueprint documents the formalization of \textbf{Lemma 1} from the seminal paper
3+
This blueprint formalizes \textbf{Lemma 1} from the paper
44
\href{https://arxiv.org/abs/quant-ph/9802025}{\emph{Unconditional Security Of Quantum Key Distribution Over Arbitrarily Long Distances}}
55
by Lo and Chau (1998).
66

7-
The lemma establishes a fundamental result in quantum key distribution security:
8-
high fidelity with a pure state implies bounded von Neumann entropy.
9-
This formalization is expected to attract significant interest from the quantum cryptography community,
10-
as it provides a machine-checked proof of a cornerstone result in the security analysis of quantum key distribution protocols.
7+
Lemma 1 establishes a quantitative relation between \emph{fidelity} and \emph{von Neumann entropy}:
8+
high fidelity with a reference pure state implies an upper bound on the entropy of the underlying density operator.
9+
The purpose of this blueprint is to give a precise, machine-checked version of this argument.
1110

1211
\bigskip
1312

13+
\noindent For reference, we reproduce below the statement and proof of Lemma~1 as it appears in Lo--Chau (1998).
14+
1415
\begin{center}
1516
\fbox{\begin{minipage}{0.95\textwidth}
1617
\vspace{0.5em}
@@ -38,13 +39,24 @@
3839

3940
\bigskip
4041

41-
\begin{definition}
42+
\subsection*{Preliminaries}
43+
44+
\noindent We begin with several basic definitions used to verify the proof of Lemma~1.
45+
46+
\begin{definition}[von Neumann Entropy]
4247
\label{def:vonNeumannEntropy}
4348
\lean{vonNeumannEntropy}
4449
\leanok
45-
For a Hermitian matrix $A$, the \emph{von Neumann entropy} is defined as:
46-
$$S(A) = -\sum_i \lambda_i \log(\lambda_i)$$
47-
where $\lambda_i$ are the eigenvalues of $A$.
50+
For a Hermitian matrix $A$ with eigenvalues $\lambda_i$ (for instance, a density matrix with $\lambda_i \geq 0$ and $\sum_i \lambda_i = 1$),
51+
the \emph{von Neumann entropy} is defined as:
52+
$$S(A) = -\sum_i \lambda_i \log(\lambda_i).$$
53+
\end{definition}
54+
55+
\begin{definition}[Decreasing Vector]
56+
\label{def:decreasing_vector}
57+
A vector $x \in \mathbb{R}^n$ is called \emph{decreasing} if its coordinates are ordered
58+
$$x_1 \geq x_2 \geq \cdots \geq x_n.$$
59+
Given any $x \in \mathbb{R}^n$, we write $x^{\downarrow}$ for the vector obtained by rearranging the coordinates of $x$ in decreasing order.
4860
\end{definition}
4961

5062
\begin{definition}[Majorization Ordering]
@@ -68,28 +80,38 @@
6880

6981
\bigskip
7082

71-
\begin{lemma}
83+
\subsection*{Auxiliary Results}
84+
85+
\noindent In this subsection we record the auxiliary results needed for the proof of Lemma~1.
86+
We first establish Schur-convexity of the map $x \mapsto \sum_i x_i \log(x_i)$, and then prove a spectral estimate showing that high fidelity forces the existence of a large eigenvalue.
87+
88+
\begin{proposition}
7289
\label{lem:slope_comparison}
7390
\lean{slope_comparison_tlogt}
7491
\leanok
75-
For $f(t) = t \cdot \log(t)$, if $a \geq c$, $b \geq d$, $a \neq b$, $c \neq d$, then:
76-
$$\frac{f(c) - f(d)}{c - d} \leq \frac{f(a) - f(b)}{a - b}$$
77-
\end{lemma}
92+
Let $f(t) = t \log(t)$ for $t \geq 0$, where we set $f(0) = 0$. For any $a, b, c, d \in [0,\infty)$ satisfying $a \geq c$, $b \geq d$, $a \neq b$, and $c \neq d$, we have
93+
$$\frac{f(c) - f(d)}{c - d} \leq \frac{f(a) - f(b)}{a - b}.$$
94+
\end{proposition}
7895

79-
\begin{lemma}
96+
\begin{proof}
97+
This is a standard consequence of the convexity of $f(t) = t \log(t)$.
98+
\end{proof}
99+
100+
\begin{proposition}
80101
\label{lem:schur_reduction}
81102
\lean{schur_convex_reduction_to_distinct}
82103
\leanok
83104
\uses{lem:slope_comparison}
84-
For decreasing vectors $x, y$ with $x_i \neq y_i$ for all $i$, if $x \preceq y$ then:
85-
$$\sum_i x_i \log(x_i) \leq \sum_i y_i \log(y_i)$$
86-
\end{lemma}
105+
Let $x, y \in [0,\infty)^n$ be decreasing vectors with $x_i \neq y_i$ for all $i$ (nonnegativity ensures each term $x_i \log(x_i)$ is well defined, with the convention $0 \log 0 = 0$).
106+
If $x \preceq y$, then:
107+
$$\sum_i x_i \log(x_i) \leq \sum_i y_i \log(y_i).$$
108+
\end{proposition}
87109

88110
\begin{proof}
89111
For $f(t) = t \log(t)$, define the secant slopes:
90112
$$c_i := \frac{f(x_i) - f(y_i)}{x_i - y_i} = \frac{x_i \log(x_i) - y_i \log(y_i)}{x_i - y_i}$$
91113

92-
By Lemma~\ref{lem:slope_comparison} (convexity of $f$), these slopes are monotonically non-decreasing:
114+
By Proposition~\ref{lem:slope_comparison} (convexity of $f$), these slopes are monotonically non-increasing:
93115
$$c_{i+1} \leq c_i \quad \text{for all } i \in \{1, \ldots, n-1\}$$
94116

95117
Using the telescoping identity $x_i = \left(\sum_{j \leq i} x_j\right) - \left(\sum_{j < i} x_j\right)$, we rewrite:
@@ -99,26 +121,26 @@
99121
&= \sum_{i=1}^n c_i \left[\left(\sum_{j \leq i} x_j - \sum_{j \leq i} y_j\right) - \left(\sum_{j < i} x_j - \sum_{j < i} y_j\right)\right]
100122
\end{align*}
101123

102-
By majorization, $\sum_{j \leq k} x_j \geq \sum_{j \leq k} y_j$ for all $k < n$, and equality holds at $k = n$ by normalization.
124+
By majorization, $\sum_{j \leq k} x_j \leq \sum_{j \leq k} y_j$ for all $k < n$, and equality holds at $k = n$ by normalization.
103125

104126
Expanding the telescoping sum and using the boundary condition (the term at $i = n$ vanishes):
105127
\begin{align*}
106128
\sum_{i=1}^n c_i (x_i - y_i)
107129
&= \sum_{i=1}^{n-1} (c_i - c_{i+1}) \left(\sum_{j \leq i} x_j - \sum_{j \leq i} y_j\right) \\
108-
&\geq 0
130+
&\leq 0
109131
\end{align*}
110132

111133
The final inequality holds because $(c_i - c_{i+1}) \geq 0$ (slopes non-increasing) and
112-
$\left(\sum_{j \leq i} x_j - \sum_{j \leq i} y_j\right) \geq 0$ (majorization).
134+
$\left(\sum_{j \leq i} x_j - \sum_{j \leq i} y_j\right) \leq 0$ (majorization), so their product is non-positive.
113135
\end{proof}
114136

115-
\begin{theorem}
137+
\begin{proposition}
116138
\label{thm:schur_convex_xlogx}
117139
\lean{schur_convex_xlogx}
118140
\leanok
119141
\uses{lem:schur_reduction}
120-
The entropy function $f(x) = \sum_i x_i \log(x_i)$ is Schur-convex.
121-
\end{theorem}
142+
The function $f(x) = \sum_i x_i \log(x_i)$ is Schur-convex.
143+
\end{proposition}
122144

123145
\begin{proof}
124146
We first reduce to decreasing vectors by sorting, which preserves both the majorization relation and the function value.
@@ -128,29 +150,38 @@
128150
directly.
129151
\end{proof}
130152

131-
\begin{lemma}[Eigenvalue Bound from Inner Product]
153+
\noindent We now turn to the spectral estimate that links high fidelity with the existence of a large eigenvalue.
154+
In particular, it formalizes the observation that a large expectation value $\langle v, \rho v \rangle$ forces the spectrum of $\rho$ to have a correspondingly large eigenvalue.
155+
156+
\begin{proposition}[Eigenvalue Bound from Inner Product]
132157
\label{lem:eigenvalue_bound}
133158
\lean{eigenvalue_bound_eigenbasis}
134159
\leanok
135160
Let $\rho$ be a Hermitian matrix and $v$ a unit vector ($\|v\| = 1$).
136161
If $\langle v, \rho v \rangle > C$, then $\rho$ has an eigenvalue $\lambda_i > C$.
137-
\end{lemma}
162+
\end{proposition}
138163

139164
\bigskip
165+
\bigskip
166+
167+
\subsection*{Proof of Lemma~1}
168+
169+
\noindent Combining these ingredients, we obtain the following entropy bound, which formalizes Lemma~1 of Lo--Chau (1998).
140170

141-
\begin{theorem}
171+
\begin{lemma}
142172
\label{thm:main}
143173
\lean{high_fidelity_implies_low_entropy_equivalent}
144174
\leanok
145175
\uses{def:vonNeumannEntropy, thm:schur_convex_xlogx, lem:eigenvalue_bound}
146-
Let $\rho$ be an $R \times R$ positive semidefinite density matrix with trace 1.
147-
If there exists a unit vector $v$ such that $\langle v, \rho v \rangle > 1 - \delta$ where $0 \leq \delta < 1$ and $\delta \leq (R-1)/R$, then:
148-
$$S(\rho) \leq -(1-\delta)\log(1-\delta) - \delta \log\left(\frac{\delta}{R-1}\right)$$
149-
\end{theorem}
176+
Let $\rho$ be an $R \times R$ positive semidefinite density matrix with trace $1$.
177+
If there exists a unit vector $v$ such that $\langle v, \rho v \rangle > 1 - \delta$ where $0 \leq \delta < 1$ and $\delta \leq (R-1)/R$,
178+
then the von Neumann entropy of $\rho$ satisfies
179+
$$S(\rho) \leq -(1-\delta)\log(1-\delta) - \delta \log\left(\frac{\delta}{R-1}\right).$$
180+
\end{lemma}
150181

151182
\begin{proof}
152183
\uses{lem:eigenvalue_bound, thm:schur_convex_xlogx}
153-
The proof proceeds in 9 steps:
184+
For clarity, the proof is organized into 8 short steps:
154185

155186
\textbf{Step 1: Eigenvalue Bound from High Fidelity.}
156187
From the hypothesis $\langle v, \rho v \rangle > 1 - \delta$, we apply
@@ -159,7 +190,7 @@
159190
\textbf{Step 2: Eigenvalue Non-negativity.}
160191
Since $\rho$ is positive semidefinite, all eigenvalues are non-negative: $\lambda_i \geq 0$ for all $i$.
161192

162-
\textbf{Step 3: Construct Comparison Distribution.}
193+
\textbf{Step 3: Construction of the Comparison Eigenvalue Distribution.}
163194
Define a comparison eigenvalue distribution $\lambda_{\text{comp}}$:
164195
$$\lambda_{\text{comp}}(i) = \begin{cases}
165196
1 - \delta & \text{if } i = 0 \\
@@ -180,19 +211,15 @@
180211
\end{enumerate}
181212
The proof uses case analysis ($k=0$ vs $k>0$) with a proof by contradiction for the $k>0$ case.
182213

183-
\textbf{Step 6: Apply Schur-Convexity (Payoff).}
214+
\textbf{Step 6: Application of Schur-Convexity.}
184215
Using Proposition~\ref{thm:schur_convex_xlogx} and the majorization from Step 5:
185216
$$\sum_i \lambda_i \log(\lambda_i) \geq \sum_i \lambda_{\text{comp}}(i) \log(\lambda_{\text{comp}}(i))$$
186217

187-
\textbf{Step 7: Unfold Entropy Definition.}
188-
By Definition~\ref{def:vonNeumannEntropy}:
189-
$$S(\rho) = -\sum_i \lambda_i \log(\lambda_i)$$
190-
191-
\textbf{Step 8: Compute Comparison Entropy.}
218+
\textbf{Step 7: Compute Comparison Entropy.}
192219
Direct calculation yields:
193220
$$\sum_i \lambda_{\text{comp}}(i) \log(\lambda_{\text{comp}}(i)) = (1-\delta)\log(1-\delta) + \delta \log\left(\frac{\delta}{R-1}\right)$$
194221

195-
\textbf{Step 9: Final Calculation.}
222+
\textbf{Step 8: Final Calculation.}
196223
Combining all steps:
197224
\begin{align*}
198225
S(\rho) &= -\sum_i \lambda_i \log(\lambda_i) \\
@@ -203,9 +230,11 @@
203230

204231
\bigskip
205232

206-
\noindent\textbf{Remark:} The original paper assumes $\delta \ll 1$, but for rigorous formalization we require
207-
$\delta \leq \frac{R-1}{R}$. This ensures $\frac{\delta}{R-1} \leq 1 - \delta$, which guarantees the comparison
208-
distribution $\lambda_{\text{comp}}$ is properly ordered with one dominant eigenvalue. Without this constraint,
209-
the majorization argument fails. Since $\frac{R-1}{R} \to 1$ as $R \to \infty$, this constraint is nearly equivalent
210-
to $\delta < 1$ for large systems and is always satisfied in practical QKD where $\delta \ll 1$.
233+
\noindent\textbf{Remarks:}
234+
235+
\noindent\textbf{(1) Notation.} In our formalization, $R$ denotes the dimension of the Hilbert space, corresponding to $2^{2R}$ in Lo \& Chau's notation (where $R$ represents the number of singlet pairs).
236+
237+
\medskip
238+
239+
\noindent\textbf{(2) Constraint on $\delta$.} The original paper requires $\delta \ll 1$. Our formalization makes this precise: we require $\delta \leq \frac{R-1}{R}$ to ensure the comparison distribution has decreasing eigenvalues. Since $\frac{R-1}{R} \to 1$ as $R \to \infty$, this constraint is satisfied for large systems with $\delta \ll 1$.
211240

blueprint/src/macros/common.tex

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -10,9 +10,9 @@
1010
% If you want for instance to number them within chapters then you can add
1111
% [chapter] at the end of the next line.
1212
\newtheorem{theorem}{Theorem}
13-
\newtheorem{proposition}[theorem]{Proposition}
13+
\newtheorem{proposition}{Proposition}
1414
\newtheorem{lemma}[theorem]{Lemma}
1515
\newtheorem{corollary}[theorem]{Corollary}
1616

1717
\theoremstyle{definition}
18-
\newtheorem{definition}[theorem]{Definition}
18+
\newtheorem{definition}{Definition}

blueprint/src/print.tex

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -24,8 +24,8 @@
2424
\input{macros/common}
2525
\input{macros/print}
2626

27-
\title{QKD}
28-
\author{Ben Breen}
27+
\title{Quantum Key Distribution: High Fidelity Implies Low Entropy}
28+
\author{Axiomatic-AI}
2929

3030
\begin{document}
3131
\maketitle

blueprint/src/web.tex

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@
2121
\dochome{https://BenKBreen.github.io/QKD/docs}
2222

2323
\title{Quantum Key Distribution: High Fidelity Implies Low Entropy}
24-
\author{Ben Breen \& Kfir Sulimany}
24+
\author{Axiomatic-AI}
2525

2626
\begin{document}
2727
\maketitle

0 commit comments

Comments
 (0)