Skip to content

Commit 8266638

Browse files
add ng whitepaper example scripts
1 parent 4235d6b commit 8266638

25 files changed

+4876
-0
lines changed

.gitignore

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -34,3 +34,5 @@ docs/source/locale/zh/LC_MESSAGES/whitepapertoc_cn.po
3434
docs/source/locale/zh/LC_MESSAGES/textbooktoc.po
3535
test.qasm
3636
venv/
37+
examples/ng_whitepaper/*.pdf
38+
examples/ng_whitepaper/trials/
Lines changed: 285 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,285 @@
"""
Batch VQE on Transverse Field Ising Model (TFIM) with TensorCircuit

Demonstrates TensorCircuit's "Tensor-Native" paradigms:

- Tensor Network: Quantum gates as local tensor contractions
- AD: Reverse-mode autodiff for efficient gradient computation
- JIT: XLA compilation for hardware acceleration
- vmap: Batch parallel optimization with multiple random initializations

The script optimizes multiple random parameter sets in parallel and generates
a visualization of all optimization trajectories.
"""

import numpy as np
from scipy.sparse.linalg import eigsh
import optax
import matplotlib.pyplot as plt

import tensorcircuit as tc

# JAX backend: the best-supported combination of vmap + JIT in TensorCircuit.
K = tc.set_backend("jax")
# Cotengra contraction-path finder with its default settings.
tc.set_contractor("cotengra")
# Double precision keeps the variational energies numerically trustworthy.
tc.set_dtype("complex128")

# ============================================================================
# Configuration
# ============================================================================

n = 10  # Number of qubits
nlayers = 6  # Ansatz depth
g = 1.0  # Transverse field strength
batch_size = 16  # Number of parallel random initializations
n_steps = 500  # Optimization steps
learning_rate = 0.02

# ============================================================================
# TFIM Energy Function (Matching Whitepaper Code)
# ============================================================================
def tfim_energy(param, g=1.0, n=10):
    """
    Compute the TFIM energy expectation for a hardware-efficient ansatz.

    After an initial layer of Hadamards, the ansatz alternates a row of
    single-qubit RX rotations with a row of nearest-neighbor RZZ entanglers,
    repeated ``param.shape[0] // 2`` times.

    The TFIM Hamiltonian (open boundary conditions) is:
        H = -∑_{i} Z_i Z_{i+1} - g ∑_{i} X_i

    Args:
        param: Array of shape [2 * nlayers, n]; even-indexed rows feed the
            RX layers and odd-indexed rows feed the RZZ layers.
        g: Transverse field strength.
        n: Number of qubits.

    Returns:
        Real-valued energy expectation (backend tensor).
    """
    circuit = tc.Circuit(n)
    circuit.h(range(n))

    # --- Ansatz construction: RX row then RZZ row, repeated ---
    depth = param.shape[0] // 2
    for block in range(depth):
        rx_row = param[2 * block]
        rzz_row = param[2 * block + 1]
        for q in range(n):
            circuit.rx(q, theta=rx_row[q])
        for q in range(n - 1):
            circuit.rzz(q, q + 1, theta=rzz_row[q])

    # --- Expectation: -<Z_i Z_{i+1}> interactions and -g <X_i> field terms ---
    energy = 0.0
    for q in range(n - 1):
        energy -= circuit.expectation_ps(z=[q, q + 1])
    for q in range(n):
        energy -= g * circuit.expectation_ps(x=[q])

    return K.real(energy)
86+
# ============================================================================
87+
# Exact Ground State Energy (via Sparse Diagonalization)
88+
# ============================================================================
89+
90+
91+
def compute_exact_ground_energy(n, g):
92+
"""
93+
Compute exact TFIM ground state energy via sparse diagonalization.
94+
95+
TFIM Hamiltonian: H = -∑ Z_i Z_{i+1} - g ∑ X_i
96+
"""
97+
# Build Pauli string representation
98+
ps = []
99+
weights = []
100+
101+
# ZZ interaction terms: -Z_i Z_{i+1}
102+
for i in range(n - 1):
103+
l = [0] * n
104+
l[i] = 3 # Z
105+
l[i + 1] = 3 # Z
106+
ps.append(l)
107+
weights.append(-1.0)
108+
109+
# Transverse field terms: -g X_i
110+
for i in range(n):
111+
l = [0] * n
112+
l[i] = 1 # X
113+
ps.append(l)
114+
weights.append(-g)
115+
116+
# Convert to sparse matrix and diagonalize
117+
hm = tc.quantum.PauliStringSum2COO(ps, weights, numpy=True)
118+
119+
return eigsh(hm, k=1, which="SA", return_eigenvectors=False)[0]
120+
121+
122+
# ============================================================================
123+
# Batch VQE with vmap
124+
# ============================================================================
125+
126+
# Transform 1: Add gradient capability (Value, Gradient)
127+
vqe_grad = K.value_and_grad(tfim_energy)
128+
129+
# Transform 2: Apply vmap for batch parallelism
130+
# Vectorize over the first argument (param)
131+
vqe_batch = K.vmap(vqe_grad, vectorized_argnums=0)
132+
133+
# Transform 3: Compile for speed (JIT)
134+
vqe_step = K.jit(vqe_batch, static_argnums=(2,))
135+
136+
# ============================================================================
137+
# Training Loop
138+
# ============================================================================
139+
140+
141+
def run_batch_vqe():
142+
"""Run batch VQE optimization and return trajectories."""
143+
print("=" * 60)
144+
print("Batch VQE on TFIM with TensorCircuit")
145+
print("=" * 60)
146+
print(f"Qubits: {n}, Layers: {nlayers}, Batch Size: {batch_size}")
147+
print(f"Transverse field g: {g}")
148+
print()
149+
150+
# Compute exact ground state energy
151+
exact_energy = compute_exact_ground_energy(n, g)
152+
print(f"Exact Ground State Energy: {exact_energy:.6f}")
153+
print()
154+
155+
# Initialize optimizer
156+
optimizer = K.optimizer(optax.adam(learning_rate))
157+
158+
# Random initialization: [batch_size, 2*nlayers, n]
159+
K.set_random_state(42)
160+
params = K.implicit_randn(shape=[batch_size, 2 * nlayers, n], stddev=0.1)
161+
162+
# Store energy trajectories
163+
trajectories = []
164+
165+
print("Starting optimization...")
166+
print("-" * 40)
167+
168+
for step in range(n_steps):
169+
# Batched forward + backward pass
170+
energies, grads = vqe_step(params)
171+
172+
# Update parameters
173+
params = optimizer.update(grads, params)
174+
175+
# Store energies
176+
energies_np = np.array(energies)
177+
trajectories.append(energies_np)
178+
179+
# Print progress
180+
if step % 20 == 0 or step == n_steps - 1:
181+
mean_e = np.mean(energies_np)
182+
min_e = np.min(energies_np)
183+
print(
184+
f"Step {step:3d}: Mean Energy = {mean_e:.6f}, "
185+
f"Min Energy = {min_e:.6f}"
186+
)
187+
188+
print("-" * 40)
189+
print()
190+
191+
# Final results
192+
final_energies = trajectories[-1]
193+
print("Final Energies (all batches):")
194+
for i, e in enumerate(final_energies):
195+
delta = e - exact_energy
196+
print(f" Batch {i}: {e:.6f} (Δ = {delta:+.6f})")
197+
198+
print()
199+
print(f"Best Final Energy: {np.min(final_energies):.6f}")
200+
print(f"Exact Ground State: {exact_energy:.6f}")
201+
print(f"Best Error: {np.min(final_energies) - exact_energy:.6f}")
202+
203+
return np.array(trajectories), exact_energy
204+
205+
# ============================================================================
# Visualization
# ============================================================================


def plot_trajectories(trajectories, exact_energy, save_path="batch_vqe_trajectory.pdf"):
    """
    Plot every optimization trajectory plus their mean and save a PDF.

    Args:
        trajectories: Array of shape [n_steps, batch_size] holding the
            energy recorded at each step for each random initialization.
        exact_energy: Exact ground-state energy, drawn as a reference line.
        save_path: Output PDF path.
    """
    _, ax = plt.subplots(figsize=(10, 6))

    total_steps, n_trajs = trajectories.shape
    xs = np.arange(total_steps)

    # One viridis shade per trajectory.
    palette = plt.cm.viridis(np.linspace(0.1, 0.9, n_trajs))

    # Individual runs; only the first four get legend entries to avoid clutter.
    for idx in range(n_trajs):
        ax.plot(
            xs,
            trajectories[:, idx],
            color=palette[idx],
            alpha=0.7,
            linewidth=1.5,
            label=f"Batch {idx}" if idx < 4 else None,
        )

    # Exact ground-state reference line.
    ax.axhline(
        exact_energy,
        color="red",
        linestyle="--",
        linewidth=2,
        label=f"Exact GS = {exact_energy:.4f}",
    )

    # Batch-averaged trajectory drawn on top.
    ax.plot(
        xs,
        np.mean(trajectories, axis=1),
        color="black",
        linewidth=2.5,
        linestyle="-",
        label="Mean",
    )

    # Axis labels, title, legend, grid.
    ax.set_xlabel("Optimization Step", fontsize=12)
    ax.set_ylabel("Energy", fontsize=12)
    ax.set_title(
        f"Batch VQE Optimization Trajectories (TFIM, n={n}, g={g})",
        fontsize=14,
    )
    ax.legend(loc="upper right", fontsize=10)
    ax.grid(True, alpha=0.3)

    # Finalize layout and write the figure to disk.
    plt.tight_layout()
    plt.savefig(save_path, format="pdf", dpi=150, bbox_inches="tight")
    print(f"\nOptimization trajectories saved to: {save_path}")
    plt.close()
# ============================================================================
277+
# Main
278+
# ============================================================================
279+
280+
if __name__ == "__main__":
281+
# Run batch VQE
282+
trajectories, exact_energy = run_batch_vqe()
283+
284+
# Generate visualization
285+
plot_trajectories(trajectories, exact_energy)

0 commit comments

Comments
 (0)