
Commit 4c52710

Merge branch 'reproduce-paper-data-reuploading-10402208435151954485' of github.com:tensorcircuit/tensorcircuit-ng into ngpr69
2 parents 6282655 + 9c5a561 commit 4c52710

4 files changed: +214 −0 lines changed

Lines changed: 193 additions & 0 deletions
@@ -0,0 +1,193 @@
"""
Reproduction of "Data re-uploading for a universal quantum classifier"
Link: https://arxiv.org/abs/1907.02085

Description:
This script reproduces Figure 6 from the paper using TensorCircuit.
It implements a single-qubit quantum classifier using data re-uploading.
The task is to classify points inside/outside a circle.
"""

import time
from functools import partial
import numpy as np
import matplotlib.pyplot as plt
import optax
import jax
import tensorcircuit as tc

# Set backend to JAX for better performance
K = tc.set_backend("jax")

def generate_circle_data(n_samples: int):
    """
    Generate 2D data points inside/outside a circle.
    Returns:
        X: (n_samples, 2) array of coordinates in [-1, 1]
        Y: (n_samples,) array of labels (0 or 1)
    """
    # Use fixed seed for reproducibility
    np.random.seed(42)
    X = np.random.uniform(-1, 1, size=(n_samples, 2))
    # Circle of radius sqrt(2/pi) covers half the area of the square [-1, 1] x [-1, 1] (area 4)
    # Area of circle = pi * r^2. Area of square = 4.
    # To have balanced classes, pi * r^2 = 2 => r^2 = 2/pi => r = sqrt(2/pi) ~ 0.798
    radius = np.sqrt(2 / np.pi)
    Y = np.sum(X**2, axis=1) < radius**2
    return X, Y.astype(int)

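# Quick check (illustrative sketch, not part of the committed file): with
# radius sqrt(2/pi), about half of the uniformly sampled points land inside
# the circle, so the two classes produced by generate_circle_data are
# roughly balanced:
#
#     _X, _Y = generate_circle_data(10000)
#     print(_Y.mean())   # expected to be close to 0.5
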
def clf_circuit(params, x, n_layers):
    """
    Quantum circuit for classification.
    params: (n_layers, 4)
    x: (2,)
    """
    c = tc.Circuit(1)
    for i in range(n_layers):
        # params[i] -> [w1, b1, w0, b0]
        # ansatz: Rz(w1*x1 + b1) Ry(w0*x0 + b0)
        # Note: x is [x0, x1]
        theta_z = params[i, 0] * x[1] + params[i, 1]
        theta_y = params[i, 2] * x[0] + params[i, 3]
        c.rz(0, theta=theta_z)
        c.ry(0, theta=theta_y)
    return c

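# Note (added for clarity, not part of the committed file): each layer of
# clf_circuit applies Rz(w1*x1 + b1) followed by Ry(w0*x0 + b0), so the same
# 2D input point x is encoded again in every layer with fresh trainable
# weights and biases. This repeated encoding is the "data re-uploading"
# of the paper.
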
def predict_point(params, xi, n_layers):
    c = clf_circuit(params, xi, n_layers)
    # Probability of state |1>
    # TC z expectation is <Z> = P(0) - P(1) = 1 - 2P(1)
    # So P(1) = (1 - <Z>) / 2
    z_exp = c.expectation_ps(z=[0])
    p1 = (1.0 - z_exp) / 2.0
    return p1

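# Sanity check (illustrative sketch, not part of the committed file): with all
# weights and biases set to zero, no rotation is applied, the qubit stays in
# |0>, and predict_point should return P(|1>) close to 0 for any input point:
#
#     p1 = predict_point(np.zeros((2, 4)), np.array([0.3, -0.7]), n_layers=2)
#     print(np.real(K.numpy(p1)))   # expected: ~0.0
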
def loss(params, x, y, n_layers):
    """
    Calculate the mean squared error between the labels and P(|1>).
    params: (n_layers, 4) parameter tensor
    x: (n_samples, 2)
    y: (n_samples,)
    """
    probs_1 = K.vmap(predict_point, vectorized_argnums=1)(params, x, n_layers)
    loss_val = K.mean((y - probs_1) ** 2)
    return K.real(loss_val)

def main():
    n_samples = 200
    X, Y = generate_circle_data(n_samples)

    # Convert data to backend tensors once
    X_tc = K.convert_to_tensor(X)
    Y_tc = K.convert_to_tensor(Y)

    # Different numbers of layers to test
    layers_list = [1, 2, 4]

    plt.figure(figsize=(15, 5))

    for idx, n_layers in enumerate(layers_list):
        print(f"Training with {n_layers} layers...")

        # Initial parameters
        # shape: (n_layers, 4)
        # Initialize randomly
        param_shape = (n_layers, 4)
        init_params = np.random.normal(0, 1, size=param_shape)
        params = K.convert_to_tensor(init_params)

        # Use the optax.lbfgs quasi-Newton optimizer
        solver = optax.lbfgs(learning_rate=1.0)
        opt_state = solver.init(params)

        # Jitted update step for L-BFGS
        @jax.jit
        def update_step(params, opt_state, x, y):
            loss_val, grads = jax.value_and_grad(loss)(params, x, y, n_layers)
            updates, opt_state = solver.update(
                grads,
                opt_state,
                params,
                value=loss_val,
                grad=grads,
                value_fn=partial(loss, x=x, y=y, n_layers=n_layers),
            )
            params = optax.apply_updates(params, updates)
            return params, opt_state, loss_val

        start_time = time.time()
        loss_history = []
        # L-BFGS often converges in fewer steps, but each step costs more (line search),
        # so we use fewer iterations than we would with Adam (e.g., 50 or 100)
        for _ in range(50):
            params, opt_state, loss_val = update_step(params, opt_state, X_tc, Y_tc)
            loss_history.append(loss_val)

        end_time = time.time()
        final_loss = loss_history[-1]
        print(
            f"Optimization finished in {end_time - start_time:.2f}s. Loss: {final_loss}"
        )
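        # For comparison (illustrative sketch, not part of the committed file):
        # an Adam-based variant of the same update typically needs more
        # iterations (a few hundred) but avoids the line-search cost:
        #
        #     solver = optax.adam(learning_rate=1e-2)
        #     opt_state = solver.init(params)
        #     loss_val, grads = jax.value_and_grad(loss)(params, X_tc, Y_tc, n_layers)
        #     updates, opt_state = solver.update(grads, opt_state, params)
        #     params = optax.apply_updates(params, updates)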

        opt_params = params

        # Visualization
        plt.subplot(1, 3, idx + 1)

        # Generate grid
        grid_size = 50
        xx, yy = np.meshgrid(
            np.linspace(-1, 1, grid_size), np.linspace(-1, 1, grid_size)
        )
        grid_points = np.c_[xx.ravel(), yy.ravel()]

        # Predict on grid
        @K.jit
        def predict_batch(p, x_in):
            # reusing predict_point
            return K.vmap(predict_point, vectorized_argnums=1)(p, x_in, n_layers)

        # Convert grid_points to backend
        grid_points_tc = K.convert_to_tensor(grid_points)

        probs_grid = predict_batch(opt_params, grid_points_tc)
        probs_grid_np = K.numpy(probs_grid).reshape(grid_size, grid_size).real

        # Plot contour
        plt.contourf(xx, yy, probs_grid_np, levels=[0, 0.5, 1], cmap="RdBu", alpha=0.6)

        # Plot data points
        plt.scatter(
            X[Y == 0, 0],
            X[Y == 0, 1],
            c="blue",
            s=20,
            edgecolors="k",
            label="Class 0",
        )
        plt.scatter(
            X[Y == 1, 0],
            X[Y == 1, 1],
            c="red",
            s=20,
            edgecolors="k",
            label="Class 1",
        )

        plt.title(f"Layers: {n_layers}\nLoss: {final_loss:.4f}")
        if idx == 0:
            plt.legend()

    plt.tight_layout()
    output_path = "examples/reproduce_papers/2019_Data_re_uploading/outputs/result.png"
    plt.savefig(output_path)
    print(f"Results saved to {output_path}")


if __name__ == "__main__":
    main()
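
The script reports only the training loss and the decision-boundary plot. A minimal way to also report classification accuracy, assuming the snippet is pasted into the same module so that predict_point, K, and np are in scope (this helper is an illustration, not part of the commit):

def accuracy(params, X, Y, n_layers):
    # P(|1>) >= 0.5 is read as class 1, otherwise class 0
    probs = K.vmap(predict_point, vectorized_argnums=1)(
        params, K.convert_to_tensor(X), n_layers
    )
    preds = (np.real(K.numpy(probs)) >= 0.5).astype(int)
    return float(np.mean(preds == Y))

Calling accuracy(opt_params, X, Y, n_layers) at the end of each training run would give a per-layer-count accuracy to compare against the paper's Figure 6.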
Lines changed: 21 additions & 0 deletions
@@ -0,0 +1,21 @@
title: "Data re-uploading for a universal quantum classifier"
arxiv_id: "1907.02085"
url: "https://arxiv.org/abs/1907.02085"
year: 2019
authors:
  - "Adrián Pérez-Salinas"
  - "Alba Cervera-Lierta"
  - "Elies Gil-Fuster"
  - "José I. Latorre"
tags:
  - "QML"
  - "Classification"
  - "Data Re-uploading"
hardware_requirements:
  gpu: False
  min_memory: "1GB"
description: "Reproduces the single-qubit classifier experiment (Figure 6) using the data re-uploading technique."
outputs:
  - target: "Figure 6"
    path: "outputs/result.png"
script: "main.py"
Binary file changed (65.2 KB); content not rendered in the diff.

examples/reproduce_papers/__init__.py

Whitespace-only changes.
