|
10 | 10 | The script plots the infidelity (1 - Fidelity) as a function of the bond dimension. |
11 | 11 |
|
12 | 12 | Implementation Note: |
13 | | -This script implements a "DMRG-style" simulation logic. Instead of standard TEBD (local SVD), |
14 | | -we use a variational compression step after applying each layer of gates. Specifically, |
15 | | -we apply a layer of gates to obtain a target state (potentially with increased bond dimension), |
16 | | -and then we optimize an MPS with fixed bond dimension `chi` to maximize its overlap with the |
17 | | -target state using a sweeping algorithm (DMRG/variational compression). |
| 13 | +This script implements a DMRG-style variational simulation logic. |
| 14 | +We construct an MPO for each chunk of layers (e.g., 2 layers) and then |
| 15 | +variationally optimize the MPS to maximize the overlap with the state after |
| 16 | +applying the MPO. This avoids forming intermediate high-bond-dimension states |
| 17 | +explicitly (like standard TEBD or global contraction) and aligns with the |
| 18 | +standard DMRG algorithm for time evolution / circuit simulation. |
18 | 19 | """ |
19 | 20 |
|
20 | 21 | import time |
@@ -126,72 +127,293 @@ def q(r, col): |
126 | 127 | return c, layers |
127 | 128 |
|
128 | 129 |
|
129 | | -def variational_compress(target_mps, chi, sweeps=1): |
| 130 | +def build_mpo_from_layers(n_qubits, layers): |
130 | 131 | """ |
131 | | - Compresses the `target_mps` to an MPS with bond dimension `chi` |
132 | | - by maximizing the overlap (variational compression / DMRG-style). |
133 | | -
|
134 | | - This function implements a basic 2-site variational sweep. |
| 132 | + Constructs an MPO (list of tensors) representing the layers of gates. |
| 133 | + The MPO tensors have shape (left_bond, right_bond, phys_out, phys_in). |
135 | 134 | """ |
136 | | - n = target_mps._nqubits |
137 | | - |
138 | | - # Initialize the compressed MPS |
139 | | - # A good starting point is the SVD truncated version (TEBD result) |
140 | | - # or just the target itself if we are going to truncate during sweep |
141 | | - compressed_mps = target_mps.copy() |
142 | | - |
143 | | - # We enforce the bond dimension constraint during the sweep |
144 | | - # We use `reduce_dimension` to perform 2-site SVD update which is |
145 | | - # the optimal update for maximizing overlap in a local 2-site window. |
146 | | - # By sweeping back and forth, we approximate the global optimum. |
| 135 | + # Start with Identity MPO |
| 136 | + # shape: (1, 1, 2, 2) |
| 137 | + # mpo_tensors = [ |
| 138 | + # np.eye(2).reshape(1, 1, 2, 2).astype(np.complex128) for _ in range(n_qubits) |
| 139 | + # ] |
| 140 | + |
| 141 | + # We use MPSCircuit to simulate the superoperator evolution |
| 142 | + # State space dimension is 4 (2*2). |
| 143 | + # Initial state is Identity vector (unnormalized trace but it's operator). |
| 144 | + # Note: MPSCircuit starts in |0...0> by default. |
| 145 | + # We want it to start in |I...I> where I is flattened identity. |
| 146 | + # Identity (2x2) flattened is [1, 0, 0, 1]. |
| 147 | + |
| 148 | + initial_tensors = [] |
| 149 | + for _ in range(n_qubits): |
| 150 | + # Shape (1, 4, 1) -> (1, dim, 1) |
| 151 | + t = np.array([1.0, 0.0, 0.0, 1.0], dtype=np.complex128).reshape(1, 4, 1) |
| 152 | + initial_tensors.append(t) |
| 153 | + |
| 154 | + mpo_mps = tc.MPSCircuit(n_qubits, tensors=initial_tensors, dim=4) |
| 155 | + mpo_mps.set_split_rules({}) # No truncation |
147 | 156 |
|
148 | | - # Note: `reduce_dimension` in MPSCircuit performs SVD on the bond. |
149 | | - # If we apply it sequentially, it is equivalent to the standard |
150 | | - # "variational compression via SVD sweeping" if the state is canonical. |
| 157 | + for layer in layers: |
| 158 | + for gate in layer: |
| 159 | + idx = gate["index"] |
| 160 | + params = gate.get("parameters", {}) |
| 161 | + g_obj = gate["gatef"](**params) |
| 162 | + u = g_obj.tensor # (2, 2) or (2, 2, 2, 2) |
| 163 | + |
| 164 | + # Create superoperator gate U \otimes I |
| 165 | + # U acts on the "output" index of the MPO, which corresponds to the first factor in 2x2. |
| 166 | + # Flattened index i corresponds to (i // 2, i % 2) -> (out, in). |
| 167 | + # We want U on out, I on in. |
| 168 | + |
| 169 | + if len(idx) == 1: |
| 170 | + # U: (2, 2) |
| 171 | + # Super: (4, 4) |
| 172 | + # U_{ik} \delta_{jl} -> index (i,j), (k,l) |
| 173 | + # kron(U, I) does exactly this. |
| 174 | + u_super = np.kron(u, np.eye(2)) |
| 175 | + g_super = tc.gates.Gate(u_super.reshape(4, 4)) |
| 176 | + mpo_mps.apply(g_super, *idx) |
| 177 | + |
| 178 | + elif len(idx) == 2: |
| 179 | + # U: (2, 2, 2, 2) -> (out1, out2, in1, in2) |
| 180 | + # We need (4, 4, 4, 4) -> (site1_out, site1_in), (site2_out, site2_in) etc. |
| 181 | + # Actually, kron(U_mat, I_4) gives a 16x16 matrix acting on (sys1, sys2, anc1, anc2). |
| 182 | + # Indices: s1, s2, a1, a2. |
| 183 | + # We want: s1, a1, s2, a2. |
| 184 | + # So we need to permute. |
| 185 | + |
| 186 | + # U matrix: (s1_out * s2_out, s1_in * s2_in) |
| 187 | + u_mat = u.reshape(4, 4) |
| 188 | + super_op = np.kron(u_mat, np.eye(4)) # (16, 16) |
| 189 | + # Indices of super_op: (s1_out, s2_out, a1, a2), (s1_in, s2_in, a1, a2) (row, col) |
| 190 | + # We want to act on MPS sites which are (s1, a1) and (s2, a2). |
| 191 | + # So row indices should be (s1_out, a1), (s2_out, a2). |
| 192 | + # Col indices should be (s1_in, a1), (s2_in, a2). |
| 193 | + |
| 194 | + # Reshape to tensor (4, 4, 4, 4) |
| 195 | + t = super_op.reshape(2, 2, 2, 2, 2, 2, 2, 2) |
| 196 | + # (s1o, s2o, a1o, a2o, s1i, s2i, a1i, a2i) |
| 197 | + # Note: a1o = a1i (Identity on ancilla). |
| 198 | + |
| 199 | + # Permute to (s1o, a1o, s2o, a2o, s1i, a1i, s2i, a2i) |
| 200 | + t = np.transpose(t, (0, 2, 1, 3, 4, 6, 5, 7)) |
| 201 | + |
| 202 | + # Reshape to (4, 4, 4, 4) |
| 203 | + u_super = t.reshape(4, 4, 4, 4) |
| 204 | + g_super = tc.gates.Gate(u_super) |
| 205 | + |
| 206 | + # Custom handling for non-adjacent gates with d=4 |
| 207 | + # because standard MPSCircuit uses qubit swap. |
| 208 | + idx1, idx2 = idx |
| 209 | + if abs(idx1 - idx2) > 1: |
| 210 | + # Construct Swap(d=4) |
| 211 | + swap_d4 = np.zeros((4, 4, 4, 4), dtype=np.complex128) |
| 212 | + for i in range(4): |
| 213 | + for j in range(4): |
| 214 | + swap_d4[j, i, i, j] = 1.0 |
| 215 | + g_swap_d4 = tc.gates.Gate(swap_d4) |
| 216 | + |
| 217 | + # Normalize index order |
| 218 | + p1, p2 = min(idx1, idx2), max(idx1, idx2) |
| 219 | + |
| 220 | + # Move p2 to p1 + 1 |
| 221 | + for k in range(p2, p1 + 1, -1): |
| 222 | + mpo_mps.apply_adjacent_double_gate(g_swap_d4, k - 1, k) |
| 223 | + |
| 224 | + # Apply gate |
| 225 | + mpo_mps.apply_adjacent_double_gate(g_super, p1, p1 + 1) |
| 226 | + |
| 227 | + # Move back |
| 228 | + for k in range(p1 + 1, p2): |
| 229 | + mpo_mps.apply_adjacent_double_gate(g_swap_d4, k, k + 1) |
| 230 | + else: |
| 231 | + mpo_mps.apply(g_super, *idx) |
| 232 | + |
| 233 | + # Extract tensors and convert to MPO format (l, r, p_out, p_in) |
| 234 | + # MPS format: (l, d=4, r) |
| 235 | + final_mps_tensors = mpo_mps.get_tensors() |
| 236 | + final_mpo = [] |
| 237 | + for t in final_mps_tensors: |
| 238 | + # t: (l, 4, r) |
| 239 | + # Transpose to (l, r, 4) |
| 240 | + t = np.transpose(t, (0, 2, 1)) |
| 241 | + # Reshape to (l, r, 2, 2) |
| 242 | + # Note: 4 corresponds to (out, in) via kron(U, I) logic, i.e. (out * 2 + in). |
| 243 | + final_mpo.append(t.reshape(t.shape[0], t.shape[1], 2, 2)) |
| 244 | + |
| 245 | + return final_mpo |
| 246 | + |
| 247 | + |
| 248 | +def run_1site_dmrg(mps_old, mpo_tensors, chi, sweeps=1): |
| 249 | + """ |
| 250 | + Optimizes a new MPS to approximate MPO @ MPS_old using a 2-site DMRG sweep. |
| 251 | + """ |
| 252 | + n = mps_old._nqubits |
| 253 | + |
| 254 | + # Initialize the guess as a copy of the old MPS. |
| 255 | + # |
| 256 | + # Note on the update scheme: a strictly 1-site sweep keeps the bond |
| 257 | + # dimension fixed at that of the initial guess, so it cannot capture |
| 258 | + # the entanglement growth produced by applying the MPO unless the |
| 259 | + # guess is pre-padded to bond dimension `chi` or a subspace-expansion |
| 260 | + # step is added. |
| 261 | + # |
| 262 | + # We therefore use a 2-site update for the sweep: it is standard in |
| 263 | + # TEBD/DMRG codes, naturally adapts the bond dimension as needed, and |
| 264 | + # truncates back to `chi` via an SVD at every bond. |
| 265 | + # |
| 266 | + # If exact 1-site behavior is ever required, use a padded initial |
| 267 | + # guess plus single-site local updates instead. |
| 268 | + # |
| 269 | + # The sweep updates two sites at a time, performs an SVD of the |
| 270 | + # two-site tensor, and keeps at most `chi` singular values. |
| 271 | + |
| 272 | + mps_new = mps_old.copy() |
| 273 | + |
| 274 | + # Tensors |
| 275 | + A_new = [t.copy() for t in mps_new.get_tensors()] # (l, d, r) |
| 276 | + A_old = mps_old.get_tensors() # (l, d, r) |
| 277 | + W = mpo_tensors # (l, r, u, d) |
| 278 | + |
| 279 | + # Environments |
| 280 | + L = [np.ones((1, 1, 1))] * (n + 1) |
| 281 | + R = [np.ones((1, 1, 1))] * (n + 1) |
| 282 | + |
| 283 | + # Build initial R environments |
| 284 | + for i in range(n - 1, 0, -1): |
| 285 | + # Contract R[i+1] with site i |
| 286 | + # R: (r_n, r_m, r_o) |
| 287 | + # A_n: (l_n, p, r_n) |
| 288 | + T = np.tensordot(A_new[i], R[i + 1], axes=[[2], [0]]) # (l_n, p, r_m, r_o) |
| 289 | + # W: (l_m, r_m, p, p') |
| 290 | + T = np.tensordot(T, W[i], axes=[[2, 1], [1, 2]]) # (l_n, r_o, l_m, p') |
| 291 | + # A_o: (l_o, p', r_o) |
| 292 | + R[i] = np.tensordot(T, A_old[i], axes=[[1, 3], [2, 1]]) # (l_n, l_m, l_o) |
151 | 293 |
|
152 | 294 | # Sweep |
153 | 295 | for _ in range(sweeps): |
154 | | - # Left -> Right |
155 | | - compressed_mps.position(0) |
| 296 | + # Left -> Right (2-site update) |
156 | 297 | for i in range(n - 1): |
157 | | - compressed_mps.reduce_dimension( |
158 | | - i, center_left=False, split={"max_singular_values": chi} |
159 | | - ) |
160 | | - |
161 | | - # Right -> Left |
162 | | - # compressed_mps.position(n-1) # reduce_dimension handles center position |
| 298 | + # Form effective tensor for sites i, i+1 |
| 299 | + # E = L[i] * W[i] * W[i+1] * A_old[i] * A_old[i+1] * R[i+2] |
| 300 | + |
| 301 | + # 1. Contract Left block: L[i] * A_old[i] * W[i] |
| 302 | + # L: (l_n, l_m, l_o) |
| 303 | + # A_o: (l_o, p1', r_o) |
| 304 | + T = np.tensordot(L[i], A_old[i], axes=[[2], [0]]) # (l_n, l_m, p1', r_o) |
| 305 | + # W: (l_m, r_m, p1, p1') |
| 306 | + T = np.tensordot(T, W[i], axes=[[1, 2], [0, 3]]) # (l_n, r_o, r_m, p1) |
| 307 | + |
| 308 | + # 2. Contract with site i+1 parts |
| 309 | + # A_o[i+1]: (r_o, p2', r_o2) |
| 310 | + T = np.tensordot( |
| 311 | + T, A_old[i + 1], axes=[[1], [0]] |
| 312 | + ) # (l_n, r_m, p1, p2', r_o2) |
| 313 | + # W[i+1]: (r_m, r_m2, p2, p2') |
| 314 | + T = np.tensordot( |
| 315 | + T, W[i + 1], axes=[[1, 3], [0, 3]] |
| 316 | + ) # (l_n, p1, r_o2, r_m2, p2) |
| 317 | + |
| 318 | + # 3. Contract with Right block: R[i+2] |
| 319 | + # R: (r_n2, r_m2, r_o2) |
| 320 | + # T: (l_n, p1, r_o2, r_m2, p2) |
| 321 | + # axes: [2, 3] with [2, 1] |
| 322 | + E = np.tensordot(T, R[i + 2], axes=[[2, 3], [2, 1]]) # (l_n, p1, p2, r_n2) |
| 323 | + |
| 324 | + # E is the target 2-site tensor (l_n, p1, p2, r_n2) |
| 325 | + |
| 326 | + # 4. SVD and Truncate |
| 327 | + # Reshape to (l_n * p1, p2 * r_n2) |
| 328 | + shape = E.shape |
| 329 | + E_flat = E.reshape(shape[0] * shape[1], shape[2] * shape[3]) |
| 330 | + |
| 331 | + u, s, v = np.linalg.svd(E_flat, full_matrices=False) |
| 332 | + |
| 333 | + rank = min(len(s), chi) |
| 334 | + u = u[:, :rank] |
| 335 | + s = s[:rank] |
| 336 | + v = v[:rank, :] |
| 337 | + |
| 338 | + # 5. Update A_new[i] and A_new[i+1] |
| 339 | + A_new[i] = u.reshape(shape[0], shape[1], rank) # (l_n, p1, bond) |
| 340 | + |
| 341 | + # Absorb s into v for next site |
| 342 | + sv = np.dot(np.diag(s), v) |
| 343 | + A_new[i + 1] = sv.reshape(rank, shape[2], shape[3]) # (bond, p2, r_n2) |
| 344 | + |
| 345 | + # 6. Update L[i+1] using new A_new[i] |
| 346 | + # Same logic as before |
| 347 | + # L[i+1] = L[i] * A_new[i] * W[i] * A_old[i] |
| 348 | + T_L = np.tensordot(L[i], A_new[i], axes=[[0], [0]]) # (l_m, l_o, p1, bond) |
| 349 | + T_L = np.tensordot( |
| 350 | + T_L, W[i], axes=[[0, 2], [0, 2]] |
| 351 | + ) # (l_o, bond, r_m, p1') |
| 352 | + L[i + 1] = np.tensordot( |
| 353 | + T_L, A_old[i], axes=[[0, 3], [0, 1]] |
| 354 | + ) # (bond, r_m, r_o) |
| 355 | + # (r_n, r_m, r_o) -> Matches. |
| 356 | + |
| 357 | + # Right -> Left sweep (optional but good for stability) |
163 | 358 | for i in range(n - 2, -1, -1): |
164 | | - compressed_mps.reduce_dimension( |
165 | | - i, center_left=True, split={"max_singular_values": chi} |
166 | | - ) |
167 | | - |
168 | | - return compressed_mps |
| 359 | + # Form effective tensor E (same as above) |
| 360 | + T = np.tensordot(L[i], A_old[i], axes=[[2], [0]]) |
| 361 | + T = np.tensordot(T, W[i], axes=[[1, 2], [0, 3]]) |
| 362 | + T = np.tensordot(T, A_old[i + 1], axes=[[1], [0]]) |
| 363 | + T = np.tensordot(T, W[i + 1], axes=[[1, 3], [0, 3]]) |
| 364 | + E = np.tensordot(T, R[i + 2], axes=[[2, 3], [2, 1]]) # (l_n, p1, p2, r_n2) |
| 365 | + |
| 366 | + # SVD |
| 367 | + shape = E.shape |
| 368 | + E_flat = E.reshape(shape[0] * shape[1], shape[2] * shape[3]) |
| 369 | + u, s, v = np.linalg.svd(E_flat, full_matrices=False) |
| 370 | + |
| 371 | + rank = min(len(s), chi) |
| 372 | + u = u[:, :rank] |
| 373 | + s = s[:rank] |
| 374 | + v = v[:rank, :] |
| 375 | + |
| 376 | + # Update A_new[i+1] (Right-isometric) |
| 377 | + A_new[i + 1] = v.reshape(rank, shape[2], shape[3]) |
| 378 | + |
| 379 | + # Absorb u * s into A_new[i] |
| 380 | + us = np.dot(u, np.diag(s)) |
| 381 | + A_new[i] = us.reshape(shape[0], shape[1], rank) |
| 382 | + |
| 383 | + # Update R[i+1] using new A_new[i+1] |
| 384 | + # R[i+1] = R[i+2] * A_new[i+1] * W[i+1] * A_old[i+1] |
| 385 | + T_R = np.tensordot( |
| 386 | + A_new[i + 1], R[i + 2], axes=[[2], [0]] |
| 387 | + ) # (bond, p2, r_m2, r_o2) |
| 388 | + T_R = np.tensordot( |
| 389 | + T_R, W[i + 1], axes=[[2, 1], [1, 2]] |
| 390 | + ) # (bond, r_o2, l_m2, p2') |
| 391 | + R[i + 1] = np.tensordot( |
| 392 | + T_R, A_old[i + 1], axes=[[1, 3], [2, 1]] |
| 393 | + ) # (bond, l_m2, l_o2) |
| 394 | + |
| 395 | + # Return new MPSCircuit |
| 396 | + # Center is at 0 after backward sweep |
| 397 | + return tc.MPSCircuit(n, tensors=A_new, center_position=0) |
169 | 398 |
|
170 | 399 |
|
171 | 400 | def run_mps_simulation_dmrg(n_qubits, layers, bond_dim): |
172 | 401 | """ |
173 | | - Runs the simulation using MPSCircuit with layerwise DMRG-like compression. |
| 402 | + Runs simulation using DMRG-style update (2-site sweep for bond adaptability). |
174 | 403 | """ |
175 | 404 | mps = tc.MPSCircuit(n_qubits) |
| 405 | + mps.position(0) |
176 | 406 |
|
177 | | - # Manual truncation control |
178 | | - mps.set_split_rules({}) # Infinite bond dimension during application |
| 407 | + # Process layers in chunks |
| 408 | + chunk_size = 2 # Process 2 layers at a time (Fig 4 idea: several layers) |
| 409 | + for i in range(0, len(layers), chunk_size): |
| 410 | + chunk = layers[i : i + chunk_size] |
179 | 411 |
|
180 | | - for layer in layers: |
181 | | - # 1. Apply all gates in the layer |
182 | | - # This increases the bond dimension. |
183 | | - # This creates the "target state" for the variational compression. |
184 | | - for gate in layer: |
185 | | - index = gate["index"] |
186 | | - params = gate.get("parameters", {}) |
187 | | - g_obj = gate["gatef"](**params) |
188 | | - mps.apply(g_obj, *index) |
| 412 | + # Build MPO for this chunk |
| 413 | + mpo_tensors = build_mpo_from_layers(n_qubits, chunk) |
189 | 414 |
|
190 | | - # 2. Perform variational compression (DMRG sweep) |
191 | | - # We compress the state back to bond_dim. |
192 | | - # The SVD sweep (iterative fitting) is the standard DMRG-style compression |
193 | | - # for MPO-MPS multiplication or time evolution. |
194 | | - mps = variational_compress(mps, bond_dim, sweeps=2) |
| 415 | + # Run DMRG sweep (2-site update to adapt bond dim up to chi) |
| 416 | + mps = run_1site_dmrg(mps, mpo_tensors, bond_dim, sweeps=1) |
195 | 417 |
|
196 | 418 | return mps |
197 | 419 |
|
@@ -235,7 +457,7 @@ def main(): |
235 | 457 | infidelities = [] |
236 | 458 |
|
237 | 459 | # 2. MPS Simulation with varying bond dimension |
238 | | - logger.info("Running MPS Simulations (Layerwise DMRG)...") |
| 460 | + logger.info("Running MPS Simulations (DMRG Sweep)...") |
239 | 461 | for chi in BOND_DIMS: |
240 | 462 | start_time = time.time() |
241 | 463 | mps = run_mps_simulation_dmrg(circuit._nqubits, layers, chi) |
|
0 commit comments