import time

import optax

import tensorcircuit as tc
from tensorcircuit.templates.lattice import SquareLattice, get_compatible_layers
from tensorcircuit.templates.hamiltonians import heisenberg_hamiltonian

# Use JAX for high performance, especially on GPU.
# For CPU-only environments, TensorFlow can also be efficient.
K = tc.set_backend("jax")
tc.set_dtype("complex64")
# Use a more powerful contractor for better performance on larger graphs.
# On Windows, cotengra's multiprocessing can cause issues, so we disable
# it here to ensure stability.
tc.set_contractor("cotengra-8192-8192", parallel=False)


def run_vqe():
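    """Run VQE for the 2D Heisenberg model on a periodic square lattice."""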
    # Lattice dimensions and number of VQE layers.
    n, m, nlayers = 4, 4, 6

    # Create a SquareLattice instance. This object holds all geometric
    # information, such as site coordinates and neighbor relations.
    lattice = SquareLattice(size=(n, m), pbc=True, precompute_neighbors=1)

    # Generate the Heisenberg Hamiltonian directly from the lattice object
    # and the coupling constants.
    h = heisenberg_hamiltonian(lattice, j_coupling=[1.0, 1.0, 0.8])  # Jx, Jy, Jz

    # Get nearest-neighbor bonds and partition them into compatible layers.
    # This is the core of the gate scheduling logic: `get_compatible_layers`
    # ensures that gates within each layer act on disjoint qubits and can
    # therefore be applied in parallel.
    nn_bonds = lattice.get_neighbor_pairs(k=1, unique=True)
    gate_layers = get_compatible_layers(nn_bonds)
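
    # Quick structural check (illustrative addition, not in the original
    # flow): on a 4x4 square lattice with periodic boundaries every site has
    # 4 nearest neighbors, so there are 4 * n * m / 2 = 32 unique bonds; the
    # compatible layers partition them, covering every bond exactly once.
    assert sum(len(layer) for layer in gate_layers) == 2 * n * m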

    def singlet_init(circuit):
        # Prepare singlet pairs: a good initial state for Heisenberg
        # ground-state search.
        nq = circuit._nqubits
        for i in range(0, nq - 1, 2):
            j = (i + 1) % nq
            # NOTE: the gate sequence below is an assumed standard singlet
            # construction; it maps |00> on the pair (i, j) to the singlet
            # (|01> - |10>)/sqrt(2).
            circuit.x(i)
            circuit.h(i)
            circuit.cnot(i, j)
            circuit.x(j)
        return circuit
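
    # Illustrative check (not part of the training flow): on a 2-qubit
    # circuit the prepared amplitudes should be [0, 1, -1, 0] / sqrt(2)
    # up to a global phase:
    #   print(singlet_init(tc.Circuit(2)).state())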

    def vqe_forward(param):
        """
        Defines the VQE ansatz and computes the energy expectation.

        The ansatz structure is:
        - Initial state preparation (singlet pairs).
        - nlayers of parameterized blocks, each consisting of RZZ, RXX, and
          RYY entangling layers.
        - Gates within each entangling layer are applied according to the
          pre-computed `gate_layers` for maximum parallelism. All gates of
          the same type in a VQE layer share the same parameter.
        """
        c = tc.Circuit(n * m)
        c = singlet_init(c)

        for i in range(nlayers):
            # RZZ layer
            for layer in gate_layers:
                for j, k in layer:
                    c.rzz(int(j), int(k), theta=param[i, 0])
            # RXX layer
            for layer in gate_layers:
                for j, k in layer:
                    c.rxx(int(j), int(k), theta=param[i, 1])
            # RYY layer
            for layer in gate_layers:
                for j, k in layer:
                    c.ryy(int(j), int(k), theta=param[i, 2])

        # The Hamiltonian is a sparse matrix, so we use the corresponding
        # expectation method.
        return tc.templates.measurements.operator_expectation(c, h)
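
    # Cross-check sketch (hypothetical, kept commented out): for small
    # systems the VQE energy can be compared with exact diagonalization of
    # the sparse Hamiltonian; `h_scipy` stands for a scipy-COO copy of `h`,
    # whose construction depends on the active backend:
    #   import scipy.sparse.linalg as sla
    #   e0 = sla.eigsh(h_scipy, k=1, which="SA")[0][0]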

    # JIT-compiled value-and-grad for a single (non-batched) training instance.
    vgf = K.jit(K.value_and_grad(vqe_forward))

    # Parameters for a single training instance.
    # Shape: (nlayers, 3) -> 3 for the RZZ, RXX, RYY angles per layer.
    param = tc.backend.implicit_randn(stddev=0.02, shape=[nlayers, 3])

    # Use the Adam optimizer from Optax.
    optimizer = optax.adam(learning_rate=3e-3)
    opt_state = optimizer.init(param)

    @K.jit
    def train_step(param, opt_state):
        """A single training step, JIT-compiled for maximum speed.

        This follows the standard Optax optimization paradigm.
        """
        loss_val, grads = vgf(param)
        updates, opt_state = optimizer.update(grads, opt_state, param)
        param = optax.apply_updates(param, updates)
        return param, opt_state, loss_val
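
    # Note: the first call to train_step triggers JIT compilation and is
    # slow; later calls reuse the cached executable, so the per-step timing
    # printed below stabilizes after the first iteration.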

    # Main training loop.
    print("Starting VQE optimization...")
    for i in range(1000):
        time0 = time.time()
        param, opt_state, loss = train_step(param, opt_state)
        # NOTE: logging cadence and message format here are illustrative
        # assumptions.
        if i % 10 == 0:
            print(
                f"Step {i}: loss = {loss:.6f}, time per step = {time.time() - time0:.4f}s"
            )

    print("Optimization finished.")
    # Example result on an A800 GPU: ~ -25.3
    print(f"Final Loss: {loss:.6f}")