using ITensors, ITensorMPS
using OptimKit
using Random
using Zygote

nsites = 4 # Number of sites
nlayers = 2 # Layers of gates in the ansatz
gradtol = 1e-4 # Tolerance for stopping gradient descent

# The Hamiltonian we are minimizing
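# (an Ising chain in a transverse field on `nsites` sites with open boundaries,
#  H = -Σⱼ ZⱼZⱼ₊₁ + h Σⱼ Xⱼ)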
function ising_hamiltonian(nsites; h)
  ℋ = OpSum()
  for j in 1:(nsites - 1)
    ℋ -= 1, "Z", j, "Z", j + 1
  end
  for j in 1:nsites
    ℋ += h, "X", j
  end
  return ℋ
end

# A layer of the circuit we want to optimize
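# Each layer applies one "Ry" rotation on every site, followed by "CX" gates
# acting on the site pairs (1, 2), (3, 4), ...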
function layer(nsites, θ⃗)
  RY_layer = [("Ry", (n,), (θ=θ⃗[n],)) for n in 1:nsites]
  CX_layer = [("CX", (n, n + 1)) for n in 1:2:(nsites - 1)]
  return [RY_layer; CX_layer]
end

# The variational circuit we want to optimize
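# Each layer consumes `nsites` parameters, so `θ⃗` is expected to have
# length `nsites * nlayers`.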
function variational_circuit(nsites, nlayers, θ⃗)
  range = 1:nsites
  circuit = layer(nsites, θ⃗[range])
  for n in 1:(nlayers - 1)
    circuit = [circuit; layer(nsites, θ⃗[range .+ n * nsites])]
  end
  return circuit
end

s = siteinds("Qubit", nsites)

h = 1.3
ℋ = ising_hamiltonian(nsites; h=h)
H = MPO(ℋ, s)
ψ0 = MPS(s, "0")
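# `H` is the Hamiltonian as an MPO on the qubit site indices `s`, and
# `ψ0` is the product state |00…0⟩ that the circuit is applied to.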

#
# The loss function, a function of the gate parameters
# and implicitly depending on the Hamiltonian and state:
#
# loss(θ⃗) = ⟨0|U(θ⃗)† H U(θ⃗)|0⟩ = ⟨θ⃗|H|θ⃗⟩
#
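# It is evaluated by turning the gate list into ITensor operators with `ops`,
# applying them to |00…0⟩ with `apply` (truncating with the given `cutoff`),
# and then computing the energy with `inner`.
#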
function loss(θ⃗)
  nsites = length(ψ0)
  s = siteinds(ψ0)
  𝒰θ⃗ = variational_circuit(nsites, nlayers, θ⃗)
  Uθ⃗ = ops(𝒰θ⃗, s)
  ψθ⃗ = apply(Uθ⃗, ψ0; cutoff=1e-8)
  # Prime the bra so its site indices match the primed indices of the MPO
  return inner(ψθ⃗', H, ψθ⃗; cutoff=1e-8)
end

Random.seed!(1234)
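# One "Ry" angle per site per layer, drawn uniformly from [0, 2π)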
θ⃗₀ = 2π * rand(nsites * nlayers)

@show loss(θ⃗₀)

println("\nOptimize circuit with gradient optimization")

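# OptimKit's `optimize` expects a function returning the value and the gradient;
# `loss'(x)` is Zygote's reverse-mode gradient of `loss` at `x`.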
loss_∇loss(x) = (loss(x), convert(Vector, loss'(x)))
algorithm = LBFGS(; gradtol=gradtol, verbosity=2)
θ⃗ₒₚₜ, lossₒₚₜ, ∇lossₒₚₜ, numfg, normgradhistory = optimize(loss_∇loss, θ⃗₀, algorithm)

@show loss(θ⃗ₒₚₜ)

println("\nRun DMRG as a comparison")

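# DMRG on the same MPO gives a reference value for the ground-state energy.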
e_dmrg, ψ_dmrg = dmrg(H, ψ0; nsweeps=5, maxdim=10)

println("\nCompare variational circuit energy to DMRG energy")
@show loss(θ⃗ₒₚₜ), e_dmrg
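
# As an optional extra check (not part of the original comparison), one could
# also measure how close the optimized circuit state is to the DMRG state via
# their overlap. `ψ_opt` is an illustrative name, and this assumes the
# optimization above converged to a good approximation of the ground state.
ψ_opt = apply(ops(variational_circuit(nsites, nlayers, θ⃗ₒₚₜ), s), ψ0; cutoff=1e-8)
@show abs(inner(ψ_opt, ψ_dmrg))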

nothing