
Commit a2e3cad

Absorb ITensors.ITensorMPS and ITensorTDVP.jl (#31)
1 parent df0bab2 commit a2e3cad

150 files changed: +19414 −266 lines changed

Note: large commits have some content hidden by default, so only a subset of the 150 changed files is shown below.

.gitignore

Lines changed: 2 additions & 0 deletions
@@ -25,3 +25,5 @@ Manifest.toml
 
 # Vi/Vim backup files
 .*.swp
+
+.DS_Store

Project.toml

Lines changed: 42 additions & 6 deletions
@@ -1,17 +1,53 @@
 name = "ITensorMPS"
 uuid = "0d1a4710-d33b-49a5-8f18-73bdf49b47e2"
 authors = ["Matthew Fishman <[email protected]>", "Miles Stoudenmire <[email protected]>"]
-version = "0.2.6"
+version = "0.3.0"
 
 [deps]
-ITensorTDVP = "25707e16-a4db-4a07-99d9-4d67b7af0342"
+Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
+Compat = "34da2185-b29b-5c13-b0c7-acf172513d20"
 ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5"
-Reexport = "189a3867-3050-52da-a836-e630ba90ab69"
+IsApprox = "28f27b66-4bd8-47e7-9110-e2746eb8bed7"
+KrylovKit = "0b1a1467-8014-51b9-945f-bf0ae24f4b77"
+LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
+NDTensors = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf"
+Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7"
+Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
+SerializedElementArrays = "d3ce8812-9567-47e9-a7b5-65a6d70a3065"
+TupleTools = "9d95972d-f1c8-5527-a6e0-b4b365fa01f6"
+
+[weakdeps]
+ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
+HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f"
+Observers = "338f10d5-c7f1-4033-a7d1-f9dec39bcaa0"
+PackageCompiler = "9b87118b-4619-50d2-8e1e-99f35a4d4d9d"
+ZygoteRules = "700de1a5-db45-46bc-99cf-38207098b444"
+
+[extensions]
+ITensorMPSChainRulesCoreExt = "ChainRulesCore"
+ITensorMPSHDF5Ext = "HDF5"
+ITensorMPSObserversExt = "Observers"
+ITensorMPSPackageCompilerExt = "PackageCompiler"
+ITensorMPSZygoteRulesExt = ["ChainRulesCore", "ZygoteRules"]
 
 [compat]
-ITensorTDVP = "0.4.1"
-ITensors = "0.6.7"
-Reexport = "1"
+Adapt = "4.1.0"
+ChainRulesCore = "1.10"
+Compat = "4.16.0"
+HDF5 = "0.14, 0.15, 0.16, 0.17"
+ITensors = "0.7"
+IsApprox = "2.0.0"
+KrylovKit = "0.8.1"
+LinearAlgebra = "1.10"
+NDTensors = "0.3.46"
+Observers = "0.2"
+PackageCompiler = "1, 2"
+Printf = "1.10"
+Random = "1.10"
+SerializedElementArrays = "0.1.0"
+Test = "1.10"
+TupleTools = "1.5.0"
+ZygoteRules = "0.2.2"
 julia = "1.10"
 
 [extras]
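The new [weakdeps] and [extensions] tables declare Julia package extensions: each extension module listed above (for example ITensorMPSHDF5Ext) is loaded only when its trigger package is loaded in the same session as ITensorMPS. A minimal sketch of what that looks like from user code, assuming the HDF5 extension carries over the MPS read/write support previously provided by ITensors.jl (the file name "psi.h5" and the state are illustrative):

using ITensors, ITensorMPS  # ITensorMPSHDF5Ext is not active yet
using HDF5                  # loading the weak dependency activates ITensorMPSHDF5Ext

# Illustrative round trip of an MPS through an HDF5 file,
# mirroring the HDF5 support previously in ITensors.jl.
s = siteinds("S=1/2", 4)
psi = random_mps(s; linkdims=2)
h5open("psi.h5", "w") do f
  write(f, "psi", psi)
end
psi2 = h5open("psi.h5", "r") do f
  read(f, "psi", MPS)
end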

README.md

Lines changed: 7 additions & 9 deletions
@@ -1,22 +1,20 @@
 # ITensorMPS.jl
 
-[![Stable](https://img.shields.io/badge/docs-stable-blue.svg)](https://itensor.github.io/ITensorMPS.jl/stable/)
-[![Dev](https://img.shields.io/badge/docs-dev-blue.svg)](https://itensor.github.io/ITensorMPS.jl/dev/)
+[![Docs](https://img.shields.io/badge/docs-latest-blue.svg)](https://itensor.github.io/ITensors.jl/dev/)
 [![Build Status](https://github.com/ITensor/ITensorMPS.jl/actions/workflows/CI.yml/badge.svg?branch=main)](https://github.com/ITensor/ITensorMPS.jl/actions/workflows/CI.yml?query=branch%3Amain)
-[![Coverage](https://codecov.io/gh/ITensor/ITensorMPS.jl/branch/main/graph/badge.svg)](https://codecov.io/gh/ITensor/ITensorMPS.jl)
 [![Code Style: Blue](https://img.shields.io/badge/code%20style-blue-4495d1.svg)](https://github.com/invenia/BlueStyle)
 
-Finite MPS and MPO methods based on ITensor (ITensors.jl).
+Finite MPS and MPO methods based on the Julia version of [ITensor](https://www.itensor.org) ([ITensors.jl](https://github.com/ITensor/ITensors.jl)). See the [ITensors.jl documentation](https://itensor.github.io/ITensors.jl/dev/) for more details.
 
-This package currently re-exports the MPS and MPO functionality of the [ITensors.jl](https://github.com/ITensor/ITensors.jl), including functionality like DMRG, applying MPO to MPS, applying gates to MPS and MPO, etc. See the [ITensor documentation](https://itensor.github.io/ITensors.jl/dev) for guides and examples on using this package.
+## News
 
-Additionally, it re-exports the functionality of the [ITensorTDVP.jl](https://github.com/ITensor/ITensorTDVP.jl) package, which provides other DMRG-like MPS solvers such as TDVP and MPS linear equation solving.
+### ITensorMPS.jl v0.3 release notes
 
-## Upgrade guide
+All MPS/MPO code from [ITensors.jl](https://github.com/ITensor/ITensors.jl) and [ITensorTDVP.jl](https://github.com/ITensor/ITensorTDVP.jl) has been moved into this repository, which now relies on ITensors.jl v0.7 and above. All of the MPS/MPO functionality that was previously in ITensors.jl and ITensorTDVP.jl will be developed here from now on. For users of this repository, this change should not break any code, though please let us know if you have any issues.
 
-The goal will be to move the MPS and MPO code from the ITensors.jl package, along with all of the code from the [ITensorTDVP.jl](https://github.com/ITensor/ITensorTDVP.jl) package, into this repository. If you are using any MPS/MPO functionality of ITensors.jl, such as the `MPS` and `MPO` types or constructors thereof (like `randomMPS`), `OpSum`, `siteinds`, `dmrg`, `apply`, etc. you should install the ITensorMPS.jl package with `import Pkg; Pkg.add("ITensorMPS")` and add `using ITensorMPS` to your code. Additionally, if you are currently using [ITensorTDVP.jl](https://github.com/ITensor/ITensorTDVP.jl), you should replace `using ITensorTDVP` with `using ITensorMPS` in your codes.
+#### Upgrade guide
 
-## News
+If you are using any MPS/MPO functionality of ITensors.jl, such as the `MPS` and `MPO` types or constructors thereof (like `random_mps`), `OpSum`, `siteinds`, `dmrg`, `apply`, etc., you should install the ITensorMPS.jl package with `import Pkg; Pkg.add("ITensorMPS")` and add `using ITensorMPS` to your code. Additionally, if you are currently using [ITensorTDVP.jl](https://github.com/ITensor/ITensorTDVP.jl), you should replace `using ITensorTDVP` with `using ITensorMPS` in your code.
 
 ### ITensorMPS.jl v0.2.1 release notes
 
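As a concrete illustration of the upgrade guide above, a minimal before/after sketch; the Heisenberg Hamiltonian and the DMRG sweep parameters are placeholder values, not taken from the README:

# Install the package once:
# import Pkg; Pkg.add("ITensorMPS")

# Before: `using ITensors` (plus `using ITensorTDVP` for TDVP-style solvers).
# After: load ITensorMPS alongside ITensors; per the release notes above,
# the existing MPS/MPO code should keep working unchanged.
using ITensors, ITensorMPS

# Placeholder spin-1/2 Heisenberg chain.
function heisenberg(n)
  os = OpSum()
  for j in 1:(n - 1)
    os += 0.5, "S+", j, "S-", j + 1
    os += 0.5, "S-", j, "S+", j + 1
    os += "Sz", j, "Sz", j + 1
  end
  return os
end

n = 10
s = siteinds("S=1/2", n)
H = MPO(heisenberg(n), s)
psi0 = random_mps(s; linkdims=10)

# Same `dmrg` call as before the package reorganization (placeholder sweeps).
energy, psi = dmrg(H, psi0; nsweeps=5, maxdim=100, cutoff=1e-10)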
Lines changed: 52 additions & 0 deletions
@@ -0,0 +1,52 @@
using ITensors, ITensorMPS
using Zygote

s = siteind("Qubit")

f(x) = op("Ry", s; θ=x)[1, 1]

x = 0.2
@show f(x), cos(x / 2)
@show f'(x), -sin(x / 2) / 2

# Simple gate optimization
ψ0 = state(s, "0")
ψp = state(s, "+")

function loss(x)
  U = op("Ry", s; θ=x)
  Uψ0 = replaceprime(U * ψ0, 1 => 0)
  return -(dag(ψp) * Uψ0)[]
end

# Extremely simple gradient descent implementation,
# where gradients are computed with automatic differentiation
# using Zygote.
function gradient_descent(f, x0; γ, nsteps, grad_tol)
  @show γ, nsteps
  x = x0
  f_x = f(x)
  ∇f_x = f'(x)
  step = 0
  @show step, x, f_x, ∇f_x
  for step in 1:nsteps
    x -= γ * ∇f_x
    f_x = f(x)
    ∇f_x = f'(x)
    @show step, x, f_x, ∇f_x
    if norm(∇f_x) ≤ grad_tol
      break
    end
  end
  return x, f_x, ∇f_x
end

x0 = 0
γ = 2.0 # Learning rate
nsteps = 30 # Number of steps of gradient descent
grad_tol = 1e-4 # Stop if gradient falls below this value
x, loss_x, ∇loss_x = gradient_descent(loss, x0; γ=γ, nsteps=nsteps, grad_tol=grad_tol)

@show x0, loss(x0)
@show x, loss(x)
@show π / 2, loss(π / 2)
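For reference, the analytic values checked by the @show lines above follow from the standard single-qubit rotation convention, which the cos(x / 2) comparison in the script implies is the one used for "Ry":

R_y(\theta) = \begin{pmatrix} \cos(\theta/2) & -\sin(\theta/2) \\ \sin(\theta/2) & \cos(\theta/2) \end{pmatrix},
\qquad
f(\theta) = [R_y(\theta)]_{1,1} = \cos(\theta/2),
\qquad
f'(\theta) = -\tfrac{1}{2}\sin(\theta/2).

In the same convention, \mathrm{loss}(x) = -\langle + | R_y(x) | 0 \rangle = -\tfrac{1}{\sqrt{2}}(\cos(x/2) + \sin(x/2)), which reaches its minimum value of -1 at x = \pi/2, hence the final comparison against π / 2.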
Lines changed: 65 additions & 0 deletions
@@ -0,0 +1,65 @@
using ITensors, ITensorMPS
using OptimKit
using Random
using Zygote

nsites = 20 # Number of sites
nlayers = 3 # Layers of gates in the ansatz
gradtol = 1e-4 # Tolerance for stopping gradient descent

# A layer of the circuit we want to optimize
function layer(nsites, θ⃗)
  RY_layer = [("Ry", (n,), (θ=θ⃗[n],)) for n in 1:nsites]
  CX_layer = [("CX", (n, n + 1)) for n in 1:2:(nsites - 1)]
  return [RY_layer; CX_layer]
end

# The variational circuit we want to optimize
function variational_circuit(nsites, nlayers, θ⃗)
  range = 1:nsites
  circuit = layer(nsites, θ⃗[range])
  for n in 1:(nlayers - 1)
    circuit = [circuit; layer(nsites, θ⃗[range .+ n * nsites])]
  end
  return circuit
end

Random.seed!(1234)

θ⃗ᵗᵃʳᵍᵉᵗ = 2π * rand(nsites * nlayers)
𝒰ᵗᵃʳᵍᵉᵗ = variational_circuit(nsites, nlayers, θ⃗ᵗᵃʳᵍᵉᵗ)

s = siteinds("Qubit", nsites)
Uᵗᵃʳᵍᵉᵗ = ops(𝒰ᵗᵃʳᵍᵉᵗ, s)

ψ0 = MPS(s, "0")

# Create the random target state
ψᵗᵃʳᵍᵉᵗ = apply(Uᵗᵃʳᵍᵉᵗ, ψ0; cutoff=1e-8)

#
# The loss function, a function of the gate parameters
# and implicitly depending on the target state:
#
# loss(θ⃗) = -|⟨θ⃗ᵗᵃʳᵍᵉᵗ|U(θ⃗)|0⟩|² = -|⟨θ⃗ᵗᵃʳᵍᵉᵗ|θ⃗⟩|²
#
function loss(θ⃗)
  nsites = length(ψ0)
  s = siteinds(ψ0)
  𝒰θ⃗ = variational_circuit(nsites, nlayers, θ⃗)
  Uθ⃗ = ops(𝒰θ⃗, s)
  ψθ⃗ = apply(Uθ⃗, ψ0)
  return -abs(inner(ψᵗᵃʳᵍᵉᵗ, ψθ⃗))^2
end

θ⃗₀ = randn!(copy(θ⃗ᵗᵃʳᵍᵉᵗ))

@show loss(θ⃗₀), loss(θ⃗ᵗᵃʳᵍᵉᵗ)

loss_∇loss(x) = (loss(x), convert(Vector, loss'(x)))
algorithm = LBFGS(; gradtol=gradtol, verbosity=2)
θ⃗ₒₚₜ, lossₒₚₜ, ∇lossₒₚₜ, numfg, normgradhistory = optimize(loss_∇loss, θ⃗₀, algorithm)

@show loss(θ⃗ₒₚₜ), loss(θ⃗ᵗᵃʳᵍᵉᵗ)

nothing
Lines changed: 81 additions & 0 deletions
@@ -0,0 +1,81 @@
using ITensors, ITensorMPS
using OptimKit
using Random
using Zygote

nsites = 4 # Number of sites
nlayers = 2 # Layers of gates in the ansatz
gradtol = 1e-4 # Tolerance for stopping gradient descent

# The Hamiltonian we are minimizing
function ising_hamiltonian(nsites; h)
  ℋ = OpSum()
  for j in 1:(nsites - 1)
    ℋ -= 1, "Z", j, "Z", j + 1
  end
  for j in 1:nsites
    ℋ += h, "X", j
  end
  return ℋ
end

# A layer of the circuit we want to optimize
function layer(nsites, θ⃗)
  RY_layer = [("Ry", (n,), (θ=θ⃗[n],)) for n in 1:nsites]
  CX_layer = [("CX", (n, n + 1)) for n in 1:2:(nsites - 1)]
  return [RY_layer; CX_layer]
end

# The variational circuit we want to optimize
function variational_circuit(nsites, nlayers, θ⃗)
  range = 1:nsites
  circuit = layer(nsites, θ⃗[range])
  for n in 1:(nlayers - 1)
    circuit = [circuit; layer(nsites, θ⃗[range .+ n * nsites])]
  end
  return circuit
end

s = siteinds("Qubit", nsites)

h = 1.3
ℋ = ising_hamiltonian(nsites; h=h)
H = MPO(ℋ, s)
ψ0 = MPS(s, "0")

#
# The loss function, a function of the gate parameters
# and implicitly depending on the Hamiltonian and state:
#
# loss(θ⃗) = ⟨0|U(θ⃗)† H U(θ⃗)|0⟩ = ⟨θ⃗|H|θ⃗⟩
#
function loss(θ⃗)
  nsites = length(ψ0)
  s = siteinds(ψ0)
  𝒰θ⃗ = variational_circuit(nsites, nlayers, θ⃗)
  Uθ⃗ = ops(𝒰θ⃗, s)
  ψθ⃗ = apply(Uθ⃗, ψ0; cutoff=1e-8)
  return inner(ψθ⃗, H, ψθ⃗; cutoff=1e-8)
end

Random.seed!(1234)
θ⃗₀ = 2π * rand(nsites * nlayers)

@show loss(θ⃗₀)

println("\nOptimize circuit with gradient optimization")

loss_∇loss(x) = (loss(x), convert(Vector, loss'(x)))
algorithm = LBFGS(; gradtol=1e-3, verbosity=2)
θ⃗ₒₚₜ, lossₒₚₜ, ∇lossₒₚₜ, numfg, normgradhistory = optimize(loss_∇loss, θ⃗₀, algorithm)

@show loss(θ⃗ₒₚₜ)

println("\nRun DMRG as a comparison")

e_dmrg, ψ_dmrg = dmrg(H, ψ0; nsweeps=5, maxdim=10)

println("\nCompare variational circuit energy to DMRG energy")
@show loss(θ⃗ₒₚₜ), e_dmrg

nothing
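A short note on the final comparison in this example: up to the truncation introduced by the cutoff in apply, the circuit state U(θ⃗)|0⟩ is normalized, so the loss is a variational energy,

\mathrm{loss}(\vec{\theta}) = \langle \psi(\vec{\theta}) | H | \psi(\vec{\theta}) \rangle \ge E_0,

where E_0 is the ground-state energy that DMRG estimates as e_dmrg. The optimized circuit energy should therefore lie at or above e_dmrg, approaching it as the ansatz (nlayers) is made more expressive.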

examples/autodiff/mps_autodiff.jl

Lines changed: 49 additions & 0 deletions
@@ -0,0 +1,49 @@
using ITensors, ITensorMPS
using OptimKit
using Zygote

function ising(n; J, h)
  os = OpSum()
  for j in 1:(n - 1)
    os -= J, "Z", j, "Z", j + 1
  end
  for j in 1:n
    os -= h, "X", j
  end
  return os
end

function loss(H, ψ)
  n = length(ψ)
  ψHψ = ITensor(1.0)
  ψψ = ITensor(1.0)
  for j in 1:n
    ψHψ = ψHψ * dag(ψ[j]') * H[j] * ψ[j]
    ψψ = ψψ * replaceinds(dag(ψ[j]'), s[j]' => s[j]) * ψ[j]
  end
  return ψHψ[] / ψψ[]
end

n = 10
s = siteinds("S=1/2", n)
J = 1.0
h = 0.5

# Loss function only works with `Vector{ITensor}`,
# extract with `ITensors.data`.
ψ0 = ITensors.data(random_mps(s; linkdims=10))
H = ITensors.data(MPO(ising(n; J, h), s))

loss(ψ) = loss(H, ψ)

optimizer = LBFGS(; maxiter=25, verbosity=2)
function loss_and_grad(x)
  y, (∇,) = withgradient(loss, x)
  return y, ∇
end
ψ, fs, gs, niter, normgradhistory = optimize(loss_and_grad, ψ0, optimizer)
Edmrg, ψdmrg = dmrg(MPO(H), MPS(ψ0); nsweeps=10, cutoff=1e-8)

@show loss(ψ0), norm(loss'(ψ0))
@show loss(ψ), norm(loss'(ψ))
@show loss(ITensors.data(ψdmrg)), norm(loss'(ITensors.data(ψdmrg)))
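For reference, the loss function in this example contracts the two tensor networks ⟨ψ|H|ψ⟩ and ⟨ψ|ψ⟩ site by site and returns their ratio, i.e. the Rayleigh quotient

E(\psi) = \frac{\langle \psi | H | \psi \rangle}{\langle \psi | \psi \rangle},

which is well defined for the unnormalized Vector{ITensor} representation being optimized here and is compared against the DMRG energy Edmrg at the end of the script.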
