@@ -14,11 +14,11 @@ using Unitful
include("utils.jl")
include("Circuit.jl")

-# numbe of qubits
+# number of qubits
N = 20

# number of layers
-L = 4
+L = 6

# generate parametric circuit
ansatz = efficient_su2(N, L)
@@ -69,28 +69,15 @@ function expectation(params, obs, coef)
    U_dagger = adjoint(U)
    bra = adjoint(ket)

-    # rename index labels to avoid conflicts
-    resetinds!(ket)
-    resetinds!(U)
-    resetinds!(obs)
-    resetinds!(U_dagger)
-    resetinds!(bra)
-
-    # align the indices
-    @align! outputs(ket) => inputs(U)
-    @align! outputs(U) => inputs(obs)
-    @align! outputs(obs) => inputs(U_dagger)
-    @align! outputs(U_dagger) => inputs(bra)
-
-    # construct the tensor network for the expectation value
-    tn = GenericTensorNetwork()
-    append!(tn, all_tensors(ket))
-    append!(tn, all_tensors(U))
-    append!(tn, all_tensors(obs))
-    append!(tn, all_tensors(U_dagger))
-    append!(tn, all_tensors(bra))
-
-    res = contract(tn; optimizer=LineGraph())
+    tn = generic_stack(ket, U, obs, U_dagger, bra)
+
+    # print the path's flops and max rank to consistently check that the same contraction path is used
+    # (exponentially big changes can be seen if not)
+    path = einexpr(tn; optimizer=Greedy())
+    @info "Contraction path" max_rank = maximum(ndims, Branches(path)) total_flops = mapreduce(
+        EinExprs.flops, +, Branches(path)
+    )
+    res = contract(tn; path)

    return real(coef * res[]) # ⟨ψ|U† O U|ψ⟩
end

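The rewritten body replaces the manual `resetinds!`/`@align!`/`append!` assembly with a single `generic_stack` call and pins the contraction order: `einexpr` computes a path once with the `Greedy()` optimizer, the same `path` is then handed to `contract`, and the `@info` line records the path's maximum tensor rank and total flops so a silently changed (and potentially exponentially more expensive) contraction order is easy to spot between runs. The reporting idiom itself is plain Base Julia; the following stand-alone sketch (not from the commit) uses made-up branch shapes in place of `Branches(path)`:

# Illustration only: the tuples below are hypothetical stand-ins for the branches of a
# contraction path; no Tenet/EinExprs functionality is used here.
branch_shapes = [(2, 2), (2, 2, 2), (2, 2, 2, 2)]
max_rank = maximum(length, branch_shapes)        # analogue of maximum(ndims, Branches(path))
total_flops = mapreduce(prod, +, branch_shapes)  # analogue of mapreduce(EinExprs.flops, +, Branches(path))
@info "Contraction path" max_rank max_rank total_flops total_flops
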
@@ -123,15 +110,15 @@ results = Vector{Tuple{String,String,T,T,Float64}}()
f_xla = @compile compile_options = Reactant.DefaultXLACompileOptions(; sync=true) expectation(
    params_re, observable_re, coef_re
)
-b = @benchmark f_xla($params_re, $observable_re, $coef_re) setup = (GC.gc(true))
+b = @benchmark $f_xla($params_re, $observable_re, $coef_re) setup = (GC.gc(true))
baseline = median(b).time
push!(
    results, ("Primal", "Only XLA", median(b).time * 1.0u"ns", std(b).time * 1.0u"ns", 1.0)
)

## default
f_default = @compile sync = true expectation(params_re, observable_re, coef_re)
-b = @benchmark f_default($params_re, $observable_re, $coef_re) setup = (GC.gc(true))
+b = @benchmark $f_default($params_re, $observable_re, $coef_re) setup = (GC.gc(true))
push!(
    results,
    (
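The only substantive change in this last hunk is the `$` in front of the benchmarked callables. `f_xla` and `f_default` are non-const global bindings, so without interpolation BenchmarkTools also measures the untyped global lookup and dynamic dispatch around the compiled kernel; interpolating with `$` splices the actual values into the benchmark expression. A minimal sketch of the difference, assuming only BenchmarkTools (`work` and `xs` below are hypothetical stand-ins, not from this repository):

using BenchmarkTools

work = x -> sum(abs2, x)   # non-const global callable, playing the role of `f_xla = @compile ...`
xs = rand(1000)

b_global = @benchmark work(xs)    # references the non-const globals inside the benchmark loop
b_interp = @benchmark $work($xs)  # interpolates the current values, so only the call itself is timed

@info "median time (ns)" global_binding = median(b_global).time interpolated = median(b_interp).time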