From 79193c41f0ff20dbf69f593c3ee047afe62932be Mon Sep 17 00:00:00 2001 From: Joey Date: Tue, 25 Mar 2025 17:17:15 -0400 Subject: [PATCH 01/18] Remove optimal sequence finding functions in favour of TensorOperations --- Project.toml | 3 + .../ITensorsTensorOperationsExt.jl | 14 + src/ITensors.jl | 4 +- src/imports.jl | 3 - .../src/ContractionSequenceOptimization.jl | 42 -- .../src/breadth_first_constructive.jl | 235 ----------- .../src/depth_first_constructive.jl | 85 ---- .../src/three_tensors.jl | 69 ---- .../src/utils.jl | 371 ------------------ .../contraction_cost.jl | 0 src/tensor_operations/tensor_algebra.jl | 44 +-- .../ITensorsTensorOperationsExt/Project.toml | 3 + .../ITensorsTensorOperationsExt/runtests.jl} | 26 +- .../runtests.jl | 16 - 14 files changed, 44 insertions(+), 871 deletions(-) create mode 100644 ext/ITensorsTensorOperationsExt/ITensorsTensorOperationsExt.jl delete mode 100644 src/lib/ContractionSequenceOptimization/src/ContractionSequenceOptimization.jl delete mode 100644 src/lib/ContractionSequenceOptimization/src/breadth_first_constructive.jl delete mode 100644 src/lib/ContractionSequenceOptimization/src/depth_first_constructive.jl delete mode 100644 src/lib/ContractionSequenceOptimization/src/three_tensors.jl delete mode 100644 src/lib/ContractionSequenceOptimization/src/utils.jl rename src/{lib/ContractionSequenceOptimization/src => tensor_operations}/contraction_cost.jl (100%) create mode 100644 test/ext/ITensorsTensorOperationsExt/Project.toml rename test/{lib/ContractionSequenceOptimization/test_itensor_contract.jl => ext/ITensorsTensorOperationsExt/runtests.jl} (91%) delete mode 100644 test/lib/ContractionSequenceOptimization/runtests.jl diff --git a/Project.toml b/Project.toml index 1c6887fe98..9717da9e92 100644 --- a/Project.toml +++ b/Project.toml @@ -34,6 +34,7 @@ ZygoteRules = "700de1a5-db45-46bc-99cf-38207098b444" [extensions] ITensorsHDF5Ext = "HDF5" +ITensorsTensorOperationsExt = "TensorOperations" ITensorsVectorInterfaceExt = "VectorInterface" ITensorsZygoteRulesExt = "ZygoteRules" @@ -58,6 +59,7 @@ SimpleTraits = "0.9.4" SparseArrays = "<0.0.1, 1.10" StaticArrays = "0.12, 1.0" Strided = "1.1, 2" +TensorOperations = "5.1.4" TimerOutputs = "0.5.5" TupleTools = "1.2" VectorInterface = "0.4, 0.5" @@ -68,5 +70,6 @@ julia = "1.10" [extras] ChainRulesCore = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f" +TensorOperations = "6aa20fa7-93e2-5fca-9bc0-fbd0db3c71a2" VectorInterface = "409d34a3-91d5-4945-b6ec-7529ddf182d8" ZygoteRules = "700de1a5-db45-46bc-99cf-38207098b444" diff --git a/ext/ITensorsTensorOperationsExt/ITensorsTensorOperationsExt.jl b/ext/ITensorsTensorOperationsExt/ITensorsTensorOperationsExt.jl new file mode 100644 index 0000000000..73511c02e8 --- /dev/null +++ b/ext/ITensorsTensorOperationsExt/ITensorsTensorOperationsExt.jl @@ -0,0 +1,14 @@ +module ITensorsTensorOperationsExt + +using ITensors: ITensors, ITensor, dim, inds +using NDTensors.AlgorithmSelection: @Algorithm_str +using TensorOperations: TensorOperations, optimaltree + +function ITensors.optimal_contraction_sequence(As::Union{Vector{<:ITensor},Tuple{Vararg{ITensor}}}) + network = collect.(inds.(As)) + inds_to_dims = Dict(i => Float64(dim(i)) for i in unique(reduce(vcat, network))) + seq, _ = optimaltree(network, inds_to_dims) + return seq +end + +end diff --git a/src/ITensors.jl b/src/ITensors.jl index 58763abda2..3c3fe4b37b 100644 --- a/src/ITensors.jl +++ b/src/ITensors.jl @@ -51,9 +51,6 @@ Documentation: 
https://itensor.github.io/ITensors.jl/stable/ module ITensors include("usings.jl") include("utils.jl") -include("lib/ContractionSequenceOptimization/src/ContractionSequenceOptimization.jl") -# TODO: `using .ContractionSequenceOptimization: ContractionSequenceOptimization, ...`. -using .ContractionSequenceOptimization include("lib/LazyApply/src/LazyApply.jl") # TODO: `using .LazyApply: LazyApply, ...`. using .LazyApply @@ -105,6 +102,7 @@ include("indexset.jl") include("itensor.jl") include("qn/flux.jl") include("oneitensor.jl") +include("tensor_operations/contraction_cost.jl") include("tensor_operations/tensor_algebra.jl") include("tensor_operations/matrix_algebra.jl") include("tensor_operations/permutations.jl") diff --git a/src/imports.jl b/src/imports.jl index 727ab54f83..7cc8130b4c 100644 --- a/src/imports.jl +++ b/src/imports.jl @@ -81,9 +81,6 @@ import Base.Broadcast: broadcastable, instantiate -import ITensors.ContractionSequenceOptimization: - contraction_cost, optimal_contraction_sequence - import Adapt: adapt_structure, adapt_storage import LinearAlgebra: diff --git a/src/lib/ContractionSequenceOptimization/src/ContractionSequenceOptimization.jl b/src/lib/ContractionSequenceOptimization/src/ContractionSequenceOptimization.jl deleted file mode 100644 index e580b90903..0000000000 --- a/src/lib/ContractionSequenceOptimization/src/ContractionSequenceOptimization.jl +++ /dev/null @@ -1,42 +0,0 @@ -module ContractionSequenceOptimization - -export optimal_contraction_sequence, contraction_cost - -include("utils.jl") -include("three_tensors.jl") -include("breadth_first_constructive.jl") -include("contraction_cost.jl") - -# -# The integer type of the dimensions and costs. -# Needs to be large to avoid overflow, but larger -# integer types are a little bit slower. -# -# For very large dimensions, one could try: -# -# https://github.com/rfourquet/BitIntegers.jl -# -# however in that limit it would be best to use -# symbolic dimensions. -# -const DimT = UInt128 - -""" - optimal_contraction_sequence(T) - -Returns a contraction sequence for contracting the tensors `T`. The sequence is -generally optimal (currently, outer product contractions are skipped, but some -optimal sequences require outer product contractions). 
-""" -function optimal_contraction_sequence(T) - if length(T) == 1 - return Any[1] - elseif length(T) == 2 - return Any[1, 2] - elseif length(T) == 3 - return optimal_contraction_sequence(T[1], T[2], T[3]) - end - return breadth_first_constructive(T) -end - -end # module ContractionSequenceOptimization diff --git a/src/lib/ContractionSequenceOptimization/src/breadth_first_constructive.jl b/src/lib/ContractionSequenceOptimization/src/breadth_first_constructive.jl deleted file mode 100644 index cf5b6919d6..0000000000 --- a/src/lib/ContractionSequenceOptimization/src/breadth_first_constructive.jl +++ /dev/null @@ -1,235 +0,0 @@ - -# -# Breadth-first constructive approach -# - -function breadth_first_constructive(indsT::Vector) - ntensors = length(indsT) - if ntensors ≤ 16 - return breadth_first_constructive(UInt16, DimT, indsT) - elseif ntensors ≤ 32 - return breadth_first_constructive(UInt32, DimT, indsT) - elseif ntensors ≤ 64 - return breadth_first_constructive(UInt64, DimT, indsT) - elseif ntensors ≤ 128 - return breadth_first_constructive(UInt128, DimT, indsT) - else - return breadth_first_constructive(BitSet, DimT, indsT) - end -end - -breadth_first_constructive(T::Tuple) = breadth_first_constructive(collect(T)) - -function breadth_first_constructive( - ::Type{TensorSetT}, ::Type{DimT}, T::Vector{IndexSetT} -) where {IndexSetT,TensorSetT,DimT} - labels, alldims = label_dims(DimT, T) - nlabels = length(alldims) - if nlabels ≤ 16 - return breadth_first_constructive(TensorSetT, UInt16, labels, alldims) - elseif nlabels ≤ 32 - return breadth_first_constructive(TensorSetT, UInt32, labels, alldims) - elseif nlabels ≤ 64 - return breadth_first_constructive(TensorSetT, UInt64, labels, alldims) - elseif nlabels ≤ 128 - return breadth_first_constructive(TensorSetT, UInt128, labels, alldims) - else - return breadth_first_constructive(TensorSetT, BitSet, labels, alldims) - end -end - -function breadth_first_constructive( - ::Type{TensorSetT}, ::Type{LabelSetT}, labels::Vector, alldims::Vector -) where {TensorSetT,LabelSetT} - return breadth_first_constructive( - TensorSetT, map(label -> bitset(LabelSetT, label), labels), alldims - ) -end - -# TODO: delete? 
-#function breadth_first_constructive(::Type{TensorSetT}, ::Type{LabelSetT}, ::Type{DimT}, -# T::Vector{<: ITensor}) where {TensorSetT, LabelSetT, DimT} -# indsT = [inds(Tₙ) for Tₙ in T] -# return breadth_first_constructive(TensorSetT, LabelSetT, DimT, indsT) -#end - -function breadth_first_constructive( - ::Type{TensorSetT}, ::Type{LabelSetT}, ::Type{DimT}, T::Vector{IndexSetT} -) where {IndexSetT,TensorSetT,LabelSetT,DimT} - labels, alldims = label_dims(DimT, T) - return breadth_first_constructive( - TensorSetT, map(label -> bitset(LabelSetT, label), labels), alldims - ) -end - -# A type storing information about subnetworks -const SubNetwork{LabelSetT,DimT} = NamedTuple{ - (:inds, :cost, :sequence),Tuple{LabelSetT,DimT,Vector{Any}} -} - -function breadth_first_constructive( - ::Type{TensorSetT}, T::Vector{LabelSetT}, alldims::Vector{DimT} -) where {TensorSetT,LabelSetT,DimT} - components = connectedcomponents(T, alldims) - N = length(components) - if N == 1 - return _breadth_first_constructive(TensorSetT, collect(1:length(T)), T, alldims) - end - sequences = Vector{Any}(undef, N) - for n in 1:N - componentsₙ = components[n] - if length(componentsₙ) == 1 - sequences[n] = only(componentsₙ) - continue - elseif length(componentsₙ) == 2 - sequences[n] = componentsₙ - continue - end - sequences[n] = _breadth_first_constructive( - TensorSetT, componentsₙ, T[componentsₙ], alldims - ) - end - return sequences -end - -# Apply breadth_first_constructive to a single disconnected subnetwork -# Based on: https://arxiv.org/abs/1304.6112 and https://github.com/Jutho/TensorOperations.jl/blob/v3.1.0/src/indexnotation/optimaltree.jl -function _breadth_first_constructive( - ::Type{TensorSetT}, Tlabels::Vector, T::Vector{LabelSetT}, alldims::Vector{DimT} -) where {TensorSetT,LabelSetT,DimT} - n = length(T) - - # `cache[c]` is the set of all objects made up by - # contracting `c` unique tensors from the original tensors `1:n`. - cache = Vector{Dict{TensorSetT,SubNetwork{LabelSetT,DimT}}}(undef, n) - for c in 1:n - # Initialized to empty - cache[c] = eltype(cache)() - end - # Fill the first cache with trivial data - for i in 1:n - cache[1][bitset(TensorSetT, [Tlabels[i]])] = (inds=T[i], cost=0, sequence=Any[]) - end - - # TODO: pick a reasonable maxcost, the product of all dimensions - # of tensors in the network. Could also be `typemax(DimT)`? - maxcost = try - # This may overflow, so we catch the error and return typemax(DimT) - Base.Checked.checked_mul( - reduce(Base.Checked.checked_mul, alldims; init=one(DimT)), maximum(alldims) - ) - catch - typemax(DimT) - end - - # TODO: pick a reasonable initialcost - # Maybe use the cost of the trivial contraction [4, [3, [2, 1]]]? - tensordims = Vector{DimT}(undef, n) - for k in 1:n - tensordims[k] = dim(T[k], alldims) - end - _initialcost, overflow = Base.Checked.mul_with_overflow( - maximum(tensordims), minimum(tensordims) - ) - _initialcost = overflow ? 
typemax(DimT) : _initialcost - initialcost = min(maxcost, _initialcost) - - # Factor to increase the cost cap by each iteration - costfac = maximum(alldims) - - currentcost = initialcost - previouscost = zero(initialcost) - - while isempty(cache[n]) - nextcost = maxcost - - # c is the total number of tensors being contracted - # in the current sequence - for c in 2:n - # For each pair of sets Sᵈ, Sᶜ⁻ᵈ, 1 ≤ d ≤ ⌊c/2⌋ - for d in 1:(c ÷ 2) - for a in keys(cache[d]), b in keys(cache[c - d]) - if d == c - d && _isless(b, a) - # When d == c-d (the subset sizes are equal), check that - # b > a so that that case (a,b) and (b,a) are not repeated - continue - end - - if !_isemptyset(_intersect(a, b)) - # Check that each element of S¹ appears - # at most once in (TᵃTᵇ). - continue - end - - # Use previously computed cost of contracting network `ab` and compare against the previouscost - ab = _union(a, b) - cache_c = @inbounds cache[c] - cache_ab = get(cache_c, ab, nothing) - currentcost_ab = isnothing(cache_ab) ? currentcost : cache_ab.cost - if currentcost_ab ≤ previouscost - continue - end - - # Determine the cost μ of contracting Tᵃ, Tᵇ - # These dictionary calls and `contraction_cost` take - # up most of the time. - cache_a = cache[d][a] - cache_b = cache[c - d][b] - - if dim(_intersect(cache_a.inds, cache_b.inds), alldims) < 2 - # XXX: For now, ignore outer products contractions. - # In the future, handle this in a more sophisticated way. - continue - end - - cost, inds_ab = contraction_cost(cache_a.inds, cache_b.inds, alldims) - if iszero(cost) - # If the cost is zero, that means the multiplication overflowed - continue - end - - if d > 1 - # Add to cost of contracting the subnetwork `a` - cost, overflow = Base.Checked.add_with_overflow(cost, cache_a.cost) - overflow && continue - end - if c - d > 1 - # Add to cost of contracting the subnetwork `b` - cost, overflow = Base.Checked.add_with_overflow(cost, cache_b.cost) - overflow && continue - end - - if cost ≤ currentcost_ab - cost_ab = cost - if d == 1 - sequence_a = _only(a) - else - sequence_a = cache_a.sequence - end - if c - d == 1 - sequence_b = _only(b) - else - sequence_b = cache_b.sequence - end - sequence_ab = Any[sequence_a, sequence_b] - - # XXX: this call is pretty slow (maybe takes 1/3 of total time in large n limit) - cache_c[ab] = (inds=inds_ab, cost=cost_ab, sequence=sequence_ab) - end # if cost ≤ currentcost_ab - end # for a in S[d], b in S[c-d] - end # for d in 1:c÷2 - end # for c in 2:n - previouscost = currentcost - currentcost = min(maxcost, nextcost * costfac) - - # Reset all tensors to old - for i in 1:n - for a in eachindex(cache[i]) - cache_a = cache[i][a] - cache[i][a] = (inds=cache_a.inds, cost=cache_a.cost, sequence=cache_a.sequence) - end - end - end # while isempty(S[n]) - Sⁿ = bitset(TensorSetT, Tlabels) - return cache[n][Sⁿ].sequence -end diff --git a/src/lib/ContractionSequenceOptimization/src/depth_first_constructive.jl b/src/lib/ContractionSequenceOptimization/src/depth_first_constructive.jl deleted file mode 100644 index b814268b08..0000000000 --- a/src/lib/ContractionSequenceOptimization/src/depth_first_constructive.jl +++ /dev/null @@ -1,85 +0,0 @@ - -# -# `depth_first_constructive` is a very simple recursive implementation -# but it is more difficult to cap the costs so scales very badly -# - -function depth_first_constructive(T::Vector{<:ITensor}) where {LabelSetT} - indsT = [inds(Tₙ) for Tₙ in T] - return depth_first_constructive(DimT, indsT) -end - -function depth_first_constructive( - ::Type{DimT}, 
T::Vector{IndexSetT} -) where {IndexSetT<:IndexSet,DimT} - labels, dims = label_dims(DimT, T) - nlabels = length(dims) - if nlabels ≤ 16 - return depth_first_constructive(UInt16, labels, dims) - elseif nlabels ≤ 32 - return depth_first_constructive(UInt32, labels, dims) - elseif nlabels ≤ 64 - return depth_first_constructive(UInt64, labels, dims) - elseif nlabels ≤ 128 - return depth_first_constructive(UInt128, labels, dims) - else - return depth_first_constructive(BitSet, labels, dims) - end -end - -function depth_first_constructive( - ::Type{LabelSetT}, labels::Vector, dims::Vector -) where {LabelSetT} - return depth_first_constructive(map(label -> bitset(LabelSetT, label), labels), dims) -end - -function depth_first_constructive( - ::Type{LabelSetT}, ::Type{DimT}, T::Vector{<:ITensor} -) where {LabelSetT,DimT} - indsT = [inds(Tₙ) for Tₙ in T] - return depth_first_constructive(LabelSetT, DimT, indsT) -end - -function depth_first_constructive( - ::Type{LabelSetT}, ::Type{DimT}, T::Vector{IndexSetT} -) where {IndexSetT<:IndexSet,LabelSetT,DimT} - labels, dims = label_dims(DimT, T) - return depth_first_constructive(map(label -> bitset(LabelSetT, label), labels), dims) -end - -function depth_first_constructive(T::Vector, ind_dims::Vector) - optimal_cost = Ref(typemax(eltype(ind_dims))) - optimal_sequence = Vector{Pair{Int,Int}}(undef, length(T) - 1) - _depth_first_constructive!( - optimal_sequence, optimal_cost, Pair{Int,Int}[], T, ind_dims, collect(1:length(T)), 0 - ) - return pair_sequence_to_tree(optimal_sequence, length(T)) -end - -function _depth_first_constructive!( - optimal_sequence, optimal_cost, sequence, T, ind_dims, remaining, cost -) - if length(remaining) == 1 - # Only should get here if the contraction was the best - # Otherwise it would have hit the `continue` below - @assert cost ≤ optimal_cost[] - optimal_cost[] = cost - optimal_sequence .= sequence - end - for aᵢ in 1:(length(remaining) - 1), bᵢ in (aᵢ + 1):length(remaining) - a = remaining[aᵢ] - b = remaining[bᵢ] - current_cost, Tᵈ = contraction_cost(T[a], T[b], ind_dims) - new_cost = cost + current_cost - if new_cost ≥ optimal_cost[] - continue - end - new_sequence = push!(copy(sequence), a => b) - new_T = push!(copy(T), Tᵈ) - new_remaining = deleteat!(copy(remaining), (aᵢ, bᵢ)) - push!(new_remaining, length(new_T)) - _depth_first_constructive!( - optimal_sequence, optimal_cost, new_sequence, new_T, ind_dims, new_remaining, new_cost - ) - end -end diff --git a/src/lib/ContractionSequenceOptimization/src/three_tensors.jl b/src/lib/ContractionSequenceOptimization/src/three_tensors.jl deleted file mode 100644 index 55763a62db..0000000000 --- a/src/lib/ContractionSequenceOptimization/src/three_tensors.jl +++ /dev/null @@ -1,69 +0,0 @@ - -# -# Special case for three tensors -# - -function compute_cost(external_dims::Tuple{Int,Int,Int}, internal_dims::Tuple{Int,Int,Int}) - dim11, dim22, dim33 = external_dims - dim12, dim23, dim31 = internal_dims - cost12 = dim11 * dim22 * dim12 * dim23 * dim31 - return cost12 + dim11 * dim22 * dim33 * dim31 * dim23 -end - -function three_tensor_contraction_sequence(which_sequence::Int)::Vector{Any} - @assert 1 ≤ which_sequence ≤ 3 - return if which_sequence == 1 - Any[3, [1, 2]] - elseif which_sequence == 2 - Any[1, [2, 3]] - else - Any[2, [3, 1]] - end -end - -function optimal_contraction_sequence(is1, is2, is3) - N1 = length(is1) - N2 = length(is2) - N3 = length(is3) - dim2 = dim(is2) - dim3 = dim(is3) - dim11 = 1 - dim12 = 1 - dim31 = 1 - @inbounds for n1 in 1:N1 - i1 = is1[n1] - n2 = 
findfirst(==(i1), is2) - if isnothing(n2) - n3 = findfirst(==(i1), is3) - if isnothing(n3) - dim11 *= dim(i1) - continue - end - dim31 *= dim(i1) - continue - end - dim12 *= dim(i1) - end - dim23 = 1 - @inbounds for n2 in 1:length(is2) - i2 = is2[n2] - n3 = findfirst(==(i2), is3) - if !isnothing(n3) - dim23 *= dim(i2) - end - end - dim22 = dim2 ÷ (dim12 * dim23) - dim33 = dim3 ÷ (dim23 * dim31) - external_dims1 = (dim11, dim22, dim33) - internal_dims1 = (dim12, dim23, dim31) - external_dims2 = (dim22, dim33, dim11) - internal_dims2 = (dim23, dim31, dim12) - external_dims3 = (dim33, dim11, dim22) - internal_dims3 = (dim31, dim12, dim23) - cost1 = compute_cost(external_dims1, internal_dims1) - cost2 = compute_cost(external_dims2, internal_dims2) - cost3 = compute_cost(external_dims3, internal_dims3) - mincost, which_sequence = findmin((cost1, cost2, cost3)) - sequence = three_tensor_contraction_sequence(which_sequence) - return sequence -end diff --git a/src/lib/ContractionSequenceOptimization/src/utils.jl b/src/lib/ContractionSequenceOptimization/src/utils.jl deleted file mode 100644 index 509050149c..0000000000 --- a/src/lib/ContractionSequenceOptimization/src/utils.jl +++ /dev/null @@ -1,371 +0,0 @@ - -# -# General helper functionality -# - -# -# Operations for tree data structures -# - -""" - deepmap(f, tree; filter=(x -> x isa AbstractArray)) - -Recursive map on a tree-like data structure. -`filter` is a function that returns `true` if the iteration -should continue `false` if the iteration should -stop (for example, because we are at a leaf and the function -`f` should be applied). - -```julia -julia> deepmap(x -> 2x, [1, [2, [3, 4]]]) -[2, [4, [6, 8]]] - -julia> deepmap(x -> 2 .* x, [1, (2, [3, 4])]) -2-element Vector{Any}: - 2 - (4, [6, 8]) - -julia> deepmap(x -> 3x, [[1 2; 3 4], [5 6; 7 8]]) -2-element Vector{Matrix{Int64}}: - [3 6; 9 12] - [15 18; 21 24] - -julia> deepmap(x -> 2x, (1, (2, (3, 4))); filter=(x -> x isa Tuple)) -(2, (4, (6, 8))) -``` -""" -function deepmap(f, tree; filter=(x -> x isa AbstractArray)) - return filter(tree) ? map(t -> deepmap(f, t; filter=filter), tree) : f(tree) -end - -# -# Contracting index sets and getting costs -# - -# TODO: make a type: -# ShortBitSet{T <: Unsigned} -# data::T -# end -# -# That is like a BitSet/BitVector but with a maximum set size. -# Aliases such as: -# -# const SBitSet64 = SBitSet{UInt64} -# -# would be helpful to specify a BitSet with a maximum number of -# 64 elements in the set. -# (See https://discourse.julialang.org/t/parse-an-array-of-bits-bitarray-to-an-integer/42361/11). - -# Previously we used the definition in NDTensors: -#import NDTensors: dim -import ITensors: dim - -# `is` could be Vector{Int} for BitSet -function dim(is::IndexSetT, ind_dims::Vector) where {IndexSetT<:Union{Vector{Int},BitSet}} - dim = one(eltype(ind_dims)) - for i in is - dim *= ind_dims[i] - end - return dim -end - -function dim(is::Unsigned, ind_dims::Vector{DimT}) where {DimT} - _isemptyset(is) && return one(eltype(ind_dims)) - dim = one(eltype(ind_dims)) - i = 1 - @inbounds while !iszero(is) - if isodd(is) - # TODO: use Base.Checked.mul_with_overflow - dim, overflow = Base.Checked.mul_with_overflow(dim, ind_dims[i]) - overflow && return zero(DimT) - end - is = is >> 1 - i += 1 - end - return dim -end - -function contraction_cost(indsTᵃ::BitSet, indsTᵇ::BitSet, dims::Vector) - indsTᵃTᵇ = _symdiff(indsTᵃ, indsTᵇ) - dim_a = dim(indsTᵃ, dims) - dim_b = dim(indsTᵇ, dims) - dim_ab = dim(indsTᵃTᵇ, dims) - # Perform the sqrt first to avoid overflow. 
- # Alternatively, use a larger integer type. - cost = round(Int, sqrt(dim_a) * sqrt(dim_b) * sqrt(dim_ab)) - return cost, indsTᵃTᵇ -end - -function contraction_cost( - indsTᵃ::IndexSetT, indsTᵇ::IndexSetT, dims::Vector -) where {IndexSetT<:Unsigned} - unionTᵃTᵇ = _union(indsTᵃ, indsTᵇ) - cost = dim(unionTᵃTᵇ, dims) - indsTᵃTᵇ = _setdiff(unionTᵃTᵇ, _intersect(indsTᵃ, indsTᵇ)) - return cost, indsTᵃTᵇ -end - -# -# Convert indices into unique integer labels -# - -function contraction_labels!(labels1, labels2, is1, is2) - nextlabel = 1 - nextlabel = common_contraction_labels!(labels1, labels2, is1, is2, nextlabel) - nextlabel = uncommon_contraction_labels!(labels1, is1, nextlabel) - nextlabel = uncommon_contraction_labels!(labels2, is2, nextlabel) - return labels1, labels2 -end - -# Compute the common contraction labels and return the next label -function common_contraction_labels!(labels1, labels2, is1, is2, label) - N1 = length(is1) - N2 = length(is2) - @inbounds for n1 in 1:N1, n2 in 1:N2 - i1 = is1[n1] - i2 = is2[n2] - if i1 == i2 - labels1[n1] = labels2[n2] = label - label += 1 - end - end - return label -end - -function uncommon_contraction_labels!(labels, is, label) - N = length(labels) - @inbounds for n in 1:N - if iszero(labels[n]) - labels[n] = label - label += 1 - end - end - return label -end - -function contraction_labels!(labels, is) - ntensors = length(is) - nextlabel = 1 - # Loop through each tensor pair searching for - # common indices - @inbounds for n1 in 1:(ntensors - 1), n2 in (n1 + 1):ntensors - nextlabel = common_contraction_labels!( - labels[n1], labels[n2], is[n1], is[n2], nextlabel - ) - end - @inbounds for n in 1:ntensors - nextlabel = uncommon_contraction_labels!(labels[n], is[n], nextlabel) - end - return nextlabel - 1 -end - -function empty_labels(is::NTuple{N}) where {N} - return ntuple(n -> fill(0, length(is[n])), Val(N)) -end - -function empty_labels(is::Vector) - ntensors = length(is) - labels = Vector{Vector{Int}}(undef, ntensors) - @inbounds for n in 1:ntensors - labels[n] = fill(0, length(is[n])) - end - return labels -end - -function contraction_labels(is) - labels = empty_labels(is) - contraction_labels!(labels, is) - return labels -end - -contraction_labels(is...) = contraction_labels(is) - -# -# Use a Dict as a cache to map the indices to the integer label -# This only helps with many nodes/tensors (nnodes > 30) -# TODO: determine the crossover when this is useful and use -# it in `depth_first_constructive`/`breadth_first_constructive` -# - -contraction_labels_caching(is) = contraction_labels_caching(eltype(eltype(is)), is) - -function contraction_labels_caching(::Type{IndexT}, is) where {IndexT} - labels = empty_labels(is) - return contraction_labels_caching!(labels, IndexT, is) -end - -function contraction_labels_caching!(labels, ::Type{IndexT}, is) where {IndexT} - N = length(is) - ind_to_label = Dict{IndexT,Int}() - label = 0 - @inbounds for n in 1:N - isₙ = is[n] - labelsₙ = labels[n] - @inbounds for j in 1:length(labelsₙ) - i = isₙ[j] - i_label = get!(ind_to_label, i) do - label += 1 - end - labelsₙ[j] = i_label - end - end - return label -end - -# -# Compute the labels and also return a data structure storing the dims. 
-# - -function label_dims(::Type{DimT}, is) where {DimT<:Integer} - labels = empty_labels(is) - nlabels = contraction_labels!(labels, is) - dims = fill(zero(DimT), nlabels) - @inbounds for i in 1:length(is) - labelsᵢ = labels[i] - isᵢ = is[i] - @inbounds for n in 1:length(labelsᵢ) - lₙ = labelsᵢ[n] - if iszero(dims[lₙ]) - dims[lₙ] = dim(isᵢ[n]) - end - end - end - return labels, dims -end - -label_dims(is...) = label_dims(is) - -# Convert a contraction sequence in pair form to tree format. -# This is used in `depth_first_constructive` to convert the output. -function pair_sequence_to_tree(pairs::Vector{Pair{Int,Int}}, N::Int) - trees = Any[1:N...] - for p in pairs - push!(trees, Any[trees[p[1]], trees[p[2]]]) - end - return trees[end] -end - -# -# BitSet utilities -# - -function _cmp(A::BitSet, B::BitSet) - for (a, b) in zip(A, B) - if !isequal(a, b) - return isless(a, b) ? -1 : 1 - end - end - return cmp(length(A), length(B)) -end - -# Returns true when `A` is less than `B` in lexicographic order. -_isless(A::BitSet, B::BitSet) = _cmp(A, B) < 0 - -bitset(::Type{BitSet}, ints) = BitSet(ints) - -function bitset(::Type{T}, ints) where {T<:Unsigned} - set = zero(T) - u = one(T) - for i in ints - set |= (u << (i - 1)) - end - return set -end - -# Return a vector of the positions of the nonzero bits -# Used for debugging -function findall_nonzero_bits(i::Unsigned) - nonzeros = Int[] - n = 1 - @inbounds while !iszero(i) - if isodd(i) - push!(nonzeros, n) - end - i = i >> 1 - n += 1 - end - return nonzeros -end - -# Return the position of the first nonzero bit -function findfirst_nonzero_bit(i::Unsigned) - n = 0 - @inbounds while !iszero(i) - if isodd(i) - return n + 1 - end - i = i >> 1 - n += 1 - end - return n -end - -_isless(s1::T, s2::T) where {T<:Unsigned} = s1 < s2 -_intersect(s1::BitSet, s2::BitSet) = intersect(s1, s2) -_intersect(s1::T, s2::T) where {T<:Unsigned} = s1 & s2 -_union(s1::BitSet, s2::BitSet) = union(s1, s2) -_union(s1::T, s2::T) where {T<:Unsigned} = s1 | s2 -_setdiff(s1::BitSet, s2::BitSet) = setdiff(s1, s2) -_setdiff(s1::T, s2::T) where {T<:Unsigned} = s1 & (~s2) -_symdiff(s1::BitSet, s2::BitSet) = symdiff(s1, s2) -_symdiff(s1::T, s2::T) where {T<:Unsigned} = xor(s1, s2) -_isemptyset(s::BitSet) = isempty(s) -_isemptyset(s::Unsigned) = iszero(s) - -# TODO: use _first instead, optimize to avoid using _set -_only(s::BitSet) = only(s) -_only(s::Unsigned) = findfirst_nonzero_bit(s) - -# -# Adjacency matrix and connected components -# - -# For a network of tensors T (stored as index labels), return the adjacency matrix. -function adjacencymatrix(T::Vector, alldims::Vector) - # First break up the network into disconnected parts - N = length(T) - _adjacencymatrix = falses(N, N) - for nᵢ in 1:(N - 1), nⱼ in (nᵢ + 1):N - if dim(_intersect(T[nᵢ], T[nⱼ]), alldims) > 1 - _adjacencymatrix[nᵢ, nⱼ] = _adjacencymatrix[nⱼ, nᵢ] = true - end - end - return _adjacencymatrix -end - -# For a given adjacency matrix of size n x n, connectedcomponents returns -# a list of components that contains integer vectors, where every integer -# vector groups the indices of the vertices of a connected component of the -# graph encoded by A. The number of connected components is given by -# length(components). 
-function connectedcomponents(A::AbstractMatrix{Bool}) - n = size(A, 1) - @assert size(A, 2) == n - components = Vector{Vector{Int}}(undef, 0) - assignedlist = falses((n,)) - for i in 1:n - if !assignedlist[i] - assignedlist[i] = true - checklist = [i] - currentcomponent = [i] - while !isempty(checklist) - j = pop!(checklist) - for k in findall(A[j, :]) - if !assignedlist[k] - push!(currentcomponent, k) - push!(checklist, k) - assignedlist[k] = true - end - end - end - push!(components, currentcomponent) - end - end - return components -end - -# For a network of tensors T (stored as index labels), return the connected components -# (splits up T into the connected components). -function connectedcomponents(T::Vector, alldims::Vector) - return connectedcomponents(adjacencymatrix(T, alldims)) -end diff --git a/src/lib/ContractionSequenceOptimization/src/contraction_cost.jl b/src/tensor_operations/contraction_cost.jl similarity index 100% rename from src/lib/ContractionSequenceOptimization/src/contraction_cost.jl rename to src/tensor_operations/contraction_cost.jl diff --git a/src/tensor_operations/tensor_algebra.jl b/src/tensor_operations/tensor_algebra.jl index a56c99bf57..248a8f69d3 100644 --- a/src/tensor_operations/tensor_algebra.jl +++ b/src/tensor_operations/tensor_algebra.jl @@ -74,41 +74,6 @@ function contract(A::ITensor, B::ITensor)::ITensor return _contract(A, B) end -function optimal_contraction_sequence(A::Union{Vector{<:ITensor},Tuple{Vararg{ITensor}}}) - if length(A) == 1 - return optimal_contraction_sequence(A[1]) - elseif length(A) == 2 - return optimal_contraction_sequence(A[1], A[2]) - elseif length(A) == 3 - return optimal_contraction_sequence(A[1], A[2], A[3]) - else - return _optimal_contraction_sequence(A) - end -end - -optimal_contraction_sequence(A::ITensor) = Any[1] -optimal_contraction_sequence(A1::ITensor, A2::ITensor) = Any[1, 2] -function optimal_contraction_sequence(A1::ITensor, A2::ITensor, A3::ITensor) - return optimal_contraction_sequence(inds(A1), inds(A2), inds(A3)) -end -optimal_contraction_sequence(As::ITensor...) = _optimal_contraction_sequence(As) - -_optimal_contraction_sequence(As::Tuple{<:ITensor}) = Any[1] -_optimal_contraction_sequence(As::Tuple{<:ITensor,<:ITensor}) = Any[1, 2] -function _optimal_contraction_sequence(As::Tuple{<:ITensor,<:ITensor,<:ITensor}) - return optimal_contraction_sequence(inds(As[1]), inds(As[2]), inds(As[3])) -end -function _optimal_contraction_sequence(As::Tuple{Vararg{ITensor}}) - return __optimal_contraction_sequence(As) -end - -_optimal_contraction_sequence(As::Vector{<:ITensor}) = __optimal_contraction_sequence(As) - -function __optimal_contraction_sequence(As) - indsAs = [inds(A) for A in As] - return optimal_contraction_sequence(indsAs) -end - function default_sequence() return using_contraction_sequence_optimization() ? "automatic" : "left_associative" end @@ -165,6 +130,15 @@ function contract( end end +function optimal_contraction_sequence(As) + return throw( + ArgumentError( + "Optimal contraction sequence isn't defined. Try loading a backend package like + TensorOperations.jl", + ), + ) +end + contract(As::ITensor...; kwargs...)::ITensor = contract(As; kwargs...) 
_contract(As, sequence::Int) = As[sequence] diff --git a/test/ext/ITensorsTensorOperationsExt/Project.toml b/test/ext/ITensorsTensorOperationsExt/Project.toml new file mode 100644 index 0000000000..2e118300ac --- /dev/null +++ b/test/ext/ITensorsTensorOperationsExt/Project.toml @@ -0,0 +1,3 @@ +[deps] +ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5" +TensorOperations = "6aa20fa7-93e2-5fca-9bc0-fbd0db3c71a2" diff --git a/test/lib/ContractionSequenceOptimization/test_itensor_contract.jl b/test/ext/ITensorsTensorOperationsExt/runtests.jl similarity index 91% rename from test/lib/ContractionSequenceOptimization/test_itensor_contract.jl rename to test/ext/ITensorsTensorOperationsExt/runtests.jl index 58e699dbf0..1a89d2180e 100644 --- a/test/lib/ContractionSequenceOptimization/test_itensor_contract.jl +++ b/test/ext/ITensorsTensorOperationsExt/runtests.jl @@ -1,20 +1,19 @@ +@eval module $(gensym()) + using ITensors using Test import Random: seed! seed!(12345) -using ITensors.ContractionSequenceOptimization: optimal_contraction_sequence, deepmap +using ITensors: dim, optimal_contraction_sequence +using TensorOperations: TensorOperations @testset "ITensor contraction sequence optimization" begin d = 100 i = Index(d, "i") A = random_itensor(i', dag(i)) - # Low level functions - @test dim([1, 2], [4, 5, 6]) == 4 * 5 - @test dim(Int[], [4, 5, 6]) == 1 - @test !ITensors.using_contraction_sequence_optimization() A2 = A' * A @@ -57,7 +56,7 @@ using ITensors.ContractionSequenceOptimization: optimal_contraction_sequence, de @test !ITensors.using_contraction_sequence_optimization() # This is not the only sequence - @test ITensors.optimal_contraction_sequence([A, A'', A']) == Any[1, Any[2, 3]] + @test ITensors.optimal_contraction_sequence([A, A'', A']) == Any[1, Any[3, 2]] time_without_opt = @elapsed A * A'' * A' @@ -83,9 +82,9 @@ using ITensors.ContractionSequenceOptimization: optimal_contraction_sequence, de ITensors.disable_contraction_sequence_optimization() @test !ITensors.using_contraction_sequence_optimization() - # This is not the only sequence - @test ITensors.optimal_contraction_sequence([A, A'', A', A''']) == - Any[Any[1, 3], Any[2, 4]] + seq = ITensors.optimal_contraction_sequence([A, A'', A', A''']) + @test length(seq) == 2 + @test issetequal(Any[3,1], first(seq)) || issetequal(Any[2,4], first(seq)) time_without_opt = @elapsed A * A'' * A' * A''' @@ -105,7 +104,7 @@ end # so that tensor allocations dominate over network # analysis for testing the number of allocations below. 
d0 = 2 - δd = 1000 + δd = 5000 ntensors = 6 ElType = Float64 d = [d0 + (n - 1) * δd for n in 1:ntensors] @@ -127,9 +126,11 @@ end @test allocations_left_associative ≈ allocations_left_associative_pairwise rtol = 0.01 sequence = foldr((x, y) -> [x, y], 1:ntensors) - @test sequence == optimal_contraction_sequence(As) + b1 = optimal_contraction_sequence(As) == Any[1, Any[2, Any[3, Any[4, [5, 6]]]]] + b2 = optimal_contraction_sequence(As) == Any[1, Any[2, Any[3, Any[4, [6, 5]]]]] + @test b1 || b2 + As_network = foldr((x, y) -> [x, y], As) - @test deepmap(n -> As[n], sequence) == As_network # Warmup contract(As; sequence=sequence) @@ -159,3 +160,4 @@ end @test allocations_right_associative_3 < allocations_left_associative @test allocations_right_associative_4 < allocations_left_associative end +end diff --git a/test/lib/ContractionSequenceOptimization/runtests.jl b/test/lib/ContractionSequenceOptimization/runtests.jl deleted file mode 100644 index c2fe5a8b6f..0000000000 --- a/test/lib/ContractionSequenceOptimization/runtests.jl +++ /dev/null @@ -1,16 +0,0 @@ -using ITensors -using Test - -ITensors.Strided.disable_threads() -ITensors.BLAS.set_num_threads(1) -ITensors.disable_threaded_blocksparse() - -@testset "$(@__DIR__)" begin - filenames = filter(readdir(@__DIR__)) do f - startswith("test_")(f) && endswith(".jl")(f) - end - @testset "Test $(@__DIR__)/$filename" for filename in filenames - println("Running $(@__DIR__)/$filename") - @time include(filename) - end -end From 5997fb02820cca92c9332a4c7b3cc741cd86c96c Mon Sep 17 00:00:00 2001 From: Joey Date: Tue, 25 Mar 2025 17:33:01 -0400 Subject: [PATCH 02/18] Updated Project.toml --- Project.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/Project.toml b/Project.toml index 9717da9e92..821c62cd15 100644 --- a/Project.toml +++ b/Project.toml @@ -29,6 +29,7 @@ Zeros = "bd1ec220-6eb4-527a-9b49-e79c3db6233b" [weakdeps] HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f" +TensorOperations = "6aa20fa7-93e2-5fca-9bc0-fbd0db3c71a2" VectorInterface = "409d34a3-91d5-4945-b6ec-7529ddf182d8" ZygoteRules = "700de1a5-db45-46bc-99cf-38207098b444" From d0e29848bdc39fe9adefa9956791351be4d77708 Mon Sep 17 00:00:00 2001 From: Joey Date: Tue, 25 Mar 2025 18:09:37 -0400 Subject: [PATCH 03/18] Formatting and delete file --- .../ITensorsTensorOperationsExt.jl | 4 +++- src/tensor_operations/tensor_algebra.jl | 2 +- test/ext/ITensorsTensorOperationsExt/Project.toml | 3 --- test/ext/ITensorsTensorOperationsExt/runtests.jl | 2 +- 4 files changed, 5 insertions(+), 6 deletions(-) delete mode 100644 test/ext/ITensorsTensorOperationsExt/Project.toml diff --git a/ext/ITensorsTensorOperationsExt/ITensorsTensorOperationsExt.jl b/ext/ITensorsTensorOperationsExt/ITensorsTensorOperationsExt.jl index 73511c02e8..0aaba8e9d8 100644 --- a/ext/ITensorsTensorOperationsExt/ITensorsTensorOperationsExt.jl +++ b/ext/ITensorsTensorOperationsExt/ITensorsTensorOperationsExt.jl @@ -4,7 +4,9 @@ using ITensors: ITensors, ITensor, dim, inds using NDTensors.AlgorithmSelection: @Algorithm_str using TensorOperations: TensorOperations, optimaltree -function ITensors.optimal_contraction_sequence(As::Union{Vector{<:ITensor},Tuple{Vararg{ITensor}}}) +function ITensors.optimal_contraction_sequence( + As::Union{Vector{<:ITensor},Tuple{Vararg{ITensor}}} +) network = collect.(inds.(As)) inds_to_dims = Dict(i => Float64(dim(i)) for i in unique(reduce(vcat, network))) seq, _ = optimaltree(network, inds_to_dims) diff --git a/src/tensor_operations/tensor_algebra.jl b/src/tensor_operations/tensor_algebra.jl index 
248a8f69d3..d1e58a0939 100644
--- a/src/tensor_operations/tensor_algebra.jl
+++ b/src/tensor_operations/tensor_algebra.jl
@@ -134,7 +134,7 @@ function optimal_contraction_sequence(As)
   return throw(
     ArgumentError(
       "Optimal contraction sequence isn't defined. Try loading a backend package like
-      TensorOperations.jl",
+      TensorOperations.jl"
     ),
   )
 end
diff --git a/test/ext/ITensorsTensorOperationsExt/Project.toml b/test/ext/ITensorsTensorOperationsExt/Project.toml
deleted file mode 100644
index 2e118300ac..0000000000
--- a/test/ext/ITensorsTensorOperationsExt/Project.toml
+++ /dev/null
@@ -1,3 +0,0 @@
-[deps]
-ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5"
-TensorOperations = "6aa20fa7-93e2-5fca-9bc0-fbd0db3c71a2"
diff --git a/test/ext/ITensorsTensorOperationsExt/runtests.jl b/test/ext/ITensorsTensorOperationsExt/runtests.jl
index 1a89d2180e..84c7a94866 100644
--- a/test/ext/ITensorsTensorOperationsExt/runtests.jl
+++ b/test/ext/ITensorsTensorOperationsExt/runtests.jl
@@ -84,7 +84,7 @@ using TensorOperations: TensorOperations
 
   seq = ITensors.optimal_contraction_sequence([A, A'', A', A'''])
   @test length(seq) == 2
-  @test issetequal(Any[3,1], first(seq)) || issetequal(Any[2,4], first(seq))
+  @test issetequal(Any[3, 1], first(seq)) || issetequal(Any[2, 4], first(seq))
 
   time_without_opt = @elapsed A * A'' * A' * A'''

From fff38d36d0e1b80fc30cf2e066e66476d2af4e93 Mon Sep 17 00:00:00 2001
From: Joey
Date: Tue, 25 Mar 2025 18:12:03 -0400
Subject: [PATCH 04/18] Remove redundant file from tests

---
 test/runtests.jl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/runtests.jl b/test/runtests.jl
index 0bf93de356..66dc122234 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -18,8 +18,8 @@ ITensors.disable_threaded_blocksparse()
    "lib/Ops",
    "base",
    "threading",
-    "lib/ContractionSequenceOptimization",
    "ext/ITensorsChainRulesCoreExt",
+    "ext/ITensorsTensorOperationsExt",
    "ext/ITensorsVectorInterfaceExt",
    "ext/NDTensorsMappedArraysExt",
  ]

From 8354875659501688189424f5a99e599375c0804d Mon Sep 17 00:00:00 2001
From: Joey
Date: Wed, 26 Mar 2025 09:51:55 -0400
Subject: [PATCH 05/18] Updated Project.toml

---
 test/Project.toml                                     | 1 +
 test/ext/ITensorsChainRulesCoreExt/test_chainrules.jl | 1 +
 2 files changed, 2 insertions(+)

diff --git a/test/Project.toml b/test/Project.toml
index f05c7e73ae..d9a07bd2bc 100644
--- a/test/Project.toml
+++ b/test/Project.toml
@@ -14,6 +14,7 @@ Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
 QuadGK = "1fd47b50-473d-5c70-9696-f719f8f3bcdc"
 Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
 Suppressor = "fd094767-a336-5f1f-9728-57cf17d0bbfb"
+TensorOperations = "6aa20fa7-93e2-5fca-9bc0-fbd0db3c71a2"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 VectorInterface = "409d34a3-91d5-4945-b6ec-7529ddf182d8"
 Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
diff --git a/test/ext/ITensorsChainRulesCoreExt/test_chainrules.jl b/test/ext/ITensorsChainRulesCoreExt/test_chainrules.jl
index d20ab0474f..58e5ad3b23 100644
--- a/test/ext/ITensorsChainRulesCoreExt/test_chainrules.jl
+++ b/test/ext/ITensorsChainRulesCoreExt/test_chainrules.jl
@@ -7,6 +7,7 @@ using ChainRulesCore: rrule_via_ad
 
 include("utils/chainrulestestutils.jl")
 
+using TensorOperations: TensorOperations
 using Zygote: ZygoteRuleConfig, gradient
 
 Random.seed!(1234)

From 7e6d3c9827fd4241b16392a3fccadc0fe4dcfc0b Mon Sep 17 00:00:00 2001
From: Joey
Date: Wed, 26 Mar 2025 10:48:55 -0400
Subject: [PATCH 06/18] Update Docs

---
 docs/src/ContractionSequenceOptimization.md | 7 +++----
 1 file changed, 3 insertions(+), 4
deletions(-)

diff --git a/docs/src/ContractionSequenceOptimization.md b/docs/src/ContractionSequenceOptimization.md
index 5991946e80..c314354668 100644
--- a/docs/src/ContractionSequenceOptimization.md
+++ b/docs/src/ContractionSequenceOptimization.md
@@ -1,8 +1,6 @@
 # Contraction sequence optimization
 
-When contracting a tensor network, the sequence of contraction makes a big difference in the computational cost. However, the complexity of determining the optimal sequence grows exponentially with the number of tensors, but there are many heuristic algorithms available for computing optimal sequences for small networks[^1][^2][^3][^4][^5][^6]. ITensors.jl provides some functionality for helping you find the optimal contraction sequence for small tensor network, as we will show below.
-
-The algorithm in ITensors.jl currently uses a modified version of[^1] with simplifications for outer product contractions similar to those used in [TensorOperations.jl](https://github.com/Jutho/TensorOperations.jl).
+When contracting a tensor network, the sequence of contractions makes a big difference in the computational cost. The complexity of determining the optimal sequence grows exponentially with the number of tensors, but there are many heuristic algorithms available for computing optimal sequences for small networks[^1][^2][^3][^4][^5][^6]. ITensors.jl imports functionality from [TensorOperations.jl](https://github.com/Jutho/TensorOperations.jl) to help you find the optimal contraction sequence for a small tensor network, as we will show below.
 
 [^1]: [Faster identification of optimal contraction sequences for tensor networks](https://arxiv.org/abs/1304.6112)
 [^2]: [Improving the efficiency of variational tensor network algorithms](https://arxiv.org/abs/1310.8023)
@@ -64,12 +62,13 @@ display(cost2)
 ```
 This example helps us learn that in the limit of large MPS bond dimension `m`, the first contraction sequence is faster, while in the limit of large MPO bond dimension `k`, the second sequence is faster. This has practical implications for writing an efficient DMRG algorithm in both limits, which we plan to incorporate into ITensors.jl.
 
-Here is a more systematic example of searching through the parameter space to find optimal contraction sequences:
+Here is a more systematic example of searching through the parameter space to find optimal contraction sequences.
Note that the TensorOperations.jl library must be loaded to use the `optimal_contraction_sequence` function:
 
 ```julia
 using ITensors
 using Symbolics
 using ITensors: contraction_cost, optimal_contraction_sequence
+using TensorOperations
 
 function tensor_network(; m, k, d)
   l = Index(m, "l")

From 5611fa056d5aaba4a296a67129d34f89429a99ef Mon Sep 17 00:00:00 2001
From: Joey
Date: Wed, 26 Mar 2025 10:51:20 -0400
Subject: [PATCH 07/18] Updated version

---
 Project.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Project.toml b/Project.toml
index 21721dc8e7..e02a62bb01 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
 name = "ITensors"
 uuid = "9136182c-28ba-11e9-034c-db9fb085ebd5"
 authors = ["Matthew Fishman ", "Miles Stoudenmire "]
-version = "0.8.9"
+version = "0.8.10"
 
 [deps]
 Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"

From 50fa4671a78c5196dac248913e9e94e8e4791f55 Mon Sep 17 00:00:00 2001
From: Joey
Date: Wed, 26 Mar 2025 11:04:27 -0400
Subject: [PATCH 08/18] Docstring for optimal_contraction_sequence

---
 .../ITensorsTensorOperationsExt.jl | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/ext/ITensorsTensorOperationsExt/ITensorsTensorOperationsExt.jl b/ext/ITensorsTensorOperationsExt/ITensorsTensorOperationsExt.jl
index 0aaba8e9d8..4b11b7bf82 100644
--- a/ext/ITensorsTensorOperationsExt/ITensorsTensorOperationsExt.jl
+++ b/ext/ITensorsTensorOperationsExt/ITensorsTensorOperationsExt.jl
@@ -4,6 +4,12 @@ using ITensors: ITensors, ITensor, dim, inds
 using NDTensors.AlgorithmSelection: @Algorithm_str
 using TensorOperations: TensorOperations, optimaltree
 
+"""
+    optimal_contraction_sequence(T)
+
+Returns a contraction sequence for contracting the tensors `T`. The sequence is
+generally optimal and is found via the `optimaltree` function from TensorOperations.jl, which must be loaded.
+"""
 function ITensors.optimal_contraction_sequence(
   As::Union{Vector{<:ITensor},Tuple{Vararg{ITensor}}}
 )

From 96afe54a42c9cd8557d3bd199085de26626feb1c Mon Sep 17 00:00:00 2001
From: Joseph Tindall <51231103+JoeyT1994@users.noreply.github.com>
Date: Wed, 26 Mar 2025 11:17:35 -0400
Subject: [PATCH 09/18] Update docs/src/ContractionSequenceOptimization.md

Co-authored-by: Matt Fishman
---
 docs/src/ContractionSequenceOptimization.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/src/ContractionSequenceOptimization.md b/docs/src/ContractionSequenceOptimization.md
index c314354668..20e0e569ca 100644
--- a/docs/src/ContractionSequenceOptimization.md
+++ b/docs/src/ContractionSequenceOptimization.md
@@ -68,7 +68,7 @@
 using ITensors
 using Symbolics
 using ITensors: contraction_cost, optimal_contraction_sequence
-using TensorOperations
+using TensorOperations: TensorOperations
 
 function tensor_network(; m, k, d)
   l = Index(m, "l")

From 9bed32fbde6fec4cb861b78fae42645e429c2982 Mon Sep 17 00:00:00 2001
From: Joey
Date: Wed, 26 Mar 2025 11:24:54 -0400
Subject: [PATCH 10/18] News item

---
 Project.toml | 2 +-
 README.md    | 2 ++
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/Project.toml b/Project.toml
index e02a62bb01..7dd9d2158d 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
 name = "ITensors"
 uuid = "9136182c-28ba-11e9-034c-db9fb085ebd5"
 authors = ["Matthew Fishman ", "Miles Stoudenmire "]
-version = "0.8.10"
+version = "0.9.0"
 
 [deps]
 Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
diff --git a/README.md b/README.md
index 9e8c73e792..3aed8dcfbf 100644
--- a/README.md
+++ b/README.md
@@ -34,6 +34,8 @@ ITensors.jl is supported by the Flatiron Institute, a division of the Simons Fou
 
 ## News
 
+- March 26, 2025: ITensors.jl v0.9 has been released. This is a minor breaking change, since the `optimal_contraction_sequence` function now forwards to the `optimaltree` function from [TensorOperations.jl](https://github.com/Jutho/TensorOperations.jl). The `TensorOperations` package therefore needs to be loaded in order to use `optimal_contraction_sequence`, or whenever contraction sequence optimization is enabled with `ITensors.enable_contraction_sequence_optimization()`.
+
 - March 22, 2025: As part of the latest release of ITensors.jl (v0.8.3), all documentation related to MPS/MPO functionality has been moved to the [ITensorMPS.jl documentation](https://docs.itensor.org/ITensorMPS).
 
 - February 22, 2025: Please note that there were issues installing the latest version of ITensors.jl (ITensors.jl v0.8) in older versions of Julia v1.10 and v1.11 ([https://github.com/ITensor/ITensors.jl/issues/1618](https://github.com/ITensor/ITensors.jl/issues/1618), [https://itensor.discourse.group/t/typeparameteraccessors-not-found-error-on-julia-v-1-10-0/2260](https://itensor.discourse.group/t/typeparameteraccessors-not-found-error-on-julia-v-1-10-0/2260)). This issue has been fixed in [NDTensors.jl v0.4.4](https://github.com/ITensor/ITensors.jl/pull/1623), so please try updating your packages if you are using older versions of Julia v1.10 or v1.11 and running into issues installing ITensors.jl.
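
For reference, a minimal usage sketch of the workflow this patch series establishes: loading TensorOperations.jl activates the new `ITensorsTensorOperationsExt` extension, and the resulting sequence can be passed to `contract`. The index dimension below is illustrative, smaller than the dimensions used in the tests above.

```julia
# Minimal sketch: loading TensorOperations.jl activates the extension, so
# ITensors.optimal_contraction_sequence forwards to TensorOperations.optimaltree.
using ITensors
using TensorOperations: TensorOperations

i = Index(10, "i")
A = random_itensor(i', dag(i))

# Find a (generally optimal) contraction sequence for the three-tensor network.
sequence = ITensors.optimal_contraction_sequence([A, A'', A'])

# Contract the network using the computed sequence.
contract([A, A'', A']; sequence=sequence)
```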
From 377333db6c386e280841dbd74967cee37a6b5f37 Mon Sep 17 00:00:00 2001 From: Joey Date: Wed, 26 Mar 2025 12:04:42 -0400 Subject: [PATCH 11/18] update Docs version --- docs/Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/Project.toml b/docs/Project.toml index 37c2052ba7..dc504018c2 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -8,6 +8,6 @@ Strided = "5e0ebb24-38b0-5f93-81fe-25c709ecae67" [compat] Documenter = "1.9.0" HDF5 = "0.17.2" -ITensors = "0.8.4" +ITensors = "0.9.0" Strided = "2.2.0" LinearAlgebra = "1.10.0" From 0cd521e2ce1b6d629e6f50b6944af0c8cdc47a2e Mon Sep 17 00:00:00 2001 From: Joey Date: Wed, 26 Mar 2025 12:05:45 -0400 Subject: [PATCH 12/18] Update compat in docs/Project.toml --- docs/Project.toml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/Project.toml b/docs/Project.toml index dc504018c2..337f266347 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -4,10 +4,12 @@ HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f" ITensors = "9136182c-28ba-11e9-034c-db9fb085ebd5" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" Strided = "5e0ebb24-38b0-5f93-81fe-25c709ecae67" +TensorOperations = "6aa20fa7-93e2-5fca-9bc0-fbd0db3c71a2" [compat] Documenter = "1.9.0" HDF5 = "0.17.2" ITensors = "0.9.0" Strided = "2.2.0" +TensorOperations = "5.1.4" LinearAlgebra = "1.10.0" From 431ff5d961f8f2881b1aff062af83b8464d4058c Mon Sep 17 00:00:00 2001 From: Joey Date: Wed, 26 Mar 2025 12:08:13 -0400 Subject: [PATCH 13/18] Revert docs version --- docs/Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/Project.toml b/docs/Project.toml index 337f266347..43e3154fdf 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -9,7 +9,7 @@ TensorOperations = "6aa20fa7-93e2-5fca-9bc0-fbd0db3c71a2" [compat] Documenter = "1.9.0" HDF5 = "0.17.2" -ITensors = "0.9.0" +ITensors = "0.8.4" Strided = "2.2.0" TensorOperations = "5.1.4" LinearAlgebra = "1.10.0" From 205d819cf9a6c0de708869b27cca0cad89951178 Mon Sep 17 00:00:00 2001 From: Joey Date: Wed, 26 Mar 2025 12:10:48 -0400 Subject: [PATCH 14/18] Update Docs Version --- docs/Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/Project.toml b/docs/Project.toml index 43e3154fdf..337f266347 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -9,7 +9,7 @@ TensorOperations = "6aa20fa7-93e2-5fca-9bc0-fbd0db3c71a2" [compat] Documenter = "1.9.0" HDF5 = "0.17.2" -ITensors = "0.8.4" +ITensors = "0.9.0" Strided = "2.2.0" TensorOperations = "5.1.4" LinearAlgebra = "1.10.0" From cab6d63af26718019a1cf763bbdecd8b6e512369 Mon Sep 17 00:00:00 2001 From: Joey Date: Wed, 26 Mar 2025 13:59:05 -0400 Subject: [PATCH 15/18] Remove compat for ITensors in docs/Project.toml --- docs/Project.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/Project.toml b/docs/Project.toml index 337f266347..526be47b61 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -9,7 +9,6 @@ TensorOperations = "6aa20fa7-93e2-5fca-9bc0-fbd0db3c71a2" [compat] Documenter = "1.9.0" HDF5 = "0.17.2" -ITensors = "0.9.0" Strided = "2.2.0" TensorOperations = "5.1.4" LinearAlgebra = "1.10.0" From b003332e9a1f18bb22c3e6e7fb781f6705d3a6a9 Mon Sep 17 00:00:00 2001 From: Joey Date: Wed, 26 Mar 2025 15:26:24 -0400 Subject: [PATCH 16/18] Move docstring to src --- src/tensor_operations/tensor_algebra.jl | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/tensor_operations/tensor_algebra.jl b/src/tensor_operations/tensor_algebra.jl index d1e58a0939..0264240481 100644 --- 
a/src/tensor_operations/tensor_algebra.jl
+++ b/src/tensor_operations/tensor_algebra.jl
@@ -130,6 +130,12 @@ function contract(
   end
 end
 
+"""
+    optimal_contraction_sequence(T)
+
+Returns a contraction sequence for contracting the tensors `T`. The sequence is
+generally optimal and is found via the `optimaltree` function from TensorOperations.jl, which must be loaded.
+"""
 function optimal_contraction_sequence(As)
   return throw(
     ArgumentError(

From f3b93a483a5a2a87d572aedccd63f0b397d2575f Mon Sep 17 00:00:00 2001
From: Joseph Tindall <51231103+JoeyT1994@users.noreply.github.com>
Date: Wed, 26 Mar 2025 15:37:06 -0400
Subject: [PATCH 17/18] Update ext/ITensorsTensorOperationsExt/ITensorsTensorOperationsExt.jl

Co-authored-by: Matt Fishman
---
 .../ITensorsTensorOperationsExt.jl | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/ext/ITensorsTensorOperationsExt/ITensorsTensorOperationsExt.jl b/ext/ITensorsTensorOperationsExt/ITensorsTensorOperationsExt.jl
index 4b11b7bf82..0aaba8e9d8 100644
--- a/ext/ITensorsTensorOperationsExt/ITensorsTensorOperationsExt.jl
+++ b/ext/ITensorsTensorOperationsExt/ITensorsTensorOperationsExt.jl
@@ -4,12 +4,6 @@ using ITensors: ITensors, ITensor, dim, inds
 using NDTensors.AlgorithmSelection: @Algorithm_str
 using TensorOperations: TensorOperations, optimaltree
 
-"""
-    optimal_contraction_sequence(T)
-
-Returns a contraction sequence for contracting the tensors `T`. The sequence is
-generally optimal and is found via the `optimaltree` function from TensorOperations.jl, which must be loaded.
-"""
 function ITensors.optimal_contraction_sequence(
   As::Union{Vector{<:ITensor},Tuple{Vararg{ITensor}}}
 )

From 373eaf2d0e42416057580ace6453f80d605dd222 Mon Sep 17 00:00:00 2001
From: Joseph Tindall <51231103+JoeyT1994@users.noreply.github.com>
Date: Wed, 26 Mar 2025 15:56:44 -0400
Subject: [PATCH 18/18] Update ext/ITensorsTensorOperationsExt/ITensorsTensorOperationsExt.jl

Co-authored-by: Matt Fishman
---
 ext/ITensorsTensorOperationsExt/ITensorsTensorOperationsExt.jl | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/ext/ITensorsTensorOperationsExt/ITensorsTensorOperationsExt.jl b/ext/ITensorsTensorOperationsExt/ITensorsTensorOperationsExt.jl
index 0aaba8e9d8..c437a573ea 100644
--- a/ext/ITensorsTensorOperationsExt/ITensorsTensorOperationsExt.jl
+++ b/ext/ITensorsTensorOperationsExt/ITensorsTensorOperationsExt.jl
@@ -1,8 +1,7 @@
 module ITensorsTensorOperationsExt
 
 using ITensors: ITensors, ITensor, dim, inds
-using NDTensors.AlgorithmSelection: @Algorithm_str
-using TensorOperations: TensorOperations, optimaltree
+using TensorOperations: optimaltree
 
 function ITensors.optimal_contraction_sequence(
   As::Union{Vector{<:ITensor},Tuple{Vararg{ITensor}}}
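
For orientation, a sketch of the extension module as it stands after the full series, reconstructed from the hunks above ([PATCH 01/18]'s file with [PATCH 18/18]'s import changes applied):

```julia
module ITensorsTensorOperationsExt

using ITensors: ITensors, ITensor, dim, inds
using TensorOperations: optimaltree

function ITensors.optimal_contraction_sequence(
  As::Union{Vector{<:ITensor},Tuple{Vararg{ITensor}}}
)
  # Represent the network as a list of index lists and map each index to its
  # dimension, then delegate sequence finding to TensorOperations.optimaltree.
  network = collect.(inds.(As))
  inds_to_dims = Dict(i => Float64(dim(i)) for i in unique(reduce(vcat, network)))
  seq, _ = optimaltree(network, inds_to_dims)
  return seq
end

end
```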