diff --git a/Project.toml b/Project.toml
index c439eec..2d9e5ab 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
 name = "FusionTensors"
 uuid = "e16ca583-1f51-4df0-8e12-57d32947d33e"
 authors = ["ITensor developers and contributors"]
-version = "0.4.1"
+version = "0.5.0"
 
 [deps]
 Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697"
@@ -11,6 +11,7 @@ GradedArrays = "bc96ca6e-b7c8-4bb6-888e-c93f838762c2"
 HalfIntegers = "f0d1745a-41c9-11e9-1dd9-e5d34d218721"
 LRUCache = "8ac3fa9e-de4c-5943-b1dc-09c6b5f20637"
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
+Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
 Strided = "5e0ebb24-38b0-5f93-81fe-25c709ecae67"
 TensorAlgebra = "68bd88dc-f39d-4e12-b2ca-f046b68fcc6a"
 TensorProducts = "decf83d6-1968-43f4-96dc-fdb3fe15fc6d"
@@ -25,6 +26,7 @@ GradedArrays = "0.4.13"
 HalfIntegers = "1.6"
 LRUCache = "1.6"
 LinearAlgebra = "1.10"
+Random = "1.10"
 Strided = "2.3"
 TensorAlgebra = "0.3.8"
 TensorProducts = "0.1.7"
diff --git a/docs/Project.toml b/docs/Project.toml
index 5d0dd21..073b185 100644
--- a/docs/Project.toml
+++ b/docs/Project.toml
@@ -5,5 +5,5 @@ Literate = "98b081ad-f1c9-55d3-8b20-4c87d4299306"
 
 [compat]
 Documenter = "1.10.0"
-FusionTensors = "0.4"
+FusionTensors = "0.5"
 Literate = "2.20.1"
diff --git a/src/fusiontensor/array_cast.jl b/src/fusiontensor/array_cast.jl
index cbc5891..56bbe1b 100644
--- a/src/fusiontensor/array_cast.jl
+++ b/src/fusiontensor/array_cast.jl
@@ -64,7 +64,7 @@ function to_fusiontensor_no_checknorm(
   codomain_legs::Tuple{Vararg{AbstractGradedUnitRange}},
   domain_legs::Tuple{Vararg{AbstractGradedUnitRange}},
 )
-  ft = FusionTensor(eltype(blockarray), codomain_legs, domain_legs)
+  ft = FusionTensor{eltype(blockarray)}(undef, codomain_legs, domain_legs)
   for (f1, f2) in keys(trees_block_mapping(ft))
     b = findblock(ft, f1, f2)
     ft[f1, f2] = contract_fusion_trees(blockarray[b], f1, f2)
diff --git a/src/fusiontensor/base_interface.jl b/src/fusiontensor/base_interface.jl
index 21cf259..5befd60 100644
--- a/src/fusiontensor/base_interface.jl
+++ b/src/fusiontensor/base_interface.jl
@@ -1,8 +1,7 @@
 # This files defines Base functions for FusionTensor
 
 using Accessors: @set
-
-using BlockSparseArrays: @view!
+using BlockSparseArrays: @view!, eachstoredblock
 using TensorAlgebra: BlockedTuple, tuplemortar
 
 set_data_matrix(ft::FusionTensor, data_matrix) = @set ft.data_matrix = data_matrix
@@ -105,7 +104,7 @@ function Base.similar(
   return similar(ft, T, tuplemortar(new_axes))
 end
 function Base.similar(::FusionTensor, ::Type{T}, new_axes::BlockedTuple{2}) where {T}
-  return FusionTensor(T, new_axes)
+  return FusionTensor{T}(undef, new_axes)
 end
 
 Base.show(io::IO, ft::FusionTensor) = print(io, "$(ndims(ft))-dim FusionTensor")
diff --git a/src/fusiontensor/fusiontensor.jl b/src/fusiontensor/fusiontensor.jl
index 1318a25..490644b 100644
--- a/src/fusiontensor/fusiontensor.jl
+++ b/src/fusiontensor/fusiontensor.jl
@@ -19,6 +19,8 @@ using GradedArrays:
   sectormergesort,
   sectors,
   space_isequal
+using LinearAlgebra: UniformScaling
+using Random: Random, AbstractRNG, randn!
 using TensorAlgebra: BlockedTuple, trivial_axis, tuplemortar
 using TensorProducts: tensor_product
 using TypeParameterAccessors: type_parameters
@@ -174,12 +176,12 @@ function FusionTensor(
 end
 
 # empty matrix
-function FusionTensor(elt::Type, legs::FusionTensorAxes)
+function FusionTensor{T}(::UndefInitializer, legs::FusionTensorAxes) where {T}
   S = sector_type(legs)
   row_axis, codomain_trees_to_ranges = fuse_axes(S, codomain(legs))
   col_axis, domain_trees_to_ranges = flip_domain(fuse_axes(S, dual.(domain(legs)))...)
-  mat = initialize_data_matrix(elt, row_axis, col_axis)
+  mat = initialize_data_matrix(T, row_axis, col_axis)
   tree_to_block_mapping = intersect_codomain_domain(
     codomain_trees_to_ranges, domain_trees_to_ranges
   )
@@ -189,7 +191,7 @@ end
 #constructor from precomputed data_matrix
 function FusionTensor(mat::AbstractMatrix, legs::FusionTensorAxes)
   # init with empty data_matrix to construct trees_block_mapping
-  ft = FusionTensor(eltype(mat), legs)
+  ft = FusionTensor{eltype(mat)}(undef, legs)
   for b in eachblockstoredindex(mat)
     b in eachblockstoredindex(data_matrix(ft)) ||
       throw(ArgumentError("matrix block $b is not allowed"))
@@ -199,6 +201,9 @@ function FusionTensor(mat::AbstractMatrix, legs::FusionTensorAxes)
 end
 
 FusionTensor(x, legs::BlockedTuple{2}) = FusionTensor(x, FusionTensorAxes(legs))
+function FusionTensor{T}(x, legs::BlockedTuple{2}) where {T}
+  return FusionTensor{T}(x, FusionTensorAxes(legs))
+end
 
 # constructor from split axes
 function FusionTensor(
@@ -209,6 +214,47 @@ function FusionTensor(
   return FusionTensor(x, tuplemortar((codomain_legs, domain_legs)))
 end
 
+function FusionTensor{T}(
+  x,
+  codomain_legs::Tuple{Vararg{AbstractGradedUnitRange}},
+  domain_legs::Tuple{Vararg{AbstractGradedUnitRange}},
+) where {T}
+  return FusionTensor{T}(x, tuplemortar((codomain_legs, domain_legs)))
+end
+
+# specific constructors
+function Base.zeros(::Type{T}, fta::FusionTensorAxes) where {T}
+  ft = FusionTensor{T}(undef, fta)
+  foreach(m -> fill!(m, zero(T)), eachstoredblock(data_matrix(ft)))
+  return ft
+end
+Base.zeros(fta::FusionTensorAxes) = zeros(Float64, fta)
+
+function Base.randn(rng::AbstractRNG, ::Type{T}, fta::FusionTensorAxes) where {T}
+  ft = FusionTensor{T}(undef, fta)
+  foreach(m -> randn!(rng, m), eachstoredblock(data_matrix(ft)))
+  return ft
+end
+Base.randn(rng::AbstractRNG, fta::FusionTensorAxes) = randn(rng, Float64, fta)
+Base.randn(::Type{T}, fta::FusionTensorAxes) where {T} = randn(Random.default_rng(), T, fta)
+Base.randn(fta::FusionTensorAxes) = randn(Float64, fta)
+
+function FusionTensor{T}(
+  s::UniformScaling, codomain_legs::Tuple{Vararg{AbstractGradedUnitRange}}
+) where {T}
+  fta = FusionTensorAxes(codomain_legs, dual.(codomain_legs))
+  ft = FusionTensor{T}(undef, fta)
+  for m in eachstoredblock(data_matrix(ft))
+    m .= s(size(m, 1))
+  end
+  return ft
+end
+function FusionTensor(
+  s::UniformScaling, codomain_legs::Tuple{Vararg{AbstractGradedUnitRange}}
+)
+  return FusionTensor{Float64}(s, codomain_legs)
+end
+
 # ================================ BlockArrays interface =================================
 function BlockArrays.findblock(ft::FusionTensor, f1::SectorFusionTree, f2::SectorFusionTree)
diff --git a/src/permutedims/permutedims.jl b/src/permutedims/permutedims.jl
index 252eb5c..0be9ee7 100644
--- a/src/permutedims/permutedims.jl
+++ b/src/permutedims/permutedims.jl
@@ -38,7 +38,7 @@ function fusiontensor_permutedims(ft, biperm::BlockedPermutation{2})
     end
   end
 
-  new_ft = FusionTensor(eltype(ft), axes(ft)[biperm])
+  new_ft = FusionTensor{eltype(ft)}(undef, axes(ft)[biperm])
   fusiontensor_permutedims!(new_ft, ft, Tuple(biperm))
   return new_ft
 end
@@ -46,11 +46,11 @@ end
 function fusiontensor_permutedims!(
   new_ft::FusionTensor{T,N}, old_ft::FusionTensor{T,N}, flatperm::NTuple{N,Integer}
 ) where {T,N}
+  foreach(m -> fill!(m, zero(T)), eachstoredblock(data_matrix(new_ft)))
   unitary = compute_unitary(new_ft, old_ft, flatperm)
-  for p in unitary
-    old_trees, new_trees = first(p)
+  for ((old_trees, new_trees), coeff) in unitary
     new_block = view(new_ft, new_trees...)
     old_block = view(old_ft, old_trees...)
-    @strided new_block .+= last(p) .* permutedims(old_block, flatperm)
+    @strided new_block .+= coeff .* permutedims(old_block, flatperm)
   end
 end
diff --git a/test/Project.toml b/test/Project.toml
index 222a336..8aa8d70 100644
--- a/test/Project.toml
+++ b/test/Project.toml
@@ -5,6 +5,7 @@ BlockSparseArrays = "2c9a651f-6452-4ace-a6ac-809f4280fbb4"
 FusionTensors = "e16ca583-1f51-4df0-8e12-57d32947d33e"
 GradedArrays = "bc96ca6e-b7c8-4bb6-888e-c93f838762c2"
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
+Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
 SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f"
 Suppressor = "fd094767-a336-5f1f-9728-57cf17d0bbfb"
 TensorAlgebra = "68bd88dc-f39d-4e12-b2ca-f046b68fcc6a"
@@ -15,9 +16,10 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 Aqua = "0.8.11"
 BlockArrays = "1.6"
 BlockSparseArrays = "0.7"
-FusionTensors = "0.4"
+FusionTensors = "0.5"
 GradedArrays = "0.4"
 LinearAlgebra = "1.10.0"
+Random = "1.10"
 SafeTestsets = "0.1.0"
 Suppressor = "0.2.8"
 TensorAlgebra = "0.3"
diff --git a/test/test_basics.jl b/test/test_basics.jl
index accfc5b..7e076b8 100644
--- a/test/test_basics.jl
+++ b/test/test_basics.jl
@@ -1,7 +1,7 @@
 using Test: @test, @test_throws, @testset
 
 using BlockArrays: Block
-using BlockSparseArrays: BlockSparseArray
+using BlockSparseArrays: BlockSparseArray, eachblockstoredindex
 using FusionTensors:
   FusionTensor,
   FusionTensorAxes,
@@ -28,6 +28,8 @@ using GradedArrays:
   space_isequal
 using TensorAlgebra: tuplemortar
 using TensorProducts: tensor_product
+using LinearAlgebra: LinearAlgebra
+using Random: Random
 
 include("setup.jl")
 
@@ -35,8 +37,14 @@ include("setup.jl")
   g1 = gradedrange([U1(0) => 1, U1(1) => 2, U1(2) => 3])
   g2 = dual(gradedrange([U1(0) => 2, U1(1) => 2, U1(3) => 1]))
 
+  fta = FusionTensorAxes((g1,), (g2,))
+  ft0 = FusionTensor{Float64}(undef, fta)
+  @test ft0 isa FusionTensor
+  @test space_isequal(codomain_axis(ft0), g1)
+  @test space_isequal(domain_axis(ft0), g2)
+
   # check dual convention when initializing data_matrix
-  ft0 = FusionTensor(Float64, (g1,), (g2,))
+  ft0 = FusionTensor{Float64}(undef, (g1,), (g2,))
   @test ft0 isa FusionTensor
   @test space_isequal(codomain_axis(ft0), g1)
   @test space_isequal(domain_axis(ft0), g2)
@@ -146,7 +154,7 @@ end
   g1 = gradedrange([U1(0) => 1, U1(1) => 2, U1(2) => 3])
 
   # one row axis
-  ft1 = FusionTensor(Float64, (g1,), ())
+  ft1 = FusionTensor{Float64}(undef, (g1,), ())
   @test ndims_codomain(ft1) == 1
   @test ndims_domain(ft1) == 0
   @test ndims(ft1) == 1
@@ -156,7 +164,7 @@
   @test sector_type(ft1) === sector_type(g1)
 
   # one column axis
-  ft2 = FusionTensor(Float64, (), (g1,))
+  ft2 = FusionTensor{Float64}(undef, (), (g1,))
   @test ndims_codomain(ft2) == 0
   @test ndims_domain(ft2) == 1
   @test ndims(ft2) == 1
@@ -166,7 +174,7 @@
   @test sector_type(ft2) === sector_type(g1)
 
   # zero axis
-  ft3 = FusionTensor(Float64, (), ())
+  ft3 = FusionTensor{Float64}(undef, (), ())
   @test ndims_codomain(ft3) == 0
   @test ndims_domain(ft3) == 0
   @test ndims(ft3) == 0
@@ -181,7 +189,7 @@ end
   g2 = gradedrange([U1(0) => 2, U1(1) => 2, U1(3) => 1])
   g3 = gradedrange([U1(-1) => 1, U1(0) => 2, U1(1) => 1])
   g4 = gradedrange([U1(-1) => 1, U1(0) => 1, U1(1) => 1])
-  ft3 = FusionTensor(Float64, (g1, g2), (g3, g4))
+  ft3 = FusionTensor{Float64}(undef, (g1, g2), (g3, g4))
   @test isnothing(check_sanity(ft3))
 
   ft4 = +ft3
@@ -260,19 +268,58 @@ end
   @test space_isequal(dual(g4), codomain_axes(ad)[2])
   @test isnothing(check_sanity(ad))
 
-  ft7 = FusionTensor(Float64, (g1,), (g2, g3, g4))
+  ft7 = FusionTensor{Float64}(undef, (g1,), (g2, g3, g4))
   @test_throws DimensionMismatch ft7 + ft3
   @test_throws DimensionMismatch ft7 - ft3
   @test_throws DimensionMismatch ft7 * ft3
 end
 
+@testset "specific constructors" begin
+  g1 = gradedrange([U1(0) => 1, U1(1) => 2, U1(2) => 3])
+  g2 = gradedrange([U1(0) => 2, U1(1) => 2, U1(3) => 1])
+  g3 = gradedrange([U1(-1) => 1, U1(0) => 2, U1(1) => 1])
+  g4 = gradedrange([U1(-1) => 1, U1(0) => 1, U1(1) => 1])
+
+  fta = FusionTensorAxes((g1,), (g2, g3))
+  @test zeros(fta) isa FusionTensor{Float64,3}
+  @test zeros(ComplexF64, fta) isa FusionTensor{ComplexF64,3}
+
+  rng = Random.default_rng()
+  ft1 = randn(rng, ComplexF64, fta)
+  @test ft1 isa FusionTensor{ComplexF64,3}
+  @test all(!=(0), data_matrix(ft1)[Block(1, 5)])
+  @test randn(rng, fta) isa FusionTensor{Float64,3}
+  @test randn(ComplexF64, fta) isa FusionTensor{ComplexF64,3}
+  @test randn(fta) isa FusionTensor{Float64,3}
+
+  ft2 = FusionTensor(LinearAlgebra.I, (g1, g2))
+  @test ft2 isa FusionTensor{Float64,4}
+  @test axes(ft2) == FusionTensorAxes((g1, g2), dual.((g1, g2)))
+  @test collect(eachblockstoredindex(data_matrix(ft2))) == map(i -> Block(i, i), 1:6)
+  for i in 1:6
+    m = data_matrix(ft2)[Block(i, i)]
+    @test m == LinearAlgebra.I(size(m, 1))
+  end
+
+  ft2 = FusionTensor(3 * LinearAlgebra.I, (g1, g2))
+  @test ft2 isa FusionTensor{Float64,4}
+  @test axes(ft2) == FusionTensorAxes((g1, g2), dual.((g1, g2)))
+  @test collect(eachblockstoredindex(data_matrix(ft2))) == map(i -> Block(i, i), 1:6)
+  for i in 1:6
+    m = data_matrix(ft2)[Block(i, i)]
+    @test m == 3 * LinearAlgebra.I(size(m, 1))
+  end
+
+  @test FusionTensor{ComplexF64}(LinearAlgebra.I, (g1, g2)) isa FusionTensor{ComplexF64,4}
+end
+
 @testset "missing SectorProduct" begin
   g1 = gradedrange([SectorProduct(U1(1)) => 1])
   g2 = gradedrange([SectorProduct(U1(1), SU2(1//2)) => 1])
   g3 = gradedrange([SectorProduct(U1(1), SU2(1//2), Z{2}(1)) => 1])
   S = sector_type(g3)
-  ft = FusionTensor(Float64, (g1,), (dual(g2), dual(g3)))
+  ft = FusionTensor{Float64}(undef, (g1,), (dual(g2), dual(g3)))
   @test sector_type(ft) === S
   gr = gradedrange([SectorProduct(U1(1), SU2(0), Z{2}(0)) => 1])
   @test space_isequal(codomain_axis(ft), gr)
@@ -287,7 +334,7 @@ end
   gABC = tensor_product(gA, gB, gC)
   S = sector_type(gABC)
 
-  ft = FusionTensor(Float64, (gA, gB), (dual(gA), dual(gB), gC))
+  ft = FusionTensor{Float64}(undef, (gA, gB), (dual(gA), dual(gB), gC))
   @test sector_type(ft) === S
   @test space_isequal(codomain_axis(ft), gABC)
   @test space_isequal(domain_axis(ft), dual(gABC))
diff --git a/test/test_contraction.jl b/test/test_contraction.jl
index 25a0041..4317560 100644
--- a/test/test_contraction.jl
+++ b/test/test_contraction.jl
@@ -14,10 +14,10 @@ include("setup.jl")
   g3 = gradedrange([U1(-1) => 1, U1(0) => 2, U1(1) => 1])
   g4 = gradedrange([U1(-1) => 1, U1(0) => 1, U1(1) => 1])
 
-  ft1 = FusionTensor(Float64, (g1, g2), (g3, g4))
+  ft1 = FusionTensor{Float64}(undef, (g1, g2), (g3, g4))
   @test isnothing(check_sanity(ft1))
 
-  ft2 = FusionTensor(Float64, dual.((g3, g4)), (g1,))
+  ft2 = FusionTensor{Float64}(undef, dual.((g3, g4)), (g1,))
   @test isnothing(check_sanity(ft2))
 
   ft3 = ft1 * ft2 # tensor contraction
@@ -43,9 +43,9 @@ end
   g3 = gradedrange([U1(-1) => 1, U1(0) => 2, U1(1) => 1])
   g4 = gradedrange([U1(-1) => 1, U1(0) => 1, U1(1) => 1])
 
-  ft1 = FusionTensor(Float64, (g1, g2), (g3, g4))
-  ft2 = FusionTensor(Float64, dual.((g3, g4)), (dual(g1),))
-  ft3 = FusionTensor(Float64, dual.((g3, g4)), dual.((g1, g2)))
+  ft1 = FusionTensor{Float64}(undef, (g1, g2), (g3, g4))
+  ft2 = FusionTensor{Float64}(undef, dual.((g3, g4)), (dual(g1),))
+  ft3 = FusionTensor{Float64}(undef, dual.((g3, g4)), dual.((g1, g2)))
 
   ft4, legs = contract(ft1, (1, 2, 3, 4), ft2, (3, 4, 5))
   @test legs == tuplemortar(((1, 2), (5,)))
diff --git a/test/test_linear_algebra.jl b/test/test_linear_algebra.jl
index 9408e4c..669457f 100644
--- a/test/test_linear_algebra.jl
+++ b/test/test_linear_algebra.jl
@@ -23,11 +23,6 @@ include("setup.jl")
   gsu2 = gradedrange([SU2(1 / 2) => 1])
 
   for g in [g0, gu1, gsu2]
-    ft0 = FusionTensor(Float64, (g, g), (dual(g), dual(g)))
-    @test isnothing(check_sanity(ft0))
-    @test norm(ft0) == 0
-    @test tr(ft0) == 0
-
     ft = to_fusiontensor(sdst, (g, g), (dual(g), dual(g)))
     @test isnothing(check_sanity(ft))
     @test norm(ft) ≈ √3 / 2
diff --git a/test/test_permutedims.jl b/test/test_permutedims.jl
index 1d7c9a3..fd293a5 100644
--- a/test/test_permutedims.jl
+++ b/test/test_permutedims.jl
@@ -23,7 +23,7 @@ include("setup.jl")
   g4 = gradedrange([U1(-1) => 1, U1(0) => 1, U1(1) => 1])
 
   for elt in (Float64, ComplexF64)
-    ft1 = FusionTensor(elt, (g1, g2), dual.((g3, g4)))
+    ft1 = FusionTensor{elt}(undef, (g1, g2), dual.((g3, g4)))
     @test isnothing(check_sanity(ft1))
 
     # test permutedims interface