diff --git a/Project.toml b/Project.toml
index a3edb8d4..c80be7bd 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
 name = "BlockSparseArrays"
 uuid = "2c9a651f-6452-4ace-a6ac-809f4280fbb4"
 authors = ["ITensor developers and contributors"]
-version = "0.3.8"
+version = "0.3.9"
 
 [deps]
 Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
@@ -21,12 +21,12 @@ SplitApplyCombine = "03a91e81-4c3e-53e1-a0a4-9c0c8f19dd66"
 TypeParameterAccessors = "7e5a90cf-f82e-492e-a09b-e3e26432c138"
 
 [weakdeps]
-LabelledNumbers = "f856a3a6-4152-4ec4-b2a7-02c1a55d7993"
 TensorAlgebra = "68bd88dc-f39d-4e12-b2ca-f046b68fcc6a"
+TensorProducts = "decf83d6-1968-43f4-96dc-fdb3fe15fc6d"
 
 [extensions]
 BlockSparseArraysGradedUnitRangesExt = "GradedUnitRanges"
-BlockSparseArraysTensorAlgebraExt = ["LabelledNumbers", "TensorAlgebra"]
+BlockSparseArraysTensorAlgebraExt = ["TensorProducts", "TensorAlgebra"]
 
 [compat]
 Adapt = "4.1.1"
@@ -38,14 +38,14 @@ DiagonalArrays = "0.3"
 Dictionaries = "0.4.3"
 FillArrays = "1.13.0"
 GPUArraysCore = "0.1.0, 0.2"
-GradedUnitRanges = "0.1.0"
-LabelledNumbers = "0.1.0"
+GradedUnitRanges = "0.2.2"
 LinearAlgebra = "1.10"
 MacroTools = "0.5.13"
 MapBroadcast = "0.1.5"
 SparseArraysBase = "0.5"
 SplitApplyCombine = "1.2.3"
-TensorAlgebra = "0.1.0, 0.2"
+TensorAlgebra = "0.2.4"
+TensorProducts = "0.1.2"
 Test = "1.10"
 TypeParameterAccessors = "0.2.0, 0.3"
 julia = "1.10"
diff --git a/ext/BlockSparseArraysTensorAlgebraExt/BlockSparseArraysTensorAlgebraExt.jl b/ext/BlockSparseArraysTensorAlgebraExt/BlockSparseArraysTensorAlgebraExt.jl
index 37f03d09..c8fc4dfd 100644
--- a/ext/BlockSparseArraysTensorAlgebraExt/BlockSparseArraysTensorAlgebraExt.jl
+++ b/ext/BlockSparseArraysTensorAlgebraExt/BlockSparseArraysTensorAlgebraExt.jl
@@ -1,15 +1,10 @@
 module BlockSparseArraysTensorAlgebraExt
 
 using BlockArrays: AbstractBlockedUnitRange
-using GradedUnitRanges: tensor_product
-using TensorAlgebra: TensorAlgebra, FusionStyle, BlockReshapeFusion
-function TensorAlgebra.:⊗(a1::AbstractBlockedUnitRange, a2::AbstractBlockedUnitRange)
-  return tensor_product(a1, a2)
-end
+using TensorAlgebra: TensorAlgebra, FusionStyle, BlockReshapeFusion
+using TensorProducts: OneToOne
 
-using BlockArrays: AbstractBlockedUnitRange
 using BlockSparseArrays: AbstractBlockSparseArray, blockreshape
-using TensorAlgebra: TensorAlgebra, FusionStyle, BlockReshapeFusion
 
 TensorAlgebra.FusionStyle(::AbstractBlockedUnitRange) = BlockReshapeFusion()
 
@@ -46,13 +41,12 @@ using DerivableInterfaces: @interface
 using GradedUnitRanges:
   GradedUnitRanges,
   AbstractGradedUnitRange,
-  OneToOne,
   blockmergesortperm,
   blocksortperm,
   dual,
   invblockperm,
   nondual,
-  tensor_product
+  unmerged_tensor_product
 using LinearAlgebra: Adjoint, Transpose
 using TensorAlgebra:
   TensorAlgebra, FusionStyle, BlockReshapeFusion, SectorFusion, fusedims, splitdims
@@ -77,10 +71,17 @@ function block_mergesort(a::AbstractArray)
 end
 
 function TensorAlgebra.fusedims(
-  ::SectorFusion, a::AbstractArray, axes::AbstractUnitRange...
+  ::SectorFusion, a::AbstractArray, merged_axes::AbstractUnitRange...
 )
   # First perform a fusion using a block reshape.
-  a_reshaped = fusedims(BlockReshapeFusion(), a, axes...)
+  # TODO avoid groupreducewhile. Require refactor of fusedims.
+  unmerged_axes = groupreducewhile(
+    unmerged_tensor_product, axes(a), length(merged_axes); init=OneToOne()
+  ) do i, axis
+    return length(axis) ≤ length(merged_axes[i])
+  end
+
+  a_reshaped = fusedims(BlockReshapeFusion(), a, unmerged_axes...)
   # Sort the blocks by sector and merge the equivalent sectors.
   return block_mergesort(a_reshaped)
 end
@@ -90,10 +91,11 @@ function TensorAlgebra.splitdims(
 )
   # First, fuse axes to get `blockmergesortperm`.
   # Then unpermute the blocks.
-  axes_prod =
-    groupreducewhile(tensor_product, split_axes, ndims(a); init=OneToOne()) do i, axis
-      return length(axis) ≤ length(axes(a, i))
-    end
+  axes_prod = groupreducewhile(
+    unmerged_tensor_product, split_axes, ndims(a); init=OneToOne()
+  ) do i, axis
+    return length(axis) ≤ length(axes(a, i))
+  end
   blockperms = blocksortperm.(axes_prod)
   sorted_axes = map((r, I) -> only(axes(r[I])), axes_prod, blockperms)
 
@@ -106,14 +108,6 @@ function TensorAlgebra.splitdims(
   return splitdims(BlockReshapeFusion(), a_blockpermed, split_axes...)
 end
 
-# This is a temporary fix for `eachindex` being broken for BlockSparseArrays
-# with mixed dual and non-dual axes. This shouldn't be needed once
-# GradedUnitRanges is rewritten using BlockArrays v1.
-# TODO: Delete this once GradedUnitRanges is rewritten.
-function Base.eachindex(a::AbstractBlockSparseArray)
-  return CartesianIndices(nondual.(axes(a)))
-end
-
 # TODO: Handle this through some kind of trait dispatch, maybe
 # a `SymmetryStyle`-like trait to check if the block sparse
 # matrix has graded axes.
@@ -121,19 +115,4 @@ function Base.axes(a::Adjoint{<:Any,<:AbstractBlockSparseMatrix})
   return dual.(reverse(axes(a')))
 end
 
-# This definition is only needed since calls like
-# `a[[Block(1), Block(2)]]` where `a isa AbstractGradedUnitRange`
-# returns a `BlockSparseVector` instead of a `BlockVector`
-# due to limitations in the `BlockArray` type not allowing
-# axes with non-Int element types.
-# TODO: Remove this once that issue is fixed,
-# see https://github.com/JuliaArrays/BlockArrays.jl/pull/405.
-using BlockArrays: BlockRange
-using LabelledNumbers: label
-function GradedUnitRanges.blocklabels(a::BlockSparseVector)
-  return map(BlockRange(a)) do block
-    return label(blocks(a)[Int(block)])
-  end
-end
-
 end
diff --git a/test/Project.toml b/test/Project.toml
index 7cc17c05..ffc9aee9 100644
--- a/test/Project.toml
+++ b/test/Project.toml
@@ -29,7 +29,7 @@ BlockArrays = "1"
 BlockSparseArrays = "0.3"
 DiagonalArrays = "0.3"
 GPUArraysCore = "0.2"
-GradedUnitRanges = "0.1"
+GradedUnitRanges = "0.2.2"
 JLArrays = "0.2"
 LabelledNumbers = "0.1"
 LinearAlgebra = "1"
@@ -38,8 +38,8 @@ Random = "1"
 SafeTestsets = "0.1"
 SparseArraysBase = "0.5"
 Suppressor = "0.2"
-SymmetrySectors = "0.1"
-TensorAlgebra = "0.2"
+SymmetrySectors = "0.1.7"
+TensorAlgebra = "0.2.4"
 Test = "1"
 TestExtras = "0.3"
 TypeParameterAccessors = "0.3"
diff --git a/test/test_gradedunitrangesext.jl b/test/test_gradedunitrangesext.jl
index fbbd0296..06a9a8cf 100644
--- a/test/test_gradedunitrangesext.jl
+++ b/test/test_gradedunitrangesext.jl
@@ -1,4 +1,3 @@
-@eval module $(gensym())
 using Test: @test, @testset
 using BlockArrays:
   AbstractBlockArray, Block, BlockedOneTo, blockedrange, blocklengths, blocksize
@@ -10,6 +9,7 @@ using GradedUnitRanges:
   GradedUnitRange,
   GradedUnitRangeDual,
   blocklabels,
+  dag,
   dual,
   gradedrange,
   isdual
@@ -19,6 +19,7 @@ using SymmetrySectors: U1
 using TensorAlgebra: fusedims, splitdims
 using LinearAlgebra: adjoint
 using Random: randn!
+
 function randn_blockdiagonal(elt::Type, axes::Tuple)
   a = BlockSparseArray{elt}(undef, axes)
   blockdiaglength = minimum(blocksize(a))
@@ -390,4 +391,15 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64})
     @test all(GradedUnitRanges.space_isequal.(axes(b), (r, dual(r))))
   end
 end
+
+@testset "dag" begin
+  elt = ComplexF64
+  r = gradedrange([U1(0) => 2, U1(1) => 3])
+  a = BlockSparseArray{elt}(undef, r, dual(r))
+  a[Block(1, 1)] = randn(elt, 2, 2)
+  a[Block(2, 2)] = randn(elt, 3, 3)
+  @test isdual.(axes(a)) == (false, true)
+  ad = dag(a)
+  @test Array(ad) == conj(Array(a))
+  @test isdual.(axes(ad)) == (true, false)
 end
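For context, here is a minimal standalone sketch of the `dag` behavior that the new test exercises. It only rearranges what the patch itself contains: the imports mirror the updated test file (`dag`, `dual`, `gradedrange`, and `isdual` come from GradedUnitRanges, `U1` from SymmetrySectors), under the versions pinned in `[compat]` above (GradedUnitRanges 0.2.2, SymmetrySectors 0.1.7).

using BlockArrays: Block
using BlockSparseArrays: BlockSparseArray
using GradedUnitRanges: dag, dual, gradedrange, isdual
using SymmetrySectors: U1

# A graded axis with two U(1) sectors, of lengths 2 and 3.
r = gradedrange([U1(0) => 2, U1(1) => 3])

# A block-diagonal matrix whose column axis is dual.
a = BlockSparseArray{ComplexF64}(undef, r, dual(r))
a[Block(1, 1)] = randn(ComplexF64, 2, 2)
a[Block(2, 2)] = randn(ComplexF64, 3, 3)

ad = dag(a)
# `dag` conjugates the stored blocks and flips the duality of each axis
# without transposing, unlike `adjoint`, which also reverses the axes
# (see the `Base.axes(::Adjoint, ...)` overload kept in the extension).
@assert Array(ad) == conj(Array(a))
@assert isdual.(axes(ad)) == (true, false)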