12 changes: 6 additions & 6 deletions Project.toml
@@ -1,7 +1,7 @@
name = "BlockSparseArrays"
uuid = "2c9a651f-6452-4ace-a6ac-809f4280fbb4"
authors = ["ITensor developers <[email protected]> and contributors"]
version = "0.3.8"
version = "0.3.9"

[deps]
Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
@@ -21,12 +21,12 @@ SplitApplyCombine = "03a91e81-4c3e-53e1-a0a4-9c0c8f19dd66"
TypeParameterAccessors = "7e5a90cf-f82e-492e-a09b-e3e26432c138"

[weakdeps]
LabelledNumbers = "f856a3a6-4152-4ec4-b2a7-02c1a55d7993"
TensorAlgebra = "68bd88dc-f39d-4e12-b2ca-f046b68fcc6a"
TensorProducts = "decf83d6-1968-43f4-96dc-fdb3fe15fc6d"

[extensions]
BlockSparseArraysGradedUnitRangesExt = "GradedUnitRanges"
BlockSparseArraysTensorAlgebraExt = ["LabelledNumbers", "TensorAlgebra"]
BlockSparseArraysTensorAlgebraExt = ["TensorProducts", "TensorAlgebra"]

[compat]
Adapt = "4.1.1"
@@ -38,14 +38,14 @@ DiagonalArrays = "0.3"
Dictionaries = "0.4.3"
FillArrays = "1.13.0"
GPUArraysCore = "0.1.0, 0.2"
GradedUnitRanges = "0.1.0"
LabelledNumbers = "0.1.0"
GradedUnitRanges = "0.2.2"
LinearAlgebra = "1.10"
MacroTools = "0.5.13"
MapBroadcast = "0.1.5"
SparseArraysBase = "0.5"
SplitApplyCombine = "1.2.3"
TensorAlgebra = "0.1.0, 0.2"
TensorAlgebra = "0.2.4"
TensorProducts = "0.1.2"
Test = "1.10"
TypeParameterAccessors = "0.2.0, 0.3"
julia = "1.10"
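Note on the dependency change above: the TensorAlgebra extension is now keyed on TensorProducts + TensorAlgebra rather than LabelledNumbers + TensorAlgebra. A minimal sketch of verifying the extension loads in a Julia ≥ 1.9 session (assumes all three packages are installed):

using BlockSparseArrays
using TensorAlgebra, TensorProducts  # loading both weakdeps triggers the extension

# `Base.get_extension` returns the extension module once it has loaded,
# or `nothing` if it has not.
ext = Base.get_extension(BlockSparseArrays, :BlockSparseArraysTensorAlgebraExt)
@assert ext isa Module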
ext/BlockSparseArraysTensorAlgebraExt/BlockSparseArraysTensorAlgebraExt.jl
@@ -1,15 +1,10 @@
module BlockSparseArraysTensorAlgebraExt
-using BlockArrays: AbstractBlockedUnitRange
-using GradedUnitRanges: tensor_product
-using TensorAlgebra: TensorAlgebra, FusionStyle, BlockReshapeFusion

-function TensorAlgebra.:⊗(a1::AbstractBlockedUnitRange, a2::AbstractBlockedUnitRange)
-  return tensor_product(a1, a2)
-end
+using TensorAlgebra: TensorAlgebra, FusionStyle, BlockReshapeFusion
+using TensorProducts: OneToOne

using BlockArrays: AbstractBlockedUnitRange
using BlockSparseArrays: AbstractBlockSparseArray, blockreshape
-using TensorAlgebra: TensorAlgebra, FusionStyle, BlockReshapeFusion

TensorAlgebra.FusionStyle(::AbstractBlockedUnitRange) = BlockReshapeFusion()
@@ -46,13 +41,12 @@
using GradedUnitRanges:
  GradedUnitRanges,
  AbstractGradedUnitRange,
-  OneToOne,
  blockmergesortperm,
  blocksortperm,
  dual,
  invblockperm,
  nondual,
-  tensor_product
+  unmerged_tensor_product
using LinearAlgebra: Adjoint, Transpose
using TensorAlgebra:
  TensorAlgebra, FusionStyle, BlockReshapeFusion, SectorFusion, fusedims, splitdims
@@ -77,10 +71,17 @@
end

function TensorAlgebra.fusedims(
-  ::SectorFusion, a::AbstractArray, axes::AbstractUnitRange...
+  ::SectorFusion, a::AbstractArray, merged_axes::AbstractUnitRange...
)
  # First perform a fusion using a block reshape.
-  a_reshaped = fusedims(BlockReshapeFusion(), a, axes...)
+  # TODO avoid groupreducewhile. Require refactor of fusedims.
+  unmerged_axes = groupreducewhile(
+    unmerged_tensor_product, axes(a), length(merged_axes); init=OneToOne()
+  ) do i, axis
+    return length(axis) ≤ length(merged_axes[i])
+  end
+
+  a_reshaped = fusedims(BlockReshapeFusion(), a, unmerged_axes...)
  # Sort the blocks by sector and merge the equivalent sectors.
  return block_mergesort(a_reshaped)
end
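The rewritten `fusedims` above relies on `groupreducewhile` to partition `axes(a)` into groups whose tensor products line up with the requested `merged_axes`. As a reading aid, here is a toy stand-in for that helper; this is not TensorAlgebra's implementation, and its semantics are inferred from the two call sites in this file:

# Toy sketch: greedily split `itr` into `n` groups, reducing each group
# with `f` from `init`, and growing group `i` while `pred(i, candidate)` holds.
function toy_groupreducewhile(pred, f, itr, n; init)
  groups = Vector{Any}(undef, n)
  k = 1
  for i in 1:n
    acc = init
    while k ≤ length(itr) && pred(i, f(acc, itr[k]))
      acc = f(acc, itr[k])
      k += 1
    end
    groups[i] = acc
  end
  return Tuple(groups)
end

# Example with plain ranges, using a product of lengths in place of
# `unmerged_tensor_product`:
merged = (Base.OneTo(6), Base.OneTo(2))
split_axes = (Base.OneTo(2), Base.OneTo(3), Base.OneTo(2))
toy_groupreducewhile((i, ax) -> length(ax) ≤ length(merged[i]),
                     (a, b) -> Base.OneTo(length(a) * length(b)),
                     split_axes, 2; init=Base.OneTo(1))
# returns (Base.OneTo(6), Base.OneTo(2)), matching `merged`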
@@ -90,10 +91,11 @@
)
  # First, fuse axes to get `blockmergesortperm`.
  # Then unpermute the blocks.
-  axes_prod =
-    groupreducewhile(tensor_product, split_axes, ndims(a); init=OneToOne()) do i, axis
-      return length(axis) ≤ length(axes(a, i))
-    end
+  axes_prod = groupreducewhile(
+    unmerged_tensor_product, split_axes, ndims(a); init=OneToOne()
+  ) do i, axis
+    return length(axis) ≤ length(axes(a, i))
+  end
  blockperms = blocksortperm.(axes_prod)
  sorted_axes = map((r, I) -> only(axes(r[I])), axes_prod, blockperms)
@@ -106,34 +108,11 @@
  return splitdims(BlockReshapeFusion(), a_blockpermed, split_axes...)
end
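To make the block-sorting step concrete: `blocksortperm` computes, per axis, the permutation of blocks that orders their sectors. A hedged sketch (the result shown in the comment is an expectation, not verified output):

using GradedUnitRanges: gradedrange, blocksortperm
using SymmetrySectors: U1

# Two blocks whose sectors are deliberately out of order.
r = gradedrange([U1(1) => 2, U1(0) => 3])

# Expected: a permutation placing the U1(0) block first,
# i.e. something equivalent to Block.([2, 1]).
blocksortperm(r)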

-# This is a temporary fix for `eachindex` being broken for BlockSparseArrays
-# with mixed dual and non-dual axes. This shouldn't be needed once
-# GradedUnitRanges is rewritten using BlockArrays v1.
-# TODO: Delete this once GradedUnitRanges is rewritten.
-function Base.eachindex(a::AbstractBlockSparseArray)
-  return CartesianIndices(nondual.(axes(a)))
-end

-# TODO: Handle this through some kind of trait dispatch, maybe
-# a `SymmetryStyle`-like trait to check if the block sparse
-# matrix has graded axes.
-function Base.axes(a::Adjoint{<:Any,<:AbstractBlockSparseMatrix})
-  return dual.(reverse(axes(a')))
-end

-# This definition is only needed since calls like
-# `a[[Block(1), Block(2)]]` where `a isa AbstractGradedUnitRange`
-# returns a `BlockSparseVector` instead of a `BlockVector`
-# due to limitations in the `BlockArray` type not allowing
-# axes with non-Int element types.
-# TODO: Remove this once that issue is fixed,
-# see https://github.com/JuliaArrays/BlockArrays.jl/pull/405.
-using BlockArrays: BlockRange
-using LabelledNumbers: label
-function GradedUnitRanges.blocklabels(a::BlockSparseVector)
-  return map(BlockRange(a)) do block
-    return label(blocks(a)[Int(block)])
-  end
-end

end
6 changes: 3 additions & 3 deletions test/Project.toml
@@ -29,7 +29,7 @@ BlockArrays = "1"
BlockSparseArrays = "0.3"
DiagonalArrays = "0.3"
GPUArraysCore = "0.2"
GradedUnitRanges = "0.1"
GradedUnitRanges = "0.2.2"
JLArrays = "0.2"
LabelledNumbers = "0.1"
LinearAlgebra = "1"
@@ -38,8 +38,8 @@ Random = "1"
SafeTestsets = "0.1"
SparseArraysBase = "0.5"
Suppressor = "0.2"
SymmetrySectors = "0.1"
TensorAlgebra = "0.2"
SymmetrySectors = "0.1.7"
TensorAlgebra = "0.2.4"
Test = "1"
TestExtras = "0.3"
TypeParameterAccessors = "0.3"
14 changes: 13 additions & 1 deletion test/test_gradedunitrangesext.jl
@@ -1,4 +1,3 @@
-@eval module $(gensym())
using Test: @test, @testset
using BlockArrays:
AbstractBlockArray, Block, BlockedOneTo, blockedrange, blocklengths, blocksize
@@ -10,6 +9,7 @@ using GradedUnitRanges:
  GradedUnitRange,
  GradedUnitRangeDual,
  blocklabels,
+  dag,
  dual,
  gradedrange,
  isdual
@@ -19,6 +19,7 @@ using SymmetrySectors: U1
using TensorAlgebra: fusedims, splitdims
using LinearAlgebra: adjoint
using Random: randn!

function randn_blockdiagonal(elt::Type, axes::Tuple)
  a = BlockSparseArray{elt}(undef, axes)
  blockdiaglength = minimum(blocksize(a))
@@ -390,4 +391,15 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64})
    @test all(GradedUnitRanges.space_isequal.(axes(b), (r, dual(r))))
  end
end

@testset "dag" begin
elt = ComplexF64
r = gradedrange([U1(0) => 2, U1(1) => 3])
a = BlockSparseArray{elt}(undef, r, dual(r))
a[Block(1, 1)] = randn(elt, 2, 2)
a[Block(2, 2)] = randn(elt, 3, 3)
@test isdual.(axes(a)) == (false, true)
ad = dag(a)
@test Array(ad) == conj(Array(a))
@test isdual.(axes(ad)) == (true, false)
end