6 changes: 3 additions & 3 deletions Project.toml
@@ -1,6 +1,6 @@
name = "GradedArrays"
uuid = "bc96ca6e-b7c8-4bb6-888e-c93f838762c2"
version = "0.7.0"
version = "0.7.1"
authors = ["ITensor developers <support@itensor.org> and contributors"]

[workspace]
@@ -43,12 +43,12 @@ HalfIntegers = "1.6"
 KroneckerArrays = "0.3.27"
 LinearAlgebra = "1.10"
 MatrixAlgebraKit = "0.6"
-NamedDimsArrays = "0.13, 0.14"
+NamedDimsArrays = "0.15"
 Random = "1.10"
 SUNRepresentations = "0.3"
 SparseArraysBase = "0.9"
 SplitApplyCombine = "1.2.3"
-TensorAlgebra = "0.7.20"
+TensorAlgebra = "0.8"
 TensorKitSectors = "0.3"
 TypeParameterAccessors = "0.4"
 julia = "1.10"
138 changes: 15 additions & 123 deletions src/broadcast.jl
@@ -1,6 +1,5 @@
 using Base.Broadcast: Broadcast as BC
 using FillArrays: Zeros, fillsimilar
-using TensorAlgebra: TensorAlgebra, *ₗ, +ₗ, -ₗ, /ₗ, conjed
+using TensorAlgebra: TensorAlgebra

 struct SectorStyle{I, N} <: BC.AbstractArrayStyle{N} end
 SectorStyle{I, N}(::Val{M}) where {I, N, M} = SectorStyle{I, M}()
@@ -31,21 +30,17 @@ function Base.Broadcast.materialize(a::SectorArray)
     return ofsector(a, Base.Broadcast.materialize(a.data))
 end

-function TensorAlgebra.:+ₗ(a::SectorArray, b::SectorArray)
-    _check_add_axes(a, b)
-    return ofsector(a, a.data +ₗ b.data)
-end
-
-function TensorAlgebra.:*ₗ(α::Number, a::SectorArray)
-    return ofsector(a, α *ₗ a.data)
-end
-TensorAlgebra.:*ₗ(a::SectorArray, α::Number) = α *ₗ a
-function TensorAlgebra.conjed(a::SectorArray)
-    return ofsector(a, TensorAlgebra.conjed(a.data))
+function Base.similar(bc::BC.Broadcasted{<:SectorStyle}, elt::Type, ax)
+    bc′ = BC.flatten(bc)
+    arg = bc′.args[findfirst(arg -> arg isa SectorArray, bc′.args)]
+    return ofsector(arg, similar(arg.data, elt))
 end

-function BC.broadcasted(style::SectorStyle, f, args...)
-    return TensorAlgebra.broadcasted_linear(style, f, args...)
+function Base.copyto!(dest::SectorArray, bc::BC.Broadcasted{<:SectorStyle})
+    lb = TensorAlgebra.tryflattenlinear(bc)
+    isnothing(lb) &&
+        throw(ArgumentError("SectorArray broadcasting requires linear operations"))
+    return copyto!(dest, lb)
 end

 struct GradedStyle{I, N, B <: BC.AbstractArrayStyle{N}} <: BC.AbstractArrayStyle{N}
@@ -90,67 +85,6 @@ function Base.similar(bc::BC.Broadcasted{<:GradedStyle}, elt::Type, ax)
     return graded_similar(arg, elt, ax)
 end

-function _check_add_axes(a::AbstractArray, b::AbstractArray)
-    axes(a) == axes(b) ||
-        throw(
-            ArgumentError("linear broadcasting requires matching axes")
-        )
-    return nothing
-end
-
-function lazyblock(a::GradedArray{<:Any, N}, I::Vararg{Block{1}, N}) where {N}
-    if isstored(a, I...)
-        return blocks(a)[Int.(I)...]
-    else
-        block_ax = map((ax, i) -> eachblockaxis(ax)[Int(i)], axes(a), I)
-        return fillsimilar(Zeros{eltype(a)}(block_ax), block_ax)
-    end
-end
-lazyblock(a::GradedArray, I::Block) = lazyblock(a, Tuple(I)...)
-
-TensorAlgebra.@scaledarray_type ScaledGradedArray
-TensorAlgebra.@scaledarray ScaledGradedArray
-TensorAlgebra.@conjarray_type ConjGradedArray
-TensorAlgebra.@conjarray ConjGradedArray
-TensorAlgebra.@addarray_type AddGradedArray
-TensorAlgebra.@addarray AddGradedArray
-
-const LazyGradedArray = Union{
-    GradedArray, ScaledGradedArray, ConjGradedArray, AddGradedArray,
-}
-
-function TensorAlgebra.BroadcastStyle_scaled(arrayt::Type{<:ScaledGradedArray})
-    return BC.BroadcastStyle(TensorAlgebra.unscaled_type(arrayt))
-end
-function TensorAlgebra.BroadcastStyle_conj(arrayt::Type{<:ConjGradedArray})
-    return BC.BroadcastStyle(TensorAlgebra.conjed_type(arrayt))
-end
-function TensorAlgebra.BroadcastStyle_add(arrayt::Type{<:AddGradedArray})
-    args_type = TensorAlgebra.addends_type(arrayt)
-    return Base.promote_op(BC.combine_styles, fieldtypes(args_type)...)()
-end
-
-function lazyblock(a::ScaledGradedArray, I::Block)
-    return TensorAlgebra.coeff(a) *ₗ lazyblock(TensorAlgebra.unscaled(a), I)
-end
-function lazyblock(a::ConjGradedArray, I::Block)
-    return conjed(lazyblock(conjed(a), I))
-end
-function lazyblock(a::AddGradedArray, I::Block)
-    return +ₗ(map(Base.Fix2(lazyblock, I), TensorAlgebra.addends(a))...)
-end
-
-# TODO: Use `eachblockstoredindex` directly for lazy graded wrappers and delete the
-# `graded_eachblockstoredindex` helper once that refactor is split into its own PR.
-graded_eachblockstoredindex(a::GradedArray) = collect(eachblockstoredindex(a))
-function graded_eachblockstoredindex(a::ScaledGradedArray)
-    return graded_eachblockstoredindex(TensorAlgebra.unscaled(a))
-end
-graded_eachblockstoredindex(a::ConjGradedArray) = graded_eachblockstoredindex(conjed(a))
-function graded_eachblockstoredindex(a::AddGradedArray)
-    return unique!(vcat(map(graded_eachblockstoredindex, TensorAlgebra.addends(a))...))
-end
-
 # TODO: Rename `graded_similar` to `similar_graded` or fold it into `similar`
 # entirely once the follow-up allocator cleanup is ready.
 function graded_similar(
@@ -160,52 +94,10 @@ function graded_similar(
 ) where {N}
     return similar(a, elt, ax)
 end
-function graded_similar(
-    a::ScaledGradedArray,
-    elt::Type,
-    ax::NTuple{N, <:GradedUnitRange}
-) where {N}
-    return graded_similar(TensorAlgebra.unscaled(a), elt, ax)
-end
-function graded_similar(
-    a::ConjGradedArray,
-    elt::Type,
-    ax::NTuple{N, <:GradedUnitRange}
-) where {N}
-    return graded_similar(conjed(a), elt, ax)
-end
-function graded_similar(
-    a::AddGradedArray,
-    elt::Type,
-    ax::NTuple{N, <:GradedUnitRange}
-) where {N}
-    style = BC.combine_styles(TensorAlgebra.addends(a)...)
-    bc = BC.Broadcasted(style, +, TensorAlgebra.addends(a))
-    return similar(bc, elt, ax)
-end
-
-function copy_lazygraded(a::LazyGradedArray)
-    c = graded_similar(a, eltype(a), axes(a))
-    for I in graded_eachblockstoredindex(a)
-        c[I] = lazyblock(a, I)
-    end
-    return c
-end
-
-function TensorAlgebra.:+ₗ(a::LazyGradedArray, b::LazyGradedArray)
-    _check_add_axes(a, b)
-    return AddGradedArray(a, b)
-end
-TensorAlgebra.:*ₗ(α::Number, a::GradedArray) = ScaledGradedArray(α, a)
-TensorAlgebra.conjed(a::GradedArray) = ConjGradedArray(a)
-
-Base.copy(a::ScaledGradedArray) = copy_lazygraded(a)
-Base.copy(a::ConjGradedArray) = copy_lazygraded(a)
-Base.copy(a::AddGradedArray) = copy_lazygraded(a)
-Base.Array(a::ScaledGradedArray) = Array(copy(a))
-Base.Array(a::ConjGradedArray) = Array(copy(a))
-Base.Array(a::AddGradedArray) = Array(copy(a))

-function BC.broadcasted(style::GradedStyle, f, args...)
-    return TensorAlgebra.broadcasted_linear(style, f, args...)
+function Base.copyto!(dest::GradedArray, bc::BC.Broadcasted{<:GradedStyle})
+    lb = TensorAlgebra.tryflattenlinear(bc)
+    isnothing(lb) &&
+        throw(ArgumentError("GradedArray broadcasting requires linear operations"))
+    return copyto!(dest, lb)
 end
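
Note: the two `copyto!` methods above are the crux of this PR. Broadcasting no longer builds lazy wrapper arrays through a `BC.broadcasted` override; instead `similar` allocates the destination and `copyto!` only proceeds if `TensorAlgebra.tryflattenlinear` can rewrite the broadcast as a linear combination. A minimal sketch of the resulting behavior, assuming the constructor form used in this PR's tests (the 2×2 shape and `randn!` fill are illustrative):

```julia
using GradedArrays: SectorArray, U1, dual
using Random: randn!

# Two SectorArrays with matching sectors, as constructed in the tests.
s = SectorArray((U1(0), dual(U1(0))), randn!(Matrix{Float64}(undef, 2, 2)))
t = SectorArray((U1(0), dual(U1(0))), randn!(Matrix{Float64}(undef, 2, 2)))

# Linear combinations flatten through tryflattenlinear and materialize
# eagerly into a dense-backed SectorArray.
st = 2 .* s .+ 3 .* t
@assert st isa SectorArray && st.data isa Matrix

# Nonlinear broadcasts cannot be flattened, so copyto! throws.
try
    s .* t
catch err
    @assert err isa ArgumentError
end
```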
25 changes: 16 additions & 9 deletions src/tensoralgebra.jl
@@ -155,27 +155,34 @@ function TensorAlgebra.unmatricize(
     return SectorArray(msectors.sectors, mdata)
 end

-function TensorAlgebra.permutedimsadd!(
-    y::SectorArray, x::SectorArray, perm,
+function TensorAlgebra.permutedimsopadd!(
+    y::SectorArray, op, x::SectorArray, perm,
     α::Number, β::Number
 )
     ysectors, ydata = kroneckerfactors(y)
     xsectors, xdata = kroneckerfactors(x)
     ysectors == permutedims(xsectors, perm) || throw(DimensionMismatch())
     phase = fermion_permutation_phase(xsectors, perm)
-    TensorAlgebra.permutedimsadd!(ydata, xdata, perm, phase * α, β)
+    TensorAlgebra.permutedimsopadd!(ydata, op, xdata, perm, phase * α, β)
     return y
 end
-function TensorAlgebra.permutedimsadd!(
-    y::GradedArray{<:Any, N}, x::GradedArray{<:Any, N}, perm,
+function TensorAlgebra.permutedimsopadd!(
+    y::GradedArray{<:Any, N}, op, x::GradedArray{<:Any, N}, perm,
     α::Number, β::Number
 ) where {N}
-    y .*= β
+    if !iszero(β)
+        for bI in eachblockstoredindex(y)
+            y_b = @view!(y[bI])
+            idperm = ntuple(identity, ndims(y_b))
+            TensorAlgebra.permutedimsopadd!(y_b, identity, y_b, idperm, β, false)
+        end
+    end
     for bI in eachblockstoredindex(x)
         b = Tuple(bI)
-        b_dest = ntuple(i -> b[perm[i]], N)
-        y[Block(b_dest)] =
-            TensorAlgebra.permutedimsadd!(y[Block(b_dest)], x[bI], perm, α, true)
+        b_dest = Block(ntuple(i -> b[perm[i]], N))
+        y_b = @view!(y[b_dest])
+        x_b = @view!(x[bI])
+        TensorAlgebra.permutedimsopadd!(y_b, op, x_b, perm, α, true)
     end
     return y
 end
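
Note: `permutedimsopadd!` generalizes the old `permutedimsadd!` with an elementwise operator `op` applied during the permuted accumulation. A hedged dense-array reference for the contract the blockwise methods above implement; the reading `y .= β .* y .+ α .* op.(permutedims(x, perm))` is inferred from this diff, not quoted from TensorAlgebra's documentation:

```julia
# Illustrative dense reference, not the library's method.
function permutedimsopadd_dense!(y, op, x, perm, α::Number, β::Number)
    permuted = PermutedDimsArray(x, perm)  # lazy permutation, no copy
    if iszero(β)
        y .= α .* op.(permuted)            # overwrite: old contents of y unused
    else
        y .= β .* y .+ α .* op.(permuted)  # rescale, then accumulate
    end
    return y
end
```

The `GradedArray` method realizes the same update in two passes: when `β ≠ 0` it first rescales each stored block of `y` in place (an identity permutation with `op = identity`, mirroring the BLAS convention that `β == 0` means the old contents are ignored), then accumulates `α * op(...)` from each stored block of `x` with the inner `β = true`. Switching from `y[Block(b_dest)] = permutedimsadd!(...)` to `@view!` mutates destination blocks in place instead of reassigning them.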
4 changes: 2 additions & 2 deletions test/Project.toml
@@ -30,13 +30,13 @@ ITensorPkgSkeleton = "0.3.42"
 KroneckerArrays = "0.3"
 LinearAlgebra = "1.10"
 MatrixAlgebraKit = "0.6"
-NamedDimsArrays = "0.13, 0.14"
+NamedDimsArrays = "0.15"
 Random = "1.10"
 SUNRepresentations = "0.3"
 SafeTestsets = "0.1"
 SparseArraysBase = "0.9"
 Suppressor = "0.2.8"
-TensorAlgebra = "0.7.19"
+TensorAlgebra = "0.8"
 TensorKitSectors = "0.3"
 Test = "1.10"
 TestExtras = "0.3.1"
16 changes: 10 additions & 6 deletions test/test_gradedarray.jl
@@ -9,7 +9,7 @@ using KroneckerArrays: cartesianrange
 using LinearAlgebra: adjoint
 using Random: randn!
 using SparseArraysBase: storedlength
-using TensorAlgebra: TensorAlgebra, *ₗ, +ₗ, -ₗ, /ₗ, conjed
+using TensorAlgebra: TensorAlgebra, linearbroadcasted
 using Test: @test, @test_broken, @test_throws, @testset

 function randn_blockdiagonal(elt::Type, axes::Tuple)
@@ -403,24 +403,28 @@ const elts = (Float32, Float64, Complex{Float32}, Complex{Float64})
     @test Array(C) ≈ α .* Array(A) .+ β .* Array(B)
     @test axes(C) == axes(A)
     @test all(dim -> isdual(axes(C, dim)) == isdual(axes(A, dim)), 1:ndims(A))
-    Cₗ = α *ₗ A +ₗ β *ₗ B
+    Cₗ = linearbroadcasted(+, linearbroadcasted(*, α, A), linearbroadcasted(*, β, B))
     @test TensorAlgebra.iscall(Cₗ)
-    @test Array(Cₗ) ≈ α .* Array(A) .+ β .* Array(B)
+    @test Array(copy(Cₗ)) ≈ α .* Array(A) .+ β .* Array(B)
     @test axes(Cₗ) == axes(A)

     D = conj.(A) .- B ./ β
     @test Array(D) ≈ conj.(Array(A)) .- Array(B) ./ β
     @test axes(D) == axes(A)
-    Dₗ = conjed(A) -ₗ (B /ₗ β)
+    Dₗ = linearbroadcasted(
+        +,
+        linearbroadcasted(conj, A),
+        linearbroadcasted(*, -1 / β, B)
+    )
     @test TensorAlgebra.iscall(Dₗ)
-    @test Array(Dₗ) ≈ conj.(Array(A)) .- Array(B) ./ β
+    @test Array(copy(Dₗ)) ≈ conj.(Array(A)) .- Array(B) ./ β
     @test axes(Dₗ) == axes(A)

     @test_throws ArgumentError A .* B

     r_bad = gradedrange([U1(0) => 1, U1(1) => 3])
     B_bad = randn_blockdiagonal(elt, (r_bad, dual(r_bad)))
-    @test_throws ArgumentError A .+ B_bad
+    @test_throws DimensionMismatch A .+ B_bad
 end
 false && @testset "Construct from dense" begin
     r = gradedrange([U1(0) => 2, U1(1) => 3])
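
Note: the expected error type changes because the bespoke `_check_add_axes` guard (which threw `ArgumentError`) was deleted from `src/broadcast.jl`; a plausible reading is that mismatched graded axes now surface from Base's broadcast shape checking as `DimensionMismatch`. A sketch, reusing this test file's local `randn_blockdiagonal` helper (not part of the package API; the axes for `A` are illustrative):

```julia
using GradedArrays: U1, dual, gradedrange
using Test: @test_throws

r = gradedrange([U1(0) => 2, U1(1) => 3])
r_bad = gradedrange([U1(0) => 1, U1(1) => 3])  # first block has length 1, not 2
A = randn_blockdiagonal(Float64, (r, dual(r)))
B_bad = randn_blockdiagonal(Float64, (r_bad, dual(r_bad)))

@test_throws DimensionMismatch A .+ B_bad  # was ArgumentError before this PR
```

Relatedly, the updated tests materialize lazy `linearbroadcasted` expressions with `copy` before converting to `Array`, hence the `Array(copy(Cₗ))` form.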
34 changes: 4 additions & 30 deletions test/test_tensoralgebraext.jl
@@ -4,8 +4,8 @@ using GradedArrays: GradedArray, GradedMatrix, SU2, SectorArray, SectorDelta, U1
     flip, gradedrange, isdual, sector, sector_type, sectorrange, trivial,
     trivial_gradedrange, ⊗
 using Random: randn!
-using TensorAlgebra: TensorAlgebra, *ₗ, +ₗ, -ₗ, /ₗ, FusionStyle, conjed, contract,
-    matricize, tensor_product_axis, trivial_axis, unmatricize
+using TensorAlgebra: TensorAlgebra, FusionStyle, contract, matricize, tensor_product_axis,
+    trivial_axis, unmatricize
 using Test: @test, @test_throws, @testset

 function randn_blockdiagonal(elt::Type, axes::Tuple)
@@ -76,38 +76,20 @@ end
     @test st.data isa Matrix
     @test Array(st) ≈ α .* Array(s) .+ β .* Array(t)
     @test axes(st) == axes(s)
-    @test (α *ₗ s) isa SectorArray
-    @test TensorAlgebra.iscall((α *ₗ s).data)
-    @test (α *ₗ s +ₗ β *ₗ t) isa SectorArray
-    @test TensorAlgebra.iscall((α *ₗ s +ₗ β *ₗ t).data)
-    @test Array(α *ₗ s +ₗ β *ₗ t) ≈ α .* Array(s) .+ β .* Array(t)
-    @test axes(α *ₗ s +ₗ β *ₗ t) == axes(s)
-    @test Base.broadcasted(*, α, s) isa SectorArray
-    @test TensorAlgebra.iscall(Base.broadcasted(*, α, s).data)

     conjdiff = conj.(s) .- t ./ β
     @test conjdiff isa SectorArray
     @test conjdiff.data isa Matrix
     @test Array(conjdiff) ≈ conj.(Array(s)) .- Array(t) ./ β
     @test axes(conjdiff) == axes(s)
-    @test conjed(s) isa SectorArray
-    @test TensorAlgebra.iscall(conjed(s).data)
-    @test (conjed(s) -ₗ (t /ₗ β)) isa SectorArray
-    @test TensorAlgebra.iscall((conjed(s) -ₗ (t /ₗ β)).data)
-    @test Array(conjed(s) -ₗ (t /ₗ β)) ≈ conj.(Array(s)) .- Array(t) ./ β
-    @test axes(conjed(s) -ₗ (t /ₗ β)) == axes(s)

     @test_throws ArgumentError s .* t
-    @test_throws ArgumentError exp.(s)
 end

-@testset "SectorArray scalar multiplication materializes on broadcast materialize" begin
+@testset "SectorArray scalar multiplication materializes on broadcast" begin
     s = SectorArray((U1(0), dual(U1(0))), randn!(Matrix{Float64}(undef, 2, 2)))

-    scaled = Base.broadcasted(*, 2, s)
-    @test scaled isa SectorArray
-    @test TensorAlgebra.iscall(scaled.data)
-    materialized = Base.Broadcast.materialize(scaled)
+    materialized = 2 .* s
     @test materialized isa SectorArray
     @test materialized.data isa Matrix
     @test materialized[1, 1] == 2 * s[1, 1]
@@ -120,14 +102,6 @@ end
     @test Array(scaled_mul) ≈ 2 .* Array(s)
 end

-@testset "SectorArray lazy display" begin
-    s = SectorArray((U1(0), dual(U1(0))), randn!(Matrix{Float64}(undef, 2, 2)))
-    lazy = 2 *ₗ s
-    shown = sprint(show, MIME("text/plain"), lazy)
-    @test contains(shown, "SectorMatrix")
-    @test contains(shown, "⊗")
-end
-
 const elts = (Float32, Float64, Complex{Float32}, Complex{Float64})
 @testset "`contract` `GradedArray` (eltype=$elt)" for elt in elts
     @testset "matricize" begin
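
Note: with the lazy-wrapper assertions removed, the surviving testset pins down the eager path only. A minimal restatement of what the kept `@test`s check, assuming the constructor form used in the tests; the desugaring comment describes standard `Base.Broadcast` behavior combined with the `similar`/`copyto!` methods from `src/broadcast.jl`:

```julia
using GradedArrays: SectorArray, U1, dual
using Random: randn!

s = SectorArray((U1(0), dual(U1(0))), randn!(Matrix{Float64}(undef, 2, 2)))

# `2 .* s` is sugar for materialize(broadcasted(*, 2, s)); after this PR that
# allocates the destination via similar(bc, eltype) and fills it through the
# linearity-checked copyto!, yielding a dense-backed SectorArray.
materialized = 2 .* s
@assert materialized isa SectorArray
@assert materialized.data isa Matrix
@assert materialized[1, 1] == 2 * s[1, 1]
```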