This repository was archived by the owner on Apr 14, 2025. It is now read-only.
Merged
4 changes: 3 additions & 1 deletion Project.toml
@@ -1,14 +1,15 @@
name = "GradedUnitRanges"
uuid = "e2de450a-8a67-46c7-b59c-01d5a3d041c5"
authors = ["ITensor developers <[email protected]> and contributors"]
version = "0.1.7"
version = "0.2.0"

[deps]
BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e"
Compat = "34da2185-b29b-5c13-b0c7-acf172513d20"
FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b"
LabelledNumbers = "f856a3a6-4152-4ec4-b2a7-02c1a55d7993"
SplitApplyCombine = "03a91e81-4c3e-53e1-a0a4-9c0c8f19dd66"
TensorProducts = "decf83d6-1968-43f4-96dc-fdb3fe15fc6d"

[weakdeps]
SymmetrySectors = "f8a8ad64-adbc-4fce-92f7-ffe2bb36a86e"
@@ -23,4 +24,5 @@ FillArrays = "1.13.0"
LabelledNumbers = "0.1.0"
SplitApplyCombine = "1.2.3"
SymmetrySectors = "0.1.4"
TensorProducts = "0.1.2"
julia = "1.10"
2 changes: 1 addition & 1 deletion docs/Project.toml
@@ -6,6 +6,6 @@ Literate = "98b081ad-f1c9-55d3-8b20-4c87d4299306"

[compat]
Documenter = "1.8.1"
GradedUnitRanges = "0.1.6"
GradedUnitRanges = "0.2"
LabelledNumbers = "0.1.0"
Literate = "2.20.1"
1 change: 0 additions & 1 deletion src/GradedUnitRanges.jl
@@ -7,7 +7,6 @@ include("gradedunitrange.jl")
include("dual.jl")
include("labelledunitrangedual.jl")
include("gradedunitrangedual.jl")
include("onetoone.jl")
include("fusion.jl")

end
62 changes: 15 additions & 47 deletions src/fusion.jl
@@ -1,58 +1,30 @@
using BlockArrays: AbstractBlockedUnitRange, blocklengths
using LabelledNumbers: LabelledInteger, label, labelled
using SplitApplyCombine: groupcount

# https://github.com/ITensor/ITensors.jl/blob/v0.3.57/NDTensors/src/lib/GradedAxes/src/tensor_product.jl
# https://en.wikipedia.org/wiki/Tensor_product
# https://github.com/KeitaNakamura/Tensorial.jl
function tensor_product(
a1::AbstractUnitRange,
a2::AbstractUnitRange,
a3::AbstractUnitRange,
a_rest::Vararg{AbstractUnitRange},
)
return foldl(tensor_product, (a1, a2, a3, a_rest...))
end
using TensorProducts: TensorProducts, OneToOne, tensor_product

flip_dual(r::AbstractUnitRange) = r
flip_dual(r::GradedUnitRangeDual) = flip(r)
function tensor_product(a1::AbstractUnitRange, a2::AbstractUnitRange)
return tensor_product(flip_dual(a1), flip_dual(a2))
end

function tensor_product(a1::Base.OneTo, a2::Base.OneTo)
return Base.OneTo(length(a1) * length(a2))
end

function tensor_product(::OneToOne, a2::AbstractUnitRange)
return a2
end

function tensor_product(a1::AbstractUnitRange, ::OneToOne)
return a1
end

function tensor_product(::OneToOne, ::OneToOne)
return OneToOne()
end

function fuse_labels(x, y)
return error(
"`fuse_labels` not implemented for object of type `$(typeof(x))` and `$(typeof(y))`."
)
end

function fuse_blocklengths(x::Integer, y::Integer)
# return blocked unit range to keep non-abelian interface
return blockedrange([x * y])
end

function fuse_blocklengths(x::LabelledInteger, y::LabelledInteger)
# return blocked unit range to keep non-abelian interface
return blockedrange([labelled(x * y, fuse_labels(label(x), label(y)))])
end

function tensor_product(a1::AbstractBlockedUnitRange, a2::AbstractBlockedUnitRange)
unmerged_tensor_product() = OneToOne()
unmerged_tensor_product(a) = a
unmerged_tensor_product(a1, a2) = tensor_product(a1, a2)

function unmerged_tensor_product(a1, a2, as...)
return unmerged_tensor_product(unmerged_tensor_product(a1, a2), as...)
end

function unmerged_tensor_product(a1::AbstractGradedUnitRange, a2::AbstractGradedUnitRange)
nested = map(Iterators.flatten((Iterators.product(blocks(a1), blocks(a2)),))) do it
return mapreduce(length, fuse_blocklengths, it)
end
@@ -96,15 +68,11 @@
blockmergesort(g::GradedUnitRangeDual) = flip(blockmergesort(flip(g)))
blockmergesort(g::AbstractUnitRange) = g

# fusion_product produces a sorted, non-dual GradedUnitRange
function fusion_product(g1, g2)
return blockmergesort(tensor_product(g1, g2))
end
# tensor_product produces a sorted, non-dual GradedUnitRange
TensorProducts.tensor_product(g::AbstractGradedUnitRange) = blockmergesort(flip_dual(g))

fusion_product(g::AbstractUnitRange) = blockmergesort(g)
fusion_product(g::GradedUnitRangeDual) = fusion_product(flip(g))

# recursive fusion_product. Simpler than reduce + fix type stability issues with reduce
function fusion_product(g1, g2, g3...)
return fusion_product(fusion_product(g1, g2), g3...)
function TensorProducts.tensor_product(
g1::AbstractGradedUnitRange, g2::AbstractGradedUnitRange
)
return blockmergesort(unmerged_tensor_product(g1, g2))
end
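For context, a minimal usage sketch of the reorganized interface, assuming GradedUnitRanges 0.2 and TensorProducts 0.1: tensor_product is now overloaded from TensorProducts.jl, unmerged_tensor_product fuses blocks pairwise without sorting, and blockmergesort then merges equal sectors, so tensor_product returns a sorted, non-dual graded range. The U1 sector and its fuse_labels/isless extensions below are illustrative assumptions modeled on the test suite, not part of the package API.

using GradedUnitRanges: GradedUnitRanges, gradedrange
using TensorProducts: tensor_product

# Hypothetical abelian sector, modeled on the U1 struct defined in test_dual.jl.
struct U1
  n::Int
end
Base.isless(a::U1, b::U1) = a.n < b.n  # assumed: lets blockmergesort order sectors
GradedUnitRanges.fuse_labels(a::U1, b::U1) = U1(a.n + b.n)  # abelian fusion: charges add

r = gradedrange([U1(0) => 2, U1(1) => 3])
# Pairwise fusion yields blocks U1(0) => 4, U1(1) => 6, U1(1) => 6, U1(2) => 9;
# blockmergesort then merges the two U1(1) blocks, so the result should be
# a graded range with sectors U1(0) => 4, U1(1) => 12, U1(2) => 9.
tensor_product(r, r)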
8 changes: 0 additions & 8 deletions src/onetoone.jl

This file was deleted.
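OneToOne, the trivial one-element axis this file used to define, now comes from TensorProducts.jl (see the updated imports in test_dual.jl below). A short sketch of the replacement, assuming the methods deleted from src/fusion.jl above were moved to TensorProducts unchanged:

using TensorProducts: OneToOne, tensor_product

a0 = OneToOne()                    # trivial one-element axis
length(a0) == 1                    # true, as the removed test_basics.jl testset checked
tensor_product(a0, Base.OneTo(3))  # OneToOne is the neutral element: returns Base.OneTo(3)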

6 changes: 3 additions & 3 deletions test/Project.toml
@@ -1,19 +1,19 @@
[deps]
Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595"
BlockArrays = "8e7c35d0-a365-5155-bbbb-fb81a777f24e"
BlockSparseArrays = "2c9a651f-6452-4ace-a6ac-809f4280fbb4"
GradedUnitRanges = "e2de450a-8a67-46c7-b59c-01d5a3d041c5"
LabelledNumbers = "f856a3a6-4152-4ec4-b2a7-02c1a55d7993"
SafeTestsets = "1bc83da4-3b8d-516f-aca4-4fe02f6d838f"
Suppressor = "fd094767-a336-5f1f-9728-57cf17d0bbfb"
TensorProducts = "decf83d6-1968-43f4-96dc-fdb3fe15fc6d"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"

[compat]
Aqua = "0.8.9"
BlockArrays = "1.4.0"
BlockSparseArrays = "0.2.28, 0.3"
GradedUnitRanges = "0.1.6"
GradedUnitRanges = "0.2"
LabelledNumbers = "0.1.0"
SafeTestsets = "0.1"
Suppressor = "0.2"
TensorProducts = "0.1.0"
Test = "1.10"
27 changes: 2 additions & 25 deletions test/test_basics.jl
@@ -1,4 +1,3 @@
@eval module $(gensym())
using BlockArrays:
Block,
BlockRange,
@@ -13,34 +12,13 @@ using BlockArrays:
combine_blockaxes,
mortar
using GradedUnitRanges:
GradedOneTo,
GradedUnitRange,
OneToOne,
blocklabels,
gradedrange,
sector_type,
space_isequal
GradedOneTo, GradedUnitRange, blocklabels, gradedrange, sector_type, space_isequal
using LabelledNumbers:
LabelledUnitRange, islabelled, label, labelled, labelled_isequal, unlabel
using Test: @test, @test_broken, @testset

@testset "OneToOne" begin
a0 = OneToOne()
@test a0 isa OneToOne{Bool}
@test eltype(a0) == Bool
@test length(a0) == 1
@test labelled_isequal(a0, a0)
@test a0[1] == true
@test a0[[1]] == [true]

@test labelled_isequal(a0, 1:1)
@test labelled_isequal(1:1, a0)
@test !labelled_isequal(a0, 1:2)
@test !labelled_isequal(1:2, a0)
end

@testset "GradedUnitRanges basics" begin
a0 = OneToOne()
a0 = Base.OneTo(1)
for a in (
blockedrange([labelled(2, "x"), labelled(3, "y")]),
gradedrange([labelled(2, "x"), labelled(3, "y")]),
@@ -260,4 +238,3 @@
@test length(a) == 1
@test label(first(a)) == "x"
end
end
18 changes: 2 additions & 16 deletions test/test_dual.jl
@@ -1,4 +1,3 @@
@eval module $(gensym())
using BlockArrays:
Block,
BlockedOneTo,
@@ -13,13 +12,11 @@
findblock,
mortar,
combine_blockaxes
using BlockSparseArrays: BlockSparseArray
using GradedUnitRanges:
AbstractGradedUnitRange,
GradedUnitRanges,
GradedUnitRangeDual,
LabelledUnitRangeDual,
OneToOne,
blocklabels,
blockmergesortperm,
blocksortperm,
@@ -36,6 +33,8 @@ using GradedUnitRanges:
using LabelledNumbers:
LabelledInteger, LabelledUnitRange, label, label_type, labelled, labelled_isequal, unlabel
using Test: @test, @test_broken, @testset
using TensorProducts: OneToOne, tensor_product

struct U1
n::Int
end
@@ -306,16 +305,3 @@
@test !isdual(dual(flip(a)))
end
end

@testset "dag" begin
elt = ComplexF64
r = gradedrange([U1(0) => 2, U1(1) => 3])
a = BlockSparseArray{elt}(undef, r, dual(r))
a[Block(1, 1)] = randn(elt, 2, 2)
a[Block(2, 2)] = randn(elt, 3, 3)
@test isdual.(axes(a)) == (false, true)
ad = dag(a)
@test Array(ad) == conj(Array(a))
@test isdual.(axes(ad)) == (true, false)
end
end