@@ -82,9 +82,9 @@ function uncombine(
# This is needed for reshaping the block
# TODO: It is already calculated in uncombine_output, use it from there
labels_uncomb_perm = setdiff(labels_dest, labels_src)
-ind_uncomb_perm = ⊗(
-  axes_dest[map(x -> findfirst(==(x), labels_dest), labels_uncomb_perm)]...
-)
+ind_uncomb_perm = ⊗(axes_dest[map(
+  x -> findfirst(==(x), labels_dest), labels_uncomb_perm
+)]...)
ind_uncomb = BlockArrays.blockedrange(
length.(BlockArrays.blocks(ind_uncomb_perm)[blockperm])
)
@@ -139,9 +139,9 @@ function uncombine_output(
blockcomb::Vector{Int},
)
labels_uncomb_perm = setdiff(labels_dest, labels_src)
-ind_uncomb_perm = ⊗(
-  axes_dest[map(x -> findfirst(==(x), labels_dest), labels_uncomb_perm)]...
-)
+ind_uncomb_perm = ⊗(axes_dest[map(
+  x -> findfirst(==(x), labels_dest), labels_uncomb_perm
+)]...)
axes_uncomb_perm = insertat(axes(a_src), ind_uncomb_perm, combdim)
# Uncombine the blocks of a_src
blocks_uncomb = uncombine_blocks(nzblocks(a_src), combdim, blockcomb)
2 changes: 1 addition & 1 deletion NDTensors/src/blocksparse/block.jl
@@ -68,7 +68,7 @@ convert(::Type{Block{N}}, t::Tuple) where {N} = Block{N}(t)

gethash(b::Block) = b.hash[]

-sethash!(b::Block, h::UInt) = (b.hash[] = h; return b)
+sethash!(b::Block, h::UInt) = (b.hash[]=h; return b)

#
# Basic functions
2 changes: 1 addition & 1 deletion NDTensors/src/dense/densetensor.jl
@@ -131,7 +131,7 @@ end
@propagate_inbounds @inline getindex(T::DenseTensor, i::Integer) = storage(T)[i]

@propagate_inbounds @inline function setindex!(T::DenseTensor, v, i::Integer)
-  return (storage(T)[i] = v; T)
+  return (storage(T)[i]=v; T)
end

#
2 changes: 1 addition & 1 deletion NDTensors/src/diag/tensoralgebra/contract.jl
@@ -93,7 +93,7 @@ function contract!(
# elements of A and B.
# `expose` allows dispatching on the data type
# in order to allow scalar indexing on GPU.
-expose(R)[] = mapreduce(*, +, diagview(T1), diagview(T2))
+expose(R)[] = mapreduce(*,+,diagview(T1),diagview(T2))
else
diagview(R) .= diagview(T1) .* diagview(T2)
end
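
As context for the comment above, here is a minimal, self-contained sketch of the wrapper-dispatch idea behind `expose`; the `Exposed` type below is illustrative, not NDTensors' actual `Expose` implementation:

```julia
# Wrap an array in a type that records where its data lives, so scalar
# indexing can dispatch on the device instead of erroring on a GPU array.
struct Exposed{device,A}
  parent::A
end
expose(a::AbstractArray) = Exposed{:cpu,typeof(a)}(a)

# CPU arrays support scalar indexing directly; a GPU backend could add
# methods for its own device tag that go through host memory instead.
Base.getindex(e::Exposed{:cpu}) = e.parent[]
Base.setindex!(e::Exposed{:cpu}, v) = (e.parent[] = v; e.parent)

r = fill(0.0)  # zero-dimensional array, like `R` in the contraction above
expose(r)[] = mapreduce(*, +, [1, 2], [3, 4])  # r[] == 11.0
```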
3 changes: 1 addition & 2 deletions NDTensors/src/lib/AllocateData/test/runtests.jl
@@ -14,8 +14,7 @@ const initializerss = ((undef,), (AllocateData.undef,), (zero_init,), ())
const axess = ((2, 2), (1:2, 1:2))
@testset "AllocateData (arraytype=$arraytype, eltype=$elt, initializer=$initializers, axes=$axes)" for arraytype in
arraytypes,
-  elt in elts,
-  initializers in initializerss,
+  elt in elts, initializers in initializerss,
axes in axess

a = allocate(arraytype{elt}, initializers..., axes)
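
Read off the testset above, the `allocate` API takes an array type, an optional initializer, and the axes; a usage sketch (signatures inferred from the loop, so treat the details as assumptions):

```julia
using NDTensors.AllocateData: allocate, zero_init

a = allocate(Array{Float64}, undef, (2, 2))          # uninitialized entries
b = allocate(Array{Float64}, zero_init, (1:2, 1:2))  # zero-filled
c = allocate(Array{Float64}, (2, 2))                 # default initializer
```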
5 changes: 3 additions & 2 deletions NDTensors/src/lib/BackendSelection/src/backend_types.jl
@@ -28,8 +28,9 @@ for type in (:Algorithm, :Backend)
function Base.show(io::IO, backend::$type)
return print(io, "$($type) type ", backend_string(backend), ", ", parameters(backend))
end
-Base.print(io::IO, backend::$type) =
-  print(io, backend_string(backend), ", ", parameters(backend))
+Base.print(io::IO, backend::$type) = print(
+  io, backend_string(backend), ", ", parameters(backend)
+)
end
end

4 changes: 2 additions & 2 deletions NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl
@@ -188,8 +188,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype
# Broken:
## @test b[Block()[]] == 2
for b in (
-  (b = copy(a); @allowscalar b[] = 2; b),
-  (b = copy(a); @allowscalar b[CartesianIndex()] = 2; b),
+  (b=copy(a); @allowscalar b[] = 2; b),
+  (b=copy(a); @allowscalar b[CartesianIndex()] = 2; b),
)
@test size(b) == ()
@test isone(length(b))
5 changes: 3 additions & 2 deletions NDTensors/src/lib/GradedAxes/src/labelledunitrangedual.jl
@@ -19,8 +19,9 @@ LabelledNumbers.unlabel(a::LabelledUnitRangeDual) = unlabel(nondual(a))
LabelledNumbers.LabelledStyle(::LabelledUnitRangeDual) = IsLabelled()

for f in [:first, :getindex, :last, :length, :step]
-@eval Base.$f(a::LabelledUnitRangeDual, args...) =
-  labelled($f(unlabel(a), args...), label(a))
+@eval Base.$f(a::LabelledUnitRangeDual, args...) = labelled(
+  $f(unlabel(a), args...), label(a)
+)
end

# fix ambiguities
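
The `for f in [...]` loop above is the standard Julia `@eval` idiom for generating a family of forwarding methods; a minimal illustration of the pattern (not the GradedAxes source, which additionally re-attaches the label via `labelled`):

```julia
struct Wrapped{T}
  value::T
end

# Generate one method per function name; each delegates to the wrapped value.
for f in [:first, :last, :length]
  @eval Base.$f(w::Wrapped, args...) = $f(w.value, args...)
end

length(Wrapped([10, 20, 30]))  # 3, forwarded to the inner vector
```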
15 changes: 9 additions & 6 deletions NDTensors/src/lib/LabelledNumbers/src/labelledinteger.jl
@@ -112,12 +112,15 @@ for f in [:rand, :randn]
)
return $f(rng, elt, (dim1, dims...))
end
-Base.$f(elt::Type{<:Number}, dims::Tuple{LabelledInteger,Vararg{LabelledInteger}}) =
-  $f(default_rng(), elt, dims)
-Base.$f(elt::Type{<:Number}, dim1::LabelledInteger, dims::Vararg{LabelledInteger}) =
-  $f(elt, (dim1, dims...))
-Base.$f(dims::Tuple{LabelledInteger,Vararg{LabelledInteger}}) =
-  $f(default_eltype(), dims)
+Base.$f(elt::Type{<:Number}, dims::Tuple{LabelledInteger,Vararg{LabelledInteger}}) = $f(
+  default_rng(), elt, dims
+)
+Base.$f(elt::Type{<:Number}, dim1::LabelledInteger, dims::Vararg{LabelledInteger}) = $f(
+  elt, (dim1, dims...)
+)
+Base.$f(dims::Tuple{LabelledInteger,Vararg{LabelledInteger}}) = $f(
+  default_eltype(), dims
+)
Base.$f(dim1::LabelledInteger, dims::Vararg{LabelledInteger}) = $f((dim1, dims...))
end
end
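
The overloads above let `rand` and `randn` accept labelled dimensions directly; a usage sketch, assuming the `LabelledNumbers` sublibrary is loaded (`labelled` is the constructor used elsewhere in this file):

```julia
using NDTensors.LabelledNumbers: labelled

d1 = labelled(2, "row")
d2 = labelled(3, "col")
# Forwards through the chain above:
# rand(d1, d2) -> rand((d1, d2)) -> rand(default_eltype(), dims)
#             -> rand(default_rng(), elt, dims)
a = rand(d1, d2)
size(a)  # (2, 3), with the default element type (typically Float64)
```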
10 changes: 6 additions & 4 deletions NDTensors/src/lib/NamedDimsArrays/src/constructors.jl
@@ -18,10 +18,12 @@ for f in [:rand, :randn]
)
return $f(rng, elt, (dim1, dims...))
end
-Base.$f(elt::Type{<:Number}, dims::Tuple{NamedInt,Vararg{NamedInt}}) =
-  $f(default_rng(), elt, dims)
-Base.$f(elt::Type{<:Number}, dim1::NamedInt, dims::Vararg{NamedInt}) =
-  $f(elt, (dim1, dims...))
+Base.$f(elt::Type{<:Number}, dims::Tuple{NamedInt,Vararg{NamedInt}}) = $f(
+  default_rng(), elt, dims
+)
+Base.$f(elt::Type{<:Number}, dim1::NamedInt, dims::Vararg{NamedInt}) = $f(
+  elt, (dim1, dims...)
+)
Base.$f(dims::Tuple{NamedInt,Vararg{NamedInt}}) = $f(default_eltype(), dims)
Base.$f(dim1::NamedInt, dims::Vararg{NamedInt}) = $f((dim1, dims...))
end
@@ -4,13 +4,19 @@ const MSmallSet{S,T} = SortedSet{T,MSmallVector{S,T}}

# Specialized constructors
@propagate_inbounds SmallSet{S}(; kwargs...) where {S} = SmallSet{S}([]; kwargs...)
-@propagate_inbounds SmallSet{S}(iter; kwargs...) where {S} =
-  SmallSet{S}(collect(iter); kwargs...)
-@propagate_inbounds SmallSet{S}(a::AbstractArray{I}; kwargs...) where {S,I} =
-  SmallSet{S,I}(a; kwargs...)
+@propagate_inbounds SmallSet{S}(iter; kwargs...) where {S} = SmallSet{S}(
+  collect(iter); kwargs...
+)
+@propagate_inbounds SmallSet{S}(a::AbstractArray{I}; kwargs...) where {S,I} = SmallSet{S,I}(
+  a; kwargs...
+)

@propagate_inbounds MSmallSet{S}(; kwargs...) where {S} = MSmallSet{S}([]; kwargs...)
-@propagate_inbounds MSmallSet{S}(iter; kwargs...) where {S} =
-  MSmallSet{S}(collect(iter); kwargs...)
-@propagate_inbounds MSmallSet{S}(a::AbstractArray{I}; kwargs...) where {S,I} =
-  MSmallSet{S,I}(a; kwargs...)
+@propagate_inbounds MSmallSet{S}(iter; kwargs...) where {S} = MSmallSet{S}(
+  collect(iter); kwargs...
+)
+@propagate_inbounds MSmallSet{S}(a::AbstractArray{I}; kwargs...) where {S,I} = MSmallSet{
+  S,I
+}(
+  a; kwargs...
+)
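
A brief usage sketch of these constructors, assuming (based on the `MSmallVector{S,T}` alias above) that `S` is the fixed capacity of the backing small vector:

```julia
using NDTensors.SortedSets: SmallSet, MSmallSet

s = SmallSet{10}([3, 1, 2])   # immutable; eltype inferred from the array
m = MSmallSet{10}((1, 2, 3))  # mutable variant; the iterable is collected first
```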
10 changes: 6 additions & 4 deletions NDTensors/src/lib/SortedSets/src/abstractwrappedset.jl
@@ -28,8 +28,9 @@ end

@inline Dictionaries.istokenizable(set::AbstractWrappedSet) = istokenizable(parent(set))
@inline Dictionaries.tokentype(set::AbstractWrappedSet) = tokentype(parent(set))
-@inline Dictionaries.iteratetoken(set::AbstractWrappedSet, s...) =
-  iterate(parent(set), s...)
+@inline Dictionaries.iteratetoken(set::AbstractWrappedSet, s...) = iterate(
+  parent(set), s...
+)
@inline function Dictionaries.iteratetoken_reverse(set::AbstractWrappedSet)
return iteratetoken_reverse(parent(set))
end
@@ -40,8 +41,9 @@ end
@inline function Dictionaries.gettoken(set::AbstractWrappedSet, i)
return gettoken(parent(set), i)
end
-@propagate_inbounds Dictionaries.gettokenvalue(set::AbstractWrappedSet, x) =
-  gettokenvalue(parent(set), x)
+@propagate_inbounds Dictionaries.gettokenvalue(set::AbstractWrappedSet, x) = gettokenvalue(
+  parent(set), x
+)

@inline Dictionaries.isinsertable(set::AbstractWrappedSet) = isinsertable(parent(set))

62 changes: 38 additions & 24 deletions NDTensors/src/lib/SortedSets/src/sortedset.jl
@@ -10,9 +10,11 @@ small collections. Larger collections are better handled by containers like `Ind
struct SortedSet{T,Data<:AbstractArray{T},Order<:Ordering} <: AbstractSet{T}
data::Data
order::Order
-global @inline _SortedSet(
-  data::Data, order::Order
-) where {T,Data<:AbstractArray{T},Order<:Ordering} = new{T,Data,Order}(data, order)
+global @inline _SortedSet(data::Data, order::Order) where {T,Data<:AbstractArray{T},Order<:Ordering} = new{
+  T,Data,Order
+}(
+  data, order
+)
end

@inline Base.parent(set::SortedSet) = getfield(set, :data)
@@ -63,29 +65,39 @@ end
end

# Traits
-@inline SmallVectors.InsertStyle(::Type{<:SortedSet{T,Data}}) where {T,Data} =
-  InsertStyle(Data)
+@inline SmallVectors.InsertStyle(::Type{<:SortedSet{T,Data}}) where {T,Data} = InsertStyle(
+  Data
+)
@inline SmallVectors.thaw(set::SortedSet) = SortedSet(thaw(parent(set)), order(set))
@inline SmallVectors.freeze(set::SortedSet) = SortedSet(freeze(parent(set)), order(set))

@propagate_inbounds SortedSet(; kwargs...) = SortedSet{Any}([]; kwargs...)
-@propagate_inbounds SortedSet{T}(; kwargs...) where {T} =
-  SortedSet{T,Vector{T}}(T[]; kwargs...)
-@propagate_inbounds SortedSet{T,Data}(; kwargs...) where {T,Data} =
-  SortedSet{T}(Data(); kwargs...)
+@propagate_inbounds SortedSet{T}(; kwargs...) where {T} = SortedSet{T,Vector{T}}(
+  T[]; kwargs...
+)
+@propagate_inbounds SortedSet{T,Data}(; kwargs...) where {T,Data} = SortedSet{T}(
+  Data(); kwargs...
+)

@propagate_inbounds SortedSet(iter; kwargs...) = SortedSet(collect(iter); kwargs...)
-@propagate_inbounds SortedSet{T}(iter; kwargs...) where {T} =
-  SortedSet{T}(collect(T, iter); kwargs...)
-
-@propagate_inbounds SortedSet(a::AbstractArray{T}; kwargs...) where {T} =
-  SortedSet{T}(a; kwargs...)
-@propagate_inbounds SortedSet{T}(a::AbstractArray{T}; kwargs...) where {T} =
-  SortedSet{T,typeof(a)}(a; kwargs...)
-
-@propagate_inbounds SortedSet{T,Data}(
-  a::AbstractArray; kwargs...
-) where {T,Data<:AbstractArray{T}} = SortedSet{T,Data}(Data(a); kwargs...)
+@propagate_inbounds SortedSet{T}(iter; kwargs...) where {T} = SortedSet{T}(
+  collect(T, iter); kwargs...
+)
+
+@propagate_inbounds SortedSet(a::AbstractArray{T}; kwargs...) where {T} = SortedSet{T}(
+  a; kwargs...
+)
+@propagate_inbounds SortedSet{T}(a::AbstractArray{T}; kwargs...) where {T} = SortedSet{
+  T,typeof(a)
+}(
+  a; kwargs...
+)
+
+@propagate_inbounds SortedSet{T,Data}(a::AbstractArray; kwargs...) where {T,Data<:AbstractArray{T}} = SortedSet{
+  T,Data
+}(
+  Data(a); kwargs...
+)

function Base.convert(::Type{AbstractIndices{T}}, set::SortedSet) where {T}
return convert(SortedSet{T}, set)
@@ -162,8 +174,9 @@ end

@inline Dictionaries.istokenizable(::SortedSet) = true
@inline Dictionaries.tokentype(::SortedSet) = Int
-@inline Dictionaries.iteratetoken(set::SortedSet, s...) =
-  iterate(LinearIndices(parent(set)), s...)
+@inline Dictionaries.iteratetoken(set::SortedSet, s...) = iterate(
+  LinearIndices(parent(set)), s...
+)
@inline function Dictionaries.iteratetoken_reverse(set::SortedSet)
li = LinearIndices(parent(set))
if isempty(li)
@@ -221,8 +234,9 @@ end
# TODO: Make into `MSmallVector`?
# More generally, make a `thaw(::AbstractArray)` function to return
# a mutable version of an AbstractArray.
-@inline Dictionaries.empty_type(::Type{SortedSet{T,D,Order}}, ::Type{T}) where {T,D,Order} =
-  SortedSet{T,Dictionaries.empty_type(D, T),Order}
+@inline Dictionaries.empty_type(::Type{SortedSet{T,D,Order}}, ::Type{T}) where {T,D,Order} = SortedSet{
+  T,Dictionaries.empty_type(D, T),Order
+}

@inline Dictionaries.empty_type(::Type{<:AbstractVector}, ::Type{T}) where {T} = Vector{T}

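
For context, a usage sketch of the constructors above; behavior is inferred from these definitions (`SortedSet` keeps its elements sorted in the wrapped array, which is what lets token iteration use `LinearIndices`):

```julia
using NDTensors.SortedSets: SortedSet

s = SortedSet([3, 1, 2])  # wraps a Vector{Int}, stored in sorted order
first(s)                  # 1, the smallest element under the default ordering
2 in s                    # true; lookup can binary-search the sorted data
```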
@@ -244,7 +244,7 @@ using Test: @test, @testset
end

a = SparseArray{Matrix{elt}}(
-  2, 3; zero=(a, I) -> (z = similar(eltype(a), 2, 3); fill!(z, false); z)
+  2, 3; zero=(a, I) -> (z=similar(eltype(a), 2, 3); fill!(z, false); z)
)
a[1, 2] = randn(elt, 2, 3)
b = NestedPermutedDimsArray(a, (2, 1))
19 changes: 13 additions & 6 deletions NDTensors/src/lib/TagSets/src/TagSets.jl
@@ -45,10 +45,14 @@ for (SetTyp, TagSetTyp) in ((:SmallSet, :SmallTagSet), (:MSmallSet, :MSmallTagSe
return TagSet($SetTyp{S,I}(a; kwargs...))
end
@propagate_inbounds $TagSetTyp{S}(; kwargs...) where {S} = $TagSetTyp{S}([]; kwargs...)
-@propagate_inbounds $TagSetTyp{S}(iter; kwargs...) where {S} =
-  $TagSetTyp{S}(collect(iter); kwargs...)
-@propagate_inbounds $TagSetTyp{S}(a::AbstractArray{I}; kwargs...) where {S,I} =
-  $TagSetTyp{S,I}(a; kwargs...)
+@propagate_inbounds $TagSetTyp{S}(iter; kwargs...) where {S} = $TagSetTyp{S}(
+  collect(iter); kwargs...
+)
+@propagate_inbounds $TagSetTyp{S}(a::AbstractArray{I}; kwargs...) where {S,I} = $TagSetTyp{
+  S,I
+}(
+  a; kwargs...
+)
# Strings get split by a deliminator.
function $TagSetTyp{S}(str::T; kwargs...) where {S,T<:AbstractString}
return $TagSetTyp{S,T}(str, kwargs...)
@@ -68,8 +72,11 @@ Base.parent(set::TagSet) = getfield(set, :data)

# AbstractWrappedSet interface.
# Specialized version when they are the same data type is faster.
-@inline SortedSets.rewrap(::TagSet{T,D}, data::D) where {T,D<:AbstractIndices{T}} =
-  TagSet{T,D}(data)
+@inline SortedSets.rewrap(::TagSet{T,D}, data::D) where {T,D<:AbstractIndices{T}} = TagSet{
+  T,D
+}(
+  data
+)
@inline SortedSets.rewrap(::TagSet, data) = TagSet(data)

# TagSet interface
@@ -39,13 +39,13 @@ using SimpleTraits: Not, @traitfn

@traitfn function unwrap_array_type(
arraytype::Type{ArrayType}
-) where {ArrayType; IsWrappedArray{ArrayType}}
+) where {ArrayType;IsWrappedArray{ArrayType}}
return unwrap_array_type(parenttype(arraytype))
end

@traitfn function unwrap_array_type(
arraytype::Type{ArrayType}
-) where {ArrayType; !IsWrappedArray{ArrayType}}
+) where {ArrayType;!IsWrappedArray{ArrayType}}
return arraytype
end

@@ -58,7 +58,7 @@ end

@traitfn function set_eltype(
type::Type{ArrayType}, param
-) where {ArrayType <: AbstractArray; IsWrappedArray{ArrayType}}
+) where {ArrayType<:AbstractArray;IsWrappedArray{ArrayType}}
new_parenttype = set_eltype(parenttype(type), param)
# Need to set both in one `set_type_parameters` call to avoid
# conflicts in type parameter constraints of certain wrapper types.
@@ -67,7 +67,7 @@ end

@traitfn function set_eltype(
type::Type{ArrayType}, param
-) where {ArrayType <: AbstractArray; !IsWrappedArray{ArrayType}}
+) where {ArrayType<:AbstractArray;!IsWrappedArray{ArrayType}}
return set_type_parameter(type, eltype, param)
end

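
The two trait branches above implement a recursive unwrapping of array wrapper types; a self-contained sketch of the same recursion without SimpleTraits (the helpers below are illustrative stand-ins for NDTensors' `parenttype`/`IsWrappedArray` machinery):

```julia
using LinearAlgebra: Transpose

parenttype(::Type{<:Transpose{T,P}}) where {T,P} = P
iswrapped(::Type{<:Transpose}) = true
iswrapped(::Type{<:AbstractArray}) = false

unwrap_array_type(arraytype::Type{<:AbstractArray}) =
  iswrapped(arraytype) ? unwrap_array_type(parenttype(arraytype)) : arraytype

unwrap_array_type(Transpose{Float64,Matrix{Float64}})  # Matrix{Float64}
```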