diff --git a/NDTensors/src/backup/arraystorage/blocksparsearray/storage/combiner/contract_uncombine.jl b/NDTensors/src/backup/arraystorage/blocksparsearray/storage/combiner/contract_uncombine.jl
index 742b900235..f4697d4768 100644
--- a/NDTensors/src/backup/arraystorage/blocksparsearray/storage/combiner/contract_uncombine.jl
+++ b/NDTensors/src/backup/arraystorage/blocksparsearray/storage/combiner/contract_uncombine.jl
@@ -82,9 +82,9 @@ function uncombine(
   # This is needed for reshaping the block
   # TODO: It is already calculated in uncombine_output, use it from there
   labels_uncomb_perm = setdiff(labels_dest, labels_src)
-  ind_uncomb_perm = ⊗(
-    axes_dest[map(x -> findfirst(==(x), labels_dest), labels_uncomb_perm)]...
-  )
+  ind_uncomb_perm = ⊗(axes_dest[map(
+    x -> findfirst(==(x), labels_dest), labels_uncomb_perm
+  )]...)
   ind_uncomb = BlockArrays.blockedrange(
     length.(BlockArrays.blocks(ind_uncomb_perm)[blockperm])
   )
@@ -139,9 +139,9 @@ function uncombine_output(
   blockcomb::Vector{Int},
 )
   labels_uncomb_perm = setdiff(labels_dest, labels_src)
-  ind_uncomb_perm = ⊗(
-    axes_dest[map(x -> findfirst(==(x), labels_dest), labels_uncomb_perm)]...
-  )
+  ind_uncomb_perm = ⊗(axes_dest[map(
+    x -> findfirst(==(x), labels_dest), labels_uncomb_perm
+  )]...)
   axes_uncomb_perm = insertat(axes(a_src), ind_uncomb_perm, combdim)
   # Uncombine the blocks of a_src
   blocks_uncomb = uncombine_blocks(nzblocks(a_src), combdim, blockcomb)
diff --git a/NDTensors/src/blocksparse/block.jl b/NDTensors/src/blocksparse/block.jl
index 5094669003..7bd3d54da0 100644
--- a/NDTensors/src/blocksparse/block.jl
+++ b/NDTensors/src/blocksparse/block.jl
@@ -68,7 +68,7 @@ convert(::Type{Block{N}}, t::Tuple) where {N} = Block{N}(t)
 
 gethash(b::Block) = b.hash[]
 
-sethash!(b::Block, h::UInt) = (b.hash[] = h; return b)
+sethash!(b::Block, h::UInt) = (b.hash[]=h; return b)
 
 #
 # Basic functions
diff --git a/NDTensors/src/dense/densetensor.jl b/NDTensors/src/dense/densetensor.jl
index 119934bdfa..16e301a2d3 100644
--- a/NDTensors/src/dense/densetensor.jl
+++ b/NDTensors/src/dense/densetensor.jl
@@ -131,7 +131,7 @@ end
 @propagate_inbounds @inline getindex(T::DenseTensor, i::Integer) = storage(T)[i]
 
 @propagate_inbounds @inline function setindex!(T::DenseTensor, v, i::Integer)
-  return (storage(T)[i] = v; T)
+  return (storage(T)[i]=v; T)
 end
 
 #
diff --git a/NDTensors/src/diag/tensoralgebra/contract.jl b/NDTensors/src/diag/tensoralgebra/contract.jl
index 085737c890..6244c0dffd 100644
--- a/NDTensors/src/diag/tensoralgebra/contract.jl
+++ b/NDTensors/src/diag/tensoralgebra/contract.jl
@@ -93,7 +93,7 @@ function contract!(
     # elements of A and B.
     # `expose` allows dispatching on the data type
     # in order to allow scalar indexing on GPU.
-    expose(R)[] = mapreduce(*, +, diagview(T1), diagview(T2))
+    expose(R)[] = mapreduce(*,+,diagview(T1),diagview(T2))
   else
     diagview(R) .= diagview(T1) .* diagview(T2)
   end
diff --git a/NDTensors/src/lib/AllocateData/test/runtests.jl b/NDTensors/src/lib/AllocateData/test/runtests.jl
index b824651feb..983af1ad88 100644
--- a/NDTensors/src/lib/AllocateData/test/runtests.jl
+++ b/NDTensors/src/lib/AllocateData/test/runtests.jl
@@ -14,8 +14,7 @@ const initializerss = ((undef,), (AllocateData.undef,), (zero_init,), ())
 const axess = ((2, 2), (1:2, 1:2))
 @testset "AllocateData (arraytype=$arraytype, eltype=$elt, initializer=$initializers, axes=$axes)" for arraytype in
                                                                                                        arraytypes,
-  elt in elts,
-  initializers in initializerss,
+  elt in elts, initializers in initializerss,
   axes in axess
 
   a = allocate(arraytype{elt}, initializers..., axes)
diff --git a/NDTensors/src/lib/BackendSelection/src/backend_types.jl b/NDTensors/src/lib/BackendSelection/src/backend_types.jl
index a07a46f22c..ed4002f339 100644
--- a/NDTensors/src/lib/BackendSelection/src/backend_types.jl
+++ b/NDTensors/src/lib/BackendSelection/src/backend_types.jl
@@ -28,8 +28,9 @@ for type in (:Algorithm, :Backend)
   function Base.show(io::IO, backend::$type)
     return print(io, "$($type) type ", backend_string(backend), ", ", parameters(backend))
   end
-  Base.print(io::IO, backend::$type) =
-    print(io, backend_string(backend), ", ", parameters(backend))
+  Base.print(io::IO, backend::$type) = print(
+    io, backend_string(backend), ", ", parameters(backend)
+  )
 end
 end
 
diff --git a/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl b/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl
index 13eda130c2..40befbd6d7 100644
--- a/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl
+++ b/NDTensors/src/lib/BlockSparseArrays/test/test_basics.jl
@@ -188,8 +188,8 @@ using .NDTensorsTestUtils: devices_list, is_supported_eltype
     # Broken:
     ## @test b[Block()[]] == 2
     for b in (
-      (b = copy(a); @allowscalar b[] = 2; b),
-      (b = copy(a); @allowscalar b[CartesianIndex()] = 2; b),
+      (b=copy(a); @allowscalar b[] = 2; b),
+      (b=copy(a); @allowscalar b[CartesianIndex()] = 2; b),
     )
       @test size(b) == ()
       @test isone(length(b))
diff --git a/NDTensors/src/lib/GradedAxes/src/labelledunitrangedual.jl b/NDTensors/src/lib/GradedAxes/src/labelledunitrangedual.jl
index 466d64945b..e52c441472 100644
--- a/NDTensors/src/lib/GradedAxes/src/labelledunitrangedual.jl
+++ b/NDTensors/src/lib/GradedAxes/src/labelledunitrangedual.jl
@@ -19,8 +19,9 @@ LabelledNumbers.unlabel(a::LabelledUnitRangeDual) = unlabel(nondual(a))
 LabelledNumbers.LabelledStyle(::LabelledUnitRangeDual) = IsLabelled()
 
 for f in [:first, :getindex, :last, :length, :step]
-  @eval Base.$f(a::LabelledUnitRangeDual, args...) =
-    labelled($f(unlabel(a), args...), label(a))
+  @eval Base.$f(a::LabelledUnitRangeDual, args...) = labelled(
+    $f(unlabel(a), args...), label(a)
+  )
 end
 
 # fix ambiguities
diff --git a/NDTensors/src/lib/LabelledNumbers/src/labelledinteger.jl b/NDTensors/src/lib/LabelledNumbers/src/labelledinteger.jl
index 56ad8e30fc..2b7cd19fde 100644
--- a/NDTensors/src/lib/LabelledNumbers/src/labelledinteger.jl
+++ b/NDTensors/src/lib/LabelledNumbers/src/labelledinteger.jl
@@ -112,12 +112,15 @@ for f in [:rand, :randn]
   )
     return $f(rng, elt, (dim1, dims...))
   end
-  Base.$f(elt::Type{<:Number}, dims::Tuple{LabelledInteger,Vararg{LabelledInteger}}) =
-    $f(default_rng(), elt, dims)
-  Base.$f(elt::Type{<:Number}, dim1::LabelledInteger, dims::Vararg{LabelledInteger}) =
-    $f(elt, (dim1, dims...))
-  Base.$f(dims::Tuple{LabelledInteger,Vararg{LabelledInteger}}) =
-    $f(default_eltype(), dims)
+  Base.$f(elt::Type{<:Number}, dims::Tuple{LabelledInteger,Vararg{LabelledInteger}}) = $f(
+    default_rng(), elt, dims
+  )
+  Base.$f(elt::Type{<:Number}, dim1::LabelledInteger, dims::Vararg{LabelledInteger}) = $f(
+    elt, (dim1, dims...)
+  )
+  Base.$f(dims::Tuple{LabelledInteger,Vararg{LabelledInteger}}) = $f(
+    default_eltype(), dims
+  )
   Base.$f(dim1::LabelledInteger, dims::Vararg{LabelledInteger}) = $f((dim1, dims...))
 end
 end
diff --git a/NDTensors/src/lib/NamedDimsArrays/src/constructors.jl b/NDTensors/src/lib/NamedDimsArrays/src/constructors.jl
index 9e167beea0..9c7643aabf 100644
--- a/NDTensors/src/lib/NamedDimsArrays/src/constructors.jl
+++ b/NDTensors/src/lib/NamedDimsArrays/src/constructors.jl
@@ -18,10 +18,12 @@ for f in [:rand, :randn]
   )
     return $f(rng, elt, (dim1, dims...))
   end
-  Base.$f(elt::Type{<:Number}, dims::Tuple{NamedInt,Vararg{NamedInt}}) =
-    $f(default_rng(), elt, dims)
-  Base.$f(elt::Type{<:Number}, dim1::NamedInt, dims::Vararg{NamedInt}) =
-    $f(elt, (dim1, dims...))
+  Base.$f(elt::Type{<:Number}, dims::Tuple{NamedInt,Vararg{NamedInt}}) = $f(
+    default_rng(), elt, dims
+  )
+  Base.$f(elt::Type{<:Number}, dim1::NamedInt, dims::Vararg{NamedInt}) = $f(
+    elt, (dim1, dims...)
+  )
   Base.$f(dims::Tuple{NamedInt,Vararg{NamedInt}}) = $f(default_eltype(), dims)
   Base.$f(dim1::NamedInt, dims::Vararg{NamedInt}) = $f((dim1, dims...))
 end
diff --git a/NDTensors/src/lib/SortedSets/src/SortedSetsSmallVectorsExt/smallset.jl b/NDTensors/src/lib/SortedSets/src/SortedSetsSmallVectorsExt/smallset.jl
index 8c70f8fe60..67ad94d403 100644
--- a/NDTensors/src/lib/SortedSets/src/SortedSetsSmallVectorsExt/smallset.jl
+++ b/NDTensors/src/lib/SortedSets/src/SortedSetsSmallVectorsExt/smallset.jl
@@ -4,13 +4,19 @@ const MSmallSet{S,T} = SortedSet{T,MSmallVector{S,T}}
 
 # Specialized constructors
 @propagate_inbounds SmallSet{S}(; kwargs...) where {S} = SmallSet{S}([]; kwargs...)
-@propagate_inbounds SmallSet{S}(iter; kwargs...) where {S} =
-  SmallSet{S}(collect(iter); kwargs...)
-@propagate_inbounds SmallSet{S}(a::AbstractArray{I}; kwargs...) where {S,I} =
-  SmallSet{S,I}(a; kwargs...)
+@propagate_inbounds SmallSet{S}(iter; kwargs...) where {S} = SmallSet{S}(
+  collect(iter); kwargs...
+)
+@propagate_inbounds SmallSet{S}(a::AbstractArray{I}; kwargs...) where {S,I} = SmallSet{S,I}(
+  a; kwargs...
+)
 
 @propagate_inbounds MSmallSet{S}(; kwargs...) where {S} = MSmallSet{S}([]; kwargs...)
-@propagate_inbounds MSmallSet{S}(iter; kwargs...) where {S} =
-  MSmallSet{S}(collect(iter); kwargs...)
-@propagate_inbounds MSmallSet{S}(a::AbstractArray{I}; kwargs...) where {S,I} =
-  MSmallSet{S,I}(a; kwargs...)
+@propagate_inbounds MSmallSet{S}(iter; kwargs...) where {S} = MSmallSet{S}(
+  collect(iter); kwargs...
+)
+@propagate_inbounds MSmallSet{S}(a::AbstractArray{I}; kwargs...) where {S,I} = MSmallSet{
+  S,I
+}(
+  a; kwargs...
+)
diff --git a/NDTensors/src/lib/SortedSets/src/abstractwrappedset.jl b/NDTensors/src/lib/SortedSets/src/abstractwrappedset.jl
index e1a1094ff8..a9689513d2 100644
--- a/NDTensors/src/lib/SortedSets/src/abstractwrappedset.jl
+++ b/NDTensors/src/lib/SortedSets/src/abstractwrappedset.jl
@@ -28,8 +28,9 @@ end
 
 @inline Dictionaries.istokenizable(set::AbstractWrappedSet) = istokenizable(parent(set))
 @inline Dictionaries.tokentype(set::AbstractWrappedSet) = tokentype(parent(set))
-@inline Dictionaries.iteratetoken(set::AbstractWrappedSet, s...) =
-  iterate(parent(set), s...)
+@inline Dictionaries.iteratetoken(set::AbstractWrappedSet, s...) = iterate(
+  parent(set), s...
+)
 @inline function Dictionaries.iteratetoken_reverse(set::AbstractWrappedSet)
   return iteratetoken_reverse(parent(set))
 end
@@ -40,8 +41,9 @@ end
 @inline function Dictionaries.gettoken(set::AbstractWrappedSet, i)
   return gettoken(parent(set), i)
 end
-@propagate_inbounds Dictionaries.gettokenvalue(set::AbstractWrappedSet, x) =
-  gettokenvalue(parent(set), x)
+@propagate_inbounds Dictionaries.gettokenvalue(set::AbstractWrappedSet, x) = gettokenvalue(
+  parent(set), x
+)
 
 @inline Dictionaries.isinsertable(set::AbstractWrappedSet) = isinsertable(parent(set))
 
diff --git a/NDTensors/src/lib/SortedSets/src/sortedset.jl b/NDTensors/src/lib/SortedSets/src/sortedset.jl
index dff1b441a0..516e2c5ad7 100644
--- a/NDTensors/src/lib/SortedSets/src/sortedset.jl
+++ b/NDTensors/src/lib/SortedSets/src/sortedset.jl
@@ -10,9 +10,11 @@ small collections. Larger collections are better handled by containers like `Indices`.
 struct SortedSet{T,Data<:AbstractArray{T},Order<:Ordering} <: AbstractSet{T}
   data::Data
   order::Order
-  global @inline _SortedSet(
-    data::Data, order::Order
-  ) where {T,Data<:AbstractArray{T},Order<:Ordering} = new{T,Data,Order}(data, order)
+  global @inline _SortedSet(data::Data, order::Order) where {T,Data<:AbstractArray{T},Order<:Ordering} = new{
+    T,Data,Order
+  }(
+    data, order
+  )
 end
 
 @inline Base.parent(set::SortedSet) = getfield(set, :data)
@@ -63,29 +65,39 @@ end
 end
 
 # Traits
-@inline SmallVectors.InsertStyle(::Type{<:SortedSet{T,Data}}) where {T,Data} =
-  InsertStyle(Data)
+@inline SmallVectors.InsertStyle(::Type{<:SortedSet{T,Data}}) where {T,Data} = InsertStyle(
+  Data
+)
 @inline SmallVectors.thaw(set::SortedSet) = SortedSet(thaw(parent(set)), order(set))
 @inline SmallVectors.freeze(set::SortedSet) = SortedSet(freeze(parent(set)), order(set))
 
 @propagate_inbounds SortedSet(; kwargs...) = SortedSet{Any}([]; kwargs...)
-@propagate_inbounds SortedSet{T}(; kwargs...) where {T} =
-  SortedSet{T,Vector{T}}(T[]; kwargs...)
-@propagate_inbounds SortedSet{T,Data}(; kwargs...) where {T,Data} =
-  SortedSet{T}(Data(); kwargs...)
+@propagate_inbounds SortedSet{T}(; kwargs...) where {T} = SortedSet{T,Vector{T}}(
+  T[]; kwargs...
+)
+@propagate_inbounds SortedSet{T,Data}(; kwargs...) where {T,Data} = SortedSet{T}(
+  Data(); kwargs...
+)
 
 @propagate_inbounds SortedSet(iter; kwargs...) = SortedSet(collect(iter); kwargs...)
-@propagate_inbounds SortedSet{T}(iter; kwargs...) where {T} =
-  SortedSet{T}(collect(T, iter); kwargs...)
-
-@propagate_inbounds SortedSet(a::AbstractArray{T}; kwargs...) where {T} =
-  SortedSet{T}(a; kwargs...)
-@propagate_inbounds SortedSet{T}(a::AbstractArray{T}; kwargs...) where {T} =
-  SortedSet{T,typeof(a)}(a; kwargs...)
-
-@propagate_inbounds SortedSet{T,Data}(
-  a::AbstractArray; kwargs...
-) where {T,Data<:AbstractArray{T}} = SortedSet{T,Data}(Data(a); kwargs...)
+@propagate_inbounds SortedSet{T}(iter; kwargs...) where {T} = SortedSet{T}(
+  collect(T, iter); kwargs...
+)
+
+@propagate_inbounds SortedSet(a::AbstractArray{T}; kwargs...) where {T} = SortedSet{T}(
+  a; kwargs...
+)
+@propagate_inbounds SortedSet{T}(a::AbstractArray{T}; kwargs...) where {T} = SortedSet{
+  T,typeof(a)
+}(
+  a; kwargs...
+)
+
+@propagate_inbounds SortedSet{T,Data}(a::AbstractArray; kwargs...) where {T,Data<:AbstractArray{T}} = SortedSet{
+  T,Data
+}(
+  Data(a); kwargs...
+)
 
 function Base.convert(::Type{AbstractIndices{T}}, set::SortedSet) where {T}
   return convert(SortedSet{T}, set)
@@ -162,8 +174,9 @@ end
 
 @inline Dictionaries.istokenizable(::SortedSet) = true
 @inline Dictionaries.tokentype(::SortedSet) = Int
-@inline Dictionaries.iteratetoken(set::SortedSet, s...) =
-  iterate(LinearIndices(parent(set)), s...)
+@inline Dictionaries.iteratetoken(set::SortedSet, s...) = iterate(
+  LinearIndices(parent(set)), s...
+)
 @inline function Dictionaries.iteratetoken_reverse(set::SortedSet)
   li = LinearIndices(parent(set))
   if isempty(li)
@@ -221,8 +234,9 @@ end
 
 # TODO: Make into `MSmallVector`?
 # More generally, make a `thaw(::AbstractArray)` function to return
 # a mutable version of an AbstractArray.
-@inline Dictionaries.empty_type(::Type{SortedSet{T,D,Order}}, ::Type{T}) where {T,D,Order} =
-  SortedSet{T,Dictionaries.empty_type(D, T),Order}
+@inline Dictionaries.empty_type(::Type{SortedSet{T,D,Order}}, ::Type{T}) where {T,D,Order} = SortedSet{
+  T,Dictionaries.empty_type(D, T),Order
+}
 
 @inline Dictionaries.empty_type(::Type{<:AbstractVector}, ::Type{T}) where {T} = Vector{T}
diff --git a/NDTensors/src/lib/SparseArraysBase/test/test_abstractsparsearray.jl b/NDTensors/src/lib/SparseArraysBase/test/test_abstractsparsearray.jl
index b64955cb55..8b0eb32d54 100644
--- a/NDTensors/src/lib/SparseArraysBase/test/test_abstractsparsearray.jl
+++ b/NDTensors/src/lib/SparseArraysBase/test/test_abstractsparsearray.jl
@@ -244,7 +244,7 @@ using Test: @test, @testset
   end
 
   a = SparseArray{Matrix{elt}}(
-    2, 3; zero=(a, I) -> (z = similar(eltype(a), 2, 3); fill!(z, false); z)
+    2, 3; zero=(a, I) -> (z=similar(eltype(a), 2, 3); fill!(z, false); z)
   )
   a[1, 2] = randn(elt, 2, 3)
   b = NestedPermutedDimsArray(a, (2, 1))
diff --git a/NDTensors/src/lib/TagSets/src/TagSets.jl b/NDTensors/src/lib/TagSets/src/TagSets.jl
index c4d9f1013f..a77a5b5ead 100644
--- a/NDTensors/src/lib/TagSets/src/TagSets.jl
+++ b/NDTensors/src/lib/TagSets/src/TagSets.jl
@@ -45,10 +45,14 @@ for (SetTyp, TagSetTyp) in ((:SmallSet, :SmallTagSet), (:MSmallSet, :MSmallTagSet))
     return TagSet($SetTyp{S,I}(a; kwargs...))
   end
   @propagate_inbounds $TagSetTyp{S}(; kwargs...) where {S} = $TagSetTyp{S}([]; kwargs...)
-  @propagate_inbounds $TagSetTyp{S}(iter; kwargs...) where {S} =
-    $TagSetTyp{S}(collect(iter); kwargs...)
-  @propagate_inbounds $TagSetTyp{S}(a::AbstractArray{I}; kwargs...) where {S,I} =
-    $TagSetTyp{S,I}(a; kwargs...)
+  @propagate_inbounds $TagSetTyp{S}(iter; kwargs...) where {S} = $TagSetTyp{S}(
+    collect(iter); kwargs...
+  )
+  @propagate_inbounds $TagSetTyp{S}(a::AbstractArray{I}; kwargs...) where {S,I} = $TagSetTyp{
+    S,I
+  }(
+    a; kwargs...
+  )
   # Strings get split by a deliminator.
   function $TagSetTyp{S}(str::T; kwargs...) where {S,T<:AbstractString}
     return $TagSetTyp{S,T}(str, kwargs...)
@@ -68,8 +72,11 @@ Base.parent(set::TagSet) = getfield(set, :data)
 
 # AbstractWrappedSet interface.
 # Specialized version when they are the same data type is faster.
-@inline SortedSets.rewrap(::TagSet{T,D}, data::D) where {T,D<:AbstractIndices{T}} =
-  TagSet{T,D}(data)
+@inline SortedSets.rewrap(::TagSet{T,D}, data::D) where {T,D<:AbstractIndices{T}} = TagSet{
+  T,D
+}(
+  data
+)
 @inline SortedSets.rewrap(::TagSet, data) = TagSet(data)
 
 # TagSet interface
diff --git a/NDTensors/src/lib/TypeParameterAccessors/src/base/abstractarray.jl b/NDTensors/src/lib/TypeParameterAccessors/src/base/abstractarray.jl
index 89e38d3b2f..77d3117088 100644
--- a/NDTensors/src/lib/TypeParameterAccessors/src/base/abstractarray.jl
+++ b/NDTensors/src/lib/TypeParameterAccessors/src/base/abstractarray.jl
@@ -39,13 +39,13 @@ using SimpleTraits: Not, @traitfn
 
 @traitfn function unwrap_array_type(
   arraytype::Type{ArrayType}
-) where {ArrayType; IsWrappedArray{ArrayType}}
+) where {ArrayType;IsWrappedArray{ArrayType}}
   return unwrap_array_type(parenttype(arraytype))
 end
 
 @traitfn function unwrap_array_type(
   arraytype::Type{ArrayType}
-) where {ArrayType; !IsWrappedArray{ArrayType}}
+) where {ArrayType;!IsWrappedArray{ArrayType}}
   return arraytype
 end
 
@@ -58,7 +58,7 @@ end
 
 @traitfn function set_eltype(
   type::Type{ArrayType}, param
-) where {ArrayType <: AbstractArray; IsWrappedArray{ArrayType}}
+) where {ArrayType<:AbstractArray;IsWrappedArray{ArrayType}}
   new_parenttype = set_eltype(parenttype(type), param)
   # Need to set both in one `set_type_parameters` call to avoid
   # conflicts in type parameter constraints of certain wrapper types.
@@ -67,7 +67,7 @@ end
 
 @traitfn function set_eltype(
   type::Type{ArrayType}, param
-) where {ArrayType <: AbstractArray; !IsWrappedArray{ArrayType}}
+) where {ArrayType<:AbstractArray;!IsWrappedArray{ArrayType}}
   return set_type_parameter(type, eltype, param)
 end
 
diff --git a/NDTensors/src/lib/TypeParameterAccessors/src/base/similartype.jl b/NDTensors/src/lib/TypeParameterAccessors/src/base/similartype.jl
index f6fed09885..c9349f0b85 100644
--- a/NDTensors/src/lib/TypeParameterAccessors/src/base/similartype.jl
+++ b/NDTensors/src/lib/TypeParameterAccessors/src/base/similartype.jl
@@ -18,25 +18,25 @@ end
 
 @traitfn function similartype(
   arraytype::Type{ArrayT}
-) where {ArrayT; !IsWrappedArray{ArrayT}}
+) where {ArrayT;!IsWrappedArray{ArrayT}}
   return arraytype
 end
 
 @traitfn function similartype(
   arraytype::Type{ArrayT}, eltype::Type
-) where {ArrayT; !IsWrappedArray{ArrayT}}
+) where {ArrayT;!IsWrappedArray{ArrayT}}
   return set_eltype(arraytype, eltype)
 end
 
 @traitfn function similartype(
   arraytype::Type{ArrayT}, dims::Tuple
-) where {ArrayT; !IsWrappedArray{ArrayT}}
+) where {ArrayT;!IsWrappedArray{ArrayT}}
   return set_indstype(arraytype, dims)
 end
 
 @traitfn function similartype(
   arraytype::Type{ArrayT}, ndims::NDims
-) where {ArrayT; !IsWrappedArray{ArrayT}}
+) where {ArrayT;!IsWrappedArray{ArrayT}}
   return set_ndims(arraytype, ndims)
 end
 
@@ -47,27 +47,25 @@ function similartype(
 end
 
 ## Wrapped arrays
-@traitfn function similartype(
-  arraytype::Type{ArrayT}
-) where {ArrayT; IsWrappedArray{ArrayT}}
+@traitfn function similartype(arraytype::Type{ArrayT}) where {ArrayT;IsWrappedArray{ArrayT}}
   return similartype(unwrap_array_type(arraytype), NDims(arraytype))
 end
 
 @traitfn function similartype(
   arraytype::Type{ArrayT}, eltype::Type
-) where {ArrayT; IsWrappedArray{ArrayT}}
+) where {ArrayT;IsWrappedArray{ArrayT}}
   return similartype(unwrap_array_type(arraytype), eltype, NDims(arraytype))
 end
 
 @traitfn function similartype(
   arraytype::Type{ArrayT}, dims::Tuple
-) where {ArrayT; IsWrappedArray{ArrayT}}
+) where {ArrayT;IsWrappedArray{ArrayT}}
   return similartype(unwrap_array_type(arraytype), dims)
 end
 
 @traitfn function similartype(
   arraytype::Type{ArrayT}, ndims::NDims
-) where {ArrayT; IsWrappedArray{ArrayT}}
+) where {ArrayT;IsWrappedArray{ArrayT}}
   return similartype(unwrap_array_type(arraytype), ndims)
 end
 
diff --git a/NDTensors/src/tupletools.jl b/NDTensors/src/tupletools.jl
index 132b219af5..ce7dac1168 100644
--- a/NDTensors/src/tupletools.jl
+++ b/NDTensors/src/tupletools.jl
@@ -60,7 +60,7 @@ permute(s::AbstractVector, perm) = _permute(s, perm)
 sim(s::NTuple) = s
 
 # type stable findfirst
-@inline _findfirst(args...) = (i = findfirst(args...); i === nothing ? 0 : i)
+@inline _findfirst(args...) = (i=findfirst(args...); i === nothing ? 0 : i)
 
 """
   getperm(col1,col2)
diff --git a/src/itensor.jl b/src/itensor.jl
index 3a0c6c7cf4..f283234d83 100644
--- a/src/itensor.jl
+++ b/src/itensor.jl
@@ -119,8 +119,9 @@ Constructor for an ITensor from a TensorStorage
 and a set of indices.
 The ITensor stores a view of the TensorStorage.
 """
-ITensor(as::AliasStyle, st::TensorStorage, is)::ITensor =
-  ITensor(as, Tensor(as, st, Tuple(is)))
+ITensor(as::AliasStyle, st::TensorStorage, is)::ITensor = ITensor(
+  as, Tensor(as, st, Tuple(is))
+)
 ITensor(as::AliasStyle, is, st::TensorStorage)::ITensor = ITensor(as, st, is)
 ITensor(st::TensorStorage, is)::ITensor = itensor(Tensor(NeverAlias(), st, Tuple(is)))
 
@@ -137,8 +138,9 @@ of the input data when possible.
 """
 itensor(args...; kwargs...)::ITensor = ITensor(AllowAlias(), args...; kwargs...)
 
-ITensor(::AliasStyle, args...; kwargs...)::ITensor =
-  error("ITensor constructor with input arguments of types `$(typeof.(args))` not defined.")
+ITensor(::AliasStyle, args...; kwargs...)::ITensor = error(
+  "ITensor constructor with input arguments of types `$(typeof.(args))` not defined."
+)
 
 """
     Tensor(::ITensor)
@@ -300,7 +302,7 @@ emptyITensor(is::Indices) = emptyITensor(EmptyNumber, is)
 
 emptyITensor(is...) = emptyITensor(EmptyNumber, indices(is...))
 
-function emptyITensor(::Type{ElT}=EmptyNumber) where {ElT<:Number}
+function emptyITensor((::Type{ElT})=EmptyNumber) where {ElT<:Number}
   return itensor(EmptyTensor(ElT, ()))
 end
 
@@ -1092,8 +1094,9 @@ A[1, 2] # 2.0, same as: A[i => 1, i' => 2]
 end
 
 # Special case that handles indexing with `end` like `A[i => end, j => 3]`
-@propagate_inbounds getindex(T::ITensor, I::Union{Integer,LastVal}...)::Any =
-  _getindex(tensor(T), I...)
+@propagate_inbounds getindex(T::ITensor, I::Union{Integer,LastVal}...)::Any = _getindex(
+  tensor(T), I...
+)
 
 # Simple version with just integer indexing, bounds checking gets done by NDTensors
 
@@ -1119,8 +1122,9 @@ A = ITensor(2.0, i, i')
 A[i => 1, i' => 2] # 2.0, same as: A[i' => 2, i => 1]
 ```
 """
-@propagate_inbounds (getindex(T::ITensor, ivs::Vararg{Any,N})::Any) where {N} =
-  _getindex(tensor(T), ivs...)
+@propagate_inbounds (getindex(T::ITensor, ivs::Vararg{Any,N})::Any) where {N} = _getindex(
+  tensor(T), ivs...
+)
 
 ## Allowing one to get the first ITensor element if its an order 0 tensor or an order 1 tensor with a dimension of 1. Also convert GPU back to CPU
 @propagate_inbounds function getindex(T::ITensor)::Any
diff --git a/src/lib/ITensorsNamedDimsArraysExt/src/itensor.jl b/src/lib/ITensorsNamedDimsArraysExt/src/itensor.jl
index a9200a0efd..6c06856658 100644
--- a/src/lib/ITensorsNamedDimsArraysExt/src/itensor.jl
+++ b/src/lib/ITensorsNamedDimsArraysExt/src/itensor.jl
@@ -19,10 +19,12 @@ for f in [:rand, :randn]
   )
     return $f(rng, elt, (dim1, dims...))
   end
-  Base.$f(elt::Type{<:Number}, dims::Tuple{Index,Vararg{Index}}) =
-    $f(default_rng(), elt, dims)
-  Base.$f(elt::Type{<:Number}, dim1::Index, dims::Vararg{Index}) =
-    $f(elt, (dim1, dims...))
+  Base.$f(elt::Type{<:Number}, dims::Tuple{Index,Vararg{Index}}) = $f(
+    default_rng(), elt, dims
+  )
+  Base.$f(elt::Type{<:Number}, dim1::Index, dims::Vararg{Index}) = $f(
+    elt, (dim1, dims...)
+  )
   Base.$f(dims::Tuple{Index,Vararg{Index}}) = $f(default_eltype(), dims)
   Base.$f(dim1::Index, dims::Vararg{Index}) = $f((dim1, dims...))
 end
diff --git a/src/symmetrystyle.jl b/src/symmetrystyle.jl
index 319dfe6060..2507d33e04 100644
--- a/src/symmetrystyle.jl
+++ b/src/symmetrystyle.jl
@@ -7,9 +7,8 @@ function symmetrystyle(T)
   return error("No SymmetryStyle defined for the specified object $T of type $(typeof(T))")
 end
 
-symmetrystyle(T, S, U, V...)::SymmetryStyle = (
-  Base.@_inline_meta; symmetrystyle(symmetrystyle(T), symmetrystyle(S, U, V...))
-)
+symmetrystyle(T, S, U, V...)::SymmetryStyle =
+  (Base.@_inline_meta; symmetrystyle(symmetrystyle(T), symmetrystyle(S, U, V...)))
 
 symmetrystyle(T, S)::SymmetryStyle = symmetrystyle(symmetrystyle(T), symmetrystyle(S))
 
diff --git a/src/tensor_operations/tensor_algebra.jl b/src/tensor_operations/tensor_algebra.jl
index a56c99bf57..91134ca1bf 100644
--- a/src/tensor_operations/tensor_algebra.jl
+++ b/src/tensor_operations/tensor_algebra.jl
@@ -601,23 +601,23 @@ function product(A::ITensor, B::ITensor; apply_dag::Bool=false)
   danglings_inds = unioninds(danglings_indsA, danglings_indsB)
   if hassameinds(common_paired_indsA, common_paired_indsB)
     # matrix-matrix product
-    A′ = prime(A; inds=!danglings_inds)
-    AB = mapprime(A′ * B, 2 => 1; inds=!danglings_inds)
+    A′ = prime(A; inds=(!danglings_inds))
+    AB = mapprime(A′ * B, 2 => 1; inds=(!danglings_inds))
     if apply_dag
-      AB′ = prime(AB; inds=!danglings_inds)
-      Adag = swapprime(dag(A), 0 => 1; inds=!danglings_inds)
-      return mapprime(AB′ * Adag, 2 => 1; inds=!danglings_inds)
+      AB′ = prime(AB; inds=(!danglings_inds))
+      Adag = swapprime(dag(A), 0 => 1; inds=(!danglings_inds))
+      return mapprime(AB′ * Adag, 2 => 1; inds=(!danglings_inds))
     end
     return AB
   elseif isempty(common_paired_indsA) && !isempty(common_paired_indsB)
    # vector-matrix product
    apply_dag && error("apply_dag not supported for matrix-vector product")
-    A′ = prime(A; inds=!danglings_inds)
+    A′ = prime(A; inds=(!danglings_inds))
    return A′ * B
  elseif !isempty(common_paired_indsA) && isempty(common_paired_indsB)
    # matrix-vector product
    apply_dag && error("apply_dag not supported for vector-matrix product")
-    return replaceprime(A * B, 1 => 0; inds=!danglings_inds)
+    return replaceprime(A * B, 1 => 0; inds=(!danglings_inds))
   end
 end
diff --git a/test/base/test_itensor.jl b/test/base/test_itensor.jl
index 66ca1abfc7..52efac9fbf 100644
--- a/test/base/test_itensor.jl
+++ b/test/base/test_itensor.jl
@@ -246,7 +246,7 @@ end
   @test A[b => end - 1, a => 2] == A[a => 2, b => 2]
   @test A[b => end, a => 1] == A[a => 1, b => 3]
   @test A[b => end - 2, a => 1] == A[a => 1, b => 1]
-  @test A[b => end^2 - 7, a => 1] == A[a => 1, b => 2]
+  @test A[b => end ^ 2 - 7, a => 1] == A[a => 1, b => 2]
 
   i, j, k, l = Index.(2, ("i", "j", "k", "l"))
   B = random_itensor(i)
diff --git a/test/base/test_not.jl b/test/base/test_not.jl
index d0c3855d1c..1efa534b04 100644
--- a/test/base/test_not.jl
+++ b/test/base/test_not.jl
@@ -11,7 +11,7 @@ using ITensors, Test
 
   @test hassameinds(Ap, (i', j, k''))
 
-  Ap = prime(A; tags=!ts"j")
+  Ap = prime(A; tags=(!ts"j"))
 
   @test hassameinds(Ap, (i', j, k''))
 
@@ -23,7 +23,7 @@ using ITensors, Test
 
  @test hassameinds(Ap2, (i, j'', k'''))
 
-  Ap2 = prime(A, 2; inds=!i)
+  Ap2 = prime(A, 2; inds=(!i))
 
   @test hassameinds(Ap2, (i, j'', k'''))
 
@@ -39,7 +39,7 @@ using ITensors, Test
 
   @test hassameinds(At2, (settags(i, "y"), j, k'))
 
-  At2 = settags(A, "y"; inds=!IndexSet(j, k'))
+  At2 = settags(A, "y"; inds=(!IndexSet(j, k')))
 
   @test hassameinds(At2, (settags(i, "y"), j, k'))
 
diff --git a/test/base/test_sitetype.jl b/test/base/test_sitetype.jl
index 12c77734a6..de03b2b4a5 100644
--- a/test/base/test_sitetype.jl
+++ b/test/base/test_sitetype.jl
@@ -226,7 +226,7 @@ end
     o = op("$(ot)_op_1", s, 1, 2)
     @test o ≈ itensor(
-      [i * j for i in 1:(d^2), j in 1:(d^2)], s[2]', s[1]', dag(s[2]), dag(s[1])
+      [i * j for i in 1:(d ^ 2), j in 1:(d ^ 2)], s[2]', s[1]', dag(s[2]), dag(s[1])
     )
 
     d = 4
 
diff --git a/test/ext/ITensorsChainRulesCoreExt/test_chainrules.jl b/test/ext/ITensorsChainRulesCoreExt/test_chainrules.jl
index d20ab0474f..a811956f57 100644
--- a/test/ext/ITensorsChainRulesCoreExt/test_chainrules.jl
+++ b/test/ext/ITensorsChainRulesCoreExt/test_chainrules.jl
@@ -239,11 +239,11 @@ Random.seed!(1234)
     args = (A,)
     test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
 
-    f = x -> (x^2 * δ((i', i)))[1, 1]
+    f = x -> (x ^ 2 * δ((i', i)))[1, 1]
     args = (6.2,)
     test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
 
-    f = x -> (x^2 * δ(i', i))[1, 1]
+    f = x -> (x ^ 2 * δ(i', i))[1, 1]
     args = (5.2,)
     test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
   end
@@ -283,7 +283,7 @@ Random.seed!(1234)
     #args = (2.8 + 3.1im,)
     #test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
     f = function (x)
-      return real((x^3 * ITensor([sin(x) exp(-2x); 3x^3 x+x^2], j', dag(j)))[1, 1])
+      return real((x ^ 3 * ITensor([sin(x) exp(-2x); 3x ^ 3 x + x ^ 2], j', dag(j)))[1, 1])
     end
     args = (3.4 + 2.3im,)
     test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)