From edb4b34fdb898f08fe14a534d1f639b2a629f606 Mon Sep 17 00:00:00 2001
From: mtfishman
Date: Fri, 10 Oct 2025 18:33:42 -0400
Subject: [PATCH] More Runic formatting

---
 .../generate_issue_templates.jl | 52 +-
 NDTensors/Project.toml | 2 +-
 NDTensors/ext/NDTensorsAMDGPUExt/adapt.jl | 26 +-
 NDTensors/ext/NDTensorsAMDGPUExt/append.jl | 2 +-
 NDTensors/ext/NDTensorsAMDGPUExt/copyto.jl | 36 +-
 NDTensors/ext/NDTensorsAMDGPUExt/indexing.jl | 14 +-
 .../ext/NDTensorsAMDGPUExt/linearalgebra.jl | 12 +-
 NDTensors/ext/NDTensorsAMDGPUExt/mul.jl | 58 +-
 .../ext/NDTensorsAMDGPUExt/permutedims.jl | 26 +-
 NDTensors/ext/NDTensorsCUDAExt/adapt.jl | 20 +-
 NDTensors/ext/NDTensorsCUDAExt/append.jl | 2 +-
 NDTensors/ext/NDTensorsCUDAExt/copyto.jl | 28 +-
 NDTensors/ext/NDTensorsCUDAExt/indexing.jl | 16 +-
 .../ext/NDTensorsCUDAExt/linearalgebra.jl | 76 +-
 NDTensors/ext/NDTensorsCUDAExt/mul.jl | 58 +-
 NDTensors/ext/NDTensorsCUDAExt/permutedims.jl | 26 +-
 NDTensors/ext/NDTensorsCUDAExt/set_types.jl | 2 +-
 .../blocksparsetensor.jl | 24 +-
 .../ext/NDTensorsGPUArraysCoreExt/contract.jl | 124 +--
 NDTensors/ext/NDTensorsHDF5Ext/empty.jl | 26 +-
 NDTensors/ext/NDTensorsJLArraysExt/copyto.jl | 28 +-
 .../ext/NDTensorsJLArraysExt/indexing.jl | 12 +-
 .../ext/NDTensorsJLArraysExt/linearalgebra.jl | 28 +-
 NDTensors/ext/NDTensorsJLArraysExt/mul.jl | 58 +-
 .../ext/NDTensorsJLArraysExt/permutedims.jl | 26 +-
 .../NDTensorsMappedArraysExt.jl | 18 +-
 NDTensors/ext/NDTensorsMetalExt/adapt.jl | 22 +-
 NDTensors/ext/NDTensorsMetalExt/append.jl | 2 +-
 NDTensors/ext/NDTensorsMetalExt/copyto.jl | 28 +-
 NDTensors/ext/NDTensorsMetalExt/indexing.jl | 10 +-
 .../ext/NDTensorsMetalExt/linearalgebra.jl | 56 +-
 NDTensors/ext/NDTensorsMetalExt/mul.jl | 52 +-
 .../ext/NDTensorsMetalExt/permutedims.jl | 42 +-
 NDTensors/ext/NDTensorsMetalExt/set_types.jl | 2 +-
 .../ext/NDTensorsOctavianExt/octavian.jl | 26 +-
 .../ext/NDTensorscuTENSORExt/contract.jl | 70 +-
 NDTensors/src/abstractarray/diaginterface.jl | 34 +-
 .../generic_array_constructors.jl | 46 +-
 NDTensors/src/abstractarray/iscu.jl | 2 +-
 NDTensors/src/abstractarray/mul.jl | 8 +-
 NDTensors/src/abstractarray/permutedims.jl | 8 +-
 NDTensors/src/abstractarray/set_types.jl | 2 +-
 NDTensors/src/abstractarray/similar.jl | 26 +-
 NDTensors/src/abstractarray/to_shape.jl | 2 +-
 NDTensors/src/adapt.jl | 6 +-
 NDTensors/src/array/mul.jl | 4 +-
 NDTensors/src/array/permutedims.jl | 24 +-
 NDTensors/src/blocksparse/adapt.jl | 2 +-
 NDTensors/src/blocksparse/blockdims.jl | 92 +--
 NDTensors/src/blocksparse/blockoffsets.jl | 170 ++--
 NDTensors/src/blocksparse/blocksparse.jl | 150 ++--
 NDTensors/src/blocksparse/combiner.jl | 276 +++----
 NDTensors/src/blocksparse/contract.jl | 120 +--
 NDTensors/src/blocksparse/contract_generic.jl | 208 ++---
 .../src/blocksparse/contract_sequential.jl | 178 ++---
 .../src/blocksparse/contract_threaded.jl | 160 ++--
 .../src/blocksparse/contract_utilities.jl | 116 +--
 NDTensors/src/blocksparse/linearalgebra.jl | 710 ++++++++---------
 NDTensors/src/blocksparse/similar.jl | 20 +-
 NDTensors/src/combiner/combiner.jl | 148 ++--
 NDTensors/src/combiner/contract.jl | 170 ++--
 NDTensors/src/dense/dense.jl | 118 +--
 NDTensors/src/dense/densetensor.jl | 258 +++----
 .../src/dense/generic_array_constructors.jl | 46 +-
 .../src/dense/linearalgebra/decompositions.jl | 132 ++--
 NDTensors/src/dense/set_types.jl | 8 +-
 NDTensors/src/dense/tensoralgebra/contract.jl | 362 ++++-----
 NDTensors/src/dense/tensoralgebra/outer.jl | 54 +-
 NDTensors/src/diag/diagtensor.jl | 220 +++---
 NDTensors/src/diag/set_types.jl | 12 +-
NDTensors/src/diag/similar.jl | 12 +- NDTensors/src/diag/tensoralgebra/outer.jl | 36 +- NDTensors/src/dims.jl | 4 +- NDTensors/src/empty/EmptyTensor.jl | 148 ++-- NDTensors/src/empty/adapt.jl | 6 +- NDTensors/src/empty/tensoralgebra/contract.jl | 70 +- NDTensors/src/emptynumber.jl | 6 +- NDTensors/src/exports.jl | 160 ++-- NDTensors/src/imports.jl | 122 +-- NDTensors/src/lib/AMDGPUExtensions/src/roc.jl | 2 +- .../src/lib/AMDGPUExtensions/test/runtests.jl | 4 +- .../lib/BackendSelection/src/backend_types.jl | 68 +- .../src/lib/BackendSelection/test/runtests.jl | 34 +- NDTensors/src/lib/CUDAExtensions/src/cuda.jl | 2 +- .../src/lib/CUDAExtensions/test/runtests.jl | 4 +- NDTensors/src/lib/Expose/src/exposed.jl | 10 +- .../lib/Expose/src/functions/abstractarray.jl | 6 +- .../src/lib/Expose/src/functions/adapt.jl | 4 +- .../src/lib/Expose/src/functions/append.jl | 2 +- .../src/lib/Expose/src/functions/copyto.jl | 4 +- .../lib/Expose/src/functions/linearalgebra.jl | 12 +- NDTensors/src/lib/Expose/src/functions/mul.jl | 4 +- .../lib/Expose/src/functions/permutedims.jl | 10 +- NDTensors/src/lib/Expose/src/import.jl | 22 +- NDTensors/src/lib/Expose/test/runtests.jl | 500 ++++++------ .../src/gpuarrayscore.jl | 6 +- .../GPUArraysCoreExtensions/test/runtests.jl | 2 +- .../src/lib/MetalExtensions/src/metal.jl | 2 +- .../src/lib/MetalExtensions/test/runtests.jl | 2 +- NDTensors/src/linearalgebra/linearalgebra.jl | 660 ++++++++-------- NDTensors/src/tensor/set_types.jl | 26 +- NDTensors/src/tensor/similar.jl | 48 +- NDTensors/src/tensor/tensor.jl | 314 ++++---- .../generic_tensor_operations.jl | 302 ++++---- .../src/tensorstorage/default_storage.jl | 10 +- NDTensors/src/tensorstorage/set_types.jl | 8 +- NDTensors/src/tensorstorage/similar.jl | 66 +- NDTensors/src/tensorstorage/tensorstorage.jl | 16 +- .../test/NDTensorsTestUtils/device_list.jl | 74 +- .../NDTensorsTestUtils/is_supported_eltype.jl | 2 +- NDTensors/test/broken/readwrite.jl | 96 +-- NDTensors/test/lib/runtests.jl | 16 +- NDTensors/test/runtests.jl | 26 +- NDTensors/test/test_blocksparse.jl | 674 ++++++++-------- NDTensors/test/test_combiner.jl | 176 ++--- NDTensors/test/test_dense.jl | 578 +++++++------- NDTensors/test/test_diag.jl | 204 ++--- NDTensors/test/test_diagblocksparse.jl | 178 ++--- NDTensors/test/test_emptynumber.jl | 50 +- NDTensors/test/test_emptystorage.jl | 44 +- NDTensors/test/test_tupletools.jl | 36 +- Project.toml | 2 +- docs/make.jl | 12 +- docs/make_local_notest.jl | 2 +- docs/make_local_test.jl | 2 +- examples/basic_ops/basic_ops.jl | 16 +- examples/src/ctmrg_isotropic.jl | 84 +- examples/src/trg.jl | 68 +- examples/trg/run.jl | 2 +- .../LazyApply/LazyApply.jl | 28 +- .../NDTensors/dense.jl | 12 +- .../NDTensors/tensor.jl | 44 +- ext/ITensorsChainRulesCoreExt/indexset.jl | 50 +- ext/ITensorsChainRulesCoreExt/itensor.jl | 262 +++---- .../non_differentiable.jl | 2 +- ext/ITensorsChainRulesCoreExt/projection.jl | 10 +- ext/ITensorsChainRulesCoreExt/smallstrings.jl | 18 +- ext/ITensorsChainRulesCoreExt/zygoterules.jl | 6 +- ext/ITensorsHDF5Ext/qn.jl | 40 +- ext/ITensorsHDF5Ext/tagset.jl | 26 +- .../ITensorsTensorOperationsExt.jl | 12 +- .../ITensorsVectorInterfaceExt.jl | 120 +-- ext/ITensorsZygoteRulesExt/itensors.jl | 6 +- src/ITensors.jl | 76 +- src/exports.jl | 392 +++++----- src/fermions/fermions.jl | 518 ++++++------- src/imports.jl | 338 ++++---- src/indexset.jl | 474 ++++++------ .../src/ITensorVisualizationCore.jl | 10 +- .../src/visualize_macro.jl | 282 +++---- src/lib/ITensorsOpsExt/src/ITensorsOpsExt.jl | 72 
+- .../src/ITensorsSiteTypesExt.jl | 52 +- src/lib/LazyApply/src/LazyApply.jl | 330 ++++---- src/lib/Ops/ops_itensor.jl | 72 +- src/lib/Ops/src/op.jl | 232 +++--- src/lib/Ops/src/trotter.jl | 22 +- src/lib/QuantumNumbers/src/arrow.jl | 2 +- src/lib/QuantumNumbers/src/qnval.jl | 54 +- src/lib/SiteTypes/src/sitetype.jl | 570 +++++++------- src/lib/SiteTypes/src/sitetypes/electron.jl | 368 ++++----- .../SiteTypes/src/sitetypes/generic_sites.jl | 46 +- src/lib/SiteTypes/src/sitetypes/qudit.jl | 82 +- src/oneitensor.jl | 2 +- src/packagecompile/compile.jl | 50 +- src/qn/qnitensor.jl | 350 ++++----- src/readwrite.jl | 22 +- src/set_types.jl | 2 +- src/tensor_operations/itensor_combiner.jl | 18 +- src/tensor_operations/permutations.jl | 58 +- test/base/runtests.jl | 14 +- test/base/test_argsdict.jl | 204 ++--- test/base/test_broadcast.jl | 540 ++++++------- test/base/test_combiner.jl | 478 ++++++------ test/base/test_ctmrg.jl | 62 +- test/base/test_debug_checks.jl | 52 +- test/base/test_emptyitensor.jl | 130 ++-- test/base/test_examples.jl | 12 +- test/base/test_exports.jl | 6 +- test/base/test_global_variables.jl | 84 +- test/base/test_index.jl | 340 ++++---- test/base/test_indexset.jl | 726 +++++++++--------- test/base/test_indices.jl | 454 +++++------ test/base/test_inference.jl | 178 ++--- test/base/test_itensor_scalar.jl | 112 +-- test/base/test_itensor_scalar_contract.jl | 172 ++--- test/base/test_itensor_slice.jl | 82 +- test/base/test_ndtensors.jl | 34 +- test/base/test_not.jl | 52 +- test/base/test_oneitensor.jl | 26 +- test/base/test_qn.jl | 294 +++---- test/base/test_qncombiner.jl | 14 +- test/base/test_qndiagitensor.jl | 218 +++--- test/base/test_qnindex.jl | 114 +-- test/base/test_smallstring.jl | 108 +-- test/base/test_symmetrystyle.jl | 72 +- test/base/test_tagset.jl | 282 +++---- .../TestITensorsExportedNames.jl | 380 ++++----- test/base/utils/util.jl | 66 +- .../ext/ITensorsChainRulesCoreExt/runtests.jl | 14 +- .../test_chainrules_ops.jl | 466 +++++------ .../ITensorsTensorOperationsExt/runtests.jl | 242 +++--- .../ITensorsVectorInterfaceExt/runtests.jl | 250 +++--- test/ext/NDTensorsMappedArraysExt/runtests.jl | 28 +- test/lib/LazyApply/outdated/test_lazyapply.jl | 78 +- test/lib/LazyApply/runtests.jl | 14 +- test/lib/LazyApply/test_lazyapply.jl | 62 +- test/lib/Ops/runtests.jl | 14 +- test/lib/Ops/test_ops.jl | 470 ++++++------ test/lib/Ops/test_trotter.jl | 38 +- test/runtests.jl | 50 +- test/threading/runtests.jl | 14 +- test/threading/test_threading.jl | 124 +-- 212 files changed, 11182 insertions(+), 11176 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/generate_issue_templates/generate_issue_templates.jl b/.github/ISSUE_TEMPLATE/generate_issue_templates/generate_issue_templates.jl index aaaa985ff5..3ef9025956 100644 --- a/.github/ISSUE_TEMPLATE/generate_issue_templates/generate_issue_templates.jl +++ b/.github/ISSUE_TEMPLATE/generate_issue_templates/generate_issue_templates.jl @@ -7,44 +7,44 @@ package_names = ["ITensors", "NDTensors"] package_ordering = Dict(["ITensors" => 1, "NDTensors" => 2]) function bug_report_file(package_name::String) - return "$(package_name)_bug_report.md" + return "$(package_name)_bug_report.md" end function feature_request_file(package_name::String) - return "$(package_name)_feature_request.md" + return "$(package_name)_feature_request.md" end for package_name in package_names - @show package_name + @show package_name - order = lpad(package_ordering[package_name], 2, "0") + order = lpad(package_ordering[package_name], 2, "0") - 
template_bug_report = bug_report_file(template_package_name) - new_bug_report = order * "_" * bug_report_file(package_name) + template_bug_report = bug_report_file(template_package_name) + new_bug_report = order * "_" * bug_report_file(package_name) - if isfile(new_bug_report) - println("File $new_bug_report already exists, skipping") - else - println("Copying $template_bug_report to $new_bug_report") - cp(template_bug_report, new_bug_report) + if isfile(new_bug_report) + println("File $new_bug_report already exists, skipping") + else + println("Copying $template_bug_report to $new_bug_report") + cp(template_bug_report, new_bug_report) - println("Replace $template_package_name with $package_name in $new_bug_report") - replace_in_file(new_bug_report, template_package_name => package_name) + println("Replace $template_package_name with $package_name in $new_bug_report") + replace_in_file(new_bug_report, template_package_name => package_name) - mv(new_bug_report, joinpath("..", new_bug_report); force=true) - end + mv(new_bug_report, joinpath("..", new_bug_report); force = true) + end - template_feature_request = feature_request_file(template_package_name) - new_feature_request = order * "_" * feature_request_file(package_name) + template_feature_request = feature_request_file(template_package_name) + new_feature_request = order * "_" * feature_request_file(package_name) - if isfile(new_feature_request) - println("File $new_feature_request already exists, skipping") - else - println("Copying $template_feature_request to $new_feature_request") - cp(template_feature_request, new_feature_request) + if isfile(new_feature_request) + println("File $new_feature_request already exists, skipping") + else + println("Copying $template_feature_request to $new_feature_request") + cp(template_feature_request, new_feature_request) - println("Replace $template_package_name with $package_name in $new_feature_request") - replace_in_file(new_feature_request, template_package_name => package_name) + println("Replace $template_package_name with $package_name in $new_feature_request") + replace_in_file(new_feature_request, template_package_name => package_name) - mv(new_feature_request, joinpath("..", new_feature_request); force=true) - end + mv(new_feature_request, joinpath("..", new_feature_request); force = true) + end end diff --git a/NDTensors/Project.toml b/NDTensors/Project.toml index a6f620e419..7df9c48cdc 100644 --- a/NDTensors/Project.toml +++ b/NDTensors/Project.toml @@ -1,7 +1,7 @@ name = "NDTensors" uuid = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf" authors = ["Matthew Fishman "] -version = "0.4.12" +version = "0.4.13" [deps] Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697" diff --git a/NDTensors/ext/NDTensorsAMDGPUExt/adapt.jl b/NDTensors/ext/NDTensorsAMDGPUExt/adapt.jl index 929790f165..520c4dabad 100644 --- a/NDTensors/ext/NDTensorsAMDGPUExt/adapt.jl +++ b/NDTensors/ext/NDTensorsAMDGPUExt/adapt.jl @@ -2,28 +2,28 @@ using NDTensors: NDTensors, EmptyStorage, adapt_storagetype, emptytype using NDTensors.AMDGPUExtensions: AMDGPUExtensions, ROCArrayAdaptor using NDTensors.GPUArraysCoreExtensions: storagemode using NDTensors.TypeParameterAccessors: - default_type_parameters, set_type_parameters, type_parameters + default_type_parameters, set_type_parameters, type_parameters using Adapt: Adapt, adapt using AMDGPU: AMDGPU, ROCArray, ROCVector using Functors: fmap function AMDGPUExtensions.roc( - xs; storagemode=default_type_parameters(ROCArray, storagemode) -) - return fmap(x -> 
adapt(ROCArrayAdaptor{storagemode}(), x), xs) + xs; storagemode = default_type_parameters(ROCArray, storagemode) + ) + return fmap(x -> adapt(ROCArrayAdaptor{storagemode}(), x), xs) end function Adapt.adapt_storage(adaptor::ROCArrayAdaptor, xs::AbstractArray) - new_parameters = (type_parameters(xs, (eltype, ndims))..., storagemode(adaptor)) - roctype = set_type_parameters(ROCArray, (eltype, ndims, storagemode), new_parameters) - return isbits(xs) ? xs : adapt(roctype, xs) + new_parameters = (type_parameters(xs, (eltype, ndims))..., storagemode(adaptor)) + roctype = set_type_parameters(ROCArray, (eltype, ndims, storagemode), new_parameters) + return isbits(xs) ? xs : adapt(roctype, xs) end function NDTensors.adapt_storagetype( - adaptor::ROCArrayAdaptor, ::Type{EmptyStorage{ElT,StoreT}} -) where {ElT,StoreT} - roctype = set_type_parameters( - ROCVector, (eltype, storagemode), (ElT, storagemode(adaptor)) - ) - return emptytype(adapt_storagetype(roctype, StoreT)) + adaptor::ROCArrayAdaptor, ::Type{EmptyStorage{ElT, StoreT}} + ) where {ElT, StoreT} + roctype = set_type_parameters( + ROCVector, (eltype, storagemode), (ElT, storagemode(adaptor)) + ) + return emptytype(adapt_storagetype(roctype, StoreT)) end diff --git a/NDTensors/ext/NDTensorsAMDGPUExt/append.jl b/NDTensors/ext/NDTensorsAMDGPUExt/append.jl index c4b5d30947..c159c3623e 100644 --- a/NDTensors/ext/NDTensorsAMDGPUExt/append.jl +++ b/NDTensors/ext/NDTensorsAMDGPUExt/append.jl @@ -4,5 +4,5 @@ using NDTensors.Expose: Exposed, unexpose ## Warning this append function uses scalar indexing and is therefore extremely slow function Base.append!(Ecollection::Exposed{<:ROCArray}, collections...) - return @allowscalar append!(unexpose(Ecollection), collections...) + return @allowscalar append!(unexpose(Ecollection), collections...) end diff --git a/NDTensors/ext/NDTensorsAMDGPUExt/copyto.jl b/NDTensors/ext/NDTensorsAMDGPUExt/copyto.jl index cba61603a2..141ecda648 100644 --- a/NDTensors/ext/NDTensorsAMDGPUExt/copyto.jl +++ b/NDTensors/ext/NDTensorsAMDGPUExt/copyto.jl @@ -3,33 +3,33 @@ using LinearAlgebra: LinearAlgebra, Adjoint using AMDGPU: ROCArray # Same definition as `MtlArray`. 
-function Base.copy(src::Exposed{<:ROCArray,<:Base.ReshapedArray}) - return reshape(copy(parent(src)), size(unexpose(src))) +function Base.copy(src::Exposed{<:ROCArray, <:Base.ReshapedArray}) + return reshape(copy(parent(src)), size(unexpose(src))) end function Base.copy( - src::Exposed{ - <:ROCArray,<:SubArray{<:Any,<:Any,<:Base.ReshapedArray{<:Any,<:Any,<:Adjoint}} - }, -) - return copy(@view copy(expose(parent(src)))[parentindices(unexpose(src))...]) + src::Exposed{ + <:ROCArray, <:SubArray{<:Any, <:Any, <:Base.ReshapedArray{<:Any, <:Any, <:Adjoint}}, + }, + ) + return copy(@view copy(expose(parent(src)))[parentindices(unexpose(src))...]) end -function Base.copyto!(dest::Exposed{<:ROCArray}, src::Exposed{<:ROCArray,<:SubArray}) - copyto!(dest, expose(copy(src))) - return unexpose(dest) +function Base.copyto!(dest::Exposed{<:ROCArray}, src::Exposed{<:ROCArray, <:SubArray}) + copyto!(dest, expose(copy(src))) + return unexpose(dest) end function Base.copyto!( - dest::Exposed{<:ROCArray}, src::Exposed{<:ROCArray,<:Base.ReshapedArray} -) - copyto!(dest, expose(parent(src))) - return unexpose(dest) + dest::Exposed{<:ROCArray}, src::Exposed{<:ROCArray, <:Base.ReshapedArray} + ) + copyto!(dest, expose(parent(src))) + return unexpose(dest) end function Base.copyto!( - dest::Exposed{<:ROCArray}, src::Exposed{<:ROCArray,<:LinearAlgebra.Transpose} -) - copyto!(expose(transpose(dest)), expose(parent(src))) - return unexpose(dest) + dest::Exposed{<:ROCArray}, src::Exposed{<:ROCArray, <:LinearAlgebra.Transpose} + ) + copyto!(expose(transpose(dest)), expose(parent(src))) + return unexpose(dest) end diff --git a/NDTensors/ext/NDTensorsAMDGPUExt/indexing.jl b/NDTensors/ext/NDTensorsAMDGPUExt/indexing.jl index 46ade03433..7ec8697e9a 100644 --- a/NDTensors/ext/NDTensorsAMDGPUExt/indexing.jl +++ b/NDTensors/ext/NDTensorsAMDGPUExt/indexing.jl @@ -4,20 +4,20 @@ using NDTensors.Expose: Exposed, expose, parent, unexpose using NDTensors.GPUArraysCoreExtensions: cpu function Base.getindex(E::Exposed{<:ROCArray}) - return @allowscalar unexpose(E)[] + return @allowscalar unexpose(E)[] end function Base.setindex!(E::Exposed{<:ROCArray}, x::Number) - @allowscalar unexpose(E)[] = x - return unexpose(E) + @allowscalar unexpose(E)[] = x + return unexpose(E) end -function Base.getindex(E::Exposed{<:ROCArray,<:Adjoint}, i, j) - return (expose(parent(E))[j, i])' +function Base.getindex(E::Exposed{<:ROCArray, <:Adjoint}, i, j) + return (expose(parent(E))[j, i])' end -Base.any(f, E::Exposed{<:ROCArray,<:NDTensors.Tensor}) = any(f, data(unexpose(E))) +Base.any(f, E::Exposed{<:ROCArray, <:NDTensors.Tensor}) = any(f, data(unexpose(E))) function Base.print_array(io::IO, E::Exposed{<:ROCArray}) - return Base.print_array(io, expose(cpu(E))) + return Base.print_array(io, expose(cpu(E))) end diff --git a/NDTensors/ext/NDTensorsAMDGPUExt/linearalgebra.jl b/NDTensors/ext/NDTensorsAMDGPUExt/linearalgebra.jl index 642d2e6da0..fcc9edcd36 100644 --- a/NDTensors/ext/NDTensorsAMDGPUExt/linearalgebra.jl +++ b/NDTensors/ext/NDTensorsAMDGPUExt/linearalgebra.jl @@ -7,16 +7,16 @@ using Adapt: adapt using AMDGPU: ROCMatrix function LinearAlgebra.svd(A::Exposed{<:ROCMatrix}; kwargs...) 
- U, S, V = svd(cpu(A)) - return roc.((U, S, V)) + U, S, V = svd(cpu(A)) + return roc.((U, S, V)) end ## TODO currently AMDGPU doesn't have ql so make a ql function function Expose.ql(A::Exposed{<:ROCMatrix}) - Q, L = ql(expose(cpu(A))) - return adapt(unwrap_array_type(A), Matrix(Q)), adapt(unwrap_array_type(A), L) + Q, L = ql(expose(cpu(A))) + return adapt(unwrap_array_type(A), Matrix(Q)), adapt(unwrap_array_type(A), L) end function Expose.ql_positive(A::Exposed{<:ROCMatrix}) - Q, L = ql_positive(expose(cpu(A))) - return adapt(unwrap_array_type(A), Matrix(Q)), adapt(unwrap_array_type(A), L) + Q, L = ql_positive(expose(cpu(A))) + return adapt(unwrap_array_type(A), Matrix(Q)), adapt(unwrap_array_type(A), L) end diff --git a/NDTensors/ext/NDTensorsAMDGPUExt/mul.jl b/NDTensors/ext/NDTensorsAMDGPUExt/mul.jl index 8d332e8452..a7434c9071 100644 --- a/NDTensors/ext/NDTensorsAMDGPUExt/mul.jl +++ b/NDTensors/ext/NDTensorsAMDGPUExt/mul.jl @@ -4,42 +4,42 @@ using AMDGPU: ROCArray # This was calling generic matrix multiplication. function LinearAlgebra.mul!( - CM::Exposed{<:ROCArray,<:LinearAlgebra.Transpose}, - AM::Exposed{<:ROCArray}, - BM::Exposed{<:ROCArray}, - α, - β, -) - mul!(transpose(CM), transpose(BM), transpose(AM), α, β) - return unexpose(CM) + CM::Exposed{<:ROCArray, <:LinearAlgebra.Transpose}, + AM::Exposed{<:ROCArray}, + BM::Exposed{<:ROCArray}, + α, + β, + ) + mul!(transpose(CM), transpose(BM), transpose(AM), α, β) + return unexpose(CM) end # This was calling generic matrix multiplication. function LinearAlgebra.mul!( - CM::Exposed{<:ROCArray,<:LinearAlgebra.Adjoint}, - AM::Exposed{<:ROCArray}, - BM::Exposed{<:ROCArray}, - α, - β, -) - mul!(CM', BM', AM', α, β) - return unexpose(CM) + CM::Exposed{<:ROCArray, <:LinearAlgebra.Adjoint}, + AM::Exposed{<:ROCArray}, + BM::Exposed{<:ROCArray}, + α, + β, + ) + mul!(CM', BM', AM', α, β) + return unexpose(CM) end # Fix issue in AMDGPU.jl where it cannot distinguish # Transpose{Reshape{Adjoint{ROCArray}}} as a ROCArray and calls generic matmul function LinearAlgebra.mul!( - CM::Exposed{<:ROCArray}, - AM::Exposed{<:ROCArray}, - BM::Exposed{ - <:ROCArray, - <:LinearAlgebra.Transpose{ - <:Any,<:Base.ReshapedArray{<:Any,<:Any,<:LinearAlgebra.Adjoint} - }, - }, - α, - β, -) - mul!(CM, AM, expose(transpose(copy(expose(parent(BM))))), α, β) - return unexpose(CM) + CM::Exposed{<:ROCArray}, + AM::Exposed{<:ROCArray}, + BM::Exposed{ + <:ROCArray, + <:LinearAlgebra.Transpose{ + <:Any, <:Base.ReshapedArray{<:Any, <:Any, <:LinearAlgebra.Adjoint}, + }, + }, + α, + β, + ) + mul!(CM, AM, expose(transpose(copy(expose(parent(BM))))), α, β) + return unexpose(CM) end diff --git a/NDTensors/ext/NDTensorsAMDGPUExt/permutedims.jl b/NDTensors/ext/NDTensorsAMDGPUExt/permutedims.jl index cc284e6389..efd98b20b2 100644 --- a/NDTensors/ext/NDTensorsAMDGPUExt/permutedims.jl +++ b/NDTensors/ext/NDTensorsAMDGPUExt/permutedims.jl @@ -2,22 +2,22 @@ using NDTensors.Expose: Exposed, expose, parent, unexpose using AMDGPU: ROCArray function Base.permutedims!( - Edest::Exposed{<:ROCArray,<:Base.ReshapedArray}, Esrc::Exposed{<:ROCArray}, perm -) - Aperm = permutedims(Esrc, perm) - copyto!(expose(parent(Edest)), expose(Aperm)) - return unexpose(Edest) + Edest::Exposed{<:ROCArray, <:Base.ReshapedArray}, Esrc::Exposed{<:ROCArray}, perm + ) + Aperm = permutedims(Esrc, perm) + copyto!(expose(parent(Edest)), expose(Aperm)) + return unexpose(Edest) end # There is an issue in AMDGPU where if Edest is a reshaped{<:Adjoint} # .= can fail. 
So instead force Esrc into the shape of parent(Edest) function Base.permutedims!( - Edest::Exposed{<:ROCArray,<:Base.ReshapedArray{<:Any,<:Any,<:Adjoint}}, - Esrc::Exposed{<:ROCArray}, - perm, - f, -) - Aperm = reshape(permutedims(Esrc, perm), size(parent(Edest))) - parent(Edest) .= f.(parent(Edest), Aperm) - return unexpose(Edest) + Edest::Exposed{<:ROCArray, <:Base.ReshapedArray{<:Any, <:Any, <:Adjoint}}, + Esrc::Exposed{<:ROCArray}, + perm, + f, + ) + Aperm = reshape(permutedims(Esrc, perm), size(parent(Edest))) + parent(Edest) .= f.(parent(Edest), Aperm) + return unexpose(Edest) end diff --git a/NDTensors/ext/NDTensorsCUDAExt/adapt.jl b/NDTensors/ext/NDTensorsCUDAExt/adapt.jl index 02e8a41b4b..f6a97ac593 100644 --- a/NDTensors/ext/NDTensorsCUDAExt/adapt.jl +++ b/NDTensors/ext/NDTensorsCUDAExt/adapt.jl @@ -5,22 +5,22 @@ using NDTensors: NDTensors, EmptyStorage, adapt_storagetype, emptytype using NDTensors.CUDAExtensions: CUDAExtensions, CuArrayAdaptor using NDTensors.GPUArraysCoreExtensions: storagemode using NDTensors.TypeParameterAccessors: - default_type_parameters, set_type_parameters, type_parameters + default_type_parameters, set_type_parameters, type_parameters -function CUDAExtensions.cu(xs; storagemode=default_type_parameters(CuArray, storagemode)) - return fmap(x -> adapt(CuArrayAdaptor{storagemode}(), x), xs) +function CUDAExtensions.cu(xs; storagemode = default_type_parameters(CuArray, storagemode)) + return fmap(x -> adapt(CuArrayAdaptor{storagemode}(), x), xs) end ## Could do this generically function Adapt.adapt_storage(adaptor::CuArrayAdaptor, xs::AbstractArray) - params = (type_parameters(xs, (eltype, ndims))..., storagemode(adaptor)) - cutype = set_type_parameters(CuArray, (eltype, ndims, storagemode), params) - return isbits(xs) ? xs : adapt(cutype, xs) + params = (type_parameters(xs, (eltype, ndims))..., storagemode(adaptor)) + cutype = set_type_parameters(CuArray, (eltype, ndims, storagemode), params) + return isbits(xs) ? xs : adapt(cutype, xs) end function NDTensors.adapt_storagetype( - adaptor::CuArrayAdaptor, ::Type{EmptyStorage{ElT,StoreT}} -) where {ElT,StoreT} - cutype = set_type_parameters(CuVector, (eltype, storagemode), (ElT, storagemode(adaptor))) - return emptytype(adapt_storagetype(cutype, StoreT)) + adaptor::CuArrayAdaptor, ::Type{EmptyStorage{ElT, StoreT}} + ) where {ElT, StoreT} + cutype = set_type_parameters(CuVector, (eltype, storagemode), (ElT, storagemode(adaptor))) + return emptytype(adapt_storagetype(cutype, StoreT)) end diff --git a/NDTensors/ext/NDTensorsCUDAExt/append.jl b/NDTensors/ext/NDTensorsCUDAExt/append.jl index 9a974354ab..855dcd91d4 100644 --- a/NDTensors/ext/NDTensorsCUDAExt/append.jl +++ b/NDTensors/ext/NDTensorsCUDAExt/append.jl @@ -4,5 +4,5 @@ using NDTensors.Expose: Exposed, unexpose ## Warning this append function uses scalar indexing and is therefore extremely slow function Base.append!(Ecollection::Exposed{<:CuArray}, collections...) - return @allowscalar append!(unexpose(Ecollection), collections...) + return @allowscalar append!(unexpose(Ecollection), collections...) end diff --git a/NDTensors/ext/NDTensorsCUDAExt/copyto.jl b/NDTensors/ext/NDTensorsCUDAExt/copyto.jl index c3f136a9a6..dd6f0b55d5 100644 --- a/NDTensors/ext/NDTensorsCUDAExt/copyto.jl +++ b/NDTensors/ext/NDTensorsCUDAExt/copyto.jl @@ -3,28 +3,28 @@ using NDTensors.Expose: Exposed, expose, unexpose using LinearAlgebra: Adjoint # Same definition as `MtlArray`. 
-function Base.copy(src::Exposed{<:CuArray,<:Base.ReshapedArray}) - return reshape(copy(parent(src)), size(unexpose(src))) +function Base.copy(src::Exposed{<:CuArray, <:Base.ReshapedArray}) + return reshape(copy(parent(src)), size(unexpose(src))) end function Base.copy( - src::Exposed{ - <:CuArray,<:SubArray{<:Any,<:Any,<:Base.ReshapedArray{<:Any,<:Any,<:Adjoint}} - }, -) - return copy(@view copy(expose(parent(src)))[parentindices(unexpose(src))...]) + src::Exposed{ + <:CuArray, <:SubArray{<:Any, <:Any, <:Base.ReshapedArray{<:Any, <:Any, <:Adjoint}}, + }, + ) + return copy(@view copy(expose(parent(src)))[parentindices(unexpose(src))...]) end # Catches a bug in `copyto!` in CUDA backend. -function Base.copyto!(dest::Exposed{<:CuArray}, src::Exposed{<:CuArray,<:SubArray}) - copyto!(dest, expose(copy(src))) - return unexpose(dest) +function Base.copyto!(dest::Exposed{<:CuArray}, src::Exposed{<:CuArray, <:SubArray}) + copyto!(dest, expose(copy(src))) + return unexpose(dest) end # Catches a bug in `copyto!` in CUDA backend. function Base.copyto!( - dest::Exposed{<:CuArray}, src::Exposed{<:CuArray,<:Base.ReshapedArray} -) - copyto!(dest, expose(parent(src))) - return unexpose(dest) + dest::Exposed{<:CuArray}, src::Exposed{<:CuArray, <:Base.ReshapedArray} + ) + copyto!(dest, expose(parent(src))) + return unexpose(dest) end diff --git a/NDTensors/ext/NDTensorsCUDAExt/indexing.jl b/NDTensors/ext/NDTensorsCUDAExt/indexing.jl index 621cde84da..571bd31261 100644 --- a/NDTensors/ext/NDTensorsCUDAExt/indexing.jl +++ b/NDTensors/ext/NDTensorsCUDAExt/indexing.jl @@ -4,22 +4,22 @@ using NDTensors: NDTensors using NDTensors.Expose: Exposed, expose, unexpose function Base.getindex(E::Exposed{<:CuArray}) - return @allowscalar unexpose(E)[] + return @allowscalar unexpose(E)[] end function Base.setindex!(E::Exposed{<:CuArray}, x::Number) - @allowscalar unexpose(E)[] = x - return unexpose(E) + @allowscalar unexpose(E)[] = x + return unexpose(E) end -function Base.getindex(E::Exposed{<:CuArray,<:Adjoint}, i, j) - return (expose(parent(E))[j, i])' +function Base.getindex(E::Exposed{<:CuArray, <:Adjoint}, i, j) + return (expose(parent(E))[j, i])' end -function Base.any(f, E::Exposed{<:CuArray,<:NDTensors.Tensor}) - return any(f, NDTensors.data(unexpose(E))) +function Base.any(f, E::Exposed{<:CuArray, <:NDTensors.Tensor}) + return any(f, NDTensors.data(unexpose(E))) end function Base.print_array(io::IO, E::Exposed{<:CuArray}) - return Base.print_array(io, expose(NDTensors.cpu(E))) + return Base.print_array(io, expose(NDTensors.cpu(E))) end diff --git a/NDTensors/ext/NDTensorsCUDAExt/linearalgebra.jl b/NDTensors/ext/NDTensorsCUDAExt/linearalgebra.jl index f76841e135..f850da1d39 100644 --- a/NDTensors/ext/NDTensorsCUDAExt/linearalgebra.jl +++ b/NDTensors/ext/NDTensorsCUDAExt/linearalgebra.jl @@ -5,56 +5,56 @@ using NDTensors: NDTensors using NDTensors.Expose: Expose, expose, ql, ql_positive using NDTensors.GPUArraysCoreExtensions: cpu using NDTensors.TypeParameterAccessors: unwrap_array_type -function NDTensors.svd_catch_error(A::CuMatrix; alg::String="jacobi_algorithm") - if alg == "jacobi_algorithm" - alg = CUDA.CUSOLVER.JacobiAlgorithm() - elseif alg == "qr_algorithm" - alg = CUDA.CUSOLVER.QRAlgorithm() - else - error( - "svd algorithm $alg is not currently supported. 
Please see the documentation for currently supported algorithms.", - ) - end - return NDTensors.svd_catch_error(A, alg) +function NDTensors.svd_catch_error(A::CuMatrix; alg::String = "jacobi_algorithm") + if alg == "jacobi_algorithm" + alg = CUDA.CUSOLVER.JacobiAlgorithm() + elseif alg == "qr_algorithm" + alg = CUDA.CUSOLVER.QRAlgorithm() + else + error( + "svd algorithm $alg is not currently supported. Please see the documentation for currently supported algorithms.", + ) + end + return NDTensors.svd_catch_error(A, alg) end function NDTensors.svd_catch_error(A::CuMatrix, ::CUDA.CUSOLVER.JacobiAlgorithm) - USV = try - svd(A; alg=CUDA.CUSOLVER.JacobiAlgorithm()) - catch - return nothing - end - return USV + USV = try + svd(A; alg = CUDA.CUSOLVER.JacobiAlgorithm()) + catch + return nothing + end + return USV end function NDTensors.svd_catch_error(A::CuMatrix, ::CUDA.CUSOLVER.QRAlgorithm) - s = size(A) - if s[1] < s[2] - At = copy(Adjoint(A)) + s = size(A) + if s[1] < s[2] + At = copy(Adjoint(A)) - USV = try - svd(At; alg=CUDA.CUSOLVER.QRAlgorithm()) - catch - return nothing - end - MV, MS, MU = USV - USV = (MU, MS, MV) - else - USV = try - svd(A; alg=CUDA.CUSOLVER.QRAlgorithm()) - catch - return nothing + USV = try + svd(At; alg = CUDA.CUSOLVER.QRAlgorithm()) + catch + return nothing + end + MV, MS, MU = USV + USV = (MU, MS, MV) + else + USV = try + svd(A; alg = CUDA.CUSOLVER.QRAlgorithm()) + catch + return nothing + end end - end - return USV + return USV end ## TODO currently AMDGPU doesn't have ql so make a ql function function Expose.ql(A::Exposed{<:CuMatrix}) - Q, L = ql(expose(cpu(A))) - return adapt(unwrap_array_type(A), Matrix(Q)), adapt(unwrap_array_type(A), L) + Q, L = ql(expose(cpu(A))) + return adapt(unwrap_array_type(A), Matrix(Q)), adapt(unwrap_array_type(A), L) end function Expose.ql_positive(A::Exposed{<:CuMatrix}) - Q, L = ql_positive(expose(cpu(A))) - return adapt(unwrap_array_type(A), Matrix(Q)), adapt(unwrap_array_type(A), L) + Q, L = ql_positive(expose(cpu(A))) + return adapt(unwrap_array_type(A), Matrix(Q)), adapt(unwrap_array_type(A), L) end diff --git a/NDTensors/ext/NDTensorsCUDAExt/mul.jl b/NDTensors/ext/NDTensorsCUDAExt/mul.jl index 624e20aaad..fcc638afe0 100644 --- a/NDTensors/ext/NDTensorsCUDAExt/mul.jl +++ b/NDTensors/ext/NDTensorsCUDAExt/mul.jl @@ -5,43 +5,43 @@ using NDTensors.Expose: Exposed, expose, unexpose # This was calling generic matrix multiplication. # TODO: Raise an issue with `CUDA.jl`. function LinearAlgebra.mul!( - CM::Exposed{<:CuArray,<:LinearAlgebra.Transpose}, - AM::Exposed{<:CuArray}, - BM::Exposed{<:CuArray}, - α, - β, -) - mul!(transpose(CM), transpose(BM), transpose(AM), α, β) - return unexpose(CM) + CM::Exposed{<:CuArray, <:LinearAlgebra.Transpose}, + AM::Exposed{<:CuArray}, + BM::Exposed{<:CuArray}, + α, + β, + ) + mul!(transpose(CM), transpose(BM), transpose(AM), α, β) + return unexpose(CM) end # This was calling generic matrix multiplication. # TODO: Raise an issue with `CUDA.jl`. 
function LinearAlgebra.mul!( - CM::Exposed{<:CuArray,<:LinearAlgebra.Adjoint}, - AM::Exposed{<:CuArray}, - BM::Exposed{<:CuArray}, - α, - β, -) - mul!(CM', BM', AM', α, β) - return unexpose(CM) + CM::Exposed{<:CuArray, <:LinearAlgebra.Adjoint}, + AM::Exposed{<:CuArray}, + BM::Exposed{<:CuArray}, + α, + β, + ) + mul!(CM', BM', AM', α, β) + return unexpose(CM) end ## Fix issue in CUDA.jl where it cannot distinguish Transpose{Reshape{Adjoint{CuArray}}} ## as a CuArray and calls generic matmul function LinearAlgebra.mul!( - CM::Exposed{<:CuArray}, - AM::Exposed{<:CuArray}, - BM::Exposed{ - <:CuArray, - <:LinearAlgebra.Transpose{ - <:Any,<:Base.ReshapedArray{<:Any,<:Any,<:LinearAlgebra.Adjoint} - }, - }, - α, - β, -) - mul!(CM, AM, expose(transpose(copy(expose(parent(BM))))), α, β) - return unexpose(CM) + CM::Exposed{<:CuArray}, + AM::Exposed{<:CuArray}, + BM::Exposed{ + <:CuArray, + <:LinearAlgebra.Transpose{ + <:Any, <:Base.ReshapedArray{<:Any, <:Any, <:LinearAlgebra.Adjoint}, + }, + }, + α, + β, + ) + mul!(CM, AM, expose(transpose(copy(expose(parent(BM))))), α, β) + return unexpose(CM) end diff --git a/NDTensors/ext/NDTensorsCUDAExt/permutedims.jl b/NDTensors/ext/NDTensorsCUDAExt/permutedims.jl index 032c55c40a..d37022f33c 100644 --- a/NDTensors/ext/NDTensorsCUDAExt/permutedims.jl +++ b/NDTensors/ext/NDTensorsCUDAExt/permutedims.jl @@ -2,22 +2,22 @@ using CUDA: CuArray using NDTensors.Expose: Exposed, expose, unexpose function Base.permutedims!( - Edest::Exposed{<:CuArray,<:Base.ReshapedArray}, Esrc::Exposed{<:CuArray}, perm -) - Aperm = permutedims(Esrc, perm) - copyto!(expose(parent(Edest)), expose(Aperm)) - return unexpose(Edest) + Edest::Exposed{<:CuArray, <:Base.ReshapedArray}, Esrc::Exposed{<:CuArray}, perm + ) + Aperm = permutedims(Esrc, perm) + copyto!(expose(parent(Edest)), expose(Aperm)) + return unexpose(Edest) end ## Found an issue in CUDA where if Edest is a reshaped{<:Adjoint} ## .= can fail. 
So instead force Esrc into the shape of parent(Edest) function Base.permutedims!( - Edest::Exposed{<:CuArray,<:Base.ReshapedArray{<:Any,<:Any,<:Adjoint}}, - Esrc::Exposed{<:CuArray}, - perm, - f, -) - Aperm = reshape(permutedims(Esrc, perm), size(parent(Edest))) - parent(Edest) .= f.(parent(Edest), Aperm) - return unexpose(Edest) + Edest::Exposed{<:CuArray, <:Base.ReshapedArray{<:Any, <:Any, <:Adjoint}}, + Esrc::Exposed{<:CuArray}, + perm, + f, + ) + Aperm = reshape(permutedims(Esrc, perm), size(parent(Edest))) + parent(Edest) .= f.(parent(Edest), Aperm) + return unexpose(Edest) end diff --git a/NDTensors/ext/NDTensorsCUDAExt/set_types.jl b/NDTensors/ext/NDTensorsCUDAExt/set_types.jl index 5fb5856726..21205aec11 100644 --- a/NDTensors/ext/NDTensorsCUDAExt/set_types.jl +++ b/NDTensors/ext/NDTensorsCUDAExt/set_types.jl @@ -3,5 +3,5 @@ using NDTensors.GPUArraysCoreExtensions: storagemode using NDTensors.TypeParameterAccessors: TypeParameterAccessors, Position function TypeParameterAccessors.position(::Type{<:CuArray}, ::typeof(storagemode)) - return Position(3) + return Position(3) end diff --git a/NDTensors/ext/NDTensorsGPUArraysCoreExt/blocksparsetensor.jl b/NDTensors/ext/NDTensorsGPUArraysCoreExt/blocksparsetensor.jl index aeef915f41..c5618b1d1e 100644 --- a/NDTensors/ext/NDTensorsGPUArraysCoreExt/blocksparsetensor.jl +++ b/NDTensors/ext/NDTensorsGPUArraysCoreExt/blocksparsetensor.jl @@ -6,20 +6,20 @@ using NDTensors.Expose: Exposed, unexpose ## convert blocksparse GPU tensors to dense tensors and call diag ## copying will probably have some impact on timing but this code ## currently isn't used in the main code, just in tests. -function NDTensors.diag(ETensor::Exposed{<:AbstractGPUArray,<:BlockSparseTensor}) - return diag(dense(unexpose(ETensor))) +function NDTensors.diag(ETensor::Exposed{<:AbstractGPUArray, <:BlockSparseTensor}) + return diag(dense(unexpose(ETensor))) end ## TODO scalar indexing is slow here function NDTensors.map_diag!( - f::Function, - exposed_t_destination::Exposed{<:AbstractGPUArray,<:BlockSparseTensor}, - exposed_t_source::Exposed{<:AbstractGPUArray,<:BlockSparseTensor}, -) - t_destination = unexpose(exposed_t_destination) - t_source = unexpose(exposed_t_source) - @allowscalar for i in 1:diaglength(t_destination) - NDTensors.setdiagindex!(t_destination, f(NDTensors.getdiagindex(t_source, i)), i) - end - return t_destination + f::Function, + exposed_t_destination::Exposed{<:AbstractGPUArray, <:BlockSparseTensor}, + exposed_t_source::Exposed{<:AbstractGPUArray, <:BlockSparseTensor}, + ) + t_destination = unexpose(exposed_t_destination) + t_source = unexpose(exposed_t_source) + @allowscalar for i in 1:diaglength(t_destination) + NDTensors.setdiagindex!(t_destination, f(NDTensors.getdiagindex(t_source, i)), i) + end + return t_destination end diff --git a/NDTensors/ext/NDTensorsGPUArraysCoreExt/contract.jl b/NDTensors/ext/NDTensorsGPUArraysCoreExt/contract.jl index 26b13ed731..d87e185264 100644 --- a/NDTensors/ext/NDTensorsGPUArraysCoreExt/contract.jl +++ b/NDTensors/ext/NDTensorsGPUArraysCoreExt/contract.jl @@ -5,77 +5,77 @@ using NDTensors.Expose: Exposed, expose, unexpose using NDTensors.TypeParameterAccessors: parenttype, set_ndims function NDTensors.contract!( - output_tensor::Exposed{<:AbstractGPUArray,<:DenseTensor}, - labelsoutput_tensor, - tensor1::Exposed{<:AbstractGPUArray,<:DiagTensor}, - labelstensor1, - tensor2::Exposed{<:AbstractGPUArray,<:DenseTensor}, - labelstensor2, - α::Number=one(Bool), - β::Number=zero(Bool); - convert_to_dense::Bool=true, -) - # 
Convert tensor1 to dense. - # TODO: Define `Exposed` overload for `dense`. - tensor1 = expose(dense(unexpose(tensor1))) - contract!( - output_tensor, labelsoutput_tensor, tensor1, labelstensor1, tensor2, labelstensor2, α, β - ) - return output_tensor + output_tensor::Exposed{<:AbstractGPUArray, <:DenseTensor}, + labelsoutput_tensor, + tensor1::Exposed{<:AbstractGPUArray, <:DiagTensor}, + labelstensor1, + tensor2::Exposed{<:AbstractGPUArray, <:DenseTensor}, + labelstensor2, + α::Number = one(Bool), + β::Number = zero(Bool); + convert_to_dense::Bool = true, + ) + # Convert tensor1 to dense. + # TODO: Define `Exposed` overload for `dense`. + tensor1 = expose(dense(unexpose(tensor1))) + contract!( + output_tensor, labelsoutput_tensor, tensor1, labelstensor1, tensor2, labelstensor2, α, β + ) + return output_tensor end function NDTensors.contract!( - output_tensor::Exposed{<:AbstractGPUArray,<:DenseTensor}, - labelsoutput_tensor, - tensor1::Exposed{<:AbstractGPUArray,<:DenseTensor}, - labelstensor1, - tensor2::Exposed{<:AbstractGPUArray,<:DiagTensor}, - labelstensor2, - α::Number=one(Bool), - β::Number=zero(Bool), -) - contract!( - output_tensor, labelsoutput_tensor, tensor2, labelstensor2, tensor1, labelstensor1, α, β - ) - return output_tensor + output_tensor::Exposed{<:AbstractGPUArray, <:DenseTensor}, + labelsoutput_tensor, + tensor1::Exposed{<:AbstractGPUArray, <:DenseTensor}, + labelstensor1, + tensor2::Exposed{<:AbstractGPUArray, <:DiagTensor}, + labelstensor2, + α::Number = one(Bool), + β::Number = zero(Bool), + ) + contract!( + output_tensor, labelsoutput_tensor, tensor2, labelstensor2, tensor1, labelstensor1, α, β + ) + return output_tensor end ## In this function we convert the DiagTensor to a dense tensor and ## Feed it back into contract function NDTensors.contract!( - output_tensor::Exposed{<:AbstractGPUArray,<:DenseTensor}, - labelsoutput_tensor, - tensor1::Exposed{<:Number,<:DiagTensor}, - labelstensor1, - tensor2::Exposed{<:AbstractGPUArray,<:DenseTensor}, - labelstensor2, - α::Number=one(Bool), - β::Number=zero(Bool), -) - # Convert tensor1 to dense. - # TODO: Define `Exposed` overload for `dense`. - # TODO: This allocates on CPU first then moves over to GPU which could be optimized. - tensor1 = expose( - adapt(set_ndims(parenttype(typeof(tensor2)), 1), dense(unexpose(tensor1))) - ) - contract!( - output_tensor, labelsoutput_tensor, tensor1, labelstensor1, tensor2, labelstensor2, α, β - ) - return output_tensor + output_tensor::Exposed{<:AbstractGPUArray, <:DenseTensor}, + labelsoutput_tensor, + tensor1::Exposed{<:Number, <:DiagTensor}, + labelstensor1, + tensor2::Exposed{<:AbstractGPUArray, <:DenseTensor}, + labelstensor2, + α::Number = one(Bool), + β::Number = zero(Bool), + ) + # Convert tensor1 to dense. + # TODO: Define `Exposed` overload for `dense`. + # TODO: This allocates on CPU first then moves over to GPU which could be optimized. 
+ tensor1 = expose( + adapt(set_ndims(parenttype(typeof(tensor2)), 1), dense(unexpose(tensor1))) + ) + contract!( + output_tensor, labelsoutput_tensor, tensor1, labelstensor1, tensor2, labelstensor2, α, β + ) + return output_tensor end function NDTensors.contract!( - output_tensor::Exposed{<:AbstractGPUArray,<:DenseTensor}, - labelsoutput_tensor, - tensor1::Exposed{<:AbstractGPUArray,<:DenseTensor}, - labelstensor1, - tensor2::Exposed{<:Number,<:DiagTensor}, - labelstensor2, - α::Number=one(Bool), - β::Number=zero(Bool), -) - contract!( - output_tensor, labelsoutput_tensor, tensor2, labelstensor2, tensor1, labelstensor1, α, β - ) - return output_tensor + output_tensor::Exposed{<:AbstractGPUArray, <:DenseTensor}, + labelsoutput_tensor, + tensor1::Exposed{<:AbstractGPUArray, <:DenseTensor}, + labelstensor1, + tensor2::Exposed{<:Number, <:DiagTensor}, + labelstensor2, + α::Number = one(Bool), + β::Number = zero(Bool), + ) + contract!( + output_tensor, labelsoutput_tensor, tensor2, labelstensor2, tensor1, labelstensor1, α, β + ) + return output_tensor end diff --git a/NDTensors/ext/NDTensorsHDF5Ext/empty.jl b/NDTensors/ext/NDTensorsHDF5Ext/empty.jl index 5c5f8782d4..ead7e40c48 100644 --- a/NDTensors/ext/NDTensorsHDF5Ext/empty.jl +++ b/NDTensors/ext/NDTensorsHDF5Ext/empty.jl @@ -4,20 +4,20 @@ using NDTensors: EmptyStorage # XXX: this seems a bit strange and fragile? # Takes the type very literally. function HDF5.read( - parent::Union{HDF5.File,HDF5.Group}, name::AbstractString, ::Type{StoreT} -) where {StoreT<:EmptyStorage} - g = open_group(parent, name) - typestr = string(StoreT) - if read(attributes(g)["type"]) != typestr - error("HDF5 group or file does not contain $typestr data") - end - return StoreT() + parent::Union{HDF5.File, HDF5.Group}, name::AbstractString, ::Type{StoreT} + ) where {StoreT <: EmptyStorage} + g = open_group(parent, name) + typestr = string(StoreT) + if read(attributes(g)["type"]) != typestr + error("HDF5 group or file does not contain $typestr data") + end + return StoreT() end function HDF5.write( - parent::Union{HDF5.File,HDF5.Group}, name::String, ::StoreT -) where {StoreT<:EmptyStorage} - g = create_group(parent, name) - attributes(g)["type"] = string(StoreT) - return attributes(g)["version"] = 1 + parent::Union{HDF5.File, HDF5.Group}, name::String, ::StoreT + ) where {StoreT <: EmptyStorage} + g = create_group(parent, name) + attributes(g)["type"] = string(StoreT) + return attributes(g)["version"] = 1 end diff --git a/NDTensors/ext/NDTensorsJLArraysExt/copyto.jl b/NDTensors/ext/NDTensorsJLArraysExt/copyto.jl index e0fe1eb99d..5ae9e26fbf 100644 --- a/NDTensors/ext/NDTensorsJLArraysExt/copyto.jl +++ b/NDTensors/ext/NDTensorsJLArraysExt/copyto.jl @@ -3,28 +3,28 @@ using NDTensors.Expose: Exposed, expose, unexpose using LinearAlgebra: Adjoint # Same definition as `CuArray`. -function Base.copy(src::Exposed{<:JLArray,<:Base.ReshapedArray}) - return reshape(copy(parent(src)), size(unexpose(src))) +function Base.copy(src::Exposed{<:JLArray, <:Base.ReshapedArray}) + return reshape(copy(parent(src)), size(unexpose(src))) end function Base.copy( - src::Exposed{ - <:JLArray,<:SubArray{<:Any,<:Any,<:Base.ReshapedArray{<:Any,<:Any,<:Adjoint}} - }, -) - return copy(@view copy(expose(parent(src)))[parentindices(unexpose(src))...]) + src::Exposed{ + <:JLArray, <:SubArray{<:Any, <:Any, <:Base.ReshapedArray{<:Any, <:Any, <:Adjoint}}, + }, + ) + return copy(@view copy(expose(parent(src)))[parentindices(unexpose(src))...]) end # Catches a bug in `copyto!` in CUDA backend. 
-function Base.copyto!(dest::Exposed{<:JLArray}, src::Exposed{<:JLArray,<:SubArray}) - copyto!(dest, expose(copy(src))) - return unexpose(dest) +function Base.copyto!(dest::Exposed{<:JLArray}, src::Exposed{<:JLArray, <:SubArray}) + copyto!(dest, expose(copy(src))) + return unexpose(dest) end # Catches a bug in `copyto!` in JLArray backend. function Base.copyto!( - dest::Exposed{<:JLArray}, src::Exposed{<:JLArray,<:Base.ReshapedArray} -) - copyto!(dest, expose(parent(src))) - return unexpose(dest) + dest::Exposed{<:JLArray}, src::Exposed{<:JLArray, <:Base.ReshapedArray} + ) + copyto!(dest, expose(parent(src))) + return unexpose(dest) end diff --git a/NDTensors/ext/NDTensorsJLArraysExt/indexing.jl b/NDTensors/ext/NDTensorsJLArraysExt/indexing.jl index 0f6eeb0469..7b0f04a72a 100644 --- a/NDTensors/ext/NDTensorsJLArraysExt/indexing.jl +++ b/NDTensors/ext/NDTensorsJLArraysExt/indexing.jl @@ -4,16 +4,16 @@ using NDTensors: NDTensors using NDTensors.Expose: Exposed, expose, unexpose function Base.getindex(E::Exposed{<:JLArray}) - return @allowscalar unexpose(E)[] + return @allowscalar unexpose(E)[] end function Base.setindex!(E::Exposed{<:JLArray}, x::Number) - @allowscalar unexpose(E)[] = x - return unexpose(E) + @allowscalar unexpose(E)[] = x + return unexpose(E) end -function Base.getindex(E::Exposed{<:JLArray,<:Adjoint}, i, j) - return (expose(parent(E))[j, i])' +function Base.getindex(E::Exposed{<:JLArray, <:Adjoint}, i, j) + return (expose(parent(E))[j, i])' end -Base.any(f, E::Exposed{<:JLArray,<:NDTensors.Tensor}) = any(f, data(unexpose(E))) +Base.any(f, E::Exposed{<:JLArray, <:NDTensors.Tensor}) = any(f, data(unexpose(E))) diff --git a/NDTensors/ext/NDTensorsJLArraysExt/linearalgebra.jl b/NDTensors/ext/NDTensorsJLArraysExt/linearalgebra.jl index 4d594050f1..b8079ec973 100644 --- a/NDTensors/ext/NDTensorsJLArraysExt/linearalgebra.jl +++ b/NDTensors/ext/NDTensorsJLArraysExt/linearalgebra.jl @@ -10,31 +10,31 @@ using NDTensors.TypeParameterAccessors: unwrap_array_type ## that issue is resolved we can rely on the abstractarray version of ## this operation. function Expose.qr(A::Exposed{<:JLArray}) - Q, L = qr(unexpose(A)) - return adapt(unwrap_array_type(A), Matrix(Q)), adapt(unwrap_array_type(A), L) + Q, L = qr(unexpose(A)) + return adapt(unwrap_array_type(A), Matrix(Q)), adapt(unwrap_array_type(A), L) end ## TODO this should work using a JLArray but there is an error converting the Q from its packed QR from ## back into a JLArray see https://github.com/JuliaGPU/GPUArrays.jl/issues/545. 
To fix call cpu for now function Expose.qr_positive(A::Exposed{<:JLArray}) - Q, L = qr_positive(expose(cpu(A))) - return adapt(unwrap_array_type(A), copy(Q)), adapt(unwrap_array_type(A), L) + Q, L = qr_positive(expose(cpu(A))) + return adapt(unwrap_array_type(A), copy(Q)), adapt(unwrap_array_type(A), L) end function Expose.ql(A::Exposed{<:JLMatrix}) - Q, L = ql(expose(cpu(A))) - return adapt(unwrap_array_type(A), copy(Q)), adapt(unwrap_array_type(A), L) + Q, L = ql(expose(cpu(A))) + return adapt(unwrap_array_type(A), copy(Q)), adapt(unwrap_array_type(A), L) end function Expose.ql_positive(A::Exposed{<:JLMatrix}) - Q, L = ql_positive(expose(cpu(A))) - return adapt(unwrap_array_type(A), copy(Q)), adapt(unwrap_array_type(A), L) + Q, L = ql_positive(expose(cpu(A))) + return adapt(unwrap_array_type(A), copy(Q)), adapt(unwrap_array_type(A), L) end -function LinearAlgebra.eigen(A::Exposed{<:JLMatrix,<:Symmetric}) - q, l = (eigen(expose(cpu(A)))) - return adapt.(unwrap_array_type(A), (q, l)) +function LinearAlgebra.eigen(A::Exposed{<:JLMatrix, <:Symmetric}) + q, l = (eigen(expose(cpu(A)))) + return adapt.(unwrap_array_type(A), (q, l)) end -function LinearAlgebra.eigen(A::Exposed{<:JLMatrix,<:Hermitian}) - q, l = (eigen(expose(Hermitian(cpu(unexpose(A).data))))) - return adapt.(JLArray, (q, l)) +function LinearAlgebra.eigen(A::Exposed{<:JLMatrix, <:Hermitian}) + q, l = (eigen(expose(Hermitian(cpu(unexpose(A).data))))) + return adapt.(JLArray, (q, l)) end diff --git a/NDTensors/ext/NDTensorsJLArraysExt/mul.jl b/NDTensors/ext/NDTensorsJLArraysExt/mul.jl index 5b04e75df6..f97ff6c5ec 100644 --- a/NDTensors/ext/NDTensorsJLArraysExt/mul.jl +++ b/NDTensors/ext/NDTensorsJLArraysExt/mul.jl @@ -3,41 +3,41 @@ using LinearAlgebra: LinearAlgebra, mul!, transpose using NDTensors.Expose: Exposed, expose, unexpose function LinearAlgebra.mul!( - CM::Exposed{<:JLArray,<:LinearAlgebra.Transpose}, - AM::Exposed{<:JLArray}, - BM::Exposed{<:JLArray}, - α, - β, -) - mul!(transpose(CM), transpose(BM), transpose(AM), α, β) - return unexpose(CM) + CM::Exposed{<:JLArray, <:LinearAlgebra.Transpose}, + AM::Exposed{<:JLArray}, + BM::Exposed{<:JLArray}, + α, + β, + ) + mul!(transpose(CM), transpose(BM), transpose(AM), α, β) + return unexpose(CM) end function LinearAlgebra.mul!( - CM::Exposed{<:JLArray,<:LinearAlgebra.Adjoint}, - AM::Exposed{<:JLArray}, - BM::Exposed{<:JLArray}, - α, - β, -) - mul!(CM', BM', AM', α, β) - return unexpose(CM) + CM::Exposed{<:JLArray, <:LinearAlgebra.Adjoint}, + AM::Exposed{<:JLArray}, + BM::Exposed{<:JLArray}, + α, + β, + ) + mul!(CM', BM', AM', α, β) + return unexpose(CM) end ## Fix issue in JLArrays.jl where it cannot distinguish Transpose{Reshape{Adjoint{JLArray}}} ## as a JLArray and calls generic matmul function LinearAlgebra.mul!( - CM::Exposed{<:JLArray}, - AM::Exposed{<:JLArray}, - BM::Exposed{ - <:JLArray, - <:LinearAlgebra.Transpose{ - <:Any,<:Base.ReshapedArray{<:Any,<:Any,<:LinearAlgebra.Adjoint} - }, - }, - α, - β, -) - mul!(CM, AM, expose(transpose(copy(expose(parent(BM))))), α, β) - return unexpose(CM) + CM::Exposed{<:JLArray}, + AM::Exposed{<:JLArray}, + BM::Exposed{ + <:JLArray, + <:LinearAlgebra.Transpose{ + <:Any, <:Base.ReshapedArray{<:Any, <:Any, <:LinearAlgebra.Adjoint}, + }, + }, + α, + β, + ) + mul!(CM, AM, expose(transpose(copy(expose(parent(BM))))), α, β) + return unexpose(CM) end diff --git a/NDTensors/ext/NDTensorsJLArraysExt/permutedims.jl b/NDTensors/ext/NDTensorsJLArraysExt/permutedims.jl index 4bbd5833c4..9d81fa0237 100644 --- 
a/NDTensors/ext/NDTensorsJLArraysExt/permutedims.jl +++ b/NDTensors/ext/NDTensorsJLArraysExt/permutedims.jl @@ -3,22 +3,22 @@ using LinearAlgebra: Adjoint using NDTensors.Expose: Exposed, expose, unexpose function Base.permutedims!( - Edest::Exposed{<:JLArray,<:Base.ReshapedArray}, Esrc::Exposed{<:JLArray}, perm -) - Aperm = permutedims(Esrc, perm) - copyto!(expose(parent(Edest)), expose(Aperm)) - return unexpose(Edest) + Edest::Exposed{<:JLArray, <:Base.ReshapedArray}, Esrc::Exposed{<:JLArray}, perm + ) + Aperm = permutedims(Esrc, perm) + copyto!(expose(parent(Edest)), expose(Aperm)) + return unexpose(Edest) end ## Found an issue in CUDA where if Edest is a reshaped{<:Adjoint} ## .= can fail. So instead force Esrc into the shape of parent(Edest) function Base.permutedims!( - Edest::Exposed{<:JLArray,<:Base.ReshapedArray{<:Any,<:Any,<:Adjoint}}, - Esrc::Exposed{<:JLArray}, - perm, - f, -) - Aperm = reshape(permutedims(Esrc, perm), size(parent(Edest))) - parent(Edest) .= f.(parent(Edest), Aperm) - return unexpose(Edest) + Edest::Exposed{<:JLArray, <:Base.ReshapedArray{<:Any, <:Any, <:Adjoint}}, + Esrc::Exposed{<:JLArray}, + perm, + f, + ) + Aperm = reshape(permutedims(Esrc, perm), size(parent(Edest))) + parent(Edest) .= f.(parent(Edest), Aperm) + return unexpose(Edest) end diff --git a/NDTensors/ext/NDTensorsMappedArraysExt/NDTensorsMappedArraysExt.jl b/NDTensors/ext/NDTensorsMappedArraysExt/NDTensorsMappedArraysExt.jl index 74372f3f2c..6055ee569a 100644 --- a/NDTensors/ext/NDTensorsMappedArraysExt/NDTensorsMappedArraysExt.jl +++ b/NDTensors/ext/NDTensorsMappedArraysExt/NDTensorsMappedArraysExt.jl @@ -2,24 +2,24 @@ module NDTensorsMappedArraysExt using MappedArrays: AbstractMappedArray using NDTensors: NDTensors function NDTensors.similar(arraytype::Type{<:AbstractMappedArray}, dims::Tuple{Vararg{Int}}) - return similar(Array{eltype(arraytype)}, dims) + return similar(Array{eltype(arraytype)}, dims) end function NDTensors.similartype(storagetype::Type{<:AbstractMappedArray}) - return Array{eltype(storagetype),ndims(storagetype)} + return Array{eltype(storagetype), ndims(storagetype)} end function NDTensors.similartype( - storagetype::Type{<:AbstractMappedArray}, dims::Tuple{Vararg{Int}} -) - return Array{eltype(storagetype),length(dims)} + storagetype::Type{<:AbstractMappedArray}, dims::Tuple{Vararg{Int}} + ) + return Array{eltype(storagetype), length(dims)} end using MappedArrays: ReadonlyMappedArray using NDTensors: AllowAlias # It is a bit unfortunate that we have to define this, it fixes an ambiguity # error with MappedArrays. 
-function (arraytype::Type{ReadonlyMappedArray{T,N,A,F}} where {T,N,A<:AbstractArray,F})( - ::AllowAlias, a::AbstractArray -) - return a +function (arraytype::Type{ReadonlyMappedArray{T, N, A, F}} where {T, N, A <: AbstractArray, F})( + ::AllowAlias, a::AbstractArray + ) + return a end end diff --git a/NDTensors/ext/NDTensorsMetalExt/adapt.jl b/NDTensors/ext/NDTensorsMetalExt/adapt.jl index 5034c71b29..217b2e697e 100644 --- a/NDTensors/ext/NDTensorsMetalExt/adapt.jl +++ b/NDTensors/ext/NDTensorsMetalExt/adapt.jl @@ -9,21 +9,21 @@ using NDTensors.TypeParameterAccessors: set_type_parameters, type_parameters GPUArraysCoreExtensions.cpu(e::Exposed{<:MtlArray}) = adapt(Array, e) -function MetalExtensions.mtl(xs; storagemode=DefaultStorageMode) - return fmap(x -> adapt(MtlArrayAdaptor{storagemode}(), x), xs) +function MetalExtensions.mtl(xs; storagemode = DefaultStorageMode) + return fmap(x -> adapt(MtlArrayAdaptor{storagemode}(), x), xs) end function Adapt.adapt_storage(adaptor::MtlArrayAdaptor, xs::AbstractArray) - new_parameters = (type_parameters(xs, (eltype, ndims))..., storagemode(adaptor)) - mtltype = set_type_parameters(MtlArray, (eltype, ndims, storagemode), new_parameters) - return isbits(xs) ? xs : adapt(mtltype, xs) + new_parameters = (type_parameters(xs, (eltype, ndims))..., storagemode(adaptor)) + mtltype = set_type_parameters(MtlArray, (eltype, ndims, storagemode), new_parameters) + return isbits(xs) ? xs : adapt(mtltype, xs) end function NDTensors.adapt_storagetype( - adaptor::MtlArrayAdaptor, ::Type{EmptyStorage{ElT,StoreT}} -) where {ElT,StoreT} - mtltype = set_type_parameters( - MtlVector, (eltype, storagemode), (ElT, storagemode(adaptor)) - ) - return emptytype(adapt_storagetype(mtltype, StoreT)) + adaptor::MtlArrayAdaptor, ::Type{EmptyStorage{ElT, StoreT}} + ) where {ElT, StoreT} + mtltype = set_type_parameters( + MtlVector, (eltype, storagemode), (ElT, storagemode(adaptor)) + ) + return emptytype(adapt_storagetype(mtltype, StoreT)) end diff --git a/NDTensors/ext/NDTensorsMetalExt/append.jl b/NDTensors/ext/NDTensorsMetalExt/append.jl index b48d2cfd4f..45c62b9a19 100644 --- a/NDTensors/ext/NDTensorsMetalExt/append.jl +++ b/NDTensors/ext/NDTensorsMetalExt/append.jl @@ -6,5 +6,5 @@ using NDTensors.Expose: Exposed, unexpose ## Warning this append function uses scalar indexing and is therefore extremely slow function Base.append!(Ecollection::Exposed{<:MtlArray}, collections...) - return @allowscalar append!(unexpose(Ecollection), collections...) + return @allowscalar append!(unexpose(Ecollection), collections...) 
end diff --git a/NDTensors/ext/NDTensorsMetalExt/copyto.jl b/NDTensors/ext/NDTensorsMetalExt/copyto.jl index 6c7aeb4b3c..9cbd2d1f8f 100644 --- a/NDTensors/ext/NDTensorsMetalExt/copyto.jl +++ b/NDTensors/ext/NDTensorsMetalExt/copyto.jl @@ -1,28 +1,28 @@ using Metal: MtlArray using NDTensors.Expose: Exposed, expose, unexpose -function Base.copy(src::Exposed{<:MtlArray,<:Base.ReshapedArray}) - return reshape(copy(parent(src)), size(unexpose(src))) +function Base.copy(src::Exposed{<:MtlArray, <:Base.ReshapedArray}) + return reshape(copy(parent(src)), size(unexpose(src))) end function Base.copy( - src::Exposed{ - <:MtlArray,<:SubArray{<:Any,<:Any,<:Base.ReshapedArray{<:Any,<:Any,<:Adjoint}} - }, -) - return copy(@view copy(expose(parent(src)))[parentindices(unexpose(src))...]) + src::Exposed{ + <:MtlArray, <:SubArray{<:Any, <:Any, <:Base.ReshapedArray{<:Any, <:Any, <:Adjoint}}, + }, + ) + return copy(@view copy(expose(parent(src)))[parentindices(unexpose(src))...]) end # Catches a bug in `copyto!` in Metal backend. -function Base.copyto!(dest::Exposed{<:MtlArray}, src::Exposed{<:MtlArray,<:SubArray}) - copyto!(dest, expose(copy(src))) - return unexpose(dest) +function Base.copyto!(dest::Exposed{<:MtlArray}, src::Exposed{<:MtlArray, <:SubArray}) + copyto!(dest, expose(copy(src))) + return unexpose(dest) end # Catches a bug in `copyto!` in Metal backend. function Base.copyto!( - dest::Exposed{<:MtlArray}, src::Exposed{<:MtlArray,<:Base.ReshapedArray} -) - copyto!(dest, expose(parent(src))) - return unexpose(dest) + dest::Exposed{<:MtlArray}, src::Exposed{<:MtlArray, <:Base.ReshapedArray} + ) + copyto!(dest, expose(parent(src))) + return unexpose(dest) end diff --git a/NDTensors/ext/NDTensorsMetalExt/indexing.jl b/NDTensors/ext/NDTensorsMetalExt/indexing.jl index 8a37e44e05..801dae578e 100644 --- a/NDTensors/ext/NDTensorsMetalExt/indexing.jl +++ b/NDTensors/ext/NDTensorsMetalExt/indexing.jl @@ -4,15 +4,15 @@ using LinearAlgebra: Adjoint using NDTensors.Expose: Exposed, expose, unexpose function Base.getindex(E::Exposed{<:MtlArray}) - return @allowscalar unexpose(E)[] + return @allowscalar unexpose(E)[] end function Base.setindex!(E::Exposed{<:MtlArray}, x::Number) - @allowscalar unexpose(E)[] = x - return unexpose(E) + @allowscalar unexpose(E)[] = x + return unexpose(E) end # Shared with `CuArray`. Move to `NDTensorsGPUArraysCoreExt`? 
-function Base.getindex(E::Exposed{<:MtlArray,<:Adjoint}, i, j) - return (expose(parent(E))[j, i])' +function Base.getindex(E::Exposed{<:MtlArray, <:Adjoint}, i, j) + return (expose(parent(E))[j, i])' end diff --git a/NDTensors/ext/NDTensorsMetalExt/linearalgebra.jl b/NDTensors/ext/NDTensorsMetalExt/linearalgebra.jl index 28d592506a..9c11b51eeb 100644 --- a/NDTensors/ext/NDTensorsMetalExt/linearalgebra.jl +++ b/NDTensors/ext/NDTensorsMetalExt/linearalgebra.jl @@ -2,48 +2,48 @@ using Metal: MtlMatrix using LinearAlgebra: LinearAlgebra, qr, eigen, svd using NDTensors.Expose: qr_positive, ql_positive, ql using NDTensors.TypeParameterAccessors: - set_type_parameters, type_parameters, unwrap_array_type + set_type_parameters, type_parameters, unwrap_array_type function LinearAlgebra.qr(A::Exposed{<:MtlMatrix}) - Q, R = qr(expose(NDTensors.cpu(A))) - return adapt(unwrap_array_type(A), Matrix(Q)), adapt(unwrap_array_type(A), R) + Q, R = qr(expose(NDTensors.cpu(A))) + return adapt(unwrap_array_type(A), Matrix(Q)), adapt(unwrap_array_type(A), R) end function NDTensors.Expose.qr_positive(A::Exposed{<:MtlMatrix}) - Q, R = qr_positive(expose(NDTensors.cpu(A))) - return adapt(unwrap_array_type(A), Matrix(Q)), adapt(unwrap_array_type(A), R) + Q, R = qr_positive(expose(NDTensors.cpu(A))) + return adapt(unwrap_array_type(A), Matrix(Q)), adapt(unwrap_array_type(A), R) end function NDTensors.Expose.ql(A::Exposed{<:MtlMatrix}) - Q, L = ql(expose(NDTensors.cpu(A))) - return adapt(unwrap_array_type(A), Matrix(Q)), adapt(unwrap_array_type(A), L) + Q, L = ql(expose(NDTensors.cpu(A))) + return adapt(unwrap_array_type(A), Matrix(Q)), adapt(unwrap_array_type(A), L) end function NDTensors.Expose.ql_positive(A::Exposed{<:MtlMatrix}) - Q, L = ql_positive(expose(NDTensors.cpu(A))) - return adapt(unwrap_array_type(A), Matrix(Q)), adapt(unwrap_array_type(A), L) + Q, L = ql_positive(expose(NDTensors.cpu(A))) + return adapt(unwrap_array_type(A), Matrix(Q)), adapt(unwrap_array_type(A), L) end function LinearAlgebra.eigen(A::Exposed{<:MtlMatrix}) - Dcpu, Ucpu = eigen(expose(NDTensors.cpu(A))) - D = adapt( - set_type_parameters( - unwrap_array_type(A), (eltype, ndims), type_parameters(Dcpu, (eltype, ndims)) - ), - Dcpu, - ) - U = adapt(unwrap_array_type(A), Ucpu) - return D, U + Dcpu, Ucpu = eigen(expose(NDTensors.cpu(A))) + D = adapt( + set_type_parameters( + unwrap_array_type(A), (eltype, ndims), type_parameters(Dcpu, (eltype, ndims)) + ), + Dcpu, + ) + U = adapt(unwrap_array_type(A), Ucpu) + return D, U end function LinearAlgebra.svd(A::Exposed{<:MtlMatrix}; kwargs...) - Ucpu, Scpu, Vcpu = svd(expose(NDTensors.cpu(A)); kwargs...) - U = adapt(unwrap_array_type(A), Ucpu) - S = adapt( - set_type_parameters( - unwrap_array_type(A), (eltype, ndims), type_parameters(Scpu, (eltype, ndims)) - ), - Scpu, - ) - V = adapt(unwrap_array_type(A), Vcpu) - return U, S, V + Ucpu, Scpu, Vcpu = svd(expose(NDTensors.cpu(A)); kwargs...) + U = adapt(unwrap_array_type(A), Ucpu) + S = adapt( + set_type_parameters( + unwrap_array_type(A), (eltype, ndims), type_parameters(Scpu, (eltype, ndims)) + ), + Scpu, + ) + V = adapt(unwrap_array_type(A), Vcpu) + return U, S, V end diff --git a/NDTensors/ext/NDTensorsMetalExt/mul.jl b/NDTensors/ext/NDTensorsMetalExt/mul.jl index b6e13d9e74..6b77229ed3 100644 --- a/NDTensors/ext/NDTensorsMetalExt/mul.jl +++ b/NDTensors/ext/NDTensorsMetalExt/mul.jl @@ -3,40 +3,40 @@ using LinearAlgebra: LinearAlgebra, Adjoint, Transpose, mul! # This was calling generic matrix multiplication. 
# TODO: Raise an issue with `Metal.jl`. function LinearAlgebra.mul!( - CM::Exposed{<:MtlArray,<:Transpose}, - AM::Exposed{<:MtlArray}, - BM::Exposed{<:MtlArray}, - α, - β, -) - mul!(transpose(CM), transpose(BM), transpose(AM), α, β) - return unexpose(CM) + CM::Exposed{<:MtlArray, <:Transpose}, + AM::Exposed{<:MtlArray}, + BM::Exposed{<:MtlArray}, + α, + β, + ) + mul!(transpose(CM), transpose(BM), transpose(AM), α, β) + return unexpose(CM) end # This was calling generic matrix multiplication. # TODO: Raise an issue with `Metal.jl`. function LinearAlgebra.mul!( - CM::Exposed{<:MtlArray,<:Adjoint}, AM::Exposed{<:MtlArray}, BM::Exposed{<:MtlArray}, α, β -) - mul!(CM', BM', AM', α, β) - return unexpose(CM) + CM::Exposed{<:MtlArray, <:Adjoint}, AM::Exposed{<:MtlArray}, BM::Exposed{<:MtlArray}, α, β + ) + mul!(CM', BM', AM', α, β) + return unexpose(CM) end ## Fix issue in Metal.jl where it cannot distinguish Transpose{Reshape{Adjoint{MtlArray}}} ## as a MtlArray and calls generic matmul function LinearAlgebra.mul!( - CM::Exposed{<:MtlArray}, - AM::Exposed{<:MtlArray}, - BM::Exposed{ - <:MtlArray, - <:LinearAlgebra.Transpose{ - <:Any,<:Base.ReshapedArray{<:Any,<:Any,<:LinearAlgebra.Adjoint} - }, - }, - α, - β, -) - B = copy(expose(parent(BM))) - mul!(CM, AM, expose(transpose(B)), α, β) - return unexpose(CM) + CM::Exposed{<:MtlArray}, + AM::Exposed{<:MtlArray}, + BM::Exposed{ + <:MtlArray, + <:LinearAlgebra.Transpose{ + <:Any, <:Base.ReshapedArray{<:Any, <:Any, <:LinearAlgebra.Adjoint}, + }, + }, + α, + β, + ) + B = copy(expose(parent(BM))) + mul!(CM, AM, expose(transpose(B)), α, β) + return unexpose(CM) end diff --git a/NDTensors/ext/NDTensorsMetalExt/permutedims.jl b/NDTensors/ext/NDTensorsMetalExt/permutedims.jl index 5af55b8eb3..d9ee61f97b 100644 --- a/NDTensors/ext/NDTensorsMetalExt/permutedims.jl +++ b/NDTensors/ext/NDTensorsMetalExt/permutedims.jl @@ -4,37 +4,37 @@ using NDTensors.Expose: Exposed, expose, unexpose ## Theres an issue in metal that `ReshapedArray' wrapped arrays cannot be permuted using ## permutedims (failing in that Metal uses scalar indexing) ## These functions are to address the problem in different instances of permutedims -function Base.permutedims(E::Exposed{<:MtlArray,<:Base.ReshapedArray}, perm) - A = copy(E) - return permutedims(A, perm) +function Base.permutedims(E::Exposed{<:MtlArray, <:Base.ReshapedArray}, perm) + A = copy(E) + return permutedims(A, perm) end function Base.permutedims!( - Edest::Exposed{<:MtlArray,<:Base.ReshapedArray}, Esrc::Exposed{<:MtlArray}, perm -) - Aperm = permutedims(Esrc, perm) - copyto!(expose(parent(Edest)), expose(Aperm)) - return unexpose(Edest) + Edest::Exposed{<:MtlArray, <:Base.ReshapedArray}, Esrc::Exposed{<:MtlArray}, perm + ) + Aperm = permutedims(Esrc, perm) + copyto!(expose(parent(Edest)), expose(Aperm)) + return unexpose(Edest) end function Base.permutedims!( - Edest::Exposed{<:MtlArray}, Esrc::Exposed{<:MtlArray,<:Base.ReshapedArray}, perm -) - Aperm = permutedims(Esrc, perm) - copyto!(Edest, expose(Aperm)) - return unexpose(Edest) + Edest::Exposed{<:MtlArray}, Esrc::Exposed{<:MtlArray, <:Base.ReshapedArray}, perm + ) + Aperm = permutedims(Esrc, perm) + copyto!(Edest, expose(Aperm)) + return unexpose(Edest) end ## To get around the Metal issue here we copy and permute Esrc, ## then we reshape Esrc to the size of Edest's parent ## and broadcast into the parent. 
function Base.permutedims!( - Edest::Exposed{<:MtlArray,<:Base.ReshapedArray}, - Esrc::Exposed{<:MtlArray,<:Base.ReshapedArray}, - perm, - f, -) - Aperm = reshape(permutedims(Esrc, perm), size(parent(Edest))) - parent(Edest) .= f.(parent(Edest), Aperm) - return unexpose(Edest) + Edest::Exposed{<:MtlArray, <:Base.ReshapedArray}, + Esrc::Exposed{<:MtlArray, <:Base.ReshapedArray}, + perm, + f, + ) + Aperm = reshape(permutedims(Esrc, perm), size(parent(Edest))) + parent(Edest) .= f.(parent(Edest), Aperm) + return unexpose(Edest) end diff --git a/NDTensors/ext/NDTensorsMetalExt/set_types.jl b/NDTensors/ext/NDTensorsMetalExt/set_types.jl index 32f1bd29cc..34c331d1b2 100644 --- a/NDTensors/ext/NDTensorsMetalExt/set_types.jl +++ b/NDTensors/ext/NDTensorsMetalExt/set_types.jl @@ -3,5 +3,5 @@ using NDTensors.GPUArraysCoreExtensions: storagemode using NDTensors.TypeParameterAccessors: TypeParameterAccessors, Position function TypeParameterAccessors.position(::Type{<:MtlArray}, ::typeof(storagemode)) - return Position(3) + return Position(3) end diff --git a/NDTensors/ext/NDTensorsOctavianExt/octavian.jl b/NDTensors/ext/NDTensorsOctavianExt/octavian.jl index 2cbd6d4a10..36344b288a 100644 --- a/NDTensors/ext/NDTensorsOctavianExt/octavian.jl +++ b/NDTensors/ext/NDTensorsOctavianExt/octavian.jl @@ -1,18 +1,18 @@ function NDTensors.backend_octavian() - return NDTensors.gemm_backend[] = :Octavian + return NDTensors.gemm_backend[] = :Octavian end function _gemm!( - ::NDTensors.GemmBackend{:Octavian}, - tA, - tB, - alpha, - A::AbstractVecOrMat, - B::AbstractVecOrMat, - beta, - C::AbstractVecOrMat, -) - return Octavian.matmul!( - C, tA == 'T' ? transpose(A) : A, tB == 'T' ? transpose(B) : B, alpha, beta - ) + ::NDTensors.GemmBackend{:Octavian}, + tA, + tB, + alpha, + A::AbstractVecOrMat, + B::AbstractVecOrMat, + beta, + C::AbstractVecOrMat, + ) + return Octavian.matmul!( + C, tA == 'T' ? transpose(A) : A, tB == 'T' ? transpose(B) : B, alpha, beta + ) end diff --git a/NDTensors/ext/NDTensorscuTENSORExt/contract.jl b/NDTensors/ext/NDTensorscuTENSORExt/contract.jl index 0e70969a31..618e46271c 100644 --- a/NDTensors/ext/NDTensorscuTENSORExt/contract.jl +++ b/NDTensors/ext/NDTensorscuTENSORExt/contract.jl @@ -6,45 +6,45 @@ using cuTENSOR: cuTENSOR, CuArray, CuTensor # Handle cases that can't be handled by `cuTENSOR.jl` # right now. function to_zero_offset_cuarray(a::CuArray) - return iszero(a.offset) ? a : copy(a) + return iszero(a.offset) ? a : copy(a) end function to_zero_offset_cuarray(a::ReshapedArray) - return copy(expose(a)) + return copy(expose(a)) end function NDTensors.contract!( - exposedR::Exposed{<:CuArray,<:DenseTensor}, - labelsR, - exposedT1::Exposed{<:CuArray,<:DenseTensor}, - labelsT1, - exposedT2::Exposed{<:CuArray,<:DenseTensor}, - labelsT2, - α::Number=one(Bool), - β::Number=zero(Bool), -) - R, T1, T2 = unexpose.((exposedR, exposedT1, exposedT2)) - zoffR = iszero(array(R).offset) - arrayR = zoffR ? array(R) : copy(array(R)) - arrayT1 = to_zero_offset_cuarray(array(T1)) - arrayT2 = to_zero_offset_cuarray(array(T2)) - # Promote to a common type. This is needed because as of - # cuTENSOR.jl v5.4.2, cuTENSOR contraction only performs - # limited sets of type promotions of inputs, see: - # https://github.com/JuliaGPU/CUDA.jl/blob/v5.4.2/lib/cutensor/src/types.jl#L11-L19 - elt = promote_type(eltype.((arrayR, arrayT1, arrayT2))...) 
- if elt !== eltype(arrayR) - return error( - "In cuTENSOR contraction, input tensors have element types `$(eltype(arrayT1))` and `$(eltype(arrayT2))` while the output has element type `$(eltype(arrayR))`.", + exposedR::Exposed{<:CuArray, <:DenseTensor}, + labelsR, + exposedT1::Exposed{<:CuArray, <:DenseTensor}, + labelsT1, + exposedT2::Exposed{<:CuArray, <:DenseTensor}, + labelsT2, + α::Number = one(Bool), + β::Number = zero(Bool), ) - end - arrayT1 = convert(CuArray{elt}, arrayT1) - arrayT2 = convert(CuArray{elt}, arrayT2) - cuR = CuTensor(arrayR, collect(labelsR)) - cuT1 = CuTensor(arrayT1, collect(labelsT1)) - cuT2 = CuTensor(arrayT2, collect(labelsT2)) - cuTENSOR.mul!(cuR, cuT1, cuT2, α, β) - if !zoffR - array(R) .= cuR.data - end - return R + R, T1, T2 = unexpose.((exposedR, exposedT1, exposedT2)) + zoffR = iszero(array(R).offset) + arrayR = zoffR ? array(R) : copy(array(R)) + arrayT1 = to_zero_offset_cuarray(array(T1)) + arrayT2 = to_zero_offset_cuarray(array(T2)) + # Promote to a common type. This is needed because as of + # cuTENSOR.jl v5.4.2, cuTENSOR contraction only performs + # limited sets of type promotions of inputs, see: + # https://github.com/JuliaGPU/CUDA.jl/blob/v5.4.2/lib/cutensor/src/types.jl#L11-L19 + elt = promote_type(eltype.((arrayR, arrayT1, arrayT2))...) + if elt !== eltype(arrayR) + return error( + "In cuTENSOR contraction, input tensors have element types `$(eltype(arrayT1))` and `$(eltype(arrayT2))` while the output has element type `$(eltype(arrayR))`.", + ) + end + arrayT1 = convert(CuArray{elt}, arrayT1) + arrayT2 = convert(CuArray{elt}, arrayT2) + cuR = CuTensor(arrayR, collect(labelsR)) + cuT1 = CuTensor(arrayT1, collect(labelsT1)) + cuT2 = CuTensor(arrayT2, collect(labelsT2)) + cuTENSOR.mul!(cuR, cuT1, cuT2, α, β) + if !zoffR + array(R) .= cuR.data + end + return R end diff --git a/NDTensors/src/abstractarray/diaginterface.jl b/NDTensors/src/abstractarray/diaginterface.jl index da46395a3e..6a9a8a6015 100644 --- a/NDTensors/src/abstractarray/diaginterface.jl +++ b/NDTensors/src/abstractarray/diaginterface.jl @@ -2,32 +2,32 @@ # copied here so we don't have to depend on `DiagonalArrays.jl`. 
function diaglength(a::AbstractArray) - return minimum(size(a)) + return minimum(size(a)) end function diagstride(a::AbstractArray) - s = 1 - p = 1 - for i in 1:(ndims(a) - 1) - p *= size(a, i) - s += p - end - return s + s = 1 + p = 1 + for i in 1:(ndims(a) - 1) + p *= size(a, i) + s += p + end + return s end function diagindices(a::AbstractArray) - maxdiag = if isempty(a) - 0 - else - LinearIndices(a)[CartesianIndex(ntuple(Returns(diaglength(a)), ndims(a)))] - end - return 1:diagstride(a):maxdiag + maxdiag = if isempty(a) + 0 + else + LinearIndices(a)[CartesianIndex(ntuple(Returns(diaglength(a)), ndims(a)))] + end + return 1:diagstride(a):maxdiag end -function diagindices(a::AbstractArray{<:Any,0}) - return Base.OneTo(1) +function diagindices(a::AbstractArray{<:Any, 0}) + return Base.OneTo(1) end function diagview(a::AbstractArray) - return @view a[diagindices(a)] + return @view a[diagindices(a)] end diff --git a/NDTensors/src/abstractarray/generic_array_constructors.jl b/NDTensors/src/abstractarray/generic_array_constructors.jl index 8912c81efa..6c82102e48 100644 --- a/NDTensors/src/abstractarray/generic_array_constructors.jl +++ b/NDTensors/src/abstractarray/generic_array_constructors.jl @@ -1,8 +1,8 @@ using TypeParameterAccessors: - unwrap_array_type, - specify_default_type_parameters, - specify_type_parameters, - type_parameters + unwrap_array_type, + specify_default_type_parameters, + specify_type_parameters, + type_parameters # Convert to Array, avoiding copying if possible array(a::AbstractArray) = a @@ -12,33 +12,33 @@ vector(a::AbstractVector) = a ## Warning to use these functions it is necessary to define `TypeParameterAccessors.position(::Type{<:YourArrayType}, ::typeof(ndims)))` # Implementation, catches if `ndims(arraytype) != length(dims)`. ## TODO convert ndims to `type_parameters(::, typeof(ndims))` -function generic_randn(arraytype::Type{<:AbstractArray}, dims...; rng=Random.default_rng()) - arraytype_specified = specify_type_parameters( - unwrap_array_type(arraytype), ndims, length(dims) - ) - arraytype_specified = specify_default_type_parameters(arraytype_specified) - @assert length(dims) == ndims(arraytype_specified) - data = similar(arraytype_specified, dims...) - return randn!(rng, data) +function generic_randn(arraytype::Type{<:AbstractArray}, dims...; rng = Random.default_rng()) + arraytype_specified = specify_type_parameters( + unwrap_array_type(arraytype), ndims, length(dims) + ) + arraytype_specified = specify_default_type_parameters(arraytype_specified) + @assert length(dims) == ndims(arraytype_specified) + data = similar(arraytype_specified, dims...) + return randn!(rng, data) end function generic_randn( - arraytype::Type{<:AbstractArray}, dims::Tuple; rng=Random.default_rng() -) - return generic_randn(arraytype, dims...; rng) + arraytype::Type{<:AbstractArray}, dims::Tuple; rng = Random.default_rng() + ) + return generic_randn(arraytype, dims...; rng) end # Implementation, catches if `ndims(arraytype) != length(dims)`. function generic_zeros(arraytype::Type{<:AbstractArray}, dims...) 
- arraytype_specified = specify_type_parameters( - unwrap_array_type(arraytype), ndims, length(dims) - ) - arraytype_specified = specify_default_type_parameters(arraytype_specified) - @assert length(dims) == ndims(arraytype_specified) - ElT = eltype(arraytype_specified) - return fill!(similar(arraytype_specified, dims...), zero(ElT)) + arraytype_specified = specify_type_parameters( + unwrap_array_type(arraytype), ndims, length(dims) + ) + arraytype_specified = specify_default_type_parameters(arraytype_specified) + @assert length(dims) == ndims(arraytype_specified) + ElT = eltype(arraytype_specified) + return fill!(similar(arraytype_specified, dims...), zero(ElT)) end function generic_zeros(arraytype::Type{<:AbstractArray}, dims::Tuple) - return generic_zeros(arraytype, dims...) + return generic_zeros(arraytype, dims...) end diff --git a/NDTensors/src/abstractarray/iscu.jl b/NDTensors/src/abstractarray/iscu.jl index fc7560c1e2..e6fc96d8c9 100644 --- a/NDTensors/src/abstractarray/iscu.jl +++ b/NDTensors/src/abstractarray/iscu.jl @@ -3,5 +3,5 @@ using TypeParameterAccessors: unwrap_array_type # For `isgpu`, will require a `NDTensorsGPUArrayCoreExt`. iscu(A::AbstractArray) = iscu(typeof(A)) function iscu(A::Type{<:AbstractArray}) - return (unwrap_array_type(A) == A ? false : iscu(unwrap_array_type(A))) + return (unwrap_array_type(A) == A ? false : iscu(unwrap_array_type(A))) end diff --git a/NDTensors/src/abstractarray/mul.jl b/NDTensors/src/abstractarray/mul.jl index ff5841f189..e0e552c92e 100644 --- a/NDTensors/src/abstractarray/mul.jl +++ b/NDTensors/src/abstractarray/mul.jl @@ -1,12 +1,12 @@ function mul!!(CM::AbstractArray, AM::AbstractArray, BM::AbstractArray, α, β) - CM = mul!(expose(CM), expose(AM), expose(BM), α, β) - return CM + CM = mul!(expose(CM), expose(AM), expose(BM), α, β) + return CM end ## TODO There is an issue in CUDA.jl ## When all are transpose CUDA.mul! isn't being ## Called correctly in `NDTensorsCUDAExt` function mul!!(CM::Transpose, AM::Transpose, BM::Transpose, α, β) - CM = mul!!(parent(CM), parent(BM), parent(AM), α, β) - return CM + CM = mul!!(parent(CM), parent(BM), parent(AM), α, β) + return CM end diff --git a/NDTensors/src/abstractarray/permutedims.jl b/NDTensors/src/abstractarray/permutedims.jl index 7fb99847fd..e192fa5edf 100644 --- a/NDTensors/src/abstractarray/permutedims.jl +++ b/NDTensors/src/abstractarray/permutedims.jl @@ -1,9 +1,9 @@ function permutedims!!(B::AbstractArray, A::AbstractArray, perm) - permutedims!(expose(B), expose(A), perm) - return B + permutedims!(expose(B), expose(A), perm) + return B end function permutedims!!(B::AbstractArray, A::AbstractArray, perm, f) - permutedims!(expose(B), expose(A), perm, f) - return B + permutedims!(expose(B), expose(A), perm, f) + return B end diff --git a/NDTensors/src/abstractarray/set_types.jl b/NDTensors/src/abstractarray/set_types.jl index 85a470ad50..c17f732c34 100644 --- a/NDTensors/src/abstractarray/set_types.jl +++ b/NDTensors/src/abstractarray/set_types.jl @@ -13,5 +13,5 @@ TODO: Use `Accessors.jl` notation: # `FillArray` instead. This is a stand-in # to make things work with the current design. 
function TypeParameterAccessors.set_ndims(numbertype::Type{<:Number}, ndims) - return numbertype + return numbertype end diff --git a/NDTensors/src/abstractarray/similar.jl b/NDTensors/src/abstractarray/similar.jl index dbc8c223ff..23b8329204 100644 --- a/NDTensors/src/abstractarray/similar.jl +++ b/NDTensors/src/abstractarray/similar.jl @@ -7,8 +7,8 @@ using TypeParameterAccessors: IsWrappedArray, unwrap_array_type, set_eltype, sim # This function actually allocates the data. # NDTensors.similar function similar(arraytype::Type{<:AbstractArray}, dims::Tuple) - shape = NDTensors.to_shape(arraytype, dims) - return similartype(arraytype, shape)(undef, NDTensors.to_shape(arraytype, shape)) + shape = NDTensors.to_shape(arraytype, dims) + return similartype(arraytype, shape)(undef, NDTensors.to_shape(arraytype, shape)) end # This function actually allocates the data. @@ -16,27 +16,27 @@ end # dimensions specified by integers with `Base.to_shape`. # NDTensors.similar function similar(arraytype::Type{<:AbstractArray}, dims::Dims) - return similartype(arraytype, dims)(undef, dims) + return similartype(arraytype, dims)(undef, dims) end # NDTensors.similar function similar(arraytype::Type{<:AbstractArray}, dims::DimOrInd...) - return similar(arraytype, NDTensors.to_shape(dims)) + return similar(arraytype, NDTensors.to_shape(dims)) end # Handles range inputs, `Base.to_shape` converts them to integer dimensions. # See Julia's `base/abstractarray.jl`. # NDTensors.similar function similar( - arraytype::Type{<:AbstractArray}, - shape::Tuple{Union{Integer,OneTo},Vararg{Union{Integer,OneTo}}}, -) - return NDTensors.similar(arraytype, NDTensors.to_shape(shape)) + arraytype::Type{<:AbstractArray}, + shape::Tuple{Union{Integer, OneTo}, Vararg{Union{Integer, OneTo}}}, + ) + return NDTensors.similar(arraytype, NDTensors.to_shape(shape)) end # NDTensors.similar function similar(arraytype::Type{<:AbstractArray}, eltype::Type, dims::Tuple) - return NDTensors.similar(similartype(arraytype, eltype, dims), dims) + return NDTensors.similar(similartype(arraytype, eltype, dims), dims) end # TODO: Add an input `structure` which can store things like the nonzero @@ -70,19 +70,19 @@ end # TODO: Maybe makes an empty array, i.e. `similartype(arraytype, eltype)()`? 
# NDTensors.similar function similar(arraytype::Type{<:AbstractArray}, eltype::Type) - return error("Must specify dimensions.") + return error("Must specify dimensions.") end ## NDTensors.similar for instances # NDTensors.similar function similar(array::AbstractArray, eltype::Type, dims::Tuple) - return NDTensors.similar(similartype(typeof(array), eltype), dims) + return NDTensors.similar(similartype(typeof(array), eltype), dims) end # NDTensors.similar function similar(array::AbstractArray, eltype::Type, dims::Int) - return NDTensors.similar(similartype(typeof(array), eltype), dims) + return NDTensors.similar(similartype(typeof(array), eltype), dims) end # NDTensors.similar @@ -91,7 +91,7 @@ similar(array::AbstractArray, dims::Tuple) = NDTensors.similar(typeof(array), di # Use the `size` to determine the dimensions # NDTensors.similar function similar(array::AbstractArray, eltype::Type) - return NDTensors.similar(typeof(array), eltype, size(array)) + return NDTensors.similar(typeof(array), eltype, size(array)) end # Use the `size` to determine the dimensions diff --git a/NDTensors/src/abstractarray/to_shape.jl b/NDTensors/src/abstractarray/to_shape.jl index fc3f4a0edd..61a8ee1a14 100644 --- a/NDTensors/src/abstractarray/to_shape.jl +++ b/NDTensors/src/abstractarray/to_shape.jl @@ -3,7 +3,7 @@ # with custom index types. # NDTensors.to_shape function to_shape(arraytype::Type{<:AbstractArray}, dims::Tuple) - return NDTensors.to_shape(dims) + return NDTensors.to_shape(dims) end # NDTensors.to_shape to_shape(dims) = Base.to_shape(dims) diff --git a/NDTensors/src/adapt.jl b/NDTensors/src/adapt.jl index fb68b40186..2244a5c2d1 100644 --- a/NDTensors/src/adapt.jl +++ b/NDTensors/src/adapt.jl @@ -3,7 +3,7 @@ adapt_structure(to, x::TensorStorage) = setdata(x, adapt(to, data(x))) adapt_structure(to, x::Tensor) = setstorage(x, adapt(to, storage(x))) function GPUArraysCoreExtensions.cpu(eltype::Type{<:Number}, x) - return fmap(x -> adapt(Array{eltype}, x), x) + return fmap(x -> adapt(Array{eltype}, x), x) end GPUArraysCoreExtensions.cpu(x) = fmap(x -> adapt(Array, x), x) @@ -29,9 +29,9 @@ double_precision(x) = fmap(x -> adapt(double_precision(eltype(x)), x), x) using TypeParameterAccessors: specify_type_parameters function adapt_storagetype(to::Type{<:AbstractVector}, x::Type{<:TensorStorage}) - return set_datatype(x, specify_type_parameters(to, eltype, eltype(x))) + return set_datatype(x, specify_type_parameters(to, eltype, eltype(x))) end function adapt_storagetype(to::Type{<:AbstractArray}, x::Type{<:TensorStorage}) - return set_datatype(x, specify_type_parameters(to, (ndims, eltype), (1, eltype(x)))) + return set_datatype(x, specify_type_parameters(to, (ndims, eltype), (1, eltype(x)))) end diff --git a/NDTensors/src/array/mul.jl b/NDTensors/src/array/mul.jl index d66c38f4fc..849cf13b24 100644 --- a/NDTensors/src/array/mul.jl +++ b/NDTensors/src/array/mul.jl @@ -1,4 +1,4 @@ function mul!(CM::Exposed{<:Array}, AM::Exposed{<:Array}, BM::Exposed{<:Array}, α, β) - @strided mul!(unexpose(CM), unexpose(AM), unexpose(BM), α, β) - return unexpose(CM) + @strided mul!(unexpose(CM), unexpose(AM), unexpose(BM), α, β) + return unexpose(CM) end diff --git a/NDTensors/src/array/permutedims.jl b/NDTensors/src/array/permutedims.jl index 1a2875d9da..aba193da17 100644 --- a/NDTensors/src/array/permutedims.jl +++ b/NDTensors/src/array/permutedims.jl @@ -3,22 +3,22 @@ using .Expose: Exposed, unexpose # TODO: Move to `Expose` module. 
# Create the Exposed version of Base.permutedims function permutedims(E::Exposed{<:Array}, perm) - ## Creating Mperm here to evaluate the permutation and - ## avoid returning a Stridedview - @strided Mperm = permutedims(unexpose(E), perm) - return Mperm + ## Creating Mperm here to evaluate the permutation and + ## avoid returning a Stridedview + @strided Mperm = permutedims(unexpose(E), perm) + return Mperm end function permutedims!(Edest::Exposed{<:Array}, Esrc::Exposed{<:Array}, perm) - a_dest = unexpose(Edest) - a_src = unexpose(Esrc) - @strided a_dest .= permutedims(a_src, perm) - return a_dest + a_dest = unexpose(Edest) + a_src = unexpose(Esrc) + @strided a_dest .= permutedims(a_src, perm) + return a_dest end function permutedims!(Edest::Exposed{<:Array}, Esrc::Exposed{<:Array}, perm, f) - a_dest = unexpose(Edest) - a_src = unexpose(Esrc) - @strided a_dest .= f.(a_dest, permutedims(a_src, perm)) - return a_dest + a_dest = unexpose(Edest) + a_src = unexpose(Esrc) + @strided a_dest .= f.(a_dest, permutedims(a_src, perm)) + return a_dest end diff --git a/NDTensors/src/blocksparse/adapt.jl b/NDTensors/src/blocksparse/adapt.jl index a5c40e5fe2..85027b3e0b 100644 --- a/NDTensors/src/blocksparse/adapt.jl +++ b/NDTensors/src/blocksparse/adapt.jl @@ -1,3 +1,3 @@ function set_datatype(storagetype::Type{<:BlockSparse}, datatype::Type{<:AbstractVector}) - return BlockSparse{eltype(datatype),datatype,ndims(storagetype)} + return BlockSparse{eltype(datatype), datatype, ndims(storagetype)} end diff --git a/NDTensors/src/blocksparse/blockdims.jl b/NDTensors/src/blocksparse/blockdims.jl index 8c60ec1b78..a73af992a1 100644 --- a/NDTensors/src/blocksparse/blockdims.jl +++ b/NDTensors/src/blocksparse/blockdims.jl @@ -16,12 +16,12 @@ dim(d::BlockDim) = sum(d) Dimensions used for BlockSparse NDTensors. Each entry lists the block sizes in each dimension. """ -const BlockDims{N} = NTuple{N,BlockDim} +const BlockDims{N} = NTuple{N, BlockDim} Base.ndims(ds::Type{<:BlockDims{N}}) where {N} = N function TypeParameterAccessors.similartype(::Type{<:BlockDims}, ::Type{Val{N}}) where {N} - return BlockDims{N} + return BlockDims{N} end Base.copy(ds::BlockDims) = ds @@ -32,7 +32,7 @@ dim(::BlockDims,::Integer) Return the total extent of the specified dimensions. """ function dim(ds::BlockDims{N}, i::Integer) where {N} - return sum(ds[i]) + return sum(ds[i]) end """ @@ -42,7 +42,7 @@ Return the total extents of the dense space the block dimensions live in. """ function dims(ds::BlockDims{N}) where {N} - return ntuple(i -> dim(ds, i), Val(N)) + return ntuple(i -> dim(ds, i), Val(N)) end """ @@ -52,7 +52,7 @@ Return the total extent of the dense space the block dimensions live in. """ function dim(ds::BlockDims{N}) where {N} - return prod(dims(ds)) + return prod(dims(ds)) end """ @@ -61,7 +61,7 @@ end The number of blocks of the BlockDim. """ function nblocks(ind::BlockDim) - return length(ind) + return length(ind) end """ @@ -70,7 +70,7 @@ end The number of blocks along the diagonal. """ function ndiagblocks(x) - return minimum(nblocks(x)) + return minimum(nblocks(x)) end """ @@ -79,7 +79,7 @@ end The number of blocks in the specified dimension. """ function nblocks(inds::Tuple, i::Integer) - return nblocks(inds[i]) + return nblocks(inds[i]) end """ @@ -87,8 +87,8 @@ end The number of blocks in the specified dimensions. 
""" -function nblocks(inds::Tuple, is::NTuple{N,Int}) where {N} - return ntuple(i -> nblocks(inds, is[i]), Val(N)) +function nblocks(inds::Tuple, is::NTuple{N, Int}) where {N} + return ntuple(i -> nblocks(inds, is[i]), Val(N)) end """ @@ -97,16 +97,16 @@ end A tuple of the number of blocks in each dimension. """ -function nblocks(inds::NTuple{N,<:Any}) where {N} - return ntuple(i -> nblocks(inds, i), Val(N)) +function nblocks(inds::NTuple{N, <:Any}) where {N} + return ntuple(i -> nblocks(inds, i), Val(N)) end function eachblock(inds::Tuple) - return (Block(b) for b in CartesianIndices(_Tuple(nblocks(inds)))) + return (Block(b) for b in CartesianIndices(_Tuple(nblocks(inds)))) end function eachdiagblock(inds::Tuple) - return (Block(ntuple(_ -> i, length(inds))) for i in 1:ndiagblocks(inds)) + return (Block(ntuple(_ -> i, length(inds))) for i in 1:ndiagblocks(inds)) end """ @@ -116,13 +116,13 @@ The size of the specified block in the specified dimension. """ function blockdim(ind::BlockDim, i::Integer) - return ind[i] + return ind[i] end function blockdim(ind::Integer, i) - return error( - "`blockdim(i::Integer, b)` not currently defined for non-block index $i of type `$(typeof(i))`. In the future this may be defined for `b == Block(1)` or `b == 1` as `dim(i)` and error otherwise.", - ) + return error( + "`blockdim(i::Integer, b)` not currently defined for non-block index $i of type `$(typeof(i))`. In the future this may be defined for `b == Block(1)` or `b == 1` as `dim(i)` and error otherwise.", + ) end """ @@ -132,7 +132,7 @@ The size of the specified block in the specified dimension. """ function blockdim(inds, block, i::Integer) - return blockdim(inds[i], block[i]) + return blockdim(inds[i], block[i]) end """ @@ -141,7 +141,7 @@ end The size of the specified block. """ function blockdims(inds, block) - return ntuple(i -> blockdim(inds, block, i), ValLength(inds)) + return ntuple(i -> blockdim(inds, block, i), ValLength(inds)) end """ @@ -150,7 +150,7 @@ end The total size of the specified block. """ function blockdim(inds, block) - return prod(blockdims(inds, block)) + return prod(blockdims(inds, block)) end """ @@ -159,45 +159,45 @@ end The length of the diagonal of the specified block. """ function blockdiaglength(inds, block) - return minimum(blockdims(inds, block)) + return minimum(blockdims(inds, block)) end function outer(dim1, dim2, dim3, dims...; kwargs...) - return outer(outer(dim1, dim2), dim3, dims...; kwargs...) + return outer(outer(dim1, dim2), dim3, dims...; kwargs...) end function outer(dim1::BlockDim, dim2::BlockDim) - dimR = BlockDim(undef, nblocks(dim1) * nblocks(dim2)) - for (i, t) in enumerate(Iterators.product(dim1, dim2)) - dimR[i] = prod(t) - end - return dimR + dimR = BlockDim(undef, nblocks(dim1) * nblocks(dim2)) + for (i, t) in enumerate(Iterators.product(dim1, dim2)) + dimR[i] = prod(t) + end + return dimR end function permuteblocks(dim::BlockDim, perm) - return dim[perm] + return dim[perm] end # Given a CartesianIndex in the range dims(T), get the block it is in # and the index within that block -function blockindex(T, i::Vararg{Integer,N}) where {N} - # Bounds check. 
- # Do something more robust like: - # @boundscheck Base.checkbounds_indices(Bool, map(Base.oneto, dims(T)), i) || throw_boundserror(T, i) - @boundscheck any(iszero, i) && Base.throw_boundserror(T, i) - - # Start in the (1,1,...,1) block - current_block_loc = @MVector ones(Int, N) - current_block_dims = blockdims(T, Tuple(current_block_loc)) - block_index = MVector(i) - for dim in 1:N - while block_index[dim] > current_block_dims[dim] - block_index[dim] -= current_block_dims[dim] - current_block_loc[dim] += 1 - current_block_dims = blockdims(T, Tuple(current_block_loc)) +function blockindex(T, i::Vararg{Integer, N}) where {N} + # Bounds check. + # Do something more robust like: + # @boundscheck Base.checkbounds_indices(Bool, map(Base.oneto, dims(T)), i) || throw_boundserror(T, i) + @boundscheck any(iszero, i) && Base.throw_boundserror(T, i) + + # Start in the (1,1,...,1) block + current_block_loc = @MVector ones(Int, N) + current_block_dims = blockdims(T, Tuple(current_block_loc)) + block_index = MVector(i) + for dim in 1:N + while block_index[dim] > current_block_dims[dim] + block_index[dim] -= current_block_dims[dim] + current_block_loc[dim] += 1 + current_block_dims = blockdims(T, Tuple(current_block_loc)) + end end - end - return Tuple(block_index), Block{N}(current_block_loc) + return Tuple(block_index), Block{N}(current_block_loc) end blockindex(T) = (), Block{0}() diff --git a/NDTensors/src/blocksparse/blockoffsets.jl b/NDTensors/src/blocksparse/blockoffsets.jl index cb5aeea76c..51a4deaffe 100644 --- a/NDTensors/src/blocksparse/blockoffsets.jl +++ b/NDTensors/src/blocksparse/blockoffsets.jl @@ -5,10 +5,10 @@ using SparseArrays: SparseArrays # const Blocks{N} = Vector{Block{N}} -const BlockOffset{N} = Pair{Block{N},Int} +const BlockOffset{N} = Pair{Block{N}, Int} # Use Dictionary from Dictionaries.jl (faster # iteration than Base.Dict) -const BlockOffsets{N} = Dictionary{Block{N},Int} +const BlockOffsets{N} = Dictionary{Block{N}, Int} BlockOffset(block::Block{N}, offset::Int) where {N} = BlockOffset{N}(block, offset) @@ -33,49 +33,49 @@ nzblocks(bofs::BlockOffsets) = collect(eachnzblock(bofs)) # define block ordering with reverse lexographical order function isblockless(b1::Block{N}, b2::Block{N}) where {N} - return CartesianIndex(b1) < CartesianIndex(b2) + return CartesianIndex(b1) < CartesianIndex(b2) end function isblockless(bof1::BlockOffset{N}, bof2::BlockOffset{N}) where {N} - return isblockless(nzblock(bof1), nzblock(bof2)) + return isblockless(nzblock(bof1), nzblock(bof2)) end function isblockless(bof1::BlockOffset{N}, b2::Block{N}) where {N} - return isblockless(nzblock(bof1), b2) + return isblockless(nzblock(bof1), b2) end function isblockless(b1::Block{N}, bof2::BlockOffset{N}) where {N} - return isblockless(b1, nzblock(bof2)) + return isblockless(b1, nzblock(bof2)) end function offset(bofs::BlockOffsets{N}, block::Block{N}) where {N} - if !isassigned(bofs, block) - return nothing - end - return bofs[block] + if !isassigned(bofs, block) + return nothing + end + return bofs[block] end function SparseArrays.nnz(bofs::BlockOffsets, inds) - _nnz = 0 - nnzblocks(bofs) == 0 && return _nnz - for block in eachnzblock(bofs) - _nnz += blockdim(inds, block) - end - return _nnz + _nnz = 0 + nnzblocks(bofs) == 0 && return _nnz + for block in eachnzblock(bofs) + _nnz += blockdim(inds, block) + end + return _nnz end blockoffsets(blocks::Vector{<:NTuple}, inds) = blockoffsets(Block.(blocks), inds) # TODO: should this be a constructor? 
function blockoffsets(blocks::Vector{<:Block{N}}, inds) where {N} - blockoffsets = BlockOffsets{N}() - nnz = 0 - for block in blocks - insert!(blockoffsets, block, nnz) - current_block_dim = blockdim(inds, block) - nnz += current_block_dim - end - return blockoffsets, nnz + blockoffsets = BlockOffsets{N}() + nnz = 0 + for block in blocks + insert!(blockoffsets, block, nnz) + current_block_dim = blockdim(inds, block) + nnz += current_block_dim + end + return blockoffsets, nnz end """ @@ -87,35 +87,35 @@ The offsets are along the diagonal. Assumes the blocks are allong the diagonal. """ function diagblockoffsets( - blocks::Vector{BlockT}, inds -) where {BlockT<:Union{Block{N},Tuple{Vararg{Any,N}}}} where {N} - blockoffsets = BlockOffsets{N}() - nnzdiag = 0 - for (i, block) in enumerate(blocks) - insert!(blockoffsets, Block(block), nnzdiag) - current_block_diaglength = blockdiaglength(inds, block) - nnzdiag += current_block_diaglength - end - return blockoffsets, nnzdiag + blocks::Vector{BlockT}, inds + ) where {BlockT <: Union{Block{N}, Tuple{Vararg{Any, N}}}} where {N} + blockoffsets = BlockOffsets{N}() + nnzdiag = 0 + for (i, block) in enumerate(blocks) + insert!(blockoffsets, Block(block), nnzdiag) + current_block_diaglength = blockdiaglength(inds, block) + nnzdiag += current_block_diaglength + end + return blockoffsets, nnzdiag end # Permute the blockoffsets and indices -function permutedims(boffs::BlockOffsets{N}, inds, perm::NTuple{N,Int}) where {N} - blocksR = Blocks{N}(undef, nnzblocks(boffs)) - for (i, block) in enumerate(keys(boffs)) - blocksR[i] = permute(block, perm) - end - indsR = permute(inds, perm) - blockoffsetsR, _ = blockoffsets(blocksR, indsR) - return blockoffsetsR, indsR +function permutedims(boffs::BlockOffsets{N}, inds, perm::NTuple{N, Int}) where {N} + blocksR = Blocks{N}(undef, nnzblocks(boffs)) + for (i, block) in enumerate(keys(boffs)) + blocksR[i] = permute(block, perm) + end + indsR = permute(inds, perm) + blockoffsetsR, _ = blockoffsets(blocksR, indsR) + return blockoffsetsR, indsR end -function permutedims(blocks::Vector{Block{N}}, perm::NTuple{N,Int}) where {N} - blocks_perm = Vector{Block{N}}(undef, length(blocks)) - for (i, block) in enumerate(blocks) - blocks_perm[i] = permute(block, perm) - end - return blocks_perm +function permutedims(blocks::Vector{Block{N}}, perm::NTuple{N, Int}) where {N} + blocks_perm = Vector{Block{N}}(undef, length(blocks)) + for (i, block) in enumerate(blocks) + blocks_perm[i] = permute(block, perm) + end + return blocks_perm end """ @@ -124,46 +124,46 @@ blockdim(T::BlockOffsets,nnz::Int,pos::Int) Get the block dimension of the block at position pos. 
""" function blockdim(boffs::BlockOffsets, nnz::Int, pos::Int) - if nnzblocks(boffs) == 0 - return 0 - elseif pos == nnzblocks(boffs) - return nnz - offset(boffs, pos) - end - return offset(boffs, pos + 1) - offset(boffs, pos) + if nnzblocks(boffs) == 0 + return 0 + elseif pos == nnzblocks(boffs) + return nnz - offset(boffs, pos) + end + return offset(boffs, pos + 1) - offset(boffs, pos) end function Base.union( - boffs1::BlockOffsets{N}, nnz1::Int, boffs2::BlockOffsets{N}, nnz2::Int -) where {N} - n1, n2 = 1, 1 - boffsR = BlockOffset{N}[] - current_offset = 0 - while n1 <= length(boffs1) && n2 <= length(boffs2) - if isblockless(boffs1[n1], boffs2[n2]) - push!(boffsR, BlockOffset(nzblock(boffs1[n1]), current_offset)) - current_offset += blockdim(boffs1, nnz1, n1) - n1 += 1 - elseif isblockless(boffs2[n2], boffs1[n1]) - push!(boffsR, BlockOffset(nzblock(boffs2[n2]), current_offset)) - current_offset += blockdim(boffs2, nnz2, n2) - n2 += 1 - else - push!(boffsR, BlockOffset(nzblock(boffs1[n1]), current_offset)) - current_offset += blockdim(boffs1, nnz1, n1) - n1 += 1 - n2 += 1 - end - end - if n1 <= length(boffs1) - for n in n1:length(boffs1) - push!(boffsR, BlockOffset(nzblock(boffs1[n]), current_offset)) - current_offset += blockdim(boffs1, nnz1, n) + boffs1::BlockOffsets{N}, nnz1::Int, boffs2::BlockOffsets{N}, nnz2::Int + ) where {N} + n1, n2 = 1, 1 + boffsR = BlockOffset{N}[] + current_offset = 0 + while n1 <= length(boffs1) && n2 <= length(boffs2) + if isblockless(boffs1[n1], boffs2[n2]) + push!(boffsR, BlockOffset(nzblock(boffs1[n1]), current_offset)) + current_offset += blockdim(boffs1, nnz1, n1) + n1 += 1 + elseif isblockless(boffs2[n2], boffs1[n1]) + push!(boffsR, BlockOffset(nzblock(boffs2[n2]), current_offset)) + current_offset += blockdim(boffs2, nnz2, n2) + n2 += 1 + else + push!(boffsR, BlockOffset(nzblock(boffs1[n1]), current_offset)) + current_offset += blockdim(boffs1, nnz1, n1) + n1 += 1 + n2 += 1 + end end - elseif n2 <= length(boffs2) - for n in n2:length(bofss2) - push!(boffsR, BlockOffset(nzblock(boffs2[n]), current_offset)) - current_offset += blockdim(boffs2, nnz2, n) + if n1 <= length(boffs1) + for n in n1:length(boffs1) + push!(boffsR, BlockOffset(nzblock(boffs1[n]), current_offset)) + current_offset += blockdim(boffs1, nnz1, n) + end + elseif n2 <= length(boffs2) + for n in n2:length(bofss2) + push!(boffsR, BlockOffset(nzblock(boffs2[n]), current_offset)) + current_offset += blockdim(boffs2, nnz2, n) + end end - end - return boffsR, current_offset + return boffsR, current_offset end diff --git a/NDTensors/src/blocksparse/blocksparse.jl b/NDTensors/src/blocksparse/blocksparse.jl index 7724ac10fc..d4c1412bb9 100644 --- a/NDTensors/src/blocksparse/blocksparse.jl +++ b/NDTensors/src/blocksparse/blocksparse.jl @@ -2,81 +2,81 @@ # BlockSparse storage # -struct BlockSparse{ElT,VecT,N} <: TensorStorage{ElT} - data::VecT - blockoffsets::BlockOffsets{N} # Block number-offset pairs - function BlockSparse( - data::VecT, blockoffsets::BlockOffsets{N} - ) where {VecT<:AbstractVector{ElT},N} where {ElT} - return new{ElT,VecT,N}(data, blockoffsets) - end +struct BlockSparse{ElT, VecT, N} <: TensorStorage{ElT} + data::VecT + blockoffsets::BlockOffsets{N} # Block number-offset pairs + function BlockSparse( + data::VecT, blockoffsets::BlockOffsets{N} + ) where {VecT <: AbstractVector{ElT}, N} where {ElT} + return new{ElT, VecT, N}(data, blockoffsets) + end end # TODO: Implement as `fieldtype(storagetype, :data)`. 
-datatype(::Type{<:BlockSparse{<:Any,DataT}}) where {DataT} = DataT +datatype(::Type{<:BlockSparse{<:Any, DataT}}) where {DataT} = DataT # TODO: Implement as `ndims(blockoffsetstype(storagetype))`. -Base.ndims(storagetype::Type{<:BlockSparse{<:Any,<:Any,N}}) where {N} = N +Base.ndims(storagetype::Type{<:BlockSparse{<:Any, <:Any, N}}) where {N} = N # TODO: Implement as `fieldtype(storagetype, :blockoffsets)`. blockoffsetstype(storagetype::Type{<:BlockSparse}) = BlockOffsets{ndims(storagetype)} function set_datatype(storagetype::Type{<:BlockSparse}, datatype::Type{<:AbstractVector}) - return BlockSparse{eltype(datatype),datatype,ndims(storagetype)} + return BlockSparse{eltype(datatype), datatype, ndims(storagetype)} end function TypeParameterAccessors.set_ndims(storagetype::Type{<:BlockSparse}, ndims::Int) - return BlockSparse{eltype(storagetype),datatype(storagetype),ndims} + return BlockSparse{eltype(storagetype), datatype(storagetype), ndims} end # TODO: Write as `(::Type{<:BlockSparse})()`. -BlockSparse{ElT,DataT,N}() where {ElT,DataT,N} = BlockSparse(DataT(), BlockOffsets{N}()) +BlockSparse{ElT, DataT, N}() where {ElT, DataT, N} = BlockSparse(DataT(), BlockOffsets{N}()) function BlockSparse( - datatype::Type{<:AbstractArray}, blockoffsets::BlockOffsets, dim::Integer; vargs... -) - return BlockSparse( - fill!(NDTensors.similar(datatype, dim), zero(eltype(datatype))), blockoffsets; vargs... - ) + datatype::Type{<:AbstractArray}, blockoffsets::BlockOffsets, dim::Integer; vargs... + ) + return BlockSparse( + fill!(NDTensors.similar(datatype, dim), zero(eltype(datatype))), blockoffsets; vargs... + ) end function BlockSparse( - eltype::Type{<:Number}, blockoffsets::BlockOffsets, dim::Integer; vargs... -) - return BlockSparse(Vector{eltype}, blockoffsets, dim; vargs...) + eltype::Type{<:Number}, blockoffsets::BlockOffsets, dim::Integer; vargs... + ) + return BlockSparse(Vector{eltype}, blockoffsets, dim; vargs...) end function BlockSparse(x::Number, blockoffsets::BlockOffsets, dim::Integer; vargs...) - return BlockSparse(fill(x, dim), blockoffsets; vargs...) + return BlockSparse(fill(x, dim), blockoffsets; vargs...) end function BlockSparse( - ::Type{ElT}, ::UndefInitializer, blockoffsets::BlockOffsets, dim::Integer; vargs... -) where {ElT<:Number} - return BlockSparse(Vector{ElT}(undef, dim), blockoffsets; vargs...) + ::Type{ElT}, ::UndefInitializer, blockoffsets::BlockOffsets, dim::Integer; vargs... + ) where {ElT <: Number} + return BlockSparse(Vector{ElT}(undef, dim), blockoffsets; vargs...) end function BlockSparse( - datatype::Type{<:AbstractArray}, - ::UndefInitializer, - blockoffsets::BlockOffsets, - dim::Integer; - vargs..., -) - return BlockSparse(datatype(undef, dim), blockoffsets; vargs...) + datatype::Type{<:AbstractArray}, + ::UndefInitializer, + blockoffsets::BlockOffsets, + dim::Integer; + vargs..., + ) + return BlockSparse(datatype(undef, dim), blockoffsets; vargs...) end function BlockSparse(blockoffsets::BlockOffsets, dim::Integer; vargs...) - return BlockSparse(Float64, blockoffsets, dim; vargs...) + return BlockSparse(Float64, blockoffsets, dim; vargs...) end function BlockSparse(::UndefInitializer, blockoffsets::BlockOffsets, dim::Integer; vargs...) - return BlockSparse(Float64, undef, blockoffsets, dim; vargs...) + return BlockSparse(Float64, undef, blockoffsets, dim; vargs...) 
end copy(D::BlockSparse) = BlockSparse(copy(data(D)), copy(blockoffsets(D))) setdata(B::BlockSparse, ndata) = BlockSparse(ndata, copy(blockoffsets(B))) function setdata(storagetype::Type{<:BlockSparse}, data) - return error("Not implemented, must specify block offsets as well") + return error("Not implemented, must specify block offsets as well") end # @@ -84,15 +84,15 @@ end # function randn( - StorageT::Type{<:BlockSparse{ElT}}, blockoffsets::BlockOffsets, dim::Integer -) where {ElT<:Number} - return randn(Random.default_rng(), StorageT, blockoffsets, dim) + StorageT::Type{<:BlockSparse{ElT}}, blockoffsets::BlockOffsets, dim::Integer + ) where {ElT <: Number} + return randn(Random.default_rng(), StorageT, blockoffsets, dim) end function randn( - rng::AbstractRNG, ::Type{<:BlockSparse{ElT}}, blockoffsets::BlockOffsets, dim::Integer -) where {ElT<:Number} - return BlockSparse(randn(rng, ElT, dim), blockoffsets) + rng::AbstractRNG, ::Type{<:BlockSparse{ElT}}, blockoffsets::BlockOffsets, dim::Integer + ) where {ElT <: Number} + return BlockSparse(randn(rng, ElT, dim), blockoffsets) end #function BlockSparse{ElR}(data::VecT,offsets) where {ElR,VecT<:AbstractVector{ElT}} where {ElT} @@ -102,17 +102,17 @@ end # TODO: check the offsets are the same? function copyto!(D1::BlockSparse, D2::BlockSparse) - blockoffsets(D1) ≠ blockoffsets(D1) && - error("Cannot copy between BlockSparse storages with different offsets") - copyto!(expose(data(D1)), expose(data(D2))) - return D1 + blockoffsets(D1) ≠ blockoffsets(D1) && + error("Cannot copy between BlockSparse storages with different offsets") + copyto!(expose(data(D1)), expose(data(D2))) + return D1 end Base.real(::Type{BlockSparse{T}}) where {T} = BlockSparse{real(T)} complex(::Type{BlockSparse{T}}) where {T} = BlockSparse{complex(T)} -Base.ndims(::BlockSparse{T,V,N}) where {T,V,N} = N +Base.ndims(::BlockSparse{T, V, N}) where {T, V, N} = N eltype(::BlockSparse{T}) where {T} = eltype(T) # This is necessary since for some reason inference doesn't work @@ -120,41 +120,41 @@ eltype(::BlockSparse{T}) where {T} = eltype(T) eltype(::BlockSparse{Nothing}) = Nothing eltype(::Type{BlockSparse{T}}) where {T} = eltype(T) -dense(::Type{<:BlockSparse{ElT,VecT}}) where {ElT,VecT} = Dense{ElT,VecT} +dense(::Type{<:BlockSparse{ElT, VecT}}) where {ElT, VecT} = Dense{ElT, VecT} can_contract(T1::Type{<:Dense}, T2::Type{<:BlockSparse}) = false can_contract(T1::Type{<:BlockSparse}, T2::Type{<:Dense}) = can_contract(T2, T1) function promote_rule( - ::Type{<:BlockSparse{ElT1,VecT1,N}}, ::Type{<:BlockSparse{ElT2,VecT2,N}} -) where {ElT1,ElT2,VecT1,VecT2,N} - # Promote the element types properly. - ElT = promote_type(ElT1, ElT2) - VecT = promote_type(set_eltype(VecT1, ElT), set_eltype(VecT2, ElT)) - return BlockSparse{ElT,VecT,N} + ::Type{<:BlockSparse{ElT1, VecT1, N}}, ::Type{<:BlockSparse{ElT2, VecT2, N}} + ) where {ElT1, ElT2, VecT1, VecT2, N} + # Promote the element types properly. + ElT = promote_type(ElT1, ElT2) + VecT = promote_type(set_eltype(VecT1, ElT), set_eltype(VecT2, ElT)) + return BlockSparse{ElT, VecT, N} end function promote_rule( - ::Type{<:BlockSparse{ElT1,VecT1,N1}}, ::Type{<:BlockSparse{ElT2,VecT2,N2}} -) where {ElT1,ElT2,VecT1,VecT2,N1,N2} - # Promote the element types properly. 
- ElT = promote_type(ElT1, ElT2) - VecT = promote_type(set_eltype(VecT1, ElT), set_eltype(VecT2, ElT)) - return BlockSparse{ElT,VecT,NR} where {NR} + ::Type{<:BlockSparse{ElT1, VecT1, N1}}, ::Type{<:BlockSparse{ElT2, VecT2, N2}} + ) where {ElT1, ElT2, VecT1, VecT2, N1, N2} + # Promote the element types properly. + ElT = promote_type(ElT1, ElT2) + VecT = promote_type(set_eltype(VecT1, ElT), set_eltype(VecT2, ElT)) + return BlockSparse{ElT, VecT, NR} where {NR} end function promote_rule( - ::Type{<:BlockSparse{ElT1,VecT1,N1}}, ::Type{ElT2} -) where {ElT1,VecT1<:AbstractVector{ElT1},ElT2<:Number,N1} - ElR = promote_type(ElT1, ElT2) - VecR = set_eltype(VecT1, ElR) - return BlockSparse{ElR,VecR,N1} + ::Type{<:BlockSparse{ElT1, VecT1, N1}}, ::Type{ElT2} + ) where {ElT1, VecT1 <: AbstractVector{ElT1}, ElT2 <: Number, N1} + ElR = promote_type(ElT1, ElT2) + VecR = set_eltype(VecT1, ElR) + return BlockSparse{ElR, VecR, N1} end function convert( - ::Type{<:BlockSparse{ElR,VecR,N}}, D::BlockSparse{ElD,VecD,N} -) where {ElR,VecR,N,ElD,VecD} - return setdata(D, convert(VecR, data(D))) + ::Type{<:BlockSparse{ElR, VecR, N}}, D::BlockSparse{ElD, VecD, N} + ) where {ElR, VecR, N, ElD, VecD} + return setdata(D, convert(VecR, data(D))) end """ @@ -163,8 +163,8 @@ isblocknz(T::BlockSparse, Check if the specified block is non-zero. """ -function isblocknz(T::BlockSparse{ElT,VecT,N}, block::Block{N}) where {ElT,VecT,N} - return isassigned(blockoffsets(T), block) +function isblocknz(T::BlockSparse{ElT, VecT, N}, block::Block{N}) where {ElT, VecT, N} + return isassigned(blockoffsets(T), block) end # If block is input as Tuple @@ -173,12 +173,12 @@ isblocknz(T::BlockSparse, block) = isblocknz(T, Block(block)) # Given a specified block, return a Dense storage that is a view to the data # in that block. 
Return nothing if the block is structurally zero function blockview(T::BlockSparse, block) - #error("Block must be structurally non-zero to get a view") - !isblocknz(T, block) && return nothing - blockoffsetT = offset(T, block) - blockdimT = blockdim(T, block) - dataTslice = @view data(T)[(blockoffsetT + 1):(blockoffsetT + blockdimT)] - return Dense(dataTslice) + #error("Block must be structurally non-zero to get a view") + !isblocknz(T, block) && return nothing + blockoffsetT = offset(T, block) + blockdimT = blockdim(T, block) + dataTslice = @view data(T)[(blockoffsetT + 1):(blockoffsetT + blockdimT)] + return Dense(dataTslice) end # XXX this is not well defined with new Dictionary design diff --git a/NDTensors/src/blocksparse/combiner.jl b/NDTensors/src/blocksparse/combiner.jl index e993d441c1..c85952eb85 100644 --- a/NDTensors/src/blocksparse/combiner.jl +++ b/NDTensors/src/blocksparse/combiner.jl @@ -1,162 +1,162 @@ #: function before_combiner_signs( - tensor, - tensor_labels, - indstensor, - combiner_tensor, - combiner_tensor_labels, - indscombiner_tensor, - labelsoutput_tensor, - output_tensor_inds, -) - return tensor + tensor, + tensor_labels, + indstensor, + combiner_tensor, + combiner_tensor_labels, + indscombiner_tensor, + labelsoutput_tensor, + output_tensor_inds, + ) + return tensor end function after_combiner_signs( - output_tensor, - labelsoutput_tensor, - output_tensor_inds, - combiner_tensor, - combiner_tensor_labels, - indscombiner_tensor, -) - return output_tensor + output_tensor, + labelsoutput_tensor, + output_tensor_inds, + combiner_tensor, + combiner_tensor_labels, + indscombiner_tensor, + ) + return output_tensor end function contract( - tensor::BlockSparseTensor, - tensor_labels, - combiner_tensor::CombinerTensor, - combiner_tensor_labels, -) - #@timeit_debug timer "Block sparse (un)combiner" begin - # Get the label marking the combined index - # By convention the combined index is the first one - # TODO: Consider storing the location of the combined - # index in preperation for multiple combined indices - # TODO: Use `combinedind_label(...)`, `uncombinedind_labels(...)`, etc. - cpos_in_combiner_tensor_labels = 1 - clabel = combiner_tensor_labels[cpos_in_combiner_tensor_labels] - c = combinedind(combiner_tensor) - labels_uc = deleteat(combiner_tensor_labels, cpos_in_combiner_tensor_labels) - is_combining_contraction = is_combining( - tensor, tensor_labels, combiner_tensor, combiner_tensor_labels - ) - if is_combining_contraction - output_tensor_labels = contract_labels(combiner_tensor_labels, tensor_labels) - cpos_in_output_tensor_labels = findfirst(==(clabel), output_tensor_labels) - output_tensor_labels_uc = insertat( - output_tensor_labels, labels_uc, cpos_in_output_tensor_labels + tensor::BlockSparseTensor, + tensor_labels, + combiner_tensor::CombinerTensor, + combiner_tensor_labels, ) - output_tensor_inds = contract_inds( - inds(combiner_tensor), - combiner_tensor_labels, - inds(tensor), - tensor_labels, - output_tensor_labels, + #@timeit_debug timer "Block sparse (un)combiner" begin + # Get the label marking the combined index + # By convention the combined index is the first one + # TODO: Consider storing the location of the combined + # index in preperation for multiple combined indices + # TODO: Use `combinedind_label(...)`, `uncombinedind_labels(...)`, etc. 
+ cpos_in_combiner_tensor_labels = 1 + clabel = combiner_tensor_labels[cpos_in_combiner_tensor_labels] + c = combinedind(combiner_tensor) + labels_uc = deleteat(combiner_tensor_labels, cpos_in_combiner_tensor_labels) + is_combining_contraction = is_combining( + tensor, tensor_labels, combiner_tensor, combiner_tensor_labels ) + if is_combining_contraction + output_tensor_labels = contract_labels(combiner_tensor_labels, tensor_labels) + cpos_in_output_tensor_labels = findfirst(==(clabel), output_tensor_labels) + output_tensor_labels_uc = insertat( + output_tensor_labels, labels_uc, cpos_in_output_tensor_labels + ) + output_tensor_inds = contract_inds( + inds(combiner_tensor), + combiner_tensor_labels, + inds(tensor), + tensor_labels, + output_tensor_labels, + ) - #: - tensor = before_combiner_signs( - tensor, - tensor_labels, - inds(tensor), - combiner_tensor, - combiner_tensor_labels, - inds(combiner_tensor), - output_tensor_labels, - output_tensor_inds, - ) + #: + tensor = before_combiner_signs( + tensor, + tensor_labels, + inds(tensor), + combiner_tensor, + combiner_tensor_labels, + inds(combiner_tensor), + output_tensor_labels, + output_tensor_inds, + ) - perm = getperm(output_tensor_labels_uc, tensor_labels) - ucpos_in_tensor_labels = Tuple(findall(x -> x in labels_uc, tensor_labels)) - output_tensor = permutedims_combine( - tensor, - output_tensor_inds, - perm, - ucpos_in_tensor_labels, - blockperm(combiner_tensor), - blockcomb(combiner_tensor), - ) - return output_tensor - else # Uncombining - output_tensor_labels = tensor_labels - cpos_in_output_tensor_labels = findfirst(==(clabel), output_tensor_labels) - # Move combined index to first position - if cpos_in_output_tensor_labels != 1 - output_tensor_labels_orig = output_tensor_labels - output_tensor_labels = deleteat(output_tensor_labels, cpos_in_output_tensor_labels) - output_tensor_labels = insertafter(output_tensor_labels, clabel, 0) - cpos_in_output_tensor_labels = 1 - perm = getperm(output_tensor_labels, output_tensor_labels_orig) - tensor = permutedims(tensor, perm) - tensor_labels = permute(tensor_labels, perm) - end - output_tensor_labels_uc = insertat( - output_tensor_labels, labels_uc, cpos_in_output_tensor_labels - ) - output_tensor_inds_uc = contract_inds( - inds(combiner_tensor), - combiner_tensor_labels, - inds(tensor), - tensor_labels, - output_tensor_labels_uc, - ) + perm = getperm(output_tensor_labels_uc, tensor_labels) + ucpos_in_tensor_labels = Tuple(findall(x -> x in labels_uc, tensor_labels)) + output_tensor = permutedims_combine( + tensor, + output_tensor_inds, + perm, + ucpos_in_tensor_labels, + blockperm(combiner_tensor), + blockcomb(combiner_tensor), + ) + return output_tensor + else # Uncombining + output_tensor_labels = tensor_labels + cpos_in_output_tensor_labels = findfirst(==(clabel), output_tensor_labels) + # Move combined index to first position + if cpos_in_output_tensor_labels != 1 + output_tensor_labels_orig = output_tensor_labels + output_tensor_labels = deleteat(output_tensor_labels, cpos_in_output_tensor_labels) + output_tensor_labels = insertafter(output_tensor_labels, clabel, 0) + cpos_in_output_tensor_labels = 1 + perm = getperm(output_tensor_labels, output_tensor_labels_orig) + tensor = permutedims(tensor, perm) + tensor_labels = permute(tensor_labels, perm) + end + output_tensor_labels_uc = insertat( + output_tensor_labels, labels_uc, cpos_in_output_tensor_labels + ) + output_tensor_inds_uc = contract_inds( + inds(combiner_tensor), + combiner_tensor_labels, + inds(tensor), + tensor_labels, + 
output_tensor_labels_uc, + ) - # : - tensor = before_combiner_signs( - tensor, - tensor_labels, - inds(tensor), - combiner_tensor, - combiner_tensor_labels, - inds(combiner_tensor), - output_tensor_labels_uc, - output_tensor_inds_uc, - ) + # : + tensor = before_combiner_signs( + tensor, + tensor_labels, + inds(tensor), + combiner_tensor, + combiner_tensor_labels, + inds(combiner_tensor), + output_tensor_labels_uc, + output_tensor_inds_uc, + ) - output_tensor = uncombine( - tensor, - tensor_labels, - output_tensor_inds_uc, - output_tensor_labels_uc, - cpos_in_output_tensor_labels, - blockperm(combiner_tensor), - blockcomb(combiner_tensor), - ) + output_tensor = uncombine( + tensor, + tensor_labels, + output_tensor_inds_uc, + output_tensor_labels_uc, + cpos_in_output_tensor_labels, + blockperm(combiner_tensor), + blockcomb(combiner_tensor), + ) - # : - output_tensor = after_combiner_signs( - output_tensor, - output_tensor_labels_uc, - output_tensor_inds_uc, - combiner_tensor, - combiner_tensor_labels, - inds(combiner_tensor), - ) + # : + output_tensor = after_combiner_signs( + output_tensor, + output_tensor_labels_uc, + output_tensor_inds_uc, + combiner_tensor, + combiner_tensor_labels, + inds(combiner_tensor), + ) - return output_tensor - end - return invalid_combiner_contraction_error( - combiner_tensor, tensor_labels, tensor, tensor_labels - ) + return output_tensor + end + return invalid_combiner_contraction_error( + combiner_tensor, tensor_labels, tensor, tensor_labels + ) end function contract( - combiner_tensor::CombinerTensor, - combiner_tensor_labels, - tensor::BlockSparseTensor, - tensor_labels, -) - return contract(tensor, tensor_labels, combiner_tensor, combiner_tensor_labels) + combiner_tensor::CombinerTensor, + combiner_tensor_labels, + tensor::BlockSparseTensor, + tensor_labels, + ) + return contract(tensor, tensor_labels, combiner_tensor, combiner_tensor_labels) end # Special case when no indices are combined # TODO: No copy? Maybe use `AllowAlias`. 
function contract( - tensor::BlockSparseTensor, - tensor_labels, - combiner_tensor::CombinerTensor{<:Any,0}, - combiner_tensor_labels, -) - return copy(tensor) + tensor::BlockSparseTensor, + tensor_labels, + combiner_tensor::CombinerTensor{<:Any, 0}, + combiner_tensor_labels, + ) + return copy(tensor) end diff --git a/NDTensors/src/blocksparse/contract.jl b/NDTensors/src/blocksparse/contract.jl index ac8c0771b2..a77f46d8e6 100644 --- a/NDTensors/src/blocksparse/contract.jl +++ b/NDTensors/src/blocksparse/contract.jl @@ -1,74 +1,74 @@ using .BackendSelection: Algorithm, @Algorithm_str function contract( - tensor1::BlockSparseTensor, - labelstensor1, - tensor2::BlockSparseTensor, - labelstensor2, - labelsR=contract_labels(labelstensor1, labelstensor2), -) - R, contraction_plan = contraction_output( - tensor1, labelstensor1, tensor2, labelstensor2, labelsR - ) - R = contract!( - R, labelsR, tensor1, labelstensor1, tensor2, labelstensor2, contraction_plan - ) - return R + tensor1::BlockSparseTensor, + labelstensor1, + tensor2::BlockSparseTensor, + labelstensor2, + labelsR = contract_labels(labelstensor1, labelstensor2), + ) + R, contraction_plan = contraction_output( + tensor1, labelstensor1, tensor2, labelstensor2, labelsR + ) + R = contract!( + R, labelsR, tensor1, labelstensor1, tensor2, labelstensor2, contraction_plan + ) + return R end # Determine the contraction output and block contractions function contraction_output( - tensor1::BlockSparseTensor, - labelstensor1, - tensor2::BlockSparseTensor, - labelstensor2, - labelsR, -) - indsR = contract_inds(inds(tensor1), labelstensor1, inds(tensor2), labelstensor2, labelsR) - TensorR = contraction_output_type(typeof(tensor1), typeof(tensor2), indsR) - blockoffsetsR, contraction_plan = contract_blockoffsets( - blockoffsets(tensor1), - inds(tensor1), - labelstensor1, - blockoffsets(tensor2), - inds(tensor2), - labelstensor2, - indsR, - labelsR, - ) - R = similar(TensorR, blockoffsetsR, indsR) - return R, contraction_plan + tensor1::BlockSparseTensor, + labelstensor1, + tensor2::BlockSparseTensor, + labelstensor2, + labelsR, + ) + indsR = contract_inds(inds(tensor1), labelstensor1, inds(tensor2), labelstensor2, labelsR) + TensorR = contraction_output_type(typeof(tensor1), typeof(tensor2), indsR) + blockoffsetsR, contraction_plan = contract_blockoffsets( + blockoffsets(tensor1), + inds(tensor1), + labelstensor1, + blockoffsets(tensor2), + inds(tensor2), + labelstensor2, + indsR, + labelsR, + ) + R = similar(TensorR, blockoffsetsR, indsR) + return R, contraction_plan end function contract_blockoffsets( - boffs1::BlockOffsets, inds1, labels1, boffs2::BlockOffsets, inds2, labels2, indsR, labelsR -) - alg = Algorithm"sequential"() - if using_threaded_blocksparse() && nthreads() > 1 - alg = Algorithm"threaded_threads"() - end - return contract_blockoffsets( - alg, boffs1, inds1, labels1, boffs2, inds2, labels2, indsR, labelsR - ) + boffs1::BlockOffsets, inds1, labels1, boffs2::BlockOffsets, inds2, labels2, indsR, labelsR + ) + alg = Algorithm"sequential"() + if using_threaded_blocksparse() && nthreads() > 1 + alg = Algorithm"threaded_threads"() + end + return contract_blockoffsets( + alg, boffs1, inds1, labels1, boffs2, inds2, labels2, indsR, labelsR + ) end function contract!( - R::BlockSparseTensor, - labelsR, - tensor1::BlockSparseTensor, - labelstensor1, - tensor2::BlockSparseTensor, - labelstensor2, - contraction_plan, -) - if isempty(contraction_plan) - return R - end - alg = Algorithm"sequential"() - if using_threaded_blocksparse() && nthreads() 
> 1 - alg = Algorithm"threaded_folds"() - end - return contract!( - alg, R, labelsR, tensor1, labelstensor1, tensor2, labelstensor2, contraction_plan - ) + R::BlockSparseTensor, + labelsR, + tensor1::BlockSparseTensor, + labelstensor1, + tensor2::BlockSparseTensor, + labelstensor2, + contraction_plan, + ) + if isempty(contraction_plan) + return R + end + alg = Algorithm"sequential"() + if using_threaded_blocksparse() && nthreads() > 1 + alg = Algorithm"threaded_folds"() + end + return contract!( + alg, R, labelsR, tensor1, labelstensor1, tensor2, labelstensor2, contraction_plan + ) end diff --git a/NDTensors/src/blocksparse/contract_generic.jl b/NDTensors/src/blocksparse/contract_generic.jl index 873e1220f4..3634eebde4 100644 --- a/NDTensors/src/blocksparse/contract_generic.jl +++ b/NDTensors/src/blocksparse/contract_generic.jl @@ -1,73 +1,73 @@ # A generic version that is used by both # "threaded_folds" and "threaded"threads". function contract_blockoffsets( - alg::Algorithm, - boffs1::BlockOffsets, - inds1, - labels1, - boffs2::BlockOffsets, - inds2, - labels2, - indsR, - labelsR, -) - NR = length(labelsR) - ValNR = ValLength(labelsR) - labels1_to_labels2, labels1_to_labelsR, labels2_to_labelsR = contract_labels( - labels1, labels2, labelsR - ) - contraction_plan = contract_blocks( - alg, boffs1, boffs2, labels1_to_labels2, labels1_to_labelsR, labels2_to_labelsR, ValNR - ) - blockoffsetsR = BlockOffsets{NR}() - nnzR = 0 - for (_, _, blockR) in contraction_plan - if !isassigned(blockoffsetsR, blockR) - insert!(blockoffsetsR, blockR, nnzR) - nnzR += blockdim(indsR, blockR) + alg::Algorithm, + boffs1::BlockOffsets, + inds1, + labels1, + boffs2::BlockOffsets, + inds2, + labels2, + indsR, + labelsR, + ) + NR = length(labelsR) + ValNR = ValLength(labelsR) + labels1_to_labels2, labels1_to_labelsR, labels2_to_labelsR = contract_labels( + labels1, labels2, labelsR + ) + contraction_plan = contract_blocks( + alg, boffs1, boffs2, labels1_to_labels2, labels1_to_labelsR, labels2_to_labelsR, ValNR + ) + blockoffsetsR = BlockOffsets{NR}() + nnzR = 0 + for (_, _, blockR) in contraction_plan + if !isassigned(blockoffsetsR, blockR) + insert!(blockoffsetsR, blockR, nnzR) + nnzR += blockdim(indsR, blockR) + end end - end - return blockoffsetsR, contraction_plan + return blockoffsetsR, contraction_plan end # A generic version making use of `Folds.jl` which # can take various Executor backends. # Used for sequential and threaded contract functions. function contract!( - R::BlockSparseTensor, - labelsR, - tensor1::BlockSparseTensor, - labelstensor1, - tensor2::BlockSparseTensor, - labelstensor2, - contraction_plan, - executor, -) - # Group the contraction plan by the output block, - # since the sets of contractions into the same block - # must be performed sequentially to reduce over those - # sets of contractions properly (and avoid race conditions). - # Same as: - # ```julia - # grouped_contraction_plan = group(last, contraction_plan) - # ``` - # but more efficient since we know the groups/keys already, - # since they are the nonzero blocks of the output tensor `R`. 
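# Illustrative sketch (not patch content) of the "group the contraction plan by the output block"
# step described in the comment above, using a plain Dict and tuples in place of `Block`s and the
# block-keyed dictionary the real code builds:
plan = [((1, 1), (1, 2), (1, 2)), ((1, 3), (3, 2), (1, 2)), ((2, 2), (2, 1), (2, 1))]
grouped = Dict{NTuple{2, Int}, Vector{eltype(plan)}}()
for entry in plan
    push!(get!(grouped, last(entry), eltype(plan)[]), entry)
end
# grouped[(1, 2)] now holds both contractions that accumulate into output block (1, 2); running
# each group sequentially is what avoids races when the groups themselves run in parallel.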
- grouped_contraction_plan = map(_ -> empty(contraction_plan), eachnzblock(R)) - for block_contraction in contraction_plan - push!(grouped_contraction_plan[last(block_contraction)], block_contraction) - end - _contract!( - R, - labelsR, - tensor1, - labelstensor1, - tensor2, - labelstensor2, - grouped_contraction_plan, - executor, - ) - return R + R::BlockSparseTensor, + labelsR, + tensor1::BlockSparseTensor, + labelstensor1, + tensor2::BlockSparseTensor, + labelstensor2, + contraction_plan, + executor, + ) + # Group the contraction plan by the output block, + # since the sets of contractions into the same block + # must be performed sequentially to reduce over those + # sets of contractions properly (and avoid race conditions). + # Same as: + # ```julia + # grouped_contraction_plan = group(last, contraction_plan) + # ``` + # but more efficient since we know the groups/keys already, + # since they are the nonzero blocks of the output tensor `R`. + grouped_contraction_plan = map(_ -> empty(contraction_plan), eachnzblock(R)) + for block_contraction in contraction_plan + push!(grouped_contraction_plan[last(block_contraction)], block_contraction) + end + _contract!( + R, + labelsR, + tensor1, + labelstensor1, + tensor2, + labelstensor2, + grouped_contraction_plan, + executor, + ) + return R end using .Expose: expose @@ -75,54 +75,54 @@ using .Expose: expose # since `Folds`/`FLoops` is not type stable: # https://discourse.julialang.org/t/type-instability-in-floop-reduction/68598 function _contract!( - R::BlockSparseTensor, - labelsR, - tensor1::BlockSparseTensor, - labelstensor1, - tensor2::BlockSparseTensor, - labelstensor2, - grouped_contraction_plan, - executor, -) - Folds.foreach(grouped_contraction_plan.values, executor) do contraction_plan_group - # Start by overwriting the block: - # R .= α .* (tensor1 * tensor2) - β = zero(eltype(R)) - for block_contraction in contraction_plan_group - blocktensor1, blocktensor2, blockR = block_contraction - - # : - α = compute_alpha( - eltype(R), + R::BlockSparseTensor, labelsR, - blockR, - inds(R), + tensor1::BlockSparseTensor, labelstensor1, - blocktensor1, - inds(tensor1), + tensor2::BlockSparseTensor, labelstensor2, - blocktensor2, - inds(tensor2), - ) + grouped_contraction_plan, + executor, + ) + Folds.foreach(grouped_contraction_plan.values, executor) do contraction_plan_group + # Start by overwriting the block: + # R .= α .* (tensor1 * tensor2) + β = zero(eltype(R)) + for block_contraction in contraction_plan_group + blocktensor1, blocktensor2, blockR = block_contraction - contract!( - expose(R[blockR]), - labelsR, - expose(tensor1[blocktensor1]), - labelstensor1, - expose(tensor2[blocktensor2]), - labelstensor2, - α, - β, - ) + # : + α = compute_alpha( + eltype(R), + labelsR, + blockR, + inds(R), + labelstensor1, + blocktensor1, + inds(tensor1), + labelstensor2, + blocktensor2, + inds(tensor2), + ) + + contract!( + expose(R[blockR]), + labelsR, + expose(tensor1[blocktensor1]), + labelstensor1, + expose(tensor2[blocktensor2]), + labelstensor2, + α, + β, + ) - if iszero(β) - # After the block has been overwritten, - # add into it: - # R .= α .* (tensor1 * tensor2) .+ β .* R - β = one(eltype(R)) - end + if iszero(β) + # After the block has been overwritten, + # add into it: + # R .= α .* (tensor1 * tensor2) .+ β .* R + β = one(eltype(R)) + end + end end - end - return nothing + return nothing end diff --git a/NDTensors/src/blocksparse/contract_sequential.jl b/NDTensors/src/blocksparse/contract_sequential.jl index 19f5e507f8..7583c1ecf7 100644 --- 
a/NDTensors/src/blocksparse/contract_sequential.jl +++ b/NDTensors/src/blocksparse/contract_sequential.jl @@ -1,105 +1,105 @@ function contract_blockoffsets( - ::Algorithm"sequential", - boffs1::BlockOffsets, - inds1, - labels1, - boffs2::BlockOffsets, - inds2, - labels2, - indsR, - labelsR, -) - N1 = length(blocktype(boffs1)) - N2 = length(blocktype(boffs2)) - NR = length(labelsR) - ValNR = ValLength(labelsR) - labels1_to_labels2, labels1_to_labelsR, labels2_to_labelsR = contract_labels( - labels1, labels2, labelsR - ) - blockoffsetsR = BlockOffsets{NR}() - nnzR = 0 - contraction_plan = Tuple{Block{N1},Block{N2},Block{NR}}[] - # Reserve some capacity - # In theory the maximum is length(boffs1) * length(boffs2) - # but in practice that is too much - sizehint!(contraction_plan, max(length(boffs1), length(boffs2))) - for block1 in keys(boffs1) - for block2 in keys(boffs2) - if are_blocks_contracted(block1, block2, labels1_to_labels2) - blockR = contract_blocks( - block1, labels1_to_labelsR, block2, labels2_to_labelsR, ValNR - ) - push!(contraction_plan, (block1, block2, blockR)) - if !isassigned(blockoffsetsR, blockR) - insert!(blockoffsetsR, blockR, nnzR) - nnzR += blockdim(indsR, blockR) + ::Algorithm"sequential", + boffs1::BlockOffsets, + inds1, + labels1, + boffs2::BlockOffsets, + inds2, + labels2, + indsR, + labelsR, + ) + N1 = length(blocktype(boffs1)) + N2 = length(blocktype(boffs2)) + NR = length(labelsR) + ValNR = ValLength(labelsR) + labels1_to_labels2, labels1_to_labelsR, labels2_to_labelsR = contract_labels( + labels1, labels2, labelsR + ) + blockoffsetsR = BlockOffsets{NR}() + nnzR = 0 + contraction_plan = Tuple{Block{N1}, Block{N2}, Block{NR}}[] + # Reserve some capacity + # In theory the maximum is length(boffs1) * length(boffs2) + # but in practice that is too much + sizehint!(contraction_plan, max(length(boffs1), length(boffs2))) + for block1 in keys(boffs1) + for block2 in keys(boffs2) + if are_blocks_contracted(block1, block2, labels1_to_labels2) + blockR = contract_blocks( + block1, labels1_to_labelsR, block2, labels2_to_labelsR, ValNR + ) + push!(contraction_plan, (block1, block2, blockR)) + if !isassigned(blockoffsetsR, blockR) + insert!(blockoffsetsR, blockR, nnzR) + nnzR += blockdim(indsR, blockR) + end + end end - end end - end - return blockoffsetsR, contraction_plan + return blockoffsetsR, contraction_plan end function contract!( - ::Algorithm"sequential", - R::BlockSparseTensor, - labelsR, - tensor1::BlockSparseTensor, - labelstensor1, - tensor2::BlockSparseTensor, - labelstensor2, - contraction_plan, -) - executor = SequentialEx() - return contract!( - R, labelsR, tensor1, labelstensor1, tensor2, labelstensor2, contraction_plan, executor - ) + ::Algorithm"sequential", + R::BlockSparseTensor, + labelsR, + tensor1::BlockSparseTensor, + labelstensor1, + tensor2::BlockSparseTensor, + labelstensor2, + contraction_plan, + ) + executor = SequentialEx() + return contract!( + R, labelsR, tensor1, labelstensor1, tensor2, labelstensor2, contraction_plan, executor + ) end using .Expose: expose ########################################################################### # Old version # TODO: DELETE, keeping around for now for testing/benchmarking. 
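# Illustrative sketch (not patch content) of the sequential plan construction above in miniature:
# plain tuples stand in for `Block`s, and a fixed "second block index of block1 matches the first
# block index of block2" rule replaces the label-position maps.
blocks1 = [(1, 1), (2, 3)]
blocks2 = [(1, 2), (3, 1)]
plan = Tuple{NTuple{2, Int}, NTuple{2, Int}, NTuple{2, Int}}[]
sizehint!(plan, max(length(blocks1), length(blocks2)))  # same capacity heuristic as above
for b1 in blocks1, b2 in blocks2
    if b1[2] == b2[1]  # the blocks line up along the contracted dimension
        push!(plan, (b1, b2, (b1[1], b2[2])))
    end
end
# plan == [((1, 1), (1, 2), (1, 2)), ((2, 3), (3, 1), (2, 1))]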
function contract!( - ::Algorithm"sequential_deprecated", - R::BlockSparseTensor{ElR,NR}, - labelsR, - T1::BlockSparseTensor{ElT1,N1}, - labelsT1, - T2::BlockSparseTensor{ElT2,N2}, - labelsT2, - contraction_plan, -) where {ElR,ElT1,ElT2,N1,N2,NR} - if isempty(contraction_plan) - return R - end - if using_threaded_blocksparse() && nthreads() > 1 - _contract_threaded_deprecated!(R, labelsR, T1, labelsT1, T2, labelsT2, contraction_plan) - return R - end - already_written_to = Dict{Block{NR},Bool}() - indsR = inds(R) - indsT1 = inds(T1) - indsT2 = inds(T2) - # In R .= α .* (T1 * T2) .+ β .* R - for (block1, block2, blockR) in contraction_plan + ::Algorithm"sequential_deprecated", + R::BlockSparseTensor{ElR, NR}, + labelsR, + T1::BlockSparseTensor{ElT1, N1}, + labelsT1, + T2::BlockSparseTensor{ElT2, N2}, + labelsT2, + contraction_plan, + ) where {ElR, ElT1, ElT2, N1, N2, NR} + if isempty(contraction_plan) + return R + end + if using_threaded_blocksparse() && nthreads() > 1 + _contract_threaded_deprecated!(R, labelsR, T1, labelsT1, T2, labelsT2, contraction_plan) + return R + end + already_written_to = Dict{Block{NR}, Bool}() + indsR = inds(R) + indsT1 = inds(T1) + indsT2 = inds(T2) + # In R .= α .* (T1 * T2) .+ β .* R + for (block1, block2, blockR) in contraction_plan - # - α = compute_alpha( - ElR, labelsR, blockR, indsR, labelsT1, block1, indsT1, labelsT2, block2, indsT2 - ) + # + α = compute_alpha( + ElR, labelsR, blockR, indsR, labelsT1, block1, indsT1, labelsT2, block2, indsT2 + ) - T1block = T1[block1] - T2block = T2[block2] - Rblock = R[blockR] - β = one(ElR) - if !haskey(already_written_to, blockR) - already_written_to[blockR] = true - # Overwrite the block of R - β = zero(ElR) + T1block = T1[block1] + T2block = T2[block2] + Rblock = R[blockR] + β = one(ElR) + if !haskey(already_written_to, blockR) + already_written_to[blockR] = true + # Overwrite the block of R + β = zero(ElR) + end + contract!( + expose(Rblock), labelsR, expose(T1block), labelsT1, expose(T2block), labelsT2, α, β + ) end - contract!( - expose(Rblock), labelsR, expose(T1block), labelsT1, expose(T2block), labelsT2, α, β - ) - end - return R + return R end diff --git a/NDTensors/src/blocksparse/contract_threaded.jl b/NDTensors/src/blocksparse/contract_threaded.jl index 9a096b2676..ad1ddb17ef 100644 --- a/NDTensors/src/blocksparse/contract_threaded.jl +++ b/NDTensors/src/blocksparse/contract_threaded.jl @@ -1,91 +1,91 @@ using .Expose: expose function contract_blocks( - alg::Algorithm"threaded_threads", - boffs1, - boffs2, - labels1_to_labels2, - labels1_to_labelsR, - labels2_to_labelsR, - ValNR::Val{NR}, -) where {NR} - N1 = length(blocktype(boffs1)) - N2 = length(blocktype(boffs2)) - blocks1 = keys(boffs1) - blocks2 = keys(boffs2) - T = Tuple{Block{N1},Block{N2},Block{NR}} - return if length(blocks1) > length(blocks2) - tasks = map( - Iterators.partition(blocks1, max(1, length(blocks1) ÷ nthreads())) - ) do blocks1_partition - @spawn begin - block_contractions = T[] - for block1 in blocks1_partition - for block2 in blocks2 - block_contraction = maybe_contract_blocks( - block1, - block2, - labels1_to_labels2, - labels1_to_labelsR, - labels2_to_labelsR, - ValNR, - ) - if !isnothing(block_contraction) - push!(block_contractions, block_contraction) + alg::Algorithm"threaded_threads", + boffs1, + boffs2, + labels1_to_labels2, + labels1_to_labelsR, + labels2_to_labelsR, + ValNR::Val{NR}, + ) where {NR} + N1 = length(blocktype(boffs1)) + N2 = length(blocktype(boffs2)) + blocks1 = keys(boffs1) + blocks2 = keys(boffs2) + T = 
Tuple{Block{N1}, Block{N2}, Block{NR}} + return if length(blocks1) > length(blocks2) + tasks = map( + Iterators.partition(blocks1, max(1, length(blocks1) ÷ nthreads())) + ) do blocks1_partition + @spawn begin + block_contractions = T[] + for block1 in blocks1_partition + for block2 in blocks2 + block_contraction = maybe_contract_blocks( + block1, + block2, + labels1_to_labels2, + labels1_to_labelsR, + labels2_to_labelsR, + ValNR, + ) + if !isnothing(block_contraction) + push!(block_contractions, block_contraction) + end + end + end + return block_contractions end - end end - return block_contractions - end - end - all_block_contractions = T[] - for task in tasks - append!(all_block_contractions, fetch(task)) - end - return all_block_contractions - else - tasks = map( - Iterators.partition(blocks2, max(1, length(blocks2) ÷ nthreads())) - ) do blocks2_partition - @spawn begin - block_contractions = T[] - for block2 in blocks2_partition - for block1 in blocks1 - block_contraction = maybe_contract_blocks( - block1, - block2, - labels1_to_labels2, - labels1_to_labelsR, - labels2_to_labelsR, - ValNR, - ) - if !isnothing(block_contraction) - push!(block_contractions, block_contraction) + all_block_contractions = T[] + for task in tasks + append!(all_block_contractions, fetch(task)) + end + return all_block_contractions + else + tasks = map( + Iterators.partition(blocks2, max(1, length(blocks2) ÷ nthreads())) + ) do blocks2_partition + @spawn begin + block_contractions = T[] + for block2 in blocks2_partition + for block1 in blocks1 + block_contraction = maybe_contract_blocks( + block1, + block2, + labels1_to_labels2, + labels1_to_labelsR, + labels2_to_labelsR, + ValNR, + ) + if !isnothing(block_contraction) + push!(block_contractions, block_contraction) + end + end + end + return block_contractions end - end end - return block_contractions - end - end - all_block_contractions = T[] - for task in tasks - append!(all_block_contractions, fetch(task)) + all_block_contractions = T[] + for task in tasks + append!(all_block_contractions, fetch(task)) + end + return all_block_contractions end - return all_block_contractions - end end function contract!( - ::Algorithm"threaded_folds", - R::BlockSparseTensor, - labelsR, - tensor1::BlockSparseTensor, - labelstensor1, - tensor2::BlockSparseTensor, - labelstensor2, - contraction_plan, -) - executor = ThreadedEx() - return contract!( - R, labelsR, tensor1, labelstensor1, tensor2, labelstensor2, contraction_plan, executor - ) + ::Algorithm"threaded_folds", + R::BlockSparseTensor, + labelsR, + tensor1::BlockSparseTensor, + labelstensor1, + tensor2::BlockSparseTensor, + labelstensor2, + contraction_plan, + ) + executor = ThreadedEx() + return contract!( + R, labelsR, tensor1, labelstensor1, tensor2, labelstensor2, contraction_plan, executor + ) end diff --git a/NDTensors/src/blocksparse/contract_utilities.jl b/NDTensors/src/blocksparse/contract_utilities.jl index f2252ab87e..cb66536c74 100644 --- a/NDTensors/src/blocksparse/contract_utilities.jl +++ b/NDTensors/src/blocksparse/contract_utilities.jl @@ -1,34 +1,34 @@ # function compute_alpha( - ElR, - labelsR, - blockR, - indsR, - labelstensor1, - blocktensor1, - indstensor1, - labelstensor2, - blocktensor2, - indstensor2, -) - return one(ElR) + ElR, + labelsR, + blockR, + indsR, + labelstensor1, + blocktensor1, + indstensor1, + labelstensor2, + blocktensor2, + indstensor2, + ) + return one(ElR) end function maybe_contract_blocks( - block1, block2, labels1_to_labels2, labels1_to_labelsR, labels2_to_labelsR, ValNR 
-) - if are_blocks_contracted(block1, block2, labels1_to_labels2) - blockR = contract_blocks(block1, labels1_to_labelsR, block2, labels2_to_labelsR, ValNR) - return block1, block2, blockR - end - return nothing + block1, block2, labels1_to_labels2, labels1_to_labelsR, labels2_to_labelsR, ValNR + ) + if are_blocks_contracted(block1, block2, labels1_to_labels2) + blockR = contract_blocks(block1, labels1_to_labelsR, block2, labels2_to_labelsR, ValNR) + return block1, block2, blockR + end + return nothing end function contract_labels(labels1, labels2, labelsR) - labels1_to_labels2 = find_matching_positions(labels1, labels2) - labels1_to_labelsR = find_matching_positions(labels1, labelsR) - labels2_to_labelsR = find_matching_positions(labels2, labelsR) - return labels1_to_labels2, labels1_to_labelsR, labels2_to_labelsR + labels1_to_labels2 = find_matching_positions(labels1, labels2) + labels1_to_labelsR = find_matching_positions(labels1, labelsR) + labels2_to_labelsR = find_matching_positions(labels2, labelsR) + return labels1_to_labels2, labels1_to_labelsR, labels2_to_labelsR end """ @@ -42,49 +42,49 @@ For example, for all t1[pos1] == t2[pos2], t1_to_t2[pos1] == pos2, otherwise t1_to_t2[pos1] == 0. """ function find_matching_positions(t1, t2) - t1_to_t2 = @MVector zeros(Int, length(t1)) - for pos1 in 1:length(t1) - for pos2 in 1:length(t2) - if t1[pos1] == t2[pos2] - t1_to_t2[pos1] = pos2 - end + t1_to_t2 = @MVector zeros(Int, length(t1)) + for pos1 in 1:length(t1) + for pos2 in 1:length(t2) + if t1[pos1] == t2[pos2] + t1_to_t2[pos1] = pos2 + end + end end - end - return Tuple(t1_to_t2) + return Tuple(t1_to_t2) end function are_blocks_contracted(block1::Block, block2::Block, labels1_to_labels2::Tuple) - t1 = Tuple(block1) - t2 = Tuple(block2) - for i1 in 1:length(block1) - i2 = @inbounds labels1_to_labels2[i1] - if i2 > 0 - # This dimension is contracted - if @inbounds t1[i1] != @inbounds t2[i2] - return false - end + t1 = Tuple(block1) + t2 = Tuple(block2) + for i1 in 1:length(block1) + i2 = @inbounds labels1_to_labels2[i1] + if i2 > 0 + # This dimension is contracted + if @inbounds t1[i1] != @inbounds t2[i2] + return false + end + end end - end - return true + return true end function contract_blocks( - block1::Block, labels1_to_labelsR, block2::Block, labels2_to_labelsR, ::Val{NR} -) where {NR} - blockR = ntuple(_ -> UInt(0), Val(NR)) - t1 = Tuple(block1) - t2 = Tuple(block2) - for i1 in 1:length(block1) - iR = @inbounds labels1_to_labelsR[i1] - if iR > 0 - blockR = @inbounds setindex(blockR, t1[i1], iR) + block1::Block, labels1_to_labelsR, block2::Block, labels2_to_labelsR, ::Val{NR} + ) where {NR} + blockR = ntuple(_ -> UInt(0), Val(NR)) + t1 = Tuple(block1) + t2 = Tuple(block2) + for i1 in 1:length(block1) + iR = @inbounds labels1_to_labelsR[i1] + if iR > 0 + blockR = @inbounds setindex(blockR, t1[i1], iR) + end end - end - for i2 in 1:length(block2) - iR = @inbounds labels2_to_labelsR[i2] - if iR > 0 - blockR = @inbounds setindex(blockR, t2[i2], iR) + for i2 in 1:length(block2) + iR = @inbounds labels2_to_labelsR[i2] + if iR > 0 + blockR = @inbounds setindex(blockR, t2[i2], iR) + end end - end - return Block{NR}(blockR) + return Block{NR}(blockR) end diff --git a/NDTensors/src/blocksparse/linearalgebra.jl b/NDTensors/src/blocksparse/linearalgebra.jl index dad1312f81..8310f14a87 100644 --- a/NDTensors/src/blocksparse/linearalgebra.jl +++ b/NDTensors/src/blocksparse/linearalgebra.jl @@ -1,29 +1,29 @@ using TypeParameterAccessors: unwrap_array_type using .Expose: expose -const 
BlockSparseMatrix{ElT,StoreT,IndsT} = BlockSparseTensor{ElT,2,StoreT,IndsT} -const DiagBlockSparseMatrix{ElT,StoreT,IndsT} = DiagBlockSparseTensor{ElT,2,StoreT,IndsT} -const DiagMatrix{ElT,StoreT,IndsT} = DiagTensor{ElT,2,StoreT,IndsT} +const BlockSparseMatrix{ElT, StoreT, IndsT} = BlockSparseTensor{ElT, 2, StoreT, IndsT} +const DiagBlockSparseMatrix{ElT, StoreT, IndsT} = DiagBlockSparseTensor{ElT, 2, StoreT, IndsT} +const DiagMatrix{ElT, StoreT, IndsT} = DiagTensor{ElT, 2, StoreT, IndsT} function _truncated_blockdim( - S::DiagMatrix, docut::Real; singular_values=false, truncate=true, min_blockdim=nothing -) - min_blockdim = replace_nothing(min_blockdim, 0) - # TODO: Replace `cpu` with `Expose` dispatch. - S = cpu(S) - full_dim = diaglength(S) - !truncate && return full_dim - min_blockdim = min(min_blockdim, full_dim) - newdim = 0 - val = singular_values ? getdiagindex(S, newdim + 1)^2 : abs(getdiagindex(S, newdim + 1)) - while newdim + 1 ≤ full_dim && val > docut - newdim += 1 - if newdim + 1 ≤ full_dim - val = - singular_values ? getdiagindex(S, newdim + 1)^2 : abs(getdiagindex(S, newdim + 1)) + S::DiagMatrix, docut::Real; singular_values = false, truncate = true, min_blockdim = nothing + ) + min_blockdim = replace_nothing(min_blockdim, 0) + # TODO: Replace `cpu` with `Expose` dispatch. + S = cpu(S) + full_dim = diaglength(S) + !truncate && return full_dim + min_blockdim = min(min_blockdim, full_dim) + newdim = 0 + val = singular_values ? getdiagindex(S, newdim + 1)^2 : abs(getdiagindex(S, newdim + 1)) + while newdim + 1 ≤ full_dim && val > docut + newdim += 1 + if newdim + 1 ≤ full_dim + val = + singular_values ? getdiagindex(S, newdim + 1)^2 : abs(getdiagindex(S, newdim + 1)) + end end - end - (newdim >= min_blockdim) || (newdim = min_blockdim) - return newdim + (newdim >= min_blockdim) || (newdim = min_blockdim) + return newdim end """ @@ -37,391 +37,391 @@ This assumption makes it so the result can be computed from the dense svds of seperate blocks. """ function svd( - T::Tensor{ElT,2,<:BlockSparse}; - min_blockdim=nothing, - mindim=nothing, - maxdim=nothing, - cutoff=nothing, - alg=nothing, - use_absolute_cutoff=nothing, - use_relative_cutoff=nothing, -) where {ElT} - Us = Vector{DenseTensor{ElT,2}}(undef, nnzblocks(T)) - Ss = Vector{DiagTensor{real(ElT),2}}(undef, nnzblocks(T)) - Vs = Vector{DenseTensor{ElT,2}}(undef, nnzblocks(T)) - - # Sorted eigenvalues - d = Vector{real(ElT)}() - - for (n, b) in enumerate(eachnzblock(T)) - blockT = blockview(T, b) - USVb = svd(blockT; alg) - if isnothing(USVb) - return nothing + T::Tensor{ElT, 2, <:BlockSparse}; + min_blockdim = nothing, + mindim = nothing, + maxdim = nothing, + cutoff = nothing, + alg = nothing, + use_absolute_cutoff = nothing, + use_relative_cutoff = nothing, + ) where {ElT} + Us = Vector{DenseTensor{ElT, 2}}(undef, nnzblocks(T)) + Ss = Vector{DiagTensor{real(ElT), 2}}(undef, nnzblocks(T)) + Vs = Vector{DenseTensor{ElT, 2}}(undef, nnzblocks(T)) + + # Sorted eigenvalues + d = Vector{real(ElT)}() + + for (n, b) in enumerate(eachnzblock(T)) + blockT = blockview(T, b) + USVb = svd(blockT; alg) + if isnothing(USVb) + return nothing + end + Ub, Sb, Vb = USVb + Us[n] = Ub + Ss[n] = Sb + Vs[n] = Vb + # Previously this was: + # vector(diag(Sb)) + # But it broke, did `diag(::Tensor)` change types? + # TODO: call this a function `diagonal`, i.e.: + # https://github.com/JuliaLang/julia/issues/30250 + # or make `diag(::Tensor)` return a view by default. 
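# Illustrative check (not patch content) of the separability claim in the `svd` docstring above:
# when the nonzero blocks occupy distinct block rows and block columns, the singular values of the
# assembled dense matrix are exactly the per-block singular values (hypothetical 2x2 blocks below).
using LinearAlgebra
A = zeros(4, 4)
A[1:2, 3:4] .= [1.0 2.0; 3.0 4.0]   # block (1, 2)
A[3:4, 1:2] .= [5.0 6.0; 7.0 8.0]   # block (2, 1)
sort(vcat(svdvals(A[1:2, 3:4]), svdvals(A[3:4, 1:2])); rev = true) ≈ svdvals(A)   # true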
+ append!(expose(d), data(Sb)) end - Ub, Sb, Vb = USVb - Us[n] = Ub - Ss[n] = Sb - Vs[n] = Vb - # Previously this was: - # vector(diag(Sb)) - # But it broke, did `diag(::Tensor)` change types? - # TODO: call this a function `diagonal`, i.e.: - # https://github.com/JuliaLang/julia/issues/30250 - # or make `diag(::Tensor)` return a view by default. - append!(expose(d), data(Sb)) - end - - # Square the singular values to get - # the eigenvalues - d .= d .^ 2 - sort!(d; rev=true) - - # Get the list of blocks of T - # that are not dropped - nzblocksT = nzblocks(T) - - dropblocks = Int[] - if any(!isnothing, (maxdim, cutoff)) - d, truncerr, docut = truncate!!( - d; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff - ) - for n in 1:nnzblocks(T) - blockdim = _truncated_blockdim( - Ss[n], docut; min_blockdim, singular_values=true, truncate=true - ) - if blockdim == 0 - push!(dropblocks, n) - else - # TODO: Replace call to `data` with `diagview`. - Strunc = tensor(Diag(data(Ss[n])[1:blockdim]), (blockdim, blockdim)) - Us[n] = Us[n][1:dim(Us[n], 1), 1:blockdim] - Ss[n] = Strunc - Vs[n] = Vs[n][1:dim(Vs[n], 1), 1:blockdim] - end + + # Square the singular values to get + # the eigenvalues + d .= d .^ 2 + sort!(d; rev = true) + + # Get the list of blocks of T + # that are not dropped + nzblocksT = nzblocks(T) + + dropblocks = Int[] + if any(!isnothing, (maxdim, cutoff)) + d, truncerr, docut = truncate!!( + d; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff + ) + for n in 1:nnzblocks(T) + blockdim = _truncated_blockdim( + Ss[n], docut; min_blockdim, singular_values = true, truncate = true + ) + if blockdim == 0 + push!(dropblocks, n) + else + # TODO: Replace call to `data` with `diagview`. + Strunc = tensor(Diag(data(Ss[n])[1:blockdim]), (blockdim, blockdim)) + Us[n] = Us[n][1:dim(Us[n], 1), 1:blockdim] + Ss[n] = Strunc + Vs[n] = Vs[n][1:dim(Vs[n], 1), 1:blockdim] + end + end + deleteat!(Us, dropblocks) + deleteat!(Ss, dropblocks) + deleteat!(Vs, dropblocks) + deleteat!(nzblocksT, dropblocks) + else + truncerr, docut = 0.0, 0.0 end - deleteat!(Us, dropblocks) - deleteat!(Ss, dropblocks) - deleteat!(Vs, dropblocks) - deleteat!(nzblocksT, dropblocks) - else - truncerr, docut = 0.0, 0.0 - end - - # The number of non-zero blocks of T remaining - nnzblocksT = length(nzblocksT) - - # - # Make indices of U and V - # that connect to S - # - i1 = ind(T, 1) - i2 = ind(T, 2) - uind = dag(sim(i1)) - vind = dag(sim(i2)) - resize!(uind, nnzblocksT) - resize!(vind, nnzblocksT) - for (n, blockT) in enumerate(nzblocksT) - Udim = size(Us[n], 2) - b1 = block(i1, blockT[1]) - setblock!(uind, resize(b1, Udim), n) - Vdim = size(Vs[n], 2) - b2 = block(i2, blockT[2]) - setblock!(vind, resize(b2, Vdim), n) - end - - # - # Put the blocks into U,S,V - # - - nzblocksU = Vector{Block{2}}(undef, nnzblocksT) - nzblocksS = Vector{Block{2}}(undef, nnzblocksT) - nzblocksV = Vector{Block{2}}(undef, nnzblocksT) - - for (n, blockT) in enumerate(nzblocksT) - blockU = (blockT[1], UInt(n)) - nzblocksU[n] = blockU - - blockS = (n, n) - nzblocksS[n] = blockS - - blockV = (blockT[2], UInt(n)) - nzblocksV[n] = blockV - end - - indsU = setindex(inds(T), uind, 2) - - indsV = setindex(inds(T), vind, 1) - indsV = permute(indsV, (2, 1)) - - indsS = setindex(inds(T), dag(uind), 1) - indsS = setindex(indsS, dag(vind), 2) - - U = BlockSparseTensor(unwrap_array_type(T), undef, nzblocksU, indsU) - S = DiagBlockSparseTensor( - set_eltype(unwrap_array_type(T), real(ElT)), undef, nzblocksS, indsS - ) - V = 
BlockSparseTensor(unwrap_array_type(T), undef, nzblocksV, indsV) - - for n in 1:nnzblocksT - Ub, Sb, Vb = Us[n], Ss[n], Vs[n] - - blockU = nzblocksU[n] - blockS = nzblocksS[n] - blockV = nzblocksV[n] - - if VERSION < v"1.5" - # In v1.3 and v1.4 of Julia, Ub has - # a very complicated view wrapper that - # can't be handled efficiently - Ub = copy(Ub) - Vb = copy(Vb) + + # The number of non-zero blocks of T remaining + nnzblocksT = length(nzblocksT) + + # + # Make indices of U and V + # that connect to S + # + i1 = ind(T, 1) + i2 = ind(T, 2) + uind = dag(sim(i1)) + vind = dag(sim(i2)) + resize!(uind, nnzblocksT) + resize!(vind, nnzblocksT) + for (n, blockT) in enumerate(nzblocksT) + Udim = size(Us[n], 2) + b1 = block(i1, blockT[1]) + setblock!(uind, resize(b1, Udim), n) + Vdim = size(Vs[n], 2) + b2 = block(i2, blockT[2]) + setblock!(vind, resize(b2, Vdim), n) end - # - sU = right_arrow_sign(uind, blockU[2]) + # + # Put the blocks into U,S,V + # - if sU == -1 - Ub *= -1 - end - copyto!(expose(blockview(U, blockU)), expose(Ub)) - - blockviewS = blockview(S, blockS) - # TODO: Replace `data` with `diagview`. - copyto!(expose(data(blockviewS)), expose(data(Sb))) - - # - sV = left_arrow_sign(vind, blockV[2]) - # This sign (sVP) accounts for the fact that - # V is transposed, i.e. the index connecting to S - # is the second index: - sVP = 1 - if using_auto_fermion() - sVP = -block_sign(vind, blockV[2]) + nzblocksU = Vector{Block{2}}(undef, nnzblocksT) + nzblocksS = Vector{Block{2}}(undef, nnzblocksT) + nzblocksV = Vector{Block{2}}(undef, nnzblocksT) + + for (n, blockT) in enumerate(nzblocksT) + blockU = (blockT[1], UInt(n)) + nzblocksU[n] = blockU + + blockS = (n, n) + nzblocksS[n] = blockS + + blockV = (blockT[2], UInt(n)) + nzblocksV[n] = blockV end - if (sV * sVP) == -1 - Vb *= -1 + indsU = setindex(inds(T), uind, 2) + + indsV = setindex(inds(T), vind, 1) + indsV = permute(indsV, (2, 1)) + + indsS = setindex(inds(T), dag(uind), 1) + indsS = setindex(indsS, dag(vind), 2) + + U = BlockSparseTensor(unwrap_array_type(T), undef, nzblocksU, indsU) + S = DiagBlockSparseTensor( + set_eltype(unwrap_array_type(T), real(ElT)), undef, nzblocksS, indsS + ) + V = BlockSparseTensor(unwrap_array_type(T), undef, nzblocksV, indsV) + + for n in 1:nnzblocksT + Ub, Sb, Vb = Us[n], Ss[n], Vs[n] + + blockU = nzblocksU[n] + blockS = nzblocksS[n] + blockV = nzblocksV[n] + + if VERSION < v"1.5" + # In v1.3 and v1.4 of Julia, Ub has + # a very complicated view wrapper that + # can't be handled efficiently + Ub = copy(Ub) + Vb = copy(Vb) + end + + # + sU = right_arrow_sign(uind, blockU[2]) + + if sU == -1 + Ub *= -1 + end + copyto!(expose(blockview(U, blockU)), expose(Ub)) + + blockviewS = blockview(S, blockS) + # TODO: Replace `data` with `diagview`. + copyto!(expose(data(blockviewS)), expose(data(Sb))) + + # + sV = left_arrow_sign(vind, blockV[2]) + # This sign (sVP) accounts for the fact that + # V is transposed, i.e. 
the index connecting to S + # is the second index: + sVP = 1 + if using_auto_fermion() + sVP = -block_sign(vind, blockV[2]) + end + + if (sV * sVP) == -1 + Vb *= -1 + end + copyto!(blockview(V, blockV), Vb) end - copyto!(blockview(V, blockV), Vb) - end - return U, S, V, Spectrum(d, truncerr) + return U, S, V, Spectrum(d, truncerr) end -_eigen_eltypes(T::Hermitian{ElT,<:BlockSparseMatrix{ElT}}) where {ElT} = real(ElT), ElT +_eigen_eltypes(T::Hermitian{ElT, <:BlockSparseMatrix{ElT}}) where {ElT} = real(ElT), ElT _eigen_eltypes(T::BlockSparseMatrix{ElT}) where {ElT} = complex(ElT), complex(ElT) function LinearAlgebra.eigen( - T::Union{Hermitian{ElT,<:Tensor{ElT,2,<:BlockSparse}},Tensor{ElT,2,<:BlockSparse}}; - min_blockdim=nothing, - mindim=nothing, - maxdim=nothing, - cutoff=nothing, - use_absolute_cutoff=nothing, - use_relative_cutoff=nothing, -) where {ElT<:Union{Real,Complex}} - ElD, ElV = _eigen_eltypes(T) - - # Sorted eigenvalues - d = Vector{real(ElT)}() - - for b in eachnzblock(T) - all(==(b[1]), b) || error("Eigen currently only supports block diagonal matrices.") - end - - b = first(eachnzblock(T)) - blockT = blockview(T, b) - Db, Vb = eigen(expose(blockT)) - Ds = [Db] - Vs = [Vb] - append!(expose(d), abs.(data(Db))) - for (n, b) in enumerate(eachnzblock(T)) - n == 1 && continue + T::Union{Hermitian{ElT, <:Tensor{ElT, 2, <:BlockSparse}}, Tensor{ElT, 2, <:BlockSparse}}; + min_blockdim = nothing, + mindim = nothing, + maxdim = nothing, + cutoff = nothing, + use_absolute_cutoff = nothing, + use_relative_cutoff = nothing, + ) where {ElT <: Union{Real, Complex}} + ElD, ElV = _eigen_eltypes(T) + + # Sorted eigenvalues + d = Vector{real(ElT)}() + + for b in eachnzblock(T) + all(==(b[1]), b) || error("Eigen currently only supports block diagonal matrices.") + end + + b = first(eachnzblock(T)) blockT = blockview(T, b) Db, Vb = eigen(expose(blockT)) - push!(Ds, Db) - push!(Vs, Vb) + Ds = [Db] + Vs = [Vb] append!(expose(d), abs.(data(Db))) - end - - dropblocks = Int[] - sort!(d; rev=true, by=abs) + for (n, b) in enumerate(eachnzblock(T)) + n == 1 && continue + blockT = blockview(T, b) + Db, Vb = eigen(expose(blockT)) + push!(Ds, Db) + push!(Vs, Vb) + append!(expose(d), abs.(data(Db))) + end - if any(!isnothing, (maxdim, cutoff)) - d, truncerr, docut = truncate!!( - d; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff - ) - for n in 1:nnzblocks(T) - blockdim = _truncated_blockdim( - Ds[n], docut; min_blockdim, singular_values=false, truncate=true - ) - if blockdim == 0 - push!(dropblocks, n) - else - # TODO: Replace call to `data` with `diagview`. - Dtrunc = tensor(Diag(data(Ds[n])[1:blockdim]), (blockdim, blockdim)) - Ds[n] = Dtrunc - new_size = (dim(Vs[n], 1), blockdim) - new_data = array(Vs[n])[1:new_size[1], 1:new_size[2]] - Vs[n] = tensor(Dense(new_data), new_size) - end + dropblocks = Int[] + sort!(d; rev = true, by = abs) + + if any(!isnothing, (maxdim, cutoff)) + d, truncerr, docut = truncate!!( + d; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff + ) + for n in 1:nnzblocks(T) + blockdim = _truncated_blockdim( + Ds[n], docut; min_blockdim, singular_values = false, truncate = true + ) + if blockdim == 0 + push!(dropblocks, n) + else + # TODO: Replace call to `data` with `diagview`. 
+ Dtrunc = tensor(Diag(data(Ds[n])[1:blockdim]), (blockdim, blockdim)) + Ds[n] = Dtrunc + new_size = (dim(Vs[n], 1), blockdim) + new_data = array(Vs[n])[1:new_size[1], 1:new_size[2]] + Vs[n] = tensor(Dense(new_data), new_size) + end + end + deleteat!(Ds, dropblocks) + deleteat!(Vs, dropblocks) + else + truncerr = 0.0 end - deleteat!(Ds, dropblocks) - deleteat!(Vs, dropblocks) - else - truncerr = 0.0 - end - # Get the list of blocks of T - # that are not dropped - nzblocksT = nzblocks(T) - deleteat!(nzblocksT, dropblocks) + # Get the list of blocks of T + # that are not dropped + nzblocksT = nzblocks(T) + deleteat!(nzblocksT, dropblocks) - # The number of blocks of T remaining - nnzblocksT = nnzblocks(T) - length(dropblocks) + # The number of blocks of T remaining + nnzblocksT = nnzblocks(T) - length(dropblocks) - # - # Put the blocks into D, V - # + # + # Put the blocks into D, V + # - i1, i2 = inds(T) - l = sim(i1) + i1, i2 = inds(T) + l = sim(i1) - lkeepblocks = Int[bT[1] for bT in nzblocksT] - ldropblocks = setdiff(1:nblocks(l), lkeepblocks) - deleteat!(l, ldropblocks) + lkeepblocks = Int[bT[1] for bT in nzblocksT] + ldropblocks = setdiff(1:nblocks(l), lkeepblocks) + deleteat!(l, ldropblocks) - # l may have too many blocks - (nblocks(l) > nnzblocksT) && error("New index l in eigen has too many blocks") + # l may have too many blocks + (nblocks(l) > nnzblocksT) && error("New index l in eigen has too many blocks") - # Truncation may have changed - # some block sizes - for n in 1:nnzblocksT - setblockdim!(l, minimum(dims(Ds[n])), n) - end + # Truncation may have changed + # some block sizes + for n in 1:nnzblocksT + setblockdim!(l, minimum(dims(Ds[n])), n) + end - r = dag(sim(l)) + r = dag(sim(l)) - indsD = (l, r) - indsV = (dag(i2), r) + indsD = (l, r) + indsV = (dag(i2), r) - nzblocksD = Vector{Block{2}}(undef, nnzblocksT) - nzblocksV = Vector{Block{2}}(undef, nnzblocksT) - for n in 1:nnzblocksT - blockT = nzblocksT[n] + nzblocksD = Vector{Block{2}}(undef, nnzblocksT) + nzblocksV = Vector{Block{2}}(undef, nnzblocksT) + for n in 1:nnzblocksT + blockT = nzblocksT[n] - blockD = (n, n) - nzblocksD[n] = blockD + blockD = (n, n) + nzblocksD[n] = blockD - blockV = (blockT[1], n) - nzblocksV[n] = blockV - end + blockV = (blockT[1], n) + nzblocksV[n] = blockV + end - D = DiagBlockSparseTensor( - set_ndims(set_eltype(unwrap_array_type(T), ElD), 1), undef, nzblocksD, indsD - ) - V = BlockSparseTensor(set_eltype(unwrap_array_type(T), ElV), undef, nzblocksV, indsV) + D = DiagBlockSparseTensor( + set_ndims(set_eltype(unwrap_array_type(T), ElD), 1), undef, nzblocksD, indsD + ) + V = BlockSparseTensor(set_eltype(unwrap_array_type(T), ElV), undef, nzblocksV, indsV) - for n in 1:nnzblocksT - Db, Vb = Ds[n], Vs[n] + for n in 1:nnzblocksT + Db, Vb = Ds[n], Vs[n] - blockD = nzblocksD[n] - blockviewD = blockview(D, blockD) - # TODO: Replace `data` with `diagview`. - copyto!(expose(data(blockviewD)), expose(data(Db))) + blockD = nzblocksD[n] + blockviewD = blockview(D, blockD) + # TODO: Replace `data` with `diagview`. + copyto!(expose(data(blockviewD)), expose(data(Db))) - blockV = nzblocksV[n] - copyto!(blockview(V, blockV), Vb) - end + blockV = nzblocksV[n] + copyto!(blockview(V, blockV), Vb) + end - return D, V, Spectrum(d, truncerr) + return D, V, Spectrum(d, truncerr) end -Expose.ql(T::BlockSparseTensor{<:Any,2}; kwargs...) = qx(ql, T; kwargs...) -qr(T::BlockSparseTensor{<:Any,2}; kwargs...) = qx(qr, T; kwargs...) +Expose.ql(T::BlockSparseTensor{<:Any, 2}; kwargs...) = qx(ql, T; kwargs...) 
+qr(T::BlockSparseTensor{<:Any, 2}; kwargs...) = qx(qr, T; kwargs...) # # Generic function to implelement blocks sparse qr/ql decomposition. It calls # the dense qr or ql for each block. The X tensor = R or L. # This code thanks to Niklas Tausendpfund # https://github.com/ntausend/variance_iTensor/blob/main/Hubig_variance_test.ipynb # -function qx(qx::Function, T::BlockSparseTensor{<:Any,2}; positive=nothing) - ElT = eltype(T) - # getting total number of blocks - nnzblocksT = nnzblocks(T) - nzblocksT = nzblocks(T) +function qx(qx::Function, T::BlockSparseTensor{<:Any, 2}; positive = nothing) + ElT = eltype(T) + # getting total number of blocks + nnzblocksT = nnzblocks(T) + nzblocksT = nzblocks(T) + + Qs = Vector{DenseTensor{ElT, 2}}(undef, nnzblocksT) + Xs = Vector{DenseTensor{ElT, 2}}(undef, nnzblocksT) + + for (jj, b) in enumerate(eachnzblock(T)) + blockT = blockview(T, b) + QXb = qx(blockT; positive) + + if (isnothing(QXb)) + return nothing + end + + Q, X = QXb + Qs[jj] = Q + Xs[jj] = X + end - Qs = Vector{DenseTensor{ElT,2}}(undef, nnzblocksT) - Xs = Vector{DenseTensor{ElT,2}}(undef, nnzblocksT) + # + # Make the new index connecting Q and R + # + itl = ind(T, 1) #left index of T + iq = dag(sim(itl)) #start with similar to the left index of T + resize!(iq, nnzblocksT) #adjust the size to match the block count + for (n, blockT) in enumerate(nzblocksT) + Qdim = size(Qs[n], 2) #get the block dim on right side of Q. + b1 = block(itl, blockT[1]) + setblock!(iq, resize(b1, Qdim), n) + end - for (jj, b) in enumerate(eachnzblock(T)) - blockT = blockview(T, b) - QXb = qx(blockT; positive) + indsQ = setindex(inds(T), iq, 2) + indsX = setindex(inds(T), dag(iq), 1) + + nzblocksQ = Vector{Block{2}}(undef, nnzblocksT) + nzblocksX = Vector{Block{2}}(undef, nnzblocksT) - if (isnothing(QXb)) - return nothing + for n in 1:nnzblocksT + blockT = nzblocksT[n] + nzblocksQ[n] = (blockT[1], UInt(n)) + nzblocksX[n] = (UInt(n), blockT[2]) end - Q, X = QXb - Qs[jj] = Q - Xs[jj] = X - end - - # - # Make the new index connecting Q and R - # - itl = ind(T, 1) #left index of T - iq = dag(sim(itl)) #start with similar to the left index of T - resize!(iq, nnzblocksT) #adjust the size to match the block count - for (n, blockT) in enumerate(nzblocksT) - Qdim = size(Qs[n], 2) #get the block dim on right side of Q. 
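# Illustrative sketch (not patch content) of why `qx` above can work one dense block at a time:
# for a block-diagonal matrix, assembling the per-block Q and R factors reproduces a valid QR
# factorization of the whole matrix (hypothetical 2x2 blocks).
using LinearAlgebra
B1 = [1.0 2.0; 3.0 4.0]
B2 = [5.0 6.0; 7.0 8.0]
A = [B1 zeros(2, 2); zeros(2, 2) B2]
F1, F2 = qr(B1), qr(B2)
Q = [Matrix(F1.Q) zeros(2, 2); zeros(2, 2) Matrix(F2.Q)]
R = [F1.R zeros(2, 2); zeros(2, 2) F2.R]
Q * R ≈ A   # true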
- b1 = block(itl, blockT[1]) - setblock!(iq, resize(b1, Qdim), n) - end - - indsQ = setindex(inds(T), iq, 2) - indsX = setindex(inds(T), dag(iq), 1) - - nzblocksQ = Vector{Block{2}}(undef, nnzblocksT) - nzblocksX = Vector{Block{2}}(undef, nnzblocksT) - - for n in 1:nnzblocksT - blockT = nzblocksT[n] - nzblocksQ[n] = (blockT[1], UInt(n)) - nzblocksX[n] = (UInt(n), blockT[2]) - end - - Q = BlockSparseTensor(unwrap_array_type(T), undef, nzblocksQ, indsQ) - X = BlockSparseTensor(unwrap_array_type(T), undef, nzblocksX, indsX) - - for n in 1:nnzblocksT - copyto!(blockview(Q, nzblocksQ[n]), Qs[n]) - copyto!(blockview(X, nzblocksX[n]), Xs[n]) - end - - Q = adapt(unwrap_array_type(T), Q) - X = adapt(unwrap_array_type(T), X) - return Q, X + Q = BlockSparseTensor(unwrap_array_type(T), undef, nzblocksQ, indsQ) + X = BlockSparseTensor(unwrap_array_type(T), undef, nzblocksX, indsX) + + for n in 1:nnzblocksT + copyto!(blockview(Q, nzblocksQ[n]), Qs[n]) + copyto!(blockview(X, nzblocksX[n]), Xs[n]) + end + + Q = adapt(unwrap_array_type(T), Q) + X = adapt(unwrap_array_type(T), X) + return Q, X end function exp( - T::Union{BlockSparseMatrix{ElT},Hermitian{ElT,<:BlockSparseMatrix{ElT}}} -) where {ElT<:Union{Real,Complex}} - expT = BlockSparseTensor(ElT, undef, nzblocks(T), inds(T)) - for b in eachnzblock(T) - all(==(b[1]), b) || error("exp currently supports only block-diagonal matrices") - end - for b in eachdiagblock(T) - blockT = blockview(T, b) - if isnothing(blockT) - # Block was not found in the list, treat as 0 - id_block = Matrix{ElT}(I, blockdims(T, b)) - insertblock!(expT, b) - blockview(expT, b) .= id_block - else - blockview(expT, b) .= exp(blockT) + T::Union{BlockSparseMatrix{ElT}, Hermitian{ElT, <:BlockSparseMatrix{ElT}}} + ) where {ElT <: Union{Real, Complex}} + expT = BlockSparseTensor(ElT, undef, nzblocks(T), inds(T)) + for b in eachnzblock(T) + all(==(b[1]), b) || error("exp currently supports only block-diagonal matrices") + end + for b in eachdiagblock(T) + blockT = blockview(T, b) + if isnothing(blockT) + # Block was not found in the list, treat as 0 + id_block = Matrix{ElT}(I, blockdims(T, b)) + insertblock!(expT, b) + blockview(expT, b) .= id_block + else + blockview(expT, b) .= exp(blockT) + end end - end - return expT + return expT end diff --git a/NDTensors/src/blocksparse/similar.jl b/NDTensors/src/blocksparse/similar.jl index d9dc3b5e37..46ba2ecae2 100644 --- a/NDTensors/src/blocksparse/similar.jl +++ b/NDTensors/src/blocksparse/similar.jl @@ -3,33 +3,33 @@ using TypeParameterAccessors: similartype # NDTensors.similar function similar(storagetype::Type{<:BlockSparse}, blockoffsets::BlockOffsets, dims::Tuple) - data = similar(datatype(storagetype), nnz(blockoffsets, dims)) - return BlockSparse(data, blockoffsets) + data = similar(datatype(storagetype), nnz(blockoffsets, dims)) + return BlockSparse(data, blockoffsets) end # NDTensors.similar function similar(storagetype::Type{<:BlockSparse}, dims::Tuple) - # Create an empty BlockSparse storage - return similartype(storagetype, dims)() + # Create an empty BlockSparse storage + return similartype(storagetype, dims)() end # NDTensors.similar function similar(storagetype::Type{<:BlockSparse}, dims::Dims) - # Create an empty BlockSparse storage - return similartype(storagetype, dims)() + # Create an empty BlockSparse storage + return similartype(storagetype, dims)() end ## TODO: Is there a way to make this generic? 
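# Illustrative check (not patch content) of the block-diagonal `exp` above: exponentiating each
# stored block and filling missing diagonal blocks with the identity reproduces the dense matrix
# exponential (hypothetical 4x4 example with a single stored 2x2 block).
using LinearAlgebra
A = zeros(4, 4)
A[1:2, 1:2] .= [0.0 1.0; -1.0 0.0]       # the only stored block
E = zeros(4, 4)
E[1:2, 1:2] .= exp(A[1:2, 1:2])          # exp of the stored block
E[3:4, 3:4] .= Matrix{Float64}(I, 2, 2)  # a missing (zero) block exponentiates to the identity
E ≈ exp(A)   # true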
# NDTensors.similar function similar( - tensortype::Type{<:BlockSparseTensor}, blockoffsets::BlockOffsets, dims::Tuple -) - return Tensor(similar(storagetype(tensortype), blockoffsets, dims), dims) + tensortype::Type{<:BlockSparseTensor}, blockoffsets::BlockOffsets, dims::Tuple + ) + return Tensor(similar(storagetype(tensortype), blockoffsets, dims), dims) end # NDTensors.similar function similar(tensor::BlockSparseTensor, blockoffsets::BlockOffsets, dims::Tuple) - return similar(typeof(tensor), blockoffsets, dims) + return similar(typeof(tensor), blockoffsets, dims) end ## ## TODO: Determine if the methods below are needed. diff --git a/NDTensors/src/combiner/combiner.jl b/NDTensors/src/combiner/combiner.jl index c807e96204..080c598929 100644 --- a/NDTensors/src/combiner/combiner.jl +++ b/NDTensors/src/combiner/combiner.jl @@ -5,13 +5,13 @@ export Combiner # This can generalize to a Combiner that combines # multiple set of indices, e.g. (i,j),(k,l) -> (a,b) struct Combiner <: TensorStorage{Number} - perm::Vector{Int} - comb::Vector{Int} - cind::Vector{Int} - isconj::Bool - function Combiner(perm::Vector{Int}, comb::Vector{Int}, cind::Vector{Int}, isconj::Bool) - return new(perm, comb, cind, isconj) - end + perm::Vector{Int} + comb::Vector{Int} + cind::Vector{Int} + isconj::Bool + function Combiner(perm::Vector{Int}, comb::Vector{Int}, cind::Vector{Int}, isconj::Bool) + return new(perm, comb, cind, isconj) + end end Combiner() = Combiner(Int[], Int[], Int[1], false) @@ -28,7 +28,7 @@ isconj(C::Combiner) = C.isconj setisconj(C::Combiner, isconj) = Combiner(blockperm(C), blockcomb(C), cinds(C), isconj) function copy(C::Combiner) - return Combiner(copy(blockperm(C)), copy(blockcomb(C)), copy(cinds(C)), isconj(C)) + return Combiner(copy(blockperm(C)), copy(blockcomb(C)), copy(cinds(C)), isconj(C)) end eltype(::Type{<:Combiner}) = Number @@ -44,37 +44,37 @@ conj(::NeverAlias, C::Combiner) = conj(AllowAlias(), copy(C)) # CombinerTensor (Tensor using Combiner storage) # -const CombinerTensor{ElT,N,StoreT,IndsT} = - Tensor{ElT,N,StoreT,IndsT} where {StoreT<:Combiner} +const CombinerTensor{ElT, N, StoreT, IndsT} = + Tensor{ElT, N, StoreT, IndsT} where {StoreT <: Combiner} # The position of the combined index/dimension. # By convention, it is the first one. combinedind_position(combiner_tensor::CombinerTensor) = 1 function combinedind(combiner_tensor::CombinerTensor) - return inds(combiner_tensor)[combinedind_position(combiner_tensor)] + return inds(combiner_tensor)[combinedind_position(combiner_tensor)] end # TODO: Rewrite in terms of `combinedind_position`. 
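# Illustrative note (not patch content) on the convention used throughout the combiner code: the
# combined index sits in position 1 of the combiner tensor and the uncombined indices follow, so
# `combinedind`/`uncombinedinds` are positional selections. Plain symbols stand in for Index objects.
combiner_inds = (:c, :i, :j, :k)  # :c is the combined index; (:i, :j, :k) were combined
combiner_inds[1]                  # what `combinedind` selects
Base.tail(combiner_inds)          # what `uncombinedinds` selects, up to index types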
function uncombinedinds(combiner_tensor::CombinerTensor) - return deleteat(inds(combiner_tensor), combinedind_position(combiner_tensor)) + return deleteat(inds(combiner_tensor), combinedind_position(combiner_tensor)) end function combinedind_label(combiner_tensor::CombinerTensor, combiner_tensor_labels) - return combiner_tensor_labels[combinedind_position(combiner_tensor)] + return combiner_tensor_labels[combinedind_position(combiner_tensor)] end function uncombinedind_labels(combiner_tensor::CombinerTensor, combiner_tensor_labels) - return deleteat(combiner_tensor_labels, combinedind_position(combiner_tensor)) + return deleteat(combiner_tensor_labels, combinedind_position(combiner_tensor)) end blockperm(C::CombinerTensor) = blockperm(storage(C)) blockcomb(C::CombinerTensor) = blockcomb(storage(C)) function is_index_replacement( - tensor::Tensor, tensor_labels, combiner_tensor::CombinerTensor, combiner_tensor_labels -) - return (ndims(combiner_tensor) == 2) && - isone(count(∈(tensor_labels), combiner_tensor_labels)) + tensor::Tensor, tensor_labels, combiner_tensor::CombinerTensor, combiner_tensor_labels + ) + return (ndims(combiner_tensor) == 2) && + isone(count(∈(tensor_labels), combiner_tensor_labels)) end # Return if the combiner contraction is combining or uncombining. @@ -82,90 +82,90 @@ end # only the combined index should be uncontracted, and when uncombining, # only the combined index should be contracted. function is_combining( - tensor::Tensor, tensor_labels, combiner_tensor::CombinerTensor, combiner_tensor_labels -) - is_combining = is_combining_no_check( - tensor, tensor_labels, combiner_tensor, combiner_tensor_labels - ) - check_valid_combiner_contraction( - is_combining, tensor, tensor_labels, combiner_tensor, combiner_tensor_labels - ) - return is_combining + tensor::Tensor, tensor_labels, combiner_tensor::CombinerTensor, combiner_tensor_labels + ) + is_combining = is_combining_no_check( + tensor, tensor_labels, combiner_tensor, combiner_tensor_labels + ) + check_valid_combiner_contraction( + is_combining, tensor, tensor_labels, combiner_tensor, combiner_tensor_labels + ) + return is_combining end function is_combining_no_check( - tensor::Tensor, tensor_labels, combiner_tensor::CombinerTensor, combiner_tensor_labels -) - return combinedind_label(combiner_tensor, combiner_tensor_labels) ∉ tensor_labels + tensor::Tensor, tensor_labels, combiner_tensor::CombinerTensor, combiner_tensor_labels + ) + return combinedind_label(combiner_tensor, combiner_tensor_labels) ∉ tensor_labels end function check_valid_combiner_contraction( - is_combining::Bool, - tensor::Tensor, - tensor_labels, - combiner_tensor::CombinerTensor, - combiner_tensor_labels, -) - if !is_valid_combiner_contraction( - is_combining, tensor, tensor_labels, combiner_tensor, combiner_tensor_labels - ) - return invalid_combiner_contraction_error( - tensor, tensor_labels, combiner_tensor, combiner_tensor_labels + is_combining::Bool, + tensor::Tensor, + tensor_labels, + combiner_tensor::CombinerTensor, + combiner_tensor_labels, ) - end - return nothing + if !is_valid_combiner_contraction( + is_combining, tensor, tensor_labels, combiner_tensor, combiner_tensor_labels + ) + return invalid_combiner_contraction_error( + tensor, tensor_labels, combiner_tensor, combiner_tensor_labels + ) + end + return nothing end function is_valid_combiner_contraction( - is_combining::Bool, - tensor::Tensor, - tensor_labels, - combiner_tensor::CombinerTensor, - combiner_tensor_labels, -) - in_tensor_labels_op = is_combining ? 
∉(tensor_labels) : ∈(tensor_labels) - return isone(count(in_tensor_labels_op, combiner_tensor_labels)) + is_combining::Bool, + tensor::Tensor, + tensor_labels, + combiner_tensor::CombinerTensor, + combiner_tensor_labels, + ) + in_tensor_labels_op = is_combining ? ∉(tensor_labels) : ∈(tensor_labels) + return isone(count(in_tensor_labels_op, combiner_tensor_labels)) end function invalid_combiner_contraction_error( - tensor::Tensor, tensor_labels, combiner_tensor::CombinerTensor, combiner_tensor_labels -) - return error( - """ - Trying to contract a tensor with indices: + tensor::Tensor, tensor_labels, combiner_tensor::CombinerTensor, combiner_tensor_labels + ) + return error( + """ + Trying to contract a tensor with indices: - $(inds(tensor)) + $(inds(tensor)) - and labels: + and labels: - $(tensor_labels) + $(tensor_labels) - with a combiner tensor with indices: + with a combiner tensor with indices: - $(inds(combiner_tensor)) + $(inds(combiner_tensor)) - and labels: + and labels: - $(combiner_tensor_labels). + $(combiner_tensor_labels). - This is not a valid combiner contraction. + This is not a valid combiner contraction. - If you are combining, the combined index of the combiner should be the only one uncontracted. + If you are combining, the combined index of the combiner should be the only one uncontracted. - If you are uncombining, the combined index of the combiner should be the only one contracted. + If you are uncombining, the combined index of the combiner should be the only one contracted. - By convention, the combined index should be the index in position $(combinedind_position(combiner_tensor)) of the combiner tensor. - """, - ) + By convention, the combined index should be the index in position $(combinedind_position(combiner_tensor)) of the combiner tensor. 
+ """, + ) end function Base.show(io::IO, mime::MIME"text/plain", S::Combiner) - println(io, "Permutation of blocks: ", S.perm) - return println(io, "Combination of blocks: ", S.comb) + println(io, "Permutation of blocks: ", S.perm) + return println(io, "Combination of blocks: ", S.comb) end function Base.show(io::IO, mime::MIME"text/plain", T::CombinerTensor) - summary(io, T) - println(io) - return show(io, mime, storage(T)) + summary(io, T) + println(io) + return show(io, mime, storage(T)) end diff --git a/NDTensors/src/combiner/contract.jl b/NDTensors/src/combiner/contract.jl index aae09061ba..773740e7d3 100644 --- a/NDTensors/src/combiner/contract.jl +++ b/NDTensors/src/combiner/contract.jl @@ -1,103 +1,103 @@ function contraction_output( - ::TensorT1, ::TensorT2, indsR::Tuple -) where {TensorT1<:CombinerTensor,TensorT2<:DenseTensor} - TensorR = contraction_output_type(TensorT1, TensorT2, indsR) - return similar(TensorR, indsR) + ::TensorT1, ::TensorT2, indsR::Tuple + ) where {TensorT1 <: CombinerTensor, TensorT2 <: DenseTensor} + TensorR = contraction_output_type(TensorT1, TensorT2, indsR) + return similar(TensorR, indsR) end function contraction_output( - T1::TensorT1, T2::TensorT2, indsR -) where {TensorT1<:DenseTensor,TensorT2<:CombinerTensor} - return contraction_output(T2, T1, indsR) + T1::TensorT1, T2::TensorT2, indsR + ) where {TensorT1 <: DenseTensor, TensorT2 <: CombinerTensor} + return contraction_output(T2, T1, indsR) end function contract!!( - output_tensor::Tensor, - output_tensor_labels, - combiner_tensor::CombinerTensor, - combiner_tensor_labels, - tensor::Tensor, - tensor_labels, -) - if ndims(combiner_tensor) ≤ 1 - # Empty combiner, acts as multiplying by 1 - output_tensor = permutedims!!( - output_tensor, tensor, getperm(output_tensor_labels, tensor_labels) + output_tensor::Tensor, + output_tensor_labels, + combiner_tensor::CombinerTensor, + combiner_tensor_labels, + tensor::Tensor, + tensor_labels, ) - return output_tensor - end - if is_index_replacement(tensor, tensor_labels, combiner_tensor, combiner_tensor_labels) - ui = setdiff(combiner_tensor_labels, tensor_labels)[] - newind = inds(combiner_tensor)[findfirst(==(ui), combiner_tensor_labels)] - cpos1, cpos2 = intersect_positions(combiner_tensor_labels, tensor_labels) - output_tensor_storage = copy(storage(tensor)) - output_tensor_inds = setindex(inds(tensor), newind, cpos2) - return NDTensors.tensor(output_tensor_storage, output_tensor_inds) - end - is_combining_contraction = is_combining( - tensor, tensor_labels, combiner_tensor, combiner_tensor_labels - ) - if is_combining_contraction - Alabels, Blabels = tensor_labels, combiner_tensor_labels - final_labels = contract_labels(Blabels, Alabels) - final_labels_n = contract_labels(combiner_tensor_labels, tensor_labels) - output_tensor_inds = inds(output_tensor) - if final_labels != final_labels_n - perm = getperm(final_labels_n, final_labels) - output_tensor_inds = permute(inds(output_tensor), perm) - output_tensor_labels = permute(output_tensor_labels, perm) + if ndims(combiner_tensor) ≤ 1 + # Empty combiner, acts as multiplying by 1 + output_tensor = permutedims!!( + output_tensor, tensor, getperm(output_tensor_labels, tensor_labels) + ) + return output_tensor end - cpos1, output_tensor_cpos = intersect_positions( - combiner_tensor_labels, output_tensor_labels + if is_index_replacement(tensor, tensor_labels, combiner_tensor, combiner_tensor_labels) + ui = setdiff(combiner_tensor_labels, tensor_labels)[] + newind = inds(combiner_tensor)[findfirst(==(ui), 
combiner_tensor_labels)] + cpos1, cpos2 = intersect_positions(combiner_tensor_labels, tensor_labels) + output_tensor_storage = copy(storage(tensor)) + output_tensor_inds = setindex(inds(tensor), newind, cpos2) + return NDTensors.tensor(output_tensor_storage, output_tensor_inds) + end + is_combining_contraction = is_combining( + tensor, tensor_labels, combiner_tensor, combiner_tensor_labels ) - labels_comb = deleteat(combiner_tensor_labels, cpos1) - output_tensor_vl = [output_tensor_labels...] - for (ii, li) in enumerate(labels_comb) - insert!(output_tensor_vl, output_tensor_cpos + ii, li) + if is_combining_contraction + Alabels, Blabels = tensor_labels, combiner_tensor_labels + final_labels = contract_labels(Blabels, Alabels) + final_labels_n = contract_labels(combiner_tensor_labels, tensor_labels) + output_tensor_inds = inds(output_tensor) + if final_labels != final_labels_n + perm = getperm(final_labels_n, final_labels) + output_tensor_inds = permute(inds(output_tensor), perm) + output_tensor_labels = permute(output_tensor_labels, perm) + end + cpos1, output_tensor_cpos = intersect_positions( + combiner_tensor_labels, output_tensor_labels + ) + labels_comb = deleteat(combiner_tensor_labels, cpos1) + output_tensor_vl = [output_tensor_labels...] + for (ii, li) in enumerate(labels_comb) + insert!(output_tensor_vl, output_tensor_cpos + ii, li) + end + deleteat!(output_tensor_vl, output_tensor_cpos) + labels_perm = tuple(output_tensor_vl...) + perm = getperm(labels_perm, tensor_labels) + tensorp = reshape(output_tensor, permute(inds(tensor), perm)) + permutedims!(tensorp, tensor, perm) + return reshape(tensorp, output_tensor_inds) + else # Uncombining + cpos1, cpos2 = intersect_positions(combiner_tensor_labels, tensor_labels) + output_tensor_storage = copy(storage(tensor)) + indsC = deleteat(inds(combiner_tensor), cpos1) + output_tensor_inds = insertat(inds(tensor), indsC, cpos2) + return NDTensors.tensor(output_tensor_storage, output_tensor_inds) end - deleteat!(output_tensor_vl, output_tensor_cpos) - labels_perm = tuple(output_tensor_vl...) 
- perm = getperm(labels_perm, tensor_labels) - tensorp = reshape(output_tensor, permute(inds(tensor), perm)) - permutedims!(tensorp, tensor, perm) - return reshape(tensorp, output_tensor_inds) - else # Uncombining - cpos1, cpos2 = intersect_positions(combiner_tensor_labels, tensor_labels) - output_tensor_storage = copy(storage(tensor)) - indsC = deleteat(inds(combiner_tensor), cpos1) - output_tensor_inds = insertat(inds(tensor), indsC, cpos2) - return NDTensors.tensor(output_tensor_storage, output_tensor_inds) - end - return invalid_combiner_contraction_error( - tensor, tensor_labels, combiner_tensor, combiner_tensor_labels - ) + return invalid_combiner_contraction_error( + tensor, tensor_labels, combiner_tensor, combiner_tensor_labels + ) end function contract!!( - output_tensor::Tensor, - output_tensor_labels, - tensor::Tensor, - tensor_labels, - combiner_tensor::CombinerTensor, - combiner_tensor_labels, -) - return contract!!( - output_tensor, - output_tensor_labels, - combiner_tensor, - combiner_tensor_labels, - tensor, - tensor_labels, - ) + output_tensor::Tensor, + output_tensor_labels, + tensor::Tensor, + tensor_labels, + combiner_tensor::CombinerTensor, + combiner_tensor_labels, + ) + return contract!!( + output_tensor, + output_tensor_labels, + combiner_tensor, + combiner_tensor_labels, + tensor, + tensor_labels, + ) end function contract( - diag_tensor::DiagTensor, - diag_tensor_labels, - combiner_tensor::CombinerTensor, - combiner_tensor_labels, -) - return contract( - dense(diag_tensor), diag_tensor_labels, combiner_tensor, combiner_tensor_labels - ) + diag_tensor::DiagTensor, + diag_tensor_labels, + combiner_tensor::CombinerTensor, + combiner_tensor_labels, + ) + return contract( + dense(diag_tensor), diag_tensor_labels, combiner_tensor, combiner_tensor_labels + ) end diff --git a/NDTensors/src/dense/dense.jl b/NDTensors/src/dense/dense.jl index 0cd079f2b6..8c2a5fec31 100644 --- a/NDTensors/src/dense/dense.jl +++ b/NDTensors/src/dense/dense.jl @@ -2,94 +2,94 @@ # Dense storage # -struct Dense{ElT,DataT<:AbstractArray} <: TensorStorage{ElT} - data::DataT - function Dense{ElT,DataT}(data::DataT) where {ElT,DataT<:AbstractVector} - @assert ElT == eltype(DataT) - return new{ElT,DataT}(data) - end +struct Dense{ElT, DataT <: AbstractArray} <: TensorStorage{ElT} + data::DataT + function Dense{ElT, DataT}(data::DataT) where {ElT, DataT <: AbstractVector} + @assert ElT == eltype(DataT) + return new{ElT, DataT}(data) + end - function Dense{ElT,DataT}(data::DataT) where {ElT,DataT<:AbstractArray} - println("Only Vector-based datatypes are currently supported.") - throw(TypeError) - end + function Dense{ElT, DataT}(data::DataT) where {ElT, DataT <: AbstractArray} + println("Only Vector-based datatypes are currently supported.") + throw(TypeError) + end end #Start with high information constructors and move to low information constructors -function Dense{ElT,DataT}() where {ElT,DataT<:AbstractArray} - return Dense{ElT,DataT}(DataT()) +function Dense{ElT, DataT}() where {ElT, DataT <: AbstractArray} + return Dense{ElT, DataT}(DataT()) end # Construct from a set of indices # This will fail if zero(ElT) is not defined for the ElT -function Dense{ElT,DataT}(inds::Tuple) where {ElT,DataT<:AbstractArray} - return Dense{ElT,DataT}(generic_zeros(DataT, dim(inds))) +function Dense{ElT, DataT}(inds::Tuple) where {ElT, DataT <: AbstractArray} + return Dense{ElT, DataT}(generic_zeros(DataT, dim(inds))) end -function Dense{ElT,DataT}(dim::Integer) where {ElT,DataT<:AbstractArray} - return 
Dense{ElT,DataT}(generic_zeros(DataT, dim)) +function Dense{ElT, DataT}(dim::Integer) where {ElT, DataT <: AbstractArray} + return Dense{ElT, DataT}(generic_zeros(DataT, dim)) end -function Dense{ElT,DataT}(::UndefInitializer, inds::Tuple) where {ElT,DataT<:AbstractArray} - return Dense{ElT,DataT}(similar(DataT, dim(inds))) +function Dense{ElT, DataT}(::UndefInitializer, inds::Tuple) where {ElT, DataT <: AbstractArray} + return Dense{ElT, DataT}(similar(DataT, dim(inds))) end -function Dense{ElT,DataT}(x, dim::Integer) where {ElT,DataT<:AbstractVector} - return Dense{ElT,DataT}(fill!(similar(DataT, dim), ElT(x))) +function Dense{ElT, DataT}(x, dim::Integer) where {ElT, DataT <: AbstractVector} + return Dense{ElT, DataT}(fill!(similar(DataT, dim), ElT(x))) end -function Dense{ElR,DataT}(data::AbstractArray) where {ElR,DataT<:AbstractArray} - data = convert(DataT, data) - return Dense{ElR,DataT}(data) +function Dense{ElR, DataT}(data::AbstractArray) where {ElR, DataT <: AbstractArray} + data = convert(DataT, data) + return Dense{ElR, DataT}(data) end # This function is ill-defined. It cannot transform a complex type to real... -function Dense{ElR}(data::AbstractArray{ElT}) where {ElR,ElT} - return Dense{ElR}(convert(similartype(typeof(data), ElR), data)) +function Dense{ElR}(data::AbstractArray{ElT}) where {ElR, ElT} + return Dense{ElR}(convert(similartype(typeof(data), ElR), data)) end function Dense{ElT}(data::AbstractArray{ElT}) where {ElT} - return Dense{ElT,typeof(data)}(data) + return Dense{ElT, typeof(data)}(data) end function Dense{ElT}(inds::Tuple) where {ElT} - return Dense{ElT}(dim(inds)) + return Dense{ElT}(dim(inds)) end function Dense{ElT}(dim::Integer) where {ElT} - return Dense{ElT,default_datatype(ElT)}(dim) + return Dense{ElT, default_datatype(ElT)}(dim) end -Dense{ElT}() where {ElT} = Dense{ElT,default_datatype(ElT)}() +Dense{ElT}() where {ElT} = Dense{ElT, default_datatype(ElT)}() function Dense(data::AbstractVector) - return Dense{eltype(data)}(data) + return Dense{eltype(data)}(data) end -function Dense(data::DataT) where {DataT<:AbstractArray{<:Any,N}} where {N} - #println("Warning: Only vector based datatypes are currenlty supported by Dense. The data structure provided will be vectorized.") - return Dense(vec(data)) +function Dense(data::DataT) where {DataT <: AbstractArray{<:Any, N}} where {N} + #println("Warning: Only vector based datatypes are currenlty supported by Dense. 
The data structure provided will be vectorized.") + return Dense(vec(data)) end function Dense(DataT::Type{<:AbstractArray}, dim::Integer) - ElT = eltype(DataT) - return Dense{ElT,DataT}(dim) + ElT = eltype(DataT) + return Dense{ElT, DataT}(dim) end Dense(ElT::Type{<:Number}, dim::Integer) = Dense{ElT}(dim) function Dense(ElT::Type{<:Number}, ::UndefInitializer, dim::Integer) - return Dense{ElT,default_datatype(ElT)}(undef, (dim,)) + return Dense{ElT, default_datatype(ElT)}(undef, (dim,)) end function Dense(::UndefInitializer, dim::Integer) - datatype = default_datatype() - return Dense{eltype(datatype),datatype}(undef, (dim,)) + datatype = default_datatype() + return Dense{eltype(datatype), datatype}(undef, (dim,)) end function Dense(x::Number, dim::Integer) - ElT = typeof(x) - return Dense{ElT,default_datatype(ElT)}(x, dim) + ElT = typeof(x) + return Dense{ElT, default_datatype(ElT)}(x, dim) end Dense(dim::Integer) = Dense(default_eltype(), dim) @@ -102,47 +102,47 @@ setdata(D::Dense, ndata) = Dense(ndata) setdata(storagetype::Type{<:Dense}, data) = Dense(data) function copy(D::Dense) - return Dense(copy(expose(data(D)))) + return Dense(copy(expose(data(D)))) end function Base.copyto!(R::Dense, T::Dense) - copyto!(expose(data(R)), expose(data(T))) - return R + copyto!(expose(data(R)), expose(data(T))) + return R end function Base.real(T::Type{<:Dense}) - return set_datatype(T, similartype(datatype(T), real(eltype(T)))) + return set_datatype(T, similartype(datatype(T), real(eltype(T)))) end function complex(T::Type{<:Dense}) - return set_datatype(T, similartype(datatype(T), complex(eltype(T)))) + return set_datatype(T, similartype(datatype(T), complex(eltype(T)))) end # TODO: Define a generic `dense` for `Tensor`, `TensorStorage`. dense(storagetype::Type{<:Dense}) = storagetype # TODO: make these more general, move to tensorstorage.jl -datatype(storetype::Type{<:Dense{<:Any,DataT}}) where {DataT} = DataT +datatype(storetype::Type{<:Dense{<:Any, DataT}}) where {DataT} = DataT using TypeParameterAccessors: unwrap_array_type function promote_rule( - ::Type{<:Dense{ElT1,DataT1}}, ::Type{<:Dense{ElT2,DataT2}} -) where {ElT1,DataT1,ElT2,DataT2} - ElR = promote_type(ElT1, ElT2) - VecR = promote_type(unwrap_array_type(DataT1), unwrap_array_type(DataT2)) - VecR = similartype(VecR, ElR) - return Dense{ElR,VecR} + ::Type{<:Dense{ElT1, DataT1}}, ::Type{<:Dense{ElT2, DataT2}} + ) where {ElT1, DataT1, ElT2, DataT2} + ElR = promote_type(ElT1, ElT2) + VecR = promote_type(unwrap_array_type(DataT1), unwrap_array_type(DataT2)) + VecR = similartype(VecR, ElR) + return Dense{ElR, VecR} end # This is for type promotion for Scalar*Dense function promote_rule( - ::Type{<:Dense{ElT1,DataT}}, ::Type{ElT2} -) where {DataT,ElT1,ElT2<:Number} - ElR = promote_type(ElT1, ElT2) - DataR = set_eltype(DataT, ElR) - return Dense{ElR,DataR} + ::Type{<:Dense{ElT1, DataT}}, ::Type{ElT2} + ) where {DataT, ElT1, ElT2 <: Number} + ElR = promote_type(ElT1, ElT2) + DataR = set_eltype(DataT, ElR) + return Dense{ElR, DataR} end -function convert(::Type{<:Dense{ElR,DataT}}, D::Dense) where {ElR,DataT} - return Dense(convert(DataT, data(D))) +function convert(::Type{<:Dense{ElR, DataT}}, D::Dense) where {ElR, DataT} + return Dense(convert(DataT, data(D))) end diff --git a/NDTensors/src/dense/densetensor.jl b/NDTensors/src/dense/densetensor.jl index f1aa7b07ff..17aabaecc7 100644 --- a/NDTensors/src/dense/densetensor.jl +++ b/NDTensors/src/dense/densetensor.jl @@ -4,7 +4,7 @@ using SparseArrays: nnz # DenseTensor (Tensor using Dense 
storage) # -const DenseTensor{ElT,N,StoreT,IndsT} = Tensor{ElT,N,StoreT,IndsT} where {StoreT<:Dense} +const DenseTensor{ElT, N, StoreT, IndsT} = Tensor{ElT, N, StoreT, IndsT} where {StoreT <: Dense} DenseTensor(::Type{ElT}, inds) where {ElT} = tensor(Dense(ElT, dim(inds)), inds) @@ -17,11 +17,11 @@ DenseTensor(inds) = tensor(Dense(dim(inds)), inds) DenseTensor(inds::Int...) = DenseTensor(inds) function DenseTensor(::Type{ElT}, ::UndefInitializer, inds) where {ElT} - return tensor(Dense(ElT, undef, dim(inds)), inds) + return tensor(Dense(ElT, undef, dim(inds)), inds) end function DenseTensor(::Type{ElT}, ::UndefInitializer, inds::Int...) where {ElT} - return DenseTensor(ElT, undef, inds) + return DenseTensor(ElT, undef, inds) end DenseTensor(::UndefInitializer, inds) = tensor(Dense(undef, dim(inds)), inds) @@ -33,7 +33,7 @@ DenseTensor(::UndefInitializer, inds::Int...) = DenseTensor(undef, inds) # function randomDenseTensor(::Type{ElT}, inds) where {ElT} - return tensor(generic_randn(Dense{ElT}, dim(inds)), inds) + return tensor(generic_randn(Dense{ElT}, dim(inds)), inds) end randomDenseTensor(inds) = randomDenseTensor(default_eltype(), inds) @@ -48,27 +48,27 @@ IndexStyle(::Type{<:DenseTensor}) = IndexLinear() iterate(T::DenseTensor, args...) = iterate(storage(T), args...) function _zeros(TensorT::Type{<:DenseTensor}, inds) - return tensor(generic_zeros(storagetype(TensorT), dim(inds)), inds) + return tensor(generic_zeros(storagetype(TensorT), dim(inds)), inds) end function zeros(TensorT::Type{<:DenseTensor}, inds) - return _zeros(TensorT, inds) + return _zeros(TensorT, inds) end # To fix method ambiguity with zeros(::Type, ::Tuple) function zeros(TensorT::Type{<:DenseTensor}, inds::Dims) - return _zeros(TensorT, inds) + return _zeros(TensorT, inds) end function zeros(TensorT::Type{<:DenseTensor}, inds::Tuple{}) - return _zeros(TensorT, inds) + return _zeros(TensorT, inds) end convert(::Type{Array}, T::DenseTensor) = reshape(data(storage(T)), dims(inds(T))) function Base.copyto!(R::DenseTensor, T::DenseTensor) - copyto!(storage(R), storage(T)) - return R + copyto!(storage(R), storage(T)) + return R end # Create an Array that is a view of the Dense Tensor @@ -76,15 +76,15 @@ end array(T::DenseTensor) = convert(Array, T) function diagview(T::DenseTensor) - return diagview(array(T)) + return diagview(array(T)) end -function Array{ElT,N}(T::DenseTensor{ElT,N}) where {ElT,N} - return copy(array(T)) +function Array{ElT, N}(T::DenseTensor{ElT, N}) where {ElT, N} + return copy(array(T)) end -function Array(T::DenseTensor{ElT,N}) where {ElT,N} - return Array{ElT,N}(T) +function Array(T::DenseTensor{ElT, N}) where {ElT, N} + return Array{ElT, N}(T) end # @@ -93,38 +93,38 @@ end ## TODO replace this with Exposed @propagate_inbounds function getindex(T::DenseTensor{<:Number}) - return getindex(expose(data(T))) + return getindex(expose(data(T))) end @propagate_inbounds function getindex(T::DenseTensor{<:Number}, I::Integer...) - Base.@_inline_meta - return getindex(expose(data(T)), Base._sub2ind(T, I...)) + Base.@_inline_meta + return getindex(expose(data(T)), Base._sub2ind(T, I...)) end @propagate_inbounds function getindex(T::DenseTensor{<:Number}, I::CartesianIndex) - Base.@_inline_meta - return getindex(T, I.I...) + Base.@_inline_meta + return getindex(T, I.I...) 
end @propagate_inbounds function setindex!( - T::DenseTensor{<:Number}, x::Number, I::Vararg{Integer} -) - Base.@_inline_meta - setindex!(data(T), x, Base._sub2ind(T, I...)) - return T + T::DenseTensor{<:Number}, x::Number, I::Vararg{Integer} + ) + Base.@_inline_meta + setindex!(data(T), x, Base._sub2ind(T, I...)) + return T end @propagate_inbounds function setindex!( - T::DenseTensor{<:Number}, x::Number, I::CartesianIndex -) - Base.@_inline_meta - setindex!(T, x, I.I...) - return T + T::DenseTensor{<:Number}, x::Number, I::CartesianIndex + ) + Base.@_inline_meta + setindex!(T, x, I.I...) + return T end @propagate_inbounds function setindex!(T::DenseTensor{<:Number}, x::Number) - setindex!(expose(data(T)), x) - return T + setindex!(expose(data(T)), x) + return T end # @@ -134,7 +134,7 @@ end @propagate_inbounds @inline getindex(T::DenseTensor, i::Integer) = storage(T)[i] @propagate_inbounds @inline function setindex!(T::DenseTensor, v, i::Integer) - return (storage(T)[i]=v; T) + return (storage(T)[i] = v; T) end # @@ -156,116 +156,116 @@ end ## end @propagate_inbounds function getindex(T::DenseTensor, I...) - AI = @view array(T)[I...] - storeR = Dense(vec(AI)) - indsR = size(AI) - return tensor(storeR, indsR) + AI = @view array(T)[I...] + storeR = Dense(vec(AI)) + indsR = size(AI) + return tensor(storeR, indsR) end # Reshape a DenseTensor using the specified dimensions # This returns a view into the same Tensor data function reshape(T::DenseTensor, dims) - dim(T) == dim(dims) || error("Total new dimension must be the same as the old dimension") - return tensor(storage(T), dims) + dim(T) == dim(dims) || error("Total new dimension must be the same as the old dimension") + return tensor(storage(T), dims) end # This version fixes method ambiguity with AbstractArray reshape function reshape(T::DenseTensor, dims::Dims) - dim(T) == dim(dims) || error("Total new dimension must be the same as the old dimension") - return tensor(storage(T), dims) + dim(T) == dim(dims) || error("Total new dimension must be the same as the old dimension") + return tensor(storage(T), dims) end function reshape(T::DenseTensor, dims::Int...) - return tensor(storage(T), tuple(dims...)) + return tensor(storage(T), tuple(dims...)) end ## TODO might have to look into these functions more # If the storage data are regular Vectors, use Base.copyto! function copyto!( - R::Tensor{<:Number,N,<:Dense{<:Number,<:Vector}}, - T::Tensor{<:Number,N,<:Dense{<:Number,<:Vector}}, -) where {N} - RA = array(R) - TA = array(T) - RA .= TA - return R + R::Tensor{<:Number, N, <:Dense{<:Number, <:Vector}}, + T::Tensor{<:Number, N, <:Dense{<:Number, <:Vector}}, + ) where {N} + RA = array(R) + TA = array(T) + RA .= TA + return R end # If they are something more complicated like views, use Strided copyto! function copyto!( - R::DenseTensor{<:Number,N,StoreT}, T::DenseTensor{<:Number,N,StoreT} -) where {N,StoreT<:StridedArray} - RA = array(R) - TA = array(T) - @strided RA .= TA - return R + R::DenseTensor{<:Number, N, StoreT}, T::DenseTensor{<:Number, N, StoreT} + ) where {N, StoreT <: StridedArray} + RA = array(R) + TA = array(T) + @strided RA .= TA + return R end # Maybe allocate output data. # TODO: Remove this in favor of `map!` # applied to `PermutedDimsArray`. 
function permutedims!!(R::DenseTensor, T::DenseTensor, perm, f::Function) - Base.checkdims_perm(R, T, perm) - RR = convert(promote_type(typeof(R), typeof(T)), R) - permutedims!(RR, T, perm, f) - return RR + Base.checkdims_perm(R, T, perm) + RR = convert(promote_type(typeof(R), typeof(T)), R) + permutedims!(RR, T, perm, f) + return RR end function permutedims!!(R::DenseTensor, T::DenseTensor, perm) - Base.checkdims_perm(R, T, perm) - RR = convert(promote_type(typeof(R), typeof(T)), R) - permutedims!(RR, T, perm) - return RR + Base.checkdims_perm(R, T, perm) + RR = convert(promote_type(typeof(R), typeof(T)), R) + permutedims!(RR, T, perm) + return RR end # TODO: call permutedims!(R,T,perm,(r,t)->t)? function permutedims!( - R::DenseTensor{<:Number,N,StoreT}, T::DenseTensor{<:Number,N,StoreT}, perm::NTuple{N,Int} -) where {N,StoreT<:StridedArray} - RA = array(R) - TA = array(T) - permutedims!(expose(RA), expose(TA), perm) - return R + R::DenseTensor{<:Number, N, StoreT}, T::DenseTensor{<:Number, N, StoreT}, perm::NTuple{N, Int} + ) where {N, StoreT <: StridedArray} + RA = array(R) + TA = array(T) + permutedims!(expose(RA), expose(TA), perm) + return R end # TODO: call permutedims!(R,T,perm,(r,t)->t)? function permutedims!( - R::DenseTensor{<:Number,N}, T::DenseTensor{<:Number,N}, perm::NTuple{N,Int} -) where {N} - RA = array(R) - TA = array(T) - permutedims!(expose(RA), expose(TA), perm) - return R + R::DenseTensor{<:Number, N}, T::DenseTensor{<:Number, N}, perm::NTuple{N, Int} + ) where {N} + RA = array(R) + TA = array(T) + permutedims!(expose(RA), expose(TA), perm) + return R end function apply!( - R::DenseTensor{<:Number,N,StoreT}, - T::DenseTensor{<:Number,N,StoreT}, - f::Function=(r, t) -> t, -) where {N,StoreT<:StridedArray} - RA = array(R) - TA = array(T) - @strided RA .= f.(RA, TA) - return R + R::DenseTensor{<:Number, N, StoreT}, + T::DenseTensor{<:Number, N, StoreT}, + f::Function = (r, t) -> t, + ) where {N, StoreT <: StridedArray} + RA = array(R) + TA = array(T) + @strided RA .= f.(RA, TA) + return R end -function apply!(R::DenseTensor, T::DenseTensor, f::Function=(r, t) -> t) - RA = array(R) - TA = array(T) - RA .= f.(RA, TA) - return R +function apply!(R::DenseTensor, T::DenseTensor, f::Function = (r, t) -> t) + RA = array(R) + TA = array(T) + RA .= f.(RA, TA) + return R end function permutedims!( - R::DenseTensor{<:Number,N}, T::DenseTensor{<:Number,N}, perm, f::Function -) where {N} - if nnz(R) == 1 && nnz(T) == 1 - R[] = f(R[], T[]) - return R - end - RA = array(R) - TA = array(T) - return permutedims!!(RA, TA, perm, f) + R::DenseTensor{<:Number, N}, T::DenseTensor{<:Number, N}, perm, f::Function + ) where {N} + if nnz(R) == 1 && nnz(T) == 1 + R[] = f(R[], T[]) + return R + end + RA = array(R) + TA = array(T) + return permutedims!!(RA, TA, perm, f) end """ @@ -282,40 +282,40 @@ First T is permuted as `permutedims(3,2,1)`, then reshaped such that the original indices 3 and 2 are combined. """ function permute_reshape( - T::DenseTensor{ElT,NT,IndsT}, pos::Vararg{Any,N} -) where {ElT,NT,IndsT,N} - perm = flatten(pos...) 
- - length(perm) ≠ NT && error("Index positions must add up to order of Tensor ($N)") - isperm(perm) || error("Index positions must be a permutation") - - dimsT = dims(T) - indsT = inds(T) - if !is_trivial_permutation(perm) - T = permutedims(T, perm) - end - if all(p -> length(p) == 1, pos) && N == NT - return T - end - newdims = MVector(ntuple(_ -> eltype(IndsT)(1), Val(N))) - for i in 1:N - if length(pos[i]) == 1 - # No reshape needed, just use the - # original index - newdims[i] = indsT[pos[i][1]] - else - newdim_i = 1 - for p in pos[i] - newdim_i *= dimsT[p] - end - newdims[i] = eltype(IndsT)(newdim_i) + T::DenseTensor{ElT, NT, IndsT}, pos::Vararg{Any, N} + ) where {ElT, NT, IndsT, N} + perm = flatten(pos...) + + length(perm) ≠ NT && error("Index positions must add up to order of Tensor ($N)") + isperm(perm) || error("Index positions must be a permutation") + + dimsT = dims(T) + indsT = inds(T) + if !is_trivial_permutation(perm) + T = permutedims(T, perm) + end + if all(p -> length(p) == 1, pos) && N == NT + return T + end + newdims = MVector(ntuple(_ -> eltype(IndsT)(1), Val(N))) + for i in 1:N + if length(pos[i]) == 1 + # No reshape needed, just use the + # original index + newdims[i] = indsT[pos[i][1]] + else + newdim_i = 1 + for p in pos[i] + newdim_i *= dimsT[p] + end + newdims[i] = eltype(IndsT)(newdim_i) + end end - end - newinds = similartype(IndsT, Val{N})(Tuple(newdims)) - return reshape(T, newinds) + newinds = similartype(IndsT, Val{N})(Tuple(newdims)) + return reshape(T, newinds) end function Base.show(io::IO, mime::MIME"text/plain", T::DenseTensor) - summary(io, T) - return print_tensor(io, T) + summary(io, T) + return print_tensor(io, T) end diff --git a/NDTensors/src/dense/generic_array_constructors.jl b/NDTensors/src/dense/generic_array_constructors.jl index c9ef550e63..98b2375f0e 100644 --- a/NDTensors/src/dense/generic_array_constructors.jl +++ b/NDTensors/src/dense/generic_array_constructors.jl @@ -1,36 +1,36 @@ using TypeParameterAccessors: - default_type_parameters, - parenttype, - set_eltype, - specify_default_type_parameters, - specify_type_parameters, - type_parameters + default_type_parameters, + parenttype, + set_eltype, + specify_default_type_parameters, + specify_type_parameters, + type_parameters ##TODO replace randn in ITensors with generic_randn ## and replace zeros with generic_zeros # This is a file to write generic fills for NDTensors. # This includes random fills, zeros, ... 
-function generic_randn(StoreT::Type{<:Dense}, dims::Integer; rng=Random.default_rng()) - StoreT = specify_default_type_parameters(StoreT) - DataT = specify_type_parameters( - type_parameters(StoreT, parenttype), eltype, eltype(StoreT) - ) - @assert eltype(StoreT) == eltype(DataT) +function generic_randn(StoreT::Type{<:Dense}, dims::Integer; rng = Random.default_rng()) + StoreT = specify_default_type_parameters(StoreT) + DataT = specify_type_parameters( + type_parameters(StoreT, parenttype), eltype, eltype(StoreT) + ) + @assert eltype(StoreT) == eltype(DataT) - data = generic_randn(DataT, dims; rng=rng) - StoreT = set_datatype(StoreT, typeof(data)) - return StoreT(data) + data = generic_randn(DataT, dims; rng = rng) + StoreT = set_datatype(StoreT, typeof(data)) + return StoreT(data) end function generic_zeros(StoreT::Type{<:Dense}, dims::Integer) - StoreT = specify_default_type_parameters(StoreT) - DataT = specify_type_parameters( - type_parameters(StoreT, parenttype), eltype, eltype(StoreT) - ) - @assert eltype(StoreT) == eltype(DataT) + StoreT = specify_default_type_parameters(StoreT) + DataT = specify_type_parameters( + type_parameters(StoreT, parenttype), eltype, eltype(StoreT) + ) + @assert eltype(StoreT) == eltype(DataT) - data = generic_zeros(DataT, dims) - StoreT = set_datatype(StoreT, typeof(data)) - return StoreT(data) + data = generic_zeros(DataT, dims) + StoreT = set_datatype(StoreT, typeof(data)) + return StoreT(data) end diff --git a/NDTensors/src/dense/linearalgebra/decompositions.jl b/NDTensors/src/dense/linearalgebra/decompositions.jl index d10991745f..ec383233e6 100644 --- a/NDTensors/src/dense/linearalgebra/decompositions.jl +++ b/NDTensors/src/dense/linearalgebra/decompositions.jl @@ -1,96 +1,96 @@ Strided.StridedView(T::DenseTensor) = StridedView(convert(Array, T)) function drop_singletons(::Order{N}, labels, dims) where {N} - labelsᵣ = ntuple(zero, Val(N)) - dimsᵣ = labelsᵣ - nkeep = 1 - for n in 1:length(dims) - if dims[n] > 1 - labelsᵣ = @inbounds setindex(labelsᵣ, labels[n], nkeep) - dimsᵣ = @inbounds setindex(dimsᵣ, dims[n], nkeep) - nkeep += 1 + labelsᵣ = ntuple(zero, Val(N)) + dimsᵣ = labelsᵣ + nkeep = 1 + for n in 1:length(dims) + if dims[n] > 1 + labelsᵣ = @inbounds setindex(labelsᵣ, labels[n], nkeep) + dimsᵣ = @inbounds setindex(dimsᵣ, dims[n], nkeep) + nkeep += 1 + end end - end - return labelsᵣ, dimsᵣ + return labelsᵣ, dimsᵣ end # svd of an order-n tensor according to positions Lpos # and Rpos function svd( - T::DenseTensor{<:Number,N,IndsT}, Lpos::NTuple{NL,Int}, Rpos::NTuple{NR,Int}; kwargs... -) where {N,IndsT,NL,NR} - M = permute_reshape(T, Lpos, Rpos) - UM, S, VM, spec = svd(M; kwargs...) - u = ind(UM, 2) - v = ind(VM, 2) + T::DenseTensor{<:Number, N, IndsT}, Lpos::NTuple{NL, Int}, Rpos::NTuple{NR, Int}; kwargs... + ) where {N, IndsT, NL, NR} + M = permute_reshape(T, Lpos, Rpos) + UM, S, VM, spec = svd(M; kwargs...) + u = ind(UM, 2) + v = ind(VM, 2) - Linds = similartype(IndsT, Val{NL})(ntuple(i -> inds(T)[Lpos[i]], Val(NL))) - Uinds = push(Linds, u) + Linds = similartype(IndsT, Val{NL})(ntuple(i -> inds(T)[Lpos[i]], Val(NL))) + Uinds = push(Linds, u) - # TODO: do these positions need to be reversed? - Rinds = similartype(IndsT, Val{NR})(ntuple(i -> inds(T)[Rpos[i]], Val(NR))) - Vinds = push(Rinds, v) + # TODO: do these positions need to be reversed? 
+ Rinds = similartype(IndsT, Val{NR})(ntuple(i -> inds(T)[Rpos[i]], Val(NR))) + Vinds = push(Rinds, v) - U = reshape(UM, Uinds) - V = reshape(VM, Vinds) + U = reshape(UM, Uinds) + V = reshape(VM, Vinds) - return U, S, V, spec + return U, S, V, spec end # qr decomposition of an order-n tensor according to # positions Lpos and Rpos function qr( - T::DenseTensor{<:Number,N,IndsT}, Lpos::NTuple{NL,Int}, Rpos::NTuple{NR,Int}; kwargs... -) where {N,IndsT,NL,NR} - M = permute_reshape(T, Lpos, Rpos) - QM, RM = qr(M; kwargs...) - q = ind(QM, 2) - r = ind(RM, 1) - # TODO: simplify this by permuting inds(T) by (Lpos,Rpos) - # then grab Linds,Rinds - Linds = similartype(IndsT, Val{NL})(ntuple(i -> inds(T)[Lpos[i]], Val(NL))) - Qinds = push(Linds, r) - Q = reshape(QM, Qinds) - Rinds = similartype(IndsT, Val{NR})(ntuple(i -> inds(T)[Rpos[i]], Val(NR))) - Rinds = pushfirst(Rinds, r) - R = reshape(RM, Rinds) - return Q, R + T::DenseTensor{<:Number, N, IndsT}, Lpos::NTuple{NL, Int}, Rpos::NTuple{NR, Int}; kwargs... + ) where {N, IndsT, NL, NR} + M = permute_reshape(T, Lpos, Rpos) + QM, RM = qr(M; kwargs...) + q = ind(QM, 2) + r = ind(RM, 1) + # TODO: simplify this by permuting inds(T) by (Lpos,Rpos) + # then grab Linds,Rinds + Linds = similartype(IndsT, Val{NL})(ntuple(i -> inds(T)[Lpos[i]], Val(NL))) + Qinds = push(Linds, r) + Q = reshape(QM, Qinds) + Rinds = similartype(IndsT, Val{NR})(ntuple(i -> inds(T)[Rpos[i]], Val(NR))) + Rinds = pushfirst(Rinds, r) + R = reshape(RM, Rinds) + return Q, R end # polar decomposition of an order-n tensor according to positions Lpos # and Rpos function polar( - T::DenseTensor{<:Number,N,IndsT}, Lpos::NTuple{NL,Int}, Rpos::NTuple{NR,Int} -) where {N,IndsT,NL,NR} - M = permute_reshape(T, Lpos, Rpos) - UM, PM = polar(M) + T::DenseTensor{<:Number, N, IndsT}, Lpos::NTuple{NL, Int}, Rpos::NTuple{NR, Int} + ) where {N, IndsT, NL, NR} + M = permute_reshape(T, Lpos, Rpos) + UM, PM = polar(M) - # TODO: turn these into functions - Linds = similartype(IndsT, Val{NL})(ntuple(i -> inds(T)[Lpos[i]], Val(NL))) - Rinds = similartype(IndsT, Val{NR})(ntuple(i -> inds(T)[Rpos[i]], Val(NR))) + # TODO: turn these into functions + Linds = similartype(IndsT, Val{NL})(ntuple(i -> inds(T)[Lpos[i]], Val(NL))) + Rinds = similartype(IndsT, Val{NR})(ntuple(i -> inds(T)[Rpos[i]], Val(NR))) - # Use sim to create "similar" indices, in case - # the indices have identifiers. If not this should - # act as an identity operator - simRinds = sim(Rinds) - Uinds = (Linds..., simRinds...) - Pinds = (simRinds..., Rinds...) + # Use sim to create "similar" indices, in case + # the indices have identifiers. If not this should + # act as an identity operator + simRinds = sim(Rinds) + Uinds = (Linds..., simRinds...) + Pinds = (simRinds..., Rinds...) 
- U = reshape(UM, Uinds) - P = reshape(PM, Pinds) - return U, P + U = reshape(UM, Uinds) + P = reshape(PM, Pinds) + return U, P end function LinearAlgebra.exp( - T::DenseTensor{ElT,N}, Lpos::NTuple{NL,Int}, Rpos::NTuple{NR,Int}; ishermitian::Bool=false -) where {ElT,N,NL,NR} - M = permute_reshape(T, Lpos, Rpos) - indsTp = permute(inds(T), (Lpos..., Rpos...)) - if ishermitian - expM = parent(exp(Hermitian(matrix(M)))) - return tensor(Dense{ElT}(vec(expM)), indsTp) - else - expM = exp(M) - return reshape(expM, indsTp) - end + T::DenseTensor{ElT, N}, Lpos::NTuple{NL, Int}, Rpos::NTuple{NR, Int}; ishermitian::Bool = false + ) where {ElT, N, NL, NR} + M = permute_reshape(T, Lpos, Rpos) + indsTp = permute(inds(T), (Lpos..., Rpos...)) + if ishermitian + expM = parent(exp(Hermitian(matrix(M)))) + return tensor(Dense{ElT}(vec(expM)), indsTp) + else + expM = exp(M) + return reshape(expM, indsTp) + end end diff --git a/NDTensors/src/dense/set_types.jl b/NDTensors/src/dense/set_types.jl index e4a4fa4ea1..21d6c9c4e0 100644 --- a/NDTensors/src/dense/set_types.jl +++ b/NDTensors/src/dense/set_types.jl @@ -1,13 +1,13 @@ using TypeParameterAccessors: TypeParameterAccessors, Position, parenttype function set_datatype(storagetype::Type{<:Dense}, datatype::Type{<:AbstractVector}) - return Dense{eltype(datatype),datatype} + return Dense{eltype(datatype), datatype} end function set_datatype(storagetype::Type{<:Dense}, datatype::Type{<:AbstractArray}) - return error( - "Setting the `datatype` of the storage type `$storagetype` to a $(ndims(datatype))-dimsional array of type `$datatype` is not currently supported, use an `AbstractVector` instead.", - ) + return error( + "Setting the `datatype` of the storage type `$storagetype` to a $(ndims(datatype))-dimsional array of type `$datatype` is not currently supported, use an `AbstractVector` instead.", + ) end TypeParameterAccessors.default_type_parameters(::Type{<:Dense}) = (Float64, Vector) diff --git a/NDTensors/src/dense/tensoralgebra/contract.jl b/NDTensors/src/dense/tensoralgebra/contract.jl index 60db4e9a22..6ec0632336 100644 --- a/NDTensors/src/dense/tensoralgebra/contract.jl +++ b/NDTensors/src/dense/tensoralgebra/contract.jl @@ -1,231 +1,231 @@ using SparseArrays: nnz function contraction_output(tensor1::DenseTensor, tensor2::DenseTensor, indsR) - tensortypeR = contraction_output_type(typeof(tensor1), typeof(tensor2), indsR) - return NDTensors.similar(tensortypeR, indsR) + tensortypeR = contraction_output_type(typeof(tensor1), typeof(tensor2), indsR) + return NDTensors.similar(tensortypeR, indsR) end # Both are scalar-like tensors function _contract_scalar!( - R::DenseTensor{ElR}, - labelsR, - T1::Number, - labelsT1, - T2::Number, - labelsT2, - α=one(ElR), - β=zero(ElR), -) where {ElR} - if iszero(β) - R[] = α * T1 * T2 - elseif iszero(α) - R[] = β * R[] - else - R[] = α * T1 * T2 + β * R[] - end - return R + R::DenseTensor{ElR}, + labelsR, + T1::Number, + labelsT1, + T2::Number, + labelsT2, + α = one(ElR), + β = zero(ElR), + ) where {ElR} + if iszero(β) + R[] = α * T1 * T2 + elseif iszero(α) + R[] = β * R[] + else + R[] = α * T1 * T2 + β * R[] + end + return R end # Trivial permutation # Version where R and T have different element types, so we can't call BLAS # Instead use Julia's broadcasting (maybe consider Strided in the future) function _contract_scalar_noperm!( - R::DenseTensor{ElR}, T::DenseTensor, α, β=zero(ElR) -) where {ElR} - Rᵈ = data(R) - Tᵈ = data(T) - if iszero(β) - if iszero(α) - fill!(Rᵈ, 0) - else - Rᵈ .= α .* Tᵈ - end - elseif isone(β) - 
if iszero(α) - # No-op - # Rᵈ .= Rᵈ + R::DenseTensor{ElR}, T::DenseTensor, α, β = zero(ElR) + ) where {ElR} + Rᵈ = data(R) + Tᵈ = data(T) + if iszero(β) + if iszero(α) + fill!(Rᵈ, 0) + else + Rᵈ .= α .* Tᵈ + end + elseif isone(β) + if iszero(α) + # No-op + # Rᵈ .= Rᵈ + else + Rᵈ .= α .* Tᵈ .+ Rᵈ + end else - Rᵈ .= α .* Tᵈ .+ Rᵈ + if iszero(α) + # Rᵈ .= β .* Rᵈ + BLAS.scal!(length(Rᵈ), β, Rᵈ, 1) + else + Rᵈ .= α .* Tᵈ .+ β .* Rᵈ + end end - else - if iszero(α) - # Rᵈ .= β .* Rᵈ - BLAS.scal!(length(Rᵈ), β, Rᵈ, 1) - else - Rᵈ .= α .* Tᵈ .+ β .* Rᵈ - end - end - return R + return R end # Trivial permutation # Version where R and T are the same element type, so we can # call BLAS function _contract_scalar_noperm!( - R::DenseTensor{ElR}, T::DenseTensor{ElR}, α, β=zero(ElR) -) where {ElR} - Rᵈ = data(R) - Tᵈ = data(T) - if iszero(β) - if iszero(α) - fill!(Rᵈ, 0) + R::DenseTensor{ElR}, T::DenseTensor{ElR}, α, β = zero(ElR) + ) where {ElR} + Rᵈ = data(R) + Tᵈ = data(T) + if iszero(β) + if iszero(α) + fill!(Rᵈ, 0) + else + Rᵈ .= α .* Tᵈ + end + elseif isone(β) + if iszero(α) + # No-op + # Rᵈ .= Rᵈ + else + # Rᵈ .= α .* Tᵈ .+ Rᵈ + LinearAlgebra.axpy!(α, Tᵈ, Rᵈ) + end else - Rᵈ .= α .* Tᵈ + if iszero(α) + Rᵈ .= β .* Rᵈ + else + # Rᵈ .= α .* Tᵈ .+ β .* Rᵈ + LinearAlgebra.axpby!(α, Tᵈ, β, Rᵈ) + end end - elseif isone(β) - if iszero(α) - # No-op - # Rᵈ .= Rᵈ - else - # Rᵈ .= α .* Tᵈ .+ Rᵈ - LinearAlgebra.axpy!(α, Tᵈ, Rᵈ) - end - else - if iszero(α) - Rᵈ .= β .* Rᵈ - else - # Rᵈ .= α .* Tᵈ .+ β .* Rᵈ - LinearAlgebra.axpby!(α, Tᵈ, β, Rᵈ) - end - end - return R + return R end function _contract_scalar_maybe_perm!( - ::Order{N}, R::DenseTensor{ElR,NR}, labelsR, T::DenseTensor, labelsT, α, β=zero(ElR) -) where {ElR,NR,N} - labelsRᵣ, dimsRᵣ = drop_singletons(Order(N), labelsR, dims(R)) - labelsTᵣ, dimsTᵣ = drop_singletons(Order(N), labelsT, dims(T)) - perm = getperm(labelsRᵣ, labelsTᵣ) - if is_trivial_permutation(perm) - # trivial permutation - _contract_scalar_noperm!(R, T, α, β) - else - # non-trivial permutation - Rᵣ = ReshapedArray(data(R), dimsRᵣ, ()) - Tᵣ = ReshapedArray(data(T), dimsTᵣ, ()) - _contract_scalar_perm!(Rᵣ, Tᵣ, perm, α, β) - end - return R + ::Order{N}, R::DenseTensor{ElR, NR}, labelsR, T::DenseTensor, labelsT, α, β = zero(ElR) + ) where {ElR, NR, N} + labelsRᵣ, dimsRᵣ = drop_singletons(Order(N), labelsR, dims(R)) + labelsTᵣ, dimsTᵣ = drop_singletons(Order(N), labelsT, dims(T)) + perm = getperm(labelsRᵣ, labelsTᵣ) + if is_trivial_permutation(perm) + # trivial permutation + _contract_scalar_noperm!(R, T, α, β) + else + # non-trivial permutation + Rᵣ = ReshapedArray(data(R), dimsRᵣ, ()) + Tᵣ = ReshapedArray(data(T), dimsTᵣ, ()) + _contract_scalar_perm!(Rᵣ, Tᵣ, perm, α, β) + end + return R end function _contract_scalar_maybe_perm!( - R::DenseTensor{ElR,NR}, labelsR, T::DenseTensor, labelsT, α, β=zero(ElR) -) where {ElR,NR} - N = count(≠(1), dims(R)) - _contract_scalar_maybe_perm!(Order(N), R, labelsR, T, labelsT, α, β) - return R + R::DenseTensor{ElR, NR}, labelsR, T::DenseTensor, labelsT, α, β = zero(ElR) + ) where {ElR, NR} + N = count(≠(1), dims(R)) + _contract_scalar_maybe_perm!(Order(N), R, labelsR, T, labelsT, α, β) + return R end # XXX: handle case of non-trivial permutation function _contract_scalar_maybe_perm!( - R::DenseTensor{ElR,NR}, - labelsR, - T₁::DenseTensor, - labelsT₁, - T₂::DenseTensor, - labelsT₂, - α=one(ElR), - β=zero(ElR), -) where {ElR,NR} - if nnz(T₁) == 1 - _contract_scalar_maybe_perm!(R, labelsR, T₂, labelsT₂, α * T₁[], β) - elseif nnz(T₂) == 1 - 
_contract_scalar_maybe_perm!(R, labelsR, T₁, labelsT₁, α * T₂[], β) - else - error("In _contract_scalar_perm!, one tensor must be a scalar") - end - return R + R::DenseTensor{ElR, NR}, + labelsR, + T₁::DenseTensor, + labelsT₁, + T₂::DenseTensor, + labelsT₂, + α = one(ElR), + β = zero(ElR), + ) where {ElR, NR} + if nnz(T₁) == 1 + _contract_scalar_maybe_perm!(R, labelsR, T₂, labelsT₂, α * T₁[], β) + elseif nnz(T₂) == 1 + _contract_scalar_maybe_perm!(R, labelsR, T₁, labelsT₁, α * T₂[], β) + else + error("In _contract_scalar_perm!, one tensor must be a scalar") + end + return R end # At least one of the tensors is size 1 function _contract_scalar!( - R::DenseTensor{ElR}, - labelsR, - T1::DenseTensor, - labelsT1, - T2::DenseTensor, - labelsT2, - α=one(ElR), - β=zero(ElR), -) where {ElR} - if nnz(T1) == nnz(T2) == 1 - _contract_scalar!(R, labelsR, T1[], labelsT1, T2[], labelsT2, α, β) - else - _contract_scalar_maybe_perm!(R, labelsR, T1, labelsT1, T2, labelsT2, α, β) - end - return R + R::DenseTensor{ElR}, + labelsR, + T1::DenseTensor, + labelsT1, + T2::DenseTensor, + labelsT2, + α = one(ElR), + β = zero(ElR), + ) where {ElR} + if nnz(T1) == nnz(T2) == 1 + _contract_scalar!(R, labelsR, T1[], labelsT1, T2[], labelsT2, α, β) + else + _contract_scalar_maybe_perm!(R, labelsR, T1, labelsT1, T2, labelsT2, α, β) + end + return R end function contract!( - R::DenseTensor{ElR,NR}, - labelsR, - T1::DenseTensor{ElT1,N1}, - labelsT1, - T2::DenseTensor{ElT2,N2}, - labelsT2, - α::Elα=one(ElR), - β::Elβ=zero(ElR), -) where {Elα,Elβ,ElR,ElT1,ElT2,NR,N1,N2} - # Special case for scalar tensors - if nnz(T1) == 1 || nnz(T2) == 1 - _contract_scalar!(R, labelsR, T1, labelsT1, T2, labelsT2, α, β) - return R - end - - if using_tblis() && ElR <: LinearAlgebra.BlasReal && (ElR == ElT1 == ElT2 == Elα == Elβ) - #@timeit_debug timer "TBLIS contract!" begin - contract!(Val(:TBLIS), R, labelsR, T1, labelsT1, T2, labelsT2, α, β) - #end - return R - end - - if N1 + N2 == NR - outer!(R, T1, T2) - labelsRp = (labelsT1..., labelsT2...) - perm = getperm(labelsR, labelsRp) - if !is_trivial_permutation(perm) - permutedims!(R, copy(R), perm) + R::DenseTensor{ElR, NR}, + labelsR, + T1::DenseTensor{ElT1, N1}, + labelsT1, + T2::DenseTensor{ElT2, N2}, + labelsT2, + α::Elα = one(ElR), + β::Elβ = zero(ElR), + ) where {Elα, Elβ, ElR, ElT1, ElT2, NR, N1, N2} + # Special case for scalar tensors + if nnz(T1) == 1 || nnz(T2) == 1 + _contract_scalar!(R, labelsR, T1, labelsT1, T2, labelsT2, α, β) + return R end - return R - end - - props = ContractionProperties(labelsT1, labelsT2, labelsR) - compute_contraction_properties!(props, T1, T2, R) - if ElT1 != ElT2 - # TODO: use promote instead - # T1, T2 = promote(T1, T2) + if using_tblis() && ElR <: LinearAlgebra.BlasReal && (ElR == ElT1 == ElT2 == Elα == Elβ) + #@timeit_debug timer "TBLIS contract!" begin + contract!(Val(:TBLIS), R, labelsR, T1, labelsT1, T2, labelsT2, α, β) + #end + return R + end - ElT1T2 = promote_type(ElT1, ElT2) - if ElT1 != ElR - # TODO: get this working - # T1 = ElR.(T1) - T1 = one(ElT1T2) * T1 + if N1 + N2 == NR + outer!(R, T1, T2) + labelsRp = (labelsT1..., labelsT2...) 
+ perm = getperm(labelsR, labelsRp) + if !is_trivial_permutation(perm) + permutedims!(R, copy(R), perm) + end + return R end - if ElT2 != ElR - # TODO: get this working - # T2 = ElR.(T2) - T2 = one(ElT1T2) * T2 + + props = ContractionProperties(labelsT1, labelsT2, labelsR) + compute_contraction_properties!(props, T1, T2, R) + + if ElT1 != ElT2 + # TODO: use promote instead + # T1, T2 = promote(T1, T2) + + ElT1T2 = promote_type(ElT1, ElT2) + if ElT1 != ElR + # TODO: get this working + # T1 = ElR.(T1) + T1 = one(ElT1T2) * T1 + end + if ElT2 != ElR + # TODO: get this working + # T2 = ElR.(T2) + T2 = one(ElT1T2) * T2 + end end - end - _contract!(R, T1, T2, props, α, β) - return R - #end + _contract!(R, T1, T2, props, α, β) + return R + #end end function _contract!( - CT::DenseTensor{El,NC}, - AT::DenseTensor{El,NA}, - BT::DenseTensor{El,NB}, - props::ContractionProperties, - α::Number=one(El), - β::Number=zero(El), -) where {El,NC,NA,NB} - C = array(CT) - A = array(AT) - B = array(BT) - - return _contract!(C, A, B, props, α, β) + CT::DenseTensor{El, NC}, + AT::DenseTensor{El, NA}, + BT::DenseTensor{El, NB}, + props::ContractionProperties, + α::Number = one(El), + β::Number = zero(El), + ) where {El, NC, NA, NB} + C = array(CT) + A = array(AT) + B = array(BT) + + return _contract!(C, A, B, props, α, β) end diff --git a/NDTensors/src/dense/tensoralgebra/outer.jl b/NDTensors/src/dense/tensoralgebra/outer.jl index 3fd949b9f2..84b91c3f19 100644 --- a/NDTensors/src/dense/tensoralgebra/outer.jl +++ b/NDTensors/src/dense/tensoralgebra/outer.jl @@ -1,35 +1,35 @@ function outer!( - R::DenseTensor{ElR}, T1::DenseTensor{ElT1}, T2::DenseTensor{ElT2} -) where {ElR,ElT1,ElT2} - if ElT1 != ElT2 - # TODO: use promote instead - # T1,T2 = promote(T1,T2) + R::DenseTensor{ElR}, T1::DenseTensor{ElT1}, T2::DenseTensor{ElT2} + ) where {ElR, ElT1, ElT2} + if ElT1 != ElT2 + # TODO: use promote instead + # T1,T2 = promote(T1,T2) - ElT1T2 = promote_type(ElT1, ElT2) - if ElT1 != ElT1T2 - # TODO: get this working - # T1 = ElR.(T1) - T1 = one(ElT1T2) * T1 + ElT1T2 = promote_type(ElT1, ElT2) + if ElT1 != ElT1T2 + # TODO: get this working + # T1 = ElR.(T1) + T1 = one(ElT1T2) * T1 + end + if ElT2 != ElT1T2 + # TODO: get this working + # T2 = ElR.(T2) + T2 = one(ElT1T2) * T2 + end end - if ElT2 != ElT1T2 - # TODO: get this working - # T2 = ElR.(T2) - T2 = one(ElT1T2) * T2 - end - end - v1 = data(T1) - v2 = data(T2) - RM = reshape(R, length(v1), length(v2)) - ## There is no _gemm! defined for CUDA or Metal so it calls - ## generic matmul. Replace with mul!! to call correct mul! (ger) - mul!!(array(RM), v1, transpose(v2), one(ElR), zero(ElR)) - return R + v1 = data(T1) + v2 = data(T2) + RM = reshape(R, length(v1), length(v2)) + ## There is no _gemm! defined for CUDA or Metal so it calls + ## generic matmul. Replace with mul!! to call correct mul! 
(ger) + mul!!(array(RM), v1, transpose(v2), one(ElR), zero(ElR)) + return R end # TODO: call outer!!, make this generic -function outer(T1::DenseTensor{ElT1}, T2::DenseTensor{ElT2}) where {ElT1,ElT2} - array_outer = vec(array(T1)) * transpose(vec(array(T2))) - inds_outer = unioninds(inds(T1), inds(T2)) - return tensor(Dense{promote_type(ElT1, ElT2)}(vec(array_outer)), inds_outer) +function outer(T1::DenseTensor{ElT1}, T2::DenseTensor{ElT2}) where {ElT1, ElT2} + array_outer = vec(array(T1)) * transpose(vec(array(T2))) + inds_outer = unioninds(inds(T1), inds(T2)) + return tensor(Dense{promote_type(ElT1, ElT2)}(vec(array_outer)), inds_outer) end diff --git a/NDTensors/src/diag/diagtensor.jl b/NDTensors/src/diag/diagtensor.jl index ef114eb3c4..f10f78eee7 100644 --- a/NDTensors/src/diag/diagtensor.jl +++ b/NDTensors/src/diag/diagtensor.jl @@ -1,48 +1,48 @@ -const DiagTensor{ElT,N,StoreT,IndsT} = Tensor{ElT,N,StoreT,IndsT} where {StoreT<:Diag} -const NonuniformDiagTensor{ElT,N,StoreT,IndsT} = - Tensor{ElT,N,StoreT,IndsT} where {StoreT<:NonuniformDiag} -const UniformDiagTensor{ElT,N,StoreT,IndsT} = - Tensor{ElT,N,StoreT,IndsT} where {StoreT<:UniformDiag} +const DiagTensor{ElT, N, StoreT, IndsT} = Tensor{ElT, N, StoreT, IndsT} where {StoreT <: Diag} +const NonuniformDiagTensor{ElT, N, StoreT, IndsT} = + Tensor{ElT, N, StoreT, IndsT} where {StoreT <: NonuniformDiag} +const UniformDiagTensor{ElT, N, StoreT, IndsT} = + Tensor{ElT, N, StoreT, IndsT} where {StoreT <: UniformDiag} function diag(tensor::DiagTensor) - tensor_diag = NDTensors.similar(dense(typeof(tensor)), (diaglength(tensor),)) - # TODO: Define `eachdiagindex`. - diagview(tensor_diag) .= diagview(tensor) - return tensor_diag + tensor_diag = NDTensors.similar(dense(typeof(tensor)), (diaglength(tensor),)) + # TODO: Define `eachdiagindex`. + diagview(tensor_diag) .= diagview(tensor) + return tensor_diag end IndexStyle(::Type{<:DiagTensor}) = IndexCartesian() # TODO: this needs to be better (promote element type, check order compatibility, # etc. 
-function convert(::Type{<:DenseTensor{ElT,N}}, T::DiagTensor{ElT,N}) where {ElT<:Number,N} - return dense(T) +function convert(::Type{<:DenseTensor{ElT, N}}, T::DiagTensor{ElT, N}) where {ElT <: Number, N} + return dense(T) end -convert(::Type{Diagonal}, D::DiagTensor{<:Number,2}) = Diagonal(data(D)) +convert(::Type{Diagonal}, D::DiagTensor{<:Number, 2}) = Diagonal(data(D)) -function Array{ElT,N}(T::DiagTensor{ElT,N}) where {ElT,N} - return array(T) +function Array{ElT, N}(T::DiagTensor{ElT, N}) where {ElT, N} + return array(T) end -function Array(T::DiagTensor{ElT,N}) where {ElT,N} - return Array{ElT,N}(T) +function Array(T::DiagTensor{ElT, N}) where {ElT, N} + return Array{ElT, N}(T) end function diagview(T::NonuniformDiagTensor) - return data(T) + return data(T) end function zeros(tensortype::Type{<:DiagTensor}, inds) - return tensor(generic_zeros(storagetype(tensortype), mindim(inds)), inds) + return tensor(generic_zeros(storagetype(tensortype), mindim(inds)), inds) end function zeros(tensortype::Type{<:DiagTensor}, inds::Dims) - return tensor(generic_zeros(storagetype(tensortype), mindim(inds)), inds) + return tensor(generic_zeros(storagetype(tensortype), mindim(inds)), inds) end function zeros(tensortype::Type{<:DiagTensor}, inds::Tuple{}) - return tensor(generic_zeros(storagetype(tensortype), mindim(inds)), inds) + return tensor(generic_zeros(storagetype(tensortype), mindim(inds)), inds) end # Compute the norm of Uniform diagonal tensor @@ -71,151 +71,151 @@ Set the entire diagonal of a uniform DiagTensor. setdiag(T::UniformDiagTensor, val) = tensor(Diag(val), inds(T)) function Base.copyto!(R::DenseTensor, T::DiagTensor) - diagview(R) .= diagview(T) - return R + diagview(R) .= diagview(T) + return R end @propagate_inbounds function getindex( - T::DiagTensor{ElT,N}, inds::Vararg{Int,N} -) where {ElT,N} - if all(==(inds[1]), inds) - return getdiagindex(T, inds[1]) - else - return zero(eltype(ElT)) - end -end -@propagate_inbounds getindex(T::DiagTensor{<:Number,1}, ind::Int) = storage(T)[ind] + T::DiagTensor{ElT, N}, inds::Vararg{Int, N} + ) where {ElT, N} + if all(==(inds[1]), inds) + return getdiagindex(T, inds[1]) + else + return zero(eltype(ElT)) + end +end +@propagate_inbounds getindex(T::DiagTensor{<:Number, 1}, ind::Int) = storage(T)[ind] using .Expose: expose -@propagate_inbounds getindex(T::DiagTensor{<:Number,0}) = getindex(expose(storage(T))) +@propagate_inbounds getindex(T::DiagTensor{<:Number, 0}) = getindex(expose(storage(T))) # Set diagonal elements # Throw error for off-diagonal @propagate_inbounds function setindex!( - T::DiagTensor{<:Number,N}, val, inds::Vararg{Int,N} -) where {N} - all(==(inds[1]), inds) || error("Cannot set off-diagonal element of Diag storage") - setdiagindex!(T, val, inds[1]) - return T + T::DiagTensor{<:Number, N}, val, inds::Vararg{Int, N} + ) where {N} + all(==(inds[1]), inds) || error("Cannot set off-diagonal element of Diag storage") + setdiagindex!(T, val, inds[1]) + return T end -@propagate_inbounds function setindex!(T::DiagTensor{<:Number,1}, val, ind::Int) - return (storage(T)[ind] = val) +@propagate_inbounds function setindex!(T::DiagTensor{<:Number, 1}, val, ind::Int) + return (storage(T)[ind] = val) end -@propagate_inbounds setindex!(T::DiagTensor{<:Number,0}, val) = (storage(T)[1] = val) +@propagate_inbounds setindex!(T::DiagTensor{<:Number, 0}, val) = (storage(T)[1] = val) -function setindex!(T::UniformDiagTensor{<:Number,N}, val, inds::Vararg{Int,N}) where {N} - return error("Cannot set elements of a uniform Diag storage") 
+function setindex!(T::UniformDiagTensor{<:Number, N}, val, inds::Vararg{Int, N}) where {N} + return error("Cannot set elements of a uniform Diag storage") end # TODO: make a fill!! that works for uniform and non-uniform #fill!(T::DiagTensor,v) = fill!(storage(T),v) -function dense(::Type{<:Tensor{ElT,N,StoreT,IndsT}}) where {ElT,N,StoreT<:Diag,IndsT} - return Tensor{ElT,N,dense(StoreT),IndsT} +function dense(::Type{<:Tensor{ElT, N, StoreT, IndsT}}) where {ElT, N, StoreT <: Diag, IndsT} + return Tensor{ElT, N, dense(StoreT), IndsT} end using TypeParameterAccessors: unwrap_array_type # convert to Dense function dense(T::DiagTensor) - R = zeros(dense(typeof(T)), inds(T)) - diagview(R) .= diagview(T) - return R + R = zeros(dense(typeof(T)), inds(T)) + diagview(R) .= diagview(T) + return R end denseblocks(T::DiagTensor) = dense(T) function permutedims!( - R::DiagTensor{<:Number,N}, - T::DiagTensor{<:Number,N}, - perm::NTuple{N,Int}, - f::Function=(r, t) -> t, -) where {N} - # TODO: check that inds(R)==permute(inds(T),perm)? - diagview(R) .= f.(diagview(R), diagview(T)) - return R + R::DiagTensor{<:Number, N}, + T::DiagTensor{<:Number, N}, + perm::NTuple{N, Int}, + f::Function = (r, t) -> t, + ) where {N} + # TODO: check that inds(R)==permute(inds(T),perm)? + diagview(R) .= f.(diagview(R), diagview(T)) + return R end function permutedims( - T::DiagTensor{<:Number,N}, perm::NTuple{N,Int}, f::Function=identity -) where {N} - R = NDTensors.similar(T) - g(r, t) = f(t) - permutedims!(R, T, perm, g) - return R + T::DiagTensor{<:Number, N}, perm::NTuple{N, Int}, f::Function = identity + ) where {N} + R = NDTensors.similar(T) + g(r, t) = f(t) + permutedims!(R, T, perm, g) + return R end function permutedims( - T::UniformDiagTensor{<:Number,N}, perm::NTuple{N,Int}, f::Function=identity -) where {N} - R = tensor(Diag(f(getdiagindex(T, 1))), permute(inds(T), perm)) - return R + T::UniformDiagTensor{<:Number, N}, perm::NTuple{N, Int}, f::Function = identity + ) where {N} + R = tensor(Diag(f(getdiagindex(T, 1))), permute(inds(T), perm)) + return R end # Version that may overwrite in-place or may return the result function permutedims!!( - R::NonuniformDiagTensor{<:Number,N}, - T::NonuniformDiagTensor{<:Number,N}, - perm::NTuple{N,Int}, - f::Function=(r, t) -> t, -) where {N} - R = convert(promote_type(typeof(R), typeof(T)), R) - permutedims!(R, T, perm, f) - return R + R::NonuniformDiagTensor{<:Number, N}, + T::NonuniformDiagTensor{<:Number, N}, + perm::NTuple{N, Int}, + f::Function = (r, t) -> t, + ) where {N} + R = convert(promote_type(typeof(R), typeof(T)), R) + permutedims!(R, T, perm, f) + return R end function permutedims!!( - R::UniformDiagTensor{ElR,N}, - T::UniformDiagTensor{ElT,N}, - perm::NTuple{N,Int}, - f::Function=(r, t) -> t, -) where {ElR,ElT,N} - R = convert(promote_type(typeof(R), typeof(T)), R) - R = tensor(Diag(f(getdiagindex(R, 1), getdiagindex(T, 1))), inds(R)) - return R + R::UniformDiagTensor{ElR, N}, + T::UniformDiagTensor{ElT, N}, + perm::NTuple{N, Int}, + f::Function = (r, t) -> t, + ) where {ElR, ElT, N} + R = convert(promote_type(typeof(R), typeof(T)), R) + R = tensor(Diag(f(getdiagindex(R, 1), getdiagindex(T, 1))), inds(R)) + return R end function permutedims!( - R::DenseTensor{ElR,N}, T::DiagTensor{ElT,N}, perm::NTuple{N,Int}, f::Function=(r, t) -> t -) where {ElR,ElT,N} - diagview(R) .= f.(diagview(R), diagview(T)) - return R + R::DenseTensor{ElR, N}, T::DiagTensor{ElT, N}, perm::NTuple{N, Int}, f::Function = (r, t) -> t + ) where {ElR, ElT, N} + diagview(R) .= 
f.(diagview(R), diagview(T)) + return R end function permutedims!!( - R::DenseTensor{ElR,N}, T::DiagTensor{ElT,N}, perm::NTuple{N,Int}, f::Function=(r, t) -> t -) where {ElR,ElT,N} - RR = convert(promote_type(typeof(R), typeof(T)), R) - permutedims!(RR, T, perm, f) - return RR + R::DenseTensor{ElR, N}, T::DiagTensor{ElT, N}, perm::NTuple{N, Int}, f::Function = (r, t) -> t + ) where {ElR, ElT, N} + RR = convert(promote_type(typeof(R), typeof(T)), R) + permutedims!(RR, T, perm, f) + return RR end # TODO: make a single implementation since this is # the same as the version with the input types # swapped. function permutedims!!( - R::DiagTensor{ElR,N}, T::DenseTensor{ElT,N}, perm::NTuple{N,Int}, f::Function=(r, t) -> t -) where {ElR,ElT,N} - RR = convert(promote_type(typeof(R), typeof(T)), R) - permutedims!(RR, T, perm, f) - return RR + R::DiagTensor{ElR, N}, T::DenseTensor{ElT, N}, perm::NTuple{N, Int}, f::Function = (r, t) -> t + ) where {ElR, ElT, N} + RR = convert(promote_type(typeof(R), typeof(T)), R) + permutedims!(RR, T, perm, f) + return RR end function Base.mapreduce(f, op, t1::DiagTensor, t_tail::DiagTensor...; kwargs...) - elt = mapreduce(eltype, promote_type, (t1, t_tail...)) - if !iszero(f(zero(elt))) - return mapreduce(f, op, array(t1), array.(t_tail)...; kwargs...) - end - if length(t1) > diaglength(t1) - # Some elements are zero, account for that - # with the initial value. - init_kwargs = (; init=zero(elt)) - else - init_kwargs = (;) - end - return mapreduce(f, op, diagview(t1), diagview.(t_tail)...; kwargs..., init_kwargs...) + elt = mapreduce(eltype, promote_type, (t1, t_tail...)) + if !iszero(f(zero(elt))) + return mapreduce(f, op, array(t1), array.(t_tail)...; kwargs...) + end + if length(t1) > diaglength(t1) + # Some elements are zero, account for that + # with the initial value. + init_kwargs = (; init = zero(elt)) + else + init_kwargs = (;) + end + return mapreduce(f, op, diagview(t1), diagview.(t_tail)...; kwargs..., init_kwargs...) end function Base.show(io::IO, mime::MIME"text/plain", T::DiagTensor) - summary(io, T) - print_tensor(io, T) - return nothing + summary(io, T) + print_tensor(io, T) + return nothing end diff --git a/NDTensors/src/diag/set_types.jl b/NDTensors/src/diag/set_types.jl index bbb798a5a4..b908710e40 100644 --- a/NDTensors/src/diag/set_types.jl +++ b/NDTensors/src/diag/set_types.jl @@ -1,20 +1,20 @@ using TypeParameterAccessors: TypeParameterAccessors function TypeParameterAccessors.set_eltype(storagetype::Type{<:UniformDiag}, eltype::Type) - return Diag{eltype,eltype} + return Diag{eltype, eltype} end function TypeParameterAccessors.set_eltype( - storagetype::Type{<:NonuniformDiag}, eltype::Type{<:AbstractArray} -) - return Diag{eltype,similartype(storagetype, eltype)} + storagetype::Type{<:NonuniformDiag}, eltype::Type{<:AbstractArray} + ) + return Diag{eltype, similartype(storagetype, eltype)} end # TODO: Remove this once uniform diagonal tensors use FillArrays for the data. 
function set_datatype(storagetype::Type{<:UniformDiag}, datatype::Type) - return Diag{datatype,datatype} + return Diag{datatype, datatype} end function set_datatype(storagetype::Type{<:NonuniformDiag}, datatype::Type{<:AbstractArray}) - return Diag{eltype(datatype),datatype} + return Diag{eltype(datatype), datatype} end diff --git a/NDTensors/src/diag/similar.jl b/NDTensors/src/diag/similar.jl index 7231caffe2..398e02b9c0 100644 --- a/NDTensors/src/diag/similar.jl +++ b/NDTensors/src/diag/similar.jl @@ -2,21 +2,21 @@ using TypeParameterAccessors: TypeParameterAccessors # NDTensors.similar function similar(storagetype::Type{<:Diag}, dims::Dims) - return setdata(storagetype, similar(datatype(storagetype), mindim(dims))) + return setdata(storagetype, similar(datatype(storagetype), mindim(dims))) end # TODO: Redesign UniformDiag to make it handled better # by generic code. function TypeParameterAccessors.similartype(storagetype::Type{<:UniformDiag}, eltype::Type) - # This will also set the `datatype`. - return set_eltype(storagetype, eltype) + # This will also set the `datatype`. + return set_eltype(storagetype, eltype) end # Needed to get slice of DiagTensor like T[1:3,1:3] function similar( - T::DiagTensor{<:Number,N}, ::Type{ElR}, inds::Dims{N} -) where {ElR<:Number,N} - return tensor(similar(storage(T), ElR, minimum(inds)), inds) + T::DiagTensor{<:Number, N}, ::Type{ElR}, inds::Dims{N} + ) where {ElR <: Number, N} + return tensor(similar(storage(T), ElR, minimum(inds)), inds) end similar(storage::NonuniformDiag) = setdata(storage, similar(data(storage))) diff --git a/NDTensors/src/diag/tensoralgebra/outer.jl b/NDTensors/src/diag/tensoralgebra/outer.jl index b01a4850a0..7e6c5ef6b2 100644 --- a/NDTensors/src/diag/tensoralgebra/outer.jl +++ b/NDTensors/src/diag/tensoralgebra/outer.jl @@ -1,30 +1,30 @@ function outer!( - R::DenseTensor{<:Number,NR}, T1::DiagTensor{<:Number,N1}, T2::DiagTensor{<:Number,N2} -) where {NR,N1,N2} - for i1 in 1:diaglength(T1), i2 in 1:diaglength(T2) - indsR = CartesianIndex{NR}(ntuple(r -> r ≤ N1 ? i1 : i2, Val(NR))) - R[indsR] = getdiagindex(T1, i1) * getdiagindex(T2, i2) - end - return R + R::DenseTensor{<:Number, NR}, T1::DiagTensor{<:Number, N1}, T2::DiagTensor{<:Number, N2} + ) where {NR, N1, N2} + for i1 in 1:diaglength(T1), i2 in 1:diaglength(T2) + indsR = CartesianIndex{NR}(ntuple(r -> r ≤ N1 ? i1 : i2, Val(NR))) + R[indsR] = getdiagindex(T1, i1) * getdiagindex(T2, i2) + end + return R end # TODO: write an optimized version of this? 
function outer!(R::DenseTensor{ElR}, T1::DenseTensor, T2::DiagTensor) where {ElR} - R .= zero(ElR) - outer!(R, T1, dense(T2)) - return R + R .= zero(ElR) + outer!(R, T1, dense(T2)) + return R end function outer!(R::DenseTensor{ElR}, T1::DiagTensor, T2::DenseTensor) where {ElR} - R .= zero(ElR) - outer!(R, dense(T1), T2) - return R + R .= zero(ElR) + outer!(R, dense(T1), T2) + return R end # Right an in-place version -function outer(T1::DiagTensor{ElT1,N1}, T2::DiagTensor{ElT2,N2}) where {ElT1,ElT2,N1,N2} - indsR = unioninds(inds(T1), inds(T2)) - R = tensor(Dense(generic_zeros(promote_type(ElT1, ElT2), dim(indsR))), indsR) - outer!(R, T1, T2) - return R +function outer(T1::DiagTensor{ElT1, N1}, T2::DiagTensor{ElT2, N2}) where {ElT1, ElT2, N1, N2} + indsR = unioninds(inds(T1), inds(T2)) + R = tensor(Dense(generic_zeros(promote_type(ElT1, ElT2), dim(indsR))), indsR) + outer!(R, T1, T2) + return R end diff --git a/NDTensors/src/dims.jl b/NDTensors/src/dims.jl index 38db99f6d9..81d3cef8f5 100644 --- a/NDTensors/src/dims.jl +++ b/NDTensors/src/dims.jl @@ -18,7 +18,7 @@ dim(::Tuple{}) = 1 dense(ds::Dims) = ds -dense(::Type{DimsT}) where {DimsT<:Dims} = DimsT +dense(::Type{DimsT}) where {DimsT <: Dims} = DimsT dim(ds::Dims) = prod(ds) @@ -72,7 +72,7 @@ sim(i::Int) = i # More complicated definition makes Order(Ref(2)[]) faster @eval struct Order{N} - (OrderT::Type{<:Order})() = $(Expr(:new, :OrderT)) + (OrderT::Type{<:Order})() = $(Expr(:new, :OrderT)) end @doc """ diff --git a/NDTensors/src/empty/EmptyTensor.jl b/NDTensors/src/empty/EmptyTensor.jl index f5e04e0652..a3c185c03e 100644 --- a/NDTensors/src/empty/EmptyTensor.jl +++ b/NDTensors/src/empty/EmptyTensor.jl @@ -2,25 +2,25 @@ # EmptyTensor (Tensor using EmptyStorage storage) # -const EmptyTensor{ElT,N,StoreT,IndsT} = - Tensor{ElT,N,StoreT,IndsT} where {StoreT<:EmptyStorage} +const EmptyTensor{ElT, N, StoreT, IndsT} = + Tensor{ElT, N, StoreT, IndsT} where {StoreT <: EmptyStorage} ## Start constructors -function EmptyTensor(::Type{ElT}, inds) where {ElT<:Number} - return tensor(EmptyStorage(ElT), inds) +function EmptyTensor(::Type{ElT}, inds) where {ElT <: Number} + return tensor(EmptyStorage(ElT), inds) end -function EmptyTensor(::Type{StoreT}, inds) where {StoreT<:TensorStorage} - return tensor(empty(StoreT), inds) +function EmptyTensor(::Type{StoreT}, inds) where {StoreT <: TensorStorage} + return tensor(empty(StoreT), inds) end -function EmptyBlockSparseTensor(::Type{ElT}, inds) where {ElT<:Number} - StoreT = BlockSparse{ElT,Vector{ElT},length(inds)} - return EmptyTensor(StoreT, inds) +function EmptyBlockSparseTensor(::Type{ElT}, inds) where {ElT <: Number} + StoreT = BlockSparse{ElT, Vector{ElT}, length(inds)} + return EmptyTensor(StoreT, inds) end ## End constructors -fulltype(::Type{EmptyStorage{ElT,StoreT}}) where {ElT,StoreT} = StoreT +fulltype(::Type{EmptyStorage{ElT, StoreT}}) where {ElT, StoreT} = StoreT fulltype(T::EmptyStorage) = fulltype(typeof(T)) fulltype(T::Tensor) = fulltype(typeof(T)) @@ -29,64 +29,64 @@ fulltype(T::Tensor) = fulltype(typeof(T)) # example `EmptyStorage` that wraps a `BlockSparse` which # can have non-unity dimensions. 
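# --- Editorial aside (not part of the patch): `fulltype` recovers the storage
# type an `EmptyStorage` is standing in for, and the `ndims` definition below
# forwards to it, since an `EmptyStorage` can wrap storage such as
# `BlockSparse` whose dimension is not one. A minimal sketch, assuming the
# exported `EmptyStorage`, `Dense`, and `BlockSparse` types (expected results
# shown as comments, not asserted):
using NDTensors: NDTensors, EmptyStorage, Dense, BlockSparse
NDTensors.fulltype(EmptyStorage{Float64, Dense{Float64, Vector{Float64}}})
# -> Dense{Float64, Vector{Float64}}
ndims(EmptyStorage{Float64, BlockSparse{Float64, Vector{Float64}, 2}})
# -> expected to be 2, the dimension of the wrapped BlockSparse storage
# --- end of aside ---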
function Base.ndims(storagetype::Type{<:EmptyStorage}) - return ndims(fulltype(storagetype)) + return ndims(fulltype(storagetype)) end # From an EmptyTensor, return the closest Tensor type -function fulltype(::Type{TensorT}) where {TensorT<:Tensor} - return Tensor{ - eltype(TensorT),ndims(TensorT),fulltype(storetype(TensorT)),indstype(TensorT) - } +function fulltype(::Type{TensorT}) where {TensorT <: Tensor} + return Tensor{ + eltype(TensorT), ndims(TensorT), fulltype(storetype(TensorT)), indstype(TensorT), + } end function fulltype( - ::Type{ElR}, ::Type{<:Tensor{ElT,N,EStoreT,IndsT}} -) where {ElR,ElT<:Number,N,EStoreT<:EmptyStorage{ElT,StoreT},IndsT} where {StoreT} - return Tensor{ElR,N,similartype(StoreT, ElR),IndsT} + ::Type{ElR}, ::Type{<:Tensor{ElT, N, EStoreT, IndsT}} + ) where {ElR, ElT <: Number, N, EStoreT <: EmptyStorage{ElT, StoreT}, IndsT} where {StoreT} + return Tensor{ElR, N, similartype(StoreT, ElR), IndsT} end -function emptytype(::Type{TensorT}) where {TensorT<:Tensor} - return Tensor{ - eltype(TensorT),ndims(TensorT),emptytype(storagetype(TensorT)),indstype(TensorT) - } +function emptytype(::Type{TensorT}) where {TensorT <: Tensor} + return Tensor{ + eltype(TensorT), ndims(TensorT), emptytype(storagetype(TensorT)), indstype(TensorT), + } end # XXX TODO: add bounds checking getindex(T::EmptyTensor, I::Integer...) = zero(eltype(T)) function getindex(T::EmptyTensor{Complex{EmptyNumber}}, I::Integer...) - return Complex(EmptyNumber(), EmptyNumber()) + return Complex(EmptyNumber(), EmptyNumber()) end similar(T::EmptyTensor, inds::Tuple) = setinds(T, inds) -function similar(T::EmptyTensor, ::Type{ElT}) where {ElT<:Number} - return tensor(similar(storage(T), ElT), inds(T)) +function similar(T::EmptyTensor, ::Type{ElT}) where {ElT <: Number} + return tensor(similar(storage(T), ElT), inds(T)) end function randn!!(T::EmptyTensor) - return randn!!(Random.default_rng(), T) + return randn!!(Random.default_rng(), T) end function randn!!(rng::AbstractRNG, T::EmptyTensor) - Tf = similar(fulltype(T), inds(T)) - randn!(rng, Tf) - return Tf + Tf = similar(fulltype(T), inds(T)) + randn!(rng, Tf) + return Tf end # Default to Float64 function randn!!(T::EmptyTensor{EmptyNumber}) - return randn!!(Random.default_rng(), T) + return randn!!(Random.default_rng(), T) end # Default to Float64 function randn!!(rng::AbstractRNG, T::EmptyTensor{EmptyNumber}) - return randn!!(rng, similar(T, Float64)) + return randn!!(rng, similar(T, Float64)) end function _fill!!(::Type{ElT}, T::EmptyTensor, α::Number) where {ElT} - Tf = similar(fulltype(T), ElT, inds(T)) - fill!(Tf, α) - return Tf + Tf = similar(fulltype(T), ElT, inds(T)) + fill!(Tf, α) + return Tf end fill!!(T::EmptyTensor, α::Number) = _fill!!(eltype(T), T, α) @@ -98,87 +98,87 @@ isempty(::EmptyTensor) = true zero(empty::EmptyTensor) = empty -function zeros(T::TensorT) where {TensorT<:EmptyTensor} - TensorR = fulltype(TensorT) - return zeros(TensorR, inds(T)) +function zeros(T::TensorT) where {TensorT <: EmptyTensor} + TensorR = fulltype(TensorT) + return zeros(TensorR, inds(T)) end -function zeros(::Type{ElT}, T::TensorT) where {ElT,TensorT<:EmptyTensor} - TensorR = fulltype(ElT, TensorT) - return zeros(TensorR, inds(T)) +function zeros(::Type{ElT}, T::TensorT) where {ElT, TensorT <: EmptyTensor} + TensorR = fulltype(ElT, TensorT) + return zeros(TensorR, inds(T)) end -function insertblock(T::EmptyTensor{<:Number,N}, block) where {N} - R = zeros(T) - insertblock!(R, Block(block)) - return R +function insertblock(T::EmptyTensor{<:Number, N}, block) 
where {N} + R = zeros(T) + insertblock!(R, Block(block)) + return R end -insertblock!!(T::EmptyTensor{<:Number,N}, block) where {N} = insertblock(T, block) +insertblock!!(T::EmptyTensor{<:Number, N}, block) where {N} = insertblock(T, block) blockoffsets(tensor::EmptyTensor) = BlockOffsets{ndims(tensor)}() # Special case with element type of EmptyNumber: storage takes the type # of the input. @propagate_inbounds function _setindex(T::EmptyTensor{EmptyNumber}, x, I...) - R = zeros(typeof(x), T) - R[I...] = x - return R + R = zeros(typeof(x), T) + R[I...] = x + return R end # Special case with element type of Complex{EmptyNumber}: storage takes the type # of the complex version of the input. @propagate_inbounds function _setindex(T::EmptyTensor{Complex{EmptyNumber}}, x, I...) - R = zeros(typeof(complex(x)), T) - R[I...] = x - return R + R = zeros(typeof(complex(x)), T) + R[I...] = x + return R end @propagate_inbounds function _setindex(T::EmptyTensor, x, I...) - R = zeros(T) - R[I...] = x - return R + R = zeros(T) + R[I...] = x + return R end @propagate_inbounds function setindex(T::EmptyTensor, x, I...) - return _setindex(T, x, I...) + return _setindex(T, x, I...) end # This is needed to fix an ambiguity error with ArrayInterface.jl # https://github.com/ITensor/NDTensors.jl/issues/62 @propagate_inbounds function setindex(T::EmptyTensor, x, I::Int...) - return _setindex(T, x, I...) + return _setindex(T, x, I...) end setindex!!(T::EmptyTensor, x, I...) = setindex(T, x, I...) function promote_rule( - ::Type{T1}, ::Type{T2} -) where {T1<:EmptyStorage{EmptyNumber},T2<:TensorStorage} - return T2 + ::Type{T1}, ::Type{T2} + ) where {T1 <: EmptyStorage{EmptyNumber}, T2 <: TensorStorage} + return T2 end -function promote_rule(::Type{T1}, ::Type{T2}) where {T1<:EmptyStorage,T2<:TensorStorage} - return promote_type(similartype(T2, eltype(T1)), T2) +function promote_rule(::Type{T1}, ::Type{T2}) where {T1 <: EmptyStorage, T2 <: TensorStorage} + return promote_type(similartype(T2, eltype(T1)), T2) end -function permutedims!!(R::Tensor, T::EmptyTensor, perm::Tuple, f::Function=(r, t) -> t) - RR = convert(promote_type(typeof(R), typeof(T)), R) - RR = permutedims!!(RR, RR, ntuple(identity, Val(ndims(R))), (r, t) -> f(r, false)) - return RR +function permutedims!!(R::Tensor, T::EmptyTensor, perm::Tuple, f::Function = (r, t) -> t) + RR = convert(promote_type(typeof(R), typeof(T)), R) + RR = permutedims!!(RR, RR, ntuple(identity, Val(ndims(R))), (r, t) -> f(r, false)) + return RR end -function permutedims!!(R::EmptyTensor, T::Tensor, perm::Tuple, f::Function=(r, t) -> t) - RR = similar(promote_type(typeof(R), typeof(T)), inds(R)) - RR = permutedims!!(RR, T, perm, (r, t) -> f(false, t)) - return RR +function permutedims!!(R::EmptyTensor, T::Tensor, perm::Tuple, f::Function = (r, t) -> t) + RR = similar(promote_type(typeof(R), typeof(T)), inds(R)) + RR = permutedims!!(RR, T, perm, (r, t) -> f(false, t)) + return RR end -function permutedims!!(R::EmptyTensor, T::EmptyTensor, perm::Tuple, f::Function=(r, t) -> t) - RR = convert(promote_type(typeof(R), typeof(T)), R) - return RR +function permutedims!!(R::EmptyTensor, T::EmptyTensor, perm::Tuple, f::Function = (r, t) -> t) + RR = convert(promote_type(typeof(R), typeof(T)), R) + return RR end function Base.show(io::IO, mime::MIME"text/plain", T::EmptyTensor) - summary(io, T) - return println(io) + summary(io, T) + return println(io) end diff --git a/NDTensors/src/empty/adapt.jl b/NDTensors/src/empty/adapt.jl index 0a048aac9e..b0860054ec 100644 --- 
a/NDTensors/src/empty/adapt.jl +++ b/NDTensors/src/empty/adapt.jl @@ -1,8 +1,8 @@ function adapt_structure(to, x::EmptyStorage) - return adapt_storagetype(to, typeof(x))() + return adapt_storagetype(to, typeof(x))() end function adapt_storagetype(to::Type{<:AbstractArray}, x::Type{<:EmptyStorage}) - d = datatype(storagetype(x)) - return emptytype(adapt_storagetype(adapt(to, d), fulltype(x))) + d = datatype(storagetype(x)) + return emptytype(adapt_storagetype(adapt(to, d), fulltype(x))) end diff --git a/NDTensors/src/empty/tensoralgebra/contract.jl b/NDTensors/src/empty/tensoralgebra/contract.jl index d566bad441..c6c8f55cb9 100644 --- a/NDTensors/src/empty/tensoralgebra/contract.jl +++ b/NDTensors/src/empty/tensoralgebra/contract.jl @@ -1,74 +1,74 @@ # Version of contraction where output storage is empty function contract!!(R::EmptyTensor, labelsR, T1::Tensor, labelsT1, T2::Tensor, labelsT2) - RR = contract(T1, labelsT1, T2, labelsT2, labelsR) - return RR + RR = contract(T1, labelsT1, T2, labelsT2, labelsR) + return RR end # When one of the tensors is empty, return an empty # tensor. # XXX: make sure `R` is actually correct! function contract!!( - R::EmptyTensor, labelsR, T1::EmptyTensor, labelsT1, T2::Tensor, labelsT2 -) - return R + R::EmptyTensor, labelsR, T1::EmptyTensor, labelsT1, T2::Tensor, labelsT2 + ) + return R end # When one of the tensors is empty, return an empty # tensor. # XXX: make sure `R` is actually correct! function contract!!( - R::EmptyTensor, labelsR, T1::Tensor, labelsT1, T2::EmptyTensor, labelsT2 -) - return R + R::EmptyTensor, labelsR, T1::Tensor, labelsT1, T2::EmptyTensor, labelsT2 + ) + return R end function contract!!( - R::EmptyTensor, labelsR, T1::EmptyTensor, labelsT1, T2::EmptyTensor, labelsT2 -) - return R + R::EmptyTensor, labelsR, T1::EmptyTensor, labelsT1, T2::EmptyTensor, labelsT2 + ) + return R end # For ambiguity with versions in combiner.jl function contract!!( - R::EmptyTensor, labelsR, T1::CombinerTensor, labelsT1, T2::Tensor, labelsT2 -) - RR = contract(T1, labelsT1, T2, labelsT2, labelsR) - return RR + R::EmptyTensor, labelsR, T1::CombinerTensor, labelsT1, T2::Tensor, labelsT2 + ) + RR = contract(T1, labelsT1, T2, labelsT2, labelsR) + return RR end # For ambiguity with versions in combiner.jl function contract!!( - R::EmptyTensor, labelsR, T1::Tensor, labelsT1, T2::CombinerTensor, labelsT2 -) - RR = contract(T1, labelsT1, T2, labelsT2, labelsR) - return RR + R::EmptyTensor, labelsR, T1::Tensor, labelsT1, T2::CombinerTensor, labelsT2 + ) + RR = contract(T1, labelsT1, T2, labelsT2, labelsR) + return RR end # For ambiguity with versions in combiner.jl function contract!!( - R::EmptyTensor, labelsR, T1::EmptyTensor, labelsT1, T2::CombinerTensor, labelsT2 -) - RR = contraction_output(T1, labelsT1, T2, labelsT2, labelsR) - return RR + R::EmptyTensor, labelsR, T1::EmptyTensor, labelsT1, T2::CombinerTensor, labelsT2 + ) + RR = contraction_output(T1, labelsT1, T2, labelsT2, labelsR) + return RR end function contraction_output(T1::EmptyTensor, T2::EmptyTensor, indsR::Tuple) - fulltypeR = contraction_output_type(fulltype(T1), fulltype(T2), indsR) - storagetypeR = storagetype(fulltypeR) - emptystoragetypeR = emptytype(storagetypeR) - return Tensor(emptystoragetypeR(), indsR) + fulltypeR = contraction_output_type(fulltype(T1), fulltype(T2), indsR) + storagetypeR = storagetype(fulltypeR) + emptystoragetypeR = emptytype(storagetypeR) + return Tensor(emptystoragetypeR(), indsR) end function contraction_output(T1::Tensor, T2::EmptyTensor, indsR) - fulltypeR = 
contraction_output_type(typeof(T1), fulltype(T2), indsR) - storagetypeR = storagetype(fulltypeR) - emptystoragetypeR = emptytype(storagetypeR) - return Tensor(emptystoragetypeR(), indsR) + fulltypeR = contraction_output_type(typeof(T1), fulltype(T2), indsR) + storagetypeR = storagetype(fulltypeR) + emptystoragetypeR = emptytype(storagetypeR) + return Tensor(emptystoragetypeR(), indsR) end function contraction_output(T1::EmptyTensor, T2::Tensor, indsR) - fulltypeR = contraction_output_type(fulltype(T1), typeof(T2), indsR) - storagetypeR = storagetype(fulltypeR) - emptystoragetypeR = emptytype(storagetypeR) - return Tensor(emptystoragetypeR(), indsR) + fulltypeR = contraction_output_type(fulltype(T1), typeof(T2), indsR) + storagetypeR = storagetype(fulltypeR) + emptystoragetypeR = emptytype(storagetypeR) + return Tensor(emptystoragetypeR(), indsR) end diff --git a/NDTensors/src/emptynumber.jl b/NDTensors/src/emptynumber.jl index 1d9799b740..ebe541cbda 100644 --- a/NDTensors/src/emptynumber.jl +++ b/NDTensors/src/emptynumber.jl @@ -9,7 +9,7 @@ zero(n::EmptyNumber) = zero(typeof(n)) # This helps handle a lot of basic algebra, like: # EmptyNumber() + 2.3 == 2.3 -convert(::Type{T}, x::EmptyNumber) where {T<:Number} = T(zero(T)) +convert(::Type{T}, x::EmptyNumber) where {T <: Number} = T(zero(T)) # TODO: Should this be implemented? #Complex(x::Real, ::EmptyNumber) = x @@ -23,8 +23,8 @@ Base.promote_rule(::Type{EmptyNumber}, T::Type{<:Number}) = T Base.promote_rule(T::Type{<:Number}, ::Type{EmptyNumber}) = T Base.promote_rule(::Type{EmptyNumber}, ::Type{Bool}) = Bool Base.promote_rule(::Type{Bool}, ::Type{EmptyNumber}) = Bool -Base.promote_rule(::Type{EmptyNumber}, T::Type{Complex{R}}) where {R<:Real} = T -Base.promote_rule(T::Type{Complex{R}}, ::Type{EmptyNumber}) where {R<:Real} = T +Base.promote_rule(::Type{EmptyNumber}, T::Type{Complex{R}}) where {R <: Real} = T +Base.promote_rule(T::Type{Complex{R}}, ::Type{EmptyNumber}) where {R <: Real} = T # Basic arithmetic (::EmptyNumber + ::EmptyNumber) = EmptyNumber() diff --git a/NDTensors/src/exports.jl b/NDTensors/src/exports.jl index 8e91f52019..f9c4a38ce4 100644 --- a/NDTensors/src/exports.jl +++ b/NDTensors/src/exports.jl @@ -1,89 +1,89 @@ export - # NDTensors.jl - insertblock!!, - setindex, - setindex!!, - # blocksparse/blockdims.jl - BlockDims, - blockdim, - blockdims, - nblocks, - blockindex, - # blocksparse/blocksparse.jl - # Types - Block, - BlockOffset, - BlockOffsets, - BlockSparse, - # Methods - blockoffsets, - blockview, - eachnzblock, - findblock, - isblocknz, - nnzblocks, - nnz, - nzblock, - nzblocks, + # NDTensors.jl + insertblock!!, + setindex, + setindex!!, + # blocksparse/blockdims.jl + BlockDims, + blockdim, + blockdims, + nblocks, + blockindex, + # blocksparse/blocksparse.jl + # Types + Block, + BlockOffset, + BlockOffsets, + BlockSparse, + # Methods + blockoffsets, + blockview, + eachnzblock, + findblock, + isblocknz, + nnzblocks, + nnz, + nzblock, + nzblocks, - # blocksparse/blocksparsetensor.jl - # Types - BlockSparseTensor, - # Methods - blockview, - insertblock!, - randomBlockSparseTensor, + # blocksparse/blocksparsetensor.jl + # Types + BlockSparseTensor, + # Methods + blockview, + insertblock!, + randomBlockSparseTensor, - # dense.jl - # Types - Dense, - DenseTensor, - # Symbols - ⊗, - # Methods - randomTensor, - array, - contract, - matrix, - outer, - permutedims!!, - read, - vector, - write, + # dense.jl + # Types + Dense, + DenseTensor, + # Symbols + ⊗, + # Methods + randomTensor, + array, + contract, + matrix, + outer, 
+ permutedims!!, + read, + vector, + write, - # diag.jl - # Types - Diag, - DiagTensor, + # diag.jl + # Types + Diag, + DiagTensor, - # empty.jl - EmptyStorage, - EmptyTensor, - EmptyBlockSparseTensor, + # empty.jl + EmptyStorage, + EmptyTensor, + EmptyBlockSparseTensor, - # tensorstorage.jl - data, - TensorStorage, - randn!, - scale!, - norm, + # tensorstorage.jl + data, + TensorStorage, + randn!, + scale!, + norm, - # tensor.jl - Tensor, - tensor, - inds, - ind, - store, + # tensor.jl + Tensor, + tensor, + inds, + ind, + store, - # truncate.jl - truncate!, + # truncate.jl + truncate!, - # linearalgebra.jl - eigs, - entropy, - polar, - ql, - random_orthog, - random_unitary, - Spectrum, - truncerror + # linearalgebra.jl + eigs, + entropy, + polar, + ql, + random_orthog, + random_unitary, + Spectrum, + truncerror diff --git a/NDTensors/src/imports.jl b/NDTensors/src/imports.jl index bbfacd4a87..713ac7021c 100644 --- a/NDTensors/src/imports.jl +++ b/NDTensors/src/imports.jl @@ -20,16 +20,16 @@ using TimerOutputs using TupleTools for lib in [ - :BackendSelection, - :Expose, - :GPUArraysCoreExtensions, - :AMDGPUExtensions, - :CUDAExtensions, - :MetalExtensions, - :RankFactorization, -] - include("lib/$(lib)/src/$(lib).jl") - @eval using .$lib: $lib + :BackendSelection, + :Expose, + :GPUArraysCoreExtensions, + :AMDGPUExtensions, + :CUDAExtensions, + :MetalExtensions, + :RankFactorization, + ] + include("lib/$(lib)/src/$(lib).jl") + @eval using .$lib: $lib end # TODO: This is defined for backwards compatibility, # delete this alias once downstream packages change over @@ -48,57 +48,57 @@ using .GPUArraysCoreExtensions: cpu using .MetalExtensions: mtl import Base: - # Types - AbstractFloat, - Array, - CartesianIndex, - Complex, - IndexStyle, - Tuple, - # Symbols - +, - -, - *, - /, - # Methods - checkbounds, - complex, - convert, - conj, - copy, - copyto!, - eachindex, - eltype, - empty, - fill, - fill!, - getindex, - hash, - imag, - isempty, - isless, - iterate, - length, - map, - permutedims, - permutedims!, - print, - promote_rule, - randn, - real, - reshape, - setindex, - setindex!, - show, - size, - stride, - strides, - summary, - to_indices, - unsafe_convert, - view, - zero, - zeros + # Types + AbstractFloat, + Array, + CartesianIndex, + Complex, + IndexStyle, + Tuple, + # Symbols + +, + -, + *, + /, + # Methods + checkbounds, + complex, + convert, + conj, + copy, + copyto!, + eachindex, + eltype, + empty, + fill, + fill!, + getindex, + hash, + imag, + isempty, + isless, + iterate, + length, + map, + permutedims, + permutedims!, + print, + promote_rule, + randn, + real, + reshape, + setindex, + setindex!, + show, + size, + stride, + strides, + summary, + to_indices, + unsafe_convert, + view, + zero, + zeros import Base.Broadcast: Broadcasted, BroadcastStyle diff --git a/NDTensors/src/lib/AMDGPUExtensions/src/roc.jl b/NDTensors/src/lib/AMDGPUExtensions/src/roc.jl index 03c28c613e..a3ba228b51 100644 --- a/NDTensors/src/lib/AMDGPUExtensions/src/roc.jl +++ b/NDTensors/src/lib/AMDGPUExtensions/src/roc.jl @@ -10,5 +10,5 @@ function roc end struct ROCArrayAdaptor{B} end function TypeParameterAccessors.position(::Type{<:ROCArrayAdaptor}, ::typeof(storagemode)) - return Position(1) + return Position(1) end diff --git a/NDTensors/src/lib/AMDGPUExtensions/test/runtests.jl b/NDTensors/src/lib/AMDGPUExtensions/test/runtests.jl index da274f21da..551fdab59f 100644 --- a/NDTensors/src/lib/AMDGPUExtensions/test/runtests.jl +++ b/NDTensors/src/lib/AMDGPUExtensions/test/runtests.jl @@ -3,7 +3,7 @@ using Test: 
@testset, @test using NDTensors.AMDGPUExtensions: roc, ROCArrayAdaptor using NDTensors.GPUArraysCoreExtensions: storagemode @testset "roc and ROCArrayAdaptor" begin - @test roc isa Function - @test storagemode(ROCArrayAdaptor{1}) == 1 + @test roc isa Function + @test storagemode(ROCArrayAdaptor{1}) == 1 end end diff --git a/NDTensors/src/lib/BackendSelection/src/backend_types.jl b/NDTensors/src/lib/BackendSelection/src/backend_types.jl index ed4002f339..6048f599ea 100644 --- a/NDTensors/src/lib/BackendSelection/src/backend_types.jl +++ b/NDTensors/src/lib/BackendSelection/src/backend_types.jl @@ -1,37 +1,37 @@ for type in (:Algorithm, :Backend) - @eval begin - """ - $($type) - - A type representing a backend for a function. - - For example, a function might have multiple backends - implementations, which internally are selected with a `$($type)` type. - - This allows users to extend functionality with a new implementation but - use the same interface. - """ - struct $type{Back,Kwargs<:NamedTuple} <: AbstractBackend - kwargs::Kwargs - end - - $type{Back}(kwargs::NamedTuple) where {Back} = $type{Back,typeof(kwargs)}(kwargs) - $type{Back}(; kwargs...) where {Back} = $type{Back}(NamedTuple(kwargs)) - $type(s; kwargs...) = $type{Symbol(s)}(NamedTuple(kwargs)) - - $type(backend::$type) = backend - - # TODO: Use `SetParameters`. - backend_string(::$type{Back}) where {Back} = string(Back) - parameters(backend::$type) = getfield(backend, :kwargs) - - function Base.show(io::IO, backend::$type) - return print(io, "$($type) type ", backend_string(backend), ", ", parameters(backend)) + @eval begin + """ + $($type) + + A type representing a backend for a function. + + For example, a function might have multiple backends + implementations, which internally are selected with a `$($type)` type. + + This allows users to extend functionality with a new implementation but + use the same interface. + """ + struct $type{Back, Kwargs <: NamedTuple} <: AbstractBackend + kwargs::Kwargs + end + + $type{Back}(kwargs::NamedTuple) where {Back} = $type{Back, typeof(kwargs)}(kwargs) + $type{Back}(; kwargs...) where {Back} = $type{Back}(NamedTuple(kwargs)) + $type(s; kwargs...) = $type{Symbol(s)}(NamedTuple(kwargs)) + + $type(backend::$type) = backend + + # TODO: Use `SetParameters`. + backend_string(::$type{Back}) where {Back} = string(Back) + parameters(backend::$type) = getfield(backend, :kwargs) + + function Base.show(io::IO, backend::$type) + return print(io, "$($type) type ", backend_string(backend), ", ", parameters(backend)) + end + Base.print(io::IO, backend::$type) = print( + io, backend_string(backend), ", ", parameters(backend) + ) end - Base.print(io::IO, backend::$type) = print( - io, backend_string(backend), ", ", parameters(backend) - ) - end end # TODO: See if these can be moved inside of `@eval`. @@ -43,7 +43,7 @@ adding methods to a function that supports multiple algorithm backends. """ macro Algorithm_str(s) - return :(Algorithm{$(Expr(:quote, Symbol(s)))}) + return :(Algorithm{$(Expr(:quote, Symbol(s)))}) end """ @@ -54,5 +54,5 @@ adding methods to a function that supports multiple backends. 
""" macro Backend_str(s) - return :(Backend{$(Expr(:quote, Symbol(s)))}) + return :(Backend{$(Expr(:quote, Symbol(s)))}) end diff --git a/NDTensors/src/lib/BackendSelection/test/runtests.jl b/NDTensors/src/lib/BackendSelection/test/runtests.jl index 5abde35cbd..55456de965 100644 --- a/NDTensors/src/lib/BackendSelection/test/runtests.jl +++ b/NDTensors/src/lib/BackendSelection/test/runtests.jl @@ -2,28 +2,28 @@ using Test: @test, @testset using NDTensors: NDTensors using NDTensors.BackendSelection: - BackendSelection, Algorithm, Backend, @Algorithm_str, @Backend_str + BackendSelection, Algorithm, Backend, @Algorithm_str, @Backend_str # TODO: This is defined for backwards compatibility, # delete this alias once downstream packages change over # to using `BackendSelection`. using NDTensors.AlgorithmSelection: AlgorithmSelection @testset "BackendSelection" begin - # TODO: This is defined for backwards compatibility, - # delete this alias once downstream packages change over - # to using `BackendSelection`. - @test AlgorithmSelection === BackendSelection - for type in (Algorithm, Backend) - @testset "$type" begin - @test type("backend") isa type{:backend} - @test type(:backend) isa type{:backend} - backend = type("backend"; x=2, y=3) - @test backend isa type{:backend} - @test BackendSelection.parameters(backend) === (; x=2, y=3) + # TODO: This is defined for backwards compatibility, + # delete this alias once downstream packages change over + # to using `BackendSelection`. + @test AlgorithmSelection === BackendSelection + for type in (Algorithm, Backend) + @testset "$type" begin + @test type("backend") isa type{:backend} + @test type(:backend) isa type{:backend} + backend = type("backend"; x = 2, y = 3) + @test backend isa type{:backend} + @test BackendSelection.parameters(backend) === (; x = 2, y = 3) + end end - end - # Macro syntax. - @test Algorithm"backend"(; x=2, y=3) === Algorithm("backend"; x=2, y=3) - @test Backend"backend"(; x=2, y=3) === Backend("backend"; x=2, y=3) - @test isnothing(show(Algorithm(""))) + # Macro syntax. 
+ @test Algorithm"backend"(; x = 2, y = 3) === Algorithm("backend"; x = 2, y = 3) + @test Backend"backend"(; x = 2, y = 3) === Backend("backend"; x = 2, y = 3) + @test isnothing(show(Algorithm(""))) end end diff --git a/NDTensors/src/lib/CUDAExtensions/src/cuda.jl b/NDTensors/src/lib/CUDAExtensions/src/cuda.jl index 948f0b4b17..acaabcea57 100644 --- a/NDTensors/src/lib/CUDAExtensions/src/cuda.jl +++ b/NDTensors/src/lib/CUDAExtensions/src/cuda.jl @@ -10,5 +10,5 @@ function cu end struct CuArrayAdaptor{B} end function TypeParameterAccessors.position(::Type{<:CuArrayAdaptor}, ::typeof(storagemode)) - return Position(1) + return Position(1) end diff --git a/NDTensors/src/lib/CUDAExtensions/test/runtests.jl b/NDTensors/src/lib/CUDAExtensions/test/runtests.jl index 9c736ea59f..0b1cb281e4 100644 --- a/NDTensors/src/lib/CUDAExtensions/test/runtests.jl +++ b/NDTensors/src/lib/CUDAExtensions/test/runtests.jl @@ -3,7 +3,7 @@ using Test: @testset, @test using NDTensors.CUDAExtensions: cu, CuArrayAdaptor using NDTensors.GPUArraysCoreExtensions: storagemode @testset "cu function exists" begin - @test cu isa Function - @test storagemode(CuArrayAdaptor{1}) == 1 + @test cu isa Function + @test storagemode(CuArrayAdaptor{1}) == 1 end end diff --git a/NDTensors/src/lib/Expose/src/exposed.jl b/NDTensors/src/lib/Expose/src/exposed.jl index b57f19e34c..64030fc64d 100644 --- a/NDTensors/src/lib/Expose/src/exposed.jl +++ b/NDTensors/src/lib/Expose/src/exposed.jl @@ -1,17 +1,17 @@ using TypeParameterAccessors: - TypeParameterAccessors, unwrap_array_type, parenttype, type_parameters -struct Exposed{Unwrapped,Object} - object::Object + TypeParameterAccessors, unwrap_array_type, parenttype, type_parameters +struct Exposed{Unwrapped, Object} + object::Object end -expose(object) = Exposed{unwrap_array_type(object),typeof(object)}(object) +expose(object) = Exposed{unwrap_array_type(object), typeof(object)}(object) unexpose(E::Exposed) = E.object ## TODO remove TypeParameterAccessors when SetParameters is removed TypeParameterAccessors.parenttype(type::Type{<:Exposed}) = type_parameters(type, parenttype) function TypeParameterAccessors.position(::Type{<:Exposed}, ::typeof(parenttype)) - return TypeParameterAccessors.Position(1) + return TypeParameterAccessors.Position(1) end TypeParameterAccessors.unwrap_array_type(type::Type{<:Exposed}) = parenttype(type) TypeParameterAccessors.unwrap_array_type(E::Exposed) = unwrap_array_type(typeof(E)) diff --git a/NDTensors/src/lib/Expose/src/functions/abstractarray.jl b/NDTensors/src/lib/Expose/src/functions/abstractarray.jl index ca6e573c56..639f7af3ac 100644 --- a/NDTensors/src/lib/Expose/src/functions/abstractarray.jl +++ b/NDTensors/src/lib/Expose/src/functions/abstractarray.jl @@ -6,14 +6,14 @@ adjoint(E::Exposed) = adjoint(unexpose(E)) getindex(E::Exposed) = unexpose(E)[] function setindex!(E::Exposed, x::Number) - unexpose(E)[] = x - return unexpose(E) + unexpose(E)[] = x + return unexpose(E) end getindex(E::Exposed, I...) = unexpose(E)[I...] 
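# --- Editorial aside (not part of the patch): `Exposed` records the fully
# unwrapped array type as its first type parameter, so the methods in this file
# (and the GPU extensions) can dispatch on the underlying storage even when it
# sits behind wrappers like `Transpose`, `Adjoint`, or `SubArray`. A minimal
# CPU sketch; the type layout matches the Expose tests further below.
using LinearAlgebra: transpose
using NDTensors.Expose: Exposed, expose, unexpose
v  = randn(4, 4)
vt = transpose(v)
E  = expose(vt)
typeof(E) == Exposed{Matrix{Float64}, typeof(vt)}   # true: parent array type is exposed
unexpose(E) === vt                                   # true: the original wrapper is kept
# --- end of aside ---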
function copy(E::Exposed) - return copy(unexpose(E)) + return copy(unexpose(E)) end any(f, E::Exposed) = any(f, unexpose(E)) diff --git a/NDTensors/src/lib/Expose/src/functions/adapt.jl b/NDTensors/src/lib/Expose/src/functions/adapt.jl index 6ebc8bf7d6..b24a597ad8 100644 --- a/NDTensors/src/lib/Expose/src/functions/adapt.jl +++ b/NDTensors/src/lib/Expose/src/functions/adapt.jl @@ -3,6 +3,6 @@ Adapt.adapt_structure(to, x::Exposed) = adapt_structure(to, unexpose(x)) # https://github.com/JuliaGPU/Adapt.jl/pull/51 # TODO: Remove once https://github.com/JuliaGPU/Adapt.jl/issues/71 is addressed. -function Adapt.adapt_structure(to, A::Exposed{<:Any,<:Hermitian}) - return Hermitian(adapt(to, parent(unexpose(A))), Symbol(unexpose(A).uplo)) +function Adapt.adapt_structure(to, A::Exposed{<:Any, <:Hermitian}) + return Hermitian(adapt(to, parent(unexpose(A))), Symbol(unexpose(A).uplo)) end diff --git a/NDTensors/src/lib/Expose/src/functions/append.jl b/NDTensors/src/lib/Expose/src/functions/append.jl index a72e895bed..040226a184 100644 --- a/NDTensors/src/lib/Expose/src/functions/append.jl +++ b/NDTensors/src/lib/Expose/src/functions/append.jl @@ -1,3 +1,3 @@ function Base.append!(Ecollection::Exposed, collections...) - return append!(unexpose(Ecollection), collections...) + return append!(unexpose(Ecollection), collections...) end diff --git a/NDTensors/src/lib/Expose/src/functions/copyto.jl b/NDTensors/src/lib/Expose/src/functions/copyto.jl index c2ffd3c29a..b0b6ff3113 100644 --- a/NDTensors/src/lib/Expose/src/functions/copyto.jl +++ b/NDTensors/src/lib/Expose/src/functions/copyto.jl @@ -1,4 +1,4 @@ function copyto!(R::Exposed, T::Exposed) - copyto!(unexpose(R), unexpose(T)) - return unexpose(R) + copyto!(unexpose(R), unexpose(T)) + return unexpose(R) end diff --git a/NDTensors/src/lib/Expose/src/functions/linearalgebra.jl b/NDTensors/src/lib/Expose/src/functions/linearalgebra.jl index 4dfa65f43b..6ed285c196 100644 --- a/NDTensors/src/lib/Expose/src/functions/linearalgebra.jl +++ b/NDTensors/src/lib/Expose/src/functions/linearalgebra.jl @@ -1,5 +1,5 @@ function qr(E::Exposed) - return qr(unexpose(E)) + return qr(unexpose(E)) end ## These functions do not exist in `LinearAlgebra` but were defined ## in NDTensors. Because Expose is imported before NDTensors, @@ -8,22 +8,22 @@ end ## I have done the same thing for the function cpu ## Expose.qr_positive function qr_positive(E::Exposed) - return qr_positive(unexpose(E)) + return qr_positive(unexpose(E)) end ## Expose.ql function ql(E::Exposed) - return ql(unexpose(E)) + return ql(unexpose(E)) end ## Expose.ql_positive function ql_positive(E::Exposed) - return ql_positive(unexpose(E)) + return ql_positive(unexpose(E)) end function LinearAlgebra.eigen(E::Exposed) - return eigen(unexpose(E)) + return eigen(unexpose(E)) end function svd(E::Exposed; kwargs...) - return svd(unexpose(E); kwargs...) + return svd(unexpose(E); kwargs...) 
end diff --git a/NDTensors/src/lib/Expose/src/functions/mul.jl b/NDTensors/src/lib/Expose/src/functions/mul.jl index c858df7685..e246fce3b0 100644 --- a/NDTensors/src/lib/Expose/src/functions/mul.jl +++ b/NDTensors/src/lib/Expose/src/functions/mul.jl @@ -1,4 +1,4 @@ function mul!(CM::Exposed, AM::Exposed, BM::Exposed, α, β) - mul!(unexpose(CM), unexpose(AM), unexpose(BM), α, β) - return unexpose(CM) + mul!(unexpose(CM), unexpose(AM), unexpose(BM), α, β) + return unexpose(CM) end diff --git a/NDTensors/src/lib/Expose/src/functions/permutedims.jl b/NDTensors/src/lib/Expose/src/functions/permutedims.jl index a4c78eec49..5222778b5d 100644 --- a/NDTensors/src/lib/Expose/src/functions/permutedims.jl +++ b/NDTensors/src/lib/Expose/src/functions/permutedims.jl @@ -1,13 +1,13 @@ function permutedims(E::Exposed, perm) - return permutedims(unexpose(E), perm) + return permutedims(unexpose(E), perm) end function permutedims!(Edest::Exposed, Esrc::Exposed, perm) - permutedims!(unexpose(Edest), unexpose(Esrc), perm) - return unexpose(Edest) + permutedims!(unexpose(Edest), unexpose(Esrc), perm) + return unexpose(Edest) end function permutedims!(Edest::Exposed, Esrc::Exposed, perm, f) - unexpose(Edest) .= f.(unexpose(Edest), permutedims(Esrc, perm)) - return unexpose(Edest) + unexpose(Edest) .= f.(unexpose(Edest), permutedims(Esrc, perm)) + return unexpose(Edest) end diff --git a/NDTensors/src/lib/Expose/src/import.jl b/NDTensors/src/lib/Expose/src/import.jl index 4a1f789ad8..08c767e952 100644 --- a/NDTensors/src/lib/Expose/src/import.jl +++ b/NDTensors/src/lib/Expose/src/import.jl @@ -1,14 +1,14 @@ import Base: - adjoint, - permutedims, - permutedims!, - copy, - copyto!, - parent, - print_array, - transpose, - getindex, - setindex!, - any + adjoint, + permutedims, + permutedims!, + copy, + copyto!, + parent, + print_array, + transpose, + getindex, + setindex!, + any import LinearAlgebra: mul!, qr, svd diff --git a/NDTensors/src/lib/Expose/test/runtests.jl b/NDTensors/src/lib/Expose/test/runtests.jl index 11a6f64639..9b65ed93cf 100644 --- a/NDTensors/src/lib/Expose/test/runtests.jl +++ b/NDTensors/src/lib/Expose/test/runtests.jl @@ -1,17 +1,17 @@ @eval module $(gensym()) using GPUArraysCore: @allowscalar using LinearAlgebra: - LinearAlgebra, - Adjoint, - Diagonal, - Hermitian, - Symmetric, - Transpose, - eigen, - mul!, - norm, - qr, - svd + LinearAlgebra, + Adjoint, + Diagonal, + Hermitian, + Symmetric, + Transpose, + eigen, + mul!, + norm, + qr, + svd using NDTensors: NDTensors, mul!! 
using NDTensors.Expose: Expose, Exposed, expose using NDTensors.GPUArraysCoreExtensions: cpu @@ -21,249 +21,249 @@ include(joinpath(pkgdir(NDTensors), "test", "NDTensorsTestUtils", "NDTensorsTest using .NDTensorsTestUtils: devices_list @testset "Testing Expose $dev, $elt" for dev in devices_list(ARGS), - elt in (Float32, ComplexF32) - - rng = StableRNG(1234) - v = dev(randn(rng, elt, 10)) - vt = transpose(v) - va = v' - - E = expose(v) - @test any(>(0) ∘ real, E) - - Et = expose(vt) - Ea = expose(va) - v_type = typeof(v) - e_type = eltype(v) - @test typeof(E) == Exposed{v_type,v_type} - @test typeof(Et) == Exposed{v_type,Transpose{e_type,v_type}} - @test typeof(Ea) == Exposed{v_type,Adjoint{e_type,v_type}} - - @test parent(E) == v - @test parent(Et) == v - @test parent(Ea) == v - @test transpose(E) == vt - @test cpu(E) == cpu(v) - @test cpu(Et) == cpu(vt) - - m = reshape(v, (5, 2)) - mt = transpose(m) - ma = m' - E = expose(m) - Et = expose(mt) - Ea = expose(ma) - - m_type = typeof(m) - @test typeof(E) == Exposed{m_type,m_type} - @test typeof(Et) == Exposed{m_type,Transpose{e_type,m_type}} - @test typeof(Ea) == Exposed{m_type,Adjoint{e_type,m_type}} - - o = dev(randn(elt, 1)) - expose(o)[] = 2 - @test expose(o)[] == 2 - - fill!(m, zero(elt)) - @test any(!Base.isinf, expose(m)) - - mp = copy(Ea) - @test mp == ma - fill!(ma, elt(2)) - copyto!(expose(mp), expose(ma)) - @test mp == ma - - q, r = qr(expose(mp)) - @test q * r ≈ mp - - q, r = Expose.qr_positive(expose(mp)) - @test q * r ≈ mp - - square = dev(rand(real(elt), (10, 10))) - square = (square + transpose(square)) / 2 - ## CUDA only supports Hermitian or Symmetric eigen decompositions - ## So I symmetrize square and call symetric here - l, U = eigen(expose(Symmetric(square))) - @test eltype(l) == real(elt) - @test eltype(U) == real(elt) - @test square * U ≈ U * Diagonal(l) - - square = dev(rand(elt, (10, 10))) - # Can use `hermitianpart` in Julia 1.10 - square = (square + square') / 2 - ## CUDA only supports Hermitian or Symmetric eigen decompositions - ## So I symmetrize square and call symetric here - l, U = eigen(expose(Hermitian(square))) - @test eltype(l) == real(elt) - @test eltype(U) == elt - @test square * U ≈ U * Diagonal(l) - - U, S, V, = svd(expose(mp)) - @test eltype(U) == elt - @test eltype(S) == real(elt) - @test eltype(V) == elt - @test U * Diagonal(S) * V' ≈ mp - - cm = dev(randn(elt, 2, 2)) - mul!(expose(cm), expose(mp), expose(mp'), 1.0, 0.0) - @test cm ≈ mp * mp' - - @test permutedims(expose(mp), (2, 1)) == transpose(mp) - fill!(mt, 3) - permutedims!(expose(m), expose(mt), (2, 1)) - @test norm(m) ≈ sqrt(3^2 * 10) - @test size(m) == (5, 2) - permutedims!(expose(m), expose(mt), (2, 1), +) - @test size(m) == (5, 2) - @test norm(m) ≈ sqrt(6^2 * 10) - - m = reshape(m, (5, 2, 1)) - mt = fill!(similar(m), elt(3)) - m = permutedims(expose(m), (2, 1, 3)) - @test size(m) == (2, 5, 1) - permutedims!(expose(m), expose(mt), (2, 1, 3)) - @test norm(m) ≈ sqrt(3^2 * 10) - permutedims!(expose(m), expose(mt), (2, 1, 3), -) - @test norm(m) == 0 - - x = dev(rand(elt, 4, 4)) - y = dev(rand(elt, 4, 4)) - copyto!(expose(y), expose(x)) - @test y == x - - y = dev(rand(elt, 4, 4)) - x = Base.ReshapedArray(dev(rand(elt, 16)), (4, 4), ()) - copyto!(expose(y), expose(x)) - @test cpu(y) == cpu(x) - @test cpu(copy(expose(x))) == cpu(x) - - ## Tests for Metal because permutedims with ReshapedArray does not work properly - ## transpose(ReshapedArray(MtlArray)) fails with scalar indexing so calling copy to - ## evaluate tests in the following tests - 
y = dev(rand(elt, 4, 4)) - @test permutedims(expose(y), (2, 1)) == transpose(y) - y = Base.ReshapedArray(y, (2, 8), ()) - @test permutedims(expose(y), (2, 1)) == transpose(copy(expose(y))) - yt = dev(rand(elt, (8, 2))) - permutedims!(expose(y), expose(yt), (2, 1)) - @test copy(expose(y)) == transpose(yt) - yt = dev(rand(elt, 8, 2)) - permutedims!(expose(yt), expose(y), (2, 1)) - @test copy(expose(y)) == transpose(yt) - - y = reshape(dev(randn(elt, 8))', 2, 4) - x = Base.ReshapedArray(dev(randn(elt, 8, 8)'[1:8]), (2, 4), ()) - z = dev(fill!(Matrix{elt}(undef, (2, 4)), 0.0)) - for i in 1:2 - for j in 1:4 - @allowscalar z[i, j] = y[i, j] * x[i, j] + elt in (Float32, ComplexF32) + + rng = StableRNG(1234) + v = dev(randn(rng, elt, 10)) + vt = transpose(v) + va = v' + + E = expose(v) + @test any(>(0) ∘ real, E) + + Et = expose(vt) + Ea = expose(va) + v_type = typeof(v) + e_type = eltype(v) + @test typeof(E) == Exposed{v_type, v_type} + @test typeof(Et) == Exposed{v_type, Transpose{e_type, v_type}} + @test typeof(Ea) == Exposed{v_type, Adjoint{e_type, v_type}} + + @test parent(E) == v + @test parent(Et) == v + @test parent(Ea) == v + @test transpose(E) == vt + @test cpu(E) == cpu(v) + @test cpu(Et) == cpu(vt) + + m = reshape(v, (5, 2)) + mt = transpose(m) + ma = m' + E = expose(m) + Et = expose(mt) + Ea = expose(ma) + + m_type = typeof(m) + @test typeof(E) == Exposed{m_type, m_type} + @test typeof(Et) == Exposed{m_type, Transpose{e_type, m_type}} + @test typeof(Ea) == Exposed{m_type, Adjoint{e_type, m_type}} + + o = dev(randn(elt, 1)) + expose(o)[] = 2 + @test expose(o)[] == 2 + + fill!(m, zero(elt)) + @test any(!Base.isinf, expose(m)) + + mp = copy(Ea) + @test mp == ma + fill!(ma, elt(2)) + copyto!(expose(mp), expose(ma)) + @test mp == ma + + q, r = qr(expose(mp)) + @test q * r ≈ mp + + q, r = Expose.qr_positive(expose(mp)) + @test q * r ≈ mp + + square = dev(rand(real(elt), (10, 10))) + square = (square + transpose(square)) / 2 + ## CUDA only supports Hermitian or Symmetric eigen decompositions + ## So I symmetrize square and call symetric here + l, U = eigen(expose(Symmetric(square))) + @test eltype(l) == real(elt) + @test eltype(U) == real(elt) + @test square * U ≈ U * Diagonal(l) + + square = dev(rand(elt, (10, 10))) + # Can use `hermitianpart` in Julia 1.10 + square = (square + square') / 2 + ## CUDA only supports Hermitian or Symmetric eigen decompositions + ## So I symmetrize square and call symetric here + l, U = eigen(expose(Hermitian(square))) + @test eltype(l) == real(elt) + @test eltype(U) == elt + @test square * U ≈ U * Diagonal(l) + + U, S, V, = svd(expose(mp)) + @test eltype(U) == elt + @test eltype(S) == real(elt) + @test eltype(V) == elt + @test U * Diagonal(S) * V' ≈ mp + + cm = dev(randn(elt, 2, 2)) + mul!(expose(cm), expose(mp), expose(mp'), 1.0, 0.0) + @test cm ≈ mp * mp' + + @test permutedims(expose(mp), (2, 1)) == transpose(mp) + fill!(mt, 3) + permutedims!(expose(m), expose(mt), (2, 1)) + @test norm(m) ≈ sqrt(3^2 * 10) + @test size(m) == (5, 2) + permutedims!(expose(m), expose(mt), (2, 1), +) + @test size(m) == (5, 2) + @test norm(m) ≈ sqrt(6^2 * 10) + + m = reshape(m, (5, 2, 1)) + mt = fill!(similar(m), elt(3)) + m = permutedims(expose(m), (2, 1, 3)) + @test size(m) == (2, 5, 1) + permutedims!(expose(m), expose(mt), (2, 1, 3)) + @test norm(m) ≈ sqrt(3^2 * 10) + permutedims!(expose(m), expose(mt), (2, 1, 3), -) + @test norm(m) == 0 + + x = dev(rand(elt, 4, 4)) + y = dev(rand(elt, 4, 4)) + copyto!(expose(y), expose(x)) + @test y == x + + y = dev(rand(elt, 4, 4)) + x = 
Base.ReshapedArray(dev(rand(elt, 16)), (4, 4), ()) + copyto!(expose(y), expose(x)) + @test cpu(y) == cpu(x) + @test cpu(copy(expose(x))) == cpu(x) + + ## Tests for Metal because permutedims with ReshapedArray does not work properly + ## transpose(ReshapedArray(MtlArray)) fails with scalar indexing so calling copy to + ## evaluate tests in the following tests + y = dev(rand(elt, 4, 4)) + @test permutedims(expose(y), (2, 1)) == transpose(y) + y = Base.ReshapedArray(y, (2, 8), ()) + @test permutedims(expose(y), (2, 1)) == transpose(copy(expose(y))) + yt = dev(rand(elt, (8, 2))) + permutedims!(expose(y), expose(yt), (2, 1)) + @test copy(expose(y)) == transpose(yt) + yt = dev(rand(elt, 8, 2)) + permutedims!(expose(yt), expose(y), (2, 1)) + @test copy(expose(y)) == transpose(yt) + + y = reshape(dev(randn(elt, 8))', 2, 4) + x = Base.ReshapedArray(dev(randn(elt, 8, 8)'[1:8]), (2, 4), ()) + z = dev(fill!(Matrix{elt}(undef, (2, 4)), 0.0)) + for i in 1:2 + for j in 1:4 + @allowscalar z[i, j] = y[i, j] * x[i, j] + end end - end - permutedims!(expose(y), expose(x), (1, 2), *) - @allowscalar @test reshape(z, size(y)) ≈ y - for i in 1:2 - for j in 1:4 - @allowscalar z[i, j] = x[i, j] * y[i, j] + permutedims!(expose(y), expose(x), (1, 2), *) + @allowscalar @test reshape(z, size(y)) ≈ y + for i in 1:2 + for j in 1:4 + @allowscalar z[i, j] = x[i, j] * y[i, j] + end end - end - permutedims!(expose(x), expose(y), (1, 2), *) - ## I copy x here because it is a ReshapedArray{SubArray} which causes `≈` - ## to throw an error - @test z ≈ copy(expose(x)) - - y = dev(rand(elt, 4, 4)) - x = @view dev(rand(elt, 8, 8))[1:4, 1:4] - copyto!(expose(y), expose(x)) - @test y == x - @test copy(x) == x - - y = dev(randn(elt, 16)) - x = reshape(dev(randn(elt, 4, 4))', 16) - copyto!(expose(y), expose(x)) - @allowscalar begin + permutedims!(expose(x), expose(y), (1, 2), *) + ## I copy x here because it is a ReshapedArray{SubArray} which causes `≈` + ## to throw an error + @test z ≈ copy(expose(x)) + + y = dev(rand(elt, 4, 4)) + x = @view dev(rand(elt, 8, 8))[1:4, 1:4] + copyto!(expose(y), expose(x)) @test y == x @test copy(x) == x - end - y = dev(randn(elt, 8)) - x = @view reshape(dev(randn(elt, 8, 8))', 64)[1:8] - copyto!(expose(y), expose(x)) - @allowscalar begin - @test y == x - ## temporarily use expose copy because this is broken in Metal 1.1 - @test copy(expose(x)) == x - end - - y = Base.ReshapedArray(dev(randn(elt, 16)), (4, 4), ()) - x = dev(randn(elt, 4, 4)) - permutedims!(expose(y), expose(x), (2, 1)) - @test cpu(y) == transpose(cpu(x)) - - ########################################## - ### Testing an issue with CUDA&Metal transpose/adjoint mul - A = dev(randn(elt, (3, 2))) - B = dev(randn(elt, (3, 4))) - C = dev(randn(elt, (4, 2))) - Cp = copy(C) - - ## This fails with scalar indexing - if dev != cpu - @test_broken mul!(transpose(C), transpose(A), B, true, false) - end - mul!(C, transpose(B), A, true, false) - mul!(expose(transpose(Cp)), expose(transpose(A)), expose(B), true, false) - @test C ≈ Cp - Cp = zero(C) - ## Try calling mul!! 
with transposes to verify that code works - Cpt = mul!!(transpose(Cp), transpose(A), B, true, false) - @test transpose(Cpt) ≈ C - - Cp = zero(C) - ## This fails with scalar indexing - if dev != cpu - @test_broken mul!(C', A', B, true, false) - end - mul!(C, B', A, true, false) - mul!(expose(Cp'), expose(A'), expose(B), true, false) - @test C ≈ Cp - Cp = zero(C) - Cpt = mul!!(Cp', A', B, true, false) - @test Cpt' ≈ C - - ################################## - ### Add test for transpose(reshape(adjoint )) failure in CUDA - - A = dev(transpose(reshape(randn(elt, 2, 12)', (12, 2)))) - B = dev(randn(elt, 2, 2)) - C = dev(zeros(elt, 2, 12)) - mul!(expose(C), expose(B), expose(A), true, false) - Cp = cpu(similar(C)) - mul!(expose(Cp), expose(cpu(B)), expose(cpu(A)), true, false) - @test cpu(C) ≈ Cp - zero(C) - mul!!(C, B, A, true, false) - @test cpu(C) ≈ Cp - - ################################## - ### Add test for append! to address scalar indexing in GPUs - ## For now, Metal doesn't have a `resize!` function so all the tests are failing - if (dev == NDTensors.mtl) - continue - end - A = dev(randn(elt, 10)) - Ap = copy(A) - B = randn(elt, 3) - C = append!(expose(A), B) - - @test length(C) == 13 - @test sum(C) ≈ sum(Ap) + sum(B) - - A = Ap - B = dev(randn(elt, 29)) - Bp = copy(B) - C = append!(expose(B), A) - @test length(C) == 39 - @test sum(C) ≈ sum(Bp) + sum(Ap) - @allowscalar for i in 1:length(B) - C[i] == B[i] - end + y = dev(randn(elt, 16)) + x = reshape(dev(randn(elt, 4, 4))', 16) + copyto!(expose(y), expose(x)) + @allowscalar begin + @test y == x + @test copy(x) == x + end + + y = dev(randn(elt, 8)) + x = @view reshape(dev(randn(elt, 8, 8))', 64)[1:8] + copyto!(expose(y), expose(x)) + @allowscalar begin + @test y == x + ## temporarily use expose copy because this is broken in Metal 1.1 + @test copy(expose(x)) == x + end + + y = Base.ReshapedArray(dev(randn(elt, 16)), (4, 4), ()) + x = dev(randn(elt, 4, 4)) + permutedims!(expose(y), expose(x), (2, 1)) + @test cpu(y) == transpose(cpu(x)) + + ########################################## + ### Testing an issue with CUDA&Metal transpose/adjoint mul + A = dev(randn(elt, (3, 2))) + B = dev(randn(elt, (3, 4))) + C = dev(randn(elt, (4, 2))) + Cp = copy(C) + + ## This fails with scalar indexing + if dev != cpu + @test_broken mul!(transpose(C), transpose(A), B, true, false) + end + mul!(C, transpose(B), A, true, false) + mul!(expose(transpose(Cp)), expose(transpose(A)), expose(B), true, false) + @test C ≈ Cp + Cp = zero(C) + ## Try calling mul!! with transposes to verify that code works + Cpt = mul!!(transpose(Cp), transpose(A), B, true, false) + @test transpose(Cpt) ≈ C + + Cp = zero(C) + ## This fails with scalar indexing + if dev != cpu + @test_broken mul!(C', A', B, true, false) + end + mul!(C, B', A, true, false) + mul!(expose(Cp'), expose(A'), expose(B), true, false) + @test C ≈ Cp + Cp = zero(C) + Cpt = mul!!(Cp', A', B, true, false) + @test Cpt' ≈ C + + ################################## + ### Add test for transpose(reshape(adjoint )) failure in CUDA + + A = dev(transpose(reshape(randn(elt, 2, 12)', (12, 2)))) + B = dev(randn(elt, 2, 2)) + C = dev(zeros(elt, 2, 12)) + mul!(expose(C), expose(B), expose(A), true, false) + Cp = cpu(similar(C)) + mul!(expose(Cp), expose(cpu(B)), expose(cpu(A)), true, false) + @test cpu(C) ≈ Cp + zero(C) + mul!!(C, B, A, true, false) + @test cpu(C) ≈ Cp + + ################################## + ### Add test for append! 
to address scalar indexing in GPUs + ## For now, Metal doesn't have a `resize!` function so all the tests are failing + if (dev == NDTensors.mtl) + continue + end + A = dev(randn(elt, 10)) + Ap = copy(A) + B = randn(elt, 3) + C = append!(expose(A), B) + + @test length(C) == 13 + @test sum(C) ≈ sum(Ap) + sum(B) + + A = Ap + B = dev(randn(elt, 29)) + Bp = copy(B) + C = append!(expose(B), A) + @test length(C) == 39 + @test sum(C) ≈ sum(Bp) + sum(Ap) + @allowscalar for i in 1:length(B) + C[i] == B[i] + end end end diff --git a/NDTensors/src/lib/GPUArraysCoreExtensions/src/gpuarrayscore.jl b/NDTensors/src/lib/GPUArraysCoreExtensions/src/gpuarrayscore.jl index 0042a05dab..e98d6c70fb 100644 --- a/NDTensors/src/lib/GPUArraysCoreExtensions/src/gpuarrayscore.jl +++ b/NDTensors/src/lib/GPUArraysCoreExtensions/src/gpuarrayscore.jl @@ -2,14 +2,14 @@ using ..Expose: Exposed, unexpose using TypeParameterAccessors: TypeParameterAccessors, type_parameters, set_type_parameters function storagemode(object) - return storagemode(typeof(object)) + return storagemode(typeof(object)) end function storagemode(type::Type) - return type_parameters(type, storagemode) + return type_parameters(type, storagemode) end function set_storagemode(type::Type, param) - return set_type_parameters(type, storagemode, param) + return set_type_parameters(type, storagemode, param) end function cpu end diff --git a/NDTensors/src/lib/GPUArraysCoreExtensions/test/runtests.jl b/NDTensors/src/lib/GPUArraysCoreExtensions/test/runtests.jl index 3e53b7f509..3267639190 100644 --- a/NDTensors/src/lib/GPUArraysCoreExtensions/test/runtests.jl +++ b/NDTensors/src/lib/GPUArraysCoreExtensions/test/runtests.jl @@ -2,6 +2,6 @@ using Test: @testset, @test using NDTensors.GPUArraysCoreExtensions: storagemode @testset "Test Base" begin - @test storagemode isa Function + @test storagemode isa Function end end diff --git a/NDTensors/src/lib/MetalExtensions/src/metal.jl b/NDTensors/src/lib/MetalExtensions/src/metal.jl index ab329d3249..109c0d0f26 100644 --- a/NDTensors/src/lib/MetalExtensions/src/metal.jl +++ b/NDTensors/src/lib/MetalExtensions/src/metal.jl @@ -11,5 +11,5 @@ function mtl end struct MtlArrayAdaptor{B} end function TypeParameterAccessors.position(::Type{<:MtlArrayAdaptor}, ::typeof(storagemode)) - return Position(1) + return Position(1) end diff --git a/NDTensors/src/lib/MetalExtensions/test/runtests.jl b/NDTensors/src/lib/MetalExtensions/test/runtests.jl index ca3b5cc32c..93baef09ea 100644 --- a/NDTensors/src/lib/MetalExtensions/test/runtests.jl +++ b/NDTensors/src/lib/MetalExtensions/test/runtests.jl @@ -2,6 +2,6 @@ using Test: @testset, @test using NDTensors.MetalExtensions: mtl @testset "mtl function exists" begin - @test mtl isa Function + @test mtl isa Function end end diff --git a/NDTensors/src/linearalgebra/linearalgebra.jl b/NDTensors/src/linearalgebra/linearalgebra.jl index 64135c6f72..f24f02c22e 100644 --- a/NDTensors/src/linearalgebra/linearalgebra.jl +++ b/NDTensors/src/linearalgebra/linearalgebra.jl @@ -12,71 +12,71 @@ import .Expose: qr_positive, ql, ql_positive # ```julia # contract(T1, (1, -1), T2, (-1, 2)) # ``` -function Base.:*(T1::Tensor{<:Any,2,<:Dense}, T2::Tensor{<:Any,2,<:Dense}) - RM = matrix(T1) * matrix(T2) - indsR = (ind(T1, 1), ind(T2, 2)) - return tensor(Dense(vec(RM)), indsR) +function Base.:*(T1::Tensor{<:Any, 2, <:Dense}, T2::Tensor{<:Any, 2, <:Dense}) + RM = matrix(T1) * matrix(T2) + indsR = (ind(T1, 1), ind(T2, 2)) + return tensor(Dense(vec(RM)), indsR) end function LinearAlgebra.dot(x::Tensor, y::Tensor) 
- size(x) == size(y) || throw( - DimensionMismatch( - "dimensions must match in `dot(x::Tensor, y::Tensor)`: `x` has size `$(size(x))` while `y` has size `$(size(y))`.", - ), - ) - labels = ntuple(dim -> -dim, ndims(x)) - return contract(conj(x), labels, y, labels)[] + size(x) == size(y) || throw( + DimensionMismatch( + "dimensions must match in `dot(x::Tensor, y::Tensor)`: `x` has size `$(size(x))` while `y` has size `$(size(y))`.", + ), + ) + labels = ntuple(dim -> -dim, ndims(x)) + return contract(conj(x), labels, y, labels)[] end -function LinearAlgebra.exp(T::DenseTensor{ElT,2}) where {ElT<:Union{Real,Complex}} - expTM = exp(matrix(T)) - return tensor(Dense(vec(expTM)), inds(T)) +function LinearAlgebra.exp(T::DenseTensor{ElT, 2}) where {ElT <: Union{Real, Complex}} + expTM = exp(matrix(T)) + return tensor(Dense(vec(expTM)), inds(T)) end function LinearAlgebra.exp( - T::Hermitian{ElT,<:DenseTensor{ElT,2}} -) where {ElT<:Union{Real,Complex}} - # exp(::Hermitian/Symmetric) returns Hermitian/Symmetric, - # so extract the parent matrix - expTM = parent(exp(matrix(T))) - return tensor(Dense(vec(expTM)), inds(T)) + T::Hermitian{ElT, <:DenseTensor{ElT, 2}} + ) where {ElT <: Union{Real, Complex}} + # exp(::Hermitian/Symmetric) returns Hermitian/Symmetric, + # so extract the parent matrix + expTM = parent(exp(matrix(T))) + return tensor(Dense(vec(expTM)), inds(T)) end function svd_catch_error(A; kwargs...) - USV = try - svd(expose(A); kwargs...) - catch - return nothing - end - return USV + USV = try + svd(expose(A); kwargs...) + catch + return nothing + end + return USV end function lapack_svd_error_message(alg) - return "The SVD algorithm `\"$alg\"` has thrown an error,\n" * - "likely because of a convergance failure. You can try\n" * - "other SVD algorithms that may converge better using the\n" * - "`alg` (or `svd_alg` if called through `factorize` or MPS/MPO functionality) keyword argument:\n\n" * - " - \"divide_and_conquer\" is a divide-and-conquer algorithm\n" * - " (LAPACK's `gesdd`). It is fast, but may lead to some innacurate\n" * - " singular values for very ill-conditioned matrices.\n" * - " It also may sometimes fail to converge, leading to errors\n" * - " (in which case `\"qr_iteration\"` or `\"recursive\"` can be tried).\n\n" * - " - `\"qr_iteration\"` (LAPACK's `gesvd`) is typically slower \n" * - " than \"divide_and_conquer\", especially for large matrices,\n" * - " but is more accurate for very ill-conditioned matrices \n" * - " compared to `\"divide_and_conquer\"`.\n\n" * - " - `\"recursive\"` is ITensor's custom SVD algorithm. It is very\n" * - " reliable, but may be slow if high precision is needed.\n" * - " To get an `svd` of a matrix `A`, an eigendecomposition of\n" * - " ``A^{\\dagger} A`` is used to compute `U` and then a `qr` of\n" * - " ``A^{\\dagger} U`` is used to compute `V`. This is performed\n" * - " recursively to compute small singular values.\n" * - " - `\"qr_algorithm\"` is a CUDA.jl implemented SVD algorithm using QR.\n" * - " - `\"jacobi_algorithm\"` is a CUDA.jl implemented SVD algorithm.\n\n" * - "Returning `nothing`. For an output `F = svd(A, ...)` you can check if\n" * - "`isnothing(F)` in your code and try a different algorithm.\n\n" * - "To suppress this message in the future, you can wrap the `svd` call in the\n" * - "`@suppress` macro from the `Suppressor` package.\n" + return "The SVD algorithm `\"$alg\"` has thrown an error,\n" * + "likely because of a convergance failure. 
You can try\n" * + "other SVD algorithms that may converge better using the\n" * + "`alg` (or `svd_alg` if called through `factorize` or MPS/MPO functionality) keyword argument:\n\n" * + " - \"divide_and_conquer\" is a divide-and-conquer algorithm\n" * + " (LAPACK's `gesdd`). It is fast, but may lead to some innacurate\n" * + " singular values for very ill-conditioned matrices.\n" * + " It also may sometimes fail to converge, leading to errors\n" * + " (in which case `\"qr_iteration\"` or `\"recursive\"` can be tried).\n\n" * + " - `\"qr_iteration\"` (LAPACK's `gesvd`) is typically slower \n" * + " than \"divide_and_conquer\", especially for large matrices,\n" * + " but is more accurate for very ill-conditioned matrices \n" * + " compared to `\"divide_and_conquer\"`.\n\n" * + " - `\"recursive\"` is ITensor's custom SVD algorithm. It is very\n" * + " reliable, but may be slow if high precision is needed.\n" * + " To get an `svd` of a matrix `A`, an eigendecomposition of\n" * + " ``A^{\\dagger} A`` is used to compute `U` and then a `qr` of\n" * + " ``A^{\\dagger} U`` is used to compute `V`. This is performed\n" * + " recursively to compute small singular values.\n" * + " - `\"qr_algorithm\"` is a CUDA.jl implemented SVD algorithm using QR.\n" * + " - `\"jacobi_algorithm\"` is a CUDA.jl implemented SVD algorithm.\n\n" * + "Returning `nothing`. For an output `F = svd(A, ...)` you can check if\n" * + "`isnothing(F)` in your code and try a different algorithm.\n\n" * + "To suppress this message in the future, you can wrap the `svd` call in the\n" * + "`@suppress` macro from the `Suppressor` package.\n" end """ @@ -85,138 +85,138 @@ end svd of an order-2 DenseTensor """ function svd( - T::DenseTensor{ElT,2,IndsT}; - mindim=nothing, - maxdim=nothing, - cutoff=nothing, - use_absolute_cutoff=nothing, - use_relative_cutoff=nothing, - alg=nothing, - # Only used by BlockSparse svd - min_blockdim=nothing, -) where {ElT,IndsT} - alg = replace_nothing(alg, default_svd_alg(T)) - if alg == "divide_and_conquer" - MUSV = svd_catch_error(matrix(T); alg=LinearAlgebra.DivideAndConquer()) - if isnothing(MUSV) - # If "divide_and_conquer" fails, try "qr_iteration" - alg = "qr_iteration" - MUSV = svd_catch_error(matrix(T); alg=LinearAlgebra.QRIteration()) - if isnothing(MUSV) - # If "qr_iteration" fails, try "recursive" - alg = "recursive" + T::DenseTensor{ElT, 2, IndsT}; + mindim = nothing, + maxdim = nothing, + cutoff = nothing, + use_absolute_cutoff = nothing, + use_relative_cutoff = nothing, + alg = nothing, + # Only used by BlockSparse svd + min_blockdim = nothing, + ) where {ElT, IndsT} + alg = replace_nothing(alg, default_svd_alg(T)) + if alg == "divide_and_conquer" + MUSV = svd_catch_error(matrix(T); alg = LinearAlgebra.DivideAndConquer()) + if isnothing(MUSV) + # If "divide_and_conquer" fails, try "qr_iteration" + alg = "qr_iteration" + MUSV = svd_catch_error(matrix(T); alg = LinearAlgebra.QRIteration()) + if isnothing(MUSV) + # If "qr_iteration" fails, try "recursive" + alg = "recursive" + MUSV = svd_recursive(matrix(T)) + end + end + elseif alg == "qr_iteration" + MUSV = svd_catch_error(matrix(T); alg = LinearAlgebra.QRIteration()) + if isnothing(MUSV) + # If "qr_iteration" fails, try "recursive" + alg = "recursive" + MUSV = svd_recursive(matrix(T)) + end + elseif alg == "recursive" MUSV = svd_recursive(matrix(T)) - end + elseif alg == "qr_algorithm" || alg == "jacobi_algorithm" + MUSV = svd_catch_error(matrix(T); alg) + else + error( + "svd algorithm $alg is not currently supported. 
Please see the documentation for currently supported algorithms.", + ) end - elseif alg == "qr_iteration" - MUSV = svd_catch_error(matrix(T); alg=LinearAlgebra.QRIteration()) if isnothing(MUSV) - # If "qr_iteration" fails, try "recursive" - alg = "recursive" - MUSV = svd_recursive(matrix(T)) + if any(isnan, expose(T)) + println("SVD failed, the matrix you were trying to SVD contains NaNs.") + else + println(lapack_svd_error_message(alg)) + end + return nothing end - elseif alg == "recursive" - MUSV = svd_recursive(matrix(T)) - elseif alg == "qr_algorithm" || alg == "jacobi_algorithm" - MUSV = svd_catch_error(matrix(T); alg) - else - error( - "svd algorithm $alg is not currently supported. Please see the documentation for currently supported algorithms.", - ) - end - if isnothing(MUSV) - if any(isnan, expose(T)) - println("SVD failed, the matrix you were trying to SVD contains NaNs.") + MU, MS, MV = MUSV + conj!(MV) + #end # @timeit_debug + + P = MS .^ 2 + if any(!isnothing, (maxdim, cutoff)) + P, truncerr, _ = truncate!!( + P; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff + ) else - println(lapack_svd_error_message(alg)) + truncerr = 0.0 end - return nothing - end - MU, MS, MV = MUSV - conj!(MV) - #end # @timeit_debug - - P = MS .^ 2 - if any(!isnothing, (maxdim, cutoff)) - P, truncerr, _ = truncate!!( - P; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff - ) - else - truncerr = 0.0 - end - spec = Spectrum(P, truncerr) - dS = length(P) - if dS < length(MS) - MU = expose(MU)[:, 1:dS] - # Fails on some GPU backends like Metal. - # resize!(MS, dS) - MS = MS[1:dS] - MV = expose(MV)[:, 1:dS] - end - - # Make the new indices to go onto U and V - u = eltype(IndsT)(dS) - v = eltype(IndsT)(dS) - Uinds = IndsT((ind(T, 1), u)) - Sinds = IndsT((u, v)) - Vinds = IndsT((ind(T, 2), v)) - U = tensor(Dense(vec(MU)), Uinds) - S = tensor(Diag(MS), Sinds) - V = tensor(Dense(vec(MV)), Vinds) - return U, S, V, spec + spec = Spectrum(P, truncerr) + dS = length(P) + if dS < length(MS) + MU = expose(MU)[:, 1:dS] + # Fails on some GPU backends like Metal. + # resize!(MS, dS) + MS = MS[1:dS] + MV = expose(MV)[:, 1:dS] + end + + # Make the new indices to go onto U and V + u = eltype(IndsT)(dS) + v = eltype(IndsT)(dS) + Uinds = IndsT((ind(T, 1), u)) + Sinds = IndsT((u, v)) + Vinds = IndsT((ind(T, 2), v)) + U = tensor(Dense(vec(MU)), Uinds) + S = tensor(Diag(MS), Sinds) + V = tensor(Dense(vec(MV)), Vinds) + return U, S, V, spec end function LinearAlgebra.eigen( - T::Hermitian{ElT,<:DenseTensor{ElT,2,IndsT}}; - mindim=nothing, - maxdim=nothing, - cutoff=nothing, - use_absolute_cutoff=nothing, - use_relative_cutoff=nothing, -) where {ElT<:Union{Real,Complex},IndsT} - matrixT = matrix(T) - ## TODO Here I am calling parent to ensure that the correct `any` function - ## is envoked for non-cpu matrices - ## TODO use expose here - if any(!isfinite, parent(matrixT)) - throw( - ArgumentError( - "Trying to perform the eigendecomposition of a matrix containing NaNs or Infs" - ), - ) - end - - ### What do we do if DM is full of Nan or Inf? - DM, VM = eigen(expose(matrixT)) - - # Sort by largest to smallest eigenvalues - # TODO: Replace `cpu` with `unwrap_array_type` dispatch. 
- p = sortperm(cpu(DM); rev=true, by=abs) - DM = DM[p] - VM = VM[:, p] + T::Hermitian{ElT, <:DenseTensor{ElT, 2, IndsT}}; + mindim = nothing, + maxdim = nothing, + cutoff = nothing, + use_absolute_cutoff = nothing, + use_relative_cutoff = nothing, + ) where {ElT <: Union{Real, Complex}, IndsT} + matrixT = matrix(T) + ## TODO Here I am calling parent to ensure that the correct `any` function + ## is envoked for non-cpu matrices + ## TODO use expose here + if any(!isfinite, parent(matrixT)) + throw( + ArgumentError( + "Trying to perform the eigendecomposition of a matrix containing NaNs or Infs" + ), + ) + end - if any(!isnothing, (maxdim, cutoff)) - DM, truncerr, _ = truncate!!( - DM; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff - ) - dD = length(DM) - if dD < size(VM, 2) - VM = VM[:, 1:dD] + ### What do we do if DM is full of Nan or Inf? + DM, VM = eigen(expose(matrixT)) + + # Sort by largest to smallest eigenvalues + # TODO: Replace `cpu` with `unwrap_array_type` dispatch. + p = sortperm(cpu(DM); rev = true, by = abs) + DM = DM[p] + VM = VM[:, p] + + if any(!isnothing, (maxdim, cutoff)) + DM, truncerr, _ = truncate!!( + DM; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff + ) + dD = length(DM) + if dD < size(VM, 2) + VM = VM[:, 1:dD] + end + else + dD = length(DM) + truncerr = 0.0 end - else - dD = length(DM) - truncerr = 0.0 - end - spec = Spectrum(DM, truncerr) - - # Make the new indices to go onto V - l = eltype(IndsT)(dD) - r = eltype(IndsT)(dD) - Vinds = IndsT((dag(ind(T, 2)), dag(r))) - Dinds = IndsT((l, dag(r))) - V = tensor(Dense(vec(VM)), Vinds) - D = tensor(Diag(DM), Dinds) - return D, V, spec + spec = Spectrum(DM, truncerr) + + # Make the new indices to go onto V + l = eltype(IndsT)(dD) + r = eltype(IndsT)(dD) + Vinds = IndsT((dag(ind(T, 2)), dag(r))) + Dinds = IndsT((l, dag(r))) + V = tensor(Dense(vec(VM)), Vinds) + D = tensor(Diag(DM), Dinds) + return D, V, spec end """ @@ -232,30 +232,30 @@ Sampling is based on https://arxiv.org/abs/math-ph/0609050 such that in the case `n==m`, the unitary matrix will be sampled according to the Haar measure. """ -function random_unitary(::Type{ElT}, n::Int, m::Int) where {ElT<:Number} - return random_unitary(Random.default_rng(), ElT, n, m) +function random_unitary(::Type{ElT}, n::Int, m::Int) where {ElT <: Number} + return random_unitary(Random.default_rng(), ElT, n, m) end function random_unitary(rng::AbstractRNG, DataT::Type{<:AbstractArray}, n::Int, m::Int) - ElT = eltype(DataT) - if n < m - return DataT(random_unitary(rng, ElT, m, n)') - end - F = qr(randn(rng, ElT, n, m)) - Q = DataT(F.Q) - # The upper triangle of F.factors - # are the elements of R. - # Multiply cols of Q by the signs - # that would make diagonal of R - # non-negative: - for c in 1:size(Q, 2) - Q[:, c] .*= sign(F.factors[c, c]) - end - return Q + ElT = eltype(DataT) + if n < m + return DataT(random_unitary(rng, ElT, m, n)') + end + F = qr(randn(rng, ElT, n, m)) + Q = DataT(F.Q) + # The upper triangle of F.factors + # are the elements of R. 
+ # Multiply cols of Q by the signs + # that would make diagonal of R + # non-negative: + for c in 1:size(Q, 2) + Q[:, c] .*= sign(F.factors[c, c]) + end + return Q end -function random_unitary(rng::AbstractRNG, ::Type{ElT}, n::Int, m::Int) where {ElT<:Number} - return random_unitary(rng, set_ndims(default_datatype(ElT), 2), n, m) +function random_unitary(rng::AbstractRNG, ::Type{ElT}, n::Int, m::Int) where {ElT <: Number} + return random_unitary(rng, set_ndims(default_datatype(ElT), 2), n, m) end random_unitary(n::Int, m::Int) = random_unitary(ComplexF64, n, m) @@ -270,99 +270,99 @@ identity, or if m > n O*transpose(O) is the identity. Optionally can pass a real number type as the first argument to obtain a matrix of that type. """ -random_orthog(::Type{ElT}, n::Int, m::Int) where {ElT<:Real} = random_unitary(ElT, n, m) +random_orthog(::Type{ElT}, n::Int, m::Int) where {ElT <: Real} = random_unitary(ElT, n, m) random_orthog(n::Int, m::Int) = random_orthog(Float64, n, m) function LinearAlgebra.eigen( - T::DenseTensor{ElT,2,IndsT}; - mindim=nothing, - maxdim=nothing, - cutoff=nothing, - use_absolute_cutoff=nothing, - use_relative_cutoff=nothing, -) where {ElT<:Union{Real,Complex},IndsT} - matrixT = matrix(T) - if any(!isfinite, matrixT) - throw( - ArgumentError( - "Trying to perform the eigendecomposition of a matrix containing NaNs or Infs" - ), - ) - end - - DM, VM = eigen(expose(matrixT)) - - # Sort by largest to smallest eigenvalues - #p = sortperm(DM; rev = true) - #DM = DM[p] - #VM = VM[:,p] + T::DenseTensor{ElT, 2, IndsT}; + mindim = nothing, + maxdim = nothing, + cutoff = nothing, + use_absolute_cutoff = nothing, + use_relative_cutoff = nothing, + ) where {ElT <: Union{Real, Complex}, IndsT} + matrixT = matrix(T) + if any(!isfinite, matrixT) + throw( + ArgumentError( + "Trying to perform the eigendecomposition of a matrix containing NaNs or Infs" + ), + ) + end - if any(!isnothing, (maxdim, cutoff)) - DM, truncerr, _ = truncate!!( - DM; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff - ) - dD = length(DM) - if dD < size(VM, 2) - VM = VM[:, 1:dD] + DM, VM = eigen(expose(matrixT)) + + # Sort by largest to smallest eigenvalues + #p = sortperm(DM; rev = true) + #DM = DM[p] + #VM = VM[:,p] + + if any(!isnothing, (maxdim, cutoff)) + DM, truncerr, _ = truncate!!( + DM; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff + ) + dD = length(DM) + if dD < size(VM, 2) + VM = VM[:, 1:dD] + end + else + dD = length(DM) + truncerr = 0.0 end - else - dD = length(DM) - truncerr = 0.0 - end - spec = Spectrum(abs.(DM), truncerr) - - i1, i2 = inds(T) - - # Make the new indices to go onto D and V - l = typeof(i1)(dD) - r = dag(sim(l)) - Dinds = (l, r) - Vinds = (dag(i2), r) - D = complex(tensor(Diag(DM), Dinds)) - V = complex(tensor(Dense(vec(VM)), Vinds)) - return D, V, spec + spec = Spectrum(abs.(DM), truncerr) + + i1, i2 = inds(T) + + # Make the new indices to go onto D and V + l = typeof(i1)(dD) + r = dag(sim(l)) + Dinds = (l, r) + Vinds = (dag(i2), r) + D = complex(tensor(Diag(DM), Dinds)) + V = complex(tensor(Dense(vec(VM)), Vinds)) + return D, V, spec end # LinearAlgebra.qr -function qr(T::DenseTensor{<:Any,2}; positive=false) - qxf = positive ? qr_positive : qr - return qx(qxf, T) +function qr(T::DenseTensor{<:Any, 2}; positive = false) + qxf = positive ? qr_positive : qr + return qx(qxf, T) end # NDTensors.Expose.ql -function ql(T::DenseTensor{<:Any,2}; positive=false) - qxf = positive ? 
ql_positive : ql - return qx(qxf, T) +function ql(T::DenseTensor{<:Any, 2}; positive = false) + qxf = positive ? ql_positive : ql + return qx(qxf, T) end # # Generic function for qr and ql decomposition of dense matrix. # The X tensor = R or L. # -function qx(qx::Function, T::DenseTensor{<:Any,2}) - QM, XM = qx(expose(matrix(T))) - # Be aware that if positive==false, then typeof(QM)=LinearAlgebra.QRCompactWYQ, not Matrix - # It gets converted to matrix below. - # Make the new indices to go onto Q and R - q, r = inds(T) - q = dim(q) < dim(r) ? sim(q) : sim(r) - IndsT = indstype(T) #get the index type - Qinds = IndsT((ind(T, 1), q)) - Xinds = IndsT((q, ind(T, 2))) - QM = convert(typeof(XM), QM) - ## Here I convert QM twice because of an issue in CUDA where convert does not take QM to be a UnifiedBuffer array - QM = convert(typeof(XM), QM) - Q = tensor(Dense(vec(QM)), Qinds) #Q was strided - X = tensor(Dense(vec(XM)), Xinds) - return Q, X +function qx(qx::Function, T::DenseTensor{<:Any, 2}) + QM, XM = qx(expose(matrix(T))) + # Be aware that if positive==false, then typeof(QM)=LinearAlgebra.QRCompactWYQ, not Matrix + # It gets converted to matrix below. + # Make the new indices to go onto Q and R + q, r = inds(T) + q = dim(q) < dim(r) ? sim(q) : sim(r) + IndsT = indstype(T) #get the index type + Qinds = IndsT((ind(T, 1), q)) + Xinds = IndsT((q, ind(T, 2))) + QM = convert(typeof(XM), QM) + ## Here I convert QM twice because of an issue in CUDA where convert does not take QM to be a UnifiedBuffer array + QM = convert(typeof(XM), QM) + Q = tensor(Dense(vec(QM)), Qinds) #Q was strided + X = tensor(Dense(vec(XM)), Xinds) + return Q, X end # Version of `sign` that returns one # if `x == 0`. function nonzero_sign(x) - iszero(x) && return one(x) - return sign(x) + iszero(x) && return one(x) + return sign(x) end # @@ -379,12 +379,12 @@ non-negative. Such a QR decomposition of a matrix is unique. Returns a tuple (Q,R). """ function qr_positive(M::AbstractMatrix) - sparseQ, R = qr(M) - Q = convert(typeof(R), sparseQ) - signs = nonzero_sign.(diag(R)) - Q = Q * Diagonal(signs) - R = Diagonal(conj.(signs)) * R - return (Q, R) + sparseQ, R = qr(M) + Q = convert(typeof(R), sparseQ) + signs = nonzero_sign.(diag(R)) + Q = Q * Diagonal(signs) + R = Diagonal(conj.(signs)) * R + return (Q, R) end using TypeParameterAccessors: unwrap_array_type @@ -397,23 +397,23 @@ non-negative. Such a QL decomposition of a matrix is unique. Returns a tuple (Q,L). """ function ql_positive(M::AbstractMatrix) - # TODO: Change to `isgpu`, or better yet rewrite - # in terms of broadcasting and linear algebra - # like `qr_positive`. - sparseQ, L = ql(M) - Q = convert(typeof(L), sparseQ) - nr, nc = size(L) - dc = nc > nr ? nc - nr : 0 #diag is shifted over by dc if nc>nr - for c in 1:(nc - dc) - if L[c, c + dc] != 0.0 #sign(0.0)==0.0 so we don't want to zero out a column of Q. - sign_Lc = sign(L[c, c + dc]) - if c <= nr && !isone(sign_Lc) - L[c, 1:(c + dc)] *= sign_Lc #only fip non-zero portion of the column. - Q[:, c] *= conj(sign_Lc) - end + # TODO: Change to `isgpu`, or better yet rewrite + # in terms of broadcasting and linear algebra + # like `qr_positive`. + sparseQ, L = ql(M) + Q = convert(typeof(L), sparseQ) + nr, nc = size(L) + dc = nc > nr ? nc - nr : 0 #diag is shifted over by dc if nc>nr + for c in 1:(nc - dc) + if L[c, c + dc] != 0.0 #sign(0.0)==0.0 so we don't want to zero out a column of Q. 
+ sign_Lc = sign(L[c, c + dc]) + if c <= nr && !isone(sign_Lc) + L[c, 1:(c + dc)] *= sign_Lc #only fip non-zero portion of the column. + Q[:, c] *= conj(sign_Lc) + end + end end - end - return (Q, L) + return (Q, L) end # @@ -421,63 +421,63 @@ end # before letting lapack overwirte it. # function ql(A::AbstractMatrix) - Base.require_one_based_indexing(A) - T = eltype(A) - AA = similar(A, LinearAlgebra._qreltype(T), size(A)) - copyto!(expose(AA), expose(A)) - Q, L = ql!(AA) - return (Q, L) + Base.require_one_based_indexing(A) + T = eltype(A) + AA = similar(A, LinearAlgebra._qreltype(T), size(A)) + copyto!(expose(AA), expose(A)) + Q, L = ql!(AA) + return (Q, L) end # # This is where the low level call to lapack actually occurs. Most of the work is # about unpacking Q and L from the A matrix. # function ql!(A::StridedMatrix{<:LAPACK.BlasFloat}) - ## TODO is this really necessary here, we could create Expose function if - ## we need this function on CU/GPU - if iscu(A) - throw("Error: ql is not implemented in CUDA.jl") - end - tau = Base.similar(A, min(size(A)...)) - x = LAPACK.geqlf!(A, tau) - #save L from the lower portion of A, before orgql! mangles it! - nr, nc = size(A) - mn = min(nr, nc) - L = similar(A, (mn, nc)) - for r in 1:mn - for c in 1:(r + nc - mn) - L[r, c] = A[r + nr - mn, c] - end - for c in (r + 1 + nc - mn):nc - L[r, c] = 0.0 + ## TODO is this really necessary here, we could create Expose function if + ## we need this function on CU/GPU + if iscu(A) + throw("Error: ql is not implemented in CUDA.jl") end - end - # Now we need shift the orth vectors from the right side of Q over the left side, before - if (mn < nc) - for r in 1:nr - for c in 1:mn - A[r, c] = A[r, c + nc - mn] - end + tau = Base.similar(A, min(size(A)...)) + x = LAPACK.geqlf!(A, tau) + #save L from the lower portion of A, before orgql! mangles it! + nr, nc = size(A) + mn = min(nr, nc) + L = similar(A, (mn, nc)) + for r in 1:mn + for c in 1:(r + nc - mn) + L[r, c] = A[r + nr - mn, c] + end + for c in (r + 1 + nc - mn):nc + L[r, c] = 0.0 + end end - for r in 1:nr - A = A[:, 1:mn] #whack the extra columns in A. + # Now we need shift the orth vectors from the right side of Q over the left side, before + if (mn < nc) + for r in 1:nr + for c in 1:mn + A[r, c] = A[r, c + nc - mn] + end + end + for r in 1:nr + A = A[:, 1:mn] #whack the extra columns in A. 
+ end end - end - LAPACK.orgql!(A, tau) - return A, L + LAPACK.orgql!(A, tau) + return A, L end # TODO: support alg keyword argument to choose the svd algorithm -function polar(T::DenseTensor{ElT,2,IndsT}) where {ElT,IndsT} - QM, RM = polar(matrix(T)) - dim = size(QM, 2) - # Make the new indices to go onto Q and R - q = eltype(IndsT)(dim) - # TODO: use push/pushfirst instead of a constructor - # call here - Qinds = IndsT((ind(T, 1), q)) - Rinds = IndsT((q, ind(T, 2))) - Q = tensor(Dense(vec(QM)), Qinds) - R = tensor(Dense(vec(RM)), Rinds) - return Q, R +function polar(T::DenseTensor{ElT, 2, IndsT}) where {ElT, IndsT} + QM, RM = polar(matrix(T)) + dim = size(QM, 2) + # Make the new indices to go onto Q and R + q = eltype(IndsT)(dim) + # TODO: use push/pushfirst instead of a constructor + # call here + Qinds = IndsT((ind(T, 1), q)) + Rinds = IndsT((q, ind(T, 2))) + Q = tensor(Dense(vec(QM)), Qinds) + R = tensor(Dense(vec(RM)), Rinds) + return Q, R end diff --git a/NDTensors/src/tensor/set_types.jl b/NDTensors/src/tensor/set_types.jl index 9830b81d93..438921b3ed 100644 --- a/NDTensors/src/tensor/set_types.jl +++ b/NDTensors/src/tensor/set_types.jl @@ -1,31 +1,31 @@ using TypeParameterAccessors: TypeParameterAccessors, Position, parenttype function TypeParameterAccessors.set_ndims(arraytype::Type{<:Tensor}, ndims) - # TODO: Implement something like: - # ```julia - # return set_storagetype(arraytype, set_ndims(storagetype(arraytype), ndims)) - # ``` - # However, we will also need to define `set_ndims(indstype(arraytype), ndims)` - # and use `set_indstype(arraytype, set_ndims(indstype(arraytype), ndims))`. - return error( - "Setting the number dimensions of the array type `$arraytype` (to `$ndims`) is not currently defined.", - ) + # TODO: Implement something like: + # ```julia + # return set_storagetype(arraytype, set_ndims(storagetype(arraytype), ndims)) + # ``` + # However, we will also need to define `set_ndims(indstype(arraytype), ndims)` + # and use `set_indstype(arraytype, set_ndims(indstype(arraytype), ndims))`. + return error( + "Setting the number dimensions of the array type `$arraytype` (to `$ndims`) is not currently defined.", + ) end function set_storagetype(tensortype::Type{<:Tensor}, storagetype) - return Tensor{eltype(tensortype),ndims(tensortype),storagetype,indstype(tensortype)} + return Tensor{eltype(tensortype), ndims(tensortype), storagetype, indstype(tensortype)} end # TODO: Modify the `storagetype` according to `inds`, such as the dimensions? # TODO: Make a version that accepts `indstype::Type`? 
function TypeParameterAccessors.set_indstype(tensortype::Type{<:Tensor}, inds::Tuple) - return Tensor{eltype(tensortype),length(inds),storagetype(tensortype),typeof(inds)} + return Tensor{eltype(tensortype), length(inds), storagetype(tensortype), typeof(inds)} end TypeParameterAccessors.parenttype(tensortype::Type{<:Tensor}) = storagetype(tensortype) function TypeParameterAccessors.parenttype(storagetype::Type{<:TensorStorage}) - return datatype(storagetype) + return datatype(storagetype) end function TypeParameterAccessors.position(::Type{<:Tensor}, ::typeof(parenttype)) - return Position(3) + return Position(3) end diff --git a/NDTensors/src/tensor/similar.jl b/NDTensors/src/tensor/similar.jl index 3facf61e36..4ef609b34e 100644 --- a/NDTensors/src/tensor/similar.jl +++ b/NDTensors/src/tensor/similar.jl @@ -8,42 +8,42 @@ similar(tensor::Tensor, eltype::Type) = setstorage(tensor, similar(storage(tenso # NDTensors.similar function similar(tensor::Tensor, dims::Tuple) - return setinds(setstorage(tensor, similar(storage(tensor), dims)), dims) + return setinds(setstorage(tensor, similar(storage(tensor), dims)), dims) end # NDTensors.similar function similar(tensor::Tensor, dims::Dims) - return setinds(setstorage(tensor, similar(storage(tensor), dims)), dims) + return setinds(setstorage(tensor, similar(storage(tensor), dims)), dims) end # NDTensors.similar function similar(tensortype::Type{<:Tensor}, dims::Tuple) - # TODO: Is there a better constructor pattern for this? - # Maybe use `setstorage(::Type{<:Tensor}, ...)` and - # `setinds(::Type{<:Tensor}, ...)`? - return similartype(tensortype, dims)( - AllowAlias(), similar(storagetype(tensortype), dims), dims - ) + # TODO: Is there a better constructor pattern for this? + # Maybe use `setstorage(::Type{<:Tensor}, ...)` and + # `setinds(::Type{<:Tensor}, ...)`? + return similartype(tensortype, dims)( + AllowAlias(), similar(storagetype(tensortype), dims), dims + ) end # NDTensors.similar function similar(tensortype::Type{<:Tensor}, dims::Dims) - # TODO: Is there a better constructor pattern for this? - # Maybe use `setstorage(::Type{<:Tensor}, ...)` and - # `setinds(::Type{<:Tensor}, ...)`? - return similartype(tensortype, dims)( - AllowAlias(), similar(storagetype(tensortype), dims), dims - ) + # TODO: Is there a better constructor pattern for this? + # Maybe use `setstorage(::Type{<:Tensor}, ...)` and + # `setinds(::Type{<:Tensor}, ...)`? 
+ return similartype(tensortype, dims)( + AllowAlias(), similar(storagetype(tensortype), dims), dims + ) end # NDTensors.similar function similar(tensor::Tensor, eltype::Type, dims::Tuple) - return setinds(setstorage(tensor, similar(storage(tensor), eltype, dims)), dims) + return setinds(setstorage(tensor, similar(storage(tensor), eltype, dims)), dims) end # NDTensors.similar function similar(tensor::Tensor, eltype::Type, dims::Dims) - return setinds(setstorage(tensor, similar(storage(tensor), eltype, dims)), dims) + return setinds(setstorage(tensor, similar(storage(tensor), eltype, dims)), dims) end # Base overloads @@ -52,20 +52,20 @@ Base.similar(tensor::Tensor, eltype::Type) = NDTensors.similar(tensor, eltype) Base.similar(tensor::Tensor, dims::Tuple) = NDTensors.similar(tensor, dims) Base.similar(tensor::Tensor, dims::Dims) = NDTensors.similar(tensor, dims) function Base.similar(tensor::Tensor, eltype::Type, dims::Tuple) - return NDTensors.similar(tensor, eltype, dims) + return NDTensors.similar(tensor, eltype, dims) end function Base.similar(tensor::Tensor, eltype::Type, dims::Dims) - return NDTensors.similar(tensor, eltype, dims) + return NDTensors.similar(tensor, eltype, dims) end function TypeParameterAccessors.similartype(tensortype::Type{<:Tensor}, eltype::Type) - return set_storagetype(tensortype, similartype(storagetype(tensortype), eltype)) + return set_storagetype(tensortype, similartype(storagetype(tensortype), eltype)) end function TypeParameterAccessors.similartype(tensortype::Type{<:Tensor}, dims::Tuple) - tensortype_new_inds = set_indstype(tensortype, dims) - # Need to pass `dims` in case that information is needed to make a storage type, - # for example `BlockSparse` needs the number of dimensions. - storagetype_new_inds = similartype(storagetype(tensortype_new_inds), dims) - return set_storagetype(tensortype_new_inds, storagetype_new_inds) + tensortype_new_inds = set_indstype(tensortype, dims) + # Need to pass `dims` in case that information is needed to make a storage type, + # for example `BlockSparse` needs the number of dimensions. + storagetype_new_inds = similartype(storagetype(tensortype_new_inds), dims) + return set_storagetype(tensortype_new_inds, storagetype_new_inds) end diff --git a/NDTensors/src/tensor/tensor.jl b/NDTensors/src/tensor/tensor.jl index 01012fc9b6..80b6ece502 100644 --- a/NDTensors/src/tensor/tensor.jl +++ b/NDTensors/src/tensor/tensor.jl @@ -6,55 +6,55 @@ Tensor{StoreT,IndsT} A plain old tensor (with order independent interface and no assumption of labels) """ -struct Tensor{ElT,N,StoreT,IndsT} <: AbstractArray{ElT,N} - storage::StoreT - inds::IndsT - - """ - Tensor{ElT,N,StoreT,IndsT}(inds, store::StorageType) - - Internal constructor for creating a Tensor from the - storage and indices. - - The Tensor is a view of the tensor storage. - - For normal usage, use the Tensor(store::TensorStorage, inds) - and tensor(store::TensorStorage, inds) constructors. - """ - function Tensor{ElT,N,StoreT,IndsT}( - ::AllowAlias, storage, inds::Tuple - ) where {ElT,N,StoreT,IndsT} - @assert ElT == eltype(StoreT) - @assert length(inds) == N - return new{ElT,N,StoreT,IndsT}(storage, inds) - end +struct Tensor{ElT, N, StoreT, IndsT} <: AbstractArray{ElT, N} + storage::StoreT + inds::IndsT + + """ + Tensor{ElT,N,StoreT,IndsT}(inds, store::StorageType) + + Internal constructor for creating a Tensor from the + storage and indices. + + The Tensor is a view of the tensor storage. 
+ + For normal usage, use the Tensor(store::TensorStorage, inds) + and tensor(store::TensorStorage, inds) constructors. + """ + function Tensor{ElT, N, StoreT, IndsT}( + ::AllowAlias, storage, inds::Tuple + ) where {ElT, N, StoreT, IndsT} + @assert ElT == eltype(StoreT) + @assert length(inds) == N + return new{ElT, N, StoreT, IndsT}(storage, inds) + end end ## Tensor constructors -function Tensor{ElT,N,StoreT,IndsT}( - ::NeverAlias, storage::TensorStorage, inds -) where {ElT,N,StoreT<:TensorStorage,IndsT} - return Tensor{ElT,N,StoreT,IndsT}(AllowAlias(), copy(storage), inds) +function Tensor{ElT, N, StoreT, IndsT}( + ::NeverAlias, storage::TensorStorage, inds + ) where {ElT, N, StoreT <: TensorStorage, IndsT} + return Tensor{ElT, N, StoreT, IndsT}(AllowAlias(), copy(storage), inds) end # Constructs with undef -function Tensor{ElT,N,StoreT,IndsT}( - ::UndefInitializer, inds::Tuple -) where {ElT,N,StoreT<:TensorStorage,IndsT} - return Tensor{ElT,N,StoreT,IndsT}(AllowAlias(), similar(StoreT, inds), inds) +function Tensor{ElT, N, StoreT, IndsT}( + ::UndefInitializer, inds::Tuple + ) where {ElT, N, StoreT <: TensorStorage, IndsT} + return Tensor{ElT, N, StoreT, IndsT}(AllowAlias(), similar(StoreT, inds), inds) end # constructs with the value x -function Tensor{ElT,N,StoreT,IndsT}( - x::S, inds::Tuple -) where {S,ElT,N,StoreT<:TensorStorage,IndsT} - return Tensor{ElT,N,StoreT,IndsT}(AllowAlias(), fill!(similar(StoreT, inds), x), inds) +function Tensor{ElT, N, StoreT, IndsT}( + x::S, inds::Tuple + ) where {S, ElT, N, StoreT <: TensorStorage, IndsT} + return Tensor{ElT, N, StoreT, IndsT}(AllowAlias(), fill!(similar(StoreT, inds), x), inds) end # constructs with zeros -function Tensor{ElT,N,StoreT,IndsT}(inds::Tuple) where {ElT,N,StoreT<:TensorStorage,IndsT} - return Tensor{ElT,N,StoreT,IndsT}(AllowAlias(), StoreT(dim(inds)), inds) +function Tensor{ElT, N, StoreT, IndsT}(inds::Tuple) where {ElT, N, StoreT <: TensorStorage, IndsT} + return Tensor{ElT, N, StoreT, IndsT}(AllowAlias(), StoreT(dim(inds)), inds) end """ @@ -66,13 +66,13 @@ The Tensor holds a copy of the storage data. The indices `inds` will be converted to a `Tuple`. """ function Tensor(as::AliasStyle, storage::TensorStorage, inds::Tuple) - return Tensor{eltype(storage),length(inds),typeof(storage),typeof(inds)}( - as, storage, inds - ) + return Tensor{eltype(storage), length(inds), typeof(storage), typeof(inds)}( + as, storage, inds + ) end function Tensor(as::NeverAlias, storage::TensorStorage, inds::Tuple) - return Tensor(AllowAlias(), copy(storage), inds) + return Tensor(AllowAlias(), copy(storage), inds) end # Automatically convert to Tuple if the indices are not a Tuple @@ -80,36 +80,36 @@ end # to allow for very large tensor orders in which case Tuple # operations may become too slow. function Tensor(as::AliasStyle, storage, inds) - return Tensor(as, storage, Tuple(inds)) + return Tensor(as, storage, Tuple(inds)) end tensor(args...; kwargs...) = Tensor(AllowAlias(), args...; kwargs...) 
Tensor(storage::TensorStorage, inds::Tuple) = Tensor(NeverAlias(), storage, inds) function Tensor(eltype::Type, inds::Tuple) - return Tensor(AllowAlias(), default_storagetype(eltype, inds)(dim(inds)), inds) + return Tensor(AllowAlias(), default_storagetype(eltype, inds)(dim(inds)), inds) end Tensor(inds::Tuple) = Tensor(default_eltype(), inds) function Tensor(eltype::Type, ::UndefInitializer, inds::Tuple) - return Tensor( - AllowAlias(), default_storagetype(default_datatype(eltype), inds)(undef, inds), inds - ) + return Tensor( + AllowAlias(), default_storagetype(default_datatype(eltype), inds)(undef, inds), inds + ) end Tensor(::UndefInitializer, inds::Tuple) = Tensor(default_eltype(), undef, inds) -function Tensor(data::AbstractArray{<:Any,1}, inds::Tuple) - return Tensor(AllowAlias(), default_storagetype(typeof(data), inds)(data), inds) +function Tensor(data::AbstractArray{<:Any, 1}, inds::Tuple) + return Tensor(AllowAlias(), default_storagetype(typeof(data), inds)(data), inds) end -function Tensor(data::AbstractArray{<:Any,N}, inds::Tuple) where {N} - return Tensor(vec(data), inds) +function Tensor(data::AbstractArray{<:Any, N}, inds::Tuple) where {N} + return Tensor(vec(data), inds) end function Tensor(datatype::Type{<:AbstractArray}, inds::Tuple) - return Tensor(generic_zeros(datatype, dim(inds)), inds) + return Tensor(generic_zeros(datatype, dim(inds)), inds) end ## End Tensor constructors @@ -122,33 +122,33 @@ end # end function randomTensor(::Type{ElT}, inds::Tuple) where {ElT} - return tensor(generic_randn(default_storagetype(default_datatype(ElT)), dim(inds)), inds) + return tensor(generic_randn(default_storagetype(default_datatype(ElT)), dim(inds)), inds) end randomTensor(inds::Tuple) = randomDenseTensor(default_eltype(), inds) function randomTensor(DataT::Type{<:AbstractArray}, inds::Tuple) - return tensor(generic_randn(default_storagetype(DataT), dim(inds)), inds) + return tensor(generic_randn(default_storagetype(DataT), dim(inds)), inds) end function randomTensor(StoreT::Type{<:TensorStorage}, inds::Tuple) - return tensor(generic_randn(StoreT, dim(inds)), inds) + return tensor(generic_randn(StoreT, dim(inds)), inds) end ## End Random Tensor -Base.ndims(::Type{<:Tensor{<:Any,N}}) where {N} = N +Base.ndims(::Type{<:Tensor{<:Any, N}}) where {N} = N # Like `Base.to_shape` but more general, can return # `Index`, etc. Customize for an array/tensor # with custom index types. # NDTensors.to_shape function to_shape(arraytype::Type{<:Tensor}, shape::Tuple) - return shape + return shape end # Allow the storage and indices to be input in opposite ordering function (tensortype::Type{<:Tensor})(as::AliasStyle, inds, storage::TensorStorage) - return tensortype(as, storage, inds) + return tensortype(as, storage, inds) end storage(T::Tensor) = T.storage @@ -161,10 +161,10 @@ data(T::Tensor) = data(storage(T)) datatype(T::Tensor) = datatype(storage(T)) datatype(tensortype::Type{<:Tensor}) = datatype(storagetype(tensortype)) -indstype(::Type{<:Tensor{<:Any,<:Any,<:Any,IndsT}}) where {IndsT} = IndsT +indstype(::Type{<:Tensor{<:Any, <:Any, <:Any, IndsT}}) where {IndsT} = IndsT indstype(T::Tensor) = indstype(typeof(T)) -storagetype(::Type{<:Tensor{<:Any,<:Any,StoreT}}) where {StoreT} = StoreT +storagetype(::Type{<:Tensor{<:Any, <:Any, StoreT}}) where {StoreT} = StoreT storagetype(T::Tensor) = storagetype(typeof(T)) # TODO: deprecate @@ -199,13 +199,13 @@ size(T::Tensor, i::Int) = dim(T, i) # Needed for passing Tensor{T,2} to BLAS/LAPACK # TODO: maybe this should only be for DenseTensor? 
function unsafe_convert(::Type{Ptr{ElT}}, T::Tensor{ElT}) where {ElT} - return unsafe_convert(Ptr{ElT}, storage(T)) + return unsafe_convert(Ptr{ElT}, storage(T)) end copy(T::Tensor) = setstorage(T, copy(storage(T))) function copyto!(R::Tensor, T::Tensor) - return error("Not implemented.") + return error("Not implemented.") end complex(T::Tensor) = setstorage(T, complex(storage(T))) @@ -215,20 +215,20 @@ real(T::Tensor) = setstorage(T, real(storage(T))) imag(T::Tensor) = setstorage(T, imag(storage(T))) function Base.map(f, t1::Tensor, t_tail::Tensor...; kwargs...) - elt = mapreduce(eltype, promote_type, (t1, t_tail...)) - if !iszero(f(zero(elt))) - # TODO: Do a better job of preserving the storage type, if possible. - return tensor(Dense(map(f, array(t1), array.(t_tail)...; kwargs...)), inds(t1)) - end - return setstorage(t1, map(f, storage(t1), storage.(t_tail)...; kwargs...)) + elt = mapreduce(eltype, promote_type, (t1, t_tail...)) + if !iszero(f(zero(elt))) + # TODO: Do a better job of preserving the storage type, if possible. + return tensor(Dense(map(f, array(t1), array.(t_tail)...; kwargs...)), inds(t1)) + end + return setstorage(t1, map(f, storage(t1), storage.(t_tail)...; kwargs...)) end function Base.mapreduce(f, op, t1::Tensor, t_tail::Tensor...; kwargs...) - elt = mapreduce(eltype, promote_type, (t1, t_tail...)) - if !iszero(f(zero(elt))) - return mapreduce(f, op, array(t1), array.(t_tail)...; kwargs...) - end - return mapreduce(f, op, storage(t1), storage.(t_tail)...; kwargs...) + elt = mapreduce(eltype, promote_type, (t1, t_tail...)) + if !iszero(f(zero(elt))) + return mapreduce(f, op, array(t1), array.(t_tail)...; kwargs...) + end + return mapreduce(f, op, storage(t1), storage.(t_tail)...; kwargs...) end # @@ -255,46 +255,46 @@ fill!(T::Tensor, α::Number) = (fill!(storage(T), α); T) -(T::Tensor) = setstorage(T, -storage(T)) function convert( - ::Type{<:Tensor{<:Number,N,StoreR,Inds}}, T::Tensor{<:Number,N,<:Any,Inds} -) where {N,Inds,StoreR} - return setstorage(T, convert(StoreR, storage(T))) + ::Type{<:Tensor{<:Number, N, StoreR, Inds}}, T::Tensor{<:Number, N, <:Any, Inds} + ) where {N, Inds, StoreR} + return setstorage(T, convert(StoreR, storage(T))) end -function zeros(TensorT::Type{<:Tensor{ElT,N,StoreT}}, inds) where {ElT,N,StoreT} - return error("zeros(::Type{$TensorT}, inds) not implemented yet") +function zeros(TensorT::Type{<:Tensor{ElT, N, StoreT}}, inds) where {ElT, N, StoreT} + return error("zeros(::Type{$TensorT}, inds) not implemented yet") end function promote_rule( - ::Type{<:Tensor{ElT1,N1,StoreT1,IndsT1}}, ::Type{<:Tensor{ElT2,N2,StoreT2,IndsT2}} -) where {ElT1,ElT2,N1,N2,StoreT1,StoreT2,IndsT1,IndsT2} - StoreR = promote_type(StoreT1, StoreT2) - ElR = eltype(StoreR) - return Tensor{ElR,N3,StoreR,IndsR} where {N3,IndsR} + ::Type{<:Tensor{ElT1, N1, StoreT1, IndsT1}}, ::Type{<:Tensor{ElT2, N2, StoreT2, IndsT2}} + ) where {ElT1, ElT2, N1, N2, StoreT1, StoreT2, IndsT1, IndsT2} + StoreR = promote_type(StoreT1, StoreT2) + ElR = eltype(StoreR) + return Tensor{ElR, N3, StoreR, IndsR} where {N3, IndsR} end function promote_rule( - ::Type{<:Tensor{ElT1,N,StoreT1,Inds}}, ::Type{<:Tensor{ElT2,N,StoreT2,Inds}} -) where {ElT1,ElT2,N,StoreT1,StoreT2,Inds} - StoreR = promote_type(StoreT1, StoreT2) - ElR = eltype(StoreR) - return Tensor{ElR,N,StoreR,Inds} + ::Type{<:Tensor{ElT1, N, StoreT1, Inds}}, ::Type{<:Tensor{ElT2, N, StoreT2, Inds}} + ) where {ElT1, ElT2, N, StoreT1, StoreT2, Inds} + StoreR = promote_type(StoreT1, StoreT2) + ElR = eltype(StoreR) + return Tensor{ElR, N, 
StoreR, Inds} end # Convert the tensor type to the closest dense # type -function dense(::Type{<:Tensor{ElT,NT,StoreT,IndsT}}) where {ElT,NT,StoreT,IndsT} - return Tensor{ElT,NT,dense(StoreT),IndsT} +function dense(::Type{<:Tensor{ElT, NT, StoreT, IndsT}}) where {ElT, NT, StoreT, IndsT} + return Tensor{ElT, NT, dense(StoreT), IndsT} end dense(T::Tensor) = setstorage(T, dense(storage(T))) # Convert to Array, avoiding copying if possible array(T::Tensor) = array(dense(T)) -matrix(T::Tensor{<:Number,2}) = array(T) -vector(T::Tensor{<:Number,1}) = array(T) +matrix(T::Tensor{<:Number, 2}) = array(T) +vector(T::Tensor{<:Number, 1}) = array(T) -array(T::Transpose{<:Any,<:Tensor}) = transpose(array(transpose(T))) -matrix(T::Transpose{<:Any,<:Tensor}) = transpose(array(transpose(T))) +array(T::Transpose{<:Any, <:Tensor}) = transpose(array(transpose(T))) +matrix(T::Transpose{<:Any, <:Tensor}) = transpose(array(transpose(T))) # # Helper functions for BlockSparse-type storage @@ -337,26 +337,26 @@ Check if the specified block is non-zero """ isblocknz(T::Tensor, block) = isblocknz(storage(T), block) -function blockstart(T::Tensor{<:Number,N}, block) where {N} - start_index = @MVector ones(Int, N) - for j in 1:N - ind_j = ind(T, j) - for block_j in 1:(block[j] - 1) - start_index[j] += blockdim(ind_j, block_j) +function blockstart(T::Tensor{<:Number, N}, block) where {N} + start_index = @MVector ones(Int, N) + for j in 1:N + ind_j = ind(T, j) + for block_j in 1:(block[j] - 1) + start_index[j] += blockdim(ind_j, block_j) + end end - end - return Tuple(start_index) + return Tuple(start_index) end -function blockend(T::Tensor{<:Number,N}, block) where {N} - end_index = @MVector zeros(Int, N) - for j in 1:N - ind_j = ind(T, j) - for block_j in 1:block[j] - end_index[j] += blockdim(ind_j, block_j) +function blockend(T::Tensor{<:Number, N}, block) where {N} + end_index = @MVector zeros(Int, N) + for j in 1:N + ind_j = ind(T, j) + for block_j in 1:block[j] + end_index[j] += blockdim(ind_j, block_j) + end end - end - return Tuple(end_index) + return Tuple(end_index) end # @@ -368,39 +368,39 @@ end insertblock!!(T::Tensor, block) = insertblock!(T, block) function tensor_isequal(x, y) - # TODO: Use a reduction to avoid intermediates. - # This doesn't work right now because `mapreduce` - # on `Tensor`s is limited to functions that preserve - # zeros. - # return mapreduce(==, ==, x, y) + # TODO: Use a reduction to avoid intermediates. + # This doesn't work right now because `mapreduce` + # on `Tensor`s is limited to functions that preserve + # zeros. + # return mapreduce(==, ==, x, y) - # TODO: Use `x - y` instead of `map(-, x, y)`. - # `x - y` calls `x .- y` and broadcasting isn't - # defined properly for sparse Tensor storage - # like `Diag` and `BlockSparse`. - return iszero(norm(map(-, x, y))) + # TODO: Use `x - y` instead of `map(-, x, y)`. + # `x - y` calls `x .- y` and broadcasting isn't + # defined properly for sparse Tensor storage + # like `Diag` and `BlockSparse`. 
+ return iszero(norm(map(-, x, y))) end function Base.:(==)(x::Tensor, y::Tensor) - return tensor_isequal(x, y) + return tensor_isequal(x, y) end function Base.:(==)(x::AbstractArray, y::Tensor) - return array(x) == array(y) + return array(x) == array(y) end function Base.:(==)(x::Tensor, y::AbstractArray) - return array(x) == array(y) + return array(x) == array(y) end function Base.isequal(x::Tensor, y::Tensor) - return tensor_isequal(x, y) + return tensor_isequal(x, y) end function Base.isequal(x::AbstractArray, y::Tensor) - return isequal(array(x), array(y)) + return isequal(array(x), array(y)) end function Base.isequal(x::Tensor, y::AbstractArray) - return isequal(array(x), array(y)) + return isequal(array(x), array(y)) end """ @@ -408,8 +408,8 @@ getdiagindex Get the specified value on the diagonal """ -function getdiagindex(T::Tensor{<:Number,N}, ind::Int) where {N} - return getindex(T, CartesianIndex(ntuple(_ -> ind, Val(N)))) +function getdiagindex(T::Tensor{<:Number, N}, ind::Int) where {N} + return getindex(T, CartesianIndex(ntuple(_ -> ind, Val(N)))) end using .Expose: Exposed, expose, unexpose @@ -418,13 +418,13 @@ using .Expose: Exposed, expose, unexpose diag(tensor::Tensor) = diag(expose(tensor)) function diag(ETensor::Exposed) - tensor = unexpose(ETensor) - ## d = NDTensors.similar(T, ElT, (diaglength(T),)) - tensordiag = NDTensors.similar( - dense(typeof(tensor)), eltype(tensor), (diaglength(tensor),) - ) - array(tensordiag) .= diagview(tensor) - return tensordiag + tensor = unexpose(ETensor) + ## d = NDTensors.similar(T, ElT, (diaglength(T),)) + tensordiag = NDTensors.similar( + dense(typeof(tensor)), eltype(tensor), (diaglength(tensor),) + ) + array(tensordiag) .= diagview(tensor) + return tensordiag end """ @@ -432,25 +432,25 @@ setdiagindex! 
Set the specified value on the diagonal """ -function setdiagindex!(T::Tensor{<:Number,N}, val, ind::Int) where {N} - setindex!(T, val, CartesianIndex(ntuple(_ -> ind, Val(N)))) - return T +function setdiagindex!(T::Tensor{<:Number, N}, val, ind::Int) where {N} + setindex!(T, val, CartesianIndex(ntuple(_ -> ind, Val(N)))) + return T end function map_diag!(f::Function, t_dest::Tensor, t_src::Tensor) - map_diag!(f, expose(t_dest), expose(t_src)) - return t_dest + map_diag!(f, expose(t_dest), expose(t_src)) + return t_dest end function map_diag!(f::Function, exposed_t_dest::Exposed, exposed_t_src::Exposed) - diagview(unexpose(exposed_t_dest)) .= f.(diagview(unexpose(exposed_t_src))) - return unexpose(exposed_t_dest) + diagview(unexpose(exposed_t_dest)) .= f.(diagview(unexpose(exposed_t_src))) + return unexpose(exposed_t_dest) end map_diag(f::Function, t::Tensor) = map_diag(f, expose(t)) function map_diag(f::Function, exposed_t::Exposed) - t_dest = copy(exposed_t) - map_diag!(f, expose(t_dest), exposed_t) - return t_dest + t_dest = copy(exposed_t) + map_diag!(f, expose(t_dest), exposed_t) + return t_dest end # @@ -458,22 +458,22 @@ end # function zero_contraction_output( - T1::TensorT1, T2::TensorT2, indsR::IndsR -) where {TensorT1<:Tensor,TensorT2<:Tensor,IndsR} - return zeros(contraction_output_type(TensorT1, TensorT2, indsR), indsR) + T1::TensorT1, T2::TensorT2, indsR::IndsR + ) where {TensorT1 <: Tensor, TensorT2 <: Tensor, IndsR} + return zeros(contraction_output_type(TensorT1, TensorT2, indsR), indsR) end # # Broadcasting # -BroadcastStyle(::Type{T}) where {T<:Tensor} = Broadcast.ArrayStyle{T}() +BroadcastStyle(::Type{T}) where {T <: Tensor} = Broadcast.ArrayStyle{T}() function Base.similar( - bc::Broadcast.Broadcasted{Broadcast.ArrayStyle{T}}, ::Type{ElT} -) where {T<:Tensor,ElT} - A = find_tensor(bc) - return NDTensors.similar(A, ElT) + bc::Broadcast.Broadcasted{Broadcast.ArrayStyle{T}}, ::Type{ElT} + ) where {T <: Tensor, ElT} + A = find_tensor(bc) + return NDTensors.similar(A, ElT) end "`A = find_tensor(As)` returns the first Tensor among the arguments." 
@@ -484,11 +484,11 @@ find_tensor(a::Tensor, rest) = a find_tensor(::Any, rest) = find_tensor(rest) function summary(io::IO, T::Tensor) - for (dim, ind) in enumerate(inds(T)) - println(io, "Dim $dim: ", ind) - end - println(io, typeof(storage(T))) - return println(io, " ", Base.dims2string(dims(T))) + for (dim, ind) in enumerate(inds(T)) + println(io, "Dim $dim: ", ind) + end + println(io, typeof(storage(T))) + return println(io, " ", Base.dims2string(dims(T))) end # @@ -496,4 +496,4 @@ end # print_tensor(io::IO, T::Tensor) = Base.print_array(io, expose(T)) -print_tensor(io::IO, T::Tensor{<:Number,1}) = Base.print_array(io, reshape(T, (dim(T), 1))) +print_tensor(io::IO, T::Tensor{<:Number, 1}) = Base.print_array(io, reshape(T, (dim(T), 1))) diff --git a/NDTensors/src/tensoroperations/generic_tensor_operations.jl b/NDTensors/src/tensoroperations/generic_tensor_operations.jl index ccf889b963..4b8032ad00 100644 --- a/NDTensors/src/tensoroperations/generic_tensor_operations.jl +++ b/NDTensors/src/tensoroperations/generic_tensor_operations.jl @@ -1,8 +1,8 @@ function permutedims(tensor::Tensor, perm) - (ndims(tensor) == length(perm) && isperm(perm)) || - throw(ArgumentError("no valid permutation of dimensions")) - output_tensor = NDTensors.similar(tensor, permute(inds(tensor), perm)) - return permutedims!!(output_tensor, tensor, perm) + (ndims(tensor) == length(perm) && isperm(perm)) || + throw(ArgumentError("no valid permutation of dimensions")) + output_tensor = NDTensors.similar(tensor, permute(inds(tensor), perm)) + return permutedims!!(output_tensor, tensor, perm) end # Version that may overwrite the result or allocate @@ -10,62 +10,62 @@ end # Similar to `BangBang.jl` notation: # https://juliafolds.github.io/BangBang.jl/stable/. function permutedims!!(output_tensor::Tensor, tensor::Tensor, perm, f::Function) - Base.checkdims_perm(output_tensor, tensor, perm) - permutedims!(output_tensor, tensor, perm, f) - return output_tensor + Base.checkdims_perm(output_tensor, tensor, perm) + permutedims!(output_tensor, tensor, perm, f) + return output_tensor end # Equivalent to `permutedims!!(output_tensor, tensor, perm, (r, t) -> t)` function permutedims!!(output_tensor::Tensor, tensor::Tensor, perm) - Base.checkdims_perm(output_tensor, tensor, perm) - permutedims!(output_tensor, tensor, perm) - return output_tensor + Base.checkdims_perm(output_tensor, tensor, perm) + permutedims!(output_tensor, tensor, perm) + return output_tensor end function permutedims!(output_tensor::Tensor, tensor::Tensor, perm, f::Function) - Base.checkdims_perm(output_tensor, tensor, perm) - error( - "`permutedims!(output_tensor::Tensor, tensor::Tensor, perm, f::Function` not implemented for `typeof(output_tensor) = $(typeof(output_tensor))`, `typeof(tensor) = $(typeof(tensor))`, `perm = $perm`, and `f = $f`.", - ) - return output_tensor + Base.checkdims_perm(output_tensor, tensor, perm) + error( + "`permutedims!(output_tensor::Tensor, tensor::Tensor, perm, f::Function` not implemented for `typeof(output_tensor) = $(typeof(output_tensor))`, `typeof(tensor) = $(typeof(tensor))`, `perm = $perm`, and `f = $f`.", + ) + return output_tensor end function permutedims!(output_tensor::Tensor, tensor::Tensor, perm) - Base.checkdims_perm(output_tensor, tensor, perm) - error( - "`permutedims!(output_tensor::Tensor, tensor::Tensor, perm` not implemented for `typeof(output_tensor) = $(typeof(output_tensor))`, `typeof(tensor) = $(typeof(tensor))`, and `perm = $perm`.", - ) - return output_tensor + Base.checkdims_perm(output_tensor, tensor, 
perm) + error( + "`permutedims!(output_tensor::Tensor, tensor::Tensor, perm` not implemented for `typeof(output_tensor) = $(typeof(output_tensor))`, `typeof(tensor) = $(typeof(tensor))`, and `perm = $perm`.", + ) + return output_tensor end function (x::Number * tensor::Tensor) - return NDTensors.tensor(x * storage(tensor), inds(tensor)) + return NDTensors.tensor(x * storage(tensor), inds(tensor)) end (tensor::Tensor * x::Number) = x * tensor function (tensor::Tensor / x::Number) - return NDTensors.tensor(storage(tensor) / x, inds(tensor)) + return NDTensors.tensor(storage(tensor) / x, inds(tensor)) end function contraction_output_type( - tensortype1::Type{<:Tensor}, tensortype2::Type{<:Tensor}, inds -) - return similartype(promote_type(tensortype1, tensortype2), inds) + tensortype1::Type{<:Tensor}, tensortype2::Type{<:Tensor}, inds + ) + return similartype(promote_type(tensortype1, tensortype2), inds) end function contraction_output( - tensor1::Tensor, labelstensor1, tensor2::Tensor, labelstensor2, labelsoutput_tensor -) - indsoutput_tensor = contract_inds( - inds(tensor1), labelstensor1, inds(tensor2), labelstensor2, labelsoutput_tensor - ) - output_tensor = contraction_output(tensor1, tensor2, indsoutput_tensor) - return output_tensor + tensor1::Tensor, labelstensor1, tensor2::Tensor, labelstensor2, labelsoutput_tensor + ) + indsoutput_tensor = contract_inds( + inds(tensor1), labelstensor1, inds(tensor2), labelstensor2, labelsoutput_tensor + ) + output_tensor = contraction_output(tensor1, tensor2, indsoutput_tensor) + return output_tensor end # Trait returning true if the two tensors or storage types can # contract with each other. -@traitdef CanContract{X,Y} +@traitdef CanContract{X, Y} #! format: off @traitimpl CanContract{X,Y} <- can_contract(X, Y) #! 
format: on @@ -73,160 +73,160 @@ end # Assume storage types can contract with each other can_contract(tensor1::Type, tensor2::Type) = true function can_contract(tensor1::Type{<:Tensor}, tensor2::Type{<:Tensor}) - return can_contract(storagetype(tensor1), storagetype(tensor2)) + return can_contract(storagetype(tensor1), storagetype(tensor2)) end function can_contract(tensor1::TensorStorage, tensor2::TensorStorage) - return can_contract(typeof(tensor1), typeof(tensor2)) + return can_contract(typeof(tensor1), typeof(tensor2)) end function can_contract(tensor1::Tensor, tensor2::Tensor) - return can_contract(typeof(tensor1), typeof(tensor2)) + return can_contract(typeof(tensor1), typeof(tensor2)) end # Version where output labels aren't supplied @traitfn function contract( - tensor1::TensorT1, labels_tensor1, tensor2::TensorT2, labels_tensor2 -) where {TensorT1<:Tensor,TensorT2<:Tensor;CanContract{TensorT1,TensorT2}} - labelsoutput_tensor = contract_labels(labels_tensor1, labels_tensor2) - return contract(tensor1, labels_tensor1, tensor2, labels_tensor2, labelsoutput_tensor) + tensor1::TensorT1, labels_tensor1, tensor2::TensorT2, labels_tensor2 + ) where {TensorT1 <: Tensor, TensorT2 <: Tensor; CanContract{TensorT1, TensorT2}} + labelsoutput_tensor = contract_labels(labels_tensor1, labels_tensor2) + return contract(tensor1, labels_tensor1, tensor2, labels_tensor2, labelsoutput_tensor) end @traitfn function contract( - tensor1::TensorT1, labels_tensor1, tensor2::TensorT2, labels_tensor2 -) where {TensorT1<:Tensor,TensorT2<:Tensor;!CanContract{TensorT1,TensorT2}} - return error( - "Can't contract tensor of storage type $(storagetype(tensor1)) with tensor of storage type $(storagetype(tensor2)).", - ) + tensor1::TensorT1, labels_tensor1, tensor2::TensorT2, labels_tensor2 + ) where {TensorT1 <: Tensor, TensorT2 <: Tensor; !CanContract{TensorT1, TensorT2}} + return error( + "Can't contract tensor of storage type $(storagetype(tensor1)) with tensor of storage type $(storagetype(tensor2)).", + ) end function contract( - tensor1::Tensor, labelstensor1, tensor2::Tensor, labelstensor2, labelsoutput_tensor -) - # TODO: put the contract_inds logic into contraction_output, - # call like output_tensor = contraction_ouput(tensor1,labelstensor1,tensor2,labelstensor2) - #indsoutput_tensor = contract_inds(inds(tensor1),labelstensor1,inds(tensor2),labelstensor2,labelsoutput_tensor) - output_tensor = contraction_output( - tensor1, labelstensor1, tensor2, labelstensor2, labelsoutput_tensor - ) - # contract!! version here since the output output_tensor may not - # be mutable (like UniformDiag) - output_tensor = contract!!( - output_tensor, labelsoutput_tensor, tensor1, labelstensor1, tensor2, labelstensor2 - ) - return output_tensor + tensor1::Tensor, labelstensor1, tensor2::Tensor, labelstensor2, labelsoutput_tensor + ) + # TODO: put the contract_inds logic into contraction_output, + # call like output_tensor = contraction_ouput(tensor1,labelstensor1,tensor2,labelstensor2) + #indsoutput_tensor = contract_inds(inds(tensor1),labelstensor1,inds(tensor2),labelstensor2,labelsoutput_tensor) + output_tensor = contraction_output( + tensor1, labelstensor1, tensor2, labelstensor2, labelsoutput_tensor + ) + # contract!! 
version here since the output output_tensor may not + # be mutable (like UniformDiag) + output_tensor = contract!!( + output_tensor, labelsoutput_tensor, tensor1, labelstensor1, tensor2, labelstensor2 + ) + return output_tensor end using .Expose: Exposed, expose, unexpose # Overload this function for immutable storage types function _contract!!( - output_tensor::Tensor, - labelsoutput_tensor, - tensor1::Tensor, - labelstensor1, - tensor2::Tensor, - labelstensor2, - α::Number=1, - β::Number=0, -) - if α ≠ 1 || β ≠ 0 - contract!( - expose(output_tensor), - labelsoutput_tensor, - expose(tensor1), - labelstensor1, - expose(tensor2), - labelstensor2, - α, - β, - ) - else - contract!( - expose(output_tensor), - labelsoutput_tensor, - expose(tensor1), - labelstensor1, - expose(tensor2), - labelstensor2, + output_tensor::Tensor, + labelsoutput_tensor, + tensor1::Tensor, + labelstensor1, + tensor2::Tensor, + labelstensor2, + α::Number = 1, + β::Number = 0, ) - end - return output_tensor + if α ≠ 1 || β ≠ 0 + contract!( + expose(output_tensor), + labelsoutput_tensor, + expose(tensor1), + labelstensor1, + expose(tensor2), + labelstensor2, + α, + β, + ) + else + contract!( + expose(output_tensor), + labelsoutput_tensor, + expose(tensor1), + labelstensor1, + expose(tensor2), + labelstensor2, + ) + end + return output_tensor end function contract!( - output_tensor::Exposed, - labelsoutput_tensor, - tensor1::Exposed, - labelstensor1, - tensor2::Exposed, - labelstensor2, - α::Number=one(Bool), - β::Number=zero(Bool), -) - return contract!( - unexpose(output_tensor), - labelsoutput_tensor, - unexpose(tensor1), - labelstensor1, - unexpose(tensor2), - labelstensor2, - α, - β, - ) + output_tensor::Exposed, + labelsoutput_tensor, + tensor1::Exposed, + labelstensor1, + tensor2::Exposed, + labelstensor2, + α::Number = one(Bool), + β::Number = zero(Bool), + ) + return contract!( + unexpose(output_tensor), + labelsoutput_tensor, + unexpose(tensor1), + labelstensor1, + unexpose(tensor2), + labelstensor2, + α, + β, + ) end # Is this generic for all storage types? function contract!!( - output_tensor::Tensor, - labelsoutput_tensor, - tensor1::Tensor, - labelstensor1, - tensor2::Tensor, - labelstensor2, - α::Number=1, - β::Number=0, -) - Noutput_tensor = ndims(output_tensor) - N1 = ndims(tensor1) - N2 = ndims(tensor2) - if (N1 ≠ 0) && (N2 ≠ 0) && (N1 + N2 == Noutput_tensor) - # Outer product - (α ≠ 1 || β ≠ 0) && error( - "contract!! not yet implemented for outer product tensor contraction with non-trivial α and β", - ) - # TODO: permute tensor1 and tensor2 appropriately first (can be more efficient - # then permuting the result of tensor1⊗tensor2) - # TODO: implement the in-place version directly - output_tensor = outer!!(output_tensor, tensor1, tensor2) - labelsoutput_tensorp = (labelstensor1..., labelstensor2...) 
- perm = getperm(labelsoutput_tensor, labelsoutput_tensorp) - if !is_trivial_permutation(perm) - output_tensorp = reshape(output_tensor, (inds(tensor1)..., inds(tensor2)...)) - output_tensor = permutedims!!(output_tensor, copy(output_tensorp), perm) - end - else - if α ≠ 1 || β ≠ 0 - output_tensor = _contract!!( - output_tensor, + output_tensor::Tensor, labelsoutput_tensor, - tensor1, + tensor1::Tensor, labelstensor1, - tensor2, + tensor2::Tensor, labelstensor2, - α, - β, - ) + α::Number = 1, + β::Number = 0, + ) + Noutput_tensor = ndims(output_tensor) + N1 = ndims(tensor1) + N2 = ndims(tensor2) + if (N1 ≠ 0) && (N2 ≠ 0) && (N1 + N2 == Noutput_tensor) + # Outer product + (α ≠ 1 || β ≠ 0) && error( + "contract!! not yet implemented for outer product tensor contraction with non-trivial α and β", + ) + # TODO: permute tensor1 and tensor2 appropriately first (can be more efficient + # then permuting the result of tensor1⊗tensor2) + # TODO: implement the in-place version directly + output_tensor = outer!!(output_tensor, tensor1, tensor2) + labelsoutput_tensorp = (labelstensor1..., labelstensor2...) + perm = getperm(labelsoutput_tensor, labelsoutput_tensorp) + if !is_trivial_permutation(perm) + output_tensorp = reshape(output_tensor, (inds(tensor1)..., inds(tensor2)...)) + output_tensor = permutedims!!(output_tensor, copy(output_tensorp), perm) + end else - output_tensor = _contract!!( - output_tensor, labelsoutput_tensor, tensor1, labelstensor1, tensor2, labelstensor2 - ) + if α ≠ 1 || β ≠ 0 + output_tensor = _contract!!( + output_tensor, + labelsoutput_tensor, + tensor1, + labelstensor1, + tensor2, + labelstensor2, + α, + β, + ) + else + output_tensor = _contract!!( + output_tensor, labelsoutput_tensor, tensor1, labelstensor1, tensor2, labelstensor2 + ) + end end - end - return output_tensor + return output_tensor end function outer!!(output_tensor::Tensor, tensor1::Tensor, tensor2::Tensor) - outer!(output_tensor, tensor1, tensor2) - return output_tensor + outer!(output_tensor, tensor1, tensor2) + return output_tensor end function outer end diff --git a/NDTensors/src/tensorstorage/default_storage.jl b/NDTensors/src/tensorstorage/default_storage.jl index 872eefc233..1467244e15 100644 --- a/NDTensors/src/tensorstorage/default_storage.jl +++ b/NDTensors/src/tensorstorage/default_storage.jl @@ -1,21 +1,21 @@ ## This is a fil which specifies the default storage type provided some set of parameters ## The parameters are the element type and storage type -default_datatype(eltype::Type=default_eltype()) = Vector{eltype} +default_datatype(eltype::Type = default_eltype()) = Vector{eltype} default_eltype() = Float64 using TypeParameterAccessors: specify_default_type_parameters ## TODO use multiple dispace to make this pick between dense and blocksparse function default_storagetype(datatype::Type{<:AbstractArray}, inds::Tuple) - datatype = specify_default_type_parameters(datatype) - return Dense{eltype(datatype),datatype} + datatype = specify_default_type_parameters(datatype) + return Dense{eltype(datatype), datatype} end function default_storagetype(datatype::Type{<:AbstractArray}) - return default_storagetype(datatype, ()) + return default_storagetype(datatype, ()) end default_storagetype(eltype::Type) = default_storagetype(default_datatype(eltype)) function default_storagetype(eltype::Type, inds::Tuple) - return default_storagetype(default_datatype(eltype), inds) + return default_storagetype(default_datatype(eltype), inds) end default_storagetype() = default_storagetype(default_eltype()) diff --git 
a/NDTensors/src/tensorstorage/set_types.jl b/NDTensors/src/tensorstorage/set_types.jl index 0d4156ed2c..cfba32f65d 100644 --- a/NDTensors/src/tensorstorage/set_types.jl +++ b/NDTensors/src/tensorstorage/set_types.jl @@ -1,7 +1,7 @@ using TypeParameterAccessors: TypeParameterAccessors function TypeParameterAccessors.set_ndims(arraytype::Type{<:TensorStorage}, ndims::Int) - # TODO: Change to this once `TensorStorage` types support wrapping - # non-AbstractVector types. - # return set_datatype(arraytype, set_ndims(datatype(arraytype), ndims)) - return arraytype + # TODO: Change to this once `TensorStorage` types support wrapping + # non-AbstractVector types. + # return set_datatype(arraytype, set_ndims(datatype(arraytype), ndims)) + return arraytype end diff --git a/NDTensors/src/tensorstorage/similar.jl b/NDTensors/src/tensorstorage/similar.jl index 97eba53089..7f1aa5f01f 100644 --- a/NDTensors/src/tensorstorage/similar.jl +++ b/NDTensors/src/tensorstorage/similar.jl @@ -5,54 +5,54 @@ similar(storage::TensorStorage) = setdata(storage, NDTensors.similar(data(storag # NDTensors.similar function similar(storage::TensorStorage, eltype::Type) - return setdata(storage, NDTensors.similar(data(storage), eltype)) + return setdata(storage, NDTensors.similar(data(storage), eltype)) end # NDTensors.similar function similar(storage::TensorStorage, dims::Tuple) - # TODO: Don't convert to an `AbstractVector` with `vec`, once we support - # more general data types. - # return setdata(storage, NDTensors.similar(data(storage), dims)) - return setdata(storage, vec(NDTensors.similar(data(storage), dims))) + # TODO: Don't convert to an `AbstractVector` with `vec`, once we support + # more general data types. + # return setdata(storage, NDTensors.similar(data(storage), dims)) + return setdata(storage, vec(NDTensors.similar(data(storage), dims))) end # NDTensors.similar function similar(storage::TensorStorage, eltype::Type, dims::Tuple) - # TODO: Don't convert to an `AbstractVector` with `vec`, once we support - # more general data types. - # return setdata(storage, NDTensors.similar(data(storage), eltype, dims)) - return setdata(storage, vec(NDTensors.similar(data(storage), eltype, dims))) + # TODO: Don't convert to an `AbstractVector` with `vec`, once we support + # more general data types. + # return setdata(storage, NDTensors.similar(data(storage), eltype, dims)) + return setdata(storage, vec(NDTensors.similar(data(storage), eltype, dims))) end # NDTensors.similar function similar(storagetype::Type{<:TensorStorage}, eltype::Type, dims::Tuple) - return similar(similartype(storagetype, eltype), dims) + return similar(similartype(storagetype, eltype), dims) end # NDTensors.similar function similar(storagetype::Type{<:TensorStorage}, eltype::Type) - return error("Must specify dimensions.") + return error("Must specify dimensions.") end # NDTensors.similar function similar(storagetype::Type{<:TensorStorage}, dims::Tuple) - # TODO: Don't convert to an `AbstractVector` with `vec`, once we support - # more general data types. - # return setdata(storagetype, NDTensors.similar(datatype(storagetype), dims)) - return setdata(storagetype, vec(NDTensors.similar(datatype(storagetype), dims))) + # TODO: Don't convert to an `AbstractVector` with `vec`, once we support + # more general data types. 
+    # return setdata(storagetype, NDTensors.similar(datatype(storagetype), dims))
+    return setdata(storagetype, vec(NDTensors.similar(datatype(storagetype), dims)))
 end
 # NDTensors.similar
 function similar(storagetype::Type{<:TensorStorage}, dims::Dims)
-  # TODO: Don't convert to an `AbstractVector` with `prod`, once we support
-  # more general data types.
-  # return setdata(storagetype, NDTensors.similar(datatype(storagetype), dims))
-  return setdata(storagetype, NDTensors.similar(datatype(storagetype), prod(dims)))
+    # TODO: Don't convert to an `AbstractVector` with `prod`, once we support
+    # more general data types.
+    # return setdata(storagetype, NDTensors.similar(datatype(storagetype), dims))
+    return setdata(storagetype, NDTensors.similar(datatype(storagetype), prod(dims)))
 end
 # NDTensors.similar
 function similar(storagetype::Type{<:TensorStorage}, dims::DimOrInd...)
-  return similar(storagetype, NDTensors.to_shape(dims))
+    return similar(storagetype, NDTensors.to_shape(dims))
 end
 # Define Base.similar in terms of NDTensors.similar
@@ -64,20 +64,20 @@ Base.similar(storage::TensorStorage, eltype::Type) = NDTensors.similar(storage,
 ## Base.similar(storage::TensorStorage, dims::DimOrInd...) = NDTensors.similar(storage, dims...)
 function TypeParameterAccessors.similartype(
-  storagetype::Type{<:TensorStorage}, eltype::Type
-)
-  # TODO: Don't convert to an `AbstractVector` with `set_ndims(datatype, 1)`, once we support
-  # more general data types.
-  # return set_datatype(storagetype, NDTensors.similartype(datatype(storagetype), eltype))
-  return set_datatype(storagetype, set_ndims(similartype(datatype(storagetype), eltype), 1))
+        storagetype::Type{<:TensorStorage}, eltype::Type
+    )
+    # TODO: Don't convert to an `AbstractVector` with `set_ndims(datatype, 1)`, once we support
+    # more general data types.
+    # return set_datatype(storagetype, NDTensors.similartype(datatype(storagetype), eltype))
+    return set_datatype(storagetype, set_ndims(similartype(datatype(storagetype), eltype), 1))
 end
 function TypeParameterAccessors.similartype(storagetype::Type{<:TensorStorage}, dims::Tuple)
-  # TODO: In the future, set the dimensions of the data type based on `dims`, once
-  # more general data types beyond `AbstractVector` are supported.
-  # `similartype` unwraps any wrapped data.
+ return set_ndims( + set_datatype(storagetype, set_ndims(similartype(datatype(storagetype)), 1)), + length(dims), + ) end diff --git a/NDTensors/src/tensorstorage/tensorstorage.jl b/NDTensors/src/tensorstorage/tensorstorage.jl index c3e7675485..6e7fee1b7a 100644 --- a/NDTensors/src/tensorstorage/tensorstorage.jl +++ b/NDTensors/src/tensorstorage/tensorstorage.jl @@ -27,7 +27,7 @@ Base.size(S::TensorStorage) = size(data(S)) Base.@propagate_inbounds Base.getindex(S::TensorStorage, i::Integer) = data(S)[i] Base.@propagate_inbounds function Base.setindex!(S::TensorStorage, v, i::Integer) - return (setindex!(data(S), v, i); S) + return (setindex!(data(S), v, i); S) end (S::TensorStorage * x::Number) = setdata(S, x * data(S)) @@ -38,7 +38,7 @@ end # Needed for passing Tensor{T,2} to BLAS/LAPACK function Base.unsafe_convert(::Type{Ptr{ElT}}, T::TensorStorage{ElT}) where {ElT} - return Base.unsafe_convert(Ptr{ElT}, data(T)) + return Base.unsafe_convert(Ptr{ElT}, data(T)) end # This may need to be overloaded, since storage types @@ -49,11 +49,11 @@ Base.conj!(S::TensorStorage) = (conj!(data(S)); return S) Base.conj(S::TensorStorage) = conj(AllowAlias(), S) function Base.conj(::AllowAlias, S::TensorStorage) - return setdata(S, conj(data(S))) + return setdata(S, conj(data(S))) end function Base.conj(::NeverAlias, S::TensorStorage) - return conj!(copy(S)) + return conj!(copy(S)) end Base.complex(S::TensorStorage) = setdata(S, complex(data(S))) @@ -63,18 +63,18 @@ Base.real(S::TensorStorage) = setdata(S, real(data(S))) Base.imag(S::TensorStorage) = setdata(S, imag(data(S))) function Base.copyto!(S1::TensorStorage, S2::TensorStorage) - return error("Not implemented.") + return error("Not implemented.") end Random.randn!(S::TensorStorage) = randn!(Random.default_rng(), S) Random.randn!(rng::AbstractRNG, S::TensorStorage) = (randn!(rng, data(S)); S) function Base.map(f, t1::TensorStorage, t_tail::TensorStorage...; kwargs...) - return setdata(t1, map(f, data(t1), data.(t_tail)...; kwargs...)) + return setdata(t1, map(f, data(t1), data.(t_tail)...; kwargs...)) end function Base.mapreduce(f, op, t1::TensorStorage, t_tail::TensorStorage...; kwargs...) - return mapreduce(f, op, data(t1), data.(t_tail)...; kwargs...) + return mapreduce(f, op, data(t1), data.(t_tail)...; kwargs...) end Base.fill!(S::TensorStorage, v) = (fill!(data(S), v); S) @@ -85,7 +85,7 @@ scale!(S::TensorStorage, v::Number) = rmul!(S, v) norm(S::TensorStorage) = norm(data(S)) -Base.convert(::Type{T}, S::T) where {T<:TensorStorage} = S +Base.convert(::Type{T}, S::T) where {T <: TensorStorage} = S blockoffsets(S::TensorStorage) = S.blockoffsets diff --git a/NDTensors/test/NDTensorsTestUtils/device_list.jl b/NDTensors/test/NDTensorsTestUtils/device_list.jl index 9294956fc2..f98ca90ff9 100644 --- a/NDTensors/test/NDTensorsTestUtils/device_list.jl +++ b/NDTensors/test/NDTensorsTestUtils/device_list.jl @@ -2,58 +2,58 @@ using Pkg: Pkg using NDTensors: NDTensors if "cuda" in ARGS || "all" in ARGS - ## Right now adding CUDA during Pkg.test results in a - ## compat issues. I am adding it back to test/Project.toml - Pkg.add("CUDA") - using CUDA: CUDA + ## Right now adding CUDA during Pkg.test results in a + ## compat issues. 
I am adding it back to test/Project.toml + Pkg.add("CUDA") + using CUDA: CUDA end if "rocm" in ARGS || "all" in ARGS - ## Warning AMDGPU does not work in Julia versions below 1.8 - Pkg.add("AMDGPU") - using AMDGPU: AMDGPU + ## Warning AMDGPU does not work in Julia versions below 1.8 + Pkg.add("AMDGPU") + using AMDGPU: AMDGPU end if "metal" in ARGS || "all" in ARGS - ## Warning Metal does not work in Julia versions below 1.8 - Pkg.add("Metal") - using Metal: Metal + ## Warning Metal does not work in Julia versions below 1.8 + Pkg.add("Metal") + using Metal: Metal end if "cutensor" in ARGS || "all" in ARGS - Pkg.add("CUDA") - Pkg.add("cuTENSOR") - using CUDA: CUDA - using cuTENSOR: cuTENSOR + Pkg.add("CUDA") + Pkg.add("cuTENSOR") + using CUDA: CUDA + using cuTENSOR: cuTENSOR end using JLArrays: JLArrays, jl function devices_list(test_args) - devs = Vector{Function}(undef, 0) - if isempty(test_args) || "base" in test_args - push!(devs, NDTensors.cpu) - ## Skip jl on lower versions of Julia for now - ## all linear algebra is failing on Julia 1.6 with JLArrays - if VERSION > v"1.7" - push!(devs, jl) + devs = Vector{Function}(undef, 0) + if isempty(test_args) || "base" in test_args + push!(devs, NDTensors.cpu) + ## Skip jl on lower versions of Julia for now + ## all linear algebra is failing on Julia 1.6 with JLArrays + if VERSION > v"1.7" + push!(devs, jl) + end end - end - if "cuda" in test_args || "cutensor" in test_args || "all" in test_args - if CUDA.functional() - push!(devs, NDTensors.CUDAExtensions.cu) - else - println( - "Warning: CUDA.jl is not functional on this architecture and tests will be skipped." - ) + if "cuda" in test_args || "cutensor" in test_args || "all" in test_args + if CUDA.functional() + push!(devs, NDTensors.CUDAExtensions.cu) + else + println( + "Warning: CUDA.jl is not functional on this architecture and tests will be skipped." 
+ ) + end end - end - if "rocm" in test_args || "all" in test_args - push!(devs, NDTensors.AMDGPUExtensions.roc) - end + if "rocm" in test_args || "all" in test_args + push!(devs, NDTensors.AMDGPUExtensions.roc) + end - if "metal" in test_args || "all" in test_args - push!(devs, NDTensors.MetalExtensions.mtl) - end + if "metal" in test_args || "all" in test_args + push!(devs, NDTensors.MetalExtensions.mtl) + end - return devs + return devs end diff --git a/NDTensors/test/NDTensorsTestUtils/is_supported_eltype.jl b/NDTensors/test/NDTensorsTestUtils/is_supported_eltype.jl index 553c87954c..bf6ba20cb2 100644 --- a/NDTensors/test/NDTensorsTestUtils/is_supported_eltype.jl +++ b/NDTensors/test/NDTensorsTestUtils/is_supported_eltype.jl @@ -2,5 +2,5 @@ using NDTensors.MetalExtensions: mtl is_supported_eltype(dev, elt::Type) = true is_supported_eltype(dev::typeof(mtl), elt::Type{Float64}) = false function is_supported_eltype(dev::typeof(mtl), elt::Type{<:Complex}) - return is_supported_eltype(dev, real(elt)) + return is_supported_eltype(dev, real(elt)) end diff --git a/NDTensors/test/broken/readwrite.jl b/NDTensors/test/broken/readwrite.jl index 50576d3aa5..d36c6c73e0 100644 --- a/NDTensors/test/broken/readwrite.jl +++ b/NDTensors/test/broken/readwrite.jl @@ -4,72 +4,72 @@ using NDTensors, Test using HDF5 @testset "Write to Disk and Read from Disk" begin - @testset "HDF5 readwrite Dense storage" begin - # Real case + @testset "HDF5 readwrite Dense storage" begin + # Real case - D = randomTensor(3, 4) + D = randomTensor(3, 4) - fo = h5open("data.h5", "w") - write(fo, "D", D.store) - close(fo) + fo = h5open("data.h5", "w") + write(fo, "D", D.store) + close(fo) - fi = h5open("data.h5", "r") - rDstore = read(fi, "D", Dense{Float64}) - close(fi) - @test rDstore ≈ D.store + fi = h5open("data.h5", "r") + rDstore = read(fi, "D", Dense{Float64}) + close(fi) + @test rDstore ≈ D.store - # Complex case + # Complex case - D = randomTensor(ComplexF64, 3, 4) + D = randomTensor(ComplexF64, 3, 4) - fo = h5open("data.h5", "w") - write(fo, "D", D.store) - close(fo) + fo = h5open("data.h5", "w") + write(fo, "D", D.store) + close(fo) - fi = h5open("data.h5", "r") - rDstore = read(fi, "D", Dense{ComplexF64}) - close(fi) - @test rDstore ≈ D.store - end + fi = h5open("data.h5", "r") + rDstore = read(fi, "D", Dense{ComplexF64}) + close(fi) + @test rDstore ≈ D.store + end - @testset "HDF5 readwrite BlockSparse storage" begin - # Indices - indsA = ([2, 3], [4, 5]) + @testset "HDF5 readwrite BlockSparse storage" begin + # Indices + indsA = ([2, 3], [4, 5]) - # Locations of non-zero blocks - locs = [(1, 2), (2, 1)] + # Locations of non-zero blocks + locs = [(1, 2), (2, 1)] - # Real case + # Real case - B = randomBlockSparseTensor(locs, indsA) + B = randomBlockSparseTensor(locs, indsA) - fo = h5open("data.h5", "w") - write(fo, "B", B.store) - close(fo) + fo = h5open("data.h5", "w") + write(fo, "B", B.store) + close(fo) - fi = h5open("data.h5", "r") - rBstore = read(fi, "B", BlockSparse{Float64}) - close(fi) - @test rBstore ≈ B.store + fi = h5open("data.h5", "r") + rBstore = read(fi, "B", BlockSparse{Float64}) + close(fi) + @test rBstore ≈ B.store - # Complex case + # Complex case - B = randomBlockSparseTensor(ComplexF64, locs, indsA) + B = randomBlockSparseTensor(ComplexF64, locs, indsA) - fo = h5open("data.h5", "w") - write(fo, "B", B.store) - close(fo) + fo = h5open("data.h5", "w") + write(fo, "B", B.store) + close(fo) - fi = h5open("data.h5", "r") - rBstore = read(fi, "B", BlockSparse{ComplexF64}) - close(fi) - @test rBstore 
≈ B.store - end + fi = h5open("data.h5", "r") + rBstore = read(fi, "B", BlockSparse{ComplexF64}) + close(fi) + @test rBstore ≈ B.store + end - # - # Clean up the test hdf5 file - # - rm("data.h5"; force=true) + # + # Clean up the test hdf5 file + # + rm("data.h5"; force = true) end nothing diff --git a/NDTensors/test/lib/runtests.jl b/NDTensors/test/lib/runtests.jl index 1cb9e1b966..7402357d53 100644 --- a/NDTensors/test/lib/runtests.jl +++ b/NDTensors/test/lib/runtests.jl @@ -2,13 +2,13 @@ using NDTensors: NDTensors using Test: @testset @testset "Test NDTensors lib $lib" for lib in [ - "AMDGPUExtensions", - "BackendSelection", - "CUDAExtensions", - "GPUArraysCoreExtensions", - "MetalExtensions", - "Expose", -] - include(joinpath(pkgdir(NDTensors), "src", "lib", lib, "test", "runtests.jl")) + "AMDGPUExtensions", + "BackendSelection", + "CUDAExtensions", + "GPUArraysCoreExtensions", + "MetalExtensions", + "Expose", + ] + include(joinpath(pkgdir(NDTensors), "src", "lib", lib, "test", "runtests.jl")) end end diff --git a/NDTensors/test/runtests.jl b/NDTensors/test/runtests.jl index b073a05e2d..7c63e9c44b 100644 --- a/NDTensors/test/runtests.jl +++ b/NDTensors/test/runtests.jl @@ -1,20 +1,20 @@ using SafeTestsets: @safetestset @safetestset "NDTensors" begin - using Test: @testset - using NDTensors: NDTensors - @testset "$(@__DIR__)" begin - filenames = filter(readdir(@__DIR__)) do f - startswith("test_")(f) && endswith(".jl")(f) + using Test: @testset + using NDTensors: NDTensors + @testset "$(@__DIR__)" begin + filenames = filter(readdir(@__DIR__)) do f + startswith("test_")(f) && endswith(".jl")(f) + end + for dir in ["lib"] + push!(filenames, joinpath(dir, "runtests.jl")) + end + @testset "Test $(@__DIR__)/$filename" for filename in filenames + println("Running $(@__DIR__)/$filename") + include(filename) + end end - for dir in ["lib"] - push!(filenames, joinpath(dir, "runtests.jl")) - end - @testset "Test $(@__DIR__)/$filename" for filename in filenames - println("Running $(@__DIR__)/$filename") - include(filename) - end - end end nothing diff --git a/NDTensors/test/test_blocksparse.jl b/NDTensors/test/test_blocksparse.jl index 30a0e82bf5..aca15ba91d 100644 --- a/NDTensors/test/test_blocksparse.jl +++ b/NDTensors/test/test_blocksparse.jl @@ -2,355 +2,355 @@ using GPUArraysCore: @allowscalar using LinearAlgebra: Hermitian, exp, norm, svd using NDTensors: - NDTensors, - BlockSparseTensor, - array, - blockdims, - blockoffsets, - blockview, - data, - dense, - diag, - diaglength, - dims, - eachnzblock, - inds, - isblocknz, - nnz, - nnzblocks, - randomBlockSparseTensor, - store, - storage + NDTensors, + BlockSparseTensor, + array, + blockdims, + blockoffsets, + blockview, + data, + dense, + diag, + diaglength, + dims, + eachnzblock, + inds, + isblocknz, + nnz, + nnzblocks, + randomBlockSparseTensor, + store, + storage include("NDTensorsTestUtils/NDTensorsTestUtils.jl") using .NDTensorsTestUtils: default_rtol, devices_list, is_supported_eltype using Random: randn! 
using Test: @test, @test_throws, @testset @testset "BlockSparseTensor basic functionality" begin - C = nothing - - @testset "test device: $dev, eltype: $elt" for dev in devices_list(copy(ARGS)), - elt in (Float32, Float64) - - if !is_supported_eltype(dev, elt) - continue - end - # Indices - indsA = ([2, 3], [4, 5]) - - # Locations of non-zero blocks - locs = [(1, 2), (2, 1)] - - A = dev(BlockSparseTensor{elt}(locs, indsA...)) - randn!(A) - - @test blockdims(A, (1, 2)) == (2, 5) - @test blockdims(A, (2, 1)) == (3, 4) - @test !isempty(A) - @test nnzblocks(A) == 2 - @test nnz(A) == 2 * 5 + 3 * 4 - @test inds(A) == ([2, 3], [4, 5]) - @test isblocknz(A, (2, 1)) - @test isblocknz(A, (1, 2)) - @test !isblocknz(A, (1, 1)) - @test !isblocknz(A, (2, 2)) - dA = diag(A) - @test @allowscalar dA ≈ diag(dense(A)) - @test sum(A) ≈ sum(array(A)) - @test prod(A) ≈ prod(array(A)) - - # Test different ways of getting nnz - @test nnz(blockoffsets(A), inds(A)) == nnz(A) - - B = 2 * A - @test B[1, 1] == 2 * A[1, 1] - @test nnz(A) == 2 * 5 + 3 * 4 - @test nnz(B) == 2 * 5 + 3 * 4 - @test nnzblocks(A) == 2 - @test nnzblocks(B) == 2 - - B = A / 2 - @test B[1, 1] == A[1, 1] / 2 - @test nnz(A) == 2 * 5 + 3 * 4 - @test nnz(B) == 2 * 5 + 3 * 4 - @test nnzblocks(A) == 2 - @test nnzblocks(B) == 2 - - @allowscalar begin - A[1, 5] = 15 - A[2, 5] = 25 - - @test A[1, 1] == 0 - @test A[1, 5] == 15 - @test A[2, 5] == 25 - end - D = dense(A) - - @allowscalar begin - @test D == A - - for I in eachindex(A) - @test D[I] == A[I] - end - end - - A12 = blockview(A, (1, 2)) - - @test dims(A12) == (2, 5) - - @allowscalar for I in eachindex(A12) - @test A12[I] == A[I + CartesianIndex(0, 4)] - end - - B = dev(BlockSparseTensor(elt, undef, locs, indsA)) - randn!(B) - - C = A + B - - @allowscalar for I in eachindex(C) - @test C[I] == A[I] + B[I] - end - Cp = NDTensors.map_diag(i -> 2 * i, C) - @allowscalar for i in 1:diaglength(Cp) - @test Cp[i, i] == 2 * C[i, i] - end - - Ap = permutedims(A, (2, 1)) - - @test blockdims(Ap, (1, 2)) == (4, 3) - @test blockdims(Ap, (2, 1)) == (5, 2) - @test nnz(A) == nnz(Ap) - @test nnzblocks(A) == nnzblocks(Ap) - - @allowscalar for I in eachindex(C) - @test A[I] == Ap[NDTensors.permute(I, (2, 1))] - end - - A = dev(BlockSparseTensor(complex(elt), locs, indsA)) - randn!(A) - @test conj(data(store(A))) == data(store(conj(A))) - @test typeof(conj(A)) <: BlockSparseTensor - - @testset "No blocks" begin - T = dev(BlockSparseTensor{elt}(Tuple{Int,Int}[], [2, 2], [2, 2])) - @test nnzblocks(T) == 0 - @test size(T) == (4, 4) - @test length(T) == 16 - @test !isempty(T) - @test isempty(storage(T)) - @test nnz(T) == 0 - @test eltype(T) == elt - @test norm(T) == 0 - end - - @testset "Empty" begin - T = dev(BlockSparseTensor{elt}(Tuple{Int,Int}[], Int[], Int[])) - @test nnzblocks(T) == 0 - @test size(T) == (0, 0) - @test length(T) == 0 - @test isempty(T) - @test isempty(storage(T)) - @test nnz(T) == 0 - @test eltype(T) == elt - @test norm(T) == 0 + C = nothing + + @testset "test device: $dev, eltype: $elt" for dev in devices_list(copy(ARGS)), + elt in (Float32, Float64) + + if !is_supported_eltype(dev, elt) + continue + end + # Indices + indsA = ([2, 3], [4, 5]) + + # Locations of non-zero blocks + locs = [(1, 2), (2, 1)] + + A = dev(BlockSparseTensor{elt}(locs, indsA...)) + randn!(A) + + @test blockdims(A, (1, 2)) == (2, 5) + @test blockdims(A, (2, 1)) == (3, 4) + @test !isempty(A) + @test nnzblocks(A) == 2 + @test nnz(A) == 2 * 5 + 3 * 4 + @test inds(A) == ([2, 3], [4, 5]) + @test isblocknz(A, (2, 1)) + @test 
isblocknz(A, (1, 2)) + @test !isblocknz(A, (1, 1)) + @test !isblocknz(A, (2, 2)) + dA = diag(A) + @test @allowscalar dA ≈ diag(dense(A)) + @test sum(A) ≈ sum(array(A)) + @test prod(A) ≈ prod(array(A)) + + # Test different ways of getting nnz + @test nnz(blockoffsets(A), inds(A)) == nnz(A) + + B = 2 * A + @test B[1, 1] == 2 * A[1, 1] + @test nnz(A) == 2 * 5 + 3 * 4 + @test nnz(B) == 2 * 5 + 3 * 4 + @test nnzblocks(A) == 2 + @test nnzblocks(B) == 2 + + B = A / 2 + @test B[1, 1] == A[1, 1] / 2 + @test nnz(A) == 2 * 5 + 3 * 4 + @test nnz(B) == 2 * 5 + 3 * 4 + @test nnzblocks(A) == 2 + @test nnzblocks(B) == 2 + + @allowscalar begin + A[1, 5] = 15 + A[2, 5] = 25 + + @test A[1, 1] == 0 + @test A[1, 5] == 15 + @test A[2, 5] == 25 + end + D = dense(A) + + @allowscalar begin + @test D == A + + for I in eachindex(A) + @test D[I] == A[I] + end + end + + A12 = blockview(A, (1, 2)) + + @test dims(A12) == (2, 5) + + @allowscalar for I in eachindex(A12) + @test A12[I] == A[I + CartesianIndex(0, 4)] + end + + B = dev(BlockSparseTensor(elt, undef, locs, indsA)) + randn!(B) + + C = A + B + + @allowscalar for I in eachindex(C) + @test C[I] == A[I] + B[I] + end + Cp = NDTensors.map_diag(i -> 2 * i, C) + @allowscalar for i in 1:diaglength(Cp) + @test Cp[i, i] == 2 * C[i, i] + end + + Ap = permutedims(A, (2, 1)) + + @test blockdims(Ap, (1, 2)) == (4, 3) + @test blockdims(Ap, (2, 1)) == (5, 2) + @test nnz(A) == nnz(Ap) + @test nnzblocks(A) == nnzblocks(Ap) + + @allowscalar for I in eachindex(C) + @test A[I] == Ap[NDTensors.permute(I, (2, 1))] + end + + A = dev(BlockSparseTensor(complex(elt), locs, indsA)) + randn!(A) + @test conj(data(store(A))) == data(store(conj(A))) + @test typeof(conj(A)) <: BlockSparseTensor + + @testset "No blocks" begin + T = dev(BlockSparseTensor{elt}(Tuple{Int, Int}[], [2, 2], [2, 2])) + @test nnzblocks(T) == 0 + @test size(T) == (4, 4) + @test length(T) == 16 + @test !isempty(T) + @test isempty(storage(T)) + @test nnz(T) == 0 + @test eltype(T) == elt + @test norm(T) == 0 + end + + @testset "Empty" begin + T = dev(BlockSparseTensor{elt}(Tuple{Int, Int}[], Int[], Int[])) + @test nnzblocks(T) == 0 + @test size(T) == (0, 0) + @test length(T) == 0 + @test isempty(T) + @test isempty(storage(T)) + @test nnz(T) == 0 + @test eltype(T) == elt + @test norm(T) == 0 + end + + @testset "Random constructor" begin + T = dev(randomBlockSparseTensor(elt, [(1, 1), (2, 2)], ([2, 2], [2, 2]))) + @test nnzblocks(T) == 2 + @test nnz(T) == 8 + @test eltype(T) == elt + @test norm(T) ≉ 0 + + Tc = dev(randomBlockSparseTensor(complex(elt), [(1, 1), (2, 2)], ([2, 2], [2, 2]))) + @test nnzblocks(Tc) == 2 + @test nnz(Tc) == 8 + @test eltype(Tc) == complex(elt) + @test norm(Tc) ≉ 0 + end + + @testset "Complex Valued Operations" begin + T = dev(randomBlockSparseTensor(complex(elt), [(1, 1), (2, 2)], ([2, 2], [2, 2]))) + rT = real(T) + @test eltype(rT) == elt + @test nnzblocks(rT) == nnzblocks(T) + iT = imag(T) + @test eltype(iT) == elt + @test nnzblocks(iT) == nnzblocks(T) + @test norm(rT)^2 + norm(iT)^2 ≈ norm(T)^2 + + cT = conj(T) + @test eltype(cT) == complex(elt) + @test nnzblocks(cT) == nnzblocks(T) + end + + @testset "similartype regression test" begin + # Regression test for issue seen in: + # https://github.com/ITensor/ITensorInfiniteMPS.jl/pull/77 + # Previously, `similartype` wasn't using information about the dimensions + # properly and was returning a `BlockSparse` storage of the dimensions + # of the input tensor. 
+ T = dev(BlockSparseTensor(elt, [(1, 1)], ([2], [2]))) + @test NDTensors.ndims( + NDTensors.storagetype(NDTensors.similartype(typeof(T), ([2], [2], [2]))) + ) == 3 + end + + @testset "Random constructor" begin + T = dev(randomBlockSparseTensor(elt, [(1, 1), (2, 2)], ([2, 2], [2, 2]))) + @test nnzblocks(T) == 2 + @test nnz(T) == 8 + @test eltype(T) == elt + @test norm(T) ≉ 0 + + Tc = dev(randomBlockSparseTensor(complex(elt), [(1, 1), (2, 2)], ([2, 2], [2, 2]))) + @test nnzblocks(Tc) == 2 + @test nnz(Tc) == 8 + @test eltype(Tc) == complex(elt) + @test norm(Tc) ≉ 0 + end + + @testset "permute_combine" begin + indsA = ([2, 3], [4, 5], [6, 7, 8]) + locsA = [(2, 1, 1), (1, 2, 1), (2, 2, 3)] + A = dev(BlockSparseTensor{elt}(locsA, indsA...)) + randn!(A) + + B = NDTensors.permute_combine(A, 3, (2, 1)) + @test nnzblocks(A) == nnzblocks(B) + @test nnz(A) == nnz(B) + + Ap = NDTensors.permutedims(A, (3, 2, 1)) + + @allowscalar for (bAp, bB) in zip(eachnzblock(Ap), eachnzblock(B)) + blockAp = blockview(Ap, bAp) + blockB = blockview(B, bB) + @test reshape(blockAp, size(blockB)) == blockB + end + end end - @testset "Random constructor" begin - T = dev(randomBlockSparseTensor(elt, [(1, 1), (2, 2)], ([2, 2], [2, 2]))) - @test nnzblocks(T) == 2 - @test nnz(T) == 8 - @test eltype(T) == elt - @test norm(T) ≉ 0 - - Tc = dev(randomBlockSparseTensor(complex(elt), [(1, 1), (2, 2)], ([2, 2], [2, 2]))) - @test nnzblocks(Tc) == 2 - @test nnz(Tc) == 8 - @test eltype(Tc) == complex(elt) - @test norm(Tc) ≉ 0 + @testset "BlockSparseTensor setindex! add block" begin + T = BlockSparseTensor([2, 3], [4, 5]) + + @allowscalar for I in eachindex(T) + @test T[I] == 0.0 + end + @test nnz(T) == 0 + @test nnzblocks(T) == 0 + @test !isblocknz(T, (1, 1)) + @test !isblocknz(T, (2, 1)) + @test !isblocknz(T, (1, 2)) + @test !isblocknz(T, (2, 2)) + + T[1, 1] = 1.0 + + @test T[1, 1] == 1.0 + @test nnz(T) == 8 + @test nnzblocks(T) == 1 + @test isblocknz(T, (1, 1)) + @test !isblocknz(T, (2, 1)) + @test !isblocknz(T, (1, 2)) + @test !isblocknz(T, (2, 2)) + + T[4, 8] = 2.0 + + @test T[4, 8] == 2.0 + @test nnz(T) == 8 + 15 + @test nnzblocks(T) == 2 + @test isblocknz(T, (1, 1)) + @test !isblocknz(T, (2, 1)) + @test !isblocknz(T, (1, 2)) + @test isblocknz(T, (2, 2)) + + T[1, 6] = 3.0 + + @test T[1, 6] == 3.0 + @test nnz(T) == 8 + 15 + 10 + @test nnzblocks(T) == 3 + @test isblocknz(T, (1, 1)) + @test !isblocknz(T, (2, 1)) + @test isblocknz(T, (1, 2)) + @test isblocknz(T, (2, 2)) + + T[4, 2] = 4.0 + + @test T[4, 2] == 4.0 + @test nnz(T) == 8 + 15 + 10 + 12 + @test nnzblocks(T) == 4 + @test isblocknz(T, (1, 1)) + @test isblocknz(T, (2, 1)) + @test isblocknz(T, (1, 2)) + @test isblocknz(T, (2, 2)) end - @testset "Complex Valued Operations" begin - T = dev(randomBlockSparseTensor(complex(elt), [(1, 1), (2, 2)], ([2, 2], [2, 2]))) - rT = real(T) - @test eltype(rT) == elt - @test nnzblocks(rT) == nnzblocks(T) - iT = imag(T) - @test eltype(iT) == elt - @test nnzblocks(iT) == nnzblocks(T) - @test norm(rT)^2 + norm(iT)^2 ≈ norm(T)^2 - - cT = conj(T) - @test eltype(cT) == complex(elt) - @test nnzblocks(cT) == nnzblocks(T) + @testset "svd on $dev, eltype: $elt" for dev in devices_list(copy(ARGS)), + elt in (Float32, Float64) + + if !is_supported_eltype(dev, elt) + continue + end + @testset "svd example 1" begin + A = dev(BlockSparseTensor{elt}([(2, 1), (1, 2)], [2, 2], [2, 2])) + randn!(A) + U, S, V = svd(A) + @test @allowscalar array(U) * array(S) * array(V)' ≈ array(A) + atol = default_rtol(elt) + end + + @testset "svd example 2" begin + A = 
dev(BlockSparseTensor{elt}([(1, 2), (2, 3)], [2, 2], [3, 2, 3])) + randn!(A) + U, S, V = svd(A) + @test @allowscalar array(U) * array(S) * array(V)' ≈ array(A) + atol = default_rtol(elt) + end + + @testset "svd example 3" begin + A = dev(BlockSparseTensor{elt}([(2, 1), (3, 2)], [3, 2, 3], [2, 2])) + randn!(A) + U, S, V = svd(A) + @test @allowscalar array(U) * array(S) * array(V)' ≈ array(A) + atol = default_rtol(elt) + end + + @testset "svd example 4" begin + A = dev(BlockSparseTensor{elt}([(2, 1), (3, 2)], [2, 3, 4], [5, 6])) + randn!(A) + U, S, V = svd(A) + @test @allowscalar array(U) * array(S) * array(V)' ≈ array(A) + atol = default_rtol(elt) + end + + @testset "svd example 5" begin + A = dev(BlockSparseTensor{elt}([(1, 2), (2, 3)], [5, 6], [2, 3, 4])) + randn!(A) + U, S, V = svd(A) + @test @allowscalar array(U) * array(S) * array(V)' ≈ array(A) + atol = default_rtol(elt) + end end - @testset "similartype regression test" begin - # Regression test for issue seen in: - # https://github.com/ITensor/ITensorInfiniteMPS.jl/pull/77 - # Previously, `similartype` wasn't using information about the dimensions - # properly and was returning a `BlockSparse` storage of the dimensions - # of the input tensor. - T = dev(BlockSparseTensor(elt, [(1, 1)], ([2], [2]))) - @test NDTensors.ndims( - NDTensors.storagetype(NDTensors.similartype(typeof(T), ([2], [2], [2]))) - ) == 3 + @testset "exp, eltype: $elt" for elt in (Float32, Float64) + A = BlockSparseTensor{elt}([(1, 1), (2, 2)], [2, 4], [2, 4]) + randn!(A) + expT = exp(A) + @test array(expT) ≈ exp(array(A)) + atol = default_rtol(elt) + + # Hermitian case + A = BlockSparseTensor(complex(elt), [(1, 1), (2, 2)], ([2, 2], [2, 2])) + randn!(A) + Ah = BlockSparseTensor(complex(elt), undef, [(1, 1), (2, 2)], ([2, 2], [2, 2])) + for bA in eachnzblock(A) + b = blockview(A, bA) + blockview(Ah, bA) .= b + b' + end + expTh = exp(Hermitian(Ah)) + @test array(expTh) ≈ exp(Hermitian(array(Ah))) rtol = default_rtol(eltype(Ah)) + + A = BlockSparseTensor{elt}([(2, 1), (1, 2)], [2, 2], [2, 2]) + @test_throws ErrorException exp(A) end - - @testset "Random constructor" begin - T = dev(randomBlockSparseTensor(elt, [(1, 1), (2, 2)], ([2, 2], [2, 2]))) - @test nnzblocks(T) == 2 - @test nnz(T) == 8 - @test eltype(T) == elt - @test norm(T) ≉ 0 - - Tc = dev(randomBlockSparseTensor(complex(elt), [(1, 1), (2, 2)], ([2, 2], [2, 2]))) - @test nnzblocks(Tc) == 2 - @test nnz(Tc) == 8 - @test eltype(Tc) == complex(elt) - @test norm(Tc) ≉ 0 - end - - @testset "permute_combine" begin - indsA = ([2, 3], [4, 5], [6, 7, 8]) - locsA = [(2, 1, 1), (1, 2, 1), (2, 2, 3)] - A = dev(BlockSparseTensor{elt}(locsA, indsA...)) - randn!(A) - - B = NDTensors.permute_combine(A, 3, (2, 1)) - @test nnzblocks(A) == nnzblocks(B) - @test nnz(A) == nnz(B) - - Ap = NDTensors.permutedims(A, (3, 2, 1)) - - @allowscalar for (bAp, bB) in zip(eachnzblock(Ap), eachnzblock(B)) - blockAp = blockview(Ap, bAp) - blockB = blockview(B, bB) - @test reshape(blockAp, size(blockB)) == blockB - end - end - end - - @testset "BlockSparseTensor setindex! 
add block" begin - T = BlockSparseTensor([2, 3], [4, 5]) - - @allowscalar for I in eachindex(T) - @test T[I] == 0.0 - end - @test nnz(T) == 0 - @test nnzblocks(T) == 0 - @test !isblocknz(T, (1, 1)) - @test !isblocknz(T, (2, 1)) - @test !isblocknz(T, (1, 2)) - @test !isblocknz(T, (2, 2)) - - T[1, 1] = 1.0 - - @test T[1, 1] == 1.0 - @test nnz(T) == 8 - @test nnzblocks(T) == 1 - @test isblocknz(T, (1, 1)) - @test !isblocknz(T, (2, 1)) - @test !isblocknz(T, (1, 2)) - @test !isblocknz(T, (2, 2)) - - T[4, 8] = 2.0 - - @test T[4, 8] == 2.0 - @test nnz(T) == 8 + 15 - @test nnzblocks(T) == 2 - @test isblocknz(T, (1, 1)) - @test !isblocknz(T, (2, 1)) - @test !isblocknz(T, (1, 2)) - @test isblocknz(T, (2, 2)) - - T[1, 6] = 3.0 - - @test T[1, 6] == 3.0 - @test nnz(T) == 8 + 15 + 10 - @test nnzblocks(T) == 3 - @test isblocknz(T, (1, 1)) - @test !isblocknz(T, (2, 1)) - @test isblocknz(T, (1, 2)) - @test isblocknz(T, (2, 2)) - - T[4, 2] = 4.0 - - @test T[4, 2] == 4.0 - @test nnz(T) == 8 + 15 + 10 + 12 - @test nnzblocks(T) == 4 - @test isblocknz(T, (1, 1)) - @test isblocknz(T, (2, 1)) - @test isblocknz(T, (1, 2)) - @test isblocknz(T, (2, 2)) - end - - @testset "svd on $dev, eltype: $elt" for dev in devices_list(copy(ARGS)), - elt in (Float32, Float64) - - if !is_supported_eltype(dev, elt) - continue - end - @testset "svd example 1" begin - A = dev(BlockSparseTensor{elt}([(2, 1), (1, 2)], [2, 2], [2, 2])) - randn!(A) - U, S, V = svd(A) - @test @allowscalar array(U) * array(S) * array(V)' ≈ array(A) - atol = default_rtol(elt) - end - - @testset "svd example 2" begin - A = dev(BlockSparseTensor{elt}([(1, 2), (2, 3)], [2, 2], [3, 2, 3])) - randn!(A) - U, S, V = svd(A) - @test @allowscalar array(U) * array(S) * array(V)' ≈ array(A) - atol = default_rtol(elt) - end - - @testset "svd example 3" begin - A = dev(BlockSparseTensor{elt}([(2, 1), (3, 2)], [3, 2, 3], [2, 2])) - randn!(A) - U, S, V = svd(A) - @test @allowscalar array(U) * array(S) * array(V)' ≈ array(A) - atol = default_rtol(elt) - end - - @testset "svd example 4" begin - A = dev(BlockSparseTensor{elt}([(2, 1), (3, 2)], [2, 3, 4], [5, 6])) - randn!(A) - U, S, V = svd(A) - @test @allowscalar array(U) * array(S) * array(V)' ≈ array(A) - atol = default_rtol(elt) - end - - @testset "svd example 5" begin - A = dev(BlockSparseTensor{elt}([(1, 2), (2, 3)], [5, 6], [2, 3, 4])) - randn!(A) - U, S, V = svd(A) - @test @allowscalar array(U) * array(S) * array(V)' ≈ array(A) - atol = default_rtol(elt) - end - end - - @testset "exp, eltype: $elt" for elt in (Float32, Float64) - A = BlockSparseTensor{elt}([(1, 1), (2, 2)], [2, 4], [2, 4]) - randn!(A) - expT = exp(A) - @test array(expT) ≈ exp(array(A)) - atol = default_rtol(elt) - - # Hermitian case - A = BlockSparseTensor(complex(elt), [(1, 1), (2, 2)], ([2, 2], [2, 2])) - randn!(A) - Ah = BlockSparseTensor(complex(elt), undef, [(1, 1), (2, 2)], ([2, 2], [2, 2])) - for bA in eachnzblock(A) - b = blockview(A, bA) - blockview(Ah, bA) .= b + b' - end - expTh = exp(Hermitian(Ah)) - @test array(expTh) ≈ exp(Hermitian(array(Ah))) rtol = default_rtol(eltype(Ah)) - - A = BlockSparseTensor{elt}([(2, 1), (1, 2)], [2, 2], [2, 2]) - @test_throws ErrorException exp(A) - end end end diff --git a/NDTensors/test/test_combiner.jl b/NDTensors/test/test_combiner.jl index b9e6d3e77c..40d3e73dbe 100644 --- a/NDTensors/test/test_combiner.jl +++ b/NDTensors/test/test_combiner.jl @@ -1,35 +1,37 @@ @eval module $(gensym()) using GPUArraysCore: @allowscalar using NDTensors: - NDTensors, - Block, - BlockOffsets, - BlockSparse, - 
BlockSparseTensor, - Combiner, - Dense, - DenseTensor, - contract, - dim, - dims, - tensor + NDTensors, + Block, + BlockOffsets, + BlockSparse, + BlockSparseTensor, + Combiner, + Dense, + DenseTensor, + contract, + dim, + dims, + tensor include("NDTensorsTestUtils/NDTensorsTestUtils.jl") using .NDTensorsTestUtils: devices_list, is_supported_eltype using Test: @testset, @test, @test_throws # Testing generic block indices struct Index{Space} - space::Space + space::Space end NDTensors.dim(i::Index) = sum(b -> last(b), i.space) NDTensors.nblocks(i::Index) = length(i.space) NDTensors.blockdim(i::Index, block::Integer) = last(i.space[block]) function NDTensors.outer(i1::Index, i2::Index) - return Index(vec( - map(Iterators.product(i1.space, i2.space)) do (b1, b2) - return first(b1) + first(b2) => last(b1) * last(b2) - end, - )) + return Index( + vec( + map(Iterators.product(i1.space, i2.space)) do (b1, b2) + return first(b1) + first(b2) => last(b1) * last(b2) + end, + ) + ) end NDTensors.permuteblocks(i::Index, perm::Vector{Int}) = Index(i.space[perm]) @@ -37,86 +39,86 @@ struct QN end Base.:+(q1::QN, q2::QN) = QN() @testset "CombinerTensor basic functionality" begin - @testset "test device: $dev, eltype: $elt" for dev in devices_list(copy(ARGS)), - elt in (Float64, Float32) + @testset "test device: $dev, eltype: $elt" for dev in devices_list(copy(ARGS)), + elt in (Float64, Float32) - if !is_supported_eltype(dev, elt) - continue - end - @testset "Dense * Combiner" begin - d = 2 - input_tensor_inds = (d, d, d) - combiner_tensor_inds = (d^2, d, d) - output_tensor_inds = (d, d^2) + if !is_supported_eltype(dev, elt) + continue + end + @testset "Dense * Combiner" begin + d = 2 + input_tensor_inds = (d, d, d) + combiner_tensor_inds = (d^2, d, d) + output_tensor_inds = (d, d^2) - input_tensor = dev(tensor(Dense(randn(elt, input_tensor_inds)), input_tensor_inds)) - combiner_tensor = dev(tensor(Combiner([1], [1]), combiner_tensor_inds)) + input_tensor = dev(tensor(Dense(randn(elt, input_tensor_inds)), input_tensor_inds)) + combiner_tensor = dev(tensor(Combiner([1], [1]), combiner_tensor_inds)) - output_tensor = contract(input_tensor, (1, -1, -2), combiner_tensor, (2, -1, -2)) - @test output_tensor isa DenseTensor - @test dims(output_tensor) == output_tensor_inds - @allowscalar for i in 1:length(input_tensor) - @test input_tensor[i] == output_tensor[i] - end + output_tensor = contract(input_tensor, (1, -1, -2), combiner_tensor, (2, -1, -2)) + @test output_tensor isa DenseTensor + @test dims(output_tensor) == output_tensor_inds + @allowscalar for i in 1:length(input_tensor) + @test input_tensor[i] == output_tensor[i] + end - # Test uncombining - new_input_tensor = contract(output_tensor, (1, -1), combiner_tensor, (-1, 2, 3)) - @test NDTensors.cpu(new_input_tensor) == NDTensors.cpu(input_tensor) + # Test uncombining + new_input_tensor = contract(output_tensor, (1, -1), combiner_tensor, (-1, 2, 3)) + @test NDTensors.cpu(new_input_tensor) == NDTensors.cpu(input_tensor) - # Catch invalid combining - input_tensor_inds = (d,) - input_tensor = dev(tensor(Dense(randn(elt, input_tensor_inds)), input_tensor_inds)) - combiner_tensor = dev(tensor(Combiner([1], [1]), combiner_tensor_inds)) - @test_throws Any contract(input_tensor, (-1,), combiner_tensor, (1, -1, -2)) - end + # Catch invalid combining + input_tensor_inds = (d,) + input_tensor = dev(tensor(Dense(randn(elt, input_tensor_inds)), input_tensor_inds)) + combiner_tensor = dev(tensor(Combiner([1], [1]), combiner_tensor_inds)) + @test_throws Any 
contract(input_tensor, (-1,), combiner_tensor, (1, -1, -2)) + end - ind_constructors = (dim -> [dim], dim -> Index([QN() => dim])) - #TODO cu doesn't work with blocksparse yet - @testset "BlockSparse * Combiner" for ind_constructor in ind_constructors - d = 2 - i, j, k = map(ind_constructor, (d, d, d)) - c = ind_constructor(d^2) + ind_constructors = (dim -> [dim], dim -> Index([QN() => dim])) + #TODO cu doesn't work with blocksparse yet + @testset "BlockSparse * Combiner" for ind_constructor in ind_constructors + d = 2 + i, j, k = map(ind_constructor, (d, d, d)) + c = ind_constructor(d^2) - input_tensor_inds = (i, j, k) - combiner_tensor_inds = (c, j, k) - output_tensor_inds = (c, i) + input_tensor_inds = (i, j, k) + combiner_tensor_inds = (c, j, k) + output_tensor_inds = (c, i) - input_tensor = dev( - tensor( - BlockSparse( - randn(elt, dim(input_tensor_inds)), BlockOffsets{3}([Block(1, 1, 1)], [0]) - ), - input_tensor_inds, - ), - ) - combiner_tensor = tensor(Combiner([1], [1]), combiner_tensor_inds) + input_tensor = dev( + tensor( + BlockSparse( + randn(elt, dim(input_tensor_inds)), BlockOffsets{3}([Block(1, 1, 1)], [0]) + ), + input_tensor_inds, + ), + ) + combiner_tensor = tensor(Combiner([1], [1]), combiner_tensor_inds) - output_tensor = contract(input_tensor, (1, -1, -2), combiner_tensor, (2, -1, -2)) - @test output_tensor isa BlockSparseTensor - @test dims(output_tensor) == dims(output_tensor_inds) - output_tensor = permutedims(output_tensor, (2, 1)) - @allowscalar for i in 1:length(input_tensor) - @test input_tensor[i] == output_tensor[i] - end + output_tensor = contract(input_tensor, (1, -1, -2), combiner_tensor, (2, -1, -2)) + @test output_tensor isa BlockSparseTensor + @test dims(output_tensor) == dims(output_tensor_inds) + output_tensor = permutedims(output_tensor, (2, 1)) + @allowscalar for i in 1:length(input_tensor) + @test input_tensor[i] == output_tensor[i] + end - # Test uncombining. Broken for inds that are not `Index`. - new_input_tensor = contract(output_tensor, (1, -1), combiner_tensor, (-1, 2, 3)) - new_input_tensor = permutedims(new_input_tensor, (3, 1, 2)) - @test NDTensors.cpu(new_input_tensor) == NDTensors.cpu(input_tensor) + # Test uncombining. Broken for inds that are not `Index`. 
+ new_input_tensor = contract(output_tensor, (1, -1), combiner_tensor, (-1, 2, 3)) + new_input_tensor = permutedims(new_input_tensor, (3, 1, 2)) + @test NDTensors.cpu(new_input_tensor) == NDTensors.cpu(input_tensor) - # Catch invalid combining - invalid_input_tensor_inds = (k,) - invalid_input_tensor = dev( - tensor( - BlockSparse( - randn(elt, dim(invalid_input_tensor_inds)), BlockOffsets{1}([Block(1)], [0]) - ), - invalid_input_tensor_inds, - ), - ) - combiner_tensor = tensor(Combiner([1], [1]), combiner_tensor_inds) - @test_throws Any contract(invalid_input_tensor, (-1,), combiner_tensor, (1, 2, -1)) + # Catch invalid combining + invalid_input_tensor_inds = (k,) + invalid_input_tensor = dev( + tensor( + BlockSparse( + randn(elt, dim(invalid_input_tensor_inds)), BlockOffsets{1}([Block(1)], [0]) + ), + invalid_input_tensor_inds, + ), + ) + combiner_tensor = tensor(Combiner([1], [1]), combiner_tensor_inds) + @test_throws Any contract(invalid_input_tensor, (-1,), combiner_tensor, (1, 2, -1)) + end end - end end end diff --git a/NDTensors/test/test_dense.jl b/NDTensors/test/test_dense.jl index 48916c2d25..c17b816f35 100644 --- a/NDTensors/test/test_dense.jl +++ b/NDTensors/test/test_dense.jl @@ -7,320 +7,320 @@ include("NDTensorsTestUtils/NDTensorsTestUtils.jl") using .NDTensorsTestUtils: devices_list struct MyInd - dim::Int + dim::Int end NDTensors.dim(i::MyInd) = i.dim @testset "Dense Tensors" begin - @testset "test device: $dev" for dev in devices_list(copy(ARGS)) - elt = dev == mtl ? Float32 : Float64 - # Testing with GPU and CPU backends - @testset "DenseTensor basic functionality" begin - A = dev(Tensor(elt, (3, 4))) - @allowscalar for I in eachindex(A) - @test A[I] == 0 - end - - @test @allowscalar A[2, 1] isa elt - @test dims(A[1:2, 1]) == (2,) - @test dims(A[1:2, 2]) == (2,) - @test dims(A[2:3, 2]) == (2,) - @test dims(A[2, 2:4]) == (3,) - @test dims(A[2:3, 2:4]) == (2, 3) - @test dims(A[2:3, 2:end]) == (2, 3) - @test dims(A[3, 2:end]) == (3,) - - randn!(A) - - @test ndims(A) == 2 - @test dims(A) == (3, 4) - @test inds(A) == (3, 4) - - Aview = A[2:3, 2:3] - @test dims(Aview) == (2, 2) - ## Added for issue 1431 create a tensor from - ## a sliced view of another tensor - Acopy = Tensor(NDTensors.storage(Aview), (1, 4)) - @test NDTensors.cpu(data(Acopy)) == NDTensors.cpu(data(Aview)) - @test dims(Acopy) == (1, 4) - - B = dev(Tensor(elt, undef, (3, 4))) - randn!(B) - C = copy(A) - C = permutedims!!(C, B, (1, 2), +) - Cp = NDTensors.map_diag(i -> 2 * i, C) - @allowscalar for i in 1:diaglength(Cp) - @test Cp[i, i] == 2 * C[i, i] - end - - Ap = permutedims(A, (2, 1)) - @allowscalar begin - for I in eachindex(A) - @test A[I] != 0 - end + @testset "test device: $dev" for dev in devices_list(copy(ARGS)) + elt = dev == mtl ? 
Float32 : Float64 + # Testing with GPU and CPU backends + @testset "DenseTensor basic functionality" begin + A = dev(Tensor(elt, (3, 4))) + @allowscalar for I in eachindex(A) + @test A[I] == 0 + end - for I in eachindex(A) - @test A[I] != 0 - end + @test @allowscalar A[2, 1] isa elt + @test dims(A[1:2, 1]) == (2,) + @test dims(A[1:2, 2]) == (2,) + @test dims(A[2:3, 2]) == (2,) + @test dims(A[2, 2:4]) == (3,) + @test dims(A[2:3, 2:4]) == (2, 3) + @test dims(A[2:3, 2:end]) == (2, 3) + @test dims(A[3, 2:end]) == (3,) + + randn!(A) + + @test ndims(A) == 2 + @test dims(A) == (3, 4) + @test inds(A) == (3, 4) + + Aview = A[2:3, 2:3] + @test dims(Aview) == (2, 2) + ## Added for issue 1431 create a tensor from + ## a sliced view of another tensor + Acopy = Tensor(NDTensors.storage(Aview), (1, 4)) + @test NDTensors.cpu(data(Acopy)) == NDTensors.cpu(data(Aview)) + @test dims(Acopy) == (1, 4) + + B = dev(Tensor(elt, undef, (3, 4))) + randn!(B) + C = copy(A) + C = permutedims!!(C, B, (1, 2), +) + Cp = NDTensors.map_diag(i -> 2 * i, C) + @allowscalar for i in 1:diaglength(Cp) + @test Cp[i, i] == 2 * C[i, i] + end + + Ap = permutedims(A, (2, 1)) + @allowscalar begin + for I in eachindex(A) + @test A[I] != 0 + end + + for I in eachindex(A) + @test A[I] != 0 + end + + ## TODO Currently this fails with scalar indexing on CUDA + ## Because A + B calls + ## +(A::DenseTensor{Float64, 2, Tuple{Int64, Int64}, Dense{Float64, CuArray{Float64, 1, CUDA.Mem.DeviceBuffer}}}, B::DenseTensor{Float64, 2, Tuple{Int64, Int64}, Dense{Float64, CuArray{Float64, 1, CUDA.Mem.DeviceBuffer}}}) + ## @ Base ./arraymath.jl:8 + #C = A + B + + for I in eachindex(C) + @test C[I] == A[I] + B[I] + end - ## TODO Currently this fails with scalar indexing on CUDA - ## Because A + B calls - ## +(A::DenseTensor{Float64, 2, Tuple{Int64, Int64}, Dense{Float64, CuArray{Float64, 1, CUDA.Mem.DeviceBuffer}}}, B::DenseTensor{Float64, 2, Tuple{Int64, Int64}, Dense{Float64, CuArray{Float64, 1, CUDA.Mem.DeviceBuffer}}}) - ## @ Base ./arraymath.jl:8 - #C = A + B + for I in eachindex(A) + @test A[I] == Ap[NDTensors.permute(I, (2, 1))] + end - for I in eachindex(C) - @test C[I] == A[I] + B[I] + A[1, 1] = 11 + @test A[1, 1] == 11 + + @test A[2, 2] == Aview[1, 1] + end + + ## Testing A .= α .* B .+ β .* A + C = copy(A) + @allowscalar fill!(B, zero(elt)) + β = elt(2) + α = elt(1) + permutedims!!(A, B, (1, 2), (a, b) -> +(*(β, a), *(α, b))) + @allowscalar 2 .* C == A + randn!(B) + C = copy(A) + A = permutedims!!(A, B, (1, 2), (a, b) -> +(*(β, a), *(α, b))) + @allowscalar for i in 1:3, j in 1:4 + @test A[i, j] == α * B[i, j] + β * C[i, j] + end + + ## add elt around 2.0 to preserve the eltype of A. 
+ @test data(A * elt(2.0)) == data(elt(2.0) * A) + + Asim = similar(data(A), 10) + @test eltype(Asim) == elt + @test length(Asim) == 10 + + t = dev(Tensor(complex(elt), (100, 100))) + randn!(t) + @test conj(data(store(t))) == data(store(conj(t))) + @test typeof(conj(t)) <: DenseTensor + + @test Dense(complex(elt)) == Dense{complex(elt)}() + @test Dense(complex(elt)) == complex(Dense(elt)) + + D = dev(Tensor(complex(elt), (100, 100))) + @test eltype(D) == complex(elt) + @test ndims(D) == 2 + @test dim(D) == 100^2 + + E = dev(Tensor(complex(elt), undef, (100, 100))) + @test eltype(E) == complex(elt) + @test ndims(E) == 2 + @test dim(E) == 100^2 + + F = dev(Tensor(elt, (100, 100))) + @test eltype(F) == elt + @test ndims(F) == 2 + @test dim(F) == 100^2 + + G = dev(Tensor(elt, undef, (100, 100))) + @test eltype(G) == elt + @test ndims(G) == 2 + @test dim(G) == 100^2 + + H = dev(Tensor(complex(elt), undef, (100, 100))) + @test eltype(H) == complex(elt) + @test ndims(H) == 2 + @test dim(H) == 100^2 + + I_arr = dev(rand(elt, 10, 10, 10)) + I = dev(Tensor(I_arr, (10, 10, 10))) + @test eltype(I) == elt + @test dim(I) == 1000 + @test Array(I) == I_arr + + J = dev(Tensor(elt, (2, 2))) + K = dev(Tensor(elt, (2, 2))) + @test Array(J * K) ≈ Array(J) * Array(K) end - for I in eachindex(A) - @test A[I] == Ap[NDTensors.permute(I, (2, 1))] + @testset "Random constructor" begin + T = dev(randomTensor(elt, (2, 2))) + @test dims(T) == (2, 2) + @test eltype(T) == elt + @test @allowscalar T[1, 1] ≉ 0 + @test norm(T) ≉ 0 + + Tc = dev(randomTensor(complex(elt), (2, 2))) + @test dims(Tc) == (2, 2) + @test eltype(Tc) == complex(elt) + @test @allowscalar Tc[1, 1] ≉ 0 + @test norm(Tc) ≉ 0 end - A[1, 1] = 11 - @test A[1, 1] == 11 - - @test A[2, 2] == Aview[1, 1] - end - - ## Testing A .= α .* B .+ β .* A - C = copy(A) - @allowscalar fill!(B, zero(elt)) - β = elt(2) - α = elt(1) - permutedims!!(A, B, (1, 2), (a, b) -> +(*(β, a), *(α, b))) - @allowscalar 2 .* C == A - randn!(B) - C = copy(A) - A = permutedims!!(A, B, (1, 2), (a, b) -> +(*(β, a), *(α, b))) - @allowscalar for i in 1:3, j in 1:4 - @test A[i, j] == α * B[i, j] + β * C[i, j] - end - - ## add elt around 2.0 to preserve the eltype of A. 
- @test data(A * elt(2.0)) == data(elt(2.0) * A) - - Asim = similar(data(A), 10) - @test eltype(Asim) == elt - @test length(Asim) == 10 - - t = dev(Tensor(complex(elt), (100, 100))) - randn!(t) - @test conj(data(store(t))) == data(store(conj(t))) - @test typeof(conj(t)) <: DenseTensor - - @test Dense(complex(elt)) == Dense{complex(elt)}() - @test Dense(complex(elt)) == complex(Dense(elt)) - - D = dev(Tensor(complex(elt), (100, 100))) - @test eltype(D) == complex(elt) - @test ndims(D) == 2 - @test dim(D) == 100^2 - - E = dev(Tensor(complex(elt), undef, (100, 100))) - @test eltype(E) == complex(elt) - @test ndims(E) == 2 - @test dim(E) == 100^2 - - F = dev(Tensor(elt, (100, 100))) - @test eltype(F) == elt - @test ndims(F) == 2 - @test dim(F) == 100^2 - - G = dev(Tensor(elt, undef, (100, 100))) - @test eltype(G) == elt - @test ndims(G) == 2 - @test dim(G) == 100^2 - - H = dev(Tensor(complex(elt), undef, (100, 100))) - @test eltype(H) == complex(elt) - @test ndims(H) == 2 - @test dim(H) == 100^2 - - I_arr = dev(rand(elt, 10, 10, 10)) - I = dev(Tensor(I_arr, (10, 10, 10))) - @test eltype(I) == elt - @test dim(I) == 1000 - @test Array(I) == I_arr - - J = dev(Tensor(elt, (2, 2))) - K = dev(Tensor(elt, (2, 2))) - @test Array(J * K) ≈ Array(J) * Array(K) - end + @testset "Complex Valued Tensors" begin + d1, d2, d3 = 2, 3, 4 + T = dev(randomTensor(complex(elt), (d1, d2, d3))) - @testset "Random constructor" begin - T = dev(randomTensor(elt, (2, 2))) - @test dims(T) == (2, 2) - @test eltype(T) == elt - @test @allowscalar T[1, 1] ≉ 0 - @test norm(T) ≉ 0 - - Tc = dev(randomTensor(complex(elt), (2, 2))) - @test dims(Tc) == (2, 2) - @test eltype(Tc) == complex(elt) - @test @allowscalar Tc[1, 1] ≉ 0 - @test norm(Tc) ≉ 0 - end + rT = real(T) + iT = imag(T) + cT = conj(T) - @testset "Complex Valued Tensors" begin - d1, d2, d3 = 2, 3, 4 - T = dev(randomTensor(complex(elt), (d1, d2, d3))) + @allowscalar for n1 in 1:d1, n2 in 1:d2, n3 in 1:d3 + @test rT[n1, n2, n3] ≈ real(T[n1, n2, n3]) + @test iT[n1, n2, n3] ≈ imag(T[n1, n2, n3]) + @test cT[n1, n2, n3] ≈ conj(T[n1, n2, n3]) + end + end - rT = real(T) - iT = imag(T) - cT = conj(T) + @testset "Custom inds types" begin + T = dev(Tensor(elt, (MyInd(2), MyInd(3), MyInd(4)))) + @test store(T) isa Dense + @test eltype(T) == elt + @test norm(T) == 0 + @test dims(T) == (2, 3, 4) + @test ndims(T) == 3 + @test inds(T) == (MyInd(2), MyInd(3), MyInd(4)) + @allowscalar begin + T[2, 1, 2] = 1.21 + @test T[2, 1, 2] == elt(1.21) + end + @test norm(T) == elt(1.21) + + T = dev(randomTensor(complex(elt), (MyInd(4), MyInd(3)))) + @test store(T) isa Dense + @test eltype(T) == complex(elt) + @test norm(T) > 0 + @test dims(T) == (4, 3) + @test ndims(T) == 2 + @test inds(T) == (MyInd(4), MyInd(3)) + + T2 = 2 * T + @test eltype(T2) == complex(elt) + @test store(T2) isa Dense + @test norm(T2) > 0 + @test norm(T2) / norm(T) ≈ 2 + @test dims(T2) == (4, 3) + @test ndims(T2) == 2 + @test inds(T2) == (MyInd(4), MyInd(3)) + end - @allowscalar for n1 in 1:d1, n2 in 1:d2, n3 in 1:d3 - @test rT[n1, n2, n3] ≈ real(T[n1, n2, n3]) - @test iT[n1, n2, n3] ≈ imag(T[n1, n2, n3]) - @test cT[n1, n2, n3] ≈ conj(T[n1, n2, n3]) - end - end + @testset "generic contraction" begin + # correctness of _gemm! 
+ for alpha in [0.0, 1.0, 2.0] + for beta in [0.0, 1.0, 2.0] + for tA in ['N', 'T'] + for tB in ['N', 'T'] + A = randn(4, 4) + B = randn(4, 4) + C = randn(4, 4) + A = BigFloat.(A) + B = BigFloat.(B) + C2 = BigFloat.(C) + NDTensors._gemm!(tA, tB, alpha, A, B, beta, C) + NDTensors._gemm!(tA, tB, alpha, A, B, beta, C2) + @test C ≈ C2 + end + end + end + end + end - @testset "Custom inds types" begin - T = dev(Tensor(elt, (MyInd(2), MyInd(3), MyInd(4)))) - @test store(T) isa Dense - @test eltype(T) == elt - @test norm(T) == 0 - @test dims(T) == (2, 3, 4) - @test ndims(T) == 3 - @test inds(T) == (MyInd(2), MyInd(3), MyInd(4)) - @allowscalar begin - T[2, 1, 2] = 1.21 - @test T[2, 1, 2] == elt(1.21) - end - @test norm(T) == elt(1.21) - - T = dev(randomTensor(complex(elt), (MyInd(4), MyInd(3)))) - @test store(T) isa Dense - @test eltype(T) == complex(elt) - @test norm(T) > 0 - @test dims(T) == (4, 3) - @test ndims(T) == 2 - @test inds(T) == (MyInd(4), MyInd(3)) - - T2 = 2 * T - @test eltype(T2) == complex(elt) - @test store(T2) isa Dense - @test norm(T2) > 0 - @test norm(T2) / norm(T) ≈ 2 - @test dims(T2) == (4, 3) - @test ndims(T2) == 2 - @test inds(T2) == (MyInd(4), MyInd(3)) - end + @testset "Contraction with size 1 block and NaN" begin + @testset "No permutation" begin + R = dev(Tensor(complex(elt), (2, 2, 1))) + fill!(R, elt(NaN)) + @test @allowscalar any(isnan, R) + T1 = dev(randomTensor(elt, (2, 2, 1))) + T2 = dev(randomTensor(complex(elt), (1, 1))) + NDTensors.contract!(R, (1, 2, 3), T1, (1, 2, -1), T2, (-1, 1)) + @test @allowscalar !any(isnan, R) + @test convert(Array, R) ≈ convert(Array, T1) * T2[] + end - @testset "generic contraction" begin - # correctness of _gemm! - for alpha in [0.0, 1.0, 2.0] - for beta in [0.0, 1.0, 2.0] - for tA in ['N', 'T'] - for tB in ['N', 'T'] - A = randn(4, 4) - B = randn(4, 4) - C = randn(4, 4) - A = BigFloat.(A) - B = BigFloat.(B) - C2 = BigFloat.(C) - NDTensors._gemm!(tA, tB, alpha, A, B, beta, C) - NDTensors._gemm!(tA, tB, alpha, A, B, beta, C2) - @test C ≈ C2 + @testset "Permutation" begin + R = dev(Tensor(complex(elt), (2, 2, 1))) + fill!(R, elt(NaN)) + @test @allowscalar any(isnan, R) + T1 = dev(randomTensor(elt, (2, 2, 1))) + T2 = dev(randomTensor(complex(elt), (1, 1))) + NDTensors.contract!(R, (2, 1, 3), T1, (1, 2, -1), T2, (-1, 1)) + @test @allowscalar !any(isnan, R) + @test convert(Array, R) ≈ permutedims(convert(Array, T1), (2, 1, 3)) * T2[] end - end end - end end - @testset "Contraction with size 1 block and NaN" begin - @testset "No permutation" begin - R = dev(Tensor(complex(elt), (2, 2, 1))) - fill!(R, elt(NaN)) - @test @allowscalar any(isnan, R) - T1 = dev(randomTensor(elt, (2, 2, 1))) - T2 = dev(randomTensor(complex(elt), (1, 1))) - NDTensors.contract!(R, (1, 2, 3), T1, (1, 2, -1), T2, (-1, 1)) - @test @allowscalar !any(isnan, R) - @test convert(Array, R) ≈ convert(Array, T1) * T2[] - end - - @testset "Permutation" begin - R = dev(Tensor(complex(elt), (2, 2, 1))) - fill!(R, elt(NaN)) - @test @allowscalar any(isnan, R) - T1 = dev(randomTensor(elt, (2, 2, 1))) - T2 = dev(randomTensor(complex(elt), (1, 1))) - NDTensors.contract!(R, (2, 1, 3), T1, (1, 2, -1), T2, (-1, 1)) - @test @allowscalar !any(isnan, R) - @test convert(Array, R) ≈ permutedims(convert(Array, T1), (2, 1, 3)) * T2[] - end - end - end - - # Only CPU backend testing - @testset "Contract with exotic types" begin - # BigFloat is not supported on GPU - ## randn(BigFloat, ...) 
is not defined in Julia 1.6 - a = BigFloat.(randn(Float64, 2, 3)) - t = Tensor(a, (1, 2, 3)) - m = Tensor(a, (2, 3)) - v = Tensor([one(BigFloat)], (1,)) - - @test m ≈ contract(t, (-1, 2, 3), v, (-1,)) - tp = similar(t) - NDTensors.contract!(tp, (1, 2, 3), t, (1, 2, 3), v, (1,), false, false) - @test iszero(tp) - - fill!(tp, one(BigFloat)) - NDTensors.contract!(tp, (1, 2, 3), t, (1, 2, 3), v, (1,), false, true) - for i in tp - @test i == one(BigFloat) + # Only CPU backend testing + @testset "Contract with exotic types" begin + # BigFloat is not supported on GPU + ## randn(BigFloat, ...) is not defined in Julia 1.6 + a = BigFloat.(randn(Float64, 2, 3)) + t = Tensor(a, (1, 2, 3)) + m = Tensor(a, (2, 3)) + v = Tensor([one(BigFloat)], (1,)) + + @test m ≈ contract(t, (-1, 2, 3), v, (-1,)) + tp = similar(t) + NDTensors.contract!(tp, (1, 2, 3), t, (1, 2, 3), v, (1,), false, false) + @test iszero(tp) + + fill!(tp, one(BigFloat)) + NDTensors.contract!(tp, (1, 2, 3), t, (1, 2, 3), v, (1,), false, true) + for i in tp + @test i == one(BigFloat) + end + + rand_factor = BigFloat(randn(Float64)) + NDTensors.contract!(tp, (1, 2, 3), t, (1, 2, 3), v, (1,), false, rand_factor) + for i in tp + @test i == rand_factor + end end - rand_factor = BigFloat(randn(Float64)) - NDTensors.contract!(tp, (1, 2, 3), t, (1, 2, 3), v, (1,), false, rand_factor) - for i in tp - @test i == rand_factor + @testset "change backends" begin + a, b, c = [randn(5, 5) for i in 1:3] + backend_auto() + @test NDTensors.gemm_backend[] == :Auto + @test NDTensors.auto_select_backend(typeof.((a, b, c))...) == + NDTensors.GemmBackend(:BLAS) + res1 = NDTensors._gemm!('N', 'N', 2.0, a, b, 0.2, copy(c)) + backend_blas() + @test NDTensors.gemm_backend[] == :BLAS + res2 = NDTensors._gemm!('N', 'N', 2.0, a, b, 0.2, copy(c)) + backend_generic() + @test NDTensors.gemm_backend[] == :Generic + res3 = NDTensors._gemm!('N', 'N', 2.0, a, b, 0.2, copy(c)) + @test res1 == res2 + @test res1 ≈ res3 + backend_auto() end - end - - @testset "change backends" begin - a, b, c = [randn(5, 5) for i in 1:3] - backend_auto() - @test NDTensors.gemm_backend[] == :Auto - @test NDTensors.auto_select_backend(typeof.((a, b, c))...) == - NDTensors.GemmBackend(:BLAS) - res1 = NDTensors._gemm!('N', 'N', 2.0, a, b, 0.2, copy(c)) - backend_blas() - @test NDTensors.gemm_backend[] == :BLAS - res2 = NDTensors._gemm!('N', 'N', 2.0, a, b, 0.2, copy(c)) - backend_generic() - @test NDTensors.gemm_backend[] == :Generic - res3 = NDTensors._gemm!('N', 'N', 2.0, a, b, 0.2, copy(c)) - @test res1 == res2 - @test res1 ≈ res3 - backend_auto() - end - - @testset "change backends" begin - a, b, c = [randn(5, 5) for i in 1:3] - backend_auto() - @test NDTensors.gemm_backend[] == :Auto - @test NDTensors.auto_select_backend(typeof.((a, b, c))...) == - NDTensors.GemmBackend(:BLAS) - res1 = NDTensors._gemm!('N', 'N', 2.0, a, b, 0.2, copy(c)) - @test_throws UndefVarError backend_octavian() - if VERSION >= v"1.5" - # Octavian only support Julia 1.5 - # Need to install it here instead of - # putting it as a dependency in the Project.toml - # since otherwise it fails for older Julia versions. - using Octavian - NDTensors.backend_octavian() - @test NDTensors.gemm_backend[] == :Octavian - res4 = NDTensors._gemm!('N', 'N', 2.0, a, b, 0.2, copy(c)) - @test res1 ≈ res4 - backend_auto() + + @testset "change backends" begin + a, b, c = [randn(5, 5) for i in 1:3] + backend_auto() + @test NDTensors.gemm_backend[] == :Auto + @test NDTensors.auto_select_backend(typeof.((a, b, c))...) 
== + NDTensors.GemmBackend(:BLAS) + res1 = NDTensors._gemm!('N', 'N', 2.0, a, b, 0.2, copy(c)) + @test_throws UndefVarError backend_octavian() + if VERSION >= v"1.5" + # Octavian only support Julia 1.5 + # Need to install it here instead of + # putting it as a dependency in the Project.toml + # since otherwise it fails for older Julia versions. + using Octavian + NDTensors.backend_octavian() + @test NDTensors.gemm_backend[] == :Octavian + res4 = NDTensors._gemm!('N', 'N', 2.0, a, b, 0.2, copy(c)) + @test res1 ≈ res4 + backend_auto() + end end - end end nothing diff --git a/NDTensors/test/test_diag.jl b/NDTensors/test/test_diag.jl index af5ae4febb..50b922dc6d 100644 --- a/NDTensors/test/test_diag.jl +++ b/NDTensors/test/test_diag.jl @@ -3,126 +3,126 @@ using Adapt: adapt using GPUArraysCore: @allowscalar using LinearAlgebra: diagm, dot, norm using NDTensors: - NDTensors, - Dense, - Diag, - DiagTensor, - Tensor, - array, - contract, - data, - dense, - diaglength, - diagindices, - matrix, - randomTensor, - tensor + NDTensors, + Dense, + Diag, + DiagTensor, + Tensor, + array, + contract, + data, + dense, + diaglength, + diagindices, + matrix, + randomTensor, + tensor using Test: @testset, @test, @test_throws include("NDTensorsTestUtils/NDTensorsTestUtils.jl") using .NDTensorsTestUtils: devices_list, is_supported_eltype @testset "DiagTensor basic functionality" begin - @testset "test device: $dev" for dev in devices_list(copy(ARGS)), - elt in (Float32, ComplexF32, Float64, ComplexF64) + @testset "test device: $dev" for dev in devices_list(copy(ARGS)), + elt in (Float32, ComplexF32, Float64, ComplexF64) - if !is_supported_eltype(dev, elt) - # Metal doesn't support double precision - continue - end - t = dev(tensor(Diag(rand(elt, 100)), (100, 100))) - @test conj(data(t)) == data(conj(t)) - @test typeof(conj(t)) <: DiagTensor + if !is_supported_eltype(dev, elt) + # Metal doesn't support double precision + continue + end + t = dev(tensor(Diag(rand(elt, 100)), (100, 100))) + @test conj(data(t)) == data(conj(t)) + @test typeof(conj(t)) <: DiagTensor - d = rand(real(elt), 10) - D = dev(Diag{elt}(d)) - @test eltype(D) == elt - @test @allowscalar dev(Array(dense(D))) == convert.(elt, d) - simD = similar(D) - @test length(simD) == length(D) - @test eltype(simD) == eltype(D) - D = dev(Diag(one(elt))) - @test eltype(D) == elt - @test complex(D) == Diag(one(complex(elt))) - @test similar(D) == Diag(0.0) + d = rand(real(elt), 10) + D = dev(Diag{elt}(d)) + @test eltype(D) == elt + @test @allowscalar dev(Array(dense(D))) == convert.(elt, d) + simD = similar(D) + @test length(simD) == length(D) + @test eltype(simD) == eltype(D) + D = dev(Diag(one(elt))) + @test eltype(D) == elt + @test complex(D) == Diag(one(complex(elt))) + @test similar(D) == Diag(0.0) - D = Tensor(Diag(1), (2, 2)) - @test norm(D) == √2 - d = 3 - ## TODO this fails because uniform diag tensors are immutable - #S = NDTensors.map_diag((i->i * 2), dev(D)) - # @allowscalar for i in 1:diaglength(S) - # @test S[i,i] == 2.0 * D[i,i] - # end + D = Tensor(Diag(1), (2, 2)) + @test norm(D) == √2 + d = 3 + ## TODO this fails because uniform diag tensors are immutable + #S = NDTensors.map_diag((i->i * 2), dev(D)) + # @allowscalar for i in 1:diaglength(S) + # @test S[i,i] == 2.0 * D[i,i] + # end - vr = rand(elt, d) - D = dev(tensor(Diag(vr), (d, d))) - Da = array(D) - Dm = matrix(D) - Dp = permutedims(D, (2, 1)) - for x in (Da, Dm, Dp) - @test x == dev(diagm(0 => vr)) - @test x == dev(diagm(0 => vr)) - @test x == D - end - @test sqrt(contract(D, (-1, -2), 
conj(D), (-1, -2))[]) ≈ norm(D) - # This if statement corresponds to the reported bug: - # https://github.com/JuliaGPU/Metal.jl/issues/364 - if !(dev == NDTensors.mtl && elt === ComplexF32) - S = permutedims(dev(D), (1, 2), sqrt) - @allowscalar begin - for i in 1:diaglength(S) - @test S[i, i] ≈ sqrt(D[i, i]) + vr = rand(elt, d) + D = dev(tensor(Diag(vr), (d, d))) + Da = array(D) + Dm = matrix(D) + Dp = permutedims(D, (2, 1)) + for x in (Da, Dm, Dp) + @test x == dev(diagm(0 => vr)) + @test x == dev(diagm(0 => vr)) + @test x == D + end + @test sqrt(contract(D, (-1, -2), conj(D), (-1, -2))[]) ≈ norm(D) + # This if statement corresponds to the reported bug: + # https://github.com/JuliaGPU/Metal.jl/issues/364 + if !(dev == NDTensors.mtl && elt === ComplexF32) + S = permutedims(dev(D), (1, 2), sqrt) + @allowscalar begin + for i in 1:diaglength(S) + @test S[i, i] ≈ sqrt(D[i, i]) + end + end + end + S = NDTensors.map_diag(i -> 2 * i, dev(D)) + @allowscalar for i in 1:diaglength(S) + @test S[i, i] == 2 * D[i, i] end - end - end - S = NDTensors.map_diag(i -> 2 * i, dev(D)) - @allowscalar for i in 1:diaglength(S) - @test S[i, i] == 2 * D[i, i] - end - a = dev(tensor(Dense(randn(elt, 3)), (3,))) - @test diagindices(a) == 1:1:3 - a = dev(tensor(Dense(randn(elt, 9)), (3, 3))) - @test diagindices(a) == 1:4:9 - a = dev(tensor(Dense(randn(elt, 36)), (3, 4, 3))) - @test diagindices(a) == 1:16:33 - a = dev(tensor(Dense(randn(elt, 0)), (3, 0))) - @test diagindices(a) == 1:1:0 + a = dev(tensor(Dense(randn(elt, 3)), (3,))) + @test diagindices(a) == 1:1:3 + a = dev(tensor(Dense(randn(elt, 9)), (3, 3))) + @test diagindices(a) == 1:4:9 + a = dev(tensor(Dense(randn(elt, 36)), (3, 4, 3))) + @test diagindices(a) == 1:16:33 + a = dev(tensor(Dense(randn(elt, 0)), (3, 0))) + @test diagindices(a) == 1:1:0 - # Regression test for https://github.com/ITensor/ITensors.jl/issues/1199 - S = dev(tensor(Diag(randn(elt, 2)), (2, 2))) - ## This was creating a `Dense{ReshapedArray{Adjoint{Matrix}}}` which, in mul!, was - ## becoming a Transpose{ReshapedArray{Adjoint{Matrix}}} which was causing issues on - ## dispatching GPU mul! - V = dev(tensor(Dense(randn(elt, 12, 2)'), (3, 4, 2))) - S1 = contract(S, (2, -1), V, (3, 4, -1)) - S2 = contract(dense(S), (2, -1), copy(V), (3, 4, -1)) - @test @allowscalar S1 ≈ S2 - end + # Regression test for https://github.com/ITensor/ITensors.jl/issues/1199 + S = dev(tensor(Diag(randn(elt, 2)), (2, 2))) + ## This was creating a `Dense{ReshapedArray{Adjoint{Matrix}}}` which, in mul!, was + ## becoming a Transpose{ReshapedArray{Adjoint{Matrix}}} which was causing issues on + ## dispatching GPU mul! + V = dev(tensor(Dense(randn(elt, 12, 2)'), (3, 4, 2))) + S1 = contract(S, (2, -1), V, (3, 4, -1)) + S2 = contract(dense(S), (2, -1), copy(V), (3, 4, -1)) + @test @allowscalar S1 ≈ S2 + end end @testset "DiagTensor contractions" for dev in devices_list(copy(ARGS)) - ## TODO add more GPU tests - elt = (dev == NDTensors.mtl ? Float32 : Float64) - t = dev(tensor(Diag(elt[1.0, 1.0, 1.0]), (3, 3))) - A = dev(randomTensor(Dense{elt}, (3, 3))) + ## TODO add more GPU tests + elt = (dev == NDTensors.mtl ? 
Float32 : Float64) + t = dev(tensor(Diag(elt[1.0, 1.0, 1.0]), (3, 3))) + A = dev(randomTensor(Dense{elt}, (3, 3))) - @test sum(t) ≈ sum(array(t)) - @test sum(A) ≈ sum(array(A)) - @test prod(t) ≈ prod(array(t)) - @test prod(A) ≈ prod(array(A)) + @test sum(t) ≈ sum(array(t)) + @test sum(A) ≈ sum(array(A)) + @test prod(t) ≈ prod(array(t)) + @test prod(A) ≈ prod(array(A)) - @test contract(t, (1, -2), t, (-2, 3)) == t - @test contract(A, (1, -2), t, (-2, 3)) == A - @test contract(A, (-2, 1), t, (-2, 3)) == transpose(A) + @test contract(t, (1, -2), t, (-2, 3)) == t + @test contract(A, (1, -2), t, (-2, 3)) == A + @test contract(A, (-2, 1), t, (-2, 3)) == transpose(A) - ## Testing sparse contractions on GPU - t = dev(tensor(Diag(one(elt)), (3, 3))) - @test contract(t, (-1, -2), A, (-1, -2))[] ≈ dot(dev(array(t)), array(A)) rtol = sqrt( - eps(elt) - ) + ## Testing sparse contractions on GPU + t = dev(tensor(Diag(one(elt)), (3, 3))) + @test contract(t, (-1, -2), A, (-1, -2))[] ≈ dot(dev(array(t)), array(A)) rtol = sqrt( + eps(elt) + ) - ## Test dot on GPU - @test dot(t, A) ≈ dot(dev(array(t)), array(A)) rtol = sqrt(eps(elt)) + ## Test dot on GPU + @test dot(t, A) ≈ dot(dev(array(t)), array(A)) rtol = sqrt(eps(elt)) end nothing end diff --git a/NDTensors/test/test_diagblocksparse.jl b/NDTensors/test/test_diagblocksparse.jl index 537ef0cefc..d20bca6b2e 100644 --- a/NDTensors/test/test_diagblocksparse.jl +++ b/NDTensors/test/test_diagblocksparse.jl @@ -3,121 +3,121 @@ using Dictionaries: Dictionary using GPUArraysCore: @allowscalar using LinearAlgebra: norm using NDTensors: - NDTensors, - Block, - BlockSparseTensor, - Diag, - DiagBlockSparse, - Tensor, - blockoffsets, - contract, - dense, - denseblocks, - inds, - nzblocks + NDTensors, + Block, + BlockSparseTensor, + Diag, + DiagBlockSparse, + Tensor, + blockoffsets, + contract, + dense, + denseblocks, + inds, + nzblocks using Random: randn! 
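
The `diagindices` expectations in the `test_diag.jl` hunk above follow a simple column-major stride rule: stepping from entry `(i, i, ..., i)` to `(i+1, i+1, ..., i+1)` advances the linear index by `1 + dims[1] + dims[1]*dims[2] + ...`, which is where `1:4:9` and `1:16:33` come from. A minimal illustrative sketch of that rule, assuming nothing beyond base Julia; the helper name `dense_diag_range` is made up here and is not the NDTensors implementation:

    # Linear indices of the entries (i, i, ..., i) of a dense, column-major
    # array with the given dimensions: each step along the diagonal advances
    # the linear index by 1 + dims[1] + dims[1]*dims[2] + ... + prod(dims[1:end-1]).
    function dense_diag_range(dims::Dims)
        stride = 1 + sum(cumprod(collect(Int, dims[1:(end - 1)])))
        return range(1; step = stride, length = minimum(dims))
    end

    dense_diag_range((3,))      # 1:1:3
    dense_diag_range((3, 3))    # 1:4:9   (stride 1 + 3)
    dense_diag_range((3, 4, 3)) # 1:16:33 (stride 1 + 3 + 12)
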
using Test: @test, @test_broken, @test_throws, @testset @testset "UniformDiagBlockSparseTensor basic functionality" begin - NeverAlias = NDTensors.NeverAlias - AllowAlias = NDTensors.AllowAlias + NeverAlias = NDTensors.NeverAlias + AllowAlias = NDTensors.AllowAlias - storage = DiagBlockSparse(1.0, Dictionary([Block(1, 1), Block(2, 2)], [0, 1])) - tensor = Tensor(storage, ([1, 1], [1, 1])) + storage = DiagBlockSparse(1.0, Dictionary([Block(1, 1), Block(2, 2)], [0, 1])) + tensor = Tensor(storage, ([1, 1], [1, 1])) - @test conj(tensor) == tensor - @test conj(NeverAlias(), tensor) == tensor - @test conj(AllowAlias(), tensor) == tensor + @test conj(tensor) == tensor + @test conj(NeverAlias(), tensor) == tensor + @test conj(AllowAlias(), tensor) == tensor - c = 1 + 2im - tensor *= c + c = 1 + 2im + tensor *= c - @test tensor[1, 1] == c - @test conj(tensor) ≠ tensor - @test conj(NeverAlias(), tensor) ≠ tensor - @test conj(AllowAlias(), tensor) ≠ tensor - @test conj(tensor)[1, 1] == conj(c) - @test conj(NeverAlias(), tensor)[1, 1] == conj(c) - @test conj(AllowAlias(), tensor)[1, 1] == conj(c) + @test tensor[1, 1] == c + @test conj(tensor) ≠ tensor + @test conj(NeverAlias(), tensor) ≠ tensor + @test conj(AllowAlias(), tensor) ≠ tensor + @test conj(tensor)[1, 1] == conj(c) + @test conj(NeverAlias(), tensor)[1, 1] == conj(c) + @test conj(AllowAlias(), tensor)[1, 1] == conj(c) end @testset "DiagBlockSparse off-diagonal (eltype=$elt)" for elt in ( - Float32, Float64, Complex{Float32}, Complex{Float64} -) - inds1 = ([1, 1], [1, 1]) - inds2 = ([1, 1], [1, 1]) - blocks = [(1, 2), (2, 1)] - a1 = BlockSparseTensor{elt}(blocks, inds1...) - for b in nzblocks(a1) - randn!(a1[b]) - end - a2 = Tensor(DiagBlockSparse(one(elt), blockoffsets(a1)), inds2) - for (labels1, labels2) in (((1, -1), (-1, 2)), ((-1, -2), (-1, -2))) - @test_throws ErrorException contract(a1, labels1, a2, labels2) - end + Float32, Float64, Complex{Float32}, Complex{Float64}, + ) + inds1 = ([1, 1], [1, 1]) + inds2 = ([1, 1], [1, 1]) + blocks = [(1, 2), (2, 1)] + a1 = BlockSparseTensor{elt}(blocks, inds1...) + for b in nzblocks(a1) + randn!(a1[b]) + end + a2 = Tensor(DiagBlockSparse(one(elt), blockoffsets(a1)), inds2) + for (labels1, labels2) in (((1, -1), (-1, 2)), ((-1, -2), (-1, -2))) + @test_throws ErrorException contract(a1, labels1, a2, labels2) + end end include("NDTensorsTestUtils/NDTensorsTestUtils.jl") using .NDTensorsTestUtils: devices_list @testset "DiagBlockSparse contract" for dev in devices_list(copy(ARGS)) - elt = dev == NDTensors.mtl ? Float32 : Float64 - A = dev(BlockSparseTensor{elt}([(1, 1), (2, 2)], [2, 2], [2, 2])) - randn!(A) - t = Tensor(DiagBlockSparse(one(elt), blockoffsets(A)), inds(A)) - tdense = Tensor(Diag(one(elt)), inds(A)) + elt = dev == NDTensors.mtl ? 
Float32 : Float64 + A = dev(BlockSparseTensor{elt}([(1, 1), (2, 2)], [2, 2], [2, 2])) + randn!(A) + t = Tensor(DiagBlockSparse(one(elt), blockoffsets(A)), inds(A)) + tdense = Tensor(Diag(one(elt)), inds(A)) - a = dense(contract(A, (1, -2), t, (3, -2))) - b = contract(dense(A), (1, -2), tdense, (3, -2)) - @test @allowscalar a ≈ b + a = dense(contract(A, (1, -2), t, (3, -2))) + b = contract(dense(A), (1, -2), tdense, (3, -2)) + @test @allowscalar a ≈ b - a = dense(contract(A, (-2, 1), t, (-2, 3))) - b = contract(dense(A), (-2, 1), tdense, (-2, 3)) - @test @allowscalar a ≈ b + a = dense(contract(A, (-2, 1), t, (-2, 3))) + b = contract(dense(A), (-2, 1), tdense, (-2, 3)) + @test @allowscalar a ≈ b - a = contract(A, (-1, -2), t, (-1, -2))[] - b = contract(dense(A), (-1, -2), tdense, (-1, -2))[] - @test @allowscalar a ≈ b + a = contract(A, (-1, -2), t, (-1, -2))[] + b = contract(dense(A), (-1, -2), tdense, (-1, -2))[] + @test @allowscalar a ≈ b - ## TODO fix these kinds of contractions - A = BlockSparseTensor{elt}([(1, 1), (2, 2)], [3, 2, 3], [2, 2]) - randn!(A) - t = Tensor(DiagBlockSparse(one(elt), blockoffsets(A)), inds(A)) - @test dense(contract(A, (1, -2), (t), (3, -2))) ≈ - contract(dense(A), (1, -2), dense(t), (3, -2)) - @test dense(contract(A, (-2, 1), t, (-2, 3))) ≈ - contract(dense(A), (-2, 1), dense(t), (-2, 3)) - @test contract(dev(A), (-1, -2), dev(t), (-1, -2))[] ≈ - contract(dense(A), (-1, -2), dense(t), (-1, -2))[] + ## TODO fix these kinds of contractions + A = BlockSparseTensor{elt}([(1, 1), (2, 2)], [3, 2, 3], [2, 2]) + randn!(A) + t = Tensor(DiagBlockSparse(one(elt), blockoffsets(A)), inds(A)) + @test dense(contract(A, (1, -2), (t), (3, -2))) ≈ + contract(dense(A), (1, -2), dense(t), (3, -2)) + @test dense(contract(A, (-2, 1), t, (-2, 3))) ≈ + contract(dense(A), (-2, 1), dense(t), (-2, 3)) + @test contract(dev(A), (-1, -2), dev(t), (-1, -2))[] ≈ + contract(dense(A), (-1, -2), dense(t), (-1, -2))[] end @testset "UniformDiagBlockSparse norm" begin - elt = Float64 - storage = DiagBlockSparse(one(elt), Dictionary([Block(1, 1), Block(2, 2)], [0, 2])) - tensor = Tensor(storage, ([2, 2], [2, 2])) - @test norm(tensor) ≈ norm(dense(tensor)) + elt = Float64 + storage = DiagBlockSparse(one(elt), Dictionary([Block(1, 1), Block(2, 2)], [0, 2])) + tensor = Tensor(storage, ([2, 2], [2, 2])) + @test norm(tensor) ≈ norm(dense(tensor)) - elt = Float64 - storage = DiagBlockSparse(one(elt), Dictionary([Block(1, 1)], [0])) - tensor = Tensor(storage, ([2], [1, 1])) - @test norm(tensor) ≈ norm(dense(tensor)) + elt = Float64 + storage = DiagBlockSparse(one(elt), Dictionary([Block(1, 1)], [0])) + tensor = Tensor(storage, ([2], [1, 1])) + @test norm(tensor) ≈ norm(dense(tensor)) end @testset "DiagBlockSparse denseblocks" begin - elt = Float64 - blockoffsets_a = Dictionary([Block(1, 1), Block(2, 2)], [0, 2]) - inds_a = ([2, 2], [2, 2]) - a = Tensor(DiagBlockSparse(elt, blockoffsets_a, 4), inds_a) - a[Block(1, 1)][1, 1] = 1 - a[Block(1, 1)][2, 2] = 2 - a[Block(2, 2)][1, 1] = 3 - a[Block(2, 2)][2, 2] = 4 - a′ = denseblocks(a) - @test dense(a) == dense(a′) + elt = Float64 + blockoffsets_a = Dictionary([Block(1, 1), Block(2, 2)], [0, 2]) + inds_a = ([2, 2], [2, 2]) + a = Tensor(DiagBlockSparse(elt, blockoffsets_a, 4), inds_a) + a[Block(1, 1)][1, 1] = 1 + a[Block(1, 1)][2, 2] = 2 + a[Block(2, 2)][1, 1] = 3 + a[Block(2, 2)][2, 2] = 4 + a′ = denseblocks(a) + @test dense(a) == dense(a′) - elt = Float64 - blockoffsets_a = Dictionary([Block(1, 1)], [0]) - inds_a = ([2], [1, 1]) - a = 
Tensor(DiagBlockSparse(one(elt), blockoffsets_a), inds_a) - a′ = denseblocks(a) - @test dense(a) == dense(a′) + elt = Float64 + blockoffsets_a = Dictionary([Block(1, 1)], [0]) + inds_a = ([2], [1, 1]) + a = Tensor(DiagBlockSparse(one(elt), blockoffsets_a), inds_a) + a′ = denseblocks(a) + @test dense(a) == dense(a′) end end diff --git a/NDTensors/test/test_emptynumber.jl b/NDTensors/test/test_emptynumber.jl index 73d82117f5..f02357bfcb 100644 --- a/NDTensors/test/test_emptynumber.jl +++ b/NDTensors/test/test_emptynumber.jl @@ -6,34 +6,34 @@ using Test: @testset, @test, @test_throws const 𝟎 = EmptyNumber() @testset "NDTensors.EmptyNumber" begin - x = 2.3 + x = 2.3 - @test complex(𝟎) == 𝟎 - @test complex(EmptyNumber) == Complex{EmptyNumber} + @test complex(𝟎) == 𝟎 + @test complex(EmptyNumber) == Complex{EmptyNumber} - # Promotion - for T in (Bool, Float32, Float64, Complex{Float32}, Complex{Float64}) - @test promote_type(EmptyNumber, T) === T - @test promote_type(T, EmptyNumber) === T - end + # Promotion + for T in (Bool, Float32, Float64, Complex{Float32}, Complex{Float64}) + @test promote_type(EmptyNumber, T) === T + @test promote_type(T, EmptyNumber) === T + end - # Basic arithmetic - @test 𝟎 + 𝟎 == 𝟎 - @test 𝟎 + x == x - @test x + 𝟎 == x - @test -𝟎 == 𝟎 - @test 𝟎 - 𝟎 == 𝟎 - @test x - 𝟎 == x - @test 𝟎 * 𝟎 == 𝟎 - @test x * 𝟎 == 𝟎 - @test 𝟎 * x == 𝟎 - @test 𝟎 / x == 𝟎 - @test_throws DivideError() x / 𝟎 == 𝟎 - @test_throws DivideError() 𝟎 / 𝟎 == 𝟎 + # Basic arithmetic + @test 𝟎 + 𝟎 == 𝟎 + @test 𝟎 + x == x + @test x + 𝟎 == x + @test -𝟎 == 𝟎 + @test 𝟎 - 𝟎 == 𝟎 + @test x - 𝟎 == x + @test 𝟎 * 𝟎 == 𝟎 + @test x * 𝟎 == 𝟎 + @test 𝟎 * x == 𝟎 + @test 𝟎 / x == 𝟎 + @test_throws DivideError() x / 𝟎 == 𝟎 + @test_throws DivideError() 𝟎 / 𝟎 == 𝟎 - @test float(𝟎) == 0.0 - @test float(𝟎) isa Float64 - @test norm(𝟎) == 0.0 - @test norm(𝟎) isa Float64 + @test float(𝟎) == 0.0 + @test float(𝟎) isa Float64 + @test norm(𝟎) == 0.0 + @test norm(𝟎) isa Float64 end end diff --git a/NDTensors/test/test_emptystorage.jl b/NDTensors/test/test_emptystorage.jl index 1f82ae2a57..d1121c20ed 100644 --- a/NDTensors/test/test_emptystorage.jl +++ b/NDTensors/test/test_emptystorage.jl @@ -5,31 +5,31 @@ include("NDTensorsTestUtils/NDTensorsTestUtils.jl") using .NDTensorsTestUtils: devices_list @testset "EmptyStorage test" begin - @testset "test device: $dev" for dev in devices_list(copy(ARGS)) - T = dev(Tensor(EmptyStorage(NDTensors.EmptyNumber), (2, 2))) - @test size(T) == (2, 2) - @test eltype(T) == NDTensors.EmptyNumber - @test T[1, 1] == NDTensors.EmptyNumber() - @test T[1, 2] == NDTensors.EmptyNumber() - # TODO: This should fail with an out of bounds error! - #@test T[1, 3] == NDTensors.EmptyNumber() + @testset "test device: $dev" for dev in devices_list(copy(ARGS)) + T = dev(Tensor(EmptyStorage(NDTensors.EmptyNumber), (2, 2))) + @test size(T) == (2, 2) + @test eltype(T) == NDTensors.EmptyNumber + @test T[1, 1] == NDTensors.EmptyNumber() + @test T[1, 2] == NDTensors.EmptyNumber() + # TODO: This should fail with an out of bounds error! 
+ #@test T[1, 3] == NDTensors.EmptyNumber() - Tc = complex(T) - @test size(Tc) == (2, 2) - @test eltype(Tc) == Complex{NDTensors.EmptyNumber} - @test Tc[1, 1] == Complex(NDTensors.EmptyNumber(), NDTensors.EmptyNumber()) - @test Tc[1, 2] == Complex(NDTensors.EmptyNumber(), NDTensors.EmptyNumber()) + Tc = complex(T) + @test size(Tc) == (2, 2) + @test eltype(Tc) == Complex{NDTensors.EmptyNumber} + @test Tc[1, 1] == Complex(NDTensors.EmptyNumber(), NDTensors.EmptyNumber()) + @test Tc[1, 2] == Complex(NDTensors.EmptyNumber(), NDTensors.EmptyNumber()) - T = dev(EmptyTensor(Float64, (2, 2))) - @test blockoffsets(T) == BlockOffsets{2}() - T = dev(EmptyBlockSparseTensor(Float64, ([1, 1], [1, 1]))) - @test blockoffsets(T) == BlockOffsets{2}() + T = dev(EmptyTensor(Float64, (2, 2))) + @test blockoffsets(T) == BlockOffsets{2}() + T = dev(EmptyBlockSparseTensor(Float64, ([1, 1], [1, 1]))) + @test blockoffsets(T) == BlockOffsets{2}() - T = dev(EmptyStorage(NDTensors.EmptyNumber)) - @test zero(T) isa typeof(T) + T = dev(EmptyStorage(NDTensors.EmptyNumber)) + @test zero(T) isa typeof(T) - T = dev(EmptyTensor(NDTensors.EmptyNumber, (2, 2))) - @test zero(T) isa typeof(T) - end + T = dev(EmptyTensor(NDTensors.EmptyNumber, (2, 2))) + @test zero(T) isa typeof(T) + end end end diff --git a/NDTensors/test/test_tupletools.jl b/NDTensors/test/test_tupletools.jl index 959a127d5f..94df98079c 100644 --- a/NDTensors/test/test_tupletools.jl +++ b/NDTensors/test/test_tupletools.jl @@ -3,32 +3,32 @@ using Test: @testset, @test using NDTensors: NDTensors @testset "Test non-exported tuple tools" begin - @test NDTensors.diff((1, 3, 6, 4)) == (2, 3, -2) - @test NDTensors.diff((1, 2, 3)) == (1, 1) + @test NDTensors.diff((1, 3, 6, 4)) == (2, 3, -2) + @test NDTensors.diff((1, 2, 3)) == (1, 1) end @testset "Test deleteat" begin - t = (1, 2, 3, 4) - t = NDTensors.deleteat(t, 2) - @test t == (1, 3, 4) + t = (1, 2, 3, 4) + t = NDTensors.deleteat(t, 2) + @test t == (1, 3, 4) - # deleteat with mixed-type Tuple - t = ('a', 2, 'c', 4) - t = NDTensors.deleteat(t, 2) - @test t == ('a', 'c', 4) - t = NDTensors.deleteat(t, 2) - @test t == ('a', 4) + # deleteat with mixed-type Tuple + t = ('a', 2, 'c', 4) + t = NDTensors.deleteat(t, 2) + @test t == ('a', 'c', 4) + t = NDTensors.deleteat(t, 2) + @test t == ('a', 4) end @testset "Test insertat" begin - t = (1, 2) - t = NDTensors.insertat(t, (3, 4), 2) - @test t == (1, 3, 4) + t = (1, 2) + t = NDTensors.insertat(t, (3, 4), 2) + @test t == (1, 3, 4) - # insertat with mixed-type Tuple - t = (1, 'b') - t = NDTensors.insertat(t, ('c'), 2) - @test t == (1, 'c') + # insertat with mixed-type Tuple + t = (1, 'b') + t = NDTensors.insertat(t, ('c'), 2) + @test t == (1, 'c') end end diff --git a/Project.toml b/Project.toml index aafd60bd6d..6367f22959 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "ITensors" uuid = "9136182c-28ba-11e9-034c-db9fb085ebd5" authors = ["Matthew Fishman ", "Miles Stoudenmire "] -version = "0.9.12" +version = "0.9.13" [deps] Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" diff --git a/docs/make.jl b/docs/make.jl index a9121b55d9..3fbbf01518 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -1,6 +1,6 @@ include("settings.jl") -makedocs(; sitename=sitename, settings...) +makedocs(; sitename = sitename, settings...) # If ENV["GITHUB_EVENT_NAME"] == "workflow_dispatch" # it indicates the Documenter build was launched manually, @@ -8,12 +8,12 @@ makedocs(; sitename=sitename, settings...) 
# As of Dec 2022, Documenter does not build the dev branch # in this case, so change the value to "push" to fix: if get(ENV, "GITHUB_EVENT_NAME", nothing) == "workflow_dispatch" - ENV["GITHUB_EVENT_NAME"] = "push" + ENV["GITHUB_EVENT_NAME"] = "push" end deploydocs(; - repo="github.com/ITensor/ITensors.jl.git", - devbranch="main", - push_preview=true, - deploy_config=Documenter.GitHubActions(), + repo = "github.com/ITensor/ITensors.jl.git", + devbranch = "main", + push_preview = true, + deploy_config = Documenter.GitHubActions(), ) diff --git a/docs/make_local_notest.jl b/docs/make_local_notest.jl index 85b8b4d451..14c8ffccde 100644 --- a/docs/make_local_notest.jl +++ b/docs/make_local_notest.jl @@ -2,4 +2,4 @@ include("settings.jl") settings[:doctest] = false -makedocs(; sitename=sitename, settings...) +makedocs(; sitename = sitename, settings...) diff --git a/docs/make_local_test.jl b/docs/make_local_test.jl index 4dcaf08680..7d13024135 100644 --- a/docs/make_local_test.jl +++ b/docs/make_local_test.jl @@ -1,3 +1,3 @@ include("settings.jl") -makedocs(; sitename=sitename, settings...) +makedocs(; sitename = sitename, settings...) diff --git a/examples/basic_ops/basic_ops.jl b/examples/basic_ops/basic_ops.jl index ff11b59579..17749654bc 100644 --- a/examples/basic_ops/basic_ops.jl +++ b/examples/basic_ops/basic_ops.jl @@ -41,23 +41,23 @@ println("S = Y - X =\n", T, "\n") # Check that adding incompatible tensors cause an error try - U = Z + X + U = Z + X catch - println("Cannot add Z and X") + println("Cannot add Z and X") end # Compare calculations to Julia arrays jZ = [ - 1.0 0.0 - 0.0 -1.0 + 1.0 0.0 + 0.0 -1.0 ] jX = [ - 0.0 1.0 - 1.0 0.0 + 0.0 1.0 + 1.0 0.0 ] jY = [ - 1.0 0.0 - 0.0 1.0 + 1.0 0.0 + 0.0 1.0 ] @assert Array(R, a, c) == jZ * jX @assert Array(S, b, c) == jY + jX diff --git a/examples/src/ctmrg_isotropic.jl b/examples/src/ctmrg_isotropic.jl index ccf43af56a..a4bbdc81a8 100644 --- a/examples/src/ctmrg_isotropic.jl +++ b/examples/src/ctmrg_isotropic.jl @@ -1,49 +1,49 @@ using ITensors -function ctmrg(T::ITensor, Cₗᵤ::ITensor, Aₗ::ITensor; χmax::Int, cutoff=0.0, nsteps::Int) - sₕ = commonind(T, Aₗ) - sᵥ = uniqueind(T, Aₗ, Aₗ'; plev=0) - lᵥ = commonind(Cₗᵤ, Aₗ) - lₕ = uniqueind(Cₗᵤ, Aₗ) - Aᵤ = replaceinds(Aₗ, lᵥ => lₕ, lᵥ' => lₕ', sₕ => sᵥ) - Cₗᵤ = dense(Cₗᵤ) - for i in 1:nsteps - ## Get the grown corner transfer matrix (CTM) - Cₗᵤ⁽¹⁾ = Aₗ * Cₗᵤ * Aᵤ * T - - ## Diagonalize the grown CTM - # TODO: replace with - # eigen(Cₗᵤ⁽¹⁾, "horiz" => "vert"; tags = "horiz" => "vert", kwargs...) - Cₗᵤ, Uᵥ = eigen( - Cₗᵤ⁽¹⁾, - (lₕ', sₕ'), - (lᵥ', sᵥ'); - ishermitian=true, - cutoff, - maxdim=χmax, - lefttags=tags(lₕ), - righttags=tags(lᵥ), - ) +function ctmrg(T::ITensor, Cₗᵤ::ITensor, Aₗ::ITensor; χmax::Int, cutoff = 0.0, nsteps::Int) + sₕ = commonind(T, Aₗ) + sᵥ = uniqueind(T, Aₗ, Aₗ'; plev = 0) + lᵥ = commonind(Cₗᵤ, Aₗ) + lₕ = uniqueind(Cₗᵤ, Aₗ) + Aᵤ = replaceinds(Aₗ, lᵥ => lₕ, lᵥ' => lₕ', sₕ => sᵥ) Cₗᵤ = dense(Cₗᵤ) - lᵥ = commonind(Cₗᵤ, Uᵥ) - lₕ = uniqueind(Cₗᵤ, Uᵥ) + for i in 1:nsteps + ## Get the grown corner transfer matrix (CTM) + Cₗᵤ⁽¹⁾ = Aₗ * Cₗᵤ * Aᵤ * T - # The renormalized CTM is the diagonal matrix of eigenvalues - # Normalize the CTM - Cₗ = Cₗᵤ * prime(dag(Cₗᵤ), lₕ) - normC = (Cₗ * dag(Cₗ))[]^(1 / 4) - Cₗᵤ = Cₗᵤ / normC + ## Diagonalize the grown CTM + # TODO: replace with + # eigen(Cₗᵤ⁽¹⁾, "horiz" => "vert"; tags = "horiz" => "vert", kwargs...) 
+ Cₗᵤ, Uᵥ = eigen( + Cₗᵤ⁽¹⁾, + (lₕ', sₕ'), + (lᵥ', sᵥ'); + ishermitian = true, + cutoff, + maxdim = χmax, + lefttags = tags(lₕ), + righttags = tags(lᵥ), + ) + Cₗᵤ = dense(Cₗᵤ) + lᵥ = commonind(Cₗᵤ, Uᵥ) + lₕ = uniqueind(Cₗᵤ, Uᵥ) - # Calculate the renormalized half row transfer matrix (HRTM) - Uᵥ = noprime(Uᵥ) - Aₗ = Aₗ * Uᵥ * T * dag(Uᵥ') - Aₗ = replaceinds(Aₗ, sₕ' => sₕ) + # The renormalized CTM is the diagonal matrix of eigenvalues + # Normalize the CTM + Cₗ = Cₗᵤ * prime(dag(Cₗᵤ), lₕ) + normC = (Cₗ * dag(Cₗ))[]^(1 / 4) + Cₗᵤ = Cₗᵤ / normC - # Normalize the HRTM - ACₗ = Aₗ * Cₗᵤ * prime(dag(Cₗᵤ)) - normA = √((ACₗ * dag(ACₗ))[]) - Aₗ = Aₗ / normA - Aᵤ = replaceinds(Aₗ, lᵥ => lₕ, lᵥ' => lₕ', sₕ => sᵥ) - end - return Cₗᵤ, Aₗ + # Calculate the renormalized half row transfer matrix (HRTM) + Uᵥ = noprime(Uᵥ) + Aₗ = Aₗ * Uᵥ * T * dag(Uᵥ') + Aₗ = replaceinds(Aₗ, sₕ' => sₕ) + + # Normalize the HRTM + ACₗ = Aₗ * Cₗᵤ * prime(dag(Cₗᵤ)) + normA = √((ACₗ * dag(ACₗ))[]) + Aₗ = Aₗ / normA + Aᵤ = replaceinds(Aₗ, lᵥ => lₕ, lᵥ' => lₕ', sₕ => sᵥ) + end + return Cₗᵤ, Aₗ end diff --git a/examples/src/trg.jl b/examples/src/trg.jl index 787bc0625c..803a7d192d 100644 --- a/examples/src/trg.jl +++ b/examples/src/trg.jl @@ -14,38 +14,38 @@ nsteps are the number of renormalization steps performed. The outputs are κ, the partition function per site, and the final renormalized ITensor T. """ -function trg(T::ITensor; χmax::Int, nsteps::Int, cutoff=0.0, svd_alg="divide_and_conquer") - sₕ, sᵥ = filterinds(T; plev=0) - @assert hassameinds((sₕ, sₕ', sᵥ, sᵥ'), T) - - # Keep track of the partition function per site - κ = 1.0 - for n in 1:nsteps - Fₕ, Fₕ′ = factorize( - T, (sₕ', sᵥ'); ortho="none", maxdim=χmax, cutoff, tags=tags(sₕ), svd_alg - ) - - s̃ₕ = commonind(Fₕ, Fₕ′) - Fₕ′ *= δ(dag(s̃ₕ), s̃ₕ') - - Fᵥ, Fᵥ′ = factorize( - T, (sₕ, sᵥ'); ortho="none", maxdim=χmax, cutoff, tags=tags(sᵥ), svd_alg - ) - - s̃ᵥ = commonind(Fᵥ, Fᵥ′) - Fᵥ′ *= δ(dag(s̃ᵥ), s̃ᵥ') - - T = - (Fₕ * δ(dag(sₕ'), sₕ)) * - (Fᵥ * δ(dag(sᵥ'), sᵥ)) * - (Fₕ′ * δ(dag(sₕ), sₕ')) * - (Fᵥ′ * δ(dag(sᵥ), sᵥ')) - - sₕ, sᵥ = s̃ₕ, s̃ᵥ - - trT = abs((T * δ(sₕ, sₕ') * δ(sᵥ, sᵥ'))[]) - T = T / trT - κ *= trT^(1 / 2^n) - end - return κ, T +function trg(T::ITensor; χmax::Int, nsteps::Int, cutoff = 0.0, svd_alg = "divide_and_conquer") + sₕ, sᵥ = filterinds(T; plev = 0) + @assert hassameinds((sₕ, sₕ', sᵥ, sᵥ'), T) + + # Keep track of the partition function per site + κ = 1.0 + for n in 1:nsteps + Fₕ, Fₕ′ = factorize( + T, (sₕ', sᵥ'); ortho = "none", maxdim = χmax, cutoff, tags = tags(sₕ), svd_alg + ) + + s̃ₕ = commonind(Fₕ, Fₕ′) + Fₕ′ *= δ(dag(s̃ₕ), s̃ₕ') + + Fᵥ, Fᵥ′ = factorize( + T, (sₕ, sᵥ'); ortho = "none", maxdim = χmax, cutoff, tags = tags(sᵥ), svd_alg + ) + + s̃ᵥ = commonind(Fᵥ, Fᵥ′) + Fᵥ′ *= δ(dag(s̃ᵥ), s̃ᵥ') + + T = + (Fₕ * δ(dag(sₕ'), sₕ)) * + (Fᵥ * δ(dag(sᵥ'), sᵥ)) * + (Fₕ′ * δ(dag(sₕ), sₕ')) * + (Fᵥ′ * δ(dag(sᵥ), sᵥ')) + + sₕ, sᵥ = s̃ₕ, s̃ᵥ + + trT = abs((T * δ(sₕ, sₕ') * δ(sᵥ, sᵥ'))[]) + T = T / trT + κ *= trT^(1 / 2^n) + end + return κ, T end diff --git a/examples/trg/run.jl b/examples/trg/run.jl index fe0b92fdc1..3411bcec3e 100644 --- a/examples/trg/run.jl +++ b/examples/trg/run.jl @@ -14,7 +14,7 @@ T = ising_mpo(sₕ, sᵥ, β) χmax = 20 nsteps = 20 -κ, T = trg(T; χmax=χmax, nsteps=nsteps, svd_alg="divide_and_conquer") +κ, T = trg(T; χmax = χmax, nsteps = nsteps, svd_alg = "divide_and_conquer") κ_exact = exp(-β * ising_free_energy(β)) @show κ, κ_exact diff --git a/ext/ITensorsChainRulesCoreExt/LazyApply/LazyApply.jl 
b/ext/ITensorsChainRulesCoreExt/LazyApply/LazyApply.jl index 6b3011af54..44e7b0a24f 100644 --- a/ext/ITensorsChainRulesCoreExt/LazyApply/LazyApply.jl +++ b/ext/ITensorsChainRulesCoreExt/LazyApply/LazyApply.jl @@ -1,16 +1,16 @@ function rrule(::Type{Applied}, x1, x2::Tuple, x3::NamedTuple) - y = Applied(x1, x2, x3) - function Applied_pullback(ȳ) - x̄1 = ȳ.f - x̄2 = ȳ.args - x̄3 = ȳ.kwargs - return (NoTangent(), x̄1, x̄2, x̄3) - end - function Applied_pullback(ȳ::Vector) - x̄1 = NoTangent() - x̄2 = (ȳ,) - x̄3 = NoTangent() - return (NoTangent(), x̄1, x̄2, x̄3) - end - return y, Applied_pullback + y = Applied(x1, x2, x3) + function Applied_pullback(ȳ) + x̄1 = ȳ.f + x̄2 = ȳ.args + x̄3 = ȳ.kwargs + return (NoTangent(), x̄1, x̄2, x̄3) + end + function Applied_pullback(ȳ::Vector) + x̄1 = NoTangent() + x̄2 = (ȳ,) + x̄3 = NoTangent() + return (NoTangent(), x̄1, x̄2, x̄3) + end + return y, Applied_pullback end diff --git a/ext/ITensorsChainRulesCoreExt/NDTensors/dense.jl b/ext/ITensorsChainRulesCoreExt/NDTensors/dense.jl index 4ff308ae1f..b183cad942 100644 --- a/ext/ITensorsChainRulesCoreExt/NDTensors/dense.jl +++ b/ext/ITensorsChainRulesCoreExt/NDTensors/dense.jl @@ -1,8 +1,8 @@ function rrule(f::Type{<:Dense}, x1::AbstractVector) - y = f(x1) - function Dense_pullback(ȳ) - x̄1 = ȳ.data - return (NoTangent(), x̄1) - end - return y, Dense_pullback + y = f(x1) + function Dense_pullback(ȳ) + x̄1 = ȳ.data + return (NoTangent(), x̄1) + end + return y, Dense_pullback end diff --git a/ext/ITensorsChainRulesCoreExt/NDTensors/tensor.jl b/ext/ITensorsChainRulesCoreExt/NDTensors/tensor.jl index 06f9484bb4..ccbacf054f 100644 --- a/ext/ITensorsChainRulesCoreExt/NDTensors/tensor.jl +++ b/ext/ITensorsChainRulesCoreExt/NDTensors/tensor.jl @@ -3,32 +3,32 @@ using ITensors.NDTensors using ITensors.NDTensors: AllowAlias function rrule(f::Type{<:Tensor}, x1::AllowAlias, x2::TensorStorage, x3::Tuple) - y = f(x1, x2, x3) - function Tensor_pullback(ȳ) - x̄1 = NoTangent() - x̄2 = ȳ.storage - x̄3 = NoTangent() - return (NoTangent(), x̄1, x̄2, x̄3) - end - return y, Tensor_pullback + y = f(x1, x2, x3) + function Tensor_pullback(ȳ) + x̄1 = NoTangent() + x̄2 = ȳ.storage + x̄3 = NoTangent() + return (NoTangent(), x̄1, x̄2, x̄3) + end + return y, Tensor_pullback end function rrule(::typeof(tensor), x1::TensorStorage, x2::Tuple) - y = tensor(x1, x2) - function tensor_pullback(ȳ) - x̄1 = storage(ȳ) - x̄2 = NoTangent() - return (NoTangent(), x̄1, x̄2) - end - return y, tensor_pullback + y = tensor(x1, x2) + function tensor_pullback(ȳ) + x̄1 = storage(ȳ) + x̄2 = NoTangent() + return (NoTangent(), x̄1, x̄2) + end + return y, tensor_pullback end function rrule(f::Type{<:Tensor}, x1::TensorStorage, x2::Tuple) - y = f(x1, x2) - function tensor_pullback(ȳ) - x̄1 = copy(storage(x1)) - x̄2 = NoTangent() - return (NoTangent(), x̄1, x̄2) - end - return y, tensor_pullback + y = f(x1, x2) + function tensor_pullback(ȳ) + x̄1 = copy(storage(x1)) + x̄2 = NoTangent() + return (NoTangent(), x̄1, x̄2) + end + return y, tensor_pullback end diff --git a/ext/ITensorsChainRulesCoreExt/indexset.jl b/ext/ITensorsChainRulesCoreExt/indexset.jl index 1ce970cf33..3933733e8e 100644 --- a/ext/ITensorsChainRulesCoreExt/indexset.jl +++ b/ext/ITensorsChainRulesCoreExt/indexset.jl @@ -1,30 +1,30 @@ for fname in ( - :prime, - :setprime, - :noprime, - :replaceprime, - :swapprime, - :addtags, - :removetags, - :replacetags, - :settags, - :swaptags, - :replaceind, - :replaceinds, - :swapind, - :swapinds, -) - @eval begin - function rrule(f::typeof($fname), x::ITensor, 
a...; kwargs...) - y = f(x, a...; kwargs...) - function f_pullback(ȳ) - x̄ = replaceinds(unthunk(ȳ), inds(y) => inds(x)) - ā = map_notangent(a) - return (NoTangent(), x̄, ā...) - end - return y, f_pullback + :prime, + :setprime, + :noprime, + :replaceprime, + :swapprime, + :addtags, + :removetags, + :replacetags, + :settags, + :swaptags, + :replaceind, + :replaceinds, + :swapind, + :swapinds, + ) + @eval begin + function rrule(f::typeof($fname), x::ITensor, a...; kwargs...) + y = f(x, a...; kwargs...) + function f_pullback(ȳ) + x̄ = replaceinds(unthunk(ȳ), inds(y) => inds(x)) + ā = map_notangent(a) + return (NoTangent(), x̄, ā...) + end + return y, f_pullback + end end - end end rrule(::typeof(adjoint), x::ITensor) = rrule(prime, x) diff --git a/ext/ITensorsChainRulesCoreExt/itensor.jl b/ext/ITensorsChainRulesCoreExt/itensor.jl index 26d00baf18..3e916741ce 100644 --- a/ext/ITensorsChainRulesCoreExt/itensor.jl +++ b/ext/ITensorsChainRulesCoreExt/itensor.jl @@ -1,204 +1,204 @@ function rrule(::typeof(getindex), x::ITensor, I...) - y = getindex(x, I...) - function getindex_pullback(ȳ) - # TODO: add definition `ITensor(::Tuple{}) = ITensor()` - # to ITensors.jl so no splatting is needed here. - x̄ = ITensor(inds(x)...) - x̄[I...] = unthunk(ȳ) - Ī = map_notangent(I) - return (NoTangent(), x̄, Ī...) - end - return y, getindex_pullback + y = getindex(x, I...) + function getindex_pullback(ȳ) + # TODO: add definition `ITensor(::Tuple{}) = ITensor()` + # to ITensors.jl so no splatting is needed here. + x̄ = ITensor(inds(x)...) + x̄[I...] = unthunk(ȳ) + Ī = map_notangent(I) + return (NoTangent(), x̄, Ī...) + end + return y, getindex_pullback end # Specialized version in order to avoid call to `setindex!` # within the pullback, should be better for taking higher order # derivatives in Zygote. 
function rrule(::typeof(getindex), x::ITensor) - y = x[] - function getindex_pullback(ȳ) - x̄ = ITensor(unthunk(ȳ)) - return (NoTangent(), x̄) - end - return y, getindex_pullback + y = x[] + function getindex_pullback(ȳ) + x̄ = ITensor(unthunk(ȳ)) + return (NoTangent(), x̄) + end + return y, getindex_pullback end function rrule(::Type{ITensor}, x1::AllowAlias, x2::TensorStorage, x3) - y = ITensor(x1, x2, x3) - function ITensor_pullback(ȳ) - x̄1 = NoTangent() - x̄2 = unthunk(ȳ).tensor.storage - x̄3 = NoTangent() - return (NoTangent(), x̄1, x̄2, x̄3) - end - return y, ITensor_pullback + y = ITensor(x1, x2, x3) + function ITensor_pullback(ȳ) + x̄1 = NoTangent() + x̄2 = unthunk(ȳ).tensor.storage + x̄3 = NoTangent() + return (NoTangent(), x̄1, x̄2, x̄3) + end + return y, ITensor_pullback end function rrule(::Type{ITensor}, x1::AllowAlias, x2::Tensor) - y = ITensor(x1, x2) - function ITensor_pullback(ȳ) - x̄1 = NoTangent() - x̄2 = Tensor(x1, ȳ) - return (NoTangent(), x̄1, x̄2) - end - return y, ITensor_pullback + y = ITensor(x1, x2) + function ITensor_pullback(ȳ) + x̄1 = NoTangent() + x̄2 = Tensor(x1, ȳ) + return (NoTangent(), x̄1, x̄2) + end + return y, ITensor_pullback end function rrule(::Type{ITensor}, x1::Tensor) - y = ITensor(x1) - function ITensor_pullback(ȳ) - x̄1 = Tensor(ȳ) - return (NoTangent(), x̄1) - end - return y, ITensor_pullback + y = ITensor(x1) + function ITensor_pullback(ȳ) + x̄1 = Tensor(ȳ) + return (NoTangent(), x̄1) + end + return y, ITensor_pullback end function rrule(::typeof(itensor), x1::Tensor) - y = itensor(x1) - function itensor_pullback(ȳ) - x̄1 = tensor(ȳ) - return (NoTangent(), x̄1) - end - return y, itensor_pullback + y = itensor(x1) + function itensor_pullback(ȳ) + x̄1 = tensor(ȳ) + return (NoTangent(), x̄1) + end + return y, itensor_pullback end function rrule(f::Type{<:Tensor}, x1::AllowAlias, x2::ITensor) - y = f(x1, x2) - function Tensor_pullback(ȳ) - x̄1 = NoTangent() - x̄2 = ITensor(x1, ȳ) - return (NoTangent(), x̄1, x̄2) - end - return y, Tensor_pullback + y = f(x1, x2) + function Tensor_pullback(ȳ) + x̄1 = NoTangent() + x̄2 = ITensor(x1, ȳ) + return (NoTangent(), x̄1, x̄2) + end + return y, Tensor_pullback end function rrule(f::Type{<:Tensor}, x1::ITensor) - y = f(x1) - function Tensor_pullback(ȳ) - x̄1 = ITensor(ȳ) - return (NoTangent(), x̄1) - end - return y, Tensor_pullback + y = f(x1) + function Tensor_pullback(ȳ) + x̄1 = ITensor(ȳ) + return (NoTangent(), x̄1) + end + return y, Tensor_pullback end function rrule(::typeof(tensor), x1::ITensor) - y = tensor(x1) - function tensor_pullback(ȳ) - x̄1 = ITensor(typeof(storage(x1))(unthunk(ȳ).storage.data), inds(x1)) - return (NoTangent(), x̄1) - end - return y, tensor_pullback + y = tensor(x1) + function tensor_pullback(ȳ) + x̄1 = ITensor(typeof(storage(x1))(unthunk(ȳ).storage.data), inds(x1)) + return (NoTangent(), x̄1) + end + return y, tensor_pullback end # Special case for contracting a pair of ITensors function rrule(::typeof(contract), x1::ITensor, x2::ITensor) - project_x1 = ProjectTo(x1) - project_x2 = ProjectTo(x2) - function contract_pullback(ȳ) - x̄1 = project_x1(ȳ * dag(x2)) - x̄2 = project_x2(dag(x1) * ȳ) - return (NoTangent(), x̄1, x̄2) - end - return x1 * x2, contract_pullback + project_x1 = ProjectTo(x1) + project_x2 = ProjectTo(x2) + function contract_pullback(ȳ) + x̄1 = project_x1(ȳ * dag(x2)) + x̄2 = project_x2(dag(x1) * ȳ) + return (NoTangent(), x̄1, x̄2) + end + return x1 * x2, contract_pullback end @non_differentiable ITensors.optimal_contraction_sequence(::Any) function 
rrule(::typeof(*), x1::Number, x2::ITensor) - project_x1 = ProjectTo(x1) - project_x2 = ProjectTo(x2) - function contract_pullback(ȳ) - x̄1 = project_x1((ȳ * dag(x2))[]) - x̄2 = project_x2(dag(x1) * ȳ) - return (NoTangent(), x̄1, x̄2) - end - return x1 * x2, contract_pullback + project_x1 = ProjectTo(x1) + project_x2 = ProjectTo(x2) + function contract_pullback(ȳ) + x̄1 = project_x1((ȳ * dag(x2))[]) + x̄2 = project_x2(dag(x1) * ȳ) + return (NoTangent(), x̄1, x̄2) + end + return x1 * x2, contract_pullback end function rrule(::typeof(*), x1::ITensor, x2::Number) - project_x1 = ProjectTo(x1) - project_x2 = ProjectTo(x2) - function contract_pullback(ȳ) - x̄1 = project_x1(ȳ * dag(x2)) - x̄2 = project_x2((dag(x1) * ȳ)[]) - return (NoTangent(), x̄1, x̄2) - end - return x1 * x2, contract_pullback + project_x1 = ProjectTo(x1) + project_x2 = ProjectTo(x2) + function contract_pullback(ȳ) + x̄1 = project_x1(ȳ * dag(x2)) + x̄2 = project_x2((dag(x1) * ȳ)[]) + return (NoTangent(), x̄1, x̄2) + end + return x1 * x2, contract_pullback end function rrule(::typeof(+), x1::ITensor, x2::ITensor) - function add_pullback(ȳ) - return (NoTangent(), ȳ, ȳ) - end - return x1 + x2, add_pullback + function add_pullback(ȳ) + return (NoTangent(), ȳ, ȳ) + end + return x1 + x2, add_pullback end function rrule(::typeof(-), x1::ITensor, x2::ITensor) - function subtract_pullback(ȳ) - return (NoTangent(), ȳ, -ȳ) - end - return x1 - x2, subtract_pullback + function subtract_pullback(ȳ) + return (NoTangent(), ȳ, -ȳ) + end + return x1 - x2, subtract_pullback end function rrule(::typeof(-), x::ITensor) - function minus_pullback(ȳ) - return (NoTangent(), -ȳ) - end - return -x, minus_pullback + function minus_pullback(ȳ) + return (NoTangent(), -ȳ) + end + return -x, minus_pullback end function rrule(::typeof(itensor), x::Array, a...) - function itensor_pullback(ȳ) - uȳ = permute(unthunk(ȳ), a...) - x̄ = reshape(array(uȳ), size(x)) - ā = map_notangent(a) - return (NoTangent(), x̄, ā...) - end - return itensor(x, a...), itensor_pullback + function itensor_pullback(ȳ) + uȳ = permute(unthunk(ȳ), a...) + x̄ = reshape(array(uȳ), size(x)) + ā = map_notangent(a) + return (NoTangent(), x̄, ā...) + end + return itensor(x, a...), itensor_pullback end function rrule(::Type{ITensor}, x::Array{<:Number}, a...) - function ITensor_pullback(ȳ) - # TODO: define `Array(::ITensor)` directly - uȳ = Array(unthunk(ȳ), a...) - x̄ = reshape(uȳ, size(x)) - ā = map_notangent(a) - return (NoTangent(), x̄, ā...) - end - return ITensor(x, a...), ITensor_pullback + function ITensor_pullback(ȳ) + # TODO: define `Array(::ITensor)` directly + uȳ = Array(unthunk(ȳ), a...) + x̄ = reshape(uȳ, size(x)) + ā = map_notangent(a) + return (NoTangent(), x̄, ā...) + end + return ITensor(x, a...), ITensor_pullback end function rrule(::Type{ITensor}, x::Number) - function ITensor_pullback(ȳ) - x̄ = ȳ[] - return (NoTangent(), x̄) - end - return ITensor(x), ITensor_pullback + function ITensor_pullback(ȳ) + x̄ = ȳ[] + return (NoTangent(), x̄) + end + return ITensor(x), ITensor_pullback end function rrule(::typeof(dag), x::ITensor) - function dag_pullback(ȳ) - x̄ = dag(unthunk(ȳ)) - return (NoTangent(), x̄) - end - return dag(x), dag_pullback + function dag_pullback(ȳ) + x̄ = dag(unthunk(ȳ)) + return (NoTangent(), x̄) + end + return dag(x), dag_pullback end function rrule(::typeof(permute), x::ITensor, a...) - y = permute(x, a...) - function permute_pullback(ȳ) - x̄ = permute(unthunk(ȳ), inds(x)) - ā = map_notangent(a) - return (NoTangent(), x̄, ā...) 
- end - return y, permute_pullback + y = permute(x, a...) + function permute_pullback(ȳ) + x̄ = permute(unthunk(ȳ), inds(x)) + ā = map_notangent(a) + return (NoTangent(), x̄, ā...) + end + return y, permute_pullback end # Needed because by default it was calling the generic # `rrule` for `tr` inside ChainRules. # TODO: Raise an issue with ChainRules. function rrule(config::RuleConfig{>:HasReverseMode}, ::typeof(tr), x::ITensor; kwargs...) - return rrule_via_ad(config, ITensors._tr, x; kwargs...) + return rrule_via_ad(config, ITensors._tr, x; kwargs...) end @non_differentiable combiner(::Indices) diff --git a/ext/ITensorsChainRulesCoreExt/non_differentiable.jl b/ext/ITensorsChainRulesCoreExt/non_differentiable.jl index 42af2f63a9..8f69ef5010 100644 --- a/ext/ITensorsChainRulesCoreExt/non_differentiable.jl +++ b/ext/ITensorsChainRulesCoreExt/non_differentiable.jl @@ -1,6 +1,6 @@ using ChainRulesCore: @non_differentiable using ITensors: - ITensors, Index, addtags, commoninds, dag, delta, inds, noncommoninds, onehot, uniqueinds + ITensors, Index, addtags, commoninds, dag, delta, inds, noncommoninds, onehot, uniqueinds using ITensors.TagSets: TagSet @non_differentiable map_notangent(::Any) diff --git a/ext/ITensorsChainRulesCoreExt/projection.jl b/ext/ITensorsChainRulesCoreExt/projection.jl index 443d6e5fcb..91359dc2f6 100644 --- a/ext/ITensorsChainRulesCoreExt/projection.jl +++ b/ext/ITensorsChainRulesCoreExt/projection.jl @@ -1,10 +1,10 @@ function ChainRulesCore.ProjectTo(x::ITensor) - return ProjectTo{ITensor}(; element=ProjectTo(zero(eltype(x)))) + return ProjectTo{ITensor}(; element = ProjectTo(zero(eltype(x)))) end function (project::ProjectTo{ITensor})(dx::ITensor) - S = eltype(dx) - T = ChainRulesCore.project_type(project.element) - dy = S <: T ? dx : map(project.element, dx) - return dy + S = eltype(dx) + T = ChainRulesCore.project_type(project.element) + dy = S <: T ? dx : map(project.element, dx) + return dy end diff --git a/ext/ITensorsChainRulesCoreExt/smallstrings.jl b/ext/ITensorsChainRulesCoreExt/smallstrings.jl index 7b17476f68..949dcd3e4f 100644 --- a/ext/ITensorsChainRulesCoreExt/smallstrings.jl +++ b/ext/ITensorsChainRulesCoreExt/smallstrings.jl @@ -1,12 +1,12 @@ using ITensors: ITensors include( - joinpath( - pkgdir(ITensors), - "src", - "lib", - "SmallStrings", - "ext", - "SmallStringsChainRulesCoreExt", - "SmallStringsChainRulesCoreExt.jl", - ), + joinpath( + pkgdir(ITensors), + "src", + "lib", + "SmallStrings", + "ext", + "SmallStringsChainRulesCoreExt", + "SmallStringsChainRulesCoreExt.jl", + ), ) diff --git a/ext/ITensorsChainRulesCoreExt/zygoterules.jl b/ext/ITensorsChainRulesCoreExt/zygoterules.jl index 65e0996e9c..1a1eb41266 100644 --- a/ext/ITensorsChainRulesCoreExt/zygoterules.jl +++ b/ext/ITensorsChainRulesCoreExt/zygoterules.jl @@ -4,7 +4,7 @@ using ZygoteRules: @adjoint # which currently doesn't work by overloading `ChainRulesCore.rrule` # since it is defined in `Zygote`, which takes precedent. 
@adjoint function Base.adjoint(x::ITensor) - y, adjoint_rrule_pullback = rrule(adjoint, x) - adjoint_pullback(ȳ) = Base.tail(adjoint_rrule_pullback(ȳ)) - return y, adjoint_pullback + y, adjoint_rrule_pullback = rrule(adjoint, x) + adjoint_pullback(ȳ) = Base.tail(adjoint_rrule_pullback(ȳ)) + return y, adjoint_pullback end diff --git a/ext/ITensorsHDF5Ext/qn.jl b/ext/ITensorsHDF5Ext/qn.jl index d7ae10d5c4..103a49d63c 100644 --- a/ext/ITensorsHDF5Ext/qn.jl +++ b/ext/ITensorsHDF5Ext/qn.jl @@ -1,26 +1,26 @@ using HDF5: HDF5, attributes, create_group, open_group, read, write using ITensors: maxQNs, modulus, name, QN, QNVal, val -function HDF5.write(parent::Union{HDF5.File,HDF5.Group}, gname::AbstractString, q::QN) - g = create_group(parent, gname) - attributes(g)["type"] = "QN" - attributes(g)["version"] = 1 - names = [String(name(q[n])) for n in 1:maxQNs] - vals = [val(q[n]) for n in 1:maxQNs] - mods = [modulus(q[n]) for n in 1:maxQNs] - write(g, "names", names) - write(g, "vals", vals) - return write(g, "mods", mods) +function HDF5.write(parent::Union{HDF5.File, HDF5.Group}, gname::AbstractString, q::QN) + g = create_group(parent, gname) + attributes(g)["type"] = "QN" + attributes(g)["version"] = 1 + names = [String(name(q[n])) for n in 1:maxQNs] + vals = [val(q[n]) for n in 1:maxQNs] + mods = [modulus(q[n]) for n in 1:maxQNs] + write(g, "names", names) + write(g, "vals", vals) + return write(g, "mods", mods) end -function HDF5.read(parent::Union{HDF5.File,HDF5.Group}, name::AbstractString, ::Type{QN}) - g = open_group(parent, name) - if read(attributes(g)["type"]) != "QN" - error("HDF5 group or file does not contain QN data") - end - names = read(g, "names") - vals = read(g, "vals") - mods = read(g, "mods") - mqn = ntuple(n -> QNVal(names[n], vals[n], mods[n]), maxQNs) - return QN(mqn) +function HDF5.read(parent::Union{HDF5.File, HDF5.Group}, name::AbstractString, ::Type{QN}) + g = open_group(parent, name) + if read(attributes(g)["type"]) != "QN" + error("HDF5 group or file does not contain QN data") + end + names = read(g, "names") + vals = read(g, "vals") + mods = read(g, "mods") + mqn = ntuple(n -> QNVal(names[n], vals[n], mods[n]), maxQNs) + return QN(mqn) end diff --git a/ext/ITensorsHDF5Ext/tagset.jl b/ext/ITensorsHDF5Ext/tagset.jl index d838d90b27..1daaac3a84 100644 --- a/ext/ITensorsHDF5Ext/tagset.jl +++ b/ext/ITensorsHDF5Ext/tagset.jl @@ -1,20 +1,20 @@ using HDF5: HDF5, attributes, create_group, open_group, read, write using ITensors.TagSets: TagSet, tagstring -function HDF5.write(parent::Union{HDF5.File,HDF5.Group}, name::AbstractString, T::TagSet) - g = create_group(parent, name) - attributes(g)["type"] = "TagSet" - attributes(g)["version"] = 1 - return write(g, "tags", tagstring(T)) +function HDF5.write(parent::Union{HDF5.File, HDF5.Group}, name::AbstractString, T::TagSet) + g = create_group(parent, name) + attributes(g)["type"] = "TagSet" + attributes(g)["version"] = 1 + return write(g, "tags", tagstring(T)) end function HDF5.read( - parent::Union{HDF5.File,HDF5.Group}, name::AbstractString, ::Type{TagSet} -) - g = open_group(parent, name) - if read(attributes(g)["type"]) != "TagSet" - error("HDF5 group '$name' does not contain TagSet data") - end - tstring = read(g, "tags") - return TagSet(tstring) + parent::Union{HDF5.File, HDF5.Group}, name::AbstractString, ::Type{TagSet} + ) + g = open_group(parent, name) + if read(attributes(g)["type"]) != "TagSet" + error("HDF5 group '$name' does not contain TagSet data") + end + tstring = read(g, "tags") + return TagSet(tstring) end diff 
--git a/ext/ITensorsTensorOperationsExt/ITensorsTensorOperationsExt.jl b/ext/ITensorsTensorOperationsExt/ITensorsTensorOperationsExt.jl index c437a573ea..a157bbfd3f 100644 --- a/ext/ITensorsTensorOperationsExt/ITensorsTensorOperationsExt.jl +++ b/ext/ITensorsTensorOperationsExt/ITensorsTensorOperationsExt.jl @@ -4,12 +4,12 @@ using ITensors: ITensors, ITensor, dim, inds using TensorOperations: optimaltree function ITensors.optimal_contraction_sequence( - As::Union{Vector{<:ITensor},Tuple{Vararg{ITensor}}} -) - network = collect.(inds.(As)) - inds_to_dims = Dict(i => Float64(dim(i)) for i in unique(reduce(vcat, network))) - seq, _ = optimaltree(network, inds_to_dims) - return seq + As::Union{Vector{<:ITensor}, Tuple{Vararg{ITensor}}} + ) + network = collect.(inds.(As)) + inds_to_dims = Dict(i => Float64(dim(i)) for i in unique(reduce(vcat, network))) + seq, _ = optimaltree(network, inds_to_dims) + return seq end end diff --git a/ext/ITensorsVectorInterfaceExt/ITensorsVectorInterfaceExt.jl b/ext/ITensorsVectorInterfaceExt/ITensorsVectorInterfaceExt.jl index fa2e03ccdd..9d9a0114bd 100644 --- a/ext/ITensorsVectorInterfaceExt/ITensorsVectorInterfaceExt.jl +++ b/ext/ITensorsVectorInterfaceExt/ITensorsVectorInterfaceExt.jl @@ -3,114 +3,114 @@ using ITensors: ITensors, ITensor using VectorInterface: VectorInterface function VectorInterface.add(a::ITensor, b::ITensor) - return a + b + return a + b end function VectorInterface.add!(a::ITensor, b::ITensor) - a .= a .+ b - return a + a .= a .+ b + return a end function VectorInterface.add!!(a::ITensor, b::ITensor) - if promote_type(eltype(a), eltype(b)) <: eltype(a) - VectorInterface.add!(a, b) - else - a = VectorInterface.add(a, b) - end - return a + if promote_type(eltype(a), eltype(b)) <: eltype(a) + VectorInterface.add!(a, b) + else + a = VectorInterface.add(a, b) + end + return a end function VectorInterface.add(a::ITensor, b::ITensor, α::Number) - return a + b * α + return a + b * α end function VectorInterface.add!(a::ITensor, b::ITensor, α::Number) - a .= a .+ b .* α - return a + a .= a .+ b .* α + return a end function VectorInterface.add!!(a::ITensor, b::ITensor, α::Number) - if promote_type(eltype(a), eltype(b), typeof(α)) <: eltype(a) - VectorInterface.add!(a, b, α) - else - a = VectorInterface.add(a, b, α) - end - return a + if promote_type(eltype(a), eltype(b), typeof(α)) <: eltype(a) + VectorInterface.add!(a, b, α) + else + a = VectorInterface.add(a, b, α) + end + return a end function VectorInterface.add(a::ITensor, b::ITensor, α::Number, β::Number) - return a * β + b * α + return a * β + b * α end function VectorInterface.add!(a::ITensor, b::ITensor, α::Number, β::Number) - a .= a .* β .+ b .* α - return a + a .= a .* β .+ b .* α + return a end function VectorInterface.add!!(a::ITensor, b::ITensor, α::Number, β::Number) - if promote_type(eltype(a), eltype(b), typeof(α), typeof(β)) <: eltype(a) - VectorInterface.add!(a, b, α, β) - else - a = VectorInterface.add(a, b, α, β) - end - return a + if promote_type(eltype(a), eltype(b), typeof(α), typeof(β)) <: eltype(a) + VectorInterface.add!(a, b, α, β) + else + a = VectorInterface.add(a, b, α, β) + end + return a end function VectorInterface.inner(a::ITensor, b::ITensor) - return ITensors.inner(a, b) + return ITensors.inner(a, b) end function VectorInterface.scalartype(a::ITensor) - return ITensors.scalartype(a) + return ITensors.scalartype(a) end # Circumvent issue that `VectorInterface.jl` computes # the scalartype in the type domain, which isn't known # for ITensors. 
function VectorInterface.scalartype(a::AbstractArray{ITensor}) - # Like the implementation of `LinearAlgebra.promote_leaf_eltypes`: - # https://github.com/JuliaLang/LinearAlgebra.jl/blob/e7da19f2764ba36bd0a9eb8ec67dddce19d87114/src/generic.jl#L1933 - return mapreduce(VectorInterface.scalartype, promote_type, a; init=Bool) + # Like the implementation of `LinearAlgebra.promote_leaf_eltypes`: + # https://github.com/JuliaLang/LinearAlgebra.jl/blob/e7da19f2764ba36bd0a9eb8ec67dddce19d87114/src/generic.jl#L1933 + return mapreduce(VectorInterface.scalartype, promote_type, a; init = Bool) end function VectorInterface.scale(a::ITensor, α::Number) - return a * α + return a * α end function VectorInterface.scale!(a::ITensor, α::Number) - a .= a .* α - return a + a .= a .* α + return a end function VectorInterface.scale!!(a::ITensor, α::Number) - if promote_type(eltype(a), typeof(α)) <: eltype(a) - VectorInterface.scale!(a, α) - else - a = VectorInterface.scale(a, α) - end - return a + if promote_type(eltype(a), typeof(α)) <: eltype(a) + VectorInterface.scale!(a, α) + else + a = VectorInterface.scale(a, α) + end + return a end function VectorInterface.scale!(a_dest::ITensor, a_src::ITensor, α::Number) - a_dest .= a_src .* α - return a_dest + a_dest .= a_src .* α + return a_dest end function VectorInterface.scale!!(a_dest::ITensor, a_src::ITensor, α::Number) - if promote_type(eltype(a_dest), eltype(a_src), typeof(α)) <: eltype(a_dest) - VectorInterface.scale!(a_dest, a_src, α) - else - a_dest = VectorInterface.scale(a_src, α) - end - return a_dest + if promote_type(eltype(a_dest), eltype(a_src), typeof(α)) <: eltype(a_dest) + VectorInterface.scale!(a_dest, a_src, α) + else + a_dest = VectorInterface.scale(a_src, α) + end + return a_dest end function VectorInterface.zerovector(a::ITensor, type::Type{<:Number}) - a′ = similar(a, type) - VectorInterface.zerovector!(a′) - return a′ + a′ = similar(a, type) + VectorInterface.zerovector!(a′) + return a′ end function VectorInterface.zerovector!(a::ITensor) - a .= zero(eltype(a)) - return a + a .= zero(eltype(a)) + return a end function VectorInterface.zerovector!!(a::ITensor, type::Type{<:Number}) - if type === eltype(a) - VectorInterface.zerovector!(a) - else - a = VectorInterface.zerovector(a, type) - end - return a + if type === eltype(a) + VectorInterface.zerovector!(a) + else + a = VectorInterface.zerovector(a, type) + end + return a end end diff --git a/ext/ITensorsZygoteRulesExt/itensors.jl b/ext/ITensorsZygoteRulesExt/itensors.jl index 58562a7cd0..ff9144ef25 100644 --- a/ext/ITensorsZygoteRulesExt/itensors.jl +++ b/ext/ITensorsZygoteRulesExt/itensors.jl @@ -6,7 +6,7 @@ using ZygoteRules: @adjoint # which currently doesn't work by overloading `ChainRulesCore.rrule` # since it is defined in `Zygote`, which takes precedent. 
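
The `add!!` and `scale!!` methods in the `ITensorsVectorInterfaceExt` hunk above all follow one pattern: mutate in place only when `promote_type` says the destination element type can hold the result, and otherwise fall back to the out-of-place version. A minimal standalone sketch of that dispatch logic on plain arrays, for illustration only; `maybe_scale!!` is a made-up name and not part of the extension:

    # Scale `a` by `α` in place when the result still fits in eltype(a),
    # otherwise allocate a new array with the promoted element type.
    function maybe_scale!!(a::AbstractArray, α::Number)
        if promote_type(eltype(a), typeof(α)) <: eltype(a)
            a .= a .* α   # in place, e.g. a Float64 array scaled by an Int
            return a
        else
            return a .* α # out of place, e.g. a Float64 array scaled by im
        end
    end

    x = [1.0, 2.0]
    maybe_scale!!(x, 3) === x    # true: the same array is mutated and returned
    maybe_scale!!(x, im) === x   # false: a new Complex{Float64} array is returned
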
@adjoint function Base.adjoint(x::ITensor) - y, adjoint_rrule_pullback = ChainRulesCore.rrule(adjoint, x) - adjoint_pullback(ȳ) = Base.tail(adjoint_rrule_pullback(ȳ)) - return y, adjoint_pullback + y, adjoint_rrule_pullback = ChainRulesCore.rrule(adjoint, x) + adjoint_pullback(ȳ) = Base.tail(adjoint_rrule_pullback(ȳ)) + return y, adjoint_pullback end diff --git a/src/ITensors.jl b/src/ITensors.jl index 64c729dcb9..884232e436 100644 --- a/src/ITensors.jl +++ b/src/ITensors.jl @@ -81,19 +81,19 @@ include("val.jl") export val include("lib/QuantumNumbers/src/QuantumNumbers.jl") using .QuantumNumbers: - Arrow, - In, - Neither, - Out, - QN, - QNVal, - hasname, - have_same_mods, - have_same_qns, - isactive, - maxQNs, - modulus, - nactive + Arrow, + In, + Neither, + Out, + QN, + QNVal, + hasname, + have_same_mods, + have_same_qns, + isactive, + maxQNs, + modulus, + nactive export QN, isactive, modulus include("symmetrystyle.jl") include("index.jl") @@ -108,25 +108,25 @@ include("tensor_operations/matrix_algebra.jl") include("tensor_operations/permutations.jl") include("lib/SiteTypes/src/SiteTypes.jl") using .SiteTypes: - SiteTypes, - OpName, - SiteType, - StateName, - TagType, - ValName, - @OpName_str, - @SiteType_str, - @StateName_str, - @TagType_str, - @ValName_str, - alias, - has_fermion_string, - op, - op!, - ops, - siteind, - siteinds, - state + SiteTypes, + OpName, + SiteType, + StateName, + TagType, + ValName, + @OpName_str, + @SiteType_str, + @StateName_str, + @TagType_str, + @ValName_str, + alias, + has_fermion_string, + op, + op!, + ops, + siteind, + siteinds, + state include("lib/ITensorsSiteTypesExt/src/ITensorsSiteTypesExt.jl") include("broadcast.jl") include("tensor_operations/matrix_decomposition.jl") @@ -145,12 +145,12 @@ include("lib/ITensorVisualizationCore/src/ITensorVisualizationCore.jl") # TODO: `using .ITensorVisualizationCore: ITensorVisualizationCore, ...`. 
using .ITensorVisualizationCore using .ITensorVisualizationCore: - @visualize, - @visualize!, - @visualize_noeval, - @visualize_noeval!, - @visualize_sequence, - @visualize_sequence_noeval + @visualize, + @visualize!, + @visualize_noeval, + @visualize_noeval!, + @visualize_sequence, + @visualize_sequence_noeval include("deprecated.jl") include("argsdict/argsdict.jl") include("packagecompile/compile.jl") diff --git a/src/exports.jl b/src/exports.jl index 803312ba58..ce953f8dcd 100644 --- a/src/exports.jl +++ b/src/exports.jl @@ -1,197 +1,197 @@ export - # From external modules - # LinearAlgebra - nullspace, - tr, - # Modules - LinearAlgebra, - NDTensors, - # NDTensors module - # Types - Block, - # NDTensors.RankFactorization module - Spectrum, - # Methods - eigs, - entropy, - truncerror, - # Deprecated - addblock!, - # ITensors.jl - index_id_rng, - # argsdict/argsdict.jl - argsdict, - # tensor_operations/matrix_decomposition.jl - eigen, - factorize, - polar, - qr, - rq, - lq, - ql, - svd, - diag, - # tensor_operations/tensor_algebra.jl - contract, - # global_variables.jl - # Methods - # Macros - @disable_warn_order, - @reset_warn_order, - @set_warn_order, - # index.jl - # Types - Index, - IndexVal, - # Methods - dag, - dim, - dir, - eachval, - eachindval, - hasid, - hasind, - hasplev, - hasqns, - id, - ind, - isindequal, - noprime, - plev, - prime, - removetags, - removeqn, - removeqns, - replacetags, - replacetags!, - setdir, - setprime, - setspace, - settags, - sim, - space, - splitblocks, - tags, - # indexset.jl - # Types - IndexSet, - Order, - # Methods - allhastags, - anyhastags, - dims, - getfirst, - mapprime, - maxdim, - mindim, - permute, - pop, - popfirst, - push, - pushfirst, - replaceind, - replaceinds, - replaceprime, - swapinds, - setindex, - swapind, - swapinds, - swapprime, - swaptags, - # itensor.jl - # Types - ITensor, - # Methods - ⊙, - ⊕, - addtags!, - apply, - Apply, - array, - axpy!, - blockoffsets, - checkflux, - combinedind, - combiner, - commonind, - commoninds, - complex!, - convert_eltype, - convert_leaf_eltype, - delta, - dense, - denseblocks, - δ, - diagitensor, - diag_itensor, - directsum, - dot, - eachnzblock, - firstind, - filterinds, - hadamard_product, - hascommoninds, - hasind, - hasinds, - hassameinds, - ind, - inds, - inner, - insertblock!, - ishermitian, - itensor, - mul!, - matrix, - mapprime!, - noncommonind, - noncommoninds, - norm, - normalize, - normalize!, - noprime!, - nnzblocks, - nzblocks, - nzblock, - nnz, - onehot, - order, - permute, - prime!, - product, - randn!, - random_itensor, - removetags!, - replacetags!, - replaceind!, - replaceinds!, - swapinds!, - rmul!, - scale!, - scalar, - setelt, - storage, - setprime!, - swapprime!, - settags!, - swaptags!, - transpose, - uniqueinds, - uniqueind, - unioninds, - unionind, - vector, - emptyITensor, - # tagset.jl - # Types - TagSet, - # Macros - @ts_str, - # Methods - addtags, - hastags, - # qn/qnindex.jl - blockdim, - flux, - hasqns, - nblocks, - qn + # From external modules + # LinearAlgebra + nullspace, + tr, + # Modules + LinearAlgebra, + NDTensors, + # NDTensors module + # Types + Block, + # NDTensors.RankFactorization module + Spectrum, + # Methods + eigs, + entropy, + truncerror, + # Deprecated + addblock!, + # ITensors.jl + index_id_rng, + # argsdict/argsdict.jl + argsdict, + # tensor_operations/matrix_decomposition.jl + eigen, + factorize, + polar, + qr, + rq, + lq, + ql, + svd, + diag, + # tensor_operations/tensor_algebra.jl + contract, + # global_variables.jl + # Methods + # Macros + 
@disable_warn_order, + @reset_warn_order, + @set_warn_order, + # index.jl + # Types + Index, + IndexVal, + # Methods + dag, + dim, + dir, + eachval, + eachindval, + hasid, + hasind, + hasplev, + hasqns, + id, + ind, + isindequal, + noprime, + plev, + prime, + removetags, + removeqn, + removeqns, + replacetags, + replacetags!, + setdir, + setprime, + setspace, + settags, + sim, + space, + splitblocks, + tags, + # indexset.jl + # Types + IndexSet, + Order, + # Methods + allhastags, + anyhastags, + dims, + getfirst, + mapprime, + maxdim, + mindim, + permute, + pop, + popfirst, + push, + pushfirst, + replaceind, + replaceinds, + replaceprime, + swapinds, + setindex, + swapind, + swapinds, + swapprime, + swaptags, + # itensor.jl + # Types + ITensor, + # Methods + ⊙, + ⊕, + addtags!, + apply, + Apply, + array, + axpy!, + blockoffsets, + checkflux, + combinedind, + combiner, + commonind, + commoninds, + complex!, + convert_eltype, + convert_leaf_eltype, + delta, + dense, + denseblocks, + δ, + diagitensor, + diag_itensor, + directsum, + dot, + eachnzblock, + firstind, + filterinds, + hadamard_product, + hascommoninds, + hasind, + hasinds, + hassameinds, + ind, + inds, + inner, + insertblock!, + ishermitian, + itensor, + mul!, + matrix, + mapprime!, + noncommonind, + noncommoninds, + norm, + normalize, + normalize!, + noprime!, + nnzblocks, + nzblocks, + nzblock, + nnz, + onehot, + order, + permute, + prime!, + product, + randn!, + random_itensor, + removetags!, + replacetags!, + replaceind!, + replaceinds!, + swapinds!, + rmul!, + scale!, + scalar, + setelt, + storage, + setprime!, + swapprime!, + settags!, + swaptags!, + transpose, + uniqueinds, + uniqueind, + unioninds, + unionind, + vector, + emptyITensor, + # tagset.jl + # Types + TagSet, + # Macros + @ts_str, + # Methods + addtags, + hastags, + # qn/qnindex.jl + blockdim, + flux, + hasqns, + nblocks, + qn diff --git a/src/fermions/fermions.jl b/src/fermions/fermions.jl index 1ff55a7a40..29a9feb5e5 100644 --- a/src/fermions/fermions.jl +++ b/src/fermions/fermions.jl @@ -12,12 +12,12 @@ implementation uses an O(n^2) algorithm and is intended for small permutations only. """ function parity_sign(P)::Int - L = length(P) - s = +1 - for i in 1:L, j in (i + 1):L - s *= sign(P[j] - P[i]) - end - return s + L = length(P) + s = +1 + for i in 1:L, j in (i + 1):L + s *= sign(P[j] - P[i]) + end + return s end isfermionic(qv::QuantumNumbers.QNVal) = (QuantumNumbers.modulus(qv) < 0) @@ -27,10 +27,10 @@ isfermionic(qn::QN) = any(isfermionic, qn) has_fermionic_subspaces(i::Index) = false function has_fermionic_subspaces(i::QNIndex) - for b in 1:nblocks(i) - isfermionic(qn(i, b)) && (return true) - end - return false + for b in 1:nblocks(i) + isfermionic(qn(i, b)) && (return true) + end + return false end isfermionic(i::Index) = has_fermionic_subspaces(i) @@ -38,10 +38,10 @@ isfermionic(i::Index) = has_fermionic_subspaces(i) has_fermionic_subspaces(is::Indices) = false function has_fermionic_subspaces(is::QNIndices) - for i in is, b in 1:nblocks(i) - isfermionic(qn(i, b)) && (return true) - end - return false + for i in is, b in 1:nblocks(i) + isfermionic(qn(i, b)) && (return true) + end + return false end has_fermionic_subspaces(T::Tensor) = has_fermionic_subspaces(inds(T)) @@ -55,13 +55,13 @@ defined as the sum mod 2 of each of its fermionic QNVals (QNVals with negative modulus). 
""" function fparity(qn::QN) - p = 0 - for qv in qn - if isfermionic(qv) - p += val(qv) + p = 0 + for qv in qn + if isfermionic(qv) + p += val(qv) + end end - end - return mod(p, 2) + return mod(p, 2) end fparity(iv::Pair{<:Index}) = fparity(qn(iv)) @@ -77,33 +77,33 @@ if the subset of index vals which are fermion-parity odd undergo an odd permutation (odd number of swaps) according to p, then return -1. Otherwise return +1. """ -function compute_permfactor(p, iv_or_qn...; range=1:length(iv_or_qn))::Int - !using_auto_fermion() && return 1 - N = length(iv_or_qn) - # XXX: Bug https://github.com/ITensor/ITensors.jl/issues/931 - # oddp = @MVector zeros(Int, N) - oddp = MVector((ntuple(Returns(0), Val(N)))) - n = 0 - @inbounds for j in range - if fparity(iv_or_qn[p[j]]) == 1 - n += 1 - oddp[n] = p[j] +function compute_permfactor(p, iv_or_qn...; range = 1:length(iv_or_qn))::Int + !using_auto_fermion() && return 1 + N = length(iv_or_qn) + # XXX: Bug https://github.com/ITensor/ITensors.jl/issues/931 + # oddp = @MVector zeros(Int, N) + oddp = MVector((ntuple(Returns(0), Val(N)))) + n = 0 + @inbounds for j in range + if fparity(iv_or_qn[p[j]]) == 1 + n += 1 + oddp[n] = p[j] + end end - end - return parity_sign(oddp[1:n]) + return parity_sign(oddp[1:n]) end -function NDTensors.permfactor(p, ivs::Vararg{Pair{QNIndex},N}; kwargs...) where {N} - !using_auto_fermion() && return 1 - return compute_permfactor(p, ivs...; kwargs...) +function NDTensors.permfactor(p, ivs::Vararg{Pair{QNIndex}, N}; kwargs...) where {N} + !using_auto_fermion() && return 1 + return compute_permfactor(p, ivs...; kwargs...) end function NDTensors.permfactor( - perm, block::NDTensors.Block{N}, inds::QNIndices; kwargs... -) where {N} - !using_auto_fermion() && return 1 - qns = ntuple(n -> qn(inds[n], block[n]), N) - return compute_permfactor(perm, qns...; kwargs...) + perm, block::NDTensors.Block{N}, inds::QNIndices; kwargs... + ) where {N} + !using_auto_fermion() && return 1 + qns = ntuple(n -> qn(inds[n], block[n]), N) + return compute_permfactor(perm, qns...; kwargs...) 
end NDTensors.block_parity(i::QNIndex, block::Integer) = fparity(qn(i, block)) @@ -111,268 +111,268 @@ NDTensors.block_parity(i::QNIndex, block::Integer) = fparity(qn(i, block)) NDTensors.block_sign(i::QNIndex, block::Integer) = 2 * NDTensors.block_parity(i, block) - 1 function NDTensors.right_arrow_sign(i::QNIndex, block::Integer) - !using_auto_fermion() && return 1 - if dir(i) == Out && NDTensors.block_parity(i, block) == 1 - return -1 - end - return 1 + !using_auto_fermion() && return 1 + if dir(i) == Out && NDTensors.block_parity(i, block) == 1 + return -1 + end + return 1 end function NDTensors.left_arrow_sign(i::QNIndex, block::Integer) - !using_auto_fermion() && return 1 - if dir(i) == In && NDTensors.block_parity(i, block) == 1 - return -1 - end - return 1 + !using_auto_fermion() && return 1 + if dir(i) == In && NDTensors.block_parity(i, block) == 1 + return -1 + end + return 1 end # Version of getperm which is type stable # and works for Tuple or Vector inputs function vec_getperm(s1, s2) - N = length(s1) - p = Vector{Int}(undef, N) - for i in 1:N - @inbounds p[i] = NDTensors._findfirst(==(@inbounds s1[i]), s2) - end - return p + N = length(s1) + p = Vector{Int}(undef, N) + for i in 1:N + @inbounds p[i] = NDTensors._findfirst(==(@inbounds s1[i]), s2) + end + return p end @inline function NDTensors.compute_alpha( - ElR, - labelsR, - blockR, - input_indsR, - labelsT1, - blockT1, - indsT1::NTuple{N1,QNIndex}, - labelsT2, - blockT2, - indsT2::NTuple{N2,QNIndex}, -) where {N1,N2} - if !using_auto_fermion() - !has_fermionic_subspaces(indsT1) || !has_fermionic_subspaces(indsT2) - return one(ElR) - end - - # the "indsR" argument to compute_alpha from NDTensors - # may be a tuple of QNIndex, so convert to a Vector{Index} - indsR = collect(input_indsR) - - nlabelsT1 = TupleTools.sort(labelsT1; rev=true) - nlabelsT2 = TupleTools.sort(labelsT2) - - # Make orig_labelsR from the order of - # indices that would result by just - # taking the uncontracted indices of - # T1 and T2 in their input order: - NR = length(labelsR) - orig_labelsR = zeros(Int, NR) - u = 1 - for ls in (nlabelsT1, nlabelsT2), l in ls - if l > 0 - orig_labelsR[u] = l - u += 1 + ElR, + labelsR, + blockR, + input_indsR, + labelsT1, + blockT1, + indsT1::NTuple{N1, QNIndex}, + labelsT2, + blockT2, + indsT2::NTuple{N2, QNIndex}, + ) where {N1, N2} + if !using_auto_fermion() + !has_fermionic_subspaces(indsT1) || !has_fermionic_subspaces(indsT2) + return one(ElR) end - end - - permT1 = NDTensors.getperm(nlabelsT1, labelsT1) - permT2 = NDTensors.getperm(nlabelsT2, labelsT2) - permR = vec_getperm(labelsR, orig_labelsR) - - alpha1 = NDTensors.permfactor(permT1, blockT1, indsT1) - alpha2 = NDTensors.permfactor(permT2, blockT2, indsT2) - alphaR = NDTensors.permfactor(permR, blockR, indsR) - - alpha_arrows = one(ElR) - for n in 1:length(indsT1) - l = labelsT1[n] - i = indsT1[n] - qi = qn(i, blockT1[n]) - if l < 0 && dir(i) == Out && fparity(qi) == 1 - alpha_arrows *= -1 + + # the "indsR" argument to compute_alpha from NDTensors + # may be a tuple of QNIndex, so convert to a Vector{Index} + indsR = collect(input_indsR) + + nlabelsT1 = TupleTools.sort(labelsT1; rev = true) + nlabelsT2 = TupleTools.sort(labelsT2) + + # Make orig_labelsR from the order of + # indices that would result by just + # taking the uncontracted indices of + # T1 and T2 in their input order: + NR = length(labelsR) + orig_labelsR = zeros(Int, NR) + u = 1 + for ls in (nlabelsT1, nlabelsT2), l in ls + if l > 0 + orig_labelsR[u] = l + u += 1 + end + end + + permT1 = 
NDTensors.getperm(nlabelsT1, labelsT1) + permT2 = NDTensors.getperm(nlabelsT2, labelsT2) + permR = vec_getperm(labelsR, orig_labelsR) + + alpha1 = NDTensors.permfactor(permT1, blockT1, indsT1) + alpha2 = NDTensors.permfactor(permT2, blockT2, indsT2) + alphaR = NDTensors.permfactor(permR, blockR, indsR) + + alpha_arrows = one(ElR) + for n in 1:length(indsT1) + l = labelsT1[n] + i = indsT1[n] + qi = qn(i, blockT1[n]) + if l < 0 && dir(i) == Out && fparity(qi) == 1 + alpha_arrows *= -1 + end end - end - α = alpha1 * alpha2 * alphaR * alpha_arrows + α = alpha1 * alpha2 * alphaR * alpha_arrows - return α + return α end # Flip signs of selected blocks of T prior to # it being multiplied by a combiner ITensor # labelsR gives the ordering of indices after the product function NDTensors.before_combiner_signs( - T, - labelsT_, - indsT::NTuple{NT,QNIndex}, - C, - labelsC_, - indsC::NTuple{NC,QNIndex}, - labelsR, - indsR::NTuple{NR,QNIndex}, -) where {NC,NT,NR} - if !using_auto_fermion() || !has_fermionic_subspaces(T) - return T - end - - T = copy(T) + T, + labelsT_, + indsT::NTuple{NT, QNIndex}, + C, + labelsC_, + indsC::NTuple{NC, QNIndex}, + labelsR, + indsR::NTuple{NR, QNIndex}, + ) where {NC, NT, NR} + if !using_auto_fermion() || !has_fermionic_subspaces(T) + return T + end - labelsC = [l for l in labelsC_] - labelsT = [l for l in labelsT_] + T = copy(T) - # number of uncombined indices - Nuc = NC - 1 + labelsC = [l for l in labelsC_] + labelsT = [l for l in labelsT_] - ci = NDTensors.cinds(storage(C))[1] - combining = (labelsC[ci] > 0) + # number of uncombined indices + Nuc = NC - 1 - isconj = NDTensors.isconj(storage(C)) + ci = NDTensors.cinds(storage(C))[1] + combining = (labelsC[ci] > 0) - if combining - #println("Combining <<<<<<<<<<<<<<<<<<<<<<<<<<<") + isconj = NDTensors.isconj(storage(C)) - nlabelsT = Int[] + if combining + #println("Combining <<<<<<<<<<<<<<<<<<<<<<<<<<<") - if !isconj - # Permute uncombined indices to front - # in same order as indices passed to the - # combiner constructor - append!(nlabelsT, labelsC[2:end]) - else # isconj - # If combiner is conjugated, put uncombined - # indices in *opposite* order as on combiner - append!(nlabelsT, reverse(labelsC[2:end])) - end - @assert all(l -> l < 0, nlabelsT) + nlabelsT = Int[] - for l in labelsT - if l > 0 #uncontracted - append!(nlabelsT, l) - end - end - @assert length(nlabelsT) == NT - - # Compute permutation that moves uncombined indices to front - permT = vec_getperm(nlabelsT, labelsT) - - for blockT in keys(blockoffsets(T)) - # Compute sign from permuting uncombined indices to front: - alphaT = NDTensors.permfactor(permT, blockT, indsT) - - neg_dir = !isconj ? In : Out - alpha_arrows = 1 - alpha_mixed_arrow = 1 - C_dir = dir(indsC[1]) - for n in 1:length(indsT) - i = indsT[n] - qi = qn(i, blockT[n]) - if labelsT[n] < 0 && fparity(qi) == 1 - alpha_mixed_arrow *= (dir(i) != C_dir) ? -1 : +1 - alpha_arrows *= (dir(i) == neg_dir) ? 
-1 : +1 + if !isconj + # Permute uncombined indices to front + # in same order as indices passed to the + # combiner constructor + append!(nlabelsT, labelsC[2:end]) + else # isconj + # If combiner is conjugated, put uncombined + # indices in *opposite* order as on combiner + append!(nlabelsT, reverse(labelsC[2:end])) end - end - - fac = alphaT * alpha_arrows - - if isconj - fac *= alpha_mixed_arrow - end + @assert all(l -> l < 0, nlabelsT) - if fac != 1 - Tb = blockview(T, blockT) - scale!(Tb, fac) - end - end # for blockT - - elseif !combining - # - # Uncombining --------------------------- - # - #println("Uncombining >>>>>>>>>>>>>>>>>>>>>>>>>>>") - - nc = findfirst(l -> l < 0, labelsT) - nlabelsT = [labelsT[nc]] - ic = indsT[nc] - - for l in labelsT - (l > 0) && append!(nlabelsT, l) - end + for l in labelsT + if l > 0 #uncontracted + append!(nlabelsT, l) + end + end + @assert length(nlabelsT) == NT + + # Compute permutation that moves uncombined indices to front + permT = vec_getperm(nlabelsT, labelsT) + + for blockT in keys(blockoffsets(T)) + # Compute sign from permuting uncombined indices to front: + alphaT = NDTensors.permfactor(permT, blockT, indsT) + + neg_dir = !isconj ? In : Out + alpha_arrows = 1 + alpha_mixed_arrow = 1 + C_dir = dir(indsC[1]) + for n in 1:length(indsT) + i = indsT[n] + qi = qn(i, blockT[n]) + if labelsT[n] < 0 && fparity(qi) == 1 + alpha_mixed_arrow *= (dir(i) != C_dir) ? -1 : +1 + alpha_arrows *= (dir(i) == neg_dir) ? -1 : +1 + end + end + + fac = alphaT * alpha_arrows + + if isconj + fac *= alpha_mixed_arrow + end + + if fac != 1 + Tb = blockview(T, blockT) + scale!(Tb, fac) + end + end # for blockT + + elseif !combining + # + # Uncombining --------------------------- + # + #println("Uncombining >>>>>>>>>>>>>>>>>>>>>>>>>>>") + + nc = findfirst(l -> l < 0, labelsT) + nlabelsT = [labelsT[nc]] + ic = indsT[nc] + + for l in labelsT + (l > 0) && append!(nlabelsT, l) + end - # Compute sign for permuting combined index to front - # (sign alphaT to be computed for each block below): - permT = vec_getperm(nlabelsT, labelsT) + # Compute sign for permuting combined index to front + # (sign alphaT to be computed for each block below): + permT = vec_getperm(nlabelsT, labelsT) - # - # Note: other permutation of labelsT which - # relates to two treatments of isconj==true/false - # in combining case above is handled as a - # post-processing step in NDTensors.after_combiner_signs - # implemented below - # + # + # Note: other permutation of labelsT which + # relates to two treatments of isconj==true/false + # in combining case above is handled as a + # post-processing step in NDTensors.after_combiner_signs + # implemented below + # - for blockT in keys(blockoffsets(T)) - alphaT = NDTensors.permfactor(permT, blockT, indsT) + for blockT in keys(blockoffsets(T)) + alphaT = NDTensors.permfactor(permT, blockT, indsT) - neg_dir = !isconj ? Out : In - qic = qn(ic, blockT[nc]) - alpha_arrows = (fparity(qic) == 1 && dir(ic) == neg_dir) ? -1 : +1 + neg_dir = !isconj ? Out : In + qic = qn(ic, blockT[nc]) + alpha_arrows = (fparity(qic) == 1 && dir(ic) == neg_dir) ? 
-1 : +1 - fac = alphaT * alpha_arrows + fac = alphaT * alpha_arrows - if fac != 1 - Tb = blockview(T, blockT) - scale!(Tb, fac) - end + if fac != 1 + Tb = blockview(T, blockT) + scale!(Tb, fac) + end + end end - end - return T + return T end function NDTensors.after_combiner_signs( - R, labelsR, indsR::NTuple{NR,QNIndex}, C, labelsC, indsC::NTuple{NC,QNIndex} -) where {NC,NR} - ci = NDTensors.cinds(store(C))[1] - combining = (labelsC[ci] > 0) - combining && error("NDTensors.after_combiner_signs only for uncombining") + R, labelsR, indsR::NTuple{NR, QNIndex}, C, labelsC, indsC::NTuple{NC, QNIndex} + ) where {NC, NR} + ci = NDTensors.cinds(store(C))[1] + combining = (labelsC[ci] > 0) + combining && error("NDTensors.after_combiner_signs only for uncombining") + + if !using_auto_fermion() || !has_fermionic_subspaces(R) + return R + end - if !using_auto_fermion() || !has_fermionic_subspaces(R) - return R - end - - R = copy(R) - - # number of uncombined indices - Nuc = NC - 1 - - isconj = NDTensors.isconj(store(C)) - - if !combining - if !isconj - #println("!!! Doing uncombining post-processing step") - rperm = ntuple(i -> (Nuc - i + 1), Nuc) # reverse permutation - NDTensors.scale_blocks!( - R, block -> NDTensors.permfactor(rperm, block, indsR; range=1:Nuc) - ) - else - #println("!!! Doing conjugate uncombining post-processing step") - C_dir = dir(inds(C)[1]) - - function mixed_arrow_sign(block) - alpha_mixed_arrow = 1 - for n in 1:Nuc - i = indsR[n] - qi = qn(i, block[n]) - if dir(i) == C_dir && fparity(qi) == 1 - alpha_mixed_arrow *= -1 - end + R = copy(R) + + # number of uncombined indices + Nuc = NC - 1 + + isconj = NDTensors.isconj(store(C)) + + if !combining + if !isconj + #println("!!! Doing uncombining post-processing step") + rperm = ntuple(i -> (Nuc - i + 1), Nuc) # reverse permutation + NDTensors.scale_blocks!( + R, block -> NDTensors.permfactor(rperm, block, indsR; range = 1:Nuc) + ) + else + #println("!!! 
Doing conjugate uncombining post-processing step") + C_dir = dir(inds(C)[1]) + + function mixed_arrow_sign(block) + alpha_mixed_arrow = 1 + for n in 1:Nuc + i = indsR[n] + qi = qn(i, block[n]) + if dir(i) == C_dir && fparity(qi) == 1 + alpha_mixed_arrow *= -1 + end + end + return alpha_mixed_arrow + end + + NDTensors.scale_blocks!(R, block -> mixed_arrow_sign(block)) end - return alpha_mixed_arrow - end - - NDTensors.scale_blocks!(R, block -> mixed_arrow_sign(block)) end - end - return R + return R end diff --git a/src/imports.jl b/src/imports.jl index 7cc8130b4c..4fdbfd3449 100644 --- a/src/imports.jl +++ b/src/imports.jl @@ -1,187 +1,187 @@ import Base: - # types - Array, - CartesianIndices, - Vector, - NTuple, - Tuple, - # symbols - +, - -, - *, - ^, - /, - ==, - <, - >, - !, - # functions - adjoint, - allunique, - axes, - complex, - conj, - convert, - copy, - copyto!, - deepcopy, - deleteat!, - eachindex, - eltype, - fill!, - filter, - filter!, - findall, - findfirst, - getindex, - hash, - imag, - intersect, - intersect!, - isapprox, - isassigned, - isempty, - isless, - isreal, - iszero, - iterate, - keys, - lastindex, - length, - map, - map!, - ndims, - print, - promote_rule, - push!, - real, - resize!, - setdiff, - setdiff!, - setindex!, - show, - similar, - size, - summary, - truncate, - zero, - # macros - @propagate_inbounds + # types + Array, + CartesianIndices, + Vector, + NTuple, + Tuple, + # symbols + +, + -, + *, + ^, + /, + ==, + <, + >, + !, + # functions + adjoint, + allunique, + axes, + complex, + conj, + convert, + copy, + copyto!, + deepcopy, + deleteat!, + eachindex, + eltype, + fill!, + filter, + filter!, + findall, + findfirst, + getindex, + hash, + imag, + intersect, + intersect!, + isapprox, + isassigned, + isempty, + isless, + isreal, + iszero, + iterate, + keys, + lastindex, + length, + map, + map!, + ndims, + print, + promote_rule, + push!, + real, + resize!, + setdiff, + setdiff!, + setindex!, + show, + similar, + size, + summary, + truncate, + zero, + # macros + @propagate_inbounds import Base.Broadcast: - # types - AbstractArrayStyle, - Broadcasted, - BroadcastStyle, - DefaultArrayStyle, - Style, - # functions - _broadcast_getindex, - broadcasted, - broadcastable, - instantiate + # types + AbstractArrayStyle, + Broadcasted, + BroadcastStyle, + DefaultArrayStyle, + Style, + # functions + _broadcast_getindex, + broadcasted, + broadcastable, + instantiate import Adapt: adapt_structure, adapt_storage import LinearAlgebra: - axpby!, - axpy!, - diag, - dot, - eigen, - exp, - factorize, - ishermitian, - lmul!, - lq, - mul!, - norm, - normalize, - normalize!, - nullspace, - qr, - rmul!, - svd, - tr, - transpose + axpby!, + axpy!, + diag, + dot, + eigen, + exp, + factorize, + ishermitian, + lmul!, + lq, + mul!, + norm, + normalize, + normalize!, + nullspace, + qr, + rmul!, + svd, + tr, + transpose using ITensors.NDTensors.GPUArraysCoreExtensions: cpu using ITensors.NDTensors: - Algorithm, - @Algorithm_str, - EmptyNumber, - _Tuple, - _NTuple, - blas_get_num_threads, - disable_auto_fermion, - double_precision, - eachblock, - eachdiagblock, - enable_auto_fermion, - fill!!, - randn!!, - permutedims, - permutedims!, - single_precision, - timer, - using_auto_fermion + Algorithm, + @Algorithm_str, + EmptyNumber, + _Tuple, + _NTuple, + blas_get_num_threads, + disable_auto_fermion, + double_precision, + eachblock, + eachdiagblock, + enable_auto_fermion, + fill!!, + randn!!, + permutedims, + permutedims!, + single_precision, + timer, + using_auto_fermion using 
NDTensors.CUDAExtensions: cu import ITensors.NDTensors: - # Modules - Strided, # to control threading - # Types - AliasStyle, - AllowAlias, - NeverAlias, - array, - blockdim, - blockoffsets, - contract, - datatype, - dense, - denseblocks, - diaglength, - dim, - dims, - disable_tblis, - eachnzblock, - enable_tblis, - ind, - inds, - insertblock!, - insert_diag_blocks!, - matrix, - maxdim, - mindim, - nblocks, - nnz, - nnzblocks, - nzblock, - nzblocks, - one, - outer, - permuteblocks, - polar, - ql, - scale!, - setblock!, - setblockdim!, - setinds, - setstorage, - sim, - storage, - storagetype, - sum, - tensor, - truncate!, - using_tblis, - vector, - # Deprecated - addblock!, - store + # Modules + Strided, # to control threading + # Types + AliasStyle, + AllowAlias, + NeverAlias, + array, + blockdim, + blockoffsets, + contract, + datatype, + dense, + denseblocks, + diaglength, + dim, + dims, + disable_tblis, + eachnzblock, + enable_tblis, + ind, + inds, + insertblock!, + insert_diag_blocks!, + matrix, + maxdim, + mindim, + nblocks, + nnz, + nnzblocks, + nzblock, + nzblocks, + one, + outer, + permuteblocks, + polar, + ql, + scale!, + setblock!, + setblockdim!, + setinds, + setstorage, + sim, + storage, + storagetype, + sum, + tensor, + truncate!, + using_tblis, + vector, + # Deprecated + addblock!, + store import ITensors.Ops: Prod, Sum, terms diff --git a/src/indexset.jl b/src/indexset.jl index 397385e86b..2db01c4af1 100644 --- a/src/indexset.jl +++ b/src/indexset.jl @@ -4,7 +4,7 @@ using .TagSets: TagSets, addtags, commontags, hastags, removetags, replacetags # Represents a static order of an ITensor @eval struct Order{N} - (OrderT::Type{<:Order})() = $(Expr(:new, :OrderT)) + (OrderT::Type{<:Order})() = $(Expr(:new, :OrderT)) end @doc """ @@ -21,23 +21,23 @@ Order(N) = Order{N}() # Helpful if we want code to work generically # for other Index-like types (such as IndexRange) -const IndexSet{IndexT<:Index} = Vector{IndexT} -const IndexTuple{IndexT<:Index} = Tuple{Vararg{IndexT}} +const IndexSet{IndexT <: Index} = Vector{IndexT} +const IndexTuple{IndexT <: Index} = Tuple{Vararg{IndexT}} # Definition to help with generic code -const Indices{IndexT<:Index} = Union{Vector{IndexT},Tuple{Vararg{IndexT}}} +const Indices{IndexT <: Index} = Union{Vector{IndexT}, Tuple{Vararg{IndexT}}} -function _narrow_eltype(v::Vector{T}; default_empty_eltype=T) where {T} - if isempty(v) - return default_empty_eltype[] - end - return convert(Vector{mapreduce(typeof, promote_type, v)}, v) +function _narrow_eltype(v::Vector{T}; default_empty_eltype = T) where {T} + if isempty(v) + return default_empty_eltype[] + end + return convert(Vector{mapreduce(typeof, promote_type, v)}, v) end -function narrow_eltype(v::Vector{T}; default_empty_eltype=T) where {T} - if isconcretetype(T) - return v - end - return _narrow_eltype(v; default_empty_eltype) +function narrow_eltype(v::Vector{T}; default_empty_eltype = T) where {T} + if isconcretetype(T) + return v + end + return _narrow_eltype(v; default_empty_eltype) end _indices() = () @@ -50,7 +50,7 @@ _indices(x1::Tuple, x2::Index) = (x1..., x2) _indices(x1::Index, x2::Index) = (x1, x2) # Vectors -_indices(x1::Vector, x2::Vector) = narrow_eltype(vcat(x1, x2); default_empty_eltype=Index) +_indices(x1::Vector, x2::Vector) = narrow_eltype(vcat(x1, x2); default_empty_eltype = Index) # Mix vectors and tuples/elements _indices(x1::Vector, x2) = _indices(x1, [x2]) @@ -59,9 +59,9 @@ _indices(x1::Vector, x2::Tuple) = _indices(x1, [x2...]) _indices(x1::Tuple, x2::Vector) = _indices([x1...], 
x2) indices(x::Vector{Index{S}}) where {S} = x -indices(x::Vector{Index}) = narrow_eltype(x; default_empty_eltype=Index) -indices(x::Tuple) = reduce(_indices, x; init=()) -indices(x::Vector) = reduce(_indices, x; init=Index[]) +indices(x::Vector{Index}) = narrow_eltype(x; default_empty_eltype = Index) +indices(x::Tuple) = reduce(_indices, x; init = ()) +indices(x::Vector) = reduce(_indices, x; init = Index[]) indices(x...) = indices(x) # To help with backwards compatibility @@ -108,7 +108,7 @@ Base.to_shape(inds::Tuple{Vararg{Index}}) = dims(inds) Get the product of the dimensions of the indices of the Indices (the total dimension of the space). """ -NDTensors.dim(is::IndexSet) = Compat.mapreduce(dim, *, is; init=1) +NDTensors.dim(is::IndexSet) = Compat.mapreduce(dim, *, is; init = 1) """ dim(is::IndexSet, n::Int) @@ -124,7 +124,7 @@ Return a new Indices with the indices daggered (flip all of the arrow directions). """ function dag(is::Indices) - return isempty(is) ? is : map(i -> dag(i), is) + return isempty(is) ? is : map(i -> dag(i), is) end # TODO: move to NDTensors @@ -132,9 +132,9 @@ NDTensors.dim(is::Tuple, pos::Integer) = dim(is[pos]) # TODO: this is a weird definition, fix it function NDTensors.similartype( - ::Type{<:Tuple{Vararg{IndexT}}}, ::Type{Val{N}} -) where {IndexT,N} - return NTuple{N,IndexT} + ::Type{<:Tuple{Vararg{IndexT}}}, ::Type{Val{N}} + ) where {IndexT, N} + return NTuple{N, IndexT} end ## # This is to help with some generic programming in the Tensor @@ -158,10 +158,10 @@ You can also use the broadcast version `sim.(is)`. NDTensors.sim(is::Indices) = map(i -> sim(i), is) function trivial_index(is::Indices) - if isempty(is) - return Index(1) - end - return trivial_index(first(is)) + if isempty(is) + return Index(1) + end + return trivial_index(first(is)) end """ @@ -172,12 +172,12 @@ Get the minimum dimension of the indices in the index set. Returns 1 if the Indices is empty. """ function mindim(is::Indices) - length(is) == 0 && (return 1) - md = dim(is[1]) - for n in 2:length(is) - md = min(md, dim(is[n])) - end - return md + length(is) == 0 && (return 1) + md = dim(is[1]) + for n in 2:length(is) + md = min(md, dim(is[n])) + end + return md end """ @@ -188,12 +188,12 @@ Get the maximum dimension of the indices in the index set. Returns 1 if the Indices is empty. """ function maxdim(is::Indices) - length(is) == 0 && (return 1) - md = dim(is[1]) - for n in 2:length(is) - md = max(md, dim(is[n])) - end - return md + length(is) == 0 && (return 1) + md = dim(is[1]) + for n in 2:length(is) + md = max(md, dim(is[n])) + end + return md end """ @@ -215,11 +215,11 @@ independent equality use `issetequal` or `hassameinds`. """ function ==(A::Indices, B::Indices) - length(A) ≠ length(B) && return false - for (a, b) in zip(A, B) - a ≠ b && return false - end - return true + length(A) ≠ length(B) && return false + for (a, b) in zip(A, B) + a ≠ b && return false + end + return true end """ @@ -265,8 +265,8 @@ An internal function that returns a function that accepts an Index that checks if the Index matches the provided conditions. """ -function fmatch(; inds=nothing, tags=nothing, plev=nothing, id=nothing) - return i -> fmatch(inds)(i) && fmatch(plev)(i) && fmatch(id)(i) && fmatch(tags)(i) +function fmatch(; inds = nothing, tags = nothing, plev = nothing, id = nothing) + return i -> fmatch(inds)(i) && fmatch(plev)(i) && fmatch(id)(i) && fmatch(tags)(i) end """ @@ -283,8 +283,8 @@ Return the first Index in the Indices. If the Indices is empty, return `nothing`. 
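To make the `getfirst` contract concrete, a small illustrative sketch (assumed usage, not part of the patch); `getfirst` is exported, and the predicate form defined just below accepts any function of an `Index`:

```julia
using ITensors

i = Index(2, "i")
j = Index(3, "j")

getfirst([i, j])                        # i, the first Index in the collection
getfirst(Index[])                       # nothing, since the collection is empty
getfirst(x -> hastags(x, "j"), [i, j])  # j, the first Index matching the predicate
```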
""" function getfirst(is::Indices) - length(is) == 0 && return nothing - return first(is) + length(is) == 0 && return nothing + return first(is) end """ @@ -294,10 +294,10 @@ Get the first Index matching the pattern function, return `nothing` if not found. """ function getfirst(f::Function, is::Indices) - for i in is - f(i) && return i - end - return nothing + for i in is + f(i) && return i + end + return nothing end getfirst(is::Indices, args...; kwargs...) = getfirst(fmatch(args...; kwargs...), is) @@ -313,7 +313,7 @@ For collections of Indices, returns the first location in `bis` for each value in `ais`. """ function Base.indexin(ais::Indices, bis::Indices) - return [findfirst(bis, ais[i]) for i in 1:length(ais)] + return [findfirst(bis, ais[i]) for i in 1:length(ais)] end #function Base.indexin(a::Index, bis::Indices) @@ -327,7 +327,7 @@ findfirst(is::Indices, args...; kwargs...) = findfirst(fmatch(args...; kwargs... # function prime(f::Function, is::Indices, args...) - return map(i -> f(i) ? prime(i, args...) : i, is) + return map(i -> f(i) ? prime(i, args...) : i, is) end """ @@ -338,7 +338,7 @@ Filter which indices are primed using keyword arguments tags, plev and id. """ function prime(is::Indices, plinc::Integer, args...; kwargs...) - return prime(fmatch(args...; kwargs...), is, plinc) + return prime(fmatch(args...; kwargs...), is, plinc) end prime(f::Function, is::Indices) = prime(f, is, 1) @@ -353,81 +353,81 @@ For is' notation. adjoint(is::Indices) = prime(is) function setprime(f::Function, is::Indices, args...) - return map(i -> f(i) ? setprime(i, args...) : i, is) + return map(i -> f(i) ? setprime(i, args...) : i, is) end function setprime(is::Indices, plev::Integer, args...; kwargs...) - return setprime(fmatch(args...; kwargs...), is, plev) + return setprime(fmatch(args...; kwargs...), is, plev) end noprime(f::Function, is::Indices, args...) = setprime(is, 0, args...; kwargs...) noprime(is::Indices, args...; kwargs...) = setprime(is, 0, args...; kwargs...) -function _swapprime(f::Function, i::Index, pl1pl2::Pair{Int,Int}) - pl1, pl2 = pl1pl2 - if f(i) - if hasplev(i, pl1) - return setprime(i, pl2) - elseif hasplev(i, pl2) - return setprime(i, pl1) +function _swapprime(f::Function, i::Index, pl1pl2::Pair{Int, Int}) + pl1, pl2 = pl1pl2 + if f(i) + if hasplev(i, pl1) + return setprime(i, pl2) + elseif hasplev(i, pl2) + return setprime(i, pl1) + end + return i end return i - end - return i end -function swapprime(f::Function, is::Indices, pl1pl2::Pair{Int,Int}) - return map(i -> _swapprime(f, i, pl1pl2), is) +function swapprime(f::Function, is::Indices, pl1pl2::Pair{Int, Int}) + return map(i -> _swapprime(f, i, pl1pl2), is) end function swapprime(f::Function, is::Indices, pl1::Int, pl2::Int) - return swapprime(f, is::Indices, pl1 => pl2) + return swapprime(f, is::Indices, pl1 => pl2) end -function swapprime(is::Indices, pl1pl2::Pair{Int,Int}, args...; kwargs...) - return swapprime(fmatch(args...; kwargs...), is, pl1pl2) +function swapprime(is::Indices, pl1pl2::Pair{Int, Int}, args...; kwargs...) + return swapprime(fmatch(args...; kwargs...), is, pl1pl2) end function swapprime(is::Indices, pl1::Int, pl2::Int, args...; kwargs...) - return swapprime(fmatch(args...; kwargs...), is, pl1 => pl2) + return swapprime(fmatch(args...; kwargs...), is, pl1 => pl2) end replaceprime(f::Function, is::Indices, pl1::Int, pl2::Int) = replaceprime(f, is, pl1 => pl2) function replaceprime(is::Indices, pl1::Int, pl2::Int, args...; kwargs...) 
- return replaceprime(fmatch(args...; kwargs...), is, pl1 => pl2) + return replaceprime(fmatch(args...; kwargs...), is, pl1 => pl2) end const mapprime = replaceprime -function _replaceprime(i::Index, rep_pls::Pair{Int,Int}...) - for (pl1, pl2) in rep_pls - hasplev(i, pl1) && return setprime(i, pl2) - end - return i +function _replaceprime(i::Index, rep_pls::Pair{Int, Int}...) + for (pl1, pl2) in rep_pls + hasplev(i, pl1) && return setprime(i, pl2) + end + return i end -function replaceprime(f::Function, is::Indices, rep_pls::Pair{Int,Int}...) - return map(i -> f(i) ? _replaceprime(i, rep_pls...) : i, is) +function replaceprime(f::Function, is::Indices, rep_pls::Pair{Int, Int}...) + return map(i -> f(i) ? _replaceprime(i, rep_pls...) : i, is) end -function replaceprime(is::Indices, rep_pls::Pair{Int,Int}...; kwargs...) - return replaceprime(fmatch(; kwargs...), is, rep_pls...) +function replaceprime(is::Indices, rep_pls::Pair{Int, Int}...; kwargs...) + return replaceprime(fmatch(; kwargs...), is, rep_pls...) end function TagSets.addtags(f::Function, is::Indices, args...) - return map(i -> f(i) ? addtags(i, args...) : i, is) + return map(i -> f(i) ? addtags(i, args...) : i, is) end function TagSets.addtags(is::Indices, tags, args...; kwargs...) - return addtags(fmatch(args...; kwargs...), is, tags) + return addtags(fmatch(args...; kwargs...), is, tags) end settags(f::Function, is::Indices, args...) = map(i -> f(i) ? settags(i, args...) : i, is) function settags(is::Indices, tags, args...; kwargs...) - return settags(fmatch(args...; kwargs...), is, tags) + return settags(fmatch(args...; kwargs...), is, tags) end """ @@ -471,18 +471,18 @@ eachindval(is::Index...) = eachindval(is) eachindval(is::Tuple{Vararg{Index}}) = (is .=> Tuple(ns) for ns in eachval(is)) function TagSets.removetags(f::Function, is::Indices, args...) - return map(i -> f(i) ? removetags(i, args...) : i, is) + return map(i -> f(i) ? removetags(i, args...) : i, is) end function TagSets.removetags(is::Indices, tags, args...; kwargs...) - return removetags(fmatch(args...; kwargs...), is, tags) + return removetags(fmatch(args...; kwargs...), is, tags) end function _replacetags(i::Index, rep_ts::Pair...) - for (tags1, tags2) in rep_ts - hastags(i, tags1) && return replacetags(i, tags1, tags2) - end - return i + for (tags1, tags2) in rep_ts + hastags(i, tags1) && return replacetags(i, tags1, tags2) + end + return i end # XXX new syntax @@ -508,139 +508,141 @@ allhastags(is::Indices, ts::String) = all(i -> hastags(i, ts), is) # Version taking a list of Pairs function TagSets.replacetags(f::Function, is::Indices, rep_ts::Pair...) - return map(i -> f(i) ? _replacetags(i, rep_ts...) : i, is) + return map(i -> f(i) ? _replacetags(i, rep_ts...) : i, is) end function TagSets.replacetags(is::Indices, rep_ts::Pair...; kwargs...) - return replacetags(fmatch(; kwargs...), is, rep_ts...) + return replacetags(fmatch(; kwargs...), is, rep_ts...) end # Version taking two input TagSets/Strings function TagSets.replacetags(f::Function, is::Indices, tags1, tags2) - return replacetags(f, is, tags1 => tags2) + return replacetags(f, is, tags1 => tags2) end function TagSets.replacetags(is::Indices, tags1, tags2, args...; kwargs...) 
- return replacetags(fmatch(args...; kwargs...), is, tags1 => tags2) + return replacetags(fmatch(args...; kwargs...), is, tags1 => tags2) end function _swaptags(f::Function, i::Index, tags1, tags2) - if f(i) - if hastags(i, tags1) - return replacetags(i, tags1, tags2) - elseif hastags(i, tags2) - return replacetags(i, tags2, tags1) + if f(i) + if hastags(i, tags1) + return replacetags(i, tags1, tags2) + elseif hastags(i, tags2) + return replacetags(i, tags2, tags1) + end + return i end return i - end - return i end function swaptags(f::Function, is::Indices, tags1, tags2) - return map(i -> _swaptags(f, i, tags1, tags2), is) + return map(i -> _swaptags(f, i, tags1, tags2), is) end function swaptags(is::Indices, tags1, tags2, args...; kwargs...) - return swaptags(fmatch(args...; kwargs...), is, tags1, tags2) + return swaptags(fmatch(args...; kwargs...), is, tags1, tags2) end function swaptags(is::Indices, tags12::Pair, args...; kwargs...) - return swaptags(is, first(tags12), last(tags12), args...; kwargs...) + return swaptags(is, first(tags12), last(tags12), args...; kwargs...) end -function replaceinds(is::Indices, rep_inds::Pair{<:Index,<:Index}...) - return replaceinds(is, zip(rep_inds...)...) +function replaceinds(is::Indices, rep_inds::Pair{<:Index, <:Index}...) + return replaceinds(is, zip(rep_inds...)...) end # Handle case of empty indices being replaced replaceinds(is::Indices) = is replaceinds(is::Indices, rep_inds::Tuple{}) = is -function replaceinds(is::Indices, rep_inds::Vector{<:Pair{<:Index,<:Index}}) - return replaceinds(is, rep_inds...) +function replaceinds(is::Indices, rep_inds::Vector{<:Pair{<:Index, <:Index}}) + return replaceinds(is, rep_inds...) end -function replaceinds(is::Indices, rep_inds::Tuple{Vararg{Pair{<:Index,<:Index}}}) - return replaceinds(is, rep_inds...) +function replaceinds(is::Indices, rep_inds::Tuple{Vararg{Pair{<:Index, <:Index}}}) + return replaceinds(is, rep_inds...) end function replaceinds(is::Indices, rep_inds::Pair) - return replaceinds(is, Tuple(first(rep_inds)) .=> Tuple(last(rep_inds))) + return replaceinds(is, Tuple(first(rep_inds)) .=> Tuple(last(rep_inds))) end # Check that the QNs are all the same hassameflux(i1::Index, i2::Index) = (dim(i1) == dim(i2)) function replaceinds_space_error(is, inds1, inds2, i1, i2) - return error(""" - Attempting to replace the Indices + return error( + """ + Attempting to replace the Indices - $(inds1) + $(inds1) - with + with - $(inds2) + $(inds2) - in the Index collection + in the Index collection - $(is). + $(is). - However, the Index + However, the Index - $(i1) + $(i1) - has a different space from the Index + has a different space from the Index - $(i2). + $(i2). - They must have the same spaces to be replaced. - """) + They must have the same spaces to be replaced. 
+ """ + ) end function replaceinds(is::Indices, inds1, inds2) - is1 = inds1 - poss = indexin(is1, is) - is_tuple = Tuple(is) - for (j, pos) in enumerate(poss) - isnothing(pos) && continue - i1 = is_tuple[pos] - i2 = inds2[j] - i2 = setdir(i2, dir(i1)) - if space(i1) ≠ space(i2) - replaceinds_space_error(is, inds1, inds2, i1, i2) + is1 = inds1 + poss = indexin(is1, is) + is_tuple = Tuple(is) + for (j, pos) in enumerate(poss) + isnothing(pos) && continue + i1 = is_tuple[pos] + i2 = inds2[j] + i2 = setdir(i2, dir(i1)) + if space(i1) ≠ space(i2) + replaceinds_space_error(is, inds1, inds2, i1, i2) + end + is_tuple = setindex(is_tuple, i2, pos) end - is_tuple = setindex(is_tuple, i2, pos) - end - return (is_tuple) + return (is_tuple) end replaceind(is::Indices, i1::Index, i2::Index) = replaceinds(is, (i1,), (i2,)) function replaceind(is::Indices, i1::Index, i2::Indices) - length(i2) != 1 && - throw(ArgumentError("cannot use replaceind with an Indices of length $(length(i2))")) - return replaceinds(is, (i1,), i2) + length(i2) != 1 && + throw(ArgumentError("cannot use replaceind with an Indices of length $(length(i2))")) + return replaceinds(is, (i1,), i2) end -replaceind(is::Indices, rep_i::Pair{<:Index,<:Index}) = replaceinds(is, rep_i) +replaceind(is::Indices, rep_i::Pair{<:Index, <:Index}) = replaceinds(is, rep_i) function swapinds(is::Indices, inds1, inds2) - return replaceinds(is, (inds1..., inds2...), (inds2..., inds1...)) + return replaceinds(is, (inds1..., inds2...), (inds2..., inds1...)) end function swapinds(is::Indices, inds1::Index, inds2::Index) - return swapinds(is, (inds1,), (inds2,)) + return swapinds(is, (inds1,), (inds2,)) end function swapinds(is::Indices, inds12::Pair) - return swapinds(is, first(inds12), last(inds12)) + return swapinds(is, first(inds12), last(inds12)) end swapind(is::Indices, i1::Index, i2::Index) = swapinds(is, (i1,), (i2,)) removeqns(is::Indices) = map(removeqns, is) -function QuantumNumbers.removeqn(is::Indices, qn_name::String; mergeblocks=true) - return map(i -> removeqn(i, qn_name; mergeblocks), is) +function QuantumNumbers.removeqn(is::Indices, qn_name::String; mergeblocks = true) + return map(i -> removeqn(i, qn_name; mergeblocks), is) end mergeblocks(is::Indices) = map(mergeblocks, is) @@ -652,13 +654,13 @@ mergeblocks(is::Indices) = map(mergeblocks, is) # setdirs(is1::Indices, is2::Indices) # function permute(is1::Indices, is2::Indices) - length(is1) != length(is2) && throw( - ArgumentError( - "length of first index set, $(length(is1)) does not match length of second index set, $(length(is2))", - ), - ) - perm = getperm(is1, is2) - return is1[invperm(perm)] + length(is1) != length(is2) && throw( + ArgumentError( + "length of first index set, $(length(is1)) does not match length of second index set, $(length(is2))", + ), + ) + perm = getperm(is1, is2) + return is1[invperm(perm)] end # @@ -666,68 +668,68 @@ end # function compute_contraction_labels(Ais::Tuple, Bis::Tuple) - have_qns = hasqns(Ais) && hasqns(Bis) - NA = length(Ais) - NB = length(Bis) - Alabels = MVector{NA,Int}(ntuple(_ -> 0, Val(NA))) - Blabels = MVector{NB,Int}(ntuple(_ -> 0, Val(NB))) - - ncont = 0 - for i in 1:NA, j in 1:NB - Ais_i = @inbounds Ais[i] - Bis_j = @inbounds Bis[j] - if Ais_i == Bis_j - if have_qns && (dir(Ais_i) ≠ -dir(Bis_j)) - error( - "Attempting to contract IndexSet:\n\n$(Ais)\n\nwith IndexSet:\n\n$(Bis)\n\nQN indices must have opposite direction to contract, but indices:\n\n$(Ais_i)\n\nand:\n\n$(Bis_j)\n\ndo not have opposite directions.", - ) - end - Alabels[i] = 
Blabels[j] = -(1 + ncont) - ncont += 1 + have_qns = hasqns(Ais) && hasqns(Bis) + NA = length(Ais) + NB = length(Bis) + Alabels = MVector{NA, Int}(ntuple(_ -> 0, Val(NA))) + Blabels = MVector{NB, Int}(ntuple(_ -> 0, Val(NB))) + + ncont = 0 + for i in 1:NA, j in 1:NB + Ais_i = @inbounds Ais[i] + Bis_j = @inbounds Bis[j] + if Ais_i == Bis_j + if have_qns && (dir(Ais_i) ≠ -dir(Bis_j)) + error( + "Attempting to contract IndexSet:\n\n$(Ais)\n\nwith IndexSet:\n\n$(Bis)\n\nQN indices must have opposite direction to contract, but indices:\n\n$(Ais_i)\n\nand:\n\n$(Bis_j)\n\ndo not have opposite directions.", + ) + end + Alabels[i] = Blabels[j] = -(1 + ncont) + ncont += 1 + end end - end - u = ncont - for i in 1:NA - if (Alabels[i] == 0) - Alabels[i] = (u += 1) + u = ncont + for i in 1:NA + if (Alabels[i] == 0) + Alabels[i] = (u += 1) + end end - end - for j in 1:NB - if (Blabels[j] == 0) - Blabels[j] = (u += 1) + for j in 1:NB + if (Blabels[j] == 0) + Blabels[j] = (u += 1) + end end - end - return (Tuple(Alabels), Tuple(Blabels)) + return (Tuple(Alabels), Tuple(Blabels)) end function compute_contraction_labels(Cis::Tuple, Ais::Tuple, Bis::Tuple) - NA = length(Ais) - NB = length(Bis) - NC = length(Cis) - Alabels, Blabels = compute_contraction_labels(Ais, Bis) - Clabels = MVector{NC,Int}(ntuple(_ -> 0, Val(NC))) - for i in 1:NC - locA = findfirst(==(Cis[i]), Ais) - if !isnothing(locA) - if Alabels[locA] < 0 - error( - "The noncommon indices of $Ais and $Bis must be the same as the indices $Cis." - ) - end - Clabels[i] = Alabels[locA] - else - locB = findfirst(==(Cis[i]), Bis) - if isnothing(locB) || Blabels[locB] < 0 - error( - "The noncommon indices of $Ais and $Bis must be the same as the indices $Cis." - ) - end - Clabels[i] = Blabels[locB] + NA = length(Ais) + NB = length(Bis) + NC = length(Cis) + Alabels, Blabels = compute_contraction_labels(Ais, Bis) + Clabels = MVector{NC, Int}(ntuple(_ -> 0, Val(NC))) + for i in 1:NC + locA = findfirst(==(Cis[i]), Ais) + if !isnothing(locA) + if Alabels[locA] < 0 + error( + "The noncommon indices of $Ais and $Bis must be the same as the indices $Cis." + ) + end + Clabels[i] = Alabels[locA] + else + locB = findfirst(==(Cis[i]), Bis) + if isnothing(locB) || Blabels[locB] < 0 + error( + "The noncommon indices of $Ais and $Bis must be the same as the indices $Cis." + ) + end + Clabels[i] = Blabels[locB] + end end - end - return (Tuple(Clabels), Alabels, Blabels) + return (Tuple(Clabels), Alabels, Blabels) end # @@ -782,7 +784,7 @@ filter(is::Indices, args::String; kwargs...) = filter(fmatch(args; kwargs...), i Return a new Indices with indices `setdir(is[i], dirs[i])`. """ function setdirs(is::Indices, dirs) - return map(i -> setdir(is[i], dirs[i]), 1:length(is)) + return map(i -> setdir(is[i], dirs[i]), 1:length(is)) end """ @@ -791,7 +793,7 @@ end Return the direction of the Index `i` in the Indices `is`. """ function dir(is1::Indices, i::Index) - return dir(getfirst(is1, i)) + return dir(getfirst(is1, i)) end """ @@ -801,7 +803,7 @@ Return a tuple of the directions of the indices `inds` in the Indices `is`, in the order they are found in `inds`. """ function dirs(is1::Indices, inds) - return map(i -> dir(is1, inds[i]), 1:length(inds)) + return map(i -> dir(is1, inds[i]), 1:length(inds)) end """ @@ -820,9 +822,9 @@ Get the permutation that takes collection 2 to collection 1, such that `col2[p] .== col1`. 
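A quick sketch of that permutation convention (illustrative only; this `getperm` is the internal helper defined in `indexset.jl`, so it is qualified):

```julia
using ITensors

s1 = (:a, :b, :c)
s2 = (:c, :a, :b)

p = ITensors.getperm(s1, s2)   # [2, 3, 1]
collect(s2)[p] == collect(s1)  # true: col2[p] .== col1
```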
""" function getperm(s1, s2) - N = length(s1) - r = Vector{Int}(undef, N) - return map!(i -> findfirst(==(s1[i]), s2), r, 1:length(s1)) + N = length(s1) + r = Vector{Int}(undef, N) + return map!(i -> findfirst(==(s1[i]), s2), r, 1:length(s1)) end # TODO: define directly for Vector @@ -832,12 +834,12 @@ end The number of blocks in the specified dimension. """ function NDTensors.nblocks(inds::IndexSet, i::Int) - return nblocks(Tuple(inds), i) + return nblocks(Tuple(inds), i) end # TODO: don't convert to Tuple function NDTensors.nblocks(inds::IndexSet, is) - return nblocks(Tuple(inds), is) + return nblocks(Tuple(inds), is) end """ @@ -849,8 +851,8 @@ dimension. NDTensors.nblocks(inds::Indices) = nblocks.(inds) # TODO: is this needed? -function NDTensors.nblocks(inds::NTuple{N,<:Index}) where {N} - return ntuple(i -> nblocks(inds, i), Val(N)) +function NDTensors.nblocks(inds::NTuple{N, <:Index}) where {N} + return ntuple(i -> nblocks(inds, i), Val(N)) end ndiagblocks(inds) = minimum(nblocks(inds)) @@ -870,12 +872,12 @@ flux(is, Block(2, 2)) == QN(0) ``` """ function flux(inds::Indices, block::Block) - qntot = QN() - for n in 1:length(inds) - ind = inds[n] - qntot += flux(ind, Block(block[n])) - end - return qntot + qntot = QN() + for n in 1:length(inds) + ind = inds[n] + qntot += flux(ind, Block(block[n])) + end + return qntot end """ @@ -918,18 +920,18 @@ block(inds::Indices, vals::Integer...) = blockindex(inds, vals...)[2] # Read and write # -function readcpp(io::IO, ::Type{<:Indices}; format="v3") - is = IndexSet() - if format == "v3" - size = read(io, Int) - function readind(io, n) - i = readcpp(io, Index; format) - stride = read(io, UInt64) - return i +function readcpp(io::IO, ::Type{<:Indices}; format = "v3") + is = IndexSet() + if format == "v3" + size = read(io, Int) + function readind(io, n) + i = readcpp(io, Index; format) + stride = read(io, UInt64) + return i + end + is = IndexSet(n -> readind(io, n), size) + else + throw(ArgumentError("read IndexSet: format=$format not supported")) end - is = IndexSet(n -> readind(io, n), size) - else - throw(ArgumentError("read IndexSet: format=$format not supported")) - end - return is + return is end diff --git a/src/lib/ITensorVisualizationCore/src/ITensorVisualizationCore.jl b/src/lib/ITensorVisualizationCore/src/ITensorVisualizationCore.jl index a538663264..878bb0c346 100644 --- a/src/lib/ITensorVisualizationCore/src/ITensorVisualizationCore.jl +++ b/src/lib/ITensorVisualizationCore/src/ITensorVisualizationCore.jl @@ -4,11 +4,11 @@ using Compat using ..ITensors export @visualize, - @visualize!, - @visualize_noeval, - @visualize_noeval!, - @visualize_sequence, - @visualize_sequence_noeval + @visualize!, + @visualize_noeval, + @visualize_noeval!, + @visualize_sequence, + @visualize_sequence_noeval # Visualizing ITensor networks include("visualize_macro.jl") diff --git a/src/lib/ITensorVisualizationCore/src/visualize_macro.jl b/src/lib/ITensorVisualizationCore/src/visualize_macro.jl index 0ae21d6e25..19775927fb 100644 --- a/src/lib/ITensorVisualizationCore/src/visualize_macro.jl +++ b/src/lib/ITensorVisualizationCore/src/visualize_macro.jl @@ -6,54 +6,54 @@ is_kwarg(arg_or_kwarg::Symbol) = false is_kwarg(arg_or_kwarg::Expr) = (arg_or_kwarg.head == :parameters) function has_kwargs(args_kwargs::Vector) - isempty(args_kwargs) && return false - return is_kwarg(first(args_kwargs)) + isempty(args_kwargs) && return false + return is_kwarg(first(args_kwargs)) end function get_kwargs(args_kwargs::Vector) - @assert has_kwargs(args_kwargs) - return 
first(args_kwargs) + @assert has_kwargs(args_kwargs) + return first(args_kwargs) end function get_kwarg(kwargs::Expr, key::Symbol) - n = findfirst(kw -> kw.args[1] == :sequence, kwargs.args) - if !isnothing(n) - @assert kwargs.args[n].head == :kw - return esc(kwargs.args[n].args[2]) - end - return nothing + n = findfirst(kw -> kw.args[1] == :sequence, kwargs.args) + if !isnothing(n) + @assert kwargs.args[n].head == :kw + return esc(kwargs.args[n].args[2]) + end + return nothing end function args_kwargs(ex::Vector) - kwargs = has_kwargs(ex) ? get_kwargs(ex) : :() - args = has_kwargs(ex) ? ex[2:end] : ex - return args, kwargs + kwargs = has_kwargs(ex) ? get_kwargs(ex) : :() + args = has_kwargs(ex) ? ex[2:end] : ex + return args, kwargs end function function_args_kwargs(ex::Symbol) - func = :identity - args = [ex] - kwargs = :() - iscollection = true - return func, args, kwargs, iscollection -end - -function function_args_kwargs(ex::Expr) - if ex.head == :call - func = first(ex.args) - args, kwargs = args_kwargs(ex.args[2:end]) - iscollection = true - elseif ex.head == :ref - #func, args, kwargs, iscollection = function_args_kwargs(Symbol(ex.args)) func = :identity args = [ex] kwargs = :() - iscollection = false - else - dump(ex) - error("Visualizing expression $ex not supported right now.") - end - return func, args, kwargs, iscollection + iscollection = true + return func, args, kwargs, iscollection +end + +function function_args_kwargs(ex::Expr) + if ex.head == :call + func = first(ex.args) + args, kwargs = args_kwargs(ex.args[2:end]) + iscollection = true + elseif ex.head == :ref + #func, args, kwargs, iscollection = function_args_kwargs(Symbol(ex.args)) + func = :identity + args = [ex] + kwargs = :() + iscollection = false + else + dump(ex) + error("Visualizing expression $ex not supported right now.") + end + return func, args, kwargs, iscollection end expr_to_string(s::Symbol) = String(s) @@ -64,44 +64,46 @@ expr_to_string(ex::Expr) = String(repr(ex))[3:(end - 1)] # the prefix for the labels if there is only # one input. function vertex_labels_kwargs(args, iscollection) - if iscollection && isone(length(args)) - vertex_labels_kw = :vertex_labels_prefix - vertex_labels_arg = string(only(args)) - else - vertex_labels_kw = :vertex_labels - vertex_labels_arg = string.(args) - end - return vertex_labels_kw, vertex_labels_arg + if iscollection && isone(length(args)) + vertex_labels_kw = :vertex_labels_prefix + vertex_labels_arg = string(only(args)) + else + vertex_labels_kw = :vertex_labels + vertex_labels_arg = string.(args) + end + return vertex_labels_kw, vertex_labels_arg end function func_args_sequence_kwargs(ex, vis_kwargs...) 
- func, args, kwargs, iscollection = function_args_kwargs(ex) - sequence = get_kwarg(kwargs, :sequence) - vertex_labels_kw, vertex_labels_arg = vertex_labels_kwargs(args, iscollection) - # Merge labels kwarg with kwargs - vis_kwargs_dict = Dict([ - vis_kwarg.args[1] => vis_kwarg.args[2] for vis_kwarg in vis_kwargs - ]) - vertex_labels_kwarg_dict = Dict(vertex_labels_kw => vertex_labels_arg) - merged_kwargs_dict = merge(vertex_labels_kwarg_dict, vis_kwargs_dict) - merged_kwargs_expr = [:($k = $v) for (k, v) in pairs(merged_kwargs_dict)] - return func, esc.(args), sequence, esc.(merged_kwargs_expr) + func, args, kwargs, iscollection = function_args_kwargs(ex) + sequence = get_kwarg(kwargs, :sequence) + vertex_labels_kw, vertex_labels_arg = vertex_labels_kwargs(args, iscollection) + # Merge labels kwarg with kwargs + vis_kwargs_dict = Dict( + [ + vis_kwarg.args[1] => vis_kwarg.args[2] for vis_kwarg in vis_kwargs + ] + ) + vertex_labels_kwarg_dict = Dict(vertex_labels_kw => vertex_labels_arg) + merged_kwargs_dict = merge(vertex_labels_kwarg_dict, vis_kwargs_dict) + merged_kwargs_expr = [:($k = $v) for (k, v) in pairs(merged_kwargs_dict)] + return func, esc.(args), sequence, esc.(merged_kwargs_expr) end -function visualize_expr(vis_func, ex::Union{Symbol,Expr}, vis_kwargs::Expr...) - func, args, sequence, kwargs = func_args_sequence_kwargs(ex, vis_kwargs...) - e = quote - $(vis_func)($(func), ($(args...),), $(sequence); $(kwargs...)) - end - return e +function visualize_expr(vis_func, ex::Union{Symbol, Expr}, vis_kwargs::Expr...) + func, args, sequence, kwargs = func_args_sequence_kwargs(ex, vis_kwargs...) + e = quote + $(vis_func)($(func), ($(args...),), $(sequence); $(kwargs...)) + end + return e end -function visualize_expr!(fig, vis_func!, ex::Union{Symbol,Expr}, vis_kwargs::Expr...) - func, args, sequence, kwargs = func_args_sequence_kwargs(ex, vis_kwargs...) - e = quote - $(vis_func!)($(esc(fig)), $(func), ($(args...),), $(sequence); $(kwargs...)) - end - return e +function visualize_expr!(fig, vis_func!, ex::Union{Symbol, Expr}, vis_kwargs::Expr...) + func, args, sequence, kwargs = func_args_sequence_kwargs(ex, vis_kwargs...) + e = quote + $(vis_func!)($(esc(fig)), $(func), ($(args...),), $(sequence); $(kwargs...)) + end + return e end """ @@ -155,117 +157,117 @@ readline() - `arrow_show`: Whether or not to show arrows on the edges. """ macro visualize(fig::Symbol, ex::Symbol, kwargs::Expr...) - e = quote - $(esc(fig)) = $(visualize_expr(visualize, ex, kwargs...)) - $(esc(ex)) - end - return e + e = quote + $(esc(fig)) = $(visualize_expr(visualize, ex, kwargs...)) + $(esc(ex)) + end + return e end macro visualize!(fig, ex::Symbol, kwargs::Expr...) - e = quote - $(visualize_expr!(fig, visualize!, ex, kwargs...)) - $(esc(ex)) - end - return e + e = quote + $(visualize_expr!(fig, visualize!, ex, kwargs...)) + $(esc(ex)) + end + return e end macro visualize(ex::Symbol) - e = quote - display($(visualize_expr(visualize, ex))) - $(esc(ex)) - end - return e + e = quote + display($(visualize_expr(visualize, ex))) + $(esc(ex)) + end + return e end macro visualize(ex_or_fig::Symbol, ex_or_kwarg::Expr, last_kwargs::Expr...) - if ex_or_kwarg.head == :(=) - # The second input is a keyword argument which means that the - # first input is the collection to visualize (no figure output binding specified) - ex = ex_or_fig - kwargs = (ex_or_kwarg, last_kwargs...) 
- e = quote - display($(visualize_expr(visualize, ex, kwargs...))) - $(esc(ex)) - end - else - # The second input is not a keyword argument which means that the - # first input is the binding for the figure output, the second is the expression - # to visualize - fig = ex_or_fig - ex = ex_or_kwarg - kwargs = last_kwargs - e = quote - $(esc(fig)) = $(visualize_expr(visualize, ex, kwargs...)) - $(esc(ex)) + if ex_or_kwarg.head == :(=) + # The second input is a keyword argument which means that the + # first input is the collection to visualize (no figure output binding specified) + ex = ex_or_fig + kwargs = (ex_or_kwarg, last_kwargs...) + e = quote + display($(visualize_expr(visualize, ex, kwargs...))) + $(esc(ex)) + end + else + # The second input is not a keyword argument which means that the + # first input is the binding for the figure output, the second is the expression + # to visualize + fig = ex_or_fig + ex = ex_or_kwarg + kwargs = last_kwargs + e = quote + $(esc(fig)) = $(visualize_expr(visualize, ex, kwargs...)) + $(esc(ex)) + end end - end - return e + return e end macro visualize!(fig, ex::Expr, kwargs::Expr...) - e = quote - $(visualize_expr!(fig, visualize!, ex, kwargs...)) - $(esc(ex)) - end - return e + e = quote + $(visualize_expr!(fig, visualize!, ex, kwargs...)) + $(esc(ex)) + end + return e end macro visualize(ex::Expr, kwargs::Expr...) - e = quote - display($(visualize_expr(visualize, ex, kwargs...))) - $(esc(ex)) - end - return e + e = quote + display($(visualize_expr(visualize, ex, kwargs...))) + $(esc(ex)) + end + return e end macro visualize_noeval(ex::Symbol, kwargs::Expr...) - e = quote - $(visualize_expr(visualize, ex, kwargs...)) - end - return e + e = quote + $(visualize_expr(visualize, ex, kwargs...)) + end + return e end macro visualize_noeval(ex::Expr, kwargs::Expr...) - e = quote - $(visualize_expr(visualize, ex, kwargs...)) - end - return e + e = quote + $(visualize_expr(visualize, ex, kwargs...)) + end + return e end macro visualize_noeval!(fig, ex::Symbol, kwargs::Expr...) - e = quote - $(visualize_expr!(fig, visualize!, ex, kwargs...)) - end - return e + e = quote + $(visualize_expr!(fig, visualize!, ex, kwargs...)) + end + return e end macro visualize_noeval!(fig, ex::Expr, kwargs::Expr...) - e = quote - $(visualize_expr!(fig, visualize!, ex, kwargs...)) - end - return e + e = quote + $(visualize_expr!(fig, visualize!, ex, kwargs...)) + end + return e end macro visualize_sequence(fig::Symbol, ex::Expr, kwargs::Expr...) - e = quote - $(esc(fig)) = $(visualize_expr(visualize_sequence, ex, kwargs...)) - $(esc(ex)) - end - return e + e = quote + $(esc(fig)) = $(visualize_expr(visualize_sequence, ex, kwargs...)) + $(esc(ex)) + end + return e end macro visualize_sequence(ex::Expr, kwargs::Expr...) - e = quote - display($(visualize_expr(visualize_sequence, ex, kwargs...))) - $(esc(ex)) - end - return e + e = quote + display($(visualize_expr(visualize_sequence, ex, kwargs...))) + $(esc(ex)) + end + return e end macro visualize_sequence_noeval(ex::Expr, kwargs::Expr...) 
- e = quote - $(visualize_expr(visualize_sequence, ex, kwargs...)) - end - return e + e = quote + $(visualize_expr(visualize_sequence, ex, kwargs...)) + end + return e end diff --git a/src/lib/ITensorsOpsExt/src/ITensorsOpsExt.jl b/src/lib/ITensorsOpsExt/src/ITensorsOpsExt.jl index 48bef834ae..78801e4323 100644 --- a/src/lib/ITensorsOpsExt/src/ITensorsOpsExt.jl +++ b/src/lib/ITensorsOpsExt/src/ITensorsOpsExt.jl @@ -6,74 +6,74 @@ using ..Ops: Op using ..SiteTypes: SiteTypes, op function SiteTypes.op(I::UniformScaling, s::Index...) - return I.λ * op("Id", s...) + return I.λ * op("Id", s...) end function ITensors.ITensor(o::Op, s::Vector{<:Index}) - return op(o.which_op, map(n -> s[n], o.sites)...; o.params...) + return op(o.which_op, map(n -> s[n], o.sites)...; o.params...) end function ITensors.ITensor(o::Scaled, s::Vector{<:Index}) - c = coefficient(o) - if isreal(c) - c = real(c) - end - return c * ITensor(argument(o), s) + c = coefficient(o) + if isreal(c) + c = real(c) + end + return c * ITensor(argument(o), s) end function ITensors.ITensor(o::Prod, s::Vector{<:Index}) - T = ITensor(true) - for a in o.args[1] - Tₙ = ITensor(a, s) - # TODO: Implement this logic inside `apply` - if hascommoninds(T, Tₙ) - T = T(Tₙ) - else - T *= Tₙ + T = ITensor(true) + for a in o.args[1] + Tₙ = ITensor(a, s) + # TODO: Implement this logic inside `apply` + if hascommoninds(T, Tₙ) + T = T(Tₙ) + else + T *= Tₙ + end end - end - return T + return T end function ITensors.ITensor(o::Sum, s::Vector{<:Index}) - T = ITensor() - for a in o.args[1] - T += ITensor(a, s) - end - return T + T = ITensor() + for a in o.args[1] + T += ITensor(a, s) + end + return T end function ITensors.ITensor(o::Exp, s::Vector{<:Index}) - return exp(ITensor(argument(o), s)) + return exp(ITensor(argument(o), s)) end function ITensors.ITensor(o::LazyApply.Adjoint, s::Vector{<:Index}) - return swapprime(dag(ITensor(o', s)), 0 => 1) + return swapprime(dag(ITensor(o', s)), 0 => 1) end function LazyApply.Sum{ITensor}(o::Sum, s::Vector{<:Index}) - return Applied(sum, (map(oₙ -> ITensor(oₙ, s), only(o.args)),)) + return Applied(sum, (map(oₙ -> ITensor(oₙ, s), only(o.args)),)) end function LazyApply.Prod{ITensor}(o::Prod, s::Vector{<:Index}) - return Applied(prod, (map(oₙ -> ITensor(oₙ, s), only(o.args)),)) + return Applied(prod, (map(oₙ -> ITensor(oₙ, s), only(o.args)),)) end -function LazyApply.Prod{ITensor}(o::Scaled{C,Prod{Op}}, s::Vector{<:Index}) where {C} - t = Prod{ITensor}(argument(o), s) - t1 = coefficient(o) * only(t.args)[1] - return Applied(prod, (vcat([t1], only(t.args)[2:end]),)) +function LazyApply.Prod{ITensor}(o::Scaled{C, Prod{Op}}, s::Vector{<:Index}) where {C} + t = Prod{ITensor}(argument(o), s) + t1 = coefficient(o) * only(t.args)[1] + return Applied(prod, (vcat([t1], only(t.args)[2:end]),)) end function ITensors.apply(o::Prod{ITensor}, v::ITensor; kwargs...) - ov = v - for oₙ in reverse(only(o.args)) - ov = apply(oₙ, ov; kwargs...) - end - return ov + ov = v + for oₙ in reverse(only(o.args)) + ov = apply(oₙ, ov; kwargs...) + end + return ov end function (o::Prod{ITensor})(v::ITensor; kwargs...) - return apply(o, v; kwargs...) + return apply(o, v; kwargs...) 
end end diff --git a/src/lib/ITensorsSiteTypesExt/src/ITensorsSiteTypesExt.jl b/src/lib/ITensorsSiteTypesExt/src/ITensorsSiteTypesExt.jl index 970465e7d9..90f16004c6 100644 --- a/src/lib/ITensorsSiteTypesExt/src/ITensorsSiteTypesExt.jl +++ b/src/lib/ITensorsSiteTypesExt/src/ITensorsSiteTypesExt.jl @@ -7,14 +7,14 @@ SiteTypes.val(i::Index, l::LastVal) = l.f(dim(i)) # TODO: # Implement a macro with a general definition: # f(iv::Pair{<:Index}, args...) = (f(ind(iv), args...) => val(iv)) -ITensors.prime(iv::Pair{<:Index}, inc::Integer=1) = (prime(ind(iv), inc) => val(iv)) +ITensors.prime(iv::Pair{<:Index}, inc::Integer = 1) = (prime(ind(iv), inc) => val(iv)) NDTensors.sim(iv::Pair{<:Index}, args...) = (sim(ind(iv), args...) => val(iv)) ITensors.dag(iv::Pair{<:Index}) = (dag(ind(iv)) => val(iv)) Base.adjoint(iv::Pair{<:Index}) = (prime(ind(iv)) => val(iv)) using ..ITensors: ITensors, Indices function ITensors._vals(is::Indices, I::String...) - return val.(is, I) + return val.(is, I) end using Adapt: Adapt @@ -22,31 +22,31 @@ using ..ITensors: ITensors, Index, ITensor, ind, inds using NDTensors: NDTensors, Tensor using ..SiteTypes: val Base.@propagate_inbounds @inline function ITensors._getindex( - T::Tensor, ivs::Vararg{Any,N} -) where {N} - # Tried ind.(ivs), val.(ivs) but it is slower - p = NDTensors.getperm(inds(T), ntuple(n -> ind(@inbounds ivs[n]), Val(N))) - fac = NDTensors.permfactor(p, ivs...) # possible sign - return fac * ITensors._getindex( - T, NDTensors.permute(ntuple(n -> val(@inbounds ivs[n]), Val(N)), p)... - ) + T::Tensor, ivs::Vararg{Any, N} + ) where {N} + # Tried ind.(ivs), val.(ivs) but it is slower + p = NDTensors.getperm(inds(T), ntuple(n -> ind(@inbounds ivs[n]), Val(N))) + fac = NDTensors.permfactor(p, ivs...) # possible sign + return fac * ITensors._getindex( + T, NDTensors.permute(ntuple(n -> val(@inbounds ivs[n]), Val(N)), p)... + ) end Base.@propagate_inbounds @inline function ITensors._setindex!!( - T::Tensor, x::Number, ivs::Vararg{Any,N} -) where {N} - # Would be nice to split off the functions for extracting the `ind` and `val` as Tuples, - # but it was slower. - p = NDTensors.getperm(inds(T), ntuple(n -> ind(@inbounds ivs[n]), Val(N))) - fac = NDTensors.permfactor(p, ivs...) # possible sign - return ITensors._setindex!!( - T, fac * x, NDTensors.permute(ntuple(n -> val(@inbounds ivs[n]), Val(N)), p)... - ) + T::Tensor, x::Number, ivs::Vararg{Any, N} + ) where {N} + # Would be nice to split off the functions for extracting the `ind` and `val` as Tuples, + # but it was slower. + p = NDTensors.getperm(inds(T), ntuple(n -> ind(@inbounds ivs[n]), Val(N))) + fac = NDTensors.permfactor(p, ivs...) # possible sign + return ITensors._setindex!!( + T, fac * x, NDTensors.permute(ntuple(n -> val(@inbounds ivs[n]), Val(N)), p)... + ) end Base.@propagate_inbounds @inline function Base.setindex!( - T::ITensor, x::Number, I1::Pair{<:Index,String}, I::Pair{<:Index,String}... -) - Iv = map(i -> i.first => val(i.first, i.second), (I1, I...)) - return setindex!(T, x, Iv...) + T::ITensor, x::Number, I1::Pair{<:Index, String}, I::Pair{<:Index, String}... + ) + Iv = map(i -> i.first => val(i.first, i.second), (I1, I...)) + return setindex!(T, x, Iv...) end """ onehot(ivs...) @@ -72,8 +72,8 @@ B = onehot(i=>1,j=>3) ``` """ function ITensors.onehot(datatype::Type{<:AbstractArray}, ivs::Pair{<:Index}...) - A = ITensor(eltype(datatype), ind.(ivs)...) - A[val.(ivs)...] = one(eltype(datatype)) - return Adapt.adapt(datatype, A) + A = ITensor(eltype(datatype), ind.(ivs)...) + A[val.(ivs)...] 
= one(eltype(datatype)) + return Adapt.adapt(datatype, A) end end diff --git a/src/lib/LazyApply/src/LazyApply.jl b/src/lib/LazyApply/src/LazyApply.jl index f0e69fa71f..58ad8ed109 100644 --- a/src/lib/LazyApply/src/LazyApply.jl +++ b/src/lib/LazyApply/src/LazyApply.jl @@ -1,42 +1,42 @@ module LazyApply import Base: - ==, - +, - -, - *, - /, - ^, - exp, - adjoint, - copy, - show, - getindex, - length, - isless, - iterate, - firstindex, - lastindex, - keys, - reverse, - size + ==, + +, + -, + *, + /, + ^, + exp, + adjoint, + copy, + show, + getindex, + length, + isless, + iterate, + firstindex, + lastindex, + keys, + reverse, + size export Applied, Scaled, Sum, Prod, Exp, coefficient, argument, expand, materialize, terms -struct Applied{F,Args<:Tuple,Kwargs<:NamedTuple} - f::F - args::Args - kwargs::Kwargs +struct Applied{F, Args <: Tuple, Kwargs <: NamedTuple} + f::F + args::Args + kwargs::Kwargs end Applied(f, args::Tuple) = Applied(f, args, (;)) materialize(x) = x function materialize(a::Applied) - return a.f(materialize.(a.args)...; a.kwargs...) + return a.f(materialize.(a.args)...; a.kwargs...) end function (a1::Applied == a2::Applied) - return a1.f == a2.f && a1.args == a2.args && a1.kwargs == a2.kwargs + return a1.f == a2.f && a1.args == a2.args && a1.kwargs == a2.kwargs end # @@ -44,9 +44,9 @@ end # # Used for dispatch -const Scaled{C<:Number,A} = Applied{typeof(*),Tuple{C,A},NamedTuple{(),Tuple{}}} -const Sum{A} = Applied{typeof(sum),Tuple{Vector{A}},NamedTuple{(),Tuple{}}} -const Prod{A} = Applied{typeof(prod),Tuple{Vector{A}},NamedTuple{(),Tuple{}}} +const Scaled{C <: Number, A} = Applied{typeof(*), Tuple{C, A}, NamedTuple{(), Tuple{}}} +const Sum{A} = Applied{typeof(sum), Tuple{Vector{A}}, NamedTuple{(), Tuple{}}} +const Prod{A} = Applied{typeof(prod), Tuple{Vector{A}}, NamedTuple{(), Tuple{}}} # Some convenient empty constructors Sum{A}() where {A} = Applied(sum, (A[],)) @@ -61,12 +61,12 @@ argument(co::Scaled{C}) where {C} = co.args[2] # 1.3 * Op("X", 1) + 1.3 * Op("X", 2) # 1.3 * Op("X", 1) * Op("X", 2) + 1.3 * Op("X", 3) * Op("X", 4) -function (a1::Scaled{C,A} + a2::Scaled{C,A}) where {C,A} - return Sum{Scaled{C,A}}() + a1 + a2 +function (a1::Scaled{C, A} + a2::Scaled{C, A}) where {C, A} + return Sum{Scaled{C, A}}() + a1 + a2 end function (a1::Prod{A} + a2::Prod{A}) where {A} - return Sum{Prod{A}}() + a1 + a2 + return Sum{Prod{A}}() + a1 + a2 end (c::Number * a::Scaled{C}) where {C} = (c * coefficient(a)) * argument(a) @@ -83,8 +83,8 @@ end (a1::A - a2::Sum{A}) where {A} = a1 + (-a2) (a1::Sum{A} - a2::Prod{A}) where {A} = a1 + (-a2) -(a1::Sum{A} - a2::Scaled{C,Prod{A}}) where {C,A} = a1 + (-a2) -(a1::Sum{A} - a2::Sum{Scaled{C,Prod{A}}}) where {C,A} = a1 + (-a2) +(a1::Sum{A} - a2::Scaled{C, Prod{A}}) where {C, A} = a1 + (-a2) +(a1::Sum{A} - a2::Sum{Scaled{C, Prod{A}}}) where {C, A} = a1 + (-a2) (a1::Prod{A} * a2::A) where {A} = Applied(prod, (vcat(only(a1.args), [a2]),)) (a1::A * a2::Prod{A}) where {A} = Applied(prod, (vcat([a1], only(a2.args)),)) @@ -97,24 +97,24 @@ end # 1.3 * Op("X", 1) + 1 * Op("X", 2) # 1.3 * Op("X", 1) * Op("X", 2) + 1 * Op("X", 3) # 1.3 * Op("X", 1) * Op("X", 2) + 1 * Op("X", 3) * Op("X", 4) -function (co1::Scaled{C1,A} + co2::Scaled{C2,A}) where {C1,C2,A} - c1, c2 = promote(coefficient(co1), coefficient(co2)) - return c1 * argument(co1) + c2 * argument(co2) +function (co1::Scaled{C1, A} + co2::Scaled{C2, A}) where {C1, C2, A} + c1, c2 = promote(coefficient(co1), coefficient(co2)) + return c1 * argument(co1) + c2 * argument(co2) end # (1.3 * Op("X", 1)) 
* (1.3 * Op("X", 2)) -function (co1::Scaled{C1} * co2::Scaled{C2}) where {C1,C2} - c = coefficient(co1) * coefficient(co2) - o = argument(co1) * argument(co2) - return c * o +function (co1::Scaled{C1} * co2::Scaled{C2}) where {C1, C2} + c = coefficient(co1) * coefficient(co2) + o = argument(co1) * argument(co2) + return c * o end -function (a1::Prod{A} * a2::Scaled{C,A}) where {C,A} - return coefficient(a2) * (a1 * argument(a2)) +function (a1::Prod{A} * a2::Scaled{C, A}) where {C, A} + return coefficient(a2) * (a1 * argument(a2)) end -function (a1::Prod{A} + a2::Scaled{C,A}) where {C,A} - return one(C) * a1 + Prod{A}() * a2 +function (a1::Prod{A} + a2::Scaled{C, A}) where {C, A} + return one(C) * a1 + Prod{A}() * a2 end # (Op("X", 1) + Op("X", 2)) + (Op("X", 3) + Op("X", 4)) @@ -124,14 +124,14 @@ end (a1::Prod{A} * a2::Prod{A}) where {A} = Applied(prod, (vcat(only(a1.args), only(a2.args)),)) -(os::Sum{Scaled{C,A}} + o::A) where {C,A} = os + one(C) * o -(o::A + os::Sum{Scaled{C,A}}) where {C,A} = one(C) * o + os +(os::Sum{Scaled{C, A}} + o::A) where {C, A} = os + one(C) * o +(o::A + os::Sum{Scaled{C, A}}) where {C, A} = one(C) * o + os # Op("X", 1) + Op("X", 2) + 1.3 * Op("X", 3) -(os::Sum{A} + co::Scaled{C,A}) where {C,A} = one(C) * os + co +(os::Sum{A} + co::Scaled{C, A}) where {C, A} = one(C) * os + co # 1.3 * Op("X", 1) + (Op("X", 2) + Op("X", 3)) -(co::Scaled{C,A} + os::Sum{A}) where {C,A} = co + one(C) * os +(co::Scaled{C, A} + os::Sum{A}) where {C, A} = co + one(C) * os # 1.3 * (Op("X", 1) + Op("X", 2)) (c::Number * os::Sum) = Applied(sum, (c * os.args[1],)) @@ -141,16 +141,16 @@ end (a1::Sum * a2::Sum) = Applied(prod, ([a1, a2],)) function _expand(a1::Sum, a2::Sum) - return Applied(sum, (vec([a1[i] * a2[j] for i in 1:length(a1), j in 1:length(a2)]),)) + return Applied(sum, (vec([a1[i] * a2[j] for i in 1:length(a1), j in 1:length(a2)]),)) end function expand(a::Prod) - if length(a) == 1 - return a[1] - elseif length(a) ≥ 2 - a12 = _expand(a[1], a[2]) - return expand(Applied(prod, (vcat([a12], a[3:end]),))) - end + if length(a) == 1 + return a[1] + elseif length(a) ≥ 2 + a12 = _expand(a[1], a[2]) + return expand(Applied(prod, (vcat([a12], a[3:end]),))) + end end # (Op("X", 1) + Op("X", 2)) * 1.3 @@ -160,59 +160,59 @@ end (os::Sum / c::Number) = inv(c) * os # Promotions -function (co1::Scaled{C,Prod{A}} + co2::Scaled{C,A}) where {C,A} - return co1 + coefficient(co2) * Applied(prod, ([argument(co2)],)) +function (co1::Scaled{C, Prod{A}} + co2::Scaled{C, A}) where {C, A} + return co1 + coefficient(co2) * Applied(prod, ([argument(co2)],)) end function (a1::Scaled - a2::Scaled) - return a1 + (-a2) + return a1 + (-a2) end function (a1::Prod{A} + a2::A) where {A} - return a1 + Applied(prod, ([a2],)) + return a1 + Applied(prod, ([a2],)) end function (a1::Sum{A} + a2::Prod{A}) where {A} - return Prod{A}() * a1 + a2 + return Prod{A}() * a1 + a2 end -function (a1::Sum{A} + a2::Sum{Scaled{C,Prod{A}}}) where {C,A} - return (one(C) * Prod{A}() * a1) + a2 +function (a1::Sum{A} + a2::Sum{Scaled{C, Prod{A}}}) where {C, A} + return (one(C) * Prod{A}() * a1) + a2 end function (a1::Prod{A} - a2::A) where {A} - return a1 + (-a2) + return a1 + (-a2) end -function (co1::Sum{Scaled{C,Prod{A}}} + co2::Scaled{C,A}) where {C,A} - return co1 + coefficient(co2) * Applied(prod, ([argument(co2)],)) +function (co1::Sum{Scaled{C, Prod{A}}} + co2::Scaled{C, A}) where {C, A} + return co1 + coefficient(co2) * Applied(prod, ([argument(co2)],)) end -function (a1::Sum{Scaled{C1,Prod{A}}} - a2::Scaled{C2,A}) where {C1,C2,A} 
- return a1 + (-a2) +function (a1::Sum{Scaled{C1, Prod{A}}} - a2::Scaled{C2, A}) where {C1, C2, A} + return a1 + (-a2) end -function (a1::Sum{Scaled{C,Prod{A}}} - a2::Prod{A}) where {C,A} - return a1 + (-a2) +function (a1::Sum{Scaled{C, Prod{A}}} - a2::Prod{A}) where {C, A} + return a1 + (-a2) end -function (a1::Sum{Scaled{C1,Prod{A}}} - a2::Scaled{C2,Prod{A}}) where {C1,C2,A} - return a1 + (-a2) +function (a1::Sum{Scaled{C1, Prod{A}}} - a2::Scaled{C2, Prod{A}}) where {C1, C2, A} + return a1 + (-a2) end -function (a1::Sum{A} + a2::Scaled{C,Prod{A}}) where {C,A} - return Sum{Scaled{C,Prod{A}}}() + a1 + a2 +function (a1::Sum{A} + a2::Scaled{C, Prod{A}}) where {C, A} + return Sum{Scaled{C, Prod{A}}}() + a1 + a2 end -function (a1::Sum{Scaled{C1,Prod{A}}} + a2::Scaled{C2,A}) where {C1,C2,A} - C = promote_type(C1, C2) - return one(C) * a1 + one(C) * a2 +function (a1::Sum{Scaled{C1, Prod{A}}} + a2::Scaled{C2, A}) where {C1, C2, A} + C = promote_type(C1, C2) + return one(C) * a1 + one(C) * a2 end # (::Sum{Scaled{Bool,Prod{Op}}} + ::Scaled{Float64,Prod{Op}}) -function (a1::Sum{Scaled{C1,A}} + a2::Scaled{C2,A}) where {C1,C2,A} - C = promote_type(C1, C2) - return one(C) * a1 + one(C) * a2 +function (a1::Sum{Scaled{C1, A}} + a2::Scaled{C2, A}) where {C1, C2, A} + C = promote_type(C1, C2) + return one(C) * a1 + one(C) * a2 end # TODO: Is this needed? It seems like: @@ -220,57 +220,57 @@ end # (a1::Sum{A} + a2::A) # # is not being called. -function (a1::Sum{Scaled{C,A}} + a2::Scaled{C,A}) where {C,A} - return Applied(sum, (vcat(only(a1.args), [a2]),)) +function (a1::Sum{Scaled{C, A}} + a2::Scaled{C, A}) where {C, A} + return Applied(sum, (vcat(only(a1.args), [a2]),)) end -function (a1::Sum{Scaled{C,Prod{A}}} + a2::Sum{A}) where {C,A} - a2 = one(C) * a2 - a2 = Prod{A}() * a2 - return a1 + one(C) * Prod{A}() * a2 +function (a1::Sum{Scaled{C, Prod{A}}} + a2::Sum{A}) where {C, A} + a2 = one(C) * a2 + a2 = Prod{A}() * a2 + return a1 + one(C) * Prod{A}() * a2 end function (a1::Sum{Prod{A}} + a2::A) where {A} - return a1 + (Prod{A}() * a2) + return a1 + (Prod{A}() * a2) end -function (a1::Sum{Prod{A}} + a2::Scaled{C,A}) where {C,A} - return a1 + (Prod{A}() * a2) +function (a1::Sum{Prod{A}} + a2::Scaled{C, A}) where {C, A} + return a1 + (Prod{A}() * a2) end -function (a1::Sum{Scaled{C,Prod{A}}} + a2::A) where {C,A} - return a1 + one(C) * a2 +function (a1::Sum{Scaled{C, Prod{A}}} + a2::A) where {C, A} + return a1 + one(C) * a2 end -(a1::Sum{Scaled{C,Prod{A}}} - a2::A) where {C,A} = a1 + (-a2) +(a1::Sum{Scaled{C, Prod{A}}} - a2::A) where {C, A} = a1 + (-a2) -function (a1::Sum{Scaled{C,Prod{A}}} + a2::Sum{Scaled{C,A}}) where {C,A} - return a1 + (Prod{A}() * a2) +function (a1::Sum{Scaled{C, Prod{A}}} + a2::Sum{Scaled{C, A}}) where {C, A} + return a1 + (Prod{A}() * a2) end -function (o::A + os::Sum{Scaled{C,Prod{A}}}) where {C,A} - return one(C) * o + os +function (o::A + os::Sum{Scaled{C, Prod{A}}}) where {C, A} + return one(C) * o + os end function (a::Sum^n::Int) - r = a - for _ in 2:n - r *= a - end - return r + r = a + for _ in 2:n + r *= a + end + return r end function (a::Prod^n::Int) - r = a - for _ in 2:n - r *= a - end - return r + r = a + for _ in 2:n + r *= a + end + return r end exp(a::Applied) = Applied(exp, (a,)) -const Exp{A} = Applied{typeof(exp),Tuple{A},NamedTuple{(),Tuple{}}} -const Adjoint{A} = Applied{typeof(adjoint),Tuple{A},NamedTuple{(),Tuple{}}} +const Exp{A} = Applied{typeof(exp), Tuple{A}, NamedTuple{(), Tuple{}}} +const Adjoint{A} = Applied{typeof(adjoint), Tuple{A}, NamedTuple{(), 
Tuple{}}} argument(a::Exp) = a.args[1] @@ -281,7 +281,7 @@ argument(a::Exp) = a.args[1] (e1::Exp * e2::Applied) = Applied(prod, ([e1, e2],)) function reverse(a::Prod) - return Applied(prod, (reverse(only(a.args)),)) + return Applied(prod, (reverse(only(a.args)),)) end adjoint(a::Prod) = Applied(prod, (map(adjoint, reverse(only(a.args))),)) @@ -290,32 +290,32 @@ adjoint(a::Prod) = Applied(prod, (map(adjoint, reverse(only(a.args))),)) # Convenient indexing # -getindex(a::Union{Sum,Prod}, I...) = only(a.args)[I...] -iterate(a::Union{Sum,Prod}, args...) = iterate(only(a.args), args...) -size(a::Union{Sum,Prod}) = size(only(a.args)) -length(a::Union{Sum,Prod}) = length(only(a.args)) -firstindex(a::Union{Sum,Prod}) = 1 -lastindex(a::Union{Sum,Prod}) = length(a) -keys(a::Union{Sum,Prod}) = 1:length(a) - -length(a::Scaled{C,<:Sum}) where {C} = length(argument(a)) -length(a::Scaled{C,<:Prod}) where {C} = length(argument(a)) -getindex(a::Scaled{C,<:Sum}, I...) where {C} = getindex(argument(a), I...) -getindex(a::Scaled{C,<:Prod}, I...) where {C} = getindex(argument(a), I...) -lastindex(a::Scaled{C,<:Sum}) where {C} = lastindex(argument(a)) -lastindex(a::Scaled{C,<:Prod}) where {C} = lastindex(argument(a)) +getindex(a::Union{Sum, Prod}, I...) = only(a.args)[I...] +iterate(a::Union{Sum, Prod}, args...) = iterate(only(a.args), args...) +size(a::Union{Sum, Prod}) = size(only(a.args)) +length(a::Union{Sum, Prod}) = length(only(a.args)) +firstindex(a::Union{Sum, Prod}) = 1 +lastindex(a::Union{Sum, Prod}) = length(a) +keys(a::Union{Sum, Prod}) = 1:length(a) + +length(a::Scaled{C, <:Sum}) where {C} = length(argument(a)) +length(a::Scaled{C, <:Prod}) where {C} = length(argument(a)) +getindex(a::Scaled{C, <:Sum}, I...) where {C} = getindex(argument(a), I...) +getindex(a::Scaled{C, <:Prod}, I...) where {C} = getindex(argument(a), I...) 
+lastindex(a::Scaled{C, <:Sum}) where {C} = lastindex(argument(a)) +lastindex(a::Scaled{C, <:Prod}) where {C} = lastindex(argument(a)) # # Functions convenient for OpSum code # -terms(a::Union{Sum,Prod}) = only(a.args) -terms(a::Scaled{C,<:Union{Sum,Prod}}) where {C} = terms(argument(a)) +terms(a::Union{Sum, Prod}) = only(a.args) +terms(a::Scaled{C, <:Union{Sum, Prod}}) where {C} = terms(argument(a)) copy(a::Applied) = Applied(deepcopy(a.f), deepcopy(a.args), deepcopy(a.kwargs)) Sum(a::Vector) = Applied(sum, (a,)) Prod(a::Vector) = Applied(prod, (a,)) function isless(a1::Applied{F}, a2::Applied{F}) where {F} - return (isless(a1.args, a2.args) && isless(a1.kwargs, a2.kwargs)) + return (isless(a1.args, a2.args) && isless(a1.kwargs, a2.kwargs)) end # @@ -323,63 +323,63 @@ end # function show(io::IO, ::MIME"text/plain", a::Sum) - print(io, "sum(\n") - for n in eachindex(a) - print(io, " ", a[n]) - if n ≠ lastindex(a) - print(io, "\n") + print(io, "sum(\n") + for n in eachindex(a) + print(io, " ", a[n]) + if n ≠ lastindex(a) + print(io, "\n") + end end - end - print(io, "\n)") - return nothing + print(io, "\n)") + return nothing end show(io::IO, a::Sum) = show(io, MIME("text/plain"), a) function show(io::IO, ::MIME"text/plain", a::Prod) - print(io, "prod(\n") - for n in eachindex(a) - print(io, " ", a[n]) - if n ≠ lastindex(a) - print(io, "\n") + print(io, "prod(\n") + for n in eachindex(a) + print(io, " ", a[n]) + if n ≠ lastindex(a) + print(io, "\n") + end end - end - print(io, "\n)") - return nothing + print(io, "\n)") + return nothing end show(io::IO, a::Prod) = show(io, MIME("text/plain"), a) function show(io::IO, m::MIME"text/plain", a::Exp) - print(io, a.f, "(") - for n in 1:length(a.args) - print(io, a.args[n]) - if n < length(a.args) - print(io, ", ") + print(io, a.f, "(") + for n in 1:length(a.args) + print(io, a.args[n]) + if n < length(a.args) + print(io, ", ") + end end - end - print(io, ")") - return nothing + print(io, ")") + return nothing end show(io::IO, a::Exp) = show(io, MIME("text/plain"), a) function show(io::IO, m::MIME"text/plain", a::Applied) - print(io, a.f, "(") - for n in eachindex(a.args) - print(io, a.args[n]) - if n < length(a.args) - print(io, ", ") + print(io, a.f, "(") + for n in eachindex(a.args) + print(io, a.args[n]) + if n < length(a.args) + print(io, ", ") + end end - end - if !isempty(a.kwargs) - print(io, "; ") - for n in 1:length(a.kwargs) - print(io, keys(a.kwargs)[n], "=", a.kwargs[n]) - if n < length(a.kwargs) - print(io, ", ") - end + if !isempty(a.kwargs) + print(io, "; ") + for n in 1:length(a.kwargs) + print(io, keys(a.kwargs)[n], "=", a.kwargs[n]) + if n < length(a.kwargs) + print(io, ", ") + end + end end - end - print(io, ")") - return nothing + print(io, ")") + return nothing end show(io::IO, a::Applied) = show(io, MIME("text/plain"), a) diff --git a/src/lib/Ops/ops_itensor.jl b/src/lib/Ops/ops_itensor.jl index e650376dcf..0d08c769eb 100644 --- a/src/lib/Ops/ops_itensor.jl +++ b/src/lib/Ops/ops_itensor.jl @@ -1,73 +1,73 @@ using .SiteTypes: SiteTypes, op function SiteTypes.op(I::UniformScaling, s::Index...) - return I.λ * op("Id", s...) + return I.λ * op("Id", s...) end function ITensor(o::Op, s::Vector{<:Index}) - return op(o.which_op, map(n -> s[n], o.sites)...; o.params...) + return op(o.which_op, map(n -> s[n], o.sites)...; o.params...) 
end function ITensor(o::Scaled, s::Vector{<:Index}) - c = coefficient(o) - if isreal(c) - c = real(c) - end - return c * ITensor(argument(o), s) + c = coefficient(o) + if isreal(c) + c = real(c) + end + return c * ITensor(argument(o), s) end function ITensor(o::Prod, s::Vector{<:Index}) - T = ITensor(true) - for a in o.args[1] - Tₙ = ITensor(a, s) - # TODO: Implement this logic inside `apply` - if hascommoninds(T, Tₙ) - T = T(Tₙ) - else - T *= Tₙ + T = ITensor(true) + for a in o.args[1] + Tₙ = ITensor(a, s) + # TODO: Implement this logic inside `apply` + if hascommoninds(T, Tₙ) + T = T(Tₙ) + else + T *= Tₙ + end end - end - return T + return T end function ITensor(o::Sum, s::Vector{<:Index}) - T = ITensor() - for a in o.args[1] - T += ITensor(a, s) - end - return T + T = ITensor() + for a in o.args[1] + T += ITensor(a, s) + end + return T end function ITensor(o::Exp, s::Vector{<:Index}) - return exp(ITensor(argument(o), s)) + return exp(ITensor(argument(o), s)) end function ITensor(o::LazyApply.Adjoint, s::Vector{<:Index}) - return swapprime(dag(ITensor(o', s)), 0 => 1) + return swapprime(dag(ITensor(o', s)), 0 => 1) end function Sum{ITensor}(o::Sum, s::Vector{<:Index}) - return Applied(sum, (map(oₙ -> ITensor(oₙ, s), only(o.args)),)) + return Applied(sum, (map(oₙ -> ITensor(oₙ, s), only(o.args)),)) end function Prod{ITensor}(o::Prod, s::Vector{<:Index}) - return Applied(prod, (map(oₙ -> ITensor(oₙ, s), only(o.args)),)) + return Applied(prod, (map(oₙ -> ITensor(oₙ, s), only(o.args)),)) end -function Prod{ITensor}(o::Scaled{C,Prod{Op}}, s::Vector{<:Index}) where {C} - t = Prod{ITensor}(argument(o), s) - t1 = coefficient(o) * only(t.args)[1] - return Applied(prod, (vcat([t1], only(t.args)[2:end]),)) +function Prod{ITensor}(o::Scaled{C, Prod{Op}}, s::Vector{<:Index}) where {C} + t = Prod{ITensor}(argument(o), s) + t1 = coefficient(o) * only(t.args)[1] + return Applied(prod, (vcat([t1], only(t.args)[2:end]),)) end function apply(o::Prod{ITensor}, v::ITensor; kwargs...) - ov = v - for oₙ in reverse(only(o.args)) - ov = apply(oₙ, ov; kwargs...) - end - return ov + ov = v + for oₙ in reverse(only(o.args)) + ov = apply(oₙ, ov; kwargs...) + end + return ov end function (o::Prod{ITensor})(v::ITensor; kwargs...) - return apply(o, v; kwargs...) + return apply(o, v; kwargs...) end diff --git a/src/lib/Ops/src/op.jl b/src/lib/Ops/src/op.jl index 886bb969b9..9f74e8ceca 100644 --- a/src/lib/Ops/src/op.jl +++ b/src/lib/Ops/src/op.jl @@ -18,15 +18,15 @@ export Op, OpSum, which_op, site, sites, params, Applied, expand # [(1,), ("X", 1, 2), ("Y", 2), ("Z", 4)] # function split(f, t::Tuple) - n = findall(f, t) - nsplit = length(n) + 1 - s = Vector{Any}(undef, nsplit) - s[1] = t[1:(first(n) - 1)] - for i in 2:(nsplit - 1) - s[i] = t[n[i - 1]:(n[i] - 1)] - end - s[end] = t[last(n):end] - return s + n = findall(f, t) + nsplit = length(n) + 1 + s = Vector{Any}(undef, nsplit) + s[1] = t[1:(first(n) - 1)] + for i in 2:(nsplit - 1) + s[i] = t[n[i - 1]:(n[i] - 1)] + end + s[end] = t[last(n):end] + return s end ## XXX: Very long compile times: @@ -51,12 +51,12 @@ end ## end struct Op - which_op - sites::Tuple - params::NamedTuple - function Op(which_op, site...; kwargs...) - return new(which_op, site, NamedTuple(kwargs)) - end + which_op + sites::Tuple + params::NamedTuple + function Op(which_op, site...; kwargs...) 
+ return new(which_op, site, NamedTuple(kwargs)) + end end which_op(o::Op) = o.which_op @@ -66,11 +66,11 @@ site(o::Op) = only(sites(o)) params(o::Op) = o.params function (o1::Op == o2::Op) - return o1.which_op == o2.which_op && o1.sites == o2.sites && o1.params == o2.params + return o1.which_op == o2.which_op && o1.sites == o2.sites && o1.params == o2.params end function hash(o::Op, h::UInt) - return hash(which_op(o), hash(sites(o), hash(params(o), hash(:Op, h)))) + return hash(which_op(o), hash(sites(o), hash(params(o), hash(:Op, h)))) end # Version of `isless` defined for matrices @@ -80,39 +80,39 @@ _isless(a::AbstractString, b::AbstractMatrix) = true _isless(a::AbstractMatrix, b::AbstractString) = !_isless(b, a) function isless(o1::Op, o2::Op) - if sites(o1) ≠ sites(o2) - return sites(o1) < sites(o2) - end - if which_op(o1) ≠ which_op(o2) - return _isless(which_op(o1), which_op(o2)) - end - return params(o1) < params(o2) + if sites(o1) ≠ sites(o2) + return sites(o1) < sites(o2) + end + if which_op(o1) ≠ which_op(o2) + return _isless(which_op(o1), which_op(o2)) + end + return params(o1) < params(o2) end function isless(o1::Prod{Op}, o2::Prod{Op}) - if length(o1) ≠ length(o2) - return length(o1) < length(o2) - end - for n in 1:length(o1) - if o1[n] ≠ o2[n] - return (o1[n] < o2[n]) + if length(o1) ≠ length(o2) + return length(o1) < length(o2) + end + for n in 1:length(o1) + if o1[n] ≠ o2[n] + return (o1[n] < o2[n]) + end end - end - return false + return false end -function isless(o1::Scaled{C1,Prod{Op}}, o2::Scaled{C2,Prod{Op}}) where {C1,C2} - if argument(o1) == argument(o2) - if coefficient(o1) ≈ coefficient(o2) - return false - else - c1 = coefficient(o1) - c2 = coefficient(o2) - #"lexicographic" ordering on complex numbers - return real(c1) < real(c2) || (real(c1) ≈ real(c2) && imag(c1) < imag(c2)) +function isless(o1::Scaled{C1, Prod{Op}}, o2::Scaled{C2, Prod{Op}}) where {C1, C2} + if argument(o1) == argument(o2) + if coefficient(o1) ≈ coefficient(o2) + return false + else + c1 = coefficient(o1) + c2 = coefficient(o2) + #"lexicographic" ordering on complex numbers + return real(c1) < real(c2) || (real(c1) ≈ real(c2) && imag(c1) < imag(c2)) + end end - end - return argument(o1) < argument(o2) + return argument(o1) < argument(o2) end ## function Op(t::Tuple) @@ -135,29 +135,29 @@ end ## return Op(which_op, sites...; params...) 
## end -function sites(a::Union{Sum,Prod}) - s = [] - for n in 1:length(a) - s = s ∪ sites(a[n]) - end - return map(identity, s) +function sites(a::Union{Sum, Prod}) + s = [] + for n in 1:length(a) + s = s ∪ sites(a[n]) + end + return map(identity, s) end -sites(a::Scaled{C,<:Sum}) where {C} = sites(argument(a)) -sites(a::Scaled{C,<:Prod}) where {C} = sites(argument(a)) +sites(a::Scaled{C, <:Sum}) where {C} = sites(argument(a)) +sites(a::Scaled{C, <:Prod}) where {C} = sites(argument(a)) -params(a::Scaled{C,<:Prod}) where {C} = params(only(argument(a))) +params(a::Scaled{C, <:Prod}) where {C} = params(only(argument(a))) -which_op(a::Scaled{C,Op}) where {C} = which_op(argument(a)) -sites(a::Scaled{C,Op}) where {C} = sites(argument(a)) -params(a::Scaled{C,Op}) where {C} = params(argument(a)) +which_op(a::Scaled{C, Op}) where {C} = which_op(argument(a)) +sites(a::Scaled{C, Op}) where {C} = sites(argument(a)) +params(a::Scaled{C, Op}) where {C} = params(argument(a)) # # Op algebra # -function convert(::Type{Scaled{C1,Prod{Op}}}, o::Scaled{C2,Prod{Op}}) where {C1,C2} - c = convert(C1, coefficient(o)) - return c * argument(o) +function convert(::Type{Scaled{C1, Prod{Op}}}, o::Scaled{C2, Prod{Op}}) where {C1, C2} + c = convert(C1, coefficient(o)) + return c * argument(o) end """ @@ -178,7 +178,7 @@ associated with the `TagType` defined by special Index tags, such as `"S=1/2"`, `"S=1"`, `"Fermion"`, and `"Electron"`. """ -const OpSum{C} = Sum{Scaled{C,Prod{Op}}} +const OpSum{C} = Sum{Scaled{C, Prod{Op}}} # This helps with in-place operations OpSum() = OpSum{ComplexF64}() @@ -225,111 +225,111 @@ adjoint(o::LazyApply.Adjoint{Op}) = only(o.args) # const OpSumLike{C} = Union{ - Sum{Op}, - Sum{Scaled{C,Op}}, - Sum{Prod{Op}}, - Sum{Scaled{C,Prod{Op}}}, - Prod{Op}, - Scaled{C,Prod{Op}}, + Sum{Op}, + Sum{Scaled{C, Op}}, + Sum{Prod{Op}}, + Sum{Scaled{C, Prod{Op}}}, + Prod{Op}, + Scaled{C, Prod{Op}}, } -const WhichOp = Union{AbstractString,AbstractMatrix{<:Number}} +const WhichOp = Union{AbstractString, AbstractMatrix{<:Number}} # Make a `Scaled{C,Prod{Op}}` from a `Tuple` input, # for example: # # (1.2, "X", 1, "Y", 2) -> 1.2 * Op("X", 1) * Op("Y", 2) # -function op_term(a::Tuple{Number,Vararg}) - c = first(a) - return c * op_term(Base.tail(a)) +function op_term(a::Tuple{Number, Vararg}) + c = first(a) + return c * op_term(Base.tail(a)) end function op_site(which_op, params::NamedTuple, sites...) - return Op(which_op, sites...; params...) + return Op(which_op, sites...; params...) end function op_site(which_op, sites_params...) - if last(sites_params) isa NamedTuple - sites = Base.front(sites_params) - params = last(sites_params) - return Op(which_op, sites...; params...) - end - return Op(which_op, sites_params...) + if last(sites_params) isa NamedTuple + sites = Base.front(sites_params) + params = last(sites_params) + return Op(which_op, sites...; params...) + end + return Op(which_op, sites_params...) end function op_term(a::Tuple{Vararg}) - a_split = split(x -> x isa WhichOp, a) - @assert isempty(first(a_split)) - popfirst!(a_split) - o = op_site(first(a_split)...) - popfirst!(a_split) - for aₙ in a_split - o *= op_site(aₙ...) - end - return o + a_split = split(x -> x isa WhichOp, a) + @assert isempty(first(a_split)) + popfirst!(a_split) + o = op_site(first(a_split)...) + popfirst!(a_split) + for aₙ in a_split + o *= op_site(aₙ...) 
+ end + return o end function (o1::OpSumLike + o2::Tuple) - return o1 + op_term(o2) + return o1 + op_term(o2) end function (o1::Tuple + o2::OpSumLike) - return op_term(o1) + o2 + return op_term(o1) + o2 end function (o1::OpSumLike - o2::Tuple) - return o1 - op_term(o2) + return o1 - op_term(o2) end function (o1::Tuple - o2::OpSumLike) - return op_term(o1) - o2 + return op_term(o1) - o2 end function (o1::OpSumLike * o2::Tuple) - return o1 * op_term(o2) + return o1 * op_term(o2) end function (o1::Tuple * o2::OpSumLike) - return op_term(o1) * o2 + return op_term(o1) * o2 end function show(io::IO, ::MIME"text/plain", o::Op) - print(io, which_op(o)) - print(io, sites(o)) - if !isempty(params(o)) - print(io, params(o)) - end - return nothing + print(io, which_op(o)) + print(io, sites(o)) + if !isempty(params(o)) + print(io, params(o)) + end + return nothing end show(io::IO, o::Op) = show(io, MIME("text/plain"), o) function show(io::IO, ::MIME"text/plain", o::Prod{Op}) - for n in 1:length(o) - print(io, o[n]) - if n < length(o) - print(io, " ") + for n in 1:length(o) + print(io, o[n]) + if n < length(o) + print(io, " ") + end end - end - return nothing + return nothing end show(io::IO, o::Prod{Op}) = show(io, MIME("text/plain"), o) -function show(io::IO, m::MIME"text/plain", o::Scaled{C,O}) where {C,O<:Union{Op,Prod{Op}}} - c = coefficient(o) - if isreal(c) - c = real(c) - end - print(io, c) - print(io, " ") - show(io, m, argument(o)) - return nothing +function show(io::IO, m::MIME"text/plain", o::Scaled{C, O}) where {C, O <: Union{Op, Prod{Op}}} + c = coefficient(o) + if isreal(c) + c = real(c) + end + print(io, c) + print(io, " ") + show(io, m, argument(o)) + return nothing end -show(io::IO, o::Scaled{C,Prod{Op}}) where {C} = show(io, MIME("text/plain"), o) +show(io::IO, o::Scaled{C, Prod{Op}}) where {C} = show(io, MIME("text/plain"), o) function show(io::IO, ::MIME"text/plain", o::LazyApply.Adjoint{Op}) - print(io, o') - print(io, "'") - return nothing + print(io, o') + print(io, "'") + return nothing end show(io::IO, o::LazyApply.Adjoint{Op}) = show(io, MIME("text/plain"), o) diff --git a/src/lib/Ops/src/trotter.jl b/src/lib/Ops/src/trotter.jl index 9c3b1a5641..c26d40b934 100644 --- a/src/lib/Ops/src/trotter.jl +++ b/src/lib/Ops/src/trotter.jl @@ -5,31 +5,31 @@ abstract type ExpAlgorithm end struct Exact <: ExpAlgorithm end struct Trotter{Order} <: ExpAlgorithm - nsteps::Int + nsteps::Int end Trotter{Order}() where {Order} = Trotter{Order}(1) Base.one(::Trotter{Order}) where {Order} = Trotter{Order}(1) -function Base.exp(o::Sum; alg::ExpAlgorithm=Exact()) - return exp(alg, o) +function Base.exp(o::Sum; alg::ExpAlgorithm = Exact()) + return exp(alg, o) end function Base.exp(::Exact, o::Sum) - return Applied(prod, ([Applied(exp, (o,))],)) + return Applied(prod, ([Applied(exp, (o,))],)) end function exp_one_step(trotter::Trotter{1}, o::Sum) - exp_o = Applied(prod, (map(exp, reverse(only(o.args))),)) - return exp_o + exp_o = Applied(prod, (map(exp, reverse(only(o.args))),)) + return exp_o end function exp_one_step(trotter::Trotter{2}, o::Sum) - exp_o_order_1 = exp_one_step(Trotter{1}(), o / 2) - exp_o = reverse(exp_o_order_1) * exp_o_order_1 - return exp_o + exp_o_order_1 = exp_one_step(Trotter{1}(), o / 2) + exp_o = reverse(exp_o_order_1) * exp_o_order_1 + return exp_o end function Base.exp(trotter::Trotter, o::Sum) - expδo = exp_one_step(one(trotter), o / trotter.nsteps) - return expδo^trotter.nsteps + expδo = exp_one_step(one(trotter), o / trotter.nsteps) + return expδo^trotter.nsteps end diff 
--git a/src/lib/QuantumNumbers/src/arrow.jl b/src/lib/QuantumNumbers/src/arrow.jl index 68d4be8e11..c3e30b1889 100644 --- a/src/lib/QuantumNumbers/src/arrow.jl +++ b/src/lib/QuantumNumbers/src/arrow.jl @@ -12,5 +12,5 @@ associated with an index, i.e. the index leg is directed into or out of a given Reverse direction of a directed `Arrow`. """ function Base.:(-)(dir::Arrow) - return Arrow(-Int(dir)) + return Arrow(-Int(dir)) end diff --git a/src/lib/QuantumNumbers/src/qnval.jl b/src/lib/QuantumNumbers/src/qnval.jl index aaa218a4d6..c73bc7a22e 100644 --- a/src/lib/QuantumNumbers/src/qnval.jl +++ b/src/lib/QuantumNumbers/src/qnval.jl @@ -2,19 +2,19 @@ using ..ITensors: ITensors, name, val using ..SmallStrings: SmallString struct QNVal - name::SmallString - val::Int - modulus::Int - function QNVal(name, v::Int, m::Int=1) - am = abs(m) - if am > 1 - return new(SmallString(name), mod(v, am), m) + name::SmallString + val::Int + modulus::Int + function QNVal(name, v::Int, m::Int = 1) + am = abs(m) + if am > 1 + return new(SmallString(name), mod(v, am), m) + end + return new(SmallString(name), v, m) end - return new(SmallString(name), v, m) - end end -QNVal(v::Int, m::Int=1) = QNVal("", v, m) +QNVal(v::Int, m::Int = 1) = QNVal("", v, m) QNVal() = QNVal("", 0, 0) ITensors.name(qv::QNVal) = qv.name @@ -24,13 +24,13 @@ isactive(qv::QNVal) = modulus(qv) != 0 Base.:(<)(qv1::QNVal, qv2::QNVal) = (name(qv1) < name(qv2)) function qn_mod(val::Int, modulus::Int) - amod = abs(modulus) - amod <= 1 && return val - return mod(val, amod) + amod = abs(modulus) + amod <= 1 && return val + return mod(val, amod) end function Base.:(-)(qv::QNVal) - return QNVal(name(qv), qn_mod(-val(qv), modulus(qv)), modulus(qv)) + return QNVal(name(qv), qn_mod(-val(qv), modulus(qv)), modulus(qv)) end Base.zero(::Type{QNVal}) = QNVal() @@ -42,19 +42,19 @@ Base.:(*)(dir::Arrow, qv::QNVal) = QNVal(name(qv), Int(dir) * val(qv), modulus(q Base.:(*)(qv::QNVal, dir::Arrow) = (dir * qv) function pm(qv1::QNVal, qv2::QNVal, fac::Int) - if name(qv1) != name(qv2) - error("Cannot add QNVals with different names \"$(name(qv1))\", \"$(name(qv2))\"") - end - if modulus(qv1) != modulus(qv2) - error( - "QNVals with matching name \"$(name(qv1))\" cannot have different modulus values " - ) - end - m1 = modulus(qv1) - if m1 == 1 || m1 == -1 - return QNVal(name(qv1), val(qv1) + fac * val(qv2), m1) - end - return QNVal(name(qv1), Base.mod(val(qv1) + fac * val(qv2), abs(m1)), m1) + if name(qv1) != name(qv2) + error("Cannot add QNVals with different names \"$(name(qv1))\", \"$(name(qv2))\"") + end + if modulus(qv1) != modulus(qv2) + error( + "QNVals with matching name \"$(name(qv1))\" cannot have different modulus values " + ) + end + m1 = modulus(qv1) + if m1 == 1 || m1 == -1 + return QNVal(name(qv1), val(qv1) + fac * val(qv2), m1) + end + return QNVal(name(qv1), Base.mod(val(qv1) + fac * val(qv2), abs(m1)), m1) end Base.:(+)(qv1::QNVal, qv2::QNVal) = pm(qv1, qv2, +1) diff --git a/src/lib/SiteTypes/src/sitetype.jl b/src/lib/SiteTypes/src/sitetype.jl index 5ecd5b1c02..8b8f863d8a 100644 --- a/src/lib/SiteTypes/src/sitetype.jl +++ b/src/lib/SiteTypes/src/sitetype.jl @@ -1,11 +1,11 @@ using ChainRulesCore: @ignore_derivatives using ..ITensors: - ITensors, Index, ITensor, itensor, dag, onehot, prime, product, swapprime, tags + ITensors, Index, ITensor, itensor, dag, onehot, prime, product, swapprime, tags using ..SmallStrings: SmallString using ..TagSets: TagSets, TagSet, addtags, commontags @eval struct SiteType{T} - (f::Type{<:SiteType})() = $(Expr(:new, :f)) 
+ (f::Type{<:SiteType})() = $(Expr(:new, :f)) end # Note that the complicated definition of @@ -117,14 +117,14 @@ SiteType(t::SmallString) = SiteType{t}() tag(::SiteType{T}) where {T} = T macro SiteType_str(s) - return SiteType{SmallString(s)} + return SiteType{SmallString(s)} end # Keep TagType defined for backwards # compatibility; will be deprecated later const TagType = SiteType macro TagType_str(s) - return TagType{SmallString(s)} + return TagType{SmallString(s)} end #--------------------------------------- @@ -134,7 +134,7 @@ end #--------------------------------------- @eval struct OpName{Name} - (f::Type{<:OpName})() = $(Expr(:new, :f)) + (f::Type{<:OpName})() = $(Expr(:new, :f)) end # Note that the complicated definition of @@ -165,7 +165,7 @@ OpName(s::Symbol) = OpName{s}() ITensors.name(::OpName{N}) where {N} = N macro OpName_str(s) - return OpName{Symbol(s)} + return OpName{Symbol(s)} end # Default implementations of op and op! @@ -173,28 +173,28 @@ op(::OpName; kwargs...) = nothing op(::OpName, ::SiteType; kwargs...) = nothing op(::OpName, ::SiteType, ::Index...; kwargs...) = nothing function op( - ::OpName, ::SiteType, ::SiteType, sitetypes_inds::Union{SiteType,Index}...; kwargs... -) - return nothing + ::OpName, ::SiteType, ::SiteType, sitetypes_inds::Union{SiteType, Index}...; kwargs... + ) + return nothing end op!(::ITensor, ::OpName, ::SiteType, ::Index...; kwargs...) = nothing function op!( - ::ITensor, - ::OpName, - ::SiteType, - ::SiteType, - sitetypes_inds::Union{SiteType,Index}...; - kwargs..., -) - return nothing + ::ITensor, + ::OpName, + ::SiteType, + ::SiteType, + sitetypes_inds::Union{SiteType, Index}...; + kwargs..., + ) + return nothing end # Deprecated version, for backwards compatibility op(::SiteType, ::Index, ::AbstractString; kwargs...) = nothing function _sitetypes(ts::TagSet) - Ntags = length(ts) - return SiteType[SiteType(TagSets.data(ts)[n]) for n in 1:Ntags] + Ntags = length(ts) + return SiteType[SiteType(TagSets.data(ts)[n]) for n in 1:Ntags] end _sitetypes(i::Index) = _sitetypes(tags(i)) @@ -233,177 +233,177 @@ with ITensor [here](https://docs.itensor.org/ITensorMPS/stable/IncludedSiteTypes Note that some site types such as "S=1/2" and "Qubit" are aliases for each other and share operator definitions. """ -function op(name::AbstractString, s::Index...; adjoint::Bool=false, kwargs...) - name = strip(name) - # TODO: filter out only commons tags - # if there are multiple indices - commontags_s = commontags(s...) - - # first we handle the + and - algebra, which requires a space between ops to avoid clashing - name_split = nothing - @ignore_derivatives name_split = String.(split(name, " ")) - oplocs = findall(x -> x ∈ ("+", "-"), name_split) - - if !isempty(oplocs) - @ignore_derivatives !isempty(kwargs) && - error("Lazy algebra on parametric gates not allowed") - - # the string representation of algebra ops: ex ["+", "-", "+"] - labels = name_split[oplocs] - # assign coefficients to each term: ex [+1, -1, +1] - coeffs = [1, [(-1)^Int(label == "-") for label in labels]...] - - # grad the name of each operator block separated by an algebra op, and do so by - # making sure blank spaces between opnames are kept when building the new block. - start, opnames = 0, String[] - for oploc in oplocs - finish = oploc - opnames = vcat( - opnames, [prod([name_split[k] * " " for k in (start + 1):(finish - 1)])] - ) - start = oploc +function op(name::AbstractString, s::Index...; adjoint::Bool = false, kwargs...) 
+ name = strip(name) + # TODO: filter out only commons tags + # if there are multiple indices + commontags_s = commontags(s...) + + # first we handle the + and - algebra, which requires a space between ops to avoid clashing + name_split = nothing + @ignore_derivatives name_split = String.(split(name, " ")) + oplocs = findall(x -> x ∈ ("+", "-"), name_split) + + if !isempty(oplocs) + @ignore_derivatives !isempty(kwargs) && + error("Lazy algebra on parametric gates not allowed") + + # the string representation of algebra ops: ex ["+", "-", "+"] + labels = name_split[oplocs] + # assign coefficients to each term: ex [+1, -1, +1] + coeffs = [1, [(-1)^Int(label == "-") for label in labels]...] + + # grad the name of each operator block separated by an algebra op, and do so by + # making sure blank spaces between opnames are kept when building the new block. + start, opnames = 0, String[] + for oploc in oplocs + finish = oploc + opnames = vcat( + opnames, [prod([name_split[k] * " " for k in (start + 1):(finish - 1)])] + ) + start = oploc + end + opnames = vcat( + opnames, [prod([name_split[k] * " " for k in (start + 1):length(name_split)])] + ) + + # build the vector of blocks and sum + op_list = [ + coeff * (op(opname, s...; kwargs...)) for (coeff, opname) in zip(coeffs, opnames) + ] + return sum(op_list) end - opnames = vcat( - opnames, [prod([name_split[k] * " " for k in (start + 1):length(name_split)])] - ) - # build the vector of blocks and sum - op_list = [ - coeff * (op(opname, s...; kwargs...)) for (coeff, opname) in zip(coeffs, opnames) - ] - return sum(op_list) - end - - # the the multiplication come after - oploc = findfirst("*", name) - if !isnothing(oploc) - op1, op2 = nothing, nothing - @ignore_derivatives begin - op1 = name[1:prevind(name, oploc.start)] - op2 = name[nextind(name, oploc.start):end] - if !(op1[end] == ' ' && op2[1] == ' ') - @warn "($op1*$op2) composite op definition `A*B` deprecated: please use `A * B` instead (with spaces)" - end + # the the multiplication come after + oploc = findfirst("*", name) + if !isnothing(oploc) + op1, op2 = nothing, nothing + @ignore_derivatives begin + op1 = name[1:prevind(name, oploc.start)] + op2 = name[nextind(name, oploc.start):end] + if !(op1[end] == ' ' && op2[1] == ' ') + @warn "($op1*$op2) composite op definition `A*B` deprecated: please use `A * B` instead (with spaces)" + end + end + return product(op(op1, s...; kwargs...), op(op2, s...; kwargs...)) end - return product(op(op1, s...; kwargs...), op(op2, s...; kwargs...)) - end - - common_stypes = _sitetypes(commontags_s) - @ignore_derivatives push!(common_stypes, SiteType("Generic")) - opn = OpName(name) - - # - # Try calling a function of the form: - # op(::OpName, ::SiteType, ::Index...; kwargs...) - # - for st in common_stypes - res = op(opn, st, s...; kwargs...) - if !isnothing(res) - adjoint && return swapprime(dag(res), 0 => 1) - return res + + common_stypes = _sitetypes(commontags_s) + @ignore_derivatives push!(common_stypes, SiteType("Generic")) + opn = OpName(name) + + # + # Try calling a function of the form: + # op(::OpName, ::SiteType, ::Index...; kwargs...) + # + for st in common_stypes + res = op(opn, st, s...; kwargs...) + if !isnothing(res) + adjoint && return swapprime(dag(res), 0 => 1) + return res + end end - end - - # - # Try calling a function of the form: - # op(::OpName; kwargs...) - # for backward compatibility with previous - # gate system in PastaQ.jl - # - op_mat = op(opn; kwargs...) 
- if !isnothing(op_mat) - rs = reverse(s) - res = itensor(op_mat, prime.(rs)..., dag.(rs)...) - adjoint && return swapprime(dag(res), 0 => 1) - return res - end - # - # otherwise try calling a function of the form: - # op(::OpName, ::SiteType; kwargs...) - # which returns a Julia matrix - # - for st in common_stypes - op_mat = op(opn, st; kwargs...) + + # + # Try calling a function of the form: + # op(::OpName; kwargs...) + # for backward compatibility with previous + # gate system in PastaQ.jl + # + op_mat = op(opn; kwargs...) if !isnothing(op_mat) - rs = reverse(s) - #return itensor(op_mat, prime.(rs)..., dag.(rs)...) - res = itensor(op_mat, prime.(rs)..., dag.(rs)...) - adjoint && return swapprime(dag(res), 0 => 1) - return res - end - end - - # otherwise try calling a function of the form: - # op!(::ITensor, ::OpName, ::SiteType, ::Index...; kwargs...) - # - Op = ITensor(prime.(s)..., dag.(s)...) - for st in common_stypes - op!(Op, opn, st, s...; kwargs...) - if !isempty(Op) - adjoint && return swapprime(dag(Op), 0 => 1) - return Op - end - end - - if length(s) > 1 - # No overloads for common tags found. It might be a - # case of making an operator with mixed site types, - # searching for overloads like: - # op(::OpName, - # ::SiteType..., - # ::Index...; - # kwargs...) - # op!(::ITensor, ::OpName, - # ::SiteType..., - # ::Index...; - # kwargs...) - stypes = _sitetypes.(s) - - for st in Iterators.product(stypes...) - res = op(opn, st..., s...; kwargs...) - if !isnothing(res) + rs = reverse(s) + res = itensor(op_mat, prime.(rs)..., dag.(rs)...) adjoint && return swapprime(dag(res), 0 => 1) return res - end + end + # + # otherwise try calling a function of the form: + # op(::OpName, ::SiteType; kwargs...) + # which returns a Julia matrix + # + for st in common_stypes + op_mat = op(opn, st; kwargs...) + if !isnothing(op_mat) + rs = reverse(s) + #return itensor(op_mat, prime.(rs)..., dag.(rs)...) + res = itensor(op_mat, prime.(rs)..., dag.(rs)...) + adjoint && return swapprime(dag(res), 0 => 1) + return res + end end + # otherwise try calling a function of the form: + # op!(::ITensor, ::OpName, ::SiteType, ::Index...; kwargs...) + # Op = ITensor(prime.(s)..., dag.(s)...) - for st in Iterators.product(stypes...) - op!(Op, opn, st..., s...; kwargs...) - if !isempty(Op) - adjoint && return swapprime(dag(Op), 0 => 1) - return Op - end + for st in common_stypes + op!(Op, opn, st, s...; kwargs...) + if !isempty(Op) + adjoint && return swapprime(dag(Op), 0 => 1) + return Op + end end - throw( - ArgumentError( - "Overload of \"op\" or \"op!\" functions not found for operator name \"$name\" and Index tags: $(tags.(s)).", - ), - ) - end - - # - # otherwise try calling a function of the form: - # op(::SiteType, ::Index, ::AbstractString) - # - # (Note: this version is for backwards compatibility - # after version 0.1.10, and may be eventually - # deprecated) - # - for st in common_stypes - res = op(st, s[1], name; kwargs...) - if !isnothing(res) - adjoint && return dag(res) - return res + if length(s) > 1 + # No overloads for common tags found. It might be a + # case of making an operator with mixed site types, + # searching for overloads like: + # op(::OpName, + # ::SiteType..., + # ::Index...; + # kwargs...) + # op!(::ITensor, ::OpName, + # ::SiteType..., + # ::Index...; + # kwargs...) + stypes = _sitetypes.(s) + + for st in Iterators.product(stypes...) + res = op(opn, st..., s...; kwargs...) 
+ if !isnothing(res) + adjoint && return swapprime(dag(res), 0 => 1) + return res + end + end + + Op = ITensor(prime.(s)..., dag.(s)...) + for st in Iterators.product(stypes...) + op!(Op, opn, st..., s...; kwargs...) + if !isempty(Op) + adjoint && return swapprime(dag(Op), 0 => 1) + return Op + end + end + + throw( + ArgumentError( + "Overload of \"op\" or \"op!\" functions not found for operator name \"$name\" and Index tags: $(tags.(s)).", + ), + ) + end + + # + # otherwise try calling a function of the form: + # op(::SiteType, ::Index, ::AbstractString) + # + # (Note: this version is for backwards compatibility + # after version 0.1.10, and may be eventually + # deprecated) + # + for st in common_stypes + res = op(st, s[1], name; kwargs...) + if !isnothing(res) + adjoint && return dag(res) + return res + end end - end - return throw( - ArgumentError( - "Overload of \"op\" or \"op!\" functions not found for operator name \"$name\" and Index tags: $(tags.(s)).", - ), - ) + return throw( + ArgumentError( + "Overload of \"op\" or \"op!\" functions not found for operator name \"$name\" and Index tags: $(tags.(s)).", + ), + ) end op(name::AbstractString; kwargs...) = error("Must input indices when creating an `op`.") @@ -466,28 +466,28 @@ s = siteinds("S=1/2", 4) Sz2 = op("Sz", s, 2) ``` """ -function op(opname, s::Vector{<:Index}, ns::NTuple{N,Integer}; kwargs...) where {N} - return op(opname, ntuple(n -> s[ns[n]], Val(N))...; kwargs...) +function op(opname, s::Vector{<:Index}, ns::NTuple{N, Integer}; kwargs...) where {N} + return op(opname, ntuple(n -> s[ns[n]], Val(N))...; kwargs...) end function op(opname, s::Vector{<:Index}, ns::Vararg{Integer}; kwargs...) - return op(opname, s, ns; kwargs...) + return op(opname, s, ns; kwargs...) end function op(s::Vector{<:Index}, opname, ns::Tuple{Vararg{Integer}}; kwargs...) - return op(opname, s, ns...; kwargs...) + return op(opname, s, ns...; kwargs...) end function op(s::Vector{<:Index}, opname, ns::Integer...; kwargs...) - return op(opname, s, ns; kwargs...) + return op(opname, s, ns; kwargs...) end function op(s::Vector{<:Index}, opname, ns::Tuple{Vararg{Integer}}, kwargs::NamedTuple) - return op(opname, s, ns; kwargs...) + return op(opname, s, ns; kwargs...) end function op(s::Vector{<:Index}, opname, ns::Integer, kwargs::NamedTuple) - return op(opname, s, (ns,); kwargs...) + return op(opname, s, (ns,); kwargs...) end op(s::Vector{<:Index}, o::Tuple) = op(s, o...) @@ -497,19 +497,19 @@ op(o::Tuple, s::Vector{<:Index}) = op(s, o...) op(f::Function, args...; kwargs...) = f(op(args...; kwargs...)) function op( - s::Vector{<:Index}, - f::Function, - opname::AbstractString, - ns::Tuple{Vararg{Integer}}; - kwargs..., -) - return f(op(opname, s, ns...; kwargs...)) + s::Vector{<:Index}, + f::Function, + opname::AbstractString, + ns::Tuple{Vararg{Integer}}; + kwargs..., + ) + return f(op(opname, s, ns...; kwargs...)) end function op( - s::Vector{<:Index}, f::Function, opname::AbstractString, ns::Integer...; kwargs... -) - return f(op(opname, s, ns; kwargs...)) + s::Vector{<:Index}, f::Function, opname::AbstractString, ns::Integer...; kwargs... 
+ ) + return f(op(opname, s, ns; kwargs...)) end # Here, Ref is used to not broadcast over the vector of indices @@ -543,7 +543,7 @@ gates = ops(os, s) #--------------------------------------- @eval struct StateName{Name} - (f::Type{<:StateName})() = $(Expr(:new, :f)) + (f::Type{<:StateName})() = $(Expr(:new, :f)) end StateName(s::AbstractString) = StateName{SmallString(s)}() @@ -552,7 +552,7 @@ StateName(s::SmallString) = StateName{s}() ITensors.name(::StateName{N}) where {N} = N macro StateName_str(s) - return StateName{SmallString(s)} + return StateName{SmallString(s)} end state(::StateName, ::SiteType; kwargs...) = nothing @@ -593,44 +593,44 @@ sxm = state(s,"X-") ``` """ function state(s::Index, name::AbstractString; kwargs...)::ITensor - stypes = _sitetypes(s) - sname = StateName(name) - - # Try calling state(::StateName"Name",::SiteType"Tag",s::Index; kwargs...) - for st in stypes - v = state(sname, st, s; kwargs...) - if !isnothing(v) - if v isa ITensor - return v - else - # TODO: deprecate, only for backwards compatibility. - return itensor(v, s) - end + stypes = _sitetypes(s) + sname = StateName(name) + + # Try calling state(::StateName"Name",::SiteType"Tag",s::Index; kwargs...) + for st in stypes + v = state(sname, st, s; kwargs...) + if !isnothing(v) + if v isa ITensor + return v + else + # TODO: deprecate, only for backwards compatibility. + return itensor(v, s) + end + end end - end - - # Try calling state!(::ITensor,::StateName"Name",::SiteType"Tag",s::Index;kwargs...) - T = ITensor(s) - for st in stypes - state!(T, sname, st, s; kwargs...) - !isempty(T) && return T - end - - # - # otherwise try calling a function of the form: - # state(::StateName"Name", ::SiteType"Tag"; kwargs...) - # which returns a Julia vector - # - for st in stypes - v = state(sname, st; kwargs...) - !isnothing(v) && return itensor(v, s) - end - - return throw( - ArgumentError( - "Overload of \"state\" or \"state!\" functions not found for state name \"$name\" and Index tags $(tags(s))", - ), - ) + + # Try calling state!(::ITensor,::StateName"Name",::SiteType"Tag",s::Index;kwargs...) + T = ITensor(s) + for st in stypes + state!(T, sname, st, s; kwargs...) + !isempty(T) && return T + end + + # + # otherwise try calling a function of the form: + # state(::StateName"Name", ::SiteType"Tag"; kwargs...) + # which returns a Julia vector + # + for st in stypes + v = state(sname, st; kwargs...) + !isnothing(v) && return itensor(v, s) + end + + return throw( + ArgumentError( + "Overload of \"state\" or \"state!\" functions not found for state name \"$name\" and Index tags $(tags(s))", + ), + ) end state(s::Index, n::Integer) = onehot(s => n) @@ -644,7 +644,7 @@ state(sset::Vector{<:Index}, j::Integer, st; kwargs...) 
= state(sset[j], st; kwa #--------------------------------------- @eval struct ValName{Name} - (f::Type{<:ValName})() = $(Expr(:new, :f)) + (f::Type{<:ValName})() = $(Expr(:new, :f)) end ValName(s::AbstractString) = ValName{SmallString(s)}() @@ -653,7 +653,7 @@ ValName(s::Symbol) = ValName{s}() ITensors.name(::ValName{N}) where {N} = N macro ValName_str(s) - return ValName{SmallString(s)} + return ValName{SmallString(s)} end val(::ValName, ::SiteType) = nothing @@ -686,18 +686,18 @@ val(s,"Occ") == 2 ``` """ function val(s::Index, name::AbstractString)::Int - stypes = _sitetypes(s) - sname = ValName(name) - - # Try calling val(::StateName"Name",::SiteType"Tag",) - for st in stypes - res = val(sname, st) - !isnothing(res) && return res - end - - return throw( - ArgumentError("Overload of \"val\" function not found for Index tags $(tags(s))") - ) + stypes = _sitetypes(s) + sname = ValName(name) + + # Try calling val(::StateName"Name",::SiteType"Tag",) + for st in stypes + res = val(sname, st) + !isnothing(res) && return res + end + + return throw( + ArgumentError("Overload of \"val\" function not found for Index tags $(tags(s))") + ) end val(s::Index, n::Integer) = n @@ -715,21 +715,21 @@ space(st::SiteType; kwargs...) = nothing space(st::SiteType, n::Int; kwargs...) = space(st; kwargs...) function space_error_message(st::SiteType) - return "Overload of \"space\",\"siteind\", or \"siteinds\" functions not found for Index tag: $(tag(st))" + return "Overload of \"space\",\"siteind\", or \"siteinds\" functions not found for Index tag: $(tag(st))" end -function siteind(st::SiteType; addtags="", kwargs...) - sp = space(st; kwargs...) - isnothing(sp) && return nothing - return Index(sp, "Site, $(tag(st)), $addtags") +function siteind(st::SiteType; addtags = "", kwargs...) + sp = space(st; kwargs...) + isnothing(sp) && return nothing + return Index(sp, "Site, $(tag(st)), $addtags") end function siteind(st::SiteType, n; kwargs...) - s = siteind(st; kwargs...) - !isnothing(s) && return addtags(s, "n=$n") - sp = space(st, n; kwargs...) - isnothing(sp) && error(space_error_message(st)) - return Index(sp, "Site, $(tag(st)), n=$n") + s = siteind(st; kwargs...) + !isnothing(s) && return addtags(s, "n=$n") + sp = space(st, n; kwargs...) + isnothing(sp) && error(space_error_message(st)) + return Index(sp, "Site, $(tag(st)), n=$n") end siteind(tag::String; kwargs...) = siteind(SiteType(tag); kwargs...) @@ -739,8 +739,8 @@ siteind(tag::String, n; kwargs...) = siteind(SiteType(tag), n; kwargs...) # Special case of `siteind` where integer (dim) provided # instead of a tag string #siteind(d::Integer, n::Integer; kwargs...) = Index(d, "Site,n=$n") -function siteind(d::Integer, n::Integer; addtags="", kwargs...) - return Index(d, "Site,n=$n, $addtags") +function siteind(d::Integer, n::Integer; addtags = "", kwargs...) + return Index(d, "Site,n=$n, $addtags") end #--------------------------------------- @@ -767,14 +767,14 @@ s = siteinds("S=1/2", N; conserve_qns=true) ``` """ function siteinds(tag::String, N::Integer; kwargs...) - st = SiteType(tag) + st = SiteType(tag) - si = siteinds(st, N; kwargs...) - if !isnothing(si) - return si - end + si = siteinds(st, N; kwargs...) + if !isnothing(si) + return si + end - return [siteind(st, j; kwargs...) for j in 1:N] + return [siteind(st, j; kwargs...) for j in 1:N] end """ @@ -784,7 +784,7 @@ Create an array of `N` physical site indices where the site type at site `n` is by `f(n)` (`f` should return a string). """ function siteinds(f::Function, N::Integer; kwargs...) 
- return [siteind(f(n), n; kwargs...) for n in 1:N] + return [siteind(f(n), n; kwargs...) for n in 1:N] end # Special case of `siteinds` where integer (dim) @@ -798,7 +798,7 @@ Create an array of `N` site indices, each of dimension `d`. - `addtags::String`: additional tags to be added to all indices """ function siteinds(d::Integer, N::Integer; kwargs...) - return [siteind(d, n; kwargs...) for n in 1:N] + return [siteind(d, n; kwargs...) for n in 1:N] end #--------------------------------------- @@ -812,23 +812,23 @@ has_fermion_string(operator::AbstractArray{<:Number}, s::Index; kwargs...)::Bool has_fermion_string(::OpName, ::SiteType) = nothing function has_fermion_string(opname::AbstractString, s::Index; kwargs...)::Bool - opname = strip(opname) - - # Interpret operator names joined by * - # as acting sequentially on the same site - starpos = findfirst(isequal('*'), opname) - if !isnothing(starpos) - op1 = opname[1:prevind(opname, starpos)] - op2 = opname[nextind(opname, starpos):end] - return xor(has_fermion_string(op1, s; kwargs...), has_fermion_string(op2, s; kwargs...)) - end - - Ntags = length(tags(s)) - stypes = _sitetypes(s) - opn = OpName(opname) - for st in stypes - res = has_fermion_string(opn, st) - !isnothing(res) && return res - end - return false + opname = strip(opname) + + # Interpret operator names joined by * + # as acting sequentially on the same site + starpos = findfirst(isequal('*'), opname) + if !isnothing(starpos) + op1 = opname[1:prevind(opname, starpos)] + op2 = opname[nextind(opname, starpos):end] + return xor(has_fermion_string(op1, s; kwargs...), has_fermion_string(op2, s; kwargs...)) + end + + Ntags = length(tags(s)) + stypes = _sitetypes(s) + opn = OpName(opname) + for st in stypes + res = has_fermion_string(opn, st) + !isnothing(res) && return res + end + return false end diff --git a/src/lib/SiteTypes/src/sitetypes/electron.jl b/src/lib/SiteTypes/src/sitetypes/electron.jl index d8fd90848e..a54dee4090 100644 --- a/src/lib/SiteTypes/src/sitetypes/electron.jl +++ b/src/lib/SiteTypes/src/sitetypes/electron.jl @@ -13,48 +13,48 @@ Create the Hilbert space for a site of type "Electron". Optionally specify the conserved symmetries and their quantum number labels. 
""" function space( - ::SiteType"Electron"; - conserve_qns=false, - conserve_sz=conserve_qns, - conserve_nf=conserve_qns, - conserve_nfparity=conserve_qns, - qnname_sz="Sz", - qnname_nf="Nf", - qnname_nfparity="NfParity", - # Deprecated - conserve_parity=nothing, -) - if !isnothing(conserve_parity) - conserve_nfparity = conserve_parity - end - if conserve_sz && conserve_nf - return [ - QN((qnname_nf, 0, -1), (qnname_sz, 0)) => 1 - QN((qnname_nf, 1, -1), (qnname_sz, +1)) => 1 - QN((qnname_nf, 1, -1), (qnname_sz, -1)) => 1 - QN((qnname_nf, 2, -1), (qnname_sz, 0)) => 1 - ] - elseif conserve_nf - return [ - QN(qnname_nf, 0, -1) => 1 - QN(qnname_nf, 1, -1) => 2 - QN(qnname_nf, 2, -1) => 1 - ] - elseif conserve_sz - return [ - QN((qnname_sz, 0), (qnname_nfparity, 0, -2)) => 1 - QN((qnname_sz, +1), (qnname_nfparity, 1, -2)) => 1 - QN((qnname_sz, -1), (qnname_nfparity, 1, -2)) => 1 - QN((qnname_sz, 0), (qnname_nfparity, 0, -2)) => 1 - ] - elseif conserve_nfparity - return [ - QN(qnname_nfparity, 0, -2) => 1 - QN(qnname_nfparity, 1, -2) => 2 - QN(qnname_nfparity, 0, -2) => 1 - ] - end - return 4 + ::SiteType"Electron"; + conserve_qns = false, + conserve_sz = conserve_qns, + conserve_nf = conserve_qns, + conserve_nfparity = conserve_qns, + qnname_sz = "Sz", + qnname_nf = "Nf", + qnname_nfparity = "NfParity", + # Deprecated + conserve_parity = nothing, + ) + if !isnothing(conserve_parity) + conserve_nfparity = conserve_parity + end + if conserve_sz && conserve_nf + return [ + QN((qnname_nf, 0, -1), (qnname_sz, 0)) => 1 + QN((qnname_nf, 1, -1), (qnname_sz, +1)) => 1 + QN((qnname_nf, 1, -1), (qnname_sz, -1)) => 1 + QN((qnname_nf, 2, -1), (qnname_sz, 0)) => 1 + ] + elseif conserve_nf + return [ + QN(qnname_nf, 0, -1) => 1 + QN(qnname_nf, 1, -1) => 2 + QN(qnname_nf, 2, -1) => 1 + ] + elseif conserve_sz + return [ + QN((qnname_sz, 0), (qnname_nfparity, 0, -2)) => 1 + QN((qnname_sz, +1), (qnname_nfparity, 1, -2)) => 1 + QN((qnname_sz, -1), (qnname_nfparity, 1, -2)) => 1 + QN((qnname_sz, 0), (qnname_nfparity, 0, -2)) => 1 + ] + elseif conserve_nfparity + return [ + QN(qnname_nfparity, 0, -2) => 1 + QN(qnname_nfparity, 1, -2) => 2 + QN(qnname_nfparity, 0, -2) => 1 + ] + end + return 4 end val(::ValName"Emp", ::SiteType"Electron") = 1 @@ -76,261 +76,261 @@ state(::StateName"↓", st::SiteType"Electron") = state(StateName("Dn"), st) state(::StateName"↑↓", st::SiteType"Electron") = state(StateName("UpDn"), st) function op(::OpName"Nup", ::SiteType"Electron") - return [ - 0.0 0.0 0.0 0.0 - 0.0 1.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.0 1.0 - ] + return [ + 0.0 0.0 0.0 0.0 + 0.0 1.0 0.0 0.0 + 0.0 0.0 0.0 0.0 + 0.0 0.0 0.0 1.0 + ] end function op(on::OpName"n↑", st::SiteType"Electron") - return op(alias(on), st) + return op(alias(on), st) end function op(::OpName"Ndn", ::SiteType"Electron") - return [ - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - 0.0 0.0 1.0 0.0 - 0.0 0.0 0.0 1.0 - ] + return [ + 0.0 0.0 0.0 0.0 + 0.0 0.0 0.0 0.0 + 0.0 0.0 1.0 0.0 + 0.0 0.0 0.0 1.0 + ] end function op(on::OpName"n↓", st::SiteType"Electron") - return op(alias(on), st) + return op(alias(on), st) end function op(::OpName"Nupdn", ::SiteType"Electron") - return [ - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.0 1.0 - ] + return [ + 0.0 0.0 0.0 0.0 + 0.0 0.0 0.0 0.0 + 0.0 0.0 0.0 0.0 + 0.0 0.0 0.0 1.0 + ] end function op(on::OpName"n↑↓", st::SiteType"Electron") - return op(alias(on), st) + return op(alias(on), st) end function op(::OpName"Ntot", ::SiteType"Electron") - return [ - 0.0 0.0 0.0 0.0 - 0.0 1.0 0.0 0.0 - 0.0 0.0 1.0 
0.0 - 0.0 0.0 0.0 2.0 - ] + return [ + 0.0 0.0 0.0 0.0 + 0.0 1.0 0.0 0.0 + 0.0 0.0 1.0 0.0 + 0.0 0.0 0.0 2.0 + ] end function op(on::OpName"ntot", st::SiteType"Electron") - return op(alias(on), st) + return op(alias(on), st) end function op(::OpName"Cup", ::SiteType"Electron") - return [ - 0.0 1.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.0 1.0 - 0.0 0.0 0.0 0.0 - ] + return [ + 0.0 1.0 0.0 0.0 + 0.0 0.0 0.0 0.0 + 0.0 0.0 0.0 1.0 + 0.0 0.0 0.0 0.0 + ] end function op(on::OpName"c↑", st::SiteType"Electron") - return op(alias(on), st) + return op(alias(on), st) end function op(::OpName"Cdagup", ::SiteType"Electron") - return [ - 0.0 0.0 0.0 0.0 - 1.0 0.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - 0.0 0.0 1.0 0.0 - ] + return [ + 0.0 0.0 0.0 0.0 + 1.0 0.0 0.0 0.0 + 0.0 0.0 0.0 0.0 + 0.0 0.0 1.0 0.0 + ] end function op(on::OpName"c†↑", st::SiteType"Electron") - return op(alias(on), st) + return op(alias(on), st) end function op(::OpName"Cdn", ::SiteType"Electron") - return [ - 0.0 0.0 1.0 0.0 - 0.0 0.0 0.0 -1.0 - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - ] + return [ + 0.0 0.0 1.0 0.0 + 0.0 0.0 0.0 -1.0 + 0.0 0.0 0.0 0.0 + 0.0 0.0 0.0 0.0 + ] end function op(on::OpName"c↓", st::SiteType"Electron") - return op(alias(on), st) + return op(alias(on), st) end function op(::OpName"Cdagdn", ::SiteType"Electron") - return [ - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - 1.0 0.0 0.0 0.0 - 0.0 -1.0 0.0 0.0 - ] + return [ + 0.0 0.0 0.0 0.0 + 0.0 0.0 0.0 0.0 + 1.0 0.0 0.0 0.0 + 0.0 -1.0 0.0 0.0 + ] end function op(::OpName"c†↓", st::SiteType"Electron") - return op(OpName("Cdagdn"), st) + return op(OpName("Cdagdn"), st) end function op(::OpName"Aup", ::SiteType"Electron") - return [ - 0.0 1.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.0 1.0 - 0.0 0.0 0.0 0.0 - ] + return [ + 0.0 1.0 0.0 0.0 + 0.0 0.0 0.0 0.0 + 0.0 0.0 0.0 1.0 + 0.0 0.0 0.0 0.0 + ] end function op(::OpName"a↑", st::SiteType"Electron") - return op(OpName("Aup"), st) + return op(OpName("Aup"), st) end function op(::OpName"Adagup", ::SiteType"Electron") - return [ - 0.0 0.0 0.0 0.0 - 1.0 0.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - 0.0 0.0 1.0 0.0 - ] + return [ + 0.0 0.0 0.0 0.0 + 1.0 0.0 0.0 0.0 + 0.0 0.0 0.0 0.0 + 0.0 0.0 1.0 0.0 + ] end function op(::OpName"a†↑", st::SiteType"Electron") - return op(OpName("Adagup"), st) + return op(OpName("Adagup"), st) end function op(::OpName"Adn", ::SiteType"Electron") - return [ - 0.0 0.0 1.0 0.0 - 0.0 0.0 0.0 1.0 - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - ] + return [ + 0.0 0.0 1.0 0.0 + 0.0 0.0 0.0 1.0 + 0.0 0.0 0.0 0.0 + 0.0 0.0 0.0 0.0 + ] end function op(::OpName"a↓", st::SiteType"Electron") - return op(OpName("Adn"), st) + return op(OpName("Adn"), st) end function op(::OpName"Adagdn", ::SiteType"Electron") - return [ - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - 1.0 0.0 0.0 0.0 - 0.0 1.0 0.0 0.0 - ] + return [ + 0.0 0.0 0.0 0.0 + 0.0 0.0 0.0 0.0 + 1.0 0.0 0.0 0.0 + 0.0 1.0 0.0 0.0 + ] end function op(::OpName"a†↓", st::SiteType"Electron") - return op(OpName("Adagdn"), st) + return op(OpName("Adagdn"), st) end function op(::OpName"F", ::SiteType"Electron") - return [ - 1.0 0.0 0.0 0.0 - 0.0 -1.0 0.0 0.0 - 0.0 0.0 -1.0 0.0 - 0.0 0.0 0.0 1.0 - ] + return [ + 1.0 0.0 0.0 0.0 + 0.0 -1.0 0.0 0.0 + 0.0 0.0 -1.0 0.0 + 0.0 0.0 0.0 1.0 + ] end function op(::OpName"Fup", ::SiteType"Electron") - return [ - 1.0 0.0 0.0 0.0 - 0.0 -1.0 0.0 0.0 - 0.0 0.0 1.0 0.0 - 0.0 0.0 0.0 -1.0 - ] + return [ + 1.0 0.0 0.0 0.0 + 0.0 -1.0 0.0 0.0 + 0.0 0.0 1.0 0.0 + 0.0 0.0 0.0 -1.0 + ] end function op(::OpName"F↑", st::SiteType"Electron") - return op(OpName("Fup"), st) + return 
op(OpName("Fup"), st) end function op(::OpName"Fdn", ::SiteType"Electron") - return [ - 1.0 0.0 0.0 0.0 - 0.0 1.0 0.0 0.0 - 0.0 0.0 -1.0 0.0 - 0.0 0.0 0.0 -1.0 - ] + return [ + 1.0 0.0 0.0 0.0 + 0.0 1.0 0.0 0.0 + 0.0 0.0 -1.0 0.0 + 0.0 0.0 0.0 -1.0 + ] end function op(::OpName"F↓", st::SiteType"Electron") - return op(OpName("Fdn"), st) + return op(OpName("Fdn"), st) end function op(::OpName"Sz", ::SiteType"Electron") - #Op[s' => 2, s => 2] = +0.5 - #return Op[s' => 3, s => 3] = -0.5 - return [ - 0.0 0.0 0.0 0.0 - 0.0 0.5 0.0 0.0 - 0.0 0.0 -0.5 0.0 - 0.0 0.0 0.0 0.0 - ] + #Op[s' => 2, s => 2] = +0.5 + #return Op[s' => 3, s => 3] = -0.5 + return [ + 0.0 0.0 0.0 0.0 + 0.0 0.5 0.0 0.0 + 0.0 0.0 -0.5 0.0 + 0.0 0.0 0.0 0.0 + ] end function op(::OpName"Sᶻ", st::SiteType"Electron") - return op(OpName("Sz"), st) + return op(OpName("Sz"), st) end function op(::OpName"Sx", ::SiteType"Electron") - return [ - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.5 0.0 - 0.0 0.5 0.0 0.0 - 0.0 0.0 0.0 0.0 - ] + return [ + 0.0 0.0 0.0 0.0 + 0.0 0.0 0.5 0.0 + 0.0 0.5 0.0 0.0 + 0.0 0.0 0.0 0.0 + ] end function op(::OpName"Sˣ", st::SiteType"Electron") - return op(OpName("Sx"), st) + return op(OpName("Sx"), st) end function op(::OpName"S+", ::SiteType"Electron") - return [ - 0.0 0.0 0.0 0.0 - 0.0 0.0 1.0 0.0 - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - ] + return [ + 0.0 0.0 0.0 0.0 + 0.0 0.0 1.0 0.0 + 0.0 0.0 0.0 0.0 + 0.0 0.0 0.0 0.0 + ] end function op(::OpName"S⁺", st::SiteType"Electron") - return op(OpName("S+"), st) + return op(OpName("S+"), st) end function op(::OpName"Sp", st::SiteType"Electron") - return op(OpName("S+"), st) + return op(OpName("S+"), st) end function op(::OpName"Splus", st::SiteType"Electron") - return op(OpName("S+"), st) + return op(OpName("S+"), st) end function op(::OpName"S-", ::SiteType"Electron") - return [ - 0.0 0.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - 0.0 1.0 0.0 0.0 - 0.0 0.0 0.0 0.0 - ] + return [ + 0.0 0.0 0.0 0.0 + 0.0 0.0 0.0 0.0 + 0.0 1.0 0.0 0.0 + 0.0 0.0 0.0 0.0 + ] end function op(::OpName"S⁻", st::SiteType"Electron") - return op(OpName("S-"), st) + return op(OpName("S-"), st) end function op(::OpName"Sm", st::SiteType"Electron") - return op(OpName("S-"), st) + return op(OpName("S-"), st) end function op(::OpName"Sminus", st::SiteType"Electron") - return op(OpName("S-"), st) + return op(OpName("S-"), st) end has_fermion_string(::OpName"Cup", ::SiteType"Electron") = true function has_fermion_string(on::OpName"c↑", st::SiteType"Electron") - return has_fermion_string(alias(on), st) + return has_fermion_string(alias(on), st) end has_fermion_string(::OpName"Cdagup", ::SiteType"Electron") = true function has_fermion_string(on::OpName"c†↑", st::SiteType"Electron") - return has_fermion_string(alias(on), st) + return has_fermion_string(alias(on), st) end has_fermion_string(::OpName"Cdn", ::SiteType"Electron") = true function has_fermion_string(on::OpName"c↓", st::SiteType"Electron") - return has_fermion_string(alias(on), st) + return has_fermion_string(alias(on), st) end has_fermion_string(::OpName"Cdagdn", ::SiteType"Electron") = true function has_fermion_string(on::OpName"c†↓", st::SiteType"Electron") - return has_fermion_string(alias(on), st) + return has_fermion_string(alias(on), st) end diff --git a/src/lib/SiteTypes/src/sitetypes/generic_sites.jl b/src/lib/SiteTypes/src/sitetypes/generic_sites.jl index 6c7ab60320..1f62da6001 100644 --- a/src/lib/SiteTypes/src/sitetypes/generic_sites.jl +++ b/src/lib/SiteTypes/src/sitetypes/generic_sites.jl @@ -3,25 +3,25 @@ using NDTensors: NDTensors, dim, tensor using 
..ITensors: ITensor, itensor, settensor! function op!( - o::ITensor, ::OpName"Id", ::SiteType"Generic", s1::Index, sn::Index...; eltype=Float64 -) - s = (s1, sn...) - n = prod(dim.(s)) - t = itensor(Matrix(one(eltype) * I, n, n), prime.(s)..., dag.(s)...) - return settensor!(o, tensor(t)) + o::ITensor, ::OpName"Id", ::SiteType"Generic", s1::Index, sn::Index...; eltype = Float64 + ) + s = (s1, sn...) + n = prod(dim.(s)) + t = itensor(Matrix(one(eltype) * I, n, n), prime.(s)..., dag.(s)...) + return settensor!(o, tensor(t)) end function op!(o::ITensor, on::OpName"I", st::SiteType"Generic", s::Index...; kwargs...) - return op!(o, alias(on), st, s...; kwargs...) + return op!(o, alias(on), st, s...; kwargs...) end function op!(o::ITensor, ::OpName"F", st::SiteType"Generic", s::Index; kwargs...) - return op!(o, OpName("Id"), st, s; kwargs...) + return op!(o, OpName("Id"), st, s; kwargs...) end function default_random_matrix(eltype::Type, s::Index...) - n = prod(dim.(s)) - return randn(eltype, n, n) + n = prod(dim.(s)) + return randn(eltype, n, n) end # Haar-random unitary @@ -30,20 +30,20 @@ end # Section 4.6 # http://math.mit.edu/~edelman/publications/random_matrix_theory.pdf function op!( - o::ITensor, - ::OpName"RandomUnitary", - ::SiteType"Generic", - s1::Index, - sn::Index...; - eltype=ComplexF64, - random_matrix=default_random_matrix(eltype, s1, sn...), -) - s = (s1, sn...) - Q, _ = NDTensors.qr_positive(random_matrix) - t = itensor(Q, prime.(s)..., dag.(s)...) - return settensor!(o, tensor(t)) + o::ITensor, + ::OpName"RandomUnitary", + ::SiteType"Generic", + s1::Index, + sn::Index...; + eltype = ComplexF64, + random_matrix = default_random_matrix(eltype, s1, sn...), + ) + s = (s1, sn...) + Q, _ = NDTensors.qr_positive(random_matrix) + t = itensor(Q, prime.(s)..., dag.(s)...) + return settensor!(o, tensor(t)) end function op!(o::ITensor, ::OpName"randU", st::SiteType"Generic", s::Index...; kwargs...) - return op!(o, OpName("RandomUnitary"), st, s...; kwargs...) + return op!(o, OpName("RandomUnitary"), st, s...; kwargs...) end diff --git a/src/lib/SiteTypes/src/sitetypes/qudit.jl b/src/lib/SiteTypes/src/sitetypes/qudit.jl index 6644cf06e8..29cd1ea5ea 100644 --- a/src/lib/SiteTypes/src/sitetypes/qudit.jl +++ b/src/lib/SiteTypes/src/sitetypes/qudit.jl @@ -12,92 +12,92 @@ Create the Hilbert space for a site of type "Qudit". Optionally specify the conserved symmetries and their quantum number labels. """ function space( - ::SiteType"Qudit"; - dim=2, - conserve_qns=false, - conserve_number=conserve_qns, - qnname_number="Number", -) - if conserve_number - return [QN(qnname_number, n - 1) => 1 for n in 1:dim] - end - return dim + ::SiteType"Qudit"; + dim = 2, + conserve_qns = false, + conserve_number = conserve_qns, + qnname_number = "Number", + ) + if conserve_number + return [QN(qnname_number, n - 1) => 1 for n in 1:dim] + end + return dim end function val(::ValName{N}, ::SiteType"Qudit") where {N} - return parse(Int, String(N)) + 1 + return parse(Int, String(N)) + 1 end function state(::StateName{N}, ::SiteType"Qudit", s::Index) where {N} - n = parse(Int, String(N)) - st = zeros(dim(s)) - st[n + 1] = 1.0 - return itensor(st, s) + n = parse(Int, String(N)) + st = zeros(dim(s)) + st[n + 1] = 1.0 + return itensor(st, s) end # one-body operators function op(::OpName"Id", ::SiteType"Qudit", ds::Int...) - d = prod(ds) - return Matrix(1.0I, d, d) + d = prod(ds) + return Matrix(1.0I, d, d) end op(on::OpName"I", st::SiteType"Qudit", ds::Int...) = op(alias(on), st, ds...) 
op(on::OpName"F", st::SiteType"Qudit", ds::Int...) = op(OpName"Id"(), st, ds...) function op(::OpName"Adag", ::SiteType"Qudit", d::Int) - mat = zeros(d, d) - for k in 1:(d - 1) - mat[k + 1, k] = √k - end - return mat + mat = zeros(d, d) + for k in 1:(d - 1) + mat[k + 1, k] = √k + end + return mat end op(on::OpName"adag", st::SiteType"Qudit", d::Int) = op(alias(on), st, d) op(on::OpName"a†", st::SiteType"Qudit", d::Int) = op(alias(on), st, d) function op(::OpName"A", ::SiteType"Qudit", d::Int) - mat = zeros(d, d) - for k in 1:(d - 1) - mat[k, k + 1] = √k - end - return mat + mat = zeros(d, d) + for k in 1:(d - 1) + mat[k, k + 1] = √k + end + return mat end op(on::OpName"a", st::SiteType"Qudit", d::Int) = op(alias(on), st, d) function op(::OpName"N", ::SiteType"Qudit", d::Int) - mat = zeros(d, d) - for k in 1:d - mat[k, k] = k - 1 - end - return mat + mat = zeros(d, d) + for k in 1:d + mat[k, k] = k - 1 + end + return mat end op(on::OpName"n", st::SiteType"Qudit", d::Int) = op(alias(on), st, d) # two-body operators function op(::OpName"ab", st::SiteType"Qudit", d1::Int, d2::Int) - return kron(op(OpName("a"), st, d1), op(OpName("a"), st, d2)) + return kron(op(OpName("a"), st, d1), op(OpName("a"), st, d2)) end function op(::OpName"a†b", st::SiteType"Qudit", d1::Int, d2::Int) - return kron(op(OpName("a†"), st, d1), op(OpName("a"), st, d2)) + return kron(op(OpName("a†"), st, d1), op(OpName("a"), st, d2)) end function op(::OpName"ab†", st::SiteType"Qudit", d1::Int, d2::Int) - return kron(op(OpName("a"), st, d1), op(OpName("a†"), st, d2)) + return kron(op(OpName("a"), st, d1), op(OpName("a†"), st, d2)) end function op(::OpName"a†b†", st::SiteType"Qudit", d1::Int, d2::Int) - return kron(op(OpName("a†"), st, d1), op(OpName("a†"), st, d2)) + return kron(op(OpName("a†"), st, d1), op(OpName("a†"), st, d2)) end # interface function op(on::OpName, st::SiteType"Qudit", s1::Index, s_tail::Index...; kwargs...) - rs = reverse((s1, s_tail...)) - ds = dim.(rs) - opmat = op(on, st, ds...; kwargs...) - return itensor(opmat, prime.(rs)..., dag.(rs)...) + rs = reverse((s1, s_tail...)) + ds = dim.(rs) + opmat = op(on, st, ds...; kwargs...) + return itensor(opmat, prime.(rs)..., dag.(rs)...) end function op(on::OpName, st::SiteType"Qudit"; kwargs...) - return error("`op` can't be called without indices or dimensions.") + return error("`op` can't be called without indices or dimensions.") end # Zygote diff --git a/src/oneitensor.jl b/src/oneitensor.jl index e6cfb90bec..1f048e5b03 100644 --- a/src/oneitensor.jl +++ b/src/oneitensor.jl @@ -16,4 +16,4 @@ dag(t::OneITensor) = t (::OneITensor * A::ITensor) = A (A::ITensor * ::OneITensor) = A *(t::OneITensor) = t -deepcontract(ts::Union{ITensor,OneITensor}...) = *(ts...) +deepcontract(ts::Union{ITensor, OneITensor}...) = *(ts...) 
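
An illustrative sketch (not part of the patch): the "Qudit" site-type code reformatted above is normally used through the `siteind`/`op` interface, and the `matrix` accessor touched later in this diff gives a quick way to check the operator definitions. The local dimension 3 below is an arbitrary choice for the example, not a value taken from the diff.

```julia
using ITensors

# Local Hilbert space of dimension 3 for the "Qudit" site type
# (`dim` is the keyword argument of space(::SiteType"Qudit") shown above).
s = siteind("Qudit"; dim = 3)

# One-body operators defined in the reformatted code above.
n  = op("N", s)    # number operator, diag(0, 1, 2)
a  = op("a", s)    # lowering operator
ad = op("a†", s)   # raising operator

# Sanity check of the ladder relation n = a†a, written with the
# `matrix(::ITensor, inds...)` accessor that also appears in this patch.
@assert matrix(n, s', s) ≈ matrix(ad, s', s) * matrix(a, s', s)
```
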
diff --git a/src/packagecompile/compile.jl b/src/packagecompile/compile.jl index 88d1ce6b5b..19d735456d 100644 --- a/src/packagecompile/compile.jl +++ b/src/packagecompile/compile.jl @@ -4,40 +4,40 @@ default_compile_filename() = "sys_itensors.so" default_compile_path() = joinpath(default_compile_dir(), default_compile_filename()) -function compile_note(; dir=default_compile_dir(), filename=default_compile_filename()) - path = joinpath(dir, filename) - return """ - You will be able to start Julia with a compiled version of ITensors using: +function compile_note(; dir = default_compile_dir(), filename = default_compile_filename()) + path = joinpath(dir, filename) + return """ + You will be able to start Julia with a compiled version of ITensors using: - ``` - ~ julia --sysimage $path - ``` + ``` + ~ julia --sysimage $path + ``` - and you should see that the startup times and JIT compilation times are substantially improved when you are using ITensors. + and you should see that the startup times and JIT compilation times are substantially improved when you are using ITensors. - In unix, you can create an alias with the Bash command: + In unix, you can create an alias with the Bash command: - ``` - ~ alias julia_itensors="julia --sysimage $path -e 'using ITensors' -i" - ``` + ``` + ~ alias julia_itensors="julia --sysimage $path -e 'using ITensors' -i" + ``` - which you can put in your `~/.bashrc`, `~/.zshrc`, etc. This also executes - `using ITensors` so that ITensors is loaded and ready to use, you can leave off ` - -e 'using ITensors' -i` if you don't want that. Then you can start Julia with a - version of ITensors installed with the command: + which you can put in your `~/.bashrc`, `~/.zshrc`, etc. This also executes + `using ITensors` so that ITensors is loaded and ready to use, you can leave off ` + -e 'using ITensors' -i` if you don't want that. Then you can start Julia with a + version of ITensors installed with the command: - ``` - ~ julia_itensors - ``` + ``` + ~ julia_itensors + ``` - Note that if you update ITensors to a new version, for example with `using - Pkg; Pkg.update("ITensors")`, you will need to run the `ITensors.compile()` - command again to recompile the new version of ITensors. - """ + Note that if you update ITensors to a new version, for example with `using + Pkg; Pkg.update("ITensors")`, you will need to run the `ITensors.compile()` + command again to recompile the new version of ITensors. + """ end -function compile(; backend=Algorithm"PackageCompiler"(), kwargs...) - return compile(backend; kwargs...) +function compile(; backend = Algorithm"PackageCompiler"(), kwargs...) + return compile(backend; kwargs...) end @doc """ diff --git a/src/qn/qnitensor.jl b/src/qn/qnitensor.jl index 0faf2f9b9d..c7b760737d 100644 --- a/src/qn/qnitensor.jl +++ b/src/qn/qnitensor.jl @@ -2,15 +2,15 @@ using .QuantumNumbers: QuantumNumbers, removeqn using NDTensors: sim @propagate_inbounds @inline function _setindex!!( - ::HasQNs, T::Tensor, x::Number, I::Integer... -) - fluxT = flux(T) - if !isnothing(fluxT) && fluxT != flux(T, I...) - error( - "In `setindex!`, the element $I of ITensor: \n$(T)\n you are trying to set is in a block with flux $(flux(T, I...)), which is different from the flux $fluxT of the other blocks of the ITensor. You may be trying to create an ITensor that does not have a well defined quantum number flux.", + ::HasQNs, T::Tensor, x::Number, I::Integer... ) - end - return setindex!!(T, x, I...) + fluxT = flux(T) + if !isnothing(fluxT) && fluxT != flux(T, I...) 
+ error( + "In `setindex!`, the element $I of ITensor: \n$(T)\n you are trying to set is in a block with flux $(flux(T, I...)), which is different from the flux $fluxT of the other blocks of the ITensor. You may be trying to create an ITensor that does not have a well defined quantum number flux.", + ) + end + return setindex!!(T, x, I...) end # TODO: Replace with a simpler and more generic `zeros` constructor @@ -22,11 +22,11 @@ end # This is only used internally inside the implementation of `directsum` # right now. function zeros_itensor(elt::Type{<:Number}, inds::QNIndex...) - return itensor( - tensor( - BlockSparse(elt, undef, NDTensors.Dictionary{Block{length(inds)},Int}(), 0), inds - ), - ) + return itensor( + tensor( + BlockSparse(elt, undef, NDTensors.Dictionary{Block{length(inds)}, Int}(), 0), inds + ), + ) end """ @@ -154,34 +154,34 @@ julia> flux(A) QN(-1) ``` """ -function ITensor(::Type{ElT}, flux::QN, inds::QNIndices) where {ElT<:Number} - is = Tuple(inds) - blocks = nzblocks(flux, is) - if length(blocks) == 0 - error("ITensor with flux=$flux resulted in no allowed blocks") - end - T = BlockSparseTensor(ElT, blocks, is) - return itensor(T) +function ITensor(::Type{ElT}, flux::QN, inds::QNIndices) where {ElT <: Number} + is = Tuple(inds) + blocks = nzblocks(flux, is) + if length(blocks) == 0 + error("ITensor with flux=$flux resulted in no allowed blocks") + end + T = BlockSparseTensor(ElT, blocks, is) + return itensor(T) end # This helps with making code more generic between block sparse # and dense. -function ITensor(::Type{ElT}, flux::QN, inds::Indices) where {ElT<:Number} - return itensor(Dense(ElT, dim(inds)), inds) +function ITensor(::Type{ElT}, flux::QN, inds::Indices) where {ElT <: Number} + return itensor(Dense(ElT, dim(inds)), inds) end -function ITensor(::Type{ElT}, flux::QN, is...) where {ElT<:Number} - return ITensor(ElT, flux, indices(is...)) +function ITensor(::Type{ElT}, flux::QN, is...) where {ElT <: Number} + return ITensor(ElT, flux, indices(is...)) end ITensor(flux::QN, is...) = ITensor(Float64, flux, is...) -ITensor(::Type{ElT}, inds::QNIndices) where {ElT<:Number} = emptyITensor(ElT, inds) +ITensor(::Type{ElT}, inds::QNIndices) where {ElT <: Number} = emptyITensor(ElT, inds) ITensor(inds::QNIndices) = emptyITensor(inds) # TODO: generalize to list of Tuple, Vector, and QNIndex -ITensor(::Type{ElT}, is::QNIndex...) where {ElT<:Number} = emptyITensor(ElT, indices(is...)) +ITensor(::Type{ElT}, is::QNIndex...) where {ElT <: Number} = emptyITensor(ElT, indices(is...)) # TODO: generalize to list of Tuple, Vector, and QNIndex ITensor(is::QNIndex...) = emptyITensor(indices(is...)) @@ -208,20 +208,20 @@ C = ITensor(ComplexF64,undef,QN(0),i',dag(i)) ``` """ function ITensor( - ::Type{ElT}, ::UndefInitializer, flux::QN, inds::Indices -) where {ElT<:Number} - is = Tuple(inds) - blocks = nzblocks(flux, is) - T = BlockSparseTensor(ElT, undef, blocks, is) - return itensor(T) + ::Type{ElT}, ::UndefInitializer, flux::QN, inds::Indices + ) where {ElT <: Number} + is = Tuple(inds) + blocks = nzblocks(flux, is) + T = BlockSparseTensor(ElT, undef, blocks, is) + return itensor(T) end -function ITensor(::Type{ElT}, ::UndefInitializer, flux::QN, is...) where {ElT<:Number} - return ITensor(ElT, undef, flux, indices(is...)) +function ITensor(::Type{ElT}, ::UndefInitializer, flux::QN, is...) where {ElT <: Number} + return ITensor(ElT, undef, flux, indices(is...)) end function ITensor(::UndefInitializer, flux::QN, is...) 
- return ITensor(Float64, undef, flux, indices(is...)) + return ITensor(Float64, undef, flux, indices(is...)) end """ @@ -249,17 +249,17 @@ C = ITensor(ComplexF64, 4, QN(0), i', dag(i)) `float`, and in that case the particular element type should not be relied on. """ function ITensor(eltype::Type{<:Number}, x::Number, flux::QN, is::Indices) - is_tuple = Tuple(is) - blocks = nzblocks(flux, is_tuple) - if length(blocks) == 0 - error("ITensor with flux=$flux resulted in no allowed blocks") - end - T = BlockSparseTensor(eltype(x), blocks, is_tuple) - return itensor(T) + is_tuple = Tuple(is) + blocks = nzblocks(flux, is_tuple) + if length(blocks) == 0 + error("ITensor with flux=$flux resulted in no allowed blocks") + end + T = BlockSparseTensor(eltype(x), blocks, is_tuple) + return itensor(T) end function ITensor(eltype::Type{<:Number}, x::Number, flux::QN, is...) - return ITensor(eltype, x, flux, indices(is...)) + return ITensor(eltype, x, flux, indices(is...)) end ITensor(x::Number, flux::QN, is...) = ITensor(eltype(x), x, flux, is...) @@ -313,37 +313,37 @@ Block: (2, 2) ``` """ function ITensor( - ::AliasStyle, - elt::Type{<:Number}, - A::AbstractArray{<:Number}, - inds::QNIndices; - tol=0.0, - checkflux=true, -) - is = Tuple(inds) - length(A) ≠ dim(inds) && throw( - DimensionMismatch( - "In ITensor(::AbstractArray, inds), length of AbstractArray ($(length(A))) must match total dimension of the indices ($(dim(is)))", - ), - ) - blocks = Block{length(is)}[] - T = BlockSparseTensor(elt, blocks, inds) - A = reshape(A, dims(is)...) - _copyto_dropzeros!(T, A; tol) - if checkflux - ITensors.checkflux(T) - end - return itensor(T) + ::AliasStyle, + elt::Type{<:Number}, + A::AbstractArray{<:Number}, + inds::QNIndices; + tol = 0.0, + checkflux = true, + ) + is = Tuple(inds) + length(A) ≠ dim(inds) && throw( + DimensionMismatch( + "In ITensor(::AbstractArray, inds), length of AbstractArray ($(length(A))) must match total dimension of the indices ($(dim(is)))", + ), + ) + blocks = Block{length(is)}[] + T = BlockSparseTensor(elt, blocks, inds) + A = reshape(A, dims(is)...) + _copyto_dropzeros!(T, A; tol) + if checkflux + ITensors.checkflux(T) + end + return itensor(T) end function _copyto_dropzeros!(T::Tensor, A::AbstractArray; tol) - for i in eachindex(T) - Aᵢ = A[i] - if abs(Aᵢ) > tol - T[i] = Aᵢ + for i in eachindex(T) + Aᵢ = A[i] + if abs(Aᵢ) > tol + T[i] = Aᵢ + end end - end - return T + return T end # TODO: Deprecated. @@ -355,15 +355,15 @@ Construct an ITensor with `NDTensors.BlockSparse` storage of element type `ElT` If `ElT` is not specified it defaults to `NDTensors.EmptyNumber`. """ -function emptyITensor(::Type{ElT}, inds::QNIndices) where {ElT<:Number} - return itensor(EmptyBlockSparseTensor(ElT, inds)) +function emptyITensor(::Type{ElT}, inds::QNIndices) where {ElT <: Number} + return itensor(EmptyBlockSparseTensor(ElT, inds)) end emptyITensor(inds::QNIndices) = emptyITensor(EmptyNumber, inds) function emptyITensor(eltype::Type{<:Number}, flux::QN, is...) - return error( - "Trying to create an empty ITensor with flux $flux, cannot create empty ITensor with a specified flux.", - ) + return error( + "Trying to create an empty ITensor with flux $flux, cannot create empty ITensor with a specified flux.", + ) end emptyITensor(flux::QN, is...) = emptyITensor(EmptyNumber, flux, is...) @@ -376,58 +376,58 @@ elements of type `ElT` where the nonzero blocks are determined by `flux`. If `ElT` is not specified it defaults to `Float64`. If the flux is not specified it defaults to `QN()`. 
""" -function random_itensor(::Type{ElT}, flux::QN, inds::Indices) where {ElT<:Number} - return random_itensor(Random.default_rng(), ElT, flux, inds) +function random_itensor(::Type{ElT}, flux::QN, inds::Indices) where {ElT <: Number} + return random_itensor(Random.default_rng(), ElT, flux, inds) end function random_itensor( - rng::AbstractRNG, ::Type{ElT}, flux::QN, inds::Indices -) where {ElT<:Number} - T = ITensor(ElT, undef, flux, inds) - randn!(rng, T) - return T + rng::AbstractRNG, ::Type{ElT}, flux::QN, inds::Indices + ) where {ElT <: Number} + T = ITensor(ElT, undef, flux, inds) + randn!(rng, T) + return T end -function random_itensor(::Type{ElT}, flux::QN, is...) where {ElT<:Number} - return random_itensor(Random.default_rng(), ElT, flux, is...) +function random_itensor(::Type{ElT}, flux::QN, is...) where {ElT <: Number} + return random_itensor(Random.default_rng(), ElT, flux, is...) end -function random_itensor(rng::AbstractRNG, ::Type{ElT}, flux::QN, is...) where {ElT<:Number} - return random_itensor(rng, ElT, flux, indices(is...)) +function random_itensor(rng::AbstractRNG, ::Type{ElT}, flux::QN, is...) where {ElT <: Number} + return random_itensor(rng, ElT, flux, indices(is...)) end -function random_itensor(::Type{ElT}, inds::QNIndices) where {ElT<:Number} - return random_itensor(Random.default_rng(), ElT, inds) +function random_itensor(::Type{ElT}, inds::QNIndices) where {ElT <: Number} + return random_itensor(Random.default_rng(), ElT, inds) end -function random_itensor(rng::AbstractRNG, ::Type{ElT}, inds::QNIndices) where {ElT<:Number} - return random_itensor(rng, ElT, QN(), inds) +function random_itensor(rng::AbstractRNG, ::Type{ElT}, inds::QNIndices) where {ElT <: Number} + return random_itensor(rng, ElT, QN(), inds) end function random_itensor(flux::QN, inds::Indices) - return random_itensor(Random.default_rng(), flux, inds) + return random_itensor(Random.default_rng(), flux, inds) end function random_itensor(rng::AbstractRNG, flux::QN, inds::Indices) - return random_itensor(rng, Float64, flux, inds) + return random_itensor(rng, Float64, flux, inds) end function random_itensor(flux::QN, is...) - return random_itensor(Random.default_rng(), flux, is...) + return random_itensor(Random.default_rng(), flux, is...) end function random_itensor(rng::AbstractRNG, flux::QN, is...) - return random_itensor(rng, Float64, flux, indices(is...)) + return random_itensor(rng, Float64, flux, indices(is...)) end # TODO: generalize to list of Tuple, Vector, and QNIndex -function random_itensor(::Type{ElT}, inds::QNIndex...) where {ElT<:Number} - return random_itensor(Random.default_rng(), ElT, inds...) +function random_itensor(::Type{ElT}, inds::QNIndex...) where {ElT <: Number} + return random_itensor(Random.default_rng(), ElT, inds...) end # TODO: generalize to list of Tuple, Vector, and QNIndex -function random_itensor(rng::AbstractRNG, ::Type{ElT}, inds::QNIndex...) where {ElT<:Number} - return random_itensor(rng, ElT, QN(), inds) +function random_itensor(rng::AbstractRNG, ::Type{ElT}, inds::QNIndex...) where {ElT <: Number} + return random_itensor(rng, ElT, QN(), inds) end random_itensor(inds::QNIndices) = random_itensor(Random.default_rng(), inds) @@ -439,15 +439,15 @@ random_itensor(inds::QNIndex...) = random_itensor(Random.default_rng(), inds...) # TODO: generalize to list of Tuple, Vector, and QNIndex function random_itensor(rng::AbstractRNG, inds::QNIndex...) 
- return random_itensor(rng, Float64, QN(), inds) + return random_itensor(rng, Float64, QN(), inds) end -function combiner(inds::QNIndices; dir=nothing, tags="CMB,Link") - # TODO: support combining multiple set of indices - is = Tuple(inds) - new_ind = ⊗(is...; dir, tags) - comb_ind, perm, comb = combineblocks(new_ind) - return itensor(Combiner(perm, comb), (comb_ind, dag.(is)...)) +function combiner(inds::QNIndices; dir = nothing, tags = "CMB,Link") + # TODO: support combining multiple set of indices + is = Tuple(inds) + new_ind = ⊗(is...; dir, tags) + comb_ind, perm, comb = combineblocks(new_ind) + return itensor(Combiner(perm, comb), (comb_ind, dag.(is)...)) end # @@ -464,27 +464,27 @@ Make an ITensor with storage type `NDTensors.DiagBlockSparse` with elements If the element type is not specified, it defaults to `Float64`. If theflux is not specified, it defaults to `QN()`. """ -function diag_itensor(::Type{ElT}, flux::QN, inds::Indices) where {ElT<:Number} - is = Tuple(inds) - blocks = nzdiagblocks(flux, is) - T = DiagBlockSparseTensor(ElT, blocks, is) - return itensor(T) +function diag_itensor(::Type{ElT}, flux::QN, inds::Indices) where {ElT <: Number} + is = Tuple(inds) + blocks = nzdiagblocks(flux, is) + T = DiagBlockSparseTensor(ElT, blocks, is) + return itensor(T) end -function diag_itensor(::Type{ElT}, flux::QN, is...) where {ElT<:Number} - return diag_itensor(ElT, flux, indices(is...)) +function diag_itensor(::Type{ElT}, flux::QN, is...) where {ElT <: Number} + return diag_itensor(ElT, flux, indices(is...)) end -function diag_itensor(x::ElT, flux::QN, inds::QNIndices) where {ElT<:Number} - is = Tuple(inds) - blocks = nzdiagblocks(flux, is) - T = DiagBlockSparseTensor(float(ElT), blocks, is) - NDTensors.data(T) .= x - return itensor(T) +function diag_itensor(x::ElT, flux::QN, inds::QNIndices) where {ElT <: Number} + is = Tuple(inds) + blocks = nzdiagblocks(flux, is) + T = DiagBlockSparseTensor(float(ElT), blocks, is) + NDTensors.data(T) .= x + return itensor(T) end function diag_itensor(x::Number, flux::QN, is...) - return diag_itensor(x, flux, indices(is...)) + return diag_itensor(x, flux, indices(is...)) end diag_itensor(x::Number, is::QNIndices) = diag_itensor(x, QN(), is) @@ -496,12 +496,12 @@ diag_itensor(flux::QN, is::Indices) = diag_itensor(Float64, flux, is) diag_itensor(flux::QN, is...) = diag_itensor(Float64, flux, indices(is...)) -function diag_itensor(::Type{ElT}, inds::QNIndices) where {ElT<:Number} - return diag_itensor(ElT, QN(), inds) +function diag_itensor(::Type{ElT}, inds::QNIndices) where {ElT <: Number} + return diag_itensor(ElT, QN(), inds) end function diag_itensor(inds::QNIndices) - return diag_itensor(Float64, QN(), inds) + return diag_itensor(Float64, QN(), inds) end """ @@ -515,76 +515,76 @@ specified `flux`. If the element type is not specified, it defaults to `Float64`. If theflux is not specified, it defaults to `QN()`. """ -function delta(::Type{ElT}, flux::QN, inds::Indices) where {ElT<:Number} - is = Tuple(inds) - blocks = nzdiagblocks(flux, is) - T = DiagBlockSparseTensor(one(ElT), blocks, is) - return itensor(T) +function delta(::Type{ElT}, flux::QN, inds::Indices) where {ElT <: Number} + is = Tuple(inds) + blocks = nzdiagblocks(flux, is) + T = DiagBlockSparseTensor(one(ElT), blocks, is) + return itensor(T) end -function delta(::Type{ElT}, flux::QN, is...) where {ElT<:Number} - return delta(ElT, flux, indices(is...)) +function delta(::Type{ElT}, flux::QN, is...) 
where {ElT <: Number} + return delta(ElT, flux, indices(is...)) end delta(flux::QN, inds::Indices) = delta(Float64, flux, is) delta(flux::QN, is...) = delta(Float64, flux, indices(is...)) -function delta(::Type{ElT}, inds::QNIndices) where {ElT<:Number} - return delta(ElT, QN(), inds) +function delta(::Type{ElT}, inds::QNIndices) where {ElT <: Number} + return delta(ElT, QN(), inds) end delta(inds::QNIndices) = delta(Float64, QN(), inds) -function dropzeros(T::ITensor; tol=0) - # XXX: replace with empty(T) - T̃ = emptyITensor(eltype(T), inds(T)) - for b in eachnzblock(T) - Tb = T[b] - if norm(Tb) > tol - T̃[b] = Tb +function dropzeros(T::ITensor; tol = 0) + # XXX: replace with empty(T) + T̃ = emptyITensor(eltype(T), inds(T)) + for b in eachnzblock(T) + Tb = T[b] + if norm(Tb) > tol + T̃[b] = Tb + end end - end - return T̃ + return T̃ end function δ_split(i1::Index, i2::Index) - d = emptyITensor(i1, i2) - for n in 1:min(dim(i1), dim(i2)) - d[n, n] = 1 - end - return d + d = emptyITensor(i1, i2) + for n in 1:min(dim(i1), dim(i2)) + d[n, n] = 1 + end + return d end -function splitblocks(A::ITensor, is=inds(A); tol=0) - if !hasqns(A) +function splitblocks(A::ITensor, is = inds(A); tol = 0) + if !hasqns(A) + return A + end + isA = filterinds(A; inds = is) + for i in isA + i_split = splitblocks(i) + ĩ_split = sim(i_split) + # Ideally use norm δ tensor but currently + # it doesn't work properly: + #A *= δ(dag(i), ĩ_split) + d = δ_split(dag(i), ĩ_split) + A *= δ_split(dag(i), ĩ_split) + A = replaceind(A, ĩ_split, i_split) + end + A = dropzeros(A; tol = tol) return A - end - isA = filterinds(A; inds=is) - for i in isA - i_split = splitblocks(i) - ĩ_split = sim(i_split) - # Ideally use norm δ tensor but currently - # it doesn't work properly: - #A *= δ(dag(i), ĩ_split) - d = δ_split(dag(i), ĩ_split) - A *= δ_split(dag(i), ĩ_split) - A = replaceind(A, ĩ_split, i_split) - end - A = dropzeros(A; tol=tol) - return A -end - -function QuantumNumbers.removeqn(T::ITensor, qn_name::String; mergeblocks=true) - if !hasqns(T) - return T - end - inds_R = removeqn(inds(T), qn_name; mergeblocks) - R = ITensor(inds_R) - for iv in eachindex(T) - if !iszero(T[iv]) - R[iv] = T[iv] +end + +function QuantumNumbers.removeqn(T::ITensor, qn_name::String; mergeblocks = true) + if !hasqns(T) + return T + end + inds_R = removeqn(inds(T), qn_name; mergeblocks) + R = ITensor(inds_R) + for iv in eachindex(T) + if !iszero(T[iv]) + R[iv] = T[iv] + end end - end - return R + return R end diff --git a/src/readwrite.jl b/src/readwrite.jl index c75462e326..9b30240b91 100644 --- a/src/readwrite.jl +++ b/src/readwrite.jl @@ -1,13 +1,13 @@ -function readcpp(io::IO, ::Type{Vector{T}}; format="v3") where {T} - v = Vector{T}() - if format == "v3" - size = read(io, UInt64) - resize!(v, size) - for n in 1:size - v[n] = readcpp(io, T; format) +function readcpp(io::IO, ::Type{Vector{T}}; format = "v3") where {T} + v = Vector{T}() + if format == "v3" + size = read(io, UInt64) + resize!(v, size) + for n in 1:size + v[n] = readcpp(io, T; format) + end + else + throw(ArgumentError("read Vector: format=$format not supported")) end - else - throw(ArgumentError("read Vector: format=$format not supported")) - end - return v + return v end diff --git a/src/set_types.jl b/src/set_types.jl index dc96a8cc12..89cd03287f 100644 --- a/src/set_types.jl +++ b/src/set_types.jl @@ -1,5 +1,5 @@ using NDTensors.TypeParameterAccessors: unwrap_array_type NDTensors.TypeParameterAccessors.parenttype(::ITensor) = typeof(tensor(T)) function 
NDTensors.TypeParameterAccessors.unwrap_array_type(T::ITensor) - return unwrap_array_type(tensor(T)) + return unwrap_array_type(tensor(T)) end diff --git a/src/tensor_operations/itensor_combiner.jl b/src/tensor_operations/itensor_combiner.jl index 5b350b131f..e31994ba9b 100644 --- a/src/tensor_operations/itensor_combiner.jl +++ b/src/tensor_operations/itensor_combiner.jl @@ -1,7 +1,7 @@ -function combiner(is::Indices; dir=nothing, tags="CMB,Link") - new_ind = Index(prod(dims(is)); dir, tags) - new_is = (new_ind, is...) - return itensor(Combiner(), new_is) +function combiner(is::Indices; dir = nothing, tags = "CMB,Link") + new_ind = Index(prod(dims(is)); dir, tags) + new_is = (new_ind, is...) + return itensor(Combiner(), new_is) end combiner(is...; kwargs...) = combiner(indices(is...); kwargs...) @@ -9,7 +9,7 @@ combiner(i::Index; kwargs...) = combiner((i,); kwargs...) # Special case when no indices are combined (useful for generic code) function combiner(; kwargs...) - return itensor(Combiner(), ()) + return itensor(Combiner(), ()) end """ @@ -22,10 +22,10 @@ the other indices given to the combiner when it is made For more information, see the `combiner` function. """ function combinedind(T::ITensor) - if storage(T) isa Combiner && order(T) > 0 - return inds(T)[1] - end - return nothing + if storage(T) isa Combiner && order(T) > 0 + return inds(T)[1] + end + return nothing end # TODO: add iscombiner(::Tensor) to NDTensors diff --git a/src/tensor_operations/permutations.jl b/src/tensor_operations/permutations.jl index ce6a1f7237..383930359d 100644 --- a/src/tensor_operations/permutations.jl +++ b/src/tensor_operations/permutations.jl @@ -36,46 +36,46 @@ T[1, 1, 1] == pT_alias[1, 1, 1] ``` """ function permute(T::ITensor, new_inds...; kwargs...) - if !hassameinds(T, indices(new_inds...)) - error( - "In `permute(::ITensor, inds...)`, the input ITensor has indices: \n\n$(inds(T))\n\nbut the desired Index ordering is: \n\n$(indices(new_inds...))", + if !hassameinds(T, indices(new_inds...)) + error( + "In `permute(::ITensor, inds...)`, the input ITensor has indices: \n\n$(inds(T))\n\nbut the desired Index ordering is: \n\n$(indices(new_inds...))", + ) + end + allow_alias = deprecated_keyword_argument( + Bool, + kwargs; + new_kw = :allow_alias, + old_kw = :always_copy, + default = false, + funcsym = :permute, + map = !, ) - end - allow_alias = deprecated_keyword_argument( - Bool, - kwargs; - new_kw=:allow_alias, - old_kw=:always_copy, - default=false, - funcsym=:permute, - map=!, - ) - aliasstyle::Union{AllowAlias,NeverAlias} = allow_alias ? AllowAlias() : NeverAlias() - return permute(aliasstyle, T, new_inds...) + aliasstyle::Union{AllowAlias, NeverAlias} = allow_alias ? AllowAlias() : NeverAlias() + return permute(aliasstyle, T, new_inds...) end # TODO: move to NDTensors function NDTensors.permutedims(::AllowAlias, T::Tensor, perm) - return NDTensors.is_trivial_permutation(perm) ? T : permutedims(NeverAlias(), T, perm) + return NDTensors.is_trivial_permutation(perm) ? 
T : permutedims(NeverAlias(), T, perm) end # TODO: move to NDTensors, define `permutedims` in terms of `NeverAlias` function NDTensors.permutedims(::NeverAlias, T::Tensor, perm) - return permutedims(T, perm) + return permutedims(T, perm) end function _permute(as::AliasStyle, T::Tensor, new_inds) - perm = NDTensors.getperm(new_inds, inds(T)) - return permutedims(as, T, perm) + perm = NDTensors.getperm(new_inds, inds(T)) + return permutedims(as, T, perm) end function permute(as::AliasStyle, T::ITensor, new_inds) - return itensor(_permute(as, tensor(T), new_inds)) + return itensor(_permute(as, tensor(T), new_inds)) end # Version listing indices function permute(as::AliasStyle, T::ITensor, new_inds::Index...) - return permute(as, T, new_inds) + return permute(as, T, new_inds) end """ @@ -98,7 +98,7 @@ transposing its indices returns numerically the same ITensor. """ function ishermitian(T::ITensor; kwargs...) - return isapprox(T, dag(transpose(T)); kwargs...) + return isapprox(T, dag(transpose(T)); kwargs...) end """ @@ -156,7 +156,7 @@ so that no permutation is required. See also [`matrix`](@ref), [`vector`](@ref). """ -array(T::ITensor, inds...) = array(permute(T, inds...; allow_alias=true)) +array(T::ITensor, inds...) = array(permute(T, inds...; allow_alias = true)) """ matrix(T::ITensor) @@ -179,8 +179,8 @@ column, depends on the internal layout of the ITensor. See also [`array`](@ref), [`vector`](@ref). """ function matrix(T::ITensor) - ndims(T) != 2 && throw(DimensionMismatch()) - return array(tensor(T)) + ndims(T) != 2 && throw(DimensionMismatch()) + return array(tensor(T)) end """ @@ -204,7 +204,7 @@ so that no permutation is required. See also [`array`](@ref), [`vector`](@ref). """ -matrix(T::ITensor, inds...) = matrix(permute(T, inds...; allow_alias=true)) +matrix(T::ITensor, inds...) = matrix(permute(T, inds...; allow_alias = true)) """ vector(T::ITensor) @@ -216,8 +216,8 @@ or a view in the case the ITensor's storage is Dense. See also [`array`](@ref), [`matrix`](@ref). """ function vector(T::ITensor) - ndims(T) != 1 && throw(DimensionMismatch()) - return array(tensor(T)) + ndims(T) != 1 && throw(DimensionMismatch()) + return array(tensor(T)) end """ @@ -241,4 +241,4 @@ so that no permutation is required. See also [`array`](@ref), [`matrix`](@ref). """ -vector(T::ITensor, inds...) = vector(permute(T, inds...; allow_alias=true)) +vector(T::ITensor, inds...) 
= vector(permute(T, inds...; allow_alias = true)) diff --git a/test/base/runtests.jl b/test/base/runtests.jl index ff4b5e43d1..8857975494 100644 --- a/test/base/runtests.jl +++ b/test/base/runtests.jl @@ -5,12 +5,12 @@ ITensors.Strided.disable_threads() ITensors.BLAS.set_num_threads(1) ITensors.disable_threaded_blocksparse() @testset "$(@__DIR__)" begin - filenames = filter(readdir(@__DIR__)) do f - startswith("test_")(f) && endswith(".jl")(f) - end - @testset "Test $(@__DIR__)/$filename" for filename in filenames - println("Running $(@__DIR__)/$filename") - @time include(filename) - end + filenames = filter(readdir(@__DIR__)) do f + startswith("test_")(f) && endswith(".jl")(f) + end + @testset "Test $(@__DIR__)/$filename" for filename in filenames + println("Running $(@__DIR__)/$filename") + @time include(filename) + end end end diff --git a/test/base/test_argsdict.jl b/test/base/test_argsdict.jl index 0fb251b785..9191e50fe1 100644 --- a/test/base/test_argsdict.jl +++ b/test/base/test_argsdict.jl @@ -2,114 +2,114 @@ using ITensors using Test @testset "Test argsdict function" begin - args_copy = copy(ARGS) - empty!(args_copy) - push!( - args_copy, - "x", - "n = 1", - "nf :: Float64 = 2", - "ni :: AutoType = 2", - "ns :: String = 2", - "nc :: ComplexF64 = 3", - "x = 2e-1, 2e-3, 0.1", - "2::AutoType", - "N = 1e-3", - "vc = ComplexF64[1 + 2im, 3]", - "3", - "--autotype", - "vf = [1.0, 3]", - "y = true", - "1+2im", - "s = \"use_qr\"", - "--stringtype", - "y", - ) - args = argsdict(args_copy) - empty!(args_copy) + args_copy = copy(ARGS) + empty!(args_copy) + push!( + args_copy, + "x", + "n = 1", + "nf :: Float64 = 2", + "ni :: AutoType = 2", + "ns :: String = 2", + "nc :: ComplexF64 = 3", + "x = 2e-1, 2e-3, 0.1", + "2::AutoType", + "N = 1e-3", + "vc = ComplexF64[1 + 2im, 3]", + "3", + "--autotype", + "vf = [1.0, 3]", + "y = true", + "1+2im", + "s = \"use_qr\"", + "--stringtype", + "y", + ) + args = argsdict(args_copy) + empty!(args_copy) - @test args["_arg1"] == "x" - @test args["nf"] == 2.0 - @test args["nc"] == 3.0 + 0.0im - @test args["ns"] == "2" - @test args["ni"] == 2 - @test args["y"] == true - @test args["N"] == 0.001 - @test args["x"] == (0.2, 0.002, 0.1) - @test args["_arg2"] == 2 - @test args["vc"] == Complex{Float64}[1.0 + 2.0im, 3.0 + 0.0im] - @test args["_arg3"] == "3" - @test args["vf"] == [1.0, 3.0] - @test args["n"] == 1 - @test args["_arg4"] == 1 + 2im - @test args["s"] == "use_qr" - @test args["_arg5"] == "y" + @test args["_arg1"] == "x" + @test args["nf"] == 2.0 + @test args["nc"] == 3.0 + 0.0im + @test args["ns"] == "2" + @test args["ni"] == 2 + @test args["y"] == true + @test args["N"] == 0.001 + @test args["x"] == (0.2, 0.002, 0.1) + @test args["_arg2"] == 2 + @test args["vc"] == Complex{Float64}[1.0 + 2.0im, 3.0 + 0.0im] + @test args["_arg3"] == "3" + @test args["vf"] == [1.0, 3.0] + @test args["n"] == 1 + @test args["_arg4"] == 1 + 2im + @test args["s"] == "use_qr" + @test args["_arg5"] == "y" - push!( - args_copy, - "x", - "n -> 1", - "nf :: Float64 -> 2", - "ni :: AutoType -> 2", - "ns :: String -> 2", - "nc :: ComplexF64 -> 3", - "x -> 2e-1, 2e-3, 0.1", - "2", - "N -> 1e-3", - "vc -> ComplexF64[1 + 2im, 3]", - "3", - "--autotype", - "vf -> [1.0, 3]", - "y -> true", - "1+2im", - "s -> \"use_qr\"", - "--stringtype", - "y", - ) - args = argsdict( - args_copy; - first_arg=2, - delim="->", - as_symbols=true, - default_named_type=String, - default_positional_type=ITensors.AutoType, - prefix="test", - ) - empty!(args_copy) + push!( + args_copy, + "x", + "n -> 1", + "nf :: 
Float64 -> 2", + "ni :: AutoType -> 2", + "ns :: String -> 2", + "nc :: ComplexF64 -> 3", + "x -> 2e-1, 2e-3, 0.1", + "2", + "N -> 1e-3", + "vc -> ComplexF64[1 + 2im, 3]", + "3", + "--autotype", + "vf -> [1.0, 3]", + "y -> true", + "1+2im", + "s -> \"use_qr\"", + "--stringtype", + "y", + ) + args = argsdict( + args_copy; + first_arg = 2, + delim = "->", + as_symbols = true, + default_named_type = String, + default_positional_type = ITensors.AutoType, + prefix = "test", + ) + empty!(args_copy) - @test args[:nf] == 2.0 - @test args[:ni] == 2 - @test args[:ns] == "2" - @test args[:nc] == 3.0 + 0.0im - @test args[:y] == true - @test args[:N] == "1e-3" - @test args[:x] == "2e-1, 2e-3, 0.1" - @test args[:test1] == 2 - @test args[:vc] == "ComplexF64[1 + 2im, 3]" - @test args[:test2] == 3 - @test args[:vf] == [1.0, 3.0] - @test args[:n] == "1" - @test args[:test3] == 1 + 2im - @test args[:s] == "use_qr" - @test args[:test4] == "y" + @test args[:nf] == 2.0 + @test args[:ni] == 2 + @test args[:ns] == "2" + @test args[:nc] == 3.0 + 0.0im + @test args[:y] == true + @test args[:N] == "1e-3" + @test args[:x] == "2e-1, 2e-3, 0.1" + @test args[:test1] == 2 + @test args[:vc] == "ComplexF64[1 + 2im, 3]" + @test args[:test2] == 3 + @test args[:vf] == [1.0, 3.0] + @test args[:n] == "1" + @test args[:test3] == 1 + 2im + @test args[:s] == "use_qr" + @test args[:test4] == "y" - # - # Check for some syntax errors - # + # + # Check for some syntax errors + # - push!(args_copy, "x y=2") - @test_throws ErrorException argsdict(args_copy) - empty!(args_copy) + push!(args_copy, "x y=2") + @test_throws ErrorException argsdict(args_copy) + empty!(args_copy) - push!(args_copy, "x=y=2") - @test_throws ErrorException argsdict(args_copy) - empty!(args_copy) + push!(args_copy, "x=y=2") + @test_throws ErrorException argsdict(args_copy) + empty!(args_copy) - push!(args_copy, "x::MyType = 2") - @test_throws UndefVarError argsdict(args_copy) - empty!(args_copy) + push!(args_copy, "x::MyType = 2") + @test_throws UndefVarError argsdict(args_copy) + empty!(args_copy) - push!(args_copy, "x = y") - @test_throws UndefVarError argsdict(args_copy) - empty!(args_copy) + push!(args_copy, "x = y") + @test_throws UndefVarError argsdict(args_copy) + empty!(args_copy) end diff --git a/test/base/test_broadcast.jl b/test/base/test_broadcast.jl index 9eb11ec00a..b858c47736 100644 --- a/test/base/test_broadcast.jl +++ b/test/base/test_broadcast.jl @@ -1,308 +1,308 @@ using ITensors, Test @testset "ITensor broadcast syntax" begin - i = Index(2, "i") - A = random_itensor(i, i') - B = random_itensor(i', i) - α = 2 - β = 3 - - @testset "Copy" begin - Bc = copy(B) - Bc .= A - @test Bc[1, 1] == A[1, 1] - @test Bc[2, 1] == A[1, 2] - @test Bc[1, 2] == A[2, 1] - @test Bc[2, 2] == A[2, 2] - end - - @testset "Fill" begin - Bc = copy(B) - Bc .= α - @test Bc[1, 1] == α - @test Bc[2, 1] == α - @test Bc[1, 2] == α - @test Bc[2, 2] == α - end - - @testset "Scaling" begin - Bc = copy(B) - Bc .*= α - @test Bc[1, 1] ≈ α * B[1, 1] - @test Bc[2, 1] ≈ α * B[2, 1] - @test Bc[1, 2] ≈ α * B[1, 2] - @test Bc[2, 2] ≈ α * B[2, 2] - end - - @testset "Dividing" begin - Bc = copy(B) - Bc ./= α - @test Bc[1, 1] ≈ B[1, 1] / α - @test Bc[2, 1] ≈ B[2, 1] / α - @test Bc[1, 2] ≈ B[1, 2] / α - @test Bc[2, 2] ≈ B[2, 2] / α - end - - @testset "Scalar multiplication (in-place)" begin - Bc = copy(B) - Bc .= α .* A - @test Bc[1, 1] ≈ α * A[1, 1] - @test Bc[2, 1] ≈ α * A[1, 2] - @test Bc[1, 2] ≈ α * A[2, 1] - @test Bc[2, 2] ≈ α * A[2, 2] - end - - @testset "Dividing (in-place)" begin - Bc = 
copy(B) - Bc .= A ./ α - @test Bc[1, 1] ≈ A[1, 1] / α - @test Bc[2, 1] ≈ A[1, 2] / α - @test Bc[1, 2] ≈ A[2, 1] / α - @test Bc[2, 2] ≈ A[2, 2] / α - - Bc = copy(B) - Bc .= α ./ A - @test Bc[1, 1] ≈ α / A[1, 1] - @test Bc[2, 1] ≈ α / A[1, 2] - @test Bc[1, 2] ≈ α / A[2, 1] - @test Bc[2, 2] ≈ α / A[2, 2] - - Bc = copy(B) - Bc .= Bc ./ A - @test Bc[1, 1] ≈ B[1, 1] / A[1, 1] - @test Bc[2, 1] ≈ B[2, 1] / A[1, 2] - @test Bc[1, 2] ≈ B[1, 2] / A[2, 1] - @test Bc[2, 2] ≈ B[2, 2] / A[2, 2] - - Bc = copy(B) - Bc .= A ./ Bc - @test Bc[1, 1] ≈ A[1, 1] / B[1, 1] - @test Bc[2, 1] ≈ A[1, 2] / B[2, 1] - @test Bc[1, 2] ≈ A[2, 1] / B[1, 2] - @test Bc[2, 2] ≈ A[2, 2] / B[2, 2] - end - - @testset "Add and divide (in-place)" begin - Bc = copy(B) - Bc .+= A ./ α - @test Bc[1, 1] ≈ B[1, 1] + A[1, 1] / α - @test Bc[2, 1] ≈ B[2, 1] + A[1, 2] / α - @test Bc[1, 2] ≈ B[1, 2] + A[2, 1] / α - @test Bc[2, 2] ≈ B[2, 2] + A[2, 2] / α - - Bc = copy(B) - Bc .+= α ./ A - @test Bc[1, 1] ≈ B[1, 1] + α / A[1, 1] - @test Bc[2, 1] ≈ B[2, 1] + α / A[1, 2] - @test Bc[1, 2] ≈ B[1, 2] + α / A[2, 1] - @test Bc[2, 2] ≈ B[2, 2] + α / A[2, 2] - end - - @testset "Subtract and divide (in-place)" begin - Bc = copy(B) - Bc .-= A ./ α - @test Bc[1, 1] ≈ B[1, 1] - A[1, 1] / α - @test Bc[2, 1] ≈ B[2, 1] - A[1, 2] / α - @test Bc[1, 2] ≈ B[1, 2] - A[2, 1] / α - @test Bc[2, 2] ≈ B[2, 2] - A[2, 2] / α - - Bc = copy(B) - Bc .-= α ./ A - @test Bc[1, 1] ≈ B[1, 1] - α / A[1, 1] - @test Bc[2, 1] ≈ B[2, 1] - α / A[1, 2] - @test Bc[1, 2] ≈ B[1, 2] - α / A[2, 1] - @test Bc[2, 2] ≈ B[2, 2] - α / A[2, 2] - end - - @testset "Scalar multiplication (out-of-place)" begin - Bc = α .* A - @test Bc[1, 1] ≈ α * A[1, 1] - @test Bc[2, 1] ≈ α * A[2, 1] - @test Bc[1, 2] ≈ α * A[1, 2] - @test Bc[2, 2] ≈ α * A[2, 2] - end - - @testset "Addition" begin - Bc = copy(B) - Bc .= A .+ Bc - @test Bc[1, 1] ≈ A[1, 1] + B[1, 1] - @test Bc[2, 1] ≈ A[1, 2] + B[2, 1] - @test Bc[1, 2] ≈ A[2, 1] + B[1, 2] - @test Bc[2, 2] ≈ A[2, 2] + B[2, 2] - end - - @testset "Addition (with α)" begin - Bc = copy(B) - Bc .+= A .* α - - @test Bc[1, 1] ≈ α * A[1, 1] + B[1, 1] - @test Bc[2, 1] ≈ α * A[1, 2] + B[2, 1] - @test Bc[1, 2] ≈ α * A[2, 1] + B[1, 2] - @test Bc[2, 2] ≈ α * A[2, 2] + B[2, 2] - end - - @testset "Addition (with α and β)" begin - Bc = copy(B) - Bc .= α .* A .+ β .* Bc - - @test Bc[1, 1] ≈ α * A[1, 1] + β * B[1, 1] - @test Bc[2, 1] ≈ α * A[1, 2] + β * B[2, 1] - @test Bc[1, 2] ≈ α * A[2, 1] + β * B[1, 2] - @test Bc[2, 2] ≈ α * A[2, 2] + β * B[2, 2] - end - - @testset "Addition errors" begin - C = random_itensor(i, i') - @test_throws ErrorException C .= A .+ B - @test_throws ErrorException C = A .+ B - @test_throws ErrorException C .= A .* B - end - - @testset "Contraction" begin - ii = Index(2; tags="ii") - jj = Index(2; tags="jj") - kk = Index(2; tags="kk") - - AA = random_itensor(ii, jj) - BB = random_itensor(kk, jj) - CC = random_itensor(kk, ii) - - R = copy(CC) - R .= AA .* BB - @test AA * BB ≈ R - - R = copy(CC) - R .= α .* AA .* BB - @test α * AA * BB ≈ R - - R = copy(CC) - R .= AA .* α .* BB - @test α * AA * BB ≈ R - - R = copy(CC) - R .= AA .* BB .* α - @test α * AA * BB ≈ R - - R = copy(CC) - R .+= α .* AA .* BB - @test α * AA * BB + CC ≈ R + i = Index(2, "i") + A = random_itensor(i, i') + B = random_itensor(i', i) + α = 2 + β = 3 + + @testset "Copy" begin + Bc = copy(B) + Bc .= A + @test Bc[1, 1] == A[1, 1] + @test Bc[2, 1] == A[1, 2] + @test Bc[1, 2] == A[2, 1] + @test Bc[2, 2] == A[2, 2] + end + + @testset "Fill" begin + Bc = copy(B) + Bc .= α + @test Bc[1, 1] == α + @test 
Bc[2, 1] == α + @test Bc[1, 2] == α + @test Bc[2, 2] == α + end + + @testset "Scaling" begin + Bc = copy(B) + Bc .*= α + @test Bc[1, 1] ≈ α * B[1, 1] + @test Bc[2, 1] ≈ α * B[2, 1] + @test Bc[1, 2] ≈ α * B[1, 2] + @test Bc[2, 2] ≈ α * B[2, 2] + end + + @testset "Dividing" begin + Bc = copy(B) + Bc ./= α + @test Bc[1, 1] ≈ B[1, 1] / α + @test Bc[2, 1] ≈ B[2, 1] / α + @test Bc[1, 2] ≈ B[1, 2] / α + @test Bc[2, 2] ≈ B[2, 2] / α + end + + @testset "Scalar multiplication (in-place)" begin + Bc = copy(B) + Bc .= α .* A + @test Bc[1, 1] ≈ α * A[1, 1] + @test Bc[2, 1] ≈ α * A[1, 2] + @test Bc[1, 2] ≈ α * A[2, 1] + @test Bc[2, 2] ≈ α * A[2, 2] + end + + @testset "Dividing (in-place)" begin + Bc = copy(B) + Bc .= A ./ α + @test Bc[1, 1] ≈ A[1, 1] / α + @test Bc[2, 1] ≈ A[1, 2] / α + @test Bc[1, 2] ≈ A[2, 1] / α + @test Bc[2, 2] ≈ A[2, 2] / α + + Bc = copy(B) + Bc .= α ./ A + @test Bc[1, 1] ≈ α / A[1, 1] + @test Bc[2, 1] ≈ α / A[1, 2] + @test Bc[1, 2] ≈ α / A[2, 1] + @test Bc[2, 2] ≈ α / A[2, 2] + + Bc = copy(B) + Bc .= Bc ./ A + @test Bc[1, 1] ≈ B[1, 1] / A[1, 1] + @test Bc[2, 1] ≈ B[2, 1] / A[1, 2] + @test Bc[1, 2] ≈ B[1, 2] / A[2, 1] + @test Bc[2, 2] ≈ B[2, 2] / A[2, 2] + + Bc = copy(B) + Bc .= A ./ Bc + @test Bc[1, 1] ≈ A[1, 1] / B[1, 1] + @test Bc[2, 1] ≈ A[1, 2] / B[2, 1] + @test Bc[1, 2] ≈ A[2, 1] / B[1, 2] + @test Bc[2, 2] ≈ A[2, 2] / B[2, 2] + end + + @testset "Add and divide (in-place)" begin + Bc = copy(B) + Bc .+= A ./ α + @test Bc[1, 1] ≈ B[1, 1] + A[1, 1] / α + @test Bc[2, 1] ≈ B[2, 1] + A[1, 2] / α + @test Bc[1, 2] ≈ B[1, 2] + A[2, 1] / α + @test Bc[2, 2] ≈ B[2, 2] + A[2, 2] / α + + Bc = copy(B) + Bc .+= α ./ A + @test Bc[1, 1] ≈ B[1, 1] + α / A[1, 1] + @test Bc[2, 1] ≈ B[2, 1] + α / A[1, 2] + @test Bc[1, 2] ≈ B[1, 2] + α / A[2, 1] + @test Bc[2, 2] ≈ B[2, 2] + α / A[2, 2] + end + + @testset "Subtract and divide (in-place)" begin + Bc = copy(B) + Bc .-= A ./ α + @test Bc[1, 1] ≈ B[1, 1] - A[1, 1] / α + @test Bc[2, 1] ≈ B[2, 1] - A[1, 2] / α + @test Bc[1, 2] ≈ B[1, 2] - A[2, 1] / α + @test Bc[2, 2] ≈ B[2, 2] - A[2, 2] / α + + Bc = copy(B) + Bc .-= α ./ A + @test Bc[1, 1] ≈ B[1, 1] - α / A[1, 1] + @test Bc[2, 1] ≈ B[2, 1] - α / A[1, 2] + @test Bc[1, 2] ≈ B[1, 2] - α / A[2, 1] + @test Bc[2, 2] ≈ B[2, 2] - α / A[2, 2] + end + + @testset "Scalar multiplication (out-of-place)" begin + Bc = α .* A + @test Bc[1, 1] ≈ α * A[1, 1] + @test Bc[2, 1] ≈ α * A[2, 1] + @test Bc[1, 2] ≈ α * A[1, 2] + @test Bc[2, 2] ≈ α * A[2, 2] + end + + @testset "Addition" begin + Bc = copy(B) + Bc .= A .+ Bc + @test Bc[1, 1] ≈ A[1, 1] + B[1, 1] + @test Bc[2, 1] ≈ A[1, 2] + B[2, 1] + @test Bc[1, 2] ≈ A[2, 1] + B[1, 2] + @test Bc[2, 2] ≈ A[2, 2] + B[2, 2] + end + + @testset "Addition (with α)" begin + Bc = copy(B) + Bc .+= A .* α + + @test Bc[1, 1] ≈ α * A[1, 1] + B[1, 1] + @test Bc[2, 1] ≈ α * A[1, 2] + B[2, 1] + @test Bc[1, 2] ≈ α * A[2, 1] + B[1, 2] + @test Bc[2, 2] ≈ α * A[2, 2] + B[2, 2] + end + + @testset "Addition (with α and β)" begin + Bc = copy(B) + Bc .= α .* A .+ β .* Bc + + @test Bc[1, 1] ≈ α * A[1, 1] + β * B[1, 1] + @test Bc[2, 1] ≈ α * A[1, 2] + β * B[2, 1] + @test Bc[1, 2] ≈ α * A[2, 1] + β * B[1, 2] + @test Bc[2, 2] ≈ α * A[2, 2] + β * B[2, 2] + end + + @testset "Addition errors" begin + C = random_itensor(i, i') + @test_throws ErrorException C .= A .+ B + @test_throws ErrorException C = A .+ B + @test_throws ErrorException C .= A .* B + end + + @testset "Contraction" begin + ii = Index(2; tags = "ii") + jj = Index(2; tags = "jj") + kk = Index(2; tags = "kk") + + AA = random_itensor(ii, jj) + BB = 
random_itensor(kk, jj) + CC = random_itensor(kk, ii) + + R = copy(CC) + R .= AA .* BB + @test AA * BB ≈ R + + R = copy(CC) + R .= α .* AA .* BB + @test α * AA * BB ≈ R + + R = copy(CC) + R .= AA .* α .* BB + @test α * AA * BB ≈ R + + R = copy(CC) + R .= AA .* BB .* α + @test α * AA * BB ≈ R + + R = copy(CC) + R .+= α .* AA .* BB + @test α * AA * BB + CC ≈ R - R = copy(CC) - R .= β .* R .+ AA .* BB .* α - @test α * AA * BB + β * CC ≈ R - end + R = copy(CC) + R .= β .* R .+ AA .* BB .* α + @test α * AA * BB + β * CC ≈ R + end - @testset "General functions" begin - absA = abs.(A) + @testset "General functions" begin + absA = abs.(A) - @test absA[1, 1] ≈ abs(A[1, 1]) - @test absA[2, 1] ≈ abs(A[2, 1]) + @test absA[1, 1] ≈ abs(A[1, 1]) + @test absA[2, 1] ≈ abs(A[2, 1]) - Bc = copy(B) - Bc .= sqrt.(absA) + Bc = copy(B) + Bc .= sqrt.(absA) - @test Bc[1, 1] ≈ sqrt(absA[1, 1]) - @test Bc[2, 1] ≈ sqrt(absA[1, 2]) + @test Bc[1, 1] ≈ sqrt(absA[1, 1]) + @test Bc[2, 1] ≈ sqrt(absA[1, 2]) - Bc2 = copy(B) - Bc2 .+= sqrt.(absA) + Bc2 = copy(B) + Bc2 .+= sqrt.(absA) - @test Bc2[1, 1] ≈ B[1, 1] + sqrt(absA[1, 1]) - @test Bc2[2, 1] ≈ B[2, 1] + sqrt(absA[1, 2]) - end + @test Bc2[1, 1] ≈ B[1, 1] + sqrt(absA[1, 1]) + @test Bc2[2, 1] ≈ B[2, 1] + sqrt(absA[1, 2]) + end - @testset "Some other operations" begin - i = Index(2) - A = random_itensor(i) - B = random_itensor(i) + @testset "Some other operations" begin + i = Index(2) + A = random_itensor(i) + B = random_itensor(i) - absA = abs.(A) + absA = abs.(A) - @test absA[1] ≈ abs(A[1]) - @test absA[2] ≈ abs(A[2]) + @test absA[1] ≈ abs(A[1]) + @test absA[2] ≈ abs(A[2]) - Bc = copy(B) - Bc .= sqrt.(absA) + Bc = copy(B) + Bc .= sqrt.(absA) - @test Bc[1] ≈ sqrt(absA[1]) - @test Bc[2] ≈ sqrt(absA[2]) + @test Bc[1] ≈ sqrt(absA[1]) + @test Bc[2] ≈ sqrt(absA[2]) - Bc2 = copy(B) - Bc2 .+= sqrt.(absA) + Bc2 = copy(B) + Bc2 .+= sqrt.(absA) - @test Bc2[1] ≈ B[1] + sqrt(absA[1]) - @test Bc2[2] ≈ B[2] + sqrt(absA[2]) + @test Bc2[1] ≈ B[1] + sqrt(absA[1]) + @test Bc2[2] ≈ B[2] + sqrt(absA[2]) - Bc3 = copy(B) - Bc3 .= sqrt.(absA) .+ sin.(Bc3) + Bc3 = copy(B) + Bc3 .= sqrt.(absA) .+ sin.(Bc3) - @test Bc3[1] ≈ sin(B[1]) + sqrt(absA[1]) - @test Bc3[2] ≈ sin(B[2]) + sqrt(absA[2]) + @test Bc3[1] ≈ sin(B[1]) + sqrt(absA[1]) + @test Bc3[2] ≈ sin(B[2]) + sqrt(absA[2]) - sqrtabsA = sqrt.(abs.(A)) + sqrtabsA = sqrt.(abs.(A)) - @test sqrtabsA[1] ≈ sqrt(abs(A[1])) - @test sqrtabsA[2] ≈ sqrt(abs(A[2])) + @test sqrtabsA[1] ≈ sqrt(abs(A[1])) + @test sqrtabsA[2] ≈ sqrt(abs(A[2])) - sqrtabsA = cos.(sin.(sqrt.(abs.(A)))) + sqrtabsA = cos.(sin.(sqrt.(abs.(A)))) - @test sqrtabsA[1] ≈ cos(sin(sqrt(abs(A[1])))) - @test sqrtabsA[2] ≈ cos(sin(sqrt(abs(A[2])))) + @test sqrtabsA[1] ≈ cos(sin(sqrt(abs(A[1])))) + @test sqrtabsA[2] ≈ cos(sin(sqrt(abs(A[2])))) - # Not currently supported - #Ap = A .+ 3 + # Not currently supported + #Ap = A .+ 3 - #@test Ap[1] ≈ A[1] + 3 - #@test Ap[2] ≈ A[2] + 3 + #@test Ap[1] ≈ A[1] + 3 + #@test Ap[2] ≈ A[2] + 3 - Apow1 = A .^ 2.0 + Apow1 = A .^ 2.0 - @test Apow1[1] ≈ A[1]^2 - @test Apow1[2] ≈ A[2]^2 + @test Apow1[1] ≈ A[1]^2 + @test Apow1[2] ≈ A[2]^2 - Apow2 = A .^ 3 + Apow2 = A .^ 3 - @test Apow2[1] ≈ A[1]^3 - @test Apow2[2] ≈ A[2]^3 + @test Apow2[1] ≈ A[1]^3 + @test Apow2[2] ≈ A[2]^3 - Ac = copy(A) - Ac .+= B .^ 2.0 + Ac = copy(A) + Ac .+= B .^ 2.0 - @test Ac[1] ≈ A[1] + B[1]^2 - @test Ac[2] ≈ A[2] + B[2]^2 + @test Ac[1] ≈ A[1] + B[1]^2 + @test Ac[2] ≈ A[2] + B[2]^2 - Ac = copy(A) - Ac .-= B .^ 2.0 + Ac = copy(A) + Ac .-= B .^ 2.0 - @test Ac[1] ≈ A[1] - B[1]^2 - @test Ac[2] 
≈ A[2] - B[2]^2 + @test Ac[1] ≈ A[1] - B[1]^2 + @test Ac[2] ≈ A[2] - B[2]^2 - Ac = copy(A) - Ac .-= B .^ 3 + Ac = copy(A) + Ac .-= B .^ 3 - @test Ac[1] ≈ A[1] - B[1]^3 - @test Ac[2] ≈ A[2] - B[2]^3 - end + @test Ac[1] ≈ A[1] - B[1]^3 + @test Ac[2] ≈ A[2] - B[2]^3 + end - @testset "Hadamard product" begin - i = Index(2, "i") - A = random_itensor(i, i') - B = random_itensor(i', i) + @testset "Hadamard product" begin + i = Index(2, "i") + A = random_itensor(i, i') + B = random_itensor(i', i) + + C = A ⊙ B + @test C[1, 1] ≈ A[1, 1] * B[1, 1] + @test C[1, 2] ≈ A[1, 2] * B[2, 1] + @test C[2, 1] ≈ A[2, 1] * B[1, 2] + @test C[2, 2] ≈ A[2, 2] * B[2, 2] + + Ac = copy(A) + Ac .= Ac .⊙ B + @test C[1, 1] ≈ A[1, 1] * B[1, 1] + @test C[1, 2] ≈ A[1, 2] * B[2, 1] + @test C[2, 1] ≈ A[2, 1] * B[1, 2] + @test C[2, 2] ≈ A[2, 2] * B[2, 2] - C = A ⊙ B - @test C[1, 1] ≈ A[1, 1] * B[1, 1] - @test C[1, 2] ≈ A[1, 2] * B[2, 1] - @test C[2, 1] ≈ A[2, 1] * B[1, 2] - @test C[2, 2] ≈ A[2, 2] * B[2, 2] - - Ac = copy(A) - Ac .= Ac .⊙ B - @test C[1, 1] ≈ A[1, 1] * B[1, 1] - @test C[1, 2] ≈ A[1, 2] * B[2, 1] - @test C[2, 1] ≈ A[2, 1] * B[1, 2] - @test C[2, 2] ≈ A[2, 2] * B[2, 2] - - D = random_itensor(i', Index(2)) - @test_throws ErrorException A ⊙ D - end + D = random_itensor(i', Index(2)) + @test_throws ErrorException A ⊙ D + end end diff --git a/test/base/test_combiner.jl b/test/base/test_combiner.jl index 9329b3183c..848c23a5ed 100644 --- a/test/base/test_combiner.jl +++ b/test/base/test_combiner.jl @@ -2,262 +2,262 @@ using ITensors, Test using Combinatorics: permutations @testset "Combiner" begin - i = Index(2, "i") - j = Index(3, "j") - k = Index(4, "k") - l = Index(5, "l") + i = Index(2, "i") + j = Index(3, "j") + k = Index(4, "k") + l = Index(5, "l") - A = random_itensor(i, j, k, l) + A = random_itensor(i, j, k, l) - @testset "Basic combiner properties" begin - C = combiner(i, j, k) - @test eltype(storage(C)) === Number - @test ITensors.data(C) isa NDTensors.NoData - @test NDTensors.uncombinedinds(NDTensors.tensor(C)) == (i, j, k) - C2 = copy(C) - @test eltype(storage(C2)) === Number - @test ITensors.data(C2) isa NDTensors.NoData - @test NDTensors.uncombinedinds(NDTensors.tensor(C2)) == (i, j, k) - end - - @testset "Empty combiner" begin - C = combiner() - @test order(C) == 0 - @test isnothing(combinedind(C)) - AC = A * C - @test A == AC - AC = C * A - @test A == AC + @testset "Basic combiner properties" begin + C = combiner(i, j, k) + @test eltype(storage(C)) === Number + @test ITensors.data(C) isa NDTensors.NoData + @test NDTensors.uncombinedinds(NDTensors.tensor(C)) == (i, j, k) + C2 = copy(C) + @test eltype(storage(C2)) === Number + @test ITensors.data(C2) isa NDTensors.NoData + @test NDTensors.uncombinedinds(NDTensors.tensor(C2)) == (i, j, k) + end - R = ITensor(0.0, j, l, k, i) - R .= A .* C - @test R == A + @testset "Empty combiner" begin + C = combiner() + @test order(C) == 0 + @test isnothing(combinedind(C)) + AC = A * C + @test A == AC + AC = C * A + @test A == AC - R = ITensor(j, l, k, i) - R .= A .* C - @test R == A - end + R = ITensor(0.0, j, l, k, i) + R .= A .* C + @test R == A - @testset "Two index combiner" begin - for inds_ij in permutations([i, j]) - C = combiner(inds_ij...) 
- c = combinedind(C) - B = A * C - @test hasinds(B, l, k, c) - @test c == commonind(B, C) - @test combinedind(C) == c - @test isnothing(combinedind(A)) - D = B * C - @test hasinds(D, i, j, k, l) - @test D ≈ A - @test isnothing(combinedind(D)) + R = ITensor(j, l, k, i) + R .= A .* C + @test R == A end - for inds_il in permutations([i, l]) - C = combiner(inds_il...) - c = combinedind(C) - B = A * C - @test hasinds(B, j, k) - @test c == commonind(B, C) - D = B * C - @test hasinds(D, i, j, k, l) - @test D ≈ A - end + @testset "Two index combiner" begin + for inds_ij in permutations([i, j]) + C = combiner(inds_ij...) + c = combinedind(C) + B = A * C + @test hasinds(B, l, k, c) + @test c == commonind(B, C) + @test combinedind(C) == c + @test isnothing(combinedind(A)) + D = B * C + @test hasinds(D, i, j, k, l) + @test D ≈ A + @test isnothing(combinedind(D)) + end - for inds_ik in permutations([i, k]) - C = combiner(inds_ik...) - c = combinedind(C) - B = A * C - @test hasinds(B, j, l) - @test c == commonind(B, C) - D = B * C - @test hasinds(D, i, j, k, l) - @test D ≈ A - end + for inds_il in permutations([i, l]) + C = combiner(inds_il...) + c = combinedind(C) + B = A * C + @test hasinds(B, j, k) + @test c == commonind(B, C) + D = B * C + @test hasinds(D, i, j, k, l) + @test D ≈ A + end - for inds_jk in permutations([j, k]) - C = combiner(inds_jk...) - c = combinedind(C) - B = A * C - @test hasinds(B, i, l) - @test c == commonind(B, C) - D = B * C - @test hasinds(D, i, j, k, l) - @test D ≈ A - D = C * B - @test hasinds(D, i, j, k, l) - @test D ≈ A - B = C * A - @test hasinds(B, i, l) - @test c == commonind(B, C) - D = B * C - @test hasinds(D, i, j, k, l) - @test D ≈ A - D = C * B - @test hasinds(D, i, j, k, l) - @test D ≈ A - end + for inds_ik in permutations([i, k]) + C = combiner(inds_ik...) + c = combinedind(C) + B = A * C + @test hasinds(B, j, l) + @test c == commonind(B, C) + D = B * C + @test hasinds(D, i, j, k, l) + @test D ≈ A + end - for inds_jl in permutations([j, l]) - C = combiner(inds_jl...) - c = combinedind(C) - B = A * C - @test hasinds(B, i, k) - @test c == commonind(B, C) - D = B * C - @test hasinds(D, i, j, k, l) - @test D ≈ A - D = C * B - @test hasinds(D, i, j, k, l) - @test D ≈ A - B = C * A - @test hasinds(B, i, k) - @test c == commonind(B, C) - D = B * C - @test hasinds(D, i, j, k, l) - @test D ≈ A - D = C * B - @test hasinds(D, i, j, k, l) - @test D ≈ A - end + for inds_jk in permutations([j, k]) + C = combiner(inds_jk...) + c = combinedind(C) + B = A * C + @test hasinds(B, i, l) + @test c == commonind(B, C) + D = B * C + @test hasinds(D, i, j, k, l) + @test D ≈ A + D = C * B + @test hasinds(D, i, j, k, l) + @test D ≈ A + B = C * A + @test hasinds(B, i, l) + @test c == commonind(B, C) + D = B * C + @test hasinds(D, i, j, k, l) + @test D ≈ A + D = C * B + @test hasinds(D, i, j, k, l) + @test D ≈ A + end - for inds_kl in permutations([k, l]) - C = combiner(inds_kl...) - c = combinedind(C) - B = A * C - @test hasinds(B, i, j) - @test c == commonind(B, C) - D = B * C - @test hasinds(D, i, j, k, l) - @test D ≈ A - D = C * B - @test hasinds(D, i, j, k, l) - @test D ≈ A - B = C * A - @test hasinds(B, i, j) - @test c == commonind(B, C) - D = B * C - @test hasinds(D, i, j, k, l) - @test D ≈ A - D = C * B - @test hasinds(D, i, j, k, l) - @test D ≈ A - end - end + for inds_jl in permutations([j, l]) + C = combiner(inds_jl...) 
+ c = combinedind(C) + B = A * C + @test hasinds(B, i, k) + @test c == commonind(B, C) + D = B * C + @test hasinds(D, i, j, k, l) + @test D ≈ A + D = C * B + @test hasinds(D, i, j, k, l) + @test D ≈ A + B = C * A + @test hasinds(B, i, k) + @test c == commonind(B, C) + D = B * C + @test hasinds(D, i, j, k, l) + @test D ≈ A + D = C * B + @test hasinds(D, i, j, k, l) + @test D ≈ A + end - @testset "Three index combiner" begin - for inds_ijl in permutations([i, j, l]) - C = combiner(inds_ijl...) - c = combinedind(C) - B = A * C - @test hasind(B, k) - @test c == commonind(B, C) - D = B * C - @test hasinds(D, i, j, k, l) - @test D ≈ A - D = C * B - @test hasinds(D, i, j, k, l) - @test D ≈ A - B = C * A - @test hasind(B, k) - @test c == commonind(B, C) - D = B * C - @test hasinds(D, i, j, k, l) - @test D ≈ A - D = C * B - @test hasinds(D, i, j, k, l) - @test D ≈ A + for inds_kl in permutations([k, l]) + C = combiner(inds_kl...) + c = combinedind(C) + B = A * C + @test hasinds(B, i, j) + @test c == commonind(B, C) + D = B * C + @test hasinds(D, i, j, k, l) + @test D ≈ A + D = C * B + @test hasinds(D, i, j, k, l) + @test D ≈ A + B = C * A + @test hasinds(B, i, j) + @test c == commonind(B, C) + D = B * C + @test hasinds(D, i, j, k, l) + @test D ≈ A + D = C * B + @test hasinds(D, i, j, k, l) + @test D ≈ A + end end - for inds_ijk in permutations([i, j, k]) - C = combiner(inds_ijk...) - c = combinedind(C) - B = A * C - @test hasind(B, l) - @test c == commonind(B, C) - D = B * C - @test hasinds(D, i, j, k, l) - @test D ≈ A - D = C * B - @test hasinds(D, i, j, k, l) - @test D ≈ A - B = C * A - @test hasind(B, l) - @test c == commonind(B, C) - D = B * C - @test hasinds(D, i, j, k, l) - @test D ≈ A - D = C * B - @test hasinds(D, i, j, k, l) - @test D ≈ A - end + @testset "Three index combiner" begin + for inds_ijl in permutations([i, j, l]) + C = combiner(inds_ijl...) + c = combinedind(C) + B = A * C + @test hasind(B, k) + @test c == commonind(B, C) + D = B * C + @test hasinds(D, i, j, k, l) + @test D ≈ A + D = C * B + @test hasinds(D, i, j, k, l) + @test D ≈ A + B = C * A + @test hasind(B, k) + @test c == commonind(B, C) + D = B * C + @test hasinds(D, i, j, k, l) + @test D ≈ A + D = C * B + @test hasinds(D, i, j, k, l) + @test D ≈ A + end + + for inds_ijk in permutations([i, j, k]) + C = combiner(inds_ijk...) + c = combinedind(C) + B = A * C + @test hasind(B, l) + @test c == commonind(B, C) + D = B * C + @test hasinds(D, i, j, k, l) + @test D ≈ A + D = C * B + @test hasinds(D, i, j, k, l) + @test D ≈ A + B = C * A + @test hasind(B, l) + @test c == commonind(B, C) + D = B * C + @test hasinds(D, i, j, k, l) + @test D ≈ A + D = C * B + @test hasinds(D, i, j, k, l) + @test D ≈ A + end - for inds_jkl in permutations([j, k, l]) - C = combiner(inds_jkl...) - c = combinedind(C) - B = A * C - @test hasind(B, i) - @test c == commonind(B, C) - D = B * C - @test hasinds(D, i, j, k, l) - @test D ≈ A - D = C * B - @test hasinds(D, i, j, k, l) - @test D ≈ A - B = C * A - @test hasind(B, i) - @test c == commonind(B, C) - D = B * C - @test hasinds(D, i, j, k, l) - @test D ≈ A - D = C * B - @test hasinds(D, i, j, k, l) - @test D ≈ A + for inds_jkl in permutations([j, k, l]) + C = combiner(inds_jkl...) 
+ c = combinedind(C) + B = A * C + @test hasind(B, i) + @test c == commonind(B, C) + D = B * C + @test hasinds(D, i, j, k, l) + @test D ≈ A + D = C * B + @test hasinds(D, i, j, k, l) + @test D ≈ A + B = C * A + @test hasind(B, i) + @test c == commonind(B, C) + D = B * C + @test hasinds(D, i, j, k, l) + @test D ≈ A + D = C * B + @test hasinds(D, i, j, k, l) + @test D ≈ A + end end - end - @testset "SVD/Combiner should play nice" begin - C = combiner(i, j, k) - c = combinedind(C) - Ac = A * C - U, S, V, spec, u, v = svd(Ac, c) - Uc = C * U - Ua, Sa, Va, spec, ua, va = svd(A, i, j, k) - replaceind!(Ua, ua, u) - @test A ≈ C * Ac - @test A ≈ Ac * C - @test Ua * C ≈ U - @test C * Ua ≈ U - @test Ua ≈ Uc - @test Uc * S * V ≈ A - @test (C * Ua) * S * V ≈ Ac - C = combiner(i, j) - c = combinedind(C) - Ac = A * C - U, S, V, spec, u, v = svd(Ac, c) - Uc = U * C - Ua, Sa, Va, spec, ua, va = svd(A, i, j) - replaceind!(Ua, ua, u) - @test Ua ≈ Uc - @test Ua * C ≈ U - @test C * Ua ≈ U - @test Uc * S * V ≈ A - @test (C * Ua) * S * V ≈ Ac - end + @testset "SVD/Combiner should play nice" begin + C = combiner(i, j, k) + c = combinedind(C) + Ac = A * C + U, S, V, spec, u, v = svd(Ac, c) + Uc = C * U + Ua, Sa, Va, spec, ua, va = svd(A, i, j, k) + replaceind!(Ua, ua, u) + @test A ≈ C * Ac + @test A ≈ Ac * C + @test Ua * C ≈ U + @test C * Ua ≈ U + @test Ua ≈ Uc + @test Uc * S * V ≈ A + @test (C * Ua) * S * V ≈ Ac + C = combiner(i, j) + c = combinedind(C) + Ac = A * C + U, S, V, spec, u, v = svd(Ac, c) + Uc = U * C + Ua, Sa, Va, spec, ua, va = svd(A, i, j) + replaceind!(Ua, ua, u) + @test Ua ≈ Uc + @test Ua * C ≈ U + @test C * Ua ≈ U + @test Uc * S * V ≈ A + @test (C * Ua) * S * V ≈ Ac + end - @testset "mult/Combiner should play nice" begin - C = combiner(i, j, k) - Ac = A * C - B = random_itensor(l) - AB = Ac * B - @test AB * C ≈ A * B - end + @testset "mult/Combiner should play nice" begin + C = combiner(i, j, k) + Ac = A * C + B = random_itensor(l) + AB = Ac * B + @test AB * C ≈ A * B + end - @testset "Replace index combiner" begin - C = combiner(l; tags="nl") - c = combinedind(C) - B = A * C - replaceind!(B, c, l) - @test B == A - end + @testset "Replace index combiner" begin + C = combiner(l; tags = "nl") + c = combinedind(C) + B = A * C + replaceind!(B, c, l) + @test B == A + end end diff --git a/test/base/test_ctmrg.jl b/test/base/test_ctmrg.jl index 8d7819a329..c81384a982 100644 --- a/test/base/test_ctmrg.jl +++ b/test/base/test_ctmrg.jl @@ -6,47 +6,47 @@ include(joinpath(src_dir, "ctmrg_isotropic.jl")) include(joinpath(src_dir, "2d_classical_ising.jl")) @testset "ctmrg" begin - # Make Ising model partition function - β = 1.1 * βc - d = 2 - s = Index(d, "Site") - sₕ = addtags(s, "horiz") - sᵥ = addtags(s, "vert") + # Make Ising model partition function + β = 1.1 * βc + d = 2 + s = Index(d, "Site") + sₕ = addtags(s, "horiz") + sᵥ = addtags(s, "vert") - T = ising_mpo(sₕ, sᵥ, β) + T = ising_mpo(sₕ, sᵥ, β) - χ0 = 1 - l = Index(χ0, "Link") - lₕ = addtags(l, "horiz") - lᵥ = addtags(l, "vert") + χ0 = 1 + l = Index(χ0, "Link") + lₕ = addtags(l, "horiz") + lᵥ = addtags(l, "vert") - # Initial CTM - Cₗᵤ = ITensor(lᵥ, lₕ) - Cₗᵤ[1, 1] = 1.0 + # Initial CTM + Cₗᵤ = ITensor(lᵥ, lₕ) + Cₗᵤ[1, 1] = 1.0 - # Initial HRTM - Aₗ = ITensor(lᵥ, lᵥ', sₕ) - Aₗ[lᵥ => 1, lᵥ' => 1, sₕ => 1] = 1.0 - Aₗ[lᵥ => 1, lᵥ' => 1, sₕ => 2] = 0.0 + # Initial HRTM + Aₗ = ITensor(lᵥ, lᵥ', sₕ) + Aₗ[lᵥ => 1, lᵥ' => 1, sₕ => 1] = 1.0 + Aₗ[lᵥ => 1, lᵥ' => 1, sₕ => 2] = 0.0 - Cₗᵤ, Aₗ = ctmrg(T, Cₗᵤ, Aₗ; χmax=20, nsteps=100) + Cₗᵤ, Aₗ = ctmrg(T, Cₗᵤ, Aₗ; 
χmax = 20, nsteps = 100) - lᵥ = commonind(Cₗᵤ, Aₗ) - lₕ = noncommoninds(Cₗᵤ, Aₗ)[1] + lᵥ = commonind(Cₗᵤ, Aₗ) + lₕ = noncommoninds(Cₗᵤ, Aₗ)[1] - Aᵤ = replaceinds(Aₗ, lᵥ => lₕ, lᵥ' => lₕ', sₕ => sᵥ) + Aᵤ = replaceinds(Aₗ, lᵥ => lₕ, lᵥ' => lₕ', sₕ => sᵥ) - ACₗ = Aₗ * Cₗᵤ * dag(Cₗᵤ') + ACₗ = Aₗ * Cₗᵤ * dag(Cₗᵤ') - ACTₗ = prime(ACₗ * dag(Aᵤ') * T * Aᵤ, -1) + ACTₗ = prime(ACₗ * dag(Aᵤ') * T * Aᵤ, -1) - κ = (ACTₗ * dag(ACₗ))[] + κ = (ACTₗ * dag(ACₗ))[] - @test κ ≈ exp(-β * ising_free_energy(β)) + @test κ ≈ exp(-β * ising_free_energy(β)) - # Calculate magnetization - Tsz = ising_mpo(sₕ, sᵥ, β; sz=true) - ACTszₗ = prime(ACₗ * dag(Aᵤ') * Tsz * Aᵤ, -1) - m = (ACTszₗ * dag(ACₗ))[] / κ - @test abs(m) ≈ ising_magnetization(β) + # Calculate magnetization + Tsz = ising_mpo(sₕ, sᵥ, β; sz = true) + ACTszₗ = prime(ACₗ * dag(Aᵤ') * Tsz * Aᵤ, -1) + m = (ACTszₗ * dag(ACₗ))[] / κ + @test abs(m) ≈ ising_magnetization(β) end diff --git a/test/base/test_debug_checks.jl b/test/base/test_debug_checks.jl index b0e6170cb7..943ed919f9 100644 --- a/test/base/test_debug_checks.jl +++ b/test/base/test_debug_checks.jl @@ -2,35 +2,35 @@ using ITensors using Test @testset "Test debug checks on IndexSet construction" begin - i = Index(2, "i") + i = Index(2, "i") - initially_using_debug_checks = ITensors.using_debug_checks() + initially_using_debug_checks = ITensors.using_debug_checks() - ITensors.disable_debug_checks() - @test !ITensors.using_debug_checks() - # Test that no error is thrown in constructor - @test ITensor(i, i) isa ITensor - @test ITensor(i, i') isa ITensor - - # Turn on debug checks - ITensors.enable_debug_checks() - @test ITensors.using_debug_checks() - @test_throws ErrorException ITensor(i, i) - # Test that no error is thrown in constructor - @test ITensor(i, i') isa ITensor + ITensors.disable_debug_checks() + @test !ITensors.using_debug_checks() + # Test that no error is thrown in constructor + @test ITensor(i, i) isa ITensor + @test ITensor(i, i') isa ITensor - # Turn off debug checks - ITensors.disable_debug_checks() - @test !ITensors.using_debug_checks() - # Test that no error is thrown in constructor - @test ITensor(i, i) isa ITensor - @test ITensor(i, i') isa ITensor + # Turn on debug checks + ITensors.enable_debug_checks() + @test ITensors.using_debug_checks() + @test_throws ErrorException ITensor(i, i) + # Test that no error is thrown in constructor + @test ITensor(i, i') isa ITensor - # Reset to the initial value - if !initially_using_debug_checks + # Turn off debug checks ITensors.disable_debug_checks() - else - ITensors.enable_debug_checks() - end - @test ITensors.using_debug_checks() == initially_using_debug_checks + @test !ITensors.using_debug_checks() + # Test that no error is thrown in constructor + @test ITensor(i, i) isa ITensor + @test ITensor(i, i') isa ITensor + + # Reset to the initial value + if !initially_using_debug_checks + ITensors.disable_debug_checks() + else + ITensors.enable_debug_checks() + end + @test ITensors.using_debug_checks() == initially_using_debug_checks end diff --git a/test/base/test_emptyitensor.jl b/test/base/test_emptyitensor.jl index b53c22d47d..7a47bf0099 100644 --- a/test/base/test_emptyitensor.jl +++ b/test/base/test_emptyitensor.jl @@ -2,102 +2,102 @@ using ITensors using Test @testset "Empty ITensor storage operations" begin - i, j, k = Index.(2, ("i", "j", "k")) + i, j, k = Index.(2, ("i", "j", "k")) - A = ITensor(i, j) - B = ITensor(j, k) + A = ITensor(i, j) + B = ITensor(j, k) - @test norm(A) == 0.0 - @test norm(B) == 0.0 + @test norm(A) == 0.0 + @test 
norm(B) == 0.0 - C = A * B - @test hassameinds(C, (i, k)) - @test storage(C) isa - ITensors.EmptyStorage{ITensors.EmptyNumber,<:ITensors.Dense{ITensors.EmptyNumber}} + C = A * B + @test hassameinds(C, (i, k)) + @test storage(C) isa + ITensors.EmptyStorage{ITensors.EmptyNumber, <:ITensors.Dense{ITensors.EmptyNumber}} - A = ITensor(Float64, i, j) - B = ITensor(j, k) - C = A * B - @test hassameinds(C, (i, k)) - @test storage(C) isa ITensors.EmptyStorage{Float64,<:ITensors.Dense{Float64}} + A = ITensor(Float64, i, j) + B = ITensor(j, k) + C = A * B + @test hassameinds(C, (i, k)) + @test storage(C) isa ITensors.EmptyStorage{Float64, <:ITensors.Dense{Float64}} - A = ITensor(i, j) - B = ITensor(ComplexF64, j, k) + A = ITensor(i, j) + B = ITensor(ComplexF64, j, k) - @test norm(A) == 0.0 - @test norm(B) == 0.0 - @test norm(B) isa Float64 + @test norm(A) == 0.0 + @test norm(B) == 0.0 + @test norm(B) isa Float64 - C = A * B - @test hassameinds(C, (i, k)) - @test storage(C) isa ITensors.EmptyStorage{ComplexF64,<:ITensors.Dense{ComplexF64}} + C = A * B + @test hassameinds(C, (i, k)) + @test storage(C) isa ITensors.EmptyStorage{ComplexF64, <:ITensors.Dense{ComplexF64}} - A = ITensor(Float64, i, j) - B = ITensor(ComplexF64, j, k) - C = A * B - @test hassameinds(C, (i, k)) - @test storage(C) isa ITensors.EmptyStorage{ComplexF64,<:ITensors.Dense{ComplexF64}} + A = ITensor(Float64, i, j) + B = ITensor(ComplexF64, j, k) + C = A * B + @test hassameinds(C, (i, k)) + @test storage(C) isa ITensors.EmptyStorage{ComplexF64, <:ITensors.Dense{ComplexF64}} end @testset "Empty ITensor storage addition" begin - i, j = Index.((2, 3)) + i, j = Index.((2, 3)) - A = ITensor(i, j) - B = random_itensor(j, i) + A = ITensor(i, j) + B = random_itensor(j, i) - C = A + B - @test inds(C) == (i, j) - @test C ≈ B + C = A + B + @test inds(C) == (i, j) + @test C ≈ B - C = B + A - @test inds(C) == (j, i) - @test C ≈ B + C = B + A + @test inds(C) == (j, i) + @test C ≈ B end @testset "Empty QN ITensor storage operations" begin - i = Index([QN(0) => 1, QN(1) => 1]) - A = ITensor(i', dag(i)) + i = Index([QN(0) => 1, QN(1) => 1]) + A = ITensor(i', dag(i)) - @test storage(A) isa ITensors.EmptyStorage{ - ITensors.EmptyNumber,<:ITensors.BlockSparse{ITensors.EmptyNumber} - } + @test storage(A) isa ITensors.EmptyStorage{ + ITensors.EmptyNumber, <:ITensors.BlockSparse{ITensors.EmptyNumber}, + } - C = A' * A + C = A' * A - @test hassameinds(C, (i'', i)) - @test storage(C) isa ITensors.EmptyStorage{ - ITensors.EmptyNumber,<:ITensors.BlockSparse{ITensors.EmptyNumber} - } + @test hassameinds(C, (i'', i)) + @test storage(C) isa ITensors.EmptyStorage{ + ITensors.EmptyNumber, <:ITensors.BlockSparse{ITensors.EmptyNumber}, + } - B = random_itensor(dag(i), i') + B = random_itensor(dag(i), i') - C = A' * B + C = A' * B - @test hassameinds(C, (i'', i)) - @test storage(C) isa ITensors.EmptyStorage{Float64,<:ITensors.BlockSparse{Float64}} + @test hassameinds(C, (i'', i)) + @test storage(C) isa ITensors.EmptyStorage{Float64, <:ITensors.BlockSparse{Float64}} - C = B' * A + C = B' * A - @test hassameinds(C, (i'', i)) - @test storage(C) isa ITensors.EmptyStorage{Float64,<:ITensors.BlockSparse{Float64}} + @test hassameinds(C, (i'', i)) + @test storage(C) isa ITensors.EmptyStorage{Float64, <:ITensors.BlockSparse{Float64}} - C = B + A - @test inds(C) == inds(B) - @test C ≈ B + C = B + A + @test inds(C) == inds(B) + @test C ≈ B - C = A + B - @test inds(C) == inds(A) - @test C ≈ B + C = A + B + @test inds(C) == inds(A) + @test C ≈ B end @testset "blockoffsets" for 
space in (2, [QN(0) => 1, QN(1) => 1]) - i = Index(space) - A = ITensor(i', dag(i)) - @test blockoffsets(A) == NDTensors.BlockOffsets{2}() + i = Index(space) + A = ITensor(i', dag(i)) + @test blockoffsets(A) == NDTensors.BlockOffsets{2}() end @testset "zero" for space in (2, [QN(0) => 1, QN(1) => 1]) - i = Index(space) - A = ITensor(i', dag(i)) - @test NDTensors.tensor(zero(A)) isa typeof(NDTensors.tensor(A)) + i = Index(space) + A = ITensor(i', dag(i)) + @test NDTensors.tensor(zero(A)) isa typeof(NDTensors.tensor(A)) end diff --git a/test/base/test_examples.jl b/test/base/test_examples.jl index f1db458ace..7616c452a1 100644 --- a/test/base/test_examples.jl +++ b/test/base/test_examples.jl @@ -3,12 +3,12 @@ using ITensors: ITensors using Suppressor: @capture_out using Test: @test_nowarn, @testset @testset "Example Codes" begin - @testset "Basic Ops $filename" for filename in ["basic_ops.jl", "qn_itensors.jl"] - @test_nowarn begin - @capture_out begin - include(joinpath(pkgdir(ITensors), "examples", "basic_ops", filename)) - end + @testset "Basic Ops $filename" for filename in ["basic_ops.jl", "qn_itensors.jl"] + @test_nowarn begin + @capture_out begin + include(joinpath(pkgdir(ITensors), "examples", "basic_ops", filename)) + end + end end - end end end diff --git a/test/base/test_exports.jl b/test/base/test_exports.jl index 0072b0b9d2..4eb92dd923 100644 --- a/test/base/test_exports.jl +++ b/test/base/test_exports.jl @@ -4,8 +4,8 @@ using Test: @test, @testset include("utils/TestITensorsExportedNames/TestITensorsExportedNames.jl") using .TestITensorsExportedNames: ITENSORS_EXPORTED_NAMES @testset "Test exports of ITensors" begin - # @show setdiff(names(ITensors), ITENSORS_EXPORTED_NAMES) - # @show setdiff(ITENSORS_EXPORTED_NAMES, names(ITensors)) - @test issetequal(names(ITensors), ITENSORS_EXPORTED_NAMES) + # @show setdiff(names(ITensors), ITENSORS_EXPORTED_NAMES) + # @show setdiff(ITENSORS_EXPORTED_NAMES, names(ITensors)) + @test issetequal(names(ITensors), ITENSORS_EXPORTED_NAMES) end end diff --git a/test/base/test_global_variables.jl b/test/base/test_global_variables.jl index a14db46760..30be263f8d 100644 --- a/test/base/test_global_variables.jl +++ b/test/base/test_global_variables.jl @@ -2,51 +2,51 @@ using ITensors using Test @testset "Warn ITensor order" begin - # Check it starts at default value - @test ITensors.get_warn_order() == ITensors.default_warn_order - - # Set to 4 and reset - @test ITensors.set_warn_order(4) == ITensors.default_warn_order - @test ITensors.get_warn_order() == 4 - @test ITensors.reset_warn_order() == 4 - @test ITensors.get_warn_order() == ITensors.default_warn_order + # Check it starts at default value + @test ITensors.get_warn_order() == ITensors.default_warn_order - # Disable it (set to nothing) and reset - @test ITensors.disable_warn_order() == ITensors.default_warn_order - @test isnothing(ITensors.get_warn_order()) - @test isnothing(ITensors.reset_warn_order()) - @test ITensors.get_warn_order() == ITensors.default_warn_order + # Set to 4 and reset + @test ITensors.set_warn_order(4) == ITensors.default_warn_order + @test ITensors.get_warn_order() == 4 + @test ITensors.reset_warn_order() == 4 + @test ITensors.get_warn_order() == ITensors.default_warn_order - # Disable macro - @test ITensors.get_warn_order() == ITensors.default_warn_order - ITensors.set_warn_order(6) - @test ITensors.get_warn_order() == 6 - @disable_warn_order begin + # Disable it (set to nothing) and reset + @test ITensors.disable_warn_order() == ITensors.default_warn_order @test 
isnothing(ITensors.get_warn_order()) - end - @test ITensors.get_warn_order() == 6 - ITensors.reset_warn_order() - @test ITensors.get_warn_order() == ITensors.default_warn_order + @test isnothing(ITensors.reset_warn_order()) + @test ITensors.get_warn_order() == ITensors.default_warn_order - # Set macro - @test ITensors.get_warn_order() == ITensors.default_warn_order - ITensors.set_warn_order(6) - @test ITensors.get_warn_order() == 6 - @set_warn_order 10 begin - @test ITensors.get_warn_order() == 10 - end - @test ITensors.get_warn_order() == 6 - ITensors.reset_warn_order() - @test ITensors.get_warn_order() == ITensors.default_warn_order + # Disable macro + @test ITensors.get_warn_order() == ITensors.default_warn_order + ITensors.set_warn_order(6) + @test ITensors.get_warn_order() == 6 + @disable_warn_order begin + @test isnothing(ITensors.get_warn_order()) + end + @test ITensors.get_warn_order() == 6 + ITensors.reset_warn_order() + @test ITensors.get_warn_order() == ITensors.default_warn_order - # Reset macro - @test ITensors.get_warn_order() == ITensors.default_warn_order - ITensors.set_warn_order!(6) - @test ITensors.get_warn_order() == 6 - @reset_warn_order begin - @test ITensors.get_warn_order() == ITensors.default_warn_order - end - @test ITensors.get_warn_order() == 6 - ITensors.reset_warn_order() - @test ITensors.get_warn_order() == ITensors.default_warn_order + # Set macro + @test ITensors.get_warn_order() == ITensors.default_warn_order + ITensors.set_warn_order(6) + @test ITensors.get_warn_order() == 6 + @set_warn_order 10 begin + @test ITensors.get_warn_order() == 10 + end + @test ITensors.get_warn_order() == 6 + ITensors.reset_warn_order() + @test ITensors.get_warn_order() == ITensors.default_warn_order + + # Reset macro + @test ITensors.get_warn_order() == ITensors.default_warn_order + ITensors.set_warn_order!(6) + @test ITensors.get_warn_order() == 6 + @reset_warn_order begin + @test ITensors.get_warn_order() == ITensors.default_warn_order + end + @test ITensors.get_warn_order() == 6 + ITensors.reset_warn_order() + @test ITensors.get_warn_order() == ITensors.default_warn_order end diff --git a/test/base/test_index.jl b/test/base/test_index.jl index 31fe1014a4..44012eee81 100644 --- a/test/base/test_index.jl +++ b/test/base/test_index.jl @@ -5,181 +5,181 @@ using Test import ITensors: In, Out, Neither @testset "Index" begin - @testset "Index with dim" begin - i = Index(2) - @test id(i) != 0 - @test hasid(i, id(i)) - @test dim(i) == 2 - @test dir(i) == Neither - @test plev(i) == 0 - @test tags(i) == TagSet("") - @test Int(i) == 2 - @test length(i) == 1 - @test Tuple(i) == (i,) - @test collect(i)[] === i - end - @testset "Index with all args" begin - i = Index(1, 2, In, "Link", 1) - @test id(i) == 1 - @test dim(i) == 2 - @test dir(i) == In - @test plev(i) == 1 - @test tags(i) == TagSet("Link") - j = copy(i) - @test id(j) == 1 - @test dim(j) == 2 - @test dir(j) == In - @test plev(j) == 1 - @test tags(j) == TagSet("Link") - @test j == i - end - @testset "prime" begin - i = Index(2) - @test plev(i) == 0 - i2 = prime(i, 2) - @test plev(i2) == 2 - i1 = i' - @test plev(i1) == 1 - i2 = i'' - @test plev(i2) == 2 - i3 = i''' - @test plev(i3) == 3 - i6 = i^6 - @test plev(i6) == 6 - i0 = noprime(i) - @test plev(i0) == 0 - end - @testset "IndexVal" begin - i = Index(2) - @test_deprecated i[1] - @test_deprecated i(1) - @test val(i => 1) == 1 - @test ind(i => 1) == i - @test isindequal(i, i => 2) - @test isindequal(i => 2, i) - @test plev(i' => 2) == 1 - @test val(i' => 2) == 2 - @test 
plev(prime(i => 2, 4)) == 4 - - @test plev(i => 2) == 0 - @test plev(i' => 2) == 1 - @test prime(i => 2) == (i' => 2) - @test IndexVal(i, 1) == Pair(i, 1) - iv = i => 2 - ĩv = sim(i => 2) - @test ind(iv) ≠ ind(ĩv) - @test val(iv) == val(ĩv) - end - @testset "Iteration" begin - i = Index(3) + @testset "Index with dim" begin + i = Index(2) + @test id(i) != 0 + @test hasid(i, id(i)) + @test dim(i) == 2 + @test dir(i) == Neither + @test plev(i) == 0 + @test tags(i) == TagSet("") + @test Int(i) == 2 + @test length(i) == 1 + @test Tuple(i) == (i,) + @test collect(i)[] === i + end + @testset "Index with all args" begin + i = Index(1, 2, In, "Link", 1) + @test id(i) == 1 + @test dim(i) == 2 + @test dir(i) == In + @test plev(i) == 1 + @test tags(i) == TagSet("Link") + j = copy(i) + @test id(j) == 1 + @test dim(j) == 2 + @test dir(j) == In + @test plev(j) == 1 + @test tags(j) == TagSet("Link") + @test j == i + end + @testset "prime" begin + i = Index(2) + @test plev(i) == 0 + i2 = prime(i, 2) + @test plev(i2) == 2 + i1 = i' + @test plev(i1) == 1 + i2 = i'' + @test plev(i2) == 2 + i3 = i''' + @test plev(i3) == 3 + i6 = i^6 + @test plev(i6) == 6 + i0 = noprime(i) + @test plev(i0) == 0 + end + @testset "IndexVal" begin + i = Index(2) + @test_deprecated i[1] + @test_deprecated i(1) + @test val(i => 1) == 1 + @test ind(i => 1) == i + @test isindequal(i, i => 2) + @test isindequal(i => 2, i) + @test plev(i' => 2) == 1 + @test val(i' => 2) == 2 + @test plev(prime(i => 2, 4)) == 4 - c = 1 - for iv in eachindval(i) - @test iv == (i => c) - c += 1 + @test plev(i => 2) == 0 + @test plev(i' => 2) == 1 + @test prime(i => 2) == (i' => 2) + @test IndexVal(i, 1) == Pair(i, 1) + iv = i => 2 + ĩv = sim(i => 2) + @test ind(iv) ≠ ind(ĩv) + @test val(iv) == val(ĩv) end + @testset "Iteration" begin + i = Index(3) - c = 1 - for n in eachval(i) - @test n == c - c += 1 + c = 1 + for iv in eachindval(i) + @test iv == (i => c) + c += 1 + end + + c = 1 + for n in eachval(i) + @test n == c + c += 1 + end + end + @testset "Broadcasting" begin + N = 3 + i = Index(2) + ps = (n - 1 for n in 1:4) + is = prime.(i, ps) + @test is[1] == i + @test is[2] == i' + @test is[3] == i'' + ts = ("i$n" for n in 1:4) + is = settags.(i, ts) + @test is[1] == addtags(i, "i1") + @test is[2] == addtags(i, "i2") + @test is[3] == addtags(i, "i3") end - end - @testset "Broadcasting" begin - N = 3 - i = Index(2) - ps = (n - 1 for n in 1:4) - is = prime.(i, ps) - @test is[1] == i - @test is[2] == i' - @test is[3] == i'' - ts = ("i$n" for n in 1:4) - is = settags.(i, ts) - @test is[1] == addtags(i, "i1") - @test is[2] == addtags(i, "i2") - @test is[3] == addtags(i, "i3") - end - @testset "Index ID random seed" begin - Random.seed!(index_id_rng(), 1234) - i = Index(2) - j = Index(2) - Random.seed!(index_id_rng(), 1234) - ĩ = Index(2) - j̃ = Index(2) - Random.seed!(index_id_rng(), 123) - ĩ′ = Index(2) - j̃′ = Index(2) - @test id(i) == id(ĩ) - @test id(j) == id(j̃) - @test id(i) ≠ id(ĩ′) - @test id(j) ≠ id(j̃′) + @testset "Index ID random seed" begin + Random.seed!(index_id_rng(), 1234) + i = Index(2) + j = Index(2) + Random.seed!(index_id_rng(), 1234) + ĩ = Index(2) + j̃ = Index(2) + Random.seed!(index_id_rng(), 123) + ĩ′ = Index(2) + j̃′ = Index(2) + @test id(i) == id(ĩ) + @test id(j) == id(j̃) + @test id(i) ≠ id(ĩ′) + @test id(j) ≠ id(j̃′) - Random.seed!(index_id_rng(), 1234) - Random.seed!(1234) - i = Index(2) - j = Index(2) - Random.seed!(1234) - ĩ = Index(2) - j̃ = Index(2) - Random.seed!(123) - ĩ′ = Index(2) - j̃′ = Index(2) - @test id(i) ≠ id(ĩ) - 
@test id(j) ≠ id(j̃) - @test id(i) ≠ id(ĩ′) - @test id(j) ≠ id(j̃′) + Random.seed!(index_id_rng(), 1234) + Random.seed!(1234) + i = Index(2) + j = Index(2) + Random.seed!(1234) + ĩ = Index(2) + j̃ = Index(2) + Random.seed!(123) + ĩ′ = Index(2) + j̃′ = Index(2) + @test id(i) ≠ id(ĩ) + @test id(j) ≠ id(j̃) + @test id(i) ≠ id(ĩ′) + @test id(j) ≠ id(j̃′) - Random.seed!(index_id_rng(), 1234) - Random.seed!(1234) - i = Index(2) - j = Index(2) - A = random_itensor(i, j) - Random.seed!(1234) - ĩ = Index(2) - j̃ = Index(2) - à = random_itensor(ĩ, j̃) - Random.seed!(123) - ĩ′ = Index(2) - j̃′ = Index(2) - Ã′ = random_itensor(ĩ′, j̃′) - @test id(i) ≠ id(ĩ) - @test id(j) ≠ id(j̃) - @test id(i) ≠ id(ĩ′) - @test id(j) ≠ id(j̃′) - @test all(tensor(A) .== tensor(Ã)) - @test all(tensor(A) .≠ tensor(Ã′)) + Random.seed!(index_id_rng(), 1234) + Random.seed!(1234) + i = Index(2) + j = Index(2) + A = random_itensor(i, j) + Random.seed!(1234) + ĩ = Index(2) + j̃ = Index(2) + à = random_itensor(ĩ, j̃) + Random.seed!(123) + ĩ′ = Index(2) + j̃′ = Index(2) + Ã′ = random_itensor(ĩ′, j̃′) + @test id(i) ≠ id(ĩ) + @test id(j) ≠ id(j̃) + @test id(i) ≠ id(ĩ′) + @test id(j) ≠ id(j̃′) + @test all(tensor(A) .== tensor(Ã)) + @test all(tensor(A) .≠ tensor(Ã′)) - Random.seed!(index_id_rng(), 1234) - Random.seed!(1234) - i = Index(2) - j = Index(2) - A = random_itensor(i, j) - Random.seed!(index_id_rng(), 1234) - Random.seed!(1234) - ĩ = Index(2) - j̃ = Index(2) - à = random_itensor(ĩ, j̃) - Random.seed!(index_id_rng(), 1234) - Random.seed!(123) - ĩ′ = Index(2) - j̃′ = Index(2) - Ã′ = random_itensor(ĩ′, j̃′) - @test id(i) == id(ĩ) - @test id(j) == id(j̃) - @test id(i) == id(ĩ′) - @test id(j) == id(j̃′) - @test all(tensor(A) .== tensor(Ã)) - @test all(tensor(A) .≠ tensor(Ã′)) - end - @testset "directsum" begin - i = Index(2, "i") - j = Index(3, "j") - ij = directsum(i, j; tags="test") - @test dim(ij) == dim(i) + dim(j) - @test hastags(ij, "test") - k = Index(4, "k") - ijk = directsum(i, j, k; tags="test2") - @test dim(ijk) == dim(i) + dim(j) + dim(k) - @test hastags(ijk, "test2") - end + Random.seed!(index_id_rng(), 1234) + Random.seed!(1234) + i = Index(2) + j = Index(2) + A = random_itensor(i, j) + Random.seed!(index_id_rng(), 1234) + Random.seed!(1234) + ĩ = Index(2) + j̃ = Index(2) + à = random_itensor(ĩ, j̃) + Random.seed!(index_id_rng(), 1234) + Random.seed!(123) + ĩ′ = Index(2) + j̃′ = Index(2) + Ã′ = random_itensor(ĩ′, j̃′) + @test id(i) == id(ĩ) + @test id(j) == id(j̃) + @test id(i) == id(ĩ′) + @test id(j) == id(j̃′) + @test all(tensor(A) .== tensor(Ã)) + @test all(tensor(A) .≠ tensor(Ã′)) + end + @testset "directsum" begin + i = Index(2, "i") + j = Index(3, "j") + ij = directsum(i, j; tags = "test") + @test dim(ij) == dim(i) + dim(j) + @test hastags(ij, "test") + k = Index(4, "k") + ijk = directsum(i, j, k; tags = "test2") + @test dim(ijk) == dim(i) + dim(j) + dim(k) + @test hastags(ijk, "test2") + end end diff --git a/test/base/test_indexset.jl b/test/base/test_indexset.jl index 7509fa375a..00c2f25b90 100644 --- a/test/base/test_indexset.jl +++ b/test/base/test_indexset.jl @@ -4,371 +4,371 @@ using Combinatorics using Compat @testset "IndexSet" begin - idim = 2 - jdim = 3 - kdim = 4 - ldim = 5 - i = Index(idim, "i") - j = Index(jdim, "j") - k = Index(kdim, "k") - l = Index(ldim, "l") - @testset "show" begin - indset = IndexSet(i, j, k) - @test length(sprint(show, indset)) > 1 - end - @testset "Basic constructors" begin - I = IndexSet(i, j, k) - @test IndexSet(I) === I - @test l ∈ IndexSet(I..., l) - @test l ∈ 
IndexSet(l, I...) - @test length(IndexSet(i, j)) == 2 - # construct with function - ind_list = [i, j, k] - I = IndexSet(ii -> ind_list[ii], 3) - @test i ∈ I - @test j ∈ I - @test k ∈ I - I = IndexSet(ii -> ind_list[ii], Order(3)) - @test i ∈ I - @test j ∈ I - @test k ∈ I - end - @testset "length of IndexSet and friends" begin - @test length(IndexSet(i, j)) == 2 - @test size(IndexSet(i, j)) == (length(IndexSet(i, j)),) - end - @testset "Convert to Index" begin - @test Index(IndexSet(i)) === i - @test_throws BoundsError Index(IndexSet(i, j)) - end - @testset "Index dimensions" begin - I = IndexSet(i, j, k) - @test dim(I) == idim * jdim * kdim - @test dims(I) == [idim, jdim, kdim] - @test dim(I, 1) == idim - @test dim(I, 2) == jdim - @test dim(I, 3) == kdim - - @test maxdim(I) == max(idim, jdim, kdim) - end - - @testset "Set operations" begin - I1 = @inferred(IndexSet(i, j, k)) - I2 = @inferred(IndexSet(k, l)) - I3 = @inferred(IndexSet(j, l)) - @test I1 isa Vector{Index{Int}} - @test @inferred(hassameinds(I1, (k, j, i))) - @test @inferred(Nothing, getfirst(setdiff(I1, I2, I3))) == i - @test isnothing(@inferred(Nothing, getfirst(setdiff(I1, IndexSet(k, j, i))))) - @test @inferred(setdiff(I1, I2)) == [i, j] - @test hassameinds(@inferred(setdiff(I1, I2)), IndexSet(i, j)) - @test hassameinds(@inferred(setdiff(I1, I2)), (j, i)) - @test I1 ∩ I2 == [k] - @test hassameinds(I1 ∩ I2, IndexSet(k)) - @test @inferred(Nothing, getfirst(intersect(I1, I2))) == k - @test isnothing(@inferred(Nothing, getfirst(intersect(I1, IndexSet(l))))) - @test @inferred(intersect(I1, IndexSet(j, l))) == [j] - @test hassameinds(@inferred(intersect(I1, IndexSet(j, l))), IndexSet(j)) - @test @inferred(Nothing, getfirst(intersect(I1, IndexSet(j, l)))) == j - @test @inferred(intersect(I1, IndexSet(j, k))) == [j, k] - @test hassameinds(@inferred(intersect(I1, (j, k))), IndexSet(j, k)) - @test hassameinds(@inferred(intersect(I1, (j, k, l))), (j, k)) - @test @inferred(filterinds(I1, "i")) == IndexSet(i) - @test @inferred(filterinds(I1; tags="i")) == IndexSet(i) - @test @inferred(filterinds(I1; inds=j)) == IndexSet(j) - @test @inferred(filterinds(I1; tags="i", inds=j)) == IndexSet() - @test @inferred(filterinds(I1; plev=1, inds=j)) == IndexSet() - @test @inferred(filterinds(I1; plev=0, inds=k)) == IndexSet(k) - @test @inferred(filterinds(I1; plev=0)) == IndexSet(i, j, k) - @test @inferred(filterinds(I1; inds=l)) == IndexSet() - @test @inferred(hassameinds(filter(I1, "i"), IndexSet(i))) - @test @inferred(Nothing, getfirst(I1, "j")) == j - @test isnothing(@inferred(Nothing, getfirst(I1, "l"))) - @test @inferred(Nothing, findfirst(I1, i)) == 1 - @test @inferred(Nothing, findfirst(I1, j)) == 2 - @test @inferred(Nothing, findfirst(I1, k)) == 3 - @test isnothing(@inferred(Nothing, findfirst(I1, Index(2)))) - end - - @testset "Set operations with Order" begin - i, j, k, l = Index.(2, ("i", "j", "k", "l")) - - Iij = IndexSet(i, j) - Ijl = IndexSet(j, l) - Ikl = IndexSet(k, l) - Iijk = IndexSet(i, j, k) - - # - # setdiff - # intersect - # symdiff - # union - # filter - # - - # - # setdiff - # - - @test @inferred(setdiff(Iijk, Ikl)) == [i, j] - - @test @inferred(setdiff(Iij, Iijk)) == Index{Int}[] - - @test @inferred(uniqueinds(Iijk, Ikl; tags="i")) == [i] - - @test @inferred(uniqueinds(Iijk, Ikl; tags=not("i"))) == [j] - - @test @inferred(setdiff(Iijk, Ijl, Ikl)) == [i] - - # - # intersect - # - - @test @inferred(intersect(Iijk, Ikl)) == [k] - - @test @inferred(intersect(Iijk, Iij)) == [i, j] - - @test @inferred(commoninds(Iijk, Iij; 
tags="i")) == [i] - - # - # symdiff - # - - @test @inferred(symdiff(Iijk, Ikl)) == [i, j, l] - - @test @inferred(symdiff(Iijk, Iij)) == [k] - - # - # union - # - - @test @inferred(union(Iijk, Ikl)) == [i, j, k, l] - - @test @inferred(union(Iijk, Iij)) == [i, j, k] - end - - @testset "intersect index ordering" begin - I = IndexSet(i, k, j) - J = IndexSet(j, l, i) - # Test that intersect respects the ordering - # of the indices in the first IndexSet - @test @inferred(hassameinds(intersect(I, J), IndexSet(i, j))) - @test @inferred(hassameinds(intersect(J, I), IndexSet(j, i))) - end - @testset "adjoint" begin - I = IndexSet(i, k, j) - @test adjoint(I) == IndexSet(i', k', j') - end - @testset "mapprime" begin - I = IndexSet(i', k'', j) - @test mapprime(I, 1, 5) == IndexSet(i^5, k'', j) - @test mapprime(I, 2, 0) == IndexSet(i', k, j) - - J = IndexSet(i, j, k') - @test mapprime(J, 0, 2) == IndexSet(i'', j'', k') - - J = mapprime(J, 1, 5) - @test J == IndexSet(i, j, k^5) - end - @testset "strides" begin - I = IndexSet(i, j) - @test NDTensors.dim_to_strides(I) == (1, idim) - @test NDTensors.dim_to_stride(I, 1) == 1 - @test NDTensors.dim_to_stride(I, 2) == idim - end - @testset "setprime" begin - I = IndexSet(i, j) - J = setprime(I, 2, i) - @test i'' ∈ J - end - @testset "prime" begin - I = IndexSet(i, j) - J = prime(I, j) - @test i ∈ J - @test j' ∈ J - J = prime(I; inds=j) - @test i ∈ J - @test j' ∈ J - J = prime(I; inds=not(j)) - @test i' ∈ J - @test j ∈ J - end - @testset "noprime" begin - I = IndexSet(i'', j') - J = noprime(I) - @test i ∈ J - @test j ∈ J - end - @testset "swapprime" begin - I = IndexSet(i, j) - @test swapprime(I, 0, 1) == IndexSet(i', j') - @test swapprime(I, 0, 4) == IndexSet(i^4, j^4) - I = IndexSet(i, j'') - @test swapprime(I, 2, 0) == IndexSet(i'', j) - I = IndexSet(i, j'', k, l) - @test swapprime(I, 2, 0) == IndexSet(i'', j, k'', l'') - I = IndexSet(i, k'', j'') - @test swapprime(I, 2, 1) == IndexSet(i, k', j') - # In-place version: - I = IndexSet(i, k'', j''') - I = swapprime(I, 2, 0) - @test I == IndexSet(i'', k, j''') - # With tags specified: - I = IndexSet(i, k, j) - @test swapprime(I, 0, 1, "i") == IndexSet(i', k, j) - @test swapprime(I, 0, 1, "j") == IndexSet(i, k, j') - - I = IndexSet(i, i', j) - @test swapprime(I, 0, 1, "i") == IndexSet(i', i, j) - @test swapprime(I, 0, 1, "j") == IndexSet(i, i', j') - end - - @testset "swaptags" begin - i1 = Index(2, "Site,A") - i2 = Index(2, "Site,B") - is = IndexSet(i1, i2) - sis = swaptags(is, "Site", "Link") - for j in sis - @test !hastags(j, "Site") - @test hastags(j, "Link") + idim = 2 + jdim = 3 + kdim = 4 + ldim = 5 + i = Index(idim, "i") + j = Index(jdim, "j") + k = Index(kdim, "k") + l = Index(ldim, "l") + @testset "show" begin + indset = IndexSet(i, j, k) + @test length(sprint(show, indset)) > 1 end - end - - @testset "hastags" begin - i = Index(2, "i, x") - j = Index(2, "j, x") - is = IndexSet(i, j) - @test hastags(is, "i") - @test anyhastags(is, "i") - @test !allhastags(is, "i") - @test allhastags(is, "x") - end - - @testset "broadcasting" begin - x = Index([QN(n) => 1 for n in 0:1], "x") - y = Index([QN(n) => 2 for n in 0:1], "y") - I = IndexSet(x, y) - - # prime - J = prime.(I) - # broken for now - #@inferred broadcast(prime, I) - @test J isa IndexSet - @test x' ∈ J - @test y' ∈ J - - # prime 2 - J = prime.(I, 2) - # broken for now - #@inferred broadcast(prime, I, 2) - @test J isa IndexSet - @test x'' ∈ J - @test y'' ∈ J - - # tag - J = addtags.(I, "t") - # broken for now - #@inferred broadcast(addtags, I, "t") - 
@test J isa IndexSet - @test addtags(x, "t") ∈ J - @test addtags(y, "t") ∈ J - - # dag - J = dag.(I) - # broken for now - #@inferred broadcast(dag, I) - @test J isa IndexSet - @test x ∈ J - @test y ∈ J - @test dir(J[1]) == -dir(I[1]) - @test dir(J, x) == -dir(I, x) - @test dir(J[2]) == -dir(I[2]) - @test dir(J, y) == -dir(I, y) - @test ITensors.dirs(J, (x, y)) == [-dir(I, x), -dir(I, y)] - @test ITensors.dirs(J) == [-dir(I, x), -dir(I, y)] - - # dir - dirsI = dir.(I) - # broken for now - #@inferred broadcast(dir, I) - @test dirsI isa Vector{ITensors.Arrow} - @test dirsI == [ITensors.Out, ITensors.Out] - - # dims - dimsI = dim.(I) - # broken for now - #@inferred broadcast(dim, I) - @test dimsI isa Vector{Int} - @test dimsI == [2, 4] - - # pairs - J = prime.(I) - pairsI = I .=> J - #@inferred broadcast(=>, I, J) - @test pairsI isa Vector{<:Pair} - @test pairsI == [x => x', y => y'] - - pairsI = I .=> 1 - #@inferred broadcast(=>, I, 1) - @test pairsI isa Vector{<:Pair} - @test pairsI == [x => 1, y => 1] - - pairsI = I .=> (1, 2) - #@inferred broadcast(=>, I, (1, 2)) - @test pairsI isa Vector{<:Pair} - @test pairsI == [x => 1, y => 2] - - pairsI = I .=> [1, 2] - #@inferred broadcast(=>, I, [1, 2]) - @test pairsI isa Vector{<:Pair} - @test pairsI == [x => 1, y => 2] - end - - @testset "ITensors.indpairs" begin - si = [QN(0) => 1, QN(1) => 2, QN(2) => 3] - sj = [QN(0) => 2, QN(1) => 3, QN(2) => 4] - sk = [QN(0) => 3, QN(1) => 4, QN(2) => 5] - sl = [QN(0) => 2] - i, j, k, l = Index.((si, sj, sk, sl), ("i", "j", "k", "l")) - T = random_itensor(dag(j), k', i', dag(k), j', dag(i)) - ip = ITensors.indpairs(T) - i1 = first.(ip) - i2 = last.(ip) - @test i1' == i2 - for x in i1 - @test dir(x) == dir(T, x) + @testset "Basic constructors" begin + I = IndexSet(i, j, k) + @test IndexSet(I) === I + @test l ∈ IndexSet(I..., l) + @test l ∈ IndexSet(l, I...) 
+ @test length(IndexSet(i, j)) == 2 + # construct with function + ind_list = [i, j, k] + I = IndexSet(ii -> ind_list[ii], 3) + @test i ∈ I + @test j ∈ I + @test k ∈ I + I = IndexSet(ii -> ind_list[ii], Order(3)) + @test i ∈ I + @test j ∈ I + @test k ∈ I end - for x in i2 - @test dir(x) == dir(T, x) + @testset "length of IndexSet and friends" begin + @test length(IndexSet(i, j)) == 2 + @test size(IndexSet(i, j)) == (length(IndexSet(i, j)),) end - end - - @testset "permute" begin - i, j, k = Index.(Ref([QN() => 2]), ("i", "j", "k")) - is1 = (dag(i), j, dag(k)) - is2 = (i, dag(j), k) - for x1 in permutations(is1), x2 in permutations(is2) - # permute x1 into the ordering of x2 - px1 = permute(x1, x2) - @test px1 == x2 - for y in x1 - @test dir(x1, y) == dir(px1, y) - @test -dir(x2, y) == dir(px1, y) - end - # permute x2 into the ordering of x1 - px2 = permute(x2, x1) - @test px2 == x1 - for y in x2 - @test dir(x2, y) == dir(px2, y) - @test -dir(x1, y) == dir(px2, y) - end + @testset "Convert to Index" begin + @test Index(IndexSet(i)) === i + @test_throws BoundsError Index(IndexSet(i, j)) + end + @testset "Index dimensions" begin + I = IndexSet(i, j, k) + @test dim(I) == idim * jdim * kdim + @test dims(I) == [idim, jdim, kdim] + @test dim(I, 1) == idim + @test dim(I, 2) == jdim + @test dim(I, 3) == kdim + + @test maxdim(I) == max(idim, jdim, kdim) + end + + @testset "Set operations" begin + I1 = @inferred(IndexSet(i, j, k)) + I2 = @inferred(IndexSet(k, l)) + I3 = @inferred(IndexSet(j, l)) + @test I1 isa Vector{Index{Int}} + @test @inferred(hassameinds(I1, (k, j, i))) + @test @inferred(Nothing, getfirst(setdiff(I1, I2, I3))) == i + @test isnothing(@inferred(Nothing, getfirst(setdiff(I1, IndexSet(k, j, i))))) + @test @inferred(setdiff(I1, I2)) == [i, j] + @test hassameinds(@inferred(setdiff(I1, I2)), IndexSet(i, j)) + @test hassameinds(@inferred(setdiff(I1, I2)), (j, i)) + @test I1 ∩ I2 == [k] + @test hassameinds(I1 ∩ I2, IndexSet(k)) + @test @inferred(Nothing, getfirst(intersect(I1, I2))) == k + @test isnothing(@inferred(Nothing, getfirst(intersect(I1, IndexSet(l))))) + @test @inferred(intersect(I1, IndexSet(j, l))) == [j] + @test hassameinds(@inferred(intersect(I1, IndexSet(j, l))), IndexSet(j)) + @test @inferred(Nothing, getfirst(intersect(I1, IndexSet(j, l)))) == j + @test @inferred(intersect(I1, IndexSet(j, k))) == [j, k] + @test hassameinds(@inferred(intersect(I1, (j, k))), IndexSet(j, k)) + @test hassameinds(@inferred(intersect(I1, (j, k, l))), (j, k)) + @test @inferred(filterinds(I1, "i")) == IndexSet(i) + @test @inferred(filterinds(I1; tags = "i")) == IndexSet(i) + @test @inferred(filterinds(I1; inds = j)) == IndexSet(j) + @test @inferred(filterinds(I1; tags = "i", inds = j)) == IndexSet() + @test @inferred(filterinds(I1; plev = 1, inds = j)) == IndexSet() + @test @inferred(filterinds(I1; plev = 0, inds = k)) == IndexSet(k) + @test @inferred(filterinds(I1; plev = 0)) == IndexSet(i, j, k) + @test @inferred(filterinds(I1; inds = l)) == IndexSet() + @test @inferred(hassameinds(filter(I1, "i"), IndexSet(i))) + @test @inferred(Nothing, getfirst(I1, "j")) == j + @test isnothing(@inferred(Nothing, getfirst(I1, "l"))) + @test @inferred(Nothing, findfirst(I1, i)) == 1 + @test @inferred(Nothing, findfirst(I1, j)) == 2 + @test @inferred(Nothing, findfirst(I1, k)) == 3 + @test isnothing(@inferred(Nothing, findfirst(I1, Index(2)))) + end + + @testset "Set operations with Order" begin + i, j, k, l = Index.(2, ("i", "j", "k", "l")) + + Iij = IndexSet(i, j) + Ijl = IndexSet(j, l) + Ikl = IndexSet(k, l) 
+ Iijk = IndexSet(i, j, k) + + # + # setdiff + # intersect + # symdiff + # union + # filter + # + + # + # setdiff + # + + @test @inferred(setdiff(Iijk, Ikl)) == [i, j] + + @test @inferred(setdiff(Iij, Iijk)) == Index{Int}[] + + @test @inferred(uniqueinds(Iijk, Ikl; tags = "i")) == [i] + + @test @inferred(uniqueinds(Iijk, Ikl; tags = not("i"))) == [j] + + @test @inferred(setdiff(Iijk, Ijl, Ikl)) == [i] + + # + # intersect + # + + @test @inferred(intersect(Iijk, Ikl)) == [k] + + @test @inferred(intersect(Iijk, Iij)) == [i, j] + + @test @inferred(commoninds(Iijk, Iij; tags = "i")) == [i] + + # + # symdiff + # + + @test @inferred(symdiff(Iijk, Ikl)) == [i, j, l] + + @test @inferred(symdiff(Iijk, Iij)) == [k] + + # + # union + # + + @test @inferred(union(Iijk, Ikl)) == [i, j, k, l] + + @test @inferred(union(Iijk, Iij)) == [i, j, k] + end + + @testset "intersect index ordering" begin + I = IndexSet(i, k, j) + J = IndexSet(j, l, i) + # Test that intersect respects the ordering + # of the indices in the first IndexSet + @test @inferred(hassameinds(intersect(I, J), IndexSet(i, j))) + @test @inferred(hassameinds(intersect(J, I), IndexSet(j, i))) + end + @testset "adjoint" begin + I = IndexSet(i, k, j) + @test adjoint(I) == IndexSet(i', k', j') + end + @testset "mapprime" begin + I = IndexSet(i', k'', j) + @test mapprime(I, 1, 5) == IndexSet(i^5, k'', j) + @test mapprime(I, 2, 0) == IndexSet(i', k, j) + + J = IndexSet(i, j, k') + @test mapprime(J, 0, 2) == IndexSet(i'', j'', k') + + J = mapprime(J, 1, 5) + @test J == IndexSet(i, j, k^5) + end + @testset "strides" begin + I = IndexSet(i, j) + @test NDTensors.dim_to_strides(I) == (1, idim) + @test NDTensors.dim_to_stride(I, 1) == 1 + @test NDTensors.dim_to_stride(I, 2) == idim + end + @testset "setprime" begin + I = IndexSet(i, j) + J = setprime(I, 2, i) + @test i'' ∈ J + end + @testset "prime" begin + I = IndexSet(i, j) + J = prime(I, j) + @test i ∈ J + @test j' ∈ J + J = prime(I; inds = j) + @test i ∈ J + @test j' ∈ J + J = prime(I; inds = not(j)) + @test i' ∈ J + @test j ∈ J + end + @testset "noprime" begin + I = IndexSet(i'', j') + J = noprime(I) + @test i ∈ J + @test j ∈ J + end + @testset "swapprime" begin + I = IndexSet(i, j) + @test swapprime(I, 0, 1) == IndexSet(i', j') + @test swapprime(I, 0, 4) == IndexSet(i^4, j^4) + I = IndexSet(i, j'') + @test swapprime(I, 2, 0) == IndexSet(i'', j) + I = IndexSet(i, j'', k, l) + @test swapprime(I, 2, 0) == IndexSet(i'', j, k'', l'') + I = IndexSet(i, k'', j'') + @test swapprime(I, 2, 1) == IndexSet(i, k', j') + # In-place version: + I = IndexSet(i, k'', j''') + I = swapprime(I, 2, 0) + @test I == IndexSet(i'', k, j''') + # With tags specified: + I = IndexSet(i, k, j) + @test swapprime(I, 0, 1, "i") == IndexSet(i', k, j) + @test swapprime(I, 0, 1, "j") == IndexSet(i, k, j') + + I = IndexSet(i, i', j) + @test swapprime(I, 0, 1, "i") == IndexSet(i', i, j) + @test swapprime(I, 0, 1, "j") == IndexSet(i, i', j') + end + + @testset "swaptags" begin + i1 = Index(2, "Site,A") + i2 = Index(2, "Site,B") + is = IndexSet(i1, i2) + sis = swaptags(is, "Site", "Link") + for j in sis + @test !hastags(j, "Site") + @test hastags(j, "Link") + end + end + + @testset "hastags" begin + i = Index(2, "i, x") + j = Index(2, "j, x") + is = IndexSet(i, j) + @test hastags(is, "i") + @test anyhastags(is, "i") + @test !allhastags(is, "i") + @test allhastags(is, "x") + end + + @testset "broadcasting" begin + x = Index([QN(n) => 1 for n in 0:1], "x") + y = Index([QN(n) => 2 for n in 0:1], "y") + I = IndexSet(x, y) + + # prime + J = 
prime.(I) + # broken for now + #@inferred broadcast(prime, I) + @test J isa IndexSet + @test x' ∈ J + @test y' ∈ J + + # prime 2 + J = prime.(I, 2) + # broken for now + #@inferred broadcast(prime, I, 2) + @test J isa IndexSet + @test x'' ∈ J + @test y'' ∈ J + + # tag + J = addtags.(I, "t") + # broken for now + #@inferred broadcast(addtags, I, "t") + @test J isa IndexSet + @test addtags(x, "t") ∈ J + @test addtags(y, "t") ∈ J + + # dag + J = dag.(I) + # broken for now + #@inferred broadcast(dag, I) + @test J isa IndexSet + @test x ∈ J + @test y ∈ J + @test dir(J[1]) == -dir(I[1]) + @test dir(J, x) == -dir(I, x) + @test dir(J[2]) == -dir(I[2]) + @test dir(J, y) == -dir(I, y) + @test ITensors.dirs(J, (x, y)) == [-dir(I, x), -dir(I, y)] + @test ITensors.dirs(J) == [-dir(I, x), -dir(I, y)] + + # dir + dirsI = dir.(I) + # broken for now + #@inferred broadcast(dir, I) + @test dirsI isa Vector{ITensors.Arrow} + @test dirsI == [ITensors.Out, ITensors.Out] + + # dims + dimsI = dim.(I) + # broken for now + #@inferred broadcast(dim, I) + @test dimsI isa Vector{Int} + @test dimsI == [2, 4] + + # pairs + J = prime.(I) + pairsI = I .=> J + #@inferred broadcast(=>, I, J) + @test pairsI isa Vector{<:Pair} + @test pairsI == [x => x', y => y'] + + pairsI = I .=> 1 + #@inferred broadcast(=>, I, 1) + @test pairsI isa Vector{<:Pair} + @test pairsI == [x => 1, y => 1] + + pairsI = I .=> (1, 2) + #@inferred broadcast(=>, I, (1, 2)) + @test pairsI isa Vector{<:Pair} + @test pairsI == [x => 1, y => 2] + + pairsI = I .=> [1, 2] + #@inferred broadcast(=>, I, [1, 2]) + @test pairsI isa Vector{<:Pair} + @test pairsI == [x => 1, y => 2] + end + + @testset "ITensors.indpairs" begin + si = [QN(0) => 1, QN(1) => 2, QN(2) => 3] + sj = [QN(0) => 2, QN(1) => 3, QN(2) => 4] + sk = [QN(0) => 3, QN(1) => 4, QN(2) => 5] + sl = [QN(0) => 2] + i, j, k, l = Index.((si, sj, sk, sl), ("i", "j", "k", "l")) + T = random_itensor(dag(j), k', i', dag(k), j', dag(i)) + ip = ITensors.indpairs(T) + i1 = first.(ip) + i2 = last.(ip) + @test i1' == i2 + for x in i1 + @test dir(x) == dir(T, x) + end + for x in i2 + @test dir(x) == dir(T, x) + end + end + + @testset "permute" begin + i, j, k = Index.(Ref([QN() => 2]), ("i", "j", "k")) + is1 = (dag(i), j, dag(k)) + is2 = (i, dag(j), k) + for x1 in permutations(is1), x2 in permutations(is2) + # permute x1 into the ordering of x2 + px1 = permute(x1, x2) + @test px1 == x2 + for y in x1 + @test dir(x1, y) == dir(px1, y) + @test -dir(x2, y) == dir(px1, y) + end + # permute x2 into the ordering of x1 + px2 = permute(x2, x1) + @test px2 == x1 + for y in x2 + @test dir(x2, y) == dir(px2, y) + @test -dir(x1, y) == dir(px2, y) + end + end + end + + @testset "dag" begin + is = [Index(2), Index(3)] + @test is == dag(is) + is = Index[] + @test dag(is) == Index[] end - end - - @testset "dag" begin - is = [Index(2), Index(3)] - @test is == dag(is) - is = Index[] - @test dag(is) == Index[] - end end diff --git a/test/base/test_indices.jl b/test/base/test_indices.jl index 6d7db4d62c..3caf4903a7 100644 --- a/test/base/test_indices.jl +++ b/test/base/test_indices.jl @@ -3,242 +3,242 @@ using ITensors using ITensors.NDTensors @testset "Allow general mixtures of collections of indices" begin - d = 2 - is = Index.((d, d, d, d, d), ("i", "j", "k", "l", "m")) - i, j, k, l, m = is - is1 = ([i, j], k, (l, m)) - is2 = [[i, j], k, (l, m)] - A = randn(dims(is)) - D = randn(minimum(dims(is))) - x = randn() - @test hassameinds(ITensor(i), (i,)) - @test hassameinds(ITensor(Float64, i), (i,)) - @test hassameinds(ITensor(is1), is) 
- @test hassameinds(ITensor(is2), is) - @test hassameinds(ITensor(is1...), is) - @test hassameinds(ITensor(Float64, is1), is) - @test hassameinds(ITensor(Float64, is2), is) - @test hassameinds(ITensor(Float64, is1...), is) - @test hassameinds(random_itensor(is1), is) - @test hassameinds(random_itensor(is2), is) - @test hassameinds(random_itensor(is1...), is) - @test hassameinds(random_itensor(Float64, is1), is) - @test hassameinds(random_itensor(Float64, is2), is) - @test hassameinds(random_itensor(Float64, is1...), is) - @test hassameinds(ITensor(x, is1), is) - @test hassameinds(ITensor(x, is2), is) - @test hassameinds(ITensor(x, is1...), is) - @test hassameinds(ITensor(Float64, x, is1), is) - @test hassameinds(ITensor(Float64, x, is2), is) - @test hassameinds(ITensor(Float64, x, is1...), is) - @test hassameinds(ITensor(Float64, undef, is1), is) - @test hassameinds(ITensor(Float64, undef, is2), is) - @test hassameinds(ITensor(Float64, undef, is1...), is) - @test hassameinds(ITensor(undef, is1), is) - @test hassameinds(ITensor(undef, is2), is) - @test hassameinds(ITensor(undef, is1...), is) - @test hassameinds(ITensor(is1), is) - @test hassameinds(ITensor(is2), is) - @test hassameinds(ITensor(is1...), is) - @test hassameinds(ITensor(Float64, is1), is) - @test hassameinds(ITensor(Float64, is2), is) - @test hassameinds(ITensor(Float64, is1...), is) - @test hassameinds(ITensor(A, is1), is) - @test hassameinds(ITensor(A, is2), is) - @test hassameinds(ITensor(A, is1...), is) - @test hassameinds(itensor(A, is1), is) - @test hassameinds(itensor(A, is2), is) - @test hassameinds(itensor(A, is1...), is) - @test hassameinds(ITensor(Float64, A, is1), is) - @test hassameinds(ITensor(Float64, A, is2), is) - @test hassameinds(ITensor(Float64, A, is1...), is) - @test hassameinds(itensor(Float64, A, is1), is) - @test hassameinds(itensor(Float64, A, is2), is) - @test hassameinds(itensor(Float64, A, is1...), is) - @test hassameinds(diag_itensor(is1), is) - @test hassameinds(diag_itensor(is2), is) - @test hassameinds(diag_itensor(is1...), is) - @test hassameinds(diag_itensor(Float64, is1), is) - @test hassameinds(diag_itensor(Float64, is2), is) - @test hassameinds(diag_itensor(Float64, is1...), is) - @test hassameinds(diag_itensor(D, is1), is) - @test hassameinds(diag_itensor(D, is2), is) - @test hassameinds(diag_itensor(D, is1...), is) - @test hassameinds(diag_itensor(Float64, D, is1), is) - @test hassameinds(diag_itensor(Float64, D, is2), is) - @test hassameinds(diag_itensor(Float64, D, is1...), is) - @test hassameinds(diag_itensor(x, is1), is) - @test hassameinds(diag_itensor(x, is2), is) - @test hassameinds(diag_itensor(x, is1...), is) - @test hassameinds(diag_itensor(Float64, x, is1), is) - @test hassameinds(diag_itensor(Float64, x, is2), is) - @test hassameinds(diag_itensor(Float64, x, is1...), is) - @test hassameinds(diagitensor(D, is1), is) - @test hassameinds(diagitensor(D, is2), is) - @test hassameinds(diagitensor(D, is1...), is) - @test hassameinds(diagitensor(Float64, D, is1), is) - @test hassameinds(diagitensor(Float64, D, is2), is) - @test hassameinds(diagitensor(Float64, D, is1...), is) - @test hassameinds(delta(is1), is) - @test hassameinds(delta(is2), is) - @test hassameinds(delta(is1...), is) - @test hassameinds(delta(Float64, is1), is) - @test hassameinds(delta(Float64, is2), is) - @test hassameinds(delta(Float64, is1...), is) - @test hasinds(combiner(is1), is) - @test hasinds(combiner(is2), is) - @test hasinds(combiner(is1...), is) + d = 2 + is = Index.((d, d, d, d, d), ("i", "j", "k", 
"l", "m")) + i, j, k, l, m = is + is1 = ([i, j], k, (l, m)) + is2 = [[i, j], k, (l, m)] + A = randn(dims(is)) + D = randn(minimum(dims(is))) + x = randn() + @test hassameinds(ITensor(i), (i,)) + @test hassameinds(ITensor(Float64, i), (i,)) + @test hassameinds(ITensor(is1), is) + @test hassameinds(ITensor(is2), is) + @test hassameinds(ITensor(is1...), is) + @test hassameinds(ITensor(Float64, is1), is) + @test hassameinds(ITensor(Float64, is2), is) + @test hassameinds(ITensor(Float64, is1...), is) + @test hassameinds(random_itensor(is1), is) + @test hassameinds(random_itensor(is2), is) + @test hassameinds(random_itensor(is1...), is) + @test hassameinds(random_itensor(Float64, is1), is) + @test hassameinds(random_itensor(Float64, is2), is) + @test hassameinds(random_itensor(Float64, is1...), is) + @test hassameinds(ITensor(x, is1), is) + @test hassameinds(ITensor(x, is2), is) + @test hassameinds(ITensor(x, is1...), is) + @test hassameinds(ITensor(Float64, x, is1), is) + @test hassameinds(ITensor(Float64, x, is2), is) + @test hassameinds(ITensor(Float64, x, is1...), is) + @test hassameinds(ITensor(Float64, undef, is1), is) + @test hassameinds(ITensor(Float64, undef, is2), is) + @test hassameinds(ITensor(Float64, undef, is1...), is) + @test hassameinds(ITensor(undef, is1), is) + @test hassameinds(ITensor(undef, is2), is) + @test hassameinds(ITensor(undef, is1...), is) + @test hassameinds(ITensor(is1), is) + @test hassameinds(ITensor(is2), is) + @test hassameinds(ITensor(is1...), is) + @test hassameinds(ITensor(Float64, is1), is) + @test hassameinds(ITensor(Float64, is2), is) + @test hassameinds(ITensor(Float64, is1...), is) + @test hassameinds(ITensor(A, is1), is) + @test hassameinds(ITensor(A, is2), is) + @test hassameinds(ITensor(A, is1...), is) + @test hassameinds(itensor(A, is1), is) + @test hassameinds(itensor(A, is2), is) + @test hassameinds(itensor(A, is1...), is) + @test hassameinds(ITensor(Float64, A, is1), is) + @test hassameinds(ITensor(Float64, A, is2), is) + @test hassameinds(ITensor(Float64, A, is1...), is) + @test hassameinds(itensor(Float64, A, is1), is) + @test hassameinds(itensor(Float64, A, is2), is) + @test hassameinds(itensor(Float64, A, is1...), is) + @test hassameinds(diag_itensor(is1), is) + @test hassameinds(diag_itensor(is2), is) + @test hassameinds(diag_itensor(is1...), is) + @test hassameinds(diag_itensor(Float64, is1), is) + @test hassameinds(diag_itensor(Float64, is2), is) + @test hassameinds(diag_itensor(Float64, is1...), is) + @test hassameinds(diag_itensor(D, is1), is) + @test hassameinds(diag_itensor(D, is2), is) + @test hassameinds(diag_itensor(D, is1...), is) + @test hassameinds(diag_itensor(Float64, D, is1), is) + @test hassameinds(diag_itensor(Float64, D, is2), is) + @test hassameinds(diag_itensor(Float64, D, is1...), is) + @test hassameinds(diag_itensor(x, is1), is) + @test hassameinds(diag_itensor(x, is2), is) + @test hassameinds(diag_itensor(x, is1...), is) + @test hassameinds(diag_itensor(Float64, x, is1), is) + @test hassameinds(diag_itensor(Float64, x, is2), is) + @test hassameinds(diag_itensor(Float64, x, is1...), is) + @test hassameinds(diagitensor(D, is1), is) + @test hassameinds(diagitensor(D, is2), is) + @test hassameinds(diagitensor(D, is1...), is) + @test hassameinds(diagitensor(Float64, D, is1), is) + @test hassameinds(diagitensor(Float64, D, is2), is) + @test hassameinds(diagitensor(Float64, D, is1...), is) + @test hassameinds(delta(is1), is) + @test hassameinds(delta(is2), is) + @test hassameinds(delta(is1...), is) + @test 
hassameinds(delta(Float64, is1), is) + @test hassameinds(delta(Float64, is2), is) + @test hassameinds(delta(Float64, is1...), is) + @test hasinds(combiner(is1), is) + @test hasinds(combiner(is2), is) + @test hasinds(combiner(is1...), is) end @testset "Allow general mixtures of collections of QN indices" begin - d = [QN() => 2] - is = Index.((d, d, d, d, d), ("i", "j", "k", "l", "m")) - i, j, k, l, m = is - is1 = ([i, j], k, (l, m)) - is2 = [[i, j], k, (l, m)] - A = randn(dims(is)) - D = randn(minimum(dims(is))) - x = randn() - @test hassameinds(ITensor(i), (i,)) - @test hassameinds(ITensor(Float64, i), (i,)) - @test hassameinds(ITensor(is1), is) - @test hassameinds(ITensor(is2), is) - @test hassameinds(ITensor(is1...), is) - @test hassameinds(ITensor(Float64, is1), is) - @test hassameinds(ITensor(Float64, is2), is) - @test hassameinds(ITensor(Float64, is1...), is) - @test hassameinds(random_itensor(is1), is) - @test hassameinds(random_itensor(is2), is) - @test hassameinds(random_itensor(is1...), is) - @test hassameinds(random_itensor(Float64, is1), is) - @test hassameinds(random_itensor(Float64, is2), is) - @test hassameinds(random_itensor(Float64, is1...), is) - @test hassameinds(ITensor(x, is1), is) - @test hassameinds(ITensor(x, is2), is) - @test hassameinds(ITensor(x, is1...), is) - @test hassameinds(ITensor(Float64, x, is1), is) - @test hassameinds(ITensor(Float64, x, is2), is) - @test hassameinds(ITensor(Float64, x, is1...), is) - @test hassameinds(ITensor(Float64, undef, is1), is) - @test hassameinds(ITensor(Float64, undef, is2), is) - @test hassameinds(ITensor(Float64, undef, is1...), is) - @test hassameinds(ITensor(undef, is1), is) - @test hassameinds(ITensor(undef, is2), is) - @test hassameinds(ITensor(undef, is1...), is) - @test hassameinds(ITensor(is1), is) - @test hassameinds(ITensor(is2), is) - @test hassameinds(ITensor(is1...), is) - @test hassameinds(ITensor(Float64, is1), is) - @test hassameinds(ITensor(Float64, is2), is) - @test hassameinds(ITensor(Float64, is1...), is) - @test hassameinds(ITensor(A, is1), is) - @test hassameinds(ITensor(A, is2), is) - @test hassameinds(ITensor(A, is1...), is) - @test hassameinds(itensor(A, is1), is) - @test hassameinds(itensor(A, is2), is) - @test hassameinds(itensor(A, is1...), is) - @test hassameinds(ITensor(Float64, A, is1), is) - @test hassameinds(ITensor(Float64, A, is2), is) - @test hassameinds(ITensor(Float64, A, is1...), is) - @test hassameinds(itensor(Float64, A, is1), is) - @test hassameinds(itensor(Float64, A, is2), is) - @test hassameinds(itensor(Float64, A, is1...), is) - @test hassameinds(diag_itensor(is1), is) - @test hassameinds(diag_itensor(is2), is) - @test hassameinds(diag_itensor(is1...), is) - @test hassameinds(diag_itensor(Float64, is1), is) - @test hassameinds(diag_itensor(Float64, is2), is) - @test hassameinds(diag_itensor(Float64, is1...), is) - @test hassameinds(diag_itensor(D, is1), is) - @test hassameinds(diag_itensor(D, is2), is) - @test hassameinds(diag_itensor(D, is1...), is) - @test hassameinds(diag_itensor(Float64, D, is1), is) - @test hassameinds(diag_itensor(Float64, D, is2), is) - @test hassameinds(diag_itensor(Float64, D, is1...), is) - @test hassameinds(diag_itensor(x, is1), is) - @test hassameinds(diag_itensor(x, is2), is) - @test hassameinds(diag_itensor(x, is1...), is) - @test hassameinds(diag_itensor(Float64, x, is1), is) - @test hassameinds(diag_itensor(Float64, x, is2), is) - @test hassameinds(diag_itensor(Float64, x, is1...), is) - @test hassameinds(diagitensor(D, is1), is) - @test 
hassameinds(diagitensor(D, is2), is) - @test hassameinds(diagitensor(D, is1...), is) - @test hassameinds(diagitensor(Float64, D, is1), is) - @test hassameinds(diagitensor(Float64, D, is2), is) - @test hassameinds(diagitensor(Float64, D, is1...), is) - @test hassameinds(delta(is1), is) - @test hassameinds(delta(is2), is) - @test hassameinds(delta(is1...), is) - @test hassameinds(delta(Float64, is1), is) - @test hassameinds(delta(Float64, is2), is) - @test hassameinds(delta(Float64, is1...), is) - @test hasinds(combiner(is1), is) - @test hasinds(combiner(is2), is) - @test hasinds(combiner(is1...), is) + d = [QN() => 2] + is = Index.((d, d, d, d, d), ("i", "j", "k", "l", "m")) + i, j, k, l, m = is + is1 = ([i, j], k, (l, m)) + is2 = [[i, j], k, (l, m)] + A = randn(dims(is)) + D = randn(minimum(dims(is))) + x = randn() + @test hassameinds(ITensor(i), (i,)) + @test hassameinds(ITensor(Float64, i), (i,)) + @test hassameinds(ITensor(is1), is) + @test hassameinds(ITensor(is2), is) + @test hassameinds(ITensor(is1...), is) + @test hassameinds(ITensor(Float64, is1), is) + @test hassameinds(ITensor(Float64, is2), is) + @test hassameinds(ITensor(Float64, is1...), is) + @test hassameinds(random_itensor(is1), is) + @test hassameinds(random_itensor(is2), is) + @test hassameinds(random_itensor(is1...), is) + @test hassameinds(random_itensor(Float64, is1), is) + @test hassameinds(random_itensor(Float64, is2), is) + @test hassameinds(random_itensor(Float64, is1...), is) + @test hassameinds(ITensor(x, is1), is) + @test hassameinds(ITensor(x, is2), is) + @test hassameinds(ITensor(x, is1...), is) + @test hassameinds(ITensor(Float64, x, is1), is) + @test hassameinds(ITensor(Float64, x, is2), is) + @test hassameinds(ITensor(Float64, x, is1...), is) + @test hassameinds(ITensor(Float64, undef, is1), is) + @test hassameinds(ITensor(Float64, undef, is2), is) + @test hassameinds(ITensor(Float64, undef, is1...), is) + @test hassameinds(ITensor(undef, is1), is) + @test hassameinds(ITensor(undef, is2), is) + @test hassameinds(ITensor(undef, is1...), is) + @test hassameinds(ITensor(is1), is) + @test hassameinds(ITensor(is2), is) + @test hassameinds(ITensor(is1...), is) + @test hassameinds(ITensor(Float64, is1), is) + @test hassameinds(ITensor(Float64, is2), is) + @test hassameinds(ITensor(Float64, is1...), is) + @test hassameinds(ITensor(A, is1), is) + @test hassameinds(ITensor(A, is2), is) + @test hassameinds(ITensor(A, is1...), is) + @test hassameinds(itensor(A, is1), is) + @test hassameinds(itensor(A, is2), is) + @test hassameinds(itensor(A, is1...), is) + @test hassameinds(ITensor(Float64, A, is1), is) + @test hassameinds(ITensor(Float64, A, is2), is) + @test hassameinds(ITensor(Float64, A, is1...), is) + @test hassameinds(itensor(Float64, A, is1), is) + @test hassameinds(itensor(Float64, A, is2), is) + @test hassameinds(itensor(Float64, A, is1...), is) + @test hassameinds(diag_itensor(is1), is) + @test hassameinds(diag_itensor(is2), is) + @test hassameinds(diag_itensor(is1...), is) + @test hassameinds(diag_itensor(Float64, is1), is) + @test hassameinds(diag_itensor(Float64, is2), is) + @test hassameinds(diag_itensor(Float64, is1...), is) + @test hassameinds(diag_itensor(D, is1), is) + @test hassameinds(diag_itensor(D, is2), is) + @test hassameinds(diag_itensor(D, is1...), is) + @test hassameinds(diag_itensor(Float64, D, is1), is) + @test hassameinds(diag_itensor(Float64, D, is2), is) + @test hassameinds(diag_itensor(Float64, D, is1...), is) + @test hassameinds(diag_itensor(x, is1), is) + @test 
hassameinds(diag_itensor(x, is2), is) + @test hassameinds(diag_itensor(x, is1...), is) + @test hassameinds(diag_itensor(Float64, x, is1), is) + @test hassameinds(diag_itensor(Float64, x, is2), is) + @test hassameinds(diag_itensor(Float64, x, is1...), is) + @test hassameinds(diagitensor(D, is1), is) + @test hassameinds(diagitensor(D, is2), is) + @test hassameinds(diagitensor(D, is1...), is) + @test hassameinds(diagitensor(Float64, D, is1), is) + @test hassameinds(diagitensor(Float64, D, is2), is) + @test hassameinds(diagitensor(Float64, D, is1...), is) + @test hassameinds(delta(is1), is) + @test hassameinds(delta(is2), is) + @test hassameinds(delta(is1...), is) + @test hassameinds(delta(Float64, is1), is) + @test hassameinds(delta(Float64, is2), is) + @test hassameinds(delta(Float64, is1...), is) + @test hasinds(combiner(is1), is) + @test hasinds(combiner(is2), is) + @test hasinds(combiner(is1...), is) - # With flux - @test hassameinds(ITensor(QN(), i), (i,)) - @test hassameinds(ITensor(Float64, QN(), i), (i,)) - @test hassameinds(ITensor(QN(), is1), is) - @test hassameinds(ITensor(QN(), is2), is) - @test hassameinds(ITensor(QN(), is1...), is) - @test hassameinds(ITensor(Float64, QN(), is1), is) - @test hassameinds(ITensor(Float64, QN(), is2), is) - @test hassameinds(ITensor(Float64, QN(), is1...), is) - @test hassameinds(random_itensor(QN(), is1), is) - @test hassameinds(random_itensor(QN(), is2), is) - @test hassameinds(random_itensor(QN(), is1...), is) - @test hassameinds(random_itensor(Float64, QN(), is1), is) - @test hassameinds(random_itensor(Float64, QN(), is2), is) - @test hassameinds(random_itensor(Float64, QN(), is1...), is) - @test hassameinds(ITensor(x, QN(), is1), is) - @test hassameinds(ITensor(x, QN(), is2), is) - @test hassameinds(ITensor(x, QN(), is1...), is) - @test hassameinds(ITensor(Float64, x, QN(), is1), is) - @test hassameinds(ITensor(Float64, x, QN(), is2), is) - @test hassameinds(ITensor(Float64, x, QN(), is1...), is) - @test hassameinds(ITensor(Float64, undef, QN(), is1), is) - @test hassameinds(ITensor(Float64, undef, QN(), is2), is) - @test hassameinds(ITensor(Float64, undef, QN(), is1...), is) - @test hassameinds(ITensor(undef, QN(), is1), is) - @test hassameinds(ITensor(undef, QN(), is2), is) - @test hassameinds(ITensor(undef, QN(), is1...), is) - @test_throws ErrorException hassameinds(emptyITensor(QN(), is1), is) - @test_throws ErrorException hassameinds(emptyITensor(QN(), is2), is) - @test_throws ErrorException hassameinds(emptyITensor(QN(), is1...), is) - @test_throws ErrorException hassameinds(emptyITensor(Float64, QN(), is1), is) - @test_throws ErrorException hassameinds(emptyITensor(Float64, QN(), is2), is) - @test_throws ErrorException hassameinds(emptyITensor(Float64, QN(), is1...), is) + # With flux + @test hassameinds(ITensor(QN(), i), (i,)) + @test hassameinds(ITensor(Float64, QN(), i), (i,)) + @test hassameinds(ITensor(QN(), is1), is) + @test hassameinds(ITensor(QN(), is2), is) + @test hassameinds(ITensor(QN(), is1...), is) + @test hassameinds(ITensor(Float64, QN(), is1), is) + @test hassameinds(ITensor(Float64, QN(), is2), is) + @test hassameinds(ITensor(Float64, QN(), is1...), is) + @test hassameinds(random_itensor(QN(), is1), is) + @test hassameinds(random_itensor(QN(), is2), is) + @test hassameinds(random_itensor(QN(), is1...), is) + @test hassameinds(random_itensor(Float64, QN(), is1), is) + @test hassameinds(random_itensor(Float64, QN(), is2), is) + @test hassameinds(random_itensor(Float64, QN(), is1...), is) + @test hassameinds(ITensor(x, 
QN(), is1), is) + @test hassameinds(ITensor(x, QN(), is2), is) + @test hassameinds(ITensor(x, QN(), is1...), is) + @test hassameinds(ITensor(Float64, x, QN(), is1), is) + @test hassameinds(ITensor(Float64, x, QN(), is2), is) + @test hassameinds(ITensor(Float64, x, QN(), is1...), is) + @test hassameinds(ITensor(Float64, undef, QN(), is1), is) + @test hassameinds(ITensor(Float64, undef, QN(), is2), is) + @test hassameinds(ITensor(Float64, undef, QN(), is1...), is) + @test hassameinds(ITensor(undef, QN(), is1), is) + @test hassameinds(ITensor(undef, QN(), is2), is) + @test hassameinds(ITensor(undef, QN(), is1...), is) + @test_throws ErrorException hassameinds(emptyITensor(QN(), is1), is) + @test_throws ErrorException hassameinds(emptyITensor(QN(), is2), is) + @test_throws ErrorException hassameinds(emptyITensor(QN(), is1...), is) + @test_throws ErrorException hassameinds(emptyITensor(Float64, QN(), is1), is) + @test_throws ErrorException hassameinds(emptyITensor(Float64, QN(), is2), is) + @test_throws ErrorException hassameinds(emptyITensor(Float64, QN(), is1...), is) end @testset "Test Index collection as Vector of abstract type" begin - d = 2 - i = Index(d) - A = randn(d, d) - T = itensor(A, Index[i', dag(i)]) - @test storage(T) isa NDTensors.Dense{Float64} - T = itensor(A, Any[i', dag(i)]) - @test storage(T) isa NDTensors.Dense{Float64} + d = 2 + i = Index(d) + A = randn(d, d) + T = itensor(A, Index[i', dag(i)]) + @test storage(T) isa NDTensors.Dense{Float64} + T = itensor(A, Any[i', dag(i)]) + @test storage(T) isa NDTensors.Dense{Float64} - i = Index([QN() => d]) - A = randn(d, d) - T = itensor(A, Index[i', dag(i)]) - @test storage(T) isa NDTensors.BlockSparse{Float64} - T = itensor(A, Any[i', dag(i)]) - @test storage(T) isa NDTensors.BlockSparse{Float64} + i = Index([QN() => d]) + A = randn(d, d) + T = itensor(A, Index[i', dag(i)]) + @test storage(T) isa NDTensors.BlockSparse{Float64} + T = itensor(A, Any[i', dag(i)]) + @test storage(T) isa NDTensors.BlockSparse{Float64} end @testset "Test output types of ITensors.indices" begin - i = Index(2) - @test ITensors.indices([i'', i', i]) == Index{Int}[i'', i', i] - @test ITensors.indices((i'', i', i)) == (i'', i', i) - @test ITensors.indices(((i'',), (i',), i)) == (i'', i', i) - @test ITensors.indices(((i'', i'), (i,))) == (i'', i', i) - @test ITensors.indices([(i'',), (i',), (i,)]) == Index{Int}[i'', i', i] - @test ITensors.indices(Any[(i'',), (i',), (i,)]) == Index{Int}[i'', i', i] - @test ITensors.indices([(i'',), (i',), [i]]) == Index{Int}[i'', i', i] - @test ITensors.indices([(i'',), i', [i]]) == Index{Int}[i'', i', i] - @test ITensors.indices(Any[(i'',), i', [i]]) == Index{Int}[i'', i', i] - @test ITensors.indices(((i'',), i', [i])) == Index{Int}[i'', i', i] + i = Index(2) + @test ITensors.indices([i'', i', i]) == Index{Int}[i'', i', i] + @test ITensors.indices((i'', i', i)) == (i'', i', i) + @test ITensors.indices(((i'',), (i',), i)) == (i'', i', i) + @test ITensors.indices(((i'', i'), (i,))) == (i'', i', i) + @test ITensors.indices([(i'',), (i',), (i,)]) == Index{Int}[i'', i', i] + @test ITensors.indices(Any[(i'',), (i',), (i,)]) == Index{Int}[i'', i', i] + @test ITensors.indices([(i'',), (i',), [i]]) == Index{Int}[i'', i', i] + @test ITensors.indices([(i'',), i', [i]]) == Index{Int}[i'', i', i] + @test ITensors.indices(Any[(i'',), i', [i]]) == Index{Int}[i'', i', i] + @test ITensors.indices(((i'',), i', [i])) == Index{Int}[i'', i', i] end diff --git a/test/base/test_inference.jl b/test/base/test_inference.jl index 
7b1d0420b1..227e0a15c6 100644 --- a/test/base/test_inference.jl +++ b/test/base/test_inference.jl @@ -3,100 +3,100 @@ using ITensors.NDTensors using Test @testset "ITensors priming and tagging" begin - i = Index(2) - T1 = random_itensor(i'', i') - T2 = random_itensor(i', i) - - @test inds(@inferred(adjoint(T1))) == (i''', i'') - @test inds(@inferred(prime(T1, 2))) == (i'''', i''') - @test inds(@inferred(addtags(T1, "x"))) == (addtags(i, "x")'', addtags(i, "x")') - @test inds(@inferred(T1 * T2)) == (i'', i) - - @test @inferred(order(T1)) == 2 - @test @inferred(ndims(T1)) == 2 - @test @inferred(dim(T1)) == 4 - @test @inferred(maxdim(T1)) == 2 + i = Index(2) + T1 = random_itensor(i'', i') + T2 = random_itensor(i', i) + + @test inds(@inferred(adjoint(T1))) == (i''', i'') + @test inds(@inferred(prime(T1, 2))) == (i'''', i''') + @test inds(@inferred(addtags(T1, "x"))) == (addtags(i, "x")'', addtags(i, "x")') + @test inds(@inferred(T1 * T2)) == (i'', i) + + @test @inferred(order(T1)) == 2 + @test @inferred(ndims(T1)) == 2 + @test @inferred(dim(T1)) == 4 + @test @inferred(maxdim(T1)) == 2 end @testset "NDTensors Dense contract" begin - i = Index(2) - T1 = randomTensor((i'', i')) - T2 = randomTensor((i', i)) - R = randomTensor((i'', i)) - - labelsT1 = (1, -1) - labelsT2 = (-1, 2) - labelsR = (1, 2) - - @test @inferred(NDTensors.contraction_output(T1, labelsT1, T2, labelsT2, labelsR)) isa - DenseTensor{ - Float64,2,Tuple{Index{Int64},Index{Int64}},Dense{Float64,Vector{Float64}} - } - @test @inferred(NDTensors.contract(T1, labelsT1, T2, labelsT2, labelsR)) isa DenseTensor{ - Float64,2,Tuple{Index{Int64},Index{Int64}},Dense{Float64,Vector{Float64}} - } - @test @inferred(NDTensors.contract!!(R, labelsR, T1, labelsT1, T2, labelsT2)) isa - DenseTensor{ - Float64,2,Tuple{Index{Int64},Index{Int64}},Dense{Float64,Vector{Float64}} - } - - A = Base.ReshapedArray(randn(4), (2, 2), ()) - B = Base.ReshapedArray(randn(4), (2, 2), ()) - @inferred NDTensors._contract_scalar_perm!(B, A, (2, 1), 1.0, 1.0) - @inferred NDTensors._contract_scalar_perm!(B, A, (2, 1), 1.0, 1) + i = Index(2) + T1 = randomTensor((i'', i')) + T2 = randomTensor((i', i)) + R = randomTensor((i'', i)) + + labelsT1 = (1, -1) + labelsT2 = (-1, 2) + labelsR = (1, 2) + + @test @inferred(NDTensors.contraction_output(T1, labelsT1, T2, labelsT2, labelsR)) isa + DenseTensor{ + Float64, 2, Tuple{Index{Int64}, Index{Int64}}, Dense{Float64, Vector{Float64}}, + } + @test @inferred(NDTensors.contract(T1, labelsT1, T2, labelsT2, labelsR)) isa DenseTensor{ + Float64, 2, Tuple{Index{Int64}, Index{Int64}}, Dense{Float64, Vector{Float64}}, + } + @test @inferred(NDTensors.contract!!(R, labelsR, T1, labelsT1, T2, labelsT2)) isa + DenseTensor{ + Float64, 2, Tuple{Index{Int64}, Index{Int64}}, Dense{Float64, Vector{Float64}}, + } + + A = Base.ReshapedArray(randn(4), (2, 2), ()) + B = Base.ReshapedArray(randn(4), (2, 2), ()) + @inferred NDTensors._contract_scalar_perm!(B, A, (2, 1), 1.0, 1.0) + @inferred NDTensors._contract_scalar_perm!(B, A, (2, 1), 1.0, 1) end @testset "NDTensors BlockSparse contract" begin - i = Index([QN(0) => 2, QN(1) => 2]) - IT1 = random_itensor(i'', dag(i)') - IT2 = random_itensor(i', dag(i)) - IR = random_itensor(i'', dag(i)) - T1, T2, R = Tensor.((IT1, IT2, IR)) - - labelsT1 = (1, -1) - labelsT2 = (-1, 2) - labelsR = (1, 2) - - indsR = @inferred( - NDTensors.contract_inds(inds(T1), labelsT1, inds(T2), labelsT2, labelsR) - ) - @test indsR isa Tuple{Index{Vector{Pair{QN,Int}}},Index{Vector{Pair{QN,Int}}}} - - TensorT = 
@inferred(NDTensors.contraction_output_type(typeof(T1), typeof(T2), indsR)) - @test TensorT <: Tensor{Float64,2,BlockSparse{Float64,Vector{Float64},2},typeof(indsR)} - - blockoffsetsR, contraction_plan = @inferred( - NDTensors.contract_blockoffsets( - blockoffsets(T1), - inds(T1), - labelsT1, - blockoffsets(T2), - inds(T2), - labelsT2, - indsR, - labelsR, + i = Index([QN(0) => 2, QN(1) => 2]) + IT1 = random_itensor(i'', dag(i)') + IT2 = random_itensor(i', dag(i)) + IR = random_itensor(i'', dag(i)) + T1, T2, R = Tensor.((IT1, IT2, IR)) + + labelsT1 = (1, -1) + labelsT2 = (-1, 2) + labelsR = (1, 2) + + indsR = @inferred( + NDTensors.contract_inds(inds(T1), labelsT1, inds(T2), labelsT2, labelsR) ) - ) - @test blockoffsetsR isa BlockOffsets{2} - @test contraction_plan isa Vector{Tuple{Block{2},Block{2},Block{2}}} - - @test @inferred(NDTensors.contraction_output(T1, labelsT1, T2, labelsT2, labelsR)) isa - Tuple{BlockSparseTensor,Vector{Tuple{Block{2},Block{2},Block{2}}}} - - if VERSION ≥ v"1.7" - # Only properly inferred in Julia 1.7 and later - @test @inferred(NDTensors.contract(T1, labelsT1, T2, labelsT2, labelsR)) isa - BlockSparseTensor - end - - # TODO: this function doesn't exist yet - #@test @inferred(NDTensors.contract!!(R, labelsR, T1, labelsT1, T2, labelsT2)) isa BlockSparseTensor - - b = Block(1, 1) - B1 = T1[b] - B2 = T2[b] - BR = R[b] - @test @inferred( - NDTensors.contract!(BR, labelsR, B1, labelsT1, B2, labelsT2, 1.0, 0.0) - ) isa DenseTensor + @test indsR isa Tuple{Index{Vector{Pair{QN, Int}}}, Index{Vector{Pair{QN, Int}}}} + + TensorT = @inferred(NDTensors.contraction_output_type(typeof(T1), typeof(T2), indsR)) + @test TensorT <: Tensor{Float64, 2, BlockSparse{Float64, Vector{Float64}, 2}, typeof(indsR)} + + blockoffsetsR, contraction_plan = @inferred( + NDTensors.contract_blockoffsets( + blockoffsets(T1), + inds(T1), + labelsT1, + blockoffsets(T2), + inds(T2), + labelsT2, + indsR, + labelsR, + ) + ) + @test blockoffsetsR isa BlockOffsets{2} + @test contraction_plan isa Vector{Tuple{Block{2}, Block{2}, Block{2}}} + + @test @inferred(NDTensors.contraction_output(T1, labelsT1, T2, labelsT2, labelsR)) isa + Tuple{BlockSparseTensor, Vector{Tuple{Block{2}, Block{2}, Block{2}}}} + + if VERSION ≥ v"1.7" + # Only properly inferred in Julia 1.7 and later + @test @inferred(NDTensors.contract(T1, labelsT1, T2, labelsT2, labelsR)) isa + BlockSparseTensor + end + + # TODO: this function doesn't exist yet + #@test @inferred(NDTensors.contract!!(R, labelsR, T1, labelsT1, T2, labelsT2)) isa BlockSparseTensor + + b = Block(1, 1) + B1 = T1[b] + B2 = T2[b] + BR = R[b] + @test @inferred( + NDTensors.contract!(BR, labelsR, B1, labelsT1, B2, labelsT2, 1.0, 0.0) + ) isa DenseTensor end diff --git a/test/base/test_itensor_scalar.jl b/test/base/test_itensor_scalar.jl index 26cf4ebc26..076eeaaa5c 100644 --- a/test/base/test_itensor_scalar.jl +++ b/test/base/test_itensor_scalar.jl @@ -2,66 +2,66 @@ using ITensors using Test @testset "Scalar ITensors" begin - A = ITensor(2.4) - @test storage(A) isa ITensors.Dense{Float64} - @test ndims(A) == 0 - @test order(A) == 0 - @test A[] == 2.4 - @test A[1] == 2.4 - @test scalar(A) == 2.4 - @test ITensors.symmetrystyle(A) == ITensors.NonQN() + A = ITensor(2.4) + @test storage(A) isa ITensors.Dense{Float64} + @test ndims(A) == 0 + @test order(A) == 0 + @test A[] == 2.4 + @test A[1] == 2.4 + @test scalar(A) == 2.4 + @test ITensors.symmetrystyle(A) == ITensors.NonQN() - A[] = 3.4 - @test ndims(A) == 0 - @test order(A) == 0 - @test A[] == 3.4 - @test A[1] == 3.4 - @test 
scalar(A) == 3.4 - @test ITensors.symmetrystyle(A) == ITensors.NonQN() + A[] = 3.4 + @test ndims(A) == 0 + @test order(A) == 0 + @test A[] == 3.4 + @test A[1] == 3.4 + @test scalar(A) == 3.4 + @test ITensors.symmetrystyle(A) == ITensors.NonQN() - A[1] = 4.4 - @test ndims(A) == 0 - @test order(A) == 0 - @test A[] == 4.4 - @test A[1] == 4.4 - @test scalar(A) == 4.4 - @test ITensors.symmetrystyle(A) == ITensors.NonQN() + A[1] = 4.4 + @test ndims(A) == 0 + @test order(A) == 0 + @test A[] == 4.4 + @test A[1] == 4.4 + @test scalar(A) == 4.4 + @test ITensors.symmetrystyle(A) == ITensors.NonQN() - A = ITensor() - @test storage(A) isa ITensors.EmptyStorage{ITensors.EmptyNumber} - @test ndims(A) == 0 - @test order(A) == 0 - @test A[] == 0.0 - @test A[1] == 0.0 - @test scalar(A) == 0.0 - @test ITensors.symmetrystyle(A) == ITensors.NonQN() + A = ITensor() + @test storage(A) isa ITensors.EmptyStorage{ITensors.EmptyNumber} + @test ndims(A) == 0 + @test order(A) == 0 + @test A[] == 0.0 + @test A[1] == 0.0 + @test scalar(A) == 0.0 + @test ITensors.symmetrystyle(A) == ITensors.NonQN() - A = ITensor() - @test storage(A) isa ITensors.EmptyStorage{ITensors.EmptyNumber} - A[] = 3.4 - @test storage(A) isa ITensors.Dense{Float64} - @test ndims(A) == 0 - @test order(A) == 0 - @test A[] == 3.4 - @test A[1] == 3.4 - @test scalar(A) == 3.4 - @test ITensors.symmetrystyle(A) == ITensors.NonQN() + A = ITensor() + @test storage(A) isa ITensors.EmptyStorage{ITensors.EmptyNumber} + A[] = 3.4 + @test storage(A) isa ITensors.Dense{Float64} + @test ndims(A) == 0 + @test order(A) == 0 + @test A[] == 3.4 + @test A[1] == 3.4 + @test scalar(A) == 3.4 + @test ITensors.symmetrystyle(A) == ITensors.NonQN() - A = ITensor() - @test storage(A) isa ITensors.EmptyStorage{ITensors.EmptyNumber} - A[1] = 4.4 - @test storage(A) isa ITensors.Dense{Float64} - @test ndims(A) == 0 - @test order(A) == 0 - @test A[] == 4.4 - @test A[1] == 4.4 - @test scalar(A) == 4.4 - @test ITensors.symmetrystyle(A) == ITensors.NonQN() + A = ITensor() + @test storage(A) isa ITensors.EmptyStorage{ITensors.EmptyNumber} + A[1] = 4.4 + @test storage(A) isa ITensors.Dense{Float64} + @test ndims(A) == 0 + @test order(A) == 0 + @test A[] == 4.4 + @test A[1] == 4.4 + @test scalar(A) == 4.4 + @test ITensors.symmetrystyle(A) == ITensors.NonQN() - x = 2.3 - ITensor(fill(x, ())) == ITensor(x) - ITensor(fill(x, (1))) == ITensor(x) - ITensor(fill(x, (1, 1))) == ITensor(x) - ITensor(fill(x, (1, 1, 1))) == ITensor(x) - @test_throws ErrorException ITensor(fill(x, (2, 2))) + x = 2.3 + ITensor(fill(x, ())) == ITensor(x) + ITensor(fill(x, (1))) == ITensor(x) + ITensor(fill(x, (1, 1))) == ITensor(x) + ITensor(fill(x, (1, 1, 1))) == ITensor(x) + @test_throws ErrorException ITensor(fill(x, (2, 2))) end diff --git a/test/base/test_itensor_scalar_contract.jl b/test/base/test_itensor_scalar_contract.jl index ed24027730..efbf5e7b2e 100644 --- a/test/base/test_itensor_scalar_contract.jl +++ b/test/base/test_itensor_scalar_contract.jl @@ -5,98 +5,98 @@ using Random Random.seed!(1234) @testset "Test contractions with scalar-like ITensors" begin - i = Index(2, "i") - j = Index(2, "j") - k = Index(2, "k") - α = Index(1, "α") - - is = (i, j, k) - - A = random_itensor(is..., dag(α)) - B = ITensor(2, α, α', α'') - - C = A * B - @test C ≈ B[1, 1, 1] * A * ITensor(1, inds(B)) - - C = ITensor(is..., α', α'') - C .= A .* B - @test C ≈ B[1, 1, 1] * A * ITensor(1, inds(B)) - - C = ITensor(shuffle([(is..., α', α'')...])...) 
- C .= A .* B - @test C ≈ B[1, 1, 1] * A * ITensor(1, inds(B)) -end - -@testset "NaN in-place contraction bug regression test" begin - BlasFloats = (Float32, Float64, ComplexF32, ComplexF64) - @testset "Scalar contract, no permutation" for ElA in BlasFloats, ElB in BlasFloats i = Index(2, "i") - j = Index(3, "j") + j = Index(2, "j") + k = Index(2, "k") α = Index(1, "α") - A = random_itensor(ElA, i, j, α') - B = random_itensor(ElB, dag(α)', α) - R = ITensor(promote_type(ElA, ElB), i, j, α) - R .= NaN - @test any(isnan, R) + is = (i, j, k) - R .= A .* B - @test !any(isnan, R) - @test array(R) ≈ array(A) * array(B)[] + A = random_itensor(is..., dag(α)) + B = ITensor(2, α, α', α'') - R .= NaN - @test any(isnan, R) + C = A * B + @test C ≈ B[1, 1, 1] * A * ITensor(1, inds(B)) - R .= B .* A - @test !any(isnan, R) - @test array(R) ≈ array(A) * array(B)[] - end + C = ITensor(is..., α', α'') + C .= A .* B + @test C ≈ B[1, 1, 1] * A * ITensor(1, inds(B)) - @testset "Scalar contraction, permutation" for ElA in BlasFloats, ElB in BlasFloats - i = Index(2, "i") - j = Index(3, "j") - α = Index(1, "α") - A = random_itensor(ElA, i, j, α') - B = random_itensor(ElB, dag(α)', α) - R = ITensor(promote_type(ElA, ElB), j, i, α) - - R .= NaN - @test any(isnan, R) - - R .= A .* B - @test !any(isnan, R) - @test array(R) ≈ permutedims(array(A), (2, 1, 3)) * array(B)[] - - R .= NaN - @test any(isnan, R) - - R .= B .* A - @test !any(isnan, R) - @test array(R) ≈ permutedims(array(A), (2, 1, 3)) * array(B)[] - end + C = ITensor(shuffle([(is..., α', α'')...])...) + C .= A .* B + @test C ≈ B[1, 1, 1] * A * ITensor(1, inds(B)) +end - @testset "General contraction, permutation" for ElA in BlasFloats, ElB in BlasFloats - i = Index(2, "i") - j = Index(3, "j") - α = Index(2, "α") - A = random_itensor(ElA, i, j, α') - B = random_itensor(ElB, dag(α)', α) - R = ITensor(promote_type(ElA, ElB), j, i, α) - - R .= NaN - @test any(isnan, R) - - R .= A .* B - @test !any(isnan, R) - @test reshape(array(R), 6, 2) ≈ - reshape(permutedims(array(A), (2, 1, 3)), 6, 2) * array(B) - - R .= NaN - @test any(isnan, R) - - R .= B .* A - @test !any(isnan, R) - @test reshape(array(R), 6, 2) ≈ - reshape(permutedims(array(A), (2, 1, 3)), 6, 2) * array(B) - end +@testset "NaN in-place contraction bug regression test" begin + BlasFloats = (Float32, Float64, ComplexF32, ComplexF64) + @testset "Scalar contract, no permutation" for ElA in BlasFloats, ElB in BlasFloats + i = Index(2, "i") + j = Index(3, "j") + α = Index(1, "α") + A = random_itensor(ElA, i, j, α') + B = random_itensor(ElB, dag(α)', α) + R = ITensor(promote_type(ElA, ElB), i, j, α) + + R .= NaN + @test any(isnan, R) + + R .= A .* B + @test !any(isnan, R) + @test array(R) ≈ array(A) * array(B)[] + + R .= NaN + @test any(isnan, R) + + R .= B .* A + @test !any(isnan, R) + @test array(R) ≈ array(A) * array(B)[] + end + + @testset "Scalar contraction, permutation" for ElA in BlasFloats, ElB in BlasFloats + i = Index(2, "i") + j = Index(3, "j") + α = Index(1, "α") + A = random_itensor(ElA, i, j, α') + B = random_itensor(ElB, dag(α)', α) + R = ITensor(promote_type(ElA, ElB), j, i, α) + + R .= NaN + @test any(isnan, R) + + R .= A .* B + @test !any(isnan, R) + @test array(R) ≈ permutedims(array(A), (2, 1, 3)) * array(B)[] + + R .= NaN + @test any(isnan, R) + + R .= B .* A + @test !any(isnan, R) + @test array(R) ≈ permutedims(array(A), (2, 1, 3)) * array(B)[] + end + + @testset "General contraction, permutation" for ElA in BlasFloats, ElB in BlasFloats + i = Index(2, "i") + j = Index(3, "j") + α = 
Index(2, "α") + A = random_itensor(ElA, i, j, α') + B = random_itensor(ElB, dag(α)', α) + R = ITensor(promote_type(ElA, ElB), j, i, α) + + R .= NaN + @test any(isnan, R) + + R .= A .* B + @test !any(isnan, R) + @test reshape(array(R), 6, 2) ≈ + reshape(permutedims(array(A), (2, 1, 3)), 6, 2) * array(B) + + R .= NaN + @test any(isnan, R) + + R .= B .* A + @test !any(isnan, R) + @test reshape(array(R), 6, 2) ≈ + reshape(permutedims(array(A), (2, 1, 3)), 6, 2) * array(B) + end end diff --git a/test/base/test_itensor_slice.jl b/test/base/test_itensor_slice.jl index 7823ee32d4..28ef3571c5 100644 --- a/test/base/test_itensor_slice.jl +++ b/test/base/test_itensor_slice.jl @@ -5,55 +5,55 @@ import Random: seed! seed!(12345) @testset "Dense ITensor slicing functionality" begin - i = Index(2) - j = Index(3) - k = Index(4) - l = Index(5) - - A₀ = random_itensor(i, j, k, l) - a = randn(dim(l), dim(k)) - - A = copy(A₀) - A[l => 1:dim(l), i => 1, k => 1:dim(k), j => 2] = a - - for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k), ll in 1:dim(l) - if ii == 1 && jj == 2 - @test A[j => 2, l => ll, i => 1, k => kk] == a[ll, kk] - else - @test A[j => jj, l => ll, i => ii, k => kk] == A₀[j => jj, l => ll, i => ii, k => kk] + i = Index(2) + j = Index(3) + k = Index(4) + l = Index(5) + + A₀ = random_itensor(i, j, k, l) + a = randn(dim(l), dim(k)) + + A = copy(A₀) + A[l => 1:dim(l), i => 1, k => 1:dim(k), j => 2] = a + + for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k), ll in 1:dim(l) + if ii == 1 && jj == 2 + @test A[j => 2, l => ll, i => 1, k => kk] == a[ll, kk] + else + @test A[j => jj, l => ll, i => ii, k => kk] == A₀[j => jj, l => ll, i => ii, k => kk] + end end - end - A = copy(A₀) - A[1, 2, :, :] = transpose(a) + A = copy(A₀) + A[1, 2, :, :] = transpose(a) - for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k), ll in 1:dim(l) - if ii == 1 && jj == 2 - @test A[j => 2, l => ll, i => 1, k => kk] == a[ll, kk] - else - @test A[j => jj, l => ll, i => ii, k => kk] == A₀[j => jj, l => ll, i => ii, k => kk] + for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k), ll in 1:dim(l) + if ii == 1 && jj == 2 + @test A[j => 2, l => ll, i => 1, k => kk] == a[ll, kk] + else + @test A[j => jj, l => ll, i => ii, k => kk] == A₀[j => jj, l => ll, i => ii, k => kk] + end end - end - A = copy(A₀) - A[l => 1:(dim(l) - 1), i => 1, k => 1:(dim(k) - 1), j => 2] = a[1:(end - 1), 1:(end - 1)] + A = copy(A₀) + A[l => 1:(dim(l) - 1), i => 1, k => 1:(dim(k) - 1), j => 2] = a[1:(end - 1), 1:(end - 1)] - for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k), ll in 1:dim(l) - if ii == 1 && jj == 2 && kk ∈ 1:(dim(k) - 1) && ll ∈ 1:(dim(l) - 1) - @test A[j => 2, l => ll, i => 1, k => kk] == a[ll, kk] - else - @test A[j => jj, l => ll, i => ii, k => kk] == A₀[j => jj, l => ll, i => ii, k => kk] + for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k), ll in 1:dim(l) + if ii == 1 && jj == 2 && kk ∈ 1:(dim(k) - 1) && ll ∈ 1:(dim(l) - 1) + @test A[j => 2, l => ll, i => 1, k => kk] == a[ll, kk] + else + @test A[j => jj, l => ll, i => ii, k => kk] == A₀[j => jj, l => ll, i => ii, k => kk] + end end - end - A = copy(A₀) - A[k => :, i => 1, l => :, j => 2] = a' + A = copy(A₀) + A[k => :, i => 1, l => :, j => 2] = a' - for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k), ll in 1:dim(l) - if ii == 1 && jj == 2 - @test A[j => 2, l => ll, i => 1, k => kk] == a[ll, kk] - else - @test A[j => jj, l => ll, i => ii, k => kk] == A₀[j => jj, l => ll, i => ii, k => kk] + for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k), ll in 1:dim(l) + if ii == 1 && jj == 2 + @test A[j => 
2, l => ll, i => 1, k => kk] == a[ll, kk] + else + @test A[j => jj, l => ll, i => ii, k => kk] == A₀[j => jj, l => ll, i => ii, k => kk] + end end - end end diff --git a/test/base/test_ndtensors.jl b/test/base/test_ndtensors.jl index aa20e928b0..a4b333c2fe 100644 --- a/test/base/test_ndtensors.jl +++ b/test/base/test_ndtensors.jl @@ -3,25 +3,25 @@ using ITensors.NDTensors using Test @testset "NDTensors compatibility" begin - i = Index([QN(0) => 1, QN(1) => 1]) + i = Index([QN(0) => 1, QN(1) => 1]) - T = BlockSparseTensor(Float64, [Block(1, 1)], (i', dag(i))) - @test nnzblocks(T) == 1 - @test nzblocks(T) == [Block(1, 1)] + T = BlockSparseTensor(Float64, [Block(1, 1)], (i', dag(i))) + @test nnzblocks(T) == 1 + @test nzblocks(T) == [Block(1, 1)] - T = BlockSparseTensor(Float64, [Block(1, 1)], [i', dag(i)]) - @test nnzblocks(T) == 1 - @test nzblocks(T) == [Block(1, 1)] + T = BlockSparseTensor(Float64, [Block(1, 1)], [i', dag(i)]) + @test nnzblocks(T) == 1 + @test nzblocks(T) == [Block(1, 1)] - T = BlockSparseTensor(Float64, [Block(1, 1)], IndexSet(i', dag(i))) - @test nnzblocks(T) == 1 - @test nzblocks(T) == [Block(1, 1)] + T = BlockSparseTensor(Float64, [Block(1, 1)], IndexSet(i', dag(i))) + @test nnzblocks(T) == 1 + @test nzblocks(T) == [Block(1, 1)] - @testset "blockdim" begin - i = Index(2) - @test_throws ErrorException blockdim(i, Block(1)) - @test_throws ErrorException blockdim(i, 1) - @test_throws ErrorException blockdim(1, Block(1)) - @test_throws ErrorException blockdim(1, 1) - end + @testset "blockdim" begin + i = Index(2) + @test_throws ErrorException blockdim(i, Block(1)) + @test_throws ErrorException blockdim(i, 1) + @test_throws ErrorException blockdim(1, Block(1)) + @test_throws ErrorException blockdim(1, 1) + end end diff --git a/test/base/test_not.jl b/test/base/test_not.jl index 1efa534b04..256d073146 100644 --- a/test/base/test_not.jl +++ b/test/base/test_not.jl @@ -1,53 +1,53 @@ using ITensors, Test @testset "not" begin - i = Index(2, "i") - j = Index(2, "j") - k = Index(2, "k") + i = Index(2, "i") + j = Index(2, "j") + k = Index(2, "k") - A = random_itensor(i, j, k') + A = random_itensor(i, j, k') - Ap = prime(A, not("j")) + Ap = prime(A, not("j")) - @test hassameinds(Ap, (i', j, k'')) + @test hassameinds(Ap, (i', j, k'')) - Ap = prime(A; tags=(!ts"j")) + Ap = prime(A; tags = (!ts"j")) - @test hassameinds(Ap, (i', j, k'')) + @test hassameinds(Ap, (i', j, k'')) - At = addtags(A, "x", not("k")) + At = addtags(A, "x", not("k")) - @test hassameinds(At, (addtags(i, "x"), addtags(j, "x"), k')) + @test hassameinds(At, (addtags(i, "x"), addtags(j, "x"), k')) - Ap2 = prime(A, 2, not(i)) + Ap2 = prime(A, 2, not(i)) - @test hassameinds(Ap2, (i, j'', k''')) + @test hassameinds(Ap2, (i, j'', k''')) - Ap2 = prime(A, 2; inds=(!i)) + Ap2 = prime(A, 2; inds = (!i)) - @test hassameinds(Ap2, (i, j'', k''')) + @test hassameinds(Ap2, (i, j'', k''')) - Ap3 = prime(A, 3, not(i, k')) + Ap3 = prime(A, 3, not(i, k')) - @test hassameinds(Ap3, (i, j''', k')) + @test hassameinds(Ap3, (i, j''', k')) - Ap3 = prime(A, 3, !(i, k')) + Ap3 = prime(A, 3, !(i, k')) - @test hassameinds(Ap3, (i, j''', k')) + @test hassameinds(Ap3, (i, j''', k')) - At2 = settags(A, "y", not(IndexSet(j, k'))) + At2 = settags(A, "y", not(IndexSet(j, k'))) - @test hassameinds(At2, (settags(i, "y"), j, k')) + @test hassameinds(At2, (settags(i, "y"), j, k')) - At2 = settags(A, "y"; inds=(!IndexSet(j, k'))) + At2 = settags(A, "y"; inds = (!IndexSet(j, k'))) - @test hassameinds(At2, (settags(i, "y"), j, k')) + @test hassameinds(At2, 
(settags(i, "y"), j, k')) - B = filterinds(A; plev=not(0)) + B = filterinds(A; plev = not(0)) - @test hassameinds(B, (k',)) + @test hassameinds(B, (k',)) - @test_throws MethodError !"x" + @test_throws MethodError !"x" - @test_throws MethodError !1 + @test_throws MethodError !1 end diff --git a/test/base/test_oneitensor.jl b/test/base/test_oneitensor.jl index 049743d3cf..bb3c5a9a91 100644 --- a/test/base/test_oneitensor.jl +++ b/test/base/test_oneitensor.jl @@ -2,17 +2,17 @@ using ITensors using Test @testset "OneITensor" begin - let i = Index(2), it = ITensor(i), OneITensor = ITensors.OneITensor - @test OneITensor() isa OneITensor - @test inds(OneITensor()) == () - @test eltype(OneITensor()) <: Bool - @test isone(dim(OneITensor())) - @test ITensors.isoneitensor(OneITensor()) - @test !ITensors.isoneitensor(it) - @test dag(OneITensor()) == OneITensor() - @test OneITensor() * it == it - @test it * OneITensor() == it - @test *(OneITensor()) == OneITensor() - @test contract([it, OneITensor(), OneITensor()]) == it - end + let i = Index(2), it = ITensor(i), OneITensor = ITensors.OneITensor + @test OneITensor() isa OneITensor + @test inds(OneITensor()) == () + @test eltype(OneITensor()) <: Bool + @test isone(dim(OneITensor())) + @test ITensors.isoneitensor(OneITensor()) + @test !ITensors.isoneitensor(it) + @test dag(OneITensor()) == OneITensor() + @test OneITensor() * it == it + @test it * OneITensor() == it + @test *(OneITensor()) == OneITensor() + @test contract([it, OneITensor(), OneITensor()]) == it + end end diff --git a/test/base/test_qn.jl b/test/base/test_qn.jl index 37bfb75663..e37ef643b6 100644 --- a/test/base/test_qn.jl +++ b/test/base/test_qn.jl @@ -3,152 +3,152 @@ using ITensors, Test import ITensors: nactive @testset "QN" begin - @testset "QNVal Basics" begin - qv = ITensors.QNVal() - @test !isactive(qv) - @test qv == zero(ITensors.QNVal) - - qv = ITensors.QNVal("Sz", 0) - @test ITensors.name(qv) == ITensors.SmallString("Sz") - @test val(qv) == 0 - @test modulus(qv) == 1 - @test isactive(qv) - - qv = ITensors.QNVal("A", 1, 2) - @test ITensors.name(qv) == ITensors.SmallString("A") - @test val(qv) == 1 - @test modulus(qv) == 2 - @test !isfermionic(qv) - - qv = ITensors.QNVal("Nf", 1, -1) - @test ITensors.name(qv) == ITensors.SmallString("Nf") - @test val(qv) == 1 - @test modulus(qv) == -1 - @test isfermionic(qv) - - qv = zero(ITensors.QNVal("Sz", 5)) - @test ITensors.name(qv) == ITensors.SmallString("Sz") - @test val(qv) == 0 - @test modulus(qv) == 1 - @test isactive(qv) - end - - @testset "QN Basics" begin - q = QN() - @test length(sprint(show, q)) > 1 - - q = QN(("Sz", 1)) - @test length(sprint(show, q)) > 1 - @test isactive(q[1]) - @test val(q, "Sz") == 1 - @test !isfermionic(q) - - q = QN("Sz", 1) - @test length(sprint(show, q)) > 1 - @test isactive(q[1]) - @test val(q, "Sz") == 1 - @test !isfermionic(q) - - q = QN("P", 1, 2) - @test length(sprint(show, q)) > 1 - @test isactive(q[1]) - @test val(q, "P") == 1 - @test modulus(q, "P") == 2 - @test nactive(q) == 1 - - q = QN(("A", 1), ("B", 2)) - @test isactive(q[1]) - @test isactive(q[2]) - @test val(q, "A") == 1 - @test val(q, "B") == 2 - @test modulus(q, "A") == 1 - @test modulus(q, "B") == 1 - - q = QN(("B", 2), ("A", 1)) - @test val(q, "A") == 1 - @test val(q, "B") == 2 - @test nactive(q) == 2 - - q = QN(("A", 1), ("B", 2), ("C", 3), ("D", 4)) - @test nactive(q) == 4 - - @test_throws BoundsError begin - q = QN(("A", 1), ("B", 2), ("C", 3), ("D", 4), ("E", 5)) + @testset "QNVal Basics" begin + qv = ITensors.QNVal() + @test 
!isactive(qv) + @test qv == zero(ITensors.QNVal) + + qv = ITensors.QNVal("Sz", 0) + @test ITensors.name(qv) == ITensors.SmallString("Sz") + @test val(qv) == 0 + @test modulus(qv) == 1 + @test isactive(qv) + + qv = ITensors.QNVal("A", 1, 2) + @test ITensors.name(qv) == ITensors.SmallString("A") + @test val(qv) == 1 + @test modulus(qv) == 2 + @test !isfermionic(qv) + + qv = ITensors.QNVal("Nf", 1, -1) + @test ITensors.name(qv) == ITensors.SmallString("Nf") + @test val(qv) == 1 + @test modulus(qv) == -1 + @test isfermionic(qv) + + qv = zero(ITensors.QNVal("Sz", 5)) + @test ITensors.name(qv) == ITensors.SmallString("Sz") + @test val(qv) == 0 + @test modulus(qv) == 1 + @test isactive(qv) + end + + @testset "QN Basics" begin + q = QN() + @test length(sprint(show, q)) > 1 + + q = QN(("Sz", 1)) + @test length(sprint(show, q)) > 1 + @test isactive(q[1]) + @test val(q, "Sz") == 1 + @test !isfermionic(q) + + q = QN("Sz", 1) + @test length(sprint(show, q)) > 1 + @test isactive(q[1]) + @test val(q, "Sz") == 1 + @test !isfermionic(q) + + q = QN("P", 1, 2) + @test length(sprint(show, q)) > 1 + @test isactive(q[1]) + @test val(q, "P") == 1 + @test modulus(q, "P") == 2 + @test nactive(q) == 1 + + q = QN(("A", 1), ("B", 2)) + @test isactive(q[1]) + @test isactive(q[2]) + @test val(q, "A") == 1 + @test val(q, "B") == 2 + @test modulus(q, "A") == 1 + @test modulus(q, "B") == 1 + + q = QN(("B", 2), ("A", 1)) + @test val(q, "A") == 1 + @test val(q, "B") == 2 + @test nactive(q) == 2 + + q = QN(("A", 1), ("B", 2), ("C", 3), ("D", 4)) + @test nactive(q) == 4 + + @test_throws BoundsError begin + q = QN(("A", 1), ("B", 2), ("C", 3), ("D", 4), ("E", 5)) + end + end + + @testset "Comparison" begin + @test QN() == QN() + @test QN("A", 1) == QN("A", 1) + @test QN(("A", 1), ("B", 3)) == QN(("A", 1), ("B", 3)) + @test QN(("A", 1), ("B", 3)) == QN(("B", 3), ("A", 1)) + + # Zero value and missing sector treated the same: + @test QN(("A", 0), ("B", 3)) == QN("B", 3) + @test QN(("B", 3), ("A", 0)) == QN("B", 3) + end + + @testset "Arithmetic" begin + @test QN("Sz", 1) + QN() == QN("Sz", 1) + @test QN("Sz", 1) + QN("Sz", 2) == QN("Sz", 3) + @test QN("Sz", 1) + QN("Sz", -2) == QN("Sz", -1) + + @test QN(("A", 1), ("Sz", 0)) + QN(("A", 0), ("Sz", 1)) == QN(("A", 1), ("Sz", 1)) + + @test QN("P", 0, 2) + QN("P", 1, 2) == QN("P", 1, 2) + @test QN("P", 1, 2) + QN("P", 1, 2) == QN("P", 0, 2) + + # Arithmetic involving mixed-label QNs + @test QN() - QN("Sz", 2) == QN("Sz", -2) + @test QN("Sz", 2) - QN() == QN("Sz", 2) + @test QN() - QN(("Sz", 2), ("N", 1)) == QN(("Sz", -2), ("N", -1)) + @test QN("N", 1) - QN("Sz", 2) == QN(("N", 1), ("Sz", -2)) + + # Regression test for https://github.com/ITensor/ITensors.jl/issues/1658 + @test QN("S3", 3) + QN(("S1", 1), ("S2", 2)) == QN(("S1", 1), ("S2", 2), ("S3", 3)) + @test QN(("S1", 1), ("S2", 2)) + QN("S3", 3) == QN(("S1", 1), ("S2", 2), ("S3", 3)) + end + + @testset "Ordering" begin + z = QN() + qa = QN(("Sz", 1), ("Nf", 1)) + qb = QN(("Sz", 0), ("Nf", 2)) + qc = QN(("Sz", 1), ("Nf", 2)) + qd = QN(("Sz", 1), ("Nf", 2)) + qe = QN(("Sz", -1), ("Nf", -2)) + + @test !(z < z) + @test !(qa < z) + @test (z < qa) + @test (z < qb) + @test !(qb < z) + @test (z < qc) + @test !(qc < z) + @test (z < qd) + @test !(qd < z) + @test !(z < qe) + @test (qe < z) + + @test !(qa > qb) + @test qb > qa + @test !(qb == qa) + @test (qb < qc) + @test !(qc < qb) + @test !(qc == qb) + @test (qc == qd) + @test !(qc < qd) + @test !(qd < qc) + end + + @testset "Hashing" begin + @test hash(QN(("Sz", 0))) == hash(QN()) + @test 
hash(QN("Sz", 0)) == hash(QN("N", 0)) + @test hash(QN(("Sz", 1), ("N", 2))) == hash(QN(("N", 2), ("Sz", 1))) + end + + @testset "Negative value for mod > 1" begin + @test QN("T", -1, 3) == QN("T", 2, 3) + @test QN("T", -2, 3) == QN("T", 1, 3) + @test QN("T", -3, 3) == QN("T", 0, 3) end - end - - @testset "Comparison" begin - @test QN() == QN() - @test QN("A", 1) == QN("A", 1) - @test QN(("A", 1), ("B", 3)) == QN(("A", 1), ("B", 3)) - @test QN(("A", 1), ("B", 3)) == QN(("B", 3), ("A", 1)) - - # Zero value and missing sector treated the same: - @test QN(("A", 0), ("B", 3)) == QN("B", 3) - @test QN(("B", 3), ("A", 0)) == QN("B", 3) - end - - @testset "Arithmetic" begin - @test QN("Sz", 1) + QN() == QN("Sz", 1) - @test QN("Sz", 1) + QN("Sz", 2) == QN("Sz", 3) - @test QN("Sz", 1) + QN("Sz", -2) == QN("Sz", -1) - - @test QN(("A", 1), ("Sz", 0)) + QN(("A", 0), ("Sz", 1)) == QN(("A", 1), ("Sz", 1)) - - @test QN("P", 0, 2) + QN("P", 1, 2) == QN("P", 1, 2) - @test QN("P", 1, 2) + QN("P", 1, 2) == QN("P", 0, 2) - - # Arithmetic involving mixed-label QNs - @test QN() - QN("Sz", 2) == QN("Sz", -2) - @test QN("Sz", 2) - QN() == QN("Sz", 2) - @test QN() - QN(("Sz", 2), ("N", 1)) == QN(("Sz", -2), ("N", -1)) - @test QN("N", 1) - QN("Sz", 2) == QN(("N", 1), ("Sz", -2)) - - # Regression test for https://github.com/ITensor/ITensors.jl/issues/1658 - @test QN("S3", 3) + QN(("S1", 1), ("S2", 2)) == QN(("S1", 1), ("S2", 2), ("S3", 3)) - @test QN(("S1", 1), ("S2", 2)) + QN("S3", 3) == QN(("S1", 1), ("S2", 2), ("S3", 3)) - end - - @testset "Ordering" begin - z = QN() - qa = QN(("Sz", 1), ("Nf", 1)) - qb = QN(("Sz", 0), ("Nf", 2)) - qc = QN(("Sz", 1), ("Nf", 2)) - qd = QN(("Sz", 1), ("Nf", 2)) - qe = QN(("Sz", -1), ("Nf", -2)) - - @test !(z < z) - @test !(qa < z) - @test (z < qa) - @test (z < qb) - @test !(qb < z) - @test (z < qc) - @test !(qc < z) - @test (z < qd) - @test !(qd < z) - @test !(z < qe) - @test (qe < z) - - @test !(qa > qb) - @test qb > qa - @test !(qb == qa) - @test (qb < qc) - @test !(qc < qb) - @test !(qc == qb) - @test (qc == qd) - @test !(qc < qd) - @test !(qd < qc) - end - - @testset "Hashing" begin - @test hash(QN(("Sz", 0))) == hash(QN()) - @test hash(QN("Sz", 0)) == hash(QN("N", 0)) - @test hash(QN(("Sz", 1), ("N", 2))) == hash(QN(("N", 2), ("Sz", 1))) - end - - @testset "Negative value for mod > 1" begin - @test QN("T", -1, 3) == QN("T", 2, 3) - @test QN("T", -2, 3) == QN("T", 1, 3) - @test QN("T", -3, 3) == QN("T", 0, 3) - end end diff --git a/test/base/test_qncombiner.jl b/test/base/test_qncombiner.jl index 093dce60e4..15076c7921 100644 --- a/test/base/test_qncombiner.jl +++ b/test/base/test_qncombiner.jl @@ -1,12 +1,12 @@ using ITensors, Test @testset "QN Combiner" begin - d = 1 - i = Index([QN(0) => d, QN(0) => d]) - A = random_itensor(i) - C = combiner(i) - AC = A * C + d = 1 + i = Index([QN(0) => d, QN(0) => d]) + A = random_itensor(i) + C = combiner(i) + AC = A * C - à = AC * dag(C) - @test à ≈ A + à = AC * dag(C) + @test à ≈ A end diff --git a/test/base/test_qndiagitensor.jl b/test/base/test_qndiagitensor.jl index f7ce83a96f..ce23766482 100644 --- a/test/base/test_qndiagitensor.jl +++ b/test/base/test_qndiagitensor.jl @@ -1,134 +1,134 @@ using ITensors, Test @testset "diag_itensor (DiagBlockSparse)" begin - @testset "diag_itensor get and set elements" begin - i = Index(QN(0) => 2, QN(1) => 3; tags="i") + @testset "diag_itensor get and set elements" begin + i = Index(QN(0) => 2, QN(1) => 3; tags = "i") - D = diag_itensor(QN(), i, dag(i')) + D = diag_itensor(QN(), i, dag(i')) - for b 
in eachnzblock(D) - @test flux(D, b) == QN() - end + for b in eachnzblock(D) + @test flux(D, b) == QN() + end - D[i => 1, i' => 1] = 1 - D[i => 2, i' => 2] = 2 - D[i => 3, i' => 3] = 3 - D[i => 4, i' => 4] = 4 - D[i => 5, i' => 5] = 5 + D[i => 1, i' => 1] = 1 + D[i => 2, i' => 2] = 2 + D[i => 3, i' => 3] = 3 + D[i => 4, i' => 4] = 4 + D[i => 5, i' => 5] = 5 - @test_throws ErrorException D[i => 1, i' => 2] = 2.0 + @test_throws ErrorException D[i => 1, i' => 2] = 2.0 - @test D[i => 1, i' => 1] == 1 - @test D[i => 2, i' => 2] == 2 - @test D[i => 3, i' => 3] == 3 - @test D[i => 4, i' => 4] == 4 - @test D[i => 5, i' => 5] == 5 - end + @test D[i => 1, i' => 1] == 1 + @test D[i => 2, i' => 2] == 2 + @test D[i => 3, i' => 3] == 3 + @test D[i => 4, i' => 4] == 4 + @test D[i => 5, i' => 5] == 5 + end - @testset "diag_itensor Tuple constructor" begin - i = Index(QN(0) => 2, QN(1) => 3; tags="i") + @testset "diag_itensor Tuple constructor" begin + i = Index(QN(0) => 2, QN(1) => 3; tags = "i") - D = diag_itensor((i, dag(i'))) + D = diag_itensor((i, dag(i'))) - for b in eachnzblock(D) - @test flux(D, b) == QN() + for b in eachnzblock(D) + @test flux(D, b) == QN() + end end - end - @testset "delta" begin - i = Index(QN(0) => 2, QN(1) => 3; tags="i") - ĩ = sim(i; tags="i_sim") - j = Index(QN(0) => 2, QN(1) => 3, QN(2) => 4; tags="j") + @testset "delta" begin + i = Index(QN(0) => 2, QN(1) => 3; tags = "i") + ĩ = sim(i; tags = "i_sim") + j = Index(QN(0) => 2, QN(1) => 3, QN(2) => 4; tags = "j") - A = random_itensor(QN(), i, dag(j)) + A = random_itensor(QN(), i, dag(j)) - δiĩ = δ(dag(i), ĩ) + δiĩ = δ(dag(i), ĩ) - @test storage(δiĩ) isa NDTensors.DiagBlockSparse{ElT,ElT} where {ElT<:Number} + @test storage(δiĩ) isa NDTensors.DiagBlockSparse{ElT, ElT} where {ElT <: Number} - B = A * δiĩ + B = A * δiĩ - A = permute(A, i, j) - B = permute(B, ĩ, j) + A = permute(A, i, j) + B = permute(B, ĩ, j) + + @test norm(dense(NDTensors.tensor(A)) - dense(NDTensors.tensor(B))) ≈ 0 + end - @test norm(dense(NDTensors.tensor(A)) - dense(NDTensors.tensor(B))) ≈ 0 - end + @testset "delta Tuple constructor" begin + i = Index(QN(0) => 2, QN(1) => 3; tags = "i") + ĩ = sim(i; tags = "i_sim") - @testset "delta Tuple constructor" begin - i = Index(QN(0) => 2, QN(1) => 3; tags="i") - ĩ = sim(i; tags="i_sim") + δiĩ = δ((dag(i), ĩ)) - δiĩ = δ((dag(i), ĩ)) + for b in eachnzblock(δiĩ) + @test flux(δiĩ, b) == QN() + end + end + + @testset "denseblocks: convert DiagBlockSparse to BlockSparse" begin + i = Index([QN(0) => 2, QN(1) => 3]) + A = diag_itensor(i', dag(i)) + randn!(ITensors.data(A)) + B = denseblocks(A) + for n in 1:dim(i) + @test A[n, n] == B[n, n] + end + @test dense(A) == dense(B) + end - for b in eachnzblock(δiĩ) - @test flux(δiĩ, b) == QN() + @testset "Regression test for QN delta contraction bug" begin + # http://itensor.org/support/2814/block-sparse-itensor-wrong-results-multiplying-delta-tensor + s = Index([QN(("N", i, 1)) => 1 for i in 1:2]) + l = dag(addtags(s, "left")) + r = addtags(s, "right") + u = addtags(s, "up") + d = dag(addtags(s, "down")) + A = ITensor(l, r, u, d) + A[1, 1, 1, 1] = 1.0 + A[1, 1, 2, 2] = 1.0 + A[2, 2, 1, 1] = 1.0 + A[2, 2, 2, 2] = 1.0 + δlr = δ(dag(l), dag(r)) + δud = δ(dag(u), dag(d)) + A1 = A * δlr + denseA1 = dense(A) * dense(δlr) + A2 = A1 * δud + denseA2 = denseA1 * dense(δud) + @test dense(A1) ≈ denseA1 + @test dense(A2) ≈ denseA2 + @test A2[] ≈ 4 end - end - - @testset "denseblocks: convert DiagBlockSparse to BlockSparse" begin - i = Index([QN(0) => 2, QN(1) => 3]) - A = diag_itensor(i', 
dag(i)) - randn!(ITensors.data(A)) - B = denseblocks(A) - for n in 1:dim(i) - @test A[n, n] == B[n, n] + + @testset "Regression test for QN delta dag, contract, and norm" begin + i = Index([QN("Sz", 0) => 1, QN("Sz", 1) => 1]) + x = δ(i, dag(i)') + + @test isone(x[1, 1]) + @test isone(dag(x)[1, 1]) + + c = 2 + 3im + x *= c + + @test x[1, 1] == c + @test dag(x)[1, 1] == conj(c) + @test (x * dag(x))[] == 2 * abs2(c) + @test (x * dag(x))[] ≈ norm(x)^2 + end + + @testset "Regression test for printing a QN Diag ITensor" begin + # https://github.com/ITensor/NDTensors.jl/issues/61 + i = Index([QN() => 2]) + A = random_itensor(i', dag(i)) + U, S, V = svd(A, i') + # Test printing S + io = IOBuffer() + show(io, S) + sS = String(take!(io)) + @test sS isa String + # Test printing U + io = IOBuffer() + show(io, U) + sU = String(take!(io)) + @test sU isa String end - @test dense(A) == dense(B) - end - - @testset "Regression test for QN delta contraction bug" begin - # http://itensor.org/support/2814/block-sparse-itensor-wrong-results-multiplying-delta-tensor - s = Index([QN(("N", i, 1)) => 1 for i in 1:2]) - l = dag(addtags(s, "left")) - r = addtags(s, "right") - u = addtags(s, "up") - d = dag(addtags(s, "down")) - A = ITensor(l, r, u, d) - A[1, 1, 1, 1] = 1.0 - A[1, 1, 2, 2] = 1.0 - A[2, 2, 1, 1] = 1.0 - A[2, 2, 2, 2] = 1.0 - δlr = δ(dag(l), dag(r)) - δud = δ(dag(u), dag(d)) - A1 = A * δlr - denseA1 = dense(A) * dense(δlr) - A2 = A1 * δud - denseA2 = denseA1 * dense(δud) - @test dense(A1) ≈ denseA1 - @test dense(A2) ≈ denseA2 - @test A2[] ≈ 4 - end - - @testset "Regression test for QN delta dag, contract, and norm" begin - i = Index([QN("Sz", 0) => 1, QN("Sz", 1) => 1]) - x = δ(i, dag(i)') - - @test isone(x[1, 1]) - @test isone(dag(x)[1, 1]) - - c = 2 + 3im - x *= c - - @test x[1, 1] == c - @test dag(x)[1, 1] == conj(c) - @test (x * dag(x))[] == 2 * abs2(c) - @test (x * dag(x))[] ≈ norm(x)^2 - end - - @testset "Regression test for printing a QN Diag ITensor" begin - # https://github.com/ITensor/NDTensors.jl/issues/61 - i = Index([QN() => 2]) - A = random_itensor(i', dag(i)) - U, S, V = svd(A, i') - # Test printing S - io = IOBuffer() - show(io, S) - sS = String(take!(io)) - @test sS isa String - # Test printing U - io = IOBuffer() - show(io, U) - sU = String(take!(io)) - @test sU isa String - end end diff --git a/test/base/test_qnindex.jl b/test/base/test_qnindex.jl index 9ba14f5eea..59292e272a 100644 --- a/test/base/test_qnindex.jl +++ b/test/base/test_qnindex.jl @@ -3,68 +3,68 @@ using ITensors, Test import ITensors: In, Out, Neither @testset "QN Index" begin - @testset "hasqns function" begin - i = Index(4, "i") - @test hasqns(i) == false - j = Index(QN(0) => 1, QN(1) => 1) - @test hasqns(j) == true - end + @testset "hasqns function" begin + i = Index(4, "i") + @test hasqns(i) == false + j = Index(QN(0) => 1, QN(1) => 1) + @test hasqns(j) == true + end - @testset "Array of QN Constructor" begin - i = Index([QN(0) => 1, QN(1) => 2], "i") - @test hasqns(i) - @test dim(i) == 3 - @test hastags(i, "i") - end + @testset "Array of QN Constructor" begin + i = Index([QN(0) => 1, QN(1) => 2], "i") + @test hasqns(i) + @test dim(i) == 3 + @test hastags(i, "i") + end - @testset "Vararg Constructor" begin - i = Index(QN(0) => 1, QN(1) => 2; tags="i") - @test hasqns(i) - @test dim(i) == 3 - @test hastags(i, "i") - @test dir(i) == Out - @test dir(i => 2) == Out + @testset "Vararg Constructor" begin + i = Index(QN(0) => 1, QN(1) => 2; tags = "i") + @test hasqns(i) + @test dim(i) == 3 + @test hastags(i, "i") + @test 
dir(i) == Out + @test dir(i => 2) == Out - j = Index(QN(0) => 1, QN(1) => 2; tags="j", dir=In) - @test hasqns(j) - @test dim(j) == 3 - @test hastags(j, "j") - @test dir(j) == In - @test dir(j => 2) == In - end + j = Index(QN(0) => 1, QN(1) => 2; tags = "j", dir = In) + @test hasqns(j) + @test dim(j) == 3 + @test hastags(j, "j") + @test dir(j) == In + @test dir(j => 2) == In + end - @testset "flux and qn" begin - i = dag(Index([QN(0) => 2, QN(1) => 2], "i")) + @testset "flux and qn" begin + i = dag(Index([QN(0) => 2, QN(1) => 2], "i")) - @test flux(i => 1) == QN(0) - @test flux(i => 2) == QN(0) - @test flux(i => 3) == QN(-1) - @test flux(i => 4) == QN(-1) - @test flux(i => Block(1)) == QN(0) - @test flux(i => Block(2)) == QN(-1) + @test flux(i => 1) == QN(0) + @test flux(i => 2) == QN(0) + @test flux(i => 3) == QN(-1) + @test flux(i => 4) == QN(-1) + @test flux(i => Block(1)) == QN(0) + @test flux(i => Block(2)) == QN(-1) - @test qn(i => 1) == QN(0) - @test qn(i => 2) == QN(0) - @test qn(i => 3) == QN(1) - @test qn(i => 4) == QN(1) - @test qn(i => Block(1)) == QN(0) - @test qn(i => Block(2)) == QN(1) - end + @test qn(i => 1) == QN(0) + @test qn(i => 2) == QN(0) + @test qn(i => 3) == QN(1) + @test qn(i => 4) == QN(1) + @test qn(i => Block(1)) == QN(0) + @test qn(i => Block(2)) == QN(1) + end - @testset "directsum" begin - i = Index([QN(0) => 1, QN(1) => 2], "i") - j = Index([QN(2) => 3, QN(3) => 4], "j") - ij = ITensors.directsum(i, j; tags="test") - @test dim(ij) == dim(i) + dim(j) - @test hastags(ij, "test") - @test flux(ij, Block(1)) == QN(0) - @test flux(ij, Block(2)) == QN(1) - @test flux(ij, Block(3)) == QN(2) - @test flux(ij, Block(4)) == QN(3) - @test dim(ij, Block(1)) == 1 - @test dim(ij, Block(2)) == 2 - @test dim(ij, Block(3)) == 3 - @test dim(ij, Block(4)) == 4 - @test_throws ErrorException ITensors.directsum(i, dag(j)) - end + @testset "directsum" begin + i = Index([QN(0) => 1, QN(1) => 2], "i") + j = Index([QN(2) => 3, QN(3) => 4], "j") + ij = ITensors.directsum(i, j; tags = "test") + @test dim(ij) == dim(i) + dim(j) + @test hastags(ij, "test") + @test flux(ij, Block(1)) == QN(0) + @test flux(ij, Block(2)) == QN(1) + @test flux(ij, Block(3)) == QN(2) + @test flux(ij, Block(4)) == QN(3) + @test dim(ij, Block(1)) == 1 + @test dim(ij, Block(2)) == 2 + @test dim(ij, Block(3)) == 3 + @test dim(ij, Block(4)) == 4 + @test_throws ErrorException ITensors.directsum(i, dag(j)) + end end diff --git a/test/base/test_smallstring.jl b/test/base/test_smallstring.jl index a56d332f97..d8b8143455 100644 --- a/test/base/test_smallstring.jl +++ b/test/base/test_smallstring.jl @@ -6,70 +6,70 @@ using Test import ITensors: SmallString, Tag, isint, isnull, IntChar @testset "SmallString" begin - @testset "ctors" begin - s = SmallString() - @test isnull(s) - end + @testset "ctors" begin + s = SmallString() + @test isnull(s) + end - @testset "setindex" begin - s = SmallString() - @test isnull(s) - t = setindex(s, IntChar(1), 1) - @test !isnull(t) - end + @testset "setindex" begin + s = SmallString() + @test isnull(s) + t = setindex(s, IntChar(1), 1) + @test !isnull(t) + end - @testset "comparison" begin - u = SmallString("1") - t = SmallString("1") - @test u == t - t = SmallString("2") - @test u < t - end + @testset "comparison" begin + u = SmallString("1") + t = SmallString("1") + @test u == t + t = SmallString("2") + @test u < t + end - @testset "Convert to String" begin - s = SmallString("abc") - @test typeof(s) == SmallString + @testset "Convert to String" begin + s = SmallString("abc") + @test 
typeof(s) == SmallString - sg = String(s) - for n in 1:length(sg) - @test sg[n] == convert(Char, s[n]) - end + sg = String(s) + for n in 1:length(sg) + @test sg[n] == convert(Char, s[n]) + end - s = SmallString("") - sg = String(s) - @test sg == "" - end + s = SmallString("") + sg = String(s) + @test sg == "" + end - @testset "isint" begin - i = SmallString("123") - @test isint(i) == true + @testset "isint" begin + i = SmallString("123") + @test isint(i) == true - s = SmallString("abc") - @test isint(s) == false + s = SmallString("abc") + @test isint(s) == false - # Test maximum length - s = SmallString("12345678") - @test isint(s) == true - end + # Test maximum length + s = SmallString("12345678") + @test isint(s) == true + end - @testset "isless" begin - s1 = SmallString("ab") - s2 = SmallString("xy") - @test isless(s1, s2) == true - @test isless(s2, s1) == false - @test isless(s1, s1) == false - @test isless(s2, s2) == false - end + @testset "isless" begin + s1 = SmallString("ab") + s2 = SmallString("xy") + @test isless(s1, s2) == true + @test isless(s2, s1) == false + @test isless(s1, s1) == false + @test isless(s2, s2) == false + end - @testset "show" begin - t = Tag("") - @test sprint(show, t) == "" + @testset "show" begin + t = Tag("") + @test sprint(show, t) == "" - t = Tag("Red") - @test sprint(show, t) == "Red" + t = Tag("Red") + @test sprint(show, t) == "Red" - # Make sure to test maximum length tag - t = Tag("Electron") - @test sprint(show, t) == "Electron" - end + # Make sure to test maximum length tag + t = Tag("Electron") + @test sprint(show, t) == "Electron" + end end diff --git a/test/base/test_symmetrystyle.jl b/test/base/test_symmetrystyle.jl index 648e642c24..3488b531bf 100644 --- a/test/base/test_symmetrystyle.jl +++ b/test/base/test_symmetrystyle.jl @@ -3,46 +3,46 @@ using ITensors.NDTensors using Test @testset "SymmetryStyle trait" begin - i = Index(2) - iqn = Index([QN(0) => 1, QN(1) => 2]) + i = Index(2) + iqn = Index([QN(0) => 1, QN(1) => 2]) - @test @inferred(ITensors.symmetrystyle(i)) == ITensors.NonQN() - @test @inferred(ITensors.symmetrystyle((i,))) == ITensors.NonQN() - @test @inferred(ITensors.symmetrystyle([i])) == ITensors.NonQN() - @test @inferred(ITensors.symmetrystyle(i', i)) == ITensors.NonQN() - @test @inferred(ITensors.symmetrystyle((i', i))) == ITensors.NonQN() - @test @inferred(ITensors.symmetrystyle([i', i])) == ITensors.NonQN() - @test @inferred(ITensors.symmetrystyle(i'', i', i)) == ITensors.NonQN() - @test @inferred(ITensors.symmetrystyle((i'', i', i))) == ITensors.NonQN() - @test @inferred(ITensors.symmetrystyle([i'', i', i])) == ITensors.NonQN() - @test @inferred(ITensors.symmetrystyle(i''', i'', i', i)) == ITensors.NonQN() - @test @inferred(ITensors.symmetrystyle((i''', i'', i', i))) == ITensors.NonQN() - @test @inferred(ITensors.symmetrystyle([i''', i'', i', i])) == ITensors.NonQN() + @test @inferred(ITensors.symmetrystyle(i)) == ITensors.NonQN() + @test @inferred(ITensors.symmetrystyle((i,))) == ITensors.NonQN() + @test @inferred(ITensors.symmetrystyle([i])) == ITensors.NonQN() + @test @inferred(ITensors.symmetrystyle(i', i)) == ITensors.NonQN() + @test @inferred(ITensors.symmetrystyle((i', i))) == ITensors.NonQN() + @test @inferred(ITensors.symmetrystyle([i', i])) == ITensors.NonQN() + @test @inferred(ITensors.symmetrystyle(i'', i', i)) == ITensors.NonQN() + @test @inferred(ITensors.symmetrystyle((i'', i', i))) == ITensors.NonQN() + @test @inferred(ITensors.symmetrystyle([i'', i', i])) == ITensors.NonQN() + @test 
@inferred(ITensors.symmetrystyle(i''', i'', i', i)) == ITensors.NonQN() + @test @inferred(ITensors.symmetrystyle((i''', i'', i', i))) == ITensors.NonQN() + @test @inferred(ITensors.symmetrystyle([i''', i'', i', i])) == ITensors.NonQN() - @test @inferred(ITensors.symmetrystyle(iqn)) == ITensors.HasQNs() - @test @inferred(ITensors.symmetrystyle((iqn,))) == ITensors.HasQNs() - @test @inferred(ITensors.symmetrystyle([iqn])) == ITensors.HasQNs() - @test @inferred(ITensors.symmetrystyle(iqn', iqn)) == ITensors.HasQNs() - @test @inferred(ITensors.symmetrystyle((iqn', iqn))) == ITensors.HasQNs() - @test @inferred(ITensors.symmetrystyle([iqn', iqn])) == ITensors.HasQNs() - @test @inferred(ITensors.symmetrystyle(iqn'', iqn', iqn)) == ITensors.HasQNs() - @test @inferred(ITensors.symmetrystyle((iqn'', iqn', iqn))) == ITensors.HasQNs() - @test @inferred(ITensors.symmetrystyle([iqn'', iqn', iqn])) == ITensors.HasQNs() - @test @inferred(ITensors.symmetrystyle(iqn', i)) == ITensors.HasQNs() - @test @inferred(ITensors.symmetrystyle(i', i, iqn)) == ITensors.HasQNs() - @test @inferred(ITensors.symmetrystyle((i', i, iqn))) == ITensors.HasQNs() - @test @inferred(ITensors.SymmetryStyle, ITensors.symmetrystyle([i', i, iqn])) == - ITensors.HasQNs() + @test @inferred(ITensors.symmetrystyle(iqn)) == ITensors.HasQNs() + @test @inferred(ITensors.symmetrystyle((iqn,))) == ITensors.HasQNs() + @test @inferred(ITensors.symmetrystyle([iqn])) == ITensors.HasQNs() + @test @inferred(ITensors.symmetrystyle(iqn', iqn)) == ITensors.HasQNs() + @test @inferred(ITensors.symmetrystyle((iqn', iqn))) == ITensors.HasQNs() + @test @inferred(ITensors.symmetrystyle([iqn', iqn])) == ITensors.HasQNs() + @test @inferred(ITensors.symmetrystyle(iqn'', iqn', iqn)) == ITensors.HasQNs() + @test @inferred(ITensors.symmetrystyle((iqn'', iqn', iqn))) == ITensors.HasQNs() + @test @inferred(ITensors.symmetrystyle([iqn'', iqn', iqn])) == ITensors.HasQNs() + @test @inferred(ITensors.symmetrystyle(iqn', i)) == ITensors.HasQNs() + @test @inferred(ITensors.symmetrystyle(i', i, iqn)) == ITensors.HasQNs() + @test @inferred(ITensors.symmetrystyle((i', i, iqn))) == ITensors.HasQNs() + @test @inferred(ITensors.SymmetryStyle, ITensors.symmetrystyle([i', i, iqn])) == + ITensors.HasQNs() - A = random_itensor(i', dag(i)) - Aqn = random_itensor(iqn', dag(iqn)) + A = random_itensor(i', dag(i)) + Aqn = random_itensor(iqn', dag(iqn)) - @test @inferred(ITensors.SymmetryStyle, ITensors.symmetrystyle(A)) == ITensors.NonQN() - @test @inferred(ITensors.SymmetryStyle, ITensors.symmetrystyle(Aqn)) == ITensors.HasQNs() + @test @inferred(ITensors.SymmetryStyle, ITensors.symmetrystyle(A)) == ITensors.NonQN() + @test @inferred(ITensors.SymmetryStyle, ITensors.symmetrystyle(Aqn)) == ITensors.HasQNs() - T = Tensor(A) - Tqn = Tensor(Aqn) + T = Tensor(A) + Tqn = Tensor(Aqn) - @test @inferred(ITensors.symmetrystyle(T)) == ITensors.NonQN() - @test @inferred(ITensors.symmetrystyle(Tqn)) == ITensors.HasQNs() + @test @inferred(ITensors.symmetrystyle(T)) == ITensors.NonQN() + @test @inferred(ITensors.symmetrystyle(Tqn)) == ITensors.HasQNs() end diff --git a/test/base/test_tagset.jl b/test/base/test_tagset.jl index 794a64f258..4bbf0edead 100644 --- a/test/base/test_tagset.jl +++ b/test/base/test_tagset.jl @@ -1,145 +1,145 @@ using ITensors, Test @testset "TagSet" begin - ts = TagSet("t3,t2,t1") - ts2 = copy(ts) - @test ts == ts2 - @test hastags(ts, "t1") - @test hastags(ts, "t2") - @test hastags(ts, "t3") - @test hastags(ts, "t3,t1") - @test !hastags(ts, "t4") - @test TagSet(ts) === ts - - 
@test ITensors.commontags() == ts"" - @test ITensors.commontags(ts"a,b", ts"a,c") == ts"a" - @test ITensors.commontags(Index(2, "a,b,x"), Index(3, "x,a,c"), Index(4, "x,a,z,w")) == - ts"a,x" - - t1 = TagSet("t1") - t2 = TagSet("t2") - t3 = TagSet("t3") - @test ts[1] == t1[1] - @test ts[2] == t2[1] - @test ts[3] == t3[1] - - @testset "Empty TagSet" begin - ts1 = TagSet() - @test length(ts1) == 0 - - ts2 = TagSet("") - @test ts2 == ts1 - @test length(ts2) == 0 - end - - @testset "Ignore Whitespace" begin - ts = TagSet(" aaa , bb bb , ccc ") - @test hastags(ts, " aaa ") - @test hastags(ts, "aaa") - @test hastags(ts, " aa a ") - @test hastags(ts, "bbbb") - end - - @testset "Remove tags" begin - ts1 = TagSet("x,y,z") - ts2 = TagSet("x,z") - @test removetags(ts1, "y") == ts2 - end - - @testset "Unicode tags" begin - ts = TagSet("α") - @test length(ts) == 1 - @test hastags(ts, "α") - @test ts[1] == ITensors.SmallString("α") - - ts = TagSet("α,β") - @test length(ts) == 2 - @test hastags(ts, "β") - @test hastags(ts, "α") - @test ts[1] == ITensors.SmallString("α") - @test ts[2] == ITensors.SmallString("β") - - ts = TagSet("αβγδϵζηθ,ijklmnop,qrstuvwx,ΑΒΓΔΕΖΗΘ") - @test length(ts) == 4 - @test hastags(ts, "αβγδϵζηθ") - @test hastags(ts, "ijklmnop") - @test hastags(ts, "qrstuvwx") - @test hastags(ts, "ΑΒΓΔΕΖΗΘ") - @test ts[1] == ITensors.SmallString("ijklmnop") - @test ts[2] == ITensors.SmallString("qrstuvwx") - @test ts[3] == ITensors.SmallString("ΑΒΓΔΕΖΗΘ") - @test ts[4] == ITensors.SmallString("αβγδϵζηθ") - end - - @testset "Tag long" begin - ts = TagSet("abcdefghijklmnop,ijklmnopqabcdefg") - @test length(ts) == 2 - @test hastags(ts, "abcdefghijklmnop") - @test hastags(ts, "ijklmnopqabcdefg") - end - - @testset "Tag too long" begin - @test !ITensors.using_strict_tags() - @test TagSet("ijklmnopqabcdefgh") == TagSet("ijklmnopqabcdefg") - @test TagSet("abcd,ijklmnopqabcdefgh") == TagSet("abcd,ijklmnopqabcdefg") - @test TagSet("ijklmnopqabcdefgh,abcd") == TagSet("abcd,ijklmnopqabcdefg") - ITensors.set_strict_tags!(true) - @test ITensors.using_strict_tags() - @test_throws ErrorException TagSet("ijklmnopqabcdefgh") - ITensors.set_strict_tags!(false) - end - - @testset "Too many tags" begin - @test !ITensors.using_strict_tags() - @test TagSet("a,b,c,d,e,f") == TagSet("a,b,c,d") - @test addtags(TagSet("a,b,c,d"), "e") == TagSet("a,b,c,d") - @test replacetags(TagSet("a,b,c,d"), "d", "e,f") == TagSet("a,b,c,e") - ITensors.set_strict_tags!(true) - @test ITensors.using_strict_tags() - @test_throws ErrorException TagSet("a,b,c,d,e,f") - @test_throws ErrorException addtags(TagSet("a,b,c,d"), "e") - @test_throws ErrorException replacetags(TagSet("a,b,c,d"), "d", "e,f") - ITensors.set_strict_tags!(false) - end - - @testset "Integer Tags" begin - ts = TagSet("123") - @test length(ts) == 1 - @test hastags(ts, "123") - end - - @testset "Show TagSet" begin - ts = TagSet("Site,n=2") - @test length(sprint(show, ts)) > 1 - end - - @testset "Iterate Tagset" begin - ts = TagSet("Site, n=2") - @test [tag for tag in ts] == [ts[1], ts[2]] - end - - @testset "addtags" begin - ts = TagSet("Blue") - @test hastags(ts, "Blue") - - ts = addtags(ts, "Red") - @test hastags(ts, "Blue") - @test hastags(ts, "Red") - - ts = addtags(ts, "Green") - @test hastags(ts, "Blue") - @test hastags(ts, "Red") - @test hastags(ts, "Green") - - ts = addtags(ts, "Yellow") - @test hastags(ts, "Blue") - @test hastags(ts, "Red") - @test hastags(ts, "Green") - @test hastags(ts, "Yellow") - - @test addtags(ts, "Orange") == ts - 
ITensors.set_strict_tags!(true) - @test_throws ErrorException addtags(ts, "Orange") - ITensors.set_strict_tags!(false) - end + ts = TagSet("t3,t2,t1") + ts2 = copy(ts) + @test ts == ts2 + @test hastags(ts, "t1") + @test hastags(ts, "t2") + @test hastags(ts, "t3") + @test hastags(ts, "t3,t1") + @test !hastags(ts, "t4") + @test TagSet(ts) === ts + + @test ITensors.commontags() == ts"" + @test ITensors.commontags(ts"a,b", ts"a,c") == ts"a" + @test ITensors.commontags(Index(2, "a,b,x"), Index(3, "x,a,c"), Index(4, "x,a,z,w")) == + ts"a,x" + + t1 = TagSet("t1") + t2 = TagSet("t2") + t3 = TagSet("t3") + @test ts[1] == t1[1] + @test ts[2] == t2[1] + @test ts[3] == t3[1] + + @testset "Empty TagSet" begin + ts1 = TagSet() + @test length(ts1) == 0 + + ts2 = TagSet("") + @test ts2 == ts1 + @test length(ts2) == 0 + end + + @testset "Ignore Whitespace" begin + ts = TagSet(" aaa , bb bb , ccc ") + @test hastags(ts, " aaa ") + @test hastags(ts, "aaa") + @test hastags(ts, " aa a ") + @test hastags(ts, "bbbb") + end + + @testset "Remove tags" begin + ts1 = TagSet("x,y,z") + ts2 = TagSet("x,z") + @test removetags(ts1, "y") == ts2 + end + + @testset "Unicode tags" begin + ts = TagSet("α") + @test length(ts) == 1 + @test hastags(ts, "α") + @test ts[1] == ITensors.SmallString("α") + + ts = TagSet("α,β") + @test length(ts) == 2 + @test hastags(ts, "β") + @test hastags(ts, "α") + @test ts[1] == ITensors.SmallString("α") + @test ts[2] == ITensors.SmallString("β") + + ts = TagSet("αβγδϵζηθ,ijklmnop,qrstuvwx,ΑΒΓΔΕΖΗΘ") + @test length(ts) == 4 + @test hastags(ts, "αβγδϵζηθ") + @test hastags(ts, "ijklmnop") + @test hastags(ts, "qrstuvwx") + @test hastags(ts, "ΑΒΓΔΕΖΗΘ") + @test ts[1] == ITensors.SmallString("ijklmnop") + @test ts[2] == ITensors.SmallString("qrstuvwx") + @test ts[3] == ITensors.SmallString("ΑΒΓΔΕΖΗΘ") + @test ts[4] == ITensors.SmallString("αβγδϵζηθ") + end + + @testset "Tag long" begin + ts = TagSet("abcdefghijklmnop,ijklmnopqabcdefg") + @test length(ts) == 2 + @test hastags(ts, "abcdefghijklmnop") + @test hastags(ts, "ijklmnopqabcdefg") + end + + @testset "Tag too long" begin + @test !ITensors.using_strict_tags() + @test TagSet("ijklmnopqabcdefgh") == TagSet("ijklmnopqabcdefg") + @test TagSet("abcd,ijklmnopqabcdefgh") == TagSet("abcd,ijklmnopqabcdefg") + @test TagSet("ijklmnopqabcdefgh,abcd") == TagSet("abcd,ijklmnopqabcdefg") + ITensors.set_strict_tags!(true) + @test ITensors.using_strict_tags() + @test_throws ErrorException TagSet("ijklmnopqabcdefgh") + ITensors.set_strict_tags!(false) + end + + @testset "Too many tags" begin + @test !ITensors.using_strict_tags() + @test TagSet("a,b,c,d,e,f") == TagSet("a,b,c,d") + @test addtags(TagSet("a,b,c,d"), "e") == TagSet("a,b,c,d") + @test replacetags(TagSet("a,b,c,d"), "d", "e,f") == TagSet("a,b,c,e") + ITensors.set_strict_tags!(true) + @test ITensors.using_strict_tags() + @test_throws ErrorException TagSet("a,b,c,d,e,f") + @test_throws ErrorException addtags(TagSet("a,b,c,d"), "e") + @test_throws ErrorException replacetags(TagSet("a,b,c,d"), "d", "e,f") + ITensors.set_strict_tags!(false) + end + + @testset "Integer Tags" begin + ts = TagSet("123") + @test length(ts) == 1 + @test hastags(ts, "123") + end + + @testset "Show TagSet" begin + ts = TagSet("Site,n=2") + @test length(sprint(show, ts)) > 1 + end + + @testset "Iterate Tagset" begin + ts = TagSet("Site, n=2") + @test [tag for tag in ts] == [ts[1], ts[2]] + end + + @testset "addtags" begin + ts = TagSet("Blue") + @test hastags(ts, "Blue") + + ts = addtags(ts, "Red") + @test hastags(ts, "Blue") + 
@test hastags(ts, "Red") + + ts = addtags(ts, "Green") + @test hastags(ts, "Blue") + @test hastags(ts, "Red") + @test hastags(ts, "Green") + + ts = addtags(ts, "Yellow") + @test hastags(ts, "Blue") + @test hastags(ts, "Red") + @test hastags(ts, "Green") + @test hastags(ts, "Yellow") + + @test addtags(ts, "Orange") == ts + ITensors.set_strict_tags!(true) + @test_throws ErrorException addtags(ts, "Orange") + ITensors.set_strict_tags!(false) + end end diff --git a/test/base/utils/TestITensorsExportedNames/TestITensorsExportedNames.jl b/test/base/utils/TestITensorsExportedNames/TestITensorsExportedNames.jl index 65ac69c9f3..eb0edaa622 100644 --- a/test/base/utils/TestITensorsExportedNames/TestITensorsExportedNames.jl +++ b/test/base/utils/TestITensorsExportedNames/TestITensorsExportedNames.jl @@ -9,195 +9,195 @@ open("itensors_exported_names.jl", "w") do io end =# const ITENSORS_EXPORTED_NAMES = [ - Symbol("@disable_warn_order"), - Symbol("@reset_warn_order"), - Symbol("@set_warn_order"), - Symbol("@ts_str"), - :Apply, - :Block, - :ITensor, - :ITensors, - :Index, - :IndexSet, - :IndexVal, - :LinearAlgebra, - :NDTensors, - :Order, - :QN, - :Spectrum, - :TagSet, - :addblock!, - :addtags, - :addtags!, - :allhastags, - :anyhastags, - :apply, - :argsdict, - :array, - :axpy!, - :blockdim, - :blockoffsets, - :checkflux, - :combinedind, - :combiner, - :commonind, - :commonindex, - :commoninds, - :complex!, - :contract, - :convert_eltype, - :convert_leaf_eltype, - :dag, - :delta, - :dense, - :denseblocks, - :diag, - :diagITensor, - :diag_itensor, - :diagitensor, - :dim, - :dims, - :dir, - :directsum, - :disable_tblis!, - :disable_warn_order!, - :dot, - :eachindval, - :eachnzblock, - :eachval, - :eigen, - :eigs, - :emptyITensor, - :enable_tblis!, - :entropy, - :factorize, - :filterinds, - :findindex, - :findinds, - :firstind, - :firstintersect, - :firstsetdiff, - :flux, - :fparity, - :getfirst, - :getindex, - :hadamard_product, - :hascommoninds, - :hasid, - :hasind, - :hasinds, - :hasplev, - :hasqns, - :hassameinds, - :hastags, - :id, - :ind, - :index_id_rng, - :inds, - :inner, - :insertblock!, - :isactive, - :isfermionic, - :ishermitian, - :isindequal, - :itensor, - :linkindex, - :lq, - :mapprime, - :mapprime!, - :matmul, - :matrix, - :maxdim, - :mindim, - :modulus, - :mul!, - :nblocks, - :nnz, - :nnzblocks, - :noncommonind, - :noncommoninds, - :noprime, - :noprime!, - :norm, - :normalize, - :normalize!, - :not, - :nullspace, - :nzblock, - :nzblocks, - :onehot, - :order, - :permute, - :plev, - :polar, - :pop, - :popfirst, - :prime, - :prime!, - :product, - :push, - :pushfirst, - :ql, - :qn, - :qr, - :randn!, - :randomITensor, - :random_itensor, - :readcpp, - :removeqn, - :removeqns, - :removetags, - :removetags!, - :replaceind, - :replaceind!, - :replaceindex!, - :replaceinds, - :replaceinds!, - :replaceprime, - :replacetags, - :replacetags!, - :reset_warn_order!, - :rmul!, - :rq, - :scalar, - :scale!, - :set_warn_order!, - :setdir, - :setelt, - :setindex, - :setprime, - :setprime!, - :setspace, - :settags, - :settags!, - :sim, - :siteindex, - :space, - :splitblocks, - :storage, - :store, - :svd, - :swapind, - :swapinds, - :swapinds!, - :swapprime, - :swapprime!, - :swaptags, - :swaptags!, - :tags, - :tr, - :transpose, - :truncerror, - :unionind, - :unioninds, - :uniqueind, - :uniqueindex, - :uniqueinds, - :use_debug_checks, - :val, - :vector, - :δ, - :⊕, - :⊙, + Symbol("@disable_warn_order"), + Symbol("@reset_warn_order"), + Symbol("@set_warn_order"), + Symbol("@ts_str"), + :Apply, + :Block, + 
:ITensor, + :ITensors, + :Index, + :IndexSet, + :IndexVal, + :LinearAlgebra, + :NDTensors, + :Order, + :QN, + :Spectrum, + :TagSet, + :addblock!, + :addtags, + :addtags!, + :allhastags, + :anyhastags, + :apply, + :argsdict, + :array, + :axpy!, + :blockdim, + :blockoffsets, + :checkflux, + :combinedind, + :combiner, + :commonind, + :commonindex, + :commoninds, + :complex!, + :contract, + :convert_eltype, + :convert_leaf_eltype, + :dag, + :delta, + :dense, + :denseblocks, + :diag, + :diagITensor, + :diag_itensor, + :diagitensor, + :dim, + :dims, + :dir, + :directsum, + :disable_tblis!, + :disable_warn_order!, + :dot, + :eachindval, + :eachnzblock, + :eachval, + :eigen, + :eigs, + :emptyITensor, + :enable_tblis!, + :entropy, + :factorize, + :filterinds, + :findindex, + :findinds, + :firstind, + :firstintersect, + :firstsetdiff, + :flux, + :fparity, + :getfirst, + :getindex, + :hadamard_product, + :hascommoninds, + :hasid, + :hasind, + :hasinds, + :hasplev, + :hasqns, + :hassameinds, + :hastags, + :id, + :ind, + :index_id_rng, + :inds, + :inner, + :insertblock!, + :isactive, + :isfermionic, + :ishermitian, + :isindequal, + :itensor, + :linkindex, + :lq, + :mapprime, + :mapprime!, + :matmul, + :matrix, + :maxdim, + :mindim, + :modulus, + :mul!, + :nblocks, + :nnz, + :nnzblocks, + :noncommonind, + :noncommoninds, + :noprime, + :noprime!, + :norm, + :normalize, + :normalize!, + :not, + :nullspace, + :nzblock, + :nzblocks, + :onehot, + :order, + :permute, + :plev, + :polar, + :pop, + :popfirst, + :prime, + :prime!, + :product, + :push, + :pushfirst, + :ql, + :qn, + :qr, + :randn!, + :randomITensor, + :random_itensor, + :readcpp, + :removeqn, + :removeqns, + :removetags, + :removetags!, + :replaceind, + :replaceind!, + :replaceindex!, + :replaceinds, + :replaceinds!, + :replaceprime, + :replacetags, + :replacetags!, + :reset_warn_order!, + :rmul!, + :rq, + :scalar, + :scale!, + :set_warn_order!, + :setdir, + :setelt, + :setindex, + :setprime, + :setprime!, + :setspace, + :settags, + :settags!, + :sim, + :siteindex, + :space, + :splitblocks, + :storage, + :store, + :svd, + :swapind, + :swapinds, + :swapinds!, + :swapprime, + :swapprime!, + :swaptags, + :swaptags!, + :tags, + :tr, + :transpose, + :truncerror, + :unionind, + :unioninds, + :uniqueind, + :uniqueindex, + :uniqueinds, + :use_debug_checks, + :val, + :vector, + :δ, + :⊕, + :⊙, ] end diff --git a/test/base/utils/util.jl b/test/base/utils/util.jl index 6839f2201f..14874eee5c 100644 --- a/test/base/utils/util.jl +++ b/test/base/utils/util.jl @@ -2,39 +2,39 @@ using ITensors using Random # Based on https://discourse.julialang.org/t/lapackexception-1-while-svd-but-not-svdvals/23787 -function make_illconditioned_matrix(T=5000) - t = 0:(T - 1) - f = LinRange(0, 0.5 - 1 / length(t) / 2, length(t) ÷ 2) - y = sin.(t) - function check_freq(f) - zerofreq = findfirst(iszero, f) - zerofreq !== nothing && - zerofreq != 1 && - throw(ArgumentError("If zero frequency is included it must be the first frequency")) - return zerofreq - end - function get_fourier_regressor(t, f) - zerofreq = check_freq(f) - N = length(t) - Nf = length(f) - Nreg = zerofreq === nothing ? 
2Nf : 2Nf - 1 - N >= Nreg || throw(ArgumentError("Too many frequency components $Nreg > $N")) - A = zeros(N, Nreg) - sinoffset = Nf - for fn in 1:Nf - if fn == zerofreq - sinoffset = Nf - 1 - end - for n in 1:N - phi = 2π * f[fn] * t[n] - A[n, fn] = cos(phi) - if fn != zerofreq - A[n, fn + sinoffset] = -sin(phi) +function make_illconditioned_matrix(T = 5000) + t = 0:(T - 1) + f = LinRange(0, 0.5 - 1 / length(t) / 2, length(t) ÷ 2) + y = sin.(t) + function check_freq(f) + zerofreq = findfirst(iszero, f) + zerofreq !== nothing && + zerofreq != 1 && + throw(ArgumentError("If zero frequency is included it must be the first frequency")) + return zerofreq + end + function get_fourier_regressor(t, f) + zerofreq = check_freq(f) + N = length(t) + Nf = length(f) + Nreg = zerofreq === nothing ? 2Nf : 2Nf - 1 + N >= Nreg || throw(ArgumentError("Too many frequency components $Nreg > $N")) + A = zeros(N, Nreg) + sinoffset = Nf + for fn in 1:Nf + if fn == zerofreq + sinoffset = Nf - 1 + end + for n in 1:N + phi = 2π * f[fn] * t[n] + A[n, fn] = cos(phi) + if fn != zerofreq + A[n, fn + sinoffset] = -sin(phi) + end + end end - end + return A, zerofreq end - return A, zerofreq - end - A, z = get_fourier_regressor(t, f) - return [A y] + A, z = get_fourier_regressor(t, f) + return [A y] end diff --git a/test/ext/ITensorsChainRulesCoreExt/runtests.jl b/test/ext/ITensorsChainRulesCoreExt/runtests.jl index c2fe5a8b6f..76d6d69d9e 100644 --- a/test/ext/ITensorsChainRulesCoreExt/runtests.jl +++ b/test/ext/ITensorsChainRulesCoreExt/runtests.jl @@ -6,11 +6,11 @@ ITensors.BLAS.set_num_threads(1) ITensors.disable_threaded_blocksparse() @testset "$(@__DIR__)" begin - filenames = filter(readdir(@__DIR__)) do f - startswith("test_")(f) && endswith(".jl")(f) - end - @testset "Test $(@__DIR__)/$filename" for filename in filenames - println("Running $(@__DIR__)/$filename") - @time include(filename) - end + filenames = filter(readdir(@__DIR__)) do f + startswith("test_")(f) && endswith(".jl")(f) + end + @testset "Test $(@__DIR__)/$filename" for filename in filenames + println("Running $(@__DIR__)/$filename") + @time include(filename) + end end diff --git a/test/ext/ITensorsChainRulesCoreExt/test_chainrules_ops.jl b/test/ext/ITensorsChainRulesCoreExt/test_chainrules_ops.jl index d21460f932..be468e40c1 100644 --- a/test/ext/ITensorsChainRulesCoreExt/test_chainrules_ops.jl +++ b/test/ext/ITensorsChainRulesCoreExt/test_chainrules_ops.jl @@ -9,275 +9,275 @@ include("utils/chainrulestestutils.jl") using Zygote: ZygoteRuleConfig, gradient @testset "ChainRules rrules: Ops" begin - s = siteinds("S=1/2", 4) - - x = 2.4 - V = random_itensor(s[1], s[2]) - - f = function (x) - y = ITensor(Op("Ry", 1; θ=x), s) - return y[1, 1] - end - args = (x,) - test_rrule( - ZygoteRuleConfig(), - f, - args...; - rrule_f=rrule_via_ad, - check_inferred=false, - rtol=1.0e-7, - atol=1.0e-7, - ) - - f = function (x) - y = exp(ITensor(Op("Ry", 1; θ=x), s)) - return y[1, 1] - end - args = (x,) - test_rrule( - ZygoteRuleConfig(), - f, - args...; - rrule_f=rrule_via_ad, - check_inferred=false, - rtol=1.0e-7, - atol=1.0e-7, - ) - - function sometimes_broken_test() + s = siteinds("S=1/2", 4) + + x = 2.4 + V = random_itensor(s[1], s[2]) + f = function (x) - y = Op("Ry", 1; θ=x) + Op("Ry", 1; θ=x) - return y[1].params.θ + y = ITensor(Op("Ry", 1; θ = x), s) + return y[1, 1] end args = (x,) test_rrule( - ZygoteRuleConfig(), - f, - args...; - rrule_f=rrule_via_ad, - check_inferred=false, - rtol=1.0e-7, - atol=1.0e-7, + ZygoteRuleConfig(), + f, + args...; + rrule_f = 
rrule_via_ad, + check_inferred = false, + rtol = 1.0e-7, + atol = 1.0e-7, ) - return nothing - end - - @static if VERSION > v"1.8" - @test_skip sometimes_broken_test() - else - sometimes_broken_test() - end - - f = function (x) - y = ITensor(Op("Ry", 1; θ=x) + Op("Ry", 1; θ=x), s) - return y[1, 1] - end - args = (x,) - test_rrule( - ZygoteRuleConfig(), - f, - args...; - rrule_f=rrule_via_ad, - check_inferred=false, - rtol=1.0e-7, - atol=1.0e-7, - ) - - f = function (x) - y = exp(ITensor(Op("Ry", 1; θ=x) + Op("Ry", 1; θ=x), s)) - return y[1, 1] - end - args = (x,) - test_rrule( - ZygoteRuleConfig(), - f, - args...; - rrule_f=rrule_via_ad, - check_inferred=false, - rtol=1.0e-7, - atol=1.0e-7, - ) - - f = function (x) - y = ITensor(exp(Op("Ry", 1; θ=x) + Op("Ry", 1; θ=x)), s) - return y[1, 1] - end - args = (x,) - test_rrule( - ZygoteRuleConfig(), - f, - args...; - rrule_f=rrule_via_ad, - check_inferred=false, - rtol=1.0e-7, - atol=1.0e-7, - ) - - f = function (x) - y = ITensor(2 * Op("Ry", 1; θ=x), s) - return y[1, 1] - end - args = (x,) - test_rrule( - ZygoteRuleConfig(), - f, - args...; - rrule_f=rrule_via_ad, - check_inferred=false, - rtol=1.0e-7, - atol=1.0e-7, - ) - - f = function (x) - y = ITensor(2 * (Op("Ry", 1; θ=x) + Op("Ry", 1; θ=x)), s) - return y[1, 1] - end - args = (x,) - test_rrule( - ZygoteRuleConfig(), - f, - args...; - rrule_f=rrule_via_ad, - check_inferred=false, - rtol=1.0e-7, - atol=1.0e-7, - ) - - f = function (x) - y = ITensor(Op("Ry", 1; θ=x) * Op("Ry", 2; θ=x), s) - return y[1, 1] - end - args = (x,) - test_rrule( - ZygoteRuleConfig(), - f, - args...; - rrule_f=rrule_via_ad, - check_inferred=false, - rtol=1.0e-7, - atol=1.0e-7, - ) - - if VERSION ≥ v"1.8" + f = function (x) - y = ITensor(exp(-x * Op("X", 1) * Op("X", 2)), s) - return norm(y) + y = exp(ITensor(Op("Ry", 1; θ = x), s)) + return y[1, 1] end args = (x,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) + test_rrule( + ZygoteRuleConfig(), + f, + args...; + rrule_f = rrule_via_ad, + check_inferred = false, + rtol = 1.0e-7, + atol = 1.0e-7, + ) + + function sometimes_broken_test() + f = function (x) + y = Op("Ry", 1; θ = x) + Op("Ry", 1; θ = x) + return y[1].params.θ + end + args = (x,) + test_rrule( + ZygoteRuleConfig(), + f, + args...; + rrule_f = rrule_via_ad, + check_inferred = false, + rtol = 1.0e-7, + atol = 1.0e-7, + ) + return nothing + end + + @static if VERSION > v"1.8" + @test_skip sometimes_broken_test() + else + sometimes_broken_test() + end f = function (x) - y = exp(-x * Op("X", 1) * Op("X", 2)) - y *= exp(-x * Op("X", 1) * Op("X", 2)) - U = ITensor(y, s) - return norm(U) + y = ITensor(Op("Ry", 1; θ = x) + Op("Ry", 1; θ = x), s) + return y[1, 1] end args = (x,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - end - - U1(θ) = Op("Ry", 1; θ) - U2(θ) = Op("Ry", 2; θ) - - f = function (x) - return ITensor(U1(x), s)[1, 1] - end - args = (x,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - f = function (x) - return ITensor(U1(x) * U2(x), s)[1, 1] - end - args = (x,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - f = function (x) - return ITensor(1.2 * U1(x), s)[1, 1] - end - args = (x,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - f = function (x) - return ITensor(exp(1.2 * U1(x)), s)[1, 1] - end - args = (x,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, 
check_inferred=false) - - f = function (x) - return ITensor(exp(x * U1(1.2)), s)[1, 1] - end - args = (x,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - function H(x1, x2) - os = Ops.OpSum() - os += x1 * Op("X", 1) - os += x2 * Op("X", 2) - return os - end - - # These are broken in versions of Zygote after 0.6.43, - # See: https://github.com/FluxML/Zygote.jl/issues/1304 - @test_skip begin + test_rrule( + ZygoteRuleConfig(), + f, + args...; + rrule_f = rrule_via_ad, + check_inferred = false, + rtol = 1.0e-7, + atol = 1.0e-7, + ) + f = function (x) - return ITensor(exp(1.5 * H(x, x); alg=Trotter{1}(1)), s)[1, 1] + y = exp(ITensor(Op("Ry", 1; θ = x) + Op("Ry", 1; θ = x), s)) + return y[1, 1] end args = (x,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) + test_rrule( + ZygoteRuleConfig(), + f, + args...; + rrule_f = rrule_via_ad, + check_inferred = false, + rtol = 1.0e-7, + atol = 1.0e-7, + ) f = function (x) - return ITensor(exp(1.5 * H(x, x); alg=Trotter{2}(1)), s)[1, 1] + y = ITensor(exp(Op("Ry", 1; θ = x) + Op("Ry", 1; θ = x)), s) + return y[1, 1] end args = (x,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) + test_rrule( + ZygoteRuleConfig(), + f, + args...; + rrule_f = rrule_via_ad, + check_inferred = false, + rtol = 1.0e-7, + atol = 1.0e-7, + ) f = function (x) - return ITensor(exp(1.5 * H(x, x); alg=Trotter{2}(2)), s)[1, 1] + y = ITensor(2 * Op("Ry", 1; θ = x), s) + return y[1, 1] end args = (x,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) + test_rrule( + ZygoteRuleConfig(), + f, + args...; + rrule_f = rrule_via_ad, + check_inferred = false, + rtol = 1.0e-7, + atol = 1.0e-7, + ) f = function (x) - return ITensor(exp(x * H(x, x); alg=Trotter{2}(2)), s)[1, 1] + y = ITensor(2 * (Op("Ry", 1; θ = x) + Op("Ry", 1; θ = x)), s) + return y[1, 1] end args = (x,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - end - - f = function (x) - y = -x * (Op("X", 1) * Op("X", 2) + Op("Z", 1) * Op("Z", 2)) - U = ITensor(y, s) - return norm(U * V) - end - args = (x,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - - if VERSION ≥ v"1.8" + test_rrule( + ZygoteRuleConfig(), + f, + args...; + rrule_f = rrule_via_ad, + check_inferred = false, + rtol = 1.0e-7, + atol = 1.0e-7, + ) + f = function (x) - y = exp(-x * (Op("X", 1) * Op("X", 2) + Op("Z", 1) * Op("Z", 2)); alg=Trotter{1}(1)) - U = ITensor(y, s) - return norm(U * V) + y = ITensor(Op("Ry", 1; θ = x) * Op("Ry", 2; θ = x), s) + return y[1, 1] end args = (x,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) + test_rrule( + ZygoteRuleConfig(), + f, + args...; + rrule_f = rrule_via_ad, + check_inferred = false, + rtol = 1.0e-7, + atol = 1.0e-7, + ) + + if VERSION ≥ v"1.8" + f = function (x) + y = ITensor(exp(-x * Op("X", 1) * Op("X", 2)), s) + return norm(y) + end + args = (x,) + test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false) + + f = function (x) + y = exp(-x * Op("X", 1) * Op("X", 2)) + y *= exp(-x * Op("X", 1) * Op("X", 2)) + U = ITensor(y, s) + return norm(U) + end + args = (x,) + test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false) + end + + U1(θ) = Op("Ry", 1; θ) + U2(θ) = Op("Ry", 2; θ) + + f = function (x) + return ITensor(U1(x), s)[1, 1] + end + args = (x,) + 
test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false) - ## XXX: Fix f = function (x) - y = exp(-x * Op("X", 1) * Op("X", 2)) - y *= exp(-x * Op("X", 1) * Op("X", 2)) - U = Prod{ITensor}(y, s) - return norm(U(V)) + return ITensor(U1(x) * U2(x), s)[1, 1] end args = (x,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) + test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false) - ## XXX: Fix f = function (x) - y = exp(-x * (Op("X", 1) + Op("Z", 1) + Op("Z", 1)); alg=Trotter{1}(1)) - U = Prod{ITensor}(y, s) - return norm(U(V)) + return ITensor(1.2 * U1(x), s)[1, 1] end args = (x,) - test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false) - end + test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false) + + f = function (x) + return ITensor(exp(1.2 * U1(x)), s)[1, 1] + end + args = (x,) + test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false) + + f = function (x) + return ITensor(exp(x * U1(1.2)), s)[1, 1] + end + args = (x,) + test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false) + + function H(x1, x2) + os = Ops.OpSum() + os += x1 * Op("X", 1) + os += x2 * Op("X", 2) + return os + end + + # These are broken in versions of Zygote after 0.6.43, + # See: https://github.com/FluxML/Zygote.jl/issues/1304 + @test_skip begin + f = function (x) + return ITensor(exp(1.5 * H(x, x); alg = Trotter{1}(1)), s)[1, 1] + end + args = (x,) + test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false) + + f = function (x) + return ITensor(exp(1.5 * H(x, x); alg = Trotter{2}(1)), s)[1, 1] + end + args = (x,) + test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false) + + f = function (x) + return ITensor(exp(1.5 * H(x, x); alg = Trotter{2}(2)), s)[1, 1] + end + args = (x,) + test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false) + + f = function (x) + return ITensor(exp(x * H(x, x); alg = Trotter{2}(2)), s)[1, 1] + end + args = (x,) + test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false) + end + + f = function (x) + y = -x * (Op("X", 1) * Op("X", 2) + Op("Z", 1) * Op("Z", 2)) + U = ITensor(y, s) + return norm(U * V) + end + args = (x,) + test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false) + + if VERSION ≥ v"1.8" + f = function (x) + y = exp(-x * (Op("X", 1) * Op("X", 2) + Op("Z", 1) * Op("Z", 2)); alg = Trotter{1}(1)) + U = ITensor(y, s) + return norm(U * V) + end + args = (x,) + test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false) + + ## XXX: Fix + f = function (x) + y = exp(-x * Op("X", 1) * Op("X", 2)) + y *= exp(-x * Op("X", 1) * Op("X", 2)) + U = Prod{ITensor}(y, s) + return norm(U(V)) + end + args = (x,) + test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false) + + ## XXX: Fix + f = function (x) + y = exp(-x * (Op("X", 1) + Op("Z", 1) + Op("Z", 1)); alg = Trotter{1}(1)) + U = Prod{ITensor}(y, s) + return norm(U(V)) + end + args = (x,) + test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false) + end end diff --git a/test/ext/ITensorsTensorOperationsExt/runtests.jl b/test/ext/ITensorsTensorOperationsExt/runtests.jl index 63d1753ff2..1fedb70457 100644 --- 
a/test/ext/ITensorsTensorOperationsExt/runtests.jl +++ b/test/ext/ITensorsTensorOperationsExt/runtests.jl @@ -10,154 +10,154 @@ using ITensors: dim, optimal_contraction_sequence using TensorOperations: TensorOperations @testset "ITensor contraction sequence optimization" begin - d = 100 - i = Index(d, "i") - A = random_itensor(i', dag(i)) + d = 100 + i = Index(d, "i") + A = random_itensor(i', dag(i)) - @test !ITensors.using_contraction_sequence_optimization() + @test !ITensors.using_contraction_sequence_optimization() - A2 = A' * A - @test hassameinds(A2, (i'', i)) + A2 = A' * A + @test hassameinds(A2, (i'', i)) - ITensors.enable_contraction_sequence_optimization() - @test ITensors.using_contraction_sequence_optimization() + ITensors.enable_contraction_sequence_optimization() + @test ITensors.using_contraction_sequence_optimization() - @test A' * A ≈ A2 + @test A' * A ≈ A2 - ITensors.disable_contraction_sequence_optimization() - @test !ITensors.using_contraction_sequence_optimization() + ITensors.disable_contraction_sequence_optimization() + @test !ITensors.using_contraction_sequence_optimization() - A3 = A'' * A' * A - @test hassameinds(A3, (i''', i)) - @test contract([A'', A', A]) ≈ A3 - @test contract([A'', A', A]; sequence="automatic") ≈ A3 - @test contract([A'', A', A]; sequence="left_associative") ≈ A3 - @test contract([A'', A', A]; sequence="right_associative") ≈ A3 - @test contract([A'', A', A]; sequence=[[1, 2], 3]) ≈ A3 - @test contract([A'', A', A]; sequence=[[2, 3], 1]) ≈ A3 - # A bad sequence - @test contract([A'', A', A]; sequence=[[1, 3], 2]) ≈ A3 + A3 = A'' * A' * A + @test hassameinds(A3, (i''', i)) + @test contract([A'', A', A]) ≈ A3 + @test contract([A'', A', A]; sequence = "automatic") ≈ A3 + @test contract([A'', A', A]; sequence = "left_associative") ≈ A3 + @test contract([A'', A', A]; sequence = "right_associative") ≈ A3 + @test contract([A'', A', A]; sequence = [[1, 2], 3]) ≈ A3 + @test contract([A'', A', A]; sequence = [[2, 3], 1]) ≈ A3 + # A bad sequence + @test contract([A'', A', A]; sequence = [[1, 3], 2]) ≈ A3 - ITensors.enable_contraction_sequence_optimization() - @test ITensors.using_contraction_sequence_optimization() + ITensors.enable_contraction_sequence_optimization() + @test ITensors.using_contraction_sequence_optimization() - @test A'' * A' * A ≈ A3 - @test A * A'' * A' ≈ A3 - @test contract([A'', A', A]) ≈ A3 - @test contract([A, A'', A']) ≈ A3 - @test contract([A'', A', A]; sequence="automatic") ≈ A3 - @test contract([A'', A', A]; sequence="left_associative") ≈ A3 - @test contract([A'', A', A]; sequence="right_associative") ≈ A3 - @test contract([A'', A', A]; sequence=[[1, 2], 3]) ≈ A3 - @test contract([A'', A', A]; sequence=[[2, 3], 1]) ≈ A3 - @test contract([A'', A', A]; sequence=[[1, 3], 2]) ≈ A3 + @test A'' * A' * A ≈ A3 + @test A * A'' * A' ≈ A3 + @test contract([A'', A', A]) ≈ A3 + @test contract([A, A'', A']) ≈ A3 + @test contract([A'', A', A]; sequence = "automatic") ≈ A3 + @test contract([A'', A', A]; sequence = "left_associative") ≈ A3 + @test contract([A'', A', A]; sequence = "right_associative") ≈ A3 + @test contract([A'', A', A]; sequence = [[1, 2], 3]) ≈ A3 + @test contract([A'', A', A]; sequence = [[2, 3], 1]) ≈ A3 + @test contract([A'', A', A]; sequence = [[1, 3], 2]) ≈ A3 - ITensors.disable_contraction_sequence_optimization() - @test !ITensors.using_contraction_sequence_optimization() + ITensors.disable_contraction_sequence_optimization() + @test !ITensors.using_contraction_sequence_optimization() - # This is not the only sequence - 
@test ITensors.optimal_contraction_sequence([A, A'', A']) == Any[1, Any[3, 2]] + # This is not the only sequence + @test ITensors.optimal_contraction_sequence([A, A'', A']) == Any[1, Any[3, 2]] - time_without_opt = @elapsed A * A'' * A' + time_without_opt = @elapsed A * A'' * A' - ITensors.enable_contraction_sequence_optimization() - @test ITensors.using_contraction_sequence_optimization() + ITensors.enable_contraction_sequence_optimization() + @test ITensors.using_contraction_sequence_optimization() - time_with_opt = @elapsed A * A'' * A' + time_with_opt = @elapsed A * A'' * A' - @test time_with_opt < time_without_opt + @test time_with_opt < time_without_opt - ITensors.disable_contraction_sequence_optimization() - @test !ITensors.using_contraction_sequence_optimization() + ITensors.disable_contraction_sequence_optimization() + @test !ITensors.using_contraction_sequence_optimization() - A4 = A''' * A'' * A' * A - @test hassameinds(A4, (i'''', i)) - @test contract([A''', A'', A', A]; sequence=[[[1, 2], 3], 4]) ≈ A4 + A4 = A''' * A'' * A' * A + @test hassameinds(A4, (i'''', i)) + @test contract([A''', A'', A', A]; sequence = [[[1, 2], 3], 4]) ≈ A4 - ITensors.enable_contraction_sequence_optimization() - @test ITensors.using_contraction_sequence_optimization() + ITensors.enable_contraction_sequence_optimization() + @test ITensors.using_contraction_sequence_optimization() - @test A'' * A * A''' * A' ≈ A4 + @test A'' * A * A''' * A' ≈ A4 - ITensors.disable_contraction_sequence_optimization() - @test !ITensors.using_contraction_sequence_optimization() + ITensors.disable_contraction_sequence_optimization() + @test !ITensors.using_contraction_sequence_optimization() - seq = ITensors.optimal_contraction_sequence([A, A'', A', A''']) - @test length(seq) == 2 - @test issetequal(Any[3, 1], first(seq)) || issetequal(Any[2, 4], first(seq)) + seq = ITensors.optimal_contraction_sequence([A, A'', A', A''']) + @test length(seq) == 2 + @test issetequal(Any[3, 1], first(seq)) || issetequal(Any[2, 4], first(seq)) - time_without_opt = @elapsed A * A'' * A' * A''' + time_without_opt = @elapsed A * A'' * A' * A''' - ITensors.enable_contraction_sequence_optimization() - @test ITensors.using_contraction_sequence_optimization() + ITensors.enable_contraction_sequence_optimization() + @test ITensors.using_contraction_sequence_optimization() - time_with_opt = @elapsed A * A'' * A' * A''' + time_with_opt = @elapsed A * A'' * A' * A''' - @test time_with_opt < time_without_opt + @test time_with_opt < time_without_opt - ITensors.disable_contraction_sequence_optimization() - @test !ITensors.using_contraction_sequence_optimization() + ITensors.disable_contraction_sequence_optimization() + @test !ITensors.using_contraction_sequence_optimization() end @testset "contract sequence optimization interfaces" begin - # Network and dimensions need to be large enough - # so that tensor allocations dominate over network - # analysis for testing the number of allocations below. 
- d0 = 2 - δd = 5000 - ntensors = 6 - ElType = Float64 - d = [d0 + (n - 1) * δd for n in 1:ntensors] - t = ["$n" for n in 1:ntensors] - is = Index.(d, t) - - As = [random_itensor(ElType, is[n], is[mod1(n + 1, ntensors)]) for n in 1:ntensors] - - # Warmup - contract(As) - allocations_left_associative = @allocated contract(As) - - allocations_left_associative_pairwise = 0 - tmp = As[1] - for n in 2:length(As) - tmp * As[n] - allocations_left_associative_pairwise += @allocated tmp = tmp * As[n] - end - @test allocations_left_associative ≈ allocations_left_associative_pairwise rtol = 0.01 - - sequence = foldr((x, y) -> [x, y], 1:ntensors) - b1 = optimal_contraction_sequence(As) == Any[1, Any[2, Any[3, Any[4, [5, 6]]]]] - b2 = optimal_contraction_sequence(As) == Any[1, Any[2, Any[3, Any[4, [6, 5]]]]] - @test b1 || b2 - - As_network = foldr((x, y) -> [x, y], As) - - # Warmup - contract(As; sequence=sequence) - contract(As; sequence="right_associative") - contract(As; sequence="automatic") - contract(As_network) - - # Measure allocations of different interfaces - allocations_right_associative_1 = @allocated contract(As; sequence=sequence) - allocations_right_associative_2 = @allocated contract(As; sequence="right_associative") - allocations_right_associative_3 = @allocated contract(As; sequence="automatic") - allocations_right_associative_4 = @allocated contract(As_network) - - allocations_right_associative_pairwise = 0 - tmp = As[end] - for n in reverse(1:(length(As) - 1)) - tmp * As[n] - allocations_right_associative_pairwise += @allocated tmp = tmp * As[n] - end - @test allocations_right_associative_pairwise ≈ allocations_right_associative_1 rtol = 0.2 - @test allocations_right_associative_pairwise ≈ allocations_right_associative_2 rtol = 0.2 - @test allocations_right_associative_pairwise ≈ allocations_right_associative_3 rtol = 0.2 - @test allocations_right_associative_pairwise ≈ allocations_right_associative_4 rtol = 0.2 - - @test allocations_right_associative_1 < allocations_left_associative - @test allocations_right_associative_2 < allocations_left_associative - @test allocations_right_associative_3 < allocations_left_associative - @test allocations_right_associative_4 < allocations_left_associative + # Network and dimensions need to be large enough + # so that tensor allocations dominate over network + # analysis for testing the number of allocations below. 
+ d0 = 2 + δd = 5000 + ntensors = 6 + ElType = Float64 + d = [d0 + (n - 1) * δd for n in 1:ntensors] + t = ["$n" for n in 1:ntensors] + is = Index.(d, t) + + As = [random_itensor(ElType, is[n], is[mod1(n + 1, ntensors)]) for n in 1:ntensors] + + # Warmup + contract(As) + allocations_left_associative = @allocated contract(As) + + allocations_left_associative_pairwise = 0 + tmp = As[1] + for n in 2:length(As) + tmp * As[n] + allocations_left_associative_pairwise += @allocated tmp = tmp * As[n] + end + @test allocations_left_associative ≈ allocations_left_associative_pairwise rtol = 0.01 + + sequence = foldr((x, y) -> [x, y], 1:ntensors) + b1 = optimal_contraction_sequence(As) == Any[1, Any[2, Any[3, Any[4, [5, 6]]]]] + b2 = optimal_contraction_sequence(As) == Any[1, Any[2, Any[3, Any[4, [6, 5]]]]] + @test b1 || b2 + + As_network = foldr((x, y) -> [x, y], As) + + # Warmup + contract(As; sequence = sequence) + contract(As; sequence = "right_associative") + contract(As; sequence = "automatic") + contract(As_network) + + # Measure allocations of different interfaces + allocations_right_associative_1 = @allocated contract(As; sequence = sequence) + allocations_right_associative_2 = @allocated contract(As; sequence = "right_associative") + allocations_right_associative_3 = @allocated contract(As; sequence = "automatic") + allocations_right_associative_4 = @allocated contract(As_network) + + allocations_right_associative_pairwise = 0 + tmp = As[end] + for n in reverse(1:(length(As) - 1)) + tmp * As[n] + allocations_right_associative_pairwise += @allocated tmp = tmp * As[n] + end + @test allocations_right_associative_pairwise ≈ allocations_right_associative_1 rtol = 0.2 + @test allocations_right_associative_pairwise ≈ allocations_right_associative_2 rtol = 0.2 + @test allocations_right_associative_pairwise ≈ allocations_right_associative_3 rtol = 0.2 + @test allocations_right_associative_pairwise ≈ allocations_right_associative_4 rtol = 0.2 + + @test allocations_right_associative_1 < allocations_left_associative + @test allocations_right_associative_2 < allocations_left_associative + @test allocations_right_associative_3 < allocations_left_associative + @test allocations_right_associative_4 < allocations_left_associative end end diff --git a/test/ext/ITensorsVectorInterfaceExt/runtests.jl b/test/ext/ITensorsVectorInterfaceExt/runtests.jl index 6578a86aa2..65a80072a2 100644 --- a/test/ext/ITensorsVectorInterfaceExt/runtests.jl +++ b/test/ext/ITensorsVectorInterfaceExt/runtests.jl @@ -2,133 +2,133 @@ using ITensors: ITensor, Index, dag, inds, random_itensor using Test: @test, @testset using VectorInterface: - add, - add!, - add!!, - inner, - scalartype, - scale, - scale!, - scale!!, - zerovector, - zerovector!, - zerovector!! + add, + add!, + add!!, + inner, + scalartype, + scale, + scale!, + scale!!, + zerovector, + zerovector!, + zerovector!! const elts = (Float32, Float64, Complex{Float32}, Complex{Float64}) @testset "ITensorsVectorInterfaceExt (eltype=$elt)" for elt in elts - i, j, k = Index.((2, 2, 2)) - a = random_itensor(elt, i, j, k) - b = random_itensor(elt, k, i, j) - α = randn(elt) - β = randn(elt) - αᶜ = randn(complex(elt)) - βᶜ = randn(complex(elt)) - - # add - @test add(a, b) ≈ a + b - @test add(a, b, α) ≈ a + α * b - @test add(a, b, α, β) ≈ β * a + α * b - - @test add(a, b, αᶜ) ≈ a + αᶜ * b - @test add(a, b, αᶜ, βᶜ) ≈ βᶜ * a + αᶜ * b - - # add! 
- a′ = copy(a) - add!(a′, b) - @test a′ ≈ a + b - a′ = copy(a) - add!(a′, b, α) - @test a′ ≈ a + α * b - a′ = copy(a) - add!(a′, b, α, β) - @test a′ ≈ β * a + α * b - - # add!! - a′ = copy(a) - add!!(a′, b) - @test a′ ≈ a + b - a′ = copy(a) - add!!(a′, b, α) - @test a′ ≈ a + α * b - a′ = copy(a) - add!!(a′, b, α, β) - @test a′ ≈ β * a + α * b - - a′ = copy(a) - a′ = add!!(a′, b, αᶜ) - @test a′ ≈ a + αᶜ * b - a′ = copy(a) - a′ = add!!(a′, b, αᶜ, βᶜ) - @test a′ ≈ βᶜ * a + αᶜ * b - - # inner - @test inner(a, b) ≈ (dag(a) * b)[] - @test inner(a, a) ≈ (dag(a) * a)[] - - # scalartype - @test scalartype(a) === elt - @test scalartype(b) === elt - @test scalartype([a, b]) === elt - @test scalartype([a, random_itensor(Float32, i, j)]) === elt - @test scalartype(ITensor[]) === Bool - - # scale - @test scale(a, α) ≈ α * a - - @test scale(a, αᶜ) ≈ αᶜ * a - - # scale! - a′ = copy(a) - scale!(a′, α) - @test a′ ≈ α * a - a′ = copy(a) - scale!(a′, b, α) - @test a′ ≈ α * b - - # scale!! - a′ = copy(a) - scale!!(a′, α) - @test a′ ≈ α * a - a′ = copy(a) - scale!!(a′, b, α) - @test a′ ≈ α * b - - a′ = copy(a) - a′ = scale!!(a′, αᶜ) - @test a′ ≈ αᶜ * a - a′ = copy(a) - a′ = scale!!(a′, b, αᶜ) - @test a′ ≈ αᶜ * b - - # zerovector - z = zerovector(a) - @test iszero(z) - @test issetequal(inds(a), inds(z)) - @test eltype(z) === eltype(a) - - z = zerovector(a, complex(elt)) - @test iszero(z) - @test issetequal(inds(a), inds(z)) - @test eltype(z) === complex(eltype(a)) - - # zerovector! - z = copy(a) - zerovector!(z) - @test iszero(z) - @test issetequal(inds(a), inds(z)) - @test eltype(z) === eltype(a) - - # zerovector!! - z = copy(a) - zerovector!!(z, elt) - @test iszero(z) - @test issetequal(inds(a), inds(z)) - @test eltype(z) === eltype(a) - - z = copy(a) - z = zerovector!!(z, complex(elt)) - @test iszero(z) - @test issetequal(inds(a), inds(z)) - @test eltype(z) === complex(eltype(a)) + i, j, k = Index.((2, 2, 2)) + a = random_itensor(elt, i, j, k) + b = random_itensor(elt, k, i, j) + α = randn(elt) + β = randn(elt) + αᶜ = randn(complex(elt)) + βᶜ = randn(complex(elt)) + + # add + @test add(a, b) ≈ a + b + @test add(a, b, α) ≈ a + α * b + @test add(a, b, α, β) ≈ β * a + α * b + + @test add(a, b, αᶜ) ≈ a + αᶜ * b + @test add(a, b, αᶜ, βᶜ) ≈ βᶜ * a + αᶜ * b + + # add! + a′ = copy(a) + add!(a′, b) + @test a′ ≈ a + b + a′ = copy(a) + add!(a′, b, α) + @test a′ ≈ a + α * b + a′ = copy(a) + add!(a′, b, α, β) + @test a′ ≈ β * a + α * b + + # add!! + a′ = copy(a) + add!!(a′, b) + @test a′ ≈ a + b + a′ = copy(a) + add!!(a′, b, α) + @test a′ ≈ a + α * b + a′ = copy(a) + add!!(a′, b, α, β) + @test a′ ≈ β * a + α * b + + a′ = copy(a) + a′ = add!!(a′, b, αᶜ) + @test a′ ≈ a + αᶜ * b + a′ = copy(a) + a′ = add!!(a′, b, αᶜ, βᶜ) + @test a′ ≈ βᶜ * a + αᶜ * b + + # inner + @test inner(a, b) ≈ (dag(a) * b)[] + @test inner(a, a) ≈ (dag(a) * a)[] + + # scalartype + @test scalartype(a) === elt + @test scalartype(b) === elt + @test scalartype([a, b]) === elt + @test scalartype([a, random_itensor(Float32, i, j)]) === elt + @test scalartype(ITensor[]) === Bool + + # scale + @test scale(a, α) ≈ α * a + + @test scale(a, αᶜ) ≈ αᶜ * a + + # scale! + a′ = copy(a) + scale!(a′, α) + @test a′ ≈ α * a + a′ = copy(a) + scale!(a′, b, α) + @test a′ ≈ α * b + + # scale!! 
+ a′ = copy(a) + scale!!(a′, α) + @test a′ ≈ α * a + a′ = copy(a) + scale!!(a′, b, α) + @test a′ ≈ α * b + + a′ = copy(a) + a′ = scale!!(a′, αᶜ) + @test a′ ≈ αᶜ * a + a′ = copy(a) + a′ = scale!!(a′, b, αᶜ) + @test a′ ≈ αᶜ * b + + # zerovector + z = zerovector(a) + @test iszero(z) + @test issetequal(inds(a), inds(z)) + @test eltype(z) === eltype(a) + + z = zerovector(a, complex(elt)) + @test iszero(z) + @test issetequal(inds(a), inds(z)) + @test eltype(z) === complex(eltype(a)) + + # zerovector! + z = copy(a) + zerovector!(z) + @test iszero(z) + @test issetequal(inds(a), inds(z)) + @test eltype(z) === eltype(a) + + # zerovector!! + z = copy(a) + zerovector!!(z, elt) + @test iszero(z) + @test issetequal(inds(a), inds(z)) + @test eltype(z) === eltype(a) + + z = copy(a) + z = zerovector!!(z, complex(elt)) + @test iszero(z) + @test issetequal(inds(a), inds(z)) + @test eltype(z) === complex(eltype(a)) end end diff --git a/test/ext/NDTensorsMappedArraysExt/runtests.jl b/test/ext/NDTensorsMappedArraysExt/runtests.jl index 6c38ed96fa..24913b2e5f 100644 --- a/test/ext/NDTensorsMappedArraysExt/runtests.jl +++ b/test/ext/NDTensorsMappedArraysExt/runtests.jl @@ -6,19 +6,19 @@ using Test: @test, @testset f(i::Int...) = float(sum(iⱼ -> iⱼ^2, i)) f(i::CartesianIndex) = f(Tuple(i)...) @testset "NDTensorsMappedArraysExt" begin - a = mappedarray(f, CartesianIndices((2, 2))) - b = copy(a) - i, j = Index.((2, 2)) - ta = itensor(a, i, j) - tb = itensor(b, i, j) - @test ta ≈ tb - @test ta[i => 1, j => 2] ≈ tb[i => 1, j => 2] - @test 2 * ta ≈ 2 * tb - @test ta + ta ≈ tb + tb - @test ta * ta ≈ tb * tb - ua, sa, va = svd(ta, i) - @test ua * sa * va ≈ ta - qa, ra = qr(ta, i) - @test qa * ra ≈ ta + a = mappedarray(f, CartesianIndices((2, 2))) + b = copy(a) + i, j = Index.((2, 2)) + ta = itensor(a, i, j) + tb = itensor(b, i, j) + @test ta ≈ tb + @test ta[i => 1, j => 2] ≈ tb[i => 1, j => 2] + @test 2 * ta ≈ 2 * tb + @test ta + ta ≈ tb + tb + @test ta * ta ≈ tb * tb + ua, sa, va = svd(ta, i) + @test ua * sa * va ≈ ta + qa, ra = qr(ta, i) + @test qa * ra ≈ ta end end diff --git a/test/lib/LazyApply/outdated/test_lazyapply.jl b/test/lib/LazyApply/outdated/test_lazyapply.jl index cbdadd25bc..802d76351e 100644 --- a/test/lib/LazyApply/outdated/test_lazyapply.jl +++ b/test/lib/LazyApply/outdated/test_lazyapply.jl @@ -2,43 +2,43 @@ using Test using ITensors.LazyApply: LazyApply, Add, Mul, ∑, ∏, α, materialize @testset "LazyApply general functionality" begin - @test materialize(∏([1, 2, Add(3, 4)])) == prod([1, 2, 3 + 4]) - @test ∏([1, 2, Add(3, 4)]) isa ∏ - @test materialize(3 * ∏([1, 2, Add(3, 4)])) == 3 * prod([1, 2, 3 + 4]) - @test materialize(exp(∏([1, 2, ∑([3, 4])]))) == exp(prod([1, 2, sum([3 + 4])])) - @test materialize(2 * ∑([1, 2, ∏([3, 4])])) == 2 * sum([1, 2, prod([3, 4])]) - @test 2 * ∑([1, 2, ∏([3, 4])]) == ∑([2, 4, 2∏([3, 4])]) - @test 2 * ∑([1, 2, ∏([3, 4])]) isa ∑ - @test 2∑(["X", "Y"]) == ∑([Mul(2, "X"), Mul(2, "Y")]) - @test materialize(∑() + 3 + 4) == sum([3, 4]) - @test ∑() + 3 + 4 isa ∑ - @test materialize(∑([1, 2, 3]) + ∑([4, 5, 6])) == sum([1, 2, 3, 4, 5, 6]) - @test ∑([1, 2, 3]) + ∑([4, 5, 6]) isa ∑ - @test materialize(Add(1, 2) + Add(3, 4)) == 1 + 2 + 3 + 4 - @test Add(1, 2) + Add(3, 4) == Add(1, 2, 3, 4) - @test Add(1, 2) + Add(3, 4) isa Add - @test materialize(2 * Add(1, 2)) == 2 * (1 + 2) - @test 2 * Add(1, 2) isa Add - @test materialize(3 + Add(1, 2)) == 3 + 1 + 2 - @test 3 + Add(1, 2) isa Add - @test materialize(2 * ∏([1, 2])) == 2 * prod([1, 2]) - @test 2 * ∏([1, 2]) isa α - @test 2 * ∏([1, 2]) 
isa α{<:∏} - @test 2 * ∏([1, 2]) isa α{∏{Int}} - @test ∏([1, 2]) + ∏([3, 4]) == ∑([∏([1, 2]), ∏([3, 4])]) - @test ∏([1, 2]) + ∏([3, 4]) isa ∑ - @test materialize(∑(∏([1, 2]) + ∏([3, 4]))) == sum([prod([1, 2]), prod([3, 4])]) - @test ∏([1, 2]) + ∏([3, 4]) == ∑([∏([1, 2]), ∏([3, 4])]) - @test ∏([1, 2]) + ∏([3, 4]) isa ∑ - @test ∏([1, 2]) - ∏([3, 4]) isa ∑ - @test materialize(∏(["X", "Y", "Z"])) == "XYZ" - @test ∏(["X", "Y", "Z"]) isa ∏ - @test materialize(∏() * "X" * "Y" * "Z") == "XYZ" - @test ∏() * "X" * "Y" * "Z" == ∏(["X", "Y", "Z"]) - @test ∏() * "X" * "Y" * "Z" isa ∏ - @test 2∏() * "X" * "Y" == 2∏(["X", "Y"]) - @test 2∏() * "X" * "Y" isa α{<:∏} - @test 2∏() * "X" * "Y" isa α{∏{String}} - @test 2∏() * "X" * "Y" isa α{∏{String},Int} - @test 2∏(["X"]) * 3∏(["Y"]) == 6∏(["X", "Y"]) + @test materialize(∏([1, 2, Add(3, 4)])) == prod([1, 2, 3 + 4]) + @test ∏([1, 2, Add(3, 4)]) isa ∏ + @test materialize(3 * ∏([1, 2, Add(3, 4)])) == 3 * prod([1, 2, 3 + 4]) + @test materialize(exp(∏([1, 2, ∑([3, 4])]))) == exp(prod([1, 2, sum([3 + 4])])) + @test materialize(2 * ∑([1, 2, ∏([3, 4])])) == 2 * sum([1, 2, prod([3, 4])]) + @test 2 * ∑([1, 2, ∏([3, 4])]) == ∑([2, 4, 2∏([3, 4])]) + @test 2 * ∑([1, 2, ∏([3, 4])]) isa ∑ + @test 2∑(["X", "Y"]) == ∑([Mul(2, "X"), Mul(2, "Y")]) + @test materialize(∑() + 3 + 4) == sum([3, 4]) + @test ∑() + 3 + 4 isa ∑ + @test materialize(∑([1, 2, 3]) + ∑([4, 5, 6])) == sum([1, 2, 3, 4, 5, 6]) + @test ∑([1, 2, 3]) + ∑([4, 5, 6]) isa ∑ + @test materialize(Add(1, 2) + Add(3, 4)) == 1 + 2 + 3 + 4 + @test Add(1, 2) + Add(3, 4) == Add(1, 2, 3, 4) + @test Add(1, 2) + Add(3, 4) isa Add + @test materialize(2 * Add(1, 2)) == 2 * (1 + 2) + @test 2 * Add(1, 2) isa Add + @test materialize(3 + Add(1, 2)) == 3 + 1 + 2 + @test 3 + Add(1, 2) isa Add + @test materialize(2 * ∏([1, 2])) == 2 * prod([1, 2]) + @test 2 * ∏([1, 2]) isa α + @test 2 * ∏([1, 2]) isa α{<:∏} + @test 2 * ∏([1, 2]) isa α{∏{Int}} + @test ∏([1, 2]) + ∏([3, 4]) == ∑([∏([1, 2]), ∏([3, 4])]) + @test ∏([1, 2]) + ∏([3, 4]) isa ∑ + @test materialize(∑(∏([1, 2]) + ∏([3, 4]))) == sum([prod([1, 2]), prod([3, 4])]) + @test ∏([1, 2]) + ∏([3, 4]) == ∑([∏([1, 2]), ∏([3, 4])]) + @test ∏([1, 2]) + ∏([3, 4]) isa ∑ + @test ∏([1, 2]) - ∏([3, 4]) isa ∑ + @test materialize(∏(["X", "Y", "Z"])) == "XYZ" + @test ∏(["X", "Y", "Z"]) isa ∏ + @test materialize(∏() * "X" * "Y" * "Z") == "XYZ" + @test ∏() * "X" * "Y" * "Z" == ∏(["X", "Y", "Z"]) + @test ∏() * "X" * "Y" * "Z" isa ∏ + @test 2∏() * "X" * "Y" == 2∏(["X", "Y"]) + @test 2∏() * "X" * "Y" isa α{<:∏} + @test 2∏() * "X" * "Y" isa α{∏{String}} + @test 2∏() * "X" * "Y" isa α{∏{String}, Int} + @test 2∏(["X"]) * 3∏(["Y"]) == 6∏(["X", "Y"]) end diff --git a/test/lib/LazyApply/runtests.jl b/test/lib/LazyApply/runtests.jl index c2fe5a8b6f..76d6d69d9e 100644 --- a/test/lib/LazyApply/runtests.jl +++ b/test/lib/LazyApply/runtests.jl @@ -6,11 +6,11 @@ ITensors.BLAS.set_num_threads(1) ITensors.disable_threaded_blocksparse() @testset "$(@__DIR__)" begin - filenames = filter(readdir(@__DIR__)) do f - startswith("test_")(f) && endswith(".jl")(f) - end - @testset "Test $(@__DIR__)/$filename" for filename in filenames - println("Running $(@__DIR__)/$filename") - @time include(filename) - end + filenames = filter(readdir(@__DIR__)) do f + startswith("test_")(f) && endswith(".jl")(f) + end + @testset "Test $(@__DIR__)/$filename" for filename in filenames + println("Running $(@__DIR__)/$filename") + @time include(filename) + end end diff --git a/test/lib/LazyApply/test_lazyapply.jl 
b/test/lib/LazyApply/test_lazyapply.jl index 647e96276e..cbed895abb 100644 --- a/test/lib/LazyApply/test_lazyapply.jl +++ b/test/lib/LazyApply/test_lazyapply.jl @@ -2,35 +2,35 @@ using Test using ITensors.LazyApply: LazyApply, Sum, Prod, Scaled, materialize @testset "LazyApply general functionality" begin - @test (materialize ∘ materialize ∘ materialize)(exp(Prod([1, 2, Sum([3, 4])]))) == - exp(prod([1, 2, sum([3 + 4])])) - @test_broken materialize(2 * Sum([1, 2, Prod([3, 4])])) == 2 * sum([1, 2, prod([3, 4])]) - @test 2 * Sum([1, 2, Prod([3, 4])]) == Sum([2, 4, 2Prod([3, 4])]) - @test 2 * Sum([1, 2, Prod([3, 4])]) isa Sum - @test_broken materialize(Sum() + 3 + 4) == sum([3, 4]) - @test_broken Sum() + 3 + 4 isa Sum - @test materialize(Sum([1, 2, 3]) + Sum([4, 5, 6])) == sum([1, 2, 3, 4, 5, 6]) - @test Sum([1, 2, 3]) + Sum([4, 5, 6]) isa Sum - @test materialize(2 * Prod([1, 2])) == 2 * prod([1, 2]) - @test_broken 2 * Prod([1, 2]) isa Scaled - @test_broken 2 * Prod([1, 2]) isa Scaled{<:Prod} - @test_broken 2 * Prod([1, 2]) isa Scaled{Prod{Int}} - @test 2 * Prod([1, 2]) isa Prod{Int} - @test Prod([1, 2]) + Prod([3, 4]) == Sum([Prod([1, 2]), Prod([3, 4])]) - @test Prod([1, 2]) + Prod([3, 4]) isa Sum - @test_broken materialize(Sum(Prod([1, 2]) + Prod([3, 4]))) == - sum([prod([1, 2]), prod([3, 4])]) - @test Prod([1, 2]) + Prod([3, 4]) == Sum([Prod([1, 2]), Prod([3, 4])]) - @test Prod([1, 2]) + Prod([3, 4]) isa Sum - @test_broken Prod([1, 2]) - Prod([3, 4]) isa Sum - @test materialize(Prod(["X", "Y", "Z"])) == "XYZ" - @test Prod(["X", "Y", "Z"]) isa Prod - @test_broken materialize(Prod() * "X" * "Y" * "Z") == "XYZ" - @test_broken Prod() * "X" * "Y" * "Z" == Prod(["X", "Y", "Z"]) - @test_broken Prod() * "X" * "Y" * "Z" isa Prod - @test_broken 2Prod() * "X" * "Y" == 2Prod(["X", "Y"]) - @test_broken 2Prod() * "X" * "Y" isa Scaled{<:Prod} - @test_broken 2Prod() * "X" * "Y" isa Scaled{Prod{String}} - @test_broken 2Prod() * "X" * "Y" isa Scaled{Prod{String},Int} - @test_broken 2Prod(["X"]) * 3Prod(["Y"]) == 6Prod(["X", "Y"]) + @test (materialize ∘ materialize ∘ materialize)(exp(Prod([1, 2, Sum([3, 4])]))) == + exp(prod([1, 2, sum([3 + 4])])) + @test_broken materialize(2 * Sum([1, 2, Prod([3, 4])])) == 2 * sum([1, 2, prod([3, 4])]) + @test 2 * Sum([1, 2, Prod([3, 4])]) == Sum([2, 4, 2Prod([3, 4])]) + @test 2 * Sum([1, 2, Prod([3, 4])]) isa Sum + @test_broken materialize(Sum() + 3 + 4) == sum([3, 4]) + @test_broken Sum() + 3 + 4 isa Sum + @test materialize(Sum([1, 2, 3]) + Sum([4, 5, 6])) == sum([1, 2, 3, 4, 5, 6]) + @test Sum([1, 2, 3]) + Sum([4, 5, 6]) isa Sum + @test materialize(2 * Prod([1, 2])) == 2 * prod([1, 2]) + @test_broken 2 * Prod([1, 2]) isa Scaled + @test_broken 2 * Prod([1, 2]) isa Scaled{<:Prod} + @test_broken 2 * Prod([1, 2]) isa Scaled{Prod{Int}} + @test 2 * Prod([1, 2]) isa Prod{Int} + @test Prod([1, 2]) + Prod([3, 4]) == Sum([Prod([1, 2]), Prod([3, 4])]) + @test Prod([1, 2]) + Prod([3, 4]) isa Sum + @test_broken materialize(Sum(Prod([1, 2]) + Prod([3, 4]))) == + sum([prod([1, 2]), prod([3, 4])]) + @test Prod([1, 2]) + Prod([3, 4]) == Sum([Prod([1, 2]), Prod([3, 4])]) + @test Prod([1, 2]) + Prod([3, 4]) isa Sum + @test_broken Prod([1, 2]) - Prod([3, 4]) isa Sum + @test materialize(Prod(["X", "Y", "Z"])) == "XYZ" + @test Prod(["X", "Y", "Z"]) isa Prod + @test_broken materialize(Prod() * "X" * "Y" * "Z") == "XYZ" + @test_broken Prod() * "X" * "Y" * "Z" == Prod(["X", "Y", "Z"]) + @test_broken Prod() * "X" * "Y" * "Z" isa Prod + @test_broken 2Prod() * "X" * "Y" == 2Prod(["X", "Y"]) + 
@test_broken 2Prod() * "X" * "Y" isa Scaled{<:Prod} + @test_broken 2Prod() * "X" * "Y" isa Scaled{Prod{String}} + @test_broken 2Prod() * "X" * "Y" isa Scaled{Prod{String}, Int} + @test_broken 2Prod(["X"]) * 3Prod(["Y"]) == 6Prod(["X", "Y"]) end diff --git a/test/lib/Ops/runtests.jl b/test/lib/Ops/runtests.jl index c2fe5a8b6f..76d6d69d9e 100644 --- a/test/lib/Ops/runtests.jl +++ b/test/lib/Ops/runtests.jl @@ -6,11 +6,11 @@ ITensors.BLAS.set_num_threads(1) ITensors.disable_threaded_blocksparse() @testset "$(@__DIR__)" begin - filenames = filter(readdir(@__DIR__)) do f - startswith("test_")(f) && endswith(".jl")(f) - end - @testset "Test $(@__DIR__)/$filename" for filename in filenames - println("Running $(@__DIR__)/$filename") - @time include(filename) - end + filenames = filter(readdir(@__DIR__)) do f + startswith("test_")(f) && endswith(".jl")(f) + end + @testset "Test $(@__DIR__)/$filename" for filename in filenames + println("Running $(@__DIR__)/$filename") + @time include(filename) + end end diff --git a/test/lib/Ops/test_ops.jl b/test/lib/Ops/test_ops.jl index 8061194d41..bdbec75eab 100644 --- a/test/lib/Ops/test_ops.jl +++ b/test/lib/Ops/test_ops.jl @@ -6,242 +6,242 @@ using ITensors.Ops: Ops, Op, OpSum, Prod, Scaled, Sum, coefficient, expand using ITensors.SiteTypes: op, siteinds function heisenberg(N) - os = Sum{Op}() - for j in 1:(N - 1) - os += "Sz", j, "Sz", j + 1 - os += 0.5, "S+", j, "S-", j + 1 - os += 0.5, "S-", j, "S+", j + 1 - end - return os + os = Sum{Op}() + for j in 1:(N - 1) + os += "Sz", j, "Sz", j + 1 + os += 0.5, "S+", j, "S-", j + 1 + os += 0.5, "S-", j, "S+", j + 1 + end + return os end @testset "Basic Ops" begin - x1 = Op("X", 1) - x2 = Op("X", 2) - I1 = Op(I, 1) - I2 = Op(I, 2) - y1 = Op("Y", 1) - y2 = Op("Y", 2) - CX12 = Op("CX", 1, 2) - Ry4 = Op("Ry", 4; θ=π / 3) - - @test 2y2 isa Scaled{<:Number,Op} - @test coefficient(2y2) == 2 - @test y2 / 2 isa Scaled{<:Number,Op} - @test coefficient(y2 / 2) ≈ 0.5 - @test -y2 isa Scaled{<:Number,Op} - @test 1y2 + x1 isa Sum{<:Scaled{<:Number,Op}} - @test 1y2 + x1 isa Sum{Scaled{Int,Op}} - @test x1 * y2 isa Prod{Op} - @test 2x1 * y2 isa Scaled{<:Number,Prod{Op}} - @test x1 * y2 + CX12 isa Sum{Prod{Op}} - @test x1 * y2 + x1 * CX12 isa Sum{Prod{Op}} - @test x1 * y2 + 2CX12 isa Sum{<:Scaled{<:Number,Prod{Op}}} - @test x1 * y2 - CX12 isa Sum{<:Scaled{<:Number,Prod{Op}}} - @test 2x1 * y2 + 2CX12 isa Sum{<:Scaled{<:Number,Prod{Op}}} - @test 2x1 * y2 - 2CX12 isa Sum{<:Scaled{<:Number,Prod{Op}}} - @test (2x1 * y2 - 2CX12) / 3 isa Sum{<:Scaled{<:Number,Prod{Op}}} - - o1 = Op("X", 1) - o2 = Op("Y", 2) - - @test o1 + o2 isa Sum{Op} - @test o1 - o2 isa Sum{Scaled{Int,Op}} - @test 1.3 * o1 isa Scaled{Float64,Op} - @test o1 * 1.4 isa Scaled{Float64,Op} - @test o1 + o2 + o2 isa Sum{Op} - @test 1.3o1 + 1.3o2 isa Sum{Scaled{Float64,Op}} - @test 1.3o1 + o2 isa Sum{Scaled{Float64,Op}} - @test (o1 + o2) + (o1 + o2) isa Sum{Op} - @test 1.3o1 + 1o2 isa Sum{Scaled{Float64,Op}} - @test 1.3 * (o1 + o2) isa Sum{Scaled{Float64,Op}} - @test o1 + o2 + 1.3o2 isa Sum{Scaled{Float64,Op}} - @test o1 * o2 isa Prod{Op} - @test o1 * o2 * o2 isa Prod{Op} - @test o1 * (o2 * o2) isa Prod{Op} - @test 1.3 * o1 * o2 isa Scaled{Float64,Prod{Op}} - @test 1.3 * (o1 * o2) isa Scaled{Float64,Prod{Op}} - @test 1.3 * o1 * o2 + o1 isa Sum{Scaled{Float64,Prod{Op}}} - @test 1.3 * o1 * o2 + o1 * o2 isa Sum{Scaled{Float64,Prod{Op}}} - @test 1.3 * o1 * o2 + 1.3 * o1 * o2 isa Sum{Scaled{Float64,Prod{Op}}} - @test 1.3 * o1 * o2 + 1.3 * o1 * o2 + o1 isa 
Sum{Scaled{Float64,Prod{Op}}} - @test 1.3 * o1 * o2 + 1.3 * o1 * o2 + 1.2 * o1 isa Sum{Scaled{Float64,Prod{Op}}} - @test Ops.OpSum() + o1 isa Sum{Scaled{ComplexF64,Prod{Op}}} - @test Ops.OpSum() + 1.2 * o1 isa Sum{Scaled{ComplexF64,Prod{Op}}} - @test Ops.OpSum() + (1.2 + 2.3im) * o1 isa Sum{Scaled{ComplexF64,Prod{Op}}} - @test Ops.OpSum() + 1.2 * o1 * o2 isa Sum{Scaled{ComplexF64,Prod{Op}}} - @test Ops.OpSum() - 1.2 * o1 * o2 isa Sum{Scaled{ComplexF64,Prod{Op}}} - @test Ops.OpSum() + o1 * o2 isa Sum{Scaled{ComplexF64,Prod{Op}}} - @test o1 + o2 + 2.3 * o1 * o2 isa Sum{Scaled{Float64,Prod{Op}}} - @test Sum{Op}() + ("X", 1, "Y", 2) + ("Y", 2) isa Sum{Prod{Op}} - @test Sum{Op}() + ("X", 1, "Y", 2) + (1.2, "Y", 2) isa Sum{Scaled{Float64,Prod{Op}}} - @test OpSum() - (0.5, "Z", 1, "Z", 2) isa Sum{Scaled{ComplexF64,Prod{Op}}} - - N = 4 - s = siteinds("Qubit", N) - - @test ITensor(o1, s) ≈ op("X", s, 1) - @test ITensor(2 * o1, s) ≈ 2 * ITensor(o1, s) - @test ITensor(o1 * o2, s) ≈ ITensor(o1, s) * ITensor(o2, s) - @test ITensor(2 * o1 * o2, s) ≈ 2 * ITensor(o1, s) * ITensor(o2, s) - @test ITensor(2 * o1 * o2 + o1 * o2, s) ≈ - 2 * ITensor(o1, s) * ITensor(o2, s) + ITensor(o1, s) * ITensor(o2, s) - @test ITensor(exp(o1), s) ≈ exp(ITensor(o1, s)) - @test ITensor(exp(1.2 * o1), s) ≈ exp(1.2 * ITensor(o1, s)) - @test ITensor(1.3 * exp(1.2 * o1), s) ≈ 1.3 * exp(1.2 * ITensor(o1, s)) - - o = (2x1 * y2 - 2CX12) / 3 - @test coefficient(o[1]) ≈ 2 / 3 - @test coefficient(o[2]) ≈ -2 / 3 - - t1 = ITensor(x1, s) - @test hassameinds(t1, (s[1]', dag(s[1]))) - @test t1[1, 1] == 0 - @test t1[1, 2] == 1 - - @test ITensor(2.3x1, s) ≈ 2.3 * t1 - @test ITensor(x1 + x1, s) ≈ 2t1 - @test ITensor(x1 + 2.3x1, s) ≈ 3.3t1 - - @test ITensor(Op(I, 2), s) ≈ ITensor([1 0; 0 1], s[2]', dag(s[2])) - @test ITensor(Op(2I, 2), s) ≈ 2 * ITensor([1 0; 0 1], s[2]', dag(s[2])) - - c = x1 * y2 * CX12 - cdag = c' - @test c[1]' == cdag[3] - @test c[2]' == cdag[2] - @test c[3]' == cdag[1] - - x = randn(2, 2) - tx = ITensor(Op(x, 3), s) - @test tx[s[3]' => 1, s[3] => 2] == x[1, 2] - - @test ITensor(x1 * x1, s) ≈ ITensor(Op([1 0; 0 1], 1), s) - @test ITensor(x1 * x1 * x1, s) ≈ ITensor(Op([0 1; 1 0], 1), s) - @test ITensor(2x1 * x1, s) ≈ ITensor(Op([2 0; 0 2], 1), s) - @test ITensor(x1 * y1, s) ≈ ITensor(Op([im 0; 0 -im], 1), s) - @test ITensor(y1 * x1, s) ≈ ITensor(Op([-im 0; 0 im], 1), s) - @test ITensor(2x1 * x1 + y1, s) ≈ - ITensor(2 * [1 0; 0 1] + [0 -im; im 0], s[1]', dag(s[1])) - - # TODO: Need to add support for filling out with "Id" or "F" - @test_broken ITensor(2y1 * x2 + x1, s) ≈ - 2 * ITensor(y1, s) * ITensor(x2, s) + ITensor(x1, s) * ITensor(I2, s) - - @test y1'' == y1 - - @test ITensor(y1', s) ≈ ITensor(Op([0 -im; im 0], 1), s) - - @test ITensor(exp(x1), s) ≈ ITensor(Op(exp([0 1; 1 0]), 1), s) - @test ITensor(exp(2x1 * x1), s) ≈ ITensor(exp(2 * [1 0; 0 1]), s[1]', dag(s[1])) - @test ITensor(exp(2x1 * x1 + y1), s) ≈ - ITensor(exp(2 * [1 0; 0 1] + [0 -im; im 0]), s[1]', dag(s[1])) - - @test ITensor(I1, s) ≈ ITensor([1 0; 0 1], s[1]', dag(s[1])) - - @test exp(Op("X", 1)) * Op("Y", 2) isa Prod{Any} - @test ITensor(exp(Op("X", 1)) * Op("Y", 1), s) ≈ - product(exp(ITensor(Op("X", 1), s)), ITensor(Op("Y", 1), s)) - - # TODO: Need to define `(::Scaled * ::Op)::Scaled` - @test_broken 2exp(Op("X", 1)) * Op("Y", 2) isa Scaled{<:Number,Prod{Any}} - - H = Sum{Scaled{Bool,Prod{Op}}}() - Op("X", 1) - @test H isa Sum - @test H isa Sum{<:Scaled} - @test H isa Sum{<:Scaled{<:Number,<:Prod}} - @test H isa Sum{<:Scaled{<:Number,Prod{Op}}} - @test H isa 
Sum{Scaled{T,Prod{Op}}} where {T} - @test H isa Sum{Scaled{Int,Prod{Op}}} - @test length(H) == 1 - @test coefficient(H[1]) == -1 - - H = Sum{Op}() - Op("X", 1) - @test H isa Sum - @test H isa Sum{<:Scaled} - @test H isa Sum{<:Scaled{<:Number,Op}} - @test H isa Sum{Scaled{T,Op}} where {T} - @test H isa Sum{Scaled{Int,Op}} - @test length(H) == 1 - @test coefficient(H[1]) == -1 - - # OpSum conversion - H = Sum{Op}() - H -= 2.3, "X", 1, "X", 2 - H += 1.2, "Z", 1 - H += 1.3, "Z", 2, (θ=π / 3,) - @test H isa Sum{Scaled{Float64,Prod{Op}}} - @test length(H) == 3 - @test coefficient(H[1]) == -2.3 - @test length(H[1]) == 2 - @test Ops.sites(H[1]) == [1, 2] - @test coefficient(H[2]) == 1.2 - @test length(H[2]) == 1 - @test Ops.sites(H[2]) == [1] - @test coefficient(H[3]) == 1.3 - @test length(H[3]) == 1 - @test Ops.sites(H[3]) == [2] - @test Ops.params(H[3]) == (θ=π / 3,) - - @test_broken Sum{Op}(("X", 1)) isa Sum{Op} - @test_broken Sum{Op}((2.3, "X", 1)) isa Sum{Scaled{Float64,Op}} - @test_broken Sum{Op}("X", 1) isa Sum{Op} - @test_broken Sum{Op}(2, "X", 1) isa Sum{Scaled{Int,Op}} - @test_broken Sum{Op}([Op("X", 1), 2Op("Y", 1)]) isa Sum - @test_broken Sum{Op}([Op("X", 1), 2Op("Y", 1)]) isa Sum{<:Scaled} - @test_broken Sum{Op}([Op("X", 1), 2Op("Y", 1)]) isa Sum{<:Scaled{<:Number,Op}} - @test_broken Sum{Op}([Op("X", 1), 2Op("Y", 1)]) isa Sum{Scaled{Int,Op}} - - @testset "Expand expression, 2 products" begin - expr = (Op("X", 1) + Op("Y", 2)) * (Op("Z", 1) + Op("W", 2)) - expr_expanded = - Op("X", 1) * Op("Z", 1) + - Op("Y", 2) * Op("Z", 1) + - Op("X", 1) * Op("W", 2) + - Op("Y", 2) * Op("W", 2) - @test expand(expr) == expr_expanded - end - - @testset "Expand expression, 3 products" begin - expr = (Op("X", 1) + Op("Y", 2)) * (Op("Z", 1) + Op("W", 2)) * (Op("A", 1) + Op("B", 2)) - expr_expanded = - Op("X", 1) * Op("Z", 1) * Op("A", 1) + - Op("Y", 2) * Op("Z", 1) * Op("A", 1) + - Op("X", 1) * Op("W", 2) * Op("A", 1) + - Op("Y", 2) * Op("W", 2) * Op("A", 1) + - Op("X", 1) * Op("Z", 1) * Op("B", 2) + - Op("Y", 2) * Op("Z", 1) * Op("B", 2) + - Op("X", 1) * Op("W", 2) * Op("B", 2) + - Op("Y", 2) * Op("W", 2) * Op("B", 2) - @test expand(expr) == expr_expanded - end - - H = heisenberg(4) - @test length(H) == 9 - @test H^2 == H * H - @test length(H^2) == 2 - @test length(expand(H^2)) == 81 - - @testset "Conversion to Sum of ITensors" begin - H = Sum{Op}() + ("X", 1) + ("Y", 2) - @test_broken H == Sum{Op}([("X", 1), ("Y", 2)]) - @test H == Sum{Op}() + Op("X", 1) + Op("Y", 2) - s = siteinds("Qubit", 2) - Hₜ = Sum{ITensor}(H, s) - @test Hₜ isa Sum{ITensor} - @test Hₜ[1] ≈ ITensor(Op("X", 1), s) - @test Hₜ[2] ≈ ITensor(Op("Y", 2), s) - end - - @testset "Conversion to Prod of ITensors" begin - C = Prod{Op}() * ("X", 1) * ("Y", 2) - @test_broken C == Prod{Op}([("X", 1), ("Y", 2)]) - @test C == Prod{Op}() * Op("X", 1) * Op("Y", 2) - @test C == Op("X", 1) * Op("Y", 2) - s = siteinds("Qubit", 2) - Cₜ = Prod{ITensor}(C, s) - @test Cₜ isa Prod{ITensor} - @test Cₜ[1] ≈ ITensor(Op("X", 1), s) - @test Cₜ[2] ≈ ITensor(Op("Y", 2), s) - end + x1 = Op("X", 1) + x2 = Op("X", 2) + I1 = Op(I, 1) + I2 = Op(I, 2) + y1 = Op("Y", 1) + y2 = Op("Y", 2) + CX12 = Op("CX", 1, 2) + Ry4 = Op("Ry", 4; θ = π / 3) + + @test 2y2 isa Scaled{<:Number, Op} + @test coefficient(2y2) == 2 + @test y2 / 2 isa Scaled{<:Number, Op} + @test coefficient(y2 / 2) ≈ 0.5 + @test -y2 isa Scaled{<:Number, Op} + @test 1y2 + x1 isa Sum{<:Scaled{<:Number, Op}} + @test 1y2 + x1 isa Sum{Scaled{Int, Op}} + @test x1 * y2 isa Prod{Op} + @test 2x1 * y2 isa 
Scaled{<:Number, Prod{Op}} + @test x1 * y2 + CX12 isa Sum{Prod{Op}} + @test x1 * y2 + x1 * CX12 isa Sum{Prod{Op}} + @test x1 * y2 + 2CX12 isa Sum{<:Scaled{<:Number, Prod{Op}}} + @test x1 * y2 - CX12 isa Sum{<:Scaled{<:Number, Prod{Op}}} + @test 2x1 * y2 + 2CX12 isa Sum{<:Scaled{<:Number, Prod{Op}}} + @test 2x1 * y2 - 2CX12 isa Sum{<:Scaled{<:Number, Prod{Op}}} + @test (2x1 * y2 - 2CX12) / 3 isa Sum{<:Scaled{<:Number, Prod{Op}}} + + o1 = Op("X", 1) + o2 = Op("Y", 2) + + @test o1 + o2 isa Sum{Op} + @test o1 - o2 isa Sum{Scaled{Int, Op}} + @test 1.3 * o1 isa Scaled{Float64, Op} + @test o1 * 1.4 isa Scaled{Float64, Op} + @test o1 + o2 + o2 isa Sum{Op} + @test 1.3o1 + 1.3o2 isa Sum{Scaled{Float64, Op}} + @test 1.3o1 + o2 isa Sum{Scaled{Float64, Op}} + @test (o1 + o2) + (o1 + o2) isa Sum{Op} + @test 1.3o1 + 1o2 isa Sum{Scaled{Float64, Op}} + @test 1.3 * (o1 + o2) isa Sum{Scaled{Float64, Op}} + @test o1 + o2 + 1.3o2 isa Sum{Scaled{Float64, Op}} + @test o1 * o2 isa Prod{Op} + @test o1 * o2 * o2 isa Prod{Op} + @test o1 * (o2 * o2) isa Prod{Op} + @test 1.3 * o1 * o2 isa Scaled{Float64, Prod{Op}} + @test 1.3 * (o1 * o2) isa Scaled{Float64, Prod{Op}} + @test 1.3 * o1 * o2 + o1 isa Sum{Scaled{Float64, Prod{Op}}} + @test 1.3 * o1 * o2 + o1 * o2 isa Sum{Scaled{Float64, Prod{Op}}} + @test 1.3 * o1 * o2 + 1.3 * o1 * o2 isa Sum{Scaled{Float64, Prod{Op}}} + @test 1.3 * o1 * o2 + 1.3 * o1 * o2 + o1 isa Sum{Scaled{Float64, Prod{Op}}} + @test 1.3 * o1 * o2 + 1.3 * o1 * o2 + 1.2 * o1 isa Sum{Scaled{Float64, Prod{Op}}} + @test Ops.OpSum() + o1 isa Sum{Scaled{ComplexF64, Prod{Op}}} + @test Ops.OpSum() + 1.2 * o1 isa Sum{Scaled{ComplexF64, Prod{Op}}} + @test Ops.OpSum() + (1.2 + 2.3im) * o1 isa Sum{Scaled{ComplexF64, Prod{Op}}} + @test Ops.OpSum() + 1.2 * o1 * o2 isa Sum{Scaled{ComplexF64, Prod{Op}}} + @test Ops.OpSum() - 1.2 * o1 * o2 isa Sum{Scaled{ComplexF64, Prod{Op}}} + @test Ops.OpSum() + o1 * o2 isa Sum{Scaled{ComplexF64, Prod{Op}}} + @test o1 + o2 + 2.3 * o1 * o2 isa Sum{Scaled{Float64, Prod{Op}}} + @test Sum{Op}() + ("X", 1, "Y", 2) + ("Y", 2) isa Sum{Prod{Op}} + @test Sum{Op}() + ("X", 1, "Y", 2) + (1.2, "Y", 2) isa Sum{Scaled{Float64, Prod{Op}}} + @test OpSum() - (0.5, "Z", 1, "Z", 2) isa Sum{Scaled{ComplexF64, Prod{Op}}} + + N = 4 + s = siteinds("Qubit", N) + + @test ITensor(o1, s) ≈ op("X", s, 1) + @test ITensor(2 * o1, s) ≈ 2 * ITensor(o1, s) + @test ITensor(o1 * o2, s) ≈ ITensor(o1, s) * ITensor(o2, s) + @test ITensor(2 * o1 * o2, s) ≈ 2 * ITensor(o1, s) * ITensor(o2, s) + @test ITensor(2 * o1 * o2 + o1 * o2, s) ≈ + 2 * ITensor(o1, s) * ITensor(o2, s) + ITensor(o1, s) * ITensor(o2, s) + @test ITensor(exp(o1), s) ≈ exp(ITensor(o1, s)) + @test ITensor(exp(1.2 * o1), s) ≈ exp(1.2 * ITensor(o1, s)) + @test ITensor(1.3 * exp(1.2 * o1), s) ≈ 1.3 * exp(1.2 * ITensor(o1, s)) + + o = (2x1 * y2 - 2CX12) / 3 + @test coefficient(o[1]) ≈ 2 / 3 + @test coefficient(o[2]) ≈ -2 / 3 + + t1 = ITensor(x1, s) + @test hassameinds(t1, (s[1]', dag(s[1]))) + @test t1[1, 1] == 0 + @test t1[1, 2] == 1 + + @test ITensor(2.3x1, s) ≈ 2.3 * t1 + @test ITensor(x1 + x1, s) ≈ 2t1 + @test ITensor(x1 + 2.3x1, s) ≈ 3.3t1 + + @test ITensor(Op(I, 2), s) ≈ ITensor([1 0; 0 1], s[2]', dag(s[2])) + @test ITensor(Op(2I, 2), s) ≈ 2 * ITensor([1 0; 0 1], s[2]', dag(s[2])) + + c = x1 * y2 * CX12 + cdag = c' + @test c[1]' == cdag[3] + @test c[2]' == cdag[2] + @test c[3]' == cdag[1] + + x = randn(2, 2) + tx = ITensor(Op(x, 3), s) + @test tx[s[3]' => 1, s[3] => 2] == x[1, 2] + + @test ITensor(x1 * x1, s) ≈ ITensor(Op([1 0; 0 1], 1), s) + @test 
ITensor(x1 * x1 * x1, s) ≈ ITensor(Op([0 1; 1 0], 1), s) + @test ITensor(2x1 * x1, s) ≈ ITensor(Op([2 0; 0 2], 1), s) + @test ITensor(x1 * y1, s) ≈ ITensor(Op([im 0; 0 -im], 1), s) + @test ITensor(y1 * x1, s) ≈ ITensor(Op([-im 0; 0 im], 1), s) + @test ITensor(2x1 * x1 + y1, s) ≈ + ITensor(2 * [1 0; 0 1] + [0 -im; im 0], s[1]', dag(s[1])) + + # TODO: Need to add support for filling out with "Id" or "F" + @test_broken ITensor(2y1 * x2 + x1, s) ≈ + 2 * ITensor(y1, s) * ITensor(x2, s) + ITensor(x1, s) * ITensor(I2, s) + + @test y1'' == y1 + + @test ITensor(y1', s) ≈ ITensor(Op([0 -im; im 0], 1), s) + + @test ITensor(exp(x1), s) ≈ ITensor(Op(exp([0 1; 1 0]), 1), s) + @test ITensor(exp(2x1 * x1), s) ≈ ITensor(exp(2 * [1 0; 0 1]), s[1]', dag(s[1])) + @test ITensor(exp(2x1 * x1 + y1), s) ≈ + ITensor(exp(2 * [1 0; 0 1] + [0 -im; im 0]), s[1]', dag(s[1])) + + @test ITensor(I1, s) ≈ ITensor([1 0; 0 1], s[1]', dag(s[1])) + + @test exp(Op("X", 1)) * Op("Y", 2) isa Prod{Any} + @test ITensor(exp(Op("X", 1)) * Op("Y", 1), s) ≈ + product(exp(ITensor(Op("X", 1), s)), ITensor(Op("Y", 1), s)) + + # TODO: Need to define `(::Scaled * ::Op)::Scaled` + @test_broken 2exp(Op("X", 1)) * Op("Y", 2) isa Scaled{<:Number, Prod{Any}} + + H = Sum{Scaled{Bool, Prod{Op}}}() - Op("X", 1) + @test H isa Sum + @test H isa Sum{<:Scaled} + @test H isa Sum{<:Scaled{<:Number, <:Prod}} + @test H isa Sum{<:Scaled{<:Number, Prod{Op}}} + @test H isa Sum{Scaled{T, Prod{Op}}} where {T} + @test H isa Sum{Scaled{Int, Prod{Op}}} + @test length(H) == 1 + @test coefficient(H[1]) == -1 + + H = Sum{Op}() - Op("X", 1) + @test H isa Sum + @test H isa Sum{<:Scaled} + @test H isa Sum{<:Scaled{<:Number, Op}} + @test H isa Sum{Scaled{T, Op}} where {T} + @test H isa Sum{Scaled{Int, Op}} + @test length(H) == 1 + @test coefficient(H[1]) == -1 + + # OpSum conversion + H = Sum{Op}() + H -= 2.3, "X", 1, "X", 2 + H += 1.2, "Z", 1 + H += 1.3, "Z", 2, (θ = π / 3,) + @test H isa Sum{Scaled{Float64, Prod{Op}}} + @test length(H) == 3 + @test coefficient(H[1]) == -2.3 + @test length(H[1]) == 2 + @test Ops.sites(H[1]) == [1, 2] + @test coefficient(H[2]) == 1.2 + @test length(H[2]) == 1 + @test Ops.sites(H[2]) == [1] + @test coefficient(H[3]) == 1.3 + @test length(H[3]) == 1 + @test Ops.sites(H[3]) == [2] + @test Ops.params(H[3]) == (θ = π / 3,) + + @test_broken Sum{Op}(("X", 1)) isa Sum{Op} + @test_broken Sum{Op}((2.3, "X", 1)) isa Sum{Scaled{Float64, Op}} + @test_broken Sum{Op}("X", 1) isa Sum{Op} + @test_broken Sum{Op}(2, "X", 1) isa Sum{Scaled{Int, Op}} + @test_broken Sum{Op}([Op("X", 1), 2Op("Y", 1)]) isa Sum + @test_broken Sum{Op}([Op("X", 1), 2Op("Y", 1)]) isa Sum{<:Scaled} + @test_broken Sum{Op}([Op("X", 1), 2Op("Y", 1)]) isa Sum{<:Scaled{<:Number, Op}} + @test_broken Sum{Op}([Op("X", 1), 2Op("Y", 1)]) isa Sum{Scaled{Int, Op}} + + @testset "Expand expression, 2 products" begin + expr = (Op("X", 1) + Op("Y", 2)) * (Op("Z", 1) + Op("W", 2)) + expr_expanded = + Op("X", 1) * Op("Z", 1) + + Op("Y", 2) * Op("Z", 1) + + Op("X", 1) * Op("W", 2) + + Op("Y", 2) * Op("W", 2) + @test expand(expr) == expr_expanded + end + + @testset "Expand expression, 3 products" begin + expr = (Op("X", 1) + Op("Y", 2)) * (Op("Z", 1) + Op("W", 2)) * (Op("A", 1) + Op("B", 2)) + expr_expanded = + Op("X", 1) * Op("Z", 1) * Op("A", 1) + + Op("Y", 2) * Op("Z", 1) * Op("A", 1) + + Op("X", 1) * Op("W", 2) * Op("A", 1) + + Op("Y", 2) * Op("W", 2) * Op("A", 1) + + Op("X", 1) * Op("Z", 1) * Op("B", 2) + + Op("Y", 2) * Op("Z", 1) * Op("B", 2) + + Op("X", 1) * Op("W", 2) * Op("B", 2) + + Op("Y", 
2) * Op("W", 2) * Op("B", 2) + @test expand(expr) == expr_expanded + end + + H = heisenberg(4) + @test length(H) == 9 + @test H^2 == H * H + @test length(H^2) == 2 + @test length(expand(H^2)) == 81 + + @testset "Conversion to Sum of ITensors" begin + H = Sum{Op}() + ("X", 1) + ("Y", 2) + @test_broken H == Sum{Op}([("X", 1), ("Y", 2)]) + @test H == Sum{Op}() + Op("X", 1) + Op("Y", 2) + s = siteinds("Qubit", 2) + Hₜ = Sum{ITensor}(H, s) + @test Hₜ isa Sum{ITensor} + @test Hₜ[1] ≈ ITensor(Op("X", 1), s) + @test Hₜ[2] ≈ ITensor(Op("Y", 2), s) + end + + @testset "Conversion to Prod of ITensors" begin + C = Prod{Op}() * ("X", 1) * ("Y", 2) + @test_broken C == Prod{Op}([("X", 1), ("Y", 2)]) + @test C == Prod{Op}() * Op("X", 1) * Op("Y", 2) + @test C == Op("X", 1) * Op("Y", 2) + s = siteinds("Qubit", 2) + Cₜ = Prod{ITensor}(C, s) + @test Cₜ isa Prod{ITensor} + @test Cₜ[1] ≈ ITensor(Op("X", 1), s) + @test Cₜ[2] ≈ ITensor(Op("Y", 2), s) + end end diff --git a/test/lib/Ops/test_trotter.jl b/test/lib/Ops/test_trotter.jl index 479eb20d82..3fff4e7056 100644 --- a/test/lib/Ops/test_trotter.jl +++ b/test/lib/Ops/test_trotter.jl @@ -4,26 +4,26 @@ using ITensors.Ops: Op, Prod, Sum, Trotter using ITensors.SiteTypes: siteinds @testset "Simple trotterization" begin - H = Sum{Op}() + ("X", 1) + ("Y", 1) + H = Sum{Op}() + ("X", 1) + ("Y", 1) - s = siteinds("Qubit", 1) + s = siteinds("Qubit", 1) - for nsteps in [10, 100, 1000] - expHᵉˣᵃᶜᵗ = ITensor(exp(H), s) - @test expHᵉˣᵃᶜᵗ ≈ ITensor(exp(H; alg=Trotter{1}(nsteps)), s) rtol = 1 / nsteps - @test expHᵉˣᵃᶜᵗ ≈ ITensor(exp(H; alg=Trotter{2}(nsteps)), s) rtol = (1 / nsteps)^2 - @test_broken expHᵉˣᵃᶜᵗ ≈ ITensor(exp(H; alg=Trotter{4}(nsteps)), s) rtol = - (1 / nsteps)^2 - @test_broken expHᵉˣᵃᶜᵗ ≈ ITensor(exp(H; alg=Trotter{8}(nsteps)), s) rtol = - (1 / nsteps)^2 + for nsteps in [10, 100, 1000] + expHᵉˣᵃᶜᵗ = ITensor(exp(H), s) + @test expHᵉˣᵃᶜᵗ ≈ ITensor(exp(H; alg = Trotter{1}(nsteps)), s) rtol = 1 / nsteps + @test expHᵉˣᵃᶜᵗ ≈ ITensor(exp(H; alg = Trotter{2}(nsteps)), s) rtol = (1 / nsteps)^2 + @test_broken expHᵉˣᵃᶜᵗ ≈ ITensor(exp(H; alg = Trotter{4}(nsteps)), s) rtol = + (1 / nsteps)^2 + @test_broken expHᵉˣᵃᶜᵗ ≈ ITensor(exp(H; alg = Trotter{8}(nsteps)), s) rtol = + (1 / nsteps)^2 - # Convert to ITensors - t = 1.0 - Uᵉˣᵃᶜᵗ = ITensor(exp(im * t * H), s) - U = Prod{ITensor}(exp(im * t * H; alg=Trotter{2}(nsteps)), s) - ψ₀ = onehot(s .=> "0") - Uᵉˣᵃᶜᵗψ₀ = Uᵉˣᵃᶜᵗ(ψ₀) - Uψ₀ = U(ψ₀) - @test Uᵉˣᵃᶜᵗψ₀ ≈ Uψ₀ rtol = (1 / nsteps)^2 - end + # Convert to ITensors + t = 1.0 + Uᵉˣᵃᶜᵗ = ITensor(exp(im * t * H), s) + U = Prod{ITensor}(exp(im * t * H; alg = Trotter{2}(nsteps)), s) + ψ₀ = onehot(s .=> "0") + Uᵉˣᵃᶜᵗψ₀ = Uᵉˣᵃᶜᵗ(ψ₀) + Uψ₀ = U(ψ₀) + @test Uᵉˣᵃᶜᵗψ₀ ≈ Uψ₀ rtol = (1 / nsteps)^2 + end end diff --git a/test/runtests.jl b/test/runtests.jl index 66dc122234..a5a329d29e 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -6,30 +6,30 @@ ITensors.BLAS.set_num_threads(1) ITensors.disable_threaded_blocksparse() @testset "ITensors tests" begin - # Make a copy in case a test modifies it. - test_args = copy(ARGS) - println("Passed arguments ARGS = $(test_args) to tests.") - if isempty(test_args) || "all" in test_args || "base" in test_args - println( - """\nArguments ARGS = $(test_args) are empty, or contain `"all"` or `"base"`. 
Running base (non-MPS/MPO) ITensors tests.""", - ) - dirs = [ - "lib/LazyApply", - "lib/Ops", - "base", - "threading", - "ext/ITensorsChainRulesCoreExt", - "ext/ITensorsTensorOperationsExt", - "ext/ITensorsVectorInterfaceExt", - "ext/NDTensorsMappedArraysExt", - ] - @time for dir in dirs - println("\nTest $(@__DIR__)/$(dir)") - @time include(joinpath(@__DIR__, dir, "runtests.jl")) - if ARGS ≠ test_args - # Fix ARGS in case a test modifies it. - append!(empty!(ARGS), test_args) - end + # Make a copy in case a test modifies it. + test_args = copy(ARGS) + println("Passed arguments ARGS = $(test_args) to tests.") + if isempty(test_args) || "all" in test_args || "base" in test_args + println( + """\nArguments ARGS = $(test_args) are empty, or contain `"all"` or `"base"`. Running base (non-MPS/MPO) ITensors tests.""", + ) + dirs = [ + "lib/LazyApply", + "lib/Ops", + "base", + "threading", + "ext/ITensorsChainRulesCoreExt", + "ext/ITensorsTensorOperationsExt", + "ext/ITensorsVectorInterfaceExt", + "ext/NDTensorsMappedArraysExt", + ] + @time for dir in dirs + println("\nTest $(@__DIR__)/$(dir)") + @time include(joinpath(@__DIR__, dir, "runtests.jl")) + if ARGS ≠ test_args + # Fix ARGS in case a test modifies it. + append!(empty!(ARGS), test_args) + end + end end - end end diff --git a/test/threading/runtests.jl b/test/threading/runtests.jl index c2fe5a8b6f..76d6d69d9e 100644 --- a/test/threading/runtests.jl +++ b/test/threading/runtests.jl @@ -6,11 +6,11 @@ ITensors.BLAS.set_num_threads(1) ITensors.disable_threaded_blocksparse() @testset "$(@__DIR__)" begin - filenames = filter(readdir(@__DIR__)) do f - startswith("test_")(f) && endswith(".jl")(f) - end - @testset "Test $(@__DIR__)/$filename" for filename in filenames - println("Running $(@__DIR__)/$filename") - @time include(filename) - end + filenames = filter(readdir(@__DIR__)) do f + startswith("test_")(f) && endswith(".jl")(f) + end + @testset "Test $(@__DIR__)/$filename" for filename in filenames + println("Running $(@__DIR__)/$filename") + @time include(filename) + end end diff --git a/test/threading/test_threading.jl b/test/threading/test_threading.jl index 5b7988d549..b5f78add23 100644 --- a/test/threading/test_threading.jl +++ b/test/threading/test_threading.jl @@ -3,80 +3,80 @@ using Test using LinearAlgebra if isone(Threads.nthreads()) - @warn "Testing block sparse multithreading but only one thread is set!" + @warn "Testing block sparse multithreading but only one thread is set!" 
end @testset "Threading" begin - blas_num_threads = BLAS.get_num_threads() - strided_num_threads = ITensors.NDTensors.Strided.get_num_threads() + blas_num_threads = BLAS.get_num_threads() + strided_num_threads = ITensors.NDTensors.Strided.get_num_threads() - BLAS.set_num_threads(1) - ITensors.NDTensors.Strided.set_num_threads(1) + BLAS.set_num_threads(1) + ITensors.NDTensors.Strided.set_num_threads(1) - @testset "Getting and setting global flags" begin - enabled0 = ITensors.enable_threaded_blocksparse(false) - @test !ITensors.using_threaded_blocksparse() - enabled1 = ITensors.enable_threaded_blocksparse(true) - @test !enabled1 - @test ITensors.using_threaded_blocksparse() - enabled2 = ITensors.enable_threaded_blocksparse(false) - @test enabled2 - @test !ITensors.using_threaded_blocksparse() - enabled3 = ITensors.enable_threaded_blocksparse(enabled0) - @test !enabled3 - @test ITensors.using_threaded_blocksparse() == enabled0 - end + @testset "Getting and setting global flags" begin + enabled0 = ITensors.enable_threaded_blocksparse(false) + @test !ITensors.using_threaded_blocksparse() + enabled1 = ITensors.enable_threaded_blocksparse(true) + @test !enabled1 + @test ITensors.using_threaded_blocksparse() + enabled2 = ITensors.enable_threaded_blocksparse(false) + @test enabled2 + @test !ITensors.using_threaded_blocksparse() + enabled3 = ITensors.enable_threaded_blocksparse(enabled0) + @test !enabled3 + @test ITensors.using_threaded_blocksparse() == enabled0 + end - @testset "Threaded contraction" begin - i = Index([QN(0) => 500, QN(1) => 500]) - A = random_itensor(i', dag(i)) + @testset "Threaded contraction" begin + i = Index([QN(0) => 500, QN(1) => 500]) + A = random_itensor(i', dag(i)) - enabled = ITensors.disable_threaded_blocksparse() - R = A' * A - ITensors.enable_threaded_blocksparse() - Rthreaded = A' * A - @test R ≈ Rthreaded - if !enabled - ITensors.disable_threaded_blocksparse() - end + enabled = ITensors.disable_threaded_blocksparse() + R = A' * A + ITensors.enable_threaded_blocksparse() + Rthreaded = A' * A + @test R ≈ Rthreaded + if !enabled + ITensors.disable_threaded_blocksparse() + end - # New interface - enabled = ITensors.enable_threaded_blocksparse(false) - R = A' * A - ITensors.enable_threaded_blocksparse(true) - Rthreaded = A' * A - @test R ≈ Rthreaded - ITensors.enable_threaded_blocksparse(enabled) + # New interface + enabled = ITensors.enable_threaded_blocksparse(false) + R = A' * A + ITensors.enable_threaded_blocksparse(true) + Rthreaded = A' * A + @test R ≈ Rthreaded + ITensors.enable_threaded_blocksparse(enabled) - # TODO: Test timing? - # ITensors.enable_threaded_blocksparse(false) - # time = @elapsed B = A' * A - # ITensors.enable_threaded_blocksparse(true) - # time_threaded = @elapsed B = A' * A - # @test time > time_threaded + # TODO: Test timing? 
+ # ITensors.enable_threaded_blocksparse(false) + # time = @elapsed B = A' * A + # ITensors.enable_threaded_blocksparse(true) + # time_threaded = @elapsed B = A' * A + # @test time > time_threaded - end + end - @testset "Contraction resulting in no blocks with threading bug" begin - i = Index([QN(0) => 1, QN(1) => 1]) - A = ITensor(i', dag(i)) - B = ITensor(i', dag(i)) - A[i' => 1, i => 1] = 11.0 - B[i' => 2, i => 2] = 22.0 + @testset "Contraction resulting in no blocks with threading bug" begin + i = Index([QN(0) => 1, QN(1) => 1]) + A = ITensor(i', dag(i)) + B = ITensor(i', dag(i)) + A[i' => 1, i => 1] = 11.0 + B[i' => 2, i => 2] = 22.0 - enabled = ITensors.enable_threaded_blocksparse(false) - C1 = A' * B - ITensors.enable_threaded_blocksparse(true) - C2 = A' * B - ITensors.enable_threaded_blocksparse(enabled) + enabled = ITensors.enable_threaded_blocksparse(false) + C1 = A' * B + ITensors.enable_threaded_blocksparse(true) + C2 = A' * B + ITensors.enable_threaded_blocksparse(enabled) - @test nnzblocks(C1) == 0 - @test nnzblocks(C2) == 0 - @test nnz(C1) == 0 - @test nnz(C2) == 0 - @test C1 ≈ C2 - end + @test nnzblocks(C1) == 0 + @test nnzblocks(C2) == 0 + @test nnz(C1) == 0 + @test nnz(C2) == 0 + @test C1 ≈ C2 + end - BLAS.set_num_threads(blas_num_threads) - ITensors.NDTensors.Strided.set_num_threads(strided_num_threads) + BLAS.set_num_threads(blas_num_threads) + ITensors.NDTensors.Strided.set_num_threads(strided_num_threads) end