@@ -18,6 +19,7 @@ If practical and applicable, please include a minimal demonstration of the previ
```julia
[YOUR MINIMAL DEMONSTRATION OF NEW BEHAVIOR]
```
+
# How Has This Been Tested?
diff --git a/.github/workflows/FormatCheck.yml b/.github/workflows/FormatCheck.yml
index 3f78afc276..1525861f42 100644
--- a/.github/workflows/FormatCheck.yml
+++ b/.github/workflows/FormatCheck.yml
@@ -1,11 +1,14 @@
name: "Format Check"
on:
- push:
- branches:
- - 'main'
- tags: '*'
- pull_request:
+ pull_request_target:
+ paths: ['**/*.jl']
+ types: [opened, synchronize, reopened, ready_for_review]
+
+permissions:
+ contents: read
+ actions: write
+ pull-requests: write
jobs:
format-check:
diff --git a/.github/workflows/FormatPullRequest.yml b/.github/workflows/FormatPullRequest.yml
new file mode 100644
index 0000000000..93875cc384
--- /dev/null
+++ b/.github/workflows/FormatPullRequest.yml
@@ -0,0 +1,14 @@
+name: "Format Pull Request"
+
+on:
+ schedule:
+ - cron: '0 0 * * *'
+ workflow_dispatch:
+permissions:
+ contents: write
+ pull-requests: write
+
+jobs:
+ format-pull-request:
+ name: "Format Pull Request"
+ uses: "ITensor/ITensorActions/.github/workflows/FormatPullRequest.yml@main"
diff --git a/.github/workflows/VersionCheck.yml b/.github/workflows/VersionCheck.yml
new file mode 100644
index 0000000000..69444f1b89
--- /dev/null
+++ b/.github/workflows/VersionCheck.yml
@@ -0,0 +1,11 @@
+name: "Version Check"
+
+on:
+ pull_request:
+
+jobs:
+ version-check:
+ name: "Version Check"
+ uses: "ITensor/ITensorActions/.github/workflows/VersionCheck.yml@main"
+ with:
+ localregistry: https://github.com/ITensor/ITensorRegistry.git
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index f6e2753708..a4f7c6b2e0 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,5 +1,5 @@
ci:
- skip: [julia-formatter]
+ skip: [runic]
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
@@ -11,10 +11,8 @@ repos:
- id: end-of-file-fixer
exclude: '.*references/.*\.txt$' # do not check reference TN images
exclude_types: [markdown] # incompatible with Literate.jl
- - id: trailing-whitespace
- exclude: '.*references/.*\.txt$' # do not check reference TN images
-- repo: "https://github.com/domluna/JuliaFormatter.jl"
- rev: v2.1.6
+- repo: https://github.com/fredrikekre/runic-pre-commit
+ rev: v2.0.1
hooks:
- - id: "julia-formatter"
+ - id: runic
diff --git a/NDTensors/Project.toml b/NDTensors/Project.toml
index 0c71276090..a6f620e419 100644
--- a/NDTensors/Project.toml
+++ b/NDTensors/Project.toml
@@ -1,7 +1,7 @@
name = "NDTensors"
uuid = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf"
authors = ["Matthew Fishman "]
-version = "0.4.11"
+version = "0.4.12"
[deps]
Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697"
diff --git a/NDTensors/ext/NDTensorsHDF5Ext/blocksparse.jl b/NDTensors/ext/NDTensorsHDF5Ext/blocksparse.jl
index 64c360adb8..5cedde9385 100644
--- a/NDTensors/ext/NDTensorsHDF5Ext/blocksparse.jl
+++ b/NDTensors/ext/NDTensorsHDF5Ext/blocksparse.jl
@@ -3,66 +3,66 @@ using NDTensors: data, Block, blockoffsets, BlockOffsets, BlockSparse
# Helper function for HDF5 write/read of BlockSparse
function offsets_to_array(boff::BlockOffsets{N}) where {N}
- nblocks = length(boff)
- asize = (N + 1) * nblocks
- n = 1
- a = Vector{Int}(undef, asize)
- for bo in pairs(boff)
- for j in 1:N
- a[n] = bo[1][j]
- n += 1
+ nblocks = length(boff)
+ asize = (N + 1) * nblocks
+ n = 1
+ a = Vector{Int}(undef, asize)
+ for bo in pairs(boff)
+ for j in 1:N
+ a[n] = bo[1][j]
+ n += 1
+ end
+ a[n] = bo[2]
+ n += 1
end
- a[n] = bo[2]
- n += 1
- end
- return a
+ return a
end
# Helper function for HDF5 write/read of BlockSparse
function array_to_offsets(a, N::Int)
- asize = length(a)
- nblocks = div(asize, N + 1)
- boff = BlockOffsets{N}()
- j = 0
- for b in 1:nblocks
- insert!(boff, Block(ntuple(i -> (a[j + i]), N)), a[j + N + 1])
- j += (N + 1)
- end
- return boff
+ asize = length(a)
+ nblocks = div(asize, N + 1)
+ boff = BlockOffsets{N}()
+ j = 0
+ for b in 1:nblocks
+ insert!(boff, Block(ntuple(i -> (a[j + i]), N)), a[j + N + 1])
+ j += (N + 1)
+ end
+ return boff
end
-function HDF5.write(parent::Union{HDF5.File,HDF5.Group}, name::String, B::BlockSparse)
- g = create_group(parent, name)
- attributes(g)["type"] = "BlockSparse{$(eltype(B))}"
- attributes(g)["version"] = 1
- if eltype(B) != Nothing
- write(g, "ndims", ndims(B))
- write(g, "data", data(B))
- off_array = offsets_to_array(blockoffsets(B))
- write(g, "offsets", off_array)
- end
+function HDF5.write(parent::Union{HDF5.File, HDF5.Group}, name::String, B::BlockSparse)
+ g = create_group(parent, name)
+ attributes(g)["type"] = "BlockSparse{$(eltype(B))}"
+ attributes(g)["version"] = 1
+ return if eltype(B) != Nothing
+ write(g, "ndims", ndims(B))
+ write(g, "data", data(B))
+ off_array = offsets_to_array(blockoffsets(B))
+ write(g, "offsets", off_array)
+ end
end
function HDF5.read(
- parent::Union{HDF5.File,HDF5.Group}, name::AbstractString, ::Type{Store}
-) where {Store<:BlockSparse}
- g = open_group(parent, name)
- ElT = eltype(Store)
- typestr = "BlockSparse{$ElT}"
- if read(attributes(g)["type"]) != typestr
- error("HDF5 group or file does not contain $typestr data")
- end
- N = read(g, "ndims")
- off_array = read(g, "offsets")
- boff = array_to_offsets(off_array, N)
- # Attribute __complex__ is attached to the "data" dataset
- # by the h5 library used by C++ version of ITensor:
- if haskey(attributes(g["data"]), "__complex__")
- M = read(g, "data")
- nelt = size(M, 1) * size(M, 2)
- data = Vector(reinterpret(ComplexF64, reshape(M, nelt)))
- else
- data = read(g, "data")
- end
- return BlockSparse(data, boff)
+ parent::Union{HDF5.File, HDF5.Group}, name::AbstractString, ::Type{Store}
+ ) where {Store <: BlockSparse}
+ g = open_group(parent, name)
+ ElT = eltype(Store)
+ typestr = "BlockSparse{$ElT}"
+ if read(attributes(g)["type"]) != typestr
+ error("HDF5 group or file does not contain $typestr data")
+ end
+ N = read(g, "ndims")
+ off_array = read(g, "offsets")
+ boff = array_to_offsets(off_array, N)
+ # Attribute __complex__ is attached to the "data" dataset
+ # by the h5 library used by C++ version of ITensor:
+ if haskey(attributes(g["data"]), "__complex__")
+ M = read(g, "data")
+ nelt = size(M, 1) * size(M, 2)
+ data = Vector(reinterpret(ComplexF64, reshape(M, nelt)))
+ else
+ data = read(g, "data")
+ end
+ return BlockSparse(data, boff)
end
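For context on the `__complex__` branch in the reader above: the dataset written by the C++ ITensor HDF5 layer comes back as a real-valued matrix, and the Julia side recovers complex values by reinterpreting its column-major data as `ComplexF64`. A minimal standalone sketch of that conversion, assuming a 2×n layout with real parts in the first row and imaginary parts in the second (made-up values, illustration only, not part of this patch):

```julia
# Illustration only: stand-in for the real-valued matrix `M` read from HDF5,
# assuming real parts in row 1 and imaginary parts in row 2.
M = [1.0 3.0 5.0;
     2.0 4.0 6.0]
nelt = size(M, 1) * size(M, 2)
data = Vector(reinterpret(ComplexF64, reshape(M, nelt)))
# data == ComplexF64[1.0 + 2.0im, 3.0 + 4.0im, 5.0 + 6.0im]
```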
diff --git a/NDTensors/ext/NDTensorsHDF5Ext/dense.jl b/NDTensors/ext/NDTensorsHDF5Ext/dense.jl
index baab94b601..cf1ddb882a 100644
--- a/NDTensors/ext/NDTensorsHDF5Ext/dense.jl
+++ b/NDTensors/ext/NDTensorsHDF5Ext/dense.jl
@@ -2,36 +2,36 @@ using HDF5: HDF5, attributes, create_group, open_group, read, write
using NDTensors: Dense
function HDF5.write(
- parent::Union{HDF5.File,HDF5.Group}, name::String, D::Store
-) where {Store<:Dense}
- g = create_group(parent, name)
- attributes(g)["type"] = "Dense{$(eltype(Store))}"
- attributes(g)["version"] = 1
- if eltype(D) != Nothing
- write(g, "data", D.data)
- end
+ parent::Union{HDF5.File, HDF5.Group}, name::String, D::Store
+ ) where {Store <: Dense}
+ g = create_group(parent, name)
+ attributes(g)["type"] = "Dense{$(eltype(Store))}"
+ attributes(g)["version"] = 1
+ return if eltype(D) != Nothing
+ write(g, "data", D.data)
+ end
end
function HDF5.read(
- parent::Union{HDF5.File,HDF5.Group}, name::AbstractString, ::Type{Store}
-) where {Store<:Dense}
- g = open_group(parent, name)
- ElT = eltype(Store)
- typestr = "Dense{$ElT}"
- if read(attributes(g)["type"]) != typestr
- error("HDF5 group or file does not contain $typestr data")
- end
- if ElT == Nothing
- return Dense{Nothing}()
- end
- # Attribute __complex__ is attached to the "data" dataset
- # by the h5 library used by C++ version of ITensor:
- if haskey(attributes(g["data"]), "__complex__")
- M = read(g, "data")
- nelt = size(M, 1) * size(M, 2)
- data = Vector(reinterpret(ComplexF64, reshape(M, nelt)))
- else
- data = read(g, "data")
- end
- return Dense{ElT}(data)
+ parent::Union{HDF5.File, HDF5.Group}, name::AbstractString, ::Type{Store}
+ ) where {Store <: Dense}
+ g = open_group(parent, name)
+ ElT = eltype(Store)
+ typestr = "Dense{$ElT}"
+ if read(attributes(g)["type"]) != typestr
+ error("HDF5 group or file does not contain $typestr data")
+ end
+ if ElT == Nothing
+ return Dense{Nothing}()
+ end
+ # Attribute __complex__ is attached to the "data" dataset
+ # by the h5 library used by C++ version of ITensor:
+ if haskey(attributes(g["data"]), "__complex__")
+ M = read(g, "data")
+ nelt = size(M, 1) * size(M, 2)
+ data = Vector(reinterpret(ComplexF64, reshape(M, nelt)))
+ else
+ data = read(g, "data")
+ end
+ return Dense{ElT}(data)
end
diff --git a/NDTensors/ext/NDTensorsHDF5Ext/diag.jl b/NDTensors/ext/NDTensorsHDF5Ext/diag.jl
index b5e8215173..0341a10e7e 100644
--- a/NDTensors/ext/NDTensorsHDF5Ext/diag.jl
+++ b/NDTensors/ext/NDTensorsHDF5Ext/diag.jl
@@ -2,37 +2,37 @@ using HDF5: HDF5, attributes, create_group, open_group, read, write
using NDTensors: datatype, Dense, Diag
function HDF5.write(
- parent::Union{HDF5.File,HDF5.Group}, name::String, D::Store
-) where {Store<:Diag}
- g = create_group(parent, name)
- attributes(g)["type"] = "Diag{$(eltype(Store)),$(datatype(Store))}"
- attributes(g)["version"] = 1
- if eltype(D) != Nothing
- write(g, "data", D.data)
- end
+ parent::Union{HDF5.File, HDF5.Group}, name::String, D::Store
+ ) where {Store <: Diag}
+ g = create_group(parent, name)
+ attributes(g)["type"] = "Diag{$(eltype(Store)),$(datatype(Store))}"
+ attributes(g)["version"] = 1
+ return if eltype(D) != Nothing
+ write(g, "data", D.data)
+ end
end
function HDF5.read(
- parent::Union{HDF5.File,HDF5.Group}, name::AbstractString, ::Type{Store}
-) where {Store<:Diag}
- g = open_group(parent, name)
- ElT = eltype(Store)
- DataT = datatype(Store)
- typestr = "Diag{$ElT,$DataT}"
- if read(attributes(g)["type"]) != typestr
- error("HDF5 group or file does not contain $typestr data")
- end
- if ElT == Nothing
- return Dense{Nothing}()
- end
- # Attribute __complex__ is attached to the "data" dataset
- # by the h5 library used by C++ version of ITensor:
- if haskey(attributes(g["data"]), "__complex__")
- M = read(g, "data")
- nelt = size(M, 1) * size(M, 2)
- data = Vector(reinterpret(ComplexF64, reshape(M, nelt)))
- else
- data = read(g, "data")
- end
- return Diag{ElT,DataT}(data)
+ parent::Union{HDF5.File, HDF5.Group}, name::AbstractString, ::Type{Store}
+ ) where {Store <: Diag}
+ g = open_group(parent, name)
+ ElT = eltype(Store)
+ DataT = datatype(Store)
+ typestr = "Diag{$ElT,$DataT}"
+ if read(attributes(g)["type"]) != typestr
+ error("HDF5 group or file does not contain $typestr data")
+ end
+ if ElT == Nothing
+ return Dense{Nothing}()
+ end
+ # Attribute __complex__ is attached to the "data" dataset
+ # by the h5 library used by C++ version of ITensor:
+ if haskey(attributes(g["data"]), "__complex__")
+ M = read(g, "data")
+ nelt = size(M, 1) * size(M, 2)
+ data = Vector(reinterpret(ComplexF64, reshape(M, nelt)))
+ else
+ data = read(g, "data")
+ end
+ return Diag{ElT, DataT}(data)
end
diff --git a/NDTensors/ext/NDTensorsTBLISExt/contract.jl b/NDTensors/ext/NDTensorsTBLISExt/contract.jl
index 4b2661ea6f..1bb28db46d 100644
--- a/NDTensors/ext/NDTensorsTBLISExt/contract.jl
+++ b/NDTensors/ext/NDTensorsTBLISExt/contract.jl
@@ -1,43 +1,42 @@
-
function contract!(
- ::Val{:TBLIS},
- R::DenseTensor{ElT},
- labelsR,
- T1::DenseTensor{ElT},
- labelsT1,
- T2::DenseTensor{ElT},
- labelsT2,
- α::ElT,
- β::ElT,
-) where {ElT<:LinearAlgebra.BlasReal}
- # TBLIS Tensors
- R_tblis = TBLIS.TTensor{ElT}(array(R), β)
- T1_tblis = TBLIS.TTensor{ElT}(array(T1), α)
- T2_tblis = TBLIS.TTensor{ElT}(array(T2))
+ ::Val{:TBLIS},
+ R::DenseTensor{ElT},
+ labelsR,
+ T1::DenseTensor{ElT},
+ labelsT1,
+ T2::DenseTensor{ElT},
+ labelsT2,
+ α::ElT,
+ β::ElT,
+ ) where {ElT <: LinearAlgebra.BlasReal}
+ # TBLIS Tensors
+ R_tblis = TBLIS.TTensor{ElT}(array(R), β)
+ T1_tblis = TBLIS.TTensor{ElT}(array(T1), α)
+ T2_tblis = TBLIS.TTensor{ElT}(array(T2))
- function label_to_char(label)
- # Start at 'a'
- char_start = Char(96)
- if label < 0
- # Start at 'z'
- char_start = Char(123)
+ function label_to_char(label)
+ # Start at 'a'
+ char_start = Char(96)
+ if label < 0
+ # Start at 'z'
+ char_start = Char(123)
+ end
+ return char_start + label
end
- return char_start + label
- end
- function labels_to_tblis(labels)
- if isempty(labels)
- return ""
+ function labels_to_tblis(labels)
+ if isempty(labels)
+ return ""
+ end
+ str = prod(label_to_char.(labels))
+ return str
end
- str = prod(label_to_char.(labels))
- return str
- end
- labelsT1_tblis = labels_to_tblis(labelsT1)
- labelsT2_tblis = labels_to_tblis(labelsT2)
- labelsR_tblis = labels_to_tblis(labelsR)
+ labelsT1_tblis = labels_to_tblis(labelsT1)
+ labelsT2_tblis = labels_to_tblis(labelsT2)
+ labelsR_tblis = labels_to_tblis(labelsR)
- TBLIS.mul!(R_tblis, T1_tblis, T2_tblis, labelsT1_tblis, labelsT2_tblis, labelsR_tblis)
+ TBLIS.mul!(R_tblis, T1_tblis, T2_tblis, labelsT1_tblis, labelsT2_tblis, labelsR_tblis)
- return R
+ return R
end
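The local `label_to_char` helper above maps positive contraction labels onto `'a', 'b', …` and negative labels onto `'z', 'y', …` by offsetting from `Char(96)` (one before `'a'`) or `Char(123)` (one past `'z'`). A quick standalone check of that mapping, with the helper re-declared here purely for illustration (not part of this patch):

```julia
# Re-declaration for illustration; mirrors the local helper inside `contract!` above.
function label_to_char(label)
    char_start = label < 0 ? Char(123) : Char(96)  # one past 'z', or one before 'a'
    return char_start + label
end

@assert label_to_char(1) == 'a' && label_to_char(2) == 'b'
@assert label_to_char(-1) == 'z' && label_to_char(-2) == 'y'
```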
diff --git a/NDTensors/src/NDTensors.jl b/NDTensors/src/NDTensors.jl
index 7b2bf2b54a..d984c09758 100644
--- a/NDTensors/src/NDTensors.jl
+++ b/NDTensors/src/NDTensors.jl
@@ -106,65 +106,65 @@ blas_get_num_threads() = BLAS.get_num_threads()
const _using_threaded_blocksparse = Ref(false)
function enable_threaded_blocksparse_docstring(module_name)
- return """
- $(module_name).enable_threaded_blocksparse()
- $(module_name).disable_threaded_blocksparse()
-
- Enable or disable block sparse multithreading.
-
- Returns the current state of `$(module_name).using_threaded_blocksparse()`, i.e. `true` if threaded block sparse was previously enabled, and `false` if threaded block sparse was previously disabled. This is helpful for turning block sparse threading on or off temporarily. For example:
- ```julia
- using_threaded_blocksparse = $(module_name).enable_threaded_blocksparse()
- # Run code that you want to be threaded
- if !using_threaded_blocksparse
- $(module_name).disable_threaded_blocksparse()
- end
- ```
+ return """
+ $(module_name).enable_threaded_blocksparse()
+ $(module_name).disable_threaded_blocksparse()
+
+ Enable or disable block sparse multithreading.
+
+ Returns the current state of `$(module_name).using_threaded_blocksparse()`, i.e. `true` if threaded block sparse was previously enabled, and `false` if threaded block sparse was previously disabled. This is helpful for turning block sparse threading on or off temporarily. For example:
+ ```julia
+ using_threaded_blocksparse = $(module_name).enable_threaded_blocksparse()
+ # Run code that you want to be threaded
+ if !using_threaded_blocksparse
+ $(module_name).disable_threaded_blocksparse()
+ end
+ ```
- Note that you need to start Julia with multiple threads. For example, to start Julia with 4 threads, you can use any of the following:
- ```
- \$ julia --threads=4
+ Note that you need to start Julia with multiple threads. For example, to start Julia with 4 threads, you can use any of the following:
+ ```
+ \$ julia --threads=4
- \$ julia -t 4
+ \$ julia -t 4
- \$ JULIA_NUM_THREADS=4 julia
- ```
+ \$ JULIA_NUM_THREADS=4 julia
+ ```
- In addition, we have found that it is best to disable `BLAS` and `Strided` multithreading when using block sparse multithreading. You can do that with the commands `using LinearAlgebra; BLAS.set_num_threads(1)` and `$(module_name).Strided.disable_threads()`.
+ In addition, we have found that it is best to disable `BLAS` and `Strided` multithreading when using block sparse multithreading. You can do that with the commands `using LinearAlgebra; BLAS.set_num_threads(1)` and `$(module_name).Strided.disable_threads()`.
- See also: `$(module_name).enable_threaded_blocksparse`, `$(module_name).disable_threaded_blocksparse`, `$(module_name).using_threaded_blocksparse`.
- """
+ See also: `$(module_name).enable_threaded_blocksparse`, `$(module_name).disable_threaded_blocksparse`, `$(module_name).using_threaded_blocksparse`.
+ """
end
function _enable_threaded_blocksparse()
- current_using_threaded_blocksparse = using_threaded_blocksparse()
- if !current_using_threaded_blocksparse
- if Threads.nthreads() == 1
- println(
- "WARNING: You are trying to enable block sparse multithreading, but you have started Julia with only a single thread. You can start Julia with `N` threads with `julia -t N`, and check the number of threads Julia can use with `Threads.nthreads()`. Your system has $(Sys.CPU_THREADS) threads available to use, which you can determine by running `Sys.CPU_THREADS`.\n",
- )
+ current_using_threaded_blocksparse = using_threaded_blocksparse()
+ if !current_using_threaded_blocksparse
+ if Threads.nthreads() == 1
+ println(
+ "WARNING: You are trying to enable block sparse multithreading, but you have started Julia with only a single thread. You can start Julia with `N` threads with `julia -t N`, and check the number of threads Julia can use with `Threads.nthreads()`. Your system has $(Sys.CPU_THREADS) threads available to use, which you can determine by running `Sys.CPU_THREADS`.\n",
+ )
+ end
+ if BLAS.get_num_threads() > 1 && Threads.nthreads() > 1
+ println(
+ "WARNING: You are enabling block sparse multithreading, but your BLAS configuration $(BLAS.get_config()) is currently set to use $(BLAS.get_num_threads()) threads. When using block sparse multithreading, we recommend setting BLAS to use only a single thread, otherwise you may see suboptimal performance. You can set it with `using LinearAlgebra; BLAS.set_num_threads(1)`.\n",
+ )
+ end
+ if Strided.get_num_threads() > 1
+ println(
+ "WARNING: You are enabling block sparse multithreading, but Strided.jl is currently set to use $(Strided.get_num_threads()) threads for performing dense tensor permutations. When using block sparse multithreading, we recommend setting Strided.jl to use only a single thread, otherwise you may see suboptimal performance. You can set it with `NDTensors.Strided.disable_threads()` and see the current number of threads it is using with `NDTensors.Strided.get_num_threads()`.\n",
+ )
+ end
+ _using_threaded_blocksparse[] = true
end
- if BLAS.get_num_threads() > 1 && Threads.nthreads() > 1
- println(
- "WARNING: You are enabling block sparse multithreading, but your BLAS configuration $(BLAS.get_config()) is currently set to use $(BLAS.get_num_threads()) threads. When using block sparse multithreading, we recommend setting BLAS to use only a single thread, otherwise you may see suboptimal performance. You can set it with `using LinearAlgebra; BLAS.set_num_threads(1)`.\n",
- )
- end
- if Strided.get_num_threads() > 1
- println(
- "WARNING: You are enabling block sparse multithreading, but Strided.jl is currently set to use $(Strided.get_num_threads()) threads for performing dense tensor permutations. When using block sparse multithreading, we recommend setting Strided.jl to use only a single thread, otherwise you may see suboptimal performance. You can set it with `NDTensors.Strided.disable_threads()` and see the current number of threads it is using with `NDTensors.Strided.get_num_threads()`.\n",
- )
- end
- _using_threaded_blocksparse[] = true
- end
- return current_using_threaded_blocksparse
+ return current_using_threaded_blocksparse
end
function _disable_threaded_blocksparse()
- current_using_threaded_blocksparse = using_threaded_blocksparse()
- if current_using_threaded_blocksparse
- _using_threaded_blocksparse[] = false
- end
- return current_using_threaded_blocksparse
+ current_using_threaded_blocksparse = using_threaded_blocksparse()
+ if current_using_threaded_blocksparse
+ _using_threaded_blocksparse[] = false
+ end
+ return current_using_threaded_blocksparse
end
"""
@@ -191,13 +191,13 @@ const _using_auto_fermion = Ref(false)
using_auto_fermion() = _using_auto_fermion[]
function enable_auto_fermion()
- _using_auto_fermion[] = true
- return nothing
+ _using_auto_fermion[] = true
+ return nothing
end
function disable_auto_fermion()
- _using_auto_fermion[] = false
- return nothing
+ _using_auto_fermion[] = false
+ return nothing
end
#####################################
@@ -209,13 +209,13 @@ const _using_tblis = Ref(false)
using_tblis() = _using_tblis[]
function enable_tblis()
- _using_tblis[] = true
- return nothing
+ _using_tblis[] = true
+ return nothing
end
function disable_tblis()
- _using_tblis[] = false
- return nothing
+ _using_tblis[] = false
+ return nothing
end
function backend_octavian end
diff --git a/NDTensors/src/abstractarray/tensoralgebra/contract.jl b/NDTensors/src/abstractarray/tensoralgebra/contract.jl
index 160373832a..250265cabe 100644
--- a/NDTensors/src/abstractarray/tensoralgebra/contract.jl
+++ b/NDTensors/src/abstractarray/tensoralgebra/contract.jl
@@ -5,181 +5,181 @@ using .Expose: expose
export backend_auto, backend_blas, backend_generic
@eval struct GemmBackend{T}
- (f::Type{<:GemmBackend})() = $(Expr(:new, :f))
+ (f::Type{<:GemmBackend})() = $(Expr(:new, :f))
end
GemmBackend(s) = GemmBackend{Symbol(s)}()
macro GemmBackend_str(s)
- return :(GemmBackend{$(Expr(:quote, Symbol(s)))})
+ return :(GemmBackend{$(Expr(:quote, Symbol(s)))})
end
const gemm_backend = Ref(:Auto)
function backend_auto()
- return gemm_backend[] = :Auto
+ return gemm_backend[] = :Auto
end
function backend_blas()
- return gemm_backend[] = :BLAS
+ return gemm_backend[] = :BLAS
end
function backend_generic()
- return gemm_backend[] = :Generic
+ return gemm_backend[] = :Generic
end
@inline function auto_select_backend(
- ::Type{<:StridedVecOrMat{<:BlasFloat}},
- ::Type{<:StridedVecOrMat{<:BlasFloat}},
- ::Type{<:StridedVecOrMat{<:BlasFloat}},
-)
- return GemmBackend(:BLAS)
+ ::Type{<:StridedVecOrMat{<:BlasFloat}},
+ ::Type{<:StridedVecOrMat{<:BlasFloat}},
+ ::Type{<:StridedVecOrMat{<:BlasFloat}},
+ )
+ return GemmBackend(:BLAS)
end
@inline function auto_select_backend(
- ::Type{<:AbstractVecOrMat}, ::Type{<:AbstractVecOrMat}, ::Type{<:AbstractVecOrMat}
-)
- return GemmBackend(:Generic)
+ ::Type{<:AbstractVecOrMat}, ::Type{<:AbstractVecOrMat}, ::Type{<:AbstractVecOrMat}
+ )
+ return GemmBackend(:Generic)
end
function _gemm!(
- tA, tB, alpha, A::TA, B::TB, beta, C::TC
-) where {TA<:AbstractVecOrMat,TB<:AbstractVecOrMat,TC<:AbstractVecOrMat}
- if gemm_backend[] == :Auto
- _gemm!(auto_select_backend(TA, TB, TC), tA, tB, alpha, A, B, beta, C)
- else
- _gemm!(GemmBackend(gemm_backend[]), tA, tB, alpha, A, B, beta, C)
- end
+ tA, tB, alpha, A::TA, B::TB, beta, C::TC
+ ) where {TA <: AbstractVecOrMat, TB <: AbstractVecOrMat, TC <: AbstractVecOrMat}
+ return if gemm_backend[] == :Auto
+ _gemm!(auto_select_backend(TA, TB, TC), tA, tB, alpha, A, B, beta, C)
+ else
+ _gemm!(GemmBackend(gemm_backend[]), tA, tB, alpha, A, B, beta, C)
+ end
end
# BLAS matmul
function _gemm!(
- ::GemmBackend{:BLAS},
- tA,
- tB,
- alpha,
- A::AbstractVecOrMat,
- B::AbstractVecOrMat,
- beta,
- C::AbstractVecOrMat,
-)
- #@timeit_debug timer "BLAS.gemm!" begin
- return BLAS.gemm!(tA, tB, alpha, A, B, beta, C)
- #end # @timeit
+ ::GemmBackend{:BLAS},
+ tA,
+ tB,
+ alpha,
+ A::AbstractVecOrMat,
+ B::AbstractVecOrMat,
+ beta,
+ C::AbstractVecOrMat,
+ )
+ #@timeit_debug timer "BLAS.gemm!" begin
+ return BLAS.gemm!(tA, tB, alpha, A, B, beta, C)
+ #end # @timeit
end
# generic matmul
function _gemm!(
- ::GemmBackend{:Generic},
- tA,
- tB,
- alpha::AT,
- A::AbstractVecOrMat,
- B::AbstractVecOrMat,
- beta::BT,
- C::AbstractVecOrMat,
-) where {AT,BT}
- mul!(
- expose(C),
- expose(tA == 'T' ? transpose(A) : A),
- expose(tB == 'T' ? transpose(B) : B),
- alpha,
- beta,
- )
- return C
+ ::GemmBackend{:Generic},
+ tA,
+ tB,
+ alpha::AT,
+ A::AbstractVecOrMat,
+ B::AbstractVecOrMat,
+ beta::BT,
+ C::AbstractVecOrMat,
+ ) where {AT, BT}
+ mul!(
+ expose(C),
+ expose(tA == 'T' ? transpose(A) : A),
+ expose(tB == 'T' ? transpose(B) : B),
+ alpha,
+ beta,
+ )
+ return C
end
# Non-trivial permutation
function _contract_scalar_perm!(
- Rᵃ::AbstractArray{ElR}, Tᵃ::AbstractArray, perm, α, β=zero(ElR)
-) where {ElR}
- if iszero(β)
- if iszero(α)
- fill!(Rᵃ, 0)
- else
- Rᵃ = permutedims!!(Rᵃ, Tᵃ, perm, (r, t) -> α * t)
- end
- elseif isone(β)
- if iszero(α)
- # Rᵃ .= Rᵃ
- # No-op
- else
- Rᵃ = permutedims!!(Rᵃ, Tᵃ, perm, (r, t) -> r + α * t)
- end
- else
- if iszero(α)
- # Rᵃ .= β .* Rᵃ
- LinearAlgebra.scal!(length(Rᵃ), β, Rᵃ, 1)
+ Rᵃ::AbstractArray{ElR}, Tᵃ::AbstractArray, perm, α, β = zero(ElR)
+ ) where {ElR}
+ if iszero(β)
+ if iszero(α)
+ fill!(Rᵃ, 0)
+ else
+ Rᵃ = permutedims!!(Rᵃ, Tᵃ, perm, (r, t) -> α * t)
+ end
+ elseif isone(β)
+ if iszero(α)
+ # Rᵃ .= Rᵃ
+ # No-op
+ else
+ Rᵃ = permutedims!!(Rᵃ, Tᵃ, perm, (r, t) -> r + α * t)
+ end
else
- Rᵃ .= α .* permutedims(expose(Tᵃ), perm) .+ β .* Rᵃ
+ if iszero(α)
+ # Rᵃ .= β .* Rᵃ
+ LinearAlgebra.scal!(length(Rᵃ), β, Rᵃ, 1)
+ else
+ Rᵃ .= α .* permutedims(expose(Tᵃ), perm) .+ β .* Rᵃ
+ end
end
- end
- return Rᵃ
+ return Rᵃ
end
function _contract!(
- CT::AbstractArray{El,NC},
- AT::AbstractArray{El,NA},
- BT::AbstractArray{El,NB},
- props::ContractionProperties,
- α::Number=one(El),
- β::Number=zero(El),
-) where {El,NC,NA,NB}
- tA = 'N'
- if props.permuteA
- #@timeit_debug timer "_contract!: permutedims A" begin
- Ap = permutedims(expose(AT), props.PA)
- #end # @timeit
- AM = transpose(reshape(Ap, (props.dmid, props.dleft)))
- else
- #A doesn't have to be permuted
- if Atrans(props)
- AM = transpose(reshape(AT, (props.dmid, props.dleft)))
+ CT::AbstractArray{El, NC},
+ AT::AbstractArray{El, NA},
+ BT::AbstractArray{El, NB},
+ props::ContractionProperties,
+ α::Number = one(El),
+ β::Number = zero(El),
+ ) where {El, NC, NA, NB}
+ tA = 'N'
+ if props.permuteA
+ #@timeit_debug timer "_contract!: permutedims A" begin
+ Ap = permutedims(expose(AT), props.PA)
+ #end # @timeit
+ AM = transpose(reshape(Ap, (props.dmid, props.dleft)))
else
- AM = reshape(AT, (props.dleft, props.dmid))
+ #A doesn't have to be permuted
+ if Atrans(props)
+ AM = transpose(reshape(AT, (props.dmid, props.dleft)))
+ else
+ AM = reshape(AT, (props.dleft, props.dmid))
+ end
end
- end
- tB = 'N'
- if props.permuteB
- #@timeit_debug timer "_contract!: permutedims B" begin
- Bp = permutedims(expose(BT), props.PB)
- #end # @timeit
- BM = reshape(Bp, (props.dmid, props.dright))
- else
- if Btrans(props)
- BM = transpose(reshape(BT, (props.dright, props.dmid)))
+ tB = 'N'
+ if props.permuteB
+ #@timeit_debug timer "_contract!: permutedims B" begin
+ Bp = permutedims(expose(BT), props.PB)
+ #end # @timeit
+ BM = reshape(Bp, (props.dmid, props.dright))
else
- BM = reshape(BT, (props.dmid, props.dright))
+ if Btrans(props)
+ BM = transpose(reshape(BT, (props.dright, props.dmid)))
+ else
+ BM = reshape(BT, (props.dmid, props.dright))
+ end
end
- end
- # TODO: this logic may be wrong
- if props.permuteC
- # if we are computing C = α * A B + β * C
- # we need to make sure C is permuted to the same
- # ordering as A B which is the inverse of props.PC
- if β ≠ 0
- CM = reshape(permutedims(expose(CT), invperm(props.PC)), (props.dleft, props.dright))
- else
- # Need to copy here since we will be permuting
- # into C later
- CM = reshape(copy(CT), (props.dleft, props.dright))
- end
- else
- if Ctrans(props)
- CM = transpose(reshape(CT, (props.dright, props.dleft)))
+ # TODO: this logic may be wrong
+ if props.permuteC
+ # if we are computing C = α * A B + β * C
+ # we need to make sure C is permuted to the same
+ # ordering as A B which is the inverse of props.PC
+ if β ≠ 0
+ CM = reshape(permutedims(expose(CT), invperm(props.PC)), (props.dleft, props.dright))
+ else
+ # Need to copy here since we will be permuting
+ # into C later
+ CM = reshape(copy(CT), (props.dleft, props.dright))
+ end
else
- CM = reshape(CT, (props.dleft, props.dright))
+ if Ctrans(props)
+ CM = transpose(reshape(CT, (props.dright, props.dleft)))
+ else
+ CM = reshape(CT, (props.dleft, props.dright))
+ end
end
- end
- #tC = similar(CM)
- #_gemm!(tA, tB, El(α), AM, BM, El(β), CM)
- CM = mul!!(CM, AM, BM, El(α), El(β))
+ #tC = similar(CM)
+ #_gemm!(tA, tB, El(α), AM, BM, El(β), CM)
+ CM = mul!!(CM, AM, BM, El(α), El(β))
- if props.permuteC
- Cr = reshape(CM, props.newCrange)
- # TODO: use invperm(pC) here?
- #@timeit_debug timer "_contract!: permutedims C" begin
- CT .= permutedims(expose(Cr), props.PC)
- #end # @timeit
- end
+ if props.permuteC
+ Cr = reshape(CM, props.newCrange)
+ # TODO: use invperm(pC) here?
+ #@timeit_debug timer "_contract!: permutedims C" begin
+ CT .= permutedims(expose(Cr), props.PC)
+ #end # @timeit
+ end
- return CT
+ return CT
end
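The `GemmBackend` machinery near the top of this file selects between BLAS and generic matrix multiplication: when `gemm_backend[] == :Auto`, `auto_select_backend` dispatches on the array types, while `backend_blas()` / `backend_generic()` force a particular backend. A simplified standalone sketch of that selection pattern (plain re-declarations for illustration only, not the package's actual implementation):

```julia
# Simplified stand-ins for the backend-selection pattern above (illustration only).
struct GemmBackend{T} end
GemmBackend(s) = GemmBackend{Symbol(s)}()

const gemm_backend = Ref(:Auto)
backend_blas() = gemm_backend[] = :BLAS
backend_generic() = gemm_backend[] = :Generic

backend_blas()
@assert GemmBackend(gemm_backend[]) === GemmBackend{:BLAS}()
```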
diff --git a/NDTensors/src/blocksparse/block.jl b/NDTensors/src/blocksparse/block.jl
index f35eace158..92da33c3db 100644
--- a/NDTensors/src/blocksparse/block.jl
+++ b/NDTensors/src/blocksparse/block.jl
@@ -1,26 +1,25 @@
-
#
# Block
#
struct Block{N}
- data::NTuple{N,UInt}
- hash::UInt
- function Block{N}(data::NTuple{N,UInt}) where {N}
- h = _hash(data)
- return new{N}(data, h)
- end
- function Block{0}(::Tuple{})
- h = _hash(())
- return new{0}((), h)
- end
+ data::NTuple{N, UInt}
+ hash::UInt
+ function Block{N}(data::NTuple{N, UInt}) where {N}
+ h = _hash(data)
+ return new{N}(data, h)
+ end
+ function Block{0}(::Tuple{})
+ h = _hash(())
+ return new{0}((), h)
+ end
end
#
# Constructors
#
-Block{N}(t::Tuple{Vararg{Any,N}}) where {N} = Block{N}(UInt.(t))
+Block{N}(t::Tuple{Vararg{Any, N}}) where {N} = Block{N}(UInt.(t))
Block{N}(I::CartesianIndex{N}) where {N} = Block{N}(I.I)
@@ -36,13 +35,13 @@ Block(v::MVector{N}) where {N} = Block{N}(v)
Block(v::SVector{N}) where {N} = Block{N}(v)
-Block(t::NTuple{N,UInt}) where {N} = Block{N}(t)
+Block(t::NTuple{N, UInt}) where {N} = Block{N}(t)
-Block(t::Tuple{Vararg{Any,N}}) where {N} = Block{N}(t)
+Block(t::Tuple{Vararg{Any, N}}) where {N} = Block{N}(t)
Block(::Tuple{}) = Block{0}(())
-Block(I::Union{Integer,Block{1}}...) = Block(I)
+Block(I::Union{Integer, Block{1}}...) = Block(I)
#
# Conversions
@@ -50,7 +49,7 @@ Block(I::Union{Integer,Block{1}}...) = Block(I)
CartesianIndex(b::Block) = CartesianIndex(Tuple(b))
-Tuple(b::Block{N}) where {N} = NTuple{N,UInt}(b.data)
+Tuple(b::Block{N}) where {N} = NTuple{N, UInt}(b.data)
convert(::Type{Block}, I::CartesianIndex{N}) where {N} = Block{N}(I.I)
@@ -60,7 +59,7 @@ convert(::Type{Block}, t::Tuple) = Block(t)
convert(::Type{Block{N}}, t::Tuple) where {N} = Block{N}(t)
-(::Type{IntT})(b::Block{1}) where {IntT<:Integer} = IntT(only(b))
+(::Type{IntT})(b::Block{1}) where {IntT <: Integer} = IntT(only(b))
#
# Getting and setting fields
@@ -68,7 +67,7 @@ convert(::Type{Block{N}}, t::Tuple) where {N} = Block{N}(t)
gethash(b::Block) = b.hash[]
-sethash!(b::Block, h::UInt) = (b.hash[]=h; return b)
+sethash!(b::Block, h::UInt) = (b.hash[] = h; return b)
#
# Basic functions
@@ -82,11 +81,11 @@ isless(b1::Block, b2::Block) = isless(Tuple(b1), Tuple(b2))
iterate(b::Block, args...) = iterate(b.data, args...)
@propagate_inbounds function getindex(b::Block, i::Integer)
- return b.data[i]
+ return b.data[i]
end
@propagate_inbounds function setindex(b::Block{N}, val, i::Integer) where {N}
- return Block{N}(setindex(b.data, UInt(val), i))
+ return Block{N}(setindex(b.data, UInt(val), i))
end
ValLength(::Type{<:Block{N}}) where {N} = Val{N}
@@ -115,30 +114,30 @@ checkbounds(::Tensor, ::Block) = nothing
_hash(t::Tuple) = _hash(t, zero(UInt))
_hash(::Tuple{}, h::UInt) = h + Base.tuplehash_seed
@generated function _hash(b::NTuple{N}, h::UInt) where {N}
- quote
- out = h + Base.tuplehash_seed
- @nexprs $N i -> out = hash(b[$N - i + 1], out)
- end
+ return quote
+ out = h + Base.tuplehash_seed
+ @nexprs $N i -> out = hash(b[$N - i + 1], out)
+ end
end
if VERSION < v"1.7.0-DEV.933"
- # Stop inlining after some number of arguments to avoid code blowup
- function _hash(t::Base.Any16, h::UInt)
- out = h + Base.tuplehash_seed
- for i in length(t):-1:1
- out = hash(t[i], out)
+ # Stop inlining after some number of arguments to avoid code blowup
+ function _hash(t::Base.Any16, h::UInt)
+ out = h + Base.tuplehash_seed
+ for i in length(t):-1:1
+ out = hash(t[i], out)
+ end
+ return out
end
- return out
- end
else
- # Stop inlining after some number of arguments to avoid code blowup
- function _hash(t::Base.Any32, h::UInt)
- out = h + Base.tuplehash_seed
- for i in length(t):-1:1
- out = hash(t[i], out)
+ # Stop inlining after some number of arguments to avoid code blowup
+ function _hash(t::Base.Any32, h::UInt)
+ out = h + Base.tuplehash_seed
+ for i in length(t):-1:1
+ out = hash(t[i], out)
+ end
+ return out
end
- return out
- end
end
hash(b::Block) = UInt(b.hash)
diff --git a/NDTensors/src/blocksparse/blocksparsetensor.jl b/NDTensors/src/blocksparse/blocksparsetensor.jl
index 2df6352a30..1bb9fe69fc 100644
--- a/NDTensors/src/blocksparse/blocksparsetensor.jl
+++ b/NDTensors/src/blocksparse/blocksparsetensor.jl
@@ -5,44 +5,44 @@ using TypeParameterAccessors: similartype
# BlockSparseTensor (Tensor using BlockSparse storage)
#
-const BlockSparseTensor{ElT,N,StoreT,IndsT} =
- Tensor{ElT,N,StoreT,IndsT} where {StoreT<:BlockSparse}
+const BlockSparseTensor{ElT, N, StoreT, IndsT} =
+ Tensor{ElT, N, StoreT, IndsT} where {StoreT <: BlockSparse}
nonzeros(T::Tensor) = data(T)
function BlockSparseTensor(
- ::Type{ElT}, ::UndefInitializer, boffs::BlockOffsets, inds
-) where {ElT<:Number}
- nnz_tot = nnz(boffs, inds)
- storage = BlockSparse(ElT, undef, boffs, nnz_tot)
- return tensor(storage, inds)
+ ::Type{ElT}, ::UndefInitializer, boffs::BlockOffsets, inds
+ ) where {ElT <: Number}
+ nnz_tot = nnz(boffs, inds)
+ storage = BlockSparse(ElT, undef, boffs, nnz_tot)
+ return tensor(storage, inds)
end
function BlockSparseTensor(
- datatype::Type{<:AbstractArray}, ::UndefInitializer, boffs::BlockOffsets, inds
-)
- nnz_tot = nnz(boffs, inds)
- storage = BlockSparse(datatype, undef, boffs, nnz_tot)
- return tensor(storage, inds)
+ datatype::Type{<:AbstractArray}, ::UndefInitializer, boffs::BlockOffsets, inds
+ )
+ nnz_tot = nnz(boffs, inds)
+ storage = BlockSparse(datatype, undef, boffs, nnz_tot)
+ return tensor(storage, inds)
end
function BlockSparseTensor(
- ::Type{ElT}, ::UndefInitializer, blocks::Vector{BlockT}, inds
-) where {ElT<:Number,BlockT<:Union{Block,NTuple}}
- boffs, nnz = blockoffsets(blocks, inds)
- storage = BlockSparse(ElT, undef, boffs, nnz)
- return tensor(storage, inds)
+ ::Type{ElT}, ::UndefInitializer, blocks::Vector{BlockT}, inds
+ ) where {ElT <: Number, BlockT <: Union{Block, NTuple}}
+ boffs, nnz = blockoffsets(blocks, inds)
+ storage = BlockSparse(ElT, undef, boffs, nnz)
+ return tensor(storage, inds)
end
function BlockSparseTensor(
- datatype::Type{<:AbstractArray},
- ::UndefInitializer,
- blocks::Vector{<:Union{Block,NTuple}},
- inds,
-)
- boffs, nnz = blockoffsets(blocks, inds)
- storage = BlockSparse(datatype, undef, boffs, nnz)
- return tensor(storage, inds)
+ datatype::Type{<:AbstractArray},
+ ::UndefInitializer,
+ blocks::Vector{<:Union{Block, NTuple}},
+ inds,
+ )
+ boffs, nnz = blockoffsets(blocks, inds)
+ storage = BlockSparse(datatype, undef, boffs, nnz)
+ return tensor(storage, inds)
end
"""
@@ -52,23 +52,23 @@ Construct a block sparse tensor with uninitialized memory
from indices and locations of non-zero blocks.
"""
function BlockSparseTensor(::UndefInitializer, blockoffsets, inds)
- return BlockSparseTensor(default_eltype(), undef, blockoffsets, inds)
+ return BlockSparseTensor(default_eltype(), undef, blockoffsets, inds)
end
function BlockSparseTensor(
- datatype::Type{<:AbstractArray}, blockoffsets::BlockOffsets, inds
-)
- nnz_tot = nnz(blockoffsets, inds)
- storage = BlockSparse(datatype, blockoffsets, nnz_tot)
- return tensor(storage, inds)
+ datatype::Type{<:AbstractArray}, blockoffsets::BlockOffsets, inds
+ )
+ nnz_tot = nnz(blockoffsets, inds)
+ storage = BlockSparse(datatype, blockoffsets, nnz_tot)
+ return tensor(storage, inds)
end
function BlockSparseTensor(eltype::Type{<:Number}, blockoffsets::BlockOffsets, inds)
- return BlockSparseTensor(Vector{eltype}, blockoffsets, inds)
+ return BlockSparseTensor(Vector{eltype}, blockoffsets, inds)
end
function BlockSparseTensor(blockoffsets::BlockOffsets, inds)
- return BlockSparseTensor(default_eltype(), blockoffsets, inds)
+ return BlockSparseTensor(default_eltype(), blockoffsets, inds)
end
"""
@@ -79,11 +79,11 @@ Construct a block sparse tensor with no blocks.
BlockSparseTensor(inds) = BlockSparseTensor(default_eltype(), inds)
function BlockSparseTensor(datatype::Type{<:AbstractArray}, inds)
- return BlockSparseTensor(datatype, BlockOffsets{length(inds)}(), inds)
+ return BlockSparseTensor(datatype, BlockOffsets{length(inds)}(), inds)
end
function BlockSparseTensor(eltype::Type{<:Number}, inds)
- return BlockSparseTensor(Vector{eltype}, inds)
+ return BlockSparseTensor(Vector{eltype}, inds)
end
"""
@@ -91,8 +91,8 @@ end
Construct a block sparse tensor with no blocks.
"""
-function BlockSparseTensor(inds::Vararg{DimT,N}) where {DimT,N}
- return BlockSparseTensor(BlockOffsets{N}(), inds)
+function BlockSparseTensor(inds::Vararg{DimT, N}) where {DimT, N}
+ return BlockSparseTensor(BlockOffsets{N}(), inds)
end
"""
@@ -101,69 +101,69 @@ end
Construct a block sparse tensor with the specified blocks.
Defaults to setting structurally non-zero blocks to zero.
"""
-function BlockSparseTensor(blocks::Vector{BlockT}, inds) where {BlockT<:Union{Block,NTuple}}
- return BlockSparseTensor(default_eltype(), blocks, inds)
+function BlockSparseTensor(blocks::Vector{BlockT}, inds) where {BlockT <: Union{Block, NTuple}}
+ return BlockSparseTensor(default_eltype(), blocks, inds)
end
function BlockSparseTensor(
- ::Type{ElT}, blocks::Vector{BlockT}, inds
-) where {ElT<:Number,BlockT<:Union{Block,NTuple}}
- boffs, nnz = blockoffsets(blocks, inds)
- storage = BlockSparse(ElT, boffs, nnz)
- return tensor(storage, inds)
+ ::Type{ElT}, blocks::Vector{BlockT}, inds
+ ) where {ElT <: Number, BlockT <: Union{Block, NTuple}}
+ boffs, nnz = blockoffsets(blocks, inds)
+ storage = BlockSparse(ElT, boffs, nnz)
+ return tensor(storage, inds)
end
function BlockSparseTensor(
- datatype::Type{<:AbstractArray}, blocks::Vector{<:Union{Block,NTuple}}, inds
-)
- boffs, nnz = blockoffsets(blocks, inds)
- storage = BlockSparse(datatype, boffs, nnz)
- return tensor(storage, inds)
+ datatype::Type{<:AbstractArray}, blocks::Vector{<:Union{Block, NTuple}}, inds
+ )
+ boffs, nnz = blockoffsets(blocks, inds)
+ storage = BlockSparse(datatype, boffs, nnz)
+ return tensor(storage, inds)
end
function BlockSparseTensor(
- x::Number, blocks::Vector{BlockT}, inds
-) where {BlockT<:Union{Block,NTuple}}
- boffs, nnz = blockoffsets(blocks, inds)
- storage = BlockSparse(x, boffs, nnz)
- return tensor(storage, inds)
+ x::Number, blocks::Vector{BlockT}, inds
+ ) where {BlockT <: Union{Block, NTuple}}
+ boffs, nnz = blockoffsets(blocks, inds)
+ storage = BlockSparse(x, boffs, nnz)
+ return tensor(storage, inds)
end
#complex(::Type{BlockSparseTensor{ElT,N,StoreT,IndsT}}) where {ElT<:Number,N,StoreT<:BlockSparse
# = Tensor{ElT,N,StoreT,IndsT} where {StoreT<:BlockSparse}
function randn(
- TensorT::Type{<:BlockSparseTensor{ElT,N}}, blocks::Vector{<:BlockT}, inds
-) where {ElT,BlockT<:Union{Block{N},NTuple{N,<:Integer}}} where {N}
- return randn(Random.default_rng(), TensorT, blocks, inds)
+ TensorT::Type{<:BlockSparseTensor{ElT, N}}, blocks::Vector{<:BlockT}, inds
+ ) where {ElT, BlockT <: Union{Block{N}, NTuple{N, <:Integer}}} where {N}
+ return randn(Random.default_rng(), TensorT, blocks, inds)
end
function randn(
- rng::AbstractRNG, ::Type{<:BlockSparseTensor{ElT,N}}, blocks::Vector{<:BlockT}, inds
-) where {ElT,BlockT<:Union{Block{N},NTuple{N,<:Integer}}} where {N}
- boffs, nnz = blockoffsets(blocks, inds)
- storage = randn(rng, BlockSparse{ElT}, boffs, nnz)
- return tensor(storage, inds)
+ rng::AbstractRNG, ::Type{<:BlockSparseTensor{ElT, N}}, blocks::Vector{<:BlockT}, inds
+ ) where {ElT, BlockT <: Union{Block{N}, NTuple{N, <:Integer}}} where {N}
+ boffs, nnz = blockoffsets(blocks, inds)
+ storage = randn(rng, BlockSparse{ElT}, boffs, nnz)
+ return tensor(storage, inds)
end
function randomBlockSparseTensor(
- ::Type{ElT}, blocks::Vector{<:BlockT}, inds
-) where {ElT,BlockT<:Union{Block{N},NTuple{N,<:Integer}}} where {N}
- return randomBlockSparseTensor(Random.default_rng(), ElT, blocks, inds)
+ ::Type{ElT}, blocks::Vector{<:BlockT}, inds
+ ) where {ElT, BlockT <: Union{Block{N}, NTuple{N, <:Integer}}} where {N}
+ return randomBlockSparseTensor(Random.default_rng(), ElT, blocks, inds)
end
function randomBlockSparseTensor(
- rng::AbstractRNG, ::Type{ElT}, blocks::Vector{<:BlockT}, inds
-) where {ElT,BlockT<:Union{Block{N},NTuple{N,<:Integer}}} where {N}
- return randn(rng, BlockSparseTensor{ElT,N}, blocks, inds)
+ rng::AbstractRNG, ::Type{ElT}, blocks::Vector{<:BlockT}, inds
+ ) where {ElT, BlockT <: Union{Block{N}, NTuple{N, <:Integer}}} where {N}
+ return randn(rng, BlockSparseTensor{ElT, N}, blocks, inds)
end
function randomBlockSparseTensor(blocks::Vector, inds)
- return randomBlockSparseTensor(Random.default_rng(), blocks, inds)
+ return randomBlockSparseTensor(Random.default_rng(), blocks, inds)
end
function randomBlockSparseTensor(rng::AbstractRNG, blocks::Vector, inds)
- return randomBlockSparseTensor(rng, default_eltype(), blocks, inds)
+ return randomBlockSparseTensor(rng, default_eltype(), blocks, inds)
end
"""
@@ -174,31 +174,31 @@ Construct a block sparse tensor with the specified blocks.
Defaults to setting structurally non-zero blocks to zero.
"""
function BlockSparseTensor(
- blocks::Vector{BlockT}, inds::Vararg{BlockDim,N}
-) where {BlockT<:Union{Block{N},NTuple{N,<:Integer}}} where {N}
- return BlockSparseTensor(blocks, inds)
+ blocks::Vector{BlockT}, inds::Vararg{BlockDim, N}
+ ) where {BlockT <: Union{Block{N}, NTuple{N, <:Integer}}} where {N}
+ return BlockSparseTensor(blocks, inds)
end
function BlockSparseTensor{ElT}(
- blocks::Vector{BlockT}, inds::Vararg{BlockDim,N}
-) where {ElT<:Number,BlockT<:Union{Block{N},NTuple{N,<:Integer}}} where {N}
- return BlockSparseTensor(ElT, blocks, inds)
+ blocks::Vector{BlockT}, inds::Vararg{BlockDim, N}
+ ) where {ElT <: Number, BlockT <: Union{Block{N}, NTuple{N, <:Integer}}} where {N}
+ return BlockSparseTensor(ElT, blocks, inds)
end
function zeros(
- tensor::BlockSparseTensor{ElT,N}, blockoffsets::BlockOffsets{N}, inds
-) where {ElT,N}
- return BlockSparseTensor(datatype(tensor), blockoffsets, inds)
+ tensor::BlockSparseTensor{ElT, N}, blockoffsets::BlockOffsets{N}, inds
+ ) where {ElT, N}
+ return BlockSparseTensor(datatype(tensor), blockoffsets, inds)
end
function zeros(
- tensortype::Type{<:BlockSparseTensor{ElT,N}}, blockoffsets::BlockOffsets{N}, inds
-) where {ElT,N}
- return BlockSparseTensor(datatype(tensortype), blockoffsets, inds)
+ tensortype::Type{<:BlockSparseTensor{ElT, N}}, blockoffsets::BlockOffsets{N}, inds
+ ) where {ElT, N}
+ return BlockSparseTensor(datatype(tensortype), blockoffsets, inds)
end
function zeros(tensortype::Type{<:BlockSparseTensor}, inds)
- return BlockSparseTensor(datatype(tensortype), inds)
+ return BlockSparseTensor(datatype(tensortype), inds)
end
zeros(tensor::BlockSparseTensor, inds) = zeros(typeof(tensor), inds)
@@ -208,8 +208,8 @@ IndexStyle(::Type{<:BlockSparseTensor}) = IndexCartesian()
# Get the CartesianIndices for the range of indices
# of the specified
-function blockindices(T::BlockSparseTensor{ElT,N}, block) where {ElT,N}
- return CartesianIndex(blockstart(T, block)):CartesianIndex(blockend(T, block))
+function blockindices(T::BlockSparseTensor{ElT, N}, block) where {ElT, N}
+ return CartesianIndex(blockstart(T, block)):CartesianIndex(blockend(T, block))
end
"""
@@ -221,13 +221,13 @@ exist, return nothing for the offset.
Also returns the block the index is found in and the offset
within the block.
"""
-function indexoffset(T::BlockSparseTensor{ElT,N}, i::Vararg{Int,N}) where {ElT,N}
- index_within_block, block = blockindex(T, i...)
- block_dims = blockdims(T, block)
- offset_within_block = LinearIndices(block_dims)[CartesianIndex(index_within_block)]
- offset_of_block = offset(T, block)
- offset_of_i = isnothing(offset_of_block) ? nothing : offset_of_block + offset_within_block
- return offset_of_i, block, offset_within_block
+function indexoffset(T::BlockSparseTensor{ElT, N}, i::Vararg{Int, N}) where {ElT, N}
+ index_within_block, block = blockindex(T, i...)
+ block_dims = blockdims(T, block)
+ offset_within_block = LinearIndices(block_dims)[CartesianIndex(index_within_block)]
+ offset_of_block = offset(T, block)
+ offset_of_i = isnothing(offset_of_block) ? nothing : offset_of_block + offset_within_block
+ return offset_of_i, block, offset_within_block
end
# TODO: Add a checkbounds
@@ -236,16 +236,16 @@ end
# block,index_within_block = blockindex(T,i...)
# return blockview(T,block)[index_within_block]
@propagate_inbounds function getindex(
- T::BlockSparseTensor{ElT,N}, i::Vararg{Int,N}
-) where {ElT,N}
- offset, _ = indexoffset(T, i...)
- isnothing(offset) && return zero(ElT)
- return storage(T)[offset]
+ T::BlockSparseTensor{ElT, N}, i::Vararg{Int, N}
+ ) where {ElT, N}
+ offset, _ = indexoffset(T, i...)
+ isnothing(offset) && return zero(ElT)
+ return storage(T)[offset]
end
-@propagate_inbounds function getindex(T::BlockSparseTensor{ElT,0}) where {ElT}
- nnzblocks(T) == 0 && return zero(ElT)
- return expose(storage(T))[]
+@propagate_inbounds function getindex(T::BlockSparseTensor{ElT, 0}) where {ElT}
+ nnzblocks(T) == 0 && return zero(ElT)
+ return expose(storage(T))[]
end
# These may not be valid if the Tensor has no blocks
@@ -260,193 +260,194 @@ end
# XXX rename to insertblock!, no need to return offset
using TypeParameterAccessors: unwrap_array_type
using .Expose: Exposed, expose, unexpose
-function insertblock_offset!(T::BlockSparseTensor{ElT,N}, newblock::Block{N}) where {ElT,N}
- newdim = blockdim(T, newblock)
- newoffset = nnz(T)
- insert!(blockoffsets(T), newblock, newoffset)
- # Insert new block into data
- new_data = generic_zeros(unwrap_array_type(T), newdim)
- # TODO: `append!` is broken on `Metal` since `resize!`
- # isn't implemented.
- append!(expose(data(T)), new_data)
- return newoffset
+function insertblock_offset!(T::BlockSparseTensor{ElT, N}, newblock::Block{N}) where {ElT, N}
+ newdim = blockdim(T, newblock)
+ newoffset = nnz(T)
+ insert!(blockoffsets(T), newblock, newoffset)
+ # Insert new block into data
+ new_data = generic_zeros(unwrap_array_type(T), newdim)
+ # TODO: `append!` is broken on `Metal` since `resize!`
+ # isn't implemented.
+ append!(expose(data(T)), new_data)
+ return newoffset
end
-function insertblock!(T::BlockSparseTensor{<:Number,N}, block::Block{N}) where {N}
- insertblock_offset!(T, block)
- return T
+function insertblock!(T::BlockSparseTensor{<:Number, N}, block::Block{N}) where {N}
+ insertblock_offset!(T, block)
+ return T
end
insertblock!(T::BlockSparseTensor, block) = insertblock!(T, Block(block))
# Insert missing diagonal blocks as zero blocks
function insert_diag_blocks!(T::AbstractArray)
- for b in eachdiagblock(T)
- blockT = blockview(T, b)
- if isnothing(blockT)
- # Block was not found in the list, insert it
- insertblock!(T, b)
+ for b in eachdiagblock(T)
+ blockT = blockview(T, b)
+ if isnothing(blockT)
+ # Block was not found in the list, insert it
+ insertblock!(T, b)
+ end
end
- end
+ return nothing
end
# TODO: Add a checkbounds
@propagate_inbounds function setindex!(
- T::BlockSparseTensor{ElT,N}, val, i::Vararg{Int,N}
-) where {ElT,N}
- offset, block, offset_within_block = indexoffset(T, i...)
- if isnothing(offset)
- offset_of_block = insertblock_offset!(T, block)
- offset = offset_of_block + offset_within_block
- end
- storage(T)[offset] = val
- return T
+ T::BlockSparseTensor{ElT, N}, val, i::Vararg{Int, N}
+ ) where {ElT, N}
+ offset, block, offset_within_block = indexoffset(T, i...)
+ if isnothing(offset)
+ offset_of_block = insertblock_offset!(T, block)
+ offset = offset_of_block + offset_within_block
+ end
+ storage(T)[offset] = val
+ return T
end
hasblock(T::Tensor, block::Block) = isassigned(blockoffsets(T), block)
@propagate_inbounds function setindex!(
- T::BlockSparseTensor{ElT,N}, val, b::Block{N}
-) where {ElT,N}
- if !hasblock(T, b)
- insertblock!(T, b)
- end
- Tb = T[b]
- Tb .= val
- return T
+ T::BlockSparseTensor{ElT, N}, val, b::Block{N}
+ ) where {ElT, N}
+ if !hasblock(T, b)
+ insertblock!(T, b)
+ end
+ Tb = T[b]
+ Tb .= val
+ return T
end
getindex(T::BlockSparseTensor, block::Block) = blockview(T, block)
-to_indices(T::Tensor{<:Any,N}, b::Tuple{Block{N}}) where {N} = blockindices(T, b...)
+to_indices(T::Tensor{<:Any, N}, b::Tuple{Block{N}}) where {N} = blockindices(T, b...)
function blockview(T::BlockSparseTensor, block::Block)
- return blockview(T, block, offset(T, block))
+ return blockview(T, block, offset(T, block))
end
function blockview(T::BlockSparseTensor, block::Block, offset::Integer)
- return blockview(T, BlockOffset(block, offset))
+ return blockview(T, BlockOffset(block, offset))
end
# Case where the block isn't found, return nothing
function blockview(T::BlockSparseTensor, block::Block, ::Nothing)
- return nothing
+ return nothing
end
blockview(T::BlockSparseTensor, block) = blockview(T, Block(block))
function blockview(T::BlockSparseTensor, bof::BlockOffset)
- blockT, offsetT = bof
- blockdimsT = blockdims(T, blockT)
- blockdimT = prod(blockdimsT)
- dataTslice = @view data(storage(T))[(offsetT + 1):(offsetT + blockdimT)]
- return tensor(Dense(dataTslice), blockdimsT)
+ blockT, offsetT = bof
+ blockdimsT = blockdims(T, blockT)
+ blockdimT = prod(blockdimsT)
+ dataTslice = @view data(storage(T))[(offsetT + 1):(offsetT + blockdimT)]
+ return tensor(Dense(dataTslice), blockdimsT)
end
view(T::BlockSparseTensor, b::Block) = blockview(T, b)
# convert to Dense
-function dense(T::TensorT) where {TensorT<:BlockSparseTensor}
- R = zeros(dense(TensorT), inds(T))
- ## Here this failed with scalar indexing (R[blockindices] = blockview)
- ## We can fix this by using copyto the arrays
- r = array(R)
- for block in keys(blockoffsets(T))
- # TODO: make sure this assignment is efficient
- rview = @view r[blockindices(T, block)]
- copyto!(expose(rview), expose(array(blockview(T, block))))
- end
- return tensor(Dense(r), inds(T))
-end
-
-function diag(ETensor::Exposed{<:AbstractArray,<:BlockSparseTensor})
- tensor = unexpose(ETensor)
- tensordiag = NDTensors.similar(
- dense(typeof(tensor)), eltype(tensor), (diaglength(tensor),)
- )
- for j in 1:diaglength(tensor)
- @inbounds tensordiag[j] = getdiagindex(tensor, j)
- end
- return tensordiag
+function dense(T::TensorT) where {TensorT <: BlockSparseTensor}
+ R = zeros(dense(TensorT), inds(T))
+ ## Here this failed with scalar indexing (R[blockindices] = blockview)
+ ## We can fix this by using copyto the arrays
+ r = array(R)
+ for block in keys(blockoffsets(T))
+ # TODO: make sure this assignment is efficient
+ rview = @view r[blockindices(T, block)]
+ copyto!(expose(rview), expose(array(blockview(T, block))))
+ end
+ return tensor(Dense(r), inds(T))
+end
+
+function diag(ETensor::Exposed{<:AbstractArray, <:BlockSparseTensor})
+ tensor = unexpose(ETensor)
+ tensordiag = NDTensors.similar(
+ dense(typeof(tensor)), eltype(tensor), (diaglength(tensor),)
+ )
+ for j in 1:diaglength(tensor)
+ @inbounds tensordiag[j] = getdiagindex(tensor, j)
+ end
+ return tensordiag
end
function Base.mapreduce(
- f, op, t1::BlockSparseTensor, t_tail::BlockSparseTensor...; kwargs...
-)
- # TODO: Take advantage of block sparsity here.
- return mapreduce(f, op, array(t1), array.(t_tail)...; kwargs...)
+ f, op, t1::BlockSparseTensor, t_tail::BlockSparseTensor...; kwargs...
+ )
+ # TODO: Take advantage of block sparsity here.
+ return mapreduce(f, op, array(t1), array.(t_tail)...; kwargs...)
end
# This is a special case that optimizes for a single tensor
# and takes advantage of block sparsity. Once the more general
# case handles block sparsity, this can be removed.
function Base.mapreduce(f, op, t::BlockSparseTensor; kwargs...)
- elt = eltype(t)
- if !iszero(f(zero(elt)))
- return mapreduce(f, op, array(t); kwargs...)
- end
- if length(t) > nnz(t)
- # Some elements are zero, account for that
- # with the initial value.
- init_kwargs = (; init=zero(elt))
- else
- init_kwargs = (;)
- end
- return mapreduce(f, op, storage(t); kwargs..., init_kwargs...)
+ elt = eltype(t)
+ if !iszero(f(zero(elt)))
+ return mapreduce(f, op, array(t); kwargs...)
+ end
+ if length(t) > nnz(t)
+ # Some elements are zero, account for that
+ # with the initial value.
+ init_kwargs = (; init = zero(elt))
+ else
+ init_kwargs = (;)
+ end
+ return mapreduce(f, op, storage(t); kwargs..., init_kwargs...)
end
function blocksparse_isequal(x, y)
- return array(x) == array(y)
+ return array(x) == array(y)
end
function Base.:(==)(x::BlockSparseTensor, y::BlockSparseTensor)
- return blocksparse_isequal(x, y)
+ return blocksparse_isequal(x, y)
end
function Base.:(==)(x::BlockSparseTensor, y::Tensor)
- return blocksparse_isequal(x, y)
+ return blocksparse_isequal(x, y)
end
function Base.:(==)(x::Tensor, y::BlockSparseTensor)
- return blocksparse_isequal(x, y)
+ return blocksparse_isequal(x, y)
end
## TODO currently this fails on GPU with scalar indexing
function map_diag!(
- f::Function,
- exposed_t_destination::Exposed{<:AbstractArray,<:BlockSparseTensor},
- exposed_t_source::Exposed{<:AbstractArray,<:BlockSparseTensor},
-)
- t_destination = unexpose(exposed_t_destination)
- t_source = unexpose(exposed_t_source)
- for i in 1:diaglength(t_destination)
- NDTensors.setdiagindex!(t_destination, f(NDTensors.getdiagindex(t_source, i)), i)
- end
- return t_destination
+ f::Function,
+ exposed_t_destination::Exposed{<:AbstractArray, <:BlockSparseTensor},
+ exposed_t_source::Exposed{<:AbstractArray, <:BlockSparseTensor},
+ )
+ t_destination = unexpose(exposed_t_destination)
+ t_source = unexpose(exposed_t_source)
+ for i in 1:diaglength(t_destination)
+ NDTensors.setdiagindex!(t_destination, f(NDTensors.getdiagindex(t_source, i)), i)
+ end
+ return t_destination
end
#
# Operations
#
# TODO: extend to case with different block structures
-function +(T1::BlockSparseTensor{<:Number,N}, T2::BlockSparseTensor{<:Number,N}) where {N}
- inds(T1) ≠ inds(T2) &&
- error("Cannot add block sparse tensors with different block structure")
- R = copy(T1)
- return permutedims!!(R, T2, ntuple(identity, Val(N)), +)
+function +(T1::BlockSparseTensor{<:Number, N}, T2::BlockSparseTensor{<:Number, N}) where {N}
+ inds(T1) ≠ inds(T2) &&
+ error("Cannot add block sparse tensors with different block structure")
+ R = copy(T1)
+ return permutedims!!(R, T2, ntuple(identity, Val(N)), +)
end
-function permutedims(T::BlockSparseTensor{<:Number,N}, perm::NTuple{N,Int}) where {N}
- blockoffsetsR, indsR = permutedims(blockoffsets(T), inds(T), perm)
- R = NDTensors.similar(T, blockoffsetsR, indsR)
- permutedims!(R, T, perm)
- return R
+function permutedims(T::BlockSparseTensor{<:Number, N}, perm::NTuple{N, Int}) where {N}
+ blockoffsetsR, indsR = permutedims(blockoffsets(T), inds(T), perm)
+ R = NDTensors.similar(T, blockoffsetsR, indsR)
+ permutedims!(R, T, perm)
+ return R
end
-function _permute_combdims(combdims::NTuple{NC,Int}, perm::NTuple{NP,Int}) where {NC,NP}
- res = MVector{NC,Int}(undef)
- iperm = invperm(perm)
- for i in 1:NC
- res[i] = iperm[combdims[i]]
- end
- return Tuple(res)
+function _permute_combdims(combdims::NTuple{NC, Int}, perm::NTuple{NP, Int}) where {NC, NP}
+ res = MVector{NC, Int}(undef)
+ iperm = invperm(perm)
+ for i in 1:NC
+ res[i] = iperm[combdims[i]]
+ end
+ return Tuple(res)
end
#
@@ -455,172 +456,172 @@ end
# Note that combdims is expected to be contiguous and ordered
# smallest to largest
-function combine_dims(blocks::Vector{Block{N}}, inds, combdims::NTuple{NC,Int}) where {N,NC}
- nblcks = nblocks(inds, combdims)
- blocks_comb = Vector{Block{N - NC + 1}}(undef, length(blocks))
- for (i, block) in enumerate(blocks)
- blocks_comb[i] = combine_dims(block, inds, combdims)
- end
- return blocks_comb
+function combine_dims(blocks::Vector{Block{N}}, inds, combdims::NTuple{NC, Int}) where {N, NC}
+ nblcks = nblocks(inds, combdims)
+ blocks_comb = Vector{Block{N - NC + 1}}(undef, length(blocks))
+ for (i, block) in enumerate(blocks)
+ blocks_comb[i] = combine_dims(block, inds, combdims)
+ end
+ return blocks_comb
end
-function combine_dims(block::Block, inds, combdims::NTuple{NC,Int}) where {NC}
- nblcks = nblocks(inds, combdims)
- slice = getindices(block, combdims)
- slice_comb = LinearIndices(nblcks)[slice...]
- block_comb = deleteat(block, combdims)
- block_comb = insertafter(block_comb, tuple(slice_comb), minimum(combdims) - 1)
- return block_comb
+function combine_dims(block::Block, inds, combdims::NTuple{NC, Int}) where {NC}
+ nblcks = nblocks(inds, combdims)
+ slice = getindices(block, combdims)
+ slice_comb = LinearIndices(nblcks)[slice...]
+ block_comb = deleteat(block, combdims)
+ block_comb = insertafter(block_comb, tuple(slice_comb), minimum(combdims) - 1)
+ return block_comb
end
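
For reference, a plain-Julia sketch of the index arithmetic `combine_dims` performs on a single block (a hypothetical `combine_dims_sketch` helper on plain tuples, not the package's `Block` and index types): the block labels of the combined dimensions are fused into one linear block label, which replaces them at the position of the first combined dimension.

```julia
# Hypothetical sketch: combine dims 2 and 3 of block (2, 1, 3) on a grid with
# (2, 4) blocks along those two dimensions.
function combine_dims_sketch(block::NTuple{N, Int}, nblocks_comb::Tuple, combdims::Tuple) where {N}
    slice = map(d -> block[d], combdims)                 # block labels of the combined dims
    slice_comb = LinearIndices(nblocks_comb)[slice...]   # fused (linear) block label
    rest = Tuple(block[d] for d in 1:N if d ∉ combdims)  # labels of the remaining dims
    pos = minimum(combdims) - 1                          # insertion position of the fused label
    return (rest[1:pos]..., slice_comb, rest[(pos + 1):end]...)
end

combine_dims_sketch((2, 1, 3), (2, 4), (2, 3))  # (2, 5)
```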
# In the dimension dim, permute the blocks
function perm_blocks(blocks::Blocks{N}, dim::Int, perm) where {N}
- blocks_perm = Blocks{N}(undef, nnzblocks(blocks))
- iperm = invperm(perm)
- for (i, block) in enumerate(blocks)
- blocks_perm[i] = setindex(block, iperm[block[dim]], dim)
- end
- return blocks_perm
+ blocks_perm = Blocks{N}(undef, nnzblocks(blocks))
+ iperm = invperm(perm)
+ for (i, block) in enumerate(blocks)
+ blocks_perm[i] = setindex(block, iperm[block[dim]], dim)
+ end
+ return blocks_perm
end
# In the dimension dim, permute the block
function perm_block(block::Block, dim::Int, perm)
- iperm = invperm(perm)
- return setindex(block, iperm[block[dim]], dim)
+ iperm = invperm(perm)
+ return setindex(block, iperm[block[dim]], dim)
end
# In the dimension dim, combine the specified blocks
function combine_blocks(blocks::Blocks, dim::Int, blockcomb::Vector{Int})
- blocks_comb = copy(blocks)
- nnz_comb = nnzblocks(blocks)
- for (i, block) in enumerate(blocks)
- dimval = block[dim]
- blocks_comb[i] = setindex(block, blockcomb[dimval], dim)
- end
- unique!(blocks_comb)
- return blocks_comb
+ blocks_comb = copy(blocks)
+ nnz_comb = nnzblocks(blocks)
+ for (i, block) in enumerate(blocks)
+ dimval = block[dim]
+ blocks_comb[i] = setindex(block, blockcomb[dimval], dim)
+ end
+ unique!(blocks_comb)
+ return blocks_comb
end
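
A quick check of the relabel-then-deduplicate step in `combine_blocks`, using the same `blockcomb` convention as the comments further down (plain vectors, made-up block labels): original labels 2 and 3 along `dim` both map to combined block 2, so one duplicate is dropped.

```julia
blockcomb = [1, 2, 2, 3]
dimvals = [1, 2, 3, 4]                 # block[dim] of four blocks before combining
unique(blockcomb[v] for v in dimvals)  # [1, 2, 3]
```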
function permutedims_combine_output(
- T::BlockSparseTensor{ElT,N},
- is,
- perm::NTuple{N,Int},
- combdims::NTuple{NC,Int},
- blockperm::Vector{Int},
- blockcomb::Vector{Int},
-) where {ElT,N,NC}
- # Permute the indices
- indsT = inds(T)
- inds_perm = permute(indsT, perm)
+ T::BlockSparseTensor{ElT, N},
+ is,
+ perm::NTuple{N, Int},
+ combdims::NTuple{NC, Int},
+ blockperm::Vector{Int},
+ blockcomb::Vector{Int},
+ ) where {ElT, N, NC}
+ # Permute the indices
+ indsT = inds(T)
+ inds_perm = permute(indsT, perm)
- # Now that the indices are permuted, compute
- # which indices are now combined
- combdims_perm = TupleTools.sort(_permute_combdims(combdims, perm))
+ # Now that the indices are permuted, compute
+ # which indices are now combined
+ combdims_perm = TupleTools.sort(_permute_combdims(combdims, perm))
- # Permute the nonzero blocks (dimension-wise)
- blocks = nzblocks(T)
- blocks_perm = permutedims(blocks, perm)
+ # Permute the nonzero blocks (dimension-wise)
+ blocks = nzblocks(T)
+ blocks_perm = permutedims(blocks, perm)
- # Combine the nonzero blocks (dimension-wise)
- blocks_perm_comb = combine_dims(blocks_perm, inds_perm, combdims_perm)
+ # Combine the nonzero blocks (dimension-wise)
+ blocks_perm_comb = combine_dims(blocks_perm, inds_perm, combdims_perm)
- # Permute the blocks (within the newly combined dimension)
- comb_ind_loc = minimum(combdims_perm)
- blocks_perm_comb = perm_blocks(blocks_perm_comb, comb_ind_loc, blockperm)
- blocks_perm_comb = sort(blocks_perm_comb; lt=isblockless)
+ # Permute the blocks (within the newly combined dimension)
+ comb_ind_loc = minimum(combdims_perm)
+ blocks_perm_comb = perm_blocks(blocks_perm_comb, comb_ind_loc, blockperm)
+ blocks_perm_comb = sort(blocks_perm_comb; lt = isblockless)
- # Combine the blocks (within the newly combined and permuted dimension)
- blocks_perm_comb = combine_blocks(blocks_perm_comb, comb_ind_loc, blockcomb)
+ # Combine the blocks (within the newly combined and permuted dimension)
+ blocks_perm_comb = combine_blocks(blocks_perm_comb, comb_ind_loc, blockcomb)
- return BlockSparseTensor(unwrap_array_type(T), blocks_perm_comb, is)
+ return BlockSparseTensor(unwrap_array_type(T), blocks_perm_comb, is)
end
function permutedims_combine(
- T::BlockSparseTensor{ElT,N},
- is,
- perm::NTuple{N,Int},
- combdims::NTuple{NC,Int},
- blockperm::Vector{Int},
- blockcomb::Vector{Int},
-) where {ElT,N,NC}
- R = permutedims_combine_output(T, is, perm, combdims, blockperm, blockcomb)
-
- # Permute the indices
- inds_perm = permute(inds(T), perm)
-
- # Now that the indices are permuted, compute
- # which indices are now combined
- combdims_perm = TupleTools.sort(_permute_combdims(combdims, perm))
- comb_ind_loc = minimum(combdims_perm)
-
- # Determine the new index before combining
- inds_to_combine = getindices(inds_perm, combdims_perm)
- ind_comb = ⊗(inds_to_combine...)
- ind_comb = permuteblocks(ind_comb, blockperm)
-
- for bof in pairs(blockoffsets(T))
- Tb = blockview(T, bof)
- b = nzblock(bof)
- b_perm = permute(b, perm)
- b_perm_comb = combine_dims(b_perm, inds_perm, combdims_perm)
- b_perm_comb = perm_block(b_perm_comb, comb_ind_loc, blockperm)
- b_in_combined_dim = b_perm_comb[comb_ind_loc]
- new_b_in_combined_dim = blockcomb[b_in_combined_dim]
- offset = 0
- pos_in_new_combined_block = 1
- while b_in_combined_dim - pos_in_new_combined_block > 0 &&
- blockcomb[b_in_combined_dim - pos_in_new_combined_block] == new_b_in_combined_dim
- offset += blockdim(ind_comb, b_in_combined_dim - pos_in_new_combined_block)
- pos_in_new_combined_block += 1
+ T::BlockSparseTensor{ElT, N},
+ is,
+ perm::NTuple{N, Int},
+ combdims::NTuple{NC, Int},
+ blockperm::Vector{Int},
+ blockcomb::Vector{Int},
+ ) where {ElT, N, NC}
+ R = permutedims_combine_output(T, is, perm, combdims, blockperm, blockcomb)
+
+ # Permute the indices
+ inds_perm = permute(inds(T), perm)
+
+ # Now that the indices are permuted, compute
+ # which indices are now combined
+ combdims_perm = TupleTools.sort(_permute_combdims(combdims, perm))
+ comb_ind_loc = minimum(combdims_perm)
+
+ # Determine the new index before combining
+ inds_to_combine = getindices(inds_perm, combdims_perm)
+ ind_comb = ⊗(inds_to_combine...)
+ ind_comb = permuteblocks(ind_comb, blockperm)
+
+ for bof in pairs(blockoffsets(T))
+ Tb = blockview(T, bof)
+ b = nzblock(bof)
+ b_perm = permute(b, perm)
+ b_perm_comb = combine_dims(b_perm, inds_perm, combdims_perm)
+ b_perm_comb = perm_block(b_perm_comb, comb_ind_loc, blockperm)
+ b_in_combined_dim = b_perm_comb[comb_ind_loc]
+ new_b_in_combined_dim = blockcomb[b_in_combined_dim]
+ offset = 0
+ pos_in_new_combined_block = 1
+ while b_in_combined_dim - pos_in_new_combined_block > 0 &&
+ blockcomb[b_in_combined_dim - pos_in_new_combined_block] == new_b_in_combined_dim
+ offset += blockdim(ind_comb, b_in_combined_dim - pos_in_new_combined_block)
+ pos_in_new_combined_block += 1
+ end
+ b_new = setindex(b_perm_comb, new_b_in_combined_dim, comb_ind_loc)
+
+ Rb_total = blockview(R, b_new)
+ dimsRb_tot = dims(Rb_total)
+ subind = ntuple(
+ i -> if i == comb_ind_loc
+ range(1 + offset; stop = offset + blockdim(ind_comb, b_in_combined_dim))
+ else
+ range(1; stop = dimsRb_tot[i])
+ end,
+ N - NC + 1,
+ )
+ Rb = @view array(Rb_total)[subind...]
+
+ # XXX Are these equivalent?
+ #Tb_perm = permutedims(Tb,perm)
+ #copyto!(Rb,Tb_perm)
+
+ # XXX Not sure what this was for
+ Rb = reshape(Rb, permute(dims(Tb), perm))
+ # TODO: Make this `convert` call more general
+ # for GPUs.
+ Tbₐ = convert(Array, Tb)
+ ## @strided Rb .= permutedims(Tbₐ, perm)
+ permutedims!(expose(Rb), expose(Tbₐ), perm)
end
- b_new = setindex(b_perm_comb, new_b_in_combined_dim, comb_ind_loc)
-
- Rb_total = blockview(R, b_new)
- dimsRb_tot = dims(Rb_total)
- subind = ntuple(
- i -> if i == comb_ind_loc
- range(1 + offset; stop=offset + blockdim(ind_comb, b_in_combined_dim))
- else
- range(1; stop=dimsRb_tot[i])
- end,
- N - NC + 1,
- )
- Rb = @view array(Rb_total)[subind...]
- # XXX Are these equivalent?
- #Tb_perm = permutedims(Tb,perm)
- #copyto!(Rb,Tb_perm)
-
- # XXX Not sure what this was for
- Rb = reshape(Rb, permute(dims(Tb), perm))
- # TODO: Make this `convert` call more general
- # for GPUs.
- Tbₐ = convert(Array, Tb)
- ## @strided Rb .= permutedims(Tbₐ, perm)
- permutedims!(expose(Rb), expose(Tbₐ), perm)
- end
-
- return R
+ return R
end
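
The `offset` accumulated in the while-loop above is just the total block dimension of the earlier original blocks that merge into the same combined block; a plain-arithmetic sketch with made-up numbers:

```julia
blockcomb = [1, 1, 2]   # original blocks 1 and 2 merge into combined block 1
blockdims = [3, 5, 4]   # block dimensions along the combined index
b = 2                   # original (pre-combination) block of interest
sum((blockdims[j] for j in 1:(b - 1) if blockcomb[j] == blockcomb[b]); init = 0)  # 3
```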
# TODO: optimize by avoiding findfirst
function _number_uncombined(blockval::Integer, blockcomb::Vector)
- if blockval == blockcomb[end]
- return length(blockcomb) - findfirst(==(blockval), blockcomb) + 1
- end
- return findfirst(==(blockval + 1), blockcomb) - findfirst(==(blockval), blockcomb)
+ if blockval == blockcomb[end]
+ return length(blockcomb) - findfirst(==(blockval), blockcomb) + 1
+ end
+ return findfirst(==(blockval + 1), blockcomb) - findfirst(==(blockval), blockcomb)
end
# TODO: optimize by avoiding findfirst
function _number_uncombined_shift(blockval::Integer, blockcomb::Vector)
- if blockval == 1
- return 0
- end
- ncomb_shift = 0
- for i in 1:(blockval - 1)
- ncomb_shift += findfirst(==(i + 1), blockcomb) - findfirst(==(i), blockcomb) - 1
- end
- return ncomb_shift
+ if blockval == 1
+ return 0
+ end
+ ncomb_shift = 0
+ for i in 1:(blockval - 1)
+ ncomb_shift += findfirst(==(i + 1), blockcomb) - findfirst(==(i), blockcomb) - 1
+ end
+ return ncomb_shift
end
# Uncombine the blocks along the dimension dim
@@ -628,164 +629,164 @@ end
# is [1,2,2,3] and dim = 2, so the blocks (1,2),(2,3) get
# split into (1,2),(1,3),(2,4))
function uncombine_blocks(blocks::Blocks{N}, dim::Int, blockcomb::Vector{Int}) where {N}
- blocks_uncomb = Blocks{N}()
- ncomb_tot = 0
- for i in 1:length(blocks)
- block = blocks[i]
+ blocks_uncomb = Blocks{N}()
+ ncomb_tot = 0
+ for i in 1:length(blocks)
+ block = blocks[i]
+ blockval = block[dim]
+ ncomb = _number_uncombined(blockval, blockcomb)
+ ncomb_shift = _number_uncombined_shift(blockval, blockcomb)
+ push!(blocks_uncomb, setindex(block, blockval + ncomb_shift, dim))
+ for j in 1:(ncomb - 1)
+ push!(blocks_uncomb, setindex(block, blockval + ncomb_shift + j, dim))
+ end
+ end
+ return blocks_uncomb
+end
+
+function uncombine_block(block::Block{N}, dim::Int, blockcomb::Vector{Int}) where {N}
+ blocks_uncomb = Blocks{N}()
+ ncomb_tot = 0
blockval = block[dim]
ncomb = _number_uncombined(blockval, blockcomb)
ncomb_shift = _number_uncombined_shift(blockval, blockcomb)
push!(blocks_uncomb, setindex(block, blockval + ncomb_shift, dim))
for j in 1:(ncomb - 1)
- push!(blocks_uncomb, setindex(block, blockval + ncomb_shift + j, dim))
+ push!(blocks_uncomb, setindex(block, blockval + ncomb_shift + j, dim))
end
- end
- return blocks_uncomb
-end
-
-function uncombine_block(block::Block{N}, dim::Int, blockcomb::Vector{Int}) where {N}
- blocks_uncomb = Blocks{N}()
- ncomb_tot = 0
- blockval = block[dim]
- ncomb = _number_uncombined(blockval, blockcomb)
- ncomb_shift = _number_uncombined_shift(blockval, blockcomb)
- push!(blocks_uncomb, setindex(block, blockval + ncomb_shift, dim))
- for j in 1:(ncomb - 1)
- push!(blocks_uncomb, setindex(block, blockval + ncomb_shift + j, dim))
- end
- return blocks_uncomb
+ return blocks_uncomb
end
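
A self-contained plain-Julia sketch of the example in the comment above (hypothetical names mirroring the logic of `_number_uncombined`, `_number_uncombined_shift` and `uncombine_blocks`, not calls into the package internals): `blockcomb = [1, 2, 2, 3]` with `dim = 2` splits the blocks `(1, 2)` and `(2, 3)` into `(1, 2)`, `(1, 3)` and `(2, 4)`.

```julia
n_uncombined(v, comb) =
    v == comb[end] ? length(comb) - findfirst(==(v), comb) + 1 :
    findfirst(==(v + 1), comb) - findfirst(==(v), comb)
uncombined_shift(v, comb) =
    sum((findfirst(==(i + 1), comb) - findfirst(==(i), comb) - 1 for i in 1:(v - 1)); init = 0)

function uncombine_sketch(blocks, dim, comb)
    out = empty(blocks)
    for b in blocks
        v = b[dim]
        for j in 0:(n_uncombined(v, comb) - 1)
            push!(out, Base.setindex(b, v + uncombined_shift(v, comb) + j, dim))
        end
    end
    return out
end

uncombine_sketch([(1, 2), (2, 3)], 2, [1, 2, 2, 3])  # [(1, 2), (1, 3), (2, 4)]
```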
function uncombine_output(
- T::BlockSparseTensor{ElT,N},
- T_labels,
- is,
- is_labels,
- combdim::Int,
- blockperm::Vector{Int},
- blockcomb::Vector{Int},
-) where {ElT<:Number,N}
- labels_uncomb_perm = setdiff(is_labels, T_labels)
- ind_uncomb_perm = ⊗(is[map(x -> findfirst(==(x), is_labels), labels_uncomb_perm)]...)
- inds_uncomb_perm = insertat(inds(T), ind_uncomb_perm, combdim)
- # Uncombine the blocks of T
- blocks_uncomb = uncombine_blocks(nzblocks(T), combdim, blockcomb)
- blocks_uncomb_perm = perm_blocks(blocks_uncomb, combdim, invperm(blockperm))
- boffs_uncomb_perm, nnz_uncomb_perm = blockoffsets(blocks_uncomb_perm, inds_uncomb_perm)
- T_uncomb_perm = tensor(
- BlockSparse(unwrap_array_type(T), boffs_uncomb_perm, nnz_uncomb_perm), inds_uncomb_perm
- )
- R = reshape(T_uncomb_perm, is)
- return R
+ T::BlockSparseTensor{ElT, N},
+ T_labels,
+ is,
+ is_labels,
+ combdim::Int,
+ blockperm::Vector{Int},
+ blockcomb::Vector{Int},
+ ) where {ElT <: Number, N}
+ labels_uncomb_perm = setdiff(is_labels, T_labels)
+ ind_uncomb_perm = ⊗(is[map(x -> findfirst(==(x), is_labels), labels_uncomb_perm)]...)
+ inds_uncomb_perm = insertat(inds(T), ind_uncomb_perm, combdim)
+ # Uncombine the blocks of T
+ blocks_uncomb = uncombine_blocks(nzblocks(T), combdim, blockcomb)
+ blocks_uncomb_perm = perm_blocks(blocks_uncomb, combdim, invperm(blockperm))
+ boffs_uncomb_perm, nnz_uncomb_perm = blockoffsets(blocks_uncomb_perm, inds_uncomb_perm)
+ T_uncomb_perm = tensor(
+ BlockSparse(unwrap_array_type(T), boffs_uncomb_perm, nnz_uncomb_perm), inds_uncomb_perm
+ )
+ R = reshape(T_uncomb_perm, is)
+ return R
end
function reshape(blockT::Block{NT}, indsT, indsR) where {NT}
- nblocksT = nblocks(indsT)
- nblocksR = nblocks(indsR)
- blockR = Tuple(
- CartesianIndices(nblocksR)[LinearIndices(nblocksT)[CartesianIndex(blockT)]]
- )
- return blockR
+ nblocksT = nblocks(indsT)
+ nblocksR = nblocks(indsR)
+ blockR = Tuple(
+ CartesianIndices(nblocksR)[LinearIndices(nblocksT)[CartesianIndex(blockT)]]
+ )
+ return blockR
end
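
The block `reshape` above maps a block through its linear position on the block grid; a direct check with plain tuples:

```julia
nblocksT = (2, 3)   # 2x3 block grid before reshaping
nblocksR = (6,)     # single combined index with 6 blocks
blockT = (2, 1)
Tuple(CartesianIndices(nblocksR)[LinearIndices(nblocksT)[CartesianIndex(blockT)]])  # (2,)
```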
function uncombine(
- T::BlockSparseTensor{<:Number,NT},
- T_labels,
- is,
- is_labels,
- combdim::Int,
- blockperm::Vector{Int},
- blockcomb::Vector{Int},
-) where {NT}
- NR = length(is)
- R = uncombine_output(T, T_labels, is, is_labels, combdim, blockperm, blockcomb)
- invblockperm = invperm(blockperm)
- # This is needed for reshaping the block
- # TODO: It is already calculated in uncombine_output, use it from there
- labels_uncomb_perm = setdiff(is_labels, T_labels)
- ind_uncomb_perm = ⊗(is[map(x -> findfirst(==(x), is_labels), labels_uncomb_perm)]...)
- ind_uncomb = permuteblocks(ind_uncomb_perm, blockperm)
- # Same as inds(T) but with the blocks uncombined
- inds_uncomb = insertat(inds(T), ind_uncomb, combdim)
- inds_uncomb_perm = insertat(inds(T), ind_uncomb_perm, combdim)
- for bof in pairs(blockoffsets(T))
- b = nzblock(bof)
- Tb_tot = blockview(T, bof)
- dimsTb_tot = dims(Tb_tot)
- bs_uncomb = uncombine_block(b, combdim, blockcomb)
- offset = 0
- for i in 1:length(bs_uncomb)
- b_uncomb = bs_uncomb[i]
- b_uncomb_perm = perm_block(b_uncomb, combdim, invblockperm)
- b_uncomb_perm_reshape = reshape(b_uncomb_perm, inds_uncomb_perm, is)
- Rb = blockview(R, b_uncomb_perm_reshape)
- b_uncomb_in_combined_dim = b_uncomb_perm[combdim]
- start = offset + 1
- stop = offset + blockdim(ind_uncomb_perm, b_uncomb_in_combined_dim)
- subind = ntuple(
- i -> i == combdim ? range(start; stop=stop) : range(1; stop=dimsTb_tot[i]), NT
- )
- offset = stop
- Tb = @view array(Tb_tot)[subind...]
-
- # Alternative (but maybe slower):
- #copyto!(Rb,Tb)
-
- if length(Tb) == 1
- # Call `cpu` to avoid allowscalar error on GPU.
- # TODO: Replace with `@allowscalar`, requires adding
- # `GPUArraysCore.jl` as a dependency.
- Rb[] = cpu(Tb)[]
- else
- # XXX: this used to be:
- # Rbₐᵣ = ReshapedArray(parent(Rbₐ), size(Tb), ())
- # however that doesn't work with subarrays
- Rbₐ = convert(Array, Rb)
- ## Rbₐᵣ = ReshapedArray(Rbₐ, size(Tb), ())
- Rbₐᵣ = reshape(Rbₐ, size(Tb))
- ## @strided Rbₐᵣ .= Tb
- copyto!(expose(Rbₐᵣ), expose(Tb))
- end
+ T::BlockSparseTensor{<:Number, NT},
+ T_labels,
+ is,
+ is_labels,
+ combdim::Int,
+ blockperm::Vector{Int},
+ blockcomb::Vector{Int},
+ ) where {NT}
+ NR = length(is)
+ R = uncombine_output(T, T_labels, is, is_labels, combdim, blockperm, blockcomb)
+ invblockperm = invperm(blockperm)
+ # This is needed for reshaping the block
+ # TODO: It is already calculated in uncombine_output, use it from there
+ labels_uncomb_perm = setdiff(is_labels, T_labels)
+ ind_uncomb_perm = ⊗(is[map(x -> findfirst(==(x), is_labels), labels_uncomb_perm)]...)
+ ind_uncomb = permuteblocks(ind_uncomb_perm, blockperm)
+ # Same as inds(T) but with the blocks uncombined
+ inds_uncomb = insertat(inds(T), ind_uncomb, combdim)
+ inds_uncomb_perm = insertat(inds(T), ind_uncomb_perm, combdim)
+ for bof in pairs(blockoffsets(T))
+ b = nzblock(bof)
+ Tb_tot = blockview(T, bof)
+ dimsTb_tot = dims(Tb_tot)
+ bs_uncomb = uncombine_block(b, combdim, blockcomb)
+ offset = 0
+ for i in 1:length(bs_uncomb)
+ b_uncomb = bs_uncomb[i]
+ b_uncomb_perm = perm_block(b_uncomb, combdim, invblockperm)
+ b_uncomb_perm_reshape = reshape(b_uncomb_perm, inds_uncomb_perm, is)
+ Rb = blockview(R, b_uncomb_perm_reshape)
+ b_uncomb_in_combined_dim = b_uncomb_perm[combdim]
+ start = offset + 1
+ stop = offset + blockdim(ind_uncomb_perm, b_uncomb_in_combined_dim)
+ subind = ntuple(
+ i -> i == combdim ? range(start; stop = stop) : range(1; stop = dimsTb_tot[i]), NT
+ )
+ offset = stop
+ Tb = @view array(Tb_tot)[subind...]
+
+ # Alternative (but maybe slower):
+ #copyto!(Rb,Tb)
+
+ if length(Tb) == 1
+ # Call `cpu` to avoid allowscalar error on GPU.
+ # TODO: Replace with `@allowscalar`, requires adding
+ # `GPUArraysCore.jl` as a dependency.
+ Rb[] = cpu(Tb)[]
+ else
+ # XXX: this used to be:
+ # Rbₐᵣ = ReshapedArray(parent(Rbₐ), size(Tb), ())
+ # however that doesn't work with subarrays
+ Rbₐ = convert(Array, Rb)
+ ## Rbₐᵣ = ReshapedArray(Rbₐ, size(Tb), ())
+ Rbₐᵣ = reshape(Rbₐ, size(Tb))
+ ## @strided Rbₐᵣ .= Tb
+ copyto!(expose(Rbₐᵣ), expose(Tb))
+ end
+ end
end
- end
- return R
+ return R
end
function copyto!(R::BlockSparseTensor, T::BlockSparseTensor)
- for bof in pairs(blockoffsets(T))
- copyto!(blockview(R, nzblock(bof)), blockview(T, bof))
- end
- return R
+ for bof in pairs(blockoffsets(T))
+ copyto!(blockview(R, nzblock(bof)), blockview(T, bof))
+ end
+ return R
end
# TODO: handle case where:
# f(zero(ElR),zero(ElT)) != promote_type(ElR,ElT)
function permutedims!!(
- R::BlockSparseTensor{ElR,N},
- T::BlockSparseTensor{ElT,N},
- perm::NTuple{N,Int},
- f::Function=(r, t) -> t,
-) where {ElR,ElT,N}
- RR = convert(promote_type(typeof(R), typeof(T)), R)
- permutedims!(RR, T, perm, f)
- return RR
+ R::BlockSparseTensor{ElR, N},
+ T::BlockSparseTensor{ElT, N},
+ perm::NTuple{N, Int},
+ f::Function = (r, t) -> t,
+ ) where {ElR, ElT, N}
+ RR = convert(promote_type(typeof(R), typeof(T)), R)
+ permutedims!(RR, T, perm, f)
+ return RR
end
#
-scale_blocks!(T, compute_fac::Function=(b) -> 1) = T
+scale_blocks!(T, compute_fac::Function = (b) -> 1) = T
#
function scale_blocks!(
- T::BlockSparseTensor{<:Number,N}, compute_fac::Function=(b) -> 1
-) where {N}
- for blockT in keys(blockoffsets(T))
- fac = compute_fac(blockT)
- if fac != 1
- Tblock = blockview(T, blockT)
- scale!(Tblock, fac)
+ T::BlockSparseTensor{<:Number, N}, compute_fac::Function = (b) -> 1
+ ) where {N}
+ for blockT in keys(blockoffsets(T))
+ fac = compute_fac(blockT)
+ if fac != 1
+ Tblock = blockview(T, blockT)
+ scale!(Tblock, fac)
+ end
end
- end
- return T
+ return T
end
#
@@ -793,165 +794,165 @@ permfactor(perm, block, inds) = 1
using TypeParameterAccessors: set_type_parameters, parenttype
function permutedims!(
- R::BlockSparseTensor{<:Number,N},
- T::BlockSparseTensor{<:Number,N},
- perm::NTuple{N,Int},
- f::Function=(r, t) -> t,
-) where {N}
- blocks_R = keys(blockoffsets(R))
- perm_blocks_T = map(b -> permute(b, perm), keys(blockoffsets(T)))
- blocks = union(blocks_R, perm_blocks_T)
- for block in blocks
- block_T = permute(block, invperm(perm))
-
- # Loop over non-zero blocks of T/R
- Rblock = blockview(R, block)
- Tblock = blockview(T, block_T)
-
- #
- pfac = permfactor(perm, block_T, inds(T))
- f_fac = isone(pfac) ? f : ((r, t) -> f(r, pfac * t))
-
- Rblock_exists = !isnothing(Rblock)
- Tblock_exists = !isnothing(Tblock)
- if !Rblock_exists
- # Rblock doesn't exist
- block_size = permute(size(Tblock), perm)
- # TODO: Make GPU friendly.
- DenseT = set_type_parameters(Dense, (eltype, parenttype), (eltype(R), datatype(R)))
- Rblock = tensor(generic_zeros(DenseT, prod(block_size)), block_size)
- elseif !Tblock_exists
- # Tblock doesn't exist
- block_size = permute(size(Rblock), invperm(perm))
- # TODO: Make GPU friendly.
- DenseT = set_type_parameters(Dense, (eltype, parenttype), (eltype(T), datatype(T)))
- Tblock = tensor(generic_zeros(DenseT, prod(block_size)), block_size)
- end
- permutedims!(Rblock, Tblock, perm, f_fac)
- if !Rblock_exists
- # Set missing nonzero block
- ## To make sure no allowscalar issue grab the data
- if !iszero(data(Rblock))
- R[block] = Rblock
- end
+ R::BlockSparseTensor{<:Number, N},
+ T::BlockSparseTensor{<:Number, N},
+ perm::NTuple{N, Int},
+ f::Function = (r, t) -> t,
+ ) where {N}
+ blocks_R = keys(blockoffsets(R))
+ perm_blocks_T = map(b -> permute(b, perm), keys(blockoffsets(T)))
+ blocks = union(blocks_R, perm_blocks_T)
+ for block in blocks
+ block_T = permute(block, invperm(perm))
+
+ # Loop over non-zero blocks of T/R
+ Rblock = blockview(R, block)
+ Tblock = blockview(T, block_T)
+
+ #
+ pfac = permfactor(perm, block_T, inds(T))
+ f_fac = isone(pfac) ? f : ((r, t) -> f(r, pfac * t))
+
+ Rblock_exists = !isnothing(Rblock)
+ Tblock_exists = !isnothing(Tblock)
+ if !Rblock_exists
+ # Rblock doesn't exist
+ block_size = permute(size(Tblock), perm)
+ # TODO: Make GPU friendly.
+ DenseT = set_type_parameters(Dense, (eltype, parenttype), (eltype(R), datatype(R)))
+ Rblock = tensor(generic_zeros(DenseT, prod(block_size)), block_size)
+ elseif !Tblock_exists
+ # Tblock doesn't exist
+ block_size = permute(size(Rblock), invperm(perm))
+ # TODO: Make GPU friendly.
+ DenseT = set_type_parameters(Dense, (eltype, parenttype), (eltype(T), datatype(T)))
+ Tblock = tensor(generic_zeros(DenseT, prod(block_size)), block_size)
+ end
+ permutedims!(Rblock, Tblock, perm, f_fac)
+ if !Rblock_exists
+ # Set missing nonzero block
+ ## To make sure no allowscalar issue grab the data
+ if !iszero(data(Rblock))
+ R[block] = Rblock
+ end
+ end
end
- end
- return R
-end
-
-const IntTuple = NTuple{N,Int} where {N}
-const IntOrIntTuple = Union{Int,IntTuple}
-
-function permute_combine(inds::IndsT, pos::Vararg{IntOrIntTuple,N}) where {IndsT,N}
- IndT = eltype(IndsT)
- # Using SizedVector since setindex! doesn't
- # work for MVector when eltype not isbitstype
- newinds = SizedVector{N,IndT}(undef)
- for i in 1:N
- pos_i = pos[i]
- newind_i = inds[pos_i[1]]
- for p in 2:length(pos_i)
- newind_i = newind_i ⊗ inds[pos_i[p]]
+ return R
+end
+
+const IntTuple = NTuple{N, Int} where {N}
+const IntOrIntTuple = Union{Int, IntTuple}
+
+function permute_combine(inds::IndsT, pos::Vararg{IntOrIntTuple, N}) where {IndsT, N}
+ IndT = eltype(IndsT)
+ # Using SizedVector since setindex! doesn't
+ # work for MVector when eltype not isbitstype
+ newinds = SizedVector{N, IndT}(undef)
+ for i in 1:N
+ pos_i = pos[i]
+ newind_i = inds[pos_i[1]]
+ for p in 2:length(pos_i)
+ newind_i = newind_i ⊗ inds[pos_i[p]]
+ end
+ newinds[i] = newind_i
end
- newinds[i] = newind_i
- end
- IndsR = similartype(IndsT, Val{N})
- indsR = IndsR(Tuple(newinds))
- return indsR
+ IndsR = similartype(IndsT, Val{N})
+ indsR = IndsR(Tuple(newinds))
+ return indsR
end
"""
Indices are combined according to the grouping of the input,
for example (1,2),3 will combine the first two indices.
"""
-function combine(inds::IndsT, com::Vararg{IntOrIntTuple,N}) where {IndsT,N}
- IndT = eltype(IndsT)
- # Using SizedVector since setindex! doesn't
- # work for MVector when eltype not isbitstype
- newinds = SizedVector{N,IndT}(undef)
- i_orig = 1
- for i in 1:N
- newind_i = inds[i_orig]
- i_orig += 1
- for p in 2:length(com[i])
- newind_i = newind_i ⊗ inds[i_orig]
- i_orig += 1
+function combine(inds::IndsT, com::Vararg{IntOrIntTuple, N}) where {IndsT, N}
+ IndT = eltype(IndsT)
+ # Using SizedVector since setindex! doesn't
+ # work for MVector when eltype not isbitstype
+ newinds = SizedVector{N, IndT}(undef)
+ i_orig = 1
+ for i in 1:N
+ newind_i = inds[i_orig]
+ i_orig += 1
+ for p in 2:length(com[i])
+ newind_i = newind_i ⊗ inds[i_orig]
+ i_orig += 1
+ end
+ newinds[i] = newind_i
end
- newinds[i] = newind_i
- end
- IndsR = similartype(IndsT, Val{N})
- indsR = IndsR(Tuple(newinds))
- return indsR
+ IndsR = similartype(IndsT, Val{N})
+ indsR = IndsR(Tuple(newinds))
+ return indsR
end
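
A hedged, dimension-level analogue of the docstring above (a hypothetical `group_dims` helper on plain sizes instead of index objects; the bare-integer form `(1,2),3` is written here as `(1, 2), (3,)`): grouping a tensor with dimensions `(2, 3, 4)` this way fuses the first two indices into a single index of size 6.

```julia
group_dims(dims, groups...) = map(g -> prod(dims[i] for i in g), groups)

group_dims((2, 3, 4), (1, 2), (3,))  # (6, 4)
```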
function permute_combine(
- boffs::BlockOffsets, inds::IndsT, pos::Vararg{IntOrIntTuple,N}
-) where {IndsT,N}
- perm = flatten(pos...)
- boffsp, indsp = permutedims(boffs, inds, perm)
- indsR = combine(indsp, pos...)
- boffsR = reshape(boffsp, indsp, indsR)
- return boffsR, indsR
+ boffs::BlockOffsets, inds::IndsT, pos::Vararg{IntOrIntTuple, N}
+ ) where {IndsT, N}
+ perm = flatten(pos...)
+ boffsp, indsp = permutedims(boffs, inds, perm)
+ indsR = combine(indsp, pos...)
+ boffsR = reshape(boffsp, indsp, indsR)
+ return boffsR, indsR
end
function reshape(boffsT::BlockOffsets{NT}, indsT, indsR) where {NT}
- NR = length(indsR)
- boffsR = BlockOffsets{NR}()
- nblocksT = nblocks(indsT)
- nblocksR = nblocks(indsR)
- for (blockT, offsetT) in pairs(boffsT)
- blockR = Block(
- CartesianIndices(nblocksR)[LinearIndices(nblocksT)[CartesianIndex(blockT)]]
- )
- insert!(boffsR, blockR, offsetT)
- end
- return boffsR
+ NR = length(indsR)
+ boffsR = BlockOffsets{NR}()
+ nblocksT = nblocks(indsT)
+ nblocksR = nblocks(indsR)
+ for (blockT, offsetT) in pairs(boffsT)
+ blockR = Block(
+ CartesianIndices(nblocksR)[LinearIndices(nblocksT)[CartesianIndex(blockT)]]
+ )
+ insert!(boffsR, blockR, offsetT)
+ end
+ return boffsR
end
-function reshape(boffsT::BlockOffsets{NT}, blocksR::Vector{Block{NR}}) where {NR,NT}
- boffsR = BlockOffsets{NR}()
- # TODO: check blocksR is ordered and are properly reshaped
- # versions of the blocks of boffsT
- for (i, (blockT, offsetT)) in enumerate(boffsT)
- blockR = blocksR[i]
- boffsR[blockR] = offsetT
- end
- return boffsR
+function reshape(boffsT::BlockOffsets{NT}, blocksR::Vector{Block{NR}}) where {NR, NT}
+ boffsR = BlockOffsets{NR}()
+ # TODO: check blocksR is ordered and are properly reshaped
+ # versions of the blocks of boffsT
+ for (i, (blockT, offsetT)) in enumerate(boffsT)
+ blockR = blocksR[i]
+ boffsR[blockR] = offsetT
+ end
+ return boffsR
end
reshape(T::BlockSparse, boffsR::BlockOffsets) = BlockSparse(data(T), boffsR)
function reshape(T::BlockSparseTensor, boffsR::BlockOffsets, indsR)
- storeR = reshape(storage(T), boffsR)
- return tensor(storeR, indsR)
+ storeR = reshape(storage(T), boffsR)
+ return tensor(storeR, indsR)
end
function reshape(T::BlockSparseTensor, indsR)
- # TODO: add some checks that the block dimensions
- # are consistent (e.g. nnzblocks(T) == nnzblocks(R), etc.)
- boffsR = reshape(blockoffsets(T), inds(T), indsR)
- R = reshape(T, boffsR, indsR)
- return R
+ # TODO: add some checks that the block dimensions
+ # are consistent (e.g. nnzblocks(T) == nnzblocks(R), etc.)
+ boffsR = reshape(blockoffsets(T), inds(T), indsR)
+ R = reshape(T, boffsR, indsR)
+ return R
end
function permute_combine(
- T::BlockSparseTensor{ElT,NT,IndsT}, pos::Vararg{IntOrIntTuple,NR}
-) where {ElT,NT,IndsT,NR}
- boffsR, indsR = permute_combine(blockoffsets(T), inds(T), pos...)
+ T::BlockSparseTensor{ElT, NT, IndsT}, pos::Vararg{IntOrIntTuple, NR}
+ ) where {ElT, NT, IndsT, NR}
+ boffsR, indsR = permute_combine(blockoffsets(T), inds(T), pos...)
- perm = flatten(pos...)
+ perm = flatten(pos...)
- length(perm) ≠ NT && error("Index positions must add up to order of Tensor ($NT)")
- isperm(perm) || error("Index positions must be a permutation")
+ length(perm) ≠ NT && error("Index positions must add up to order of Tensor ($NT)")
+ isperm(perm) || error("Index positions must be a permutation")
- if !is_trivial_permutation(perm)
- Tp = permutedims(T, perm)
- else
- Tp = copy(T)
- end
- NR == NT && return Tp
- R = reshape(Tp, boffsR, indsR)
- return R
+ if !is_trivial_permutation(perm)
+ Tp = permutedims(T, perm)
+ else
+ Tp = copy(T)
+ end
+ NR == NT && return Tp
+ R = reshape(Tp, boffsR, indsR)
+ return R
end
#
@@ -977,26 +978,27 @@ end
# println("Number of nonzero blocks: ",nnzblocks(T))
#end
-function _range2string(rangestart::NTuple{N,Int}, rangeend::NTuple{N,Int}) where {N}
- s = ""
- for n in 1:N
- s = string(s, rangestart[n], ":", rangeend[n])
- if n < N
- s = string(s, ", ")
+function _range2string(rangestart::NTuple{N, Int}, rangeend::NTuple{N, Int}) where {N}
+ s = ""
+ for n in 1:N
+ s = string(s, rangestart[n], ":", rangeend[n])
+ if n < N
+ s = string(s, ", ")
+ end
end
- end
- return s
+ return s
end
function Base.show(io::IO, mime::MIME"text/plain", T::BlockSparseTensor)
- summary(io, T)
- for (n, block) in enumerate(keys(blockoffsets(T)))
- blockdimsT = blockdims(T, block)
- println(io, block)
- println(io, " [", _range2string(blockstart(T, block), blockend(T, block)), "]")
- print_tensor(io, blockview(T, block))
- n < nnzblocks(T) && print(io, "\n\n")
- end
+ summary(io, T)
+ for (n, block) in enumerate(keys(blockoffsets(T)))
+ blockdimsT = blockdims(T, block)
+ println(io, block)
+ println(io, " [", _range2string(blockstart(T, block), blockend(T, block)), "]")
+ print_tensor(io, blockview(T, block))
+ n < nnzblocks(T) && print(io, "\n\n")
+ end
+ return nothing
end
Base.show(io::IO, T::BlockSparseTensor) = show(io, MIME("text/plain"), T)
diff --git a/NDTensors/src/blocksparse/diagblocksparse.jl b/NDTensors/src/blocksparse/diagblocksparse.jl
index 13246c887a..4fe47ec603 100644
--- a/NDTensors/src/blocksparse/diagblocksparse.jl
+++ b/NDTensors/src/blocksparse/diagblocksparse.jl
@@ -8,21 +8,21 @@ export DiagBlockSparse, DiagBlockSparseTensor
# in which case the diagonal has a uniform value
# TODO: Define as an `AbstractBlockSparse`, or
# `GenericBlockSparse` parametrized by `Dense` or `Diag`.
-struct DiagBlockSparse{ElT,VecT,N} <: TensorStorage{ElT}
- data::VecT
- diagblockoffsets::BlockOffsets{N} # Block number-offset pairs
-
- # Nonuniform case
- function DiagBlockSparse(
- data::VecT, blockoffsets::BlockOffsets{N}
- ) where {VecT<:AbstractVector{ElT},N} where {ElT}
- return new{ElT,VecT,N}(data, blockoffsets)
- end
+struct DiagBlockSparse{ElT, VecT, N} <: TensorStorage{ElT}
+ data::VecT
+ diagblockoffsets::BlockOffsets{N} # Block number-offset pairs
+
+ # Nonuniform case
+ function DiagBlockSparse(
+ data::VecT, blockoffsets::BlockOffsets{N}
+ ) where {VecT <: AbstractVector{ElT}, N} where {ElT}
+ return new{ElT, VecT, N}(data, blockoffsets)
+ end
- # Uniform case
- function DiagBlockSparse(data::VecT, blockoffsets::BlockOffsets{N}) where {VecT<:Number,N}
- return new{VecT,VecT,N}(data, blockoffsets)
- end
+ # Uniform case
+ function DiagBlockSparse(data::VecT, blockoffsets::BlockOffsets{N}) where {VecT <: Number, N}
+ return new{VecT, VecT, N}(data, blockoffsets)
+ end
end
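
The two inner constructors encode the uniform/nonuniform split: vector data stores one value per diagonal element, scalar data stores a single value shared by the whole diagonal. A minimal standalone picture (hypothetical sketch types, not the package structs):

```julia
struct UniformDiagSketch{T <: Number}
    value::T                 # one scalar for the entire diagonal
end
struct NonuniformDiagSketch{V <: AbstractVector}
    data::V                  # one entry per diagonal element
end
diagelt(d::UniformDiagSketch, i::Int) = d.value
diagelt(d::NonuniformDiagSketch, i::Int) = d.data[i]

diagelt(UniformDiagSketch(2.0), 7)                 # 2.0
diagelt(NonuniformDiagSketch([1.0, 2.0, 3.0]), 2)  # 2.0
```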
# Data and type accessors.
@@ -31,76 +31,76 @@ datatype(storagetype::Type{<:DiagBlockSparse}) = fieldtype(storagetype, :data)
blockoffsets(storage::DiagBlockSparse) = getfield(storage, :diagblockoffsets)
blockoffsetstype(storage::DiagBlockSparse) = blockoffsetstype(typeof(storage))
function blockoffsetstype(storagetype::Type{<:DiagBlockSparse})
- return fieldtype(storagetype, :diagblockoffsets)
+ return fieldtype(storagetype, :diagblockoffsets)
end
# TODO: Deprecate?
diagblockoffsets(storage::DiagBlockSparse) = blockoffsets(storage)
function setdata(storagetype::Type{<:DiagBlockSparse}, data::AbstractArray)
- error("Must specify `diagblockoffsets`.")
- return DiagBlockSparse(data, blockoffsetstype(storagetype)())
+ error("Must specify `diagblockoffsets`.")
+ return DiagBlockSparse(data, blockoffsetstype(storagetype)())
end
# TODO: Move this to a `set_types.jl` file.
function set_datatype(
- storagetype::Type{<:DiagBlockSparse}, datatype::Type{<:AbstractVector}
-)
- return DiagBlockSparse{eltype(datatype),datatype,ndims(storagetype)}
+ storagetype::Type{<:DiagBlockSparse}, datatype::Type{<:AbstractVector}
+ )
+ return DiagBlockSparse{eltype(datatype), datatype, ndims(storagetype)}
end
function DiagBlockSparse(
- ::Type{ElT}, boffs::BlockOffsets, diaglength::Integer
-) where {ElT<:Number}
- return DiagBlockSparse(zeros(ElT, diaglength), boffs)
+ ::Type{ElT}, boffs::BlockOffsets, diaglength::Integer
+ ) where {ElT <: Number}
+ return DiagBlockSparse(zeros(ElT, diaglength), boffs)
end
function DiagBlockSparse(boffs::BlockOffsets, diaglength::Integer)
- return DiagBlockSparse(Float64, boffs, diaglength)
+ return DiagBlockSparse(Float64, boffs, diaglength)
end
function DiagBlockSparse(
- ::Type{ElT}, ::UndefInitializer, boffs::BlockOffsets, diaglength::Integer
-) where {ElT<:Number}
- return DiagBlockSparse(Vector{ElT}(undef, diaglength), boffs)
+ ::Type{ElT}, ::UndefInitializer, boffs::BlockOffsets, diaglength::Integer
+ ) where {ElT <: Number}
+ return DiagBlockSparse(Vector{ElT}(undef, diaglength), boffs)
end
function DiagBlockSparse(
- datatype::Type{<:AbstractArray},
- ::UndefInitializer,
- boffs::BlockOffsets,
- diaglength::Integer,
-)
- return DiagBlockSparse(datatype(undef, diaglength), boffs)
+ datatype::Type{<:AbstractArray},
+ ::UndefInitializer,
+ boffs::BlockOffsets,
+ diaglength::Integer,
+ )
+ return DiagBlockSparse(datatype(undef, diaglength), boffs)
end
function DiagBlockSparse(::UndefInitializer, boffs::BlockOffsets, diaglength::Integer)
- return DiagBlockSparse(Float64, undef, boffs, diaglength)
+ return DiagBlockSparse(Float64, undef, boffs, diaglength)
end
function findblock(
- D::DiagBlockSparse{<:Number,<:Union{Number,AbstractVector},N}, block::Block{N}; vargs...
-) where {N}
- return findblock(diagblockoffsets(D), block; vargs...)
+ D::DiagBlockSparse{<:Number, <:Union{Number, AbstractVector}, N}, block::Block{N}; vargs...
+ ) where {N}
+ return findblock(diagblockoffsets(D), block; vargs...)
end
-const NonuniformDiagBlockSparse{ElT,VecT} =
- DiagBlockSparse{ElT,VecT} where {VecT<:AbstractVector}
-const UniformDiagBlockSparse{ElT,VecT} = DiagBlockSparse{ElT,VecT} where {VecT<:Number}
+const NonuniformDiagBlockSparse{ElT, VecT} =
+ DiagBlockSparse{ElT, VecT} where {VecT <: AbstractVector}
+const UniformDiagBlockSparse{ElT, VecT} = DiagBlockSparse{ElT, VecT} where {VecT <: Number}
@propagate_inbounds function getindex(D::NonuniformDiagBlockSparse, i::Int)
- return data(D)[i]
+ return data(D)[i]
end
getindex(D::UniformDiagBlockSparse, i::Int) = data(D)
@propagate_inbounds function setindex!(D::DiagBlockSparse, val, i::Int)
- data(D)[i] = val
- return D
+ data(D)[i] = val
+ return D
end
function setindex!(D::UniformDiagBlockSparse, val, i::Int)
- return error("Cannot set elements of a uniform DiagBlockSparse storage")
+ return error("Cannot set elements of a uniform DiagBlockSparse storage")
end
#fill!(D::DiagBlockSparse,v) = fill!(data(D),v)
@@ -112,7 +112,7 @@ setdata(D::DiagBlockSparse, ndata) = DiagBlockSparse(ndata, diagblockoffsets(D))
# TODO: Move this to a `set_types.jl` file.
# TODO: Remove this once uniform diagonal tensors use FillArrays for the data.
function set_datatype(storagetype::Type{<:UniformDiagBlockSparse}, datatype::Type)
- return DiagBlockSparse{datatype,datatype,ndims(storagetype)}
+ return DiagBlockSparse{datatype, datatype, ndims(storagetype)}
end
# TODO: Make this more generic. For example, use an
@@ -120,7 +120,7 @@ end
# automatically forward `NeverAlias` to `AllowAlias` since
# aliasing doesn't matter for immutable types.
function conj(::NeverAlias, storage::UniformDiagBlockSparse)
- return conj(AllowAlias(), storage)
+ return conj(AllowAlias(), storage)
end
## convert to complex
@@ -141,10 +141,10 @@ size(D::DiagBlockSparse) = size(data(D))
# TODO: make this work for other storage besides Vector
function zeros(::Type{<:NonuniformDiagBlockSparse{ElT}}, dim::Int64) where {ElT}
- return DiagBlockSparse(zeros(ElT, dim))
+ return DiagBlockSparse(zeros(ElT, dim))
end
function zeros(::Type{<:UniformDiagBlockSparse{ElT}}, dim::Int64) where {ElT}
- return DiagBlockSparse(zero(ElT))
+ return DiagBlockSparse(zero(ElT))
end
#
@@ -153,19 +153,19 @@ end
#
function promote_rule(
- ::Type{<:UniformDiagBlockSparse{ElT1}}, ::Type{<:UniformDiagBlockSparse{ElT2}}
-) where {ElT1,ElT2}
- ElR = promote_type(ElT1, ElT2)
- return DiagBlockSparse{ElR,ElR}
+ ::Type{<:UniformDiagBlockSparse{ElT1}}, ::Type{<:UniformDiagBlockSparse{ElT2}}
+ ) where {ElT1, ElT2}
+ ElR = promote_type(ElT1, ElT2)
+ return DiagBlockSparse{ElR, ElR}
end
function promote_rule(
- ::Type{<:NonuniformDiagBlockSparse{ElT1,VecT1}},
- ::Type{<:NonuniformDiagBlockSparse{ElT2,VecT2}},
-) where {ElT1,VecT1<:AbstractVector,ElT2,VecT2<:AbstractVector}
- ElR = promote_type(ElT1, ElT2)
- VecR = promote_type(VecT1, VecT2)
- return DiagBlockSparse{ElR,VecR}
+ ::Type{<:NonuniformDiagBlockSparse{ElT1, VecT1}},
+ ::Type{<:NonuniformDiagBlockSparse{ElT2, VecT2}},
+ ) where {ElT1, VecT1 <: AbstractVector, ElT2, VecT2 <: AbstractVector}
+ ElR = promote_type(ElT1, ElT2)
+ VecR = promote_type(VecT1, VecT2)
+ return DiagBlockSparse{ElR, VecR}
end
# This is an internal definition, is there a more general way?
@@ -180,62 +180,62 @@ end
# TODO: how do we make this work more generally for T2<:AbstractVector{S2}?
# Make a similartype(AbstractVector{S2},T1) -> AbstractVector{T1} function?
function promote_rule(
- ::Type{<:UniformDiagBlockSparse{ElT1,VecT1}},
- ::Type{<:NonuniformDiagBlockSparse{ElT2,Vector{ElT2}}},
-) where {ElT1,VecT1<:Number,ElT2}
- ElR = promote_type(ElT1, ElT2)
- VecR = Vector{ElR}
- return DiagBlockSparse{ElR,VecR}
+ ::Type{<:UniformDiagBlockSparse{ElT1, VecT1}},
+ ::Type{<:NonuniformDiagBlockSparse{ElT2, Vector{ElT2}}},
+ ) where {ElT1, VecT1 <: Number, ElT2}
+ ElR = promote_type(ElT1, ElT2)
+ VecR = Vector{ElR}
+ return DiagBlockSparse{ElR, VecR}
end
function promote_rule(
- ::Type{BlockSparseT1}, ::Type{<:NonuniformDiagBlockSparse{ElT2,VecT2,N2}}
-) where {BlockSparseT1<:BlockSparse,ElT2<:Number,VecT2<:AbstractVector,N2}
- return promote_type(BlockSparseT1, BlockSparse{ElT2,VecT2,N2})
+ ::Type{BlockSparseT1}, ::Type{<:NonuniformDiagBlockSparse{ElT2, VecT2, N2}}
+ ) where {BlockSparseT1 <: BlockSparse, ElT2 <: Number, VecT2 <: AbstractVector, N2}
+ return promote_type(BlockSparseT1, BlockSparse{ElT2, VecT2, N2})
end
function promote_rule(
- ::Type{BlockSparseT1}, ::Type{<:UniformDiagBlockSparse{ElT2,ElT2}}
-) where {BlockSparseT1<:BlockSparse,ElT2<:Number}
- return promote_type(BlockSparseT1, ElT2)
+ ::Type{BlockSparseT1}, ::Type{<:UniformDiagBlockSparse{ElT2, ElT2}}
+ ) where {BlockSparseT1 <: BlockSparse, ElT2 <: Number}
+ return promote_type(BlockSparseT1, ElT2)
end
# Convert a DiagBlockSparse storage type to the closest Dense storage type
-dense(::Type{<:NonuniformDiagBlockSparse{ElT,VecT}}) where {ElT,VecT} = Dense{ElT,VecT}
-dense(::Type{<:UniformDiagBlockSparse{ElT}}) where {ElT} = Dense{ElT,Vector{ElT}}
+dense(::Type{<:NonuniformDiagBlockSparse{ElT, VecT}}) where {ElT, VecT} = Dense{ElT, VecT}
+dense(::Type{<:UniformDiagBlockSparse{ElT}}) where {ElT} = Dense{ElT, Vector{ElT}}
-const DiagBlockSparseTensor{ElT,N,StoreT,IndsT} =
- Tensor{ElT,N,StoreT,IndsT} where {StoreT<:DiagBlockSparse}
-const NonuniformDiagBlockSparseTensor{ElT,N,StoreT,IndsT} =
- Tensor{ElT,N,StoreT,IndsT} where {StoreT<:NonuniformDiagBlockSparse}
-const UniformDiagBlockSparseTensor{ElT,N,StoreT,IndsT} =
- Tensor{ElT,N,StoreT,IndsT} where {StoreT<:UniformDiagBlockSparse}
+const DiagBlockSparseTensor{ElT, N, StoreT, IndsT} =
+ Tensor{ElT, N, StoreT, IndsT} where {StoreT <: DiagBlockSparse}
+const NonuniformDiagBlockSparseTensor{ElT, N, StoreT, IndsT} =
+ Tensor{ElT, N, StoreT, IndsT} where {StoreT <: NonuniformDiagBlockSparse}
+const UniformDiagBlockSparseTensor{ElT, N, StoreT, IndsT} =
+ Tensor{ElT, N, StoreT, IndsT} where {StoreT <: UniformDiagBlockSparse}
function DiagBlockSparseTensor(
- ::Type{ElT}, ::UndefInitializer, blocks::Vector, inds
-) where {ElT}
- blockoffsets, nnz = diagblockoffsets(blocks, inds)
- storage = DiagBlockSparse(ElT, undef, blockoffsets, nnz)
- return tensor(storage, inds)
+ ::Type{ElT}, ::UndefInitializer, blocks::Vector, inds
+ ) where {ElT}
+ blockoffsets, nnz = diagblockoffsets(blocks, inds)
+ storage = DiagBlockSparse(ElT, undef, blockoffsets, nnz)
+ return tensor(storage, inds)
end
function DiagBlockSparseTensor(::UndefInitializer, blocks::Vector, inds)
- return DiagBlockSparseTensor(Float64, undef, blocks, inds)
+ return DiagBlockSparseTensor(Float64, undef, blocks, inds)
end
function DiagBlockSparseTensor(::Type{ElT}, blocks::Vector, inds) where {ElT}
- blockoffsets, nnz = diagblockoffsets(blocks, inds)
- storage = DiagBlockSparse(ElT, blockoffsets, nnz)
- return tensor(storage, inds)
+ blockoffsets, nnz = diagblockoffsets(blocks, inds)
+ storage = DiagBlockSparse(ElT, blockoffsets, nnz)
+ return tensor(storage, inds)
end
DiagBlockSparseTensor(blocks::Vector, inds) = DiagBlockSparseTensor(Float64, blocks, inds)
# Uniform case
function DiagBlockSparseTensor(x::Number, blocks::Vector, inds)
- blockoffsets, nnz = diagblockoffsets(blocks, inds)
- storage = DiagBlockSparse(x, blockoffsets)
- return tensor(storage, inds)
+ blockoffsets, nnz = diagblockoffsets(blocks, inds)
+ storage = DiagBlockSparse(x, blockoffsets)
+ return tensor(storage, inds)
end
diagblockoffsets(T::DiagBlockSparseTensor) = diagblockoffsets(storage(T))
@@ -248,23 +248,23 @@ that is a view to the data in that block (to avoid block lookup if the position
is known already).
"""
function blockview(T::DiagBlockSparseTensor, blockT::Block)
- return blockview(T, BlockOffset(blockT, offset(T, blockT)))
+ return blockview(T, BlockOffset(blockT, offset(T, blockT)))
end
getindex(T::DiagBlockSparseTensor, block::Block) = blockview(T, block)
function blockview(T::DiagBlockSparseTensor, bof::BlockOffset)
- blockT, offsetT = bof
- blockdimsT = blockdims(T, blockT)
- blockdiaglengthT = minimum(blockdimsT)
- dataTslice = @view data(storage(T))[(offsetT + 1):(offsetT + blockdiaglengthT)]
- return tensor(Diag(dataTslice), blockdimsT)
+ blockT, offsetT = bof
+ blockdimsT = blockdims(T, blockT)
+ blockdiaglengthT = minimum(blockdimsT)
+ dataTslice = @view data(storage(T))[(offsetT + 1):(offsetT + blockdiaglengthT)]
+ return tensor(Diag(dataTslice), blockdimsT)
end
function blockview(T::UniformDiagBlockSparseTensor, bof::BlockOffset)
- blockT, offsetT = bof
- blockdimsT = blockdims(T, blockT)
- return tensor(Diag(getdiagindex(T, 1)), blockdimsT)
+ blockT, offsetT = bof
+ blockdimsT = blockdims(T, blockT)
+ return tensor(Diag(getdiagindex(T, 1)), blockdimsT)
end
IndexStyle(::Type{<:DiagBlockSparseTensor}) = IndexCartesian()
@@ -272,23 +272,23 @@ IndexStyle(::Type{<:DiagBlockSparseTensor}) = IndexCartesian()
# TODO: this needs to be better (promote element type, check order compatibility,
# etc.)
function convert(
- ::Type{<:DenseTensor{ElT,N}}, T::DiagBlockSparseTensor{ElT,N}
-) where {ElT<:Number,N}
- return dense(T)
+ ::Type{<:DenseTensor{ElT, N}}, T::DiagBlockSparseTensor{ElT, N}
+ ) where {ElT <: Number, N}
+ return dense(T)
end
# These are rules for determining the output of a pairwise contraction of NDTensors
# (given the indices of the output tensors)
function contraction_output_type(
- TensorT1::Type{<:DiagBlockSparseTensor}, TensorT2::Type{<:BlockSparseTensor}, indsR::Tuple
-)
- return similartype(promote_type(TensorT1, TensorT2), indsR)
+ TensorT1::Type{<:DiagBlockSparseTensor}, TensorT2::Type{<:BlockSparseTensor}, indsR::Tuple
+ )
+ return similartype(promote_type(TensorT1, TensorT2), indsR)
end
function contraction_output_type(
- TensorT1::Type{<:BlockSparseTensor}, TensorT2::Type{<:DiagBlockSparseTensor}, indsR::Tuple
-)
- return contraction_output_type(TensorT2, TensorT1, indsR)
+ TensorT1::Type{<:BlockSparseTensor}, TensorT2::Type{<:DiagBlockSparseTensor}, indsR::Tuple
+ )
+ return contraction_output_type(TensorT2, TensorT1, indsR)
end
# This performs the logic that DiagBlockSparseTensor*DiagBlockSparseTensor -> DiagBlockSparseTensor if it is not an outer
@@ -298,24 +298,24 @@ end
# result in a DiagBlockSparseTensor, for efficiency and type stability? What about a general
# SparseTensor result?
function contraction_output_type(
- TensorT1::Type{<:DiagBlockSparseTensor{<:Number,N1}},
- TensorT2::Type{<:DiagBlockSparseTensor{<:Number,N2}},
- indsR::Tuple,
-) where {N1,N2}
- if ValLength(indsR) === Val{N1 + N2}
- # Turn into is_outer(inds1,inds2,indsR) function?
- # How does type inference work with arithmetic of compile time values?
- return similartype(dense(promote_type(TensorT1, TensorT2)), indsR)
- end
- return similartype(promote_type(TensorT1, TensorT2), indsR)
+ TensorT1::Type{<:DiagBlockSparseTensor{<:Number, N1}},
+ TensorT2::Type{<:DiagBlockSparseTensor{<:Number, N2}},
+ indsR::Tuple,
+ ) where {N1, N2}
+ if ValLength(indsR) === Val{N1 + N2}
+ # Turn into is_outer(inds1,inds2,indsR) function?
+ # How does type inference work with arithmetic of compile time values?
+ return similartype(dense(promote_type(TensorT1, TensorT2)), indsR)
+ end
+ return similartype(promote_type(TensorT1, TensorT2), indsR)
end
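
The rule above with plain tensor orders (no package calls): the contraction of two diagonal tensors keeps a diagonal result unless it is an outer product, i.e. the output order equals the sum of the input orders, in which case a dense output type is used.

```julia
is_outer(n1, n2, nr) = nr == n1 + n2

is_outer(2, 2, 2)  # false -> diagonal (DiagBlockSparse-like) output
is_outer(2, 2, 4)  # true  -> dense output
```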
# The output must be initialized as zero since it is sparse, cannot be undefined
function contraction_output(T1::DiagBlockSparseTensor, T2::Tensor, indsR)
- return zero_contraction_output(T1, T2, indsR)
+ return zero_contraction_output(T1, T2, indsR)
end
function contraction_output(T1::Tensor, T2::DiagBlockSparseTensor, indsR)
- return contraction_output(T2, T1, indsR)
+ return contraction_output(T2, T1, indsR)
end
# function contraction_output(T1::DiagBlockSparseTensor, T2::DiagBlockSparseTensor, indsR)
@@ -324,377 +324,378 @@ end
# Determine the contraction output and block contractions
function contraction_output(
- tensor1::DiagBlockSparseTensor,
- labelstensor1,
- tensor2::DiagBlockSparseTensor,
- labelstensor2,
- labelsR,
-)
- indsR = contract_inds(inds(tensor1), labelstensor1, inds(tensor2), labelstensor2, labelsR)
- TensorR = contraction_output_type(typeof(tensor1), typeof(tensor2), indsR)
- blockoffsetsR, contraction_plan = contract_blockoffsets(
- blockoffsets(tensor1),
- inds(tensor1),
- labelstensor1,
- blockoffsets(tensor2),
- inds(tensor2),
- labelstensor2,
- indsR,
- labelsR,
- )
- R = similar(TensorR, blockoffsetsR, indsR)
- return R # , contraction_plan
+ tensor1::DiagBlockSparseTensor,
+ labelstensor1,
+ tensor2::DiagBlockSparseTensor,
+ labelstensor2,
+ labelsR,
+ )
+ indsR = contract_inds(inds(tensor1), labelstensor1, inds(tensor2), labelstensor2, labelsR)
+ TensorR = contraction_output_type(typeof(tensor1), typeof(tensor2), indsR)
+ blockoffsetsR, contraction_plan = contract_blockoffsets(
+ blockoffsets(tensor1),
+ inds(tensor1),
+ labelstensor1,
+ blockoffsets(tensor2),
+ inds(tensor2),
+ labelstensor2,
+ indsR,
+ labelsR,
+ )
+ R = similar(TensorR, blockoffsetsR, indsR)
+ return R # , contraction_plan
end
## TODO: Is there a way to make this generic?
# NDTensors.similar
function similar(
- tensortype::Type{<:DiagBlockSparseTensor}, blockoffsets::BlockOffsets, dims::Tuple
-)
- return Tensor(similar(storagetype(tensortype), blockoffsets, dims), dims)
+ tensortype::Type{<:DiagBlockSparseTensor}, blockoffsets::BlockOffsets, dims::Tuple
+ )
+ return Tensor(similar(storagetype(tensortype), blockoffsets, dims), dims)
end
# NDTensors.similar
function similar(
- storagetype::Type{<:DiagBlockSparse}, blockoffsets::BlockOffsets, dims::Tuple
-)
- # TODO: Improve this with FillArrays.jl
- # data = similar(datatype(storagetype), nnz(blockoffsets, dims))
- data = zero(datatype(storagetype))
- return DiagBlockSparse(data, blockoffsets)
+ storagetype::Type{<:DiagBlockSparse}, blockoffsets::BlockOffsets, dims::Tuple
+ )
+ # TODO: Improve this with FillArrays.jl
+ # data = similar(datatype(storagetype), nnz(blockoffsets, dims))
+ data = zero(datatype(storagetype))
+ return DiagBlockSparse(data, blockoffsets)
end
-function array(T::DiagBlockSparseTensor{ElT,N}) where {ElT,N}
- return array(dense(T))
+function array(T::DiagBlockSparseTensor{ElT, N}) where {ElT, N}
+ return array(dense(T))
end
-matrix(T::DiagBlockSparseTensor{<:Number,2}) = array(T)
-vector(T::DiagBlockSparseTensor{<:Number,1}) = array(T)
+matrix(T::DiagBlockSparseTensor{<:Number, 2}) = array(T)
+vector(T::DiagBlockSparseTensor{<:Number, 1}) = array(T)
-function Array{ElT,N}(T::DiagBlockSparseTensor{ElT,N}) where {ElT,N}
- return array(T)
+function Array{ElT, N}(T::DiagBlockSparseTensor{ElT, N}) where {ElT, N}
+ return array(T)
end
-function Array(T::DiagBlockSparseTensor{ElT,N}) where {ElT,N}
- return Array{ElT,N}(T)
+function Array(T::DiagBlockSparseTensor{ElT, N}) where {ElT, N}
+ return Array{ElT, N}(T)
end
getdiagindex(T::DiagBlockSparseTensor{<:Number}, ind::Int) = storage(T)[ind]
# XXX: handle case of missing diagonal blocks
function setdiagindex!(T::DiagBlockSparseTensor{<:Number}, val, ind::Int)
- storage(T)[ind] = val
- return T
+ storage(T)[ind] = val
+ return T
end
function setdiag(T::DiagBlockSparseTensor, val, ind::Int)
- return tensor(DiagBlockSparse(val), inds(T))
+ return tensor(DiagBlockSparse(val), inds(T))
end
function setdiag(T::UniformDiagBlockSparseTensor, val, ind::Int)
- return tensor(DiagBlockSparse(val, blockoffsets(T)), inds(T))
+ return tensor(DiagBlockSparse(val, blockoffsets(T)), inds(T))
end
@propagate_inbounds function getindex(
- T::DiagBlockSparseTensor{ElT,N}, inds::Vararg{Int,N}
-) where {ElT,N}
- if all(==(inds[1]), inds)
- return storage(T)[inds[1]]
- else
- return zero(eltype(ElT))
- end
+ T::DiagBlockSparseTensor{ElT, N}, inds::Vararg{Int, N}
+ ) where {ElT, N}
+ if all(==(inds[1]), inds)
+ return storage(T)[inds[1]]
+ else
+ return zero(eltype(ElT))
+ end
end
-@propagate_inbounds function getindex(T::DiagBlockSparseTensor{<:Number,1}, ind::Int)
- return storage(T)[ind]
+@propagate_inbounds function getindex(T::DiagBlockSparseTensor{<:Number, 1}, ind::Int)
+ return storage(T)[ind]
end
-@propagate_inbounds function getindex(T::DiagBlockSparseTensor{<:Number,0})
- return storage(T)[1]
+@propagate_inbounds function getindex(T::DiagBlockSparseTensor{<:Number, 0})
+ return storage(T)[1]
end
# Set diagonal elements
# Throw error for off-diagonal
@propagate_inbounds function setindex!(
- T::DiagBlockSparseTensor{<:Number,N}, val, inds::Vararg{Int,N}
-) where {N}
- all(==(inds[1]), inds) ||
- error("Cannot set off-diagonal element of DiagBlockSparse storage")
- storage(T)[inds[1]] = val
- return T
+ T::DiagBlockSparseTensor{<:Number, N}, val, inds::Vararg{Int, N}
+ ) where {N}
+ all(==(inds[1]), inds) ||
+ error("Cannot set off-diagonal element of DiagBlockSparse storage")
+ storage(T)[inds[1]] = val
+ return T
end
-@propagate_inbounds function setindex!(T::DiagBlockSparseTensor{<:Number,1}, val, ind::Int)
- storage(T)[ind] = val
- return T
+@propagate_inbounds function setindex!(T::DiagBlockSparseTensor{<:Number, 1}, val, ind::Int)
+ storage(T)[ind] = val
+ return T
end
-@propagate_inbounds function setindex!(T::DiagBlockSparseTensor{<:Number,0}, val)
- storage(T)[1] = val
- return T
+@propagate_inbounds function setindex!(T::DiagBlockSparseTensor{<:Number, 0}, val)
+ storage(T)[1] = val
+ return T
end
function setindex!(
- T::UniformDiagBlockSparseTensor{<:Number,N}, val, inds::Vararg{Int,N}
-) where {N}
- return error("Cannot set elements of a uniform DiagBlockSparse storage")
+ T::UniformDiagBlockSparseTensor{<:Number, N}, val, inds::Vararg{Int, N}
+ ) where {N}
+ return error("Cannot set elements of a uniform DiagBlockSparse storage")
end
# TODO: make a fill!! that works for uniform and non-uniform
#fill!(T::DiagBlockSparseTensor,v) = fill!(storage(T),v)
function dense(
- ::Type{<:Tensor{ElT,N,StoreT,IndsT}}
-) where {ElT,N,StoreT<:DiagBlockSparse,IndsT}
- return Tensor{ElT,N,dense(StoreT),IndsT}
+ ::Type{<:Tensor{ElT, N, StoreT, IndsT}}
+ ) where {ElT, N, StoreT <: DiagBlockSparse, IndsT}
+ return Tensor{ElT, N, dense(StoreT), IndsT}
end
# convert to Dense
function dense(T::DiagBlockSparseTensor)
- return dense(denseblocks(T))
+ return dense(denseblocks(T))
end
# convert to BlockSparse
function denseblocks(D::Tensor)
- nzblocksD = nzblocks(D)
- T = BlockSparseTensor(datatype(D), nzblocksD, inds(D))
- for b in nzblocksD
- T[b] = D[b]
- end
- return T
+ nzblocksD = nzblocks(D)
+ T = BlockSparseTensor(datatype(D), nzblocksD, inds(D))
+ for b in nzblocksD
+ T[b] = D[b]
+ end
+ return T
end
function outer!(
- R::DenseTensor{<:Number,NR},
- T1::DiagBlockSparseTensor{<:Number,N1},
- T2::DiagBlockSparseTensor{<:Number,N2},
-) where {NR,N1,N2}
- for i1 in 1:diaglength(T1), i2 in 1:diaglength(T2)
- indsR = CartesianIndex{NR}(ntuple(r -> r ≤ N1 ? i1 : i2, Val(NR)))
- R[indsR] = getdiagindex(T1, i1) * getdiagindex(T2, i2)
- end
- return R
+ R::DenseTensor{<:Number, NR},
+ T1::DiagBlockSparseTensor{<:Number, N1},
+ T2::DiagBlockSparseTensor{<:Number, N2},
+ ) where {NR, N1, N2}
+ for i1 in 1:diaglength(T1), i2 in 1:diaglength(T2)
+ indsR = CartesianIndex{NR}(ntuple(r -> r ≤ N1 ? i1 : i2, Val(NR)))
+ R[indsR] = getdiagindex(T1, i1) * getdiagindex(T2, i2)
+ end
+ return R
end
# TODO: write an optimized version of this?
function outer!(R::DenseTensor{ElR}, T1::DenseTensor, T2::DiagBlockSparseTensor) where {ElR}
- R .= zero(ElR)
- outer!(R, T1, dense(T2))
- return R
+ R .= zero(ElR)
+ outer!(R, T1, dense(T2))
+ return R
end
function outer!(R::DenseTensor{ElR}, T1::DiagBlockSparseTensor, T2::DenseTensor) where {ElR}
- R .= zero(ElR)
- outer!(R, dense(T1), T2)
- return R
+ R .= zero(ElR)
+ outer!(R, dense(T1), T2)
+ return R
end
# Write an in-place version
function outer(
- T1::DiagBlockSparseTensor{ElT1,N1}, T2::DiagBlockSparseTensor{ElT2,N2}
-) where {ElT1,ElT2,N1,N2}
- indsR = unioninds(inds(T1), inds(T2))
- R = tensor(Dense(zeros(promote_type(ElT1, ElT2), dim(indsR))), indsR)
- outer!(R, T1, T2)
- return R
+ T1::DiagBlockSparseTensor{ElT1, N1}, T2::DiagBlockSparseTensor{ElT2, N2}
+ ) where {ElT1, ElT2, N1, N2}
+ indsR = unioninds(inds(T1), inds(T2))
+ R = tensor(Dense(zeros(promote_type(ElT1, ElT2), dim(indsR))), indsR)
+ outer!(R, T1, T2)
+ return R
end
function permutedims!(
- R::DiagBlockSparseTensor{<:Number,N},
- T::DiagBlockSparseTensor{<:Number,N},
- perm::NTuple{N,Int},
- f::Function=(r, t) -> t,
-) where {N}
- # TODO: check that inds(R)==permute(inds(T),perm)?
- for i in 1:diaglength(R)
- @inbounds setdiagindex!(R, f(getdiagindex(R, i), getdiagindex(T, i)), i)
- end
- return R
+ R::DiagBlockSparseTensor{<:Number, N},
+ T::DiagBlockSparseTensor{<:Number, N},
+ perm::NTuple{N, Int},
+ f::Function = (r, t) -> t,
+ ) where {N}
+ # TODO: check that inds(R)==permute(inds(T),perm)?
+ for i in 1:diaglength(R)
+ @inbounds setdiagindex!(R, f(getdiagindex(R, i), getdiagindex(T, i)), i)
+ end
+ return R
end
function permutedims(
- T::UniformDiagBlockSparseTensor{ElT,N}, perm::NTuple{N,Int}, f::Function=identity
-) where {ElT,N}
- R = tensor(DiagBlockSparse(f(getdiagindex(T, 1))), permute(inds(T), perm))
- return R
+ T::UniformDiagBlockSparseTensor{ElT, N}, perm::NTuple{N, Int}, f::Function = identity
+ ) where {ElT, N}
+ R = tensor(DiagBlockSparse(f(getdiagindex(T, 1))), permute(inds(T), perm))
+ return R
end
# Version that may overwrite in-place or may return the result
function permutedims!!(
- R::NonuniformDiagBlockSparseTensor{<:Number,N},
- T::NonuniformDiagBlockSparseTensor{<:Number,N},
- perm::NTuple{N,Int},
- f::Function=(r, t) -> t,
-) where {N}
- RR = convert(promote_type(typeof(R), typeof(T)), R)
- permutedims!(RR, T, perm, f)
- return RR
+ R::NonuniformDiagBlockSparseTensor{<:Number, N},
+ T::NonuniformDiagBlockSparseTensor{<:Number, N},
+ perm::NTuple{N, Int},
+ f::Function = (r, t) -> t,
+ ) where {N}
+ RR = convert(promote_type(typeof(R), typeof(T)), R)
+ permutedims!(RR, T, perm, f)
+ return RR
end
function permutedims!!(
- R::UniformDiagBlockSparseTensor{ElR,N},
- T::UniformDiagBlockSparseTensor{ElT,N},
- perm::NTuple{N,Int},
- f::Function=(r, t) -> t,
-) where {ElR,ElT,N}
- RR = convert(promote_type(typeof(R), typeof(T)), R)
- RR = tensor(DiagBlockSparse(f(getdiagindex(RR, 1), getdiagindex(T, 1))), inds(RR))
- return RR
+ R::UniformDiagBlockSparseTensor{ElR, N},
+ T::UniformDiagBlockSparseTensor{ElT, N},
+ perm::NTuple{N, Int},
+ f::Function = (r, t) -> t,
+ ) where {ElR, ElT, N}
+ RR = convert(promote_type(typeof(R), typeof(T)), R)
+ RR = tensor(DiagBlockSparse(f(getdiagindex(RR, 1), getdiagindex(T, 1))), inds(RR))
+ return RR
end
function permutedims!(
- R::DenseTensor{ElR,N},
- T::DiagBlockSparseTensor{ElT,N},
- perm::NTuple{N,Int},
- f::Function=(r, t) -> t,
-) where {ElR,ElT,N}
- for i in 1:diaglength(T)
- @inbounds setdiagindex!(R, f(getdiagindex(R, i), getdiagindex(T, i)), i)
- end
- return R
+ R::DenseTensor{ElR, N},
+ T::DiagBlockSparseTensor{ElT, N},
+ perm::NTuple{N, Int},
+ f::Function = (r, t) -> t,
+ ) where {ElR, ElT, N}
+ for i in 1:diaglength(T)
+ @inbounds setdiagindex!(R, f(getdiagindex(R, i), getdiagindex(T, i)), i)
+ end
+ return R
end
function permutedims!!(
- R::DenseTensor{ElR,N},
- T::DiagBlockSparseTensor{ElT,N},
- perm::NTuple{N,Int},
- f::Function=(r, t) -> t,
-) where {ElR,ElT,N}
- permutedims!(R, T, perm, f)
- return R
+ R::DenseTensor{ElR, N},
+ T::DiagBlockSparseTensor{ElT, N},
+ perm::NTuple{N, Int},
+ f::Function = (r, t) -> t,
+ ) where {ElR, ElT, N}
+ permutedims!(R, T, perm, f)
+ return R
end
function _contract!!(
- R::UniformDiagBlockSparseTensor{ElR,NR},
- labelsR,
- T1::UniformDiagBlockSparseTensor{<:Number,N1},
- labelsT1,
- T2::UniformDiagBlockSparseTensor{<:Number,N2},
- labelsT2,
-) where {ElR,NR,N1,N2}
- if NR == 0 # If all indices of A and B are contracted
- # all indices are summed over, just add the product of the diagonal
- # elements of A and B
- R = setdiag(R, diaglength(T1) * getdiagindex(T1, 1) * getdiagindex(T2, 1), 1)
- else
- # not all indices are summed over, set the diagonals of the result
- # to the product of the diagonals of A and B
- R = setdiag(R, getdiagindex(T1, 1) * getdiagindex(T2, 1), 1)
- end
- return R
+ R::UniformDiagBlockSparseTensor{ElR, NR},
+ labelsR,
+ T1::UniformDiagBlockSparseTensor{<:Number, N1},
+ labelsT1,
+ T2::UniformDiagBlockSparseTensor{<:Number, N2},
+ labelsT2,
+ ) where {ElR, NR, N1, N2}
+ if NR == 0 # If all indices of A and B are contracted
+ # all indices are summed over, just add the product of the diagonal
+ # elements of A and B
+ R = setdiag(R, diaglength(T1) * getdiagindex(T1, 1) * getdiagindex(T2, 1), 1)
+ else
+ # not all indices are summed over, set the diagonals of the result
+ # to the product of the diagonals of A and B
+ R = setdiag(R, getdiagindex(T1, 1) * getdiagindex(T2, 1), 1)
+ end
+ return R
end
function LinearAlgebra.norm(D::UniformDiagBlockSparseTensor)
- normD² = zero(eltype(D))
- for b in nzblocks(D)
- normD² += norm(D[b])^2
- end
- return √(abs(normD²))
+ normD² = zero(eltype(D))
+ for b in nzblocks(D)
+ normD² += norm(D[b])^2
+ end
+ return √(abs(normD²))
end
function contraction_output(
- T1::TensorT1, labelsT1, T2::TensorT2, labelsT2, labelsR
-) where {TensorT1<:BlockSparseTensor,TensorT2<:DiagBlockSparseTensor}
- indsR = contract_inds(inds(T1), labelsT1, inds(T2), labelsT2, labelsR)
- TensorR = contraction_output_type(TensorT1, TensorT2, indsR)
- blockoffsetsR, contraction_plan = contract_blockoffsets(
- blockoffsets(T1),
- inds(T1),
- labelsT1,
- blockoffsets(T2),
- inds(T2),
- labelsT2,
- indsR,
- labelsR,
- )
- R = zeros(TensorR, blockoffsetsR, indsR)
- return R, contraction_plan
+ T1::TensorT1, labelsT1, T2::TensorT2, labelsT2, labelsR
+ ) where {TensorT1 <: BlockSparseTensor, TensorT2 <: DiagBlockSparseTensor}
+ indsR = contract_inds(inds(T1), labelsT1, inds(T2), labelsT2, labelsR)
+ TensorR = contraction_output_type(TensorT1, TensorT2, indsR)
+ blockoffsetsR, contraction_plan = contract_blockoffsets(
+ blockoffsets(T1),
+ inds(T1),
+ labelsT1,
+ blockoffsets(T2),
+ inds(T2),
+ labelsT2,
+ indsR,
+ labelsR,
+ )
+ R = zeros(TensorR, blockoffsetsR, indsR)
+ return R, contraction_plan
end
function contract(
- T1::BlockSparseTensor,
- labelsT1,
- T2::DiagBlockSparseTensor,
- labelsT2,
- labelsR=contract_labels(labelsT1, labelsT2),
-)
- R, contraction_plan = contraction_output(T1, labelsT1, T2, labelsT2, labelsR)
- R = contract!(R, labelsR, T1, labelsT1, T2, labelsT2, contraction_plan)
- return R
+ T1::BlockSparseTensor,
+ labelsT1,
+ T2::DiagBlockSparseTensor,
+ labelsT2,
+ labelsR = contract_labels(labelsT1, labelsT2),
+ )
+ R, contraction_plan = contraction_output(T1, labelsT1, T2, labelsT2, labelsR)
+ R = contract!(R, labelsR, T1, labelsT1, T2, labelsT2, contraction_plan)
+ return R
end
function contract(
- T1::DiagBlockSparseTensor,
- labelsT1,
- T2::BlockSparseTensor,
- labelsT2,
- labelsR=contract_labels(labelsT2, labelsT1),
-)
- return contract(T2, labelsT2, T1, labelsT1, labelsR)
+ T1::DiagBlockSparseTensor,
+ labelsT1,
+ T2::BlockSparseTensor,
+ labelsT2,
+ labelsR = contract_labels(labelsT2, labelsT1),
+ )
+ return contract(T2, labelsT2, T1, labelsT1, labelsR)
end
function contract!(
- R::BlockSparseTensor{ElR,NR},
- labelsR,
- T1::BlockSparseTensor,
- labelsT1,
- T2::DiagBlockSparseTensor,
- labelsT2,
- contraction_plan,
-) where {ElR<:Number,NR}
- if any(b -> !allequal(Tuple(b)), nzblocks(T2))
- return error(
- "When contracting a BlockSparse tensor with a DiagBlockSparse tensor, the DiagBlockSparse tensor must be block diagonal for the time being.",
- )
- end
- already_written_to = Dict{Block{NR},Bool}()
- indsR = inds(R)
- indsT1 = inds(T1)
- indsT2 = inds(T2)
- # In R .= α .* (T1 * T2) .+ β .* R
- α = one(ElR)
- for (block1, block2, blockR) in contraction_plan
- T1block = T1[block1]
- T2block = T2[block2]
- Rblock = R[blockR]
-
- #
- α = compute_alpha(
- ElR, labelsR, blockR, indsR, labelsT1, block1, indsT1, labelsT2, block2, indsT2
- )
-
- β = one(ElR)
- if !haskey(already_written_to, blockR)
- already_written_to[blockR] = true
- # Overwrite the block of R
- β = zero(ElR)
+ R::BlockSparseTensor{ElR, NR},
+ labelsR,
+ T1::BlockSparseTensor,
+ labelsT1,
+ T2::DiagBlockSparseTensor,
+ labelsT2,
+ contraction_plan,
+ ) where {ElR <: Number, NR}
+ if any(b -> !allequal(Tuple(b)), nzblocks(T2))
+ return error(
+ "When contracting a BlockSparse tensor with a DiagBlockSparse tensor, the DiagBlockSparse tensor must be block diagonal for the time being.",
+ )
end
- contract!(
- expose(Rblock), labelsR, expose(T1block), labelsT1, expose(T2block), labelsT2, α, β
- )
- end
- return R
+ already_written_to = Dict{Block{NR}, Bool}()
+ indsR = inds(R)
+ indsT1 = inds(T1)
+ indsT2 = inds(T2)
+ # In R .= α .* (T1 * T2) .+ β .* R
+ α = one(ElR)
+ for (block1, block2, blockR) in contraction_plan
+ T1block = T1[block1]
+ T2block = T2[block2]
+ Rblock = R[blockR]
+
+ #
+ α = compute_alpha(
+ ElR, labelsR, blockR, indsR, labelsT1, block1, indsT1, labelsT2, block2, indsT2
+ )
+
+ β = one(ElR)
+ if !haskey(already_written_to, blockR)
+ already_written_to[blockR] = true
+ # Overwrite the block of R
+ β = zero(ElR)
+ end
+ contract!(
+ expose(Rblock), labelsR, expose(T1block), labelsT1, expose(T2block), labelsT2, α, β
+ )
+ end
+ return R
end
function contract!(
- C::BlockSparseTensor,
- Clabels,
- A::BlockSparseTensor,
- Alabels,
- B::DiagBlockSparseTensor,
- Blabels,
-)
- return contract!(C, Clabels, B, Blabels, A, Alabels)
+ C::BlockSparseTensor,
+ Clabels,
+ A::BlockSparseTensor,
+ Alabels,
+ B::DiagBlockSparseTensor,
+ Blabels,
+ )
+ return contract!(C, Clabels, B, Blabels, A, Alabels)
end
function Base.show(io::IO, mime::MIME"text/plain", T::DiagBlockSparseTensor)
- summary(io, T)
- for (n, block) in enumerate(keys(diagblockoffsets(T)))
- blockdimsT = blockdims(T, block)
- println(io, block)
- println(io, " [", _range2string(blockstart(T, block), blockend(T, block)), "]")
- print_tensor(io, blockview(T, block))
- n < nnzblocks(T) && print(io, "\n\n")
- end
+ summary(io, T)
+ for (n, block) in enumerate(keys(diagblockoffsets(T)))
+ blockdimsT = blockdims(T, block)
+ println(io, block)
+ println(io, " [", _range2string(blockstart(T, block), blockend(T, block)), "]")
+ print_tensor(io, blockview(T, block))
+ n < nnzblocks(T) && print(io, "\n\n")
+ end
+ return nothing
end
show(io::IO, T::DiagBlockSparseTensor) = show(io, MIME("text/plain"), T)
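
As a quick illustration of the two branches of `_contract!!` for uniform `DiagBlockSparse` tensors above, here is the arithmetic written out in plain Julia with hypothetical values (none of these numbers come from the diff): when every index is contracted the result collapses to a trace, otherwise the uniform diagonal values simply multiply.

```julia
# Hypothetical uniform diagonal values and diagonal length, for illustration only.
diaglength = 5
d1, d2 = 2.0, 3.0

# NR == 0 branch: all indices contracted, so the result is the trace of the product.
fully_contracted = diaglength * d1 * d2   # 30.0

# NR > 0 branch: the result stays uniform-diagonal with value d1 * d2.
partially_contracted = d1 * d2            # 6.0
```
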
diff --git a/NDTensors/src/deprecated.jl b/NDTensors/src/deprecated.jl
index f9afd61fde..7ca9772136 100644
--- a/NDTensors/src/deprecated.jl
+++ b/NDTensors/src/deprecated.jl
@@ -1,4 +1,3 @@
-
# NDTensors.jl
@deprecate use_tblis() NDTensors.using_tblis()
@deprecate enable_tblis!() NDTensors.enable_tblis()
diff --git a/NDTensors/src/diag/diag.jl b/NDTensors/src/diag/diag.jl
index ad89444896..d6a6e9396b 100644
--- a/NDTensors/src/diag/diag.jl
+++ b/NDTensors/src/diag/diag.jl
@@ -1,37 +1,36 @@
-
# Diag can have either Vector storage, in which case
# it is a general Diag tensor, or scalar storage,
# in which case the diagonal has a uniform value
-struct Diag{ElT,DataT} <: TensorStorage{ElT}
- data::DataT
- function Diag{ElT,DataT}(data) where {ElT,DataT<:AbstractVector{ElT}}
- return new{ElT,DataT}(data)
- end
- function Diag{ElT,ElT}(data) where {ElT}
- return new{ElT,ElT}(data)
- end
+struct Diag{ElT, DataT} <: TensorStorage{ElT}
+ data::DataT
+ function Diag{ElT, DataT}(data) where {ElT, DataT <: AbstractVector{ElT}}
+ return new{ElT, DataT}(data)
+ end
+ function Diag{ElT, ElT}(data) where {ElT}
+ return new{ElT, ElT}(data)
+ end
end
-const NonuniformDiag{ElT,DataT} = Diag{ElT,DataT} where {DataT<:AbstractVector}
+const NonuniformDiag{ElT, DataT} = Diag{ElT, DataT} where {DataT <: AbstractVector}
-const UniformDiag{ElT,DataT} = Diag{ElT,DataT} where {DataT<:Number}
+const UniformDiag{ElT, DataT} = Diag{ElT, DataT} where {DataT <: Number}
# Diag constructors
-Diag(data::DataT) where {DataT<:AbstractVector{ElT}} where {ElT} = Diag{ElT,DataT}(data)
+Diag(data::DataT) where {DataT <: AbstractVector{ElT}} where {ElT} = Diag{ElT, DataT}(data)
-Diag(data::ElT) where {ElT<:Number} = Diag{ElT,ElT}(data)
+Diag(data::ElT) where {ElT <: Number} = Diag{ElT, ElT}(data)
-function Diag{ElR}(data::AbstractVector{ElT}) where {ElR,ElT}
- return Diag(convert(similartype(typeof(data), ElR), data))
+function Diag{ElR}(data::AbstractVector{ElT}) where {ElR, ElT}
+ return Diag(convert(similartype(typeof(data), ElR), data))
end
-Diag(::Type{ElT}, n::Integer) where {ElT<:Number} = Diag(zeros(ElT, n))
+Diag(::Type{ElT}, n::Integer) where {ElT <: Number} = Diag(zeros(ElT, n))
-Diag(x::ElT, n::Integer) where {ElT<:Number} = Diag(fill(x, n))
+Diag(x::ElT, n::Integer) where {ElT <: Number} = Diag(fill(x, n))
# End Diag constructors
-datatype(::Type{<:Diag{<:Any,DataT}}) where {DataT} = DataT
+datatype(::Type{<:Diag{<:Any, DataT}}) where {DataT} = DataT
setdata(D::Diag, ndata) = Diag(ndata)
setdata(storagetype::Type{<:Diag}, data) = Diag(data)
@@ -40,41 +39,41 @@ copy(D::Diag) = Diag(copy(data(D)))
# Special printing for uniform Diag
function Base.show(io::IO, mime::MIME"text/plain", diag::UniformDiag)
- println(io, typeof(diag))
- println(io, "Diag storage with uniform diagonal value:")
- println(io, diag[1])
- return nothing
+ println(io, typeof(diag))
+ println(io, "Diag storage with uniform diagonal value:")
+ println(io, diag[1])
+ return nothing
end
getindex(D::UniformDiag, i::Int) = data(D)
function setindex!(D::UniformDiag, val, i::Int)
- return error("Cannot set elements of a uniform Diag storage")
+ return error("Cannot set elements of a uniform Diag storage")
end
# Deal with uniform Diag conversion
-function convert(::Type{<:Diag{ElT,DataT}}, D::Diag) where {ElT,DataT<:AbstractArray}
- @assert data(D) isa AbstractArray
- return Diag(convert(DataT, data(D)))
+function convert(::Type{<:Diag{ElT, DataT}}, D::Diag) where {ElT, DataT <: AbstractArray}
+ @assert data(D) isa AbstractArray
+ return Diag(convert(DataT, data(D)))
end
-function convert(::Type{<:Diag{ElT,DataT}}, D::Diag) where {ElT,DataT<:Number}
- @assert data(D) isa Number
- return Diag(convert(DataT, data(D)))
+function convert(::Type{<:Diag{ElT, DataT}}, D::Diag) where {ElT, DataT <: Number}
+ @assert data(D) isa Number
+ return Diag(convert(DataT, data(D)))
end
function generic_zeros(diagT::Type{<:NonuniformDiag{ElT}}, dim::Integer) where {ElT}
- return diagT(generic_zeros(datatype(diagT), dim))
+ return diagT(generic_zeros(datatype(diagT), dim))
end
generic_zeros(diagT::Type{<:UniformDiag{ElT}}, dim::Integer) where {ElT} = diagT(zero(ElT))
function generic_zeros(diagT::Type{<:Diag{ElT}}, dim::Integer) where {ElT}
- return generic_zeros(diagT{default_datatype(ElT)}, dim)
+ return generic_zeros(diagT{default_datatype(ElT)}, dim)
end
function generic_zeros(diagT::Type{<:Diag}, dim::Integer)
- return generic_zeros(diagT{default_eltype()}, dim)
+ return generic_zeros(diagT{default_eltype()}, dim)
end
#
@@ -83,18 +82,18 @@ end
#
function promote_rule(
- ::Type{<:UniformDiag{ElT1}}, ::Type{<:UniformDiag{ElT2}}
-) where {ElT1,ElT2}
- ElR = promote_type(ElT1, ElT2)
- return Diag{ElR,ElR}
+ ::Type{<:UniformDiag{ElT1}}, ::Type{<:UniformDiag{ElT2}}
+ ) where {ElT1, ElT2}
+ ElR = promote_type(ElT1, ElT2)
+ return Diag{ElR, ElR}
end
function promote_rule(
- ::Type{<:NonuniformDiag{ElT1,DataT1}}, ::Type{<:NonuniformDiag{ElT2,DataT2}}
-) where {ElT1,DataT1<:AbstractVector,ElT2,DataT2<:AbstractVector}
- ElR = promote_type(ElT1, ElT2)
- VecR = promote_type(DataT1, DataT2)
- return Diag{ElR,VecR}
+ ::Type{<:NonuniformDiag{ElT1, DataT1}}, ::Type{<:NonuniformDiag{ElT2, DataT2}}
+ ) where {ElT1, DataT1 <: AbstractVector, ElT2, DataT2 <: AbstractVector}
+ ElR = promote_type(ElT1, ElT2)
+ VecR = promote_type(DataT1, DataT2)
+ return Diag{ElR, VecR}
end
# This is an internal definition, is there a more general way?
@@ -109,26 +108,26 @@ end
# TODO: how do we make this work more generally for T2<:AbstractVector{S2}?
# Make a similartype(AbstractVector{S2},T1) -> AbstractVector{T1} function?
function promote_rule(
- ::Type{<:UniformDiag{ElT1,DataT1}}, ::Type{<:NonuniformDiag{ElT2,AbstractArray{ElT2}}}
-) where {ElT1,DataT1<:Number,ElT2}
- ElR = promote_type(ElT1, ElT2)
+ ::Type{<:UniformDiag{ElT1, DataT1}}, ::Type{<:NonuniformDiag{ElT2, AbstractArray{ElT2}}}
+ ) where {ElT1, DataT1 <: Number, ElT2}
+ ElR = promote_type(ElT1, ElT2)
- VecR = Vector{ElR}
- return Diag{ElR,VecR}
+ VecR = Vector{ElR}
+ return Diag{ElR, VecR}
end
function promote_rule(
- ::Type{DenseT1}, ::Type{<:NonuniformDiag{ElT2,DataT2}}
-) where {DenseT1<:Dense,ElT2,DataT2<:AbstractVector}
- return promote_type(DenseT1, Dense{ElT2,DataT2})
+ ::Type{DenseT1}, ::Type{<:NonuniformDiag{ElT2, DataT2}}
+ ) where {DenseT1 <: Dense, ElT2, DataT2 <: AbstractVector}
+ return promote_type(DenseT1, Dense{ElT2, DataT2})
end
function promote_rule(
- ::Type{DenseT1}, ::Type{<:UniformDiag{ElT2,DataT2}}
-) where {DenseT1<:Dense,ElT2,DataT2<:Number}
- return promote_type(DenseT1, ElT2)
+ ::Type{DenseT1}, ::Type{<:UniformDiag{ElT2, DataT2}}
+ ) where {DenseT1 <: Dense, ElT2, DataT2 <: Number}
+ return promote_type(DenseT1, ElT2)
end
# Convert a Diag storage type to the closest Dense storage type
-dense(::Type{<:NonuniformDiag{ElT,DataT}}) where {ElT,DataT} = Dense{ElT,DataT}
-dense(::Type{<:UniformDiag{ElT}}) where {ElT} = Dense{ElT,default_datatype(ElT)}
+dense(::Type{<:NonuniformDiag{ElT, DataT}}) where {ElT, DataT} = Dense{ElT, DataT}
+dense(::Type{<:UniformDiag{ElT}}) where {ElT} = Dense{ElT, default_datatype(ElT)}
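
To make the `Diag` storage split above easier to follow, here is a standalone sketch using a hypothetical `DiagSketch` type (not the NDTensors `Diag`): a vector-valued data parameter gives nonuniform storage with one entry per diagonal element, while a scalar-valued data parameter gives uniform storage holding a single value.

```julia
struct DiagSketch{ElT, DataT}
    data::DataT
end
DiagSketch(data::AbstractVector{ElT}) where {ElT} = DiagSketch{ElT, typeof(data)}(data)
DiagSketch(x::ElT) where {ElT <: Number} = DiagSketch{ElT, ElT}(x)

# Nonuniform: one stored value per diagonal element.
getdiag(D::DiagSketch{<:Any, <:AbstractVector}, i::Int) = D.data[i]
# Uniform: the same scalar for every diagonal element.
getdiag(D::DiagSketch{<:Any, <:Number}, i::Int) = D.data

getdiag(DiagSketch([1.0, 2.0, 3.0]), 2)  # 2.0
getdiag(DiagSketch(4.2), 7)              # 4.2
```
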
diff --git a/NDTensors/src/diag/tensoralgebra/contract.jl b/NDTensors/src/diag/tensoralgebra/contract.jl
index 6244c0dffd..650e997569 100644
--- a/NDTensors/src/diag/tensoralgebra/contract.jl
+++ b/NDTensors/src/diag/tensoralgebra/contract.jl
@@ -1,14 +1,14 @@
# These are rules for determining the output of a pairwise contraction of NDTensors
# (given the indices of the output tensors)
function contraction_output_type(
- tensortype1::Type{<:DiagTensor}, tensortype2::Type{<:DenseTensor}, indsR
-)
- return similartype(promote_type(tensortype1, tensortype2), indsR)
+ tensortype1::Type{<:DiagTensor}, tensortype2::Type{<:DenseTensor}, indsR
+ )
+ return similartype(promote_type(tensortype1, tensortype2), indsR)
end
function contraction_output_type(
- tensortype1::Type{<:DenseTensor}, tensortype2::Type{<:DiagTensor}, indsR
-)
- return contraction_output_type(tensortype2, tensortype1, indsR)
+ tensortype1::Type{<:DenseTensor}, tensortype2::Type{<:DiagTensor}, indsR
+ )
+ return contraction_output_type(tensortype2, tensortype1, indsR)
end
# This performs the logic that DiagTensor*DiagTensor -> DiagTensor if it is not an outer
@@ -18,209 +18,209 @@ end
# result in a DiagTensor, for efficiency and type stability? What about a general
# SparseTensor result?
function contraction_output_type(
- tensortype1::Type{<:DiagTensor}, tensortype2::Type{<:DiagTensor}, indsR
-)
- if length(indsR) == ndims(tensortype1) + ndims(tensortype2)
- # Turn into is_outer(inds1,inds2,indsR) function?
- # How does type inference work with arithmatic of compile time values?
- return similartype(dense(promote_type(tensortype1, tensortype2)), indsR)
- end
- return similartype(promote_type(tensortype1, tensortype2), indsR)
+ tensortype1::Type{<:DiagTensor}, tensortype2::Type{<:DiagTensor}, indsR
+ )
+ if length(indsR) == ndims(tensortype1) + ndims(tensortype2)
+ # Turn into is_outer(inds1,inds2,indsR) function?
+        # How does type inference work with arithmetic of compile-time values?
+ return similartype(dense(promote_type(tensortype1, tensortype2)), indsR)
+ end
+ return similartype(promote_type(tensortype1, tensortype2), indsR)
end
# The output must be initialized as zero since it is sparse, cannot be undefined
function contraction_output(T1::DiagTensor, T2::Tensor, indsR)
- return zero_contraction_output(T1, T2, indsR)
+ return zero_contraction_output(T1, T2, indsR)
end
contraction_output(T1::Tensor, T2::DiagTensor, indsR) = contraction_output(T2, T1, indsR)
function contraction_output(T1::DiagTensor, T2::DiagTensor, indsR)
- return zero_contraction_output(T1, T2, indsR)
+ return zero_contraction_output(T1, T2, indsR)
end
function _contract!!(
- R::UniformDiagTensor{ElR,NR},
- labelsR,
- T1::UniformDiagTensor{<:Number,N1},
- labelsT1,
- T2::UniformDiagTensor{<:Number,N2},
- labelsT2,
-) where {ElR,NR,N1,N2}
- if NR == 0 # If all indices of A and B are contracted
- # all indices are summed over, just add the product of the diagonal
- # elements of A and B
- R = setdiag(R, diaglength(T1) * getdiagindex(T1, 1) * getdiagindex(T2, 1))
- else
- # not all indices are summed over, set the diagonals of the result
- # to the product of the diagonals of A and B
- R = setdiag(R, getdiagindex(T1, 1) * getdiagindex(T2, 1))
- end
- return R
+ R::UniformDiagTensor{ElR, NR},
+ labelsR,
+ T1::UniformDiagTensor{<:Number, N1},
+ labelsT1,
+ T2::UniformDiagTensor{<:Number, N2},
+ labelsT2,
+ ) where {ElR, NR, N1, N2}
+ if NR == 0 # If all indices of A and B are contracted
+ # all indices are summed over, just add the product of the diagonal
+ # elements of A and B
+ R = setdiag(R, diaglength(T1) * getdiagindex(T1, 1) * getdiagindex(T2, 1))
+ else
+ # not all indices are summed over, set the diagonals of the result
+ # to the product of the diagonals of A and B
+ R = setdiag(R, getdiagindex(T1, 1) * getdiagindex(T2, 1))
+ end
+ return R
end
function contract!(
- output_tensor::Exposed{<:AbstractArray,<:DiagTensor},
- labelsoutput_tensor,
- tensor1::Exposed,
- labelstensor1,
- tensor2::Exposed,
- labelstensor2,
- α::Number=one(Bool),
- β::Number=zero(Bool),
-)
- @assert isone(α)
- @assert iszero(β)
- return contract!(
- unexpose(output_tensor),
- labelsoutput_tensor,
- unexpose(tensor1),
- labelstensor1,
- unexpose(tensor2),
- labelstensor2,
- )
+ output_tensor::Exposed{<:AbstractArray, <:DiagTensor},
+ labelsoutput_tensor,
+ tensor1::Exposed,
+ labelstensor1,
+ tensor2::Exposed,
+ labelstensor2,
+ α::Number = one(Bool),
+ β::Number = zero(Bool),
+ )
+ @assert isone(α)
+ @assert iszero(β)
+ return contract!(
+ unexpose(output_tensor),
+ labelsoutput_tensor,
+ unexpose(tensor1),
+ labelstensor1,
+ unexpose(tensor2),
+ labelstensor2,
+ )
end
function contract!(
- R::DiagTensor{ElR,NR},
- labelsR,
- T1::DiagTensor{<:Number,N1},
- labelsT1,
- T2::DiagTensor{<:Number,N2},
- labelsT2,
-) where {ElR,NR,N1,N2}
- if NR == 0 # If all indices of A and B are contracted
- # All indices are summed over, just add the product of the diagonal
- # elements of A and B.
- # `expose` allows dispatching on the data type
- # in order to allow scalar indexing on GPU.
- expose(R)[] = mapreduce(*,+,diagview(T1),diagview(T2))
- else
- diagview(R) .= diagview(T1) .* diagview(T2)
- end
- return R
+ R::DiagTensor{ElR, NR},
+ labelsR,
+ T1::DiagTensor{<:Number, N1},
+ labelsT1,
+ T2::DiagTensor{<:Number, N2},
+ labelsT2,
+ ) where {ElR, NR, N1, N2}
+ if NR == 0 # If all indices of A and B are contracted
+ # All indices are summed over, just add the product of the diagonal
+ # elements of A and B.
+ # `expose` allows dispatching on the data type
+ # in order to allow scalar indexing on GPU.
+ expose(R)[] = mapreduce(*, +, diagview(T1), diagview(T2))
+ else
+ diagview(R) .= diagview(T1) .* diagview(T2)
+ end
+ return R
end
function contract!(
- C::DenseTensor{ElC,NC},
- Clabels,
- A::DiagTensor{ElA,NA},
- Alabels,
- B::DenseTensor{ElB,NB},
- Blabels,
- α::Number=one(ElC),
- β::Number=zero(ElC);
- convert_to_dense::Bool=true,
-) where {ElA,NA,ElB,NB,ElC,NC}
- #@timeit_debug timer "diag-dense contract!" begin
- if all(i -> i < 0, Blabels)
- # If all of B is contracted
- # TODO: can also check NC+NB==NA
- min_dim = min(minimum(dims(A)), minimum(dims(B)))
- if length(Clabels) == 0
- # all indices are summed over, just add the product of the diagonal
- # elements of A and B
- # Assumes C starts set to 0
- c₁ = zero(ElC)
- for i in 1:min_dim
- c₁ += getdiagindex(A, i) * getdiagindex(B, i)
- end
- setdiagindex!(C, α * c₁ + β * getdiagindex(C, 1), 1)
- else
- # not all indices are summed over, set the diagonals of the result
- # to the product of the diagonals of A and B
- # TODO: should we make this return a Diag storage?
- for i in 1:min_dim
- setdiagindex!(
- C, α * getdiagindex(A, i) * getdiagindex(B, i) + β * getdiagindex(C, i), i
- )
- end
- end
- else
- # Most general contraction
- if convert_to_dense
- contract!(C, Clabels, dense(A), Alabels, B, Blabels, α, β)
- else
- if !isone(α) || !iszero(β)
- error(
- "`contract!(::DenseTensor, ::DiagTensor, ::DenseTensor, α, β; convert_to_dense = false)` with `α ≠ 1` or `β ≠ 0` is not currently supported. You can call it with `convert_to_dense = true` instead.",
- )
- end
- astarts = zeros(Int, length(Alabels))
- bstart = 0
- cstart = 0
- b_cstride = 0
- nbu = 0
- for ib in 1:length(Blabels)
- ia = findfirst(==(Blabels[ib]), Alabels)
- if !isnothing(ia)
- b_cstride += stride(B, ib)
- bstart += astarts[ia] * stride(B, ib)
+ C::DenseTensor{ElC, NC},
+ Clabels,
+ A::DiagTensor{ElA, NA},
+ Alabels,
+ B::DenseTensor{ElB, NB},
+ Blabels,
+ α::Number = one(ElC),
+ β::Number = zero(ElC);
+ convert_to_dense::Bool = true,
+ ) where {ElA, NA, ElB, NB, ElC, NC}
+ #@timeit_debug timer "diag-dense contract!" begin
+ return if all(i -> i < 0, Blabels)
+ # If all of B is contracted
+ # TODO: can also check NC+NB==NA
+ min_dim = min(minimum(dims(A)), minimum(dims(B)))
+ if length(Clabels) == 0
+ # all indices are summed over, just add the product of the diagonal
+ # elements of A and B
+ # Assumes C starts set to 0
+ c₁ = zero(ElC)
+ for i in 1:min_dim
+ c₁ += getdiagindex(A, i) * getdiagindex(B, i)
+ end
+ setdiagindex!(C, α * c₁ + β * getdiagindex(C, 1), 1)
else
- nbu += 1
+ # not all indices are summed over, set the diagonals of the result
+ # to the product of the diagonals of A and B
+ # TODO: should we make this return a Diag storage?
+ for i in 1:min_dim
+ setdiagindex!(
+ C, α * getdiagindex(A, i) * getdiagindex(B, i) + β * getdiagindex(C, i), i
+ )
+ end
end
- end
+ else
+ # Most general contraction
+ if convert_to_dense
+ contract!(C, Clabels, dense(A), Alabels, B, Blabels, α, β)
+ else
+ if !isone(α) || !iszero(β)
+ error(
+ "`contract!(::DenseTensor, ::DiagTensor, ::DenseTensor, α, β; convert_to_dense = false)` with `α ≠ 1` or `β ≠ 0` is not currently supported. You can call it with `convert_to_dense = true` instead.",
+ )
+ end
+ astarts = zeros(Int, length(Alabels))
+ bstart = 0
+ cstart = 0
+ b_cstride = 0
+ nbu = 0
+ for ib in 1:length(Blabels)
+ ia = findfirst(==(Blabels[ib]), Alabels)
+ if !isnothing(ia)
+ b_cstride += stride(B, ib)
+ bstart += astarts[ia] * stride(B, ib)
+ else
+ nbu += 1
+ end
+ end
- c_cstride = 0
- for ic in 1:length(Clabels)
- ia = findfirst(==(Clabels[ic]), Alabels)
- if !isnothing(ia)
- c_cstride += stride(C, ic)
- cstart += astarts[ia] * stride(C, ic)
- end
- end
+ c_cstride = 0
+ for ic in 1:length(Clabels)
+ ia = findfirst(==(Clabels[ic]), Alabels)
+ if !isnothing(ia)
+ c_cstride += stride(C, ic)
+ cstart += astarts[ia] * stride(C, ic)
+ end
+ end
- # strides of the uncontracted dimensions of
- # B
- bustride = zeros(Int, nbu)
- custride = zeros(Int, nbu)
- # size of the uncontracted dimensions of
- # B, to be used in CartesianIndices
- busize = zeros(Int, nbu)
- n = 1
- for ib in 1:length(Blabels)
- if Blabels[ib] > 0
- bustride[n] = stride(B, ib)
- busize[n] = size(B, ib)
- ic = findfirst(==(Blabels[ib]), Clabels)
- custride[n] = stride(C, ic)
- n += 1
- end
- end
+ # strides of the uncontracted dimensions of
+ # B
+ bustride = zeros(Int, nbu)
+ custride = zeros(Int, nbu)
+ # size of the uncontracted dimensions of
+ # B, to be used in CartesianIndices
+ busize = zeros(Int, nbu)
+ n = 1
+ for ib in 1:length(Blabels)
+ if Blabels[ib] > 0
+ bustride[n] = stride(B, ib)
+ busize[n] = size(B, ib)
+ ic = findfirst(==(Blabels[ib]), Clabels)
+ custride[n] = stride(C, ic)
+ n += 1
+ end
+ end
- boffset_orig = 1 - sum(strides(B))
- coffset_orig = 1 - sum(strides(C))
- cartesian_inds = CartesianIndices(Tuple(busize))
- for inds in cartesian_inds
- boffset = boffset_orig
- coffset = coffset_orig
- for i in 1:nbu
- ii = inds[i]
- boffset += ii * bustride[i]
- coffset += ii * custride[i]
- end
- c = zero(ElC)
- for j in 1:diaglength(A)
- # With α == 0 && β == 1
- C[cstart + j * c_cstride + coffset] +=
- getdiagindex(A, j) * B[bstart + j * b_cstride + boffset]
- # XXX: not sure if this is correct
- #C[cstart+j*c_cstride+coffset] += α * getdiagindex(A, j)* B[bstart+j*b_cstride+boffset] + β * C[cstart+j*c_cstride+coffset]
+ boffset_orig = 1 - sum(strides(B))
+ coffset_orig = 1 - sum(strides(C))
+ cartesian_inds = CartesianIndices(Tuple(busize))
+ for inds in cartesian_inds
+ boffset = boffset_orig
+ coffset = coffset_orig
+ for i in 1:nbu
+ ii = inds[i]
+ boffset += ii * bustride[i]
+ coffset += ii * custride[i]
+ end
+ c = zero(ElC)
+ for j in 1:diaglength(A)
+ # With α == 0 && β == 1
+ C[cstart + j * c_cstride + coffset] +=
+ getdiagindex(A, j) * B[bstart + j * b_cstride + boffset]
+ # XXX: not sure if this is correct
+ #C[cstart+j*c_cstride+coffset] += α * getdiagindex(A, j)* B[bstart+j*b_cstride+boffset] + β * C[cstart+j*c_cstride+coffset]
+ end
+ end
end
- end
end
- end
- #end # @timeit
+ #end # @timeit
end
function contract!(
- C::DenseTensor,
- Clabels,
- A::DenseTensor,
- Alabels,
- B::DiagTensor,
- Blabels,
- α::Number=one(eltype(C)),
- β::Number=zero(eltype(C)),
-)
- return contract!(C, Clabels, B, Blabels, A, Alabels, α, β)
+ C::DenseTensor,
+ Clabels,
+ A::DenseTensor,
+ Alabels,
+ B::DiagTensor,
+ Blabels,
+ α::Number = one(eltype(C)),
+ β::Number = zero(eltype(C)),
+ )
+ return contract!(C, Clabels, B, Blabels, A, Alabels, α, β)
end
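
The diag-dense kernel above ultimately scales slices of the dense tensor by the diagonal entries of the `DiagTensor`. A minimal sketch of that idea with standard-library types only (this is not the NDTensors `contract!` signature):

```julia
using LinearAlgebra

A = Diagonal([1.0, 2.0, 3.0])   # stands in for the DiagTensor
B = randn(3, 4)                 # stands in for the DenseTensor

# Contracting the second index of A with the first index of B scales the rows of B.
C = A * B

# The same result written as the explicit diagonal loop used by the kernel above.
C2 = similar(B)
for j in 1:size(B, 2), i in 1:size(B, 1)
    C2[i, j] = A[i, i] * B[i, j]
end
C ≈ C2   # true
```
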
diff --git a/NDTensors/src/empty/empty.jl b/NDTensors/src/empty/empty.jl
index 124e4ea71a..78aaea0e91 100644
--- a/NDTensors/src/empty/empty.jl
+++ b/NDTensors/src/empty/empty.jl
@@ -8,44 +8,44 @@ using TypeParameterAccessors: TypeParameterAccessors, set_eltype, similartype
struct EmptyOrder end
function TypeParameterAccessors.similartype(
- StoreT::Type{<:TensorStorage{EmptyNumber}}, ElT::Type
-)
- return set_eltype(StoreT, ElT)
+ StoreT::Type{<:TensorStorage{EmptyNumber}}, ElT::Type
+ )
+ return set_eltype(StoreT, ElT)
end
function TypeParameterAccessors.similartype(
- StoreT::Type{<:TensorStorage{EmptyNumber}}, DataT::Type{<:AbstractArray}
-)
- return set_datatype(StoreT, DataT)
+ StoreT::Type{<:TensorStorage{EmptyNumber}}, DataT::Type{<:AbstractArray}
+ )
+ return set_datatype(StoreT, DataT)
end
## TODO fix this similartype to use set eltype for BlockSparse
function TypeParameterAccessors.similartype(
- ::Type{StoreT}, ::Type{ElT}
-) where {StoreT<:BlockSparse{EmptyNumber},ElT}
- return BlockSparse{ElT,similartype(datatype(StoreT), ElT),ndims(StoreT)}
+ ::Type{StoreT}, ::Type{ElT}
+ ) where {StoreT <: BlockSparse{EmptyNumber}, ElT}
+ return BlockSparse{ElT, similartype(datatype(StoreT), ElT), ndims(StoreT)}
end
#
# Empty storage
#
-struct EmptyStorage{ElT,StoreT<:TensorStorage} <: TensorStorage{ElT} end
+struct EmptyStorage{ElT, StoreT <: TensorStorage} <: TensorStorage{ElT} end
function EmptyStorage(::Type{ElT}) where {ElT}
- return empty(default_storagetype(default_datatype(ElT)))
- #return emptytype(Dense{ElT,Vector{ElT}})()
+ return empty(default_storagetype(default_datatype(ElT)))
+ #return emptytype(Dense{ElT,Vector{ElT}})()
end
# TODO: should this be `EmptyNumber`?
EmptyStorage() = EmptyStorage(default_eltype())
-storagetype(::Type{EmptyStorage{ElT,StoreT}}) where {ElT,StoreT} = StoreT
-storagetype(::EmptyStorage{ElT,StoreT}) where {ElT,StoreT} = StoreT
+storagetype(::Type{EmptyStorage{ElT, StoreT}}) where {ElT, StoreT} = StoreT
+storagetype(::EmptyStorage{ElT, StoreT}) where {ElT, StoreT} = StoreT
# Get the EmptyStorage version of the TensorStorage
function emptytype(storagetype::Type{<:TensorStorage})
- return EmptyStorage{eltype(storagetype),storagetype}
+ return EmptyStorage{eltype(storagetype), storagetype}
end
empty(storagetype::Type{<:TensorStorage}) = emptytype(storagetype)()
@@ -53,7 +53,7 @@ empty(storagetype::Type{<:TensorStorage}) = emptytype(storagetype)()
data(S::EmptyStorage) = NoData()
## TODO Why is the norm of an empty tensor 0???
-norm(::EmptyStorage{ElT}) where {ElT} = norm(zero(ElT))#EmptyNumber
+norm(::EmptyStorage{ElT}) where {ElT} = norm(zero(ElT)) #EmptyNumber
similar(S::EmptyStorage) = S
similar(S::EmptyStorage, ::Type{ElT}) where {ElT} = empty(similartype(fulltype(S), ElT))
@@ -70,19 +70,19 @@ nnzblocks(::EmptyStorage) = 0
SparseArrays.nnz(::EmptyStorage) = 0
function conj(::AllowAlias, S::EmptyStorage)
- return S
+ return S
end
# TODO: promote the element type properly
(S::EmptyStorage * x::Number) = S
(x::Number * S::EmptyStorage) = S * x
-function Base.real(::Type{<:EmptyStorage{ElT,StoreT}}) where {ElT,StoreT}
- return EmptyStorage{real(ElT),real(StoreT)}
+function Base.real(::Type{<:EmptyStorage{ElT, StoreT}}) where {ElT, StoreT}
+ return EmptyStorage{real(ElT), real(StoreT)}
end
-function complex(::Type{<:EmptyStorage{ElT,StoreT}}) where {ElT,StoreT}
- return EmptyStorage{complex(ElT),complex(StoreT)}
+function complex(::Type{<:EmptyStorage{ElT, StoreT}}) where {ElT, StoreT}
+ return EmptyStorage{complex(ElT), complex(StoreT)}
end
real(S::EmptyStorage) = real(typeof(S))()
@@ -92,7 +92,7 @@ complex(S::EmptyStorage) = complex(typeof(S))()
blockoffsets(storage::EmptyStorage) = BlockOffsets{ndims(storage)}()
function Base.show(io::IO, mime::MIME"text/plain", S::EmptyStorage)
- return println(io, typeof(S))
+ return println(io, typeof(S))
end
using TypeParameterAccessors: TypeParameterAccessors
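
A rough sketch of the `EmptyStorage{ElT, StoreT}` pattern above, using a hypothetical standalone `EmptySketch` type and simplifying by assuming `Vector`-backed storage: the placeholder carries both the element type and the storage type it would materialize into, so type-level functions such as `complex` can be forwarded.

```julia
struct EmptySketch{ElT, StoreT} end

# The storage type this placeholder would become once data is written to it.
fulltype(::Type{EmptySketch{ElT, StoreT}}) where {ElT, StoreT} = StoreT

# Forward `complex` at the type level (simplified: assumes Vector-backed storage).
Base.complex(::Type{EmptySketch{ElT, StoreT}}) where {ElT, StoreT} =
    EmptySketch{complex(ElT), Vector{complex(ElT)}}

fulltype(EmptySketch{Float64, Vector{Float64}})   # Vector{Float64}
complex(EmptySketch{Float64, Vector{Float64}})    # EmptySketch{ComplexF64, Vector{ComplexF64}}
```
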
diff --git a/NDTensors/src/lib/RankFactorization/src/spectrum.jl b/NDTensors/src/lib/RankFactorization/src/spectrum.jl
index 7d6e9c46b7..f3ccbdf237 100644
--- a/NDTensors/src/lib/RankFactorization/src/spectrum.jl
+++ b/NDTensors/src/lib/RankFactorization/src/spectrum.jl
@@ -3,21 +3,21 @@
contains the (truncated) density matrix eigenvalue spectrum which is computed during a
decomposition done by `svd` or `eigen`. In addition stores the truncation error.
"""
-struct Spectrum{VecT<:Union{AbstractVector,Nothing},ElT<:Real}
- eigs::VecT
- truncerr::ElT
+struct Spectrum{VecT <: Union{AbstractVector, Nothing}, ElT <: Real}
+ eigs::VecT
+ truncerr::ElT
end
eigs(s::Spectrum) = s.eigs
truncerror(s::Spectrum) = s.truncerr
function entropy(s::Spectrum)
- S = 0.0
- eigs_s = eigs(s)
- isnothing(eigs_s) &&
- error("Spectrum does not contain any eigenvalues, cannot compute the entropy")
- for p in eigs_s
- p > 1e-13 && (S -= p * log(p))
- end
- return S
+ S = 0.0
+ eigs_s = eigs(s)
+ isnothing(eigs_s) &&
+ error("Spectrum does not contain any eigenvalues, cannot compute the entropy")
+ for p in eigs_s
+ p > 1.0e-13 && (S -= p * log(p))
+ end
+ return S
end
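
For reference, the entropy computed by `entropy(::Spectrum)` above is `-Σ p log p` over the eigenvalue spectrum, skipping weights below `1e-13`. A plain-Julia sketch with a hypothetical helper name (not the NDTensors API):

```julia
# -Σ p log p over the retained eigenvalues, ignoring numerically negligible weights.
entropy_sketch(eigs) = -sum((p * log(p) for p in eigs if p > 1.0e-13); init = 0.0)

entropy_sketch([0.7, 0.2, 0.1])   # ≈ 0.802
```
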
diff --git a/NDTensors/src/lib/RankFactorization/src/truncate_spectrum.jl b/NDTensors/src/lib/RankFactorization/src/truncate_spectrum.jl
index 2d6569440e..d980623364 100644
--- a/NDTensors/src/lib/RankFactorization/src/truncate_spectrum.jl
+++ b/NDTensors/src/lib/RankFactorization/src/truncate_spectrum.jl
@@ -2,104 +2,104 @@ using TypeParameterAccessors: unwrap_array_type
## TODO write Exposed version of truncate
function truncate!!(P::AbstractArray; kwargs...)
- return truncate!!(unwrap_array_type(P), P; kwargs...)
+ return truncate!!(unwrap_array_type(P), P; kwargs...)
end
# CPU version.
function truncate!!(::Type{<:Array}, P::AbstractArray; kwargs...)
- truncerr, docut = truncate!(P; kwargs...)
- return P, truncerr, docut
+ truncerr, docut = truncate!(P; kwargs...)
+ return P, truncerr, docut
end
# GPU fallback version, convert to CPU.
function truncate!!(::Type{<:AbstractArray}, P::AbstractArray; kwargs...)
- P_cpu = cpu(P)
- truncerr, docut = truncate!(P_cpu; kwargs...)
- P = adapt(unwrap_array_type(P), P_cpu)
- return P, truncerr, docut
+ P_cpu = cpu(P)
+ truncerr, docut = truncate!(P_cpu; kwargs...)
+ P = adapt(unwrap_array_type(P), P_cpu)
+ return P, truncerr, docut
end
# CPU implementation.
function truncate!(
- P::AbstractVector;
- mindim=nothing,
- maxdim=nothing,
- cutoff=nothing,
- use_absolute_cutoff=nothing,
- use_relative_cutoff=nothing,
-)
- mindim = replace_nothing(mindim, default_mindim(P))
- maxdim = replace_nothing(maxdim, length(P))
- cutoff = replace_nothing(cutoff, typemin(eltype(P)))
- use_absolute_cutoff = replace_nothing(use_absolute_cutoff, default_use_absolute_cutoff(P))
- use_relative_cutoff = replace_nothing(use_relative_cutoff, default_use_relative_cutoff(P))
-
- origm = length(P)
- docut = zero(eltype(P))
-
- #if P[1] <= 0.0
- # P[1] = 0.0
- # resize!(P, 1)
- # return 0.0, 0.0
- #end
-
- if origm == 1
- docut = abs(P[1]) / 2
- return zero(eltype(P)), docut
- end
-
- s = sign(P[1])
- s < 0 && (P .*= s)
-
- #Zero out any negative weight
- for n in origm:-1:1
- (P[n] >= zero(eltype(P))) && break
- P[n] = zero(eltype(P))
- end
-
- n = origm
- truncerr = zero(eltype(P))
- while n > maxdim
- truncerr += P[n]
- n -= 1
- end
-
- if use_absolute_cutoff
- #Test if individual prob. weights fall below cutoff
- #rather than using *sum* of discarded weights
- while P[n] <= cutoff && n > mindim
- truncerr += P[n]
- n -= 1
+ P::AbstractVector;
+ mindim = nothing,
+ maxdim = nothing,
+ cutoff = nothing,
+ use_absolute_cutoff = nothing,
+ use_relative_cutoff = nothing,
+ )
+ mindim = replace_nothing(mindim, default_mindim(P))
+ maxdim = replace_nothing(maxdim, length(P))
+ cutoff = replace_nothing(cutoff, typemin(eltype(P)))
+ use_absolute_cutoff = replace_nothing(use_absolute_cutoff, default_use_absolute_cutoff(P))
+ use_relative_cutoff = replace_nothing(use_relative_cutoff, default_use_relative_cutoff(P))
+
+ origm = length(P)
+ docut = zero(eltype(P))
+
+ #if P[1] <= 0.0
+ # P[1] = 0.0
+ # resize!(P, 1)
+ # return 0.0, 0.0
+ #end
+
+ if origm == 1
+ docut = abs(P[1]) / 2
+ return zero(eltype(P)), docut
end
- else
- scale = one(eltype(P))
- if use_relative_cutoff
- scale = sum(P)
- (scale == zero(eltype(P))) && (scale = one(eltype(P)))
+
+ s = sign(P[1])
+ s < 0 && (P .*= s)
+
+ #Zero out any negative weight
+ for n in origm:-1:1
+ (P[n] >= zero(eltype(P))) && break
+ P[n] = zero(eltype(P))
end
- #Continue truncating until *sum* of discarded probability
- #weight reaches cutoff reached (or m==mindim)
- while (truncerr + P[n] <= cutoff * scale) && (n > mindim)
- truncerr += P[n]
- n -= 1
+ n = origm
+ truncerr = zero(eltype(P))
+ while n > maxdim
+ truncerr += P[n]
+ n -= 1
end
- truncerr /= scale
- end
+ if use_absolute_cutoff
+ #Test if individual prob. weights fall below cutoff
+ #rather than using *sum* of discarded weights
+ while P[n] <= cutoff && n > mindim
+ truncerr += P[n]
+ n -= 1
+ end
+ else
+ scale = one(eltype(P))
+ if use_relative_cutoff
+ scale = sum(P)
+ (scale == zero(eltype(P))) && (scale = one(eltype(P)))
+ end
+
+ #Continue truncating until *sum* of discarded probability
+        #weight reaches the cutoff (or m==mindim)
+ while (truncerr + P[n] <= cutoff * scale) && (n > mindim)
+ truncerr += P[n]
+ n -= 1
+ end
+
+ truncerr /= scale
+ end
- if n < 1
- n = 1
- end
+ if n < 1
+ n = 1
+ end
- if n < origm
- docut = (P[n] + P[n + 1]) / 2
- if abs(P[n] - P[n + 1]) < eltype(P)(1e-3) * P[n]
- docut += eltype(P)(1e-3) * P[n]
+ if n < origm
+ docut = (P[n] + P[n + 1]) / 2
+ if abs(P[n] - P[n + 1]) < eltype(P)(1.0e-3) * P[n]
+ docut += eltype(P)(1.0e-3) * P[n]
+ end
end
- end
- s < 0 && (P .*= s)
- resize!(P, n)
- return truncerr, docut
+ s < 0 && (P .*= s)
+ resize!(P, n)
+ return truncerr, docut
end
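
A condensed sketch of the relative-cutoff branch of `truncate!` above, as a hypothetical standalone helper on a plain sorted `Vector` (the NDTensors version additionally handles absolute cutoffs, sign flips, `docut`, and GPU arrays):

```julia
function truncate_sketch(P::Vector{Float64}; cutoff = 1.0e-8, maxdim = length(P), mindim = 1)
    n = length(P)
    truncerr = 0.0
    while n > maxdim                      # enforce the hard maximum dimension first
        truncerr += P[n]
        n -= 1
    end
    scale = sum(P)
    scale == 0.0 && (scale = 1.0)
    while n > mindim && truncerr + P[n] <= cutoff * scale
        truncerr += P[n]                  # discard trailing weights while still under the cutoff
        n -= 1
    end
    return P[1:n], truncerr / scale
end

truncate_sketch([0.6, 0.3, 0.09, 0.009, 0.001]; cutoff = 0.02)
# keeps [0.6, 0.3, 0.09]; discarded relative weight ≈ 0.01
```
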
diff --git a/NDTensors/src/linearalgebra/svd.jl b/NDTensors/src/linearalgebra/svd.jl
index 85ca85b67f..bf7729e9ff 100644
--- a/NDTensors/src/linearalgebra/svd.jl
+++ b/NDTensors/src/linearalgebra/svd.jl
@@ -1,71 +1,71 @@
using TypeParameterAccessors: unwrap_array_type
# The state of the `svd_recursive` algorithm.
function svd_recursive_state(S::AbstractArray, thresh::Float64)
- return svd_recursive_state(unwrap_array_type(S), S, thresh)
+ return svd_recursive_state(unwrap_array_type(S), S, thresh)
end
# CPU version.
function svd_recursive_state(::Type{<:Array}, S::AbstractArray, thresh::Float64)
- N = length(S)
- (N <= 1 || thresh < 0.0) && return (true, 1)
- S1t = S[1] * thresh
- start = 2
- while start <= N
- (S[start] < S1t) && break
- start += 1
- end
- if start >= N
- return (true, N)
- end
- return (false, start)
+ N = length(S)
+ (N <= 1 || thresh < 0.0) && return (true, 1)
+ S1t = S[1] * thresh
+ start = 2
+ while start <= N
+ (S[start] < S1t) && break
+ start += 1
+ end
+ if start >= N
+ return (true, N)
+ end
+ return (false, start)
end
# Convert to CPU to avoid slow scalar indexing
# on GPU.
function svd_recursive_state(::Type{<:AbstractArray}, S::AbstractArray, thresh::Float64)
- return svd_recursive_state(Array, cpu(S), thresh)
+ return svd_recursive_state(Array, cpu(S), thresh)
end
-function svd_recursive(M::AbstractMatrix; thresh::Float64=1E-3, north_pass::Int=2)
- Mr, Mc = size(M)
- if Mr > Mc
- V, S, U = svd_recursive(transpose(M))
- conj!(U)
- conj!(V)
- return U, S, V
- end
+function svd_recursive(M::AbstractMatrix; thresh::Float64 = 1.0e-3, north_pass::Int = 2)
+ Mr, Mc = size(M)
+ if Mr > Mc
+ V, S, U = svd_recursive(transpose(M))
+ conj!(U)
+ conj!(V)
+ return U, S, V
+ end
- #rho = BLAS.gemm('N','T',-1.0,M,M) #negative to sort eigenvalues greatest to smallest
- rho = -M * M' #negative to sort eigenvalues in decreasing order
- D, U = eigen(expose(Hermitian(rho)))
+ #rho = BLAS.gemm('N','T',-1.0,M,M) #negative to sort eigenvalues greatest to smallest
+ rho = -M * M' #negative to sort eigenvalues in decreasing order
+ D, U = eigen(expose(Hermitian(rho)))
- Nd = length(D)
+ Nd = length(D)
- V = M' * U
+ V = M' * U
- V, R = qr_positive(expose(V))
- D[1:Nd] = diag(R)[1:Nd]
+ V, R = qr_positive(expose(V))
+ D[1:Nd] = diag(R)[1:Nd]
- (done, start) = svd_recursive_state(D, thresh)
+ (done, start) = svd_recursive_state(D, thresh)
- done && return U, D, V
+ done && return U, D, V
- u = view(U, :, start:Nd)
- v = view(V, :, start:Nd)
+ u = view(U, :, start:Nd)
+ v = view(V, :, start:Nd)
- b = u' * (M * v)
- bu, bd, bv = svd_recursive(b; thresh=thresh, north_pass=north_pass)
+ b = u' * (M * v)
+ bu, bd, bv = svd_recursive(b; thresh = thresh, north_pass = north_pass)
- u .= u * bu
- v .= v * bv
- view(D, start:Nd) .= bd
+ u .= u * bu
+ v .= v * bv
+ view(D, start:Nd) .= bd
- return U, D, V
+ return U, D, V
end
# TODO: maybe move to another location?
# Include options for other svd algorithms
function polar(M::AbstractMatrix)
- U, S, V = svd(expose(M)) # calls LinearAlgebra.svd(_)
- return U * V', V * Diagonal(S) * V'
+ U, S, V = svd(expose(M)) # calls LinearAlgebra.svd(_)
+ return U * V', V * Diagonal(S) * V'
end
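
As a quick illustration of the `polar` routine above, the same identity written with plain `LinearAlgebra` (no `expose` wrapper): the SVD `M = U S V'` gives the polar decomposition `M = (U V') (V S V')` with a unitary factor and a Hermitian positive-semidefinite factor.

```julia
using LinearAlgebra

function polar_sketch(M::AbstractMatrix)
    F = svd(M)
    Q = F.U * F.Vt                  # unitary factor
    P = F.V * Diagonal(F.S) * F.Vt  # Hermitian positive-semidefinite factor
    return Q, P
end

M = randn(4, 4)
Q, P = polar_sketch(M)
Q * P ≈ M   # true up to floating-point roundoff
```
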
diff --git a/NDTensors/src/linearalgebra/symmetric.jl b/NDTensors/src/linearalgebra/symmetric.jl
index 51ddb7b854..7a85707d1f 100644
--- a/NDTensors/src/linearalgebra/symmetric.jl
+++ b/NDTensors/src/linearalgebra/symmetric.jl
@@ -1,30 +1,29 @@
+dims(H::Hermitian{<:Number, <:Tensor}) = dims(parent(H))
-dims(H::Hermitian{<:Number,<:Tensor}) = dims(parent(H))
+blockdims(H::Hermitian{<:Number, <:Tensor}, b) = blockdims(parent(H), b)
-blockdims(H::Hermitian{<:Number,<:Tensor}, b) = blockdims(parent(H), b)
+dim(H::Hermitian{<:Number, <:Tensor}, i::Int) = dim(parent(H), i)
-dim(H::Hermitian{<:Number,<:Tensor}, i::Int) = dim(parent(H), i)
+matrix(H::Hermitian{<:Number, <:Tensor}) = Hermitian(matrix(parent(H)))
-matrix(H::Hermitian{<:Number,<:Tensor}) = Hermitian(matrix(parent(H)))
+inds(H::Hermitian{<:Number, <:Tensor}) = inds(parent(H))
-inds(H::Hermitian{<:Number,<:Tensor}) = inds(parent(H))
+ind(H::Hermitian{<:Number, <:Tensor}, i::Int) = ind(parent(H), i)
-ind(H::Hermitian{<:Number,<:Tensor}, i::Int) = ind(parent(H), i)
+nnzblocks(H::Hermitian{<:Number, <:Tensor}) = nnzblocks(parent(H))
-nnzblocks(H::Hermitian{<:Number,<:Tensor}) = nnzblocks(parent(H))
+nzblocks(H::Hermitian{<:Number, <:Tensor}) = nzblocks(parent(H))
-nzblocks(H::Hermitian{<:Number,<:Tensor}) = nzblocks(parent(H))
+eachnzblock(H::Hermitian{<:Number, <:Tensor}) = eachnzblock(parent(H))
-eachnzblock(H::Hermitian{<:Number,<:Tensor}) = eachnzblock(parent(H))
+eachblock(H::Hermitian{<:Number, <:Tensor}) = eachblock(parent(H))
-eachblock(H::Hermitian{<:Number,<:Tensor}) = eachblock(parent(H))
+eachdiagblock(H::Hermitian{<:Number, <:Tensor}) = eachdiagblock(parent(H))
-eachdiagblock(H::Hermitian{<:Number,<:Tensor}) = eachdiagblock(parent(H))
+nblocks(H::Hermitian{<:Number, <:Tensor}) = nblocks(parent(H))
-nblocks(H::Hermitian{<:Number,<:Tensor}) = nblocks(parent(H))
-
-function blockview(H::Hermitian{<:Number,<:Tensor}, block)
- return _blockview(H, blockview(parent(H), block))
+function blockview(H::Hermitian{<:Number, <:Tensor}, block)
+ return _blockview(H, blockview(parent(H), block))
end
-_blockview(::Hermitian{<:Number,<:Tensor}, blockviewH) = Hermitian(blockviewH)
-_blockview(::Hermitian{<:Number,<:Tensor}, ::Nothing) = nothing
+_blockview(::Hermitian{<:Number, <:Tensor}, blockviewH) = Hermitian(blockviewH)
+_blockview(::Hermitian{<:Number, <:Tensor}, ::Nothing) = nothing
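
The definitions above all follow one pattern: structural queries on a `Hermitian`-wrapped tensor delegate to `parent`. A small standard-library illustration of that delegation (plain matrices, not NDTensors tensors):

```julia
using LinearAlgebra

A = rand(3, 3)
H = Hermitian(A)

parent(H) === A              # true: the wrapper only references the underlying matrix
size(H) == size(parent(H))   # true: size-like queries can simply be forwarded to the parent
```
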
diff --git a/NDTensors/src/tensoroperations/contraction_logic.jl b/NDTensors/src/tensoroperations/contraction_logic.jl
index 0844d3813d..adced0c397 100644
--- a/NDTensors/src/tensoroperations/contraction_logic.jl
+++ b/NDTensors/src/tensoroperations/contraction_logic.jl
@@ -1,62 +1,61 @@
-
-const Labels{N} = NTuple{N,Int}
+const Labels{N} = NTuple{N, Int}
# Automatically determine the output labels given
# input labels of a contraction
-function contract_labels(T1labels::Labels{N1}, T2labels::Labels{N2}) where {N1,N2}
- ncont = 0
- for i in T1labels
- i < 0 && (ncont += 1)
- end
- NR = N1 + N2 - 2 * ncont
- ValNR = Val{NR}
- return contract_labels(ValNR, T1labels, T2labels)
+function contract_labels(T1labels::Labels{N1}, T2labels::Labels{N2}) where {N1, N2}
+ ncont = 0
+ for i in T1labels
+ i < 0 && (ncont += 1)
+ end
+ NR = N1 + N2 - 2 * ncont
+ ValNR = Val{NR}
+ return contract_labels(ValNR, T1labels, T2labels)
end
function contract_labels(
- ::Type{Val{NR}}, T1labels::Labels{N1}, T2labels::Labels{N2}
-) where {NR,N1,N2}
- Rlabels = MVector{NR,Int}(undef)
- u = 1
- # TODO: use Rlabels, don't assume ncon convention
- for i in 1:N1
- if T1labels[i] > 0
- @inbounds Rlabels[u] = T1labels[i]
- u += 1
+ ::Type{Val{NR}}, T1labels::Labels{N1}, T2labels::Labels{N2}
+ ) where {NR, N1, N2}
+ Rlabels = MVector{NR, Int}(undef)
+ u = 1
+ # TODO: use Rlabels, don't assume ncon convention
+ for i in 1:N1
+ if T1labels[i] > 0
+ @inbounds Rlabels[u] = T1labels[i]
+ u += 1
+ end
end
- end
- for i in 1:N2
- if T2labels[i] > 0
- @inbounds Rlabels[u] = T2labels[i]
- u += 1
+ for i in 1:N2
+ if T2labels[i] > 0
+ @inbounds Rlabels[u] = T2labels[i]
+ u += 1
+ end
end
- end
- return Labels{NR}(Rlabels)
+ return Labels{NR}(Rlabels)
end
function _contract_inds!(
- Ris, T1is, T1labels::Labels{N1}, T2is, T2labels::Labels{N2}, Rlabels::Labels{NR}
-) where {N1,N2,NR}
- for n in 1:NR
- Rlabel = @inbounds Rlabels[n]
- found = false
- for n1 in 1:N1
- if Rlabel == @inbounds T1labels[n1]
- @inbounds Ris[n] = @inbounds T1is[n1]
- found = true
- break
- end
- end
- if !found
- for n2 in 1:N2
- if Rlabel == @inbounds T2labels[n2]
- @inbounds Ris[n] = @inbounds T2is[n2]
- break
+ Ris, T1is, T1labels::Labels{N1}, T2is, T2labels::Labels{N2}, Rlabels::Labels{NR}
+ ) where {N1, N2, NR}
+ for n in 1:NR
+ Rlabel = @inbounds Rlabels[n]
+ found = false
+ for n1 in 1:N1
+ if Rlabel == @inbounds T1labels[n1]
+ @inbounds Ris[n] = @inbounds T1is[n1]
+ found = true
+ break
+ end
+ end
+ if !found
+ for n2 in 1:N2
+ if Rlabel == @inbounds T2labels[n2]
+ @inbounds Ris[n] = @inbounds T2is[n2]
+ break
+ end
+ end
end
- end
end
- end
- return nothing
+ return nothing
end
# Old version that doesn't take into account Rlabels
@@ -94,185 +93,185 @@ end
#end
function contract_inds(T1is, T1labels::Labels{0}, T2is, T2labels::Labels{0}, Rlabels)
- return ()
+ return ()
end
# isbitstype that returns a Val for dispatch
isbitsval(T) = Val(isbitstype(T))
function contract_inds(T1is, T1labels, T2is, T2labels, Rlabels)
- IndT = promote_type(eltype(T1is), eltype(T2is))
- return _contract_inds(isbitsval(IndT), IndT, T1is, T1labels, T2is, T2labels, Rlabels)
+ IndT = promote_type(eltype(T1is), eltype(T2is))
+ return _contract_inds(isbitsval(IndT), IndT, T1is, T1labels, T2is, T2labels, Rlabels)
end
# isbits
function _contract_inds(::Val{true}, IndT, T1is, T1labels, T2is, T2labels, Rlabels)
- Ris = MVector{length(Rlabels),IndT}(undef)
- _contract_inds!(Ris, T1is, T1labels, T2is, T2labels, Rlabels)
- return Tuple(Ris)
+ Ris = MVector{length(Rlabels), IndT}(undef)
+ _contract_inds!(Ris, T1is, T1labels, T2is, T2labels, Rlabels)
+ return Tuple(Ris)
end
# !isbits
function _contract_inds(::Val{false}, IndT, T1is, T1labels, T2is, T2labels, Rlabels)
- Ris = SizedVector{length(Rlabels),IndT}(undef)
- _contract_inds!(Ris, T1is, T1labels, T2is, T2labels, Rlabels)
- return Tuple(Ris)
+ Ris = SizedVector{length(Rlabels), IndT}(undef)
+ _contract_inds!(Ris, T1is, T1labels, T2is, T2labels, Rlabels)
+ return Tuple(Ris)
end
-mutable struct ContractionProperties{NA,NB,NC}
- ai::NTuple{NA,Int}
- bi::NTuple{NB,Int}
- ci::NTuple{NC,Int}
- nactiveA::Int
- nactiveB::Int
- nactiveC::Int
- AtoB::NTuple{NA,Int}
- AtoC::NTuple{NA,Int}
- BtoC::NTuple{NB,Int}
- permuteA::Bool
- permuteB::Bool
- permuteC::Bool
- dleft::Int
- dmid::Int
- dright::Int
- ncont::Int
- Acstart::Int
- Bcstart::Int
- Austart::Int
- Bustart::Int
- PA::NTuple{NA,Int}
- PB::NTuple{NB,Int}
- PC::NTuple{NC,Int}
- ctrans::Bool
- newArange::NTuple{NA,Int}
- newBrange::NTuple{NB,Int}
- newCrange::NTuple{NC,Int}
- function ContractionProperties(
- ai::NTuple{NA,Int}, bi::NTuple{NB,Int}, ci::NTuple{NC,Int}
- ) where {NA,NB,NC}
- return new{NA,NB,NC}(
- ai,
- bi,
- ci,
- 0,
- 0,
- 0,
- ntuple(_ -> 0, Val(NA)),
- ntuple(_ -> 0, Val(NA)),
- ntuple(_ -> 0, Val(NB)),
- false,
- false,
- false,
- 1,
- 1,
- 1,
- 0,
- NA,
- NB,
- NA,
- NB,
- ntuple(i -> i, Val(NA)),
- ntuple(i -> i, Val(NB)),
- ntuple(i -> i, Val(NC)),
- false,
- ntuple(_ -> 0, Val(NA)),
- ntuple(_ -> 0, Val(NB)),
- ntuple(_ -> 0, Val(NC)),
- )
- end
+mutable struct ContractionProperties{NA, NB, NC}
+ ai::NTuple{NA, Int}
+ bi::NTuple{NB, Int}
+ ci::NTuple{NC, Int}
+ nactiveA::Int
+ nactiveB::Int
+ nactiveC::Int
+ AtoB::NTuple{NA, Int}
+ AtoC::NTuple{NA, Int}
+ BtoC::NTuple{NB, Int}
+ permuteA::Bool
+ permuteB::Bool
+ permuteC::Bool
+ dleft::Int
+ dmid::Int
+ dright::Int
+ ncont::Int
+ Acstart::Int
+ Bcstart::Int
+ Austart::Int
+ Bustart::Int
+ PA::NTuple{NA, Int}
+ PB::NTuple{NB, Int}
+ PC::NTuple{NC, Int}
+ ctrans::Bool
+ newArange::NTuple{NA, Int}
+ newBrange::NTuple{NB, Int}
+ newCrange::NTuple{NC, Int}
+ function ContractionProperties(
+ ai::NTuple{NA, Int}, bi::NTuple{NB, Int}, ci::NTuple{NC, Int}
+ ) where {NA, NB, NC}
+ return new{NA, NB, NC}(
+ ai,
+ bi,
+ ci,
+ 0,
+ 0,
+ 0,
+ ntuple(_ -> 0, Val(NA)),
+ ntuple(_ -> 0, Val(NA)),
+ ntuple(_ -> 0, Val(NB)),
+ false,
+ false,
+ false,
+ 1,
+ 1,
+ 1,
+ 0,
+ NA,
+ NB,
+ NA,
+ NB,
+ ntuple(i -> i, Val(NA)),
+ ntuple(i -> i, Val(NB)),
+ ntuple(i -> i, Val(NC)),
+ false,
+ ntuple(_ -> 0, Val(NA)),
+ ntuple(_ -> 0, Val(NB)),
+ ntuple(_ -> 0, Val(NC)),
+ )
+ end
end
-function compute_perms!(props::ContractionProperties{NA,NB,NC}) where {NA,NB,NC}
- #leng.th(props.AtoB)!=0 && return
-
- # Access the fields before the loop
- # since getting fields from the mutable struct
- # takes nontrivial time
- ai = props.ai
- bi = props.bi
- ci = props.ci
-
- ncont = props.ncont
- AtoB = props.AtoB
- Acstart = props.Acstart
- Bcstart = props.Bcstart
- for i in 1:NA
- for j in 1:NB
- if @inbounds ai[i] == @inbounds bi[j]
- ncont += 1
- #TODO: check this if this should be i,j or i-1,j-1 (0-index or 1-index)
- i <= Acstart && (Acstart = i)
- j <= Bcstart && (Bcstart = j)
- #AtoB[i] = j
- AtoB = setindex(AtoB, j, i)
- break
- end
+function compute_perms!(props::ContractionProperties{NA, NB, NC}) where {NA, NB, NC}
+ #leng.th(props.AtoB)!=0 && return
+
+ # Access the fields before the loop
+ # since getting fields from the mutable struct
+ # takes nontrivial time
+ ai = props.ai
+ bi = props.bi
+ ci = props.ci
+
+ ncont = props.ncont
+ AtoB = props.AtoB
+ Acstart = props.Acstart
+ Bcstart = props.Bcstart
+ for i in 1:NA
+ for j in 1:NB
+ if @inbounds ai[i] == @inbounds bi[j]
+ ncont += 1
+                #TODO: check whether this should be i,j or i-1,j-1 (0-index or 1-index)
+ i <= Acstart && (Acstart = i)
+ j <= Bcstart && (Bcstart = j)
+ #AtoB[i] = j
+ AtoB = setindex(AtoB, j, i)
+ break
+ end
+ end
end
- end
- props.ncont = ncont
- props.AtoB = AtoB
- props.Acstart = Acstart
- props.Bcstart = Bcstart
-
- Austart = props.Austart
- AtoC = props.AtoC
- for i in 1:NA
- for k in 1:NC
- if @inbounds ai[i] == @inbounds ci[k]
- #TODO: check this if this should be i,j or i-1,j-1 (0-index or 1-index)
- i <= Austart && (Austart = i)
- #AtoC[i] = k
- AtoC = setindex(AtoC, k, i)
- break
- end
+ props.ncont = ncont
+ props.AtoB = AtoB
+ props.Acstart = Acstart
+ props.Bcstart = Bcstart
+
+ Austart = props.Austart
+ AtoC = props.AtoC
+ for i in 1:NA
+ for k in 1:NC
+ if @inbounds ai[i] == @inbounds ci[k]
+                #TODO: check whether this should be i,j or i-1,j-1 (0-index or 1-index)
+ i <= Austart && (Austart = i)
+ #AtoC[i] = k
+ AtoC = setindex(AtoC, k, i)
+ break
+ end
+ end
end
- end
- props.Austart = Austart
- props.AtoC = AtoC
-
- Bustart = props.Bustart
- BtoC = props.BtoC
- for j in 1:NB
- for k in 1:NC
- if bi[j] == ci[k]
- #TODO: check this if this should be i,j or i-1,j-1 (0-index or 1-index)
- j <= Bustart && (Bustart = j)
- #BtoC[j] = k
- BtoC = setindex(BtoC, k, j)
- break
- end
+ props.Austart = Austart
+ props.AtoC = AtoC
+
+ Bustart = props.Bustart
+ BtoC = props.BtoC
+ for j in 1:NB
+ for k in 1:NC
+ if bi[j] == ci[k]
+                #TODO: check whether this should be i,j or i-1,j-1 (0-index or 1-index)
+ j <= Bustart && (Bustart = j)
+ #BtoC[j] = k
+ BtoC = setindex(BtoC, k, j)
+ break
+ end
+ end
end
- end
- props.Bustart = Bustart
- props.BtoC = BtoC
+ props.Bustart = Bustart
+ props.BtoC = BtoC
- return nothing
+ return nothing
end
function checkACsameord(props::ContractionProperties)::Bool
- AtoC = props.AtoC
-
- props.Austart >= length(props.ai) && return true
- aCind = props.AtoC[props.Austart]
- for i in 1:length(props.ai)
- if !contractedA(props, i)
- AtoC[i] != aCind && return false
- aCind += 1
+ AtoC = props.AtoC
+
+ props.Austart >= length(props.ai) && return true
+ aCind = props.AtoC[props.Austart]
+ for i in 1:length(props.ai)
+ if !contractedA(props, i)
+ AtoC[i] != aCind && return false
+ aCind += 1
+ end
end
- end
- return true
+ return true
end
function checkBCsameord(props::ContractionProperties)::Bool
- props.Bustart >= length(props.bi) && return true
- bCind = props.BtoC[props.Bustart]
- for i in 1:length(props.bi)
- if !contractedB(props, i)
- props.BtoC[i] != bCind && return false
- bCind += 1
+ props.Bustart >= length(props.bi) && return true
+ bCind = props.BtoC[props.Bustart]
+ for i in 1:length(props.bi)
+ if !contractedB(props, i)
+ props.BtoC[i] != bCind && return false
+ bCind += 1
+ end
end
- end
- return true
+ return true
end
contractedA(props::ContractionProperties, i::Int) = (props.AtoC[i] < 1)
@@ -282,367 +281,367 @@ Btrans(props::ContractionProperties) = !contractedB(props, 1)
Ctrans(props::ContractionProperties) = props.ctrans
function compute_contraction_properties!(
- props::ContractionProperties{NA,NB,NC}, A, B, C
-) where {NA,NB,NC}
- compute_perms!(props)
-
- #Use props.PC.size() as a check to see if we've already run this
- #length(props.PC)!=0 && return
-
- #ra = NA #length(props.ai)
- #rb = NB #length(props.bi)
- #rc = NC #length(props.ci)
-
- #props.PC = fill(0,rc)
-
- PC = props.PC
- AtoC = props.AtoC
- BtoC = props.BtoC
-
- dleft = props.dleft
- dmid = props.dmid
- dright = props.dright
-
- dleft = 1
- dmid = 1
- dright = 1
- c = 1
- for i in 1:NA
- #if !contractedA(props,i)
- if !(AtoC[i] < 1)
- dleft *= size(A, i)
- #props.PC[props.AtoC[i]] = c
- PC = setindex(PC, c, AtoC[i])
- c += 1
- else
- dmid *= size(A, i)
- end
- end
- for j in 1:NB
- #if !contractedB(props,j)
- if !(BtoC[j] < 1)
- dright *= size(B, j)
- #props.PC[props.BtoC[j]] = c
- PC = setindex(PC, c, BtoC[j])
- c += 1
- end
- end
- props.PC = PC
- props.dleft = dleft
- props.dmid = dmid
- props.dright = dright
-
- if !is_trivial_permutation(props.PC)
- props.permuteC = true
- if checkBCsameord(props) && checkACsameord(props)
- #Can avoid permuting C by
- #computing Bt*At = Ct
- props.ctrans = true
- props.permuteC = false
- end
- end
-
- #Check if A can be treated as a matrix without permuting
- props.permuteA = false
- if !(contractedA(props, 1) || contractedA(props, NA))
- #If contracted indices are not all at front or back,
- #will have to permute A
- props.permuteA = true
- else
- #Contracted ind start at front or back, check if contiguous
- #TODO: check that the limits are correct (1-indexed vs. 0-indexed)
- for i in 1:(props.ncont)
- if !contractedA(props, props.Acstart + i - 1)
- #Contracted indices not contiguous, must permute
- props.permuteA = true
- break
- end
- end
- end
-
- #Check if B is matrix-like
- props.permuteB = false
- if !(contractedB(props, 1) || contractedB(props, NB))
- #If contracted indices are not all at front or back,
- #will have to permute B
- props.permuteB = true
- else
- #TODO: check that the limits are correct (1-indexed vs. 0-indexed)
- for i in 1:(props.ncont)
- if !contractedB(props, props.Bcstart + i - 1)
- #Contracted inds not contiguous, permute
- props.permuteB = true
- break
- end
- end
- end
-
- if !props.permuteA && !props.permuteB
- #Check if contracted inds. in same order
- #TODO: check these limits are correct
- for i in 1:(props.ncont)
- if props.AtoB[props.Acstart + i - 1] != (props.Bcstart + i - 1)
- #If not in same order,
- #must permute one of A or B
- #so permute the smaller one
- props.dleft < props.dright ? (props.permuteA = true) : (props.permuteB = true)
- break
- end
- end
- end
-
- if props.permuteC && !(props.permuteA && props.permuteB)
- PCost(d::Real) = d * d
- #Could avoid permuting C if
- #permute both A and B, worth it?
- pCcost = PCost(props.dleft * props.dright)
- extra_pABcost = 0
- !props.permuteA && (extra_pABcost += PCost(props.dleft * props.dmid))
- !props.permuteB && (extra_pABcost += PCost(props.dmid * props.dright))
- if extra_pABcost < pCcost
- props.permuteA = true
- props.permuteB = true
- props.permuteC = false
- end
- end
+ props::ContractionProperties{NA, NB, NC}, A, B, C
+ ) where {NA, NB, NC}
+ compute_perms!(props)
+
+ #Use props.PC.size() as a check to see if we've already run this
+ #length(props.PC)!=0 && return
+
+ #ra = NA #length(props.ai)
+ #rb = NB #length(props.bi)
+ #rc = NC #length(props.ci)
- if props.permuteA
- #props.PA = fill(0,ra)
- #Permute contracted indices to the front,
- #in the same order as on B
+ #props.PC = fill(0,rc)
+ PC = props.PC
AtoC = props.AtoC
BtoC = props.BtoC
- ai = props.ai
- bi = props.bi
- PA = props.PA
- newi = 0
- bind = props.Bcstart
- for i in 1:(props.ncont)
- while !(BtoC[bind] < 1)
- bind += 1
- end
- j = findfirst(==(bi[bind]), ai)
- #props.PA[newi + 1] = j
- PA = setindex(PA, j, newi + 1)
- bind += 1
- newi += 1
+ dleft = props.dleft
+ dmid = props.dmid
+ dright = props.dright
+
+ dleft = 1
+ dmid = 1
+ dright = 1
+ c = 1
+ for i in 1:NA
+ #if !contractedA(props,i)
+ if !(AtoC[i] < 1)
+ dleft *= size(A, i)
+ #props.PC[props.AtoC[i]] = c
+ PC = setindex(PC, c, AtoC[i])
+ c += 1
+ else
+ dmid *= size(A, i)
+ end
end
- #Reset p.AtoC:
- #fill!(props.AtoC,0)
- AtoC = ntuple(_ -> 0, Val(NA))
- #Permute uncontracted indices to
- #appear in same order as on C
- #TODO: check this is correct for 1-indexing
- for k in 1:NC
- j = findfirst(==(props.ci[k]), props.ai)
- if !isnothing(j)
- #props.AtoC[newi+1] = k
- AtoC = setindex(AtoC, k, newi + 1)
- #props.PA[newi+1] = j
- PA = setindex(PA, j, newi + 1)
- newi += 1
- end
- newi == NA && break
+ for j in 1:NB
+ #if !contractedB(props,j)
+ if !(BtoC[j] < 1)
+ dright *= size(B, j)
+ #props.PC[props.BtoC[j]] = c
+ PC = setindex(PC, c, BtoC[j])
+ c += 1
+ end
end
- props.PA = PA
- props.AtoC = AtoC
- end
-
- ##Also update props.Austart,props.Acstart
-
- Acstart = props.Acstart
- Austart = props.Austart
- newArange = props.newArange
- PA = props.PA
-
- Acstart = NA + 1
- Austart = NA + 1
- #TODO: check this is correct for 1-indexing
- for i in 1:NA
- #if contractedA(props,i)
- if @inbounds AtoC[i] < 1
- Acstart = min(i, Acstart)
- else
- Austart = min(i, Austart)
+ props.PC = PC
+ props.dleft = dleft
+ props.dmid = dmid
+ props.dright = dright
+
+ if !is_trivial_permutation(props.PC)
+ props.permuteC = true
+ if checkBCsameord(props) && checkACsameord(props)
+ #Can avoid permuting C by
+ #computing Bt*At = Ct
+ props.ctrans = true
+ props.permuteC = false
+ end
end
- #props.newArange = permute_extents([size(A)...],props.PA)
- newArange = permute(size(A), PA) #[size(A)...][props.PA]
- end
- props.Acstart = Acstart
- props.Austart = Austart
- props.newArange = newArange
-
- if (props.permuteB)
- PB = props.PB
- AtoC = props.AtoC
- BtoC = props.BtoC
- ai = props.ai
- bi = props.bi
- ci = props.ci
- Bcstart = props.Bcstart
- Bustart = props.Bustart
- #props.PB = fill(0,rb)
- #TODO: check this is correct for 1-indexing
- newi = 0 #1
-
- if (props.permuteA)
- #A's contracted indices already set to
- #be in same order as B above, so just
- #permute contracted indices to the front
- #keeping relative order
-
- i = props.Bcstart
- while newi < props.ncont
- while !(BtoC[i] < 1)
- i += 1
+ #Check if A can be treated as a matrix without permuting
+ props.permuteA = false
+ if !(contractedA(props, 1) || contractedA(props, NA))
+ #If contracted indices are not all at front or back,
+ #will have to permute A
+ props.permuteA = true
+ else
+ #Contracted ind start at front or back, check if contiguous
+ #TODO: check that the limits are correct (1-indexed vs. 0-indexed)
+ for i in 1:(props.ncont)
+ if !contractedA(props, props.Acstart + i - 1)
+ #Contracted indices not contiguous, must permute
+ props.permuteA = true
+ break
+ end
end
- #props.PB[newi+1] = i
- PB = setindex(PB, i, newi + 1)
- i += 1
- newi += 1
- end
+ end
+
+ #Check if B is matrix-like
+ props.permuteB = false
+ if !(contractedB(props, 1) || contractedB(props, NB))
+ #If contracted indices are not all at front or back,
+ #will have to permute B
+ props.permuteB = true
else
- #Permute contracted indices to the
- #front and in same order as on A
+ #TODO: check that the limits are correct (1-indexed vs. 0-indexed)
+ for i in 1:(props.ncont)
+ if !contractedB(props, props.Bcstart + i - 1)
+ #Contracted inds not contiguous, permute
+ props.permuteB = true
+ break
+ end
+ end
+ end
- aind = props.Acstart
- for i in 0:(props.ncont - 1)
- while !(AtoC[aind] < 1)
- aind += 1
+ if !props.permuteA && !props.permuteB
+ #Check if contracted inds. in same order
+ #TODO: check these limits are correct
+ for i in 1:(props.ncont)
+ if props.AtoB[props.Acstart + i - 1] != (props.Bcstart + i - 1)
+ #If not in same order,
+ #must permute one of A or B
+ #so permute the smaller one
+ props.dleft < props.dright ? (props.permuteA = true) : (props.permuteB = true)
+ break
+ end
end
- j = findfirst(==(ai[aind]), bi)
- #props.PB[newi + 1] = j
- PB = setindex(PB, j, newi + 1)
- aind += 1
- newi += 1
- end
end
- #Reset p.BtoC:
- #fill!(props.BtoC,0)
- BtoC = ntuple(_ -> 0, Val(NB))
-
- #Permute uncontracted indices to
- #appear in same order as on C
- for k in 1:NC
- j = findfirst(==(ci[k]), bi)
- if !isnothing(j)
- #props.BtoC[newi + 1] = k
- BtoC = setindex(BtoC, k, newi + 1)
- #props.PB[newi + 1] = j
- PB = setindex(PB, j, newi + 1)
- newi += 1
- end
- newi == NB && break
+ if props.permuteC && !(props.permuteA && props.permuteB)
+ PCost(d::Real) = d * d
+ #Could avoid permuting C if
+ #permute both A and B, worth it?
+ pCcost = PCost(props.dleft * props.dright)
+ extra_pABcost = 0
+ !props.permuteA && (extra_pABcost += PCost(props.dleft * props.dmid))
+ !props.permuteB && (extra_pABcost += PCost(props.dmid * props.dright))
+ if extra_pABcost < pCcost
+ props.permuteA = true
+ props.permuteB = true
+ props.permuteC = false
+ end
end
- Bcstart = NB
- Bustart = NB
- for i in 1:NB
- if BtoC[i] < 1
- Bcstart = min(i, Bcstart)
- else
- Bustart = min(i, Bustart)
- end
+
+ if props.permuteA
+ #props.PA = fill(0,ra)
+ #Permute contracted indices to the front,
+ #in the same order as on B
+
+ AtoC = props.AtoC
+ BtoC = props.BtoC
+ ai = props.ai
+ bi = props.bi
+ PA = props.PA
+
+ newi = 0
+ bind = props.Bcstart
+ for i in 1:(props.ncont)
+ while !(BtoC[bind] < 1)
+ bind += 1
+ end
+ j = findfirst(==(bi[bind]), ai)
+ #props.PA[newi + 1] = j
+ PA = setindex(PA, j, newi + 1)
+ bind += 1
+ newi += 1
+ end
+ #Reset p.AtoC:
+ #fill!(props.AtoC,0)
+ AtoC = ntuple(_ -> 0, Val(NA))
+ #Permute uncontracted indices to
+ #appear in same order as on C
+ #TODO: check this is correct for 1-indexing
+ for k in 1:NC
+ j = findfirst(==(props.ci[k]), props.ai)
+ if !isnothing(j)
+ #props.AtoC[newi+1] = k
+ AtoC = setindex(AtoC, k, newi + 1)
+ #props.PA[newi+1] = j
+ PA = setindex(PA, j, newi + 1)
+ newi += 1
+ end
+ newi == NA && break
+ end
+ props.PA = PA
+ props.AtoC = AtoC
end
- #props.newBrange = permute_extents([size(B)...],props.PB)
- #props.newBrange = [size(B)...][props.PB]
- props.newBrange = permute(size(B), PB)
- props.BtoC = BtoC
- props.PB = PB
- props.Bcstart = Bcstart
- props.Bustart = Bustart
- end
+ ##Also update props.Austart,props.Acstart
- if props.permuteA || props.permuteB
- AtoC = props.AtoC
- BtoC = props.BtoC
- PC = props.PC
+ Acstart = props.Acstart
+ Austart = props.Austart
+ newArange = props.newArange
+ PA = props.PA
- #Recompute props.PC
- c = 1
+ Acstart = NA + 1
+ Austart = NA + 1
#TODO: check this is correct for 1-indexing
for i in 1:NA
- AtoC_i = AtoC[i]
- if !(AtoC_i < 1)
- #props.PC[props.AtoC[i]] = c
- PC = setindex(PC, c, AtoC_i)
- c += 1
- end
- end
- #TODO: check this is correct for 1-indexing
- for j in 1:NB
- BtoC_j = BtoC[j]
- if !(BtoC_j < 1)
- #props.PC[props.BtoC[j]] = c
- PC = setindex(PC, c, BtoC_j)
- c += 1
- end
+ #if contractedA(props,i)
+ if @inbounds AtoC[i] < 1
+ Acstart = min(i, Acstart)
+ else
+ Austart = min(i, Austart)
+ end
+ #props.newArange = permute_extents([size(A)...],props.PA)
+ newArange = permute(size(A), PA) #[size(A)...][props.PA]
end
- props.PC = PC
+ props.Acstart = Acstart
+ props.Austart = Austart
+ props.newArange = newArange
+
+ if (props.permuteB)
+ PB = props.PB
+ AtoC = props.AtoC
+ BtoC = props.BtoC
+ ai = props.ai
+ bi = props.bi
+ ci = props.ci
+ Bcstart = props.Bcstart
+ Bustart = props.Bustart
+
+ #props.PB = fill(0,rb)
+ #TODO: check this is correct for 1-indexing
+ newi = 0 #1
+
+ if (props.permuteA)
+ #A's contracted indices already set to
+ #be in same order as B above, so just
+ #permute contracted indices to the front
+ #keeping relative order
+
+ i = props.Bcstart
+ while newi < props.ncont
+ while !(BtoC[i] < 1)
+ i += 1
+ end
+ #props.PB[newi+1] = i
+ PB = setindex(PB, i, newi + 1)
+ i += 1
+ newi += 1
+ end
+ else
+ #Permute contracted indices to the
+ #front and in same order as on A
+
+ aind = props.Acstart
+ for i in 0:(props.ncont - 1)
+ while !(AtoC[aind] < 1)
+ aind += 1
+ end
+ j = findfirst(==(ai[aind]), bi)
+ #props.PB[newi + 1] = j
+ PB = setindex(PB, j, newi + 1)
+ aind += 1
+ newi += 1
+ end
+ end
- props.ctrans = false
- if (is_trivial_permutation(PC))
- props.permuteC = false
- else
- props.permuteC = true
- #Here we already know since pc_triv = false that
- #at best indices from B precede those from A (on result C)
- #so if both sets remain in same order on C
- #just need to transpose C, not permute it
- if checkBCsameord(props) && checkACsameord(props)
- props.ctrans = true
- props.permuteC = false
- end
+ #Reset p.BtoC:
+ #fill!(props.BtoC,0)
+ BtoC = ntuple(_ -> 0, Val(NB))
+
+ #Permute uncontracted indices to
+ #appear in same order as on C
+ for k in 1:NC
+ j = findfirst(==(ci[k]), bi)
+ if !isnothing(j)
+ #props.BtoC[newi + 1] = k
+ BtoC = setindex(BtoC, k, newi + 1)
+ #props.PB[newi + 1] = j
+ PB = setindex(PB, j, newi + 1)
+ newi += 1
+ end
+ newi == NB && break
+ end
+ Bcstart = NB
+ Bustart = NB
+ for i in 1:NB
+ if BtoC[i] < 1
+ Bcstart = min(i, Bcstart)
+ else
+ Bustart = min(i, Bustart)
+ end
+ end
+ #props.newBrange = permute_extents([size(B)...],props.PB)
+ #props.newBrange = [size(B)...][props.PB]
+ props.newBrange = permute(size(B), PB)
+
+ props.BtoC = BtoC
+ props.PB = PB
+ props.Bcstart = Bcstart
+ props.Bustart = Bustart
end
- end
- if props.permuteC
- Rb = MVector{NC,Int}(undef) #Int[]
- k = 1
- AtoC = props.AtoC
- BtoC = props.BtoC
- if !props.permuteA
- #TODO: check this is correct for 1-indexing
- for i in 1:NA
- if !(AtoC[i] < 1)
- #push!(Rb,size(A,i))
- Rb[k] = size(A, i)
- k = k + 1
+ if props.permuteA || props.permuteB
+ AtoC = props.AtoC
+ BtoC = props.BtoC
+ PC = props.PC
+
+ #Recompute props.PC
+ c = 1
+ #TODO: check this is correct for 1-indexing
+ for i in 1:NA
+ AtoC_i = AtoC[i]
+ if !(AtoC_i < 1)
+ #props.PC[props.AtoC[i]] = c
+ PC = setindex(PC, c, AtoC_i)
+ c += 1
+ end
end
- end
- else
- #TODO: check this is correct for 1-indexing
- for i in 1:NA
- if !(AtoC[i] < 1)
- #push!(Rb,size(props.newArange,i))
- Rb[k] = props.newArange[i]
- k = k + 1
+ #TODO: check this is correct for 1-indexing
+ for j in 1:NB
+ BtoC_j = BtoC[j]
+ if !(BtoC_j < 1)
+ #props.PC[props.BtoC[j]] = c
+ PC = setindex(PC, c, BtoC_j)
+ c += 1
+ end
+ end
+ props.PC = PC
+
+ props.ctrans = false
+ if (is_trivial_permutation(PC))
+ props.permuteC = false
+ else
+ props.permuteC = true
+ #Here we already know since pc_triv = false that
+ #at best indices from B precede those from A (on result C)
+ #so if both sets remain in same order on C
+ #just need to transpose C, not permute it
+ if checkBCsameord(props) && checkACsameord(props)
+ props.ctrans = true
+ props.permuteC = false
+ end
end
- end
end
- if !props.permuteB
- #TODO: check this is correct for 1-indexing
- for j in 1:NB
- if !(BtoC[j] < 1)
- #push!(Rb,size(B,j))
- Rb[k] = size(B, j)
- k = k + 1
+
+ return if props.permuteC
+ Rb = MVector{NC, Int}(undef) #Int[]
+ k = 1
+ AtoC = props.AtoC
+ BtoC = props.BtoC
+ if !props.permuteA
+ #TODO: check this is correct for 1-indexing
+ for i in 1:NA
+ if !(AtoC[i] < 1)
+ #push!(Rb,size(A,i))
+ Rb[k] = size(A, i)
+ k = k + 1
+ end
+ end
+ else
+ #TODO: check this is correct for 1-indexing
+ for i in 1:NA
+ if !(AtoC[i] < 1)
+ #push!(Rb,size(props.newArange,i))
+ Rb[k] = props.newArange[i]
+ k = k + 1
+ end
+ end
end
- end
- else
- #TODO: check this is correct for 1-indexing
- for j in 1:NB
- if !(BtoC[j] < 1)
- #push!(Rb,size(props.newBrange,j))
- Rb[k] = props.newBrange[j]
- k = k + 1
+ if !props.permuteB
+ #TODO: check this is correct for 1-indexing
+ for j in 1:NB
+ if !(BtoC[j] < 1)
+ #push!(Rb,size(B,j))
+ Rb[k] = size(B, j)
+ k = k + 1
+ end
+ end
+ else
+ #TODO: check this is correct for 1-indexing
+ for j in 1:NB
+ if !(BtoC[j] < 1)
+ #push!(Rb,size(props.newBrange,j))
+ Rb[k] = props.newBrange[j]
+ k = k + 1
+ end
+ end
end
- end
+ props.newCrange = Tuple(Rb)
end
- props.newCrange = Tuple(Rb)
- end
end
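
The block above carries the core decision logic for reducing a pairwise tensor contraction to a matrix multiplication: a tensor can be used unpermuted only if its contracted indices form a contiguous run at the front or back and appear in the same order on both tensors; otherwise the cheaper of A and B (or both, if that avoids permuting C) gets permuted. A minimal standalone sketch of the contiguity check, using the hypothetical helper name `is_matrix_like` (not part of NDTensors):

```julia
# Sketch: can a tensor with `nind` indices be treated as a matrix without permuting,
# given the sorted positions of its contracted indices?
function is_matrix_like(contracted_positions::NTuple{N, Int}, nind::Int) where {N}
    # The contracted indices must be contiguous...
    iscontiguous = all(i -> contracted_positions[i + 1] == contracted_positions[i] + 1, 1:(N - 1))
    # ...and the run must start at the first index or end at the last one.
    return iscontiguous &&
        (first(contracted_positions) == 1 || last(contracted_positions) == nind)
end

is_matrix_like((1, 2), 4)  # true:  contracted block sits at the front, a reshape suffices
is_matrix_like((2, 3), 4)  # false: a permutation would be required
```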
diff --git a/NDTensors/src/truncate.jl b/NDTensors/src/truncate.jl
index 6a5459489a..5f46bb2655 100644
--- a/NDTensors/src/truncate.jl
+++ b/NDTensors/src/truncate.jl
@@ -1,105 +1,105 @@
using TypeParameterAccessors: unwrap_array_type
## TODO write Exposed version of truncate
function truncate!!(P::AbstractArray; kwargs...)
- return truncate!!(unwrap_array_type(P), P; kwargs...)
+ return truncate!!(unwrap_array_type(P), P; kwargs...)
end
# CPU version.
function truncate!!(::Type{<:Array}, P::AbstractArray; kwargs...)
- truncerr, docut = truncate!(P; kwargs...)
- return P, truncerr, docut
+ truncerr, docut = truncate!(P; kwargs...)
+ return P, truncerr, docut
end
using TypeParameterAccessors: unwrap_array_type
# GPU fallback version, convert to CPU.
function truncate!!(::Type{<:AbstractArray}, P::AbstractArray; kwargs...)
- P_cpu = cpu(P)
- truncerr, docut = truncate!(P_cpu; kwargs...)
- P = adapt(unwrap_array_type(P), P_cpu)
- return P, truncerr, docut
+ P_cpu = cpu(P)
+ truncerr, docut = truncate!(P_cpu; kwargs...)
+ P = adapt(unwrap_array_type(P), P_cpu)
+ return P, truncerr, docut
end
# CPU implementation.
function truncate!(
- P::AbstractVector;
- mindim=nothing,
- maxdim=nothing,
- cutoff=nothing,
- use_absolute_cutoff=nothing,
- use_relative_cutoff=nothing,
-)
- mindim = replace_nothing(mindim, default_mindim(P))
- maxdim = replace_nothing(maxdim, length(P))
- cutoff = replace_nothing(cutoff, typemin(eltype(P)))
- use_absolute_cutoff = replace_nothing(use_absolute_cutoff, default_use_absolute_cutoff(P))
- use_relative_cutoff = replace_nothing(use_relative_cutoff, default_use_relative_cutoff(P))
+ P::AbstractVector;
+ mindim = nothing,
+ maxdim = nothing,
+ cutoff = nothing,
+ use_absolute_cutoff = nothing,
+ use_relative_cutoff = nothing,
+ )
+ mindim = replace_nothing(mindim, default_mindim(P))
+ maxdim = replace_nothing(maxdim, length(P))
+ cutoff = replace_nothing(cutoff, typemin(eltype(P)))
+ use_absolute_cutoff = replace_nothing(use_absolute_cutoff, default_use_absolute_cutoff(P))
+ use_relative_cutoff = replace_nothing(use_relative_cutoff, default_use_relative_cutoff(P))
- origm = length(P)
- docut = zero(eltype(P))
+ origm = length(P)
+ docut = zero(eltype(P))
- #if P[1] <= 0.0
- # P[1] = 0.0
- # resize!(P, 1)
- # return 0.0, 0.0
- #end
+ #if P[1] <= 0.0
+ # P[1] = 0.0
+ # resize!(P, 1)
+ # return 0.0, 0.0
+ #end
- if origm == 1
- docut = abs(P[1]) / 2
- return zero(eltype(P)), docut
- end
-
- s = sign(P[1])
- s < 0 && (P .*= s)
-
- #Zero out any negative weight
- for n in origm:-1:1
- (P[n] >= zero(eltype(P))) && break
- P[n] = zero(eltype(P))
- end
+ if origm == 1
+ docut = abs(P[1]) / 2
+ return zero(eltype(P)), docut
+ end
- n = origm
- truncerr = zero(eltype(P))
- while n > maxdim
- truncerr += P[n]
- n -= 1
- end
+ s = sign(P[1])
+ s < 0 && (P .*= s)
- if use_absolute_cutoff
- #Test if individual prob. weights fall below cutoff
- #rather than using *sum* of discarded weights
- while P[n] <= cutoff && n > mindim
- truncerr += P[n]
- n -= 1
- end
- else
- scale = one(eltype(P))
- if use_relative_cutoff
- scale = sum(P)
- (scale == zero(eltype(P))) && (scale = one(eltype(P)))
+ #Zero out any negative weight
+ for n in origm:-1:1
+ (P[n] >= zero(eltype(P))) && break
+ P[n] = zero(eltype(P))
end
- #Continue truncating until *sum* of discarded probability
- #weight reaches cutoff reached (or m==mindim)
- while (truncerr + P[n] <= cutoff * scale) && (n > mindim)
- truncerr += P[n]
- n -= 1
+ n = origm
+ truncerr = zero(eltype(P))
+ while n > maxdim
+ truncerr += P[n]
+ n -= 1
end
- truncerr /= scale
- end
+ if use_absolute_cutoff
+ #Test if individual prob. weights fall below cutoff
+ #rather than using *sum* of discarded weights
+ while P[n] <= cutoff && n > mindim
+ truncerr += P[n]
+ n -= 1
+ end
+ else
+ scale = one(eltype(P))
+ if use_relative_cutoff
+ scale = sum(P)
+ (scale == zero(eltype(P))) && (scale = one(eltype(P)))
+ end
- if n < 1
- n = 1
- end
+ #Continue truncating until *sum* of discarded probability
+        #weight reaches cutoff (or m==mindim)
+ while (truncerr + P[n] <= cutoff * scale) && (n > mindim)
+ truncerr += P[n]
+ n -= 1
+ end
+
+ truncerr /= scale
+ end
+
+ if n < 1
+ n = 1
+ end
- if n < origm
- docut = (P[n] + P[n + 1]) / 2
- if abs(P[n] - P[n + 1]) < eltype(P)(1e-3) * P[n]
- docut += eltype(P)(1e-3) * P[n]
+ if n < origm
+ docut = (P[n] + P[n + 1]) / 2
+ if abs(P[n] - P[n + 1]) < eltype(P)(1.0e-3) * P[n]
+ docut += eltype(P)(1.0e-3) * P[n]
+ end
end
- end
- s < 0 && (P .*= s)
- resize!(P, n)
- return truncerr, docut
+ s < 0 && (P .*= s)
+ resize!(P, n)
+ return truncerr, docut
end
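
For orientation, `truncate!` above first discards trailing spectrum weights to satisfy `maxdim`, then keeps discarding while the accumulated discarded weight stays below the (absolute or relative) cutoff, never dropping below `mindim`. A hedged, self-contained sketch of the relative-cutoff rule (the name `truncate_weights` is hypothetical and the sign/negative-weight handling done above is omitted):

```julia
# Sketch of the relative-cutoff truncation rule used above (simplified).
function truncate_weights(P::Vector{Float64}; maxdim = length(P), mindim = 1, cutoff = 0.0)
    n = length(P)
    truncerr = 0.0
    while n > maxdim                     # enforce the maximum dimension first
        truncerr += P[n]
        n -= 1
    end
    scale = sum(P)                       # relative cutoff: compare against the total weight
    while n > mindim && truncerr + P[n] <= cutoff * scale
        truncerr += P[n]
        n -= 1
    end
    return P[1:n], truncerr / scale
end

truncate_weights([0.6, 0.3, 0.05, 0.03, 0.02]; cutoff = 0.06)
# keeps [0.6, 0.3, 0.05] with a relative truncation error of 0.05
```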
diff --git a/NDTensors/src/tupletools.jl b/NDTensors/src/tupletools.jl
index ce7dac1168..11740c02a9 100644
--- a/NDTensors/src/tupletools.jl
+++ b/NDTensors/src/tupletools.jl
@@ -1,16 +1,15 @@
-
# This is a cache of [Val(1), Val(2), ...]
# Hard-coded for now to only handle tensors up to order 100
const ValCache = Val[Val(n) for n in 0:100]
# Faster conversions of collection to tuple than `Tuple(::AbstractVector)`
-_NTuple(::Val{N}, v::Vector{T}) where {N,T} = ntuple(n -> v[n], Val(N))
+_NTuple(::Val{N}, v::Vector{T}) where {N, T} = ntuple(n -> v[n], Val(N))
_Tuple(v::Vector{T}) where {T} = _NTuple(ValCache[length(v) + 1], v)
_Tuple(t::Tuple) = t
"""
ValLength(::Type{NTuple{N}}) = Val{N}
"""
-ValLength(::Type{NTuple{N,T}}) where {N,T} = Val{N}
+ValLength(::Type{NTuple{N, T}}) where {N, T} = Val{N}
"""
ValLength(::NTuple{N}) = Val(N)
@@ -21,9 +20,9 @@ ValLength(::NTuple{N}) where {N} = Val(N)
# is not type stable and therefore not efficient.
ValLength(v::Vector) = Val(length(v))
-ValLength(::Tuple{Vararg{Any,N}}) where {N} = Val(N)
+ValLength(::Tuple{Vararg{Any, N}}) where {N} = Val(N)
-ValLength(::Type{<:Tuple{Vararg{Any,N}}}) where {N} = Val{N}
+ValLength(::Type{<:Tuple{Vararg{Any, N}}}) where {N} = Val{N}
ValLength(::CartesianIndex{N}) where {N} = Val(N)
ValLength(::Type{CartesianIndex{N}}) where {N} = Val{N}
@@ -40,18 +39,18 @@ popfirst(s::NTuple{N}) where {N} = ntuple(i -> s[i + 1], Val(N - 1))
# (for example, tuple, MVector, etc.)
# as long as the constructor accepts a tuple
@inline function _permute(s, perm)
- return ntuple(i -> s[perm[i]], ValLength(perm))
+ return ntuple(i -> s[perm[i]], ValLength(perm))
end
permute(s::Tuple, perm) = _permute(s, perm)
# TODO: is this needed?
-function permute(s::T, perm) where {T<:NTuple}
- return T(_permute(s, perm))
+function permute(s::T, perm) where {T <: NTuple}
+ return T(_permute(s, perm))
end
function permute(s::T, perm) where {T}
- return T(_permute(Tuple(s), perm))
+ return T(_permute(Tuple(s), perm))
end
# TODO: This is to handle Vector, is this correct?
@@ -60,7 +59,7 @@ permute(s::AbstractVector, perm) = _permute(s, perm)
sim(s::NTuple) = s
# type stable findfirst
-@inline _findfirst(args...) = (i=findfirst(args...); i === nothing ? 0 : i)
+@inline _findfirst(args...) = (i = findfirst(args...); i === nothing ? 0 : i)
"""
getperm(col1,col2)
@@ -69,7 +68,7 @@ Get the permutation that takes collection 2 to collection 1,
such that col2[p].==col1
"""
@inline function getperm(s1, s2)
- return ntuple(i -> _findfirst(==(@inbounds s1[i]), s2), length(s1))
+ return ntuple(i -> _findfirst(==(@inbounds s1[i]), s2), length(s1))
end
"""
@@ -78,46 +77,46 @@ end
Get the permutations that takes collections 2 and 3 to collection 1.
"""
function getperms(s, s1, s2)
- N = length(s)
- N1 = length(s1)
- N2 = length(s2)
- N1 + N2 ≠ N && error("Size of partial sets don't match with total set")
- perm1 = ntuple(i -> findfirst(==(s1[i]), s), Val(N1))
- perm2 = ntuple(i -> findfirst(==(s2[i]), s), Val(N2))
- isperm((perm1..., perm2...)) ||
- error("Combined permutations are $((perm1...,perm2...)), not a valid permutation")
- return perm1, perm2
+ N = length(s)
+ N1 = length(s1)
+ N2 = length(s2)
+ N1 + N2 ≠ N && error("Size of partial sets don't match with total set")
+ perm1 = ntuple(i -> findfirst(==(s1[i]), s), Val(N1))
+ perm2 = ntuple(i -> findfirst(==(s2[i]), s), Val(N2))
+ isperm((perm1..., perm2...)) ||
+ error("Combined permutations are $((perm1..., perm2...)), not a valid permutation")
+ return perm1, perm2
end
function invperm!(permres, perm)
- for i in 1:length(perm)
- permres[perm[i]] = i
- end
- return permres
+ for i in 1:length(perm)
+ permres[perm[i]] = i
+ end
+ return permres
end
-function invperm(perm::NTuple{N,Int}) where {N}
- mpermres = MVector{N,Int}(undef)
- invperm!(mpermres, perm)
- return Tuple(mpermres)
+function invperm(perm::NTuple{N, Int}) where {N}
+ mpermres = MVector{N, Int}(undef)
+ invperm!(mpermres, perm)
+ return Tuple(mpermres)
end
function invperm(perm)
- permres = similar(perm)
- invperm!(permres, perm)
- return permres
+ permres = similar(perm)
+ invperm!(permres, perm)
+ return permres
end
# Override TupleTools.isperm to speed up
# Strided.permutedims a bit (see:
# https://github.com/Jutho/Strided.jl/issues/15)
function isperm(p::NTuple{N}) where {N}
- N < 6 && return Base.isperm(p)
- used = @MVector zeros(Bool, N)
- for a in p
- (0 < a <= N) && (used[a] ⊻= true) || return false
- end
- return true
+ N < 6 && return Base.isperm(p)
+ used = @MVector zeros(Bool, N)
+ for a in p
+ (0 < a <= N) && (used[a] ⊻= true) || return false
+ end
+ return true
end
"""
@@ -126,13 +125,13 @@ end
Determine if P is a trivial permutation.
"""
function is_trivial_permutation(P)
- #isperm(P) || error("Input is not a permutation")
- # TODO: use `all(n->P[n]==n,1:length(P))`?
- N = length(P)
- for n in 1:N
- @inbounds P[n] != n && return false
- end
- return true
+ #isperm(P) || error("Input is not a permutation")
+ # TODO: use `all(n->P[n]==n,1:length(P))`?
+ N = length(P)
+ for n in 1:N
+ @inbounds P[n] != n && return false
+ end
+ return true
end
# Combine a bunch of tuples
@@ -141,38 +140,38 @@ end
@inline flatten(x, y, z...) = (x..., flatten(y, z...)...)
function _deleteat(t, pos, i)
- i < pos && return t[i]
- return t[i + 1]
+ i < pos && return t[i]
+ return t[i + 1]
end
function deleteat(t::Tuple, pos::Integer)
- return ntuple(i -> _deleteat(t, pos, i), Val(length(t) - 1))
+ return ntuple(i -> _deleteat(t, pos, i), Val(length(t) - 1))
end
deleteat(t::Tuple, I::Tuple{Int}) = deleteat(t, I[1])
-function deleteat(t::Tuple, I::Tuple{Int,Int,Vararg{Int}})
- return deleteat_sorted(t, TupleTools.sort(I; rev=true))
+function deleteat(t::Tuple, I::Tuple{Int, Int, Vararg{Int}})
+ return deleteat_sorted(t, TupleTools.sort(I; rev = true))
end
deleteat_sorted(t::Tuple, pos::Int64) = deleteat(t, pos[1])
deleteat_sorted(t::Tuple, pos::Tuple{Int}) = deleteat(t, pos[1])
-function deleteat_sorted(t::Tuple, pos::NTuple{N,Int}) where {N}
- return deleteat_sorted(deleteat_sorted(t, pos[1]), Base.tail(pos))
+function deleteat_sorted(t::Tuple, pos::NTuple{N, Int}) where {N}
+ return deleteat_sorted(deleteat_sorted(t, pos[1]), Base.tail(pos))
end
# Make a slice of the block on the specified dimensions
# Make this a generic tupletools function (TupleTools.jl calls it getindices)
-function getindices(t::Tuple, I::NTuple{N,Int}) where {N}
- return ntuple(i -> t[I[i]], Val(N))
+function getindices(t::Tuple, I::NTuple{N, Int}) where {N}
+ return ntuple(i -> t[I[i]], Val(N))
end
function _insertat(t, pos, n_insert, val, i)
- if i < pos
- return t[i]
- elseif i > pos + n_insert - 1
- return t[i - n_insert + 1]
- end
- return val[i - pos + 1]
+ if i < pos
+ return t[i]
+ elseif i > pos + n_insert - 1
+ return t[i - n_insert + 1]
+ end
+ return val[i - pos + 1]
end
"""
@@ -181,20 +180,20 @@ end
Remove the value at pos and insert the elements in val
"""
function insertat(t::Tuple, val::Tuple, pos::Integer)
- N, M = length(t), length(val)
- @boundscheck checkbounds(Base.OneTo(N), pos)
- return ntuple(i -> _insertat(t, pos, M, val, i), Val(N + M - 1))
+ N, M = length(t), length(val)
+ @boundscheck checkbounds(Base.OneTo(N), pos)
+ return ntuple(i -> _insertat(t, pos, M, val, i), Val(N + M - 1))
end
insertat(t::Tuple, val, pos::Integer) = insertat(t, tuple(val), pos)
function _insertafter(t, pos, n_insert, val, i)
- if i <= pos
- return t[i]
- elseif i > pos + n_insert
- return t[i - n_insert]
- end
- return val[i - pos]
+ if i <= pos
+ return t[i]
+ elseif i > pos + n_insert
+ return t[i - n_insert]
+ end
+ return val[i - pos]
end
"""
@@ -202,12 +201,12 @@ end
Insert the elements in val after the position pos
"""
-function insertafter(t::NTuple{N}, val::NTuple{M}, pos::Integer) where {N,M}
- return ntuple(i -> _insertafter(t, pos, M, val, i), Val(N + M))
+function insertafter(t::NTuple{N}, val::NTuple{M}, pos::Integer) where {N, M}
+ return ntuple(i -> _insertafter(t, pos, M, val, i), Val(N + M))
end
function insertafter(t::NTuple{N}, val, pos::Integer) where {N}
- return insertafter(t, tuple(val), pos)
+ return insertafter(t, tuple(val), pos)
end
"""
@@ -216,12 +215,12 @@ end
Determine if s1 and s2 have no overlapping elements.
"""
function isdisjoint(s1, s2)
- for i1 in 1:length(s1)
- for i2 in 1:length(s2)
- s1[i1] == s2[i2] && return false
+ for i1 in 1:length(s1)
+ for i2 in 1:length(s2)
+ s1[i1] == s2[i2] && return false
+ end
end
- end
- return true
+ return true
end
"""
@@ -233,40 +232,40 @@ where element i is t[i+1] - t[i].
diff(t::NTuple{N}) where {N} = ntuple(i -> t[i + 1] - t[i], Val(N - 1))
function count_unique(labelsT1, labelsT2)
- count = 0
- for l1 in labelsT1
- l1 ∉ labelsT2 && (count += 1)
- end
- return count
+ count = 0
+ for l1 in labelsT1
+ l1 ∉ labelsT2 && (count += 1)
+ end
+ return count
end
function count_common(labelsT1, labelsT2)
- count = 0
- for l1 in labelsT1
- l1 ∈ labelsT2 && (count += 1)
- end
- return count
+ count = 0
+ for l1 in labelsT1
+ l1 ∈ labelsT2 && (count += 1)
+ end
+ return count
end
function intersect_positions(labelsT1, labelsT2)
- for i1 in 1:length(labelsT1)
- for i2 in 1:length(labelsT2)
- if labelsT1[i1] == labelsT2[i2]
- return i1, i2
- end
+ for i1 in 1:length(labelsT1)
+ for i2 in 1:length(labelsT2)
+ if labelsT1[i1] == labelsT2[i2]
+ return i1, i2
+ end
+ end
end
- end
- return nothing
+ return nothing
end
function is_replacement(labelsT1, labelsT2)
- return count_unique(labelsT1, labelsT2) == 1 && count_common(labelsT1, labelsT2) == 1
+ return count_unique(labelsT1, labelsT2) == 1 && count_common(labelsT1, labelsT2) == 1
end
function is_combiner(labelsT1, labelsT2)
- return count_unique(labelsT1, labelsT2) == 1 && count_common(labelsT1, labelsT2) > 1
+ return count_unique(labelsT1, labelsT2) == 1 && count_common(labelsT1, labelsT2) > 1
end
function is_uncombiner(labelsT1, labelsT2)
- return count_unique(labelsT1, labelsT2) > 1 && count_common(labelsT1, labelsT2) == 1
+ return count_unique(labelsT1, labelsT2) > 1 && count_common(labelsT1, labelsT2) == 1
end
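
The tuple utilities above (e.g. `getperm`, `permute`, `is_trivial_permutation`) compute and apply index permutations with lengths known at compile time. A hedged illustration of the same idea using only Base functions (not the NDTensors internals themselves):

```julia
# Find the permutation taking labels `s2` to labels `s1`, then apply it.
s1 = (:i, :j, :k)
s2 = (:k, :i, :j)
perm = ntuple(n -> findfirst(==(s1[n]), s2), length(s1))   # (2, 3, 1)
permuted = ntuple(n -> s2[perm[n]], length(perm))          # (:i, :j, :k)
@assert permuted == s1
@assert perm != ntuple(identity, length(perm))             # i.e. not the trivial permutation
```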
diff --git a/NDTensors/test/test_linearalgebra.jl b/NDTensors/test/test_linearalgebra.jl
index 96bde8efdf..b92fe68d88 100644
--- a/NDTensors/test/test_linearalgebra.jl
+++ b/NDTensors/test/test_linearalgebra.jl
@@ -8,89 +8,89 @@ include("NDTensorsTestUtils/NDTensorsTestUtils.jl")
using .NDTensorsTestUtils: devices_list, is_supported_eltype
@testset "random_orthog" begin
- n, m = 10, 4
- O1 = random_orthog(n, m)
- @test eltype(O1) == Float64
- @test norm(transpose(O1) * O1 - Diagonal(fill(1.0, m))) < 1E-14
- O2 = random_orthog(m, n)
- @test norm(O2 * transpose(O2) - Diagonal(fill(1.0, m))) < 1E-14
+ n, m = 10, 4
+ O1 = random_orthog(n, m)
+ @test eltype(O1) == Float64
+ @test norm(transpose(O1) * O1 - Diagonal(fill(1.0, m))) < 1.0e-14
+ O2 = random_orthog(m, n)
+ @test norm(O2 * transpose(O2) - Diagonal(fill(1.0, m))) < 1.0e-14
end
@testset "random_unitary" begin
- n, m = 10, 4
- U1 = random_unitary(n, m)
- @test eltype(U1) == ComplexF64
- @test norm(U1' * U1 - Diagonal(fill(1.0, m))) < 1E-14
- U2 = random_unitary(m, n)
- @test norm(U2 * U2' - Diagonal(fill(1.0, m))) < 1E-14
+ n, m = 10, 4
+ U1 = random_unitary(n, m)
+ @test eltype(U1) == ComplexF64
+ @test norm(U1' * U1 - Diagonal(fill(1.0, m))) < 1.0e-14
+ U2 = random_unitary(m, n)
+ @test norm(U2 * U2' - Diagonal(fill(1.0, m))) < 1.0e-14
end
@testset "QX testing" begin
- @testset "Dense $qx decomposition, elt=$elt, positve=$positive, singular=$singular, device=$dev" for qx in
- [
- qr, ql
- ],
- elt in (Float64, ComplexF64, Float32, ComplexF32),
- positive in [false, true],
- singular in [false, true],
- dev in devices_list(copy(ARGS))
+    @testset "Dense $qx decomposition, elt=$elt, positive=$positive, singular=$singular, device=$dev" for qx in
+ [
+ qr, ql,
+ ],
+ elt in (Float64, ComplexF64, Float32, ComplexF32),
+ positive in [false, true],
+ singular in [false, true],
+ dev in devices_list(copy(ARGS))
- ## Skip Float64 on Metal
- if !is_supported_eltype(dev, elt)
- continue
+ ## Skip Float64 on Metal
+ if !is_supported_eltype(dev, elt)
+ continue
+ end
+ ## Looks like AMDGPU has an issue with QR when A is singular
+ ## TODO potentially make an is_broken function?
+ if dev == NDTensors.AMDGPUExtensions.roc && singular
+ continue
+ end
+        eps = Base.eps(real(elt)) * 100 #this is set rather tight, so if you increase/change m,n you may have to open up the tolerance on eps.
+ n, m = 4, 8
+ Id = Diagonal(fill(1.0, min(n, m)))
+ #
+ # Wide matrix (more columns than rows)
+ #
+ A = dev(randomTensor(elt, (n, m)))
+        # We want to test 0.0 on the diagonal. We need to make all rows equal to guarantee this with numerical roundoff.
+ @allowscalar if singular
+ for i in 2:n
+ A[i, :] = A[1, :]
+ end
+ end
+ Q, X = qx(A; positive = positive) #X is R or L.
+ Ap = Q * X
+ @test cpu(A) ≈ cpu(Ap) atol = eps
+ @test cpu(array(Q)' * array(Q)) ≈ Id atol = eps
+ @test cpu(array(Q) * array(Q)') ≈ Id atol = eps
+ @allowscalar if positive
+ nr, nc = size(X)
+ dr = qx == ql ? Base.max(0, nc - nr) : 0
+            diagX = diag(X[:, (1 + dr):end]) #location of diag(L) is shifted dr columns over to the right.
+ @test all(real(diagX) .>= 0.0)
+ @test all(imag(diagX) .== 0.0)
+ end
+ #
+ # Tall matrix (more rows than cols)
+ #
+ A = dev(randomTensor(elt, (m, n))) #Tall array
+        # We want to test 0.0 on the diagonal. We need to make all rows equal to guarantee this with numerical roundoff.
+ @allowscalar if singular
+ for i in 2:m
+ A[i, :] = A[1, :]
+ end
+ end
+ Q, X = qx(A; positive = positive)
+ Ap = Q * X
+ @test cpu(A) ≈ cpu(Ap) atol = eps
+ @test cpu(array(Q)' * array(Q)) ≈ Id atol = eps
+ @allowscalar if positive
+ nr, nc = size(X)
+ dr = qx == ql ? Base.max(0, nc - nr) : 0
+            diagX = diag(X[:, (1 + dr):end]) #location of diag(L) is shifted dr columns over to the right.
+ @test all(real(diagX) .>= 0.0)
+ @test all(imag(diagX) .== 0.0)
+ end
end
- ## Looks like AMDGPU has an issue with QR when A is singular
- ## TODO potentially make an is_broken function?
- if dev == NDTensors.AMDGPUExtensions.roc && singular
- continue
- end
- eps = Base.eps(real(elt)) * 100 #this is set rather tight, so if you increase/change m,n you may have open up the tolerance on eps.
- n, m = 4, 8
- Id = Diagonal(fill(1.0, min(n, m)))
- #
- # Wide matrix (more columns than rows)
- #
- A = dev(randomTensor(elt, (n, m)))
- # We want to test 0.0 on the diagonal. We need to make all rows equal to gaurantee this with numerical roundoff.
- @allowscalar if singular
- for i in 2:n
- A[i, :] = A[1, :]
- end
- end
- Q, X = qx(A; positive=positive) #X is R or L.
- Ap = Q * X
- @test cpu(A) ≈ cpu(Ap) atol = eps
- @test cpu(array(Q)' * array(Q)) ≈ Id atol = eps
- @test cpu(array(Q) * array(Q)') ≈ Id atol = eps
- @allowscalar if positive
- nr, nc = size(X)
- dr = qx == ql ? Base.max(0, nc - nr) : 0
- diagX = diag(X[:, (1 + dr):end]) #location of diag(L) is shifted dr columns over the right.
- @test all(real(diagX) .>= 0.0)
- @test all(imag(diagX) .== 0.0)
- end
- #
- # Tall matrix (more rows than cols)
- #
- A = dev(randomTensor(elt, (m, n))) #Tall array
- # We want to test 0.0 on the diagonal. We need make all rows equal to gaurantee this with numerical roundoff.
- @allowscalar if singular
- for i in 2:m
- A[i, :] = A[1, :]
- end
- end
- Q, X = qx(A; positive=positive)
- Ap = Q * X
- @test cpu(A) ≈ cpu(Ap) atol = eps
- @test cpu(array(Q)' * array(Q)) ≈ Id atol = eps
- @allowscalar if positive
- nr, nc = size(X)
- dr = qx == ql ? Base.max(0, nc - nr) : 0
- diagX = diag(X[:, (1 + dr):end]) #location of diag(L) is shifted dr columns over the right.
- @test all(real(diagX) .>= 0.0)
- @test all(imag(diagX) .== 0.0)
- end
- end
end
nothing
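
For context on the `positive = positive` branches tested above: a positive QR/QL decomposition fixes the sign (or phase) of the diagonal of the triangular factor by absorbing it into Q. A generic sketch with plain `LinearAlgebra` (not the NDTensors `qr`/`ql` methods under test):

```julia
using LinearAlgebra

# Make diag(R) non-negative by absorbing a diagonal sign matrix into Q.
A = randn(8, 4)
F = qr(A)
Q, R = Matrix(F.Q), F.R                     # thin Q (8×4) and square R (4×4)
signs = Diagonal(map(x -> iszero(x) ? one(x) : sign(x), diag(R)))
Qpos, Rpos = Q * signs, signs * R           # signs is its own inverse for real A
@assert Qpos * Rpos ≈ A
@assert all(diag(Rpos) .>= 0)
```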
diff --git a/Project.toml b/Project.toml
index 4c9134a06e..aafd60bd6d 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,7 +1,7 @@
name = "ITensors"
uuid = "9136182c-28ba-11e9-034c-db9fb085ebd5"
authors = ["Matthew Fishman ", "Miles Stoudenmire "]
-version = "0.9.11"
+version = "0.9.12"
[deps]
Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
diff --git a/docs/settings.jl b/docs/settings.jl
index a49d903537..a72a97b0da 100644
--- a/docs/settings.jl
+++ b/docs/settings.jl
@@ -3,55 +3,55 @@ using ITensors
# Allows using ITensorMPS.jl docstrings in ITensors.jl documentation:
# https://github.com/JuliaDocs/Documenter.jl/issues/1734
-DocMeta.setdocmeta!(ITensors, :DocTestSetup, :(using ITensors); recursive=true)
+DocMeta.setdocmeta!(ITensors, :DocTestSetup, :(using ITensors); recursive = true)
readme_ccq_logo = """
-
-
-
-
- """
+
+
+
+
+"""
index_ccq_logo = """
- ```@raw html
-
-
- ```
- """
+```@raw html
+
+
+```
+"""
readme_str = read(joinpath(@__DIR__, "..", "README.md"), String)
write(
- joinpath(@__DIR__, "src", "index.md"),
- replace(readme_str, readme_ccq_logo => index_ccq_logo),
+ joinpath(@__DIR__, "src", "index.md"),
+ replace(readme_str, readme_ccq_logo => index_ccq_logo),
)
sitename = "ITensors.jl"
settings = Dict(
- :pages => [
- "Introduction" => "index.md",
- "Getting Started with ITensor" => [
- "Installing Julia and ITensor" => "getting_started/Installing.md",
- "Running ITensor and Julia Codes" => "getting_started/RunningCodes.md",
- "Enabling Debug Checks" => "getting_started/DebugChecks.md",
- "Next Steps" => "getting_started/NextSteps.md",
+ :pages => [
+ "Introduction" => "index.md",
+ "Getting Started with ITensor" => [
+ "Installing Julia and ITensor" => "getting_started/Installing.md",
+ "Running ITensor and Julia Codes" => "getting_started/RunningCodes.md",
+ "Enabling Debug Checks" => "getting_started/DebugChecks.md",
+ "Next Steps" => "getting_started/NextSteps.md",
+ ],
+ "Code Examples" => ["ITensor Examples" => "examples/ITensor.md"],
+ "Documentation" =>
+ ["Index" => "IndexType.md", "ITensor" => "ITensorType.md", "QN" => "QN.md"],
+ "Frequently Asked Questions" => [
+ "ITensor Development FAQs" => "faq/Development.md",
+ "Julia Package Manager FAQs" => "faq/JuliaPkg.md",
+ ],
+ "Upgrade guides" => ["Upgrading from 0.1 to 0.2" => "UpgradeGuide_0.1_to_0.2.md"],
+ "Advanced Usage Guide" => [
+ "Multithreading" => "Multithreading.md",
+ "Running on GPUs" => "RunningOnGPUs.md",
+ "Contraction sequence optimization" => "ContractionSequenceOptimization.md",
+ "HDF5 File Formats" => "HDF5FileFormats.md",
+ ],
],
- "Code Examples" => ["ITensor Examples" => "examples/ITensor.md"],
- "Documentation" =>
- ["Index" => "IndexType.md", "ITensor" => "ITensorType.md", "QN" => "QN.md"],
- "Frequently Asked Questions" => [
- "ITensor Development FAQs" => "faq/Development.md",
- "Julia Package Manager FAQs" => "faq/JuliaPkg.md",
- ],
- "Upgrade guides" => ["Upgrading from 0.1 to 0.2" => "UpgradeGuide_0.1_to_0.2.md"],
- "Advanced Usage Guide" => [
- "Multithreading" => "Multithreading.md",
- "Running on GPUs" => "RunningOnGPUs.md",
- "Contraction sequence optimization" => "ContractionSequenceOptimization.md",
- "HDF5 File Formats" => "HDF5FileFormats.md",
- ],
- ],
- :format =>
- Documenter.HTML(; assets=["assets/favicon.ico", "assets/extras.css"], prettyurls=false),
- :doctest => true,
- :checkdocs => :none,
+ :format =>
+ Documenter.HTML(; assets = ["assets/favicon.ico", "assets/extras.css"], prettyurls = false),
+ :doctest => true,
+ :checkdocs => :none,
)
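
The `settings` dictionary above only collects keyword arguments; a hedged guess at how it is consumed (the splatting call is an assumption about `docs/make.jl`, which is not part of this diff):

```julia
using Documenter, ITensors

# Assumed usage: splat the Symbol-keyed settings Dict into makedocs.
makedocs(; sitename = sitename, settings...)
```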
diff --git a/examples/ctmrg/anisotropic/run.jl b/examples/ctmrg/anisotropic/run.jl
index c050bfe030..e2cc71820f 100644
--- a/examples/ctmrg/anisotropic/run.jl
+++ b/examples/ctmrg/anisotropic/run.jl
@@ -24,4 +24,4 @@ check_environment(T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad))
# Run ctmrg
@show κave = ctmrg(T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad))
-@assert isapprox(κave, exp(-β * ising_free_energy(β)); rtol=1e-10)
+@assert isapprox(κave, exp(-β * ising_free_energy(β)); rtol = 1.0e-10)
diff --git a/examples/ctmrg/isotropic/run.jl b/examples/ctmrg/isotropic/run.jl
index 322f266fb7..bed7a0dcdd 100644
--- a/examples/ctmrg/isotropic/run.jl
+++ b/examples/ctmrg/isotropic/run.jl
@@ -6,52 +6,52 @@ include(joinpath(src_dir, "ctmrg_isotropic.jl"))
include(joinpath(src_dir, "2d_classical_ising.jl"))
function main()
- # Make Ising model MPO
- β = 1.1 * βc
- χmax = 20
- cutoff = 1e-8
- nsteps = 100
+ # Make Ising model MPO
+ β = 1.1 * βc
+ χmax = 20
+ cutoff = 1.0e-8
+ nsteps = 100
- d = 2
- s = Index(d, "Site")
- sₕ = addtags(s, "horiz")
- sᵥ = addtags(s, "vert")
+ d = 2
+ s = Index(d, "Site")
+ sₕ = addtags(s, "horiz")
+ sᵥ = addtags(s, "vert")
- T = ising_mpo(sₕ, sᵥ, β)
+ T = ising_mpo(sₕ, sᵥ, β)
- χ0 = 1
- l = Index(χ0, "Link")
- lₕ = addtags(l, "horiz")
- lᵥ = addtags(l, "vert")
+ χ0 = 1
+ l = Index(χ0, "Link")
+ lₕ = addtags(l, "horiz")
+ lᵥ = addtags(l, "vert")
- # Initial CTM
- Cₗᵤ = ITensor(lᵥ, lₕ)
- Cₗᵤ[1, 1] = 1.0
+ # Initial CTM
+ Cₗᵤ = ITensor(lᵥ, lₕ)
+ Cₗᵤ[1, 1] = 1.0
- # Initial HRTM
- Aₗ = ITensor(lᵥ, lᵥ', sₕ)
- Aₗ[lᵥ => 1, lᵥ' => 1, sₕ => 1] = 1.0
+ # Initial HRTM
+ Aₗ = ITensor(lᵥ, lᵥ', sₕ)
+ Aₗ[lᵥ => 1, lᵥ' => 1, sₕ => 1] = 1.0
- Cₗᵤ, Aₗ = ctmrg(T, Cₗᵤ, Aₗ; χmax=χmax, cutoff=cutoff, nsteps=nsteps)
+ Cₗᵤ, Aₗ = ctmrg(T, Cₗᵤ, Aₗ; χmax = χmax, cutoff = cutoff, nsteps = nsteps)
- lᵥ = commonind(Cₗᵤ, Aₗ)
- lₕ = uniqueind(Cₗᵤ, Aₗ)
+ lᵥ = commonind(Cₗᵤ, Aₗ)
+ lₕ = uniqueind(Cₗᵤ, Aₗ)
- Aᵤ = replaceinds(Aₗ, lᵥ => lₕ, lᵥ' => lₕ', sₕ => sᵥ)
+ Aᵤ = replaceinds(Aₗ, lᵥ => lₕ, lᵥ' => lₕ', sₕ => sᵥ)
- ACₗ = Aₗ * Cₗᵤ * dag(Cₗᵤ')
+ ACₗ = Aₗ * Cₗᵤ * dag(Cₗᵤ')
- ACTₗ = prime(ACₗ * dag(Aᵤ') * T * Aᵤ, -1)
+ ACTₗ = prime(ACₗ * dag(Aᵤ') * T * Aᵤ, -1)
- κ = (ACTₗ * dag(ACₗ))[]
+ κ = (ACTₗ * dag(ACₗ))[]
- @show κ, exp(-β * ising_free_energy(β))
+ @show κ, exp(-β * ising_free_energy(β))
- # Calculate magnetization
- Tsz = ising_mpo(sₕ, sᵥ, β; sz=true)
- ACTszₗ = prime(ACₗ * dag(Aᵤ') * Tsz * Aᵤ, -1)
- m = (ACTszₗ * dag(ACₗ))[] / κ
- @show m, ising_magnetization(β)
+ # Calculate magnetization
+ Tsz = ising_mpo(sₕ, sᵥ, β; sz = true)
+ ACTszₗ = prime(ACₗ * dag(Aᵤ') * Tsz * Aᵤ, -1)
+ m = (ACTszₗ * dag(ACₗ))[] / κ
+ return @show m, ising_magnetization(β)
end
main()
diff --git a/examples/src/2d_classical_ising.jl b/examples/src/2d_classical_ising.jl
index d4b70a237d..698c2d5a95 100644
--- a/examples/src/2d_classical_ising.jl
+++ b/examples/src/2d_classical_ising.jl
@@ -3,72 +3,72 @@ using LinearAlgebra
using QuadGK
function ising_mpo(
- pair_sₕ::Pair{<:Index,<:Index},
- pair_sᵥ::Pair{<:Index,<:Index},
- β::Real,
- J::Real=1.0;
- sz::Bool=false,
-)
- sₕ, sₕ′ = pair_sₕ
- sᵥ, sᵥ′ = pair_sᵥ
- @assert dim(sₕ) == dim(sᵥ)
- d = dim(sₕ)
- T = ITensor(sₕ, sₕ′, sᵥ, sᵥ′)
- for i in 1:d
- T[i, i, i, i] = 1.0
- end
- if sz
- T[1, 1, 1, 1] = -1.0
- end
- s̃ₕ, s̃ₕ′, s̃ᵥ, s̃ᵥ′ = sim.((sₕ, sₕ′, sᵥ, sᵥ′))
- T̃ = T * δ(sₕ, s̃ₕ) * δ(sₕ′, s̃ₕ′) * δ(sᵥ, s̃ᵥ) * δ(sᵥ′, s̃ᵥ′)
+ pair_sₕ::Pair{<:Index, <:Index},
+ pair_sᵥ::Pair{<:Index, <:Index},
+ β::Real,
+ J::Real = 1.0;
+ sz::Bool = false,
+ )
+ sₕ, sₕ′ = pair_sₕ
+ sᵥ, sᵥ′ = pair_sᵥ
+ @assert dim(sₕ) == dim(sᵥ)
+ d = dim(sₕ)
+ T = ITensor(sₕ, sₕ′, sᵥ, sᵥ′)
+ for i in 1:d
+ T[i, i, i, i] = 1.0
+ end
+ if sz
+ T[1, 1, 1, 1] = -1.0
+ end
+ s̃ₕ, s̃ₕ′, s̃ᵥ, s̃ᵥ′ = sim.((sₕ, sₕ′, sᵥ, sᵥ′))
+ T̃ = T * δ(sₕ, s̃ₕ) * δ(sₕ′, s̃ₕ′) * δ(sᵥ, s̃ᵥ) * δ(sᵥ′, s̃ᵥ′)
- # Alternative method
- #Q = [exp(β * J) exp(-β * J); exp(-β * J) exp(β * J)]
- #X = √Q
+ # Alternative method
+ #Q = [exp(β * J) exp(-β * J); exp(-β * J) exp(β * J)]
+ #X = √Q
- f(λ₊, λ₋) = [
- (λ₊ + λ₋)/2 (λ₊ - λ₋)/2
- (λ₊ - λ₋)/2 (λ₊ + λ₋)/2
- ]
- λ₊ = √(exp(β * J) + exp(-β * J))
- λ₋ = √(exp(β * J) - exp(-β * J))
- X = f(λ₊, λ₋)
- Xₕ = itensor(vec(X), s̃ₕ, sₕ)
- Xₕ′ = itensor(vec(X), s̃ₕ′, sₕ′)
- Xᵥ = itensor(vec(X), s̃ᵥ, sᵥ)
- Xᵥ′ = itensor(vec(X), s̃ᵥ′, sᵥ′)
- return T̃ * Xₕ′ * Xᵥ′ * Xₕ * Xᵥ
+ f(λ₊, λ₋) = [
+ (λ₊ + λ₋) / 2 (λ₊ - λ₋) / 2
+ (λ₊ - λ₋) / 2 (λ₊ + λ₋) / 2
+ ]
+ λ₊ = √(exp(β * J) + exp(-β * J))
+ λ₋ = √(exp(β * J) - exp(-β * J))
+ X = f(λ₊, λ₋)
+ Xₕ = itensor(vec(X), s̃ₕ, sₕ)
+ Xₕ′ = itensor(vec(X), s̃ₕ′, sₕ′)
+ Xᵥ = itensor(vec(X), s̃ᵥ, sᵥ)
+ Xᵥ′ = itensor(vec(X), s̃ᵥ′, sᵥ′)
+ return T̃ * Xₕ′ * Xᵥ′ * Xₕ * Xᵥ
end
function ising_mpo(sₕ::Index, sᵥ::Index, args...; kwargs...)
- return ising_mpo(sₕ => sₕ', sᵥ => sᵥ', args...; kwargs...)
+ return ising_mpo(sₕ => sₕ', sᵥ => sᵥ', args...; kwargs...)
end
function ising_mpo_dual(
- sh::Tuple{Index,Index}, sv::Tuple{Index,Index}, β::Real, J::Real=1.0
-)
- d = dim(sh[1])
- T = ITensor(sh[1], sh[2], sv[1], sv[2])
- sig(s) = 1.0 - 2.0 * (s - 1)
- E0 = -4.0
- for s1 in 1:d, s2 in 1:d, s3 in 1:d, s4 in 1:d
- E = sig(s1) * sig(s2) + sig(s2) * sig(s3) + sig(s3) * sig(s4) + sig(s4) * sig(s1)
- val = exp(-β * (E - E0))
- T[sh[1] => s1, sv[2] => s2, sh[2] => s3, sv[1] => s4] = val
- end
- return T
+ sh::Tuple{Index, Index}, sv::Tuple{Index, Index}, β::Real, J::Real = 1.0
+ )
+ d = dim(sh[1])
+ T = ITensor(sh[1], sh[2], sv[1], sv[2])
+ sig(s) = 1.0 - 2.0 * (s - 1)
+ E0 = -4.0
+ for s1 in 1:d, s2 in 1:d, s3 in 1:d, s4 in 1:d
+ E = sig(s1) * sig(s2) + sig(s2) * sig(s3) + sig(s3) * sig(s4) + sig(s4) * sig(s1)
+ val = exp(-β * (E - E0))
+ T[sh[1] => s1, sv[2] => s2, sh[2] => s3, sv[1] => s4] = val
+ end
+ return T
end
function ising_partition(sh, sv, β)
- ny, nx = size(sh)
- T = Matrix{ITensor}(undef, ny, nx)
- for iy in 1:ny, ix in 1:nx
- ixp = per(ix + 1, nx)
- iyp = per(iy + 1, ny)
- T[iy, ix] = ising_mpo(sh[iy, ix] => sh[iy, ixp], sv[iy, ix] => sv[iyp, ix], β)
- end
- return T
+ ny, nx = size(sh)
+ T = Matrix{ITensor}(undef, ny, nx)
+ for iy in 1:ny, ix in 1:nx
+ ixp = per(ix + 1, nx)
+ iyp = per(iy + 1, ny)
+ T[iy, ix] = ising_mpo(sh[iy, ix] => sh[iy, ixp], sv[iy, ix] => sv[iyp, ix], β)
+ end
+ return T
end
#
@@ -77,18 +77,18 @@ end
const βc = 0.5 * log(√2 + 1)
-function ising_free_energy(β::Real, J::Real=1.0)
- k = β * J
- c = cosh(2 * k)
- s = sinh(2 * k)
- xmin = 0.0
- xmax = π
- integrand(x) = log(c^2 + √(s^4 + 1 - 2 * s^2 * cos(x)))
- integral, err = quadgk(integrand, xmin, xmax)::Tuple{Float64,Float64}
- return -(log(2) + integral / π) / (2 * β)
+function ising_free_energy(β::Real, J::Real = 1.0)
+ k = β * J
+ c = cosh(2 * k)
+ s = sinh(2 * k)
+ xmin = 0.0
+ xmax = π
+ integrand(x) = log(c^2 + √(s^4 + 1 - 2 * s^2 * cos(x)))
+ integral, err = quadgk(integrand, xmin, xmax)::Tuple{Float64, Float64}
+ return -(log(2) + integral / π) / (2 * β)
end
function ising_magnetization(β::Real)
- β > βc && return (1 - sinh(2 * β)^(-4))^(1 / 8)
- return 0.0
+ β > βc && return (1 - sinh(2 * β)^(-4))^(1 / 8)
+ return 0.0
end
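
As a usage note for the exact-result helpers reformatted above, here is a sketch assuming `examples/src/2d_classical_ising.jl` has been included so that `βc`, `ising_free_energy`, and `ising_magnetization` are in scope:

```julia
# Sanity check of the Onsager exact results against which the CTMRG examples compare.
β = 1.1 * βc                      # ordered phase, since β > βc
f = ising_free_energy(β)          # exact free energy per site
κ_exact = exp(-β * f)             # per-site factor checked by the run.jl scripts
m = ising_magnetization(β)        # (1 - sinh(2β)^(-4))^(1/8) for β > βc
@show κ_exact, m
```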
diff --git a/examples/src/ctmrg_anisotropic.jl b/examples/src/ctmrg_anisotropic.jl
index 05fefce08d..f047a5e769 100644
--- a/examples/src/ctmrg_anisotropic.jl
+++ b/examples/src/ctmrg_anisotropic.jl
@@ -1,300 +1,303 @@
using ITensors
-function site_inds(ny, nx, d=1)
- sh = Matrix{Index}(undef, ny, nx)
- sv = Matrix{Index}(undef, ny, nx)
- for iy in 1:ny, ix in 1:nx
- sh[iy, ix] = Index(d, "site,horiz,x=$ix,y=$iy")
- sv[iy, ix] = Index(d, "site,vert,x=$ix,y=$iy")
- end
- return sh, sv
+function site_inds(ny, nx, d = 1)
+ sh = Matrix{Index}(undef, ny, nx)
+ sv = Matrix{Index}(undef, ny, nx)
+ for iy in 1:ny, ix in 1:nx
+ sh[iy, ix] = Index(d, "site,horiz,x=$ix,y=$iy")
+ sv[iy, ix] = Index(d, "site,vert,x=$ix,y=$iy")
+ end
+ return sh, sv
end
-function link_inds(ny, nx, d=1)
- ll = Matrix{Index}(undef, ny, nx)
- lr = Matrix{Index}(undef, ny, nx)
- lu = Matrix{Index}(undef, ny, nx)
- ld = Matrix{Index}(undef, ny, nx)
- for iy in 1:ny, ix in 1:nx
- ll[iy, ix] = Index(d, "link,left,x=$ix,y=$iy")
- lr[iy, ix] = Index(d, "link,right,x=$ix,y=$iy")
- lu[iy, ix] = Index(d, "link,up,x=$ix,y=$iy")
- ld[iy, ix] = Index(d, "link,down,x=$ix,y=$iy")
- end
- return ll, lr, lu, ld
+function link_inds(ny, nx, d = 1)
+ ll = Matrix{Index}(undef, ny, nx)
+ lr = Matrix{Index}(undef, ny, nx)
+ lu = Matrix{Index}(undef, ny, nx)
+ ld = Matrix{Index}(undef, ny, nx)
+ for iy in 1:ny, ix in 1:nx
+ ll[iy, ix] = Index(d, "link,left,x=$ix,y=$iy")
+ lr[iy, ix] = Index(d, "link,right,x=$ix,y=$iy")
+ lu[iy, ix] = Index(d, "link,up,x=$ix,y=$iy")
+ ld[iy, ix] = Index(d, "link,down,x=$ix,y=$iy")
+ end
+ return ll, lr, lu, ld
end
per(n, N) = mod(n - 1, N) + 1
function ctmrg_environment((sh, sv))
- ny, nx = size(sh)
- Clu = Matrix{ITensor}(undef, ny, nx)
- Cru = Matrix{ITensor}(undef, ny, nx)
- Cld = Matrix{ITensor}(undef, ny, nx)
- Crd = Matrix{ITensor}(undef, ny, nx)
+ ny, nx = size(sh)
+ Clu = Matrix{ITensor}(undef, ny, nx)
+ Cru = Matrix{ITensor}(undef, ny, nx)
+ Cld = Matrix{ITensor}(undef, ny, nx)
+ Crd = Matrix{ITensor}(undef, ny, nx)
- Al = Matrix{ITensor}(undef, ny, nx)
- Ar = Matrix{ITensor}(undef, ny, nx)
- Au = Matrix{ITensor}(undef, ny, nx)
- Ad = Matrix{ITensor}(undef, ny, nx)
+ Al = Matrix{ITensor}(undef, ny, nx)
+ Ar = Matrix{ITensor}(undef, ny, nx)
+ Au = Matrix{ITensor}(undef, ny, nx)
+ Ad = Matrix{ITensor}(undef, ny, nx)
- ll, lr, lu, ld = link_inds(ny, nx)
- for iy in 1:ny, ix in 1:nx
- Clu[iy, ix] = random_itensor(ll[iy, ix], lu[iy, ix])
- Cru[iy, ix] = random_itensor(lr[iy, ix], lu[iy, ix])
- Cld[iy, ix] = random_itensor(ll[iy, ix], ld[iy, ix])
- Crd[iy, ix] = random_itensor(lr[iy, ix], ld[iy, ix])
- iyp, ixp = per(iy + 1, ny), per(ix + 1, nx)
- Al[iy, ix] = random_itensor(sh[iy, ix], ll[iy, ix], ll[iyp, ix])
- Ar[iy, ix] = random_itensor(sh[iy, ix], lr[iy, ix], lr[iyp, ix])
- Au[iy, ix] = random_itensor(sv[iy, ix], lu[iy, ix], lu[iy, ixp])
- Ad[iy, ix] = random_itensor(sv[iy, ix], ld[iy, ix], ld[iy, ixp])
- end
- normalize!((Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad))
- return (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad)
+ ll, lr, lu, ld = link_inds(ny, nx)
+ for iy in 1:ny, ix in 1:nx
+ Clu[iy, ix] = random_itensor(ll[iy, ix], lu[iy, ix])
+ Cru[iy, ix] = random_itensor(lr[iy, ix], lu[iy, ix])
+ Cld[iy, ix] = random_itensor(ll[iy, ix], ld[iy, ix])
+ Crd[iy, ix] = random_itensor(lr[iy, ix], ld[iy, ix])
+ iyp, ixp = per(iy + 1, ny), per(ix + 1, nx)
+ Al[iy, ix] = random_itensor(sh[iy, ix], ll[iy, ix], ll[iyp, ix])
+ Ar[iy, ix] = random_itensor(sh[iy, ix], lr[iy, ix], lr[iyp, ix])
+ Au[iy, ix] = random_itensor(sv[iy, ix], lu[iy, ix], lu[iy, ixp])
+ Ad[iy, ix] = random_itensor(sv[iy, ix], ld[iy, ix], ld[iy, ixp])
+ end
+ normalize!((Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad))
+ return (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad)
end
-function calc_κ(iy, ix, T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad); dir="left")
- ny, nx = size(T)
- iyp, ixp = per(iy + 1, ny), per(ix + 1, nx)
- normC = scalar(Clu[iy, ix] * Cld[iy, ix] * Cru[iy, ix] * Crd[iy, ix])
- normAlr = scalar(
- Clu[iy, ix] * Cru[iy, ix] * Al[iy, ix] * Ar[iy, ix] * Cld[iyp, ix] * Crd[iyp, ix]
- )
- normAud = scalar(
- Clu[iy, ix] * Cld[iy, ix] * Au[iy, ix] * Ad[iy, ix] * Cru[iy, ixp] * Crd[iy, ixp]
- )
- normT = scalar(
- Clu[iy, ix] *
- Al[iy, ix] *
- Cld[iyp, ix] *
- Au[iy, ix] *
- T[iy, ix] *
- Ad[iyp, ix] *
- Cru[iy, ixp] *
- Ar[iy, ixp] *
- Crd[iyp, ixp],
- )
- return normT, normAlr, normAud, normC
+function calc_κ(iy, ix, T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad); dir = "left")
+ ny, nx = size(T)
+ iyp, ixp = per(iy + 1, ny), per(ix + 1, nx)
+ normC = scalar(Clu[iy, ix] * Cld[iy, ix] * Cru[iy, ix] * Crd[iy, ix])
+ normAlr = scalar(
+ Clu[iy, ix] * Cru[iy, ix] * Al[iy, ix] * Ar[iy, ix] * Cld[iyp, ix] * Crd[iyp, ix]
+ )
+ normAud = scalar(
+ Clu[iy, ix] * Cld[iy, ix] * Au[iy, ix] * Ad[iy, ix] * Cru[iy, ixp] * Crd[iy, ixp]
+ )
+ normT = scalar(
+ Clu[iy, ix] *
+ Al[iy, ix] *
+ Cld[iyp, ix] *
+ Au[iy, ix] *
+ T[iy, ix] *
+ Ad[iyp, ix] *
+ Cru[iy, ixp] *
+ Ar[iy, ixp] *
+ Crd[iyp, ixp],
+ )
+ return normT, normAlr, normAud, normC
end
-function calc_κ(T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad); dir="left")
- ny, nx = size(T)
- κ = Matrix{Float64}(undef, ny, nx)
- for iy in 1:ny, ix in 1:nx
- normT, normAlr, normAud, normC = calc_κ(
- iy, ix, T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad); dir=dir
- )
- κ[iy, ix] = normT * normC / (normAlr * normAud)
- end
- return κ
+function calc_κ(T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad); dir = "left")
+ ny, nx = size(T)
+ κ = Matrix{Float64}(undef, ny, nx)
+ for iy in 1:ny, ix in 1:nx
+ normT, normAlr, normAud, normC = calc_κ(
+ iy, ix, T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad); dir = dir
+ )
+ κ[iy, ix] = normT * normC / (normAlr * normAud)
+ end
+ return κ
end
function normalize!((Clu, Cru, Cld, Crd))
- ny, nx = size(Clu)
- for iy in 1:ny, ix in 1:nx
- Clu[iy, ix] /= norm(Clu[iy, ix])
- Cld[iy, ix] /= norm(Cld[iy, ix])
- Cru[iy, ix] /= norm(Cru[iy, ix])
- Crd[iy, ix] /= norm(Crd[iy, ix])
- normC4 = scalar(Clu[iy, ix] * Cld[iy, ix] * Cru[iy, ix] * Crd[iy, ix])
- normC4 < 0 ? normClu = -abs(normC4)^(1 / 4) : normClu = normC4^(1 / 4)
- Clu[iy, ix] /= normClu
- Cld[iy, ix] /= abs(normClu)
- Cru[iy, ix] /= abs(normClu)
- Crd[iy, ix] /= abs(normClu)
- end
+ ny, nx = size(Clu)
+ for iy in 1:ny, ix in 1:nx
+ Clu[iy, ix] /= norm(Clu[iy, ix])
+ Cld[iy, ix] /= norm(Cld[iy, ix])
+ Cru[iy, ix] /= norm(Cru[iy, ix])
+ Crd[iy, ix] /= norm(Crd[iy, ix])
+ normC4 = scalar(Clu[iy, ix] * Cld[iy, ix] * Cru[iy, ix] * Crd[iy, ix])
+ normC4 < 0 ? normClu = -abs(normC4)^(1 / 4) : normClu = normC4^(1 / 4)
+ Clu[iy, ix] /= normClu
+ Cld[iy, ix] /= abs(normClu)
+ Cru[iy, ix] /= abs(normClu)
+ Crd[iy, ix] /= abs(normClu)
+ end
+ return nothing
end
-function normalize!((Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad); dir="left")
- normalize!((Clu, Cru, Cld, Crd))
- ny, nx = size(Clu)
- for iy in 1:ny, ix in 1:nx
- Al[iy, ix] /= norm(Al[iy, ix])
- Ar[iy, ix] /= norm(Ar[iy, ix])
- Au[iy, ix] /= norm(Au[iy, ix])
- Ad[iy, ix] /= norm(Ad[iy, ix])
- iyp, ixp = per(iy + 1, ny), per(ix + 1, nx)
- normAlr = scalar(
- Clu[iy, ix] * Cru[iy, ix] * Al[iy, ix] * Ar[iy, ix] * Cld[iyp, ix] * Crd[iyp, ix]
- )
- normAlr < 0 ? normAl = -abs(normAlr)^(1 / 2) : normAl = normAlr^(1 / 2)
- Al[iy, ix] /= normAl
- Ar[iy, ix] /= abs(normAl)
- normAud = scalar(
- Clu[iy, ix] * Cld[iy, ix] * Au[iy, ix] * Ad[iy, ix] * Cru[iy, ixp] * Crd[iy, ixp]
- )
- normAud < 0 ? normAu = -abs(normAud)^(1 / 2) : normAu = normAud^(1 / 2)
- Au[iy, ix] /= normAu
- Ad[iy, ix] /= abs(normAu)
- end
+function normalize!((Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad); dir = "left")
+ normalize!((Clu, Cru, Cld, Crd))
+ ny, nx = size(Clu)
+ for iy in 1:ny, ix in 1:nx
+ Al[iy, ix] /= norm(Al[iy, ix])
+ Ar[iy, ix] /= norm(Ar[iy, ix])
+ Au[iy, ix] /= norm(Au[iy, ix])
+ Ad[iy, ix] /= norm(Ad[iy, ix])
+ iyp, ixp = per(iy + 1, ny), per(ix + 1, nx)
+ normAlr = scalar(
+ Clu[iy, ix] * Cru[iy, ix] * Al[iy, ix] * Ar[iy, ix] * Cld[iyp, ix] * Crd[iyp, ix]
+ )
+ normAlr < 0 ? normAl = -abs(normAlr)^(1 / 2) : normAl = normAlr^(1 / 2)
+ Al[iy, ix] /= normAl
+ Ar[iy, ix] /= abs(normAl)
+ normAud = scalar(
+ Clu[iy, ix] * Cld[iy, ix] * Au[iy, ix] * Ad[iy, ix] * Cru[iy, ixp] * Crd[iy, ixp]
+ )
+ normAud < 0 ? normAu = -abs(normAud)^(1 / 2) : normAu = normAud^(1 / 2)
+ Au[iy, ix] /= normAu
+ Ad[iy, ix] /= abs(normAu)
+ end
+ return nothing
end
-function leftright_move!(T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad); dir="left", maxdim=5)
- ny, nx = size(T)
- P = Vector{ITensor}(undef, ny)
- P⁻ = Vector{ITensor}(undef, ny)
- if dir == "left" || dir == "up"
- xrange = 1:nx
- elseif dir == "right" || dir == "down"
- xrange = per.((nx - 1):-1:0, nx)
- end
- for ix in xrange
- ixm = per(ix - 1, nx)
- ixp = per(ix + 1, nx)
- ixpp = per(ix + 2, nx)
- for iy in 1:ny
- iym = per(iy - 1, ny)
- iyp = per(iy + 1, ny)
-
- Cu =
- Al[iym, ix] *
- Clu[iym, ix] *
- Au[iym, ix] *
- T[iym, ix] *
- Au[iym, ixp] *
- T[iym, ixp] *
- Cru[iym, ixpp] *
- Ar[iym, ixpp]
- @assert order(Cu) == 4
- Cd =
- Al[iy, ix] *
- Cld[iyp, ix] *
- Ad[iyp, ix] *
- T[iy, ix] *
- Ad[iyp, ixp] *
- T[iy, ixp] *
- Crd[iyp, ixpp] *
- Ar[iy, ixpp]
- @assert order(Cd) == 4
- if dir == "left" || dir == "up"
- li = commonindex(Cru[iy, ixpp], Crd[iy, ixpp])
- si = commonindex(Au[iy, ixp], Ad[iy, ixp])
- elseif dir == "right" || dir == "down"
- li = commonindex(Clu[iy, ix], Cld[iy, ix])
- si = commonindex(Au[iy, ix], Ad[iy, ix])
- end
- Cup = prime(Cu, (li, si))
- ρ = Cd * Cup
- if dir == "left" || dir == "right"
- utags = "$dir,link,x=$ixp,y=$iy"
- elseif dir == "up" || dir == "down"
- utags = "$dir,link,x=$iy,y=$ixp"
- end
- U, S, Vh, spec, u, v = svd(
- ρ, (li, si); utags=utags, vtags="tmp", maxdim=maxdim, cutoff=0.0
- )
- V = dag(Vh)
- U *= δ(u, v)
- invsqrtS = S
- for i in 1:dim(u)
- invsqrtS[i, i] = inv(sqrt(S[i, i]))
- end
- P[iy] = Cup * V * invsqrtS
- P⁻[iy] = Cd * dag(U) * invsqrtS
+function leftright_move!(T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad); dir = "left", maxdim = 5)
+ ny, nx = size(T)
+ P = Vector{ITensor}(undef, ny)
+ P⁻ = Vector{ITensor}(undef, ny)
+ if dir == "left" || dir == "up"
+ xrange = 1:nx
+ elseif dir == "right" || dir == "down"
+ xrange = per.((nx - 1):-1:0, nx)
end
- for iy in 1:ny
- iym = per(iy - 1, ny)
- iyp = per(iy + 1, ny)
- if dir == "left" || dir == "up"
- Al[iy, ixp] = Al[iy, ix] * P[iy] * T[iy, ix] * P⁻[iyp]
- Clu[iy, ixp] = Clu[iy, ix] * Au[iy, ix] * P⁻[iy]
- Cld[iy, ixp] = Cld[iy, ix] * Ad[iy, ix] * P[iy]
- elseif dir == "right" || dir == "down"
- Ar[iy, ixp] = Ar[iy, ixpp] * P[iy] * T[iy, ixp] * P⁻[iyp]
- Cru[iy, ixp] = Cru[iy, ixpp] * Au[iy, ixp] * P⁻[iy]
- Crd[iy, ixp] = Crd[iy, ixpp] * Ad[iy, ixp] * P[iy]
- end
+ for ix in xrange
+ ixm = per(ix - 1, nx)
+ ixp = per(ix + 1, nx)
+ ixpp = per(ix + 2, nx)
+ for iy in 1:ny
+ iym = per(iy - 1, ny)
+ iyp = per(iy + 1, ny)
+
+ Cu =
+ Al[iym, ix] *
+ Clu[iym, ix] *
+ Au[iym, ix] *
+ T[iym, ix] *
+ Au[iym, ixp] *
+ T[iym, ixp] *
+ Cru[iym, ixpp] *
+ Ar[iym, ixpp]
+ @assert order(Cu) == 4
+ Cd =
+ Al[iy, ix] *
+ Cld[iyp, ix] *
+ Ad[iyp, ix] *
+ T[iy, ix] *
+ Ad[iyp, ixp] *
+ T[iy, ixp] *
+ Crd[iyp, ixpp] *
+ Ar[iy, ixpp]
+ @assert order(Cd) == 4
+ if dir == "left" || dir == "up"
+ li = commonindex(Cru[iy, ixpp], Crd[iy, ixpp])
+ si = commonindex(Au[iy, ixp], Ad[iy, ixp])
+ elseif dir == "right" || dir == "down"
+ li = commonindex(Clu[iy, ix], Cld[iy, ix])
+ si = commonindex(Au[iy, ix], Ad[iy, ix])
+ end
+ Cup = prime(Cu, (li, si))
+ ρ = Cd * Cup
+ if dir == "left" || dir == "right"
+ utags = "$dir,link,x=$ixp,y=$iy"
+ elseif dir == "up" || dir == "down"
+ utags = "$dir,link,x=$iy,y=$ixp"
+ end
+ U, S, Vh, spec, u, v = svd(
+ ρ, (li, si); utags = utags, vtags = "tmp", maxdim = maxdim, cutoff = 0.0
+ )
+ V = dag(Vh)
+ U *= δ(u, v)
+ invsqrtS = S
+ for i in 1:dim(u)
+ invsqrtS[i, i] = inv(sqrt(S[i, i]))
+ end
+ P[iy] = Cup * V * invsqrtS
+ P⁻[iy] = Cd * dag(U) * invsqrtS
+ end
+ for iy in 1:ny
+ iym = per(iy - 1, ny)
+ iyp = per(iy + 1, ny)
+ if dir == "left" || dir == "up"
+ Al[iy, ixp] = Al[iy, ix] * P[iy] * T[iy, ix] * P⁻[iyp]
+ Clu[iy, ixp] = Clu[iy, ix] * Au[iy, ix] * P⁻[iy]
+ Cld[iy, ixp] = Cld[iy, ix] * Ad[iy, ix] * P[iy]
+ elseif dir == "right" || dir == "down"
+ Ar[iy, ixp] = Ar[iy, ixpp] * P[iy] * T[iy, ixp] * P⁻[iyp]
+ Cru[iy, ixp] = Cru[iy, ixpp] * Au[iy, ixp] * P⁻[iy]
+ Crd[iy, ixp] = Crd[iy, ixpp] * Ad[iy, ixp] * P[iy]
+ end
+ end
end
- end
- return normalize!((Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad))
+ return normalize!((Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad))
end
function swapdiag(M)
- Mp = permutedims(M, [2, 1])
- ny, nx = size(Mp)
- for iy in 1:ny, ix in 1:nx
- Mp[iy, ix] = M[ix, iy]
- end
- return Mp
+ Mp = permutedims(M, [2, 1])
+ ny, nx = size(Mp)
+ for iy in 1:ny, ix in 1:nx
+ Mp[iy, ix] = M[ix, iy]
+ end
+ return Mp
end
function rotate_environment(T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad))
- return swapdiag(T), swapdiag.((Clu, Cld, Cru, Crd)), swapdiag.((Au, Ad, Al, Ar))
+ return swapdiag(T), swapdiag.((Clu, Cld, Cru, Crd)), swapdiag.((Au, Ad, Al, Ar))
end
printdiv() = println("\n****************************************")
printstepdiv() = println("\n##################################################")
function sweepsdims(stepsizes::Vector{Int}, dims::Vector{Int})
- nstep = length(stepsizes)
- maxdims = zeros(Int, stepsizes[end])
- for i in 1:stepsizes[1]
- maxdims[i] = dims[1]
- end
- for j in 2:nstep
- for i in (stepsizes[j - 1] + 1):stepsizes[j]
- maxdims[i] = dims[j]
+ nstep = length(stepsizes)
+ maxdims = zeros(Int, stepsizes[end])
+ for i in 1:stepsizes[1]
+ maxdims[i] = dims[1]
+ end
+ for j in 2:nstep
+ for i in (stepsizes[j - 1] + 1):stepsizes[j]
+ maxdims[i] = dims[j]
+ end
end
- end
- return maxdims
+ return maxdims
end
function check_environment(T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad))
- ny, nx = size(T)
- for iy in 1:ny
- for ix in 1:nx
- @assert order(Clu[iy, ix]) == 2
- @assert order(Cru[iy, ix]) == 2
- @assert order(Cld[iy, ix]) == 2
- @assert order(Crd[iy, ix]) == 2
- @assert order(Al[iy, ix]) == 3
- @assert order(Ar[iy, ix]) == 3
- @assert order(Au[iy, ix]) == 3
- @assert order(Ad[iy, ix]) == 3
- @assert order(T[iy, ix]) == 4
- @assert order(T[iy, ix]) == 4
- @assert order(T[iy, ix]) == 4
- @assert order(T[iy, ix]) == 4
- @assert length(commoninds(Clu[iy, ix], Cru[iy, ix])) == 1
- @assert length(commoninds(Clu[iy, ix], Cld[iy, ix])) == 1
- @assert length(commoninds(Cld[iy, ix], Crd[iy, ix])) == 1
- @assert length(commoninds(Cru[iy, ix], Crd[iy, ix])) == 1
+ ny, nx = size(T)
+ for iy in 1:ny
+ for ix in 1:nx
+ @assert order(Clu[iy, ix]) == 2
+ @assert order(Cru[iy, ix]) == 2
+ @assert order(Cld[iy, ix]) == 2
+ @assert order(Crd[iy, ix]) == 2
+ @assert order(Al[iy, ix]) == 3
+ @assert order(Ar[iy, ix]) == 3
+ @assert order(Au[iy, ix]) == 3
+ @assert order(Ad[iy, ix]) == 3
+ @assert order(T[iy, ix]) == 4
+ @assert order(T[iy, ix]) == 4
+ @assert order(T[iy, ix]) == 4
+ @assert order(T[iy, ix]) == 4
+ @assert length(commoninds(Clu[iy, ix], Cru[iy, ix])) == 1
+ @assert length(commoninds(Clu[iy, ix], Cld[iy, ix])) == 1
+ @assert length(commoninds(Cld[iy, ix], Crd[iy, ix])) == 1
+ @assert length(commoninds(Cru[iy, ix], Crd[iy, ix])) == 1
+ end
end
- end
+ return nothing
end
-function ctmrg(T::Matrix{ITensor}, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad); verbose=false)
- ny, nx = size(T)
+function ctmrg(T::Matrix{ITensor}, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad); verbose = false)
+ ny, nx = size(T)
- verbose && println("Original:")
- verbose && @show calc_κ(T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad))
+ verbose && println("Original:")
+ verbose && @show calc_κ(T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad))
- nstep = 1000
- maxdim = 10
- dirs = ["left", "up", "right", "down"]
- for ctmrg_step in 1:nstep
- verbose && printstepdiv()
- dir = dirs[per(ctmrg_step, length(dirs))]
- verbose && @show ctmrg_step, dir
+ nstep = 1000
+ maxdim = 10
+ dirs = ["left", "up", "right", "down"]
+ for ctmrg_step in 1:nstep
+ verbose && printstepdiv()
+ dir = dirs[per(ctmrg_step, length(dirs))]
+ verbose && @show ctmrg_step, dir
- if dir == "left" || dir == "right"
- leftright_move!(T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad); dir=dir, maxdim=maxdim)
- elseif dir == "up" || dir == "down"
- T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad) = rotate_environment(
- T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad)
- )
- leftright_move!(T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad); dir=dir, maxdim=maxdim)
- T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad) = rotate_environment(
- T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad)
- )
- end
+ if dir == "left" || dir == "right"
+ leftright_move!(T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad); dir = dir, maxdim = maxdim)
+ elseif dir == "up" || dir == "down"
+ T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad) = rotate_environment(
+ T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad)
+ )
+ leftright_move!(T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad); dir = dir, maxdim = maxdim)
+ T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad) = rotate_environment(
+ T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad)
+ )
+ end
- check_environment(T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad))
+ check_environment(T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad))
- verbose && @show Mκ = calc_κ(T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad); dir=dir)
- verbose && @show abs(prod(vec(Mκ)))^(1 / (nx * ny))
- end
- Mκ = calc_κ(T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad); dir=dir)
- κave = abs(prod(vec(Mκ)))^(1 / (nx * ny))
- return κave
+ verbose && @show Mκ = calc_κ(T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad); dir = dir)
+ verbose && @show abs(prod(vec(Mκ)))^(1 / (nx * ny))
+ end
+ Mκ = calc_κ(T, (Clu, Cru, Cld, Crd), (Al, Ar, Au, Ad); dir = dir)
+ κave = abs(prod(vec(Mκ)))^(1 / (nx * ny))
+ return κave
end
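
For orientation, here is a minimal, self-contained sketch of what the `sweepsdims` helper in the CTMRG example hunk above computes: stage `j` covers sweeps `(stepsizes[j-1] + 1):stepsizes[j]` and is assigned bond dimension `dims[j]`. The name `sweepsdims_sketch` is hypothetical and only mirrors the logic shown; it is not part of the patch.

```julia
# Hypothetical standalone restatement of the `sweepsdims` logic above:
# stage j covers sweeps (stepsizes[j-1] + 1):stepsizes[j] with maxdim dims[j].
function sweepsdims_sketch(stepsizes::Vector{Int}, dims::Vector{Int})
    maxdims = zeros(Int, stepsizes[end])
    prev = 0
    for (stop, d) in zip(stepsizes, dims)
        maxdims[(prev + 1):stop] .= d
        prev = stop
    end
    return maxdims
end

sweepsdims_sketch([5, 10], [10, 20])  # [10, 10, 10, 10, 10, 20, 20, 20, 20, 20]
```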
diff --git a/ext/ITensorsHDF5Ext/index.jl b/ext/ITensorsHDF5Ext/index.jl
index 587f7c20ae..e9939cc746 100644
--- a/ext/ITensorsHDF5Ext/index.jl
+++ b/ext/ITensorsHDF5Ext/index.jl
@@ -1,43 +1,43 @@
using HDF5: HDF5, attributes, create_group, open_group, read, write
using ITensors: Arrow, dim, dir, id, Index, plev, QNBlocks, space, tags, TagSet
-function HDF5.write(parent::Union{HDF5.File,HDF5.Group}, name::AbstractString, I::Index)
- g = create_group(parent, name)
- attributes(g)["type"] = "Index"
- attributes(g)["version"] = 1
- write(g, "id", id(I))
- write(g, "dim", dim(I))
- write(g, "dir", Int(dir(I)))
- write(g, "tags", tags(I))
- write(g, "plev", plev(I))
- if typeof(space(I)) == Int
- attributes(g)["space_type"] = "Int"
- elseif typeof(space(I)) == QNBlocks
- attributes(g)["space_type"] = "QNBlocks"
- write(g, "space", space(I))
- else
- error("Index space type not recognized")
- end
+function HDF5.write(parent::Union{HDF5.File, HDF5.Group}, name::AbstractString, I::Index)
+ g = create_group(parent, name)
+ attributes(g)["type"] = "Index"
+ attributes(g)["version"] = 1
+ write(g, "id", id(I))
+ write(g, "dim", dim(I))
+ write(g, "dir", Int(dir(I)))
+ write(g, "tags", tags(I))
+ write(g, "plev", plev(I))
+ return if typeof(space(I)) == Int
+ attributes(g)["space_type"] = "Int"
+ elseif typeof(space(I)) == QNBlocks
+ attributes(g)["space_type"] = "QNBlocks"
+ write(g, "space", space(I))
+ else
+ error("Index space type not recognized")
+ end
end
-function HDF5.read(parent::Union{HDF5.File,HDF5.Group}, name::AbstractString, ::Type{Index})
- g = open_group(parent, name)
- if read(attributes(g)["type"]) != "Index"
- error("HDF5 group or file does not contain Index data")
- end
- id = read(g, "id")
- dim = read(g, "dim")
- dir = Arrow(read(g, "dir"))
- tags = read(g, "tags", TagSet)
- plev = read(g, "plev")
- space_type = "Int"
- if haskey(attributes(g), "space_type")
- space_type = read(attributes(g)["space_type"])
- end
- if space_type == "Int"
- space = dim
- elseif space_type == "QNBlocks"
- space = read(g, "space", QNBlocks)
- end
- return Index(id, space, dir, tags, plev)
+function HDF5.read(parent::Union{HDF5.File, HDF5.Group}, name::AbstractString, ::Type{Index})
+ g = open_group(parent, name)
+ if read(attributes(g)["type"]) != "Index"
+ error("HDF5 group or file does not contain Index data")
+ end
+ id = read(g, "id")
+ dim = read(g, "dim")
+ dir = Arrow(read(g, "dir"))
+ tags = read(g, "tags", TagSet)
+ plev = read(g, "plev")
+ space_type = "Int"
+ if haskey(attributes(g), "space_type")
+ space_type = read(attributes(g)["space_type"])
+ end
+ if space_type == "Int"
+ space = dim
+ elseif space_type == "QNBlocks"
+ space = read(g, "space", QNBlocks)
+ end
+ return Index(id, space, dir, tags, plev)
end
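
As a usage sketch for the `HDF5.write`/`HDF5.read` overloads above (not part of the patch): an `Index` can be round-tripped through an HDF5 file, preserving its `id`, `space`, `dir`, `tags`, and `plev`. The file and dataset names below are illustrative.

```julia
using ITensors, HDF5

i = Index(3; tags = "Site")
h5open("index_demo.h5", "w") do f
    write(f, "i", i)        # dispatches to the HDF5.write overload above
end
i2 = h5open("index_demo.h5", "r") do f
    read(f, "i", Index)     # reconstructs id, dim/space, dir, tags and plev
end
i2 == i                      # true: id, prime level and tags all match
```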
diff --git a/ext/ITensorsHDF5Ext/indexset.jl b/ext/ITensorsHDF5Ext/indexset.jl
index 22e9886b67..93727e0425 100644
--- a/ext/ITensorsHDF5Ext/indexset.jl
+++ b/ext/ITensorsHDF5Ext/indexset.jl
@@ -1,24 +1,25 @@
using HDF5: HDF5, attributes, create_group, open_group, read, write
using ITensors: Index, Indices
-function HDF5.write(parent::Union{HDF5.File,HDF5.Group}, name::AbstractString, is::Indices)
- g = create_group(parent, name)
- attributes(g)["type"] = "IndexSet"
- attributes(g)["version"] = 1
- N = length(is)
- write(g, "length", N)
- for n in 1:N
- write(g, "index_$n", is[n])
- end
+function HDF5.write(parent::Union{HDF5.File, HDF5.Group}, name::AbstractString, is::Indices)
+ g = create_group(parent, name)
+ attributes(g)["type"] = "IndexSet"
+ attributes(g)["version"] = 1
+ N = length(is)
+ write(g, "length", N)
+ for n in 1:N
+ write(g, "index_$n", is[n])
+ end
+ return nothing
end
function HDF5.read(
- parent::Union{HDF5.File,HDF5.Group}, name::AbstractString, T::Type{<:Indices}
-)
- g = open_group(parent, name)
- if read(attributes(g)["type"]) != "IndexSet"
- error("HDF5 group or file does not contain IndexSet data")
- end
- n = read(g, "length")
- return T(Index[read(g, "index_$j", Index) for j in 1:n])
+ parent::Union{HDF5.File, HDF5.Group}, name::AbstractString, T::Type{<:Indices}
+ )
+ g = open_group(parent, name)
+ if read(attributes(g)["type"]) != "IndexSet"
+ error("HDF5 group or file does not contain IndexSet data")
+ end
+ n = read(g, "length")
+ return T(Index[read(g, "index_$j", Index) for j in 1:n])
end
diff --git a/ext/ITensorsHDF5Ext/itensor.jl b/ext/ITensorsHDF5Ext/itensor.jl
index bca520406a..2e7f421c91 100644
--- a/ext/ITensorsHDF5Ext/itensor.jl
+++ b/ext/ITensorsHDF5Ext/itensor.jl
@@ -1,36 +1,36 @@
using HDF5: HDF5, attributes, create_group, open_group, read, write
using ITensors: inds, itensor, ITensor, storage
using NDTensors:
- NDTensors, BlockSparse, Combiner, Dense, Diag, DiagBlockSparse, EmptyStorage
+ NDTensors, BlockSparse, Combiner, Dense, Diag, DiagBlockSparse, EmptyStorage
-function HDF5.write(parent::Union{HDF5.File,HDF5.Group}, name::AbstractString, T::ITensor)
- g = create_group(parent, name)
- attributes(g)["type"] = "ITensor"
- attributes(g)["version"] = 1
- write(g, "inds", inds(T))
- return write(g, "storage", storage(T))
+function HDF5.write(parent::Union{HDF5.File, HDF5.Group}, name::AbstractString, T::ITensor)
+ g = create_group(parent, name)
+ attributes(g)["type"] = "ITensor"
+ attributes(g)["version"] = 1
+ write(g, "inds", inds(T))
+ return write(g, "storage", storage(T))
end
function HDF5.read(
- parent::Union{HDF5.File,HDF5.Group}, name::AbstractString, ::Type{ITensor}
-)
- g = open_group(parent, name)
- if read(attributes(g)["type"]) != "ITensor"
- error("HDF5 group or file does not contain ITensor data")
- end
- inds = read(g, "inds", Vector{<:Index})
+ parent::Union{HDF5.File, HDF5.Group}, name::AbstractString, ::Type{ITensor}
+ )
+ g = open_group(parent, name)
+ if read(attributes(g)["type"]) != "ITensor"
+ error("HDF5 group or file does not contain ITensor data")
+ end
+ inds = read(g, "inds", Vector{<:Index})
- # check input file for key name of ITensor data
- # ITensors.jl <= v0.1.x uses `store` as key
- # whereas ITensors.jl >= v0.2.x uses `storage` as key
- for key in ["storage", "store"]
- if haskey(g, key)
- stypestr = read(attributes(open_group(g, key))["type"])
- stype = eval(Meta.parse(stypestr))
- storage = read(g, key, stype)
- return itensor(storage, inds)
+ # check input file for key name of ITensor data
+ # ITensors.jl <= v0.1.x uses `store` as key
+ # whereas ITensors.jl >= v0.2.x uses `storage` as key
+ for key in ["storage", "store"]
+ if haskey(g, key)
+ stypestr = read(attributes(open_group(g, key))["type"])
+ stype = eval(Meta.parse(stypestr))
+ storage = read(g, key, stype)
+ return itensor(storage, inds)
+ end
end
- end
- return error("HDF5 file: $(g) does not contain correct ITensor data.\nNeither key
- `store` nor `storage` could be found.")
+ return error("HDF5 file: $(g) does not contain correct ITensor data.\nNeither key
+ `store` nor `storage` could be found.")
end
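
Similarly, a sketch of round-tripping an `ITensor` with the overloads above; note that the reader also accepts the legacy `"store"` key written by ITensors.jl v0.1.x files. The file name and tensor values are illustrative only.

```julia
using ITensors, HDF5

i, j = Index(2, "i"), Index(2, "j")
T = ITensor([1.0 2.0; 3.0 4.0], i, j)
h5open("itensor_demo.h5", "w") do f
    write(f, "T", T)            # writes the "inds" and "storage" groups
end
T2 = h5open("itensor_demo.h5", "r") do f
    read(f, "T", ITensor)
end
T2[i => 1, j => 2]               # 2.0, matching the original tensor
```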
diff --git a/ext/ITensorsHDF5Ext/qnindex.jl b/ext/ITensorsHDF5Ext/qnindex.jl
index 996f2b4a17..ddfd6196d4 100644
--- a/ext/ITensorsHDF5Ext/qnindex.jl
+++ b/ext/ITensorsHDF5Ext/qnindex.jl
@@ -1,30 +1,31 @@
using HDF5: HDF5, attributes, create_group, open_group, read, write
using ITensors: dims, QNBlock, QNBlocks
-function HDF5.write(parent::Union{HDF5.File,HDF5.Group}, name::AbstractString, B::QNBlocks)
- g = create_group(parent, name)
- attributes(g)["type"] = "QNBlocks"
- attributes(g)["version"] = 1
- write(g, "length", length(B))
- dims = [block[2] for block in B]
- write(g, "dims", dims)
- for n in 1:length(B)
- write(g, "QN[$n]", B[n][1])
- end
+function HDF5.write(parent::Union{HDF5.File, HDF5.Group}, name::AbstractString, B::QNBlocks)
+ g = create_group(parent, name)
+ attributes(g)["type"] = "QNBlocks"
+ attributes(g)["version"] = 1
+ write(g, "length", length(B))
+ dims = [block[2] for block in B]
+ write(g, "dims", dims)
+ for n in 1:length(B)
+ write(g, "QN[$n]", B[n][1])
+ end
+ return nothing
end
function HDF5.read(
- parent::Union{HDF5.File,HDF5.Group}, name::AbstractString, ::Type{QNBlocks}
-)
- g = open_group(parent, name)
- if read(attributes(g)["type"]) != "QNBlocks"
- error("HDF5 group or file does not contain QNBlocks data")
- end
- N = read(g, "length")
- dims = read(g, "dims")
- B = QNBlocks(undef, N)
- for n in 1:length(B)
- B[n] = QNBlock(read(g, "QN[$n]", QN), dims[n])
- end
- return B
+ parent::Union{HDF5.File, HDF5.Group}, name::AbstractString, ::Type{QNBlocks}
+ )
+ g = open_group(parent, name)
+ if read(attributes(g)["type"]) != "QNBlocks"
+ error("HDF5 group or file does not contain QNBlocks data")
+ end
+ N = read(g, "length")
+ dims = read(g, "dims")
+ B = QNBlocks(undef, N)
+ for n in 1:length(B)
+ B[n] = QNBlock(read(g, "QN[$n]", QN), dims[n])
+ end
+ return B
end
diff --git a/src/argsdict/argsdict.jl b/src/argsdict/argsdict.jl
index 7bece157e5..41c3763032 100644
--- a/src/argsdict/argsdict.jl
+++ b/src/argsdict/argsdict.jl
@@ -1,4 +1,3 @@
-
struct AutoType end
"""
@@ -23,16 +22,16 @@ If `val` doesn't have a type declaration, it gets parsed into
`default_type`, which defaults to `AutoType`, so `parse_type("2")`
returns `(AutoType, 2)`
"""
-function parse_type(valtype, default_type::Type=AutoType)
- # Check for a type decleration
- valtype_vec = split(valtype, "::"; limit=2)
- ValType = default_type
- if length(valtype_vec) > 1
- # Type declaration
- ValType = eval(Meta.parse(valtype_vec[2]))
- end
- val = valtype_vec[1]
- return ValType, val
+function parse_type(valtype, default_type::Type = AutoType)
+    # Check for a type declaration
+ valtype_vec = split(valtype, "::"; limit = 2)
+ ValType = default_type
+ if length(valtype_vec) > 1
+ # Type declaration
+ ValType = eval(Meta.parse(valtype_vec[2]))
+ end
+ val = valtype_vec[1]
+ return ValType, val
end
"""
@@ -51,61 +50,61 @@ the delimiter and the values are after, so
`Dict("N" => "2", "X" => "1e-12")`.
"""
function argsdict(
- args_list::Vector;
- first_arg::Int=1,
- delim='=',
- as_symbols::Bool=false,
- default_named_type::Type=AutoType,
- save_positional::Bool=true,
- default_positional_type::Type=String,
- prefix::String="_arg",
-)
- KeyType = as_symbols ? Symbol : String
- parsed = Dict{KeyType,Any}()
- narg = 1
- for n in first_arg:length(args_list)
- a = args_list[n]
+ args_list::Vector;
+ first_arg::Int = 1,
+ delim = '=',
+ as_symbols::Bool = false,
+ default_named_type::Type = AutoType,
+ save_positional::Bool = true,
+ default_positional_type::Type = String,
+ prefix::String = "_arg",
+ )
+ KeyType = as_symbols ? Symbol : String
+ parsed = Dict{KeyType, Any}()
+ narg = 1
+ for n in first_arg:length(args_list)
+ a = args_list[n]
- # Check if it is a command line flag
- if startswith(a, "--")
- flag = a[3:end]
- if flag == "autotype" || flag == "a"
- default_positional_type = AutoType
- default_named_type = AutoType
- elseif flag == "stringtype" || flag == "s"
- default_positional_type = String
- default_named_type = String
- end
- continue
- end
+ # Check if it is a command line flag
+ if startswith(a, "--")
+ flag = a[3:end]
+ if flag == "autotype" || flag == "a"
+ default_positional_type = AutoType
+ default_named_type = AutoType
+ elseif flag == "stringtype" || flag == "s"
+ default_positional_type = String
+ default_named_type = String
+ end
+ continue
+ end
- optval = split(a, delim)
- if length(optval) == 1
- if save_positional
- val = only(optval)
- parsed[KeyType("$prefix$narg")] = auto_parse(
- parse_type(val, default_positional_type)...
- )
- narg += 1
- else
- @warn "Ignoring argument $a since it does not have the delimiter \"$delim\"."
- end
- continue
- elseif length(optval) == 2
- opt, val = optval
- else
- error(
- "Argument $a has more than one delimiter \"$delim\", which is not well defined."
- )
+ optval = split(a, delim)
+ if length(optval) == 1
+ if save_positional
+ val = only(optval)
+ parsed[KeyType("$prefix$narg")] = auto_parse(
+ parse_type(val, default_positional_type)...
+ )
+ narg += 1
+ else
+ @warn "Ignoring argument $a since it does not have the delimiter \"$delim\"."
+ end
+ continue
+ elseif length(optval) == 2
+ opt, val = optval
+ else
+ error(
+ "Argument $a has more than one delimiter \"$delim\", which is not well defined."
+ )
+ end
+ ValType, key = parse_type(opt, default_named_type)
+ key = strip(key)
+ ' ' in key && error("Option \"$key\" contains spaces, which is not well defined")
+ typedkey = KeyType(key)
+ typedval = auto_parse(ValType, val)
+ parsed[typedkey] = typedval
end
- ValType, key = parse_type(opt, default_named_type)
- key = strip(key)
- ' ' in key && error("Option \"$key\" contains spaces, which is not well defined")
- typedkey = KeyType(key)
- typedval = auto_parse(ValType, val)
- parsed[typedkey] = typedval
- end
- return parsed
+ return parsed
end
argsdict(; kwargs...) = argsdict(ARGS; kwargs...)
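
For reference, a self-contained restatement of the `parse_type` splitting rule from the hunk above; `parse_type_sketch` is a hypothetical name, and `AutoType` is just the marker struct defined there. The commented return values are what the code shown produces.

```julia
# Minimal sketch mirroring `parse_type` above: a trailing `::T` selects the
# target type, otherwise the default (the `AutoType` marker) is kept.
struct AutoType end

function parse_type_sketch(valtype, default_type::Type = AutoType)
    valtype_vec = split(valtype, "::"; limit = 2)
    ValType = length(valtype_vec) > 1 ? eval(Meta.parse(valtype_vec[2])) : default_type
    return ValType, valtype_vec[1]
end

parse_type_sketch("maxdim::Int")  # (Int64, "maxdim")
parse_type_sketch("2")            # (AutoType, "2"), later auto-parsed by `argsdict`
```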
diff --git a/src/broadcast.jl b/src/broadcast.jl
index 4f0848d580..01b21e3426 100644
--- a/src/broadcast.jl
+++ b/src/broadcast.jl
@@ -1,4 +1,3 @@
-
#
# Broadcasting for IndexSets
#
@@ -60,9 +59,9 @@ BroadcastStyle(::Type{<:ITensor}) = ITensorStyle()
broadcastable(T::ITensor) = T
-function Base.similar(bc::Broadcasted{ITensorStyle}, ::Type{ElT}) where {ElT<:Number}
- A = find_type(ITensor, bc.args)
- return similar(A, ElT)
+function Base.similar(bc::Broadcasted{ITensorStyle}, ::Type{ElT}) where {ElT <: Number}
+ A = find_type(ITensor, bc.args)
+ return similar(A, ElT)
end
#
@@ -73,7 +72,7 @@ end
struct ITensorOpScalarStyle <: BroadcastStyle end
function Base.BroadcastStyle(::ITensorStyle, ::DefaultArrayStyle{0})
- return ITensorOpScalarStyle()
+ return ITensorOpScalarStyle()
end
Base.BroadcastStyle(::ITensorStyle, ::ITensorOpScalarStyle) = ITensorOpScalarStyle()
@@ -81,33 +80,33 @@ Base.BroadcastStyle(::ITensorStyle, ::ITensorOpScalarStyle) = ITensorOpScalarSty
instantiate(bc::Broadcasted{ITensorOpScalarStyle}) = bc
function broadcasted(::typeof(Base.literal_pow), ::typeof(^), T::ITensor, x::Val)
- return broadcasted(ITensorOpScalarStyle(), Base.literal_pow, Ref(^), T, Ref(x))
+ return broadcasted(ITensorOpScalarStyle(), Base.literal_pow, Ref(^), T, Ref(x))
end
function Base.similar(
- bc::Broadcasted{ITensorOpScalarStyle}, ::Type{ElT}
-) where {ElT<:Number}
- A = find_type(ITensor, bc.args)
- return similar(A, ElT)
+ bc::Broadcasted{ITensorOpScalarStyle}, ::Type{ElT}
+ ) where {ElT <: Number}
+ A = find_type(ITensor, bc.args)
+ return similar(A, ElT)
end
#
# For arbitrary function chaining f.(g.(h.(x)))
#
-function instantiate(bc::Broadcasted{ITensorStyle,<:Any,<:Function,<:Tuple{Broadcasted}})
- return instantiate(broadcasted(bc.f ∘ bc.args[1].f, bc.args[1].args...))
+function instantiate(bc::Broadcasted{ITensorStyle, <:Any, <:Function, <:Tuple{Broadcasted}})
+ return instantiate(broadcasted(bc.f ∘ bc.args[1].f, bc.args[1].args...))
end
function instantiate(
- bc::Broadcasted{
- ITensorStyle,
- <:Any,
- <:Function,
- <:Tuple{Broadcasted{ITensorStyle,<:Any,<:Function,<:Tuple{<:ITensor}}},
- },
-)
- return broadcasted(bc.f ∘ bc.args[1].f, bc.args[1].args...)
+ bc::Broadcasted{
+ ITensorStyle,
+ <:Any,
+ <:Function,
+ <:Tuple{Broadcasted{ITensorStyle, <:Any, <:Function, <:Tuple{<:ITensor}}},
+ },
+ )
+ return broadcasted(bc.f ∘ bc.args[1].f, bc.args[1].args...)
end
instantiate(bc::Broadcasted{ITensorStyle}) = bc
@@ -121,7 +120,7 @@ instantiate(bc::Broadcasted{ITensorStyle}) = bc
`A = find_type(::Type,As)` returns the first of type Type among the arguments.
"""
function find_type(::Type{T}, args::Tuple) where {T}
- return find_type(T, find_type(T, args[1]), Base.tail(args))
+ return find_type(T, find_type(T, args[1]), Base.tail(args))
end
find_type(::Type{T}, x) where {T} = x
find_type(::Type{T}, a::T, rest) where {T} = a
@@ -134,9 +133,9 @@ find_type(::Type{T}, ::Tuple{}) where {T} = nothing
#
function Base.copyto!(T::ITensor, bc::Broadcasted)
- return error(
- "The broadcasting operation you are attempting is not yet implemented for ITensors, please raise an issue if you would like it to be supported.",
- )
+ return error(
+ "The broadcasting operation you are attempting is not yet implemented for ITensors, please raise an issue if you would like it to be supported.",
+ )
end
#
@@ -145,18 +144,18 @@ end
#
function Base.copyto!(
- T::ITensor,
- bc::Broadcasted{
- ITensorOpScalarStyle,
- <:Any,
- typeof(*),
- <:Tuple{<:Union{<:Number,<:ITensor},<:Union{<:Number,<:ITensor}},
- },
-)
- α = find_type(Number, bc.args)
- A = find_type(ITensor, bc.args)
- map!((t, a) -> α * a, T, T, A)
- return T
+ T::ITensor,
+ bc::Broadcasted{
+ ITensorOpScalarStyle,
+ <:Any,
+ typeof(*),
+ <:Tuple{<:Union{<:Number, <:ITensor}, <:Union{<:Number, <:ITensor}},
+ },
+ )
+ α = find_type(Number, bc.args)
+ A = find_type(ITensor, bc.args)
+ map!((t, a) -> α * a, T, T, A)
+ return T
end
#
@@ -165,17 +164,17 @@ end
#
function Base.copyto!(
- T::ITensor,
- bc::Broadcasted{ITensorOpScalarStyle,<:Any,typeof(/),<:Tuple{<:ITensor,<:Number}},
-)
- α = find_type(Number, bc.args)
- A = find_type(ITensor, bc.args)
- ## GPU compilers can have a problem when map is
- ## Given bc.f. map seems to make a closure with a
- ## relatively complicated signature
- f = bc.f
- map!((t, a) -> f(a, α), T, T, A)
- return T
+ T::ITensor,
+ bc::Broadcasted{ITensorOpScalarStyle, <:Any, typeof(/), <:Tuple{<:ITensor, <:Number}},
+ )
+ α = find_type(Number, bc.args)
+ A = find_type(ITensor, bc.args)
+ ## GPU compilers can have a problem when map is
+ ## Given bc.f. map seems to make a closure with a
+ ## relatively complicated signature
+ f = bc.f
+ map!((t, a) -> f(a, α), T, T, A)
+ return T
end
#
@@ -183,21 +182,21 @@ end
#
function Base.copyto!(
- R::ITensor, bc::Broadcasted{ITensorStyle,<:Any,typeof(/),<:Tuple{<:ITensor,<:ITensor}}
-)
- T1, T2 = bc.args
- f = bc.f
- if R === T1
- map!((t1, t2) -> f(t1, t2), R, T1, T2)
- ## I tried this and it is numberically wrong
- #map!(f, R, T1, T2)
- elseif R === T2
- map!((t1, t2) -> f(t2, t1), R, T2, T1)
- #map!(f, R, T2, T1)
- else
- error("When dividing two ITensors in-place, one must be the same as the output ITensor")
- end
- return R
+ R::ITensor, bc::Broadcasted{ITensorStyle, <:Any, typeof(/), <:Tuple{<:ITensor, <:ITensor}}
+ )
+ T1, T2 = bc.args
+ f = bc.f
+ if R === T1
+ map!((t1, t2) -> f(t1, t2), R, T1, T2)
+        ## I tried this and it is numerically wrong
+ #map!(f, R, T1, T2)
+ elseif R === T2
+ map!((t1, t2) -> f(t2, t1), R, T2, T1)
+ #map!(f, R, T2, T1)
+ else
+ error("When dividing two ITensors in-place, one must be the same as the output ITensor")
+ end
+ return R
end
#
@@ -205,19 +204,19 @@ end
#
function Base.copyto!(
- R::ITensor, bc::Broadcasted{ITensorStyle,<:Any,typeof(⊙),<:Tuple{<:ITensor,<:ITensor}}
-)
- T1, T2 = bc.args
- if R === T1
- map!((t1, t2) -> *(t1, t2), R, T1, T2)
- elseif R === T2
- map!((t1, t2) -> *(t2, t1), R, T2, T1)
- else
- error(
- "When Hadamard producting two ITensors in-place, one must be the same as the output ITensor",
+ R::ITensor, bc::Broadcasted{ITensorStyle, <:Any, typeof(⊙), <:Tuple{<:ITensor, <:ITensor}}
)
- end
- return R
+ T1, T2 = bc.args
+ if R === T1
+ map!((t1, t2) -> *(t1, t2), R, T1, T2)
+ elseif R === T2
+ map!((t1, t2) -> *(t2, t1), R, T2, T1)
+ else
+ error(
+ "When Hadamard producting two ITensors in-place, one must be the same as the output ITensor",
+ )
+ end
+ return R
end
#
@@ -225,25 +224,25 @@ end
#
function Base.copyto!(
- T::ITensor,
- bc::Broadcasted{ITensorOpScalarStyle,<:Any,typeof(/),<:Tuple{<:Number,<:ITensor}},
-)
- α = find_type(Number, bc.args)
- A = find_type(ITensor, bc.args)
- f = bc.f
- map!((t, a) -> f(α, a), T, T, A)
- return T
+ T::ITensor,
+ bc::Broadcasted{ITensorOpScalarStyle, <:Any, typeof(/), <:Tuple{<:Number, <:ITensor}},
+ )
+ α = find_type(Number, bc.args)
+ A = find_type(ITensor, bc.args)
+ f = bc.f
+ map!((t, a) -> f(α, a), T, T, A)
+ return T
end
#
# For B .= A .^ 2.5
#
-function Base.copyto!(R::ITensor, bc::Broadcasted{ITensorOpScalarStyle,<:Any,typeof(^)})
- α = find_type(Number, bc.args)
- T = find_type(ITensor, bc.args)
- map!((r, t) -> t^α, R, R, T)
- return R
+function Base.copyto!(R::ITensor, bc::Broadcasted{ITensorOpScalarStyle, <:Any, typeof(^)})
+ α = find_type(Number, bc.args)
+ T = find_type(ITensor, bc.args)
+ map!((r, t) -> t^α, R, R, T)
+ return R
end
#
@@ -251,15 +250,15 @@ end
#
function Base.copyto!(
- R::ITensor, bc::Broadcasted{ITensorOpScalarStyle,<:Any,typeof(Base.literal_pow)}
-)
- α = find_type(Base.RefValue{<:Val}, bc.args).x
- powf = find_type(Base.RefValue{<:Function}, bc.args).x
- @assert !isnothing(powf)
- T = find_type(ITensor, bc.args)
- f = bc.f
- map!((r, t) -> f(^, t, α), R, R, T)
- return R
+ R::ITensor, bc::Broadcasted{ITensorOpScalarStyle, <:Any, typeof(Base.literal_pow)}
+ )
+ α = find_type(Base.RefValue{<:Val}, bc.args).x
+ powf = find_type(Base.RefValue{<:Function}, bc.args).x
+ @assert !isnothing(powf)
+ T = find_type(ITensor, bc.args)
+ f = bc.f
+ map!((r, t) -> f(^, t, α), R, R, T)
+ return R
end
#
@@ -267,10 +266,10 @@ end
#
function Base.copyto!(
- T::ITensor, bc::Broadcasted{DefaultArrayStyle{0},<:Any,typeof(identity),<:Tuple{<:Number}}
-)
- fill!(T, bc.args[1])
- return T
+ T::ITensor, bc::Broadcasted{DefaultArrayStyle{0}, <:Any, typeof(identity), <:Tuple{<:Number}}
+ )
+ fill!(T, bc.args[1])
+ return T
end
#
@@ -278,19 +277,19 @@ end
#
function Base.copyto!(
- T::ITensor, bc::Broadcasted{ITensorStyle,<:Any,typeof(identity),<:Tuple{<:ITensor}}
-)
- A = bc.args[1]
- map!((r, t) -> t, T, T, A)
- return T
+ T::ITensor, bc::Broadcasted{ITensorStyle, <:Any, typeof(identity), <:Tuple{<:ITensor}}
+ )
+ A = bc.args[1]
+ map!((r, t) -> t, T, T, A)
+ return T
end
-function fmap(bc::Broadcasted{ITensorStyle,<:Any,typeof(+),<:Tuple{Vararg{ITensor}}})
- return +
+function fmap(bc::Broadcasted{ITensorStyle, <:Any, typeof(+), <:Tuple{Vararg{ITensor}}})
+ return +
end
-function fmap(bc::Broadcasted{ITensorStyle,<:Any,typeof(-),<:Tuple{Vararg{ITensor}}})
- return -
+function fmap(bc::Broadcasted{ITensorStyle, <:Any, typeof(-), <:Tuple{Vararg{ITensor}}})
+ return -
end
#
@@ -298,17 +297,17 @@ end
#
function Base.copyto!(
- T::ITensor, bc::Broadcasted{ITensorStyle,<:Any,typeof(+),<:Tuple{Vararg{ITensor}}}
-)
- if T === bc.args[1]
- A = bc.args[2]
- elseif T === bc.args[2]
- A = bc.args[1]
- else
- error("When adding two ITensors in-place, one must be the same as the output ITensor")
- end
- map!(fmap(bc), T, T, A)
- return T
+ T::ITensor, bc::Broadcasted{ITensorStyle, <:Any, typeof(+), <:Tuple{Vararg{ITensor}}}
+ )
+ if T === bc.args[1]
+ A = bc.args[2]
+ elseif T === bc.args[2]
+ A = bc.args[1]
+ else
+ error("When adding two ITensors in-place, one must be the same as the output ITensor")
+ end
+ map!(fmap(bc), T, T, A)
+ return T
end
#
@@ -316,19 +315,19 @@ end
#
function Base.copyto!(
- T::ITensor, bc::Broadcasted{ITensorStyle,<:Any,typeof(-),<:Tuple{Vararg{ITensor}}}
-)
- if T === bc.args[1]
- A = bc.args[2]
- elseif T === bc.args[2]
- A = bc.args[1]
- else
- error(
- "When subtracting two ITensors in-place, one must be the same as the output ITensor"
+ T::ITensor, bc::Broadcasted{ITensorStyle, <:Any, typeof(-), <:Tuple{Vararg{ITensor}}}
)
- end
- map!(fmap(bc), T, T, A)
- return T
+ if T === bc.args[1]
+ A = bc.args[2]
+ elseif T === bc.args[2]
+ A = bc.args[1]
+ else
+ error(
+ "When subtracting two ITensors in-place, one must be the same as the output ITensor"
+ )
+ end
+ map!(fmap(bc), T, T, A)
+ return T
end
#
@@ -348,46 +347,46 @@ end
#
function Base.copyto!(
- T::ITensor, bc::Broadcasted{ITensorOpScalarStyle,<:Any,<:Union{typeof(+),typeof(-)}}
-)
- C = find_type(ITensor, bc.args)
- bc_bc = find_type(Broadcasted, bc.args)
-
- if T === C
- A = find_type(ITensor, bc_bc.args)
- α = find_type(Number, bc_bc.args)
-
- # Check if it is the case .^(::Int)
- γ = find_type(Base.RefValue{<:Val}, bc_bc.args)
- powf = find_type(Base.RefValue{<:Function}, bc_bc.args)
- ## Putting fmap in the map call still doesn't actually grab the function and causes GPU to fail so just realize the function slightly earlier here
- f1 = bc.f
- f2 = bc_bc.f
-
- if !isnothing(α) && !isnothing(A)
- if bc_bc.args[1] isa Number
- map!((r, t) -> f1(r, f2(α, t)), T, T, A)
- else
- map!((r, t) -> f1(r, f2(t, α)), T, T, A)
- end
- elseif !isnothing(γ) && !isnothing(A) && !isnothing(powf)
- map!((r, t) -> f1(r, f2(powf[], t, γ[])), T, T, A)
+ T::ITensor, bc::Broadcasted{ITensorOpScalarStyle, <:Any, <:Union{typeof(+), typeof(-)}}
+ )
+ C = find_type(ITensor, bc.args)
+ bc_bc = find_type(Broadcasted, bc.args)
+
+ if T === C
+ A = find_type(ITensor, bc_bc.args)
+ α = find_type(Number, bc_bc.args)
+
+ # Check if it is the case .^(::Int)
+ γ = find_type(Base.RefValue{<:Val}, bc_bc.args)
+ powf = find_type(Base.RefValue{<:Function}, bc_bc.args)
+ ## Putting fmap in the map call still doesn't actually grab the function and causes GPU to fail so just realize the function slightly earlier here
+ f1 = bc.f
+ f2 = bc_bc.f
+
+ if !isnothing(α) && !isnothing(A)
+ if bc_bc.args[1] isa Number
+ map!((r, t) -> f1(r, f2(α, t)), T, T, A)
+ else
+ map!((r, t) -> f1(r, f2(t, α)), T, T, A)
+ end
+ elseif !isnothing(γ) && !isnothing(A) && !isnothing(powf)
+ map!((r, t) -> f1(r, f2(powf[], t, γ[])), T, T, A)
+ else
+ # In-place contraction:
+ # C .+= α .* A .* B
+ bc_bc_bc = find_type(Broadcasted, bc_bc.args)
+ if isnothing(α)
+ β = find_type(Number, bc_bc_bc.args)
+ B = find_type(ITensor, bc_bc_bc.args)
+ else
+ A, B = bc_bc_bc.args
+ end
+ mul!(T, A, B, β, f1(1))
+ end
else
- # In-place contraction:
- # C .+= α .* A .* B
- bc_bc_bc = find_type(Broadcasted, bc_bc.args)
- if isnothing(α)
- β = find_type(Number, bc_bc_bc.args)
- B = find_type(ITensor, bc_bc_bc.args)
- else
- A, B = bc_bc_bc.args
- end
- mul!(T, A, B, β, f1(1))
+ error("When adding two ITensors in-place, one must be the same as the output ITensor")
end
- else
- error("When adding two ITensors in-place, one must be the same as the output ITensor")
- end
- return T
+ return T
end
#
@@ -395,46 +394,46 @@ end
# C .= β .* C .+ α .* A .* B
#
-struct axpby{Alpha,Beta} <: Function
- alpha::Alpha
- beta::Beta
+struct axpby{Alpha, Beta} <: Function
+ alpha::Alpha
+ beta::Beta
end
(f::axpby)(y, x) = x * f.alpha + y * f.beta
## TODO this code doesn't actually get called
function Base.copyto!(
- T::ITensor,
- bc::Broadcasted{ITensorOpScalarStyle,<:Any,typeof(+),<:Tuple{Vararg{Broadcasted}}},
-)
- bc_α = bc.args[1]
- bc_β = bc.args[2]
- α = find_type(Number, bc_α.args)
- A = find_type(ITensor, bc_α.args)
- β = find_type(Number, bc_β.args)
- C = find_type(ITensor, bc_β.args)
- (T !== A && T !== C) &&
- error("When adding two ITensors in-place, one must be the same as the output ITensor")
- if T === A
- bc_α, bc_β = bc_β, bc_α
- α, β = β, α
- A, C = C, A
- end
- if !isnothing(A) && !isnothing(C) && !isnothing(α) && !isnothing(β)
- # The following fails to compile on some GPU backends.
- # map!((r, t) -> β * r + α * t, T, T, A)
- map!(axpby(α, β), T, T, A)
- else
- bc_bc_α = find_type(Broadcasted, bc_α.args)
- if isnothing(α)
- α = find_type(Number, bc_bc_α.args)
- B = find_type(ITensor, bc_bc_α.args)
+ T::ITensor,
+ bc::Broadcasted{ITensorOpScalarStyle, <:Any, typeof(+), <:Tuple{Vararg{Broadcasted}}},
+ )
+ bc_α = bc.args[1]
+ bc_β = bc.args[2]
+ α = find_type(Number, bc_α.args)
+ A = find_type(ITensor, bc_α.args)
+ β = find_type(Number, bc_β.args)
+ C = find_type(ITensor, bc_β.args)
+ (T !== A && T !== C) &&
+ error("When adding two ITensors in-place, one must be the same as the output ITensor")
+ if T === A
+ bc_α, bc_β = bc_β, bc_α
+ α, β = β, α
+ A, C = C, A
+ end
+ if !isnothing(A) && !isnothing(C) && !isnothing(α) && !isnothing(β)
+ # The following fails to compile on some GPU backends.
+ # map!((r, t) -> β * r + α * t, T, T, A)
+ map!(axpby(α, β), T, T, A)
else
- A, B = bc_bc_α.args
+ bc_bc_α = find_type(Broadcasted, bc_α.args)
+ if isnothing(α)
+ α = find_type(Number, bc_bc_α.args)
+ B = find_type(ITensor, bc_bc_α.args)
+ else
+ A, B = bc_bc_α.args
+ end
+ mul!(T, A, B, α, β)
end
- mul!(T, A, B, α, β)
- end
- return T
+ return T
end
#
@@ -443,21 +442,21 @@ end
## TODO this code fails because of scalar indexing
function Base.copyto!(
- T::ITensor,
- bc::Broadcasted{
- ITensorOpScalarStyle,<:Any,typeof(+),<:Tuple{Vararg{Union{<:ITensor,<:Number}}}
- },
-)
- α = find_type(Number, bc.args)
- A = find_type(ITensor, bc.args)
- if A === T
- tensor(T) .= tensor(A) .+ α
- else
- error(
- "Currently, we don't support `B .= A .+ α` if `B !== A` (i.e. only `A .+= α` is supported",
+ T::ITensor,
+ bc::Broadcasted{
+ ITensorOpScalarStyle, <:Any, typeof(+), <:Tuple{Vararg{Union{<:ITensor, <:Number}}},
+ },
)
- end
- return T
+ α = find_type(Number, bc.args)
+ A = find_type(ITensor, bc.args)
+ if A === T
+ tensor(T) .= tensor(A) .+ α
+ else
+ error(
+ "Currently, we don't support `B .= A .+ α` if `B !== A` (i.e. only `A .+= α` is supported",
+ )
+ end
+ return T
end
#
@@ -465,25 +464,25 @@ end
#
function Base.copyto!(
- T::ITensor, bc::Broadcasted{ITensorStyle,<:Any,typeof(*),<:Tuple{<:ITensor,<:ITensor}}
-)
- mul!(T, bc.args[1], bc.args[2])
- return T
+ T::ITensor, bc::Broadcasted{ITensorStyle, <:Any, typeof(*), <:Tuple{<:ITensor, <:ITensor}}
+ )
+ mul!(T, bc.args[1], bc.args[2])
+ return T
end
#
# For C .= α .* A .* B
#
-function Base.copyto!(T::ITensor, bc::Broadcasted{ITensorOpScalarStyle,<:Any,typeof(*)})
- A = find_type(Union{<:Number,<:ITensor}, bc.args)
- bc_bc = find_type(Broadcasted, bc.args)
- if A isa Number
- mul!(T, bc_bc.args[1], bc_bc.args[2], A)
- else
- mul!(T, A, find_type(ITensor, bc_bc.args), find_type(Number, bc_bc.args))
- end
- return T
+function Base.copyto!(T::ITensor, bc::Broadcasted{ITensorOpScalarStyle, <:Any, typeof(*)})
+ A = find_type(Union{<:Number, <:ITensor}, bc.args)
+ bc_bc = find_type(Broadcasted, bc.args)
+ if A isa Number
+ mul!(T, bc_bc.args[1], bc_bc.args[2], A)
+ else
+ mul!(T, A, find_type(ITensor, bc_bc.args), find_type(Number, bc_bc.args))
+ end
+ return T
end
#
@@ -491,12 +490,12 @@ end
#
function Base.copyto!(
- R::ITensor, bc::Broadcasted{ITensorStyle,<:Any,<:Function,<:Tuple{<:ITensor}}
-)
- f = bc.f
- T = bc.args[1]
- map!((r, t) -> f(t), R, R, T)
- return R
+ R::ITensor, bc::Broadcasted{ITensorStyle, <:Any, <:Function, <:Tuple{<:ITensor}}
+ )
+ f = bc.f
+ T = bc.args[1]
+ map!((r, t) -> f(t), R, R, T)
+ return R
end
#
@@ -504,20 +503,20 @@ end
#
function Base.copyto!(
- R::ITensor,
- bc::Broadcasted{
- ITensorStyle,<:Any,typeof(+),<:Tuple{Vararg{Union{<:ITensor,<:Broadcasted}}}
- },
-)
- R̃ = find_type(ITensor, bc.args)
- bc2 = find_type(Broadcasted, bc.args)
- f = bc2.f
- if R === R̃
- map!((r, t) -> r + f(t), R, R, bc2.args[1])
- else
- error("In C .= B .+ f.(A), C and B must be the same ITensor")
- end
- return R
+ R::ITensor,
+ bc::Broadcasted{
+ ITensorStyle, <:Any, typeof(+), <:Tuple{Vararg{Union{<:ITensor, <:Broadcasted}}},
+ },
+ )
+ R̃ = find_type(ITensor, bc.args)
+ bc2 = find_type(Broadcasted, bc.args)
+ f = bc2.f
+ if R === R̃
+ map!((r, t) -> r + f(t), R, R, bc2.args[1])
+ else
+ error("In C .= B .+ f.(A), C and B must be the same ITensor")
+ end
+ return R
end
#
@@ -526,20 +525,20 @@ end
## TODO check to see if this code is being called as expected
function Base.copyto!(
- R::ITensor, bc::Broadcasted{ITensorStyle,<:Any,typeof(+),<:Tuple{Vararg{Broadcasted}}}
-)
- bc1 = bc.args[1]
- bc2 = bc.args[2]
- T1 = bc1.args[1]
- f1 = bc1.f
- T2 = bc2.args[1]
- f2 = bc2.f
- if R === T1
- map!((r, t) -> f1(r) + f2(t), R, R, T2)
- elseif R === T2
- map!((r, t) -> f2(r) + f1(t), R, R, T1)
- else
- error("In C .= f.(B) .+ g.(A), C and B or A must be the same ITensor")
- end
- return R
+ R::ITensor, bc::Broadcasted{ITensorStyle, <:Any, typeof(+), <:Tuple{Vararg{Broadcasted}}}
+ )
+ bc1 = bc.args[1]
+ bc2 = bc.args[2]
+ T1 = bc1.args[1]
+ f1 = bc1.f
+ T2 = bc2.args[1]
+ f2 = bc2.f
+ if R === T1
+ map!((r, t) -> f1(r) + f2(t), R, R, T2)
+ elseif R === T2
+ map!((r, t) -> f2(r) + f1(t), R, R, T1)
+ else
+ error("In C .= f.(B) .+ g.(A), C and B or A must be the same ITensor")
+ end
+ return R
end
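
As a quick reference for the `copyto!` specializations above, a sketch of the in-place broadcast patterns they cover (scalar scaling, powers, elementwise functions, and aliased addition); this assumes nothing beyond the methods shown, and the tensor values are illustrative.

```julia
using ITensors

i, j = Index(2, "i"), Index(2, "j")
A = ITensor([1.0 2.0; 3.0 4.0], i, j)
B = ITensor([0.0 0.0; 0.0 0.0], i, j)

B .= A           # identity broadcast: copy A into B
B .*= 2.0        # scalar-times-ITensor path (ITensorOpScalarStyle)
B .= A .^ 2      # literal_pow path
A .+= B          # in-place addition: the output must alias one argument
B .= sqrt.(A)    # arbitrary elementwise function f.(A)
```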
diff --git a/src/developer_tools.jl b/src/developer_tools.jl
index 356e9b98eb..2baee5f117 100644
--- a/src/developer_tools.jl
+++ b/src/developer_tools.jl
@@ -1,15 +1,15 @@
-
"""
inspectQNITensor is a developer-level debugging tool
to look at internals or properties of QNITensors
"""
function inspectQNITensor(T::ITensor, is::QNIndexSet)
- #@show T.store.blockoffsets
- #@show T.store.data
- println("Block fluxes:")
- for b in nzblocks(T)
- @show flux(T, b)
- end
+ #@show T.store.blockoffsets
+ #@show T.store.data
+ println("Block fluxes:")
+ for b in nzblocks(T)
+ @show flux(T, b)
+ end
+ return nothing
end
inspectQNITensor(T::ITensor, is::IndexSet) = nothing
inspectQNITensor(T::ITensor) = inspectQNITensor(T, inds(T))
@@ -23,8 +23,8 @@ is useful for inspecting output of programs at certain
points while giving the option to continue.
"""
function pause()
- print(stdout, "(Paused) ")
- c = read(stdin, 1)
- c == UInt8[0x71] && exit(0)
- return nothing
+ print(stdout, "(Paused) ")
+ c = read(stdin, 1)
+ c == UInt8[0x71] && exit(0)
+ return nothing
end
diff --git a/src/global_variables.jl b/src/global_variables.jl
index 18ef622ac2..8413cafd73 100644
--- a/src/global_variables.jl
+++ b/src/global_variables.jl
@@ -1,11 +1,10 @@
-
#
# Warn about the order of the ITensor after contractions
#
const default_warn_order = 14
-const warn_order = Ref{Union{Int,Nothing}}(default_warn_order)
+const warn_order = Ref{Union{Int, Nothing}}(default_warn_order)
"""
ITensors.get_warn_order()
@@ -29,10 +28,10 @@ set to before this function was called).
You can get the current threshold with the function `ITensors.get_warn_order(N::Int)`. You can reset to the default value with
`ITensors.reset_warn_order()`.
"""
-function set_warn_order(N::Union{Int,Nothing})
- N_init = get_warn_order()
- warn_order[] = N
- return N_init
+function set_warn_order(N::Union{Int, Nothing})
+ N_init = get_warn_order()
+ warn_order[] = N
+ return N_init
end
"""
@@ -76,12 +75,12 @@ A * B
```
"""
macro disable_warn_order(block)
- quote
- local old_order = disable_warn_order()
- r = $(esc(block))
- set_warn_order(old_order)
- r
- end
+ return quote
+ local old_order = disable_warn_order()
+ r = $(esc(block))
+ set_warn_order(old_order)
+ r
+ end
end
"""
@@ -102,12 +101,12 @@ end
```
"""
macro set_warn_order(new_order, block)
- quote
- local old_order = set_warn_order($(esc(new_order)))
- r = $(esc(block))
- set_warn_order(old_order)
- r
- end
+ return quote
+ local old_order = set_warn_order($(esc(new_order)))
+ r = $(esc(block))
+ set_warn_order(old_order)
+ r
+ end
end
"""
@@ -122,12 +121,12 @@ order in a block of code to the default value $default_warn_order.
```
"""
macro reset_warn_order(block)
- quote
- local old_order = reset_warn_order()
- r = $(esc(block))
- set_warn_order(old_order)
- r
- end
+ return quote
+ local old_order = reset_warn_order()
+ r = $(esc(block))
+ set_warn_order(old_order)
+ r
+ end
end
#
@@ -154,11 +153,11 @@ operations (equivalent to `enable_threaded_blocksparse()`).
operations (equivalent to `enable_threaded_blocksparse()`).
"""
function enable_threaded_blocksparse(enable::Bool)
- return if enable
- enable_threaded_blocksparse()
- else
- disable_threaded_blocksparse()
- end
+ return if enable
+ enable_threaded_blocksparse()
+ else
+ disable_threaded_blocksparse()
+ end
end
"""
@@ -175,21 +174,21 @@ const _using_debug_checks = Ref{Bool}(false)
using_debug_checks() = _using_debug_checks[]
macro debug_check(ex)
- quote
- if using_debug_checks()
- $(esc(ex))
+ return quote
+ if using_debug_checks()
+ $(esc(ex))
+ end
end
- end
end
function enable_debug_checks()
- _using_debug_checks[] = true
- return nothing
+ _using_debug_checks[] = true
+ return nothing
end
function disable_debug_checks()
- _using_debug_checks[] = false
- return nothing
+ _using_debug_checks[] = false
+ return nothing
end
#
@@ -201,11 +200,11 @@ const _using_contraction_sequence_optimization = Ref(false)
using_contraction_sequence_optimization() = _using_contraction_sequence_optimization[]
function enable_contraction_sequence_optimization()
- _using_contraction_sequence_optimization[] = true
- return nothing
+ _using_contraction_sequence_optimization[] = true
+ return nothing
end
function disable_contraction_sequence_optimization()
- _using_contraction_sequence_optimization[] = false
- return nothing
+ _using_contraction_sequence_optimization[] = false
+ return nothing
end
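
A short usage sketch for the warn-order controls above (tensors and threshold values are illustrative): the block macros save the current threshold, run the block, and restore the previous value afterwards.

```julia
using ITensors

i, j, k = Index(10, "i"), Index(10, "j"), Index(10, "k")
A = ITensor(1.0, i, j)
B = ITensor(1.0, j, k)

ITensors.@set_warn_order 20 begin
    A * B                        # threshold is 20 inside this block only
end

ITensors.get_warn_order()                # back to the previous threshold (default 14)
prev = ITensors.set_warn_order(nothing)  # disable the warning; returns the old setting
ITensors.set_warn_order(prev)            # restore it
```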
diff --git a/src/index.jl b/src/index.jl
index dd135a0c13..fa75e00265 100644
--- a/src/index.jl
+++ b/src/index.jl
@@ -2,7 +2,7 @@ using NDTensors: NDTensors, sim
using .QuantumNumbers: QuantumNumbers, Arrow, In, Neither, Out
using Random: Xoshiro
using .TagSets:
- TagSets, TagSet, @ts_str, addtags, commontags, hastags, removetags, replacetags
+ TagSets, TagSet, @ts_str, addtags, commontags, hastags, removetags, replacetags
#const IDType = UInt128
const IDType = UInt64
@@ -22,14 +22,14 @@ Internally, an `Index` has a fixed `id` number, which is how the ITensor library
single original `Index`. `Index` objects must have the same `id`, as well as the `tags` to compare equal.
"""
struct Index{T}
- id::IDType
- space::T
- dir::Arrow
- tags::TagSet
- plev::Int
- function Index{T}(id, space::T, dir::Arrow, tags, plev) where {T}
- return new{T}(id, space, dir, tags, plev)
- end
+ id::IDType
+ space::T
+ dir::Arrow
+ tags::TagSet
+ plev::Int
+ function Index{T}(id, space::T, dir::Arrow, tags, plev) where {T}
+ return new{T}(id, space, dir, tags, plev)
+ end
end
#######################
@@ -42,11 +42,11 @@ Index{T}(dim::T) where {T} = Index(dim)
# `Nothing` direction gets converted to `Neither`.
function Index{T}(id, space::T, dir::Nothing, tags, plev) where {T}
- return Index{T}(id, space, Neither, tags, plev)
+ return Index{T}(id, space, Neither, tags, plev)
end
function Index(id, space::T, dir, tags, plev) where {T}
- return Index{T}(id, space, dir, tags, plev)
+ return Index{T}(id, space, dir, tags, plev)
end
"""
@@ -72,8 +72,8 @@ julia> tags(i)
"l"
```
"""
-function Index(dim::Number; tags="", plev=0, dir=Neither)
- return Index(rand(index_id_rng(), IDType), dim, dir, tags, plev)
+function Index(dim::Number; tags = "", plev = 0, dir = Neither)
+ return Index(rand(index_id_rng(), IDType), dim, dir, tags, plev)
end
"""
@@ -97,8 +97,8 @@ julia> tags(i)
"l,tag"
```
"""
-function Index(dim::Number, tags::Union{AbstractString,TagSet}; plev::Int=0)
- return Index(dim; tags, plev)
+function Index(dim::Number, tags::Union{AbstractString, TagSet}; plev::Int = 0)
+ return Index(dim; tags, plev)
end
# This is so that when IndexSets are converted
@@ -120,8 +120,8 @@ copy(i::Index) = Index(id(i), copy(space(i)), dir(i), tags(i), plev(i))
Produces an `Index` with the same properties (dimension or QN structure)
but with a new `id`.
"""
-function NDTensors.sim(i::Index; tags=copy(tags(i)), plev=plev(i), dir=dir(i))
- return Index(rand(index_id_rng(), IDType), copy(space(i)), dir, tags, plev)
+function NDTensors.sim(i::Index; tags = copy(tags(i)), plev = plev(i), dir = dir(i))
+ return Index(rand(index_id_rng(), IDType), copy(space(i)), dir, tags, plev)
end
trivial_space(i::Index) = 1
@@ -222,9 +222,9 @@ julia> hastags(i, "Link")
false
```
"""
-TagSets.hastags(i::Index, ts::Union{AbstractString,TagSet}) = hastags(tags(i), ts)
+TagSets.hastags(i::Index, ts::Union{AbstractString, TagSet}) = hastags(tags(i), ts)
-TagSets.hastags(ts::Union{AbstractString,TagSet}) = x -> hastags(x, ts)
+TagSets.hastags(ts::Union{AbstractString, TagSet}) = x -> hastags(x, ts)
"""
hasplev(i::Index, plev::Int)
@@ -316,7 +316,7 @@ hasqns(i::Index) = hasqns(space(i))
Create a copy of Index i with the specified direction.
"""
function setdir(i::Index, dir::Arrow)
- return Index(id(i), copy(space(i)), dir, copy(tags(i)), plev(i))
+ return Index(id(i), copy(space(i)), dir, copy(tags(i)), plev(i))
end
"""
@@ -337,9 +337,9 @@ not(id::IDType) = Not(id)
# identity of an Index.
# Currently only used for hashing an Index.
struct IndexID
- id::IDType
- tags::TagSet
- plev::Int
+ id::IDType
+ tags::TagSet
+ plev::Int
end
IndexID(i::Index) = IndexID(id(i), tags(i), plev(i))
hash(i::Index, h::UInt) = hash(IndexID(i), h)
@@ -352,7 +352,7 @@ then the prime levels are compared, and finally the
tags are compared.
"""
(i1::Index == i2::Index) =
- (id(i1) == id(i2)) && (plev(i1) == plev(i2)) && (tags(i1) == tags(i2))
+ (id(i1) == id(i2)) && (plev(i1) == plev(i2)) && (tags(i1) == tags(i2))
"""
dag(i::Index)
@@ -447,7 +447,7 @@ TagSets.replacetags(i::Index, rep_ts::Pair) = replacetags(i, rep_ts...)
Return a copy of Index `i` with its
prime level incremented by the amount `plinc`
"""
-prime(i::Index, plinc::Int=1) = setprime(i, plev(i) + plinc)
+prime(i::Index, plinc::Int = 1) = setprime(i, plev(i) + plinc)
"""
setprime(i::Index, plev::Int)
@@ -482,12 +482,12 @@ Base.:^(i::Index, pl::Int) = prime(i, pl)
"""
Iterating over Index `I` gives the IndexVals `I(1)` through `I(dim(I))`.
"""
-function Base.iterate(i::Index, state::Int=1)
- Base.depwarn(
- "iteration of `Index` is deprecated, use `eachindval` or `eachval` instead.", :iterate
- )
- (state > dim(i)) && return nothing
- return (i => state, state + 1)
+function Base.iterate(i::Index, state::Int = 1)
+ Base.depwarn(
+ "iteration of `Index` is deprecated, use `eachindval` or `eachval` instead.", :iterate
+ )
+ (state > dim(i)) && return nothing
+ return (i => state, state + 1)
end
# Treat Index as a scalar for the sake of broadcast.
@@ -526,21 +526,21 @@ eachindval(i::Index) = (i => n for n in eachval(i))
# This is a trivial definition for use in NDTensors
# XXX: rename tensorproduct with ⊗ alias
-function NDTensors.outer(i::Index; dir=dir(i), tags="", plev::Int=0)
- return sim(i; tags=tags, plev=plev, dir=dir)
+function NDTensors.outer(i::Index; dir = dir(i), tags = "", plev::Int = 0)
+ return sim(i; tags = tags, plev = plev, dir = dir)
end
# This is for use in NDTensors
# XXX: rename tensorproduct with ⊗ alias
-function NDTensors.outer(i1::Index, i2::Index; tags="")
- return Index(dim(i1) * dim(i2), tags)
+function NDTensors.outer(i1::Index, i2::Index; tags = "")
+ return Index(dim(i1) * dim(i2), tags)
end
# Non-qn Index
# TODO: add ⊕ alias
-directsum(i::Index, j::Index; tags="sum") = Index(dim(i) + dim(j); tags=tags)
-function directsum(i::Index, j::Index, k::Index, inds::Index...; tags="sum")
- return directsum(directsum(i, j; tags), k, inds...; tags)
+directsum(i::Index, j::Index; tags = "sum") = Index(dim(i) + dim(j); tags = tags)
+function directsum(i::Index, j::Index, k::Index, inds::Index...; tags = "sum")
+ return directsum(directsum(i, j; tags), k, inds...; tags)
end
#
@@ -578,13 +578,13 @@ mergeblocks(i::Index) = i
#
# Keep partial backwards compatibility by defining IndexVal as follows:
-const IndexVal{IndexT} = Pair{IndexT,Int}
+const IndexVal{IndexT} = Pair{IndexT, Int}
IndexVal(i::Index, n::Int) = (i => n)
function (i::Index)(n::Integer)
- Base.depwarn("Index(::Int) is deprecated, for an Index i use i=>n instead.", :Index)
- return i => n
+ Base.depwarn("Index(::Int) is deprecated, for an Index i use i=>n instead.", :Index)
+ return i => n
end
NDTensors.ind(iv::Pair{<:Index}) = first(iv)
@@ -617,39 +617,39 @@ dir(iv::Pair{<:Index}) = dir(ind(iv))
#
function primestring(plev)
- if plev < 0
- return " (warning: prime level $plev is less than 0)"
- end
- if plev == 0
- return ""
- elseif plev > 3
- return "'$plev"
- else
- return "'"^plev
- end
+ if plev < 0
+ return " (warning: prime level $plev is less than 0)"
+ end
+ if plev == 0
+ return ""
+ elseif plev > 3
+ return "'$plev"
+ else
+ return "'"^plev
+ end
end
function Base.show(io::IO, i::Index)
- idstr = "$(id(i) % 1000)"
- if length(tags(i)) > 0
- print(
- io,
- "(dim=$(space(i))|id=$(idstr)|\"$(TagSets.tagstring(tags(i)))\")$(primestring(plev(i)))",
- )
- else
- print(io, "(dim=$(space(i))|id=$(idstr))$(primestring(plev(i)))")
- end
+ idstr = "$(id(i) % 1000)"
+ return if length(tags(i)) > 0
+ print(
+ io,
+ "(dim=$(space(i))|id=$(idstr)|\"$(TagSets.tagstring(tags(i)))\")$(primestring(plev(i)))",
+ )
+ else
+ print(io, "(dim=$(space(i))|id=$(idstr))$(primestring(plev(i)))")
+ end
end
-function readcpp(io::IO, ::Type{Index}; format="v3")
- if format != "v3"
- throw(ArgumentError("read Index: format=$format not supported"))
- end
- tags = readcpp(io, TagSet; kwargs...)
- id = read(io, IDType)
- dim = convert(Int64, read(io, Int32))
- dir_int = read(io, Int32)
- dir = dir_int < 0 ? In : Out
- read(io, 8) # Read default IQIndexDat size, 8 bytes
- return Index(id, dim, dir, tags)
+function readcpp(io::IO, ::Type{Index}; format = "v3")
+ if format != "v3"
+ throw(ArgumentError("read Index: format=$format not supported"))
+ end
+ tags = readcpp(io, TagSet; kwargs...)
+ id = read(io, IDType)
+ dim = convert(Int64, read(io, Int32))
+ dir_int = read(io, Int32)
+ dir = dir_int < 0 ? In : Out
+ read(io, 8) # Read default IQIndexDat size, 8 bytes
+ return Index(id, dim, dir, tags)
end
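
To summarize the `Index` interface touched above, a small illustrative sketch (return values as implied by the methods shown in this hunk):

```julia
using ITensors

i = Index(2; tags = "Site,n=1")
dim(i)               # 2
hastags(i, "Site")   # true
j = prime(i)         # same id and tags, prime level incremented to 1
plev(j)              # 1
sim(i) == i          # false: sim keeps the space and tags but draws a new id
directsum(i, j)      # dimension-4 Index tagged "sum" (non-QN method above)
```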
diff --git a/src/itensor.jl b/src/itensor.jl
index 8cf26b2550..a41cfe6a31 100644
--- a/src/itensor.jl
+++ b/src/itensor.jl
@@ -83,20 +83,20 @@ NDTensors.Dense{Float64,Array{Float64,1}}
```
"""
mutable struct ITensor
- tensor
- global @inline _ITensor(parent) = new(parent)
-end
-
-function ITensor(::AllowAlias, T::Tensor{<:Any,<:Any,<:Any,<:Tuple})
- @debug_check begin
- is = inds(T)
- if !allunique(is)
- error(
- "Trying to create ITensors with collection of indices $is. Indices must be unique."
- )
+ tensor
+ global @inline _ITensor(parent) = new(parent)
+end
+
+function ITensor(::AllowAlias, T::Tensor{<:Any, <:Any, <:Any, <:Tuple})
+ @debug_check begin
+ is = inds(T)
+ if !allunique(is)
+ error(
+ "Trying to create ITensors with collection of indices $is. Indices must be unique."
+ )
+ end
end
- end
- return _ITensor(T)
+ return _ITensor(T)
end
#########################
@@ -105,7 +105,7 @@ end
# Version where the indices are not Tuple, so convert to Tuple
function ITensor(::AllowAlias, T::Tensor)::ITensor
- return ITensor(AllowAlias(), setinds(T, NTuple{ndims(T)}(inds(T))))
+ return ITensor(AllowAlias(), setinds(T, NTuple{ndims(T)}(inds(T))))
end
ITensor(::NeverAlias, T::Tensor)::ITensor = ITensor(AllowAlias(), copy(T))
@@ -120,7 +120,7 @@ and a set of indices.
The ITensor stores a view of the TensorStorage.
"""
ITensor(as::AliasStyle, st::TensorStorage, is)::ITensor = ITensor(
- as, Tensor(as, st, Tuple(is))
+ as, Tensor(as, st, Tuple(is))
)
ITensor(as::AliasStyle, is, st::TensorStorage)::ITensor = ITensor(as, st, is)
@@ -139,7 +139,7 @@ of the input data when possible.
itensor(args...; kwargs...)::ITensor = ITensor(AllowAlias(), args...; kwargs...)
ITensor(::AliasStyle, args...; kwargs...)::ITensor = error(
- "ITensor constructor with input arguments of types `$(typeof.(args))` not defined."
+ "ITensor constructor with input arguments of types `$(typeof.(args))` not defined."
)
"""
@@ -180,7 +180,7 @@ B = ITensor(ComplexF64,k,j)
```
"""
function ITensor(eltype::Type{<:Number}, is::Indices)
- return itensor(EmptyStorage(eltype), is)
+ return itensor(EmptyStorage(eltype), is)
end
ITensor(eltype::Type{<:Number}, is...) = ITensor(eltype, indices(is...))
@@ -189,11 +189,11 @@ ITensor(is...) = ITensor(EmptyNumber, is...)
# To fix ambiguity with QN Index version
# TODO: define as `emptyITensor(ElT)`
-ITensor(eltype::Type{<:Number}=EmptyNumber) = ITensor(eltype, ())
+ITensor(eltype::Type{<:Number} = EmptyNumber) = ITensor(eltype, ())
# TODO: define as `emptyITensor(ElT)`
-function ITensor(::Type{ElT}, inds::Tuple{}) where {ElT<:Number}
- return ITensor(EmptyStorage(ElT), inds)
+function ITensor(::Type{ElT}, inds::Tuple{}) where {ElT <: Number}
+ return ITensor(EmptyStorage(ElT), inds)
end
"""
@@ -218,12 +218,12 @@ A = ITensor(undef,i,j)
B = ITensor(ComplexF64,undef,k,j)
```
"""
-function ITensor(::Type{ElT}, ::UndefInitializer, inds::Indices) where {ElT<:Number}
- return itensor(Dense(ElT, undef, dim(inds)), indices(inds))
+function ITensor(::Type{ElT}, ::UndefInitializer, inds::Indices) where {ElT <: Number}
+ return itensor(Dense(ElT, undef, dim(inds)), indices(inds))
end
-function ITensor(::Type{ElT}, ::UndefInitializer, inds...) where {ElT<:Number}
- return ITensor(ElT, undef, indices(inds...))
+function ITensor(::Type{ElT}, ::UndefInitializer, inds...) where {ElT <: Number}
+ return ITensor(ElT, undef, indices(inds...))
end
ITensor(::UndefInitializer, inds::Indices) = ITensor(Float64, undef, inds)
@@ -260,14 +260,14 @@ ITensor(eltype::Type{<:Number}, x::Number, is::Indices) = _ITensor(eltype, x, is
ITensor(eltype::Type{<:Number}, x::Number, is::Tuple{}) = _ITensor(eltype, x, is)
function _ITensor(eltype::Type{<:Number}, x::Number, is::Indices)
- return ITensor(Dense(convert(eltype, x), dim(is)), is)
+ return ITensor(Dense(convert(eltype, x), dim(is)), is)
end
ITensor(eltype::Type{<:Number}, x::Number, is...) = ITensor(eltype, x, indices(is...))
ITensor(x::Number, is...) = ITensor(eltype(x), x, is...)
-const RealOrComplex{T} = Union{T,Complex{T}}
+const RealOrComplex{T} = Union{T, Complex{T}}
ITensor(x::RealOrComplex{Int}, is...) = ITensor(float(x), is...)
@@ -280,7 +280,7 @@ ITensor(x::RealOrComplex{Int}, is...) = ITensor(float(x), is...)
# This is only used internally inside the implementation of `directsum`
# right now.
function zeros_itensor(elt::Type{<:Number}, inds::Index...)
- return ITensor(elt, zero(elt), inds...)
+ return ITensor(elt, zero(elt), inds...)
end
# TODO: Deprecated!
@@ -290,20 +290,20 @@ end
Construct an ITensor with storage type `NDTensors.EmptyStorage`, indices `inds`, and element type `ElT`. If the element type is not specified, it defaults to `NDTensors.EmptyNumber`, which represents a number type that can take on any value (for example, the type of the first value it is set to).
"""
-function emptyITensor(::Type{ElT}, is::Indices) where {ElT<:Number}
- return itensor(EmptyTensor(ElT, is))
+function emptyITensor(::Type{ElT}, is::Indices) where {ElT <: Number}
+ return itensor(EmptyTensor(ElT, is))
end
-function emptyITensor(::Type{ElT}, is...) where {ElT<:Number}
- return emptyITensor(ElT, indices(is...))
+function emptyITensor(::Type{ElT}, is...) where {ElT <: Number}
+ return emptyITensor(ElT, indices(is...))
end
emptyITensor(is::Indices) = emptyITensor(EmptyNumber, is)
emptyITensor(is...) = emptyITensor(EmptyNumber, indices(is...))
-function emptyITensor((::Type{ElT})=EmptyNumber) where {ElT<:Number}
- return itensor(EmptyTensor(ElT, ()))
+function emptyITensor((::Type{ElT}) = EmptyNumber) where {ElT <: Number}
+ return itensor(EmptyTensor(ElT, ()))
end
using NDTensors.TypeParameterAccessors: set_eltype, type_parameters, specify_type_parameters
@@ -349,82 +349,82 @@ T[i => 1, j => 1] == 3.3
In future versions this may not automatically convert `Int`/`Complex{Int}` inputs to floating point versions with `float` (once tensor operations using `Int`/`Complex{Int}` are natively as fast as floating point operations), and in that case the particular element type should not be relied on. To avoid extra conversions (and therefore allocations) it is best practice to directly construct with `itensor([0. 1; 1 0], i', dag(i))` if you want a floating point element type. The conversion is done as a performance optimization since often tensors are passed to BLAS/LAPACK and need to be converted to floating point types compatible with those libraries, but future projects in Julia may allow for efficient operations with more general element types (for example see https://github.com/JuliaLinearAlgebra/Octavian.jl).
"""
function ITensor(
- as::AliasStyle,
- eltype::Type{<:Number},
- A::AbstractArray{<:Number},
- inds::Indices;
- kwargs...,
-)
- length(A) ≠ dim(inds) && throw(
- DimensionMismatch(
- "In ITensor(::AbstractArray, inds), length of AbstractArray ($(length(A))) must match total dimension of IndexSet ($(dim(inds)))",
- ),
- )
- data = set_eltype(typeof(A), eltype)(as, A)
- return itensor(Dense(data), inds)
+ as::AliasStyle,
+ eltype::Type{<:Number},
+ A::AbstractArray{<:Number},
+ inds::Indices;
+ kwargs...,
+ )
+ length(A) ≠ dim(inds) && throw(
+ DimensionMismatch(
+ "In ITensor(::AbstractArray, inds), length of AbstractArray ($(length(A))) must match total dimension of IndexSet ($(dim(inds)))",
+ ),
+ )
+ data = set_eltype(typeof(A), eltype)(as, A)
+ return itensor(Dense(data), inds)
end
function ITensor(
- as::AliasStyle, eltype::Type{<:Number}, A::AbstractArray{<:Number}, inds; kwargs...
-)
- is = indices(inds)
- if !isa(is, Indices)
- error("Indices $inds are not valid for constructing an ITensor.")
- end
- return ITensor(as, eltype, A, is; kwargs...)
+ as::AliasStyle, eltype::Type{<:Number}, A::AbstractArray{<:Number}, inds; kwargs...
+ )
+ is = indices(inds)
+ if !isa(is, Indices)
+ error("Indices $inds are not valid for constructing an ITensor.")
+ end
+ return ITensor(as, eltype, A, is; kwargs...)
end
# Convert `Adjoint` to `Matrix`
function ITensor(
- as::AliasStyle, eltype::Type{<:Number}, A::Adjoint, inds::Indices{Index{Int}}; kwargs...
-)
- return ITensor(as, eltype, Matrix(A), inds; kwargs...)
+ as::AliasStyle, eltype::Type{<:Number}, A::Adjoint, inds::Indices{Index{Int}}; kwargs...
+ )
+ return ITensor(as, eltype, Matrix(A), inds; kwargs...)
end
function ITensor(
- as::AliasStyle, eltype::Type{<:Number}, A::AbstractArray{<:Number}, is...; kwargs...
-)
- return ITensor(as, eltype, A, indices(is...); kwargs...)
+ as::AliasStyle, eltype::Type{<:Number}, A::AbstractArray{<:Number}, is...; kwargs...
+ )
+ return ITensor(as, eltype, A, indices(is...); kwargs...)
end
function ITensor(eltype::Type{<:Number}, A::AbstractArray{<:Number}, is...; kwargs...)
- return ITensor(NeverAlias(), eltype, A, is...; kwargs...)
+ return ITensor(NeverAlias(), eltype, A, is...; kwargs...)
end
# For now, it's not well defined to construct an ITensor without indices
# from a non-zero dimensional AbstractArray
function ITensor(
- as::AliasStyle, eltype::Type{<:Number}, A::AbstractArray{<:Number}; kwargs...
-)
- if length(A) > 1
- error(
- "Trying to create an ITensor without any indices from $(typeof(A)) $A of dimensions $(size(A)). Cannot construct an ITensor from an $(typeof(A)) with more than one element without any indices.",
+ as::AliasStyle, eltype::Type{<:Number}, A::AbstractArray{<:Number}; kwargs...
)
- end
- return ITensor(eltype, A[]; kwargs...)
+ if length(A) > 1
+ error(
+ "Trying to create an ITensor without any indices from $(typeof(A)) $A of dimensions $(size(A)). Cannot construct an ITensor from an $(typeof(A)) with more than one element without any indices.",
+ )
+ end
+ return ITensor(eltype, A[]; kwargs...)
end
function ITensor(eltype::Type{<:Number}, A::AbstractArray{<:Number}; kwargs...)
- return ITensor(NeverAlias(), eltype, A; kwargs...)
+ return ITensor(NeverAlias(), eltype, A; kwargs...)
end
function ITensor(A::AbstractArray{<:Number}; kwargs...)
- return ITensor(NeverAlias(), eltype(A), A; kwargs...)
+ return ITensor(NeverAlias(), eltype(A), A; kwargs...)
end
function ITensor(
- as::AliasStyle, A::AbstractArray{ElT}, is...; kwargs...
-) where {ElT<:Number}
- return ITensor(as, ElT, A, indices(is...); kwargs...)
+ as::AliasStyle, A::AbstractArray{ElT}, is...; kwargs...
+ ) where {ElT <: Number}
+ return ITensor(as, ElT, A, indices(is...); kwargs...)
end
function ITensor(
- as::AliasStyle, A::AbstractArray{ElT}, is...; kwargs...
-) where {ElT<:RealOrComplex{Int}}
- return ITensor(as, float(ElT), A, is...; kwargs...)
+ as::AliasStyle, A::AbstractArray{ElT}, is...; kwargs...
+ ) where {ElT <: RealOrComplex{Int}}
+ return ITensor(as, float(ElT), A, is...; kwargs...)
end
function ITensor(A::AbstractArray{<:Number}, is...; kwargs...)
- return ITensor(NeverAlias(), A, is...; kwargs...)
+ return ITensor(NeverAlias(), A, is...; kwargs...)
end
#
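For context on the `ITensor(::AbstractArray, inds...)` constructors reformatted above, here is a minimal usage sketch. The index names, dimensions, and matrix values are illustrative only; the element-type promotion follows the `RealOrComplex{Int}` method shown in the diff.

```julia
using ITensors

i = Index(2, "i")
j = Index(2, "j")
M = [1 2; 3 4]

# `ITensor` copies the array data (NeverAlias); integer elements
# are promoted to floating point by the `RealOrComplex{Int}` method above.
A = ITensor(M, i, j)
@show eltype(A)          # expected: Float64
@show A[i => 2, j => 1]  # expected: 3.0
```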
@@ -441,11 +441,11 @@ the diagonal.
The storage will have `NDTensors.Diag` type.
"""
-function diag_itensor(::Type{ElT}, is::Indices) where {ElT<:Number}
- return itensor(Diag(ElT, mindim(is)), is)
+function diag_itensor(::Type{ElT}, is::Indices) where {ElT <: Number}
+ return itensor(Diag(ElT, mindim(is)), is)
end
-diag_itensor(::Type{ElT}, is...) where {ElT<:Number} = diag_itensor(ElT, indices(is...))
+diag_itensor(::Type{ElT}, is...) where {ElT <: Number} = diag_itensor(ElT, indices(is...))
diag_itensor(is::Indices) = diag_itensor(Float64, is)
diag_itensor(is...) = diag_itensor(indices(is...))
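A sketch of how the `diag_itensor(::Type, is)` constructor above is typically used, together with the `dense` conversion documented later in this diff. The indices and values are illustrative, not taken from the diff.

```julia
using ITensors

i = Index(3, "i")
j = Index(4, "j")

# Diagonal ITensor with `Diag` storage; the diagonal length is
# mindim(i, j) = 3 and all diagonal elements start at zero.
D = diag_itensor(Float64, i, j)
D[i => 2, j => 2] = 5.0      # only diagonal elements can be written

Dd = dense(D)                # `Dense` storage, off-diagonals filled with zeros
@show Dd[i => 2, j => 1]     # expected: 0.0
@show Dd[i => 2, j => 2]     # expected: 5.0
```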
@@ -470,32 +470,32 @@ The version `diagitensor` might output an ITensor whose storage data
is an alias of the input vector data in order to minimize operations.
"""
function diag_itensor(
- as::AliasStyle, eltype::Type{<:Number}, v::AbstractVector{<:Number}, is::Indices
-)
- length(v) ≠ mindim(is) && error(
- "Length of vector for diagonal must equal minimum of the dimension of the input indices",
- )
- data = set_eltype(typeof(v), eltype)(as, v)
- return itensor(Diag(data), is)
+ as::AliasStyle, eltype::Type{<:Number}, v::AbstractVector{<:Number}, is::Indices
+ )
+ length(v) ≠ mindim(is) && error(
+ "Length of vector for diagonal must equal minimum of the dimension of the input indices",
+ )
+ data = set_eltype(typeof(v), eltype)(as, v)
+ return itensor(Diag(data), is)
end
function diag_itensor(
- as::AliasStyle, eltype::Type{<:Number}, v::AbstractVector{<:Number}, is...
-)
- return diag_itensor(as, eltype, v, indices(is...))
+ as::AliasStyle, eltype::Type{<:Number}, v::AbstractVector{<:Number}, is...
+ )
+ return diag_itensor(as, eltype, v, indices(is...))
end
function diag_itensor(as::AliasStyle, v::AbstractVector, is...)
- return diag_itensor(as, eltype(v), v, is...)
+ return diag_itensor(as, eltype(v), v, is...)
end
function diag_itensor(as::AliasStyle, v::AbstractVector{<:RealOrComplex{Int}}, is...)
- return diag_itensor(AllowAlias(), float(eltype(v)), v, is...)
+ return diag_itensor(AllowAlias(), float(eltype(v)), v, is...)
end
diag_itensor(v::AbstractVector{<:Number}, is...) = diag_itensor(NeverAlias(), v, is...)
function diag_itensor(eltype::Type{<:Number}, v::AbstractVector{<:Number}, is...)
- return diag_itensor(NeverAlias(), eltype, v, is...)
+ return diag_itensor(NeverAlias(), eltype, v, is...)
end
diagitensor(args...; kwargs...) = diag_itensor(AllowAlias(), args...; kwargs...)
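The difference between `diag_itensor` (NeverAlias) and `diagitensor` (AllowAlias) can be seen by mutating the input vector afterwards. A sketch with illustrative data, assuming the aliasing behavior described in the docstring above:

```julia
using ITensors

i = Index(3, "i")
v = [1.0, 2.0, 3.0]

D  = diag_itensor(v, i, i')   # always copies `v`
Da = diagitensor(v, i, i')    # may alias `v` to avoid the copy

v[1] = 100.0
@show D[i => 1, i' => 1]      # expected: still 1.0
@show Da[i => 1, i' => 1]     # may show 100.0 if the data was aliased
```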
@@ -516,23 +516,23 @@ be converted to `float(x)`. Note that this behavior is subject to change
in the future.
"""
function diag_itensor(as::AliasStyle, eltype::Type{<:Number}, x::Number, is::Indices)
- return diag_itensor(AllowAlias(), eltype, fill(eltype(x), mindim(is)), is...)
+ return diag_itensor(AllowAlias(), eltype, fill(eltype(x), mindim(is)), is...)
end
function diag_itensor(as::AliasStyle, eltype::Type{<:Number}, x::Number, is...)
- return diag_itensor(as, eltype, x, indices(is...))
+ return diag_itensor(as, eltype, x, indices(is...))
end
function diag_itensor(as::AliasStyle, x::Number, is...)
- return diag_itensor(as, typeof(x), x, is...)
+ return diag_itensor(as, typeof(x), x, is...)
end
function diag_itensor(as::AliasStyle, x::RealOrComplex{Int}, is...)
- return diag_itensor(as, float(typeof(x)), x, is...)
+ return diag_itensor(as, float(typeof(x)), x, is...)
end
function diag_itensor(eltype::Type{<:Number}, x::Number, is...)
- return diag_itensor(NeverAlias(), eltype, x, is...)
+ return diag_itensor(NeverAlias(), eltype, x, is...)
end
diag_itensor(x::Number, is...) = diag_itensor(NeverAlias(), x, is...)
@@ -547,11 +547,11 @@ Make a uniform diagonal ITensor with all diagonal elements
This function has an alias `δ`.
"""
function delta(eltype::Type{<:Number}, is::Indices)
- return itensor(Diag(one(eltype)), is)
+ return itensor(Diag(one(eltype)), is)
end
function delta(eltype::Type{<:Number}, is...)
- return delta(eltype, indices(is...))
+ return delta(eltype, indices(is...))
end
delta(is...) = delta(Float64, is...)
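A common use of `delta` (alias `δ`) is replacing one index by another through contraction. A sketch with illustrative indices:

```julia
using ITensors

i = Index(2, "i")
j = Index(2, "j")

d = delta(i, j)           # uniform diagonal ITensor, diagonal elements equal to one
@show d[i => 1, j => 1]   # expected: 1.0

A = random_itensor(i)
B = A * d                 # contracting over `i` replaces it by `j`
@show hasind(B, j)        # expected: true
```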
@@ -559,17 +559,17 @@ delta(is...) = delta(Float64, is...)
const δ = delta
function onehot(eltype::Type{<:Number}, ivs::Pair{<:Index}...)
- return onehot(NDTensors.default_datatype(eltype), ivs...)
+ return onehot(NDTensors.default_datatype(eltype), ivs...)
end
function onehot(eltype::Type{<:Number}, ivs::Vector{<:Pair{<:Index}})
- return onehot(NDTensors.default_datatype(eltype), ivs...)
+ return onehot(NDTensors.default_datatype(eltype), ivs...)
end
function setelt(eltype::Type{<:Number}, ivs::Pair{<:Index}...)
- return onehot(NDTensors.default_datatype(eltype), ivs...)
+ return onehot(NDTensors.default_datatype(eltype), ivs...)
end
function onehot(ivs::Pair{<:Index}...)
- return onehot(NDTensors.default_datatype(NDTensors.default_eltype()), ivs...)
+ return onehot(NDTensors.default_datatype(NDTensors.default_eltype()), ivs...)
end
onehot(ivs::Vector{<:Pair{<:Index}}) = onehot(ivs...)
setelt(ivs::Pair{<:Index}...) = onehot(ivs...)
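`onehot` (and its older name `setelt`) builds an ITensor that is one at a single index value and zero elsewhere. A sketch with an illustrative index:

```julia
using ITensors

i = Index(3, "i")

e2 = onehot(i => 2)      # single nonzero element at i = 2
@show e2[i => 2]         # expected: 1.0
@show e2[i => 1]         # expected: 0.0

# A typical use: projecting an ITensor onto a fixed index value.
A = random_itensor(i, i')
slice = A * e2           # order-1 ITensor holding A[i => 2, i' => :]
@show slice[i' => 1] ≈ A[i => 2, i' => 1]
```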
@@ -583,7 +583,7 @@ For example, an ITensor with Diag storage will become Dense storage,
filled with zeros except for the diagonal values.
"""
function dense(A::ITensor)
- return setinds(itensor(dense(tensor(A))), removeqns(inds(A)))
+ return setinds(itensor(dense(tensor(A))), removeqns(inds(A)))
end
"""
@@ -605,52 +605,52 @@ A = random_itensor(i,j)
B = random_itensor(ComplexF64,undef,k,j)
```
"""
-function random_itensor(::Type{S}, is::Indices) where {S<:Number}
- return random_itensor(Random.default_rng(), S, is)
+function random_itensor(::Type{S}, is::Indices) where {S <: Number}
+ return random_itensor(Random.default_rng(), S, is)
end
-function random_itensor(rng::AbstractRNG, ::Type{S}, is::Indices) where {S<:Number}
- T = ITensor(S, undef, is)
- randn!(rng, T)
- return T
+function random_itensor(rng::AbstractRNG, ::Type{S}, is::Indices) where {S <: Number}
+ T = ITensor(S, undef, is)
+ randn!(rng, T)
+ return T
end
-function random_itensor(::Type{S}, is...) where {S<:Number}
- return random_itensor(Random.default_rng(), S, is...)
+function random_itensor(::Type{S}, is...) where {S <: Number}
+ return random_itensor(Random.default_rng(), S, is...)
end
-function random_itensor(rng::AbstractRNG, ::Type{S}, is...) where {S<:Number}
- return random_itensor(rng, S, indices(is...))
+function random_itensor(rng::AbstractRNG, ::Type{S}, is...) where {S <: Number}
+ return random_itensor(rng, S, indices(is...))
end
# To fix ambiguity with QN version
-function random_itensor(::Type{ElT}, is::Tuple{}) where {ElT<:Number}
- return random_itensor(Random.default_rng(), ElT, is)
+function random_itensor(::Type{ElT}, is::Tuple{}) where {ElT <: Number}
+ return random_itensor(Random.default_rng(), ElT, is)
end
# To fix ambiguity with QN version
-function random_itensor(rng::AbstractRNG, ::Type{ElT}, is::Tuple{}) where {ElT<:Number}
- return random_itensor(rng, ElT, Index{Int}[])
+function random_itensor(rng::AbstractRNG, ::Type{ElT}, is::Tuple{}) where {ElT <: Number}
+ return random_itensor(rng, ElT, Index{Int}[])
end
# To fix ambiguity with QN version
function random_itensor(is::Tuple{})
- return random_itensor(Random.default_rng(), is)
+ return random_itensor(Random.default_rng(), is)
end
# To fix ambiguity with QN version
function random_itensor(rng::AbstractRNG, is::Tuple{})
- return random_itensor(rng, Float64, is)
+ return random_itensor(rng, Float64, is)
end
# To fix ambiguity errors with QN version
-function random_itensor(::Type{ElT}) where {ElT<:Number}
- return random_itensor(Random.default_rng(), ElT)
+function random_itensor(::Type{ElT}) where {ElT <: Number}
+ return random_itensor(Random.default_rng(), ElT)
end
# To fix ambiguity errors with QN version
-function random_itensor(rng::AbstractRNG, ::Type{ElT}) where {ElT<:Number}
- return random_itensor(rng, ElT, ())
+function random_itensor(rng::AbstractRNG, ::Type{ElT}) where {ElT <: Number}
+ return random_itensor(rng, ElT, ())
end
random_itensor(is::Indices) = random_itensor(Random.default_rng(), is)
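A sketch of the `random_itensor` methods above, including the RNG-accepting variants; the indices are illustrative:

```julia
using ITensors
using Random

i = Index(2, "i")
j = Index(3, "j")

A = random_itensor(i, j)              # Float64 elements drawn from randn
B = random_itensor(ComplexF64, j, i)  # complex random ITensor

# Reproducible version with an explicit RNG
C1 = random_itensor(Random.MersenneTwister(1234), Float64, i, j)
C2 = random_itensor(Random.MersenneTwister(1234), Float64, i, j)
@show C1 == C2   # expected: true, the same seed gives the same tensor
```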
@@ -674,11 +674,11 @@ zero(T::ITensor)::ITensor = itensor(zero(tensor(T)))
# Helper functions for different view behaviors
# TODO: Move to NDTensors.jl
function (arraytype::Type{<:AbstractArray})(::NeverAlias, A::AbstractArray)
- return specify_type_parameters(arraytype, type_parameters(A))(A)
+ return specify_type_parameters(arraytype, type_parameters(A))(A)
end
function (arraytype::Type{<:AbstractArray})(::AllowAlias, A::AbstractArray)
- return convert(specify_type_parameters(arraytype, type_parameters(A)), A)
+ return convert(specify_type_parameters(arraytype, type_parameters(A)), A)
end
"""
@@ -697,45 +697,45 @@ an Array with a copy of the ITensor's elements. The
order in which the indices are provided indicates
the order of the data in the resulting Array.
"""
-function Array{ElT,N}(T::ITensor, is::Indices) where {ElT,N}
- ndims(T) != N && throw(
- DimensionMismatch(
- "cannot convert an $(ndims(T)) dimensional ITensor to an $N-dimensional Array."
- ),
- )
- TT = tensor(permute(T, is))
- return Array{ElT,N}(TT)::Array{ElT,N}
+function Array{ElT, N}(T::ITensor, is::Indices) where {ElT, N}
+ ndims(T) != N && throw(
+ DimensionMismatch(
+ "cannot convert an $(ndims(T)) dimensional ITensor to an $N-dimensional Array."
+ ),
+ )
+ TT = tensor(permute(T, is))
+ return Array{ElT, N}(TT)::Array{ElT, N}
end
-function Array{ElT,N}(T::ITensor, is...) where {ElT,N}
- return Array{ElT,N}(T, indices(is...))
+function Array{ElT, N}(T::ITensor, is...) where {ElT, N}
+ return Array{ElT, N}(T, indices(is...))
end
function Array{ElT}(T::ITensor, is::Indices) where {ElT}
- return Array{ElT,length(is)}(T, is)
+ return Array{ElT, length(is)}(T, is)
end
function Array{ElT}(T::ITensor, is...) where {ElT}
- return Array{ElT}(T, indices(is...))
+ return Array{ElT}(T, indices(is...))
end
function Array(T::ITensor, is...)
- return Array{eltype(T)}(T, is...)
+ return Array{eltype(T)}(T, is...)
end
-function Array{<:Any,N}(T::ITensor, is...) where {N}
- return Array{eltype(T),N}(T, is...)
+function Array{<:Any, N}(T::ITensor, is...) where {N}
+ return Array{eltype(T), N}(T, is...)
end
function Vector{ElT}(T::ITensor)::Vector{ElT} where {ElT}
- ndims(T) != 1 && throw(
- DimensionMismatch("cannot convert an $(ndims(T)) dimensional ITensor to a Vector.")
- )
- return Array{ElT}(T, inds(T)...)
+ ndims(T) != 1 && throw(
+ DimensionMismatch("cannot convert an $(ndims(T)) dimensional ITensor to a Vector.")
+ )
+ return Array{ElT}(T, inds(T)...)
end
function Vector(T::ITensor)::Vector
- return Array(T, inds(T)...)
+ return Array(T, inds(T)...)
end
#########################
# End ITensor constructors
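The `Array`/`Vector` conversions above respect the index order given by the caller. A sketch with illustrative indices:

```julia
using ITensors

i = Index(2, "i")
j = Index(3, "j")
T = random_itensor(i, j)

M  = Array(T, i, j)          # 2×3 Matrix{Float64}
Mt = Array(T, j, i)          # 3×2, data laid out in the other index order
@show M == permutedims(Mt)   # expected: true

v = random_itensor(i)
@show Vector(v) == Array(v, i)   # `Vector` works because `v` has exactly one index
```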
@@ -851,7 +851,7 @@ diaglength(T::ITensor) = diaglength(tensor(T))
# (Maybe create fallback definitions for dense tensors)
#
-hasqns(T::Union{Tensor,ITensor}) = hasqns(inds(T))
+hasqns(T::Union{Tensor, ITensor}) = hasqns(inds(T))
eachnzblock(T::ITensor) = eachnzblock(tensor(T))
@@ -955,26 +955,26 @@ iterate(A::ITensor, args...) = iterate(tensor(A), args...)
#
function settensor!(T::ITensor, t)::ITensor
- T.tensor = t
- return T
+ T.tensor = t
+ return T
end
function setinds!(T::ITensor, is)::ITensor
- # TODO: always convert to Tuple with Tensor type?
- return settensor!(T, setinds(tensor(T), Tuple(is)))
+ # TODO: always convert to Tuple with Tensor type?
+ return settensor!(T, setinds(tensor(T), Tuple(is)))
end
function setstorage!(T::ITensor, st)::ITensor
- return settensor!(T, setstorage(tensor(T), st))
+ return settensor!(T, setstorage(tensor(T), st))
end
function setinds(T::ITensor, is)::ITensor
- # TODO: always convert to Tuple with Tensor type?
- return itensor(setinds(tensor(T), Tuple(is)))
+ # TODO: always convert to Tuple with Tensor type?
+ return itensor(setinds(tensor(T), Tuple(is)))
end
function setstorage(T::ITensor, st)::ITensor
- return itensor(setstorage(tensor(T), st))
+ return itensor(setstorage(tensor(T), st))
end
removeqns(T::ITensor) = dense(T)
@@ -999,21 +999,21 @@ Convert to the complex version of the storage.
complex(T::ITensor) = itensor(complex(tensor(T)))
function complex!(T::ITensor)
- ct = complex(tensor(T))
- setstorage!(T, storage(ct))
- setinds!(T, inds(ct))
- return T
+ ct = complex(tensor(T))
+ setstorage!(T, storage(ct))
+ setinds!(T, inds(ct))
+ return T
end
function convert_eltype(ElType::Type, T::ITensor)
- if eltype(T) == ElType
- return T
- end
- return itensor(adapt(ElType, tensor(T)))
+ if eltype(T) == ElType
+ return T
+ end
+ return itensor(adapt(ElType, tensor(T)))
end
function convert_leaf_eltype(ElType::Type, T::ITensor)
- return convert_eltype(ElType, T)
+ return convert_eltype(ElType, T)
end
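A sketch of the element-type conversions above. `convert_eltype` may not be exported, so it is qualified with the module name here; the index is illustrative.

```julia
using ITensors

i = Index(2, "i")
T = random_itensor(i)
@show eltype(T)          # Float64 by default

Tc = complex(T)          # out-of-place conversion to a complex element type
@show eltype(Tc)         # expected: ComplexF64

T32 = ITensors.convert_eltype(Float32, T)
@show eltype(T32)        # expected: Float32
```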
"""
@@ -1024,7 +1024,7 @@ Convert the element type of the lowest level containers
a Vector of Vectors.
"""
function convert_leaf_eltype(ElType::Type, A::Array)
- return map(x -> convert_leaf_eltype(ElType, x), A)
+ return map(x -> convert_leaf_eltype(ElType, x), A)
end
"""
@@ -1045,8 +1045,8 @@ lastindex(A::ITensor) = LastVal()
Fill all values of the ITensor with the specified value.
"""
function fill!(T::ITensor, x::Number)
- # Use broadcasting `T .= x`?
- return settensor!(T, fill!!(tensor(T), x))
+ # Use broadcasting `T .= x`?
+ return settensor!(T, fill!!(tensor(T), x))
end
#
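A sketch of `fill!` on an ITensor; the index is illustrative:

```julia
using ITensors

i = Index(2, "i")
T = random_itensor(i, i')

fill!(T, 1.0)              # overwrite every element in place
@show T[i => 1, i' => 2]   # expected: 1.0
```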
@@ -1055,20 +1055,20 @@ end
#
function insertblock!(T::ITensor, args...)
- (!isnothing(flux(T)) && flux(T) ≠ flux(T, args...)) &&
- error("Block does not match current flux")
- TR = insertblock!!(tensor(T), args...)
- settensor!(T, TR)
- return T
+ (!isnothing(flux(T)) && flux(T) ≠ flux(T, args...)) &&
+ error("Block does not match current flux")
+ TR = insertblock!!(tensor(T), args...)
+ settensor!(T, TR)
+ return T
end
function insert_diag_blocks!(T::ITensor)
- ## TODO: Add a check that all diag blocks
- ## have the correct flux
- ## (!isnothing(flux(T)) && check_diagblock_flux(T)) &&
- ## error("Block does not match current flux")
- insert_diag_blocks!(tensor(T))
- return T
+ ## TODO: Add a check that all diag blocks
+ ## have the correct flux
+ ## (!isnothing(flux(T)) && check_diagblock_flux(T)) &&
+ ## error("Block does not match current flux")
+ insert_diag_blocks!(tensor(T))
+ return T
end
"""
@@ -1089,20 +1089,20 @@ A[1, 2] # 2.0, same as: A[i => 1, i' => 2]
@propagate_inbounds @inline _getindex(T::Tensor, I::Integer...) = T[I...]
# TODO: move to NDTensors (would require moving `LastVal` to NDTensors)
-@propagate_inbounds @inline function _getindex(T::Tensor, I::Union{Integer,LastVal}...)
- return T[lastval_to_int(T, I)...]
+@propagate_inbounds @inline function _getindex(T::Tensor, I::Union{Integer, LastVal}...)
+ return T[lastval_to_int(T, I)...]
end
# Special case that handles indexing with `end` like `A[i => end, j => 3]`
-@propagate_inbounds getindex(T::ITensor, I::Union{Integer,LastVal}...)::Any = _getindex(
- tensor(T), I...
+@propagate_inbounds getindex(T::ITensor, I::Union{Integer, LastVal}...)::Any = _getindex(
+ tensor(T), I...
)
# Simple version with just integer indexing, bounds checking gets done by NDTensors
@propagate_inbounds function getindex(T::ITensor, b::Block{N}) where {N}
- # XXX: this should return an ITensor view
- return tensor(T)[b]
+ # XXX: this should return an ITensor view
+ return tensor(T)[b]
end
# Version accepting CartesianIndex, useful when iterating over
@@ -1122,29 +1122,29 @@ A = ITensor(2.0, i, i')
A[i => 1, i' => 2] # 2.0, same as: A[i' => 2, i => 1]
```
"""
-@propagate_inbounds (getindex(T::ITensor, ivs::Vararg{Any,N})::Any) where {N} = _getindex(
- tensor(T), ivs...
+@propagate_inbounds (getindex(T::ITensor, ivs::Vararg{Any, N})::Any) where {N} = _getindex(
+ tensor(T), ivs...
)
## Allowing one to get the first ITensor element if it's an order-0 tensor or an order-1 tensor with a dimension of 1. Also converts GPU data back to CPU.
@propagate_inbounds function getindex(T::ITensor)::Any
- if order(T) != 0 && dim(T) != 1
- throw(
- DimensionMismatch(
- "In scalar(T) or T[], ITensor T is not a scalar (it has indices $(inds(T)))."
- ),
- )
- end
- return tensor(T)[]
+ if order(T) != 0 && dim(T) != 1
+ throw(
+ DimensionMismatch(
+ "In scalar(T) or T[], ITensor T is not a scalar (it has indices $(inds(T)))."
+ ),
+ )
+ end
+ return tensor(T)[]
end
function _vals(T::ITensor, I::String...)
- return _vals(inds(T), I...)
+ return _vals(inds(T), I...)
end
# Enable indexing with string values, like `A["Up"]`.
function getindex(T::ITensor, I1::String, Is::String...)
- return T[_vals(T, I1, Is...)...]
+ return T[_vals(T, I1, Is...)...]
end
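A sketch of the `getindex` behavior documented above: pair-based indexing is order independent, plain integers follow the ITensor's own index order, and `end` is handled through the `LastVal` mechanism. The index names and values are illustrative.

```julia
using ITensors

i = Index(2, "i")
A = ITensor(i, i')
A[i => 1, i' => 2] = 2.0

@show A[i => 1, i' => 2]    # 2.0
@show A[i' => 2, i => 1]    # 2.0, pair order does not matter
@show A[1, 2]               # 2.0, integer order follows inds(A) = (i, i')
@show A[i => 1, i' => end]  # 2.0, `end` maps to the last value of i'
```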
# Defining this with the type signature `I::Vararg{Integer, N}` instead of `I::Integer...` is much faster:
@@ -1158,25 +1158,25 @@ end
# for some reason! Maybe it helps with inlining?
#
@propagate_inbounds @inline function _setindex!!(
- ::SymmetryStyle, T::Tensor, x::Number, I::Vararg{Integer,N}
-) where {N}
- # Generic version, doesn't check the flux
- return setindex!!(T, x, I...)
+ ::SymmetryStyle, T::Tensor, x::Number, I::Vararg{Integer, N}
+ ) where {N}
+ # Generic version, doesn't check the flux
+ return setindex!!(T, x, I...)
end
@propagate_inbounds @inline function _setindex!!(
- T::Tensor, x::Number, I::Vararg{Integer,N}
-) where {N}
- # Use type trait dispatch to split off between QN version that checks the flux
- # and non-QN version that doesn't
+ T::Tensor, x::Number, I::Vararg{Integer, N}
+ ) where {N}
+ # Use type trait dispatch to split off between QN version that checks the flux
+ # and non-QN version that doesn't
- return _setindex!!(symmetrystyle(T), T, x, I...)
+ return _setindex!!(symmetrystyle(T), T, x, I...)
end
@propagate_inbounds @inline function _setindex!!(
- T::Tensor, x::Number, I::Vararg{Union{Integer,LastVal},N}
-) where {N}
- return _setindex!!(T, x, lastval_to_int(T, I)...)
+ T::Tensor, x::Number, I::Vararg{Union{Integer, LastVal}, N}
+ ) where {N}
+ return _setindex!!(T, x, lastval_to_int(T, I)...)
end
"""
@@ -1206,33 +1206,33 @@ A[2, :] = [2.0 3.0]
```
"""
@propagate_inbounds @inline function setindex!(
- T::ITensor, x::Number, I::Vararg{Integer,N}
-) where {N}
- # XXX: for some reason this is slow (257.467 ns (6 allocations: 1.14 KiB) for `A[1, 1, 1] = 1`)
- # Calling `setindex!` directly here is faster (56.635 ns (1 allocation: 368 bytes) for `A[1, 1, 1] = 1`)
- # but of course less generic. Can't figure out how to optimize it,
- # even the generic IndexVal version above is faster (126.818 ns (5 allocations: 768 bytes) for `A[i'' => 1, i' => 1, i => 1] = 1`)
- return settensor!(T, _setindex!!(tensor(T), x, I...))
+ T::ITensor, x::Number, I::Vararg{Integer, N}
+ ) where {N}
+ # XXX: for some reason this is slow (257.467 ns (6 allocations: 1.14 KiB) for `A[1, 1, 1] = 1`)
+ # Calling `setindex!` directly here is faster (56.635 ns (1 allocation: 368 bytes) for `A[1, 1, 1] = 1`)
+ # but of course less generic. Can't figure out how to optimize it,
+ # even the generic IndexVal version above is faster (126.818 ns (5 allocations: 768 bytes) for `A[i'' => 1, i' => 1, i => 1] = 1`)
+ return settensor!(T, _setindex!!(tensor(T), x, I...))
end
@propagate_inbounds function setindex!(T::ITensor, x::Number, I::CartesianIndex)
- return setindex!(T, x, Tuple(I)...)
+ return setindex!(T, x, Tuple(I)...)
end
@propagate_inbounds @inline function setindex!(
- T::ITensor, x::Number, I::Vararg{Any,N}
-) where {N}
- return settensor!(T, _setindex!!(tensor(T), x, I...))
+ T::ITensor, x::Number, I::Vararg{Any, N}
+ ) where {N}
+ return settensor!(T, _setindex!!(tensor(T), x, I...))
end
# XXX: what is this definition for?
Base.checkbounds(::Any, ::Block) = nothing
@propagate_inbounds function setindex!(T::ITensor, A::AbstractArray, I...)
- @boundscheck checkbounds(tensor(T), I...)
- TR = setindex!!(tensor(T), A, I...)
- setstorage!(T, storage(TR))
- return T
+ @boundscheck checkbounds(tensor(T), I...)
+ TR = setindex!!(tensor(T), A, I...)
+ setstorage!(T, storage(TR))
+ return T
end
#function setindex!(T::ITensor, A::AbstractArray, b::Block)
@@ -1242,23 +1242,23 @@ end
#end
function setindex!(T::ITensor, A::AbstractArray, ivs::Pair{<:Index}...)
- input_inds = first.(ivs)
- p = NDTensors.getperm(inds(T), input_inds)
- # Base.to_indices changes Colons into proper ranges, here
- # using the dimensions of the indices.
- vals = to_indices(CartesianIndices(input_inds), last.(ivs))
- # Lazily permute the array to correctly fit into the ITensor,
- # accounting for the input indices being in a different order
- # from the ITensor indices.
- pvals = NDTensors.permute(vals, p)
- T[pvals...] = PermutedDimsArray(reshape(A, length.(vals)), p)
- return T
+ input_inds = first.(ivs)
+ p = NDTensors.getperm(inds(T), input_inds)
+ # Base.to_indices changes Colons into proper ranges, here
+ # using the dimensions of the indices.
+ vals = to_indices(CartesianIndices(input_inds), last.(ivs))
+ # Lazily permute the array to correctly fit into the ITensor,
+ # accounting for the input indices being in a different order
+ # from the ITensor indices.
+ pvals = NDTensors.permute(vals, p)
+ T[pvals...] = PermutedDimsArray(reshape(A, length.(vals)), p)
+ return T
end
# Enable indexing with string values, like `A["Up"]`.
function setindex!(T::ITensor, x::Number, I1::String, Is::String...)
- T[_vals(T, I1, Is...)...] = x
- return T
+ T[_vals(T, I1, Is...)...] = x
+ return T
end
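A sketch of the `setindex!` methods above, mirroring the docstring's slice-assignment example; the indices and values are illustrative:

```julia
using ITensors

i = Index(2, "i")
j = Index(2, "j")
A = ITensor(0.0, i, j)        # dense ITensor filled with zeros

A[i => 1, j => 1] = 1.0       # single element via index-value pairs
A[2, :] = [2.0 3.0]           # slice assignment, as in the docstring above

@show A[i => 2, j => 1]       # expected: 2.0
@show A[i => 2, j => 2]       # expected: 3.0
```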
#function setindex!(::ITensor{Any}, ::Number, ivs...)
@@ -1303,8 +1303,8 @@ itensor2inds(A::ITensor)::Any = inds(A)
itensor2inds(A::Tensor) = inds(A)
itensor2inds(i::Index) = (i,)
itensor2inds(A) = A
-function map_itensor2inds(A::Tuple{Vararg{Any,N}}) where {N}
- return ntuple(i -> itensor2inds(A[i]), Val(N))
+function map_itensor2inds(A::Tuple{Vararg{Any, N}}) where {N}
+ return ntuple(i -> itensor2inds(A[i]), Val(N))
end
# in
@@ -1344,39 +1344,39 @@ hassameinds(A, B) = issetequal(itensor2inds(A), itensor2inds(B))
# Apply the Index set function and then filter the results
function filter_inds_set_function(
- ffilter::Function, fset::Function, A::Vararg{Any,N}
-) where {N}
- return filter(ffilter, fset(map_itensor2inds(A)...))
+ ffilter::Function, fset::Function, A::Vararg{Any, N}
+ ) where {N}
+ return filter(ffilter, fset(map_itensor2inds(A)...))
end
function filter_inds_set_function(fset::Function, A...; kwargs...)
- return filter_inds_set_function(fmatch(; kwargs...), fset, A...)
+ return filter_inds_set_function(fmatch(; kwargs...), fset, A...)
end
for (finds, fset) in (
- (:commoninds, :_intersect),
- (:noncommoninds, :_symdiff),
- (:uniqueinds, :_setdiff),
- (:unioninds, :_union),
-)
- @eval begin
- $finds(args...; kwargs...) = filter_inds_set_function($fset, args...; kwargs...)
- end
+ (:commoninds, :_intersect),
+ (:noncommoninds, :_symdiff),
+ (:uniqueinds, :_setdiff),
+ (:unioninds, :_union),
+ )
+ @eval begin
+ $finds(args...; kwargs...) = filter_inds_set_function($fset, args...; kwargs...)
+ end
end
for find in (:commonind, :noncommonind, :uniqueind, :unionind)
- @eval begin
- $find(args...; kwargs...) = getfirst($(Symbol(find, :s))(args...; kwargs...))
- end
+ @eval begin
+ $find(args...; kwargs...) = getfirst($(Symbol(find, :s))(args...; kwargs...))
+ end
end
function index_filter_kwargs_docstring()
- return """
- Optional keyword arguments:
- * tags::String - a tag name or comma separated list of tag names that the returned indices must all have
- * plev::Int - common prime level that the returned indices must all have
- * inds - Index or collection of indices. Returned indices must come from this set of indices.
- """
+ return """
+ Optional keyword arguments:
+ * tags::String - a tag name or comma separated list of tag names that the returned indices must all have
+ * plev::Int - common prime level that the returned indices must all have
+ * inds - Index or collection of indices. Returned indices must come from this set of indices.
+ """
end
# intersect
@@ -1477,44 +1477,44 @@ inds(A...; kwargs...) = filterinds(A...; kwargs...)
# in-place versions of priming and tagging
for (fname, fname!) in [
- (:(prime), :(prime!)),
- (:(setprime), :(setprime!)),
- (:(noprime), :(noprime!)),
- (:(replaceprime), :(replaceprime!)),
- (:(swapprime), :(swapprime!)),
- (:(TagSets.addtags), :(addtags!)),
- (:(TagSets.removetags), :(removetags!)),
- (:(TagSets.replacetags), :(replacetags!)),
- (:(settags), :(settags!)),
- (:(swaptags), :(swaptags!)),
- (:(replaceind), :(replaceind!)),
- (:(replaceinds), :(replaceinds!)),
- (:(swapind), :(swapind!)),
- (:(swapinds), :(swapinds!)),
-]
- @eval begin
- $fname(f::Function, A::ITensor, args...) = ITensor($fname(f, tensor(A), args...))
-
- # Inlining makes the ITensor functions slower
- @noinline function $fname(f::Function, A::Tensor, args...)
- return setinds(A, $fname(f, inds(A), args...))
- end
-
- function $(fname!)(f::Function, A::ITensor, args...)
- return settensor!(A, $fname(f, tensor(A), args...))
- end
-
- $fname(A::ITensor, args...; kwargs...) = itensor($fname(tensor(A), args...; kwargs...))
-
- # Inlining makes the ITensor functions slower
- @noinline function $fname(A::Tensor, args...; kwargs...)
- return setinds(A, $fname(inds(A), args...; kwargs...))
+ (:(prime), :(prime!)),
+ (:(setprime), :(setprime!)),
+ (:(noprime), :(noprime!)),
+ (:(replaceprime), :(replaceprime!)),
+ (:(swapprime), :(swapprime!)),
+ (:(TagSets.addtags), :(addtags!)),
+ (:(TagSets.removetags), :(removetags!)),
+ (:(TagSets.replacetags), :(replacetags!)),
+ (:(settags), :(settags!)),
+ (:(swaptags), :(swaptags!)),
+ (:(replaceind), :(replaceind!)),
+ (:(replaceinds), :(replaceinds!)),
+ (:(swapind), :(swapind!)),
+ (:(swapinds), :(swapinds!)),
+ ]
+ @eval begin
+ $fname(f::Function, A::ITensor, args...) = ITensor($fname(f, tensor(A), args...))
+
+ # Inlining makes the ITensor functions slower
+ @noinline function $fname(f::Function, A::Tensor, args...)
+ return setinds(A, $fname(f, inds(A), args...))
+ end
+
+ function $(fname!)(f::Function, A::ITensor, args...)
+ return settensor!(A, $fname(f, tensor(A), args...))
+ end
+
+ $fname(A::ITensor, args...; kwargs...) = itensor($fname(tensor(A), args...; kwargs...))
+
+ # Inlining makes the ITensor functions slower
+ @noinline function $fname(A::Tensor, args...; kwargs...)
+ return setinds(A, $fname(inds(A), args...; kwargs...))
+ end
+
+ function $(fname!)(A::ITensor, args...; kwargs...)
+ return settensor!(A, $fname(tensor(A), args...; kwargs...))
+ end
end
-
- function $(fname!)(A::ITensor, args...; kwargs...)
- return settensor!(A, $fname(tensor(A), args...; kwargs...))
- end
- end
end
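The loop above generates both out-of-place and in-place priming/tagging functions. A sketch of the two call styles, with illustrative indices:

```julia
using ITensors

i = Index(2, "i")
j = Index(2, "j")
A = random_itensor(i, j)

Ap = prime(A, i)           # out-of-place: only `i` gets a prime
@show hasind(Ap, i')       # expected: true
@show hasind(Ap, j)        # expected: true

prime!(A)                  # in-place variant generated by the same loop
@show inds(A) == (i', j')  # expected: true
```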
priming_tagging_doc = """
@@ -1707,16 +1707,16 @@ allhastags(A::ITensor, ts) = allhastags(inds(A), ts)
# Returns a tuple of pairs of indices, where the pairs
# are determined by the prime level pairs `plev` and
# tag pairs `tags`.
-function indpairs(T::ITensor; plev::Pair{Int,Int}=0 => 1, tags::Pair=ts"" => ts"")
- is1 = filterinds(T; plev=first(plev), tags=first(tags))
- is2 = filterinds(T; plev=last(plev), tags=last(tags))
- is2to1 = replacetags(mapprime(is2, last(plev) => first(plev)), last(tags) => first(tags))
- is_first = commoninds(is1, is2to1)
- is_last = replacetags(
- mapprime(is_first, first(plev) => last(plev)), first(tags) => last(tags)
- )
- is_last = permute(commoninds(T, is_last), is_last)
- return is_first .=> is_last
+function indpairs(T::ITensor; plev::Pair{Int, Int} = 0 => 1, tags::Pair = ts"" => ts"")
+ is1 = filterinds(T; plev = first(plev), tags = first(tags))
+ is2 = filterinds(T; plev = last(plev), tags = last(tags))
+ is2to1 = replacetags(mapprime(is2, last(plev) => first(plev)), last(tags) => first(tags))
+ is_first = commoninds(is1, is2to1)
+ is_last = replacetags(
+ mapprime(is_first, first(plev) => last(plev)), first(tags) => last(tags)
+ )
+ is_last = permute(commoninds(T, is_last), is_last)
+ return is_first .=> is_last
end
#########################
@@ -1730,36 +1730,36 @@ end
similar(T::ITensor, args...)::ITensor = itensor(NDTensors.similar(tensor(T), args...))
function isapprox(A::ITensor, B::ITensor; kwargs...)
- if !hassameinds(A, B)
- error("In `isapprox(::ITensor, ::ITensor)`, the indices of the ITensors do not
- match. The first ITensor has indices: \n\n$(inds(A))\n\nbut the second
- ITensor has indices: \n\n$(inds(B))")
- end
- B = permute(B, inds(A))
- return isapprox(array(A), array(B); kwargs...)
+ if !hassameinds(A, B)
+ error("In `isapprox(::ITensor, ::ITensor)`, the indices of the ITensors do not
+ match. The first ITensor has indices: \n\n$(inds(A))\n\nbut the second
+ ITensor has indices: \n\n$(inds(B))")
+ end
+ B = permute(B, inds(A))
+ return isapprox(array(A), array(B); kwargs...)
end
function randn!(T::ITensor)
- return randn!(Random.default_rng(), T)
+ return randn!(Random.default_rng(), T)
end
function randn!(rng::AbstractRNG, T::ITensor)
- return settensor!(T, randn!!(rng, tensor(T)))
+ return settensor!(T, randn!!(rng, tensor(T)))
end
norm(T::ITensor) = norm(tensor(T))
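A sketch of `isapprox` (which requires matching index sets and permutes before comparing) and of the in-place `randn!`; the indices are illustrative:

```julia
using ITensors
using Random

i = Index(2, "i")
j = Index(2, "j")
A = random_itensor(i, j)
B = permute(A, j, i)       # same elements, indices stored in the other order

@show isapprox(A, B)       # expected: true

randn!(B)                  # refill `B` in place with new random values
@show isapprox(A, B)       # expected: false (almost surely)
```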
-function dag(as::AliasStyle, T::Tensor{ElT,N}) where {ElT,N}
- if using_auto_fermion() && has_fermionic_subspaces(inds(T)) #
- CT = conj(NeverAlias(), T)
- NDTensors.scale_blocks!(CT, block -> NDTensors.permfactor(reverse(1:N), block, inds(T)))
- return setinds(CT, dag(inds(T)))
- end
- return setinds(conj(as, T), dag(inds(T)))
+function dag(as::AliasStyle, T::Tensor{ElT, N}) where {ElT, N}
+ if using_auto_fermion() && has_fermionic_subspaces(inds(T)) #
+ CT = conj(NeverAlias(), T)
+ NDTensors.scale_blocks!(CT, block -> NDTensors.permfactor(reverse(1:N), block, inds(T)))
+ return setinds(CT, dag(inds(T)))
+ end
+ return setinds(conj(as, T), dag(inds(T)))
end
function dag(as::AliasStyle, T::ITensor)
- return itensor(dag(as, tensor(T)))
+ return itensor(dag(as, tensor(T)))
end
# Helpful for generic code
@@ -1775,21 +1775,21 @@ may share data with the input ITensor). If `allow_alias = false`,
an alias is never returned.
"""
function dag(T::ITensor; kwargs...)
- allow_alias::Bool = deprecated_keyword_argument(
- Bool,
- kwargs;
- new_kw=:allow_alias,
- old_kw=:always_copy,
- default=true,
- funcsym=:dag,
- map=!,
- )
- aliasstyle::Union{AllowAlias,NeverAlias} = allow_alias ? AllowAlias() : NeverAlias()
- return dag(aliasstyle, T)
+ allow_alias::Bool = deprecated_keyword_argument(
+ Bool,
+ kwargs;
+ new_kw = :allow_alias,
+ old_kw = :always_copy,
+ default = true,
+ funcsym = :dag,
+ map = !,
+ )
+ aliasstyle::Union{AllowAlias, NeverAlias} = allow_alias ? AllowAlias() : NeverAlias()
+ return dag(aliasstyle, T)
end
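A sketch of `dag` and its `allow_alias` keyword, following the docstring above; the index and element type are illustrative:

```julia
using ITensors

i = Index(2, "i")
A = random_itensor(ComplexF64, i, i')

Ad = dag(A)                        # conjugate; may share data with `A`
Ac = dag(A; allow_alias = false)   # force a copy of the storage

@show Ad[i => 1, i' => 2] == conj(A[i => 1, i' => 2])   # expected: true
@show Ac == Ad                                          # expected: true
```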
function (T::ITensor * x::Number)::ITensor
- return itensor(x * tensor(T))
+ return itensor(x * tensor(T))
end
# TODO: what about noncommutative number types?
@@ -1802,39 +1802,39 @@ end
-(A::ITensor) = itensor(-tensor(A))
function _add(A::Tensor, B::Tensor)
- if _isemptyscalar(A) && ndims(B) > 0
- return itensor(B)
- elseif _isemptyscalar(B) && ndims(A) > 0
- return itensor(A)
- end
- ndims(A) != ndims(B) &&
- throw(DimensionMismatch("cannot add ITensors with different numbers of indices"))
- itA = itensor(A)
- itB = itensor(B)
- itC = copy(itA)
- itC .+= itB
- return itC
+ if _isemptyscalar(A) && ndims(B) > 0
+ return itensor(B)
+ elseif _isemptyscalar(B) && ndims(A) > 0
+ return itensor(A)
+ end
+ ndims(A) != ndims(B) &&
+ throw(DimensionMismatch("cannot add ITensors with different numbers of indices"))
+ itA = itensor(A)
+ itB = itensor(B)
+ itC = copy(itA)
+ itC .+= itB
+ return itC
end
# TODO: move the order-0 EmptyStorage ITensor special case to NDTensors.
# Unfortunately this is more complicated than it might seem since it
# has to pass through the broadcasting mechanism first.
function (A::ITensor + B::ITensor)
- return itensor(_add(tensor(A), tensor(B)))
+ return itensor(_add(tensor(A), tensor(B)))
end
# TODO: move the order-0 EmptyStorage ITensor special to NDTensors
function (A::ITensor - B::ITensor)
- if _isemptyscalar(A) && ndims(B) > 0
- return -B
- elseif _isemptyscalar(B) && ndims(A) > 0
- return A
- end
- ndims(A) != ndims(B) &&
- throw(DimensionMismatch("cannot subtract ITensors with different numbers of indices"))
- C = copy(A)
- C .-= B
- return C
+ if _isemptyscalar(A) && ndims(B) > 0
+ return -B
+ elseif _isemptyscalar(B) && ndims(A) > 0
+ return A
+ end
+ ndims(A) != ndims(B) &&
+ throw(DimensionMismatch("cannot subtract ITensors with different numbers of indices"))
+ C = copy(A)
+ C .-= B
+ return C
end
real(T::ITensor)::ITensor = itensor(real(tensor(T)))
@@ -1846,8 +1846,8 @@ conj(T::ITensor)::ITensor = itensor(conj(tensor(T)))
dag(::Nothing) = nothing
function (A::ITensor == B::ITensor)
- !hassameinds(A, B) && return false
- return norm(A - B) == zero(promote_type(eltype(A), eltype(B)))
+ !hassameinds(A, B) && return false
+ return norm(A - B) == zero(promote_type(eltype(A), eltype(B)))
end
LinearAlgebra.promote_leaf_eltypes(A::ITensor) = eltype(A)
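A sketch of ITensor addition and subtraction, which match indices by identity (permuting automatically, as implemented in `_map!!` further below); the indices are illustrative:

```julia
using ITensors

i = Index(2, "i")
j = Index(3, "j")
A = random_itensor(i, j)
B = random_itensor(j, i)   # same indices, stored in a different order

C = A + B                  # indices are matched by identity, not by position
@show C[i => 1, j => 2] ≈ A[i => 1, j => 2] + B[i => 1, j => 2]

D = A - B
@show D[i => 2, j => 3] ≈ A[i => 2, j => 3] - B[i => 2, j => 3]
```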
@@ -1881,42 +1881,44 @@ B .= A
```
"""
function copyto!(R::ITensor, T::ITensor)
- R .= T
- return R
+ R .= T
+ return R
end
# Note this already assumes R === T1, which will be lifted
# in the future.
function _map!!(f::Function, R::Tensor, T1::Tensor, T2::Tensor)
- perm = NDTensors.getperm(inds(R), inds(T2))
- if !isperm(perm)
- error("""
- You are trying to add an ITensor with indices:
-
- $(inds(T2))
-
- into an ITensor with indices:
-
- $(inds(R))
-
- but the indices are not permutations of each other.
- """)
- end
- if hasqns(T2) && hasqns(R)
- # Check that Index arrows match
- for (n, p) in enumerate(perm)
- if dir(inds(R)[n]) != dir(inds(T2)[p])
- #println("Mismatched Index: \n$(inds(R)[n])")
- error("Index arrows must be the same to add, subtract, map, or scale QN ITensors")
- end
+ perm = NDTensors.getperm(inds(R), inds(T2))
+ if !isperm(perm)
+ error(
+ """
+ You are trying to add an ITensor with indices:
+
+ $(inds(T2))
+
+ into an ITensor with indices:
+
+ $(inds(R))
+
+ but the indices are not permutations of each other.
+ """
+ )
end
- end
- return permutedims!!(R, T2, perm, f)
+ if hasqns(T2) && hasqns(R)
+ # Check that Index arrows match
+ for (n, p) in enumerate(perm)
+ if dir(inds(R)[n]) != dir(inds(T2)[p])
+ #println("Mismatched Index: \n$(inds(R)[n])")
+ error("Index arrows must be the same to add, subtract, map, or scale QN ITensors")
+ end
+ end
+ end
+ return permutedims!!(R, T2, perm, f)
end
function map!(f::Function, R::ITensor, T1::ITensor, T2::ITensor)
- R !== T1 && error("`map!(f, R, T1, T2)` only supports `R === T1` right now")
- return settensor!(R, _map!!(f, tensor(R), tensor(T1), tensor(T2)))
+ R !== T1 && error("`map!(f, R, T1, T2)` only supports `R === T1` right now")
+ return settensor!(R, _map!!(f, tensor(R), tensor(T1), tensor(T2)))
end
map(f, x::ITensor) = itensor(map(f, tensor(x)))
@@ -1976,26 +1978,26 @@ mul!(R::ITensor, T::ITensor, α::Number) = (R .= T .* α)
# Helper function for deprecating a keyword argument
function deprecated_keyword_argument(
- ::Type{T}, kwargs; new_kw, old_kw, default, funcsym, map=identity
-)::T where {T}
- has_new_kw = haskey(kwargs, new_kw)
- has_old_kw = haskey(kwargs, old_kw)
- res::T = if has_old_kw
- Base.depwarn(
- "In `$func`, keyword argument `$old_kw` is deprecated in favor of `$new_kw`.", func
- )
- if has_new_kw
- println(
- "Warning: keyword arguments `$old_kw` and `$new_kw` are both specified, using `$new_kw`.",
- )
- kwargs[new_kw]
+ ::Type{T}, kwargs; new_kw, old_kw, default, funcsym, map = identity
+ )::T where {T}
+ has_new_kw = haskey(kwargs, new_kw)
+ has_old_kw = haskey(kwargs, old_kw)
+ res::T = if has_old_kw
+ Base.depwarn(
+            "In `$funcsym`, keyword argument `$old_kw` is deprecated in favor of `$new_kw`.", funcsym
+ )
+ if has_new_kw
+ println(
+ "Warning: keyword arguments `$old_kw` and `$new_kw` are both specified, using `$new_kw`.",
+ )
+ kwargs[new_kw]
+ else
+ map(kwargs[old_kw])
+ end
else
- map(kwargs[old_kw])
+ get(kwargs, new_kw, default)
end
- else
- get(kwargs, new_kw, default)
- end
- return res
+ return res
end
#######################################################################
@@ -2004,74 +2006,74 @@ end
#
function summary(io::IO, T::ITensor)
- print(io, "ITensor ord=$(order(T))")
- if hasqns(T)
- println(io)
- for i in 1:order(T)
- print(io, inds(T)[i])
- println(io)
- end
- else
- for i in 1:order(T)
- print(io, " ", inds(T)[i])
+ print(io, "ITensor ord=$(order(T))")
+ if hasqns(T)
+ println(io)
+ for i in 1:order(T)
+ print(io, inds(T)[i])
+ println(io)
+ end
+ else
+ for i in 1:order(T)
+ print(io, " ", inds(T)[i])
+ end
+ println(io)
end
- println(io)
- end
- return print(io, typeof(storage(T)))
+ return print(io, typeof(storage(T)))
end
# TODO: make a specialized printing from Diag
# that emphasizes the missing elements
function show(io::IO, T::ITensor)
- println(io, "ITensor ord=$(order(T))")
- return show(io, MIME"text/plain"(), tensor(T))
+ println(io, "ITensor ord=$(order(T))")
+ return show(io, MIME"text/plain"(), tensor(T))
end
function show(io::IO, mime::MIME"text/plain", T::ITensor)
- return summary(io, T)
+ return summary(io, T)
end
-function readcpp(io::IO, ::Type{Dense{ValT}}; format="v3") where {ValT}
- if format == "v3"
- size = read(io, UInt64)
- data = Vector{ValT}(undef, size)
- for n in 1:size
- data[n] = read(io, ValT)
+function readcpp(io::IO, ::Type{Dense{ValT}}; format = "v3") where {ValT}
+ if format == "v3"
+ size = read(io, UInt64)
+ data = Vector{ValT}(undef, size)
+ for n in 1:size
+ data[n] = read(io, ValT)
+ end
+ return Dense(data)
+ else
+ throw(ArgumentError("read Dense: format=$format not supported"))
end
- return Dense(data)
- else
- throw(ArgumentError("read Dense: format=$format not supported"))
- end
-end
-
-function readcpp(io::IO, ::Type{ITensor}; format="v3")
- if format == "v3"
- # TODO: use Vector{Index} here?
- inds = readcpp(io, IndexSet; kwargs...)
- read(io, 12) # ignore scale factor by reading 12 bytes
- storage_type = read(io, Int32)
- if storage_type == 0 # Null
- storage = Dense{Nothing}()
- elseif storage_type == 1 # DenseReal
- storage = readcpp(io, Dense{Float64}; kwargs...)
- elseif storage_type == 2 # DenseCplx
- storage = readcpp(io, Dense{ComplexF64}; kwargs...)
- elseif storage_type == 3 # Combiner
- storage = CombinerStorage(T.inds[1])
- #elseif storage_type==4 # DiagReal
- #elseif storage_type==5 # DiagCplx
- #elseif storage_type==6 # QDenseReal
- #elseif storage_type==7 # QDenseCplx
- #elseif storage_type==8 # QCombiner
- #elseif storage_type==9 # QDiagReal
- #elseif storage_type==10 # QDiagCplx
- #elseif storage_type==11 # ScalarReal
- #elseif storage_type==12 # ScalarCplx
+end
+
+function readcpp(io::IO, ::Type{ITensor}; format = "v3")
+ if format == "v3"
+ # TODO: use Vector{Index} here?
+        inds = readcpp(io, IndexSet; format = format)
+ read(io, 12) # ignore scale factor by reading 12 bytes
+ storage_type = read(io, Int32)
+ if storage_type == 0 # Null
+ storage = Dense{Nothing}()
+ elseif storage_type == 1 # DenseReal
+            storage = readcpp(io, Dense{Float64}; format = format)
+        elseif storage_type == 2 # DenseCplx
+            storage = readcpp(io, Dense{ComplexF64}; format = format)
+        elseif storage_type == 3 # Combiner
+            storage = CombinerStorage(inds[1])
+ #elseif storage_type==4 # DiagReal
+ #elseif storage_type==5 # DiagCplx
+ #elseif storage_type==6 # QDenseReal
+ #elseif storage_type==7 # QDenseCplx
+ #elseif storage_type==8 # QCombiner
+ #elseif storage_type==9 # QDiagReal
+ #elseif storage_type==10 # QDiagCplx
+ #elseif storage_type==11 # ScalarReal
+ #elseif storage_type==12 # ScalarCplx
+ else
+ throw(ErrorException("C++ ITensor storage type $storage_type not yet supported"))
+ end
+ return itensor(storage, inds)
else
- throw(ErrorException("C++ ITensor storage type $storage_type not yet supported"))
+ throw(ArgumentError("read ITensor: format=$format not supported"))
end
- return itensor(storage, inds)
- else
- throw(ArgumentError("read ITensor: format=$format not supported"))
- end
end
diff --git a/src/lastval.jl b/src/lastval.jl
index b1f0c7867f..618d1435f7 100644
--- a/src/lastval.jl
+++ b/src/lastval.jl
@@ -1,6 +1,5 @@
-
struct LastVal{F}
- f::F
+ f::F
end
LastVal() = LastVal(identity)
diff --git a/src/lib/QuantumNumbers/src/qn.jl b/src/lib/QuantumNumbers/src/qn.jl
index 1ee4142a62..936dcdfce2 100644
--- a/src/lib/QuantumNumbers/src/qn.jl
+++ b/src/lib/QuantumNumbers/src/qn.jl
@@ -4,8 +4,8 @@ using ..SmallStrings: SmallString
using StaticArrays: MVector, SVector
const maxQNs = 4
-const QNStorage = SVector{maxQNs,QNVal}
-const MQNStorage = MVector{maxQNs,QNVal}
+const QNStorage = SVector{maxQNs, QNVal}
+const MQNStorage = MVector{maxQNs, QNVal}
"""
A QN object stores a collection of up to four
@@ -21,26 +21,26 @@ the named values. If a name is missing from the
collection, its value is treated as zero.
"""
struct QN
- data::QNStorage
- function QN()
- s = QNStorage(ntuple(_ -> ZeroVal, Val(maxQNs)))
- return new(s)
- end
- QN(s::QNStorage) = new(s)
+ data::QNStorage
+ function QN()
+ s = QNStorage(ntuple(_ -> ZeroVal, Val(maxQNs)))
+ return new(s)
+ end
+ QN(s::QNStorage) = new(s)
end
QN(mqn::MQNStorage) = QN(QNStorage(mqn))
-QN(mqn::NTuple{N,QNVal}) where {N} = QN(QNStorage(mqn))
+QN(mqn::NTuple{N, QNVal}) where {N} = QN(QNStorage(mqn))
function Base.hash(obj::QN, h::UInt)
- out = h
- for qv in obj.data
- if val(qv) != 0
- out = hash(qv, out)
+ out = h
+ for qv in obj.data
+ if val(qv) != 0
+ out = hash(qv, out)
+ end
end
- end
- return out
+ return out
end
"""
@@ -58,18 +58,18 @@ q = QN(("P",0,2),("Sz",0)).
```
"""
function QN(qvs...)
- m = MQNStorage(ntuple(_ -> ZeroVal, Val(maxQNs)))
- for (n, qv) in enumerate(qvs)
- m[n] = QNVal(qv...)
- end
- Nvals = length(qvs)
- sort!(@view m[1:Nvals]; by=name, alg=InsertionSort)
- for n in 1:(length(qvs) - 1)
- if name(m[n]) == name(m[n + 1])
- error("Duplicate name \"$(name(m[n]))\" in QN")
+ m = MQNStorage(ntuple(_ -> ZeroVal, Val(maxQNs)))
+ for (n, qv) in enumerate(qvs)
+ m[n] = QNVal(qv...)
+ end
+ Nvals = length(qvs)
+ sort!(@view m[1:Nvals]; by = name, alg = InsertionSort)
+ for n in 1:(length(qvs) - 1)
+ if name(m[n]) == name(m[n + 1])
+ error("Duplicate name \"$(name(m[n]))\" in QN")
+ end
end
- end
- return QN(QNStorage(m))
+ return QN(QNStorage(m))
end
"""
@@ -79,7 +79,7 @@ Construct a QN with a single named value
by providing the name, value, and optional
modulus.
"""
-QN(name, val::Int, modulus::Int=1) = QN((name, val, modulus))
+QN(name, val::Int, modulus::Int = 1) = QN((name, val, modulus))
"""
QN(val::Int,modulus::Int=1)
@@ -88,7 +88,7 @@ Construct a QN with a single unnamed value
(equivalent to the name being the empty string)
with optional modulus.
"""
-QN(val::Int, modulus::Int=1) = QN(("", val, modulus))
+QN(val::Int, modulus::Int = 1) = QN(("", val, modulus))
data(qn::QN) = qn.data
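A sketch of constructing and combining `QN` objects, following the docstrings above; the quantum-number names and values are illustrative:

```julia
using ITensors

q1 = QN("Sz", 1)                 # single named value
q2 = QN(("N", 1), ("Sz", -1))    # up to four named values, sorted by name

@show q1 + q2                    # values with the same name are added
@show -q1
@show val(q2, "N")               # expected: 1

qp = QN("P", 1, 2)               # value 1 with modulus 2
@show qp + qp                    # expected: the "P" value wraps back to 0 (mod 2)
```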
@@ -101,15 +101,15 @@ Base.lastindex(qn::QN) = length(qn)
isactive(qn::QN) = isactive(qn[1])
function nactive(q::QN)
- for n in 1:maxQNs
- !isactive(q[n]) && (return n - 1)
- end
- return maxQNs
+ for n in 1:maxQNs
+ !isactive(q[n]) && (return n - 1)
+ end
+ return maxQNs
end
-function Base.iterate(qn::QN, state::Int=1)
- (state > length(qn)) && return nothing
- return (qn[state], state + 1)
+function Base.iterate(qn::QN, state::Int = 1)
+ (state > length(qn)) && return nothing
+ return (qn[state], state + 1)
end
Base.keys(qn::QN) = keys(data(qn))
@@ -121,11 +121,11 @@ Get the value within the QN q
corresponding to the string `name`
"""
function ITensors.val(q::QN, name_)
- sname = SmallString(name_)
- for n in 1:maxQNs
- name(q[n]) == sname && return val(q[n])
- end
- return 0
+ sname = SmallString(name_)
+ for n in 1:maxQNs
+ name(q[n]) == sname && return val(q[n])
+ end
+ return 0
end
"""
@@ -135,11 +135,11 @@ Get the modulus within the QN q
corresponding to the string `name`
"""
function modulus(q::QN, name_)
- sname = SmallString(name_)
- for n in 1:maxQNs
- name(q[n]) == sname && return modulus(q[n])
- end
- return 0
+ sname = SmallString(name_)
+ for n in 1:maxQNs
+ name(q[n]) == sname && return modulus(q[n])
+ end
+ return 0
end
"""
@@ -150,195 +150,196 @@ the same names as q, but with
all values set to zero.
"""
function Base.zero(qn::QN)
- mqn = MQNStorage(undef)
- for i in 1:length(mqn)
- mqn[i] = zero(qn[i])
- end
- return QN(mqn)
+ mqn = MQNStorage(undef)
+ for i in 1:length(mqn)
+ mqn[i] = zero(qn[i])
+ end
+ return QN(mqn)
end
function Base.:(*)(dir::Arrow, qn::QN)
- mqn = MQNStorage(undef)
- for i in 1:length(mqn)
- mqn[i] = dir * qn[i]
- end
- return QN(mqn)
+ mqn = MQNStorage(undef)
+ for i in 1:length(mqn)
+ mqn[i] = dir * qn[i]
+ end
+ return QN(mqn)
end
Base.:(*)(qn::QN, dir::Arrow) = (dir * qn)
function Base.:(-)(qn::QN)
- mqn = MQNStorage(undef)
- for i in 1:length(mqn)
- mqn[i] = -qn[i]
- end
- return QN(mqn)
+ mqn = MQNStorage(undef)
+ for i in 1:length(mqn)
+ mqn[i] = -qn[i]
+ end
+ return QN(mqn)
end
function Base.:(+)(a::QN, b::QN)
- na = nactive(a)
- iszero(na) && return b
- nb = nactive(b)
- iszero(nb) && return a
- sectors_a = data(a)
- sectors_b = data(b)
- msectors_c = MQNStorage(sectors_a)
- nc = na
- @inbounds for ib in 1:nb
- sector_b = sectors_b[ib]
- found = false
- for ia in 1:na
- sector_a = sectors_a[ia]
- if name(sector_b) == name(sector_a)
- msectors_c[ia] = sector_a + sector_b
- found = true
- continue
- end
+ na = nactive(a)
+ iszero(na) && return b
+ nb = nactive(b)
+ iszero(nb) && return a
+ sectors_a = data(a)
+ sectors_b = data(b)
+ msectors_c = MQNStorage(sectors_a)
+ nc = na
+ @inbounds for ib in 1:nb
+ sector_b = sectors_b[ib]
+ found = false
+ for ia in 1:na
+ sector_a = sectors_a[ia]
+ if name(sector_b) == name(sector_a)
+ msectors_c[ia] = sector_a + sector_b
+ found = true
+ continue
+ end
+ end
+ if !found
+ if nc >= length(msectors_c)
+ error("Cannot add QN, maximum number of QNVals reached")
+ end
+ msectors_c[nc += 1] = sector_b
+ end
end
- if !found
- if nc >= length(msectors_c)
- error("Cannot add QN, maximum number of QNVals reached")
- end
- msectors_c[nc += 1] = sector_b
- end
- end
- sort!(view(msectors_c, 1:nc); by=name)
- return QN(msectors_c)
+ sort!(view(msectors_c, 1:nc); by = name)
+ return QN(msectors_c)
end
Base.:(-)(a::QN, b::QN) = (a + (-b))
function hasname(qn::QN, qv_find::QNVal)
- for qv in qn
- name(qv) == name(qv_find) && return true
- end
- return false
+ for qv in qn
+ name(qv) == name(qv_find) && return true
+ end
+ return false
end
# Does not check whether the QN is already full; drops
# the last QNVal.
# Rename to `insert`?
function NDTensors.insertafter(qn::QN, qv::QNVal, pos::Int)
- return QN(NDTensors.insertafter(Tuple(qn), qv, pos)[1:length(qn)])
+ return QN(NDTensors.insertafter(Tuple(qn), qv, pos)[1:length(qn)])
end
function addqnval(qn::QN, qv_add::QNVal)
- isactive(qn[end]) &&
- error("Cannot add QNVal, QN already contains maximum number of QNVals")
- for (pos, qv) in enumerate(qn)
- if qv_add < qv || !isactive(qv)
- return NDTensors.insertafter(qn, qv_add, pos - 1)
+ isactive(qn[end]) &&
+ error("Cannot add QNVal, QN already contains maximum number of QNVals")
+ for (pos, qv) in enumerate(qn)
+ if qv_add < qv || !isactive(qv)
+ return NDTensors.insertafter(qn, qv_add, pos - 1)
+ end
end
- end
+ return nothing
end
# Fills in the qns of qn1 that qn2 has but
# qn1 doesn't
function fillqns_from(qn1::QN, qn2::QN)
- # If qn1 has no non-trivial qns, fill
- # with qn2
- !isactive(qn1) && return zero(qn2)
- !isactive(qn2) && return qn1
- for qv2 in qn2
- if !hasname(qn1, qv2)
- qn1 = addqnval(qn1, zero(qv2))
+ # If qn1 has no non-trivial qns, fill
+ # with qn2
+ !isactive(qn1) && return zero(qn2)
+ !isactive(qn2) && return qn1
+ for qv2 in qn2
+ if !hasname(qn1, qv2)
+ qn1 = addqnval(qn1, zero(qv2))
+ end
end
- end
- return qn1
+ return qn1
end
# Make sure qn1 and qn2 have all of the same qns
function fillqns(qn1::QN, qn2::QN)
- qn1_filled = fillqns_from(qn1, qn2)
- qn2_filled = fillqns_from(qn2, qn1)
- return qn1_filled, qn2_filled
+ qn1_filled = fillqns_from(qn1, qn2)
+ qn2_filled = fillqns_from(qn2, qn1)
+ return qn1_filled, qn2_filled
end
function isequal_assume_filled(qn1::QN, qn2::QN)
- for (qv1, qv2) in zip(qn1, qn2)
- modulus(qv1) != modulus(qv2) && error("QNVals must have same modulus to compare")
- qv1 != qv2 && return false
- end
- return true
+ for (qv1, qv2) in zip(qn1, qn2)
+ modulus(qv1) != modulus(qv2) && error("QNVals must have same modulus to compare")
+ qv1 != qv2 && return false
+ end
+ return true
end
-function Base.:(==)(qn1::QN, qn2::QN; assume_filled=false)
- if !assume_filled
- qn1, qn2 = fillqns(qn1, qn2)
- end
- return isequal_assume_filled(qn1, qn2)
+function Base.:(==)(qn1::QN, qn2::QN; assume_filled = false)
+ if !assume_filled
+ qn1, qn2 = fillqns(qn1, qn2)
+ end
+ return isequal_assume_filled(qn1, qn2)
end
function isless_assume_filled(qn1::QN, qn2::QN)
- for n in 1:length(qn1)
- val1 = val(qn1[n])
- val2 = val(qn2[n])
- val1 != val2 && return val1 < val2
- end
- return false
+ for n in 1:length(qn1)
+ val1 = val(qn1[n])
+ val2 = val(qn2[n])
+ val1 != val2 && return val1 < val2
+ end
+ return false
end
-function Base.isless(qn1::QN, qn2::QN; assume_filled=false)
- return <(qn1, qn2; assume_filled=assume_filled)
+function Base.isless(qn1::QN, qn2::QN; assume_filled = false)
+ return <(qn1, qn2; assume_filled = assume_filled)
end
-function Base.:(<)(qn1::QN, qn2::QN; assume_filled=false)
- if !assume_filled
- qn1, qn2 = fillqns(qn1, qn2)
- end
- return isless_assume_filled(qn1, qn2)
+function Base.:(<)(qn1::QN, qn2::QN; assume_filled = false)
+ if !assume_filled
+ qn1, qn2 = fillqns(qn1, qn2)
+ end
+ return isless_assume_filled(qn1, qn2)
end
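A sketch of `QN` comparison: missing names are filled in with zeros before comparing, per `fillqns` above; the names and values are illustrative:

```julia
using ITensors

q1 = QN(("N", 1), ("Sz", 0))
q2 = QN("N", 1)

@show q1 == q2          # expected: true, the missing "Sz" in q2 counts as zero
@show QN("N", 0) < q1   # expected: true, compared value by value after filling
```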
function have_same_qns(qn1::QN, qn2::QN)
- for n in 1:length(qn1)
- name(qn1[n]) != name(qn2[n]) && return false
- end
- return true
+ for n in 1:length(qn1)
+ name(qn1[n]) != name(qn2[n]) && return false
+ end
+ return true
end
function have_same_mods(qn1::QN, qn2::QN)
- for n in 1:length(qn1)
- modulus(qn1[n]) != modulus(qn2[n]) && return false
- end
- return true
+ for n in 1:length(qn1)
+ modulus(qn1[n]) != modulus(qn2[n]) && return false
+ end
+ return true
end
function removeqn(qn::QN, qn_name::String)
- ss_qn_name = SmallString(qn_name)
- # Find the location of the QNVal to remove
- n_qn = nothing
- for n in 1:length(qn)
- qnval = qn[n]
- if name(qnval) == ss_qn_name
- n_qn = n
+ ss_qn_name = SmallString(qn_name)
+ # Find the location of the QNVal to remove
+ n_qn = nothing
+ for n in 1:length(qn)
+ qnval = qn[n]
+ if name(qnval) == ss_qn_name
+ n_qn = n
+ end
+ end
+ if isnothing(n_qn)
+ return qn
end
- end
- if isnothing(n_qn)
- return qn
- end
- qn_data = data(qn)
- for j in n_qn:(length(qn) - 1)
- qn_data = Base.setindex(qn_data, qn_data[j + 1], j)
- end
- qn_data = Base.setindex(qn_data, QNVal(), length(qn))
- return QN(qn_data)
+ qn_data = data(qn)
+ for j in n_qn:(length(qn) - 1)
+ qn_data = Base.setindex(qn_data, qn_data[j + 1], j)
+ end
+ qn_data = Base.setindex(qn_data, QNVal(), length(qn))
+ return QN(qn_data)
end
function Base.show(io::IO, q::QN)
- print(io, "QN(")
- Na = nactive(q)
- for n in 1:Na
- v = q[n]
- n > 1 && print(io, ",")
- Na > 1 && print(io, "(")
- if name(v) != SmallString("")
- print(io, "\"$(name(v))\",")
- end
- print(io, "$(val(v))")
- if modulus(v) != 1
- print(io, ",$(modulus(v))")
+ print(io, "QN(")
+ Na = nactive(q)
+ for n in 1:Na
+ v = q[n]
+ n > 1 && print(io, ",")
+ Na > 1 && print(io, "(")
+ if name(v) != SmallString("")
+ print(io, "\"$(name(v))\",")
+ end
+ print(io, "$(val(v))")
+ if modulus(v) != 1
+ print(io, ",$(modulus(v))")
+ end
+ Na > 1 && print(io, ")")
end
- Na > 1 && print(io, ")")
- end
- return print(io, ")")
+ return print(io, ")")
end
diff --git a/src/lib/SiteTypes/src/sitetypes/boson.jl b/src/lib/SiteTypes/src/sitetypes/boson.jl
index 387edad277..a02acb88a0 100644
--- a/src/lib/SiteTypes/src/sitetypes/boson.jl
+++ b/src/lib/SiteTypes/src/sitetypes/boson.jl
@@ -1,4 +1,3 @@
-
alias(::SiteType"Boson") = SiteType"Qudit"()
"""
@@ -17,16 +16,16 @@ space(st::SiteType"Boson"; kwargs...) = space(alias(st); kwargs...)
val(vn::ValName, st::SiteType"Boson") = val(vn, alias(st))
function state(sn::StateName, st::SiteType"Boson", s::Index; kwargs...)
- return state(sn, alias(st), s; kwargs...)
+ return state(sn, alias(st), s; kwargs...)
end
function op(on::OpName, st::SiteType"Boson", ds::Int...; kwargs...)
- return op(on, alias(st), ds...; kwargs...)
+ return op(on, alias(st), ds...; kwargs...)
end
function op(on::OpName, st::SiteType"Boson", s1::Index, s_tail::Index...; kwargs...)
- rs = reverse((s1, s_tail...))
- ds = dim.(rs)
- opmat = op(on, st, ds...; kwargs...)
- return itensor(opmat, prime.(rs)..., dag.(rs)...)
+ rs = reverse((s1, s_tail...))
+ ds = dim.(rs)
+ opmat = op(on, st, ds...; kwargs...)
+ return itensor(opmat, prime.(rs)..., dag.(rs)...)
end
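Since `"Boson"` simply aliases the `"Qudit"` site type, the usual Qudit keywords and operator names should apply. A sketch, assuming the `dim` keyword and the `"A"` (annihilation) operator name of the Qudit definitions, which are not shown in this diff:

```julia
using ITensors

s = siteind("Boson"; dim = 4)   # forwarded to the "Qudit" space definition
a = op("A", s)                  # assumed Qudit annihilation operator name

# a|1⟩ = √1 |0⟩, so the (0, 1) matrix element should be 1
@show a[s' => 1, s => 2] ≈ 1.0
```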
diff --git a/src/lib/SiteTypes/src/sitetypes/fermion.jl b/src/lib/SiteTypes/src/sitetypes/fermion.jl
index 008bdf0877..b98720cba6 100644
--- a/src/lib/SiteTypes/src/sitetypes/fermion.jl
+++ b/src/lib/SiteTypes/src/sitetypes/fermion.jl
@@ -1,4 +1,3 @@
-
"""
space(::SiteType"Fermion";
conserve_qns=false,
@@ -14,49 +13,49 @@ Create the Hilbert space for a site of type "Fermion".
Optionally specify the conserved symmetries and their quantum number labels.
"""
function space(
- ::SiteType"Fermion";
- conserve_qns=false,
- conserve_nf=conserve_qns,
- conserve_nfparity=conserve_qns,
- qnname_nf="Nf",
- qnname_nfparity="NfParity",
- qnname_sz="Sz",
- conserve_sz=false,
- # Deprecated
- conserve_parity=nothing,
-)
- if !isnothing(conserve_parity)
- conserve_nfparity = conserve_parity
- end
- if conserve_sz == true
- conserve_sz = "Up"
- end
- if conserve_nf && conserve_sz == "Up"
- zer = QN((qnname_nf, 0, -1), (qnname_sz, 0)) => 1
- one = QN((qnname_nf, 1, -1), (qnname_sz, 1)) => 1
- return [zer, one]
- elseif conserve_nf && conserve_sz == "Dn"
- zer = QN((qnname_nf, 0, -1), (qnname_sz, 0)) => 1
- one = QN((qnname_nf, 1, -1), (qnname_sz, -1)) => 1
- return [zer, one]
- elseif conserve_nfparity && conserve_sz == "Up"
- zer = QN((qnname_nfparity, 0, -2), (qnname_sz, 0)) => 1
- one = QN((qnname_nfparity, 1, -2), (qnname_sz, 1)) => 1
- return [zer, one]
- elseif conserve_nfparity && conserve_sz == "Dn"
- zer = QN((qnname_nfparity, 0, -2), (qnname_sz, 0)) => 1
- one = QN((qnname_nfparity, 1, -2), (qnname_sz, -1)) => 1
- return [zer, one]
- elseif conserve_nf
- zer = QN(qnname_nf, 0, -1) => 1
- one = QN(qnname_nf, 1, -1) => 1
- return [zer, one]
- elseif conserve_nfparity
- zer = QN(qnname_nfparity, 0, -2) => 1
- one = QN(qnname_nfparity, 1, -2) => 1
- return [zer, one]
- end
- return 2
+ ::SiteType"Fermion";
+ conserve_qns = false,
+ conserve_nf = conserve_qns,
+ conserve_nfparity = conserve_qns,
+ qnname_nf = "Nf",
+ qnname_nfparity = "NfParity",
+ qnname_sz = "Sz",
+ conserve_sz = false,
+ # Deprecated
+ conserve_parity = nothing,
+ )
+ if !isnothing(conserve_parity)
+ conserve_nfparity = conserve_parity
+ end
+ if conserve_sz == true
+ conserve_sz = "Up"
+ end
+ if conserve_nf && conserve_sz == "Up"
+ zer = QN((qnname_nf, 0, -1), (qnname_sz, 0)) => 1
+ one = QN((qnname_nf, 1, -1), (qnname_sz, 1)) => 1
+ return [zer, one]
+ elseif conserve_nf && conserve_sz == "Dn"
+ zer = QN((qnname_nf, 0, -1), (qnname_sz, 0)) => 1
+ one = QN((qnname_nf, 1, -1), (qnname_sz, -1)) => 1
+ return [zer, one]
+ elseif conserve_nfparity && conserve_sz == "Up"
+ zer = QN((qnname_nfparity, 0, -2), (qnname_sz, 0)) => 1
+ one = QN((qnname_nfparity, 1, -2), (qnname_sz, 1)) => 1
+ return [zer, one]
+ elseif conserve_nfparity && conserve_sz == "Dn"
+ zer = QN((qnname_nfparity, 0, -2), (qnname_sz, 0)) => 1
+ one = QN((qnname_nfparity, 1, -2), (qnname_sz, -1)) => 1
+ return [zer, one]
+ elseif conserve_nf
+ zer = QN(qnname_nf, 0, -1) => 1
+ one = QN(qnname_nf, 1, -1) => 1
+ return [zer, one]
+ elseif conserve_nfparity
+ zer = QN(qnname_nfparity, 0, -2) => 1
+ one = QN(qnname_nfparity, 1, -2) => 1
+ return [zer, one]
+ end
+ return 2
end
val(::ValName"Emp", ::SiteType"Fermion") = 1
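A sketch of building a QN-conserving `"Fermion"` site index with the keywords defined in `space` above; the choice of `conserve_nf` is illustrative:

```julia
using ITensors

s = siteind("Fermion"; conserve_nf = true)
@show dim(s)     # expected: 2
@show hasqns(s)  # expected: true, each block carries an "Nf" quantum number

n = op("N", s)   # number operator defined further below in this file
@show n[s' => 2, s => 2]   # expected: 1.0 on the occupied state
```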
@@ -70,42 +69,42 @@ state(::StateName"0", st::SiteType"Fermion") = state(StateName("Emp"), st)
state(::StateName"1", st::SiteType"Fermion") = state(StateName("Occ"), st)
function op!(Op::ITensor, ::OpName"N", ::SiteType"Fermion", s::Index)
- return Op[s' => 2, s => 2] = 1.0
+ return Op[s' => 2, s => 2] = 1.0
end
function op!(Op::ITensor, on::OpName"n", st::SiteType"Fermion", s::Index)
- return op!(Op, alias(on), st, s)
+ return op!(Op, alias(on), st, s)
end
function op!(Op::ITensor, ::OpName"C", ::SiteType"Fermion", s::Index)
- return Op[s' => 1, s => 2] = 1.0
+ return Op[s' => 1, s => 2] = 1.0
end
function op!(Op::ITensor, on::OpName"c", st::SiteType"Fermion", s::Index)
- return op!(Op, alias(on), st, s)
+ return op!(Op, alias(on), st, s)
end
function op!(Op::ITensor, ::OpName"Cdag", ::SiteType"Fermion", s::Index)
- return Op[s' => 2, s => 1] = 1.0
+ return Op[s' => 2, s => 1] = 1.0
end
function op!(Op::ITensor, on::OpName"c†", st::SiteType"Fermion", s::Index)
- return op!(Op, alias(on), st, s)
+ return op!(Op, alias(on), st, s)
end
function op!(Op::ITensor, on::OpName"cdag", st::SiteType"Fermion", s::Index)
- return op!(Op, alias(on), st, s)
+ return op!(Op, alias(on), st, s)
end
function op!(Op::ITensor, ::OpName"F", ::SiteType"Fermion", s::Index)
- Op[s' => 1, s => 1] = +1.0
- return Op[s' => 2, s => 2] = -1.0
+ Op[s' => 1, s => 1] = +1.0
+ return Op[s' => 2, s => 2] = -1.0
end
has_fermion_string(::OpName"C", ::SiteType"Fermion") = true
function has_fermion_string(on::OpName"c", st::SiteType"Fermion")
- return has_fermion_string(alias(on), st)
+ return has_fermion_string(alias(on), st)
end
has_fermion_string(::OpName"Cdag", ::SiteType"Fermion") = true
function has_fermion_string(on::OpName"c†", st::SiteType"Fermion")
- return has_fermion_string(alias(on), st)
+ return has_fermion_string(alias(on), st)
end
function has_fermion_string(on::OpName"cdag", st::SiteType"Fermion")
- return has_fermion_string(alias(on), st)
+ return has_fermion_string(alias(on), st)
end
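As a point of reference for the reformatted "Fermion" site type, a minimal usage sketch (illustrative only, not part of the patch; it assumes `siteind` and `op` are exported by ITensors as in current releases):

```julia
using ITensors

# Two-state fermion site with particle-number conservation; the QN blocks come
# from `space(::SiteType"Fermion"; conserve_nf = true)` defined above.
s = siteind("Fermion"; conserve_nf = true)

n = op("N", s)        # number operator, filled in by `op!`
cdag = op("Cdag", s)  # creation operator; `has_fermion_string` reports true for it
```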
diff --git a/src/lib/SiteTypes/src/sitetypes/qubit.jl b/src/lib/SiteTypes/src/sitetypes/qubit.jl
index e07bd547f0..ee8dff0589 100644
--- a/src/lib/SiteTypes/src/sitetypes/qubit.jl
+++ b/src/lib/SiteTypes/src/sitetypes/qubit.jl
@@ -21,24 +21,24 @@ Create the Hilbert space for a site of type "Qubit".
Optionally specify the conserved symmetries and their quantum number labels.
"""
function space(
- ::SiteType"Qubit";
- conserve_qns=false,
- conserve_parity=conserve_qns,
- conserve_number=false,
- qnname_parity="Parity",
- qnname_number="Number",
-)
- if conserve_number && conserve_parity
- return [
- QN((qnname_number, 0), (qnname_parity, 0, 2)) => 1,
- QN((qnname_number, 1), (qnname_parity, 1, 2)) => 1,
- ]
- elseif conserve_number
- return [QN(qnname_number, 0) => 1, QN(qnname_number, 1) => 1]
- elseif conserve_parity
- return [QN(qnname_parity, 0, 2) => 1, QN(qnname_parity, 1, 2) => 1]
- end
- return 2
+ ::SiteType"Qubit";
+ conserve_qns = false,
+ conserve_parity = conserve_qns,
+ conserve_number = false,
+ qnname_parity = "Parity",
+ qnname_number = "Number",
+ )
+ if conserve_number && conserve_parity
+ return [
+ QN((qnname_number, 0), (qnname_parity, 0, 2)) => 1,
+ QN((qnname_number, 1), (qnname_parity, 1, 2)) => 1,
+ ]
+ elseif conserve_number
+ return [QN(qnname_number, 0) => 1, QN(qnname_number, 1) => 1]
+ elseif conserve_parity
+ return [QN(qnname_parity, 0, 2) => 1, QN(qnname_parity, 1, 2) => 1]
+ end
+ return 2
end
val(::ValName"0", ::SiteType"Qubit") = 1
@@ -78,24 +78,24 @@ state(::StateName"Zm", t::SiteType"Qubit") = state(StateName("1"), t)
# SIC-POVMs
state(::StateName"Tetra1", t::SiteType"Qubit") = state(StateName("Z+"), t)
state(::StateName"Tetra2", t::SiteType"Qubit") = [
- 1 / √3
- √2 / √3
+ 1 / √3
+ √2 / √3
]
state(::StateName"Tetra3", t::SiteType"Qubit") = [
- 1 / √3
- √2 / √3 * exp(im * 2π / 3)
+ 1 / √3
+ √2 / √3 * exp(im * 2π / 3)
]
state(::StateName"Tetra4", t::SiteType"Qubit") = [
- 1 / √3
- √2 / √3 * exp(im * 4π / 3)
+ 1 / √3
+ √2 / √3 * exp(im * 4π / 3)
]
#
# 1-Qubit gates
#
op(::OpName"X", ::SiteType"Qubit") = [
- 0 1
- 1 0
+ 0 1
+ 1 0
]
op(::OpName"σx", t::SiteType"Qubit") = op("X", t)
@@ -103,8 +103,8 @@ op(::OpName"σx", t::SiteType"Qubit") = op("X", t)
op(::OpName"σ1", t::SiteType"Qubit") = op("X", t)
op(::OpName"Y", ::SiteType"Qubit") = [
- 0.0 -1.0im
- 1.0im 0.0
+ 0.0 -1.0im
+ 1.0im 0.0
]
op(::OpName"σy", t::SiteType"Qubit") = op("Y", t)
@@ -112,16 +112,16 @@ op(::OpName"σy", t::SiteType"Qubit") = op("Y", t)
op(::OpName"σ2", t::SiteType"Qubit") = op("Y", t)
op(::OpName"iY", ::SiteType"Qubit") = [
- 0 1
- -1 0
+ 0 1
+ -1 0
]
op(::OpName"iσy", t::SiteType"Qubit") = op("iY", t)
op(::OpName"iσ2", t::SiteType"Qubit") = op("iY", t)
op(::OpName"Z", ::SiteType"Qubit") = [
- 1 0
- 0 -1
+ 1 0
+ 0 -1
]
op(::OpName"σz", t::SiteType"Qubit") = op("Z", t)
@@ -129,75 +129,75 @@ op(::OpName"σz", t::SiteType"Qubit") = op("Z", t)
op(::OpName"σ3", t::SiteType"Qubit") = op("Z", t)
function op(::OpName"√NOT", ::SiteType"Qubit")
- return [
- (1 + im)/2 (1 - im)/2
- (1 - im)/2 (1 + im)/2
- ]
+ return [
+ (1 + im) / 2 (1 - im) / 2
+ (1 - im) / 2 (1 + im) / 2
+ ]
end
op(::OpName"√X", t::SiteType"Qubit") = op("√NOT", t)
op(::OpName"H", ::SiteType"Qubit") = [
- 1/sqrt(2) 1/sqrt(2)
- 1/sqrt(2) -1/sqrt(2)
+ 1 / sqrt(2) 1 / sqrt(2)
+ 1 / sqrt(2) -1 / sqrt(2)
]
# Rϕ with ϕ = π/2
-op(::OpName"Phase", ::SiteType"Qubit"; ϕ::Number=π / 2) = [
- 1 0
- 0 exp(im * ϕ)
+op(::OpName"Phase", ::SiteType"Qubit"; ϕ::Number = π / 2) = [
+ 1 0
+ 0 exp(im * ϕ)
]
op(::OpName"P", t::SiteType"Qubit"; kwargs...) = op("Phase", t; kwargs...)
-op(::OpName"S", t::SiteType"Qubit") = op("Phase", t; ϕ=π / 2)
+op(::OpName"S", t::SiteType"Qubit") = op("Phase", t; ϕ = π / 2)
## Rϕ with ϕ = π/4
op(::OpName"π/8", ::SiteType"Qubit") = [
- 1 0
- 0 1 / sqrt(2)+im / sqrt(2)
+ 1 0
+ 0 1 / sqrt(2) + im / sqrt(2)
]
op(::OpName"T", t::SiteType"Qubit") = op("π/8", t)
# Rotation around X-axis
function op(::OpName"Rx", ::SiteType"Qubit"; θ::Number)
- return [
- cos(θ / 2) -im*sin(θ / 2)
- -im*sin(θ / 2) cos(θ / 2)
- ]
+ return [
+ cos(θ / 2) -im * sin(θ / 2)
+ -im * sin(θ / 2) cos(θ / 2)
+ ]
end
# Rotation around Y-axis
function op(::OpName"Ry", ::SiteType"Qubit"; θ::Number)
- return [
- cos(θ / 2) -sin(θ / 2)
- sin(θ / 2) cos(θ / 2)
- ]
+ return [
+ cos(θ / 2) -sin(θ / 2)
+ sin(θ / 2) cos(θ / 2)
+ ]
end
# Rotation around Z-axis
-function op(::OpName"Rz", ::SiteType"Qubit"; θ=nothing, ϕ=nothing)
- isone(count(isnothing, (θ, ϕ))) || error(
- "Must specify the keyword argument `θ` (or the deprecated `ϕ`) when creating an Rz gate, but not both.",
- )
- isnothing(θ) && (θ = ϕ)
- return [
- exp(-im * θ / 2) 0
- 0 exp(im * θ / 2)
- ]
+function op(::OpName"Rz", ::SiteType"Qubit"; θ = nothing, ϕ = nothing)
+ isone(count(isnothing, (θ, ϕ))) || error(
+ "Must specify the keyword argument `θ` (or the deprecated `ϕ`) when creating an Rz gate, but not both.",
+ )
+ isnothing(θ) && (θ = ϕ)
+ return [
+ exp(-im * θ / 2) 0
+ 0 exp(im * θ / 2)
+ ]
end
# Rotation around generic axis n̂
function op(::OpName"Rn", ::SiteType"Qubit"; θ::Real, ϕ::Real, λ::Real)
- return [
- cos(θ / 2) -exp(im * λ)*sin(θ / 2)
- exp(im * ϕ)*sin(θ / 2) exp(im * (ϕ + λ))*cos(θ / 2)
- ]
+ return [
+ cos(θ / 2) -exp(im * λ) * sin(θ / 2)
+ exp(im * ϕ) * sin(θ / 2) exp(im * (ϕ + λ)) * cos(θ / 2)
+ ]
end
function op(on::OpName"Rn̂", t::SiteType"Qubit"; kwargs...)
- return op(alias(on), t; kwargs...)
+ return op(alias(on), t; kwargs...)
end
#
@@ -205,161 +205,161 @@ end
#
op(::OpName"CNOT", ::SiteType"Qubit") = [
- 1 0 0 0
- 0 1 0 0
- 0 0 0 1
- 0 0 1 0
+ 1 0 0 0
+ 0 1 0 0
+ 0 0 0 1
+ 0 0 1 0
]
op(::OpName"CX", t::SiteType"Qubit") = op("CNOT", t)
op(::OpName"CY", ::SiteType"Qubit") = [
- 1 0 0 0
- 0 1 0 0
- 0 0 0 -im
- 0 0 im 0
+ 1 0 0 0
+ 0 1 0 0
+ 0 0 0 -im
+ 0 0 im 0
]
op(::OpName"CZ", ::SiteType"Qubit") = [
- 1 0 0 0
- 0 1 0 0
- 0 0 1 0
- 0 0 0 -1
-]
-
-function op(::OpName"CPHASE", ::SiteType"Qubit"; ϕ::Number)
- return [
1 0 0 0
0 1 0 0
0 0 1 0
- 0 0 0 exp(im * ϕ)
- ]
+ 0 0 0 -1
+]
+
+function op(::OpName"CPHASE", ::SiteType"Qubit"; ϕ::Number)
+ return [
+ 1 0 0 0
+ 0 1 0 0
+ 0 0 1 0
+ 0 0 0 exp(im * ϕ)
+ ]
end
op(::OpName"Cphase", t::SiteType"Qubit"; kwargs...) = op("CPHASE", t; kwargs...)
function op(::OpName"CRx", ::SiteType"Qubit"; θ::Number)
- return [
- 1 0 0 0
- 0 1 0 0
- 0 0 cos(θ / 2) -im*sin(θ / 2)
- 0 0 -im*sin(θ / 2) cos(θ / 2)
- ]
+ return [
+ 1 0 0 0
+ 0 1 0 0
+ 0 0 cos(θ / 2) -im * sin(θ / 2)
+ 0 0 -im * sin(θ / 2) cos(θ / 2)
+ ]
end
op(::OpName"CRX", t::SiteType"Qubit"; kwargs...) = op("CRx", t; kwargs...)
function op(::OpName"CRy", ::SiteType"Qubit"; θ::Number)
- return [
- 1 0 0 0
- 0 1 0 0
- 0 0 cos(θ / 2) -sin(θ / 2)
- 0 0 sin(θ / 2) cos(θ / 2)
- ]
+ return [
+ 1 0 0 0
+ 0 1 0 0
+ 0 0 cos(θ / 2) -sin(θ / 2)
+ 0 0 sin(θ / 2) cos(θ / 2)
+ ]
end
op(::OpName"CRY", t::SiteType"Qubit"; kwargs...) = op("CRy", t; kwargs...)
-function op(::OpName"CRz", ::SiteType"Qubit"; ϕ=nothing, θ=nothing)
- isone(count(isnothing, (θ, ϕ))) || error(
- "Must specify the keyword argument `θ` (or the deprecated `ϕ`) when creating a CRz gate, but not both.",
- )
- isnothing(θ) && (θ = ϕ)
- return [
- 1 0 0 0
- 0 1 0 0
- 0 0 exp(-im * θ / 2) 0
- 0 0 0 exp(im * θ / 2)
- ]
+function op(::OpName"CRz", ::SiteType"Qubit"; ϕ = nothing, θ = nothing)
+ isone(count(isnothing, (θ, ϕ))) || error(
+ "Must specify the keyword argument `θ` (or the deprecated `ϕ`) when creating a CRz gate, but not both.",
+ )
+ isnothing(θ) && (θ = ϕ)
+ return [
+ 1 0 0 0
+ 0 1 0 0
+ 0 0 exp(-im * θ / 2) 0
+ 0 0 0 exp(im * θ / 2)
+ ]
end
op(::OpName"CRZ", t::SiteType"Qubit"; kwargs...) = op("CRz", t; kwargs...)
function op(::OpName"CRn", ::SiteType"Qubit"; θ::Number, ϕ::Number, λ::Number)
- return [
- 1 0 0 0
- 0 1 0 0
- 0 0 cos(θ / 2) -exp(im * λ)*sin(θ / 2)
- 0 0 exp(im * ϕ)*sin(θ / 2) exp(im * (ϕ + λ))*cos(θ / 2)
- ]
+ return [
+ 1 0 0 0
+ 0 1 0 0
+ 0 0 cos(θ / 2) -exp(im * λ) * sin(θ / 2)
+ 0 0 exp(im * ϕ) * sin(θ / 2) exp(im * (ϕ + λ)) * cos(θ / 2)
+ ]
end
function op(::OpName"CRn̂", t::SiteType"Qubit"; kwargs...)
- return op("CRn", t; kwargs...)
+ return op("CRn", t; kwargs...)
end
op(::OpName"SWAP", ::SiteType"Qubit") = [
- 1 0 0 0
- 0 0 1 0
- 0 1 0 0
- 0 0 0 1
+ 1 0 0 0
+ 0 0 1 0
+ 0 1 0 0
+ 0 0 0 1
]
op(::OpName"Swap", t::SiteType"Qubit") = op("SWAP", t)
function op(::OpName"√SWAP", ::SiteType"Qubit")
- return [
- 1 0 0 0
- 0 (1 + im)/2 (1 - im)/2 0
- 0 (1 - im)/2 (1 + im)/2 0
- 0 0 0 1
- ]
+ return [
+ 1 0 0 0
+ 0 (1 + im) / 2 (1 - im) / 2 0
+ 0 (1 - im) / 2 (1 + im) / 2 0
+ 0 0 0 1
+ ]
end
op(::OpName"√Swap", t::SiteType"Qubit") = op("√SWAP", t)
op(::OpName"iSWAP", t::SiteType"Qubit") = [
- 1 0 0 0
- 0 0 im 0
- 0 im 0 0
- 0 0 0 1
+ 1 0 0 0
+ 0 0 im 0
+ 0 im 0 0
+ 0 0 0 1
]
op(::OpName"iSwap", t::SiteType"Qubit") = op("iSWAP", t)
function op(::OpName"√iSWAP", t::SiteType"Qubit")
- return [
- 1 0 0 0
- 0 1/√2 im/√2 0
- 0 im/√2 1/√2 0
- 0 0 0 1
- ]
+ return [
+ 1 0 0 0
+ 0 1 / √2 im / √2 0
+ 0 im / √2 1 / √2 0
+ 0 0 0 1
+ ]
end
op(::OpName"√iSwap", t::SiteType"Qubit") = op("√iSWAP", t)
# Ising (XX) coupling gate
function op(::OpName"Rxx", t::SiteType"Qubit"; ϕ::Number)
- return [
- cos(ϕ) 0 0 -im*sin(ϕ)
- 0 cos(ϕ) -im*sin(ϕ) 0
- 0 -im*sin(ϕ) cos(ϕ) 0
- -im*sin(ϕ) 0 0 cos(ϕ)
- ]
+ return [
+ cos(ϕ) 0 0 -im * sin(ϕ)
+ 0 cos(ϕ) -im * sin(ϕ) 0
+ 0 -im * sin(ϕ) cos(ϕ) 0
+ -im * sin(ϕ) 0 0 cos(ϕ)
+ ]
end
op(::OpName"RXX", t::SiteType"Qubit"; kwargs...) = op("Rxx", t; kwargs...)
# Ising (YY) coupling gate
function op(::OpName"Ryy", ::SiteType"Qubit"; ϕ::Number)
- return [
- cos(ϕ) 0 0 im*sin(ϕ)
- 0 cos(ϕ) -im*sin(ϕ) 0
- 0 -im*sin(ϕ) cos(ϕ) 0
- im*sin(ϕ) 0 0 cos(ϕ)
- ]
+ return [
+ cos(ϕ) 0 0 im * sin(ϕ)
+ 0 cos(ϕ) -im * sin(ϕ) 0
+ 0 -im * sin(ϕ) cos(ϕ) 0
+ im * sin(ϕ) 0 0 cos(ϕ)
+ ]
end
op(::OpName"RYY", t::SiteType"Qubit"; kwargs...) = op("Ryy", t; kwargs...)
# Ising (XY) coupling gate
function op(::OpName"Rxy", t::SiteType"Qubit"; ϕ::Number)
- return [
- 1 0 0 0
- 0 cos(ϕ) -im*sin(ϕ) 0
- 0 -im*sin(ϕ) cos(ϕ) 0
- 0 0 0 1
- ]
+ return [
+ 1 0 0 0
+ 0 cos(ϕ) -im * sin(ϕ) 0
+ 0 -im * sin(ϕ) cos(ϕ) 0
+ 0 0 0 1
+ ]
end
op(::OpName"RXY", t::SiteType"Qubit"; kwargs...) = op("Rxy", t; kwargs...)
# Ising (ZZ) coupling gate
function op(::OpName"Rzz", ::SiteType"Qubit"; ϕ::Number)
- return [
- exp(-im * ϕ) 0 0 0
- 0 exp(im * ϕ) 0 0
- 0 0 exp(im * ϕ) 0
- 0 0 0 exp(-im * ϕ)
- ]
+ return [
+ exp(-im * ϕ) 0 0 0
+ 0 exp(im * ϕ) 0 0
+ 0 0 exp(im * ϕ) 0
+ 0 0 0 exp(-im * ϕ)
+ ]
end
op(::OpName"RZZ", t::SiteType"Qubit"; kwargs...) = op("Rzz", t; kwargs...)
@@ -368,16 +368,16 @@ op(::OpName"RZZ", t::SiteType"Qubit"; kwargs...) = op("Rzz", t; kwargs...)
#
function op(::OpName"Toffoli", ::SiteType"Qubit")
- return [
- 1 0 0 0 0 0 0 0
- 0 1 0 0 0 0 0 0
- 0 0 1 0 0 0 0 0
- 0 0 0 1 0 0 0 0
- 0 0 0 0 1 0 0 0
- 0 0 0 0 0 1 0 0
- 0 0 0 0 0 0 0 1
- 0 0 0 0 0 0 1 0
- ]
+ return [
+ 1 0 0 0 0 0 0 0
+ 0 1 0 0 0 0 0 0
+ 0 0 1 0 0 0 0 0
+ 0 0 0 1 0 0 0 0
+ 0 0 0 0 1 0 0 0
+ 0 0 0 0 0 1 0 0
+ 0 0 0 0 0 0 0 1
+ 0 0 0 0 0 0 1 0
+ ]
end
op(::OpName"CCNOT", t::SiteType"Qubit") = op("Toffoli", t)
@@ -387,16 +387,16 @@ op(::OpName"CCX", t::SiteType"Qubit") = op("Toffoli", t)
op(::OpName"TOFF", t::SiteType"Qubit") = op("Toffoli", t)
function op(::OpName"Fredkin", ::SiteType"Qubit")
- return [
- 1 0 0 0 0 0 0 0
- 0 1 0 0 0 0 0 0
- 0 0 1 0 0 0 0 0
- 0 0 0 1 0 0 0 0
- 0 0 0 0 1 0 0 0
- 0 0 0 0 0 0 1 0
- 0 0 0 0 0 1 0 0
- 0 0 0 0 0 0 0 1
- ]
+ return [
+ 1 0 0 0 0 0 0 0
+ 0 1 0 0 0 0 0 0
+ 0 0 1 0 0 0 0 0
+ 0 0 0 1 0 0 0 0
+ 0 0 0 0 1 0 0 0
+ 0 0 0 0 0 0 1 0
+ 0 0 0 0 0 1 0 0
+ 0 0 0 0 0 0 0 1
+ ]
end
op(::OpName"CSWAP", t::SiteType"Qubit") = op("Fredkin", t)
@@ -409,37 +409,37 @@ op(::OpName"CS", t::SiteType"Qubit") = op("Fredkin", t)
#
function op(::OpName"CCCNOT", ::SiteType"Qubit")
- return [
- 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
- 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
- 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0
- 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0
- 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0
- 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0
- 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0
- 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0
- 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0
- 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0
- 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0
- 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0
- 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0
- 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0
- 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
- 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0
- ]
+ return [
+ 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0
+ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
+ 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0
+ ]
end
# spin-full operators
op(::OpName"Sz", ::SiteType"Qubit") = [
- 0.5 0.0
- 0.0 -0.5
+ 0.5 0.0
+ 0.0 -0.5
]
op(on::OpName"Sᶻ", t::SiteType"Qubit") = op(alias(on), t)
op(::OpName"S+", ::SiteType"Qubit") = [
- 0 1
- 0 0
+ 0 1
+ 0 0
]
op(on::OpName"S⁺", t::SiteType"Qubit") = op(alias(on), t)
@@ -447,8 +447,8 @@ op(on::OpName"S⁺", t::SiteType"Qubit") = op(alias(on), t)
op(on::OpName"Splus", t::SiteType"Qubit") = op(alias(on), t)
op(::OpName"S-", ::SiteType"Qubit") = [
- 0 0
- 1 0
+ 0 0
+ 1 0
]
op(on::OpName"S⁻", t::SiteType"Qubit") = op(alias(on), t)
@@ -456,36 +456,36 @@ op(on::OpName"S⁻", t::SiteType"Qubit") = op(alias(on), t)
op(on::OpName"Sminus", t::SiteType"Qubit") = op(alias(on), t)
op(::OpName"Sx", ::SiteType"Qubit") = [
- 0.0 0.5
- 0.5 0.0
+ 0.0 0.5
+ 0.5 0.0
]
op(on::OpName"Sˣ", t::SiteType"Qubit") = op(alias(on), t)
op(::OpName"iSy", ::SiteType"Qubit") = [
- 0.0 0.5
- -0.5 0.0
+ 0.0 0.5
+ -0.5 0.0
]
op(on::OpName"iSʸ", t::SiteType"Qubit") = op(alias(on), t)
op(::OpName"Sy", ::SiteType"Qubit") = [
- 0.0 -0.5im
- 0.5im 0.0
+ 0.0 -0.5im
+ 0.5im 0.0
]
op(on::OpName"Sʸ", t::SiteType"Qubit") = op(alias(on), t)
op(::OpName"S2", ::SiteType"Qubit") = [
- 0.75 0.0
- 0.0 0.75
+ 0.75 0.0
+ 0.0 0.75
]
op(on::OpName"S²", t::SiteType"Qubit") = op(alias(on), t)
op(::OpName"ProjUp", ::SiteType"Qubit") = [
- 1 0
- 0 0
+ 1 0
+ 0 0
]
op(on::OpName"projUp", t::SiteType"Qubit") = op(alias(on), t)
@@ -493,8 +493,8 @@ op(on::OpName"projUp", t::SiteType"Qubit") = op(alias(on), t)
op(on::OpName"Proj0", t::SiteType"Qubit") = op(alias(on), t)
op(::OpName"ProjDn", ::SiteType"Qubit") = [
- 0 0
- 0 1
+ 0 0
+ 0 1
]
op(on::OpName"projDn", t::SiteType"Qubit") = op(alias(on), t)
diff --git a/src/lib/SiteTypes/src/sitetypes/spinhalf.jl b/src/lib/SiteTypes/src/sitetypes/spinhalf.jl
index c374d47c0d..77b8ff65a0 100644
--- a/src/lib/SiteTypes/src/sitetypes/spinhalf.jl
+++ b/src/lib/SiteTypes/src/sitetypes/spinhalf.jl
@@ -1,4 +1,3 @@
-
"""
space(::SiteType"S=1/2";
conserve_qns = false,
@@ -12,34 +11,34 @@ Create the Hilbert space for a site of type "S=1/2".
Optionally specify the conserved symmetries and their quantum number labels.
"""
function space(
- ::SiteType"S=1/2";
- conserve_qns=false,
- conserve_sz=conserve_qns,
- conserve_szparity=false,
- qnname_sz="Sz",
- qnname_szparity="SzParity",
-)
- if conserve_sz && conserve_szparity
- return [
- QN((qnname_sz, +1), (qnname_szparity, 1, 2)) => 1,
- QN((qnname_sz, -1), (qnname_szparity, 0, 2)) => 1,
- ]
- elseif conserve_sz
- return [QN(qnname_sz, +1) => 1, QN(qnname_sz, -1) => 1]
- elseif conserve_szparity
- return [QN(qnname_szparity, 1, 2) => 1, QN(qnname_szparity, 0, 2) => 1]
- end
- return 2
+ ::SiteType"S=1/2";
+ conserve_qns = false,
+ conserve_sz = conserve_qns,
+ conserve_szparity = false,
+ qnname_sz = "Sz",
+ qnname_szparity = "SzParity",
+ )
+ if conserve_sz && conserve_szparity
+ return [
+ QN((qnname_sz, +1), (qnname_szparity, 1, 2)) => 1,
+ QN((qnname_sz, -1), (qnname_szparity, 0, 2)) => 1,
+ ]
+ elseif conserve_sz
+ return [QN(qnname_sz, +1) => 1, QN(qnname_sz, -1) => 1]
+ elseif conserve_szparity
+ return [QN(qnname_szparity, 1, 2) => 1, QN(qnname_szparity, 0, 2) => 1]
+ end
+ return 2
end
# Use Qubit definition of any operator/state
# called using S=1/2 SiteType
function val(vn::ValName, ::SiteType"S=1/2"; kwargs...)
- return val(vn, SiteType("Qubit"); kwargs...)
+ return val(vn, SiteType("Qubit"); kwargs...)
end
function state(sn::StateName, ::SiteType"S=1/2"; kwargs...)
- return state(sn, SiteType("Qubit"); kwargs...)
+ return state(sn, SiteType("Qubit"); kwargs...)
end
op(o::OpName, ::SiteType"S=1/2"; kwargs...) = op(o, SiteType("Qubit"); kwargs...)
@@ -52,7 +51,7 @@ val(name::ValName, ::SiteType"SpinHalf") = val(name, SiteType("S=1/2"))
state(name::StateName, ::SiteType"SpinHalf") = state(name, SiteType("S=1/2"))
function op(o::OpName, ::SiteType"SpinHalf"; kwargs...)
- return op(o, SiteType("S=1/2"); kwargs...)
+ return op(o, SiteType("S=1/2"); kwargs...)
end
# Support the tag "S=½" as equivalent to "S=1/2"
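Since "S=1/2" forwards its operator, state, and value lookups to "Qubit", the two site types share one set of definitions; a short sketch under the same assumptions:

```julia
using ITensors

s = siteind("S=1/2"; conserve_szparity = true)
sz = op("Sz", s)  # resolved through the "Qubit" definition of "Sz"
sp = op("S+", s)  # raising operator, also taken from the "Qubit" definitions
```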
diff --git a/src/lib/SiteTypes/src/sitetypes/spinone.jl b/src/lib/SiteTypes/src/sitetypes/spinone.jl
index 1de6902243..35c89f3b3d 100644
--- a/src/lib/SiteTypes/src/sitetypes/spinone.jl
+++ b/src/lib/SiteTypes/src/sitetypes/spinone.jl
@@ -13,12 +13,12 @@ Create the Hilbert space for a site of type "S=1".
Optionally specify the conserved symmetries and their quantum number labels.
"""
function space(
- ::SiteType"S=1"; conserve_qns=false, conserve_sz=conserve_qns, qnname_sz="Sz"
-)
- if conserve_sz
- return [QN(qnname_sz, +2) => 1, QN(qnname_sz, 0) => 1, QN(qnname_sz, -2) => 1]
- end
- return 3
+ ::SiteType"S=1"; conserve_qns = false, conserve_sz = conserve_qns, qnname_sz = "Sz"
+ )
+ if conserve_sz
+ return [QN(qnname_sz, +2) => 1, QN(qnname_sz, 0) => 1, QN(qnname_sz, -2) => 1]
+ end
+ return 3
end
val(::ValName"Up", ::SiteType"S=1") = 1
@@ -54,17 +54,17 @@ state(::StateName"Y0", ::SiteType"S=1") = [1 / sqrt(2), 0, 1 / sqrt(2)]
state(::StateName"Y-", ::SiteType"S=1") = [-1 / 2, im / sqrt(2), 1 / 2]
op(::OpName"Sz", ::SiteType"S=1") = [
- 1.0 0.0 0.0
- 0.0 0.0 0.0
- 0.0 0.0 -1.0
+ 1.0 0.0 0.0
+ 0.0 0.0 0.0
+ 0.0 0.0 -1.0
]
op(on::OpName"Sᶻ", t::SiteType"S=1") = op(alias(on), t)
op(::OpName"S+", ::SiteType"S=1") = [
- 0.0 √2 0.0
- 0.0 0.0 √2
- 0.0 0.0 0.0
+ 0.0 √2 0.0
+ 0.0 0.0 √2
+ 0.0 0.0 0.0
]
op(on::OpName"S⁺", t::SiteType"S=1") = op(alias(on), t)
@@ -72,9 +72,9 @@ op(on::OpName"Splus", t::SiteType"S=1") = op(alias(on), t)
op(on::OpName"Sp", t::SiteType"S=1") = op(alias(on), t)
op(::OpName"S-", ::SiteType"S=1") = [
- 0.0 0.0 0.0
- √2 0.0 0.0
- 0.0 √2 0.0
+ 0.0 0.0 0.0
+ √2 0.0 0.0
+ 0.0 √2 0.0
]
op(on::OpName"S⁻", t::SiteType"S=1") = op(alias(on), t)
@@ -82,51 +82,51 @@ op(on::OpName"Sminus", t::SiteType"S=1") = op(alias(on), t)
op(on::OpName"Sm", t::SiteType"S=1") = op(alias(on), t)
op(::OpName"Sx", ::SiteType"S=1") = [
- 0.0 1/√2 0.0
- 1/√2 0.0 1/√2
- 0.0 1/√2 0.0
+ 0.0 1 / √2 0.0
+ 1 / √2 0.0 1 / √2
+ 0.0 1 / √2 0.0
]
op(on::OpName"Sˣ", t::SiteType"S=1") = op(alias(on), t)
op(::OpName"iSy", ::SiteType"S=1") = [
- 0.0 1/√2 0.0
- -1/√2 0.0 1/√2
- 0.0 -1/√2 0.0
+ 0.0 1 / √2 0.0
+ -1 / √2 0.0 1 / √2
+ 0.0 -1 / √2 0.0
]
op(on::OpName"iSʸ", t::SiteType"S=1") = op(alias(on), t)
op(::OpName"Sy", ::SiteType"S=1") = [
- 0.0 -im/√2 0.0
- im/√2 0.0 -im/√2
- 0.0 im/√2 0.0
+ 0.0 -im / √2 0.0
+ im / √2 0.0 -im / √2
+ 0.0 im / √2 0.0
]
op(on::OpName"Sʸ", t::SiteType"S=1") = op(alias(on), t)
op(::OpName"Sz2", ::SiteType"S=1") = [
- 1.0 0.0 0.0
- 0.0 0.0 0.0
- 0.0 0.0 1.0
+ 1.0 0.0 0.0
+ 0.0 0.0 0.0
+ 0.0 0.0 1.0
]
op(::OpName"Sx2", ::SiteType"S=1") = [
- 0.5 0.0 0.5
- 0.0 1.0 0.0
- 0.5 0.0 0.5
+ 0.5 0.0 0.5
+ 0.0 1.0 0.0
+ 0.5 0.0 0.5
]
op(::OpName"Sy2", ::SiteType"S=1") = [
- 0.5 0.0 -0.5
- 0.0 1.0 0.0
- -0.5 0.0 0.5
+ 0.5 0.0 -0.5
+ 0.0 1.0 0.0
+ -0.5 0.0 0.5
]
op(::OpName"S2", ::SiteType"S=1") = [
- 2.0 0.0 0.0
- 0.0 2.0 0.0
- 0.0 0.0 2.0
+ 2.0 0.0 0.0
+ 0.0 2.0 0.0
+ 0.0 0.0 2.0
]
op(on::OpName"S²", t::SiteType"S=1") = op(alias(on), t)
@@ -137,7 +137,7 @@ state(name::StateName, st::SiteType"SpinOne") = state(name, alias(st))
val(name::ValName, st::SiteType"SpinOne") = val(name, alias(st))
function op!(Op::ITensor, o::OpName, st::SiteType"SpinOne", s::Index)
- return op!(Op, o, alias(st), s)
+ return op!(Op, o, alias(st), s)
end
op(o::OpName, st::SiteType"SpinOne") = op(o, alias(st))
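Note that the "S=1" quantum numbers are stored in units of Sz/2, so Sz = +1, 0, -1 appear as QN values +2, 0, -2; a quick check (illustrative only, not part of the patch):

```julia
using ITensors

ITensors.space(SiteType("S=1"); conserve_sz = true)
# [QN("Sz", 2) => 1, QN("Sz", 0) => 1, QN("Sz", -2) => 1]

s = siteind("S=1"; conserve_sz = true)
sp = op("S+", s)  # matrix elements √2, as defined above
```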
diff --git a/src/lib/SiteTypes/src/sitetypes/tj.jl b/src/lib/SiteTypes/src/sitetypes/tj.jl
index 153737e080..f1bc10440a 100644
--- a/src/lib/SiteTypes/src/sitetypes/tj.jl
+++ b/src/lib/SiteTypes/src/sitetypes/tj.jl
@@ -1,4 +1,3 @@
-
"""
space(::SiteType"tJ";
conserve_qns = false,
@@ -14,44 +13,44 @@ Create the Hilbert space for a site of type "tJ".
Optionally specify the conserved symmetries and their quantum number labels.
"""
function space(
- ::SiteType"tJ";
- conserve_qns=false,
- conserve_sz=conserve_qns,
- conserve_nf=conserve_qns,
- conserve_nfparity=conserve_qns,
- qnname_sz="Sz",
- qnname_nf="Nf",
- qnname_nfparity="NfParity",
- # Deprecated
- conserve_parity=nothing,
-)
- if !isnothing(conserve_parity)
- conserve_nfparity = conserve_parity
- end
- if conserve_sz && conserve_nf
- return [
- QN((qnname_nf, 0, -1), (qnname_sz, 0)) => 1
- QN((qnname_nf, 1, -1), (qnname_sz, +1)) => 1
- QN((qnname_nf, 1, -1), (qnname_sz, -1)) => 1
- ]
- elseif conserve_nf
- return [
- QN(qnname_nf, 0, -1) => 1
- QN(qnname_nf, 1, -1) => 2
- ]
- elseif conserve_sz
- return [
- QN((qnname_sz, 0), (qnname_nfparity, 0, -2)) => 1
- QN((qnname_sz, +1), (qnname_nfparity, 1, -2)) => 1
- QN((qnname_sz, -1), (qnname_nfparity, 1, -2)) => 1
- ]
- elseif conserve_nfparity
- return [
- QN(qnname_nfparity, 0, -2) => 1
- QN(qnname_nfparity, 1, -2) => 2
- ]
- end
- return 3
+ ::SiteType"tJ";
+ conserve_qns = false,
+ conserve_sz = conserve_qns,
+ conserve_nf = conserve_qns,
+ conserve_nfparity = conserve_qns,
+ qnname_sz = "Sz",
+ qnname_nf = "Nf",
+ qnname_nfparity = "NfParity",
+ # Deprecated
+ conserve_parity = nothing,
+ )
+ if !isnothing(conserve_parity)
+ conserve_nfparity = conserve_parity
+ end
+ if conserve_sz && conserve_nf
+ return [
+ QN((qnname_nf, 0, -1), (qnname_sz, 0)) => 1
+ QN((qnname_nf, 1, -1), (qnname_sz, +1)) => 1
+ QN((qnname_nf, 1, -1), (qnname_sz, -1)) => 1
+ ]
+ elseif conserve_nf
+ return [
+ QN(qnname_nf, 0, -1) => 1
+ QN(qnname_nf, 1, -1) => 2
+ ]
+ elseif conserve_sz
+ return [
+ QN((qnname_sz, 0), (qnname_nfparity, 0, -2)) => 1
+ QN((qnname_sz, +1), (qnname_nfparity, 1, -2)) => 1
+ QN((qnname_sz, -1), (qnname_nfparity, 1, -2)) => 1
+ ]
+ elseif conserve_nfparity
+ return [
+ QN(qnname_nfparity, 0, -2) => 1
+ QN(qnname_nfparity, 1, -2) => 2
+ ]
+ end
+ return 3
end
val(::ValName"Emp", ::SiteType"tJ") = 1
@@ -69,172 +68,172 @@ state(::StateName"↑", st::SiteType"tJ") = state(StateName("Up"), st)
state(::StateName"↓", st::SiteType"tJ") = state(StateName("Dn"), st)
function op!(Op::ITensor, ::OpName"Nup", ::SiteType"tJ", s::Index)
- return Op[s' => 2, s => 2] = 1.0
+ return Op[s' => 2, s => 2] = 1.0
end
function op!(Op::ITensor, on::OpName"n↑", st::SiteType"tJ", s::Index)
- return op!(Op, alias(on), st, s)
+ return op!(Op, alias(on), st, s)
end
function op!(Op::ITensor, ::OpName"Ndn", ::SiteType"tJ", s::Index)
- return Op[s' => 3, s => 3] = 1.0
+ return Op[s' => 3, s => 3] = 1.0
end
function op!(Op::ITensor, on::OpName"n↓", st::SiteType"tJ", s::Index)
- return op!(Op, alias(on), st, s)
+ return op!(Op, alias(on), st, s)
end
function op!(Op::ITensor, ::OpName"Ntot", ::SiteType"tJ", s::Index)
- Op[s' => 2, s => 2] = 1.0
- return Op[s' => 3, s => 3] = 1.0
+ Op[s' => 2, s => 2] = 1.0
+ return Op[s' => 3, s => 3] = 1.0
end
function op!(Op::ITensor, on::OpName"ntot", st::SiteType"tJ", s::Index)
- return op!(Op, alias(on), st, s)
+ return op!(Op, alias(on), st, s)
end
function op!(Op::ITensor, ::OpName"Cup", ::SiteType"tJ", s::Index)
- return Op[s' => 1, s => 2] = 1.0
+ return Op[s' => 1, s => 2] = 1.0
end
function op!(Op::ITensor, on::OpName"c↑", st::SiteType"tJ", s::Index)
- return op!(Op, alias(on), st, s)
+ return op!(Op, alias(on), st, s)
end
function op!(Op::ITensor, ::OpName"Cdagup", ::SiteType"tJ", s::Index)
- return Op[s' => 2, s => 1] = 1.0
+ return Op[s' => 2, s => 1] = 1.0
end
function op!(Op::ITensor, on::OpName"c†↑", st::SiteType"tJ", s::Index)
- return op!(Op, alias(on), st, s)
+ return op!(Op, alias(on), st, s)
end
function op!(Op::ITensor, ::OpName"Cdn", ::SiteType"tJ", s::Index)
- return Op[s' => 1, s => 3] = 1.0
+ return Op[s' => 1, s => 3] = 1.0
end
function op!(Op::ITensor, on::OpName"c↓", st::SiteType"tJ", s::Index)
- return op!(Op, alias(on), st, s)
+ return op!(Op, alias(on), st, s)
end
function op!(Op::ITensor, ::OpName"Cdagdn", ::SiteType"tJ", s::Index)
- return Op[s' => 3, s => 1] = 1.0
+ return Op[s' => 3, s => 1] = 1.0
end
function op!(Op::ITensor, on::OpName"c†↓", st::SiteType"tJ", s::Index)
- return op!(Op, alias(on), st, s)
+ return op!(Op, alias(on), st, s)
end
function op!(Op::ITensor, ::OpName"Aup", ::SiteType"tJ", s::Index)
- return Op[s' => 1, s => 2] = 1.0
+ return Op[s' => 1, s => 2] = 1.0
end
function op!(Op::ITensor, on::OpName"a↑", st::SiteType"tJ", s::Index)
- return op!(Op, alias(on), st, s)
+ return op!(Op, alias(on), st, s)
end
function op!(Op::ITensor, ::OpName"Adagup", ::SiteType"tJ", s::Index)
- return Op[s' => 2, s => 1] = 1.0
+ return Op[s' => 2, s => 1] = 1.0
end
function op!(Op::ITensor, on::OpName"a†↑", st::SiteType"tJ", s::Index)
- return op!(Op, alias(on), st, s)
+ return op!(Op, alias(on), st, s)
end
function op!(Op::ITensor, ::OpName"Adn", ::SiteType"tJ", s::Index)
- return Op[s' => 1, s => 3] = 1.0
+ return Op[s' => 1, s => 3] = 1.0
end
function op!(Op::ITensor, on::OpName"a↓", st::SiteType"tJ", s::Index)
- return op!(Op, alias(on), st, s)
+ return op!(Op, alias(on), st, s)
end
function op!(Op::ITensor, ::OpName"Adagdn", ::SiteType"tJ", s::Index)
- return Op[s' => 3, s => 1] = 1.0
+ return Op[s' => 3, s => 1] = 1.0
end
function op!(Op::ITensor, on::OpName"a†↓", st::SiteType"tJ", s::Index)
- return op!(Op, alias(on), st, s)
+ return op!(Op, alias(on), st, s)
end
function op!(Op::ITensor, ::OpName"F", ::SiteType"tJ", s::Index)
- Op[s' => 1, s => 1] = +1.0
- Op[s' => 2, s => 2] = -1.0
- return Op[s' => 3, s => 3] = -1.0
+ Op[s' => 1, s => 1] = +1.0
+ Op[s' => 2, s => 2] = -1.0
+ return Op[s' => 3, s => 3] = -1.0
end
function op!(Op::ITensor, ::OpName"Fup", ::SiteType"tJ", s::Index)
- Op[s' => 1, s => 1] = +1.0
- Op[s' => 2, s => 2] = -1.0
- return Op[s' => 3, s => 3] = +1.0
+ Op[s' => 1, s => 1] = +1.0
+ Op[s' => 2, s => 2] = -1.0
+ return Op[s' => 3, s => 3] = +1.0
end
function op!(Op::ITensor, on::OpName"F↑", st::SiteType"tJ", s::Index)
- return op!(Op, alias(on), st, s)
+ return op!(Op, alias(on), st, s)
end
function op!(Op::ITensor, ::OpName"Fdn", ::SiteType"tJ", s::Index)
- Op[s' => 1, s => 1] = +1.0
- Op[s' => 2, s => 2] = +1.0
- return Op[s' => 3, s => 3] = -1.0
+ Op[s' => 1, s => 1] = +1.0
+ Op[s' => 2, s => 2] = +1.0
+ return Op[s' => 3, s => 3] = -1.0
end
function op!(Op::ITensor, on::OpName"F↓", st::SiteType"tJ", s::Index)
- return op!(Op, alias(on), st, s)
+ return op!(Op, alias(on), st, s)
end
function op!(Op::ITensor, ::OpName"Sz", ::SiteType"tJ", s::Index)
- Op[s' => 2, s => 2] = +0.5
- return Op[s' => 3, s => 3] = -0.5
+ Op[s' => 2, s => 2] = +0.5
+ return Op[s' => 3, s => 3] = -0.5
end
function op!(Op::ITensor, ::OpName"Sᶻ", st::SiteType"tJ", s::Index)
- return op!(Op, OpName("Sz"), st, s)
+ return op!(Op, OpName("Sz"), st, s)
end
function op!(Op::ITensor, ::OpName"Sx", ::SiteType"tJ", s::Index)
- Op[s' => 2, s => 3] = 0.5
- return Op[s' => 3, s => 2] = 0.5
+ Op[s' => 2, s => 3] = 0.5
+ return Op[s' => 3, s => 2] = 0.5
end
function op!(Op::ITensor, ::OpName"Sy", ::SiteType"tJ", s::Index)
- Op[s' => 2, s => 3] = -0.5im
- return Op[s' => 3, s => 2] = 0.5im
+ Op[s' => 2, s => 3] = -0.5im
+ return Op[s' => 3, s => 2] = 0.5im
end
function op!(Op::ITensor, ::OpName"Sʸ", st::SiteType"tJ", s::Index)
- return op!(Op, OpName("Sy"), st, s)
+ return op!(Op, OpName("Sy"), st, s)
end
function op!(Op::ITensor, ::OpName"Sˣ", st::SiteType"tJ", s::Index)
- return op!(Op, OpName("Sx"), st, s)
+ return op!(Op, OpName("Sx"), st, s)
end
function op!(Op::ITensor, ::OpName"S+", ::SiteType"tJ", s::Index)
- return Op[s' => 2, s => 3] = 1.0
+ return Op[s' => 2, s => 3] = 1.0
end
function op!(Op::ITensor, ::OpName"S⁺", st::SiteType"tJ", s::Index)
- return op!(Op, OpName("S+"), st, s)
+ return op!(Op, OpName("S+"), st, s)
end
function op!(Op::ITensor, ::OpName"Sp", st::SiteType"tJ", s::Index)
- return op!(Op, OpName("S+"), st, s)
+ return op!(Op, OpName("S+"), st, s)
end
function op!(Op::ITensor, ::OpName"Splus", st::SiteType"tJ", s::Index)
- return op!(Op, OpName("S+"), st, s)
+ return op!(Op, OpName("S+"), st, s)
end
function op!(Op::ITensor, ::OpName"S-", ::SiteType"tJ", s::Index)
- return Op[s' => 3, s => 2] = 1.0
+ return Op[s' => 3, s => 2] = 1.0
end
function op!(Op::ITensor, ::OpName"S⁻", st::SiteType"tJ", s::Index)
- return op!(Op, OpName("S-"), st, s)
+ return op!(Op, OpName("S-"), st, s)
end
function op!(Op::ITensor, ::OpName"Sm", st::SiteType"tJ", s::Index)
- return op!(Op, OpName("S-"), st, s)
+ return op!(Op, OpName("S-"), st, s)
end
function op!(Op::ITensor, ::OpName"Sminus", st::SiteType"tJ", s::Index)
- return op!(Op, OpName("S-"), st, s)
+ return op!(Op, OpName("S-"), st, s)
end
has_fermion_string(::OpName"Cup", ::SiteType"tJ") = true
function has_fermion_string(on::OpName"c↑", st::SiteType"tJ")
- return has_fermion_string(alias(on), st)
+ return has_fermion_string(alias(on), st)
end
has_fermion_string(::OpName"Cdagup", ::SiteType"tJ") = true
function has_fermion_string(on::OpName"c†↑", st::SiteType"tJ")
- return has_fermion_string(alias(on), st)
+ return has_fermion_string(alias(on), st)
end
has_fermion_string(::OpName"Cdn", ::SiteType"tJ") = true
function has_fermion_string(on::OpName"c↓", st::SiteType"tJ")
- return has_fermion_string(alias(on), st)
+ return has_fermion_string(alias(on), st)
end
has_fermion_string(::OpName"Cdagdn", ::SiteType"tJ") = true
function has_fermion_string(on::OpName"c†↓", st::SiteType"tJ")
- return has_fermion_string(alias(on), st)
+ return has_fermion_string(alias(on), st)
end
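A corresponding sketch for the three-state "tJ" site (empty, up, down), again illustrative and not part of the diff:

```julia
using ITensors

# With `conserve_nf = true` the space splits into an Nf = 0 block of dimension 1
# and an Nf = 1 block of dimension 2, matching `space(::SiteType"tJ")` above.
s = siteind("tJ"; conserve_nf = true)

ntot = op("Ntot", s)
cup = op("Cup", s)  # fermionic; `has_fermion_string` reports true for "Cup"
```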
diff --git a/src/lib/SmallStrings/src/smallstring.jl b/src/lib/SmallStrings/src/smallstring.jl
index ac660c5d72..1ead274b12 100644
--- a/src/lib/SmallStrings/src/smallstring.jl
+++ b/src/lib/SmallStrings/src/smallstring.jl
@@ -6,8 +6,8 @@ const IntSmallString = UInt256
# XXX: remove smallLength as a global constant, bad for type inference
const smallLength = 16
-const SmallStringStorage = SVector{smallLength,IntChar}
-const MSmallStringStorage = MVector{smallLength,IntChar}
+const SmallStringStorage = SVector{smallLength, IntChar}
+const MSmallStringStorage = MVector{smallLength, IntChar}
# Similar types are implemented in various packages:
# https://github.com/JuliaString/ShortStrings.jl
@@ -18,14 +18,14 @@ const MSmallStringStorage = MVector{smallLength,IntChar}
# https://github.com/djsegal/FixedLengthStrings.jl
# TODO: make this more generic by parametrizing over the length and Char size. Also, store the length of the string.
struct SmallString
- data::SmallStringStorage
+ data::SmallStringStorage
- SmallString(sv::SmallStringStorage) = new(sv)
+ SmallString(sv::SmallStringStorage) = new(sv)
- function SmallString()
- store = SmallStringStorage(ntuple(_ -> IntChar(0), Val(smallLength)))
- return new(store)
- end
+ function SmallString()
+ store = SmallStringStorage(ntuple(_ -> IntChar(0), Val(smallLength)))
+ return new(store)
+ end
end
const Tag = SmallString
@@ -35,13 +35,13 @@ data(ss::SmallString) = ss.data
Base.eltype(ss::SmallString) = eltype(data(ss))
function SmallString(str)
- length(str) > smallLength &&
- error("String is too long for SmallString. Maximum length is $smallLength.")
- mstore = MSmallStringStorage(ntuple(_ -> IntChar(0), Val(smallLength)))
- for (n, c) in enumerate(str)
- mstore[n] = IntChar(c)
- end
- return SmallString(SmallStringStorage(mstore))
+ length(str) > smallLength &&
+ error("String is too long for SmallString. Maximum length is $smallLength.")
+ mstore = MSmallStringStorage(ntuple(_ -> IntChar(0), Val(smallLength)))
+ for (n, c) in enumerate(str)
+ mstore[n] = IntChar(c)
+ end
+ return SmallString(SmallStringStorage(mstore))
end
SmallString(s::SmallString) = SmallString(data(s))
@@ -49,53 +49,53 @@ SmallString(s::SmallString) = SmallString(data(s))
Base.getindex(s::SmallString, n::Int) = getindex(s.data, n)
function Base.setindex(s::SmallString, val, n::Int)
- return SmallString(Base.setindex(s.data, val, n))
+ return SmallString(Base.setindex(s.data, val, n))
end
# TODO: rename to `isempty`
isnull(s::SmallString) = @inbounds s[1] == IntChar(0)
function Base.vcat(s1::SmallString, s2::SmallString)
- v = MSmallStringStorage(ntuple(_ -> IntChar(0), Val(smallLength)))
- n = 1
- while n <= smallLength && s1[n] != IntChar(0)
- v[n] = s1[n]
- n += 1
- end
- N1 = n - 1
- n2 = 1
- while n2 <= smallLength && s2[n2] != IntChar(0)
- v[n] = s2[n2]
- n += 1
- n2 += 1
- end
- return SmallString(SmallStringStorage(v))
+ v = MSmallStringStorage(ntuple(_ -> IntChar(0), Val(smallLength)))
+ n = 1
+ while n <= smallLength && s1[n] != IntChar(0)
+ v[n] = s1[n]
+ n += 1
+ end
+ N1 = n - 1
+ n2 = 1
+ while n2 <= smallLength && s2[n2] != IntChar(0)
+ v[n] = s2[n2]
+ n += 1
+ n2 += 1
+ end
+ return SmallString(SmallStringStorage(v))
end
function SmallString(i::IntSmallString)
- mut_is = MVector{1,IntSmallString}(ntoh(i))
- p = convert(Ptr{SmallStringStorage}, pointer_from_objref(mut_is))
- return SmallString(unsafe_load(p))
+ mut_is = MVector{1, IntSmallString}(ntoh(i))
+ p = convert(Ptr{SmallStringStorage}, pointer_from_objref(mut_is))
+ return SmallString(unsafe_load(p))
end
function cast_to_uint(store)
- mut_store = MSmallStringStorage(store)
- storage_begin = convert(Ptr{IntSmallString}, pointer_from_objref(mut_store))
- return ntoh(unsafe_load(storage_begin))
+ mut_store = MSmallStringStorage(store)
+ storage_begin = convert(Ptr{IntSmallString}, pointer_from_objref(mut_store))
+ return ntoh(unsafe_load(storage_begin))
end
function IntSmallString(s::SmallString)
- return cast_to_uint(s.data)
+ return cast_to_uint(s.data)
end
function isint(s::SmallString)::Bool
- ndigits = 1
- while ndigits <= smallLength && s[ndigits] != IntChar(0)
- cur_char = Char(s[ndigits])
- !isdigit(cur_char) && return false
- ndigits += 1
- end
- return true
+ ndigits = 1
+ while ndigits <= smallLength && s[ndigits] != IntChar(0)
+ cur_char = Char(s[ndigits])
+ !isdigit(cur_char) && return false
+ ndigits += 1
+ end
+ return true
end
# Here we use the StaticArrays comparison
@@ -120,31 +120,32 @@ Base.isless(s1::SmallString, s2::SmallString) = isless(s1.data, s2.data)
#######################################################
function Base.String(s::SmallString)
- n = 1
- while n <= smallLength && s[n] != IntChar(0)
- n += 1
- end
- len = n - 1
- return String(Char.(s.data[1:len]))
+ n = 1
+ while n <= smallLength && s[n] != IntChar(0)
+ n += 1
+ end
+ len = n - 1
+ return String(Char.(s.data[1:len]))
end
function Base.show(io::IO, s::SmallString)
- n = 1
- while n <= smallLength && s[n] != IntChar(0)
- print(io, Char(s[n]))
- n += 1
- end
+ n = 1
+ while n <= smallLength && s[n] != IntChar(0)
+ print(io, Char(s[n]))
+ n += 1
+ end
+ return nothing
end
-function readcpp(io::IO, ::Type{SmallString}; format="v3")
- s = SmallString()
- if format == "v3"
- for n in 1:7
- c = read(io, Char)
- s = setindex(s, c, n)
+function readcpp(io::IO, ::Type{SmallString}; format = "v3")
+ s = SmallString()
+ if format == "v3"
+ for n in 1:7
+ c = read(io, Char)
+ s = setindex(s, c, n)
+ end
+ else
+ throw(ArgumentError("read SmallString: format=$format not supported"))
end
- else
- throw(ArgumentError("read SmallString: format=$format not supported"))
- end
- return s
+ return s
end
diff --git a/src/lib/TagSets/src/TagSets.jl b/src/lib/TagSets/src/TagSets.jl
index 45d3152bf7..4d0d6a78c6 100644
--- a/src/lib/TagSets/src/TagSets.jl
+++ b/src/lib/TagSets/src/TagSets.jl
@@ -7,9 +7,9 @@ using ..SmallStrings: SmallString, cast_to_uint, isnull
using StaticArrays: MVector, SVector
const IntTag = UInt256 # An integer that can be cast to a Tag
-const MTagStorage = MVector{16,IntTag} # A mutable tag storage, holding 16 characters
-const TagSetStorage{T,N} = SVector{N,T}
-const MTagSetStorage{T,N} = MVector{N,T} # A mutable tag storage
+const MTagStorage = MVector{16, IntTag} # A mutable tag storage, holding 16 characters
+const TagSetStorage{T, N} = SVector{N, T}
+const MTagSetStorage{T, N} = MVector{N, T} # A mutable tag storage
#
# Turn the strict tags checking on and off
@@ -25,7 +25,7 @@ or the number of characters of a tag is enabled or disabled.
See also [`ITensors.set_strict_tags!`](@ref).
"""
function using_strict_tags()
- return _using_strict_tags[]
+ return _using_strict_tags[]
end
"""
@@ -41,149 +41,150 @@ should only be used if you know what you are doing.
See also [`ITensors.using_strict_tags`](@ref).
"""
function set_strict_tags!(enable::Bool)
- previous = using_strict_tags()
- _using_strict_tags[] = enable
- return previous
+ previous = using_strict_tags()
+ _using_strict_tags[] = enable
+ return previous
end
emptytag(::Type{IntTag}) = IntTag(0)
-function empty_storage(::Type{TagSetStorage{T,N}}) where {T,N}
- return TagSetStorage(ntuple(_ -> emptytag(T), Val(N)))
+function empty_storage(::Type{TagSetStorage{T, N}}) where {T, N}
+ return TagSetStorage(ntuple(_ -> emptytag(T), Val(N)))
end
-function empty_storage(::Type{MTagSetStorage{T,N}}) where {T,N}
- return MTagSetStorage(ntuple(_ -> emptytag(T), Val(N)))
+function empty_storage(::Type{MTagSetStorage{T, N}}) where {T, N}
+ return MTagSetStorage(ntuple(_ -> emptytag(T), Val(N)))
end
#TODO: decide which functions on TagSet should be made generic.
-struct GenericTagSet{T,N}
- data::TagSetStorage{T,N}
- length::Int
- GenericTagSet{T,N}() where {T,N} = new(empty_storage(TagSetStorage{T,N}), 0)
- GenericTagSet{T,N}(tags::TagSetStorage{T,N}, len::Int) where {T,N} = new(tags, len)
+struct GenericTagSet{T, N}
+ data::TagSetStorage{T, N}
+ length::Int
+ GenericTagSet{T, N}() where {T, N} = new(empty_storage(TagSetStorage{T, N}), 0)
+ GenericTagSet{T, N}(tags::TagSetStorage{T, N}, len::Int) where {T, N} = new(tags, len)
end
-GenericTagSet{T,N}(ts::GenericTagSet{T,N}) where {T,N} = ts
+GenericTagSet{T, N}(ts::GenericTagSet{T, N}) where {T, N} = ts
-function GenericTagSet{T,N}(t::T) where {T,N}
- ts = empty_storage(MTagSetStorage{T,N})
- ts[1] = T(t)
- return GenericTagSet{T,N}(TagSetStorage(ts), 1)
+function GenericTagSet{T, N}(t::T) where {T, N}
+ ts = empty_storage(MTagSetStorage{T, N})
+ ts[1] = T(t)
+ return GenericTagSet{T, N}(TagSetStorage(ts), 1)
end
#GenericTagSet{IntTag,N}(t::Tag) where {N} = GenericTagSet{IntTag,N}(IntTag(t))
function _hastag(ts::MTagSetStorage, ntags::Int, tag::IntTag)
- for n in 1:ntags
- @inbounds ts[n] == tag && return true
- end
- return false
+ for n in 1:ntags
+ @inbounds ts[n] == tag && return true
+ end
+ return false
end
function _addtag_ordered!(ts::MTagSetStorage, ntags::Int, tag::IntTag)
- if iszero(ntags) || tag > @inbounds ts[ntags]
- @inbounds setindex!(ts, tag, ntags + 1)
- else
- # check for repeated tags
- _hastag(ts, ntags, tag) && return ntags
- pos = ntags + 1 # position new tag should go
- while pos > 1 && tag < @inbounds ts[pos - 1]
- pos -= 1
- @inbounds setindex!(ts, ts[pos], pos + 1)
+ if iszero(ntags) || tag > @inbounds ts[ntags]
+ @inbounds setindex!(ts, tag, ntags + 1)
+ else
+ # check for repeated tags
+ _hastag(ts, ntags, tag) && return ntags
+ pos = ntags + 1 # position new tag should go
+ while pos > 1 && tag < @inbounds ts[pos - 1]
+ pos -= 1
+ @inbounds setindex!(ts, ts[pos], pos + 1)
+ end
+ @inbounds setindex!(ts, tag, pos)
end
- @inbounds setindex!(ts, tag, pos)
- end
- return ntags + 1
+ return ntags + 1
end
function _addtag!(ts::MTagSetStorage, ntags::Int, tag::IntTag)
- t = SmallString(tag)
- # TODO: change to isempty, remove isnull
- if !isnull(t)
- ntags = _addtag_ordered!(ts, ntags, tag)
- end
- return ntags
+ t = SmallString(tag)
+ # TODO: change to isempty, remove isnull
+ if !isnull(t)
+ ntags = _addtag_ordered!(ts, ntags, tag)
+ end
+ return ntags
end
function reset!(v::MTagStorage, nchar::Int)
- for i in 1:nchar
- @inbounds v[i] = IntTag(0)
- end
+ for i in 1:nchar
+ @inbounds v[i] = IntTag(0)
+ end
+ return nothing
end
function strict_tags_error(str, maxlength, nchar)
- return error(
- "You are trying to make a TagSet from the String \"$(str)\". This has more than the maximum number of allowed tags ($maxlength), or has a tag that is longer than the longest allowed tag ($nchar). Either specify fewer or shorter tags, or use `ITensors.set_strict_tags!(false)` to disable this error, in which case extra tags or tag characters will be ignored.",
- )
+ return error(
+ "You are trying to make a TagSet from the String \"$(str)\". This has more than the maximum number of allowed tags ($maxlength), or has a tag that is longer than the longest allowed tag ($nchar). Either specify fewer or shorter tags, or use `ITensors.set_strict_tags!(false)` to disable this error, in which case extra tags or tag characters will be ignored.",
+ )
end
function strict_tags_add_error(ts, tsadd, maxlength)
- return error(
- "You are trying to add the TagSet $tsadd to the TagSet $ts. The result would have more than the maximum number of allowed tags ($maxlength). Either specify fewer tags, or use `ITensors.set_strict_tags!(false)` to disable this error, in which case extra tags will be ignored.",
- )
+ return error(
+ "You are trying to add the TagSet $tsadd to the TagSet $ts. The result would have more than the maximum number of allowed tags ($maxlength). Either specify fewer tags, or use `ITensors.set_strict_tags!(false)` to disable this error, in which case extra tags will be ignored.",
+ )
end
function strict_tags_replace_error(ts, tsremove, tsadd, maxlength)
- return error(
- "You are trying to replace the TagSet $tsremove with the TagSet $tsadd in the TagSet $ts. The result would have more than the maximum number of allowed tags ($maxlength). Either specify fewer tags, or use `ITensors.set_strict_tags!(false)` to disable this error, in which case extra tags will be ignored.",
- )
+ return error(
+ "You are trying to replace the TagSet $tsremove with the TagSet $tsadd in the TagSet $ts. The result would have more than the maximum number of allowed tags ($maxlength). Either specify fewer tags, or use `ITensors.set_strict_tags!(false)` to disable this error, in which case extra tags will be ignored.",
+ )
end
-function GenericTagSet{T,N}(str::AbstractString) where {T,N}
- # Mutable fixed-size vector as temporary Tag storage
- # TODO: refactor the Val here.
- current_tag = empty_storage(MTagStorage)
- # Mutable fixed-size vector as temporary TagSet storage
- ts = empty_storage(MTagSetStorage{T,N})
- nchar = 0
- ntags = 0
- for current_char in str
- if current_char == ','
- if nchar != 0
+function GenericTagSet{T, N}(str::AbstractString) where {T, N}
+ # Mutable fixed-size vector as temporary Tag storage
+ # TODO: refactor the Val here.
+ current_tag = empty_storage(MTagStorage)
+ # Mutable fixed-size vector as temporary TagSet storage
+ ts = empty_storage(MTagSetStorage{T, N})
+ nchar = 0
+ ntags = 0
+ for current_char in str
+ if current_char == ','
+ if nchar != 0
+ if ntags < N
+ ntags = _addtag!(ts, ntags, cast_to_uint(current_tag))
+ elseif using_strict_tags()
+ strict_tags_error(str, N, length(current_tag))
+ end # else do nothing
+ # Reset the current tag
+ reset!(current_tag, nchar)
+ nchar = 0
+ end
+ elseif current_char != ' ' # TagSet constructor ignores whitespace
+ if nchar ≥ length(current_tag)
+ if using_strict_tags()
+ strict_tags_error(str, N, length(current_tag))
+ else
+ continue
+ end
+ end
+ nchar += 1
+ @inbounds current_tag[nchar] = current_char
+ end
+ end
+ # Store the final tag
+ if nchar != 0
if ntags < N
- ntags = _addtag!(ts, ntags, cast_to_uint(current_tag))
+ ntags = _addtag!(ts, ntags, cast_to_uint(current_tag))
elseif using_strict_tags()
- strict_tags_error(str, N, length(current_tag))
+ strict_tags_error(str, N, length(current_tag))
end # else do nothing
- # Reset the current tag
- reset!(current_tag, nchar)
- nchar = 0
- end
- elseif current_char != ' ' # TagSet constructor ignores whitespace
- if nchar ≥ length(current_tag)
+ end
+ if ntags > N
if using_strict_tags()
- strict_tags_error(str, N, length(current_tag))
+ strict_tags_error(str, N, length(current_tag))
else
- continue
+ ntags = N
end
- end
- nchar += 1
- @inbounds current_tag[nchar] = current_char
- end
- end
- # Store the final tag
- if nchar != 0
- if ntags < N
- ntags = _addtag!(ts, ntags, cast_to_uint(current_tag))
- elseif using_strict_tags()
- strict_tags_error(str, N, length(current_tag))
- end # else do nothing
- end
- if ntags > N
- if using_strict_tags()
- strict_tags_error(str, N, length(current_tag))
- else
- ntags = N
end
- end
- return GenericTagSet{T,N}(TagSetStorage(ts), ntags)
+ return GenericTagSet{T, N}(TagSetStorage(ts), ntags)
end
-const TagSet = GenericTagSet{IntTag,4}
+const TagSet = GenericTagSet{IntTag, 4}
-maxlength(::GenericTagSet{<:Any,N}) where {N} = N
+maxlength(::GenericTagSet{<:Any, N}) where {N} = N
macro ts_str(s)
- return TagSet(s)
+ return TagSet(s)
end
Base.convert(::Type{TagSet}, str::String) = TagSet(str)
@@ -217,112 +218,112 @@ Base.@propagate_inbounds Base.getindex(T::TagSet, n::Integer) = SmallString(data
Base.copy(ts::TagSet) = TagSet(data(ts), length(ts))
function Base.:(==)(ts1::TagSet, ts2::TagSet)
- l1 = length(ts1)
- l2 = length(ts2)
- l1 != l2 && return false
- for n in 1:l1
- @inbounds data(ts1)[n] != data(ts2)[n] && return false
- end
- return true
+ l1 = length(ts1)
+ l2 = length(ts2)
+ l1 != l2 && return false
+ for n in 1:l1
+ @inbounds data(ts1)[n] != data(ts2)[n] && return false
+ end
+ return true
end
# Assumes it is an integer
function hastag(ts::TagSet, tag)
- l = length(ts)
- l < 1 && return false
- for n in 1:l
- @inbounds tag == data(ts)[n] && return true
- end
- return false
+ l = length(ts)
+ l < 1 && return false
+ for n in 1:l
+ @inbounds tag == data(ts)[n] && return true
+ end
+ return false
end
function hastags(ts2::TagSet, tags1)
- ts1 = TagSet(tags1)
- l1 = length(ts1)
- l2 = length(ts2)
- l1 > l2 && return false
- for n1 in 1:l1
- @inbounds !hastag(ts2, data(ts1)[n1]) && return false
- end
- return true
+ ts1 = TagSet(tags1)
+ l1 = length(ts1)
+ l2 = length(ts2)
+ l1 > l2 && return false
+ for n1 in 1:l1
+ @inbounds !hastag(ts2, data(ts1)[n1]) && return false
+ end
+ return true
end
function addtags(ts::TagSet, tagsadd)
- tsadd = TagSet(tagsadd)
- if length(ts) == maxlength(ts)
- if hastags(ts, tsadd)
- return ts
- end
- if using_strict_tags()
- strict_tags_add_error(ts, tsadd, maxlength(ts))
+ tsadd = TagSet(tagsadd)
+ if length(ts) == maxlength(ts)
+ if hastags(ts, tsadd)
+ return ts
+ end
+ if using_strict_tags()
+ strict_tags_add_error(ts, tsadd, maxlength(ts))
+ end
end
- end
- res_ts = MVector(data(ts))
- ntags = length(ts)
- for n in 1:length(tsadd)
- if ntags < maxlength(ts)
- @inbounds ntags = _addtag_ordered!(res_ts, ntags, data(tsadd)[n])
- elseif using_strict_tags()
- strict_tags_add_error(ts, tsadd, maxlength(ts))
+ res_ts = MVector(data(ts))
+ ntags = length(ts)
+ for n in 1:length(tsadd)
+ if ntags < maxlength(ts)
+ @inbounds ntags = _addtag_ordered!(res_ts, ntags, data(tsadd)[n])
+ elseif using_strict_tags()
+ strict_tags_add_error(ts, tsadd, maxlength(ts))
+ end
end
- end
- return TagSet(TagSetStorage(res_ts), ntags)
+ return TagSet(TagSetStorage(res_ts), ntags)
end
function _removetag!(ts::MTagSetStorage, ntags::Int, t)
- for n in 1:ntags
- if @inbounds ts[n] == t
- for j in n:(ntags - 1)
- @inbounds ts[j] = ts[j + 1]
- end
- @inbounds ts[ntags] = emptytag(IntTag)
- return ntags -= 1
+ for n in 1:ntags
+ if @inbounds ts[n] == t
+ for j in n:(ntags - 1)
+ @inbounds ts[j] = ts[j + 1]
+ end
+ @inbounds ts[ntags] = emptytag(IntTag)
+ return ntags -= 1
+ end
end
- end
- return ntags
+ return ntags
end
#TODO: optimize this function
function removetags(ts::TagSet, tagsremove)
- tsremove = TagSet(tagsremove)
- res_ts = MVector(data(ts))
- ntags = length(ts)
- for n in 1:length(tsremove)
- @inbounds ntags = _removetag!(res_ts, ntags, data(tsremove)[n])
- end
- return TagSet(TagSetStorage(res_ts), ntags)
+ tsremove = TagSet(tagsremove)
+ res_ts = MVector(data(ts))
+ ntags = length(ts)
+ for n in 1:length(tsremove)
+ @inbounds ntags = _removetag!(res_ts, ntags, data(tsremove)[n])
+ end
+ return TagSet(TagSetStorage(res_ts), ntags)
end
#TODO: optimize this function
function replacetags(ts::TagSet, tagsremove, tagsadd)
- tsremove = TagSet(tagsremove)
- tsadd = TagSet(tagsadd)
- res_ts = MVector(data(ts))
- ntags = length(ts)
- # The TagSet must have the tags to be replaced
- !hastags(ts, tsremove) && return ts
- for n in 1:length(tsremove)
- @inbounds ntags = _removetag!(res_ts, ntags, data(tsremove)[n])
- end
- for n in 1:length(tsadd)
- if ntags < maxlength(ts)
- @inbounds ntags = _addtag_ordered!(res_ts, ntags, data(tsadd)[n])
- elseif using_strict_tags()
- strict_tags_replace_error(ts, tsremove, tsadd, maxlength(ts))
+ tsremove = TagSet(tagsremove)
+ tsadd = TagSet(tagsadd)
+ res_ts = MVector(data(ts))
+ ntags = length(ts)
+ # The TagSet must have the tags to be replaced
+ !hastags(ts, tsremove) && return ts
+ for n in 1:length(tsremove)
+ @inbounds ntags = _removetag!(res_ts, ntags, data(tsremove)[n])
end
- end
- return TagSet(TagSetStorage(res_ts), ntags)
+ for n in 1:length(tsadd)
+ if ntags < maxlength(ts)
+ @inbounds ntags = _addtag_ordered!(res_ts, ntags, data(tsadd)[n])
+ elseif using_strict_tags()
+ strict_tags_replace_error(ts, tsremove, tsadd, maxlength(ts))
+ end
+ end
+ return TagSet(TagSetStorage(res_ts), ntags)
end
function tagstring(T::TagSet)
- res = ""
- N = length(T)
- N == 0 && return res
- for n in 1:(N - 1)
- res *= "$(SmallString(data(T)[n])),"
- end
- res *= "$(SmallString(data(T)[N]))"
- return res
+ res = ""
+ N = length(T)
+ N == 0 && return res
+ for n in 1:(N - 1)
+ res *= "$(SmallString(data(T)[n])),"
+ end
+ res *= "$(SmallString(data(T)[N]))"
+ return res
end
"""
@@ -353,40 +354,40 @@ Base.iterate(ts::TagSet) = (ts[1], 1)
commontags(ts::TagSet) = ts
function commontags(ts1::TagSet, ts2::TagSet)
- ts3 = TagSet()
- N1 = length(ts1)
- for n1 in 1:N1
- t1 = data(ts1)[n1]
- if hastag(ts2, t1)
- ts3 = addtags(ts3, t1)
+ ts3 = TagSet()
+ N1 = length(ts1)
+ for n1 in 1:N1
+ t1 = data(ts1)[n1]
+ if hastag(ts2, t1)
+ ts3 = addtags(ts3, t1)
+ end
end
- end
- return ts3
+ return ts3
end
function commontags(ts1::TagSet, ts2::TagSet, ts3::TagSet, ts::TagSet...)
- return commontags(commontags(ts1, ts2), ts3, ts...)
+ return commontags(commontags(ts1, ts2), ts3, ts...)
end
function Base.show(io::IO, T::TagSet)
- return print(io, "\"$(tagstring(T))\"")
+ return print(io, "\"$(tagstring(T))\"")
end
-function readcpp(io::IO, ::Type{TagSet}; format="v3")
- ts = TagSet()
- if format == "v3"
- mstore = empty_storage(MTagSetStorage{IntTag,4})
- ntags = 0
- for n in 1:4
- t = readcpp(io, Tag; kwargs...)
- if t != SmallString()
- ntags = _addtag_ordered!(mstore, ntags, IntTag(t))
- end
+function readcpp(io::IO, ::Type{TagSet}; format = "v3")
+ ts = TagSet()
+ if format == "v3"
+ mstore = empty_storage(MTagSetStorage{IntTag, 4})
+ ntags = 0
+ for n in 1:4
+            t = readcpp(io, Tag; format = format)
+ if t != SmallString()
+ ntags = _addtag_ordered!(mstore, ntags, IntTag(t))
+ end
+ end
+ ts = TagSet(TagSetStorage(mstore), ntags)
+ else
+ throw(ArgumentError("read TagSet: format=$format not supported"))
end
- ts = TagSet(TagSetStorage(mstore), ntags)
- else
- throw(ArgumentError("read TagSet: format=$format not supported"))
- end
- return ts
+ return ts
end
end
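For orientation, the public behavior exercised by the reformatted TagSet code (a sketch assuming the exported `TagSet`, `hastags`, `addtags`, and `replacetags`):

```julia
using ITensors

ts = TagSet("Site,Fermion,n=3")   # at most 4 tags of at most 16 characters each
hastags(ts, "Fermion")            # true
ts2 = addtags(ts, "Link")         # ordered insertion, capped at 4 tags
ts3 = replacetags(ts2, "Link", "Bond")
```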
diff --git a/src/not.jl b/src/not.jl
index 193e3f3cb6..a3261ac93e 100644
--- a/src/not.jl
+++ b/src/not.jl
@@ -1,12 +1,11 @@
-
#
# not syntax (to prime or tag the compliment
# of the specified indices/pattern)
#
struct Not{T}
- pattern::T
- Not(p::T) where {T} = new{T}(p)
+ pattern::T
+ Not(p::T) where {T} = new{T}(p)
end
"""
diff --git a/src/nullspace.jl b/src/nullspace.jl
index 50d4206ad5..e01c41a9f6 100644
--- a/src/nullspace.jl
+++ b/src/nullspace.jl
@@ -5,116 +5,116 @@ using .QuantumNumbers: Arrow
#
# XXX: generalize this function
-function _getindex(T::DenseTensor{ElT,N}, I1::Colon, I2::UnitRange{Int64}) where {ElT,N}
- A = array(T)[I1, I2]
- return tensor(Dense(vec(A)), setdims(inds(T), size(A)))
+function _getindex(T::DenseTensor{ElT, N}, I1::Colon, I2::UnitRange{Int64}) where {ElT, N}
+ A = array(T)[I1, I2]
+ return tensor(Dense(vec(A)), setdims(inds(T), size(A)))
end
function getblock_preserve_qns(T::Tensor, b::Block)
- # TODO: make `T[b]` preserve QNs
- Tb = T[b]
- indsTb = getblock.(inds(T), Tuple(b)) .* dir.(inds(T))
- return ITensors.setinds(Tb, indsTb)
+ # TODO: make `T[b]` preserve QNs
+ Tb = T[b]
+ indsTb = getblock.(inds(T), Tuple(b)) .* dir.(inds(T))
+ return ITensors.setinds(Tb, indsTb)
end
-function blocksparsetensor(blocks::Dict{B,TB}) where {B,TB}
- b1, Tb1 = first(pairs(blocks))
- N = length(b1)
- indstypes = typeof.(inds(Tb1))
- blocktype = eltype(Tb1)
- indsT = getindex.(indstypes)
- # Determine the indices from the blocks
- for (b, Tb) in pairs(blocks)
- indsTb = inds(Tb)
- for n in 1:N
- bn = b[n]
- indsTn = indsT[n]
- if bn > length(indsTn)
- resize!(indsTn, bn)
- end
- indsTn[bn] = indsTb[n]
+function blocksparsetensor(blocks::Dict{B, TB}) where {B, TB}
+ b1, Tb1 = first(pairs(blocks))
+ N = length(b1)
+ indstypes = typeof.(inds(Tb1))
+ blocktype = eltype(Tb1)
+ indsT = getindex.(indstypes)
+ # Determine the indices from the blocks
+ for (b, Tb) in pairs(blocks)
+ indsTb = inds(Tb)
+ for n in 1:N
+ bn = b[n]
+ indsTn = indsT[n]
+ if bn > length(indsTn)
+ resize!(indsTn, bn)
+ end
+ indsTn[bn] = indsTb[n]
+ end
end
- end
- T = BlockSparseTensor(blocktype, indsT)
- for (b, Tb) in pairs(blocks)
- if !isempty(Tb)
- T[b] = Tb
+ T = BlockSparseTensor(blocktype, indsT)
+ for (b, Tb) in pairs(blocks)
+ if !isempty(Tb)
+ T[b] = Tb
+ end
end
- end
- return T
+ return T
end
default_atol(A::AbstractArray) = 0.0
function default_rtol(A::AbstractArray, atol::Real)
- return (min(size(A, 1), size(A, 2)) * eps(real(float(one(eltype(A)))))) * iszero(atol)
+ return (min(size(A, 1), size(A, 2)) * eps(real(float(one(eltype(A)))))) * iszero(atol)
end
function _nullspace_hermitian(
- M::DenseTensor; atol::Real=default_atol(M), rtol::Real=default_rtol(M, atol)
-)
- # TODO: try this version
- #D, U = eigen(Hermitian(M))
- Dᵢₜ, Uᵢₜ = eigen(itensor(M); ishermitian=true)
- D = tensor(Dᵢₜ)
- U = tensor(Uᵢₜ)
- tol = max(atol, abs(D[1, 1]) * rtol)
- indstart = sum(d -> abs(d) .> tol, storage(D)) + 1
- indstop = lastindex(U, 2)
- Nb = _getindex(U, :, indstart:indstop)
- return Nb
+ M::DenseTensor; atol::Real = default_atol(M), rtol::Real = default_rtol(M, atol)
+ )
+ # TODO: try this version
+ #D, U = eigen(Hermitian(M))
+ Dᵢₜ, Uᵢₜ = eigen(itensor(M); ishermitian = true)
+ D = tensor(Dᵢₜ)
+ U = tensor(Uᵢₜ)
+ tol = max(atol, abs(D[1, 1]) * rtol)
+ indstart = sum(d -> abs(d) .> tol, storage(D)) + 1
+ indstop = lastindex(U, 2)
+ Nb = _getindex(U, :, indstart:indstop)
+ return Nb
end
function _nullspace_hermitian(
- M::BlockSparseTensor; atol::Real=default_atol(M), rtol::Real=default_rtol(M, atol)
-)
- tol = atol
- # TODO: try this version
- # Insert any missing diagonal blocks
- insert_diag_blocks!(M)
- #D, U = eigen(Hermitian(M))
- Dᵢₜ, Uᵢₜ = eigen(itensor(M); ishermitian=true)
- D = tensor(Dᵢₜ)
- U = tensor(Uᵢₜ)
- nullspace_blocks = Dict()
- for bU in nzblocks(U)
- bM = Block(bU[1], bU[1])
- bD = Block(bU[2], bU[2])
- # Assume sorted from largest to smallest
- tol = max(atol, abs(D[bD][1, 1]) * rtol)
- indstart = sum(d -> abs(d) .> tol, storage(D[bD])) + 1
- Ub = getblock_preserve_qns(U, bU)
- indstop = lastindex(Ub, 2)
- # Drop zero dimensional blocks
- Nb = _getindex(Ub, :, indstart:indstop)
- nullspace_blocks[bU] = Nb
- end
- return blocksparsetensor(nullspace_blocks)
+ M::BlockSparseTensor; atol::Real = default_atol(M), rtol::Real = default_rtol(M, atol)
+ )
+ tol = atol
+ # TODO: try this version
+ # Insert any missing diagonal blocks
+ insert_diag_blocks!(M)
+ #D, U = eigen(Hermitian(M))
+ Dᵢₜ, Uᵢₜ = eigen(itensor(M); ishermitian = true)
+ D = tensor(Dᵢₜ)
+ U = tensor(Uᵢₜ)
+ nullspace_blocks = Dict()
+ for bU in nzblocks(U)
+ bM = Block(bU[1], bU[1])
+ bD = Block(bU[2], bU[2])
+ # Assume sorted from largest to smallest
+ tol = max(atol, abs(D[bD][1, 1]) * rtol)
+ indstart = sum(d -> abs(d) .> tol, storage(D[bD])) + 1
+ Ub = getblock_preserve_qns(U, bU)
+ indstop = lastindex(Ub, 2)
+ # Drop zero dimensional blocks
+ Nb = _getindex(Ub, :, indstart:indstop)
+ nullspace_blocks[bU] = Nb
+ end
+ return blocksparsetensor(nullspace_blocks)
end
-function LinearAlgebra.nullspace(M::Hermitian{<:Number,<:Tensor}; kwargs...)
- return _nullspace_hermitian(parent(M); kwargs...)
+function LinearAlgebra.nullspace(M::Hermitian{<:Number, <:Tensor}; kwargs...)
+ return _nullspace_hermitian(parent(M); kwargs...)
end
#
# QN functionality
#
-function setdims(t::NTuple{N,Pair{QN,Int}}, dims::NTuple{N,Int}) where {N}
- return first.(t) .=> dims
+function setdims(t::NTuple{N, Pair{QN, Int}}, dims::NTuple{N, Int}) where {N}
+ return first.(t) .=> dims
end
-function setdims(t::NTuple{N,Index{Int}}, dims::NTuple{N,Int}) where {N}
- return dims
+function setdims(t::NTuple{N, Index{Int}}, dims::NTuple{N, Int}) where {N}
+ return dims
end
function getblock(i::Index, n::Integer)
- return ITensors.space(i)[n]
+ return ITensors.space(i)[n]
end
# Make `Pair{QN,Int}` act like a regular `dim`
-NDTensors.dim(qnv::Pair{QN,Int}) = last(qnv)
+NDTensors.dim(qnv::Pair{QN, Int}) = last(qnv)
-Base.:*(qnv::Pair{QN,Int}, d::Arrow) = qn(qnv) * d => dim(qnv)
+Base.:*(qnv::Pair{QN, Int}, d::Arrow) = qn(qnv) * d => dim(qnv)
#
# ITensors functionality
@@ -124,29 +124,29 @@ Base.:*(qnv::Pair{QN,Int}, d::Arrow) = qn(qnv) * d => dim(qnv)
matricize(T::ITensor, inds::Index...) = matricize(T, inds)
function matricize(T::ITensor, inds)
- left_inds = commoninds(T, inds)
- right_inds = uniqueinds(T, inds)
- return matricize(T, left_inds, right_inds)
+ left_inds = commoninds(T, inds)
+ right_inds = uniqueinds(T, inds)
+ return matricize(T, left_inds, right_inds)
end
function matricize(T::ITensor, left_inds, right_inds)
- CL = combiner(left_inds; dir=ITensors.Out, tags="CL")
- CR = combiner(right_inds; dir=ITensors.In, tags="CR")
- M = (T * CL) * CR
- return M, CL, CR
+ CL = combiner(left_inds; dir = ITensors.Out, tags = "CL")
+ CR = combiner(right_inds; dir = ITensors.In, tags = "CR")
+ M = (T * CL) * CR
+ return M, CL, CR
end
-function nullspace(::Order{2}, M::ITensor, left_inds, right_inds; tags="n", kwargs...)
- @assert order(M) == 2
- M² = prime(dag(M), right_inds) * M
- M² = permute(M², right_inds'..., right_inds...)
- M²ₜ = tensor(M²)
- Nₜ = nullspace(Hermitian(M²ₜ); kwargs...)
- indsN = (Index(ind(Nₜ, 1); dir=ITensors.Out), Index(ind(Nₜ, 2); dir=ITensors.Out, tags))
- N = itensor(ITensors.setinds(Nₜ, indsN))
- # Make the index match the input index
- Ñ = replaceinds(N, (ind(N, 1),) => right_inds)
- return Ñ
+function nullspace(::Order{2}, M::ITensor, left_inds, right_inds; tags = "n", kwargs...)
+ @assert order(M) == 2
+ M² = prime(dag(M), right_inds) * M
+ M² = permute(M², right_inds'..., right_inds...)
+ M²ₜ = tensor(M²)
+ Nₜ = nullspace(Hermitian(M²ₜ); kwargs...)
+ indsN = (Index(ind(Nₜ, 1); dir = ITensors.Out), Index(ind(Nₜ, 2); dir = ITensors.Out, tags))
+ N = itensor(ITensors.setinds(Nₜ, indsN))
+ # Make the index match the input index
+ Ñ = replaceinds(N, (ind(N, 1),) => right_inds)
+ return Ñ
end
"""
@@ -180,11 +180,11 @@ Keyword arguments:
- `atol::Float64=1E-12` - singular values of T†*T below this value define the null space
- `tags::String="n"` - choose the tags of the index selecting elements of the null space
"""
-function nullspace(T::ITensor, is...; tags="n", atol=1E-12, kwargs...)
- M, CL, CR = matricize(T, is...)
- @assert order(M) == 2
- cL = commoninds(M, CL)
- cR = commoninds(M, CR)
- N₂ = nullspace(Order(2), M, cL, cR; tags, atol, kwargs...)
- return N₂ * CR
+function nullspace(T::ITensor, is...; tags = "n", atol = 1.0e-12, kwargs...)
+ M, CL, CR = matricize(T, is...)
+ @assert order(M) == 2
+ cL = commoninds(M, CL)
+ cR = commoninds(M, CR)
+ N₂ = nullspace(Order(2), M, cL, cR; tags, atol, kwargs...)
+ return N₂ * CR
end
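
A minimal usage sketch of the `nullspace` entry point above; the tensor, indices, and tolerance are invented for illustration (`random_itensor` is spelled `randomITensor` in older releases):

```julia
using ITensors, LinearAlgebra

i = Index(2, "i")
j = Index(3, "j")
T = random_itensor(i, j)

# N carries the indices of T that are *not* in the argument list (here `j`)
# plus a new index tagged "n"; contracting it back against T should vanish
# up to the requested tolerance.
N = nullspace(T, i; atol = 1.0e-12, tags = "n")
@show norm(T * N)
```
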
diff --git a/src/qn/flux.jl b/src/qn/flux.jl
index 1709e41479..eda5a608f4 100644
--- a/src/qn/flux.jl
+++ b/src/qn/flux.jl
@@ -1,4 +1,3 @@
-
"""
flux(T::ITensor)
@@ -54,10 +53,10 @@ has. If the Tensor is not blocked or has no non-zero blocks,
this function returns `nothing`.
"""
function flux(T::Tensor)
- (!hasqns(T) || isempty(T)) && return nothing
- @debug_check checkflux(T)
- block1 = first(eachnzblock(T))
- return flux(T, block1)
+ (!hasqns(T) || isempty(T)) && return nothing
+ @debug_check checkflux(T)
+ block1 = first(eachnzblock(T))
+ return flux(T, block1)
end
allfluxequal(T::Tensor, flux_to_check) = all(b -> flux(T, b) == flux_to_check, nzblocks(T))
@@ -71,8 +70,8 @@ are equal. Throws an error if one or more blocks have a different flux.
If the tensor is dense (is not blocked) then `checkflux` returns `nothing`.
"""
function checkflux(T::Tensor)
- (!hasqns(T) || isempty(T)) && return nothing
- return allfluxequal(T) ? nothing : error("Fluxes not all equal")
+ (!hasqns(T) || isempty(T)) && return nothing
+ return allfluxequal(T) ? nothing : error("Fluxes not all equal")
end
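
A quick illustration of the documented `flux` behavior; the index and the matrix element below are made up:

```julia
using ITensors

s = Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags = "s")
T = ITensor(s', dag(s))
T[s' => 1, s => 2] = 1.0   # the first element set fixes the flux of T

# Nonzero flux here, since the element connects different Sz sectors;
# for a dense tensor, or one with no nonzero blocks, `flux` returns `nothing`.
@show flux(T)
```
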
"""
diff --git a/src/qn/qnindex.jl b/src/qn/qnindex.jl
index 3fb94c4f22..72ab27641a 100644
--- a/src/qn/qnindex.jl
+++ b/src/qn/qnindex.jl
@@ -1,9 +1,9 @@
using .QuantumNumbers:
- QuantumNumbers, Arrow, Neither, Out, have_same_mods, have_same_qns, removeqn
+ QuantumNumbers, Arrow, Neither, Out, have_same_mods, have_same_qns, removeqn
using .SiteTypes: SiteTypes
using .TagSets: TagSets
-const QNBlock = Pair{QN,Int64}
+const QNBlock = Pair{QN, Int64}
const QNBlocks = Vector{QNBlock}
@@ -25,56 +25,56 @@ qn(qnblocks::QNBlocks, b::Block{1}) = qn(qnblocks[only(b)])
nblocks(qnblocks::QNBlocks) = length(qnblocks)
function dim(qnblocks::QNBlocks)
- dimtot = 0
- for (_, blockdim) in qnblocks
- dimtot += blockdim
- end
- return dimtot
+ dimtot = 0
+ for (_, blockdim) in qnblocks
+ dimtot += blockdim
+ end
+ return dimtot
end
function -(qnb::QNBlock)
- return QNBlock(-qn(qnb), blockdim(qnb))
+ return QNBlock(-qn(qnb), blockdim(qnb))
end
function (qn1::QNBlock + qn2::QNBlock)
- qn(qn1) != qn(qn2) && error("Cannot add qn blocks with different qns")
- return QNBlock(qn(qn1), blockdim(qn1) + blockdim(qn2))
+ qn(qn1) != qn(qn2) && error("Cannot add qn blocks with different qns")
+ return QNBlock(qn(qn1), blockdim(qn1) + blockdim(qn2))
end
function QuantumNumbers.removeqn(qn_block::QNBlock, qn_name::String)
- return removeqn(qn(qn_block), qn_name) => blockdim(qn_block)
+ return removeqn(qn(qn_block), qn_name) => blockdim(qn_block)
end
function -(qns::QNBlocks)
- qns_new = copy(qns)
- for i in 1:length(qns_new)
- qns_new[i] = -qns_new[i]
- end
- return qns_new
+ qns_new = copy(qns)
+ for i in 1:length(qns_new)
+ qns_new[i] = -qns_new[i]
+ end
+ return qns_new
end
function mergeblocks(qns::QNBlocks)
- qnsC = [qns[1]]
-
- # Which block this is, after combining
- block_count = 1
- for i in 2:nblocks(qns)
- if qn(qns[i]) == qn(qns[i - 1])
- qnsC[block_count] += qns[i]
- else
- push!(qnsC, qns[i])
- block_count += 1
+ qnsC = [qns[1]]
+
+ # Which block this is, after combining
+ block_count = 1
+ for i in 2:nblocks(qns)
+ if qn(qns[i]) == qn(qns[i - 1])
+ qnsC[block_count] += qns[i]
+ else
+ push!(qnsC, qns[i])
+ block_count += 1
+ end
end
- end
- return qnsC
+ return qnsC
end
-function QuantumNumbers.removeqn(space::QNBlocks, qn_name::String; mergeblocks=true)
- space = QNBlocks([removeqn(qn_block, qn_name) for qn_block in space])
- if mergeblocks
- space = ITensors.mergeblocks(space)
- end
- return space
+function QuantumNumbers.removeqn(space::QNBlocks, qn_name::String; mergeblocks = true)
+ space = QNBlocks([removeqn(qn_block, qn_name) for qn_block in space])
+ if mergeblocks
+ space = ITensors.mergeblocks(space)
+ end
+ return space
end
"""
@@ -98,19 +98,19 @@ symmetrystyle(::NonQN, ::HasQNs) = HasQNs()
hasqns(::QNBlocks) = true
function QuantumNumbers.have_same_qns(qnblocks::QNBlocks)
- qn1 = qn(qnblocks, 1)
- for n in 2:nblocks(qnblocks)
- !have_same_qns(qn1, qn(qnblocks, n)) && return false
- end
- return true
+ qn1 = qn(qnblocks, 1)
+ for n in 2:nblocks(qnblocks)
+ !have_same_qns(qn1, qn(qnblocks, n)) && return false
+ end
+ return true
end
function QuantumNumbers.have_same_mods(qnblocks::QNBlocks)
- qn1 = qn(qnblocks, 1)
- for n in 2:nblocks(qnblocks)
- !have_same_mods(qn1, qn(qnblocks, n)) && return false
- end
- return true
+ qn1 = qn(qnblocks, 1)
+ for n in 2:nblocks(qnblocks)
+ !have_same_mods(qn1, qn(qnblocks, n)) && return false
+ end
+ return true
end
"""
@@ -125,11 +125,11 @@ dimensions.
Index([QN("Sz", -1) => 1, QN("Sz", 1) => 1]; tags = "i")
```
"""
-function Index(qnblocks::QNBlocks; dir::Arrow=Out, tags="", plev=0)
- # TODO: make this a debug check?
- #have_same_qns(qnblocks) || error("When creating a QN Index, the QN blocks must have the same QNs")
- #have_same_mods(qnblocks) || error("When creating a QN Index, the QN blocks must have the same mods")
- return Index(rand(index_id_rng(), IDType), qnblocks, dir, tags, plev)
+function Index(qnblocks::QNBlocks; dir::Arrow = Out, tags = "", plev = 0)
+ # TODO: make this a debug check?
+ #have_same_qns(qnblocks) || error("When creating a QN Index, the QN blocks must have the same QNs")
+ #have_same_mods(qnblocks) || error("When creating a QN Index, the QN blocks must have the same mods")
+ return Index(rand(index_id_rng(), IDType), qnblocks, dir, tags, plev)
end
"""
@@ -145,8 +145,8 @@ i = Index([QN("Sz", -1) => 1, QN("Sz", 1) => 1], "i")
idag = dag(i) # Same Index with arrow direction flipped
```
"""
-function Index(qnblocks::QNBlocks, tags; dir::Arrow=Out, plev::Integer=0)
- return Index(qnblocks; dir=dir, tags=tags, plev=plev)
+function Index(qnblocks::QNBlocks, tags; dir::Arrow = Out, plev::Integer = 0)
+ return Index(qnblocks; dir = dir, tags = tags, plev = plev)
end
"""
@@ -162,8 +162,8 @@ dimensions.
Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags = "i")
```
"""
-function Index(qnblocks::QNBlock...; dir::Arrow=Out, tags="", plev=0)
- return Index([qnblocks...]; dir=dir, tags=tags, plev=plev)
+function Index(qnblocks::QNBlock...; dir::Arrow = Out, tags = "", plev = 0)
+ return Index([qnblocks...]; dir = dir, tags = tags, plev = plev)
end
dim(i::QNIndex) = dim(space(i))
@@ -196,25 +196,25 @@ nblocks(i::Index) = 1
# block(qns, 3) == Block(2)
# block(qns, 4) == Block(2)
function block(qns::QNBlocks, n::Int)
- tdim = 0
- for b in 1:nblocks(qns)
- tdim += blockdim(qns, Block(b))
- (n <= tdim) && return Block(b)
- end
- error("qn: QN Index value out of range")
- return Block(0)
+ tdim = 0
+ for b in 1:nblocks(qns)
+ tdim += blockdim(qns, Block(b))
+ (n <= tdim) && return Block(b)
+ end
+ error("qn: QN Index value out of range")
+ return Block(0)
end
function block(iv::Pair{<:Index})
- i = ind(iv)
- v = SiteTypes.val(iv)
- return block(space(i), v)
+ i = ind(iv)
+ v = SiteTypes.val(iv)
+ return block(space(i), v)
end
# Get the QN of the block
qn(i::QNIndex, b::Block{1}) = qn(space(i), b)
-qn(ib::Pair{<:Index,Block{1}}) = qn(first(ib), last(ib))
+qn(ib::Pair{<:Index, Block{1}}) = qn(first(ib), last(ib))
# XXX: deprecate the Integer version
# Miles asks: isn't it pretty convenient to have it?
@@ -241,14 +241,14 @@ qn(iv::Pair{<:Index}) = qn(ind(iv), block(iv))
flux(i::QNIndex, b::Block{1}) = dir(i) * qn(i, b)
-flux(ib::Pair{<:Index,Block{1}}) = flux(first(ib), last(ib))
+flux(ib::Pair{<:Index, Block{1}}) = flux(first(ib), last(ib))
flux(iv::Pair{<:Index}) = flux(ind(iv), block(iv))
function flux(i::Index, b::Block)
- return error(
- "Cannot compute flux: Index has no QNs. Try setting conserve_qns=true in siteinds or constructing Index with QN subspaces.",
- )
+ return error(
+ "Cannot compute flux: Index has no QNs. Try setting conserve_qns=true in siteinds or constructing Index with QN subspaces.",
+ )
end
qnblocks(i::QNIndex) = space(i)
@@ -274,10 +274,10 @@ julia> blockdim(i,2)
```
"""
blockdim(i::QNIndex, b::Integer) = blockdim(i, Block(b))
-function blockdim(i::Index, b::Union{Block,Integer})
- return error(
- "`blockdim(i::Index, b)` not currently defined for non-QN Index $i of type `$(typeof(i))`. In the future this may be defined for `b == Block(1)` or `b == 1` as `dim(i)` and error otherwise.",
- )
+function blockdim(i::Index, b::Union{Block, Integer})
+ return error(
+ "`blockdim(i::Index, b)` not currently defined for non-QN Index $i of type `$(typeof(i))`. In the future this may be defined for `b == Block(1)` or `b == 1` as `dim(i)` and error otherwise.",
+ )
end
dim(i::QNIndex, b::Block) = blockdim(space(i), b)
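
A brief sketch of the QN Index accessors touched in this file; the quantum numbers and block dimensions are arbitrary:

```julia
using ITensors

i = Index(QN("Sz", -1) => 1, QN("Sz", 0) => 2, QN("Sz", 1) => 1; tags = "i")

@show dim(i)          # 4: sum of the block dimensions
@show nblocks(i)      # 3
@show blockdim(i, 2)  # 2: dimension of the second block
@show dir(dag(i))     # arrow direction flipped relative to dir(i)
```
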
@@ -286,13 +286,13 @@ NDTensors.eachblock(i::Index) = (Block(n) for n in 1:nblocks(i))
# Return the first block of the QNIndex with the flux q
function block(::typeof(first), ind::QNIndex, q::QN)
- for b in eachblock(ind)
- if flux(ind => b) == q
- return b
+ for b in eachblock(ind)
+ if flux(ind => b) == q
+ return b
+ end
end
- end
- error("No block found with QN equal to $q")
- return Block(0)
+ error("No block found with QN equal to $q")
+ return Block(0)
end
# Find the first block that matches the pattern f,
@@ -300,13 +300,13 @@ end
# `f` accepts a pair of `i => Block(n)` where `n`
# runs over `nblocks(i)`.
function findfirstblock(f, i::QNIndex)
- for b in ITensors.eachblock(i)
- if f(i => b)
- return b
+ for b in ITensors.eachblock(i)
+ if f(i => b)
+ return b
+ end
end
- end
- error("No block of Index $i matching the specified pattern.")
- return Block(0)
+ error("No block of Index $i matching the specified pattern.")
+ return Block(0)
end
# XXX: call this simply `block` and return a Block{1}
@@ -320,13 +320,13 @@ of the QNIndex having QN equal to `q`. Assumes
all blocks of `ind` have a unique QN.
"""
function qnblocknum(ind::QNIndex, q::QN)
- for b in 1:nblocks(ind)
- if flux(ind => Block(b)) == q
- return b
+ for b in 1:nblocks(ind)
+ if flux(ind => Block(b)) == q
+ return b
+ end
end
- end
- error("No block found with QN equal to $q")
- return 0
+ error("No block found with QN equal to $q")
+ return 0
end
blockdim(ind::QNIndex, q::QN) = blockdim(ind, block(first, ind, q))
@@ -345,94 +345,94 @@ qnblockdim(ind::QNIndex, q::QN) = blockdim(ind, qnblocknum(ind, q))
(dir::Arrow * qnb::QNBlock) = QNBlock(dir * qn(qnb), blockdim(qnb))
function (dir::Arrow * qn::QNBlocks)
- # XXX use:
- # dir .* qn
- qnR = copy(qn)
- for i in 1:nblocks(qnR)
- qnR[i] = dir * qnR[i]
- end
- return qnR
+ # XXX use:
+ # dir .* qn
+ qnR = copy(qn)
+ for i in 1:nblocks(qnR)
+ qnR[i] = dir * qnR[i]
+ end
+ return qnR
end
(qn1::QNBlock * qn2::QNBlock) = QNBlock(qn(qn1) + qn(qn2), blockdim(qn1) * blockdim(qn2))
# TODO: rename tensorproduct with ⊗ alias
function outer(qn1::QNBlocks, qn2::QNBlocks)
- qnR = ITensors.QNBlocks(undef, nblocks(qn1) * nblocks(qn2))
- for (i, t) in enumerate(Iterators.product(qn1, qn2))
- qnR[i] = prod(t)
- end
- return qnR
+ qnR = ITensors.QNBlocks(undef, nblocks(qn1) * nblocks(qn2))
+ for (i, t) in enumerate(Iterators.product(qn1, qn2))
+ qnR[i] = prod(t)
+ end
+ return qnR
end
# TODO: rename tensorproduct with ⊗ alias
-function outer(i1::QNIndex, i2::QNIndex; dir=nothing, tags="", plev::Integer=0)
- if isnothing(dir)
- if ITensors.dir(i1) == ITensors.dir(i2)
- dir = ITensors.dir(i1)
- else
- dir = Out
+function outer(i1::QNIndex, i2::QNIndex; dir = nothing, tags = "", plev::Integer = 0)
+ if isnothing(dir)
+ if ITensors.dir(i1) == ITensors.dir(i2)
+ dir = ITensors.dir(i1)
+ else
+ dir = Out
+ end
end
- end
- newspace = dir * ((ITensors.dir(i1) * space(i1)) ⊗ (ITensors.dir(i2) * space(i2)))
- return Index(newspace; dir, tags, plev)
+ newspace = dir * ((ITensors.dir(i1) * space(i1)) ⊗ (ITensors.dir(i2) * space(i2)))
+ return Index(newspace; dir, tags, plev)
end
# TODO: rename tensorproduct with ⊗ alias
-function outer(i::QNIndex; dir=nothing, tags="", plev::Integer=0)
- if isnothing(dir)
- dir = ITensors.dir(i)
- end
- newspace = dir * (ITensors.dir(i) * space(i))
- return Index(newspace; dir, tags, plev)
+function outer(i::QNIndex; dir = nothing, tags = "", plev::Integer = 0)
+ if isnothing(dir)
+ dir = ITensors.dir(i)
+ end
+ newspace = dir * (ITensors.dir(i) * space(i))
+ return Index(newspace; dir, tags, plev)
end
# TODO: add ⊕ alias
function directsum(
- i::Index{Vector{Pair{QN,Int}}}, j::Index{Vector{Pair{QN,Int}}}; tags="sum"
-)
- dir(i) ≠ dir(j) && error(
- "To direct sum two indices, they must have the same direction. Trying to direct sum indices $i and $j.",
- )
- return Index(vcat(space(i), space(j)); dir=dir(i), tags)
+ i::Index{Vector{Pair{QN, Int}}}, j::Index{Vector{Pair{QN, Int}}}; tags = "sum"
+ )
+ dir(i) ≠ dir(j) && error(
+ "To direct sum two indices, they must have the same direction. Trying to direct sum indices $i and $j.",
+ )
+ return Index(vcat(space(i), space(j)); dir = dir(i), tags)
end
isless(qnb1::QNBlock, qnb2::QNBlock) = isless(qn(qnb1), qn(qnb2))
function permuteblocks(i::QNIndex, perm)
- qnblocks_perm = space(i)[perm]
- return replaceqns(i, qnblocks_perm)
+ qnblocks_perm = space(i)[perm]
+ return replaceqns(i, qnblocks_perm)
end
function combineblocks(qns::QNBlocks)
- perm = sortperm(qns)
- qnsP = qns[perm]
- qnsC = [qnsP[1]]
- comb = Vector{Int}(undef, nblocks(qns))
-
- # Which block this is, after combining
- block_count = 1
- comb[1] = block_count
- for i in 2:nblocks(qnsP)
- if qn(qnsP[i]) == qn(qnsP[i - 1])
- qnsC[block_count] += qnsP[i]
- else
- push!(qnsC, qnsP[i])
- block_count += 1
+ perm = sortperm(qns)
+ qnsP = qns[perm]
+ qnsC = [qnsP[1]]
+ comb = Vector{Int}(undef, nblocks(qns))
+
+ # Which block this is, after combining
+ block_count = 1
+ comb[1] = block_count
+ for i in 2:nblocks(qnsP)
+ if qn(qnsP[i]) == qn(qnsP[i - 1])
+ qnsC[block_count] += qnsP[i]
+ else
+ push!(qnsC, qnsP[i])
+ block_count += 1
+ end
+ comb[i] = block_count
end
- comb[i] = block_count
- end
- return qnsC, perm, comb
+ return qnsC, perm, comb
end
function splitblocks(qns::QNBlocks)
- idim = dim(qns)
- split_qns = similar(qns, idim)
- for n in 1:idim
- b = block(qns, n)
- split_qns[n] = qn(qns, b) => 1
- end
- return split_qns
+ idim = dim(qns)
+ split_qns = similar(qns, idim)
+ for n in 1:idim
+ b = block(qns, n)
+ split_qns[n] = qn(qns, b) => 1
+ end
+ return split_qns
end
# Make a new Index with the specified qn blocks
@@ -441,78 +441,78 @@ replaceqns(i::QNIndex, qns::QNBlocks) = setspace(i, qns)
NDTensors.block(i::QNIndex, n::Integer) = space(i)[n]
function setblockdim!(i::QNIndex, newdim::Integer, n::Integer)
- qns = space(i)
- qns[n] = qn(qns[n]) => newdim
- return i
+ qns = space(i)
+ qns[n] = qn(qns[n]) => newdim
+ return i
end
function setblockqn!(i::QNIndex, newqn::QN, n::Integer)
- qns = space(i)
- qns[n] = newqn => blockdim(qns[n])
- return i
+ qns = space(i)
+ qns[n] = newqn => blockdim(qns[n])
+ return i
end
function setblock!(i::QNIndex, b::QNBlock, n::Integer)
- qns = space(i)
- qns[n] = b
- return i
+ qns = space(i)
+ qns[n] = b
+ return i
end
function deleteat!(i::QNIndex, pos)
- deleteat!(space(i), pos)
- return i
+ deleteat!(space(i), pos)
+ return i
end
function resize!(i::QNIndex, n::Integer)
- resize!(space(i), n)
- return i
+ resize!(space(i), n)
+ return i
end
function combineblocks(i::QNIndex)
- qnsR, perm, comb = combineblocks(space(i))
- iR = replaceqns(i, qnsR)
- return iR, perm, comb
+ qnsR, perm, comb = combineblocks(space(i))
+ iR = replaceqns(i, qnsR)
+ return iR, perm, comb
end
removeqns(i::QNIndex) = setdir(setspace(i, dim(i)), Neither)
-function QuantumNumbers.removeqn(i::QNIndex, qn_name::String; mergeblocks=true)
- return setspace(i, removeqn(space(i), qn_name; mergeblocks))
+function QuantumNumbers.removeqn(i::QNIndex, qn_name::String; mergeblocks = true)
+ return setspace(i, removeqn(space(i), qn_name; mergeblocks))
end
mergeblocks(i::QNIndex) = setspace(i, mergeblocks(space(i)))
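
A sketch of `removeqn`, `removeqns`, and block merging; the two-symmetry Index is invented, and the calls may need an `ITensors.` qualifier if they are not exported in a given version:

```julia
using ITensors

i = Index(QN(("Sz", 0), ("Nf", 1)) => 2, QN(("Sz", 2), ("Nf", 1)) => 2; tags = "i")

i_noSz = removeqn(i, "Sz")   # drop "Sz"; equal leftover blocks merge by default
i_plain = removeqns(i)       # strip all QN information

@show nblocks(i_noSz)        # 1, after merging the two QN("Nf", 1) blocks
@show dim(i_plain)           # 4
```
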
-function addqns(i::Index, qns::QNBlocks; dir::Arrow=Out)
- @assert dim(i) == dim(qns)
- return setdir(setspace(i, qns), dir)
+function addqns(i::Index, qns::QNBlocks; dir::Arrow = Out)
+ @assert dim(i) == dim(qns)
+ return setdir(setspace(i, qns), dir)
end
function addqns(i::QNIndex, qns::QNBlocks)
- @assert dim(i) == dim(qns)
- @assert nblocks(qns) == nblocks(i)
- iqns = space(i)
- j = copy(i)
- jqn = space(j)
- for n in 1:nblocks(i)
- @assert blockdim(iqns, n) == blockdim(qns, n)
- iqn_n = qn(iqns, n)
- qn_n = qn(qns, n)
- newqn = iqn_n
- for nqv in 1:nactive(qn_n)
- qv = qn_n[nqv]
- newqn = addqnval(newqn, qv)
+ @assert dim(i) == dim(qns)
+ @assert nblocks(qns) == nblocks(i)
+ iqns = space(i)
+ j = copy(i)
+ jqn = space(j)
+ for n in 1:nblocks(i)
+ @assert blockdim(iqns, n) == blockdim(qns, n)
+ iqn_n = qn(iqns, n)
+ qn_n = qn(qns, n)
+ newqn = iqn_n
+ for nqv in 1:nactive(qn_n)
+ qv = qn_n[nqv]
+ newqn = addqnval(newqn, qv)
+ end
+ jqn[n] = newqn => blockdim(iqns, n)
end
- jqn[n] = newqn => blockdim(iqns, n)
- end
- return j
+ return j
end
# Check that the QNs are all the same
function hassameflux(i1::QNIndex, i2::QNIndex)
- dim_i1 = dim(i1)
- dim_i1 ≠ dim(i2) && return false
- for n in 1:dim_i1
- flux(i1 => n) ≠ flux(i2 => n) && return false
- end
- return true
+ dim_i1 = dim(i1)
+ dim_i1 ≠ dim(i2) && return false
+ for n in 1:dim_i1
+ flux(i1 => n) ≠ flux(i2 => n) && return false
+ end
+ return true
end
hassameflux(::QNIndex, ::Index) = false
@@ -523,23 +523,24 @@ splitblocks(i::Index) = setspace(i, splitblocks(space(i)))
trivial_space(i::QNIndex) = [QN() => 1]
-function mutable_storage(::Type{Order{N}}, ::Type{IndexT}) where {N,IndexT<:QNIndex}
- return SizedVector{N,IndexT}(undef)
+function mutable_storage(::Type{Order{N}}, ::Type{IndexT}) where {N, IndexT <: QNIndex}
+ return SizedVector{N, IndexT}(undef)
end
function show(io::IO, i::QNIndex)
- idstr = "$(id(i) % 1000)"
- if length(tags(i)) > 0
- print(
- io,
- "(dim=$(dim(i))|id=$(idstr)|\"$(TagSets.tagstring(tags(i)))\")$(primestring(plev(i)))",
- )
- else
- print(io, "(dim=$(dim(i))|id=$(idstr))$(primestring(plev(i)))")
- end
- println(io, " <$(dir(i))>")
- for (n, qnblock) in enumerate(space(i))
- print(io, " $n: $qnblock")
- n < length(space(i)) && println(io)
- end
+ idstr = "$(id(i) % 1000)"
+ if length(tags(i)) > 0
+ print(
+ io,
+ "(dim=$(dim(i))|id=$(idstr)|\"$(TagSets.tagstring(tags(i)))\")$(primestring(plev(i)))",
+ )
+ else
+ print(io, "(dim=$(dim(i))|id=$(idstr))$(primestring(plev(i)))")
+ end
+ println(io, " <$(dir(i))>")
+ for (n, qnblock) in enumerate(space(i))
+ print(io, " $n: $qnblock")
+ n < length(space(i)) && println(io)
+ end
+ return nothing
end
diff --git a/src/qn/qnindexset.jl b/src/qn/qnindexset.jl
index 22bd8dfdff..f45e2ee5e1 100644
--- a/src/qn/qnindexset.jl
+++ b/src/qn/qnindexset.jl
@@ -1,32 +1,31 @@
-
const QNIndexSet = IndexSet{QNIndex}
-const QNIndices = Union{QNIndexSet,Tuple{Vararg{QNIndex}}}
+const QNIndices = Union{QNIndexSet, Tuple{Vararg{QNIndex}}}
# Get a list of the non-zero blocks given a desired flux
# TODO: make a fillqns(inds::Indices) function that makes all indices
# in inds have the same qns. Then, use a faster comparison:
# ==(flux(inds,block; assume_filled=true), qn; assume_filled=true)
function nzblocks(qn::QN, inds::Indices)
- N = length(inds)
- blocks = Block{N}[]
- for block in eachblock(inds)
- if flux(inds, block) == qn
- push!(blocks, block)
+ N = length(inds)
+ blocks = Block{N}[]
+ for block in eachblock(inds)
+ if flux(inds, block) == qn
+ push!(blocks, block)
+ end
end
- end
- return blocks
+ return blocks
end
function nzdiagblocks(qn::QN, inds::Indices)
- N = length(inds)
- blocks = NTuple{N,Int}[]
- for block in eachdiagblock(inds)
- if flux(inds, block) == qn
- push!(blocks, Tuple(block))
+ N = length(inds)
+ blocks = NTuple{N, Int}[]
+ for block in eachdiagblock(inds)
+ if flux(inds, block) == qn
+ push!(blocks, Tuple(block))
+ end
end
- end
- return blocks
+ return blocks
end
anyfermionic(is::Indices) = any(isfermionic, is)
diff --git a/src/set_operations.jl b/src/set_operations.jl
index 657375d75e..c41a72161d 100644
--- a/src/set_operations.jl
+++ b/src/set_operations.jl
@@ -1,4 +1,3 @@
-
#
# Set operations
# These are custom implementations of the set operations in
@@ -16,18 +15,18 @@
_setdiff(s) = Base.copymutable(s)
_setdiff(s, itrs...) = _setdiff!(Base.copymutable(s), itrs...)
function _setdiff!(s, itrs...)
- for x in itrs
- _setdiff!(s, x)
- end
- return s
+ for x in itrs
+ _setdiff!(s, x)
+ end
+ return s
end
function _setdiff!(s, itr)
- isempty(s) && return s
- for x in itr
- n = findfirst(==(x), s)
- !isnothing(n) && deleteat!(s, n)
- end
- return s
+ isempty(s) && return s
+ for x in itr
+ n = findfirst(==(x), s)
+ !isnothing(n) && deleteat!(s, n)
+ end
+ return s
end
# A version of Base.intersect that scales quadratically in the number of elements
@@ -38,10 +37,10 @@ _intersect(s, itr, itrs...) = _intersect!(_intersect(s, itr), itrs...)
# Is this special case needed, or is `filter!` sufficient?
_intersect(s, itr) = Base.mapfilter(in(itr), push!, s, Base.emptymutable(s, eltype(s)))
function _intersect!(s, itrs...)
- for x in itrs
- _intersect!(s, x)
- end
- return s
+ for x in itrs
+ _intersect!(s, x)
+ end
+ return s
end
_intersect!(s, s2) = filter!(in(s2), s)
@@ -49,42 +48,42 @@ _intersect!(s, s2) = filter!(in(s2), s)
# and assumes the elements of each input set are already unique.
_symdiff(s) = Base.copymutable(s)
function _symdiff(s, itrs...)
- return _symdiff!(Base.emptymutable(s, Base.promote_eltype(s, itrs...)), s, itrs...)
+ return _symdiff!(Base.emptymutable(s, Base.promote_eltype(s, itrs...)), s, itrs...)
end
function _symdiff!(s, itrs...)
- for x in itrs
- _symdiff!(s, x)
- end
- return s
+ for x in itrs
+ _symdiff!(s, x)
+ end
+ return s
end
function _symdiff!(s, itr)
- if isempty(s)
- append!(s, itr)
+ if isempty(s)
+ append!(s, itr)
+ return s
+ end
+ for x in itr
+ n = findfirst(==(x), s)
+ !isnothing(n) ? deleteat!(s, n) : push!(s, x)
+ end
return s
- end
- for x in itr
- n = findfirst(==(x), s)
- !isnothing(n) ? deleteat!(s, n) : push!(s, x)
- end
- return s
end
# A version of Base.union that scales quadratically in the number of elements
# and assumes the elements of each input set are already unique.
_union(s) = Base.copymutable(s)
function _union(s, sets...)
- return _union!(Base.emptymutable(s, Base.promote_eltype(s, sets...)), s, sets...)
+ return _union!(Base.emptymutable(s, Base.promote_eltype(s, sets...)), s, sets...)
end
function _union!(s, sets...)
- for x in sets
- _union!(s, x)
- end
- return s
+ for x in sets
+ _union!(s, x)
+ end
+ return s
end
function _union!(s, itr)
- Base.haslength(itr) && sizehint!(s, length(s) + Int(length(itr))::Int)
- for x in itr
- x ∉ s && push!(s, x)
- end
- return s
+ Base.haslength(itr) && sizehint!(s, length(s) + Int(length(itr))::Int)
+ for x in itr
+ x ∉ s && push!(s, x)
+ end
+ return s
end
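
These underscored helpers are internal, but a short illustrative example of their order-preserving behavior on index collections (they assume each input already contains unique elements):

```julia
using ITensors

i, j, k = Index(2, "i"), Index(3, "j"), Index(4, "k")

ITensors._union([i, j], [j, k])     # [i, j, k], input order preserved
ITensors._setdiff([i, j, k], [j])   # [i, k]
ITensors._symdiff([i, j], [j, k])   # [i, k]
```
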
diff --git a/src/symmetrystyle.jl b/src/symmetrystyle.jl
index 2507d33e04..338b3006b1 100644
--- a/src/symmetrystyle.jl
+++ b/src/symmetrystyle.jl
@@ -1,14 +1,13 @@
-
# Trait to determine if an Index, Index collection, Tensor, or ITensor
# has symmetries
abstract type SymmetryStyle end
function symmetrystyle(T)
- return error("No SymmetryStyle defined for the specified object $T of type $(typeof(T))")
+ return error("No SymmetryStyle defined for the specified object $T of type $(typeof(T))")
end
symmetrystyle(T, S, U, V...)::SymmetryStyle =
- (Base.@_inline_meta; symmetrystyle(symmetrystyle(T), symmetrystyle(S, U, V...)))
+ (Base.@_inline_meta; symmetrystyle(symmetrystyle(T), symmetrystyle(S, U, V...)))
symmetrystyle(T, S)::SymmetryStyle = symmetrystyle(symmetrystyle(T), symmetrystyle(S))
diff --git a/src/tensor_operations/contraction_cost.jl b/src/tensor_operations/contraction_cost.jl
index 508687a9b9..6936f21079 100644
--- a/src/tensor_operations/contraction_cost.jl
+++ b/src/tensor_operations/contraction_cost.jl
@@ -1,4 +1,3 @@
-
left_associative_contraction_sequence(N::Integer) = reduce((x, y) -> Any[y, x], 1:N)
left_associative_contraction_sequence(A) = left_associative_contraction_sequence(length(A))
@@ -16,22 +15,22 @@ contraction.
If no sequence is specified, left associative contraction is used, in other words the sequence
is equivalent to `[[[[1, 2], 3], 4], …]`.
"""
-function contraction_cost(A; sequence=left_associative_contraction_sequence(A))
- pairwise_costs = Number[]
- _contraction_cost!(pairwise_costs, A, sequence)
- return pairwise_costs
+function contraction_cost(A; sequence = left_associative_contraction_sequence(A))
+ pairwise_costs = Number[]
+ _contraction_cost!(pairwise_costs, A, sequence)
+ return pairwise_costs
end
function _contraction_cost!(pairwise_costs, A, sequence)
- inds1 = _contraction_cost!(pairwise_costs, A, sequence[1])
- inds2 = _contraction_cost!(pairwise_costs, A, sequence[2])
- return _pairwise_contraction_cost!(pairwise_costs, inds1, inds2)
+ inds1 = _contraction_cost!(pairwise_costs, A, sequence[1])
+ inds2 = _contraction_cost!(pairwise_costs, A, sequence[2])
+ return _pairwise_contraction_cost!(pairwise_costs, inds1, inds2)
end
_contraction_cost!(pairwise_costs, As, sequence::Integer) = As[sequence]
function _pairwise_contraction_cost!(pairwise_costs, A1, A2)
- cost = dim(union(A1, A2))
- push!(pairwise_costs, cost)
- return symdiff(A1, A2)
+ cost = dim(union(A1, A2))
+ push!(pairwise_costs, cost)
+ return symdiff(A1, A2)
end
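
A worked example of `contraction_cost` on an invented three-tensor chain, passing the index set of each tensor and using the default left-associative sequence:

```julia
using ITensors

i, j, k, l = Index(10, "i"), Index(20, "j"), Index(30, "k"), Index(40, "l")
A = random_itensor(i, j)
B = random_itensor(j, k)
C = random_itensor(k, l)

# One entry per pairwise contraction; the total cost is their sum.
costs = ITensors.contraction_cost([inds(A), inds(B), inds(C)])
@show costs
@show sum(costs)
```
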
diff --git a/src/tensor_operations/matrix_algebra.jl b/src/tensor_operations/matrix_algebra.jl
index eed71ac72a..1395787fb2 100644
--- a/src/tensor_operations/matrix_algebra.jl
+++ b/src/tensor_operations/matrix_algebra.jl
@@ -1,23 +1,23 @@
# Fix for AD
-function _tr(T::ITensor; plev::Pair{Int,Int}=0 => 1, tags::Pair=ts"" => ts"")
- trpairs = indpairs(T; plev=plev, tags=tags)
- Cᴸ = combiner(first.(trpairs))
- Cᴿ = combiner(last.(trpairs))
- Tᶜ = T * Cᴸ * Cᴿ
- cᴸ = uniqueind(Cᴸ, T)
- cᴿ = uniqueind(Cᴿ, T)
- Tᶜ *= δ(eltype(T), dag((cᴸ, cᴿ)))
- if order(Tᶜ) == 0
- return Tᶜ[]
- end
- return Tᶜ
+function _tr(T::ITensor; plev::Pair{Int, Int} = 0 => 1, tags::Pair = ts"" => ts"")
+ trpairs = indpairs(T; plev = plev, tags = tags)
+ Cᴸ = combiner(first.(trpairs))
+ Cᴿ = combiner(last.(trpairs))
+ Tᶜ = T * Cᴸ * Cᴿ
+ cᴸ = uniqueind(Cᴸ, T)
+ cᴿ = uniqueind(Cᴿ, T)
+ Tᶜ *= δ(eltype(T), dag((cᴸ, cᴿ)))
+ if order(Tᶜ) == 0
+ return Tᶜ[]
+ end
+ return Tᶜ
end
# Trace an ITensor over pairs of indices determined by
# the prime levels and tags. Indices that are not in pairs
# are not traced over, corresponding to a "batched" trace.
function tr(T::ITensor; kwargs...)
- return _tr(T; kwargs...)
+ return _tr(T; kwargs...)
end
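
An example of the batched trace described above; the indices are invented and the default prime-level pairing (`0 => 1`) is used:

```julia
using ITensors, LinearAlgebra

i, j = Index(3, "i"), Index(4, "j")
A = random_itensor(i', j', i, j)

# Traces over the (i, i') and (j, j') pairs and returns a scalar here;
# indices without a partner would be left open ("batched" trace).
@show tr(A)
```
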
"""
@@ -36,83 +36,83 @@ length in the dense case), and appear in `A` with opposite directions.
When `ishermitian=true` the exponential of `Hermitian(A_{lr})` is
computed internally.
"""
-function exp(A::ITensor, Linds, Rinds; ishermitian=false)
- @debug_check begin
- if hasqns(A)
- @assert flux(A) == QN()
+function exp(A::ITensor, Linds, Rinds; ishermitian = false)
+ @debug_check begin
+ if hasqns(A)
+ @assert flux(A) == QN()
+ end
end
- end
- NL = length(Linds)
- NR = length(Rinds)
- NL != NR && error("Must have equal number of left and right indices")
- ndims(A) != NL + NR &&
- error("Number of left and right indices must add up to total number of indices")
+ NL = length(Linds)
+ NR = length(Rinds)
+ NL != NR && error("Must have equal number of left and right indices")
+ ndims(A) != NL + NR &&
+ error("Number of left and right indices must add up to total number of indices")
- # Replace Linds and Rinds with index sets having
- # same id's but arrow directions as on A
- Linds = permute(commoninds(A, Linds), Linds)
- Rinds = permute(commoninds(A, Rinds), Rinds)
+ # Replace Linds and Rinds with index sets having
+ # same id's but arrow directions as on A
+ Linds = permute(commoninds(A, Linds), Linds)
+ Rinds = permute(commoninds(A, Rinds), Rinds)
- # Ensure indices have correct directions, QNs, etc.
- for (l, r) in zip(Linds, Rinds)
- if space(l) != space(r)
- error("In exp, indices must come in pairs with equal spaces.")
- end
- if hasqns(A) && dir(l) == dir(r)
- error("In exp, indices must come in pairs with opposite directions")
+ # Ensure indices have correct directions, QNs, etc.
+ for (l, r) in zip(Linds, Rinds)
+ if space(l) != space(r)
+ error("In exp, indices must come in pairs with equal spaces.")
+ end
+ if hasqns(A) && dir(l) == dir(r)
+ error("In exp, indices must come in pairs with opposite directions")
+ end
end
- end
- #
- fermionic_itensor = using_auto_fermion() && has_fermionic_subspaces(inds(A))
- if fermionic_itensor
- # If fermionic, bring indices into i',j',..,dag(j),dag(i)
- # ordering with Out indices coming before In indices
- # Resulting tensor acts like a normal matrix (no extra signs
- # when taking powers A^n)
- if all(j->dir(j)==Out, Linds) && all(j->dir(j)==In, Rinds)
- ordered_inds = [Linds..., reverse(Rinds)...]
- elseif all(j->dir(j)==Out, Rinds) && all(j->dir(j)==In, Linds)
- ordered_inds = [Rinds..., reverse(Linds)...]
- else
- error(
- "For fermionic exp, Linds and Rinds must have same directions within each set. Got dir.(Linds)=",
- dir.(Linds),
- ", dir.(Rinds)=",
- dir.(Rinds),
- )
+ #
+ fermionic_itensor = using_auto_fermion() && has_fermionic_subspaces(inds(A))
+ if fermionic_itensor
+ # If fermionic, bring indices into i',j',..,dag(j),dag(i)
+ # ordering with Out indices coming before In indices
+ # Resulting tensor acts like a normal matrix (no extra signs
+ # when taking powers A^n)
+ if all(j -> dir(j) == Out, Linds) && all(j -> dir(j) == In, Rinds)
+ ordered_inds = [Linds..., reverse(Rinds)...]
+ elseif all(j -> dir(j) == Out, Rinds) && all(j -> dir(j) == In, Linds)
+ ordered_inds = [Rinds..., reverse(Linds)...]
+ else
+ error(
+ "For fermionic exp, Linds and Rinds must have same directions within each set. Got dir.(Linds)=",
+ dir.(Linds),
+ ", dir.(Rinds)=",
+ dir.(Rinds),
+ )
+ end
+ A = permute(A, ordered_inds)
+ # A^n now sign free, ok to temporarily disable fermion system
+ disable_auto_fermion()
end
- A = permute(A, ordered_inds)
- # A^n now sign free, ok to temporarily disable fermion system
- disable_auto_fermion()
- end
- CL = combiner(Linds...; dir=Out)
- CR = combiner(Rinds...; dir=In)
- AC = (A * CR) * CL
- expAT = ishermitian ? exp(Hermitian(tensor(AC))) : exp(tensor(AC))
- expA = (itensor(expAT) * dag(CR)) * dag(CL)
+ CL = combiner(Linds...; dir = Out)
+ CR = combiner(Rinds...; dir = In)
+ AC = (A * CR) * CL
+ expAT = ishermitian ? exp(Hermitian(tensor(AC))) : exp(tensor(AC))
+ expA = (itensor(expAT) * dag(CR)) * dag(CL)
- #
- if fermionic_itensor
- # Ensure expA indices in "matrix" form before re-enabling fermion system
- expA = permute(expA, ordered_inds)
- enable_auto_fermion()
- end
+ #
+ if fermionic_itensor
+ # Ensure expA indices in "matrix" form before re-enabling fermion system
+ expA = permute(expA, ordered_inds)
+ enable_auto_fermion()
+ end
- return expA
+ return expA
end
function exp(A::ITensor; kwargs...)
- Ris = filterinds(A; plev=0)
- Lis = dag.(prime.(Ris))
- return exp(A, Lis, Ris; kwargs...)
+ Ris = filterinds(A; plev = 0)
+ Lis = dag.(prime.(Ris))
+ return exp(A, Lis, Ris; kwargs...)
end
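
A sketch of `exp` on an invented Hermitian one-site "matrix" ITensor, using the default prime-level convention:

```julia
using ITensors

i = Index(2, "i")
A = random_itensor(i', i)
H = (A + swapprime(dag(A), 0, 1)) / 2   # Hermitian part of A

# With no index arguments, primed indices act as rows and unprimed as columns;
# `ishermitian = true` routes through a Hermitian eigendecomposition internally.
expH = exp(H; ishermitian = true)
```
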
using NDTensors: NDTensors, map_diag, map_diag!
function NDTensors.map_diag!(f::Function, it_destination::ITensor, it_source::ITensor)
- map_diag!(f, tensor(it_destination), tensor(it_source))
- return it_destination
+ map_diag!(f, tensor(it_destination), tensor(it_source))
+ return it_destination
end
NDTensors.map_diag(f::Function, it::ITensor) = itensor(map_diag(f, tensor(it)))
diff --git a/src/tensor_operations/matrix_decomposition.jl b/src/tensor_operations/matrix_decomposition.jl
index 50d061b023..10f2f436a6 100644
--- a/src/tensor_operations/matrix_decomposition.jl
+++ b/src/tensor_operations/matrix_decomposition.jl
@@ -7,12 +7,12 @@ ITensor factorization type for a truncated singular-value
decomposition, returned by `svd`.
"""
struct TruncSVD
- U::ITensor
- S::ITensor
- V::ITensor
- spec::Spectrum
- u::Index
- v::Index
+ U::ITensor
+ S::ITensor
+ V::ITensor
+ spec::Spectrum
+ u::Index
+ v::Index
end
# iteration for destructuring into components `U,S,V,spec,u,v = S`
@@ -108,95 +108,95 @@ Utrunc2, Strunc2, Vtrunc2 = svd(A, i, k; cutoff=1e-10);
See also: [`factorize`](@ref), [`eigen`](@ref)
"""
function svd(
- A::ITensor,
- Linds...;
- leftdir=nothing,
- rightdir=nothing,
- lefttags=nothing,
- righttags=nothing,
- mindim=nothing,
- maxdim=nothing,
- cutoff=nothing,
- alg=nothing,
- use_absolute_cutoff=nothing,
- use_relative_cutoff=nothing,
- min_blockdim=nothing,
- # Deprecated
- utags=lefttags,
- vtags=righttags,
-)
- lefttags = NDTensors.replace_nothing(lefttags, ts"Link,u")
- righttags = NDTensors.replace_nothing(righttags, ts"Link,v")
-
- # Deprecated
- utags = NDTensors.replace_nothing(utags, ts"Link,u")
- vtags = NDTensors.replace_nothing(vtags, ts"Link,v")
-
- Lis = commoninds(A, indices(Linds...))
- Ris = uniqueinds(A, Lis)
-
- Lis_original = Lis
- Ris_original = Ris
- if isempty(Lis_original)
- α = trivial_index(Ris)
- vLα = onehot(datatype(A), α => 1)
- A *= vLα
- Lis = [α]
- end
- if isempty(Ris_original)
- α = trivial_index(Lis)
- vRα = onehot(datatype(A), α => 1)
- A *= vRα
- Ris = [α]
- end
-
- CL = combiner(Lis...; dir=leftdir)
- CR = combiner(Ris...; dir=rightdir)
- AC = A * CR * CL
- cL = combinedind(CL)
- cR = combinedind(CR)
- if inds(AC) != (cL, cR)
- AC = permute(AC, cL, cR)
- end
-
- USVT = svd(
- tensor(AC);
- mindim,
- maxdim,
- cutoff,
- alg,
- use_absolute_cutoff,
- use_relative_cutoff,
- min_blockdim,
- )
- if isnothing(USVT)
- return nothing
- end
- UT, ST, VT, spec = USVT
- UC, S, VC = itensor(UT), itensor(ST), itensor(VT)
-
- u = commonind(S, UC)
- v = commonind(S, VC)
-
- U = UC * dag(CL)
- V = VC * dag(CR)
-
- U = settags(U, utags, u)
- S = settags(S, utags, u)
- S = settags(S, vtags, v)
- V = settags(V, vtags, v)
-
- u = settags(u, utags)
- v = settags(v, vtags)
-
- if isempty(Lis_original)
- U *= dag(vLα)
- end
- if isempty(Ris_original)
- V *= dag(vRα)
- end
-
- return TruncSVD(U, S, V, spec, u, v)
+ A::ITensor,
+ Linds...;
+ leftdir = nothing,
+ rightdir = nothing,
+ lefttags = nothing,
+ righttags = nothing,
+ mindim = nothing,
+ maxdim = nothing,
+ cutoff = nothing,
+ alg = nothing,
+ use_absolute_cutoff = nothing,
+ use_relative_cutoff = nothing,
+ min_blockdim = nothing,
+ # Deprecated
+ utags = lefttags,
+ vtags = righttags,
+ )
+ lefttags = NDTensors.replace_nothing(lefttags, ts"Link,u")
+ righttags = NDTensors.replace_nothing(righttags, ts"Link,v")
+
+ # Deprecated
+ utags = NDTensors.replace_nothing(utags, ts"Link,u")
+ vtags = NDTensors.replace_nothing(vtags, ts"Link,v")
+
+ Lis = commoninds(A, indices(Linds...))
+ Ris = uniqueinds(A, Lis)
+
+ Lis_original = Lis
+ Ris_original = Ris
+ if isempty(Lis_original)
+ α = trivial_index(Ris)
+ vLα = onehot(datatype(A), α => 1)
+ A *= vLα
+ Lis = [α]
+ end
+ if isempty(Ris_original)
+ α = trivial_index(Lis)
+ vRα = onehot(datatype(A), α => 1)
+ A *= vRα
+ Ris = [α]
+ end
+
+ CL = combiner(Lis...; dir = leftdir)
+ CR = combiner(Ris...; dir = rightdir)
+ AC = A * CR * CL
+ cL = combinedind(CL)
+ cR = combinedind(CR)
+ if inds(AC) != (cL, cR)
+ AC = permute(AC, cL, cR)
+ end
+
+ USVT = svd(
+ tensor(AC);
+ mindim,
+ maxdim,
+ cutoff,
+ alg,
+ use_absolute_cutoff,
+ use_relative_cutoff,
+ min_blockdim,
+ )
+ if isnothing(USVT)
+ return nothing
+ end
+ UT, ST, VT, spec = USVT
+ UC, S, VC = itensor(UT), itensor(ST), itensor(VT)
+
+ u = commonind(S, UC)
+ v = commonind(S, VC)
+
+ U = UC * dag(CL)
+ V = VC * dag(CR)
+
+ U = settags(U, utags, u)
+ S = settags(S, utags, u)
+ S = settags(S, vtags, v)
+ V = settags(V, vtags, v)
+
+ u = settags(u, utags)
+ v = settags(v, vtags)
+
+ if isempty(Lis_original)
+ U *= dag(vLα)
+ end
+ if isempty(Ris_original)
+ V *= dag(vRα)
+ end
+
+ return TruncSVD(U, S, V, spec, u, v)
end
svd(A::ITensor; kwargs...) = error("Must specify indices in `svd`")
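
A usage example for the truncated SVD interface above, including the six-way destructuring; the tensor and truncation parameters are invented:

```julia
using ITensors, LinearAlgebra

i, j, k = Index(4, "i"), Index(4, "j"), Index(4, "k")
A = random_itensor(i, j, k)

# `i, j` form the row space; singular values below `cutoff` are discarded.
U, S, V, spec, u, v = svd(A, i, j; cutoff = 1.0e-10, maxdim = 8)
@show A ≈ U * S * V
@show dim(u)   # dimension of the new link index between U and S
```
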
@@ -208,12 +208,12 @@ ITensor factorization type for a truncated eigenvalue
decomposition, returned by `eigen`.
"""
struct TruncEigen
- D::ITensor
- V::ITensor
- Vt::ITensor
- spec::Spectrum
- l::Index
- r::Index
+ D::ITensor
+ V::ITensor
+ Vt::ITensor
+ spec::Spectrum
+ l::Index
+ r::Index
end
# iteration for destructuring into components `D, V, spec, l, r = E`
@@ -289,131 +289,131 @@ A * U ≈ Ul * D # true
See also: [`svd`](@ref), [`factorize`](@ref)
"""
function eigen(
- A::ITensor,
- Linds,
- Rinds;
- mindim=nothing,
- maxdim=nothing,
- cutoff=nothing,
- use_absolute_cutoff=nothing,
- use_relative_cutoff=nothing,
- ishermitian=nothing,
- tags=nothing,
- lefttags=nothing,
- righttags=nothing,
- plev=nothing,
- leftplev=nothing,
- rightplev=nothing,
-)
- ishermitian = NDTensors.replace_nothing(ishermitian, false)
- tags = NDTensors.replace_nothing(tags, ts"Link,eigen")
- lefttags = NDTensors.replace_nothing(lefttags, tags)
- righttags = NDTensors.replace_nothing(righttags, tags)
- plev = NDTensors.replace_nothing(plev, 0)
- leftplev = NDTensors.replace_nothing(leftplev, plev)
- rightplev = NDTensors.replace_nothing(rightplev, plev)
-
- @debug_check begin
- if hasqns(A)
- @assert flux(A) == QN()
+ A::ITensor,
+ Linds,
+ Rinds;
+ mindim = nothing,
+ maxdim = nothing,
+ cutoff = nothing,
+ use_absolute_cutoff = nothing,
+ use_relative_cutoff = nothing,
+ ishermitian = nothing,
+ tags = nothing,
+ lefttags = nothing,
+ righttags = nothing,
+ plev = nothing,
+ leftplev = nothing,
+ rightplev = nothing,
+ )
+ ishermitian = NDTensors.replace_nothing(ishermitian, false)
+ tags = NDTensors.replace_nothing(tags, ts"Link,eigen")
+ lefttags = NDTensors.replace_nothing(lefttags, tags)
+ righttags = NDTensors.replace_nothing(righttags, tags)
+ plev = NDTensors.replace_nothing(plev, 0)
+ leftplev = NDTensors.replace_nothing(leftplev, plev)
+ rightplev = NDTensors.replace_nothing(rightplev, plev)
+
+ @debug_check begin
+ if hasqns(A)
+ @assert flux(A) == QN()
+ end
end
- end
-
- N = ndims(A)
- NL = length(Linds)
- NR = length(Rinds)
- NL != NR && error("Must have equal number of left and right indices")
- N != NL + NR &&
- error("Number of left and right indices must add up to total number of indices")
-
- if lefttags == righttags && leftplev == rightplev
- leftplev = rightplev + 1
- end
-
- # Linds, Rinds may not have the correct directions
- Lis = indices(Linds)
- Ris = indices(Rinds)
-
- # Ensure the indices have the correct directions,
- # QNs, etc.
- # First grab the indices in A, then permute them
- # correctly.
- Lis = permute(commoninds(A, Lis), Lis)
- Ris = permute(commoninds(A, Ris), Ris)
-
- for (l, r) in zip(Lis, Ris)
- if space(l) != space(r)
- error("In eigen, indices must come in pairs with equal spaces.")
+
+ N = ndims(A)
+ NL = length(Linds)
+ NR = length(Rinds)
+ NL != NR && error("Must have equal number of left and right indices")
+ N != NL + NR &&
+ error("Number of left and right indices must add up to total number of indices")
+
+ if lefttags == righttags && leftplev == rightplev
+ leftplev = rightplev + 1
end
- if hasqns(A)
- if dir(l) == dir(r)
- error("In eigen, indices must come in pairs with opposite directions")
- end
+
+ # Linds, Rinds may not have the correct directions
+ Lis = indices(Linds)
+ Ris = indices(Rinds)
+
+ # Ensure the indices have the correct directions,
+ # QNs, etc.
+ # First grab the indices in A, then permute them
+ # correctly.
+ Lis = permute(commoninds(A, Lis), Lis)
+ Ris = permute(commoninds(A, Ris), Ris)
+
+ for (l, r) in zip(Lis, Ris)
+ if space(l) != space(r)
+ error("In eigen, indices must come in pairs with equal spaces.")
+ end
+ if hasqns(A)
+ if dir(l) == dir(r)
+ error("In eigen, indices must come in pairs with opposite directions")
+ end
+ end
end
- end
-
- #
- L_arrow_dir = Out
- if hasqns(A) && using_auto_fermion()
- # Make arrows of combined ITensor match those of index sets
- if all(i -> dir(i) == Out, Lis) && all(i -> dir(i) == In, Ris)
- L_arrow_dir = Out
- elseif all(i -> dir(i) == In, Lis) && all(i -> dir(i) == Out, Ris)
- L_arrow_dir = In
- else
- error(
- "With auto_fermion enabled, index sets in eigen must have all arrows the same, and opposite between the sets",
- )
+
+ #
+ L_arrow_dir = Out
+ if hasqns(A) && using_auto_fermion()
+ # Make arrows of combined ITensor match those of index sets
+ if all(i -> dir(i) == Out, Lis) && all(i -> dir(i) == In, Ris)
+ L_arrow_dir = Out
+ elseif all(i -> dir(i) == In, Lis) && all(i -> dir(i) == Out, Ris)
+ L_arrow_dir = In
+ else
+ error(
+ "With auto_fermion enabled, index sets in eigen must have all arrows the same, and opposite between the sets",
+ )
+ end
end
- end
- CL = combiner(Lis...; dir=L_arrow_dir, tags="CMB,left")
- CR = combiner(dag(Ris)...; dir=L_arrow_dir, tags="CMB,right")
+ CL = combiner(Lis...; dir = L_arrow_dir, tags = "CMB,left")
+ CR = combiner(dag(Ris)...; dir = L_arrow_dir, tags = "CMB,right")
- AC = A * dag(CR) * CL
+ AC = A * dag(CR) * CL
- cL = combinedind(CL)
- cR = dag(combinedind(CR))
- if inds(AC) != (cL, cR)
- AC = permute(AC, cL, cR)
- end
+ cL = combinedind(CL)
+ cR = dag(combinedind(CR))
+ if inds(AC) != (cL, cR)
+ AC = permute(AC, cL, cR)
+ end
- AT = ishermitian ? Hermitian(tensor(AC)) : tensor(AC)
+ AT = ishermitian ? Hermitian(tensor(AC)) : tensor(AC)
- DT, VT, spec = eigen(AT; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff)
- D, VC = itensor(DT), itensor(VT)
+ DT, VT, spec = eigen(AT; mindim, maxdim, cutoff, use_absolute_cutoff, use_relative_cutoff)
+ D, VC = itensor(DT), itensor(VT)
- V = VC * dag(CR)
+ V = VC * dag(CR)
- # Set right index tags
- l = uniqueind(D, V)
- r = commonind(D, V)
- l̃ = setprime(settags(l, lefttags), leftplev)
- r̃ = setprime(settags(l̃, righttags), rightplev)
+ # Set right index tags
+ l = uniqueind(D, V)
+ r = commonind(D, V)
+ l̃ = setprime(settags(l, lefttags), leftplev)
+ r̃ = setprime(settags(l̃, righttags), rightplev)
- replaceinds!(D, (l, r), (l̃, r̃))
- replaceind!(V, r, r̃)
+ replaceinds!(D, (l, r), (l̃, r̃))
+ replaceind!(V, r, r̃)
- l, r = l̃, r̃
+ l, r = l̃, r̃
- # The right eigenvectors, after being applied to A
- Vt = replaceinds(V, (Ris..., r), (Lis..., l))
+ # The right eigenvectors, after being applied to A
+ Vt = replaceinds(V, (Ris..., r), (Lis..., l))
- @debug_check begin
- if hasqns(A)
- @assert flux(D) == QN()
- @assert flux(V) == QN()
- @assert flux(Vt) == QN()
+ @debug_check begin
+ if hasqns(A)
+ @assert flux(D) == QN()
+ @assert flux(V) == QN()
+ @assert flux(Vt) == QN()
+ end
end
- end
- return TruncEigen(D, V, Vt, spec, l, r)
+ return TruncEigen(D, V, Vt, spec, l, r)
end
function eigen(A::ITensor; kwargs...)
- Ris = filterinds(A; plev=0)
- Lis = Ris'
- return eigen(A, Lis, Ris; kwargs...)
+ Ris = filterinds(A; plev = 0)
+ Lis = Ris'
+ return eigen(A, Lis, Ris; kwargs...)
end
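
A sketch of the ITensor `eigen` interface above on an invented Hermitian input, using the default prime-level convention:

```julia
using ITensors, LinearAlgebra

i = Index(3, "i")
A = random_itensor(i', i)
H = (A + swapprime(dag(A), 0, 1)) / 2

# With no index arguments, the unprimed indices form the "right" set and
# their primes the "left" set; D is diagonal and V holds the eigenvectors.
D, V, spec, l, r = eigen(H; ishermitian = true)
@show dim(l), dim(r)
```
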
# ----------------------------- QR/RQ/QL/LQ decompositions ------------------------------
@@ -422,21 +422,21 @@ end
# Helper functions for handling cases where zero indices are requested on Q or R.
#
function add_trivial_index(A::ITensor, Ainds)
- α = trivial_index(Ainds) #If Ainds[1] has no QNs makes Index(1), otherwise Index(QN()=>1)
- vα = onehot(datatype(A), α => 1)
- A *= vα
- return A, vα, [α]
+ α = trivial_index(Ainds) #If Ainds[1] has no QNs makes Index(1), otherwise Index(QN()=>1)
+ vα = onehot(datatype(A), α => 1)
+ A *= vα
+ return A, vα, [α]
end
function add_trivial_index(A::ITensor, Linds, Rinds)
- vαl, vαr = nothing, nothing
- if isempty(Linds)
- A, vαl, Linds = add_trivial_index(A, Rinds)
- end
- if isempty(Rinds)
- A, vαr, Rinds = add_trivial_index(A, Linds)
- end
- return A, vαl, vαr, Linds, Rinds
+ vαl, vαr = nothing, nothing
+ if isempty(Linds)
+ A, vαl, Linds = add_trivial_index(A, Rinds)
+ end
+ if isempty(Rinds)
+ A, vαr, Rinds = add_trivial_index(A, Linds)
+ end
+ return A, vαl, vαr, Linds, Rinds
end
remove_trivial_index(Q::ITensor, R::ITensor, vαl, vαr) = (Q * dag(vαl), R * dag(vαr))
@@ -448,9 +448,9 @@ remove_trivial_index(Q::ITensor, R::ITensor, ::Nothing, ::Nothing) = (Q, R)
# Force users to knowingly ask for zero indices using qr(A,()) syntax
#
function noinds_error_message(decomp::String)
- return "$decomp without any input indices is currently not defined.
- In the future it may be defined as performing a $decomp decomposition
- treating the ITensor as a matrix from the primed to the unprimed indices."
+ return "$decomp without any input indices is currently not defined.
+ In the future it may be defined as performing a $decomp decomposition
+ treating the ITensor as a matrix from the primed to the unprimed indices."
end
qr(A::ITensor; kwargs...) = error(noinds_error_message("qr"))
@@ -475,63 +475,63 @@ lq(A::ITensor, Linds...; kwargs...) = lq(A, Linds, uniqueinds(A, Linds); kwargs.
# Core function where both left and right indices are supplied as tuples or vectors
# Handle default tags and dispatch to generic qx/xq functions.
#
-function qr(A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,qr", kwargs...)
- return qx(qr, A, Linds, Rinds; tags, kwargs...)
+function qr(A::ITensor, Linds::Indices, Rinds::Indices; tags = ts"Link,qr", kwargs...)
+ return qx(qr, A, Linds, Rinds; tags, kwargs...)
end
-function ql(A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,ql", kwargs...)
- return qx(ql, A, Linds, Rinds; tags, kwargs...)
+function ql(A::ITensor, Linds::Indices, Rinds::Indices; tags = ts"Link,ql", kwargs...)
+ return qx(ql, A, Linds, Rinds; tags, kwargs...)
end
-function rq(A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,rq", kwargs...)
- return xq(ql, A, Linds, Rinds; tags, kwargs...)
+function rq(A::ITensor, Linds::Indices, Rinds::Indices; tags = ts"Link,rq", kwargs...)
+ return xq(ql, A, Linds, Rinds; tags, kwargs...)
end
-function lq(A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,lq", kwargs...)
- return xq(qr, A, Linds, Rinds; tags, kwargs...)
+function lq(A::ITensor, Linds::Indices, Rinds::Indices; tags = ts"Link,lq", kwargs...)
+ return xq(qr, A, Linds, Rinds; tags, kwargs...)
end
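
A usage example for the `qr` wrapper above (`ql`, `rq`, and `lq` follow the same pattern); the tensor and index grouping are invented:

```julia
using ITensors, LinearAlgebra

i, j, k = Index(2, "i"), Index(3, "j"), Index(4, "k")
A = random_itensor(i, j, k)

# `i, j` are grouped into Q; `q` is the new link index carrying the default tags.
Q, R, q = qr(A, i, j)
@show A ≈ Q * R
@show hastags(q, "Link,qr")
```
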
#
# Generic function implementing both qr and ql decomposition. The X tensor = R or L.
#
function qx(
- qx::Function, A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,qx", positive=false
-)
- # Strip out any extra indices that are not in A.
- # Unit test test/base/test_itensor.jl line 1469 will fail without this.
- Linds = commoninds(A, Linds)
- #Rinds=commoninds(A,Rinds) #if the user supplied Rinds they could have the same problem?
- #
- # Make a dummy index with dim=1 and incorporate into A so the Linds & Rinds can never
- # be empty. A essentially becomes 1D after collection.
- #
- A, vαl, vαr, Linds, Rinds = add_trivial_index(A, Linds, Rinds)
-
- #
- # Use combiners to render A down to a rank 2 tensor ready for matrix QR/QL routine.
- #
- CL, CR = combiner(Linds...), combiner(Rinds...)
- cL, cR = combinedind(CL), combinedind(CR)
- AC = A * CR * CL
-
- #
- # Make sure we don't accidentally pass the transpose into the matrix qr/ql routine.
- #
- AC = permute(AC, cL, cR; allow_alias=true)
-
- QT, XT = qx(tensor(AC); positive) #pass order(AC)==2 matrix down to the NDTensors level where qr/ql are implemented.
- #
- # Undo the combine oepration, to recover all tensor indices.
- #
- Q, X = itensor(QT) * dag(CL), itensor(XT) * dag(CR)
-
- # Remove dummy indices. No-op if vαl and vαr are Nothing
- Q, X = remove_trivial_index(Q, X, vαl, vαr)
- #
- # fix up the tag name for the index between Q and X.
- #
- q = commonind(Q, X)
- Q = settags(Q, tags, q)
- X = settags(X, tags, q)
- q = settags(q, tags)
-
- return Q, X, q
+ qx::Function, A::ITensor, Linds::Indices, Rinds::Indices; tags = ts"Link,qx", positive = false
+ )
+ # Strip out any extra indices that are not in A.
+ # Unit test test/base/test_itensor.jl line 1469 will fail without this.
+ Linds = commoninds(A, Linds)
+ #Rinds=commoninds(A,Rinds) #if the user supplied Rinds they could have the same problem?
+ #
+ # Make a dummy index with dim=1 and incorporate into A so the Linds & Rinds can never
+ # be empty. A essentially becomes 1D after collection.
+ #
+ A, vαl, vαr, Linds, Rinds = add_trivial_index(A, Linds, Rinds)
+
+ #
+ # Use combiners to render A down to a rank 2 tensor ready for matrix QR/QL routine.
+ #
+ CL, CR = combiner(Linds...), combiner(Rinds...)
+ cL, cR = combinedind(CL), combinedind(CR)
+ AC = A * CR * CL
+
+ #
+ # Make sure we don't accidentally pass the transpose into the matrix qr/ql routine.
+ #
+ AC = permute(AC, cL, cR; allow_alias = true)
+
+ QT, XT = qx(tensor(AC); positive) #pass order(AC)==2 matrix down to the NDTensors level where qr/ql are implemented.
+ #
+    # Undo the combine operation, to recover all tensor indices.
+ #
+ Q, X = itensor(QT) * dag(CL), itensor(XT) * dag(CR)
+
+ # Remove dummy indices. No-op if vαl and vαr are Nothing
+ Q, X = remove_trivial_index(Q, X, vαl, vαr)
+ #
+ # fix up the tag name for the index between Q and X.
+ #
+ q = commonind(Q, X)
+ Q = settags(Q, tags, q)
+ X = settags(X, tags, q)
+ q = settags(q, tags)
+
+ return Q, X, q
end
#
@@ -539,17 +539,17 @@ end
# with swapping the left and right indices. The X tensor = R or L.
#
function xq(
- qx::Function, A::ITensor, Linds::Indices, Rinds::Indices; tags=ts"Link,xq", positive=false
-)
- Q, X, q = qx(A, Rinds, Linds; positive)
- #
- # fix up the tag name for the index between Q and L.
- #
- Q = settags(Q, tags, q)
- X = settags(X, tags, q)
- q = settags(q, tags)
-
- return X, Q, q
+ qx::Function, A::ITensor, Linds::Indices, Rinds::Indices; tags = ts"Link,xq", positive = false
+ )
+ Q, X, q = qx(A, Rinds, Linds; positive)
+ #
+ # fix up the tag name for the index between Q and L.
+ #
+ Q = settags(Q, tags, q)
+ X = settags(X, tags, q)
+ q = settags(q, tags)
+
+ return X, Q, q
end
polar(A::ITensor; kwargs...) = error(noinds_error_message("polar"))
@@ -557,26 +557,26 @@ polar(A::ITensor; kwargs...) = error(noinds_error_message("polar"))
# TODO: allow custom tags in internal indices?
# TODO: return the new common indices?
function polar(A::ITensor, Linds...)
- U, S, V = svd(A, Linds...)
- u = commoninds(S, U)
- v = commoninds(S, V)
- δᵤᵥ′ = δ(eltype(A), u..., v'...)
- Q = U * δᵤᵥ′ * V'
- P = dag(V') * dag(δᵤᵥ′) * S * V
- return Q, P, commoninds(Q, P)
+ U, S, V = svd(A, Linds...)
+ u = commoninds(S, U)
+ v = commoninds(S, V)
+ δᵤᵥ′ = δ(eltype(A), u..., v'...)
+ Q = U * δᵤᵥ′ * V'
+ P = dag(V') * dag(δᵤᵥ′) * S * V
+ return Q, P, commoninds(Q, P)
end
-function factorize_qr(A::ITensor, Linds...; ortho="left", tags=nothing, positive=false)
- if ortho == "left"
- L, R, q = qr(A, Linds...; tags, positive)
- elseif ortho == "right"
- Lis = uniqueinds(A, indices(Linds...))
- R, L, q = qr(A, Lis...; tags, positive)
- else
- error("In factorize using qr decomposition, ortho keyword
- $ortho not supported. Supported options are left or right.")
- end
- return L, R
+function factorize_qr(A::ITensor, Linds...; ortho = "left", tags = nothing, positive = false)
+ if ortho == "left"
+ L, R, q = qr(A, Linds...; tags, positive)
+ elseif ortho == "right"
+ Lis = uniqueinds(A, indices(Linds...))
+ R, L, q = qr(A, Lis...; tags, positive)
+ else
+ error("In factorize using qr decomposition, ortho keyword
+$ortho not supported. Supported options are left or right.")
+ end
+ return L, R
end
using NDTensors: map_diag!
@@ -584,124 +584,124 @@ using NDTensors: map_diag!
# Generic function implementing a square root decomposition of a diagonal, order 2 tensor with inds u, v
#
function sqrt_decomp(D::ITensor, u::Index, v::Index)
- (storage(D) isa Union{Diag,DiagBlockSparse}) ||
- error("Must be a diagonal matrix ITensor.")
- sqrtDL = adapt(datatype(D), diag_itensor(u, dag(u)'))
- sqrtDR = adapt(datatype(D), diag_itensor(v, dag(v)'))
- map_diag!(sqrt ∘ abs, sqrtDL, D)
- map_diag!(sqrt ∘ abs, sqrtDR, D)
- δᵤᵥ = copy(D)
- map_diag!(sign, δᵤᵥ, D)
- return sqrtDL, prime(δᵤᵥ), sqrtDR
+ (storage(D) isa Union{Diag, DiagBlockSparse}) ||
+ error("Must be a diagonal matrix ITensor.")
+ sqrtDL = adapt(datatype(D), diag_itensor(u, dag(u)'))
+ sqrtDR = adapt(datatype(D), diag_itensor(v, dag(v)'))
+ map_diag!(sqrt ∘ abs, sqrtDL, D)
+ map_diag!(sqrt ∘ abs, sqrtDR, D)
+ δᵤᵥ = copy(D)
+ map_diag!(sign, δᵤᵥ, D)
+ return sqrtDL, prime(δᵤᵥ), sqrtDR
end
function factorize_svd(
- A::ITensor,
- Linds...;
- (singular_values!)=nothing,
- ortho="left",
- alg=nothing,
- dir=nothing,
- mindim=nothing,
- maxdim=nothing,
- cutoff=nothing,
- tags=nothing,
- use_absolute_cutoff=nothing,
- use_relative_cutoff=nothing,
- min_blockdim=nothing,
-)
- leftdir, rightdir = dir, dir
- if !isnothing(leftdir)
- leftdir = -leftdir
- end
- if !isnothing(rightdir)
- rightdir = -rightdir
- end
- USV = svd(
- A,
- Linds...;
- leftdir,
- rightdir,
- alg,
- mindim,
- maxdim,
- cutoff,
- lefttags=tags,
- righttags=tags,
- use_absolute_cutoff,
- use_relative_cutoff,
- min_blockdim,
- )
- if isnothing(USV)
- return nothing
- end
- U, S, V, spec, u, v = USV
- if ortho == "left"
- L, R = U, S * V
- elseif ortho == "right"
- L, R = U * S, V
- elseif ortho == "none"
- sqrtDL, δᵤᵥ, sqrtDR = sqrt_decomp(S, u, v)
- sqrtDR = denseblocks(sqrtDR) * denseblocks(δᵤᵥ)
- L, R = U * sqrtDL, V * sqrtDR
- else
- error("In factorize using svd decomposition, ortho keyword
- $ortho not supported. Supported options are left, right, or none.")
- end
-
- !isnothing(singular_values!) && (singular_values![] = S)
-
- return L, R, spec
+ A::ITensor,
+ Linds...;
+ (singular_values!) = nothing,
+ ortho = "left",
+ alg = nothing,
+ dir = nothing,
+ mindim = nothing,
+ maxdim = nothing,
+ cutoff = nothing,
+ tags = nothing,
+ use_absolute_cutoff = nothing,
+ use_relative_cutoff = nothing,
+ min_blockdim = nothing,
+ )
+ leftdir, rightdir = dir, dir
+ if !isnothing(leftdir)
+ leftdir = -leftdir
+ end
+ if !isnothing(rightdir)
+ rightdir = -rightdir
+ end
+ USV = svd(
+ A,
+ Linds...;
+ leftdir,
+ rightdir,
+ alg,
+ mindim,
+ maxdim,
+ cutoff,
+ lefttags = tags,
+ righttags = tags,
+ use_absolute_cutoff,
+ use_relative_cutoff,
+ min_blockdim,
+ )
+ if isnothing(USV)
+ return nothing
+ end
+ U, S, V, spec, u, v = USV
+ if ortho == "left"
+ L, R = U, S * V
+ elseif ortho == "right"
+ L, R = U * S, V
+ elseif ortho == "none"
+ sqrtDL, δᵤᵥ, sqrtDR = sqrt_decomp(S, u, v)
+ sqrtDR = denseblocks(sqrtDR) * denseblocks(δᵤᵥ)
+ L, R = U * sqrtDL, V * sqrtDR
+ else
+ error("In factorize using svd decomposition, ortho keyword
+$ortho not supported. Supported options are left, right, or none.")
+ end
+
+ !isnothing(singular_values!) && (singular_values![] = S)
+
+ return L, R, spec
end
function factorize_eigen(
- A::ITensor,
- Linds...;
- ortho="left",
- eigen_perturbation=nothing,
- mindim=nothing,
- maxdim=nothing,
- cutoff=nothing,
- tags=nothing,
- use_absolute_cutoff=nothing,
- use_relative_cutoff=nothing,
-)
- if ortho == "left"
- Lis = commoninds(A, indices(Linds...))
- elseif ortho == "right"
- Lis = uniqueinds(A, indices(Linds...))
- else
- error("In factorize using eigen decomposition, ortho keyword
- $ortho not supported. Supported options are left or right.")
- end
- simLis = sim(Lis)
- A2 = A * replaceinds(dag(A), Lis, simLis)
- if !isnothing(eigen_perturbation)
- # This assumes delta_A2 has indices:
- # (Lis..., prime(Lis)...)
- delta_A2 = replaceinds(eigen_perturbation, Lis, dag(simLis))
- delta_A2 = noprime(delta_A2)
- A2 += delta_A2
- end
- F = eigen(
- A2,
- Lis,
- simLis;
- ishermitian=true,
- mindim,
- maxdim,
- cutoff,
- tags,
- use_absolute_cutoff,
- use_relative_cutoff,
- )
- D, _, spec = F
- L = F.Vt
- R = dag(L) * A
- if ortho == "right"
- L, R = R, L
- end
- return L, R, spec
+ A::ITensor,
+ Linds...;
+ ortho = "left",
+ eigen_perturbation = nothing,
+ mindim = nothing,
+ maxdim = nothing,
+ cutoff = nothing,
+ tags = nothing,
+ use_absolute_cutoff = nothing,
+ use_relative_cutoff = nothing,
+ )
+ if ortho == "left"
+ Lis = commoninds(A, indices(Linds...))
+ elseif ortho == "right"
+ Lis = uniqueinds(A, indices(Linds...))
+ else
+ error("In factorize using eigen decomposition, ortho keyword
+$ortho not supported. Supported options are left or right.")
+ end
+ simLis = sim(Lis)
+ A2 = A * replaceinds(dag(A), Lis, simLis)
+ if !isnothing(eigen_perturbation)
+ # This assumes delta_A2 has indices:
+ # (Lis..., prime(Lis)...)
+ delta_A2 = replaceinds(eigen_perturbation, Lis, dag(simLis))
+ delta_A2 = noprime(delta_A2)
+ A2 += delta_A2
+ end
+ F = eigen(
+ A2,
+ Lis,
+ simLis;
+ ishermitian = true,
+ mindim,
+ maxdim,
+ cutoff,
+ tags,
+ use_absolute_cutoff,
+ use_relative_cutoff,
+ )
+ D, _, spec = F
+ L = F.Vt
+ R = dag(L) * A
+ if ortho == "right"
+ L, R = R, L
+ end
+ return L, R, spec
end
factorize(A::ITensor; kwargs...) = error(noinds_error_message("factorize"))
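The `sqrt_decomp` helper above backs the `ortho = "none"` branch of `factorize_svd`, which splits the singular values evenly between the two factors. A minimal sketch of that code path, with hypothetical indices:

```julia
using ITensors

i, j, k = Index(2, "i"), Index(3, "j"), Index(4, "k")
A = random_itensor(i, j, k)

# With ortho = "none" neither factor is an isometry; roughly L ~ U * sqrt(S)
# and R ~ sqrt(S) * V†, so the product still recovers A.
L, R = factorize(A, i, j; ortho = "none", which_decomp = "svd")
@show A ≈ L * R
```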
@@ -750,113 +750,119 @@ Perform a factorization of `A` into ITensors `L` and `R` such that `A ≈ L * R`
For truncation arguments, see: [`svd`](@ref)
"""
function factorize(
- A::ITensor,
- Linds...;
- mindim=nothing,
- maxdim=nothing,
- cutoff=nothing,
- ortho=nothing,
- tags=nothing,
- plev=nothing,
- which_decomp=nothing,
- # eigen
- eigen_perturbation=nothing,
- # svd
- svd_alg=nothing,
- use_absolute_cutoff=nothing,
- use_relative_cutoff=nothing,
- min_blockdim=nothing,
- (singular_values!)=nothing,
- dir=nothing,
-)
- @debug_check checkflux(A)
- if !isnothing(eigen_perturbation)
- if !(isnothing(which_decomp) || which_decomp == "eigen")
- error("""when passing a non-trivial eigen_perturbation to `factorize`,
- the which_decomp keyword argument must be either "automatic" or
- "eigen" """)
- end
- which_decomp = "eigen"
- end
- ortho = NDTensors.replace_nothing(ortho, "left")
- tags = NDTensors.replace_nothing(tags, ts"Link,fact")
- plev = NDTensors.replace_nothing(plev, 0)
-
- # Determines when to use eigen vs. svd (eigen is less precise,
- # so eigen should only be used if a larger cutoff is requested)
- automatic_cutoff = 1e-12
- Lis = commoninds(A, indices(Linds...))
- Ris = uniqueinds(A, Lis)
- dL, dR = dim(Lis), dim(Ris)
- if isnothing(eigen_perturbation)
- # maxdim is forced to be at most the max given SVD
- if isnothing(maxdim)
- maxdim = min(dL, dR)
+ A::ITensor,
+ Linds...;
+ mindim = nothing,
+ maxdim = nothing,
+ cutoff = nothing,
+ ortho = nothing,
+ tags = nothing,
+ plev = nothing,
+ which_decomp = nothing,
+ # eigen
+ eigen_perturbation = nothing,
+ # svd
+ svd_alg = nothing,
+ use_absolute_cutoff = nothing,
+ use_relative_cutoff = nothing,
+ min_blockdim = nothing,
+ (singular_values!) = nothing,
+ dir = nothing,
+ )
+ @debug_check checkflux(A)
+ if !isnothing(eigen_perturbation)
+ if !(isnothing(which_decomp) || which_decomp == "eigen")
+ error(
+ """when passing a non-trivial eigen_perturbation to `factorize`,
+ the which_decomp keyword argument must be either "automatic" or
+ "eigen" """
+ )
+ end
+ which_decomp = "eigen"
end
- maxdim = min(maxdim, min(dL, dR))
- else
- if isnothing(maxdim)
- maxdim = max(dL, dR)
+ ortho = NDTensors.replace_nothing(ortho, "left")
+ tags = NDTensors.replace_nothing(tags, ts"Link,fact")
+ plev = NDTensors.replace_nothing(plev, 0)
+
+ # Determines when to use eigen vs. svd (eigen is less precise,
+ # so eigen should only be used if a larger cutoff is requested)
+ automatic_cutoff = 1.0e-12
+ Lis = commoninds(A, indices(Linds...))
+ Ris = uniqueinds(A, Lis)
+ dL, dR = dim(Lis), dim(Ris)
+ if isnothing(eigen_perturbation)
+ # maxdim is forced to be at most the max given SVD
+ if isnothing(maxdim)
+ maxdim = min(dL, dR)
+ end
+ maxdim = min(maxdim, min(dL, dR))
+ else
+ if isnothing(maxdim)
+ maxdim = max(dL, dR)
+ end
+ maxdim = min(maxdim, max(dL, dR))
end
- maxdim = min(maxdim, max(dL, dR))
- end
- might_truncate = !isnothing(cutoff) || maxdim < min(dL, dR)
- if isnothing(which_decomp)
- if !might_truncate && ortho != "none"
- which_decomp = "qr"
- elseif isnothing(cutoff) || cutoff ≤ automatic_cutoff
- which_decomp = "svd"
- elseif cutoff > automatic_cutoff
- which_decomp = "eigen"
+ might_truncate = !isnothing(cutoff) || maxdim < min(dL, dR)
+ if isnothing(which_decomp)
+ if !might_truncate && ortho != "none"
+ which_decomp = "qr"
+ elseif isnothing(cutoff) || cutoff ≤ automatic_cutoff
+ which_decomp = "svd"
+ elseif cutoff > automatic_cutoff
+ which_decomp = "eigen"
+ end
end
- end
- if which_decomp == "svd"
- LR = factorize_svd(
- A,
- Linds...;
- mindim,
- maxdim,
- cutoff,
- tags,
- ortho,
- alg=svd_alg,
- dir,
- singular_values!,
- use_absolute_cutoff,
- use_relative_cutoff,
- min_blockdim,
- )
- if isnothing(LR)
- return nothing
+ if which_decomp == "svd"
+ LR = factorize_svd(
+ A,
+ Linds...;
+ mindim,
+ maxdim,
+ cutoff,
+ tags,
+ ortho,
+ alg = svd_alg,
+ dir,
+ singular_values!,
+ use_absolute_cutoff,
+ use_relative_cutoff,
+ min_blockdim,
+ )
+ if isnothing(LR)
+ return nothing
+ end
+ L, R, spec = LR
+ elseif which_decomp == "eigen"
+ L, R, spec = factorize_eigen(
+ A,
+ Linds...;
+ mindim,
+ maxdim,
+ cutoff,
+ tags,
+ ortho,
+ eigen_perturbation,
+ use_absolute_cutoff,
+ use_relative_cutoff,
+ )
+ elseif which_decomp == "qr"
+ L, R = factorize_qr(A, Linds...; ortho, tags)
+ spec = Spectrum(nothing, 0.0)
+ else
+ throw(
+ ArgumentError(
+ """In factorize, factorization $which_decomp is not
+ currently supported. Use `"svd"`, `"eigen"`, `"qr"` or `nothing`."""
+ )
+ )
end
- L, R, spec = LR
- elseif which_decomp == "eigen"
- L, R, spec = factorize_eigen(
- A,
- Linds...;
- mindim,
- maxdim,
- cutoff,
- tags,
- ortho,
- eigen_perturbation,
- use_absolute_cutoff,
- use_relative_cutoff,
- )
- elseif which_decomp == "qr"
- L, R = factorize_qr(A, Linds...; ortho, tags)
- spec = Spectrum(nothing, 0.0)
- else
- throw(ArgumentError("""In factorize, factorization $which_decomp is not
- currently supported. Use `"svd"`, `"eigen"`, `"qr"` or `nothing`."""))
- end
-
- # Set the tags and prime level
- l = commonind(L, R)
- l̃ = setprime(settags(l, tags), plev)
- L = replaceind(L, l, l̃)
- R = replaceind(R, l, l̃)
- l = l̃
-
- return L, R, spec, l
+
+ # Set the tags and prime level
+ l = commonind(L, R)
+ l̃ = setprime(settags(l, tags), plev)
+ L = replaceind(L, l, l̃)
+ R = replaceind(R, l, l̃)
+ l = l̃
+
+ return L, R, spec, l
end
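As a quick reference for the `factorize` keyword arguments touched above, a minimal sketch with hypothetical indices and illustrative truncation parameters:

```julia
using ITensors

i, j, k = Index(3, "i"), Index(4, "j"), Index(5, "k")
A = random_itensor(i, j, k)

# Left-orthogonal factorization A ≈ L * R with an SVD backend and truncation.
L, R, spec, l = factorize(A, i, j; ortho = "left", which_decomp = "svd",
                          cutoff = 1e-12, maxdim = 10, tags = "Link,fact")
@show A ≈ L * R
@show dim(l) <= 10              # the new common index respects maxdim
@show hastags(l, "Link,fact")   # and carries the requested tags
```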
diff --git a/src/tensor_operations/tensor_algebra.jl b/src/tensor_operations/tensor_algebra.jl
index c70623aac9..9ed8be5ead 100644
--- a/src/tensor_operations/tensor_algebra.jl
+++ b/src/tensor_operations/tensor_algebra.jl
@@ -1,26 +1,26 @@
function _contract(A::Tensor, B::Tensor)
- labelsA, labelsB = compute_contraction_labels(inds(A), inds(B))
- return contract(A, labelsA, B, labelsB)
- # TODO: Alternative to try (`noncommoninds` is too slow right now)
- #return _contract!!(EmptyTensor(Float64, _Tuple(noncommoninds(inds(A), inds(B)))), A, B)
+ labelsA, labelsB = compute_contraction_labels(inds(A), inds(B))
+ return contract(A, labelsA, B, labelsB)
+ # TODO: Alternative to try (`noncommoninds` is too slow right now)
+ #return _contract!!(EmptyTensor(Float64, _Tuple(noncommoninds(inds(A), inds(B)))), A, B)
end
function _contract(A::ITensor, B::ITensor)::ITensor
- C = itensor(_contract(tensor(A), tensor(B)))
- warnTensorOrder = get_warn_order()
- if !isnothing(warnTensorOrder) > 0 && order(C) >= warnTensorOrder
- println("Contraction resulted in ITensor with $(order(C)) indices, which is greater
- than or equal to the ITensor order warning threshold $warnTensorOrder.
- You can modify the threshold with macros like `@set_warn_order N`,
- `@reset_warn_order`, and `@disable_warn_order` or functions like
- `ITensors.set_warn_order(N::Int)`, `ITensors.reset_warn_order()`, and
- `ITensors.disable_warn_order()`.")
- # This prints a vector, not formatted well
- #show(stdout, MIME"text/plain"(), stacktrace())
- Base.show_backtrace(stdout, backtrace())
- println()
- end
- return C
+ C = itensor(_contract(tensor(A), tensor(B)))
+ warnTensorOrder = get_warn_order()
+    if !isnothing(warnTensorOrder) && order(C) >= warnTensorOrder
+ println("Contraction resulted in ITensor with $(order(C)) indices, which is greater
+ than or equal to the ITensor order warning threshold $warnTensorOrder.
+ You can modify the threshold with macros like `@set_warn_order N`,
+ `@reset_warn_order`, and `@disable_warn_order` or functions like
+ `ITensors.set_warn_order(N::Int)`, `ITensors.reset_warn_order()`, and
+ `ITensors.disable_warn_order()`.")
+ # This prints a vector, not formatted well
+ #show(stdout, MIME"text/plain"(), stacktrace())
+ Base.show_backtrace(stdout, backtrace())
+ println()
+ end
+ return C
end
"""
@@ -58,29 +58,29 @@ C = A * B # inner product of A and B, all indices contracted
```
"""
function (A::ITensor * B::ITensor)::ITensor
- return contract(A, B)
+ return contract(A, B)
end
function contract(A::ITensor, B::ITensor)::ITensor
- NA::Int = ndims(A)
- NB::Int = ndims(B)
- if NA == 0 && NB == 0
- return (iscombiner(A) || iscombiner(B)) ? _contract(A, B) : ITensor(A[] * B[])
- elseif NA == 0
- return iscombiner(A) ? _contract(A, B) : A[] * B
- elseif NB == 0
- return iscombiner(B) ? _contract(B, A) : B[] * A
- end
- return _contract(A, B)
+ NA::Int = ndims(A)
+ NB::Int = ndims(B)
+ if NA == 0 && NB == 0
+ return (iscombiner(A) || iscombiner(B)) ? _contract(A, B) : ITensor(A[] * B[])
+ elseif NA == 0
+ return iscombiner(A) ? _contract(A, B) : A[] * B
+ elseif NB == 0
+ return iscombiner(B) ? _contract(B, A) : B[] * A
+ end
+ return _contract(A, B)
end
function default_sequence()
- return using_contraction_sequence_optimization() ? "automatic" : "left_associative"
+ return using_contraction_sequence_optimization() ? "automatic" : "left_associative"
end
-function contraction_cost(As::Union{Vector{<:ITensor},Tuple{Vararg{ITensor}}}; kwargs...)
- indsAs = [inds(A) for A in As]
- return contraction_cost(indsAs; kwargs...)
+function contraction_cost(As::Union{Vector{<:ITensor}, Tuple{Vararg{ITensor}}}; kwargs...)
+ indsAs = [inds(A) for A in As]
+ return contraction_cost(indsAs; kwargs...)
end
# TODO: provide `contractl`/`contractr`/`*ˡ`/`*ʳ` as shorthands for left associative and right associative contractions.
@@ -102,32 +102,32 @@ integers `n` specifying the ITensor `As[n]` and branches are accessed
by indexing with `1` or `2`, i.e. `sequence = Any[Any[1, 3], Any[2, 4]]`.
"""
function contract(tn::AbstractVector; kwargs...)
- return if all(x -> x isa ITensor, tn)
- contract(convert(Vector{ITensor}, tn); kwargs...)
- else
- deepcontract(tn; kwargs...)
- end
+ return if all(x -> x isa ITensor, tn)
+ contract(convert(Vector{ITensor}, tn); kwargs...)
+ else
+ deepcontract(tn; kwargs...)
+ end
end
# Contract a tensor network such as:
# [A, B, [[C, D], [E, [F, G]]]]
deepcontract(t::ITensor, ts::ITensor...) = *(t, ts...)
function deepcontract(tn::AbstractVector)
- return deepcontract(deepcontract.(tn)...)
+ return deepcontract(deepcontract.(tn)...)
end
function contract(
- As::Union{Vector{ITensor},Tuple{Vararg{ITensor}}}; sequence=default_sequence(), kwargs...
-)::ITensor
- if sequence == "left_associative"
- return foldl((A, B) -> contract(A, B; kwargs...), As)
- elseif sequence == "right_associative"
- return foldr((A, B) -> contract(A, B; kwargs...), As)
- elseif sequence == "automatic"
- return _contract(As, optimal_contraction_sequence(As); kwargs...)
- else
- return _contract(As, sequence; kwargs...)
- end
+ As::Union{Vector{ITensor}, Tuple{Vararg{ITensor}}}; sequence = default_sequence(), kwargs...
+ )::ITensor
+ if sequence == "left_associative"
+ return foldl((A, B) -> contract(A, B; kwargs...), As)
+ elseif sequence == "right_associative"
+ return foldr((A, B) -> contract(A, B; kwargs...), As)
+ elseif sequence == "automatic"
+ return _contract(As, optimal_contraction_sequence(As); kwargs...)
+ else
+ return _contract(As, sequence; kwargs...)
+ end
end
"""
@@ -137,12 +137,12 @@ Returns a contraction sequence for contracting the tensors `T`. The sequence is
generally optimal and is found via the optimaltree function in TensorOperations.jl which must be loaded.
"""
function optimal_contraction_sequence(As)
- return throw(
- ArgumentError(
- "Optimal contraction sequence isn't defined. Try loading a backend package like
- TensorOperations.jl"
- ),
- )
+ return throw(
+ ArgumentError(
+ "Optimal contraction sequence isn't defined. Try loading a backend package like
+ TensorOperations.jl"
+ ),
+ )
end
contract(As::ITensor...; kwargs...)::ITensor = contract(As; kwargs...)
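To make the `sequence` keyword concrete, a small sketch with hypothetical indices; note that `sequence = "automatic"` additionally requires a backend such as TensorOperations.jl to be loaded, as the error message above states:

```julia
using ITensors

i, j, k, l = Index(2, "i"), Index(3, "j"), Index(4, "k"), Index(5, "l")
A = random_itensor(i, j)
B = random_itensor(j, k)
C = random_itensor(k, l)

# Default: left-associative pairwise contraction, ((A * B) * C).
ABC = contract([A, B, C])

# The same contraction with an explicit sequence tree: (A * B) first, then C.
ABC2 = contract([A, B, C]; sequence = Any[Any[1, 2], 3])
@show ABC ≈ ABC2
```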
@@ -152,33 +152,33 @@ _contract(As, sequence::Int) = As[sequence]
# Given a contraction sequence, contract the tensors recursively according
# to that sequence.
function _contract(As, sequence::AbstractVector; kwargs...)::ITensor
- return contract(_contract.((As,), sequence)...; kwargs...)
+ return contract(_contract.((As,), sequence)...; kwargs...)
end
*(As::ITensor...; kwargs...)::ITensor = contract(As...; kwargs...)
-function contract!(C::ITensor, A::ITensor, B::ITensor, α::Number, β::Number=0)::ITensor
- labelsCAB = compute_contraction_labels(inds(C), inds(A), inds(B))
- labelsC, labelsA, labelsB = labelsCAB
- CT = NDTensors.contract!!(
- tensor(C), _Tuple(labelsC), tensor(A), _Tuple(labelsA), tensor(B), _Tuple(labelsB), α, β
- )
- setstorage!(C, storage(CT))
- setinds!(C, inds(C))
- return C
+function contract!(C::ITensor, A::ITensor, B::ITensor, α::Number, β::Number = 0)::ITensor
+ labelsCAB = compute_contraction_labels(inds(C), inds(A), inds(B))
+ labelsC, labelsA, labelsB = labelsCAB
+ CT = NDTensors.contract!!(
+ tensor(C), _Tuple(labelsC), tensor(A), _Tuple(labelsA), tensor(B), _Tuple(labelsB), α, β
+ )
+ setstorage!(C, storage(CT))
+ setinds!(C, inds(C))
+ return C
end
function _contract!!(C::Tensor, A::Tensor, B::Tensor)
- labelsCAB = compute_contraction_labels(inds(C), inds(A), inds(B))
- labelsC, labelsA, labelsB = labelsCAB
- CT = NDTensors.contract!!(C, labelsC, A, labelsA, B, labelsB)
- return CT
+ labelsCAB = compute_contraction_labels(inds(C), inds(A), inds(B))
+ labelsC, labelsA, labelsB = labelsCAB
+ CT = NDTensors.contract!!(C, labelsC, A, labelsA, B, labelsB)
+ return CT
end
# This is necessary for now since not all types implement contract!!
# with non-trivial α and β
function contract!(C::ITensor, A::ITensor, B::ITensor)::ITensor
- return settensor!(C, _contract!!(tensor(C), tensor(A), tensor(B)))
+ return settensor!(C, _contract!!(tensor(C), tensor(A), tensor(B)))
end
"""
@@ -191,132 +191,134 @@ Elementwise product of 2 ITensors with the same indices.
Alternative syntax `⊙` can be typed in the REPL with `\\odot `.
"""
function hadamard_product!(R::ITensor, T1::ITensor, T2::ITensor)
- if !hassameinds(T1, T2)
- error("ITensors must have some indices to perform Hadamard product")
- end
- # Permute the indices to the same order
- #if inds(A) ≠ inds(B)
- # B = permute(B, inds(A))
- #end
- #tensor(C) .= tensor(A) .* tensor(B)
- map!((t1, t2) -> *(t1, t2), R, T1, T2)
- return R
+ if !hassameinds(T1, T2)
+ error("ITensors must have some indices to perform Hadamard product")
+ end
+ # Permute the indices to the same order
+ #if inds(A) ≠ inds(B)
+ # B = permute(B, inds(A))
+ #end
+ #tensor(C) .= tensor(A) .* tensor(B)
+ map!((t1, t2) -> *(t1, t2), R, T1, T2)
+ return R
end
# TODO: instead of copy, use promote(A, B)
function hadamard_product(A::ITensor, B::ITensor)
- Ac = copy(A)
- return hadamard_product!(Ac, Ac, B)
+ Ac = copy(A)
+ return hadamard_product!(Ac, Ac, B)
end
⊙(A::ITensor, B::ITensor) = hadamard_product(A, B)
function directsum_projectors!(D1::Tensor, D2::Tensor)
- d1 = size(D1, 1)
- for ii in 1:d1
- D1[ii, ii] = one(eltype(D1))
- end
- d2 = size(D2, 1)
- for jj in 1:d2
- D2[jj, d1 + jj] = one(eltype(D1))
- end
- return D1, D2
+ d1 = size(D1, 1)
+ for ii in 1:d1
+ D1[ii, ii] = one(eltype(D1))
+ end
+ d2 = size(D2, 1)
+ for jj in 1:d2
+ D2[jj, d1 + jj] = one(eltype(D1))
+ end
+ return D1, D2
end
# Helper tensors for performing a partial direct sum
function directsum_projectors(
- elt1::Type{<:Number}, elt2::Type{<:Number}, i::Index, j::Index, ij::Index
-)
- # Ideally we would just use the following but it gives
- # an error that `setindex!` isn't defined:
- # D1 = ITensor(elt1, dag(i), ij)
- # D2 = ITensor(elt1, dag(j), ij)
- # Or with new notation:
- # D1 = zeros(elt1, dag(i), ij)
- # D2 = zeros(elt1, dag(j), ij)
- elt = promote_type(elt1, elt2)
- D1 = zeros_itensor(elt, dag(i), ij)
- D2 = zeros_itensor(elt, dag(j), ij)
- directsum_projectors!(tensor(D1), tensor(D2))
- return D1, D2
+ elt1::Type{<:Number}, elt2::Type{<:Number}, i::Index, j::Index, ij::Index
+ )
+ # Ideally we would just use the following but it gives
+ # an error that `setindex!` isn't defined:
+ # D1 = ITensor(elt1, dag(i), ij)
+ # D2 = ITensor(elt1, dag(j), ij)
+ # Or with new notation:
+ # D1 = zeros(elt1, dag(i), ij)
+ # D2 = zeros(elt1, dag(j), ij)
+ elt = promote_type(elt1, elt2)
+ D1 = zeros_itensor(elt, dag(i), ij)
+ D2 = zeros_itensor(elt, dag(j), ij)
+ directsum_projectors!(tensor(D1), tensor(D2))
+ return D1, D2
end
function directsum_projectors(
- ::Type{<:EmptyNumber}, ::Type{<:EmptyNumber}, ::Index, ::Index, ::Index
-)
- return error(
- "It is not possible to call directsum on two tensors with element type EmptyNumber.
+ ::Type{<:EmptyNumber}, ::Type{<:EmptyNumber}, ::Index, ::Index, ::Index
+ )
+ return error(
+ "It is not possible to call directsum on two tensors with element type EmptyNumber.
If you are inputting ITensors constructed like `ITensor(i, j)`, try specifying the element type,
e.g. `ITensor(Float64, i, j)`, or fill them with zero values, e.g. `ITensor(zero(Float64), i, j)`.",
- )
+ )
end
function check_directsum_inds(A::ITensor, I, B::ITensor, J)
- a = uniqueinds(A, I)
- b = uniqueinds(B, J)
- if !hassameinds(a, b)
- error("""In directsum, attemptying to direct sum ITensors A and B with indices:
+ a = uniqueinds(A, I)
+ b = uniqueinds(B, J)
+ return if !hassameinds(a, b)
+ error(
+            """In directsum, attempting to direct sum ITensors A and B with indices:
- $(inds(A))
+ $(inds(A))
- and
+ and
- $(inds(B))
+ $(inds(B))
- over the indices
+ over the indices
- $(I)
+ $(I)
- and
+ and
- $(J)
+ $(J)
- The indices not being direct summed must match, however they are
+ The indices not being direct summed must match, however they are
- $a
+ $a
- and
+ and
- $b
- """)
- end
+ $b
+ """
+ )
+ end
end
function _directsum(
- IJ::Nothing, A::ITensor, I, B::ITensor, J; tags=default_directsum_tags(A => I)
-)
- N = length(I)
- (N != length(J)) &&
- error("In directsum(::ITensor, ::ITensor, ...), must sum equal number of indices")
- check_directsum_inds(A, I, B, J)
- # Fix the Index direction for QN indices
- # TODO: Define `getfirstind`?
- I = map(In -> getfirst(==(In), inds(A)), I)
- J = map(Jn -> getfirst(==(Jn), inds(B)), J)
- IJ = Vector{Base.promote_eltype(I, J)}(undef, N)
- for n in 1:N
- IJ[n] = directsum(I[n], J[n]; tags=tags[n])
- end
- return _directsum(IJ, A, I, B, J)
-end
-
-function _directsum(IJ, A::ITensor, I, B::ITensor, J; tags=nothing)
- N = length(I)
- (N != length(J)) &&
- error("In directsum(::ITensor, ::ITensor, ...), must sum equal number of indices")
- check_directsum_inds(A, I, B, J)
- # Fix the Index direction for QN indices
- # TODO: Define `getfirstind`?
- I = map(In -> getfirst(==(In), inds(A)), I)
- J = map(Jn -> getfirst(==(Jn), inds(B)), J)
- for n in 1:N
- # TODO: Pass the entire `datatype` instead of just the `eltype`.
- D1, D2 = directsum_projectors(eltype(A), eltype(B), I[n], J[n], IJ[n])
- A *= adapt(datatype(A), D1)
- B *= adapt(datatype(B), D2)
- end
- C = A + B
- return C => IJ
+ IJ::Nothing, A::ITensor, I, B::ITensor, J; tags = default_directsum_tags(A => I)
+ )
+ N = length(I)
+ (N != length(J)) &&
+ error("In directsum(::ITensor, ::ITensor, ...), must sum equal number of indices")
+ check_directsum_inds(A, I, B, J)
+ # Fix the Index direction for QN indices
+ # TODO: Define `getfirstind`?
+ I = map(In -> getfirst(==(In), inds(A)), I)
+ J = map(Jn -> getfirst(==(Jn), inds(B)), J)
+ IJ = Vector{Base.promote_eltype(I, J)}(undef, N)
+ for n in 1:N
+ IJ[n] = directsum(I[n], J[n]; tags = tags[n])
+ end
+ return _directsum(IJ, A, I, B, J)
+end
+
+function _directsum(IJ, A::ITensor, I, B::ITensor, J; tags = nothing)
+ N = length(I)
+ (N != length(J)) &&
+ error("In directsum(::ITensor, ::ITensor, ...), must sum equal number of indices")
+ check_directsum_inds(A, I, B, J)
+ # Fix the Index direction for QN indices
+ # TODO: Define `getfirstind`?
+ I = map(In -> getfirst(==(In), inds(A)), I)
+ J = map(Jn -> getfirst(==(Jn), inds(B)), J)
+ for n in 1:N
+ # TODO: Pass the entire `datatype` instead of just the `eltype`.
+ D1, D2 = directsum_projectors(eltype(A), eltype(B), I[n], J[n], IJ[n])
+ A *= adapt(datatype(A), D1)
+ B *= adapt(datatype(B), D2)
+ end
+ C = A + B
+ return C => IJ
end
to_inds(i::Index) = (i,)
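A minimal sketch of the Hadamard product documented above, with hypothetical indices (`⊙` can be typed in the REPL with `\odot<tab>`):

```julia
using ITensors

i, j = Index(2, "i"), Index(3, "j")
A = random_itensor(i, j)
B = random_itensor(i, j)

# Elementwise product of two ITensors with identical index sets.
C = A ⊙ B
@show C[i => 1, j => 2] ≈ A[i => 1, j => 2] * B[i => 1, j => 2]
```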
@@ -324,26 +326,26 @@ to_inds(i::Indices) = i
to_inds(::Nothing) = nothing
function __directsum(
- ij, A::ITensor, i::Index, B::ITensor, j::Index; tags=default_directsum_tags(A => i)
-)
- C, (ij,) = _directsum(to_inds(ij), A, to_inds(i), B, to_inds(j); tags=[tags])
- return C => ij
+ ij, A::ITensor, i::Index, B::ITensor, j::Index; tags = default_directsum_tags(A => i)
+ )
+ C, (ij,) = _directsum(to_inds(ij), A, to_inds(i), B, to_inds(j); tags = [tags])
+ return C => ij
end
function _directsum(ij::Nothing, A::ITensor, i::Index, B::ITensor, j::Index; kwargs...)
- return __directsum(ij, A, i, B, j; kwargs...)
+ return __directsum(ij, A, i, B, j; kwargs...)
end
function _directsum(ij::Index, A::ITensor, i::Index, B::ITensor, j::Index; kwargs...)
- return __directsum(ij, A, i, B, j; kwargs...)
+ return __directsum(ij, A, i, B, j; kwargs...)
end
function default_directsum_tags(A_and_I::Pair{ITensor})
- return ["sum$i" for i in 1:length(last(A_and_I))]
+ return ["sum$i" for i in 1:length(last(A_and_I))]
end
-function default_directsum_tags(A_and_I::Pair{ITensor,<:Index})
- return "sum"
+function default_directsum_tags(A_and_I::Pair{ITensor, <:Index})
+ return "sum"
end
"""
@@ -401,66 +403,66 @@ dim(s[2]) == dim(j1) + dim(j2)
```
"""
function directsum(
- A_and_I::Pair{ITensor},
- B_and_J::Pair{ITensor},
- C_and_K::Pair{ITensor},
- itensor_and_inds...;
- tags=default_directsum_tags(A_and_I),
-)
- return directsum(nothing, A_and_I, B_and_J, C_and_K, itensor_and_inds...; tags)
+ A_and_I::Pair{ITensor},
+ B_and_J::Pair{ITensor},
+ C_and_K::Pair{ITensor},
+ itensor_and_inds...;
+ tags = default_directsum_tags(A_and_I),
+ )
+ return directsum(nothing, A_and_I, B_and_J, C_and_K, itensor_and_inds...; tags)
end
function directsum(
- output_inds::Nothing,
- A_and_I::Pair{ITensor},
- B_and_J::Pair{ITensor},
- C_and_K::Pair{ITensor},
- itensor_and_inds...;
- tags=default_directsum_tags(A_and_I),
-)
- return directsum(
- output_inds,
- directsum(nothing, A_and_I, B_and_J; tags),
- C_and_K,
- itensor_and_inds...;
- tags,
- )
+ output_inds::Nothing,
+ A_and_I::Pair{ITensor},
+ B_and_J::Pair{ITensor},
+ C_and_K::Pair{ITensor},
+ itensor_and_inds...;
+ tags = default_directsum_tags(A_and_I),
+ )
+ return directsum(
+ output_inds,
+ directsum(nothing, A_and_I, B_and_J; tags),
+ C_and_K,
+ itensor_and_inds...;
+ tags,
+ )
end
function directsum(
- output_inds::Union{Index,Indices},
- A_and_I::Pair{ITensor},
- B_and_J::Pair{ITensor},
- C_and_K::Pair{ITensor},
- itensor_and_inds...;
- tags=default_directsum_tags(A_and_I),
-)
- return directsum(
- output_inds,
- directsum(nothing, A_and_I, B_and_J; tags),
- C_and_K,
- itensor_and_inds...;
- tags,
- )
+ output_inds::Union{Index, Indices},
+ A_and_I::Pair{ITensor},
+ B_and_J::Pair{ITensor},
+ C_and_K::Pair{ITensor},
+ itensor_and_inds...;
+ tags = default_directsum_tags(A_and_I),
+ )
+ return directsum(
+ output_inds,
+ directsum(nothing, A_and_I, B_and_J; tags),
+ C_and_K,
+ itensor_and_inds...;
+ tags,
+ )
end
function directsum(A_and_I::Pair{ITensor}, B_and_J::Pair{ITensor}; kwargs...)
- return directsum(nothing, A_and_I, B_and_J; kwargs...)
+ return directsum(nothing, A_and_I, B_and_J; kwargs...)
end
function directsum(
- output_inds::Nothing, A_and_I::Pair{ITensor}, B_and_J::Pair{ITensor}; kwargs...
-)
- return _directsum(output_inds, A_and_I..., B_and_J...; kwargs...)
+ output_inds::Nothing, A_and_I::Pair{ITensor}, B_and_J::Pair{ITensor}; kwargs...
+ )
+ return _directsum(output_inds, A_and_I..., B_and_J...; kwargs...)
end
function directsum(
- output_inds::Union{Index,Indices},
- A_and_I::Pair{ITensor},
- B_and_J::Pair{ITensor};
- kwargs...,
-)
- return first(_directsum(output_inds, A_and_I..., B_and_J...; kwargs...))
+ output_inds::Union{Index, Indices},
+ A_and_I::Pair{ITensor},
+ B_and_J::Pair{ITensor};
+ kwargs...,
+ )
+ return first(_directsum(output_inds, A_and_I..., B_and_J...; kwargs...))
end
const ⊕ = directsum
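A minimal sketch of the two-tensor `directsum` form touched above, with hypothetical indices (see the docstring in this hunk for the multi-index variants):

```julia
using ITensors

i, j, k = Index(2, "i"), Index(3, "j"), Index(4, "k")
A = random_itensor(i, k)
B = random_itensor(j, k)

# Direct sum over i and j; the shared index k must match on both inputs.
S, s = directsum(A => i, B => j; tags = "sum")
@show dim(s) == dim(i) + dim(j)   # 2 + 3 == 5
@show hasinds(S, s, k)
```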
@@ -557,48 +559,48 @@ Again, like in the matrix-matrix product above, you can have
dangling indices to do "batched" vector-vector products, or
sum over a batch of vector-vector products.
"""
-function product(A::ITensor, B::ITensor; apply_dag::Bool=false)
- commonindsAB = commoninds(A, B; plev=0)
- isempty(commonindsAB) && error("In product, must have common indices with prime level 0.")
- common_paired_indsA = filterinds(
- i -> hasind(commonindsAB, i) && hasind(A, setprime(i, 1)), A
- )
- common_paired_indsB = filterinds(
- i -> hasind(commonindsAB, i) && hasind(B, setprime(i, 1)), B
- )
-
- if !isempty(common_paired_indsA)
- commoninds_pairs = unioninds(common_paired_indsA, common_paired_indsA')
- elseif !isempty(common_paired_indsB)
- commoninds_pairs = unioninds(common_paired_indsB, common_paired_indsB')
- else
- # vector-vector product
- apply_dag && error("apply_dag not supported for vector-vector product")
- return A * B
- end
- danglings_indsA = uniqueinds(A, commoninds_pairs)
- danglings_indsB = uniqueinds(B, commoninds_pairs)
- danglings_inds = unioninds(danglings_indsA, danglings_indsB)
- if hassameinds(common_paired_indsA, common_paired_indsB)
- # matrix-matrix product
- A′ = prime(A; inds=(!danglings_inds))
- AB = mapprime(A′ * B, 2 => 1; inds=(!danglings_inds))
- if apply_dag
- AB′ = prime(AB; inds=(!danglings_inds))
- Adag = swapprime(dag(A), 0 => 1; inds=(!danglings_inds))
- return mapprime(AB′ * Adag, 2 => 1; inds=(!danglings_inds))
+function product(A::ITensor, B::ITensor; apply_dag::Bool = false)
+ commonindsAB = commoninds(A, B; plev = 0)
+ isempty(commonindsAB) && error("In product, must have common indices with prime level 0.")
+ common_paired_indsA = filterinds(
+ i -> hasind(commonindsAB, i) && hasind(A, setprime(i, 1)), A
+ )
+ common_paired_indsB = filterinds(
+ i -> hasind(commonindsAB, i) && hasind(B, setprime(i, 1)), B
+ )
+
+ if !isempty(common_paired_indsA)
+ commoninds_pairs = unioninds(common_paired_indsA, common_paired_indsA')
+ elseif !isempty(common_paired_indsB)
+ commoninds_pairs = unioninds(common_paired_indsB, common_paired_indsB')
+ else
+ # vector-vector product
+ apply_dag && error("apply_dag not supported for vector-vector product")
+ return A * B
+ end
+ danglings_indsA = uniqueinds(A, commoninds_pairs)
+ danglings_indsB = uniqueinds(B, commoninds_pairs)
+ danglings_inds = unioninds(danglings_indsA, danglings_indsB)
+ if hassameinds(common_paired_indsA, common_paired_indsB)
+ # matrix-matrix product
+ A′ = prime(A; inds = (!danglings_inds))
+ AB = mapprime(A′ * B, 2 => 1; inds = (!danglings_inds))
+ if apply_dag
+ AB′ = prime(AB; inds = (!danglings_inds))
+ Adag = swapprime(dag(A), 0 => 1; inds = (!danglings_inds))
+ return mapprime(AB′ * Adag, 2 => 1; inds = (!danglings_inds))
+ end
+ return AB
+ elseif isempty(common_paired_indsA) && !isempty(common_paired_indsB)
+ # vector-matrix product
+ apply_dag && error("apply_dag not supported for matrix-vector product")
+ A′ = prime(A; inds = (!danglings_inds))
+ return A′ * B
+ elseif !isempty(common_paired_indsA) && isempty(common_paired_indsB)
+ # matrix-vector product
+ apply_dag && error("apply_dag not supported for vector-matrix product")
+ return replaceprime(A * B, 1 => 0; inds = (!danglings_inds))
end
- return AB
- elseif isempty(common_paired_indsA) && !isempty(common_paired_indsB)
- # vector-matrix product
- apply_dag && error("apply_dag not supported for matrix-vector product")
- A′ = prime(A; inds=(!danglings_inds))
- return A′ * B
- elseif !isempty(common_paired_indsA) && isempty(common_paired_indsB)
- # matrix-vector product
- apply_dag && error("apply_dag not supported for vector-matrix product")
- return replaceprime(A * B, 1 => 0; inds=(!danglings_inds))
- end
end
"""
@@ -607,11 +609,11 @@ end
Product the ITensors pairwise.
"""
function product(As::Vector{<:ITensor}, B::ITensor; kwargs...)
- AB = B
- for A in As
- AB = product(A, AB; kwargs...)
- end
- return AB
+ AB = B
+ for A in As
+ AB = product(A, AB; kwargs...)
+ end
+ return AB
end
# Alias apply with product
@@ -619,4 +621,4 @@ const apply = product
(A::ITensor)(B::ITensor) = apply(A, B)
-const Apply{Args} = Applied{typeof(apply),Args}
+const Apply{Args} = Applied{typeof(apply), Args}
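A minimal sketch of `apply` (the alias of `product` defined just above) acting on operator-like ITensors with primed/unprimed index pairs; the names are hypothetical:

```julia
using ITensors

s = Index(2, "s")
A = random_itensor(s', dag(s))   # operator with index pair (s', s)
B = random_itensor(s', dag(s))

# "Matrix-matrix" product: A's unprimed s contracts with B's primed s,
# then the resulting prime level 2 is mapped back down to 1.
AB = apply(A, B)

# apply_dag = true additionally multiplies by dag(A) on the right: A * B * A†.
ABAdag = apply(A, B; apply_dag = true)
@show hasinds(AB, s', dag(s))
```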
diff --git a/src/utils.jl b/src/utils.jl
index 3812a4f006..b55d0b0f67 100644
--- a/src/utils.jl
+++ b/src/utils.jl
@@ -1,17 +1,16 @@
-
# Warn only once, using the message `msg`.
# `funcsym` is a symbol that determines if the warning has been
# called before (so there is only one warning per `funcsym`).
-function warn_once(msg, funcsym; force=true, stacktrace=true)
- if stacktrace
- io = IOBuffer()
- Base.show_backtrace(io, backtrace())
- backtrace_string = String(take!(io))
- backtrace_string *= "\n"
- msg *= backtrace_string
- end
- Base.depwarn(msg, funcsym; force)
- return nothing
+function warn_once(msg, funcsym; force = true, stacktrace = true)
+ if stacktrace
+ io = IOBuffer()
+ Base.show_backtrace(io, backtrace())
+ backtrace_string = String(take!(io))
+ backtrace_string *= "\n"
+ msg *= backtrace_string
+ end
+ Base.depwarn(msg, funcsym; force)
+ return nothing
end
# Directory helper functions (useful for
@@ -21,7 +20,7 @@ pkg_dir() = joinpath(dirname(pathof(@__MODULE__)), "..")
# Determine version and uuid of the package
function _parse_project_toml(field::String)
- return Pkg.TOML.parsefile(joinpath(pkg_dir(), "Project.toml"))[field]
+ return Pkg.TOML.parsefile(joinpath(pkg_dir(), "Project.toml"))[field]
end
version() = VersionNumber(_parse_project_toml("version"))
uuid() = Base.UUID(_parse_project_toml("uuid"))
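For reference, the two helpers at the end of this hunk are used as follows (a sketch; they are internal utilities, so they are qualified with the module name):

```julia
using ITensors

ITensors.version()  # VersionNumber read from the package's Project.toml
ITensors.uuid()     # Base.UUID read from the package's Project.toml
```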
diff --git a/test/base/test_contract.jl b/test/base/test_contract.jl
index 47d3f64403..5526787ca1 100644
--- a/test/base/test_contract.jl
+++ b/test/base/test_contract.jl
@@ -5,310 +5,310 @@ using Combinatorics: Combinatorics
digits(::Type{T}, i, j, k) where {T} = T(i * 10^2 + j * 10 + k)
@testset "ITensor $T Contractions" for T in (Float64, ComplexF64)
- mi, mj, mk, ml, mα = 2, 3, 4, 5, 6, 7
- i = Index(mi, "i")
- j = Index(mj, "j")
- k = Index(mk, "k")
- l = Index(ml, "l")
- α = Index(mα, "alpha")
- @testset "Test contract ITensors" begin
- A = random_itensor(T)
- B = random_itensor(T)
- Ai = random_itensor(T, i)
- Bi = random_itensor(T, i)
- Aj = random_itensor(T, j)
- Aij = random_itensor(T, i, j)
- Bij = random_itensor(T, i, j)
- Aik = random_itensor(T, i, k)
- Ajk = random_itensor(T, j, k)
- Ajl = random_itensor(T, j, l)
- Akl = random_itensor(T, k, l)
- Aijk = random_itensor(T, i, j, k)
- Ajkl = random_itensor(T, j, k, l)
- Aikl = random_itensor(T, i, k, l)
- Aklα = random_itensor(T, k, l, α)
- Aijkl = random_itensor(T, i, j, k, l)
- @testset "Test contract ITensor (Scalar*Scalar -> Scalar)" begin
- C = A * B
- @test scalar(C) ≈ scalar(A) * scalar(B)
- end
- @testset "Test contract ITensor (Scalar*Vector -> Vector)" begin
- C = A * Ai
- @test array(C) ≈ scalar(A) * array(Ai)
- end
- @testset "Test contract ITensor (Vector*Scalar -> Vector)" begin
- C = Aj * A
- @test array(C) ≈ scalar(A) * array(Aj)
- end
- @testset "Test contract ITensors (Vectorᵀ*Vector -> Scalar)" begin
- C = Ai * Bi
- CArray = transpose(array(Ai)) * array(Bi)
- @test CArray ≈ scalar(C)
- end
- @testset "Test Matrix{ITensor} * Matrix{ITensor}" begin
- M1 = [Aij Aij; Aij Aij]
- M2 = [Ajk Ajk; Ajk Ajk]
- M12 = M1 * M2
- for x in 1:2, y in 1:2
- @test M12[x, y] ≈ 2 * Aij * Ajk
- end
- end
- @testset "Test contract ITensors (Vector*Vectorᵀ -> Matrix)" begin
- C = Ai * Aj
- for ii in 1:dim(i), jj in 1:dim(j)
- @test C[i => ii, j => jj] ≈ Ai[i => ii] * Aj[j => jj]
- end
- end
- @testset "Test contract ITensors (Matrix*Scalar -> Matrix)" begin
- Aij = permute(Aij, i, j)
- C = Aij * A
- @test array(permute(C, i, j)) ≈ scalar(A) * array(Aij)
- end
- @testset "Test contract ITensors (Matrix*Vector -> Vector)" begin
- Aij = permute(Aij, i, j)
- C = Aij * Aj
- CArray = array(permute(Aij, i, j)) * array(Aj)
- @test CArray ≈ array(C)
- end
- @testset "Test contract ITensors (Matrixᵀ*Vector -> Vector)" begin
- Aij = permute(Aij, j, i)
- C = Aij * Aj
- CArray = transpose(array(Aij)) * array(Aj)
- @test CArray ≈ array(C)
- end
- @testset "Test contract ITensors (Vector*Matrix -> Vector)" begin
- Aij = permute(Aij, i, j)
- C = Ai * Aij
- CArray = transpose(transpose(array(Ai)) * array(Aij))
- @test CArray ≈ array(C)
- end
- @testset "Test contract ITensors (Vector*Matrixᵀ -> Vector)" begin
- Aij = permute(Aij, j, i)
- C = Ai * Aij
- CArray = transpose(transpose(array(Ai)) * transpose(array(Aij)))
- @test CArray ≈ array(C)
- end
- @testset "Test contract ITensors (Matrix*Matrix -> Scalar)" begin
- Aij = permute(Aij, i, j)
- Bij = permute(Bij, i, j)
- C = Aij * Bij
- CArray = LinearAlgebra.tr(array(Aij) * transpose(array(Bij)))
- @test CArray ≈ scalar(C)
- end
- @testset "Test contract ITensors (Matrix*Matrix -> Matrix)" begin
- Aij = permute(Aij, i, j)
- Ajk = permute(Ajk, j, k)
- C = Aij * Ajk
- CArray = array(Aij) * array(Ajk)
- @test CArray ≈ array(C)
- end
- @testset "Test contract ITensors (Matrixᵀ*Matrix -> Matrix)" begin
- Aij = permute(Aij, j, i)
- Ajk = permute(Ajk, j, k)
- C = Aij * Ajk
- CArray = transpose(array(Aij)) * array(Ajk)
- @test CArray ≈ array(C)
- end
- @testset "Test contract ITensors (Matrix*Matrixᵀ -> Matrix)" begin
- Aij = permute(Aij, i, j)
- Ajk = permute(Ajk, k, j)
- C = Aij * Ajk
- CArray = array(Aij) * transpose(array(Ajk))
- @test CArray ≈ array(C)
- end
- @testset "Test contract ITensors (Matrixᵀ*Matrixᵀ -> Matrix)" begin
- Aij = permute(Aij, j, i)
- Ajk = permute(Ajk, k, j)
- C = Aij * Ajk
- CArray = transpose(array(Aij)) * transpose(array(Ajk))
- @test CArray ≈ array(C)
- end
- @testset "Test contract ITensors (Matrix⊗Matrix -> 4-tensor)" begin
- C = Aij * Akl
- for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k), ll in 1:dim(l)
- @test C[i => ii, j => jj, k => kk, l => ll] ≈
- Aij[i => ii, j => jj] * Akl[k => kk, l => ll]
- end
- end
- @testset "Test contract ITensors (3-Tensor*Scalar -> 3-Tensor)" begin
- Aijk = permute(Aijk, i, j, k)
- C = Aijk * A
- @test array(permute(C, i, j, k)) ≈ scalar(A) * array(Aijk) rtol = 1e-12
- end
- @testset "Test contract ITensors (3-Tensor*Vector -> Matrix)" begin
- Aijk = permute(Aijk, i, j, k)
- C = Aijk * Ai
- CArray = reshape(
- reshape(array(permute(Aijk, j, k, i)), dim(j) * dim(k), dim(i)) * array(Ai),
- dim(j),
- dim(k),
- )
- @test CArray ≈ array(permute(C, j, k))
- end
- @testset "Test contract ITensors (Vector*3-Tensor -> Matrix)" begin
- Aijk = permute(Aijk, i, j, k)
- C = Aj * Aijk
- CArray = reshape(
- transpose(array(Aj)) *
- reshape(array(permute(Aijk, j, i, k)), dim(j), dim(i) * dim(k)),
- dim(i),
- dim(k),
- )
- @test CArray ≈ array(permute(C, i, k))
- end
- @testset "Test contract ITensors (3-Tensor*Matrix -> Vector)" begin
- Aijk = permute(Aijk, i, j, k)
- Aik = permute(Aik, i, k)
- C = Aijk * Aik
- CArray =
- reshape(array(permute(Aijk, j, i, k)), dim(j), dim(i) * dim(k)) * vec(array(Aik))
- @test CArray ≈ array(C)
- end
- @testset "Test contract ITensors (3-Tensor*Matrix -> 3-Tensor)" begin
- Aijk = permute(Aijk, i, j, k)
- Ajl = permute(Ajl, j, l)
- C = Aijk * Ajl
- CArray = reshape(
- reshape(array(permute(Aijk, i, k, j)), dim(i) * dim(k), dim(j)) * array(Ajl),
- dim(i),
- dim(k),
- dim(l),
- )
- @test CArray ≈ array(permute(C, i, k, l))
- end
- @testset "Test contract ITensors (Matrix*3-Tensor -> 3-Tensor)" begin
- Aijk = permute(Aijk, i, j, k)
- Akl = permute(Akl, k, l)
- C = Akl * Aijk
- CArray = reshape(
- array(permute(Akl, l, k)) *
- reshape(array(permute(Aijk, k, i, j)), dim(k), dim(i) * dim(j)),
- dim(l),
- dim(i),
- dim(j),
- )
- @test CArray ≈ array(permute(C, l, i, j))
- end
- @testset "Test contract ITensors (3-Tensor*3-Tensor -> 3-Tensor)" begin
- Aijk = permute(Aijk, i, j, k)
- Ajkl = permute(Ajkl, j, k, l)
- C = Aijk * Ajkl
- CArray =
- reshape(array(permute(Aijk, i, j, k)), dim(i), dim(j) * dim(k)) *
- reshape(array(permute(Ajkl, j, k, l)), dim(j) * dim(k), dim(l))
- @test CArray ≈ array(permute(C, i, l))
- end
- @testset "Test contract ITensors (3-Tensor*3-Tensor -> 3-Tensor)" begin
- for inds_ijk in Combinatorics.permutations([i, j, k]),
- inds_jkl in Combinatorics.permutations([j, k, l])
+ mi, mj, mk, ml, mα = 2, 3, 4, 5, 6, 7
+ i = Index(mi, "i")
+ j = Index(mj, "j")
+ k = Index(mk, "k")
+ l = Index(ml, "l")
+ α = Index(mα, "alpha")
+ @testset "Test contract ITensors" begin
+ A = random_itensor(T)
+ B = random_itensor(T)
+ Ai = random_itensor(T, i)
+ Bi = random_itensor(T, i)
+ Aj = random_itensor(T, j)
+ Aij = random_itensor(T, i, j)
+ Bij = random_itensor(T, i, j)
+ Aik = random_itensor(T, i, k)
+ Ajk = random_itensor(T, j, k)
+ Ajl = random_itensor(T, j, l)
+ Akl = random_itensor(T, k, l)
+ Aijk = random_itensor(T, i, j, k)
+ Ajkl = random_itensor(T, j, k, l)
+ Aikl = random_itensor(T, i, k, l)
+ Aklα = random_itensor(T, k, l, α)
+ Aijkl = random_itensor(T, i, j, k, l)
+ @testset "Test contract ITensor (Scalar*Scalar -> Scalar)" begin
+ C = A * B
+ @test scalar(C) ≈ scalar(A) * scalar(B)
+ end
+ @testset "Test contract ITensor (Scalar*Vector -> Vector)" begin
+ C = A * Ai
+ @test array(C) ≈ scalar(A) * array(Ai)
+ end
+ @testset "Test contract ITensor (Vector*Scalar -> Vector)" begin
+ C = Aj * A
+ @test array(C) ≈ scalar(A) * array(Aj)
+ end
+ @testset "Test contract ITensors (Vectorᵀ*Vector -> Scalar)" begin
+ C = Ai * Bi
+ CArray = transpose(array(Ai)) * array(Bi)
+ @test CArray ≈ scalar(C)
+ end
+ @testset "Test Matrix{ITensor} * Matrix{ITensor}" begin
+ M1 = [Aij Aij; Aij Aij]
+ M2 = [Ajk Ajk; Ajk Ajk]
+ M12 = M1 * M2
+ for x in 1:2, y in 1:2
+ @test M12[x, y] ≈ 2 * Aij * Ajk
+ end
+ end
+ @testset "Test contract ITensors (Vector*Vectorᵀ -> Matrix)" begin
+ C = Ai * Aj
+ for ii in 1:dim(i), jj in 1:dim(j)
+ @test C[i => ii, j => jj] ≈ Ai[i => ii] * Aj[j => jj]
+ end
+ end
+ @testset "Test contract ITensors (Matrix*Scalar -> Matrix)" begin
+ Aij = permute(Aij, i, j)
+ C = Aij * A
+ @test array(permute(C, i, j)) ≈ scalar(A) * array(Aij)
+ end
+ @testset "Test contract ITensors (Matrix*Vector -> Vector)" begin
+ Aij = permute(Aij, i, j)
+ C = Aij * Aj
+ CArray = array(permute(Aij, i, j)) * array(Aj)
+ @test CArray ≈ array(C)
+ end
+ @testset "Test contract ITensors (Matrixᵀ*Vector -> Vector)" begin
+ Aij = permute(Aij, j, i)
+ C = Aij * Aj
+ CArray = transpose(array(Aij)) * array(Aj)
+ @test CArray ≈ array(C)
+ end
+ @testset "Test contract ITensors (Vector*Matrix -> Vector)" begin
+ Aij = permute(Aij, i, j)
+ C = Ai * Aij
+ CArray = transpose(transpose(array(Ai)) * array(Aij))
+ @test CArray ≈ array(C)
+ end
+ @testset "Test contract ITensors (Vector*Matrixᵀ -> Vector)" begin
+ Aij = permute(Aij, j, i)
+ C = Ai * Aij
+ CArray = transpose(transpose(array(Ai)) * transpose(array(Aij)))
+ @test CArray ≈ array(C)
+ end
+ @testset "Test contract ITensors (Matrix*Matrix -> Scalar)" begin
+ Aij = permute(Aij, i, j)
+ Bij = permute(Bij, i, j)
+ C = Aij * Bij
+ CArray = LinearAlgebra.tr(array(Aij) * transpose(array(Bij)))
+ @test CArray ≈ scalar(C)
+ end
+ @testset "Test contract ITensors (Matrix*Matrix -> Matrix)" begin
+ Aij = permute(Aij, i, j)
+ Ajk = permute(Ajk, j, k)
+ C = Aij * Ajk
+ CArray = array(Aij) * array(Ajk)
+ @test CArray ≈ array(C)
+ end
+ @testset "Test contract ITensors (Matrixᵀ*Matrix -> Matrix)" begin
+ Aij = permute(Aij, j, i)
+ Ajk = permute(Ajk, j, k)
+ C = Aij * Ajk
+ CArray = transpose(array(Aij)) * array(Ajk)
+ @test CArray ≈ array(C)
+ end
+ @testset "Test contract ITensors (Matrix*Matrixᵀ -> Matrix)" begin
+ Aij = permute(Aij, i, j)
+ Ajk = permute(Ajk, k, j)
+ C = Aij * Ajk
+ CArray = array(Aij) * transpose(array(Ajk))
+ @test CArray ≈ array(C)
+ end
+ @testset "Test contract ITensors (Matrixᵀ*Matrixᵀ -> Matrix)" begin
+ Aij = permute(Aij, j, i)
+ Ajk = permute(Ajk, k, j)
+ C = Aij * Ajk
+ CArray = transpose(array(Aij)) * transpose(array(Ajk))
+ @test CArray ≈ array(C)
+ end
+ @testset "Test contract ITensors (Matrix⊗Matrix -> 4-tensor)" begin
+ C = Aij * Akl
+ for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k), ll in 1:dim(l)
+ @test C[i => ii, j => jj, k => kk, l => ll] ≈
+ Aij[i => ii, j => jj] * Akl[k => kk, l => ll]
+ end
+ end
+ @testset "Test contract ITensors (3-Tensor*Scalar -> 3-Tensor)" begin
+ Aijk = permute(Aijk, i, j, k)
+ C = Aijk * A
+ @test array(permute(C, i, j, k)) ≈ scalar(A) * array(Aijk) rtol = 1.0e-12
+ end
+ @testset "Test contract ITensors (3-Tensor*Vector -> Matrix)" begin
+ Aijk = permute(Aijk, i, j, k)
+ C = Aijk * Ai
+ CArray = reshape(
+ reshape(array(permute(Aijk, j, k, i)), dim(j) * dim(k), dim(i)) * array(Ai),
+ dim(j),
+ dim(k),
+ )
+ @test CArray ≈ array(permute(C, j, k))
+ end
+ @testset "Test contract ITensors (Vector*3-Tensor -> Matrix)" begin
+ Aijk = permute(Aijk, i, j, k)
+ C = Aj * Aijk
+ CArray = reshape(
+ transpose(array(Aj)) *
+ reshape(array(permute(Aijk, j, i, k)), dim(j), dim(i) * dim(k)),
+ dim(i),
+ dim(k),
+ )
+ @test CArray ≈ array(permute(C, i, k))
+ end
+ @testset "Test contract ITensors (3-Tensor*Matrix -> Vector)" begin
+ Aijk = permute(Aijk, i, j, k)
+ Aik = permute(Aik, i, k)
+ C = Aijk * Aik
+ CArray =
+ reshape(array(permute(Aijk, j, i, k)), dim(j), dim(i) * dim(k)) * vec(array(Aik))
+ @test CArray ≈ array(C)
+ end
+ @testset "Test contract ITensors (3-Tensor*Matrix -> 3-Tensor)" begin
+ Aijk = permute(Aijk, i, j, k)
+ Ajl = permute(Ajl, j, l)
+ C = Aijk * Ajl
+ CArray = reshape(
+ reshape(array(permute(Aijk, i, k, j)), dim(i) * dim(k), dim(j)) * array(Ajl),
+ dim(i),
+ dim(k),
+ dim(l),
+ )
+ @test CArray ≈ array(permute(C, i, k, l))
+ end
+ @testset "Test contract ITensors (Matrix*3-Tensor -> 3-Tensor)" begin
+ Aijk = permute(Aijk, i, j, k)
+ Akl = permute(Akl, k, l)
+ C = Akl * Aijk
+ CArray = reshape(
+ array(permute(Akl, l, k)) *
+ reshape(array(permute(Aijk, k, i, j)), dim(k), dim(i) * dim(j)),
+ dim(l),
+ dim(i),
+ dim(j),
+ )
+ @test CArray ≈ array(permute(C, l, i, j))
+ end
+ @testset "Test contract ITensors (3-Tensor*3-Tensor -> 3-Tensor)" begin
+ Aijk = permute(Aijk, i, j, k)
+ Ajkl = permute(Ajkl, j, k, l)
+ C = Aijk * Ajkl
+ CArray =
+ reshape(array(permute(Aijk, i, j, k)), dim(i), dim(j) * dim(k)) *
+ reshape(array(permute(Ajkl, j, k, l)), dim(j) * dim(k), dim(l))
+ @test CArray ≈ array(permute(C, i, l))
+ end
+ @testset "Test contract ITensors (3-Tensor*3-Tensor -> 3-Tensor)" begin
+ for inds_ijk in Combinatorics.permutations([i, j, k]),
+ inds_jkl in Combinatorics.permutations([j, k, l])
- Aijk = permute(Aijk, inds_ijk...)
- Ajkl = permute(Ajkl, inds_jkl...)
- C = Ajkl * Aijk
- CArray =
- reshape(array(permute(Ajkl, l, j, k)), dim(l), dim(j) * dim(k)) *
- reshape(array(permute(Aijk, j, k, i)), dim(j) * dim(k), dim(i))
- @test CArray ≈ array(permute(C, l, i))
- end
- end
- @testset "Test contract ITensors (4-Tensor*3-Tensor -> 1-Tensor)" begin
- for inds_ijkl in Combinatorics.permutations([i, j, k, l]),
- inds_jkl in Combinatorics.permutations([j, k, l])
+ Aijk = permute(Aijk, inds_ijk...)
+ Ajkl = permute(Ajkl, inds_jkl...)
+ C = Ajkl * Aijk
+ CArray =
+ reshape(array(permute(Ajkl, l, j, k)), dim(l), dim(j) * dim(k)) *
+ reshape(array(permute(Aijk, j, k, i)), dim(j) * dim(k), dim(i))
+ @test CArray ≈ array(permute(C, l, i))
+ end
+ end
+ @testset "Test contract ITensors (4-Tensor*3-Tensor -> 1-Tensor)" begin
+ for inds_ijkl in Combinatorics.permutations([i, j, k, l]),
+ inds_jkl in Combinatorics.permutations([j, k, l])
- Aijkl = permute(Aijkl, inds_ijkl...)
- Ajkl = permute(Ajkl, inds_jkl...)
- C = Ajkl * Aijkl
- CArray =
- reshape(array(permute(Ajkl, j, k, l)), 1, dim(j) * dim(k) * dim(l)) *
- reshape(array(permute(Aijkl, j, k, l, i)), dim(j) * dim(k) * dim(l), dim(i))
- @test vec(CArray) ≈ array(permute(C, i))
- end
- end
- @testset "Test contract ITensors (4-Tensor*3-Tensor -> 3-Tensor)" begin
- for inds_ijkl in Combinatorics.permutations([i, j, k, l]),
- inds_klα in Combinatorics.permutations([k, l, α])
+ Aijkl = permute(Aijkl, inds_ijkl...)
+ Ajkl = permute(Ajkl, inds_jkl...)
+ C = Ajkl * Aijkl
+ CArray =
+ reshape(array(permute(Ajkl, j, k, l)), 1, dim(j) * dim(k) * dim(l)) *
+ reshape(array(permute(Aijkl, j, k, l, i)), dim(j) * dim(k) * dim(l), dim(i))
+ @test vec(CArray) ≈ array(permute(C, i))
+ end
+ end
+ @testset "Test contract ITensors (4-Tensor*3-Tensor -> 3-Tensor)" begin
+ for inds_ijkl in Combinatorics.permutations([i, j, k, l]),
+ inds_klα in Combinatorics.permutations([k, l, α])
- Aijkl = permute(Aijkl, inds_ijkl...)
- Aklα = permute(Aklα, inds_klα...)
- C = Aklα * Aijkl
- CArray = reshape(
- reshape(array(permute(Aklα, α, k, l)), dim(α), dim(k) * dim(l)) *
- reshape(array(permute(Aijkl, k, l, i, j)), dim(k) * dim(l), dim(i) * dim(j)),
- dim(α),
- dim(i),
- dim(j),
- )
- @test CArray ≈ array(permute(C, α, i, j))
- end
- end
- @testset "Test contract in-place ITensors (4-Tensor*Matrix -> 4-Tensor)" begin
- A = random_itensor(T, (j, i))
- B = random_itensor(T, (j, k, l, α))
- C = ITensor(zero(T), (i, k, α, l))
- ITensors.contract!(C, B, A, 1.0, 0.0)
- ITensors.contract!(C, B, A, 1.0, 1.0)
- D = A * B
- D .+= A * B
- @test C ≈ D
- end
- end # End contraction testset
+ Aijkl = permute(Aijkl, inds_ijkl...)
+ Aklα = permute(Aklα, inds_klα...)
+ C = Aklα * Aijkl
+ CArray = reshape(
+ reshape(array(permute(Aklα, α, k, l)), dim(α), dim(k) * dim(l)) *
+ reshape(array(permute(Aijkl, k, l, i, j)), dim(k) * dim(l), dim(i) * dim(j)),
+ dim(α),
+ dim(i),
+ dim(j),
+ )
+ @test CArray ≈ array(permute(C, α, i, j))
+ end
+ end
+ @testset "Test contract in-place ITensors (4-Tensor*Matrix -> 4-Tensor)" begin
+ A = random_itensor(T, (j, i))
+ B = random_itensor(T, (j, k, l, α))
+ C = ITensor(zero(T), (i, k, α, l))
+ ITensors.contract!(C, B, A, 1.0, 0.0)
+ ITensors.contract!(C, B, A, 1.0, 1.0)
+ D = A * B
+ D .+= A * B
+ @test C ≈ D
+ end
+ end # End contraction testset
end
@testset "Contraction conversions" begin
- @testset "Real scalar * Complex ITensor" begin
- i = Index(2, "i")
- j = Index(2, "j")
- x = rand(Float64)
- A = random_itensor(ComplexF64, i, j)
- B = x * A
- for ii in dim(i), jj in dim(j)
- @test B[i => ii, j => jj] == x * A[i => ii, j => jj]
+ @testset "Real scalar * Complex ITensor" begin
+ i = Index(2, "i")
+ j = Index(2, "j")
+ x = rand(Float64)
+ A = random_itensor(ComplexF64, i, j)
+ B = x * A
+ for ii in dim(i), jj in dim(j)
+ @test B[i => ii, j => jj] == x * A[i => ii, j => jj]
+ end
end
- end
- @testset "Complex scalar * Real ITensor" begin
- i = Index(2, "i")
- j = Index(2, "j")
- x = rand(ComplexF64)
- A = random_itensor(Float64, i, j)
- B = x * A
- for ii in dim(i), jj in dim(j)
- @test B[i => ii, j => jj] == x * A[i => ii, j => jj]
+ @testset "Complex scalar * Real ITensor" begin
+ i = Index(2, "i")
+ j = Index(2, "j")
+ x = rand(ComplexF64)
+ A = random_itensor(Float64, i, j)
+ B = x * A
+ for ii in dim(i), jj in dim(j)
+ @test B[i => ii, j => jj] == x * A[i => ii, j => jj]
+ end
+ end
+ @testset "Real ITensor * Complex ITensor" begin
+ i = Index(2, "i")
+ j = Index(2, "j")
+ k = Index(2, "k")
+ A = random_itensor(Float64, i, j)
+ B = random_itensor(ComplexF64, j, k)
+ C = A * B
+ @test array(permute(C, i, k)) ≈ array(A) * array(B)
+ end
+ @testset "Complex ITensor * Real ITensor" begin
+ i = Index(2, "i")
+ j = Index(2, "j")
+ k = Index(2, "k")
+ A = random_itensor(ComplexF64, i, j)
+ B = random_itensor(Float64, j, k)
+ C = A * B
+ @test array(permute(C, i, k)) ≈ array(A) * array(B)
end
- end
- @testset "Real ITensor * Complex ITensor" begin
- i = Index(2, "i")
- j = Index(2, "j")
- k = Index(2, "k")
- A = random_itensor(Float64, i, j)
- B = random_itensor(ComplexF64, j, k)
- C = A * B
- @test array(permute(C, i, k)) ≈ array(A) * array(B)
- end
- @testset "Complex ITensor * Real ITensor" begin
- i = Index(2, "i")
- j = Index(2, "j")
- k = Index(2, "k")
- A = random_itensor(ComplexF64, i, j)
- B = random_itensor(Float64, j, k)
- C = A * B
- @test array(permute(C, i, k)) ≈ array(A) * array(B)
- end
- @testset "Outer Product Real ITensor * Complex ITensor" begin
- i = Index(2, "i")
- j = Index(2, "j")
- A = random_itensor(Float64, i)
- B = random_itensor(ComplexF64, j)
- C = A * B
- @test array(permute(C, i, j)) ≈ kron(array(A), transpose(array(B)))
- end
+ @testset "Outer Product Real ITensor * Complex ITensor" begin
+ i = Index(2, "i")
+ j = Index(2, "j")
+ A = random_itensor(Float64, i)
+ B = random_itensor(ComplexF64, j)
+ C = A * B
+ @test array(permute(C, i, j)) ≈ kron(array(A), transpose(array(B)))
+ end
- @testset "Outer Product: Complex ITensor * Real ITensor" begin
- i = Index(2, "i")
- j = Index(2, "j")
- A = random_itensor(ComplexF64, i)
- B = random_itensor(Float64, j)
- C = A * B
- @test array(permute(C, i, j)) ≈ kron(array(A), transpose(array(B)))
- end
+ @testset "Outer Product: Complex ITensor * Real ITensor" begin
+ i = Index(2, "i")
+ j = Index(2, "j")
+ A = random_itensor(ComplexF64, i)
+ B = random_itensor(Float64, j)
+ C = A * B
+ @test array(permute(C, i, j)) ≈ kron(array(A), transpose(array(B)))
+ end
end
diff --git a/test/base/test_decomp.jl b/test/base/test_decomp.jl
index 4c1ef4fb2c..f9d7a064c2 100644
--- a/test/base/test_decomp.jl
+++ b/test/base/test_decomp.jl
@@ -5,51 +5,51 @@ using ITensors.SiteTypes: siteinds
# Decide if rank 2 tensor is upper triangular, i.e. all zeros below the diagonal.
#
function is_upper(At::NDTensors.Tensor)::Bool
- nr, nc = dims(At)
- dc = Base.max(0, dim(nr) - dim(nc)) #column off set for rectangular matrices.
- nzeros = 0
- for i in CartesianIndices(At)
- if i[1] > i[2] + dc
- if abs(At[i]) > 0.0 #row>col is lower triangle
- return false
- else
- nzeros += 1
- end
+ nr, nc = dims(At)
+    dc = Base.max(0, dim(nr) - dim(nc)) #column offset for rectangular matrices.
+ nzeros = 0
+ for i in CartesianIndices(At)
+ if i[1] > i[2] + dc
+ if abs(At[i]) > 0.0 #row>col is lower triangle
+ return false
+ else
+ nzeros += 1
+ end
+ end
end
- end
- #
- # Debug code: Make some noise if At is not a vector and we still found no zeros.
- #
- # if nzeros==0 && nr>1 && nc>1
- # @show nr nc dc At
- # end
- return true
+ #
+ # Debug code: Make some noise if At is not a vector and we still found no zeros.
+ #
+ # if nzeros==0 && nr>1 && nc>1
+ # @show nr nc dc At
+ # end
+ return true
end
#
# A must be rank 2
#
function is_upper(l::Index, A::ITensor, r::Index)::Bool
- @assert length(inds(A)) == 2
- if inds(A) != IndexSet(l, r)
- A = permute(A, l, r)
- end
- return is_upper(NDTensors.tensor(A))
+ @assert length(inds(A)) == 2
+ if inds(A) != IndexSet(l, r)
+ A = permute(A, l, r)
+ end
+ return is_upper(NDTensors.tensor(A))
end
#
# With left index specified
#
function is_upper(l::Index, A::ITensor)::Bool
- other = noncommoninds(A, l)
- if (length(other) == 1)
- return is_upper(l, A, other[1])
- else
- # use combiner to gather all the "other" indices into one.
- C = combiner(other...)
- AC = A * C
- return is_upper(l, AC, combinedind(C))
- end
+ other = noncommoninds(A, l)
+ if (length(other) == 1)
+ return is_upper(l, A, other[1])
+ else
+ # use combiner to gather all the "other" indices into one.
+ C = combiner(other...)
+ AC = A * C
+ return is_upper(l, AC, combinedind(C))
+ end
end
is_lower(l::Index, A::ITensor)::Bool = is_upper(A, l)
@@ -57,487 +57,487 @@ is_lower(l::Index, A::ITensor)::Bool = is_upper(A, l)
# With right index specified
#
function is_upper(A::ITensor, r::Index)::Bool
- other = noncommoninds(A, r)
- if (length(other) == 1)
- return is_upper(other[1], A, r)
- else
- C = combiner(other...)
- AC = A * C
- return is_upper(combinedind(C), AC, r)
- end
+ other = noncommoninds(A, r)
+ if (length(other) == 1)
+ return is_upper(other[1], A, r)
+ else
+ C = combiner(other...)
+ AC = A * C
+ return is_upper(combinedind(C), AC, r)
+ end
end
is_lower(A::ITensor, r::Index)::Bool = is_upper(r, A)
function diag_upper(l::Index, A::ITensor)
- At = NDTensors.tensor(A * combiner(noncommoninds(A, l)...))
- if size(At) == (1,)
- return At
- end
- @assert length(size(At)) == 2
- return diag(At)
+ At = NDTensors.tensor(A * combiner(noncommoninds(A, l)...))
+ if size(At) == (1,)
+ return At
+ end
+ @assert length(size(At)) == 2
+ return diag(At)
end
function diag_lower(l::Index, A::ITensor)
- At = NDTensors.tensor(A * combiner(noncommoninds(A, l)...)) #render down ot order 2
- if size(At) == (1,)
- return At
- end
- @assert length(size(At)) == 2
- nr, nc = size(At)
- dc = Base.max(0, nc - nr) #diag starts dc+1 columns out from the left
- At1 = At[:, (dc + 1):nc] #chop out the first dc columns
- return diag(At1) #now we can use the stock diag function.
+    At = NDTensors.tensor(A * combiner(noncommoninds(A, l)...)) #render down to order 2
+ if size(At) == (1,)
+ return At
+ end
+ @assert length(size(At)) == 2
+ nr, nc = size(At)
+ dc = Base.max(0, nc - nr) #diag starts dc+1 columns out from the left
+ At1 = At[:, (dc + 1):nc] #chop out the first dc columns
+ return diag(At1) #now we can use the stock diag function.
end
@testset "ITensor Decompositions" begin
- @testset "truncate!" begin
- a = [0.1, 0.01, 1e-13]
- @test NDTensors.truncate!(a; use_absolute_cutoff=true, cutoff=1e-5) ==
- (1e-13, (0.01 + 1e-13) / 2)
- @test length(a) == 2
-
- # Negative definite spectrum treated by taking
- # square (if singular values) or absolute values
- a = [-0.12, -0.1]
- @test NDTensors.truncate!(a) == (0.0, 0.0)
- @test length(a) == 2
-
- a = [-0.1, -0.01, -1e-13]
- @test NDTensors.truncate!(a; use_absolute_cutoff=true, cutoff=1e-5) ==
- (1e-13, (0.01 + 1e-13) / 2)
- @test length(a) == 2
- end
-
- @testset "factorize" begin
- i = Index(2, "i")
- j = Index(2, "j")
- A = random_itensor(i, j)
- @test_throws ErrorException factorize(A, i; ortho="fakedir")
- end
-
- @testset "factorize with eigen_perturbation" begin
- l = Index(4, "l")
- s1 = Index(2, "s1")
- s2 = Index(2, "s2")
- r = Index(4, "r")
-
- phi = random_itensor(l, s1, s2, r)
-
- drho = random_itensor(l', s1', l, s1)
- drho += swapprime(drho, 0, 1)
- drho .*= 1E-5
-
- U, B = factorize(phi, (l, s1); ortho="left", eigen_perturbation=drho)
- @test norm(U * B - phi) < 1E-5
-
- # Not allowed to use eigen_perturbation with which_decomp
- # other than "automatic" or "eigen":
- @test_throws ErrorException factorize(
- phi, (l, s1); ortho="left", eigen_perturbation=drho, which_decomp="svd"
- )
- end
-
- @testset "factorize with eigen_perturbation dimensions" begin
- elt = Float64
- di = 10
- dj = 5
- maxdim = di - 1
- i = Index(di, "i")
- j = Index(dj, "j")
- a = random_itensor(elt, i, j)
- δ = random_itensor(elt, i, j)
- δ² = prime(δ, i) * dag(δ)
- a² = prime(a, i) * dag(a)
- x, y = factorize(a, i; ortho="left", which_decomp="eigen", maxdim)
- l = commonind(x, y)
- @test dim(l) == dj
- xδ, yδ = factorize(
- a, i; ortho="left", which_decomp="eigen", eigen_perturbation=δ², maxdim
- )
- lδ = commonind(xδ, yδ)
- @test dim(lδ) == maxdim
- end
-
- @testset "QR/RQ/QL/LQ decomp on MPS dense $elt tensor with all possible collections on Q/R/L" for ninds in
- [
- 0, 1, 2, 3
- ],
- elt in (Float64, ComplexF64)
-
- l = Index(5, "l")
- s = Index(2, "s")
- r = Index(5, "r")
- A = random_itensor(elt, l, s, r)
- Ainds = inds(A)
- Linds = Ainds[1:ninds]
- Rinds = uniqueinds(A, Linds...)
- Q, R, q = qr(A, Linds) #calling qr(A) triggers not supported error.
- @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index.
- @test length(inds(R)) == 3 - ninds + 1
- @test A ≈ Q * R atol = 1e-13
- @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
- @test q == commonind(Q, R)
- @test hastags(q, "Link,qr")
- if (length(inds(R)) > 1)
- @test is_upper(q, R) #specify the left index
+ @testset "truncate!" begin
+ a = [0.1, 0.01, 1.0e-13]
+ @test NDTensors.truncate!(a; use_absolute_cutoff = true, cutoff = 1.0e-5) ==
+ (1.0e-13, (0.01 + 1.0e-13) / 2)
+ @test length(a) == 2
+
+ # Negative definite spectrum treated by taking
+ # square (if singular values) or absolute values
+ a = [-0.12, -0.1]
+ @test NDTensors.truncate!(a) == (0.0, 0.0)
+ @test length(a) == 2
+
+ a = [-0.1, -0.01, -1.0e-13]
+ @test NDTensors.truncate!(a; use_absolute_cutoff = true, cutoff = 1.0e-5) ==
+ (1.0e-13, (0.01 + 1.0e-13) / 2)
+ @test length(a) == 2
+ end
+
+ @testset "factorize" begin
+ i = Index(2, "i")
+ j = Index(2, "j")
+ A = random_itensor(i, j)
+ @test_throws ErrorException factorize(A, i; ortho = "fakedir")
end
- Q1, R1, q1 = qr(A, Linds, Rinds; tags="Link,myqr") #make sure the same call with both L & R indices give the same answer.
- Q1 = replaceind(Q1, q1, q)
- R1 = replaceind(R1, q1, q)
- @test norm(Q - Q1) == 0.0
- @test norm(R - R1) == 0.0
- @test hastags(q1, "Link,myqr")
-
- R, Q, q = rq(A, Linds)
- @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index.
- @test length(inds(Q)) == 3 - ninds + 1
- @test A ≈ Q * R atol = 1e-13 #With ITensors R*Q==Q*R
- @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
- @test q == commonind(Q, R)
- @test hastags(q, "rq")
- if (length(inds(R)) > 1)
- @test is_upper(R, q) #specify the right index
+
+ @testset "factorize with eigen_perturbation" begin
+ l = Index(4, "l")
+ s1 = Index(2, "s1")
+ s2 = Index(2, "s2")
+ r = Index(4, "r")
+
+ phi = random_itensor(l, s1, s2, r)
+
+ drho = random_itensor(l', s1', l, s1)
+ drho += swapprime(drho, 0, 1)
+ drho .*= 1.0e-5
+
+ U, B = factorize(phi, (l, s1); ortho = "left", eigen_perturbation = drho)
+ @test norm(U * B - phi) < 1.0e-5
+
+ # Not allowed to use eigen_perturbation with which_decomp
+ # other than "automatic" or "eigen":
+ @test_throws ErrorException factorize(
+ phi, (l, s1); ortho = "left", eigen_perturbation = drho, which_decomp = "svd"
+ )
end
- R1, Q1, q1 = rq(A, Linds, Rinds; tags="Link,myrq") #make sure the same call with both L & R indices give the same answer.
- Q1 = replaceind(Q1, q1, q)
- R1 = replaceind(R1, q1, q)
- @test norm(Q - Q1) == 0.0
- @test norm(R - R1) == 0.0
- @test hastags(q1, "myrq")
- @test hastags(q1, "Link")
-
- L, Q, q = lq(A, Linds)
- @test length(inds(L)) == ninds + 1 #+1 to account for new lq,Link index.
- @test length(inds(Q)) == 3 - ninds + 1
- @test A ≈ Q * L atol = 1e-13 #With ITensors L*Q==Q*L
- @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
- @test q == commonind(Q, L)
- @test hastags(q, "lq")
- if (length(inds(L)) > 1)
- @test is_lower(L, q) #specify the right index
+
+ @testset "factorize with eigen_perturbation dimensions" begin
+ elt = Float64
+ di = 10
+ dj = 5
+ maxdim = di - 1
+ i = Index(di, "i")
+ j = Index(dj, "j")
+ a = random_itensor(elt, i, j)
+ δ = random_itensor(elt, i, j)
+ δ² = prime(δ, i) * dag(δ)
+ a² = prime(a, i) * dag(a)
+ x, y = factorize(a, i; ortho = "left", which_decomp = "eigen", maxdim)
+ l = commonind(x, y)
+ @test dim(l) == dj
+ xδ, yδ = factorize(
+ a, i; ortho = "left", which_decomp = "eigen", eigen_perturbation = δ², maxdim
+ )
+ lδ = commonind(xδ, yδ)
+ @test dim(lδ) == maxdim
end
- L1, Q1, q1 = lq(A, Linds, Rinds; tags="Link,mylq") #make sure the same call with both L & R indices give the same answer.
- Q1 = replaceind(Q1, q1, q)
- L1 = replaceind(L1, q1, q)
- @test norm(Q - Q1) == 0.0
- @test norm(L - L1) == 0.0
- @test hastags(q1, "mylq")
- @test hastags(q1, "Link")
-
- Q, L, q = ql(A, Linds)
- @test length(inds(Q)) == ninds + 1 #+1 to account for new lq,Link index.
- @test length(inds(L)) == 3 - ninds + 1
- @test A ≈ Q * L atol = 1e-13
- @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
- @test q == commonind(Q, L)
- @test hastags(q, "ql")
- if (length(inds(L)) > 1)
- @test is_lower(q, L) #specify the right index
+
+ @testset "QR/RQ/QL/LQ decomp on MPS dense $elt tensor with all possible collections on Q/R/L" for ninds in
+ [
+ 0, 1, 2, 3,
+ ],
+ elt in (Float64, ComplexF64)
+
+ l = Index(5, "l")
+ s = Index(2, "s")
+ r = Index(5, "r")
+ A = random_itensor(elt, l, s, r)
+ Ainds = inds(A)
+ Linds = Ainds[1:ninds]
+ Rinds = uniqueinds(A, Linds...)
+ Q, R, q = qr(A, Linds) #calling qr(A) triggers not supported error.
+ @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index.
+ @test length(inds(R)) == 3 - ninds + 1
+ @test A ≈ Q * R atol = 1.0e-13
+ @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1.0e-13
+ @test q == commonind(Q, R)
+ @test hastags(q, "Link,qr")
+ if (length(inds(R)) > 1)
+ @test is_upper(q, R) #specify the left index
+ end
+ Q1, R1, q1 = qr(A, Linds, Rinds; tags = "Link,myqr") #make sure the same call with both L & R indices give the same answer.
+ Q1 = replaceind(Q1, q1, q)
+ R1 = replaceind(R1, q1, q)
+ @test norm(Q - Q1) == 0.0
+ @test norm(R - R1) == 0.0
+ @test hastags(q1, "Link,myqr")
+
+ R, Q, q = rq(A, Linds)
+ @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index.
+ @test length(inds(Q)) == 3 - ninds + 1
+ @test A ≈ Q * R atol = 1.0e-13 #With ITensors R*Q==Q*R
+ @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1.0e-13
+ @test q == commonind(Q, R)
+ @test hastags(q, "rq")
+ if (length(inds(R)) > 1)
+ @test is_upper(R, q) #specify the right index
+ end
+ R1, Q1, q1 = rq(A, Linds, Rinds; tags = "Link,myrq") #make sure the same call with both L & R indices give the same answer.
+ Q1 = replaceind(Q1, q1, q)
+ R1 = replaceind(R1, q1, q)
+ @test norm(Q - Q1) == 0.0
+ @test norm(R - R1) == 0.0
+ @test hastags(q1, "myrq")
+ @test hastags(q1, "Link")
+
+ L, Q, q = lq(A, Linds)
+ @test length(inds(L)) == ninds + 1 #+1 to account for new lq,Link index.
+ @test length(inds(Q)) == 3 - ninds + 1
+ @test A ≈ Q * L atol = 1.0e-13 #With ITensors L*Q==Q*L
+ @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1.0e-13
+ @test q == commonind(Q, L)
+ @test hastags(q, "lq")
+ if (length(inds(L)) > 1)
+ @test is_lower(L, q) #specify the right index
+ end
+ L1, Q1, q1 = lq(A, Linds, Rinds; tags = "Link,mylq") #make sure the same call with both L & R indices give the same answer.
+ Q1 = replaceind(Q1, q1, q)
+ L1 = replaceind(L1, q1, q)
+ @test norm(Q - Q1) == 0.0
+ @test norm(L - L1) == 0.0
+ @test hastags(q1, "mylq")
+ @test hastags(q1, "Link")
+
+ Q, L, q = ql(A, Linds)
+ @test length(inds(Q)) == ninds + 1 #+1 to account for new ql,Link index.
+ @test length(inds(L)) == 3 - ninds + 1
+ @test A ≈ Q * L atol = 1.0e-13
+ @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1.0e-13
+ @test q == commonind(Q, L)
+ @test hastags(q, "ql")
+ if (length(inds(L)) > 1)
+ @test is_lower(q, L) #specify the left index
+ end
+ Q1, L1, q1 = ql(A, Linds, Rinds; tags = "Link,myql") #make sure the same call with both L & R indices give the same answer.
+ Q1 = replaceind(Q1, q1, q)
+ L1 = replaceind(L1, q1, q)
+ @test norm(Q - Q1) == 0.0
+ @test norm(L - L1) == 0.0
+ @test hastags(q1, "myql")
+ @test hastags(q1, "Link")
end
- Q1, L1, q1 = ql(A, Linds, Rinds; tags="Link,myql") #make sure the same call with both L & R indices give the same answer.
- Q1 = replaceind(Q1, q1, q)
- L1 = replaceind(L1, q1, q)
- @test norm(Q - Q1) == 0.0
- @test norm(L - L1) == 0.0
- @test hastags(q1, "myql")
- @test hastags(q1, "Link")
- end
-
- @testset "QR/RQ dense on MP0 tensor with all possible collections on Q,R" for ninds in [
- 0, 1, 2, 3, 4
- ]
- l = Index(5, "l")
- s = Index(2, "s")
- r = Index(10, "r")
- A = random_itensor(l, s, s', r)
- Ainds = inds(A)
- Q, R, q = qr(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error.
- @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index.
- @test length(inds(R)) == 4 - ninds + 1
- @test A ≈ Q * R atol = 1e-13
- @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
-
- R, Q, q = rq(A, Ainds[1:ninds])
- @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index.
- @test length(inds(Q)) == 4 - ninds + 1
- @test A ≈ Q * R atol = 1e-13 #With ITensors R*Q==Q*R
- @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
- end
-
- @testset "QR/RQ block sparse on MPS tensor with all possible collections on Q,R" for ninds in
- [
- 0, 1, 2, 3
- ]
- expected_Qflux = [QN(), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0)]
- expected_RLflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", -0), QN()]
- l = dag(Index(QN("Sz", 0) => 1, QN("Sz", 1) => 1, QN("Sz", -1) => 1; tags="l"))
- s = Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags="s")
- r = Index(QN("Sz", 0) => 1, QN("Sz", 1) => 1, QN("Sz", -1) => 1; tags="r")
- A = random_itensor(l, s, r)
- @test flux(A) == QN("Sz", 0)
- Ainds = inds(A)
- Q, R, q = qr(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error.
- @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index.
- @test length(inds(R)) == 3 - ninds + 1
- @test flux(Q) == expected_Qflux[ninds + 1]
- @test flux(R) == expected_RLflux[ninds + 1]
- @test A ≈ Q * R atol = 1e-13
- # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense.
- # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead.
- # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13
- @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13
-
- Q, L, q = ql(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error.
- @test length(inds(L)) == 3 - ninds + 1 #+1 to account for new rq,Link index.
- @test length(inds(Q)) == ninds + 1
- @test flux(Q) == expected_Qflux[ninds + 1]
- @test flux(L) == expected_RLflux[ninds + 1]
- @test A ≈ Q * L atol = 1e-13
- @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13
-
- R, Q, q = rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error.
- @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index.
- @test length(inds(Q)) == 3 - ninds + 1
- @test flux(Q) == expected_Qflux[ninds + 1]
- @test flux(R) == expected_RLflux[ninds + 1]
- @test A ≈ Q * R atol = 1e-13
- @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13
-
- L, Q, q = lq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error.
- @test length(inds(L)) == ninds + 1 #+1 to account for new rq,Link index.
- @test length(inds(Q)) == 3 - ninds + 1
- @test flux(Q) == expected_Qflux[ninds + 1]
- @test flux(L) == expected_RLflux[ninds + 1]
- @test A ≈ Q * L atol = 1e-13
- @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13
- end
-
- @testset "QR/QL block sparse on MPO tensor with all possible collections on Q,R" for ninds in
- [
- 0, 1, 2, 3, 4
- ]
- expected_Qflux = [QN(), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0)]
- expected_RLflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN()]
- l = dag(Index(QN("Sz", 0) => 3; tags="l"))
- s = Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags="s")
- r = Index(QN("Sz", 0) => 3; tags="r")
- A = random_itensor(l, s, dag(s'), r)
- @test flux(A) == QN("Sz", 0)
- Ainds = inds(A)
- Q, R, q = qr(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error.
- @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index.
- @test length(inds(R)) == 4 - ninds + 1
- @test flux(Q) == expected_Qflux[ninds + 1]
- @test flux(R) == expected_RLflux[ninds + 1]
- @test A ≈ Q * R atol = 1e-13
- # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense.
- # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead.
- # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13
- @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13
-
- Q, L, q = ql(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error.
- @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index.
- @test length(inds(L)) == 4 - ninds + 1
- @test flux(Q) == expected_Qflux[ninds + 1]
- @test flux(L) == expected_RLflux[ninds + 1]
- @test A ≈ Q * L atol = 1e-13
- @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1e-13
- end
-
- @testset "QR/QL/RQ/LQ dense with positive R" for ninds in [0, 1, 2, 3]
- l = Index(3, "l")
- s = Index(5, "s")
- r = Index(7, "r")
- A = random_itensor(l, s, s', r)
- Ainds = inds(A)
-
- Q, R, q = qr(A, Ainds[1:ninds]; positive=true)
- @test min(diag_upper(q, R)...) > 0.0
- @test A ≈ Q * R atol = 1e-13
- @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
- Q, L, q = ql(A, Ainds[1:ninds]; positive=true)
- @test min(diag_lower(q, L)...) > 0.0
- @test A ≈ Q * L atol = 1e-13
- @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
-
- R, Q, q = rq(A, Ainds[1:ninds]; positive=true)
- @test min(diag_lower(q, R)...) > 0.0 #transpose R is lower
- @test A ≈ Q * R atol = 1e-13
- @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
- L, Q, q = lq(A, Ainds[1:ninds]; positive=true)
- @test min(diag_upper(q, L)...) > 0.0 #transpose L is upper
- @test A ≈ Q * L atol = 1e-13
- @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
- end
-
- @testset "QR/QL block sparse with positive R" begin
- l = dag(Index(QN("Sz", 0) => 3; tags="l"))
- s = Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags="s")
- r = Index(QN("Sz", 0) => 3; tags="r")
- A = random_itensor(l, s, dag(s'), r)
- Q, R, q = qr(A, l, s, dag(s'); positive=true)
- @test min(diag(R)...) > 0.0
- @test A ≈ Q * R atol = 1e-13
- Q, L, q = ql(A, l, s, dag(s'); positive=true)
- @test min(diag(L)...) > 0.0
- @test A ≈ Q * L atol = 1e-13
- end
-
- @testset "factorize with QR" begin
- l = Index(5, "l")
- s = Index(2, "s")
- r = Index(10, "r")
- A = random_itensor(l, s, r)
- Q, R, = factorize(A, l, s; which_decomp="qr")
- q = commonind(Q, R)
- @test A ≈ Q * R atol = 1e-13
- @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
-
- R, Q, = factorize(A, l, s; which_decomp="qr", ortho="right")
- q = commonind(Q, R)
- @test A ≈ Q * R atol = 1e-13
- @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1e-13
- end
-
- @testset "eigen" begin
- i = Index(2, "i")
- j = Index(2, "j")
- A = random_itensor(i, i')
- eigA = eigen(A)
- Dt, Ut = eigen(NDTensors.tensor(A))
- eigArr = eigen(array(A))
- @test diag(array(eigA.D), 0) ≈ eigArr.values
- @test diag(array(Dt), 0) == eigArr.values
-
- @test_throws ArgumentError eigen(ITensor(NaN, i', i))
- @test_throws ArgumentError eigen(ITensor(NaN, i', i); ishermitian=true)
- @test_throws ArgumentError eigen(ITensor(complex(NaN), i', i))
- @test_throws ArgumentError eigen(ITensor(complex(NaN), i', i); ishermitian=true)
- @test_throws ArgumentError eigen(ITensor(Inf, i', i))
- @test_throws ArgumentError eigen(ITensor(Inf, i', i); ishermitian=true)
- @test_throws ArgumentError eigen(ITensor(complex(Inf), i', i))
- @test_throws ArgumentError eigen(ITensor(complex(Inf), i', i); ishermitian=true)
- end
-
- @testset "exp function" begin
- At = rand(10, 10)
- k = Index(10, "k")
- A = itensor(At + transpose(At), k, k')
- @test Array(exp(Hermitian(NDTensors.tensor(A)))) ≈ exp(At + transpose(At))
- end
-
- @testset "Spectrum" begin
- i = Index(100, "i")
- j = Index(100, "j")
-
- U, S, V = svd(rand(100, 100))
- S ./= norm(S)
- A = itensor(U * ITensors.diagm(0 => S) * V', i, j)
-
- spec = svd(A, i).spec
-
- @test eigs(spec) ≈ S .^ 2
- @test truncerror(spec) == 0.0
-
- spec = svd(A, i; maxdim=length(S) - 3).spec
- @test truncerror(spec) ≈ sum(S[(end - 2):end] .^ 2)
-
- @test entropy(Spectrum([0.5; 0.5], 0.0)) == log(2)
- @test entropy(Spectrum([1.0], 0.0)) == 0.0
- @test entropy(Spectrum([0.0], 0.0)) == 0.0
-
- @test isnothing(eigs(Spectrum(nothing, 1.0)))
- @test_throws ErrorException entropy(Spectrum(nothing, 1.0))
- @test truncerror(Spectrum(nothing, 1.0)) == 1.0
- end
-
- @testset "Eigen QN flux regression test" begin
- cutoff = 1E-12
- N = 4
- s = siteinds("S=1", N; conserve_qns=true)
- A = random_itensor(QN("Sz", 2), s[1], s[2], s[3])
-
- R = A * dag(prime(A, s[1], s[2]))
- F = eigen(R, (s[1], s[2]), (s[1]', s[2]'))
-
- @test flux(F.Vt) == QN("Sz", 0)
- end
-
- @testset "SVD block_mindim keyword" begin
- i = Index(
- [
- QN("Sz", 4) => 1,
- QN("Sz", 2) => 4,
- QN("Sz", 0) => 6,
- QN("Sz", -2) => 4,
- QN("Sz", -4) => 1,
- ],
- "i",
- )
- j = sim(i)
- X = random_itensor(QN("Sz", 0), i, j)
-
- min_blockdim = 2
- U, S, V = svd(X, i; cutoff=1E-1, min_blockdim)
- u = commonind(S, U)
-
- @test nblocks(u) == nblocks(i)
- for b in 1:nblocks(u)
- @test blockdim(u, b) == blockdim(i, b) || blockdim(u, b) >= min_blockdim
+
+ @testset "QR/RQ dense on MPO tensor with all possible collections on Q,R" for ninds in [
+ 0, 1, 2, 3, 4,
+ ]
+ l = Index(5, "l")
+ s = Index(2, "s")
+ r = Index(10, "r")
+ A = random_itensor(l, s, s', r)
+ Ainds = inds(A)
+ Q, R, q = qr(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error.
+ @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index.
+ @test length(inds(R)) == 4 - ninds + 1
+ @test A ≈ Q * R atol = 1.0e-13
+ @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1.0e-13
+
+ R, Q, q = rq(A, Ainds[1:ninds])
+ @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index.
+ @test length(inds(Q)) == 4 - ninds + 1
+ @test A ≈ Q * R atol = 1.0e-13 #With ITensors R*Q==Q*R
+ @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1.0e-13
end
- end
- @testset "factorize with mindim" begin
- l = Index(8, "l")
- s1 = Index(2, "s1")
- s2 = Index(2, "s2")
- r = Index(2, "r")
+ @testset "QR/RQ block sparse on MPS tensor with all possible collections on Q,R" for ninds in
+ [
+ 0, 1, 2, 3,
+ ]
+ expected_Qflux = [QN(), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0)]
+ expected_RLflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", -0), QN()]
+ l = dag(Index(QN("Sz", 0) => 1, QN("Sz", 1) => 1, QN("Sz", -1) => 1; tags = "l"))
+ s = Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags = "s")
+ r = Index(QN("Sz", 0) => 1, QN("Sz", 1) => 1, QN("Sz", -1) => 1; tags = "r")
+ A = random_itensor(l, s, r)
+ @test flux(A) == QN("Sz", 0)
+ Ainds = inds(A)
+ Q, R, q = qr(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error.
+ @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index.
+ @test length(inds(R)) == 3 - ninds + 1
+ @test flux(Q) == expected_Qflux[ninds + 1]
+ @test flux(R) == expected_RLflux[ninds + 1]
+ @test A ≈ Q * R atol = 1.0e-13
+ # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense.
+ # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead.
+ # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13
+ @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1.0e-13
+
+ Q, L, q = ql(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error.
+ @test length(inds(L)) == 3 - ninds + 1 #+1 to account for new ql,Link index.
+ @test length(inds(Q)) == ninds + 1
+ @test flux(Q) == expected_Qflux[ninds + 1]
+ @test flux(L) == expected_RLflux[ninds + 1]
+ @test A ≈ Q * L atol = 1.0e-13
+ @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1.0e-13
+
+ R, Q, q = rq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error.
+ @test length(inds(R)) == ninds + 1 #+1 to account for new rq,Link index.
+ @test length(inds(Q)) == 3 - ninds + 1
+ @test flux(Q) == expected_Qflux[ninds + 1]
+ @test flux(R) == expected_RLflux[ninds + 1]
+ @test A ≈ Q * R atol = 1.0e-13
+ @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1.0e-13
+
+ L, Q, q = lq(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error.
+ @test length(inds(L)) == ninds + 1 #+1 to account for new lq,Link index.
+ @test length(inds(Q)) == 3 - ninds + 1
+ @test flux(Q) == expected_Qflux[ninds + 1]
+ @test flux(L) == expected_RLflux[ninds + 1]
+ @test A ≈ Q * L atol = 1.0e-13
+ @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1.0e-13
+ end
- phi = random_itensor(l, s1, s2, r)
+ @testset "QR/QL block sparse on MPO tensor with all possible collections on Q,R" for ninds in
+ [
+ 0, 1, 2, 3, 4,
+ ]
+ expected_Qflux = [QN(), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0)]
+ expected_RLflux = [QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN("Sz", 0), QN()]
+ l = dag(Index(QN("Sz", 0) => 3; tags = "l"))
+ s = Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags = "s")
+ r = Index(QN("Sz", 0) => 3; tags = "r")
+ A = random_itensor(l, s, dag(s'), r)
+ @test flux(A) == QN("Sz", 0)
+ Ainds = inds(A)
+ Q, R, q = qr(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error.
+ @test length(inds(Q)) == ninds + 1 #+1 to account for new qr,Link index.
+ @test length(inds(R)) == 4 - ninds + 1
+ @test flux(Q) == expected_Qflux[ninds + 1]
+ @test flux(R) == expected_RLflux[ninds + 1]
+ @test A ≈ Q * R atol = 1.0e-13
+ # blocksparse - diag is not supported so we must convert Q*Q_dagger to dense.
+ # Also fails with error in permutedims so below we use norm(a-b)≈ 0.0 instead.
+ # @test dense(Q*dag(prime(Q, q))) ≈ δ(Float64, q, q') atol = 1e-13
+ @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1.0e-13
+
+ Q, L, q = ql(A, Ainds[1:ninds]) #calling qr(A) triggers not supported error.
+ @test length(inds(Q)) == ninds + 1 #+1 to account for new ql,Link index.
+ @test length(inds(L)) == 4 - ninds + 1
+ @test flux(Q) == expected_Qflux[ninds + 1]
+ @test flux(L) == expected_RLflux[ninds + 1]
+ @test A ≈ Q * L atol = 1.0e-13
+ @test norm(dense(Q * dag(prime(Q, q))) - δ(Float64, q, q')) ≈ 0.0 atol = 1.0e-13
+ end
- U, B = factorize(phi, (l, s1); ortho="left", mindim=8, which_decomp="eigen")
+ @testset "QR/QL/RQ/LQ dense with positive R" for ninds in [0, 1, 2, 3]
+ l = Index(3, "l")
+ s = Index(5, "s")
+ r = Index(7, "r")
+ A = random_itensor(l, s, s', r)
+ Ainds = inds(A)
+
+ Q, R, q = qr(A, Ainds[1:ninds]; positive = true)
+ @test min(diag_upper(q, R)...) > 0.0
+ @test A ≈ Q * R atol = 1.0e-13
+ @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1.0e-13
+ Q, L, q = ql(A, Ainds[1:ninds]; positive = true)
+ @test min(diag_lower(q, L)...) > 0.0
+ @test A ≈ Q * L atol = 1.0e-13
+ @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1.0e-13
+
+ R, Q, q = rq(A, Ainds[1:ninds]; positive = true)
+ @test min(diag_lower(q, R)...) > 0.0 #transpose R is lower
+ @test A ≈ Q * R atol = 1.0e-13
+ @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1.0e-13
+ L, Q, q = lq(A, Ainds[1:ninds]; positive = true)
+ @test min(diag_upper(q, L)...) > 0.0 #transpose L is upper
+ @test A ≈ Q * L atol = 1.0e-13
+ @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1.0e-13
+ end
- @test norm(U * B - phi) < 1E-5
- @test dim(commonind(U, B)) <= 4
- end
+ @testset "QR/QL block sparse with positive R" begin
+ l = dag(Index(QN("Sz", 0) => 3; tags = "l"))
+ s = Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags = "s")
+ r = Index(QN("Sz", 0) => 3; tags = "r")
+ A = random_itensor(l, s, dag(s'), r)
+ Q, R, q = qr(A, l, s, dag(s'); positive = true)
+ @test min(diag(R)...) > 0.0
+ @test A ≈ Q * R atol = 1.0e-13
+ Q, L, q = ql(A, l, s, dag(s'); positive = true)
+ @test min(diag(L)...) > 0.0
+ @test A ≈ Q * L atol = 1.0e-13
+ end
- @testset "Eigen of Fermionic Matrices" begin
- ITensors.enable_auto_fermion()
- s = Index([QN("Nf", 0, -1)=>2, QN("Nf", 1, -1)=>2], "s,Site,Fermion")
- t = Index([QN("Nf", 0, -1)=>2, QN("Nf", 1, -1)=>2], "t,Site,Fermion")
+ @testset "factorize with QR" begin
+ l = Index(5, "l")
+ s = Index(2, "s")
+ r = Index(10, "r")
+ A = random_itensor(l, s, r)
+ Q, R, = factorize(A, l, s; which_decomp = "qr")
+ q = commonind(Q, R)
+ @test A ≈ Q * R atol = 1.0e-13
+ @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1.0e-13
+
+ R, Q, = factorize(A, l, s; which_decomp = "qr", ortho = "right")
+ q = commonind(Q, R)
+ @test A ≈ Q * R atol = 1.0e-13
+ @test Q * dag(prime(Q, q)) ≈ δ(Float64, q, q') atol = 1.0e-13
+ end
- #
- # HPSD Operator (Out,In) case
- #
- M = random_itensor(s, dag(t))
- O = prime(M, s)*dag(M)
+ @testset "eigen" begin
+ i = Index(2, "i")
+ j = Index(2, "j")
+ A = random_itensor(i, i')
+ eigA = eigen(A)
+ Dt, Ut = eigen(NDTensors.tensor(A))
+ eigArr = eigen(array(A))
+ @test diag(array(eigA.D), 0) ≈ eigArr.values
+ @test diag(array(Dt), 0) == eigArr.values
+
+ @test_throws ArgumentError eigen(ITensor(NaN, i', i))
+ @test_throws ArgumentError eigen(ITensor(NaN, i', i); ishermitian = true)
+ @test_throws ArgumentError eigen(ITensor(complex(NaN), i', i))
+ @test_throws ArgumentError eigen(ITensor(complex(NaN), i', i); ishermitian = true)
+ @test_throws ArgumentError eigen(ITensor(Inf, i', i))
+ @test_throws ArgumentError eigen(ITensor(Inf, i', i); ishermitian = true)
+ @test_throws ArgumentError eigen(ITensor(complex(Inf), i', i))
+ @test_throws ArgumentError eigen(ITensor(complex(Inf), i', i); ishermitian = true)
+ end
- @test dir(inds(O)[1]) == ITensors.Out
- @test dir(inds(O)[2]) == ITensors.In
- linds = [s']
- rinds = [dag(s)]
- D_O, U = eigen(O, linds, rinds; ishermitian=true)
- @test norm(prime(U)*D_O*dag(U)-O) < 1E-10
- @test all(>=(0.0), diag(array(D_O)))
+ @testset "exp function" begin
+ At = rand(10, 10)
+ k = Index(10, "k")
+ A = itensor(At + transpose(At), k, k')
+ @test Array(exp(Hermitian(NDTensors.tensor(A)))) ≈ exp(At + transpose(At))
+ end
- #
- # HPSD Dual operator (In,Out) case
- #
- # Make ρ out of two squared states
- # to populate both blocks: (0,0) and (1,1)
- ψ0 = random_itensor(t, s)
- ρ0 = prime(dag(ψ0), s)*ψ0
-
- ψ2 = random_itensor(QN("Nf", 2, -1), t, s)
- ρ2 = prime(dag(ψ2), s)*ψ2
-
- ρ = ρ0/2 + ρ2/2
- @test dir(inds(ρ)[1]) == ITensors.In
- @test dir(inds(ρ)[2]) == ITensors.Out
-
- D_ρ, U = eigen(ρ, [dag(s)'], [s]; ishermitian=true)
- @test all(>=(0.0), diag(array(D_ρ)))
- @test norm(prime(U)*D_ρ*dag(U)-ρ) < 1E-10
- ITensors.disable_auto_fermion()
- end
+ @testset "Spectrum" begin
+ i = Index(100, "i")
+ j = Index(100, "j")
+
+ U, S, V = svd(rand(100, 100))
+ S ./= norm(S)
+ A = itensor(U * ITensors.diagm(0 => S) * V', i, j)
+
+ spec = svd(A, i).spec
+
+ @test eigs(spec) ≈ S .^ 2
+ @test truncerror(spec) == 0.0
+
+ spec = svd(A, i; maxdim = length(S) - 3).spec
+ @test truncerror(spec) ≈ sum(S[(end - 2):end] .^ 2)
+
+ @test entropy(Spectrum([0.5; 0.5], 0.0)) == log(2)
+ @test entropy(Spectrum([1.0], 0.0)) == 0.0
+ @test entropy(Spectrum([0.0], 0.0)) == 0.0
+
+ @test isnothing(eigs(Spectrum(nothing, 1.0)))
+ @test_throws ErrorException entropy(Spectrum(nothing, 1.0))
+ @test truncerror(Spectrum(nothing, 1.0)) == 1.0
+ end
+
+ @testset "Eigen QN flux regression test" begin
+ cutoff = 1.0e-12
+ N = 4
+ s = siteinds("S=1", N; conserve_qns = true)
+ A = random_itensor(QN("Sz", 2), s[1], s[2], s[3])
+
+ R = A * dag(prime(A, s[1], s[2]))
+ F = eigen(R, (s[1], s[2]), (s[1]', s[2]'))
+
+ @test flux(F.Vt) == QN("Sz", 0)
+ end
+
+ @testset "SVD block_mindim keyword" begin
+ i = Index(
+ [
+ QN("Sz", 4) => 1,
+ QN("Sz", 2) => 4,
+ QN("Sz", 0) => 6,
+ QN("Sz", -2) => 4,
+ QN("Sz", -4) => 1,
+ ],
+ "i",
+ )
+ j = sim(i)
+ X = random_itensor(QN("Sz", 0), i, j)
+
+ min_blockdim = 2
+ U, S, V = svd(X, i; cutoff = 1.0e-1, min_blockdim)
+ u = commonind(S, U)
+
+ @test nblocks(u) == nblocks(i)
+ for b in 1:nblocks(u)
+ @test blockdim(u, b) == blockdim(i, b) || blockdim(u, b) >= min_blockdim
+ end
+ end
+
+ @testset "factorize with mindim" begin
+ l = Index(8, "l")
+ s1 = Index(2, "s1")
+ s2 = Index(2, "s2")
+ r = Index(2, "r")
+
+ phi = random_itensor(l, s1, s2, r)
+
+ U, B = factorize(phi, (l, s1); ortho = "left", mindim = 8, which_decomp = "eigen")
+
+ @test norm(U * B - phi) < 1.0e-5
+ @test dim(commonind(U, B)) <= 4
+ end
+
+ @testset "Eigen of Fermionic Matrices" begin
+ ITensors.enable_auto_fermion()
+ s = Index([QN("Nf", 0, -1) => 2, QN("Nf", 1, -1) => 2], "s,Site,Fermion")
+ t = Index([QN("Nf", 0, -1) => 2, QN("Nf", 1, -1) => 2], "t,Site,Fermion")
+
+ #
+ # HPSD Operator (Out,In) case
+ #
+ M = random_itensor(s, dag(t))
+ O = prime(M, s) * dag(M)
+
+ @test dir(inds(O)[1]) == ITensors.Out
+ @test dir(inds(O)[2]) == ITensors.In
+ linds = [s']
+ rinds = [dag(s)]
+ D_O, U = eigen(O, linds, rinds; ishermitian = true)
+ @test norm(prime(U) * D_O * dag(U) - O) < 1.0e-10
+ @test all(>=(0.0), diag(array(D_O)))
+
+ #
+ # HPSD Dual operator (In,Out) case
+ #
+ # Make ρ out of two squared states
+ # to populate both blocks: (0,0) and (1,1)
+ ψ0 = random_itensor(t, s)
+ ρ0 = prime(dag(ψ0), s) * ψ0
+
+ ψ2 = random_itensor(QN("Nf", 2, -1), t, s)
+ ρ2 = prime(dag(ψ2), s) * ψ2
+
+ ρ = ρ0 / 2 + ρ2 / 2
+ @test dir(inds(ρ)[1]) == ITensors.In
+ @test dir(inds(ρ)[2]) == ITensors.Out
+
+ D_ρ, U = eigen(ρ, [dag(s)'], [s]; ishermitian = true)
+ @test all(>=(0.0), diag(array(D_ρ)))
+ @test norm(prime(U) * D_ρ * dag(U) - ρ) < 1.0e-10
+ ITensors.disable_auto_fermion()
+ end
end
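
For readers skimming the reformatted decomposition tests above, here is a minimal standalone sketch (not part of the diff) of the call pattern those tests exercise. The index names and dimensions are arbitrary, and it only uses calls that appear in the test file itself (`qr` returning the new bond index as a third value, plus `random_itensor`, `commonind`, `dag`, `prime`, `δ`):

```julia
using ITensors
using LinearAlgebra

l = Index(5, "l")
s = Index(2, "s")
r = Index(5, "r")
A = random_itensor(l, s, r)
Ainds = inds(A)

# Factor the first two indices into Q; the new bond index q is returned
# as a third value and carries the "Link,qr" tags checked in the tests.
Q, R, q = qr(A, Ainds[1:2])
@assert A ≈ Q * R                          # exact reconstruction
@assert q == commonind(Q, R)               # q links the two factors
@assert Q * dag(prime(Q, q)) ≈ δ(q, q')    # Q is an isometry over q
```
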
diff --git a/test/base/test_diagitensor.jl b/test/base/test_diagitensor.jl
index 92c031fa23..e5f9deb947 100644
--- a/test/base/test_diagitensor.jl
+++ b/test/base/test_diagitensor.jl
@@ -4,559 +4,559 @@ using LinearAlgebra
using Test
@testset "diag_itensor" begin
- d = 3
- i = Index(d, "i")
- j = Index(d, "j")
- k = Index(d, "k")
- l = Index(d, "l")
- m = Index(d, "m")
- n = Index(d, "n")
- o = Index(d, "o")
- p = Index(d, "p")
- q = Index(d, "q")
-
- v = collect(1:d)
- vr = randn(d)
-
- @testset "non-uniform diagonal values" begin
- @testset "diag_itensor constructor (no vector, order 2)" begin
- D = diag_itensor(i, j)
-
- @test eltype(D) == Float64
- for ii in 1:d, jj in 1:d
- if ii == jj
- @test D[i => ii, j => jj] == 0.0
- else
- @test D[i => ii, j => jj] == 0.0
- end
- end
- end
-
- @testset "diag_itensor constructor (no vector, order 3)" begin
- D = diag_itensor(i, j, k)
-
- @test eltype(D) == Float64
- for ii in 1:d, jj in 1:d, kk in 1:d
- if ii == jj == kk
- @test D[i => ii, j => jj, k => kk] == 0.0
- else
- @test D[i => ii, j => jj, k => kk] == 0.0
+ d = 3
+ i = Index(d, "i")
+ j = Index(d, "j")
+ k = Index(d, "k")
+ l = Index(d, "l")
+ m = Index(d, "m")
+ n = Index(d, "n")
+ o = Index(d, "o")
+ p = Index(d, "p")
+ q = Index(d, "q")
+
+ v = collect(1:d)
+ vr = randn(d)
+
+ @testset "non-uniform diagonal values" begin
+ @testset "diag_itensor constructor (no vector, order 2)" begin
+ D = diag_itensor(i, j)
+
+ @test eltype(D) == Float64
+ for ii in 1:d, jj in 1:d
+ if ii == jj
+ @test D[i => ii, j => jj] == 0.0
+ else
+ @test D[i => ii, j => jj] == 0.0
+ end
+ end
end
- end
- end
-
- @testset "diag_itensor constructor (no vector, complex)" begin
- D = diag_itensor(ComplexF64, i, j)
- @test eltype(D) == ComplexF64
- for ii in 1:d, jj in 1:d
- if ii == jj
- @test D[i => ii, j => jj] == complex(0.0)
- else
- @test D[i => ii, j => jj] == complex(0.0)
+ @testset "diag_itensor constructor (no vector, order 3)" begin
+ D = diag_itensor(i, j, k)
+
+ @test eltype(D) == Float64
+ for ii in 1:d, jj in 1:d, kk in 1:d
+ if ii == jj == kk
+ @test D[i => ii, j => jj, k => kk] == 0.0
+ else
+ @test D[i => ii, j => jj, k => kk] == 0.0
+ end
+ end
end
- end
- end
- @testset "diag" for ElType in (Float64, ComplexF64)
- A = diag_itensor(randn(ElType, d), i, j)
- dA = diag(A)
- @test dA isa DenseTensor{ElType,1}
- @test dA[1] == A[1, 1]
- @test dA[2] == A[2, 2]
- end
-
- @testset "diag_itensor constructor (vector, order 2)" begin
- D = diag_itensor(v, i, j)
-
- @test eltype(D) == Float64
- for ii in 1:d, jj in 1:d
- if ii == jj
- @test D[i => ii, j => jj] == v[ii]
- else
- @test D[i => ii, j => jj] == 0.0
+ @testset "diag_itensor constructor (no vector, complex)" begin
+ D = diag_itensor(ComplexF64, i, j)
+
+ @test eltype(D) == ComplexF64
+ for ii in 1:d, jj in 1:d
+ if ii == jj
+ @test D[i => ii, j => jj] == complex(0.0)
+ else
+ @test D[i => ii, j => jj] == complex(0.0)
+ end
+ end
end
- end
- end
-
- @testset "diag_itensor constructor (vector, order 3)" begin
- D = diag_itensor(v, i, j, k)
- @test eltype(D) == Float64
- for ii in 1:d, jj in 1:d, kk in 1:d
- if ii == jj == kk
- @test D[i => ii, j => jj, k => kk] == v[ii]
- else
- @test D[i => ii, j => jj, k => kk] == 0.0
+ @testset "diag" for ElType in (Float64, ComplexF64)
+ A = diag_itensor(randn(ElType, d), i, j)
+ dA = diag(A)
+ @test dA isa DenseTensor{ElType, 1}
+ @test dA[1] == A[1, 1]
+ @test dA[2] == A[2, 2]
end
- end
- end
-
- @testset "diag_itensor constructor (complex)" begin
- vc = v + im * v
- D = diag_itensor(vc, i, j, k)
- @test eltype(D) == ComplexF64
- for ii in 1:d, jj in 1:d, kk in 1:d
- if ii == jj == kk
- @test D[i => ii, j => jj, k => kk] == vc[ii]
- else
- @test D[i => ii, j => jj, k => kk] == complex(0.0)
+ @testset "diag_itensor constructor (vector, order 2)" begin
+ D = diag_itensor(v, i, j)
+
+ @test eltype(D) == Float64
+ for ii in 1:d, jj in 1:d
+ if ii == jj
+ @test D[i => ii, j => jj] == v[ii]
+ else
+ @test D[i => ii, j => jj] == 0.0
+ end
+ end
end
- end
- end
- @testset "reductions (sum, prod)" for elt in (
- Float32, Float64, Complex{Float32}, Complex{Float64}
- )
- a = diag_itensor(randn(elt, 2), Index(2), Index(2))
- @test sum(a) ≈ sum(array(a))
- @test sum(a) isa elt
- @test prod(a) ≈ prod(array(a))
- @test prod(a) isa elt
-
- a = diag_itensor(randn(elt, 1), Index(1), Index(1))
- @test sum(a) ≈ sum(array(a))
- @test sum(a) isa elt
- @test prod(a) ≈ prod(array(a))
- @test prod(a) isa elt
- end
-
- @testset "Complex operations" begin
- xr = randn(d)
- xi = randn(d)
- D = diag_itensor(xr + im * xi, i, j, k)
- @test eltype(D) == ComplexF64
- rD = real(D)
- iD = imag(D)
- @test eltype(rD) == Float64
- @test eltype(iD) == Float64
- @test typeof(storage(rD)) <: NDTensors.Diag
- @test norm(rD + im * iD - D) < 1E-8
- end
-
- @testset "Constructor AllowAlias/NeverAlias" begin
- vv = ones(d)
- D = diag_itensor(vv, i, j)
- @test eltype(D) === Float64
- D[1, 1] = 5.0
- @test vv[1] == 1.0
- @test vv[1] != D[1, 1]
-
- vv = ones(Int, d)
- D = diag_itensor(vv, i, j)
- @test eltype(D) === Float64
- D[1, 1] = 5.0
- @test vv[1] == 1.0
- @test vv[1] != D[1, 1]
-
- vv = ones(Int, d)
- D = diag_itensor(Int, vv, i, j)
- @test eltype(D) === Int
- D[1, 1] = 5
- @test vv[1] == 1
- @test vv[1] != D[1, 1]
-
- vv = ones(d)
- D = diagitensor(vv, i, j)
- @test eltype(D) === Float64
- D[1, 1] = 5.0
- @test vv[1] == 5.0
- @test vv[1] == D[1, 1]
-
- vv = ones(Int, d)
- D = diagitensor(vv, i, j)
- @test eltype(D) === Float64
- D[1, 1] = 5.0
- @test vv[1] == 1.0
- @test vv[1] != D[1, 1]
-
- vv = ones(Int, d)
- D = diagitensor(Int, vv, i, j)
- @test eltype(D) === Int
- D[1, 1] = 5
- @test vv[1] == 5
- @test vv[1] == D[1, 1]
-
- D = diag_itensor(1, i, j)
- @test eltype(D) === Float64
- D[1, 1] = 5
- @test D[1, 1] == 5
-
- D = diag_itensor(Int, 1, i, j)
- @test eltype(D) === Int
- D[1, 1] = 5
- @test D[1, 1] == 5
- end
-
- @testset "fill!" begin
- D = diag_itensor(ones(d), i, j, k)
- D = fill!(D, 2.0)
- for ii in 1:d
- @test D[i => ii, j => ii, k => ii] == 2.0
- end
-
- @test eltype(D) == Float64
- end
+ @testset "diag_itensor constructor (vector, order 3)" begin
+ D = diag_itensor(v, i, j, k)
+
+ @test eltype(D) == Float64
+ for ii in 1:d, jj in 1:d, kk in 1:d
+ if ii == jj == kk
+ @test D[i => ii, j => jj, k => kk] == v[ii]
+ else
+ @test D[i => ii, j => jj, k => kk] == 0.0
+ end
+ end
+ end
- @testset "Set elements" begin
- D = diag_itensor(i, j, k)
+ @testset "diag_itensor constructor (complex)" begin
+ vc = v + im * v
+ D = diag_itensor(vc, i, j, k)
+
+ @test eltype(D) == ComplexF64
+ for ii in 1:d, jj in 1:d, kk in 1:d
+ if ii == jj == kk
+ @test D[i => ii, j => jj, k => kk] == vc[ii]
+ else
+ @test D[i => ii, j => jj, k => kk] == complex(0.0)
+ end
+ end
+ end
- for ii in 1:d
- D[i => ii, j => ii, k => ii] = ii
- end
+ @testset "reductions (sum, prod)" for elt in (
+ Float32, Float64, Complex{Float32}, Complex{Float64},
+ )
+ a = diag_itensor(randn(elt, 2), Index(2), Index(2))
+ @test sum(a) ≈ sum(array(a))
+ @test sum(a) isa elt
+ @test prod(a) ≈ prod(array(a))
+ @test prod(a) isa elt
+
+ a = diag_itensor(randn(elt, 1), Index(1), Index(1))
+ @test sum(a) ≈ sum(array(a))
+ @test sum(a) isa elt
+ @test prod(a) ≈ prod(array(a))
+ @test prod(a) isa elt
+ end
- @test eltype(D) == Float64
- for ii in 1:d, jj in 1:d, kk in 1:d
- if ii == jj == kk
- @test D[i => ii, j => jj, k => kk] == ii
- else
- @test D[i => ii, j => jj, k => kk] == 0.0
+ @testset "Complex operations" begin
+ xr = randn(d)
+ xi = randn(d)
+ D = diag_itensor(xr + im * xi, i, j, k)
+ @test eltype(D) == ComplexF64
+ rD = real(D)
+ iD = imag(D)
+ @test eltype(rD) == Float64
+ @test eltype(iD) == Float64
+ @test typeof(storage(rD)) <: NDTensors.Diag
+ @test norm(rD + im * iD - D) < 1.0e-8
end
- end
- # Can't set off-diagonal elements
- @test_throws ErrorException D[i => 2, j => 1, k => 1] = 0.0
- @test_throws ErrorException D[i => 1, j => 2, k => 1] = 0.0
- end
+ @testset "Constructor AllowAlias/NeverAlias" begin
+ vv = ones(d)
+ D = diag_itensor(vv, i, j)
+ @test eltype(D) === Float64
+ D[1, 1] = 5.0
+ @test vv[1] == 1.0
+ @test vv[1] != D[1, 1]
+
+ vv = ones(Int, d)
+ D = diag_itensor(vv, i, j)
+ @test eltype(D) === Float64
+ D[1, 1] = 5.0
+ @test vv[1] == 1.0
+ @test vv[1] != D[1, 1]
+
+ vv = ones(Int, d)
+ D = diag_itensor(Int, vv, i, j)
+ @test eltype(D) === Int
+ D[1, 1] = 5
+ @test vv[1] == 1
+ @test vv[1] != D[1, 1]
+
+ vv = ones(d)
+ D = diagitensor(vv, i, j)
+ @test eltype(D) === Float64
+ D[1, 1] = 5.0
+ @test vv[1] == 5.0
+ @test vv[1] == D[1, 1]
+
+ vv = ones(Int, d)
+ D = diagitensor(vv, i, j)
+ @test eltype(D) === Float64
+ D[1, 1] = 5.0
+ @test vv[1] == 1.0
+ @test vv[1] != D[1, 1]
+
+ vv = ones(Int, d)
+ D = diagitensor(Int, vv, i, j)
+ @test eltype(D) === Int
+ D[1, 1] = 5
+ @test vv[1] == 5
+ @test vv[1] == D[1, 1]
+
+ D = diag_itensor(1, i, j)
+ @test eltype(D) === Float64
+ D[1, 1] = 5
+ @test D[1, 1] == 5
+
+ D = diag_itensor(Int, 1, i, j)
+ @test eltype(D) === Int
+ D[1, 1] = 5
+ @test D[1, 1] == 5
+ end
- @testset "Convert diag to dense" begin
- D = diag_itensor(v, i, j, k)
- T = dense(D)
+ @testset "fill!" begin
+ D = diag_itensor(ones(d), i, j, k)
+ D = fill!(D, 2.0)
+ for ii in 1:d
+ @test D[i => ii, j => ii, k => ii] == 2.0
+ end
- @test storage(T) isa NDTensors.Dense{Float64}
- for ii in 1:d, jj in 1:d, kk in 1:d
- if ii == jj == kk
- @test T[ii, ii, ii] == ii
- else
- @test T[i => ii, j => jj, k => kk] == 0.0
+ @test eltype(D) == Float64
end
- end
- end
- @testset "Convert diag to dense with denseblocks" begin
- D = diag_itensor(v, i, j, k)
- T = denseblocks(D)
+ @testset "Set elements" begin
+ D = diag_itensor(i, j, k)
+
+ for ii in 1:d
+ D[i => ii, j => ii, k => ii] = ii
+ end
+
+ @test eltype(D) == Float64
+ for ii in 1:d, jj in 1:d, kk in 1:d
+ if ii == jj == kk
+ @test D[i => ii, j => jj, k => kk] == ii
+ else
+ @test D[i => ii, j => jj, k => kk] == 0.0
+ end
+ end
+
+ # Can't set off-diagonal elements
+ @test_throws ErrorException D[i => 2, j => 1, k => 1] = 0.0
+ @test_throws ErrorException D[i => 1, j => 2, k => 1] = 0.0
+ end
- @test storage(T) isa NDTensors.Dense{Float64}
- for ii in 1:d, jj in 1:d, kk in 1:d
- if ii == jj == kk
- @test T[ii, ii, ii] == ii
- else
- @test T[i => ii, j => jj, k => kk] == 0.0
+ @testset "Convert diag to dense" begin
+ D = diag_itensor(v, i, j, k)
+ T = dense(D)
+
+ @test storage(T) isa NDTensors.Dense{Float64}
+ for ii in 1:d, jj in 1:d, kk in 1:d
+ if ii == jj == kk
+ @test T[ii, ii, ii] == ii
+ else
+ @test T[i => ii, j => jj, k => kk] == 0.0
+ end
+ end
end
- end
- end
- @testset "Add (Diag + Diag)" begin
- v1 = randn(d)
- v2 = randn(d)
- D1 = diag_itensor(v1, i, j, k)
- D2 = diag_itensor(v2, k, i, j)
+ @testset "Convert diag to dense with denseblocks" begin
+ D = diag_itensor(v, i, j, k)
+ T = denseblocks(D)
+
+ @test storage(T) isa NDTensors.Dense{Float64}
+ for ii in 1:d, jj in 1:d, kk in 1:d
+ if ii == jj == kk
+ @test T[ii, ii, ii] == ii
+ else
+ @test T[i => ii, j => jj, k => kk] == 0.0
+ end
+ end
+ end
- v3 = v1 + v2
- D3 = D1 + D2
+ @testset "Add (Diag + Diag)" begin
+ v1 = randn(d)
+ v2 = randn(d)
+ D1 = diag_itensor(v1, i, j, k)
+ D2 = diag_itensor(v2, k, i, j)
- @test D3 ≈ dense(D1) + dense(D2)
- for ii in 1:d
- @test D3[ii, ii, ii] == v3[ii]
- end
- end
+ v3 = v1 + v2
+ D3 = D1 + D2
- @testset "Add ( number * Diag + Diag)" begin
- v1 = randn(d)
- v2 = randn(d)
- D1 = Float32(2.0) * diag_itensor(v1, i, j, k)
- D2 = diag_itensor(v2, k, i, j)
+ @test D3 ≈ dense(D1) + dense(D2)
+ for ii in 1:d
+ @test D3[ii, ii, ii] == v3[ii]
+ end
+ end
- v3 = 2 * v1 + v2
- D3 = D1 + D2
+ @testset "Add ( number * Diag + Diag)" begin
+ v1 = randn(d)
+ v2 = randn(d)
+ D1 = Float32(2.0) * diag_itensor(v1, i, j, k)
+ D2 = diag_itensor(v2, k, i, j)
- @test D3 ≈ dense(D1) + dense(D2)
- for ii in 1:d
- @test D3[ii, ii, ii] == v3[ii]
- end
- end
+ v3 = 2 * v1 + v2
+ D3 = D1 + D2
- @testset "Add (Diag uniform + Diag uniform)" begin
- D1 = δ(i, j, k)
- D2 = δ(k, i, j)
+ @test D3 ≈ dense(D1) + dense(D2)
+ for ii in 1:d
+ @test D3[ii, ii, ii] == v3[ii]
+ end
+ end
- D3 = D1 + D2
+ @testset "Add (Diag uniform + Diag uniform)" begin
+ D1 = δ(i, j, k)
+ D2 = δ(k, i, j)
- @test D3 ≈ dense(D1) + dense(D2)
- end
+ D3 = D1 + D2
- @testset "Add (Diag + Dense)" begin
- D = diag_itensor(vr, i, j, k)
- A = random_itensor(k, j, i)
+ @test D3 ≈ dense(D1) + dense(D2)
+ end
- R = D + A
+ @testset "Add (Diag + Dense)" begin
+ D = diag_itensor(vr, i, j, k)
+ A = random_itensor(k, j, i)
- @test R ≈ dense(D) + A
- for ii in 1:d
- @test R[ii, ii, ii] ≈ D[ii, ii, ii] + A[ii, ii, ii]
- end
- end
+ R = D + A
- @testset "Add (Dense + Diag)" begin
- D = diag_itensor(vr, i, j, k)
- A = random_itensor(i, k, j)
+ @test R ≈ dense(D) + A
+ for ii in 1:d
+ @test R[ii, ii, ii] ≈ D[ii, ii, ii] + A[ii, ii, ii]
+ end
+ end
- R = A + D
+ @testset "Add (Dense + Diag)" begin
+ D = diag_itensor(vr, i, j, k)
+ A = random_itensor(i, k, j)
- @test R ≈ dense(D) + A
- for ii in 1:d
- @test R[ii, ii, ii] ≈ D[ii, ii, ii] + A[ii, ii, ii]
- end
- end
+ R = A + D
- @testset "Contraction (all contracted)" begin
- D = diag_itensor(v, i, j, k)
- A = random_itensor(j, k, i)
+ @test R ≈ dense(D) + A
+ for ii in 1:d
+ @test R[ii, ii, ii] ≈ D[ii, ii, ii] + A[ii, ii, ii]
+ end
+ end
- @test D * A ≈ dense(D) * A
- @test A * D ≈ dense(D) * A
- end
+ @testset "Contraction (all contracted)" begin
+ D = diag_itensor(v, i, j, k)
+ A = random_itensor(j, k, i)
- @testset "Contraction (all contracted) with different types" begin
- D = diag_itensor(v, i, j, k)
- A = random_itensor(Float32, j, k, i)
+ @test D * A ≈ dense(D) * A
+ @test A * D ≈ dense(D) * A
+ end
- @test D * A ≈ dense(D) * A
- @test A * D ≈ dense(D) * A
+ @testset "Contraction (all contracted) with different types" begin
+ D = diag_itensor(v, i, j, k)
+ A = random_itensor(Float32, j, k, i)
- D = diag_itensor(v, i, j, k)
- A = random_itensor(ComplexF32, j, k, i)
+ @test D * A ≈ dense(D) * A
+ @test A * D ≈ dense(D) * A
- @test D * A ≈ dense(D) * A
- @test A * D ≈ dense(D) * A
- end
+ D = diag_itensor(v, i, j, k)
+ A = random_itensor(ComplexF32, j, k, i)
- @testset "Contraction (all dense contracted)" begin
- D = diag_itensor(v, j, k, i)
- A = random_itensor(i, j)
-
- @test D * A ≈ dense(D) * A
- @test A * D ≈ dense(D) * A
- end
+ @test D * A ≈ dense(D) * A
+ @test A * D ≈ dense(D) * A
+ end
- @testset "Contraction Diag*Dense (general)" begin
- D = diag_itensor(v, l, i, k, j)
- A = random_itensor(m, k, n, l)
+ @testset "Contraction (all dense contracted)" begin
+ D = diag_itensor(v, j, k, i)
+ A = random_itensor(i, j)
- @test D * A ≈ dense(D) * A
- @test A * D ≈ dense(D) * A
- end
+ @test D * A ≈ dense(D) * A
+ @test A * D ≈ dense(D) * A
+ end
- @testset "Contraction Diag*Dense (outer)" begin
- D = diag_itensor(v, l, i, k, j)
- A = random_itensor(m, n)
+ @testset "Contraction Diag*Dense (general)" begin
+ D = diag_itensor(v, l, i, k, j)
+ A = random_itensor(m, k, n, l)
- @test order(D * A) == 6
- @test D * A ≈ dense(D) * A
- end
+ @test D * A ≈ dense(D) * A
+ @test A * D ≈ dense(D) * A
+ end
- @testset "Contraction Diag*Diag (outer)" begin
- D1 = diag_itensor(v, l, i)
- D2 = diag_itensor(v, m, n)
+ @testset "Contraction Diag*Dense (outer)" begin
+ D = diag_itensor(v, l, i, k, j)
+ A = random_itensor(m, n)
- @test order(D1 * D2) == 4
- @test D1 * D2 ≈ dense(D1) * dense(D2)
- end
+ @test order(D * A) == 6
+ @test D * A ≈ dense(D) * A
+ end
- @testset "Contraction Diag*Diag (all contracted)" begin
- D1 = diag_itensor(v, l, i, k, j)
- D2 = diag_itensor(vr, j, l, i, k)
+ @testset "Contraction Diag*Diag (outer)" begin
+ D1 = diag_itensor(v, l, i)
+ D2 = diag_itensor(v, m, n)
- @test D1 * D2 ≈ dense(D1) * dense(D2)
- @test D2 * D1 ≈ dense(D1) * dense(D2)
- end
+ @test order(D1 * D2) == 4
+ @test D1 * D2 ≈ dense(D1) * dense(D2)
+ end
- @testset "Contraction Diag*Diag (general)" begin
- D1 = diag_itensor(v, l, i, k, j)
- D2 = diag_itensor(vr, m, k, n, l)
+ @testset "Contraction Diag*Diag (all contracted)" begin
+ D1 = diag_itensor(v, l, i, k, j)
+ D2 = diag_itensor(vr, j, l, i, k)
- @test D1 * D2 ≈ dense(D1) * dense(D2)
- @test D2 * D1 ≈ dense(D1) * dense(D2)
- end
+ @test D1 * D2 ≈ dense(D1) * dense(D2)
+ @test D2 * D1 ≈ dense(D1) * dense(D2)
+ end
- @testset "Contraction Diag*Diag (no contracted)" begin
- D1 = diag_itensor(v, i, j)
- D2 = diag_itensor(vr, k, l)
+ @testset "Contraction Diag*Diag (general)" begin
+ D1 = diag_itensor(v, l, i, k, j)
+ D2 = diag_itensor(vr, m, k, n, l)
- @test D1 * D2 ≈ dense(D1) * dense(D2)
- @test D2 * D1 ≈ dense(D1) * dense(D2)
- end
+ @test D1 * D2 ≈ dense(D1) * dense(D2)
+ @test D2 * D1 ≈ dense(D1) * dense(D2)
+ end
- @testset "Contraction Diag*Scalar" begin
- D = diag_itensor(v, i, j)
- x = 2.0
+ @testset "Contraction Diag*Diag (no contracted)" begin
+ D1 = diag_itensor(v, i, j)
+ D2 = diag_itensor(vr, k, l)
- @test x * D ≈ x * dense(D)
- @test D * x ≈ x * dense(D)
+ @test D1 * D2 ≈ dense(D1) * dense(D2)
+ @test D2 * D1 ≈ dense(D1) * dense(D2)
+ end
- xc = 2 + 3im
+ @testset "Contraction Diag*Scalar" begin
+ D = diag_itensor(v, i, j)
+ x = 2.0
- @test xc * D ≈ xc * dense(D)
- @test D * xc ≈ xc * dense(D)
- end
- end
+ @test x * D ≈ x * dense(D)
+ @test D * x ≈ x * dense(D)
- @testset "Uniform diagonal ITensor" begin
- @testset "delta constructor (order 2)" begin
- D = δ(i, j)
+ xc = 2 + 3im
- @test eltype(D) == Float64
- for ii in 1:d, jj in 1:d
- if ii == jj
- @test D[i => ii, j => jj] == 1.0
- else
- @test D[i => ii, j => jj] == 0.0
+ @test xc * D ≈ xc * dense(D)
+ @test D * xc ≈ xc * dense(D)
end
- end
end
- @testset "delta constructor (order 3)" begin
- D = δ(i, j, k)
+ @testset "Uniform diagonal ITensor" begin
+ @testset "delta constructor (order 2)" begin
+ D = δ(i, j)
- @test eltype(D) == Float64
- for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k)
- if ii == jj == kk
- @test D[i => ii, j => jj, k => kk] == 1.0
- else
- @test D[i => ii, j => jj, k => kk] == 0.0
+ @test eltype(D) == Float64
+ for ii in 1:d, jj in 1:d
+ if ii == jj
+ @test D[i => ii, j => jj] == 1.0
+ else
+ @test D[i => ii, j => jj] == 0.0
+ end
+ end
end
- end
- end
- @testset "Set elements" begin
- D = δ(i, j, k)
+ @testset "delta constructor (order 3)" begin
+ D = δ(i, j, k)
+
+ @test eltype(D) == Float64
+ for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k)
+ if ii == jj == kk
+ @test D[i => ii, j => jj, k => kk] == 1.0
+ else
+ @test D[i => ii, j => jj, k => kk] == 0.0
+ end
+ end
+ end
- @test eltype(D) == Float64
+ @testset "Set elements" begin
+ D = δ(i, j, k)
- # Can't set elements of uniform diag tensor
- # TODO: should we make a function that converts
- # to a version that can?
- @test_throws ErrorException D[i => 1, j => 1, k => 1] = 2.0
- @test_throws ErrorException D[i => 2, j => 1, k => 1] = 4.3
- @test_throws ErrorException D[i => 1, j => 2, k => 1] = 2
- end
+ @test eltype(D) == Float64
- @testset "Convert diag uniform to dense" begin
- D = δ(i, j, k)
- T = dense(D)
+ # Can't set elements of uniform diag tensor
+ # TODO: should we make a function that converts
+ # to a version that can?
+ @test_throws ErrorException D[i => 1, j => 1, k => 1] = 2.0
+ @test_throws ErrorException D[i => 2, j => 1, k => 1] = 4.3
+ @test_throws ErrorException D[i => 1, j => 2, k => 1] = 2
+ end
- @test storage(T) isa NDTensors.Dense{Float64}
- for ii in 1:d, jj in 1:d, kk in 1:d
- if ii == jj == kk
- @test T[ii, ii, ii] == 1.0
- else
- @test T[i => ii, j => jj, k => kk] == 0.0
+ @testset "Convert diag uniform to dense" begin
+ D = δ(i, j, k)
+ T = dense(D)
+
+ @test storage(T) isa NDTensors.Dense{Float64}
+ for ii in 1:d, jj in 1:d, kk in 1:d
+ if ii == jj == kk
+ @test T[ii, ii, ii] == 1.0
+ else
+ @test T[i => ii, j => jj, k => kk] == 0.0
+ end
+ end
end
- end
- end
- @testset "Add (Diag uniform + Dense)" begin
- D = δ(i, j, k)
- A = random_itensor(k, j, i)
+ @testset "Add (Diag uniform + Dense)" begin
+ D = δ(i, j, k)
+ A = random_itensor(k, j, i)
- R = D + A
+ R = D + A
- @test R ≈ dense(D) + A
- for ii in 1:d
- @test R[ii, ii, ii] ≈ D[ii, ii, ii] + A[ii, ii, ii]
- end
- end
+ @test R ≈ dense(D) + A
+ for ii in 1:d
+ @test R[ii, ii, ii] ≈ D[ii, ii, ii] + A[ii, ii, ii]
+ end
+ end
- @testset "Contraction (Diag uniform * Dense, all contracted)" begin
- D = δ(i, j, k)
- A = random_itensor(j, k, i)
+ @testset "Contraction (Diag uniform * Dense, all contracted)" begin
+ D = δ(i, j, k)
+ A = random_itensor(j, k, i)
- @test D * A ≈ dense(D) * A
- @test A * D ≈ dense(D) * A
- end
+ @test D * A ≈ dense(D) * A
+ @test A * D ≈ dense(D) * A
+ end
- @testset "Contraction (Diag uniform * Dense, all dense contracted)" begin
- D = δ(j, k, i)
- A = random_itensor(i, j)
+ @testset "Contraction (Diag uniform * Dense, all dense contracted)" begin
+ D = δ(j, k, i)
+ A = random_itensor(i, j)
- @test D * A ≈ dense(D) * A
- @test A * D ≈ dense(D) * A
- end
+ @test D * A ≈ dense(D) * A
+ @test A * D ≈ dense(D) * A
+ end
- @testset "Contraction (Diag uniform * Dense, general)" begin
- D = δ(l, i, k, j)
- A = random_itensor(m, k, n, l)
+ @testset "Contraction (Diag uniform * Dense, general)" begin
+ D = δ(l, i, k, j)
+ A = random_itensor(m, k, n, l)
- @test D * A ≈ dense(D) * A
- @test A * D ≈ dense(D) * A
- end
+ @test D * A ≈ dense(D) * A
+ @test A * D ≈ dense(D) * A
+ end
- @testset "Contraction with different bond dimensions" begin
- idim = 3
- mdim = 2
+ @testset "Contraction with different bond dimensions" begin
+ idim = 3
+ mdim = 2
- i = Index(idim, "i")
- m = Index(mdim, "m")
+ i = Index(idim, "i")
+ m = Index(mdim, "m")
- A = random_itensor(i, i', m)
- D = δ(i, i')
+ A = random_itensor(i, i', m)
+ D = δ(i, i')
- @test D * A ≈ dense(D) * A
- @test A * D ≈ dense(D) * A
- end
+ @test D * A ≈ dense(D) * A
+ @test A * D ≈ dense(D) * A
+ end
- @testset "Contraction (Diag uniform * Dense, replace index)" begin
- D = δ(i, k)
- A = random_itensor(m, k, n, l)
+ @testset "Contraction (Diag uniform * Dense, replace index)" begin
+ D = δ(i, k)
+ A = random_itensor(m, k, n, l)
- @test D * A ≈ dense(D) * A
- @test A * D ≈ dense(D) * A
- end
+ @test D * A ≈ dense(D) * A
+ @test A * D ≈ dense(D) * A
+ end
- @testset "Contraction (Diag uniform * Dense, replace index 2)" begin
- D = δ(k, i)
- A = random_itensor(m, n, k, l)
+ @testset "Contraction (Diag uniform * Dense, replace index 2)" begin
+ D = δ(k, i)
+ A = random_itensor(m, n, k, l)
- @test D * A ≈ dense(D) * A
- @test A * D ≈ dense(D) * A
- end
+ @test D * A ≈ dense(D) * A
+ @test A * D ≈ dense(D) * A
+ end
- @testset "Contraction (Diag uniform * Diag uniform, all contracted)" begin
- D1 = δ(l, i, k, j)
- D2 = δ(j, l, i, k)
+ @testset "Contraction (Diag uniform * Diag uniform, all contracted)" begin
+ D1 = δ(l, i, k, j)
+ D2 = δ(j, l, i, k)
- @test D1 * D2 ≈ dense(D1) * dense(D2)
- @test D2 * D1 ≈ dense(D1) * dense(D2)
- end
+ @test D1 * D2 ≈ dense(D1) * dense(D2)
+ @test D2 * D1 ≈ dense(D1) * dense(D2)
+ end
- @testset "Contraction (Diag uniform * Diag uniform, general)" begin
- D1 = δ(l, i, k, j)
- D2 = δ(m, k, n, l)
+ @testset "Contraction (Diag uniform * Diag uniform, general)" begin
+ D1 = δ(l, i, k, j)
+ D2 = δ(m, k, n, l)
- @test D1 * D2 ≈ dense(D1) * dense(D2)
- @test D2 * D1 ≈ dense(D1) * dense(D2)
- end
+ @test D1 * D2 ≈ dense(D1) * dense(D2)
+ @test D2 * D1 ≈ dense(D1) * dense(D2)
+ end
- @testset "Contraction (Diag uniform * Diag uniform, no contracted)" begin
- D1 = δ(i, j)
- D2 = δ(k, l)
+ @testset "Contraction (Diag uniform * Diag uniform, no contracted)" begin
+ D1 = δ(i, j)
+ D2 = δ(k, l)
- @test D1 * D2 ≈ dense(D1) * dense(D2)
- @test D2 * D1 ≈ dense(D1) * dense(D2)
- end
+ @test D1 * D2 ≈ dense(D1) * dense(D2)
+ @test D2 * D1 ≈ dense(D1) * dense(D2)
+ end
- @testset "Rectangular Diag * Dense regression test (#969)" begin
- i = Index(3)
- j = Index(2)
- A = random_itensor(i)
- B = delta(i, j)
- C = A * B
- @test hassameinds(C, j)
- for n in 1:dim(j)
- @test C[n] == A[n]
- end
+ @testset "Rectangular Diag * Dense regression test (#969)" begin
+ i = Index(3)
+ j = Index(2)
+ A = random_itensor(i)
+ B = delta(i, j)
+ C = A * B
+ @test hassameinds(C, j)
+ for n in 1:dim(j)
+ @test C[n] == A[n]
+ end
+ end
end
- end
end
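
As a quick orientation for the `diag_itensor` hunks above, a minimal sketch (with made-up index names and values) of the property most of these tests assert: diagonal-sparse storage contracts and adds consistently with its dense conversion. Only constructors and operations shown in the tests are used here:

```julia
using ITensors

i = Index(3, "i")
j = Index(3, "j")
k = Index(3, "k")

# Non-uniform diagonal ITensor: only the entries D[n, n, n] are stored.
D = diag_itensor([1.0, 2.0, 3.0], i, j, k)
A = random_itensor(j, k, i)

@assert D * A ≈ dense(D) * A    # contraction matches the dense conversion
@assert D + A ≈ dense(D) + A    # so does addition with a dense ITensor

# Uniform diagonal (all ones): contracting δ(i, j) with B replaces j by i.
B = random_itensor(j, k)
@assert δ(i, j) * B ≈ dense(δ(i, j)) * B
```
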
diff --git a/test/base/test_empty.jl b/test/base/test_empty.jl
index ef272b7678..5dc94ef043 100644
--- a/test/base/test_empty.jl
+++ b/test/base/test_empty.jl
@@ -3,79 +3,79 @@ using ITensors.NDTensors
using Test
@testset "ITensor (Empty)" begin
- @testset "ITensor set elements" begin
- i = Index(2; tags="i")
+ @testset "ITensor set elements" begin
+ i = Index(2; tags = "i")
- E = ITensor(i', dag(i))
+ E = ITensor(i', dag(i))
- @test conj(E) == E
- @test 1.2 * E == E
+ @test conj(E) == E
+ @test 1.2 * E == E
- @test hassameinds(E, (i', i))
- @test order(E) == 2
- @test E[i' => 1, i => 1] == 0
+ @test hassameinds(E, (i', i))
+ @test order(E) == 2
+ @test E[i' => 1, i => 1] == 0
- E[i' => 1, i => 2] = 2.3
+ E[i' => 1, i => 2] = 2.3
- @test E[i' => 1, i => 1] == 0
- @test E[i' => 2, i => 1] == 0
- @test E[i' => 1, i => 2] == 2.3
- @test E[i' => 2, i => 2] == 0
- end
+ @test E[i' => 1, i => 1] == 0
+ @test E[i' => 2, i => 1] == 0
+ @test E[i' => 1, i => 2] == 2.3
+ @test E[i' => 2, i => 2] == 0
+ end
- @testset "ITensor (Empty) convert to complex" begin
- i = Index(2; tags="i")
- E = ITensor(i', dag(i))
- @test eltype(E) == NDTensors.EmptyNumber
+ @testset "ITensor (Empty) convert to complex" begin
+ i = Index(2; tags = "i")
+ E = ITensor(i', dag(i))
+ @test eltype(E) == NDTensors.EmptyNumber
- Ec = complex(E)
- @test eltype(Ec) == Complex{NDTensors.EmptyNumber}
- Ec[1, 1] = 2.3
- @test eltype(Ec) == ComplexF64
+ Ec = complex(E)
+ @test eltype(Ec) == Complex{NDTensors.EmptyNumber}
+ Ec[1, 1] = 2.3
+ @test eltype(Ec) == ComplexF64
- Ec = complex(E)
- @test eltype(Ec) == Complex{NDTensors.EmptyNumber}
- Ec[1, 1] = 2.3f0
- @test eltype(Ec) == ComplexF32
+ Ec = complex(E)
+ @test eltype(Ec) == Complex{NDTensors.EmptyNumber}
+ Ec[1, 1] = 2.3f0
+ @test eltype(Ec) == ComplexF32
- E2 = copy(E)
- E2c = complex!(E2)
- @test eltype(E2c) == Complex{NDTensors.EmptyNumber}
- end
+ E2 = copy(E)
+ E2c = complex!(E2)
+ @test eltype(E2c) == Complex{NDTensors.EmptyNumber}
+ end
- @testset "ITensor set elements (QN)" begin
- i = Index(QN(0) => 2, QN(1) => 2; tags="i")
+ @testset "ITensor set elements (QN)" begin
+ i = Index(QN(0) => 2, QN(1) => 2; tags = "i")
- E = ITensor(i', dag(i))
+ E = ITensor(i', dag(i))
- @test hassameinds(E, (i', i))
- @test order(E) == 2
- @test isnothing(flux(E))
- @test E[i' => 1, i => 3] == 0
+ @test hassameinds(E, (i', i))
+ @test order(E) == 2
+ @test isnothing(flux(E))
+ @test E[i' => 1, i => 3] == 0
- E[i' => 3, i => 2] = 2.3
+ E[i' => 3, i => 2] = 2.3
- @test flux(E) == QN(1)
+ @test flux(E) == QN(1)
- @test E[i' => 1, i => 1] == 0
- @test E[i' => 2, i => 1] == 0
- @test E[i' => 3, i => 2] == 2.3
- @test E[i' => 2, i => 3] == 0
- @test_throws ErrorException E[i' => 2, i => 3] = 3.2
- end
+ @test E[i' => 1, i => 1] == 0
+ @test E[i' => 2, i => 1] == 0
+ @test E[i' => 3, i => 2] == 2.3
+ @test E[i' => 2, i => 3] == 0
+ @test_throws ErrorException E[i' => 2, i => 3] = 3.2
+ end
- @testset "ITensor()" begin
- i = Index(QN(0) => 2, QN(1) => 2; tags="i")
+ @testset "ITensor()" begin
+ i = Index(QN(0) => 2, QN(1) => 2; tags = "i")
- E = ITensor()
+ E = ITensor()
- @test isnothing(flux(E))
- @test order(E) == 0
- @test_throws MethodError E[i' => 1, i => 3] = 0
+ @test isnothing(flux(E))
+ @test order(E) == 0
+ @test_throws MethodError E[i' => 1, i => 3] = 0
- A = random_itensor(i', dag(i))
- E += A
+ A = random_itensor(i', dag(i))
+ E += A
- @test norm(E - A) < 1E-8
- end
+ @test norm(E - A) < 1.0e-8
+ end
end
diff --git a/test/base/test_fermions.jl b/test/base/test_fermions.jl
index 3ed5183798..6be9f4b27f 100644
--- a/test/base/test_fermions.jl
+++ b/test/base/test_fermions.jl
@@ -4,830 +4,830 @@ import ITensors: Out, In
using ITensors.SiteTypes: op, siteind, siteinds
@testset "Fermions" begin
- ITensors.enable_auto_fermion()
-
- @testset "parity_sign function" begin
-
- # Full permutations
- p1 = [1, 2, 3]
- @test ITensors.parity_sign(p1) == +1
- p2 = [2, 1, 3]
- @test ITensors.parity_sign(p2) == -1
- p3 = [2, 3, 1]
- @test ITensors.parity_sign(p3) == +1
- p4 = [3, 2, 1]
- @test ITensors.parity_sign(p4) == -1
-
- ## Partial permutations
- p5 = [2, 7]
- @test ITensors.parity_sign(p5) == +1
- p6 = [5, 3]
- @test ITensors.parity_sign(p6) == -1
- p7 = [1, 9, 3, 10]
- @test ITensors.parity_sign(p7) == -1
- p8 = [1, 12, 9, 3, 11]
- @test ITensors.parity_sign(p8) == +1
- end
-
- @testset "Fermionic QNs" begin
- q = QN("Nf", 1, -1)
- @test isfermionic(q[1])
- @test fparity(q) == 1
-
- q = q + q + q
- @test val(q, "Nf") == 3
-
- p = QN("P", 1, -2)
- @test fparity(p) == 1
- @test isodd(p)
- @test fparity(p + p) == 0
- @test fparity(p + p + p) == 1
- end
-
- @testset "Fermionic IndexVals" begin
- sn = Index([QN("Nf", 0, -1) => 1, QN("Nf", 1, -1) => 1], "sn")
- @test fparity(sn => 1) == 0
- @test fparity(sn => 2) == 1
- @test !isodd(sn => 1)
- @test isodd(sn => 2)
-
- sp = Index([QN("Nfp", 0, -2) => 1, QN("Nfp", 1, -2) => 1], "sp")
- @test fparity(sp => 1) == 0
- @test fparity(sp => 2) == 1
- end
-
- @testset "Get and Set Elements" begin
- s = Index([QN("Nf", 0, -1) => 1, QN("Nf", 1, -1) => 1], "s")
-
- N = ITensor(s', dag(s))
-
- N[s' => 2, s => 2] = 1.0
- @test N[s' => 2, s => 2] ≈ +1.0
- @test N[s => 2, s' => 2] ≈ -1.0
-
- N[s => 2, s' => 2] = 1.0
- @test N[s' => 2, s => 2] ≈ -1.0
- @test N[s => 2, s' => 2] ≈ 1.0
-
- C = ITensor(s', dag(s))
-
- C[s' => 1, s => 2] = 1.0
- @test C[s' => 1, s => 2] ≈ 1.0
- @test C[s => 2, s' => 1] ≈ 1.0
-
- I = ITensor(s', dag(s))
- I[s' => 1, s => 1] = 1.0
- I[s' => 2, s => 2] = 1.0
- @test I[s' => 1, s => 1] ≈ 1.0
- @test I[s' => 2, s => 2] ≈ 1.0
-
- @test I[s => 1, s' => 1] ≈ 1.0
- @test I[s => 2, s' => 2] ≈ -1.0
- end
-
- @testset "Making operators different ways" begin
- s = Index([QN("Nf", 0, -1) => 1, QN("Nf", 1, -1) => 1], "s")
-
- N1 = ITensor(s', dag(s))
- N1[s' => 2, s => 2] = +1.0
-
- N2 = ITensor(dag(s), s')
- N2[s' => 2, s => 2] = +1.0
- @test norm(N1 - N2) ≈ 0.0
-
- N3 = ITensor(s', dag(s))
- N3[s => 2, s' => 2] = -1.0
- @test norm(N1 - N3) ≈ 0.0
-
- N4 = ITensor(dag(s), s')
- N4[s => 2, s' => 2] = -1.0
- @test norm(N1 - N4) ≈ 0.0
- end
-
- @testset "Permute and Add Fermionic ITensors" begin
- @testset "Permute Operators" begin
- s = Index([QN("Nf", 0, -1) => 1, QN("Nf", 1, -1) => 1], "s")
-
- N1 = ITensor(s', dag(s))
- N1[s' => 2, s => 2] = 1.0
-
- N2 = ITensor(dag(s), s')
- N2[s' => 2, s => 2] = 1.0
-
- pN1 = permute(N1, dag(s), s')
- @test pN1[s' => 2, s => 2] ≈ 1.0
-
- pN2 = permute(N2, s', dag(s))
- @test pN2[s' => 2, s => 2] ≈ 1.0
-
- #TODO add cases resulting in minus signs
- end
-
- @testset "Add Operators" begin
- s = Index([QN("Nf", 0, -1) => 1, QN("Nf", 1, -1) => 1], "sn")
-
- N1 = ITensor(s', dag(s))
- N1[s' => 2, s => 2] = 1.0
-
- N2 = ITensor(dag(s), s')
- N2[s' => 2, s => 2] = 1.0
+ ITensors.enable_auto_fermion()
- NN = N1 + N2
- @test NN[s' => 2, s => 2] ≈ 2.0
+ @testset "parity_sign function" begin
- NN = N1 + N1
- @test NN[s' => 2, s => 2] ≈ 2.0
+ # Full permutations
+ p1 = [1, 2, 3]
+ @test ITensors.parity_sign(p1) == +1
+ p2 = [2, 1, 3]
+ @test ITensors.parity_sign(p2) == -1
+ p3 = [2, 3, 1]
+ @test ITensors.parity_sign(p3) == +1
+ p4 = [3, 2, 1]
+ @test ITensors.parity_sign(p4) == -1
- NN = N2 + N2
- @test NN[s' => 2, s => 2] ≈ 2.0
+ ## Partial permutations
+ p5 = [2, 7]
+ @test ITensors.parity_sign(p5) == +1
+ p6 = [5, 3]
+ @test ITensors.parity_sign(p6) == -1
+ p7 = [1, 9, 3, 10]
+ @test ITensors.parity_sign(p7) == -1
+ p8 = [1, 12, 9, 3, 11]
+ @test ITensors.parity_sign(p8) == +1
end
- @testset "Wavefunction Tests" begin
- s = [Index([QN("N", 0, -2) => 2, QN("N", 1, -2) => 2], "s$n") for n in 1:4]
-
- psi0 = ITensor(s...)
-
- psi0[s[1] => 1, s[2] => 1, s[3] => 1, s[4] => 1] = 1111
- psi0[s[1] => 3, s[2] => 3, s[3] => 1, s[4] => 1] = 3311
- psi0[s[1] => 1, s[2] => 3, s[3] => 1, s[4] => 3] = 1313
-
- psi1 = permute(psi0, s[2], s[1], s[3], s[4])
- @test norm(psi1 - psi0) ≈ 0.0
-
- @test psi0[s[1] => 1, s[2] => 1, s[3] => 1, s[4] => 1] ≈ 1111
- @test psi1[s[1] => 1, s[2] => 1, s[3] => 1, s[4] => 1] ≈ 1111
- @test psi0[s[2] => 1, s[1] => 1, s[3] => 1, s[4] => 1] ≈ 1111
- @test psi1[s[2] => 1, s[1] => 1, s[3] => 1, s[4] => 1] ≈ 1111
+ @testset "Fermionic QNs" begin
+ q = QN("Nf", 1, -1)
+ @test isfermionic(q[1])
+ @test fparity(q) == 1
- @test psi0[s[1] => 3, s[2] => 3, s[3] => 1, s[4] => 1] ≈ 3311
- @test psi1[s[1] => 3, s[2] => 3, s[3] => 1, s[4] => 1] ≈ 3311
- @test psi0[s[2] => 3, s[1] => 3, s[3] => 1, s[4] => 1] ≈ -3311
- @test psi1[s[2] => 3, s[1] => 3, s[3] => 1, s[4] => 1] ≈ -3311
- @test psi0[s[4] => 1, s[2] => 3, s[1] => 3, s[3] => 1] ≈ -3311
- @test psi1[s[4] => 1, s[2] => 3, s[1] => 3, s[3] => 1] ≈ -3311
+ q = q + q + q
+ @test val(q, "Nf") == 3
- psi2 = permute(psi0, s[4], s[1], s[3], s[2])
- @test norm(psi2 - psi0) ≈ 0.0
- @test norm(psi2 - psi1) ≈ 0.0
-
- @test psi0[s[1] => 1, s[2] => 3, s[3] => 1, s[4] => 3] ≈ 1313
- @test psi1[s[1] => 1, s[2] => 3, s[3] => 1, s[4] => 3] ≈ 1313
- @test psi2[s[1] => 1, s[2] => 3, s[3] => 1, s[4] => 3] ≈ 1313
- @test psi0[s[4] => 3, s[1] => 1, s[3] => 1, s[2] => 3] ≈ -1313
- @test psi1[s[4] => 3, s[1] => 1, s[3] => 1, s[2] => 3] ≈ -1313
- @test psi2[s[4] => 3, s[1] => 1, s[3] => 1, s[2] => 3] ≈ -1313
+ p = QN("P", 1, -2)
+ @test fparity(p) == 1
+ @test isodd(p)
+ @test fparity(p + p) == 0
+ @test fparity(p + p + p) == 1
end
- end
-
- @testset "C Cdag operators" begin
- s = siteinds("Fermion", 3; conserve_qns=true)
-
- p110 = ITensor(s[1], s[2], s[3])
- p110[s[1] => 2, s[2] => 2, s[3] => 1] = 1.0
-
- p011 = ITensor(s[1], s[2], s[3])
- p011[s[1] => 1, s[2] => 2, s[3] => 2] = 1.0
-
- np011 = ITensor(s[1], s[2], s[3])
- np011[s[1] => 1, s[3] => 2, s[2] => 2] = 1.0
-
- dag_p011 = ITensor(dag(s[3]), dag(s[2]), dag(s[1]))
- dag_p011[s[3] => 2, s[2] => 2, s[1] => 1] = 1.0
-
- @test norm(dag(p011) - dag_p011) ≈ 0
- C1 = op(s, "C", 1)
- Cdag3 = op(s, "Cdag", 3)
+ @testset "Fermionic IndexVals" begin
+ sn = Index([QN("Nf", 0, -1) => 1, QN("Nf", 1, -1) => 1], "sn")
+ @test fparity(sn => 1) == 0
+ @test fparity(sn => 2) == 1
+ @test !isodd(sn => 1)
+ @test isodd(sn => 2)
- # Anti-commutator
- @test norm(Cdag3 * C1 + C1 * Cdag3) ≈ 0.0
-
- # Commutator
- @test norm(Cdag3 * C1 - C1 * Cdag3) ≈ 2.0
-
- let # <011|Cdag3*C1|110> = -1
- t1 = noprime(C1 * p110)
- t2 = noprime(Cdag3 * t1)
- @test scalar(dag_p011 * t2) ≈ -1.0
- end
-
- let # <011|C1*Cdag3|110> = +1
- t1 = noprime(Cdag3 * p110)
- t2 = noprime(C1 * t1)
- @test scalar(dag_p011 * t2) ≈ +1.0
- end
-
- let # <011|(Cdag3*C1)|110> = -1
- t = noprime((Cdag3 * C1) * p110)
- @test scalar(dag(p011) * t) ≈ -1.0
- end
-
- let # <011|(C1*Cdag3)|110> = +1
- t = noprime((C1 * Cdag3) * p110)
- @test scalar(dag(p011) * t) ≈ +1.0
+ sp = Index([QN("Nfp", 0, -2) => 1, QN("Nfp", 1, -2) => 1], "sp")
+ @test fparity(sp => 1) == 0
+ @test fparity(sp => 2) == 1
end
- #
- # Commuting B tensors
- #
- # These commute by carrying additional
-    # g-indices (Grassmann indices)
- #
+ @testset "Get and Set Elements" begin
+ s = Index([QN("Nf", 0, -1) => 1, QN("Nf", 1, -1) => 1], "s")
- g = Index(QN("Nf", 1, -1) => 1; tags="g")
+ N = ITensor(s', dag(s))
- Bdag3 = Cdag3 * setelt(dag(g) => 1)
- B1 = setelt(g => 1) * C1
+ N[s' => 2, s => 2] = 1.0
+ @test N[s' => 2, s => 2] ≈ +1.0
+ @test N[s => 2, s' => 2] ≈ -1.0
- # Commutator
- @test norm(Bdag3 * B1 - B1 * Bdag3) ≈ 0.0
+ N[s => 2, s' => 2] = 1.0
+ @test N[s' => 2, s => 2] ≈ -1.0
+ @test N[s => 2, s' => 2] ≈ 1.0
- # Anti-commutator
- @test norm(Bdag3 * B1 + B1 * Bdag3) ≈ 2.0
+ C = ITensor(s', dag(s))
- let # <011|Cdag3*C1|110> = <011|Bdag3*B1|110> = -1
- t1 = noprime(B1 * p110)
- t2 = noprime(Bdag3 * t1)
- @test scalar(dag(p011) * t2) ≈ -1.0
- end
+ C[s' => 1, s => 2] = 1.0
+ @test C[s' => 1, s => 2] ≈ 1.0
+ @test C[s => 2, s' => 1] ≈ 1.0
- let # <011|(Cdag3*C1)|110> = <011|(Bdag3*B1)|110> = -1
- t = noprime((Bdag3 * B1) * p110)
- @test scalar(dag(p011) * t) ≈ -1.0
- end
+ I = ITensor(s', dag(s))
+ I[s' => 1, s => 1] = 1.0
+ I[s' => 2, s => 2] = 1.0
+ @test I[s' => 1, s => 1] ≈ 1.0
+ @test I[s' => 2, s => 2] ≈ 1.0
- let # <011|Cdag3*C1|110> = <011|B1*Bdag3|110> = -1
- t1 = noprime(Bdag3 * p110)
- t2 = noprime(B1 * t1)
- @test scalar(dag(p011) * t2) ≈ -1.0
+ @test I[s => 1, s' => 1] ≈ 1.0
+ @test I[s => 2, s' => 2] ≈ -1.0
end
- let # <011|(Cdag3*C1)|110> = <011|(B1*Bdag3)|110> = -1
- t = noprime((B1 * Bdag3) * p110)
- @test scalar(dag(p011) * t) ≈ -1.0
- end
+ @testset "Making operators different ways" begin
+ s = Index([QN("Nf", 0, -1) => 1, QN("Nf", 1, -1) => 1], "s")
- #
- # Leave out middle fermion, test for cases <001|...|100>
- #
- p100 = ITensor(s[1], s[2], s[3])
- p100[s[1] => 2, s[2] => 1, s[3] => 1] = 1.0
+ N1 = ITensor(s', dag(s))
+ N1[s' => 2, s => 2] = +1.0
- p001 = ITensor(s[1], s[2], s[3])
- p001[s[1] => 1, s[2] => 1, s[3] => 2] = 1.0
+ N2 = ITensor(dag(s), s')
+ N2[s' => 2, s => 2] = +1.0
+ @test norm(N1 - N2) ≈ 0.0
- let # <001|Cdag3*C1|100> = <001|Bdag3*B1|100> = +1
- t1 = noprime(B1 * p100)
- t2 = noprime(Bdag3 * t1)
- @test scalar(dag(p001) * t2) ≈ +1.0
- end
+ N3 = ITensor(s', dag(s))
+ N3[s => 2, s' => 2] = -1.0
+ @test norm(N1 - N3) ≈ 0.0
- let # <001|Cdag3*C1|100> = <001|(Bdag3*B1)|100> = +1
- t = noprime((Bdag3 * B1) * p100)
- @test scalar(dag(p001) * t) ≈ +1.0
+ N4 = ITensor(dag(s), s')
+ N4[s => 2, s' => 2] = -1.0
+ @test norm(N1 - N4) ≈ 0.0
end
- let # <001|Cdag3*C1|100> = <001|B1*Bdag3|100> = +1
- t1 = noprime(Bdag3 * p100)
- t2 = noprime(B1 * t1)
- @test scalar(dag(p001) * t2) ≈ +1.0
- end
+ @testset "Permute and Add Fermionic ITensors" begin
+ @testset "Permute Operators" begin
+ s = Index([QN("Nf", 0, -1) => 1, QN("Nf", 1, -1) => 1], "s")
- let # <001|Cdag3*C1|100> = <001|(B1*Bdag3)|100> = +1
- t = noprime((B1 * Bdag3) * p100)
- @test scalar(dag(p001) * t) ≈ +1.0
- end
- end
+ N1 = ITensor(s', dag(s))
+ N1[s' => 2, s => 2] = 1.0
- @testset "Combiner conjugation" begin
- s = siteinds("Fermion", 4; conserve_qns=true)
- C = combiner(s[1], s[2])
- @test NDTensors.isconj(storage(C)) == false
+ N2 = ITensor(dag(s), s')
+ N2[s' => 2, s => 2] = 1.0
- dC = dag(C)
- @test NDTensors.isconj(storage(dC)) == true
- end
+ pN1 = permute(N1, dag(s), s')
+ @test pN1[s' => 2, s => 2] ≈ 1.0
- @testset "Combine Uncombine Permute Test" begin
- s = siteinds("Fermion", 4; conserve_qns=true)
+ pN2 = permute(N2, s', dag(s))
+ @test pN2[s' => 2, s => 2] ≈ 1.0
- @testset "Two Site Test" begin
- p11 = ITensor(s[1], s[2])
- p11[s[1] => 2, s[2] => 2] = 1.0
+ #TODO add cases resulting in minus signs
+ end
- C = combiner(s[1], s[2])
+ @testset "Add Operators" begin
+ s = Index([QN("Nf", 0, -1) => 1, QN("Nf", 1, -1) => 1], "sn")
- dp11 = dag(p11)
+ N1 = ITensor(s', dag(s))
+ N1[s' => 2, s => 2] = 1.0
- Cp11_A = C * p11
- dCp11_A = dag(Cp11_A)
- dp11_A = C * dCp11_A
- @test dp11_A ≈ dp11
+ N2 = ITensor(dag(s), s')
+ N2[s' => 2, s => 2] = 1.0
- Cp11_B = p11 * C
- dCp11_B = dag(Cp11_B)
- dp11_B = C * dCp11_B
- @test dp11_B ≈ dp11
- end
+ NN = N1 + N2
+ @test NN[s' => 2, s => 2] ≈ 2.0
- @testset "Longer two-site tests" begin
- s1, s2, s3, s4 = s
- C12 = combiner(s1, s2)
- C21 = combiner(s2, s1)
- C13 = combiner(s1, s3)
- C31 = combiner(s3, s1)
-
- T = random_itensor(QN("Nf", 3, -1), s1, s2, s3, s4)
- T .= abs.(T)
-
- #
- # 1a, 2a tests
- #
-
- c12 = combinedind(C12)
- c12T = C12 * T
- u12T = dag(C12) * c12T
- @test norm(u12T - T) < 1E-10
-
- c21 = combinedind(C21)
- c21T = C21 * T
- u21T = dag(C21) * c21T
- @test norm(u21T - T) < 1E-10
-
- c13 = combinedind(C13)
- c13T = C13 * T
- u13T = dag(C13) * c13T
- @test norm(u13T - T) < 1E-10
-
- c31 = combinedind(C31)
- c31T = C31 * T
- u31T = dag(C31) * c31T
- @test norm(u31T - T) < 1E-10
-
- #
- # 1b, 2b tests
- #
-
- dc12T = dag(C12) * dag(T)
- @test norm(dc12T - dag(c12T)) < 1E-10
- du12T = C12 * dc12T
- @test norm(du12T - dag(T)) < 1E-10
-
- dc21T = dag(C21) * dag(T)
- @test norm(dc21T - dag(c21T)) < 1E-10
- du21T = C21 * dc21T
- @test norm(du21T - dag(T)) < 1E-10
-
- dc13T = dag(C13) * dag(T)
- @test norm(dc13T - dag(c13T)) < 1E-10
- du13T = C13 * dc13T
- @test norm(du13T - dag(T)) < 1E-10
-
- dc31T = dag(C31) * dag(T)
- @test norm(dc31T - dag(c31T)) < 1E-10
- du31T = C31 * dc31T
- @test norm(du31T - dag(T)) < 1E-10
- end
+ NN = N1 + N1
+ @test NN[s' => 2, s => 2] ≈ 2.0
- @testset "Three Site Test" begin
- p111 = ITensor(s[1], s[2], s[3])
- p111[s[1] => 2, s[2] => 2, s[3] => 2] = 1.0
+ NN = N2 + N2
+ @test NN[s' => 2, s => 2] ≈ 2.0
+ end
- dp111 = dag(p111)
+ @testset "Wavefunction Tests" begin
+ s = [Index([QN("N", 0, -2) => 2, QN("N", 1, -2) => 2], "s$n") for n in 1:4]
- C = combiner(s[1], s[3])
- Cp111 = C * p111
- dCp111 = dag(Cp111)
- dp111_U = C * dCp111
- @test dp111_U ≈ dp111
- end
- end
+ psi0 = ITensor(s...)
- @testset "Mixed Arrow Combiner Tests" begin
- @testset "One wrong-way arrow" begin
- q1 = QN("Nf", 1, -1)
+ psi0[s[1] => 1, s[2] => 1, s[3] => 1, s[4] => 1] = 1111
+ psi0[s[1] => 3, s[2] => 3, s[3] => 1, s[4] => 1] = 3311
+ psi0[s[1] => 1, s[2] => 3, s[3] => 1, s[4] => 3] = 1313
- s0 = Index([q1 => 1]; tags="s0")
- s1 = Index([q1 => 1]; tags="s1")
- s2 = Index([q1 => 1]; tags="s2")
- s3 = Index([q1 => 1]; tags="s3")
- s4 = Index([q1 => 1]; tags="s4")
+ psi1 = permute(psi0, s[2], s[1], s[3], s[4])
+ @test norm(psi1 - psi0) ≈ 0.0
- A = random_itensor(QN("Nf", 0, -1), s0, s1, dag(s2), dag(s3))
- B = random_itensor(QN("Nf", 0, -1), s3, s2, dag(s1), dag(s4))
- A .= one.(A)
- B .= one.(B)
- @test norm(A) ≈ 1.0
- @test norm(B) ≈ 1.0
+ @test psi0[s[1] => 1, s[2] => 1, s[3] => 1, s[4] => 1] ≈ 1111
+ @test psi1[s[1] => 1, s[2] => 1, s[3] => 1, s[4] => 1] ≈ 1111
+ @test psi0[s[2] => 1, s[1] => 1, s[3] => 1, s[4] => 1] ≈ 1111
+ @test psi1[s[2] => 1, s[1] => 1, s[3] => 1, s[4] => 1] ≈ 1111
- Ru = A * B
+ @test psi0[s[1] => 3, s[2] => 3, s[3] => 1, s[4] => 1] ≈ 3311
+ @test psi1[s[1] => 3, s[2] => 3, s[3] => 1, s[4] => 1] ≈ 3311
+ @test psi0[s[2] => 3, s[1] => 3, s[3] => 1, s[4] => 1] ≈ -3311
+ @test psi1[s[2] => 3, s[1] => 3, s[3] => 1, s[4] => 1] ≈ -3311
+ @test psi0[s[4] => 1, s[2] => 3, s[1] => 3, s[3] => 1] ≈ -3311
+ @test psi1[s[4] => 1, s[2] => 3, s[1] => 3, s[3] => 1] ≈ -3311
- C = combiner(s3, s2, dag(s1))
- Bc = C * B
- Ac = A * dag(C)
- Rc = Ac * Bc
+ psi2 = permute(psi0, s[4], s[1], s[3], s[2])
+ @test norm(psi2 - psi0) ≈ 0.0
+ @test norm(psi2 - psi1) ≈ 0.0
- @test norm(Ru - Rc) < 1E-8
+ @test psi0[s[1] => 1, s[2] => 3, s[3] => 1, s[4] => 3] ≈ 1313
+ @test psi1[s[1] => 1, s[2] => 3, s[3] => 1, s[4] => 3] ≈ 1313
+ @test psi2[s[1] => 1, s[2] => 3, s[3] => 1, s[4] => 3] ≈ 1313
+ @test psi0[s[4] => 3, s[1] => 1, s[3] => 1, s[2] => 3] ≈ -1313
+ @test psi1[s[4] => 3, s[1] => 1, s[3] => 1, s[2] => 3] ≈ -1313
+ @test psi2[s[4] => 3, s[1] => 1, s[3] => 1, s[2] => 3] ≈ -1313
+ end
end
- @testset "Two wrong-way arrows" begin
- q1 = QN("Nf", 1, -1)
+ @testset "C Cdag operators" begin
+ s = siteinds("Fermion", 3; conserve_qns = true)
- s0 = Index([q1 => 1]; tags="s0")
- s1 = Index([q1 => 1]; tags="s1")
- s2 = Index([q1 => 1]; tags="s2")
- s3 = Index([q1 => 1]; tags="s3")
- s4 = Index([q1 => 1]; tags="s4")
+ p110 = ITensor(s[1], s[2], s[3])
+ p110[s[1] => 2, s[2] => 2, s[3] => 1] = 1.0
- A = random_itensor(QN("Nf", 2, -1), s0, s1, s2, dag(s3))
- B = random_itensor(QN("Nf", -2, -1), s3, dag(s2), dag(s1), dag(s4))
- A .= one.(A)
- B .= one.(B)
- @test norm(A) ≈ 1.0
- @test norm(B) ≈ 1.0
+ p011 = ITensor(s[1], s[2], s[3])
+ p011[s[1] => 1, s[2] => 2, s[3] => 2] = 1.0
- Ru = A * B
+ np011 = ITensor(s[1], s[2], s[3])
+ np011[s[1] => 1, s[3] => 2, s[2] => 2] = 1.0
- C = combiner(s3, dag(s2), dag(s1))
- Bc = C * B
- Ac = A * dag(C)
- Rc = Ac * Bc
+ dag_p011 = ITensor(dag(s[3]), dag(s[2]), dag(s[1]))
+ dag_p011[s[3] => 2, s[2] => 2, s[1] => 1] = 1.0
- @test norm(Ru - Rc) < 1E-8
- end
- end
-
- @testset "Permutedims Regression Test" begin
- s1 = Index([QN("N", 0, -1) => 1, QN("N", 1, -1) => 1], "s1")
- s2 = Index([QN("N", 0, -1) => 1, QN("N", 1, -1) => 1], "s2")
- i = Index([QN("N", 0, -1) => 1, QN("N", 1, -1) => 1, QN("N", 2, -1) => 1], "i")
-
- A = ITensor(QN("N", 4, -1), s1, s2, i)
- A[s1 => 2, s2 => 2, i => 3] = 223
-
- B = ITensor(QN("N", 4, -1), s1, i, s2)
- B[s1 => 2, i => 3, s2 => 2] = 223
- @test A ≈ B
-
- C = ITensor(QN("N", 4, -1), s1, i, s2)
- C[s2 => 2, i => 3, s1 => 2] = -223
- @test A ≈ C
- end
-
- @testset "Fermionic SVD" begin
- N = 4
- s = siteinds("Fermion", N; conserve_qns=true)
-
- A = random_itensor(QN("Nf", 2, -1), s[1], s[2], s[3], s[4])
- for n1 in 1:4, n2 in 1:4
- (n1 == n2) && continue
- U, S, V = svd(A, (s[n1], s[n2]))
- @test norm(U * S * V - A) < 1E-10
- end
- for n1 in 1:4, n2 in 1:4, n3 in 1:4
- (n1 == n2) && continue
- (n1 == n3) && continue
- (n2 == n3) && continue
- U, S, V = svd(A, (s[n1], s[n2], s[n3]))
- @test norm(U * S * V - A) < 1E-10
- end
+ @test norm(dag(p011) - dag_p011) ≈ 0
- B = random_itensor(QN("Nf", 3, -1), s[1], s[2], s[3], s[4])
- for n1 in 1:4, n2 in 1:4
- (n1 == n2) && continue
- U, S, V = svd(B, (s[n1], s[n2]))
- @test norm(U * S * V - B) < 1E-10
- end
- for n1 in 1:4, n2 in 1:4, n3 in 1:4
- (n1 == n2) && continue
- (n1 == n3) && continue
- (n2 == n3) && continue
- U, S, V = svd(B, (s[n1], s[n2], s[n3]))
- @test norm(U * S * V - B) < 1E-10
- end
- end # Fermionic SVD tests
-
- @testset "Fermionic SVD Arrow Cases" begin
- s = siteinds("Fermion", 3; conserve_qns=true)
-
- function id(i)
- if dir(i) == Out
- I = ITensor(i, dag(i)')
- else
- I = ITensor(dag(i)', i)
- end
- for n in 1:dim(i)
- I[n, n] = 1.0
- end
- return I
- end
+ C1 = op(s, "C", 1)
+ Cdag3 = op(s, "Cdag", 3)
- # Arrows: Out, Out
- let
- T = ITensor(s[1], s[2])
- T[1, 2] = 1.0
- T[2, 1] = 1.0
- U, S, V, spec, u, v = svd(T, s[1])
- @test norm(T - U * S * V) ≈ 0
- UU = dag(U) * prime(U, u)
- @test norm(UU - id(u)) ≈ 0
- VV = dag(V) * prime(V, v)
- @test norm(VV - id(v)) ≈ 0
- end
+ # Anti-commutator
+ @test norm(Cdag3 * C1 + C1 * Cdag3) ≈ 0.0
- # Arrows: In, Out
- let
- T = ITensor(dag(s[1]), s[2])
- T[2, 2] = 1.0
- U, S, V, spec, u, v = svd(T, s[1])
- @test norm(T - U * S * V) ≈ 0
- UU = dag(U) * prime(U, u)
- @test norm(UU - id(u)) ≈ 0
- VV = dag(V) * prime(V, v)
- @test norm(VV - id(v)) ≈ 0
- end
+ # Commutator
+ @test norm(Cdag3 * C1 - C1 * Cdag3) ≈ 2.0
- # Arrows: Out, In
- let
- T = ITensor(s[1], dag(s[2]))
- T[2, 2] = 1.0
- U, S, V, spec, u, v = svd(T, s[1])
- @test norm(T - U * S * V) ≈ 0
- UU = dag(U) * prime(U, u)
- @test norm(UU - id(u)) ≈ 0
- VV = dag(V) * prime(V, v)
- @test norm(VV - id(v)) ≈ 0
- end
+ let # <011|Cdag3*C1|110> = -1
+ t1 = noprime(C1 * p110)
+ t2 = noprime(Cdag3 * t1)
+ @test scalar(dag_p011 * t2) ≈ -1.0
+ end
- # Arrows: In, In
- let
- T = ITensor(dag(s[1]), dag(s[2]))
- T[1, 2] = 1.0
- U, S, V, spec, u, v = svd(T, s[1])
- @test norm(T - U * S * V) ≈ 0
- UU = dag(U) * prime(U, u)
- @test norm(UU - id(u)) ≈ 0
- VV = dag(V) * prime(V, v)
- @test norm(VV - id(v)) ≈ 0
- end
+ let # <011|C1*Cdag3|110> = +1
+ t1 = noprime(Cdag3 * p110)
+ t2 = noprime(C1 * t1)
+ @test scalar(dag_p011 * t2) ≈ +1.0
+ end
- # Arrows: Mixed, In
- let
- T = ITensor(dag(s[1]), s[2], dag(s[3]))
- T[1, 1, 1] = 1.0
- T[2, 2, 1] = 1.0
- U, S, V, spec, u, v = svd(T, [dag(s[1]), s[2]])
- @test norm(T - U * S * V) < 1E-14
- UU = dag(U) * prime(U, u)
- @test_broken norm(UU - id(u)) ≈ 0
- VV = dag(V) * prime(V, v)
- @test norm(VV - id(v)) ≈ 0
- end
+ let # <011|(Cdag3*C1)|110> = -1
+ t = noprime((Cdag3 * C1) * p110)
+ @test scalar(dag(p011) * t) ≈ -1.0
+ end
- # Arrows: Mixed, In
- # Try to fix
- let
- T = ITensor(dag(s[1]), s[2], dag(s[3]))
- T[1, 1, 1] = 1.0
- T[2, 2, 1] = 1.0
- U, S, V, spec, u, v = svd(T, [dag(s[1]), s[2]])
- @test norm(T - U * S * V) < 1E-14
- UU = dag(U) * prime(U, u)
- @test_broken norm(UU - id(u)) ≈ 0
- VV = dag(V) * prime(V, v)
- @test norm(VV - id(v)) ≈ 0
- end
+ let # <011|(C1*Cdag3)|110> = +1
+ t = noprime((C1 * Cdag3) * p110)
+ @test scalar(dag(p011) * t) ≈ +1.0
+ end
- #Factorize SVD Test. Specifying arrows on S.
- let
- l1, l2 = Index(QN("Nf", -1) => 1, QN("Nf", 1) => 1; tags="l1", dir=ITensors.In),
- Index(QN("Nf", 2) => 1, QN("Nf", 1) => 1; tags="l2", dir=ITensors.Out)
- r1, r2, r3 = Index(QN("Nf", -2) => 1, QN("Nf", 1) => 1; tags="r1", dir=ITensors.Out),
- Index(QN("Nf", 2) => 1, QN("Nf", 1) => 1; tags="r2", dir=ITensors.In),
- Index(QN("Nf", -2) => 1, QN("Nf", 1) => 1; tags="r3", dir=ITensors.In)
- A = random_itensor(l1, l2, r1, r2, r3)
-
- for dir in [ITensors.Out, ITensors.In]
- L, R, spec = ITensors.factorize_svd(A, l1, l2; dir, ortho="none")
- @test norm(L * R - A) <= 1e-14
- end
- end
- end
-
- @testset "Fermion Contraction with Combined Indices" begin
- N = 10
- s = siteinds("Fermion", N; conserve_qns=true)
+ #
+ # Commuting B tensors
+ #
+ # These commute by carrying additional
+        # g-indices (Grassmann indices)
+ #
- begin
- A = random_itensor(QN("Nf", 3, -1), s[1], s[2], s[3], s[4])
- B = random_itensor(QN("Nf", 2, -1), s[1], s[3], s[4])
+ g = Index(QN("Nf", 1, -1) => 1; tags = "g")
+
+ Bdag3 = Cdag3 * setelt(dag(g) => 1)
+ B1 = setelt(g => 1) * C1
+
+ # Commutator
+ @test norm(Bdag3 * B1 - B1 * Bdag3) ≈ 0.0
- CC = combiner(s[1], s[3])
+ # Anti-commutator
+ @test norm(Bdag3 * B1 + B1 * Bdag3) ≈ 2.0
- cA = CC * A
- cB = CC * B
+ let # <011|Cdag3*C1|110> = <011|Bdag3*B1|110> = -1
+ t1 = noprime(B1 * p110)
+ t2 = noprime(Bdag3 * t1)
+ @test scalar(dag(p011) * t2) ≈ -1.0
+ end
+
+ let # <011|(Cdag3*C1)|110> = <011|(Bdag3*B1)|110> = -1
+ t = noprime((Bdag3 * B1) * p110)
+ @test scalar(dag(p011) * t) ≈ -1.0
+ end
+
+ let # <011|Cdag3*C1|110> = <011|B1*Bdag3|110> = -1
+ t1 = noprime(Bdag3 * p110)
+ t2 = noprime(B1 * t1)
+ @test scalar(dag(p011) * t2) ≈ -1.0
+ end
+
+ let # <011|(Cdag3*C1)|110> = <011|(B1*Bdag3)|110> = -1
+ t = noprime((B1 * Bdag3) * p110)
+ @test scalar(dag(p011) * t) ≈ -1.0
+ end
+
+ #
+ # Leave out middle fermion, test for cases <001|...|100>
+ #
+ p100 = ITensor(s[1], s[2], s[3])
+ p100[s[1] => 2, s[2] => 1, s[3] => 1] = 1.0
+
+ p001 = ITensor(s[1], s[2], s[3])
+ p001[s[1] => 1, s[2] => 1, s[3] => 2] = 1.0
+
+ let # <001|Cdag3*C1|100> = <001|Bdag3*B1|100> = +1
+ t1 = noprime(B1 * p100)
+ t2 = noprime(Bdag3 * t1)
+ @test scalar(dag(p001) * t2) ≈ +1.0
+ end
+
+ let # <001|Cdag3*C1|100> = <001|(Bdag3*B1)|100> = +1
+ t = noprime((Bdag3 * B1) * p100)
+ @test scalar(dag(p001) * t) ≈ +1.0
+ end
+
+ let # <001|Cdag3*C1|100> = <001|B1*Bdag3|100> = +1
+ t1 = noprime(Bdag3 * p100)
+ t2 = noprime(B1 * t1)
+ @test scalar(dag(p001) * t2) ≈ +1.0
+ end
+
+ let # <001|Cdag3*C1|100> = <001|(B1*Bdag3)|100> = +1
+ t = noprime((B1 * Bdag3) * p100)
+ @test scalar(dag(p001) * t) ≈ +1.0
+ end
+ end
+
+ @testset "Combiner conjugation" begin
+ s = siteinds("Fermion", 4; conserve_qns = true)
+ C = combiner(s[1], s[2])
+ @test NDTensors.isconj(storage(C)) == false
+
+ dC = dag(C)
+ @test NDTensors.isconj(storage(dC)) == true
+ end
+
+ @testset "Combine Uncombine Permute Test" begin
+ s = siteinds("Fermion", 4; conserve_qns = true)
+
+ @testset "Two Site Test" begin
+ p11 = ITensor(s[1], s[2])
+ p11[s[1] => 2, s[2] => 2] = 1.0
+
+ C = combiner(s[1], s[2])
+
+ dp11 = dag(p11)
+
+ Cp11_A = C * p11
+ dCp11_A = dag(Cp11_A)
+ dp11_A = C * dCp11_A
+ @test dp11_A ≈ dp11
+
+ Cp11_B = p11 * C
+ dCp11_B = dag(Cp11_B)
+ dp11_B = C * dCp11_B
+ @test dp11_B ≈ dp11
+ end
+
+ @testset "Longer two-site tests" begin
+ s1, s2, s3, s4 = s
+ C12 = combiner(s1, s2)
+ C21 = combiner(s2, s1)
+ C13 = combiner(s1, s3)
+ C31 = combiner(s3, s1)
+
+ T = random_itensor(QN("Nf", 3, -1), s1, s2, s3, s4)
+ T .= abs.(T)
+
+ #
+ # 1a, 2a tests
+ #
+
+ c12 = combinedind(C12)
+ c12T = C12 * T
+ u12T = dag(C12) * c12T
+ @test norm(u12T - T) < 1.0e-10
+
+ c21 = combinedind(C21)
+ c21T = C21 * T
+ u21T = dag(C21) * c21T
+ @test norm(u21T - T) < 1.0e-10
+
+ c13 = combinedind(C13)
+ c13T = C13 * T
+ u13T = dag(C13) * c13T
+ @test norm(u13T - T) < 1.0e-10
+
+ c31 = combinedind(C31)
+ c31T = C31 * T
+ u31T = dag(C31) * c31T
+ @test norm(u31T - T) < 1.0e-10
+
+ #
+ # 1b, 2b tests
+ #
+
+ dc12T = dag(C12) * dag(T)
+ @test norm(dc12T - dag(c12T)) < 1.0e-10
+ du12T = C12 * dc12T
+ @test norm(du12T - dag(T)) < 1.0e-10
+
+ dc21T = dag(C21) * dag(T)
+ @test norm(dc21T - dag(c21T)) < 1.0e-10
+ du21T = C21 * dc21T
+ @test norm(du21T - dag(T)) < 1.0e-10
+
+ dc13T = dag(C13) * dag(T)
+ @test norm(dc13T - dag(c13T)) < 1.0e-10
+ du13T = C13 * dc13T
+ @test norm(du13T - dag(T)) < 1.0e-10
+
+ dc31T = dag(C31) * dag(T)
+ @test norm(dc31T - dag(c31T)) < 1.0e-10
+ du31T = C31 * dc31T
+ @test norm(du31T - dag(T)) < 1.0e-10
+ end
+
+ @testset "Three Site Test" begin
+ p111 = ITensor(s[1], s[2], s[3])
+ p111[s[1] => 2, s[2] => 2, s[3] => 2] = 1.0
+
+ dp111 = dag(p111)
+
+ C = combiner(s[1], s[3])
+ Cp111 = C * p111
+ dCp111 = dag(Cp111)
+ dp111_U = C * dCp111
+ @test dp111_U ≈ dp111
+ end
+ end
+
+ @testset "Mixed Arrow Combiner Tests" begin
+ @testset "One wrong-way arrow" begin
+ q1 = QN("Nf", 1, -1)
+
+ s0 = Index([q1 => 1]; tags = "s0")
+ s1 = Index([q1 => 1]; tags = "s1")
+ s2 = Index([q1 => 1]; tags = "s2")
+ s3 = Index([q1 => 1]; tags = "s3")
+ s4 = Index([q1 => 1]; tags = "s4")
+
+ A = random_itensor(QN("Nf", 0, -1), s0, s1, dag(s2), dag(s3))
+ B = random_itensor(QN("Nf", 0, -1), s3, s2, dag(s1), dag(s4))
+ A .= one.(A)
+ B .= one.(B)
+ @test norm(A) ≈ 1.0
+ @test norm(B) ≈ 1.0
+
+ Ru = A * B
+
+ C = combiner(s3, s2, dag(s1))
+ Bc = C * B
+ Ac = A * dag(C)
+ Rc = Ac * Bc
+
+ @test norm(Ru - Rc) < 1.0e-8
+ end
+
+ @testset "Two wrong-way arrows" begin
+ q1 = QN("Nf", 1, -1)
+
+ s0 = Index([q1 => 1]; tags = "s0")
+ s1 = Index([q1 => 1]; tags = "s1")
+ s2 = Index([q1 => 1]; tags = "s2")
+ s3 = Index([q1 => 1]; tags = "s3")
+ s4 = Index([q1 => 1]; tags = "s4")
+
+ A = random_itensor(QN("Nf", 2, -1), s0, s1, s2, dag(s3))
+ B = random_itensor(QN("Nf", -2, -1), s3, dag(s2), dag(s1), dag(s4))
+ A .= one.(A)
+ B .= one.(B)
+ @test norm(A) ≈ 1.0
+ @test norm(B) ≈ 1.0
+
+ Ru = A * B
+
+ C = combiner(s3, dag(s2), dag(s1))
+ Bc = C * B
+ Ac = A * dag(C)
+ Rc = Ac * Bc
+
+ @test norm(Ru - Rc) < 1.0e-8
+ end
+ end
+
+ @testset "Permutedims Regression Test" begin
+ s1 = Index([QN("N", 0, -1) => 1, QN("N", 1, -1) => 1], "s1")
+ s2 = Index([QN("N", 0, -1) => 1, QN("N", 1, -1) => 1], "s2")
+ i = Index([QN("N", 0, -1) => 1, QN("N", 1, -1) => 1, QN("N", 2, -1) => 1], "i")
+
+ A = ITensor(QN("N", 4, -1), s1, s2, i)
+ A[s1 => 2, s2 => 2, i => 3] = 223
+
+ B = ITensor(QN("N", 4, -1), s1, i, s2)
+ B[s1 => 2, i => 3, s2 => 2] = 223
+ @test A ≈ B
+
+ C = ITensor(QN("N", 4, -1), s1, i, s2)
+ C[s2 => 2, i => 3, s1 => 2] = -223
+ @test A ≈ C
+ end
+
+ @testset "Fermionic SVD" begin
+ N = 4
+ s = siteinds("Fermion", N; conserve_qns = true)
+
+ A = random_itensor(QN("Nf", 2, -1), s[1], s[2], s[3], s[4])
+ for n1 in 1:4, n2 in 1:4
+ (n1 == n2) && continue
+ U, S, V = svd(A, (s[n1], s[n2]))
+ @test norm(U * S * V - A) < 1.0e-10
+ end
+ for n1 in 1:4, n2 in 1:4, n3 in 1:4
+ (n1 == n2) && continue
+ (n1 == n3) && continue
+ (n2 == n3) && continue
+ U, S, V = svd(A, (s[n1], s[n2], s[n3]))
+ @test norm(U * S * V - A) < 1.0e-10
+ end
+
+ B = random_itensor(QN("Nf", 3, -1), s[1], s[2], s[3], s[4])
+ for n1 in 1:4, n2 in 1:4
+ (n1 == n2) && continue
+ U, S, V = svd(B, (s[n1], s[n2]))
+ @test norm(U * S * V - B) < 1.0e-10
+ end
+ for n1 in 1:4, n2 in 1:4, n3 in 1:4
+ (n1 == n2) && continue
+ (n1 == n3) && continue
+ (n2 == n3) && continue
+ U, S, V = svd(B, (s[n1], s[n2], s[n3]))
+ @test norm(U * S * V - B) < 1.0e-10
+ end
+ end # Fermionic SVD tests
+
+ @testset "Fermionic SVD Arrow Cases" begin
+ s = siteinds("Fermion", 3; conserve_qns = true)
+
+ function id(i)
+ if dir(i) == Out
+ I = ITensor(i, dag(i)')
+ else
+ I = ITensor(dag(i)', i)
+ end
+ for n in 1:dim(i)
+ I[n, n] = 1.0
+ end
+ return I
+ end
+
+ # Arrows: Out, Out
+ let
+ T = ITensor(s[1], s[2])
+ T[1, 2] = 1.0
+ T[2, 1] = 1.0
+ U, S, V, spec, u, v = svd(T, s[1])
+ @test norm(T - U * S * V) ≈ 0
+ UU = dag(U) * prime(U, u)
+ @test norm(UU - id(u)) ≈ 0
+ VV = dag(V) * prime(V, v)
+ @test norm(VV - id(v)) ≈ 0
+ end
+
+ # Arrows: In, Out
+ let
+ T = ITensor(dag(s[1]), s[2])
+ T[2, 2] = 1.0
+ U, S, V, spec, u, v = svd(T, s[1])
+ @test norm(T - U * S * V) ≈ 0
+ UU = dag(U) * prime(U, u)
+ @test norm(UU - id(u)) ≈ 0
+ VV = dag(V) * prime(V, v)
+ @test norm(VV - id(v)) ≈ 0
+ end
+
+ # Arrows: Out, In
+ let
+ T = ITensor(s[1], dag(s[2]))
+ T[2, 2] = 1.0
+ U, S, V, spec, u, v = svd(T, s[1])
+ @test norm(T - U * S * V) ≈ 0
+ UU = dag(U) * prime(U, u)
+ @test norm(UU - id(u)) ≈ 0
+ VV = dag(V) * prime(V, v)
+ @test norm(VV - id(v)) ≈ 0
+ end
+
+ # Arrows: In, In
+ let
+ T = ITensor(dag(s[1]), dag(s[2]))
+ T[1, 2] = 1.0
+ U, S, V, spec, u, v = svd(T, s[1])
+ @test norm(T - U * S * V) ≈ 0
+ UU = dag(U) * prime(U, u)
+ @test norm(UU - id(u)) ≈ 0
+ VV = dag(V) * prime(V, v)
+ @test norm(VV - id(v)) ≈ 0
+ end
+
+ # Arrows: Mixed, In
+ let
+ T = ITensor(dag(s[1]), s[2], dag(s[3]))
+ T[1, 1, 1] = 1.0
+ T[2, 2, 1] = 1.0
+ U, S, V, spec, u, v = svd(T, [dag(s[1]), s[2]])
+ @test norm(T - U * S * V) < 1.0e-14
+ UU = dag(U) * prime(U, u)
+ @test_broken norm(UU - id(u)) ≈ 0
+ VV = dag(V) * prime(V, v)
+ @test norm(VV - id(v)) ≈ 0
+ end
+
+ # Arrows: Mixed, In
+ # Try to fix
+ let
+ T = ITensor(dag(s[1]), s[2], dag(s[3]))
+ T[1, 1, 1] = 1.0
+ T[2, 2, 1] = 1.0
+ U, S, V, spec, u, v = svd(T, [dag(s[1]), s[2]])
+ @test norm(T - U * S * V) < 1.0e-14
+ UU = dag(U) * prime(U, u)
+ @test_broken norm(UU - id(u)) ≈ 0
+ VV = dag(V) * prime(V, v)
+ @test norm(VV - id(v)) ≈ 0
+ end
+
+ #Factorize SVD Test. Specifying arrows on S.
+ let
+ l1, l2 = Index(QN("Nf", -1) => 1, QN("Nf", 1) => 1; tags = "l1", dir = ITensors.In),
+ Index(QN("Nf", 2) => 1, QN("Nf", 1) => 1; tags = "l2", dir = ITensors.Out)
+ r1, r2, r3 = Index(QN("Nf", -2) => 1, QN("Nf", 1) => 1; tags = "r1", dir = ITensors.Out),
+ Index(QN("Nf", 2) => 1, QN("Nf", 1) => 1; tags = "r2", dir = ITensors.In),
+ Index(QN("Nf", -2) => 1, QN("Nf", 1) => 1; tags = "r3", dir = ITensors.In)
+ A = random_itensor(l1, l2, r1, r2, r3)
+
+ for dir in [ITensors.Out, ITensors.In]
+ L, R, spec = ITensors.factorize_svd(A, l1, l2; dir, ortho = "none")
+ @test norm(L * R - A) <= 1.0e-14
+ end
+ end
+ end
+
+ @testset "Fermion Contraction with Combined Indices" begin
+ N = 10
+ s = siteinds("Fermion", N; conserve_qns = true)
+
+ begin
+ A = random_itensor(QN("Nf", 3, -1), s[1], s[2], s[3], s[4])
+ B = random_itensor(QN("Nf", 2, -1), s[1], s[3], s[4])
+
+ CC = combiner(s[1], s[3])
+
+ cA = CC * A
+ cB = CC * B
+
+ R1 = dag(cA) * cB
+ R2 = dag(A) * B
+
+ @test norm(R1 - R2) < 1.0e-10
+ end
+
+ begin
+ A = random_itensor(QN("Nf", 3, -1), s[1], s[2], s[3], s[4])
+ B = random_itensor(QN("Nf", 2, -1), s[1], s[3], s[4])
+
+ CC = combiner(s[1], s[3])
+
+ cA = CC * A
+ cdB = dag(CC) * dag(B)
+
+ R1 = cA * cdB
+ R2 = A * dag(B)
+
+ @test norm(R1 - R2) < 1.0e-10
+ end
+
+ begin
+ A = random_itensor(QN("Nf", 3, -1), s[1], s[2], s[3], s[4])
+ B = random_itensor(QN("Nf", 2, -1), s[1], s[3], s[4])
+
+ CC = combiner(s[1], s[4], s[3])
+
+ cA = CC * A
+ cdB = dag(CC) * dag(B)
+
+ R1 = cA * cdB
+ R2 = A * dag(B)
+
+ @test norm(R1 - R2) < 1.0e-10
+ end
+
+ begin
+ CC = combiner(s[3], s[4])
+ c = combinedind(CC)
- R1 = dag(cA) * cB
- R2 = dag(A) * B
+ A = random_itensor(QN("Nf", 3, -1), c, s[1], s[2])
+ B = random_itensor(QN("Nf", 2, -1), s[1], c, s[5])
- @test norm(R1 - R2) < 1E-10
- end
+ uA = dag(CC) * A
+ uB = dag(CC) * B
- begin
- A = random_itensor(QN("Nf", 3, -1), s[1], s[2], s[3], s[4])
- B = random_itensor(QN("Nf", 2, -1), s[1], s[3], s[4])
+ R1 = dag(uA) * uB
+ R2 = dag(A) * B
- CC = combiner(s[1], s[3])
+ @test norm(R1 - R2) < 1.0e-10
+ end
- cA = CC * A
- cdB = dag(CC) * dag(B)
+ @testset "Combiner Regression Test" begin
+ T = random_itensor(QN("Nf", 3, -1), s[1], s[2], s[3], s[4])
- R1 = cA * cdB
- R2 = A * dag(B)
+ C12 = combiner(s[1], s[2])
+ c12 = combinedind(C12)
+ c12T = C12 * T
- @test norm(R1 - R2) < 1E-10
- end
+ u12T = dag(C12) * c12T
- begin
- A = random_itensor(QN("Nf", 3, -1), s[1], s[2], s[3], s[4])
- B = random_itensor(QN("Nf", 2, -1), s[1], s[3], s[4])
+ @test norm(u12T - T) < 1.0e-10
+ end
+ end # Fermion Contraction with Combined Indices
- CC = combiner(s[1], s[4], s[3])
+ @testset "Regression Tests" begin
+ @testset "SVD DiagBlockSparse Regression Test" begin
+ l1 = Index(QN("Nf", 0, -1) => 1, QN("Nf", 1, -1) => 1; tags = "Link,l=1")
+ s2 = Index(QN("Nf", 0, -1) => 1, QN("Nf", 1, -1) => 1; tags = "Site,n=2")
+ s3 = Index(QN("Nf", 0, -1) => 1, QN("Nf", 1, -1) => 1; tags = "Site,n=3")
+ l3 = Index(QN("Nf", 2, -1) => 1; tags = "Link,l=3")
- cA = CC * A
- cdB = dag(CC) * dag(B)
-
- R1 = cA * cdB
- R2 = A * dag(B)
-
- @test norm(R1 - R2) < 1E-10
- end
+ phi = random_itensor(QN("Nf", 4, -1), l1, s2, s3, l3)
- begin
- CC = combiner(s[3], s[4])
- c = combinedind(CC)
+ U, S, V = svd(phi, (l1, s2))
- A = random_itensor(QN("Nf", 3, -1), c, s[1], s[2])
- B = random_itensor(QN("Nf", 2, -1), s[1], c, s[5])
+ @test norm((U * S) * V - phi) < 1.0e-10
+ @test norm(U * (S * V) - phi) < 1.0e-10
+ end
- uA = dag(CC) * A
- uB = dag(CC) * B
+ @testset "Eigen Positive Semi Def Regression Test" begin
+ #
+ # Test was failing without using combiners in
+ # eigen which were conjugates of each other
+ #
+ cutoff = 1.0e-12
+ N = 2
+ s = siteinds("Fermion", N; conserve_qns = true)
- R1 = dag(uA) * uB
- R2 = dag(A) * B
+ T = ITensor(QN("Nf", 0, -1), dag(s[1]), s[1]')
+ T[2, 2] = 1
- @test norm(R1 - R2) < 1E-10
- end
+ F = eigen(T; ishermitian = true, cutoff = cutoff)
+ D, U, spec = F
+ Ut = F.Vt
- @testset "Combiner Regression Test" begin
- T = random_itensor(QN("Nf", 3, -1), s[1], s[2], s[3], s[4])
+ @test norm(dag(U) * D * Ut - T) < 1.0e-10
+ end
- C12 = combiner(s[1], s[2])
- c12 = combinedind(C12)
- c12T = C12 * T
+ @testset "Factorize Eigen Regression Test" begin
+ N = 3
+ s = siteinds("Fermion", N; conserve_qns = true)
+ A = ITensor(QN("Nf", 2, -1), s[1], s[2], s[3])
+ A[s[1] => 1, s[2] => 2, s[3] => 2] = 1.0
- u12T = dag(C12) * c12T
+ U, R = factorize(A, (s[1], s[2]); which_decomp = "eigen", cutoff = 1.0e-18, ortho = "left")
- @test norm(u12T - T) < 1E-10
- end
- end # Fermion Contraction with Combined Indices
+ @test norm(U * R - A) < 1.0e-12
+ end
- @testset "Regression Tests" begin
- @testset "SVD DiagBlockSparse Regression Test" begin
- l1 = Index(QN("Nf", 0, -1) => 1, QN("Nf", 1, -1) => 1; tags="Link,l=1")
- s2 = Index(QN("Nf", 0, -1) => 1, QN("Nf", 1, -1) => 1; tags="Site,n=2")
- s3 = Index(QN("Nf", 0, -1) => 1, QN("Nf", 1, -1) => 1; tags="Site,n=3")
- l3 = Index(QN("Nf", 2, -1) => 1; tags="Link,l=3")
+ @testset "Contraction Regression Test" begin
+ s = siteinds("Fermion", 3; conserve_qns = true)
+ l = Index(QN("Nf", 1, -1) => 1; tags = "l")
- phi = random_itensor(QN("Nf", 4, -1), l1, s2, s3, l3)
+ q2 = QN("Nf", 2, -1)
+ q0 = QN("Nf", 0, -1)
- U, S, V = svd(phi, (l1, s2))
+ T1 = ITensor(q2, s[1], s[2], l)
+ T1[s[1] => 1, s[2] => 2, l => 1] = 1.0
- @test norm((U * S) * V - phi) < 1E-10
- @test norm(U * (S * V) - phi) < 1E-10
- end
+ T2 = ITensor(q0, dag(l), s[3])
+ T2[dag(l) => 1, s[3] => 2] = 1.0
- @testset "Eigen Positive Semi Def Regression Test" begin
- #
- # Test was failing without using combiners in
- # eigen which were conjugates of each other
- #
- cutoff = 1E-12
- N = 2
- s = siteinds("Fermion", N; conserve_qns=true)
+ @test norm(T1 * T2 - T2 * T1) < 1.0e-10
+ end
- T = ITensor(QN("Nf", 0, -1), dag(s[1]), s[1]')
- T[2, 2] = 1
+ @testset "SVD Regression Test" begin
+ Pf0 = QN("Pf", 0, -2)
+ Pf1 = QN("Pf", 1, -2)
- F = eigen(T; ishermitian=true, cutoff=cutoff)
- D, U, spec = F
- Ut = F.Vt
+ l22 = Index([Pf0 => 1, Pf1 => 1], "Link,dir=2,n=2")
+ l23 = Index([Pf0 => 1, Pf1 => 1], "Link,dir=3,n=2")
+ s1 = Index([Pf0 => 1, Pf1 => 1, Pf1 => 1, Pf0 => 1], "Site,n=1")
+ l11 = Index([Pf0 => 1, Pf1 => 1], "Link,dir=1,n=1")
- @test norm(dag(U) * D * Ut - T) < 1E-10
- end
+ T = random_itensor(dag(l22), dag(l23), s1, l11)
- @testset "Factorize Eigen Regression Test" begin
- N = 3
- s = siteinds("Fermion", N; conserve_qns=true)
- A = ITensor(QN("Nf", 2, -1), s[1], s[2], s[3])
- A[s[1] => 1, s[2] => 2, s[3] => 2] = 1.0
+ U, S, V = svd(T, dag(l22), dag(l23), s1)
- U, R = factorize(A, (s[1], s[2]); which_decomp="eigen", cutoff=1E-18, ortho="left")
+ @test norm(T - U * S * V) < 1.0e-10
+ end
+ end # Regression Tests
- @test norm(U * R - A) < 1E-12
+ @testset "Non-QN eigen Regression Test" begin
+ # Test that non-QN eigen runs properly
+ # with auto-fermion enabled.
+ i = Index(2)
+ a = random_itensor(i', i)
+ d, u = eigen(a)
+ @test norm(a * u - u' * d) ≈ 0 atol = √(eps(real(eltype(a))))
end
- @testset "Contraction Regression Test" begin
- s = siteinds("Fermion", 3; conserve_qns=true)
- l = Index(QN("Nf", 1, -1) => 1; tags="l")
+ @testset "Fermion exp Tests" begin
+ s = siteinds("Fermion", 2; conserve_qns = true)
- q2 = QN("Nf", 2, -1)
- q0 = QN("Nf", 0, -1)
+ # Matrix test
+ id_tensor = op("I", s[1])
+ @test id_tensor ≈ exp(0.0 * id_tensor)
- T1 = ITensor(q2, s[1], s[2], l)
- T1[s[1] => 1, s[2] => 2, l => 1] = 1.0
+ # Tensor test
+ id_tensor = op("I", s[1]) * op("I", s[2])
+ @test id_tensor ≈ exp(0.0 * id_tensor)
- T2 = ITensor(q0, dag(l), s[3])
- T2[dag(l) => 1, s[3] => 2] = 1.0
+ # Permute and test again
+ id_tensor = permute(id_tensor, s[2], s[1], s[2]', s[1]')
+ @test id_tensor ≈ exp(0.0 * id_tensor)
- @test norm(T1 * T2 - T2 * T1) < 1E-10
- end
-
- @testset "SVD Regression Test" begin
- Pf0 = QN("Pf", 0, -2)
- Pf1 = QN("Pf", 1, -2)
+ # Explicitly passing indices in different, valid orders
+ @test id_tensor ≈ exp(0.0 * id_tensor, (s[1]', s[2]'), (dag(s[1]), dag(s[2])))
+ @test id_tensor ≈ exp(0.0 * id_tensor, (s[2]', s[1]'), (dag(s[2]), dag(s[1])))
+ @test id_tensor ≈ exp(0.0 * id_tensor, (dag(s[1]), dag(s[2])), (s[1]', s[2]'))
+ @test id_tensor ≈ exp(0.0 * id_tensor, (dag(s[2]), dag(s[1])), (s[2]', s[1]'))
- l22 = Index([Pf0 => 1, Pf1 => 1], "Link,dir=2,n=2")
- l23 = Index([Pf0 => 1, Pf1 => 1], "Link,dir=3,n=2")
- s1 = Index([Pf0 => 1, Pf1 => 1, Pf1 => 1, Pf0 => 1], "Site,n=1")
- l11 = Index([Pf0 => 1, Pf1 => 1], "Link,dir=1,n=1")
+ # Check wrong index ordering fails (i.e. we are actually paying attention to it)
+ @test norm(id_tensor - exp(0.0 * id_tensor, (dag(s[1]), dag(s[2])), (s[2]', s[1]'))) > 1
- T = random_itensor(dag(l22), dag(l23), s1, l11)
+ # Test a different, random tensor
+ T = random_itensor(s[1]', dag(s[1]))
+ T = 1 / 2 * (T + swapprime(dag(T), 0 => 1))
+ t = 0.01
+ eT = exp(t * T)
+ eT_taylor = (op("I", s[1]) + t * T + t^2 * apply(T, T) / 2)
+ @test norm(eT - eT_taylor) < 1.0e-5
- U, S, V = svd(T, dag(l22), dag(l23), s1)
+ #
+ # Test that bosonic tensor exp works with auto fermion enabled
+ #
+ j1 = Index([QN("Nb", 0) => 2, QN("Nb", 1) => 2])
+ j2 = Index([QN("Nb", 0) => 2, QN("Nb", 1) => 2])
+ id_tensor = op("I", j1) * op("I", j2)
+ @test id_tensor ≈ exp(0.0 * id_tensor)
- @test norm(T - U * S * V) < 1E-10
+ T = random_itensor(j1', dag(j1))
+ T = 1 / 2 * (T + swapprime(dag(T), 0 => 1))
+ t = 0.01
+ eT = exp(t * T)
+ eT_taylor = (op("I", j1) + t * T + t^2 * apply(T, T) / 2)
+ @test norm(eT - eT_taylor) < 1.0e-5
end
- end # Regression Tests
-
- @testset "Non-QN eigen Regression Test" begin
- # Test that non-QN eigen runs properly
- # with auto-fermion enabled.
- i = Index(2)
- a = random_itensor(i', i)
- d, u = eigen(a)
- @test norm(a * u - u' * d) ≈ 0 atol = √(eps(real(eltype(a))))
- end
-
- @testset "Fermion exp Tests" begin
- s = siteinds("Fermion", 2; conserve_qns=true)
-
- # Matrix test
- id_tensor = op("I", s[1])
- @test id_tensor ≈ exp(0.0 * id_tensor)
-
- # Tensor test
- id_tensor = op("I", s[1]) * op("I", s[2])
- @test id_tensor ≈ exp(0.0 * id_tensor)
-
- # Permute and test again
- id_tensor = permute(id_tensor, s[2], s[1], s[2]', s[1]')
- @test id_tensor ≈ exp(0.0 * id_tensor)
-
- # Explicitly passing indices in different, valid orders
- @test id_tensor ≈ exp(0.0 * id_tensor, (s[1]', s[2]'), (dag(s[1]), dag(s[2])))
- @test id_tensor ≈ exp(0.0 * id_tensor, (s[2]', s[1]'), (dag(s[2]), dag(s[1])))
- @test id_tensor ≈ exp(0.0 * id_tensor, (dag(s[1]), dag(s[2])), (s[1]', s[2]'))
- @test id_tensor ≈ exp(0.0 * id_tensor, (dag(s[2]), dag(s[1])), (s[2]', s[1]'))
-
- # Check wrong index ordering fails (i.e. we are actually paying attention to it)
- @test norm(id_tensor - exp(0.0 * id_tensor, (dag(s[1]), dag(s[2])), (s[2]', s[1]'))) > 1
-
- # Test a different, random tensor
- T = random_itensor(s[1]', dag(s[1]))
- T = 1/2*(T+swapprime(dag(T), 0=>1))
- t = 0.01
- eT = exp(t*T)
- eT_taylor = (op("I", s[1])+t*T+t^2*apply(T, T)/2)
- @test norm(eT - eT_taylor) < 1E-5
-
- #
- # Test that bosonic tensor exp works with auto fermion enabled
- #
- j1 = Index([QN("Nb", 0)=>2, QN("Nb", 1)=>2])
- j2 = Index([QN("Nb", 0)=>2, QN("Nb", 1)=>2])
- id_tensor = op("I", j1) * op("I", j2)
- @test id_tensor ≈ exp(0.0 * id_tensor)
-
- T = random_itensor(j1', dag(j1))
- T = 1/2*(T+swapprime(dag(T), 0=>1))
- t = 0.01
- eT = exp(t*T)
- eT_taylor = (op("I", j1)+t*T+t^2*apply(T, T)/2)
- @test norm(eT - eT_taylor) < 1E-5
- end
-
- ITensors.disable_auto_fermion()
+
+ ITensors.disable_auto_fermion()
end
end # module
diff --git a/test/base/test_itensor.jl b/test/base/test_itensor.jl
index 52efac9fbf..648cfc2641 100644
--- a/test/base/test_itensor.jl
+++ b/test/base/test_itensor.jl
@@ -1,76 +1,76 @@
@eval module $(gensym())
using Combinatorics: permutations
using ITensors:
- ITensors,
- Index,
- IndexSet,
- ITensor,
- Order,
- QN,
- ⊕,
- δ,
- addtags,
- allhastags,
- anyhastags,
- commonind,
- convert_eltype,
- convert_leaf_eltype,
- dag,
- directsum,
- eachindval,
- eachval,
- filterinds,
- firstind,
- hascommoninds,
- hasind,
- hasinds,
- hassameinds,
- hastags,
- inner,
- itensor,
- mapprime,
- noprime,
- onehot,
- order,
- permute,
- prime,
- product,
- random_itensor,
- removetags,
- replaceind,
- replaceind!,
- replaceinds,
- replaceinds!,
- replacetags,
- scalar,
- setelt,
- setprime,
- settags,
- sim,
- swapinds,
- swapinds!,
- swapprime,
- uniqueind,
- uniqueindex,
- val
+ ITensors,
+ Index,
+ IndexSet,
+ ITensor,
+ Order,
+ QN,
+ ⊕,
+ δ,
+ addtags,
+ allhastags,
+ anyhastags,
+ commonind,
+ convert_eltype,
+ convert_leaf_eltype,
+ dag,
+ directsum,
+ eachindval,
+ eachval,
+ filterinds,
+ firstind,
+ hascommoninds,
+ hasind,
+ hasinds,
+ hassameinds,
+ hastags,
+ inner,
+ itensor,
+ mapprime,
+ noprime,
+ onehot,
+ order,
+ permute,
+ prime,
+ product,
+ random_itensor,
+ removetags,
+ replaceind,
+ replaceind!,
+ replaceinds,
+ replaceinds!,
+ replacetags,
+ scalar,
+ setelt,
+ setprime,
+ settags,
+ sim,
+ swapinds,
+ swapinds!,
+ swapprime,
+ uniqueind,
+ uniqueindex,
+ val
using ITensors.NDTensors:
- NDTensors,
- DenseTensor,
- array,
- dim,
- dims,
- eigen,
- factorize,
- ind,
- inds,
- matrix,
- maxdim,
- mindim,
- polar,
- storage,
- vector
+ NDTensors,
+ DenseTensor,
+ array,
+ dim,
+ dims,
+ eigen,
+ factorize,
+ ind,
+ inds,
+ matrix,
+ maxdim,
+ mindim,
+ polar,
+ storage,
+ vector
using LinearAlgebra:
- LinearAlgebra, axpy!, diag, dot, ishermitian, mul!, norm, nullspace, qr, rmul!, svd, tr
+ LinearAlgebra, axpy!, diag, dot, ishermitian, mul!, norm, nullspace, qr, rmul!, svd, tr
using Random: Random
using Test: @test, @test_throws, @testset
@@ -80,1882 +80,1882 @@ ITensors.enable_debug_checks()
Random.seed!(12345)
function invdigits(::Type{T}, x...) where {T}
- return T(sum([x[length(x) - k + 1] * 10^(k - 1) for k in 1:length(x)]))
+ return T(sum([x[length(x) - k + 1] * 10^(k - 1) for k in 1:length(x)]))
end
@testset "Dense ITensor basic functionality" begin
- @testset "ITensor constructors" begin
- @testset "Default" begin
- A = ITensor()
- @test storage(A) isa NDTensors.EmptyStorage{NDTensors.EmptyNumber}
- end
+ @testset "ITensor constructors" begin
+ @testset "Default" begin
+ A = ITensor()
+ @test storage(A) isa NDTensors.EmptyStorage{NDTensors.EmptyNumber}
+ end
+
+ @testset "Undef with index" begin
+ i, j, k, l = Index.(2, ("i", "j", "k", "l"))
+ A = ITensor(undef, i)
+ @test storage(A) isa NDTensors.Dense{Float64}
+ end
+
+ @testset "Default with indices" begin
+ i, j, k, l = Index.(2, ("i", "j", "k", "l"))
+ A = ITensor(i, j)
+ @test storage(A) isa NDTensors.EmptyStorage{NDTensors.EmptyNumber}
+ end
+
+ @testset "diag" for ElType in (Float32, Float64, ComplexF32, ComplexF64)
+ i, j = Index.(2, ("i", "j"))
+ A = random_itensor(ElType, i, j)
+ d = diag(A)
+ @test d isa DenseTensor{ElType, 1}
+ @test d[1] == A[1, 1]
+ @test d[2] == A[2, 2]
+ end
+
+ @testset "Index set operations" begin
+ i, j, k, l = Index.(2, ("i", "j", "k", "l"))
+ A = random_itensor(i, j)
+ B = random_itensor(j, k)
+ C = random_itensor(k, l)
+ @test hascommoninds(A, B)
+ @test hascommoninds(B, C)
+ @test !hascommoninds(A, C)
+ end
+
+ @testset "isreal, iszero, real, imag" begin
+ i, j = Index.(2, ("i", "j"))
+ A = random_itensor(i, j)
+ Ac = random_itensor(ComplexF64, i, j)
+ Ar = real(Ac)
+ Ai = imag(Ac)
+ @test Ac ≈ Ar + im * Ai
+ @test isreal(A)
+ @test !isreal(Ac)
+ @test isreal(Ar)
+ @test isreal(Ai)
+ @test !iszero(A)
+ @test !iszero(real(A))
+ @test iszero(imag(A))
+ @test iszero(ITensor(0.0, i, j))
+ @test iszero(ITensor(i, j))
+ end
+
+ elts = (Float32, Float64, Complex{Float32}, Complex{Float64})
+ @testset "ITensors.scalartype (eltype=$elt)" for elt in elts
+ i, j = Index.((2, 2))
+ a = ITensor(elt, i, j)
+ @test ITensors.scalartype(a) === elt
+ a = random_itensor(elt, i, j)
+ @test ITensors.scalartype(a) === elt
+ end
+
+ @testset "map" begin
+ A = random_itensor(Index(2))
+ @test eltype(A) == Float64
+ B = map(ComplexF64, A)
+ @test B ≈ A
+ @test eltype(B) == ComplexF64
+ B = map(Float32, A)
+ @test B ≈ A
+ @test eltype(B) == Float32
+ B = map(x -> 2x, A)
+ @test B ≈ 2A
+ @test eltype(B) == Float64
+ @test array(map(x -> x + 1, A)) ≈ map(x -> x + 1, array(A))
+ end
+
+ @testset "reductions (sum, prod)" for elt in (
+ Float32, Float64, Complex{Float32}, Complex{Float64},
+ )
+ a = random_itensor(elt, Index(2), Index(2))
+ @test sum(a) ≈ sum(array(a))
+ @test sum(a) isa elt
+ @test prod(a) ≈ prod(array(a))
+ @test prod(a) isa elt
+
+ a = ITensor(elt(2))
+ @test sum(a) ≈ sum(array(a))
+ @test sum(a) isa elt
+ @test prod(a) ≈ prod(array(a))
+ @test prod(a) isa elt
+ end
+
+ @testset "getindex with state string" begin
+ i₁ = Index(2, "S=1/2")
+ i₂ = Index(2, "S=1/2")
+ v = ITensor(i₁, i₂)
+ v[i₂ => "↑", i₁ => "↓"] = 1.0
+ @test v[1, 1] == 0.0
+ @test v[1, 2] == 0.0
+ @test v[2, 1] == 1.0
+ @test v[2, 2] == 0.0
+ @test v[i₁ => "↑", i₂ => "↑"] == 0.0
+ @test v[i₁ => "↑", i₂ => "↓"] == 0.0
+ @test v[i₁ => "↓", i₂ => "↑"] == 1.0
+ @test v[i₁ => "↓", i₂ => "↓"] == 0.0
+ end
+
+ @testset "getindex with state string" begin
+ i₁ = Index(2, "S=1/2")
+ i₂ = Index(2, "S=1/2")
+ v = ITensor(i₁, i₂)
+ v["↓", "↑"] = 1.0
+ @test v[1, 1] == 0.0
+ @test v[1, 2] == 0.0
+ @test v[2, 1] == 1.0
+ @test v[2, 2] == 0.0
+ @test v["↑", "↑"] == 0.0
+ @test v["↑", "↓"] == 0.0
+ @test v["↓", "↑"] == 1.0
+ @test v["↓", "↓"] == 0.0
+ end
+
+ @testset "getindex with end (lastindex, LastIndex)" begin
+ a = Index(2)
+ b = Index(3)
+ A = random_itensor(a, b)
+ @test A[end, end] == A[a => 2, b => 3]
+ @test A[end - 1, end] == A[a => 1, b => 3]
+ @test A[end - 1, end - 1] == A[a => 1, b => 2]
+ @test A[end - 1, end - 2] == A[a => 1, b => 1]
+ @test A[end - 1, 2 * (end - 2)] == A[a => 1, b => 2]
+ @test A[2, end] == A[a => 2, b => 3]
+ @test A[2, end - 1] == A[a => 2, b => 2]
+ @test A[1, end] == A[a => 1, b => 3]
+ @test A[1, end - 2] == A[a => 1, b => 1]
+ @test A[end, 2] == A[a => 2, b => 2]
+ @test A[end - 1, 2] == A[a => 1, b => 2]
+ @test A[a => end, b => end] == A[a => 2, b => 3]
+ @test A[a => end - 1, b => end] == A[a => 1, b => 3]
+ @test A[a => end, b => end - 1] == A[a => 2, b => 2]
+ @test A[a => end - 1, b => 2 * (end - 2)] == A[a => 1, b => 2]
+ @test A[a => 2, b => end] == A[a => 2, b => 3]
+ @test A[a => 2, b => end] == A[a => 2, b => 3]
+ @test A[a => 1, b => end] == A[a => 1, b => 3]
+ @test A[a => end, b => 3] == A[a => 2, b => 3]
+ @test A[a => end, b => 2] == A[a => 2, b => 2]
+ @test A[b => end, a => end] == A[a => 2, b => 3]
+ @test A[b => end - 1, a => end] == A[a => 2, b => 2]
+ @test A[b => end - 1, a => end - 1] == A[a => 1, b => 2]
+ @test A[b => end - 2, a => end - 1] == A[a => 1, b => 1]
+ @test A[b => 2 * (end - 2), a => end - 1] == A[a => 1, b => 2]
+ @test A[b => 2, a => end] == A[a => 2, b => 2]
+ @test A[b => 2, a => end - 1] == A[a => 1, b => 2]
+ @test A[b => 1, a => end] == A[a => 2, b => 1]
+ @test A[b => 1, a => end - 1] == A[a => 1, b => 1]
+ @test A[b => end, a => 2] == A[a => 2, b => 3]
+ @test A[b => end - 1, a => 2] == A[a => 2, b => 2]
+ @test A[b => end, a => 1] == A[a => 1, b => 3]
+ @test A[b => end - 2, a => 1] == A[a => 1, b => 1]
+ @test A[b => end^2 - 7, a => 1] == A[a => 1, b => 2]
+
+ i, j, k, l = Index.(2, ("i", "j", "k", "l"))
+ B = random_itensor(i)
+ @test B[i => end] == B[i => dim(i)]
+ @test B[i => end - 1] == B[i => dim(i) - 1]
+ @test B[end] == B[dim(i)]
+ @test B[end - 1] == B[dim(i) - 1]
+ end
+ @testset "ITensor equality" begin
+ i, j, k, l = Index.(2, ("i", "j", "k", "l"))
+ Aij = random_itensor(i, j)
+ Aji = permute(Aij, j, i)
+ Bij′ = random_itensor(i, j')
+ Cij′ = random_itensor(i, j')
+ @test Aij == Aij
+ @test Aij == Aji
+ @test Bij′ != Cij′
+ @test Bij′ != Aij
+ end
+ @testset "Set element with end (lastindex, LastIndex)" begin
+ _i = Index(2, "i")
+ _j = Index(3, "j")
+
+ A = ITensor(_i, _j)
+ A[_i => end, _j => end] = 2.5
+ @test A[_i => dim(_i), _j => dim(_j)] == 2.5
+
+ A = ITensor(_i, _j)
+ A[_j => end, _i => end] = 3.5
+ @test A[_i => dim(_i), _j => dim(_j)] == 3.5
+
+ A = ITensor(_i, _j)
+ A[_j => end, _i => 1] = 4.5
+ @test A[_i => 1, _j => dim(_j)] == 4.5
+
+ A = ITensor(_i, _j)
+ A[_j => end - 1, _i => 1] = 4.5
+ @test A[_i => 1, _j => dim(_j) - 1] == 4.5
+ end
+
+ @testset "Random" begin
+ i, j, k, l = Index.(2, ("i", "j", "k", "l"))
+ A = random_itensor(i, j)
+
+ # Test hasind, hasinds
+ @test hasind(A, i)
+ @test hasind(i)(A)
+
+ @test hasinds(A, i)
+ @test hasinds(A, j)
+ @test hasinds(A, [i, j])
+ @test hasinds([i, j])(A)
+ @test hasinds(A, IndexSet(j))
+ @test hasinds(A, j, i)
+ @test hasinds(A, (i, j))
+ @test hasinds(A, IndexSet(i, j))
+ @test hasinds(j, i)(A)
+ @test hasinds(i)(A)
+ @test hasinds(IndexSet(j))(A)
+ @test hasinds((i, j))(A)
+ @test hasinds(IndexSet(i, j))(A)
+
+ @test storage(A) isa NDTensors.Dense{Float64}
+
+ @test ndims(A) == order(A) == 2 == length(inds(A))
+ @test Order(A) == Order(2)
+ @test size(A) == dims(A) == (2, 2)
+ @test dim(A) == 4
+
+ At = random_itensor(Index(2), Index(3))
+ @test maxdim(At) == 3
+ @test mindim(At) == 2
+ @test dim(At, 1) == 2
+ @test dim(At, 2) == 3
+
+ B = random_itensor(IndexSet(i, j))
+ @test storage(B) isa NDTensors.Dense{Float64}
+ @test ndims(B) == order(B) == 2 == length(inds(B))
+ @test size(B) == dims(B) == (2, 2)
+
+ A = random_itensor()
+ @test eltype(A) == Float64
+ @test ndims(A) == 0
+ end
+
+ @testset "trace (tr) (eltype=$elt)" for elt in (
+ Float32, Float64, Complex{Float32}, Complex{Float64},
+ )
+ i, j, k, l = Index.((2, 3, 4, 5), ("i", "j", "k", "l"))
+ T = random_itensor(elt, j, k', i', k, j', i)
+ trT1 = tr(T)
+ @test eltype(trT1) === elt
+ trT2 = (T * δ(elt, i, i') * δ(elt, j, j') * δ(elt, k, k'))[]
+ @test trT1 ≈ trT2
+
+ T = random_itensor(elt, j, k', i', l, k, j', i)
+ trT1 = tr(T)
+ @test eltype(trT1) === elt
+ trT2 = T * δ(elt, i, i') * δ(elt, j, j') * δ(elt, k, k')
+ @test trT1 ≈ trT2
+ end
+
+ @testset "ITensor iteration" begin
+ i, j, k, l = Index.(2, ("i", "j", "k", "l"))
+ A = random_itensor(i, j)
+ Is = eachindex(A)
+ @test length(Is) == dim(A)
+ sumA = 0.0
+ for I in Is
+ sumA += A[I]
+ end
+ @test sumA ≈ sum(ITensors.data(A))
+ sumA = 0.0
+ for a in A
+ sumA += a
+ end
+ @test sumA ≈ sum(A)
+ @test sumA ≈ sum(A)
+ end
+
+ @testset "From matrix" begin
+ i, j, k, l = Index.(2, ("i", "j", "k", "l"))
+ M = [1 2; 3 4]
+ A = itensor(M, i, j)
+ @test storage(A) isa NDTensors.Dense{Float64}
+
+ @test M ≈ Matrix(A, i, j)
+ @test M' ≈ Matrix(A, j, i)
+ @test_throws DimensionMismatch vector(A)
+
+ @test size(A, 1) == size(M, 1) == 2
+ @test_throws BoundsError size(A, 3)
+ @test_throws BoundsError size(A, 0)
+ @test_throws ErrorException size(M, 0)
+ # setstorage changes the internal data but not indices
+ N = [5 6; 7 8]
+ A = itensor(M, i, j)
+ B = ITensors.setstorage(A, NDTensors.Dense(vec(N)))
+ @test N == Matrix(B, i, j)
+ @test storage(A) isa NDTensors.Dense{Float64}
+ @test storage(B) isa NDTensors.Dense{Int}
+
+ M = [1 2 3; 4 5 6]
+ @test_throws DimensionMismatch itensor(M, i, j)
+ end
+
+ @testset "To Matrix" begin
+ i, j, k, l = Index.(2, ("i", "j", "k", "l"))
+ TM = random_itensor(i, j)
+
+ M1 = matrix(TM)
+ for ni in eachval(i), nj in eachval(j)
+ @test M1[ni, nj] ≈ TM[i => ni, j => nj]
+ end
- @testset "Undef with index" begin
- i, j, k, l = Index.(2, ("i", "j", "k", "l"))
- A = ITensor(undef, i)
- @test storage(A) isa NDTensors.Dense{Float64}
+ M2 = Matrix(TM, j, i)
+ for ni in eachval(i), nj in eachval(j)
+ @test M2[nj, ni] ≈ TM[i => ni, j => nj]
+ end
+
+ T3 = random_itensor(i, j, k)
+ @test_throws DimensionMismatch Matrix(T3, i, j)
+ end
+
+ @testset "To Vector" begin
+ i, j, k, l = Index.(2, ("i", "j", "k", "l"))
+ TV = random_itensor(i)
+
+ V = vector(TV)
+ for ni in eachindval(i)
+ @test V[val(ni)] ≈ TV[ni]
+ end
+ V = Vector(TV)
+ for ni in eachindval(i)
+ @test V[val(ni)] ≈ TV[ni]
+ end
+ V = Vector(TV, i)
+ for ni in eachindval(i)
+ @test V[val(ni)] ≈ TV[ni]
+ end
+ V = Vector{ComplexF64}(TV)
+ for ni in eachindval(i)
+ @test V[val(ni)] ≈ complex(TV[ni])
+ end
+
+ T2 = random_itensor(i, j)
+ @test_throws DimensionMismatch vector(T2)
+ end
+
+ @testset "Complex" begin
+ i, j, k, l = Index.(2, ("i", "j", "k", "l"))
+ A = ITensor(Complex, i, j)
+ @test storage(A) isa NDTensors.EmptyStorage{Complex}
+ end
+
+ @testset "Random complex" begin
+ i, j, k, l = Index.(2, ("i", "j", "k", "l"))
+ A = random_itensor(ComplexF64, i, j)
+ @test storage(A) isa NDTensors.Dense{ComplexF64}
+ end
+
+ @testset "From complex matrix" begin
+ i, j, k, l = Index.(2, ("i", "j", "k", "l"))
+ M = [1 + 2im 2; 3 4]
+ A = itensor(M, i, j)
+ @test storage(A) isa NDTensors.Dense{ComplexF64}
+ end
end
- @testset "Default with indices" begin
- i, j, k, l = Index.(2, ("i", "j", "k", "l"))
- A = ITensor(i, j)
- @test storage(A) isa NDTensors.EmptyStorage{NDTensors.EmptyNumber}
+ @testset "eltype promotion with scalar * and /" begin
+ @test eltype(ITensor(1.0f0, Index(2)) * 2) === Float32
+ @test eltype(ITensor(1.0f0, Index(2)) .* 2) === Float32
+ @test eltype(ITensor(1.0f0, Index(2)) / 2) === Float32
+ @test eltype(ITensor(1.0f0, Index(2)) ./ 2) === Float32
+ @test eltype(ITensor(1.0f0, Index(2)) * 2.0f0) === Float32
+ @test eltype(ITensor(1.0f0, Index(2)) .* 2.0f0) === Float32
+ @test eltype(ITensor(1.0f0, Index(2)) / 2.0f0) === Float32
+ @test eltype(ITensor(1.0f0, Index(2)) ./ 2.0f0) === Float32
+ @test eltype(ITensor(1.0f0, Index(2)) * 2.0) === Float64
+ @test eltype(ITensor(1.0f0, Index(2)) .* 2.0) === Float64
+ @test eltype(ITensor(1.0f0, Index(2)) / 2.0) === Float64
+ @test eltype(ITensor(1.0f0, Index(2)) ./ 2.0) === Float64
end
- @testset "diag" for ElType in (Float32, Float64, ComplexF32, ComplexF64)
- i, j = Index.(2, ("i", "j"))
- A = random_itensor(ElType, i, j)
- d = diag(A)
- @test d isa DenseTensor{ElType,1}
- @test d[1] == A[1, 1]
- @test d[2] == A[2, 2]
+ @testset "Division /" begin
+ i = Index(2)
+ A = random_itensor(i)
+ B = A / 2
+ C = A / ITensor(2)
+ @test B isa ITensor
+ @test C isa ITensor
+ @test B ≈ C
+ @test A[1] / 2 ≈ B[1]
+ @test A[2] / 2 ≈ B[2]
+ @test A[1] / 2 ≈ C[1]
+ @test A[2] / 2 ≈ C[2]
end
- @testset "Index set operations" begin
- i, j, k, l = Index.(2, ("i", "j", "k", "l"))
- A = random_itensor(i, j)
- B = random_itensor(j, k)
- C = random_itensor(k, l)
- @test hascommoninds(A, B)
- @test hascommoninds(B, C)
- @test !hascommoninds(A, C)
+ @testset "Convert to complex" begin
+ i = Index(2, "i")
+ j = Index(2, "j")
+ A = random_itensor(i, j)
+ B = complex(A)
+ for ii in 1:dim(i), jj in 1:dim(j)
+ @test complex(A[i => ii, j => jj]) == B[i => ii, j => jj]
+ end
end
- @testset "isreal, iszero, real, imag" begin
- i, j = Index.(2, ("i", "j"))
- A = random_itensor(i, j)
- Ac = random_itensor(ComplexF64, i, j)
- Ar = real(Ac)
- Ai = imag(Ac)
- @test Ac ≈ Ar + im * Ai
- @test isreal(A)
- @test !isreal(Ac)
- @test isreal(Ar)
- @test isreal(Ai)
- @test !iszero(A)
- @test !iszero(real(A))
- @test iszero(imag(A))
- @test iszero(ITensor(0.0, i, j))
- @test iszero(ITensor(i, j))
+ @testset "Complex Number Operations" for _eltype in (Float32, Float64)
+ i = Index(3, "i")
+ j = Index(4, "j")
+
+ A = random_itensor(complex(_eltype), i, j)
+
+ rA = real(A)
+ iA = imag(A)
+ @test norm(rA + 1im * iA - A) < 1.0e-8
+ @test eltype(rA) <: _eltype
+ @test eltype(iA) <: _eltype
+
+ cA = conj(A)
+ @test eltype(cA) <: complex(_eltype)
+ @test norm(cA) ≈ norm(A)
+
+ B = random_itensor(_eltype, i, j)
+
+ cB = conj(B)
+ @test eltype(cB) <: _eltype
+ @test norm(cB) ≈ norm(B)
end
- elts = (Float32, Float64, Complex{Float32}, Complex{Float64})
- @testset "ITensors.scalartype (eltype=$elt)" for elt in elts
- i, j = Index.((2, 2))
- a = ITensor(elt, i, j)
- @test ITensors.scalartype(a) === elt
- a = random_itensor(elt, i, j)
- @test ITensors.scalartype(a) === elt
+ @testset "similar" begin
+ i = Index(2, "i")
+ j = Index(2, "j")
+ A = random_itensor(i, j)
+ B = similar(A)
+ @test inds(B) == inds(A)
+ Ac = similar(A, ComplexF32)
+ @test storage(Ac) isa NDTensors.Dense{ComplexF32}
end
- @testset "map" begin
- A = random_itensor(Index(2))
- @test eltype(A) == Float64
- B = map(ComplexF64, A)
- @test B ≈ A
- @test eltype(B) == ComplexF64
- B = map(Float32, A)
- @test B ≈ A
- @test eltype(B) == Float32
- B = map(x -> 2x, A)
- @test B ≈ 2A
- @test eltype(B) == Float64
- @test array(map(x -> x + 1, A)) ≈ map(x -> x + 1, array(A))
+ @testset "fill!" begin
+ i = Index(2, "i")
+ j = Index(2, "j")
+ A = random_itensor(i, j)
+ fill!(A, 1.0)
+ @test all(ITensors.data(A) .== 1.0)
end
- @testset "reductions (sum, prod)" for elt in (
- Float32, Float64, Complex{Float32}, Complex{Float64}
- )
- a = random_itensor(elt, Index(2), Index(2))
- @test sum(a) ≈ sum(array(a))
- @test sum(a) isa elt
- @test prod(a) ≈ prod(array(a))
- @test prod(a) isa elt
-
- a = ITensor(elt(2))
- @test sum(a) ≈ sum(array(a))
- @test sum(a) isa elt
- @test prod(a) ≈ prod(array(a))
- @test prod(a) isa elt
+ @testset "fill! using broadcast" begin
+ i = Index(2, "i")
+ j = Index(2, "j")
+ A = random_itensor(i, j)
+ A .= 1.0
+ @test all(ITensors.data(A) .== 1.0)
end
- @testset "getindex with state string" begin
- i₁ = Index(2, "S=1/2")
- i₂ = Index(2, "S=1/2")
- v = ITensor(i₁, i₂)
- v[i₂ => "↑", i₁ => "↓"] = 1.0
- @test v[1, 1] == 0.0
- @test v[1, 2] == 0.0
- @test v[2, 1] == 1.0
- @test v[2, 2] == 0.0
- @test v[i₁ => "↑", i₂ => "↑"] == 0.0
- @test v[i₁ => "↑", i₂ => "↓"] == 0.0
- @test v[i₁ => "↓", i₂ => "↑"] == 1.0
- @test v[i₁ => "↓", i₂ => "↓"] == 0.0
+ @testset "zero" begin
+ i = Index(2)
+ A = random_itensor(i)
+ B = zero(A)
+ @test false * A ≈ B
end
- @testset "getindex with state string" begin
- i₁ = Index(2, "S=1/2")
- i₂ = Index(2, "S=1/2")
- v = ITensor(i₁, i₂)
- v["↓", "↑"] = 1.0
- @test v[1, 1] == 0.0
- @test v[1, 2] == 0.0
- @test v[2, 1] == 1.0
- @test v[2, 2] == 0.0
- @test v["↑", "↑"] == 0.0
- @test v["↑", "↓"] == 0.0
- @test v["↓", "↑"] == 1.0
- @test v["↓", "↓"] == 0.0
+ @testset "copyto!" begin
+ i = Index(2, "i")
+ j = Index(2, "j")
+ M = [1 2; 3 4]
+ A = itensor(M, i, j)
+ N = 2 * M
+ B = itensor(N, i, j)
+ copyto!(A, B)
+ @test A == B
+ @test ITensors.data(A) == vec(N)
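+        # copying from an ITensor whose indices come in a different order should permute the stored data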
+ A = itensor(M, i, j)
+ B = itensor(N, j, i)
+ copyto!(A, B)
+ @test A == B
+ @test ITensors.data(A) == vec(transpose(N))
end
- @testset "getindex with end (lastindex, LastIndex)" begin
- a = Index(2)
- b = Index(3)
- A = random_itensor(a, b)
- @test A[end, end] == A[a => 2, b => 3]
- @test A[end - 1, end] == A[a => 1, b => 3]
- @test A[end - 1, end - 1] == A[a => 1, b => 2]
- @test A[end - 1, end - 2] == A[a => 1, b => 1]
- @test A[end - 1, 2 * (end - 2)] == A[a => 1, b => 2]
- @test A[2, end] == A[a => 2, b => 3]
- @test A[2, end - 1] == A[a => 2, b => 2]
- @test A[1, end] == A[a => 1, b => 3]
- @test A[1, end - 2] == A[a => 1, b => 1]
- @test A[end, 2] == A[a => 2, b => 2]
- @test A[end - 1, 2] == A[a => 1, b => 2]
- @test A[a => end, b => end] == A[a => 2, b => 3]
- @test A[a => end - 1, b => end] == A[a => 1, b => 3]
- @test A[a => end, b => end - 1] == A[a => 2, b => 2]
- @test A[a => end - 1, b => 2 * (end - 2)] == A[a => 1, b => 2]
- @test A[a => 2, b => end] == A[a => 2, b => 3]
- @test A[a => 2, b => end] == A[a => 2, b => 3]
- @test A[a => 1, b => end] == A[a => 1, b => 3]
- @test A[a => end, b => 3] == A[a => 2, b => 3]
- @test A[a => end, b => 2] == A[a => 2, b => 2]
- @test A[b => end, a => end] == A[a => 2, b => 3]
- @test A[b => end - 1, a => end] == A[a => 2, b => 2]
- @test A[b => end - 1, a => end - 1] == A[a => 1, b => 2]
- @test A[b => end - 2, a => end - 1] == A[a => 1, b => 1]
- @test A[b => 2 * (end - 2), a => end - 1] == A[a => 1, b => 2]
- @test A[b => 2, a => end] == A[a => 2, b => 2]
- @test A[b => 2, a => end - 1] == A[a => 1, b => 2]
- @test A[b => 1, a => end] == A[a => 2, b => 1]
- @test A[b => 1, a => end - 1] == A[a => 1, b => 1]
- @test A[b => end, a => 2] == A[a => 2, b => 3]
- @test A[b => end - 1, a => 2] == A[a => 2, b => 2]
- @test A[b => end, a => 1] == A[a => 1, b => 3]
- @test A[b => end - 2, a => 1] == A[a => 1, b => 1]
- @test A[b => end ^ 2 - 7, a => 1] == A[a => 1, b => 2]
-
- i, j, k, l = Index.(2, ("i", "j", "k", "l"))
- B = random_itensor(i)
- @test B[i => end] == B[i => dim(i)]
- @test B[i => end - 1] == B[i => dim(i) - 1]
- @test B[end] == B[dim(i)]
- @test B[end - 1] == B[dim(i) - 1]
+ @testset "Unary -" begin
+ i = Index(2, "i")
+ j = Index(2, "j")
+ M = [1 2; 3 4]
+ A = itensor(M, i, j)
+ @test -A == itensor(-M, i, j)
end
- @testset "ITensor equality" begin
- i, j, k, l = Index.(2, ("i", "j", "k", "l"))
- Aij = random_itensor(i, j)
- Aji = permute(Aij, j, i)
- Bij′ = random_itensor(i, j')
- Cij′ = random_itensor(i, j')
- @test Aij == Aij
- @test Aij == Aji
- @test Bij′ != Cij′
- @test Bij′ != Aij
+
+ @testset "dot" begin
+ i = Index(2, "i")
+ a = [1.0; 2.0]
+ b = [3.0; 4.0]
+ A = itensor(a, i)
+ B = itensor(b, i)
+ @test dot(A, B) == 11.0
end
- @testset "Set element with end (lastindex, LastIndex)" begin
- _i = Index(2, "i")
- _j = Index(3, "j")
- A = ITensor(_i, _j)
- A[_i => end, _j => end] = 2.5
- @test A[_i => dim(_i), _j => dim(_j)] == 2.5
+ @testset "mul!" begin
+ i = Index(2; tags = "i")
+ j = Index(2; tags = "j")
+ k = Index(2; tags = "k")
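+        # mul!(C, A, B) is checked against A * B below for each ordering of the indices of A, B, and C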
- A = ITensor(_i, _j)
- A[_j => end, _i => end] = 3.5
- @test A[_i => dim(_i), _j => dim(_j)] == 3.5
+ A = random_itensor(i, j)
+ B = random_itensor(j, k)
+ C = random_itensor(i, k)
+ mul!(C, A, B)
+ @test C ≈ A * B
- A = ITensor(_i, _j)
- A[_j => end, _i => 1] = 4.5
- @test A[_i => 1, _j => dim(_j)] == 4.5
+ A = random_itensor(i, j)
+ B = random_itensor(j, k)
+ C = random_itensor(k, i)
+ mul!(C, A, B)
+ @test C ≈ A * B
- A = ITensor(_i, _j)
- A[_j => end - 1, _i => 1] = 4.5
- @test A[_i => 1, _j => dim(_j) - 1] == 4.5
- end
+ A = random_itensor(i, j)
+ B = random_itensor(k, j)
+ C = random_itensor(i, k)
+ mul!(C, A, B)
+ @test C ≈ A * B
- @testset "Random" begin
- i, j, k, l = Index.(2, ("i", "j", "k", "l"))
- A = random_itensor(i, j)
-
- # Test hasind, hasinds
- @test hasind(A, i)
- @test hasind(i)(A)
-
- @test hasinds(A, i)
- @test hasinds(A, j)
- @test hasinds(A, [i, j])
- @test hasinds([i, j])(A)
- @test hasinds(A, IndexSet(j))
- @test hasinds(A, j, i)
- @test hasinds(A, (i, j))
- @test hasinds(A, IndexSet(i, j))
- @test hasinds(j, i)(A)
- @test hasinds(i)(A)
- @test hasinds(IndexSet(j))(A)
- @test hasinds((i, j))(A)
- @test hasinds(IndexSet(i, j))(A)
-
- @test storage(A) isa NDTensors.Dense{Float64}
-
- @test ndims(A) == order(A) == 2 == length(inds(A))
- @test Order(A) == Order(2)
- @test size(A) == dims(A) == (2, 2)
- @test dim(A) == 4
-
- At = random_itensor(Index(2), Index(3))
- @test maxdim(At) == 3
- @test mindim(At) == 2
- @test dim(At, 1) == 2
- @test dim(At, 2) == 3
-
- B = random_itensor(IndexSet(i, j))
- @test storage(B) isa NDTensors.Dense{Float64}
- @test ndims(B) == order(B) == 2 == length(inds(B))
- @test size(B) == dims(B) == (2, 2)
-
- A = random_itensor()
- @test eltype(A) == Float64
- @test ndims(A) == 0
- end
+ A = random_itensor(i, j)
+ B = random_itensor(k, j)
+ C = random_itensor(k, i)
+ mul!(C, A, B)
+ @test C ≈ A * B
- @testset "trace (tr) (eltype=$elt)" for elt in (
- Float32, Float64, Complex{Float32}, Complex{Float64}
- )
- i, j, k, l = Index.((2, 3, 4, 5), ("i", "j", "k", "l"))
- T = random_itensor(elt, j, k', i', k, j', i)
- trT1 = tr(T)
- @test eltype(trT1) === elt
- trT2 = (T * δ(elt, i, i') * δ(elt, j, j') * δ(elt, k, k'))[]
- @test trT1 ≈ trT2
-
- T = random_itensor(elt, j, k', i', l, k, j', i)
- trT1 = tr(T)
- @test eltype(trT1) === elt
- trT2 = T * δ(elt, i, i') * δ(elt, j, j') * δ(elt, k, k')
- @test trT1 ≈ trT2
- end
+ A = random_itensor(j, i)
+ B = random_itensor(j, k)
+ C = random_itensor(i, k)
+ mul!(C, A, B)
+ @test C ≈ A * B
- @testset "ITensor iteration" begin
- i, j, k, l = Index.(2, ("i", "j", "k", "l"))
- A = random_itensor(i, j)
- Is = eachindex(A)
- @test length(Is) == dim(A)
- sumA = 0.0
- for I in Is
- sumA += A[I]
- end
- @test sumA ≈ sum(ITensors.data(A))
- sumA = 0.0
- for a in A
- sumA += a
- end
- @test sumA ≈ sum(A)
- @test sumA ≈ sum(A)
- end
+ A = random_itensor(j, i)
+ B = random_itensor(j, k)
+ C = random_itensor(k, i)
+ mul!(C, A, B)
+ @test C ≈ A * B
- @testset "From matrix" begin
- i, j, k, l = Index.(2, ("i", "j", "k", "l"))
- M = [1 2; 3 4]
- A = itensor(M, i, j)
- @test storage(A) isa NDTensors.Dense{Float64}
-
- @test M ≈ Matrix(A, i, j)
- @test M' ≈ Matrix(A, j, i)
- @test_throws DimensionMismatch vector(A)
-
- @test size(A, 1) == size(M, 1) == 2
- @test_throws BoundsError size(A, 3)
- @test_throws BoundsError size(A, 0)
- @test_throws ErrorException size(M, 0)
- # setstorage changes the internal data but not indices
- N = [5 6; 7 8]
- A = itensor(M, i, j)
- B = ITensors.setstorage(A, NDTensors.Dense(vec(N)))
- @test N == Matrix(B, i, j)
- @test storage(A) isa NDTensors.Dense{Float64}
- @test storage(B) isa NDTensors.Dense{Int}
-
- M = [1 2 3; 4 5 6]
- @test_throws DimensionMismatch itensor(M, i, j)
- end
+ A = random_itensor(j, i)
+ B = random_itensor(k, j)
+ C = random_itensor(i, k)
+ mul!(C, A, B)
+ @test C ≈ A * B
+
+ A = random_itensor(j, i)
+ B = random_itensor(k, j)
+ C = random_itensor(k, i)
+ mul!(C, A, B)
+ @test C ≈ A * B
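+        # the 5-argument mul! should compute α * A * B + β * C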
- @testset "To Matrix" begin
- i, j, k, l = Index.(2, ("i", "j", "k", "l"))
- TM = random_itensor(i, j)
+ A = random_itensor(i, j)
+ B = random_itensor(k, j)
+ C = random_itensor(k, i)
+ α = 2
+ β = 3
+ R = mul!(copy(C), A, B, α, β)
+ @test α * A * B + β * C ≈ R
- M1 = matrix(TM)
- for ni in eachval(i), nj in eachval(j)
- @test M1[ni, nj] ≈ TM[i => ni, j => nj]
- end
+ @testset "In-place bugs" begin
+ @testset "Bug 1" begin
+ l1 = Index(3, "l=1")
+ l2 = Index(3, "l=2")
+ s = Index(2, "s")
- M2 = Matrix(TM, j, i)
- for ni in eachval(i), nj in eachval(j)
- @test M2[nj, ni] ≈ TM[i => ni, j => nj]
- end
+ A = random_itensor(s', s)
+ B = random_itensor(l1, s, l2)
- T3 = random_itensor(i, j, k)
- @test_throws DimensionMismatch Matrix(T3, i, j)
- end
+ C = random_itensor(l1, s', l2)
- @testset "To Vector" begin
- i, j, k, l = Index.(2, ("i", "j", "k", "l"))
- TV = random_itensor(i)
-
- V = vector(TV)
- for ni in eachindval(i)
- @test V[val(ni)] ≈ TV[ni]
- end
- V = Vector(TV)
- for ni in eachindval(i)
- @test V[val(ni)] ≈ TV[ni]
- end
- V = Vector(TV, i)
- for ni in eachindval(i)
- @test V[val(ni)] ≈ TV[ni]
- end
- V = Vector{ComplexF64}(TV)
- for ni in eachindval(i)
- @test V[val(ni)] ≈ complex(TV[ni])
- end
-
- T2 = random_itensor(i, j)
- @test_throws DimensionMismatch vector(T2)
- end
+ C .= A .* B
- @testset "Complex" begin
- i, j, k, l = Index.(2, ("i", "j", "k", "l"))
- A = ITensor(Complex, i, j)
- @test storage(A) isa NDTensors.EmptyStorage{Complex}
- end
+ @test C ≈ A * B
+ end
- @testset "Random complex" begin
- i, j, k, l = Index.(2, ("i", "j", "k", "l"))
- A = random_itensor(ComplexF64, i, j)
- @test storage(A) isa NDTensors.Dense{ComplexF64}
- end
+ @testset "Bug 2" begin
+ is = [Index(n + 1, "i$n") for n in 1:6]
- @testset "From complex matrix" begin
- i, j, k, l = Index.(2, ("i", "j", "k", "l"))
- M = [1+2im 2; 3 4]
- A = itensor(M, i, j)
- @test storage(A) isa NDTensors.Dense{ComplexF64}
- end
- end
-
- @testset "eltype promotion with scalar * and /" begin
- @test eltype(ITensor(1.0f0, Index(2)) * 2) === Float32
- @test eltype(ITensor(1.0f0, Index(2)) .* 2) === Float32
- @test eltype(ITensor(1.0f0, Index(2)) / 2) === Float32
- @test eltype(ITensor(1.0f0, Index(2)) ./ 2) === Float32
- @test eltype(ITensor(1.0f0, Index(2)) * 2.0f0) === Float32
- @test eltype(ITensor(1.0f0, Index(2)) .* 2.0f0) === Float32
- @test eltype(ITensor(1.0f0, Index(2)) / 2.0f0) === Float32
- @test eltype(ITensor(1.0f0, Index(2)) ./ 2.0f0) === Float32
- @test eltype(ITensor(1.0f0, Index(2)) * 2.0) === Float64
- @test eltype(ITensor(1.0f0, Index(2)) .* 2.0) === Float64
- @test eltype(ITensor(1.0f0, Index(2)) / 2.0) === Float64
- @test eltype(ITensor(1.0f0, Index(2)) ./ 2.0) === Float64
- end
-
- @testset "Division /" begin
- i = Index(2)
- A = random_itensor(i)
- B = A / 2
- C = A / ITensor(2)
- @test B isa ITensor
- @test C isa ITensor
- @test B ≈ C
- @test A[1] / 2 ≈ B[1]
- @test A[2] / 2 ≈ B[2]
- @test A[1] / 2 ≈ C[1]
- @test A[2] / 2 ≈ C[2]
- end
-
- @testset "Convert to complex" begin
- i = Index(2, "i")
- j = Index(2, "j")
- A = random_itensor(i, j)
- B = complex(A)
- for ii in 1:dim(i), jj in 1:dim(j)
- @test complex(A[i => ii, j => jj]) == B[i => ii, j => jj]
- end
- end
-
- @testset "Complex Number Operations" for _eltype in (Float32, Float64)
- i = Index(3, "i")
- j = Index(4, "j")
-
- A = random_itensor(complex(_eltype), i, j)
-
- rA = real(A)
- iA = imag(A)
- @test norm(rA + 1im * iA - A) < 1E-8
- @test eltype(rA) <: _eltype
- @test eltype(iA) <: _eltype
-
- cA = conj(A)
- @test eltype(cA) <: complex(_eltype)
- @test norm(cA) ≈ norm(A)
-
- B = random_itensor(_eltype, i, j)
-
- cB = conj(B)
- @test eltype(cB) <: _eltype
- @test norm(cB) ≈ norm(B)
- end
-
- @testset "similar" begin
- i = Index(2, "i")
- j = Index(2, "j")
- A = random_itensor(i, j)
- B = similar(A)
- @test inds(B) == inds(A)
- Ac = similar(A, ComplexF32)
- @test storage(Ac) isa NDTensors.Dense{ComplexF32}
- end
-
- @testset "fill!" begin
- i = Index(2, "i")
- j = Index(2, "j")
- A = random_itensor(i, j)
- fill!(A, 1.0)
- @test all(ITensors.data(A) .== 1.0)
- end
-
- @testset "fill! using broadcast" begin
- i = Index(2, "i")
- j = Index(2, "j")
- A = random_itensor(i, j)
- A .= 1.0
- @test all(ITensors.data(A) .== 1.0)
- end
-
- @testset "zero" begin
- i = Index(2)
- A = random_itensor(i)
- B = zero(A)
- @test false * A ≈ B
- end
-
- @testset "copyto!" begin
- i = Index(2, "i")
- j = Index(2, "j")
- M = [1 2; 3 4]
- A = itensor(M, i, j)
- N = 2 * M
- B = itensor(N, i, j)
- copyto!(A, B)
- @test A == B
- @test ITensors.data(A) == vec(N)
- A = itensor(M, i, j)
- B = itensor(N, j, i)
- copyto!(A, B)
- @test A == B
- @test ITensors.data(A) == vec(transpose(N))
- end
-
- @testset "Unary -" begin
- i = Index(2, "i")
- j = Index(2, "j")
- M = [1 2; 3 4]
- A = itensor(M, i, j)
- @test -A == itensor(-M, i, j)
- end
-
- @testset "dot" begin
- i = Index(2, "i")
- a = [1.0; 2.0]
- b = [3.0; 4.0]
- A = itensor(a, i)
- B = itensor(b, i)
- @test dot(A, B) == 11.0
- end
-
- @testset "mul!" begin
- i = Index(2; tags="i")
- j = Index(2; tags="j")
- k = Index(2; tags="k")
-
- A = random_itensor(i, j)
- B = random_itensor(j, k)
- C = random_itensor(i, k)
- mul!(C, A, B)
- @test C ≈ A * B
-
- A = random_itensor(i, j)
- B = random_itensor(j, k)
- C = random_itensor(k, i)
- mul!(C, A, B)
- @test C ≈ A * B
-
- A = random_itensor(i, j)
- B = random_itensor(k, j)
- C = random_itensor(i, k)
- mul!(C, A, B)
- @test C ≈ A * B
-
- A = random_itensor(i, j)
- B = random_itensor(k, j)
- C = random_itensor(k, i)
- mul!(C, A, B)
- @test C ≈ A * B
-
- A = random_itensor(j, i)
- B = random_itensor(j, k)
- C = random_itensor(i, k)
- mul!(C, A, B)
- @test C ≈ A * B
-
- A = random_itensor(j, i)
- B = random_itensor(j, k)
- C = random_itensor(k, i)
- mul!(C, A, B)
- @test C ≈ A * B
-
- A = random_itensor(j, i)
- B = random_itensor(k, j)
- C = random_itensor(i, k)
- mul!(C, A, B)
- @test C ≈ A * B
-
- A = random_itensor(j, i)
- B = random_itensor(k, j)
- C = random_itensor(k, i)
- mul!(C, A, B)
- @test C ≈ A * B
-
- A = random_itensor(i, j)
- B = random_itensor(k, j)
- C = random_itensor(k, i)
- α = 2
- β = 3
- R = mul!(copy(C), A, B, α, β)
- @test α * A * B + β * C ≈ R
-
- @testset "In-place bugs" begin
- @testset "Bug 1" begin
- l1 = Index(3, "l=1")
- l2 = Index(3, "l=2")
- s = Index(2, "s")
+ for ais in permutations((1, 2, 3)),
+ bis in permutations((2, 3, 4)),
+ cis in permutations((1, 4))
- A = random_itensor(s', s)
- B = random_itensor(l1, s, l2)
+ A = random_itensor(ntuple(i -> is[ais[i]], Val(length(ais))))
+ B = random_itensor(ntuple(i -> is[bis[i]], Val(length(bis))))
+ C = random_itensor(ntuple(i -> is[cis[i]], Val(length(cis))))
- C = random_itensor(l1, s', l2)
+ C .= A .* B
- C .= A .* B
+ @test C ≈ A * B
+ end
- @test C ≈ A * B
- end
+ for ais in permutations((1, 2, 3)),
+ bis in permutations((2, 3, 4, 5)),
+ cis in permutations((1, 4, 5))
+
+ A = random_itensor(ntuple(i -> is[ais[i]], Val(length(ais))))
+ B = random_itensor(ntuple(i -> is[bis[i]], Val(length(bis))))
+ C = random_itensor(ntuple(i -> is[cis[i]], Val(length(cis))))
- @testset "Bug 2" begin
- is = [Index(n + 1, "i$n") for n in 1:6]
+ C .= A .* B
- for ais in permutations((1, 2, 3)),
- bis in permutations((2, 3, 4)),
- cis in permutations((1, 4))
+ @test C ≈ A * B
+ end
+ end
+ end
+
+ @testset "In-place outer bug" begin
+ l1 = Index(3, "l=1")
+ s = Index(2, "s")
- A = random_itensor(ntuple(i -> is[ais[i]], Val(length(ais))))
- B = random_itensor(ntuple(i -> is[bis[i]], Val(length(bis))))
- C = random_itensor(ntuple(i -> is[cis[i]], Val(length(cis))))
+ A = random_itensor(l1)
+ B = random_itensor(s)
+ C = random_itensor(s, l1)
- C .= A .* B
+ C .= A .* B
- @test C ≈ A * B
+ @test C ≈ A * B
end
- for ais in permutations((1, 2, 3)),
- bis in permutations((2, 3, 4, 5)),
- cis in permutations((1, 4, 5))
+ @testset "In-place contractions" begin
+ i1 = Index(2, "i1")
+ i2 = Index(2, "i2")
+ i3 = Index(2, "i3")
+ i4 = Index(2, "i4")
+ i5 = Index(2, "i5")
+ i6 = Index(2, "i6")
+ j1 = Index(2, "j1")
+ j2 = Index(2, "j2")
+ j3 = Index(2, "j3")
- A = random_itensor(ntuple(i -> is[ais[i]], Val(length(ais))))
- B = random_itensor(ntuple(i -> is[bis[i]], Val(length(bis))))
- C = random_itensor(ntuple(i -> is[cis[i]], Val(length(cis))))
+ #A = random_itensor(s', s)
+ #B = random_itensor(l1, s, l2)
- C .= A .* B
+ #C = random_itensor(l1, s', l2)
- @test C ≈ A * B
+ C .= A .* B
+ @test C ≈ A * B
end
- end
end
- @testset "In-place outer bug" begin
- l1 = Index(3, "l=1")
- s = Index(2, "s")
+ @testset "exponentiate" begin
+ s1 = Index(2, "s1")
+ s2 = Index(2, "s2")
+ i1 = Index(2, "i1")
+ i2 = Index(2, "i2")
+ Amat = rand(2, 2, 2, 2)
+ A = itensor(Amat, i1, i2, s1, s2)
+
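+        # exponentiating over the (i1, i2)/(s1, s2) index pairs should agree with exponentiating the 4x4 matricization of Amat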
+ Aexp = exp(A, (i1, i2), (s1, s2))
+ Amatexp = reshape(exp(reshape(Amat, 4, 4)), 2, 2, 2, 2)
+ Aexp_from_mat = itensor(Amatexp, i1, i2, s1, s2)
+ @test Aexp ≈ Aexp_from_mat
+
+        # test that exponentiation works when indices need to be permuted
+ Aexp = exp(A, (s1, s2), (i1, i2))
+ Amatexp = Matrix(exp(reshape(Amat, 4, 4))')
+ Aexp_from_mat = itensor(reshape(Amatexp, 2, 2, 2, 2), s1, s2, i1, i2)
+ @test Aexp ≈ Aexp_from_mat
+
+        # test exponentiation when ishermitian = true is used
+ Amat = reshape(Amat, 4, 4)
+ Amat = reshape(Amat + Amat' + randn(4, 4) * 1.0e-10, 2, 2, 2, 2)
+ A = itensor(Amat, i1, i2, s1, s2)
+ Aexp = exp(A, (i1, i2), (s1, s2); ishermitian = true)
+ Amatexp = reshape(parent(exp(LinearAlgebra.Hermitian(reshape(Amat, 4, 4)))), 2, 2, 2, 2)
+ Aexp_from_mat = itensor(Amatexp, i1, i2, s1, s2)
+ @test Aexp ≈ Aexp_from_mat
+ Aexp = exp(A, (i1, i2), (s1, s2); ishermitian = true)
+ Amatexp = reshape(parent(exp(LinearAlgebra.Hermitian(reshape(Amat, 4, 4)))), 2, 2, 2, 2)
+ Aexp_from_mat = itensor(Amatexp, i1, i2, s1, s2)
+ @test Aexp ≈ Aexp_from_mat
+ end
- A = random_itensor(l1)
- B = random_itensor(s)
- C = random_itensor(s, l1)
+ @testset "onehot (setelt)" begin
+ i = Index(2, "i")
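+        # onehot and setelt build a tensor that is 1 at the given index value and 0 elsewhere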
- C .= A .* B
+ T = onehot(i => 1)
+ @test eltype(T) === Float64
+ @test T[i => 1] ≈ 1.0
+ @test T[i => 2] ≈ 0.0
- @test C ≈ A * B
- end
+ T = setelt(i => 2)
+ @test T[i => 1] ≈ 0.0
+ @test T[i => 2] ≈ 1.0
- @testset "In-place contractions" begin
- i1 = Index(2, "i1")
- i2 = Index(2, "i2")
- i3 = Index(2, "i3")
- i4 = Index(2, "i4")
- i5 = Index(2, "i5")
- i6 = Index(2, "i6")
- j1 = Index(2, "j1")
- j2 = Index(2, "j2")
- j3 = Index(2, "j3")
+ j = Index(2, "j")
- #A = random_itensor(s', s)
- #B = random_itensor(l1, s, l2)
+ T = onehot(j => 2, i => 1)
+ @test T[j => 1, i => 1] ≈ 0.0
+ @test T[j => 2, i => 1] ≈ 1.0
+ @test T[j => 1, i => 2] ≈ 0.0
+ @test T[j => 2, i => 2] ≈ 0.0
- #C = random_itensor(l1, s', l2)
+ T = onehot(Float32, i => 1)
+ @test eltype(T) === Float32
+ @test T[i => 1] ≈ 1.0
+ @test T[i => 2] ≈ 0.0
- C .= A .* B
- @test C ≈ A * B
- end
- end
-
- @testset "exponentiate" begin
- s1 = Index(2, "s1")
- s2 = Index(2, "s2")
- i1 = Index(2, "i1")
- i2 = Index(2, "i2")
- Amat = rand(2, 2, 2, 2)
- A = itensor(Amat, i1, i2, s1, s2)
-
- Aexp = exp(A, (i1, i2), (s1, s2))
- Amatexp = reshape(exp(reshape(Amat, 4, 4)), 2, 2, 2, 2)
- Aexp_from_mat = itensor(Amatexp, i1, i2, s1, s2)
- @test Aexp ≈ Aexp_from_mat
-
- #test that exponentiation works when indices need to be permuted
- Aexp = exp(A, (s1, s2), (i1, i2))
- Amatexp = Matrix(exp(reshape(Amat, 4, 4))')
- Aexp_from_mat = itensor(reshape(Amatexp, 2, 2, 2, 2), s1, s2, i1, i2)
- @test Aexp ≈ Aexp_from_mat
-
- #test exponentiation when hermitian=true is used
- Amat = reshape(Amat, 4, 4)
- Amat = reshape(Amat + Amat' + randn(4, 4) * 1e-10, 2, 2, 2, 2)
- A = itensor(Amat, i1, i2, s1, s2)
- Aexp = exp(A, (i1, i2), (s1, s2); ishermitian=true)
- Amatexp = reshape(parent(exp(LinearAlgebra.Hermitian(reshape(Amat, 4, 4)))), 2, 2, 2, 2)
- Aexp_from_mat = itensor(Amatexp, i1, i2, s1, s2)
- @test Aexp ≈ Aexp_from_mat
- Aexp = exp(A, (i1, i2), (s1, s2); ishermitian=true)
- Amatexp = reshape(parent(exp(LinearAlgebra.Hermitian(reshape(Amat, 4, 4)))), 2, 2, 2, 2)
- Aexp_from_mat = itensor(Amatexp, i1, i2, s1, s2)
- @test Aexp ≈ Aexp_from_mat
- end
-
- @testset "onehot (setelt)" begin
- i = Index(2, "i")
-
- T = onehot(i => 1)
- @test eltype(T) === Float64
- @test T[i => 1] ≈ 1.0
- @test T[i => 2] ≈ 0.0
-
- T = setelt(i => 2)
- @test T[i => 1] ≈ 0.0
- @test T[i => 2] ≈ 1.0
-
- j = Index(2, "j")
-
- T = onehot(j => 2, i => 1)
- @test T[j => 1, i => 1] ≈ 0.0
- @test T[j => 2, i => 1] ≈ 1.0
- @test T[j => 1, i => 2] ≈ 0.0
- @test T[j => 2, i => 2] ≈ 0.0
-
- T = onehot(Float32, i => 1)
- @test eltype(T) === Float32
- @test T[i => 1] ≈ 1.0
- @test T[i => 2] ≈ 0.0
-
- T = onehot(ComplexF32, i => 1)
- @test eltype(T) === ComplexF32
- @test T[i => 1] ≈ 1.0
- @test T[i => 2] ≈ 0.0
- end
-
- @testset "add, subtract, and axpy" begin
- i = Index(2, "i")
- a = [1.0; 2.0]
- b = [3.0; 4.0]
- A = itensor(a, i)
- B = itensor(b, i)
- c = [5.0; 8.0]
- @test A + B == itensor([4.0; 6.0], i)
- @test axpy!(2.0, A, B) == itensor(c, i)
- a = [1.0; 2.0]
- b = [3.0; 4.0]
- A = itensor(a, i)
- B = itensor(b, i)
- c = [5.0; 8.0]
- @test (B .+= 2.0 .* A) == itensor(c, i)
- a = [1.0; 2.0]
- b = [3.0; 4.0]
- A = itensor(a, i)
- B = itensor(b, i)
- c = [8.0; 12.0]
- @test (A .= 2.0 .* A .+ 2.0 .* B) == itensor(c, i)
- a = [1.0; 2.0]
- A = itensor(a, i)
- B = ITensor(2.0)
- @test_throws DimensionMismatch A + B
- a = [1.0; 2.0]
- A = itensor(a, i)
- B = ITensor()
- C = A + B
- @test C ≈ A
- A[1] = 5
- @test C[1] == 5
- a = [1.0; 2.0]
- A = itensor(a, i)
- B = ITensor(0)
- @test_throws DimensionMismatch A + B
- a = [1.0; 2.0]
- A = itensor(a, i)
- B = ITensor(ComplexF64)
- @test_throws DimensionMismatch A + B
- a = [1.0; 2.0]
- A = itensor(a, i)
- B = ITensor(Float64)
- @test_throws DimensionMismatch A + B
- a = [1.0; 2.0]
- a = [1.0; 2.0]
- A = itensor(a, i)
- B = ITensor(2.0)
- @test_throws DimensionMismatch A - B
- a = [1.0; 2.0]
- A = itensor(a, i)
- B = ITensor()
- C = A - B
- @test C ≈ A
- A[1] = 5
- @test C[1] == 5
- #@test_throws DimensionMismatch A - B
- a = [1.0; 2.0]
- A = itensor(a, i)
- B = ITensor(2.0)
- @test_throws DimensionMismatch B - A
- a = [1.0; 2.0]
- A = itensor(a, i)
- B = ITensor(Float64)
- @test_throws DimensionMismatch B - A
- a = [1.0; 2.0]
- A = itensor(a, i)
- B = ITensor()
- C = B - A
- @test C ≈ -A
- A[1] = 5
- @test C[1] == -1
- a = [1.0; 2.0]
- b = [3.0; 4.0]
- A = itensor(a, i)
- B = itensor(b, i)
- c = [2.0; 2.0]
- @test B - A == itensor(c, i)
- @test A - B == -itensor(c, i)
- end
-
- @testset "mul! and rmul!" begin
- i = Index(2, "i")
- a = [1.0; 2.0]
- b = [2.0; 4.0]
- A = itensor(a, i)
- A2, A3 = copy(A), copy(A)
- B = itensor(b, i)
- @test mul!(A2, A, 2.0) == B == (A2 .= 0 .* A2 .+ 2 .* A)
- @test rmul!(A, 2.0) == B == ITensors.scale!(A3, 2)
- #make sure mul! works also when A2 has NaNs in it
- A = itensor([1.0; 2.0], i)
- A2 = itensor([NaN; 1.0], i)
- @test mul!(A2, A, 2.0) == B
-
- i = Index(2, "i")
- j = Index(2, "j")
- M = [1 2; 3 4]
- A = itensor(M, i, j)
- N = 2 * M
- B = itensor(N, j, i)
- @test ITensors.data(mul!(B, A, 2.0)) == 2.0 * vec(transpose(M))
- end
-
- @testset "Construct from Array" begin
- i = Index(2, "index_i")
- j = Index(2, "index_j")
-
- M = [
- 1.0 2
- 3 4
- ]
- T = itensor(M, i, j)
- T[i => 1, j => 1] = 3.3
- @test M[1, 1] == 3.3
- @test T[i => 1, j => 1] == 3.3
- @test storage(T) isa NDTensors.Dense{Float64}
-
- M = [
- 1 2
- 3 4
- ]
- T = itensor(M, i, j)
- T[i => 1, j => 1] = 3.3
- @test M[1, 1] == 1
- @test T[i => 1, j => 1] == 3.3
- @test storage(T) isa NDTensors.Dense{Float64}
-
- M = [
- 1 2
- 3 4
- ]
- T = itensor(Int, M, i, j)
- T[i => 1, j => 1] = 6
- @test M[1, 1] == 6
- @test T[i => 1, j => 1] == 6
- @test storage(T) isa NDTensors.Dense{Int}
-
- # This version makes a copy
- M = [
- 1.0 2
- 3 4
- ]
- T = ITensor(M, i, j)
- T[i => 1, j => 1] = 3.3
- @test M[1, 1] == 1
- @test T[i => 1, j => 1] == 3.3
-
- # Empty indices
- A = randn(1)
- T = itensor(A, Index[])
- @test A[] == T[]
- T = itensor(A, Index[], Index[])
- @test A[] == T[]
- T = itensor(A, Any[])
- @test A[] == T[]
-
- A = randn(1, 1)
- T = itensor(A, Index[])
- @test A[] == T[]
- T = itensor(A, Index[], Index[])
- @test A[] == T[]
- T = itensor(A, Any[], Any[])
- @test A[] == T[]
-
- @test_throws ErrorException itensor(rand(1), Int[1])
- end
-
- @testset "Construct from AbstractArray" begin
- i = Index(2, "index_i")
- j = Index(2, "index_j")
-
- X = [
- 1.0 2 0
- 3 4 0
- 0 0 0
- ]
- M = @view X[1:2, 1:2]
- T = itensor(M, i, j)
- T[i => 1, j => 1] = 3.3
- @test M[1, 1] == 3.3
- @test T[i => 1, j => 1] == 3.3
- @test storage(T) isa NDTensors.Dense{Float64}
- end
-
- @testset "ITensor Array constructor view behavior" begin
- d = 2
- i = Index(d)
-
- # view
- A = randn(Float64, d, d)
- T = itensor(A, i', dag(i))
- @test storage(T) isa NDTensors.Dense{Float64}
- A[1, 1] = 2.0
- T[1, 1] == 2.0
-
- # view
- A = rand(Int, d, d)
- T = itensor(Int, A, i', dag(i))
- @test storage(T) isa NDTensors.Dense{Int}
- A[1, 1] = 2
- T[1, 1] == 2
-
- # no view
- A = rand(Int, d, d)
- T = itensor(A, i', dag(i))
- @test storage(T) isa NDTensors.Dense{Float64}
- A[1, 1] = 2
- T[1, 1] ≠ 2
-
- # no view
- A = randn(Float64, d, d)
- T = ITensor(A, i', dag(i))
- @test storage(T) isa NDTensors.Dense{Float64}
- A[1, 1] = 2
- T[1, 1] ≠ 2
-
- # no view
- A = rand(Int, d, d)
- T = ITensor(Int, A, i', dag(i))
- @test storage(T) isa NDTensors.Dense{Int}
- A[1, 1] = 2
- T[1, 1] ≠ 2
-
- # no view
- A = rand(Int, d, d)
- T = ITensor(A, i', dag(i))
- @test storage(T) isa NDTensors.Dense{Float64}
- A[1, 1] = 2
- T[1, 1] ≠ 2
- end
-
- @testset "Convert to Array" begin
- i = Index(2, "i")
- j = Index(3, "j")
- T = random_itensor(i, j)
-
- A = Array{Float64}(T, i, j)
- for I in CartesianIndices(T)
- @test A[I] == T[I]
+ T = onehot(ComplexF32, i => 1)
+ @test eltype(T) === ComplexF32
+ @test T[i => 1] ≈ 1.0
+ @test T[i => 2] ≈ 0.0
end
- T11 = T[1, 1]
- T[1, 1] = 1
- @test T[1, 1] == 1
- @test T11 != 1
- @test A[1, 1] == T11
-
- A = Matrix{Float64}(T, i, j)
- for I in CartesianIndices(T)
- @test A[I] == T[I]
+ @testset "add, subtract, and axpy" begin
+ i = Index(2, "i")
+ a = [1.0; 2.0]
+ b = [3.0; 4.0]
+ A = itensor(a, i)
+ B = itensor(b, i)
+ c = [5.0; 8.0]
+ @test A + B == itensor([4.0; 6.0], i)
+ @test axpy!(2.0, A, B) == itensor(c, i)
+ a = [1.0; 2.0]
+ b = [3.0; 4.0]
+ A = itensor(a, i)
+ B = itensor(b, i)
+ c = [5.0; 8.0]
+ @test (B .+= 2.0 .* A) == itensor(c, i)
+ a = [1.0; 2.0]
+ b = [3.0; 4.0]
+ A = itensor(a, i)
+ B = itensor(b, i)
+ c = [8.0; 12.0]
+ @test (A .= 2.0 .* A .+ 2.0 .* B) == itensor(c, i)
+ a = [1.0; 2.0]
+ A = itensor(a, i)
+ B = ITensor(2.0)
+ @test_throws DimensionMismatch A + B
+ a = [1.0; 2.0]
+ A = itensor(a, i)
+ B = ITensor()
+ C = A + B
+ @test C ≈ A
+ A[1] = 5
+ @test C[1] == 5
+ a = [1.0; 2.0]
+ A = itensor(a, i)
+ B = ITensor(0)
+ @test_throws DimensionMismatch A + B
+ a = [1.0; 2.0]
+ A = itensor(a, i)
+ B = ITensor(ComplexF64)
+ @test_throws DimensionMismatch A + B
+ a = [1.0; 2.0]
+ A = itensor(a, i)
+ B = ITensor(Float64)
+ @test_throws DimensionMismatch A + B
+ a = [1.0; 2.0]
+ a = [1.0; 2.0]
+ A = itensor(a, i)
+ B = ITensor(2.0)
+ @test_throws DimensionMismatch A - B
+ a = [1.0; 2.0]
+ A = itensor(a, i)
+ B = ITensor()
+ C = A - B
+ @test C ≈ A
+ A[1] = 5
+ @test C[1] == 5
+ #@test_throws DimensionMismatch A - B
+ a = [1.0; 2.0]
+ A = itensor(a, i)
+ B = ITensor(2.0)
+ @test_throws DimensionMismatch B - A
+ a = [1.0; 2.0]
+ A = itensor(a, i)
+ B = ITensor(Float64)
+ @test_throws DimensionMismatch B - A
+ a = [1.0; 2.0]
+ A = itensor(a, i)
+ B = ITensor()
+ C = B - A
+ @test C ≈ -A
+ A[1] = 5
+ @test C[1] == -1
+ a = [1.0; 2.0]
+ b = [3.0; 4.0]
+ A = itensor(a, i)
+ B = itensor(b, i)
+ c = [2.0; 2.0]
+ @test B - A == itensor(c, i)
+ @test A - B == -itensor(c, i)
end
- A = Matrix(T, i, j)
- for I in CartesianIndices(T)
- @test A[I] == T[I]
+ @testset "mul! and rmul!" begin
+ i = Index(2, "i")
+ a = [1.0; 2.0]
+ b = [2.0; 4.0]
+ A = itensor(a, i)
+ A2, A3 = copy(A), copy(A)
+ B = itensor(b, i)
+ @test mul!(A2, A, 2.0) == B == (A2 .= 0 .* A2 .+ 2 .* A)
+ @test rmul!(A, 2.0) == B == ITensors.scale!(A3, 2)
+        # make sure mul! also works when A2 has NaNs in it
+ A = itensor([1.0; 2.0], i)
+ A2 = itensor([NaN; 1.0], i)
+ @test mul!(A2, A, 2.0) == B
+
+ i = Index(2, "i")
+ j = Index(2, "j")
+ M = [1 2; 3 4]
+ A = itensor(M, i, j)
+ N = 2 * M
+ B = itensor(N, j, i)
+ @test ITensors.data(mul!(B, A, 2.0)) == 2.0 * vec(transpose(M))
end
- A = Array(T, i, j)
- for I in CartesianIndices(T)
- @test A[I] == T[I]
+ @testset "Construct from Array" begin
+ i = Index(2, "index_i")
+ j = Index(2, "index_j")
+
+ M = [
+ 1.0 2
+ 3 4
+ ]
+ T = itensor(M, i, j)
+ T[i => 1, j => 1] = 3.3
+ @test M[1, 1] == 3.3
+ @test T[i => 1, j => 1] == 3.3
+ @test storage(T) isa NDTensors.Dense{Float64}
+
+ M = [
+ 1 2
+ 3 4
+ ]
+ T = itensor(M, i, j)
+ T[i => 1, j => 1] = 3.3
+ @test M[1, 1] == 1
+ @test T[i => 1, j => 1] == 3.3
+ @test storage(T) isa NDTensors.Dense{Float64}
+
+ M = [
+ 1 2
+ 3 4
+ ]
+ T = itensor(Int, M, i, j)
+ T[i => 1, j => 1] = 6
+ @test M[1, 1] == 6
+ @test T[i => 1, j => 1] == 6
+ @test storage(T) isa NDTensors.Dense{Int}
+
+ # This version makes a copy
+ M = [
+ 1.0 2
+ 3 4
+ ]
+ T = ITensor(M, i, j)
+ T[i => 1, j => 1] = 3.3
+ @test M[1, 1] == 1
+ @test T[i => 1, j => 1] == 3.3
+
+ # Empty indices
+ A = randn(1)
+ T = itensor(A, Index[])
+ @test A[] == T[]
+ T = itensor(A, Index[], Index[])
+ @test A[] == T[]
+ T = itensor(A, Any[])
+ @test A[] == T[]
+
+ A = randn(1, 1)
+ T = itensor(A, Index[])
+ @test A[] == T[]
+ T = itensor(A, Index[], Index[])
+ @test A[] == T[]
+ T = itensor(A, Any[], Any[])
+ @test A[] == T[]
+
+ @test_throws ErrorException itensor(rand(1), Int[1])
end
- T = random_itensor(i)
- A = Vector(T)
- for I in CartesianIndices(T)
- @test A[I] == T[I]
+ @testset "Construct from AbstractArray" begin
+ i = Index(2, "index_i")
+ j = Index(2, "index_j")
+
+ X = [
+ 1.0 2 0
+ 3 4 0
+ 0 0 0
+ ]
+ M = @view X[1:2, 1:2]
+ T = itensor(M, i, j)
+ T[i => 1, j => 1] = 3.3
+ @test M[1, 1] == 3.3
+ @test T[i => 1, j => 1] == 3.3
+ @test storage(T) isa NDTensors.Dense{Float64}
end
- end
-
- @testset "Test isapprox for ITensors" begin
- m, n = rand(0:20, 2)
- i = Index(m)
- j = Index(n)
- realData = rand(m, n)
- complexData = complex(realData)
- A = itensor(realData, i, j)
- B = itensor(complexData, i, j)
- @test A ≈ B
- @test B ≈ A
- A = permute(A, j, i)
- @test A ≈ B
- @test B ≈ A
- end
-
- @testset "permute" begin
- i = Index(2)
- A = ITensor(i, i')
- Ap = permute(A, i, i')
- A[i => 1, i' => 1] = 1
- @test A[i => 1, i' => 1] == 1
- @test Ap[i => 1, i' => 1] == 0
- end
-
- @testset "permute, NeverAlias()/AllowAlias()" begin
- i = Index(2)
- A = ITensor(i, i')
- Ap = permute(A, i, i')
- A[i => 1, i' => 1] = 1
- @test A[i => 1, i' => 1] == 1
- @test Ap[i => 1, i' => 1] == 0
-
- i = Index(2)
- A = ITensor(i, i')
- Ap = permute(ITensors.NeverAlias(), A, i, i')
- A[i => 1, i' => 1] = 1
- @test A[i => 1, i' => 1] == 1
- @test Ap[i => 1, i' => 1] == 0
-
- i = Index(2, "index_i")
- j = Index(4, "index_j")
- k = Index(3, "index_k")
- T = random_itensor(i, j, k)
-
- # NeverAlias()/allow_alias = false by default
- pT_noalias_1 = permute(T, i, j, k)
- pT_noalias_1[1, 1, 1] = 12
- @test T[1, 1, 1] != pT_noalias_1[1, 1, 1]
-
- pT_noalias_2 = permute(T, i, j, k; allow_alias=false)
- pT_noalias_2[1, 1, 1] = 12
- @test T[1, 1, 1] != pT_noalias_1[1, 1, 1]
-
- cT = copy(T)
- pT_alias = permute(cT, i, j, k; allow_alias=true)
- pT_alias[1, 1, 1] = 12
- @test cT[1, 1, 1] == pT_alias[1, 1, 1]
-
- cT = copy(T)
- pT_alias = permute(ITensors.AllowAlias(), cT, i, j, k)
- pT_alias[1, 1, 1] = 12
- @test cT[1, 1, 1] == pT_alias[1, 1, 1]
- end
-
- @testset "ITensor tagging and priming" begin
- s1 = Index(2, "Site,s=1")
- s2 = Index(2, "Site,s=2")
- l = Index(3, "Link")
- ltmp = settags(l, "Temp")
- A1 = random_itensor(s1, l, l')
- A2 = random_itensor(s2, l', l'')
- @testset "firstind(::ITensor,::String)" begin
- @test s1 == firstind(A1, "Site")
- @test s1 == firstind(A1, "s=1")
- @test s1 == firstind(A1, "s=1,Site")
- @test l == firstind(A1; tags="Link", plev=0)
- @test l' == firstind(A1; plev=1)
- @test l' == firstind(A1; tags="Link", plev=1)
- @test s2 == firstind(A2, "Site")
- @test s2 == firstind(A2, "s=2")
- @test s2 == firstind(A2, "Site")
- @test s2 == firstind(A2; plev=0)
- @test s2 == firstind(A2; tags="s=2", plev=0)
- @test s2 == firstind(A2; tags="Site", plev=0)
- @test s2 == firstind(A2; tags="s=2,Site", plev=0)
- @test l' == firstind(A2; plev=1)
- @test l' == firstind(A2; tags="Link", plev=1)
- @test l'' == firstind(A2; plev=2)
- @test l'' == firstind(A2; tags="Link", plev=2)
+
+ @testset "ITensor Array constructor view behavior" begin
+ d = 2
+ i = Index(d)
+
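+        # as checked below, itensor shares (views) the input array when no element type conversion is needed, while ITensor makes a copy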
+ # view
+ A = randn(Float64, d, d)
+ T = itensor(A, i', dag(i))
+ @test storage(T) isa NDTensors.Dense{Float64}
+ A[1, 1] = 2.0
+        @test T[1, 1] == 2.0
+
+ # view
+ A = rand(Int, d, d)
+ T = itensor(Int, A, i', dag(i))
+ @test storage(T) isa NDTensors.Dense{Int}
+ A[1, 1] = 2
+        @test T[1, 1] == 2
+
+ # no view
+ A = rand(Int, d, d)
+ T = itensor(A, i', dag(i))
+ @test storage(T) isa NDTensors.Dense{Float64}
+ A[1, 1] = 2
+        @test T[1, 1] ≠ 2
+
+ # no view
+ A = randn(Float64, d, d)
+ T = ITensor(A, i', dag(i))
+ @test storage(T) isa NDTensors.Dense{Float64}
+ A[1, 1] = 2
+        @test T[1, 1] ≠ 2
+
+ # no view
+ A = rand(Int, d, d)
+ T = ITensor(Int, A, i', dag(i))
+ @test storage(T) isa NDTensors.Dense{Int}
+ A[1, 1] = 2
+        @test T[1, 1] ≠ 2
+
+ # no view
+ A = rand(Int, d, d)
+ T = ITensor(A, i', dag(i))
+ @test storage(T) isa NDTensors.Dense{Float64}
+ A[1, 1] = 2
+        @test T[1, 1] ≠ 2
end
- @testset "addtags(::ITensor,::String,::String)" begin
- s1u = addtags(s1, "u")
- lu = addtags(l, "u")
- A1u = addtags(A1, "u")
- @test hasinds(A1u, s1u, lu, lu')
+ @testset "Convert to Array" begin
+ i = Index(2, "i")
+ j = Index(3, "j")
+ T = random_itensor(i, j)
- A1u = addtags(A1, "u", "Link")
- @test hasinds(A1u, s1, lu, lu')
+ A = Array{Float64}(T, i, j)
+ for I in CartesianIndices(T)
+ @test A[I] == T[I]
+ end
- A1u = addtags(A1, "u"; tags="Link")
- @test hasinds(A1u, s1, lu, lu')
+ T11 = T[1, 1]
+ T[1, 1] = 1
+ @test T[1, 1] == 1
+ @test T11 != 1
+ @test A[1, 1] == T11
+
+ A = Matrix{Float64}(T, i, j)
+ for I in CartesianIndices(T)
+ @test A[I] == T[I]
+ end
- A1u = addtags(A1, "u"; plev=0)
- @test hasinds(A1u, s1u, lu, l')
+ A = Matrix(T, i, j)
+ for I in CartesianIndices(T)
+ @test A[I] == T[I]
+ end
- A1u = addtags(A1, "u"; tags="Link", plev=0)
- @test hasinds(A1u, s1, lu, l')
+ A = Array(T, i, j)
+ for I in CartesianIndices(T)
+ @test A[I] == T[I]
+ end
- A1u = addtags(A1, "u"; tags="Link", plev=1)
- @test hasinds(A1u, s1, l, lu')
+ T = random_itensor(i)
+ A = Vector(T)
+ for I in CartesianIndices(T)
+ @test A[I] == T[I]
+ end
end
- @testset "removetags(::ITensor,::String,::String)" begin
- A2r = removetags(A2, "Site")
- @test hasinds(A2r, removetags(s2, "Site"), l', l'')
- A2r = removetags(A2, "Link"; plev=1)
- @test hasinds(A2r, s2, removetags(l, "Link")', l'')
+ @testset "Test isapprox for ITensors" begin
+ m, n = rand(0:20, 2)
+ i = Index(m)
+ j = Index(n)
+ realData = rand(m, n)
+ complexData = complex(realData)
+ A = itensor(realData, i, j)
+ B = itensor(complexData, i, j)
+ @test A ≈ B
+ @test B ≈ A
+ A = permute(A, j, i)
+ @test A ≈ B
+ @test B ≈ A
+ end
- A2r = replacetags(A2, "Link", "Temp"; plev=1)
- @test hasinds(A2r, s2, ltmp', l'')
+ @testset "permute" begin
+ i = Index(2)
+ A = ITensor(i, i')
+ Ap = permute(A, i, i')
+ A[i => 1, i' => 1] = 1
+ @test A[i => 1, i' => 1] == 1
+ @test Ap[i => 1, i' => 1] == 0
end
- @testset "replacetags(::ITensor,::String,::String)" begin
- s2tmp = replacetags(s2, "Site", "Temp")
- @test s2tmp == replacetags(s2, "Site" => "Temp")
+ @testset "permute, NeverAlias()/AllowAlias()" begin
+ i = Index(2)
+ A = ITensor(i, i')
+ Ap = permute(A, i, i')
+ A[i => 1, i' => 1] = 1
+ @test A[i => 1, i' => 1] == 1
+ @test Ap[i => 1, i' => 1] == 0
+
+ i = Index(2)
+ A = ITensor(i, i')
+ Ap = permute(ITensors.NeverAlias(), A, i, i')
+ A[i => 1, i' => 1] = 1
+ @test A[i => 1, i' => 1] == 1
+ @test Ap[i => 1, i' => 1] == 0
+
+ i = Index(2, "index_i")
+ j = Index(4, "index_j")
+ k = Index(3, "index_k")
+ T = random_itensor(i, j, k)
+
+ # NeverAlias()/allow_alias = false by default
+ pT_noalias_1 = permute(T, i, j, k)
+ pT_noalias_1[1, 1, 1] = 12
+ @test T[1, 1, 1] != pT_noalias_1[1, 1, 1]
+
+ pT_noalias_2 = permute(T, i, j, k; allow_alias = false)
+ pT_noalias_2[1, 1, 1] = 12
+ @test T[1, 1, 1] != pT_noalias_1[1, 1, 1]
+
+ cT = copy(T)
+ pT_alias = permute(cT, i, j, k; allow_alias = true)
+ pT_alias[1, 1, 1] = 12
+ @test cT[1, 1, 1] == pT_alias[1, 1, 1]
+
+ cT = copy(T)
+ pT_alias = permute(ITensors.AllowAlias(), cT, i, j, k)
+ pT_alias[1, 1, 1] = 12
+ @test cT[1, 1, 1] == pT_alias[1, 1, 1]
+ end
- ltmp = replacetags(l, "Link", "Temp")
+ @testset "ITensor tagging and priming" begin
+ s1 = Index(2, "Site,s=1")
+ s2 = Index(2, "Site,s=2")
+ l = Index(3, "Link")
+ ltmp = settags(l, "Temp")
+ A1 = random_itensor(s1, l, l')
+ A2 = random_itensor(s2, l', l'')
+ @testset "firstind(::ITensor,::String)" begin
+ @test s1 == firstind(A1, "Site")
+ @test s1 == firstind(A1, "s=1")
+ @test s1 == firstind(A1, "s=1,Site")
+ @test l == firstind(A1; tags = "Link", plev = 0)
+ @test l' == firstind(A1; plev = 1)
+ @test l' == firstind(A1; tags = "Link", plev = 1)
+ @test s2 == firstind(A2, "Site")
+ @test s2 == firstind(A2, "s=2")
+ @test s2 == firstind(A2, "Site")
+ @test s2 == firstind(A2; plev = 0)
+ @test s2 == firstind(A2; tags = "s=2", plev = 0)
+ @test s2 == firstind(A2; tags = "Site", plev = 0)
+ @test s2 == firstind(A2; tags = "s=2,Site", plev = 0)
+ @test l' == firstind(A2; plev = 1)
+ @test l' == firstind(A2; tags = "Link", plev = 1)
+ @test l'' == firstind(A2; plev = 2)
+ @test l'' == firstind(A2; tags = "Link", plev = 2)
+ end
+ @testset "addtags(::ITensor,::String,::String)" begin
+ s1u = addtags(s1, "u")
+ lu = addtags(l, "u")
- A2r = replacetags(A2, "Site", "Temp")
- @test hasinds(A2r, s2tmp, l', l'')
+ A1u = addtags(A1, "u")
+ @test hasinds(A1u, s1u, lu, lu')
- A2r = replacetags(A2, "Site" => "Temp")
- @test hasinds(A2r, s2tmp, l', l'')
+ A1u = addtags(A1, "u", "Link")
+ @test hasinds(A1u, s1, lu, lu')
- A2r = replacetags(A2, "Link", "Temp")
- @test hasinds(A2r, s2, ltmp', ltmp'')
+ A1u = addtags(A1, "u"; tags = "Link")
+ @test hasinds(A1u, s1, lu, lu')
- A2r = replacetags(A2, "Site" => "Link", "Link" => "Site")
- @test hasinds(
- A2r,
- replacetags(s2, "Site" => "Link"),
- replacetags(l', "Link" => "Site"),
- replacetags(l'', "Link" => "Site"),
- )
- end
- @testset "prime(::ITensor,::String)" begin
- A2p = prime(A2)
- @test A2p == A2'
- @test hasinds(A2p, s2', l'', l''')
+ A1u = addtags(A1, "u"; plev = 0)
+ @test hasinds(A1u, s1u, lu, l')
- A2p = prime(A2, 2)
- A2p = A2''
- @test hasinds(A2p, s2'', l''', l'''')
+ A1u = addtags(A1, "u"; tags = "Link", plev = 0)
+ @test hasinds(A1u, s1, lu, l')
- A2p = prime(A2, "s=2")
- @test hasinds(A2p, s2', l', l'')
- end
+ A1u = addtags(A1, "u"; tags = "Link", plev = 1)
+ @test hasinds(A1u, s1, l, lu')
+ end
+ @testset "removetags(::ITensor,::String,::String)" begin
+ A2r = removetags(A2, "Site")
+ @test hasinds(A2r, removetags(s2, "Site"), l', l'')
- @testset "mapprime" begin
- @test hasinds(mapprime(A2, 1, 7), s2, l^7, l'')
- @test hasinds(mapprime(A2, 0, 1), s2', l', l'')
- end
+ A2r = removetags(A2, "Link"; plev = 1)
+ @test hasinds(A2r, s2, removetags(l, "Link")', l'')
- @testset "replaceprime" begin
- @test hasinds(mapprime(A2, 1 => 7), s2, l^7, l'')
- @test hasinds(mapprime(A2, 0 => 1), s2', l', l'')
- @test hasinds(mapprime(A2, 1 => 7, 0 => 1), s2', l^7, l'')
- @test hasinds(mapprime(A2, 1 => 2, 2 => 1), s2, l'', l')
- @test hasinds(mapprime(A2, 1 => 0, 0 => 1), s2', l, l'')
- end
+ A2r = replacetags(A2, "Link", "Temp"; plev = 1)
+ @test hasinds(A2r, s2, ltmp', l'')
+ end
+ @testset "replacetags(::ITensor,::String,::String)" begin
+ s2tmp = replacetags(s2, "Site", "Temp")
- @testset "setprime" begin
- @test hasinds(setprime(A2, 2, s2), s2'', l', l'')
- @test hasinds(setprime(A2, 0, l''), s2, l', l)
- end
+ @test s2tmp == replacetags(s2, "Site" => "Temp")
- @testset "swapprime" begin
- @test hasinds(swapprime(A2, 1, 3), l''', s2, l'')
- end
- end
-
- @testset "ITensor other index operations" begin
- s1 = Index(2, "Site,s=1")
- s2 = Index(2, "Site,s=2")
- l = Index(3, "Link")
- A1 = random_itensor(s1, l, l')
- A2 = random_itensor(s2, l', l'')
-
- @testset "ind(::ITensor)" begin
- @test ind(A1, 1) == s1
- @test ind(A1, 2) == l
- end
+ ltmp = replacetags(l, "Link", "Temp")
- @testset "replaceind and replaceinds" begin
- rA1 = replaceind(A1, s1, s2)
- @test hasinds(rA1, s2, l, l')
- @test hasinds(A1, s1, l, l')
+ A2r = replacetags(A2, "Site", "Temp")
+ @test hasinds(A2r, s2tmp, l', l'')
- @test replaceinds(A1, [] => []) == A1
- @test replaceinds(A1, ()) == A1
- @test replaceinds(A1) == A1
+ A2r = replacetags(A2, "Site" => "Temp")
+ @test hasinds(A2r, s2tmp, l', l'')
- # Pair notation (like Julia's replace function)
- rA1 = replaceind(A1, s1 => s2)
- @test hasinds(rA1, s2, l, l')
- @test hasinds(A1, s1, l, l')
+ A2r = replacetags(A2, "Link", "Temp")
+ @test hasinds(A2r, s2, ltmp', ltmp'')
- replaceind!(A1, s1, s2)
- @test hasinds(A1, s2, l, l')
+ A2r = replacetags(A2, "Site" => "Link", "Link" => "Site")
+ @test hasinds(
+ A2r,
+ replacetags(s2, "Site" => "Link"),
+ replacetags(l', "Link" => "Site"),
+ replacetags(l'', "Link" => "Site"),
+ )
+ end
+ @testset "prime(::ITensor,::String)" begin
+ A2p = prime(A2)
+ @test A2p == A2'
+ @test hasinds(A2p, s2', l'', l''')
- rA2 = replaceinds(A2, (s2, l'), (s1, l))
- @test hasinds(rA2, s1, l, l'')
- @test hasinds(A2, s2, l', l'')
+ A2p = prime(A2, 2)
+ A2p = A2''
+ @test hasinds(A2p, s2'', l''', l'''')
- # Pair notation (like Julia's replace function)
- rA2 = replaceinds(A2, s2 => s1, l' => l)
- @test hassameinds(rA2, (s1, l, l''))
- @test hassameinds(A2, (s2, l', l''))
+ A2p = prime(A2, "s=2")
+ @test hasinds(A2p, s2', l', l'')
+ end
- # Test ignoring indices that don't exist
- rA2 = replaceinds(A2, s1 => l, l' => l)
- @test hassameinds(rA2, (s2, l, l''))
- @test hassameinds(A2, (s2, l', l''))
+ @testset "mapprime" begin
+ @test hasinds(mapprime(A2, 1, 7), s2, l^7, l'')
+ @test hasinds(mapprime(A2, 0, 1), s2', l', l'')
+ end
- replaceinds!(A2, (s2, l'), (s1, l))
- @test hasinds(A2, s1, l, l'')
- end
+ @testset "replaceprime" begin
+ @test hasinds(mapprime(A2, 1 => 7), s2, l^7, l'')
+ @test hasinds(mapprime(A2, 0 => 1), s2', l', l'')
+ @test hasinds(mapprime(A2, 1 => 7, 0 => 1), s2', l^7, l'')
+ @test hasinds(mapprime(A2, 1 => 2, 2 => 1), s2, l'', l')
+ @test hasinds(mapprime(A2, 1 => 0, 0 => 1), s2', l, l'')
+ end
- @testset "replaceinds fixed errors" begin
- l = Index(3; tags="l")
- s = Index(2; tags="s")
- l̃, s̃ = sim(l), sim(s)
- A = random_itensor(s, l)
- Ã = replaceinds(A, (l, s), (l̃, s̃))
- @test ind(A, 1) == s
- @test ind(A, 2) == l
- @test ind(Ã, 1) == s̃
- @test ind(Ã, 2) == l̃
- @test_throws ErrorException replaceinds(A, (l, s), (s̃, l̃))
+ @testset "setprime" begin
+ @test hasinds(setprime(A2, 2, s2), s2'', l', l'')
+ @test hasinds(setprime(A2, 0, l''), s2, l', l)
+ end
+
+ @testset "swapprime" begin
+ @test hasinds(swapprime(A2, 1, 3), l''', s2, l'')
+ end
end
- @testset "swapinds and swapinds!" begin
- s = Index(2)
- t = Index(2)
- Ast = random_itensor(s, s', t, t')
- Ats = swapinds(Ast, (s, s'), (t, t'))
- @test Ast != Ats
- @test Ast == swapinds(Ats, (s, s'), (t, t'))
+ @testset "ITensor other index operations" begin
+ s1 = Index(2, "Site,s=1")
+ s2 = Index(2, "Site,s=2")
+ l = Index(3, "Link")
+ A1 = random_itensor(s1, l, l')
+ A2 = random_itensor(s2, l', l'')
- swapinds!(Ats, (s, s'), (t, t'))
- @test Ast == Ats
- end
- end #End "ITensor other index operations"
-
- @testset "Converting Real and Complex Storage" begin
- @testset "Add Real and Complex" for eltype in (Float32, Float64)
- i = Index(2, "i")
- j = Index(2, "j")
- TC = random_itensor(complex(eltype), i, j)
- TR = random_itensor(eltype, i, j)
-
- S1 = TC + TR
- S2 = TR + TC
- @test typeof(storage(S1)) == NDTensors.Dense{complex(eltype),Vector{complex(eltype)}}
- @test typeof(storage(S2)) == NDTensors.Dense{complex(eltype),Vector{complex(eltype)}}
- for ii in 1:dim(i), jj in 1:dim(j)
- @test S1[i => ii, j => jj] ≈ TC[i => ii, j => jj] + TR[i => ii, j => jj]
- @test S2[i => ii, j => jj] ≈ TC[i => ii, j => jj] + TR[i => ii, j => jj]
- end
- end
- end
-
- @testset "ITensor, NDTensors.Dense{$SType} storage" for SType in (
- Float32, Float64, ComplexF32, ComplexF64
- )
- mi, mj, mk, ml, mα = 2, 3, 4, 5, 6, 7
- i = Index(mi, "i")
- j = Index(mj, "j")
- k = Index(mk, "k")
- l = Index(ml, "l")
- α = Index(mα, "alpha")
-
- atol = eps(real(SType)) * 500
-
- @testset "Set and get values with IndexVals" begin
- A = ITensor(SType, i, j, k)
- for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k)
- A[k => kk, j => jj, i => ii] = invdigits(SType, ii, jj, kk)
- end
- for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k)
- @test A[j => jj, k => kk, i => ii] == invdigits(SType, ii, jj, kk)
- end
- @test A[1] == invdigits(SType, 1, 1, 1)
- end
- @testset "Test permute(ITensor,Index...)" begin
- A = random_itensor(SType, i, k, j)
- permA = permute(A, k, j, i)
- @test k == inds(permA)[1]
- @test j == inds(permA)[2]
- @test i == inds(permA)[3]
- for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k)
- @test A[k => kk, i => ii, j => jj] == permA[i => ii, j => jj, k => kk]
- end
- for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k)
- @test A[k => kk, i => ii, j => jj] == permA[i => ii, j => jj, k => kk]
- end
- # TODO: I think this was doing slicing, but what is the output
- # of slicing an ITensor?
- #@testset "getindex and setindex with vector of IndexVals" begin
- # k_inds = [k=>kk for kk ∈ 1:dim(k)]
- # for ii ∈ 1:dim(i), jj ∈ 1:dim(j)
- # @test A[k_inds,i=>ii,j=>jj]==permA[i=>ii,j=>jj,k_inds...]
- # end
- # for ii ∈ 1:dim(i), jj ∈ 1:dim(j)
- # A[k_inds,i=>ii,j=>jj]=collect(1:length(k_inds))
- # end
- # permA = permute(A,k,j,i)
- # for ii ∈ 1:dim(i), jj ∈ 1:dim(j)
- # @test A[k_inds,i=>ii,j=>jj]==permA[i=>ii,j=>jj,k_inds...]
- # end
- #end
- end
- @testset "Set and get values with Ints" begin
- A = ITensor(SType, i, j, k)
- A = permute(A, k, i, j)
- for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k)
- A[kk, ii, jj] = invdigits(SType, ii, jj, kk)
- end
- A = permute(A, i, j, k)
- for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k)
- @test A[ii, jj, kk] == invdigits(SType, ii, jj, kk)
- end
- end
- @testset "Test scalar(::ITensor)" begin
- x = SType(34)
- A = ITensor(x)
- @test x == scalar(A)
- A = ITensor(SType, i, j, k)
- @test_throws DimensionMismatch scalar(A)
- end
- @testset "Test norm(ITensor)" begin
- A = random_itensor(SType, i, j, k)
- @test norm(A) ≈ sqrt(scalar(dag(A) * A))
- end
- @testset "Test dag(::Number)" begin
- x = 1.2 + 2.3im
- @test dag(x) == 1.2 - 2.3im
- x = 1.4
- @test dag(x) == 1.4
- end
- @testset "Test add ITensors" begin
- A = random_itensor(SType, i, j, k)
- B = random_itensor(SType, k, i, j)
- C = A + B
- for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k)
- @test C[i => ii, j => jj, k => kk] ==
- A[j => jj, i => ii, k => kk] + B[i => ii, k => kk, j => jj]
- end
- @test array(permute(C, i, j, k)) ==
- array(permute(A, i, j, k)) + array(permute(B, i, j, k))
- end
+ @testset "ind(::ITensor)" begin
+ @test ind(A1, 1) == s1
+ @test ind(A1, 2) == l
+ end
- @testset "Test array" begin
- A = random_itensor(SType, i, j, k)
- B = random_itensor(SType, i, j)
- C = random_itensor(SType, i)
+ @testset "replaceind and replaceinds" begin
+ rA1 = replaceind(A1, s1, s2)
+ @test hasinds(rA1, s2, l, l')
+ @test hasinds(A1, s1, l, l')
- @test array(permute(A, j, i, k)) == array(A, j, i, k)
- @test_throws DimensionMismatch matrix(A, j, i, k)
- @test_throws DimensionMismatch vector(A, j, i, k)
+ @test replaceinds(A1, [] => []) == A1
+ @test replaceinds(A1, ()) == A1
+ @test replaceinds(A1) == A1
- @test array(permute(B, j, i)) == array(B, j, i)
- @test matrix(permute(B, j, i)) == matrix(B, j, i)
- @test_throws DimensionMismatch vector(B, j, i)
+ # Pair notation (like Julia's replace function)
+ rA1 = replaceind(A1, s1 => s2)
+ @test hasinds(rA1, s2, l, l')
+ @test hasinds(A1, s1, l, l')
- @test array(permute(C, i)) == array(C, i)
- @test vector(permute(C, i)) == vector(C, i)
- @test vector(C) == vector(C, i)
- @test_throws DimensionMismatch matrix(C, i)
- end
+ replaceind!(A1, s1, s2)
+ @test hasinds(A1, s2, l, l')
- @testset "Test factorizations of an ITensor" begin
- A = random_itensor(SType, i, j, k, l)
-
- @testset "Test SVD of an ITensor" begin
- U, S, V, spec, u, v = svd(A, (j, l))
- @test storage(S) isa NDTensors.Diag{real(SType),Vector{real(SType)}}
- @test A ≈ U * S * V
- @test U * dag(prime(U, u)) ≈ δ(SType, u, u') atol = atol
- @test V * dag(prime(V, v)) ≈ δ(SType, v, v') atol = atol
- end
-
- @testset "Test SVD of an ITensor with different algorithms" begin
- U, S, V, spec, u, v = svd(A, j, l; alg="recursive")
- @test storage(S) isa NDTensors.Diag{real(SType),Vector{real(SType)}}
- @test A ≈ U * S * V
- @test U * dag(prime(U, u)) ≈ δ(SType, u, u') atol = atol
- @test V * dag(prime(V, v)) ≈ δ(SType, v, v') atol = atol
-
- U, S, V, spec, u, v = svd(A, j, l; alg="divide_and_conquer")
- @test storage(S) isa NDTensors.Diag{real(SType),Vector{real(SType)}}
- @test A ≈ U * S * V
- @test U * dag(prime(U, u)) ≈ δ(SType, u, u') atol = atol
- @test V * dag(prime(V, v)) ≈ δ(SType, v, v') atol = atol
-
- U, S, V, spec, u, v = svd(A, j, l; alg="qr_iteration")
- @test storage(S) isa NDTensors.Diag{real(SType),Vector{real(SType)}}
- @test A ≈ U * S * V
- @test U * dag(prime(U, u)) ≈ δ(SType, u, u') atol = atol
- @test V * dag(prime(V, v)) ≈ δ(SType, v, v') atol = atol
-
- @test_throws ErrorException svd(A, j, l; alg="bad_alg")
- end
-
- #@testset "Test SVD of a DenseTensor internally" begin
- # Lis = commoninds(A,IndexSet(j,l))
- # Ris = uniqueinds(A,Lis)
- # Lpos,Rpos = NDTensors.getperms(inds(A),Lis,Ris)
- # # XXX this function isn't used anywhere in ITensors
- # # (it is no longer needed because of the combiner)
- # Ut,St,Vt,spec = svd(NDTensors.tensor(A), Lpos, Rpos)
- # U = itensor(Ut)
- # S = itensor(St)
- # V = itensor(Vt)
- # u = commonind(U, S)
- # v = commonind(V, S)
- # @test storage(S) isa NDTensors.Diag{Float64,Vector{Float64}}
- # @test A≈U*S*V
- # @test U*dag(prime(U,u))≈δ(SType,u,u') atol = atol
- # @test V*dag(prime(V,v))≈δ(SType,v,v') atol = atol
- #end
-
- @testset "Test SVD truncation" begin
- ii = Index(4)
- jj = Index(4)
- T = random_itensor(ComplexF64, ii, jj)
- U, S, V = svd(T, ii; maxdim=2)
- u, s, v = svd(matrix(T))
- @test norm(U * S * V - T) ≈ sqrt(s[3]^2 + s[4]^2)
- end
-
- @testset "Test QR decomposition of an ITensor" begin
- Q, R = qr(A, (i, l))
- @test eltype(Q) <: eltype(A)
- @test eltype(R) <: eltype(A)
- q = commonind(Q, R)
- @test A ≈ Q * R atol = atol
- @test Q * dag(prime(Q, q)) ≈ δ(SType, q, q') atol = atol
-
- Q, R = qr(A, (i, j, k, l))
- @test eltype(Q) <: eltype(A)
- @test eltype(R) <: eltype(A)
- q = commonind(Q, R)
- @test hassameinds(Q, (q, i, j, k, l))
- @test hassameinds(R, (q,))
- @test A ≈ Q * R atol = atol
- @test Q * dag(prime(Q, q)) ≈ δ(SType, q, q') atol = atol
- end
-
- @testset "Regression test for QR decomposition of an ITensor with all indices on one side" begin
- a = Index(2, "a")
- b = Index(2, "b")
- Vab = random_itensor(a, b)
- Q, R = qr(Vab, (a, b))
- @test hasinds(Q, (a, b))
- @test Vab ≈ Q * R atol = atol
- end
-
- @testset "Test polar decomposition of an ITensor" begin
- U, P, u = polar(A, (k, l))
-
- @test eltype(U) == eltype(A)
- @test eltype(P) == eltype(A)
-
- @test A ≈ U * P atol = atol
-
- #Note: this is only satisfied when left dimensions
- #are greater than right dimensions
- UUᵀ = U * dag(prime(U, u))
-
- # TODO: use a combiner to combine the u indices to make
- # this test simpler
- for ii in 1:dim(u[1]), jj in 1:dim(u[2]), iip in 1:dim(u[1]), jjp in 1:dim(u[2])
- val = UUᵀ[u[1] => ii, u[2] => jj, u[1]' => iip, u[2]' => jjp]
- if ii == iip && jj == jjp
- @test val ≈ one(SType) atol = atol
- else
- @test val ≈ zero(SType) atol = atol
- end
- end
- end
-
- @testset "Test Hermitian eigendecomposition of an ITensor" begin
- is = IndexSet(i, j)
- T = random_itensor(SType, is..., prime(is)...)
- T = T + swapprime(dag(T), 0, 1)
- D, U, spec, l, r = eigen(T; ishermitian=true)
- @test T ≈ prime(U) * D * dag(U) atol = atol
- UUᴴ = U * prime(dag(U), r)
- @test UUᴴ ≈ δ(r, r')
- end
-
- @testset "Test factorize of an ITensor" begin
- @testset "factorize default" begin
- L, R = factorize(A, (j, l))
- l = commonind(L, R)
- @test A ≈ L * R
- @test L * dag(prime(L, l)) ≈ δ(SType, l, l')
- @test R * dag(prime(R, l)) ≉ δ(SType, l, l')
+ rA2 = replaceinds(A2, (s2, l'), (s1, l))
+ @test hasinds(rA2, s1, l, l'')
+ @test hasinds(A2, s2, l', l'')
+
+ # Pair notation (like Julia's replace function)
+ rA2 = replaceinds(A2, s2 => s1, l' => l)
+ @test hassameinds(rA2, (s1, l, l''))
+ @test hassameinds(A2, (s2, l', l''))
+
+ # Test ignoring indices that don't exist
+ rA2 = replaceinds(A2, s1 => l, l' => l)
+ @test hassameinds(rA2, (s2, l, l''))
+ @test hassameinds(A2, (s2, l', l''))
+
+ replaceinds!(A2, (s2, l'), (s1, l))
+ @test hasinds(A2, s1, l, l'')
end
- @testset "factorize ortho left" begin
- L, R = factorize(A, (j, l); ortho="left")
- l = commonind(L, R)
- @test A ≈ L * R
- @test L * dag(prime(L, l)) ≈ δ(SType, l, l')
- @test R * dag(prime(R, l)) ≉ δ(SType, l, l')
+ @testset "replaceinds fixed errors" begin
+ l = Index(3; tags = "l")
+ s = Index(2; tags = "s")
+ l̃, s̃ = sim(l), sim(s)
+ A = random_itensor(s, l)
+ Ã = replaceinds(A, (l, s), (l̃, s̃))
+ @test ind(A, 1) == s
+ @test ind(A, 2) == l
+ @test ind(Ã, 1) == s̃
+ @test ind(Ã, 2) == l̃
+ @test_throws ErrorException replaceinds(A, (l, s), (s̃, l̃))
end
- @testset "factorize ortho right" begin
- L, R = factorize(A, (j, l); ortho="right")
- l = commonind(L, R)
- @test A ≈ L * R
- @test L * dag(prime(L, l)) ≉ δ(SType, l, l')
- @test R * dag(prime(R, l)) ≈ δ(SType, l, l')
+ @testset "swapinds and swapinds!" begin
+ s = Index(2)
+ t = Index(2)
+ Ast = random_itensor(s, s', t, t')
+ Ats = swapinds(Ast, (s, s'), (t, t'))
+ @test Ast != Ats
+ @test Ast == swapinds(Ats, (s, s'), (t, t'))
+
+ swapinds!(Ats, (s, s'), (t, t'))
+ @test Ast == Ats
+ end
+ end #End "ITensor other index operations"
+
+ @testset "Converting Real and Complex Storage" begin
+ @testset "Add Real and Complex" for eltype in (Float32, Float64)
+ i = Index(2, "i")
+ j = Index(2, "j")
+ TC = random_itensor(complex(eltype), i, j)
+ TR = random_itensor(eltype, i, j)
+
+ S1 = TC + TR
+ S2 = TR + TC
+ @test typeof(storage(S1)) == NDTensors.Dense{complex(eltype), Vector{complex(eltype)}}
+ @test typeof(storage(S2)) == NDTensors.Dense{complex(eltype), Vector{complex(eltype)}}
+ for ii in 1:dim(i), jj in 1:dim(j)
+ @test S1[i => ii, j => jj] ≈ TC[i => ii, j => jj] + TR[i => ii, j => jj]
+ @test S2[i => ii, j => jj] ≈ TC[i => ii, j => jj] + TR[i => ii, j => jj]
+ end
end
+ end
- @testset "factorize ortho none" begin
- L, R = factorize(A, (j, l); ortho="none")
- l = commonind(L, R)
- @test A ≈ L * R
- @test L * dag(prime(L, l)) ≉ δ(SType, l, l')
- @test R * dag(prime(R, l)) ≉ δ(SType, l, l')
+ @testset "ITensor, NDTensors.Dense{$SType} storage" for SType in (
+ Float32, Float64, ComplexF32, ComplexF64,
+ )
+ mi, mj, mk, ml, mα = 2, 3, 4, 5, 6, 7
+ i = Index(mi, "i")
+ j = Index(mj, "j")
+ k = Index(mk, "k")
+ l = Index(ml, "l")
+ α = Index(mα, "alpha")
+
+ atol = eps(real(SType)) * 500
+
+ @testset "Set and get values with IndexVals" begin
+ A = ITensor(SType, i, j, k)
+ for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k)
+ A[k => kk, j => jj, i => ii] = invdigits(SType, ii, jj, kk)
+ end
+ for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k)
+ @test A[j => jj, k => kk, i => ii] == invdigits(SType, ii, jj, kk)
+ end
+ @test A[1] == invdigits(SType, 1, 1, 1)
+ end
+ @testset "Test permute(ITensor,Index...)" begin
+ A = random_itensor(SType, i, k, j)
+ permA = permute(A, k, j, i)
+ @test k == inds(permA)[1]
+ @test j == inds(permA)[2]
+ @test i == inds(permA)[3]
+ for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k)
+ @test A[k => kk, i => ii, j => jj] == permA[i => ii, j => jj, k => kk]
+ end
+ for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k)
+ @test A[k => kk, i => ii, j => jj] == permA[i => ii, j => jj, k => kk]
+ end
+ # TODO: I think this was doing slicing, but what is the output
+ # of slicing an ITensor?
+ #@testset "getindex and setindex with vector of IndexVals" begin
+ # k_inds = [k=>kk for kk ∈ 1:dim(k)]
+ # for ii ∈ 1:dim(i), jj ∈ 1:dim(j)
+ # @test A[k_inds,i=>ii,j=>jj]==permA[i=>ii,j=>jj,k_inds...]
+ # end
+ # for ii ∈ 1:dim(i), jj ∈ 1:dim(j)
+ # A[k_inds,i=>ii,j=>jj]=collect(1:length(k_inds))
+ # end
+ # permA = permute(A,k,j,i)
+ # for ii ∈ 1:dim(i), jj ∈ 1:dim(j)
+ # @test A[k_inds,i=>ii,j=>jj]==permA[i=>ii,j=>jj,k_inds...]
+ # end
+ #end
+ end
+ @testset "Set and get values with Ints" begin
+ A = ITensor(SType, i, j, k)
+ A = permute(A, k, i, j)
+ for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k)
+ A[kk, ii, jj] = invdigits(SType, ii, jj, kk)
+ end
+ A = permute(A, i, j, k)
+ for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k)
+ @test A[ii, jj, kk] == invdigits(SType, ii, jj, kk)
+ end
+ end
+ @testset "Test scalar(::ITensor)" begin
+ x = SType(34)
+ A = ITensor(x)
+ @test x == scalar(A)
+ A = ITensor(SType, i, j, k)
+ @test_throws DimensionMismatch scalar(A)
+ end
+ @testset "Test norm(ITensor)" begin
+ A = random_itensor(SType, i, j, k)
+ @test norm(A) ≈ sqrt(scalar(dag(A) * A))
end
+ @testset "Test dag(::Number)" begin
+ x = 1.2 + 2.3im
+ @test dag(x) == 1.2 - 2.3im
+ x = 1.4
+ @test dag(x) == 1.4
+ end
+ @testset "Test add ITensors" begin
+ A = random_itensor(SType, i, j, k)
+ B = random_itensor(SType, k, i, j)
+ C = A + B
+ for ii in 1:dim(i), jj in 1:dim(j), kk in 1:dim(k)
+ @test C[i => ii, j => jj, k => kk] ==
+ A[j => jj, i => ii, k => kk] + B[i => ii, k => kk, j => jj]
+ end
+ @test array(permute(C, i, j, k)) ==
+ array(permute(A, i, j, k)) + array(permute(B, i, j, k))
+ end
+
+ @testset "Test array" begin
+ A = random_itensor(SType, i, j, k)
+ B = random_itensor(SType, i, j)
+ C = random_itensor(SType, i)
+
+ @test array(permute(A, j, i, k)) == array(A, j, i, k)
+ @test_throws DimensionMismatch matrix(A, j, i, k)
+ @test_throws DimensionMismatch vector(A, j, i, k)
+
+ @test array(permute(B, j, i)) == array(B, j, i)
+ @test matrix(permute(B, j, i)) == matrix(B, j, i)
+ @test_throws DimensionMismatch vector(B, j, i)
- @testset "factorize when ITensor has primed indices" begin
- A = random_itensor(i, i')
- L, R = factorize(A, i)
- l = commonind(L, R)
- @test A ≈ L * R
- @test L * dag(prime(L, l)) ≈ δ(SType, l, l')
- @test R * dag(prime(R, l)) ≉ δ(SType, l, l')
+ @test array(permute(C, i)) == array(C, i)
+ @test vector(permute(C, i)) == vector(C, i)
+ @test vector(C) == vector(C, i)
+ @test_throws DimensionMismatch matrix(C, i)
+ end
- @test_throws ErrorException factorize(A, i; which_decomp="svd", svd_alg="bad_alg")
+ @testset "Test factorizations of an ITensor" begin
+ A = random_itensor(SType, i, j, k, l)
+
+ @testset "Test SVD of an ITensor" begin
+ U, S, V, spec, u, v = svd(A, (j, l))
+ @test storage(S) isa NDTensors.Diag{real(SType), Vector{real(SType)}}
+ @test A ≈ U * S * V
+ @test U * dag(prime(U, u)) ≈ δ(SType, u, u') atol = atol
+ @test V * dag(prime(V, v)) ≈ δ(SType, v, v') atol = atol
+ end
+
+ @testset "Test SVD of an ITensor with different algorithms" begin
+ U, S, V, spec, u, v = svd(A, j, l; alg = "recursive")
+ @test storage(S) isa NDTensors.Diag{real(SType), Vector{real(SType)}}
+ @test A ≈ U * S * V
+ @test U * dag(prime(U, u)) ≈ δ(SType, u, u') atol = atol
+ @test V * dag(prime(V, v)) ≈ δ(SType, v, v') atol = atol
+
+ U, S, V, spec, u, v = svd(A, j, l; alg = "divide_and_conquer")
+ @test storage(S) isa NDTensors.Diag{real(SType), Vector{real(SType)}}
+ @test A ≈ U * S * V
+ @test U * dag(prime(U, u)) ≈ δ(SType, u, u') atol = atol
+ @test V * dag(prime(V, v)) ≈ δ(SType, v, v') atol = atol
+
+ U, S, V, spec, u, v = svd(A, j, l; alg = "qr_iteration")
+ @test storage(S) isa NDTensors.Diag{real(SType), Vector{real(SType)}}
+ @test A ≈ U * S * V
+ @test U * dag(prime(U, u)) ≈ δ(SType, u, u') atol = atol
+ @test V * dag(prime(V, v)) ≈ δ(SType, v, v') atol = atol
+
+ @test_throws ErrorException svd(A, j, l; alg = "bad_alg")
+ end
+
+ #@testset "Test SVD of a DenseTensor internally" begin
+ # Lis = commoninds(A,IndexSet(j,l))
+ # Ris = uniqueinds(A,Lis)
+ # Lpos,Rpos = NDTensors.getperms(inds(A),Lis,Ris)
+ # # XXX this function isn't used anywhere in ITensors
+ # # (it is no longer needed because of the combiner)
+ # Ut,St,Vt,spec = svd(NDTensors.tensor(A), Lpos, Rpos)
+ # U = itensor(Ut)
+ # S = itensor(St)
+ # V = itensor(Vt)
+ # u = commonind(U, S)
+ # v = commonind(V, S)
+ # @test storage(S) isa NDTensors.Diag{Float64,Vector{Float64}}
+ # @test A≈U*S*V
+ # @test U*dag(prime(U,u))≈δ(SType,u,u') atol = atol
+ # @test V*dag(prime(V,v))≈δ(SType,v,v') atol = atol
+ #end
+
+ @testset "Test SVD truncation" begin
+ ii = Index(4)
+ jj = Index(4)
+ T = random_itensor(ComplexF64, ii, jj)
+ U, S, V = svd(T, ii; maxdim = 2)
+ u, s, v = svd(matrix(T))
+ @test norm(U * S * V - T) ≈ sqrt(s[3]^2 + s[4]^2)
+ end
+
+ @testset "Test QR decomposition of an ITensor" begin
+ Q, R = qr(A, (i, l))
+ @test eltype(Q) <: eltype(A)
+ @test eltype(R) <: eltype(A)
+ q = commonind(Q, R)
+ @test A ≈ Q * R atol = atol
+ @test Q * dag(prime(Q, q)) ≈ δ(SType, q, q') atol = atol
+
+ Q, R = qr(A, (i, j, k, l))
+ @test eltype(Q) <: eltype(A)
+ @test eltype(R) <: eltype(A)
+ q = commonind(Q, R)
+ @test hassameinds(Q, (q, i, j, k, l))
+ @test hassameinds(R, (q,))
+ @test A ≈ Q * R atol = atol
+ @test Q * dag(prime(Q, q)) ≈ δ(SType, q, q') atol = atol
+ end
+
+ @testset "Regression test for QR decomposition of an ITensor with all indices on one side" begin
+ a = Index(2, "a")
+ b = Index(2, "b")
+ Vab = random_itensor(a, b)
+ Q, R = qr(Vab, (a, b))
+ @test hasinds(Q, (a, b))
+ @test Vab ≈ Q * R atol = atol
+ end
+
+ @testset "Test polar decomposition of an ITensor" begin
+ U, P, u = polar(A, (k, l))
+
+ @test eltype(U) == eltype(A)
+ @test eltype(P) == eltype(A)
+
+ @test A ≈ U * P atol = atol
+
+ # Note: this is only satisfied when the left dimensions
+ # are greater than the right dimensions
+ UUᵀ = U * dag(prime(U, u))
+
+ # TODO: use a combiner to combine the u indices to make
+ # this test simpler
+ for ii in 1:dim(u[1]), jj in 1:dim(u[2]), iip in 1:dim(u[1]), jjp in 1:dim(u[2])
+ val = UUᵀ[u[1] => ii, u[2] => jj, u[1]' => iip, u[2]' => jjp]
+ if ii == iip && jj == jjp
+ @test val ≈ one(SType) atol = atol
+ else
+ @test val ≈ zero(SType) atol = atol
+ end
+ end
+ end
+
+ @testset "Test Hermitian eigendecomposition of an ITensor" begin
+ is = IndexSet(i, j)
+ T = random_itensor(SType, is..., prime(is)...)
+ T = T + swapprime(dag(T), 0, 1)
+ D, U, spec, l, r = eigen(T; ishermitian = true)
+ @test T ≈ prime(U) * D * dag(U) atol = atol
+ UUᴴ = U * prime(dag(U), r)
+ @test UUᴴ ≈ δ(r, r')
+ end
+
+ @testset "Test factorize of an ITensor" begin
+ @testset "factorize default" begin
+ L, R = factorize(A, (j, l))
+ l = commonind(L, R)
+ @test A ≈ L * R
+ @test L * dag(prime(L, l)) ≈ δ(SType, l, l')
+ @test R * dag(prime(R, l)) ≉ δ(SType, l, l')
+ end
+
+ @testset "factorize ortho left" begin
+ L, R = factorize(A, (j, l); ortho = "left")
+ l = commonind(L, R)
+ @test A ≈ L * R
+ @test L * dag(prime(L, l)) ≈ δ(SType, l, l')
+ @test R * dag(prime(R, l)) ≉ δ(SType, l, l')
+ end
+
+ @testset "factorize ortho right" begin
+ L, R = factorize(A, (j, l); ortho = "right")
+ l = commonind(L, R)
+ @test A ≈ L * R
+ @test L * dag(prime(L, l)) ≉ δ(SType, l, l')
+ @test R * dag(prime(R, l)) ≈ δ(SType, l, l')
+ end
+
+ @testset "factorize ortho none" begin
+ L, R = factorize(A, (j, l); ortho = "none")
+ l = commonind(L, R)
+ @test A ≈ L * R
+ @test L * dag(prime(L, l)) ≉ δ(SType, l, l')
+ @test R * dag(prime(R, l)) ≉ δ(SType, l, l')
+ end
+
+ @testset "factorize when ITensor has primed indices" begin
+ A = random_itensor(i, i')
+ L, R = factorize(A, i)
+ l = commonind(L, R)
+ @test A ≈ L * R
+ @test L * dag(prime(L, l)) ≈ δ(SType, l, l')
+ @test R * dag(prime(R, l)) ≉ δ(SType, l, l')
+
+ @test_throws ErrorException factorize(A, i; which_decomp = "svd", svd_alg = "bad_alg")
+ end
+ end # End factorize tests
+
+ @testset "Test error for bad decomposition inputs" begin
+ @test_throws ErrorException svd(A)
+ @test_throws ErrorException factorize(A)
+ @test_throws ErrorException eigen(A, inds(A), inds(A))
+ end
end
- end # End factorize tests
+ end # End Dense storage test
+
+ @testset "dag copy behavior" begin
+ i = Index(4, "i")
+
+ v1 = random_itensor(i)
+ cv1 = dag(v1)
+ cv1[1] = -1
+ @test v1[1] ≈ cv1[1]
+
+ v2 = random_itensor(i)
+ cv2 = dag(ITensors.NeverAlias(), v2)
+ orig_elt = v2[1]
+ cv2[1] = -1
+ @test v2[1] ≈ orig_elt
+
+ v2 = random_itensor(i)
+ cv2 = dag(v2; allow_alias = false)
+ orig_elt = v2[1]
+ cv2[1] = -1
+ @test v2[1] ≈ orig_elt
+
+ v3 = random_itensor(ComplexF64, i)
+ orig_elt = v3[1]
+ cv3 = dag(v3)
+ cv3[1] = -1
+ @test v3[1] ≈ orig_elt
+
+ v4 = random_itensor(ComplexF64, i)
+ cv4 = dag(ITensors.NeverAlias(), v4)
+ orig_elt = v4[1]
+ cv4[1] = -1
+ @test v4[1] ≈ orig_elt
+ end
+
+ @testset "filter ITensor indices" begin
+ i = Index(2, "i")
+ A = random_itensor(i, i')
+ @test hassameinds(filterinds(A; plev = 0), (i,))
+ @test hassameinds(inds(A; plev = 0), (i,))
+ is = inds(A)
+ @test hassameinds(filterinds(is; plev = 0), (i,))
+ @test hassameinds(inds(is; plev = 0), (i,))
+ end
+
+ @testset "product/apply" begin
+ s1 = Index(2, "s1")
+ s2 = Index(2, "s2")
+ s3 = Index(2, "s3")
+
+ rA = Index(3, "rA")
+ lA = Index(3, "lA")
+
+ rB = Index(3, "rB")
+ lB = Index(3, "lB")
+
+ # operator * operator
+ A = random_itensor(s1', s2', dag(s1), dag(s2), lA, rA)
+ B = random_itensor(s1', s2', dag(s1), dag(s2), lB, rB)
+ AB = product(A, B)
+ @test hassameinds(AB, (s1', s2', s1, s2, lA, rA, lB, rB))
+ @test AB ≈ mapprime(prime(A; inds = (s1', s2', s1, s2)) * B, 2 => 1)
+
+ # operator * operator, common dangling indices
+ A = random_itensor(s1', s2', dag(s1), dag(s2), lA, rA)
+ B = random_itensor(s1', s2', dag(s1), dag(s2), dag(lA), dag(rA))
+ AB = product(A, B)
+ @test hassameinds(AB, (s1', s2', s1, s2))
+ @test AB ≈ mapprime(prime(A; inds = (s1', s2', s1, s2)) * B, 2 => 1)
+
+ # operator * operator, apply_dag, common dangling indices
+ A = random_itensor(s1', s2', dag(s1), dag(s2), lA, rA)
+ B = random_itensor(s1', s2', dag(s1), dag(s2), lB, rB)
+ ABAdag = product(A, B; apply_dag = true)
+ AB = mapprime(prime(A; inds = (s1', s2', s1, s2)) * B, 2 => 1)
+ Adag = swapprime(dag(A), 0 => 1; inds = (s1', s2', s1, s2))
+ @test hassameinds(ABAdag, (s1', s2', s1, s2, lB, rB))
+ @test ABAdag ≈ mapprime(prime(AB; inds = (s1', s2', s1, s2)) * Adag, 2 => 1)
+
+ # operator * operator, more complicated
+ A = random_itensor(s1', s2', dag(s1), dag(s2), lA, rA)
+ B = random_itensor(s1', s2', s3', dag(s1), dag(s2), dag(s3), lB, rB, dag(rA))
+ AB = product(A, B)
+ @test hassameinds(AB, (s1', s2', s3', s1, s2, s3, lA, lB, rB))
+ @test AB ≈ mapprime(prime(A; inds = (s1', s2', s1, s2)) * B, 2 => 1)
+
+ # state * operator (1)
+ A = random_itensor(dag(s1), dag(s2), lA, rA)
+ B = random_itensor(s1', s2', dag(s1), dag(s2), lB, rB)
+ AB = product(A, B)
+ @test hassameinds(AB, (s1, s2, lA, rA, lB, rB))
+ @test AB ≈ mapprime(prime(A; inds = (s1, s2)) * B)
+
+ # state * operator (2)
+ A = random_itensor(dag(s1'), dag(s2'), lA, rA)
+ B = random_itensor(s1', s2', dag(s1), dag(s2), lB, rB)
+ @test_throws ErrorException product(A, B)
+
+ # operator * state (1)
+ A = random_itensor(s1', s2', dag(s1), dag(s2), lA, rA)
+ B = random_itensor(s1', s2', lB, rB)
+ @test_throws ErrorException product(A, B)
+
+ # operator * state (2)
+ A = random_itensor(s1', s2', dag(s1), dag(s2), lA, rA)
+ B = random_itensor(s1, s2, lB, rB, dag(lA))
+ AB = product(A, B)
+ @test hassameinds(AB, (s1, s2, rA, lB, rB))
+ @test AB ≈ noprime(A * B)
+
+ # state * state (1)
+ A = random_itensor(dag(s1), dag(s2), lA, rA)
+ B = random_itensor(s1, s2, lB, rB)
+ AB = product(A, B)
+ @test hassameinds(AB, (lA, rA, lB, rB))
+ @test AB ≈ A * B
+
+ # state * state (2)
+ A = random_itensor(dag(s1'), dag(s2'), lA, rA)
+ B = random_itensor(s1, s2, lB, dag(rA))
+ AB = product(A, B)
+ @test hassameinds(AB, (s1', s2', s1, s2, lA, lB))
+ @test AB ≈ A * B
+
+ # state * state (3)
+ A = random_itensor(dag(s1'), dag(s2'), lA, rA)
+ B = random_itensor(s1, s2, lB, rB)
+ @test_throws ErrorException product(A, B)
+
+ # state * state (4)
+ A = random_itensor(dag(s1), dag(s2), lA, rA)
+ B = random_itensor(s1', s2', lB, rB)
+ @test_throws ErrorException product(A, B)
+
+ # state * state (5)
+ A = random_itensor(dag(s1'), dag(s2'), lA, rA)
+ B = random_itensor(s1', s2', lB, rB)
+ @test_throws ErrorException product(A, B)
+ end
+
+ @testset "inner ($ElType)" for ElType in (Float64, ComplexF64)
+ i = Index(2)
+ j = Index(2)
+ A = random_itensor(ElType, i', j', i, j)
+ x = random_itensor(ElType, i, j)
+ y = random_itensor(ElType, i, j)
+ @test inner(x, y) ≈ (dag(x) * y)[]
+ @test inner(x', A, y) ≈ (dag(x)' * A * y)[]
+ # No automatic priming
+ @test_throws DimensionMismatch inner(x, A, y)
+ end
- @testset "Test error for bad decomposition inputs" begin
- @test_throws ErrorException svd(A)
- @test_throws ErrorException factorize(A)
- @test_throws ErrorException eigen(A, inds(A), inds(A))
- end
+ @testset "hastags" begin
+ i = Index(2, "i, x")
+ j = Index(2, "j, x")
+ A = random_itensor(i, j)
+ @test hastags(A, "i")
+ @test anyhastags(A, "i")
+ @test !allhastags(A, "i")
+ @test allhastags(A, "x")
end
- end # End Dense storage test
-
- @testset "dag copy behavior" begin
- i = Index(4, "i")
-
- v1 = random_itensor(i)
- cv1 = dag(v1)
- cv1[1] = -1
- @test v1[1] ≈ cv1[1]
-
- v2 = random_itensor(i)
- cv2 = dag(ITensors.NeverAlias(), v2)
- orig_elt = v2[1]
- cv2[1] = -1
- @test v2[1] ≈ orig_elt
-
- v2 = random_itensor(i)
- cv2 = dag(v2; allow_alias=false)
- orig_elt = v2[1]
- cv2[1] = -1
- @test v2[1] ≈ orig_elt
-
- v3 = random_itensor(ComplexF64, i)
- orig_elt = v3[1]
- cv3 = dag(v3)
- cv3[1] = -1
- @test v3[1] ≈ orig_elt
-
- v4 = random_itensor(ComplexF64, i)
- cv4 = dag(ITensors.NeverAlias(), v4)
- orig_elt = v4[1]
- cv4[1] = -1
- @test v4[1] ≈ orig_elt
- end
-
- @testset "filter ITensor indices" begin
- i = Index(2, "i")
- A = random_itensor(i, i')
- @test hassameinds(filterinds(A; plev=0), (i,))
- @test hassameinds(inds(A; plev=0), (i,))
- is = inds(A)
- @test hassameinds(filterinds(is; plev=0), (i,))
- @test hassameinds(inds(is; plev=0), (i,))
- end
-
- @testset "product/apply" begin
- s1 = Index(2, "s1")
- s2 = Index(2, "s2")
- s3 = Index(2, "s3")
-
- rA = Index(3, "rA")
- lA = Index(3, "lA")
-
- rB = Index(3, "rB")
- lB = Index(3, "lB")
-
- # operator * operator
- A = random_itensor(s1', s2', dag(s1), dag(s2), lA, rA)
- B = random_itensor(s1', s2', dag(s1), dag(s2), lB, rB)
- AB = product(A, B)
- @test hassameinds(AB, (s1', s2', s1, s2, lA, rA, lB, rB))
- @test AB ≈ mapprime(prime(A; inds=(s1', s2', s1, s2)) * B, 2 => 1)
-
- # operator * operator, common dangling indices
- A = random_itensor(s1', s2', dag(s1), dag(s2), lA, rA)
- B = random_itensor(s1', s2', dag(s1), dag(s2), dag(lA), dag(rA))
- AB = product(A, B)
- @test hassameinds(AB, (s1', s2', s1, s2))
- @test AB ≈ mapprime(prime(A; inds=(s1', s2', s1, s2)) * B, 2 => 1)
-
- # operator * operator, apply_dag, common dangling indices
- A = random_itensor(s1', s2', dag(s1), dag(s2), lA, rA)
- B = random_itensor(s1', s2', dag(s1), dag(s2), lB, rB)
- ABAdag = product(A, B; apply_dag=true)
- AB = mapprime(prime(A; inds=(s1', s2', s1, s2)) * B, 2 => 1)
- Adag = swapprime(dag(A), 0 => 1; inds=(s1', s2', s1, s2))
- @test hassameinds(ABAdag, (s1', s2', s1, s2, lB, rB))
- @test ABAdag ≈ mapprime(prime(AB; inds=(s1', s2', s1, s2)) * Adag, 2 => 1)
-
- # operator * operator, more complicated
- A = random_itensor(s1', s2', dag(s1), dag(s2), lA, rA)
- B = random_itensor(s1', s2', s3', dag(s1), dag(s2), dag(s3), lB, rB, dag(rA))
- AB = product(A, B)
- @test hassameinds(AB, (s1', s2', s3', s1, s2, s3, lA, lB, rB))
- @test AB ≈ mapprime(prime(A; inds=(s1', s2', s1, s2)) * B, 2 => 1)
-
- # state * operator (1)
- A = random_itensor(dag(s1), dag(s2), lA, rA)
- B = random_itensor(s1', s2', dag(s1), dag(s2), lB, rB)
- AB = product(A, B)
- @test hassameinds(AB, (s1, s2, lA, rA, lB, rB))
- @test AB ≈ mapprime(prime(A; inds=(s1, s2)) * B)
-
- # state * operator (2)
- A = random_itensor(dag(s1'), dag(s2'), lA, rA)
- B = random_itensor(s1', s2', dag(s1), dag(s2), lB, rB)
- @test_throws ErrorException product(A, B)
-
- # operator * state (1)
- A = random_itensor(s1', s2', dag(s1), dag(s2), lA, rA)
- B = random_itensor(s1', s2', lB, rB)
- @test_throws ErrorException product(A, B)
-
- # operator * state (2)
- A = random_itensor(s1', s2', dag(s1), dag(s2), lA, rA)
- B = random_itensor(s1, s2, lB, rB, dag(lA))
- AB = product(A, B)
- @test hassameinds(AB, (s1, s2, rA, lB, rB))
- @test AB ≈ noprime(A * B)
-
- # state * state (1)
- A = random_itensor(dag(s1), dag(s2), lA, rA)
- B = random_itensor(s1, s2, lB, rB)
- AB = product(A, B)
- @test hassameinds(AB, (lA, rA, lB, rB))
- @test AB ≈ A * B
-
- # state * state (2)
- A = random_itensor(dag(s1'), dag(s2'), lA, rA)
- B = random_itensor(s1, s2, lB, dag(rA))
- AB = product(A, B)
- @test hassameinds(AB, (s1', s2', s1, s2, lA, lB))
- @test AB ≈ A * B
-
- # state * state (3)
- A = random_itensor(dag(s1'), dag(s2'), lA, rA)
- B = random_itensor(s1, s2, lB, rB)
- @test_throws ErrorException product(A, B)
-
- # state * state (4)
- A = random_itensor(dag(s1), dag(s2), lA, rA)
- B = random_itensor(s1', s2', lB, rB)
- @test_throws ErrorException product(A, B)
-
- # state * state (5)
- A = random_itensor(dag(s1'), dag(s2'), lA, rA)
- B = random_itensor(s1', s2', lB, rB)
- @test_throws ErrorException product(A, B)
- end
-
- @testset "inner ($ElType)" for ElType in (Float64, ComplexF64)
- i = Index(2)
- j = Index(2)
- A = random_itensor(ElType, i', j', i, j)
- x = random_itensor(ElType, i, j)
- y = random_itensor(ElType, i, j)
- @test inner(x, y) ≈ (dag(x) * y)[]
- @test inner(x', A, y) ≈ (dag(x)' * A * y)[]
- # No automatic priming
- @test_throws DimensionMismatch inner(x, A, y)
- end
-
- @testset "hastags" begin
- i = Index(2, "i, x")
- j = Index(2, "j, x")
- A = random_itensor(i, j)
- @test hastags(A, "i")
- @test anyhastags(A, "i")
- @test !allhastags(A, "i")
- @test allhastags(A, "x")
- end
-
- @testset "directsum" for space in (identity, d -> [QN(0) => d, QN(1) => d]),
- index_op in (identity, dag)
-
- x = Index(space(2), "x")
- i1 = Index(space(3), "i1")
- j1 = Index(space(4), "j1")
- i2 = Index(space(5), "i2")
- j2 = Index(space(6), "j2")
-
- A1 = random_itensor(i1, x, j1)
- A2 = random_itensor(x, j2, i2)
-
- # Generate indices automatically.
- # Reverse the arrow directions in the QN case as a
- # regression test for:
- # https://github.com/ITensor/ITensors.jl/pull/1178.
- S1, s1 = directsum(
- A1 => index_op.((i1, j1)), A2 => index_op.((i2, j2)); tags=["sum_i", "sum_j"]
- )
-
- # Provide indices
- i1i2 = directsum(i1, i2; tags="sum_i")
- j1j2 = directsum(j1, j2; tags="sum_j")
- s2 = [i1i2, j1j2]
- S2 = directsum(s2, A1 => index_op.((i1, j1)), A2 => index_op.((i2, j2)))
- for (S, s) in zip((S1, S2), (s1, s2))
- for vx in 1:dim(x)
- proj = dag(onehot(x => vx))
- A1_vx = A1 * proj
- A2_vx = A2 * proj
- S_vx = S * proj
- for m in 1:dim(s[1]), n in 1:dim(s[2])
- if m ≤ dim(i1) && n ≤ dim(j1)
- @test S_vx[s[1] => m, s[2] => n] == A1_vx[i1 => m, j1 => n]
- elseif m > dim(i1) && n > dim(j1)
- @test S_vx[s[1] => m, s[2] => n] == A2_vx[i2 => m - dim(i1), j2 => n - dim(j1)]
- else
- @test S_vx[s[1] => m, s[2] => n] == 0
- end
+
+ @testset "directsum" for space in (identity, d -> [QN(0) => d, QN(1) => d]),
+ index_op in (identity, dag)
+
+ x = Index(space(2), "x")
+ i1 = Index(space(3), "i1")
+ j1 = Index(space(4), "j1")
+ i2 = Index(space(5), "i2")
+ j2 = Index(space(6), "j2")
+
+ A1 = random_itensor(i1, x, j1)
+ A2 = random_itensor(x, j2, i2)
+
+ # Generate indices automatically.
+ # Reverse the arrow directions in the QN case as a
+ # regression test for:
+ # https://github.com/ITensor/ITensors.jl/pull/1178.
+ S1, s1 = directsum(
+ A1 => index_op.((i1, j1)), A2 => index_op.((i2, j2)); tags = ["sum_i", "sum_j"]
+ )
+
+ # Provide indices
+ i1i2 = directsum(i1, i2; tags = "sum_i")
+ j1j2 = directsum(j1, j2; tags = "sum_j")
+ s2 = [i1i2, j1j2]
+ S2 = directsum(s2, A1 => index_op.((i1, j1)), A2 => index_op.((i2, j2)))
+ for (S, s) in zip((S1, S2), (s1, s2))
+ for vx in 1:dim(x)
+ proj = dag(onehot(x => vx))
+ A1_vx = A1 * proj
+ A2_vx = A2 * proj
+ S_vx = S * proj
+ for m in 1:dim(s[1]), n in 1:dim(s[2])
+ if m ≤ dim(i1) && n ≤ dim(j1)
+ @test S_vx[s[1] => m, s[2] => n] == A1_vx[i1 => m, j1 => n]
+ elseif m > dim(i1) && n > dim(j1)
+ @test S_vx[s[1] => m, s[2] => n] == A2_vx[i2 => m - dim(i1), j2 => n - dim(j1)]
+ else
+ @test S_vx[s[1] => m, s[2] => n] == 0
+ end
+ end
+ end
end
- end
+
+ i1, i2, j, k, l = Index.(space.((2, 3, 4, 5, 6)), ("i1", "i2", "j", "k", "l"))
+
+ A = random_itensor(i1, i2, j)
+ B = random_itensor(i1, i2, k)
+ C = random_itensor(i1, i2, l)
+ D = ITensor(i1, i2, k)
+ F = ITensor(i1, i2, j)
+
+ S, s = directsum(A => index_op(j), B => index_op(k))
+ @test dim(s) == dim(j) + dim(k)
+ @test hassameinds(S, (i1, i2, s))
+
+ S, s = (A => index_op(j)) ⊕ (B => index_op(k))
+ @test dim(s) == dim(j) + dim(k)
+ @test hassameinds(S, (i1, i2, s))
+
+ S, s = (A => index_op(j)) ⊕ (D => index_op(k))
+ @test dim(s) == dim(j) + dim(k)
+ @test hassameinds(S, (i1, i2, s))
+
+ @test_throws ErrorException (F => index_op(j)) ⊕ (D => index_op(k))
+
+ S, s = (D => index_op(k)) ⊕ (A => index_op(j))
+ @test dim(s) == dim(j) + dim(k)
+ @test hassameinds(S, (i1, i2, s))
+
+ S, s = directsum(A => index_op(j), B => index_op(k), C => index_op(l))
+ @test dim(s) == dim(j) + dim(k) + dim(l)
+ @test hassameinds(S, (i1, i2, s))
+
+ @test_throws ErrorException directsum(A => index_op(i2), B => index_op(i2))
+
+ S, (s,) = directsum(A => (index_op(j),), B => (index_op(k),))
+ @test s == uniqueind(S, A)
+ @test dim(s) == dim(j) + dim(k)
+ @test hassameinds(S, (i1, i2, s))
+
+ S, ss = directsum(A => index_op.((i2, j)), B => index_op.((i2, k)))
+ @test length(ss) == 2
+ @test dim(ss[1]) == dim(i2) + dim(i2)
+ @test hassameinds(S, (i1, ss...))
+
+ S, ss = directsum(A => (index_op(j),), B => (index_op(k),), C => (index_op(l),))
+ s = only(ss)
+ @test s == uniqueind(S, A)
+ @test dim(s) == dim(j) + dim(k) + dim(l)
+ @test hassameinds(S, (i1, i2, s))
+
+ S, ss = directsum(
+ A => index_op.((i2, i1, j)), B => index_op.((i1, i2, k)), C => index_op.((i1, i2, l))
+ )
+ @test length(ss) == 3
+ @test dim(ss[1]) == dim(i2) + dim(i1) + dim(i1)
+ @test dim(ss[2]) == dim(i1) + dim(i2) + dim(i2)
+ @test dim(ss[3]) == dim(j) + dim(k) + dim(l)
+ @test hassameinds(S, ss)
end
- i1, i2, j, k, l = Index.(space.((2, 3, 4, 5, 6)), ("i1", "i2", "j", "k", "l"))
-
- A = random_itensor(i1, i2, j)
- B = random_itensor(i1, i2, k)
- C = random_itensor(i1, i2, l)
- D = ITensor(i1, i2, k)
- F = ITensor(i1, i2, j)
-
- S, s = directsum(A => index_op(j), B => index_op(k))
- @test dim(s) == dim(j) + dim(k)
- @test hassameinds(S, (i1, i2, s))
-
- S, s = (A => index_op(j)) ⊕ (B => index_op(k))
- @test dim(s) == dim(j) + dim(k)
- @test hassameinds(S, (i1, i2, s))
-
- S, s = (A => index_op(j)) ⊕ (D => index_op(k))
- @test dim(s) == dim(j) + dim(k)
- @test hassameinds(S, (i1, i2, s))
-
- @test_throws ErrorException (F => index_op(j)) ⊕ (D => index_op(k))
-
- S, s = (D => index_op(k)) ⊕ (A => index_op(j))
- @test dim(s) == dim(j) + dim(k)
- @test hassameinds(S, (i1, i2, s))
-
- S, s = directsum(A => index_op(j), B => index_op(k), C => index_op(l))
- @test dim(s) == dim(j) + dim(k) + dim(l)
- @test hassameinds(S, (i1, i2, s))
-
- @test_throws ErrorException directsum(A => index_op(i2), B => index_op(i2))
-
- S, (s,) = directsum(A => (index_op(j),), B => (index_op(k),))
- @test s == uniqueind(S, A)
- @test dim(s) == dim(j) + dim(k)
- @test hassameinds(S, (i1, i2, s))
-
- S, ss = directsum(A => index_op.((i2, j)), B => index_op.((i2, k)))
- @test length(ss) == 2
- @test dim(ss[1]) == dim(i2) + dim(i2)
- @test hassameinds(S, (i1, ss...))
-
- S, ss = directsum(A => (index_op(j),), B => (index_op(k),), C => (index_op(l),))
- s = only(ss)
- @test s == uniqueind(S, A)
- @test dim(s) == dim(j) + dim(k) + dim(l)
- @test hassameinds(S, (i1, i2, s))
-
- S, ss = directsum(
- A => index_op.((i2, i1, j)), B => index_op.((i1, i2, k)), C => index_op.((i1, i2, l))
- )
- @test length(ss) == 3
- @test dim(ss[1]) == dim(i2) + dim(i1) + dim(i1)
- @test dim(ss[2]) == dim(i1) + dim(i2) + dim(i2)
- @test dim(ss[3]) == dim(j) + dim(k) + dim(l)
- @test hassameinds(S, ss)
- end
-
- @testset "ishermitian" begin
- s = Index(2, "s")
- Sz = ITensor([0.5 0.0; 0.0 -0.5], s', s)
- Sp = ITensor([0.0 1.0; 0.0 0.0], s', s)
- @test ishermitian(Sz)
- @test !ishermitian(Sp)
- end
-
- @testset "convert_eltype, convert_leaf_eltype, $new_eltype" for new_eltype in
- (Float32, ComplexF64)
- s = Index(2)
- A = random_itensor(s)
- @test eltype(A) == Float64
-
- Af32 = convert_eltype(new_eltype, A)
- @test Af32 ≈ A
- @test eltype(Af32) == new_eltype
-
- Af32_2 = convert_leaf_eltype(new_eltype, A)
- @test eltype(Af32_2) == new_eltype
- @test Af32_2 ≈ A
-
- As1 = [A, A]
- As1_f32 = convert_leaf_eltype(new_eltype, As1)
- @test length(As1_f32) == length(As1)
- @test typeof(As1_f32) == typeof(As1)
- @test eltype(As1_f32[1]) == new_eltype
- @test eltype(As1_f32[2]) == new_eltype
-
- As2 = [[A, A], [A]]
- As2_f32 = convert_leaf_eltype(new_eltype, As2)
- @test length(As2_f32) == length(As2)
- @test typeof(As2_f32) == typeof(As2)
- @test eltype(As2_f32[1][1]) == new_eltype
- @test eltype(As2_f32[1][2]) == new_eltype
- @test eltype(As2_f32[2][1]) == new_eltype
- end
-
- @testset "nullspace $eltype" for (ss, sl, sr) in [
- ([QN(-1) => 2, QN(1) => 3], [QN(-1) => 2], [QN(0) => 3]), (5, 2, 3)
- ],
- eltype in (Float32, Float64, ComplexF32, ComplexF64),
- nullspace_kwargs in ((;))
- #nullspace_kwargs in ((; atol=eps(real(eltype)) * 100), (;))
-
- s, l, r = Index.((ss, sl, sr), ("s", "l", "r"))
- A = random_itensor(eltype, dag(l), s, r)
- N = nullspace(A, dag(l); nullspace_kwargs...)
- @test Base.eltype(N) === eltype
- n = uniqueind(N, A)
- @test op("I", n) ≈ N * dag(prime(N, n))
- @test hassameinds(N, (s, r, n))
- @test norm(A * N) ≈ 0 atol = eps(real(eltype)) * 100
- @test dim(l) + dim(n) == dim((s, r))
- A′, (rn,) = ITensors.directsum(A => (l,), dag(N) => (n,); tags=["⊕"])
- @test dim(rn) == dim((s, r))
- @test norm(A * dag(prime(A, l))) ≈ norm(A * dag(A′))
- end
-
- @testset "nullspace regression test" begin
- # This is a case that failed before we raised
- # the default atol value in the `nullspace` function
- M = [
- 0.663934 0.713867 -0.458164 -1.79885 -0.83443
- 1.19064 -1.3474 -0.277555 -0.177408 0.408656
- ]
- i = Index(2)
- j = Index(5)
- A = ITensor(M, i, j)
- N = nullspace(A, i)
- n = uniqueindex(N, A)
- @test dim(n) == dim(j) - dim(i)
- end
-
- @testset "checkflux test" begin
- # Calling checkflux should not error (issue #1283)
- @test ITensors.checkflux(random_itensor(Index(2))) == nothing
- end
+ @testset "ishermitian" begin
+ s = Index(2, "s")
+ Sz = ITensor([0.5 0.0; 0.0 -0.5], s', s)
+ Sp = ITensor([0.0 1.0; 0.0 0.0], s', s)
+ @test ishermitian(Sz)
+ @test !ishermitian(Sp)
+ end
+
+ @testset "convert_eltype, convert_leaf_eltype, $new_eltype" for new_eltype in
+ (Float32, ComplexF64)
+ s = Index(2)
+ A = random_itensor(s)
+ @test eltype(A) == Float64
+
+ Af32 = convert_eltype(new_eltype, A)
+ @test Af32 ≈ A
+ @test eltype(Af32) == new_eltype
+
+ Af32_2 = convert_leaf_eltype(new_eltype, A)
+ @test eltype(Af32_2) == new_eltype
+ @test Af32_2 ≈ A
+
+ As1 = [A, A]
+ As1_f32 = convert_leaf_eltype(new_eltype, As1)
+ @test length(As1_f32) == length(As1)
+ @test typeof(As1_f32) == typeof(As1)
+ @test eltype(As1_f32[1]) == new_eltype
+ @test eltype(As1_f32[2]) == new_eltype
+
+ As2 = [[A, A], [A]]
+ As2_f32 = convert_leaf_eltype(new_eltype, As2)
+ @test length(As2_f32) == length(As2)
+ @test typeof(As2_f32) == typeof(As2)
+ @test eltype(As2_f32[1][1]) == new_eltype
+ @test eltype(As2_f32[1][2]) == new_eltype
+ @test eltype(As2_f32[2][1]) == new_eltype
+ end
+
+ @testset "nullspace $eltype" for (ss, sl, sr) in [
+ ([QN(-1) => 2, QN(1) => 3], [QN(-1) => 2], [QN(0) => 3]), (5, 2, 3),
+ ],
+ eltype in (Float32, Float64, ComplexF32, ComplexF64),
+ nullspace_kwargs in ((;))
+ #nullspace_kwargs in ((; atol=eps(real(eltype)) * 100), (;))
+
+ s, l, r = Index.((ss, sl, sr), ("s", "l", "r"))
+ A = random_itensor(eltype, dag(l), s, r)
+ N = nullspace(A, dag(l); nullspace_kwargs...)
+ @test Base.eltype(N) === eltype
+ n = uniqueind(N, A)
+ @test op("I", n) ≈ N * dag(prime(N, n))
+ @test hassameinds(N, (s, r, n))
+ @test norm(A * N) ≈ 0 atol = eps(real(eltype)) * 100
+ @test dim(l) + dim(n) == dim((s, r))
+ A′, (rn,) = ITensors.directsum(A => (l,), dag(N) => (n,); tags = ["⊕"])
+ @test dim(rn) == dim((s, r))
+ @test norm(A * dag(prime(A, l))) ≈ norm(A * dag(A′))
+ end
+
+ @testset "nullspace regression test" begin
+ # This is a case that failed before we raised
+ # the default atol value in the `nullspace` function
+ M = [
+ 0.663934 0.713867 -0.458164 -1.79885 -0.83443
+ 1.19064 -1.3474 -0.277555 -0.177408 0.408656
+ ]
+ i = Index(2)
+ j = Index(5)
+ A = ITensor(M, i, j)
+ N = nullspace(A, i)
+ n = uniqueindex(N, A)
+ @test dim(n) == dim(j) - dim(i)
+ end
+
+ @testset "checkflux test" begin
+ # Calling checkflux should not error (issue #1283)
+ @test ITensors.checkflux(random_itensor(Index(2))) == nothing
+ end
end # End Dense ITensor basic functionality
# Disable debug checking once tests are completed
diff --git a/test/base/test_phys_site_types.jl b/test/base/test_phys_site_types.jl
index 5d0cf38f32..4c09944fa5 100644
--- a/test/base/test_phys_site_types.jl
+++ b/test/base/test_phys_site_types.jl
@@ -2,854 +2,857 @@ using ITensors, LinearAlgebra, Test
using ITensors.SiteTypes: has_fermion_string, op, siteind, siteinds, state
@testset "Physics Sites" begin
- N = 10
-
- @testset "Generic sites" for eltype in (Float32, Float64, ComplexF32, ComplexF64)
- d1, d2 = 3, 4
- i1, i2 = Index(d1), Index(d2)
-
- o = op("I", i1; eltype)
- @test o == itensor(Matrix(I, d1, d1), i1', dag(i1))
- @test Base.eltype(o) <: eltype
-
- o = op("Id", i1; eltype)
- @test o == itensor(Matrix(I, d1, d1), i1', dag(i1))
- @test Base.eltype(o) <: eltype
-
- o = op("F", i1; eltype)
- @test o == itensor(Matrix(I, d1, d1), i1', dag(i1))
- @test Base.eltype(o) <: eltype
-
- o = op("I", i1, i2; eltype)
- @test o == itensor(Matrix(I, d1 * d2, d1 * d2), i2', i1', dag(i2), dag(i1))
- @test Base.eltype(o) <: eltype
-
- o = op("Id", i1, i2; eltype)
- @test o == itensor(Matrix(I, d1 * d2, d1 * d2), i2', i1', dag(i2), dag(i1))
- @test Base.eltype(o) <: eltype
-
- U1 = op("RandomUnitary", i1)
- @test hassameinds(U1, (i1', i1))
- @test apply(transpose(dag(U1)), U1) ≈ itensor(Matrix(I, d1, d1), i1', dag(i1))
-
- U1 = op("randU", i1)
- @test hassameinds(U1, (i1', i1))
- @test apply(transpose(dag(U1)), U1) ≈ itensor(Matrix(I, d1, d1), i1', dag(i1))
-
- U12 = op("RandomUnitary", i1, i2)
- @test hassameinds(U12, (i1', i2', i1, i2))
- @test apply(transpose(dag(U12)), U12) ≈
- itensor(Matrix(I, d1 * d2, d1 * d2), i2', i1', dag(i2), dag(i1))
- end
-
- @testset "Qubit sites" begin
- s = siteind("Qubit")
- @test hastags(s, "Qubit,Site")
- @test dim(s) == 2
-
- s = siteinds("Qubit", N)
- @test val(s[1], "0") == 1
- @test val(s[1], "1") == 2
- @test_throws ArgumentError val(s[1], "Fake")
-
- s = siteind("Qubit"; conserve_parity=true)
- @test hastags(s, "Qubit,Site")
- @test dim(s) == 2
- @test nblocks(s) == 2
- @test qn(s, 1) == QN("Parity", 0, 2)
- @test qn(s, 2) == QN("Parity", 1, 2)
-
- s = siteind("Qubit"; conserve_number=true)
- @test hastags(s, "Qubit,Site")
- @test dim(s) == 2
- @test nblocks(s) == 2
- @test qn(s, 1) == QN("Number", 0)
- @test qn(s, 2) == QN("Number", 1)
-
- s = siteind("Qubit"; conserve_number=true, conserve_parity=true)
- @test hastags(s, "Qubit,Site")
- @test dim(s) == 2
- @test nblocks(s) == 2
- @test qn(s, 1) == QN(("Parity", 0, 2), ("Number", 0))
- @test qn(s, 2) == QN(("Parity", 1, 2), ("Number", 1))
-
- s = siteinds("Qubit", N)
-
- Z = op("Z", s, 5)
- @test hasinds(Z, s[5]', s[5])
-
- @test_throws ArgumentError(
- "Overload of \"state\" or \"state!\" functions not found for state name \"Fake\" and Index tags $(tags(s[3]))",
- ) state("Fake", s[3])
- @test Vector(state("Up", s[3])) ≈ [1, 0]
- @test Vector(state("↑", s[3])) ≈ [1, 0]
- @test Vector(state("Dn", s[3])) ≈ [0, 1]
- @test Vector(state("↓", s[3])) ≈ [0, 1]
- @test Vector(state("+", s[3])) ≈ (1 / √2) * [1, 1]
- @test Vector(state("X+", s[3])) ≈ (1 / √2) * [1, 1]
- @test Vector(state("Xp", s[3])) ≈ (1 / √2) * [1, 1]
- @test Vector(state("-", s[3])) ≈ (1 / √2) * [1, -1]
- @test Vector(state("X-", s[3])) ≈ (1 / √2) * [1, -1]
- @test Vector(state("Xm", s[3])) ≈ (1 / √2) * [1, -1]
- @test Vector(state("i", s[3])) ≈ (1 / √2) * [1, im]
- @test Vector(state("Yp", s[3])) ≈ (1 / √2) * [1, im]
- @test Vector(state("Y+", s[3])) ≈ (1 / √2) * [1, im]
- @test Vector(state("-i", s[3])) ≈ (1 / √2) * [1, -im]
- @test Vector(state("Y-", s[3])) ≈ (1 / √2) * [1, -im]
- @test Vector(state("Ym", s[3])) ≈ (1 / √2) * [1, -im]
- @test Vector(state("Z+", s[3])) ≈ [1, 0]
- @test Vector(state("Zp", s[3])) ≈ [1, 0]
- @test Vector(state("Z-", s[3])) ≈ [0, 1]
- @test Vector(state("Zm", s[3])) ≈ [0, 1]
- @test Vector(state("Tetra1", s[3])) ≈ [1, 0]
- @test Vector(state("Tetra2", s[3])) ≈ (1 / √3) * [1, √2]
- @test Vector(state("Tetra3", s[3])) ≈ (1 / √3) * [1, √2 * exp(im * 2π / 3)]
- @test Vector(state("Tetra4", s[3])) ≈ (1 / √3) * [1, √2 * exp(im * 4π / 3)]
-
- @test_throws ArgumentError op(s, "Fake", 2)
- @test Array(op("Id", s, 3), s[3]', s[3]) ≈ [1.0 0.0; 0.0 1.0]
- @test Array(op("√NOT", s, 3), s[3]', s[3]) ≈
- [(1 + im)/2 (1 - im)/2; (1 - im)/2 (1 + im)/2]
- @test Array(op("√X", s, 3), s[3]', s[3]) ≈
- [(1 + im)/2 (1 - im)/2; (1 - im)/2 (1 + im)/2]
- @test Array(op("σx", s, 3), s[3]', s[3]) ≈ [0 1; 1 0]
- @test Array(op("σ1", s, 3), s[3]', s[3]) ≈ [0 1; 1 0]
- @test Array(op("σy", s, 3), s[3]', s[3]) ≈ [0 -im; im 0]
- @test Array(op("σ2", s, 3), s[3]', s[3]) ≈ [0 -im; im 0]
- @test Array(op("iY", s, 3), s[3]', s[3]) ≈ [0 1; -1 0]
- @test Array(op("iσy", s, 3), s[3]', s[3]) ≈ [0 1; -1 0]
- @test Array(op("iσ2", s, 3), s[3]', s[3]) ≈ [0 1; -1 0]
- @test Array(op("σz", s, 3), s[3]', s[3]) ≈ [1 0; 0 -1]
- @test Array(op("σ3", s, 3), s[3]', s[3]) ≈ [1 0; 0 -1]
- @test Array(op("H", s, 3), s[3]', s[3]) ≈ [1/sqrt(2) 1/sqrt(2); 1/sqrt(2) -1/sqrt(2)]
- @test Array(op("Phase", s, 3), s[3]', s[3]) ≈ [1 0; 0 im]
- @test Array(op("P", s, 3), s[3]', s[3]) ≈ [1 0; 0 im]
- @test Array(op("S", s, 3), s[3]', s[3]) ≈ [1 0; 0 im]
- @test Array(op("π/8", s, 3), s[3]', s[3]) ≈ [1 0; 0 (1 + im)/sqrt(2)]
- @test Array(op("T", s, 3), s[3]', s[3]) ≈ [1 0; 0 (1 + im)/sqrt(2)]
- θ = randn()
- @test Array(op("Rx", s, 3; θ=θ), s[3]', s[3]) ≈
- [cos(θ / 2) -im*sin(θ / 2); -im*sin(θ / 2) cos(θ / 2)]
- @test Array(op("Ry", s, 3; θ=θ), s[3]', s[3]) ≈
- [cos(θ / 2) -sin(θ / 2); sin(θ / 2) cos(θ / 2)]
- @test Array(op("Rz", s, 3; θ=θ), s[3]', s[3]) ≈ [exp(-im * θ / 2) 0; 0 exp(im * θ / 2)]
- # fallback
- @test Array(op("Rz", s, 3; ϕ=θ), s[3]', s[3]) ≈ [exp(-im * θ / 2) 0; 0 exp(im * θ / 2)]
- λ = randn()
- φ = randn()
- @test Array(op("Rn", s, 3; θ=θ, λ=λ, ϕ=φ), s[3]', s[3]) ≈ [
- cos(θ / 2) -exp(im * λ)*sin(θ / 2)
- exp(im * φ)*sin(θ / 2) exp(im * (φ + λ))*cos(θ / 2)
- ]
- @test Array(op("Rn̂", s, 3; θ=θ, λ=λ, ϕ=φ), s[3]', s[3]) ≈ [
- cos(θ / 2) -exp(im * λ)*sin(θ / 2)
- exp(im * φ)*sin(θ / 2) exp(im * (φ + λ))*cos(θ / 2)
- ]
- @test Array(op("Splus", s, 3), s[3]', s[3]) ≈ [0 1; 0 0]
- @test Array(op("Sminus", s, 3), s[3]', s[3]) ≈ [0 0; 1 0]
- @test Array(op("S²", s, 3), s[3]', s[3]) ≈ [0.75 0; 0 0.75]
- @test Array(op("Proj0", s, 3), s[3]', s[3]) ≈ [1 0; 0 0]
- @test Array(op("Proj1", s, 3), s[3]', s[3]) ≈ [0 0; 0 1]
- @test reshape(Array(op("√SWAP", s, 3, 5), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈
- [1 0 0 0; 0 (1 + im)/2 (1 - im)/2 0; 0 (1 - im)/2 (1 + im)/2 0; 0 0 0 1]
- @test reshape(Array(op("√Swap", s, 3, 5), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈
- [1 0 0 0; 0 (1 + im)/2 (1 - im)/2 0; 0 (1 - im)/2 (1 + im)/2 0; 0 0 0 1]
- @test reshape(Array(op("√iSWAP", s, 3, 5), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈
- [1 0 0 0; 0 1/√2 im/√2 0; 0 im/√2 1/√2 0; 0 0 0 1]
- @test reshape(Array(op("√iSwap", s, 3, 5), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈
- [1 0 0 0; 0 1/√2 im/√2 0; 0 im/√2 1/√2 0; 0 0 0 1]
- @test reshape(Array(op("SWAP", s, 3, 5), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈
- [1 0 0 0; 0 0 1 0; 0 1 0 0; 0 0 0 1]
- @test reshape(Array(op("Swap", s, 3, 5), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈
- [1 0 0 0; 0 0 1 0; 0 1 0 0; 0 0 0 1]
- @test reshape(Array(op("iSWAP", s, 3, 5), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈
- [1 0 0 0; 0 0 im 0; 0 im 0 0; 0 0 0 1]
- @test reshape(Array(op("iSwap", s, 3, 5), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈
- [1 0 0 0; 0 0 im 0; 0 im 0 0; 0 0 0 1]
- @test reshape(Array(op("Cphase", s, 3, 5; ϕ=θ), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈
- [1 0 0 0; 0 1 0 0; 0 0 1 0; 0 0 0 exp(im * θ)]
- @test reshape(Array(op("RXX", s, 3, 5; ϕ=θ), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈ [
- cos(θ) 0 0 -im*sin(θ)
- 0 cos(θ) -im*sin(θ) 0
- 0 -im*sin(θ) cos(θ) 0
- -im*sin(θ) 0 0 cos(θ)
- ]
- @test reshape(Array(op("RYY", s, 3, 5; ϕ=θ), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈ [
- cos(θ) 0 0 im*sin(θ)
- 0 cos(θ) -im*sin(θ) 0
- 0 -im*sin(θ) cos(θ) 0
- im*sin(θ) 0 0 cos(θ)
- ]
- @test reshape(Array(op("RXY", s, 3, 5; ϕ=θ), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈
- [1 0 0 0; 0 cos(θ) -im*sin(θ) 0; 0 -im*sin(θ) cos(θ) 0; 0 0 0 1]
- @test reshape(Array(op("RZZ", s, 3, 5; ϕ=θ), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈
- [exp(-im * θ) 0 0 0; 0 exp(im * θ) 0 0; 0 0 exp(im * θ) 0; 0 0 0 exp(-im * θ)]
- @test reshape(Array(op("CRX", s, 3, 5; θ=θ), s[5]', s[3]', s[5], s[3]), (4, 4)) ≈ [
- 1 0 0 0
- 0 1 0 0
- 0 0 cos(θ / 2) -im*sin(θ / 2)
- 0 0 -im*sin(θ / 2) cos(θ / 2)
- ]
- @test reshape(Array(op("CRY", s, 3, 5; θ=θ), s[5]', s[3]', s[5], s[3]), (4, 4)) ≈ [
- 1 0 0 0
- 0 1 0 0
- 0 0 cos(θ / 2) -sin(θ / 2)
- 0 0 sin(θ / 2) cos(θ / 2)
- ]
- @test reshape(Array(op("CRZ", s, 3, 5; θ=θ), s[5]', s[3]', s[5], s[3]), (4, 4)) ≈ [
- 1 0 0 0
- 0 1 0 0
- 0 0 exp(-im * θ / 2) 0
- 0 0 0 exp(im * θ / 2)
- ]
- @test reshape(
- Array(op("CRn", s, 3, 5; θ=θ, λ=λ, ϕ=φ), s[5]', s[3]', s[5], s[3]), (4, 4)
- ) ≈ [
- 1 0 0 0
- 0 1 0 0
- 0 0 cos(θ / 2) -exp(im * λ)*sin(θ / 2)
- 0 0 exp(im * φ)*sin(θ / 2) exp(im * (φ + λ))*cos(θ / 2)
- ]
- @test reshape(
- Array(op("CRn̂", s, 3, 5; θ=θ, λ=λ, ϕ=φ), s[5]', s[3]', s[5], s[3]), (4, 4)
- ) ≈ [
- 1 0 0 0
- 0 1 0 0
- 0 0 cos(θ / 2) -exp(im * λ)*sin(θ / 2)
- 0 0 exp(im * φ)*sin(θ / 2) exp(im * (φ + λ))*cos(θ / 2)
- ]
- @test reshape(Array(op("CX", s, 3, 5), s[5]', s[3]', s[5], s[3]), (4, 4)) ≈ [
- 1 0 0 0
- 0 1 0 0
- 0 0 0 1
- 0 0 1 0
- ]
- @test reshape(Array(op("CY", s, 3, 5), s[5]', s[3]', s[5], s[3]), (4, 4)) ≈ [
- 1 0 0 0
- 0 1 0 0
- 0 0 0 -im
- 0 0 im 0
- ]
- @test reshape(Array(op("CZ", s, 3, 5), s[5]', s[3]', s[5], s[3]), (4, 4)) ≈ [
- 1 0 0 0
- 0 1 0 0
- 0 0 1 0
- 0 0 0 -1
- ]
-
- toff_mat = diagm(ones(8))
- toff_mat[7:8, 7:8] .= [0 1; 1 0]
- @test reshape(
- Array(op("TOFF", s, 3, 4, 5), s[5]', s[4]', s[3]', s[5], s[4], s[3]), (8, 8)
- ) ≈ toff_mat
- @test reshape(
- Array(op("CCNOT", s, 3, 4, 5), s[5]', s[4]', s[3]', s[5], s[4], s[3]), (8, 8)
- ) ≈ toff_mat
- @test reshape(
- Array(op("CCX", s, 3, 4, 5), s[5]', s[4]', s[3]', s[5], s[4], s[3]), (8, 8)
- ) ≈ toff_mat
- fred_mat = diagm(ones(8))
- fred_mat[6:7, 6:7] .= [0 1; 1 0]
- @test reshape(
- Array(op("CS", s, 3, 4, 5), s[5]', s[4]', s[3]', s[5], s[4], s[3]), (8, 8)
- ) ≈ fred_mat
- @test reshape(
- Array(op("CSWAP", s, 3, 4, 5), s[5]', s[4]', s[3]', s[5], s[4], s[3]), (8, 8)
- ) ≈ fred_mat
- @test reshape(
- Array(op("CSwap", s, 3, 4, 5), s[5]', s[4]', s[3]', s[5], s[4], s[3]), (8, 8)
- ) ≈ fred_mat
- cccn_mat = diagm(ones(16))
- cccn_mat[15:16, 15:16] .= [0 1; 1 0]
- @test reshape(
- Array(
- op("CCCNOT", s, 2, 3, 4, 5), s[5]', s[4]', s[3]', s[2]', s[5], s[4], s[3], s[2]
- ),
- (16, 16),
- ) ≈ cccn_mat
- # Test obtaining S=1/2 operators using Qubit tag
- @test Matrix(op("X", s, 3), s[3]', s[3]) ≈ [0.0 1.0; 1.0 0.0]
- end
-
- @testset "Spin Half sites" begin
- for name in ("S=1/2", "SpinHalf", "S=½")
- s = siteind(name)
- @test hastags(s, name * ",Site")
- @test dim(s) == 2
-
- s = siteind(name; conserve_qns=true)
- @test hastags(s, name * ",Site")
- @test dim(s) == 2
- @test nblocks(s) == 2
- @test qn(s, 1) == QN("Sz", +1)
- @test qn(s, 2) == QN("Sz", -1)
-
- s = siteind(name; conserve_szparity=true)
- @test hastags(s, name * ",Site")
- @test dim(s) == 2
- @test nblocks(s) == 2
- @test qn(s, 1) == QN("SzParity", 1, 2)
- @test qn(s, 2) == QN("SzParity", 0, 2)
-
- s = siteind(name; conserve_sz=true, conserve_szparity=true)
- @test hastags(s, name * ",Site")
- @test dim(s) == 2
- @test nblocks(s) == 2
- @test qn(s, 1) == QN(("SzParity", 1, 2), ("Sz", +1))
- @test qn(s, 2) == QN(("SzParity", 0, 2), ("Sz", -1))
-
- s = siteinds(name, N)
- @test val(s[1], "Up") == 1
- @test val(s[1], "↑") == 1
- @test val(s[1], "Dn") == 2
- @test val(s[1], "↓") == 2
- @test_throws ArgumentError val(s[1], "Fake")
-
- Sz5 = op("Sz", s, 5)
- @test hasinds(Sz5, s[5]', s[5])
-
- @test Vector(state("Up", s[1])) ≈ [1, 0]
- @test Vector(state("↑", s[1])) ≈ [1, 0]
- @test Vector(state("Dn", s[1])) ≈ [0, 1]
- @test Vector(state("↓", s[1])) ≈ [0, 1]
- @test Vector(state("X+", s[1])) ≈ (1 / √2) * [1, 1]
- @test Vector(state("X-", s[1])) ≈ (1 / √2) * [1, -1]
-
- @test_throws ArgumentError op(s, "Fake", 2)
- @test Array(op("Id", s, 3), s[3]', s[3]) ≈ [1.0 0.0; 0.0 1.0]
- @test Array(op("F", s, 3), s[3]', s[3]) ≈ [1.0 0.0; 0.0 1.0]
- @test Array(op("S+", s, 3), s[3]', s[3]) ≈ [0.0 1.0; 0.0 0.0]
- @test Array(op("S⁺", s, 3), s[3]', s[3]) ≈ [0.0 1.0; 0.0 0.0]
- @test Array(op("S-", s, 4), s[4]', s[4]) ≈ [0.0 0.0; 1.0 0.0]
- @test Array(op("S⁻", s, 4), s[4]', s[4]) ≈ [0.0 0.0; 1.0 0.0]
- @test Array(op("Sx", s, 2), s[2]', s[2]) ≈ [0.0 0.5; 0.5 0.0]
- @test Array(op("Sˣ", s, 2), s[2]', s[2]) ≈ [0.0 0.5; 0.5 0.0]
- @test Array(op("iSy", s, 2), s[2]', s[2]) ≈ [0.0 0.5; -0.5 0.0]
- @test Array(op("iSʸ", s, 2), s[2]', s[2]) ≈ [0.0 0.5; -0.5 0.0]
- @test Array(op("Sy", s, 2), s[2]', s[2]) ≈ [0.0 -0.5im; 0.5im 0.0]
- @test Array(op("Sʸ", s, 2), s[2]', s[2]) ≈ [0.0 -0.5im; 0.5im 0.0]
- @test Array(op("Sz", s, 2), s[2]', s[2]) ≈ [0.5 0.0; 0.0 -0.5]
- @test Array(op("Sᶻ", s, 2), s[2]', s[2]) ≈ [0.5 0.0; 0.0 -0.5]
- @test Array(op("ProjUp", s, 2), s[2]', s[2]) ≈ [1.0 0.0; 0.0 0.0]
- @test Array(op("projUp", s, 2), s[2]', s[2]) ≈ [1.0 0.0; 0.0 0.0]
- @test Array(op("ProjDn", s, 2), s[2]', s[2]) ≈ [0.0 0.0; 0.0 1.0]
- @test Array(op("projDn", s, 2), s[2]', s[2]) ≈ [0.0 0.0; 0.0 1.0]
-
- # Test obtaining Qubit operators using S=1/2 tag:
- @test Array(op("√NOT", s, 3), s[3]', s[3]) ≈
- [(1 + im)/2 (1 - im)/2; (1 - im)/2 (1 + im)/2]
+ N = 10
+
+ @testset "Generic sites" for eltype in (Float32, Float64, ComplexF32, ComplexF64)
+ d1, d2 = 3, 4
+ i1, i2 = Index(d1), Index(d2)
+
+ o = op("I", i1; eltype)
+ @test o == itensor(Matrix(I, d1, d1), i1', dag(i1))
+ @test Base.eltype(o) <: eltype
+
+ o = op("Id", i1; eltype)
+ @test o == itensor(Matrix(I, d1, d1), i1', dag(i1))
+ @test Base.eltype(o) <: eltype
+
+ o = op("F", i1; eltype)
+ @test o == itensor(Matrix(I, d1, d1), i1', dag(i1))
+ @test Base.eltype(o) <: eltype
+
+ o = op("I", i1, i2; eltype)
+ @test o == itensor(Matrix(I, d1 * d2, d1 * d2), i2', i1', dag(i2), dag(i1))
+ @test Base.eltype(o) <: eltype
+
+ o = op("Id", i1, i2; eltype)
+ @test o == itensor(Matrix(I, d1 * d2, d1 * d2), i2', i1', dag(i2), dag(i1))
+ @test Base.eltype(o) <: eltype
+
+ U1 = op("RandomUnitary", i1)
+ @test hassameinds(U1, (i1', i1))
+ @test apply(transpose(dag(U1)), U1) ≈ itensor(Matrix(I, d1, d1), i1', dag(i1))
+
+ U1 = op("randU", i1)
+ @test hassameinds(U1, (i1', i1))
+ @test apply(transpose(dag(U1)), U1) ≈ itensor(Matrix(I, d1, d1), i1', dag(i1))
+
+ U12 = op("RandomUnitary", i1, i2)
+ @test hassameinds(U12, (i1', i2', i1, i2))
+ @test apply(transpose(dag(U12)), U12) ≈
+ itensor(Matrix(I, d1 * d2, d1 * d2), i2', i1', dag(i2), dag(i1))
+ end
+
+ @testset "Qubit sites" begin
+ s = siteind("Qubit")
+ @test hastags(s, "Qubit,Site")
+ @test dim(s) == 2
+
+ s = siteinds("Qubit", N)
+ @test val(s[1], "0") == 1
+ @test val(s[1], "1") == 2
+ @test_throws ArgumentError val(s[1], "Fake")
+
+ s = siteind("Qubit"; conserve_parity = true)
+ @test hastags(s, "Qubit,Site")
+ @test dim(s) == 2
+ @test nblocks(s) == 2
+ @test qn(s, 1) == QN("Parity", 0, 2)
+ @test qn(s, 2) == QN("Parity", 1, 2)
+
+ s = siteind("Qubit"; conserve_number = true)
+ @test hastags(s, "Qubit,Site")
+ @test dim(s) == 2
+ @test nblocks(s) == 2
+ @test qn(s, 1) == QN("Number", 0)
+ @test qn(s, 2) == QN("Number", 1)
+
+ s = siteind("Qubit"; conserve_number = true, conserve_parity = true)
+ @test hastags(s, "Qubit,Site")
+ @test dim(s) == 2
+ @test nblocks(s) == 2
+ @test qn(s, 1) == QN(("Parity", 0, 2), ("Number", 0))
+ @test qn(s, 2) == QN(("Parity", 1, 2), ("Number", 1))
+
+ s = siteinds("Qubit", N)
+
+ Z = op("Z", s, 5)
+ @test hasinds(Z, s[5]', s[5])
+
+ @test_throws ArgumentError(
+ "Overload of \"state\" or \"state!\" functions not found for state name \"Fake\" and Index tags $(tags(s[3]))",
+ ) state("Fake", s[3])
+ @test Vector(state("Up", s[3])) ≈ [1, 0]
+ @test Vector(state("↑", s[3])) ≈ [1, 0]
+ @test Vector(state("Dn", s[3])) ≈ [0, 1]
+ @test Vector(state("↓", s[3])) ≈ [0, 1]
+ @test Vector(state("+", s[3])) ≈ (1 / √2) * [1, 1]
+ @test Vector(state("X+", s[3])) ≈ (1 / √2) * [1, 1]
+ @test Vector(state("Xp", s[3])) ≈ (1 / √2) * [1, 1]
+ @test Vector(state("-", s[3])) ≈ (1 / √2) * [1, -1]
+ @test Vector(state("X-", s[3])) ≈ (1 / √2) * [1, -1]
+ @test Vector(state("Xm", s[3])) ≈ (1 / √2) * [1, -1]
+ @test Vector(state("i", s[3])) ≈ (1 / √2) * [1, im]
+ @test Vector(state("Yp", s[3])) ≈ (1 / √2) * [1, im]
+ @test Vector(state("Y+", s[3])) ≈ (1 / √2) * [1, im]
+ @test Vector(state("-i", s[3])) ≈ (1 / √2) * [1, -im]
+ @test Vector(state("Y-", s[3])) ≈ (1 / √2) * [1, -im]
+ @test Vector(state("Ym", s[3])) ≈ (1 / √2) * [1, -im]
+ @test Vector(state("Z+", s[3])) ≈ [1, 0]
+ @test Vector(state("Zp", s[3])) ≈ [1, 0]
+ @test Vector(state("Z-", s[3])) ≈ [0, 1]
+ @test Vector(state("Zm", s[3])) ≈ [0, 1]
+ @test Vector(state("Tetra1", s[3])) ≈ [1, 0]
+ @test Vector(state("Tetra2", s[3])) ≈ (1 / √3) * [1, √2]
+ @test Vector(state("Tetra3", s[3])) ≈ (1 / √3) * [1, √2 * exp(im * 2π / 3)]
+ @test Vector(state("Tetra4", s[3])) ≈ (1 / √3) * [1, √2 * exp(im * 4π / 3)]
+
+ @test_throws ArgumentError op(s, "Fake", 2)
+ @test Array(op("Id", s, 3), s[3]', s[3]) ≈ [1.0 0.0; 0.0 1.0]
+ @test Array(op("√NOT", s, 3), s[3]', s[3]) ≈
+ [(1 + im) / 2 (1 - im) / 2; (1 - im) / 2 (1 + im) / 2]
+ @test Array(op("√X", s, 3), s[3]', s[3]) ≈
+ [(1 + im) / 2 (1 - im) / 2; (1 - im) / 2 (1 + im) / 2]
+ @test Array(op("σx", s, 3), s[3]', s[3]) ≈ [0 1; 1 0]
+ @test Array(op("σ1", s, 3), s[3]', s[3]) ≈ [0 1; 1 0]
+ @test Array(op("σy", s, 3), s[3]', s[3]) ≈ [0 -im; im 0]
+ @test Array(op("σ2", s, 3), s[3]', s[3]) ≈ [0 -im; im 0]
+ @test Array(op("iY", s, 3), s[3]', s[3]) ≈ [0 1; -1 0]
+ @test Array(op("iσy", s, 3), s[3]', s[3]) ≈ [0 1; -1 0]
+ @test Array(op("iσ2", s, 3), s[3]', s[3]) ≈ [0 1; -1 0]
+ @test Array(op("σz", s, 3), s[3]', s[3]) ≈ [1 0; 0 -1]
+ @test Array(op("σ3", s, 3), s[3]', s[3]) ≈ [1 0; 0 -1]
+ @test Array(op("H", s, 3), s[3]', s[3]) ≈
+ [1 / sqrt(2) 1 / sqrt(2); 1 / sqrt(2) -1 / sqrt(2)]
+ @test Array(op("Phase", s, 3), s[3]', s[3]) ≈ [1 0; 0 im]
+ @test Array(op("P", s, 3), s[3]', s[3]) ≈ [1 0; 0 im]
+ @test Array(op("S", s, 3), s[3]', s[3]) ≈ [1 0; 0 im]
+ @test Array(op("π/8", s, 3), s[3]', s[3]) ≈ [1 0; 0 (1 + im) / sqrt(2)]
+ @test Array(op("T", s, 3), s[3]', s[3]) ≈ [1 0; 0 (1 + im) / sqrt(2)]
+ θ = randn()
+ @test Array(op("Rx", s, 3; θ = θ), s[3]', s[3]) ≈
+ [cos(θ / 2) -im * sin(θ / 2); -im * sin(θ / 2) cos(θ / 2)]
+ @test Array(op("Ry", s, 3; θ = θ), s[3]', s[3]) ≈
+ [cos(θ / 2) -sin(θ / 2); sin(θ / 2) cos(θ / 2)]
+ @test Array(op("Rz", s, 3; θ = θ), s[3]', s[3]) ≈ [exp(-im * θ / 2) 0; 0 exp(im * θ / 2)]
+ # fallback
+ @test Array(op("Rz", s, 3; ϕ = θ), s[3]', s[3]) ≈ [exp(-im * θ / 2) 0; 0 exp(im * θ / 2)]
+ λ = randn()
+ φ = randn()
+ @test Array(op("Rn", s, 3; θ = θ, λ = λ, ϕ = φ), s[3]', s[3]) ≈ [
+ cos(θ / 2) -exp(im * λ) * sin(θ / 2)
+ exp(im * φ) * sin(θ / 2) exp(im * (φ + λ)) * cos(θ / 2)
+ ]
+ @test Array(op("Rn̂", s, 3; θ = θ, λ = λ, ϕ = φ), s[3]', s[3]) ≈ [
+ cos(θ / 2) -exp(im * λ) * sin(θ / 2)
+ exp(im * φ) * sin(θ / 2) exp(im * (φ + λ)) * cos(θ / 2)
+ ]
+ @test Array(op("Splus", s, 3), s[3]', s[3]) ≈ [0 1; 0 0]
+ @test Array(op("Sminus", s, 3), s[3]', s[3]) ≈ [0 0; 1 0]
+ @test Array(op("S²", s, 3), s[3]', s[3]) ≈ [0.75 0; 0 0.75]
+ @test Array(op("Proj0", s, 3), s[3]', s[3]) ≈ [1 0; 0 0]
+ @test Array(op("Proj1", s, 3), s[3]', s[3]) ≈ [0 0; 0 1]
+ @test reshape(Array(op("√SWAP", s, 3, 5), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈
+ [1 0 0 0; 0 (1 + im) / 2 (1 - im) / 2 0; 0 (1 - im) / 2 (1 + im) / 2 0; 0 0 0 1]
+ @test reshape(Array(op("√Swap", s, 3, 5), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈
+ [1 0 0 0; 0 (1 + im) / 2 (1 - im) / 2 0; 0 (1 - im) / 2 (1 + im) / 2 0; 0 0 0 1]
+ @test reshape(Array(op("√iSWAP", s, 3, 5), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈
+ [1 0 0 0; 0 1 / √2 im / √2 0; 0 im / √2 1 / √2 0; 0 0 0 1]
+ @test reshape(Array(op("√iSwap", s, 3, 5), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈
+ [1 0 0 0; 0 1 / √2 im / √2 0; 0 im / √2 1 / √2 0; 0 0 0 1]
+ @test reshape(Array(op("SWAP", s, 3, 5), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈
+ [1 0 0 0; 0 0 1 0; 0 1 0 0; 0 0 0 1]
+ @test reshape(Array(op("Swap", s, 3, 5), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈
+ [1 0 0 0; 0 0 1 0; 0 1 0 0; 0 0 0 1]
+ @test reshape(Array(op("iSWAP", s, 3, 5), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈
+ [1 0 0 0; 0 0 im 0; 0 im 0 0; 0 0 0 1]
+ @test reshape(Array(op("iSwap", s, 3, 5), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈
+ [1 0 0 0; 0 0 im 0; 0 im 0 0; 0 0 0 1]
+ @test reshape(Array(op("Cphase", s, 3, 5; ϕ = θ), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈
+ [1 0 0 0; 0 1 0 0; 0 0 1 0; 0 0 0 exp(im * θ)]
+ @test reshape(Array(op("RXX", s, 3, 5; ϕ = θ), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈ [
+ cos(θ) 0 0 -im * sin(θ)
+ 0 cos(θ) -im * sin(θ) 0
+ 0 -im * sin(θ) cos(θ) 0
+ -im * sin(θ) 0 0 cos(θ)
+ ]
+ @test reshape(Array(op("RYY", s, 3, 5; ϕ = θ), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈ [
+ cos(θ) 0 0 im * sin(θ)
+ 0 cos(θ) -im * sin(θ) 0
+ 0 -im * sin(θ) cos(θ) 0
+ im * sin(θ) 0 0 cos(θ)
+ ]
+ @test reshape(Array(op("RXY", s, 3, 5; ϕ = θ), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈
+ [1 0 0 0; 0 cos(θ) -im * sin(θ) 0; 0 -im * sin(θ) cos(θ) 0; 0 0 0 1]
+ @test reshape(Array(op("RZZ", s, 3, 5; ϕ = θ), s[3]', s[5]', s[3], s[5]), (4, 4)) ≈
+ [exp(-im * θ) 0 0 0; 0 exp(im * θ) 0 0; 0 0 exp(im * θ) 0; 0 0 0 exp(-im * θ)]
+ @test reshape(Array(op("CRX", s, 3, 5; θ = θ), s[5]', s[3]', s[5], s[3]), (4, 4)) ≈ [
+ 1 0 0 0
+ 0 1 0 0
+ 0 0 cos(θ / 2) -im * sin(θ / 2)
+ 0 0 -im * sin(θ / 2) cos(θ / 2)
+ ]
+ @test reshape(Array(op("CRY", s, 3, 5; θ = θ), s[5]', s[3]', s[5], s[3]), (4, 4)) ≈ [
+ 1 0 0 0
+ 0 1 0 0
+ 0 0 cos(θ / 2) -sin(θ / 2)
+ 0 0 sin(θ / 2) cos(θ / 2)
+ ]
+ @test reshape(Array(op("CRZ", s, 3, 5; θ = θ), s[5]', s[3]', s[5], s[3]), (4, 4)) ≈ [
+ 1 0 0 0
+ 0 1 0 0
+ 0 0 exp(-im * θ / 2) 0
+ 0 0 0 exp(im * θ / 2)
+ ]
+ @test reshape(
+ Array(op("CRn", s, 3, 5; θ = θ, λ = λ, ϕ = φ), s[5]', s[3]', s[5], s[3]), (4, 4)
+ ) ≈ [
+ 1 0 0 0
+ 0 1 0 0
+ 0 0 cos(θ / 2) -exp(im * λ) * sin(θ / 2)
+ 0 0 exp(im * φ) * sin(θ / 2) exp(im * (φ + λ)) * cos(θ / 2)
+ ]
+ @test reshape(
+ Array(op("CRn̂", s, 3, 5; θ = θ, λ = λ, ϕ = φ), s[5]', s[3]', s[5], s[3]), (4, 4)
+ ) ≈ [
+ 1 0 0 0
+ 0 1 0 0
+ 0 0 cos(θ / 2) -exp(im * λ) * sin(θ / 2)
+ 0 0 exp(im * φ) * sin(θ / 2) exp(im * (φ + λ)) * cos(θ / 2)
+ ]
+ @test reshape(Array(op("CX", s, 3, 5), s[5]', s[3]', s[5], s[3]), (4, 4)) ≈ [
+ 1 0 0 0
+ 0 1 0 0
+ 0 0 0 1
+ 0 0 1 0
+ ]
+ @test reshape(Array(op("CY", s, 3, 5), s[5]', s[3]', s[5], s[3]), (4, 4)) ≈ [
+ 1 0 0 0
+ 0 1 0 0
+ 0 0 0 -im
+ 0 0 im 0
+ ]
+ @test reshape(Array(op("CZ", s, 3, 5), s[5]', s[3]', s[5], s[3]), (4, 4)) ≈ [
+ 1 0 0 0
+ 0 1 0 0
+ 0 0 1 0
+ 0 0 0 -1
+ ]
+
+ toff_mat = diagm(ones(8))
+ toff_mat[7:8, 7:8] .= [0 1; 1 0]
+ @test reshape(
+ Array(op("TOFF", s, 3, 4, 5), s[5]', s[4]', s[3]', s[5], s[4], s[3]), (8, 8)
+ ) ≈ toff_mat
+ @test reshape(
+ Array(op("CCNOT", s, 3, 4, 5), s[5]', s[4]', s[3]', s[5], s[4], s[3]), (8, 8)
+ ) ≈ toff_mat
+ @test reshape(
+ Array(op("CCX", s, 3, 4, 5), s[5]', s[4]', s[3]', s[5], s[4], s[3]), (8, 8)
+ ) ≈ toff_mat
+ fred_mat = diagm(ones(8))
+ fred_mat[6:7, 6:7] .= [0 1; 1 0]
+ @test reshape(
+ Array(op("CS", s, 3, 4, 5), s[5]', s[4]', s[3]', s[5], s[4], s[3]), (8, 8)
+ ) ≈ fred_mat
+ @test reshape(
+ Array(op("CSWAP", s, 3, 4, 5), s[5]', s[4]', s[3]', s[5], s[4], s[3]), (8, 8)
+ ) ≈ fred_mat
+ @test reshape(
+ Array(op("CSwap", s, 3, 4, 5), s[5]', s[4]', s[3]', s[5], s[4], s[3]), (8, 8)
+ ) ≈ fred_mat
+ cccn_mat = diagm(ones(16))
+ cccn_mat[15:16, 15:16] .= [0 1; 1 0]
+ @test reshape(
+ Array(
+ op("CCCNOT", s, 2, 3, 4, 5), s[5]', s[4]', s[3]', s[2]', s[5], s[4], s[3], s[2]
+ ),
+ (16, 16),
+ ) ≈ cccn_mat
+ # Test obtaining S=1/2 operators using Qubit tag
+ @test Matrix(op("X", s, 3), s[3]', s[3]) ≈ [0.0 1.0; 1.0 0.0]
end
- end
-
- @testset "Spin One sites" begin
- for name in ("S=1", "SpinOne")
- s = siteinds(name, N)
-
- @test val(s[1], "Up") == 1
- @test val(s[1], "↑") == 1
- @test val(s[1], "0") == 2
- @test val(s[1], "Dn") == 3
- @test val(s[1], "↓") == 3
- @test val(s[1], "Z+") == 1
- @test val(s[1], "Z-") == 3
- @test_throws ArgumentError val(s[1], "Fake")
-
- @test Vector(state("Up", s[1])) ≈ [1, 0, 0]
- @test Vector(state("↑", s[1])) ≈ [1, 0, 0]
- @test Vector(state("Z+", s[1])) ≈ [1, 0, 0]
- @test Vector(state("Z0", s[1])) ≈ [0, 1, 0]
- @test Vector(state("0", s[1])) ≈ [0, 1, 0]
- @test Vector(state("Dn", s[1])) ≈ [0, 0, 1]
- @test Vector(state("↓", s[1])) ≈ [0, 0, 1]
- @test Vector(state("Z-", s[1])) ≈ [0, 0, 1]
- @test Vector(state("X+", s[1])) ≈ [1 / 2, 1 / √2, 1 / 2]
- @test Vector(state("X0", s[1])) ≈ [-1 / √2, 0, 1 / √2]
- @test Vector(state("X-", s[1])) ≈ [1 / 2, -1 / √2, 1 / 2]
- @test Vector(state("Y+", s[1])) ≈ [-1 / 2, -im / √2, 1 / 2]
- @test Vector(state("Y0", s[1])) ≈ [1 / √2, 0, 1 / √2]
- @test Vector(state("Y-", s[1])) ≈ [-1 / 2, im / √2, 1 / 2]
-
- Sz5 = op("Sz", s, 5)
- @test hasinds(Sz5, s[5]', s[5])
-
- @test_throws ArgumentError op(s, "Fake", 2)
- @test Array(op("Id", s, 3), s[3]', s[3]) ≈ [1.0 0.0 0.0; 0.0 1.0 0.0; 0.0 0.0 1.0]
- @test Array(op("S+", s, 3), s[3]', s[3]) ≈ [0 √2 0; 0 0 √2; 0 0 0]
- @test Array(op("S⁺", s, 3), s[3]', s[3]) ≈ [0 √2 0; 0 0 √2; 0 0 0]
- @test Array(op("Sp", s, 3), s[3]', s[3]) ≈ [0 √2 0; 0 0 √2; 0 0 0]
- @test Array(op("Splus", s, 3), s[3]', s[3]) ≈ [0 √2 0; 0 0 √2; 0 0 0]
- @test Array(op("S-", s, 3), s[3]', s[3]) ≈ [0 0 0; √2 0 0; 0.0 √2 0]
- @test Array(op("S⁻", s, 3), s[3]', s[3]) ≈ [0 0 0; √2 0 0; 0.0 √2 0]
- @test Array(op("Sm", s, 3), s[3]', s[3]) ≈ [0 0 0; √2 0 0; 0.0 √2 0]
- @test Array(op("Sminus", s, 3), s[3]', s[3]) ≈ [0 0 0; √2 0 0; 0.0 √2 0]
- @test Array(op("Sx", s, 3), s[3]', s[3]) ≈ [0 1/√2 0; 1/√2 0 1/√2; 0 1/√2 0]
- @test Array(op("Sˣ", s, 3), s[3]', s[3]) ≈ [0 1/√2 0; 1/√2 0 1/√2; 0 1/√2 0]
- @test Array(op("iSy", s, 3), s[3]', s[3]) ≈ [0 1/√2 0; -1/√2 0 1/√2; 0 -1/√2 0]
- @test Array(op("iSʸ", s, 3), s[3]', s[3]) ≈ [0 1/√2 0; -1/√2 0 1/√2; 0 -1/√2 0]
- @test Array(op("Sy", s, 3), s[3]', s[3]) ≈ (1 / (√2im)) * [0 +1 0; -1 0 +1; 0 -1 0]
- @test Array(op("Sʸ", s, 3), s[3]', s[3]) ≈ (1 / (√2im)) * [0 +1 0; -1 0 +1; 0 -1 0]
- #@test Array(op("Sʸ", s, 3), s[3]', s[3]) ≈ [0 +1/√2im 0; +1/√2im 0 -1/√2im; 0 +1/√2im 0]
- @test Array(op("Sz", s, 2), s[2]', s[2]) ≈ [1.0 0 0; 0 0 0; 0 0 -1.0]
- @test Array(op("Sᶻ", s, 2), s[2]', s[2]) ≈ [1.0 0 0; 0 0 0; 0 0 -1.0]
- @test Array(op("Sz2", s, 2), s[2]', s[2]) ≈ [1.0 0 0; 0 0 0; 0 0 +1.0]
- @test Array(op("Sx2", s, 2), s[2]', s[2]) ≈ [0.5 0 0.5; 0 1.0 0; 0.5 0 0.5]
- @test Array(op("Sy2", s, 2), s[2]', s[2]) ≈ [0.5 0 -0.5; 0 1.0 0; -0.5 0 0.5]
- @test Array(op("S2", s, 2), s[2]', s[2]) ≈ [2.0 0.0 0.0; 0.0 2.0 0.0; 0.0 0.0 2.0]
- @test Array(op("S²", s, 2), s[2]', s[2]) ≈ [2.0 0.0 0.0; 0.0 2.0 0.0; 0.0 0.0 2.0]
+
+ @testset "Spin Half sites" begin
+ for name in ("S=1/2", "SpinHalf", "S=½")
+ s = siteind(name)
+ @test hastags(s, name * ",Site")
+ @test dim(s) == 2
+
+ s = siteind(name; conserve_qns = true)
+ @test hastags(s, name * ",Site")
+ @test dim(s) == 2
+ @test nblocks(s) == 2
+ @test qn(s, 1) == QN("Sz", +1)
+ @test qn(s, 2) == QN("Sz", -1)
+
+ s = siteind(name; conserve_szparity = true)
+ @test hastags(s, name * ",Site")
+ @test dim(s) == 2
+ @test nblocks(s) == 2
+ @test qn(s, 1) == QN("SzParity", 1, 2)
+ @test qn(s, 2) == QN("SzParity", 0, 2)
+
+ s = siteind(name; conserve_sz = true, conserve_szparity = true)
+ @test hastags(s, name * ",Site")
+ @test dim(s) == 2
+ @test nblocks(s) == 2
+ @test qn(s, 1) == QN(("SzParity", 1, 2), ("Sz", +1))
+ @test qn(s, 2) == QN(("SzParity", 0, 2), ("Sz", -1))
+
+ s = siteinds(name, N)
+ @test val(s[1], "Up") == 1
+ @test val(s[1], "↑") == 1
+ @test val(s[1], "Dn") == 2
+ @test val(s[1], "↓") == 2
+ @test_throws ArgumentError val(s[1], "Fake")
+
+ Sz5 = op("Sz", s, 5)
+ @test hasinds(Sz5, s[5]', s[5])
+
+ @test Vector(state("Up", s[1])) ≈ [1, 0]
+ @test Vector(state("↑", s[1])) ≈ [1, 0]
+ @test Vector(state("Dn", s[1])) ≈ [0, 1]
+ @test Vector(state("↓", s[1])) ≈ [0, 1]
+ @test Vector(state("X+", s[1])) ≈ (1 / √2) * [1, 1]
+ @test Vector(state("X-", s[1])) ≈ (1 / √2) * [1, -1]
+
+ @test_throws ArgumentError op(s, "Fake", 2)
+ @test Array(op("Id", s, 3), s[3]', s[3]) ≈ [1.0 0.0; 0.0 1.0]
+ @test Array(op("F", s, 3), s[3]', s[3]) ≈ [1.0 0.0; 0.0 1.0]
+ @test Array(op("S+", s, 3), s[3]', s[3]) ≈ [0.0 1.0; 0.0 0.0]
+ @test Array(op("S⁺", s, 3), s[3]', s[3]) ≈ [0.0 1.0; 0.0 0.0]
+ @test Array(op("S-", s, 4), s[4]', s[4]) ≈ [0.0 0.0; 1.0 0.0]
+ @test Array(op("S⁻", s, 4), s[4]', s[4]) ≈ [0.0 0.0; 1.0 0.0]
+ @test Array(op("Sx", s, 2), s[2]', s[2]) ≈ [0.0 0.5; 0.5 0.0]
+ @test Array(op("Sˣ", s, 2), s[2]', s[2]) ≈ [0.0 0.5; 0.5 0.0]
+ @test Array(op("iSy", s, 2), s[2]', s[2]) ≈ [0.0 0.5; -0.5 0.0]
+ @test Array(op("iSʸ", s, 2), s[2]', s[2]) ≈ [0.0 0.5; -0.5 0.0]
+ @test Array(op("Sy", s, 2), s[2]', s[2]) ≈ [0.0 -0.5im; 0.5im 0.0]
+ @test Array(op("Sʸ", s, 2), s[2]', s[2]) ≈ [0.0 -0.5im; 0.5im 0.0]
+ @test Array(op("Sz", s, 2), s[2]', s[2]) ≈ [0.5 0.0; 0.0 -0.5]
+ @test Array(op("Sᶻ", s, 2), s[2]', s[2]) ≈ [0.5 0.0; 0.0 -0.5]
+ @test Array(op("ProjUp", s, 2), s[2]', s[2]) ≈ [1.0 0.0; 0.0 0.0]
+ @test Array(op("projUp", s, 2), s[2]', s[2]) ≈ [1.0 0.0; 0.0 0.0]
+ @test Array(op("ProjDn", s, 2), s[2]', s[2]) ≈ [0.0 0.0; 0.0 1.0]
+ @test Array(op("projDn", s, 2), s[2]', s[2]) ≈ [0.0 0.0; 0.0 1.0]
+
+ # Test obtaining Qubit operators using S=1/2 tag:
+ @test Array(op("√NOT", s, 3), s[3]', s[3]) ≈
+ [(1 + im) / 2 (1 - im) / 2; (1 - im) / 2 (1 + im) / 2]
+ end
+ end
+
+ @testset "Spin One sites" begin
+ for name in ("S=1", "SpinOne")
+ s = siteinds(name, N)
+
+ @test val(s[1], "Up") == 1
+ @test val(s[1], "↑") == 1
+ @test val(s[1], "0") == 2
+ @test val(s[1], "Dn") == 3
+ @test val(s[1], "↓") == 3
+ @test val(s[1], "Z+") == 1
+ @test val(s[1], "Z-") == 3
+ @test_throws ArgumentError val(s[1], "Fake")
+
+ @test Vector(state("Up", s[1])) ≈ [1, 0, 0]
+ @test Vector(state("↑", s[1])) ≈ [1, 0, 0]
+ @test Vector(state("Z+", s[1])) ≈ [1, 0, 0]
+ @test Vector(state("Z0", s[1])) ≈ [0, 1, 0]
+ @test Vector(state("0", s[1])) ≈ [0, 1, 0]
+ @test Vector(state("Dn", s[1])) ≈ [0, 0, 1]
+ @test Vector(state("↓", s[1])) ≈ [0, 0, 1]
+ @test Vector(state("Z-", s[1])) ≈ [0, 0, 1]
+ @test Vector(state("X+", s[1])) ≈ [1 / 2, 1 / √2, 1 / 2]
+ @test Vector(state("X0", s[1])) ≈ [-1 / √2, 0, 1 / √2]
+ @test Vector(state("X-", s[1])) ≈ [1 / 2, -1 / √2, 1 / 2]
+ @test Vector(state("Y+", s[1])) ≈ [-1 / 2, -im / √2, 1 / 2]
+ @test Vector(state("Y0", s[1])) ≈ [1 / √2, 0, 1 / √2]
+ @test Vector(state("Y-", s[1])) ≈ [-1 / 2, im / √2, 1 / 2]
+
+ Sz5 = op("Sz", s, 5)
+ @test hasinds(Sz5, s[5]', s[5])
+
+ @test_throws ArgumentError op(s, "Fake", 2)
+ @test Array(op("Id", s, 3), s[3]', s[3]) ≈ [1.0 0.0 0.0; 0.0 1.0 0.0; 0.0 0.0 1.0]
+ @test Array(op("S+", s, 3), s[3]', s[3]) ≈ [0 √2 0; 0 0 √2; 0 0 0]
+ @test Array(op("S⁺", s, 3), s[3]', s[3]) ≈ [0 √2 0; 0 0 √2; 0 0 0]
+ @test Array(op("Sp", s, 3), s[3]', s[3]) ≈ [0 √2 0; 0 0 √2; 0 0 0]
+ @test Array(op("Splus", s, 3), s[3]', s[3]) ≈ [0 √2 0; 0 0 √2; 0 0 0]
+ @test Array(op("S-", s, 3), s[3]', s[3]) ≈ [0 0 0; √2 0 0; 0.0 √2 0]
+ @test Array(op("S⁻", s, 3), s[3]', s[3]) ≈ [0 0 0; √2 0 0; 0.0 √2 0]
+ @test Array(op("Sm", s, 3), s[3]', s[3]) ≈ [0 0 0; √2 0 0; 0.0 √2 0]
+ @test Array(op("Sminus", s, 3), s[3]', s[3]) ≈ [0 0 0; √2 0 0; 0.0 √2 0]
+ @test Array(op("Sx", s, 3), s[3]', s[3]) ≈ [0 1 / √2 0; 1 / √2 0 1 / √2; 0 1 / √2 0]
+ @test Array(op("Sˣ", s, 3), s[3]', s[3]) ≈ [0 1 / √2 0; 1 / √2 0 1 / √2; 0 1 / √2 0]
+ @test Array(op("iSy", s, 3), s[3]', s[3]) ≈
+ [0 1 / √2 0; -1 / √2 0 1 / √2; 0 -1 / √2 0]
+ @test Array(op("iSʸ", s, 3), s[3]', s[3]) ≈
+ [0 1 / √2 0; -1 / √2 0 1 / √2; 0 -1 / √2 0]
+ @test Array(op("Sy", s, 3), s[3]', s[3]) ≈ (1 / (√2im)) * [0 +1 0; -1 0 +1; 0 -1 0]
+ @test Array(op("Sʸ", s, 3), s[3]', s[3]) ≈ (1 / (√2im)) * [0 +1 0; -1 0 +1; 0 -1 0]
+ #@test Array(op("Sʸ", s, 3), s[3]', s[3]) ≈ [0 +1/√2im 0; +1/√2im 0 -1/√2im; 0 +1/√2im 0]
+ @test Array(op("Sz", s, 2), s[2]', s[2]) ≈ [1.0 0 0; 0 0 0; 0 0 -1.0]
+ @test Array(op("Sᶻ", s, 2), s[2]', s[2]) ≈ [1.0 0 0; 0 0 0; 0 0 -1.0]
+ @test Array(op("Sz2", s, 2), s[2]', s[2]) ≈ [1.0 0 0; 0 0 0; 0 0 +1.0]
+ @test Array(op("Sx2", s, 2), s[2]', s[2]) ≈ [0.5 0 0.5; 0 1.0 0; 0.5 0 0.5]
+ @test Array(op("Sy2", s, 2), s[2]', s[2]) ≈ [0.5 0 -0.5; 0 1.0 0; -0.5 0 0.5]
+ @test Array(op("S2", s, 2), s[2]', s[2]) ≈ [2.0 0.0 0.0; 0.0 2.0 0.0; 0.0 0.0 2.0]
+ @test Array(op("S²", s, 2), s[2]', s[2]) ≈ [2.0 0.0 0.0; 0.0 2.0 0.0; 0.0 0.0 2.0]
+ end
+ end
+
+ @testset "Fermion sites" begin
+ s = siteind("Fermion")
+
+ @test val(s, "0") == 1
+ @test val(s, "1") == 2
+ @test_throws ArgumentError val(s, "Fake")
+
+ @test Vector(state("Emp", s)) ≈ [1, 0]
+ @test Vector(state("Occ", s)) ≈ [0, 1]
+ @test Vector(state("0", s)) ≈ [1, 0]
+ @test Vector(state("1", s)) ≈ [0, 1]
+
+ N = op(s, "N")
+ @test hasinds(N, s', s)
+
+ @test_throws ArgumentError op(s, "Fake")
+ N = Array(op(s, "N"), s', s)
+ @test N ≈ [0.0 0; 0 1]
+ N = Array(op(s, "n"), s', s)
+ @test N ≈ [0.0 0; 0 1]
+ C = Array(op(s, "C"), s', s)
+ @test C ≈ [0.0 1; 0 0]
+ C = Array(op(s, "c"), s', s)
+ @test C ≈ [0.0 1; 0 0]
+ Cdag = Array(op(s, "Cdag"), s', s)
+ @test Cdag ≈ [0.0 0; 1 0]
+ Cdag = Array(op(s, "cdag"), s', s)
+ @test Cdag ≈ [0.0 0; 1 0]
+ Cdag = Array(op(s, "c†"), s', s)
+ @test Cdag ≈ [0.0 0; 1 0]
+ F = Array(op(s, "F"), s', s)
+ @test F ≈ [1.0 0; 0 -1]
+
+ @test has_fermion_string("C", s)
+ @test has_fermion_string("c", s)
+ @test has_fermion_string("Cdag", s)
+ @test has_fermion_string("cdag", s)
+ @test has_fermion_string("c†", s)
+ @test has_fermion_string("C*F", s)
+ @test has_fermion_string("c*F", s)
+ @test has_fermion_string("F*Cdag*F", s)
+ @test has_fermion_string("F*c†*F", s)
+ @test !has_fermion_string("N", s)
+ @test !has_fermion_string("n", s)
+ @test !has_fermion_string("N*F", s)
+ @test !has_fermion_string("n*F", s)
+
+ s = siteind("Fermion"; conserve_nf = true)
+ @test qn(s, 1) == QN("Nf", 0, -1)
+ @test qn(s, 2) == QN("Nf", 1, -1)
+ s = siteind("Fermion"; conserve_nfparity = true)
+ @test qn(s, 1) == QN("NfParity", 0, -2)
+ @test qn(s, 2) == QN("NfParity", 1, -2)
+ s = siteind("Fermion"; conserve_parity = true)
+ @test qn(s, 1) == QN("NfParity", 0, -2)
+ @test qn(s, 2) == QN("NfParity", 1, -2)
+ s = siteind("Fermion"; conserve_qns = false)
+ @test dim(s) == 2
+
+ s = siteind("Fermion"; conserve_nf = true, conserve_sz = true)
+ @test qn(s, 1) == QN(("Nf", 0, -1), ("Sz", 0))
+ @test qn(s, 2) == QN(("Nf", 1, -1), ("Sz", 1))
+ s = siteind("Fermion"; conserve_nfparity = true, conserve_sz = true)
+ @test qn(s, 1) == QN(("NfParity", 0, -2), ("Sz", 0))
+ @test qn(s, 2) == QN(("NfParity", 1, -2), ("Sz", 1))
+ s = siteind("Fermion"; conserve_nf = true, conserve_sz = "Up")
+ @test qn(s, 1) == QN(("Nf", 0, -1), ("Sz", 0))
+ @test qn(s, 2) == QN(("Nf", 1, -1), ("Sz", 1))
+ s = siteind("Fermion"; conserve_nfparity = true, conserve_sz = "Up")
+ @test qn(s, 1) == QN(("NfParity", 0, -2), ("Sz", 0))
+ @test qn(s, 2) == QN(("NfParity", 1, -2), ("Sz", 1))
+ s = siteind("Fermion"; conserve_nf = true, conserve_sz = "Dn")
+ @test qn(s, 1) == QN(("Nf", 0, -1), ("Sz", 0))
+ @test qn(s, 2) == QN(("Nf", 1, -1), ("Sz", -1))
+ s = siteind("Fermion"; conserve_nfparity = true, conserve_sz = "Dn")
+ @test qn(s, 1) == QN(("NfParity", 0, -2), ("Sz", 0))
+ @test qn(s, 2) == QN(("NfParity", 1, -2), ("Sz", -1))
+ end
+
+ @testset "Electron sites" begin
+ s = siteind("Electron")
+
+ @test val(s, "Emp") == 1
+ @test val(s, "0") == 1
+ @test val(s, "Up") == 2
+ @test val(s, "↑") == 2
+ @test val(s, "Dn") == 3
+ @test val(s, "↓") == 3
+ @test val(s, "UpDn") == 4
+ @test val(s, "↑↓") == 4
+ @test_throws ArgumentError val(s, "Fake")
+
+ @test Vector(state("Emp", s)) ≈ [1, 0, 0, 0]
+ @test Vector(state("Up", s)) ≈ [0, 1, 0, 0]
+ @test Vector(state("Dn", s)) ≈ [0, 0, 1, 0]
+ @test Vector(state("UpDn", s)) ≈ [0, 0, 0, 1]
+ @test Vector(state("0", s)) ≈ [1, 0, 0, 0]
+ @test Vector(state("↑", s)) ≈ [0, 1, 0, 0]
+ @test Vector(state("↓", s)) ≈ [0, 0, 1, 0]
+ @test Vector(state("↑↓", s)) ≈ [0, 0, 0, 1]
+
+ Nup = op(s, "Nup")
+ @test hasinds(Nup, s', s)
+
+ @test_throws ArgumentError op(s, "Fake")
+ Nup = Array(op(s, "Nup"), s', s)
+ @test Nup ≈ [0.0 0 0 0; 0 1 0 0; 0 0 0 0; 0 0 0 1]
+ Nup = Array(op(s, "n↑"), s', s)
+ @test Nup ≈ [0.0 0 0 0; 0 1 0 0; 0 0 0 0; 0 0 0 1]
+ Ndn = Array(op(s, "Ndn"), s', s)
+ @test Ndn ≈ [0.0 0 0 0; 0 0 0 0; 0 0 1 0; 0 0 0 1]
+ Ndn = Array(op(s, "n↓"), s', s)
+ @test Ndn ≈ [0.0 0 0 0; 0 0 0 0; 0 0 1 0; 0 0 0 1]
+ Nupdn = Array(op(s, "n↑↓"), s', s)
+ @test Nupdn ≈ [0.0 0 0 0; 0 0 0 0; 0 0 0 0; 0 0 0 1]
+ Ntot = Array(op(s, "Ntot"), s', s)
+ @test Ntot ≈ [0.0 0 0 0; 0 1 0 0; 0 0 1 0; 0 0 0 2]
+ Ntot = Array(op(s, "ntot"), s', s)
+ @test Ntot ≈ [0.0 0 0 0; 0 1 0 0; 0 0 1 0; 0 0 0 2]
+ Cup = Array(op(s, "Cup"), s', s)
+ @test Cup ≈ [0.0 1 0 0; 0 0 0 0; 0 0 0 1; 0 0 0 0]
+ Cup = Array(op(s, "c↑"), s', s)
+ @test Cup ≈ [0.0 1 0 0; 0 0 0 0; 0 0 0 1; 0 0 0 0]
+ Cdagup = Array(op(s, "Cdagup"), s', s)
+ @test Cdagup ≈ [0.0 0 0 0; 1 0 0 0; 0 0 0 0; 0 0 1 0]
+ Cdagup = Array(op(s, "c†↑"), s', s)
+ @test Cdagup ≈ [0.0 0 0 0; 1 0 0 0; 0 0 0 0; 0 0 1 0]
+ Cdn = Array(op(s, "Cdn"), s', s)
+ @test Cdn ≈ [0.0 0 1 0; 0 0 0 -1; 0 0 0 0; 0 0 0 0]
+ Cdn = Array(op(s, "c↓"), s', s)
+ @test Cdn ≈ [0.0 0 1 0; 0 0 0 -1; 0 0 0 0; 0 0 0 0]
+ Cdagdn = Array(op(s, "Cdagdn"), s', s)
+ @test Cdagdn ≈ [0.0 0 0 0; 0 0 0 0; 1 0 0 0; 0 -1 0 0]
+ Cdagdn = Array(op(s, "c†↓"), s', s)
+ @test Cdagdn ≈ [0.0 0 0 0; 0 0 0 0; 1 0 0 0; 0 -1 0 0]
+ Aup = Array(op(s, "Aup"), s', s)
+ @test Aup ≈ [0.0 1 0 0; 0 0 0 0; 0 0 0 1; 0 0 0 0]
+ Aup = Array(op(s, "a↑"), s', s)
+ @test Aup ≈ [0.0 1 0 0; 0 0 0 0; 0 0 0 1; 0 0 0 0]
+ Adagup = Array(op(s, "Adagup"), s', s)
+ @test Adagup ≈ [0.0 0 0 0; 1 0 0 0; 0 0 0 0; 0 0 1 0]
+ Adagup = Array(op(s, "a†↑"), s', s)
+ @test Adagup ≈ [0.0 0 0 0; 1 0 0 0; 0 0 0 0; 0 0 1 0]
+ Adn = Array(op(s, "Adn"), s', s)
+ @test Adn ≈ [0.0 0 1 0; 0 0 0 1; 0 0 0 0; 0 0 0 0]
+ Adn = Array(op(s, "a↓"), s', s)
+ @test Adn ≈ [0.0 0 1 0; 0 0 0 1; 0 0 0 0; 0 0 0 0]
+ Adagdn = Array(op(s, "Adagdn"), s', s)
+ @test Adagdn ≈ [0.0 0 0 0; 0 0 0 0; 1 0 0 0; 0 1 0 0]
+ Adagdn = Array(op(s, "a†↓"), s', s)
+ @test Adagdn ≈ [0.0 0 0 0; 0 0 0 0; 1 0 0 0; 0 1 0 0]
+ F = Array(op(s, "F"), s', s)
+ @test F ≈ [1.0 0 0 0; 0 -1 0 0; 0 0 -1 0; 0 0 0 1]
+ Fup = Array(op(s, "Fup"), s', s)
+ @test Fup ≈ [1.0 0 0 0; 0 -1 0 0; 0 0 1 0; 0 0 0 -1]
+ Fup = Array(op(s, "F↑"), s', s)
+ @test Fup ≈ [1.0 0 0 0; 0 -1 0 0; 0 0 1 0; 0 0 0 -1]
+ Fdn3 = Array(op(s, "Fdn"), s', s)
+ @test Fdn3 ≈ [1.0 0 0 0; 0 1 0 0; 0 0 -1 0; 0 0 0 -1]
+ Fdn3 = Array(op(s, "F↓"), s', s)
+ @test Fdn3 ≈ [1.0 0 0 0; 0 1 0 0; 0 0 -1 0; 0 0 0 -1]
+ Sz3 = Array(op(s, "Sz"), s', s)
+ @test Sz3 ≈ [0.0 0 0 0; 0 0.5 0 0; 0 0 -0.5 0; 0 0 0 0]
+ Sz3 = Array(op(s, "Sᶻ"), s', s)
+ @test Sz3 ≈ [0.0 0 0 0; 0 0.5 0 0; 0 0 -0.5 0; 0 0 0 0]
+ Sx3 = Array(op(s, "Sx"), s', s)
+ @test Sx3 ≈ [0.0 0 0 0; 0 0 0.5 0; 0 0.5 0 0; 0 0 0 0]
+ Sx3 = Array(op(s, "Sˣ"), s', s)
+ @test Sx3 ≈ [0.0 0 0 0; 0 0 0.5 0; 0 0.5 0 0; 0 0 0 0]
+ Sp3 = Array(op(s, "S+"), s', s)
+ @test Sp3 ≈ [0.0 0 0 0; 0 0 1 0; 0 0 0 0; 0 0 0 0]
+ Sp3 = Array(op(s, "Sp"), s', s)
+ @test Sp3 ≈ [0.0 0 0 0; 0 0 1 0; 0 0 0 0; 0 0 0 0]
+ Sp3 = Array(op(s, "Splus"), s', s)
+ @test Sp3 ≈ [0.0 0 0 0; 0 0 1 0; 0 0 0 0; 0 0 0 0]
+ Sp3 = Array(op(s, "S⁺"), s', s)
+ @test Sp3 ≈ [0.0 0 0 0; 0 0 1 0; 0 0 0 0; 0 0 0 0]
+ Sm3 = Array(op(s, "S-"), s', s)
+ @test Sm3 ≈ [0.0 0 0 0; 0 0 0 0; 0 1 0 0; 0 0 0 0]
+ Sm3 = Array(op(s, "S⁻"), s', s)
+ @test Sm3 ≈ [0.0 0 0 0; 0 0 0 0; 0 1 0 0; 0 0 0 0]
+ Sm3 = Array(op(s, "Sm"), s', s)
+ @test Sm3 ≈ [0.0 0 0 0; 0 0 0 0; 0 1 0 0; 0 0 0 0]
+ Sm3 = Array(op(s, "Sminus"), s', s)
+ @test Sm3 ≈ [0.0 0 0 0; 0 0 0 0; 0 1 0 0; 0 0 0 0]
+
+ @test has_fermion_string("Cup", s)
+ @test has_fermion_string("c↑", s)
+ @test has_fermion_string("Cup*F", s)
+ @test has_fermion_string("c↑*F", s)
+ @test has_fermion_string("Cdagup", s)
+ @test has_fermion_string("c†↑", s)
+ @test has_fermion_string("F*Cdagup", s)
+ @test has_fermion_string("F*c†↑", s)
+ @test has_fermion_string("Cdn", s)
+ @test has_fermion_string("c↓", s)
+ @test has_fermion_string("Cdn*F", s)
+ @test has_fermion_string("c↓*F", s)
+ @test has_fermion_string("Cdagdn", s)
+ @test has_fermion_string("c†↓", s)
+ @test !has_fermion_string("N", s)
+ @test !has_fermion_string("n", s)
+ @test !has_fermion_string("F*N", s)
+ @test !has_fermion_string("F*n", s)
+
+ s = siteind("Electron"; conserve_nf = true)
+ @test qn(s, 1) == QN("Nf", 0, -1)
+ @test qn(s, 2) == QN("Nf", 1, -1)
+ @test qn(s, 3) == QN("Nf", 2, -1)
+ s = siteind("Electron"; conserve_sz = true)
+ @test qn(s, 1) == QN(("Sz", 0), ("NfParity", 0, -2))
+ @test qn(s, 2) == QN(("Sz", +1), ("NfParity", 1, -2))
+ @test qn(s, 3) == QN(("Sz", -1), ("NfParity", 1, -2))
+ @test qn(s, 4) == QN(("Sz", 0), ("NfParity", 0, -2))
+ s = siteind("Electron"; conserve_nfparity = true)
+ @test qn(s, 1) == QN("NfParity", 0, -2)
+ @test qn(s, 2) == QN("NfParity", 1, -2)
+ @test qn(s, 3) == QN("NfParity", 0, -2)
+ s = siteind("Electron"; conserve_parity = true)
+ @test qn(s, 1) == QN("NfParity", 0, -2)
+ @test qn(s, 2) == QN("NfParity", 1, -2)
+ @test qn(s, 3) == QN("NfParity", 0, -2)
+ s = siteind("Electron"; conserve_qns = false)
+ @test dim(s) == 4
+ end
+
+ @testset "tJ sites" begin
+ s = siteind("tJ"; conserve_parity = true)
+ @test hastags(s, "tJ,Site")
+ @test dim(s) == 3
+ @test nblocks(s) == 2
+ @test qn(s, 1) == QN(("NfParity", 0, -2))
+ @test qn(s, 2) == QN(("NfParity", 1, -2))
+
+ s = siteind("tJ"; conserve_nf = true)
+ @test hastags(s, "tJ,Site")
+ @test dim(s) == 3
+ @test nblocks(s) == 2
+ @test qn(s, 1) == QN(("Nf", 0, -1))
+ @test qn(s, 2) == QN(("Nf", 1, -1))
+
+ s = siteind("tJ"; conserve_sz = true)
+ @test hastags(s, "tJ,Site")
+ @test dim(s) == 3
+ @test nblocks(s) == 3
+ @test qn(s, 1) == QN(("Sz", 0), ("NfParity", 0, -2))
+ @test qn(s, 2) == QN(("Sz", 1), ("NfParity", 1, -2))
+ @test qn(s, 3) == QN(("Sz", -1), ("NfParity", 1, -2))
+
+ s = siteind("tJ"; conserve_sz = true, conserve_nf = true)
+ @test hastags(s, "tJ,Site")
+ @test dim(s) == 3
+ @test nblocks(s) == 3
+ @test qn(s, 1) == QN(("Nf", 0, -1), ("Sz", 0))
+ @test qn(s, 2) == QN(("Nf", 1, -1), ("Sz", 1))
+ @test qn(s, 3) == QN(("Nf", 1, -1), ("Sz", -1))
+
+ s = siteind("tJ")
+ @test hastags(s, "tJ,Site")
+ @test dim(s) == 3
+
+ @test val(s, "Emp") == 1
+ @test val(s, "0") == 1
+ @test val(s, "Up") == 2
+ @test val(s, "↑") == 2
+ @test val(s, "Dn") == 3
+ @test val(s, "↓") == 3
+ @test_throws ArgumentError val(s, "Fake")
+
+ @test Vector(state("Emp", s)) ≈ [1, 0, 0]
+ @test Vector(state("0", s)) ≈ [1, 0, 0]
+ @test Vector(state("Up", s)) ≈ [0, 1, 0]
+ @test Vector(state("↑", s)) ≈ [0, 1, 0]
+ @test Vector(state("Dn", s)) ≈ [0, 0, 1]
+ @test Vector(state("↓", s)) ≈ [0, 0, 1]
+
+ @test_throws ArgumentError op(s, "Fake")
+ Nup = op(s, "Nup")
+ @test Nup[2, 2] ≈ 1.0
+ Nup = op(s, "n↑")
+ @test Nup[2, 2] ≈ 1.0
+ Ndn = op(s, "Ndn")
+ @test Ndn[3, 3] ≈ 1.0
+ Ndn = op(s, "n↓")
+ @test Ndn[3, 3] ≈ 1.0
+ Ntot = op(s, "Ntot")
+ @test Ntot[2, 2] ≈ 1.0
+ @test Ntot[3, 3] ≈ 1.0
+ Ntot = op(s, "ntot")
+ @test Ntot[2, 2] ≈ 1.0
+ @test Ntot[3, 3] ≈ 1.0
+ Id = Array(op(s, "Id"), s', s)
+ @test Id ≈ [1.0 0 0; 0 1 0; 0 0 1]
+ Cup = Array(op(s, "Cup"), s', s)
+ @test Cup ≈ [0.0 1 0; 0 0 0; 0 0 0]
+ Cup = Array(op(s, "c↑"), s', s)
+ @test Cup ≈ [0.0 1 0; 0 0 0; 0 0 0]
+ Cdup = Array(op(s, "Cdagup"), s', s)
+ @test Cdup ≈ [0 0 0; 1.0 0 0; 0 0 0]
+ Cdup = Array(op(s, "c†↑"), s', s)
+ @test Cdup ≈ [0 0 0; 1.0 0 0; 0 0 0]
+ Cdn = Array(op(s, "Cdn"), s', s)
+ @test Cdn ≈ [0.0 0.0 1; 0 0 0; 0 0 0]
+ Cdn = Array(op(s, "c↓"), s', s)
+ @test Cdn ≈ [0.0 0.0 1; 0 0 0; 0 0 0]
+ Cddn = Array(op(s, "Cdagdn"), s', s)
+ @test Cddn ≈ [0 0 0; 0.0 0 0; 1 0 0]
+ Cddn = Array(op(s, "c†↓"), s', s)
+ @test Cddn ≈ [0 0 0; 0.0 0 0; 1 0 0]
+ Aup = Array(op(s, "Aup"), s', s)
+ @test Aup ≈ [0.0 1 0; 0 0 0; 0 0 0]
+ Aup = Array(op(s, "a↑"), s', s)
+ @test Aup ≈ [0.0 1 0; 0 0 0; 0 0 0]
+ Adup = Array(op(s, "Adagup"), s', s)
+ @test Adup ≈ [0 0 0; 1.0 0 0; 0 0 0]
+ Adup = Array(op(s, "a†↑"), s', s)
+ @test Adup ≈ [0 0 0; 1.0 0 0; 0 0 0]
+ Adn = Array(op(s, "Adn"), s', s)
+ @test Adn ≈ [0.0 0.0 1; 0 0 0; 0 0 0]
+ Adn = Array(op(s, "a↓"), s', s)
+ @test Adn ≈ [0.0 0.0 1; 0 0 0; 0 0 0]
+ Addn = Array(op(s, "Adagdn"), s', s)
+ @test Addn ≈ [0 0 0; 0.0 0 0; 1 0 0]
+ Addn = Array(op(s, "a†↓"), s', s)
+ @test Addn ≈ [0 0 0; 0.0 0 0; 1 0 0]
+ FP = Array(op(s, "F"), s', s)
+ @test FP ≈ [1.0 0.0 0; 0 -1.0 0; 0 0 -1.0]
+ Fup = Array(op(s, "Fup"), s', s)
+ @test Fup ≈ [1.0 0.0 0; 0 -1.0 0; 0 0 1.0]
+ Fup = Array(op(s, "F↑"), s', s)
+ @test Fup ≈ [1.0 0.0 0; 0 -1.0 0; 0 0 1.0]
+ Fdn = Array(op(s, "Fdn"), s', s)
+ @test Fdn ≈ [1.0 0.0 0; 0 1.0 0; 0 0 -1.0]
+ Fdn = Array(op(s, "F↓"), s', s)
+ @test Fdn ≈ [1.0 0.0 0; 0 1.0 0; 0 0 -1.0]
+ Sz = Array(op(s, "Sz"), s', s)
+ @test Sz ≈ [0.0 0.0 0; 0 0.5 0; 0 0 -0.5]
+ Sz = Array(op(s, "Sᶻ"), s', s)
+ @test Sz ≈ [0.0 0.0 0; 0 0.5 0; 0 0 -0.5]
+ Sx = Array(op(s, "Sx"), s', s)
+ @test Sx ≈ [0.0 0.0 0; 0 0 0.5; 0 0.5 0]
+ Sx = Array(op(s, "Sˣ"), s', s)
+ @test Sx ≈ [0.0 0.0 0; 0 0 0.5; 0 0.5 0]
+ Sy = Array(op(s, "Sy"), s', s)
+ @test Sy ≈ [0.0 0.0 0; 0 0 -0.5im; 0 0.5im 0]
+ Sy = Array(op(s, "Sʸ"), s', s)
+ @test Sy ≈ [0.0 0.0 0; 0 0 -0.5im; 0 0.5im 0]
+ Sp = Array(op(s, "Splus"), s', s)
+ @test Sp ≈ [0.0 0.0 0; 0 0 1.0; 0 0 0]
+ Sp = Array(op(s, "Sp"), s', s)
+ @test Sp ≈ [0.0 0.0 0; 0 0 1.0; 0 0 0]
+ Sp = Array(op(s, "S⁺"), s', s)
+ @test Sp ≈ [0.0 0.0 0; 0 0 1.0; 0 0 0]
+ Sm = Array(op(s, "Sminus"), s', s)
+ @test Sm ≈ [0.0 0.0 0; 0 0 0; 0 1.0 0]
+ Sm = Array(op(s, "Sm"), s', s)
+ @test Sm ≈ [0.0 0.0 0; 0 0 0; 0 1.0 0]
+ Sm = Array(op(s, "S⁻"), s', s)
+ @test Sm ≈ [0.0 0.0 0; 0 0 0; 0 1.0 0]
+
+ @test has_fermion_string("Cup", s)
+ @test has_fermion_string("c↑", s)
+ @test has_fermion_string("Cdagup", s)
+ @test has_fermion_string("c†↑", s)
+ @test has_fermion_string("Cdn", s)
+ @test has_fermion_string("c↓", s)
+ @test has_fermion_string("Cdagdn", s)
+ @test has_fermion_string("c†↓", s)
+ @test !has_fermion_string("N", s)
+ @test !has_fermion_string("n", s)
+ end
+
+ @testset "$st" for st in ["Qudit", "Boson"]
+ d = 3
+ s = siteinds(st, 4; dim = d)
+ @test dim(s[1]) == d
+ @test dim(s[2]) == d
+ @test dim(s[3]) == d
+ @test dim(s[4]) == d
+ v = state(s, 2, "0")
+ @test v == itensor([1, 0, 0], s[2])
+ v = state(s, 3, "1")
+ @test v == itensor([0, 1, 0], s[3])
+ v = state(s, 4, "2")
+ @test v == itensor([0, 0, 1], s[4])
+ @test_throws BoundsError state(s, 4, "3")
+ v = val(s, 2, "0")
+ @test v == 1
+ v = val(s, 3, "1")
+ @test v == 2
+ v = val(s, 4, "2")
+ @test v == 3
+ @test op(s, "Id", 2) == itensor([1 0 0; 0 1 0; 0 0 1], s[2]', dag(s[2]))
+ @test op(s, "I", 2) == itensor([1 0 0; 0 1 0; 0 0 1], s[2]', dag(s[2]))
+ @test op(s, "F", 2) == itensor([1 0 0; 0 1 0; 0 0 1], s[2]', dag(s[2]))
+ @test op("Id", s, 1, 2) ==
+ itensor(Matrix(I, d^2, d^2), s[2]', s[1]', dag(s[2]), dag(s[1]))
+ @test op("I", s, 1, 2) ==
+ itensor(Matrix(I, d^2, d^2), s[2]', s[1]', dag(s[2]), dag(s[1]))
+ @test op(s, "N", 2) == itensor([0 0 0; 0 1 0; 0 0 2], s[2]', dag(s[2]))
+ @test op(s, "n", 2) == itensor([0 0 0; 0 1 0; 0 0 2], s[2]', dag(s[2]))
+ @test op(s, "Adag", 2) ≈ itensor([0 0 0; 1 0 0; 0 √2 0], s[2]', dag(s[2]))
+ @test op(s, "adag", 2) ≈ itensor([0 0 0; 1 0 0; 0 √2 0], s[2]', dag(s[2]))
+ @test op(s, "a†", 2) ≈ itensor([0 0 0; 1 0 0; 0 √2 0], s[2]', dag(s[2]))
+ @test op(s, "A", 2) ≈ itensor([0 1 0; 0 0 √2; 0 0 0], s[2]', dag(s[2]))
+ @test op(s, "a", 2) ≈ itensor([0 1 0; 0 0 √2; 0 0 0], s[2]', dag(s[2]))
+ @test op(s, "a†b†", 2, 3) ≈ itensor(
+ kron([0 0 0; 1 0 0; 0 √2 0], [0 0 0; 1 0 0; 0 √2 0]),
+ s[3]',
+ s[2]',
+ dag(s[3]),
+ dag(s[2]),
+ )
+ @test op(s, "a†b", 2, 3) ≈ itensor(
+ kron([0 0 0; 1 0 0; 0 √2 0], [0 1 0; 0 0 √2; 0 0 0]),
+ s[3]',
+ s[2]',
+ dag(s[3]),
+ dag(s[2]),
+ )
+ @test op(s, "ab†", 2, 3) ≈ itensor(
+ kron([0 1 0; 0 0 √2; 0 0 0], [0 0 0; 1 0 0; 0 √2 0]),
+ s[3]',
+ s[2]',
+ dag(s[3]),
+ dag(s[2]),
+ )
+ @test op(s, "ab", 2, 3) ≈ itensor(
+ kron([0 1 0; 0 0 √2; 0 0 0], [0 1 0; 0 0 √2; 0 0 0]),
+ s[3]',
+ s[2]',
+ dag(s[3]),
+ dag(s[2]),
+ )
+ @test_throws ErrorException op(ITensors.OpName("ab"), ITensors.SiteType(st))
+
+ # With QNs
+ s = siteinds(st, 4; dim = d, conserve_qns = true)
+ @test all(hasqns, s)
+ @test op(s, "Id", 2) == itensor([1 0 0; 0 1 0; 0 0 1], s[2]', dag(s[2]))
+ @test op(s, "I", 2) == itensor([1 0 0; 0 1 0; 0 0 1], s[2]', dag(s[2]))
+ @test op("Id", s, 1, 2) ==
+ itensor(Matrix(I, d^2, d^2), s[2]', s[1]', dag(s[2]), dag(s[1]))
+ @test op("I", s, 1, 2) ==
+ itensor(Matrix(I, d^2, d^2), s[2]', s[1]', dag(s[2]), dag(s[1]))
+ @test op(s, "N", 2) == itensor([0 0 0; 0 1 0; 0 0 2], s[2]', dag(s[2]))
+ @test op(s, "n", 2) == itensor([0 0 0; 0 1 0; 0 0 2], s[2]', dag(s[2]))
+ @test op(s, "Adag", 2) ≈ itensor([0 0 0; 1 0 0; 0 √2 0], s[2]', dag(s[2]))
+ @test op(s, "adag", 2) ≈ itensor([0 0 0; 1 0 0; 0 √2 0], s[2]', dag(s[2]))
+ @test op(s, "a†", 2) ≈ itensor([0 0 0; 1 0 0; 0 √2 0], s[2]', dag(s[2]))
+ @test op(s, "A", 2) ≈ itensor([0 1 0; 0 0 √2; 0 0 0], s[2]', dag(s[2]))
+ @test op(s, "a", 2) ≈ itensor([0 1 0; 0 0 √2; 0 0 0], s[2]', dag(s[2]))
end
- end
-
- @testset "Fermion sites" begin
- s = siteind("Fermion")
-
- @test val(s, "0") == 1
- @test val(s, "1") == 2
- @test_throws ArgumentError val(s, "Fake")
-
- @test Vector(state("Emp", s)) ≈ [1, 0]
- @test Vector(state("Occ", s)) ≈ [0, 1]
- @test Vector(state("0", s)) ≈ [1, 0]
- @test Vector(state("1", s)) ≈ [0, 1]
-
- N = op(s, "N")
- @test hasinds(N, s', s)
-
- @test_throws ArgumentError op(s, "Fake")
- N = Array(op(s, "N"), s', s)
- @test N ≈ [0.0 0; 0 1]
- N = Array(op(s, "n"), s', s)
- @test N ≈ [0.0 0; 0 1]
- C = Array(op(s, "C"), s', s)
- @test C ≈ [0.0 1; 0 0]
- C = Array(op(s, "c"), s', s)
- @test C ≈ [0.0 1; 0 0]
- Cdag = Array(op(s, "Cdag"), s', s)
- @test Cdag ≈ [0.0 0; 1 0]
- Cdag = Array(op(s, "cdag"), s', s)
- @test Cdag ≈ [0.0 0; 1 0]
- Cdag = Array(op(s, "c†"), s', s)
- @test Cdag ≈ [0.0 0; 1 0]
- F = Array(op(s, "F"), s', s)
- @test F ≈ [1.0 0; 0 -1]
-
- @test has_fermion_string("C", s)
- @test has_fermion_string("c", s)
- @test has_fermion_string("Cdag", s)
- @test has_fermion_string("cdag", s)
- @test has_fermion_string("c†", s)
- @test has_fermion_string("C*F", s)
- @test has_fermion_string("c*F", s)
- @test has_fermion_string("F*Cdag*F", s)
- @test has_fermion_string("F*c†*F", s)
- @test !has_fermion_string("N", s)
- @test !has_fermion_string("n", s)
- @test !has_fermion_string("N*F", s)
- @test !has_fermion_string("n*F", s)
-
- s = siteind("Fermion"; conserve_nf=true)
- @test qn(s, 1) == QN("Nf", 0, -1)
- @test qn(s, 2) == QN("Nf", 1, -1)
- s = siteind("Fermion"; conserve_nfparity=true)
- @test qn(s, 1) == QN("NfParity", 0, -2)
- @test qn(s, 2) == QN("NfParity", 1, -2)
- s = siteind("Fermion"; conserve_parity=true)
- @test qn(s, 1) == QN("NfParity", 0, -2)
- @test qn(s, 2) == QN("NfParity", 1, -2)
- s = siteind("Fermion"; conserve_qns=false)
- @test dim(s) == 2
-
- s = siteind("Fermion"; conserve_nf=true, conserve_sz=true)
- @test qn(s, 1) == QN(("Nf", 0, -1), ("Sz", 0))
- @test qn(s, 2) == QN(("Nf", 1, -1), ("Sz", 1))
- s = siteind("Fermion"; conserve_nfparity=true, conserve_sz=true)
- @test qn(s, 1) == QN(("NfParity", 0, -2), ("Sz", 0))
- @test qn(s, 2) == QN(("NfParity", 1, -2), ("Sz", 1))
- s = siteind("Fermion"; conserve_nf=true, conserve_sz="Up")
- @test qn(s, 1) == QN(("Nf", 0, -1), ("Sz", 0))
- @test qn(s, 2) == QN(("Nf", 1, -1), ("Sz", 1))
- s = siteind("Fermion"; conserve_nfparity=true, conserve_sz="Up")
- @test qn(s, 1) == QN(("NfParity", 0, -2), ("Sz", 0))
- @test qn(s, 2) == QN(("NfParity", 1, -2), ("Sz", 1))
- s = siteind("Fermion"; conserve_nf=true, conserve_sz="Dn")
- @test qn(s, 1) == QN(("Nf", 0, -1), ("Sz", 0))
- @test qn(s, 2) == QN(("Nf", 1, -1), ("Sz", -1))
- s = siteind("Fermion"; conserve_nfparity=true, conserve_sz="Dn")
- @test qn(s, 1) == QN(("NfParity", 0, -2), ("Sz", 0))
- @test qn(s, 2) == QN(("NfParity", 1, -2), ("Sz", -1))
- end
-
- @testset "Electron sites" begin
- s = siteind("Electron")
-
- @test val(s, "Emp") == 1
- @test val(s, "0") == 1
- @test val(s, "Up") == 2
- @test val(s, "↑") == 2
- @test val(s, "Dn") == 3
- @test val(s, "↓") == 3
- @test val(s, "UpDn") == 4
- @test val(s, "↑↓") == 4
- @test_throws ArgumentError val(s, "Fake")
-
- @test Vector(state("Emp", s)) ≈ [1, 0, 0, 0]
- @test Vector(state("Up", s)) ≈ [0, 1, 0, 0]
- @test Vector(state("Dn", s)) ≈ [0, 0, 1, 0]
- @test Vector(state("UpDn", s)) ≈ [0, 0, 0, 1]
- @test Vector(state("0", s)) ≈ [1, 0, 0, 0]
- @test Vector(state("↑", s)) ≈ [0, 1, 0, 0]
- @test Vector(state("↓", s)) ≈ [0, 0, 1, 0]
- @test Vector(state("↑↓", s)) ≈ [0, 0, 0, 1]
-
- Nup = op(s, "Nup")
- @test hasinds(Nup, s', s)
-
- @test_throws ArgumentError op(s, "Fake")
- Nup = Array(op(s, "Nup"), s', s)
- @test Nup ≈ [0.0 0 0 0; 0 1 0 0; 0 0 0 0; 0 0 0 1]
- Nup = Array(op(s, "n↑"), s', s)
- @test Nup ≈ [0.0 0 0 0; 0 1 0 0; 0 0 0 0; 0 0 0 1]
- Ndn = Array(op(s, "Ndn"), s', s)
- @test Ndn ≈ [0.0 0 0 0; 0 0 0 0; 0 0 1 0; 0 0 0 1]
- Ndn = Array(op(s, "n↓"), s', s)
- @test Ndn ≈ [0.0 0 0 0; 0 0 0 0; 0 0 1 0; 0 0 0 1]
- Nupdn = Array(op(s, "n↑↓"), s', s)
- @test Nupdn ≈ [0.0 0 0 0; 0 0 0 0; 0 0 0 0; 0 0 0 1]
- Ntot = Array(op(s, "Ntot"), s', s)
- @test Ntot ≈ [0.0 0 0 0; 0 1 0 0; 0 0 1 0; 0 0 0 2]
- Ntot = Array(op(s, "ntot"), s', s)
- @test Ntot ≈ [0.0 0 0 0; 0 1 0 0; 0 0 1 0; 0 0 0 2]
- Cup = Array(op(s, "Cup"), s', s)
- @test Cup ≈ [0.0 1 0 0; 0 0 0 0; 0 0 0 1; 0 0 0 0]
- Cup = Array(op(s, "c↑"), s', s)
- @test Cup ≈ [0.0 1 0 0; 0 0 0 0; 0 0 0 1; 0 0 0 0]
- Cdagup = Array(op(s, "Cdagup"), s', s)
- @test Cdagup ≈ [0.0 0 0 0; 1 0 0 0; 0 0 0 0; 0 0 1 0]
- Cdagup = Array(op(s, "c†↑"), s', s)
- @test Cdagup ≈ [0.0 0 0 0; 1 0 0 0; 0 0 0 0; 0 0 1 0]
- Cdn = Array(op(s, "Cdn"), s', s)
- @test Cdn ≈ [0.0 0 1 0; 0 0 0 -1; 0 0 0 0; 0 0 0 0]
- Cdn = Array(op(s, "c↓"), s', s)
- @test Cdn ≈ [0.0 0 1 0; 0 0 0 -1; 0 0 0 0; 0 0 0 0]
- Cdagdn = Array(op(s, "Cdagdn"), s', s)
- @test Cdagdn ≈ [0.0 0 0 0; 0 0 0 0; 1 0 0 0; 0 -1 0 0]
- Cdagdn = Array(op(s, "c†↓"), s', s)
- @test Cdagdn ≈ [0.0 0 0 0; 0 0 0 0; 1 0 0 0; 0 -1 0 0]
- Aup = Array(op(s, "Aup"), s', s)
- @test Aup ≈ [0.0 1 0 0; 0 0 0 0; 0 0 0 1; 0 0 0 0]
- Aup = Array(op(s, "a↑"), s', s)
- @test Aup ≈ [0.0 1 0 0; 0 0 0 0; 0 0 0 1; 0 0 0 0]
- Adagup = Array(op(s, "Adagup"), s', s)
- @test Adagup ≈ [0.0 0 0 0; 1 0 0 0; 0 0 0 0; 0 0 1 0]
- Adagup = Array(op(s, "a†↑"), s', s)
- @test Adagup ≈ [0.0 0 0 0; 1 0 0 0; 0 0 0 0; 0 0 1 0]
- Adn = Array(op(s, "Adn"), s', s)
- @test Adn ≈ [0.0 0 1 0; 0 0 0 1; 0 0 0 0; 0 0 0 0]
- Adn = Array(op(s, "a↓"), s', s)
- @test Adn ≈ [0.0 0 1 0; 0 0 0 1; 0 0 0 0; 0 0 0 0]
- Adagdn = Array(op(s, "Adagdn"), s', s)
- @test Adagdn ≈ [0.0 0 0 0; 0 0 0 0; 1 0 0 0; 0 1 0 0]
- Adagdn = Array(op(s, "a†↓"), s', s)
- @test Adagdn ≈ [0.0 0 0 0; 0 0 0 0; 1 0 0 0; 0 1 0 0]
- F = Array(op(s, "F"), s', s)
- @test F ≈ [1.0 0 0 0; 0 -1 0 0; 0 0 -1 0; 0 0 0 1]
- Fup = Array(op(s, "Fup"), s', s)
- @test Fup ≈ [1.0 0 0 0; 0 -1 0 0; 0 0 1 0; 0 0 0 -1]
- Fup = Array(op(s, "F↑"), s', s)
- @test Fup ≈ [1.0 0 0 0; 0 -1 0 0; 0 0 1 0; 0 0 0 -1]
- Fdn3 = Array(op(s, "Fdn"), s', s)
- @test Fdn3 ≈ [1.0 0 0 0; 0 1 0 0; 0 0 -1 0; 0 0 0 -1]
- Fdn3 = Array(op(s, "F↓"), s', s)
- @test Fdn3 ≈ [1.0 0 0 0; 0 1 0 0; 0 0 -1 0; 0 0 0 -1]
- Sz3 = Array(op(s, "Sz"), s', s)
- @test Sz3 ≈ [0.0 0 0 0; 0 0.5 0 0; 0 0 -0.5 0; 0 0 0 0]
- Sz3 = Array(op(s, "Sᶻ"), s', s)
- @test Sz3 ≈ [0.0 0 0 0; 0 0.5 0 0; 0 0 -0.5 0; 0 0 0 0]
- Sx3 = Array(op(s, "Sx"), s', s)
- @test Sx3 ≈ [0.0 0 0 0; 0 0 0.5 0; 0 0.5 0 0; 0 0 0 0]
- Sx3 = Array(op(s, "Sˣ"), s', s)
- @test Sx3 ≈ [0.0 0 0 0; 0 0 0.5 0; 0 0.5 0 0; 0 0 0 0]
- Sp3 = Array(op(s, "S+"), s', s)
- @test Sp3 ≈ [0.0 0 0 0; 0 0 1 0; 0 0 0 0; 0 0 0 0]
- Sp3 = Array(op(s, "Sp"), s', s)
- @test Sp3 ≈ [0.0 0 0 0; 0 0 1 0; 0 0 0 0; 0 0 0 0]
- Sp3 = Array(op(s, "Splus"), s', s)
- @test Sp3 ≈ [0.0 0 0 0; 0 0 1 0; 0 0 0 0; 0 0 0 0]
- Sp3 = Array(op(s, "S⁺"), s', s)
- @test Sp3 ≈ [0.0 0 0 0; 0 0 1 0; 0 0 0 0; 0 0 0 0]
- Sm3 = Array(op(s, "S-"), s', s)
- @test Sm3 ≈ [0.0 0 0 0; 0 0 0 0; 0 1 0 0; 0 0 0 0]
- Sm3 = Array(op(s, "S⁻"), s', s)
- @test Sm3 ≈ [0.0 0 0 0; 0 0 0 0; 0 1 0 0; 0 0 0 0]
- Sm3 = Array(op(s, "Sm"), s', s)
- @test Sm3 ≈ [0.0 0 0 0; 0 0 0 0; 0 1 0 0; 0 0 0 0]
- Sm3 = Array(op(s, "Sminus"), s', s)
- @test Sm3 ≈ [0.0 0 0 0; 0 0 0 0; 0 1 0 0; 0 0 0 0]
-
- @test has_fermion_string("Cup", s)
- @test has_fermion_string("c↑", s)
- @test has_fermion_string("Cup*F", s)
- @test has_fermion_string("c↑*F", s)
- @test has_fermion_string("Cdagup", s)
- @test has_fermion_string("c†↑", s)
- @test has_fermion_string("F*Cdagup", s)
- @test has_fermion_string("F*c†↑", s)
- @test has_fermion_string("Cdn", s)
- @test has_fermion_string("c↓", s)
- @test has_fermion_string("Cdn*F", s)
- @test has_fermion_string("c↓*F", s)
- @test has_fermion_string("Cdagdn", s)
- @test has_fermion_string("c†↓", s)
- @test !has_fermion_string("N", s)
- @test !has_fermion_string("n", s)
- @test !has_fermion_string("F*N", s)
- @test !has_fermion_string("F*n", s)
-
- s = siteind("Electron"; conserve_nf=true)
- @test qn(s, 1) == QN("Nf", 0, -1)
- @test qn(s, 2) == QN("Nf", 1, -1)
- @test qn(s, 3) == QN("Nf", 2, -1)
- s = siteind("Electron"; conserve_sz=true)
- @test qn(s, 1) == QN(("Sz", 0), ("NfParity", 0, -2))
- @test qn(s, 2) == QN(("Sz", +1), ("NfParity", 1, -2))
- @test qn(s, 3) == QN(("Sz", -1), ("NfParity", 1, -2))
- @test qn(s, 4) == QN(("Sz", 0), ("NfParity", 0, -2))
- s = siteind("Electron"; conserve_nfparity=true)
- @test qn(s, 1) == QN("NfParity", 0, -2)
- @test qn(s, 2) == QN("NfParity", 1, -2)
- @test qn(s, 3) == QN("NfParity", 0, -2)
- s = siteind("Electron"; conserve_parity=true)
- @test qn(s, 1) == QN("NfParity", 0, -2)
- @test qn(s, 2) == QN("NfParity", 1, -2)
- @test qn(s, 3) == QN("NfParity", 0, -2)
- s = siteind("Electron"; conserve_qns=false)
- @test dim(s) == 4
- end
-
- @testset "tJ sites" begin
- s = siteind("tJ"; conserve_parity=true)
- @test hastags(s, "tJ,Site")
- @test dim(s) == 3
- @test nblocks(s) == 2
- @test qn(s, 1) == QN(("NfParity", 0, -2))
- @test qn(s, 2) == QN(("NfParity", 1, -2))
-
- s = siteind("tJ"; conserve_nf=true)
- @test hastags(s, "tJ,Site")
- @test dim(s) == 3
- @test nblocks(s) == 2
- @test qn(s, 1) == QN(("Nf", 0, -1))
- @test qn(s, 2) == QN(("Nf", 1, -1))
-
- s = siteind("tJ"; conserve_sz=true)
- @test hastags(s, "tJ,Site")
- @test dim(s) == 3
- @test nblocks(s) == 3
- @test qn(s, 1) == QN(("Sz", 0), ("NfParity", 0, -2))
- @test qn(s, 2) == QN(("Sz", 1), ("NfParity", 1, -2))
- @test qn(s, 3) == QN(("Sz", -1), ("NfParity", 1, -2))
-
- s = siteind("tJ"; conserve_sz=true, conserve_nf=true)
- @test hastags(s, "tJ,Site")
- @test dim(s) == 3
- @test nblocks(s) == 3
- @test qn(s, 1) == QN(("Nf", 0, -1), ("Sz", 0))
- @test qn(s, 2) == QN(("Nf", 1, -1), ("Sz", 1))
- @test qn(s, 3) == QN(("Nf", 1, -1), ("Sz", -1))
-
- s = siteind("tJ")
- @test hastags(s, "tJ,Site")
- @test dim(s) == 3
-
- @test val(s, "Emp") == 1
- @test val(s, "0") == 1
- @test val(s, "Up") == 2
- @test val(s, "↑") == 2
- @test val(s, "Dn") == 3
- @test val(s, "↓") == 3
- @test_throws ArgumentError val(s, "Fake")
-
- @test Vector(state("Emp", s)) ≈ [1, 0, 0]
- @test Vector(state("0", s)) ≈ [1, 0, 0]
- @test Vector(state("Up", s)) ≈ [0, 1, 0]
- @test Vector(state("↑", s)) ≈ [0, 1, 0]
- @test Vector(state("Dn", s)) ≈ [0, 0, 1]
- @test Vector(state("↓", s)) ≈ [0, 0, 1]
-
- @test_throws ArgumentError op(s, "Fake")
- Nup = op(s, "Nup")
- @test Nup[2, 2] ≈ 1.0
- Nup = op(s, "n↑")
- @test Nup[2, 2] ≈ 1.0
- Ndn = op(s, "Ndn")
- @test Ndn[3, 3] ≈ 1.0
- Ndn = op(s, "n↓")
- @test Ndn[3, 3] ≈ 1.0
- Ntot = op(s, "Ntot")
- @test Ntot[2, 2] ≈ 1.0
- @test Ntot[3, 3] ≈ 1.0
- Ntot = op(s, "ntot")
- @test Ntot[2, 2] ≈ 1.0
- @test Ntot[3, 3] ≈ 1.0
- Id = Array(op(s, "Id"), s', s)
- @test Id ≈ [1.0 0 0; 0 1 0; 0 0 1]
- Cup = Array(op(s, "Cup"), s', s)
- @test Cup ≈ [0.0 1 0; 0 0 0; 0 0 0]
- Cup = Array(op(s, "c↑"), s', s)
- @test Cup ≈ [0.0 1 0; 0 0 0; 0 0 0]
- Cdup = Array(op(s, "Cdagup"), s', s)
- @test Cdup ≈ [0 0 0; 1.0 0 0; 0 0 0]
- Cdup = Array(op(s, "c†↑"), s', s)
- @test Cdup ≈ [0 0 0; 1.0 0 0; 0 0 0]
- Cdn = Array(op(s, "Cdn"), s', s)
- @test Cdn ≈ [0.0 0.0 1; 0 0 0; 0 0 0]
- Cdn = Array(op(s, "c↓"), s', s)
- @test Cdn ≈ [0.0 0.0 1; 0 0 0; 0 0 0]
- Cddn = Array(op(s, "Cdagdn"), s', s)
- @test Cddn ≈ [0 0 0; 0.0 0 0; 1 0 0]
- Cddn = Array(op(s, "c†↓"), s', s)
- @test Cddn ≈ [0 0 0; 0.0 0 0; 1 0 0]
- Aup = Array(op(s, "Aup"), s', s)
- @test Aup ≈ [0.0 1 0; 0 0 0; 0 0 0]
- Aup = Array(op(s, "a↑"), s', s)
- @test Aup ≈ [0.0 1 0; 0 0 0; 0 0 0]
- Adup = Array(op(s, "Adagup"), s', s)
- @test Adup ≈ [0 0 0; 1.0 0 0; 0 0 0]
- Adup = Array(op(s, "a†↑"), s', s)
- @test Adup ≈ [0 0 0; 1.0 0 0; 0 0 0]
- Adn = Array(op(s, "Adn"), s', s)
- @test Adn ≈ [0.0 0.0 1; 0 0 0; 0 0 0]
- Adn = Array(op(s, "a↓"), s', s)
- @test Adn ≈ [0.0 0.0 1; 0 0 0; 0 0 0]
- Addn = Array(op(s, "Adagdn"), s', s)
- @test Addn ≈ [0 0 0; 0.0 0 0; 1 0 0]
- Addn = Array(op(s, "a†↓"), s', s)
- @test Addn ≈ [0 0 0; 0.0 0 0; 1 0 0]
- FP = Array(op(s, "F"), s', s)
- @test FP ≈ [1.0 0.0 0; 0 -1.0 0; 0 0 -1.0]
- Fup = Array(op(s, "Fup"), s', s)
- @test Fup ≈ [1.0 0.0 0; 0 -1.0 0; 0 0 1.0]
- Fup = Array(op(s, "F↑"), s', s)
- @test Fup ≈ [1.0 0.0 0; 0 -1.0 0; 0 0 1.0]
- Fdn = Array(op(s, "Fdn"), s', s)
- @test Fdn ≈ [1.0 0.0 0; 0 1.0 0; 0 0 -1.0]
- Fdn = Array(op(s, "F↓"), s', s)
- @test Fdn ≈ [1.0 0.0 0; 0 1.0 0; 0 0 -1.0]
- Sz = Array(op(s, "Sz"), s', s)
- @test Sz ≈ [0.0 0.0 0; 0 0.5 0; 0 0 -0.5]
- Sz = Array(op(s, "Sᶻ"), s', s)
- @test Sz ≈ [0.0 0.0 0; 0 0.5 0; 0 0 -0.5]
- Sx = Array(op(s, "Sx"), s', s)
- @test Sx ≈ [0.0 0.0 0; 0 0 0.5; 0 0.5 0]
- Sx = Array(op(s, "Sˣ"), s', s)
- @test Sx ≈ [0.0 0.0 0; 0 0 0.5; 0 0.5 0]
- Sy = Array(op(s, "Sy"), s', s)
- @test Sy ≈ [0.0 0.0 0; 0 0 -0.5im; 0 0.5im 0]
- Sy = Array(op(s, "Sʸ"), s', s)
- @test Sy ≈ [0.0 0.0 0; 0 0 -0.5im; 0 0.5im 0]
- Sp = Array(op(s, "Splus"), s', s)
- @test Sp ≈ [0.0 0.0 0; 0 0 1.0; 0 0 0]
- Sp = Array(op(s, "Sp"), s', s)
- @test Sp ≈ [0.0 0.0 0; 0 0 1.0; 0 0 0]
- Sp = Array(op(s, "S⁺"), s', s)
- @test Sp ≈ [0.0 0.0 0; 0 0 1.0; 0 0 0]
- Sm = Array(op(s, "Sminus"), s', s)
- @test Sm ≈ [0.0 0.0 0; 0 0 0; 0 1.0 0]
- Sm = Array(op(s, "Sm"), s', s)
- @test Sm ≈ [0.0 0.0 0; 0 0 0; 0 1.0 0]
- Sm = Array(op(s, "S⁻"), s', s)
- @test Sm ≈ [0.0 0.0 0; 0 0 0; 0 1.0 0]
-
- @test has_fermion_string("Cup", s)
- @test has_fermion_string("c↑", s)
- @test has_fermion_string("Cdagup", s)
- @test has_fermion_string("c†↑", s)
- @test has_fermion_string("Cdn", s)
- @test has_fermion_string("c↓", s)
- @test has_fermion_string("Cdagdn", s)
- @test has_fermion_string("c†↓", s)
- @test !has_fermion_string("N", s)
- @test !has_fermion_string("n", s)
- end
-
- @testset "$st" for st in ["Qudit", "Boson"]
- d = 3
- s = siteinds(st, 4; dim=d)
- @test dim(s[1]) == d
- @test dim(s[2]) == d
- @test dim(s[3]) == d
- @test dim(s[4]) == d
- v = state(s, 2, "0")
- @test v == itensor([1, 0, 0], s[2])
- v = state(s, 3, "1")
- @test v == itensor([0, 1, 0], s[3])
- v = state(s, 4, "2")
- @test v == itensor([0, 0, 1], s[4])
- @test_throws BoundsError state(s, 4, "3")
- v = val(s, 2, "0")
- @test v == 1
- v = val(s, 3, "1")
- @test v == 2
- v = val(s, 4, "2")
- @test v == 3
- @test op(s, "Id", 2) == itensor([1 0 0; 0 1 0; 0 0 1], s[2]', dag(s[2]))
- @test op(s, "I", 2) == itensor([1 0 0; 0 1 0; 0 0 1], s[2]', dag(s[2]))
- @test op(s, "F", 2) == itensor([1 0 0; 0 1 0; 0 0 1], s[2]', dag(s[2]))
- @test op("Id", s, 1, 2) ==
- itensor(Matrix(I, d^2, d^2), s[2]', s[1]', dag(s[2]), dag(s[1]))
- @test op("I", s, 1, 2) ==
- itensor(Matrix(I, d^2, d^2), s[2]', s[1]', dag(s[2]), dag(s[1]))
- @test op(s, "N", 2) == itensor([0 0 0; 0 1 0; 0 0 2], s[2]', dag(s[2]))
- @test op(s, "n", 2) == itensor([0 0 0; 0 1 0; 0 0 2], s[2]', dag(s[2]))
- @test op(s, "Adag", 2) ≈ itensor([0 0 0; 1 0 0; 0 √2 0], s[2]', dag(s[2]))
- @test op(s, "adag", 2) ≈ itensor([0 0 0; 1 0 0; 0 √2 0], s[2]', dag(s[2]))
- @test op(s, "a†", 2) ≈ itensor([0 0 0; 1 0 0; 0 √2 0], s[2]', dag(s[2]))
- @test op(s, "A", 2) ≈ itensor([0 1 0; 0 0 √2; 0 0 0], s[2]', dag(s[2]))
- @test op(s, "a", 2) ≈ itensor([0 1 0; 0 0 √2; 0 0 0], s[2]', dag(s[2]))
- @test op(s, "a†b†", 2, 3) ≈ itensor(
- kron([0 0 0; 1 0 0; 0 √2 0], [0 0 0; 1 0 0; 0 √2 0]),
- s[3]',
- s[2]',
- dag(s[3]),
- dag(s[2]),
- )
- @test op(s, "a†b", 2, 3) ≈ itensor(
- kron([0 0 0; 1 0 0; 0 √2 0], [0 1 0; 0 0 √2; 0 0 0]),
- s[3]',
- s[2]',
- dag(s[3]),
- dag(s[2]),
- )
- @test op(s, "ab†", 2, 3) ≈ itensor(
- kron([0 1 0; 0 0 √2; 0 0 0], [0 0 0; 1 0 0; 0 √2 0]),
- s[3]',
- s[2]',
- dag(s[3]),
- dag(s[2]),
- )
- @test op(s, "ab", 2, 3) ≈ itensor(
- kron([0 1 0; 0 0 √2; 0 0 0], [0 1 0; 0 0 √2; 0 0 0]),
- s[3]',
- s[2]',
- dag(s[3]),
- dag(s[2]),
- )
- @test_throws ErrorException op(ITensors.OpName("ab"), ITensors.SiteType(st))
-
- # With QNs
- s = siteinds(st, 4; dim=d, conserve_qns=true)
- @test all(hasqns, s)
- @test op(s, "Id", 2) == itensor([1 0 0; 0 1 0; 0 0 1], s[2]', dag(s[2]))
- @test op(s, "I", 2) == itensor([1 0 0; 0 1 0; 0 0 1], s[2]', dag(s[2]))
- @test op("Id", s, 1, 2) ==
- itensor(Matrix(I, d^2, d^2), s[2]', s[1]', dag(s[2]), dag(s[1]))
- @test op("I", s, 1, 2) ==
- itensor(Matrix(I, d^2, d^2), s[2]', s[1]', dag(s[2]), dag(s[1]))
- @test op(s, "N", 2) == itensor([0 0 0; 0 1 0; 0 0 2], s[2]', dag(s[2]))
- @test op(s, "n", 2) == itensor([0 0 0; 0 1 0; 0 0 2], s[2]', dag(s[2]))
- @test op(s, "Adag", 2) ≈ itensor([0 0 0; 1 0 0; 0 √2 0], s[2]', dag(s[2]))
- @test op(s, "adag", 2) ≈ itensor([0 0 0; 1 0 0; 0 √2 0], s[2]', dag(s[2]))
- @test op(s, "a†", 2) ≈ itensor([0 0 0; 1 0 0; 0 √2 0], s[2]', dag(s[2]))
- @test op(s, "A", 2) ≈ itensor([0 1 0; 0 0 √2; 0 0 0], s[2]', dag(s[2]))
- @test op(s, "a", 2) ≈ itensor([0 1 0; 0 0 √2; 0 0 0], s[2]', dag(s[2]))
- end
end
diff --git a/test/base/test_qnitensor.jl b/test/base/test_qnitensor.jl
index 820c8ccabe..bdc885dfaf 100644
--- a/test/base/test_qnitensor.jl
+++ b/test/base/test_qnitensor.jl
@@ -9,1912 +9,1912 @@ using Test
Random.seed!(1234)
@testset "BlockSparse ITensor" begin
- @testset "Constructor" begin
- i = Index([QN(0) => 1, QN(1) => 2], "i")
- j = Index([QN(0) => 3, QN(1) => 4, QN(2) => 5], "j")
-
- A = ITensor(QN(0), i, dag(j))
-
- @test flux(A) == QN(0)
- @test nnzblocks(A) == 2
- end
-
- @testset "Construct from Array" begin
- i = Index([QN(0) => 1, QN(1) => 2], "i")
-
- A = [
- 1.0 0.0 0.0
- 0.0 2.0 3.0
- 0.0 1e-10 4.0
- ]
- T = ITensor(A, i', dag(i))
- @test flux(T) == QN(0)
- @test nnzblocks(T) == 2
- @test Block(1, 1) in nzblocks(T)
- @test Block(2, 2) in nzblocks(T)
- @test T[1, 1] == 1.0
- @test T[2, 2] == 2.0
- @test T[2, 3] == 3.0
- @test T[3, 2] == 1e-10
- @test T[3, 3] == 4.0
-
- T = ITensor(A, i', dag(i))
- @test flux(T) == QN(0)
- @test nnzblocks(T) == 2
- @test Block(1, 1) in nzblocks(T)
- @test Block(2, 2) in nzblocks(T)
- @test T[1, 1] == 1.0
- @test T[2, 2] == 2.0
- @test T[2, 3] == 3.0
- @test T[3, 2] == 1e-10
- @test T[3, 3] == 4.0
-
- T = ITensor(A, i', dag(i); tol=1e-9)
- @test flux(T) == QN(0)
- @test nnzblocks(T) == 2
- @test Block(1, 1) in nzblocks(T)
- @test Block(2, 2) in nzblocks(T)
- @test T[1, 1] == 1.0
- @test T[2, 2] == 2.0
- @test T[2, 3] == 3.0
- @test T[3, 2] == 0.0
- @test T[3, 3] == 4.0
-
- A = [
- 1e-9 0.0 0.0
- 0.0 2.0 3.0
- 0.0 1e-10 4.0
- ]
- T = ITensor(A, i', dag(i); tol=1e-8)
- @test flux(T) == QN(0)
- @test nnzblocks(T) == 1
- @test Block(2, 2) in nzblocks(T)
- @test T[1, 1] == 0.0
- @test T[2, 2] == 2.0
- @test T[2, 3] == 3.0
- @test T[3, 2] == 0.0
- @test T[3, 3] == 4.0
-
- A = [
- 1e-9 2.0 3.0
- 1e-9 1e-10 2e-10
- 2e-9 1e-10 4e-10
- ]
- T = ITensor(A, i', dag(i); tol=1e-8)
- @test flux(T) == QN(-1)
- @test nnzblocks(T) == 1
- @test Block(1, 2) in nzblocks(T)
- @test T[1, 1] == 0.0
- @test T[1, 2] == 2.0
- @test T[1, 3] == 3.0
- @test T[2, 2] == 0.0
- @test T[2, 3] == 0.0
- @test T[3, 2] == 0.0
- @test T[3, 3] == 0.0
-
- A = [
- 1e-9 2.0 3.0
- 1e-5 1e-10 2e-10
- 2e-9 1e-10 4e-10
- ]
- @test_throws ErrorException ITensor(A, i', dag(i); tol=1e-8)
- @test ITensor(A, i', dag(i); tol=1e-8, checkflux=false) isa ITensor
-
- # Construct from zero matrix. Flux check should still pass
- # (Regression test for issue #1209)
- @test ITensor(zeros(3, 3), i', dag(i)) isa ITensor
- end
-
- @testset "reductions (sum, prod)" for elt in (
- Float32, Float64, Complex{Float32}, Complex{Float64}
- )
- s = [QN(0) => 2, QN(1) => 2]
- a = random_itensor(elt, Index(s), dag(Index(s)))
- @test sum(a) ≈ sum(array(a))
- @test sum(a) isa elt
- @test prod(a) ≈ prod(array(a))
- @test prod(a) isa elt
-
- # All blocks are nonzero
- s = [QN(0) => 2, QN(0) => 2]
- a = random_itensor(elt, Index(s), dag(Index(s)))
- @test sum(a) ≈ sum(array(a))
- @test sum(a) isa elt
- @test prod(a) ≈ prod(array(a))
- @test prod(a) isa elt
- end
-
- @testset "Regression test for in-place operations with mismatched block structure (eltype=$elt)" for elt in
- (
- Float32, Float64, Complex{Float32}, Complex{Float64}
- )
- # Regression test for https://github.com/ITensor/ITensors.jl/pull/1318
- i = Index([QN(0) => 1, QN(1) => 1])
- src = ITensor(i', dag(i))
- x12 = randn(elt)
- src[1, 2] = x12
- dest = ITensor(i', dag(i))
- x21 = randn(elt)
- dest[2, 1] = x21
- α = elt(2)
- dest .= α .* src
- @test nnz(src) == 1
- @test src[1, 2] == x12
- @test nnz(dest) == 2
- @test dest[1, 2] == α * x12
- @test dest[2, 1] == zero(elt)
- end
-
- @testset "similartype regression test" begin
- # Regression test for issue seen in:
- # https://github.com/ITensor/ITensorInfiniteMPS.jl/pull/77
- # Previously, `similartype` wasn't using information about the dimensions
- # properly and was returning a `BlockSparse` storage of the dimensions
- # of the input tensor.
- i = Index([QN() => 2])
- A = ITensor(i, i')
- B = ITensor(i'')
- C = A * B
- @test NDTensors.ndims(NDTensors.storagetype(C)) == 3
- @test C + ITensor(i, i', i'') == ITensor(i, i', i'')
- end
-
- @testset "Construct from Array regression test" begin
- i = Index([QN(0) => 2, QN(1) => 2])
- T = itensor([0, 0, 1, 2], i)
- @test flux(T) == QN(1)
- @test nnzblocks(T) == 1
- @test !(Block(1) in nzblocks(T))
- @test Block(2) in nzblocks(T)
- @test T[1] == 0
- @test T[2] == 0
- @test T[3] == 1
- @test T[4] == 2
- # Test fluxes of specific elements:
- @test flux(T, 1) == QN(0)
- @test flux(T, 2) == QN(0)
- @test flux(T, 3) == QN(1)
- @test flux(T, 4) == QN(1)
- @test_throws BoundsError flux(T, 5)
- @test_throws BoundsError flux(T, 0)
- # Test fluxes of specific Blocks
- @test flux(T, Block(1)) == QN(0)
- @test flux(T, Block(2)) == QN(1)
- @test_throws BoundsError flux(T, Block(0))
- @test_throws BoundsError flux(T, Block(3))
- end
-
- @testset "trace (tr)" begin
- si = [QN(0) => 1, QN(1) => 2, QN(2) => 3]
- sj = [QN(0) => 2, QN(1) => 3, QN(2) => 4]
- sk = [QN(0) => 3, QN(1) => 4, QN(2) => 5]
- sl = [QN(0) => 2]
- i, j, k, l = Index.((si, sj, sk, sl), ("i", "j", "k", "l"))
- T = random_itensor(dag(j), k', i', dag(k), j', dag(i))
- trT1 = tr(T)
- trT2 = (T * δ(i, dag(i)') * δ(j, dag(j)') * δ(k, dag(k)'))[]
- @test trT1 ≈ trT2
-
- T = random_itensor(dag(j), k', i', l, dag(k), j', dag(i))
- trT1 = tr(T)
- trT2 = T * δ(i, dag(i)') * δ(j, dag(j)') * δ(k, dag(k)')
- @test trT1 ≈ trT2
- end
-
- @testset "QN ITensor Array constructor view behavior" begin
- d = 2
- i = Index([QN(0) => d ÷ 2, QN(1) => d ÷ 2])
-
- # no view
- A = diagm(randn(Float64, d))
- T = ITensor(A, i', dag(i); tol=1e-12)
- @test storage(T) isa NDTensors.BlockSparse{Float64}
- A[1, 1] = 2.0
- T[1, 1] ≠ 2.0
-
- # no view
- A = diagm(rand(Int, d))
- T = ITensor(Int, A, i', dag(i); tol=1e-12)
- @test storage(T) isa NDTensors.BlockSparse{Int}
- A[1, 1] = 2
- T[1, 1] ≠ 2
-
- # no view
- A = diagm(rand(Int, d))
- T = ITensor(A, i', dag(i); tol=1e-12)
- @test storage(T) isa NDTensors.BlockSparse{Float64}
- A[1, 1] = 2
- T[1, 1] ≠ 2
-
- # no view
- A = diagm(randn(Float64, d))
- T = ITensor(A, i', dag(i); tol=1e-12)
- @test storage(T) isa NDTensors.BlockSparse{Float64}
- A[1, 1] = 2
- T[1, 1] ≠ 2
-
- # no view
- A = diagm(rand(Int, d))
- T = ITensor(Int, A, i', dag(i); tol=1e-12)
- @test storage(T) isa NDTensors.BlockSparse{Int}
- A[1, 1] = 2
- T[1, 1] ≠ 2
-
- # no view
- A = diagm(rand(Int, d))
- T = ITensor(A, i', dag(i); tol=1e-12)
- @test storage(T) isa NDTensors.BlockSparse{Float64}
- A[1, 1] = 2
- T[1, 1] ≠ 2
- end
-
- @testset "Constructor Leads to No Blocks" begin
- i = Index(QN(0) => 2, QN(1) => 3; tags="i")
- j = Index(QN(1) => 2, QN(2) => 1; tags="j")
- A = ITensor(i, j)
- @test storage(A) isa NDTensors.EmptyStorage
- @test_throws ErrorException ITensor(QN(0), i, j)
- end
-
- @testset "ITensor iteration" begin
- i = Index([QN(0) => 1, QN(1) => 2], "i")
- j = Index([QN(0) => 3, QN(1) => 4, QN(2) => 5], "j")
-
- A = random_itensor(i, dag(j))
- Is = eachindex(A)
- @test length(Is) == dim(A)
- sumA = 0.0
- for I in Is
- sumA += A[I]
+ @testset "Constructor" begin
+ i = Index([QN(0) => 1, QN(1) => 2], "i")
+ j = Index([QN(0) => 3, QN(1) => 4, QN(2) => 5], "j")
+
+ A = ITensor(QN(0), i, dag(j))
+
+ @test flux(A) == QN(0)
+ @test nnzblocks(A) == 2
+ end
+
+ @testset "Construct from Array" begin
+ i = Index([QN(0) => 1, QN(1) => 2], "i")
+
+ A = [
+ 1.0 0.0 0.0
+ 0.0 2.0 3.0
+ 0.0 1.0e-10 4.0
+ ]
+ T = ITensor(A, i', dag(i))
+ @test flux(T) == QN(0)
+ @test nnzblocks(T) == 2
+ @test Block(1, 1) in nzblocks(T)
+ @test Block(2, 2) in nzblocks(T)
+ @test T[1, 1] == 1.0
+ @test T[2, 2] == 2.0
+ @test T[2, 3] == 3.0
+ @test T[3, 2] == 1.0e-10
+ @test T[3, 3] == 4.0
+
+ T = ITensor(A, i', dag(i))
+ @test flux(T) == QN(0)
+ @test nnzblocks(T) == 2
+ @test Block(1, 1) in nzblocks(T)
+ @test Block(2, 2) in nzblocks(T)
+ @test T[1, 1] == 1.0
+ @test T[2, 2] == 2.0
+ @test T[2, 3] == 3.0
+ @test T[3, 2] == 1.0e-10
+ @test T[3, 3] == 4.0
+
+ T = ITensor(A, i', dag(i); tol = 1.0e-9)
+ @test flux(T) == QN(0)
+ @test nnzblocks(T) == 2
+ @test Block(1, 1) in nzblocks(T)
+ @test Block(2, 2) in nzblocks(T)
+ @test T[1, 1] == 1.0
+ @test T[2, 2] == 2.0
+ @test T[2, 3] == 3.0
+ @test T[3, 2] == 0.0
+ @test T[3, 3] == 4.0
+
+ A = [
+ 1.0e-9 0.0 0.0
+ 0.0 2.0 3.0
+ 0.0 1.0e-10 4.0
+ ]
+ T = ITensor(A, i', dag(i); tol = 1.0e-8)
+ @test flux(T) == QN(0)
+ @test nnzblocks(T) == 1
+ @test Block(2, 2) in nzblocks(T)
+ @test T[1, 1] == 0.0
+ @test T[2, 2] == 2.0
+ @test T[2, 3] == 3.0
+ @test T[3, 2] == 0.0
+ @test T[3, 3] == 4.0
+
+ A = [
+ 1.0e-9 2.0 3.0
+ 1.0e-9 1.0e-10 2.0e-10
+ 2.0e-9 1.0e-10 4.0e-10
+ ]
+ T = ITensor(A, i', dag(i); tol = 1.0e-8)
+ @test flux(T) == QN(-1)
+ @test nnzblocks(T) == 1
+ @test Block(1, 2) in nzblocks(T)
+ @test T[1, 1] == 0.0
+ @test T[1, 2] == 2.0
+ @test T[1, 3] == 3.0
+ @test T[2, 2] == 0.0
+ @test T[2, 3] == 0.0
+ @test T[3, 2] == 0.0
+ @test T[3, 3] == 0.0
+
+ A = [
+ 1.0e-9 2.0 3.0
+ 1.0e-5 1.0e-10 2.0e-10
+ 2.0e-9 1.0e-10 4.0e-10
+ ]
+ @test_throws ErrorException ITensor(A, i', dag(i); tol = 1.0e-8)
+ @test ITensor(A, i', dag(i); tol = 1.0e-8, checkflux = false) isa ITensor
+
+ # Construct from zero matrix. Flux check should still pass
+ # (Regression test for issue #1209)
+ @test ITensor(zeros(3, 3), i', dag(i)) isa ITensor
+ end
+
+ @testset "reductions (sum, prod)" for elt in (
+ Float32, Float64, Complex{Float32}, Complex{Float64},
+ )
+ s = [QN(0) => 2, QN(1) => 2]
+ a = random_itensor(elt, Index(s), dag(Index(s)))
+ @test sum(a) ≈ sum(array(a))
+ @test sum(a) isa elt
+ @test prod(a) ≈ prod(array(a))
+ @test prod(a) isa elt
+
+ # All blocks are nonzero
+ s = [QN(0) => 2, QN(0) => 2]
+ a = random_itensor(elt, Index(s), dag(Index(s)))
+ @test sum(a) ≈ sum(array(a))
+ @test sum(a) isa elt
+ @test prod(a) ≈ prod(array(a))
+ @test prod(a) isa elt
+ end
+
+ @testset "Regression test for in-place operations with mismatched block structure (eltype=$elt)" for elt in
+ (
+ Float32, Float64, Complex{Float32}, Complex{Float64},
+ )
+ # Regression test for https://github.com/ITensor/ITensors.jl/pull/1318
+ i = Index([QN(0) => 1, QN(1) => 1])
+ src = ITensor(i', dag(i))
+ x12 = randn(elt)
+ src[1, 2] = x12
+ dest = ITensor(i', dag(i))
+ x21 = randn(elt)
+ dest[2, 1] = x21
+ α = elt(2)
+ dest .= α .* src
+ @test nnz(src) == 1
+ @test src[1, 2] == x12
+ @test nnz(dest) == 2
+ @test dest[1, 2] == α * x12
+ @test dest[2, 1] == zero(elt)
+ end
+
+ @testset "similartype regression test" begin
+ # Regression test for issue seen in:
+ # https://github.com/ITensor/ITensorInfiniteMPS.jl/pull/77
+ # Previously, `similartype` wasn't using information about the dimensions
+ # properly and was returning a `BlockSparse` storage of the dimensions
+ # of the input tensor.
+ i = Index([QN() => 2])
+ A = ITensor(i, i')
+ B = ITensor(i'')
+ C = A * B
+ @test NDTensors.ndims(NDTensors.storagetype(C)) == 3
+ @test C + ITensor(i, i', i'') == ITensor(i, i', i'')
end
- @test sumA ≈ sum(ITensors.data(A))
- sumA = 0.0
- for a in A
- sumA += a
+
+ @testset "Construct from Array regression test" begin
+ i = Index([QN(0) => 2, QN(1) => 2])
+ T = itensor([0, 0, 1, 2], i)
+ @test flux(T) == QN(1)
+ @test nnzblocks(T) == 1
+ @test !(Block(1) in nzblocks(T))
+ @test Block(2) in nzblocks(T)
+ @test T[1] == 0
+ @test T[2] == 0
+ @test T[3] == 1
+ @test T[4] == 2
+ # Test fluxes of specific elements:
+ @test flux(T, 1) == QN(0)
+ @test flux(T, 2) == QN(0)
+ @test flux(T, 3) == QN(1)
+ @test flux(T, 4) == QN(1)
+ @test_throws BoundsError flux(T, 5)
+ @test_throws BoundsError flux(T, 0)
+ # Test fluxes of specific Blocks
+ @test flux(T, Block(1)) == QN(0)
+ @test flux(T, Block(2)) == QN(1)
+ @test_throws BoundsError flux(T, Block(0))
+ @test_throws BoundsError flux(T, Block(3))
end
- @test sumA ≈ sum(A)
- end
- @testset "Constructor (from Tuple)" begin
- i = Index([QN(0) => 1, QN(1) => 2], "i")
- j = Index([QN(0) => 3, QN(1) => 4, QN(2) => 5], "j")
+ @testset "trace (tr)" begin
+ si = [QN(0) => 1, QN(1) => 2, QN(2) => 3]
+ sj = [QN(0) => 2, QN(1) => 3, QN(2) => 4]
+ sk = [QN(0) => 3, QN(1) => 4, QN(2) => 5]
+ sl = [QN(0) => 2]
+ i, j, k, l = Index.((si, sj, sk, sl), ("i", "j", "k", "l"))
+ T = random_itensor(dag(j), k', i', dag(k), j', dag(i))
+ trT1 = tr(T)
+ trT2 = (T * δ(i, dag(i)') * δ(j, dag(j)') * δ(k, dag(k)'))[]
+ @test trT1 ≈ trT2
+
+ T = random_itensor(dag(j), k', i', l, dag(k), j', dag(i))
+ trT1 = tr(T)
+ trT2 = T * δ(i, dag(i)') * δ(j, dag(j)') * δ(k, dag(k)')
+ @test trT1 ≈ trT2
+ end
- A = ITensor(QN(0), (i, dag(j)))
-
- @test flux(A) == QN(0)
- @test nnzblocks(A) == 2
- end
-
- @testset "Constructor (no flux specified)" begin
- i = Index([QN(0) => 1, QN(1) => 2], "i")
- j = Index([QN(0) => 3, QN(1) => 4, QN(2) => 5], "j")
-
- A = ITensor(i, dag(j))
-
- @test flux(A) === nothing
- @test nnzblocks(A) == 0
- end
-
- @testset "Constructor (Tuple, no flux specified)" begin
- i = Index([QN(0) => 1, QN(1) => 2], "i")
- j = Index([QN(0) => 3, QN(1) => 4, QN(2) => 5], "j")
-
- A = ITensor((i, dag(j)))
-
- @test flux(A) === nothing
- @test nnzblocks(A) == 0
- end
-
- @testset "No indices getindex" begin
- T = ITensor(QN())
- @test order(T) == 0
- @test flux(T) == nothing
- @test nnzblocks(T) == 1
- @test T[] == 0
-
- s = Index(QN(-1) => 1, QN(1) => 1)
- A = ITensor(s, dag(s'))
- B = ITensor(s', dag(s))
- A[1, 1] = 1
- B[2, 2] = 1
- C = A * B
- @test order(C) == 0
- @test flux(C) == nothing
- @test nnzblocks(C) == 0
- @test C[] == 0
- end
-
- @testset "Empty constructor" begin
- i = Index([QN(0) => 1, QN(1) => 2], "i")
-
- A = ITensor(i, dag(i'))
-
- @test nnzblocks(A) == 0
- @test nnz(A) == 0
- @test hasinds(A, i, i')
- @test isnothing(flux(A))
-
- A[i => 1, i' => 1] = 1.0
-
- @test nnzblocks(A) == 1
- @test nnz(A) == 1
- @test flux(A) == QN(0)
-
- A[i => 2, i' => 2] = 1.0
-
- @test nnzblocks(A) == 2
- @test nnz(A) == 5
- @test flux(A) == QN(0)
- end
-
- @testset "Check flux when setting elements" begin
- i = Index(QN(0) => 1, QN(1) => 1; tags="i")
- A = random_itensor(QN(0), i, dag(i'))
- @test_throws ErrorException A[i => 1, i' => 2] = 1.0
- end
-
- @testset "Random constructor" begin
- i = Index([QN(0) => 1, QN(1) => 2], "i")
- j = Index([QN(0) => 3, QN(1) => 4, QN(2) => 5], "j")
-
- A = random_itensor(QN(1), i, dag(j))
-
- @test flux(A) == QN(1)
- @test nnzblocks(A) == 1
-
- B = random_itensor(i, dag(j))
-
- @test flux(B) == QN()
- @test nnzblocks(B) == 2
-
- # Scalar algebra
- C = 2 * B
- @test C[1, 1] == 2 * B[1, 1]
- @test flux(B) == QN()
- @test flux(C) == QN()
- @test nnzblocks(B) == 2
- @test nnzblocks(C) == 2
-
- C = B / 2
- @test C[1, 1] == B[1, 1] / 2
- @test flux(B) == QN()
- @test flux(C) == QN()
- @test nnzblocks(B) == 2
- @test nnzblocks(C) == 2
- end
-
- @testset "eltype promotion with scalar * and /" begin
- i = Index([QN(0) => 2, QN(1) => 3])
- @test eltype(ITensor(1.0f0, i', dag(i)) * 2) === Float32
- @test eltype(ITensor(1.0f0, i', dag(i)) .* 2) === Float32
- @test eltype(ITensor(1.0f0, i', dag(i)) / 2) === Float32
- @test eltype(ITensor(1.0f0, i', dag(i)) ./ 2) === Float32
- @test eltype(ITensor(1.0f0, i', dag(i)) * 2.0f0) === Float32
- @test eltype(ITensor(1.0f0, i', dag(i)) .* 2.0f0) === Float32
- @test eltype(ITensor(1.0f0, i', dag(i)) / 2.0f0) === Float32
- @test eltype(ITensor(1.0f0, i', dag(i)) ./ 2.0f0) === Float32
- @test eltype(ITensor(1.0f0, i', dag(i)) * 2.0) === Float64
- @test eltype(ITensor(1.0f0, i', dag(i)) .* 2.0) === Float64
- @test eltype(ITensor(1.0f0, i', dag(i)) / 2.0) === Float64
- @test eltype(ITensor(1.0f0, i', dag(i)) ./ 2.0) === Float64
- end
-
- @testset "Complex Number Operations" begin
- i = Index([QN(0) => 1, QN(1) => 2], "i")
- j = Index([QN(0) => 3, QN(1) => 4, QN(2) => 5], "j")
-
- A = random_itensor(ComplexF64, QN(0), i, dag(j))
-
- @test flux(A) == QN(0)
- @test nnzblocks(A) == 2
-
- rA = real(A)
- iA = imag(A)
- @test nnzblocks(rA) == nnzblocks(A)
- @test nnzblocks(iA) == nnzblocks(A)
- @test norm(rA + 1im * iA - A) < 1E-8
- @test eltype(rA) == Float64
- @test eltype(iA) == Float64
-
- cA = conj(A)
- @test eltype(cA) == ComplexF64
- @test norm(cA) ≈ norm(A)
-
- B = random_itensor(Float64, QN(0), i, dag(j))
-
- cB = conj(B)
- @test eltype(cB) == Float64
- @test norm(cB) ≈ norm(B)
- end
-
- @testset "QN onehot" begin
- i = Index(QN(0) => 2, QN(1) => 2; tags="i")
-
- T = onehot(i => 1)
- @test T[i => 1] ≈ 1.0
- @test T[i => 2] ≈ 0.0
- @test T[i => 3] ≈ 0.0
- @test T[i => 4] ≈ 0.0
-
- T = onehot(i => 2)
- @test T[i => 1] ≈ 0.0
- @test T[i => 2] ≈ 1.0
- @test T[i => 3] ≈ 0.0
- @test T[i => 4] ≈ 0.0
- end
-
- @testset "setindex!" begin
- @testset "Test 1" begin
- s1 = Index([QN("N", 0, -1) => 1, QN("N", 1, -1) => 1], "s1")
- s2 = Index([QN("N", 0, -1) => 1, QN("N", 1, -1) => 1], "s2")
- A = ITensor(s1, s2)
-
- @test nnzblocks(A) == 0
- @test nnz(A) == 0
- @test hasinds(A, s1, s2)
- @test isnothing(flux(A))
-
- A[2, 1] = 1.0 / sqrt(2)
-
- @test nnzblocks(A) == 1
- @test nnz(A) == 1
- @test A[s1 => 2, s2 => 1] ≈ 1.0 / sqrt(2)
- @test flux(A) == QN("N", 1, -1)
-
- A[1, 2] = 1.0 / sqrt(2)
-
- @test nnzblocks(A) == 2
- @test nnz(A) == 2
- @test A[s1 => 2, s2 => 1] ≈ 1.0 / sqrt(2)
- @test A[s1 => 1, s2 => 2] ≈ 1.0 / sqrt(2)
- @test flux(A) == QN("N", 1, -1)
+ @testset "QN ITensor Array constructor view behavior" begin
+ d = 2
+ i = Index([QN(0) => d ÷ 2, QN(1) => d ÷ 2])
+
+ # no view
+ A = diagm(randn(Float64, d))
+ T = ITensor(A, i', dag(i); tol = 1.0e-12)
+ @test storage(T) isa NDTensors.BlockSparse{Float64}
+ A[1, 1] = 2.0
+        @test T[1, 1] ≠ 2.0
+
+ # no view
+ A = diagm(rand(Int, d))
+ T = ITensor(Int, A, i', dag(i); tol = 1.0e-12)
+ @test storage(T) isa NDTensors.BlockSparse{Int}
+ A[1, 1] = 2
+        @test T[1, 1] ≠ 2
+
+ # no view
+ A = diagm(rand(Int, d))
+ T = ITensor(A, i', dag(i); tol = 1.0e-12)
+ @test storage(T) isa NDTensors.BlockSparse{Float64}
+ A[1, 1] = 2
+        @test T[1, 1] ≠ 2
+
+ # no view
+ A = diagm(randn(Float64, d))
+ T = ITensor(A, i', dag(i); tol = 1.0e-12)
+ @test storage(T) isa NDTensors.BlockSparse{Float64}
+ A[1, 1] = 2
+        @test T[1, 1] ≠ 2
+
+ # no view
+ A = diagm(rand(Int, d))
+ T = ITensor(Int, A, i', dag(i); tol = 1.0e-12)
+ @test storage(T) isa NDTensors.BlockSparse{Int}
+ A[1, 1] = 2
+        @test T[1, 1] ≠ 2
+
+ # no view
+ A = diagm(rand(Int, d))
+ T = ITensor(A, i', dag(i); tol = 1.0e-12)
+ @test storage(T) isa NDTensors.BlockSparse{Float64}
+ A[1, 1] = 2
+        @test T[1, 1] ≠ 2
end
- @testset "Test 2" begin
- s1 = Index([QN("N", 0, -1) => 1, QN("N", 1, -1) => 1], "s1")
- s2 = Index([QN("N", 0, -1) => 1, QN("N", 1, -1) => 1], "s2")
- A = ITensor(s1, s2)
+ @testset "Constructor Leads to No Blocks" begin
+ i = Index(QN(0) => 2, QN(1) => 3; tags = "i")
+ j = Index(QN(1) => 2, QN(2) => 1; tags = "j")
+ A = ITensor(i, j)
+ @test storage(A) isa NDTensors.EmptyStorage
+ @test_throws ErrorException ITensor(QN(0), i, j)
+ end
- @test nnzblocks(A) == 0
- @test nnz(A) == 0
- @test hasinds(A, s1, s2)
- @test isnothing(flux(A))
+ @testset "ITensor iteration" begin
+ i = Index([QN(0) => 1, QN(1) => 2], "i")
+ j = Index([QN(0) => 3, QN(1) => 4, QN(2) => 5], "j")
- A[1, 2] = 1.0 / sqrt(2)
+ A = random_itensor(i, dag(j))
+ Is = eachindex(A)
+ @test length(Is) == dim(A)
+ sumA = 0.0
+ for I in Is
+ sumA += A[I]
+ end
+ @test sumA ≈ sum(ITensors.data(A))
+ sumA = 0.0
+ for a in A
+ sumA += a
+ end
+ @test sumA ≈ sum(A)
+ end
- @test nnzblocks(A) == 1
- @test nnz(A) == 1
- @test A[s1 => 1, s2 => 2] ≈ 1.0 / sqrt(2)
- @test flux(A) == QN("N", 1, -1)
+ @testset "Constructor (from Tuple)" begin
+ i = Index([QN(0) => 1, QN(1) => 2], "i")
+ j = Index([QN(0) => 3, QN(1) => 4, QN(2) => 5], "j")
- A[2, 1] = 1.0 / sqrt(2)
+ A = ITensor(QN(0), (i, dag(j)))
- @test nnzblocks(A) == 2
- @test nnz(A) == 2
- @test A[s1 => 2, s2 => 1] ≈ 1.0 / sqrt(2)
- @test A[s1 => 1, s2 => 2] ≈ 1.0 / sqrt(2)
- @test flux(A) == QN("N", 1, -1)
+ @test flux(A) == QN(0)
+ @test nnzblocks(A) == 2
end
- end
- @testset "Multiply by scalar" begin
- i = Index([QN(0) => 1, QN(1) => 2], "i")
- j = Index([QN(0) => 3, QN(1) => 4], "j")
+ @testset "Constructor (no flux specified)" begin
+ i = Index([QN(0) => 1, QN(1) => 2], "i")
+ j = Index([QN(0) => 3, QN(1) => 4, QN(2) => 5], "j")
- A = random_itensor(QN(0), i, dag(j))
+ A = ITensor(i, dag(j))
- @test flux(A) == QN(0)
- @test nnzblocks(A) == 2
+ @test flux(A) === nothing
+ @test nnzblocks(A) == 0
+ end
- B = 2 * A
+ @testset "Constructor (Tuple, no flux specified)" begin
+ i = Index([QN(0) => 1, QN(1) => 2], "i")
+ j = Index([QN(0) => 3, QN(1) => 4, QN(2) => 5], "j")
- @test flux(B) == QN(0)
- @test nnzblocks(B) == 2
+ A = ITensor((i, dag(j)))
- for ii in dim(i), jj in dim(j)
- @test 2 * A[i => ii, j => jj] == B[i => ii, j => jj]
+ @test flux(A) === nothing
+ @test nnzblocks(A) == 0
end
- end
-
- @testset "Check arrows when summing" begin
- s = siteinds("S=1/2", 4; conserve_qns=true)
- Tout = random_itensor(QN("Sz" => 2), s[2], s[1], s[3], s[4])
- Tin = random_itensor(QN("Sz" => 2), dag(s[1]), dag(s[2]), dag(s[3]), dag(s[4]))
- @test norm(Tout - Tout) < 1E-10 # this is ok
- @test_throws ErrorException (Tout + Tin) # not ok
- end
-
- @testset "Copy" begin
- s = Index([QN(0) => 1, QN(1) => 1], "s")
- T = random_itensor(QN(0), s, s')
- cT = copy(T)
- for ss in dim(s), ssp in dim(s')
- @test T[s => ss, s' => ssp] == cT[s => ss, s' => ssp]
+
+ @testset "No indices getindex" begin
+ T = ITensor(QN())
+ @test order(T) == 0
+ @test flux(T) == nothing
+ @test nnzblocks(T) == 1
+ @test T[] == 0
+
+ s = Index(QN(-1) => 1, QN(1) => 1)
+ A = ITensor(s, dag(s'))
+ B = ITensor(s', dag(s))
+ A[1, 1] = 1
+ B[2, 2] = 1
+ C = A * B
+ @test order(C) == 0
+ @test flux(C) == nothing
+ @test nnzblocks(C) == 0
+ @test C[] == 0
end
- end
- @testset "Permute" begin
- i = Index([QN(0) => 1, QN(1) => 2], "i")
- j = Index([QN(0) => 3, QN(1) => 4], "j")
+ @testset "Empty constructor" begin
+ i = Index([QN(0) => 1, QN(1) => 2], "i")
+
+ A = ITensor(i, dag(i'))
+
+ @test nnzblocks(A) == 0
+ @test nnz(A) == 0
+ @test hasinds(A, i, i')
+ @test isnothing(flux(A))
- A = random_itensor(QN(1), i, dag(j))
+ A[i => 1, i' => 1] = 1.0
- @test flux(A) == QN(1)
- @test nnzblocks(A) == 1
+ @test nnzblocks(A) == 1
+ @test nnz(A) == 1
+ @test flux(A) == QN(0)
- B = permute(A, j, i)
+ A[i => 2, i' => 2] = 1.0
- @test flux(B) == QN(1)
- @test nnzblocks(B) == 1
+ @test nnzblocks(A) == 2
+ @test nnz(A) == 5
+ @test flux(A) == QN(0)
+ end
- for ii in dim(i), jj in dim(j)
- @test A[ii, jj] == B[jj, ii]
+ @testset "Check flux when setting elements" begin
+ i = Index(QN(0) => 1, QN(1) => 1; tags = "i")
+ A = random_itensor(QN(0), i, dag(i'))
+ @test_throws ErrorException A[i => 1, i' => 2] = 1.0
end
- end
-
- @testset "Contraction" begin
- i = Index([QN(0) => 1, QN(1) => 2], "i")
- j = Index([QN(0) => 3, QN(1) => 4], "j")
-
- A = random_itensor(QN(0), i, dag(j))
-
- @test flux(A) == QN(0)
- @test nnzblocks(A) == 2
-
- B = random_itensor(QN(1), j, dag(i)')
-
- @test flux(B) == QN(1)
- @test nnzblocks(B) == 1
-
- C = A * B
-
- @test hasinds(C, i, i')
- @test flux(C) == QN(1)
- @test nnzblocks(C) == 1
- end
-
- @testset "Combine and uncombine" begin
- @testset "Combine no indices" begin
- i1 = Index([QN(0, 2) => 2, QN(1, 2) => 2], "i1")
- A = random_itensor(QN(), i1, dag(i1'))
-
- C = combiner()
- c = combinedind(C)
- @test isnothing(c)
- AC = A * C
- @test nnz(AC) == nnz(A)
- @test nnzblocks(AC) == nnzblocks(A)
- @test hassameinds(AC, A)
- @test norm(AC - A * C) ≈ 0.0
- Ap = AC * dag(C)
- @test nnz(Ap) == nnz(A)
- @test nnzblocks(Ap) == nnzblocks(A)
- @test hassameinds(Ap, A)
- @test norm(A - Ap) ≈ 0.0
+
+ @testset "Random constructor" begin
+ i = Index([QN(0) => 1, QN(1) => 2], "i")
+ j = Index([QN(0) => 3, QN(1) => 4, QN(2) => 5], "j")
+
+ A = random_itensor(QN(1), i, dag(j))
+
+ @test flux(A) == QN(1)
+ @test nnzblocks(A) == 1
+
+ B = random_itensor(i, dag(j))
+
+ @test flux(B) == QN()
+ @test nnzblocks(B) == 2
+
+ # Scalar algebra
+ C = 2 * B
+ @test C[1, 1] == 2 * B[1, 1]
+ @test flux(B) == QN()
+ @test flux(C) == QN()
+ @test nnzblocks(B) == 2
+ @test nnzblocks(C) == 2
+
+ C = B / 2
+ @test C[1, 1] == B[1, 1] / 2
+ @test flux(B) == QN()
+ @test flux(C) == QN()
+ @test nnzblocks(B) == 2
+ @test nnzblocks(C) == 2
end
- @testset "Combine set direction" begin
- i1 = Index([QN(0) => 2, QN(1) => 3], "i1")
- A = random_itensor(i1', dag(i1))
- # Test that checkflux does not throw an error:
- @test isnothing(ITensors.checkflux(A))
- C = combiner(dag(i1); dir=ITensors.Out)
- c = combinedind(C)
- @test dir(c) == ITensors.Out
- AC = A * C
- @test nnz(AC) == nnz(A)
- @test nnzblocks(AC) == nnzblocks(A)
- # Test that checkflux does not throw an error:
- @test isnothing(ITensors.checkflux(AC))
- Ap = AC * dag(C)
- @test nnz(Ap) == nnz(A)
- @test nnzblocks(Ap) == nnzblocks(A)
- @test hassameinds(Ap, A)
- # Test that checkflux does not throw an error:
- @test isnothing(ITensors.checkflux(AC))
- @test A ≈ Ap
+ @testset "eltype promotion with scalar * and /" begin
+ i = Index([QN(0) => 2, QN(1) => 3])
+ @test eltype(ITensor(1.0f0, i', dag(i)) * 2) === Float32
+ @test eltype(ITensor(1.0f0, i', dag(i)) .* 2) === Float32
+ @test eltype(ITensor(1.0f0, i', dag(i)) / 2) === Float32
+ @test eltype(ITensor(1.0f0, i', dag(i)) ./ 2) === Float32
+ @test eltype(ITensor(1.0f0, i', dag(i)) * 2.0f0) === Float32
+ @test eltype(ITensor(1.0f0, i', dag(i)) .* 2.0f0) === Float32
+ @test eltype(ITensor(1.0f0, i', dag(i)) / 2.0f0) === Float32
+ @test eltype(ITensor(1.0f0, i', dag(i)) ./ 2.0f0) === Float32
+ @test eltype(ITensor(1.0f0, i', dag(i)) * 2.0) === Float64
+ @test eltype(ITensor(1.0f0, i', dag(i)) .* 2.0) === Float64
+ @test eltype(ITensor(1.0f0, i', dag(i)) / 2.0) === Float64
+ @test eltype(ITensor(1.0f0, i', dag(i)) ./ 2.0) === Float64
end
- @testset "Order 2 (IndexSet constructor)" begin
- i1 = Index([QN(0, 2) => 2, QN(1, 2) => 2], "i1")
- A = random_itensor(QN(), i1, dag(i1'))
+ @testset "Complex Number Operations" begin
+ i = Index([QN(0) => 1, QN(1) => 2], "i")
+ j = Index([QN(0) => 3, QN(1) => 4, QN(2) => 5], "j")
- iss = [i1, dag(i1'), (i1, dag(i1')), (dag(i1'), i1)]
+ A = random_itensor(ComplexF64, QN(0), i, dag(j))
- for is in iss
- C = combiner(IndexSet(is); tags="c")
- AC = A * C
- @test nnz(AC) == nnz(A)
- Ap = AC * dag(C)
- @test nnz(Ap) == nnz(A)
- @test nnzblocks(Ap) == nnzblocks(A)
- @test norm(A - Ap) ≈ 0.0
- end
+ @test flux(A) == QN(0)
+ @test nnzblocks(A) == 2
+
+ rA = real(A)
+ iA = imag(A)
+ @test nnzblocks(rA) == nnzblocks(A)
+ @test nnzblocks(iA) == nnzblocks(A)
+ @test norm(rA + 1im * iA - A) < 1.0e-8
+ @test eltype(rA) == Float64
+ @test eltype(iA) == Float64
+
+ cA = conj(A)
+ @test eltype(cA) == ComplexF64
+ @test norm(cA) ≈ norm(A)
+
+ B = random_itensor(Float64, QN(0), i, dag(j))
+
+ cB = conj(B)
+ @test eltype(cB) == Float64
+ @test norm(cB) ≈ norm(B)
end
- @testset "Order 2" begin
- i1 = Index([QN(0, 2) => 2, QN(1, 2) => 2], "i1")
- A = random_itensor(QN(), (i1, dag(i1')))
+ @testset "QN onehot" begin
+ i = Index(QN(0) => 2, QN(1) => 2; tags = "i")
- iss = [i1, dag(i1'), (i1, dag(i1')), (dag(i1'), i1)]
+ T = onehot(i => 1)
+ @test T[i => 1] ≈ 1.0
+ @test T[i => 2] ≈ 0.0
+ @test T[i => 3] ≈ 0.0
+ @test T[i => 4] ≈ 0.0
- for is in iss
- C = combiner(is; tags="c")
- AC = A * C
- @test nnz(AC) == nnz(A)
- Ap = AC * dag(C)
- @test nnz(Ap) == nnz(A)
- @test nnzblocks(Ap) == nnzblocks(A)
- @test norm(A - Ap) ≈ 0.0
- end
+ T = onehot(i => 2)
+ @test T[i => 1] ≈ 0.0
+ @test T[i => 2] ≈ 1.0
+ @test T[i => 3] ≈ 0.0
+ @test T[i => 4] ≈ 0.0
end
- @testset "Order 3, Combine 2" begin
- i = Index([QN(0) => 2, QN(1) => 2], "i")
+ @testset "setindex!" begin
+ @testset "Test 1" begin
+ s1 = Index([QN("N", 0, -1) => 1, QN("N", 1, -1) => 1], "s1")
+ s2 = Index([QN("N", 0, -1) => 1, QN("N", 1, -1) => 1], "s2")
+ A = ITensor(s1, s2)
+
+ @test nnzblocks(A) == 0
+ @test nnz(A) == 0
+ @test hasinds(A, s1, s2)
+ @test isnothing(flux(A))
+
+ A[2, 1] = 1.0 / sqrt(2)
+
+ @test nnzblocks(A) == 1
+ @test nnz(A) == 1
+ @test A[s1 => 2, s2 => 1] ≈ 1.0 / sqrt(2)
+ @test flux(A) == QN("N", 1, -1)
+
+ A[1, 2] = 1.0 / sqrt(2)
- A = random_itensor(QN(0), i, dag(i)', dag(i)'')
+ @test nnzblocks(A) == 2
+ @test nnz(A) == 2
+ @test A[s1 => 2, s2 => 1] ≈ 1.0 / sqrt(2)
+ @test A[s1 => 1, s2 => 2] ≈ 1.0 / sqrt(2)
+ @test flux(A) == QN("N", 1, -1)
+ end
+
+ @testset "Test 2" begin
+ s1 = Index([QN("N", 0, -1) => 1, QN("N", 1, -1) => 1], "s1")
+ s2 = Index([QN("N", 0, -1) => 1, QN("N", 1, -1) => 1], "s2")
+ A = ITensor(s1, s2)
+
+ @test nnzblocks(A) == 0
+ @test nnz(A) == 0
+ @test hasinds(A, s1, s2)
+ @test isnothing(flux(A))
- C = combiner(i, dag(i)'')
- c = combinedind(C)
+ A[1, 2] = 1.0 / sqrt(2)
- AC = A * C
+ @test nnzblocks(A) == 1
+ @test nnz(A) == 1
+ @test A[s1 => 1, s2 => 2] ≈ 1.0 / sqrt(2)
+ @test flux(A) == QN("N", 1, -1)
- @test hasinds(AC, c, i')
- @test nnz(AC) == nnz(A)
+ A[2, 1] = 1.0 / sqrt(2)
+
+ @test nnzblocks(A) == 2
+ @test nnz(A) == 2
+ @test A[s1 => 2, s2 => 1] ≈ 1.0 / sqrt(2)
+ @test A[s1 => 1, s2 => 2] ≈ 1.0 / sqrt(2)
+ @test flux(A) == QN("N", 1, -1)
+ end
+ end
- for b in nzblocks(AC)
- @test flux(AC, b) == QN(0)
- end
+ @testset "Multiply by scalar" begin
+ i = Index([QN(0) => 1, QN(1) => 2], "i")
+ j = Index([QN(0) => 3, QN(1) => 4], "j")
- B = ITensor(QN(0), i', c)
- @test nnz(B) == nnz(AC)
- @test nnzblocks(B) == nnzblocks(AC)
+ A = random_itensor(QN(0), i, dag(j))
- Ap = AC * dag(C)
+ @test flux(A) == QN(0)
+ @test nnzblocks(A) == 2
- @test norm(A - Ap) == 0
- @test nnz(A) == nnz(Ap)
- @test nnzblocks(A) == nnzblocks(Ap)
- @test hassameinds(A, Ap)
+ B = 2 * A
+
+ @test flux(B) == QN(0)
+ @test nnzblocks(B) == 2
+
+ for ii in dim(i), jj in dim(j)
+ @test 2 * A[i => ii, j => jj] == B[i => ii, j => jj]
+ end
end
- @testset "Order 3" begin
- i1 = Index([QN(0, 2) => 2, QN(1, 2) => 2], "i1")
- i2 = settags(i1, "i2")
- A = random_itensor(QN(), i1, i2, dag(i1'))
-
- iss = [
- i1,
- i2,
- dag(i1'),
- (i1, i2),
- (i2, i1),
- (i1, dag(i1')),
- (dag(i1'), i1),
- (i2, dag(i1')),
- (dag(i1'), i2),
- (i1, i2, dag(i1')),
- (i1, dag(i1'), i2),
- (i2, i1, dag(i1')),
- (i2, dag(i1'), i1),
- (dag(i1'), i1, i2),
- (dag(i1'), i2, i1),
- ]
-
- for is in iss
- C = combiner(is; tags="c")
- AC = A * C
- @test nnz(AC) == nnz(A)
- Ap = AC * dag(C)
- @test nnz(Ap) == nnz(A)
- @test nnzblocks(Ap) == nnzblocks(A)
- @test norm(A - AC * dag(C)) ≈ 0.0
- end
+ @testset "Check arrows when summing" begin
+ s = siteinds("S=1/2", 4; conserve_qns = true)
+ Tout = random_itensor(QN("Sz" => 2), s[2], s[1], s[3], s[4])
+ Tin = random_itensor(QN("Sz" => 2), dag(s[1]), dag(s[2]), dag(s[3]), dag(s[4]))
+ @test norm(Tout - Tout) < 1.0e-10 # this is ok
+ @test_throws ErrorException (Tout + Tin) # not ok
end
- @testset "Order 4" begin
- i1 = Index([QN(0, 2) => 2, QN(1, 2) => 2], "i1")
- i2 = settags(i1, "i2")
- A = random_itensor(QN(), i1, i2, dag(i1'), dag(i2'))
-
- iss = [
- i1,
- i2,
- dag(i1'),
- dag(i2'),
- (i1, i2),
- (i2, i1),
- (i1, dag(i1')),
- (dag(i1'), i1),
- (i1, dag(i2')),
- (dag(i2'), i1),
- (i2, dag(i1')),
- (dag(i1'), i2),
- (i2, dag(i2')),
- (dag(i2'), i2),
- (dag(i1'), dag(i2')),
- (dag(i2'), dag(i1')),
- (i1, i2, dag(i1')),
- (i1, dag(i1'), i2),
- (i2, i1, dag(i1')),
- (i2, dag(i1'), i1),
- (dag(i1'), i1, i2),
- (dag(i1'), i2, i1),
- (i1, dag(i1'), dag(i2')),
- (i1, dag(i2'), dag(i1')),
- (dag(i1'), i1, dag(i2')),
- (dag(i1'), dag(i2'), i1),
- (dag(i2'), i1, dag(i1')),
- (dag(i2'), dag(i1'), i1),
- (i1, i2, dag(i1'), dag(i2')),
- (i1, i2, dag(i2'), dag(i1')),
- (i1, dag(i1'), i2, dag(i2')),
- (i1, dag(i1'), dag(i2'), i2),
- (i1, dag(i2'), i2, dag(i1')),
- (i1, dag(i2'), dag(i1'), i2),
- (i2, i1, dag(i1'), dag(i2')),
- (i2, i1, dag(i2'), dag(i1')),
- (i2, dag(i1'), i1, dag(i2')),
- (i2, dag(i1'), dag(i2'), i1),
- (i2, dag(i2'), i1, dag(i1')),
- (i2, dag(i2'), dag(i1'), i1),
- (dag(i1'), i2, i1, dag(i2')),
- (dag(i1'), i2, dag(i2'), i1),
- (dag(i1'), i1, i2, dag(i2')),
- (dag(i1'), i1, dag(i2'), i2),
- (dag(i1'), dag(i2'), i2, i1),
- (dag(i1'), dag(i2'), i1, i2),
- (dag(i2'), i1, dag(i1'), i2),
- (dag(i2'), i1, i2, dag(i1')),
- (dag(i2'), dag(i1'), i1, i2),
- (dag(i2'), dag(i1'), i2, i1),
- (dag(i2'), i2, i1, dag(i1')),
- (dag(i2'), i2, dag(i1'), i1),
- ]
-
- for is in iss
- C = combiner(is; tags="c")
- AC = A * C
- @test nnz(AC) == nnz(A)
- Ap = AC * dag(C)
- @test nnz(Ap) == nnz(A)
- @test nnzblocks(Ap) == nnzblocks(A)
- @test norm(A - Ap) ≈ 0.0
- end
+ @testset "Copy" begin
+ s = Index([QN(0) => 1, QN(1) => 1], "s")
+ T = random_itensor(QN(0), s, s')
+ cT = copy(T)
+ for ss in dim(s), ssp in dim(s')
+ @test T[s => ss, s' => ssp] == cT[s => ss, s' => ssp]
+ end
end
- @testset "Order 4, Combine 2, Example 1" begin
- s1 = Index(
- [
- QN(("Sz", 0), ("Nf", 0)) => 1,
- QN(("Sz", +1), ("Nf", 1)) => 1,
- QN(("Sz", -1), ("Nf", 1)) => 1,
- QN(("Sz", 0), ("Nf", 2)) => 1,
- ],
- "site,n=1",
- )
- s2 = replacetags(s1, "n=1", "n=2")
+ @testset "Permute" begin
+ i = Index([QN(0) => 1, QN(1) => 2], "i")
+ j = Index([QN(0) => 3, QN(1) => 4], "j")
- A = random_itensor(QN(), s1, s2, dag(s1)', dag(s2)')
+ A = random_itensor(QN(1), i, dag(j))
- C = combiner(dag(s1)', dag(s2)')
- c = combinedind(C)
+ @test flux(A) == QN(1)
+ @test nnzblocks(A) == 1
- AC = A * C
+ B = permute(A, j, i)
- @test norm(AC) ≈ norm(A)
- @test hasinds(AC, s1, s2, c)
- @test nnz(AC) == nnz(A)
- for b in nzblocks(AC)
- @test flux(AC, b) == QN()
- end
+ @test flux(B) == QN(1)
+ @test nnzblocks(B) == 1
- @test nnzblocks(AC) < nnz(A)
+ for ii in dim(i), jj in dim(j)
+ @test A[ii, jj] == B[jj, ii]
+ end
+ end
- B = ITensor(QN(), s1, s2, c)
- @test nnz(B) == nnz(AC)
- @test nnzblocks(B) == nnzblocks(AC)
+ @testset "Contraction" begin
+ i = Index([QN(0) => 1, QN(1) => 2], "i")
+ j = Index([QN(0) => 3, QN(1) => 4], "j")
- Ap = AC * dag(C)
+ A = random_itensor(QN(0), i, dag(j))
- @test hassameinds(A, Ap)
- @test norm(A - Ap) == 0
- @test nnz(A) == nnz(Ap)
- @test nnzblocks(A) == nnzblocks(Ap)
+ @test flux(A) == QN(0)
+ @test nnzblocks(A) == 2
+
+ B = random_itensor(QN(1), j, dag(i)')
+
+ @test flux(B) == QN(1)
+ @test nnzblocks(B) == 1
+
+ C = A * B
+
+ @test hasinds(C, i, i')
+ @test flux(C) == QN(1)
+ @test nnzblocks(C) == 1
end
- @testset "Order 4, Combine 2, Example 2" begin
- s1 = Index([QN(("Nf", 0)) => 1, QN(("Nf", 1)) => 1], "site,n=1")
- s2 = replacetags(s1, "n=1", "n=2")
+ @testset "Combine and uncombine" begin
+ @testset "Combine no indices" begin
+ i1 = Index([QN(0, 2) => 2, QN(1, 2) => 2], "i1")
+ A = random_itensor(QN(), i1, dag(i1'))
+
+ C = combiner()
+ c = combinedind(C)
+ @test isnothing(c)
+ AC = A * C
+ @test nnz(AC) == nnz(A)
+ @test nnzblocks(AC) == nnzblocks(A)
+ @test hassameinds(AC, A)
+ @test norm(AC - A * C) ≈ 0.0
+ Ap = AC * dag(C)
+ @test nnz(Ap) == nnz(A)
+ @test nnzblocks(Ap) == nnzblocks(A)
+ @test hassameinds(Ap, A)
+ @test norm(A - Ap) ≈ 0.0
+ end
+
+ @testset "Combine set direction" begin
+ i1 = Index([QN(0) => 2, QN(1) => 3], "i1")
+ A = random_itensor(i1', dag(i1))
+ # Test that checkflux does not throw an error:
+ @test isnothing(ITensors.checkflux(A))
+ C = combiner(dag(i1); dir = ITensors.Out)
+ c = combinedind(C)
+ @test dir(c) == ITensors.Out
+ AC = A * C
+ @test nnz(AC) == nnz(A)
+ @test nnzblocks(AC) == nnzblocks(A)
+ # Test that checkflux does not throw an error:
+ @test isnothing(ITensors.checkflux(AC))
+ Ap = AC * dag(C)
+ @test nnz(Ap) == nnz(A)
+ @test nnzblocks(Ap) == nnzblocks(A)
+ @test hassameinds(Ap, A)
+ # Test that checkflux does not throw an error:
+ @test isnothing(ITensors.checkflux(AC))
+ @test A ≈ Ap
+ end
- A = random_itensor(QN(), dag(s2)', s2, dag(s1)', s1)
+ @testset "Order 2 (IndexSet constructor)" begin
+ i1 = Index([QN(0, 2) => 2, QN(1, 2) => 2], "i1")
+ A = random_itensor(QN(), i1, dag(i1'))
+
+ iss = [i1, dag(i1'), (i1, dag(i1')), (dag(i1'), i1)]
+
+ for is in iss
+ C = combiner(IndexSet(is); tags = "c")
+ AC = A * C
+ @test nnz(AC) == nnz(A)
+ Ap = AC * dag(C)
+ @test nnz(Ap) == nnz(A)
+ @test nnzblocks(Ap) == nnzblocks(A)
+ @test norm(A - Ap) ≈ 0.0
+ end
+ end
- C = combiner(dag(s2)', dag(s1)')
- c = combinedind(C)
+ @testset "Order 2" begin
+ i1 = Index([QN(0, 2) => 2, QN(1, 2) => 2], "i1")
+ A = random_itensor(QN(), (i1, dag(i1')))
+
+ iss = [i1, dag(i1'), (i1, dag(i1')), (dag(i1'), i1)]
+
+ for is in iss
+ C = combiner(is; tags = "c")
+ AC = A * C
+ @test nnz(AC) == nnz(A)
+ Ap = AC * dag(C)
+ @test nnz(Ap) == nnz(A)
+ @test nnzblocks(Ap) == nnzblocks(A)
+ @test norm(A - Ap) ≈ 0.0
+ end
+ end
- AC = A * C
+ @testset "Order 3, Combine 2" begin
+ i = Index([QN(0) => 2, QN(1) => 2], "i")
- @test norm(AC) ≈ norm(A)
- @test hasinds(AC, s1, s2, c)
- @test nnz(AC) == nnz(A)
- for b in nzblocks(AC)
- @test flux(AC, b) == QN()
- end
+ A = random_itensor(QN(0), i, dag(i)', dag(i)'')
- B = ITensor(QN(), s1, s2, c)
- @test nnzblocks(B) == nnzblocks(AC)
+ C = combiner(i, dag(i)'')
+ c = combinedind(C)
- Ap = AC * dag(C)
+ AC = A * C
- @test hassameinds(A, Ap)
- @test norm(A - Ap) == 0
- @test nnz(A) == nnz(Ap)
- @test nnzblocks(A) == nnzblocks(Ap)
- end
+ @test hasinds(AC, c, i')
+ @test nnz(AC) == nnz(A)
- @testset "Order 4, Combine 2, Example 3" begin
- s1 = Index([QN(("Nf", 0)) => 1, QN(("Nf", 1)) => 1], "site,n=1")
- s2 = replacetags(s1, "n=1", "n=2")
+ for b in nzblocks(AC)
+ @test flux(AC, b) == QN(0)
+ end
- A = random_itensor(QN(), dag(s1)', s2, dag(s2)', s1)
+ B = ITensor(QN(0), i', c)
+ @test nnz(B) == nnz(AC)
+ @test nnzblocks(B) == nnzblocks(AC)
- C = combiner(dag(s2)', dag(s1)')
- c = combinedind(C)
+ Ap = AC * dag(C)
- AC = A * C
+ @test norm(A - Ap) == 0
+ @test nnz(A) == nnz(Ap)
+ @test nnzblocks(A) == nnzblocks(Ap)
+ @test hassameinds(A, Ap)
+ end
- @test norm(AC) ≈ norm(A)
- @test hasinds(AC, s1, s2, c)
- @test nnz(AC) == nnz(A)
- for b in nzblocks(AC)
- @test flux(AC, b) == QN()
- end
+ @testset "Order 3" begin
+ i1 = Index([QN(0, 2) => 2, QN(1, 2) => 2], "i1")
+ i2 = settags(i1, "i2")
+ A = random_itensor(QN(), i1, i2, dag(i1'))
+
+ iss = [
+ i1,
+ i2,
+ dag(i1'),
+ (i1, i2),
+ (i2, i1),
+ (i1, dag(i1')),
+ (dag(i1'), i1),
+ (i2, dag(i1')),
+ (dag(i1'), i2),
+ (i1, i2, dag(i1')),
+ (i1, dag(i1'), i2),
+ (i2, i1, dag(i1')),
+ (i2, dag(i1'), i1),
+ (dag(i1'), i1, i2),
+ (dag(i1'), i2, i1),
+ ]
+
+ for is in iss
+ C = combiner(is; tags = "c")
+ AC = A * C
+ @test nnz(AC) == nnz(A)
+ Ap = AC * dag(C)
+ @test nnz(Ap) == nnz(A)
+ @test nnzblocks(Ap) == nnzblocks(A)
+ @test norm(A - AC * dag(C)) ≈ 0.0
+ end
+ end
- B = ITensor(QN(), s1, s2, c)
- @test nnzblocks(B) == nnzblocks(AC)
+ @testset "Order 4" begin
+ i1 = Index([QN(0, 2) => 2, QN(1, 2) => 2], "i1")
+ i2 = settags(i1, "i2")
+ A = random_itensor(QN(), i1, i2, dag(i1'), dag(i2'))
+
+ iss = [
+ i1,
+ i2,
+ dag(i1'),
+ dag(i2'),
+ (i1, i2),
+ (i2, i1),
+ (i1, dag(i1')),
+ (dag(i1'), i1),
+ (i1, dag(i2')),
+ (dag(i2'), i1),
+ (i2, dag(i1')),
+ (dag(i1'), i2),
+ (i2, dag(i2')),
+ (dag(i2'), i2),
+ (dag(i1'), dag(i2')),
+ (dag(i2'), dag(i1')),
+ (i1, i2, dag(i1')),
+ (i1, dag(i1'), i2),
+ (i2, i1, dag(i1')),
+ (i2, dag(i1'), i1),
+ (dag(i1'), i1, i2),
+ (dag(i1'), i2, i1),
+ (i1, dag(i1'), dag(i2')),
+ (i1, dag(i2'), dag(i1')),
+ (dag(i1'), i1, dag(i2')),
+ (dag(i1'), dag(i2'), i1),
+ (dag(i2'), i1, dag(i1')),
+ (dag(i2'), dag(i1'), i1),
+ (i1, i2, dag(i1'), dag(i2')),
+ (i1, i2, dag(i2'), dag(i1')),
+ (i1, dag(i1'), i2, dag(i2')),
+ (i1, dag(i1'), dag(i2'), i2),
+ (i1, dag(i2'), i2, dag(i1')),
+ (i1, dag(i2'), dag(i1'), i2),
+ (i2, i1, dag(i1'), dag(i2')),
+ (i2, i1, dag(i2'), dag(i1')),
+ (i2, dag(i1'), i1, dag(i2')),
+ (i2, dag(i1'), dag(i2'), i1),
+ (i2, dag(i2'), i1, dag(i1')),
+ (i2, dag(i2'), dag(i1'), i1),
+ (dag(i1'), i2, i1, dag(i2')),
+ (dag(i1'), i2, dag(i2'), i1),
+ (dag(i1'), i1, i2, dag(i2')),
+ (dag(i1'), i1, dag(i2'), i2),
+ (dag(i1'), dag(i2'), i2, i1),
+ (dag(i1'), dag(i2'), i1, i2),
+ (dag(i2'), i1, dag(i1'), i2),
+ (dag(i2'), i1, i2, dag(i1')),
+ (dag(i2'), dag(i1'), i1, i2),
+ (dag(i2'), dag(i1'), i2, i1),
+ (dag(i2'), i2, i1, dag(i1')),
+ (dag(i2'), i2, dag(i1'), i1),
+ ]
+
+ for is in iss
+ C = combiner(is; tags = "c")
+ AC = A * C
+ @test nnz(AC) == nnz(A)
+ Ap = AC * dag(C)
+ @test nnz(Ap) == nnz(A)
+ @test nnzblocks(Ap) == nnzblocks(A)
+ @test norm(A - Ap) ≈ 0.0
+ end
+ end
- Ap = AC * dag(C)
+ @testset "Order 4, Combine 2, Example 1" begin
+ s1 = Index(
+ [
+ QN(("Sz", 0), ("Nf", 0)) => 1,
+ QN(("Sz", +1), ("Nf", 1)) => 1,
+ QN(("Sz", -1), ("Nf", 1)) => 1,
+ QN(("Sz", 0), ("Nf", 2)) => 1,
+ ],
+ "site,n=1",
+ )
+ s2 = replacetags(s1, "n=1", "n=2")
- @test hassameinds(A, Ap)
- @test norm(A - Ap) == 0
- @test nnz(A) == nnz(Ap)
- @test nnzblocks(A) == nnzblocks(Ap)
- end
- end
-
- @testset "Check that combiner commutes" begin
- i = Index(QN(0, 2) => 2, QN(1, 2) => 2; tags="i")
- j = settags(i, "j")
- A = random_itensor(QN(0, 2), i, j, dag(i'), dag(j'))
- C = combiner(i, j)
- @test norm(A * dag(C') * C - A * C * dag(C')) ≈ 0.0
- end
-
- @testset "Combiner for block deficient ITensor" begin
- i = Index(QN(0, 2) => 2, QN(1, 2) => 2; tags="i")
- j = settags(i, "j")
- A = ITensor(i, j, dag(i'))
- A[1, 1, 1] = 1.0
- C = combiner(i, j; tags="c")
- AC = A * C
- Ap = AC * dag(C)
- @test norm(A - Ap) ≈ 0.0
- @test norm(Ap - A) ≈ 0.0
- end
-
- @testset "Combine Complex ITensor" begin
- s1 = Index(QN(("Sz", 1)) => 1, QN(("Sz", -1)) => 1; tags="S=1/2,Site,n=1")
- s2 = Index(QN(("Sz", 1)) => 1, QN(("Sz", -1)) => 1; tags="S=1/2,Site,n=2")
-
- T = random_itensor(ComplexF64, QN("Sz", 0), s1, s2)
-
- C = combiner(s1, s2)
- CT = C * T
- @test norm(CT) ≈ norm(T)
- TT = dag(C) * CT
- @test TT ≈ T
- end
-
- @testset "Combiner bug #395" begin
- i1 = Index([QN(0) => 1, QN(1) => 2], "i1")
- i2 = Index([QN(0) => 1, QN(1) => 2], "i2")
- A = random_itensor(QN(), i1, i2, dag(i1)', dag(i2)')
- CL = combiner(i1, i2)
- CR = combiner(dag(i1)', dag(i2)')
- AC = A * CR * CL
- @test AC * dag(CR) * dag(CL) ≈ A
- end
-
- @testset "Contract to scalar" begin
- i = Index([QN(0) => 1, QN(1) => 1], "i")
- A = random_itensor(QN(0), i, dag(i'))
-
- c = A * dag(A)
-
- @test nnz(c) == 1
- @test nnzblocks(c) == 1
- @test c[] isa Float64
- @test c[] ≈ norm(A)^2
- end
-
- @testset "eigen" begin
- @testset "eigen hermitian" begin
- i = Index(QN(0) => 2, QN(1) => 3, QN(2) => 4; tags="i")
- j = settags(i, "j")
- k = settags(i, "k")
- l = settags(i, "l")
-
- A = random_itensor(QN(), i, j, dag(k), dag(l))
- A = A * prime(dag(A), (i, j))
-
- F = eigen(A; ishermitian=true, tags="x")
-
- D, U = F
- Ut = F.Vt
-
- @test storage(U) isa NDTensors.BlockSparse
- @test storage(D) isa NDTensors.DiagBlockSparse
-
- u = commonind(D, U)
- up = uniqueind(D, U)
-
- @test hastags(u, "x")
- @test plev(u) == 0
- @test hastags(up, "x")
- @test plev(up) == 1
-
- @test hassameinds(U, (i, j, u))
- @test hassameinds(D, (u, up))
-
- @test A ≈ dag(U) * D * U' atol = 1e-11
- @test A ≈ dag(U) * D * Ut atol = 1e-11
- @test A * U ≈ U' * D atol = 1e-11
- @test A * U ≈ Ut * D atol = 1e-11
- end
+ A = random_itensor(QN(), s1, s2, dag(s1)', dag(s2)')
- @testset "eigen hermitian (truncate)" begin
- i = Index(QN(0) => 2, QN(1) => 3, QN(2) => 4; tags="i")
- j = settags(i, "j")
- k = settags(i, "k")
- l = settags(i, "l")
-
- A = random_itensor(QN(), i, j, dag(k), dag(l))
- A = A * prime(dag(A), (i, j))
- for i in 1:4
- A = mapprime(A * A', 2, 1)
- end
- A = A / norm(A)
-
- cutoff = 1e-5
- F = eigen(A; ishermitian=true, tags="x", cutoff=cutoff)
-
- D, U, spec = F
- Ut = F.Vt
-
- @test storage(U) isa NDTensors.BlockSparse
- @test storage(D) isa NDTensors.DiagBlockSparse
-
- u = commonind(D, U)
- up = uniqueind(D, U)
-
- @test hastags(u, "x")
- @test plev(u) == 0
- @test hastags(up, "x")
- @test plev(up) == 1
-
- @test hassameinds(U, (i, j, u))
- @test hassameinds(D, (u, up))
-
- for b in nzblocks(A)
- @test flux(A, b) == QN(0)
- end
- for b in nzblocks(U)
- @test flux(U, b) == QN(0)
- end
- for b in nzblocks(D)
- @test flux(D, b) == QN(0)
- end
-
- Ap = dag(U) * D * U'
-
- @test norm(Ap - A) ≤ 1e-2
- @test norm(dag(U) * D * Ut - A) ≤ 1e-2
- @test minimum(dims(D)) == length(spec.eigs)
- @test minimum(dims(D)) < dim(i) * dim(j)
-
- @test spec.truncerr ≤ cutoff
- err = sqrt(1 - (Ap * dag(Ap))[] / (A * dag(A))[])
- @test err ≤ cutoff
- @test err ≈ spec.truncerr rtol = 4e-1
- end
+ C = combiner(dag(s1)', dag(s2)')
+ c = combinedind(C)
- @testset "eigen non-hermitian" begin
- i = Index(QN(0) => 2, QN(1) => 3, QN(2) => 4; tags="i")
- j = settags(i, "j")
+ AC = A * C
- A = random_itensor(QN(), i, j, dag(i'), dag(j'))
+ @test norm(AC) ≈ norm(A)
+ @test hasinds(AC, s1, s2, c)
+ @test nnz(AC) == nnz(A)
+ for b in nzblocks(AC)
+ @test flux(AC, b) == QN()
+ end
- F = eigen(A; tags="x")
+ @test nnzblocks(AC) < nnz(A)
- D, U = F
- Ut = F.Vt
+ B = ITensor(QN(), s1, s2, c)
+ @test nnz(B) == nnz(AC)
+ @test nnzblocks(B) == nnzblocks(AC)
- @test storage(U) isa NDTensors.BlockSparse
- @test storage(D) isa NDTensors.DiagBlockSparse
+ Ap = AC * dag(C)
- u = commonind(D, U)
- up = uniqueind(D, U)
+ @test hassameinds(A, Ap)
+ @test norm(A - Ap) == 0
+ @test nnz(A) == nnz(Ap)
+ @test nnzblocks(A) == nnzblocks(Ap)
+ end
- @test hastags(u, "x")
- @test plev(u) == 0
- @test hastags(up, "x")
- @test plev(up) == 1
+ @testset "Order 4, Combine 2, Example 2" begin
+ s1 = Index([QN(("Nf", 0)) => 1, QN(("Nf", 1)) => 1], "site,n=1")
+ s2 = replacetags(s1, "n=1", "n=2")
- @test A ≉ U' * D * dag(U) atol = 1e-12
- @test A ≉ Ut * D * dag(U) atol = 1e-12
- @test A * U ≈ U' * D atol = 1e-12
- @test A * U ≈ Ut * D atol = 1e-12
- end
+ A = random_itensor(QN(), dag(s2)', s2, dag(s1)', s1)
- @testset "eigen non-hermitian (general inds)" begin
- i = Index(QN(0) => 2, QN(1) => 3, QN(2) => 4; tags="i")
- j = settags(i, "j")
- ĩ, j̃ = sim(i), sim(j)
+ C = combiner(dag(s2)', dag(s1)')
+ c = combinedind(C)
- A = random_itensor(QN(), i, j, dag(ĩ), dag(j̃))
+ AC = A * C
- F = eigen(A, (i, j), (ĩ, j̃); lefttags="x", righttags="y")
+ @test norm(AC) ≈ norm(A)
+ @test hasinds(AC, s1, s2, c)
+ @test nnz(AC) == nnz(A)
+ for b in nzblocks(AC)
+ @test flux(AC, b) == QN()
+ end
- D, U = F
- Ut = F.Vt
+ B = ITensor(QN(), s1, s2, c)
+ @test nnzblocks(B) == nnzblocks(AC)
- @test storage(U) isa NDTensors.BlockSparse
- @test storage(D) isa NDTensors.DiagBlockSparse
+ Ap = AC * dag(C)
- l = uniqueind(D, U)
- r = commonind(D, U)
+ @test hassameinds(A, Ap)
+ @test norm(A - Ap) == 0
+ @test nnz(A) == nnz(Ap)
+ @test nnzblocks(A) == nnzblocks(Ap)
+ end
- @test F.l == l
- @test F.r == r
+ @testset "Order 4, Combine 2, Example 3" begin
+ s1 = Index([QN(("Nf", 0)) => 1, QN(("Nf", 1)) => 1], "site,n=1")
+ s2 = replacetags(s1, "n=1", "n=2")
- @test hastags(l, "x")
- @test plev(l) == 0
- @test hastags(r, "y")
- @test plev(r) == 0
+ A = random_itensor(QN(), dag(s1)', s2, dag(s2)', s1)
- @test hassameinds(U, (ĩ, j̃, r))
- @test hassameinds(Ut, (i, j, l))
+ C = combiner(dag(s2)', dag(s1)')
+ c = combinedind(C)
- @test A * U ≈ Ut * D atol = 1e-12
- @test A ≉ Ut * D * dag(U) atol = 1e-12
- end
+ AC = A * C
- @testset "eigen mixed arrows" begin
- i1 = Index([QN(0) => 1, QN(1) => 2], "i1")
- i2 = Index([QN(0) => 1, QN(1) => 2], "i2")
- A = random_itensor(i1, i2, dag(i1)', dag(i2)')
- F = eigen(A, (i1, i1'), (i2', i2))
- D, U = F
- Ut = F.Vt
- @test A * U ≈ Ut * D atol = 1e-12
- end
- end
-
- @testset "svd" for ElT in (Float64, ComplexF64)
- @testset "svd example 1" begin
- i = Index(QN(0) => 2, QN(1) => 2; tags="i")
- j = Index(QN(0) => 2, QN(1) => 2; tags="j")
- A = random_itensor(ElT, QN(0), i, dag(j))
- for b in nzblocks(A)
- @test flux(A, b) == QN(0)
- end
- U, S, V = svd(A, i)
-
- @test storage(U) isa NDTensors.BlockSparse
- @test storage(S) isa NDTensors.DiagBlockSparse
- @test storage(V) isa NDTensors.BlockSparse
-
- for b in nzblocks(U)
- @test flux(U, b) == QN(0)
- end
- for b in nzblocks(S)
- @test flux(S, b) == QN(0)
- end
- for b in nzblocks(V)
- @test flux(V, b) == QN(0)
- end
- @test U * S * V ≈ A atol = 1e-14
- end
+ @test norm(AC) ≈ norm(A)
+ @test hasinds(AC, s1, s2, c)
+ @test nnz(AC) == nnz(A)
+ for b in nzblocks(AC)
+ @test flux(AC, b) == QN()
+ end
- @testset "svd example 2" begin
- i = Index(QN(0) => 5, QN(1) => 6; tags="i")
- j = Index(QN(-1) => 2, QN(0) => 3, QN(1) => 4; tags="j")
- A = random_itensor(ElT, QN(0), i, j)
- for b in nzblocks(A)
- @test flux(A, b) == QN(0)
- end
- U, S, V = svd(A, i)
-
- @test storage(U) isa NDTensors.BlockSparse
- @test storage(S) isa NDTensors.DiagBlockSparse
- @test storage(V) isa NDTensors.BlockSparse
-
- for b in nzblocks(U)
- @test flux(U, b) == QN(0)
- end
- for b in nzblocks(S)
- @test flux(S, b) == QN(0)
- end
- for b in nzblocks(V)
- @test flux(V, b) == QN(0)
- end
- @test U * S * V ≈ A atol = 1e-14
- end
+ B = ITensor(QN(), s1, s2, c)
+ @test nnzblocks(B) == nnzblocks(AC)
- @testset "svd example 3" begin
- i = Index(QN(0) => 5, QN(1) => 6; tags="i")
- j = Index(QN(-1) => 2, QN(0) => 3, QN(1) => 4; tags="j")
- A = random_itensor(ElT, QN(0), i, dag(j))
- for b in nzblocks(A)
- @test flux(A, b) == QN(0)
- end
- U, S, V = svd(A, i)
-
- @test storage(U) isa NDTensors.BlockSparse
- @test storage(S) isa NDTensors.DiagBlockSparse
- @test storage(V) isa NDTensors.BlockSparse
-
- for b in nzblocks(U)
- @test flux(U, b) == QN(0)
- end
- for b in nzblocks(S)
- @test flux(S, b) == QN(0)
- end
- for b in nzblocks(V)
- @test flux(V, b) == QN(0)
- end
- @test U * S * V ≈ A atol = 1e-12
- end
+ Ap = AC * dag(C)
- @testset "svd example 4" begin
- i = Index(QN(0, 2) => 2, QN(1, 2) => 2; tags="i")
- j = settags(i, "j")
-
- A = random_itensor(ElT, QN(0, 2), i, j, dag(i'), dag(j'))
-
- U, S, V = svd(A, i, j)
-
- @test storage(U) isa NDTensors.BlockSparse
- @test storage(S) isa NDTensors.DiagBlockSparse
- @test storage(V) isa NDTensors.BlockSparse
-
- for b in nzblocks(A)
- @test flux(A, b) == QN(0, 2)
- end
- U, S, V = svd(A, i)
- for b in nzblocks(U)
- @test flux(U, b) == QN(0, 2)
- end
- for b in nzblocks(S)
- @test flux(S, b) == QN(0, 2)
- end
- for b in nzblocks(V)
- @test flux(V, b) == QN(0, 2)
- end
- @test U * S * V ≈ A atol = 1e-14
+ @test hassameinds(A, Ap)
+ @test norm(A - Ap) == 0
+ @test nnz(A) == nnz(Ap)
+ @test nnzblocks(A) == nnzblocks(Ap)
+ end
end
- @testset "svd example 5" begin
- i = Index(QN(0, 2) => 2, QN(1, 2) => 2; tags="i")
- j = settags(i, "j")
-
- A = random_itensor(ElT, QN(1, 2), i, j, dag(i'), dag(j'))
-
- U, S, V = svd(A, i, j)
-
- @test storage(U) isa NDTensors.BlockSparse
- @test storage(S) isa NDTensors.DiagBlockSparse
- @test storage(V) isa NDTensors.BlockSparse
-
- for b in nzblocks(A)
- @test flux(A, b) == QN(1, 2)
- end
- U, S, V = svd(A, i)
- for b in nzblocks(U)
- @test flux(U, b) == QN(0, 2)
- end
- for b in nzblocks(S)
- @test flux(S, b) == QN(1, 2)
- end
- for b in nzblocks(V)
- @test flux(V, b) == QN(0, 2)
- end
- @test U * S * V ≈ A atol = 1e-14
+ @testset "Check that combiner commutes" begin
+ i = Index(QN(0, 2) => 2, QN(1, 2) => 2; tags = "i")
+ j = settags(i, "j")
+ A = random_itensor(QN(0, 2), i, j, dag(i'), dag(j'))
+ C = combiner(i, j)
+ @test norm(A * dag(C') * C - A * C * dag(C')) ≈ 0.0
end
- @testset "svd example 6" begin
- i = Index(QN(0, 2) => 2, QN(1, 2) => 2; tags="i")
- j = settags(i, "j")
-
- A = random_itensor(ElT, QN(1, 2), i, j, dag(i'), dag(j'))
-
- U, S, V = svd(A, i, i')
-
- @test storage(U) isa NDTensors.BlockSparse
- @test storage(S) isa NDTensors.DiagBlockSparse
- @test storage(V) isa NDTensors.BlockSparse
-
- for b in nzblocks(A)
- @test flux(A, b) == QN(1, 2)
- end
- U, S, V = svd(A, i)
- for b in nzblocks(U)
- @test flux(U, b) == QN(0, 2)
- end
- for b in nzblocks(S)
- @test flux(S, b) == QN(1, 2)
- end
- for b in nzblocks(V)
- @test flux(V, b) == QN(0, 2)
- end
- @test U * S * V ≈ A atol = 1e-14
+ @testset "Combiner for block deficient ITensor" begin
+ i = Index(QN(0, 2) => 2, QN(1, 2) => 2; tags = "i")
+ j = settags(i, "j")
+ A = ITensor(i, j, dag(i'))
+ A[1, 1, 1] = 1.0
+ C = combiner(i, j; tags = "c")
+ AC = A * C
+ Ap = AC * dag(C)
+ @test norm(A - Ap) ≈ 0.0
+ @test norm(Ap - A) ≈ 0.0
end
- @testset "svd truncation example 1" begin
- i = Index(QN(0) => 2, QN(1) => 3; tags="i")
- j = settags(i, "j")
- A = random_itensor(ElT, QN(0), i, j, dag(i'), dag(j'))
- for i in 1:4
- A = mapprime(A * A', 2, 1)
- end
- A = A / norm(A)
-
- cutoff = 1e-5
- U, S, V, spec = svd(A, i, j; utags="x", vtags="y", cutoff=cutoff)
-
- @test storage(U) isa NDTensors.BlockSparse
- @test storage(S) isa NDTensors.DiagBlockSparse
- @test storage(V) isa NDTensors.BlockSparse
-
- u = commonind(S, U)
- v = commonind(S, V)
-
- @test hastags(u, "x")
- @test hastags(v, "y")
-
- @test hassameinds(U, (i, j, u))
- @test hassameinds(V, (i', j', v))
-
- for b in nzblocks(A)
- @test flux(A, b) == QN(0)
- end
- for b in nzblocks(U)
- @test flux(U, b) == QN(0)
- end
- for b in nzblocks(S)
- @test flux(S, b) == QN(0)
- end
- for b in nzblocks(V)
- @test flux(V, b) == QN(0)
- end
-
- Ap = U * S * V
-
- @test norm(Ap - A) ≤ 1e-2
- @test minimum(dims(S)) == length(spec.eigs)
- @test minimum(dims(S)) < dim(i) * dim(j)
-
- @test spec.truncerr ≤ cutoff
- err = real(1 - (Ap * dag(Ap))[] / (A * dag(A))[])
- @test err ≤ cutoff
- @test isapprox(err, spec.truncerr; rtol=1e-6)
+ @testset "Combine Complex ITensor" begin
+ s1 = Index(QN(("Sz", 1)) => 1, QN(("Sz", -1)) => 1; tags = "S=1/2,Site,n=1")
+ s2 = Index(QN(("Sz", 1)) => 1, QN(("Sz", -1)) => 1; tags = "S=1/2,Site,n=2")
+
+ T = random_itensor(ComplexF64, QN("Sz", 0), s1, s2)
+
+ C = combiner(s1, s2)
+ CT = C * T
+ @test norm(CT) ≈ norm(T)
+ TT = dag(C) * CT
+ @test TT ≈ T
end
- @testset "svd truncation example 2" begin
- i = Index(QN(0) => 3, QN(1) => 2; tags="i")
- j = settags(i, "j")
- A = random_itensor(ElT, QN(0), i, j, dag(i'), dag(j'))
-
- maxdim = 4
- U, S, V, spec = svd(A, i, j; utags="x", vtags="y", maxdim=maxdim)
-
- @test storage(U) isa NDTensors.BlockSparse
- @test storage(S) isa NDTensors.DiagBlockSparse
- @test storage(V) isa NDTensors.BlockSparse
-
- u = commonind(S, U)
- v = commonind(S, V)
-
- @test hastags(u, "x")
- @test hastags(v, "y")
-
- @test hassameinds(U, (i, j, u))
- @test hassameinds(V, (i', j', v))
-
- for b in nzblocks(A)
- @test flux(A, b) == QN(0)
- end
- for b in nzblocks(U)
- @test flux(U, b) == QN(0)
- end
- for b in nzblocks(S)
- @test flux(S, b) == QN(0)
- end
- for b in nzblocks(V)
- @test flux(V, b) == QN(0)
- end
-
- @test minimum(dims(S)) == maxdim
- @test minimum(dims(S)) == length(spec.eigs)
- @test minimum(dims(S)) < dim(i) * dim(j)
-
- Ap = U * S * V
- err = 1 - (Ap * dag(Ap))[] / (A * dag(A))[]
- @test isapprox(err, spec.truncerr; rtol=1e-6)
+ @testset "Combiner bug #395" begin
+ i1 = Index([QN(0) => 1, QN(1) => 2], "i1")
+ i2 = Index([QN(0) => 1, QN(1) => 2], "i2")
+ A = random_itensor(QN(), i1, i2, dag(i1)', dag(i2)')
+ CL = combiner(i1, i2)
+ CR = combiner(dag(i1)', dag(i2)')
+ AC = A * CR * CL
+ @test AC * dag(CR) * dag(CL) ≈ A
end
- @testset "svd truncation example 3" begin
- i = Index(QN(0) => 2, QN(1) => 3, QN(2) => 4; tags="i")
- j = settags(i, "j")
- A = random_itensor(ElT, QN(1), i, j, dag(i'), dag(j'))
-
- maxdim = 4
- U, S, V, spec = svd(A, i, j; utags="x", vtags="y", maxdim=maxdim)
-
- @test storage(U) isa NDTensors.BlockSparse
- @test storage(S) isa NDTensors.DiagBlockSparse
- @test storage(V) isa NDTensors.BlockSparse
-
- u = commonind(S, U)
- v = commonind(S, V)
-
- @test hastags(u, "x")
- @test hastags(v, "y")
-
- @test hassameinds(U, (i, j, u))
- @test hassameinds(V, (i', j', v))
-
- for b in nzblocks(A)
- @test flux(A, b) == QN(1)
- end
- for b in nzblocks(U)
- @test flux(U, b) == QN(0)
- end
- for b in nzblocks(S)
- @test flux(S, b) == QN(1)
- end
- for b in nzblocks(V)
- @test flux(V, b) == QN(0)
- end
-
- @test minimum(dims(S)) == maxdim
- @test minimum(dims(S)) == length(spec.eigs)
- @test minimum(dims(S)) < dim(i) * dim(j)
-
- Ap = U * S * V
- err = 1 - (Ap * dag(Ap))[] / (A * dag(A))[]
- @test isapprox(err, spec.truncerr; rtol=1e-6)
+ @testset "Contract to scalar" begin
+ i = Index([QN(0) => 1, QN(1) => 1], "i")
+ A = random_itensor(QN(0), i, dag(i'))
+
+ c = A * dag(A)
+
+ @test nnz(c) == 1
+ @test nnzblocks(c) == 1
+ @test c[] isa Float64
+ @test c[] ≈ norm(A)^2
end
- @testset "svd truncation example 4" begin
- i = Index(QN(0, 2) => 3, QN(1, 2) => 4; tags="i")
- j = settags(i, "j")
- A = random_itensor(ElT, QN(1, 2), i, j, dag(i'), dag(j'))
-
- maxdim = 4
- U, S, V, spec = svd(A, i, j; utags="x", vtags="y", maxdim=maxdim)
-
- @test storage(U) isa NDTensors.BlockSparse
- @test storage(S) isa NDTensors.DiagBlockSparse
- @test storage(V) isa NDTensors.BlockSparse
-
- u = commonind(S, U)
- v = commonind(S, V)
-
- @test hastags(u, "x")
- @test hastags(v, "y")
-
- @test hassameinds(U, (i, j, u))
- @test hassameinds(V, (i', j', v))
-
- for b in nzblocks(A)
- @test flux(A, b) == QN(1, 2)
- end
- for b in nzblocks(U)
- @test flux(U, b) == QN(0, 2)
- end
- for b in nzblocks(S)
- @test flux(S, b) == QN(1, 2)
- end
- for b in nzblocks(V)
- @test flux(V, b) == QN(0, 2)
- end
-
- @test minimum(dims(S)) == maxdim
- @test minimum(dims(S)) == length(spec.eigs)
- @test minimum(dims(S)) < dim(i) * dim(j)
-
- Ap = U * S * V
- err = 1 - (Ap * dag(Ap))[] / (A * dag(A))[]
- @test isapprox(err, spec.truncerr; rtol=1e-6)
+ @testset "eigen" begin
+ @testset "eigen hermitian" begin
+ i = Index(QN(0) => 2, QN(1) => 3, QN(2) => 4; tags = "i")
+ j = settags(i, "j")
+ k = settags(i, "k")
+ l = settags(i, "l")
+
+ A = random_itensor(QN(), i, j, dag(k), dag(l))
+ A = A * prime(dag(A), (i, j))
+
+ F = eigen(A; ishermitian = true, tags = "x")
+
+ D, U = F
+ Ut = F.Vt
+
+ @test storage(U) isa NDTensors.BlockSparse
+ @test storage(D) isa NDTensors.DiagBlockSparse
+
+ u = commonind(D, U)
+ up = uniqueind(D, U)
+
+ @test hastags(u, "x")
+ @test plev(u) == 0
+ @test hastags(up, "x")
+ @test plev(up) == 1
+
+ @test hassameinds(U, (i, j, u))
+ @test hassameinds(D, (u, up))
+
+ @test A ≈ dag(U) * D * U' atol = 1.0e-11
+ @test A ≈ dag(U) * D * Ut atol = 1.0e-11
+ @test A * U ≈ U' * D atol = 1.0e-11
+ @test A * U ≈ Ut * D atol = 1.0e-11
+ end
+
+ @testset "eigen hermitian (truncate)" begin
+ i = Index(QN(0) => 2, QN(1) => 3, QN(2) => 4; tags = "i")
+ j = settags(i, "j")
+ k = settags(i, "k")
+ l = settags(i, "l")
+
+ A = random_itensor(QN(), i, j, dag(k), dag(l))
+ A = A * prime(dag(A), (i, j))
+ for i in 1:4
+ A = mapprime(A * A', 2, 1)
+ end
+ A = A / norm(A)
+
+ cutoff = 1.0e-5
+ F = eigen(A; ishermitian = true, tags = "x", cutoff = cutoff)
+
+ D, U, spec = F
+ Ut = F.Vt
+
+ @test storage(U) isa NDTensors.BlockSparse
+ @test storage(D) isa NDTensors.DiagBlockSparse
+
+ u = commonind(D, U)
+ up = uniqueind(D, U)
+
+ @test hastags(u, "x")
+ @test plev(u) == 0
+ @test hastags(up, "x")
+ @test plev(up) == 1
+
+ @test hassameinds(U, (i, j, u))
+ @test hassameinds(D, (u, up))
+
+ for b in nzblocks(A)
+ @test flux(A, b) == QN(0)
+ end
+ for b in nzblocks(U)
+ @test flux(U, b) == QN(0)
+ end
+ for b in nzblocks(D)
+ @test flux(D, b) == QN(0)
+ end
+
+ Ap = dag(U) * D * U'
+
+ @test norm(Ap - A) ≤ 1.0e-2
+ @test norm(dag(U) * D * Ut - A) ≤ 1.0e-2
+ @test minimum(dims(D)) == length(spec.eigs)
+ @test minimum(dims(D)) < dim(i) * dim(j)
+
+ @test spec.truncerr ≤ cutoff
+ err = sqrt(1 - (Ap * dag(Ap))[] / (A * dag(A))[])
+ @test err ≤ cutoff
+ @test err ≈ spec.truncerr rtol = 4.0e-1
+ end
+
+ @testset "eigen non-hermitian" begin
+ i = Index(QN(0) => 2, QN(1) => 3, QN(2) => 4; tags = "i")
+ j = settags(i, "j")
+
+ A = random_itensor(QN(), i, j, dag(i'), dag(j'))
+
+ F = eigen(A; tags = "x")
+
+ D, U = F
+ Ut = F.Vt
+
+ @test storage(U) isa NDTensors.BlockSparse
+ @test storage(D) isa NDTensors.DiagBlockSparse
+
+ u = commonind(D, U)
+ up = uniqueind(D, U)
+
+ @test hastags(u, "x")
+ @test plev(u) == 0
+ @test hastags(up, "x")
+ @test plev(up) == 1
+
+ @test A ≉ U' * D * dag(U) atol = 1.0e-12
+ @test A ≉ Ut * D * dag(U) atol = 1.0e-12
+ @test A * U ≈ U' * D atol = 1.0e-12
+ @test A * U ≈ Ut * D atol = 1.0e-12
+ end
+
+ @testset "eigen non-hermitian (general inds)" begin
+ i = Index(QN(0) => 2, QN(1) => 3, QN(2) => 4; tags = "i")
+ j = settags(i, "j")
+ ĩ, j̃ = sim(i), sim(j)
+
+ A = random_itensor(QN(), i, j, dag(ĩ), dag(j̃))
+
+ F = eigen(A, (i, j), (ĩ, j̃); lefttags = "x", righttags = "y")
+
+ D, U = F
+ Ut = F.Vt
+
+ @test storage(U) isa NDTensors.BlockSparse
+ @test storage(D) isa NDTensors.DiagBlockSparse
+
+ l = uniqueind(D, U)
+ r = commonind(D, U)
+
+ @test F.l == l
+ @test F.r == r
+
+ @test hastags(l, "x")
+ @test plev(l) == 0
+ @test hastags(r, "y")
+ @test plev(r) == 0
+
+ @test hassameinds(U, (ĩ, j̃, r))
+ @test hassameinds(Ut, (i, j, l))
+
+ @test A * U ≈ Ut * D atol = 1.0e-12
+ @test A ≉ Ut * D * dag(U) atol = 1.0e-12
+ end
+
+ @testset "eigen mixed arrows" begin
+ i1 = Index([QN(0) => 1, QN(1) => 2], "i1")
+ i2 = Index([QN(0) => 1, QN(1) => 2], "i2")
+ A = random_itensor(i1, i2, dag(i1)', dag(i2)')
+ F = eigen(A, (i1, i1'), (i2', i2))
+ D, U = F
+ Ut = F.Vt
+ @test A * U ≈ Ut * D atol = 1.0e-12
+ end
end
- # This test happened to have different behavior because of an
- # accidental degeneracy in the singular values with a change
- # in the random number generator intoduced in Julia 1.7
- if (ElT == Float64) && (VERSION ≥ v"1.7.0-0")
- @testset "svd truncation example 5 (accidental degeneracy)" begin
- i = Index(QN(0, 2) => 2, QN(1, 2) => 3; tags="i")
- j = settags(i, "j")
- copy!(
- Random.default_rng(),
- Xoshiro(
- 0x4ea8944fb1006ec4, 0xec60c93e7daf5295, 0x7c967091b08e72b3, 0x13bc39357cddea97
- ),
- )
- A = random_itensor(ElT, QN(1, 2), i, j, dag(i'), dag(j'))
+ @testset "svd" for ElT in (Float64, ComplexF64)
+ @testset "svd example 1" begin
+ i = Index(QN(0) => 2, QN(1) => 2; tags = "i")
+ j = Index(QN(0) => 2, QN(1) => 2; tags = "j")
+ A = random_itensor(ElT, QN(0), i, dag(j))
+ for b in nzblocks(A)
+ @test flux(A, b) == QN(0)
+ end
+ U, S, V = svd(A, i)
+
+ @test storage(U) isa NDTensors.BlockSparse
+ @test storage(S) isa NDTensors.DiagBlockSparse
+ @test storage(V) isa NDTensors.BlockSparse
+
+ for b in nzblocks(U)
+ @test flux(U, b) == QN(0)
+ end
+ for b in nzblocks(S)
+ @test flux(S, b) == QN(0)
+ end
+ for b in nzblocks(V)
+ @test flux(V, b) == QN(0)
+ end
+ @test U * S * V ≈ A atol = 1.0e-14
+ end
- maxdim = 4
- U, S, V, spec = svd(A, i, j'; utags="x", vtags="y", maxdim=maxdim)
+ @testset "svd example 2" begin
+ i = Index(QN(0) => 5, QN(1) => 6; tags = "i")
+ j = Index(QN(-1) => 2, QN(0) => 3, QN(1) => 4; tags = "j")
+ A = random_itensor(ElT, QN(0), i, j)
+ for b in nzblocks(A)
+ @test flux(A, b) == QN(0)
+ end
+ U, S, V = svd(A, i)
+
+ @test storage(U) isa NDTensors.BlockSparse
+ @test storage(S) isa NDTensors.DiagBlockSparse
+ @test storage(V) isa NDTensors.BlockSparse
+
+ for b in nzblocks(U)
+ @test flux(U, b) == QN(0)
+ end
+ for b in nzblocks(S)
+ @test flux(S, b) == QN(0)
+ end
+ for b in nzblocks(V)
+ @test flux(V, b) == QN(0)
+ end
+ @test U * S * V ≈ A atol = 1.0e-14
+ end
- @test storage(U) isa NDTensors.BlockSparse
- @test storage(S) isa NDTensors.DiagBlockSparse
- @test storage(V) isa NDTensors.BlockSparse
+ @testset "svd example 3" begin
+ i = Index(QN(0) => 5, QN(1) => 6; tags = "i")
+ j = Index(QN(-1) => 2, QN(0) => 3, QN(1) => 4; tags = "j")
+ A = random_itensor(ElT, QN(0), i, dag(j))
+ for b in nzblocks(A)
+ @test flux(A, b) == QN(0)
+ end
+ U, S, V = svd(A, i)
+
+ @test storage(U) isa NDTensors.BlockSparse
+ @test storage(S) isa NDTensors.DiagBlockSparse
+ @test storage(V) isa NDTensors.BlockSparse
+
+ for b in nzblocks(U)
+ @test flux(U, b) == QN(0)
+ end
+ for b in nzblocks(S)
+ @test flux(S, b) == QN(0)
+ end
+ for b in nzblocks(V)
+ @test flux(V, b) == QN(0)
+ end
+ @test U * S * V ≈ A atol = 1.0e-12
+ end
- u = commonind(S, U)
- v = commonind(S, V)
+ @testset "svd example 4" begin
+ i = Index(QN(0, 2) => 2, QN(1, 2) => 2; tags = "i")
+ j = settags(i, "j")
+
+ A = random_itensor(ElT, QN(0, 2), i, j, dag(i'), dag(j'))
+
+ U, S, V = svd(A, i, j)
+
+ @test storage(U) isa NDTensors.BlockSparse
+ @test storage(S) isa NDTensors.DiagBlockSparse
+ @test storage(V) isa NDTensors.BlockSparse
+
+ for b in nzblocks(A)
+ @test flux(A, b) == QN(0, 2)
+ end
+ U, S, V = svd(A, i)
+ for b in nzblocks(U)
+ @test flux(U, b) == QN(0, 2)
+ end
+ for b in nzblocks(S)
+ @test flux(S, b) == QN(0, 2)
+ end
+ for b in nzblocks(V)
+ @test flux(V, b) == QN(0, 2)
+ end
+ @test U * S * V ≈ A atol = 1.0e-14
+ end
- @test hastags(u, "x")
- @test hastags(v, "y")
+ @testset "svd example 5" begin
+ i = Index(QN(0, 2) => 2, QN(1, 2) => 2; tags = "i")
+ j = settags(i, "j")
+
+ A = random_itensor(ElT, QN(1, 2), i, j, dag(i'), dag(j'))
+
+ U, S, V = svd(A, i, j)
+
+ @test storage(U) isa NDTensors.BlockSparse
+ @test storage(S) isa NDTensors.DiagBlockSparse
+ @test storage(V) isa NDTensors.BlockSparse
+
+ for b in nzblocks(A)
+ @test flux(A, b) == QN(1, 2)
+ end
+ U, S, V = svd(A, i)
+ for b in nzblocks(U)
+ @test flux(U, b) == QN(0, 2)
+ end
+ for b in nzblocks(S)
+ @test flux(S, b) == QN(1, 2)
+ end
+ for b in nzblocks(V)
+ @test flux(V, b) == QN(0, 2)
+ end
+ @test U * S * V ≈ A atol = 1.0e-14
+ end
- @test hassameinds(U, (i, j', u))
- @test hassameinds(V, (i', j, v))
+ @testset "svd example 6" begin
+ i = Index(QN(0, 2) => 2, QN(1, 2) => 2; tags = "i")
+ j = settags(i, "j")
+
+ A = random_itensor(ElT, QN(1, 2), i, j, dag(i'), dag(j'))
+
+ U, S, V = svd(A, i, i')
+
+ @test storage(U) isa NDTensors.BlockSparse
+ @test storage(S) isa NDTensors.DiagBlockSparse
+ @test storage(V) isa NDTensors.BlockSparse
+
+ for b in nzblocks(A)
+ @test flux(A, b) == QN(1, 2)
+ end
+ U, S, V = svd(A, i)
+ for b in nzblocks(U)
+ @test flux(U, b) == QN(0, 2)
+ end
+ for b in nzblocks(S)
+ @test flux(S, b) == QN(1, 2)
+ end
+ for b in nzblocks(V)
+ @test flux(V, b) == QN(0, 2)
+ end
+ @test U * S * V ≈ A atol = 1.0e-14
+ end
- for b in nzblocks(A)
- @test flux(A, b) == QN(1, 2)
+ @testset "svd truncation example 1" begin
+ i = Index(QN(0) => 2, QN(1) => 3; tags = "i")
+ j = settags(i, "j")
+ A = random_itensor(ElT, QN(0), i, j, dag(i'), dag(j'))
+ for i in 1:4
+ A = mapprime(A * A', 2, 1)
+ end
+ A = A / norm(A)
+
+ cutoff = 1.0e-5
+ U, S, V, spec = svd(A, i, j; utags = "x", vtags = "y", cutoff = cutoff)
+
+ @test storage(U) isa NDTensors.BlockSparse
+ @test storage(S) isa NDTensors.DiagBlockSparse
+ @test storage(V) isa NDTensors.BlockSparse
+
+ u = commonind(S, U)
+ v = commonind(S, V)
+
+ @test hastags(u, "x")
+ @test hastags(v, "y")
+
+ @test hassameinds(U, (i, j, u))
+ @test hassameinds(V, (i', j', v))
+
+ for b in nzblocks(A)
+ @test flux(A, b) == QN(0)
+ end
+ for b in nzblocks(U)
+ @test flux(U, b) == QN(0)
+ end
+ for b in nzblocks(S)
+ @test flux(S, b) == QN(0)
+ end
+ for b in nzblocks(V)
+ @test flux(V, b) == QN(0)
+ end
+
+ Ap = U * S * V
+
+ @test norm(Ap - A) ≤ 1.0e-2
+ @test minimum(dims(S)) == length(spec.eigs)
+ @test minimum(dims(S)) < dim(i) * dim(j)
+
+ @test spec.truncerr ≤ cutoff
+ err = real(1 - (Ap * dag(Ap))[] / (A * dag(A))[])
+ @test err ≤ cutoff
+ @test isapprox(err, spec.truncerr; rtol = 1.0e-6)
end
- for b in nzblocks(U)
- @test flux(U, b) == QN(0, 2)
+
+ @testset "svd truncation example 2" begin
+ i = Index(QN(0) => 3, QN(1) => 2; tags = "i")
+ j = settags(i, "j")
+ A = random_itensor(ElT, QN(0), i, j, dag(i'), dag(j'))
+
+ maxdim = 4
+ U, S, V, spec = svd(A, i, j; utags = "x", vtags = "y", maxdim = maxdim)
+
+ @test storage(U) isa NDTensors.BlockSparse
+ @test storage(S) isa NDTensors.DiagBlockSparse
+ @test storage(V) isa NDTensors.BlockSparse
+
+ u = commonind(S, U)
+ v = commonind(S, V)
+
+ @test hastags(u, "x")
+ @test hastags(v, "y")
+
+ @test hassameinds(U, (i, j, u))
+ @test hassameinds(V, (i', j', v))
+
+ for b in nzblocks(A)
+ @test flux(A, b) == QN(0)
+ end
+ for b in nzblocks(U)
+ @test flux(U, b) == QN(0)
+ end
+ for b in nzblocks(S)
+ @test flux(S, b) == QN(0)
+ end
+ for b in nzblocks(V)
+ @test flux(V, b) == QN(0)
+ end
+
+ @test minimum(dims(S)) == maxdim
+ @test minimum(dims(S)) == length(spec.eigs)
+ @test minimum(dims(S)) < dim(i) * dim(j)
+
+ Ap = U * S * V
+ err = 1 - (Ap * dag(Ap))[] / (A * dag(A))[]
+ @test isapprox(err, spec.truncerr; rtol = 1.0e-6)
end
- for b in nzblocks(S)
- @test flux(S, b) == QN(1, 2)
+
+ @testset "svd truncation example 3" begin
+ i = Index(QN(0) => 2, QN(1) => 3, QN(2) => 4; tags = "i")
+ j = settags(i, "j")
+ A = random_itensor(ElT, QN(1), i, j, dag(i'), dag(j'))
+
+ maxdim = 4
+ U, S, V, spec = svd(A, i, j; utags = "x", vtags = "y", maxdim = maxdim)
+
+ @test storage(U) isa NDTensors.BlockSparse
+ @test storage(S) isa NDTensors.DiagBlockSparse
+ @test storage(V) isa NDTensors.BlockSparse
+
+ u = commonind(S, U)
+ v = commonind(S, V)
+
+ @test hastags(u, "x")
+ @test hastags(v, "y")
+
+ @test hassameinds(U, (i, j, u))
+ @test hassameinds(V, (i', j', v))
+
+ for b in nzblocks(A)
+ @test flux(A, b) == QN(1)
+ end
+ for b in nzblocks(U)
+ @test flux(U, b) == QN(0)
+ end
+ for b in nzblocks(S)
+ @test flux(S, b) == QN(1)
+ end
+ for b in nzblocks(V)
+ @test flux(V, b) == QN(0)
+ end
+
+ @test minimum(dims(S)) == maxdim
+ @test minimum(dims(S)) == length(spec.eigs)
+ @test minimum(dims(S)) < dim(i) * dim(j)
+
+ Ap = U * S * V
+ err = 1 - (Ap * dag(Ap))[] / (A * dag(A))[]
+ @test isapprox(err, spec.truncerr; rtol = 1.0e-6)
end
- for b in nzblocks(V)
- @test flux(V, b) == QN(0, 2)
+
+ @testset "svd truncation example 4" begin
+ i = Index(QN(0, 2) => 3, QN(1, 2) => 4; tags = "i")
+ j = settags(i, "j")
+ A = random_itensor(ElT, QN(1, 2), i, j, dag(i'), dag(j'))
+
+ maxdim = 4
+ U, S, V, spec = svd(A, i, j; utags = "x", vtags = "y", maxdim = maxdim)
+
+ @test storage(U) isa NDTensors.BlockSparse
+ @test storage(S) isa NDTensors.DiagBlockSparse
+ @test storage(V) isa NDTensors.BlockSparse
+
+ u = commonind(S, U)
+ v = commonind(S, V)
+
+ @test hastags(u, "x")
+ @test hastags(v, "y")
+
+ @test hassameinds(U, (i, j, u))
+ @test hassameinds(V, (i', j', v))
+
+ for b in nzblocks(A)
+ @test flux(A, b) == QN(1, 2)
+ end
+ for b in nzblocks(U)
+ @test flux(U, b) == QN(0, 2)
+ end
+ for b in nzblocks(S)
+ @test flux(S, b) == QN(1, 2)
+ end
+ for b in nzblocks(V)
+ @test flux(V, b) == QN(0, 2)
+ end
+
+ @test minimum(dims(S)) == maxdim
+ @test minimum(dims(S)) == length(spec.eigs)
+ @test minimum(dims(S)) < dim(i) * dim(j)
+
+ Ap = U * S * V
+ err = 1 - (Ap * dag(Ap))[] / (A * dag(A))[]
+ @test isapprox(err, spec.truncerr; rtol = 1.0e-6)
end
- @test minimum(dims(S)) == maxdim - 1
- @test_broken minimum(dims(S)) == length(spec.eigs)
- @test minimum(dims(S)) < dim(i) * dim(j)
+ # This test happened to have different behavior because of an
+ # accidental degeneracy in the singular values with a change
+            # in the random number generator introduced in Julia 1.7
+ if (ElT == Float64) && (VERSION ≥ v"1.7.0-0")
+ @testset "svd truncation example 5 (accidental degeneracy)" begin
+ i = Index(QN(0, 2) => 2, QN(1, 2) => 3; tags = "i")
+ j = settags(i, "j")
+ copy!(
+ Random.default_rng(),
+ Xoshiro(
+ 0x4ea8944fb1006ec4, 0xec60c93e7daf5295, 0x7c967091b08e72b3, 0x13bc39357cddea97
+ ),
+ )
+ A = random_itensor(ElT, QN(1, 2), i, j, dag(i'), dag(j'))
+
+ maxdim = 4
+ U, S, V, spec = svd(A, i, j'; utags = "x", vtags = "y", maxdim = maxdim)
+
+ @test storage(U) isa NDTensors.BlockSparse
+ @test storage(S) isa NDTensors.DiagBlockSparse
+ @test storage(V) isa NDTensors.BlockSparse
+
+ u = commonind(S, U)
+ v = commonind(S, V)
+
+ @test hastags(u, "x")
+ @test hastags(v, "y")
+
+ @test hassameinds(U, (i, j', u))
+ @test hassameinds(V, (i', j, v))
+
+ for b in nzblocks(A)
+ @test flux(A, b) == QN(1, 2)
+ end
+ for b in nzblocks(U)
+ @test flux(U, b) == QN(0, 2)
+ end
+ for b in nzblocks(S)
+ @test flux(S, b) == QN(1, 2)
+ end
+ for b in nzblocks(V)
+ @test flux(V, b) == QN(0, 2)
+ end
+
+ @test minimum(dims(S)) == maxdim - 1
+ @test_broken minimum(dims(S)) == length(spec.eigs)
+ @test minimum(dims(S)) < dim(i) * dim(j)
+
+ _, Sfull, _ = svd(A, i, j'; utags = "x", vtags = "y")
+ s = sort(diag(array(Sfull)); rev = true)
+ @test (s[4] - s[5]) / norm(s) < 1.0e-4
+
+ Ap = U * S * V
+ err = 1 - (Ap * dag(Ap))[] / (A * dag(A))[]
+ @test_broken isapprox(err, spec.truncerr; rtol = 1.0e-6)
+ end
+ end
- _, Sfull, _ = svd(A, i, j'; utags="x", vtags="y")
- s = sort(diag(array(Sfull)); rev=true)
- @test (s[4] - s[5]) / norm(s) < 1e-4
+ @testset "svd truncation example 5" begin
+ i = Index(QN(0, 2) => 2, QN(1, 2) => 3; tags = "i")
+ j = settags(i, "j")
+ Random.seed!(123)
+ A = random_itensor(ElT, QN(1, 2), i, j, dag(i'), dag(j'))
+
+ maxdim = 4
+ U, S, V, spec = svd(A, i, j'; utags = "x", vtags = "y", maxdim = maxdim)
+
+ @test storage(U) isa NDTensors.BlockSparse
+ @test storage(S) isa NDTensors.DiagBlockSparse
+ @test storage(V) isa NDTensors.BlockSparse
+
+ u = commonind(S, U)
+ v = commonind(S, V)
+
+ @test hastags(u, "x")
+ @test hastags(v, "y")
+
+ @test hassameinds(U, (i, j', u))
+ @test hassameinds(V, (i', j, v))
+
+ for b in nzblocks(A)
+ @test flux(A, b) == QN(1, 2)
+ end
+ for b in nzblocks(U)
+ @test flux(U, b) == QN(0, 2)
+ end
+ for b in nzblocks(S)
+ @test flux(S, b) == QN(1, 2)
+ end
+ for b in nzblocks(V)
+ @test flux(V, b) == QN(0, 2)
+ end
+
+ @test minimum(dims(S)) == maxdim
+ @test minimum(dims(S)) == length(spec.eigs)
+ @test minimum(dims(S)) < dim(i) * dim(j)
+
+ Ap = U * S * V
+ err = 1 - (Ap * dag(Ap))[] / (A * dag(A))[]
+ @test isapprox(err, spec.truncerr; rtol = 1.0e-6)
+ end
- Ap = U * S * V
- err = 1 - (Ap * dag(Ap))[] / (A * dag(A))[]
- @test_broken isapprox(err, spec.truncerr; rtol=1e-6)
- end
- end
+ @testset "issue #231" begin
+ l = Index(
+ QN("Nf", -1, -1) => 2, QN("Nf", 0, -1) => 4, QN("Nf", +1, -1) => 2; tags = "CMB,Link"
+ )
+ s = Index(QN("Nf", 0, -1) => 1, QN("Nf", 1, -1) => 1; tags = "Fermion,Site,n=4")
+ r = Index(
+ QN("Nf", 1, -1) => 2, QN("Nf", 0, -1) => 1, QN("Nf", 1, -1) => 2; tags = "Link,u"
+ )
+
+ A = ITensor(ElT, l, s, dag(r))
+
+ insertblock!(A, Block(2, 1, 2))
+ insertblock!(A, Block(1, 2, 2))
+ insertblock!(A, Block(2, 2, 3))
+
+ for b in nzblocks(A)
+ @test flux(A, b) == QN()
+ end
+
+ U, S, V = svd(A, l, s)
+
+ for b in nzblocks(U)
+ @test flux(U, b) == QN()
+ end
+ for b in nzblocks(S)
+ @test flux(S, b) == QN()
+ end
+ for b in nzblocks(V)
+ @test flux(V, b) == QN()
+ end
+ @test U * S * V ≈ A atol = 1.0e-13
+ end
- @testset "svd truncation example 5" begin
- i = Index(QN(0, 2) => 2, QN(1, 2) => 3; tags="i")
- j = settags(i, "j")
- Random.seed!(123)
- A = random_itensor(ElT, QN(1, 2), i, j, dag(i'), dag(j'))
-
- maxdim = 4
- U, S, V, spec = svd(A, i, j'; utags="x", vtags="y", maxdim=maxdim)
-
- @test storage(U) isa NDTensors.BlockSparse
- @test storage(S) isa NDTensors.DiagBlockSparse
- @test storage(V) isa NDTensors.BlockSparse
-
- u = commonind(S, U)
- v = commonind(S, V)
-
- @test hastags(u, "x")
- @test hastags(v, "y")
-
- @test hassameinds(U, (i, j', u))
- @test hassameinds(V, (i', j, v))
-
- for b in nzblocks(A)
- @test flux(A, b) == QN(1, 2)
- end
- for b in nzblocks(U)
- @test flux(U, b) == QN(0, 2)
- end
- for b in nzblocks(S)
- @test flux(S, b) == QN(1, 2)
- end
- for b in nzblocks(V)
- @test flux(V, b) == QN(0, 2)
- end
-
- @test minimum(dims(S)) == maxdim
- @test minimum(dims(S)) == length(spec.eigs)
- @test minimum(dims(S)) < dim(i) * dim(j)
-
- Ap = U * S * V
- err = 1 - (Ap * dag(Ap))[] / (A * dag(A))[]
- @test isapprox(err, spec.truncerr; rtol=1e-6)
- end
+ @testset "SVD no truncate bug" begin
+ s = Index(
+ QN("Sz", -4) => 1,
+ QN("Sz", -2) => 4,
+ QN("Sz", 0) => 6,
+ QN("Sz", 2) => 4,
+ QN("Sz", 4) => 1,
+ )
+ A = ITensor(ElT, s, s')
+ insertblock!(A, Block(5, 2))
+ insertblock!(A, Block(4, 3))
+ insertblock!(A, Block(3, 4))
+ insertblock!(A, Block(2, 5))
+ randn!(A)
+ U, S, V = svd(A, s)
+ @test U * S * V ≈ A
+ end
- @testset "issue #231" begin
- l = Index(
- QN("Nf", -1, -1) => 2, QN("Nf", 0, -1) => 4, QN("Nf", +1, -1) => 2; tags="CMB,Link"
- )
- s = Index(QN("Nf", 0, -1) => 1, QN("Nf", 1, -1) => 1; tags="Fermion,Site,n=4")
- r = Index(
- QN("Nf", 1, -1) => 2, QN("Nf", 0, -1) => 1, QN("Nf", 1, -1) => 2; tags="Link,u"
- )
-
- A = ITensor(ElT, l, s, dag(r))
-
- insertblock!(A, Block(2, 1, 2))
- insertblock!(A, Block(1, 2, 2))
- insertblock!(A, Block(2, 2, 3))
-
- for b in nzblocks(A)
- @test flux(A, b) == QN()
- end
-
- U, S, V = svd(A, l, s)
-
- for b in nzblocks(U)
- @test flux(U, b) == QN()
- end
- for b in nzblocks(S)
- @test flux(S, b) == QN()
- end
- for b in nzblocks(V)
- @test flux(V, b) == QN()
- end
- @test U * S * V ≈ A atol = 1e-13
- end
+ @testset "SVD no truncate" begin
+ s = Index(
+ QN("Sz", -4) => 1,
+ QN("Sz", -2) => 4,
+ QN("Sz", 0) => 6,
+ QN("Sz", 2) => 4,
+ QN("Sz", 4) => 1,
+ )
+ A = ITensor(ElT, s, s')
+ insertblock!(A, Block(5, 1))
+ insertblock!(A, Block(4, 2))
+ insertblock!(A, Block(3, 3))
+ insertblock!(A, Block(2, 4))
+ insertblock!(A, Block(1, 5))
+ U, S, V = svd(A, s)
+ @test dims(S) == dims(A)
+ @test U * S * V ≈ A
+ end
- @testset "SVD no truncate bug" begin
- s = Index(
- QN("Sz", -4) => 1,
- QN("Sz", -2) => 4,
- QN("Sz", 0) => 6,
- QN("Sz", 2) => 4,
- QN("Sz", 4) => 1,
- )
- A = ITensor(ElT, s, s')
- insertblock!(A, Block(5, 2))
- insertblock!(A, Block(4, 3))
- insertblock!(A, Block(3, 4))
- insertblock!(A, Block(2, 5))
- randn!(A)
- U, S, V = svd(A, s)
- @test U * S * V ≈ A
+ @testset "SVD truncate zeros" begin
+ s = Index(
+ QN("Sz", -4) => 1,
+ QN("Sz", -2) => 4,
+ QN("Sz", 0) => 6,
+ QN("Sz", 2) => 4,
+ QN("Sz", 4) => 1,
+ )
+ A = ITensor(ElT, s, s')
+ insertblock!(A, Block(5, 1))
+ insertblock!(A, Block(4, 2))
+ insertblock!(A, Block(3, 3))
+ insertblock!(A, Block(2, 4))
+ insertblock!(A, Block(1, 5))
+ U, S, V = svd(A, s; cutoff = 0)
+ @test dims(S) == (0, 0)
+ @test U * S * V ≈ A
+ end
end
- @testset "SVD no truncate" begin
- s = Index(
- QN("Sz", -4) => 1,
- QN("Sz", -2) => 4,
- QN("Sz", 0) => 6,
- QN("Sz", 2) => 4,
- QN("Sz", 4) => 1,
- )
- A = ITensor(ElT, s, s')
- insertblock!(A, Block(5, 1))
- insertblock!(A, Block(4, 2))
- insertblock!(A, Block(3, 3))
- insertblock!(A, Block(2, 4))
- insertblock!(A, Block(1, 5))
- U, S, V = svd(A, s)
- @test dims(S) == dims(A)
- @test U * S * V ≈ A
- end
+ @testset "Replace Index" begin
+ i = Index([QN(0) => 1, QN(1) => 2], "i")
+ j = Index([QN(0) => 3, QN(1) => 4, QN(2) => 5], "j")
- @testset "SVD truncate zeros" begin
- s = Index(
- QN("Sz", -4) => 1,
- QN("Sz", -2) => 4,
- QN("Sz", 0) => 6,
- QN("Sz", 2) => 4,
- QN("Sz", 4) => 1,
- )
- A = ITensor(ElT, s, s')
- insertblock!(A, Block(5, 1))
- insertblock!(A, Block(4, 2))
- insertblock!(A, Block(3, 3))
- insertblock!(A, Block(2, 4))
- insertblock!(A, Block(1, 5))
- U, S, V = svd(A, s; cutoff=0)
- @test dims(S) == (0, 0)
- @test U * S * V ≈ A
- end
- end
-
- @testset "Replace Index" begin
- i = Index([QN(0) => 1, QN(1) => 2], "i")
- j = Index([QN(0) => 3, QN(1) => 4, QN(2) => 5], "j")
-
- T1 = random_itensor(QN(1), i, j)
- T2 = copy(T1)
-
- k = Index([QN(0) => 1, QN(1) => 2], "k")
-
- replaceind!(T1, i, k)
- @test hasind(T1, k)
- @test dir(inds(T1)[1]) == dir(i)
-
- # Check that replaceind! keeps
- # original Arrow direction
- replaceind!(T2, i, dag(k))
- @test hasind(T2, k)
- @test dir(inds(T2)[1]) == dir(i)
- @test dir(inds(T2)[1]) != dir(dag(k))
- end
-
- @testset "BlockSparse dag copy behavior" begin
- i = Index(QN(0) => 2, QN(1) => 2; tags="i")
- j = Index(QN(0) => 2, QN(1) => 2; tags="j")
-
- v1 = random_itensor(QN(1), i, j)
- orig_elt = v1[1, 3]
- cv1 = dag(v1; allow_alias=true)
- cv1[1, 3] = 123.45
- @test v1[1, 3] ≈ cv1[1, 3]
-
- v1 = random_itensor(QN(1), i, j)
- orig_elt = v1[1, 3]
- cv1 = dag(ITensors.AllowAlias(), v1)
- cv1[1, 3] = 123.45
- @test v1[1, 3] ≈ cv1[1, 3]
-
- v2 = random_itensor(QN(1), i, j)
- orig_elt = v2[1, 3]
- cv2 = dag(v2; allow_alias=false)
- cv2[1, 3] = 123.45
- @test v2[1, 3] ≈ orig_elt
-
- v2 = random_itensor(QN(1), i, j)
- orig_elt = v2[1, 3]
- cv2 = dag(ITensors.NeverAlias(), v2)
- cv2[1, 3] = 123.45
- @test v2[1, 3] ≈ orig_elt
-
- v3 = random_itensor(ComplexF64, QN(1), i, j)
- orig_elt = v3[1, 3]
- cv3 = dag(v3; allow_alias=true)
- cv3[1, 3] = 123.45
- @test v3[1, 3] ≈ orig_elt
-
- v3 = random_itensor(ComplexF64, QN(1), i, j)
- orig_elt = v3[1, 3]
- cv3 = dag(ITensors.AllowAlias(), v3)
- cv3[1, 3] = 123.45
- @test v3[1, 3] ≈ orig_elt
-
- v4 = random_itensor(ComplexF64, QN(1), i, j)
- orig_elt = v4[1, 3]
- cv4 = dag(v4; allow_alias=false)
- cv4[1, 3] = 123.45
- @test v4[1, 3] ≈ orig_elt
-
- v4 = random_itensor(ComplexF64, QN(1), i, j)
- orig_elt = v4[1, 3]
- cv4 = dag(ITensors.NeverAlias(), v4)
- cv4[1, 3] = 123.45
- @test v4[1, 3] ≈ orig_elt
- end
-
- @testset "exponentiate" begin
- @testset "Simple arrows" begin
- i1 = Index([QN(0) => 1, QN(1) => 2], "i1")
- i2 = Index([QN(0) => 1, QN(1) => 2], "i2")
- A = random_itensor(QN(), i1, i2, dag(i1)', dag(i2)')
- Aexp = exp(A)
- Amat = Array(A, i1, i2, i1', i2')
- Amatexp = reshape(exp(reshape(Amat, 9, 9)), 3, 3, 3, 3)
- @test Array(Aexp, i1, i2, i1', i2') ≈ Amatexp rtol = 1e-14
- @test flux(Aexp) == QN()
- @test length(setdiff(inds(Aexp), inds(A))) == 0
-
- @test exp(A, (i1, i2), (i1', i2')) ≈ Aexp rtol = 5e-14
-
- # test the case where indices are permuted
- A = random_itensor(QN(), i1, dag(i1)', dag(i2)', i2)
- Aexp = exp(A, (i1, i2), (i1', i2'))
- Amat = Array(A, i1, i2, i1', i2')
- Amatexp = reshape(exp(reshape(Amat, 9, 9)), 3, 3, 3, 3)
- @test Array(Aexp, i1, i2, i1', i2') ≈ Amatexp rtol = 1e-14
-
- # test exponentiation in the Hermitian case
- i1 = Index([QN(0) => 2, QN(1) => 2, QN(2) => 3], "i1")
- A = random_itensor(QN(), i1, dag(i1)')
- Ad = dag(swapinds(A, IndexSet(i1), IndexSet(dag(i1)')))
- Ah = A + Ad + 1e-10 * random_itensor(QN(), i1, dag(i1)')
- Amat = Array(Ah, i1', i1)
- Aexp = exp(Ah; ishermitian=true)
- Amatexp = exp(LinearAlgebra.Hermitian(Amat))
- @test Array(Aexp, i1, i1') ≈ Amatexp rtol = 5e-14
- end
+ T1 = random_itensor(QN(1), i, j)
+ T2 = copy(T1)
- @testset "Regression test for exp of QN ITensor with missing diagonal blocks" begin
- i = Index([QN(0) => 2, QN(1) => 3])
- A = ITensor(i', dag(i))
- A[1, 1] = 1.2
- expA = exp(A; ishermitian=false)
- for n in 1:mindim(A)
- @test expA[n, n] == exp(A[n, n])
- end
- @test expA ≈ exp(dense(A))
- expA = exp(A; ishermitian=true)
- for n in 1:mindim(A)
- @test expA[n, n] == exp(A[n, n])
- end
- @test expA ≈ exp(dense(A))
- end
+ k = Index([QN(0) => 1, QN(1) => 2], "k")
- @testset "diag" for ElType in (Float64, ComplexF64)
- χ = [QN(0) => 1, QN(1) => 2]
- i, j = Index.((χ,), ("i", "j"))
- A = random_itensor(ElType, i, j)
- d = diag(A)
- @test d isa DenseTensor{ElType,1}
- for n in 1:dim(χ)
- @test d[n] == A[n, n]
- end
- end
+ replaceind!(T1, i, k)
+ @test hasind(T1, k)
+ @test dir(inds(T1)[1]) == dir(i)
- @testset "diag" for ElType in (Float64, ComplexF64)
- χ = [QN(0) => 1, QN(1) => 2]
- i, j = Index.((χ,), ("i", "j"))
- A = random_itensor(ElType, i, j)
- _, S, _ = svd(A, i)
- d = diag(S)
- @test d isa DenseTensor{real(ElType),1}
- for n in 1:diaglength(S)
- @test d[n] == S[n, n]
- end
+ # Check that replaceind! keeps
+ # original Arrow direction
+ replaceind!(T2, i, dag(k))
+ @test hasind(T2, k)
+ @test dir(inds(T2)[1]) == dir(i)
+ @test dir(inds(T2)[1]) != dir(dag(k))
end
- @testset "Mixed arrows" begin
- i1 = Index([QN(0) => 1, QN(1) => 2], "i1")
- i2 = Index([QN(0) => 1, QN(1) => 2], "i2")
- A = random_itensor(i1, i2, dag(i1)', dag(i2)')
- expA = exp(A, (i1, i1'), (i2', i2))
- @test exp(dense(A), (i1, i1'), (i2', i2)) ≈ dense(expA)
+ @testset "BlockSparse dag copy behavior" begin
+ i = Index(QN(0) => 2, QN(1) => 2; tags = "i")
+ j = Index(QN(0) => 2, QN(1) => 2; tags = "j")
+
+ v1 = random_itensor(QN(1), i, j)
+ orig_elt = v1[1, 3]
+ cv1 = dag(v1; allow_alias = true)
+ cv1[1, 3] = 123.45
+ @test v1[1, 3] ≈ cv1[1, 3]
+
+ v1 = random_itensor(QN(1), i, j)
+ orig_elt = v1[1, 3]
+ cv1 = dag(ITensors.AllowAlias(), v1)
+ cv1[1, 3] = 123.45
+ @test v1[1, 3] ≈ cv1[1, 3]
+
+ v2 = random_itensor(QN(1), i, j)
+ orig_elt = v2[1, 3]
+ cv2 = dag(v2; allow_alias = false)
+ cv2[1, 3] = 123.45
+ @test v2[1, 3] ≈ orig_elt
+
+ v2 = random_itensor(QN(1), i, j)
+ orig_elt = v2[1, 3]
+ cv2 = dag(ITensors.NeverAlias(), v2)
+ cv2[1, 3] = 123.45
+ @test v2[1, 3] ≈ orig_elt
+
+ v3 = random_itensor(ComplexF64, QN(1), i, j)
+ orig_elt = v3[1, 3]
+ cv3 = dag(v3; allow_alias = true)
+ cv3[1, 3] = 123.45
+ @test v3[1, 3] ≈ orig_elt
+
+ v3 = random_itensor(ComplexF64, QN(1), i, j)
+ orig_elt = v3[1, 3]
+ cv3 = dag(ITensors.AllowAlias(), v3)
+ cv3[1, 3] = 123.45
+ @test v3[1, 3] ≈ orig_elt
+
+ v4 = random_itensor(ComplexF64, QN(1), i, j)
+ orig_elt = v4[1, 3]
+ cv4 = dag(v4; allow_alias = false)
+ cv4[1, 3] = 123.45
+ @test v4[1, 3] ≈ orig_elt
+
+ v4 = random_itensor(ComplexF64, QN(1), i, j)
+ orig_elt = v4[1, 3]
+ cv4 = dag(ITensors.NeverAlias(), v4)
+ cv4[1, 3] = 123.45
+ @test v4[1, 3] ≈ orig_elt
end
- @testset "Test contraction direction error" begin
- i = Index([QN(0) => 1, QN(1) => 1], "i")
- A = random_itensor(i', dag(i))
- A² = A' * A
- @test dense(A²) ≈ dense(A') * dense(A)
- @test_throws ErrorException A' * dag(A)
- end
+ @testset "exponentiate" begin
+ @testset "Simple arrows" begin
+ i1 = Index([QN(0) => 1, QN(1) => 2], "i1")
+ i2 = Index([QN(0) => 1, QN(1) => 2], "i2")
+ A = random_itensor(QN(), i1, i2, dag(i1)', dag(i2)')
+ Aexp = exp(A)
+ Amat = Array(A, i1, i2, i1', i2')
+ Amatexp = reshape(exp(reshape(Amat, 9, 9)), 3, 3, 3, 3)
+ @test Array(Aexp, i1, i2, i1', i2') ≈ Amatexp rtol = 1.0e-14
+ @test flux(Aexp) == QN()
+ @test length(setdiff(inds(Aexp), inds(A))) == 0
+
+ @test exp(A, (i1, i2), (i1', i2')) ≈ Aexp rtol = 5.0e-14
+
+ # test the case where indices are permuted
+ A = random_itensor(QN(), i1, dag(i1)', dag(i2)', i2)
+ Aexp = exp(A, (i1, i2), (i1', i2'))
+ Amat = Array(A, i1, i2, i1', i2')
+ Amatexp = reshape(exp(reshape(Amat, 9, 9)), 3, 3, 3, 3)
+ @test Array(Aexp, i1, i2, i1', i2') ≈ Amatexp rtol = 1.0e-14
+
+ # test exponentiation in the Hermitian case
+ i1 = Index([QN(0) => 2, QN(1) => 2, QN(2) => 3], "i1")
+ A = random_itensor(QN(), i1, dag(i1)')
+ Ad = dag(swapinds(A, IndexSet(i1), IndexSet(dag(i1)')))
+ Ah = A + Ad + 1.0e-10 * random_itensor(QN(), i1, dag(i1)')
+ Amat = Array(Ah, i1', i1)
+ Aexp = exp(Ah; ishermitian = true)
+ Amatexp = exp(LinearAlgebra.Hermitian(Amat))
+ @test Array(Aexp, i1, i1') ≈ Amatexp rtol = 5.0e-14
+ end
- @testset "Contraction with scalar ITensor" begin
- i = Index([QN(0) => 2, QN(1) => 2])
- A = random_itensor(i', dag(i))
- A1 = A * ITensor(1)
- A2 = ITensor(1) * A
- @test A1 ≈ A
- @test A2 ≈ A
- end
- end
-
- @testset "directsum" begin
- x = Index([QN(0) => 1, QN(1) => 1], "x")
- i1 = Index([QN(0) => 1, QN(1) => 2], "i1")
- j1 = Index([QN(0) => 2, QN(1) => 2], "j1")
- i2 = Index([QN(0) => 2, QN(1) => 3], "i2")
- j2 = Index([QN(0) => 3, QN(1) => 3], "j2")
-
- A1 = random_itensor(i1, x, j1)
- A2 = random_itensor(x, j2, i2)
- S, s = ITensors.directsum(A1 => (i1, j1), A2 => (i2, j2); tags=["sum_i", "sum_j"])
-
- @test hassameinds(S, (x, s...))
- @test hastags(s[1], "sum_i")
- @test hastags(s[2], "sum_j")
-
- for vx in 1:dim(x)
- proj = dag(onehot(x => vx))
- A1_vx = A1 * proj
- A2_vx = A2 * proj
- S_vx = S * proj
- for m in 1:dim(s[1]), n in 1:dim(s[2])
- if m ≤ dim(i1) && n ≤ dim(j1)
- @test S_vx[s[1] => m, s[2] => n] == A1_vx[i1 => m, j1 => n]
- elseif m > dim(i1) && n > dim(j1)
- @test S_vx[s[1] => m, s[2] => n] == A2_vx[i2 => m - dim(i1), j2 => n - dim(j1)]
- else
- @test S_vx[s[1] => m, s[2] => n] == 0
+ @testset "Regression test for exp of QN ITensor with missing diagonal blocks" begin
+ i = Index([QN(0) => 2, QN(1) => 3])
+ A = ITensor(i', dag(i))
+ A[1, 1] = 1.2
+ expA = exp(A; ishermitian = false)
+ for n in 1:mindim(A)
+ @test expA[n, n] == exp(A[n, n])
+ end
+ @test expA ≈ exp(dense(A))
+ expA = exp(A; ishermitian = true)
+ for n in 1:mindim(A)
+ @test expA[n, n] == exp(A[n, n])
+ end
+ @test expA ≈ exp(dense(A))
+ end
+
+ @testset "diag" for ElType in (Float64, ComplexF64)
+ χ = [QN(0) => 1, QN(1) => 2]
+ i, j = Index.((χ,), ("i", "j"))
+ A = random_itensor(ElType, i, j)
+ d = diag(A)
+ @test d isa DenseTensor{ElType, 1}
+ for n in 1:dim(χ)
+ @test d[n] == A[n, n]
+ end
+ end
+
+ @testset "diag" for ElType in (Float64, ComplexF64)
+ χ = [QN(0) => 1, QN(1) => 2]
+ i, j = Index.((χ,), ("i", "j"))
+ A = random_itensor(ElType, i, j)
+ _, S, _ = svd(A, i)
+ d = diag(S)
+ @test d isa DenseTensor{real(ElType), 1}
+ for n in 1:diaglength(S)
+ @test d[n] == S[n, n]
+ end
+ end
+
+ @testset "Mixed arrows" begin
+ i1 = Index([QN(0) => 1, QN(1) => 2], "i1")
+ i2 = Index([QN(0) => 1, QN(1) => 2], "i2")
+ A = random_itensor(i1, i2, dag(i1)', dag(i2)')
+ expA = exp(A, (i1, i1'), (i2', i2))
+ @test exp(dense(A), (i1, i1'), (i2', i2)) ≈ dense(expA)
+ end
+
+ @testset "Test contraction direction error" begin
+ i = Index([QN(0) => 1, QN(1) => 1], "i")
+ A = random_itensor(i', dag(i))
+ A² = A' * A
+ @test dense(A²) ≈ dense(A') * dense(A)
+ @test_throws ErrorException A' * dag(A)
+ end
+
+ @testset "Contraction with scalar ITensor" begin
+ i = Index([QN(0) => 2, QN(1) => 2])
+ A = random_itensor(i', dag(i))
+ A1 = A * ITensor(1)
+ A2 = ITensor(1) * A
+ @test A1 ≈ A
+ @test A2 ≈ A
end
- end
end
- end
-
- @testset "Negate QN ITensor Regression Test" begin
- s = siteind("S=1/2"; conserve_qns=true)
-
- A = ITensor(s', dag(s))
- A[1, 1] = 1.0
-
- @test length(ITensors.blockoffsets(ITensors.tensor(A))) == 1
- B = -A # there was a bug where doing -A would
- # increase the number of blocks of A's storage
- @test length(ITensors.blockoffsets(ITensors.tensor(A))) == 1
- end
-
- @testset "removeqns and removeqn" begin
- s = siteind("Electron"; conserve_qns=true)
- T = op("c†↑", s)
-
- @test hasqns(s)
- @test hasqns(T)
- @test qn(s, 1) == QN(("Nf", 0, -1), ("Sz", 0))
- @test qn(s, 2) == QN(("Nf", 1, -1), ("Sz", 1))
- @test qn(s, 3) == QN(("Nf", 1, -1), ("Sz", -1))
- @test qn(s, 4) == QN(("Nf", 2, -1), ("Sz", 0))
- @test blockdim(s, 1) == 1
- @test blockdim(s, 2) == 1
- @test blockdim(s, 3) == 1
- @test blockdim(s, 4) == 1
- @test nblocks(s) == 4
- @test dim(s) == 4
-
- s1 = removeqns(s)
- T1 = removeqns(T)
- @test !hasqns(s1)
- @test !hasqns(T1)
- @test nblocks(s1) == 1
- @test dim(s1) == 4
- for I in eachindex(T1)
- @test T1[I] == T[I]
+
+ @testset "directsum" begin
+ x = Index([QN(0) => 1, QN(1) => 1], "x")
+ i1 = Index([QN(0) => 1, QN(1) => 2], "i1")
+ j1 = Index([QN(0) => 2, QN(1) => 2], "j1")
+ i2 = Index([QN(0) => 2, QN(1) => 3], "i2")
+ j2 = Index([QN(0) => 3, QN(1) => 3], "j2")
+
+ A1 = random_itensor(i1, x, j1)
+ A2 = random_itensor(x, j2, i2)
+ S, s = ITensors.directsum(A1 => (i1, j1), A2 => (i2, j2); tags = ["sum_i", "sum_j"])
+
+ @test hassameinds(S, (x, s...))
+ @test hastags(s[1], "sum_i")
+ @test hastags(s[2], "sum_j")
+
+ for vx in 1:dim(x)
+ proj = dag(onehot(x => vx))
+ A1_vx = A1 * proj
+ A2_vx = A2 * proj
+ S_vx = S * proj
+ for m in 1:dim(s[1]), n in 1:dim(s[2])
+ if m ≤ dim(i1) && n ≤ dim(j1)
+ @test S_vx[s[1] => m, s[2] => n] == A1_vx[i1 => m, j1 => n]
+ elseif m > dim(i1) && n > dim(j1)
+ @test S_vx[s[1] => m, s[2] => n] == A2_vx[i2 => m - dim(i1), j2 => n - dim(j1)]
+ else
+ @test S_vx[s[1] => m, s[2] => n] == 0
+ end
+ end
+ end
end
- s2 = removeqn(s, "Sz")
- T2 = removeqn(T, "Sz")
- @test hasqns(s2)
- @test hasqns(T2)
- @test nnzblocks(T2) == 2
- @test nblocks(s2) == 3
- @test nblocks(T2) == (3, 3)
- @test qn(s2, 1) == QN(("Nf", 0, -1))
- @test qn(s2, 2) == QN(("Nf", 1, -1))
- @test qn(s2, 3) == QN(("Nf", 2, -1))
- @test blockdim(s2, 1) == 1
- @test blockdim(s2, 2) == 2
- @test blockdim(s2, 3) == 1
- @test dim(s2) == 4
- for I in eachindex(T2)
- @test T2[I] == T[I]
+ @testset "Negate QN ITensor Regression Test" begin
+ s = siteind("S=1/2"; conserve_qns = true)
+
+ A = ITensor(s', dag(s))
+ A[1, 1] = 1.0
+
+ @test length(ITensors.blockoffsets(ITensors.tensor(A))) == 1
+ B = -A # there was a bug where doing -A would
+ # increase the number of blocks of A's storage
+ @test length(ITensors.blockoffsets(ITensors.tensor(A))) == 1
end
- s3 = removeqn(s, "Nf")
- T3 = removeqn(T, "Nf")
- @test hasqns(s3)
- @test hasqns(T3)
- @test nnzblocks(T3) == 2
- @test nblocks(s3) == 4
- @test nblocks(T3) == (4, 4)
- @test qn(s3, 1) == QN(("Sz", 0))
- @test qn(s3, 2) == QN(("Sz", 1))
- @test qn(s3, 3) == QN(("Sz", -1))
- @test qn(s3, 4) == QN(("Sz", 0))
- @test blockdim([QN(0) => 1, QN(1) => 2], 1) == 1
- @test blockdim([QN(0) => 1, QN(1) => 2], 2) == 2
- @test blockdim(s3, 1) == 1
- @test blockdim(s3, 2) == 1
- @test blockdim(s3, 3) == 1
- @test blockdim(s3, 4) == 1
- @test dim(s3) == 4
- for I in eachindex(T3)
- @test T3[I] == T[I]
+ @testset "removeqns and removeqn" begin
+ s = siteind("Electron"; conserve_qns = true)
+ T = op("c†↑", s)
+
+ @test hasqns(s)
+ @test hasqns(T)
+ @test qn(s, 1) == QN(("Nf", 0, -1), ("Sz", 0))
+ @test qn(s, 2) == QN(("Nf", 1, -1), ("Sz", 1))
+ @test qn(s, 3) == QN(("Nf", 1, -1), ("Sz", -1))
+ @test qn(s, 4) == QN(("Nf", 2, -1), ("Sz", 0))
+ @test blockdim(s, 1) == 1
+ @test blockdim(s, 2) == 1
+ @test blockdim(s, 3) == 1
+ @test blockdim(s, 4) == 1
+ @test nblocks(s) == 4
+ @test dim(s) == 4
+
+ s1 = removeqns(s)
+ T1 = removeqns(T)
+ @test !hasqns(s1)
+ @test !hasqns(T1)
+ @test nblocks(s1) == 1
+ @test dim(s1) == 4
+ for I in eachindex(T1)
+ @test T1[I] == T[I]
+ end
+
+ s2 = removeqn(s, "Sz")
+ T2 = removeqn(T, "Sz")
+ @test hasqns(s2)
+ @test hasqns(T2)
+ @test nnzblocks(T2) == 2
+ @test nblocks(s2) == 3
+ @test nblocks(T2) == (3, 3)
+ @test qn(s2, 1) == QN(("Nf", 0, -1))
+ @test qn(s2, 2) == QN(("Nf", 1, -1))
+ @test qn(s2, 3) == QN(("Nf", 2, -1))
+ @test blockdim(s2, 1) == 1
+ @test blockdim(s2, 2) == 2
+ @test blockdim(s2, 3) == 1
+ @test dim(s2) == 4
+ for I in eachindex(T2)
+ @test T2[I] == T[I]
+ end
+
+ s3 = removeqn(s, "Nf")
+ T3 = removeqn(T, "Nf")
+ @test hasqns(s3)
+ @test hasqns(T3)
+ @test nnzblocks(T3) == 2
+ @test nblocks(s3) == 4
+ @test nblocks(T3) == (4, 4)
+ @test qn(s3, 1) == QN(("Sz", 0))
+ @test qn(s3, 2) == QN(("Sz", 1))
+ @test qn(s3, 3) == QN(("Sz", -1))
+ @test qn(s3, 4) == QN(("Sz", 0))
+ @test blockdim([QN(0) => 1, QN(1) => 2], 1) == 1
+ @test blockdim([QN(0) => 1, QN(1) => 2], 2) == 2
+ @test blockdim(s3, 1) == 1
+ @test blockdim(s3, 2) == 1
+ @test blockdim(s3, 3) == 1
+ @test blockdim(s3, 4) == 1
+ @test dim(s3) == 4
+ for I in eachindex(T3)
+ @test T3[I] == T[I]
+ end
+ @test -[QN(0) => 1, QN(1) => 2] == [QN(0) => 1, QN(-1) => 2]
+ @test !ITensors.have_same_qns([QN(0) => 1, QN(0) => 2, QN(("Sz", 2)) => 1])
end
- @test -[QN(0) => 1, QN(1) => 2] == [QN(0) => 1, QN(-1) => 2]
- @test !ITensors.have_same_qns([QN(0) => 1, QN(0) => 2, QN(("Sz", 2)) => 1])
- end
end
end # module
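
As a point of reference for the reformatted QN block-sparse tests above, the truncated SVD pattern they exercise reduces to the following minimal sketch (the index structure, seed, and `maxdim` value mirror "svd truncation example 5"; treat it as an illustration rather than a verbatim extract of any one test):

```julia
using ITensors, Random

Random.seed!(123)
i = Index(QN(0, 2) => 2, QN(1, 2) => 3; tags = "i")
j = settags(i, "j")
A = random_itensor(QN(1, 2), i, j, dag(i'), dag(j'))

# Keep at most 4 singular values; `spec` reports the discarded weight.
U, S, V, spec = svd(A, i, j'; maxdim = 4)

Ap = U * S * V
err = 1 - (Ap * dag(Ap))[] / (A * dag(A))[]
isapprox(err, spec.truncerr; rtol = 1.0e-6)  # reconstruction error matches the reported truncation error
```
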
diff --git a/test/base/test_readwrite.jl b/test/base/test_readwrite.jl
index 5f8d47cb2e..f6a38e13aa 100644
--- a/test/base/test_readwrite.jl
+++ b/test/base/test_readwrite.jl
@@ -6,182 +6,182 @@ using Test: @test, @testset
include(joinpath(@__DIR__, "utils", "util.jl"))
@testset "HDF5 Read and Write" begin
- i = Index(2, "i")
- j = Index(3, "j")
- k = Index(4, "k")
-
- @testset "TagSet" begin
- ts = TagSet("A,Site,n=2")
- h5open(joinpath(@__DIR__, "data.h5"), "w") do fo
- write(fo, "tags", ts)
+ i = Index(2, "i")
+ j = Index(3, "j")
+ k = Index(4, "k")
+
+ @testset "TagSet" begin
+ ts = TagSet("A,Site,n=2")
+ h5open(joinpath(@__DIR__, "data.h5"), "w") do fo
+ write(fo, "tags", ts)
+ end
+
+ h5open(joinpath(@__DIR__, "data.h5"), "r") do fi
+ rts = read(fi, "tags", TagSet)
+ @test rts == ts
+ end
+ end
+
+ @testset "Index" begin
+ i = Index(3, "Site,S=1")
+ h5open(joinpath(@__DIR__, "data.h5"), "w") do fo
+ write(fo, "index", i)
+ end
+
+ h5open(joinpath(@__DIR__, "data.h5"), "r") do fi
+ ri = read(fi, "index", Index)
+ @test ri == i
+ end
+
+ # primed Index
+ i = Index(3, "Site,S=1")
+ i = prime(i, 2)
+ h5open(joinpath(@__DIR__, "data.h5"), "w") do fo
+ write(fo, "index", i)
+ end
+
+ h5open(joinpath(@__DIR__, "data.h5"), "r") do fi
+ ri = read(fi, "index", Index)
+ @test ri == i
+ end
+ end
+
+ @testset "IndexSet" begin
+ is = IndexSet(i, j, k)
+
+ h5open(joinpath(@__DIR__, "data.h5"), "w") do fo
+ write(fo, "inds", is)
+ end
+
+ h5open(joinpath(@__DIR__, "data.h5"), "r") do fi
+ ris = read(fi, "inds", IndexSet)
+ @test ris == is
+ end
+ end
+
+ @testset "Dense ITensor" begin
+
+ # default constructed case
+ T = ITensor()
+
+ h5open(joinpath(@__DIR__, "data.h5"), "w") do fo
+ write(fo, "defaultT", T)
+ end
+
+ h5open(joinpath(@__DIR__, "data.h5"), "r") do fi
+ rT = read(fi, "defaultT", ITensor)
+ @test typeof(storage(T)) == typeof(storage(ITensor()))
+ end
+
+ # real case
+ T = random_itensor(i, j, k)
+
+ h5open(joinpath(@__DIR__, "data.h5"), "w") do fo
+ write(fo, "T", T)
+ end
+
+ h5open(joinpath(@__DIR__, "data.h5"), "r") do fi
+ rT = read(fi, "T", ITensor)
+ @test norm(rT - T) / norm(T) < 1.0e-10
+ end
+
+ # complex case
+ T = random_itensor(ComplexF64, i, j, k)
+
+ h5open(joinpath(@__DIR__, "data.h5"), "w") do fo
+ write(fo, "complexT", T)
+ end
+
+ h5open(joinpath(@__DIR__, "data.h5"), "r") do fi
+ rT = read(fi, "complexT", ITensor)
+ @test norm(rT - T) / norm(T) < 1.0e-10
+ end
+ end
+
+ @testset "Delta ITensor" begin
+ #
+ # Delta ITensor
+ #
+ Δ = δ(i, i')
+ cΔ = δ(ComplexF64, i, i')
+ h5open(joinpath(@__DIR__, "data.h5"), "w") do fo
+ fo["delta_tensor"] = Δ
+ fo["c_delta_tensor"] = cΔ
+ end
+
+ h5open(joinpath(@__DIR__, "data.h5"), "r") do fi
+ rΔ = read(fi, "delta_tensor", ITensor)
+ rcΔ = read(fi, "c_delta_tensor", ITensor)
+ @test rΔ ≈ Δ
+ @test rcΔ ≈ cΔ
+ end
end
-
- h5open(joinpath(@__DIR__, "data.h5"), "r") do fi
- rts = read(fi, "tags", TagSet)
- @test rts == ts
- end
- end
-
- @testset "Index" begin
- i = Index(3, "Site,S=1")
- h5open(joinpath(@__DIR__, "data.h5"), "w") do fo
- write(fo, "index", i)
- end
-
- h5open(joinpath(@__DIR__, "data.h5"), "r") do fi
- ri = read(fi, "index", Index)
- @test ri == i
- end
-
- # primed Index
- i = Index(3, "Site,S=1")
- i = prime(i, 2)
- h5open(joinpath(@__DIR__, "data.h5"), "w") do fo
- write(fo, "index", i)
- end
-
- h5open(joinpath(@__DIR__, "data.h5"), "r") do fi
- ri = read(fi, "index", Index)
- @test ri == i
- end
- end
-
- @testset "IndexSet" begin
- is = IndexSet(i, j, k)
-
- h5open(joinpath(@__DIR__, "data.h5"), "w") do fo
- write(fo, "inds", is)
- end
-
- h5open(joinpath(@__DIR__, "data.h5"), "r") do fi
- ris = read(fi, "inds", IndexSet)
- @test ris == is
- end
- end
-
- @testset "Dense ITensor" begin
-
- # default constructed case
- T = ITensor()
-
- h5open(joinpath(@__DIR__, "data.h5"), "w") do fo
- write(fo, "defaultT", T)
- end
-
- h5open(joinpath(@__DIR__, "data.h5"), "r") do fi
- rT = read(fi, "defaultT", ITensor)
- @test typeof(storage(T)) == typeof(storage(ITensor()))
- end
-
- # real case
- T = random_itensor(i, j, k)
-
- h5open(joinpath(@__DIR__, "data.h5"), "w") do fo
- write(fo, "T", T)
- end
-
- h5open(joinpath(@__DIR__, "data.h5"), "r") do fi
- rT = read(fi, "T", ITensor)
- @test norm(rT - T) / norm(T) < 1E-10
+ @testset "Diag ITensor" begin
+
+ #
+ # Diag ITensor
+ #
+ dk = dim(k)
+ D = diag_itensor(randn(dk), k, k')
+ C = diag_itensor(randn(ComplexF64, dk), k, k')
+ h5open(joinpath(@__DIR__, "data.h5"), "w") do fo
+ fo["diag_tensor"] = D
+ fo["c_diag_tensor"] = C
+ end
+
+ h5open(joinpath(@__DIR__, "data.h5"), "r") do fi
+ rD = read(fi, "diag_tensor", ITensor)
+ rC = read(fi, "c_diag_tensor", ITensor)
+ @test rD ≈ D
+ @test rC ≈ C
+ end
end
- # complex case
- T = random_itensor(ComplexF64, i, j, k)
+ @testset "QN ITensor" begin
+ i = Index(QN("A", -1) => 3, QN("A", 0) => 4, QN("A", +1) => 3; tags = "i")
+ j = Index(QN("A", -2) => 2, QN("A", 0) => 3, QN("A", +2) => 2; tags = "j")
+ k = Index(QN("A", -1) => 1, QN("A", 0) => 1, QN("A", +1) => 1; tags = "k")
- h5open(joinpath(@__DIR__, "data.h5"), "w") do fo
- write(fo, "complexT", T)
- end
+ # real case
+ T = random_itensor(QN("A", 1), i, j, k)
- h5open(joinpath(@__DIR__, "data.h5"), "r") do fi
- rT = read(fi, "complexT", ITensor)
- @test norm(rT - T) / norm(T) < 1E-10
- end
- end
+ h5open(joinpath(@__DIR__, "data.h5"), "w") do fo
+ write(fo, "T", T)
+ end
- @testset "Delta ITensor" begin
- #
- # Delta ITensor
- #
- Δ = δ(i, i')
- cΔ = δ(ComplexF64, i, i')
- h5open(joinpath(@__DIR__, "data.h5"), "w") do fo
- fo["delta_tensor"] = Δ
- fo["c_delta_tensor"] = cΔ
- end
+ h5open(joinpath(@__DIR__, "data.h5"), "r") do fi
+ rT = read(fi, "T", ITensor)
+ @test rT ≈ T
+ end
+
+ # complex case
+ T = random_itensor(ComplexF64, i, j, k)
- h5open(joinpath(@__DIR__, "data.h5"), "r") do fi
- rΔ = read(fi, "delta_tensor", ITensor)
- rcΔ = read(fi, "c_delta_tensor", ITensor)
- @test rΔ ≈ Δ
- @test rcΔ ≈ cΔ
+ h5open(joinpath(@__DIR__, "data.h5"), "w") do fo
+ write(fo, "complexT", T)
+ end
+
+ h5open(joinpath(@__DIR__, "data.h5"), "r") do fi
+ rT = read(fi, "complexT", ITensor)
+ @test rT ≈ T
+ end
+ end
+
+ @testset "DownwardCompat" begin
+ h5open(joinpath(@__DIR__, "utils", "testfilev0.1.41.h5"), "r") do fi
+ ITensorName = "ITensorv0.1.41"
+
+ # ITensor version <= v0.1.41 uses the `store` key for ITensor data storage
+ # whereas v >= 0.2 uses `storage` as the key
+ @test haskey(read(fi, ITensorName), "store")
+ @test read(fi, ITensorName, ITensor) isa ITensor
+ end
end
- end
- @testset "Diag ITensor" begin
#
- # Diag ITensor
+ # Clean up the test hdf5 file
#
- dk = dim(k)
- D = diag_itensor(randn(dk), k, k')
- C = diag_itensor(randn(ComplexF64, dk), k, k')
- h5open(joinpath(@__DIR__, "data.h5"), "w") do fo
- fo["diag_tensor"] = D
- fo["c_diag_tensor"] = C
- end
-
- h5open(joinpath(@__DIR__, "data.h5"), "r") do fi
- rD = read(fi, "diag_tensor", ITensor)
- rC = read(fi, "c_diag_tensor", ITensor)
- @test rD ≈ D
- @test rC ≈ C
- end
- end
-
- @testset "QN ITensor" begin
- i = Index(QN("A", -1) => 3, QN("A", 0) => 4, QN("A", +1) => 3; tags="i")
- j = Index(QN("A", -2) => 2, QN("A", 0) => 3, QN("A", +2) => 2; tags="j")
- k = Index(QN("A", -1) => 1, QN("A", 0) => 1, QN("A", +1) => 1; tags="k")
-
- # real case
- T = random_itensor(QN("A", 1), i, j, k)
-
- h5open(joinpath(@__DIR__, "data.h5"), "w") do fo
- write(fo, "T", T)
- end
-
- h5open(joinpath(@__DIR__, "data.h5"), "r") do fi
- rT = read(fi, "T", ITensor)
- @test rT ≈ T
- end
-
- # complex case
- T = random_itensor(ComplexF64, i, j, k)
-
- h5open(joinpath(@__DIR__, "data.h5"), "w") do fo
- write(fo, "complexT", T)
- end
-
- h5open(joinpath(@__DIR__, "data.h5"), "r") do fi
- rT = read(fi, "complexT", ITensor)
- @test rT ≈ T
- end
- end
-
- @testset "DownwardCompat" begin
- h5open(joinpath(@__DIR__, "utils", "testfilev0.1.41.h5"), "r") do fi
- ITensorName = "ITensorv0.1.41"
-
- # ITensor version <= v0.1.41 uses the `store` key for ITensor data storage
- # whereas v >= 0.2 uses `storage` as key
- @test haskey(read(fi, ITensorName), "store")
- @test read(fi, ITensorName, ITensor) isa ITensor
- end
- end
-
- #
- # Clean up the test hdf5 file
- #
- rm(joinpath(@__DIR__, "data.h5"); force=true)
+ rm(joinpath(@__DIR__, "data.h5"); force = true)
end
end
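
The read/write tests above all follow the same HDF5 round-trip pattern; here is a minimal standalone sketch of it (the file name is illustrative, and `ITensors` plus `HDF5` are assumed to be installed so the HDF5 extension is loaded):

```julia
using ITensors, HDF5

i, j = Index(2, "i"), Index(3, "j")
T = random_itensor(i, j)

# Write the ITensor to an HDF5 group named "T".
h5open("itensor_demo.h5", "w") do fo
    write(fo, "T", T)
end

# Read it back, passing the target type to `read`.
T2 = h5open("itensor_demo.h5", "r") do fi
    read(fi, "T", ITensor)
end

T2 ≈ T  # the round trip preserves the tensor
rm("itensor_demo.h5"; force = true)
```
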
diff --git a/test/base/test_sitetype.jl b/test/base/test_sitetype.jl
index de03b2b4a5..a01f96cdbe 100644
--- a/test/base/test_sitetype.jl
+++ b/test/base/test_sitetype.jl
@@ -1,583 +1,583 @@
using ITensors, Test
using ITensors.SiteTypes:
- @OpName_str,
- @SiteType_str,
- @StateName_str,
- OpName,
- StateName,
- op,
- ops,
- siteind,
- siteinds,
- state
+ @OpName_str,
+ @SiteType_str,
+ @StateName_str,
+ OpName,
+ StateName,
+ op,
+ ops,
+ siteind,
+ siteinds,
+ state
function is_unitary(U::ITensor; kwargs...)
- s = noprime(filterinds(U; plev=1))
- return isapprox(transpose(dag(U))(U), op("I", s...))
+ s = noprime(filterinds(U; plev = 1))
+ return isapprox(transpose(dag(U))(U), op("I", s...))
end
@testset "SiteType" begin
- N = 10
-
- @testset "Star in operator strings" begin
- @test_throws ErrorException op("S=1/2")
-
- sites = siteinds("S=1/2", N)
- #@test_throws ArgumentError op(sites, "Sp", 1)
- @test sites[1] isa Index
- Sz = op(sites, "Sz", 2)
- SzSz = op(sites, "Sz * Sz", 2)
- @test SzSz ≈ product(Sz, Sz)
- Sy = op(sites, "Sy", 2)
- SySy = op(sites, "Sy * Sy", 2)
- @test SySy ≈ product(Sy, Sy)
-
- Sz1 = op("Sz", sites, 1)
- @test op("Sz", [sites[1]]) ≈ Sz1
- @test op([sites[1]], "Sz") ≈ Sz1
- @test op([1 0; 0 -1] / 2, [sites[1]]) ≈ Sz1
- @test op([sites[1]], [1 0; 0 -1] / 2) ≈ Sz1
-
- @test op([sites[1]], "Ry"; θ=π / 2) ≈
- itensor([1 -1; 1 1] / √2, sites[1]', dag(sites[1]))
-
- sites = siteinds("S=1", N)
- #@test_throws ArgumentError op(sites, "Sp", 1)
- Sz = op(sites, "Sz", 2)
- SzSz = op(sites, "Sz * Sz", 2)
- @test SzSz ≈ product(Sz, Sz)
- Sy = op(sites, "Sy", 2)
- SySy = op(sites, "Sy * Sy", 2)
- @test SySy ≈ product(Sy, Sy)
- SzSySz = op(sites, "Sz * Sy * Sz", 2)
- @test SzSySz ≈ product(Sz, product(Sy, Sz))
- end
-
- @testset "+/- in operator strings" begin
- q = siteind("Qudit"; dim=5)
- Amat = array(op("a", q))
- Adagmat = array(op("a†", q))
-
- x = Amat - Adagmat
- @test x ≈ array(op("a - a†", q))
- x = Amat * Adagmat - Adagmat
- @test x ≈ array(op("a * a† - a†", q))
- @test x ≈ array(op("a * a† - a†", q))
- x = Adagmat * Adagmat * Amat * Amat
- @test x ≈ array(op("a† * a† * a * a", q))
-
- q = siteind("S=1/2")
- Sp = array(op("S+", q))
- Sm = array(op("S-", q))
- Sx = array(op("Sx", q))
- Sy = array(op("Sy", q))
- Sz = array(op("Sz", q))
- x = Sp + Sm
- @test x ≈ array(op("S+ + S-", q))
- x = Sp - Sm
- @test x ≈ array(op("S+ - S-", q))
- x = Sp - Sm - Sp
- @test x ≈ array(op("S+ - S- - S+", q))
- x = Sp * Sm + Sm * Sp
- @test x ≈ array(op("S+ * S- + S- * S+", q))
- # Deprecated syntax
- @test x ≈ array(op("S+ * S- + S-*S+", q))
- x = Sp * Sm - Sm * Sp
- @test x ≈ array(op("S+ * S- - S- * S+", q))
- @test x ≈ array(op("S+ * S- - S- * S+", q))
- x = Sp * Sm + Sm * Sp + Sz * Sx * Sy
- @test x ≈ array(op("S+ * S- + S- * S+ + Sz * Sx * Sy", q))
- x = Sp * Sm - Sm * Sp + Sz * Sx * Sy
- @test x ≈ array(op("S+ * S- - S- * S+ + Sz * Sx * Sy", q))
- x = Sp * Sm - Sm * Sp - Sz * Sx * Sy
- @test x ≈ array(op("S+ * S- - S- * S+ - Sz * Sx * Sy", q))
-
- #q = siteind("Qubit")
- #R = array(op("Rx", q; θ = 0.1))
- #H = array(op("H", q))
- #Y = array(op("Y", q))
- #x = H * R + Y + R
- #@test x ≈ array(op("H * Rx + Y + Rx", q; θ = 0.1))
-
- end
-
- @testset "Custom SiteType using op" begin
- # Use "_Custom_" tag even though this example
- # is for S=3/2, because we might define the
- # "S=3/2" TagType inside ITensors.jl later
- function ITensors.op(::OpName"Sz", ::SiteType"_Custom_", s::Index)
- Op = ITensor(s', dag(s))
- Op[s' => 1, s => 1] = +3 / 2
- Op[s' => 2, s => 2] = +1 / 2
- Op[s' => 3, s => 3] = -1 / 2
- Op[s' => 4, s => 4] = -3 / 2
- return Op
- end
+ N = 10
- function ITensors.op(::OpName"α", ::SiteType"_Custom_", s1::Index, s2::Index)
- Op = ITensor(s1', s2', dag(s1), dag(s2))
- Op[s1' => 1, s2' => 2, s1 => 1, s2 => 2] = +3 / 2
- Op[s1' => 2, s2' => 1, s1 => 2, s2 => 2] = +1 / 2
- Op[s1' => 3, s2' => 3, s1 => 3, s2 => 4] = -1 / 2
- Op[s1' => 4, s2' => 1, s1 => 4, s2 => 2] = -3 / 2
- return Op
+ @testset "Star in operator strings" begin
+ @test_throws ErrorException op("S=1/2")
+
+ sites = siteinds("S=1/2", N)
+ #@test_throws ArgumentError op(sites, "Sp", 1)
+ @test sites[1] isa Index
+ Sz = op(sites, "Sz", 2)
+ SzSz = op(sites, "Sz * Sz", 2)
+ @test SzSz ≈ product(Sz, Sz)
+ Sy = op(sites, "Sy", 2)
+ SySy = op(sites, "Sy * Sy", 2)
+ @test SySy ≈ product(Sy, Sy)
+
+ Sz1 = op("Sz", sites, 1)
+ @test op("Sz", [sites[1]]) ≈ Sz1
+ @test op([sites[1]], "Sz") ≈ Sz1
+ @test op([1 0; 0 -1] / 2, [sites[1]]) ≈ Sz1
+ @test op([sites[1]], [1 0; 0 -1] / 2) ≈ Sz1
+
+ @test op([sites[1]], "Ry"; θ = π / 2) ≈
+ itensor([1 -1; 1 1] / √2, sites[1]', dag(sites[1]))
+
+ sites = siteinds("S=1", N)
+ #@test_throws ArgumentError op(sites, "Sp", 1)
+ Sz = op(sites, "Sz", 2)
+ SzSz = op(sites, "Sz * Sz", 2)
+ @test SzSz ≈ product(Sz, Sz)
+ Sy = op(sites, "Sy", 2)
+ SySy = op(sites, "Sy * Sy", 2)
+ @test SySy ≈ product(Sy, Sy)
+ SzSySz = op(sites, "Sz * Sy * Sz", 2)
+ @test SzSySz ≈ product(Sz, product(Sy, Sz))
end
- function ITensors.op(
- ::OpName"β", ::SiteType"_Custom1", ::SiteType"_Custom2", s1::Index, s2::Index
- )
- Op = ITensor(s1', s2', dag(s1), dag(s2))
- Op[s1' => 1, s2' => 2, s1 => 1, s2 => 2] = +5 / 2
- Op[s1' => 2, s2' => 1, s1 => 2, s2 => 2] = +3 / 2
- Op[s1' => 3, s2' => 3, s1 => 3, s2 => 4] = -3 / 2
- Op[s1' => 4, s2' => 1, s1 => 4, s2 => 2] = -5 / 2
- return Op
+ @testset "+/- in operator strings" begin
+ q = siteind("Qudit"; dim = 5)
+ Amat = array(op("a", q))
+ Adagmat = array(op("a†", q))
+
+ x = Amat - Adagmat
+ @test x ≈ array(op("a - a†", q))
+ x = Amat * Adagmat - Adagmat
+ @test x ≈ array(op("a * a† - a†", q))
+ @test x ≈ array(op("a * a† - a†", q))
+ x = Adagmat * Adagmat * Amat * Amat
+ @test x ≈ array(op("a† * a† * a * a", q))
+
+ q = siteind("S=1/2")
+ Sp = array(op("S+", q))
+ Sm = array(op("S-", q))
+ Sx = array(op("Sx", q))
+ Sy = array(op("Sy", q))
+ Sz = array(op("Sz", q))
+ x = Sp + Sm
+ @test x ≈ array(op("S+ + S-", q))
+ x = Sp - Sm
+ @test x ≈ array(op("S+ - S-", q))
+ x = Sp - Sm - Sp
+ @test x ≈ array(op("S+ - S- - S+", q))
+ x = Sp * Sm + Sm * Sp
+ @test x ≈ array(op("S+ * S- + S- * S+", q))
+ # Deprecated syntax
+ @test x ≈ array(op("S+ * S- + S-*S+", q))
+ x = Sp * Sm - Sm * Sp
+ @test x ≈ array(op("S+ * S- - S- * S+", q))
+ @test x ≈ array(op("S+ * S- - S- * S+", q))
+ x = Sp * Sm + Sm * Sp + Sz * Sx * Sy
+ @test x ≈ array(op("S+ * S- + S- * S+ + Sz * Sx * Sy", q))
+ x = Sp * Sm - Sm * Sp + Sz * Sx * Sy
+ @test x ≈ array(op("S+ * S- - S- * S+ + Sz * Sx * Sy", q))
+ x = Sp * Sm - Sm * Sp - Sz * Sx * Sy
+ @test x ≈ array(op("S+ * S- - S- * S+ - Sz * Sx * Sy", q))
+
+ #q = siteind("Qubit")
+ #R = array(op("Rx", q; θ = 0.1))
+ #H = array(op("H", q))
+ #Y = array(op("Y", q))
+ #x = H * R + Y + R
+ #@test x ≈ array(op("H * Rx + Y + Rx", q; θ = 0.1))
+
end
- s = Index(4, "_Custom_, __x")
- Sz = op("Sz", s)
- @test Sz[s' => 1, s => 1] ≈ +3 / 2
- @test Sz[s' => 2, s => 2] ≈ +1 / 2
- @test Sz[s' => 3, s => 3] ≈ -1 / 2
- @test Sz[s' => 4, s => 4] ≈ -3 / 2
-
- t = Index(4, "_Custom_, __x")
- α = op("α", s, t)
- @test α[s' => 1, t' => 2, s => 1, t => 2] ≈ +3 / 2
- @test α[s' => 2, t' => 1, s => 2, t => 2] ≈ +1 / 2
- @test α[s' => 3, t' => 3, s => 3, t => 4] ≈ -1 / 2
- @test α[s' => 4, t' => 1, s => 4, t => 2] ≈ -3 / 2
-
- s1 = Index(4, "_Custom1, __x")
- @test_throws ArgumentError op("α", s, s1)
-
- s2 = Index(4, "_Custom2, __x")
- β = op("β", s1, s2)
- @test β[s1' => 1, s2' => 2, s1 => 1, s2 => 2] ≈ +5 / 2
- @test β[s1' => 2, s2' => 1, s1 => 2, s2 => 2] ≈ +3 / 2
- @test β[s1' => 3, s2' => 3, s1 => 3, s2 => 4] ≈ -3 / 2
- @test β[s1' => 4, s2' => 1, s1 => 4, s2 => 2] ≈ -5 / 2
- @test_throws ArgumentError op("β", s2, s1)
- end
-
- @testset "Custom OpName with long name" begin
- function ITensors.op(::OpName"my_favorite_operator", ::SiteType"S=1/2", s::Index)
- Op = ITensor(s', dag(s))
- Op[s' => 1, s => 1] = 0.11
- Op[s' => 1, s => 2] = 0.12
- Op[s' => 2, s => 1] = 0.21
- Op[s' => 2, s => 2] = 0.22
- return Op
+ @testset "Custom SiteType using op" begin
+ # Use "_Custom_" tag even though this example
+ # is for S=3/2, because we might define the
+ # "S=3/2" TagType inside ITensors.jl later
+ function ITensors.op(::OpName"Sz", ::SiteType"_Custom_", s::Index)
+ Op = ITensor(s', dag(s))
+ Op[s' => 1, s => 1] = +3 / 2
+ Op[s' => 2, s => 2] = +1 / 2
+ Op[s' => 3, s => 3] = -1 / 2
+ Op[s' => 4, s => 4] = -3 / 2
+ return Op
+ end
+
+ function ITensors.op(::OpName"α", ::SiteType"_Custom_", s1::Index, s2::Index)
+ Op = ITensor(s1', s2', dag(s1), dag(s2))
+ Op[s1' => 1, s2' => 2, s1 => 1, s2 => 2] = +3 / 2
+ Op[s1' => 2, s2' => 1, s1 => 2, s2 => 2] = +1 / 2
+ Op[s1' => 3, s2' => 3, s1 => 3, s2 => 4] = -1 / 2
+ Op[s1' => 4, s2' => 1, s1 => 4, s2 => 2] = -3 / 2
+ return Op
+ end
+
+ function ITensors.op(
+ ::OpName"β", ::SiteType"_Custom1", ::SiteType"_Custom2", s1::Index, s2::Index
+ )
+ Op = ITensor(s1', s2', dag(s1), dag(s2))
+ Op[s1' => 1, s2' => 2, s1 => 1, s2 => 2] = +5 / 2
+ Op[s1' => 2, s2' => 1, s1 => 2, s2 => 2] = +3 / 2
+ Op[s1' => 3, s2' => 3, s1 => 3, s2 => 4] = -3 / 2
+ Op[s1' => 4, s2' => 1, s1 => 4, s2 => 2] = -5 / 2
+ return Op
+ end
+
+ s = Index(4, "_Custom_, __x")
+ Sz = op("Sz", s)
+ @test Sz[s' => 1, s => 1] ≈ +3 / 2
+ @test Sz[s' => 2, s => 2] ≈ +1 / 2
+ @test Sz[s' => 3, s => 3] ≈ -1 / 2
+ @test Sz[s' => 4, s => 4] ≈ -3 / 2
+
+ t = Index(4, "_Custom_, __x")
+ α = op("α", s, t)
+ @test α[s' => 1, t' => 2, s => 1, t => 2] ≈ +3 / 2
+ @test α[s' => 2, t' => 1, s => 2, t => 2] ≈ +1 / 2
+ @test α[s' => 3, t' => 3, s => 3, t => 4] ≈ -1 / 2
+ @test α[s' => 4, t' => 1, s => 4, t => 2] ≈ -3 / 2
+
+ s1 = Index(4, "_Custom1, __x")
+ @test_throws ArgumentError op("α", s, s1)
+
+ s2 = Index(4, "_Custom2, __x")
+ β = op("β", s1, s2)
+ @test β[s1' => 1, s2' => 2, s1 => 1, s2 => 2] ≈ +5 / 2
+ @test β[s1' => 2, s2' => 1, s1 => 2, s2 => 2] ≈ +3 / 2
+ @test β[s1' => 3, s2' => 3, s1 => 3, s2 => 4] ≈ -3 / 2
+ @test β[s1' => 4, s2' => 1, s1 => 4, s2 => 2] ≈ -5 / 2
+ @test_throws ArgumentError op("β", s2, s1)
end
- s = Index(2, "S=1/2, Site")
- Sz = op("my_favorite_operator", s)
- @test Sz[s' => 1, s => 1] ≈ 0.11
- @test Sz[s' => 1, s => 2] ≈ 0.12
- @test Sz[s' => 2, s => 1] ≈ 0.21
- @test Sz[s' => 2, s => 2] ≈ 0.22
+ @testset "Custom OpName with long name" begin
+ function ITensors.op(::OpName"my_favorite_operator", ::SiteType"S=1/2", s::Index)
+ Op = ITensor(s', dag(s))
+ Op[s' => 1, s => 1] = 0.11
+ Op[s' => 1, s => 2] = 0.12
+ Op[s' => 2, s => 1] = 0.21
+ Op[s' => 2, s => 2] = 0.22
+ return Op
+ end
+
+ s = Index(2, "S=1/2, Site")
+ Sz = op("my_favorite_operator", s)
+ @test Sz[s' => 1, s => 1] ≈ 0.11
+ @test Sz[s' => 1, s => 2] ≈ 0.12
+ @test Sz[s' => 2, s => 1] ≈ 0.21
+ @test Sz[s' => 2, s => 2] ≈ 0.22
+
+ @test OpName(:myop) == OpName("myop")
+ @test ITensors.name(OpName(:myop)) == :myop
+ end
- @test OpName(:myop) == OpName("myop")
- @test ITensors.name(OpName(:myop)) == :myop
- end
+ @testset "op with more than two indices" begin
+ ITensors.space(::SiteType"qubit") = 2
- @testset "op with more than two indices" begin
- ITensors.space(::SiteType"qubit") = 2
+ function ITensors.op(::OpName"rand", ::SiteType"qubit", s::Index...)
+ return random_itensor(prime.(s)..., dag.(s)...)
+ end
- function ITensors.op(::OpName"rand", ::SiteType"qubit", s::Index...)
- return random_itensor(prime.(s)..., dag.(s)...)
+ s = siteinds("qubit", 4)
+ o = op("rand", s...)
+ @test norm(o) > 0
+ @test order(o) == 8
+ @test hassameinds(o, (prime.(s)..., s...))
end
- s = siteinds("qubit", 4)
- o = op("rand", s...)
- @test norm(o) > 0
- @test order(o) == 8
- @test hassameinds(o, (prime.(s)..., s...))
- end
-
- @testset "Custom Qudit/Boson op" begin
- # Overload Qudit, implicitly defined for Boson as well
- function ITensors.op(::OpName"Qudit_op_1", ::SiteType"Qudit", ds::Int...)
- d = prod(ds)
- return [i * j for i in 1:d, j in 1:d]
- end
- function ITensors.op(::OpName"Qudit_op_2", ::SiteType"Qudit", d::Int)
- return [i * j for i in 1:d, j in 1:d]
+ @testset "Custom Qudit/Boson op" begin
+ # Overload Qudit, implicitly defined for Boson as well
+ function ITensors.op(::OpName"Qudit_op_1", ::SiteType"Qudit", ds::Int...)
+ d = prod(ds)
+ return [i * j for i in 1:d, j in 1:d]
+ end
+ function ITensors.op(::OpName"Qudit_op_2", ::SiteType"Qudit", d::Int)
+ return [i * j for i in 1:d, j in 1:d]
+ end
+
+ # Overload Boson directly
+ function ITensors.op(::OpName"Boson_op_1", ::SiteType"Boson", ds::Int...)
+ d = prod(ds)
+ return [i * j for i in 1:d, j in 1:d]
+ end
+ function ITensors.op(::OpName"Boson_op_2", ::SiteType"Boson", d::Int)
+ return [i * j for i in 1:d, j in 1:d]
+ end
+
+ for st in ["Qudit", "Boson"], ot in ["Qudit", "Boson"]
+ if st == "Qudit" && ot == "Boson"
+ # Qudit site types don't see Boson overloads
+ continue
+ end
+ d = 4
+ s = siteinds(st, 2; dim = d)
+ o = op("$(ot)_op_1", s, 1)
+ @test o ≈ itensor([i * j for i in 1:d, j in 1:d], s[1]', dag(s[1]))
+
+ o = op("$(ot)_op_1", s, 1, 2)
+ @test o ≈ itensor(
+ [i * j for i in 1:(d^2), j in 1:(d^2)], s[2]', s[1]', dag(s[2]), dag(s[1])
+ )
+
+ d = 4
+ s = siteinds(st, 2; dim = d)
+ o = op("$(ot)_op_2", s, 1)
+ @test o ≈ itensor([i * j for i in 1:d, j in 1:d], s[1]', dag(s[1]))
+ @test_throws MethodError op("$(ot)_op_2", s, 1, 2)
+ end
end
- # Overload Boson directly
- function ITensors.op(::OpName"Boson_op_1", ::SiteType"Boson", ds::Int...)
- d = prod(ds)
- return [i * j for i in 1:d, j in 1:d]
- end
- function ITensors.op(::OpName"Boson_op_2", ::SiteType"Boson", d::Int)
- return [i * j for i in 1:d, j in 1:d]
+ @testset "Custom SiteType using op!" begin
+ # Use "_Custom_" tag even though this example
+ # is for S=3/2, because we might define the
+ # "S=3/2" TagType inside ITensors.jl later
+ function ITensors.op!(Op::ITensor, ::OpName"Sz", ::SiteType"_Custom_", s::Index)
+ Op[s' => 1, s => 1] = +3 / 2
+ Op[s' => 2, s => 2] = +1 / 2
+ Op[s' => 3, s => 3] = -1 / 2
+ return Op[s' => 4, s => 4] = -3 / 2
+ end
+
+ function ITensors.op!(
+ Op::ITensor, ::OpName"α", ::SiteType"_Custom_", s1::Index, s2::Index
+ )
+ Op[s1' => 1, s2' => 2, s1 => 1, s2 => 2] = +3 / 2
+ Op[s1' => 2, s2' => 1, s1 => 2, s2 => 2] = +1 / 2
+ Op[s1' => 3, s2' => 3, s1 => 3, s2 => 4] = -1 / 2
+ return Op[s1' => 4, s2' => 1, s1 => 4, s2 => 2] = -3 / 2
+ end
+
+ function ITensors.op!(
+ Op::ITensor,
+ ::OpName"β",
+ ::SiteType"_Custom1",
+ ::SiteType"_Custom2",
+ s1::Index,
+ s2::Index,
+ )
+ Op[s1' => 1, s2' => 2, s1 => 1, s2 => 2] = +5 / 2
+ Op[s1' => 2, s2' => 1, s1 => 2, s2 => 2] = +3 / 2
+ Op[s1' => 3, s2' => 3, s1 => 3, s2 => 4] = -3 / 2
+ return Op[s1' => 4, s2' => 1, s1 => 4, s2 => 2] = -5 / 2
+ end
+
+ s = Index(4, "_Custom_, __x")
+ Sz = op("Sz", s)
+ @test Sz[s' => 1, s => 1] ≈ +3 / 2
+ @test Sz[s' => 2, s => 2] ≈ +1 / 2
+ @test Sz[s' => 3, s => 3] ≈ -1 / 2
+ @test Sz[s' => 4, s => 4] ≈ -3 / 2
+
+ t = Index(4, "_Custom_, __x")
+ α = op("α", s, t)
+ @test α[s' => 1, t' => 2, s => 1, t => 2] ≈ +3 / 2
+ @test α[s' => 2, t' => 1, s => 2, t => 2] ≈ +1 / 2
+ @test α[s' => 3, t' => 3, s => 3, t => 4] ≈ -1 / 2
+ @test α[s' => 4, t' => 1, s => 4, t => 2] ≈ -3 / 2
+
+ s1 = Index(4, "_Custom1, __x")
+ @test_throws ArgumentError op("α", t, s1)
+
+ s2 = Index(4, "_Custom2, __x")
+ β = op("β", s1, s2)
+ @test β[s1' => 1, s2' => 2, s1 => 1, s2 => 2] ≈ +5 / 2
+ @test β[s1' => 2, s2' => 1, s1 => 2, s2 => 2] ≈ +3 / 2
+ @test β[s1' => 3, s2' => 3, s1 => 3, s2 => 4] ≈ -3 / 2
+ @test β[s1' => 4, s2' => 1, s1 => 4, s2 => 2] ≈ -5 / 2
+ @test_throws ArgumentError op("β", s2, s1)
end
- for st in ["Qudit", "Boson"], ot in ["Qudit", "Boson"]
- if st == "Qudit" && ot == "Boson"
- # Qudit site types don't see Boson overloads
- continue
- end
- d = 4
- s = siteinds(st, 2; dim=d)
- o = op("$(ot)_op_1", s, 1)
- @test o ≈ itensor([i * j for i in 1:d, j in 1:d], s[1]', dag(s[1]))
-
- o = op("$(ot)_op_1", s, 1, 2)
- @test o ≈ itensor(
- [i * j for i in 1:(d ^ 2), j in 1:(d ^ 2)], s[2]', s[1]', dag(s[2]), dag(s[1])
- )
-
- d = 4
- s = siteinds(st, 2; dim=d)
- o = op("$(ot)_op_2", s, 1)
- @test o ≈ itensor([i * j for i in 1:d, j in 1:d], s[1]', dag(s[1]))
- @test_throws MethodError op("$(ot)_op_2", s, 1, 2)
+ @testset "Custom SiteType using older op interface" begin
+ # Use "_Custom_" tag even though this example
+ # is for S=3/2, because we might define the
+ # "S=3/2" TagType inside ITensors.jl later
+ function ITensors.op(::SiteType"_Custom_", s::Index, opname::AbstractString)
+ Op = ITensor(s', dag(s))
+ if opname == "S+"
+ Op[s' => 1, s => 2] = sqrt(3)
+ Op[s' => 2, s => 3] = 2
+ Op[s' => 3, s => 4] = sqrt(3)
+ else
+ error("Name $opname not recognized for tag \"Custom\"")
+ end
+ return Op
+ end
+
+ s = Index(4, "_Custom_")
+ Sp = op("S+", s)
+ @test Sp[s' => 1, s => 2] ≈ sqrt(3)
+ @test Sp[s' => 2, s => 3] ≈ 2
+ @test Sp[s' => 3, s => 4] ≈ sqrt(3)
end
- end
-
- @testset "Custom SiteType using op!" begin
- # Use "_Custom_" tag even though this example
- # is for S=3/2, because we might define the
- # "S=3/2" TagType inside ITensors.jl later
- function ITensors.op!(Op::ITensor, ::OpName"Sz", ::SiteType"_Custom_", s::Index)
- Op[s' => 1, s => 1] = +3 / 2
- Op[s' => 2, s => 2] = +1 / 2
- Op[s' => 3, s => 3] = -1 / 2
- return Op[s' => 4, s => 4] = -3 / 2
+
+ @testset "siteind defined by space overload" begin
+ ITensors.space(::SiteType"Test1") = 4
+ s = siteind("Test1", 3)
+ @test dim(s) == 4
+ @test hastags(s, "Site,Test1,n=3")
+
+ s = siteind("Test1")
+ @test dim(s) == 4
+ @test hastags(s, "Site,Test1")
end
- function ITensors.op!(
- Op::ITensor, ::OpName"α", ::SiteType"_Custom_", s1::Index, s2::Index
- )
- Op[s1' => 1, s2' => 2, s1 => 1, s2 => 2] = +3 / 2
- Op[s1' => 2, s2' => 1, s1 => 2, s2 => 2] = +1 / 2
- Op[s1' => 3, s2' => 3, s1 => 3, s2 => 4] = -1 / 2
- return Op[s1' => 4, s2' => 1, s1 => 4, s2 => 2] = -3 / 2
+ @testset "siteind defined by siteind overload" begin
+ # TODO: Make `ITensors.siteind` accessible? Or delete this test?
+ ITensors.SiteTypes.siteind(::SiteType"Test2") = Index(4, "Test2")
+ s = siteind("Test2", 3)
+ @test dim(s) == 4
+ @test hastags(s, "Test2,n=3")
end
- function ITensors.op!(
- Op::ITensor,
- ::OpName"β",
- ::SiteType"_Custom1",
- ::SiteType"_Custom2",
- s1::Index,
- s2::Index,
- )
- Op[s1' => 1, s2' => 2, s1 => 1, s2 => 2] = +5 / 2
- Op[s1' => 2, s2' => 1, s1 => 2, s2 => 2] = +3 / 2
- Op[s1' => 3, s2' => 3, s1 => 3, s2 => 4] = -3 / 2
- return Op[s1' => 4, s2' => 1, s1 => 4, s2 => 2] = -5 / 2
+ @testset "siteind defined by space overload with QN" begin
+ function ITensors.space(::SiteType"Test3")
+ return [QN("T", 0) => 2, QN("T", 1) => 1, QN("T", 2) => 1]
+ end
+ s = siteind("Test3", 3)
+ @test dim(s) == 4
+ @test hasqns(s)
+ @test hastags(s, "Site,Test3,n=3")
end
- s = Index(4, "_Custom_, __x")
- Sz = op("Sz", s)
- @test Sz[s' => 1, s => 1] ≈ +3 / 2
- @test Sz[s' => 2, s => 2] ≈ +1 / 2
- @test Sz[s' => 3, s => 3] ≈ -1 / 2
- @test Sz[s' => 4, s => 4] ≈ -3 / 2
-
- t = Index(4, "_Custom_, __x")
- α = op("α", s, t)
- @test α[s' => 1, t' => 2, s => 1, t => 2] ≈ +3 / 2
- @test α[s' => 2, t' => 1, s => 2, t => 2] ≈ +1 / 2
- @test α[s' => 3, t' => 3, s => 3, t => 4] ≈ -1 / 2
- @test α[s' => 4, t' => 1, s => 4, t => 2] ≈ -3 / 2
-
- s1 = Index(4, "_Custom1, __x")
- @test_throws ArgumentError op("α", t, s1)
-
- s2 = Index(4, "_Custom2, __x")
- β = op("β", s1, s2)
- @test β[s1' => 1, s2' => 2, s1 => 1, s2 => 2] ≈ +5 / 2
- @test β[s1' => 2, s2' => 1, s1 => 2, s2 => 2] ≈ +3 / 2
- @test β[s1' => 3, s2' => 3, s1 => 3, s2 => 4] ≈ -3 / 2
- @test β[s1' => 4, s2' => 1, s1 => 4, s2 => 2] ≈ -5 / 2
- @test_throws ArgumentError op("β", s2, s1)
- end
-
- @testset "Custom SiteType using older op interface" begin
- # Use "_Custom_" tag even though this example
- # is for S=3/2, because we might define the
- # "S=3/2" TagType inside ITensors.jl later
- function ITensors.op(::SiteType"_Custom_", s::Index, opname::AbstractString)
- Op = ITensor(s', dag(s))
- if opname == "S+"
- Op[s' => 1, s => 2] = sqrt(3)
- Op[s' => 2, s => 3] = 2
- Op[s' => 3, s => 4] = sqrt(3)
- else
- error("Name $opname not recognized for tag \"Custom\"")
- end
- return Op
+ @testset "siteinds defined by space overload" begin
+ function ITensors.space(::SiteType"Test4"; conserve_qns = false)
+ if conserve_qns
+ return [QN("T", 0) => 2, QN("T", 1) => 1, QN("T", 2) => 1]
+ end
+ return 4
+ end
+
+ # Without QNs
+ s = siteinds("Test4", 6)
+ @test length(s) == 6
+ @test dim(s[1]) == 4
+ for n in 1:length(s)
+ @test hastags(s[n], "Site,Test4,n=$n")
+ @test !hasqns(s[n])
+ end
+
+ # With QNs
+ s = siteinds("Test4", 6; conserve_qns = true)
+ @test length(s) == 6
+ @test dim(s[1]) == 4
+ for n in 1:length(s)
+ @test hastags(s[n], "Site,Test4,n=$n")
+ @test hasqns(s[n])
+ end
end
- s = Index(4, "_Custom_")
- Sp = op("S+", s)
- @test Sp[s' => 1, s => 2] ≈ sqrt(3)
- @test Sp[s' => 2, s => 3] ≈ 2
- @test Sp[s' => 3, s => 4] ≈ sqrt(3)
- end
-
- @testset "siteind defined by space overload" begin
- ITensors.space(::SiteType"Test1") = 4
- s = siteind("Test1", 3)
- @test dim(s) == 4
- @test hastags(s, "Site,Test1,n=3")
-
- s = siteind("Test1")
- @test dim(s) == 4
- @test hastags(s, "Site,Test1")
- end
-
- @testset "siteind defined by siteind overload" begin
- # TODO: Make `ITensors.siteind` accessible? Or delete this test?
- ITensors.SiteTypes.siteind(::SiteType"Test2") = Index(4, "Test2")
- s = siteind("Test2", 3)
- @test dim(s) == 4
- @test hastags(s, "Test2,n=3")
- end
-
- @testset "siteind defined by space overload with QN" begin
- function ITensors.space(::SiteType"Test3")
- return [QN("T", 0) => 2, QN("T", 1) => 1, QN("T", 2) => 1]
+ @testset "siteinds defined by siteinds overload" begin
+ # TODO: Make `ITensors.siteinds` accessible? Or delete this test?
+ function ITensors.SiteTypes.siteinds(::SiteType"Test5", N; kwargs...)
+ return [Index(4, "Test5,n=$n") for n in 1:N]
+ end
+ s = siteinds("Test5", 8)
+ @test length(s) == 8
+ @test dim(s[1]) == 4
+ for n in 1:length(s)
+ @test hastags(s[n], "Test5,n=$n")
+ end
end
- s = siteind("Test3", 3)
- @test dim(s) == 4
- @test hasqns(s)
- @test hastags(s, "Site,Test3,n=3")
- end
-
- @testset "siteinds defined by space overload" begin
- function ITensors.space(::SiteType"Test4"; conserve_qns=false)
- if conserve_qns
- return [QN("T", 0) => 2, QN("T", 1) => 1, QN("T", 2) => 1]
- end
- return 4
+
+ @testset "Version of siteinds taking function argument" begin
+ N = 10
+ s = siteinds(n -> (n == 1 || n == N) ? "S=1/2" : "S=1", N)
+ for n in (1, N)
+ @test dim(s[n]) == 2
+ @test hastags(s[n], "Site,S=1/2,n=$n")
+ end
+ for n in 2:(N - 1)
+ @test dim(s[n]) == 3
+ @test hastags(s[n], "Site,S=1,n=$n")
+ end
end
- # Without QNs
- s = siteinds("Test4", 6)
- @test length(s) == 6
- @test dim(s[1]) == 4
- for n in 1:length(s)
- @test hastags(s[n], "Site,Test4,n=$n")
- @test !hasqns(s[n])
+ @testset "siteinds addtags keyword argument" begin
+ N = 4
+ s = siteinds("S=1/2", N; addtags = "T")
+ for n in 1:N
+ @test hastags(s[n], "Site,S=1/2,n=$n,T")
+ end
end
- # With QNs
- s = siteinds("Test4", 6; conserve_qns=true)
- @test length(s) == 6
- @test dim(s[1]) == 4
- for n in 1:length(s)
- @test hastags(s[n], "Site,Test4,n=$n")
- @test hasqns(s[n])
+ @testset "Error for undefined tag in siteinds,space system" begin
+ @test_throws ErrorException siteinds("Missing", 10)
+ @test_throws ErrorException siteind("Missing", 3)
+ @test isnothing(siteind("Missing"))
end
- end
- @testset "siteinds defined by siteinds overload" begin
- # TODO: Make `ITensors.siteinds` accessible? Or delete this test?
- function ITensors.SiteTypes.siteinds(::SiteType"Test5", N; kwargs...)
- return [Index(4, "Test5,n=$n") for n in 1:N]
+ @testset "Various ops input types" begin
+ s = siteinds("S=1/2", 4)
+
+ # Vector{Tuple{String,Int}} input
+ oa = ops(s, [("Sz", n) for n in 1:length(s)])
+ @test length(oa) == length(s)
+ @test norm(oa[2] - op("Sz", s, 2)) < 1.0e-8
+
+ # Vector{Tuple} input
+ oa = ops(s, Tuple[("Sz", n) for n in 1:length(s)])
+ @test length(oa) == length(s)
+ @test norm(oa[2] - op("Sz", s, 2)) < 1.0e-8
end
- s = siteinds("Test5", 8)
- @test length(s) == 8
- @test dim(s[1]) == 4
- for n in 1:length(s)
- @test hastags(s[n], "Test5,n=$n")
+
+ @testset "Index Values From Strings" begin
+ @testset "Val function" begin
+ s = siteind("Electron")
+ @test val(s, "0") == 1
+ @test val(s, "Up") == 2
+ @test val(s, "Dn") == 3
+ @test val(s, "UpDn") == 4
+ end
+
+ @testset "Strings in ITensor get and set" begin
+ s = siteind("S=1"; conserve_qns = true)
+ T = ITensor(s', dag(s))
+ T[s' => "Up", s => "Up"] = +1.0
+ T[s' => "Z0", s => "Z0"] = +2.0
+ T[s' => "Dn", s => "Dn"] = -1.0
+ @test T[1, 1] ≈ +1.0
+ @test T[2, 2] ≈ +2.0
+ @test T[3, 3] ≈ -1.0
+
+ o = onehot(s => "Z0")
+ @test vector(o) ≈ [0, 1, 0]
+ end
end
- end
- @testset "Version of siteinds taking function argument" begin
- N = 10
- s = siteinds(n -> (n == 1 || n == N) ? "S=1/2" : "S=1", N)
- for n in (1, N)
- @test dim(s[n]) == 2
- @test hastags(s[n], "Site,S=1/2,n=$n")
+ @testset "state with variable dimension" begin
+ ITensors.space(::SiteType"MyQudit"; dim = 2) = dim
+
+ function ITensors.state(::StateName{N}, ::SiteType"MyQudit", s::Index) where {N}
+ n = parse(Int, String(N))
+ st = zeros(dim(s))
+ st[n + 1] = 1.0
+ return itensor(st, s)
+ end
+
+ s = siteind("MyQudit"; dim = 3)
+ v0 = state(s, "0")
+ v1 = state(s, "1")
+ v2 = state(s, "2")
+ @test v0 == state("0", s)
+ @test v1 == state("1", s)
+ @test v2 == state("2", s)
+ @test dim(v0) == 3
+ @test dim(v1) == 3
+ @test dim(v2) == 3
+ @test v0[s => 1] == 1
+ @test v0[s => 2] == 0
+ @test v0[s => 3] == 0
+ @test v1[s => 1] == 0
+ @test v1[s => 2] == 1
+ @test v1[s => 3] == 0
+ @test v2[s => 1] == 0
+ @test v2[s => 2] == 0
+ @test v2[s => 3] == 1
+ @test_throws BoundsError state(s, "3")
end
- for n in 2:(N - 1)
- @test dim(s[n]) == 3
- @test hastags(s[n], "Site,S=1,n=$n")
+
+ @testset "state with parameters" begin
+ ITensors.state(::StateName"phase", ::SiteType"Qubit"; θ::Real) = [cos(θ), sin(θ)]
+ s = siteind("Qubit")
+ @test state("phase", s; θ = π / 6) ≈ itensor([cos(π / 6), sin(π / 6)], s)
end
- end
- @testset "siteinds addtags keyword argument" begin
- N = 4
- s = siteinds("S=1/2", N; addtags="T")
- for n in 1:N
- @test hastags(s[n], "Site,S=1/2,n=$n,T")
+ @testset "state with variable dimension (deprecated)" begin
+ ITensors.space(::SiteType"MyQudit2"; dim = 2) = dim
+
+ # XXX: This syntax is deprecated, only testing for
+ # backwards compatibility. Should return the
+ # ITensor `itensor(st, s)`.
+ function ITensors.state(::StateName{N}, ::SiteType"MyQudit2", s::Index) where {N}
+ n = parse(Int, String(N))
+ st = zeros(dim(s))
+ st[n + 1] = 1.0
+ return st
+ end
+
+ s = siteind("MyQudit2"; dim = 3)
+ v0 = state(s, "0")
+ v1 = state(s, "1")
+ v2 = state(s, "2")
+ @test v0 == state("0", s)
+ @test v1 == state("1", s)
+ @test v2 == state("2", s)
+ @test dim(v0) == 3
+ @test dim(v1) == 3
+ @test dim(v2) == 3
+ @test v0[s => 1] == 1
+ @test v0[s => 2] == 0
+ @test v0[s => 3] == 0
+ @test v1[s => 1] == 0
+ @test v1[s => 2] == 1
+ @test v1[s => 3] == 0
+ @test v2[s => 1] == 0
+ @test v2[s => 2] == 0
+ @test v2[s => 3] == 1
+ @test_throws BoundsError state(s, "3")
end
- end
-
- @testset "Error for undefined tag in siteinds,space system" begin
- @test_throws ErrorException siteinds("Missing", 10)
- @test_throws ErrorException siteind("Missing", 3)
- @test isnothing(siteind("Missing"))
- end
-
- @testset "Various ops input types" begin
- s = siteinds("S=1/2", 4)
-
- # Vector{Tuple{String,Int}} input
- oa = ops(s, [("Sz", n) for n in 1:length(s)])
- @test length(oa) == length(s)
- @test norm(oa[2] - op("Sz", s, 2)) < 1E-8
-
- # Vector{Tuple} input
- oa = ops(s, Tuple[("Sz", n) for n in 1:length(s)])
- @test length(oa) == length(s)
- @test norm(oa[2] - op("Sz", s, 2)) < 1E-8
- end
-
- @testset "Index Values From Strings" begin
- @testset "Val function" begin
- s = siteind("Electron")
- @test val(s, "0") == 1
- @test val(s, "Up") == 2
- @test val(s, "Dn") == 3
- @test val(s, "UpDn") == 4
+
+ @testset "StateName methods" begin
+ @test StateName(ITensors.SmallString("a")) == StateName("a")
+ @test ITensors.name(StateName("a")) == ITensors.SmallString("a")
end
- @testset "Strings in ITensor get and set" begin
- s = siteind("S=1"; conserve_qns=true)
- T = ITensor(s', dag(s))
- T[s' => "Up", s => "Up"] = +1.0
- T[s' => "Z0", s => "Z0"] = +2.0
- T[s' => "Dn", s => "Dn"] = -1.0
- @test T[1, 1] ≈ +1.0
- @test T[2, 2] ≈ +2.0
- @test T[3, 3] ≈ -1.0
-
- o = onehot(s => "Z0")
- @test vector(o) ≈ [0, 1, 0]
+ @testset "Regression test for state overload" begin
+ ITensors.space(::SiteType"Xev") = 8
+ function ITensors.state(::StateName"0", ::SiteType"Xev")
+ return [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+ end
+ s = siteind("Xev")
+ @test state(s, "0") ≈ ITensor([1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], s)
end
- end
- @testset "state with variable dimension" begin
- ITensors.space(::SiteType"MyQudit"; dim=2) = dim
+ @testset "function applied to a gate" begin
+ s = siteinds("Qubit", 2)
- function ITensors.state(::StateName{N}, ::SiteType"MyQudit", s::Index) where {N}
- n = parse(Int, String(N))
- st = zeros(dim(s))
- st[n + 1] = 1.0
- return itensor(st, s)
- end
+ θ = 0.1
+ rx = array(op("Rx", s[1]; θ = 0.1))
+ exp_rx = exp(rx)
+ gtest = op(x -> exp(x), "Rx", s[1]; θ = 0.1)
+ @test exp_rx ≈ array(op(x -> exp(x), "Rx", s[1]; θ = 0.1))
+ @test exp_rx ≈ array(op(x -> exp(x), ("Rx", 1, (θ = 0.1,)), s))
- s = siteind("MyQudit"; dim=3)
- v0 = state(s, "0")
- v1 = state(s, "1")
- v2 = state(s, "2")
- @test v0 == state("0", s)
- @test v1 == state("1", s)
- @test v2 == state("2", s)
- @test dim(v0) == 3
- @test dim(v1) == 3
- @test dim(v2) == 3
- @test v0[s => 1] == 1
- @test v0[s => 2] == 0
- @test v0[s => 3] == 0
- @test v1[s => 1] == 0
- @test v1[s => 2] == 1
- @test v1[s => 3] == 0
- @test v2[s => 1] == 0
- @test v2[s => 2] == 0
- @test v2[s => 3] == 1
- @test_throws BoundsError state(s, "3")
- end
-
- @testset "state with parameters" begin
- ITensors.state(::StateName"phase", ::SiteType"Qubit"; θ::Real) = [cos(θ), sin(θ)]
- s = siteind("Qubit")
- @test state("phase", s; θ=π / 6) ≈ itensor([cos(π / 6), sin(π / 6)], s)
- end
-
- @testset "state with variable dimension (deprecated)" begin
- ITensors.space(::SiteType"MyQudit2"; dim=2) = dim
-
- # XXX: This syntax is deprecated, only testing for
- # backwards compatibility. Should return the
- # ITensor `itensor(st, s)`.
- function ITensors.state(::StateName{N}, ::SiteType"MyQudit2", s::Index) where {N}
- n = parse(Int, String(N))
- st = zeros(dim(s))
- st[n + 1] = 1.0
- return st
+ cx = 0.1 * reshape(array(op("CX", s[1], s[2])), (4, 4))
+ exp_cx = reshape(exp(cx), (2, 2, 2, 2))
+ @test exp_cx ≈ array(op(x -> exp(0.1 * x), "CX", s[1], s[2]))
+ @test exp_cx ≈ array(op(x -> exp(0.1 * x), ("CX", (1, 2)), s))
end
- s = siteind("MyQudit2"; dim=3)
- v0 = state(s, "0")
- v1 = state(s, "1")
- v2 = state(s, "2")
- @test v0 == state("0", s)
- @test v1 == state("1", s)
- @test v2 == state("2", s)
- @test dim(v0) == 3
- @test dim(v1) == 3
- @test dim(v2) == 3
- @test v0[s => 1] == 1
- @test v0[s => 2] == 0
- @test v0[s => 3] == 0
- @test v1[s => 1] == 0
- @test v1[s => 2] == 1
- @test v1[s => 3] == 0
- @test v2[s => 1] == 0
- @test v2[s => 2] == 0
- @test v2[s => 3] == 1
- @test_throws BoundsError state(s, "3")
- end
-
- @testset "StateName methods" begin
- @test StateName(ITensors.SmallString("a")) == StateName("a")
- @test ITensors.name(StateName("a")) == ITensors.SmallString("a")
- end
-
- @testset "Regression test for state overload" begin
- ITensors.space(::SiteType"Xev") = 8
- function ITensors.state(::StateName"0", ::SiteType"Xev")
- return [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
+ @testset "Haar-random unitary RandomUnitary" begin
+ s = siteinds(2, 3)
+
+ U = op("RandomUnitary", s, 1, 2)
+ @test eltype(U) == ComplexF64
+ @test order(U) == 4
+ @test is_unitary(U; rtol = 1.0e-15)
+
+ U = op("RandomUnitary", s, 1, 2, 3)
+ @test eltype(U) == ComplexF64
+ @test order(U) == 6
+ @test is_unitary(U; rtol = 1.0e-15)
+
+ U = op("RandomUnitary", s, 1, 2; eltype = Float64)
+ @test eltype(U) == Float64
+ @test order(U) == 4
+ @test is_unitary(U; rtol = 1.0e-15)
+
+ U = op("RandomUnitary", s, 1, 2, 3; eltype = Float64)
+ @test eltype(U) == Float64
+ @test order(U) == 6
+ @test is_unitary(U; rtol = 1.0e-15)
end
- s = siteind("Xev")
- @test state(s, "0") ≈ ITensor([1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], s)
- end
-
- @testset "function applied to a gate" begin
- s = siteinds("Qubit", 2)
-
- θ = 0.1
- rx = array(op("Rx", s[1]; θ=0.1))
- exp_rx = exp(rx)
- gtest = op(x -> exp(x), "Rx", s[1]; θ=0.1)
- @test exp_rx ≈ array(op(x -> exp(x), "Rx", s[1]; θ=0.1))
- @test exp_rx ≈ array(op(x -> exp(x), ("Rx", 1, (θ=0.1,)), s))
-
- cx = 0.1 * reshape(array(op("CX", s[1], s[2])), (4, 4))
- exp_cx = reshape(exp(cx), (2, 2, 2, 2))
- @test exp_cx ≈ array(op(x -> exp(0.1 * x), "CX", s[1], s[2]))
- @test exp_cx ≈ array(op(x -> exp(0.1 * x), ("CX", (1, 2)), s))
- end
-
- @testset "Haar-random unitary RandomUnitary" begin
- s = siteinds(2, 3)
-
- U = op("RandomUnitary", s, 1, 2)
- @test eltype(U) == ComplexF64
- @test order(U) == 4
- @test is_unitary(U; rtol=1e-15)
-
- U = op("RandomUnitary", s, 1, 2, 3)
- @test eltype(U) == ComplexF64
- @test order(U) == 6
- @test is_unitary(U; rtol=1e-15)
-
- U = op("RandomUnitary", s, 1, 2; eltype=Float64)
- @test eltype(U) == Float64
- @test order(U) == 4
- @test is_unitary(U; rtol=1e-15)
-
- U = op("RandomUnitary", s, 1, 2, 3; eltype=Float64)
- @test eltype(U) == Float64
- @test order(U) == 6
- @test is_unitary(U; rtol=1e-15)
- end
end
diff --git a/test/base/test_svd.jl b/test/base/test_svd.jl
index 454cb5f328..3efde0ce63 100644
--- a/test/base/test_svd.jl
+++ b/test/base/test_svd.jl
@@ -7,233 +7,233 @@ using Suppressor
include(joinpath(@__DIR__, "utils", "util.jl"))
@testset "SVD Algorithms" begin
- @testset "Matrix With Zero Sing Val" begin
- M = [
- 1.0 2.0 5.0 4.0
- 1.0 1.0 1.0 1.0
- 0.0 0.5 0.5 1.0
- 0.0 1.0 1.0 2.0
- ]
- U, S, V = NDTensors.svd_recursive(M)
- @test norm(U * LinearAlgebra.Diagonal(S) * V' - M) < 1E-13
- end
-
- @testset "Real Matrix" begin
- M = rand(10, 20)
- U, S, V = NDTensors.svd_recursive(M)
- @test norm(U * LinearAlgebra.Diagonal(S) * V' - M) < 1E-12
-
- M = rand(20, 10)
- U, S, V = NDTensors.svd_recursive(M)
- @test norm(U * LinearAlgebra.Diagonal(S) * V' - M) < 1E-12
- end
-
- @testset "Cplx Matrix" begin
- M = rand(ComplexF64, 10, 15)
- U, S, V = NDTensors.svd_recursive(M)
- @test norm(U * LinearAlgebra.Diagonal(S) * V' - M) < 1E-13
-
- M = rand(ComplexF64, 15, 10)
- U, S, V = NDTensors.svd_recursive(M)
- @test norm(U * LinearAlgebra.Diagonal(S) * V' - M) < 1E-13
- end
-
- @testset "Regression Test 1" begin
- # Implementation of the SVD was giving low
- # accuracy for this case
- M = rand(2, 2, 2, 2)
-
- M[:, :, 1, 1] = [
- 7.713134067177845 -0.16367628720441685
- -1.5253996568409225 1.3577749944302373
- ]
-
- M[:, :, 2, 1] = [
- 0.0 -2.1219889218225276
- -8.320068013774126 0.43565608213298096
- ]
-
- M[:, :, 1, 2] = [
- 0.0 -8.662721825820825
- 0.0 -0.46817091771736885
- ]
-
- M[:, :, 2, 2] = [
- 0.0 0.0
- 0.0 -8.159570989998151
- ]
-
- t1 = Index(2, "t1")
- t2 = Index(2, "t2")
- u1 = Index(2, "u1")
- u2 = Index(2, "u2")
-
- T = itensor(M, t1, t2, u1, u2)
-
- U, S, V = svd(T, (u1, t1))
- @test norm(U * S * V - T) / norm(T) < 1E-10
- end
-
- @testset "svd with empty left or right indices" for space in
- (2, [QN(0, 2) => 1, QN(1, 2) => 1]),
- cutoff in (nothing, 1e-15),
- _eltype in (Float32, Float64, ComplexF32, ComplexF64)
-
- i = Index(space)
- j = Index(space)
- A = random_itensor(_eltype, i, j)
-
- U, S, V = svd(A, i, j; cutoff)
- @test eltype(U) <: _eltype
- @test eltype(S) <: real(_eltype)
- @test eltype(V) <: _eltype
- @test U * S * V ≈ A
- @test hassameinds(uniqueinds(U, S), A)
- @test isempty(uniqueinds(V, S))
- @test dim(U) == dim(A)
- @test dim(S) == 1
- @test dim(V) == 1
- @test order(U) == order(A) + 1
- @test order(S) == 2
- @test order(V) == 1
-
- U, S, V = svd(A, (); cutoff)
- @test eltype(U) <: _eltype
- @test eltype(S) <: real(_eltype)
- @test eltype(V) <: _eltype
- @test U * S * V ≈ A
- @test hassameinds(uniqueinds(V, S), A)
- @test isempty(uniqueinds(U, S))
- @test dim(U) == 1
- @test dim(S) == 1
- @test dim(V) == dim(A)
- @test order(U) == 1
- @test order(S) == 2
- @test order(V) == order(A) + 1
-
- @test_throws ErrorException svd(A)
- end
-
- @testset "factorize with empty left or right indices" for space in (
- 2, [QN(0, 2) => 1, QN(1, 2) => 1]
- ),
- cutoff in (nothing, 1e-15)
-
- i = Index(space)
- j = Index(space)
- A = random_itensor(i, j)
-
- X, Y = factorize(A, i, j; cutoff)
- @test X * Y ≈ A
- @test hassameinds(uniqueinds(X, Y), A)
- @test isempty(uniqueinds(Y, X))
- @test dim(X) == dim(A)
- @test dim(Y) == 1
- @test order(X) == order(A) + 1
- @test order(Y) == 1
-
- X, Y = factorize(A, (); cutoff)
- @test X * Y ≈ A
- @test hassameinds(uniqueinds(Y, X), A)
- @test isempty(uniqueinds(X, Y))
- @test dim(X) == 1
- @test dim(Y) == dim(A)
- @test order(X) == 1
- @test order(Y) == order(A) + 1
-
- @test_throws ErrorException factorize(A)
- end
-
- @testset "svd with empty left and right indices" for cutoff in (nothing, 1e-15)
- A = ITensor(3.4)
-
- U, S, V = svd(A, (); cutoff)
- @test U * S * V ≈ A
- @test isempty(uniqueinds(U, S))
- @test isempty(uniqueinds(V, S))
- @test dim(U) == 1
- @test dim(S) == 1
- @test dim(V) == 1
- @test order(U) == 1
- @test order(S) == 2
- @test order(V) == 1
-
- @test_throws ErrorException svd(A)
- end
-
- @testset "factorize with empty left and right indices" for cutoff in (nothing, 1e-15)
- A = ITensor(3.4)
-
- X, Y = factorize(A, (); cutoff)
- @test X * Y ≈ A
- @test isempty(uniqueinds(X, Y))
- @test isempty(uniqueinds(Y, X))
- @test dim(X) == 1
- @test dim(Y) == 1
- @test order(X) == 1
- @test order(Y) == 1
-
- @test_throws ErrorException factorize(A)
- end
-
- @testset "svd with single precision element type" for eltype in (Float32, ComplexF32),
- space in (2, [QN(0) => 1, QN(1) => 1])
-
- i = Index(space)
- A = random_itensor(eltype, i', dag(i))
- @test Base.eltype(A) === eltype
- U, S, V = svd(A, i'; maxdim=1)
- @test Base.eltype(U) === eltype
- @test Base.eltype(S) === real(eltype)
- @test Base.eltype(V) === eltype
- end
-
- @testset "svd arrow directions" begin
- l1, l2 = Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags="l1", dir=ITensors.In),
- Index(QN("Sz", 2) => 1, QN("Sz", 1) => 1; tags="l2", dir=ITensors.Out)
- r1, r2, r3 = Index(QN("Sz", -2) => 1, QN("Sz", 1) => 1; tags="r1", dir=ITensors.Out),
- Index(QN("Sz", 2) => 1, QN("Sz", 1) => 1; tags="r2", dir=ITensors.In),
- Index(QN("Sz", -2) => 1, QN("Sz", 1) => 1; tags="r3", dir=ITensors.In)
- A = random_itensor(l1, l2, r1, r2, r3)
-
- for leftdir in [ITensors.Out, ITensors.In]
- for rightdir in [ITensors.Out, ITensors.In]
- U, S, V = svd(A, l1, l2; leftdir, rightdir)
- s1, s2 = inds(S)
- @test dir(s1) == leftdir
- @test dir(s2) == rightdir
- @test norm(U * S * V - A) <= 1e-14
- end
+ @testset "Matrix With Zero Sing Val" begin
+ M = [
+ 1.0 2.0 5.0 4.0
+ 1.0 1.0 1.0 1.0
+ 0.0 0.5 0.5 1.0
+ 0.0 1.0 1.0 2.0
+ ]
+ U, S, V = NDTensors.svd_recursive(M)
+ @test norm(U * LinearAlgebra.Diagonal(S) * V' - M) < 1.0e-13
end
- for dir in [ITensors.Out, ITensors.In]
- for arrayt in (Array, JLArray)
- A′ = adapt(arrayt, A)
- L, R, spec = ITensors.factorize_svd(A, l1, l2; dir, ortho="none")
- @test datatype(L) == datatype(A)
- @test datatype(R) == datatype(A)
- @test dir == ITensors.dir(commonind(L, R))
- @test norm(L * R - A) <= 1e-14
- end
+ @testset "Real Matrix" begin
+ M = rand(10, 20)
+ U, S, V = NDTensors.svd_recursive(M)
+ @test norm(U * LinearAlgebra.Diagonal(S) * V' - M) < 1.0e-12
+
+ M = rand(20, 10)
+ U, S, V = NDTensors.svd_recursive(M)
+ @test norm(U * LinearAlgebra.Diagonal(S) * V' - M) < 1.0e-12
+ end
+
+ @testset "Cplx Matrix" begin
+ M = rand(ComplexF64, 10, 15)
+ U, S, V = NDTensors.svd_recursive(M)
+ @test norm(U * LinearAlgebra.Diagonal(S) * V' - M) < 1.0e-13
+
+ M = rand(ComplexF64, 15, 10)
+ U, S, V = NDTensors.svd_recursive(M)
+ @test norm(U * LinearAlgebra.Diagonal(S) * V' - M) < 1.0e-13
+ end
+
+ @testset "Regression Test 1" begin
+ # Implementation of the SVD was giving low
+ # accuracy for this case
+ M = rand(2, 2, 2, 2)
+
+ M[:, :, 1, 1] = [
+ 7.713134067177845 -0.16367628720441685
+ -1.5253996568409225 1.3577749944302373
+ ]
+
+ M[:, :, 2, 1] = [
+ 0.0 -2.1219889218225276
+ -8.320068013774126 0.43565608213298096
+ ]
+
+ M[:, :, 1, 2] = [
+ 0.0 -8.662721825820825
+ 0.0 -0.46817091771736885
+ ]
+
+ M[:, :, 2, 2] = [
+ 0.0 0.0
+ 0.0 -8.159570989998151
+ ]
+
+ t1 = Index(2, "t1")
+ t2 = Index(2, "t2")
+ u1 = Index(2, "u1")
+ u2 = Index(2, "u2")
+
+ T = itensor(M, t1, t2, u1, u2)
+
+ U, S, V = svd(T, (u1, t1))
+ @test norm(U * S * V - T) / norm(T) < 1.0e-10
+ end
+
+ @testset "svd with empty left or right indices" for space in
+ (2, [QN(0, 2) => 1, QN(1, 2) => 1]),
+ cutoff in (nothing, 1.0e-15),
+ _eltype in (Float32, Float64, ComplexF32, ComplexF64)
+
+ i = Index(space)
+ j = Index(space)
+ A = random_itensor(_eltype, i, j)
+
+ U, S, V = svd(A, i, j; cutoff)
+ @test eltype(U) <: _eltype
+ @test eltype(S) <: real(_eltype)
+ @test eltype(V) <: _eltype
+ @test U * S * V ≈ A
+ @test hassameinds(uniqueinds(U, S), A)
+ @test isempty(uniqueinds(V, S))
+ @test dim(U) == dim(A)
+ @test dim(S) == 1
+ @test dim(V) == 1
+ @test order(U) == order(A) + 1
+ @test order(S) == 2
+ @test order(V) == 1
+
+ U, S, V = svd(A, (); cutoff)
+ @test eltype(U) <: _eltype
+ @test eltype(S) <: real(_eltype)
+ @test eltype(V) <: _eltype
+ @test U * S * V ≈ A
+ @test hassameinds(uniqueinds(V, S), A)
+ @test isempty(uniqueinds(U, S))
+ @test dim(U) == 1
+ @test dim(S) == 1
+ @test dim(V) == dim(A)
+ @test order(U) == 1
+ @test order(S) == 2
+ @test order(V) == order(A) + 1
+
+ @test_throws ErrorException svd(A)
end
- end
-
- # TODO: remove this test, it takes a long time
- ## @testset "Ill-conditioned matrix" begin
- ## d = 5000
- ## i = Index(d, "i")
- ## T = itensor(make_illconditioned_matrix(dim(i)), i', i)
-
- ## @suppress begin
- ## F = svd(T, i'; alg="divide_and_conquer")
- ## end
- ## # Depending on the LAPACK implementation,
- ## # this sometimes works so don't test it
- ## #@test isnothing(F)
-
- ## # XXX: This fails on Windows, removing for now.
- ## # F = svd(T, i'; alg="qr_iteration")
- ## # @test !isnothing(F)
- ## # @test F.U * F.S * F.V ≈ T
- ## end
+
+ @testset "factorize with empty left or right indices" for space in (
+ 2, [QN(0, 2) => 1, QN(1, 2) => 1],
+ ),
+ cutoff in (nothing, 1.0e-15)
+
+ i = Index(space)
+ j = Index(space)
+ A = random_itensor(i, j)
+
+ X, Y = factorize(A, i, j; cutoff)
+ @test X * Y ≈ A
+ @test hassameinds(uniqueinds(X, Y), A)
+ @test isempty(uniqueinds(Y, X))
+ @test dim(X) == dim(A)
+ @test dim(Y) == 1
+ @test order(X) == order(A) + 1
+ @test order(Y) == 1
+
+ X, Y = factorize(A, (); cutoff)
+ @test X * Y ≈ A
+ @test hassameinds(uniqueinds(Y, X), A)
+ @test isempty(uniqueinds(X, Y))
+ @test dim(X) == 1
+ @test dim(Y) == dim(A)
+ @test order(X) == 1
+ @test order(Y) == order(A) + 1
+
+ @test_throws ErrorException factorize(A)
+ end
+
+ @testset "svd with empty left and right indices" for cutoff in (nothing, 1.0e-15)
+ A = ITensor(3.4)
+
+ U, S, V = svd(A, (); cutoff)
+ @test U * S * V ≈ A
+ @test isempty(uniqueinds(U, S))
+ @test isempty(uniqueinds(V, S))
+ @test dim(U) == 1
+ @test dim(S) == 1
+ @test dim(V) == 1
+ @test order(U) == 1
+ @test order(S) == 2
+ @test order(V) == 1
+
+ @test_throws ErrorException svd(A)
+ end
+
+ @testset "factorize with empty left and right indices" for cutoff in (nothing, 1.0e-15)
+ A = ITensor(3.4)
+
+ X, Y = factorize(A, (); cutoff)
+ @test X * Y ≈ A
+ @test isempty(uniqueinds(X, Y))
+ @test isempty(uniqueinds(Y, X))
+ @test dim(X) == 1
+ @test dim(Y) == 1
+ @test order(X) == 1
+ @test order(Y) == 1
+
+ @test_throws ErrorException factorize(A)
+ end
+
+ @testset "svd with single precision element type" for eltype in (Float32, ComplexF32),
+ space in (2, [QN(0) => 1, QN(1) => 1])
+
+ i = Index(space)
+ A = random_itensor(eltype, i', dag(i))
+ @test Base.eltype(A) === eltype
+ U, S, V = svd(A, i'; maxdim = 1)
+ @test Base.eltype(U) === eltype
+ @test Base.eltype(S) === real(eltype)
+ @test Base.eltype(V) === eltype
+ end
+
+ @testset "svd arrow directions" begin
+ l1, l2 = Index(QN("Sz", -1) => 1, QN("Sz", 1) => 1; tags = "l1", dir = ITensors.In),
+ Index(QN("Sz", 2) => 1, QN("Sz", 1) => 1; tags = "l2", dir = ITensors.Out)
+ r1, r2, r3 = Index(QN("Sz", -2) => 1, QN("Sz", 1) => 1; tags = "r1", dir = ITensors.Out),
+ Index(QN("Sz", 2) => 1, QN("Sz", 1) => 1; tags = "r2", dir = ITensors.In),
+ Index(QN("Sz", -2) => 1, QN("Sz", 1) => 1; tags = "r3", dir = ITensors.In)
+ A = random_itensor(l1, l2, r1, r2, r3)
+
+ for leftdir in [ITensors.Out, ITensors.In]
+ for rightdir in [ITensors.Out, ITensors.In]
+ U, S, V = svd(A, l1, l2; leftdir, rightdir)
+ s1, s2 = inds(S)
+ @test dir(s1) == leftdir
+ @test dir(s2) == rightdir
+ @test norm(U * S * V - A) <= 1.0e-14
+ end
+ end
+
+ for dir in [ITensors.Out, ITensors.In]
+ for arrayt in (Array, JLArray)
+ A′ = adapt(arrayt, A)
+ L, R, spec = ITensors.factorize_svd(A, l1, l2; dir, ortho = "none")
+ @test datatype(L) == datatype(A)
+ @test datatype(R) == datatype(A)
+ @test dir == ITensors.dir(commonind(L, R))
+ @test norm(L * R - A) <= 1.0e-14
+ end
+ end
+ end
+
+ # TODO: remove this test, it takes a long time
+ ## @testset "Ill-conditioned matrix" begin
+ ## d = 5000
+ ## i = Index(d, "i")
+ ## T = itensor(make_illconditioned_matrix(dim(i)), i', i)
+
+ ## @suppress begin
+ ## F = svd(T, i'; alg="divide_and_conquer")
+ ## end
+ ## # Depending on the LAPACK implementation,
+ ## # this sometimes works so don't test it
+ ## #@test isnothing(F)
+
+ ## # XXX: This fails on Windows, removing for now.
+ ## # F = svd(T, i'; alg="qr_iteration")
+ ## # @test !isnothing(F)
+ ## # @test F.U * F.S * F.V ≈ T
+ ## end
end
diff --git a/test/base/test_trg.jl b/test/base/test_trg.jl
index 84994b0fb3..e6f502b145 100644
--- a/test/base/test_trg.jl
+++ b/test/base/test_trg.jl
@@ -8,17 +8,17 @@ include(joinpath(pkgdir(ITensors), "examples", "src", "trg.jl"))
include(joinpath(pkgdir(ITensors), "examples", "src", "2d_classical_ising.jl"))
@testset "trg" begin
- # Make Ising model partition function
- β = 1.1 * βc
- d = 2
- s = Index(d)
- l = addtags(s, "left")
- u = addtags(s, "up")
- T = ising_mpo(l, u, β)
+ # Make Ising model partition function
+ β = 1.1 * βc
+ d = 2
+ s = Index(d)
+ l = addtags(s, "left")
+ u = addtags(s, "up")
+ T = ising_mpo(l, u, β)
- χmax = 20
- nsteps = 20
- κ, T = trg(T; χmax=χmax, nsteps=nsteps)
+ χmax = 20
+ nsteps = 20
+ κ, T = trg(T; χmax = χmax, nsteps = nsteps)
- @test κ ≈ exp(-β * ising_free_energy(β)) atol = 1e-4
+ @test κ ≈ exp(-β * ising_free_energy(β)) atol = 1.0e-4
end
diff --git a/test/ext/ITensorsChainRulesCoreExt/test_chainrules.jl b/test/ext/ITensorsChainRulesCoreExt/test_chainrules.jl
index 0bb2b43bbf..e7e257fe19 100644
--- a/test/ext/ITensorsChainRulesCoreExt/test_chainrules.jl
+++ b/test/ext/ITensorsChainRulesCoreExt/test_chainrules.jl
@@ -13,511 +13,521 @@ using Zygote: ZygoteRuleConfig, gradient
Random.seed!(1234)
@testset "ChainRules rrules: basic ITensor operations" begin
- i = Index(2, "i")
- j = Index(2, "j")
- A = random_itensor(i', dag(i))
- V = random_itensor(i)
- Ac = random_itensor(ComplexF64, i', dag(i))
- B = random_itensor(i', dag(i))
- C = ITensor(3.4)
- D = random_itensor(i', j)
-
- @testset "getindex, priming, tagging, ITensor constructors, dag, etc." begin
- test_rrule(getindex, ITensor(3.4); check_inferred=false)
- test_rrule(getindex, A, 1, 2; check_inferred=false)
- test_rrule(contract, A', A; check_inferred=false)
- test_rrule(*, 3.2, A; check_inferred=false)
- test_rrule(*, A, 4.3; check_inferred=false)
- test_rrule(+, A, B; check_inferred=false)
- test_rrule(prime, A; check_inferred=false)
- test_rrule(prime, A, 2; check_inferred=false)
- test_rrule(prime, A; fkwargs=(; tags="i"), check_inferred=false)
- test_rrule(prime, A; fkwargs=(; tags="x"), check_inferred=false)
- test_rrule(setprime, D, 2; check_inferred=false)
- test_rrule(noprime, D; check_inferred=false)
- test_rrule(replaceprime, A, 1 => 2; check_inferred=false)
- test_rrule(replaceprime, A, 1, 2; check_inferred=false)
- test_rrule(swapprime, A, 0 => 1; check_inferred=false)
- test_rrule(swapprime, A, 0, 1; check_inferred=false)
- test_rrule(addtags, A, "x"; check_inferred=false)
- test_rrule(addtags, A, "x"; fkwargs=(; plev=1), check_inferred=false)
- test_rrule(removetags, A, "i"; check_inferred=false)
- test_rrule(replacetags, A, "i" => "j"; check_inferred=false)
- test_rrule(replacetags, A, "i", "j"; check_inferred=false)
- test_rrule(settags, A, "x"; check_inferred=false)
- test_rrule(settags, A, "x"; fkwargs=(; plev=1), check_inferred=false)
- test_rrule(
- swaptags,
- random_itensor(Index(2, "i"), Index(2, "j")),
- "i" => "j";
- check_inferred=false,
- )
- test_rrule(
- swaptags, random_itensor(Index(2, "i"), Index(2, "j")), "i", "j"; check_inferred=false
- )
- test_rrule(replaceind, A, i' => sim(i); check_inferred=false)
- test_rrule(replaceind, A, i', sim(i); check_inferred=false)
- test_rrule(replaceinds, A, (i, i') => (sim(i), sim(i)); check_inferred=false)
- test_rrule(replaceinds, A, (i, i'), (sim(i), sim(i)); check_inferred=false)
- test_rrule(swapind, A, i', i; check_inferred=false)
- test_rrule(swapinds, A, (i',), (i,); check_inferred=false)
- test_rrule(itensor, randn(2, 2), i', i; check_inferred=false)
- test_rrule(itensor, randn(2, 2), [i', i]; check_inferred=false)
- test_rrule(itensor, randn(4), i', i; check_inferred=false)
- test_rrule(ITensor, randn(2, 2), i', i; check_inferred=false)
- test_rrule(ITensor, randn(2, 2), [i', i]; check_inferred=false)
- test_rrule(ITensor, randn(4), i', i; check_inferred=false)
- test_rrule(ITensor, 2.3; check_inferred=false)
- test_rrule(dag, A; check_inferred=false)
- test_rrule(permute, A, reverse(inds(A)); check_inferred=false)
- end
-
- @testset "apply, contract" begin
- test_rrule(ZygoteRuleConfig(), apply, A, V; rrule_f=rrule_via_ad, check_inferred=false)
- f = function (A, B)
- AT = ITensor(A, i, j)
- BT = ITensor(B, j, i)
- return (BT * AT)[1]
- end
- args = (rand(2, 2), rand(2, 2))
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- args = (rand(4), rand(4))
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- args = (rand(4), rand(2, 2))
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- end
-
- @testset "contraction sequence" begin
- a, b, k, l, m, n, u, v = Index.([2, 3, 2, 3, 2, 3, 2, 3])
- args = (
- random_itensor(a, b, k),
- random_itensor(a, l, m),
- random_itensor(b, u, n),
- random_itensor(u, v),
- random_itensor(k, v),
- random_itensor(l, m, n),
- )
- f = (args...) -> contract([args...])[] # Left associative
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- seq = ITensors.optimal_contraction_sequence([args...])
- f = (args...) -> contract([args...]; sequence=seq)[] # sequence
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- end
-
- @testset "construction and contraction" begin
- f = function (x)
- b = itensor([0, 0, 1, 1], i, j)
- k = itensor([0, 1, 0, 0], i, j)
- T = itensor([0 x x^2 1; 0 0 sin(x) 0; 0 cos(x) 0 exp(x); x 0 0 0], i', j', i, j)
- return x * real((b' * T * k)[])
- end
- args = (0.3,)
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
-
- #f = function (x)
- # b = itensor([0, 0, 1, 1], i, j)
- # k = itensor([0, 1, 0, 0], i, j)
- # T = itensor([0 x x^2 1; 0 0 sin(x) 0; 0 cos(x) 0 exp(x); x 0 0 0], i, j, i', j')
- # return x * real((b' * T * k)[])
- #end
- #args = (0.3,)
- #test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- end
-
- @testset "scalar operations" begin
- f = x -> sin(scalar(x)^3)
- args = (C,)
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
-
- f = x -> sin(x[]^3)
- args = (C,)
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- end
-
- @testset "adjoint" begin
- f = adjoint
- args = (A,)
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- end
-
- @testset "contraction, priming, tagging + getindex" begin
- f = (x, y) -> (x * y)[1, 1]
- args = (A', A)
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
-
- f = x -> prime(x, 2)[1, 1]
- args = (A,)
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
-
- f = x -> x'[1, 1]
- args = (A,)
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
-
- f = x -> addtags(x, "x")[1, 1]
- args = (A,)
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
-
- f = x -> (x' * x)[1, 1]
- args = (A,)
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
-
- f = x -> (prime(x) * x)[1, 1]
- args = (A,)
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
-
- f = x -> ((x'' * x') * x)[1, 1]
- args = (A,)
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
-
- f = x -> (x'' * (x' * x))[1, 1]
- args = (A,)
-
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- f = (x, y, z) -> (x * y * z)[1, 1]
- args = (A'', A', A)
-
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- f = x -> (x'' * x' * x)[1, 1]
- args = (A,)
-
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- f = x -> (x''' * x'' * x' * x)[1, 1]
- args = (A,)
-
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- f = x -> (x''' * x'' * x' * x)[1, 1]
- args = (A,)
-
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- f = (x, y) -> (x + y)[1, 1]
- args = (A, B)
-
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- f = x -> (x + x)[1, 1]
- args = (A,)
-
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- f = x -> (2x)[1, 1]
- args = (A,)
-
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- f = x -> (x + 2x)[1, 1]
- args = (A,)
-
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
-
- f = x -> (x + 2 * mapprime(x' * x, 2 => 1))[1, 1]
- args = (A,)
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
-
- f = (x, y) -> (x * y)[]
- args = (A, δ(dag(inds(A))))
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
-
- f = x -> (x * x)[]
- args = (A,)
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
-
- f = x -> (x * δ(dag(inds(x))))[]
- args = (A,)
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- end
-
- @testset "delta contractions" begin
- f = function (x)
- y = x' * x
- tr = δ(dag(inds(y)))
- return (y * tr)[]
- end
- args = (A,)
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
-
- f = function (x)
- y = x'' * x' * x
- tr = δ(dag(inds(y)))
- return (y * tr)[]
- end
- args = (A,)
-
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- f = x -> (x ^ 2 * δ((i', i)))[1, 1]
- args = (6.2,)
-
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- f = x -> (x ^ 2 * δ(i', i))[1, 1]
- args = (5.2,)
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- end
-
- @testset "ITensor constructors" begin
- f = x -> itensor([x^2 x; x^3 x^4], i', i)
- args = (2.54,)
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
-
- f = x -> ITensor([x^2 x; x^3 x^4], i', i)
- args = (2.1,)
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
-
- f = x -> ITensor(x)
- args = (2.12,)
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- end
-
- @testset "ITensor constructor and contraction" begin
- f = function (x)
- T = itensor([x^2 sin(x); x^2 exp(-2x)], j', dag(j))
- return real((dag(T) * T)[])
- end
- args = (2.8,)
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
-
- args = (2.8 + 3.1im,)
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
-
- f = function (x)
- v = itensor([exp(-3.2x), cos(2x^2)], j)
- T = itensor([x^2 sin(x); x^2 exp(-2x)], j', dag(j))
- return real((dag(v') * T * v)[])
- end
- args = (2.8,)
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- #args = (2.8 + 3.1im,)
- #test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- f = function (x)
- return real((x ^ 3 * ITensor([sin(x) exp(-2x); 3x ^ 3 x + x ^ 2], j', dag(j)))[1, 1])
- end
- args = (3.4 + 2.3im,)
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- end
-
- @testset "priming" begin
- f = x -> prime(permute(x, reverse(inds(x))))[1, 1]
- args = (A,)
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
-
- f = x -> prime(x; plev=1)[1, 1]
- args = (A,)
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- end
-
- @testset "ITensor inner" begin
- W = itensor([1 1] / √2, i)
- f = x -> inner(W', exp(x), W)
- args = (A,)
- test_rrule(
- ZygoteRuleConfig(),
- f,
- args...;
- rrule_f=rrule_via_ad,
- check_inferred=false,
- rtol=1e-3,
- atol=1e-3,
- )
-
- f = x -> inner(V', exp(x), V)
- args = (A,)
- test_rrule(
- ZygoteRuleConfig(),
- f,
- args...;
- rrule_f=rrule_via_ad,
- check_inferred=false,
- rtol=1e-4,
- atol=1e-4,
- )
- end
-
- @testset "issue 933" begin
- # https://github.com/ITensor/ITensors.jl/issues/933
- f2 = function (x, a)
- y = a + im * x
- return real(dag(y) * y)[]
- end
- a = random_itensor()
- f_itensor = x -> f2(x, a)
- f_number = x -> f2(x, a[])
- x = random_itensor()
- @test f_number(x[]) ≈ f_itensor(x)
- @test f_number'(x[]) ≈ f_itensor'(x)[]
- @test isreal(f_itensor'(x))
- end
-
- @testset "issue 969" begin
- i = Index(2)
- j = Index(3)
- A = random_itensor(i)
- B = random_itensor(j)
- f = function (x, y)
- d = δ(ind(x, 1), ind(y, 1))
- return (x * d * y)[]
- end
- args = (A, B)
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- end
-
- @testset "issue 1294" begin
- for i in (
- Index([QN() => 2]),
- Index([QN(0) => 1, QN(1) => 1]),
- Index([QN("SzParity", 1, 2) => 1, QN("SzParity", 0, 2) => 1]),
- )
- A = random_itensor(i', dag(i))
- B = random_itensor(i', dag(i))
-
- f(A, B) = dot(A, B)
- grad = gradient(f, A, B)
- @test grad[1] ≈ B
- @test grad[2] ≈ dag(A)
- end
- end
+ i = Index(2, "i")
+ j = Index(2, "j")
+ A = random_itensor(i', dag(i))
+ V = random_itensor(i)
+ Ac = random_itensor(ComplexF64, i', dag(i))
+ B = random_itensor(i', dag(i))
+ C = ITensor(3.4)
+ D = random_itensor(i', j)
+
+ @testset "getindex, priming, tagging, ITensor constructors, dag, etc." begin
+ test_rrule(getindex, ITensor(3.4); check_inferred = false)
+ test_rrule(getindex, A, 1, 2; check_inferred = false)
+ test_rrule(contract, A', A; check_inferred = false)
+ test_rrule(*, 3.2, A; check_inferred = false)
+ test_rrule(*, A, 4.3; check_inferred = false)
+ test_rrule(+, A, B; check_inferred = false)
+ test_rrule(prime, A; check_inferred = false)
+ test_rrule(prime, A, 2; check_inferred = false)
+ test_rrule(prime, A; fkwargs = (; tags = "i"), check_inferred = false)
+ test_rrule(prime, A; fkwargs = (; tags = "x"), check_inferred = false)
+ test_rrule(setprime, D, 2; check_inferred = false)
+ test_rrule(noprime, D; check_inferred = false)
+ test_rrule(replaceprime, A, 1 => 2; check_inferred = false)
+ test_rrule(replaceprime, A, 1, 2; check_inferred = false)
+ test_rrule(swapprime, A, 0 => 1; check_inferred = false)
+ test_rrule(swapprime, A, 0, 1; check_inferred = false)
+ test_rrule(addtags, A, "x"; check_inferred = false)
+ test_rrule(addtags, A, "x"; fkwargs = (; plev = 1), check_inferred = false)
+ test_rrule(removetags, A, "i"; check_inferred = false)
+ test_rrule(replacetags, A, "i" => "j"; check_inferred = false)
+ test_rrule(replacetags, A, "i", "j"; check_inferred = false)
+ test_rrule(settags, A, "x"; check_inferred = false)
+ test_rrule(settags, A, "x"; fkwargs = (; plev = 1), check_inferred = false)
+ test_rrule(
+ swaptags,
+ random_itensor(Index(2, "i"), Index(2, "j")),
+ "i" => "j";
+ check_inferred = false,
+ )
+ test_rrule(
+ swaptags, random_itensor(Index(2, "i"), Index(2, "j")), "i", "j"; check_inferred = false
+ )
+ test_rrule(replaceind, A, i' => sim(i); check_inferred = false)
+ test_rrule(replaceind, A, i', sim(i); check_inferred = false)
+ test_rrule(replaceinds, A, (i, i') => (sim(i), sim(i)); check_inferred = false)
+ test_rrule(replaceinds, A, (i, i'), (sim(i), sim(i)); check_inferred = false)
+ test_rrule(swapind, A, i', i; check_inferred = false)
+ test_rrule(swapinds, A, (i',), (i,); check_inferred = false)
+ test_rrule(itensor, randn(2, 2), i', i; check_inferred = false)
+ test_rrule(itensor, randn(2, 2), [i', i]; check_inferred = false)
+ test_rrule(itensor, randn(4), i', i; check_inferred = false)
+ test_rrule(ITensor, randn(2, 2), i', i; check_inferred = false)
+ test_rrule(ITensor, randn(2, 2), [i', i]; check_inferred = false)
+ test_rrule(ITensor, randn(4), i', i; check_inferred = false)
+ test_rrule(ITensor, 2.3; check_inferred = false)
+ test_rrule(dag, A; check_inferred = false)
+ test_rrule(permute, A, reverse(inds(A)); check_inferred = false)
+ end
+
+ @testset "apply, contract" begin
+ test_rrule(ZygoteRuleConfig(), apply, A, V; rrule_f = rrule_via_ad, check_inferred = false)
+ f = function (A, B)
+ AT = ITensor(A, i, j)
+ BT = ITensor(B, j, i)
+ return (BT * AT)[1]
+ end
+ args = (rand(2, 2), rand(2, 2))
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ args = (rand(4), rand(4))
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ args = (rand(4), rand(2, 2))
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ end
+
+ @testset "contraction sequence" begin
+ a, b, k, l, m, n, u, v = Index.([2, 3, 2, 3, 2, 3, 2, 3])
+ args = (
+ random_itensor(a, b, k),
+ random_itensor(a, l, m),
+ random_itensor(b, u, n),
+ random_itensor(u, v),
+ random_itensor(k, v),
+ random_itensor(l, m, n),
+ )
+ f = (args...) -> contract([args...])[] # Left associative
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ seq = ITensors.optimal_contraction_sequence([args...])
+ f = (args...) -> contract([args...]; sequence = seq)[] # sequence
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ end
+
+ @testset "construction and contraction" begin
+ f = function (x)
+ b = itensor([0, 0, 1, 1], i, j)
+ k = itensor([0, 1, 0, 0], i, j)
+ T = itensor([0 x x^2 1; 0 0 sin(x) 0; 0 cos(x) 0 exp(x); x 0 0 0], i', j', i, j)
+ return x * real((b' * T * k)[])
+ end
+ args = (0.3,)
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+
+ #f = function (x)
+ # b = itensor([0, 0, 1, 1], i, j)
+ # k = itensor([0, 1, 0, 0], i, j)
+ # T = itensor([0 x x^2 1; 0 0 sin(x) 0; 0 cos(x) 0 exp(x); x 0 0 0], i, j, i', j')
+ # return x * real((b' * T * k)[])
+ #end
+ #args = (0.3,)
+ #test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
+ end
+
+ @testset "scalar operations" begin
+ f = x -> sin(scalar(x)^3)
+ args = (C,)
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+
+ f = x -> sin(x[]^3)
+ args = (C,)
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ end
+
+ @testset "adjoint" begin
+ f = adjoint
+ args = (A,)
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ end
+
+ @testset "contraction, priming, tagging + getindex" begin
+ f = (x, y) -> (x * y)[1, 1]
+ args = (A', A)
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+
+ f = x -> prime(x, 2)[1, 1]
+ args = (A,)
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+
+ f = x -> x'[1, 1]
+ args = (A,)
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+
+ f = x -> addtags(x, "x")[1, 1]
+ args = (A,)
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+
+ f = x -> (x' * x)[1, 1]
+ args = (A,)
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+
+ f = x -> (prime(x) * x)[1, 1]
+ args = (A,)
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+
+ f = x -> ((x'' * x') * x)[1, 1]
+ args = (A,)
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+
+ f = x -> (x'' * (x' * x))[1, 1]
+ args = (A,)
+
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ f = (x, y, z) -> (x * y * z)[1, 1]
+ args = (A'', A', A)
+
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ f = x -> (x'' * x' * x)[1, 1]
+ args = (A,)
+
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ f = x -> (x''' * x'' * x' * x)[1, 1]
+ args = (A,)
+
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ f = x -> (x''' * x'' * x' * x)[1, 1]
+ args = (A,)
+
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ f = (x, y) -> (x + y)[1, 1]
+ args = (A, B)
+
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ f = x -> (x + x)[1, 1]
+ args = (A,)
+
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ f = x -> (2x)[1, 1]
+ args = (A,)
+
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ f = x -> (x + 2x)[1, 1]
+ args = (A,)
+
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+
+ f = x -> (x + 2 * mapprime(x' * x, 2 => 1))[1, 1]
+ args = (A,)
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+
+ f = (x, y) -> (x * y)[]
+ args = (A, δ(dag(inds(A))))
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+
+ f = x -> (x * x)[]
+ args = (A,)
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+
+ f = x -> (x * δ(dag(inds(x))))[]
+ args = (A,)
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ end
+
+ @testset "delta contractions" begin
+ f = function (x)
+ y = x' * x
+ tr = δ(dag(inds(y)))
+ return (y * tr)[]
+ end
+ args = (A,)
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+
+ f = function (x)
+ y = x'' * x' * x
+ tr = δ(dag(inds(y)))
+ return (y * tr)[]
+ end
+ args = (A,)
+
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ f = x -> (x^2 * δ((i', i)))[1, 1]
+ args = (6.2,)
+
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ f = x -> (x^2 * δ(i', i))[1, 1]
+ args = (5.2,)
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ end
+
+ @testset "ITensor constructors" begin
+ f = x -> itensor([x^2 x; x^3 x^4], i', i)
+ args = (2.54,)
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+
+ f = x -> ITensor([x^2 x; x^3 x^4], i', i)
+ args = (2.1,)
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+
+ f = x -> ITensor(x)
+ args = (2.12,)
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ end
+
+ @testset "ITensor constructor and contraction" begin
+ f = function (x)
+ T = itensor([x^2 sin(x); x^2 exp(-2x)], j', dag(j))
+ return real((dag(T) * T)[])
+ end
+ args = (2.8,)
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+
+ args = (2.8 + 3.1im,)
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+
+ f = function (x)
+ v = itensor([exp(-3.2x), cos(2x^2)], j)
+ T = itensor([x^2 sin(x); x^2 exp(-2x)], j', dag(j))
+ return real((dag(v') * T * v)[])
+ end
+ args = (2.8,)
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ #args = (2.8 + 3.1im,)
+ #test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
+ f = function (x)
+ return real((x^3 * ITensor([sin(x) exp(-2x); 3x^3 x + x^2], j', dag(j)))[1, 1])
+ end
+ args = (3.4 + 2.3im,)
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ end
+
+ @testset "priming" begin
+ f = x -> prime(permute(x, reverse(inds(x))))[1, 1]
+ args = (A,)
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+
+ f = x -> prime(x; plev = 1)[1, 1]
+ args = (A,)
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ end
+
+ @testset "ITensor inner" begin
+ W = itensor([1 1] / √2, i)
+ f = x -> inner(W', exp(x), W)
+ args = (A,)
+ test_rrule(
+ ZygoteRuleConfig(),
+ f,
+ args...;
+ rrule_f = rrule_via_ad,
+ check_inferred = false,
+ rtol = 1.0e-3,
+ atol = 1.0e-3,
+ )
+
+ f = x -> inner(V', exp(x), V)
+ args = (A,)
+ test_rrule(
+ ZygoteRuleConfig(),
+ f,
+ args...;
+ rrule_f = rrule_via_ad,
+ check_inferred = false,
+ rtol = 1.0e-4,
+ atol = 1.0e-4,
+ )
+ end
+
+ @testset "issue 933" begin
+ # https://github.com/ITensor/ITensors.jl/issues/933
+ f2 = function (x, a)
+ y = a + im * x
+ return real(dag(y) * y)[]
+ end
+ a = random_itensor()
+ f_itensor = x -> f2(x, a)
+ f_number = x -> f2(x, a[])
+ x = random_itensor()
+ @test f_number(x[]) ≈ f_itensor(x)
+ @test f_number'(x[]) ≈ f_itensor'(x)[]
+ @test isreal(f_itensor'(x))
+ end
+
+ @testset "issue 969" begin
+ i = Index(2)
+ j = Index(3)
+ A = random_itensor(i)
+ B = random_itensor(j)
+ f = function (x, y)
+ d = δ(ind(x, 1), ind(y, 1))
+ return (x * d * y)[]
+ end
+ args = (A, B)
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ end
+
+ @testset "issue 1294" begin
+ for i in (
+ Index([QN() => 2]),
+ Index([QN(0) => 1, QN(1) => 1]),
+ Index([QN("SzParity", 1, 2) => 1, QN("SzParity", 0, 2) => 1]),
+ )
+ A = random_itensor(i', dag(i))
+ B = random_itensor(i', dag(i))
+
+ f(A, B) = dot(A, B)
+ grad = gradient(f, A, B)
+ @test grad[1] ≈ B
+ @test grad[2] ≈ dag(A)
+ end
+ end
end
@testset "ChainRules rrules: op" begin
- s = siteinds("Qubit", 4)
-
- # RX
- args = (0.2,)
- for σ in [1, 2], σ′ in [1, 2]
- f = x -> op("Rx", s, 1; θ=x)[σ, σ′]
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- end
- # RY
- args = (0.2,)
- for σ in [1, 2], σ′ in [1, 2]
- f = x -> op("Ry", s, 1; θ=x)[σ, σ′]
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- end
- # RZ
- args = (0.2,)
- for σ in [1, 2], σ′ in [1, 2]
- f = x -> op("Rz", s, 1; ϕ=x)[σ, σ′]
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- end
- # Rn
- args = (0.2, 0.3, 0.4)
- for σ in [1, 2], σ′ in [1, 2]
- f = x -> op("Rn", s, 1; θ=x[1], ϕ=x[2], λ=x[3])[σ, σ′]
- test_rrule(ZygoteRuleConfig(), f, args; rrule_f=rrule_via_ad, check_inferred=false)
- end
-
- basis = vec(collect(Iterators.product(fill([1, 2], 2)...)))
- # CRx
- args = (0.2,)
- for σ in basis, σ′ in basis
- f = x -> op("CRx", s, (1, 2); θ=x)[σ..., σ′...]
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- end
-
- # CRy
- args = (0.2,)
- for σ in basis, σ′ in basis
- f = x -> op("CRy", s, (1, 2); θ=x)[σ..., σ′...]
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- end
-
- # CRz
- args = (0.2,)
- for σ in basis, σ′ in basis
- f = x -> op("CRz", s, (1, 2); ϕ=x)[σ..., σ′...]
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- end
- # Rn
- args = (0.2, 0.3, 0.4)
- for σ in basis, σ′ in basis
- f = x -> op("CRn", s, (1, 2); θ=x[1], ϕ=x[2], λ=x[3])[σ..., σ′...]
- test_rrule(ZygoteRuleConfig(), f, args; rrule_f=rrule_via_ad, check_inferred=false)
- end
-
- # Rxx
- args = (0.2,)
- for σ in basis, σ′ in basis
- f = x -> op("Rxx", s, (1, 2); ϕ=x)[σ..., σ′...]
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- end
- # Ryy
- args = (0.2,)
- for σ in basis, σ′ in basis
- f = x -> op("Ryy", s, (1, 2); ϕ=x)[σ..., σ′...]
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- end
- # Rzz
- args = (0.2,)
- for σ in basis, σ′ in basis
- f = x -> op("Rzz", s, (1, 2); ϕ=x)[σ..., σ′...]
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- end
-
- # algebra with non-parametric gates
- args = (0.2,)
- # addition
- for σ in [1, 2], σ′ in [1, 2]
- f = x -> x * op("H + Y", s[1])[σ, σ′]
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- end
- #subtraction
- for σ in [1, 2], σ′ in [1, 2]
- f = x -> x * op("H - Y", s[1])[σ, σ′]
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- end
- # product
- for σ in [1, 2], σ′ in [1, 2]
- f = x -> x * op("H * Y", s[1])[σ, σ′]
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- end
- # composite
- for σ in [1, 2], σ′ in [1, 2]
- f = x -> x * op("H + X * Y", s[1])[σ, σ′]
- test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- end
-
- ## algebra with parametric gates
- #args = (0.2,)
- ## addition
- #for σ in [1, 2], σ′ in [1, 2]
- # f = x -> x * op("H + Rx", s[1]; θ = x)[σ, σ′]
- # test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- #end
- ##subtraction
- #for σ in [1, 2], σ′ in [1, 2]
- # f = x -> x * op("H - Rx", s[1]; θ = x)[σ, σ′]
- # test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- #end
- ### product
- #for σ in [1, 2], σ′ in [1, 2]
- # f = x -> x * op("Rx * Y", s[1]; θ = x)[σ, σ′]
- # test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- #end
- ## composite
- #for σ in [1, 2], σ′ in [1, 2]
- # f = x -> x * op("Rx * Y - Ry", s[1]; θ = x)[σ, σ′]
- # test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- #end
- #
- ## two-qubit composite algebra with parametric gate
- #args = (0.2,)
- #for σ in basis, σ′ in basis
- # f = x -> op("Rxx + CX * CZ - Ryy", s, (1, 2); ϕ = x)[σ..., σ′...]
- # test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
- #end
-
- # functions
- f = x -> exp(ITensor(Op("Ry", 1; θ=x), q))[1, 1]
-
- # RX
- args = (0.2,)
- for σ in [1, 2], σ′ in [1, 2]
- f = x -> exp(ITensor(Op("Rx", 1; θ=x), s))[σ, σ′]
- test_rrule(
- ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false, atol=1e-6
- )
- end
-
- # RY
- args = (0.2,)
- for σ in [1, 2], σ′ in [1, 2]
- f = x -> exp(ITensor(Op("Ry", 1; θ=x), s))[σ, σ′]
- test_rrule(
- ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false, atol=1e-6
- )
- end
+ s = siteinds("Qubit", 4)
+
+ # RX
+ args = (0.2,)
+ for σ in [1, 2], σ′ in [1, 2]
+ f = x -> op("Rx", s, 1; θ = x)[σ, σ′]
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ end
+ # RY
+ args = (0.2,)
+ for σ in [1, 2], σ′ in [1, 2]
+ f = x -> op("Ry", s, 1; θ = x)[σ, σ′]
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ end
+ # RZ
+ args = (0.2,)
+ for σ in [1, 2], σ′ in [1, 2]
+ f = x -> op("Rz", s, 1; ϕ = x)[σ, σ′]
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ end
+ # Rn
+ args = (0.2, 0.3, 0.4)
+ for σ in [1, 2], σ′ in [1, 2]
+ f = x -> op("Rn", s, 1; θ = x[1], ϕ = x[2], λ = x[3])[σ, σ′]
+ test_rrule(ZygoteRuleConfig(), f, args; rrule_f = rrule_via_ad, check_inferred = false)
+ end
+
+ basis = vec(collect(Iterators.product(fill([1, 2], 2)...)))
+ # CRx
+ args = (0.2,)
+ for σ in basis, σ′ in basis
+ f = x -> op("CRx", s, (1, 2); θ = x)[σ..., σ′...]
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ end
+
+ # CRy
+ args = (0.2,)
+ for σ in basis, σ′ in basis
+ f = x -> op("CRy", s, (1, 2); θ = x)[σ..., σ′...]
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ end
+
+ # CRz
+ args = (0.2,)
+ for σ in basis, σ′ in basis
+ f = x -> op("CRz", s, (1, 2); ϕ = x)[σ..., σ′...]
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ end
+ # Rn
+ args = (0.2, 0.3, 0.4)
+ for σ in basis, σ′ in basis
+ f = x -> op("CRn", s, (1, 2); θ = x[1], ϕ = x[2], λ = x[3])[σ..., σ′...]
+ test_rrule(ZygoteRuleConfig(), f, args; rrule_f = rrule_via_ad, check_inferred = false)
+ end
+
+ # Rxx
+ args = (0.2,)
+ for σ in basis, σ′ in basis
+ f = x -> op("Rxx", s, (1, 2); ϕ = x)[σ..., σ′...]
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ end
+ # Ryy
+ args = (0.2,)
+ for σ in basis, σ′ in basis
+ f = x -> op("Ryy", s, (1, 2); ϕ = x)[σ..., σ′...]
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ end
+ # Rzz
+ args = (0.2,)
+ for σ in basis, σ′ in basis
+ f = x -> op("Rzz", s, (1, 2); ϕ = x)[σ..., σ′...]
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ end
+
+ # algebra with non-parametric gates
+ args = (0.2,)
+ # addition
+ for σ in [1, 2], σ′ in [1, 2]
+ f = x -> x * op("H + Y", s[1])[σ, σ′]
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ end
+ #subtraction
+ for σ in [1, 2], σ′ in [1, 2]
+ f = x -> x * op("H - Y", s[1])[σ, σ′]
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ end
+ # product
+ for σ in [1, 2], σ′ in [1, 2]
+ f = x -> x * op("H * Y", s[1])[σ, σ′]
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ end
+ # composite
+ for σ in [1, 2], σ′ in [1, 2]
+ f = x -> x * op("H + X * Y", s[1])[σ, σ′]
+ test_rrule(ZygoteRuleConfig(), f, args...; rrule_f = rrule_via_ad, check_inferred = false)
+ end
+
+ ## algebra with parametric gates
+ #args = (0.2,)
+ ## addition
+ #for σ in [1, 2], σ′ in [1, 2]
+ # f = x -> x * op("H + Rx", s[1]; θ = x)[σ, σ′]
+ # test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
+ #end
+ ##subtraction
+ #for σ in [1, 2], σ′ in [1, 2]
+ # f = x -> x * op("H - Rx", s[1]; θ = x)[σ, σ′]
+ # test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
+ #end
+ ### product
+ #for σ in [1, 2], σ′ in [1, 2]
+ # f = x -> x * op("Rx * Y", s[1]; θ = x)[σ, σ′]
+ # test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
+ #end
+ ## composite
+ #for σ in [1, 2], σ′ in [1, 2]
+ # f = x -> x * op("Rx * Y - Ry", s[1]; θ = x)[σ, σ′]
+ # test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
+ #end
+ #
+ ## two-qubit composite algebra with parametric gate
+ #args = (0.2,)
+ #for σ in basis, σ′ in basis
+ # f = x -> op("Rxx + CX * CZ - Ryy", s, (1, 2); ϕ = x)[σ..., σ′...]
+ # test_rrule(ZygoteRuleConfig(), f, args...; rrule_f=rrule_via_ad, check_inferred=false)
+ #end
+
+ # functions
+ f = x -> exp(ITensor(Op("Ry", 1; θ = x), q))[1, 1]
+
+ # RX
+ args = (0.2,)
+ for σ in [1, 2], σ′ in [1, 2]
+ f = x -> exp(ITensor(Op("Rx", 1; θ = x), s))[σ, σ′]
+ test_rrule(
+ ZygoteRuleConfig(),
+ f,
+ args...;
+ rrule_f = rrule_via_ad,
+ check_inferred = false,
+ atol = 1.0e-6,
+ )
+ end
+
+ # RY
+ args = (0.2,)
+ for σ in [1, 2], σ′ in [1, 2]
+ f = x -> exp(ITensor(Op("Ry", 1; θ = x), s))[σ, σ′]
+ test_rrule(
+ ZygoteRuleConfig(),
+ f,
+ args...;
+ rrule_f = rrule_via_ad,
+ check_inferred = false,
+ atol = 1.0e-6,
+ )
+ end
end
diff --git a/test/ext/ITensorsChainRulesCoreExt/utils/chainrulestestutils.jl b/test/ext/ITensorsChainRulesCoreExt/utils/chainrulestestutils.jl
index 0e25cee1cf..7f6a103e69 100644
--- a/test/ext/ITensorsChainRulesCoreExt/utils/chainrulestestutils.jl
+++ b/test/ext/ITensorsChainRulesCoreExt/utils/chainrulestestutils.jl
@@ -9,58 +9,58 @@ using Random
#
function FiniteDifferences.to_vec(A::ITensor)
- # TODO: generalize to sparse tensors
- # TODO: define `itensor([1.0])` as well
- # as `itensor([1.0], ())` to help with generic code.
- function vec_to_ITensor(x)
- return isempty(inds(A)) ? ITensor(x[]) : itensor(x, inds(A))
- end
- return vec(array(A)), vec_to_ITensor
+ # TODO: generalize to sparse tensors
+ # TODO: define `itensor([1.0])` as well
+ # as `itensor([1.0], ())` to help with generic code.
+ function vec_to_ITensor(x)
+ return isempty(inds(A)) ? ITensor(x[]) : itensor(x, inds(A))
+ end
+ return vec(array(A)), vec_to_ITensor
end
function FiniteDifferences.to_vec(x::Index)
- return (Bool[], _ -> x)
+ return (Bool[], _ -> x)
end
function FiniteDifferences.to_vec(x::Tuple{Vararg{Index}})
- return (Bool[], _ -> x)
+ return (Bool[], _ -> x)
end
function FiniteDifferences.to_vec(x::Vector{<:Index})
- return (Bool[], _ -> x)
+ return (Bool[], _ -> x)
end
-function FiniteDifferences.to_vec(x::Pair{<:Tuple{Vararg{Index}},<:Tuple{Vararg{Index}}})
- return (Bool[], _ -> x)
+function FiniteDifferences.to_vec(x::Pair{<:Tuple{Vararg{Index}}, <:Tuple{Vararg{Index}}})
+ return (Bool[], _ -> x)
end
function ChainRulesTestUtils.rand_tangent(rng::AbstractRNG, A::ITensor)
- # TODO: generalize to sparse tensors
- return isempty(inds(A)) ? ITensor(randn(eltype(A))) : random_itensor(eltype(A), inds(A))
+ # TODO: generalize to sparse tensors
+ return isempty(inds(A)) ? ITensor(randn(eltype(A))) : random_itensor(eltype(A), inds(A))
end
function ChainRulesTestUtils.rand_tangent(rng::AbstractRNG, x::Index)
- return NoTangent()
+ return NoTangent()
end
function ChainRulesTestUtils.rand_tangent(rng::AbstractRNG, x::Tuple{Vararg{Index}})
- return NoTangent()
+ return NoTangent()
end
function ChainRulesTestUtils.rand_tangent(
- rng::AbstractRNG, x::Pair{<:Tuple{Vararg{Index}},<:Tuple{Vararg{Index}}}
-)
- return NoTangent()
+ rng::AbstractRNG, x::Pair{<:Tuple{Vararg{Index}}, <:Tuple{Vararg{Index}}}
+ )
+ return NoTangent()
end
function ChainRulesTestUtils.test_approx(::AbstractZero, x::Vector{<:Index}, msg; kwargs...)
- return ChainRulesTestUtils.@test_msg msg true
+ return ChainRulesTestUtils.@test_msg msg true
end
# The fallback version would convert to an Array with `collect`,
# which would be incorrect if the indices had different orderings
function ChainRulesTestUtils.test_approx(
- actual::ITensor, expected::ITensor, msg=""; kwargs...
-)
- ChainRulesTestUtils.@test_msg msg isapprox(actual, expected; kwargs...)
+ actual::ITensor, expected::ITensor, msg = ""; kwargs...
+ )
+ return ChainRulesTestUtils.@test_msg msg isapprox(actual, expected; kwargs...)
end
From 4cde9ab8ee4a3fe361b44f50258aae07ae7c9536 Mon Sep 17 00:00:00 2001
From: Matt Fishman
Date: Thu, 9 Oct 2025 13:36:37 -0400
Subject: [PATCH 2/3] Apply suggestion from @mtfishman
---
.pre-commit-config.yaml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index a4f7c6b2e0..a0f9e64b52 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -9,8 +9,8 @@ repos:
- id: check-toml
- id: check-yaml
- id: end-of-file-fixer
- exclude: '.*references/.*\.txt$' # do not check reference TN images
- exclude_types: [markdown] # incompatible with Literate.jl
+exclude_types: [markdown] # incompatible with Literate.jl
+exclude: '.*references/.*\.txt$' # do not check reference TN images
- repo: https://github.com/fredrikekre/runic-pre-commit
rev: v2.0.1
From 651e99c741e6f0e82ea57f147f1a1663ca2dc91d Mon Sep 17 00:00:00 2001
From: Matt Fishman
Date: Thu, 9 Oct 2025 13:38:12 -0400
Subject: [PATCH 3/3] Apply suggestion from @mtfishman
---
.pre-commit-config.yaml | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index a0f9e64b52..f805eb823c 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -9,8 +9,8 @@ repos:
- id: check-toml
- id: check-yaml
- id: end-of-file-fixer
-exclude_types: [markdown] # incompatible with Literate.jl
-exclude: '.*references/.*\.txt$' # do not check reference TN images
+ exclude_types: [markdown] # incompatible with Literate.jl
+ exclude: '.*references/.*\.txt$' # do not check reference TN images
- repo: https://github.com/fredrikekre/runic-pre-commit
rev: v2.0.1