Skip to content

Commit 9b35445

Browse files
committed
Merge branch 'main' into GradedUnitRangeDual
2 parents 47a9ed1 + 239b64e commit 9b35445

File tree

86 files changed

+2474
-943
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

86 files changed

+2474
-943
lines changed

.github/workflows/main_test_itensors_base_macos_windows.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@ jobs:
1414
strategy:
1515
matrix:
1616
version:
17-
- '1.6'
17+
- 'lts'
1818
- '1'
1919
os:
2020
# - windows-latest # windows tests are failing for an unknow reason, disable for now

.github/workflows/test_itensormps_ubuntu.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ jobs:
1616
strategy:
1717
matrix:
1818
version:
19-
- '1.6'
19+
- 'lts'
2020
- '1'
2121
os:
2222
- ubuntu-latest

.github/workflows/test_itensors_base_ubuntu.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ jobs:
1616
strategy:
1717
matrix:
1818
version:
19-
- '1.6'
19+
- 'lts'
2020
- '1'
2121
os:
2222
- ubuntu-latest

.github/workflows/test_ndtensors.yml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ jobs:
1616
strategy:
1717
matrix:
1818
version:
19-
- '1.6'
19+
- 'lts'
2020
- '1'
2121
os:
2222
- ubuntu-latest

NDTensors/Project.toml

Lines changed: 10 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
name = "NDTensors"
22
uuid = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf"
33
authors = ["Matthew Fishman <[email protected]>"]
4-
version = "0.3.42"
4+
version = "0.3.48"
55

66
[deps]
77
Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697"
@@ -36,6 +36,7 @@ AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e"
3636
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
3737
GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527"
3838
HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f"
39+
JLArrays = "27aeb0d3-9eb9-45fb-866b-73c2ecf80fcb"
3940
MappedArrays = "dbb5928d-eab1-5f90-85c2-b9b0edb7c900"
4041
Metal = "dde4c033-4e86-420c-a63e-0dd931031962"
4142
Octavian = "6fd5a793-0b7e-452c-907f-f8bfe9c57db4"
@@ -47,6 +48,7 @@ NDTensorsAMDGPUExt = ["AMDGPU", "GPUArraysCore"]
4748
NDTensorsCUDAExt = ["CUDA", "GPUArraysCore"]
4849
NDTensorsGPUArraysCoreExt = "GPUArraysCore"
4950
NDTensorsHDF5Ext = "HDF5"
51+
NDTensorsJLArraysExt = ["GPUArraysCore", "JLArrays"]
5052
NDTensorsMappedArraysExt = ["MappedArrays"]
5153
NDTensorsMetalExt = ["GPUArraysCore", "Metal"]
5254
NDTensorsOctavianExt = "Octavian"
@@ -66,19 +68,20 @@ EllipsisNotation = "1.8"
6668
FillArrays = "1"
6769
Folds = "0.2.8"
6870
Functors = "0.2, 0.3, 0.4"
69-
GPUArraysCore = "0.1"
71+
GPUArraysCore = "0.1, 0.2"
7072
HDF5 = "0.14, 0.15, 0.16, 0.17"
7173
HalfIntegers = "1"
7274
InlineStrings = "1"
73-
LinearAlgebra = "1.6"
75+
JLArrays = "0.1"
76+
LinearAlgebra = "<0.0.1, 1.6"
7477
MacroTools = "0.5"
7578
MappedArrays = "0.4"
7679
Metal = "1"
7780
Octavian = "0.3"
7881
PackageExtensionCompat = "1"
79-
Random = "1.6"
82+
Random = "<0.0.1, 1.6"
8083
SimpleTraits = "0.9.4"
81-
SparseArrays = "1.6"
84+
SparseArrays = "<0.0.1, 1.6"
8285
SplitApplyCombine = "1.2.2"
8386
StaticArrays = "0.12, 1.0"
8487
Strided = "2"
@@ -88,13 +91,14 @@ TimerOutputs = "0.5.5"
8891
TupleTools = "1.2.0"
8992
VectorInterface = "0.4.2"
9093
cuTENSOR = "2"
91-
julia = "1.6"
94+
julia = "1.10"
9295

9396
[extras]
9497
AMDGPU = "21141c5a-9bdb-4563-92ae-f87d6854732e"
9598
CUDA = "052768ef-5323-5732-b1bb-66c8b64840ba"
9699
GPUArraysCore = "46192b85-c4d5-4398-a991-12ede77f4527"
97100
HDF5 = "f67ccb44-e63f-5c2f-98bd-6dc0ccc4ba2f"
101+
JLArrays = "27aeb0d3-9eb9-45fb-866b-73c2ecf80fcb"
98102
Metal = "dde4c033-4e86-420c-a63e-0dd931031962"
99103
Octavian = "6fd5a793-0b7e-452c-907f-f8bfe9c57db4"
100104
TBLIS = "48530278-0828-4a49-9772-0f3830dfa1e9"
Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,11 @@
11
# TypeParameterAccessors definitions
2-
using NDTensors.TypeParameterAccessors: TypeParameterAccessors, Position
2+
using NDTensors.TypeParameterAccessors:
3+
TypeParameterAccessors, Position, default_type_parameters
34
using NDTensors.GPUArraysCoreExtensions: storagemode
45
using AMDGPU: AMDGPU, ROCArray
56

67
function TypeParameterAccessors.default_type_parameters(::Type{<:ROCArray})
7-
return (Float64, 1, AMDGPU.Mem.HIPBuffer)
8+
return (default_type_parameters(AbstractArray)..., AMDGPU.Mem.HIPBuffer)
89
end
9-
TypeParameterAccessors.position(::Type{<:ROCArray}, ::typeof(eltype)) = Position(1)
10-
TypeParameterAccessors.position(::Type{<:ROCArray}, ::typeof(ndims)) = Position(2)
10+
1111
TypeParameterAccessors.position(::Type{<:ROCArray}, ::typeof(storagemode)) = Position(3)
Lines changed: 3 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -1,18 +1,13 @@
11
# TypeParameterAccessors definitions
22
using CUDA: CUDA, CuArray
3-
using NDTensors.TypeParameterAccessors: TypeParameterAccessors, Position
3+
using NDTensors.TypeParameterAccessors:
4+
TypeParameterAccessors, Position, default_type_parameters
45
using NDTensors.GPUArraysCoreExtensions: storagemode
56

6-
function TypeParameterAccessors.position(::Type{<:CuArray}, ::typeof(eltype))
7-
return Position(1)
8-
end
9-
function TypeParameterAccessors.position(::Type{<:CuArray}, ::typeof(ndims))
10-
return Position(2)
11-
end
127
function TypeParameterAccessors.position(::Type{<:CuArray}, ::typeof(storagemode))
138
return Position(3)
149
end
1510

1611
function TypeParameterAccessors.default_type_parameters(::Type{<:CuArray})
17-
return (Float64, 1, CUDA.Mem.DeviceBuffer)
12+
return (default_type_parameters(AbstractArray)..., CUDA.Mem.DeviceBuffer)
1813
end
Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
module NDTensorsJLArraysExt
2+
include("copyto.jl")
3+
include("indexing.jl")
4+
include("linearalgebra.jl")
5+
include("mul.jl")
6+
include("permutedims.jl")
7+
end
Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
using JLArrays: JLArray
2+
using NDTensors.Expose: Exposed, expose, unexpose
3+
using LinearAlgebra: Adjoint
4+
5+
# Same definition as `CuArray`.
6+
function Base.copy(src::Exposed{<:JLArray,<:Base.ReshapedArray})
7+
return reshape(copy(parent(src)), size(unexpose(src)))
8+
end
9+
10+
function Base.copy(
11+
src::Exposed{
12+
<:JLArray,<:SubArray{<:Any,<:Any,<:Base.ReshapedArray{<:Any,<:Any,<:Adjoint}}
13+
},
14+
)
15+
return copy(@view copy(expose(parent(src)))[parentindices(unexpose(src))...])
16+
end
17+
18+
# Catches a bug in `copyto!` in CUDA backend.
19+
function Base.copyto!(dest::Exposed{<:JLArray}, src::Exposed{<:JLArray,<:SubArray})
20+
copyto!(dest, expose(copy(src)))
21+
return unexpose(dest)
22+
end
23+
24+
# Catches a bug in `copyto!` in JLArray backend.
25+
function Base.copyto!(
26+
dest::Exposed{<:JLArray}, src::Exposed{<:JLArray,<:Base.ReshapedArray}
27+
)
28+
copyto!(dest, expose(parent(src)))
29+
return unexpose(dest)
30+
end
Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
1+
using JLArrays: JLArray
2+
using GPUArraysCore: @allowscalar
3+
using NDTensors: NDTensors
4+
using NDTensors.Expose: Exposed, expose, unexpose
5+
6+
function Base.getindex(E::Exposed{<:JLArray})
7+
return @allowscalar unexpose(E)[]
8+
end
9+
10+
function Base.setindex!(E::Exposed{<:JLArray}, x::Number)
11+
@allowscalar unexpose(E)[] = x
12+
return unexpose(E)
13+
end
14+
15+
function Base.getindex(E::Exposed{<:JLArray,<:Adjoint}, i, j)
16+
return (expose(parent(E))[j, i])'
17+
end
18+
19+
Base.any(f, E::Exposed{<:JLArray,<:NDTensors.Tensor}) = any(f, data(unexpose(E)))

0 commit comments

Comments (0)