
Commit 59058d3

More Runic formatting (#1675)
1 parent 2a1aa19 commit 59058d3


212 files changed: +11182 / -11176 lines
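The hunks below are mechanical re-formatting by the Runic formatter and all follow the same pattern: spaces after commas inside type-parameter braces (`{ElT, StoreT}` instead of `{ElT,StoreT}`), spaces around `=` in keyword arguments (`force = true` instead of `force=true`), trailing commas in multi-line argument lists, and Runic's four-space indentation. As an illustrative sketch only (the `rescale!` function is made up for this example, and the pre-Runic indentation width is an assumption, not taken from the commit), a definition written as

function rescale!(A::AbstractArray{<:Real,2}; factor=2)
  A .= factor .* A
  return A
end

comes out of the formatter roughly as

function rescale!(A::AbstractArray{<:Real, 2}; factor = 2)
    A .= factor .* A
    return A
end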


.github/ISSUE_TEMPLATE/generate_issue_templates/generate_issue_templates.jl

Lines changed: 26 additions & 26 deletions
@@ -7,44 +7,44 @@ package_names = ["ITensors", "NDTensors"]
 package_ordering = Dict(["ITensors" => 1, "NDTensors" => 2])
 
 function bug_report_file(package_name::String)
-  return "$(package_name)_bug_report.md"
+    return "$(package_name)_bug_report.md"
 end
 function feature_request_file(package_name::String)
-  return "$(package_name)_feature_request.md"
+    return "$(package_name)_feature_request.md"
 end
 
 for package_name in package_names
-  @show package_name
+    @show package_name
 
-  order = lpad(package_ordering[package_name], 2, "0")
+    order = lpad(package_ordering[package_name], 2, "0")
 
-  template_bug_report = bug_report_file(template_package_name)
-  new_bug_report = order * "_" * bug_report_file(package_name)
+    template_bug_report = bug_report_file(template_package_name)
+    new_bug_report = order * "_" * bug_report_file(package_name)
 
-  if isfile(new_bug_report)
-    println("File $new_bug_report already exists, skipping")
-  else
-    println("Copying $template_bug_report to $new_bug_report")
-    cp(template_bug_report, new_bug_report)
+    if isfile(new_bug_report)
+        println("File $new_bug_report already exists, skipping")
+    else
+        println("Copying $template_bug_report to $new_bug_report")
+        cp(template_bug_report, new_bug_report)
 
-    println("Replace $template_package_name with $package_name in $new_bug_report")
-    replace_in_file(new_bug_report, template_package_name => package_name)
+        println("Replace $template_package_name with $package_name in $new_bug_report")
+        replace_in_file(new_bug_report, template_package_name => package_name)
 
-    mv(new_bug_report, joinpath("..", new_bug_report); force=true)
-  end
+        mv(new_bug_report, joinpath("..", new_bug_report); force = true)
+    end
 
-  template_feature_request = feature_request_file(template_package_name)
-  new_feature_request = order * "_" * feature_request_file(package_name)
+    template_feature_request = feature_request_file(template_package_name)
+    new_feature_request = order * "_" * feature_request_file(package_name)
 
-  if isfile(new_feature_request)
-    println("File $new_feature_request already exists, skipping")
-  else
-    println("Copying $template_feature_request to $new_feature_request")
-    cp(template_feature_request, new_feature_request)
+    if isfile(new_feature_request)
+        println("File $new_feature_request already exists, skipping")
+    else
+        println("Copying $template_feature_request to $new_feature_request")
+        cp(template_feature_request, new_feature_request)
 
-    println("Replace $template_package_name with $package_name in $new_feature_request")
-    replace_in_file(new_feature_request, template_package_name => package_name)
+        println("Replace $template_package_name with $package_name in $new_feature_request")
+        replace_in_file(new_feature_request, template_package_name => package_name)
 
-    mv(new_feature_request, joinpath("..", new_feature_request); force=true)
-  end
+        mv(new_feature_request, joinpath("..", new_feature_request); force = true)
+    end
 end
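The script above relies on a `replace_in_file` helper that is defined earlier in the same file, outside the lines shown in this hunk. For context, a minimal sketch of what such a helper typically does, assuming it performs a plain textual substitution (the repository's actual definition may differ):

function replace_in_file(path::String, substitution::Pair{String, String})
    # Read the whole file, apply the replacement, and write the result back in place.
    contents = read(path, String)
    write(path, replace(contents, substitution))
    return nothing
end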

NDTensors/Project.toml

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 name = "NDTensors"
 uuid = "23ae76d9-e61a-49c4-8f12-3f1a16adf9cf"
 authors = ["Matthew Fishman <[email protected]>"]
-version = "0.4.12"
+version = "0.4.13"
 
 [deps]
 Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697"

NDTensors/ext/NDTensorsAMDGPUExt/adapt.jl

Lines changed: 13 additions & 13 deletions
@@ -2,28 +2,28 @@ using NDTensors: NDTensors, EmptyStorage, adapt_storagetype, emptytype
 using NDTensors.AMDGPUExtensions: AMDGPUExtensions, ROCArrayAdaptor
 using NDTensors.GPUArraysCoreExtensions: storagemode
 using NDTensors.TypeParameterAccessors:
-  default_type_parameters, set_type_parameters, type_parameters
+    default_type_parameters, set_type_parameters, type_parameters
 using Adapt: Adapt, adapt
 using AMDGPU: AMDGPU, ROCArray, ROCVector
 using Functors: fmap
 
 function AMDGPUExtensions.roc(
-  xs; storagemode=default_type_parameters(ROCArray, storagemode)
-)
-  return fmap(x -> adapt(ROCArrayAdaptor{storagemode}(), x), xs)
+        xs; storagemode = default_type_parameters(ROCArray, storagemode)
+    )
+    return fmap(x -> adapt(ROCArrayAdaptor{storagemode}(), x), xs)
 end
 
 function Adapt.adapt_storage(adaptor::ROCArrayAdaptor, xs::AbstractArray)
-  new_parameters = (type_parameters(xs, (eltype, ndims))..., storagemode(adaptor))
-  roctype = set_type_parameters(ROCArray, (eltype, ndims, storagemode), new_parameters)
-  return isbits(xs) ? xs : adapt(roctype, xs)
+    new_parameters = (type_parameters(xs, (eltype, ndims))..., storagemode(adaptor))
+    roctype = set_type_parameters(ROCArray, (eltype, ndims, storagemode), new_parameters)
+    return isbits(xs) ? xs : adapt(roctype, xs)
 end
 
 function NDTensors.adapt_storagetype(
-  adaptor::ROCArrayAdaptor, ::Type{EmptyStorage{ElT,StoreT}}
-) where {ElT,StoreT}
-  roctype = set_type_parameters(
-    ROCVector, (eltype, storagemode), (ElT, storagemode(adaptor))
-  )
-  return emptytype(adapt_storagetype(roctype, StoreT))
+        adaptor::ROCArrayAdaptor, ::Type{EmptyStorage{ElT, StoreT}}
+    ) where {ElT, StoreT}
+    roctype = set_type_parameters(
+        ROCVector, (eltype, storagemode), (ElT, storagemode(adaptor))
+    )
+    return emptytype(adapt_storagetype(roctype, StoreT))
 end

NDTensors/ext/NDTensorsAMDGPUExt/append.jl

Lines changed: 1 addition & 1 deletion
@@ -4,5 +4,5 @@ using NDTensors.Expose: Exposed, unexpose
 
 ## Warning this append function uses scalar indexing and is therefore extremely slow
 function Base.append!(Ecollection::Exposed{<:ROCArray}, collections...)
-  return @allowscalar append!(unexpose(Ecollection), collections...)
+    return @allowscalar append!(unexpose(Ecollection), collections...)
 end

NDTensors/ext/NDTensorsAMDGPUExt/copyto.jl

Lines changed: 18 additions & 18 deletions
@@ -3,33 +3,33 @@ using LinearAlgebra: LinearAlgebra, Adjoint
 using AMDGPU: ROCArray
 
 # Same definition as `MtlArray`.
-function Base.copy(src::Exposed{<:ROCArray,<:Base.ReshapedArray})
-  return reshape(copy(parent(src)), size(unexpose(src)))
+function Base.copy(src::Exposed{<:ROCArray, <:Base.ReshapedArray})
+    return reshape(copy(parent(src)), size(unexpose(src)))
 end
 
 function Base.copy(
-  src::Exposed{
-    <:ROCArray,<:SubArray{<:Any,<:Any,<:Base.ReshapedArray{<:Any,<:Any,<:Adjoint}}
-  },
-)
-  return copy(@view copy(expose(parent(src)))[parentindices(unexpose(src))...])
+        src::Exposed{
+            <:ROCArray, <:SubArray{<:Any, <:Any, <:Base.ReshapedArray{<:Any, <:Any, <:Adjoint}},
+        },
+    )
+    return copy(@view copy(expose(parent(src)))[parentindices(unexpose(src))...])
 end
 
-function Base.copyto!(dest::Exposed{<:ROCArray}, src::Exposed{<:ROCArray,<:SubArray})
-  copyto!(dest, expose(copy(src)))
-  return unexpose(dest)
+function Base.copyto!(dest::Exposed{<:ROCArray}, src::Exposed{<:ROCArray, <:SubArray})
+    copyto!(dest, expose(copy(src)))
+    return unexpose(dest)
 end
 
 function Base.copyto!(
-  dest::Exposed{<:ROCArray}, src::Exposed{<:ROCArray,<:Base.ReshapedArray}
-)
-  copyto!(dest, expose(parent(src)))
-  return unexpose(dest)
+        dest::Exposed{<:ROCArray}, src::Exposed{<:ROCArray, <:Base.ReshapedArray}
+    )
+    copyto!(dest, expose(parent(src)))
+    return unexpose(dest)
 end
 
 function Base.copyto!(
-  dest::Exposed{<:ROCArray}, src::Exposed{<:ROCArray,<:LinearAlgebra.Transpose}
-)
-  copyto!(expose(transpose(dest)), expose(parent(src)))
-  return unexpose(dest)
+        dest::Exposed{<:ROCArray}, src::Exposed{<:ROCArray, <:LinearAlgebra.Transpose}
+    )
+    copyto!(expose(transpose(dest)), expose(parent(src)))
+    return unexpose(dest)
 end

NDTensors/ext/NDTensorsAMDGPUExt/indexing.jl

Lines changed: 7 additions & 7 deletions
@@ -4,20 +4,20 @@ using NDTensors.Expose: Exposed, expose, parent, unexpose
 using NDTensors.GPUArraysCoreExtensions: cpu
 
 function Base.getindex(E::Exposed{<:ROCArray})
-  return @allowscalar unexpose(E)[]
+    return @allowscalar unexpose(E)[]
 end
 
 function Base.setindex!(E::Exposed{<:ROCArray}, x::Number)
-  @allowscalar unexpose(E)[] = x
-  return unexpose(E)
+    @allowscalar unexpose(E)[] = x
+    return unexpose(E)
 end
 
-function Base.getindex(E::Exposed{<:ROCArray,<:Adjoint}, i, j)
-  return (expose(parent(E))[j, i])'
+function Base.getindex(E::Exposed{<:ROCArray, <:Adjoint}, i, j)
+    return (expose(parent(E))[j, i])'
 end
 
-Base.any(f, E::Exposed{<:ROCArray,<:NDTensors.Tensor}) = any(f, data(unexpose(E)))
+Base.any(f, E::Exposed{<:ROCArray, <:NDTensors.Tensor}) = any(f, data(unexpose(E)))
 
 function Base.print_array(io::IO, E::Exposed{<:ROCArray})
-  return Base.print_array(io, expose(cpu(E)))
+    return Base.print_array(io, expose(cpu(E)))
 end

NDTensors/ext/NDTensorsAMDGPUExt/linearalgebra.jl

Lines changed: 6 additions & 6 deletions
@@ -7,16 +7,16 @@ using Adapt: adapt
 using AMDGPU: ROCMatrix
 
 function LinearAlgebra.svd(A::Exposed{<:ROCMatrix}; kwargs...)
-  U, S, V = svd(cpu(A))
-  return roc.((U, S, V))
+    U, S, V = svd(cpu(A))
+    return roc.((U, S, V))
 end
 
 ## TODO currently AMDGPU doesn't have ql so make a ql function
 function Expose.ql(A::Exposed{<:ROCMatrix})
-  Q, L = ql(expose(cpu(A)))
-  return adapt(unwrap_array_type(A), Matrix(Q)), adapt(unwrap_array_type(A), L)
+    Q, L = ql(expose(cpu(A)))
+    return adapt(unwrap_array_type(A), Matrix(Q)), adapt(unwrap_array_type(A), L)
 end
 function Expose.ql_positive(A::Exposed{<:ROCMatrix})
-  Q, L = ql_positive(expose(cpu(A)))
-  return adapt(unwrap_array_type(A), Matrix(Q)), adapt(unwrap_array_type(A), L)
+    Q, L = ql_positive(expose(cpu(A)))
+    return adapt(unwrap_array_type(A), Matrix(Q)), adapt(unwrap_array_type(A), L)
 end

NDTensors/ext/NDTensorsAMDGPUExt/mul.jl

Lines changed: 29 additions & 29 deletions
@@ -4,42 +4,42 @@ using AMDGPU: ROCArray
 
 # This was calling generic matrix multiplication.
 function LinearAlgebra.mul!(
-  CM::Exposed{<:ROCArray,<:LinearAlgebra.Transpose},
-  AM::Exposed{<:ROCArray},
-  BM::Exposed{<:ROCArray},
-  α,
-  β,
-)
-  mul!(transpose(CM), transpose(BM), transpose(AM), α, β)
-  return unexpose(CM)
+        CM::Exposed{<:ROCArray, <:LinearAlgebra.Transpose},
+        AM::Exposed{<:ROCArray},
+        BM::Exposed{<:ROCArray},
+        α,
+        β,
+    )
+    mul!(transpose(CM), transpose(BM), transpose(AM), α, β)
+    return unexpose(CM)
 end
 
 # This was calling generic matrix multiplication.
 function LinearAlgebra.mul!(
-  CM::Exposed{<:ROCArray,<:LinearAlgebra.Adjoint},
-  AM::Exposed{<:ROCArray},
-  BM::Exposed{<:ROCArray},
-  α,
-  β,
-)
-  mul!(CM', BM', AM', α, β)
-  return unexpose(CM)
+        CM::Exposed{<:ROCArray, <:LinearAlgebra.Adjoint},
+        AM::Exposed{<:ROCArray},
+        BM::Exposed{<:ROCArray},
+        α,
+        β,
+    )
+    mul!(CM', BM', AM', α, β)
+    return unexpose(CM)
 end
 
 # Fix issue in AMDGPU.jl where it cannot distinguish
 # Transpose{Reshape{Adjoint{ROCArray}}} as a ROCArray and calls generic matmul
 function LinearAlgebra.mul!(
-  CM::Exposed{<:ROCArray},
-  AM::Exposed{<:ROCArray},
-  BM::Exposed{
-    <:ROCArray,
-    <:LinearAlgebra.Transpose{
-      <:Any,<:Base.ReshapedArray{<:Any,<:Any,<:LinearAlgebra.Adjoint}
-    },
-  },
-  α,
-  β,
-)
-  mul!(CM, AM, expose(transpose(copy(expose(parent(BM))))), α, β)
-  return unexpose(CM)
+        CM::Exposed{<:ROCArray},
+        AM::Exposed{<:ROCArray},
+        BM::Exposed{
+            <:ROCArray,
+            <:LinearAlgebra.Transpose{
+                <:Any, <:Base.ReshapedArray{<:Any, <:Any, <:LinearAlgebra.Adjoint},
+            },
+        },
+        α,
+        β,
+    )
+    mul!(CM, AM, expose(transpose(copy(expose(parent(BM))))), α, β)
+    return unexpose(CM)
 end

NDTensors/ext/NDTensorsAMDGPUExt/permutedims.jl

Lines changed: 13 additions & 13 deletions
@@ -2,22 +2,22 @@ using NDTensors.Expose: Exposed, expose, parent, unexpose
 using AMDGPU: ROCArray
 
 function Base.permutedims!(
-  Edest::Exposed{<:ROCArray,<:Base.ReshapedArray}, Esrc::Exposed{<:ROCArray}, perm
-)
-  Aperm = permutedims(Esrc, perm)
-  copyto!(expose(parent(Edest)), expose(Aperm))
-  return unexpose(Edest)
+        Edest::Exposed{<:ROCArray, <:Base.ReshapedArray}, Esrc::Exposed{<:ROCArray}, perm
+    )
+    Aperm = permutedims(Esrc, perm)
+    copyto!(expose(parent(Edest)), expose(Aperm))
+    return unexpose(Edest)
 end
 
 # There is an issue in AMDGPU where if Edest is a reshaped{<:Adjoint}
 # .= can fail. So instead force Esrc into the shape of parent(Edest)
 function Base.permutedims!(
-  Edest::Exposed{<:ROCArray,<:Base.ReshapedArray{<:Any,<:Any,<:Adjoint}},
-  Esrc::Exposed{<:ROCArray},
-  perm,
-  f,
-)
-  Aperm = reshape(permutedims(Esrc, perm), size(parent(Edest)))
-  parent(Edest) .= f.(parent(Edest), Aperm)
-  return unexpose(Edest)
+        Edest::Exposed{<:ROCArray, <:Base.ReshapedArray{<:Any, <:Any, <:Adjoint}},
+        Esrc::Exposed{<:ROCArray},
+        perm,
+        f,
+    )
+    Aperm = reshape(permutedims(Esrc, perm), size(parent(Edest)))
+    parent(Edest) .= f.(parent(Edest), Aperm)
+    return unexpose(Edest)
 end

NDTensors/ext/NDTensorsCUDAExt/adapt.jl

Lines changed: 10 additions & 10 deletions
@@ -5,22 +5,22 @@ using NDTensors: NDTensors, EmptyStorage, adapt_storagetype, emptytype
 using NDTensors.CUDAExtensions: CUDAExtensions, CuArrayAdaptor
 using NDTensors.GPUArraysCoreExtensions: storagemode
 using NDTensors.TypeParameterAccessors:
-  default_type_parameters, set_type_parameters, type_parameters
+    default_type_parameters, set_type_parameters, type_parameters
 
-function CUDAExtensions.cu(xs; storagemode=default_type_parameters(CuArray, storagemode))
-  return fmap(x -> adapt(CuArrayAdaptor{storagemode}(), x), xs)
+function CUDAExtensions.cu(xs; storagemode = default_type_parameters(CuArray, storagemode))
+    return fmap(x -> adapt(CuArrayAdaptor{storagemode}(), x), xs)
 end
 
 ## Could do this generically
 function Adapt.adapt_storage(adaptor::CuArrayAdaptor, xs::AbstractArray)
-  params = (type_parameters(xs, (eltype, ndims))..., storagemode(adaptor))
-  cutype = set_type_parameters(CuArray, (eltype, ndims, storagemode), params)
-  return isbits(xs) ? xs : adapt(cutype, xs)
+    params = (type_parameters(xs, (eltype, ndims))..., storagemode(adaptor))
+    cutype = set_type_parameters(CuArray, (eltype, ndims, storagemode), params)
+    return isbits(xs) ? xs : adapt(cutype, xs)
 end
 
 function NDTensors.adapt_storagetype(
-  adaptor::CuArrayAdaptor, ::Type{EmptyStorage{ElT,StoreT}}
-) where {ElT,StoreT}
-  cutype = set_type_parameters(CuVector, (eltype, storagemode), (ElT, storagemode(adaptor)))
-  return emptytype(adapt_storagetype(cutype, StoreT))
+        adaptor::CuArrayAdaptor, ::Type{EmptyStorage{ElT, StoreT}}
+    ) where {ElT, StoreT}
+    cutype = set_type_parameters(CuVector, (eltype, storagemode), (ElT, storagemode(adaptor)))
+    return emptytype(adapt_storagetype(cutype, StoreT))
 end
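For context, the `cu` function reformatted in this hunk adapts its input to CUDA storage: it walks `xs` with `fmap` from Functors.jl and adapts each array it finds via `CuArrayAdaptor`, with the `storagemode` keyword selecting the memory type parameter of the resulting `CuArray`. A hedged usage sketch, assuming a working CUDA.jl installation so that this package extension is actually loaded:

using CUDA                      # loading CUDA triggers the NDTensorsCUDAExt extension
using NDTensors
using NDTensors.CUDAExtensions: cu

A = randn(Float64, 4, 4)
Agpu = cu(A)                    # a CuArray holding the data of A, with the default storage mode
nt = cu((x = A, y = copy(A)))   # containers such as NamedTuples are traversed recursively by fmap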

0 commit comments
