3 changes: 3 additions & 0 deletions CHANGELOG.md
@@ -7,6 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

## [Unreleased](https://github.com/qutip/QuantumToolbox.jl/tree/main)

- Fix CUDA `sparse_to_dense`. ([#386])

## [v0.25.2]
Release date: 2025-02-02

@@ -104,3 +106,4 @@ Release date: 2024-11-13
[#378]: https://github.com/qutip/QuantumToolbox.jl/issues/378
[#380]: https://github.com/qutip/QuantumToolbox.jl/issues/380
[#383]: https://github.com/qutip/QuantumToolbox.jl/issues/383
[#386]: https://github.com/qutip/QuantumToolbox.jl/issues/386
15 changes: 8 additions & 7 deletions ext/QuantumToolboxCUDAExt.jl
@@ -2,10 +2,12 @@

using QuantumToolbox
using QuantumToolbox: makeVal, getVal
import CUDA: cu, CuArray
import CUDA.CUSPARSE: CuSparseVector, CuSparseMatrixCSC, CuSparseMatrixCSR
import CUDA: cu, CuArray, allowscalar
import CUDA.CUSPARSE: CuSparseVector, CuSparseMatrixCSC, CuSparseMatrixCSR, AbstractCuSparseArray
import SparseArrays: SparseVector, SparseMatrixCSC

allowscalar(false)

@doc raw"""
CuArray(A::QuantumObject)

@@ -99,10 +101,9 @@
_change_eltype(::Type{Complex{T}}, ::Val{64}) where {T<:Union{Int,AbstractFloat}} = ComplexF64
_change_eltype(::Type{Complex{T}}, ::Val{32}) where {T<:Union{Int,AbstractFloat}} = ComplexF32

sparse_to_dense(::Type{T}, A::CuArray{T}) where {T<:Number} = A
sparse_to_dense(::Type{T1}, A::CuArray{T2}) where {T1<:Number,T2<:Number} = CuArray{T1}(A)
sparse_to_dense(::Type{T}, A::CuSparseVector) where {T<:Number} = CuArray{T}(A)
sparse_to_dense(::Type{T}, A::CuSparseMatrixCSC) where {T<:Number} = CuArray{T}(A)
sparse_to_dense(::Type{T}, A::CuSparseMatrixCSR) where {T<:Number} = CuArray{T}(A)
QuantumToolbox.sparse_to_dense(A::MT) where {MT<:AbstractCuSparseArray} = CuArray(A)

QuantumToolbox.sparse_to_dense(::Type{T1}, A::CuArray{T2}) where {T1<:Number,T2<:Number} = CuArray{T1}(A)
QuantumToolbox.sparse_to_dense(::Type{T}, A::AbstractCuSparseArray) where {T<:Number} = CuArray{T}(A)

Codecov (codecov/patch) check warning on line 107 of ext/QuantumToolboxCUDAExt.jl: added line #L107 was not covered by tests.

end
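For context, a minimal sketch of how the new `AbstractCuSparseArray` dispatch is meant to be exercised, assuming a CUDA-capable GPU; `sigmax()` is just an arbitrary sparse operator here, and `cu(...; word_size)` is the conversion used in the tests below:

```julia
using QuantumToolbox
using CUDA
using CUDA.CUSPARSE

X = sigmax()                      # Pauli-X operator; data is a sparse CPU matrix
X_gpu = cu(X; word_size = 64)     # move to the GPU; data becomes a CUSPARSE array
X_dense = sparse_to_dense(X_gpu)  # new dispatch: data is now a dense CuMatrix{ComplexF64}
```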
3 changes: 0 additions & 3 deletions src/qobj/functions.jl
Expand Up @@ -119,9 +119,6 @@ Converts a sparse QuantumObject to a dense QuantumObject.
"""
sparse_to_dense(A::QuantumObject) = QuantumObject(sparse_to_dense(A.data), A.type, A.dimensions)
sparse_to_dense(A::MT) where {MT<:AbstractSparseArray} = Array(A)
for op in (:Transpose, :Adjoint)
@eval sparse_to_dense(A::$op{T,<:AbstractSparseMatrix}) where {T<:BlasFloat} = Array(A)
end
sparse_to_dense(A::MT) where {MT<:AbstractArray} = A

sparse_to_dense(::Type{T}, A::AbstractSparseArray) where {T<:Number} = Array{T}(A)
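For comparison, the CPU-side behaviour retained in `functions.jl`, shown as a small illustrative sketch (the choice of operator is arbitrary):

```julia
using QuantumToolbox

a = destroy(5)                   # annihilation operator; data is a SparseMatrixCSC
ad = sparse_to_dense(a)          # same Qobj, densified via the Array(A) fallback
ad.data isa Matrix{ComplexF64}   # true
```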
18 changes: 18 additions & 0 deletions test/ext-test/gpu/cuda_ext.jl
@@ -1,4 +1,7 @@
@testset "CUDA Extension" verbose = true begin
# Test that scalar indexing is disallowed
@test_throws ErrorException CUDA.rand(1)[1]

ψdi = Qobj(Int64[1, 0])
ψdf = Qobj(Float64[1, 0])
ψdc = Qobj(ComplexF64[1, 0])
@@ -63,6 +66,21 @@
@test typeof(CuSparseMatrixCSR(Xsc).data) == CuSparseMatrixCSR{ComplexF64,Int32}
@test typeof(CuSparseMatrixCSR{ComplexF32}(Xsc).data) == CuSparseMatrixCSR{ComplexF32,Int32}

# Sparse To Dense
# @test sparse_to_dense(cu(ψsi; word_size = 64)).data isa CuVector{Int64} # TODO: Fix this in CUDA.jl
@test sparse_to_dense(cu(ψsf; word_size = 64)).data isa CuVector{Float64}
@test sparse_to_dense(cu(ψsc; word_size = 64)).data isa CuVector{ComplexF64}
# @test sparse_to_dense(cu(Xsi; word_size = 64)).data isa CuMatrix{Int64} # TODO: Fix this in CUDA.jl
@test sparse_to_dense(cu(Xsf; word_size = 64)).data isa CuMatrix{Float64}
@test sparse_to_dense(cu(Xsc; word_size = 64)).data isa CuMatrix{ComplexF64}

# @test sparse_to_dense(Int32, cu(ψsf; word_size = 64)).data isa CuVector{Int32} # TODO: Fix this in CUDA.jl
# @test sparse_to_dense(Float32, cu(ψsf; word_size = 64)).data isa CuVector{Float32} # TODO: Fix this in CUDA.jl
# @test sparse_to_dense(ComplexF32, cu(ψsf; word_size = 64)).data isa CuVector{ComplexF32} # TODO: Fix this in CUDA.jl
# @test sparse_to_dense(Int64, cu(Xsf; word_size = 32)).data isa CuMatrix{Int64} # TODO: Fix this in CUDA.jl
# @test sparse_to_dense(Float64, cu(Xsf; word_size = 32)).data isa CuMatrix{Float64} # TODO: Fix this in CUDA.jl
# @test sparse_to_dense(ComplexF64, cu(Xsf; word_size = 32)).data isa CuMatrix{ComplexF64} # TODO: Fix this in CUDA.jl

# brief example in README and documentation
N = 20
ω64 = 1.0 # Float64
2 changes: 1 addition & 1 deletion test/runtests.jl
@@ -70,7 +70,7 @@ if (GROUP == "CUDA_Ext")# || (GROUP == "All")
using QuantumToolbox
using CUDA
using CUDA.CUSPARSE
CUDA.allowscalar(false) # Avoid unexpected scalar indexing
# CUDA.allowscalar(false) # This is already set in the extension script

QuantumToolbox.about()
CUDA.versioninfo()