diff --git a/Project.toml b/Project.toml
index 8ab8ffa..8ca9abd 100644
--- a/Project.toml
+++ b/Project.toml
@@ -1,6 +1,6 @@
 name = "NNlibCUDA"
 uuid = "a00861dc-f156-4864-bf3c-e6376f28a68d"
-version = "0.2.5"
+version = "0.2.6"
 
 [deps]
 Adapt = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
@@ -13,7 +13,7 @@ Statistics = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
 [compat]
 Adapt = "3.3"
 CUDA = "3.11"
-NNlib = "0.8.14"
+NNlib = "0.8.15"
 julia = "1.6"
 
 [extras]
diff --git a/src/utils.jl b/src/utils.jl
index f6ce38f..a9eaa8d 100644
--- a/src/utils.jl
+++ b/src/utils.jl
@@ -1,5 +1,9 @@
 NNlib._rng_from_array(::CuArray) = CUDA.default_rng()
 
+NNlib._rng_compat_array(rng::CUDA.RNG, A::CuArray) = nothing
+NNlib._rng_compat_array(rng::AbstractRNG, A::CuArray) = throw(ArgumentError(
+    "cannot use rng::$(typeof(rng)) with array::CuArray, only CUDA's own RNG type works"))
+
 function divide_kernel!(xs, ys, max_idx)
     index = threadIdx().x + (blockIdx().x - 1) * blockDim().x
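
A minimal usage sketch (not part of the diff) of how the two new _rng_compat_array methods are expected to dispatch on a machine with a CUDA device; the array x and the choice of Random.default_rng() are illustrative assumptions, not taken from the PR:

using CUDA, NNlib, Random

x = CUDA.rand(4)   # hypothetical CuArray used only for dispatch

# CUDA's own RNG type is accepted: the check is a no-op returning `nothing`.
NNlib._rng_compat_array(CUDA.default_rng(), x)

# Any other AbstractRNG (here the CPU's task-local RNG) hits the second
# method and raises the ArgumentError pointing the user at CUDA's RNG.
try
    NNlib._rng_compat_array(Random.default_rng(), x)
catch err
    @assert err isa ArgumentError
end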