diff --git a/.buildkite/pipeline.yml b/.buildkite/pipeline.yml index ef4a60a93f..9d6d60d107 100644 --- a/.buildkite/pipeline.yml +++ b/.buildkite/pipeline.yml @@ -37,354 +37,354 @@ steps: julia: "nightly" soft_fail: true - # then, test supported CUDA toolkits (installed through the artifact system) - - group: "CUDA" - key: "cuda" - depends_on: "julia" - steps: - - label: "CUDA {{matrix.cuda}}" - plugins: - - JuliaCI/julia#v1: - version: "1.11" - - JuliaCI/julia-test#v1: - test_args: "--quickfail core base libraries" - - JuliaCI/julia-coverage#v1: - dirs: - - src - - lib - - examples - agents: - queue: "juliagpu" - cuda: "*" - if: | - build.message =~ /\[only tests\]/ || - build.message =~ /\[only cuda\]/ || - build.message !~ /\[only/ && !build.pull_request.draft && - build.message !~ /\[skip tests\]/ && - build.message !~ /\[skip cuda\]/ - timeout_in_minutes: 45 - matrix: - setup: - cuda: - - "12.9" - - "12.8" - - "12.6" - - "12.5" - - "12.4" - - "12.3" - - "12.2" - - "12.1" - - "12.0" - - "11.8" - - "11.7" - - "11.6" - - "11.5" - - "11.4" - commands: | - echo -e "[CUDA_Runtime_jll]\nversion = \"{{matrix.cuda}}\"" >LocalPreferences.toml - echo -e "[CUDA_Driver_jll]\ncompat = \"false\"" >>LocalPreferences.toml + # # then, test supported CUDA toolkits (installed through the artifact system) + # - group: "CUDA" + # key: "cuda" + # depends_on: "julia" + # steps: + # - label: "CUDA {{matrix.cuda}}" + # plugins: + # - JuliaCI/julia#v1: + # version: "1.11" + # - JuliaCI/julia-test#v1: + # test_args: "--quickfail core base libraries" + # - JuliaCI/julia-coverage#v1: + # dirs: + # - src + # - lib + # - examples + # agents: + # queue: "juliagpu" + # cuda: "*" + # if: | + # build.message =~ /\[only tests\]/ || + # build.message =~ /\[only cuda\]/ || + # build.message !~ /\[only/ && !build.pull_request.draft && + # build.message !~ /\[skip tests\]/ && + # build.message !~ /\[skip cuda\]/ + # timeout_in_minutes: 45 + # matrix: + # setup: + # cuda: + # - "12.9" + # - "12.8" + # - "12.6" + # - "12.5" + # - "12.4" + # - "12.3" + # - "12.2" + # - "12.1" + # - "12.0" + # - "11.8" + # - "11.7" + # - "11.6" + # - "11.5" + # - "11.4" + # commands: | + # echo -e "[CUDA_Runtime_jll]\nversion = \"{{matrix.cuda}}\"" >LocalPreferences.toml + # echo -e "[CUDA_Driver_jll]\ncompat = \"false\"" >>LocalPreferences.toml - - group: ":nesting_dolls: Subpackages" - depends_on: "cuda" - steps: - - label: "{{matrix.package}} on CUDA {{matrix.cuda}}" - matrix: - setup: - cuda: - - "11.4" - - "12.0" - package: - - "cuDNN" - - "cuTENSOR" - - "cuStateVec" - - "cuTensorNet" - plugins: - - JuliaCI/julia#v1: - version: "1.10" - - JuliaCI/julia-coverage#v1: - dirs: - - src - - lib - - examples - agents: - queue: "juliagpu" - cuda: "*" - if: | - build.message =~ /\[only tests\]/ || - build.message =~ /\[only subpackages\]/ || - build.message !~ /\[only/ && !build.pull_request.draft && - build.message !~ /\[skip tests\]/ && - build.message !~ /\[skip subpackages\]/ - timeout_in_minutes: 30 - commands: | - julia -e ' - using Pkg + # - group: ":nesting_dolls: Subpackages" + # depends_on: "cuda" + # steps: + # - label: "{{matrix.package}} on CUDA {{matrix.cuda}}" + # matrix: + # setup: + # cuda: + # - "11.4" + # - "12.0" + # package: + # - "cuDNN" + # - "cuTENSOR" + # - "cuStateVec" + # - "cuTensorNet" + # plugins: + # - JuliaCI/julia#v1: + # version: "1.10" + # - JuliaCI/julia-coverage#v1: + # dirs: + # - src + # - lib + # - examples + # agents: + # queue: "juliagpu" + # cuda: "*" + # if: | + # build.message =~ /\[only tests\]/ || + # 
build.message =~ /\[only subpackages\]/ || + # build.message !~ /\[only/ && !build.pull_request.draft && + # build.message !~ /\[skip tests\]/ && + # build.message !~ /\[skip subpackages\]/ + # timeout_in_minutes: 30 + # commands: | + # julia -e ' + # using Pkg - println("--- :julia: Instantiating project") - withenv("JULIA_PKG_PRECOMPILE_AUTO" => 0) do - Pkg.activate(joinpath("lib", lowercase("{{matrix.package}}"))) - try - Pkg.instantiate() - catch - # if we fail to instantiate, assume that we need newer dependencies - deps = [PackageSpec(path=".")] - if "{{matrix.package}}" == "cuTensorNet" - push!(deps, PackageSpec(path="lib/cutensor")) - end - Pkg.develop(deps) - end + # println("--- :julia: Instantiating project") + # withenv("JULIA_PKG_PRECOMPILE_AUTO" => 0) do + # Pkg.activate(joinpath("lib", lowercase("{{matrix.package}}"))) + # try + # Pkg.instantiate() + # catch + # # if we fail to instantiate, assume that we need newer dependencies + # deps = [PackageSpec(path=".")] + # if "{{matrix.package}}" == "cuTensorNet" + # push!(deps, PackageSpec(path="lib/cutensor")) + # end + # Pkg.develop(deps) + # end - Pkg.add("CUDA_Runtime_jll") - write(joinpath("lib", lowercase("{{matrix.package}}"), "LocalPreferences.toml"), - "[CUDA_Runtime_jll]\nversion = \"{{matrix.cuda}}\"") - end + # Pkg.add("CUDA_Runtime_jll") + # write(joinpath("lib", lowercase("{{matrix.package}}"), "LocalPreferences.toml"), + # "[CUDA_Runtime_jll]\nversion = \"{{matrix.cuda}}\"") + # end - println("+++ :julia: Running tests") - Pkg.test(; coverage=true)' + # println("+++ :julia: Running tests") + # Pkg.test(; coverage=true)' - - group: ":telescope: Downstream" - depends_on: "cuda" - steps: - #- label: "NNlib.jl" - # plugins: - # - JuliaCI/julia#v1: - # version: "1.11" - # - JuliaCI/julia-coverage#v1: - # dirs: - # - src - # - lib - # - examples - # command: | - # julia --project -e ' - # using Pkg - # - # cuda = pwd() - # cudnn = joinpath(cuda, "lib", "cudnn") - # devdir = mktempdir() - # nnlib = joinpath(devdir, "NNlib") - # - # println("--- :julia: Installing TestEnv") - # Pkg.activate(; temp=true) - # Pkg.add("TestEnv") - # using TestEnv - # - # println("--- :julia: Installing NNlib") - # withenv("JULIA_PKG_PRECOMPILE_AUTO" => 0, - # "JULIA_PKG_DEVDIR" => devdir) do - # Pkg.develop("NNlib") - # Pkg.activate(nnlib) - # - # try - # Pkg.develop([PackageSpec(path=cuda), PackageSpec(path=cudnn)]) - # TestEnv.activate() - # catch err - # @error "Could not install NNlib" exception=(err,catch_backtrace()) - # exit(3) - # finally - # Pkg.activate(nnlib) - # end - # end - # - # println("+++ :julia: Running tests") - # Pkg.test(; coverage=true)' - # env: - # NNLIB_TEST_CUDA: "true" - # NNLIB_TEST_CPU: "false" - # agents: - # queue: "juliagpu" - # cuda: "*" - # if: | - # build.message =~ /\[only tests\]/ || - # build.message =~ /\[only downstream\]/ || - # build.message !~ /\[only/ && !build.pull_request.draft && - # build.message !~ /\[skip tests\]/ && - # build.message !~ /\[skip downstream\]/ - # timeout_in_minutes: 30 - # soft_fail: - # - exit_status: 3 - - label: "Enzyme.jl" - plugins: - - JuliaCI/julia#v1: - version: "1.10" # XXX: Enzyme.jl is broken on 1.11 - - JuliaCI/julia-coverage#v1: - dirs: - - src - - lib - - examples - command: | - julia -e ' - using Pkg + # - group: ":telescope: Downstream" + # depends_on: "cuda" + # steps: + # #- label: "NNlib.jl" + # # plugins: + # # - JuliaCI/julia#v1: + # # version: "1.11" + # # - JuliaCI/julia-coverage#v1: + # # dirs: + # # - src + # # - lib + # # - examples + # # command: | 
+ # # julia --project -e ' + # # using Pkg + # # + # # cuda = pwd() + # # cudnn = joinpath(cuda, "lib", "cudnn") + # # devdir = mktempdir() + # # nnlib = joinpath(devdir, "NNlib") + # # + # # println("--- :julia: Installing TestEnv") + # # Pkg.activate(; temp=true) + # # Pkg.add("TestEnv") + # # using TestEnv + # # + # # println("--- :julia: Installing NNlib") + # # withenv("JULIA_PKG_PRECOMPILE_AUTO" => 0, + # # "JULIA_PKG_DEVDIR" => devdir) do + # # Pkg.develop("NNlib") + # # Pkg.activate(nnlib) + # # + # # try + # # Pkg.develop([PackageSpec(path=cuda), PackageSpec(path=cudnn)]) + # # TestEnv.activate() + # # catch err + # # @error "Could not install NNlib" exception=(err,catch_backtrace()) + # # exit(3) + # # finally + # # Pkg.activate(nnlib) + # # end + # # end + # # + # # println("+++ :julia: Running tests") + # # Pkg.test(; coverage=true)' + # # env: + # # NNLIB_TEST_CUDA: "true" + # # NNLIB_TEST_CPU: "false" + # # agents: + # # queue: "juliagpu" + # # cuda: "*" + # # if: | + # # build.message =~ /\[only tests\]/ || + # # build.message =~ /\[only downstream\]/ || + # # build.message !~ /\[only/ && !build.pull_request.draft && + # # build.message !~ /\[skip tests\]/ && + # # build.message !~ /\[skip downstream\]/ + # # timeout_in_minutes: 30 + # # soft_fail: + # # - exit_status: 3 + # - label: "Enzyme.jl" + # plugins: + # - JuliaCI/julia#v1: + # version: "1.10" # XXX: Enzyme.jl is broken on 1.11 + # - JuliaCI/julia-coverage#v1: + # dirs: + # - src + # - lib + # - examples + # command: | + # julia -e ' + # using Pkg - println("--- :julia: Instantiating project") - withenv("JULIA_PKG_PRECOMPILE_AUTO" => 0) do - # add Enzyme to the test deps - Pkg.activate("test") - Pkg.add(["Enzyme", "EnzymeCore"]) + # println("--- :julia: Instantiating project") + # withenv("JULIA_PKG_PRECOMPILE_AUTO" => 0) do + # # add Enzyme to the test deps + # Pkg.activate("test") + # Pkg.add(["Enzyme", "EnzymeCore"]) - # to check compatibility, also add Enzyme to the main environment - # (or Pkg.test, which merges both environments, could fail) - Pkg.activate(".") - # Try to co-develop Enzyme and KA, if that fails, try just to dev Enzyme - try - Pkg.develop([PackageSpec("Enzyme"), PackageSpec("KernelAbstractions")]) - catch err - try - Pkg.develop([PackageSpec("Enzyme")]) - catch err - @error "Could not install Enzyme" exception=(err,catch_backtrace()) - exit(3) - end - end - end + # # to check compatibility, also add Enzyme to the main environment + # # (or Pkg.test, which merges both environments, could fail) + # Pkg.activate(".") + # # Try to co-develop Enzyme and KA, if that fails, try just to dev Enzyme + # try + # Pkg.develop([PackageSpec("Enzyme"), PackageSpec("KernelAbstractions")]) + # catch err + # try + # Pkg.develop([PackageSpec("Enzyme")]) + # catch err + # @error "Could not install Enzyme" exception=(err,catch_backtrace()) + # exit(3) + # end + # end + # end - println("+++ :julia: Running tests") - Pkg.test(; coverage=true, test_args=`extensions/enzyme`)' - agents: - queue: "juliagpu" - cuda: "*" - if: | - build.message =~ /\[only tests\]/ || - build.message =~ /\[only downstream\]/ || - build.message !~ /\[only/ && !build.pull_request.draft && - build.message !~ /\[skip tests\]/ && - build.message !~ /\[skip downstream\]/ - timeout_in_minutes: 60 - soft_fail: true + # println("+++ :julia: Running tests") + # Pkg.test(; coverage=true, test_args=`extensions/enzyme`)' + # agents: + # queue: "juliagpu" + # cuda: "*" + # if: | + # build.message =~ /\[only tests\]/ || + # build.message =~ /\[only 
downstream\]/ || + # build.message !~ /\[only/ && !build.pull_request.draft && + # build.message !~ /\[skip tests\]/ && + # build.message !~ /\[skip downstream\]/ + # timeout_in_minutes: 60 + # soft_fail: true - - group: ":eyes: Special" - depends_on: "cuda" - steps: - - label: "GPU-less environment" - plugins: - - JuliaCI/julia#v1: - version: "1.11" - - JuliaCI/julia-coverage#v1: - dirs: - - src - - lib - - examples - - JuliaCI/julia-test#v1: - run_tests: false - command: | - julia --project -e ' - using CUDA - @assert !CUDA.functional() - @assert !isdefined(CUDA, :libcudart) - CUDA.set_runtime_version!(v"11.6")' - julia --project -e ' - using CUDA - @assert !CUDA.functional() - @assert isdefined(CUDA, :libcudart)' - agents: - queue: "juliagpu" - intel: "*" - if: | - build.message =~ /\[only tests\]/ || - build.message =~ /\[only special\]/ || - build.message !~ /\[only/ && !build.pull_request.draft && - build.message !~ /\[skip tests\]/ && - build.message !~ /\[skip special\]/ - timeout_in_minutes: 5 + # - group: ":eyes: Special" + # depends_on: "cuda" + # steps: + # - label: "GPU-less environment" + # plugins: + # - JuliaCI/julia#v1: + # version: "1.11" + # - JuliaCI/julia-coverage#v1: + # dirs: + # - src + # - lib + # - examples + # - JuliaCI/julia-test#v1: + # run_tests: false + # command: | + # julia --project -e ' + # using CUDA + # @assert !CUDA.functional() + # @assert !isdefined(CUDA, :libcudart) + # CUDA.set_runtime_version!(v"11.6")' + # julia --project -e ' + # using CUDA + # @assert !CUDA.functional() + # @assert isdefined(CUDA, :libcudart)' + # agents: + # queue: "juliagpu" + # intel: "*" + # if: | + # build.message =~ /\[only tests\]/ || + # build.message =~ /\[only special\]/ || + # build.message !~ /\[only/ && !build.pull_request.draft && + # build.message !~ /\[skip tests\]/ && + # build.message !~ /\[skip special\]/ + # timeout_in_minutes: 5 - - label: "Compute sanitizer" - plugins: - - JuliaCI/julia#v1: - version: "1.11" - - JuliaCI/julia-test#v1: - test_args: "--sanitize core base" - - JuliaCI/julia-coverage#v1: - dirs: - - src - - lib - - examples - agents: - queue: "juliagpu" - cuda: "*" - env: - JULIA_CUDA_USE_COMPAT: 'false' # NVIDIA bug #3418723: injection tools prevent probing libcuda - if: | - build.message =~ /\[only tests\]/ || - build.message =~ /\[only special\]/ || - build.message !~ /\[only/ && !build.pull_request.draft && - build.message !~ /\[skip tests\]/ && - build.message !~ /\[skip special\]/ - timeout_in_minutes: 60 + # - label: "Compute sanitizer" + # plugins: + # - JuliaCI/julia#v1: + # version: "1.11" + # - JuliaCI/julia-test#v1: + # test_args: "--sanitize core base" + # - JuliaCI/julia-coverage#v1: + # dirs: + # - src + # - lib + # - examples + # agents: + # queue: "juliagpu" + # cuda: "*" + # env: + # JULIA_CUDA_USE_COMPAT: 'false' # NVIDIA bug #3418723: injection tools prevent probing libcuda + # if: | + # build.message =~ /\[only tests\]/ || + # build.message =~ /\[only special\]/ || + # build.message !~ /\[only/ && !build.pull_request.draft && + # build.message !~ /\[skip tests\]/ && + # build.message !~ /\[skip special\]/ + # timeout_in_minutes: 60 - - label: "Legacy memory allocator" - plugins: - - JuliaCI/julia#v1: - version: "1.11" - - JuliaCI/julia-test#v1: - test_args: "--quickfail core base" - - JuliaCI/julia-coverage#v1: - dirs: - - src - - lib - - examples - agents: - queue: "juliagpu" - cuda: "*" - env: - JULIA_CUDA_MEMORY_POOL: 'none' - if: | - build.message =~ /\[only tests\]/ || - build.message =~ /\[only special\]/ || - 
build.message !~ /\[only/ && !build.pull_request.draft && - build.message !~ /\[skip tests\]/ && - build.message !~ /\[skip special\]/ - timeout_in_minutes: 30 + # - label: "Legacy memory allocator" + # plugins: + # - JuliaCI/julia#v1: + # version: "1.11" + # - JuliaCI/julia-test#v1: + # test_args: "--quickfail core base" + # - JuliaCI/julia-coverage#v1: + # dirs: + # - src + # - lib + # - examples + # agents: + # queue: "juliagpu" + # cuda: "*" + # env: + # JULIA_CUDA_MEMORY_POOL: 'none' + # if: | + # build.message =~ /\[only tests\]/ || + # build.message =~ /\[only special\]/ || + # build.message !~ /\[only/ && !build.pull_request.draft && + # build.message !~ /\[skip tests\]/ && + # build.message !~ /\[skip special\]/ + # timeout_in_minutes: 30 - - label: "CuArray with {{matrix.memory}} memory" - plugins: - - JuliaCI/julia#v1: - version: "1.11" - - JuliaCI/julia-test#v1: - test_args: "--quickfail core base libraries" - - JuliaCI/julia-coverage#v1: - dirs: - - src - - lib - - examples - agents: - queue: "juliagpu" - cuda: "*" - if: | - build.message =~ /\[only tests\]/ || - build.message =~ /\[only special\]/ || - build.message !~ /\[only/ && !build.pull_request.draft && - build.message !~ /\[skip tests\]/ && - build.message !~ /\[skip special\]/ - timeout_in_minutes: 45 - matrix: - setup: - memory: - - "unified" - - "host" - commands: | - echo -e "[CUDA]\ndefault_memory = \"{{matrix.memory}}\"" >LocalPreferences.toml + # - label: "CuArray with {{matrix.memory}} memory" + # plugins: + # - JuliaCI/julia#v1: + # version: "1.11" + # - JuliaCI/julia-test#v1: + # test_args: "--quickfail core base libraries" + # - JuliaCI/julia-coverage#v1: + # dirs: + # - src + # - lib + # - examples + # agents: + # queue: "juliagpu" + # cuda: "*" + # if: | + # build.message =~ /\[only tests\]/ || + # build.message =~ /\[only special\]/ || + # build.message !~ /\[only/ && !build.pull_request.draft && + # build.message !~ /\[skip tests\]/ && + # build.message !~ /\[skip special\]/ + # timeout_in_minutes: 45 + # matrix: + # setup: + # memory: + # - "unified" + # - "host" + # commands: | + # echo -e "[CUDA]\ndefault_memory = \"{{matrix.memory}}\"" >LocalPreferences.toml - - label: "MultiGPU" - plugins: - - JuliaCI/julia#v1: - version: "1.11" - - JuliaCI/julia-test#v1: - test_args: "--gpu=0,1 core base libraries" - - JuliaCI/julia-coverage#v1: - dirs: - - src - - lib - - examples - agents: - queue: "juliagpu" - cuda: "*" - multigpu: "*" - if: | - build.message =~ /\[only tests\]/ || - build.message =~ /\[only special\]/ || - build.message !~ /\[only/ && !build.pull_request.draft && - build.message !~ /\[skip tests\]/ && - build.message !~ /\[skip special\]/ - timeout_in_minutes: 45 + # - label: "MultiGPU" + # plugins: + # - JuliaCI/julia#v1: + # version: "1.11" + # - JuliaCI/julia-test#v1: + # test_args: "--gpu=0,1 core base libraries" + # - JuliaCI/julia-coverage#v1: + # dirs: + # - src + # - lib + # - examples + # agents: + # queue: "juliagpu" + # cuda: "*" + # multigpu: "*" + # if: | + # build.message =~ /\[only tests\]/ || + # build.message =~ /\[only special\]/ || + # build.message !~ /\[only/ && !build.pull_request.draft && + # build.message !~ /\[skip tests\]/ && + # build.message !~ /\[skip special\]/ + # timeout_in_minutes: 45 - wait: ~ continue_on_failure: true diff --git a/perf/Project.toml b/perf/Project.toml index 8314c7b285..9b028ce66a 100644 --- a/perf/Project.toml +++ b/perf/Project.toml @@ -2,5 +2,6 @@ BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" HTTP = 
"cd3eb016-35fb-5094-929b-558a96fad6f3" JSON = "682c06a0-de6a-54ab-a142-c8b1cf79cde6" +Pkg = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" StableRNGs = "860ef19b-820b-49d6-a774-d7a799459cd3" StaticArrays = "90137ffa-7385-5640-81b9-e52037218182" diff --git a/perf/array.jl b/perf/array.jl index 65baa304dd..178cb46618 100644 --- a/perf/array.jl +++ b/perf/array.jl @@ -18,133 +18,137 @@ gpu_vec_ints = reshape(gpu_mat_ints, length(gpu_mat_ints)) gpu_mat_bools = CuArray(rand(rng, Bool, m, n)) gpu_vec_bools = reshape(gpu_mat_bools, length(gpu_mat_bools)) -group["construct"] = @benchmarkable CuArray{Int}(undef, 1) +# group["construct"] = @benchmarkable CuArray{Int}(undef, 1) -group["copy"] = @async_benchmarkable copy($gpu_mat) +# group["copy"] = @async_benchmarkable copy($gpu_mat) -gpu_mat2 = copy(gpu_mat) -let group = addgroup!(group, "copyto!") - group["cpu_to_gpu"] = @async_benchmarkable copyto!($gpu_mat, $cpu_mat) - group["gpu_to_cpu"] = @async_benchmarkable copyto!($cpu_mat, $gpu_mat) - group["gpu_to_gpu"] = @async_benchmarkable copyto!($gpu_mat2, $gpu_mat) -end +# gpu_mat2 = copy(gpu_mat) +# let group = addgroup!(group, "copyto!") +# group["cpu_to_gpu"] = @async_benchmarkable copyto!($gpu_mat, $cpu_mat) +# group["gpu_to_cpu"] = @async_benchmarkable copyto!($cpu_mat, $gpu_mat) +# group["gpu_to_gpu"] = @async_benchmarkable copyto!($gpu_mat2, $gpu_mat) +# end -let group = addgroup!(group, "iteration") - group["scalar"] = @benchmarkable CUDA.@allowscalar [$gpu_vec[i] for i in 1:10] +# let group = addgroup!(group, "iteration") +# group["scalar"] = @benchmarkable CUDA.@allowscalar [$gpu_vec[i] for i in 1:10] - group["logical"] = @benchmarkable $gpu_vec[$gpu_vec_bools] +# group["logical"] = @benchmarkable $gpu_vec[$gpu_vec_bools] - let group = addgroup!(group, "findall") - group["bool"] = @benchmarkable findall($gpu_vec_bools) - group["int"] = @benchmarkable findall(isodd, $gpu_vec_ints) - end +# let group = addgroup!(group, "findall") +# group["bool"] = @benchmarkable findall($gpu_vec_bools) +# group["int"] = @benchmarkable findall(isodd, $gpu_vec_ints) +# end - let group = addgroup!(group, "findfirst") - group["bool"] = @benchmarkable findfirst($gpu_vec_bools) - group["int"] = @benchmarkable findfirst(isodd, $gpu_vec_ints) - end +# let group = addgroup!(group, "findfirst") +# group["bool"] = @benchmarkable findfirst($gpu_vec_bools) +# group["int"] = @benchmarkable findfirst(isodd, $gpu_vec_ints) +# end - let group = addgroup!(group, "findmin") # findmax - group["1d"] = @async_benchmarkable findmin($gpu_vec) - group["2d"] = @async_benchmarkable findmin($gpu_mat; dims=1) - end -end +# let group = addgroup!(group, "findmin") # findmax +# group["1d"] = @async_benchmarkable findmin($gpu_vec) +# group["2d"] = @async_benchmarkable findmin($gpu_mat; dims=1) +# end +# end let group = addgroup!(group, "reverse") group["1d"] = @async_benchmarkable reverse($gpu_vec) + group["1dL"] = @async_benchmarkable reverse($gpu_vec_long) group["2d"] = @async_benchmarkable reverse($gpu_mat; dims=1) + group["2dL"] = @async_benchmarkable reverse($gpu_mat_long; dims=1) group["1d_inplace"] = @async_benchmarkable reverse!($gpu_vec) + group["1dL_inplace"] = @async_benchmarkable reverse!($gpu_vec_long) group["2d_inplace"] = @async_benchmarkable reverse!($gpu_mat; dims=1) + group["2dL_inplace"] = @async_benchmarkable reverse!($gpu_mat_long; dims=2) end -group["broadcast"] = @async_benchmarkable $gpu_mat .= 0f0 - -# no need to test inplace version, which performs the same operation (but with an alloc) -let group = addgroup!(group, 
"accumulate") - let group = addgroup!(group, "Float32") - group["1d"] = @async_benchmarkable accumulate(+, $gpu_vec) - group["dims=1"] = @async_benchmarkable accumulate(+, $gpu_mat; dims=1) - group["dims=2"] = @async_benchmarkable accumulate(+, $gpu_mat; dims=2) - - group["dims=1L"] = @async_benchmarkable accumulate(+, $gpu_mat_long; dims=1) - group["dims=2L"] = @async_benchmarkable accumulate(+, $gpu_mat_long; dims=2) - end - let group = addgroup!(group, "Int64") - group["1d"] = @async_benchmarkable accumulate(+, $gpu_vec_ints) - group["dims=1"] = @async_benchmarkable accumulate(+, $gpu_mat_ints; dims=1) - group["dims=2"] = @async_benchmarkable accumulate(+, $gpu_mat_ints; dims=2) - - group["dims=1L"] = @async_benchmarkable accumulate(+, $gpu_mat_long_ints; dims=1) - group["dims=2L"] = @async_benchmarkable accumulate(+, $gpu_mat_long_ints; dims=2) - end -end - -let group = addgroup!(group, "reductions") - let group = addgroup!(group, "reduce") - let group = addgroup!(group, "Float32") - group["1d"] = @async_benchmarkable reduce(+, $gpu_vec) - group["dims=1"] = @async_benchmarkable reduce(+, $gpu_mat; dims=1) - group["dims=2"] = @async_benchmarkable reduce(+, $gpu_mat; dims=2) - group["dims=1L"] = @async_benchmarkable reduce(+, $gpu_mat_long; dims=1) - group["dims=2L"] = @async_benchmarkable reduce(+, $gpu_mat_long; dims=2) - end - let group = addgroup!(group, "Int64") - group["1d"] = @async_benchmarkable reduce(+, $gpu_vec_ints) - group["dims=1"] = @async_benchmarkable reduce(+, $gpu_mat_ints; dims=1) - group["dims=2"] = @async_benchmarkable reduce(+, $gpu_mat_ints; dims=2) - group["dims=1L"] = @async_benchmarkable reduce(+, $gpu_mat_long_ints; dims=1) - group["dims=2L"] = @async_benchmarkable reduce(+, $gpu_mat_long_ints; dims=2) - end - end - - let group = addgroup!(group, "mapreduce") - let group = addgroup!(group, "Float32") - group["1d"] = @async_benchmarkable mapreduce(x->x+1, +, $gpu_vec) - group["dims=1"] = @async_benchmarkable mapreduce(x->x+1, +, $gpu_mat; dims=1) - group["dims=2"] = @async_benchmarkable mapreduce(x->x+1, +, $gpu_mat; dims=2) - group["dims=1L"] = @async_benchmarkable mapreduce(x->x+1, +, $gpu_mat_long; dims=1) - group["dims=2L"] = @async_benchmarkable mapreduce(x->x+1, +, $gpu_mat_long; dims=2) - end - let group = addgroup!(group, "Int64") - group["1d"] = @async_benchmarkable mapreduce(x->x+1, +, $gpu_vec_ints) - group["dims=1"] = @async_benchmarkable mapreduce(x->x+1, +, $gpu_mat_ints; dims=1) - group["dims=2"] = @async_benchmarkable mapreduce(x->x+1, +, $gpu_mat_ints; dims=2) - group["dims=1L"] = @async_benchmarkable mapreduce(x->x+1, +, $gpu_mat_long_ints; dims=1) - group["dims=2L"] = @async_benchmarkable mapreduce(x->x+1, +, $gpu_mat_long_ints; dims=2) - end - end - - # used by sum, prod, minimum, maximum, all, any, count -end - -let group = addgroup!(group, "random") - let group = addgroup!(group, "rand") - group["Float32"] = @async_benchmarkable CUDA.rand(Float32, m*n) - group["Int64"] = @async_benchmarkable CUDA.rand(Int64, m*n) - end - - let group = addgroup!(group, "rand!") - group["Float32"] = @async_benchmarkable CUDA.rand!($gpu_vec) - group["Int64"] = @async_benchmarkable CUDA.rand!($gpu_vec_ints) - end - - let group = addgroup!(group, "randn") - group["Float32"] = @async_benchmarkable CUDA.randn(Float32, m*n) - end - - let group = addgroup!(group, "randn!") - group["Float32"] = @async_benchmarkable CUDA.randn!($gpu_vec) - end -end - -let group = addgroup!(group, "sorting") - group["1d"] = @async_benchmarkable sort($gpu_vec) - group["2d"] = 
@async_benchmarkable sort($gpu_mat; dims=1) - group["by"] = @async_benchmarkable sort($gpu_vec; by=sin) -end - -let group = addgroup!(group, "permutedims") - group["2d"] = @async_benchmarkable permutedims($gpu_mat, (2,1)) - group["3d"] = @async_benchmarkable permutedims($gpu_arr_3d, (3,1,2)) - group["4d"] = @async_benchmarkable permutedims($gpu_arr_4d, (2,1,4,3)) -end +# group["broadcast"] = @async_benchmarkable $gpu_mat .= 0f0 + +# # no need to test inplace version, which performs the same operation (but with an alloc) +# let group = addgroup!(group, "accumulate") +# let group = addgroup!(group, "Float32") +# group["1d"] = @async_benchmarkable accumulate(+, $gpu_vec) +# group["dims=1"] = @async_benchmarkable accumulate(+, $gpu_mat; dims=1) +# group["dims=2"] = @async_benchmarkable accumulate(+, $gpu_mat; dims=2) + +# group["dims=1L"] = @async_benchmarkable accumulate(+, $gpu_mat_long; dims=1) +# group["dims=2L"] = @async_benchmarkable accumulate(+, $gpu_mat_long; dims=2) +# end +# let group = addgroup!(group, "Int64") +# group["1d"] = @async_benchmarkable accumulate(+, $gpu_vec_ints) +# group["dims=1"] = @async_benchmarkable accumulate(+, $gpu_mat_ints; dims=1) +# group["dims=2"] = @async_benchmarkable accumulate(+, $gpu_mat_ints; dims=2) + +# group["dims=1L"] = @async_benchmarkable accumulate(+, $gpu_mat_long_ints; dims=1) +# group["dims=2L"] = @async_benchmarkable accumulate(+, $gpu_mat_long_ints; dims=2) +# end +# end + +# let group = addgroup!(group, "reductions") +# let group = addgroup!(group, "reduce") +# let group = addgroup!(group, "Float32") +# group["1d"] = @async_benchmarkable reduce(+, $gpu_vec) +# group["dims=1"] = @async_benchmarkable reduce(+, $gpu_mat; dims=1) +# group["dims=2"] = @async_benchmarkable reduce(+, $gpu_mat; dims=2) +# group["dims=1L"] = @async_benchmarkable reduce(+, $gpu_mat_long; dims=1) +# group["dims=2L"] = @async_benchmarkable reduce(+, $gpu_mat_long; dims=2) +# end +# let group = addgroup!(group, "Int64") +# group["1d"] = @async_benchmarkable reduce(+, $gpu_vec_ints) +# group["dims=1"] = @async_benchmarkable reduce(+, $gpu_mat_ints; dims=1) +# group["dims=2"] = @async_benchmarkable reduce(+, $gpu_mat_ints; dims=2) +# group["dims=1L"] = @async_benchmarkable reduce(+, $gpu_mat_long_ints; dims=1) +# group["dims=2L"] = @async_benchmarkable reduce(+, $gpu_mat_long_ints; dims=2) +# end +# end + +# let group = addgroup!(group, "mapreduce") +# let group = addgroup!(group, "Float32") +# group["1d"] = @async_benchmarkable mapreduce(x->x+1, +, $gpu_vec) +# group["dims=1"] = @async_benchmarkable mapreduce(x->x+1, +, $gpu_mat; dims=1) +# group["dims=2"] = @async_benchmarkable mapreduce(x->x+1, +, $gpu_mat; dims=2) +# group["dims=1L"] = @async_benchmarkable mapreduce(x->x+1, +, $gpu_mat_long; dims=1) +# group["dims=2L"] = @async_benchmarkable mapreduce(x->x+1, +, $gpu_mat_long; dims=2) +# end +# let group = addgroup!(group, "Int64") +# group["1d"] = @async_benchmarkable mapreduce(x->x+1, +, $gpu_vec_ints) +# group["dims=1"] = @async_benchmarkable mapreduce(x->x+1, +, $gpu_mat_ints; dims=1) +# group["dims=2"] = @async_benchmarkable mapreduce(x->x+1, +, $gpu_mat_ints; dims=2) +# group["dims=1L"] = @async_benchmarkable mapreduce(x->x+1, +, $gpu_mat_long_ints; dims=1) +# group["dims=2L"] = @async_benchmarkable mapreduce(x->x+1, +, $gpu_mat_long_ints; dims=2) +# end +# end + +# # used by sum, prod, minimum, maximum, all, any, count +# end + +# let group = addgroup!(group, "random") +# let group = addgroup!(group, "rand") +# group["Float32"] = @async_benchmarkable 
CUDA.rand(Float32, m*n) +# group["Int64"] = @async_benchmarkable CUDA.rand(Int64, m*n) +# end + +# let group = addgroup!(group, "rand!") +# group["Float32"] = @async_benchmarkable CUDA.rand!($gpu_vec) +# group["Int64"] = @async_benchmarkable CUDA.rand!($gpu_vec_ints) +# end + +# let group = addgroup!(group, "randn") +# group["Float32"] = @async_benchmarkable CUDA.randn(Float32, m*n) +# end + +# let group = addgroup!(group, "randn!") +# group["Float32"] = @async_benchmarkable CUDA.randn!($gpu_vec) +# end +# end + +# let group = addgroup!(group, "sorting") +# group["1d"] = @async_benchmarkable sort($gpu_vec) +# group["2d"] = @async_benchmarkable sort($gpu_mat; dims=1) +# group["by"] = @async_benchmarkable sort($gpu_vec; by=sin) +# end + +# let group = addgroup!(group, "permutedims") +# group["2d"] = @async_benchmarkable permutedims($gpu_mat, (2,1)) +# group["3d"] = @async_benchmarkable permutedims($gpu_arr_3d, (3,1,2)) +# group["4d"] = @async_benchmarkable permutedims($gpu_arr_4d, (2,1,4,3)) +# end diff --git a/perf/runbenchmarks.jl b/perf/runbenchmarks.jl index 72d8b1e76a..f66a1a8a9b 100644 --- a/perf/runbenchmarks.jl +++ b/perf/runbenchmarks.jl @@ -1,4 +1,6 @@ # benchmark suite execution and codespeed submission +using Pkg +Pkg.add(url="https://github.com/christiangnrd/GPUArrays.jl", rev="reverse") using CUDA @@ -16,13 +18,13 @@ end # before anything else, run latency benchmarks. these spawn subprocesses, so we don't want # to do so after regular benchmarks have caused the memory allocator to reserve memory. -@info "Running latency benchmarks" -latency_results = include("latency.jl") +# @info "Running latency benchmarks" +# latency_results = include("latency.jl") SUITE = BenchmarkGroup() -include("cuda.jl") -include("kernel.jl") +# include("cuda.jl") +# include("kernel.jl") include("array.jl") @info "Preparing main benchmarks" @@ -34,20 +36,20 @@ GC.gc(true) CUDA.reclaim() # benchmark groups that aren't part of the suite -addgroup!(SUITE, "integration") +# addgroup!(SUITE, "integration") @info "Running main benchmarks" results = run(SUITE, verbose=true) # integration tests (that do nasty things, so need to be run last) -@info "Running integration benchmarks" -integration_results = BenchmarkGroup() -integration_results["volumerhs"] = include("volumerhs.jl") -integration_results["byval"] = include("byval.jl") -integration_results["cudadevrt"] = include("cudadevrt.jl") - -results["latency"] = latency_results -results["integration"] = integration_results +# @info "Running integration benchmarks" +# integration_results = BenchmarkGroup() +# integration_results["volumerhs"] = include("volumerhs.jl") +# integration_results["byval"] = include("byval.jl") +# integration_results["cudadevrt"] = include("cudadevrt.jl") + +# results["latency"] = latency_results +# results["integration"] = integration_results # write out the results result_file = length(ARGS) >= 1 ? ARGS[1] : "benchmarkresults.json" diff --git a/src/reverse.jl b/src/reverse.jl index d0c73da2ec..94914941f5 100644 --- a/src/reverse.jl +++ b/src/reverse.jl @@ -1,155 +1,155 @@ # reversing -# the kernel works by treating the array as 1d. after reversing by dimension x an element at -# pos [i1, i2, i3, ... , i{x}, ..., i{n}] will be at -# pos [i1, i2, i3, ... 
, d{x} - i{x} + 1, ..., i{n}] where d{x} is the size of dimension x - -# out-of-place version, copying a single value per thread from input to output -function _reverse(input::AnyCuArray{T, N}, output::AnyCuArray{T, N}; - dims=1:ndims(input)) where {T, N} - @assert size(input) == size(output) - rev_dims = ntuple((d)-> d in dims && size(input, d) > 1, N) - ref = size(input) .+ 1 - # converts an ND-index in the data array to the linear index - lin_idx = LinearIndices(input) - # converts a linear index in a reduced array to an ND-index, but using the reduced size - nd_idx = CartesianIndices(input) - - ## COV_EXCL_START - function kernel(input::AbstractArray{T, N}, output::AbstractArray{T, N}) where {T, N} - offset_in = blockDim().x * (blockIdx().x - 1i32) - index_in = offset_in + threadIdx().x - - @inbounds if index_in <= length(input) - idx = Tuple(nd_idx[index_in]) - idx = ifelse.(rev_dims, ref .- idx, idx) - index_out = lin_idx[idx...] - output[index_out] = input[index_in] - end - - return - end - ## COV_EXCL_STOP - - nthreads = 256 - nblocks = cld(length(input), nthreads) - - @cuda threads=nthreads blocks=nblocks kernel(input, output) -end - -# in-place version, swapping elements on half the number of threads -function _reverse!(data::AnyCuArray{T, N}; dims=1:ndims(data)) where {T, N} - rev_dims = ntuple((d)-> d in dims && size(data, d) > 1, N) - half_dim = findlast(rev_dims) - if isnothing(half_dim) - # no reverse operation needed at all in this case. - return - end - ref = size(data) .+ 1 - # converts an ND-index in the data array to the linear index - lin_idx = LinearIndices(data) - reduced_size = ntuple((d)->ifelse(d==half_dim, cld(size(data,d),2), size(data,d)), N) - reduced_length = prod(reduced_size) - # converts a linear index in a reduced array to an ND-index, but using the reduced size - nd_idx = CartesianIndices(reduced_size) - - ## COV_EXCL_START - function kernel(data::AbstractArray{T, N}) where {T, N} - offset_in = blockDim().x * (blockIdx().x - 1i32) - - index_in = offset_in + threadIdx().x - - @inbounds if index_in <= reduced_length - idx = Tuple(nd_idx[index_in]) - index_in = lin_idx[idx...] - idx = ifelse.(rev_dims, ref .- idx, idx) - index_out = lin_idx[idx...] - - if index_in < index_out - temp = data[index_out] - data[index_out] = data[index_in] - data[index_in] = temp - end - end - - return - end - ## COV_EXCL_STOP - - # NOTE: we launch slightly more than half the number of elements in the array as threads. - # The last non-singleton dimension along which to reverse is used to define how the array is split. - # Only the middle row in case of an odd array dimension could cause trouble, but this is prevented by - # ignoring the threads that cross the mid-point - - nthreads = 256 - nblocks = cld(prod(reduced_size), nthreads) - - @cuda threads=nthreads blocks=nblocks kernel(data) -end +# # the kernel works by treating the array as 1d. after reversing by dimension x an element at +# # pos [i1, i2, i3, ... , i{x}, ..., i{n}] will be at +# # pos [i1, i2, i3, ... 
, d{x} - i{x} + 1, ..., i{n}] where d{x} is the size of dimension x + +# # out-of-place version, copying a single value per thread from input to output +# function _reverse(input::AnyCuArray{T, N}, output::AnyCuArray{T, N}; +# dims=1:ndims(input)) where {T, N} +# @assert size(input) == size(output) +# rev_dims = ntuple((d)-> d in dims && size(input, d) > 1, N) +# ref = size(input) .+ 1 +# # converts an ND-index in the data array to the linear index +# lin_idx = LinearIndices(input) +# # converts a linear index in a reduced array to an ND-index, but using the reduced size +# nd_idx = CartesianIndices(input) + +# ## COV_EXCL_START +# function kernel(input::AbstractArray{T, N}, output::AbstractArray{T, N}) where {T, N} +# offset_in = blockDim().x * (blockIdx().x - 1i32) +# index_in = offset_in + threadIdx().x + +# @inbounds if index_in <= length(input) +# idx = Tuple(nd_idx[index_in]) +# idx = ifelse.(rev_dims, ref .- idx, idx) +# index_out = lin_idx[idx...] +# output[index_out] = input[index_in] +# end + +# return +# end +# ## COV_EXCL_STOP + +# nthreads = 256 +# nblocks = cld(length(input), nthreads) + +# @cuda threads=nthreads blocks=nblocks kernel(input, output) +# end + +# # in-place version, swapping elements on half the number of threads +# function _reverse!(data::AnyCuArray{T, N}; dims=1:ndims(data)) where {T, N} +# rev_dims = ntuple((d)-> d in dims && size(data, d) > 1, N) +# half_dim = findlast(rev_dims) +# if isnothing(half_dim) +# # no reverse operation needed at all in this case. +# return +# end +# ref = size(data) .+ 1 +# # converts an ND-index in the data array to the linear index +# lin_idx = LinearIndices(data) +# reduced_size = ntuple((d)->ifelse(d==half_dim, cld(size(data,d),2), size(data,d)), N) +# reduced_length = prod(reduced_size) +# # converts a linear index in a reduced array to an ND-index, but using the reduced size +# nd_idx = CartesianIndices(reduced_size) + +# ## COV_EXCL_START +# function kernel(data::AbstractArray{T, N}) where {T, N} +# offset_in = blockDim().x * (blockIdx().x - 1i32) + +# index_in = offset_in + threadIdx().x + +# @inbounds if index_in <= reduced_length +# idx = Tuple(nd_idx[index_in]) +# index_in = lin_idx[idx...] +# idx = ifelse.(rev_dims, ref .- idx, idx) +# index_out = lin_idx[idx...] + +# if index_in < index_out +# temp = data[index_out] +# data[index_out] = data[index_in] +# data[index_in] = temp +# end +# end + +# return +# end +# ## COV_EXCL_STOP + +# # NOTE: we launch slightly more than half the number of elements in the array as threads. +# # The last non-singleton dimension along which to reverse is used to define how the array is split. 
+# # Only the middle row in case of an odd array dimension could cause trouble, but this is prevented by +# # ignoring the threads that cross the mid-point + +# nthreads = 256 +# nblocks = cld(prod(reduced_size), nthreads) + +# @cuda threads=nthreads blocks=nblocks kernel(data) +# end # n-dimensional API -function Base.reverse!(data::AnyCuArray{T, N}; dims=:) where {T, N} - if isa(dims, Colon) - dims = 1:ndims(data) - end - if !applicable(iterate, dims) - throw(ArgumentError("dimension $dims is not an iterable")) - end - if !all(1 .≤ dims .≤ ndims(data)) - throw(ArgumentError("dimension $dims is not 1 ≤ $dims ≤ $(ndims(data))")) - end - - _reverse!(data; dims=dims) - - return data -end - -# out-of-place -function Base.reverse(input::AnyCuArray{T, N}; dims=:) where {T, N} - if isa(dims, Colon) - dims = 1:ndims(input) - end - if !applicable(iterate, dims) - throw(ArgumentError("dimension $dims is not an iterable")) - end - if !all(1 .≤ dims .≤ ndims(input)) - throw(ArgumentError("dimension $dims is not 1 ≤ $dims ≤ $(ndims(input))")) - end - - if all(size(input)[[dims...]].==1) - # no reverse operation needed at all in this case. - return copy(input) - else - output = similar(input) - _reverse(input, output; dims=dims) - return output - end -end +# function Base.reverse!(data::AnyCuArray{T, N}; dims=:) where {T, N} +# if isa(dims, Colon) +# dims = 1:ndims(data) +# end +# if !applicable(iterate, dims) +# throw(ArgumentError("dimension $dims is not an iterable")) +# end +# if !all(1 .≤ dims .≤ ndims(data)) +# throw(ArgumentError("dimension $dims is not 1 ≤ $dims ≤ $(ndims(data))")) +# end + +# _reverse!(data; dims=dims) + +# return data +# end + +# # out-of-place +# function Base.reverse(input::AnyCuArray{T, N}; dims=:) where {T, N} +# if isa(dims, Colon) +# dims = 1:ndims(input) +# end +# if !applicable(iterate, dims) +# throw(ArgumentError("dimension $dims is not an iterable")) +# end +# if !all(1 .≤ dims .≤ ndims(input)) +# throw(ArgumentError("dimension $dims is not 1 ≤ $dims ≤ $(ndims(input))")) +# end + +# if all(size(input)[[dims...]].==1) +# # no reverse operation needed at all in this case. 
+# return copy(input) +# else +# output = similar(input) +# _reverse(input, output; dims=dims) +# return output +# end +# end # 1-dimensional API -# in-place -Base.@propagate_inbounds function Base.reverse!(data::AnyCuVector{T}, start::Integer, - stop::Integer=length(data)) where {T} - _reverse!(view(data, start:stop)) - return data -end +# # in-place +# Base.@propagate_inbounds function Base.reverse!(data::AnyCuVector{T}, start::Integer, +# stop::Integer=length(data)) where {T} +# _reverse!(view(data, start:stop)) +# return data +# end -Base.reverse!(data::AnyCuVector{T}) where {T} = @inbounds reverse!(data, 1, length(data)) +# Base.reverse!(data::AnyCuVector{T}) where {T} = @inbounds reverse!(data, 1, length(data)) -# out-of-place -Base.@propagate_inbounds function Base.reverse(input::AnyCuVector{T}, start::Integer, - stop::Integer=length(input)) where {T} - output = similar(input) +# # out-of-place +# Base.@propagate_inbounds function Base.reverse(input::AnyCuVector{T}, start::Integer, +# stop::Integer=length(input)) where {T} +# output = similar(input) - start > 1 && copyto!(output, 1, input, 1, start-1) - _reverse(view(input, start:stop), view(output, start:stop)) - stop < length(input) && copyto!(output, stop+1, input, stop+1) +# start > 1 && copyto!(output, 1, input, 1, start-1) +# _reverse(view(input, start:stop), view(output, start:stop)) +# stop < length(input) && copyto!(output, stop+1, input, stop+1) - return output -end +# return output +# end -Base.reverse(data::AnyCuVector{T}) where {T} = @inbounds reverse(data, 1, length(data)) +# Base.reverse(data::AnyCuVector{T}) where {T} = @inbounds reverse(data, 1, length(data)) diff --git a/test/base/array.jl b/test/base/array.jl index 51fb2fc219..9ab218b886 100644 --- a/test/base/array.jl +++ b/test/base/array.jl @@ -478,50 +478,50 @@ end @test Array(x) == zeros(4) end -@testset "reverse" begin - # 1-d out-of-place - @test testf(x->reverse(x), rand(1000)) - @test testf(x->reverse(x, 10), rand(1000)) - @test testf(x->reverse(x, 10, 90), rand(1000)) - - # 1-d in-place - @test testf(x->reverse!(x), rand(1000)) - @test testf(x->reverse!(x, 10), rand(1000)) - @test testf(x->reverse!(x, 10, 90), rand(1000)) - - # n-d out-of-place - for shape in ([1, 2, 4, 3], [4, 2], [5], [2^5, 2^5, 2^5]), - dim in 1:length(shape) - @test testf(x->reverse(x; dims=dim), rand(shape...)) - - cpu = rand(shape...) - gpu = CuArray(cpu) - reverse!(gpu; dims=dim) - @test Array(gpu) == reverse(cpu; dims=dim) - end - - # supports multidimensional reverse - for shape in ([1, 2, 4, 3], [2^5, 2^5, 2^5]), - dim in ((1,2),(2,3),(1,3),:) - @test testf(x->reverse(x; dims=dim), rand(shape...)) - - cpu = rand(shape...) 
- gpu = CuArray(cpu) - reverse!(gpu; dims=dim) - @test Array(gpu) == reverse(cpu; dims=dim) - end - - # wrapped array - @test testf(x->reverse(x), reshape(rand(2,2), 4)) - - # error throwing - cpu = rand(1,2,3,4) - gpu = CuArray(cpu) - @test_throws ArgumentError reverse!(gpu, dims=5) - @test_throws ArgumentError reverse!(gpu, dims=0) - @test_throws ArgumentError reverse(gpu, dims=5) - @test_throws ArgumentError reverse(gpu, dims=0) -end +# @testset "reverse" begin +# # 1-d out-of-place +# @test testf(x->reverse(x), rand(1000)) +# @test testf(x->reverse(x, 10), rand(1000)) +# @test testf(x->reverse(x, 10, 90), rand(1000)) + +# # 1-d in-place +# @test testf(x->reverse!(x), rand(1000)) +# @test testf(x->reverse!(x, 10), rand(1000)) +# @test testf(x->reverse!(x, 10, 90), rand(1000)) + +# # n-d out-of-place +# for shape in ([1, 2, 4, 3], [4, 2], [5], [2^5, 2^5, 2^5]), +# dim in 1:length(shape) +# @test testf(x->reverse(x; dims=dim), rand(shape...)) + +# cpu = rand(shape...) +# gpu = CuArray(cpu) +# reverse!(gpu; dims=dim) +# @test Array(gpu) == reverse(cpu; dims=dim) +# end + +# # supports multidimensional reverse +# for shape in ([1, 2, 4, 3], [2^5, 2^5, 2^5]), +# dim in ((1,2),(2,3),(1,3),:) +# @test testf(x->reverse(x; dims=dim), rand(shape...)) + +# cpu = rand(shape...) +# gpu = CuArray(cpu) +# reverse!(gpu; dims=dim) +# @test Array(gpu) == reverse(cpu; dims=dim) +# end + +# # wrapped array +# @test testf(x->reverse(x), reshape(rand(2,2), 4)) + +# # error throwing +# cpu = rand(1,2,3,4) +# gpu = CuArray(cpu) +# @test_throws ArgumentError reverse!(gpu, dims=5) +# @test_throws ArgumentError reverse!(gpu, dims=0) +# @test_throws ArgumentError reverse(gpu, dims=5) +# @test_throws ArgumentError reverse(gpu, dims=0) +# end @testset "findall" begin # 1D diff --git a/test/runtests.jl b/test/runtests.jl index 9172d18a5c..8aa8a9d696 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,3 +1,6 @@ +using Pkg +Pkg.add(url="https://github.com/christiangnrd/GPUArrays.jl", rev="reverse") + using Distributed using Dates import REPL
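
Note on the added benchmark entries (illustration, not part of the patch above): the new "1dL"/"2dL" reverse benchmarks in perf/array.jl reference gpu_vec_long and gpu_mat_long. gpu_mat_long and gpu_mat_long_ints already appear in the (now commented-out) accumulate/reduce/mapreduce groups, so they are presumably defined above the @@ -18 hunk; gpu_vec_long is not referenced anywhere else in the file, so it may still need a fixture. Commenting out src/reverse.jl presumably lets reverse/reverse! on CuArray fall through to the GPUArrays.jl implementation from the christiangnrd/GPUArrays.jl "reverse" branch that runbenchmarks.jl and test/runtests.jl now install. Below is a minimal sketch of such a fixture and of running the trimmed suite locally; the sizes and the m_long/n_long names are illustrative assumptions, and only the result-file convention (ARGS[1], defaulting to "benchmarkresults.json") comes from perf/runbenchmarks.jl.

    # perf/array.jl -- hypothetical fixture for the new reverse benchmarks,
    # assuming gpu_vec_long is not already defined above the shown hunk.
    using CUDA

    m_long, n_long = 16, 1_000_000                  # long, skinny matrix; sizes are placeholders
    gpu_mat_long = CuArray(rand(Float32, m_long, n_long))
    # same reshape pattern the file already uses for gpu_vec_ints / gpu_vec_bools
    gpu_vec_long = reshape(gpu_mat_long, length(gpu_mat_long))

    # Running only the reverse benchmarks locally (runbenchmarks.jl adds the
    # GPUArrays.jl fork itself), assuming the perf environment resolves CUDA.jl
    # to this checkout, e.g. through its Manifest:
    #
    #   julia --project=perf -e 'using Pkg; Pkg.instantiate()'
    #   julia --project=perf perf/runbenchmarks.jl reverse-results.json
    #
    # With latency.jl, cuda.jl, kernel.jl and the integration benchmarks
    # commented out, only the "reverse" group from array.jl is timed, and the
    # results are written to the JSON file passed as ARGS[1].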