
Commit e08b87d

jpsamaroo and vchuravy authored
Combine tests into parameterized testsuite (#219)
* Split tests into testsuite

Co-authored-by: Valentin Churavy <[email protected]>
1 parent b2f7105 commit e08b87d


14 files changed: +374 -389 lines changed


.buildkite/pipeline.yml

Lines changed: 9 additions & 0 deletions
@@ -14,6 +14,9 @@ steps:
     agents:
       queue: "juliagpu"
       cuda: "*"
+    env:
+      JULIA_CUDA_USE_BINARYBUILDER: "true"
+      KERNELABSTRACTIONS_TEST_BACKEND: "CUDA"
     timeout_in_minutes: 60
 
   - label: "Julia 1.6-nightly"
@@ -31,6 +34,9 @@ steps:
     agents:
       queue: "juliagpu"
       cuda: "*"
+    env:
+      JULIA_CUDA_USE_BINARYBUILDER: "true"
+      KERNELABSTRACTIONS_TEST_BACKEND: "CUDA"
     timeout_in_minutes: 60
 
   - label: "Julia nightly"
@@ -48,6 +54,9 @@ steps:
     agents:
       queue: "juliagpu"
       cuda: "*"
+    env:
+      JULIA_CUDA_USE_BINARYBUILDER: "true"
+      KERNELABSTRACTIONS_TEST_BACKEND: "CUDA"
     timeout_in_minutes: 60
 
 env:
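
The new KERNELABSTRACTIONS_TEST_BACKEND variable is how CI tells the parameterized suites which device to exercise. A minimal sketch of how a test runner might consume it (the actual runtests.jl is among the 14 changed files but is not shown in this excerpt):

    # Hypothetical runner logic; reads the CI variable set in pipeline.yml above.
    backend_name = get(ENV, "KERNELABSTRACTIONS_TEST_BACKEND", "CPU")
    if backend_name == "CUDA"
        using CUDA
        CUDA.allowscalar(false)  # moved here from the individual test files
        backend, ArrayT = CUDADevice, CuArray
    else
        backend, ArrayT = CPU, Array
    end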

test/async_copy.jl

Lines changed: 5 additions & 14 deletions
@@ -1,25 +1,16 @@
-using KernelAbstractions, Test, CUDA
+using KernelAbstractions, Test
 
-if has_cuda_gpu()
-    CUDA.allowscalar(false)
-end
+function asynccopy_testsuite(backend, ArrayT)
+    M = 1024
 
-function copy_test(backend, ArrayT, M)
     A = ArrayT(rand(Float64, M))
     B = ArrayT(rand(Float64, M))
 
     a = Array{Float64}(undef, M)
-    event = async_copy!(backend, a, B, dependencies=Event(CPU()))
-    event = async_copy!(backend, A, a, dependencies=event)
+    event = async_copy!(backend(), a, B, dependencies=Event(CPU()))
+    event = async_copy!(backend(), A, a, dependencies=event)
     wait(event)
 
     @test isapprox(a, Array(A))
     @test isapprox(a, Array(B))
 end
-
-M = 1024
-
-if has_cuda_gpu()
-    copy_test(CUDADevice(), CuArray, M)
-end
-copy_test(CPU(), Array, M)
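
Note the calling convention introduced here: the suite takes the backend as a type (CPU, CUDADevice) and instantiates it internally via backend(), so one function body serves every device. A minimal usage sketch, assuming the file is include-d by a runner:

    using KernelAbstractions, Test
    include("async_copy.jl")

    asynccopy_testsuite(CPU, Array)              # CPU run

    # On CUDA-capable CI the identical suite is reused:
    # using CUDA
    # asynccopy_testsuite(CUDADevice, CuArray)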

test/compiler.jl

Lines changed: 8 additions & 5 deletions
@@ -4,11 +4,14 @@ using Test
 import KernelAbstractions.NDIteration: NDRange, StaticSize, DynamicSize
 
 @kernel function index(A)
-   I = @index(Global, NTuple)
+    I = @index(Global, NTuple)
     @show A[I...]
 end
-kernel = index(CPU(), DynamicSize(), DynamicSize())
-iterspace = NDRange{1, StaticSize{(128,)}, StaticSize{(8,)}}();
-ctx = KernelAbstractions.mkcontext(kernel, 1, nothing, iterspace, Val(KernelAbstractions.NoDynamicCheck()))
 
-@test KernelAbstractions.Cassette.overdub(ctx, KernelAbstractions.__index_Global_NTuple, CartesianIndex(1)) == (1,)
+function compiler_testsuite()
+    kernel = index(CPU(), DynamicSize(), DynamicSize())
+    iterspace = NDRange{1, StaticSize{(128,)}, StaticSize{(8,)}}();
+    ctx = KernelAbstractions.mkcontext(kernel, 1, nothing, iterspace, Val(KernelAbstractions.NoDynamicCheck()))
+
+    @test KernelAbstractions.Cassette.overdub(ctx, KernelAbstractions.__index_Global_NTuple, CartesianIndex(1)) == (1,)
+end

test/events.jl

Lines changed: 17 additions & 20 deletions
@@ -1,26 +1,23 @@
-using KernelAbstractions, Test, CUDA
+using KernelAbstractions, Test
 
-if has_cuda_gpu()
-    CUDA.allowscalar(false)
-end
-
-@testset "Error propagation" begin
-    let event = Event(()->error(""))
-        @test_throws TaskFailedException wait(event)
-    end
+function events_testsuite()
+    @testset "Error propagation" begin
+        let event = Event(()->error(""))
+            @test_throws TaskFailedException wait(event)
+        end
 
-    let event = Event(error, "")
-        @test_throws CompositeException wait(MultiEvent(event))
-    end
+        let event = Event(error, "")
+            @test_throws CompositeException wait(MultiEvent(event))
+        end
 
-    let event = Event(error, "")
-        event = Event(wait, MultiEvent(event))
-        @test_throws TaskFailedException wait(event)
-    end
+        let event = Event(error, "")
+            event = Event(wait, MultiEvent(event))
+            @test_throws TaskFailedException wait(event)
+        end
 
-    let event = Event(error, "")
-        event = Event(()->nothing, dependencies=event)
-        @test_throws TaskFailedException wait(event)
+        let event = Event(error, "")
+            event = Event(()->nothing, dependencies=event)
+            @test_throws TaskFailedException wait(event)
+        end
     end
 end
-

test/examples.jl

Lines changed: 15 additions & 14 deletions
@@ -1,5 +1,3 @@
-@testset "examples" begin
-
 function find_sources(path::String, sources=String[])
     if isdir(path)
         for entry in readdir(path)
@@ -11,18 +9,21 @@ function find_sources(path::String, sources=String[])
     sources
 end
 
-examples_dir = joinpath(@__DIR__, "..", "examples")
-examples = find_sources(examples_dir)
-filter!(file -> readline(file) != "# EXCLUDE FROM TESTING", examples)
+function examples_testsuite()
+@testset "examples" begin
+    examples_dir = joinpath(@__DIR__, "..", "examples")
+    examples = find_sources(examples_dir)
+    filter!(file -> readline(file) != "# EXCLUDE FROM TESTING", examples)
+
+    @testset "$(basename(example))" for example in examples
+        code = """
+        $(Base.load_path_setup_code())
+        include($(repr(example)))
+        """
+        cmd = `$(Base.julia_cmd()) --startup-file=no -e $code`
+        @debug "Testing $example" Text(code) cmd
+        @test success(pipeline(cmd, stderr=stderr))
+    end
 
-@testset "$(basename(example))" for example in examples
-    code = """
-    $(Base.load_path_setup_code())
-    include($(repr(example)))
-    """
-    cmd = `$(Base.julia_cmd()) --startup-file=no -e $code`
-    @debug "Testing $example" Text(code) cmd
-    @test success(pipeline(cmd, stderr=stderr))
 end
-
 end

test/localmem.jl

Lines changed: 10 additions & 20 deletions
@@ -1,10 +1,5 @@
 using KernelAbstractions
 using Test
-using CUDA
-
-if has_cuda_gpu()
-    CUDA.allowscalar(false)
-end
 
 @kernel function localmem(A)
     N = @uniform prod(groupsize())
@@ -39,20 +34,15 @@ end
     end
 end
 
-function harness(backend, ArrayT)
-    @testset for kernel! in (localmem(backend, 16), localmem2(backend, 16))
-        A = ArrayT{Int}(undef, 64)
-        wait(kernel!(A, ndrange=size(A)))
-        @test all(A[1:16] .== 16:-1:1)
-        @test all(A[17:32] .== 16:-1:1)
-        @test all(A[33:48] .== 16:-1:1)
-        @test all(A[49:64] .== 16:-1:1)
-    end
-end
-
-@testset "kernels" begin
-    harness(CPU(), Array)
-    if has_cuda_gpu()
-        harness(CUDADevice(), CuArray)
+function localmem_testsuite(backend, ArrayT)
+    @testset "kernels" begin
+        @testset for kernel! in (localmem(backend(), 16), localmem2(backend(), 16))
+            A = ArrayT{Int}(undef, 64)
+            wait(kernel!(A, ndrange=size(A)))
+            @test all(A[1:16] .== 16:-1:1)
+            @test all(A[17:32] .== 16:-1:1)
+            @test all(A[33:48] .== 16:-1:1)
+            @test all(A[49:64] .== 16:-1:1)
+        end
     end
 end

test/nditeration.jl

Lines changed: 2 additions & 0 deletions
@@ -2,6 +2,7 @@ using KernelAbstractions
 using KernelAbstractions.NDIteration
 using Test
 
+function nditeration_testsuite()
 @testset "iteration" begin
     let ndrange = NDRange{2, DynamicSize, DynamicSize}(CartesianIndices((256, 256)), CartesianIndices((32, 32)));
         @test length(ndrange) == 256*256
@@ -84,3 +85,4 @@ end
     end
 end
 end
+end

test/print_test.jl

Lines changed: 6 additions & 18 deletions
@@ -1,29 +1,17 @@
 using KernelAbstractions, Test
-using CUDA
-
-if has_cuda_gpu()
-    CUDA.allowscalar(false)
-end
 
 @kernel function kernel_print()
     I = @index(Global)
     @print("Hello from thread ", I, "!\n")
 end
 
-function test_print(backend)
-    kernel = kernel_print(backend, 4)
-    kernel(ndrange=(4,))
-end
+function printing_testsuite(backend)
+    @testset "print test" begin
+        kernel = kernel_print(backend(), 4)
+        wait(kernel(ndrange=(4,)))
+        @test true
 
-@testset "print test" begin
-    if has_cuda_gpu()
-        wait(test_print(CUDADevice()))
+        @print("Why this should work\n")
         @test true
     end
-
-    wait(test_print(CPU()))
-    @test true
-
-    @print("Why this should work\n")
-    @test true
 end

test/private.jl

Lines changed: 28 additions & 36 deletions
@@ -1,10 +1,5 @@
 using KernelAbstractions
 using Test
-using CUDA
-
-if has_cuda_gpu()
-    CUDA.allowscalar(false)
-end
 
 @kernel function typetest(A, B)
     priv = @private eltype(A) (1,)
@@ -57,41 +52,38 @@ end
     end
 end
 
-function harness(backend, ArrayT)
-    A = ArrayT{Int}(undef, 64)
-    wait(private(backend, 16)(A, ndrange=size(A)))
-    @test all(A[1:16] .== 16:-1:1)
-    @test all(A[17:32] .== 16:-1:1)
-    @test all(A[33:48] .== 16:-1:1)
-    @test all(A[49:64] .== 16:-1:1)
+function private_testsuite(backend, ArrayT)
+    @testset "kernels" begin
+        A = ArrayT{Int}(undef, 64)
+        wait(private(backend(), 16)(A, ndrange=size(A)))
+        @test all(A[1:16] .== 16:-1:1)
+        @test all(A[17:32] .== 16:-1:1)
+        @test all(A[33:48] .== 16:-1:1)
+        @test all(A[49:64] .== 16:-1:1)
 
-    A = ArrayT{Int}(undef, 64, 64)
-    A .= 1
-    wait(forloop(backend)(A, Val(size(A, 2)), ndrange=size(A,1), workgroupsize=size(A,1)))
-    @test all(A[:, 1] .== 64)
-    @test all(A[:, 2:end] .== 1)
+        A = ArrayT{Int}(undef, 64, 64)
+        A .= 1
+        wait(forloop(backend())(A, Val(size(A, 2)), ndrange=size(A,1), workgroupsize=size(A,1)))
+        @test all(A[:, 1] .== 64)
+        @test all(A[:, 2:end] .== 1)
 
-    B = ArrayT{Bool}(undef, size(A)...)
-    wait(typetest(backend, 16)(A, B, ndrange=size(A)))
-    @test all(B)
+        B = ArrayT{Bool}(undef, size(A)...)
+        wait(typetest(backend(), 16)(A, B, ndrange=size(A)))
+        @test all(B)
 
-    A = ArrayT{Float64}(ones(64,3));
-    out = ArrayT{Float64}(undef, 64)
-    wait(reduce_private(backend, 8)(out, A, ndrange=size(out)))
-    @test all(out .== 3.0)
-end
-
-@testset "kernels" begin
-    harness(CPU(), Array)
-    if has_cuda_gpu()
-        harness(CUDADevice(), CuArray)
+        A = ArrayT{Float64}(ones(64,3));
+        out = ArrayT{Float64}(undef, 64)
+        wait(reduce_private(backend(), 8)(out, A, ndrange=size(out)))
+        @test all(out .== 3.0)
     end
-end
 
-@testset "codegen" begin
-    IR = sprint() do io
-        KernelAbstractions.ka_code_llvm(io, reduce_private(CPU(), (8,)), Tuple{Vector{Float64}, Matrix{Float64}},
-                                        optimize=true, ndrange=(64,))
+    if backend == CPU
+        @testset "codegen" begin
+            IR = sprint() do io
+                KernelAbstractions.ka_code_llvm(io, reduce_private(backend(), (8,)), Tuple{ArrayT{Float64,1}, ArrayT{Float64,2}},
+                                                optimize=true, ndrange=(64,))
+            end
+            @test !occursin("gcframe", IR)
+        end
     end
-    @test !occursin("gcframe", IR)
 end
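
Taken together, each test file now exposes a single *_testsuite entry point instead of running at include time. A hypothetical top-level driver (the real runtests.jl wiring is part of this commit but not shown in this excerpt) could then be as small as:

    # Device-independent suites:
    compiler_testsuite()
    events_testsuite()
    nditeration_testsuite()
    examples_testsuite()

    # Parameterized suites, run once per backend; CI would add
    # (CUDADevice, CuArray) when KERNELABSTRACTIONS_TEST_BACKEND == "CUDA".
    for (backend, ArrayT) in ((CPU, Array),)
        asynccopy_testsuite(backend, ArrayT)
        localmem_testsuite(backend, ArrayT)
        private_testsuite(backend, ArrayT)
        printing_testsuite(backend)
    end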
