export @opencl, clfunction, clconvert


## high-level @opencl interface

const MACRO_KWARGS = [:launch]
const COMPILER_KWARGS = [:kernel, :name, :always_inline]
const LAUNCH_KWARGS = [:global_size, :local_size, :queue]

macro opencl(ex...)
    call = ex[end]
    kwargs = map(ex[1:end-1]) do kwarg
        if kwarg isa Symbol
            :($kwarg = $kwarg)
        elseif Meta.isexpr(kwarg, :(=))
            kwarg
        else
            throw(ArgumentError("Invalid keyword argument '$kwarg'"))
        end
    end

    # destructure the kernel call
    Meta.isexpr(call, :call) || throw(ArgumentError("final argument to @opencl should be a function call"))
    f = call.args[1]
    args = call.args[2:end]

    code = quote end
    vars, var_exprs = assign_args!(code, args)

    # group keyword arguments
    macro_kwargs, compiler_kwargs, call_kwargs, other_kwargs =
        split_kwargs(kwargs, MACRO_KWARGS, COMPILER_KWARGS, LAUNCH_KWARGS)
    if !isempty(other_kwargs)
        key, val = first(other_kwargs).args
        throw(ArgumentError("Unsupported keyword argument '$key'"))
    end

    # handle keyword arguments that influence the macro's behavior
    launch = true
    for kwarg in macro_kwargs
        key, val = kwarg.args
        if key == :launch
            isa(val, Bool) || throw(ArgumentError("`launch` keyword argument to @opencl should be a constant value"))
            launch = val::Bool
        else
            throw(ArgumentError("Unsupported keyword argument '$key'"))
        end
    end
    if !launch && !isempty(call_kwargs)
        error("@opencl with launch=false does not support launch-time keyword arguments; use them when calling the kernel")
    end

    # FIXME: macro hygiene wrt. escaping kwarg values (this broke with 1.5)
    #        we esc() the whole thing now, necessitating gensyms...
    @gensym f_var kernel_f kernel_args kernel_tt kernel

    # convert the arguments, call the compiler and launch the kernel
    # while keeping the original arguments alive
    push!(code.args,
        quote
            $f_var = $f
            GC.@preserve $(vars...) $f_var begin
                $kernel_f = $clconvert($f_var)
                $kernel_args = map($clconvert, ($(var_exprs...),))
                $kernel_tt = Tuple{map(Core.Typeof, $kernel_args)...}
                $kernel = $clfunction($kernel_f, $kernel_tt; $(compiler_kwargs...))
                if $launch
                    $kernel($(var_exprs...); $(call_kwargs...))
                end
                $kernel
            end
        end)

    return esc(quote
        let
            $code
        end
    end)
end
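# Usage sketch (illustrative only; `my_kernel`, `a`, and `b` are hypothetical user code,
# not defined in this file). The trailing call is compiled with the compiler kwargs and,
# unless `launch=false`, immediately launched with the launch-time kwargs:
#
#     @opencl global_size=1024 my_kernel(a, b)        # compile, convert arguments, launch
#     k = @opencl launch=false my_kernel(a, b)        # compile only; returns a HostKernel
#     k(a, b; global_size=1024, local_size=256)       # launch later with launch-time kwargs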


## argument conversion

struct KernelAdaptor
    svm_pointers::Vector{Ptr{Cvoid}}
end

# assume directly-passed pointers are SVM pointers
function Adapt.adapt_storage(to::KernelAdaptor, ptr::Ptr{T}) where {T}
    push!(to.svm_pointers, ptr)
    return ptr
end

# convert SVM buffers to their GPU address
function Adapt.adapt_storage(to::KernelAdaptor, buf::cl.SVMBuffer)
    ptr = pointer(buf)
    push!(to.svm_pointers, ptr)
    return ptr
end

# Base.RefValue isn't GPU compatible, so provide a compatible alternative
# TODO: port improvements from CUDA.jl
struct CLRefValue{T} <: Ref{T}
    x::T
end
Base.getindex(r::CLRefValue) = r.x
Adapt.adapt_structure(to::KernelAdaptor, r::Base.RefValue) = CLRefValue(adapt(to, r[]))

# broadcast sometimes passes a ref(type), resulting in a GPU-incompatible DataType box.
# avoid that by using a special kind of ref that knows about the boxed type.
struct CLRefType{T} <: Ref{DataType} end
Base.getindex(r::CLRefType{T}) where T = T
Adapt.adapt_structure(to::KernelAdaptor, r::Base.RefValue{<:Union{DataType,Type}}) =
    CLRefType{r[]}()

# case where type is the function being broadcasted
Adapt.adapt_structure(to::KernelAdaptor,
                      bc::Broadcast.Broadcasted{Style, <:Any, Type{T}}) where {Style, T} =
    Broadcast.Broadcasted{Style}((x...) -> T(x...), adapt(to, bc.args), bc.axes)

| 121 | +""" |
| 122 | + clconvert(x, [pointers]) |
| 123 | +
|
| 124 | +This function is called for every argument to be passed to a kernel, allowing it to be |
| 125 | +converted to a GPU-friendly format. By default, the function does nothing and returns the |
| 126 | +input object `x` as-is. |
| 127 | +
|
| 128 | +Do not add methods to this function, but instead extend the underlying Adapt.jl package and |
| 129 | +register methods for the the `OpenCL.KernelAdaptor` type. |
| 130 | +
|
| 131 | +The `pointers` argument is used to collect pointers to indirect SVM buffers, which need to |
| 132 | +be registered with OpenCL before invoking the kernel. |
| 133 | +""" |
function clconvert(arg, pointers::Vector{Ptr{Cvoid}}=Ptr{Cvoid}[])
    adapt(KernelAdaptor(pointers), arg)
end

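# Sketch of how conversion composes (hypothetical `MyWrapper` and `svm_buf`; the real
# extension point is Adapt.jl, as the docstring above notes):
#
#     struct MyWrapper{T}
#         data::T
#     end
#     Adapt.adapt_structure(to::KernelAdaptor, w::MyWrapper) = MyWrapper(adapt(to, w.data))
#
#     svm_pointers = Ptr{Cvoid}[]
#     clconvert(Ref(1.0f0), svm_pointers)   # becomes a CLRefValue{Float32}
#     clconvert(svm_buf, svm_pointers)      # a cl.SVMBuffer turns into its raw pointer,
#                                           # which is also appended to `svm_pointers`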


## abstract kernel functionality

abstract type AbstractKernel{F,TT} end

@inline @generated function (kernel::AbstractKernel{F,TT})(args...;
                                                           call_kwargs...) where {F,TT}
    sig = Tuple{F, TT.parameters...}    # Base.signature_type with a function type
    args = (:(kernel.f), (:( clconvert(args[$i], svm_pointers) ) for i in 1:length(args))...)

    # filter out ghost arguments that shouldn't be passed
    predicate = dt -> isghosttype(dt) || Core.Compiler.isconstType(dt)
    to_pass = map(!predicate, sig.parameters)
    call_t = Type[x[1] for x in zip(sig.parameters, to_pass) if x[2]]
    call_args = Union{Expr,Symbol}[x[1] for x in zip(args, to_pass) if x[2]]

    # replace non-isbits arguments (they should be unused, or compilation would have failed)
    for (i, dt) in enumerate(call_t)
        if !isbitstype(dt)
            call_t[i] = Ptr{Any}
            call_args[i] = :C_NULL
        end
    end

    # finalize types
    call_tt = Base.to_tuple_type(call_t)

    quote
        svm_pointers = Ptr{Cvoid}[]
        clcall(kernel.fun, $call_tt, $(call_args...); svm_pointers, call_kwargs...)
    end
end
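# For reference, a rough shape of the body this produces for a hypothetical kernel
# compiled as `HostKernel{typeof(vadd), Tuple{Ptr{Float32}, Ptr{Float32}, Int32}}`
# (the singleton function type is a ghost argument and gets filtered out):
#
#     quote
#         svm_pointers = Ptr{Cvoid}[]
#         clcall(kernel.fun, Tuple{Ptr{Float32}, Ptr{Float32}, Int32},
#                clconvert(args[1], svm_pointers), clconvert(args[2], svm_pointers),
#                clconvert(args[3], svm_pointers);
#                svm_pointers, call_kwargs...)
#     end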


## host-side kernels

struct HostKernel{F,TT} <: AbstractKernel{F,TT}
    f::F
    fun::cl.Kernel
end


## host-side API

const clfunction_lock = ReentrantLock()

function clfunction(f::F, tt::TT=Tuple{}; kwargs...) where {F,TT}
    ctx = cl.context()
    dev = cl.device()

    Base.@lock clfunction_lock begin
        # compile the function
        cache = compiler_cache(ctx)
        source = methodinstance(F, tt)
        config = compiler_config(dev; kwargs...)::OpenCLCompilerConfig
        fun = GPUCompiler.cached_compilation(cache, source, config, compile, link)

        # create a callable object that captures the function instance. we don't need to think
        # about world age here, as GPUCompiler already does and will return a different object
        h = hash(fun, hash(f, hash(tt)))
        kernel = get(_kernel_instances, h, nothing)
        if kernel === nothing
            # create the kernel state object
            kernel = HostKernel{F,tt}(f, fun)
            _kernel_instances[h] = kernel
        end
        return kernel::HostKernel{F,tt}
    end
end

# cache of kernel instances
const _kernel_instances = Dict{UInt, Any}()
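# Sketch of the lower-level path the macro expands to (hypothetical `my_kernel`, `a`, `b`);
# roughly equivalent to `@opencl launch=false my_kernel(a, b)` followed by a call:
#
#     kernel_args = map(clconvert, (a, b))
#     kernel_tt = Tuple{map(Core.Typeof, kernel_args)...}
#     k = clfunction(clconvert(my_kernel), kernel_tt)
#     k(a, b; global_size=1024)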