#include <cuda_fp16.h>
#include "add.cuh"
#include <c10/cuda/CUDAGuard.h>
#include <ATen/cuda/CUDAContext.h>
#include "util.h"
#include "util.cuh"

#define NUM_THREADS 1024

// Elementwise add kernel, stamped out once per (x, y, z) dtype combination.
// One thread handles one element; fn is the combining expression over a and b.
#define KERNEL_DEF(xt, yt, zt, kernel, fn) \
__launch_bounds__(NUM_THREADS) \
__global__ void kernel \
( \
    const xt* __restrict__ x, \
    const yt* __restrict__ y, \
    zt* __restrict__ z, \
    const uint64_t numel \
) \
{ \
    uint64_t idx = (uint64_t)blockIdx.x * NUM_THREADS + (uint64_t)threadIdx.x; \
    if (idx >= numel) return; \
    xt a = x[idx]; \
    yt b = y[idx]; \
    z[idx] = fn; \
}

KERNEL_DEF(half,  half,  half,  add_kernel_hhh, __hadd(a, b))
KERNEL_DEF(half,  half,  float, add_kernel_hhf, __half2float(__hadd(a, b)))
KERNEL_DEF(half,  float, half,  add_kernel_hfh, __float2half_rn(__half2float(a) + b))
KERNEL_DEF(half,  float, float, add_kernel_hff, __half2float(a) + b)
KERNEL_DEF(float, half,  half,  add_kernel_fhh, __float2half_rn(a + __half2float(b)))
KERNEL_DEF(float, half,  float, add_kernel_fhf, a + __half2float(b))
KERNEL_DEF(float, float, half,  add_kernel_ffh, __float2half_rn(a + b))
KERNEL_DEF(float, float, float, add_kernel_fff, a + b)

#undef KERNEL_DEF
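
// For reference only (not compiled): the last instantiation above expands to
// roughly the following; the other seven differ only in the argument types
// and the combining expression.
//
//   __launch_bounds__(NUM_THREADS)
//   __global__ void add_kernel_fff
//   (
//       const float* __restrict__ x,
//       const float* __restrict__ y,
//       float* __restrict__ z,
//       const uint64_t numel
//   )
//   {
//       uint64_t idx = (uint64_t)blockIdx.x * NUM_THREADS + (uint64_t)threadIdx.x;
//       if (idx >= numel) return;
//       float a = x[idx];
//       float b = y[idx];
//       z[idx] = a + b;
//   }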

/*
x + y -> z
Works in-place when x == z or y == z.
(See the usage sketch at the end of this file.)
*/

void add_gr
(
    const at::Tensor& x,
    const at::Tensor& y,
    at::Tensor& z,
    Graph* graph
)
{
    const at::cuda::OptionalCUDAGuard device_guard(x.device());
    cudaStream_t stream = graph ? graph->capture_stream : at::cuda::getCurrentCUDAStream().stream();

    auto xt = x.dtype();
    auto yt = y.dtype();
    auto zt = z.dtype();
    uint64_t numel = x.numel();
    if (!numel) return;  // nothing to do; also avoids an invalid zero-block launch
    int blocks = (int) CEIL_DIVIDE(numel, (uint64_t) NUM_THREADS);

    // Dispatch to the kernel instantiation matching the (x, y, z) dtypes. When
    // capturing, record the slots of the three pointer arguments, presumably so
    // the captured graph can be retargeted at new tensors later.
    #define INSTANCE(xt_, yt_, zt_, xt__, yt__, zt__, kernel) \
    if (xt == xt_ && yt == yt_ && zt == zt_) \
    { \
        kernel<<<blocks, NUM_THREADS, 0, stream>>> \
        ( \
            (const xt__*) x.data_ptr(), \
            (const yt__*) y.data_ptr(), \
            (zt__*) z.data_ptr(), \
            numel \
        ); \
        if (graph) \
        { \
            graph->record_param((void*) &kernel, GP_add_x, 0); \
            graph->record_param((void*) &kernel, GP_add_y, 1); \
            graph->record_param((void*) &kernel, GP_add_z, 2); \
            graph->record_param((void*) &kernel, GP_end, 0); \
        } \
        cuda_check(cudaPeekAtLastError()); \
    }

    INSTANCE(at::kHalf,  at::kHalf,  at::kHalf,  half,  half,  half,  add_kernel_hhh)
    INSTANCE(at::kHalf,  at::kHalf,  at::kFloat, half,  half,  float, add_kernel_hhf)
    INSTANCE(at::kHalf,  at::kFloat, at::kHalf,  half,  float, half,  add_kernel_hfh)
    INSTANCE(at::kHalf,  at::kFloat, at::kFloat, half,  float, float, add_kernel_hff)
    INSTANCE(at::kFloat, at::kHalf,  at::kHalf,  float, half,  half,  add_kernel_fhh)
    INSTANCE(at::kFloat, at::kHalf,  at::kFloat, float, half,  float, add_kernel_fhf)
    INSTANCE(at::kFloat, at::kFloat, at::kHalf,  float, float, half,  add_kernel_ffh)
    INSTANCE(at::kFloat, at::kFloat, at::kFloat, float, float, float, add_kernel_fff)

    #undef INSTANCE

    cuda_check(cudaPeekAtLastError());
}

void add
(
    const at::Tensor& x,
    const at::Tensor& y,
    at::Tensor& z
)
{
    add_gr(x, y, z, nullptr);
}
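
/*
Usage sketch (illustrative, not part of this file): the shapes and dtypes below
are arbitrary examples; any of the eight half/float combinations instantiated
above dispatches to a matching kernel. add_gr additionally takes a Graph*,
assumed here (via util.h) to wrap CUDA graph capture; passing nullptr, as add()
does, launches eagerly on the current stream.

    at::Tensor x = at::randn({4096}, at::device(at::kCUDA).dtype(at::kHalf));
    at::Tensor y = at::randn({4096}, at::device(at::kCUDA).dtype(at::kFloat));
    at::Tensor z = at::empty({4096}, at::device(at::kCUDA).dtype(at::kFloat));

    add(x, y, z);   // z = x + y   (half + float -> float path, add_kernel_hff)
    add(z, y, z);   // in-place: z += y   (float + float -> float, add_kernel_fff)
*/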