// Original source reproduced unmodified here from:
// https://github.com/olcf/vector_addition_tutorials/blob/master/CUDA/vecAdd.cu
#include <algorithm>
#include <iostream>
#include <vector>

#include <cuda_runtime.h>

#include <CL/sycl.hpp>
#include <CL/sycl/backend/cuda.hpp>
// Device selector that prefers CUDA-backed GPUs.
//
// Scoring: a GPU whose driver-version string contains "CUDA" scores 1
// (and a notice is printed); every other device scores -1, which the
// SYCL runtime treats as "never select".
class CUDASelector : public sycl::device_selector {
public:
  int operator()(const sycl::device &Device) const override {
    using namespace sycl::info;

    const std::string Version = Device.get_info<device::driver_version>();
    const bool IsCudaGpu =
        Device.is_gpu() && Version.find("CUDA") != std::string::npos;

    if (!IsCudaGpu) {
      return -1;
    }
    std::cout << " CUDA device found \n";
    return 1;
  }
};
| 25 | + |
// CUDA kernel: element-wise vector addition, c[id] = a[id] + b[id].
//
// Launch with any 1-D grid/block configuration: the grid-stride loop keeps
// the kernel correct even when gridDim.x * blockDim.x < n (and the `id < n`
// condition guards the tail when the grid over-covers n).
// Preconditions: a, b and c point to device memory holding at least n
// doubles, and c does not alias a or b (asserted via __restrict__ so the
// compiler may use the read-only data cache for the inputs).
__global__ void vecAdd(const double *__restrict__ a,
                       const double *__restrict__ b,
                       double *__restrict__ c, int n) {
  // Total number of threads in the grid; each thread strides by this amount.
  int stride = blockDim.x * gridDim.x;
  for (int id = blockIdx.x * blockDim.x + threadIdx.x; id < n; id += stride) {
    c[id] = a[id] + b[id];
  }
}
| 36 | + |
// Host driver: fills two vectors with sin^2(i) and cos^2(i) on the host,
// launches the native CUDA vecAdd kernel through SYCL's CUDA interop, and
// verifies the result by printing the mean of c (expected to be 1 within
// floating-point error, since sin^2 + cos^2 == 1).
int main(int argc, char *argv[]) {
  using namespace sycl;
  // Number of elements in each vector.
  int n = 100000;

  // Create a SYCL context for interoperability with the CUDA Runtime API.
  // This is temporary until the property extension is implemented.
  const bool UsePrimaryContext = true;
  device dev{CUDASelector().select_device()};
  context myContext{dev, {}, UsePrimaryContext};
  queue myQueue{myContext, dev};

  // Inner scope so buffer destruction (and any pending writes) completes
  // before main returns.
  {
    buffer<double> bA{range<1>(n)};
    buffer<double> bB{range<1>(n)};
    buffer<double> bC{range<1>(n)};

    {
      // Host accessors; scope-limited so they release the buffers before the
      // kernel submission below.
      auto hA = bA.get_access<access::mode::write>();
      auto hB = bB.get_access<access::mode::write>();

      // Initialize vectors on host.
      for (int i = 0; i < n; i++) {
        hA[i] = sin(i) * sin(i);
        hB[i] = cos(i) * cos(i);
      }
    }

    // Dispatch a command group; the accessors declare the dependencies that
    // order it after the host initialization and before the host read below.
    myQueue.submit([&](handler &h) {
      auto accA = bA.get_access<access::mode::read>(h);
      auto accB = bB.get_access<access::mode::read>(h);
      auto accC = bC.get_access<access::mode::write>(h);

      h.interop_task([=](interop_handler ih) {
        // Reinterpret the SYCL memory objects as raw CUDA device pointers.
        auto dA = reinterpret_cast<double *>(ih.get_mem<backend::cuda>(accA));
        auto dB = reinterpret_cast<double *>(ih.get_mem<backend::cuda>(accB));
        auto dC = reinterpret_cast<double *>(ih.get_mem<backend::cuda>(accC));

        // Threads per block, and enough blocks to cover n elements
        // (exact integer ceiling division — no float rounding involved).
        const int blockSize = 1024;
        const int gridSize = (n + blockSize - 1) / blockSize;

        // Call the CUDA kernel directly from SYCL.
        vecAdd<<<gridSize, blockSize>>>(dA, dB, dC, n);

        // Kernel launches return no status; launch-configuration failures
        // (invalid grid size, etc.) surface via cudaGetLastError. Report them
        // instead of silently reading an unwritten output buffer later.
        cudaError_t err = cudaGetLastError();
        if (err != cudaSuccess) {
          std::cerr << "CUDA kernel launch failed: " << cudaGetErrorString(err)
                    << std::endl;
        }
      });
    });

    {
      auto hC = bC.get_access<access::mode::read>();
      // Sum up vector c and print result divided by n; this should equal 1
      // within floating-point error.
      double sum = 0;
      for (int i = 0; i < n; i++) {
        sum += hC[i];
      }
      std::cout << "Final result " << sum / n << std::endl;
    }
  }

  return 0;
}