
Commit bdc8a47

Merge pull request #25 from oneapi-src/fix_lc0
[Lc0][SYCL] Fix to interop calls.
2 parents aec41e5 + 6eba6c8, commit bdc8a47
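
This patch moves the Lc0 SYCL backend off the old DPC++ experimental interop API: `cgh.interop_task(...)` taking a `sycl::interop_handler` is replaced by the SYCL 2020 `cgh.host_task(...)` taking a `sycl::interop_handle`, and the CUDA backend enumerator is updated from `sycl::backend::cuda` to oneAPI's `sycl::backend::ext_oneapi_cuda`. Below is a minimal sketch of the pattern the diff converges on; `run_sgemm`, its arguments, and the SGEMM call are illustrative stand-ins, not code from the patch.

// Minimal sketch of the updated interop pattern (assumes a DPC++ compiler with
// the oneAPI CUDA backend enabled and an already-created cuBLAS handle).
#include <sycl/sycl.hpp>
#include <cublas_v2.h>

void run_sgemm(sycl::queue q, cublasHandle_t handle,
               const float* a, const float* b, float* c, int n) {
  q.submit([&](sycl::handler& cgh) {
    // host_task replaces the removed interop_task; the lambda runs on the
    // host once the command group's dependencies are satisfied.
    cgh.host_task([=](sycl::interop_handle ih) {
      // ext_oneapi_cuda is the current spelling of the CUDA backend enumerator.
      auto stream = sycl::get_native<sycl::backend::ext_oneapi_cuda>(q);
      cublasSetStream(handle, stream);
      const float alpha = 1.0f, beta = 0.0f;
      // Illustrative SGEMM; the patch itself uses Hgemm/GemmEx variants.
      cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, n,
                  &alpha, a, n, b, n, &beta, c, n);
    });
  });
}

Setting the cuBLAS stream to the queue's own native stream keeps the library call ordered with respect to other work submitted to that queue, which is the point of every hunk below.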

1 file changed (+21 −21)
lc0/src/neural/sycl/layers.cc.dp.cpp

Lines changed: 21 additions & 21 deletions
@@ -270,7 +270,7 @@ void SELayer<float>::Eval(int N, float* output, const float* input,
     cublasHandle_t handle = cuBlasContextManager::getcuBlasHandle_t();
     //ReportCUBLASErrors(cublasCreate(&handle));

-    sycl_queue_.submit([&](sycl::handler &cgh) {
+    sycl_queue_.submit([&](sycl::handler &cgh) {
       //auto d_A = b_A.get_access<sycl::access::mode::read_write>(cgh);

       cgh.host_task([=](sycl::interop_handle ih) {
@@ -417,9 +417,9 @@ void SELayer<sycl::half>::Eval(int N, sycl::half* output, const sycl::half* inpu
     q_ct1->submit([&](sycl::handler &cgh) {
       //auto d_A = b_A.get_access<sycl::access::mode::read_write>(cgh);

-      cgh.interop_task([=](sycl::interop_handler ih) {
+      cgh.host_task([=](sycl::interop_handle ih) {

-        auto cudaStreamHandle = sycl::get_native<sycl::backend::cuda>(*q_ct1);
+        auto cudaStreamHandle = sycl::get_native<sycl::backend::ext_oneapi_cuda>(*q_ct1);
         cublasSetStream(handle, cudaStreamHandle);

         ReportCUBLASErrors(cublasHgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, numFc1Out_,
@@ -439,9 +439,9 @@ void SELayer<sycl::half>::Eval(int N, sycl::half* output, const sycl::half* inpu
     q_ct1->submit([&](sycl::handler &cgh) {
       //auto d_A = b_A.get_access<sycl::access::mode::read_write>(cgh);

-      cgh.interop_task([=](sycl::interop_handler ih) {
+      cgh.host_task([=](sycl::interop_handle ih) {

-        auto cudaStreamHandle = sycl::get_native<sycl::backend::cuda>(*q_ct1);
+        auto cudaStreamHandle = sycl::get_native<sycl::backend::ext_oneapi_cuda>(*q_ct1);
         cublasSetStream(handle, cudaStreamHandle);

     // 3. Second fully connected layer.
@@ -556,9 +556,9 @@ void FCLayer<float>::LoadWeights(float* cpuWeight, float* cpuBias,

   // q_ct1->submit([&](sycl::handler &cgh) {

-  //   cgh.interop_task([=](sycl::interop_handler ih) {
+  //   cgh.host_task([=](sycl::interop_handle ih) {

-  //     auto cudaStreamHandle = sycl::get_native<sycl::backend::cuda>(*q_ct1);
+  //     auto cudaStreamHandle = sycl::get_native<sycl::backend::ext_oneapi_cuda>(*q_ct1);
   //     cublasSetStream(handle, cudaStreamHandle);

   //     ReportCUBLASErrors(cublasHgemm(handle, CUBLAS_OP_T, CUBLAS_OP_N, num_outputs,
@@ -918,9 +918,9 @@ void FusedWinogradConvSELayer<DataType>::LoadSEWeights(float* w1, float* b1,

   // q_ct1->submit([&](sycl::handler &cgh) {
   //   //auto d_A = b_A.get_access<sycl::access::mode::read_write>(cgh);
-  //   cgh.interop_task([=](sycl::interop_handler ih) {
+  //   cgh.host_task([=](sycl::interop_handle ih) {

-  //     auto cudaStreamHandle = sycl::get_native<sycl::backend::cuda>(q_ct1);
+  //     auto cudaStreamHandle = sycl::get_native<sycl::backend::ext_oneapi_cuda>(q_ct1);
   //     cublasSetStream(handle, cudaStreamHandle);

   //     ReportCUBLASErrors(cublasGemmStridedBatchedEx(
@@ -1019,7 +1019,7 @@ template <> void BaseLayer<float>::cublasRowMajorMatrixMul(const float* A, const
   else {

 #ifdef USE_CUBLAS
-    sycl_queue_.submit([&](sycl::handler &cgh) {
+    sycl_queue_.submit([&](sycl::handler &cgh) {
       //auto d_A = b_A.get_access<sycl::access::mode::read_write>(cgh);
       cgh.host_task([=](sycl::interop_handle ih) {
         auto cudaStreamHandle = sycl::get_native<sycl::backend::ext_oneapi_cuda>(sycl_queue_);
@@ -1218,9 +1218,9 @@ template <typename DataType> void Conv1Layer<DataType>::LoadWeights(float* pfilt

   // q_ct1->submit([&](sycl::handler &cgh) {
   //   //auto d_A = b_A.get_access<sycl::access::mode::read_write>(cgh);
-  //   cgh.interop_task([=](sycl::interop_handler ih) {
+  //   cgh.host_task([=](sycl::interop_handle ih) {

-  //     auto cudaStreamHandle = sycl::get_native<sycl::backend::cuda>(q_ct1);
+  //     auto cudaStreamHandle = sycl::get_native<sycl::backend::ext_oneapi_cuda>(q_ct1);
   //     cublasSetStream(handle, cudaStreamHandle);


@@ -1264,7 +1264,7 @@ void Conv1Layer<float>::cublasSpecialMatrixMul(const float* A, const float* B,

   // printf("use_gemm_ex_\n");
 #ifdef USE_CUBLAS
-  sycl_queue_.submit([&](sycl::handler &cgh) {
+  sycl_queue_.submit([&](sycl::handler &cgh) {
     //auto d_A = b_A.get_access<sycl::access::mode::read_write>(cgh);
     cgh.host_task([=](sycl::interop_handle ih) {

@@ -1282,7 +1282,7 @@ void Conv1Layer<float>::cublasSpecialMatrixMul(const float* A, const float* B,
     });
   });
 #elifdef USE_HIPBLAS
-  sycl_queue_.submit([&](sycl::handler &cgh) {
+  sycl_queue_.submit([&](sycl::handler &cgh) {
     //auto d_A = b_A.get_access<sycl::access::mode::read_write>(cgh);
     cgh.host_task([=](sycl::interop_handle ih) {
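One detail worth flagging in these hunks: `#elifdef` is the C23/C++23 shorthand for `#elif defined(...)`, so older preprocessors reject it. A sketch of the portable equivalent, assuming the build defines at most one of USE_CUBLAS and USE_HIPBLAS:

// Portable pre-C++23 spelling of the #elifdef chain used in layers.cc.dp.cpp.
#ifdef USE_CUBLAS
  // ... cuBLAS interop path ...
#elif defined(USE_HIPBLAS)
  // ... hipBLAS interop path ...
#endif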

@@ -1343,7 +1343,7 @@ void Conv1Layer<float>::cublasSpecialMatrixMul(const float* A, const float* B,
     });
   });
 #elifdef USE_HIPBLAS
-  sycl_queue_.submit([&](sycl::handler &cgh) {
+  sycl_queue_.submit([&](sycl::handler &cgh) {
     //auto d_A = b_A.get_access<sycl::access::mode::read_write>(cgh);
     cgh.host_task([=](sycl::interop_handle ih) {

@@ -1831,9 +1831,9 @@ static void cublasXgemm(transpose_type transa,

   q_ct1.submit([&](sycl::handler &cgh) {

-    cgh.interop_task([=](sycl::interop_handler ih) {
+    cgh.host_task([=](sycl::interop_handle ih) {

-      auto cudaStreamHandle = sycl::get_native<sycl::backend::cuda>(q_ct1);
+      auto cudaStreamHandle = sycl::get_native<sycl::backend::ext_oneapi_cuda>(q_ct1);
       cublasSetStream(handle, cudaStreamHandle);


@@ -1850,7 +1850,7 @@ static void cublasXgemm(transpose_type transa,



-  sycl_queue.submit([&](sycl::handler &cgh) {
+  sycl_queue.submit([&](sycl::handler &cgh) {

     cgh.host_task([=](sycl::interop_handle ih) {

@@ -1919,9 +1919,9 @@ static void cublasXGemmStridedBatched(transpose_type transa, transpose_type tran

   q_ct1.submit([&](sycl::handler &cgh) {

-    cgh.interop_task([=](sycl::interop_handler ih) {
+    cgh.host_task([=](sycl::interop_handle ih) {

-      auto cudaStreamHandle = sycl::get_native<sycl::backend::cuda>(q_ct1);
+      auto cudaStreamHandle = sycl::get_native<sycl::backend::ext_oneapi_cuda>(q_ct1);
       cublasSetStream(handle, cudaStreamHandle);

       ReportCUBLASErrors(cublasGemmStridedBatchedEx(
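An alternative to capturing the SYCL queue inside the lambda, as these hunks do, is to ask the `sycl::interop_handle` parameter for the native queue directly; SYCL 2020 exposes this as `interop_handle::get_native_queue`. A sketch, assuming the oneAPI CUDA backend and an existing cuBLAS `handle` (this is a hypothetical variant, not what the patch does):

// Sketch: derive the native stream from the interop_handle rather than the
// captured queue.
q.submit([&](sycl::handler& cgh) {
  cgh.host_task([=](sycl::interop_handle ih) {
    auto stream = ih.get_native_queue<sycl::backend::ext_oneapi_cuda>();
    cublasSetStream(handle, stream);
    // ... cuBLAS calls now run on the stream backing this queue ...
  });
});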
@@ -1956,7 +1956,7 @@ static void cublasXGemmStridedBatched(transpose_type transa, transpose_type tran


 #elifdef USE_HIPBLAS
-  hipblasHandle_t handle = hipBlasContextManager::gethipBlasHandle_t();
+  hipblasHandle_t handle = hipBlasContextManager::gethipBlasHandle_t();

   sycl_queue.submit([&](sycl::handler &cgh) {