@@ -534,12 +534,12 @@ static void * ggml_backend_cuda_buffer_get_base(ggml_backend_buffer_t buffer) {
     return ctx->dev_ptr;
 }
 
-static void ggml_backend_cuda_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
+static enum ggml_status ggml_backend_cuda_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
     ggml_backend_cuda_buffer_context * ctx = (ggml_backend_cuda_buffer_context *)buffer->context;
 
     if (tensor->view_src != NULL) {
         assert(tensor->view_src->buffer->buft == buffer->buft);
-        return;
+        return GGML_STATUS_SUCCESS; // check with reviewers
     }
 
     if (ggml_is_quantized(tensor->type) && tensor->view_src == nullptr && ggml_backend_buffer_get_usage(buffer) != GGML_BACKEND_BUFFER_USAGE_COMPUTE) {
@@ -552,6 +552,7 @@ static void ggml_backend_cuda_buffer_init_tensor(ggml_backend_buffer_t buffer, g
             CUDA_CHECK(cudaMemset((char *)tensor->data + original_size, 0, padded_size - original_size));
         }
     }
+    return GGML_STATUS_SUCCESS;
 }
 
 static void ggml_backend_cuda_buffer_memset_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
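For context, the new return value lets the generic buffer layer forward initialization failures instead of discarding them. Below is a minimal sketch of such a dispatcher; the wrapper name `ggml_backend_buffer_init_tensor` and the NULL check on the `iface.init_tensor` hook mirror the existing ggml-backend code, but the exact body shown here is an assumption, not part of this commit.

```cpp
// Sketch only: assumes the declarations from ggml.h and ggml-backend-impl.h
// (enum ggml_status, ggml_backend_buffer_t, the iface.init_tensor hook).
enum ggml_status ggml_backend_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
    // backends without an init_tensor hook have nothing to initialize
    if (buffer->iface.init_tensor == NULL) {
        return GGML_STATUS_SUCCESS;
    }
    // forward the backend's status (e.g. GGML_STATUS_SUCCESS or GGML_STATUS_ALLOC_FAILED)
    return buffer->iface.init_tensor(buffer, tensor);
}
```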
@@ -786,7 +787,7 @@ static void * ggml_backend_cuda_split_buffer_get_base(ggml_backend_buffer_t buff
     GGML_UNUSED(buffer);
 }
 
-static void ggml_backend_cuda_split_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
+static enum ggml_status ggml_backend_cuda_split_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
     GGML_ASSERT(tensor->view_src == nullptr); // views of split tensors are not supported
 
     ggml_backend_cuda_split_buffer_context * ctx = (ggml_backend_cuda_split_buffer_context *)buffer->context;
@@ -832,6 +833,7 @@ static void ggml_backend_cuda_split_buffer_init_tensor(ggml_backend_buffer_t buf
         }
     }
     tensor->extra = extra;
+    return GGML_STATUS_SUCCESS;
 }
 
 static void ggml_backend_cuda_split_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
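A hypothetical call site, showing why the status matters: helpers that initialize many tensors can now surface the first failure to their caller instead of aborting. The helper below is illustrative only; `ggml_get_first_tensor` and `ggml_get_next_tensor` come from ggml.h, while `init_all_tensors` is not an upstream function.

```cpp
// Illustrative helper, not code from this commit: run init_tensor over all
// tensors of a ggml context and propagate the first failure to the caller.
// Assumes #include "ggml.h" and #include "ggml-backend.h".
static enum ggml_status init_all_tensors(ggml_context * ctx, ggml_backend_buffer_t buffer) {
    for (ggml_tensor * t = ggml_get_first_tensor(ctx); t != NULL; t = ggml_get_next_tensor(ctx, t)) {
        enum ggml_status status = ggml_backend_buffer_init_tensor(buffer, t);
        if (status != GGML_STATUS_SUCCESS) {
            return status; // e.g. GGML_STATUS_FAILED or GGML_STATUS_ALLOC_FAILED
        }
    }
    return GGML_STATUS_SUCCESS;
}
```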