@@ -670,13 +670,9 @@ static ggml_backend_buffer_type_t ggml_backend_sycl_buffer_type(ggml_backend_syc
 // sycl split buffer
 
 static int64_t get_row_rounding(ggml_type type, const std::array<float, GGML_SYCL_MAX_DEVICES> & tensor_split) {
-    int64_t min_compute_capability = INT_MAX;
     int64_t max_compute_capability = INT_MIN;
     for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
         if (tensor_split[i] < (i + 1 < ggml_sycl_info().device_count ? tensor_split[i + 1] : 1.0f)) {
-            if (min_compute_capability > ggml_sycl_info().devices[i].cc) {
-                min_compute_capability = ggml_sycl_info().devices[i].cc;
-            }
             if (max_compute_capability < ggml_sycl_info().devices[i].cc) {
                 max_compute_capability = ggml_sycl_info().devices[i].cc;
             }
@@ -2907,7 +2903,6 @@ static bool ggml_sycl_supports_dmmv(enum ggml_type type) {
 static void ggml_sycl_mul_mat(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
 
     const bool split = ggml_backend_buffer_is_sycl_split(src0->buffer);
-    int64_t min_compute_capability = INT_MAX;
 
     if (split) {
         ggml_backend_sycl_split_buffer_type_context * buft_ctx = (ggml_backend_sycl_split_buffer_type_context *) src0->buffer->buft->context;
@@ -2917,13 +2912,7 @@ static void ggml_sycl_mul_mat(ggml_backend_sycl_context & ctx, const ggml_tensor
             if (tensor_split[id] >= (id + 1 < ggml_sycl_info().device_count ? tensor_split[id + 1] : 1.0f)) {
                 continue;
             }
-
-            if (min_compute_capability > ggml_sycl_info().devices[id].cc) {
-                min_compute_capability = ggml_sycl_info().devices[id].cc;
-            }
         }
-    } else {
-        min_compute_capability = ggml_sycl_info().devices[ctx.device].cc;
     }
 
     // check data types and tensor shapes for custom matrix multiplication kernels:
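The logic that survives in `get_row_rounding` keeps only the maximum compute capability among devices that actually receive rows of the split tensor. Below is a minimal standalone C++ sketch of that surviving loop; the `device_cc` table and `max_participating_cc` name are illustrative stand-ins for `ggml_sycl_info()` and are not part of ggml.

```cpp
// Sketch: keep the maximum compute capability among devices whose slice of the
// tensor split is non-empty. device_cc stands in for ggml_sycl_info().devices[i].cc.
#include <array>
#include <climits>
#include <cstdint>
#include <cstdio>

static constexpr int kDeviceCount = 3;
static constexpr std::array<int, kDeviceCount> device_cc = { 800, 900, 750 };

// tensor_split[i] is the cumulative fraction of rows assigned before device i;
// device i participates only if its slice [tensor_split[i], next) is non-empty.
static int64_t max_participating_cc(const std::array<float, kDeviceCount> & tensor_split) {
    int64_t max_compute_capability = INT_MIN;
    for (int i = 0; i < kDeviceCount; ++i) {
        const float next = (i + 1 < kDeviceCount) ? tensor_split[i + 1] : 1.0f;
        if (tensor_split[i] < next && max_compute_capability < device_cc[i]) {
            max_compute_capability = device_cc[i];
        }
    }
    return max_compute_capability;
}

int main() {
    // Devices 0 and 1 share the rows; device 2 gets an empty slice and is skipped.
    const std::array<float, kDeviceCount> tensor_split = { 0.0f, 0.5f, 1.0f };
    printf("max cc among participating devices: %lld\n",
           (long long) max_participating_cc(tensor_split));  // prints 900
    return 0;
}
```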