@@ -5075,6 +5075,7 @@ static bool ggmlhexagon_can_handle_op_through_cdsp(ggml_backend_dev_t dev, const
 
     const struct ggml_tensor * src0 = op_tensor->src[0];
     const struct ggml_tensor * src1 = op_tensor->src[1];
+    const int src0_rank = ggml_n_dims(src0);
     switch (op_tensor->op) {
         case GGML_OP_ADD:
         {
@@ -5086,13 +5087,15 @@ static bool ggmlhexagon_can_handle_op_through_cdsp(ggml_backend_dev_t dev, const
         case GGML_OP_MUL_MAT:
         {
             ggmlhexagon_dump_op_info(op_tensor);
-            if (1 == g_hexagon_appcfg.enable_q_mulmat)
+            if (1 == g_hexagon_appcfg.enable_q_mulmat) {
                 return (src0->type == GGML_TYPE_F32
                         || src0->type == GGML_TYPE_Q4_0 || src0->type == GGML_TYPE_Q8_0
                         || src0->type == GGML_TYPE_Q6_K || src0->type == GGML_TYPE_Q8_K
                        ) && (src1->type == GGML_TYPE_F32) && (op_tensor->type == GGML_TYPE_F32);
-            else
-                return (src0->type == GGML_TYPE_F32) && (src1->type == GGML_TYPE_F32) && (op_tensor->type == GGML_TYPE_F32);
+            } else {
+                return (src0->type == GGML_TYPE_F32) && (src1->type == GGML_TYPE_F32) &&
+                       (op_tensor->type == GGML_TYPE_F32);
+            }
         }
         case GGML_OP_SOFT_MAX:{
             if (!ggml_is_contiguous(op_tensor))
@@ -5124,13 +5127,9 @@ static bool ggmlhexagon_can_handle_op_through_qnn(ggml_backend_dev_t dev, const
 
     struct ggml_tensor * src0 = op_tensor->src[0];
     struct ggml_tensor * src1 = op_tensor->src[1];
-    int64_t ne00 = 0;
-    uint32_t src0_rank = 0;
-    uint32_t src1_rank = 0;
-    if (nullptr != src0) {
-        src0_rank = ggml_n_dims(src0);
-        ne00 = src0->ne[0];
-    }
+    const int64_t ne00 = src0->ne[0];
+    const int src0_rank = ggml_n_dims(src0);
+    int src1_rank = 0;
     if (nullptr != src1) {
         src1_rank = ggml_n_dims(src1);
     }
@@ -6023,7 +6022,7 @@ const char * ggml_backend_hexagon_get_devname(size_t dev_num) {
         case HEXAGON_BACKEND_QNNNPU:
            return "HEXAGON_BACKEND_QNN_NPU";
         case HEXAGON_BACKEND_GGML:
-           return "ggml"; // "fake" QNN backend, used for compare performance between hexagon backend and the default ggml backend
+           return "ggml"; // "fake" hexagon backend, used to compare performance between the hexagon backend and the default ggml backend
         default:
            return "unknown";
    }
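
For reference, the MUL_MAT gate added in the first hunk can be read as a pure predicate over the three tensor types. The sketch below is a minimal standalone restatement of that logic, not code from the patch: ggml_type_stub, can_offload_mulmat, and passing enable_q_mulmat as a plain bool are hypothetical stand-ins for the real ggml type enum and the g_hexagon_appcfg field.

// Standalone sketch (assumed names, not the backend's actual API) of the
// type check that decides whether a mulmat op can be offloaded to the cDSP.
#include <cstdio>

enum ggml_type_stub { F32, Q4_0, Q8_0, Q6_K, Q8_K, F16 };

static bool can_offload_mulmat(bool enable_q_mulmat,
                               ggml_type_stub src0_type,
                               ggml_type_stub src1_type,
                               ggml_type_stub dst_type) {
    if (enable_q_mulmat) {
        // quantized src0 is acceptable as long as src1 and the output stay in F32
        return (src0_type == F32 || src0_type == Q4_0 || src0_type == Q8_0
                || src0_type == Q6_K || src0_type == Q8_K)
               && src1_type == F32 && dst_type == F32;
    }
    // without quantized mulmat support, all three tensors must be F32
    return src0_type == F32 && src1_type == F32 && dst_type == F32;
}

int main() {
    std::printf("%d\n", can_offload_mulmat(true,  Q4_0, F32, F32)); // 1: offloadable
    std::printf("%d\n", can_offload_mulmat(false, Q4_0, F32, F32)); // 0: falls back to the default ggml path
    return 0;
}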