@@ -919,19 +919,6 @@ static ggml_type ggml_datatype_from_qnn_datatype(Qnn_DataType_t qnn_type) {
     return GGML_TYPE_COUNT;
 }
 
-// TODO: add more ops
-static const char * qnn_opname_from_ggmlop(enum ggml_op ggmlop) {
-    switch (ggmlop) {
-        case GGML_OP_ADD:
-            return QNN_OP_ELEMENT_WISE_ADD;
-        case GGML_OP_MUL_MAT:
-            return QNN_OP_MAT_MUL;
-        default:
-            break;
-    }
-    return nullptr;
-}
-
 static void get_qnn_dimensions_from_ggml_dimensions(uint32_t * qnn_dimensions, const uint32_t * ggml_dimensions, uint32_t rank) {
     if (rank > GGML_MAX_DIMS) {
         GGMLQNN_LOG_WARN("invalid params");
@@ -1007,14 +994,13 @@ Qnn_Tensor_t * ggmlqnn_create_general_tensor(const ggml_tensor * tensor, const c
                 .type = qnn_tensor_type,
                 .dataFormat = QNN_TENSOR_DATA_FORMAT_FLAT_BUFFER,
                 .dataType = qnn_data_type,
-                .quantizeParams = {QNN_DEFINITION_UNDEFINED,
-                                   QNN_QUANTIZATION_ENCODING_UNDEFINED,
+                .quantizeParams = {.encodingDefinition = QNN_DEFINITION_UNDEFINED,
+                                   .quantizationEncoding = QNN_QUANTIZATION_ENCODING_UNDEFINED,
                                    {.scaleOffsetEncoding = {.scale = 0.0000000000000000f, .offset = 0}}},
                 .rank = rank,
                 .dimensions = tensor_dims,
                 .memType = QNN_TENSORMEMTYPE_RAW,
-                {.clientBuf = {nullptr, 0}
-                }
+                .clientBuf = {.data = nullptr, .dataSize = 0}
                 }
             }
     };
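
The second hunk replaces the positional initializer for `quantizeParams` and the brace-wrapped anonymous-union initializer for `clientBuf` with designated initializers, binding each QNN field by name instead of by position. Below is a minimal standalone sketch of that pattern, not the author's code: the helper names are hypothetical, the sketch assumes the Qualcomm AI Engine Direct SDK header `QnnTypes.h`, and it uses only the field names and constants visible in the diff above (`encodingDefinition`, `quantizationEncoding`, `scaleOffsetEncoding`, `data`, `dataSize`).

// Sketch only: shows the designated-initializer style the hunk above adopts.
// Assumes the QNN SDK header QnnTypes.h and the same compiler support for
// designated initializers on anonymous unions that the patched file itself uses.
#include "QnnTypes.h"

static Qnn_QuantizeParams_t ggmlqnn_default_quantize_params(void) {
    Qnn_QuantizeParams_t qparams = {
        .encodingDefinition   = QNN_DEFINITION_UNDEFINED,
        .quantizationEncoding = QNN_QUANTIZATION_ENCODING_UNDEFINED,
        {.scaleOffsetEncoding = {.scale = 0.0f, .offset = 0}}   // unnamed union member
    };
    return qparams;
}

static Qnn_ClientBuffer_t ggmlqnn_empty_client_buf(void) {
    // Naming .data and .dataSize removes the extra brace level that the old
    // positional form `{.clientBuf = {nullptr, 0}}` needed around the union wrapper.
    Qnn_ClientBuffer_t buf = {.data = nullptr, .dataSize = 0};
    return buf;
}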