@@ -220,8 +220,8 @@ mli_status mli_chk_conv2d_nhwc_sa8_sa8_sa32(
         MLI_CHECK(bias->el_type == MLI_EL_ASYM_I32, "Wrong bias tensor type"))
         return MLI_STATUS_TYPE_MISMATCH;

-    if (MLI_CHECK(in->el_params.asym.zero_point.i16 == INT16_MIN, "Input tensor: INT16_MIN doesn't support as offset value") ||
-        MLI_CHECK(out->el_params.asym.zero_point.i16 == INT16_MIN, "Input tensor: INT16_MIN doesn't support as offset value"))
+    if (MLI_CHECK(in->el_params.asym.zero_point.i16 != INT16_MIN, "Input tensor: INT16_MIN doesn't support as offset value") ||
+        MLI_CHECK(out->el_params.asym.zero_point.i16 != INT16_MIN, "Input tensor: INT16_MIN doesn't support as offset value"))
         return MLI_STATUS_INCOMPATEBLE_TENSORS;

     mli_status ret = MLI_CHECK_STATUS(mli_chk_conv2d_hwc(in, weights, bias, cfg, out), __func__);
@@ -577,8 +577,8 @@ mli_status mli_chk_depthwise_conv2d_hwcn_sa8_sa8_sa32(
         MLI_CHECK(in->el_params.asym.dim < 0, "Input tensor: Per-tensor quantization is expected"))
         return MLI_STATUS_INCOMPATEBLE_TENSORS;

-    if (MLI_CHECK(in->el_params.asym.zero_point.i16 == INT16_MIN, "Input tensor: INT16_MIN doesn't support as offset value") ||
-        MLI_CHECK(out->el_params.asym.zero_point.i16 == INT16_MIN, "Input tensor: INT16_MIN doesn't support as offset value"))
+    if (MLI_CHECK(in->el_params.asym.zero_point.i16 != INT16_MIN, "Input tensor: INT16_MIN doesn't support as offset value") ||
+        MLI_CHECK(out->el_params.asym.zero_point.i16 != INT16_MIN, "Input tensor: INT16_MIN doesn't support as offset value"))
         return MLI_STATUS_INCOMPATEBLE_TENSORS;

     ret = MLI_CHECK_STATUS(mli_chk_bias_scale_asym(in, weights, bias), __func__);
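Why the flip from == to != is correct: the surrounding call sites treat MLI_CHECK as reporting a failure when its condition is false, so the guard must assert the valid state (the zero point is not INT16_MIN) rather than the invalid one. With the old == condition, the check failed for every legal zero point and passed for the one value the error message forbids. Below is a minimal, self-contained sketch of that assumed semantics; the helper mli_check_failed and the main driver are hypothetical, written for illustration only, and are not the library's actual mli_debug.h definition.

/* Sketch of the check semantics assumed by the fix above:
 * MLI_CHECK(cond, msg) reports msg and evaluates to true when cond
 * is false, so callers bail out on a failed assertion. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool mli_check_failed(bool cond, const char *msg) {
    if (!cond)
        fprintf(stderr, "MLI check failed: %s\n", msg);
    return !cond;  /* true means the check failed */
}
#define MLI_CHECK(cond, msg) mli_check_failed((cond), (msg))

int main(void) {
    int16_t zp = INT16_MIN;
    /* Old code asserted 'zp == INT16_MIN', i.e. the invalid value, so
     * the error fired for every legal offset and never for INT16_MIN.
     * The fixed '!=' asserts the valid state and fails only here. */
    if (MLI_CHECK(zp != INT16_MIN,
                  "INT16_MIN doesn't support as offset value"))
        return 1;  /* caller would return MLI_STATUS_INCOMPATEBLE_TENSORS */
    return 0;
}

A plausible reason for rejecting INT16_MIN as an offset is that the offset is negated during requantization and -INT16_MIN does not fit in int16_t, though the commit itself only corrects the inverted comparison.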