diff --git a/runtime/onert/api/nnfw/include/nnfw.h b/runtime/onert/api/nnfw/include/nnfw.h index 3fe296844b0..40c97aec522 100644 --- a/runtime/onert/api/nnfw/include/nnfw.h +++ b/runtime/onert/api/nnfw/include/nnfw.h @@ -125,6 +125,8 @@ typedef enum NNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE = 5, /** When API is deprecated */ NNFW_STATUS_DEPRECATED_API = 6, + /** When given input or ouput data type is not supported. */ + NNFW_STATUS_UNSUPPORTED_DATA_TYPE = 7, } NNFW_STATUS; /** diff --git a/runtime/onert/api/nnfw/src/CustomKernel.cc b/runtime/onert/api/nnfw/src/CustomKernel.cc index 9849e6a2089..93e87fe0822 100644 --- a/runtime/onert/api/nnfw/src/CustomKernel.cc +++ b/runtime/onert/api/nnfw/src/CustomKernel.cc @@ -16,6 +16,8 @@ #include "CustomKernel.h" +#include + namespace onert::api { @@ -54,7 +56,7 @@ class APIConverter api_type.dtype = NNFW_TYPE_TENSOR_BOOL; break; default: - throw std::runtime_error("Unsupported tensor datatype"); + throw UnsupportedDataTypeException("Converter", type.dtype); } return api_type; } diff --git a/runtime/onert/api/nnfw/src/nnfw_session.cc b/runtime/onert/api/nnfw/src/nnfw_session.cc index 5d5ad91eb13..6bd8fdf5b26 100644 --- a/runtime/onert/api/nnfw/src/nnfw_session.cc +++ b/runtime/onert/api/nnfw/src/nnfw_session.cc @@ -502,6 +502,11 @@ NNFW_STATUS nnfw_session::run() std::cerr << "Error during nnfw_session::run : " << e.what() << std::endl; return NNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE; } + catch (const onert::UnsupportedDataTypeException &e) + { + std::cerr << "Error during nnfw_session::run : " << e.what() << std::endl; + return NNFW_STATUS_UNSUPPORTED_DATA_TYPE; + } catch (const std::exception &e) { std::cerr << "Error during nnfw_session::run : " << e.what() << std::endl; diff --git a/runtime/onert/api/python/include/nnfw_exceptions.h b/runtime/onert/api/python/include/nnfw_exceptions.h index 7535cdbf34f..01216c05862 100644 --- a/runtime/onert/api/python/include/nnfw_exceptions.h +++ 
b/runtime/onert/api/python/include/nnfw_exceptions.h @@ -49,6 +49,10 @@ struct NnfwDeprecatedApiError : public NnfwError { using NnfwError::NnfwError; }; +struct NnfwUnsupportedDataTypeError : public NnfwError +{ + using NnfwError::NnfwError; +}; } // namespace onert::api::python diff --git a/runtime/onert/api/python/src/bindings/nnfw_exception_bindings.cc b/runtime/onert/api/python/src/bindings/nnfw_exception_bindings.cc index 85503c3832a..4c303609b3e 100644 --- a/runtime/onert/api/python/src/bindings/nnfw_exception_bindings.cc +++ b/runtime/onert/api/python/src/bindings/nnfw_exception_bindings.cc @@ -41,6 +41,8 @@ void bind_nnfw_exceptions(py::module_ &m) m.attr("OnertError").cast()); py::register_exception(m, "OnertDeprecatedApiError", m.attr("OnertError").cast()); + py::register_exception(m, "OnertUnsupportedDataTypeError", + m.attr("OnertError").cast()); } } // namespace onert::api::python diff --git a/runtime/onert/api/python/src/wrapper/nnfw_api_wrapper.cc b/runtime/onert/api/python/src/wrapper/nnfw_api_wrapper.cc index 2d660223d59..f3466e0df3c 100644 --- a/runtime/onert/api/python/src/wrapper/nnfw_api_wrapper.cc +++ b/runtime/onert/api/python/src/wrapper/nnfw_api_wrapper.cc @@ -42,9 +42,10 @@ void ensure_status(NNFW_STATUS status) throw NnfwInsufficientOutputError("NNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE"); case NNFW_STATUS::NNFW_STATUS_DEPRECATED_API: throw NnfwDeprecatedApiError("NNFW_STATUS_DEPRECATED_API"); - default: - throw NnfwError("NNFW_UNKNOWN_ERROR"); + case NNFW_STATUS::NNFW_STATUS_UNSUPPORTED_DATA_TYPE: + throw NnfwUnsupportedDataTypeError("NNFW_STATUS_UNSUPPORTED_DATA_TYPE"); } + throw NnfwError("NNFW_UNKNOWN_ERROR"); } NNFW_LAYOUT getLayout(const char *layout) diff --git a/runtime/onert/backend/acl_common/Convert.cc b/runtime/onert/backend/acl_common/Convert.cc index 3dd34235836..75a85a52ab7 100644 --- a/runtime/onert/backend/acl_common/Convert.cc +++ b/runtime/onert/backend/acl_common/Convert.cc @@ -19,6 +19,7 @@ #include "Swizzle.h" #include 
"ir/DataType.h" #include "ir/operation/ElementwiseActivation.h" +#include #include namespace onert::backend::acl_common @@ -96,8 +97,7 @@ ::arm_compute::DataType asDataType(const ir::DataType type) case ir::DataType::QUANT_INT8_SYMM_PER_CHANNEL: return ::arm_compute::DataType::QSYMM8_PER_CHANNEL; default: - throw std::runtime_error("Not supported internal data type, yet"); - break; + throw UnsupportedDataTypeException(type); } } @@ -224,7 +224,8 @@ std::set asSet(const ir::Operand &operand, int32_t rank) for (size_t i = 0; i < operand.shape().num_elements(); ++i) { int32_t axis = 0; - switch (operand.typeInfo().type()) + const auto data_type = operand.typeInfo().type(); + switch (data_type) { case ir::DataType::INT32: axis = reinterpret_cast(operand.data()->base())[i]; @@ -233,7 +234,7 @@ std::set asSet(const ir::Operand &operand, int32_t rank) axis = reinterpret_cast(operand.data()->base())[i]; break; default: - throw std::runtime_error("acl_common::asSet: Not supported data type"); + throw UnsupportedDataTypeException("asSet", data_type); } if (axis < 0) axis += rank; @@ -273,8 +274,7 @@ ir::DataType asRuntimeDataType(::arm_compute::DataType data_type) case ::arm_compute::DataType::QSYMM16: return ir::DataType::QUANT_INT16_SYMM; default: - throw std::runtime_error{"Not supported acl data type, yet"}; - break; + throw UnsupportedDataTypeException("asRuntimeDataType", data_type); } } @@ -312,7 +312,8 @@ arm_compute::PixelValue asPixelValue(const ir::Operand &operand) { assert(operand.isConstant()); assert(operand.shape().num_elements() == 1); - switch (operand.typeInfo().type()) + const auto data_type = operand.typeInfo().type(); + switch (data_type) { case ir::DataType::INT32: return arm_compute::PixelValue(operand.asScalar()); @@ -325,7 +326,7 @@ arm_compute::PixelValue asPixelValue(const ir::Operand &operand) case ir::DataType::FLOAT32: return arm_compute::PixelValue(operand.asScalar()); default: - throw std::runtime_error("asPixelValue : Not supported datatype 
yet"); + throw UnsupportedDataTypeException("asPixelValue", data_type); } } diff --git a/runtime/onert/backend/cpu/ops/AddNLayer.cc b/runtime/onert/backend/cpu/ops/AddNLayer.cc index 5d9985c3ffb..226112a0fbf 100644 --- a/runtime/onert/backend/cpu/ops/AddNLayer.cc +++ b/runtime/onert/backend/cpu/ops/AddNLayer.cc @@ -19,6 +19,7 @@ #include "OperationUtils.h" #include +#include #include namespace onert::backend::cpu::ops @@ -53,7 +54,7 @@ void AddNLayer::run() } else { - throw std::runtime_error("AddN: unsupported data type"); + throw UnsupportedDataTypeException{"AddN", _output->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/ArgMinMaxLayer.cc b/runtime/onert/backend/cpu/ops/ArgMinMaxLayer.cc index 68191ab35f1..aafcb3be14d 100644 --- a/runtime/onert/backend/cpu/ops/ArgMinMaxLayer.cc +++ b/runtime/onert/backend/cpu/ops/ArgMinMaxLayer.cc @@ -19,6 +19,7 @@ #include "OperationUtils.h" #include +#include #include namespace onert::backend::cpu::ops @@ -79,7 +80,7 @@ void ArgMinMaxLayer::run() TF_LITE_ARG_MIN_MAX(int32_t, int32_t, int32_t); break; default: - throw std::runtime_error("ArgMinMax: unsupported data type"); + throw UnsupportedDataTypeException{"ArgMinMax", _input->data_type()}; } } else if (_output->data_type() == ir::DataType::INT64) @@ -100,12 +101,12 @@ void ArgMinMaxLayer::run() TF_LITE_ARG_MIN_MAX(int32_t, int32_t, int64_t); break; default: - throw std::runtime_error("ArgMinMax: unsupported data type"); + throw UnsupportedDataTypeException{"ArgMinMax", _input->data_type()}; } } else { - throw std::runtime_error("ArgMinMax: unsupported data type"); + throw UnsupportedDataTypeException{"ArgMinMax", _output->data_type()}; } #undef TF_LITE_ARG_MIN_MAX diff --git a/runtime/onert/backend/cpu/ops/BatchMatMulLayer.cc b/runtime/onert/backend/cpu/ops/BatchMatMulLayer.cc index 6fc05802722..8d9cb8fa433 100644 --- a/runtime/onert/backend/cpu/ops/BatchMatMulLayer.cc +++ b/runtime/onert/backend/cpu/ops/BatchMatMulLayer.cc @@ -17,6 +17,7 @@ #include 
"BatchMatMulLayer.h" #include +#include namespace onert::backend::cpu::ops { @@ -60,14 +61,11 @@ void BatchMatMulLayer::configure(const IPortableTensor *lhs, const IPortableTens void BatchMatMulLayer::run() { - if ((_lhs->data_type() == OperandType::FLOAT32) && (_rhs->data_type() == OperandType::FLOAT32)) - { - batchMatMulFloat32(); - } - else - { - throw std::runtime_error{"BatchMatMul: unsupported data type"}; - } + if (_lhs->data_type() != OperandType::FLOAT32) + throw UnsupportedDataTypeException{"BatchMatMul", _lhs->data_type()}; + if (_rhs->data_type() != OperandType::FLOAT32) + throw UnsupportedDataTypeException{"BatchMatMul", _rhs->data_type()}; + batchMatMulFloat32(); } #undef AVGPOOLING_PARAMETERS diff --git a/runtime/onert/backend/cpu/ops/BatchToSpaceNDLayer.cc b/runtime/onert/backend/cpu/ops/BatchToSpaceNDLayer.cc index 8d61592e9f7..69814325274 100644 --- a/runtime/onert/backend/cpu/ops/BatchToSpaceNDLayer.cc +++ b/runtime/onert/backend/cpu/ops/BatchToSpaceNDLayer.cc @@ -17,6 +17,7 @@ #include "BatchToSpaceNDLayer.h" #include +#include namespace onert::backend::cpu::ops { @@ -66,7 +67,7 @@ void BatchToSpaceNDLayer::run() } else { - throw std::runtime_error{"NYI"}; + throw UnsupportedDataTypeException{"BatchToSpaceND", _output->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/BinaryArithmeticLayer.cc b/runtime/onert/backend/cpu/ops/BinaryArithmeticLayer.cc index 9d95aa4d525..a6e4c5d0eae 100644 --- a/runtime/onert/backend/cpu/ops/BinaryArithmeticLayer.cc +++ b/runtime/onert/backend/cpu/ops/BinaryArithmeticLayer.cc @@ -17,6 +17,7 @@ #include "BinaryArithmeticLayer.h" #include +#include namespace onert::backend::cpu::ops { @@ -120,7 +121,7 @@ generateKernelGeneric(const IPortableTensor *lhs, const IPortableTensor *rhs, break; } default: - throw std::runtime_error{"BinaryArithmetic(generic): Unsupported data type"}; + throw UnsupportedDataTypeException{"BinaryArithmetic(generic)", lhs->data_type()}; } } @@ -205,7 +206,6 @@ void 
BinaryArithmeticLayer::configure(const IPortableTensor *lhs, const IPortabl _kernel = Eval(_lhs, _rhs, _output, op_params); } - else { _kernel = generateKernelGeneric( @@ -227,7 +227,6 @@ void BinaryArithmeticLayer::configure(const IPortableTensor *lhs, const IPortabl _kernel = Eval(_lhs, _rhs, _output, op_params); } - else { _kernel = generateKernelGeneric( @@ -265,8 +264,7 @@ void BinaryArithmeticLayer::configure(const IPortableTensor *lhs, const IPortabl { // TODO Support quantized type // TODO Support integer type with zero check - throw std::runtime_error{ - "BinaryArithmetic(Div): Div operation does not support non-float data types yet"}; + throw UnsupportedDataTypeException{"BinaryArithmetic(Div)", lhs->data_type()}; } break; default: diff --git a/runtime/onert/backend/cpu/ops/BroadcastToLayer.cc b/runtime/onert/backend/cpu/ops/BroadcastToLayer.cc index d11e7cebe35..52e5b87e382 100644 --- a/runtime/onert/backend/cpu/ops/BroadcastToLayer.cc +++ b/runtime/onert/backend/cpu/ops/BroadcastToLayer.cc @@ -17,6 +17,7 @@ #include "BroadcastToLayer.h" #include +#include namespace onert::backend::cpu::ops { @@ -57,7 +58,7 @@ void BroadcastToLayer::run() getShape(_output), getBuffer(_output)); break; default: - throw std::runtime_error{"BroadcastToLayer: unsupported data type"}; + throw UnsupportedDataTypeException{"BroadcastTo", _output->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/ComparisonLayer.cc b/runtime/onert/backend/cpu/ops/ComparisonLayer.cc index 48a308d0656..3978f0576ef 100644 --- a/runtime/onert/backend/cpu/ops/ComparisonLayer.cc +++ b/runtime/onert/backend/cpu/ops/ComparisonLayer.cc @@ -19,6 +19,8 @@ #include #include +#include + using namespace nnfw::cker; namespace onert::backend::cpu::ops { @@ -156,7 +158,7 @@ void CompareLayer::run() } else { - throw std::runtime_error{"Compare: unsupported data type"}; + throw UnsupportedDataTypeException{"Compare", _lhs->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/ConcatLayer.cc 
b/runtime/onert/backend/cpu/ops/ConcatLayer.cc index b1ea8372a91..8dea9bf197e 100644 --- a/runtime/onert/backend/cpu/ops/ConcatLayer.cc +++ b/runtime/onert/backend/cpu/ops/ConcatLayer.cc @@ -19,6 +19,7 @@ #include "OperationUtils.h" #include +#include namespace onert::backend::cpu::ops { @@ -131,7 +132,7 @@ void ConcatLayer::run() concatenationGeneral(); break; default: - throw std::runtime_error("Concat: unsupported data type"); + throw UnsupportedDataTypeException{"Concat", _output->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/Conv2DLayer.cc b/runtime/onert/backend/cpu/ops/Conv2DLayer.cc index 42f166ffe58..0ebdeb9fbe7 100644 --- a/runtime/onert/backend/cpu/ops/Conv2DLayer.cc +++ b/runtime/onert/backend/cpu/ops/Conv2DLayer.cc @@ -21,6 +21,7 @@ #include "../Tensor.h" #include "ir/Padding.h" #include +#include namespace onert::backend::cpu::ops { @@ -271,7 +272,7 @@ void ConvolutionLayer::run() } else { - throw std::runtime_error{"Conv: unsupported data type"}; + throw UnsupportedDataTypeException{"Conv2D", _input->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/DepthToSpaceLayer.cc b/runtime/onert/backend/cpu/ops/DepthToSpaceLayer.cc index fdba608b61a..cae5370cbd8 100644 --- a/runtime/onert/backend/cpu/ops/DepthToSpaceLayer.cc +++ b/runtime/onert/backend/cpu/ops/DepthToSpaceLayer.cc @@ -19,6 +19,7 @@ #include "OperationUtils.h" #include +#include namespace onert::backend::cpu::ops { @@ -61,7 +62,7 @@ void DepthToSpaceLayer::run() depthToSpace(); break; default: - throw std::runtime_error{"DepthToSpace: unsupported data type"}; + throw UnsupportedDataTypeException{"DepthToSpace", _input->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/DepthwiseConv2DLayer.cc b/runtime/onert/backend/cpu/ops/DepthwiseConv2DLayer.cc index e88f77b580c..da385c277f0 100644 --- a/runtime/onert/backend/cpu/ops/DepthwiseConv2DLayer.cc +++ b/runtime/onert/backend/cpu/ops/DepthwiseConv2DLayer.cc @@ -18,6 +18,7 @@ #include "cker/PortableTensorUtils.h" 
#include +#include namespace onert::backend::cpu::ops { @@ -317,7 +318,7 @@ void DepthwiseConvolutionLayer::run() } else { - throw std::runtime_error{"DepthwiseConv: unsupported data type"}; + throw UnsupportedDataTypeException{"DepthwiseConv", _input->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/DynamicUpdateSliceLayer.cc b/runtime/onert/backend/cpu/ops/DynamicUpdateSliceLayer.cc index 2833c969642..98ba8f58c7b 100644 --- a/runtime/onert/backend/cpu/ops/DynamicUpdateSliceLayer.cc +++ b/runtime/onert/backend/cpu/ops/DynamicUpdateSliceLayer.cc @@ -18,6 +18,7 @@ #include "OperationUtils.h" #include +#include namespace onert::backend::cpu::ops { @@ -80,7 +81,7 @@ void DynamicUpdateSliceLayer::run() getBuffer(_output)); break; default: - throw std::runtime_error{"DynamicUpdateSlice: NYI - unsupported data type"}; + throw UnsupportedDataTypeException{"DynamicUpdateSlice", _operand->data_type()}; break; } } diff --git a/runtime/onert/backend/cpu/ops/ElementwiseActivationLayer.cc b/runtime/onert/backend/cpu/ops/ElementwiseActivationLayer.cc index 6c44e4c499d..f8a910095d0 100644 --- a/runtime/onert/backend/cpu/ops/ElementwiseActivationLayer.cc +++ b/runtime/onert/backend/cpu/ops/ElementwiseActivationLayer.cc @@ -25,6 +25,7 @@ #include #include #include +#include namespace onert::backend::cpu::ops { @@ -98,7 +99,7 @@ void ElementwiseActivationLayer::configure(const IPortableTensor *input, IPortab } else { - throw std::runtime_error{"ElementwiseActivationLayer(Elu): unsupported data type"}; + throw UnsupportedDataTypeException{"ElementwiseActivation(Elu)", input->data_type()}; } break; case ElementwiseActivationType::kLogistic: @@ -117,7 +118,7 @@ void ElementwiseActivationLayer::configure(const IPortableTensor *input, IPortab } else { - throw std::runtime_error{"ElementwiseActivationLayer(Logistic): unsupported data type"}; + throw UnsupportedDataTypeException{"ElementwiseActivation(Logistic)", input->data_type()}; } break; case 
ElementwiseActivationType::kReLU: @@ -145,7 +146,7 @@ void ElementwiseActivationLayer::configure(const IPortableTensor *input, IPortab } else { - throw std::runtime_error{"ElementwiseActivationLayer(ReLU): unsupported data type"}; + throw UnsupportedDataTypeException{"ElementwiseActivation(ReLU)", input->data_type()}; } break; case ElementwiseActivationType::kTanh: @@ -164,7 +165,7 @@ void ElementwiseActivationLayer::configure(const IPortableTensor *input, IPortab } else { - throw std::runtime_error{"ElementwiseActivationLayer(Tanh): unsupported data type"}; + throw UnsupportedDataTypeException{"ElementwiseActivation(Tanh)", input->data_type()}; } break; case ElementwiseActivationType::kLeakyReLU: @@ -178,7 +179,7 @@ void ElementwiseActivationLayer::configure(const IPortableTensor *input, IPortab } else { - throw std::runtime_error{"ElementwiseActivationLayer(LeakyReLU): unsupported data type"}; + throw UnsupportedDataTypeException{"ElementwiseActivation(LeakyReLU)", input->data_type()}; } break; case ElementwiseActivationType::kGELU: @@ -191,7 +192,7 @@ void ElementwiseActivationLayer::configure(const IPortableTensor *input, IPortab } else { - throw std::runtime_error{"ElementwiseActivationLayer(GELU): unsupported data type"}; + throw UnsupportedDataTypeException{"ElementwiseActivation(GELU)", input->data_type()}; } break; default: diff --git a/runtime/onert/backend/cpu/ops/ElementwiseBinaryLayer.cc b/runtime/onert/backend/cpu/ops/ElementwiseBinaryLayer.cc index 441fadd8309..6d12613daf5 100644 --- a/runtime/onert/backend/cpu/ops/ElementwiseBinaryLayer.cc +++ b/runtime/onert/backend/cpu/ops/ElementwiseBinaryLayer.cc @@ -23,6 +23,7 @@ #include #include #include +#include namespace onert::backend::cpu::ops { @@ -140,7 +141,7 @@ void ElementwiseBinaryLayer::configure(const IPortableTensor *lhs, const IPortab } else { - throw std::runtime_error{"Max: unsupported data type"}; + throw UnsupportedDataTypeException{"FloorDiv", _lhs->data_type()}; } break; case 
ElementwiseBinaryType::kFloorMod: @@ -154,7 +155,7 @@ void ElementwiseBinaryLayer::configure(const IPortableTensor *lhs, const IPortab } else { - throw std::runtime_error{"FloorMod: unsupported data type"}; + throw UnsupportedDataTypeException{"FloorMod", _lhs->data_type()}; } break; case ElementwiseBinaryType::kLogicalAnd: @@ -165,7 +166,7 @@ void ElementwiseBinaryLayer::configure(const IPortableTensor *lhs, const IPortab } else { - throw std::runtime_error{"LogicalOr: Unsupported data type"}; + throw UnsupportedDataTypeException{"LogicalAnd", _lhs->data_type()}; } break; case ElementwiseBinaryType::kLogicalOr: @@ -176,7 +177,7 @@ void ElementwiseBinaryLayer::configure(const IPortableTensor *lhs, const IPortab } else { - throw std::runtime_error{"LogicalOr: Unsupported data type"}; + throw UnsupportedDataTypeException{"LogicalOr", _lhs->data_type()}; } break; case ElementwiseBinaryType::kMax: @@ -194,7 +195,7 @@ void ElementwiseBinaryLayer::configure(const IPortableTensor *lhs, const IPortab } else { - throw std::runtime_error{"Max: unsupported data type"}; + throw UnsupportedDataTypeException{"Max", _lhs->data_type()}; } break; case ElementwiseBinaryType::kMin: @@ -216,7 +217,7 @@ void ElementwiseBinaryLayer::configure(const IPortableTensor *lhs, const IPortab } else { - throw std::runtime_error{"Min: unsupported data type"}; + throw UnsupportedDataTypeException{"Min", _lhs->data_type()}; } break; default: diff --git a/runtime/onert/backend/cpu/ops/ElementwiseUnaryLayer.cc b/runtime/onert/backend/cpu/ops/ElementwiseUnaryLayer.cc index ba30d88805a..24ff11c2a14 100644 --- a/runtime/onert/backend/cpu/ops/ElementwiseUnaryLayer.cc +++ b/runtime/onert/backend/cpu/ops/ElementwiseUnaryLayer.cc @@ -25,6 +25,7 @@ #include #include #include +#include namespace onert::backend::cpu::ops { @@ -66,8 +67,7 @@ void castPtr(const FromT *in, DataPtr out, int num_elements, ir::DataType data_t [](FromT a) { return static_cast(a); }); return; default: - throw std::runtime_error("Cast: 
Not supported output type" + - std::to_string((int)data_type_out)); + throw UnsupportedDataTypeException{"Cast", data_type_out}; } } @@ -103,8 +103,7 @@ void cast(const IPortableTensor *input, IPortableTensor *output) castPtr(in.i64, out, num_elements, output->data_type()); return; default: - throw std::runtime_error("Cast: unsupported data type" + - std::to_string((int)input->data_type())); + throw UnsupportedDataTypeException{"Cast", input->data_type()}; } } @@ -226,7 +225,7 @@ void ElementwiseUnaryLayer::configure(const IPortableTensor *input, IPortableTen } else { - throw std::runtime_error{"Abs: Unsupported data type"}; + throw UnsupportedDataTypeException{"Abs", input->data_type()}; } break; case ElementwiseUnaryType::kCast: @@ -239,7 +238,7 @@ void ElementwiseUnaryLayer::configure(const IPortableTensor *input, IPortableTen } else { - throw std::runtime_error{"Cos: Unsupported data type"}; + throw UnsupportedDataTypeException{"Cos", input->data_type()}; } break; case ElementwiseUnaryType::kDequantize: @@ -258,7 +257,7 @@ void ElementwiseUnaryLayer::configure(const IPortableTensor *input, IPortableTen } else { - throw std::runtime_error{"Dequantize: Unsupported data type"}; + throw UnsupportedDataTypeException{"Dequantize", input->data_type()}; } break; case ElementwiseUnaryType::kExp: @@ -268,7 +267,7 @@ void ElementwiseUnaryLayer::configure(const IPortableTensor *input, IPortableTen } else { - throw std::runtime_error{"Exp: Unsupported data type"}; + throw UnsupportedDataTypeException{"Exp", input->data_type()}; } break; case ElementwiseUnaryType::kErf: @@ -278,7 +277,7 @@ void ElementwiseUnaryLayer::configure(const IPortableTensor *input, IPortableTen } else { - throw std::runtime_error{"Exp: Unsupported data type"}; + throw UnsupportedDataTypeException{"Exp", input->data_type()}; } break; case ElementwiseUnaryType::kFloor: @@ -288,7 +287,7 @@ void ElementwiseUnaryLayer::configure(const IPortableTensor *input, IPortableTen } else { - throw 
std::runtime_error{"Floor: Unsupported data type"}; + throw UnsupportedDataTypeException{"Floor", input->data_type()}; } break; case ElementwiseUnaryType::kLog: @@ -298,7 +297,7 @@ void ElementwiseUnaryLayer::configure(const IPortableTensor *input, IPortableTen } else { - throw std::runtime_error{"Log: Unsupported data type"}; + throw UnsupportedDataTypeException{"Log", input->data_type()}; } break; case ElementwiseUnaryType::kLogicalNot: @@ -309,7 +308,7 @@ void ElementwiseUnaryLayer::configure(const IPortableTensor *input, IPortableTen } else { - throw std::runtime_error{"LogicalNot: Unsupported data type"}; + throw UnsupportedDataTypeException{"LogicalNot", input->data_type()}; } break; case ElementwiseUnaryType::kNeg: @@ -327,7 +326,7 @@ void ElementwiseUnaryLayer::configure(const IPortableTensor *input, IPortableTen } else { - throw std::runtime_error{"Neg: Unsupported data type"}; + throw UnsupportedDataTypeException{"Neg", input->data_type()}; } break; case ElementwiseUnaryType::kRound: @@ -337,7 +336,7 @@ void ElementwiseUnaryLayer::configure(const IPortableTensor *input, IPortableTen } else { - throw std::runtime_error{"Round: Unsupported data type"}; + throw UnsupportedDataTypeException{"Round", input->data_type()}; } break; case ElementwiseUnaryType::kRSqrt: @@ -347,7 +346,7 @@ void ElementwiseUnaryLayer::configure(const IPortableTensor *input, IPortableTen } else { - throw std::runtime_error{"RSqrt: Unsupported data type"}; + throw UnsupportedDataTypeException{"RSqrt", input->data_type()}; } break; case ElementwiseUnaryType::kSin: @@ -357,7 +356,7 @@ void ElementwiseUnaryLayer::configure(const IPortableTensor *input, IPortableTen } else { - throw std::runtime_error{"Sin: Unsupported data type"}; + throw UnsupportedDataTypeException{"Sin", input->data_type()}; } break; case ElementwiseUnaryType::kSqrt: @@ -367,7 +366,7 @@ void ElementwiseUnaryLayer::configure(const IPortableTensor *input, IPortableTen } else { - throw std::runtime_error{"Sqrt: 
Unsupported data type"}; + throw UnsupportedDataTypeException{"Sqrt", input->data_type()}; } break; case ElementwiseUnaryType::kSquare: @@ -377,7 +376,7 @@ void ElementwiseUnaryLayer::configure(const IPortableTensor *input, IPortableTen } else { - throw std::runtime_error{"Square: Unsupported data type"}; + throw UnsupportedDataTypeException{"Square", input->data_type()}; } break; case ElementwiseUnaryType::kZerosLike: @@ -391,7 +390,7 @@ void ElementwiseUnaryLayer::configure(const IPortableTensor *input, IPortableTen } else { - throw std::runtime_error{"ZerosLike: Unsupported data type"}; + throw UnsupportedDataTypeException{"ZerosLike", input->data_type()}; } break; default: @@ -431,7 +430,7 @@ void QuantizeLayer::configure(const IPortableTensor *input, IPortableTensor *out } else { - throw std::runtime_error{"Quantize: Unsupported data type"}; + throw UnsupportedDataTypeException{"Quantize", input->data_type()}; } } @@ -444,7 +443,7 @@ void QuantizeLayer::run() else if (_output->data_type() == OperandType::QUANT_INT16_SYMM) affineQuantize(_input, _output); else - throw std::runtime_error{"Quantize: Unsupported data type"}; + throw UnsupportedDataTypeException{"Quantize", _output->data_type()}; } else if ((_input->data_type() == OperandType::QUANT_UINT8_ASYMM) && (_output->data_type() == OperandType::QUANT_INT8_ASYMM)) @@ -464,7 +463,7 @@ void QuantizeLayer::run() } else { - throw std::runtime_error{"Quantize: Unsupported data type"}; + throw UnsupportedDataTypeException{"Quantize", _input->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/FillLayer.cc b/runtime/onert/backend/cpu/ops/FillLayer.cc index 128caa79496..4ad9eaa4cf8 100644 --- a/runtime/onert/backend/cpu/ops/FillLayer.cc +++ b/runtime/onert/backend/cpu/ops/FillLayer.cc @@ -19,6 +19,7 @@ #include "OperationUtils.h" #include +#include namespace onert::backend::cpu::ops { @@ -55,7 +56,7 @@ void FillLayer::run() getBuffer(_output)); break; default: - throw std::runtime_error{"Fill: unsupported 
data type"}; + throw UnsupportedDataTypeException{"Fill", _output->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/FullyConnectedLayer.cc b/runtime/onert/backend/cpu/ops/FullyConnectedLayer.cc index 4f7e6bdca78..e9a6a3aa00f 100644 --- a/runtime/onert/backend/cpu/ops/FullyConnectedLayer.cc +++ b/runtime/onert/backend/cpu/ops/FullyConnectedLayer.cc @@ -22,6 +22,7 @@ #include #include #include +#include namespace onert::backend::cpu::ops { @@ -179,9 +180,9 @@ void FullyConnectedLayer::fullyConnectedGGMLWeight() throw std::runtime_error{"FullyConnected: GGML weights format does not support bias yet."}; // convert tensor - auto input = getGGMLTensor(_input); - auto weights = getGGMLTensor(_weights); - auto output = getGGMLTensor(_output); + auto input = getGGMLTensor("FullyConnected", _input); + auto weights = getGGMLTensor("FullyConnected", _weights); + auto output = getGGMLTensor("FullyConnected", _output); { output.op = GGML_OP_MUL_MAT; output.src[0] = &weights; @@ -277,7 +278,7 @@ void FullyConnectedLayer::run() } else { - throw std::runtime_error{"FullyConnected: unsupported data type"}; + throw UnsupportedDataTypeException{"FullyConnected", _input->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/FusedBatchNormLayer.cc b/runtime/onert/backend/cpu/ops/FusedBatchNormLayer.cc index 6fc8824943a..0a1fa4902b5 100644 --- a/runtime/onert/backend/cpu/ops/FusedBatchNormLayer.cc +++ b/runtime/onert/backend/cpu/ops/FusedBatchNormLayer.cc @@ -17,6 +17,7 @@ #include "FusedBatchNormLayer.h" #include +#include namespace onert::backend::cpu::ops { @@ -63,7 +64,7 @@ void FusedBatchNormLayer::run() } else { - throw std::runtime_error{"FusedBatchNorm: unsupported data type"}; + throw UnsupportedDataTypeException{"FusedBatchNorm", _output->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/GGMLHelper.cc b/runtime/onert/backend/cpu/ops/GGMLHelper.cc index 8b66f024171..bcf24113382 100644 --- a/runtime/onert/backend/cpu/ops/GGMLHelper.cc +++ 
b/runtime/onert/backend/cpu/ops/GGMLHelper.cc @@ -16,10 +16,12 @@ #include "GGMLHelper.h" +#include + namespace onert::backend::cpu::ops { -ggml_type getGGMLType(ir::DataType type) +ggml_type getGGMLType(std::string op, ir::DataType type) { switch (type) { @@ -34,15 +36,15 @@ ggml_type getGGMLType(ir::DataType type) case ir::DataType::INT64: return GGML_TYPE_I64; default: - throw std::runtime_error("Unsupported data type"); + throw UnsupportedDataTypeException{std::move(op), type}; } } -struct ggml_tensor getGGMLTensor(const IPortableTensor *tensor) +struct ggml_tensor getGGMLTensor(std::string op, const IPortableTensor *tensor) { struct ggml_tensor res; - res.type = getGGMLType(tensor->data_type()); + res.type = getGGMLType(std::move(op), tensor->data_type()); const auto rank = tensor->getShape().rank(); for (int i = 0; i < GGML_MAX_DIMS; ++i) { diff --git a/runtime/onert/backend/cpu/ops/GGMLHelper.h b/runtime/onert/backend/cpu/ops/GGMLHelper.h index d692dc23d7d..bed6b4d7597 100644 --- a/runtime/onert/backend/cpu/ops/GGMLHelper.h +++ b/runtime/onert/backend/cpu/ops/GGMLHelper.h @@ -17,6 +17,8 @@ #ifndef __ONERT_BACKEND_CPU_GGML_HELPER_H__ #define __ONERT_BACKEND_CPU_GGML_HELPER_H__ +#include + #include #include @@ -24,7 +26,7 @@ namespace onert::backend::cpu::ops { -struct ggml_tensor getGGMLTensor(const IPortableTensor *tensor); +struct ggml_tensor getGGMLTensor(std::string op, const IPortableTensor *tensor); } // namespace onert::backend::cpu::ops diff --git a/runtime/onert/backend/cpu/ops/GatherLayer.cc b/runtime/onert/backend/cpu/ops/GatherLayer.cc index 91bbf48e3b5..c9967bce33c 100644 --- a/runtime/onert/backend/cpu/ops/GatherLayer.cc +++ b/runtime/onert/backend/cpu/ops/GatherLayer.cc @@ -20,6 +20,7 @@ #include "GGMLHelper.h" #include +#include namespace onert::backend::cpu::ops { @@ -64,7 +65,7 @@ template void GatherLayer::runByInputType() break; } default: - throw std::runtime_error("Gather: unsupported indices data type"); + throw 
UnsupportedDataTypeException{"Gather", _indices->data_type()}; } } @@ -82,15 +83,15 @@ void GatherLayer::runByGGMLQuantInputType() throw std::runtime_error("Gather: invalid indices tensor shape"); if (_indices->data_type() != ir::DataType::INT32) - throw std::runtime_error("Gather: indices tensor must be int32 type"); + throw UnsupportedDataTypeException{"Gather", _indices->data_type()}; if (_axis != 0) throw std::runtime_error("Gather: axis must be 0"); // convert tensor - auto input = getGGMLTensor(_input); - auto indices = getGGMLTensor(_indices); - auto output = getGGMLTensor(_output); + auto input = getGGMLTensor("FullyConnected", _input); + auto indices = getGGMLTensor("FullyConnected", _indices); + auto output = getGGMLTensor("FullyConnected", _output); { output.op = GGML_OP_GET_ROWS; output.src[0] = &input; @@ -135,7 +136,7 @@ void GatherLayer::run() runByInputType(); break; default: - throw std::runtime_error("Gather: unsupported input data type"); + throw UnsupportedDataTypeException{"Gather", _input->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/L2NormalizationLayer.cc b/runtime/onert/backend/cpu/ops/L2NormalizationLayer.cc index 92d1edc0e24..cff249f9030 100644 --- a/runtime/onert/backend/cpu/ops/L2NormalizationLayer.cc +++ b/runtime/onert/backend/cpu/ops/L2NormalizationLayer.cc @@ -20,6 +20,7 @@ #include #include +#include namespace onert::backend::cpu::ops { @@ -53,7 +54,7 @@ void L2NormLayer::run() break; default: - throw std::runtime_error{"L2Norm: Unsupported data type"}; + throw UnsupportedDataTypeException{"L2Norm", _input->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/LSTMLayer.cc b/runtime/onert/backend/cpu/ops/LSTMLayer.cc index c3986efecf7..43e49c72411 100644 --- a/runtime/onert/backend/cpu/ops/LSTMLayer.cc +++ b/runtime/onert/backend/cpu/ops/LSTMLayer.cc @@ -20,6 +20,7 @@ #include "OperationUtils.h" #include +#include namespace onert::backend::cpu::ops { @@ -303,7 +304,7 @@ void LSTMLayer::run() } else { - throw 
std::runtime_error{"LSTMLayer: unsupported data type"}; + throw UnsupportedDataTypeException{"LSTMLayer", _input->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/LogSoftmaxLayer.cc b/runtime/onert/backend/cpu/ops/LogSoftmaxLayer.cc index e2385bdb5cf..69d98af8854 100644 --- a/runtime/onert/backend/cpu/ops/LogSoftmaxLayer.cc +++ b/runtime/onert/backend/cpu/ops/LogSoftmaxLayer.cc @@ -19,6 +19,7 @@ #include "OperationUtils.h" #include +#include namespace onert::backend::cpu::ops { @@ -85,7 +86,7 @@ void LogSoftMaxLayer::run() } else { - throw std::runtime_error{"LogSoftmax : unsupported data type"}; + throw UnsupportedDataTypeException{"LogSoftmax", _input->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/OneHotLayer.cc b/runtime/onert/backend/cpu/ops/OneHotLayer.cc index 83acc564163..34ef1c81af4 100644 --- a/runtime/onert/backend/cpu/ops/OneHotLayer.cc +++ b/runtime/onert/backend/cpu/ops/OneHotLayer.cc @@ -19,6 +19,7 @@ #include "OperationUtils.h" #include +#include namespace onert::backend::cpu::ops { @@ -51,7 +52,7 @@ void OneHotLayer::run() } else { - throw std::runtime_error{"OneHot: unsupported data type"}; + throw UnsupportedDataTypeException{"OneHot", _output->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/OperationUtils.cc b/runtime/onert/backend/cpu/ops/OperationUtils.cc index f6db75d9df5..c9c76df91fa 100644 --- a/runtime/onert/backend/cpu/ops/OperationUtils.cc +++ b/runtime/onert/backend/cpu/ops/OperationUtils.cc @@ -16,6 +16,8 @@ #include "OperationUtils.h" +#include + #include #include #include @@ -246,8 +248,7 @@ uint32_t sizeOfData(OperandType type, const std::vector &dimensions) size = 8; break; default: - throw std::runtime_error("Not supported operand type."); - break; + throw UnsupportedDataTypeException{type}; } for (auto &&d : dimensions) @@ -275,7 +276,7 @@ nnfw::cker::PaddingType getPaddingType(ir::PaddingType ir_padding_type) } } -std::vector getReducerAxes(const IPortableTensor *axes) +std::vector 
getReducerAxes(std::string op, const IPortableTensor *axes) { std::vector ret; @@ -296,7 +297,7 @@ std::vector getReducerAxes(const IPortableTensor *axes) break; } default: - throw std::runtime_error("getReducerAxes: Not supported data type"); + throw UnsupportedDataTypeException{std::move(op), axes->data_type()}; break; } return ret; diff --git a/runtime/onert/backend/cpu/ops/OperationUtils.h b/runtime/onert/backend/cpu/ops/OperationUtils.h index f0bfd7e79ec..5311dbc5688 100644 --- a/runtime/onert/backend/cpu/ops/OperationUtils.h +++ b/runtime/onert/backend/cpu/ops/OperationUtils.h @@ -162,7 +162,7 @@ uint32_t sizeOfData(OperandType type, const std::vector &dimensions); nnfw::cker::PaddingType getPaddingType(ir::PaddingType ir_padding_type); -std::vector getReducerAxes(const IPortableTensor *axes); +std::vector getReducerAxes(std::string op, const IPortableTensor *axes); nnfw::cker::RoPEMode getRoPEMode(ir::operation::RoPE::RoPEMode rope_mode); diff --git a/runtime/onert/backend/cpu/ops/PackLayer.cc b/runtime/onert/backend/cpu/ops/PackLayer.cc index 69791ac4294..0f97d36c070 100644 --- a/runtime/onert/backend/cpu/ops/PackLayer.cc +++ b/runtime/onert/backend/cpu/ops/PackLayer.cc @@ -19,6 +19,7 @@ #include "OperationUtils.h" #include +#include namespace onert::backend::cpu::ops { @@ -79,7 +80,7 @@ void PackLayer::run() } else { - throw std::runtime_error{"Pack: unsupported data type"}; + throw UnsupportedDataTypeException{"Pack", _output->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/PadLayer.cc b/runtime/onert/backend/cpu/ops/PadLayer.cc index 8035ea2fb11..af4b8c329af 100644 --- a/runtime/onert/backend/cpu/ops/PadLayer.cc +++ b/runtime/onert/backend/cpu/ops/PadLayer.cc @@ -17,6 +17,7 @@ #include "PadLayer.h" #include +#include namespace onert::backend::cpu::ops { @@ -82,7 +83,7 @@ void PadLayer::run() } break; default: - throw std::runtime_error{"Pad: unsupported data type"}; + throw UnsupportedDataTypeException{"Pad", _input->data_type()}; } } diff 
--git a/runtime/onert/backend/cpu/ops/Pool2DLayer.cc b/runtime/onert/backend/cpu/ops/Pool2DLayer.cc index 2d48f8d8a0a..42ac0f14638 100644 --- a/runtime/onert/backend/cpu/ops/Pool2DLayer.cc +++ b/runtime/onert/backend/cpu/ops/Pool2DLayer.cc @@ -18,6 +18,7 @@ #include #include +#include #include @@ -129,7 +130,7 @@ void PoolLayer::configure(const IPortableTensor *input, const uint32_t paddingLe break; } default: - throw std::runtime_error{"Pool: unsupported data type"}; + throw UnsupportedDataTypeException{"Pool", input->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/PowLayer.cc b/runtime/onert/backend/cpu/ops/PowLayer.cc index fb78bcfe61d..96a2b15f4dd 100644 --- a/runtime/onert/backend/cpu/ops/PowLayer.cc +++ b/runtime/onert/backend/cpu/ops/PowLayer.cc @@ -18,6 +18,7 @@ #include #include +#include namespace onert::backend::cpu::ops { @@ -56,7 +57,7 @@ void PowLayer::run() if (_output->data_type() == OperandType::FLOAT32) powFloat32(); else - throw std::runtime_error{"Pow: unsupportted data type"}; + throw UnsupportedDataTypeException{"Pow", _output->data_type()}; } } // namespace onert::backend::cpu::ops diff --git a/runtime/onert/backend/cpu/ops/RangeLayer.cc b/runtime/onert/backend/cpu/ops/RangeLayer.cc index f14f6a9f6bc..f2ff1663121 100644 --- a/runtime/onert/backend/cpu/ops/RangeLayer.cc +++ b/runtime/onert/backend/cpu/ops/RangeLayer.cc @@ -19,6 +19,7 @@ #include "OperationUtils.h" #include +#include namespace onert::backend::cpu::ops { @@ -49,7 +50,7 @@ void RangeLayer::run() getBuffer(_delta), getBuffer(_output)); break; default: - throw std::runtime_error{"Range: unsupported data type"}; + throw UnsupportedDataTypeException{"Range", _output->data_type()}; break; } } diff --git a/runtime/onert/backend/cpu/ops/ReduceLayer.cc b/runtime/onert/backend/cpu/ops/ReduceLayer.cc index 399ca5c100d..0e95098d8a8 100644 --- a/runtime/onert/backend/cpu/ops/ReduceLayer.cc +++ b/runtime/onert/backend/cpu/ops/ReduceLayer.cc @@ -21,6 +21,7 @@ #include 
"cker/neon/neon_check.h" #include #include +#include namespace onert::backend::cpu::ops { @@ -113,7 +114,7 @@ generateKernelGeneric(const IPortableTensor *input, bool keep_dims, case OperandType::BOOL8: return evalType(keep_dims, reduce_kernel, reduce_type); default: - throw std::runtime_error{"Reduce(generic): unsupported data type"}; + throw UnsupportedDataTypeException{"Reduce(generic)", input->data_type()}; } } @@ -203,7 +204,7 @@ void ReduceLayer::configure(const IPortableTensor *input, const IPortableTensor void ReduceLayer::run() { - const auto axes = getReducerAxes(_axes); + const auto axes = getReducerAxes("Reduce", _axes); #ifdef USE_NEON int32_t rank = _input->getShape().rank(); if (_input->data_type() == ir::DataType::FLOAT32 && _reduceType == ReduceType::kSum && @@ -224,7 +225,7 @@ MeanLayer::MeanLayer() : _input(nullptr), _axes(nullptr), _output(nullptr), _kee void MeanLayer::MeanFloat32() { const auto inputShape = getShape(_input); - const auto axisVec = getReducerAxes(_axes); + const auto axisVec = getReducerAxes("Mean", _axes); bool axis_is_1_and_2 = _keep_dims && inputShape.DimensionsCount() == 4 && axisVec.size() == 2 && ((axisVec[0] == 1 && axisVec[1] == 2) || (axisVec[0] == 2 && axisVec[1] == 1)); @@ -245,7 +246,8 @@ void MeanLayer::MeanQuant8() { nnfw::cker::MeanQ8Asymm(getShape(_input), getBuffer(_input), _input->data_scale(), _input->data_zero_point(), getShape(_output), getBuffer(_output), - _output->data_scale(), _output->data_zero_point(), getReducerAxes(_axes)); + _output->data_scale(), _output->data_zero_point(), + getReducerAxes("Mean", _axes)); } void MeanLayer::configure(const IPortableTensor *input, const IPortableTensor *axes, @@ -258,7 +260,7 @@ void MeanLayer::configure(const IPortableTensor *input, const IPortableTensor *a if (_input->data_type() != OperandType::FLOAT32 && _input->data_type() != OperandType::QUANT_UINT8_ASYMM) - throw std::runtime_error{"Mean: unsupported data type"}; + throw UnsupportedDataTypeException{"Mean", 
_input->data_type()}; } void MeanLayer::run() @@ -273,7 +275,7 @@ void MeanLayer::run() } else { - throw std::runtime_error{"Mean: unsupported data type"}; + throw UnsupportedDataTypeException{"Mean", _input->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/ResizeBilinearLayer.cc b/runtime/onert/backend/cpu/ops/ResizeBilinearLayer.cc index 9b46d049b7a..ab298892302 100644 --- a/runtime/onert/backend/cpu/ops/ResizeBilinearLayer.cc +++ b/runtime/onert/backend/cpu/ops/ResizeBilinearLayer.cc @@ -17,6 +17,7 @@ #include "ResizeBilinearLayer.h" #include "cker/operation/ResizeBilinear.h" #include +#include namespace onert::backend::cpu::ops { @@ -106,7 +107,7 @@ void ResizeBilinearLayer::run() std::runtime_error("ResizeBilinear NYI"); break; default: - std::runtime_error("ResizeBilinear unsupported data type"); + throw UnsupportedDataTypeException{"ResizeBilinear", _input->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/ReverseLayer.cc b/runtime/onert/backend/cpu/ops/ReverseLayer.cc index 8ae4c39ca0f..ba6ba20a033 100644 --- a/runtime/onert/backend/cpu/ops/ReverseLayer.cc +++ b/runtime/onert/backend/cpu/ops/ReverseLayer.cc @@ -19,6 +19,7 @@ #include "OperationUtils.h" #include +#include namespace onert::backend::cpu::ops { @@ -43,7 +44,7 @@ void ReverseLayer::run() getShape(_output), getBuffer(_output)); break; default: - throw std::runtime_error{"Reverse: unsupported data type"}; + throw UnsupportedDataTypeException{"Reverse", _input->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/RmsNormLayer.cc b/runtime/onert/backend/cpu/ops/RmsNormLayer.cc index a8dad9b59df..9c600f80f02 100644 --- a/runtime/onert/backend/cpu/ops/RmsNormLayer.cc +++ b/runtime/onert/backend/cpu/ops/RmsNormLayer.cc @@ -20,6 +20,7 @@ #include #include +#include namespace onert::backend::cpu::ops { @@ -48,7 +49,7 @@ void RmsNormLayer::run() break; default: - throw std::runtime_error{"RmsNorm: Unsupported data type"}; + throw UnsupportedDataTypeException{"RmsNorm",
_input->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/RoPELayer.cc b/runtime/onert/backend/cpu/ops/RoPELayer.cc index 009a52f8d6f..cec5bb2766b 100644 --- a/runtime/onert/backend/cpu/ops/RoPELayer.cc +++ b/runtime/onert/backend/cpu/ops/RoPELayer.cc @@ -17,6 +17,7 @@ #include "RoPELayer.h" #include +#include namespace onert::backend::cpu::ops { @@ -76,7 +77,7 @@ void RoPELayer::run() rope(); break; default: - throw std::runtime_error("RoPE: unsupported data type"); + throw UnsupportedDataTypeException{"RoPE", _input->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/SelectLayer.cc b/runtime/onert/backend/cpu/ops/SelectLayer.cc index 814103b4ebf..73cad3a55ff 100644 --- a/runtime/onert/backend/cpu/ops/SelectLayer.cc +++ b/runtime/onert/backend/cpu/ops/SelectLayer.cc @@ -19,6 +19,7 @@ #include "OperationUtils.h" #include +#include namespace onert::backend::cpu::ops { @@ -46,15 +47,15 @@ void SelectLayer::run() getBuffer(_input_true), getShape(_input_false), \ getBuffer(_input_false), getShape(_output), getBuffer(_output)); -#define KERNEL_SWITCH(type, op) \ - switch (type) \ - { \ - break; \ - case OperandType::FLOAT32: \ - KERNEL_SELECT(float, op); \ - break; \ - default: \ - throw std::runtime_error{"Select: unsupported data type"}; \ +#define KERNEL_SWITCH(type, op) \ + switch (type) \ + { \ + break; \ + case OperandType::FLOAT32: \ + KERNEL_SELECT(float, op); \ + break; \ + default: \ + throw UnsupportedDataTypeException{"Select", type}; \ } auto input_type = _input_true->data_type(); diff --git a/runtime/onert/backend/cpu/ops/ShapeLayer.cc b/runtime/onert/backend/cpu/ops/ShapeLayer.cc index 13e32d58978..72a5a49cfe6 100644 --- a/runtime/onert/backend/cpu/ops/ShapeLayer.cc +++ b/runtime/onert/backend/cpu/ops/ShapeLayer.cc @@ -18,6 +18,8 @@ #include "OperationUtils.h" +#include + namespace onert::backend::cpu::ops { @@ -57,7 +59,7 @@ void ShapeLayer::run() } else { - throw std::runtime_error{"NYI : not supported output type for ShapeLayer"}; 
+ throw UnsupportedDataTypeException{"Shape", _output->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/SliceLayer.cc b/runtime/onert/backend/cpu/ops/SliceLayer.cc index 23c9af95c7f..b03713a8ca3 100644 --- a/runtime/onert/backend/cpu/ops/SliceLayer.cc +++ b/runtime/onert/backend/cpu/ops/SliceLayer.cc @@ -19,6 +19,7 @@ #include "OperationUtils.h" #include +#include namespace onert::backend::cpu::ops { @@ -59,7 +60,7 @@ template void SliceLayer::sliceImpl() } else { - throw std::runtime_error{"Slice: unsupported begin and/or size data type"}; + throw UnsupportedDataTypeException{"Slice", _begin->data_type()}; } // begins : 0-based, sizes : 1-based @@ -103,7 +104,7 @@ void SliceLayer::run() } else { - throw std::runtime_error{"Slice: unsupported data type"}; + throw UnsupportedDataTypeException{"Slice", _input->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/SoftmaxLayer.cc b/runtime/onert/backend/cpu/ops/SoftmaxLayer.cc index 821764df41d..b977bcd810a 100644 --- a/runtime/onert/backend/cpu/ops/SoftmaxLayer.cc +++ b/runtime/onert/backend/cpu/ops/SoftmaxLayer.cc @@ -19,6 +19,7 @@ #include "OperationUtils.h" #include +#include namespace onert::backend::cpu::ops { @@ -114,7 +115,7 @@ void SoftMaxLayer::run() softmaxQuant8(); break; default: - throw std::runtime_error{"SoftMax: unsupported data type"}; + throw UnsupportedDataTypeException{"SoftMax", _input->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/SpaceToBatchNDLayer.cc b/runtime/onert/backend/cpu/ops/SpaceToBatchNDLayer.cc index d4b7478e581..99d9e3fe40f 100644 --- a/runtime/onert/backend/cpu/ops/SpaceToBatchNDLayer.cc +++ b/runtime/onert/backend/cpu/ops/SpaceToBatchNDLayer.cc @@ -19,6 +19,7 @@ #include "OperationUtils.h" #include +#include namespace onert::backend::cpu::ops { @@ -96,7 +97,7 @@ void SpaceToBatchNDLayer::run() } else { - throw std::runtime_error{"SpaceToBatchND: unsupported data type"}; + throw UnsupportedDataTypeException{"SpaceToBatchND", _input->data_type()}; 
} } diff --git a/runtime/onert/backend/cpu/ops/SpaceToDepthLayer.cc b/runtime/onert/backend/cpu/ops/SpaceToDepthLayer.cc index b5011ce4e68..96ff065092a 100644 --- a/runtime/onert/backend/cpu/ops/SpaceToDepthLayer.cc +++ b/runtime/onert/backend/cpu/ops/SpaceToDepthLayer.cc @@ -19,6 +19,7 @@ #include "OperationUtils.h" #include +#include namespace onert::backend::cpu::ops { @@ -57,7 +58,7 @@ void SpaceToDepthLayer::run() } else { - throw std::runtime_error{"SpaceToDepth: unsupported data type"}; + throw UnsupportedDataTypeException{"SpaceToDepth", _input->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/SplitLayer.cc b/runtime/onert/backend/cpu/ops/SplitLayer.cc index 05ab632b2a5..20b549e8ffb 100644 --- a/runtime/onert/backend/cpu/ops/SplitLayer.cc +++ b/runtime/onert/backend/cpu/ops/SplitLayer.cc @@ -19,6 +19,7 @@ #include "OperationUtils.h" #include +#include namespace onert::backend::cpu::ops { @@ -87,7 +88,7 @@ void SplitLayer::run() } else { - throw std::runtime_error{"Split: unsupported input type"}; + throw UnsupportedDataTypeException{"Split", _input->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/SplitVLayer.cc b/runtime/onert/backend/cpu/ops/SplitVLayer.cc index 3fa41be44f9..17e99f5cc89 100644 --- a/runtime/onert/backend/cpu/ops/SplitVLayer.cc +++ b/runtime/onert/backend/cpu/ops/SplitVLayer.cc @@ -19,6 +19,7 @@ #include "OperationUtils.h" #include +#include namespace onert::backend::cpu::ops { @@ -83,7 +84,7 @@ void SplitVLayer::run() } else { - throw std::runtime_error{"SplitV: unsupported input type"}; + throw UnsupportedDataTypeException{"SplitV", _input->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/SquaredDifferenceLayer.cc b/runtime/onert/backend/cpu/ops/SquaredDifferenceLayer.cc index 273b3730327..73bdd87b331 100644 --- a/runtime/onert/backend/cpu/ops/SquaredDifferenceLayer.cc +++ b/runtime/onert/backend/cpu/ops/SquaredDifferenceLayer.cc @@ -19,6 +19,7 @@ #include "OperationUtils.h" #include +#include 
namespace onert::backend::cpu::ops { @@ -50,7 +51,7 @@ void SqDiffLayer::run() } else { - throw std::runtime_error{"SquaredDiff: unsupported data type"}; + throw UnsupportedDataTypeException{"SquaredDiff", _input1->data_type()}; } } } // namespace onert::backend::cpu::ops diff --git a/runtime/onert/backend/cpu/ops/StatelessRandomUniformLayer.cc b/runtime/onert/backend/cpu/ops/StatelessRandomUniformLayer.cc index ffd0b7fb5ed..a01ca75e18f 100644 --- a/runtime/onert/backend/cpu/ops/StatelessRandomUniformLayer.cc +++ b/runtime/onert/backend/cpu/ops/StatelessRandomUniformLayer.cc @@ -17,6 +17,7 @@ #include "StatelessRandomUniformLayer.h" #include +#include namespace onert::backend::cpu::ops { @@ -51,7 +52,7 @@ void StatelessRandomUniformLayer::run() StatelessRandomUniformFloat32(); break; default: - throw std::runtime_error{"StatelessRandomUniformLayer: unsupported data type"}; + throw UnsupportedDataTypeException{"StatelessRandomUniform", _output->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/StridedSliceLayer.cc b/runtime/onert/backend/cpu/ops/StridedSliceLayer.cc index 3012272f186..9b7d18e5b80 100644 --- a/runtime/onert/backend/cpu/ops/StridedSliceLayer.cc +++ b/runtime/onert/backend/cpu/ops/StridedSliceLayer.cc @@ -19,6 +19,7 @@ #include "OperationUtils.h" #include +#include namespace onert::backend::cpu::ops { @@ -81,7 +82,7 @@ void StridedSliceLayer::run() } else { - throw std::runtime_error{"StridedSlice: unsupported data type"}; + throw UnsupportedDataTypeException{"StridedSlice", _input->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/TileLayer.cc b/runtime/onert/backend/cpu/ops/TileLayer.cc index 9b0b1dad947..c62c56abeb2 100644 --- a/runtime/onert/backend/cpu/ops/TileLayer.cc +++ b/runtime/onert/backend/cpu/ops/TileLayer.cc @@ -19,6 +19,7 @@ #include "OperationUtils.h" #include +#include namespace onert::backend::cpu::ops { @@ -60,7 +61,7 @@ void TileLayer::run() } else { - throw std::runtime_error{"Tile: unsupported data type"}; + 
throw UnsupportedDataTypeException{"Tile", _input->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/TopKV2Layer.cc b/runtime/onert/backend/cpu/ops/TopKV2Layer.cc index 5951dd43ab7..a892c4fe24d 100644 --- a/runtime/onert/backend/cpu/ops/TopKV2Layer.cc +++ b/runtime/onert/backend/cpu/ops/TopKV2Layer.cc @@ -19,6 +19,7 @@ #include "OperationUtils.h" #include +#include namespace onert::backend::cpu::ops { @@ -41,7 +42,7 @@ void TopKV2Layer::configure(const IPortableTensor *input, IPortableTensor *outpu void TopKV2Layer::run() { if (_output_indices->data_type() != OperandType::INT32) - throw std::runtime_error{"TopKV2: unsupported output indices type"}; + throw UnsupportedDataTypeException{"TopKV2", _output_indices->data_type()}; if (_input->data_type() == OperandType::FLOAT32) { @@ -63,7 +64,7 @@ void TopKV2Layer::run() } else { - throw std::runtime_error{"TopKV2: unsupported data type"}; + throw UnsupportedDataTypeException{"TopKV2", _input->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/TransposeLayer.cc b/runtime/onert/backend/cpu/ops/TransposeLayer.cc index a2592cdcbb8..b2561dc5747 100644 --- a/runtime/onert/backend/cpu/ops/TransposeLayer.cc +++ b/runtime/onert/backend/cpu/ops/TransposeLayer.cc @@ -19,6 +19,7 @@ #include "OperationUtils.h" #include +#include #include namespace onert::backend::cpu::ops @@ -95,7 +96,7 @@ void TransposeLayer::run() } else { - throw std::runtime_error{"Transpose: unsupported data type"}; + throw UnsupportedDataTypeException{"Transpose", _input->data_type()}; } } diff --git a/runtime/onert/backend/cpu/ops/UnpackLayer.cc b/runtime/onert/backend/cpu/ops/UnpackLayer.cc index bb592b8e953..fb4ffc887c3 100644 --- a/runtime/onert/backend/cpu/ops/UnpackLayer.cc +++ b/runtime/onert/backend/cpu/ops/UnpackLayer.cc @@ -19,6 +19,7 @@ #include "OperationUtils.h" #include +#include namespace onert::backend::cpu::ops { @@ -76,7 +77,7 @@ void UnpackLayer::run() else if (_input->data_type() == OperandType::INT32) unpackImpl(); 
else - throw std::runtime_error{"Unpack: Unsupported data type"}; + throw UnsupportedDataTypeException{"Unpack", _input->data_type()}; } } // namespace onert::backend::cpu::ops diff --git a/runtime/onert/backend/ruy/ops/Conv2DLayer.cc b/runtime/onert/backend/ruy/ops/Conv2DLayer.cc index 9a1f7309c2d..d9d633b1262 100644 --- a/runtime/onert/backend/ruy/ops/Conv2DLayer.cc +++ b/runtime/onert/backend/ruy/ops/Conv2DLayer.cc @@ -18,6 +18,7 @@ #include "../Tensor.h" #include "ir/Padding.h" +#include namespace onert::backend::ruy::ops { @@ -123,7 +124,7 @@ void ConvolutionLayer::run() } else { - throw std::runtime_error{"Conv: unsupported data type"}; + throw UnsupportedDataTypeException{"Conv2D", _input->data_type()}; } } diff --git a/runtime/onert/backend/ruy/ops/FullyConnectedLayer.cc b/runtime/onert/backend/ruy/ops/FullyConnectedLayer.cc index cddefc4dbf4..33225e5c6db 100644 --- a/runtime/onert/backend/ruy/ops/FullyConnectedLayer.cc +++ b/runtime/onert/backend/ruy/ops/FullyConnectedLayer.cc @@ -19,6 +19,7 @@ #include "../Tensor.h" #include #include +#include namespace onert::backend::ruy::ops { @@ -73,7 +74,7 @@ void FullyConnectedLayer::run() } else { - throw std::runtime_error{"FullyConnected: unsupported data type"}; + throw UnsupportedDataTypeException{"FullyConnected", _input->data_type()}; } } diff --git a/runtime/onert/backend/train/ops/BinaryArithmeticLayer.cc b/runtime/onert/backend/train/ops/BinaryArithmeticLayer.cc index 61ed838b8c6..9ddfb2c62ed 100644 --- a/runtime/onert/backend/train/ops/BinaryArithmeticLayer.cc +++ b/runtime/onert/backend/train/ops/BinaryArithmeticLayer.cc @@ -23,6 +23,7 @@ #include #include #include +#include namespace onert::backend::train::ops { @@ -60,7 +61,7 @@ void BinaryArithmeticLayer::backward() { // Calculate gradient for activation if (_back_prop_output->data_type() != OperandType::FLOAT32) - throw std::runtime_error{"Unsupported Data Type"}; + throw UnsupportedDataTypeException{"train BinaryArithmetic", 
_back_prop_output->data_type()}; const IPortableTensor *backprop_act; try diff --git a/runtime/onert/backend/train/ops/ConvolutionLayer.cc b/runtime/onert/backend/train/ops/ConvolutionLayer.cc index eeb56ffc244..06648c517d4 100644 --- a/runtime/onert/backend/train/ops/ConvolutionLayer.cc +++ b/runtime/onert/backend/train/ops/ConvolutionLayer.cc @@ -22,8 +22,8 @@ #include #include #include - #include +#include namespace { @@ -108,7 +108,7 @@ void ConvolutionLayer::backward() break; } default: - throw std::runtime_error{"train ConvolutionLayer: unsupported data type"}; + throw UnsupportedDataTypeException{"train Conv", data_type}; } } diff --git a/runtime/onert/backend/train/ops/DepthwiseConvolutionLayer.cc b/runtime/onert/backend/train/ops/DepthwiseConvolutionLayer.cc index 4d6f6a284be..ae1595cd9e3 100644 --- a/runtime/onert/backend/train/ops/DepthwiseConvolutionLayer.cc +++ b/runtime/onert/backend/train/ops/DepthwiseConvolutionLayer.cc @@ -21,6 +21,7 @@ #include #include #include +#include namespace onert::backend::train::ops { @@ -64,7 +65,7 @@ void DepthwiseConvolutionLayer::configureBackward(IPortableTensor *back_prop_inp return nnfw::cker::eigen_support::kPacketSize(); } default: - throw std::runtime_error("train DepthwiseConvolutionLayer: unsupported data type"); + throw UnsupportedDataTypeException{"train DepthwiseConv", data_type}; } }(); @@ -121,7 +122,7 @@ void DepthwiseConvolutionLayer::backward() break; } default: - throw std::runtime_error{"train DepthwiseConvolutionLayer: unsupported data type"}; + throw UnsupportedDataTypeException{"train DepthwiseConv", data_type}; } } diff --git a/runtime/onert/backend/train/ops/ElementwiseActivationLayer.cc b/runtime/onert/backend/train/ops/ElementwiseActivationLayer.cc index 8e2ce254e8d..0306f45644c 100644 --- a/runtime/onert/backend/train/ops/ElementwiseActivationLayer.cc +++ b/runtime/onert/backend/train/ops/ElementwiseActivationLayer.cc @@ -20,6 +20,7 @@ #include #include +#include namespace 
onert::backend::train::ops { @@ -75,7 +76,7 @@ void ElementwiseActivationLayer::configureBackward(const IPortableTensor *input, } else { - throw std::runtime_error("train ElementwiseActivationLayer: Unsupported datatype"); + throw UnsupportedDataTypeException{"train ElementwiseActivation", input->data_type()}; } break; default: diff --git a/runtime/onert/backend/train/ops/FullyConnectedLayer.cc b/runtime/onert/backend/train/ops/FullyConnectedLayer.cc index c581742ea4f..7e34145c14a 100644 --- a/runtime/onert/backend/train/ops/FullyConnectedLayer.cc +++ b/runtime/onert/backend/train/ops/FullyConnectedLayer.cc @@ -22,6 +22,7 @@ #include #include #include +#include namespace { @@ -113,7 +114,7 @@ void FullyConnectedLayer::backward() break; } default: - throw std::runtime_error{"train FullyConnectedLayer: unsupported data type"}; + throw UnsupportedDataTypeException{"train FullyConnected", data_type}; } } diff --git a/runtime/onert/backend/train/ops/LossCategoricalCrossentropyLayer.cc b/runtime/onert/backend/train/ops/LossCategoricalCrossentropyLayer.cc index 17b54d2333a..aef4a640748 100644 --- a/runtime/onert/backend/train/ops/LossCategoricalCrossentropyLayer.cc +++ b/runtime/onert/backend/train/ops/LossCategoricalCrossentropyLayer.cc @@ -18,6 +18,7 @@ #include "OperationUtils.h" #include +#include namespace onert::backend::train::ops { @@ -44,7 +45,7 @@ void LossCategoricalCrossentropyLayer::forward(bool) } else { - throw std::runtime_error("LossCategoricalCrossentropyLayer: unsupported data type"); + throw UnsupportedDataTypeException{"train LossCategoricalCrossEntropy", _y_pred->data_type()}; } } @@ -72,7 +73,7 @@ void LossCategoricalCrossentropyLayer::backward() } else { - throw std::runtime_error("LossCategoricalCrossentropyLayer: unsupported data type"); + throw UnsupportedDataTypeException{"train LossCategoricalCrossEntropy", _y_pred->data_type()}; } } diff --git a/runtime/onert/backend/train/ops/LossMeanSquaredErrorLayer.cc 
b/runtime/onert/backend/train/ops/LossMeanSquaredErrorLayer.cc index 95ab2e376f0..1740a538cc5 100644 --- a/runtime/onert/backend/train/ops/LossMeanSquaredErrorLayer.cc +++ b/runtime/onert/backend/train/ops/LossMeanSquaredErrorLayer.cc @@ -18,6 +18,7 @@ #include "OperationUtils.h" #include +#include namespace onert::backend::train::ops { @@ -40,7 +41,7 @@ void LossMeanSquaredErrorLayer::forward(bool) } else { - throw std::runtime_error("LossMeanSquaredErrorLayer: unsupported data type"); + throw UnsupportedDataTypeException{"train LossMeanSquaredError", _y_pred->data_type()}; } } @@ -57,7 +58,7 @@ void LossMeanSquaredErrorLayer::backward() } else { - throw std::runtime_error("LossMeanSquaredErrorLayer: unsupported data type"); + throw UnsupportedDataTypeException{"train LossMeanSquaredError", _y_pred->data_type()}; } } diff --git a/runtime/onert/backend/train/ops/MeanLayer.cc b/runtime/onert/backend/train/ops/MeanLayer.cc index 82de9007e14..4cd90ac804f 100644 --- a/runtime/onert/backend/train/ops/MeanLayer.cc +++ b/runtime/onert/backend/train/ops/MeanLayer.cc @@ -21,6 +21,7 @@ #include #include #include +#include namespace onert::backend::train::ops { @@ -49,7 +50,7 @@ void MeanLayer::backward() if (_keep_dims == false) { keep_dim_shape.ReplaceWith(getShape(_input)); - auto axes_vec = cpu::ops::getReducerAxes(_axes); + auto axes_vec = cpu::ops::getReducerAxes("train Mean", _axes); for (const auto &axis : axes_vec) { keep_dim_shape.SetDim(axis, 1); @@ -69,7 +70,7 @@ void MeanLayer::backward() break; } default: - throw std::runtime_error("train MeanLayer: unsupported data type"); + throw UnsupportedDataTypeException{"train Mean", _back_prop_output->data_type()}; } } diff --git a/runtime/onert/backend/train/ops/PadLayer.cc b/runtime/onert/backend/train/ops/PadLayer.cc index 8851dcd97a8..80b47b87c8a 100644 --- a/runtime/onert/backend/train/ops/PadLayer.cc +++ b/runtime/onert/backend/train/ops/PadLayer.cc @@ -17,6 +17,7 @@ #include "PadLayer.h" #include +#include 
namespace onert::backend::train::ops { @@ -60,7 +61,7 @@ void PadLayer::backward() depad(); break; default: - throw std::runtime_error{"Pad: unsupported data type"}; + throw UnsupportedDataTypeException{"train Pad", _back_prop_output->data_type()}; } } diff --git a/runtime/onert/backend/train/ops/PoolLayer.cc b/runtime/onert/backend/train/ops/PoolLayer.cc index 44e88b8d955..61121da3548 100644 --- a/runtime/onert/backend/train/ops/PoolLayer.cc +++ b/runtime/onert/backend/train/ops/PoolLayer.cc @@ -23,6 +23,7 @@ #include #include #include +#include namespace onert::backend::train::ops { @@ -198,7 +199,7 @@ void PoolLayer::configureBackward(const uint32_t paddingLeft, const uint32_t pad if (output->data_type() != OperandType::FLOAT32) { - throw std::runtime_error("PoolLayer : Unsupported data type for training"); + throw UnsupportedDataTypeException{"train Pool", output->data_type()}; } // ready training kernel diff --git a/runtime/onert/backend/train/ops/SoftMaxLayer.cc b/runtime/onert/backend/train/ops/SoftMaxLayer.cc index a13c64eb91a..727d9bc0f60 100644 --- a/runtime/onert/backend/train/ops/SoftMaxLayer.cc +++ b/runtime/onert/backend/train/ops/SoftMaxLayer.cc @@ -19,6 +19,7 @@ #include "OperationUtils.h" #include +#include namespace onert::backend::train::ops { @@ -52,7 +53,7 @@ void SoftMaxLayer::backward() break; } default: - throw std::runtime_error("train SoftMaxLayer: unsupported data type"); + throw UnsupportedDataTypeException{"train SoftMax", _back_prop_output->data_type()}; } } diff --git a/runtime/onert/backend/train/optimizer/Adam.cc b/runtime/onert/backend/train/optimizer/Adam.cc index 9443b56bcd2..a881e6c8867 100644 --- a/runtime/onert/backend/train/optimizer/Adam.cc +++ b/runtime/onert/backend/train/optimizer/Adam.cc @@ -20,6 +20,7 @@ #include #include #include +#include namespace onert::backend::train::optimizer { @@ -63,7 +64,7 @@ void Adam::applyGradient(const UpdateFactors &factors) const use_nesterov); break; default: - throw 
std::runtime_error("Adam: Not supported data type"); + throw UnsupportedDataTypeException{"train Adam", grad_tensor.data_type()}; } } diff --git a/runtime/onert/backend/train/optimizer/SGD.cc b/runtime/onert/backend/train/optimizer/SGD.cc index 826617f561d..08b26c018c5 100644 --- a/runtime/onert/backend/train/optimizer/SGD.cc +++ b/runtime/onert/backend/train/optimizer/SGD.cc @@ -19,6 +19,7 @@ #include "../ops/OperationUtils.h" #include +#include namespace onert::backend::train::optimizer { @@ -48,7 +49,7 @@ void SGD::applyGradient(const UpdateFactors &factors) const ops::getShape(&grad_tensor), ops::getBuffer(&grad_tensor), lr); break; default: - throw std::runtime_error("SGD: Not supported data type"); + throw UnsupportedDataTypeException{"SGD", grad_tensor.data_type()}; } } diff --git a/runtime/onert/backend/trix/Convert.cc b/runtime/onert/backend/trix/Convert.cc index 26992c9e932..b8e3d64f529 100644 --- a/runtime/onert/backend/trix/Convert.cc +++ b/runtime/onert/backend/trix/Convert.cc @@ -16,6 +16,8 @@ #include "Convert.h" +#include + namespace onert::backend::trix { @@ -28,7 +30,7 @@ data_type convertDataType(const ir::DataType type) case ir::DataType::QUANT_INT16_SYMM: return DATA_TYPE_QSYMM16; default: - throw std::runtime_error("Unsupported data type"); + throw UnsupportedDataTypeException{type}; } } diff --git a/runtime/onert/core/include/ir/DataType.h b/runtime/onert/core/include/ir/DataType.h index a4bc87bf458..548f959c1bc 100644 --- a/runtime/onert/core/include/ir/DataType.h +++ b/runtime/onert/core/include/ir/DataType.h @@ -18,6 +18,7 @@ #define __ONERT_IR_DATATYPE_H__ #include +#include namespace onert::ir { @@ -43,6 +44,7 @@ enum class DataType size_t sizeOfDataType(DataType data_type); bool requireQuantParam(DataType data_type); +std::string toString(DataType data_type); } // namespace onert::ir diff --git a/runtime/onert/core/include/util/Exceptions.h b/runtime/onert/core/include/util/Exceptions.h index e7768659365..67234d15616 100644 --- 
a/runtime/onert/core/include/util/Exceptions.h +++ b/runtime/onert/core/include/util/Exceptions.h @@ -19,14 +19,16 @@ #include +#include + namespace onert { -class OnertException : public std::exception +class Exception : public std::exception { public: - OnertException(const std::string &msg) : _msg{msg} {} - OnertException(const std::string &tag, const std::string &msg) : _msg{tag + " : " + msg} {} + Exception(const std::string &msg) : _msg{msg} {} + Exception(const std::string &tag, const std::string &msg) : _msg{tag + ": " + msg} {} const char *what() const noexcept override { return _msg.c_str(); } @@ -34,11 +36,24 @@ class OnertException : public std::exception std::string _msg; }; -class InsufficientBufferSizeException : public OnertException +class InsufficientBufferSizeException : public Exception { public: InsufficientBufferSizeException(const std::string &msg) - : OnertException{"InsufficientBufferSize", msg} + : Exception{"Insufficient buffer size", msg} + { + } +}; + +class UnsupportedDataTypeException : public Exception +{ +public: + UnsupportedDataTypeException(ir::DataType dt) + : Exception{"Unsupported data type", ir::toString(dt)} + { + } + UnsupportedDataTypeException(const std::string &tag, ir::DataType dt) + : Exception{tag + ": Unsupported data type", ir::toString(dt)} { } }; diff --git a/runtime/onert/core/src/compiler/StaticShapeInferer.cc b/runtime/onert/core/src/compiler/StaticShapeInferer.cc index 60c690c280e..671f2aae6a4 100644 --- a/runtime/onert/core/src/compiler/StaticShapeInferer.cc +++ b/runtime/onert/core/src/compiler/StaticShapeInferer.cc @@ -19,6 +19,7 @@ #include "util/logging.h" #include +#include #include #include @@ -980,21 +981,17 @@ void StaticShapeInferer::visit(const ir::operation::Reduce &op) std::vector axes_vec; for (size_t i = 0; i < axes.shape().num_elements(); ++i) { - switch (axes.typeInfo().type()) + const auto type = axes.typeInfo().type(); + switch (type) { case ir::DataType::INT32: - { 
axes_vec.emplace_back(reinterpret_cast(axes.data()->base())[i]); break; - } case ir::DataType::INT64: - { axes_vec.emplace_back(reinterpret_cast(axes.data()->base())[i]); break; - } default: - throw std::runtime_error("StaticShapeInferer " + op.name() + ": Not supported data type"); - break; + throw UnsupportedDataTypeException{"StaticShapeInferer " + op.name(), type}; } } const auto keep_dims = op.param().keep_dims; diff --git a/runtime/onert/core/src/exec/DynamicShapeInferer.cc b/runtime/onert/core/src/exec/DynamicShapeInferer.cc index 0f26623f3c4..f3b653a9309 100644 --- a/runtime/onert/core/src/exec/DynamicShapeInferer.cc +++ b/runtime/onert/core/src/exec/DynamicShapeInferer.cc @@ -16,6 +16,7 @@ #include "exec/DynamicShapeInferer.h" #include "util/ShapeInference.h" +#include #include namespace onert::exec @@ -846,22 +847,18 @@ void DynamicShapeInferer::visit(const ir::operation::Reduce &op) std::vector axes_vec; for (uint32_t i = 0; i < axes->getShape().num_elements(); ++i) { + const auto type = axes->data_type(); const auto buffer = axes->buffer() + axes->calcOffset({i}); - switch (axes->data_type()) + switch (type) { case ir::DataType::INT32: - { axes_vec.emplace_back(*reinterpret_cast(buffer)); break; - } case ir::DataType::INT64: - { axes_vec.emplace_back(*reinterpret_cast(buffer)); break; - } default: - throw std::runtime_error("DynamicShapeInferer " + op.name() + ": Not supported data type"); - break; + throw UnsupportedDataTypeException{"DynamicShapeInferer " + op.name(), type}; } } const auto keep_dims = op.param().keep_dims; @@ -978,7 +975,7 @@ void DynamicShapeInferer::visit(const ir::operation::ResizeBilinear &op) } else { - throw std::runtime_error("DynamicShapeInferer ResizeBilinear : Unsupported data type"); + throw UnsupportedDataTypeException{"DynamicShapeInferer ResizeBilinear", size->data_type()}; } } else diff --git a/runtime/onert/core/src/exec/IPermuteFunction.cc b/runtime/onert/core/src/exec/IPermuteFunction.cc index 
2ad47fb340f..08ad0016486 100644 --- a/runtime/onert/core/src/exec/IPermuteFunction.cc +++ b/runtime/onert/core/src/exec/IPermuteFunction.cc @@ -24,6 +24,7 @@ #include "ir/Shape.h" #include #include +#include #include #include "util/Utils.h" #include @@ -144,10 +145,12 @@ template = true> void typeAwareQuantize(const SRC_T *src_tensor, DST_T *dst_tensor, const ir::PermuteType &type) { + const auto src_data_type = src_tensor->data_type(); + const auto dst_data_type = dst_tensor->data_type(); // TODO Support other types - if (src_tensor->data_type() == ir::DataType::FLOAT32) + if (src_data_type == ir::DataType::FLOAT32) { - switch (dst_tensor->data_type()) + switch (dst_data_type) { case ir::DataType::QUANT_UINT8_ASYMM: { @@ -166,14 +169,14 @@ void typeAwareQuantize(const SRC_T *src_tensor, DST_T *dst_tensor, const ir::Per } default: { - throw std::runtime_error("IPermuteFunction: Unsupported quantization type"); + throw UnsupportedDataTypeException{"IPermuteFunction: Quantize", dst_data_type}; break; } } } - else if (dst_tensor->data_type() == ir::DataType::FLOAT32) + else if (dst_data_type == ir::DataType::FLOAT32) { - switch (src_tensor->data_type()) + switch (src_data_type) { case ir::DataType::QUANT_UINT8_ASYMM: { @@ -192,7 +195,7 @@ void typeAwareQuantize(const SRC_T *src_tensor, DST_T *dst_tensor, const ir::Per } default: { - throw std::runtime_error("IPermuteFunction: Unsupported dequantization type"); + throw UnsupportedDataTypeException{"IPermuteFunction: Dequantize", src_data_type}; break; } } @@ -280,7 +283,7 @@ void IPermuteFunction::permute(backend::ITensor *src_tensor, backend::ITensor *d permute(src_tensor, dst_tensor, rank, src_offsets, dst_offsets, permute_type); break; default: - throw std::runtime_error("IPermuteFunction: Not supported data type"); + throw UnsupportedDataTypeException{"IPermuteFunction", src_tensor->data_type()}; break; } } @@ -307,7 +310,7 @@ const std::type_info &IPermuteFunction::underlying_type(ir::DataType type) const case 
ir::DataType::QUANT_INT16_SYMM: return typeid(int16_t); default: - throw std::runtime_error("IPermuteFunction: Not supported data type"); + throw UnsupportedDataTypeException{"IPermuteFunction", type}; } } diff --git a/runtime/onert/core/src/ir/DataType.cc b/runtime/onert/core/src/ir/DataType.cc index a4ce84a98bf..dfc379fd393 100644 --- a/runtime/onert/core/src/ir/DataType.cc +++ b/runtime/onert/core/src/ir/DataType.cc @@ -16,8 +16,8 @@ #include "ir/DataType.h" -#include #include +#include using float16 = Half; @@ -48,10 +48,12 @@ size_t sizeOfDataType(DataType data_type) return sizeof(int64_t); case DataType::QUANT_INT16_SYMM: return sizeof(int16_t); - default: - // ggml block quantize type data size is not supported - throw std::runtime_error{"Unsupported type size"}; + case DataType::QUANT_GGML_Q4_0: + case DataType::QUANT_GGML_Q8_0: + // GGML block quantize type data size is not supported + break; } + throw UnsupportedDataTypeException{data_type}; } bool requireQuantParam(DataType data_type) @@ -85,9 +87,47 @@ bool requireQuantParam(DataType data_type) case DataType::QUANT_GGML_Q8_0: // Quantize type, but no quantization parameter return false; - default: - throw std::runtime_error{"Unsupported type"}; } + throw UnsupportedDataTypeException{data_type}; +} + +std::string toString(DataType data_type) +{ + switch (data_type) + { + case DataType::FLOAT32: + return "FLOAT32"; + case DataType::INT32: + return "INT32"; + case DataType::UINT32: + return "UINT32"; + case DataType::QUANT_UINT8_ASYMM: + return "QUANT_UINT8_ASYMM"; + case DataType::BOOL8: + return "BOOL8"; + case DataType::UINT8: + return "UINT8"; + case DataType::QUANT_INT8_SYMM: + return "QUANT_INT8_SYMM"; + case DataType::FLOAT16: + return "FLOAT16"; + case DataType::INT64: + return "INT64"; + case DataType::QUANT_INT8_ASYMM: + return "QUANT_INT8_ASYMM"; + case DataType::QUANT_INT8_SYMM_PER_CHANNEL: + return "QUANT_INT8_SYMM_PER_CHANNEL"; + case DataType::QUANT_INT16_SYMM: + return "QUANT_INT16_SYMM"; + 
case DataType::QUANT_GGML_Q4_0: + return "QUANT_GGML_Q4_0"; + case DataType::QUANT_GGML_Q8_0: + return "QUANT_GGML_Q8_0"; + } + // The list of data types is fixed (enum type), so compiler should warn us + // if we miss any case. However, if for some reason we reach here, at least + // we can provide some information to the caller. + return std::to_string(static_cast<int>(data_type)); } } // namespace onert::ir diff --git a/runtime/onert/core/src/ir/OperandInfo.cc b/runtime/onert/core/src/ir/OperandInfo.cc index d4f18bb86ee..653d3a1ded4 100644 --- a/runtime/onert/core/src/ir/OperandInfo.cc +++ b/runtime/onert/core/src/ir/OperandInfo.cc @@ -15,6 +15,7 @@ */ #include "ir/OperandInfo.h" +#include "util/Exceptions.h" namespace onert::ir { @@ -26,7 +27,7 @@ size_t OperandInfo::total_size() const { return _shape.num_elements() * sizeOfDataType(data_type); } - catch (const std::runtime_error &e) + catch (const onert::UnsupportedDataTypeException &e) { // Calculate total size for ggml block quantization type on exception handling // because it is rare case and we should care about performance on non-block case. diff --git a/runtime/onert/core/src/ir/OperandInfo.test.cc b/runtime/onert/core/src/ir/OperandInfo.test.cc index 4a04189df43..baf3b13a50f 100644 --- a/runtime/onert/core/src/ir/OperandInfo.test.cc +++ b/runtime/onert/core/src/ir/OperandInfo.test.cc @@ -15,6 +15,7 @@ */ #include "ir/OperandInfo.h" +#include "util/Exceptions.h" #include <gtest/gtest.h> @@ -37,7 +38,7 @@ TEST(ir_OperandInfo, total_size) TEST(ir_OperandInfo, neg_total_size_type) { auto info = OperandInfo::createStaticInfo(Shape{1, 2, 3}, TypeInfo{DataType{-1}}); - EXPECT_THROW(info.total_size(), std::runtime_error); + EXPECT_THROW(info.total_size(), onert::UnsupportedDataTypeException); } // Unsupported shape