Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions runtime/onert/api/nnfw/include/nnfw.h
Original file line number Diff line number Diff line change
Expand Up @@ -125,6 +125,8 @@ typedef enum
NNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE = 5,
/** When API is deprecated */
NNFW_STATUS_DEPRECATED_API = 6,
/** When given input or output data type is not supported. */
NNFW_STATUS_UNSUPPORTED_DATA_TYPE = 7,
} NNFW_STATUS;

/**
Expand Down
4 changes: 3 additions & 1 deletion runtime/onert/api/nnfw/src/CustomKernel.cc
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,8 @@

#include "CustomKernel.h"

#include <util/Exceptions.h>

namespace onert::api
{

Expand Down Expand Up @@ -54,7 +56,7 @@ class APIConverter
api_type.dtype = NNFW_TYPE_TENSOR_BOOL;
break;
default:
throw std::runtime_error("Unsupported tensor datatype");
throw UnsupportedDataTypeException("Converter", type.dtype);
}
return api_type;
}
Expand Down
5 changes: 5 additions & 0 deletions runtime/onert/api/nnfw/src/nnfw_session.cc
Original file line number Diff line number Diff line change
Expand Up @@ -502,6 +502,11 @@ NNFW_STATUS nnfw_session::run()
std::cerr << "Error during nnfw_session::run : " << e.what() << std::endl;
return NNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE;
}
catch (const onert::UnsupportedDataTypeException &e)
{
std::cerr << "Error during nnfw_session::run : " << e.what() << std::endl;
return NNFW_STATUS_UNSUPPORTED_DATA_TYPE;
}
catch (const std::exception &e)
{
std::cerr << "Error during nnfw_session::run : " << e.what() << std::endl;
Expand Down
4 changes: 4 additions & 0 deletions runtime/onert/api/python/include/nnfw_exceptions.h
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,10 @@ struct NnfwDeprecatedApiError : public NnfwError
{
using NnfwError::NnfwError;
};
struct NnfwUnsupportedDataTypeError : public NnfwError
{
using NnfwError::NnfwError;
};

} // namespace onert::api::python

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,8 @@ void bind_nnfw_exceptions(py::module_ &m)
m.attr("OnertError").cast<py::object>());
py::register_exception<NnfwDeprecatedApiError>(m, "OnertDeprecatedApiError",
m.attr("OnertError").cast<py::object>());
py::register_exception<NnfwUnsupportedDataTypeError>(m, "OnertUnsupportedDataTypeError",
m.attr("OnertError").cast<py::object>());
}

} // namespace onert::api::python
5 changes: 3 additions & 2 deletions runtime/onert/api/python/src/wrapper/nnfw_api_wrapper.cc
Original file line number Diff line number Diff line change
Expand Up @@ -42,9 +42,10 @@ void ensure_status(NNFW_STATUS status)
throw NnfwInsufficientOutputError("NNFW_STATUS_INSUFFICIENT_OUTPUT_SIZE");
case NNFW_STATUS::NNFW_STATUS_DEPRECATED_API:
throw NnfwDeprecatedApiError("NNFW_STATUS_DEPRECATED_API");
default:
throw NnfwError("NNFW_UNKNOWN_ERROR");
case NNFW_STATUS::NNFW_STATUS_UNSUPPORTED_DATA_TYPE:
throw NnfwUnsupportedDataTypeError("NNFW_STATUS_UNSUPPORTED_DATA_TYPE");
}
throw NnfwError("NNFW_UNKNOWN_ERROR");
}

NNFW_LAYOUT getLayout(const char *layout)
Expand Down
17 changes: 9 additions & 8 deletions runtime/onert/backend/acl_common/Convert.cc
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
#include "Swizzle.h"
#include "ir/DataType.h"
#include "ir/operation/ElementwiseActivation.h"
#include <util/Exceptions.h>
#include <memory>

namespace onert::backend::acl_common
Expand Down Expand Up @@ -96,8 +97,7 @@ ::arm_compute::DataType asDataType(const ir::DataType type)
case ir::DataType::QUANT_INT8_SYMM_PER_CHANNEL:
return ::arm_compute::DataType::QSYMM8_PER_CHANNEL;
default:
throw std::runtime_error("Not supported internal data type, yet");
break;
throw UnsupportedDataTypeException(type);
}
}

Expand Down Expand Up @@ -224,7 +224,8 @@ std::set<uint32_t> asSet(const ir::Operand &operand, int32_t rank)
for (size_t i = 0; i < operand.shape().num_elements(); ++i)
{
int32_t axis = 0;
switch (operand.typeInfo().type())
const auto data_type = operand.typeInfo().type();
switch (data_type)
{
case ir::DataType::INT32:
axis = reinterpret_cast<const int32_t *>(operand.data()->base())[i];
Expand All @@ -233,7 +234,7 @@ std::set<uint32_t> asSet(const ir::Operand &operand, int32_t rank)
axis = reinterpret_cast<const int64_t *>(operand.data()->base())[i];
break;
default:
throw std::runtime_error("acl_common::asSet: Not supported data type");
throw UnsupportedDataTypeException("asSet", data_type);
}
if (axis < 0)
axis += rank;
Expand Down Expand Up @@ -273,8 +274,7 @@ ir::DataType asRuntimeDataType(::arm_compute::DataType data_type)
case ::arm_compute::DataType::QSYMM16:
return ir::DataType::QUANT_INT16_SYMM;
default:
throw std::runtime_error{"Not supported acl data type, yet"};
break;
throw UnsupportedDataTypeException("asRuntimeDataType", data_type);
}
}

Expand Down Expand Up @@ -312,7 +312,8 @@ arm_compute::PixelValue asPixelValue(const ir::Operand &operand)
{
assert(operand.isConstant());
assert(operand.shape().num_elements() == 1);
switch (operand.typeInfo().type())
const auto data_type = operand.typeInfo().type();
switch (data_type)
{
case ir::DataType::INT32:
return arm_compute::PixelValue(operand.asScalar<int32_t>());
Expand All @@ -325,7 +326,7 @@ arm_compute::PixelValue asPixelValue(const ir::Operand &operand)
case ir::DataType::FLOAT32:
return arm_compute::PixelValue(operand.asScalar<float>());
default:
throw std::runtime_error("asPixelValue : Not supported datatype yet");
throw UnsupportedDataTypeException("asPixelValue", data_type);
}
}

Expand Down
3 changes: 2 additions & 1 deletion runtime/onert/backend/cpu/ops/AddNLayer.cc
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
#include "OperationUtils.h"

#include <cker/operation/AddN.h>
#include <util/Exceptions.h>
#include <assert.h>

namespace onert::backend::cpu::ops
Expand Down Expand Up @@ -53,7 +54,7 @@ void AddNLayer::run()
}
else
{
throw std::runtime_error("AddN: unsupported data type");
throw UnsupportedDataTypeException{"AddN", _output->data_type()};
}
}

Expand Down
7 changes: 4 additions & 3 deletions runtime/onert/backend/cpu/ops/ArgMinMaxLayer.cc
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
#include "OperationUtils.h"

#include <cker/operation/ArgMinMax.h>
#include <util/Exceptions.h>
#include <assert.h>

namespace onert::backend::cpu::ops
Expand Down Expand Up @@ -79,7 +80,7 @@ void ArgMinMaxLayer::run()
TF_LITE_ARG_MIN_MAX(int32_t, int32_t, int32_t);
break;
default:
throw std::runtime_error("ArgMinMax: unsupported data type");
throw UnsupportedDataTypeException{"ArgMinMax", _input->data_type()};
}
}
else if (_output->data_type() == ir::DataType::INT64)
Expand All @@ -100,12 +101,12 @@ void ArgMinMaxLayer::run()
TF_LITE_ARG_MIN_MAX(int32_t, int32_t, int64_t);
break;
default:
throw std::runtime_error("ArgMinMax: unsupported data type");
throw UnsupportedDataTypeException{"ArgMinMax", _input->data_type()};
}
}
else
{
throw std::runtime_error("ArgMinMax: unsupported data type");
throw UnsupportedDataTypeException{"ArgMinMax", _output->data_type()};
}

#undef TF_LITE_ARG_MIN_MAX
Expand Down
14 changes: 6 additions & 8 deletions runtime/onert/backend/cpu/ops/BatchMatMulLayer.cc
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
#include "BatchMatMulLayer.h"

#include <cker/operation/BatchMatMul.h>
#include <util/Exceptions.h>

namespace onert::backend::cpu::ops
{
Expand Down Expand Up @@ -60,14 +61,11 @@ void BatchMatMulLayer::configure(const IPortableTensor *lhs, const IPortableTens

void BatchMatMulLayer::run()
{
if ((_lhs->data_type() == OperandType::FLOAT32) && (_rhs->data_type() == OperandType::FLOAT32))
{
batchMatMulFloat32();
}
else
{
throw std::runtime_error{"BatchMatMul: unsupported data type"};
}
if (_lhs->data_type() != OperandType::FLOAT32)
throw UnsupportedDataTypeException{"BatchMatMul", _lhs->data_type()};
if (_rhs->data_type() != OperandType::FLOAT32)
throw UnsupportedDataTypeException{"BatchMatMul", _rhs->data_type()};
batchMatMulFloat32();
}

#undef AVGPOOLING_PARAMETERS
Expand Down
3 changes: 2 additions & 1 deletion runtime/onert/backend/cpu/ops/BatchToSpaceNDLayer.cc
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
#include "BatchToSpaceNDLayer.h"

#include <cker/operation/BatchToSpaceND.h>
#include <util/Exceptions.h>

namespace onert::backend::cpu::ops
{
Expand Down Expand Up @@ -66,7 +67,7 @@ void BatchToSpaceNDLayer::run()
}
else
{
throw std::runtime_error{"NYI"};
throw UnsupportedDataTypeException{"BatchToSpaceND", _output->data_type()};
}
}

Expand Down
8 changes: 3 additions & 5 deletions runtime/onert/backend/cpu/ops/BinaryArithmeticLayer.cc
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
#include "BinaryArithmeticLayer.h"

#include <cker/operation/BinaryArithmeticOps.h>
#include <util/Exceptions.h>

namespace onert::backend::cpu::ops
{
Expand Down Expand Up @@ -120,7 +121,7 @@ generateKernelGeneric(const IPortableTensor *lhs, const IPortableTensor *rhs,
break;
}
default:
throw std::runtime_error{"BinaryArithmetic(generic): Unsupported data type"};
throw UnsupportedDataTypeException{"BinaryArithmetic(generic)", lhs->data_type()};
}
}

Expand Down Expand Up @@ -205,7 +206,6 @@ void BinaryArithmeticLayer::configure(const IPortableTensor *lhs, const IPortabl
_kernel =
Eval<nnfw::cker::BinaryArithmeticOpType::ADD, int8_t>(_lhs, _rhs, _output, op_params);
}

else
{
_kernel = generateKernelGeneric<nnfw::cker::BinaryArithmeticOpType::ADD>(
Expand All @@ -227,7 +227,6 @@ void BinaryArithmeticLayer::configure(const IPortableTensor *lhs, const IPortabl
_kernel =
Eval<nnfw::cker::BinaryArithmeticOpType::SUB, int8_t>(_lhs, _rhs, _output, op_params);
}

else
{
_kernel = generateKernelGeneric<nnfw::cker::BinaryArithmeticOpType::SUB>(
Expand Down Expand Up @@ -265,8 +264,7 @@ void BinaryArithmeticLayer::configure(const IPortableTensor *lhs, const IPortabl
{
// TODO Support quantized type
// TODO Support integer type with zero check
throw std::runtime_error{
"BinaryArithmetic(Div): Div operation does not support non-float data types yet"};
throw UnsupportedDataTypeException{"BinaryArithmetic(Div)", lhs->data_type()};
}
break;
default:
Expand Down
3 changes: 2 additions & 1 deletion runtime/onert/backend/cpu/ops/BroadcastToLayer.cc
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@
#include "BroadcastToLayer.h"

#include <cker/operation/BroadcastTo.h>
#include <util/Exceptions.h>

namespace onert::backend::cpu::ops
{
Expand Down Expand Up @@ -57,7 +58,7 @@ void BroadcastToLayer::run()
getShape(_output), getBuffer<uint32_t>(_output));
break;
default:
throw std::runtime_error{"BroadcastToLayer: unsupported data type"};
throw UnsupportedDataTypeException{"BroadcastTo", _output->data_type()};
}
}

Expand Down
4 changes: 3 additions & 1 deletion runtime/onert/backend/cpu/ops/ComparisonLayer.cc
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,8 @@

#include <assert.h>
#include <cker/operation/Comparison.h>
#include <util/Exceptions.h>

using namespace nnfw::cker;
namespace onert::backend::cpu::ops
{
Expand Down Expand Up @@ -156,7 +158,7 @@ void CompareLayer::run()
}
else
{
throw std::runtime_error{"Compare: unsupported data type"};
throw UnsupportedDataTypeException{"Compare", _lhs->data_type()};
}
}

Expand Down
3 changes: 2 additions & 1 deletion runtime/onert/backend/cpu/ops/ConcatLayer.cc
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
#include "OperationUtils.h"

#include <cker/operation/Concatenation.h>
#include <util/Exceptions.h>

namespace onert::backend::cpu::ops
{
Expand Down Expand Up @@ -131,7 +132,7 @@ void ConcatLayer::run()
concatenationGeneral<int64_t>();
break;
default:
throw std::runtime_error("Concat: unsupported data type");
throw UnsupportedDataTypeException{"Concat", _output->data_type()};
}
}

Expand Down
3 changes: 2 additions & 1 deletion runtime/onert/backend/cpu/ops/Conv2DLayer.cc
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@
#include "../Tensor.h"
#include "ir/Padding.h"
#include <cker/operation/Conv.h>
#include <util/Exceptions.h>

namespace onert::backend::cpu::ops
{
Expand Down Expand Up @@ -271,7 +272,7 @@ void ConvolutionLayer::run()
}
else
{
throw std::runtime_error{"Conv: unsupported data type"};
throw UnsupportedDataTypeException{"Conv2D", _input->data_type()};
}
}

Expand Down
3 changes: 2 additions & 1 deletion runtime/onert/backend/cpu/ops/DepthToSpaceLayer.cc
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@
#include "OperationUtils.h"

#include <cker/operation/DepthToSpace.h>
#include <util/Exceptions.h>

namespace onert::backend::cpu::ops
{
Expand Down Expand Up @@ -61,7 +62,7 @@ void DepthToSpaceLayer::run()
depthToSpace<int8_t>();
break;
default:
throw std::runtime_error{"DepthToSpace: unsupported data type"};
throw UnsupportedDataTypeException{"DepthToSpace", _input->data_type()};
}
}

Expand Down
3 changes: 2 additions & 1 deletion runtime/onert/backend/cpu/ops/DepthwiseConv2DLayer.cc
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@

#include "cker/PortableTensorUtils.h"
#include <cker/operation/DepthwiseConv.h>
#include <util/Exceptions.h>

namespace onert::backend::cpu::ops
{
Expand Down Expand Up @@ -317,7 +318,7 @@ void DepthwiseConvolutionLayer::run()
}
else
{
throw std::runtime_error{"DepthwiseConv: unsupported data type"};
throw UnsupportedDataTypeException{"DepthwiseConv", _input->data_type()};
}
}

Expand Down
3 changes: 2 additions & 1 deletion runtime/onert/backend/cpu/ops/DynamicUpdateSliceLayer.cc
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
#include "OperationUtils.h"

#include <cker/operation/DynamicUpdateSlice.h>
#include <util/Exceptions.h>

namespace onert::backend::cpu::ops
{
Expand Down Expand Up @@ -80,7 +81,7 @@ void DynamicUpdateSliceLayer::run()
getBuffer<int8_t>(_output));
break;
default:
throw std::runtime_error{"DynamicUpdateSlice: NYI - unsupported data type"};
throw UnsupportedDataTypeException{"DynamicUpdateSlice", _operand->data_type()};
break;
}
}
Expand Down
Loading
Loading