@@ -0,0 +1,117 @@
// Copyright (C) 2018-2025 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#pragma once

#include "openvino/core/partial_shape.hpp"
#include "openvino/core/type/element_type.hpp"
#include "openvino/frontend/decoder.hpp"
#include "openvino/frontend/onnx/visibility.hpp"

namespace ov {
namespace frontend {
namespace onnx {

struct ONNX_FRONTEND_API TensorMetaInfo {
ov::PartialShape m_partial_shape;
ov::element::Type m_element_type;
const uint8_t* m_tensor_data;
size_t m_tensor_data_size;
const std::string* m_tensor_name;
std::shared_ptr<std::string> m_external_location;
bool m_is_raw;
};

class ONNX_FRONTEND_API DecoderBase : public ov::frontend::DecoderBase {
public:
using Ptr = std::shared_ptr<DecoderBase>;
~DecoderBase() override;
};

struct ONNX_FRONTEND_API SparseTensorInfo {
ov::PartialShape m_partial_shape;
ov::frontend::onnx::DecoderBase::Ptr m_values, m_indices;
};

// DecoderBaseOperation corresponds to an operation node and provides access to its attributes and to information
// about its input and output tensors
class ONNX_FRONTEND_API DecoderBaseOperation : public ov::frontend::onnx::DecoderBase {
public:
/// \brief Get input tensor name by index
/// Operation nodes are connected to each other by tensors.
/// Each tensor must have a unique name within the graph;
/// uniqueness is ensured by the developer during GraphIterator construction.
/// This method returns the name of the tensor consumed by this operation node at input index idx.
/// If idx is out of range, an exception derived from std::exception is thrown.
virtual const std::string& get_input_tensor_name(size_t idx) const = 0;

/// \brief Get input tensor type by index
/// If idx is out of range, an exception derived from std::exception is thrown.
virtual ov::element::Type get_input_tensor_type(size_t idx) const = 0;

/// \brief Get output tensor name by index
/// Operation nodes are connected to each other by tensors.
/// Each tensor must have a unique name within the graph;
/// uniqueness is ensured by the developer during GraphIterator construction.
/// This method returns the name of the tensor produced by this operation node at output index idx.
/// If idx is out of range, an exception derived from std::exception is thrown.
virtual const std::string& get_output_tensor_name(size_t idx) const = 0;

/// \brief Get output tensor type by index
/// If idx is out of range, an exception derived from std::exception is thrown.
virtual ov::element::Type get_output_tensor_type(size_t idx) const = 0;

/// \brief Get input tensor info
/// Returns the TensorMetaInfo for the tensor at input index idx
/// (the tensor may correspond to a Constant, a Parameter, or an intermediate tensor connecting a producer with this node).
/// If idx is out of range, an exception derived from std::exception is thrown.
virtual const TensorMetaInfo& get_input_tensor_info(size_t idx) const = 0;

/// \brief Get output tensor info
/// Returns the TensorMetaInfo for the tensor at output index idx
/// (the tensor may correspond to an intermediate tensor connecting this node with a consumer).
/// If idx is out of range, an exception derived from std::exception is thrown.
virtual const TensorMetaInfo& get_output_tensor_info(size_t idx) const = 0;

/// \brief Get the number of outputs
virtual size_t get_output_size() const = 0;

/// \brief Returns the operation's opset version
virtual uint64_t get_op_set() const = 0;

/// \brief Returns the operation's domain
virtual const std::string& get_domain() const = 0;

/// \brief Returns true if the node has an attribute with the given name
virtual bool has_attribute(const std::string& name) const = 0;

/// \brief Experimental API: exposes a pointer to the underlying internal node representation
virtual void experimental_get_internal_structures(const void** node_def) const = 0;

~DecoderBaseOperation() override;
};

// DecoderBaseTensor corresponds to a tensor node and provides information about its type, shape, quantization and
// sparsity
class ONNX_FRONTEND_API DecoderBaseTensor : public ov::frontend::onnx::DecoderBase {
public:
/// \brief Get tensor info
virtual const TensorMetaInfo& get_tensor_info() const = 0;

/// \brief Get input index for tensor
/// Returns the index of this tensor in the list of model inputs;
/// the index is in the range [0, n-1], where n is the number of inputs in the model.
/// If the tensor is not a model input, returns -1.
virtual int64_t get_input_idx() const = 0;

/// \brief Get output index for tensor
/// Returns the index of this tensor in the list of model outputs;
/// the index is in the range [0, m-1], where m is the number of outputs in the model.
/// If the tensor is not a model output, returns -1.
virtual int64_t get_output_idx() const = 0;

~DecoderBaseTensor() override;
};

} // namespace onnx
} // namespace frontend
} // namespace ov
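
The two decoder interfaces above are what a conversion routine queries while translating a graph. Below is a minimal illustrative sketch, not part of the change itself, of how a caller might inspect an operation decoder; print_op_summary and the "axis" attribute are hypothetical names used only for this example, while the accessors themselves come from this header and from the common ov::frontend::DecoderBase interface (get_op_type(), get_input_size(), get_attribute()).

#include <cstddef>
#include <cstdint>
#include <iostream>

#include "openvino/frontend/onnx/decoder.hpp"

// Hypothetical helper: dumps the signature of one operation node using the decoder API above.
void print_op_summary(const ov::frontend::onnx::DecoderBaseOperation& op) {
    std::cout << op.get_domain() << "." << op.get_op_type() << " (opset " << op.get_op_set() << ")\n";

    // get_input_size() is inherited from the common ov::frontend::DecoderBase interface.
    for (size_t i = 0; i < op.get_input_size(); ++i) {
        const auto& info = op.get_input_tensor_info(i);
        std::cout << "  in  " << i << ": " << op.get_input_tensor_name(i) << " : " << info.m_element_type << " "
                  << info.m_partial_shape << "\n";
    }
    for (size_t i = 0; i < op.get_output_size(); ++i) {
        std::cout << "  out " << i << ": " << op.get_output_tensor_name(i) << " : " << op.get_output_tensor_type(i)
                  << "\n";
    }

    // Attributes are fetched by name; has_attribute() guards against optional ones ("axis" is just an example).
    if (op.has_attribute("axis")) {
        std::cout << "  axis = " << op.get_attribute("axis").as<int64_t>() << "\n";
    }
}
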
@@ -18,17 +18,25 @@ namespace onnx {
class ONNX_FRONTEND_API FrontEnd : public ov::frontend::FrontEnd {
public:
using Ptr = std::shared_ptr<FrontEnd>;
std::shared_ptr<ov::Model> convert(const InputModel::Ptr& model) const override;
std::shared_ptr<ov::Model> convert(const ov::frontend::InputModel::Ptr& model) const override;
void convert(const std::shared_ptr<ov::Model>& partially_converted) const override;
std::shared_ptr<ov::Model> convert_partially(const InputModel::Ptr& input_model) const override;
std::shared_ptr<ov::Model> decode(const InputModel::Ptr& input_model) const override;
std::shared_ptr<ov::Model> convert_partially(const ov::frontend::InputModel::Ptr& input_model) const override;
std::shared_ptr<ov::Model> decode(const ov::frontend::InputModel::Ptr& input_model) const override;
std::string get_name() const override;
bool supported_impl(const std::vector<ov::Any>& variants) const override;
void add_extension(const std::shared_ptr<ov::Extension>& extension) override;
void normalize(const std::shared_ptr<ov::Model>& model) const override;

protected:
InputModel::Ptr load_impl(const std::vector<ov::Any>& params) const override;
ov::frontend::InputModel::Ptr load_impl(const std::vector<ov::Any>& params) const override;

void translate_graph(const InputModel::Ptr& model,
bool fail_fast,
bool /* no_conversion */, // future use
std::shared_ptr<ov::Model>& ov_model) const;
std::shared_ptr<ov::Model> convert_unify(const InputModel::Ptr& model) const;
std::shared_ptr<ov::Model> convert_partially_unify(const InputModel::Ptr& input_model) const;
std::shared_ptr<ov::Model> decode_unify(const InputModel::Ptr& input_model) const;

// m_other_extensions should be the first member here,
// m_other_extensions can contain SO Extension (holder for other Extensions),
@@ -0,0 +1,56 @@
// Copyright (C) 2018-2025 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#pragma once

#include "openvino/core/runtime_attribute.hpp"
#include "openvino/frontend/onnx/decoder.hpp"
#include "openvino/frontend/onnx/visibility.hpp"

namespace ov {
namespace frontend {
namespace onnx {

/// Abstract representation of an input model graph that yields its nodes in topologically sorted order.
/// It returns decoders for model inputs and outputs (DecoderBaseTensor objects) and for operation nodes
/// (DecoderBaseOperation objects). DecoderBaseOperation objects must be returned by `get_decoder()` in topological
/// order, from producing nodes to consuming nodes. DecoderBaseTensor objects for model inputs and outputs must be
/// returned first, and their order defines their indices in the original model, i.e. the model input index and the
/// model output index.
/// For example, iterating over a GraphIterator and calling `get_decoder()` returns
/// DecoderBaseTensor (for input 0), ..., DecoderBaseTensor (for input n-1),
/// DecoderBaseTensor (for output 0), ..., DecoderBaseTensor (for output m-1),
/// DecoderBaseOperation (for op 1), ..., DecoderBaseOperation (for op k),
/// where n is the number of model inputs, m is the number of model outputs, and k is the number of operation nodes.
/// NOTE: constants are ignored; no decoder object is returned for a constant.
class ONNX_FRONTEND_API GraphIterator : ::ov::RuntimeAttribute {
public:
using Ptr = std::shared_ptr<GraphIterator>;

/// \brief Get the number of operation nodes in the graph
virtual size_t size() const = 0;

/// \brief Set iterator to the start position
virtual void reset() = 0;

/// \brief Move to the next node in the graph
virtual void next() = 0;

/// \brief Returns true if the iterator has moved past the last available node
virtual bool is_end() const = 0;

/// \brief Return a pointer to a decoder of the current node
virtual std::shared_ptr<DecoderBase> get_decoder() const = 0;

/// \brief Returns the opset version of the requested domain, as stored in the ModelProto
/// If the domain is not found, returns -1
virtual int64_t get_opset_version(const std::string& domain) const = 0;

/// \brief Destructor
virtual ~GraphIterator();
};

} // namespace onnx
} // namespace frontend
} // namespace ov
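
The ordering contract documented above is easiest to see from the consumer side. The sketch below is illustrative only and not part of the change; walk_graph is a hypothetical helper that walks a GraphIterator and dispatches on the concrete decoder type, relying on model inputs/outputs being returned before the operation nodes and on operations arriving in topological order.

#include <memory>

#include "openvino/frontend/onnx/decoder.hpp"
#include "openvino/frontend/onnx/graph_iterator.hpp"

// Hypothetical helper: one pass over the graph following the protocol described in the class comment.
void walk_graph(const ov::frontend::onnx::GraphIterator::Ptr& graph_it) {
    for (graph_it->reset(); !graph_it->is_end(); graph_it->next()) {
        const auto decoder = graph_it->get_decoder();
        if (const auto tensor = std::dynamic_pointer_cast<ov::frontend::onnx::DecoderBaseTensor>(decoder)) {
            // Tensor decoders come first; get_input_idx()/get_output_idx() return -1
            // when the tensor is not a model input/output.
            if (tensor->get_input_idx() >= 0) {
                // create a Parameter at position tensor->get_input_idx()
            } else if (tensor->get_output_idx() >= 0) {
                // register a Result at position tensor->get_output_idx()
            }
        } else if (const auto op = std::dynamic_pointer_cast<ov::frontend::onnx::DecoderBaseOperation>(decoder)) {
            // Operation decoders arrive after all tensor decoders, producers before consumers,
            // so every input tensor of `op` is already known at this point.
            (void)op;
        }
    }
}
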
13 changes: 13 additions & 0 deletions src/frontends/onnx/frontend/src/core/decoder.cpp
@@ -0,0 +1,13 @@
// Copyright (C) 2018-2025 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "openvino/frontend/onnx/decoder.hpp"

using namespace ov::frontend::onnx;

DecoderBase::~DecoderBase() = default;

DecoderBaseOperation::~DecoderBaseOperation() = default;

DecoderBaseTensor::~DecoderBaseTensor() = default;
130 changes: 130 additions & 0 deletions src/frontends/onnx/frontend/src/core/decoder_proto.cpp
@@ -0,0 +1,130 @@
// Copyright (C) 2018-2025 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "decoder_proto.hpp"

#include <onnx/onnx_pb.h>

#include <fstream>
#include <openvino/frontend/graph_iterator.hpp>

#include "graph_iterator_proto.hpp"
#include "openvino/frontend/onnx/graph_iterator.hpp"
#include "openvino/util/wstring_convert_util.hpp"

namespace ov {
namespace frontend {
namespace onnx {

const std::string empty_name = "";
const std::string DEFAULT_DOMAIN = "";
const std::string EMPTY_NAME = "";
const std::string EMPTY_OP_TYPE = "";

ov::Any DecoderProto::get_attribute(const std::string& name) const {
for (const auto& attr : m_node->attribute()) {
if (!attr.has_name() || attr.name() != name)
continue;
if (!attr.has_type()) {
throw std::runtime_error("Attribute \"" + name + "\" doesn't have a type");
}
switch (attr.type()) {
case AttributeProto_AttributeType::AttributeProto_AttributeType_FLOAT:
if (attr.has_f())
return attr.f();
else
throw std::runtime_error("Attribute doesn't have value");
break;
case AttributeProto_AttributeType::AttributeProto_AttributeType_FLOATS:
return std::vector<float>{attr.floats().begin(), attr.floats().end()};
case AttributeProto_AttributeType::AttributeProto_AttributeType_INT:
if (attr.has_i())
return attr.i();
else
throw std::runtime_error("Attribute doesn't have value");
break;
case AttributeProto_AttributeType::AttributeProto_AttributeType_INTS:
return std::vector<int64_t>{attr.ints().begin(), attr.ints().end()};
case AttributeProto_AttributeType::AttributeProto_AttributeType_STRING:
if (attr.has_s())
return attr.s();
else
throw std::runtime_error("Attribute doesn't have value");
break;
case AttributeProto_AttributeType::AttributeProto_AttributeType_STRINGS:
return std::vector<std::string>{attr.strings().begin(), attr.strings().end()};
case AttributeProto_AttributeType::AttributeProto_AttributeType_GRAPH:
if (attr.has_g())
return static_cast<ov::frontend::onnx::GraphIterator::Ptr>(
std::make_shared<GraphIteratorProto>(m_parent, &attr.g()));
else
throw std::runtime_error("Attribute doesn't have value");
break;
case AttributeProto_AttributeType::AttributeProto_AttributeType_TENSOR:
return static_cast<ov::frontend::onnx::DecoderBase::Ptr>(
std::make_shared<DecoderProtoTensor>(&attr.t(), m_parent, 0, 0));
case AttributeProto_AttributeType::AttributeProto_AttributeType_SPARSE_TENSOR: {
ov::frontend::onnx::SparseTensorInfo sparse_tensor_info{};
auto& sparse_tensor = attr.sparse_tensor();
sparse_tensor_info.m_partial_shape =
ov::PartialShape{std::vector<int64_t>(sparse_tensor.dims().begin(), sparse_tensor.dims().end())};
if (sparse_tensor.has_values()) {
sparse_tensor_info.m_values = static_cast<ov::frontend::onnx::DecoderBase::Ptr>(
std::make_shared<DecoderProtoTensor>(&sparse_tensor.values(), m_parent, 0, 0));
}
if (sparse_tensor.has_indices()) {
sparse_tensor_info.m_indices = static_cast<ov::frontend::onnx::DecoderBase::Ptr>(
std::make_shared<DecoderProtoTensor>(&sparse_tensor.indices(), m_parent, 0, 0));
}
return sparse_tensor_info;
}
default:
throw std::runtime_error("Unsupported attribute type " +
::ONNX_NAMESPACE::AttributeProto_AttributeType_Name(attr.type()));
}
}
return nullptr;
}

size_t DecoderProto::get_input_size() const {
return m_input_info.size();
}

size_t DecoderProto::get_output_size() const {
return m_output_info.size();
}

// Left empty: in this frontend, operation inputs are identified by unique tensor names (see DecoderBaseOperation),
// so producer information is not reported through this method and the output parameters are left untouched.
void DecoderProto::get_input_node(size_t input_port_idx,
                                  std::string& producer_name,
                                  std::string& producer_output_port_name,
                                  size_t& producer_output_port_index) const {}

const std::string& DecoderProto::get_op_type() const {
if (m_node->has_op_type()) {
return m_node->op_type();
} else {
return EMPTY_OP_TYPE;
}
}

const std::string& DecoderProto::get_op_name() const {
if (m_node->has_name()) {
return m_node->name();
} else {
return EMPTY_NAME;
}
}

bool DecoderProto::has_attribute(const std::string& name) const {
for (const auto& attr : m_node->attribute()) {
if (attr.has_name() && attr.name() == name) {
return true;
}
}
return false;
}

} // namespace onnx
} // namespace frontend
} // namespace ov
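
get_attribute() above maps each ONNX AttributeProto type onto an ov::Any payload: FLOAT/INT/STRING become float/int64_t/std::string, FLOATS/INTS/STRINGS become the corresponding std::vector, GRAPH becomes a nested GraphIterator, and TENSOR/SPARSE_TENSOR become tensor decoders. The sketch below is an illustrative guess at how a caller might unpack a few of those payloads; the attribute names ("alpha", "perm", "body") and the read_attributes helper are hypothetical.

#include <cstdint>
#include <vector>

#include "openvino/frontend/onnx/decoder.hpp"
#include "openvino/frontend/onnx/graph_iterator.hpp"

// Hypothetical helper: unpacks a few attribute kinds produced by DecoderProto::get_attribute().
void read_attributes(const ov::frontend::onnx::DecoderBaseOperation& op) {
    if (op.has_attribute("alpha")) {  // FLOAT -> float
        const float alpha = op.get_attribute("alpha").as<float>();
        (void)alpha;
    }
    if (op.has_attribute("perm")) {  // INTS -> std::vector<int64_t>
        const auto perm = op.get_attribute("perm").as<std::vector<int64_t>>();
        (void)perm;
    }
    if (op.has_attribute("body")) {  // GRAPH -> GraphIterator over the nested subgraph
        const auto body = op.get_attribute("body").as<ov::frontend::onnx::GraphIterator::Ptr>();
        (void)body;
    }
}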