diff --git a/compiler/circle-operator/driver/Driver.cpp b/compiler/circle-operator/driver/Driver.cpp index f5fd8073cf4..510f7687797 100644 --- a/compiler/circle-operator/driver/Driver.cpp +++ b/compiler/circle-operator/driver/Driver.cpp @@ -42,6 +42,7 @@ int entry(int argc, char **argv) "circle-operator allows users to retrieve operator information from a Circle model file"}; arser.add_argument("--name").nargs(0).help("Dump operators name in circle file"); arser.add_argument("--code").nargs(0).help("Dump operators code in circle file"); + arser.add_argument("--shapes").nargs(0).help("Dump shapes"); arser.add_argument("--output_path").help("Save output to file (default output is console)"); arser.add_argument("circle").help("Circle file to dump"); @@ -59,6 +60,7 @@ int entry(int argc, char **argv) cirops::DumpOption option; option.names = arser["--name"]; option.codes = arser["--code"]; + option.shapes = arser["--shapes"]; std::ofstream oFstream; std::ostream *oStream = &std::cout; diff --git a/compiler/circle-operator/src/Dump.cpp b/compiler/circle-operator/src/Dump.cpp index dc260223821..8e9e804825b 100644 --- a/compiler/circle-operator/src/Dump.cpp +++ b/compiler/circle-operator/src/Dump.cpp @@ -24,6 +24,29 @@ namespace { +// TODO handle multiple outputs case +const circle::Tensor *get_output_tensor(mio::circle::Reader &reader, const circle::Operator *op) +{ + const auto tensors = reader.tensors(); + const auto output_tensors = reader.outputs(op); + const auto output = output_tensors.at(0); + return tensors->Get(output); +} + +void dump_shape(std::ostream &os, const ::flatbuffers::Vector<int32_t> *shape) +{ + os << "["; + for (uint32_t i = 0; i < shape->size(); ++i) + { + os << shape->Get(i); + if (i < shape->size() - 1) + { + os << ","; + } + } + os << "]"; +} + void dump_ops(std::ostream &os, mio::circle::Reader &reader, const cirops::DumpOption &option) { auto ops = reader.operators(); @@ -44,19 +67,19 @@ void dump_ops(std::ostream &os, mio::circle::Reader &reader,
const cirops::DumpO const auto op_name = reader.opcode_name(op); os << op_name; } + if (option.names) { - // TODO multiple outputs? - const auto tensors = reader.tensors(); - const auto output_tensors = reader.outputs(op); - const auto output = output_tensors.at(0); - const auto tensor = tensors->Get(output); - const std::string name = mio::circle::tensor_name(tensor); - if (option.codes) - { - os << ","; - } - os << name; + const std::string name = mio::circle::tensor_name(get_output_tensor(reader, op)); + os << (option.codes ? "," : "") << name; + } + + if (option.shapes) + { + os << (option.codes || option.names ? "," : ""); + auto const out_tensor = get_output_tensor(reader, op); + dump_shape(os, (nullptr == out_tensor->shape_signature()) ? out_tensor->shape() + : out_tensor->shape_signature()); } os << std::endl; } diff --git a/compiler/circle-operator/src/Dump.h b/compiler/circle-operator/src/Dump.h index aa1d1be4996..cefcd9870fd 100644 --- a/compiler/circle-operator/src/Dump.h +++ b/compiler/circle-operator/src/Dump.h @@ -29,6 +29,7 @@ struct DumpOption bool names = false; bool codes = false; bool all_graphs = false; + bool shapes = false; }; class DumpOperators diff --git a/compiler/one-cmds/one-import-onnx b/compiler/one-cmds/one-import-onnx index 497e43926c0..01ac272888b 100644 --- a/compiler/one-cmds/one-import-onnx +++ b/compiler/one-cmds/one-import-onnx @@ -24,6 +24,7 @@ import os import sys import tempfile import onnx +from onnx.tools import update_model_dims # ONNX legalizer is an optional feature # It enables conversion of some operations, but in experimental phase for now @@ -204,6 +205,26 @@ def _get_parser(): action='store_true', help='Experimental disable BatchMatMul unfold') + # set static input shape + parser.add_argument( + '--input_shapes', + type=str, + help= + 'Set static shape for input tensors in comma-separated list format, like \'[1,2,3]\'.' + 'If the model has multiple inputs, tensor names should be provided as well.'
+ 'In such a case, the argument should be in the following format: \'a[1,2,3],b[4,5,6],c[7,8]\'.' + ) + + # set static output shape + parser.add_argument( + '--output_shapes', + type=str, + help= + 'Set static shape for output tensors in comma-separated list format, like \'[1,2,3]\'.' + 'If the model has multiple outputs, tensor names should be provided as well.' + 'In such a case, the argument should be in the following format: \'a[1,2,3],b[4,5,6],c[7,8]\'.' + ) + return parser @@ -299,6 +320,50 @@ def _check_onnx2circle(): return o2c_path +def _extract_origin_tensor_shapes(tensors): + shapes_map = {} + for tensor in tensors: + shapes_map[tensor.name] = [] + for dim_proto in tensor.type.tensor_type.shape.dim: + if dim_proto.HasField("dim_value"): + shapes_map[tensor.name].append(dim_proto.dim_value) + elif dim_proto.HasField("dim_param"): + shapes_map[tensor.name].append(dim_proto.dim_param) + else: + shapes_map[tensor.name].append(-1) # dynamic + return shapes_map + + +def _parse_shapes(shape_str, origin_tensor_shapes_map): + user_shapes_map = {} + for idx, single_shape in enumerate(shape_str.split(']')): + if single_shape: + tensor_name, single_shape = single_shape.split('[') + if not tensor_name: + # using tensor name model allowed only for single input models + if idx == 0 and len(origin_tensor_shapes_map) == 1: + tensor_name = next(iter(origin_tensor_shapes_map.keys())) + else: + raise ValueError( + 'You must provide tenors name for the model with multiple inputs/outputs' + ) + tensor_name = tensor_name.replace(',', '') + user_shapes_map[tensor_name] = [] + for dim in single_shape.split(','): + user_shapes_map[tensor_name].append(int(dim)) + for tensor_name in user_shapes_map.keys(): + if tensor_name not in origin_tensor_shapes_map: + raise ValueError( + f'Tensor with name={tensor_name} do NOT match any input/output of the model' + ) + if len(user_shapes_map[tensor_name]) != len( + origin_tensor_shapes_map[tensor_name]): + raise ValueError( + f'Rank of 
provided shape must be compatible with the origin tensor={tensor_name} from the model' + ) + return user_shapes_map + + def _convert(args): _apply_verbosity(args.verbose) @@ -323,6 +388,29 @@ def _convert(args): onnx_model = onnx.load(onnx_to_convert_path) _sanitize_io_names(onnx_model) + input_shape_provided = oneutils.is_valid_attr(args, 'input_shapes') + output_shape_provided = oneutils.is_valid_attr(args, 'output_shapes') + if input_shape_provided or output_shape_provided: # any shape arg provided + input_shapes_map = _extract_origin_tensor_shapes(onnx_model.graph.input) + if input_shape_provided: + input_shapes_map = _parse_shapes(getattr(args, 'input_shapes'), + input_shapes_map) + delattr(args, 'input_shapes' + ) # avoid argument colision with tf2tflite conversion step + output_shapes_map = _extract_origin_tensor_shapes(onnx_model.graph.output) + if output_shape_provided: + output_shapes_map = _parse_shapes(getattr(args, 'output_shapes'), + output_shapes_map) + onnx_model = update_model_dims.update_inputs_outputs_dims( + onnx_model, input_shapes_map, output_shapes_map) + try: + print(onnx.__file__) + onnx_model = onnx.shape_inference.infer_shapes(onnx_model, + strict_mode=True) + except Exception as e: + raise RuntimeError( + f'Exception with message="{e}" raised from external onnx library during shape inference' + ) # TODO set model_updated to True only when sanitize updates onnx_model # NOTE with onnx2circle, we may not need sanitize(this needs verification) model_updated = True diff --git a/compiler/one-cmds/tests/one-import-onnx-dynamic.test b/compiler/one-cmds/tests/one-import-onnx-dynamic.test new file mode 100644 index 00000000000..89957ecae3c --- /dev/null +++ b/compiler/one-cmds/tests/one-import-onnx-dynamic.test @@ -0,0 +1,217 @@ +#!/bin/bash + +# Copyright (c) 2024 Samsung Electronics Co., Ltd. 
All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +filename_ext="$(basename -- $0)" +filename="${filename_ext%.*}" + +trap_err_onexit() +{ + echo "${filename_ext} FAILED" + exit 255 +} + +trap trap_err_onexit ERR + +# ---- Positive tests ---- + +# dynamic batch +input="abs_dynamic_batch.onnx" +output="abs_dynamic_batch.one-import-onnx-dynamic.circle" +input_shapes="[1,5,5]" + +rm -rf ${output} +rm -rf ${output}.log + +one-import-onnx \ +--input_path ${input} \ +--output_path ${output} \ +--input_shapes ${input_shapes} > /dev/null 2>&1 + +circle-operator --code --shapes ${output} > ${output}.log 2>&1 + +if ! grep -q "ABS,\[1,5,5\]" "${output}.log"; then + trap_err_onexit +fi + +rm -rf ${output} +rm -rf ${output}.log + +# the same as before but name of input tensor explicitly defined +input_shapes="input[1,5,5]" + +one-import-onnx \ +--input_path ${input} \ +--output_path ${output} \ +--input_shapes ${input_shapes} > /dev/null 2>&1 + +circle-operator --code --shapes ${output} > ${output}.log 2>&1 + +if ! grep -q "ABS,\[1,5,5\]" "${output}.log"; then + trap_err_onexit +fi + +rm -rf ${output} +rm -rf ${output}.log + +# dynamic batch and other dims +input="abs_dynamic_input.onnx" +output="abs_dynamic_input.one-import-onnx-dynamic.circle" +input_shapes="[1,5,5]" + +one-import-onnx \ +--input_path ${input} \ +--output_path ${output} \ +--input_shapes ${input_shapes} > /dev/null 2>&1 + +circle-operator --code --shapes ${output} > ${output}.log 2>&1 + +if ! 
grep -q "ABS,\[1,5,5\]" "${output}.log"; then + trap_err_onexit +fi + +rm -rf ${output} +rm -rf ${output}.log + +# dynamic only output shape (which cannot be calculated via shape inference) +input="reshape_static_inputs_dynamic_output.onnx" +output="reshape_static_inputs_dynamic_output.one-import-onnx-dynamic.circle" +output_shapes="[4,5,6]" + +one-import-onnx \ +--input_path ${input} \ +--output_path ${output} \ +--output_shapes ${output_shapes} > /dev/null 2>&1 + +circle-operator --code --shapes ${output} > ${output}.log 2>&1 + +if ! grep -q "RESHAPE,\[4,5,6\]" "${output}.log"; then + trap_err_onexit +fi + +rm -rf ${output} +rm -rf ${output}.log + +# dynamic input and output shape +input="abs_reshape_dynamic_inputs_and_output.onnx" +output="abs_reshape_dynamic_inputs_and_output.one-import-onnx-dynamic.circle" +input_shapes="input[2,10,6],shape[3]" +output_shapes="[4,5,6]" + +one-import-onnx \ +--input_path ${input} \ +--output_path ${output} \ +--input_shapes ${input_shapes} \ +--output_shapes ${output_shapes} > /dev/null 2>&1 + +circle-operator --code --shapes ${output} > ${output}.log 2>&1 + +if ! grep -q "ABS,\[2,10,6\]" "${output}.log"; then + trap_err_onexit +fi + +if ! grep -q "RESHAPE,\[4,5,6\]" "${output}.log"; then + trap_err_onexit +fi + +rm -rf ${output} +rm -rf ${output}.log + +# dynamic input and output shape - 3 inputs and 2 outputs +input="abs_concat_dynamic_inputs_and_outputs.onnx" +output="abs_concat_dynamic_inputs_and_outputs.one-import-onnx-dynamic.circle" +input_shapes="abs_in[1,2,5],con_1_in[1,1,5],con_2_in[1,1,5]" +output_shapes="abs_out[1,2,5],con_out[1,4,5]" + +one-import-onnx \ +--input_path ${input} \ +--output_path ${output} \ +--input_shapes ${input_shapes} \ +--output_shapes ${output_shapes} > /dev/null 2>&1 + +circle-operator --code --shapes ${output} > ${output}.log 2>&1 + +if ! grep -q "ABS,\[1,2,5\]" "${output}.log"; then + trap_err_onexit +fi + +if ! 
grep -q "CONCATENATION,\[1,4,5\]" "${output}.log"; then + trap_err_onexit +fi + +rm -rf ${output} +rm -rf ${output}.log + +# ---- Negative tests ---- +trap '' ERR # Stop catching signals + +# Rank of input tensor is not known (the feature not supported by onnx.tools.update_model_dims yet) +input="shape_not_provided_abs.onnx" +output="shape_not_provided_abs.one-import-onnx-dynamic.circle" +input_shapes="[1,5,5]" + +one-import-onnx \ +--input_path ${input} \ +--output_path ${output} \ +--input_shapes ${input_shapes} > ${output}.log 2>&1 + +if ! grep -q "Rank of provided shape must be compatible with the origin tensor=input from the model" "${output}.log"; then + trap_err_onexit +fi + +rm -rf ${output} +rm -rf ${output}.log + +# incorrent input tensor name passed by the useer +input="abs_dynamic_batch.onnx" +output="abs_dynamic_batch.one-import-onnx-dynamic.circle" +input_shapes="incorrent_input_name[1,5,5]" + +rm -rf ${output} +rm -rf ${output}.log + +one-import-onnx \ +--input_path ${input} \ +--output_path ${output} \ +--input_shapes ${input_shapes} > ${output}.log 2>&1 + +if ! grep -q "Tensor with name=incorrent_input_name do NOT match any input/output of the model" "${output}.log"; then + trap_err_onexit +fi + +rm -rf ${output} +rm -rf ${output}.log + +# incorrent output tensor name passed by the useer +input="abs_concat_dynamic_inputs_and_outputs.onnx" +output="abs_concat_dynamic_inputs_and_outputs.one-import-onnx-dynamic.circle" +input_shapes="abs_in[1,2,5],con_1_in[1,1,5],con_2_in[1,1,5]" +output_shapes="elu_ot[1,2,5],con_out[1,4,5]" + +one-import-onnx \ +--input_path ${input} \ +--output_path ${output} \ +--input_shapes ${input_shapes} \ +--output_shapes ${output_shapes} > ${output}.log 2>&1 + +if ! 
grep -q "Tensor with name=elu_ot do NOT match any input/output of the model" "${output}.log"; then + trap_err_onexit +fi + +rm -rf ${output} +rm -rf ${output}.log + +echo "${filename_ext} SUCCESS" +exit 0 diff --git a/compiler/one-cmds/tests/prepare_test_materials.sh b/compiler/one-cmds/tests/prepare_test_materials.sh index 065c2dcc5dd..5ba12246b9c 100644 --- a/compiler/one-cmds/tests/prepare_test_materials.sh +++ b/compiler/one-cmds/tests/prepare_test_materials.sh @@ -179,6 +179,19 @@ if files_missing "${NEG_TEST_RECCURENT_MODELS[@]}"; then # https://github.com/Samsung/ONE/issues/8395#issuecomment-1050364375 fi +declare -a DYN_SHAPES_MODELS=(\ + "abs_concat_dynamic_inputs_and_outputs.onnx" "abs_dynamic_batch.onnx" \ + "abs_dynamic_input.onnx" "abs_reshape_dynamic_inputs_and_output.onnx" \ + "reshape_static_inputs_dynamic_output.onnx" "shape_not_provided_abs.onnx" +) + +if files_missing "${DYN_SHAPES_MODELS[@]}"; then + rm -rf onnx_dyn_shapes_models.zip + wget -nv https://github.com/user-attachments/files/16560918/onnx_dyn_shapes_models.zip + unzip onnx_dyn_shapes_models.zip + # https://github.com/Samsung/ONE/issues/13636#issuecomment-2277704651 +fi + declare -a ADD_000_MODEL_AND_INPUTS=("Add_000.circle" "Add_000.circle.input0" "Add_000.circle.input1") if files_missing "${ADD_000_MODEL_AND_INPUTS}"; then