@@ -105,6 +105,7 @@ std::shared_ptr<TensorWrapper> CreateTensorWrapper(
     py::dict& quant_info,
     std::uint32_t rank,
     const std::vector<uint32_t>& dims,
+    const std::vector<uint8_t>& dynamic_dims,
     py::array& data,
     bool copy_data) {
   std::unique_ptr<QuantizeParamsWrapper> quantize_param_wrapper =
@@ -117,6 +118,7 @@ std::shared_ptr<TensorWrapper> CreateTensorWrapper(
       std::move(quantize_param_wrapper),
       rank,
      dims.data(),
+      dynamic_dims.data(),
       0,
       data.size() == 0 ? nullptr : data.data(),
       copy_data);
@@ -228,22 +230,27 @@ PYBIND11_MODULE(PyQnnWrapperAdaptor, m) {
         py::list input_tensors_list;
         py::list output_tensors_list;
         result["version"] = op_config.version;
-        result["name"] = op_config.v1.name;
-        result["packageName"] = op_config.v1.packageName;
-        result["typeName"] = op_config.v1.typeName;
-        result["numOfParams"] = op_config.v1.numOfParams;
-        for (size_t i = 0; i < op_config.v1.numOfParams; ++i) {
-          params_list.append(op_config.v1.params[i]);
+        result["name"] = QNN_OP_VER_PTR(op_config)->name;
+        result["packageName"] = QNN_OP_VER_PTR(op_config)->packageName;
+        result["typeName"] = QNN_OP_VER_PTR(op_config)->typeName;
+        result["numOfParams"] = QNN_OP_VER_PTR(op_config)->numOfParams;
+        for (size_t i = 0; i < QNN_OP_VER_PTR(op_config)->numOfParams;
+             ++i) {
+          params_list.append(QNN_OP_VER_PTR(op_config)->params[i]);
         }
         result["params"] = params_list;
-        result["numOfInputs"] = op_config.v1.numOfInputs;
-        for (size_t i = 0; i < op_config.v1.numOfInputs; ++i) {
-          input_tensors_list.append(op_config.v1.inputTensors[i]);
+        result["numOfInputs"] = QNN_OP_VER_PTR(op_config)->numOfInputs;
+        for (size_t i = 0; i < QNN_OP_VER_PTR(op_config)->numOfInputs;
+             ++i) {
+          input_tensors_list.append(
+              QNN_OP_VER_PTR(op_config)->inputTensors[i]);
         }
         result["inputTensors"] = input_tensors_list;
-        result["numOfOutputs"] = op_config.v1.numOfOutputs;
-        for (size_t i = 0; i < op_config.v1.numOfOutputs; ++i) {
-          output_tensors_list.append(op_config.v1.outputTensors[i]);
+        result["numOfOutputs"] = QNN_OP_VER_PTR(op_config)->numOfOutputs;
+        for (size_t i = 0; i < QNN_OP_VER_PTR(op_config)->numOfOutputs;
+             ++i) {
+          output_tensors_list.append(
+              QNN_OP_VER_PTR(op_config)->outputTensors[i]);
         }
         result["outputTensors"] = output_tensors_list;
         return result;
@@ -259,6 +266,7 @@ PYBIND11_MODULE(PyQnnWrapperAdaptor, m) {
           py::dict&,
           std::uint32_t,
           const std::vector<uint32_t>&,
+          const std::vector<uint8_t>&,
          py::array&,
           bool>(&CreateTensorWrapper)));
 
@@ -376,14 +384,6 @@ PYBIND11_MODULE(PyQnnWrapperAdaptor, m) {
 
   py::class_<Qnn_Tensor_t>(m, "Qnn_Tensor_t")
       .def_readonly("version", &Qnn_Tensor_t::version)
-      .def_property_readonly(
-          "v1",
-          [](Qnn_Tensor_t& t) -> Qnn_TensorV1_t& {
-            if (t.version == QNN_TENSOR_VERSION_1) {
-              return t.v1;
-            }
-            throw std::runtime_error("Tensor version is not V1.");
-          })
       .def_property_readonly("v2", [](Qnn_Tensor_t& t) -> Qnn_TensorV2_t& {
         if (t.version == QNN_TENSOR_VERSION_2) {
          return t.v2;
@@ -399,21 +399,28 @@ PYBIND11_MODULE(PyQnnWrapperAdaptor, m) {
           Qnn_TensorVersion_t::QNN_TENSOR_VERSION_UNDEFINED)
       .export_values();
 
-  py::class_<Qnn_TensorV1_t>(m, "QnnTensorV1")
-      .def_readonly("id", &Qnn_TensorV1_t::id)
-      .def_readonly("name", &Qnn_TensorV1_t::name)
-      .def_readonly("type", &Qnn_TensorV1_t::type)
-      .def_readonly("dataFormat", &Qnn_TensorV1_t::dataFormat)
-      .def_readonly("dataType", &Qnn_TensorV1_t::dataType)
-      .def_readonly("quantizeParams", &Qnn_TensorV1_t::quantizeParams)
-      .def_readonly("rank", &Qnn_TensorV1_t::rank)
+  py::class_<Qnn_TensorV2_t>(m, "Qnn_TensorV2_t")
+      .def_readonly("id", &Qnn_TensorV2_t::id)
+      .def_readonly("name", &Qnn_TensorV2_t::name)
+      .def_readonly("type", &Qnn_TensorV2_t::type)
+      .def_readonly("dataFormat", &Qnn_TensorV2_t::dataFormat)
+      .def_readonly("dataType", &Qnn_TensorV2_t::dataType)
+      .def_readonly("quantizeParams", &Qnn_TensorV2_t::quantizeParams)
+      .def_readonly("rank", &Qnn_TensorV2_t::rank)
       // change dimensions pointer to vector(begin to rank)
       .def_property_readonly(
           "dimensions",
-          [](const Qnn_TensorV1_t& t) {
+          [](const Qnn_TensorV2_t& t) {
            return std::vector<uint32_t>(t.dimensions, t.dimensions + t.rank);
          })
-      .def_readonly("memType", &Qnn_TensorV1_t::memType);
+      .def_property_readonly(
+          "isDynamicDimensions",
+          [](const Qnn_TensorV2_t& t) {
+            return t.isDynamicDimensions == nullptr
+                ? std::vector<uint8_t>()
+                : std::vector<uint8_t>(t.isDynamicDimensions, t.isDynamicDimensions + t.rank);
+          })
+      .def_readonly("memType", &Qnn_TensorV2_t::memType);
 
   py::enum_<Qnn_TensorMemType_t>(m, "Qnn_TensorMemType_t")
       .value(
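
A note on the binding pattern the Qnn_TensorV2_t hunk relies on: the QNN structs expose a raw pointer plus a rank, and each def_property_readonly copies that span into a std::vector, which pybind11 (with pybind11/stl.h included) converts to a Python list; the new isDynamicDimensions property additionally guards against a null pointer, since the field is optional. The sketch below is a minimal, self-contained illustration of that pattern only; TensorLike and its members are made-up stand-ins, not the actual QNN types.

#include <pybind11/pybind11.h>
#include <pybind11/stl.h>

#include <cstdint>
#include <vector>

namespace py = pybind11;

// Illustrative struct standing in for Qnn_TensorV2_t: raw pointers plus a
// rank, the way a C API typically hands them out.
struct TensorLike {
  std::uint32_t rank = 0;
  const std::uint32_t* dimensions = nullptr;
  const std::uint8_t* is_dynamic = nullptr;
};

PYBIND11_MODULE(tensor_sketch, m) {
  py::class_<TensorLike>(m, "TensorLike")
      .def_readonly("rank", &TensorLike::rank)
      // Copy the first `rank` entries into a vector; pybind11/stl.h turns it
      // into a Python list. Assumes dimensions is non-null whenever rank > 0,
      // as the original "dimensions" binding does.
      .def_property_readonly(
          "dimensions",
          [](const TensorLike& t) {
            return std::vector<std::uint32_t>(
                t.dimensions, t.dimensions + t.rank);
          })
      // Optional field: return an empty list when the pointer is null,
      // mirroring the null guard in the isDynamicDimensions binding above.
      .def_property_readonly("isDynamicDimensions", [](const TensorLike& t) {
        return t.is_dynamic == nullptr
            ? std::vector<std::uint8_t>()
            : std::vector<std::uint8_t>(t.is_dynamic, t.is_dynamic + t.rank);
      });
}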
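The other recurring pattern is the overload registration touched in the m.def hunk: because CreateTensorWrapper is overloaded, the full parameter list is spelled out in a static_cast so the compiler can select one function pointer, which is why adding dynamic_dims to the C++ signature also requires updating that cast. The sketch below shows the same mechanism with hypothetical names (MakeThing, overload_sketch), not the actual ExecuTorch code.

#include <pybind11/pybind11.h>
#include <pybind11/stl.h>

#include <cstdint>
#include <vector>

namespace py = pybind11;

// Two hypothetical overloads; only the second takes the extra
// dynamic-dimensions vector, mirroring the new CreateTensorWrapper overload.
int MakeThing(std::uint32_t rank, const std::vector<std::uint32_t>& dims) {
  return static_cast<int>(rank + dims.size());
}
int MakeThing(
    std::uint32_t rank,
    const std::vector<std::uint32_t>& dims,
    const std::vector<std::uint8_t>& dynamic_dims) {
  return static_cast<int>(rank + dims.size() + dynamic_dims.size());
}

PYBIND11_MODULE(overload_sketch, m) {
  // static_cast picks one overload; the cast type must list every parameter,
  // so changing the C++ signature also means changing this cast.
  m.def(
      "MakeThing",
      static_cast<int (*)(
          std::uint32_t, const std::vector<std::uint32_t>&)>(&MakeThing));
  m.def(
      "MakeThing",
      static_cast<int (*)(
          std::uint32_t,
          const std::vector<std::uint32_t>&,
          const std::vector<std::uint8_t>&)>(&MakeThing));
}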