@@ -76,6 +76,7 @@ using paddle::NativeConfig;
 using paddle::NativePaddlePredictor;
 using paddle::PaddleBuf;
 using paddle::PaddleDType;
+using paddle::PaddleDataLayout;
 using paddle::PaddlePassBuilder;
 using paddle::PaddlePlace;
 using paddle::PaddlePredictor;
@@ -85,6 +86,7 @@ using paddle::ZeroCopyTensor;
 
 namespace {
 void BindPaddleDType(py::module *m);
+void BindPaddleDataLayout(py::module *m);
 void BindPaddleBuf(py::module *m);
 void BindPaddleTensor(py::module *m);
 void BindPaddlePlace(py::module *m);
@@ -211,6 +213,34 @@ void PaddleInferTensorCreate(
   tensor.CopyFromCpu(static_cast<const T *>(data.data()));
 }
 
+paddle_infer::PlaceType ToPaddleInferPlace(
+    phi::AllocationType allocation_type) {
+  if (allocation_type == phi::AllocationType::CPU) {
+    return paddle_infer::PlaceType::kCPU;
+  } else if (allocation_type == phi::AllocationType::GPU) {
+    return paddle_infer::PlaceType::kGPU;
+  } else {
+    return paddle_infer::PlaceType::kCPU;
+  }
+}
+
+void PaddleInferShareExternalData(paddle_infer::Tensor &tensor,  // NOLINT
+                                  framework::Tensor input_tensor) {
+  std::vector<int> shape;
+  for (int i = 0; i < input_tensor.dims().size(); ++i) {
+    shape.push_back(input_tensor.dims()[i]);
+  }
+  if (input_tensor.dtype() == phi::DataType::FLOAT32) {
+    tensor.ShareExternalData(
+        static_cast<float *>(input_tensor.data()), shape,
+        ToPaddleInferPlace(input_tensor.place().GetType()));
+  } else if (input_tensor.dtype() == phi::DataType::FLOAT16) {
+    tensor.ShareExternalData(
+        static_cast<paddle::platform::float16 *>(input_tensor.data()), shape,
+        ToPaddleInferPlace(input_tensor.place().GetType()));
+  }
+}
+
 /// \brief Experimental interface.
 /// Create the Strings tensor from data.
 /// \param tensor The tensor will be created and
@@ -327,6 +357,7 @@ void CopyPaddleInferTensor(paddle_infer::Tensor &dst,  // NOLINT
 
 void BindInferenceApi(py::module *m) {
   BindPaddleDType(m);
+  BindPaddleDataLayout(m);
   BindPaddleBuf(m);
   BindPaddleTensor(m);
   BindPaddlePlace(m);
@@ -372,6 +403,14 @@ void BindPaddleDType(py::module *m) {
       .value("INT32", PaddleDType::INT32);
 }
 
+void BindPaddleDataLayout(py::module *m) {
+  py::enum_<PaddleDataLayout>(*m, "PaddleDataLayout")
+      .value("UNK", PaddleDataLayout::kUNK)
+      .value("Any", PaddleDataLayout::kAny)
+      .value("NHWC", PaddleDataLayout::kNHWC)
+      .value("NCHW", PaddleDataLayout::kNCHW);
+}
+
 void BindPaddleBuf(py::module *m) {
   py::class_<PaddleBuf>(*m, "PaddleBuf")
       .def(py::init<size_t>())
@@ -817,6 +856,7 @@ void BindPaddleInferTensor(py::module *m) {
       .def("copy_from_cpu_bind",
            &PaddleInferTensorCreate<paddle_infer::float16>)
       .def("copy_from_cpu_bind", &PaddleInferStringTensorCreate)
+      .def("share_external_data_bind", &PaddleInferShareExternalData)
       .def("copy_to_cpu", &PaddleInferTensorToNumpy)
       .def("shape", &paddle_infer::Tensor::shape)
       .def("set_lod", &paddle_infer::Tensor::SetLoD)