/*
 * Type-class predicates over TfLiteType: non-zero when the tensor element
 * type belongs to the given class. Arguments are fully parenthesized so
 * expressions such as TFLITE_INTEGER(a ? b : c) expand correctly, and the
 * whole expansion is parenthesized so the result composes safely.
 */
#define TFLITE_INTEGER(x) \
    ((x) == kTfLiteInt16 || (x) == kTfLiteInt32 || (x) == kTfLiteInt64)
#define TFLITE_FLOAT(x) \
    ((x) == kTfLiteFloat16 || (x) == kTfLiteFloat32)
4545
46- void print_tensor_info (const TfLiteTensor * tensor )
46+ void print_tensor_info (struct flb_tensorflow * ctx , const TfLiteTensor * tensor )
4747{
4848 int i ;
4949 TfLiteType type ;
@@ -56,30 +56,30 @@ void print_tensor_info(const TfLiteTensor* tensor)
5656 }
5757 sprintf (dims , "%s%d}" , dims , TfLiteTensorDim (tensor , i ));
5858
59- flb_info ( "%s" , dims );
59+ flb_plg_debug ( ctx -> ins , "%s" , dims );
6060}
6161
62- void print_model_io (TfLiteInterpreter * interpreter )
62+ void print_model_io (struct flb_tensorflow * ctx )
6363{
6464 int i ;
6565 int num ;
6666 const TfLiteTensor * tensor ;
6767 char dims [100 ] = "" ;
6868
69- // Input information
70- num = TfLiteInterpreterGetInputTensorCount (interpreter );
69+ /* Input information */
70+ num = TfLiteInterpreterGetInputTensorCount (ctx -> interpreter );
7171 for (i = 0 ; i < num ; i ++ ) {
72- tensor = TfLiteInterpreterGetInputTensor (interpreter , i );
73- flb_info ( "[tensorflow] ===== input #%d =====" , i + 1 );
74- print_tensor_info (tensor );
72+ tensor = TfLiteInterpreterGetInputTensor (ctx -> interpreter , i );
73+ flb_plg_debug ( ctx -> ins , "[tensorflow] ===== input #%d =====" , i + 1 );
74+ print_tensor_info (ctx , tensor );
7575 }
7676
77- // Output information
78- num = TfLiteInterpreterGetOutputTensorCount (interpreter );
77+ /* Output information */
78+ num = TfLiteInterpreterGetOutputTensorCount (ctx -> interpreter );
7979 for (i = 0 ; i < num ; i ++ ) {
80- tensor = TfLiteInterpreterGetOutputTensor (interpreter , i );
81- flb_info ( "[tensorflow] ===== output #%d ====" , i + 1 );
82- print_tensor_info (tensor );
80+ tensor = TfLiteInterpreterGetOutputTensor (ctx -> interpreter , i );
81+ flb_plg_debug ( ctx -> ins , "[tensorflow] ===== output #%d ====" , i + 1 );
82+ print_tensor_info (ctx , tensor );
8383 }
8484}
8585
@@ -92,7 +92,7 @@ void build_interpreter(struct flb_tensorflow *ctx, char* model_path)
9292 TfLiteInterpreterAllocateTensors (ctx -> interpreter );
9393
9494 flb_info ("Tensorflow Lite interpreter created!" );
95- print_model_io (ctx -> interpreter );
95+ print_model_io (ctx );
9696}
9797
9898void inference (TfLiteInterpreter * interpreter , void * input_data , void * output_data , int input_buf_size , int output_buf_size ) {
0 commit comments