@@ -48,7 +48,7 @@ namespace serving {
 // Map of TFLite tensor name to <TF TensorInfo, TFLite tensor index>.
 namespace {
 
-Status TfLiteTypeToTfType(TfLiteType tflite_type, DataType* type) {
+absl::Status TfLiteTypeToTfType(TfLiteType tflite_type, DataType* type) {
   switch (tflite_type) {
     case kTfLiteNoType:
       *type = tensorflow::DT_INVALID;
@@ -96,8 +96,8 @@ std::string TfToTfLiteLegacyTensorName(const string& tf_name) {
 
 // Checks that an input/output tensor actually exists. If not, attempts to
 // update the tensor name with legacy TFLite tensor naming.
-Status FixTfLiteTensorName(const std::map<string, int>& tensor_name_map,
-                           string& tensor_name) {
+absl::Status FixTfLiteTensorName(const std::map<string, int>& tensor_name_map,
+                                 string& tensor_name) {
   if (tensor_name_map.find(tensor_name) != tensor_name_map.end()) {
     return absl::OkStatus();
   }
@@ -112,8 +112,8 @@ Status FixTfLiteTensorName(const std::map<string, int>& tensor_name_map,
   return errors::Internal("Unknown tensor '", tensor_name, "'.");
 }
 
-Status TfLiteTensorToTensorInfo(const TfLiteTensor* tflite_tensor,
-                                TensorInfo* info) {
+absl::Status TfLiteTensorToTensorInfo(const TfLiteTensor* tflite_tensor,
+                                      TensorInfo* info) {
   DataType tf_type;
   TF_RETURN_IF_ERROR(TfLiteTypeToTfType(tflite_tensor->type, &tf_type));
   info->set_dtype(tf_type);
@@ -125,8 +125,8 @@ Status TfLiteTensorToTensorInfo(const TfLiteTensor* tflite_tensor,
   return absl::OkStatus();
 }
 
-Status GetTensorInfoMap(const tflite::Interpreter* interpreter, bool input,
-                        TensorInfoMap* infomap) {
+absl::Status GetTensorInfoMap(const tflite::Interpreter* interpreter,
+                              bool input, TensorInfoMap* infomap) {
   const std::vector<int>& indices =
       input ? interpreter->inputs() : interpreter->outputs();
   const string& input_str = input ? "Input" : "Output";
@@ -156,7 +156,7 @@ std::vector<int> TensorDims(const Tensor& tensor) {
 }
 
 // Create output tensors making sure they are the right size.
-Status CreateOutputTensors(
+absl::Status CreateOutputTensors(
     std::unique_ptr<internal::TfLiteInterpreterWrapper>& interpreter_wrapper,
     const std::vector<string>& output_tensor_names,
     const std::map<string, int>& output_tensor_to_idx,
@@ -184,7 +184,7 @@ Status CreateOutputTensors(
   return absl::OkStatus();
 }
 
-Status SetInputAndInvokeMiniBatch(
+absl::Status SetInputAndInvokeMiniBatch(
     std::unique_ptr<internal::TfLiteInterpreterWrapper>& interpreter_wrapper,
     const std::vector<int>& tflite_input_indices,
     const std::vector<std::vector<const Tensor*>>& inputs, int batch_size,
@@ -254,7 +254,7 @@ Status SetInputAndInvokeMiniBatch(
   return absl::OkStatus();
 }
 
-Status SetMiniBatchOutput(
+absl::Status SetMiniBatchOutput(
    std::unique_ptr<internal::TfLiteInterpreterWrapper>& interpreter_wrapper,
    const std::map<int, Tensor*>& tflite_idx_to_output_tensor,
    std::vector<Tensor>* outputs) {
@@ -302,7 +302,7 @@ int GetModelBatchSize(const tflite::Model* model) {
 }  // namespace
 
 // Split an input task up into multiple tasks.
-Status TfLiteSession::SplitTfLiteInputTask(
+absl::Status TfLiteSession::SplitTfLiteInputTask(
     std::unique_ptr<TfLiteBatchTask>* input_task_ptr,
     int open_batch_remaining_slot, int max_batch_size,
     std::vector<std::unique_ptr<TfLiteBatchTask>>* output_tasks) {
@@ -389,7 +389,7 @@ Status TfLiteSession::SplitTfLiteInputTask(
   return absl::OkStatus();
 }
 
-Status TfLiteSession::CreateDefaultBasicBatchScheduler(
+absl::Status TfLiteSession::CreateDefaultBasicBatchScheduler(
     const BasicBatchScheduler<TfLiteBatchTask>::Options& options,
     std::function<void(std::unique_ptr<Batch<TfLiteBatchTask>>)>
         process_batch_callback,
@@ -401,7 +401,7 @@ Status TfLiteSession::CreateDefaultBasicBatchScheduler(
   return absl::OkStatus();
 }
 
-Status TfLiteSession::SetScheduler(
+absl::Status TfLiteSession::SetScheduler(
     const SchedulerCreator& scheduler_creator,
     const BasicBatchScheduler<TfLiteBatchTask>::Options& options) {
   use_fixed_batch_size_ = true;
@@ -415,10 +415,11 @@ Status TfLiteSession::SetScheduler(
       &scheduler_);
 }
 
-Status TfLiteSession::Create(string&& buffer, const SessionOptions& options,
-                             int num_pools, int num_interpreters_per_pool,
-                             std::unique_ptr<TfLiteSession>* tflite_session,
-                             ::google::protobuf::Map<string, SignatureDef>* signatures) {
+absl::Status TfLiteSession::Create(
+    string&& buffer, const SessionOptions& options, int num_pools,
+    int num_interpreters_per_pool,
+    std::unique_ptr<TfLiteSession>* tflite_session,
+    ::google::protobuf::Map<string, SignatureDef>* signatures) {
   auto model = tflite::FlatBufferModel::BuildFromModel(
       flatbuffers::GetRoot<tflite::Model>(buffer.data()));
   if (model == nullptr) {
@@ -544,26 +545,27 @@ TfLiteSession::TfLiteSession(
       model_(std::move(model)),
       interpreter_pool_(std::move(interpreter_pool)) {}
 
-Status TfLiteSession::Run(const std::vector<std::pair<string, Tensor>>& inputs,
-                          const std::vector<string>& output_tensor_names,
-                          const std::vector<string>& target_node_names,
-                          std::vector<Tensor>* outputs) {
+absl::Status TfLiteSession::Run(
+    const std::vector<std::pair<string, Tensor>>& inputs,
+    const std::vector<string>& output_tensor_names,
+    const std::vector<string>& target_node_names,
+    std::vector<Tensor>* outputs) {
   RunMetadata run_metadata;
   return Run(RunOptions(), inputs, output_tensor_names, target_node_names,
              outputs, &run_metadata);
 }
 
-Status TfLiteSession::Run(const RunOptions& run_options,
-                          const std::vector<std::pair<string, Tensor>>& inputs,
-                          const std::vector<string>& output_tensor_names,
-                          const std::vector<string>& target_node_names,
-                          std::vector<Tensor>* outputs,
-                          RunMetadata* run_metadata) {
+absl::Status TfLiteSession::Run(
+    const RunOptions& run_options,
+    const std::vector<std::pair<string, Tensor>>& inputs,
+    const std::vector<string>& output_tensor_names,
+    const std::vector<string>& target_node_names, std::vector<Tensor>* outputs,
+    RunMetadata* run_metadata) {
   return Run(run_options, inputs, output_tensor_names, target_node_names,
              outputs, run_metadata, thread::ThreadPoolOptions());
 }
 
-Status TfLiteSession::RunInternal(
+absl::Status TfLiteSession::RunInternal(
     const std::vector<int>& tflite_input_indices,
     const std::vector<std::vector<const Tensor*>>& merged_inputs,
     const std::vector<string>& output_tensor_names,
@@ -598,7 +600,7 @@ Status TfLiteSession::RunInternal(
   return absl::OkStatus();
 }
 
-Status TfLiteSession::Run(
+absl::Status TfLiteSession::Run(
     const RunOptions& run_options,
     const std::vector<std::pair<string, Tensor>>& inputs,
     const std::vector<string>& output_tensor_names,
@@ -629,7 +631,7 @@ Status TfLiteSession::Run(
         batch_size);
   }
   Notification done;
-  Status status;
+  absl::Status status;
   std::unique_ptr<TfLiteBatchTask> task;
   TfLiteBatchTask::CreateTfLiteBatchTask(&output_tensor_names, outputs, &done,
                                          &status, &task);
@@ -642,13 +644,14 @@ Status TfLiteSession::Run(
   return status;
 }
 
-Status TfLiteSession::ListDevices(std::vector<DeviceAttributes>* response) {
+absl::Status TfLiteSession::ListDevices(
+    std::vector<DeviceAttributes>* response) {
   return errors::Unimplemented("ListDevices is not yet supported.");
 }
 
-Status MergeInputTensors(const Batch<TfLiteBatchTask>& batch,
-                         std::vector<std::vector<const Tensor*>>* merged_inputs,
-                         int* batch_size) {
+absl::Status MergeInputTensors(
+    const Batch<TfLiteBatchTask>& batch,
+    std::vector<std::vector<const Tensor*>>* merged_inputs, int* batch_size) {
   if (batch.num_tasks() < 1) {
     return errors::Internal("Batch size expected to be positive; was ",
                             batch.num_tasks());
@@ -672,8 +675,8 @@ Status MergeInputTensors(const Batch<TfLiteBatchTask>& batch,
   return absl::OkStatus();
 }
 
-Status SplitOutputTensors(const std::vector<Tensor>& combined_outputs,
-                          Batch<TfLiteBatchTask>* batch, int batch_size) {
+absl::Status SplitOutputTensors(const std::vector<Tensor>& combined_outputs,
+                                Batch<TfLiteBatchTask>* batch, int batch_size) {
   std::vector<int64_t> task_sizes(batch->num_tasks());
   int total_size = 0;
   for (int i = 0; i < batch->num_tasks(); ++i) {
@@ -689,7 +692,7 @@ Status SplitOutputTensors(const std::vector<Tensor>& combined_outputs,
   for (int i = 0; i < combined_outputs.size(); i++) {
     const auto& output_tensor = combined_outputs[i];
     std::vector<Tensor> split_tensor;
-    const Status split_status =
+    const absl::Status split_status =
         tensor::Split(output_tensor, task_sizes, &split_tensor);
     if (!split_status.ok()) {
       return errors::Internal("Tensor split operation failed: ",
@@ -720,7 +723,7 @@ void TfLiteSession::ProcessBatch(
   // Regardless of the outcome, we need to propagate the status to the
   // individual tasks and signal that they are done. We use MakeCleanup() to
   // ensure that this happens no matter how we exit the method below.
-  Status status;
+  absl::Status status;
   auto finally = gtl::MakeCleanup([&status, &batch] {
     for (int i = 0; i < batch->num_tasks(); ++i) {
       TfLiteBatchTask* task = batch->mutable_task(i);
@@ -758,9 +761,9 @@
     }
   }
   if (all_tasks_timeout_exceeded) {
-    status = Status(static_cast<tensorflow::errors::Code>(
-                        absl::StatusCode::kResourceExhausted),
-                    "Run() timeout exceeded while waiting in batching queue");
+    status = absl::Status(
+        static_cast<absl::StatusCode>(absl::StatusCode::kResourceExhausted),
+        "Run() timeout exceeded while waiting in batching queue");
     return;
   }
 
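The hunks above are a mechanical return-type migration: every helper and TfLiteSession method that used the bare Status alias now spells out absl::Status, while the function bodies (absl::OkStatus(), errors::Internal(), TF_RETURN_IF_ERROR) are left unchanged, which works because in current TensorFlow releases tensorflow::Status is an alias for absl::Status. A minimal, hypothetical sketch of the pattern follows; DoWork and its error message are illustrative and not part of this commit.

#include "absl/status/status.h"

// Before the migration the signature would have read: Status DoWork(bool ok);
// after it, the Abseil type is written out explicitly.
absl::Status DoWork(bool ok) {
  if (!ok) {
    // Error helpers such as errors::Internal() in the real file also produce
    // an absl::Status, so call sites need no change.
    return absl::InternalError("DoWork failed");
  }
  return absl::OkStatus();  // unchanged by the migration
}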