
Commit c98377d

Automated Code Change
PiperOrigin-RevId: 690102492
1 parent 9a38b3a commit c98377d

21 files changed: +156 −146 lines
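This commit is an automated migration of status return types from tensorflow::Status to absl::Status across the servables. In current TensorFlow releases tensorflow::Status is a type alias of absl::Status, so the rename is behavior-preserving. A minimal sketch of the pattern, assuming that alias (DoWork is a hypothetical function, not part of this commit):

#include "absl/status/status.h"

// Before: tensorflow::Status DoWork();
// After (same type under the alias, new spelling):
absl::Status DoWork() {
  return absl::OkStatus();  // idiomatic OK status in both spellings
}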

tensorflow_serving/servables/tensorflow/tflite_interpreter_pool.cc

Lines changed: 3 additions & 3 deletions

@@ -58,7 +58,7 @@ TfLiteInterpreterWrapper::TfLiteInterpreterWrapper(
   }
 }
 
-tensorflow::Status TfLiteInterpreterWrapper::SetStringData(
+absl::Status TfLiteInterpreterWrapper::SetStringData(
     const std::vector<const Tensor*>& tensors, TfLiteTensor* tflite_tensor,
     int tensor_index, int batch_size) {
   // Format of the buffer for tflite:
@@ -140,7 +140,7 @@ TfLiteStatus TfLiteInterpreterWrapper::Invoke() {
   return status;
 }
 
-tensorflow::Status TfLiteInterpreterWrapper::CreateTfLiteInterpreterWrapper(
+absl::Status TfLiteInterpreterWrapper::CreateTfLiteInterpreterWrapper(
     const tflite::FlatBufferModel& model,
     const tensorflow::SessionOptions& options,
     std::unique_ptr<TfLiteInterpreterWrapper>& wrapper) {
@@ -183,7 +183,7 @@ tensorflow::Status TfLiteInterpreterWrapper::CreateTfLiteInterpreterWrapper(
   return absl::OkStatus();
 }
 
-tensorflow::Status TfLiteInterpreterPool::CreateTfLiteInterpreterPool(
+absl::Status TfLiteInterpreterPool::CreateTfLiteInterpreterPool(
     const tflite::FlatBufferModel* model,
     const tensorflow::SessionOptions& options, int pool_size,
     std::unique_ptr<TfLiteInterpreterPool>& interpreter_pool) {
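Because the old and new names denote the same type, call sites need no changes; TF_RETURN_IF_ERROR keeps propagating non-OK statuses exactly as before. A hypothetical caller of the factory above, sketched under that assumption (not code from this commit):

absl::Status UseWrapper(const tflite::FlatBufferModel& model,
                        const tensorflow::SessionOptions& options) {
  std::unique_ptr<TfLiteInterpreterWrapper> wrapper;
  // The macro works identically for tensorflow::Status and absl::Status.
  TF_RETURN_IF_ERROR(TfLiteInterpreterWrapper::CreateTfLiteInterpreterWrapper(
      model, options, wrapper));
  return absl::OkStatus();
}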

tensorflow_serving/servables/tensorflow/tflite_session.cc

Lines changed: 44 additions & 41 deletions

@@ -48,7 +48,7 @@ namespace serving {
 // Map of TFLite tensor name to <TF TensorInfo, TFLite tensor index>.
 namespace {
 
-Status TfLiteTypeToTfType(TfLiteType tflite_type, DataType* type) {
+absl::Status TfLiteTypeToTfType(TfLiteType tflite_type, DataType* type) {
   switch (tflite_type) {
     case kTfLiteNoType:
       *type = tensorflow::DT_INVALID;
@@ -96,8 +96,8 @@ std::string TfToTfLiteLegacyTensorName(const string& tf_name) {
 
 // Checks that an input/output tensor actually exists. If not, attempts to
 // update the tensor name with legacy TFLite tensor naming.
-Status FixTfLiteTensorName(const std::map<string, int>& tensor_name_map,
-                           string& tensor_name) {
+absl::Status FixTfLiteTensorName(const std::map<string, int>& tensor_name_map,
+                                 string& tensor_name) {
   if (tensor_name_map.find(tensor_name) != tensor_name_map.end()) {
     return absl::OkStatus();
   }
@@ -112,8 +112,8 @@ Status FixTfLiteTensorName(const std::map<string, int>& tensor_name_map,
   return errors::Internal("Unknown tensor '", tensor_name, "'.");
 }
 
-Status TfLiteTensorToTensorInfo(const TfLiteTensor* tflite_tensor,
-                                TensorInfo* info) {
+absl::Status TfLiteTensorToTensorInfo(const TfLiteTensor* tflite_tensor,
+                                      TensorInfo* info) {
   DataType tf_type;
   TF_RETURN_IF_ERROR(TfLiteTypeToTfType(tflite_tensor->type, &tf_type));
   info->set_dtype(tf_type);
@@ -125,8 +125,8 @@ Status TfLiteTensorToTensorInfo(const TfLiteTensor* tflite_tensor,
   return absl::OkStatus();
 }
 
-Status GetTensorInfoMap(const tflite::Interpreter* interpreter, bool input,
-                        TensorInfoMap* infomap) {
+absl::Status GetTensorInfoMap(const tflite::Interpreter* interpreter,
+                              bool input, TensorInfoMap* infomap) {
   const std::vector<int>& indices =
       input ? interpreter->inputs() : interpreter->outputs();
   const string& input_str = input ? "Input" : "Output";
@@ -156,7 +156,7 @@ std::vector<int> TensorDims(const Tensor& tensor) {
 }
 
 // Create output tensors making sure they are the right size. //
-Status CreateOutputTensors(
+absl::Status CreateOutputTensors(
     std::unique_ptr<internal::TfLiteInterpreterWrapper>& interpreter_wrapper,
     const std::vector<string>& output_tensor_names,
     const std::map<string, int>& output_tensor_to_idx,
@@ -184,7 +184,7 @@ Status CreateOutputTensors(
   return absl::OkStatus();
 }
 
-Status SetInputAndInvokeMiniBatch(
+absl::Status SetInputAndInvokeMiniBatch(
     std::unique_ptr<internal::TfLiteInterpreterWrapper>& interpreter_wrapper,
     const std::vector<int>& tflite_input_indices,
     const std::vector<std::vector<const Tensor*>>& inputs, int batch_size,
@@ -254,7 +254,7 @@ Status SetInputAndInvokeMiniBatch(
   return absl::OkStatus();
 }
 
-Status SetMiniBatchOutput(
+absl::Status SetMiniBatchOutput(
     std::unique_ptr<internal::TfLiteInterpreterWrapper>& interpreter_wrapper,
     const std::map<int, Tensor*>& tflite_idx_to_output_tensor,
     std::vector<Tensor>* outputs) {
@@ -302,7 +302,7 @@ int GetModelBatchSize(const tflite::Model* model) {
 }  // namespace
 
 // Split an input task up into multiple tasks.
-Status TfLiteSession::SplitTfLiteInputTask(
+absl::Status TfLiteSession::SplitTfLiteInputTask(
     std::unique_ptr<TfLiteBatchTask>* input_task_ptr,
     int open_batch_remaining_slot, int max_batch_size,
     std::vector<std::unique_ptr<TfLiteBatchTask>>* output_tasks) {
@@ -389,7 +389,7 @@ Status TfLiteSession::SplitTfLiteInputTask(
   return absl::OkStatus();
 }
 
-Status TfLiteSession::CreateDefaultBasicBatchScheduler(
+absl::Status TfLiteSession::CreateDefaultBasicBatchScheduler(
     const BasicBatchScheduler<TfLiteBatchTask>::Options& options,
     std::function<void(std::unique_ptr<Batch<TfLiteBatchTask>>)>
         process_batch_callback,
@@ -401,7 +401,7 @@ Status TfLiteSession::CreateDefaultBasicBatchScheduler(
   return absl::OkStatus();
 }
 
-Status TfLiteSession::SetScheduler(
+absl::Status TfLiteSession::SetScheduler(
     const SchedulerCreator& scheduler_creator,
     const BasicBatchScheduler<TfLiteBatchTask>::Options& options) {
   use_fixed_batch_size_ = true;
@@ -415,10 +415,11 @@ Status TfLiteSession::SetScheduler(
       &scheduler_);
 }
 
-Status TfLiteSession::Create(string&& buffer, const SessionOptions& options,
-                             int num_pools, int num_interpreters_per_pool,
-                             std::unique_ptr<TfLiteSession>* tflite_session,
-                             ::google::protobuf::Map<string, SignatureDef>* signatures) {
+absl::Status TfLiteSession::Create(
+    string&& buffer, const SessionOptions& options, int num_pools,
+    int num_interpreters_per_pool,
+    std::unique_ptr<TfLiteSession>* tflite_session,
+    ::google::protobuf::Map<string, SignatureDef>* signatures) {
   auto model = tflite::FlatBufferModel::BuildFromModel(
       flatbuffers::GetRoot<tflite::Model>(buffer.data()));
   if (model == nullptr) {
@@ -544,26 +545,27 @@ TfLiteSession::TfLiteSession(
       model_(std::move(model)),
       interpreter_pool_(std::move(interpreter_pool)) {}
 
-Status TfLiteSession::Run(const std::vector<std::pair<string, Tensor>>& inputs,
-                          const std::vector<string>& output_tensor_names,
-                          const std::vector<string>& target_node_names,
-                          std::vector<Tensor>* outputs) {
+absl::Status TfLiteSession::Run(
+    const std::vector<std::pair<string, Tensor>>& inputs,
+    const std::vector<string>& output_tensor_names,
+    const std::vector<string>& target_node_names,
+    std::vector<Tensor>* outputs) {
   RunMetadata run_metadata;
   return Run(RunOptions(), inputs, output_tensor_names, target_node_names,
              outputs, &run_metadata);
 }
 
-Status TfLiteSession::Run(const RunOptions& run_options,
-                          const std::vector<std::pair<string, Tensor>>& inputs,
-                          const std::vector<string>& output_tensor_names,
-                          const std::vector<string>& target_node_names,
-                          std::vector<Tensor>* outputs,
-                          RunMetadata* run_metadata) {
+absl::Status TfLiteSession::Run(
+    const RunOptions& run_options,
+    const std::vector<std::pair<string, Tensor>>& inputs,
+    const std::vector<string>& output_tensor_names,
+    const std::vector<string>& target_node_names, std::vector<Tensor>* outputs,
+    RunMetadata* run_metadata) {
   return Run(run_options, inputs, output_tensor_names, target_node_names,
              outputs, run_metadata, thread::ThreadPoolOptions());
 }
 
-Status TfLiteSession::RunInternal(
+absl::Status TfLiteSession::RunInternal(
     const std::vector<int>& tflite_input_indices,
     const std::vector<std::vector<const Tensor*>>& merged_inputs,
     const std::vector<string>& output_tensor_names,
@@ -598,7 +600,7 @@ Status TfLiteSession::RunInternal(
   return absl::OkStatus();
 }
 
-Status TfLiteSession::Run(
+absl::Status TfLiteSession::Run(
     const RunOptions& run_options,
     const std::vector<std::pair<string, Tensor>>& inputs,
    const std::vector<string>& output_tensor_names,
@@ -629,7 +631,7 @@ Status TfLiteSession::Run(
                               batch_size);
   }
   Notification done;
-  Status status;
+  absl::Status status;
   std::unique_ptr<TfLiteBatchTask> task;
   TfLiteBatchTask::CreateTfLiteBatchTask(&output_tensor_names, outputs, &done,
                                          &status, &task);
@@ -642,13 +644,14 @@ Status TfLiteSession::Run(
   return status;
 }
 
-Status TfLiteSession::ListDevices(std::vector<DeviceAttributes>* response) {
+absl::Status TfLiteSession::ListDevices(
+    std::vector<DeviceAttributes>* response) {
   return errors::Unimplemented("ListDevices is not yet supported.");
 }
 
-Status MergeInputTensors(const Batch<TfLiteBatchTask>& batch,
-                         std::vector<std::vector<const Tensor*>>* merged_inputs,
-                         int* batch_size) {
+absl::Status MergeInputTensors(
+    const Batch<TfLiteBatchTask>& batch,
+    std::vector<std::vector<const Tensor*>>* merged_inputs, int* batch_size) {
   if (batch.num_tasks() < 1) {
     return errors::Internal("Batch size expected to be positive; was ",
                             batch.num_tasks());
@@ -672,8 +675,8 @@ Status MergeInputTensors(const Batch<TfLiteBatchTask>& batch,
   return absl::OkStatus();
 }
 
-Status SplitOutputTensors(const std::vector<Tensor>& combined_outputs,
-                          Batch<TfLiteBatchTask>* batch, int batch_size) {
+absl::Status SplitOutputTensors(const std::vector<Tensor>& combined_outputs,
+                                Batch<TfLiteBatchTask>* batch, int batch_size) {
   std::vector<int64_t> task_sizes(batch->num_tasks());
   int total_size = 0;
   for (int i = 0; i < batch->num_tasks(); ++i) {
@@ -689,7 +692,7 @@ Status SplitOutputTensors(const std::vector<Tensor>& combined_outputs,
   for (int i = 0; i < combined_outputs.size(); i++) {
     const auto& output_tensor = combined_outputs[i];
     std::vector<Tensor> split_tensor;
-    const Status split_status =
+    const absl::Status split_status =
         tensor::Split(output_tensor, task_sizes, &split_tensor);
     if (!split_status.ok()) {
       return errors::Internal("Tensor split operation failed: ",
@@ -720,7 +723,7 @@ void TfLiteSession::ProcessBatch(
   // Regardless of the outcome, we need to propagate the status to the
   // individual tasks and signal that they are done. We use MakeCleanup() to
   // ensure that this happens no matter how we exit the method below.
-  Status status;
+  absl::Status status;
   auto finally = gtl::MakeCleanup([&status, &batch] {
     for (int i = 0; i < batch->num_tasks(); ++i) {
       TfLiteBatchTask* task = batch->mutable_task(i);
@@ -758,9 +761,9 @@
     }
   }
   if (all_tasks_timeout_exceeded) {
-    status = Status(static_cast<tensorflow::errors::Code>(
-                        absl::StatusCode::kResourceExhausted),
-                    "Run() timeout exceeded while waiting in batching queue");
+    status = absl::Status(
+        static_cast<absl::StatusCode>(absl::StatusCode::kResourceExhausted),
+        "Run() timeout exceeded while waiting in batching queue");
     return;
   }
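A side note on the ProcessBatch hunk above: once the enum argument is already an absl::StatusCode, the static_cast is a no-op that the automated change preserves verbatim. Two equivalent hand-written spellings of the same status, as a sketch (not code from this commit):

absl::Status a(absl::StatusCode::kResourceExhausted,
               "Run() timeout exceeded while waiting in batching queue");
absl::Status b = absl::ResourceExhaustedError(
    "Run() timeout exceeded while waiting in batching queue");  // same value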

tensorflow_serving/servables/tensorflow/tflite_session_test.cc

Lines changed: 8 additions & 8 deletions

@@ -638,9 +638,9 @@ TEST(TfLiteSession, SimpleSignatureDefAndRun) {
       test::AsTensor<tstring>({"a", "b", "c", "d"}, TensorShape({2, 2})));
 }
 
-Status BuildSessionInBatch(std::unique_ptr<TfLiteSession>* sess,
-                           bool use_model_batch_size,
-                           const string& model_path) {
+absl::Status BuildSessionInBatch(std::unique_ptr<TfLiteSession>* sess,
+                                 bool use_model_batch_size,
+                                 const string& model_path) {
   std::string model_bytes;
   TF_RETURN_IF_ERROR(ReadFileToString(
       Env::Default(), test_util::TestSrcDirPath(model_path), &model_bytes));
@@ -653,21 +653,21 @@ Status BuildSessionInBatch(std::unique_ptr<TfLiteSession>* sess,
   tflite_model->UnPackTo(mutable_model.get(), nullptr);
 
   if (mutable_model->subgraphs.size() != 1) {
-    return Status(
+    return absl::Status(
         static_cast<absl::StatusCode>(absl::StatusCode::kInvalidArgument),
         strings::StrCat("Model subgraph size ",
                         mutable_model->subgraphs.size(), " not equal to 1"));
   }
   auto* subgraph = mutable_model->subgraphs[0].get();
   if (subgraph->inputs.size() != 1) {
-    return Status(
+    return absl::Status(
         static_cast<absl::StatusCode>(absl::StatusCode::kInvalidArgument),
         strings::StrCat("Model subgraph input size ",
                         mutable_model->subgraphs.size(), " not equal to 1"));
   }
   auto* tensor = subgraph->tensors[subgraph->inputs[0]].get();
   if (tensor->shape[0] != 1) {
-    return Status(
+    return absl::Status(
         static_cast<absl::StatusCode>(absl::StatusCode::kInvalidArgument),
         strings::StrCat("Model subgraph input shape[0] ",
                         mutable_model->subgraphs.size(), " not equal to 1"));
@@ -693,13 +693,13 @@ Status BuildSessionInBatch(std::unique_ptr<TfLiteSession>* sess,
                                   ? model_batch_size
                                   : kBatchSize / num_tflite_interpreters;
   if (scheduler_options.max_execution_batch_size != expected_batch_size) {
-    return Status(
+    return absl::Status(
        static_cast<absl::StatusCode>(absl::StatusCode::kInvalidArgument),
        strings::StrCat("Scheulder max_execution_batch_size ",
                        scheduler_options.max_execution_batch_size,
                        " not equal to expected ", expected_batch_size));
   }
-  return Status();
+  return absl::Status();
 }
 
 using TfLiteSessionBatchSizeTest = ::testing::TestWithParam<bool>;
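The test helper's final "return absl::Status();" relies on the fact that a default-constructed absl::Status is OK. A self-contained sketch of that equivalence (not code from this commit):

#include <cassert>
#include "absl/status/status.h"

int main() {
  absl::Status s;                 // default-constructed: s.ok() is true
  assert(s == absl::OkStatus());  // identical to the idiomatic spelling
}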

tensorflow_serving/servables/tensorflow/tfrt_classification_service.cc

Lines changed: 4 additions & 4 deletions

@@ -29,20 +29,20 @@ limitations under the License.
 namespace tensorflow {
 namespace serving {
 
-Status TFRTClassificationServiceImpl::Classify(
+absl::Status TFRTClassificationServiceImpl::Classify(
     const Servable::RunOptions& run_options, ServerCore* core,
     const ClassificationRequest& request, ClassificationResponse* response) {
   // Verify Request Metadata and create a ServableRequest
   if (!request.has_model_spec()) {
-    return tensorflow::Status(absl::StatusCode::kInvalidArgument,
-                              "Missing ModelSpec");
+    return absl::Status(absl::StatusCode::kInvalidArgument,
+                        "Missing ModelSpec");
   }
 
   return ClassifyWithModelSpec(run_options, core, request.model_spec(), request,
                                response);
 }
 
-Status TFRTClassificationServiceImpl::ClassifyWithModelSpec(
+absl::Status TFRTClassificationServiceImpl::ClassifyWithModelSpec(
     const Servable::RunOptions& run_options, ServerCore* core,
     const ModelSpec& model_spec, const ClassificationRequest& request,
     ClassificationResponse* response) {
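The Classify hunk swaps the tensorflow::Status constructor for the identical absl::Status one; Abseil also offers canonical-code helpers that build the same value. A sketch (not part of this commit):

absl::Status x(absl::StatusCode::kInvalidArgument, "Missing ModelSpec");
absl::Status y = absl::InvalidArgumentError("Missing ModelSpec");
// x == y: both carry kInvalidArgument and the same message.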

tensorflow_serving/servables/tensorflow/tfrt_classifier.cc

Lines changed: 7 additions & 7 deletions

@@ -36,7 +36,7 @@ limitations under the License.
 namespace tensorflow {
 namespace serving {
 
-Status PreProcessClassification(
+absl::Status PreProcessClassification(
     const tfrt::FunctionMetadata& function_metadata) {
   if (function_metadata.GetInputNames().size() != 1) {
     return errors::InvalidArgument(
@@ -78,7 +78,7 @@ Status PreProcessClassification(
   return absl::OkStatus();
 }
 
-Status PostProcessClassificationResult(
+absl::Status PostProcessClassificationResult(
     int num_examples, const std::vector<string>& output_names,
     const std::vector<Tensor>& output_tensors, ClassificationResult* result) {
   if (output_tensors.size() != output_names.size()) {
@@ -167,11 +167,11 @@ Status PostProcessClassificationResult(
   return absl::OkStatus();
 }
 
-Status RunClassify(const tfrt::SavedModel::RunOptions& run_options,
-                   const absl::optional<int64_t>& servable_version,
-                   tfrt::SavedModel* saved_model,
-                   const ClassificationRequest& request,
-                   ClassificationResponse* response) {
+absl::Status RunClassify(const tfrt::SavedModel::RunOptions& run_options,
+                         const absl::optional<int64_t>& servable_version,
+                         tfrt::SavedModel* saved_model,
+                         const ClassificationRequest& request,
+                         ClassificationResponse* response) {
   const string function_name = request.model_spec().signature_name().empty()
                                    ? kDefaultServingSignatureDefKey
                                    : request.model_spec().signature_name();
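As with the other files, the RunClassify rename is source-compatible for callers. A hypothetical call site, sketched assuming run_options, servable_version, saved_model, request, and response are already set up:

absl::Status status = RunClassify(run_options, servable_version, saved_model,
                                  request, &response);
if (!status.ok()) {
  LOG(ERROR) << "Classify failed: " << status;  // streams code and message
}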
